code stringlengths 31 1.05M | apis list | extract_api stringlengths 97 1.91M |
|---|---|---|
# Copyright (C) 2020 GreenWaves Technologies, SAS
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
import numpy as np
from graph.types.activations import SoftMaxParameters
from quantization.kernels.kernel_base import KernelBase, params_type, qrec_type
from quantization.new_qrec import QRec
from utils.exp_17_15 import exp_fp_17_15
def softmax_func(arg, axis=None):
    """Numerically stable softmax of ``arg`` along ``axis`` (default: last axis).

    The per-slice maximum is subtracted before exponentiation to avoid
    overflow; a 1-D input is returned flattened, matching its input shape.
    """
    reduce_axis = -1 if axis is None else axis
    shifted = arg - np.max(arg, axis=reduce_axis, keepdims=True)
    numer = np.exp(shifted)
    result = numer / np.sum(numer, axis=reduce_axis, keepdims=True)
    return result.flatten() if len(arg.shape) == 1 else result
@params_type(SoftMaxParameters)
@qrec_type('symmetric')
class SoftMaxSymmetric(KernelBase):
    """Softmax kernel for the 'symmetric' quantization scheme.

    The input is dequantized to float, the softmax is evaluated in
    floating point, and the result is re-quantized on the way out.
    """
    @classmethod
    def execute(cls, params,
                in_tensors,
                qrec: QRec,
                **kwargs):
        # Promote float overflow during exp() from a warning to an error.
        saved_errstate = np.seterr(over='raise')
        prepared = qrec.prepare_inputs(
            params, in_tensors, ktype="symmetric")
        # TODO - Implement properly quantized version
        float_in = qrec.in_qs[0].dequantize(prepared[0])
        quantized = qrec.out_qs[0].quantize(
            softmax_func(float_in, axis=params.axis))
        np.seterr(**saved_errstate)
        return qrec.get_outputs(params, [quantized], ktype="symmetric")
@params_type(SoftMaxParameters)
@qrec_type('scaled')
class SoftMaxSymmetricMult(KernelBase):
    """Softmax kernel for the 'scaled' quantization scheme.

    Evaluated entirely in fixed point via exp_fp_17_15 — no float
    round trip. Output is int16 probabilities in roughly Q15.
    """
    @classmethod
    def execute(cls, params,
                in_tensors,
                qrec: QRec,
                **kwargs):
        # in_tensor = in_tensors[0].flatten()
        in_tensor = in_tensors[0].astype(np.int32)
        # Subtract the per-slice maximum so exponent arguments are <= 0
        # (standard overflow-avoidance trick for softmax).
        max_val = np.max(in_tensor, axis=params.axis, keepdims=True)
        # Left-shift count that aligns the input's quantization scale into
        # the Q17.15 domain expected by exp_fp_17_15.
        # NOTE(review): assumes in_qs[0].scale <= 1 so that norm <= 15 and
        # the shift below stays non-negative — TODO confirm.
        norm = 15 + np.ceil(np.log2(qrec.in_qs[0].scale)).astype(np.int32)
        exp = exp_fp_17_15((in_tensor.astype(np.int32) - max_val) << (norm))
        sum_exp = np.sum(exp, axis=params.axis, keepdims=True)
        # Q15 reciprocal of the exponent sum: ((2^15 - 1) << 15) // sum.
        inv_sum = (np.array([(1 << 15)-1], dtype=np.uint32) << 15)//sum_exp
        # Multiply, round (add 1 << 14 = half an LSB) and shift back to Q15.
        res = np.abs((exp * inv_sum + (1 << 14)) >> 15)
        iinfo = np.iinfo(np.int16)
        # Saturate to the int16 range and restore the declared output shape.
        res = np.clip(res, iinfo.min, iinfo.max).astype(
            np.int16).reshape(params.out_dims[0].shape)
        return qrec.get_outputs(params, [res], ktype="symmetric")
| [
"numpy.sum",
"numpy.abs",
"quantization.kernels.kernel_base.qrec_type",
"numpy.seterr",
"numpy.log2",
"numpy.iinfo",
"numpy.clip",
"quantization.kernels.kernel_base.params_type",
"numpy.max",
"numpy.array",
"numpy.exp"
] | [((1211, 1241), 'quantization.kernels.kernel_base.params_type', 'params_type', (['SoftMaxParameters'], {}), '(SoftMaxParameters)\n', (1222, 1241), False, 'from quantization.kernels.kernel_base import KernelBase, params_type, qrec_type\n'), ((1243, 1265), 'quantization.kernels.kernel_base.qrec_type', 'qrec_type', (['"""symmetric"""'], {}), "('symmetric')\n", (1252, 1265), False, 'from quantization.kernels.kernel_base import KernelBase, params_type, qrec_type\n'), ((1870, 1900), 'quantization.kernels.kernel_base.params_type', 'params_type', (['SoftMaxParameters'], {}), '(SoftMaxParameters)\n', (1881, 1900), False, 'from quantization.kernels.kernel_base import KernelBase, params_type, qrec_type\n'), ((1902, 1921), 'quantization.kernels.kernel_base.qrec_type', 'qrec_type', (['"""scaled"""'], {}), "('scaled')\n", (1911, 1921), False, 'from quantization.kernels.kernel_base import KernelBase, params_type, qrec_type\n'), ((1079, 1088), 'numpy.exp', 'np.exp', (['v'], {}), '(v)\n', (1085, 1088), True, 'import numpy as np\n'), ((1029, 1066), 'numpy.max', 'np.max', (['arg'], {'axis': 'axis', 'keepdims': '(True)'}), '(arg, axis=axis, keepdims=True)\n', (1035, 1066), True, 'import numpy as np\n'), ((1103, 1142), 'numpy.sum', 'np.sum', (['exp_v'], {'axis': 'axis', 'keepdims': '(True)'}), '(exp_v, axis=axis, keepdims=True)\n', (1109, 1142), True, 'import numpy as np\n'), ((1450, 1473), 'numpy.seterr', 'np.seterr', ([], {'over': '"""raise"""'}), "(over='raise')\n", (1459, 1473), True, 'import numpy as np\n'), ((1774, 1794), 'numpy.seterr', 'np.seterr', ([], {}), '(**old_err)\n', (1783, 1794), True, 'import numpy as np\n'), ((2206, 2256), 'numpy.max', 'np.max', (['in_tensor'], {'axis': 'params.axis', 'keepdims': '(True)'}), '(in_tensor, axis=params.axis, keepdims=True)\n', (2212, 2256), True, 'import numpy as np\n'), ((2427, 2471), 'numpy.sum', 'np.sum', (['exp'], {'axis': 'params.axis', 'keepdims': '(True)'}), '(exp, axis=params.axis, keepdims=True)\n', (2433, 2471), True, 
'import numpy as np\n'), ((2562, 2601), 'numpy.abs', 'np.abs', (['(exp * inv_sum + (1 << 14) >> 15)'], {}), '(exp * inv_sum + (1 << 14) >> 15)\n', (2568, 2601), True, 'import numpy as np\n'), ((2620, 2638), 'numpy.iinfo', 'np.iinfo', (['np.int16'], {}), '(np.int16)\n', (2628, 2638), True, 'import numpy as np\n'), ((2491, 2533), 'numpy.array', 'np.array', (['[(1 << 15) - 1]'], {'dtype': 'np.uint32'}), '([(1 << 15) - 1], dtype=np.uint32)\n', (2499, 2533), True, 'import numpy as np\n'), ((2285, 2313), 'numpy.log2', 'np.log2', (['qrec.in_qs[0].scale'], {}), '(qrec.in_qs[0].scale)\n', (2292, 2313), True, 'import numpy as np\n'), ((2653, 2687), 'numpy.clip', 'np.clip', (['res', 'iinfo.min', 'iinfo.max'], {}), '(res, iinfo.min, iinfo.max)\n', (2660, 2687), True, 'import numpy as np\n')] |
"""
Implementation of the Determinant-based Mutual Information (DMI) Mechanism (Kong 2019).
@author: <NAME> <<EMAIL>>
"""
from itertools import combinations
from random import shuffle
import numpy as np
def dmi_mechanism(grader_dict, assignment_num, cluster_size):
    """
    Computes payments for students according to the DMI mechanism.

    Students are mutated in place: each grader's ``payment`` attribute is
    incremented by det(M_1) * det(M_2) for every pair of graders in a
    cluster, where M_1/M_2 count agreement patterns over the first/second
    half of the cluster's tasks.
    Parameters
    ----------
    grader_dict : dict.
        Maps a Submission object to a list of graders (Student objects).
    assignment_num : int.
        Unique identifier of the assignment for which payments are being computed.
    cluster_size : int.
        The size of the clusters in which students grade submissions (should evenly divide the number of students.)
        All students in a cluster grade the same submissions (the submissions from another cluster of students.)
    Returns
    -------
    None.
    """
    # Coarsen the 0-10 grade scale to binary: 0-6 -> 0, 7-10 -> 1,
    # so the agreement matrices below are 2x2.
    grade_map = {
        0: 0,
        1: 0,
        2: 0,
        3: 0,
        4: 0,
        5: 0,
        6: 0,
        7: 1,
        8: 1,
        9: 1,
        10: 1
    }
    submission_dict = {submission.student_id: submission for submission in grader_dict.keys()}
    task_list = [submission.student_id for submission in grader_dict.keys()]
    task_list.sort()
    # Walk the sorted student ids in cluster-sized strides.
    # NOTE(review): submission_dict[i] assumes student ids are exactly
    # 0..n-1 so the loop index is a valid key — TODO confirm with callers.
    for i in range(0, len(task_list), cluster_size):
        submission0 = submission_dict[i]
        graders = grader_dict[submission0]
        tasks = [task_list[i + j] for j in range(cluster_size)]
        # Random task order so the first/second-half split is unbiased.
        shuffle(tasks)
        for (j, k) in combinations(graders, 2):
            # 2x2 agreement counts over the two halves of the tasks.
            M_1 = np.zeros((2, 2), dtype=np.uint8)
            M_2 = np.zeros((2, 2), dtype=np.uint8)
            for t in range(cluster_size):
                j_grade = grade_map[j.grades[assignment_num][tasks[t]]]
                k_grade = grade_map[k.grades[assignment_num][tasks[t]]]
                if t < cluster_size/2:
                    M_1[j_grade, k_grade] += 1
                else:
                    M_2[j_grade, k_grade] += 1
            # DMI payment: product of the two determinants (Kong 2019).
            d1 = np.linalg.det(M_1)
            d2 = np.linalg.det(M_2)
            score = d1*d2
            j.payment += score
            k.payment += score | [
"itertools.combinations",
"random.shuffle",
"numpy.linalg.det",
"numpy.zeros"
] | [((1530, 1544), 'random.shuffle', 'shuffle', (['tasks'], {}), '(tasks)\n', (1537, 1544), False, 'from random import shuffle\n'), ((1576, 1600), 'itertools.combinations', 'combinations', (['graders', '(2)'], {}), '(graders, 2)\n', (1588, 1600), False, 'from itertools import combinations\n'), ((1620, 1652), 'numpy.zeros', 'np.zeros', (['(2, 2)'], {'dtype': 'np.uint8'}), '((2, 2), dtype=np.uint8)\n', (1628, 1652), True, 'import numpy as np\n'), ((1671, 1703), 'numpy.zeros', 'np.zeros', (['(2, 2)'], {'dtype': 'np.uint8'}), '((2, 2), dtype=np.uint8)\n', (1679, 1703), True, 'import numpy as np\n'), ((2123, 2141), 'numpy.linalg.det', 'np.linalg.det', (['M_1'], {}), '(M_1)\n', (2136, 2141), True, 'import numpy as np\n'), ((2159, 2177), 'numpy.linalg.det', 'np.linalg.det', (['M_2'], {}), '(M_2)\n', (2172, 2177), True, 'import numpy as np\n')] |
"""Contains tests for the private _solvers module"""
from numpy import insert, loadtxt, allclose
import pytest
from qmpy.solvers import schroedinger
from qmpy._fileio import _read_schrodinger
PROBLEMS = ['inf_potwell', 'fin_potwell', 'double_well', 'asym_potwell',
            'harm_osci']
@pytest.mark.parametrize('problem', PROBLEMS)
def test_computing(problem):
    """
    Tests whether the computed wavefunctions and energies match the
    reference data.
    """
    specs = _read_schrodinger('tests/test_data/{}.inp'.format(problem))
    settings = dict()
    settings['mass'] = specs['mass']
    settings['xcords'] = specs['interpolxydecs'][:, 0]
    settings['potential'] = specs['interpolxydecs'][:, 1]
    settings['xopt'] = specs['xopt']
    kind = specs['interpoltype']
    ev_range = (specs['first_ev'] - 1, specs['last_ev'] - 1)
    energies, wfuncs, pot = schroedinger(settings, interpol=True,
                                         interpoltype=kind,
                                         select_range=ev_range)
    computed = insert(wfuncs.T, 0, values=pot[:, 1].T, axis=1)
    ref_energies = loadtxt('tests/test_data/energies_{}.ref'.format(problem))
    ref_wfuncs = loadtxt('tests/test_data/wfuncs_{}.ref'.format(problem))
    assert allclose(ref_energies, energies)
    assert allclose(ref_wfuncs, computed)
| [
"numpy.allclose",
"qmpy.solvers.schroedinger",
"numpy.insert",
"qmpy._fileio._read_schrodinger",
"pytest.mark.parametrize"
] | [((295, 339), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""problem"""', 'PROBLEMS'], {}), "('problem', PROBLEMS)\n", (318, 339), False, 'import pytest\n'), ((538, 561), 'qmpy._fileio._read_schrodinger', '_read_schrodinger', (['path'], {}), '(path)\n', (555, 561), False, 'from qmpy._fileio import _read_schrodinger\n'), ((873, 943), 'qmpy.solvers.schroedinger', 'schroedinger', (['vals'], {'interpol': '(True)', 'interpoltype': 'kind', 'select_range': 'evs'}), '(vals, interpol=True, interpoltype=kind, select_range=evs)\n', (885, 943), False, 'from qmpy.solvers import schroedinger\n'), ((1054, 1101), 'numpy.insert', 'insert', (['wfuncs.T', '(0)'], {'values': 'pot[:, 1].T', 'axis': '(1)'}), '(wfuncs.T, 0, values=pot[:, 1].T, axis=1)\n', (1060, 1101), False, 'from numpy import insert, loadtxt, allclose\n'), ((1266, 1303), 'numpy.allclose', 'allclose', (['ref_energies', 'comp_energies'], {}), '(ref_energies, comp_energies)\n', (1274, 1303), False, 'from numpy import insert, loadtxt, allclose\n'), ((1315, 1347), 'numpy.allclose', 'allclose', (['ref_wfuncs', 'comp_funcs'], {}), '(ref_wfuncs, comp_funcs)\n', (1323, 1347), False, 'from numpy import insert, loadtxt, allclose\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
__author__ = "<NAME>"
__email__ = "<EMAIL>"
import logging
import time
import multiprocessing
import cflib.crtp
from cflib.crazyflie import Crazyflie
from cflib.crazyflie.log import LogConfig
from cflib.crazyflie.syncCrazyflie import SyncCrazyflie
from cflib.crazyflie.syncLogger import SyncLogger
import matplotlib.pyplot as plt
from matplotlib.animation import FuncAnimation
from matplotlib.gridspec import GridSpec
from mpl_toolkits.mplot3d import Axes3D
from matplotlib import style
from matplotlib import use
import numpy as np
import cftune
import cflog
# design requirements
overshoot_tgt = .1 # 10% overshoot
rise_time_tgt = 1 # 1s rise time (5% -> 95%)
settle_time_tgt = 3 # 3s settling time (5%)
style.use('seaborn-whitegrid')
URI = 'radio://0/80/2M/E7E7E7E711'
alt_takeoff = .5 # target takeoff altitude [m]
alt_target = 1 # setpoint altitude [m]
# x y z YAW [m, m, m, deg]
setpoint_pos = np.array(
[.5, .5, alt_target, 0]
)
#
# Crazyflie control methods
#
def takeoff():
    """Stream the takeoff setpoint (0.5, 0.5, alt_takeoff) for ~6 s at 10 Hz.

    Uses the module-level ``cf`` bound in the main block.
    """
    for _ in range(60):
        cf.commander.send_position_setpoint(.5, .5, alt_takeoff, 0)
        time.sleep(.1)
def land():
    """Stream a low hover setpoint (0.1 m at x=y=0.5) for ~1 s at 10 Hz.

    Uses the module-level ``cf`` bound in the main block.
    """
    for _ in range(10):
        cf.commander.send_position_setpoint(.5, .5, 0.1, 0.0)
        time.sleep(0.1)
def alt_setpoint(cf, t):
    """Hold the module-level position setpoint, raised by alt_takeoff, for t seconds."""
    target = setpoint_pos + np.array([0, 0, alt_takeoff, 0])
    deadline = time.time() + t
    while time.time() < deadline:
        cf.commander.send_position_setpoint(*target)
        time.sleep(0.1)
    time.sleep(0.1)
def wait_for_position_estimator(scf):
    """Block until the Kalman filter's position variance has settled.

    Polls kalman.varPX/PY/PZ every 500 ms and returns once the spread of
    the last ten samples on every axis drops below a fixed threshold.
    """
    print('Waiting for estimator to find position...')
    var_names = ('kalman.varPX', 'kalman.varPY', 'kalman.varPZ')
    log_config = LogConfig(name='Kalman Variance', period_in_ms=500)
    for name in var_names:
        log_config.add_variable(name, 'float')
    # Ten-sample rolling history per axis, seeded high so the spread
    # check cannot pass before real data arrives.
    histories = {name: [1000] * 10 for name in var_names}
    threshold = 0.001
    with SyncLogger(scf, log_config) as logger:
        for log_entry in logger:
            data = log_entry[1]
            for name in var_names:
                histories[name].append(data[name])
                histories[name].pop(0)
            if all(max(hist) - min(hist) < threshold
                   for hist in histories.values()):
                break
    print("Estimator reset.")
def reset_estimator(scf):
    """Pulse the Kalman estimator reset parameter, then wait for convergence."""
    crazyflie = scf.cf
    crazyflie.param.set_value('kalman.resetEstimation', '1')
    time.sleep(0.1)
    crazyflie.param.set_value('kalman.resetEstimation', '0')
    # NOTE(review): this passes the Crazyflie object while the callee's
    # parameter is named `scf`; it appears to work with SyncLogger either
    # way — confirm against cflib docs.
    wait_for_position_estimator(crazyflie)
#
# Plotting methods
#
def start_plots(x, y, z, pos_ts, tX, tY, tZ, tar_ts):
    """Open the live dashboard: three 2-D time histories plus a 3-D track.

    Blocks in plt.show(); intended to run in its own process.
    """
    position = [x, y, z, pos_ts]
    setpoint = [tX, tY, tZ, tar_ts]
    fig = plt.figure()
    fig.set_size_inches(15, 8)
    grid = GridSpec(2, 4)
    axes = (fig.add_subplot(grid[0, 0]),
            fig.add_subplot(grid[0, 1]),
            fig.add_subplot(grid[1, :-2]),
            fig.add_subplot(grid[0:, 2:], projection='3d'))
    # The animation object must stay referenced while plt.show() runs,
    # otherwise matplotlib garbage-collects the timer.
    anim = FuncAnimation(fig, plot, interval=100,
                         fargs=(*axes, position, setpoint))
    plt.show()
def _plot_axis_history(ax, axis_name, pos_ts, pos_vals, tar_ts, tar_vals):
    """Draw one local-position time history (position + setpoint) on ``ax``.

    Timestamps are in milliseconds; they are re-zeroed to the first sample
    and converted to seconds for the x axis.
    """
    pos_line, = ax.plot(np.subtract(pos_ts, pos_ts[0]) / 1000, pos_vals)
    tar_line, = ax.plot(np.subtract(tar_ts, tar_ts[0]) / 1000, tar_vals)
    ax.set_title("{} Position Time History".format(axis_name))
    ax.set_xlabel("Time Elapsed (seconds)")
    ax.set_ylabel("Local {} Position (m)".format(axis_name))
    ax.legend((pos_line, tar_line),
              ("Local {} Position".format(axis_name),
               "Local {} Setpoint".format(axis_name)))


def plot(i, ax_x, ax_y, ax_z, ax_3d, position, setpoint):
    """FuncAnimation callback: redraw the three 2-D histories and the 3-D track.

    ``position`` and ``setpoint`` are [x, y, z, timestamps] lists shared
    with the logger threads, so they grow between calls; ``i`` is the
    frame counter supplied by FuncAnimation (unused).
    """
    x, y, z, pos_ts = position
    tX, tY, tZ, tar_ts = setpoint
    for ax in (ax_x, ax_y, ax_z, ax_3d):
        ax.clear()
    # The X/Y/Z panels are identical apart from the data series and label.
    _plot_axis_history(ax_x, "X", pos_ts, x, tar_ts, tX)
    _plot_axis_history(ax_y, "Y", pos_ts, y, tar_ts, tY)
    _plot_axis_history(ax_z, "Z", pos_ts, z, tar_ts, tZ)
    # Show only the 50 most recent samples so the 3-D view stays readable.
    ax_3d.plot(x[-50:], y[-50:], z[-50:], label="Quadrotor Position")
    ax_3d.set_xlim3d(-3, 3)
    ax_3d.set_ylim3d(-3, 3)
    ax_3d.set_zlim3d(0, 3)
    ax_3d.legend(['x', 'y', 'z'])
def plot_step_response(tuner):
    """Plot the recorded altitude step response with annotated step metrics.

    Saves the figure as alt_ctl_step_<timestamp>, then blocks in plt.show().
    """
    # Trial PID results and gains from the tuner.
    timestamp, response, setpoint = tuner.get_response()
    Kp, Ki, Kd = tuner.get_alt_pid()
    # Shift both traces so the step starts from zero altitude.
    response = np.array(response) - alt_takeoff
    setpoint = np.array(setpoint) - alt_takeoff
    rise_time, e_ss, p_over, settle_time = tuner.step_info(
        timestamp, response, 0, alt_target)
    fig = plt.figure()
    fig.set_size_inches(14, 10.5)
    ax = plt.gca()
    elapsed = np.subtract(timestamp, timestamp[0]) / 1000
    ax.plot(elapsed, response, label="Alt Response")
    ax.plot(elapsed, setpoint, label="Alt Setpoint")
    ax.set_title("Altitude Step Response, Kp={:1.2f} Ki={:1.2f} Kd={:1.2f}".format(Kp, Ki, Kd))  # noqa
    ax.set_xlabel("Time Elapsed (seconds)")
    ax.set_ylabel("Altitude (m)")
    plt.suptitle("Rise Time: {:2.2f} s\nError SS: {:2.2f} m\nPercent Overshoot: {:1.2f}\nSettling Time: {:2.2f} s".format(rise_time, e_ss, p_over*100, settle_time))  # noqa
    ax.legend()
    fig.savefig("alt_ctl_step_" + time.strftime("%Y%m%d-%H%M%S"))
    print("Close the plot window to continue")
    plt.show()
def save_motor_data(t_strt, t_range, m1, m2, m3, m4, timestamps):
    """Plot each motor's output over the step-response window and save the figure.

    The four per-motor plot calls were copy-pasted in the original; they
    are now a loop. Also fixes the "Ouput" typo in the combined-trace label.

    Parameters
    ----------
    t_strt : number
        Window start (same units as ``timestamps``; milliseconds assumed).
    t_range : number
        Window length in seconds.
    m1, m2, m3, m4 : sequence
        Per-motor output logs, index-aligned with ``timestamps``.
    timestamps : sequence
        Log timestamps in milliseconds.
    """
    # Keep only the samples inside the recorded step-response window.
    t_end = t_strt + (t_range * 1000)
    ts = np.array(timestamps)
    window = slice(np.abs(ts - t_strt).argmin(), np.abs(ts - t_end).argmin())
    timestamps = timestamps[window]
    motors = [m[window] for m in (m1, m2, m3, m4)]
    m_all = np.sum(motors, axis=0)
    fig = plt.figure()
    fig.set_size_inches(11, 8)
    ax = plt.gca()
    elapsed = np.subtract(timestamps, timestamps[0]) / 1000
    for num, motor in enumerate(motors, start=1):
        ax.plot(elapsed, motor, label='Motor {} Output'.format(num))
    ax.plot(elapsed, m_all, label='Combined Motor Output')
    ax.set_title("Motor Response from Altitude Step Input")
    ax.set_xlabel("Time Elapsed (Seconds)")
    ax.set_ylabel("Motor Output")
    ax.legend()
    fig.savefig("motor_output_" + time.strftime("%Y%m%d-%H%M%S"))
if __name__ == "__main__":
    # Initialize low-level drivers
    cflib.crtp.init_drivers(enable_debug_driver=False)
    with SyncCrazyflie(URI, cf=Crazyflie(rw_cache='./cache')) as scf:
        # cf position and setpoint logger
        log = cflog.CFLog(scf)
        # PID analyzer and parameter manager
        pidtune = cftune.PositionTuner(scf)
        # Dirty implementation of cf data piping
        # (the logger mutates these lists in place as data arrives)
        x, y, z, pos_ts = log.get_position()
        tX, tY, tZ, tar_ts = log.get_target_position()
        m1, m2, m3, m4, motor_ts = log.get_motor_output()
        time.sleep(1)
        # Live plots run in a separate process; daemon=True so it exits
        # together with this main process.
        p_plot = multiprocessing.Process(target=start_plots,
                                        args=(x, y, z, pos_ts,
                                              tX, tY, tZ, tar_ts))
        p_plot.daemon = True
        p_plot.start()
        cf = scf.cf
        # Interactive menu loop.
        while True:
            user_input = -1
            print("Select an item:")
            print("01) Takeoff and land while recording data.")
            print("02) Set new PID parameters.")
            print("10) Exit program")
            try:
                user_input = int(input("Item select: "))
            except ValueError:
                print("Error, Unknown Input")
                continue
            if user_input == 1:
                # Flight trial: show gains, fly a step, report metrics.
                Kp, Ki, Kd = pidtune.get_alt_pid()
                print("Current z-position PID controller gains:")
                print("\tKp: {:2.2f}".format(Kp))
                print("\tKi: {:2.2f}".format(Ki))
                print("\tKd: {:2.2f}".format(Kd))
                reset_estimator(scf)
                print("Taking off.")
                takeoff()
                pidtune.record_response()
                print("Ascending to setpoint altitude.")
                alt_setpoint(cf, 20) # takeoff for 20 seconds
                print("Landing")
                land()
                # Flight data
                timestamps, z, targetZ = pidtune.get_response()
                rise_time, e_ss, p_over, settle_time = pidtune.step_info(
                    timestamps, np.array(z) - alt_takeoff, # noqa
                    0,
                    alt_target # noqa
                )
                # Compare each metric against the design targets at the
                # top of the file.
                print("Flight results:")
                print("\tRise Time: {:2.2f} s, [{}]".format(rise_time,
                    'Success' if rise_time < rise_time_tgt else 'Failed'))
                print("\tError SS: {:2.2f} m".format(e_ss))
                print("\tOvershoot: {:2.2f} %, [{}]".format(p_over * 100,
                    'Success' if p_over < overshoot_tgt else 'Failed'))
                print("\tSettling Time: {:2.2f} s, [{}]".format(settle_time,
                    'Success' if settle_time < settle_time_tgt else 'Failed')) # noqa
                time.sleep(.5)
                save_motor_data(timestamps[1], 15, m1, m2, m3, m4, motor_ts)
                plot_step_response(pidtune)
            elif user_input == 2:
                # Updating cf posCtlZ PID gains
                print("Enter new PID params")
                Kp_new = float(input("New Kp: "))
                Ki_new = float(input("New Ki: "))
                Kd_new = float(input("New Kd: "))
                pidtune.set_alt_pid(Kp_new, Ki_new, Kd_new)
            elif user_input == 10:
                print("Exiting Program.")
                break
            else:
                print("Error, unknown input.")
| [
"cflib.crazyflie.syncLogger.SyncLogger",
"cflog.CFLog",
"matplotlib.pyplot.show",
"cflib.crazyflie.log.LogConfig",
"matplotlib.style.use",
"numpy.subtract",
"time.strftime",
"time.sleep",
"time.time",
"matplotlib.animation.FuncAnimation",
"matplotlib.pyplot.figure",
"cftune.PositionTuner",
"... | [((795, 825), 'matplotlib.style.use', 'style.use', (['"""seaborn-whitegrid"""'], {}), "('seaborn-whitegrid')\n", (804, 825), False, 'from matplotlib import style\n'), ((1031, 1066), 'numpy.array', 'np.array', (['[0.5, 0.5, alt_target, 0]'], {}), '([0.5, 0.5, alt_target, 0])\n', (1039, 1066), True, 'import numpy as np\n'), ((1769, 1780), 'time.time', 'time.time', ([], {}), '()\n', (1778, 1780), False, 'import time\n'), ((2080, 2095), 'time.sleep', 'time.sleep', (['(0.1)'], {}), '(0.1)\n', (2090, 2095), False, 'import time\n'), ((2215, 2266), 'cflib.crazyflie.log.LogConfig', 'LogConfig', ([], {'name': '"""Kalman Variance"""', 'period_in_ms': '(500)'}), "(name='Kalman Variance', period_in_ms=500)\n", (2224, 2266), False, 'from cflib.crazyflie.log import LogConfig\n'), ((3504, 3519), 'time.sleep', 'time.sleep', (['(0.1)'], {}), '(0.1)\n', (3514, 3519), False, 'import time\n'), ((3783, 3795), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (3793, 3795), True, 'import matplotlib.pyplot as plt\n'), ((3838, 3852), 'matplotlib.gridspec.GridSpec', 'GridSpec', (['(2)', '(4)'], {}), '(2, 4)\n', (3846, 3852), False, 'from matplotlib.gridspec import GridSpec\n'), ((4040, 4135), 'matplotlib.animation.FuncAnimation', 'FuncAnimation', (['fig', 'plot'], {'interval': '(100)', 'fargs': '(ax_x, ax_y, ax_z, ax_3d, position, setpoint)'}), '(fig, plot, interval=100, fargs=(ax_x, ax_y, ax_z, ax_3d,\n position, setpoint))\n', (4053, 4135), False, 'from matplotlib.animation import FuncAnimation\n'), ((4194, 4204), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4202, 4204), True, 'import matplotlib.pyplot as plt\n'), ((6347, 6359), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (6357, 6359), True, 'import matplotlib.pyplot as plt\n'), ((6405, 6414), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (6412, 6414), True, 'import matplotlib.pyplot as plt\n'), ((7117, 7127), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (7125, 7127), True, 
'import matplotlib.pyplot as plt\n'), ((7664, 7676), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (7674, 7676), True, 'import matplotlib.pyplot as plt\n'), ((7719, 7728), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (7726, 7728), True, 'import matplotlib.pyplot as plt\n'), ((1377, 1392), 'time.sleep', 'time.sleep', (['(0.1)'], {}), '(0.1)\n', (1387, 1392), False, 'import time\n'), ((1641, 1656), 'time.sleep', 'time.sleep', (['(0.1)'], {}), '(0.1)\n', (1651, 1656), False, 'import time\n'), ((1716, 1748), 'numpy.array', 'np.array', (['[0, 0, alt_takeoff, 0]'], {}), '([0, 0, alt_takeoff, 0])\n', (1724, 1748), True, 'import numpy as np\n'), ((1792, 1803), 'time.time', 'time.time', ([], {}), '()\n', (1801, 1803), False, 'import time\n'), ((2057, 2072), 'time.sleep', 'time.sleep', (['(0.1)'], {}), '(0.1)\n', (2067, 2072), False, 'import time\n'), ((2567, 2594), 'cflib.crazyflie.syncLogger.SyncLogger', 'SyncLogger', (['scf', 'log_config'], {}), '(scf, log_config)\n', (2577, 2594), False, 'from cflib.crazyflie.syncLogger import SyncLogger\n'), ((5968, 5986), 'numpy.array', 'np.array', (['response'], {}), '(response)\n', (5976, 5986), True, 'import numpy as np\n'), ((6017, 6035), 'numpy.array', 'np.array', (['setpoint'], {}), '(setpoint)\n', (6025, 6035), True, 'import numpy as np\n'), ((8725, 8741), 'cflog.CFLog', 'cflog.CFLog', (['scf'], {}), '(scf)\n', (8736, 8741), False, 'import cflog\n'), ((8809, 8834), 'cftune.PositionTuner', 'cftune.PositionTuner', (['scf'], {}), '(scf)\n', (8829, 8834), False, 'import cftune\n'), ((9057, 9070), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (9067, 9070), False, 'import time\n'), ((9089, 9180), 'multiprocessing.Process', 'multiprocessing.Process', ([], {'target': 'start_plots', 'args': '(x, y, z, pos_ts, tX, tY, tZ, tar_ts)'}), '(target=start_plots, args=(x, y, z, pos_ts, tX, tY,\n tZ, tar_ts))\n', (9112, 9180), False, 'import multiprocessing\n'), ((4435, 4465), 'numpy.subtract', 'np.subtract', 
(['pos_ts', 'pos_ts[0]'], {}), '(pos_ts, pos_ts[0])\n', (4446, 4465), True, 'import numpy as np\n'), ((4508, 4538), 'numpy.subtract', 'np.subtract', (['tar_ts', 'tar_ts[0]'], {}), '(tar_ts, tar_ts[0])\n', (4519, 4538), True, 'import numpy as np\n'), ((4817, 4847), 'numpy.subtract', 'np.subtract', (['pos_ts', 'pos_ts[0]'], {}), '(pos_ts, pos_ts[0])\n', (4828, 4847), True, 'import numpy as np\n'), ((4890, 4920), 'numpy.subtract', 'np.subtract', (['tar_ts', 'tar_ts[0]'], {}), '(tar_ts, tar_ts[0])\n', (4901, 4920), True, 'import numpy as np\n'), ((5199, 5229), 'numpy.subtract', 'np.subtract', (['pos_ts', 'pos_ts[0]'], {}), '(pos_ts, pos_ts[0])\n', (5210, 5229), True, 'import numpy as np\n'), ((5272, 5302), 'numpy.subtract', 'np.subtract', (['tar_ts', 'tar_ts[0]'], {}), '(tar_ts, tar_ts[0])\n', (5283, 5302), True, 'import numpy as np\n'), ((6428, 6464), 'numpy.subtract', 'np.subtract', (['timestamp', 'timestamp[0]'], {}), '(timestamp, timestamp[0])\n', (6439, 6464), True, 'import numpy as np\n'), ((6531, 6567), 'numpy.subtract', 'np.subtract', (['timestamp', 'timestamp[0]'], {}), '(timestamp, timestamp[0])\n', (6542, 6567), True, 'import numpy as np\n'), ((7032, 7062), 'time.strftime', 'time.strftime', (['"""%Y%m%d-%H%M%S"""'], {}), "('%Y%m%d-%H%M%S')\n", (7045, 7062), False, 'import time\n'), ((7634, 7648), 'numpy.add', 'np.add', (['m3', 'm4'], {}), '(m3, m4)\n', (7640, 7648), True, 'import numpy as np\n'), ((7742, 7780), 'numpy.subtract', 'np.subtract', (['timestamps', 'timestamps[0]'], {}), '(timestamps, timestamps[0])\n', (7753, 7780), True, 'import numpy as np\n'), ((7843, 7881), 'numpy.subtract', 'np.subtract', (['timestamps', 'timestamps[0]'], {}), '(timestamps, timestamps[0])\n', (7854, 7881), True, 'import numpy as np\n'), ((7944, 7982), 'numpy.subtract', 'np.subtract', (['timestamps', 'timestamps[0]'], {}), '(timestamps, timestamps[0])\n', (7955, 7982), True, 'import numpy as np\n'), ((8045, 8083), 'numpy.subtract', 'np.subtract', (['timestamps', 
'timestamps[0]'], {}), '(timestamps, timestamps[0])\n', (8056, 8083), True, 'import numpy as np\n'), ((8146, 8184), 'numpy.subtract', 'np.subtract', (['timestamps', 'timestamps[0]'], {}), '(timestamps, timestamps[0])\n', (8157, 8184), True, 'import numpy as np\n'), ((8438, 8468), 'time.strftime', 'time.strftime', (['"""%Y%m%d-%H%M%S"""'], {}), "('%Y%m%d-%H%M%S')\n", (8451, 8468), False, 'import time\n'), ((8628, 8657), 'cflib.crazyflie.Crazyflie', 'Crazyflie', ([], {'rw_cache': '"""./cache"""'}), "(rw_cache='./cache')\n", (8637, 8657), False, 'from cflib.crazyflie import Crazyflie\n'), ((11480, 11495), 'time.sleep', 'time.sleep', (['(0.5)'], {}), '(0.5)\n', (11490, 11495), False, 'import time\n'), ((7322, 7342), 'numpy.array', 'np.array', (['timestamps'], {}), '(timestamps)\n', (7330, 7342), True, 'import numpy as np\n'), ((7386, 7406), 'numpy.array', 'np.array', (['timestamps'], {}), '(timestamps)\n', (7394, 7406), True, 'import numpy as np\n'), ((10650, 10661), 'numpy.array', 'np.array', (['z'], {}), '(z)\n', (10658, 10661), True, 'import numpy as np\n')] |
import argparse
import numpy as np
import healpy
if __name__ == "__main__":
    # Multiply a set of HEALPix weight maps together, then sum the
    # combined weight within each jackknife region defined by --jk-def.
    parser = argparse.ArgumentParser()
    parser.add_argument("--weight-paths", nargs="+", required=True)
    parser.add_argument("--jk-def", required=True)
    parser.add_argument("--output", required=True)
    args = parser.parse_args()
    # jk_def maps resolution names to per-region arrays of pixel indices
    # (plus an "nside" entry that is skipped below).
    jk_def = np.load(args.jk_def)
    print("Loading ", args.weight_paths[0])
    m = healpy.read_map(args.weight_paths[0], dtype=np.float64)
    for p in args.weight_paths[1:]:
        print("Loading ", p)
        # Combined weight is the pixel-wise product of all input maps.
        m *= healpy.read_map(p, dtype=np.float64)
    jk_resolutions = [k for k in jk_def if k != "nside"]
    jk_weights = {}
    for jk_res in jk_resolutions:
        jk_weights[jk_res] = np.zeros(jk_def[jk_res].shape[0], dtype=np.float64)
        for i, idx in enumerate(jk_def[jk_res]):
            w = m[idx]
            # Drop masked pixels before summing the region weight.
            w = w[w != healpy.UNSEEN]
            jk_weights[jk_res][i] = np.sum(w)
    np.savez(args.output, **jk_weights) | [
"numpy.load",
"numpy.sum",
"argparse.ArgumentParser",
"numpy.zeros",
"healpy.read_map",
"numpy.savez"
] | [((91, 116), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (114, 116), False, 'import argparse\n'), ((333, 353), 'numpy.load', 'np.load', (['args.jk_def'], {}), '(args.jk_def)\n', (340, 353), True, 'import numpy as np\n'), ((407, 462), 'healpy.read_map', 'healpy.read_map', (['args.weight_paths[0]'], {'dtype': 'np.float64'}), '(args.weight_paths[0], dtype=np.float64)\n', (422, 462), False, 'import healpy\n'), ((933, 968), 'numpy.savez', 'np.savez', (['args.output'], {}), '(args.output, **jk_weights)\n', (941, 968), True, 'import numpy as np\n'), ((541, 577), 'healpy.read_map', 'healpy.read_map', (['p'], {'dtype': 'np.float64'}), '(p, dtype=np.float64)\n', (556, 577), False, 'import healpy\n'), ((720, 771), 'numpy.zeros', 'np.zeros', (['jk_def[jk_res].shape[0]'], {'dtype': 'np.float64'}), '(jk_def[jk_res].shape[0], dtype=np.float64)\n', (728, 771), True, 'import numpy as np\n'), ((918, 927), 'numpy.sum', 'np.sum', (['w'], {}), '(w)\n', (924, 927), True, 'import numpy as np\n')] |
# multithread demo
# https://nrsyed.com/2018/07/05/multithreading-with-opencv-python-to-improve-video-processing-performance/
# object-oriented programming + multithread
#
# 1 thread - acquire image from camera
# 1 thread - to disply raw image
# 1 thread - to calculate Laplacian of Guassian image of the raw image
# see demo_mthread.py for a simplified version
import numpy as np
from threading import Thread
import cv2
from datetime import datetime
class CountsPerSec:
    """Track how often an arbitrary event occurs and report its rate.

    Call ``start()`` to mark the beginning of timing, ``increment()`` for
    each event, and ``countsPerSec()`` to read the rate in events/second.
    """
    def __init__(self):
        self._start_time = None
        self._num_occurrences = 0

    def start(self):
        """Begin timing; returns self so calls can be chained."""
        self._start_time = datetime.now()
        return self

    def increment(self):
        """Record one occurrence of the event."""
        self._num_occurrences += 1

    def countsPerSec(self):
        """Return the occurrence rate in events per second."""
        elapsed = (datetime.now() - self._start_time).total_seconds()
        # eps guards against division by zero immediately after start().
        return self._num_occurrences / (elapsed + np.finfo(float).eps)
class VideoGet:
    """Continuously grab frames from a VideoCapture on a dedicated thread.

    The most recent frame is always available as ``self.frame``.
    """
    def __init__(self, src=0):
        self.stream = cv2.VideoCapture(src)
        (self.grabbed, self.frame) = self.stream.read()
        self.stopped = False

    def start(self):
        """Launch the grab loop in a background thread; returns self."""
        Thread(target=self.get, args=()).start()
        return self

    def get(self):
        """Keep reading frames until stopped or the stream runs dry."""
        while not self.stopped:
            if self.grabbed:
                (self.grabbed, self.frame) = self.stream.read()
            else:
                self.stop()

    def stop(self):
        """Ask the grab loop to exit."""
        self.stopped = True
def threadVideoGet(source=0):
    """Grab frames on a dedicated thread; display them on the main thread."""
    video_getter = VideoGet(source).start()
    cps = CountsPerSec().start()
    while True:
        quit_pressed = cv2.waitKey(1) == ord("q")
        if quit_pressed or video_getter.stopped:
            video_getter.stop()
            break
        annotated = putIterationsPerSec(video_getter.frame, cps.countsPerSec())
        cv2.imshow("Video", annotated)
        cps.increment()
class VideoShow:
    """Continuously display ``self.frame`` using a dedicated thread.

    The owner updates ``self.frame`` from outside; pressing 'q' in the
    window stops the loop.
    """
    def __init__(self, frame=None):
        self.frame = frame
        self.stopped = False

    def start(self):
        """Launch the display loop in a background thread; returns self."""
        Thread(target=self.show, args=()).start()
        return self

    def show(self):
        """Render the current frame repeatedly until 'q' is pressed."""
        while not self.stopped:
            cv2.imshow("Video", self.frame)
            if cv2.waitKey(1) == ord("q"):
                self.stopped = True

    def stop(self):
        """Ask the display loop to exit."""
        self.stopped = True
class VideoShow_edge:
    """Continuously display the Laplacian edge image of ``self.frame``
    using a dedicated thread.

    The owner updates ``self.frame`` from outside; pressing 'q' in the
    window stops the loop.
    """

    # 3x3 Laplacian (4-neighbour) kernel. It is constant, so build it
    # once here instead of re-allocating it on every displayed frame,
    # as the original loop did.
    _LAPLACIAN = np.array((
        [0, 1, 0],
        [1, -4, 1],
        [0, 1, 0]), dtype="int")

    def __init__(self, frame=None):
        self.frame = frame
        self.stopped = False

    def start(self):
        """Launch the display loop in a background thread; returns self."""
        Thread(target=self.show, args=()).start()
        return self

    def show(self):
        """Convolve the current frame with the Laplacian kernel and
        display it, until 'q' is pressed."""
        while not self.stopped:
            edges = cv2.filter2D(self.frame, -1, self._LAPLACIAN)
            cv2.imshow("edge", edges)
            if cv2.waitKey(1) == ord("q"):
                self.stopped = True

    def stop(self):
        """Ask the display loop to exit."""
        self.stopped = True
def threadVideoShow(source=0):
    """
    Dedicated thread for showing video frames with VideoShow object.
    Main thread grabs video frames.
    """
    cap = cv2.VideoCapture(source)
    (grabbed, frame) = cap.read()
    video_shower = VideoShow(frame).start()
    cps = CountsPerSec().start()
    while True:
        (grabbed, frame) = cap.read()
        if not grabbed or video_shower.stopped:
            video_shower.stop()
            break
        frame = putIterationsPerSec(frame, cps.countsPerSec())
        video_shower.frame = frame
        cps.increment()
    # Fix: release the capture device when done; the original leaked the
    # VideoCapture handle.
    cap.release()
import argparse
def putIterationsPerSec(frame, iterations_per_sec):
    """
    Add iterations per second text to lower-left corner of a frame.
    """
    label = "{:.0f} iterations/sec".format(iterations_per_sec)
    anchor = (10, 450)
    white = (255, 255, 255)
    cv2.putText(frame, label, anchor, cv2.FONT_HERSHEY_SIMPLEX, 1.0, white)
    return frame
def threadAll(source=0):
    """
    Dedicated thread for grabbing video frames with VideoGet object.
    Dedicated thread for showing video frames with VideoShow object.
    Main thread serves only to pass frames between VideoGet and
    VideoShow objects/threads.
    """
    getter = VideoGet(source).start()
    shower = VideoShow(getter.frame).start()
    edger = VideoShow_edge(getter.frame).start()  # live edge preview
    cps = CountsPerSec().start()
    while True:
        if getter.stopped or shower.stopped or edger.stopped:
            shower.stop()
            getter.stop()
            edger.stop()
            break
        frame = putIterationsPerSec(getter.frame, cps.countsPerSec())
        shower.frame = frame
        edger.frame = frame
        cps.increment()
if __name__ == '__main__':
    # Run the fully threaded pipeline (grab + show + edge view) on camera 0.
    threadAll(0)
| [
"threading.Thread",
"cv2.filter2D",
"cv2.waitKey",
"cv2.VideoCapture",
"numpy.finfo",
"numpy.array",
"cv2.imshow",
"datetime.datetime.now"
] | [((3835, 3859), 'cv2.VideoCapture', 'cv2.VideoCapture', (['source'], {}), '(source)\n', (3851, 3859), False, 'import cv2\n'), ((858, 872), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (870, 872), False, 'from datetime import datetime\n'), ((1344, 1365), 'cv2.VideoCapture', 'cv2.VideoCapture', (['src'], {}), '(src)\n', (1360, 1365), False, 'import cv2\n'), ((2295, 2321), 'cv2.imshow', 'cv2.imshow', (['"""Video"""', 'frame'], {}), "('Video', frame)\n", (2305, 2321), False, 'import cv2\n'), ((2722, 2753), 'cv2.imshow', 'cv2.imshow', (['"""Video"""', 'self.frame'], {}), "('Video', self.frame)\n", (2732, 2753), False, 'import cv2\n'), ((3275, 3332), 'numpy.array', 'np.array', (['([0, 1, 0], [1, -4, 1], [0, 1, 0])'], {'dtype': '"""int"""'}), "(([0, 1, 0], [1, -4, 1], [0, 1, 0]), dtype='int')\n", (3283, 3332), True, 'import numpy as np\n'), ((3448, 3487), 'cv2.filter2D', 'cv2.filter2D', (['self.frame', '(-1)', 'laplacian'], {}), '(self.frame, -1, laplacian)\n', (3460, 3487), False, 'import cv2\n'), ((3505, 3526), 'cv2.imshow', 'cv2.imshow', (['"""edge"""', 'x'], {}), "('edge', x)\n", (3515, 3526), False, 'import cv2\n'), ((1490, 1522), 'threading.Thread', 'Thread', ([], {'target': 'self.get', 'args': '()'}), '(target=self.get, args=())\n', (1496, 1522), False, 'from threading import Thread\n'), ((2079, 2093), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (2090, 2093), False, 'import cv2\n'), ((2590, 2623), 'threading.Thread', 'Thread', ([], {'target': 'self.show', 'args': '()'}), '(target=self.show, args=())\n', (2596, 2623), False, 'from threading import Thread\n'), ((2770, 2784), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (2781, 2784), False, 'import cv2\n'), ((3131, 3164), 'threading.Thread', 'Thread', ([], {'target': 'self.show', 'args': '()'}), '(target=self.show, args=())\n', (3137, 3164), False, 'from threading import Thread\n'), ((3544, 3558), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (3555, 3558), False, 'import 
cv2\n'), ((1014, 1028), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (1026, 1028), False, 'from datetime import datetime\n'), ((1122, 1137), 'numpy.finfo', 'np.finfo', (['float'], {}), '(float)\n', (1130, 1137), True, 'import numpy as np\n')] |
# coding: utf-8
import numpy as np
from kerasy.ML.sampling import GibbsMsphereSampler
def test_gibbs_msphere_sampling(target=0.15):
    """Check Gibbs samples stay inside the m-sphere and fill it with the
    expected density (half-radius ball holds ~(1/2)**dim of the mass)."""
    radius = 10
    num_samples = 10000
    dimension = 6
    sampler = GibbsMsphereSampler(dimension=dimension, radius=radius)
    sample = sampler.sample(num_samples, verbose=-1)
    squared_norm = np.square(sample).sum(axis=1)
    actual = np.count_nonzero(squared_norm <= (radius / 2) ** 2)
    ideal = ((1 / 2) ** dimension) * num_samples
    assert np.all(squared_norm <= radius ** 2)
    assert abs(actual / ideal - 1) <= target
| [
"kerasy.ML.sampling.GibbsMsphereSampler",
"numpy.square",
"numpy.count_nonzero",
"numpy.all"
] | [((206, 261), 'kerasy.ML.sampling.GibbsMsphereSampler', 'GibbsMsphereSampler', ([], {'dimension': 'dimension', 'radius': 'radius'}), '(dimension=dimension, radius=radius)\n', (225, 261), False, 'from kerasy.ML.sampling import GibbsMsphereSampler\n'), ((374, 417), 'numpy.count_nonzero', 'np.count_nonzero', (['(norm <= (radius / 2) ** 2)'], {}), '(norm <= (radius / 2) ** 2)\n', (390, 417), True, 'import numpy as np\n'), ((471, 498), 'numpy.all', 'np.all', (['(norm <= radius ** 2)'], {}), '(norm <= radius ** 2)\n', (477, 498), True, 'import numpy as np\n'), ((334, 351), 'numpy.square', 'np.square', (['sample'], {}), '(sample)\n', (343, 351), True, 'import numpy as np\n')] |
import torch
import numpy as np
from .event_representation import EventRepresentation
from evrepr.thirdparty.matrixlstm.classification.layers.MatrixLSTM import MatrixLSTM
from evrepr.thirdparty.matrixlstm.classification.layers.SELayer import SELayer
class MatrixLSTMRepresentation(EventRepresentation):
    """Event representation that aggregates raw events into a dense
    feature image with a MatrixLSTM layer, optionally followed by a
    squeeze-and-excitation channel reweighting."""

    def __init__(self, cfg, *args, **kwargs):
        super().__init__(cfg, *args, **kwargs)
        # All MatrixLSTM hyper-parameters are taken verbatim from cfg.
        self.matrixlstm = MatrixLSTM(
            input_shape=tuple(cfg.frame_size),
            region_shape=tuple(cfg.region_shape),
            region_stride=tuple(cfg.region_stride),
            input_size=cfg.input_size,
            hidden_size=cfg.hidden_size,
            num_layers=cfg.num_layers,
            bias=cfg.bias,
            lstm_type=cfg.lstm_type,
            add_coords_feature=cfg.add_coords_features,
            add_time_feature_mode=cfg.add_feature_mode,
            normalize_relative=cfg.normalize_relative,
            max_events_per_rf=cfg.max_events_per_rf,
            maintain_in_shape=cfg.maintain_in_shape,
            keep_most_recent=cfg.keep_most_recent,
            frame_intervals=cfg.frame_intervals,
            frame_intervals_mode=cfg.frame_intervals_mode
        )
        # Optional channel attention applied to the MatrixLSTM output.
        self.selayer = None
        if cfg.add_selayer:
            self.selayer = SELayer(self.matrixlstm.out_channels,
                                   reduction=1)

    @property
    def output_shape(self):
        # [C, H, W] of the produced feature image.
        return [self.matrixlstm.out_channels, *self.matrixlstm.output_shape]

    def _preprocess_events_dict(self, batched_inputs):
        """Stack per-sample event arrays into one padded batch tensor.

        Each sample's (N_i, F) 'events' array is zero-padded along the
        event axis up to the longest sample, then stacked; the true
        lengths are returned alongside.
        """
        events, events_lens = [], []
        for i, inputs in enumerate(batched_inputs):
            events.append(inputs['events'])
            events_lens.append(inputs['events'].shape[0])
        max_length = max(events_lens)
        events = [np.pad(ev, ((0, max_length - ln), (0, 0)),
                         mode='constant', constant_values=0) for
                  ln, ev in zip(events_lens, events)]
        events = torch.as_tensor(np.stack(events, axis=0), device=self.device)
        events_lens = torch.as_tensor(events_lens, device=self.device)
        return events, events_lens

    def _preprocess_events_torch(self, batched_inputs):
        # Inputs are already batched tensors; pass through unchanged.
        return batched_inputs

    def forward(self, batched_inputs, batched_states=None):
        # NOTE(review): batched_states is accepted but never used here.
        rois = self.get_active_regions(batched_inputs)
        events, lengths = self.preprocess_events(batched_inputs)
        if lengths.max() == 0:
            # Empty batch: all-zero image.
            # NOTE(review): this branch returns a bare tensor while the
            # normal path returns a 3-tuple — verify callers handle both.
            return events.new_zeros([events.shape[0], *self.output_shape])
        del batched_inputs
        # Event columns appear to be (x, y, t, feature) — TODO confirm
        # against the dataloader.
        coords = events[:, :, 0:2].type(torch.int64)
        ts = events[:, :, 2].float().unsqueeze(-1)
        embed = events[:, :, 3].float().unsqueeze(-1)
        images = self.matrixlstm(input=(embed, coords, ts, lengths))
        images = images.permute(0, 3, 1, 2)  # NHWC -> NCHW
        if self.selayer:
            images = self.selayer(images)
        return images, rois, None
| [
"numpy.pad",
"torch.as_tensor",
"evrepr.thirdparty.matrixlstm.classification.layers.SELayer.SELayer",
"numpy.stack"
] | [((2072, 2120), 'torch.as_tensor', 'torch.as_tensor', (['events_lens'], {'device': 'self.device'}), '(events_lens, device=self.device)\n', (2087, 2120), False, 'import torch\n'), ((1298, 1348), 'evrepr.thirdparty.matrixlstm.classification.layers.SELayer.SELayer', 'SELayer', (['self.matrixlstm.out_channels'], {'reduction': '(1)'}), '(self.matrixlstm.out_channels, reduction=1)\n', (1305, 1348), False, 'from evrepr.thirdparty.matrixlstm.classification.layers.SELayer import SELayer\n'), ((1809, 1887), 'numpy.pad', 'np.pad', (['ev', '((0, max_length - ln), (0, 0))'], {'mode': '"""constant"""', 'constant_values': '(0)'}), "(ev, ((0, max_length - ln), (0, 0)), mode='constant', constant_values=0)\n", (1815, 1887), True, 'import numpy as np\n'), ((2004, 2028), 'numpy.stack', 'np.stack', (['events'], {'axis': '(0)'}), '(events, axis=0)\n', (2012, 2028), True, 'import numpy as np\n')] |
"""
:Authors: - <NAME>
"""
from setuptools import setup, find_packages
# Fail early with an actionable message if numpy is missing: its include
# directory is required below for include_dirs.
try:
    import numpy as np
except ImportError:
    raise ImportError("First you need to run: pip install numpy")

"""
try:
    from Cython.Build import cythonize
except ImportError:
    raise ImportError('First you need to run: pip install cython')
ext_modules = cythonize('**/*.pyx',
                      language='c++',
                      language_level=3)
"""

ext_modules = []

setup(
    name='dgm4nlp',
    license='Apache 2.0',
    author='<NAME>',
    description='VAEs for NLP',
    packages=find_packages(),
    # Fix: the setuptools keyword is `install_requires`; the misspelled
    # `install_requirements` was silently ignored, so the tabulate
    # dependency was never actually declared.
    install_requires=['tabulate'],
    include_dirs=[np.get_include()],
    ext_modules=ext_modules,
)
| [
"numpy.get_include",
"setuptools.find_packages"
] | [((585, 600), 'setuptools.find_packages', 'find_packages', ([], {}), '()\n', (598, 600), False, 'from setuptools import setup, find_packages\n'), ((659, 675), 'numpy.get_include', 'np.get_include', ([], {}), '()\n', (673, 675), True, 'import numpy as np\n')] |
import keras
from keras.models import Sequential
from keras.layers import Dense, Dropout, Flatten
from keras.layers import Conv2D, MaxPooling2D
from keras.utils import to_categorical
from keras.preprocessing import image
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from tqdm import tqdm
# --- Data loading ------------------------------------------------------
train = pd.read_csv('dataset.csv') # reading the csv file
train.head() # printing first five rows of the file
print(train.columns)
# Load every image listed in the csv, resize to 64x64 RGB and scale
# pixel values into [0, 1].
train_image = []
for i in tqdm(range(train.shape[0])):
    img = image.load_img(train['image'][i],target_size=(64,64,3))
    img = image.img_to_array(img)
    img = img/255
    train_image.append(img)
X = np.array(train_image)
print(X.shape)
# Labels: every column except the image path (2 outputs, sigmoid head
# with binary_crossentropy below).
y = np.array(train.drop(['image'],axis=1))
print(y.shape)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=42, test_size=0.3)
# --- Model: four conv blocks (conv -> maxpool -> dropout), then a
# --- flattened two-layer dense head.
model = Sequential()
model.add(Conv2D(filters=8, kernel_size=(3, 3), activation="relu", input_shape=(64,64,3)))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Conv2D(filters=16, kernel_size=(3, 3), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Conv2D(filters=32, kernel_size=(3, 3), activation="relu"))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Conv2D(filters=32, kernel_size=(3, 3), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(128, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(64, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(2, activation='sigmoid'))
model.summary()
# --- Training and persistence -----------------------------------------
model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])
model.fit(X_train, y_train, epochs=10, validation_data=(X_test, y_test), batch_size=16)
model.save("helmet_mask_model.h5")
| [
"pandas.read_csv",
"sklearn.model_selection.train_test_split",
"keras.layers.Dropout",
"keras.layers.Flatten",
"keras.preprocessing.image.img_to_array",
"keras.preprocessing.image.load_img",
"keras.layers.Dense",
"numpy.array",
"keras.layers.Conv2D",
"keras.models.Sequential",
"keras.layers.MaxP... | [((376, 402), 'pandas.read_csv', 'pd.read_csv', (['"""dataset.csv"""'], {}), "('dataset.csv')\n", (387, 402), True, 'import pandas as pd\n'), ((714, 735), 'numpy.array', 'np.array', (['train_image'], {}), '(train_image)\n', (722, 735), True, 'import numpy as np\n'), ((871, 925), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X', 'y'], {'random_state': '(42)', 'test_size': '(0.3)'}), '(X, y, random_state=42, test_size=0.3)\n', (887, 925), False, 'from sklearn.model_selection import train_test_split\n'), ((935, 947), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (945, 947), False, 'from keras.models import Sequential\n'), ((574, 632), 'keras.preprocessing.image.load_img', 'image.load_img', (["train['image'][i]"], {'target_size': '(64, 64, 3)'}), "(train['image'][i], target_size=(64, 64, 3))\n", (588, 632), False, 'from keras.preprocessing import image\n'), ((640, 663), 'keras.preprocessing.image.img_to_array', 'image.img_to_array', (['img'], {}), '(img)\n', (658, 663), False, 'from keras.preprocessing import image\n'), ((958, 1044), 'keras.layers.Conv2D', 'Conv2D', ([], {'filters': '(8)', 'kernel_size': '(3, 3)', 'activation': '"""relu"""', 'input_shape': '(64, 64, 3)'}), "(filters=8, kernel_size=(3, 3), activation='relu', input_shape=(64, \n 64, 3))\n", (964, 1044), False, 'from keras.layers import Conv2D, MaxPooling2D\n'), ((1049, 1079), 'keras.layers.MaxPooling2D', 'MaxPooling2D', ([], {'pool_size': '(2, 2)'}), '(pool_size=(2, 2))\n', (1061, 1079), False, 'from keras.layers import Conv2D, MaxPooling2D\n'), ((1091, 1104), 'keras.layers.Dropout', 'Dropout', (['(0.25)'], {}), '(0.25)\n', (1098, 1104), False, 'from keras.layers import Dense, Dropout, Flatten\n'), ((1116, 1173), 'keras.layers.Conv2D', 'Conv2D', ([], {'filters': '(16)', 'kernel_size': '(3, 3)', 'activation': '"""relu"""'}), "(filters=16, kernel_size=(3, 3), activation='relu')\n", (1122, 1173), False, 'from keras.layers import Conv2D, 
MaxPooling2D\n'), ((1185, 1215), 'keras.layers.MaxPooling2D', 'MaxPooling2D', ([], {'pool_size': '(2, 2)'}), '(pool_size=(2, 2))\n', (1197, 1215), False, 'from keras.layers import Conv2D, MaxPooling2D\n'), ((1227, 1240), 'keras.layers.Dropout', 'Dropout', (['(0.25)'], {}), '(0.25)\n', (1234, 1240), False, 'from keras.layers import Dense, Dropout, Flatten\n'), ((1252, 1309), 'keras.layers.Conv2D', 'Conv2D', ([], {'filters': '(32)', 'kernel_size': '(3, 3)', 'activation': '"""relu"""'}), "(filters=32, kernel_size=(3, 3), activation='relu')\n", (1258, 1309), False, 'from keras.layers import Conv2D, MaxPooling2D\n'), ((1321, 1351), 'keras.layers.MaxPooling2D', 'MaxPooling2D', ([], {'pool_size': '(2, 2)'}), '(pool_size=(2, 2))\n', (1333, 1351), False, 'from keras.layers import Conv2D, MaxPooling2D\n'), ((1363, 1376), 'keras.layers.Dropout', 'Dropout', (['(0.25)'], {}), '(0.25)\n', (1370, 1376), False, 'from keras.layers import Dense, Dropout, Flatten\n'), ((1388, 1445), 'keras.layers.Conv2D', 'Conv2D', ([], {'filters': '(32)', 'kernel_size': '(3, 3)', 'activation': '"""relu"""'}), "(filters=32, kernel_size=(3, 3), activation='relu')\n", (1394, 1445), False, 'from keras.layers import Conv2D, MaxPooling2D\n'), ((1457, 1487), 'keras.layers.MaxPooling2D', 'MaxPooling2D', ([], {'pool_size': '(2, 2)'}), '(pool_size=(2, 2))\n', (1469, 1487), False, 'from keras.layers import Conv2D, MaxPooling2D\n'), ((1499, 1512), 'keras.layers.Dropout', 'Dropout', (['(0.25)'], {}), '(0.25)\n', (1506, 1512), False, 'from keras.layers import Dense, Dropout, Flatten\n'), ((1524, 1533), 'keras.layers.Flatten', 'Flatten', ([], {}), '()\n', (1531, 1533), False, 'from keras.layers import Dense, Dropout, Flatten\n'), ((1545, 1574), 'keras.layers.Dense', 'Dense', (['(128)'], {'activation': '"""relu"""'}), "(128, activation='relu')\n", (1550, 1574), False, 'from keras.layers import Dense, Dropout, Flatten\n'), ((1586, 1598), 'keras.layers.Dropout', 'Dropout', (['(0.5)'], {}), '(0.5)\n', (1593, 1598), 
False, 'from keras.layers import Dense, Dropout, Flatten\n'), ((1610, 1638), 'keras.layers.Dense', 'Dense', (['(64)'], {'activation': '"""relu"""'}), "(64, activation='relu')\n", (1615, 1638), False, 'from keras.layers import Dense, Dropout, Flatten\n'), ((1650, 1662), 'keras.layers.Dropout', 'Dropout', (['(0.5)'], {}), '(0.5)\n', (1657, 1662), False, 'from keras.layers import Dense, Dropout, Flatten\n'), ((1674, 1704), 'keras.layers.Dense', 'Dense', (['(2)'], {'activation': '"""sigmoid"""'}), "(2, activation='sigmoid')\n", (1679, 1704), False, 'from keras.layers import Dense, Dropout, Flatten\n')] |
import numpy as np
import pandas as pd
import datetime
# Timestamp used to tag the output files.
date = datetime.datetime.now().strftime('%Y%m%d')
from time import perf_counter
start = perf_counter()
path = '/home/users/aslee/CaCO3_NWP/'
from sklearn.model_selection import train_test_split
data_df = pd.read_csv('{}data/spe+bulk_dataset_20201215.csv'.format(path))
# The data from this core don't have CaCO3 measurements
data_df = data_df[data_df.core != 'SO178-12-3']
# Spectral features: drop the id column and the trailing bulk columns,
# then normalize each spectrum to sum to 1.
X = data_df.iloc[:, 1: -5].values
X = X / X.sum(axis = 1, keepdims = True)
# There are 49 zeros in measurements, I simply replace them by 0.01
# (the target is log-transformed below, so zeros would be invalid).
y = data_df['CaCO3%'].replace(0, 0.01).values
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.2, shuffle = True, random_state = 24)
print('Begin: CaCO3')
from sklearn.model_selection import GridSearchCV
from sklearn.decomposition import NMF
from sklearn.svm import SVR
from sklearn.pipeline import make_pipeline
# Pipeline: NMF dimensionality reduction feeding an RBF SVR; grid over
# component count and SVR regularization / kernel width.
pipe = make_pipeline(NMF(max_iter = 8000, random_state = 24), SVR())
params = {
    'nmf__n_components': range(5, 11),
    'svr__C': np.logspace(2, 8, 7),
    'svr__gamma': np.logspace(-4, 2, 7)
}
grid = GridSearchCV(pipe, param_grid = params, cv = 10, n_jobs = -1, return_train_score = False)
# The model is fit on log-CaCO3.
grid.fit(X_train, np.log(y_train))
print('The best cv score: {:.3f}'.format(grid.best_score_))
print('The best model\'s parameters: {}'.format(grid.best_estimator_))
pd.DataFrame(grid.cv_results_).to_csv('{}results/caco3_grid_nmf+svr_{}.csv'.format(path, date))
from joblib import dump, load
dump(grid.best_estimator_, '{}models/caco3_nmf+svr_model_{}.joblib'.format(path, date))
print("The computation takes {} hours.".format((perf_counter() - start)/3600)) | [
"pandas.DataFrame",
"sklearn.model_selection.GridSearchCV",
"sklearn.decomposition.NMF",
"sklearn.svm.SVR",
"numpy.log",
"sklearn.model_selection.train_test_split",
"numpy.logspace",
"time.perf_counter",
"datetime.datetime.now"
] | [((146, 160), 'time.perf_counter', 'perf_counter', ([], {}), '()\n', (158, 160), False, 'from time import perf_counter\n'), ((659, 727), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X', 'y'], {'test_size': '(0.2)', 'shuffle': '(True)', 'random_state': '(24)'}), '(X, y, test_size=0.2, shuffle=True, random_state=24)\n', (675, 727), False, 'from sklearn.model_selection import train_test_split\n'), ((1121, 1207), 'sklearn.model_selection.GridSearchCV', 'GridSearchCV', (['pipe'], {'param_grid': 'params', 'cv': '(10)', 'n_jobs': '(-1)', 'return_train_score': '(False)'}), '(pipe, param_grid=params, cv=10, n_jobs=-1, return_train_score=\n False)\n', (1133, 1207), False, 'from sklearn.model_selection import GridSearchCV\n'), ((938, 973), 'sklearn.decomposition.NMF', 'NMF', ([], {'max_iter': '(8000)', 'random_state': '(24)'}), '(max_iter=8000, random_state=24)\n', (941, 973), False, 'from sklearn.decomposition import NMF\n'), ((979, 984), 'sklearn.svm.SVR', 'SVR', ([], {}), '()\n', (982, 984), False, 'from sklearn.svm import SVR\n'), ((1050, 1070), 'numpy.logspace', 'np.logspace', (['(2)', '(8)', '(7)'], {}), '(2, 8, 7)\n', (1061, 1070), True, 'import numpy as np\n'), ((1090, 1111), 'numpy.logspace', 'np.logspace', (['(-4)', '(2)', '(7)'], {}), '(-4, 2, 7)\n', (1101, 1111), True, 'import numpy as np\n'), ((1232, 1247), 'numpy.log', 'np.log', (['y_train'], {}), '(y_train)\n', (1238, 1247), True, 'import numpy as np\n'), ((64, 87), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (85, 87), False, 'import datetime\n'), ((1383, 1413), 'pandas.DataFrame', 'pd.DataFrame', (['grid.cv_results_'], {}), '(grid.cv_results_)\n', (1395, 1413), True, 'import pandas as pd\n'), ((1648, 1662), 'time.perf_counter', 'perf_counter', ([], {}), '()\n', (1660, 1662), False, 'from time import perf_counter\n')] |
from numpy import random

# Print one uniformly distributed sample drawn from [0, 1).
print(random.rand())
| [
"numpy.random.rand"
] | [((30, 43), 'numpy.random.rand', 'random.rand', ([], {}), '()\n', (41, 43), False, 'from numpy import random\n')] |
"""Module for analysing station data"""
from ast import Import
import matplotlib.dates as dt
import numpy as np
import datetime
from floodsystem.station import MonitoringStation
def polyfit(dates, levels, p):
    """From a list of dates and levels returns a best fit numpy polynomial
    of order p, and the shift of the date axis as a datetime object.

    The fit is performed against dates offset by dates[-1] (assumed to be
    the earliest date — TODO confirm the caller supplies newest-first
    ordering) to reduce floating point error in the polynomial fit.
    """
    if len(dates)<2 or len(dates)!=len(levels):
        raise ValueError("Input invalid")
    lowest_date = dates[-1]
    # Use datetime subtraction to offset every date by the last one:
    delta_dates =[]
    for d in dates:
        delta_dates.append(d - lowest_date)
    # Then convert to floats, reducing floating point errors:
    float_dates =[]
    for i in delta_dates:
        float_dates.append((dt.date2num(i/datetime.timedelta(microseconds=1))))
    # Dividing by timedelta(microseconds=1) turns each offset into a
    # plain float (the number of elapsed microseconds).
    polyline = np.poly1d(np.polyfit(np.array(float_dates),np.array(levels), p))
    return polyline, lowest_date
def gradient(dates, levels):
    """Returns the gradient of the line of regression plotted for dates
    levels (m/day).  Raises ValueError when the fit cannot be computed."""
    try: line, date = polyfit(dates,levels, 1)
    except ValueError:
        raise ValueError("Gradient could not be calculated")
    # The fit is degree 1, so its derivative is a constant polynomial and
    # may be evaluated anywhere (here at 1).
    grad = line.deriv(1)
    return float(grad(1)) | [
"numpy.array",
"datetime.timedelta"
] | [((915, 936), 'numpy.array', 'np.array', (['float_dates'], {}), '(float_dates)\n', (923, 936), True, 'import numpy as np\n'), ((937, 953), 'numpy.array', 'np.array', (['levels'], {}), '(levels)\n', (945, 953), True, 'import numpy as np\n'), ((766, 800), 'datetime.timedelta', 'datetime.timedelta', ([], {'microseconds': '(1)'}), '(microseconds=1)\n', (784, 800), False, 'import datetime\n')] |
import cv2
import numpy as np
from mmcv.utils import Config
from mmseg.datasets import build_dataset, build_dataloader
def transform(im):
    """Convert a normalized CHW torch tensor back to an HWC uint8 image
    by undoing the per-channel standardization."""
    channel_mean = np.array([123.675, 116.28, 103.53]).reshape((1, 1, 3))
    channel_std = np.array([58.395, 57.12, 57.375]).reshape((1, 1, 3))
    hwc = im.permute(1, 2, 0).numpy()
    return (hwc * channel_std + channel_mean).astype(np.uint8)
def main_temporal():
    """Build the multi-frame cityscapes *val* loader and iterate it once,
    printing the clip shapes.  (A matching train-split loop existed here
    only as commented-out code and was removed.)"""
    config = '/ghome/zhuangjf/ST-Fusion/configs/_base_/datasets/multi_frame.py'
    cfg = Config.fromfile(config)
    cfg = cfg.data.val
    cfg['data_root'] = '/ghome/zhuangjf/ST-Fusion/data/cityscapes/'
    cfg['split'] = '/gdata/zhuangjf/cityscapes/original/list/val_3_frames.txt'
    cfg['img_dir'] = '/gdata/zhuangjf/cityscapes/original/leftImg8bit_sequence/val'
    loader = build_dataloader(build_dataset(cfg), 1, 1, drop_last=True)
    for idx, batch in enumerate(loader):
        clip = batch['clip']
        print('{}/{}'.format(idx, len(loader)), len(clip[0]), clip[0][0].data[0].shape)
if __name__ == '__main__':
    # Smoke-test the temporal dataloader.
    main_temporal()
| [
"numpy.array",
"mmseg.datasets.build_dataset",
"mmcv.utils.Config.fromfile",
"mmseg.datasets.build_dataloader"
] | [((520, 543), 'mmcv.utils.Config.fromfile', 'Config.fromfile', (['config'], {}), '(config)\n', (535, 543), False, 'from mmcv.utils import Config\n'), ((1404, 1422), 'mmseg.datasets.build_dataset', 'build_dataset', (['cfg'], {}), '(cfg)\n', (1417, 1422), False, 'from mmseg.datasets import build_dataset, build_dataloader\n'), ((1441, 1488), 'mmseg.datasets.build_dataloader', 'build_dataloader', (['dataset', '(1)', '(1)'], {'drop_last': '(True)'}), '(dataset, 1, 1, drop_last=True)\n', (1457, 1488), False, 'from mmseg.datasets import build_dataset, build_dataloader\n'), ((189, 203), 'numpy.array', 'np.array', (['mean'], {}), '(mean)\n', (197, 203), True, 'import numpy as np\n'), ((267, 280), 'numpy.array', 'np.array', (['std'], {}), '(std)\n', (275, 280), True, 'import numpy as np\n')] |
"""
This script contains code for useful data transformations and data checks
used in the other modules of squidward.
"""
import sys
import warnings
import functools
import numpy as np
import scipy.linalg as la
# `long` exists only on Python 2; on Python 3 referencing it raises
# NameError, which the bare except below deliberately catches.
try:
    # python 2
    numeric_types = (int, float, long, complex)
except:
    # python 3
    numeric_types = (int, float, complex)

# Turn numpy overflow warnings into hard errors module-wide (sigmoid
# temporarily relaxes this setting and then restores it).
np.seterr(over="raise")
# run np.show_config() to see
# if numpy is running with MKL
# backend
# ---------------------------------------------------------------------------------------------------------------------
# Array Checks
# ---------------------------------------------------------------------------------------------------------------------
def array_equal(alpha, beta):
    """
    Function returns true if two arrays are identical.
    """
    # Delegate entirely to numpy: shapes and elements must both match.
    return np.array_equal(alpha, beta)
def exactly_1d(arr):
    """
    Coerce input to a 1-D numpy array.

    Scalars become a length-1 array; (1, n) and (n, 1) arrays are
    flattened to shape (n,). Anything else raises.
    """
    if not isinstance(arr, np.ndarray):
        if isinstance(arr, numeric_types):
            return np.array([arr])
        raise Exception("Not appropriate input type.")
    ndim = len(arr.shape)
    if ndim == 1:
        return arr
    if ndim == 2:
        rows, cols = arr.shape
        if rows == 1:
            return arr[0, :]
        if cols == 1:
            return arr[:, 0]
    raise Exception("Not appropriate input shape.")
def exactly_2d(arr):
    """
    Function to ensure that an array has a least 2 dimensions. Used to
    formalize output / input dimensions for certain functions.

    1-D input becomes a column vector; a (1, n) row is reshaped into a
    column; 3-D input with a singleton first or last axis is squeezed.
    Every other shape raises.
    """
    arr = np.asarray(arr)
    ndim = len(arr.shape)
    if ndim == 1:
        return arr.reshape(-1, 1)
    if ndim == 2:
        # A single row is normalized to a single column.
        if arr.shape[0] == 1:
            return arr.reshape(-1, 1)
        return arr
    if ndim == 3:
        if arr.shape[0] == 1:
            return arr[0, :, :]
        if arr.shape[2] == 1:
            return arr[:, :, 0]
    # Fix: the original ended with `if ndim > 3: raise` immediately
    # followed by an identical unconditional raise; one raise covers all
    # remaining shapes (including 0-D input).
    raise Exception("Not appropriate input shape.")
# ---------------------------------------------------------------------------------------------------------------------
# Inversions
# ---------------------------------------------------------------------------------------------------------------------
def is_invertible(arr, strength='condition'):
    """
    Function to return True if matrix is safely invertible and
    False if the matrix is not safely invertible.

    strength: 'cramer' (nonzero determinant), 'rank' (square and full
    rank), or anything else -> 'condition' (reciprocal condition number
    at least machine epsilon; the default, most numerically meaningful
    check).
    """
    if strength == 'cramer':
        # Fix: a matrix is invertible iff its determinant is NONZERO;
        # the original returned True exactly when det == 0.0.
        return np.linalg.det(arr) != 0.0
    if strength == 'rank':
        return arr.shape[0] == arr.shape[1] and np.linalg.matrix_rank(arr) == arr.shape[0]
    return 1.0 / np.linalg.cond(arr) >= sys.float_info.epsilon
def check_valid_cov(cov, safe=True):
    """
    Function to do safety checks on covariance matrices.

    Warns when the matrix is ill-conditioned and raises when the
    diagonal (the variances) contains a negative entry. Pass safe=False
    to skip every check.
    """
    if not safe:
        return None
    if not is_invertible(cov):
        warnings.warn('Cov has high condition. Inverting matrix may result in errors.')
    variances = np.diag(cov)
    if variances[variances < 0].shape[0] != 0:
        raise Exception('Negative values in diagonal of covariance matrix.\nLikely cause is kernel inversion instability.\nCheck kernel variance.')
class Invert(object):
    """Invert matrices."""

    def __init__(self, method='inv'):
        """
        Description
        ----------
        Class to handle inverting matrices.

        Parameters
        ----------
        method: String
            The name of the method to be used for inverting matrices.
            Options: inv, pinv, solve, cholesky, svd, lu, mp_lu
        """
        if method == 'inv':
            self.inv = np.linalg.inv
        elif method == 'pinv':
            self.inv = np.linalg.pinv
        elif method == 'solve':
            self.inv = self.solve
        elif method == 'cholesky':
            self.inv = self.cholesky
        elif method == 'svd':
            self.inv = self.svd
        elif method == 'lu':
            self.inv = self.lu
        elif method == 'mp_lu':
            self.inv = self.mp_lu
        else:
            raise Exception('Invalid inversion method argument.')

    def __call__(self, arr):
        """
        Inverts matrix, warning first when it is ill-conditioned.
        """
        if not is_invertible(arr):
            warnings.warn('Matrix has high condition. Inverting matrix may result in errors.')
        return self.inv(arr)

    def solve(self, arr):
        """
        Invert by solving A X = I for X with a linear solve.
        """
        identity = np.identity(arr.shape[-1], dtype=arr.dtype)
        return np.linalg.solve(arr, identity)

    def cholesky(self, arr):
        """
        Use cholesky decomposition for finding matrix inversion.
        """
        inv_cholesky = np.linalg.inv(np.linalg.cholesky(arr))
        return np.dot(inv_cholesky.T, inv_cholesky)

    def svd(self, arr):
        """
        Use singular value decomposition for finding matrix inversion.
        """
        unitary_u, singular_values, unitary_v = np.linalg.svd(arr)
        return np.dot(unitary_v.T, np.dot(np.diag(singular_values**-1), unitary_u.T))

    def lu(self, arr):
        """
        Use lower upper decomposition for finding matrix inversion.
        """
        permutation, lower, upper = la.lu(arr)
        inv_u = np.linalg.inv(upper)
        inv_l = np.linalg.inv(lower)
        inv_p = np.linalg.inv(permutation)
        return inv_u.dot(inv_l).dot(inv_p)

    def mp_lu(self, arr):
        """
        Invert via scipy's pivoted LU factorization plus a solve against
        the identity.

        Fix: __init__ advertised and dispatched to 'mp_lu', but no such
        method existed, so Invert('mp_lu') raised AttributeError.
        """
        lu_and_piv = la.lu_factor(arr)
        identity = np.identity(arr.shape[-1], dtype=arr.dtype)
        return la.lu_solve(lu_and_piv, identity)
# ---------------------------------------------------------------------------------------------------------------------
# Pre-processing
# ---------------------------------------------------------------------------------------------------------------------
def onehot(arr, num_classes=None, safe=True):
    """
    One-hot encode a 1-D integer label array.

    num_classes defaults to the number of distinct labels; when it is
    supplied and safe=True, a mismatch with the actual label count
    raises.
    """
    arr = exactly_1d(arr)
    if num_classes is None:
        num_classes = np.unique(arr).shape[0]
    elif safe and num_classes != np.unique(arr).shape[0]:
        raise Exception('Number of unique values does not match num_classes argument.')
    return np.squeeze(np.eye(num_classes)[arr.reshape(-1)])
def reversehot(arr):
    """
    Collapse a one-hot matrix back to a 1-D label vector.

    1-D input is returned unchanged; degenerate (1, n) / (n, 1) inputs
    are simply flattened; everything else gets argmax along axis 1.
    """
    if len(arr.shape) <= 1:
        return arr
    if len(arr.shape) == 2:
        if arr.shape[0] == 1:
            return arr[0, :]
        if arr.shape[1] == 1:
            return arr[:, 0]
    return arr.argmax(axis=1)
# ---------------------------------------------------------------------------------------------------------------------
# Classification Specific
# ---------------------------------------------------------------------------------------------------------------------
def sigmoid(z):
    """
    Function to return the sigmoid transformation for every
    term in an array.
    """
    # Overflow in exp is tolerated here, so temporarily downgrade the
    # module-wide "raise on overflow" setting to a warning, then restore.
    np.seterr(over="warn")
    out = 1.0 / (1.0 + np.exp(-z))
    out = np.minimum(out, 1.0)  # clamp upper bound
    out = np.maximum(out, 0.0)  # clamp lower bound
    np.seterr(over="raise")
    return out
def softmax(z):
    """
    Function to return the softmax transformation over an
    input vector.

    NOTE: each entry is squashed through a sigmoid first and the rows
    are then normalized, rather than computing exp(z) / sum(exp(z)).
    """
    squashed = sigmoid(z)
    row_totals = squashed.sum(axis=1).reshape(-1, 1)
    return squashed / row_totals
# ---------------------------------------------------------------------------------------------------------------------
# Miscellaneous
# ---------------------------------------------------------------------------------------------------------------------
def deprecated(func):
    """
    A decorator used to mark functions that are deprecated with a warning.
    """
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        # Force the warning to fire on every call, then restore the
        # default one-shot filtering before delegating.
        warnings.simplefilter('always', DeprecationWarning)
        message = "Call to deprecated function {}.".format(func.__name__)
        warnings.warn(message, category=DeprecationWarning, stacklevel=2)
        warnings.simplefilter('default', DeprecationWarning)
        return func(*args, **kwargs)
    return wrapper
| [
"numpy.maximum",
"numpy.linalg.cond",
"numpy.linalg.svd",
"numpy.exp",
"numpy.diag",
"numpy.linalg.solve",
"numpy.unique",
"warnings.simplefilter",
"numpy.eye",
"numpy.identity",
"numpy.linalg.matrix_rank",
"numpy.linalg.det",
"numpy.linalg.cholesky",
"numpy.minimum",
"numpy.asarray",
... | [((347, 370), 'numpy.seterr', 'np.seterr', ([], {'over': '"""raise"""'}), "(over='raise')\n", (356, 370), True, 'import numpy as np\n'), ((947, 974), 'numpy.array_equal', 'np.array_equal', (['alpha', 'beta'], {}), '(alpha, beta)\n', (961, 974), True, 'import numpy as np\n'), ((1765, 1780), 'numpy.asarray', 'np.asarray', (['arr'], {}), '(arr)\n', (1775, 1780), True, 'import numpy as np\n'), ((3261, 3273), 'numpy.diag', 'np.diag', (['cov'], {}), '(cov)\n', (3268, 3273), True, 'import numpy as np\n'), ((7171, 7193), 'numpy.seterr', 'np.seterr', ([], {'over': '"""warn"""'}), "(over='warn')\n", (7180, 7193), True, 'import numpy as np\n'), ((7239, 7259), 'numpy.minimum', 'np.minimum', (['sig', '(1.0)'], {}), '(sig, 1.0)\n', (7249, 7259), True, 'import numpy as np\n'), ((7289, 7309), 'numpy.maximum', 'np.maximum', (['sig', '(0.0)'], {}), '(sig, 0.0)\n', (7299, 7309), True, 'import numpy as np\n'), ((7333, 7356), 'numpy.seterr', 'np.seterr', ([], {'over': '"""raise"""'}), "(over='raise')\n", (7342, 7356), True, 'import numpy as np\n'), ((7922, 7943), 'functools.wraps', 'functools.wraps', (['func'], {}), '(func)\n', (7937, 7943), False, 'import functools\n'), ((3171, 3250), 'warnings.warn', 'warnings.warn', (['"""Cov has high condition. Inverting matrix may result in errors."""'], {}), "('Cov has high condition. 
Inverting matrix may result in errors.')\n", (3184, 3250), False, 'import warnings\n'), ((4752, 4795), 'numpy.identity', 'np.identity', (['arr.shape[-1]'], {'dtype': 'arr.dtype'}), '(arr.shape[-1], dtype=arr.dtype)\n', (4763, 4795), True, 'import numpy as np\n'), ((4811, 4841), 'numpy.linalg.solve', 'np.linalg.solve', (['arr', 'identity'], {}), '(arr, identity)\n', (4826, 4841), True, 'import numpy as np\n'), ((5038, 5074), 'numpy.dot', 'np.dot', (['inv_cholesky.T', 'inv_cholesky'], {}), '(inv_cholesky.T, inv_cholesky)\n', (5044, 5074), True, 'import numpy as np\n'), ((5243, 5261), 'numpy.linalg.svd', 'np.linalg.svd', (['arr'], {}), '(arr)\n', (5256, 5261), True, 'import numpy as np\n'), ((5500, 5510), 'scipy.linalg.lu', 'la.lu', (['arr'], {}), '(arr)\n', (5505, 5510), True, 'import scipy.linalg as la\n'), ((5527, 5547), 'numpy.linalg.inv', 'np.linalg.inv', (['upper'], {}), '(upper)\n', (5540, 5547), True, 'import numpy as np\n'), ((5564, 5584), 'numpy.linalg.inv', 'np.linalg.inv', (['lower'], {}), '(lower)\n', (5577, 5584), True, 'import numpy as np\n'), ((5601, 5627), 'numpy.linalg.inv', 'np.linalg.inv', (['permutation'], {}), '(permutation)\n', (5614, 5627), True, 'import numpy as np\n'), ((8151, 8202), 'warnings.simplefilter', 'warnings.simplefilter', (['"""always"""', 'DeprecationWarning'], {}), "('always', DeprecationWarning)\n", (8172, 8202), False, 'import warnings\n'), ((8312, 8385), 'warnings.warn', 'warnings.warn', (['primary_message'], {'category': 'DeprecationWarning', 'stacklevel': '(2)'}), '(primary_message, category=DeprecationWarning, stacklevel=2)\n', (8325, 8385), False, 'import warnings\n'), ((8394, 8446), 'warnings.simplefilter', 'warnings.simplefilter', (['"""default"""', 'DeprecationWarning'], {}), "('default', DeprecationWarning)\n", (8415, 8446), False, 'import warnings\n'), ((1248, 1263), 'numpy.array', 'np.array', (['[arr]'], {}), '([arr])\n', (1256, 1263), True, 'import numpy as np\n'), ((2776, 2794), 'numpy.linalg.det', 'np.linalg.det', 
(['arr'], {}), '(arr)\n', (2789, 2794), True, 'import numpy as np\n'), ((2937, 2956), 'numpy.linalg.cond', 'np.linalg.cond', (['arr'], {}), '(arr)\n', (2951, 2956), True, 'import numpy as np\n'), ((4514, 4601), 'warnings.warn', 'warnings.warn', (['"""Matrix has high condition. Inverting matrix may result in errors."""'], {}), "(\n 'Matrix has high condition. Inverting matrix may result in errors.')\n", (4527, 4601), False, 'import warnings\n'), ((4998, 5021), 'numpy.linalg.cholesky', 'np.linalg.cholesky', (['arr'], {}), '(arr)\n', (5016, 5021), True, 'import numpy as np\n'), ((6364, 6383), 'numpy.eye', 'np.eye', (['num_classes'], {}), '(num_classes)\n', (6370, 6383), True, 'import numpy as np\n'), ((7217, 7227), 'numpy.exp', 'np.exp', (['(-z)'], {}), '(-z)\n', (7223, 7227), True, 'import numpy as np\n'), ((2877, 2903), 'numpy.linalg.matrix_rank', 'np.linalg.matrix_rank', (['arr'], {}), '(arr)\n', (2898, 2903), True, 'import numpy as np\n'), ((5304, 5334), 'numpy.diag', 'np.diag', (['(singular_values ** -1)'], {}), '(singular_values ** -1)\n', (5311, 5334), True, 'import numpy as np\n'), ((6162, 6176), 'numpy.unique', 'np.unique', (['arr'], {}), '(arr)\n', (6171, 6176), True, 'import numpy as np\n'), ((6225, 6239), 'numpy.unique', 'np.unique', (['arr'], {}), '(arr)\n', (6234, 6239), True, 'import numpy as np\n')] |
###################### INFORMATION #############################
# akf-mocrin is a "Multiple OCR Interface"
# Program: **akf-mocrin**
# Info: **Python 3.6**
# Author: **<NAME>**
# Date: **12.01.2018**
########## IMPORT ##########
import configparser
import numpy as np
import argparse
import subprocess
from multiprocessing import Process
import json
import shlex
import datetime
import os
import sys
from mocrinlib.abbyyrunner import start_abbyy
from mocrinlib.tessapi import tess_pprocess
from mocrinlib.common import create_dir, get_iopath, get_filenames
from mocrinlib.imgproc import safe_imread, get_binary
########## CMD-PARSER-SETTINGS ##########
def get_args(argv):
    """
    Parse the command-line options.

    :param argv: extra argument strings appended to ``sys.argv`` before
                 parsing (used when calling start_mocrin() from code instead
                 of the command line)
    :return: the parsed cl-options (argparse.Namespace)
    """
    if argv:
        # update_args() later inspects sys.argv to see which options were
        # given explicitly, so programmatic arguments must go through sys.argv.
        sys.argv.extend(argv)
    argparser = argparse.ArgumentParser()
    argparser.add_argument("--info", type=str, default="datetime", help="Text that will be tagged to the outputdirectory. Default prints datetime (year-month-day_hour%minutes).")
    argparser.add_argument("--fpathin", type=str, default="", help="Set Input Filenname/Path without config.ini")
    argparser.add_argument("--fpathout", type=str, default="", help="Set Output Filenname/Path without config.ini")
    argparser.add_argument("-c", "--cut", action='store_true', help="Cut certain areas of the image (see tess_profile['Cutter'].")
    argparser.add_argument("-f", "--fileformat", default="jpg", help="Fileformat of the images")
    argparser.add_argument("-b", "--binary", action='store_true', help="Binarize the image")
    argparser.add_argument("--no-tess", action='store_true', help="Don't perfom tessract.")
    argparser.add_argument("--no-ocropy", action='store_true', help="Don't perfom ocropy.")
    argparser.add_argument("-r", "--rewrite-ocropy", action='store_true', help="Don't bin and seg ocropy.")
    argparser.add_argument("-n", "--new-ocropy", action='store_false', help="Use the new bin files ocropy.")
    argparser.add_argument("--no-abbyy", action='store_true', help="Don't perfom abbyy.")
    argparser.add_argument("--tess-profile", default='test', choices=["default", "test", ""], help="Don't perfom tessract. If the value is an empty string take name from config.ini")
    argparser.add_argument("--ocropy-profile", default='test', choices=["default", "test", ""], help="Don't perfom ocropy. If the value is an empty string take name from config.ini")
    argparser.add_argument("--filter", type=str, default="sauvola", choices=["sauvola", "niblack", "otsu", "yen", "triangle", "isodata", "minimum", "li", "mean"], help='Chose your favorite threshold filter: %(choices)s')
    argparser.add_argument("--threshwindow", type=int, default=31, help='Size of the window (binarization): %(default)s')
    # BUGFIX: this option previously used choices=np.arange(0, 1.0), which
    # (with the default step of 1) only contains the single value 0.0 and
    # therefore rejected every other user-supplied weight. Any float is
    # accepted now; sensible values lie in [0, 1].
    argparser.add_argument("--threshweight", type=float, default=0.2, help='Weight the effect of the standard deviation (binarization): %(default)s')
    argparser.add_argument("--threshbin", type=int, default=256,
                           help='Number of bins used to calculate histogram. This value is ignored for integer arrays.')
    argparser.add_argument("--threshhitter", type=int, default=10000,
                           help='Maximum number of iterations to smooth the histogram.')
    args = argparser.parse_args()
    return args
########## JSON_DefaultRemover ##########
class DefaultRemover(json.JSONDecoder):
    """
    JSON decoder that prunes profile noise while loading.

    * keys whose value is ``null`` are dropped,
    * an object whose ``"value"`` equals its ``"default"`` is replaced by
      ``None`` (the parameter was left at its default),
    * an object whose keys were all dropped is replaced by ``None``.
    """
    def __init__(self, *args, **kwargs):
        json.JSONDecoder.__init__(self, object_hook=self.object_hook, *args, **kwargs)

    def object_hook(self, obj):
        """
        Filter one decoded JSON object; returning None removes the object
        from its parent (the parent's hook then sees a None value).
        """
        none_keys = []
        for key, value in obj.items():
            if value is None:
                none_keys.append(key)
            if key == 'value' and "default" in obj and obj["default"] == obj[key]:
                # Parameter still carries its default -> drop the whole object.
                return None
        if len(none_keys) == len(obj):
            # Every entry was None -> drop the whole object.
            return None
        for key in none_keys:
            del obj[key]
        return obj
########## FUNCTIONS ##########
def cut_check(args, tess_profile: dict) -> int:
    """
    Enable/disable image cutting based on CLI args and the tesseract profile.

    If the profile requests cutting (parameters['--cut'].value truthy) the
    flag is forced on; cutting implies skipping ocropy. When cutting is off,
    the profile's "cutter" section is removed. A malformed or missing profile
    structure simply disables cutting.

    :param args: parsed CLI namespace (mutated: ``cut``/``no_ocropy``)
    :param tess_profile: tesseract profile dict (mutated: may lose "cutter")
    :return: always 0
    """
    try:
        params = tess_profile["parameters"]
        if '--cut' in params and params["--cut"]["value"]:
            args.cut = True
        if args.cut:
            args.no_ocropy = True
        else:
            del tess_profile["cutter"]
    # BUGFIX: was a bare "except:" which also swallowed unrelated errors
    # (including KeyboardInterrupt); only missing keys / wrong structure
    # should disable cutting.
    except (KeyError, TypeError):
        args.cut = False
    return 0
def get_profiles(args, config):
    """
    Load the user-specific JSON profiles for tesseract and ocropy.

    The profile name comes from config.ini unless overridden on the command
    line; entries equal to their defaults are stripped by DefaultRemover.

    :param args: parsed CLI namespace
    :param config: parsed config.ini
    :return: tuple (tess_profile, ocropy_profile); a profile collapses to
             "" when DefaultRemover filtered everything away
    """
    tess_profile = {}
    ocropy_profile = {}
    if not args.no_tess:
        name = config['DEFAULT']['TESSPROFILENAME']
        if args.tess_profile != "":
            name = args.tess_profile
        path = config['DEFAULT']['TESSPROFILEPATH'] + name + "_tess_profile.json"
        with open(path, "r") as fin:
            tess_profile = json.load(fin, cls=DefaultRemover)
        cut_check(args, tess_profile)
        if tess_profile is None:
            tess_profile = ""
    if not args.no_ocropy:
        name = config['DEFAULT']['OCROPYPROFILENAME']
        if args.ocropy_profile != "":
            name = args.ocropy_profile
        path = config['DEFAULT']['OCROPYPROFILEPATH'] + name + "_ocropy_profile.json"
        with open(path, "r") as fin:
            ocropy_profile = json.load(fin, cls=DefaultRemover)
        if ocropy_profile is None:
            ocropy_profile = ""
    return (tess_profile, ocropy_profile)
def update_args(args, config):
    """
    Overwrite argument defaults with values from the CLI-args profile.

    An option is only overwritten when neither the option itself nor its
    alias was given explicitly on the command line.

    :param args: parsed CLI namespace (mutated in place)
    :param config: parsed config.ini
    :return: always 0
    """
    default = config['DEFAULT']
    profile_path = default['CLI_ARGSPATH'] + default['CLI_ARGSNAME'] + "_argparse_profile.json"
    with open(profile_path, "r") as fin:
        args_profile = json.load(fin, cls=DefaultRemover)
    for option, entry in args_profile.items():
        given_on_cli = option in sys.argv or (
            "alias" in entry and entry["alias"] in sys.argv)
        if given_on_cli:
            continue
        # "--some-option" -> attribute name "some_option"
        attr = option.lstrip("-").replace("-", "_")
        setattr(args, attr, entry["value"])
    return 0
def store_settings(path_out: str, profile: dict, args, ocrname: str) -> int:
    """
    Save the used settings in the folder of the output file.

    Writes a human-readable settings file containing a timestamp, the parsed
    CLI arguments and the OCR profile as pretty-printed JSON.

    :param path_out: output directory (with trailing separator)
    :param profile: the OCR profile that was used
    :param args: parsed CLI namespace
    :param ocrname: engine name used in the file name ("Tesseract", "Ocropy", ...)
    :return: always 0
    """
    divider = "-" * 200 + "\n"
    target = path_out + ocrname + "_" + args.infotxt + "settings.txt"
    with open(target, "w") as settings:
        now = datetime.datetime.now()
        settings.write(divider)
        settings.write(ocrname + "-Settings for the run " + '"' + args.info + '"' + "\n" + "Timestamp:" + now.ctime() + "\n")
        settings.write(divider)
        settings.write("Arguments:\n")
        json.dump(vars(args), settings, sort_keys=True, indent=4)
        settings.write("\n" + divider)
        settings.write("Profile:\n")
        json.dump(profile, settings, sort_keys=True, indent=4)
    return 0
########## TESSERACT FUNCTIONS ##########
def start_tess(file: str, path_out: str, tess_profile: dict, args) -> int:
    """
    Run tesseract (via the tessapi cli-module wrapper) on one image.

    :param file: input image path
    :param path_out: output directory path
    :param tess_profile: user-specific parameters/options for tesseract
    :param args: parsed CLI namespace
    :return: always 0
    """
    create_dir(path_out)
    # Write the settings file only once, for the first image.
    if args.idx == 0:
        store_settings(path_out, tess_profile, args, "Tesseract")
    basename = file.split('/')[-1]
    file_out = path_out + args.infotxt + basename
    tess_pprocess(file, file_out, args.cut, tess_profile)
    print("Finished tesseract for: " + basename)
    return 0
########## OCROPY FUNCTIONS ##########
def start_ocropy(file: str, path_out: str, ocropy_profile: dict, args) -> int:
    """
    Run the ocropy pipeline (binarize, segment, predict, hocr) on one image.

    Binarization and segmentation are skipped when the output directory
    already exists, unless --rewrite-ocropy is set.

    :param file: input image path
    :param path_out: output directory path
    :param ocropy_profile: user-specific parameters/options for ocropy
    :param args: parsed CLI namespace
    :return: always 0
    """
    def run(cmd):
        # Launch one ocropy tool and block until it finishes.
        subprocess.Popen(args=cmd).wait()

    basename = file.split('/')[-1]
    stem = basename.split('.')[0]
    create_dir(path_out)
    if args.idx == 0:
        store_settings(path_out, ocropy_profile, args, "Ocropy")
    # All user-specific parameters from the ocropy-profile.
    parameters = get_ocropy_param(ocropy_profile)
    opath = path_out + args.infotxt + stem
    if not os.path.isdir(opath) or args.rewrite_ocropy:
        run(["ocropus-nlbin", file, "-o" + opath + "/"] + parameters["ocropus-nlbin"])
        run(["ocropus-gpageseg", opath + "/????.bin.png", "-n", "--maxlines", "2000"] + parameters["ocropus-gpageseg"])
        if os.path.isfile(opath + "/0001.nrm.png"):
            os.remove(opath + "/0001.nrm.png")
    fext = "" if args.new_ocropy else ".new"
    run(["ocropus-rpred", opath + "/????/??????" + fext + ".bin.png"] + parameters["ocropus-rpred"])
    run(["ocropus-hocr", opath + "/????.bin.png", "-o" + path_out + "/" + basename + ".hocr"] + parameters["ocropus-hocr"])
    print("Finished ocropy for: " + basename)
    return 0
def get_ocropy_param(ocropy_profile: dict) -> dict:
    """
    Collect the user-specific command-line parameters from the ocropy-profile
    for "ocropus-nlbin", "ocropus-gpageseg", "ocropus-rpred", "ocropus-hocr".

    :param ocropy_profile: profile dict (see *_ocropy_profile.json)
    :return: dict mapping tool name -> shlex-split list of the parameters
             whose values differ from the default values
    """
    parameters = {}
    for funcall in ("ocropus-nlbin", "ocropus-gpageseg", "ocropus-rpred", "ocropus-hocr"):
        if funcall not in ocropy_profile['parameters']:
            continue
        chunks = []
        for param, entry in ocropy_profile['parameters'][funcall].items():
            # The 'files' parameter is supplied by the caller, never here.
            if param == "files":
                continue
            if "-" not in param:
                # Names without a dash are parameter groups -> one level deeper.
                for sub_param, sub_entry in entry.items():
                    if sub_param != "files":
                        chunks.append(_format_ocropy_param(sub_param, sub_entry))
            else:
                chunks.append(_format_ocropy_param(param, entry))
        # NOTE: the old code also called .strip() on the joined string but
        # discarded the result (str is immutable); shlex.split ignores the
        # trailing whitespace anyway, so the call was dropped.
        parameters[funcall] = shlex.split("".join(chunks))
    return parameters


def _format_ocropy_param(name: str, entry: dict) -> str:
    """Render one profile entry as 'name ' (flag), 'name value ', or ''."""
    value = entry['value']
    # "" and False (and 0, as before) mean unset/disabled -> pass nothing.
    if value == "" or value == False:
        return ""
    if "action" in entry:
        # store_true-style flag: the name alone switches it on.
        return name + " "
    return name + " " + value + " "
########### MAIN FUNCTION ##########
def start_mocrin(*argv)->int:
    """
    Main entry point: run the selected OCR engines over all input images.

    The file paths are stored in the config.ini file and can be changed
    there. Tesseract and ocropy run asynchronously (one process each per
    image); abbyy, when enabled, deactivates both.

    :param argv: argument vector with arguments parsed by function call, not commandline
    :return: always 0
    """
    config = configparser.ConfigParser()
    config.sections()
    config.read('config.ini')
    args = get_args(argv)
    update_args(args,config)
    args.infofolder = ""
    args.infotxt = ""
    #args.no_abbyy = False
    if args.info != "":
        args.infotxt = ""
        # "datetime" tags the output folder with the current timestamp;
        # any other text is used verbatim as folder name and file prefix.
        if args.info == "datetime":
            args.infofolder = datetime.datetime.now().strftime("%Y-%m-%d_T%HH%MM")+"/"
        else:
            args.infofolder = args.info+"/"
            args.infotxt = args.info+"_"
    PATHINPUT, PATHOUTPUT = get_iopath(args.fpathin,args.fpathout,config)
    files = get_filenames(args.fileformat,PATHINPUT)
    tess_profile, ocropy_profile = get_profiles(args, config)
    for idx,file in enumerate(files):
        #if "1969" not in file: continue
        args.idx = idx
        # Mirror the input directory structure below the output path.
        path_out = PATHOUTPUT+os.path.dirname(file).replace(PATHINPUT[:-1],"")+"/"
        # Safe image read function
        image = safe_imread(file)
        # Produce a binary image, could improve the results of the ocr?
        if args.binary:
            file = get_binary(args, image, file,path_out+'bin/')
        # Start the ocr-processes ("p") asynchronously
        procs = []
        if not args.no_abbyy:
            print("Call abbyy! All other ocr engines got deactivated!")
            start_abbyy(file, path_out + "abbyy/"+args.infofolder)
            args.no_tess = True
            args.no_ocropy= True
        if not args.no_tess:
            p1 = Process(target=start_tess, args=[file, path_out + "tess/"+args.infofolder, tess_profile,args])
            print("Call tesseract!")
            p1.start()
            procs.append(p1)
        if not args.no_ocropy:
            p2 = Process(target=start_ocropy, args=[file, path_out + "ocropy/"+args.infofolder, ocropy_profile,args])
            print("Call ocropy!")
            p2.start()
            procs.append(p2)
        # Wait till all ocr-processes are finished
        for p in procs:
            p.join()
        print("Next image!")
    print("All images were processed!")
    return 0
########## START ##########
# Run only when executed as a script; importing the module has no side effects.
if __name__ == "__main__":
    """
    Entrypoint: Searches for the files and parse them into the mainfunction (can be multiprocessed)
    """
    start_mocrin()
| [
"os.remove",
"mocrinlib.tessapi.tess_pprocess",
"argparse.ArgumentParser",
"mocrinlib.imgproc.safe_imread",
"mocrinlib.common.create_dir",
"os.path.isfile",
"numpy.arange",
"mocrinlib.common.get_filenames",
"mocrinlib.common.get_iopath",
"mocrinlib.abbyyrunner.start_abbyy",
"os.path.dirname",
... | [((878, 903), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (901, 903), False, 'import argparse\n'), ((7876, 7896), 'mocrinlib.common.create_dir', 'create_dir', (['path_out'], {}), '(path_out)\n', (7886, 7896), False, 'from mocrinlib.common import create_dir, get_iopath, get_filenames\n'), ((8061, 8114), 'mocrinlib.tessapi.tess_pprocess', 'tess_pprocess', (['file', 'file_out', 'args.cut', 'tess_profile'], {}), '(file, file_out, args.cut, tess_profile)\n', (8074, 8114), False, 'from mocrinlib.tessapi import tess_pprocess\n'), ((8542, 8562), 'mocrinlib.common.create_dir', 'create_dir', (['path_out'], {}), '(path_out)\n', (8552, 8562), False, 'from mocrinlib.common import create_dir, get_iopath, get_filenames\n'), ((12045, 12072), 'configparser.ConfigParser', 'configparser.ConfigParser', ([], {}), '()\n', (12070, 12072), False, 'import configparser\n'), ((12554, 12601), 'mocrinlib.common.get_iopath', 'get_iopath', (['args.fpathin', 'args.fpathout', 'config'], {}), '(args.fpathin, args.fpathout, config)\n', (12564, 12601), False, 'from mocrinlib.common import create_dir, get_iopath, get_filenames\n'), ((12612, 12653), 'mocrinlib.common.get_filenames', 'get_filenames', (['args.fileformat', 'PATHINPUT'], {}), '(args.fileformat, PATHINPUT)\n', (12625, 12653), False, 'from mocrinlib.common import create_dir, get_iopath, get_filenames\n'), ((839, 860), 'sys.argv.extend', 'sys.argv.extend', (['argv'], {}), '(argv)\n', (854, 860), False, 'import sys\n'), ((3648, 3726), 'json.JSONDecoder.__init__', 'json.JSONDecoder.__init__', (['self', '*args'], {'object_hook': 'self.object_hook'}), '(self, *args, object_hook=self.object_hook, **kwargs)\n', (3673, 3726), False, 'import json\n'), ((6418, 6453), 'json.load', 'json.load', (['file'], {'cls': 'DefaultRemover'}), '(file, cls=DefaultRemover)\n', (6427, 6453), False, 'import json\n'), ((7062, 7085), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (7083, 7085), False, 'import 
datetime\n'), ((7478, 7532), 'json.dump', 'json.dump', (['profile', 'settings'], {'sort_keys': '(True)', 'indent': '(4)'}), '(profile, settings, sort_keys=True, indent=4)\n', (7487, 7532), False, 'import json\n'), ((9114, 9153), 'os.path.isfile', 'os.path.isfile', (["(opath + '/0001.nrm.png')"], {}), "(opath + '/0001.nrm.png')\n", (9128, 9153), False, 'import os\n'), ((12953, 12970), 'mocrinlib.imgproc.safe_imread', 'safe_imread', (['file'], {}), '(file)\n', (12964, 12970), False, 'from mocrinlib.imgproc import safe_imread, get_binary\n'), ((2898, 2915), 'numpy.arange', 'np.arange', (['(0)', '(1.0)'], {}), '(0, 1.0)\n', (2907, 2915), True, 'import numpy as np\n'), ((5451, 5486), 'json.load', 'json.load', (['file'], {'cls': 'DefaultRemover'}), '(file, cls=DefaultRemover)\n', (5460, 5486), False, 'import json\n'), ((5936, 5971), 'json.load', 'json.load', (['file'], {'cls': 'DefaultRemover'}), '(file, cls=DefaultRemover)\n', (5945, 5971), False, 'import json\n'), ((8817, 8837), 'os.path.isdir', 'os.path.isdir', (['opath'], {}), '(opath)\n', (8830, 8837), False, 'import os\n'), ((9153, 9187), 'os.remove', 'os.remove', (["(opath + '/0001.nrm.png')"], {}), "(opath + '/0001.nrm.png')\n", (9162, 9187), False, 'import os\n'), ((9246, 9364), 'subprocess.Popen', 'subprocess.Popen', ([], {'args': "(['ocropus-rpred', opath + '/????/??????' + fext + '.bin.png'] + parameters\n ['ocropus-rpred'])"}), "(args=['ocropus-rpred', opath + '/????/??????' 
+ fext +\n '.bin.png'] + parameters['ocropus-rpred'])\n", (9262, 9364), False, 'import subprocess\n'), ((11720, 11752), 'shlex.split', 'shlex.split', (['parameters[funcall]'], {}), '(parameters[funcall])\n', (11731, 11752), False, 'import shlex\n'), ((13087, 13135), 'mocrinlib.imgproc.get_binary', 'get_binary', (['args', 'image', 'file', "(path_out + 'bin/')"], {}), "(args, image, file, path_out + 'bin/')\n", (13097, 13135), False, 'from mocrinlib.imgproc import safe_imread, get_binary\n'), ((13322, 13378), 'mocrinlib.abbyyrunner.start_abbyy', 'start_abbyy', (['file', "(path_out + 'abbyy/' + args.infofolder)"], {}), "(file, path_out + 'abbyy/' + args.infofolder)\n", (13333, 13378), False, 'from mocrinlib.abbyyrunner import start_abbyy\n'), ((13488, 13589), 'multiprocessing.Process', 'Process', ([], {'target': 'start_tess', 'args': "[file, path_out + 'tess/' + args.infofolder, tess_profile, args]"}), "(target=start_tess, args=[file, path_out + 'tess/' + args.infofolder,\n tess_profile, args])\n", (13495, 13589), False, 'from multiprocessing import Process\n'), ((13720, 13828), 'multiprocessing.Process', 'Process', ([], {'target': 'start_ocropy', 'args': "[file, path_out + 'ocropy/' + args.infofolder, ocropy_profile, args]"}), "(target=start_ocropy, args=[file, path_out + 'ocropy/' + args.\n infofolder, ocropy_profile, args])\n", (13727, 13828), False, 'from multiprocessing import Process\n'), ((8870, 8970), 'subprocess.Popen', 'subprocess.Popen', ([], {'args': "(['ocropus-nlbin', file, '-o' + opath + '/'] + parameters['ocropus-nlbin'])"}), "(args=['ocropus-nlbin', file, '-o' + opath + '/'] +\n parameters['ocropus-nlbin'])\n", (8886, 8970), False, 'import subprocess\n'), ((8974, 9107), 'subprocess.Popen', 'subprocess.Popen', ([], {'args': "(['ocropus-gpageseg', opath + '/????.bin.png', '-n', '--maxlines', '2000'] +\n parameters['ocropus-gpageseg'])"}), "(args=['ocropus-gpageseg', opath + '/????.bin.png', '-n',\n '--maxlines', '2000'] + 
parameters['ocropus-gpageseg'])\n", (8990, 9107), False, 'import subprocess\n'), ((12370, 12393), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (12391, 12393), False, 'import datetime\n'), ((12848, 12869), 'os.path.dirname', 'os.path.dirname', (['file'], {}), '(file)\n', (12863, 12869), False, 'import os\n')] |
# This script opens a 2206B driver device, sets up both channels and a trigger then collects a block of data.
# This data is then plotted as mV against time in us.
import ctypes
import numpy as np
from picosdk.ps2000a import ps2000a as ps
import matplotlib.pyplot as plt
from picosdk.functions import adc2mV, assert_pico_ok
import csv
from datetime import datetime
from itertools import zip_longest
import os.path
from math import log
import time
# ---- Acquisition setup: open the 2206B, configure channels, trigger, timebase ----
# Specify sampling frequency
SAMPLING_FREQUENCY = 500e3 # Hz
if SAMPLING_FREQUENCY >= 125e6:
    # Fast timebases: power-of-two division of the scope's top rate.
    timebase = round(log(500e6/SAMPLING_FREQUENCY,2))
    print('Sampling frequency: {:,}'.format(1/(2**timebase/5)*1e8) + ' Hz')
else:
    # Slower timebases: derived from the 62.5 MHz clock (timebase >= 3).
    timebase=round(62.5e6/SAMPLING_FREQUENCY+2)
    print('Sampling frequency: {:,}'.format(62.5e6/(timebase-2)) + ' Hz')
# Specify acquisition time
ACQUISITION_TIME = 1 # s
samplingInterval = 1/SAMPLING_FREQUENCY
totalSamples = round(ACQUISITION_TIME/samplingInterval)
print('Number of total samples (for each channel): {:,}'.format(totalSamples))
# Buffer memory size: 32 M
# Create chandle and status ready for use.
# The c_int16 constructor accepts an optional integer initializer. Default: 0.
chandle = ctypes.c_int16()
status = {}
# Open 2000 series PicoScope
print('Setting up PiscoScope 2206B unit...')
# Returns handle to chandle for use in future API functions
# First argument: number that uniquely identifies the scope (address of chandle)
# Second argument: first scope found (None)
status["openunit"] = ps.ps2000aOpenUnit(ctypes.byref(chandle), None)
# If any error, the following line will raise one.
assert_pico_ok(status["openunit"])
# Set up channel A
# handle = chandle
# channel = PS2000A_CHANNEL_A = 0
# enabled = 1
# coupling type = PS2000A_DC = 1
# range = PS2000A_2V = 7
# analogue offset = 0 V (last argument below)
chARange = 7
status["setChA"] = ps.ps2000aSetChannel(chandle, 0, 1, 1, chARange, 0)
assert_pico_ok(status["setChA"])
# Set up channel B
# handle = chandle
# channel = PS2000A_CHANNEL_B = 1
# enabled = 1
# coupling type = PS2000A_DC = 1
# range = PS2000A_2V = 7
# analogue offset = 0 V
chBRange = 7
status["setChB"] = ps.ps2000aSetChannel(chandle, 1, 1, 1, chBRange, 0)
assert_pico_ok(status["setChB"])
# Set up single trigger
# handle = chandle
# enabled = 1
# source = PS2000A_CHANNEL_A = 0
# threshold = 1024 ADC counts
# direction = PS2000A_RISING = 2
# delay = 0 sample periods
# auto Trigger = 1000 ms (if no trigger event occurs)
status["trigger"] = ps.ps2000aSetSimpleTrigger(chandle, 1, 0, 1024, 2, 0, 1000)
assert_pico_ok(status["trigger"])
# Set number of pre and post trigger samples to be collected
preTriggerSamples = round(totalSamples/2)
postTriggerSamples = totalSamples-preTriggerSamples
# Get timebase information
# handle = chandle
# timebase: obtained by samplingFrequency (sample interval formula: (timebase-2)*16 ns [for timebase>=3])
# noSamples = totalSamples
# pointer to timeIntervalNanoseconds = ctypes.byref(timeIntervalns)
# oversample: not used, just initialized
# pointer to totalSamples = ctypes.byref(returnedMaxSamples)
# segment index = 0 (index of the memory segment to use, only 1 segment by default)
timeIntervalns = ctypes.c_float()
returnedMaxSamples = ctypes.c_int32()
oversample = ctypes.c_int16(0)
status["getTimebase2"] = ps.ps2000aGetTimebase2(chandle,
                                                timebase,
                                                totalSamples,
                                                ctypes.byref(timeIntervalns),
                                                oversample,
                                                ctypes.byref(returnedMaxSamples),
                                                0)
assert_pico_ok(status["getTimebase2"])
print('Done.')
# ---- Block capture: arm the scope, poll until done, register data buffers ----
# Block sampling mode
# The scope stores data in internal buffer memory and then transfers it to the PC via USB.
# The data is lost when a new run is started in the same segment.
# For PicoScope 2206B the buffer memory is 32 MS, maximum sampling rate 500 MS/s.
print('Running block capture...')
# Run block capture
# handle = chandle
# number of pre-trigger samples = preTriggerSamples
# number of post-trigger samples = postTriggerSamples
# timebase (already defined when using ps2000aGetTimebase2)
# oversample: not used
# time indisposed ms = None (not needed, it's the time the scope will spend collecting samples)
# segment index = 0 (the only one defined by default, this index is zero-based)
# lpReady = None (using ps2000aIsReady rather than ps2000aBlockReady; callback function that the driver would call when the data has been collected)
# pParameter = None (void pointer passed to ps2000aBlockReady() to return arbitrary data to the application)
status["runBlock"] = ps.ps2000aRunBlock(chandle,
                                       preTriggerSamples,
                                       postTriggerSamples,
                                       timebase,
                                       oversample,
                                       None,
                                       0,
                                       None,
                                       None)
assert_pico_ok(status["runBlock"])
# Check for data collection to finish using ps2000aIsReady
# NOTE: busy-wait poll; the loop spins until the driver reports ready != 0.
ready = ctypes.c_int16(0)
check = ctypes.c_int16(0)
while ready.value == check.value:
    status["isReady"] = ps.ps2000aIsReady(chandle, ctypes.byref(ready))
# Create buffers ready for assigning pointers for data collection
bufferAMax = (ctypes.c_int16 * totalSamples)()
bufferAMin = (ctypes.c_int16 * totalSamples)() # used for downsampling which isn't in the scope of this example
bufferBMax = (ctypes.c_int16 * totalSamples)()
bufferBMin = (ctypes.c_int16 * totalSamples)() # used for downsampling which isn't in the scope of this example
# Set data buffer location for data collection from channel A
# handle = chandle
# source = PS2000A_CHANNEL_A = 0
# pointer to buffer max = ctypes.byref(bufferAMax)
# pointer to buffer min = ctypes.byref(bufferAMin)
# buffer length = totalSamples
# segment index = 0
# ratio mode = PS2000A_RATIO_MODE_NONE = 0
status["setDataBuffersA"] = ps.ps2000aSetDataBuffers(chandle,
                                                     0,
                                                     ctypes.byref(bufferAMax),
                                                     ctypes.byref(bufferAMin),
                                                     totalSamples,
                                                     0,
                                                     0)
assert_pico_ok(status["setDataBuffersA"])
# Set data buffer location for data collection from channel B
# handle = chandle
# source = PS2000A_CHANNEL_B = 1
# pointer to buffer max = ctypes.byref(bufferBMax)
# pointer to buffer min = ctypes.byref(bufferBMin)
# buffer length = totalSamples
# segment index = 0
# ratio mode = PS2000A_RATIO_MODE_NONE = 0
status["setDataBuffersB"] = ps.ps2000aSetDataBuffers(chandle,
                                                     1,
                                                     ctypes.byref(bufferBMax),
                                                     ctypes.byref(bufferBMin),
                                                     totalSamples,
                                                     0,
                                                     0)
assert_pico_ok(status["setDataBuffersB"])
# ---- Transfer captured data to the PC and convert ADC counts to mV ----
# Create overflow location (bit field, set when a channel went over voltage)
overflow = ctypes.c_int16()
# Create converted type totalSamples
cTotalSamples = ctypes.c_int32(totalSamples)
# Retrieve data from scope to buffers assigned above
# handle = chandle
# start index = 0 (zero-based index, sample intervals from the start of the buffer)
# pointer to number of samples = ctypes.byref(cTotalSamples)
# downsample ratio = 0
# downsample ratio mode = PS2000A_RATIO_MODE_NONE (downsampling disabled)
# pointer to overflow = ctypes.byref(overflow)
status["getValues"] = ps.ps2000aGetValues(chandle, 0, ctypes.byref(cTotalSamples), 0, 0, 0, ctypes.byref(overflow))
assert_pico_ok(status["getValues"])
# find maximum ADC count value
# handle = chandle
# pointer to value = ctypes.byref(maxADC)
maxADC = ctypes.c_int16()
status["maximumValue"] = ps.ps2000aMaximumValue(chandle, ctypes.byref(maxADC))
assert_pico_ok(status["maximumValue"])
# convert ADC counts data to mV (scaled by the channel range and maxADC)
adc2mVChAMax = adc2mV(bufferAMax, chARange, maxADC)
adc2mVChBMax = adc2mV(bufferBMax, chBRange, maxADC)
# ---- Build the time axis, plot both channels, then release the scope ----
# Create time data
# BUGFIX: the axis was previously built as
#   np.linspace(0, cTotalSamples.value * (timeIntervalns.value - 1), ...)
# which multiplies the sample COUNT by (interval - 1) ns. The last of N
# samples sits at (N - 1) * dt, as in the official picosdk block-mode
# examples, so the -1 belongs on the sample count.
timeAxis = np.linspace(0, (cTotalSamples.value - 1) * timeIntervalns.value, cTotalSamples.value)
print('Done.')
# plot data from channel A and B
plt.plot(timeAxis, adc2mVChAMax[:])
plt.plot(timeAxis, adc2mVChBMax[:])
plt.xlabel('Time (ns)')
plt.ylabel('Voltage (mV)')
plt.show()
# Stop the scope
print('Closing the scope...')
# handle = chandle
status["stop"] = ps.ps2000aStop(chandle)
assert_pico_ok(status["stop"])
# Close unit / disconnect the scope
# handle = chandle
status["close"] = ps.ps2000aCloseUnit(chandle)
assert_pico_ok(status["close"])
print('Done.')
print(status)
# ---- Export the raw samples of both channels to timestamped .csv files ----
# Save raw samples to .csv file (with timestamp)
startTime = time.time()
print('Saving raw samples to .csv file...')
timestamp = datetime.now().strftime("%Y%m%d_%I%M%S_%p")
# BUGFIX: per the csv module docs, the file handed to csv.writer must be
# opened with newline='' — otherwise every row is followed by a blank line
# on Windows.
samplesFileNameChA = timestamp + "_ChA.csv"
completeFileNameChA = os.path.join('../raw-samples', samplesFileNameChA)
with open(completeFileNameChA, 'w', newline='') as file:
    writer = csv.writer(file)
    # One row per sample: (voltage in mV, time along timeAxis).
    writer.writerows(zip(adc2mVChAMax, timeAxis))
samplesFileNameChB = timestamp + "_ChB.csv"
completeFileNameChB = os.path.join('../raw-samples', samplesFileNameChB)
with open(completeFileNameChB, 'w', newline='') as file:
    writer = csv.writer(file)
    writer.writerows(zip(adc2mVChBMax, timeAxis))
elapsedTime = time.time() - startTime
print('Done. Elapsed time for .csv files generation: {:.1f}'.format(elapsedTime) + ' s.') | [
"ctypes.c_int16",
"picosdk.ps2000a.ps2000a.ps2000aSetChannel",
"ctypes.c_int32",
"ctypes.byref",
"picosdk.functions.assert_pico_ok",
"ctypes.c_float",
"numpy.linspace",
"math.log",
"datetime.datetime.now",
"matplotlib.pyplot.show",
"csv.writer",
"picosdk.ps2000a.ps2000a.ps2000aRunBlock",
"pi... | [((1189, 1205), 'ctypes.c_int16', 'ctypes.c_int16', ([], {}), '()\n', (1203, 1205), False, 'import ctypes\n'), ((1599, 1633), 'picosdk.functions.assert_pico_ok', 'assert_pico_ok', (["status['openunit']"], {}), "(status['openunit'])\n", (1613, 1633), False, 'from picosdk.functions import adc2mV, assert_pico_ok\n'), ((1841, 1892), 'picosdk.ps2000a.ps2000a.ps2000aSetChannel', 'ps.ps2000aSetChannel', (['chandle', '(0)', '(1)', '(1)', 'chARange', '(0)'], {}), '(chandle, 0, 1, 1, chARange, 0)\n', (1861, 1892), True, 'from picosdk.ps2000a import ps2000a as ps\n'), ((1893, 1925), 'picosdk.functions.assert_pico_ok', 'assert_pico_ok', (["status['setChA']"], {}), "(status['setChA'])\n", (1907, 1925), False, 'from picosdk.functions import adc2mV, assert_pico_ok\n'), ((2126, 2177), 'picosdk.ps2000a.ps2000a.ps2000aSetChannel', 'ps.ps2000aSetChannel', (['chandle', '(1)', '(1)', '(1)', 'chBRange', '(0)'], {}), '(chandle, 1, 1, 1, chBRange, 0)\n', (2146, 2177), True, 'from picosdk.ps2000a import ps2000a as ps\n'), ((2178, 2210), 'picosdk.functions.assert_pico_ok', 'assert_pico_ok', (["status['setChB']"], {}), "(status['setChB'])\n", (2192, 2210), False, 'from picosdk.functions import adc2mV, assert_pico_ok\n'), ((2467, 2526), 'picosdk.ps2000a.ps2000a.ps2000aSetSimpleTrigger', 'ps.ps2000aSetSimpleTrigger', (['chandle', '(1)', '(0)', '(1024)', '(2)', '(0)', '(1000)'], {}), '(chandle, 1, 0, 1024, 2, 0, 1000)\n', (2493, 2526), True, 'from picosdk.ps2000a import ps2000a as ps\n'), ((2527, 2560), 'picosdk.functions.assert_pico_ok', 'assert_pico_ok', (["status['trigger']"], {}), "(status['trigger'])\n", (2541, 2560), False, 'from picosdk.functions import adc2mV, assert_pico_ok\n'), ((3165, 3181), 'ctypes.c_float', 'ctypes.c_float', ([], {}), '()\n', (3179, 3181), False, 'import ctypes\n'), ((3203, 3219), 'ctypes.c_int32', 'ctypes.c_int32', ([], {}), '()\n', (3217, 3219), False, 'import ctypes\n'), ((3233, 3250), 'ctypes.c_int16', 'ctypes.c_int16', (['(0)'], {}), '(0)\n', (3247, 
3250), False, 'import ctypes\n'), ((3699, 3737), 'picosdk.functions.assert_pico_ok', 'assert_pico_ok', (["status['getTimebase2']"], {}), "(status['getTimebase2'])\n", (3713, 3737), False, 'from picosdk.functions import adc2mV, assert_pico_ok\n'), ((4732, 4845), 'picosdk.ps2000a.ps2000a.ps2000aRunBlock', 'ps.ps2000aRunBlock', (['chandle', 'preTriggerSamples', 'postTriggerSamples', 'timebase', 'oversample', 'None', '(0)', 'None', 'None'], {}), '(chandle, preTriggerSamples, postTriggerSamples, timebase,\n oversample, None, 0, None, None)\n', (4750, 4845), True, 'from picosdk.ps2000a import ps2000a as ps\n'), ((5162, 5196), 'picosdk.functions.assert_pico_ok', 'assert_pico_ok', (["status['runBlock']"], {}), "(status['runBlock'])\n", (5176, 5196), False, 'from picosdk.functions import adc2mV, assert_pico_ok\n'), ((5265, 5282), 'ctypes.c_int16', 'ctypes.c_int16', (['(0)'], {}), '(0)\n', (5279, 5282), False, 'import ctypes\n'), ((5291, 5308), 'ctypes.c_int16', 'ctypes.c_int16', (['(0)'], {}), '(0)\n', (5305, 5308), False, 'import ctypes\n'), ((6576, 6617), 'picosdk.functions.assert_pico_ok', 'assert_pico_ok', (["status['setDataBuffersA']"], {}), "(status['setDataBuffersA'])\n", (6590, 6617), False, 'from picosdk.functions import adc2mV, assert_pico_ok\n'), ((7384, 7425), 'picosdk.functions.assert_pico_ok', 'assert_pico_ok', (["status['setDataBuffersB']"], {}), "(status['setDataBuffersB'])\n", (7398, 7425), False, 'from picosdk.functions import adc2mV, assert_pico_ok\n'), ((7465, 7481), 'ctypes.c_int16', 'ctypes.c_int16', ([], {}), '()\n', (7479, 7481), False, 'import ctypes\n'), ((7535, 7563), 'ctypes.c_int32', 'ctypes.c_int32', (['totalSamples'], {}), '(totalSamples)\n', (7549, 7563), False, 'import ctypes\n'), ((8043, 8078), 'picosdk.functions.assert_pico_ok', 'assert_pico_ok', (["status['getValues']"], {}), "(status['getValues'])\n", (8057, 8078), False, 'from picosdk.functions import adc2mV, assert_pico_ok\n'), ((8182, 8198), 'ctypes.c_int16', 'ctypes.c_int16', ([], 
{}), '()\n', (8196, 8198), False, 'import ctypes\n'), ((8278, 8316), 'picosdk.functions.assert_pico_ok', 'assert_pico_ok', (["status['maximumValue']"], {}), "(status['maximumValue'])\n", (8292, 8316), False, 'from picosdk.functions import adc2mV, assert_pico_ok\n'), ((8366, 8402), 'picosdk.functions.adc2mV', 'adc2mV', (['bufferAMax', 'chARange', 'maxADC'], {}), '(bufferAMax, chARange, maxADC)\n', (8372, 8402), False, 'from picosdk.functions import adc2mV, assert_pico_ok\n'), ((8419, 8455), 'picosdk.functions.adc2mV', 'adc2mV', (['bufferBMax', 'chBRange', 'maxADC'], {}), '(bufferBMax, chBRange, maxADC)\n', (8425, 8455), False, 'from picosdk.functions import adc2mV, assert_pico_ok\n'), ((8487, 8576), 'numpy.linspace', 'np.linspace', (['(0)', '(cTotalSamples.value * (timeIntervalns.value - 1))', 'cTotalSamples.value'], {}), '(0, cTotalSamples.value * (timeIntervalns.value - 1),\n cTotalSamples.value)\n', (8498, 8576), True, 'import numpy as np\n'), ((8622, 8657), 'matplotlib.pyplot.plot', 'plt.plot', (['timeAxis', 'adc2mVChAMax[:]'], {}), '(timeAxis, adc2mVChAMax[:])\n', (8630, 8657), True, 'import matplotlib.pyplot as plt\n'), ((8658, 8693), 'matplotlib.pyplot.plot', 'plt.plot', (['timeAxis', 'adc2mVChBMax[:]'], {}), '(timeAxis, adc2mVChBMax[:])\n', (8666, 8693), True, 'import matplotlib.pyplot as plt\n'), ((8694, 8717), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Time (ns)"""'], {}), "('Time (ns)')\n", (8704, 8717), True, 'import matplotlib.pyplot as plt\n'), ((8718, 8744), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Voltage (mV)"""'], {}), "('Voltage (mV)')\n", (8728, 8744), True, 'import matplotlib.pyplot as plt\n'), ((8745, 8755), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (8753, 8755), True, 'import matplotlib.pyplot as plt\n'), ((8840, 8863), 'picosdk.ps2000a.ps2000a.ps2000aStop', 'ps.ps2000aStop', (['chandle'], {}), '(chandle)\n', (8854, 8863), True, 'from picosdk.ps2000a import ps2000a as ps\n'), ((8864, 8894), 
'picosdk.functions.assert_pico_ok', 'assert_pico_ok', (["status['stop']"], {}), "(status['stop'])\n", (8878, 8894), False, 'from picosdk.functions import adc2mV, assert_pico_ok\n'), ((8965, 8993), 'picosdk.ps2000a.ps2000a.ps2000aCloseUnit', 'ps.ps2000aCloseUnit', (['chandle'], {}), '(chandle)\n', (8984, 8993), True, 'from picosdk.ps2000a import ps2000a as ps\n'), ((8994, 9025), 'picosdk.functions.assert_pico_ok', 'assert_pico_ok', (["status['close']"], {}), "(status['close'])\n", (9008, 9025), False, 'from picosdk.functions import adc2mV, assert_pico_ok\n'), ((9117, 9128), 'time.time', 'time.time', ([], {}), '()\n', (9126, 9128), False, 'import time\n'), ((1519, 1540), 'ctypes.byref', 'ctypes.byref', (['chandle'], {}), '(chandle)\n', (1531, 1540), False, 'import ctypes\n'), ((3476, 3504), 'ctypes.byref', 'ctypes.byref', (['timeIntervalns'], {}), '(timeIntervalns)\n', (3488, 3504), False, 'import ctypes\n'), ((3614, 3646), 'ctypes.byref', 'ctypes.byref', (['returnedMaxSamples'], {}), '(returnedMaxSamples)\n', (3626, 3646), False, 'import ctypes\n'), ((6292, 6316), 'ctypes.byref', 'ctypes.byref', (['bufferAMax'], {}), '(bufferAMax)\n', (6304, 6316), False, 'import ctypes\n'), ((6371, 6395), 'ctypes.byref', 'ctypes.byref', (['bufferAMin'], {}), '(bufferAMin)\n', (6383, 6395), False, 'import ctypes\n'), ((7100, 7124), 'ctypes.byref', 'ctypes.byref', (['bufferBMax'], {}), '(bufferBMax)\n', (7112, 7124), False, 'import ctypes\n'), ((7179, 7203), 'ctypes.byref', 'ctypes.byref', (['bufferBMin'], {}), '(bufferBMin)\n', (7191, 7203), False, 'import ctypes\n'), ((7981, 8008), 'ctypes.byref', 'ctypes.byref', (['cTotalSamples'], {}), '(cTotalSamples)\n', (7993, 8008), False, 'import ctypes\n'), ((8019, 8041), 'ctypes.byref', 'ctypes.byref', (['overflow'], {}), '(overflow)\n', (8031, 8041), False, 'import ctypes\n'), ((8256, 8276), 'ctypes.byref', 'ctypes.byref', (['maxADC'], {}), '(maxADC)\n', (8268, 8276), False, 'import ctypes\n'), ((9403, 9419), 'csv.writer', 'csv.writer', 
(['file'], {}), '(file)\n', (9413, 9419), False, 'import csv\n'), ((9647, 9663), 'csv.writer', 'csv.writer', (['file'], {}), '(file)\n', (9657, 9663), False, 'import csv\n'), ((9727, 9738), 'time.time', 'time.time', ([], {}), '()\n', (9736, 9738), False, 'import time\n'), ((563, 603), 'math.log', 'log', (['(500000000.0 / SAMPLING_FREQUENCY)', '(2)'], {}), '(500000000.0 / SAMPLING_FREQUENCY, 2)\n', (566, 603), False, 'from math import log\n'), ((5394, 5413), 'ctypes.byref', 'ctypes.byref', (['ready'], {}), '(ready)\n', (5406, 5413), False, 'import ctypes\n'), ((9185, 9199), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (9197, 9199), False, 'from datetime import datetime\n')] |
#https://www.youtube.com/watch?v=ZGU5kIG7b2I
#dependencies
import json #files
import numpy as np #vectorization
import random #generating random text
import tensorflow as tf #ML
import datetime #clock training time
# Load the training corpus and build character <-> integer-id lookup tables.
text = open('issa_haikus_english').read()
# Unique characters, sorted so id assignment is deterministic across runs.
chars = sorted(list(set(text)))
char_size = len(chars)
# Forward and reverse maps between characters and their integer ids.
char2id = dict((c, i) for i, c in enumerate(chars))
id2char = dict((i, c) for i, c in enumerate(chars))
#generate probability of each next character
def sample(prediction):
    """Draw one character id from a probability vector and one-hot encode it.

    Walks the cumulative distribution until it crosses a uniform random
    threshold; falls back to the last index when round-off leaves the
    probabilities summing to slightly less than 1. Returns a float vector
    of length ``char_size`` (module global) with a single 1.0 entry.
    """
    threshold = random.uniform(0, 1)
    chosen = len(prediction) - 1  # default in case the cumsum never crosses
    cumulative = 0
    for idx, p in enumerate(prediction):
        cumulative += p
        if cumulative >= threshold:
            chosen = idx
            break
    one_hot = np.zeros(shape=[char_size])
    one_hot[chosen] = 1.0
    return one_hot
# Slice the corpus into overlapping training windows: each sample is
# `len_per_section` consecutive characters and its label is the character
# that immediately follows; windows start every `skip` characters.
len_per_section = 5
skip = 2
sections = []
next_chars = []
for i in range(0, len(text) - len_per_section, skip):
    sections.append(text[i: i + len_per_section])
    next_chars.append(text[i + len_per_section])
# One-hot encode the windows: X has shape (n_sections, len_per_section, char_size).
X = np.zeros((len(sections), len_per_section, char_size))
# y has shape (n_sections, char_size): one-hot of the following character.
y = np.zeros((len(sections), char_size))
# Fill in the one-hot entries from the char -> id map.
for i, section in enumerate(sections):
    for j, char in enumerate(section):
        X[i, j, char2id[char]] = 1
    y[i, char2id[next_chars[i]]] = 1
# Training hyper-parameters.
batch_size = 512
# Iteration counts (scaled down ~100x from the original tutorial values).
max_steps = 600
log_every = 10
save_every = 60
# LSTM hidden-state width; tune to balance under- vs over-fitting.
hidden_nodes = 1024
# Directory that model checkpoints are restored from.
checkpoint_directory = 'ckpt'
'''
#create checkpoint directory
if tf.gfile.Exists(checkpoint_directory):
    tf.gfile.DeleteRecursively(checkpoint_directory)
tf.gfile.MakeDirs(checkpoint_directory)
'''
#build model: TF1-style static graph with a hand-rolled, unrolled LSTM.
graph = tf.Graph()
with graph.as_default():
    global_step = tf.Variable(0)
    # One-hot encoded input windows and their next-character labels.
    data = tf.placeholder(tf.float32, [batch_size, len_per_section, char_size])
    labels = tf.placeholder(tf.float32, [batch_size, char_size])
    # LSTM parameters: for each gate there are weights for the current
    # input (w_*i), weights for the previous output (w_*o), and a bias.
    #input gate
    w_ii = tf.Variable(tf.truncated_normal([char_size, hidden_nodes], -0.1, 0.1))
    w_io = tf.Variable(tf.truncated_normal([hidden_nodes, hidden_nodes], -0.1, 0.1))
    b_i = tf.Variable(tf.zeros([1, hidden_nodes]))
    #forget gate
    w_fi = tf.Variable(tf.truncated_normal([char_size, hidden_nodes], -0.1, 0.1))
    w_fo = tf.Variable(tf.truncated_normal([hidden_nodes, hidden_nodes], -0.1, 0.1))
    b_f = tf.Variable(tf.zeros([1, hidden_nodes]))
    #output gate
    w_oi = tf.Variable(tf.truncated_normal([char_size, hidden_nodes], -0.1, 0.1))
    w_oo = tf.Variable(tf.truncated_normal([hidden_nodes, hidden_nodes], -0.1, 0.1))
    b_o = tf.Variable(tf.zeros([1, hidden_nodes]))
    #memory cell (candidate state)
    w_ci = tf.Variable(tf.truncated_normal([char_size, hidden_nodes], -0.1, 0.1))
    w_co = tf.Variable(tf.truncated_normal([hidden_nodes, hidden_nodes], -0.1, 0.1))
    b_c = tf.Variable(tf.zeros([1, hidden_nodes]))
    def lstm(i, o, state):
        """One LSTM step. i = current input, o = previous output,
        state = previous cell state. Returns (new output, new state)."""
        # Each gate: (input @ input weights) + (prev output @ recurrent weights) + bias.
        input_gate = tf.sigmoid(tf.matmul(i, w_ii) + tf.matmul(o, w_io) + b_i)
        forget_gate = tf.sigmoid(tf.matmul(i, w_fi) + tf.matmul(o, w_fo) + b_f)
        output_gate = tf.sigmoid(tf.matmul(i, w_oi) + tf.matmul(o, w_oo) + b_o)
        # NOTE(review): a textbook LSTM uses tanh for the candidate cell;
        # sigmoid here follows the tutorial but is unconventional — confirm.
        memory_cell = tf.sigmoid(tf.matmul(i, w_ci) + tf.matmul(o, w_co) + b_c)
        # Blend old state (gated by forget) with the new candidate (gated by input).
        state = forget_gate * state + input_gate * memory_cell
        # Output is the squashed state, gated by the output gate.
        output = output_gate * tf.tanh(state)
        return output, state
    ############
    #operation
    ############
    # Recurrent output and state both start as zeros.
    output = tf.zeros([batch_size, hidden_nodes])
    state = tf.zeros([batch_size, hidden_nodes])
    # Unrolled LSTM loop over the characters of each input window.
    for i in range(len_per_section):
        output, state = lstm(data[:, i, :], output, state)
        if i == 0:
            # First step: start accumulating outputs and shifted labels.
            outputs_all_i = output
            labels_all_i = data[:, i+1, :]
        elif i != len_per_section - 1:
            # Intermediate steps: the label is the next input character.
            outputs_all_i = tf.concat([outputs_all_i, output], 0)
            labels_all_i = tf.concat([labels_all_i, data[:, i+1, :]], 0)
        else:
            # Final step: the label is the held-out next character.
            outputs_all_i = tf.concat([outputs_all_i, output], 0)
            labels_all_i = tf.concat([labels_all_i, labels], 0)
    # Classifier: project the accumulated LSTM outputs onto character logits
    # (unscaled scores; the softmax is applied inside the loss below).
    w = tf.Variable(tf.truncated_normal([hidden_nodes, char_size], -0.1, 0.1))
    b = tf.Variable(tf.zeros([char_size]))
    logits = tf.matmul(outputs_all_i, w) + b
    # Softmax cross-entropy, averaged over every unrolled step of the batch.
    loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(logits=logits, labels=labels_all_i))
    #Optimizer: plain gradient descent with learning rate 10, tracking steps.
    optimizer = tf.train.GradientDescentOptimizer(10.).minimize(loss, global_step=global_step)
    ###########
    #Test
    ###########
    # Single-character inference path that shares the training weights.
    test_data = tf.placeholder(tf.float32, shape=[1, char_size])
    test_output = tf.Variable(tf.zeros([1, hidden_nodes]))
    test_state = tf.Variable(tf.zeros([1, hidden_nodes]))
    # Reset op zeroes the recurrent state before each generation run.
    reset_test_state = tf.group(test_output.assign(tf.zeros([1, hidden_nodes])), test_state.assign(tf.zeros([1, hidden_nodes])))
    test_output, test_state = lstm(test_data, test_output, test_state)
    test_prediction = tf.nn.softmax(tf.matmul(test_output, w) + b)
test_start = 'the '
# Restore the latest checkpoint and generate text one character at a time.
with tf.Session(graph=graph) as sess:
    tf.global_variables_initializer().run()
    model = tf.train.latest_checkpoint(checkpoint_directory)
    saver = tf.train.Saver()
    saver.restore(sess, model)
    # Zero the LSTM state, then prime it with the seed string.
    reset_test_state.run()
    test_generated = test_start
    # Feed every seed character except the last; only the recurrent state
    # update matters here, so the prediction is discarded.
    for i in range(len(test_start) - 1):
        test_X = np.zeros((1, char_size))
        test_X[0, char2id[test_start[i]]] = 1.
        _ = sess.run(test_prediction, feed_dict={test_data: test_X})
    # Start generation from the final seed character.
    test_X = np.zeros((1, char_size))
    test_X[0, char2id[test_start[-1]]] = 1.
    #generate 500 characters
    for i in range(500):
        prediction = test_prediction.eval({test_data: test_X})[0]
        # Sample the next character id and one-hot encode it.
        next_char_one_hot = sample(prediction)
        # argmax of a one-hot recovers the sampled id.
        next_char = id2char[np.argmax(next_char_one_hot)]
        test_generated += next_char
        # The sampled one-hot becomes the next input.
        test_X = next_char_one_hot.reshape((1, char_size))
    # Write the generated text to a timestamped output file.
    file = open("output/" + str(datetime.datetime.now()) + ".txt", "w")
    file.write(test_generated)
    file.close()
file.close() | [
"tensorflow.train.Saver",
"random.uniform",
"tensorflow.global_variables_initializer",
"numpy.argmax",
"numpy.zeros",
"tensorflow.Session",
"tensorflow.concat",
"tensorflow.nn.softmax_cross_entropy_with_logits_v2",
"tensorflow.placeholder",
"tensorflow.matmul",
"tensorflow.Variable",
"tensorfl... | [((2041, 2051), 'tensorflow.Graph', 'tf.Graph', ([], {}), '()\n', (2049, 2051), True, 'import tensorflow as tf\n'), ((512, 532), 'random.uniform', 'random.uniform', (['(0)', '(1)'], {}), '(0, 1)\n', (526, 532), False, 'import random\n'), ((824, 851), 'numpy.zeros', 'np.zeros', ([], {'shape': '[char_size]'}), '(shape=[char_size])\n', (832, 851), True, 'import numpy as np\n'), ((2093, 2107), 'tensorflow.Variable', 'tf.Variable', (['(0)'], {}), '(0)\n', (2104, 2107), True, 'import tensorflow as tf\n'), ((2117, 2185), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[batch_size, len_per_section, char_size]'], {}), '(tf.float32, [batch_size, len_per_section, char_size])\n', (2131, 2185), True, 'import tensorflow as tf\n'), ((2196, 2247), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[batch_size, char_size]'], {}), '(tf.float32, [batch_size, char_size])\n', (2210, 2247), True, 'import tensorflow as tf\n'), ((4389, 4425), 'tensorflow.zeros', 'tf.zeros', (['[batch_size, hidden_nodes]'], {}), '([batch_size, hidden_nodes])\n', (4397, 4425), True, 'import tensorflow as tf\n'), ((4435, 4471), 'tensorflow.zeros', 'tf.zeros', (['[batch_size, hidden_nodes]'], {}), '([batch_size, hidden_nodes])\n', (4443, 4471), True, 'import tensorflow as tf\n'), ((6420, 6468), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '[1, char_size]'}), '(tf.float32, shape=[1, char_size])\n', (6434, 6468), True, 'import tensorflow as tf\n'), ((6912, 6935), 'tensorflow.Session', 'tf.Session', ([], {'graph': 'graph'}), '(graph=graph)\n', (6922, 6935), True, 'import tensorflow as tf\n'), ((7029, 7077), 'tensorflow.train.latest_checkpoint', 'tf.train.latest_checkpoint', (['checkpoint_directory'], {}), '(checkpoint_directory)\n', (7055, 7077), True, 'import tensorflow as tf\n'), ((7090, 7106), 'tensorflow.train.Saver', 'tf.train.Saver', ([], {}), '()\n', (7104, 7106), True, 'import tensorflow as tf\n'), ((7685, 7709), 'numpy.zeros', 'np.zeros', 
(['(1, char_size)'], {}), '((1, char_size))\n', (7693, 7709), True, 'import numpy as np\n'), ((2417, 2474), 'tensorflow.truncated_normal', 'tf.truncated_normal', (['[char_size, hidden_nodes]', '(-0.1)', '(0.1)'], {}), '([char_size, hidden_nodes], -0.1, 0.1)\n', (2436, 2474), True, 'import tensorflow as tf\n'), ((2496, 2556), 'tensorflow.truncated_normal', 'tf.truncated_normal', (['[hidden_nodes, hidden_nodes]', '(-0.1)', '(0.1)'], {}), '([hidden_nodes, hidden_nodes], -0.1, 0.1)\n', (2515, 2556), True, 'import tensorflow as tf\n'), ((2577, 2604), 'tensorflow.zeros', 'tf.zeros', (['[1, hidden_nodes]'], {}), '([1, hidden_nodes])\n', (2585, 2604), True, 'import tensorflow as tf\n'), ((2641, 2698), 'tensorflow.truncated_normal', 'tf.truncated_normal', (['[char_size, hidden_nodes]', '(-0.1)', '(0.1)'], {}), '([char_size, hidden_nodes], -0.1, 0.1)\n', (2660, 2698), True, 'import tensorflow as tf\n'), ((2720, 2780), 'tensorflow.truncated_normal', 'tf.truncated_normal', (['[hidden_nodes, hidden_nodes]', '(-0.1)', '(0.1)'], {}), '([hidden_nodes, hidden_nodes], -0.1, 0.1)\n', (2739, 2780), True, 'import tensorflow as tf\n'), ((2801, 2828), 'tensorflow.zeros', 'tf.zeros', (['[1, hidden_nodes]'], {}), '([1, hidden_nodes])\n', (2809, 2828), True, 'import tensorflow as tf\n'), ((2865, 2922), 'tensorflow.truncated_normal', 'tf.truncated_normal', (['[char_size, hidden_nodes]', '(-0.1)', '(0.1)'], {}), '([char_size, hidden_nodes], -0.1, 0.1)\n', (2884, 2922), True, 'import tensorflow as tf\n'), ((2944, 3004), 'tensorflow.truncated_normal', 'tf.truncated_normal', (['[hidden_nodes, hidden_nodes]', '(-0.1)', '(0.1)'], {}), '([hidden_nodes, hidden_nodes], -0.1, 0.1)\n', (2963, 3004), True, 'import tensorflow as tf\n'), ((3025, 3052), 'tensorflow.zeros', 'tf.zeros', (['[1, hidden_nodes]'], {}), '([1, hidden_nodes])\n', (3033, 3052), True, 'import tensorflow as tf\n'), ((3089, 3146), 'tensorflow.truncated_normal', 'tf.truncated_normal', (['[char_size, hidden_nodes]', '(-0.1)', '(0.1)'], 
{}), '([char_size, hidden_nodes], -0.1, 0.1)\n', (3108, 3146), True, 'import tensorflow as tf\n'), ((3168, 3228), 'tensorflow.truncated_normal', 'tf.truncated_normal', (['[hidden_nodes, hidden_nodes]', '(-0.1)', '(0.1)'], {}), '([hidden_nodes, hidden_nodes], -0.1, 0.1)\n', (3187, 3228), True, 'import tensorflow as tf\n'), ((3249, 3276), 'tensorflow.zeros', 'tf.zeros', (['[1, hidden_nodes]'], {}), '([1, hidden_nodes])\n', (3257, 3276), True, 'import tensorflow as tf\n'), ((5390, 5447), 'tensorflow.truncated_normal', 'tf.truncated_normal', (['[hidden_nodes, char_size]', '(-0.1)', '(0.1)'], {}), '([hidden_nodes, char_size], -0.1, 0.1)\n', (5409, 5447), True, 'import tensorflow as tf\n'), ((5466, 5487), 'tensorflow.zeros', 'tf.zeros', (['[char_size]'], {}), '([char_size])\n', (5474, 5487), True, 'import tensorflow as tf\n'), ((5795, 5822), 'tensorflow.matmul', 'tf.matmul', (['outputs_all_i', 'w'], {}), '(outputs_all_i, w)\n', (5804, 5822), True, 'import tensorflow as tf\n'), ((6108, 6186), 'tensorflow.nn.softmax_cross_entropy_with_logits_v2', 'tf.nn.softmax_cross_entropy_with_logits_v2', ([], {'logits': 'logits', 'labels': 'labels_all_i'}), '(logits=logits, labels=labels_all_i)\n', (6150, 6186), True, 'import tensorflow as tf\n'), ((6496, 6523), 'tensorflow.zeros', 'tf.zeros', (['[1, hidden_nodes]'], {}), '([1, hidden_nodes])\n', (6504, 6523), True, 'import tensorflow as tf\n'), ((6551, 6578), 'tensorflow.zeros', 'tf.zeros', (['[1, hidden_nodes]'], {}), '([1, hidden_nodes])\n', (6559, 6578), True, 'import tensorflow as tf\n'), ((7388, 7412), 'numpy.zeros', 'np.zeros', (['(1, char_size)'], {}), '((1, char_size))\n', (7396, 7412), True, 'import numpy as np\n'), ((4231, 4245), 'tensorflow.tanh', 'tf.tanh', (['state'], {}), '(state)\n', (4238, 4245), True, 'import tensorflow as tf\n'), ((6294, 6333), 'tensorflow.train.GradientDescentOptimizer', 'tf.train.GradientDescentOptimizer', (['(10.0)'], {}), '(10.0)\n', (6327, 6333), True, 'import tensorflow as tf\n'), ((6667, 
6694), 'tensorflow.zeros', 'tf.zeros', (['[1, hidden_nodes]'], {}), '([1, hidden_nodes])\n', (6675, 6694), True, 'import tensorflow as tf\n'), ((6715, 6742), 'tensorflow.zeros', 'tf.zeros', (['[1, hidden_nodes]'], {}), '([1, hidden_nodes])\n', (6723, 6742), True, 'import tensorflow as tf\n'), ((6854, 6879), 'tensorflow.matmul', 'tf.matmul', (['test_output', 'w'], {}), '(test_output, w)\n', (6863, 6879), True, 'import tensorflow as tf\n'), ((6977, 7010), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (7008, 7010), True, 'import tensorflow as tf\n'), ((8104, 8132), 'numpy.argmax', 'np.argmax', (['next_char_one_hot'], {}), '(next_char_one_hot)\n', (8113, 8132), True, 'import numpy as np\n'), ((4937, 4974), 'tensorflow.concat', 'tf.concat', (['[outputs_all_i, output]', '(0)'], {}), '([outputs_all_i, output], 0)\n', (4946, 4974), True, 'import tensorflow as tf\n'), ((4993, 5040), 'tensorflow.concat', 'tf.concat', (['[labels_all_i, data[:, i + 1, :]]', '(0)'], {}), '([labels_all_i, data[:, i + 1, :]], 0)\n', (5002, 5040), True, 'import tensorflow as tf\n'), ((5082, 5119), 'tensorflow.concat', 'tf.concat', (['[outputs_all_i, output]', '(0)'], {}), '([outputs_all_i, output], 0)\n', (5091, 5119), True, 'import tensorflow as tf\n'), ((5138, 5174), 'tensorflow.concat', 'tf.concat', (['[labels_all_i, labels]', '(0)'], {}), '([labels_all_i, labels], 0)\n', (5147, 5174), True, 'import tensorflow as tf\n'), ((3451, 3469), 'tensorflow.matmul', 'tf.matmul', (['i', 'w_ii'], {}), '(i, w_ii)\n', (3460, 3469), True, 'import tensorflow as tf\n'), ((3472, 3490), 'tensorflow.matmul', 'tf.matmul', (['o', 'w_io'], {}), '(o, w_io)\n', (3481, 3490), True, 'import tensorflow as tf\n'), ((3604, 3622), 'tensorflow.matmul', 'tf.matmul', (['i', 'w_fi'], {}), '(i, w_fi)\n', (3613, 3622), True, 'import tensorflow as tf\n'), ((3625, 3643), 'tensorflow.matmul', 'tf.matmul', (['o', 'w_fo'], {}), '(o, w_fo)\n', (3634, 3643), True, 'import tensorflow as 
tf\n'), ((3757, 3775), 'tensorflow.matmul', 'tf.matmul', (['i', 'w_oi'], {}), '(i, w_oi)\n', (3766, 3775), True, 'import tensorflow as tf\n'), ((3778, 3796), 'tensorflow.matmul', 'tf.matmul', (['o', 'w_oo'], {}), '(o, w_oo)\n', (3787, 3796), True, 'import tensorflow as tf\n'), ((3918, 3936), 'tensorflow.matmul', 'tf.matmul', (['i', 'w_ci'], {}), '(i, w_ci)\n', (3927, 3936), True, 'import tensorflow as tf\n'), ((3939, 3957), 'tensorflow.matmul', 'tf.matmul', (['o', 'w_co'], {}), '(o, w_co)\n', (3948, 3957), True, 'import tensorflow as tf\n'), ((8337, 8360), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (8358, 8360), False, 'import datetime\n')] |
#!/usr/bin/env python3
import shelve
from glob import glob
import numpy as np
import matplotlib.pyplot as plt
import plot_helpers
import argparse
# Command-line interface: positional dataset name and sigma, optional
# mean cluster load and a paper-rendering switch.
parser = argparse.ArgumentParser(description="produce boxplots for sojourn "
                                    "time vs. d/n")
parser.add_argument('dataset', help="name of the .tsv file used.")
parser.add_argument('sigma', type=float, help="sigma parameter for the "
                    "log-normal error function")
parser.add_argument('--load', type=float, default=0.9,
                    help="average load in the simulated cluster; default is "
                    "0.9")
parser.add_argument('--paper', dest='for_paper', action='store_const',
                    const=True, default=False, help="render plots with "
                    "LaTeX and output them as "
                    "sojourn-vs-error_DATASET_SIGMA_LOAD.pdf")
args = parser.parse_args()

if args.for_paper:
    plot_helpers.config_paper()

# Collect result shelves for this dataset/sigma/load combination, sorted
# by the d/n value encoded as the 4th '_'-separated token of the filename.
glob_str = 'results_{}_{}_[0-9.]*_{}.s'.format(args.dataset, args.sigma,
                                               args.load)
shelve_files = sorted((float(fname.split('_')[3]), fname)
                      for fname in glob(glob_str))
dns = [dn for dn, _ in shelve_files]

no_error = ['FIFO', 'PS', 'LAS', 'FSP (no error)', 'SRPT (no error)']
with_error = ['FIFO', 'PS', 'LAS', 'FSP + FIFO', 'FSP + PS', 'SRPT']
no_error_data = [[] for _ in no_error]
with_error_data = [[] for _ in with_error]
# Mean sojourn time per scheduler for each d/n value.
for dn, fname in shelve_files:
    res = shelve.open(fname, 'r')
    for i, scheduler in enumerate(no_error):
        no_error_data[i].append(np.array(res[scheduler]).mean())
    for i, scheduler in enumerate(with_error):
        with_error_data[i].append(np.array(res[scheduler]).mean())
    # Bug fix: the shelf was never closed, leaking one file handle per
    # result file.
    res.close()

# One figure for error-free schedulers and one for the given sigma.
figures = [("No error", float(0), no_error, no_error_data),
           (r"$\sigma={}$".format(args.sigma),
            args.sigma, with_error, with_error_data)]

for title, sigma, schedulers, data in figures:
    plt.figure(title)
    plt.xlabel("$d/n$")
    plt.ylabel("mean sojourn time (s)")
    for scheduler, mst, style in zip(schedulers, data,
                                     plot_helpers.cycle_styles('x')):
        plt.semilogy(dns, mst, style, label=scheduler)
    plt.grid()
    plt.legend(loc=2)
    if args.for_paper:
        fmt = 'sojourn-vs-dn_{}_{}_{}.pdf'
        fname = fmt.format(args.dataset, sigma, args.load)
        plt.savefig(fname)
if not args.for_paper:
    plt.show()
| [
"matplotlib.pyplot.show",
"argparse.ArgumentParser",
"matplotlib.pyplot.legend",
"plot_helpers.cycle_styles",
"shelve.open",
"plot_helpers.config_paper",
"matplotlib.pyplot.figure",
"numpy.array",
"glob.glob",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.semilogy",
"matplotlib.pyplot.xlabel"... | [((161, 246), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""produce boxplots for sojourn time vs. d/n"""'}), "(description='produce boxplots for sojourn time vs. d/n'\n )\n", (184, 246), False, 'import argparse\n'), ((933, 960), 'plot_helpers.config_paper', 'plot_helpers.config_paper', ([], {}), '()\n', (958, 960), False, 'import plot_helpers\n'), ((1504, 1527), 'shelve.open', 'shelve.open', (['fname', '"""r"""'], {}), "(fname, 'r')\n", (1515, 1527), False, 'import shelve\n'), ((1966, 1983), 'matplotlib.pyplot.figure', 'plt.figure', (['title'], {}), '(title)\n', (1976, 1983), True, 'import matplotlib.pyplot as plt\n'), ((1988, 2007), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""$d/n$"""'], {}), "('$d/n$')\n", (1998, 2007), True, 'import matplotlib.pyplot as plt\n'), ((2012, 2047), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""mean sojourn time (s)"""'], {}), "('mean sojourn time (s)')\n", (2022, 2047), True, 'import matplotlib.pyplot as plt\n'), ((2233, 2243), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (2241, 2243), True, 'import matplotlib.pyplot as plt\n'), ((2248, 2265), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '(2)'}), '(loc=2)\n', (2258, 2265), True, 'import matplotlib.pyplot as plt\n'), ((2447, 2457), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2455, 2457), True, 'import matplotlib.pyplot as plt\n'), ((2141, 2171), 'plot_helpers.cycle_styles', 'plot_helpers.cycle_styles', (['"""x"""'], {}), "('x')\n", (2166, 2171), False, 'import plot_helpers\n'), ((2182, 2228), 'matplotlib.pyplot.semilogy', 'plt.semilogy', (['dns', 'mst', 'style'], {'label': 'scheduler'}), '(dns, mst, style, label=scheduler)\n', (2194, 2228), True, 'import matplotlib.pyplot as plt\n'), ((2400, 2418), 'matplotlib.pyplot.savefig', 'plt.savefig', (['fname'], {}), '(fname)\n', (2411, 2418), True, 'import matplotlib.pyplot as plt\n'), ((1186, 1200), 'glob.glob', 'glob', (['glob_str'], 
{}), '(glob_str)\n', (1190, 1200), False, 'from glob import glob\n'), ((1605, 1629), 'numpy.array', 'np.array', (['res[scheduler]'], {}), '(res[scheduler])\n', (1613, 1629), True, 'import numpy as np\n'), ((1719, 1743), 'numpy.array', 'np.array', (['res[scheduler]'], {}), '(res[scheduler])\n', (1727, 1743), True, 'import numpy as np\n')] |
from math import *
import sys, argparse, time
import numpy as np
import mdtraj as md
import general_scripts as gs
try:
import psutil
bPsutil = True
except:
print( "= = = NOTE: Module psutil isnot available, cannot respect memory usage on this run.." )
bPsutil = False
def print_selection_help():
    """Print a short primer on the MDTraj atom-selection syntax used here."""
    for line in (
            "Notes: This python program uses MDTraj as its underlying engine to analyse trajectories and select atoms.",
            "It uses selection syntax such as 'chain A and resname GLY and name HA1 HA2', in a manner similar to GROMACS and VMD."):
        print( line )
def P2(x):
    """Second Legendre polynomial, P2(x) = (3*x^2 - 1)/2.

    Works element-wise on scalars and numpy arrays alike.
    """
    x_sq = x**2.0
    return 1.5*x_sq - 0.5
def assert_seltxt(mol, txt):
    """Select atoms in `mol` matching the MDTraj selection text `txt`.

    Returns the matching atom indices, or an empty list (after printing a
    warning) when the selection matches nothing.
    """
    ind = mol.topology.select(txt)
    if len(ind) == 0:
        # Bug fix: the original referenced an undefined name `seltxt`
        # (NameError) and formatted the index array itself with %i
        # (TypeError); report the selection text and the match count.
        print( "= = = ERROR: selection text failed to find atoms! ", txt )
        print( "    ....debug: N(%s) = %i " % (txt, len(ind)) )
        return []
    else:
        return ind
# There may be minor mistakes in the selection text. Try to identify what is wrong.
def confirm_seltxt(ref, Hseltxt, Xseltxt):
    """Validate the H- and X-atom selection texts against topology `ref`.

    Returns (indH, indX) on success. When either selection is empty, prints
    diagnostics for common alternative atom names and exits with status 1.
    """
    bError = False
    indH = assert_seltxt(ref, Hseltxt)
    indX = assert_seltxt(ref, Xseltxt)
    if len(indH) == 0:
        bError = True
        # Bug fix: the original queried an undefined name `mol` instead of
        # `ref`, and printed raw index arrays with %i; report atom counts.
        t1 = ref.topology.select('name H')
        t2 = ref.topology.select('name HN')
        t3 = ref.topology.select('name HA')
        print( " .... Note: The 'name H' selects %i atoms, 'name HN' selects %i atoms, and 'name HA' selects %i atoms." % (len(t1), len(t2), len(t3)) )
    if len(indX) == 0:
        bError = True
        t1 = ref.topology.select('name N')
        t2 = ref.topology.select('name NT')
        print( " .... Note: The 'name N' selects %i atoms, and 'name NT' selects %i atoms." % (len(t1), len(t2)) )
    if bError:
        sys.exit(1)
    return indH, indX
# Note: Return just the res number for easier plotting...
def obtain_XHres(traj, seltxt):
indexH = traj.topology.select(seltxt)
if len(indexH) == 0:
print( "= = = ERROR: selection text failed to find atoms! ", seltxt )
print( " ....debug: N(%s) = %i " % (Hseltxt, numH) )
sys.exit(1)
# indexH = traj.topology.select("name H and resSeq 3")
resXH = [ traj.topology.atom(indexH[i]).residue.resSeq for i in range(len(indexH)) ]
return resXH
def obtain_XHvecs(traj, Hseltxt, Xseltxt, bSuppressPrint = False):
    """Compute unit X->H bond vectors for every frame of `traj`.

    Returns an array of normalised H-minus-X position differences, one row
    of vectors per frame. Exits with status 1 if either selection is empty
    or the two selections differ in size.
    """
    if not bSuppressPrint:
        print( "= = = Obtaining XH-vectors from trajectory..." )
    indexX = traj.topology.select(Xseltxt)
    indexH = traj.topology.select(Hseltxt)
    numX = len(indexX) ; numH = len(indexH)
    # Guard against empty or mismatched selections before any math.
    if numX == 0 or numH == 0 :
        print( "= = = ERROR: selection text failed to find atoms!" )
        print( "    ....debug: N(%s) = %i , N(%s) = %i" % (Xseltxt, numX, Hseltxt, numH) )
        sys.exit(1)
    if numH != numX:
        print( "= = = ERROR: selection text found different number of atoms!" )
        print( "    ....debug: N(%s) = %i , N(%s) = %i" % (Xseltxt, numX, Hseltxt, numH) )
        sys.exit(1)
    # Difference of the selected coordinate columns, normalised per vector.
    diff = np.take(traj.xyz, indexH, axis=1) - np.take(traj.xyz, indexX, axis=1)
    return vecnorm_NDarray(diff, axis=2)
def vecnorm_NDarray(v, axis=-1):
    """
    Normalise vectors along an arbitrary axis (the last one by default).
    A zero-length vector would produce 0/0 = nan; np.nan_to_num maps such
    entries back to zero instead of propagating nan/inf.
    """
    if v.ndim > 1:
        # Keep the reduced axis at size 1 so broadcasting divides correctly.
        newshape = list(v.shape)
        newshape[axis] = 1
        norms = np.linalg.norm(v, axis=axis).reshape(newshape)
    else:
        norms = np.linalg.norm(v)
    return np.nan_to_num(v / norms)
# 3 Sum_i,j <e_i * e_j >^2 - 1
def S2_by_outerProduct(v):
"""
Two
"""
outer = np.mean([ np.outer(v[i],v[i]) for i in range(len(v))], axis=0)
return 1.5*np.sum(outer**2.0)-0.5
def calculate_S2_by_outerProduct(vecs, delta_t=-1, tau_memory=-1):
    """
    Calculates the general order parameter S2 by using the quantity 3*Sum_i,j <e_i * e_j >^2 - 1 , which is akin to P2( CosTheta )
    Expects vecs to be of dimensions (time, 3) or ( time, nResidues, 3 )
    This directly collapses all dimensions in two steps:
    - 1. calculate the outer product <v_i v_j >
    - 2. calculate Sum <v_i v_j>^2
    When both delta_t and tau_memory are given, then returns average and SEM of the S2 samples of dimensions ( nResidues, 2 )
    """
    sh=vecs.shape
    nDim=sh[-1]
    if len(sh)==2:
        nFrames=vecs.shape[0]
        if delta_t < 0 or tau_memory < 0:
            #Use no block-averaging
            # <v_i v_j> averaged over all frames; then S2 = 1.5*Sum_ij <v_i v_j>^2 - 0.5
            tmp = np.einsum( 'ij,ik->jk', vecs,vecs) / nFrames
            return 1.5*np.einsum('ij,ij->',tmp,tmp)-0.5
        else:
            nFramesPerBlock = int( tau_memory / delta_t )
            nBlocks = int( nFrames / nFramesPerBlock )
            # Reshape while dumping extra frames
            vecs = vecs[:nBlocks*nFramesPerBlock].reshape( nBlocks, nFramesPerBlock, nDim )
            # Per-block average outer product, then one S2 value per block.
            tmp = np.einsum( 'ijk,ijl->ikl', vecs,vecs) / nFramesPerBlock
            tmp = 1.5*np.einsum('ijk,ijk->i',tmp,tmp)-0.5
            S2 = np.mean( tmp )
            # NOTE(review): the SEM denominator here is sqrt(nBlocks)-1, not the
            # usual sqrt(nBlocks-1) -- confirm this normalisation is intended.
            dS2 = np.std( tmp ) / ( np.sqrt(nBlocks) - 1.0 )
            return np.array( [S2,dS2])
    elif len(sh)==3:
        # = = = Expect dimensions (time, nResidues, 3)
        nFrames   = vecs.shape[0]
        nResidues = vecs.shape[1]
        if delta_t < 0 or tau_memory < 0:
            #Use no block-averaging
            # Per-residue average outer product over all frames.
            tmp = np.einsum( 'ijk,ijl->jkl', vecs,vecs) / nFrames
            return 1.5*np.einsum('...ij,...ij->...',tmp,tmp)-0.5
        else:
            nFramesPerBlock = int( tau_memory / delta_t )
            nBlocks = int( nFrames / nFramesPerBlock )
            # Reshape while dumping extra frames
            vecs = vecs[:nBlocks*nFramesPerBlock].reshape( nBlocks, nFramesPerBlock, nResidues, nDim )
            # Per-block, per-residue average outer product -> S2 sample per (block, residue).
            tmp = np.einsum( 'ijkl,ijkm->iklm', vecs,vecs) / nFramesPerBlock
            tmp = 1.5*np.einsum('...ij,...ij->...',tmp,tmp)-0.5
            S2  = np.mean( tmp, axis=0 )
            # NOTE(review): same sqrt(nBlocks)-1 denominator as above -- confirm.
            dS2 = np.std ( tmp, axis=0 ) / ( np.sqrt(nBlocks) - 1.0 )
            return np.stack( (S2,dS2), axis=-1 )
    else:
        print( "= = = ERROR in calculate_S2_by_outerProduct: unsupported number of dimensions! vecs.shape: ", sh, file=sys.stderr )
        sys.exit(1)
# iRED and wiRED implemented according to reading of
# <NAME>, and Bruschweiler, JCTC, 2014
# http://dx.doi.org/10.1021/ct500181v
#
# 1. Implementation depends on a prior estimate of isotropic tumbling time, tau,
# that is used to determine averaging window size, and statistics.
# 2. Then we calculate the diagonalised matrix, spanning n by n of the cosine angle between each unit vector.
# 3. S2 is then the component that is covered by the internal motions,
# excluding the first 5 motions governing global reorientation.
def calculate_S2_by_wiRED(vecs, dt, tau):
    """
    Placeholder for the wiRED variant of the S2 calculation.
    Only the chunking parameters are derived so far; the matrix
    construction, frame-averaging and diagonalisation are still TODO.
    """
    print( tau, dt )
    # Windows of length 2*tau for wiRED; these values are currently unused.
    totalFrames = len(vecs)
    numChunks = floor(totalFrames*dt/(2*tau))
    framesPerChunk = floor(2*tau/dt)
def calculate_S2_by_iRED(vecs, dt, tau):
    """
    Placeholder for the iRED variant of the S2 calculation.
    Only the chunking parameters are derived so far; the matrix
    construction, frame-averaging and diagonalisation are still TODO.
    """
    print( tau, dt )
    # Windows of length 5*tau for iRED; these values are currently unused.
    totalFrames = len(vecs)
    numChunks = floor(totalFrames*dt/(5*tau))
    framesPerChunk = floor(5*tau/dt)
def reformat_vecs_by_tau(vecs, dt, tau):
    """
    This proc assumes that vecs list is N 3D-arrays in the form <Nfile>,(frames, bonds, XYZ).
    We take advantage of Palmer's iteration where the trajectory is divided into N chunks each of tau in length,
    to reformulate everything into fast 4D np.arrays of form (nchunk, frames, bonds, XYZ) so as to
    take full advantage of broadcasting.
    This will throw away additional frame data in each trajectory that does not fit into a single block of memory time tau.
    """
    # Don't assume all files have the same number of frames.
    nFiles = len(vecs)
    nFramesPerChunk=int(tau/dt)
    print( "  ...debug: Using %i frames per chunk based on tau/dt (%g/%g)." % (nFramesPerChunk, tau, dt) )
    used_frames = np.zeros(nFiles, dtype=int)
    remainders = np.zeros(nFiles, dtype=int)
    for i in range(nFiles):
        nFrames = vecs[i].shape[0]
        used_frames[i] = int(nFrames/nFramesPerChunk)*nFramesPerChunk
        remainders[i] = nFrames % nFramesPerChunk
        print( "    ...Source %i divided into %i chunks. Usage rate: %g %%" % (i, used_frames[i]/nFramesPerChunk, 100.0*used_frames[i]/nFrames ) )
    nFramesTot = int( used_frames.sum() )
    out = np.zeros( ( nFramesTot, vecs[0].shape[1], vecs[0].shape[2] ) , dtype=vecs[0].dtype)
    # Copy only the frames that fit into whole chunks from each source.
    start = 0
    for i in range(nFiles):
        end=int(start+used_frames[i])
        endv=int(used_frames[i])
        out[start:end,...] = vecs[i][0:endv,...]
        start=end
    sh = out.shape
    # BUGFIX: reshape requires integer dimensions; true division ('/') yields a
    # float under Python 3 and makes reshape raise TypeError. nFramesTot is a
    # multiple of nFramesPerChunk by construction, so '//' is exact.
    nChunks = nFramesTot // nFramesPerChunk
    print( "  ...Done. vecs reformatted into %i chunks." % nChunks )
    return out.reshape( (nChunks, nFramesPerChunk, sh[-2], sh[-1]) )
def LS_one(x, S2, tau_c):
    """
    Single-exponential Lipari-Szabo internal correlation function:
    C(x) = (1-S2)*exp(-x/tau_c) + S2 ; collapses to the plateau S2
    whenever tau_c is not positive.
    """
    if tau_c <= 0.0:
        return S2
    return (1-S2)*np.exp(-x/tau_c)+S2
def get_indices_mdtraj( seltxt, top, filename):
    """
    Return atom indices matching a selection string via MDTraj.

    NB: A workaround for MDTraj is needed because the standard reader
    does not return topologies. For the special selection text
    'custom occupancy', atoms are instead chosen by a positive occupancy
    value read directly from the PDB file.
    """
    if seltxt == 'custom occupancy':
        # BUGFIX: close the PDB file handle after parsing (it was leaked before).
        with open(filename) as fh:
            pdb = md.formats.pdb.pdbstructure.PdbStructure(fh)
        mask = [ atom.get_occupancy() for atom in pdb.iter_atoms() ]
        inds = top.select('all')
        # Keep indices whose corresponding occupancy is positive.
        return [ idx for idx, occ in zip(inds, mask) if occ > 0.0 ]
    else:
        return top.select(seltxt)
if __name__ == '__main__':
    # = = Command-line driver: extract XH unit vectors and compute S2 order parameters. = =
    parser = argparse.ArgumentParser(description='Obtain the unit X-H vectors from one of more trajectories,'
                                     'and conduct calculations on it, such as S^2, C(t), and others analyses.'
                                     'N.B. Since we\'re following Palmer\'s formalisams by dividing data into chunks '
                                     'of length tau, ',
                                     formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('-s', type=str, dest='topfn', required=True, nargs='+',
                        help='Suitable topology PDB file for use in the MDTraj module.'
                             'This file is used as the reference frame for fits. If multiple are given, one refpdb will be loaded for each trajectory.')
    parser.add_argument('-f', '--infn', type=str, dest='infn', required=True, nargs='+',
                        help='One or more trajectories. Data from multiple trajectories will be analysed separately'
                             'in C(t)-calculations, but otherwise aggregated.' )
    parser.add_argument('-o', '--outpref', type=str, dest='out_pref', default='out',
                        help='Output file prefix.')
    parser.add_argument('-t','--tau', type=float, dest='tau', required=False, default=None,
                        help='Use the isotropic global tumbling time to split trjeactory into samples.'
                             'This excludes internal motions that are slower than measured by NMR relaxation.'
                             'Same time units as trajectory, usually ps.')
    parser.add_argument('--split', type=int, dest='nSplitFrames', default=-1,
                        help='Optionally split the reading of large trajectories into N frames to reduce memory footprint.')
    parser.add_argument('--zeta', action='store_true', dest='bZeta', default=False,
                        help='Apply a prefactor that accounts for the librations of the XH-vector not seen in classical MD.'
                             'See, e.g. Trbovic et al., Proteins, 2008 -who references- Case, J. Biomol. NMR (1999)')
    parser.add_argument('--vecAvg', dest='bDoVecAverage', action='store_true', default=False,
                        help='Print the average unit XH-vector.')
    parser.add_argument('--Hsel', '--selection', type=str, dest='Hseltxt', default='name H',
                        help='Selection of the H-atoms to which the N-atoms are attached. E.g. "name H and resSeq 2 to 50 and chain A"')
    parser.add_argument('--Xsel', type=str, dest='Xseltxt', default='name N and not resname PRO',
                        help='Selection of the X-atoms to which the H-atoms are attached. E.g. "name N and resSeq 2 to 50 and chain A"')
    parser.add_argument('--fitsel', type=str, dest='fittxt', default='custom occupancy',
                        help='Selection in which atoms will be fitted. Examples include: \'name CA\' and '
                             '\'resSeq 3 to 30\'. The default selection invokes a workaround to read the occupancy data and take positive entries.')
    parser.add_argument('--help_sel', action='store_true', help='Display help for selection texts and exit.')
    args = parser.parse_args()
    time_start=time.time()
    if args.help_sel:
        print_selection_help()
        sys.exit(0)
    # = = = Read Parameters here = = =
    tau_memory=args.tau
    bDoVecAverage=args.bDoVecAverage
    if args.bZeta:
        # Libration prefactor from the literature (see --zeta help text).
        zeta=(1.02/1.04)**6
    else:
        zeta=1.0
    if args.nSplitFrames > 0:
        bSplitRead=True
        nSplitFrames=args.nSplitFrames
    else:
        bSplitRead=False
    in_flist=args.infn
    in_reflist=args.topfn
    #top_fname=args.topfn
    out_pref=args.out_pref
    Hseltxt=args.Hseltxt
    Xseltxt=args.Xseltxt
    #seltxt='name H and resSeq 3 to 4'
    fittxt=args.fittxt
    #Determine the input format and construct 3D arrays of dimension (n_file, n_frame, XYZ)
    n_refs = len(in_reflist)
    n_trjs = len(in_flist)
    if n_refs == 1:
        bMultiRef=False
        top_filename=in_reflist[0]
        ref = md.load(top_filename)
        print( "= = = Loaded single reference file: %s" % (top_filename) )
        # Load the atom indices over which the atom fit will take place.
        fit_indices = get_indices_mdtraj(top=ref.topology, filename=top_filename, seltxt=fittxt)
        print( "= = = Debug: fit_indices number: %i" % len(fit_indices) )
    else:
        print( "= = = Detected multiple reference file inputs." )
        bMultiRef=True
        if n_refs != n_trjs:
            print( "= = ERROR: When giving multiple reference files, you must have one for each trajecfile file given!", file=sys.stderr )
            sys.exit(1)
    # = = Load all trajectory data. Notes: Each file's resXH is 1D, vecXH is 3D in (frame, bond, XYZ)
    resXH = [] ; vecXHfit = []
    deltaT = np.nan ; nFrames = np.nan ; nBonds = np.nan
    bFirst=True
    for i in range(n_trjs):
        if bMultiRef:
            # One reference per trajectory: reload and re-derive fit indices each pass.
            top_filename=in_reflist[i]
            ref = md.load(top_filename)
            print( "= = = Loaded reference file %i: %s" % (i, top_filename) )
            fit_indices = get_indices_mdtraj( top=ref.topology, filename=top_filename, seltxt=fittxt)
            print( "= = = Debug: fit_indices number: %i" % len(fit_indices) )
        if bSplitRead:
            # = = = To tackle trajectory files that take too much memory, split into N frames
            print( "= = = Loading trajectory file %s in chunks..." % (in_flist[i]) )
            nFrames_loc = 0
            for trjChunk in md.iterload(in_flist[i], chunk=nSplitFrames, top=top_filename):
                trjChunk.center_coordinates()
                trjChunk.superpose(ref, frame=0, atom_indices=fit_indices )
                tempV2 = obtain_XHvecs(trjChunk, Hseltxt, Xseltxt, bSuppressPrint=True)
                if nFrames_loc == 0:
                    # First chunk: derive metadata once (residues, timestep).
                    confirm_seltxt(trjChunk, Hseltxt, Xseltxt)
                    resXH_loc = obtain_XHres(trjChunk, Hseltxt)
                    deltaT_loc = trjChunk.timestep
                    nFrames_loc = trjChunk.n_frames
                    vecXHfit_loc = tempV2
                else:
                    nFrames_loc += trjChunk.n_frames
                    vecXHfit_loc = np.concatenate( (vecXHfit_loc, tempV2), axis=0 )
                print( "= = = ...loaded %i frames so far." % (nFrames_loc) )
            print( "= = = Finished loading trajectory file %s. It has %i atoms and %i frames." % (in_flist[i], trjChunk.n_atoms, nFrames_loc) )
            print( vecXHfit_loc.shape )
            nBonds_loc = vecXHfit_loc.shape[1]
            del trjChunk
        else:
            # = = = Faster single-step read
            print( "= = = Reading trajectory file %s ..." % (in_flist[i]) )
            trj = md.load(in_flist[i], top=top_filename)
            print( "= = = File loaded - it has %i atoms and %i frames." % (trj.n_atoms, trj.n_frames) )
            # = = Run sanity check
            confirm_seltxt(trj, Hseltxt, Xseltxt)
            deltaT_loc = trj.timestep ; nFrames_loc = trj.n_frames
            resXH_loc = obtain_XHres(trj, Hseltxt)
            trj.center_coordinates()
            trj.superpose(ref, frame=0, atom_indices=fit_indices )
            print( "= = = Molecule centered and fitted." )
            #msds = md.rmsd(trj, ref, 0, precentered=True)
            vecXHfit_loc = obtain_XHvecs(trj, Hseltxt, Xseltxt)
            nBonds_loc = vecXHfit_loc.shape[1]
            del trj
        # NOTE(review): deltaT here still holds np.nan (first file) or the previous
        # file's value -- it looks like deltaT_loc was intended for this check.
        if tau_memory is not None and deltaT > 0.5*tau_memory:
            print( "= = = ERROR: delta-t form the trajectory is too small relative to tau! %g vs. %g" % (deltaT, tau_memory), file=sys.stderr )
            sys.exit(1)
        # = = Update overall variables
        if bFirst:
            resXH = resXH_loc
            deltaT = deltaT_loc ; nFrames = nFrames_loc ; nBonds = nBonds_loc
        else:
            # NOTE(review): np.equal returns an elementwise array, so 'not np.equal(...)'
            # can raise for multi-element lists -- np.array_equal may be intended.
            # Also note no sys.exit after the mismatch report.
            if deltaT != deltaT_loc or nBonds != nBonds_loc or not np.equal(resXH, resXH_loc):
                print( "= = = ERROR: Differences in trajectories have been detected! Aborting.", file=sys.stderr )
                print( "      ...delta-t: %g vs.%g " % (deltaT, deltaT_loc), file=sys.stderr )
                print( "      ...n-bonds: %g vs.%g " % (nBonds, nBonds_loc), file=sys.stderr )
                print( "      ...Residue-XORs: %s " % ( set(resXH)^set(resXH_loc) ), file=sys.stderr )
        vecXHfit.append(vecXHfit_loc)
        print( "  ... XH-vector data added to memory, using 2x %.2f MB" % ( sys.getsizeof(vecXHfit_loc)/1024.0**2.0 ) )
        # print( "= = = Loaded trajectory %s - Found %i XH-vectors %i frames." % ( in_flist[i], nBonds_loc, vecXH_loc.shape[0] ) )
        del vecXHfit_loc
    # = = =
    print( "= = Loading finished." )
    vecXHfit=np.array(vecXHfit)
    if bPsutil:
        # = = = Check vector size and currently free memory. Units are in bytes.
        # NOTE(review): psutil.virtual_memory()[3] is the 'used' field on most
        # platforms, not 'free' -- verify the index against the psutil docs.
        nFreeMem = 1.0*psutil.virtual_memory()[3]/1024.0**2
        nVecMem = 1.0*sys.getsizeof(vecXHfit)/1024.0**2
        if nFreeMem < 2*nVecMem:
            print( " = = = WARNING: the size of vectors is getting close to the amount of free system memory!" )
            print( "    ... %.2f MB used by one vector vecXHfit." % nVecMem )
            print( "    ... %.2f MB free system memory." % nFreeMem )
        else:
            print( " = = = Memoryfootprint debug. vecXHfit uses %.2f MB versus %.2f MB free memory" % ( nVecMem, nFreeMem ) )
    else:
        print( "= = = psutil module has not been loaded. Cannot check for memory footprinting." )
    if tau_memory != None:
        print( "= = Reformatting all vecXHfit information into chunks of tau ( %g ) " % tau_memory )
        vecXHfit = reformat_vecs_by_tau(vecXHfit, deltaT, tau_memory)
    # Compress 4D down to 3D for the rest of the calculations to simplify matters.
    sh = vecXHfit.shape
    vecXHfit = vecXHfit.reshape( ( sh[0]*sh[1], sh[-2], sh[-1]) )
    # = = = All sections below assume simple 3D arrays = = =
    if bDoVecAverage:
        # Note: gs-script normalises along the last axis, after this mean operation
        vecXHfitavg = (gs.normalise_vector_array( np.mean(vecXHfit, axis=0) ))
        gs.print_xylist(out_pref+'_avgvec.dat', resXH, np.array(vecXHfitavg).T, True)
        del vecXHfitavg
    if tau_memory != None:
        print( "= = = Conducting S2 analysis using memory time to chop input-trajectories", tau_memory, "ps" )
        S2 = calculate_S2_by_outerProduct(vecXHfit, deltaT, tau_memory)
    else:
        print( "= = = Conducting S2 analysis directly from trajectories." )
        S2 = calculate_S2_by_outerProduct(vecXHfit)
    gs.print_xylist(out_pref+'_S2.dat', resXH, (S2.T)*zeta, True )
    print( " ...complete." )
    time_stop=time.time()
    #Report time
    print( "= = Finished. Total seconds elapsed: %g" % (time_stop - time_start) )
    sys.exit()
| [
"psutil.virtual_memory",
"numpy.sum",
"argparse.ArgumentParser",
"numpy.einsum",
"mdtraj.load",
"numpy.mean",
"numpy.linalg.norm",
"numpy.exp",
"sys.getsizeof",
"general_scripts.print_xylist",
"numpy.std",
"numpy.equal",
"mdtraj.iterload",
"numpy.stack",
"sys.exit",
"numpy.concatenate"... | [((20925, 20935), 'sys.exit', 'sys.exit', ([], {}), '()\n', (20933, 20935), False, 'import sys, argparse, time\n'), ((8465, 8492), 'numpy.zeros', 'np.zeros', (['nFiles'], {'dtype': 'int'}), '(nFiles, dtype=int)\n', (8473, 8492), True, 'import numpy as np\n'), ((8510, 8537), 'numpy.zeros', 'np.zeros', (['nFiles'], {'dtype': 'int'}), '(nFiles, dtype=int)\n', (8518, 8537), True, 'import numpy as np\n'), ((8921, 9000), 'numpy.zeros', 'np.zeros', (['(nFramesTot, vecs[0].shape[1], vecs[0].shape[2])'], {'dtype': 'vecs[0].dtype'}), '((nFramesTot, vecs[0].shape[1], vecs[0].shape[2]), dtype=vecs[0].dtype)\n', (8929, 9000), True, 'import numpy as np\n'), ((10038, 10364), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Obtain the unit X-H vectors from one of more trajectories,and conduct calculations on it, such as S^2, C(t), and others analyses.N.B. Since we\'re following Palmer\'s formalisams by dividing data into chunks of length tau, """', 'formatter_class': 'argparse.ArgumentDefaultsHelpFormatter'}), '(description=\n "Obtain the unit X-H vectors from one of more trajectories,and conduct calculations on it, such as S^2, C(t), and others analyses.N.B. 
Since we\'re following Palmer\'s formalisams by dividing data into chunks of length tau, "\n , formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n', (10061, 10364), False, 'import sys, argparse, time\n'), ((13277, 13288), 'time.time', 'time.time', ([], {}), '()\n', (13286, 13288), False, 'import sys, argparse, time\n'), ((18842, 18860), 'numpy.array', 'np.array', (['vecXHfit'], {}), '(vecXHfit)\n', (18850, 18860), True, 'import numpy as np\n'), ((20701, 20764), 'general_scripts.print_xylist', 'gs.print_xylist', (["(out_pref + '_S2.dat')", 'resXH', '(S2.T * zeta)', '(True)'], {}), "(out_pref + '_S2.dat', resXH, S2.T * zeta, True)\n", (20716, 20764), True, 'import general_scripts as gs\n'), ((20813, 20824), 'time.time', 'time.time', ([], {}), '()\n', (20822, 20824), False, 'import sys, argparse, time\n'), ((1675, 1686), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (1683, 1686), False, 'import sys, argparse, time\n'), ((2018, 2029), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (2026, 2029), False, 'import sys, argparse, time\n'), ((2717, 2728), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (2725, 2728), False, 'import sys, argparse, time\n'), ((2944, 2955), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (2952, 2955), False, 'import sys, argparse, time\n'), ((3133, 3166), 'numpy.take', 'np.take', (['traj.xyz', 'indexH'], {'axis': '(1)'}), '(traj.xyz, indexH, axis=1)\n', (3140, 3166), True, 'import numpy as np\n'), ((3169, 3202), 'numpy.take', 'np.take', (['traj.xyz', 'indexX'], {'axis': '(1)'}), '(traj.xyz, indexX, axis=1)\n', (3176, 3202), True, 'import numpy as np\n'), ((13351, 13362), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (13359, 13362), False, 'import sys, argparse, time\n'), ((14125, 14146), 'mdtraj.load', 'md.load', (['top_filename'], {}), '(top_filename)\n', (14132, 14146), True, 'import mdtraj as md\n'), ((3974, 3994), 'numpy.outer', 'np.outer', (['v[i]', 'v[i]'], {}), '(v[i], v[i])\n', (3982, 3994), True, 'import numpy as np\n'), 
((4042, 4062), 'numpy.sum', 'np.sum', (['(outer ** 2.0)'], {}), '(outer ** 2.0)\n', (4048, 4062), True, 'import numpy as np\n'), ((5318, 5330), 'numpy.mean', 'np.mean', (['tmp'], {}), '(tmp)\n', (5325, 5330), True, 'import numpy as np\n'), ((5413, 5432), 'numpy.array', 'np.array', (['[S2, dS2]'], {}), '([S2, dS2])\n', (5421, 5432), True, 'import numpy as np\n'), ((6515, 6526), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (6523, 6526), False, 'import sys, argparse, time\n'), ((14745, 14756), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (14753, 14756), False, 'import sys, argparse, time\n'), ((15072, 15093), 'mdtraj.load', 'md.load', (['top_filename'], {}), '(top_filename)\n', (15079, 15093), True, 'import mdtraj as md\n'), ((15611, 15673), 'mdtraj.iterload', 'md.iterload', (['in_flist[i]'], {'chunk': 'nSplitFrames', 'top': 'top_filename'}), '(in_flist[i], chunk=nSplitFrames, top=top_filename)\n', (15622, 15673), True, 'import mdtraj as md\n'), ((16844, 16882), 'mdtraj.load', 'md.load', (['in_flist[i]'], {'top': 'top_filename'}), '(in_flist[i], top=top_filename)\n', (16851, 16882), True, 'import mdtraj as md\n'), ((17767, 17778), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (17775, 17778), False, 'import sys, argparse, time\n'), ((20208, 20233), 'numpy.mean', 'np.mean', (['vecXHfit'], {'axis': '(0)'}), '(vecXHfit, axis=0)\n', (20215, 20233), True, 'import numpy as np\n'), ((3849, 3866), 'numpy.linalg.norm', 'np.linalg.norm', (['v'], {}), '(v)\n', (3863, 3866), True, 'import numpy as np\n'), ((4800, 4834), 'numpy.einsum', 'np.einsum', (['"""ij,ik->jk"""', 'vecs', 'vecs'], {}), "('ij,ik->jk', vecs, vecs)\n", (4809, 4834), True, 'import numpy as np\n'), ((5187, 5224), 'numpy.einsum', 'np.einsum', (['"""ijk,ijl->ikl"""', 'vecs', 'vecs'], {}), "('ijk,ijl->ikl', vecs, vecs)\n", (5196, 5224), True, 'import numpy as np\n'), ((5351, 5362), 'numpy.std', 'np.std', (['tmp'], {}), '(tmp)\n', (5357, 5362), True, 'import numpy as np\n'), ((6223, 6243), 'numpy.mean', 
'np.mean', (['tmp'], {'axis': '(0)'}), '(tmp, axis=0)\n', (6230, 6243), True, 'import numpy as np\n'), ((6335, 6363), 'numpy.stack', 'np.stack', (['(S2, dS2)'], {'axis': '(-1)'}), '((S2, dS2), axis=-1)\n', (6343, 6363), True, 'import numpy as np\n'), ((9456, 9474), 'numpy.exp', 'np.exp', (['(-x / tau_c)'], {}), '(-x / tau_c)\n', (9462, 9474), True, 'import numpy as np\n'), ((19041, 19064), 'sys.getsizeof', 'sys.getsizeof', (['vecXHfit'], {}), '(vecXHfit)\n', (19054, 19064), False, 'import sys, argparse, time\n'), ((20292, 20313), 'numpy.array', 'np.array', (['vecXHfitavg'], {}), '(vecXHfitavg)\n', (20300, 20313), True, 'import numpy as np\n'), ((4868, 4898), 'numpy.einsum', 'np.einsum', (['"""ij,ij->"""', 'tmp', 'tmp'], {}), "('ij,ij->', tmp, tmp)\n", (4877, 4898), True, 'import numpy as np\n'), ((5265, 5298), 'numpy.einsum', 'np.einsum', (['"""ijk,ijk->i"""', 'tmp', 'tmp'], {}), "('ijk,ijk->i', tmp, tmp)\n", (5274, 5298), True, 'import numpy as np\n'), ((5369, 5385), 'numpy.sqrt', 'np.sqrt', (['nBlocks'], {}), '(nBlocks)\n', (5376, 5385), True, 'import numpy as np\n'), ((5672, 5709), 'numpy.einsum', 'np.einsum', (['"""ijk,ijl->jkl"""', 'vecs', 'vecs'], {}), "('ijk,ijl->jkl', vecs, vecs)\n", (5681, 5709), True, 'import numpy as np\n'), ((6082, 6122), 'numpy.einsum', 'np.einsum', (['"""ijkl,ijkm->iklm"""', 'vecs', 'vecs'], {}), "('ijkl,ijkm->iklm', vecs, vecs)\n", (6091, 6122), True, 'import numpy as np\n'), ((6264, 6283), 'numpy.std', 'np.std', (['tmp'], {'axis': '(0)'}), '(tmp, axis=0)\n', (6270, 6283), True, 'import numpy as np\n'), ((16305, 16351), 'numpy.concatenate', 'np.concatenate', (['(vecXHfit_loc, tempV2)'], {'axis': '(0)'}), '((vecXHfit_loc, tempV2), axis=0)\n', (16319, 16351), True, 'import numpy as np\n'), ((18027, 18053), 'numpy.equal', 'np.equal', (['resXH', 'resXH_loc'], {}), '(resXH, resXH_loc)\n', (18035, 18053), True, 'import numpy as np\n'), ((18582, 18609), 'sys.getsizeof', 'sys.getsizeof', (['vecXHfit_loc'], {}), '(vecXHfit_loc)\n', (18595, 
18609), False, 'import sys, argparse, time\n'), ((18982, 19005), 'psutil.virtual_memory', 'psutil.virtual_memory', ([], {}), '()\n', (19003, 19005), False, 'import psutil\n'), ((3765, 3793), 'numpy.linalg.norm', 'np.linalg.norm', (['v'], {'axis': 'axis'}), '(v, axis=axis)\n', (3779, 3793), True, 'import numpy as np\n'), ((5743, 5782), 'numpy.einsum', 'np.einsum', (['"""...ij,...ij->..."""', 'tmp', 'tmp'], {}), "('...ij,...ij->...', tmp, tmp)\n", (5752, 5782), True, 'import numpy as np\n'), ((6163, 6202), 'numpy.einsum', 'np.einsum', (['"""...ij,...ij->..."""', 'tmp', 'tmp'], {}), "('...ij,...ij->...', tmp, tmp)\n", (6172, 6202), True, 'import numpy as np\n'), ((6291, 6307), 'numpy.sqrt', 'np.sqrt', (['nBlocks'], {}), '(nBlocks)\n', (6298, 6307), True, 'import numpy as np\n')] |
from collections import defaultdict
import numpy as np
import io
from PDFStructureObjects import XrefEntry, XRefTable
# https://www.w3.org/TR/PNG/#9Filters
def png_algorithmPipeline(decoded_stream: bytes, number_of_columns: int, algorithm_id: int) -> bytes:
    """
    Takes a png image to decompress its contents
    :param decoded_stream: png as bytes
    :param number_of_columns: number of columns in the image
    :param algorithm_id: algorithm to be applied
    :return: png decoded as bytes
    """
    # Predictor 10 is the PNG "None" filter: the data is already plain.
    if algorithm_id == 10:
        return decoded_stream
    # PDF predictor codes above 10 map onto the standard PNG filter ids 1..4.
    standard_id = algorithm_id - 10 if algorithm_id > 10 else algorithm_id
    reverse_filters = {
        1: reverse_subFilter,
        2: reverse_upFilter,
        3: reverse_averageFilter,
        4: reverse_paethFilter,
    }
    matrix = reshape_toMatrix(decoded_stream, number_of_columns)
    unfiltered = reverse_filters[standard_id](matrix)
    return matrix_toBytes(unfiltered)
def reshape_toMatrix(data: bytes, number_of_columns) -> np.array:
    """
    Reshape a raw byte stream into a row-per-scanline matrix.
    Each row holds number_of_columns data bytes preceded by one
    filter-type byte, hence the (+ 1) in the column count.
    :param data: bytes to be reshaped
    :param number_of_columns: data bytes per scanline
    :return: A matrix representing the image (unsigned bytes)
    """
    dtype_be = np.dtype(np.ubyte).newbyteorder(">")
    flat = np.frombuffer(data, dtype_be)
    rows = len(flat) // (number_of_columns + 1)
    return np.reshape(flat, (rows, number_of_columns + 1))
def reverse_subFilter(png_array: np.array) -> np.array:
    """Reverse the PNG Sub filter. Not implemented yet.

    BUGFIX: raise NotImplementedError; ``NotImplemented`` is a sentinel
    constant, and raising it fails with a TypeError instead.
    """
    raise NotImplementedError("reverse sub filter yet to be implemented")
def reverse_averageFilter(png_array: np.array) -> np.array:
    """Reverse the PNG Average filter. Not implemented yet.

    BUGFIX: raise NotImplementedError; ``NotImplemented`` is a sentinel
    constant, and raising it fails with a TypeError instead.
    """
    raise NotImplementedError("reverse average filter yet to be implemented")
def reverse_paethFilter(png_array: np.array) -> np.array:
    """Reverse the PNG Paeth filter. Not implemented yet.

    BUGFIX: raise NotImplementedError; ``NotImplemented`` is a sentinel
    constant, and raising it fails with a TypeError instead.
    """
    raise NotImplementedError("reverse paeth filter yet to be implemented")
def reverse_upFilter(png_array: np.array) -> np.array:
    """
    Reverses the png upFilter: each row was stored as a difference from
    the row above, so decoding accumulates rows top-to-bottom (mod 256).
    :param png_array: A matrix representing the png image
    """
    # Drop the leading column that carries the per-row filter-type byte.
    png_array = np.delete(png_array, 0, 1)
    for row in range(1, len(png_array)):
        png_array[row] = (png_array[row] + png_array[row - 1]) & 0xff
    return png_array
def decode_XRef(png_matrix: np.array, W):
    """
    Walk a decoded cross-reference stream and rebuild the XRef table.
    W lists the byte widths of the three fields of each entry (entry
    type, then two type-dependent fields, per the PDF spec). Type-2
    entries (objects stored inside object streams) are collected in a
    separate mapping. Results are printed rather than returned.
    """
    stream = io.BytesIO(png_matrix)
    total = stream.getbuffer().nbytes
    compressed_objects = defaultdict(list)
    ExtractedXRef = XRefTable([], True)
    while stream.tell() != total:
        entry_type = int.from_bytes(stream.read(W[0]), "big")
        field_2 = int.from_bytes(stream.read(W[1]), "big")
        field_3 = int.from_bytes(stream.read(W[2]), "big")
        if entry_type == 0:
            # Free entry ("f" flag).
            ExtractedXRef.table.append(XrefEntry(field_2, field_3, "f"))
        elif entry_type == 1:
            # In-use entry ("n" flag).
            ExtractedXRef.table.append(XrefEntry(field_2, field_3, "n"))
        elif entry_type == 2:
            # field_2 = number of the containing object stream, field_3 = index within it.
            compressed_objects[field_2].append(field_3)
        else:
            raise AssertionError()
    print(ExtractedXRef)
    print(compressed_objects)
def matrix_toBytes(matrix: np.array) -> bytes:
    """
    Converts a matrix to variable sized bytes
    :param matrix: png matrix
    :return: variable sized bytes representation of the numbers in the matrix
    """
    return matrix.tobytes()
if __name__ == '__main__':
    # Smoke test: a raw Up-filtered (predictor 12) XRef stream, W = [1, 2, 1].
    xref = b'\x02\x01\x00\x10\x00\x02\x00\x03\xd6\x00\x02\x00\x01\xad\x00\x02\x00\x01Y\x00\x02\x00\x05^\x00\x02\x00\x06\xe7\x00\x02\x00\x03\xf7\x00\x02\x00\x01$\x00\x02\x00\n[\x00\x02\x00\x03\xa2\x00\x02\x00\x01/\x00\x02\x00}\xa1\x00\x02\x00a[\x00\x02\x01%\t\x00\x02\x00\x00\x00\x01\x02\x00\x00\x00\x01\x02\x00\x00\x00\x01\x02\x00\x00\x00\x01\x02\x00\x00\x00\x01\x02\x00\x00\x00\x01\x02\x00\x00\x00\x01\x02\x00\x00\x00\x01\x02\x00\x00\x00\x01\x02\x00\x00\x00\x01\x02\xff\xdci\xf6'
    # Rows of 4 data bytes + 1 filter byte, then undo the Up filter.
    xref_encoded = reshape_toMatrix(xref, 4)
    encoding_free = reverse_upFilter(xref_encoded)
    print(encoding_free)
    bts = matrix_toBytes(encoding_free)
    # decode_XRef prints the table and returns None.
    print(decode_XRef(bts, [1, 2, 1]))
    print()
| [
"io.BytesIO",
"PDFStructureObjects.XRefTable",
"PDFStructureObjects.XrefEntry",
"numpy.frombuffer",
"numpy.dtype",
"collections.defaultdict",
"numpy.delete"
] | [((1437, 1468), 'numpy.frombuffer', 'np.frombuffer', (['data', 'big_endian'], {}), '(data, big_endian)\n', (1450, 1468), True, 'import numpy as np\n'), ((2153, 2179), 'numpy.delete', 'np.delete', (['png_array', '(0)', '(1)'], {}), '(png_array, 0, 1)\n', (2162, 2179), True, 'import numpy as np\n'), ((2508, 2530), 'io.BytesIO', 'io.BytesIO', (['png_matrix'], {}), '(png_matrix)\n', (2518, 2530), False, 'import io\n'), ((2607, 2624), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (2618, 2624), False, 'from collections import defaultdict\n'), ((2645, 2664), 'PDFStructureObjects.XRefTable', 'XRefTable', (['[]', '(True)'], {}), '([], True)\n', (2654, 2664), False, 'from PDFStructureObjects import XrefEntry, XRefTable\n'), ((1388, 1406), 'numpy.dtype', 'np.dtype', (['np.ubyte'], {}), '(np.ubyte)\n', (1396, 1406), True, 'import numpy as np\n'), ((2996, 3028), 'PDFStructureObjects.XrefEntry', 'XrefEntry', (['field_2', 'field_3', '"""f"""'], {}), "(field_2, field_3, 'f')\n", (3005, 3028), False, 'from PDFStructureObjects import XrefEntry, XRefTable\n'), ((3096, 3128), 'PDFStructureObjects.XrefEntry', 'XrefEntry', (['field_2', 'field_3', '"""n"""'], {}), "(field_2, field_3, 'n')\n", (3105, 3128), False, 'from PDFStructureObjects import XrefEntry, XRefTable\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2016 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import re
import codecs
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
from nparser.misc.colors import ctext, color_pattern
from nparser.neural.models.nn import NN
#***************************************************************
class BaseXTagger(NN):
""" """
PAD = 0
ROOT = 1
#=============================================================
  def __call__(self, vocabs, moving_params=None):
    """Build the shared encoder graph: embed the input vocabularies and
    run the embeddings through self.n_layers of RNN.

    vocabs -- dict mapping name->vocab, or an iterable of vocab objects.
    moving_params -- optional moving-average parameters (used at eval time).
    Returns the top recurrent states (a tensor over batch x bucket x recur).
    """
    self.moving_params = moving_params
    if isinstance(vocabs, dict):
      self.vocabs = vocabs
    else:
      self.vocabs = {vocab.name: vocab for vocab in vocabs}
    
    input_vocabs = [self.vocabs[name] for name in self.input_vocabs]
    embed = self.embed_concat(input_vocabs)
    # Non-input vocabs still need placeholders (e.g. for gold targets).
    for vocab in list(self.vocabs.values()):
      if vocab not in input_vocabs:
        vocab.generate_placeholder()
    placeholder = self.vocabs['words'].placeholder
    # If the words placeholder carries a third (sub-token) dimension, keep
    # only the first channel -- assumed to be the primary word id (confirm).
    if len(placeholder.get_shape().as_list()) == 3:
      placeholder = placeholder[:,:,0]
    
    # Mask keeps tokens with ids above ROOT (i.e. excludes PAD=0 and ROOT=1).
    self._tokens_to_keep = tf.to_float(tf.greater(placeholder, self.ROOT))
    self._batch_size = tf.shape(placeholder)[0]
    self._bucket_size = tf.shape(placeholder)[1]
    # Sequence lengths count all non-PAD positions (ids > 0), including ROOT.
    self._sequence_lengths = tf.reduce_sum(tf.to_int32(tf.greater(placeholder, self.PAD)), axis=1)
    self._n_tokens = tf.to_int32(tf.reduce_sum(self.tokens_to_keep))
    
    top_recur = embed
    for i in range(self.n_layers):
      with tf.variable_scope('RNN%d' % i):
        top_recur, _ = self.RNN(top_recur, self.recur_size)
    return top_recur
#=============================================================
def process_accumulators(self, accumulators, time=None):
""" """
n_tokens, n_seqs, loss, corr, xcorr, seq_corr = accumulators
acc_dict = {
'Loss': loss,
'TS': corr/n_tokens*100,
'XTS': xcorr/n_tokens*100,
'SS': seq_corr/n_seqs*100,
}
if time is not None:
acc_dict.update({
'Token_rate': n_tokens / time,
'Seq_rate': n_seqs / time,
})
return acc_dict
#=============================================================
def update_history(self, history, accumulators):
""" """
acc_dict = self.process_accumulators(accumulators)
for key, value in acc_dict.items():
history[key].append(value)
return history['TS'][-1]
#=============================================================
def print_accuracy(self, accumulators, time, prefix='Train'):
""" """
acc_dict = self.process_accumulators(accumulators, time=time)
strings = []
strings.append(color_pattern('Loss:', '{Loss:7.3f}', 'bright_red'))
strings.append(color_pattern('TS:', '{TS:5.2f}%', 'bright_cyan'))
strings.append(color_pattern('XTS:', '{XTS:5.2f}%', 'bright_cyan'))
strings.append(color_pattern('SS:', '{SS:5.2f}%', 'bright_green'))
strings.append(color_pattern('Speed:', '{Seq_rate:6.1f} seqs/sec', 'bright_magenta'))
string = ctext('{0} ', 'bold') + ' | '.join(strings)
print(string.format(prefix, **acc_dict),file=sys.stderr)
return
#=============================================================
def plot(self, history, prefix='Train'):
""" """
pass
#=============================================================
def check(self, preds, sents, fileobj):
""" """
for tokens, preds, xpreds in zip(sents, preds[0], preds[1]):
for token, pred, xpred in zip(list(zip(*tokens)), preds, xpreds):
tag = self.vocabs['tags'][pred]
xtag = self.vocabs['xtags'][xpred]
fileobj.write('\t'.join(token+(tag, xtag))+'\n')
fileobj.write('\n')
return
#=============================================================
  def write_probs(self, sents, output_file, probs, inv_idxs, metadata):
    """
    Write CoNLL-U style tagged output for all sentences, restoring the
    original sentence order via `inv_idxs` and re-attaching comments,
    multiword-token ranges, feats and misc fields from `metadata`.

    `output_file` can be a string or an open file (latter will be flushed but not closed)
    """
    # Turns list of tuples of tensors into list of matrices
    tag_probs = [tag_prob for batch in probs for tag_prob in batch[0]]
    xtag_probs = [xtag_prob for batch in probs for xtag_prob in batch[1]]
    tokens_to_keep = [weight for batch in probs for weight in batch[2]]
    tokens = [sent for batch in sents for sent in batch]
    close_out=False
    if isinstance(output_file,str):
      f=codecs.open(output_file, 'w', encoding='utf-8', errors='ignore')
      close_out=True
    else:
      f=output_file
    for meta_idx,i in enumerate(inv_idxs):
      sent, tag_prob, xtag_prob, weights = tokens[i], tag_probs[i], xtag_probs[i], tokens_to_keep[i]
      sent = list(zip(*sent))
      xtag_prob[:,self.vocabs['xtags']["UNK"]]=0.0 ## this masks UNK class probability and prevents Xtagger to produce unknown output (if input has min_occur_count set to 2 the tagger learns to predict unknown...)
      tag_preds = np.argmax(tag_prob, axis=1)
      xtag_preds = np.argmax(xtag_prob, axis=1)
      sent_meta=metadata[meta_idx]
      if sent_meta["comments"]:
        # dummy sentences injected by the parser are silently dropped
        if "Parserv2dummysentenceJHYSTGSH" in sent_meta["comments"][0]:
          continue
        f.write("\n".join(sent_meta["comments"]))
        f.write("\n")
      # predictions at index 0 are skipped (presumably the ROOT token) -- TODO confirm
      for tok_idx,(token, tag_pred, xtag_pred, weight) in enumerate(zip(sent, tag_preds[1:], xtag_preds[1:], weights[1:])):
        for b,e,form in sent_meta["multiwordtokens"]:
          if tok_idx+1==b: #there goes a multiword right here!
            # emit the "b-e<TAB>form" range line with 8 underscore columns
            f.write("{}-{}\t{}".format(b,e,form))
            f.write("\t_"*8)
            f.write("\n")
        token = list(token)
        token.insert(5, sent_meta["feats"][tok_idx])
        token.append('_')
        token.append(sent_meta["miscfield"][tok_idx])
        # overwrite UPOS/XPOS columns with the predicted tags
        token[3] = self.vocabs['tags'][tag_pred]
        token[4] = self.vocabs['xtags'][xtag_pred]
        f.write('\t'.join(token)+'\n')
      if sent: #WHY DO I NEED THIS? CAN THERE BE AN EMPTY SENTENCE?
        f.write('\n')
      f.flush()
    if close_out:
      f.close()
    return
#=============================================================
  @property
  def train_keys(self):
    """Names of the accumulator slots collected during training."""
    return ('n_tokens', 'n_seqs', 'loss', 'n_tag_correct', 'n_xtag_correct', 'n_seqs_correct')
#=============================================================
  @property
  def valid_keys(self):
    """Names of the outputs collected during validation."""
    return ('tag_preds', 'xtag_preds')
#=============================================================
  @property
  def parse_keys(self):
    """Names of the outputs collected during parsing/prediction."""
    return ('tag_probs', 'xtag_probs', 'tokens_to_keep')
| [
"tensorflow.reduce_sum",
"codecs.open",
"numpy.argmax",
"tensorflow.variable_scope",
"tensorflow.shape",
"nparser.misc.colors.color_pattern",
"tensorflow.greater",
"nparser.misc.colors.ctext"
] | [((1673, 1707), 'tensorflow.greater', 'tf.greater', (['placeholder', 'self.ROOT'], {}), '(placeholder, self.ROOT)\n', (1683, 1707), True, 'import tensorflow as tf\n'), ((1732, 1753), 'tensorflow.shape', 'tf.shape', (['placeholder'], {}), '(placeholder)\n', (1740, 1753), True, 'import tensorflow as tf\n'), ((1781, 1802), 'tensorflow.shape', 'tf.shape', (['placeholder'], {}), '(placeholder)\n', (1789, 1802), True, 'import tensorflow as tf\n'), ((1938, 1972), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['self.tokens_to_keep'], {}), '(self.tokens_to_keep)\n', (1951, 1972), True, 'import tensorflow as tf\n'), ((3205, 3256), 'nparser.misc.colors.color_pattern', 'color_pattern', (['"""Loss:"""', '"""{Loss:7.3f}"""', '"""bright_red"""'], {}), "('Loss:', '{Loss:7.3f}', 'bright_red')\n", (3218, 3256), False, 'from nparser.misc.colors import ctext, color_pattern\n'), ((3277, 3326), 'nparser.misc.colors.color_pattern', 'color_pattern', (['"""TS:"""', '"""{TS:5.2f}%"""', '"""bright_cyan"""'], {}), "('TS:', '{TS:5.2f}%', 'bright_cyan')\n", (3290, 3326), False, 'from nparser.misc.colors import ctext, color_pattern\n'), ((3347, 3398), 'nparser.misc.colors.color_pattern', 'color_pattern', (['"""XTS:"""', '"""{XTS:5.2f}%"""', '"""bright_cyan"""'], {}), "('XTS:', '{XTS:5.2f}%', 'bright_cyan')\n", (3360, 3398), False, 'from nparser.misc.colors import ctext, color_pattern\n'), ((3419, 3469), 'nparser.misc.colors.color_pattern', 'color_pattern', (['"""SS:"""', '"""{SS:5.2f}%"""', '"""bright_green"""'], {}), "('SS:', '{SS:5.2f}%', 'bright_green')\n", (3432, 3469), False, 'from nparser.misc.colors import ctext, color_pattern\n'), ((3490, 3559), 'nparser.misc.colors.color_pattern', 'color_pattern', (['"""Speed:"""', '"""{Seq_rate:6.1f} seqs/sec"""', '"""bright_magenta"""'], {}), "('Speed:', '{Seq_rate:6.1f} seqs/sec', 'bright_magenta')\n", (3503, 3559), False, 'from nparser.misc.colors import ctext, color_pattern\n'), ((3574, 3596), 'nparser.misc.colors.ctext', 'ctext', (['"""{0} """', 
'"""bold"""'], {}), "('{0} ', 'bold')\n", (3579, 3596), False, 'from nparser.misc.colors import ctext, color_pattern\n'), ((4913, 4977), 'codecs.open', 'codecs.open', (['output_file', '"""w"""'], {'encoding': '"""utf-8"""', 'errors': '"""ignore"""'}), "(output_file, 'w', encoding='utf-8', errors='ignore')\n", (4924, 4977), False, 'import codecs\n'), ((5435, 5462), 'numpy.argmax', 'np.argmax', (['tag_prob'], {'axis': '(1)'}), '(tag_prob, axis=1)\n', (5444, 5462), True, 'import numpy as np\n'), ((5482, 5510), 'numpy.argmax', 'np.argmax', (['xtag_prob'], {'axis': '(1)'}), '(xtag_prob, axis=1)\n', (5491, 5510), True, 'import numpy as np\n'), ((1861, 1894), 'tensorflow.greater', 'tf.greater', (['placeholder', 'self.PAD'], {}), '(placeholder, self.PAD)\n', (1871, 1894), True, 'import tensorflow as tf\n'), ((2047, 2077), 'tensorflow.variable_scope', 'tf.variable_scope', (["('RNN%d' % i)"], {}), "('RNN%d' % i)\n", (2064, 2077), True, 'import tensorflow as tf\n')] |
from builtins import zip
import numpy as np
from lsst.sims.utils import m5_flat_sed
from lsst.sims.photUtils import LSSTdefaults
def restore_files():
    """Load the three healpix sky-brightness chunks from disk and stitch
    them into a single contiguous time series.

    Each chunk pair ('healpix/<root>.npz' + '.npy') is subset to the evenly
    spaced mjd values listed in its header, then later chunks contribute
    only the mjds beyond what is already covered (the chunks overlap in
    time).

    Returns
    -------
    (sky_brightness, dict_of_lists) : the concatenated sky-brightness
        array and the matching dict of per-mjd metadata arrays.
    """
    roots = ['61390_61603', '61573_61786', '61756_61969']
    dicts = []
    sbs = []
    for root in roots:
        restore_file = 'healpix/'+root+'.npz'
        disk_data = np.load(restore_file)
        required_mjds = disk_data['header'][()]['required_mjds'].copy()
        dict_of_lists = disk_data['dict_of_lists'][()].copy()
        disk_data.close()
        sky_brightness = np.load('healpix/'+root+'.npy')
        # find the indices of all the evenly spaced mjd values
        even_mjd_indx = np.in1d(dict_of_lists['mjds'], required_mjds)
        for key in dict_of_lists:
            dict_of_lists[key] = dict_of_lists[key][even_mjd_indx]
        sky_brightness = sky_brightness[even_mjd_indx]
        dicts.append(dict_of_lists)
        sbs.append(sky_brightness)
    sky_brightness = sbs[0]
    dict_of_lists = dicts[0]
    # FIX: the original wrapped this loop in a bare `except:` that dropped
    # into pdb -- a debugging leftover that swallowed every error. It is
    # removed so real failures propagate to the caller.
    for i in range(1, len(dicts)):
        # only keep mjds past the end of what we already accumulated
        new_mjds = np.where(dicts[i]['mjds'] > dict_of_lists['mjds'].max())[0]
        for key in dict_of_lists:
            if isinstance(dicts[i][key][new_mjds], list):
                dict_of_lists[key].extend(dicts[i][key][new_mjds])
            else:
                dict_of_lists[key] = np.concatenate((dict_of_lists[key], dicts[i][key][new_mjds]))
        sky_brightness = np.concatenate((sky_brightness, sbs[i][new_mjds]))
    return sky_brightness, dict_of_lists
def generate_percentiles(nbins=20):
    """
    Make histograms of the 5-sigma limiting depths for each point and each filter.

    For every healpix point and LSST filter, the finite m5 values over time
    are sorted and sampled at `nbins` evenly spaced ranks (an empirical
    percentile curve), then saved to 'percentile_m5_maps.npz'.
    """
    filters = ['u', 'g', 'r', 'i', 'z', 'y']
    sky_brightness, dict_of_lists = restore_files()
    npix = sky_brightness['r'].shape[-1]
    # structured arrays: one float column per filter
    histograms = np.zeros((nbins, npix), dtype=list(zip(filters, [float]*6)))
    histogram_npts = np.zeros(npix, dtype=list(zip(filters, [int]*6)))
    for filtername in filters:
        # convert surface brightness to m5
        FWHMeff = LSSTdefaults().FWHMeff(filtername)
        # increase as a function of airmass
        airmass_correction = np.power(dict_of_lists['airmass'], 0.6)
        FWHMeff *= airmass_correction
        # 30. is the exposure time in seconds -- TODO confirm against m5_flat_sed
        m5_arr = m5_flat_sed(filtername, sky_brightness[filtername], FWHMeff, 30.,
                            dict_of_lists['airmass'])
        for indx in np.arange(npix):
            m5s = m5_arr[:, indx]
            m5s = m5s[np.isfinite(m5s)]
            m5s = np.sort(m5s)
            # evenly spaced ranks through the sorted sample
            percentile_points = np.round(np.linspace(0, m5s.size-1, nbins))
            if m5s.size > percentile_points.size:
                histograms[filtername][:, indx] = m5s[percentile_points.astype(int)]
                histogram_npts[filtername][indx] = m5s.size
            # make the histogram for this point in the sky
            # histograms[filtername][:, indx] += np.histogram(m5s[np.isfinite(m5s)],
            #                                                bins=bins[filtername])[0]
    np.savez('percentile_m5_maps.npz', histograms=histograms, histogram_npts=histogram_npts)
if __name__ == '__main__':
    # The commented lines below were used to make a giant 2-year sky file.
    #mjd0 = 59853
    #test_length = 365.25  # days
    #generate_sky(mjd0=mjd0, mjd_max=mjd0+test_length, outpath='', outfile='big_temp_sky.npz')
    generate_percentiles()
| [
"numpy.load",
"lsst.sims.photUtils.LSSTdefaults",
"numpy.power",
"numpy.isfinite",
"numpy.sort",
"numpy.arange",
"pdb.set_trace",
"builtins.zip",
"numpy.linspace",
"numpy.savez",
"lsst.sims.utils.m5_flat_sed",
"numpy.concatenate",
"numpy.in1d"
] | [((3085, 3178), 'numpy.savez', 'np.savez', (['"""percentile_m5_maps.npz"""'], {'histograms': 'histograms', 'histogram_npts': 'histogram_npts'}), "('percentile_m5_maps.npz', histograms=histograms, histogram_npts=\n histogram_npts)\n", (3093, 3178), True, 'import numpy as np\n'), ((327, 348), 'numpy.load', 'np.load', (['restore_file'], {}), '(restore_file)\n', (334, 348), True, 'import numpy as np\n'), ((534, 569), 'numpy.load', 'np.load', (["('healpix/' + root + '.npy')"], {}), "('healpix/' + root + '.npy')\n", (541, 569), True, 'import numpy as np\n'), ((653, 698), 'numpy.in1d', 'np.in1d', (["dict_of_lists['mjds']", 'required_mjds'], {}), "(dict_of_lists['mjds'], required_mjds)\n", (660, 698), True, 'import numpy as np\n'), ((2219, 2258), 'numpy.power', 'np.power', (["dict_of_lists['airmass']", '(0.6)'], {}), "(dict_of_lists['airmass'], 0.6)\n", (2227, 2258), True, 'import numpy as np\n'), ((2314, 2410), 'lsst.sims.utils.m5_flat_sed', 'm5_flat_sed', (['filtername', 'sky_brightness[filtername]', 'FWHMeff', '(30.0)', "dict_of_lists['airmass']"], {}), "(filtername, sky_brightness[filtername], FWHMeff, 30.0,\n dict_of_lists['airmass'])\n", (2325, 2410), False, 'from lsst.sims.utils import m5_flat_sed\n'), ((2456, 2471), 'numpy.arange', 'np.arange', (['npix'], {}), '(npix)\n', (2465, 2471), True, 'import numpy as np\n'), ((1442, 1492), 'numpy.concatenate', 'np.concatenate', (['(sky_brightness, sbs[i][new_mjds])'], {}), '((sky_brightness, sbs[i][new_mjds]))\n', (1456, 1492), True, 'import numpy as np\n'), ((1526, 1541), 'pdb.set_trace', 'pdb.set_trace', ([], {}), '()\n', (1539, 1541), False, 'import pdb\n'), ((2565, 2577), 'numpy.sort', 'np.sort', (['m5s'], {}), '(m5s)\n', (2572, 2577), True, 'import numpy as np\n'), ((1916, 1941), 'builtins.zip', 'zip', (['filters', '([float] * 6)'], {}), '(filters, [float] * 6)\n', (1919, 1941), False, 'from builtins import zip\n'), ((1989, 2012), 'builtins.zip', 'zip', (['filters', '([int] * 6)'], {}), '(filters, [int] * 6)\n', 
(1992, 2012), False, 'from builtins import zip\n'), ((2111, 2125), 'lsst.sims.photUtils.LSSTdefaults', 'LSSTdefaults', ([], {}), '()\n', (2123, 2125), False, 'from lsst.sims.photUtils import LSSTdefaults\n'), ((2529, 2545), 'numpy.isfinite', 'np.isfinite', (['m5s'], {}), '(m5s)\n', (2540, 2545), True, 'import numpy as np\n'), ((2619, 2654), 'numpy.linspace', 'np.linspace', (['(0)', '(m5s.size - 1)', 'nbins'], {}), '(0, m5s.size - 1, nbins)\n', (2630, 2654), True, 'import numpy as np\n'), ((1351, 1412), 'numpy.concatenate', 'np.concatenate', (['(dict_of_lists[key], dicts[i][key][new_mjds])'], {}), '((dict_of_lists[key], dicts[i][key][new_mjds]))\n', (1365, 1412), True, 'import numpy as np\n')] |
# Written by <NAME>
import PGM_h
from numpy.linalg import norm
from scipy import optimize as optimize
from math import exp
from math import log
k_features = 6  # number of feature weights (lambda entries)
# Initial lambda weights; the runs below record values fitted on different
# training sizes / language models (timings noted per run).
landa_init = [0.30303594, 0.30279514, 0.25452369, 0.30318327, 0.28723748, 0.27421734]  # [1, 1, 1, 1, 1, 1]
# landa for 1000 train query [ 0.21909945  0.21877479  0.21689562  0.21909932  0.21211461  0.21429248] 90sec
# landa for 6000 train query [ 0.30303594  0.30279514  0.25452369  0.30318327  0.28723748  0.27421734] 310sec (5k3m)
# landa for 6000 train query [ 0.2295376   0.22583204  0.19447061  0.224045    0.23101649  0.19441793] 260sec 64k10m
# ----------------------------------------Read from Learning Model File Probabilities
with open('2gram64k10m.arpa', 'r') as f:
d = {}
l = f.read().split('\n') # Split using end line
for ieach_char in l:
values = ieach_char.split('\t') # Split using 'tab'
d[values[1]] = values[0]
lang_model_ = d
f.close()
with open('1gram_train64k.txt', 'r') as f:
freq = {}
l = f.read().split('\n')
for count_1g in l:
values = count_1g.split('\t')
freq[values[1]] = values[0]
lang_model_1g = freq
f.close()
with open('temp_train.txt', 'r') as f:
listC = []
xi = []
yi = []
oi = []
l = f.read().split('\n')
for count in l:
values = count.split(',') # Split using ','
xi.append(values[0])
yi.append(values[1])
oi.append(values[2])
listC.append(values)
f.close()
Train_Data = listC
# ----------------------------------------f Function
def f(vay_pre, vay, language_model):
    """Bigram language-model feature: look up the lower-cased
    "<previous> <word>" pair in *language_model* and return its score as a
    float, or -99 when the pair is unseen."""
    bigram = vay_pre.lower() + ' ' + vay.lower()
    if bigram not in language_model:
        return -99
    return float(language_model[bigram])
# ----------------------------------------
def make_list(operation, x_i, length_of_check):
    """Generate every candidate correction of *x_i* for one edit operation.

    Parameters
    ----------
    operation : one of 'nothing', 'del', 'ins', 'subs', 'trans', 'merge',
        'split'.  'merge' only applies when *x_i* contains a space.
    x_i : the token (or space-joined token pair for 'merge') to edit.
    length_of_check : maximum number of consecutive characters a single
        'del' candidate may remove.

    Returns
    -------
    list of candidate strings; empty when the operation produces none.

    FIX: the original fell through to `return ""` (a str) when no branch
    applied (e.g. 'merge' without a space, or an unknown operation); an
    empty list is returned instead for a consistent return type.  This is
    backward compatible: both values are falsy and iterate to nothing.
    """
    temp_list = []
    list_of_char = list('abcdefghijklmnopqrstuvwxyz')
    if operation == "nothing":
        temp_list.append(x_i)
    elif operation == "del":
        # drop j consecutive characters starting at position k
        for k in range(len(x_i)):
            for j in range(1, length_of_check + 1):
                temp_list.append(x_i[:k] + x_i[k + j:])
    elif operation == "ins":
        # insert each letter before position k
        # NOTE(review): insertion at the very end of the word is never
        # generated (k stops at len-1) -- confirm this is intended
        for k in range(len(x_i)):
            for ch in list_of_char:
                temp_list.append(x_i[:k] + ch + x_i[k:])
    elif operation == "subs":
        # substitute each letter different from the original at position k
        for k in range(len(x_i)):
            for ch in list_of_char:
                if ch != x_i[k]:
                    temp_list.append(x_i[:k] + ch + x_i[k + 1:])
    elif operation == "trans":
        # swap the characters at positions k and j (k < j)
        for k in range(len(x_i) - 1):
            for j in range(k + 1, len(x_i)):
                temp_list.append(x_i[:k] + x_i[j] + x_i[k + 1:j] + x_i[k] + x_i[j + 1:])
    elif operation == "merge" and x_i.find(" ") != -1:
        temp_list.append(x_i.replace(" ", "-"))
    elif operation == "split":
        # insert a '-' at every interior position
        for k in range(1, len(x_i)):
            temp_list.append(x_i[:k] + "-" + x_i[k:])
    return temp_list
# ------------------------------------------------------------------------------------------
def pr_function(landa_tmp):
    """Objective for lambda optimisation (minimised by scipy.optimize):
    an L2 penalty on the weights minus log10 of the summed sequence
    probabilities over the full training set."""
    pri = 0
    file_length = len(Train_Data)
    for i in range(file_length):
        yi_pre = "<s>"  # sentence-start marker for the first bigram
        x = xi[i].split(" ")
        y = yi[i].split(" ")
        o = oi[i].strip('\n')
        o = o.split(" ")
        # idx=1 -> score the whole token sequence
        tmp_pri = pro_yox(x, yi_pre, y, o, landa_tmp, lang_model_, k_features, 1)
        pri += tmp_pri
    pri = norm(landa_tmp) ** 2 * 2 - log(pri, 10)
    return pri
def pre_fi_f(vay_pre, vay, language_model, landa_tmp, features):
    """Weighted language-model feature score: sum of
    landa_tmp[i] * f(vay_pre, vay, language_model) over the first
    *features* weights.

    FIX: f() does not depend on the loop index, so it is evaluated once and
    reused; the original recomputed the identical dictionary lookup inside
    the loop.  The accumulation order is unchanged, so the float result is
    bit-identical.
    """
    feature_value = f(vay_pre, vay, language_model)
    result = 0
    for i in range(features):
        result += landa_tmp[i] * feature_value
    return result
def pre_fi_h(vay, ou, ix, length_of_check, landa_tmp, features):
    """Weighted edit-operation feature score: sum over feature index i of
    landa_tmp[i] * PGM_h.h(...).  Note h() takes the feature index, so it
    must be evaluated per iteration."""
    total = 0
    for idx in range(features):
        total += landa_tmp[idx] * PGM_h.h(vay, ou, ix, length_of_check, idx)
    return total
def pro_yox(x, yi_pre, y, o, landa_tmp, language_model, features, idx):
    """Probability (unnormalised, exp of weighted feature sum) of candidate
    correction y for observed token(s) x under operation(s) o.

    idx == 0 scores a single token; any other idx scores the whole aligned
    sequence, threading yi_pre through as the previous word.
    """
    result = 0
    # normalisation constant; 1/(1**1) == 1.0, i.e. effectively disabled
    z = 1 / (1 ** 1)
    if idx == 0:
        xij = x
        yij = y
        oij = o
        if oij == "split":
            # "a-b" candidate: score the second half with the first as context
            yij = yij.replace("-", " ")
            yij_temp = yij.split(" ")
            yi_pre = yij_temp[0]
            yij = yij_temp[1]
        elif oij == "merge":
            xij = xij.replace("-", " ")
        lfk = pre_fi_f(yi_pre, yij, language_model, landa_tmp, features)
        lhk = pre_fi_h(yij, oij, xij, 1, landa_tmp, features)
        pro_tmp = lfk + lhk
        result += pro_tmp
    else:
        length = len(o)
        for ocount in range(length):
            xij = x[ocount]
            yij = y[ocount]
            oij = o[ocount]
            if oij == "split":
                yij = yij.replace("-", " ")
                yij_temp = yij.split(" ")
                yi_pre = yij_temp[0]
                yij = yij_temp[1]
            elif oij == "merge":
                xij = xij.replace("-", " ")
            lfk = pre_fi_f(yi_pre, yij, language_model, landa_tmp, features)
            lhk = pre_fi_h(yij, oij, xij, 1, landa_tmp, features)
            pro_tmp = lfk + lhk
            result += pro_tmp
            yi_pre = y[ocount]
    # clamp to avoid OverflowError in exp()
    if result > 500:
        result = 500
    result = exp(result)
    result /= z
    return result
# ------------------------------------------------------------------------------------------
def test_model(x, landa_tmp):
    """Correct the sentence *x* token by token and print the results.

    For each token, the three correction passes (single-character error
    correction, split, merge) are tried in all six orderings; the ordering
    with the highest combined score wins.  A final 'phrase' pass may join
    the token with the following one.  Results are printed, not returned.
    """
    o1_list = ["nothing", "del", "subs", "ins", "trans"]
    count_x = x.count(" ") + 1
    x = x.split(" ")
    best_y1 = []
    best_y2 = []
    best_y3 = []
    best_y = []
    bfirst_oi = []
    bsecond_oi = []
    bthird_oi = []
    bfourth_oi = []
    grades = []
    yi_pre = "<s>"
    in_merge = 0
    for i in range(count_x):
        # a merge consumed the next token on the previous iteration
        if in_merge:
            in_merge = 0
            continue
        xip1 = '<\s>'
        if i != count_x - 1:
            xip1 = x[i + 1]
        in_phrase = 0
        in_split = 0
        x_i = x[i]
        # state list layout: [op, candidate, split_flag, merge_flag,
        #                     err_score, split_score, merge_score]
        temp = ['nothing', x_i, in_split, in_merge, -99, -99, -99]
        temp1 = ['nothing', x_i, in_split, in_merge, -99, -99, -99]
        temp2 = ['nothing', x_i, in_split, in_merge, -99, -99, -99]
        temp3 = ['nothing', x_i, in_split, in_merge, -99, -99, -99]
        temp4 = ['nothing', x_i, in_split, in_merge, -99, -99, -99]
        temp5 = ['nothing', x_i, in_split, in_merge, -99, -99, -99]
        # ordering 0: err-correction -> split -> merge
        temp = f_err_correction(o1_list, yi_pre, temp, landa_tmp)
        first_ois = [temp[0]]
        first_yis = [temp[1]]
        temp = f_split(temp, landa_tmp)
        second_ois = [temp[0]]
        second_yis = [temp[1]]
        temp = f_merge(xip1, temp, landa_tmp)
        third_ois = [temp[0]]
        third_yis = [temp[1]]
        in_spliting = [temp[2]]
        in_merging = [temp[3]]
        best_g = [temp[4] + temp[5] + temp[6]]
        # ordering 1: split -> merge -> err-correction
        temp = f_split(temp1, landa_tmp)
        first_ois.append(temp[0])
        first_yis.append(temp[1])
        temp = f_merge(xip1, temp, landa_tmp)
        second_ois.append(temp[0])
        second_yis.append(temp[1])
        temp = f_err_correction(o1_list, yi_pre, temp, landa_tmp)
        third_ois.append(temp[0])
        third_yis.append(temp[1])
        in_spliting.append(temp[2])
        in_merging.append(temp[3])
        best_g.append(temp[4] + temp[5] + temp[6])
        # ordering 2: err-correction -> merge -> split
        temp = f_err_correction(o1_list, yi_pre, temp2, landa_tmp)
        first_ois.append(temp[0])
        first_yis.append(temp[1])
        temp = f_merge(xip1, temp, landa_tmp)
        second_ois.append(temp[0])
        second_yis.append(temp[1])
        temp = f_split(temp, landa_tmp)
        third_ois.append(temp[0])
        third_yis.append(temp[1])
        in_spliting.append(temp[2])
        in_merging.append(temp[3])
        best_g.append(temp[4] + temp[5] + temp[6])
        # ordering 3: merge -> split -> err-correction
        temp = f_merge(xip1, temp3, landa_tmp)
        first_ois.append(temp[0])
        first_yis.append(temp[1])
        temp = f_split(temp, landa_tmp)
        second_ois.append(temp[0])
        second_yis.append(temp[1])
        temp = f_err_correction(o1_list, yi_pre, temp, landa_tmp)
        third_ois.append(temp[0])
        third_yis.append(temp[1])
        in_spliting.append(temp[2])
        in_merging.append(temp[3])
        best_g.append(temp[4] + temp[5] + temp[6])
        # ordering 4: merge -> err-correction -> split
        temp = f_merge(xip1, temp4, landa_tmp)
        first_ois.append(temp[0])
        first_yis.append(temp[1])
        temp = f_err_correction(o1_list, yi_pre, temp, landa_tmp)
        second_ois.append(temp[0])
        second_yis.append(temp[1])
        temp = f_split(temp, landa_tmp)
        third_ois.append(temp[0])
        third_yis.append(temp[1])
        in_spliting.append(temp[2])
        in_merging.append(temp[3])
        best_g.append(temp[4] + temp[5] + temp[6])
        # ordering 5: split -> err-correction -> merge
        temp = f_split(temp5, landa_tmp)
        first_ois.append(temp[0])
        first_yis.append(temp[1])
        temp = f_err_correction(o1_list, yi_pre, temp, landa_tmp)
        second_ois.append(temp[0])
        second_yis.append(temp[1])
        temp = f_merge(xip1, temp, landa_tmp)
        third_ois.append(temp[0])
        third_yis.append(temp[1])
        in_spliting.append(temp[2])
        in_merging.append(temp[3])
        best_g.append(temp[4] + temp[5] + temp[6])
        # pick the ordering with the highest combined score
        bg = 0
        if best_g[3] >= best_g[1] and best_g[3] >= best_g[2] and best_g[3] >= best_g[0] and best_g[3] >= best_g[4] and \
                best_g[3] >= best_g[5]:
            bg = 3
        elif best_g[1] >= best_g[0] and best_g[1] >= best_g[2] and best_g[1] >= best_g[3] and best_g[1] >= best_g[4] and \
                best_g[1] >= best_g[5]:
            bg = 1
        elif best_g[2] >= best_g[1] and best_g[2] >= best_g[0] and best_g[2] >= best_g[3] and best_g[2] >= best_g[4] and \
                best_g[2] >= best_g[5]:
            bg = 2
        elif best_g[4] >= best_g[0] and best_g[4] >= best_g[2] and best_g[4] >= best_g[3] and best_g[4] >= best_g[1] and \
                best_g[4] >= best_g[5]:
            bg = 4
        elif best_g[5] >= best_g[1] and best_g[5] >= best_g[0] and best_g[5] >= best_g[3] and best_g[5] >= best_g[2] and \
                best_g[5] >= best_g[4]:
            bg = 5
        first_oi = first_ois[bg]
        first_yi = first_yis[bg]
        second_oi = second_ois[bg]
        second_yi = second_yis[bg]
        third_oi = third_ois[bg]
        third_yi = third_yis[bg]
        in_split = in_spliting[bg]
        in_merge = in_merging[bg]
        x_i = third_yi
        fourth_yi = x_i
        fourth_oi = 'nothing'
        # fourth pass: try to join with the next token as a known phrase
        if i != count_x - 1:
            if in_split == 1:
                merge_temp = x_i.split(' ')
                x_i = merge_temp[1]
            x_ip = x_i + " " + x[i + 1]
            x_ip = x_ip.capitalize()
            if PGM_h.h("", "phrase", x_ip, 1, 5) == 0:
                fourth_oi = "phrase"
                fourth_yi = x_ip
                in_merge = 1
                in_phrase = 1
                x_i = fourth_yi
        if in_phrase == 1:
            fourth_yi = "\"" + fourth_yi + "\""
        best_y1.append(first_yi)
        best_y2.append(second_yi)
        best_y3.append(third_yi)
        best_y.append(fourth_yi)
        bfirst_oi.append(first_oi)
        bsecond_oi.append(second_oi)
        bthird_oi.append(third_oi)
        bfourth_oi.append(fourth_oi)
        grades.append(str(temp[4]) + ' ' + str(temp[5]) + ' ' + str(temp[6]))
        # after a split, only the second half is the context for the next token
        if in_split == 1:
            merge_temp = fourth_yi.split(' ')
            x_i = merge_temp[1]
        yi_pre = x_i
    print(x)
    print(bfirst_oi)
    print(best_y1)
    print(bsecond_oi)
    print(best_y2)
    print(bthird_oi)
    print(best_y3)
    print(bfourth_oi)
    tempo = ''
    for printy in best_y:
        tempo = tempo + ' ' + printy
    print(tempo)
def f_err_correction(o_list, yp, temp, landa_tmp):
    """Find the best single-character correction of the current candidate.

    Mutates and returns the state list *temp* ([op, candidate, split_flag,
    merge_flag, err_score, split_score, merge_score]).  When a previous
    split left two halves, each half is corrected independently and the
    scores are averaged.  yp is the previous word (bigram context).
    """
    oy = temp
    oy0 = [oy[0], '']   # best operation per half
    oy1 = [oy[1], '']   # best candidate per half
    bpr = [-99, -99]    # best probability per half
    x = oy[1]
    splitcheck = oy[2]
    tempx = [x]
    if splitcheck == 1:
        tempx = x.split(' ')
    for x_counter in range(len(tempx)):
        inix = tempx[x_counter]
        if x_counter == 1:
            # second half uses the corrected first half as context
            yp = oy1[0]
        for temp_oi in o_list:
            yi_list = make_list(temp_oi, inix, 1)
            for temp_yi in yi_list:
                temp_pr = pro_yox(inix, yp, temp_yi, temp_oi, landa_tmp, lang_model_, k_features, 0)
                if temp_pr > bpr[x_counter]:
                    oy0[x_counter] = temp_oi
                    oy1[x_counter] = temp_yi
                    bpr[x_counter] = temp_pr
    if splitcheck == 1:
        oy[0] = oy0[0] + ' ' + oy0[1]
        oy[1] = oy1[0] + ' ' + oy1[1]
        best_pr = (bpr[0] + bpr[1]) / 2
    else:
        oy[0] = oy0[0]
        oy[1] = oy1[0]
        best_pr = bpr[0]
    # NOTE(review): assumes some candidate scored > 0 (pro_yox returns
    # exp(...) > 0, so 'nothing' always does); log() would raise otherwise.
    oy[4] = log(best_pr, 10)
    return oy
def f_split(temp, landa_tmp):
    """Try splitting the current candidate into two words; mutates and
    returns the state list *temp* ([op, candidate, split_flag, merge_flag,
    err_score, split_score, merge_score]).  Skipped when a merge already
    happened on this token."""
    best_pr = -99
    oy = temp
    oy[0] = 'nothing'
    x = oy[1]
    mergecheck = oy[3]
    if mergecheck == 0:
        yi_list = make_list("split", x, 1)
        for temp_yi in yi_list:
            temp_yi = temp_yi.replace("-", " ")
            if temp_yi in lang_model_.keys():
                # only accept the split if the two-word form beats the
                # fused one-word form in the unigram model
                temp_yi2 = temp_yi.replace(' ', '')
                temp_pr2 = -99
                if temp_yi2 in lang_model_1g.keys():
                    ix = lang_model_1g[temp_yi2]
                    temp_pr2 = float(ix)
                ix = lang_model_[temp_yi]
                temp_pr = float(ix)
                if temp_pr > temp_pr2 and temp_pr > best_pr:
                    oy[0] = "split"
                    oy[1] = temp_yi
                    # NOTE(review): the kept score is halved but later
                    # candidates are compared against the halved value with
                    # their unhalved temp_pr -- confirm this is intended
                    best_pr = temp_pr / 2
                    oy[2] = 1
    oy[5] = best_pr
    return oy
def f_merge(xi1, temp, landa_tmp):
    """Try merging the current candidate with the next token *xi1*; mutates
    and returns the state list *temp* ([op, candidate, split_flag,
    merge_flag, err_score, split_score, merge_score] -- layout assumed from
    callers, TODO confirm).  Skipped after a split or at sentence end."""
    state = temp
    state[0] = 'nothing'
    best_pr = -99
    # merge only when no split happened and a next token actually exists
    if state[2] == 0 and xi1 != '<\s>':
        candidate = state[1] + xi1
        if candidate in lang_model_1g:
            merged_pr = float(lang_model_1g[candidate])
            if merged_pr > best_pr:
                state[0] = "merge"
                state[1] = candidate
                state[3] = 1
                best_pr = merged_pr
    state[6] = best_pr
    return state
# ------------------------------------------------------------------------------------------
# Offline lambda optimisation (disabled): uncomment to re-fit the weights
# with L-BFGS-B; the fitted values are baked into landa_init above.
# landa_temp = optimize.minimize(pr_function, landa_init, method = 'L-BFGS-B'
#                , bounds= ((2.01e-1, 5), (2.01e-5, 5), (2.01e-5, 5), (2.01e-1, 5), (2.01e-1, 5), (2.01e-15, 5)))
# landa_temp = optimize.minimize(pr_function, landa_init, method='L-BFGS-B')
# landa = landa_temp.x
landa = landa_init
# for counter in range(k_features):
#    landa[counter] += 0.5
# print(landa)
xt = "howare youdoing n abandoneds boko aree iou sured \"that\" your namen was correct califo rnia sanf rancisco chareg up agree with"
print(test_model(xt, landa))
| [
"math.log",
"math.exp",
"PGM_h.h",
"numpy.linalg.norm"
] | [((6506, 6517), 'math.exp', 'exp', (['result'], {}), '(result)\n', (6509, 6517), False, 'from math import exp\n'), ((14239, 14255), 'math.log', 'log', (['best_pr', '(10)'], {}), '(best_pr, 10)\n', (14242, 14255), False, 'from math import log\n'), ((4711, 4723), 'math.log', 'log', (['pri', '(10)'], {}), '(pri, 10)\n', (4714, 4723), False, 'from math import log\n'), ((5093, 5133), 'PGM_h.h', 'PGM_h.h', (['vay', 'ou', 'ix', 'length_of_check', 'i'], {}), '(vay, ou, ix, length_of_check, i)\n', (5100, 5133), False, 'import PGM_h\n'), ((4684, 4699), 'numpy.linalg.norm', 'norm', (['landa_tmp'], {}), '(landa_tmp)\n', (4688, 4699), False, 'from numpy.linalg import norm\n'), ((12183, 12216), 'PGM_h.h', 'PGM_h.h', (['""""""', '"""phrase"""', 'x_ip', '(1)', '(5)'], {}), "('', 'phrase', x_ip, 1, 5)\n", (12190, 12216), False, 'import PGM_h\n')] |
# -*- coding:utf-8 -*-
import random
from collections import OrderedDict
from contextlib import contextmanager
import torch
import numpy as np
def _set_seed(seed):
""" Set seed for system, numpy and pytorch. """
np.random.seed(seed)
random.seed(seed)
torch.manual_seed(seed)
@contextmanager
def nullcontext():
    """A do-nothing context manager: enters yielding None, exits cleanly."""
    yield None
class AvgrageMeter(object):
    """Tracks a running weighted average of a scalar metric."""

    def __init__(self):
        self.reset()

    def reset(self):
        # weighted sum, total weight, and running mean
        self.sum = 0
        self.cnt = 0
        self.avg = 0

    def update(self, val, n=1):
        """Fold in *val* observed with weight *n*."""
        self.cnt = self.cnt + n
        self.sum = self.sum + val * n
        self.avg = self.sum / self.cnt

    def is_empty(self):
        """True until the first update (and again after reset)."""
        return self.cnt == 0
class EnsembleAverageMeters(object):
    """A named collection of AvgrageMeter objects, built lazily from the
    metric names seen on the first update()."""

    def __init__(self):
        self.AverageMeters = None

    def is_empty(self):
        return self.AverageMeters is None

    def update(self, perfs, n=1):
        """Fold the {name: value} dict *perfs* (each with weight *n*) into
        the per-name meters, creating them on first use."""
        if self.is_empty():
            self.AverageMeters = OrderedDict(
                (name, AvgrageMeter()) for name in perfs)
        for name, val in perfs.items():
            self.AverageMeters[name].update(val, n)

    def avgs(self):
        """OrderedDict of running averages, or None before any update."""
        if self.is_empty():
            return None
        return OrderedDict((name, meter.avg) for name, meter in self.AverageMeters.items())

    def items(self):
        """(name, meter) view, or None before any update."""
        if self.is_empty():
            return None
        return self.AverageMeters.items()

    def reset(self):
        self.AverageMeters = None
"torch.manual_seed",
"random.seed",
"numpy.random.seed"
] | [((224, 244), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (238, 244), True, 'import numpy as np\n'), ((249, 266), 'random.seed', 'random.seed', (['seed'], {}), '(seed)\n', (260, 266), False, 'import random\n'), ((271, 294), 'torch.manual_seed', 'torch.manual_seed', (['seed'], {}), '(seed)\n', (288, 294), False, 'import torch\n')] |
from __future__ import annotations
import numpy as np
import pandas as pd
from sklearn.metrics import mean_absolute_error, mean_squared_error, r2_score
from psyke.schema import DiscreteFeature
from psyke.utils import get_default_random_seed
from tuprolog.theory import Theory
from typing import Iterable
import logging
# Configure root logging at DEBUG and grab the package logger.
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger('psyke')
class Extractor(object):
    """
    An explanator capable of extracting rules from trained black box.

    Parameters
    ----------
    predictor : the underling black box predictor.
    discretization : A collection of sets of discretised features.
     Each set corresponds to a set of features derived from a single non-discrete feature.
    """

    def __init__(self, predictor, discretization: Iterable[DiscreteFeature] = None):
        self.predictor = predictor
        self.discretization = [] if discretization is None else list(discretization)

    def extract(self, dataframe: pd.DataFrame) -> Theory:
        """
        Extracts rules from the underlying predictor.

        :param dataframe: is the set of instances to be used for the extraction.
        :return: the theory created from the extracted rules.
        """
        raise NotImplementedError('extract')

    def predict(self, dataframe: pd.DataFrame) -> Iterable:
        """
        Predicts the output values of every sample in dataset.

        :param dataframe: is the set of instances to predict.
        :return: a list of predictions.
        """
        raise NotImplementedError('predict')

    def _score_inputs(self, dataframe: pd.DataFrame):
        """
        Shared scoring helper (FIX: this predict-and-mask logic was
        triplicated across mae/mse/r2): predict on all columns but the
        last, drop NaN predictions, and return the aligned true values.

        :param dataframe: instances with the target in the last column.
        :return: (true_values, predictions) with NaN predictions removed.
        """
        predictions = np.array(self.predict(dataframe.iloc[:, :-1]))
        idx = ~np.isnan(predictions)
        return dataframe.iloc[idx, -1], predictions[idx]

    def mae(self, dataframe: pd.DataFrame) -> float:
        """
        Calculates the predictions' MAE w.r.t. the instances given as input.

        :param dataframe: is the set of instances to be used to calculate the mean absolute error.
        :return: the mean absolute error (MAE) of the predictions.
        """
        true, predicted = self._score_inputs(dataframe)
        return mean_absolute_error(true, predicted)

    def mse(self, dataframe: pd.DataFrame) -> float:
        """
        Calculates the predictions' MSE w.r.t. the instances given as input.

        :param dataframe: is the set of instances to be used to calculate the mean squared error.
        :return: the mean squared error (MSE) of the predictions.
        """
        true, predicted = self._score_inputs(dataframe)
        return mean_squared_error(true, predicted)

    def r2(self, dataframe: pd.DataFrame) -> float:
        """
        Calculates the predictions' R2 score w.r.t. the instances given as input.

        :param dataframe: is the set of instances to be used to calculate the R2 score.
        :return: the R2 score of the predictions.
        """
        true, predicted = self._score_inputs(dataframe)
        return r2_score(true, predicted)

    @staticmethod
    def cart(predictor: cart.CartPredictor, discretization=None) -> Extractor:
        """
        Creates a new Cart extractor.
        """
        from psyke.cart import Cart
        return Cart(predictor, discretization)

    @staticmethod
    def iter(predictor, min_update: float = 0.1, n_points: int = 1, max_iterations: int = 600, min_examples: int = 250,
             threshold: float = 0.1, fill_gaps: bool = True, seed: int = get_default_random_seed()) -> Extractor:
        """
        Creates a new ITER extractor.
        """
        from psyke.regression.iter import ITER
        return ITER(predictor, min_update, n_points, max_iterations, min_examples, threshold, fill_gaps, seed)

    @staticmethod
    def gridex(predictor, grid, min_examples: int = 250, threshold: float = 0.1,
               seed: int = get_default_random_seed()) -> Extractor:
        """
        Creates a new GridEx extractor.
        """
        from psyke.regression.gridex import GridEx
        return GridEx(predictor, grid, min_examples, threshold, seed)

    @staticmethod
    def gridrex(predictor, grid, min_examples: int = 250, threshold: float = 0.1,
                seed: int = get_default_random_seed()) -> Extractor:
        """
        Creates a new GridREx extractor.
        """
        from psyke.regression.gridrex import GridREx
        return GridREx(predictor, grid, min_examples, threshold, seed)

    @staticmethod
    def real(predictor, discretization=None) -> Extractor:
        """
        Creates a new REAL extractor.
        """
        from psyke.classification.real import REAL
        return REAL(predictor, [] if discretization is None else discretization)

    @staticmethod
    def trepan(predictor, discretization=None, min_examples: int = 0, max_depth: int = 3,
               split_logic=None) -> Extractor:
        """
        Creates a new Trepan extractor.
        """
        from psyke.classification.trepan import Trepan, SplitLogic
        if split_logic is None:
            split_logic = SplitLogic.DEFAULT
        return Trepan(predictor, [] if discretization is None else discretization, min_examples, max_depth, split_logic)
| [
"psyke.classification.real.REAL",
"psyke.cart.Cart",
"logging.basicConfig",
"sklearn.metrics.r2_score",
"psyke.classification.trepan.Trepan",
"sklearn.metrics.mean_absolute_error",
"psyke.regression.iter.ITER",
"psyke.regression.gridrex.GridREx",
"numpy.isnan",
"psyke.regression.gridex.GridEx",
... | [((322, 362), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.DEBUG'}), '(level=logging.DEBUG)\n', (341, 362), False, 'import logging\n'), ((372, 398), 'logging.getLogger', 'logging.getLogger', (['"""psyke"""'], {}), "('psyke')\n", (389, 398), False, 'import logging\n'), ((2026, 2088), 'sklearn.metrics.mean_absolute_error', 'mean_absolute_error', (['dataframe.iloc[idx, -1]', 'predictions[idx]'], {}), '(dataframe.iloc[idx, -1], predictions[idx])\n', (2045, 2088), False, 'from sklearn.metrics import mean_absolute_error, mean_squared_error, r2_score\n'), ((2530, 2591), 'sklearn.metrics.mean_squared_error', 'mean_squared_error', (['dataframe.iloc[idx, -1]', 'predictions[idx]'], {}), '(dataframe.iloc[idx, -1], predictions[idx])\n', (2548, 2591), False, 'from sklearn.metrics import mean_absolute_error, mean_squared_error, r2_score\n'), ((3011, 3062), 'sklearn.metrics.r2_score', 'r2_score', (['dataframe.iloc[idx, -1]', 'predictions[idx]'], {}), '(dataframe.iloc[idx, -1], predictions[idx])\n', (3019, 3062), False, 'from sklearn.metrics import mean_absolute_error, mean_squared_error, r2_score\n'), ((3274, 3305), 'psyke.cart.Cart', 'Cart', (['predictor', 'discretization'], {}), '(predictor, discretization)\n', (3278, 3305), False, 'from psyke.cart import Cart\n'), ((3518, 3543), 'psyke.utils.get_default_random_seed', 'get_default_random_seed', ([], {}), '()\n', (3541, 3543), False, 'from psyke.utils import get_default_random_seed\n'), ((3683, 3782), 'psyke.regression.iter.ITER', 'ITER', (['predictor', 'min_update', 'n_points', 'max_iterations', 'min_examples', 'threshold', 'fill_gaps', 'seed'], {}), '(predictor, min_update, n_points, max_iterations, min_examples,\n threshold, fill_gaps, seed)\n', (3687, 3782), False, 'from psyke.regression.iter import ITER\n'), ((3906, 3931), 'psyke.utils.get_default_random_seed', 'get_default_random_seed', ([], {}), '()\n', (3929, 3931), False, 'from psyke.utils import get_default_random_seed\n'), ((4077, 4131), 
'psyke.regression.gridex.GridEx', 'GridEx', (['predictor', 'grid', 'min_examples', 'threshold', 'seed'], {}), '(predictor, grid, min_examples, threshold, seed)\n', (4083, 4131), False, 'from psyke.regression.gridex import GridEx\n'), ((4261, 4286), 'psyke.utils.get_default_random_seed', 'get_default_random_seed', ([], {}), '()\n', (4284, 4286), False, 'from psyke.utils import get_default_random_seed\n'), ((4435, 4490), 'psyke.regression.gridrex.GridREx', 'GridREx', (['predictor', 'grid', 'min_examples', 'threshold', 'seed'], {}), '(predictor, grid, min_examples, threshold, seed)\n', (4442, 4490), False, 'from psyke.regression.gridrex import GridREx\n'), ((4697, 4762), 'psyke.classification.real.REAL', 'REAL', (['predictor', '([] if discretization is None else discretization)'], {}), '(predictor, [] if discretization is None else discretization)\n', (4701, 4762), False, 'from psyke.classification.real import REAL\n'), ((5142, 5251), 'psyke.classification.trepan.Trepan', 'Trepan', (['predictor', '([] if discretization is None else discretization)', 'min_examples', 'max_depth', 'split_logic'], {}), '(predictor, [] if discretization is None else discretization,\n min_examples, max_depth, split_logic)\n', (5148, 5251), False, 'from psyke.classification.trepan import Trepan, SplitLogic\n'), ((1989, 2010), 'numpy.isnan', 'np.isnan', (['predictions'], {}), '(predictions)\n', (1997, 2010), True, 'import numpy as np\n'), ((2493, 2514), 'numpy.isnan', 'np.isnan', (['predictions'], {}), '(predictions)\n', (2501, 2514), True, 'import numpy as np\n'), ((2974, 2995), 'numpy.isnan', 'np.isnan', (['predictions'], {}), '(predictions)\n', (2982, 2995), True, 'import numpy as np\n')] |
import random
import time
import numbers
import numpy as np
from collections import deque
def append_angle(Rover, new_angle):
    '''Record the newest steering angle in the rover's history deque and
    trim the oldest entries until at most ``use_last_n_angles_for_steering``
    remain. Returns the (mutated) history deque.'''
    history = Rover.last_nav_angles
    history.append(new_angle)
    while len(history) > Rover.use_last_n_angles_for_steering:
        history.popleft()
    return history
# This is where you can build a decision tree for determining throttle, brake and steer
# commands based on the output of the perception_step() function
def decision_step(Rover):
    """Choose throttle/brake/steer and driving mode from perception data.

    Mutates the passed-in Rover state object in place and returns it.
    Modes handled: 'forward' (drive toward navigable terrain or a rock
    sample), 'stop' (brake, then 4-wheel turn until a path opens) and
    'unstick' (back up and spin when the position has stopped changing).
    NOTE(review): assumes Rover exposes nav_angles, rock_* fields, pos,
    vel, mode, the timing fields and the tuning constants -- these are
    set up by the driver script, confirm against it.
    """
    # Implement conditionals to decide what to do given perception data
    # Here you're all set up with some basic functionality but you'll need to
    # improve on this decision tree to do a good job of navigating autonomously!
    # Example:
    # Check if we have vision data to make decisions with
    if Rover.nav_angles is not None:
        # Check for Rover.mode status
        # positions are compared after scaling by this factor and truncating,
        # i.e. at a resolution of 1/stuck_precision world units
        stuck_precision = 10
        if Rover.mode == 'unstick':
            turn_direction = 1
            if time.time() - Rover.time_last_checked_pos > 1:
                # we've been at this for at least 1 second (or just started)
                # check if we're still stuck
                x, y = Rover.pos
                # round to the stuck-detection resolution
                x = int(x * stuck_precision)
                y = int(y * stuck_precision)
                if Rover.last_pos != (x, y):
                    # we seem to have moved
                    # try going forward again
                    Rover.mode = 'forward'
                else:
                    # still stuck: randomise the turn direction for this attempt
                    turn_direction = [-1, 1][random.randrange(2)]
                    Rover.steer = turn_direction * 15
                Rover.time_last_checked_pos = time.time()
                Rover.last_pos = (x, y)
            # stuck - release the brake and reverse while turning
            Rover.brake = 0
            Rover.throttle = -10 # try backing up hard
        # deliberately NOT an elif: 'unstick' may switch the mode back to
        # 'forward' above, and we want to act on it within the same frame
        if Rover.mode == 'forward':
            # calculate the go-forward distance (for use later in if case)
            if Rover.rock_in_sight:
                if Rover.near_sample:
                    # if we're close enough to pick up - just stop
                    current_max_vel = 0
                else:
                    # otherwise, slow down as we get closer. Limit speed to Rover.max_vel.
                    current_max_vel = min(0.01 * np.mean(Rover.rock_dists), Rover.max_vel)
                # set stop_forward_distance to rock_stop_forward
                stop_forward_distance = Rover.rock_stop_forward
            else:
                # no rock near here, just go at max vel
                current_max_vel = Rover.max_vel
                # use regular stop_forward when no rock present
                stop_forward_distance = Rover.stop_forward
            # check pos every Rover.stuck_timeout seconds
            # if it's identical, and velocity is low, assume we're stuck
            if time.time() - Rover.time_last_checked_pos > Rover.stuck_timeout:
                x, y = Rover.pos
                # round to the stuck-detection resolution
                x = int(x * stuck_precision)
                y = int(y * stuck_precision)
                if Rover.vel < current_max_vel:
                    if Rover.last_pos == (x, y):
                        # stuck!
                        # Set mode to "unstick" and hit the brakes!
                        Rover.throttle = 0
                        # Set brake to stored brake value
                        Rover.brake = Rover.brake_set
                        Rover.steer = 0
                        Rover.mode = 'unstick'
                Rover.time_last_checked_pos = time.time()
                Rover.last_pos = (x, y)
            # Check the extent of navigable terrain
            elif len(Rover.nav_angles) >= stop_forward_distance:
                # If mode is forward, navigable terrain looks good
                # and velocity is below max, then throttle, else brake
                if Rover.vel > current_max_vel:
                    # remove throttle and apply brake - but gently
                    Rover.throttle = 0
                    Rover.brake = Rover.brake_set * 0.1
                elif Rover.vel < current_max_vel:
                    # Set throttle value to throttle setting
                    Rover.throttle = Rover.throttle_set
                    Rover.brake = 0
                else: # Else coast
                    Rover.throttle = 0
                    Rover.brake = 0
                # Set steering to average angle clipped to the range +/- 15
                # use an historic weighted average to try and steer more to one side than the other (avoid circles)
                # check if there's a rock nearby. If so, aim for it.
                if Rover.rock_in_sight:
                    steer_degrees = np.mean(Rover.rock_angles * 180/np.pi)
                else:
                    # append the mean nav_angle to the history - this smooths out angles
                    last_angles = np.mean(append_angle(Rover, np.mean(Rover.nav_angles * 180/np.pi)))
                    # bias the steering slightly toward the recent average heading
                    angle_compensation = last_angles * 0.1
                    steer_degrees = np.mean((Rover.nav_angles * 180/np.pi) + angle_compensation)
                Rover.steer = np.clip(steer_degrees, -15, 15)
            # If there's a lack of navigable terrain pixels then go to 'stop' mode
            elif len(Rover.nav_angles) < stop_forward_distance:
                # Set mode to "stop" and hit the brakes!
                Rover.throttle = 0
                # Set brake to stored brake value
                Rover.brake = Rover.brake_set
                Rover.steer = 0
                Rover.mode = 'stop'
        # If we're already in "stop" mode then make different decisions
        elif Rover.mode == 'stop':
            # If we're in stop mode but still moving keep braking
            if Rover.vel > 0.2:
                Rover.throttle = 0
                Rover.brake = Rover.brake_set
                Rover.steer = 0
            # If we're not moving (vel < 0.2) then do something else
            elif Rover.vel <= 0.2:
                # Now we're stopped and we have vision data to see if there's a path forward
                if len(Rover.nav_angles) < Rover.go_forward:
                    Rover.throttle = 0
                    # Release the brake to allow turning
                    Rover.brake = 0
                    # Turn range is +/- 15 degrees, when stopped the next line will induce 4-wheel turning
                    Rover.steer = -15 # Could be more clever here about which way to turn
                # If we're stopped but see sufficient navigable terrain in front then go!
                if len(Rover.nav_angles) >= Rover.go_forward:
                    # Set throttle back to stored value
                    Rover.throttle = Rover.throttle_set
                    # Release the brake
                    Rover.brake = 0
                    # Set steer to mean angle
                    Rover.steer = np.clip(np.mean(Rover.nav_angles * 180/np.pi), -15, 15)
                    Rover.mode = 'forward'
    # Just to make the rover do something
    # even if no modifications have been made to the code
    else:
        Rover.throttle = Rover.throttle_set
        Rover.steer = 0
        Rover.brake = 0
    # If in a state where want to pickup a rock send pickup command
    if Rover.near_sample and Rover.vel == 0 and not Rover.picking_up:
        Rover.send_pickup = True
    return Rover
| [
"numpy.mean",
"random.randrange",
"numpy.clip",
"time.time"
] | [((1857, 1868), 'time.time', 'time.time', ([], {}), '()\n', (1866, 1868), False, 'import time\n'), ((3721, 3732), 'time.time', 'time.time', ([], {}), '()\n', (3730, 3732), False, 'import time\n'), ((1094, 1105), 'time.time', 'time.time', ([], {}), '()\n', (1103, 1105), False, 'import time\n'), ((3022, 3033), 'time.time', 'time.time', ([], {}), '()\n', (3031, 3033), False, 'import time\n'), ((5342, 5373), 'numpy.clip', 'np.clip', (['steer_degrees', '(-15)', '(15)'], {}), '(steer_degrees, -15, 15)\n', (5349, 5373), True, 'import numpy as np\n'), ((1736, 1755), 'random.randrange', 'random.randrange', (['(2)'], {}), '(2)\n', (1752, 1755), False, 'import random\n'), ((4907, 4947), 'numpy.mean', 'np.mean', (['(Rover.rock_angles * 180 / np.pi)'], {}), '(Rover.rock_angles * 180 / np.pi)\n', (4914, 4947), True, 'import numpy as np\n'), ((5250, 5310), 'numpy.mean', 'np.mean', (['(Rover.nav_angles * 180 / np.pi + angle_compensation)'], {}), '(Rover.nav_angles * 180 / np.pi + angle_compensation)\n', (5257, 5310), True, 'import numpy as np\n'), ((2478, 2503), 'numpy.mean', 'np.mean', (['Rover.rock_dists'], {}), '(Rover.rock_dists)\n', (2485, 2503), True, 'import numpy as np\n'), ((5115, 5154), 'numpy.mean', 'np.mean', (['(Rover.nav_angles * 180 / np.pi)'], {}), '(Rover.nav_angles * 180 / np.pi)\n', (5122, 5154), True, 'import numpy as np\n'), ((7114, 7153), 'numpy.mean', 'np.mean', (['(Rover.nav_angles * 180 / np.pi)'], {}), '(Rover.nav_angles * 180 / np.pi)\n', (7121, 7153), True, 'import numpy as np\n')] |
import matplotlib.pyplot as plt
import numpy as np
from config import *
def get_name_dict():
    """Load the dataset-name abbreviation mapping from config/data_names.json.

    The file stores a Python dict literal. It is parsed with
    ``ast.literal_eval`` instead of the original bare ``eval()``, which
    would have executed arbitrary code found in the config file.
    """
    import ast  # local import: this module gets its other names via `from config import *`
    with open('config/data_names.json') as ifs:
        return ast.literal_eval(ifs.read())
def get_algorithm_name_dict():
    """Return the algorithm-abbreviation mapping stored under the
    "algorithm_abbr" key of config/algorithm_info.json."""
    with open('config/algorithm_info.json') as ifs:
        info = json.load(ifs)
    return info["algorithm_abbr"]
# data set abbreviation dictionary
data_names = get_name_dict()
# figure parameters
FIG_SIZE_MULTIPLE = (16, 4)  # (width, height) in inches for the grouped-bar figure
LABEL_SIZE = 22
TICK_SIZE = 22
LEGEND_SIZE = 22
# Algorithm tags as they appear in the experiment logs.
# NOTE(review): the 'avx512' tags presumably come from KNL runs and the
# 'avx2' tags from the regular CPU -- confirm against the benchmark setup.
knl_merge_vec_speedup_tag = 'scan-xp-avx512-merge'
knl_pivot_vec_speedup_tag = 'scan-xp-avx512-galloping-single'
knl_hybrid_vec_speedup_tag = 'scan-xp-avx512-hybrid'
knl_bitmap_tag = 'scan-xp-naive-bitvec-hbw-2d'
cpu_merge_vec_speedup_tag = 'scan-xp-avx2-merge'
cpu_pivot_vec_speedup_tag = 'scan-xp-avx2-galloping-single'
cpu_hybrid_vec_speedup_tag = 'scan-xp-avx2-hybrid'
cpu_bitmap_tag = 'scan-xp-naive-bitvec-2d'
# human-readable names, keyed by the tags above
algorithm_name_dict = get_algorithm_name_dict()
# plotting order: the four KNL variants followed by the four CPU variants
algorithm_tag_lst = [knl_merge_vec_speedup_tag, knl_pivot_vec_speedup_tag, knl_hybrid_vec_speedup_tag, knl_bitmap_tag,
                     cpu_merge_vec_speedup_tag, cpu_pivot_vec_speedup_tag, cpu_hybrid_vec_speedup_tag, cpu_bitmap_tag]
def get_speedup_lst_over_merge(tag):
    """Return the hard-coded speedups over the Merge baseline for `tag`.

    Each list holds one value per skew setting (d_u = 8e5, 2e5, 5e4,
    1.25e4 -- matching the x-axis groups in draw_overview_elapsed_time);
    a value is merge_time / algorithm_time, so the merge baselines are
    all ones. Unknown tags fall back to all-ones.

    Fix: the original compared tags with ``is`` (object identity), which
    only worked because callers passed the exact module-level string
    objects; an equal-but-distinct string silently fell through to the
    default. A dict lookup compares by equality instead.
    """
    speedups = {
        knl_merge_vec_speedup_tag: [1, 1, 1, 1],
        knl_pivot_vec_speedup_tag: [0.402 / 0.371, 4.815 / 1.393, 3.682 / 0.513, 308.728 / 10.239],
        knl_hybrid_vec_speedup_tag: [0.402 / 0.306, 4.815 / 1.333, 3.682 / 0.503, 308.728 / 10.822],
        knl_bitmap_tag: [0.402 / 0.150, 4.815 / 0.321, 3.682 / 0.094, 308.728 / 0.868],
        cpu_merge_vec_speedup_tag: [1, 1, 1, 1],
        cpu_pivot_vec_speedup_tag: [0.403 / 0.159, 4.234 / 0.491, 3.510 / 0.204, 117.771 / 3.300],
        cpu_hybrid_vec_speedup_tag: [0.403 / 0.184, 4.234 / 0.590, 3.510 / 0.217, 117.771 / 3.479],
        cpu_bitmap_tag: [0.403 / 0.067, 4.234 / 0.143, 3.510 / 0.043, 117.771 / 0.332],
    }
    return speedups.get(tag, [1, 1, 1, 1])
def draw_overview_elapsed_time():
    """Draw grouped speedup-over-Merge bars for all algorithms.

    One bar group per skew setting d_u; the four KNL (AVX-512) bars sit on
    the left of each group and the four CPU (AVX2) bars are shifted
    slightly right. Saves the figure to ./figures/synthetic_deg.pdf.
    """
    g_names = list(
        reversed(['$d_u = 8\\cdot 10^5$', '$d_u = 2\\cdot 10^5$', '$d_u = 5\\cdot 10^4$', '$d_u = 1.25\\cdot 10^4$']))
    size_of_fig = (FIG_SIZE_MULTIPLE[0], FIG_SIZE_MULTIPLE[1])
    fig, ax = plt.subplots()
    N = len(g_names)
    # bar geometry
    width = 0.08
    ind = 1.1 * np.arange(N)  # the x locations for the groups
    # Fix: the original assigned `map(...)` and then mutated the result by
    # index, which only works on Python 2 (a Python 3 `map` object supports
    # neither indexing nor item assignment); materialise a real list.
    indent_lst = [ind + idx * width for idx in range(8)]
    # shift the CPU bars (last four) right to visually separate the groups
    # (fix: `xrange` is a Python-2-only builtin)
    for idx in range(4, 8):
        indent_lst[idx] += 0.75 * width
    # per-bar appearance (index 0 of each list is skipped via the [1:] slices)
    hatch_lst = [
        '',
        '|||', '.', "**",
        '',
        'O', '\\', 'x', '--', '++', '//', 'o']
    label_lst = [algorithm_name_dict[exec_name] for exec_name in algorithm_tag_lst]
    color_lst = [
        '#fe01b1',
        'orange', 'green', 'red',
        'black',
        '#ceb301', 'm', 'brown', 'k',
        'purple', 'blue', 'gray']
    # 1st: bars
    for idx, tag in enumerate(algorithm_tag_lst):
        ax.bar(indent_lst[idx], get_speedup_lst_over_merge(tag), width, hatch=hatch_lst[1:][idx], label=label_lst[idx],
               edgecolor=color_lst[1:][idx], fill=False)
    # 2nd: x and y's ticks and labels
    ax.set_xticks(ind + 3 * width)
    # (dropped the identity `map(lambda name: name, ...)` wrapper, which
    # returned a lazy iterator on Python 3)
    ax.set_xticklabels(g_names, fontsize=LABEL_SIZE)
    plt.xticks(fontsize=TICK_SIZE)
    ax.set_ylabel("Speedup over Merge", fontsize=LABEL_SIZE - 2)
    plt.yticks(fontsize=TICK_SIZE)
    ax.set_yscale('log')
    plt.ylim(0.1, 40000)
    plt.yticks([1, 10, 100, 1000], fontsize=TICK_SIZE)
    # 3rd: figure properties
    fig.set_size_inches(*size_of_fig)  # set ratio
    plt.legend(prop={'size': LEGEND_SIZE - 2, "weight": "bold"}, loc="upper left", ncol=4)
    fig.savefig("./figures/" + 'synthetic_deg.pdf', bbox_inches='tight', dpi=300)
if __name__ == '__main__':
    # Portable replacement for os.system('mkdir -p figures'): avoids
    # spawning a shell and works on platforms without a `mkdir -p`.
    if not os.path.isdir('figures'):
        os.makedirs('figures')
    draw_overview_elapsed_time()
| [
"matplotlib.pyplot.ylim",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.yticks",
"numpy.arange",
"matplotlib.pyplot.xticks",
"matplotlib.pyplot.subplots"
] | [((2409, 2423), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (2421, 2423), True, 'import matplotlib.pyplot as plt\n'), ((3449, 3479), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'fontsize': 'TICK_SIZE'}), '(fontsize=TICK_SIZE)\n', (3459, 3479), True, 'import matplotlib.pyplot as plt\n'), ((3548, 3578), 'matplotlib.pyplot.yticks', 'plt.yticks', ([], {'fontsize': 'TICK_SIZE'}), '(fontsize=TICK_SIZE)\n', (3558, 3578), True, 'import matplotlib.pyplot as plt\n'), ((3608, 3628), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(0.1)', '(40000)'], {}), '(0.1, 40000)\n', (3616, 3628), True, 'import matplotlib.pyplot as plt\n'), ((3633, 3683), 'matplotlib.pyplot.yticks', 'plt.yticks', (['[1, 10, 100, 1000]'], {'fontsize': 'TICK_SIZE'}), '([1, 10, 100, 1000], fontsize=TICK_SIZE)\n', (3643, 3683), True, 'import matplotlib.pyplot as plt\n'), ((3769, 3860), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'prop': "{'size': LEGEND_SIZE - 2, 'weight': 'bold'}", 'loc': '"""upper left"""', 'ncol': '(4)'}), "(prop={'size': LEGEND_SIZE - 2, 'weight': 'bold'}, loc=\n 'upper left', ncol=4)\n", (3779, 3860), True, 'import matplotlib.pyplot as plt\n'), ((2496, 2508), 'numpy.arange', 'np.arange', (['N'], {}), '(N)\n', (2505, 2508), True, 'import numpy as np\n')] |
# https://medium.com/@thomascountz/19-line-line-by-line-python-perceptron-b6f113b161f3
"""
MIT License
Copyright (c) 2018 <NAME>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import numpy as np
class Perceptron(object):
    """Classic Rosenblatt perceptron with a verbose (print-heavy) training loop.

    ``weights[0]`` is the bias term; ``weights[1:]`` align with the input
    features.
    """

    def __init__(self, no_of_inputs, threshold=100, learning_rate=0.01):
        """Create a zero-initialised perceptron.

        Note: `threshold` is the number of training epochs, not an
        activation threshold.
        """
        print(f"Init! Threshold: {threshold}, Learning rate: {learning_rate}, Number of inputs = {no_of_inputs}")
        self.threshold = threshold
        self.learning_rate = learning_rate
        self.weights = np.zeros(no_of_inputs + 1)

    def predict(self, inputs):
        """Return 1 if the weighted sum plus bias is strictly positive, else 0."""
        print(f"Prediction for: {inputs}")
        summation = np.dot(inputs, self.weights[1:]) + self.weights[0]
        if summation > 0:
            activation = 1
        else:
            activation = 0
        print(f"Activation: {activation}")
        return activation

    def train(self, training_inputs, labels):
        """Run the perceptron learning rule for `self.threshold` epochs."""
        print("Train")
        for _ in range(self.threshold):
            for inputs, label in zip(training_inputs, labels):
                print(f"- - -")
                print(f"Inputs: {inputs} --- Label: {label}")
                print(f"Weights before: {self.weights}")
                prediction = self.predict(inputs)
                # Fix: compute the update once and reuse it -- the original
                # evaluated learning_rate * (label - prediction) three times.
                weight_single_addition = self.learning_rate * (label - prediction)
                weight_array_addition = weight_single_addition * inputs
                print(f"Addition for inputs: {weight_array_addition}")
                self.weights[1:] += weight_array_addition
                print(f"Addition for single add in: {weight_single_addition}")
                self.weights[0] += weight_single_addition
                print(f"Weights after: {self.weights}")
class PerceptronTest():
    """Ad-hoc smoke tests for Perceptron (driven from __main__, not pytest)."""

    def test_mimics_logical_and(self):
        """A hand-picked weight vector should behave like logical AND on (1, 1)."""
        a, b = 1, 1
        inputs = np.array([a, b])
        perceptron = Perceptron(inputs.size)
        perceptron.weights = np.array([-1, 1, 1])
        output = perceptron.predict(inputs)
        print(output, a & b)

    def test_trains_for_logical_and(self):
        """Training on the full AND truth table should reproduce AND for (1, 1)."""
        labels = np.array([1, 0, 0, 0])
        input_matrix = [np.array(pair) for pair in ([1, 1], [1, 0], [0, 1], [0, 0])]
        perceptron = Perceptron(2, threshold=10, learning_rate=1)
        perceptron.train(input_matrix, labels)
        a, b = 1, 1
        inputs = np.array([a, b])
        output = perceptron.predict(inputs)
        print(output, a & b)
if __name__ == '__main__':
    # Fix: the local variable was named `unittest`, shadowing the stdlib
    # module of the same name; renamed for clarity.
    test_suite = PerceptronTest()
    test_suite.test_mimics_logical_and()
    test_suite.test_trains_for_logical_and()
| [
"numpy.array",
"numpy.dot",
"numpy.zeros"
] | [((1491, 1517), 'numpy.zeros', 'np.zeros', (['(no_of_inputs + 1)'], {}), '(no_of_inputs + 1)\n', (1499, 1517), True, 'import numpy as np\n'), ((2847, 2867), 'numpy.array', 'np.array', (['[-1, 1, 1]'], {}), '([-1, 1, 1])\n', (2855, 2867), True, 'import numpy as np\n'), ((2914, 2930), 'numpy.array', 'np.array', (['[a, b]'], {}), '([a, b])\n', (2922, 2930), True, 'import numpy as np\n'), ((3149, 3171), 'numpy.array', 'np.array', (['[1, 0, 0, 0]'], {}), '([1, 0, 0, 0])\n', (3157, 3171), True, 'import numpy as np\n'), ((3542, 3558), 'numpy.array', 'np.array', (['[a, b]'], {}), '([a, b])\n', (3550, 3558), True, 'import numpy as np\n'), ((1624, 1656), 'numpy.dot', 'np.dot', (['inputs', 'self.weights[1:]'], {}), '(inputs, self.weights[1:])\n', (1630, 1656), True, 'import numpy as np\n'), ((3226, 3242), 'numpy.array', 'np.array', (['[1, 1]'], {}), '([1, 1])\n', (3234, 3242), True, 'import numpy as np\n'), ((3272, 3288), 'numpy.array', 'np.array', (['[1, 0]'], {}), '([1, 0])\n', (3280, 3288), True, 'import numpy as np\n'), ((3318, 3334), 'numpy.array', 'np.array', (['[0, 1]'], {}), '([0, 1])\n', (3326, 3334), True, 'import numpy as np\n'), ((3364, 3380), 'numpy.array', 'np.array', (['[0, 0]'], {}), '([0, 0])\n', (3372, 3380), True, 'import numpy as np\n')] |
import os
import numpy as np
import torch
from torch.autograd import Variable
import torch.nn.functional as F
from torch import nn
import random
import scipy.io
from sklearn.decomposition import non_negative_factorization
# Fix the RNG seeds so shuffling and negative sampling are reproducible
# (`random` drives the sampling in generate_data_2, `torch` the DataLoader shuffle).
random.seed(3)
torch.manual_seed(4)
def pre_processed_DTnet_1():
    """Extract (drug, protein) interaction pairs from deepDTnet/drugProtein.txt.

    Reads the 0/1 drug-protein incidence matrix and collects the row/column
    index of every entry equal to 1, printing the matrix dimensions and the
    pair count along the way (as before).

    Returns:
        list[list[int]]: ``[drug_idx, protein_idx]`` pairs in row-major
        order. (The original returned ``None`` and only printed; returning
        the list is backward compatible since no caller used the result.)
    """
    dataset_dir = os.path.sep.join(['deepDTnet'])
    i_m = np.genfromtxt(os.path.sep.join([dataset_dir, 'drugProtein.txt']), dtype=np.int32)
    print(len(i_m), len(i_m[0]))
    # np.argwhere replaces the original O(rows * cols) Python double loop;
    # it yields the same pairs in the same (row-major) order.
    edge = [[int(i), int(j)] for i, j in np.argwhere(i_m == 1)]
    print(len(edge))
    return edge
def load_data_deepDTnet(dataset_train="DTnet_train_0.8_0", dataset_test="DTnet_test_0.8_0"):
    """Load one train/test split of the deepDTnet drug-target data.

    Reads the train/test edge lists, the full drug-protein incidence matrix
    and the drug/protein disease-association matrices from the deepDTnet
    directory.

    Returns:
        drugDisease: (n_drugs, n_diseases) tensor.
        proteinDisease: (n_proteins, n_diseases) tensor.
        drug_feat / prot_feat: identity feature matrices of size 732 / 1915
            (the full deepDTnet dataset sizes -- hard-coded).
        H, H_T: protein-by-drug / drug-by-protein incidence tensors built
            from the *training* edges only.
        edge_test: test edges; the first half are observed positives, the
            second half sampled negatives (see generate_data_2).
        test: ground-truth 0/1 labels for edge_test.
    """
    dataset_dir = os.path.sep.join(['deepDTnet'])
    # build incidence matrices (test edges are held out of H/H_T)
    edge_train = np.genfromtxt(os.path.sep.join([dataset_dir, '{}.txt'.format(dataset_train)]), dtype=np.int32)
    edge_all = np.genfromtxt(os.path.sep.join([dataset_dir, '{}.txt'.format("deepDTnet_all")]), dtype=np.int32)
    edge_test = np.genfromtxt(os.path.sep.join([dataset_dir, '{}.txt'.format(dataset_test)]), dtype=np.int32)
    i_m = np.genfromtxt(os.path.sep.join([dataset_dir, 'drugProtein.txt']), dtype=np.int32)
    H_T = np.zeros((len(i_m), len(i_m[0])), dtype=np.int32)
    H_T_all = np.zeros((len(i_m), len(i_m[0])), dtype=np.int32)
    for i in edge_train:
        H_T[i[0]][i[1]] = 1
    for i in edge_all:
        H_T_all[i[0]][i[1]] = 1
    test = np.zeros(len(edge_test))
    for i in range(len(test)):
        # The first half of edge_test are the positive (observed) pairs and
        # the second half sampled negatives (generate_data_2 appends exactly
        # as many negatives as positives). Fix: the original used
        # ``i <= len(edge_test) // 2`` which also labelled the first negative
        # as positive (off-by-one); strict ``<`` marks exactly the first half.
        if i < len(edge_test) // 2:
            test[i] = 1
    np.set_printoptions(threshold=np.inf)  # NOTE: global numpy side effect
    H_T = torch.Tensor(H_T)
    H = H_T.t()
    # full-graph incidence, kept for completeness; currently not returned
    H_T_all = torch.Tensor(H_T_all)
    H_all = H_T_all.t()
    print("deepDTnet", H.size())  # full data set: 1915 proteins x 732 drugs
    drug_feat = torch.eye(732)
    prot_feat = torch.eye(1915)
    drugDisease = torch.Tensor(np.genfromtxt(os.path.sep.join([dataset_dir, 'drugDisease.txt']), dtype=np.int32))  # 732, 440
    proteinDisease = torch.Tensor(np.genfromtxt(os.path.sep.join([dataset_dir, 'proteinDisease.txt']), dtype=np.int32))  # 1915, 440
    return drugDisease, proteinDisease, drug_feat, prot_feat, H, H_T, edge_test, test
def generate_data_2(dataset_str="drug_target_interaction"):
    """Split the (drug, protein) edge list into train/test files on disk.

    Shuffles the edges, holds out a `test_ration` fraction as positive
    test edges, samples an equal number of non-edges as negatives, and
    writes ``DTnet_train_{ratio}_{fold}.txt`` / ``DTnet_test_{ratio}_{fold}.txt``
    under the deepDTnet directory. Test files contain positives first,
    then the sampled negatives (load_data_deepDTnet relies on this order).
    NOTE(review): the 732 / 1915 bounds below match the deepDTnet drug /
    protein counts -- confirm if reusing for another dataset.
    """
    # split the data set into a training set and a test set
    dataset_dir = os.path.sep.join(['deepDTnet'])
    # edge = np.genfromtxt("edges.txt", dtype=np.int32)
    edge = np.genfromtxt(os.path.sep.join([dataset_dir, '{}.txt'.format(dataset_str)]), dtype=np.int32)  # dtype='U75'
    # print(edge)
    # DataLoader with shuffle=True is (ab)used here purely to randomise edge order
    data = torch.utils.data.DataLoader(edge, shuffle=True)
    edge_shuffled = []
    for i in data:
        edge_shuffled.append(i[0].tolist())
    # print(edge_shuffled)
    # drugs = []
    # targets = []
    # for i in edge:
    #     if i[0] not in drugs:
    #         drugs.append(i[0])
    #     if i[1] not in targets:
    #         targets.append(i[1])
    test_ration = [0.2]  # held-out fraction(s); one output file pair per value
    for d in test_ration:
        # only fold 0 is generated; raise range(1) for more cross-validation folds
        for a in (range(1)):
            # consecutive slice of the shuffled edges = positive test edges
            edge_test = edge_shuffled[a * int(len(edge_shuffled) * d): (a + 1) * int(len(edge_shuffled) * d)]
            edge_train = edge_shuffled[: a * int(len(edge_shuffled) * d)] + edge_shuffled[(a + 1) * int(len(edge_shuffled) * d):]
            test_zeros = []
            # rejection-sample as many negative (non-edge) pairs as positives
            while len(test_zeros) < len(edge_test) * 1:
                x1 = random.sample(range(0, 732), 1)[0]
                y1 = random.sample(range(0, 1915), 1)[0]
                if [x1, y1] not in edge.tolist() and [x1, y1] not in test_zeros:
                    test_zeros.append([x1, y1])
            # positives first, then negatives (order matters downstream)
            edge_test = edge_test + test_zeros
            with open(os.path.sep.join([dataset_dir, "DTnet_train_{ratio}_{fold}.txt".format(ratio=d, fold=a)]), "w") as f0:
                for i in range(len(edge_train)):
                    # strip list syntax so each line is "drug_idx protein_idx"
                    s = str(edge_train[i]).replace('[', ' ').replace(']', ' ')
                    s = s.replace("'", ' ').replace(',', '') + '\n'
                    f0.write(s)
            with open(os.path.sep.join([dataset_dir, "DTnet_test_{ratio}_{fold}.txt".format(ratio=d, fold=a)]), "w") as f1:
                for i in range(len(edge_test)):
                    s = str(edge_test[i]).replace('[', ' ').replace(']', ' ')
                    s = s.replace("'", ' ').replace(',', '') + '\n'
                    f1.write(s)
    # with open(os.path.sep.join([dataset_dir, "DTnet_all.txt"]), "w") as f3:
    #     for i in range(len(edge)):
    #         s = str(edge[i]).replace('[', ' ').replace(']', ' ')
    #         s = s.replace("'", ' ').replace(',', '') + '\n'
    #         f3.write(s)
| [
"numpy.set_printoptions",
"torch.eye",
"torch.utils.data.DataLoader",
"torch.manual_seed",
"torch.Tensor",
"random.seed",
"os.path.sep.join"
] | [((233, 247), 'random.seed', 'random.seed', (['(3)'], {}), '(3)\n', (244, 247), False, 'import random\n'), ((249, 269), 'torch.manual_seed', 'torch.manual_seed', (['(4)'], {}), '(4)\n', (266, 269), False, 'import torch\n'), ((321, 352), 'os.path.sep.join', 'os.path.sep.join', (["['deepDTnet']"], {}), "(['deepDTnet'])\n", (337, 352), False, 'import os\n'), ((1065, 1096), 'os.path.sep.join', 'os.path.sep.join', (["['deepDTnet']"], {}), "(['deepDTnet'])\n", (1081, 1096), False, 'import os\n'), ((2817, 2854), 'numpy.set_printoptions', 'np.set_printoptions', ([], {'threshold': 'np.inf'}), '(threshold=np.inf)\n', (2836, 2854), True, 'import numpy as np\n'), ((2868, 2885), 'torch.Tensor', 'torch.Tensor', (['H_T'], {}), '(H_T)\n', (2880, 2885), False, 'import torch\n'), ((2918, 2939), 'torch.Tensor', 'torch.Tensor', (['H_T_all'], {}), '(H_T_all)\n', (2930, 2939), False, 'import torch\n'), ((3029, 3043), 'torch.eye', 'torch.eye', (['(732)'], {}), '(732)\n', (3038, 3043), False, 'import torch\n'), ((3061, 3076), 'torch.eye', 'torch.eye', (['(1915)'], {}), '(1915)\n', (3070, 3076), False, 'import torch\n'), ((3532, 3563), 'os.path.sep.join', 'os.path.sep.join', (["['deepDTnet']"], {}), "(['deepDTnet'])\n", (3548, 3563), False, 'import os\n'), ((3774, 3821), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['edge'], {'shuffle': '(True)'}), '(edge, shuffle=True)\n', (3801, 3821), False, 'import torch\n'), ((378, 428), 'os.path.sep.join', 'os.path.sep.join', (["[dataset_dir, 'drugProtein.txt']"], {}), "([dataset_dir, 'drugProtein.txt'])\n", (394, 428), False, 'import os\n'), ((2301, 2351), 'os.path.sep.join', 'os.path.sep.join', (["[dataset_dir, 'drugProtein.txt']"], {}), "([dataset_dir, 'drugProtein.txt'])\n", (2317, 2351), False, 'import os\n'), ((3123, 3173), 'os.path.sep.join', 'os.path.sep.join', (["[dataset_dir, 'drugDisease.txt']"], {}), "([dataset_dir, 'drugDisease.txt'])\n", (3139, 3173), False, 'import os\n'), ((3253, 3306), 'os.path.sep.join', 
'os.path.sep.join', (["[dataset_dir, 'proteinDisease.txt']"], {}), "([dataset_dir, 'proteinDisease.txt'])\n", (3269, 3306), False, 'import os\n')] |
#! /usr/bin/env python
##########################################################################
# NSAp - Copyright (C) CEA, 2013 - 2017
# Distributed under the terms of the CeCILL-B license, as published by
# the CEA-CNRS-INRIA. Refer to the LICENSE file or to
# http://www.cecill.info/licences/Licence_CeCILL-B_V1-en.html
# for details.
#
# Code V Frouin
##########################################################################
"""
Monkey patch on pyradiomics module.
"""
# System import
import os
import collections
# Third party import
import numpy as np
import six
import matplotlib.pyplot as plt
import SimpleITK
from radiomics.featureextractor import RadiomicsFeaturesExtractor
class BroncoRadiomicsFeaturesExtractor(RadiomicsFeaturesExtractor):
outdir = None
def computeFeatures(self, image, mask, inputImageName, **kwargs):
#######################################################################
# Original code
featureVector = collections.OrderedDict()
# Calculate feature classes
for featureClassName, enabledFeatures in six.iteritems(
self.enabledFeatures):
# Handle calculation of shape features separately
if featureClassName == 'shape':
continue
if featureClassName in self.getFeatureClassNames():
self.logger.info('Computing %s', featureClassName)
featureClass = self.featureClasses[featureClassName](image,
mask,
**kwargs)
if enabledFeatures is None or len(enabledFeatures) == 0:
featureClass.enableAllFeatures()
else:
for feature in enabledFeatures:
featureClass.enableFeatureByName(feature)
featureClass.calculateFeatures()
for (featureName, featureValue) in six.iteritems(
featureClass.featureValues):
newFeatureName = "%s_%s_%s" % (inputImageName,
featureClassName, featureName)
featureVector[newFeatureName] = featureValue
###################################################################
# Supplementary code to create snapshots for GCLM
if featureClassName == "glcm":
# Save ROI, binned ROI and mask as Nifti
roi_image = SimpleITK.GetImageFromArray(featureClass.imageArray)
bin_roi_image = SimpleITK.GetImageFromArray(featureClass.matrix)
# mask_array = SimpleITK.GetImageFromArray(featureClass.maskArray)
mask_image = featureClass.inputMask
path_roi = os.path.join(self.outdir, "roi_%s.nii.gz" % (featureClassName))
path_bin_roi = os.path.join(self.outdir, "binned_roi_%s.nii.gz" % (featureClassName))
path_mask = os.path.join(self.outdir, "mask_%s.nii.gz" % (featureClassName))
SimpleITK.WriteImage(roi_image, path_roi)
SimpleITK.WriteImage(bin_roi_image, path_bin_roi)
SimpleITK.WriteImage(mask_image, path_mask)
# subplots: one histogram + co-occurences matrices
nb_coocc_matrices = featureClass.P_glcm.shape[2]
nb_subplots = 1 + nb_coocc_matrices
fig, axes = plt.subplots(nrows=1, ncols=nb_subplots,
figsize=(18, 2))
histo_ax, matrices_axes = axes[0], axes[1:]
fig.suptitle("GLCM matrices, image type: %s, bin width: %i"
% (inputImageName, featureClass.binWidth))
# Histogram
#bins = featureClass.binEdges # binEdges are in real data level
bins = range(1, featureClass.coefficients['Ng']+1)
# this hist consider all voxels in the bounding box
# histo_ax.hist(featureClass.matrix.flatten(), bins=bins)
# this hist consider voxel within the ROI
histo_ax.hist(
featureClass.matrix[np.where(featureClass.maskArray != 0)],
bins=bins)
histo_ax.tick_params(labelsize=3)
histo_ax.set_title("%s hist" % inputImageName, fontsize=8)
# Identify global min/max of concurrent matrices to have a
# consistent coloration across all images
co_min = featureClass.P_glcm.min()
co_max = featureClass.P_glcm.max()
# print(featureClass.P_glcm.shape )
# Create image subplot for each matrix along with colorbar
extent = [bins[0], bins[-1], bins[0], bins[-1]]
for i, ax in enumerate(matrices_axes):
co_matrix = featureClass.P_glcm[:, :, i]
im = ax.imshow(co_matrix, vmin=co_min, vmax=co_max,
extent=extent, cmap="Reds",
interpolation='nearest')
ax.tick_params(labelsize=3)
ax.set_title("angle index: %i" % i, fontsize=6)
cb = plt.colorbar(im, ax=ax, orientation="horizontal")
cb.ax.tick_params(labelsize=3)
fig.tight_layout()
name_png = '%s_%s_bw%s.png' % (featureClassName,
inputImageName,
featureClass.binWidth)
path_png = os.path.join(self.outdir, name_png)
plt.savefig(path_png, dpi=300)
if featureClassName == "glrlm":
nb_coocc_matrices = featureClass.P_glrlm.shape[2]
nb_subplots = 1 + nb_coocc_matrices
fig, axes = plt.subplots(nrows=1, ncols=nb_subplots,
figsize=(18, 2))
histo_ax, matrices_axes = axes[0], axes[1:]
fig.suptitle("GLCM matrices, image type: %s, bin width: %i"
% (inputImageName, featureClass.binWidth))
# Identify global min/max of concurrent matrices to have a
# consistent coloration across all images
co_min = featureClass.P_glrlm.min()
co_max = featureClass.P_glrlm.max()
# Create image subplot for each matrix along with colorbar
extent = [1, featureClass.P_glrlm[:,:,0].shape[1], featureClass.coefficients['Ng'], 1]
for i, ax in enumerate(matrices_axes):
co_matrix = featureClass.P_glrlm[:, :, i]
im = ax.imshow(co_matrix, vmin=co_min, vmax=co_max,
extent=extent, cmap="Reds",
interpolation='nearest')
ax.set_aspect(abs((extent[1]-extent[0])/(extent[3]-extent[2]))/10.)
ax.tick_params(labelsize=3)
ax.set_title("angle index: %i" % i, fontsize=6)
cb = plt.colorbar(im, ax=ax, orientation="horizontal")
cb.ax.tick_params(labelsize=3)
fig.tight_layout()
name_png = '%s_%s_bw%s.png' % (featureClassName,
inputImageName,
featureClass.binWidth)
path_png = os.path.join(self.outdir, name_png)
plt.savefig(path_png, dpi=300)
return featureVector
| [
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.colorbar",
"numpy.where",
"SimpleITK.WriteImage",
"SimpleITK.GetImageFromArray",
"collections.OrderedDict",
"six.iteritems",
"os.path.join",
"matplotlib.pyplot.savefig"
] | [((981, 1006), 'collections.OrderedDict', 'collections.OrderedDict', ([], {}), '()\n', (1004, 1006), False, 'import collections\n'), ((1093, 1128), 'six.iteritems', 'six.iteritems', (['self.enabledFeatures'], {}), '(self.enabledFeatures)\n', (1106, 1128), False, 'import six\n'), ((1980, 2021), 'six.iteritems', 'six.iteritems', (['featureClass.featureValues'], {}), '(featureClass.featureValues)\n', (1993, 2021), False, 'import six\n'), ((2519, 2571), 'SimpleITK.GetImageFromArray', 'SimpleITK.GetImageFromArray', (['featureClass.imageArray'], {}), '(featureClass.imageArray)\n', (2546, 2571), False, 'import SimpleITK\n'), ((2604, 2652), 'SimpleITK.GetImageFromArray', 'SimpleITK.GetImageFromArray', (['featureClass.matrix'], {}), '(featureClass.matrix)\n', (2631, 2652), False, 'import SimpleITK\n'), ((2815, 2876), 'os.path.join', 'os.path.join', (['self.outdir', "('roi_%s.nii.gz' % featureClassName)"], {}), "(self.outdir, 'roi_%s.nii.gz' % featureClassName)\n", (2827, 2876), False, 'import os\n'), ((2910, 2978), 'os.path.join', 'os.path.join', (['self.outdir', "('binned_roi_%s.nii.gz' % featureClassName)"], {}), "(self.outdir, 'binned_roi_%s.nii.gz' % featureClassName)\n", (2922, 2978), False, 'import os\n'), ((3010, 3072), 'os.path.join', 'os.path.join', (['self.outdir', "('mask_%s.nii.gz' % featureClassName)"], {}), "(self.outdir, 'mask_%s.nii.gz' % featureClassName)\n", (3022, 3072), False, 'import os\n'), ((3092, 3133), 'SimpleITK.WriteImage', 'SimpleITK.WriteImage', (['roi_image', 'path_roi'], {}), '(roi_image, path_roi)\n', (3112, 3133), False, 'import SimpleITK\n'), ((3150, 3199), 'SimpleITK.WriteImage', 'SimpleITK.WriteImage', (['bin_roi_image', 'path_bin_roi'], {}), '(bin_roi_image, path_bin_roi)\n', (3170, 3199), False, 'import SimpleITK\n'), ((3216, 3259), 'SimpleITK.WriteImage', 'SimpleITK.WriteImage', (['mask_image', 'path_mask'], {}), '(mask_image, path_mask)\n', (3236, 3259), False, 'import SimpleITK\n'), ((3473, 3530), 'matplotlib.pyplot.subplots', 
'plt.subplots', ([], {'nrows': '(1)', 'ncols': 'nb_subplots', 'figsize': '(18, 2)'}), '(nrows=1, ncols=nb_subplots, figsize=(18, 2))\n', (3485, 3530), True, 'import matplotlib.pyplot as plt\n'), ((5663, 5698), 'os.path.join', 'os.path.join', (['self.outdir', 'name_png'], {}), '(self.outdir, name_png)\n', (5675, 5698), False, 'import os\n'), ((5715, 5745), 'matplotlib.pyplot.savefig', 'plt.savefig', (['path_png'], {'dpi': '(300)'}), '(path_png, dpi=300)\n', (5726, 5745), True, 'import matplotlib.pyplot as plt\n'), ((5937, 5994), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'nrows': '(1)', 'ncols': 'nb_subplots', 'figsize': '(18, 2)'}), '(nrows=1, ncols=nb_subplots, figsize=(18, 2))\n', (5949, 5994), True, 'import matplotlib.pyplot as plt\n'), ((7563, 7598), 'os.path.join', 'os.path.join', (['self.outdir', 'name_png'], {}), '(self.outdir, name_png)\n', (7575, 7598), False, 'import os\n'), ((7615, 7645), 'matplotlib.pyplot.savefig', 'plt.savefig', (['path_png'], {'dpi': '(300)'}), '(path_png, dpi=300)\n', (7626, 7645), True, 'import matplotlib.pyplot as plt\n'), ((5302, 5351), 'matplotlib.pyplot.colorbar', 'plt.colorbar', (['im'], {'ax': 'ax', 'orientation': '"""horizontal"""'}), "(im, ax=ax, orientation='horizontal')\n", (5314, 5351), True, 'import matplotlib.pyplot as plt\n'), ((7202, 7251), 'matplotlib.pyplot.colorbar', 'plt.colorbar', (['im'], {'ax': 'ax', 'orientation': '"""horizontal"""'}), "(im, ax=ax, orientation='horizontal')\n", (7214, 7251), True, 'import matplotlib.pyplot as plt\n'), ((4227, 4264), 'numpy.where', 'np.where', (['(featureClass.maskArray != 0)'], {}), '(featureClass.maskArray != 0)\n', (4235, 4264), True, 'import numpy as np\n')] |
import numpy as np
import xgboost as xgb
import re
from pitci.xgboost import XGBoosterAbsoluteErrorConformalPredictor
import pitci
import pytest
class TestInit:
    """Tests for the XGBoosterAbsoluteErrorConformalPredictor.__init__ method."""

    def test_inheritance(self):
        """Test that XGBoosterAbsoluteErrorConformalPredictor inherits from
        AbsoluteErrorConformalPredictor.
        """
        parent_class = XGBoosterAbsoluteErrorConformalPredictor.__mro__[1]

        assert parent_class is pitci.base.AbsoluteErrorConformalPredictor, (
            "XGBoosterAbsoluteErrorConformalPredictor does not inherit from "
            "AbsoluteErrorConformalPredictor"
        )

    def test_model_type_exception(self):
        """Test an exception is raised if model is not a xgb.Booster object."""
        expected_message = (
            f"booster is not in expected types {[xgb.Booster]}, got {tuple}"
        )

        with pytest.raises(TypeError, match=re.escape(expected_message)):
            XGBoosterAbsoluteErrorConformalPredictor((1, 2, 3))

    def test_attributes_set(self, xgboost_1_split_1_tree):
        """Test that SUPPORTED_OBJECTIVES, version and model attributes are set."""
        confo_model = XGBoosterAbsoluteErrorConformalPredictor(xgboost_1_split_1_tree)

        assert confo_model.__version__ == pitci.__version__, (
            "__version__ attribute not set to package version value"
        )

        assert confo_model.model is xgboost_1_split_1_tree, (
            "model attribute not set with the value passed in init"
        )

        assert (
            confo_model.SUPPORTED_OBJECTIVES
            == pitci.xgboost.SUPPORTED_OBJECTIVES_ABSOLUTE_ERROR
        ), "SUPPORTED_OBJECTIVES attribute incorrect"

    def test_check_objective_supported_called(self, mocker, xgboost_1_split_1_tree):
        """Test that check_objective_supported is called in init."""
        mocked = mocker.patch.object(pitci.xgboost, "check_objective_supported")

        XGBoosterAbsoluteErrorConformalPredictor(xgboost_1_split_1_tree)

        assert mocked.call_count == 1, (
            "check_objective_supported not called (once) in init"
        )

        # unpack the single recorded call into positional and keyword args
        pos_args, kwargs = mocked.call_args_list[0]

        assert pos_args == (
            xgboost_1_split_1_tree,
            pitci.xgboost.SUPPORTED_OBJECTIVES_ABSOLUTE_ERROR,
        ), "positional args in check_objective_supported call not correct"

        assert kwargs == {}, (
            "keyword args in check_objective_supported call not correct"
        )
class TestCalibrate:
    """Tests for the XGBoosterAbsoluteErrorConformalPredictor.calibrate method."""

    def test_data_type_exception(self, xgboost_1_split_1_tree):
        """Test an exception is raised if data is not a xgb.DMatrix object."""
        confo_model = XGBoosterAbsoluteErrorConformalPredictor(xgboost_1_split_1_tree)

        expected_message = f"data is not in expected types {[xgb.DMatrix]}, got {int}"

        with pytest.raises(TypeError, match=re.escape(expected_message)):
            confo_model.calibrate(12345)

    def test_super_calibrate_call_response_passed(
        self, mocker, dmatrix_2x1_with_label, xgboost_1_split_1_tree
    ):
        """Test AbsoluteErrorConformalPredictor.calibrate call when response is passed."""
        confo_model = XGBoosterAbsoluteErrorConformalPredictor(xgboost_1_split_1_tree)
        mocked = mocker.patch.object(
            pitci.base.AbsoluteErrorConformalPredictor, "calibrate"
        )
        response_array = np.array([4, 5])

        confo_model.calibrate(
            data=dmatrix_2x1_with_label, alpha=0.5, response=response_array
        )

        assert mocked.call_count == 1, (
            "incorrect number of calls to AbsoluteErrorConformalPredictor.calibrate"
        )

        # unpack the single recorded call into positional and keyword args
        pos_args, kwargs = mocked.call_args_list[0]

        assert pos_args == (), (
            "positional args incorrect in call to AbsoluteErrorConformalPredictor.calibrate"
        )
        assert kwargs["alpha"] == 0.5, (
            "alpha incorrect in call to AbsoluteErrorConformalPredictor.calibrate"
        )
        np.testing.assert_array_equal(kwargs["response"], response_array)
        assert kwargs["data"] == dmatrix_2x1_with_label, (
            "data incorrect in call to AbsoluteErrorConformalPredictor.calibrate"
        )

    def test_super_calibrate_call_no_response_passed(
        self, mocker, dmatrix_2x1_with_label, xgboost_1_split_1_tree
    ):
        """Test AbsoluteErrorConformalPredictor.calibrate call when no response is passed."""
        confo_model = XGBoosterAbsoluteErrorConformalPredictor(xgboost_1_split_1_tree)
        mocked = mocker.patch.object(
            pitci.base.AbsoluteErrorConformalPredictor, "calibrate"
        )

        confo_model.calibrate(data=dmatrix_2x1_with_label, alpha=0.99)

        assert mocked.call_count == 1, (
            "incorrect number of calls to AbsoluteErrorConformalPredictor.calibrate"
        )

        pos_args, kwargs = mocked.call_args_list[0]

        assert pos_args == (), (
            "positional args incorrect in call to AbsoluteErrorConformalPredictor.calibrate"
        )
        assert kwargs["alpha"] == 0.99, (
            "alpha incorrect in call to AbsoluteErrorConformalPredictor.calibrate"
        )
        # without an explicit response the DMatrix label should be used
        np.testing.assert_array_equal(
            kwargs["response"], dmatrix_2x1_with_label.get_label()
        )
        assert kwargs["data"] == dmatrix_2x1_with_label, (
            "data incorrect in call to AbsoluteErrorConformalPredictor.calibrate"
        )
class TestPredictWithInterval:
    """Tests for the XGBoosterAbsoluteErrorConformalPredictor.predict_with_interval method."""

    def test_data_type_exception(self, dmatrix_2x1_with_label, xgboost_1_split_1_tree):
        """Test an exception is raised if data is not a xgb.DMatrix object."""
        confo_model = XGBoosterAbsoluteErrorConformalPredictor(xgboost_1_split_1_tree)
        confo_model.calibrate(dmatrix_2x1_with_label)

        expected_message = f"data is not in expected types {[xgb.DMatrix]}, got {list}"

        with pytest.raises(TypeError, match=re.escape(expected_message)):
            confo_model.predict_with_interval([])

    def test_super_predict_with_interval_result_returned(
        self, mocker, dmatrix_2x1_with_label, xgboost_1_split_1_tree
    ):
        """Test that super prediction_with_interval is called and the result is returned from
        the function.
        """
        confo_model = XGBoosterAbsoluteErrorConformalPredictor(xgboost_1_split_1_tree)
        confo_model.calibrate(dmatrix_2x1_with_label)

        predict_return_value = np.array([123, 456])
        mocked = mocker.patch.object(
            pitci.base.AbsoluteErrorConformalPredictor,
            "predict_with_interval",
            return_value=predict_return_value,
        )

        results = confo_model.predict_with_interval(dmatrix_2x1_with_label)

        assert mocked.call_count == 1, (
            "incorrect number of calls to AbsoluteErrorConformalPredictor.predict_with_interval"
        )
        # the mocked return value must be passed straight through
        np.testing.assert_array_equal(results, predict_return_value)

        pos_args = mocked.call_args_list[0][0]
        assert pos_args == (dmatrix_2x1_with_label,), (
            "positional args incorrect in call to xgb.Booster.predict"
        )
class TestGeneratePredictions:
    """Tests for the XGBoosterAbsoluteErrorConformalPredictor._generate_predictions method."""

    def test_data_type_exception(self, dmatrix_2x1_with_label, xgboost_1_split_1_tree):
        """Test an exception is raised if data is not a xgb.DMatrix object."""
        confo_model = XGBoosterAbsoluteErrorConformalPredictor(xgboost_1_split_1_tree)
        confo_model.calibrate(dmatrix_2x1_with_label)

        expected_message = f"data is not in expected types {[xgb.DMatrix]}, got {float}"

        with pytest.raises(TypeError, match=re.escape(expected_message)):
            confo_model._generate_predictions(12345.0)

    def test_predict_call(self, mocker, dmatrix_2x1_with_label, xgboost_1_split_1_tree):
        """Test that the output from xgb.Booster.predict with ntree_limit = best_iteration + 1
        is returned from the method.
        """
        confo_model = XGBoosterAbsoluteErrorConformalPredictor(xgboost_1_split_1_tree)
        confo_model.calibrate(dmatrix_2x1_with_label)

        predict_return_value = np.array([200, 101])
        mocked = mocker.patch.object(
            xgb.Booster, "predict", return_value=predict_return_value
        )

        results = confo_model._generate_predictions(dmatrix_2x1_with_label)

        assert mocked.call_count == 1, (
            "incorrect number of calls to xgb.Booster.predict"
        )
        np.testing.assert_array_equal(results, predict_return_value)

        # unpack the single recorded call into positional and keyword args
        pos_args, kwargs = mocked.call_args_list[0]

        assert pos_args == (dmatrix_2x1_with_label,), (
            "positional args incorrect in call to xgb.Booster.predict"
        )
        assert kwargs == {
            "ntree_limit": xgboost_1_split_1_tree.best_iteration + 1
        }, "keyword args incorrect in call to xgb.Booster.predict"
class TestConformalPredictionValues:
    """Baseline tests of the conformal predictions from the
    XGBoosterAbsoluteErrorConformalPredictor class.
    """
    # run the calibration check across a range of confidence levels
    @pytest.mark.parametrize(
        "alpha", [(0.1), (0.25), (0.5), (0.7), (0.8), (0.9), (0.95), (0.99)]
    )
    def test_calibration(self, alpha, xgbooster_diabetes_model, diabetes_xgb_data):
        """Test that the correct proportion of response values fall within the intervals, on
        the calibration sample.
        """
        confo_model = pitci.get_absolute_error_conformal_predictor(
            xgbooster_diabetes_model
        )
        # NOTE(review): diabetes_xgb_data[3] is used both for calibration and
        # evaluation here — presumably the calibration split; confirm against fixture
        confo_model.calibrate(
            data=diabetes_xgb_data[3],
            alpha=alpha,
        )
        predictions_test = confo_model.predict_with_interval(diabetes_xgb_data[3])
        calibration_results = pitci.helpers.check_response_within_interval(
            response=diabetes_xgb_data[3].get_label(),
            intervals_with_predictions=predictions_test,
        )
        # on the calibration sample coverage must be at least the requested level
        assert (
            calibration_results[True] >= alpha
        ), f"{type(confo_model)} not calibrated at {alpha}, got {calibration_results[True]}"
    def test_conformal_predictions(self, xgbooster_diabetes_model, diabetes_xgb_data):
        """Test that the conformal intervals are as expected."""
        confo_model = pitci.get_absolute_error_conformal_predictor(
            xgbooster_diabetes_model
        )
        confo_model.calibrate(data=diabetes_xgb_data[3], alpha=0.8)
        # regression-pinned value for the diabetes dataset fixture
        assert (
            round(float(confo_model.baseline_interval), 7) == 89.2551117
        ), "baseline_interval not calculated as expected on diabetes dataset"
        predictions_test = confo_model.predict_with_interval(diabetes_xgb_data[3])
        # column 1 of the output is presumably the point prediction — confirm layout
        assert (
            round(float(predictions_test[:, 1].mean()), 7) == 145.7608795
        ), "mean test sample predicted value not calculated as expected on diabetes dataset"
        # expected quantiles/statistics of the interval widths; exact floats are
        # regression pins and may be platform/version sensitive
        expected_interval_distribution = {
            0.0: 178.5102081298828,
            0.05: 178.5102081298828,
            0.1: 178.51022338867188,
            0.2: 178.51022338867188,
            0.3: 178.51022338867188,
            0.4: 178.51022338867188,
            0.5: 178.51022338867188,
            0.6: 178.51022338867188,
            0.7: 178.51022338867188,
            0.8: 178.51022338867188,
            0.9: 178.51022644042968,
            0.95: 178.51023864746094,
            1.0: 178.51023864746094,
            "mean": 178.51019287109375,
            "std": 6.4099735936906654e-06,
            "iqr": 0.0,
        }
        actual_interval_distribution = pitci.helpers.check_interval_width(
            intervals_with_predictions=predictions_test
        ).to_dict()
        assert (
            expected_interval_distribution == actual_interval_distribution
        ), "conformal interval distribution not calculated as expected"
| [
"pitci.get_absolute_error_conformal_predictor",
"pitci.helpers.check_interval_width",
"numpy.testing.assert_array_equal",
"re.escape",
"numpy.array",
"pitci.xgboost.XGBoosterAbsoluteErrorConformalPredictor",
"pytest.mark.parametrize"
] | [((9811, 9888), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""alpha"""', '[0.1, 0.25, 0.5, 0.7, 0.8, 0.9, 0.95, 0.99]'], {}), "('alpha', [0.1, 0.25, 0.5, 0.7, 0.8, 0.9, 0.95, 0.99])\n", (9834, 9888), False, 'import pytest\n'), ((1237, 1301), 'pitci.xgboost.XGBoosterAbsoluteErrorConformalPredictor', 'XGBoosterAbsoluteErrorConformalPredictor', (['xgboost_1_split_1_tree'], {}), '(xgboost_1_split_1_tree)\n', (1277, 1301), False, 'from pitci.xgboost import XGBoosterAbsoluteErrorConformalPredictor\n'), ((2014, 2078), 'pitci.xgboost.XGBoosterAbsoluteErrorConformalPredictor', 'XGBoosterAbsoluteErrorConformalPredictor', (['xgboost_1_split_1_tree'], {}), '(xgboost_1_split_1_tree)\n', (2054, 2078), False, 'from pitci.xgboost import XGBoosterAbsoluteErrorConformalPredictor\n'), ((2917, 2981), 'pitci.xgboost.XGBoosterAbsoluteErrorConformalPredictor', 'XGBoosterAbsoluteErrorConformalPredictor', (['xgboost_1_split_1_tree'], {}), '(xgboost_1_split_1_tree)\n', (2957, 2981), False, 'from pitci.xgboost import XGBoosterAbsoluteErrorConformalPredictor\n'), ((3449, 3513), 'pitci.xgboost.XGBoosterAbsoluteErrorConformalPredictor', 'XGBoosterAbsoluteErrorConformalPredictor', (['xgboost_1_split_1_tree'], {}), '(xgboost_1_split_1_tree)\n', (3489, 3513), False, 'from pitci.xgboost import XGBoosterAbsoluteErrorConformalPredictor\n'), ((3657, 3673), 'numpy.array', 'np.array', (['[4, 5]'], {}), '([4, 5])\n', (3665, 3673), True, 'import numpy as np\n'), ((4338, 4408), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (["call_kwargs['response']", 'response_array'], {}), "(call_kwargs['response'], response_array)\n", (4367, 4408), True, 'import numpy as np\n'), ((4814, 4878), 'pitci.xgboost.XGBoosterAbsoluteErrorConformalPredictor', 'XGBoosterAbsoluteErrorConformalPredictor', (['xgboost_1_split_1_tree'], {}), '(xgboost_1_split_1_tree)\n', (4854, 4878), False, 'from pitci.xgboost import XGBoosterAbsoluteErrorConformalPredictor\n'), ((6204, 6268), 
'pitci.xgboost.XGBoosterAbsoluteErrorConformalPredictor', 'XGBoosterAbsoluteErrorConformalPredictor', (['xgboost_1_split_1_tree'], {}), '(xgboost_1_split_1_tree)\n', (6244, 6268), False, 'from pitci.xgboost import XGBoosterAbsoluteErrorConformalPredictor\n'), ((6845, 6909), 'pitci.xgboost.XGBoosterAbsoluteErrorConformalPredictor', 'XGBoosterAbsoluteErrorConformalPredictor', (['xgboost_1_split_1_tree'], {}), '(xgboost_1_split_1_tree)\n', (6885, 6909), False, 'from pitci.xgboost import XGBoosterAbsoluteErrorConformalPredictor\n'), ((6997, 7017), 'numpy.array', 'np.array', (['[123, 456]'], {}), '([123, 456])\n', (7005, 7017), True, 'import numpy as np\n'), ((7442, 7502), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['results', 'predict_return_value'], {}), '(results, predict_return_value)\n', (7471, 7502), True, 'import numpy as np\n'), ((8046, 8110), 'pitci.xgboost.XGBoosterAbsoluteErrorConformalPredictor', 'XGBoosterAbsoluteErrorConformalPredictor', (['xgboost_1_split_1_tree'], {}), '(xgboost_1_split_1_tree)\n', (8086, 8110), False, 'from pitci.xgboost import XGBoosterAbsoluteErrorConformalPredictor\n'), ((8664, 8728), 'pitci.xgboost.XGBoosterAbsoluteErrorConformalPredictor', 'XGBoosterAbsoluteErrorConformalPredictor', (['xgboost_1_split_1_tree'], {}), '(xgboost_1_split_1_tree)\n', (8704, 8728), False, 'from pitci.xgboost import XGBoosterAbsoluteErrorConformalPredictor\n'), ((8816, 8836), 'numpy.array', 'np.array', (['[200, 101]'], {}), '([200, 101])\n', (8824, 8836), True, 'import numpy as np\n'), ((9157, 9217), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['results', 'predict_return_value'], {}), '(results, predict_return_value)\n', (9186, 9217), True, 'import numpy as np\n'), ((10163, 10233), 'pitci.get_absolute_error_conformal_predictor', 'pitci.get_absolute_error_conformal_predictor', (['xgbooster_diabetes_model'], {}), '(xgbooster_diabetes_model)\n', (10207, 10233), False, 'import pitci\n'), ((10979, 11049), 
'pitci.get_absolute_error_conformal_predictor', 'pitci.get_absolute_error_conformal_predictor', (['xgbooster_diabetes_model'], {}), '(xgbooster_diabetes_model)\n', (11023, 11049), False, 'import pitci\n'), ((1018, 1069), 'pitci.xgboost.XGBoosterAbsoluteErrorConformalPredictor', 'XGBoosterAbsoluteErrorConformalPredictor', (['(1, 2, 3)'], {}), '((1, 2, 3))\n', (1058, 1069), False, 'from pitci.xgboost import XGBoosterAbsoluteErrorConformalPredictor\n'), ((12261, 12340), 'pitci.helpers.check_interval_width', 'pitci.helpers.check_interval_width', ([], {'intervals_with_predictions': 'predictions_test'}), '(intervals_with_predictions=predictions_test)\n', (12295, 12340), False, 'import pitci\n'), ((887, 962), 're.escape', 're.escape', (['f"""booster is not in expected types {[xgb.Booster]}, got {tuple}"""'], {}), "(f'booster is not in expected types {[xgb.Booster]}, got {tuple}')\n", (896, 962), False, 'import re\n'), ((3052, 3122), 're.escape', 're.escape', (['f"""data is not in expected types {[xgb.DMatrix]}, got {int}"""'], {}), "(f'data is not in expected types {[xgb.DMatrix]}, got {int}')\n", (3061, 3122), False, 'import re\n'), ((6394, 6465), 're.escape', 're.escape', (['f"""data is not in expected types {[xgb.DMatrix]}, got {list}"""'], {}), "(f'data is not in expected types {[xgb.DMatrix]}, got {list}')\n", (6403, 6465), False, 'import re\n'), ((8236, 8308), 're.escape', 're.escape', (['f"""data is not in expected types {[xgb.DMatrix]}, got {float}"""'], {}), "(f'data is not in expected types {[xgb.DMatrix]}, got {float}')\n", (8245, 8308), False, 'import re\n')] |
from optparse import OptionParser
import numpy as np
# This version is adapted for mem
__VERSION__ = 0.2
class percentile:
    """Calculates the 10th, 50th and 90th percentiles of an array.

    Attributes:
        tenth - the 10th percentile of the last array processed (0 if none)
        median - the median, i.e. the 50th percentile (0 if none)
        nintieth - the 90th percentile (0 if none)
    """
    def __init__(self, array=None):
        # Zero defaults keep the object usable before any data is supplied.
        self.tenth = 0
        self.median = 0
        self.nintieth = 0
        self.__call__(array)
    def calculate(self, array):
        """Compute and store the three percentiles; a None array is a no-op."""
        if array is None:
            return
        # One vectorized call instead of three separate passes over the data.
        self.tenth, self.median, self.nintieth = np.percentile(array, (10, 50, 90))
    def __call__(self, array):
        """Convenience alias for calculate(array)."""
        self.calculate(array)
    def printt(self):
        """Print the median and the 10th-90th percentile spread."""
        print("Value {} {}".format(self.median, self.nintieth - self.tenth))
def main():
    """Parse the command line, compute percentiles of the data in the given
    file and append ``median spread`` to ``mem.dat``.
    """
    usage = "usage: python %prog filename"
    version = "%%prog %s" % __VERSION__
    parser = OptionParser(usage=usage, version=version)
    (options, args) = parser.parse_args()
    if len(args) < 1:
        # parser.error prints the message to stderr and exits with status 2,
        # so no explicit SystemExit is needed afterwards
        parser.error("need a filename, -h/--help for help")
    obj = percentile()
    imported_data = np.genfromtxt(args[0])
    obj(imported_data)
    # context manager guarantees the file handle is closed even on error
    with open('mem.dat', 'a+') as res:
        res.write("{} {}\n".format(obj.median, obj.nintieth - obj.tenth))
# Run only when executed as a script, not when imported as a module.
if __name__ == '__main__':
    main()
| [
"numpy.percentile",
"numpy.genfromtxt",
"optparse.OptionParser"
] | [((1020, 1062), 'optparse.OptionParser', 'OptionParser', ([], {'usage': 'usage', 'version': 'version'}), '(usage=usage, version=version)\n', (1032, 1062), False, 'from optparse import OptionParser\n'), ((1263, 1285), 'numpy.genfromtxt', 'np.genfromtxt', (['args[0]'], {}), '(args[0])\n', (1276, 1285), True, 'import numpy as np\n'), ((626, 650), 'numpy.percentile', 'np.percentile', (['array', '(10)'], {}), '(array, 10)\n', (639, 650), True, 'import numpy as np\n'), ((673, 697), 'numpy.percentile', 'np.percentile', (['array', '(50)'], {}), '(array, 50)\n', (686, 697), True, 'import numpy as np\n'), ((722, 746), 'numpy.percentile', 'np.percentile', (['array', '(90)'], {}), '(array, 90)\n', (735, 746), True, 'import numpy as np\n')] |
"""
We are going to solve a nonlinear static system using the FETI nonlinear
static solver in this example. We decided to use an aluminium rectangular
2D structure, which is going to be fixed on the north edge and a force
of 15kN is going to beapplied on the south edge.
We start by importing the libraries needed for this example.
"""
import numpy as np
import logging
###########################################
from amfe.io.mesh import AmfeMeshConverter, GmshAsciiMeshReader
from amfe.material import KirchhoffMaterial
from amfe.component import StructuralComponent
from amfe.component.component_composite import MeshComponentComposite
from amfe.component.tree_manager import TreeBuilder
from amfe.neumann import FixedDirectionNeumann
from amfe.solver.translators import MulticomponentMechanicalSystem
from amfe import ui
from amfe.solver import AmfeSolution
from amfe.forces import constant_force
############################################
from amfeti import NonlinearStaticFetiSolver
from amfeti.solvers import PCPGsolver
############################################
logging.basicConfig(level=logging.INFO)
"""
The mesh needs to be prepared in Python before it is passed to the
AMfeti solver.
"""
# Material Properties
# Aluminium-like constants; units presumably consistent with the mesh (mm/N) — TODO confirm
E_alu = 70e3
nu_alu = 0.34
rho_alu = 2.7e0
my_material = KirchhoffMaterial(E_alu, nu_alu, rho_alu, thickness=1)
# we specify the path for the input mesh and an output path,
# where we will save the solutions
input_file = '/meshes/3d_truss_with_slope.msh'
# NOTE(review): output_path is defined but not referenced later in this script
output_path = '/results/nonlinear_static_2d_example'
"""
We are setting up the material properties and defining our component
with the mesh file. It's important to remember to set the parameter
of ``surface2partition`` to ``True`` when reading the mesh.
"""
# Reading the .msh file and defining a structural component
reader = GmshAsciiMeshReader(input_file)
converter = AmfeMeshConverter()
# surface2partition=True keeps the Gmsh partitioning for the decomposition below
reader.parse(converter, surface2partition=True)
my_mesh = converter.return_mesh()
my_component = StructuralComponent(my_mesh)
"""
We proceed by assigning the material properties and
mapping the global degrees of freedom for all nodes that lie on the
Dirichlet boundary. This mapping will be used to assign the Dirichlet
constraints later.
"""
# Assigning material properties on physical group called "material"
ui.assign_material_by_group(my_component, my_material, 'material')
# Mapping the degrees of freedom for nodes belonging to the physical group called "dirichlet"
glo_dofs_x = my_component.mapping.get_dofs_by_nodeids(my_component.mesh.get_nodeids_by_groups(['dirichlet']), 'ux')
glo_dofs_y = my_component.mapping.get_dofs_by_nodeids(my_component.mesh.get_nodeids_by_groups(['dirichlet']), 'uy')
my_composite = MeshComponentComposite(my_component)
"""
We define a structural composite object with the help of the
tree builder that manages the substructures and the connections
between them.
"""
# Decomposition of component
tree_builder = TreeBuilder()
tree_builder.add([0], [my_composite])
leaf_id = tree_builder.leaf_paths.max_leaf_id
tree_builder.separate_partitioned_component_by_leafid(leaf_id)
structural_composite = tree_builder.root_composite.components[0]
structural_composite.update_component_connections()
"""
Then we define the external force of 15kN and apply the Neumann
boundary condition. Finally, we apply the Dirichlet conditions as well.
"""
F = constant_force(15E3)
# Neumann conditions, with the force direction [0, -1] for the [x direction, y direction]
my_neumann = FixedDirectionNeumann(np.array([0, -1]), time_func=F)
structural_composite.assign_neumann('Neumann0', my_neumann, ['neumann'], '_groups')
# Dirichlet conditions
dirichlet = structural_composite.components[1]._constraints.create_dirichlet_constraint()
# Fix every x and every y degree of freedom on the "dirichlet" boundary nodes
for dof in glo_dofs_x.reshape(-1):
    structural_composite.assign_constraint('Dirichlet0', dirichlet, np.array([dof], dtype=int), [])
for dof in glo_dofs_y.reshape(-1):
    structural_composite.assign_constraint('Dirichlet1', dirichlet, np.array([dof], dtype=int), [])
"""
Now that we have finalized the structural composite, we can create
a multicomponent mechanical system, i.e. a system consisting of
substructures. Since this is not a linear problem, we set the
``all_linear`` parameter to ``False``.
"""
# FETI-solver
substructured_system = MulticomponentMechanicalSystem(structural_composite, constant_mass=True, constant_damping=True,
                                                      all_linear=False, constraint_formulation='boolean')
"""
Now we'd like to use the NonlinearStaticFetiSolver to solve our problem.
However, this solver requires dictionaries for the K matrices,
the B matrices, f_ext, f_int, q_0. For this purpose, we write a wrapper
function that prepares these dictionaries, we need to pass to the FETI solver.
"""
def _create_K_B_f_dict(B_dict, msys_dict):
# Initialize dictionaries
K_dict_trans = dict()
B_dict_trans = dict()
f_ext_dict_trans = dict()
f_int_dict_trans = dict()
interface_dict = dict()
q_0_dict = dict()
int_num = 1
system_wrapper = dict()
class SystemWrapper:
def __init__(self, msystem):
self.msystem = msystem
self.f_ext_stored = None
def K(self, q):
return self.msystem.K(q, np.zeros_like(q), 0)
def f_ext(self, q):
if self.f_ext_stored is None:
self.f_ext_stored = self.msystem.f_ext(q, np.zeros_like(q), 0)
return self.f_ext_stored
def f_int(self, q):
return self.msystem.f_int(q, np.zeros_like(q), 0)
for i_system, msys in msys_dict.items():
subs_key = i_system
system_wrapper[subs_key] = SystemWrapper(msys)
K_dict_trans[subs_key] = system_wrapper[subs_key].K
f_ext_dict_trans[subs_key] = system_wrapper[subs_key].f_ext
f_int_dict_trans[subs_key] = system_wrapper[subs_key].f_int
q_0_dict[subs_key] = np.zeros(msys.dimension)
B_local = dict()
for key in B_dict.keys():
if key not in interface_dict:
interface_dict[key] = 'interface' + str(int_num)
interface_dict[(key[1], key[0])] = 'interface' + str(int_num)
int_num += 1
if key[0] == i_system:
B_local[interface_dict[key]] = B_dict[key]
B_dict_trans[subs_key] = B_local
return K_dict_trans, B_dict_trans, f_int_dict_trans, f_ext_dict_trans, q_0_dict
"""
We can now use this function to define the dictionaries for K, B, f_int,
f_ext and q_0 and call the nonlinear static FETI solver.
For this solver, we also specify the number of steps for the load-stepping
as well as tolerance for the residuals and a maximum number of iterations.
"""
K_dict, B_dict, f_int_dict, f_ext_dict, q_0_dict = _create_K_B_f_dict(substructured_system.connections,
substructured_system.mechanical_systems)
# Defining an instance of the Preconditioned Conjugate Projected Gradient (PCPG) solver as a global system solver to be used
global_solver = PCPGsolver()
# Specifying that a full reorthogonalization should be done at every iteration
global_solver.set_config({'full_reorthogonalization': True})
feti_solver = NonlinearStaticFetiSolver(K_dict, B_dict, f_int_dict, f_ext_dict, q_0_dict,
loadpath_controller_options={'N_steps': 10, # load-stepping is done in 10 steps
'nonlinear_solver_options': {'rtol': 1e-6,
'max_iter': 10}},
global_solver=global_solver)
feti_solver.update()
"""
A solution object, containing all global solutions, solver-information and local problems, is returned by the solver.
"""
# Solving our system
solution_obj = feti_solver.solve()
"""
We now have our solution, but it's a solution object so we need to
read it out and store the solution in a way that is readable to us.
We are going to create ``.hdf5`` and ``.xdmf`` files that contain
the results.
"""
# Initializing an empty dictionary
solution_writer = dict()
# Save all items from the solution object in the dictionary
for i_prob, local_problem in solution_obj.local_problems.items():
solution_writer[i_prob] = AmfeSolution()
q = local_problem.q
msys = substructured_system.mechanical_systems[i_prob]
if i_prob in substructured_system.constraint_formulations:
formulation = substructured_system.constraint_formulations[i_prob]
u, du, ddu = formulation.recover(q, np.zeros_like(q), np.zeros_like(q), 0)
else:
u = q
du = np.zeros_like(u)
strains, stresses = structural_composite.components[i_prob].strains_and_stresses(u, du, 0)
solution_writer[i_prob].write_timestep(0, u, None, None, strains, stresses)
# Export the items in files readable in Paraview
for i_comp, comp in structural_composite.components.items():
path = '/Component_' + str(i_comp)
ui.write_results_to_paraview(solution_writer[i_comp], comp, path)
| [
"amfe.solver.translators.MulticomponentMechanicalSystem",
"amfeti.NonlinearStaticFetiSolver",
"amfe.ui.write_results_to_paraview",
"numpy.zeros_like",
"logging.basicConfig",
"amfe.component.StructuralComponent",
"amfe.component.component_composite.MeshComponentComposite",
"amfe.solver.AmfeSolution",
... | [((1079, 1118), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.INFO'}), '(level=logging.INFO)\n', (1098, 1118), False, 'import logging\n'), ((1290, 1344), 'amfe.material.KirchhoffMaterial', 'KirchhoffMaterial', (['E_alu', 'nu_alu', 'rho_alu'], {'thickness': '(1)'}), '(E_alu, nu_alu, rho_alu, thickness=1)\n', (1307, 1344), False, 'from amfe.material import KirchhoffMaterial\n'), ((1820, 1851), 'amfe.io.mesh.GmshAsciiMeshReader', 'GmshAsciiMeshReader', (['input_file'], {}), '(input_file)\n', (1839, 1851), False, 'from amfe.io.mesh import AmfeMeshConverter, GmshAsciiMeshReader\n'), ((1864, 1883), 'amfe.io.mesh.AmfeMeshConverter', 'AmfeMeshConverter', ([], {}), '()\n', (1881, 1883), False, 'from amfe.io.mesh import AmfeMeshConverter, GmshAsciiMeshReader\n'), ((1981, 2009), 'amfe.component.StructuralComponent', 'StructuralComponent', (['my_mesh'], {}), '(my_mesh)\n', (2000, 2009), False, 'from amfe.component import StructuralComponent\n'), ((2299, 2365), 'amfe.ui.assign_material_by_group', 'ui.assign_material_by_group', (['my_component', 'my_material', '"""material"""'], {}), "(my_component, my_material, 'material')\n", (2326, 2365), False, 'from amfe import ui\n'), ((2709, 2745), 'amfe.component.component_composite.MeshComponentComposite', 'MeshComponentComposite', (['my_component'], {}), '(my_component)\n', (2731, 2745), False, 'from amfe.component.component_composite import MeshComponentComposite\n'), ((2941, 2954), 'amfe.component.tree_manager.TreeBuilder', 'TreeBuilder', ([], {}), '()\n', (2952, 2954), False, 'from amfe.component.tree_manager import TreeBuilder\n'), ((3370, 3393), 'amfe.forces.constant_force', 'constant_force', (['(15000.0)'], {}), '(15000.0)\n', (3384, 3393), False, 'from amfe.forces import constant_force\n'), ((4299, 4450), 'amfe.solver.translators.MulticomponentMechanicalSystem', 'MulticomponentMechanicalSystem', (['structural_composite'], {'constant_mass': '(True)', 'constant_damping': '(True)', 'all_linear': 
'(False)', 'constraint_formulation': '"""boolean"""'}), "(structural_composite, constant_mass=True,\n constant_damping=True, all_linear=False, constraint_formulation='boolean')\n", (4329, 4450), False, 'from amfe.solver.translators import MulticomponentMechanicalSystem\n'), ((7096, 7108), 'amfeti.solvers.PCPGsolver', 'PCPGsolver', ([], {}), '()\n', (7106, 7108), False, 'from amfeti.solvers import PCPGsolver\n'), ((7265, 7483), 'amfeti.NonlinearStaticFetiSolver', 'NonlinearStaticFetiSolver', (['K_dict', 'B_dict', 'f_int_dict', 'f_ext_dict', 'q_0_dict'], {'loadpath_controller_options': "{'N_steps': 10, 'nonlinear_solver_options': {'rtol': 1e-06, 'max_iter': 10}}", 'global_solver': 'global_solver'}), "(K_dict, B_dict, f_int_dict, f_ext_dict, q_0_dict,\n loadpath_controller_options={'N_steps': 10, 'nonlinear_solver_options':\n {'rtol': 1e-06, 'max_iter': 10}}, global_solver=global_solver)\n", (7290, 7483), False, 'from amfeti import NonlinearStaticFetiSolver\n'), ((3517, 3534), 'numpy.array', 'np.array', (['[0, -1]'], {}), '([0, -1])\n', (3525, 3534), True, 'import numpy as np\n'), ((8408, 8422), 'amfe.solver.AmfeSolution', 'AmfeSolution', ([], {}), '()\n', (8420, 8422), False, 'from amfe.solver import AmfeSolution\n'), ((9110, 9175), 'amfe.ui.write_results_to_paraview', 'ui.write_results_to_paraview', (['solution_writer[i_comp]', 'comp', 'path'], {}), '(solution_writer[i_comp], comp, path)\n', (9138, 9175), False, 'from amfe import ui\n'), ((3850, 3876), 'numpy.array', 'np.array', (['[dof]'], {'dtype': 'int'}), '([dof], dtype=int)\n', (3858, 3876), True, 'import numpy as np\n'), ((3985, 4011), 'numpy.array', 'np.array', (['[dof]'], {'dtype': 'int'}), '([dof], dtype=int)\n', (3993, 4011), True, 'import numpy as np\n'), ((5931, 5955), 'numpy.zeros', 'np.zeros', (['msys.dimension'], {}), '(msys.dimension)\n', (5939, 5955), True, 'import numpy as np\n'), ((8764, 8780), 'numpy.zeros_like', 'np.zeros_like', (['u'], {}), '(u)\n', (8777, 8780), True, 'import numpy as np\n'), 
((8688, 8704), 'numpy.zeros_like', 'np.zeros_like', (['q'], {}), '(q)\n', (8701, 8704), True, 'import numpy as np\n'), ((8706, 8722), 'numpy.zeros_like', 'np.zeros_like', (['q'], {}), '(q)\n', (8719, 8722), True, 'import numpy as np\n'), ((5276, 5292), 'numpy.zeros_like', 'np.zeros_like', (['q'], {}), '(q)\n', (5289, 5292), True, 'import numpy as np\n'), ((5554, 5570), 'numpy.zeros_like', 'np.zeros_like', (['q'], {}), '(q)\n', (5567, 5570), True, 'import numpy as np\n'), ((5426, 5442), 'numpy.zeros_like', 'np.zeros_like', (['q'], {}), '(q)\n', (5439, 5442), True, 'import numpy as np\n')] |
import numpy as n, pylab as p, matplotlib
from scipy import stats
class NetworkPCA:
    """PCA cases of interest for observing stability in interaction networks.
    1) Primacy of centrality measures for dispersion; and
    2) precedence of symmetry over clustering;
    3) average of all basic centrality measures for first component
    """
    def __init__(self,network_measures,network_partitioning=None,tdir=".",tname="sample.png", measures="all",plot_sym=False):
        """Build PCA decompositions from the measures of a network.

        Parameters
        ----------
        network_measures : object
            Holder of per-vertex measure sequences (``weighted_clusterings_``,
            ``degrees_``, ``weighted_directed_betweenness_``; richer
            directed/weighted measures when available).
        network_partitioning : object, optional
            Partitioning with ``sectorialized_agents__`` used by the plots.
        tdir, tname : str
            Target directory and file name for saved figures.
        measures : str
            Intended to select which measures are used ("all" by default);
            selection via exec()/eval() is not implemented yet (see comment below).
        plot_sym : bool
            If True, draw the symmetry-vs-clustering figure via ``PCA.plotSym``.
        """
        # enable selection of measures input as string
        # through measures variable and exec() or eval() methods.
        # M1: minimal measure matrix, one row per measure, one column per vertex.
        self.M1=n.array(( network_measures.weighted_clusterings_,
                           network_measures.degrees_,
                           network_measures.weighted_directed_betweenness_
                           ))
        self.pca1=PCA(self.M1)
        # Extended matrices only when directed/weighted measures are present.
        # NOTE(review): self.pca2/self.pca3 are defined only inside this branch,
        # but are used below when plot_sym or network_partitioning is given --
        # an AttributeError is possible if "in_strengths_" is absent; confirm intended.
        if "in_strengths_" in dir(network_measures):
            # M2: all basic centrality measures.
            self.M2=n.array(( network_measures.weighted_clusterings_,
                               network_measures.strengths_,
                               network_measures.in_strengths_,
                               network_measures.out_strengths_,
                               network_measures.degrees_,
                               network_measures.in_degrees_,
                               network_measures.out_degrees_,
                               network_measures.weighted_directed_betweenness_
                               ))
            # M3: M2 plus symmetry-related measures (asymmetries/disequilibrium).
            self.M3=n.array(( network_measures.weighted_clusterings_,
                               network_measures.strengths_,
                               network_measures.in_strengths_,
                               network_measures.out_strengths_,
                               network_measures.degrees_,
                               network_measures.in_degrees_,
                               network_measures.out_degrees_,
                               network_measures.weighted_directed_betweenness_,
                               network_measures.asymmetries,
                               network_measures.asymmetries_edge_mean,
                               network_measures.asymmetries_edge_std,
                               network_measures.disequilibrium,
                               network_measures.disequilibrium_edge_mean,
                               network_measures.disequilibrium_edge_std,
                               ))
            self.pca2=PCA(self.M2)
            self.pca3=PCA(self.M3)
        if plot_sym:
            # Symmetry figure uses the richest matrix (M3) decomposition.
            self.pca3.plotSym(network_partitioning,tdir,tname)
        # fig = matplotlib.pyplot.gcf()
        # fig.set_size_inches(11.,8.4)
        # p.suptitle("Symmetry prevalence over clutering for data dispersion")
        # p.subplot(311)
        # # plot degree x clust
        # p.title("degree x clustering")
        # label1=r"degree $\rightarrow$"
        # label2=r"clustering $\rightarrow$"
        # p.xlabel(label1, fontsize=15)
        # p.ylabel(label2, fontsize=15)
        # n_periphery= len(network_partitioning.sectorialized_agents__[0])
        # n_intermediary=len(network_partitioning.sectorialized_agents__[1])
        # n_hubs= len(network_partitioning.sectorialized_agents__[2])
        # M=n.array(network_measures.degrees_)
        # network_measures.degrees__=(M-M.mean())/M.std()
        # M=n.array(network_measures.weighted_clusterings_)
        # network_measures.weighted_clusterings__=(M-M.mean())/M.std()
        # p.ylim(min(network_measures.weighted_clusterings__)-1,max(network_measures.weighted_clusterings__)+1)
        # p.xlim(min(network_measures.degrees__)-1,max(network_measures.degrees__)+1)
        # p.plot(network_measures.degrees__[:n_periphery],network_measures.weighted_clusterings__[:n_periphery],"ko", ms=3.9,label="periphery")
        #
        # p.plot(network_measures.degrees__[n_periphery:n_periphery+n_intermediary],network_measures.weighted_clusterings__[n_periphery:n_periphery+n_intermediary],"k^", ms=3.9,label="intermediary")
        #
        # p.plot(network_measures.degrees__[-n_hubs:],network_measures.weighted_clusterings__[-n_hubs:],"k*", ms=3.9,label="hubs")
        #
        # #p.subplot(312)
        # #self.pca2.plot(None,network_partitioning,labels=None,tdir=None,savefig=False,clear_fig=False,title="Vertices in principal components",label1=r"PC1 - degrees and strengths",label2=r"PC3 - clustering")
        # p.subplot(212)
        # self.pca3.plot(None,network_partitioning,labels=None,tdir=None,savefig=False,clear_fig=False,title="Vertices in principal components",label1=r"PC1 - degrees and strengths",label2="PC2 - symmetry")
        # p.subplots_adjust(left=0.08,bottom=0.12,right=0.97,top=0.88,wspace=0.13,hspace=0.88)
        # p.show()
        # #p.savefig("{}/{}".format(tdir,tname))
        elif network_partitioning:
            # Save one scatter figure per decomposition when a partitioning is given.
            self.pca1.plot(tname.replace(".","_1."),network_partitioning,tdir=tdir)
            self.pca2.plot(tname.replace(".","_2."),network_partitioning,tdir=tdir)
            self.pca3.plot(tname.replace(".","_3."),network_partitioning,tdir=tdir)
class PCA:
    """Apply PCA to incoming datatable M (metrics x observations)
    Usage
    =====
    Initialize with a n x m matrix of n metrics each with m observations
    >>> foo=n.random(100)
    >>> p1=n.vstack((foo,foo))
    >>> p2=n.vstack((-foo,foo))
    >>> p3=n.vstack((foo,-foo))
    >>> M=n.hstack((p1,p2,p3))
    >>> pca=g.PCA(M)
    See attributes for information about data:
    >>> pca.eig_values  # for eigen values from greatest down
    >>> pca.eig_values_ # for a normalized eig_values
    >>> pca.eig_vectors # for eigen vectors of the eig_values
    >>> pca.eig_vectors_ # for a normalized eig_vectors
    >>> pca.C # for covariance matrix
    >>> pca.M # for initial data
    >>> pca.x # final positions in the principal component
    >>> pca.y # final positions in second principal component
    >>> pca.plot() # to plot observations in initial and final spaces
    """
    def __init__(self,*metrics,final_dimensions=2,draw=False):
        """Decompose the stacked metrics into principal components.

        Parameters
        ----------
        *metrics : array-likes
            One or more 1-D metric vectors (or a single n x m matrix),
            stacked row-wise with ``n.vstack``.
        final_dimensions : int
            Number of leading principal components kept in ``feature_vec``
            and ``final_data``. Default: 2.
        draw : bool
            Unused; kept for interface compatibility.
        """
        M=n.vstack(metrics)
        # zscore: # USE NUMPY.stats.zscore(M, axis=1, ddof=1)
        self.M_=n.copy(M)  # raw (un-normalized) data, used by plotSym
        # z-score each metric row; constant rows are zeroed to avoid 0/0
        for i in range(M.shape[0]):
            if M[i].std():
                M[i]=(M[i]-M[i].mean())/M[i].std()
            else:
                M[i]=0.
        # convariance matrix:
        self.C=n.cov(M,bias=1)
        self.M=M
        eig_values, eig_vectors = n.linalg.eig(self.C)
        # Ordering eigenvalues and eigenvectors from greatest down
        args=n.argsort(eig_values)[::-1]
        self.eig_values=eig_values[args]
        # percentage of total (absolute) variance carried by each component
        self.eig_values_=100*self.eig_values/n.sum(n.abs(self.eig_values))
        self.eig_vectors=eig_vectors[:,args]
        # each eigenvector rescaled so its absolute loadings sum to 100
        self.eig_vectors_=n.array([100*self.eig_vectors[:,i]/n.abs(self.eig_vectors[:,i]).sum() for i in range(self.eig_vectors.shape[1])]).T
        # retaining only the first final_dimensions eigenvectors
        self.feature_vec=self.eig_vectors[:,:final_dimensions]
        self.feature_vec_=n.array([100*self.feature_vec[:,i]/n.abs(self.feature_vec[:,i]).sum() for i in range(self.feature_vec.shape[1])]).T
        # project observations onto the retained components
        self.final_data=n.dot(M.T,self.feature_vec)
        self.x=self.final_data[:,0]  # positions along first principal component
        self.y=self.final_data[:,1]  # positions along second principal component
    def plotSym(self,network_partitioning,tdir,tname):
        """Plot clustering (top) and PC2 (bottom) against degree and save the figure.

        Assumes this PCA was built from the M3 layout of NetworkPCA, where
        row 4 of the raw matrix holds the degrees and row 0 the clustering
        coefficients.

        Parameters
        ----------
        network_partitioning : object
            Provides ``sectorialized_degrees__`` (class boundaries) and
            ``sectorialized_agents__`` (vertex groups: periphery,
            intermediary, hubs -- in that order).
        tdir, tname : str
            Output directory and file name for the saved figure.
        """
        self.x_=self.M_[4] # degrees (row 4 of raw M3 matrix)
        self.cc=self.M[0]  # clustering (z-scored)
        p.figure(figsize=(7.,4.))
        p.subplots_adjust(left=0.09,bottom=0.16,right=0.95,top=0.87,hspace=0.04)
        # class boundaries in degree: periphery < kl <= intermediary < kr <= hubs
        kl=max(network_partitioning.sectorialized_degrees__[0])+.5
        kr=max(network_partitioning.sectorialized_degrees__[1])+.5
        p.suptitle("Symmetry-related and clustering coefficient components along connectivity")
        p.subplot(211)
        # vertical boundary lines between vertex classes
        p.plot((kl,kl),(-1000,1000),"g--",linewidth=3)
        p.plot((kr,kr),(-1000,1000),"g--",linewidth=3)
        p.xticks((),())
        label2=r"clustering $\rightarrow$"
        p.ylabel(label2, fontsize=15)
        n_periphery= len(network_partitioning.sectorialized_agents__[0])
        n_intermediary=len(network_partitioning.sectorialized_agents__[1])
        n_hubs= len(network_partitioning.sectorialized_agents__[2])
        p.ylim(min(self.cc)-0.9,max(self.cc)+.9)
        p.xlim(min(self.x_)-0.1,max(self.x_)+.1)
        p.plot(self.x_[:n_periphery], self.cc[:n_periphery],"ko", label="periphery" ,ms=10,alpha=.4)
        p.plot(self.x_[n_periphery:n_periphery+n_intermediary],self.cc[n_periphery:n_periphery+n_intermediary],"k^", label="intermediary",ms=10,alpha=.4)
        p.plot(self.x_[n_periphery+n_intermediary:], self.cc[-n_hubs:],"k*", label="hubs" ,ms=10,alpha=.4)
        p.legend(bbox_to_anchor=(0.17, .71, .8, .2), loc=3,
           ncol=3, mode="expand", borderaxespad=0.)
        p.subplot(212)
        p.plot((kl,kl),(-1000,1000),"g--",linewidth=3)
        p.plot((kr,kr),(-1000,1000),"g--",linewidth=3)
        p.xticks((kl,kr),(r"k_L",r"k_R"))
        p.ylabel(r"PC2 $\rightarrow$", fontsize=15)
        p.xlabel(r"degree $\rightarrow$", fontsize=15)
        p.ylim(min(self.y) -0.9,max(self.y)+ 0.9)
        p.xlim(min(self.x_)-0.1,max(self.x_)+0.1)
        # fixed label typo: "perihpery" -> "periphery"
        p.plot(self.x_[:n_periphery],self.y[:n_periphery],"ko", label="periphery" ,ms=10,alpha=.4)
        p.plot(self.x_[n_periphery:n_periphery+n_intermediary],self.y[n_periphery:n_periphery+n_intermediary],"k^", label="intermediary",ms=10,alpha=.4)
        p.plot(self.x_[-n_hubs:],self.y[-n_hubs:],"k*", label="hubs" ,ms=10,alpha=.4)
        p.savefig("{}/{}".format(tdir,tname))
    def plot(self, tname="sample.png", network_partitioning=False,labels="full", tdir=".",savefig=True,clear_fig=True,title="Vertex plot in principal components (PCA)",label1="PC1",label2="PC2"):
        """Scatter the observations in PC space (and in the first two raw metrics).

        Parameters
        ----------
        tname : str
            File name of the PC-space figure; the raw-space figure is saved
            as ``Initial<tname>.png``.
        network_partitioning : object or False
            When given, vertices are split into periphery/intermediary/hubs
            groups with distinct markers; otherwise a single series is drawn.
        labels : str or None
            "full" appends component loadings and eigenvalues to the axis
            labels and title; any other value leaves them as passed.
        tdir : str
            Output directory.
        savefig, clear_fig : bool
            Whether to save the PC-space figure / clear before drawing.
        title, label1, label2 : str
            Base title and axis labels.
        """
        if clear_fig:
            p.clf()
        if labels=="full":
            # append the (normalized) loadings of PC1/PC2 to the axis labels
            foo=self.feature_vec_[:,0]
            foo__=("%.2f, "*len(foo)) % tuple(foo)
            label1+=" " + foo__
            foo=self.feature_vec_[:,1]
            foo__=("%.2f, "*len(foo)) % tuple(foo)
            label2+=" " +foo__
            # append the leading (percentage) eigenvalues to the title
            foo=(self.eig_values_[:4])
            foo__=r"$\lambda = $"+("%.2f, "*len(foo) % tuple(foo))
            title+=" "+foo__
        if labels=="sym":
            pass
        p.xlabel(label1, fontsize=15)
        p.ylabel(label2, fontsize=15)
        p.title(title)
        p.ylim(min(self.y)-1,max(self.y)+1)
        p.xlim(min(self.x)-1,max(self.x)+1)
        if not network_partitioning:
            p.plot(self.x,self.y,"k^", ms=3.9,label="intermediary")
        else:
            n_periphery= len(network_partitioning.sectorialized_agents__[0])
            n_intermediary=len(network_partitioning.sectorialized_agents__[1])
            n_hubs= len(network_partitioning.sectorialized_agents__[2])
            # fixed legend bugs: "perihpery" typo and the intermediary
            # series previously carried a duplicate "hubs" label
            p.plot(self.x[:n_periphery],self.y[:n_periphery],"k^", ms=3.9,label="periphery")
            p.plot(self.x[n_periphery:n_periphery+n_intermediary],self.y[n_periphery:n_periphery+n_intermediary],"k*", ms=3.9,label="intermediary")
            p.plot(self.x[-n_hubs:],self.y[-n_hubs:],"ro", ms=3.9,label="hubs")
        p.legend()
        if savefig:
            p.savefig("{}/{}".format(tdir,tname))
        # also plot the observations in the space of the first two raw metrics
        x=self.M[0]
        y=self.M[1]
        if clear_fig:
            p.clf()
        p.plot(x,y,"go", ms=3.9,label="intermediary")
        p.savefig("{}/Initial{}.png".format(tdir,tname))
| [
"numpy.abs",
"numpy.argsort",
"pylab.figure",
"pylab.subplots_adjust",
"pylab.title",
"numpy.copy",
"pylab.ylabel",
"numpy.linalg.eig",
"pylab.xticks",
"pylab.suptitle",
"pylab.xlabel",
"numpy.cov",
"pylab.legend",
"pylab.subplot",
"numpy.dot",
"numpy.vstack",
"numpy.array",
"pylab... | [((621, 750), 'numpy.array', 'n.array', (['(network_measures.weighted_clusterings_, network_measures.degrees_,\n network_measures.weighted_directed_betweenness_)'], {}), '((network_measures.weighted_clusterings_, network_measures.degrees_,\n network_measures.weighted_directed_betweenness_))\n', (628, 750), True, 'import numpy as n, pylab as p, matplotlib\n'), ((5960, 5977), 'numpy.vstack', 'n.vstack', (['metrics'], {}), '(metrics)\n', (5968, 5977), True, 'import numpy as n, pylab as p, matplotlib\n'), ((6056, 6065), 'numpy.copy', 'n.copy', (['M'], {}), '(M)\n', (6062, 6065), True, 'import numpy as n, pylab as p, matplotlib\n'), ((6267, 6283), 'numpy.cov', 'n.cov', (['M'], {'bias': '(1)'}), '(M, bias=1)\n', (6272, 6283), True, 'import numpy as n, pylab as p, matplotlib\n'), ((6335, 6355), 'numpy.linalg.eig', 'n.linalg.eig', (['self.C'], {}), '(self.C)\n', (6347, 6355), True, 'import numpy as n, pylab as p, matplotlib\n'), ((7021, 7049), 'numpy.dot', 'n.dot', (['M.T', 'self.feature_vec'], {}), '(M.T, self.feature_vec)\n', (7026, 7049), True, 'import numpy as n, pylab as p, matplotlib\n'), ((7458, 7486), 'pylab.figure', 'p.figure', ([], {'figsize': '(7.0, 4.0)'}), '(figsize=(7.0, 4.0))\n', (7466, 7486), True, 'import numpy as n, pylab as p, matplotlib\n'), ((7492, 7568), 'pylab.subplots_adjust', 'p.subplots_adjust', ([], {'left': '(0.09)', 'bottom': '(0.16)', 'right': '(0.95)', 'top': '(0.87)', 'hspace': '(0.04)'}), '(left=0.09, bottom=0.16, right=0.95, top=0.87, hspace=0.04)\n', (7509, 7568), True, 'import numpy as n, pylab as p, matplotlib\n'), ((7877, 7968), 'pylab.suptitle', 'p.suptitle', (['"""Symmetry-related and clutering coefficient components along connectivity"""'], {}), "(\n 'Symmetry-related and clutering coefficient components along connectivity')\n", (7887, 7968), True, 'import numpy as n, pylab as p, matplotlib\n'), ((7972, 7986), 'pylab.subplot', 'p.subplot', (['(211)'], {}), '(211)\n', (7981, 7986), True, 'import numpy as n, pylab as p, 
matplotlib\n'), ((7995, 8046), 'pylab.plot', 'p.plot', (['(kl, kl)', '(-1000, 1000)', '"""g--"""'], {'linewidth': '(3)'}), "((kl, kl), (-1000, 1000), 'g--', linewidth=3)\n", (8001, 8046), True, 'import numpy as n, pylab as p, matplotlib\n'), ((8050, 8101), 'pylab.plot', 'p.plot', (['(kr, kr)', '(-1000, 1000)', '"""g--"""'], {'linewidth': '(3)'}), "((kr, kr), (-1000, 1000), 'g--', linewidth=3)\n", (8056, 8101), True, 'import numpy as n, pylab as p, matplotlib\n'), ((8105, 8121), 'pylab.xticks', 'p.xticks', (['()', '()'], {}), '((), ())\n', (8113, 8121), True, 'import numpy as n, pylab as p, matplotlib\n'), ((8285, 8314), 'pylab.ylabel', 'p.ylabel', (['label2'], {'fontsize': '(15)'}), '(label2, fontsize=15)\n', (8293, 8314), True, 'import numpy as n, pylab as p, matplotlib\n'), ((8884, 8984), 'pylab.plot', 'p.plot', (['self.x_[:n_periphery]', 'self.cc[:n_periphery]', '"""ko"""'], {'label': '"""periphery"""', 'ms': '(10)', 'alpha': '(0.4)'}), "(self.x_[:n_periphery], self.cc[:n_periphery], 'ko', label=\n 'periphery', ms=10, alpha=0.4)\n", (8890, 8984), True, 'import numpy as n, pylab as p, matplotlib\n'), ((9038, 9201), 'pylab.plot', 'p.plot', (['self.x_[n_periphery:n_periphery + n_intermediary]', 'self.cc[n_periphery:n_periphery + n_intermediary]', '"""k^"""'], {'label': '"""intermediary"""', 'ms': '(10)', 'alpha': '(0.4)'}), "(self.x_[n_periphery:n_periphery + n_intermediary], self.cc[\n n_periphery:n_periphery + n_intermediary], 'k^', label='intermediary',\n ms=10, alpha=0.4)\n", (9044, 9201), True, 'import numpy as n, pylab as p, matplotlib\n'), ((9192, 9299), 'pylab.plot', 'p.plot', (['self.x_[n_periphery + n_intermediary:]', 'self.cc[-n_hubs:]', '"""k*"""'], {'label': '"""hubs"""', 'ms': '(10)', 'alpha': '(0.4)'}), "(self.x_[n_periphery + n_intermediary:], self.cc[-n_hubs:], 'k*',\n label='hubs', ms=10, alpha=0.4)\n", (9198, 9299), True, 'import numpy as n, pylab as p, matplotlib\n'), ((9541, 9642), 'pylab.legend', 'p.legend', ([], {'bbox_to_anchor': '(0.17, 
0.71, 0.8, 0.2)', 'loc': '(3)', 'ncol': '(3)', 'mode': '"""expand"""', 'borderaxespad': '(0.0)'}), "(bbox_to_anchor=(0.17, 0.71, 0.8, 0.2), loc=3, ncol=3, mode=\n 'expand', borderaxespad=0.0)\n", (9549, 9642), True, 'import numpy as n, pylab as p, matplotlib\n'), ((9669, 9683), 'pylab.subplot', 'p.subplot', (['(212)'], {}), '(212)\n', (9678, 9683), True, 'import numpy as n, pylab as p, matplotlib\n'), ((9692, 9743), 'pylab.plot', 'p.plot', (['(kl, kl)', '(-1000, 1000)', '"""g--"""'], {'linewidth': '(3)'}), "((kl, kl), (-1000, 1000), 'g--', linewidth=3)\n", (9698, 9743), True, 'import numpy as n, pylab as p, matplotlib\n'), ((9747, 9798), 'pylab.plot', 'p.plot', (['(kr, kr)', '(-1000, 1000)', '"""g--"""'], {'linewidth': '(3)'}), "((kr, kr), (-1000, 1000), 'g--', linewidth=3)\n", (9753, 9798), True, 'import numpy as n, pylab as p, matplotlib\n'), ((9802, 9836), 'pylab.xticks', 'p.xticks', (['(kl, kr)', "('k_L', 'k_R')"], {}), "((kl, kr), ('k_L', 'k_R'))\n", (9810, 9836), True, 'import numpy as n, pylab as p, matplotlib\n'), ((9844, 9887), 'pylab.ylabel', 'p.ylabel', (['"""PC2 $\\\\rightarrow$"""'], {'fontsize': '(15)'}), "('PC2 $\\\\rightarrow$', fontsize=15)\n", (9852, 9887), True, 'import numpy as n, pylab as p, matplotlib\n'), ((9896, 9942), 'pylab.xlabel', 'p.xlabel', (['"""degree $\\\\rightarrow$"""'], {'fontsize': '(15)'}), "('degree $\\\\rightarrow$', fontsize=15)\n", (9904, 9942), True, 'import numpy as n, pylab as p, matplotlib\n'), ((11026, 11124), 'pylab.plot', 'p.plot', (['self.x_[:n_periphery]', 'self.y[:n_periphery]', '"""ko"""'], {'label': '"""perihpery"""', 'ms': '(10)', 'alpha': '(0.4)'}), "(self.x_[:n_periphery], self.y[:n_periphery], 'ko', label='perihpery',\n ms=10, alpha=0.4)\n", (11032, 11124), True, 'import numpy as n, pylab as p, matplotlib\n'), ((11179, 11341), 'pylab.plot', 'p.plot', (['self.x_[n_periphery:n_periphery + n_intermediary]', 'self.y[n_periphery:n_periphery + n_intermediary]', '"""k^"""'], {'label': '"""intermediary"""', 'ms': 
'(10)', 'alpha': '(0.4)'}), "(self.x_[n_periphery:n_periphery + n_intermediary], self.y[\n n_periphery:n_periphery + n_intermediary], 'k^', label='intermediary',\n ms=10, alpha=0.4)\n", (11185, 11341), True, 'import numpy as n, pylab as p, matplotlib\n'), ((11332, 11417), 'pylab.plot', 'p.plot', (['self.x_[-n_hubs:]', 'self.y[-n_hubs:]', '"""k*"""'], {'label': '"""hubs"""', 'ms': '(10)', 'alpha': '(0.4)'}), "(self.x_[-n_hubs:], self.y[-n_hubs:], 'k*', label='hubs', ms=10,\n alpha=0.4)\n", (11338, 11417), True, 'import numpy as n, pylab as p, matplotlib\n'), ((12551, 12580), 'pylab.xlabel', 'p.xlabel', (['label1'], {'fontsize': '(15)'}), '(label1, fontsize=15)\n', (12559, 12580), True, 'import numpy as n, pylab as p, matplotlib\n'), ((12589, 12618), 'pylab.ylabel', 'p.ylabel', (['label2'], {'fontsize': '(15)'}), '(label2, fontsize=15)\n', (12597, 12618), True, 'import numpy as n, pylab as p, matplotlib\n'), ((12650, 12664), 'pylab.title', 'p.title', (['title'], {}), '(title)\n', (12657, 12664), True, 'import numpy as n, pylab as p, matplotlib\n'), ((13431, 13441), 'pylab.legend', 'p.legend', ([], {}), '()\n', (13439, 13441), True, 'import numpy as n, pylab as p, matplotlib\n'), ((913, 1212), 'numpy.array', 'n.array', (['(network_measures.weighted_clusterings_, network_measures.strengths_,\n network_measures.in_strengths_, network_measures.out_strengths_,\n network_measures.degrees_, network_measures.in_degrees_,\n network_measures.out_degrees_, network_measures.\n weighted_directed_betweenness_)'], {}), '((network_measures.weighted_clusterings_, network_measures.\n strengths_, network_measures.in_strengths_, network_measures.\n out_strengths_, network_measures.degrees_, network_measures.in_degrees_,\n network_measures.out_degrees_, network_measures.\n weighted_directed_betweenness_))\n', (920, 1212), True, 'import numpy as n, pylab as p, matplotlib\n'), ((1408, 1948), 'numpy.array', 'n.array', (['(network_measures.weighted_clusterings_, 
network_measures.strengths_,\n network_measures.in_strengths_, network_measures.out_strengths_,\n network_measures.degrees_, network_measures.in_degrees_,\n network_measures.out_degrees_, network_measures.\n weighted_directed_betweenness_, network_measures.asymmetries,\n network_measures.asymmetries_edge_mean, network_measures.\n asymmetries_edge_std, network_measures.disequilibrium, network_measures\n .disequilibrium_edge_mean, network_measures.disequilibrium_edge_std)'], {}), '((network_measures.weighted_clusterings_, network_measures.\n strengths_, network_measures.in_strengths_, network_measures.\n out_strengths_, network_measures.degrees_, network_measures.in_degrees_,\n network_measures.out_degrees_, network_measures.\n weighted_directed_betweenness_, network_measures.asymmetries,\n network_measures.asymmetries_edge_mean, network_measures.\n asymmetries_edge_std, network_measures.disequilibrium, network_measures\n .disequilibrium_edge_mean, network_measures.disequilibrium_edge_std))\n', (1415, 1948), True, 'import numpy as n, pylab as p, matplotlib\n'), ((6417, 6438), 'numpy.argsort', 'n.argsort', (['eig_values'], {}), '(eig_values)\n', (6426, 6438), True, 'import numpy as n, pylab as p, matplotlib\n'), ((11772, 11779), 'pylab.clf', 'p.clf', ([], {}), '()\n', (11777, 11779), True, 'import numpy as n, pylab as p, matplotlib\n'), ((12803, 12861), 'pylab.plot', 'p.plot', (['self.x', 'self.y', '"""k^"""'], {'ms': '(3.9)', 'label': '"""intermediary"""'}), "(self.x, self.y, 'k^', ms=3.9, label='intermediary')\n", (12809, 12861), True, 'import numpy as n, pylab as p, matplotlib\n'), ((13122, 13210), 'pylab.plot', 'p.plot', (['self.x[:n_periphery]', 'self.y[:n_periphery]', '"""k^"""'], {'ms': '(3.9)', 'label': '"""perihpery"""'}), "(self.x[:n_periphery], self.y[:n_periphery], 'k^', ms=3.9, label=\n 'perihpery')\n", (13128, 13210), True, 'import numpy as n, pylab as p, matplotlib\n'), ((13215, 13354), 'pylab.plot', 'p.plot', (['self.x[n_periphery:n_periphery + 
n_intermediary]', 'self.y[n_periphery:n_periphery + n_intermediary]', '"""k*"""'], {'ms': '(3.9)', 'label': '"""hubs"""'}), "(self.x[n_periphery:n_periphery + n_intermediary], self.y[n_periphery\n :n_periphery + n_intermediary], 'k*', ms=3.9, label='hubs')\n", (13221, 13354), True, 'import numpy as n, pylab as p, matplotlib\n'), ((13355, 13425), 'pylab.plot', 'p.plot', (['self.x[-n_hubs:]', 'self.y[-n_hubs:]', '"""ro"""'], {'ms': '(3.9)', 'label': '"""hubs"""'}), "(self.x[-n_hubs:], self.y[-n_hubs:], 'ro', ms=3.9, label='hubs')\n", (13361, 13425), True, 'import numpy as n, pylab as p, matplotlib\n'), ((13622, 13670), 'pylab.plot', 'p.plot', (['x', 'y', '"""go"""'], {'ms': '(3.9)', 'label': '"""intermediary"""'}), "(x, y, 'go', ms=3.9, label='intermediary')\n", (13628, 13670), True, 'import numpy as n, pylab as p, matplotlib\n'), ((6537, 6559), 'numpy.abs', 'n.abs', (['self.eig_values'], {}), '(self.eig_values)\n', (6542, 6559), True, 'import numpy as n, pylab as p, matplotlib\n'), ((13602, 13609), 'pylab.clf', 'p.clf', ([], {}), '()\n', (13607, 13609), True, 'import numpy as n, pylab as p, matplotlib\n'), ((6667, 6696), 'numpy.abs', 'n.abs', (['self.eig_vectors[:, i]'], {}), '(self.eig_vectors[:, i])\n', (6672, 6696), True, 'import numpy as n, pylab as p, matplotlib\n'), ((6915, 6944), 'numpy.abs', 'n.abs', (['self.feature_vec[:, i]'], {}), '(self.feature_vec[:, i])\n', (6920, 6944), True, 'import numpy as n, pylab as p, matplotlib\n')] |
from sys import path
path.append('./sunxspex')
from sunxspex import thermal_spectrum
from astropy import units as u
from astropy import constants as const
import numpy as np
from sunxspex.io import chianti_kev_cont_common_load, load_xray_abundances
from scipy.stats.mstats import gmean
from scipy.interpolate import interp1d
from copy import copy
import sunpy
class f_vth:
def __init__(self, energies=None, astropy_conversion=True):
"""Class f_vth combines the outputs of the chianti_kev_lines code already available in Sunxspex and the
output of my translation of chianti_kev_cont.pro.
Parameters:
-----------
energies : `astropy.units.Quantity` (list with units u.keV)
A list of energy bin edges. Can be arbitrary here since this is only needed to initiate the ChiantiThermalSpectrum
class.
astropy_conversion: bool
Use the angstrum to keV conversion from astropy or IDL.
Default: True
Notes
-----
The energy attribute, ChiantiThermalSpectrum().energy_edges_keV, is changed to energies you specify later
when you run the class method "f_vth." This means that the chianti_kev_lines code can be used multiple times while
the ChiantiThermalSpectrum class is only initialised once, speeds things up when fitting.
"""
# initialise the ChiantiThermalSpectrum class with the energies input so we can use the chianti_kev_lines code
self.f_vth4lines = thermal_spectrum.ChiantiThermalSpectrum(energies, abundance_type="sun_coronal_ext")
# load in everything for the chianti_kev_cont code of "mine". This only needs done once so do it here.
self.continuum_info = chianti_kev_cont_common_load()
self.abundance = load_xray_abundances(abundance_type="sun_coronal")
if astropy_conversion:
self.conversion = (const.h * const.c / u.AA).to_value(u.keV)
else:
self.conversion = self.continuum_info[1]['edge_str']['CONVERSION'] # keV to A conversion, ~12.39854
self.ewvl = self.conversion/self.continuum_info[1]['edge_str']['WVL'] # wavelengths from A to keV
self.wwvl = np.diff(self.continuum_info[1]['edge_str']['WVLEDGE']) # 'wavestep' in IDL
self.nwvl = len(self.ewvl)
self.logt = np.log10(self.continuum_info[1]['ctemp'])
# all of this could be handled as global variables in the script and then just have functions, i.e., remove the need for classes.
def call_sunxspex_with_energies(self, energy=None, temperature=None, emission_measure=None, **kwargs):
"""
Returns the line contribution to the overall spectrum for a plasma at a temperature and emission measure.
Parameters
----------
energy: `astropy.units.Quantity` (list with units u.keV)
A list of energy bin edges.
temperature: `astropy.units.Quantity`
The electron temperature of the plasma.
emission_measure: `astropy.units.Quantity`
The emission measure of the emitting plasma.
Returns
-------
Flux: Dimensionless list only because I just return the value, but the units should be ph s^-1 cm^-2 keV^-1
"""
# change the energies the class method will use to the ones you provide to the function
# I could initialise the class with the correct energies to begin with but the f_vth function needs them as
# the first input anyway for the fitting I was doing so this makes sure things stay consistent
self.f_vth4lines.energy_edges_keV = energy.value
# return the fluxes from the atomic lines from a plasma with a T and EM at coroanl abundances
return self.f_vth4lines.chianti_kev_lines(temperature, emission_measure, **kwargs).value
def chianti_kev_units(self, spectrum, funits, wedg, kev=False, earth=False, date=None):
"""
An IDL routine to convert to the correct units. Making sure we are in keV^-1 and cm^-2 using the Sun-to-Earth distance.
I feel this is almost useless now but I wrote it to be consistent with IDL.
From: https://hesperia.gsfc.nasa.gov/ssw/packages/xray/idl/chianti_kev_units.pro
Parameters
----------
spectrum: 1-D array your fluxes
A list of energy bin edges.
funits: int
Is you want to input a custom distance from the Sun rather than letting it be the Earth then this could be set
to the distnace in cm squared..
wedg: 1-D array
Width of the energy bins that correspond to the input spectrum.
kev: bool
Would you like your units in keV^-1? Then set to True.
Default: False
earth: bool
Would you like your units in cm^-2 using the Sun-to-Earth distance? Then set to True.
Default: False
date: `astropy.time.Time`
If earth=True and you want the distance to be on a certain date then pass an astrpy time here.
Default: None
Returns
-------
Flux: Dimensionless list only because I just return the value, but the units should be ph s^-1 cm^-2 keV^-1
"""
# date is an `astropy.time.Time`
if kev:
if earth:
thisdist = 1.49627e13 # cm, default in these scripts is from2 April 1992 for some reason
if type(date)!=type(None):
thisdist = sunpy.coordinates.get_sunearth_distance(time=date).to(u.cm)
funits = thisdist**2 #per cm^2, unlike mewe_kev don't use 4pi, chianti is per steradian
funits = (1e44/funits)/ wedg
# Nominally 1d44/funits is 4.4666308e17 and alog10(4.4666e17) is 17.64998
# That's for emisson measure of 1d44cm-3, so for em of 1d49cm-3 we have a factor whos log10 is 22.649, just like kjp
spectrum = spectrum * funits
return spectrum
def chianti_kev_cont(self, energy=None, temperature=None, use_interpol=True):
"""
Returns the continuum contribution from a plasma of a given temperature and emission measure.
From: https://hesperia.gsfc.nasa.gov/ssw/packages/xray/idl/chianti_kev_cont.pro
Parameters
----------
energy: `astropy.units.Quantity` (list with units u.keV)
A list of energy bin edges. Each entry is an energy bin, e.g., [[1,1.5], [1.5,2], ...].
temperature: `astropy.units.Quantity`
The electron temperature of the plasma.
Default: 5 MK
use_interpol: bool
Set to True if you want to interpolate to your energy values in the grid. The alternative is not set up yet so this
can only be True at the minute.
Returns
-------
Flux: Dimensionless list but the units should be ph s^-1 cm^-2 keV^-1. The output will be scaled by 1e49.
"""
# temp is a temperature in MK. E.g., temp=5
# energy is a list of energy bin boundaries in keV. E.g., [[1,1.5], [1.5,2], [2,2.5], ...]
# Need a default temperature?
if type(temperature)==type(None):
temperature = 5 # MK
else:
temperature = temperature.value
# Need default energies?
if type(energy)==type(None):
width = 0.006
en_lo = np.arange(3, 9, 0.006)[:,None] # [:,None] to give a second axis, each entry is now a row
en_hi = np.arange(3.006, 9.006, 0.006)[:,None] # these are just default energies
energy = np.concatenate((en_lo, en_hi), axis=1)
else:
energy = energy.value
# set up all grid information that was loaded when the class was initialised
continuum_info = self.continuum_info# chianti_kev_cont_common_load()
abundance = self.abundance #load_xray_abundances(abundance_type="sun_coronal")
conversion = self.conversion # keV to A conversion, ~12.39854
mgtemp = temperature * 1e6
u = np.log10(mgtemp)
#Add in continuum
wedg = np.diff(energy).reshape((len(energy)))
ewvl = self.ewvl # wavelengths from A to keV
wwvl = self.wwvl # 'wavestep' in IDL
nwvl = self.nwvl # number of grid wavelengths
# print("Min/max energies [keV]: ",np.min(ewvl), np.max(ewvl))
logt = self.logt # grid temperatures = log(temperature)
ntemp = len(logt)
selt = np.argwhere(logt<=u)[-1] # what gap does my temp land in the logt array (inclusive of the lower boundary)
indx = np.clip([selt-1, selt, selt+1], 0, ntemp-1) # find the indexes either side of that gap
tband = logt[indx]
s=1
x0, x1, x2 = tband[0][0], tband[1][0], tband[2][0] # temperatures either side of that gap
# print("Min/max temperatures [MK]: ",np.min(continuum_info[1]['ctemp'])/1e6, np.max(continuum_info[1]['ctemp'])/1e6)
ewvl_exp = ewvl.reshape((1,len(ewvl))) # reshape for matrix multiplication
# all wavelengths divided by corresponding temp[0] (first row), then exvl/temp[1] second row, exvl/temp[2] third row
# inverse boltzmann factor of hv/kT and 11.6e6 from keV-to-J conversion over k = 1.6e-16 / 1.381e-23 ~ 11.6e6
exponential = (np.ones((3,1)) @ ewvl_exp) / ((10**logt[indx]/11.6e6) @ np.ones((1,nwvl)))
exponential = np.exp(np.clip(exponential, None, 80)) # not sure why clipping at 80
# this is just from dE/dA = E/A from E=hc/A (A=wavelength) for change of variables from Angstrom to keV: dE = dA * (E/A)
# have this repeated for 3 rows since this is the form of the expontial's different temps
# np.matmul() is quicker than @ I think
deltae = np.matmul(np.ones((3,1)), wwvl.reshape((1,len(ewvl)))) * (ewvl / continuum_info[1]['edge_str']['WVL'])
gmean_en = gmean(energy, axis=1) # geometric mean of each energy boundary pair
# We include default_abundance because it will have zeroes for elements not included
# and ones for those included
default_abundance = abundance * 0.0
zindex = continuum_info[0]
default_abundance[zindex] = 1.0
select = np.where(default_abundance>0)
tcont = gmean_en * 0.0
spectrum = copy(tcont) # make a copy otherwise changing the original tcont changes spectrum
abundance_ratio = 1.0 + abundance*0.0
# none of this yet
# if keyword_set( rel_abun) then $
# abundance_ratio[rel_abun[0,*]-1] = rel_abun[1,*]
abundance_ratio = (default_abundance*abundance*abundance_ratio) # this is just "abundance", not sure how necessary the abundance lines down to here are in this situation in Python.
# Maybe to double check the files match up? Since "select" should == "np.sort(zindex)"
# first index is for abundance elements, middle index for totcont stuff is temp, third is for the wavelengths
# the wavelength dimension is weird because it is split into totcont_lo and totcont.
# totcont_lo is the continuum <1 keV I think and totcont is >=1 keV, so adding the wavelength dimension of each of these you get the number of wavlengths provided by continuum_info[1]['edge_str']['WVL']
# look here for more info on how the CHIANTI file is set-up **** https://hesperia.gsfc.nasa.gov/ssw/packages/xray/idl/setup_chianti_cont.pro ****
# this exact script won't create the folder Python is using the now since some of the wavelengths and deltas don't match-up
totcontindx = np.concatenate((continuum_info[1]["totcont_lo"][:, indx.T[0], :], continuum_info[1]["totcont"][:, indx.T[0], :]), axis=2) # isolate temps and then combine along wavelength axis
# careful from here on out. IDL's indexes are backwards to Pythons
# Python's a[:,:,0] == IDL's a[0,*,*], a[:,0,:] == a[*,0,*], and then a[0,:,:] == a[*,*,0]
tcdbase = totcontindx # double(totcontindx[*, *, *])
tcd = totcontindx[0,:,:] #get the first abundances continuum info #double(totcontindx[*, *, 0])
# for each temperature, multiply through by the abundances
tcd = np.tensordot(abundance_ratio[select],tcdbase,axes=([0],[0]))
# work in log space for the temperatures
u = np.log(u)
x1, x0, x2 = np.log(x1), np.log(x0), np.log(x2)
# convert to per keV with deltae and then scale the continuum values by exponential
gaunt = tcd/deltae * exponential # the 3 temps and all wavelengths
# use_interpol = True # False #
# define valid range
vrange = np.where(gaunt[0,:]>0) # no. of entries = nrange, temp1 ranges
nrange = len(vrange[0])
vrange1 = np.where(gaunt[1,:]>0) # no. of entries = nrange1, temp2 ranges
nrange1 = len(vrange1[0])
vrange = vrange if nrange<nrange1 else vrange1
vrange1 = np.where(gaunt[2,:]>0) # no. of entries = nrange1, temp3 ranges
nrange1 = len(vrange1[0])
vrange = vrange if nrange<nrange1 else vrange1
gaunt = gaunt[:,vrange[0]]
ewvl = ewvl[vrange[0]]
maxe = ewvl[0]
vgmean = np.where(gmean_en<maxe)
nvg = len(vgmean[0])
if nvg>1:
gmean_en = gmean_en[vgmean[0]]
# print(gaunt[0,:].shape, ewvl.shape, gmean_en.shape)
if use_interpol:
cont0 = interp1d(ewvl, gaunt[0,:])(gmean_en) # get the continuum values at input energies from the CHIANTI file as temp1
cont1 = interp1d(ewvl, gaunt[1,:])(gmean_en) # temp2
cont2 = interp1d(ewvl, gaunt[2,:])(gmean_en) # temp2
else:
return
# don't really see the point in this at the moment
# venergy = np.where(energy[:,1]<maxe) # only want energies <max from the CHIANTI file
# energyv = energy[venergy[0],:]
# wen = np.diff(energyv)[:,0]
# edges_in_kev = conversion / continuum_info[1]['edge_str']['WVLEDGE']
# edges_in_kev = edges_in_kev.reshape((len(edges_in_kev), 1))
# e2 = np.concatenate((edges_in_kev[:-1], edges_in_kev[1:]), axis=1)[vrange[0],:]
# # this obviously isn't the same as the IDL script but just to continue
# cont0_func = interp1d(np.mean(e2, axis=1), gaunt[0,:]*abs(np.diff(e2)[:,0]))
# cont0 = cont0_func(np.mean(energyv, axis=1))/wen
# cont1_func = interp1d(np.mean(e2, axis=1), gaunt[1,:]*abs(np.diff(e2)[:,0]))
# cont1 = cont1_func(np.mean(energyv, axis=1))/wen
# cont2_func = interp1d(np.mean(e2, axis=1), gaunt[2,:]*abs(np.diff(e2)[:,0]))
# cont2 = cont2_func(np.mean(energyv, axis=1))/wen
# work in log space with the temperatures
cont0, cont1, cont2 = np.log(cont0), np.log(cont1), np.log(cont2)
# now find weighted average of the continuum values at each temperature
# i.e., weight_in_relation_to_u_for_cont0_which_is_at_x0 = w0 = (u-x1) * (u-x2) / ((x0-x1) * (x0-x2))
# also w0+w1+w2=1, so the weights are normalised which is why we don't divide by the sum of the weights for the average
ynew = np.exp( cont0 * (u-x1) * (u-x2) / ((x0-x1) * (x0-x2)) +
cont1 * (u-x0) * (u-x2) / ((x1-x0) * (x1-x2)) +
cont2 * (u-x0) * (u-x1) / ((x2-x0) * (x2-x1)))
tcont[vgmean[0]] = tcont[vgmean[0]] + ynew
# scale values back by the exponential
tcont[vgmean[0]] = tcont[vgmean[0]] * np.exp( -np.clip((gmean_en/(temperature/11.6)), None, 80)) # no idea why this is clipped at 80 again
spectrum[vgmean[0]] = spectrum[vgmean[0]] + tcont[vgmean[0]] * wedg[vgmean[0]]
funits = 1. #default units
# now need https://hesperia.gsfc.nasa.gov/ssw/packages/xray/idl/chianti_kev_units.pro
# chianti_kev_units, spectrum, funits, kev=kev, wedg=wedg, earth=earth, date=date
spectrum = self.chianti_kev_units(spectrum, funits, wedg, kev=True, earth=True, date=None)
# And failing everything else, set all nan, inf, -inf to 0.0
spectrum[~np.isfinite(spectrum)] = 0
return energy, spectrum * 1e5 # the 1e5 makes the emission measure up to 1e49 instead of 1e44
def f_vth(self, energy_mids, temperature, emission_measure46):
"""
Returns the continuum+lines spectrum from a plasma of a given temperature and emission measure.
From: https://hesperia.gsfc.nasa.gov/ssw/packages/xray/idl/chianti_kev_cont.pro
Parameters
----------
energy_mids: `astropy.units.Quantity` (list with units u.keV)
A list of the mid-points of the energy bins. If the energy bins are [[1,1.5], [1.5,2], ...] then energy_mids=[1.25, 1.75, ...].
temperature: `astropy.units.Quantity`
The electron temperature of the plasma.
emission_measure46: `astropy.units.Quantity`
The emission measure of the emitting plasma in units of 1e46. This scaling is necessary since if the values are too large fitting
routines like scipy's minimize won't vary it.
Returns
-------
Flux: Dimensionless list of fluxes but the units should be ph s^-1 cm^-2 keV^-1
"""
# scale the emission measure up to its true value
emission_measure = emission_measure46*1e46
# temperature and EM should be in these units but incase they're not assign them with << to avoid copies
temperature, emission_measure = temperature<<u.MK, emission_measure<<u.cm**-3
# this needs changed to be more general since this assumes you are handning in symmetrical bins
# This is only because I have been with my NuSTAR data
# get the energy edges in the form [1, 1.5, 2, ...] for the chianti_kev_lines code
energy_edges = energy_mids - np.diff(energy_mids)[0]/2
energy_edges = np.append(energy_edges, energy_edges[-1]+np.diff(energy_mids)[0])<<u.keV
# get the energy bins in the form for the chianti_kev_cont code [[1,1.5], [1.5,2], ...]
en_hi = np.array(energy_edges.value)[1:,None] # [:,None] to give a second axis, each entry is now a row
en_lo = np.array(energy_edges.value)[:-1,None] # these are just default energies
energy_bins = np.concatenate((en_lo, en_hi), axis=1)<<u.keV
# Calculate the lines contribution
spectrum_lines = self.call_sunxspex_with_energies(energy=energy_edges, temperature=temperature, emission_measure=emission_measure, earth=True)
# Calculate the continuum contribution
energy, spectrum_cont = self.chianti_kev_cont(energy=energy_bins, temperature=temperature, use_interpol=True)
# scale the continuum output to what we put in
spectrum_cont *= emission_measure.value/1e49
# return the combination of the continuum & lines
return spectrum_lines + spectrum_cont | [
"numpy.ones",
"numpy.clip",
"numpy.arange",
"numpy.exp",
"scipy.interpolate.interp1d",
"sys.path.append",
"numpy.isfinite",
"sunxspex.io.chianti_kev_cont_common_load",
"sunpy.coordinates.get_sunearth_distance",
"numpy.log10",
"sunxspex.io.load_xray_abundances",
"scipy.stats.mstats.gmean",
"n... | [((21, 46), 'sys.path.append', 'path.append', (['"""./sunxspex"""'], {}), "('./sunxspex')\n", (32, 46), False, 'from sys import path\n'), ((1560, 1648), 'sunxspex.thermal_spectrum.ChiantiThermalSpectrum', 'thermal_spectrum.ChiantiThermalSpectrum', (['energies'], {'abundance_type': '"""sun_coronal_ext"""'}), "(energies, abundance_type=\n 'sun_coronal_ext')\n", (1599, 1648), False, 'from sunxspex import thermal_spectrum\n'), ((1786, 1816), 'sunxspex.io.chianti_kev_cont_common_load', 'chianti_kev_cont_common_load', ([], {}), '()\n', (1814, 1816), False, 'from sunxspex.io import chianti_kev_cont_common_load, load_xray_abundances\n'), ((1842, 1892), 'sunxspex.io.load_xray_abundances', 'load_xray_abundances', ([], {'abundance_type': '"""sun_coronal"""'}), "(abundance_type='sun_coronal')\n", (1862, 1892), False, 'from sunxspex.io import chianti_kev_cont_common_load, load_xray_abundances\n'), ((2253, 2307), 'numpy.diff', 'np.diff', (["self.continuum_info[1]['edge_str']['WVLEDGE']"], {}), "(self.continuum_info[1]['edge_str']['WVLEDGE'])\n", (2260, 2307), True, 'import numpy as np\n'), ((2384, 2425), 'numpy.log10', 'np.log10', (["self.continuum_info[1]['ctemp']"], {}), "(self.continuum_info[1]['ctemp'])\n", (2392, 2425), True, 'import numpy as np\n'), ((8174, 8190), 'numpy.log10', 'np.log10', (['mgtemp'], {}), '(mgtemp)\n', (8182, 8190), True, 'import numpy as np\n'), ((8728, 8777), 'numpy.clip', 'np.clip', (['[selt - 1, selt, selt + 1]', '(0)', '(ntemp - 1)'], {}), '([selt - 1, selt, selt + 1], 0, ntemp - 1)\n', (8735, 8777), True, 'import numpy as np\n'), ((10013, 10034), 'scipy.stats.mstats.gmean', 'gmean', (['energy'], {'axis': '(1)'}), '(energy, axis=1)\n', (10018, 10034), False, 'from scipy.stats.mstats import gmean\n'), ((10349, 10380), 'numpy.where', 'np.where', (['(default_abundance > 0)'], {}), '(default_abundance > 0)\n', (10357, 10380), True, 'import numpy as np\n'), ((10429, 10440), 'copy.copy', 'copy', (['tcont'], {}), '(tcont)\n', (10433, 10440), 
False, 'from copy import copy\n'), ((11709, 11834), 'numpy.concatenate', 'np.concatenate', (["(continuum_info[1]['totcont_lo'][:, indx.T[0], :], continuum_info[1][\n 'totcont'][:, indx.T[0], :])"], {'axis': '(2)'}), "((continuum_info[1]['totcont_lo'][:, indx.T[0], :],\n continuum_info[1]['totcont'][:, indx.T[0], :]), axis=2)\n", (11723, 11834), True, 'import numpy as np\n'), ((12320, 12383), 'numpy.tensordot', 'np.tensordot', (['abundance_ratio[select]', 'tcdbase'], {'axes': '([0], [0])'}), '(abundance_ratio[select], tcdbase, axes=([0], [0]))\n', (12332, 12383), True, 'import numpy as np\n'), ((12452, 12461), 'numpy.log', 'np.log', (['u'], {}), '(u)\n', (12458, 12461), True, 'import numpy as np\n'), ((12779, 12804), 'numpy.where', 'np.where', (['(gaunt[0, :] > 0)'], {}), '(gaunt[0, :] > 0)\n', (12787, 12804), True, 'import numpy as np\n'), ((12892, 12917), 'numpy.where', 'np.where', (['(gaunt[1, :] > 0)'], {}), '(gaunt[1, :] > 0)\n', (12900, 12917), True, 'import numpy as np\n'), ((13063, 13088), 'numpy.where', 'np.where', (['(gaunt[2, :] > 0)'], {}), '(gaunt[2, :] > 0)\n', (13071, 13088), True, 'import numpy as np\n'), ((13323, 13348), 'numpy.where', 'np.where', (['(gmean_en < maxe)'], {}), '(gmean_en < maxe)\n', (13331, 13348), True, 'import numpy as np\n'), ((7711, 7749), 'numpy.concatenate', 'np.concatenate', (['(en_lo, en_hi)'], {'axis': '(1)'}), '((en_lo, en_hi), axis=1)\n', (7725, 7749), True, 'import numpy as np\n'), ((8607, 8629), 'numpy.argwhere', 'np.argwhere', (['(logt <= u)'], {}), '(logt <= u)\n', (8618, 8629), True, 'import numpy as np\n'), ((9536, 9566), 'numpy.clip', 'np.clip', (['exponential', 'None', '(80)'], {}), '(exponential, None, 80)\n', (9543, 9566), True, 'import numpy as np\n'), ((12483, 12493), 'numpy.log', 'np.log', (['x1'], {}), '(x1)\n', (12489, 12493), True, 'import numpy as np\n'), ((12495, 12505), 'numpy.log', 'np.log', (['x0'], {}), '(x0)\n', (12501, 12505), True, 'import numpy as np\n'), ((12507, 12517), 'numpy.log', 'np.log', 
(['x2'], {}), '(x2)\n', (12513, 12517), True, 'import numpy as np\n'), ((15441, 15622), 'numpy.exp', 'np.exp', (['(cont0 * (u - x1) * (u - x2) / ((x0 - x1) * (x0 - x2)) + cont1 * (u - x0) *\n (u - x2) / ((x1 - x0) * (x1 - x2)) + cont2 * (u - x0) * (u - x1) / ((x2 -\n x0) * (x2 - x1)))'], {}), '(cont0 * (u - x1) * (u - x2) / ((x0 - x1) * (x0 - x2)) + cont1 * (u -\n x0) * (u - x2) / ((x1 - x0) * (x1 - x2)) + cont2 * (u - x0) * (u - x1) /\n ((x2 - x0) * (x2 - x1)))\n', (15447, 15622), True, 'import numpy as np\n'), ((18400, 18428), 'numpy.array', 'np.array', (['energy_edges.value'], {}), '(energy_edges.value)\n', (18408, 18428), True, 'import numpy as np\n'), ((18512, 18540), 'numpy.array', 'np.array', (['energy_edges.value'], {}), '(energy_edges.value)\n', (18520, 18540), True, 'import numpy as np\n'), ((18607, 18645), 'numpy.concatenate', 'np.concatenate', (['(en_lo, en_hi)'], {'axis': '(1)'}), '((en_lo, en_hi), axis=1)\n', (18621, 18645), True, 'import numpy as np\n'), ((7508, 7530), 'numpy.arange', 'np.arange', (['(3)', '(9)', '(0.006)'], {}), '(3, 9, 0.006)\n', (7517, 7530), True, 'import numpy as np\n'), ((7617, 7647), 'numpy.arange', 'np.arange', (['(3.006)', '(9.006)', '(0.006)'], {}), '(3.006, 9.006, 0.006)\n', (7626, 7647), True, 'import numpy as np\n'), ((8234, 8249), 'numpy.diff', 'np.diff', (['energy'], {}), '(energy)\n', (8241, 8249), True, 'import numpy as np\n'), ((9431, 9446), 'numpy.ones', 'np.ones', (['(3, 1)'], {}), '((3, 1))\n', (9438, 9446), True, 'import numpy as np\n'), ((9487, 9505), 'numpy.ones', 'np.ones', (['(1, nwvl)'], {}), '((1, nwvl))\n', (9494, 9505), True, 'import numpy as np\n'), ((9901, 9916), 'numpy.ones', 'np.ones', (['(3, 1)'], {}), '((3, 1))\n', (9908, 9916), True, 'import numpy as np\n'), ((15047, 15060), 'numpy.log', 'np.log', (['cont0'], {}), '(cont0)\n', (15053, 15060), True, 'import numpy as np\n'), ((15062, 15075), 'numpy.log', 'np.log', (['cont1'], {}), '(cont1)\n', (15068, 15075), True, 'import numpy as np\n'), ((15077, 
15090), 'numpy.log', 'np.log', (['cont2'], {}), '(cont2)\n', (15083, 15090), True, 'import numpy as np\n'), ((16411, 16432), 'numpy.isfinite', 'np.isfinite', (['spectrum'], {}), '(spectrum)\n', (16422, 16432), True, 'import numpy as np\n'), ((13562, 13589), 'scipy.interpolate.interp1d', 'interp1d', (['ewvl', 'gaunt[0, :]'], {}), '(ewvl, gaunt[0, :])\n', (13570, 13589), False, 'from scipy.interpolate import interp1d\n'), ((13699, 13726), 'scipy.interpolate.interp1d', 'interp1d', (['ewvl', 'gaunt[1, :]'], {}), '(ewvl, gaunt[1, :])\n', (13707, 13726), False, 'from scipy.interpolate import interp1d\n'), ((13768, 13795), 'scipy.interpolate.interp1d', 'interp1d', (['ewvl', 'gaunt[2, :]'], {}), '(ewvl, gaunt[2, :])\n', (13776, 13795), False, 'from scipy.interpolate import interp1d\n'), ((18165, 18185), 'numpy.diff', 'np.diff', (['energy_mids'], {}), '(energy_mids)\n', (18172, 18185), True, 'import numpy as np\n'), ((15812, 15862), 'numpy.clip', 'np.clip', (['(gmean_en / (temperature / 11.6))', 'None', '(80)'], {}), '(gmean_en / (temperature / 11.6), None, 80)\n', (15819, 15862), True, 'import numpy as np\n'), ((18255, 18275), 'numpy.diff', 'np.diff', (['energy_mids'], {}), '(energy_mids)\n', (18262, 18275), True, 'import numpy as np\n'), ((5578, 5628), 'sunpy.coordinates.get_sunearth_distance', 'sunpy.coordinates.get_sunearth_distance', ([], {'time': 'date'}), '(time=date)\n', (5617, 5628), False, 'import sunpy\n')] |
import numpy
import tarfile
import zipfile
import os
import shutil
from urllib.error import URLError, HTTPError
from urllib.request import urlretrieve
import tqdm
def get_file(fname, origin, untar=False, unzip=False, cache_subdir="datasets"):
    """Download a file from a URL if it is not already in the cache.

    Args:
        fname: Name the file is cached under (also the extraction target
            name when ``untar``/``unzip`` is set).
        origin: URL to download from.
        untar: If True, treat the download as a ``.tar.gz`` archive and
            extract it into the cache directory.
        unzip: If True, treat the download as a ``.zip`` archive and
            extract it into the cache directory.
        cache_subdir: Subdirectory of the cache root to store the file in.

    Returns:
        Path to the cached file, or to the extracted path when
        ``untar``/``unzip`` is set.
    """
    # https://raw.githubusercontent.com/fchollet/keras/master/keras/utils/data_utils.py
    # Copyright <NAME>, Google, others (2015)
    # Under MIT license
    # Cache lives under ~/.keras; fall back to /tmp/.keras when not writable.
    datadir_base = os.path.expanduser(os.path.join("~", ".keras"))
    if not os.access(datadir_base, os.W_OK):
        datadir_base = os.path.join("/tmp", ".keras")
    datadir = os.path.join(datadir_base, cache_subdir)
    if not os.path.exists(datadir):
        os.makedirs(datadir)
    if untar or unzip:
        # The archive is downloaded next to its eventual extraction path.
        untar_fpath = os.path.join(datadir, fname)
        if unzip:
            fpath = untar_fpath + ".zip"
        else:
            fpath = untar_fpath + ".tar.gz"
    else:
        fpath = os.path.join(datadir, fname)
    global progbar
    progbar = None
    def dl_progress(count, block_size, total_size):
        # urlretrieve reporthook: lazily create the bar, then advance it by
        # one block per call.
        global progbar
        if progbar is None:
            progbar = tqdm.tqdm(total=total_size)
        else:
            progbar.update(block_size)
    error_msg = "URL fetch failure on {}: {} -- {}"
    if not os.path.exists(fpath):
        try:
            try:
                urlretrieve(origin, fpath, dl_progress)
            except URLError as e:
                raise Exception(error_msg.format(origin, e.errno, e.reason))
            except HTTPError as e:
                raise Exception(error_msg.format(origin, e.code, e.msg))
        except (Exception, KeyboardInterrupt):
            # Don't leave a partial download behind.
            if os.path.exists(fpath):
                os.remove(fpath)
            raise
    progbar = None
    if untar:
        if not os.path.exists(untar_fpath):
            print("Untaring file...")
            tfile = tarfile.open(fpath, "r:gz")
            try:
                tfile.extractall(path=datadir)
            except (Exception, KeyboardInterrupt):
                # Remove whatever was partially extracted before re-raising.
                if os.path.exists(untar_fpath):
                    if os.path.isfile(untar_fpath):
                        os.remove(untar_fpath)
                    else:
                        shutil.rmtree(untar_fpath)
                raise
            tfile.close()
        return untar_fpath
    elif unzip:
        if not os.path.exists(untar_fpath):
            print("Unzipping file...")
            with zipfile.ZipFile(fpath) as file_:
                try:
                    file_.extractall(path=datadir)
                except (Exception, KeyboardInterrupt):
                    # Same cleanup as the tar branch: drop partial extraction.
                    if os.path.exists(untar_fpath):
                        if os.path.isfile(untar_fpath):
                            os.remove(untar_fpath)
                        else:
                            shutil.rmtree(untar_fpath)
                    raise
        return untar_fpath
    return fpath
def partition(examples, split_size):
    """Shuffle *examples* and split them into two lists.

    The first returned list holds the first ``int(len * split_size)``
    shuffled items, the second holds the remainder.
    """
    pool = list(examples)
    numpy.random.shuffle(pool)
    cut = int(len(pool) * split_size)
    return pool[:cut], pool[cut:]
def unzip(data):
    """Split an iterable of (x, y) pairs into two numpy arrays."""
    xs, ys = zip(*data)
    return numpy.asarray(xs), numpy.asarray(ys)
def to_categorical(Y, n_classes=None):
    """One-hot encode integer labels *Y* (adapted from keras).

    When *n_classes* is falsy it is inferred as ``max(Y) + 1``.
    Returns a float32 array of shape (len(Y), n_classes).
    """
    labels = numpy.array(Y, dtype="int").ravel()
    if not n_classes:
        n_classes = numpy.max(labels) + 1
    count = labels.shape[0]
    one_hot = numpy.zeros((count, n_classes), dtype="float32")
    one_hot[numpy.arange(count), labels] = 1
    return numpy.asarray(one_hot)
| [
"tqdm.tqdm",
"os.remove",
"zipfile.ZipFile",
"os.makedirs",
"numpy.asarray",
"numpy.zeros",
"os.path.exists",
"urllib.request.urlretrieve",
"numpy.max",
"os.path.isfile",
"numpy.array",
"numpy.arange",
"tarfile.open",
"shutil.rmtree",
"os.path.join",
"os.access",
"numpy.random.shuffl... | [((653, 693), 'os.path.join', 'os.path.join', (['datadir_base', 'cache_subdir'], {}), '(datadir_base, cache_subdir)\n', (665, 693), False, 'import os\n'), ((3023, 3053), 'numpy.random.shuffle', 'numpy.random.shuffle', (['examples'], {}), '(examples)\n', (3043, 3053), False, 'import numpy\n'), ((3449, 3493), 'numpy.zeros', 'numpy.zeros', (['(n, n_classes)'], {'dtype': '"""float32"""'}), "((n, n_classes), dtype='float32')\n", (3460, 3493), False, 'import numpy\n'), ((3545, 3571), 'numpy.asarray', 'numpy.asarray', (['categorical'], {}), '(categorical)\n', (3558, 3571), False, 'import numpy\n'), ((511, 538), 'os.path.join', 'os.path.join', (['"""~"""', '""".keras"""'], {}), "('~', '.keras')\n", (523, 538), False, 'import os\n'), ((551, 583), 'os.access', 'os.access', (['datadir_base', 'os.W_OK'], {}), '(datadir_base, os.W_OK)\n', (560, 583), False, 'import os\n'), ((608, 638), 'os.path.join', 'os.path.join', (['"""/tmp"""', '""".keras"""'], {}), "('/tmp', '.keras')\n", (620, 638), False, 'import os\n'), ((705, 728), 'os.path.exists', 'os.path.exists', (['datadir'], {}), '(datadir)\n', (719, 728), False, 'import os\n'), ((738, 758), 'os.makedirs', 'os.makedirs', (['datadir'], {}), '(datadir)\n', (749, 758), False, 'import os\n'), ((804, 832), 'os.path.join', 'os.path.join', (['datadir', 'fname'], {}), '(datadir, fname)\n', (816, 832), False, 'import os\n'), ((976, 1004), 'os.path.join', 'os.path.join', (['datadir', 'fname'], {}), '(datadir, fname)\n', (988, 1004), False, 'import os\n'), ((1314, 1335), 'os.path.exists', 'os.path.exists', (['fpath'], {}), '(fpath)\n', (1328, 1335), False, 'import os\n'), ((3216, 3232), 'numpy.asarray', 'numpy.asarray', (['x'], {}), '(x)\n', (3229, 3232), False, 'import numpy\n'), ((3234, 3250), 'numpy.asarray', 'numpy.asarray', (['y'], {}), '(y)\n', (3247, 3250), False, 'import numpy\n'), ((1169, 1196), 'tqdm.tqdm', 'tqdm.tqdm', ([], {'total': 'total_size'}), '(total=total_size)\n', (1178, 1196), False, 'import 
tqdm\n'), ((1831, 1858), 'os.path.exists', 'os.path.exists', (['untar_fpath'], {}), '(untar_fpath)\n', (1845, 1858), False, 'import os\n'), ((1918, 1945), 'tarfile.open', 'tarfile.open', (['fpath', '"""r:gz"""'], {}), "(fpath, 'r:gz')\n", (1930, 1945), False, 'import tarfile\n'), ((3317, 3344), 'numpy.array', 'numpy.array', (['Y'], {'dtype': '"""int"""'}), "(Y, dtype='int')\n", (3328, 3344), False, 'import numpy\n'), ((3395, 3407), 'numpy.max', 'numpy.max', (['Y'], {}), '(Y)\n', (3404, 3407), False, 'import numpy\n'), ((3510, 3525), 'numpy.arange', 'numpy.arange', (['n'], {}), '(n)\n', (3522, 3525), False, 'import numpy\n'), ((1383, 1422), 'urllib.request.urlretrieve', 'urlretrieve', (['origin', 'fpath', 'dl_progress'], {}), '(origin, fpath, dl_progress)\n', (1394, 1422), False, 'from urllib.request import urlretrieve\n'), ((1704, 1725), 'os.path.exists', 'os.path.exists', (['fpath'], {}), '(fpath)\n', (1718, 1725), False, 'import os\n'), ((2391, 2418), 'os.path.exists', 'os.path.exists', (['untar_fpath'], {}), '(untar_fpath)\n', (2405, 2418), False, 'import os\n'), ((1743, 1759), 'os.remove', 'os.remove', (['fpath'], {}), '(fpath)\n', (1752, 1759), False, 'import os\n'), ((2080, 2107), 'os.path.exists', 'os.path.exists', (['untar_fpath'], {}), '(untar_fpath)\n', (2094, 2107), False, 'import os\n'), ((2476, 2498), 'zipfile.ZipFile', 'zipfile.ZipFile', (['fpath'], {}), '(fpath)\n', (2491, 2498), False, 'import zipfile\n'), ((2132, 2159), 'os.path.isfile', 'os.path.isfile', (['untar_fpath'], {}), '(untar_fpath)\n', (2146, 2159), False, 'import os\n'), ((2185, 2207), 'os.remove', 'os.remove', (['untar_fpath'], {}), '(untar_fpath)\n', (2194, 2207), False, 'import os\n'), ((2258, 2284), 'shutil.rmtree', 'shutil.rmtree', (['untar_fpath'], {}), '(untar_fpath)\n', (2271, 2284), False, 'import shutil\n'), ((2659, 2686), 'os.path.exists', 'os.path.exists', (['untar_fpath'], {}), '(untar_fpath)\n', (2673, 2686), False, 'import os\n'), ((2715, 2742), 'os.path.isfile', 
'os.path.isfile', (['untar_fpath'], {}), '(untar_fpath)\n', (2729, 2742), False, 'import os\n'), ((2772, 2794), 'os.remove', 'os.remove', (['untar_fpath'], {}), '(untar_fpath)\n', (2781, 2794), False, 'import os\n'), ((2853, 2879), 'shutil.rmtree', 'shutil.rmtree', (['untar_fpath'], {}), '(untar_fpath)\n', (2866, 2879), False, 'import shutil\n')] |
# ---------------------------
# <NAME>, <NAME>, <NAME> -- 2019
# The University of Oxford, The Alan Turing Institute
# contact: <EMAIL>, <EMAIL>, <EMAIL>
# ---------------------------
import tensorflow_probability as tfp
import functools
import itertools
import tensorflow as tf
import numpy as np
from tensorflow.keras.layers import Reshape, Conv2D, MaxPooling2D, Flatten, Dense, Lambda, Input, Activation, Dropout, BatchNormalization
from tensorflow.keras.models import Model
from tensorflow.keras import regularizers
from tensorflow.keras import backend as K
from tensorflow.python.ops.parallel_for.gradients import batch_jacobian
tfd = tfp.distributions
def int_shape(x):
    """Return the static shape of tensor *x* as a list of plain ints."""
    return [int(dim) for dim in x.get_shape()]
def square_error(labels, preds):
    """Half squared error between *labels* and *preds*, summed over the last axis."""
    residual = labels - preds
    return tf.reduce_sum(0.5 * residual ** 2, axis=-1)
def cross_entropy(labels, preds):
    """Cross entropy of *labels* against softmaxed *preds* (1e-5 added for log stability)."""
    log_probs = tf.math.log(tf.nn.softmax(preds, axis=-1) + 1e-5)
    return -tf.reduce_sum(labels * log_probs, axis=-1)
def make_vgg13(activation, input_shape, n_output):
    """Build a VGG13-style convolutional network factory.

    Args:
        activation: Activation function for the conv and hidden dense layers.
        input_shape: Shape the flat input vector is reshaped to (the Input
            layer itself takes the flattened vector of size prod(input_shape)).
        n_output: Number of output logits (no softmax; tf losses apply it).

    Returns:
        convnet: A callable mapping an input tensor to a dict with keys
            'net' (the Keras Model), 'outputs' (every layer's output),
            'activations' (input plus each conv/dense output) and
            'layer_shapes' (shapes of those activations, batch dim dropped).
    """
    # Regularisation is explicitly disabled on every layer.
    layer_reg = dict(kernel_regularizer=None, bias_regularizer=None)
    dense = functools.partial(Dense,
                              kernel_initializer='glorot_normal',
                              activation=activation,
                              bias_initializer='zeros',
                              **layer_reg)
    conv = functools.partial(Conv2D,
                             kernel_initializer='glorot_normal',
                             activation=activation,
                             bias_initializer='zeros',
                             padding='SAME',
                             **layer_reg)
    maxpool = functools.partial(MaxPooling2D)
    layers = [
        Input(shape=(np.prod(input_shape), ), name='input'),
        Reshape(input_shape)
    ]
    # Block 1
    layers.append(
        conv(filters=64, kernel_size=3, strides=1, name='block1_conv1'))
    layers.append(
        conv(filters=64, kernel_size=3, strides=1, name='block1_conv2'))
    layers.append(maxpool((2, 2), strides=(2, 2), name='block1_pool'))
    # Block 2
    layers.append(
        conv(filters=128, kernel_size=3, strides=1, name='block2_conv1'))
    layers.append(
        conv(filters=128, kernel_size=3, strides=1, name='block2_conv2'))
    layers.append(maxpool((2, 2), strides=(2, 2), name='block2_pool'))
    # Block 3
    layers.append(
        conv(filters=128, kernel_size=3, strides=1, name='block3_conv1'))
    layers.append(
        conv(filters=128, kernel_size=3, strides=1, name='block3_conv2'))
    layers.append(maxpool((2, 2), strides=(2, 2), name='block3_pool'))
    # # Block 4
    # layers.append(
    #     conv(filters=512, kernel_size=3, strides=1, name='block4_conv1'))
    # layers.append(
    #     conv(filters=512, kernel_size=3, strides=1, name='block4_conv2'))
    # layers.append(maxpool((2, 2), strides=(2, 2), name='block4_pool'))
    layers.append(Flatten())
    # --- Tf losses do softmax internally so keep activation as None
    layers.append(dense(units=256))
    layers.append(dense(units=n_output, activation=None))
    def convnet(x):
        # --- We use the Model and not Sequential API because Sequential makes
        # --- it difficult to access intermediary outputs
        out = [layers[1](layers[0])]
        for l in layers[2:]:
            out.append(l(out[-1]))
        # --- Setup model
        network = Model(inputs=layers[0], outputs=out)
        outputs = network(x)
        # Keep the input plus every conv/dense output (pool/reshape/flatten dropped).
        # NOTE(review): the comprehension variable `x` shadows the input tensor `x`
        # inside the any(); harmless here but worth renaming.
        activations = [x] + [
            o for o in outputs if any(x in o.name for x in ['dense', 'conv'])
        ]
        layer_shapes = [[np.prod(input_shape)]] + [
            int_shape(o)[1:]
            for o in outputs if any(x in o.name for x in ['dense', 'conv'])
        ]
        # --- outputs contains activations (odd numbered layers) and logits (even numbered layers)
        return dict(net=network,
                    outputs=outputs,
                    activations=activations,
                    layer_shapes=layer_shapes)
    return convnet
def make_convnet(activation, input_shape, n_output=1):
    """Build a small two-conv-layer network factory.

    Args:
        activation: Activation function for the conv layers.
        input_shape: Shape the flat input vector is reshaped to.
        n_output: Number of output logits (no softmax; tf losses apply it).

    Returns:
        convnet: Callable mapping an input tensor to a dict with keys
            'net', 'outputs', 'activations' and 'layer_shapes'.
    """
    dense_layer = functools.partial(Dense,
                                    kernel_initializer='glorot_normal',
                                    activation=activation,
                                    bias_initializer='zeros')
    conv_layer = functools.partial(Conv2D,
                                   kernel_initializer='glorot_normal',
                                   activation=activation,
                                   bias_initializer='zeros',
                                   padding='SAME')
    stack = [
        Input(shape=(np.prod(input_shape), ), name='input'),
        Reshape(input_shape),
        conv_layer(filters=32, kernel_size=4, strides=2),
        conv_layer(filters=128, kernel_size=4, strides=2),
        Flatten(),
        # --- Tf losses do softmax internally so keep activation as None
        dense_layer(units=n_output, activation=None),
    ]
    def convnet(x):
        # Model (not Sequential) so intermediary outputs remain accessible.
        out = [stack[1](stack[0])]
        for layer in stack[2:]:
            out.append(layer(out[-1]))
        network = Model(inputs=stack[0], outputs=out)
        outputs = network(x)
        # Input plus every conv/dense output; shapes only track conv outputs.
        activations = [x] + [
            o for o in outputs if any(k in o.name for k in ['dense', 'conv'])
        ]
        layer_shapes = [[np.prod(input_shape)]] + [
            int_shape(o)[1:] for o in outputs if 'conv' in o.name
        ]
        return dict(net=network,
                    outputs=outputs,
                    activations=activations,
                    layer_shapes=layer_shapes)
    return convnet
def make_mlp(activation, input_shape, N, H, n_output=1):
    """ Creates the discriminant function.
    Args:
        activation: Activation function in hidden layers.
        input_shape: Dimensionality of the input.
        N: Number of layers
        H: Size of hidden layers (# of neurons)
        n_output: Number of output logits (no softmax; tf losses apply it).
    Returns:
        mlp: A `callable` mapping a `Tensor` of inputs to a
            prediction over classes.
    """
    hidden = functools.partial(Dense,
                               kernel_initializer='glorot_normal',
                               activation=activation,
                               use_bias=False)
    stack = [Input(shape=(input_shape, ), name='input')]
    stack.extend(hidden(units=H) for _ in range(N))
    # --- Tf losses do softmax internally so keep activation as None
    stack.append(hidden(units=n_output, activation=None, use_bias=False))
    def mlp(x):
        # Model (not Sequential) so intermediary outputs remain accessible.
        out = [stack[1](stack[0])]
        for layer in stack[2:]:
            out.append(layer(out[-1]))
        network = Model(inputs=stack[0], outputs=out)
        outputs = network(x)
        # --- if linear, there is a single output tensor rather than a list
        if type(outputs) is not list:
            outputs = [outputs]
        activations = [x] + [
            o for o in outputs if any(k in o.name for k in ['dense'])
        ]
        layer_shapes = [[np.prod(input_shape)]] + [
            int_shape(o)[1:] for o in outputs if 'dense' in o.name
        ]
        return dict(net=network,
                    outputs=outputs,
                    activations=activations,
                    layer_shapes=layer_shapes)
    return mlp
def gen_noise(a, sigma, mode, n_samples, seed=0, p=0.0):
    """Draw Gaussian noise shaped like *n_samples* stacked copies of tensor *a*.

    Args:
        a: Tensor whose static shape sets the per-sample noise shape.
        sigma: Noise standard deviation; in 'mult' mode it is scaled
            elementwise by *a* (multiplicative noise).
        mode: 'mult' for multiplicative noise; anything else keeps sigma as-is.
        n_samples: Number of independent noise samples (leading axis).
        seed: Unused; kept for backward compatibility. NOTE(review): the
            original never forwarded it to tf.random.normal, so draws were
            never seed-reproducible — confirm intent before wiring it in.
        p: Unused; kept for backward compatibility (a Bernoulli-style mask
            was computed from it but never applied).

    Returns:
        A float32 tensor of shape [n_samples] + a.shape.
    """
    shape = [n_samples] + int_shape(a)
    if mode == 'mult':
        sigma *= a
    # Fix: the original also drew an unused uniform tensor and threshold mask
    # here (dead code); removed without changing the returned value.
    return tf.random.normal(shape,
                            mean=0.0,
                            stddev=sigma,
                            dtype=tf.dtypes.float32,
                            name='noise')
def replace_mask_layer(x,
                       model,
                       non_targeted_layers=None,
                       var=1.0,
                       n_samples=1,
                       mode='add'):
    """Replay the network forward from *x*, injecting noise at targeted layers.

    Each input/conv/dense layer's activations get a noise tensor added
    (built by ``gen_noise``); layers listed in *non_targeted_layers* get
    zero noise. The final (logit) layer is applied without extra noise.

    Args:
        x: Input batch fed through the network.
        model: dict produced by one of the ``make_*`` factories, with keys
            'net' (the Keras Model) and 'activations'.
        non_targeted_layers: Indices of layers that should NOT be noised.
            Fix: the default was a mutable list literal; it is now ``None``
            and normalised below (behaviour unchanged for callers).
        var: Noise variance; sigma = sqrt(var).
        n_samples: Number of noise samples drawn per input.
        mode: 'add' or 'mult' noise mode, forwarded to ``gen_noise``.

    Returns:
        dict with 'activations' (noisy per-layer activations reshaped to
        [n_samples, batch, ...]) and 'noise' (the noise tensors used).
    """
    if non_targeted_layers is None:
        non_targeted_layers = []
    # --- Retrieve the old model's layers
    data = x
    sigma = np.sqrt(var)
    layers = [l for l in model['net'].layers]
    layer_set = list(set(range(len(layers))) - set(non_targeted_layers))
    # Zero noise for non-targeted layers; gen_noise for the rest.
    noise_gen = iter([
        gen_noise(a, sigma, mode, n_samples, seed=i)
        if i in layer_set else tf.zeros([n_samples] + int_shape(a))
        for i, a in enumerate(model['activations'])
    ])
    # --- Sequentially mask each layer's activations and noise the missing values,
    # --- up until the penultimate logit layer (we don't noise the output layer)
    noises, activations, x = [], [], data
    for i, l in enumerate(layers[:-1]):
        x = l(x)
        if any(n in l.name for n in ['input', 'conv', 'dense']):
            noises.append(next(noise_gen))
            if 'input' in l.name:
                x = Lambda(lambda x: x + noises[-1])(x)
                activations.append(x)
                # collapse the samples axis back into the batch axis
                x = tf.reshape(x, [-1] + int_shape(x)[2:])
            else:
                x = Lambda(lambda x: x + tf.reshape(noises[-1], [-1] +
                                                    int_shape(x)[1:]))(x)
                activations.append(
                    tf.reshape(x, [n_samples, -1] + int_shape(x)[1:]))
    noises.append(next(noise_gen))
    pred = layers[-1](x)
    activations.append(tf.reshape(pred, [n_samples, -1] + int_shape(pred)[1:]))
    return dict(activations=activations, noise=noises)
def perturbative_solution(x, model, loss, EC, var, loss_fn):
    """Apply a Newton-style weight perturbation and evaluate the resulting loss.

    Builds the update H^-1 J, where H is the Hessian of ``loss`` w.r.t. the
    trainable weights (via HessianEstimator) and J is the gradient of ``EC``,
    subtracts it from each layer's weights, replays the forward pass manually,
    and returns the mean of ``loss_fn`` over the result.

    Args:
        x: input tensor.
        model: Keras model whose layers' first trainable weights are perturbed.
        loss: scalar loss tensor used to build the Hessian.
        EC: scalar tensor whose gradient drives the perturbation.
        var: unused in this function - TODO confirm and drop.
        loss_fn: callable mapping logits to per-sample losses.

    Returns:
        Scalar tensor: mean loss after the perturbation.
    """
    from .pyhessian import HessianEstimator

    # --- Retrieve the old model's layers
    model_copy = tf.keras.models.clone_model(model)  # NOTE(review): unused - confirm and drop
    layers = [l for l in model.layers]
    new_weights = []  # NOTE(review): never populated - confirm and drop
    weights = [l.trainable_weights[0] for l in layers[1:]]
    H = HessianEstimator(None, loss, None, weights, None, None, 404).get_H_op()
    H_inv = tf.linalg.inv(H)
    # Stack the per-weight gradients of EC into one column vector matching H.
    J = tf.concat(
        [tf.reshape(tf.gradients([EC], [w])[0], (-1, 1)) for w in weights],
        axis=0)
    update = H_inv @ J
    print(update)  # NOTE(review): debug leftover (graph-construction time print)
    start = 0
    for i, w in enumerate(weights):
        num_w = np.prod(int_shape(w))
        # Slice this layer's portion of the flat update vector and reshape it.
        DW = tf.reshape(update[start:num_w + start], w.shape)
        DW = tf.Print(DW, [DW], 'DW')  # NOTE(review): tf.Print debug leftover
        new_w = w - DW
        start += num_w
        # x = x @ new_w
        # NOTE(review): i ranges over `weights` (len(layers) - 1 items), so this
        # condition is always true and the else branch is dead - the final layer
        # also gets elu applied; confirm intent.
        if i < len(layers) - 1:
            x = tf.keras.activations.elu(x @ new_w)
        else:
            x = x @ new_w
    return tf.reduce_mean(loss_fn(x))
def heavy_tail_variance(Js, loss, preds):
    """Estimate the heavy-tailed noise 'covariance' variance term.

    Accumulates 0.5 * sum(J_1 @ J_2^T) over all ordered pairs of distinct
    Jacobians, reduced over the last two axes.

    Args:
        Js: list of Jacobian tensors of shape (batch, m, k).
        loss: unused; kept for signature compatibility with the other
            calc_* helpers in this module.
        preds: unused; kept for signature compatibility.

    Returns:
        Tensor with the accumulated heavy-tail variance per batch element,
        or 0 when fewer than two Jacobians are given.
    """
    # The original also built d(loss)/d(preds) and its batch Jacobian here but
    # never used them - dead graph construction, dropped.
    H_var = 0
    # --- This is the heavy-tailed noise 'Covariance' variance
    for (J_1, J_2) in itertools.permutations(Js, 2):
        H_var += 0.5 * tf.reduce_sum(J_1 @ tf.transpose(J_2, [0, 2, 1]),
                                     axis=[-2, -1])
    return H_var
def calc_taylor_expansion(Js, loss, preds, noises, B, n_samples):
    """Estimate Gaussian, chi-squared and heavy-tailed terms of the Taylor expansion.

    Args:
        Js: per-layer Jacobian tensors, matched index-wise with ``noises``.
        loss: scalar loss tensor.
        preds: prediction tensor the loss is differentiated against.
        noises: per-layer noise tensors; ``noises[i] @ Js[i]`` is the noised Jacobian.
        B: batch size (used to reshape per-sample blocks).
        n_samples: number of noise samples; each term is averaged over it.

    Returns:
        (G, C, H): Gaussian, chi-squared 'covariance' and heavy-tailed terms.
    """
    noisy_Js = [noises[i] @ J for i, J in enumerate(Js)]
    dL_dhL = tf.gradients([loss], [preds])[0]
    H_L = batch_jacobian(dL_dhL, preds, use_pfor=False)
    G, C, H = 0, 0, 0
    # --- This is the Gaussian noise
    for J in noisy_Js:
        G += tf.reduce_sum(tf.reshape(J, (n_samples, B, -1)) * dL_dhL,
                           axis=[0, -1]) / n_samples
    # --- This is the Chi-Squared 'Covariance' noise
    # NOTE(review): zip(noisy_Js, noisy_Js) pairs every Jacobian with itself,
    # i.e. only the diagonal terms - confirm this is intended rather than
    # itertools.combinations/permutations as in the heavy-tail loop below.
    for (J1, J2) in zip(noisy_Js, noisy_Js):
        C += 0.5 * tf.reduce_sum(J1 @ H_L @ tf.transpose(J2, [0, 2, 1]),
                                 axis=[-2, -1]) / n_samples
    # --- This is the heavy-tailed noise 'Covariance' noise
    for (J1, J2) in itertools.permutations(noisy_Js, 2):
        H += 0.5 * tf.reduce_sum(J1 @ H_L @ tf.transpose(J2, [0, 2, 1]),
                                 axis=[-2, -1]) / n_samples
    return G, C, H
def calc_tikhonov_reg(Js, acts, preds, noise_mode, var, loss_type):
    """Accumulate the Tikhonov-like noise regularizer over layers.

    Args:
        Js: per-layer Jacobians of the predictions w.r.t. the activations.
        acts: per-layer activation tensors (used for multiplicative noise).
        preds: prediction (logits) tensor.
        noise_mode: 'add', 'mult' or a mode containing 'diag' to select the
            diagonal approximation of the quadratic form.
        var: noise variance multiplier.
        loss_type: 'cross_entropy' or 'mse'.

    Returns:
        Tensor with the accumulated regularizer (0.5 * var * EC summed over layers).

    Raises:
        ValueError: for an unsupported ``loss_type`` (previously this fell
            through and crashed with UnboundLocalError on ``EC``).
    """
    l_noise = 0
    n_output = int_shape(preds)[-1]
    for a, J in zip(acts, Js):
        if loss_type == 'cross_entropy':
            # --- Classification loss, log(p(y|x))
            p = tf.nn.softmax(preds, axis=1)
            # Hessian of softmax cross-entropy w.r.t. the logits: diag(p) - p p^T.
            H_l = tf.linalg.diag(p) - tf.expand_dims(p, 2) @ tf.expand_dims(
                p, 1)
            if noise_mode == 'mult':
                # Multiplicative noise scales each Jacobian row by the activation.
                J = tf.tile(tf.expand_dims(a, 2), [1, 1, n_output]) * J
            if 'diag' in noise_mode:
                # Diagonal approximation of tr(H_l J^T J).
                EC = tf.reduce_sum(tf.linalg.diag_part(
                    H_l * (tf.transpose(J, [0, 2, 1]) @ J)),
                                   axis=[-1])
            else:
                EC = tf.reduce_sum(H_l * (tf.transpose(J, [0, 2, 1]) @ J),
                                   axis=[-2, -1])
        elif loss_type == 'mse':
            var_l = 1
            if noise_mode == 'mult':
                var_l *= a**2
            EC = tf.reduce_sum(var_l * tf.reduce_sum(J**2, axis=-1), axis=[-1])
        else:
            raise ValueError("Unsupported loss_type: %r" % (loss_type,))
        l_noise += 0.5 * var * EC
    return l_noise
| [
"tensorflow.reduce_sum",
"tensorflow.keras.layers.Reshape",
"tensorflow.reshape",
"tensorflow.linalg.inv",
"numpy.prod",
"tensorflow.keras.layers.Flatten",
"tensorflow.python.ops.parallel_for.gradients.batch_jacobian",
"tensorflow.nn.softmax",
"tensorflow.random.uniform",
"tensorflow.keras.models.... | [((770, 821), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['(0.5 * (labels - preds) ** 2)'], {'axis': '(-1)'}), '(0.5 * (labels - preds) ** 2, axis=-1)\n', (783, 821), True, 'import tensorflow as tf\n'), ((1096, 1223), 'functools.partial', 'functools.partial', (['Dense'], {'kernel_initializer': '"""glorot_normal"""', 'activation': 'activation', 'bias_initializer': '"""zeros"""'}), "(Dense, kernel_initializer='glorot_normal', activation=\n activation, bias_initializer='zeros', **layer_reg)\n", (1113, 1223), False, 'import functools\n'), ((1350, 1494), 'functools.partial', 'functools.partial', (['Conv2D'], {'kernel_initializer': '"""glorot_normal"""', 'activation': 'activation', 'bias_initializer': '"""zeros"""', 'padding': '"""SAME"""'}), "(Conv2D, kernel_initializer='glorot_normal', activation=\n activation, bias_initializer='zeros', padding='SAME', **layer_reg)\n", (1367, 1494), False, 'import functools\n'), ((1650, 1681), 'functools.partial', 'functools.partial', (['MaxPooling2D'], {}), '(MaxPooling2D)\n', (1667, 1681), False, 'import functools\n'), ((4095, 4209), 'functools.partial', 'functools.partial', (['Dense'], {'kernel_initializer': '"""glorot_normal"""', 'activation': 'activation', 'bias_initializer': '"""zeros"""'}), "(Dense, kernel_initializer='glorot_normal', activation=\n activation, bias_initializer='zeros')\n", (4112, 4209), False, 'import functools\n'), ((4306, 4437), 'functools.partial', 'functools.partial', (['Conv2D'], {'kernel_initializer': '"""glorot_normal"""', 'activation': 'activation', 'bias_initializer': '"""zeros"""', 'padding': '"""SAME"""'}), "(Conv2D, kernel_initializer='glorot_normal', activation=\n activation, bias_initializer='zeros', padding='SAME')\n", (4323, 4437), False, 'import functools\n'), ((6239, 6343), 'functools.partial', 'functools.partial', (['Dense'], {'kernel_initializer': '"""glorot_normal"""', 'activation': 'activation', 'use_bias': '(False)'}), "(Dense, 
kernel_initializer='glorot_normal', activation=\n activation, use_bias=False)\n", (6256, 6343), False, 'import functools\n'), ((7825, 7915), 'tensorflow.random.normal', 'tf.random.normal', (['shape'], {'mean': '(0.0)', 'stddev': 'sigma', 'dtype': 'tf.dtypes.float32', 'name': '"""noise"""'}), "(shape, mean=0.0, stddev=sigma, dtype=tf.dtypes.float32,\n name='noise')\n", (7841, 7915), True, 'import tensorflow as tf\n'), ((8036, 8076), 'tensorflow.random.uniform', 'tf.random.uniform', ([], {'shape': 'shape', 'maxval': '(1)'}), '(shape=shape, maxval=1)\n', (8053, 8076), True, 'import tensorflow as tf\n'), ((8979, 8991), 'numpy.sqrt', 'np.sqrt', (['var'], {}), '(var)\n', (8986, 8991), True, 'import numpy as np\n'), ((11059, 11093), 'tensorflow.keras.models.clone_model', 'tf.keras.models.clone_model', (['model'], {}), '(model)\n', (11086, 11093), True, 'import tensorflow as tf\n'), ((11308, 11324), 'tensorflow.linalg.inv', 'tf.linalg.inv', (['H'], {}), '(H)\n', (11321, 11324), True, 'import tensorflow as tf\n'), ((12062, 12107), 'tensorflow.python.ops.parallel_for.gradients.batch_jacobian', 'batch_jacobian', (['dL_dhL', 'preds'], {'use_pfor': '(False)'}), '(dL_dhL, preds, use_pfor=False)\n', (12076, 12107), False, 'from tensorflow.python.ops.parallel_for.gradients import batch_jacobian\n'), ((12209, 12238), 'itertools.permutations', 'itertools.permutations', (['Js', '(2)'], {}), '(Js, 2)\n', (12231, 12238), False, 'import itertools\n'), ((12565, 12610), 'tensorflow.python.ops.parallel_for.gradients.batch_jacobian', 'batch_jacobian', (['dL_dhL', 'preds'], {'use_pfor': '(False)'}), '(dL_dhL, preds, use_pfor=False)\n', (12579, 12610), False, 'from tensorflow.python.ops.parallel_for.gradients import batch_jacobian\n'), ((13133, 13168), 'itertools.permutations', 'itertools.permutations', (['noisy_Js', '(2)'], {}), '(noisy_Js, 2)\n', (13155, 13168), False, 'import itertools\n'), ((1767, 1787), 'tensorflow.keras.layers.Reshape', 'Reshape', (['input_shape'], {}), 
'(input_shape)\n', (1774, 1787), False, 'from tensorflow.keras.layers import Reshape, Conv2D, MaxPooling2D, Flatten, Dense, Lambda, Input, Activation, Dropout, BatchNormalization\n'), ((2911, 2920), 'tensorflow.keras.layers.Flatten', 'Flatten', ([], {}), '()\n', (2918, 2920), False, 'from tensorflow.keras.layers import Reshape, Conv2D, MaxPooling2D, Flatten, Dense, Lambda, Input, Activation, Dropout, BatchNormalization\n'), ((3390, 3426), 'tensorflow.keras.models.Model', 'Model', ([], {'inputs': 'layers[0]', 'outputs': 'out'}), '(inputs=layers[0], outputs=out)\n', (3395, 3426), False, 'from tensorflow.keras.models import Model\n'), ((4634, 4654), 'tensorflow.keras.layers.Reshape', 'Reshape', (['input_shape'], {}), '(input_shape)\n', (4641, 4654), False, 'from tensorflow.keras.layers import Reshape, Conv2D, MaxPooling2D, Flatten, Dense, Lambda, Input, Activation, Dropout, BatchNormalization\n'), ((4805, 4814), 'tensorflow.keras.layers.Flatten', 'Flatten', ([], {}), '()\n', (4812, 4814), False, 'from tensorflow.keras.layers import Reshape, Conv2D, MaxPooling2D, Flatten, Dense, Lambda, Input, Activation, Dropout, BatchNormalization\n'), ((5247, 5283), 'tensorflow.keras.models.Model', 'Model', ([], {'inputs': 'layers[0]', 'outputs': 'out'}), '(inputs=layers[0], outputs=out)\n', (5252, 5283), False, 'from tensorflow.keras.models import Model\n'), ((6444, 6485), 'tensorflow.keras.layers.Input', 'Input', ([], {'shape': '(input_shape,)', 'name': '"""input"""'}), "(shape=(input_shape,), name='input')\n", (6449, 6485), False, 'from tensorflow.keras.layers import Reshape, Conv2D, MaxPooling2D, Flatten, Dense, Lambda, Input, Activation, Dropout, BatchNormalization\n'), ((6994, 7030), 'tensorflow.keras.models.Model', 'Model', ([], {'inputs': 'layers[0]', 'outputs': 'out'}), '(inputs=layers[0], outputs=out)\n', (6999, 7030), False, 'from tensorflow.keras.models import Model\n'), ((8093, 8114), 'tensorflow.math.greater', 'tf.math.greater', (['p', 'r'], {}), '(p, r)\n', (8108, 
8114), True, 'import tensorflow as tf\n'), ((11580, 11628), 'tensorflow.reshape', 'tf.reshape', (['update[start:num_w + start]', 'w.shape'], {}), '(update[start:num_w + start], w.shape)\n', (11590, 11628), True, 'import tensorflow as tf\n'), ((11642, 11666), 'tensorflow.Print', 'tf.Print', (['DW', '[DW]', '"""DW"""'], {}), "(DW, [DW], 'DW')\n", (11650, 11666), True, 'import tensorflow as tf\n'), ((12019, 12048), 'tensorflow.gradients', 'tf.gradients', (['[loss]', '[preds]'], {}), '([loss], [preds])\n', (12031, 12048), True, 'import tensorflow as tf\n'), ((12522, 12551), 'tensorflow.gradients', 'tf.gradients', (['[loss]', '[preds]'], {}), '([loss], [preds])\n', (12534, 12551), True, 'import tensorflow as tf\n'), ((11785, 11820), 'tensorflow.keras.activations.elu', 'tf.keras.activations.elu', (['(x @ new_w)'], {}), '(x @ new_w)\n', (11809, 11820), True, 'import tensorflow as tf\n'), ((13584, 13612), 'tensorflow.nn.softmax', 'tf.nn.softmax', (['preds'], {'axis': '(1)'}), '(preds, axis=1)\n', (13597, 13612), True, 'import tensorflow as tf\n'), ((13631, 13648), 'tensorflow.linalg.diag', 'tf.linalg.diag', (['p'], {}), '(p)\n', (13645, 13648), True, 'import tensorflow as tf\n'), ((1719, 1739), 'numpy.prod', 'np.prod', (['input_shape'], {}), '(input_shape)\n', (1726, 1739), True, 'import numpy as np\n'), ((3601, 3621), 'numpy.prod', 'np.prod', (['input_shape'], {}), '(input_shape)\n', (3608, 3621), True, 'import numpy as np\n'), ((4586, 4606), 'numpy.prod', 'np.prod', (['input_shape'], {}), '(input_shape)\n', (4593, 4606), True, 'import numpy as np\n'), ((5458, 5478), 'numpy.prod', 'np.prod', (['input_shape'], {}), '(input_shape)\n', (5465, 5478), True, 'import numpy as np\n'), ((7291, 7311), 'numpy.prod', 'np.prod', (['input_shape'], {}), '(input_shape)\n', (7298, 7311), True, 'import numpy as np\n'), ((9742, 9774), 'tensorflow.keras.layers.Lambda', 'Lambda', (['(lambda x: x + noises[-1])'], {}), '(lambda x: x + noises[-1])\n', (9748, 9774), False, 'from 
tensorflow.keras.layers import Reshape, Conv2D, MaxPooling2D, Flatten, Dense, Lambda, Input, Activation, Dropout, BatchNormalization\n'), ((11364, 11387), 'tensorflow.gradients', 'tf.gradients', (['[EC]', '[w]'], {}), '([EC], [w])\n', (11376, 11387), True, 'import tensorflow as tf\n'), ((12283, 12311), 'tensorflow.transpose', 'tf.transpose', (['J_2', '[0, 2, 1]'], {}), '(J_2, [0, 2, 1])\n', (12295, 12311), True, 'import tensorflow as tf\n'), ((12721, 12754), 'tensorflow.reshape', 'tf.reshape', (['J', '(n_samples, B, -1)'], {}), '(J, (n_samples, B, -1))\n', (12731, 12754), True, 'import tensorflow as tf\n'), ((13651, 13671), 'tensorflow.expand_dims', 'tf.expand_dims', (['p', '(2)'], {}), '(p, 2)\n', (13665, 13671), True, 'import tensorflow as tf\n'), ((13674, 13694), 'tensorflow.expand_dims', 'tf.expand_dims', (['p', '(1)'], {}), '(p, 1)\n', (13688, 13694), True, 'import tensorflow as tf\n'), ((912, 941), 'tensorflow.nn.softmax', 'tf.nn.softmax', (['preds'], {'axis': '(-1)'}), '(preds, axis=-1)\n', (925, 941), True, 'import tensorflow as tf\n'), ((12962, 12989), 'tensorflow.transpose', 'tf.transpose', (['J2', '[0, 2, 1]'], {}), '(J2, [0, 2, 1])\n', (12974, 12989), True, 'import tensorflow as tf\n'), ((13214, 13241), 'tensorflow.transpose', 'tf.transpose', (['J2', '[0, 2, 1]'], {}), '(J2, [0, 2, 1])\n', (13226, 13241), True, 'import tensorflow as tf\n'), ((13777, 13797), 'tensorflow.expand_dims', 'tf.expand_dims', (['a', '(2)'], {}), '(a, 2)\n', (13791, 13797), True, 'import tensorflow as tf\n'), ((14444, 14474), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['(J ** 2)'], {'axis': '(-1)'}), '(J ** 2, axis=-1)\n', (14457, 14474), True, 'import tensorflow as tf\n'), ((14200, 14226), 'tensorflow.transpose', 'tf.transpose', (['J', '[0, 2, 1]'], {}), '(J, [0, 2, 1])\n', (14212, 14226), True, 'import tensorflow as tf\n'), ((14034, 14060), 'tensorflow.transpose', 'tf.transpose', (['J', '[0, 2, 1]'], {}), '(J, [0, 2, 1])\n', (14046, 14060), True, 'import tensorflow as tf\n')] |
"""Classes and functions for operations with fmesh tallies.
Classes
-------
FMesh - class for storing individual fmesh tally data.
Functions
---------
read_meshtal(file) - reads all tallies from MCNP meshtal file.
read_meshtally(file) - reads individual fmesh tally from binary file (*.npy)
merge_tallies(*tally_weight) - merges tallies with specific weights.
Examples
--------
Suppose we have meshtal file - MCNP output for fmesh tallies - sample.m.
It contains several tallies with numbers 14, 24, and 34. First, we have to
read it:
```python
tally_list = list(read_meshtal('sample.m'))
```
Each FMesh tally stores bin boundaries along the E, X, Y and Z axes together
with an array of tally values (data) and their relative errors (err).
In order to save fmesh data to vtk format, method save2vtk should be used.
Because it is actually regular grid the result is .vtr file.
```python
tally_list[0].save2vtk(filename='sample', data_name='heating neutron')
```
Dependencies
------------
pyevtk
https://github.com/pyscience-projects/pyevtk
https://bitbucket.org/pauloh/pyevtk
"""
# TODO dvp: redesign this class as xarray data structure.
# multidimensional array with coordinates is more appropriate for this class.
from typing import Callable, Generator, Iterable, List, Optional, TextIO, Tuple, Union
import logging
from pathlib import Path
import mckit_meshes.mesh.geometry_spec as gc
import mckit_meshes.utils as ut
import mckit_meshes.utils.no_daemon_process as ndp
import mckit_meshes.utils.rebin as rebin
import numpy as np
from mckit_meshes.particle_kind import ParticleKind as Kind
from pyevtk.hl import gridToVTK
from toolz.itertoolz import concatv
__LOG = logging.getLogger(__name__)
def expand_args(args):
    """Unpack a packed argument tuple into :func:`rebin.rebin_nd`.

    ``multiprocessing`` pool ``map`` passes a single object per work item,
    so the (data, spec, assume_sorted) tuple is unpacked here.
    """
    return rebin.rebin_nd(*args)
class FMesh(object):
"""Fmesh tally object.
Attributes
----------
name : int
The name of tally (number).
kind : int
Kind of tally.
bins : dict
Bins along each axis. Keys: 'E' for energy, 'X' for x direction,
'Y' for y direction, 'Z' for z direction.
data : numpy.ndarray
Data values in each mesh cell.
errors : numpy.ndarray
Relative errors of data values in each mesh cell.
Methods
-------
get_slice(free, **kwargs) - gets specific slice of data.
get_spectrum(X, Y, Z) - gets energy spectrum at the specified point.
save(file) - saves this fmesh tally into file.
save2vtk(filename, data_name) - saves this fmesh to vtk file.
"""
    NPZ_MARK = np.int16(5445)
    """
    'Signature' to be stored in the first entry of meta entry in an npz file to check that the file is for FMesh object
    """

    NPZ_FORMAT = np.int16(4)
    """
    Identifies version of format of data stored in npz file
    """

    class X(RuntimeError):
        """FMesh-specific error, e.g. an unsupported npz format version in load_npz."""

        pass
    def __init__(
        self,
        name: int,
        kind: Union[int, Kind],
        geometry_spec: gc.AbstractGeometrySpec,
        ebins: np.ndarray,
        data: np.ndarray,
        errors: np.ndarray,
        totals: np.ndarray = None,
        totals_err: np.ndarray = None,
        comment: str = None,
    ):
        """
        Parameters
        ----------
        name :
            The name of tally (number).
        kind :
            Kind of tally: neutron, photon, electron or generic.
        geometry_spec :
            Spatial mesh specification (Cartesian or cylinder).
        ebins:
            Energy bin boundaries.
        data :
            Data values at centers of mesh cells.
            Shape (Ne-1)x(Nx-1)x(Ny-1)x(Nz-1), where Ne, Nx, Ny and Nz - the number
            of corresponding bin boundaries.
        errors :
            Relative errors of corresponding data values.
            Shape (Ne-1)x(Nx-1)x(Ny-1)x(Nz-1), where Ne, Nx, Ny and Nz - the number
            of corresponding bin boundaries.
        totals :
            Energy-integrated values per spatial cell (multi-group tallies only).
            Provide together with totals_err or omit both; when omitted they are
            derived from data and errors below.
        totals_err :
            Relative errors of totals.
        comment :
            Optional tally comment text.
        """
        self.name = int(name)
        self.kind = Kind(kind)
        self._geometry_spec: Union[
            gc.CartesianGeometrySpec, gc.CylinderGeometrySpec, gc.AbstractGeometrySpec
        ] = geometry_spec
        # Bin boundaries are also exposed through a dict keyed by axis letter.
        self.bins = {}
        self.bins["X"] = self._x = geometry_spec.ibins
        self.bins["Y"] = self._y = geometry_spec.jbins
        self.bins["Z"] = self._z = geometry_spec.kbins
        self.bins["E"] = self._e = gc.as_float_array(ebins)
        assert 2 <= self._e.size
        self.data = gc.as_float_array(data)
        self.errors = gc.as_float_array(errors)
        assert self.data.shape == self.errors.shape
        assert self.data.shape == (self.e.size - 1,) + self._geometry_spec.bins_shape
        # Preliminary assignment; overwritten below once totals are normalized/derived.
        self._totals = totals
        self._totals_err = totals_err
        if 2 < self._e.size:
            if totals is None or totals_err is None:
                assert (
                    totals is None and totals_err is None
                ), "Both totals and totals_err are to be provided or omitted"
                # Derive totals by summing over the energy axis; propagate the
                # absolute errors in quadrature and convert back to relative.
                totals = np.sum(self.data, axis=0)
                non_zero = totals > 0.0
                totals_err = np.zeros_like(totals)
                totals_err[non_zero] = (
                    np.sqrt(np.sum((self.errors * self.data) ** 2, axis=0))[non_zero]
                    / totals[non_zero]
                )
            else:
                assert (
                    totals is not None and totals_err is not None
                ), "Both totals and totals_err are to be provided or omitted"
                totals = np.asarray(totals, dtype=float)
                totals_err = np.asarray(totals_err, dtype=float)
        else:
            # Single energy group: data already is the total over energy.
            assert totals is None
            assert totals_err is None
        self._totals = totals
        self._totals_err = totals_err
        assert self._totals is None or self._totals.shape == self._totals_err.shape
        assert (
            self._totals is None or self._totals.shape == self._geometry_spec.bins_shape
        )
        self._comment = comment
# @property
# def x(self):
# return self._x
#
# @property
# def y(self):
# return self._y
#
# @property
# def z(self):
# return self._z
    @property
    def e(self):
        """Energy bin boundaries."""
        return self._e

    @property
    def ibins(self):
        """Bin boundaries along the first spatial axis (X, or R for cylinder)."""
        return self._geometry_spec.ibins

    @property
    def jbins(self):
        """Bin boundaries along the second spatial axis (Y, or Z for cylinder)."""
        return self._geometry_spec.jbins

    @property
    def kbins(self):
        """Bin boundaries along the third spatial axis (Z, or Theta for cylinder)."""
        return self._geometry_spec.kbins

    @property
    def totals(self):
        """Energy-integrated values per cell, or None for a one-group tally."""
        return self._totals

    @property
    def totals_err(self):
        """Relative errors of totals, or None for a one-group tally."""
        return self._totals_err

    @property
    def comment(self):
        """Optional tally comment text (may be None)."""
        return self._comment

    @property
    def origin(self):
        """Cylinder mesh origin; only valid for cylinder geometry."""
        assert self._geometry_spec.cylinder
        return self._geometry_spec.origin

    @property
    def axis(self):
        """Cylinder mesh axis; only valid for cylinder geometry."""
        assert self._geometry_spec.cylinder
        return self._geometry_spec.axs

    @property
    def vec(self):
        """Cylinder mesh reference vector; only valid for cylinder geometry."""
        assert self._geometry_spec.cylinder
        return self._geometry_spec.vec

    @property
    def is_cylinder(self):
        """
        Is this mesh cylinder?

        Note: MCNP uses `origin` on mesh tally specification, both rectilinear and cylinder,
        But outputs origin only for cylinder mesh.
        """
        return self._geometry_spec.cylinder

    @property
    def total_precision(self):
        # NOTE(review): totals_err is a 3-D array, so [-1] yields the last 2-D
        # slab, not a scalar - confirm a scalar (e.g. flatten()[-1]) wasn't intended.
        if 2 < self.e.size:
            return self.totals_err[-1]
        else:
            return self.errors[0, 0, 0, 0]
def is_equal_by_mesh(self, other: "FMesh") -> bool:
return (
self.kind == other.kind
and self._geometry_spec == other._geometry_spec
and np.array_equal(self.e, other.e)
)
    def has_better_precision_than(self, other):
        """True if this tally's total relative error is lower than *other*'s.

        Both tallies must be defined over the same mesh and energy grid.
        """
        assert self.is_equal_by_mesh(other)
        return self.total_precision < other.total_precision
def __eq__(self, other):
if not isinstance(other, FMesh):
return False
res = (
self.name == other.name
and self.is_equal_by_mesh(other)
and np.array_equal(self.data, other.data)
and np.array_equal(self.errors, other.errors)
and self.comment == other.comment
)
if res and self._totals:
res = np.all(np.isclose(self.totals, other.totals)) and np.all(
np.isclose(self.totals_err, other.totals_err)
)
# if res:
# res = self.is_cylinder == other.is_cylinder
# if res and self.is_cylinder:
# res = not self.origin(
# np.all(np.isclose(self.origin, other.origin))
# and np.all(np.isclose(self.axis, other.axis))
# )
return res
def __hash__(self):
return hash(
(
self.name,
self.kind,
self._geometry_spec,
self.e,
self.data,
self.errors,
self.comment,
)
)
def __repr__(self):
msg = "Fmesh({name}, {kind}, {xmin}..{xmax}, {ymin}..{ymax}, {zmin}..{zmax}, {emin}..{emax})"
(xmin, xmax), (ymin, ymax), (zmin, zmax) = self._geometry_spec.boundaries
return msg.format(
name=self.name,
kind=self.kind,
xmin=xmin,
xmax=xmax,
ymin=ymin,
ymax=ymax,
zmin=zmin,
zmax=zmax,
emin=self.e[0],
emax=self.e[-1],
)
    def surrounds_point(self, x, y, z):
        """True if point (x, y, z) lies within the mesh (delegates to the geometry spec)."""
        return self._geometry_spec.surrounds_point(x, y, z)
# def get_slice(
# self, free: str = "XY", **kwargs
# ) -> Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray]:
# """Gets slice of fmesh tally.
#
# kwargs specify names of fixed variables and their values. If the key
# corresponding to the one of fixed variables is present, then its value
# is ignored. If the point is outside the mesh then zeros are returned.
#
# Parameters
# ----------
# free : str
# Names of free parameters (those, which corresponds to slice axis).
# kwargs : dict
# Key-value pairs of fixed parameters. Possible keys:
# E - energy value of slice. If 'total' - data is summed over all
# energy bins; default: 'total';
# X - position of slice in x direction; default: first bin;
# Y - position of slice in y direction; default: first bin;
# Z - position of slice in z direction; default: first bin;
#
# Returns
# -------
# x_centers, y_centers : numpy.ndarray, ndim=1
# Coordinates of cell centers along free variables.
# result, error : numpy.ndarray, ndim=2
# An array of data values in the specified section (in phase space).
# """
# key_index = {0: "E", 1: "X", 2: "Y", 3: "Z"}
# free = free.upper()
# free_keys = [free[0], free[1]]
# slice_index = []
# sum_axis = []
# for i in range(4):
# key = key_index[i]
# if key in free_keys:
# index = np.arange(self.bins[key].size - 1)
# elif key in kwargs and isinstance(kwargs[key], (int, float)):
# index = np.searchsorted(self.bins[key], kwargs[key]) - 1
# elif key == "E":
# index = np.arange(self.bins[key].size - 1)
# sum_axis.append(i)
# else:
# index = 0
# slice_index.append(index)
#
# result_data = self.data
# result_error = self.errors
# for i, index in reversed(list(enumerate(slice_index))):
# if not isinstance(index, np.ndarray) and (
# index < 0 or index >= self.bins[key_index[i]].size - 1
# ):
# result_data *= 0
# result_error *= 0
# index = 0
# result_data = result_data.take(index, axis=i)
# result_error = result_error.take(index, axis=i)
#
# if sum_axis:
# abs_err_square = (result_data * result_error) ** 2
# abs_tot_err = np.sqrt(np.sum(abs_err_square, axis=tuple(sum_axis)))
# result_data = np.sum(result_data, axis=tuple(sum_axis))
# result_error = np.nan_to_num(abs_tot_err / result_data)
#
# xaxs = (self.bins[free_keys[0]][1:] + self.bins[free_keys[0]][:-1]) / 2.0
# yaxs = (self.bins[free_keys[1]][1:] + self.bins[free_keys[1]][:-1]) / 2.0
#
# return xaxs, yaxs, result_data, result_error
def get_spectrum(self, x, y, z):
"""Gets energy spectrum at the specified point.
Args:
x, y, z : double
X, Y and Z coordinate of the point where energy spectrum is
required. If point is located outside the mesh, zeros are returned.
Returns:
ebins, spec, err : numpy.ndarray[double]
Energy bin boundaries, group energy spectrum and relative errors.
"""
key_index = {0: "X", 1: "Y", 2: "Z"}
values = [x, y, z]
result_data = self.data
result_error = self.errors
for i, value in reversed(list(enumerate(values))):
key = key_index[i]
index = np.searchsorted(self.bins[key], value) - 1
if index < 0 or index >= self.bins[key].size - 1:
result_data *= 0
result_error *= 0
index = 0
result_data = result_data.take(index, axis=i + 1)
result_error = result_error.take(index, axis=i + 1)
return self.e, result_data, result_error
    def select_indexes(self, *, x=None, y=None, z=None):
        """Map coordinate values (None selects all) to bin indexes along each spatial axis."""
        return self._geometry_spec.select_indexes(i_values=x, j_values=y, k_values=z)
def get_totals(self, *, x=None, y=None, z=None):
if self._totals is None:
return None, None
found_x, found_y, found_z = self.select_indexes(x=x, y=y, z=z)
totals, rel_error = (
self._totals[found_x, found_y, found_z],
self._totals_err[found_x, found_y, found_z],
)
return totals, rel_error
def save_2_npz(self, filename: Union[str, Path] = None) -> None:
"""Writes this object to numpy npz file_.
Args:
filename : {str,Path}
Filename to which the object is saved. If file_ is a
file-object, then the filename is unchanged. If file_ is a string,
a .npz extension will be appended to the file_ name if it does not
already have one. By default, the name of file_ is the tally name.
"""
if filename is None:
filename = f"{self.name}.npz"
if isinstance(filename, str):
filename = Path(filename)
if not filename.suffix == ".npz":
filename = filename.with_suffix(".npz")
kwd = dict(
meta=np.array(
[FMesh.NPZ_MARK, FMesh.NPZ_FORMAT, self.name, self.kind],
dtype=np.uint32,
),
E=self.e,
X=self.ibins,
Y=self.jbins,
Z=self.kbins,
data=self.data,
errors=self.errors,
totals=self.totals,
totals_err=self.totals_err,
)
if self.comment:
kwd["comment"] = np.array(self.comment)
if self.is_cylinder:
kwd["origin"] = np.array(self._geometry_spec.origin)
kwd["axis"] = np.array(self._geometry_spec.axs)
np.savez_compressed(str(filename), **kwd)
    @classmethod
    def load_npz(cls, file_: Union[str, Path]) -> "FMesh":
        """
        Loads Fmesh object from the binary file_.

        Supports npz format versions 1..4 (see NPZ_FORMAT): older files may
        lack totals/comment/origin entries and, before version 4, stored the
        particle-kind code shifted by one.

        Parameters
        ----------
        file_ : file or str
            File or filename from which the object will be loaded.

        Raises
        ------
        FMesh.X
            If the file's format version is not supported.
        """
        if isinstance(file_, Path):
            file_ = str(file_)
        with np.load(file_) as data:
            meta = data["meta"]
            mark = meta[0]
            assert mark == FMesh.NPZ_MARK, "Incompatible file format %s" % file_
            version = meta[1]
            name, kind = meta[2:4]
            if 1 <= version:
                e = data["E"]
                x = data["X"]
                y = data["Y"]
                z = data["Z"]
                d = data["data"]
                r = data["errors"]
                if 2 < e.size:
                    # Multi-group tally: totals may be absent in older files.
                    try:
                        totals = data["totals"]
                        totals_err = data["totals_err"]
                    except KeyError:
                        totals = None
                        totals_err = None
                else:
                    totals = None
                    totals_err = None
                comment = None
                origin = None
                axis = None
                if 2 <= version:
                    # Version 2 added an optional comment entry.
                    if "comment" in data:
                        comment = data["comment"]
                        comment = comment.item()
                        assert comment
                if 3 <= version:
                    # Version 3 added cylinder origin/axis entries.
                    if "origin" in data:
                        assert "axis" in data
                        origin = data["origin"]
                        axis = data["axis"]
                        assert origin.size == 3
                        assert axis.size == 3
                if 4 <= version:
                    pass
                else:
                    # Before version 4 the particle-kind code was stored off by one.
                    kind = int(kind) + 1
                if origin is None:
                    geometry_spec = gc.CartesianGeometrySpec(x, y, z)
                else:
                    geometry_spec = gc.CylinderGeometrySpec(
                        x, y, z, origin=origin, axs=axis
                    )
                return cls(
                    name,
                    kind,
                    geometry_spec,
                    e,
                    d,
                    r,
                    totals,
                    totals_err,
                    comment=comment,
                )
            else:
                raise FMesh.X("Invalid version for FMesh file %d" % version)
def save2vtk(self, filename: str = None, data_name: str = None) -> None:
"""Saves this fmesh data to vtk file.
Data is saved for every energy bin and for total values (sum across
energy axis).
Args:
filename :
Name of file to which this object is stored. A .vtk extension will
be appended. By default, the name of file is the tally name.
data_name :
Name of data which will appear in vtk file. If None, tally name
and type will be used.
"""
assert not self.is_cylinder, "Not implemented for cylinder geometry"
if filename is None:
filename = str(self.name)
if data_name is None:
data_name = str(self.name) + " " + self.kind.name
cell_data = {}
for i, e in enumerate(self.e[1:]):
key = data_name + " E={0:.4e}".format(e)
cell_data[key] = self.data[i, :, :, :]
name = data_name + " total"
# if six.PY2:
# name = name.encode("ascii", "ignore")
cell_data[name] = np.sum(self.data, axis=0)
gridToVTK(filename, self.ibins, self.jbins, self.kbins, cellData=cell_data)
# noinspection PyUnresolvedReferences
def save_2_mcnp_mesh(self, stream: TextIO) -> None:
"""
Saves the mesh in a file_ in a format similar to mcnp mesh tally textual representation.
Args:
stream : stream to store the mesh.
"""
def format_comment(a):
return "\n" + a.comment if a.comment else ""
header = f"""
Mesh Tally Number {self.name}{format_comment(self)}
This is a {self.kind.name} mesh tally.
Tally bin boundaries:{self.format_cylinder_origin_and_axis_label()}
"""[
1:-1
]
e = self.e[1:]
x = 0.5 * (self.ibins[1:] + self.ibins[:-1])
y = 0.5 * (self.jbins[1:] + self.jbins[:-1])
z = 0.5 * (self.kbins[1:] + self.kbins[:-1])
print(header, file=stream)
print(
f"{'R' if self.is_cylinder else 'X'} direction:",
file=stream,
end="",
)
for f in np.nditer(self.ibins):
print("% g" % f, file=stream, end="")
print(file=stream)
print(
"{} direction:".format("Z" if self.is_cylinder else "Y"),
file=stream,
end="",
)
for f in np.nditer(self.jbins):
print(" %g" % f, file=stream, end="")
print(file=stream)
print(
"{} direction:".format("Theta" if self.is_cylinder else "Z"),
file=stream,
end="",
)
for f in np.nditer(self.kbins):
print(" %g" % f, file=stream, end="")
print(file=stream)
print("Energy bin boundaries:", file=stream, end="")
for f in np.nditer(self.e):
print(" %g" % f, file=stream, end="")
print("\n", file=stream)
if self.is_cylinder:
print(
" Energy R Z Th Result Rel Error",
file=stream,
)
else:
print(
" Energy X Y Z Result Rel Error",
file=stream,
)
for ie in range(e.size):
for ix in range(x.size):
for iy in range(y.size):
for iz in range(z.size):
value = self.data[ie, ix, iy, iz]
err = self.errors[ie, ix, iy, iz]
row = " %10.3e%10.3f%10.3f%10.3f %11.5e %11.5e" % (
e[ie],
x[ix],
y[iy],
z[iz],
value,
err,
)
print(row, file=stream)
for ix in range(x.size):
for iy in range(y.size):
for iz in range(z.size):
if self._totals:
value = self._totals[ix, iy, iz]
err = self._totals_err[ix, iy, iz]
else:
portion = self.data[:, ix, iy, iz]
value = np.sum(portion)
err = portion * self.errors[:, ix, iy, iz]
err = np.sqrt(np.sum(err * err)) / value
row = "%11s%10.3f%10.3f%10.3f %11.5e %11.5e" % (
" Total ",
x[ix],
y[iy],
z[iz],
value,
err,
)
print(row, file=stream, end="")
print("\n", file=stream)
def total_by_energy(self, new_name=0):
e = np.array([self.e[0], self.e[-1]])
data = self.totals[np.newaxis, ...]
errors = self.totals_err[np.newaxis, ...]
return FMesh(new_name, self.kind, self._geometry_spec, e, data, errors)
    def shrink(
        self,
        emin=None,
        emax=None,
        xmin=None,
        xmax=None,
        ymin=None,
        ymax=None,
        zmin=None,
        zmax=None,
        new_name=-1,
    ):
        """Cut this tally down to the given energy and spatial bounds.

        Any bound left as None keeps the corresponding mesh edge unchanged.

        Args:
            emin, emax: energy range to keep.
            xmin, xmax, ymin, ymax, zmin, zmax: spatial ranges to keep.
            new_name: name (number) for the shrunk tally.

        Returns:
            New FMesh restricted to the requested ranges.
        """
        trim_spec = [
            f
            for f in rebin.trim_spec_composer(
                [self.e, self.ibins, self.jbins, self.kbins],
                [emin, xmin, ymin, zmin],
                [emax, xmax, ymax, zmax],
            )
        ]
        new_bins_list, new_data = rebin.shrink_nd(
            self.data, iter(trim_spec), assume_sorted=True
        )
        _, new_errors = rebin.shrink_nd(
            self.errors, iter(trim_spec), assume_sorted=True
        )
        # Both shrink passes must agree on the resulting bin boundaries.
        assert all(np.array_equal(a, b) for a, b in zip(new_bins_list, _))
        new_ebins, new_xbins, new_ybins, new_zbins = new_bins_list
        if self.totals is None:
            new_totals = None
            new_totals_err = None
        else:
            # Totals have no energy axis - trim over the spatial axes only.
            totals_trim_spec = [
                f
                for f in rebin.trim_spec_composer(
                    [self.ibins, self.jbins, self.kbins],
                    [xmin, ymin, zmin],
                    [xmax, ymax, zmax],
                )
            ]
            _, new_totals = rebin.shrink_nd(
                self.totals, iter(totals_trim_spec), assume_sorted=True
            )
            _, new_totals_err = rebin.shrink_nd(
                self.totals_err, iter(totals_trim_spec), assume_sorted=True
            )
        return FMesh(
            new_name,
            self.kind,
            gc.CartesianGeometrySpec(new_xbins, new_ybins, new_zbins),
            new_ebins,
            new_data,
            new_errors,
            new_totals,
            new_totals_err,
        )
def rebin(self, new_x, new_y, new_z, new_name=-1, extra_process_threshold=1000000):
    """
    Extract data for a new spatial grid.

    Parameters
    ----------
    new_x: ndarray
        A new binning over X axis.
    new_y: ndarray
        A new binning over Y axis.
    new_z: ndarray
        A new binning over Z axis.
    new_name: int, optional
        A name for the rebinned mesh to be created.
    extra_process_threshold: optional
        At which size of data use multiple Python processes

    Returns
    -------
    mesh: FMesh
        New FMesh object with the rebinned data.
    """
    assert not self.is_cylinder, "Not implemented for cylinder meshes"
    if self.data.size < extra_process_threshold:
        return self.rebin_single(new_x, new_y, new_z, new_name)
    # To avoid huge memory allocations, iterate over energy with external processes
    data_rebin_spec = [
        i
        for i in rebin.rebin_spec_composer(
            [self.ibins, self.jbins, self.kbins],
            [new_x, new_y, new_z],
            axes=[0, 1, 2],
        )
    ]

    def iter_over_e(data):
        # One task per energy bin: (slice, spec, assume_sorted) for expand_args.
        for i in range(self.e.size - 1):
            yield data[i], data_rebin_spec, True

    pool = ndp.Pool(processes=4)
    try:
        new_data = np.stack(pool.map(expand_args, iter_over_e(self.data)), axis=0)
        # Rebin absolute errors (data * rel_error), converted back to relative below.
        t = self.data * self.errors
        new_errors = np.stack(pool.map(expand_args, iter_over_e(t)), axis=0)
    finally:
        # BUG FIX: the pool was never closed, leaking worker processes on every call.
        pool.close()
        pool.join()
    new_errors /= new_data
    if self.totals is None:
        new_totals = None
        new_totals_err = None
    else:
        new_totals = rebin.rebin_nd(
            self.totals, data_rebin_spec, assume_sorted=True
        )
        t = self.totals * self.totals_err
        new_totals_err = rebin.rebin_nd(t, data_rebin_spec, assume_sorted=True)
        new_totals_err /= new_totals
    return FMesh(
        new_name,
        self.kind,
        gc.CartesianGeometrySpec(new_x, new_y, new_z),
        self.e,
        new_data,
        new_errors,
        new_totals,
        new_totals_err,
    )
def rebin_single(self, new_x, new_y, new_z, new_name=-1):
    """
    Extract data for a new spatial grid (single-process implementation).

    Creates an FMesh corresponding to this one by fluxes, but over a new mesh.

    Parameters
    ----------
    new_x: ndarray
        A new binning over X axis.
    new_y: ndarray
        A new binning over Y axis.
    new_z: ndarray
        A new binning over Z axis.
    new_name: int, optional
        name for the rebinned mesh to be created.

    Returns
    -------
    mesh: FMesh
        New FMesh object with the rebinned data.
    """
    # FIX: merged a second, redundant docstring-string statement into the one above.
    assert not self.is_cylinder, "Not implemented for cylinder meshes"
    # Spatial axes of `data` are 1..3 (axis 0 is energy).
    data_rebin_spec = [
        i
        for i in rebin.rebin_spec_composer(
            [self.ibins, self.jbins, self.kbins],
            [new_x, new_y, new_z],
            axes=[1, 2, 3],
        )
    ]
    new_data = rebin.rebin_nd(self.data, iter(data_rebin_spec), assume_sorted=True)
    # Rebin absolute errors (data * rel_error), then convert back to relative.
    t = self.data * self.errors
    new_errors = rebin.rebin_nd(t, iter(data_rebin_spec), assume_sorted=True)
    new_errors /= new_data
    if self.totals is None:
        new_totals = None
        new_totals_err = None
    else:
        # Totals carry no energy axis, so spatial axes are 0..2 here.
        totals_rebin_spec = [
            i
            for i in rebin.rebin_spec_composer(
                [self.ibins, self.jbins, self.kbins],
                [new_x, new_y, new_z],
                axes=[0, 1, 2],
            )
        ]
        new_totals = rebin.rebin_nd(
            self.totals, iter(totals_rebin_spec), assume_sorted=True
        )
        t = self.totals * self.totals_err
        new_totals_err = rebin.rebin_nd(
            t, iter(totals_rebin_spec), assume_sorted=True
        )
        new_totals_err /= new_totals
    return FMesh(
        new_name,
        self.kind,
        gc.CartesianGeometrySpec(new_x, new_y, new_z),
        self.e,
        new_data,
        new_errors,
        new_totals,
        new_totals_err,
    )
def format_cylinder_origin_and_axis_label(self):
    """Return the origin/axis caption for a cylinder mesh, or '' for Cartesian."""
    if not self.is_cylinder:
        return ""
    spec = self._geometry_spec
    return "\n Cylinder origin at {0} {1} {2}, axis in {3} {4} {5} direction\n".format(
        spec.origin[0],
        spec.origin[1],
        spec.origin[2],
        spec.axs[0],
        spec.axs[1],
        spec.axs[2],
    )
# noinspection PyTypeChecker,PyProtectedMember
def merge_tallies(
    name: int, kind: int, *tally_weight: Tuple[FMesh, float], comment: str = None
) -> FMesh:
    """Build the weighted superposition of several mesh tallies.

    Parameters
    ----------
    name :
        Name of new fmesh tally.
    kind :
        Type of new fmesh tally. It can be -1 (or any arbitrary integer).
    *tally_weight :
        (tally, weight) pairs; all tallies must share geometry, and weights may
        be scalars or arrays matching the tally data shape.
    comment:
        A comment to assign to the new mesh tally.

    Returns
    -------
    result :
        The merged FMesh.
    """
    result_data = None
    errors = None
    geometry_spec = None
    ebins = None
    for tally, weight in tally_weight:  # type: FMesh, float
        contribution = tally.data * weight
        # Accumulate squared absolute errors; converted to relative at the end.
        squared_err = (tally.errors * tally.data * weight) ** 2
        if result_data is None:
            result_data = contribution
            errors = squared_err
            geometry_spec = tally._geometry_spec
            ebins = tally.e
        else:
            result_data += contribution
            errors += squared_err
            assert geometry_spec == tally._geometry_spec
            assert np.array_equal(
                ebins.size, tally.e.size
            )  # allow merging neutron and photon heating meshes
    nonzero_idx = np.logical_and(result_data > 0.0, errors > 0.0)
    result_error = np.zeros_like(result_data)
    result_error[nonzero_idx] = np.sqrt(errors[nonzero_idx]) / result_data[nonzero_idx]
    return FMesh(
        name,
        kind,
        geometry_spec,
        ebins,
        result_data,
        result_error,
        comment=comment,
    )
# def read_meshtally(file_):
# """Reads fmesh tally from binary file_.
#
# Parameters
# ----------
# file_ : file or str
# File or filename from which tally should be loaded.
#
# Returns
# -------
# mesh : FMesh
# Fmesh tally instance.
# """
# data = np.load(file_)
# ne = int(data[0])
# nx = int(data[1])
# ny = int(data[2])
# nz = int(data[3])
# name = int(data[4])
# kind = int(data[5])
# ebins = data[6 : ne + 6]
# xbins = data[ne + 6 : ne + nx + 6]
# ybins = data[ne + nx + 6 : ne + nx + ny + 6]
# zbins = data[ne + nx + ny + 6 : ne + nx + ny + nz + 6]
# n = (ne - 1) * (nx - 1) * (ny - 1) * (nz - 1)
# sti = ne + nx + ny + nz + 6
# data_f = data[sti : sti + n].reshape((ne - 1, nx - 1, ny - 1, nz - 1))
# data_err = data[sti + n :].reshape((ne - 1, nx - 1, ny - 1, nz - 1))
# return FMesh(name, kind, xbins, ybins, zbins, ebins, data_f, data_err)
def read_meshtal(stream: TextIO, select=None, mesh_file_info=None) -> List[FMesh]:
    """Read all fmesh tallies from a meshtal text stream.

    Args:
        stream: The text stream to read.
        select: predicate selecting which meshes to process.
        mesh_file_info: optional object that receives header info (nps).

    Returns:
        The list of individual fmesh tallies.
    """
    # Skip the two header lines (code stamp and problem title).
    next(stream)  # TODO dvp check if we need to store problem time stamp
    next(stream)  # TODO dvp check if we need to store problem title
    nps_line = next(stream)
    nps = int(float(nps_line.strip().split("=")[1]))
    if mesh_file_info is not None:
        mesh_file_info.nps = nps
    return [mesh for mesh in iter_meshtal(stream, select)]
# noinspection PyTypeChecker
def iter_meshtal(
    fid: TextIO,
    name_select: Callable[[int], bool] = None,
    tally_select: Callable[[FMesh], bool] = None,
) -> Generator[FMesh, None, None]:
    """Iterates fmesh tallies from fid.

    Parameters
    ----------
    fid : A stream to read meshes from.
    name_select: A function returning True, if tally name is acceptable
    tally_select: A function returning True, if total tally content is acceptable

    Returns
    -------
    iterator:
        An iterator over meshtally file with proper filtering over names or tallies content.
    """
    try:
        while True:
            # Scan forward to the next "Mesh Tally Number N" header;
            # _find_words_after raises EOFError at end of stream, ending the loop.
            # noinspection PyUnresolvedReferences
            name = int(_find_words_after(fid, "Mesh", "Tally", "Number")[0])
            if not name_select or name_select(name):
                # __LOG.debug("Reading mesh tally %s", name)
                # The line after the header is either a user comment or the
                # "This is a <particle> mesh tally." line.
                comment = fid.readline().strip()
                if comment.startswith("This is a"):
                    kind = comment.split()[3]
                    comment = None
                else:
                    # noinspection PyUnresolvedReferences
                    kind = _find_words_after(fid, "This", "is", "a")[0]
                if comment:
                    # Long tally numbers overflow into the comment column; repair.
                    comment = fix_mesh_comment(name, comment)
                kind = Kind[kind]  # particle kind looked up by name
                # TODO dvp read "dose function modified" here
                _find_words_after(fid, "Tally", "bin", "boundaries:")
                line = next(fid).lstrip()
                if line.startswith("Cylinder"):
                    # retrieve cylinder origin and axis
                    part1, part2 = line.split(",")
                    origin = np.fromiter(part1.split()[3:6], dtype=float)
                    axis = np.fromiter(part2.split()[2:5], dtype=float)
                    ibins = np.array(
                        [
                            float(w)
                            for w in _find_words_after(
                                concatv([line], fid), "R", "direction:"
                            )
                        ]
                    )
                    jbins = np.array(
                        [float(w) for w in _find_words_after(fid, "Z", "direction:")]
                    )
                    kbins = np.array(
                        [
                            float(w)
                            for w in _find_words_after(
                                fid, "Theta", "direction", "(revolutions):"
                            )
                        ]
                    )
                    geometry_spec = gc.CylinderGeometrySpec(
                        ibins, jbins, kbins, origin=origin, axs=axis
                    )
                    ebins = np.array(
                        [
                            float(w)
                            for w in _find_words_after(
                                fid, "Energy", "bin", "boundaries:"
                            )
                        ]
                    )
                    with_ebins = check_ebins(
                        fid, ["Energy", "R", "Z", "Th", "Result", "Rel", "Error"]
                    )
                else:
                    # Cartesian mesh: X/Y/Z bin boundaries.
                    xbins = np.array(
                        [
                            float(w)
                            for w in _find_words_after(
                                concatv([line], fid), "X", "direction:"
                            )
                        ]
                    )
                    ybins = np.array(
                        [float(w) for w in _find_words_after(fid, "Y", "direction:")]
                    )
                    zbins = np.array(
                        [float(w) for w in _find_words_after(fid, "Z", "direction:")]
                    )
                    geometry_spec = gc.CartesianGeometrySpec(xbins, ybins, zbins)
                    ebins = np.array(
                        [
                            float(w)
                            for w in _find_words_after(
                                fid, "Energy", "bin", "boundaries:"
                            )
                        ]
                    )
                    with_ebins = check_ebins(
                        fid, ["Energy", "X", "Y", "Z", "Result", "Rel", "Error"]
                    )
                spatial_bins_size = geometry_spec.bins_size
                bins_size = spatial_bins_size * (ebins.size - 1)

                def _iterate_bins(stream, n_, _with_ebins):
                    # Fixed-column parse of the value/error table; the value
                    # column shifts right when an Energy column is present.
                    value_start, value_end = (41, 53) if _with_ebins else (32, 44)
                    for i in range(n_):
                        _line = next(stream)
                        _value = float(_line[value_start:value_end])
                        _error = float(_line[value_end:])
                        if _value < 0.0:
                            # Negative results are treated as empty bins.
                            _value = _error = 0.0
                        yield _value
                        yield _error

                data_items = np.fromiter(
                    _iterate_bins(fid, bins_size, with_ebins), dtype=float
                )
                data_items = data_items.reshape(bins_size, 2)
                shape = (ebins.size - 1,) + geometry_spec.bins_shape
                data, error = data_items[:, 0].reshape(shape), data_items[:, 1].reshape(
                    shape
                )

                # reading totals for energy
                def _iterate_totals(stream, totals_number):
                    # "Total" rows: word 0 is the label, 1..3 are coordinates,
                    # the remaining words are (value, error).
                    for i in range(totals_number):
                        _line = next(stream).split()
                        # TODO dvp: check for negative values in an MCNP meshtal file
                        assert "Total" == _line[0]
                        for w in _line[4:]:
                            yield float(w)

                if (
                    ebins.size > 2
                ):  # Totals are not output if there's only one bin in energy domain
                    totals_items = np.fromiter(
                        _iterate_totals(fid, spatial_bins_size), dtype=float
                    )
                    totals_items = totals_items.reshape(spatial_bins_size, 2)
                    shape = geometry_spec.bins_shape
                    totals = totals_items[:, 0].reshape(shape)
                    totals_err = totals_items[:, 1].reshape(shape)
                else:
                    totals = None
                    totals_err = None
                res = FMesh(
                    name,
                    kind,
                    geometry_spec,
                    ebins,
                    data,
                    error,
                    totals,
                    totals_err,
                    comment=comment,
                )
                if not tally_select or tally_select(res):
                    yield res
            else:
                # Skipped tallies are not parsed; the next header scan simply
                # fast-forwards over their data lines.
                __LOG.debug("Skipping mesh tally %s", name)
    except EOFError:
        pass
def check_ebins(fid: Iterable[str], keys: List[str]) -> bool:
    """Check whether the value table header includes an energy column.

    If the next nonempty line starts with keys[0] (e.g. "Energy"), energy bins
    are present; otherwise the line must equal keys[1:].

    Args:
        fid: text rows to scan, including prepending empty rows
        keys: sequence of words to check

    Returns:
        True if energy bins are present, False otherwise.

    Raises:
        ValueError: if keys don't correspond to the nonempty line.
    """
    title_line = _next_not_empty_line(fid)
    if title_line is None:
        raise ValueError(f"Cannot find titles {keys[1:]}")
    if title_line[0] == keys[0]:
        # Header starts with the energy column name: energy bins are present.
        assert keys[1:] == title_line[1:]
        return True
    if keys[1:] != title_line:
        raise ValueError(f"Unexpected values title {title_line}")
    return False
def _next_not_empty_line(f: Iterable[str]) -> Optional[List[str]]:
for line in f:
words = line.split()
if 0 < len(words):
return words
return None
def _find_words_after(f, *keywords: str) -> List[str]:
"""Searches for words that follow keywords.
The line from file f is read. Then it is split into words (by spaces).
If its first words are the same as keywords, then remaining words (up to
newline character) are returned. Otherwise, new line is read.
Parameters
----------
f : iterable of text lines
File in which words are searched.
*keywords : list of str
List of keywords after which right words are. The order is important.
Returns
-------
words : list of str
The list of words that follow keywords.
"""
for line in f:
words = line.split()
i = 0
for w, kw in zip(words, keywords):
if w != kw:
break
i += 1
if i >= len(keywords):
return words[i:]
raise EOFError
def m_2_npz(
    stream: TextIO,
    name_select=lambda _: True,
    tally_select=lambda _: True,
    prefix: str = "",
    suffix: str = "",
    mesh_file_info=None,
):
    """Split the tallies from an MCNP mesh file into separate npz files.

    Args:
        stream: File with MCNP mesh tallies.
        name_select: function(int)->bool filtering meshes by name.
        tally_select: function(FMesh)->bool filtering meshes by content.
        prefix: prefix for the per-mesh file names.
        suffix: suffix for the per-mesh file names.
        mesh_file_info: structure to store meshtal file header info: nps.

    Returns:
        Total number of files created.
    """
    # Skip the two header lines (code stamp and problem title).
    next(stream)  # TODO dvp check if we need to store problem time stamp
    next(stream)  # TODO dvp check if we need to store problem title
    header = next(stream)
    nps = int(float(header.strip().split("=")[1]))
    if mesh_file_info is not None:
        mesh_file_info.nps = nps
    total = 0
    for mesh in iter_meshtal(stream, name_select=name_select, tally_select=tally_select):
        mesh.save_2_npz(prefix + str(mesh.name) + suffix)
        total += 1
    return total
def fix_mesh_comment(mesh_no: int, comment: str) -> str:
    """Repair a comment column overrun by a long mesh number, then trim whitespace."""
    # Mesh numbers longer than 3 digits push that many characters into the
    # comment field; drop them from the front.
    overflow = len(str(mesh_no)) - 3
    if overflow > 0:
        comment = comment[overflow:]
    return comment.strip()
def meshes_to_vtk(
    *meshes: FMesh,
    out_dir: Path = None,
    get_mesh_description_strategy: Callable[[FMesh], str] = None,
) -> None:
    """Save each mesh to a VTK file named '<particle>-<description>-<name>'.

    When *out_dir* is given, it is created (with parents) and files go there.
    """
    if out_dir:
        out_dir.mkdir(parents=True, exist_ok=True)
    for mesh in meshes:
        descriptor = f"{mesh.kind.short}-{get_mesh_description_strategy(mesh)}"
        target = f"{descriptor}-{mesh.name}"
        if out_dir:
            target = str(out_dir / target)
        mesh.save2vtk(target, descriptor)
| [
"numpy.load",
"numpy.sum",
"mckit_meshes.mesh.geometry_spec.CartesianGeometrySpec",
"mckit_meshes.utils.rebin.rebin_spec_composer",
"pathlib.Path",
"numpy.isclose",
"mckit_meshes.mesh.geometry_spec.as_float_array",
"numpy.zeros_like",
"mckit_meshes.utils.rebin.trim_spec_composer",
"mckit_meshes.ut... | [((1724, 1751), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (1741, 1751), False, 'import logging\n'), ((1788, 1809), 'mckit_meshes.utils.rebin.rebin_nd', 'rebin.rebin_nd', (['*args'], {}), '(*args)\n', (1802, 1809), True, 'import mckit_meshes.utils.rebin as rebin\n'), ((2569, 2583), 'numpy.int16', 'np.int16', (['(5445)'], {}), '(5445)\n', (2577, 2583), True, 'import numpy as np\n'), ((2738, 2749), 'numpy.int16', 'np.int16', (['(4)'], {}), '(4)\n', (2746, 2749), True, 'import numpy as np\n'), ((31534, 31581), 'numpy.logical_and', 'np.logical_and', (['(result_data > 0.0)', '(errors > 0.0)'], {}), '(result_data > 0.0, errors > 0.0)\n', (31548, 31581), True, 'import numpy as np\n'), ((31601, 31627), 'numpy.zeros_like', 'np.zeros_like', (['result_data'], {}), '(result_data)\n', (31614, 31627), True, 'import numpy as np\n'), ((3885, 3895), 'mckit_meshes.particle_kind.ParticleKind', 'Kind', (['kind'], {}), '(kind)\n', (3889, 3895), True, 'from mckit_meshes.particle_kind import ParticleKind as Kind\n'), ((4269, 4293), 'mckit_meshes.mesh.geometry_spec.as_float_array', 'gc.as_float_array', (['ebins'], {}), '(ebins)\n', (4286, 4293), True, 'import mckit_meshes.mesh.geometry_spec as gc\n'), ((4347, 4370), 'mckit_meshes.mesh.geometry_spec.as_float_array', 'gc.as_float_array', (['data'], {}), '(data)\n', (4364, 4370), True, 'import mckit_meshes.mesh.geometry_spec as gc\n'), ((4393, 4418), 'mckit_meshes.mesh.geometry_spec.as_float_array', 'gc.as_float_array', (['errors'], {}), '(errors)\n', (4410, 4418), True, 'import mckit_meshes.mesh.geometry_spec as gc\n'), ((19385, 19410), 'numpy.sum', 'np.sum', (['self.data'], {'axis': '(0)'}), '(self.data, axis=0)\n', (19391, 19410), True, 'import numpy as np\n'), ((19420, 19495), 'pyevtk.hl.gridToVTK', 'gridToVTK', (['filename', 'self.ibins', 'self.jbins', 'self.kbins'], {'cellData': 'cell_data'}), '(filename, self.ibins, self.jbins, self.kbins, cellData=cell_data)\n', (19429, 19495), 
False, 'from pyevtk.hl import gridToVTK\n'), ((20454, 20475), 'numpy.nditer', 'np.nditer', (['self.ibins'], {}), '(self.ibins)\n', (20463, 20475), True, 'import numpy as np\n'), ((20711, 20732), 'numpy.nditer', 'np.nditer', (['self.jbins'], {}), '(self.jbins)\n', (20720, 20732), True, 'import numpy as np\n'), ((20972, 20993), 'numpy.nditer', 'np.nditer', (['self.kbins'], {}), '(self.kbins)\n', (20981, 20993), True, 'import numpy as np\n'), ((21150, 21167), 'numpy.nditer', 'np.nditer', (['self.e'], {}), '(self.e)\n', (21159, 21167), True, 'import numpy as np\n'), ((23177, 23210), 'numpy.array', 'np.array', (['[self.e[0], self.e[-1]]'], {}), '([self.e[0], self.e[-1]])\n', (23185, 23210), True, 'import numpy as np\n'), ((26131, 26152), 'mckit_meshes.utils.no_daemon_process.Pool', 'ndp.Pool', ([], {'processes': '(4)'}), '(processes=4)\n', (26139, 26152), True, 'import mckit_meshes.utils.no_daemon_process as ndp\n'), ((31660, 31688), 'numpy.sqrt', 'np.sqrt', (['errors[nonzero_idx]'], {}), '(errors[nonzero_idx])\n', (31667, 31688), True, 'import numpy as np\n'), ((7574, 7605), 'numpy.array_equal', 'np.array_equal', (['self.e', 'other.e'], {}), '(self.e, other.e)\n', (7588, 7605), True, 'import numpy as np\n'), ((7978, 8015), 'numpy.array_equal', 'np.array_equal', (['self.data', 'other.data'], {}), '(self.data, other.data)\n', (7992, 8015), True, 'import numpy as np\n'), ((8032, 8073), 'numpy.array_equal', 'np.array_equal', (['self.errors', 'other.errors'], {}), '(self.errors, other.errors)\n', (8046, 8073), True, 'import numpy as np\n'), ((14830, 14844), 'pathlib.Path', 'Path', (['filename'], {}), '(filename)\n', (14834, 14844), False, 'from pathlib import Path\n'), ((15404, 15426), 'numpy.array', 'np.array', (['self.comment'], {}), '(self.comment)\n', (15412, 15426), True, 'import numpy as np\n'), ((15484, 15520), 'numpy.array', 'np.array', (['self._geometry_spec.origin'], {}), '(self._geometry_spec.origin)\n', (15492, 15520), True, 'import numpy as np\n'), ((15547, 
15580), 'numpy.array', 'np.array', (['self._geometry_spec.axs'], {}), '(self._geometry_spec.axs)\n', (15555, 15580), True, 'import numpy as np\n'), ((15998, 16012), 'numpy.load', 'np.load', (['file_'], {}), '(file_)\n', (16005, 16012), True, 'import numpy as np\n'), ((24957, 25014), 'mckit_meshes.mesh.geometry_spec.CartesianGeometrySpec', 'gc.CartesianGeometrySpec', (['new_xbins', 'new_ybins', 'new_zbins'], {}), '(new_xbins, new_ybins, new_zbins)\n', (24981, 25014), True, 'import mckit_meshes.mesh.geometry_spec as gc\n'), ((26885, 26949), 'mckit_meshes.utils.rebin.rebin_nd', 'rebin.rebin_nd', (['self.totals', 'data_rebin_spec'], {'assume_sorted': '(True)'}), '(self.totals, data_rebin_spec, assume_sorted=True)\n', (26899, 26949), True, 'import mckit_meshes.utils.rebin as rebin\n'), ((27055, 27109), 'mckit_meshes.utils.rebin.rebin_nd', 'rebin.rebin_nd', (['t', 'data_rebin_spec'], {'assume_sorted': '(True)'}), '(t, data_rebin_spec, assume_sorted=True)\n', (27069, 27109), True, 'import mckit_meshes.utils.rebin as rebin\n'), ((27231, 27276), 'mckit_meshes.mesh.geometry_spec.CartesianGeometrySpec', 'gc.CartesianGeometrySpec', (['new_x', 'new_y', 'new_z'], {}), '(new_x, new_y, new_z)\n', (27255, 27276), True, 'import mckit_meshes.mesh.geometry_spec as gc\n'), ((29519, 29564), 'mckit_meshes.mesh.geometry_spec.CartesianGeometrySpec', 'gc.CartesianGeometrySpec', (['new_x', 'new_y', 'new_z'], {}), '(new_x, new_y, new_z)\n', (29543, 29564), True, 'import mckit_meshes.mesh.geometry_spec as gc\n'), ((31398, 31434), 'numpy.array_equal', 'np.array_equal', (['ebins.size', 't.e.size'], {}), '(ebins.size, t.e.size)\n', (31412, 31434), True, 'import numpy as np\n'), ((4893, 4918), 'numpy.sum', 'np.sum', (['self.data'], {'axis': '(0)'}), '(self.data, axis=0)\n', (4899, 4918), True, 'import numpy as np\n'), ((4988, 5009), 'numpy.zeros_like', 'np.zeros_like', (['totals'], {}), '(totals)\n', (5001, 5009), True, 'import numpy as np\n'), ((5406, 5437), 'numpy.asarray', 'np.asarray', 
(['totals'], {'dtype': 'float'}), '(totals, dtype=float)\n', (5416, 5437), True, 'import numpy as np\n'), ((5467, 5502), 'numpy.asarray', 'np.asarray', (['totals_err'], {'dtype': 'float'}), '(totals_err, dtype=float)\n', (5477, 5502), True, 'import numpy as np\n'), ((13311, 13349), 'numpy.searchsorted', 'np.searchsorted', (['self.bins[key]', 'value'], {}), '(self.bins[key], value)\n', (13326, 13349), True, 'import numpy as np\n'), ((14976, 15064), 'numpy.array', 'np.array', (['[FMesh.NPZ_MARK, FMesh.NPZ_FORMAT, self.name, self.kind]'], {'dtype': 'np.uint32'}), '([FMesh.NPZ_MARK, FMesh.NPZ_FORMAT, self.name, self.kind], dtype=np\n .uint32)\n', (14984, 15064), True, 'import numpy as np\n'), ((23653, 23780), 'mckit_meshes.utils.rebin.trim_spec_composer', 'rebin.trim_spec_composer', (['[self.e, self.ibins, self.jbins, self.kbins]', '[emin, xmin, ymin, zmin]', '[emax, xmax, ymax, zmax]'], {}), '([self.e, self.ibins, self.jbins, self.kbins], [\n emin, xmin, ymin, zmin], [emax, xmax, ymax, zmax])\n', (23677, 23780), True, 'import mckit_meshes.utils.rebin as rebin\n'), ((24101, 24121), 'numpy.array_equal', 'np.array_equal', (['a', 'b'], {}), '(a, b)\n', (24115, 24121), True, 'import numpy as np\n'), ((26216, 26322), 'mckit_meshes.utils.rebin.rebin_spec_composer', 'rebin.rebin_spec_composer', (['[self.ibins, self.jbins, self.kbins]', '[new_x, new_y, new_z]'], {'axes': '[0, 1, 2]'}), '([self.ibins, self.jbins, self.kbins], [new_x,\n new_y, new_z], axes=[0, 1, 2])\n', (26241, 26322), True, 'import mckit_meshes.utils.rebin as rebin\n'), ((28306, 28412), 'mckit_meshes.utils.rebin.rebin_spec_composer', 'rebin.rebin_spec_composer', (['[self.ibins, self.jbins, self.kbins]', '[new_x, new_y, new_z]'], {'axes': '[1, 2, 3]'}), '([self.ibins, self.jbins, self.kbins], [new_x,\n new_y, new_z], axes=[1, 2, 3])\n', (28331, 28412), True, 'import mckit_meshes.utils.rebin as rebin\n'), ((8188, 8225), 'numpy.isclose', 'np.isclose', (['self.totals', 'other.totals'], {}), '(self.totals, 
other.totals)\n', (8198, 8225), True, 'import numpy as np\n'), ((8255, 8300), 'numpy.isclose', 'np.isclose', (['self.totals_err', 'other.totals_err'], {}), '(self.totals_err, other.totals_err)\n', (8265, 8300), True, 'import numpy as np\n'), ((17675, 17708), 'mckit_meshes.mesh.geometry_spec.CartesianGeometrySpec', 'gc.CartesianGeometrySpec', (['x', 'y', 'z'], {}), '(x, y, z)\n', (17699, 17708), True, 'import mckit_meshes.mesh.geometry_spec as gc\n'), ((17767, 17824), 'mckit_meshes.mesh.geometry_spec.CylinderGeometrySpec', 'gc.CylinderGeometrySpec', (['x', 'y', 'z'], {'origin': 'origin', 'axs': 'axis'}), '(x, y, z, origin=origin, axs=axis)\n', (17790, 17824), True, 'import mckit_meshes.mesh.geometry_spec as gc\n'), ((24411, 24517), 'mckit_meshes.utils.rebin.trim_spec_composer', 'rebin.trim_spec_composer', (['[self.ibins, self.jbins, self.kbins]', '[xmin, ymin, zmin]', '[xmax, ymax, zmax]'], {}), '([self.ibins, self.jbins, self.kbins], [xmin, ymin,\n zmin], [xmax, ymax, zmax])\n', (24435, 24517), True, 'import mckit_meshes.utils.rebin as rebin\n'), ((28906, 29012), 'mckit_meshes.utils.rebin.rebin_spec_composer', 'rebin.rebin_spec_composer', (['[self.ibins, self.jbins, self.kbins]', '[new_x, new_y, new_z]'], {'axes': '[0, 1, 2]'}), '([self.ibins, self.jbins, self.kbins], [new_x,\n new_y, new_z], axes=[0, 1, 2])\n', (28931, 29012), True, 'import mckit_meshes.utils.rebin as rebin\n'), ((36281, 36350), 'mckit_meshes.mesh.geometry_spec.CylinderGeometrySpec', 'gc.CylinderGeometrySpec', (['ibins', 'jbins', 'kbins'], {'origin': 'origin', 'axs': 'axis'}), '(ibins, jbins, kbins, origin=origin, axs=axis)\n', (36304, 36350), True, 'import mckit_meshes.mesh.geometry_spec as gc\n'), ((37511, 37556), 'mckit_meshes.mesh.geometry_spec.CartesianGeometrySpec', 'gc.CartesianGeometrySpec', (['xbins', 'ybins', 'zbins'], {}), '(xbins, ybins, zbins)\n', (37535, 37556), True, 'import mckit_meshes.mesh.geometry_spec as gc\n'), ((22600, 22615), 'numpy.sum', 'np.sum', (['portion'], {}), 
'(portion)\n', (22606, 22615), True, 'import numpy as np\n'), ((5079, 5125), 'numpy.sum', 'np.sum', (['((self.errors * self.data) ** 2)'], {'axis': '(0)'}), '((self.errors * self.data) ** 2, axis=0)\n', (5085, 5125), True, 'import numpy as np\n'), ((22721, 22738), 'numpy.sum', 'np.sum', (['(err * err)'], {}), '(err * err)\n', (22727, 22738), True, 'import numpy as np\n'), ((35667, 35687), 'toolz.itertoolz.concatv', 'concatv', (['[line]', 'fid'], {}), '([line], fid)\n', (35674, 35687), False, 'from toolz.itertoolz import concatv\n'), ((37062, 37082), 'toolz.itertoolz.concatv', 'concatv', (['[line]', 'fid'], {}), '([line], fid)\n', (37069, 37082), False, 'from toolz.itertoolz import concatv\n')] |
import fnmatch
import os
import matplotlib.pyplot as plt
import numpy as np
import librosa
import tensorflow as tf
def files_within(directory_path, pattern="*"):
    """Yield the path of every file under *directory_path* matching *pattern* (fnmatch-style)."""
    for dirpath, _, filenames in os.walk(directory_path):
        matching = fnmatch.filter(filenames, pattern)
        yield from (os.path.join(dirpath, name) for name in matching)
def init_directory(directory):
    """Create *directory* (and any missing parents) if it does not already exist."""
    # exist_ok avoids the check-then-create race of the old isdir()+makedirs().
    os.makedirs(directory, exist_ok=True)
def slice_first_dim(array, slice_size):
    """Cut a (freq, time, ch) array into (n, freq, slice_size, ch) windows along time.

    The final partial window, if any, is zero-padded to slice_size.
    """
    n_full = array.shape[1] // slice_size
    remainder = array.shape[1] - n_full * slice_size
    # Zero-padded window holding the tail that does not fill a whole slice.
    tail = np.zeros(shape=(1, array.shape[0], slice_size, array.shape[2]))
    tail[:, :, :remainder, :] = array[:, n_full * slice_size:, :]
    if n_full == 0:
        return tail
    windows = np.split(
        np.expand_dims(array, axis=0)[:, :, : n_full * slice_size, :], n_full, axis=2
    )
    result = np.concatenate(windows, axis=0)
    if remainder > 0:
        result = np.concatenate([result, tail], axis=0)
    return result
def slice_magnitude(mag, slice_size):
    """Add a channel axis to a (freq, time) magnitude and slice it along time."""
    with_channel = np.stack([mag], axis=2)
    return slice_first_dim(with_channel, slice_size)
def join_magnitude_slices(mag_sliced, target_shape):
    """Stitch (n, freq, w, 1) windows back into one (freq, n*w) magnitude, cropped to target_shape."""
    count, freq, width = mag_sliced.shape[0], mag_sliced.shape[1], mag_sliced.shape[2]
    joined = np.zeros((freq, count * width))
    for i, window in enumerate(mag_sliced):
        joined[:, i * width:(i + 1) * width] = window[:, :, 0]
    # Drop the zero padding introduced when slicing.
    return joined[0:target_shape[0], 0:target_shape[1]]
def amplitude_to_db(mag, amin=1/(2**16), normalize=True):
    """Map linear magnitude to a log scale; when normalize, magnitude 1 maps to 1."""
    scaled = 20 * np.log1p(mag / amin)
    if not normalize:
        return scaled
    # Divide by the transform's value at full-scale magnitude (1.0).
    return scaled / (20 * np.log1p(1 / amin))
def db_to_amplitude(mag_db, amin=1/(2**16), normalize=True):
    """Invert amplitude_to_db: map the log-scaled magnitude back to linear.

    Parameters mirror amplitude_to_db; with the defaults,
    db_to_amplitude(amplitude_to_db(x)) == x.
    """
    if normalize:
        # BUG FIX: use an out-of-place multiply; the old `mag_db *= ...`
        # mutated the caller's array when an ndarray was passed in.
        mag_db = mag_db * (20 * np.log1p(1 / amin))
    return amin * np.expm1(mag_db / 20)
def add_hf(mag, target_shape):
    """Embed *mag* into a zero array of *target_shape* (restores cropped HF rows)."""
    rows, cols = mag.shape[0], mag.shape[1]
    padded = np.zeros(target_shape)
    padded[:rows, :cols] = mag
    return padded
def remove_hf(mag):
    """Drop the upper half of the frequency rows (high-frequency content)."""
    half = mag.shape[0] // 2
    return mag[:half, :]
def forward_transform(audio, nfft=1024, normalize=True, crop_hf=True):
    """STFT of *audio* -> (magnitude, phase), optionally HF-cropped and window-normalized."""
    window = np.hanning(nfft)
    spectrum = librosa.stft(audio, n_fft=nfft, hop_length=int(nfft / 2), window=window)
    mag = np.abs(spectrum)
    phase = np.angle(spectrum)
    if crop_hf:
        mag = remove_hf(mag)
    if normalize:
        # Compensate the Hann window gain so magnitudes land near [0, 1].
        mag = 2 * mag / np.sum(window)
    return mag, phase
def inverse_transform(mag, phase, nfft=1024, normalize=True, crop_hf=True):
    """Invert forward_transform: undo normalization/cropping and run the ISTFT."""
    window = np.hanning(nfft)
    if normalize:
        mag = mag * np.sum(np.hanning(nfft)) / 2
    if crop_hf:
        # Zero-fill the dropped high-frequency rows to match the phase shape.
        mag = add_hf(mag, target_shape=(phase.shape[0], mag.shape[1]))
    complex_spectrum = mag * np.exp(1j * phase)
    return librosa.istft(complex_spectrum, hop_length=int(nfft / 2), window=window)
def snr(original, reconstruction):
    """Signal-to-noise ratio (dB) of *reconstruction* against *original*.

    NOTE(review): applies 10*log10 to the amplitude (not power) ratio —
    confirm that this matches the intended dB convention.
    """
    signal_norm = np.sqrt(np.sum(original ** 2))
    noise_norm = np.sqrt(np.sum((original - reconstruction) ** 2))
    return 10.0 * np.log10(signal_norm / noise_norm)
def load_audio(filename, sr=44100):
    """Load *filename* resampled to *sr*; return only the sample array."""
    samples, _ = librosa.core.load(filename, sr=sr)
    return samples
def write_audio(filename, audio, sr=44100):
    """Write ``audio`` to ``filename`` as a peak-normalized WAV at rate ``sr``."""
    # NOTE(review): librosa.output was removed in librosa 0.8 — confirm the
    # pinned librosa version still provides write_wav.
    librosa.output.write_wav(filename, audio, sr, norm=True)
class DataGenerator(tf.keras.utils.Sequence):
    """Keras Sequence yielding (origin, target) image batches from paired .npy files.

    Pairs are matched by substituting the `origin` directory name with `target`
    in each file path, so both directories must contain identically named files.
    Arrays are assumed to be scaled to [0, 1] on disk; batches are emitted in [-1, 1].
    """

    def __init__(self, origin, target, base_path, batch_size=1, img_dim=(256,256,1), validation_split=0.9, is_training=True, scale_factor=1.0, shuffle=True):
        self.img_dim = img_dim
        self.batch_size = batch_size
        # Fraction of files used for training; the remainder is validation.
        self.validation_split = validation_split
        self.is_training = is_training
        # Multiplicative rescaling applied before the [-1, 1] mapping.
        self.scale_factor = scale_factor
        # Accept either a single base path or a list of them.
        self.base_path = base_path if(type(base_path) is list) else [base_path]
        self.origin = origin
        self.target = target
        self.filenames = self.__get_filenames()
        assert len(self.filenames) > 0, 'Filenames is empty'
        self.shuffle = shuffle
        self.on_epoch_end()

    def __len__(self):
        'Denotes the number of batches per epoch'
        return int(np.floor(len(self.filenames) / self.batch_size))

    def __getitem__(self, index):
        'Generate one batch of data'
        filenames = self.filenames[index*self.batch_size:(index+1)*self.batch_size] # Generate indexes of the batch
        batch = self.__data_generation(filenames) # Generate data
        return batch

    def get_empty_batch(self):
        # All-zero (x, y) pair with the configured batch/image dimensions.
        batch = np.zeros((self.batch_size, *self.img_dim))
        return batch, batch

    def get_random_batch(self):
        # Uniformly sample one of the available batches.
        random_idx = np.random.randint(self.__len__())
        return self.__getitem__(random_idx)

    def __data_generation(self, filenames):
        'Generates data containing batch_size samples'
        x = np.empty((self.batch_size, *self.img_dim))
        y = np.empty((self.batch_size, *self.img_dim))
        # Generate data
        for i, filename in enumerate(filenames):
            x[i,] = np.load(filename)
            # Target file lives at the same path with `origin` swapped for `target`.
            y[i,] = np.load(filename.replace(self.origin, self.target))
            if(self.scale_factor != 1):
                x[i,] *= self.scale_factor
                y[i,] *= self.scale_factor
            # Now images should be scaled in the range [0,1]. Make them [-1,1]
            x[i,] = x[i,] * 2 - 1
            y[i,] = y[i,] * 2 - 1
        return x,y

    def __get_filenames(self):
        # Collect origin paths; target files are listed only to verify pairing.
        origin_filenames, target_filenames = [],[]
        for base_path in self.base_path:
            origin_temp = [os.path.join(base_path, self.origin, f) for f in os.listdir(os.path.join(base_path, self.origin))]
            target_temp = [os.path.join(base_path, self.target, f) for f in os.listdir(os.path.join(base_path, self.target))]
            if(self.is_training):
                origin_temp = origin_temp[0:int(self.validation_split*len(origin_temp))]
                target_temp = target_temp[0:int(self.validation_split*len(target_temp))]
            else:
                origin_temp = origin_temp[int(self.validation_split*len(origin_temp)):]
                target_temp = target_temp[int(self.validation_split*len(target_temp)):]
            origin_filenames += origin_temp
            target_filenames += target_temp
        if(len(origin_filenames) == len(target_filenames)):
            return origin_filenames
        # Mismatched pair counts: return [] so the __init__ assertion fires.
        return []

    def on_epoch_end(self):
        'Updates indexes after each epoch'
        if self.shuffle:
            np.random.shuffle(self.filenames)
class DataGeneratorMultiTarget(tf.keras.utils.Sequence):
    """Keras Sequence for multi-target translation.

    Each x sample stacks the origin image with a randomly chosen style example
    of the chosen target domain (2 channels); y is the matching target image.
    `target` is a list of target directory names — every origin file is
    enumerated once per target. Arrays are assumed scaled to [0, 1] on disk;
    batches are emitted in [-1, 1].
    """

    def __init__(self, origin, target, base_path, batch_size=1, img_dim=(256,256,1), validation_split=0.9, is_training=True, scale_factor=1.0, shuffle=True):
        self.img_dim = img_dim
        self.batch_size = batch_size
        # Fraction of files used for training; the remainder is validation.
        self.validation_split = validation_split
        self.is_training = is_training
        # Multiplicative rescaling applied before the [-1, 1] mapping.
        self.scale_factor = scale_factor
        # Accept either a single base path or a list of them.
        self.base_path = base_path if(type(base_path) is list) else [base_path]
        self.origin = origin
        # NOTE: here `target` is a list of target domain names (see __get_filenames).
        self.target = target
        self.filenames = self.__get_filenames()
        assert len(self.filenames) > 0, 'Filenames is empty'
        self.shuffle = shuffle
        self.on_epoch_end()

    def __len__(self):
        'Denotes the number of batches per epoch'
        return int(np.floor(len(self.filenames) / self.batch_size))

    def __getitem__(self, index):
        'Generate one batch of data'
        filenames = self.filenames[index*self.batch_size:(index+1)*self.batch_size] # Generate indexes of the batch
        batch = self.__data_generation(filenames) # Generate data
        return batch

    def get_empty_batch(self):
        # x carries two stacked channels: origin image + style image.
        x = np.zeros((self.batch_size, self.img_dim[0], self.img_dim[1], 2))
        y = np.zeros((self.batch_size, *self.img_dim))
        return x,y

    def get_random_batch(self):
        # Uniformly sample one of the available batches.
        random_idx = np.random.randint(self.__len__())
        return self.__getitem__(random_idx)

    def __data_generation(self, filenames):
        'Generates data containing batch_size samples'
        x = np.empty((self.batch_size, self.img_dim[0], self.img_dim[1], 2))
        y = np.empty((self.batch_size, *self.img_dim))
        # Generate data
        for i, filename in enumerate(filenames):
            # Random style-conditioning example (non-deterministic by design).
            style = np.random.choice(self.filenames)['name']
            x[i,:,:,0:1] = np.load(filename['name'])
            x[i,:,:,1:2] = np.load(style.replace(self.origin, filename['target']))
            y[i,] = np.load(filename['name'].replace(self.origin, filename['target']))
            if(self.scale_factor != 1):
                x[i,] *= self.scale_factor
                y[i,] *= self.scale_factor
            # Now images should be scaled in the range [0,1]. Make them [-1,1]
            x[i,] = x[i,] * 2 - 1
            y[i,] = y[i,] * 2 - 1
        return x,y

    def __get_filenames(self):
        origin_filenames = []
        for base_path in self.base_path:
            origin_temp = [os.path.join(base_path, self.origin, f) for f in os.listdir(os.path.join(base_path, self.origin))]
            if(self.is_training):
                origin_temp = origin_temp[0:int(self.validation_split*len(origin_temp))]
            else:
                origin_temp = origin_temp[int(self.validation_split*len(origin_temp)):]
            origin_filenames += origin_temp
        # Cross product: one entry per (origin file, target domain) pair.
        filenames = []
        for f in origin_filenames:
            for t in self.target:
                filenames.append({'name': f, 'target': t})
        return filenames

    def on_epoch_end(self):
        'Updates indexes after each epoch'
        if self.shuffle:
            np.random.shuffle(self.filenames)
| [
"numpy.load",
"numpy.abs",
"numpy.sum",
"numpy.angle",
"numpy.floor",
"os.walk",
"numpy.empty",
"librosa.core.load",
"numpy.exp",
"os.path.join",
"numpy.expm1",
"numpy.random.choice",
"numpy.hanning",
"numpy.log10",
"numpy.random.shuffle",
"numpy.log1p",
"numpy.stack",
"numpy.conca... | [((199, 222), 'os.walk', 'os.walk', (['directory_path'], {}), '(directory_path)\n', (206, 222), False, 'import os\n'), ((610, 673), 'numpy.zeros', 'np.zeros', ([], {'shape': '(1, array.shape[0], slice_size, array.shape[2])'}), '(shape=(1, array.shape[0], slice_size, array.shape[2]))\n', (618, 673), True, 'import numpy as np\n'), ((1203, 1226), 'numpy.stack', 'np.stack', (['[mag]'], {'axis': '(2)'}), '([mag], axis=2)\n', (1211, 1226), True, 'import numpy as np\n'), ((1342, 1416), 'numpy.zeros', 'np.zeros', (['(mag_sliced.shape[1], mag_sliced.shape[0] * mag_sliced.shape[2])'], {}), '((mag_sliced.shape[1], mag_sliced.shape[0] * mag_sliced.shape[2]))\n', (1350, 1416), True, 'import numpy as np\n'), ((1976, 1998), 'numpy.zeros', 'np.zeros', (['target_shape'], {}), '(target_shape)\n', (1984, 1998), True, 'import numpy as np\n'), ((2206, 2222), 'numpy.hanning', 'np.hanning', (['nfft'], {}), '(nfft)\n', (2216, 2222), True, 'import numpy as np\n'), ((2558, 2574), 'numpy.hanning', 'np.hanning', (['nfft'], {}), '(nfft)\n', (2568, 2574), True, 'import numpy as np\n'), ((3174, 3230), 'librosa.output.write_wav', 'librosa.output.write_wav', (['filename', 'audio', 'sr'], {'norm': '(True)'}), '(filename, audio, sr, norm=True)\n', (3198, 3230), False, 'import librosa\n'), ((249, 283), 'fnmatch.filter', 'fnmatch.filter', (['filenames', 'pattern'], {}), '(filenames, pattern)\n', (263, 283), False, 'import fnmatch\n'), ((379, 403), 'os.path.isdir', 'os.path.isdir', (['directory'], {}), '(directory)\n', (392, 403), False, 'import os\n'), ((414, 436), 'os.makedirs', 'os.makedirs', (['directory'], {}), '(directory)\n', (425, 436), False, 'import os\n'), ((499, 536), 'numpy.floor', 'np.floor', (['(array.shape[1] / slice_size)'], {}), '(array.shape[1] / slice_size)\n', (507, 536), True, 'import numpy as np\n'), ((820, 849), 'numpy.expand_dims', 'np.expand_dims', (['array'], {'axis': '(0)'}), '(array, axis=0)\n', (834, 849), True, 'import numpy as np\n'), ((867, 938), 
'numpy.split', 'np.split', (['array[:, :, 0:n_sections * slice_size, :]', 'n_sections'], {'axis': '(2)'}), '(array[:, :, 0:n_sections * slice_size, :], n_sections, axis=2)\n', (875, 938), True, 'import numpy as np\n'), ((951, 981), 'numpy.concatenate', 'np.concatenate', (['sliced'], {'axis': '(0)'}), '(sliced, axis=0)\n', (965, 981), True, 'import numpy as np\n'), ((1685, 1705), 'numpy.log1p', 'np.log1p', (['(mag / amin)'], {}), '(mag / amin)\n', (1693, 1705), True, 'import numpy as np\n'), ((1914, 1935), 'numpy.expm1', 'np.expm1', (['(mag_db / 20)'], {}), '(mag_db / 20)\n', (1922, 1935), True, 'import numpy as np\n'), ((2319, 2328), 'numpy.abs', 'np.abs', (['S'], {}), '(S)\n', (2325, 2328), True, 'import numpy as np\n'), ((2330, 2341), 'numpy.angle', 'np.angle', (['S'], {}), '(S)\n', (2338, 2341), True, 'import numpy as np\n'), ((2745, 2765), 'numpy.exp', 'np.exp', (['(1.0j * phase)'], {}), '(1.0j * phase)\n', (2751, 2765), True, 'import numpy as np\n'), ((2908, 2929), 'numpy.sum', 'np.sum', (['(original ** 2)'], {}), '(original ** 2)\n', (2914, 2929), True, 'import numpy as np\n'), ((2953, 2993), 'numpy.sum', 'np.sum', (['((original - reconstruction) ** 2)'], {}), '((original - reconstruction) ** 2)\n', (2959, 2993), True, 'import numpy as np\n'), ((3008, 3040), 'numpy.log10', 'np.log10', (['(signal_rms / noise_rms)'], {}), '(signal_rms / noise_rms)\n', (3016, 3040), True, 'import numpy as np\n'), ((3087, 3121), 'librosa.core.load', 'librosa.core.load', (['filename'], {'sr': 'sr'}), '(filename, sr=sr)\n', (3104, 3121), False, 'import librosa\n'), ((4467, 4509), 'numpy.zeros', 'np.zeros', (['(self.batch_size, *self.img_dim)'], {}), '((self.batch_size, *self.img_dim))\n', (4475, 4509), True, 'import numpy as np\n'), ((4816, 4858), 'numpy.empty', 'np.empty', (['(self.batch_size, *self.img_dim)'], {}), '((self.batch_size, *self.img_dim))\n', (4824, 4858), True, 'import numpy as np\n'), ((4871, 4913), 'numpy.empty', 'np.empty', (['(self.batch_size, *self.img_dim)'], 
{}), '((self.batch_size, *self.img_dim))\n', (4879, 4913), True, 'import numpy as np\n'), ((7768, 7832), 'numpy.zeros', 'np.zeros', (['(self.batch_size, self.img_dim[0], self.img_dim[1], 2)'], {}), '((self.batch_size, self.img_dim[0], self.img_dim[1], 2))\n', (7776, 7832), True, 'import numpy as np\n'), ((7845, 7887), 'numpy.zeros', 'np.zeros', (['(self.batch_size, *self.img_dim)'], {}), '((self.batch_size, *self.img_dim))\n', (7853, 7887), True, 'import numpy as np\n'), ((8186, 8250), 'numpy.empty', 'np.empty', (['(self.batch_size, self.img_dim[0], self.img_dim[1], 2)'], {}), '((self.batch_size, self.img_dim[0], self.img_dim[1], 2))\n', (8194, 8250), True, 'import numpy as np\n'), ((8263, 8305), 'numpy.empty', 'np.empty', (['(self.batch_size, *self.img_dim)'], {}), '((self.batch_size, *self.img_dim))\n', (8271, 8305), True, 'import numpy as np\n'), ((1050, 1092), 'numpy.concatenate', 'np.concatenate', (['[sliced, last_mag]'], {'axis': '(0)'}), '([sliced, last_mag], axis=0)\n', (1064, 1092), True, 'import numpy as np\n'), ((1744, 1762), 'numpy.log1p', 'np.log1p', (['(1 / amin)'], {}), '(1 / amin)\n', (1752, 1762), True, 'import numpy as np\n'), ((1881, 1899), 'numpy.log1p', 'np.log1p', (['(1 / amin)'], {}), '(1 / amin)\n', (1889, 1899), True, 'import numpy as np\n'), ((2431, 2445), 'numpy.sum', 'np.sum', (['window'], {}), '(window)\n', (2437, 2445), True, 'import numpy as np\n'), ((5007, 5024), 'numpy.load', 'np.load', (['filename'], {}), '(filename)\n', (5014, 5024), True, 'import numpy as np\n'), ((6496, 6529), 'numpy.random.shuffle', 'np.random.shuffle', (['self.filenames'], {}), '(self.filenames)\n', (6513, 6529), True, 'import numpy as np\n'), ((8467, 8492), 'numpy.load', 'np.load', (["filename['name']"], {}), "(filename['name'])\n", (8474, 8492), True, 'import numpy as np\n'), ((9757, 9790), 'numpy.random.shuffle', 'np.random.shuffle', (['self.filenames'], {}), '(self.filenames)\n', (9774, 9790), True, 'import numpy as np\n'), ((303, 335), 'os.path.join', 
'os.path.join', (['dirpath', 'file_name'], {}), '(dirpath, file_name)\n', (315, 335), False, 'import os\n'), ((5554, 5593), 'os.path.join', 'os.path.join', (['base_path', 'self.origin', 'f'], {}), '(base_path, self.origin, f)\n', (5566, 5593), False, 'import os\n'), ((5680, 5719), 'os.path.join', 'os.path.join', (['base_path', 'self.target', 'f'], {}), '(base_path, self.target, f)\n', (5692, 5719), False, 'import os\n'), ((8399, 8431), 'numpy.random.choice', 'np.random.choice', (['self.filenames'], {}), '(self.filenames)\n', (8415, 8431), True, 'import numpy as np\n'), ((9086, 9125), 'os.path.join', 'os.path.join', (['base_path', 'self.origin', 'f'], {}), '(base_path, self.origin, f)\n', (9098, 9125), False, 'import os\n'), ((2621, 2637), 'numpy.hanning', 'np.hanning', (['nfft'], {}), '(nfft)\n', (2631, 2637), True, 'import numpy as np\n'), ((5614, 5650), 'os.path.join', 'os.path.join', (['base_path', 'self.origin'], {}), '(base_path, self.origin)\n', (5626, 5650), False, 'import os\n'), ((5740, 5776), 'os.path.join', 'os.path.join', (['base_path', 'self.target'], {}), '(base_path, self.target)\n', (5752, 5776), False, 'import os\n'), ((9146, 9182), 'os.path.join', 'os.path.join', (['base_path', 'self.origin'], {}), '(base_path, self.origin)\n', (9158, 9182), False, 'import os\n')] |
import numpy as np
def datagen(x, m=5, c=10, seed=42):
    """Generate a noisy linear-regression dataset y = m*x + c from a sample of x.

    Draws (with replacement) 80% of the entries of ``x``, perturbs them with
    unit Gaussian noise before applying the line, and returns the pair
    ``(inputs, targets)`` as column vectors.
    """
    np.random.seed(seed)  # make the sampling reproducible
    sample = np.random.choice(x, int(x.shape[0] * 0.8))
    eps = np.random.normal(0, 1, sample.shape)
    inputs = sample.reshape(-1, 1)
    targets = ((sample + eps) * m + c).reshape(-1, 1)
    return inputs, targets
"numpy.random.seed",
"numpy.reshape",
"numpy.random.normal"
] | [((60, 80), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (74, 80), True, 'import numpy as np\n'), ((149, 185), 'numpy.random.normal', 'np.random.normal', (['(0)', '(1)', 'subset.shape'], {}), '(0, 1, subset.shape)\n', (165, 185), True, 'import numpy as np\n'), ((197, 224), 'numpy.reshape', 'np.reshape', (['subset', '(-1, 1)'], {}), '(subset, (-1, 1))\n', (207, 224), True, 'import numpy as np\n'), ((225, 270), 'numpy.reshape', 'np.reshape', (['((subset + noise) * m + c)', '(-1, 1)'], {}), '((subset + noise) * m + c, (-1, 1))\n', (235, 270), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.3'
# jupytext_version: 0.8.6
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Usage of modeling
#
# Please read [README.md](../README.md) in advance.
# ## Modeling
#
# Modeling means here training of several models and evaluate the trained models.
#
# We use the feature matrix which is obtained in [usage-processing.ipynb](./usage-processing.ipynb). If you have not executed it yet, try once or execute the following command on your terminal.
#
# $ jupyter nbconvert --execute usage-processing.ipynb
import numpy as np
import pandas as pd
import scipy.stats as stats
import matplotlib.pyplot as plt
import seaborn as sns
# +
from IPython.display import display, HTML
plt.style.use("fivethirtyeight")
from pylab import rcParams
rcParams['figure.figsize'] = 14, 6 ## width, height (inches)
#pd.set_option('display.max_rows', None)
# -
import warnings
warnings.filterwarnings("ignore")  # hide library deprecation warnings to keep the notebook output readable
# The variable `label_>50K` is the target variable.
# Load the feature matrix produced by usage-processing.ipynb.
df = pd.read_pickle("../data/feature_matrix.pkl")
target = "label_>50K"  # binary target column: 1 if the person earns more than 50K
df.describe()
# First of all we split the data set into a training set and a test set.
# +
## Train-Test splitting
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(df.drop(target, axis=1), df[target],
test_size=0.4, random_state=3)
print("Size of training set:", X_train.shape[0])
print("Size of test set :", X_test.shape[0])
# -
# The distribution of the target variable is as follows.
y_train.value_counts()
100*y_train.mean() ## percentage of the label ">50K".
# ## Cross-Validation and Pipeline
#
# We will chose hyperparameters by cross-validation (CV). CV can be done by using `sklearn.model_selection.GridSearchCV`.
#
# It is often said that a training set should be rescaled before training. This is because the rescaling often optimizes the training so that we obtain a better model. To do so we construct a `Pipeline` instance, which `GridSearchCV` can be applied to. ([Good tutorial](https://iaml.it/blog/optimizing-sklearn-pipelines))
#
# ### Remark
#
# A preprocessing algorithm (rescaling, PCA, etc.) is often applied to the training dataset and then CV is applied to the the preprocessed data set. This should be avoided, because the training preprocessing involves some information from the validation data. As a result the training preprocessing can cause "overfitting". `Pipeline` solves this problem.
#
# Because of the same reason we should actually integrate `adhoc.preprocessing.MultiConverter` to `Pipeline`. Namely `MultiConverter` fills missing values by some statistics ("mean", "median" and "most frequent class"). But such statistics are hardly ever overfitted if we have enough data, and it is more important that the column names are fixed.
#
# Imagine you drop the dummy variable "White" in the first CV and "Other" in the second CV because the majority class can change. Namely your feature matrices can have different features. This is very confusing. Therefore we apply `MultiConverter` before CV.
#
# If you specify all dropping values manually, then you can integrate your `MultiConverter` instance in `Pipeline` without any problem.
try:
from adhoc.modeling import grid_params, simple_pipeline_cv
except ImportError:
import sys
sys.path.append("..")
from adhoc.modeling import grid_params, simple_pipeline_cv
# Let us try to train a model and pick the best hyperparameters. `grid_params` is a dict of simple `grid_param` for several models. You can use it for a simple analysis.
#
# `simple_pipline_cv` creates a simple `Pipeline` instance which consists of a Transformer for preprocessing (such as `MinMaxScaler`) and an ordinary estimator instance and put it in `GridSearchCV`.
#
# Remark: We use 2-fold cross-validation only to keep the CI pipeline fast.
# +
from sklearn.linear_model import LogisticRegression
# Elastic-net logistic regression inside a scaled Pipeline, tuned by 2-fold CV.
plr = simple_pipeline_cv(name="plr", model=LogisticRegression(penalty="elasticnet", solver="saga"),
                         param_grid=grid_params["LogisticRegression"], cv=2) ## GridSearchCV instance
plr.fit(X_train, y_train);
# -
# The result of the cross-validation is stored in `cv_results_` attribute.
pd.DataFrame(plr.cv_results_)
# We can compute the confident interval of the cross-validation scores with `cv_results_summary`.
# +
from adhoc.modeling import cv_results_summary
cv_results_summary(plr)
# -
# If we have a linear model, then `show_coefficients` shows the regression coefficients.
#
# If you have a Pipeline with a scaler, then the regression coefficients are the coefficients after the scaler. Therefore you can use the absolute values of regression coefficients as "feature importance" (depending on your whole pipeline). But on the other hand you can not interpret the coefficients in the original scales.
# +
from adhoc.modeling import show_coefficients
show_coefficients(plr, X_train.columns).sort_values(by=1.0, ascending=False)
# +
from sklearn.model_selection import GridSearchCV
from sklearn.tree import DecisionTreeClassifier
# Decision tree with the library's default grid, tuned by 2-fold CV.
tree = GridSearchCV(DecisionTreeClassifier(random_state=3),
                    grid_params["DecisionTree"], cv=2)
tree.fit(X_train,y_train)
cv_results_summary(tree)
# -
# A simple wrapper function for `sklearn.tree.export_graphviz` is available:
# +
from adhoc.modeling import show_tree
show_tree(tree, X_train.columns)
# -
# Here is the feature importance of the decision tree.
# +
from adhoc.modeling import show_feature_importance
s_fi_tree = show_feature_importance(tree, X_train.columns)
s_fi_tree[s_fi_tree > 0]
# +
from sklearn.ensemble import RandomForestClassifier
# Random forest with the library's default grid, tuned by 2-fold CV.
rf = GridSearchCV(RandomForestClassifier(random_state=3),
                  grid_params["RandomForest"], cv=2)
rf.fit(X_train,y_train)
cv_results_summary(rf)
# -
s_fi_rf = show_feature_importance(rf, X_train.columns).sort_values(ascending=False)
s_fi_rf[s_fi_rf>0.01]
# +
from xgboost.sklearn import XGBClassifier
# Small hand-written grid over tree count and learning rate; depth and seed fixed.
xgb_params = {"n_estimators":[10,20], "learning_rate":[0.1,0.01]}
xgb = GridSearchCV(XGBClassifier(max_depth=10, random_state=51),
                   xgb_params, cv=2)
xgb.fit(X_train,y_train)
cv_results_summary(xgb)
# -
s_fi_xgb = show_feature_importance(xgb, X_train.columns).sort_values(ascending=False)
s_fi_xgb[s_fi_xgb>0.01]
# ## Understanding a trained model
#
# Let's look at predictions of a trained decision tree model. According to its feature importance we create 2 continuous variables and 2 discrete variables, using the following variables.
#
# - capital-gain
# - marital-status_Never-married
# - marital-status_Divorced
# - education_Bachelors
# - education_Masters
# - hours-per-week
#
# Namely we create a categorical variables `marital-status` and `education`. In other words, we apply something like `LebelBinarizer.inverse_transform` with limited values. `adhoc.modeling.recover_label` does the job.
# +
from adhoc.modeling import recover_label
# Collapse the selected dummy columns back into two categorical fields;
# any row with none of the listed dummies set becomes the class "other".
field2columns = {"marital-status": ["marital-status_Never-married", "marital-status_Divorced"],
                 "education": ["education_Bachelors", "education_Masters"]}
df_train = X_train.copy()
recover_label(df_train, field2columns, sep="_", inplace=True)
yhat = tree.predict_proba(X_train)  # class probabilities of the tuned decision tree
df_train["prob"] = yhat[:,1]  # predicted P(label_>50K == 1)
df_train["pred"] = tree.predict(X_train)  # hard prediction at the default 0.5 threshold
df_train[target] = y_train  # true labels, for comparison
# -
# `adhoc.modeling.recover_label` create a column `marital-status` out of two columns `marital-status_Never-married` and `marital-status_Divorced`. The rule is as follows:
#
# - `marital-status` is `Never_married` if `marital-status_Never-married == 1`
# - `marital-status` is `Divorced` if `marital-status_Divorced == 1`
# - Otherwise `marital-status` is `other`.
#
# We do the similar preprocessing to create `education`. The following table show the concrete transformation.
# +
cols = []
for field, columns in field2columns.items():
cols.append(field)
cols.extend(columns)
df_train[cols].drop_duplicates()
# -
# The following scatter plot shows the predictions of the decision tree on the training set. The color shows the predicted probabilities: The red points are predicted as positive and the blue points are predicted as negative.
# +
from adhoc.utilities import facet_grid_scatter_plot
facet_grid_scatter_plot(data=df_train, col="marital-status", row="education",
x="capital-gain", y="hours-per-week", c="prob",
cmap="RdBu_r", margin_titles=True)
# -
# While the scatter plot gives a good overview of predictions, it is quite difficult to evaluate quantitatively just by looking at the diagrams. Therefore we bins x and y variables and show the average of probabilities for each pair of bins as a heat map.
#
# Each of the following heat maps corresponds to a pair of values of `marital-status` and `education`. Note that the bins are different according to heat maps. The choice of bins are optimized by decision trees. The bins without any color/any number has no instances.
# +
from adhoc.utilities import bins_heatmap
bins_heatmap(df_train, cat1="marital-status", cat2="education", x="capital-gain", y="hours-per-week",
target="prob", center=0.5, cmap="RdBu_r", fontsize=14)
# -
# Just in case. The above heat maps show the average probabilities which are predicted by a trained decision tree model on the training set, not the true values of the target variable.
# ## Evaluation of the model by AUC
#
# In some cases (especially when the target variable is a skewed binary variable) you need to evaluate of a trained model by a special metric such as AUC.
#
# **Remark**: If you want to do the following analysis, I strongly recommend to split your original dataset into 3 datasets: training set, validation set and test set. You train a model, choosing "roc_auc" as a scoring function and do the following analysis with the validation set.
#
# (Why we did not do so? Well, it might be confusing at glance. *Why don't we use the validation set for choosing hyperparameters?*)
#
# ### Lead prioritization
#
# Let us assume that our data set is a list of customers and the target customers of your product (or service) is people who earn relatively much. And therefore you have decided to contact customers directly (for example by telephone) instead of on-line advertising to huge number of audience. Because it is time-consuming to contact a customer directory, you want to reduce the number of contacts.
#
# - A contacting a customer costs €40 on average.
# - If the customer buys your product, you get €1000. (This is the price.)
#
# (These numbers have no reality. Just an assumption.)
#
# Since we do not have a data set for successful customers, so we naïvely assume that 10% of the customers with label "`>50K`" buy your product. Because 24% of the customers have label "`>50K`", the proportion of the customer buying the product would be 2.4%. (Of course, your "test set" has no information about the label, neither.)
#
# If you randomly choose a customer and contact her/him. Then your success rate is exactly 2.4%. That is, you get a customer buying your product if you contact 42 customers on average. And therefore getting such a customer costs €1680 on average. This is a deficit. On the other hand if you can perfectly find a positive customer. Then your success rate will be 10%. That is, you will get a customer buying your product by contacting 10 customers on average and it costs €400.
#
# One of the difficulties of this challenge is that the accuracy of the model is not a right metric. Let us look at the performance of the random forest classifier (on the training set).
# +
from adhoc.modeling import ROCCurve
def performance_check(model, X:pd.DataFrame, y:pd.Series, threshold:float=0.5) -> pd.DataFrame:
    """Report business KPIs of a fitted classifier at a probability threshold.

    Prints accuracy, number of contacted customers, recall, success rate and
    the expected profit (price 1000, contact cost 40, assuming 10% of the
    positively labelled customers actually buy), then returns the confusion
    matrix at that threshold.
    """
    curve = ROCCurve(y, model.predict_proba(X)[:,1])
    metrics = curve.get_scores_thru_threshold(threshold=threshold)
    confusion = curve.get_confusion_matrix(threshold=threshold)
    # Row 1 of the confusion matrix holds the customers predicted positive,
    # i.e. the ones we would contact.
    contacted = confusion.loc[1,:].sum()
    # Assumption: 10% of truly positive customers buy the product.
    rate = 0.1*metrics["precision"]
    unit_cost = 40
    unit_price = 1000
    expected_profit = unit_price*rate*contacted - unit_cost*contacted
    print("- Your model has %d%% accuracy." % (100*metrics["accuracy"]))
    print("- You will contact %d customers." % contacted)
    print("- You can reach %d%% of positive customers" % (100*metrics["recall"]))
    print("- Your success rate is %0.1f%%" % (100*rate))
    print("- Your profit would be %d" % expected_profit)
    return confusion
performance_check(rf, X_train, y_train)
# -
# Let's look at another model: XGBoosting.
performance_check(xgb, X_train, y_train)
# So what is the right metric?
#
# 1. Profit. Then you definitely want to choose the XGBoosting model.
# 2. Success rate (or precision). If you want to find a small number of customers buying your product quickly, because it is challenging and therefore you want to receive feedback quickly to improve your product and to release a new version. Of course the assumption that 10% of positive customers buy the product can be too optimistic, so you want to change strategy in an early stage.
#
# If you choose the second option, then you want to choose the random forest classifier because of the high success rate. But you can obtain a better model by tweaking threshold of the XGBoosting model:
performance_check(xgb, X_train, y_train, threshold=0.8)
# The classifiers which we trained predict actually probabilities that a customer is positive and therefore we naturally use 0.5 as a threshold/boundary of predictions: If the probability is larger than 0.5, then the customer would be positive. So why don't we start with the customer with high probabilities? In the third crosstab we use 0.8 as the threshold, then you will achieve a model with a better precision. Here we should note that the accuracy becomes worse than the model with threshold 0.5.
#
# In general, the more accurate the classifier is, the better the predictions are. But this is not always the case, especially when you have a specific metric.
#
# There is a problem. The precision is often not a good metric to optimize. You can also achieve that a random forest classifier with a good precision just by choosing a high threshold:
performance_check(rf, X_train, y_train, threshold=0.7)
# But as you see, you can reach only 338 positive customers. Is it enough to close 33 contracts? If the assumption is too optimistic, you can find less than 33 customers buying your product. That is, it is also important to reach larger number of positive customers. Therefore you have actually two measures to optimize.
#
# - Precision: the proportion of positive customers among the customer you will contact.
# - Recall: the proportion of positive customers you can reach.
#
# There are two problems:
#
# 1. The metrics are determined after choosing a threshold. Then how should we train a model and tune hyperparameters?
# 2. It is problematic that there are two metrics to optimize. Which has priority and how do we measure it.
#
# The standard solutions to the above questions are following:
#
# 1. Train a model in a usual way, but tune hyperparameters by looking at area under the ROC curve.
# 2. Use F1-metric and choose the threshold which maximizes it. Or optimize your metric by varying threshold.
#
# (You might want to perform a special sampling method if you need.)
#
# ### ROC curve and AUC
#
# Assume that you have a trained model predicting probabilities (or scores). Then choosing a threshold, you have a crosstab as above, and therefore you also have False-Positive rate (FP rate) and True-Positive rate (TP rate) of the predictions. The ROC (Receiver Operating Characteristic) curve is the curve of pairs (FP rate, TP rate) by varying the threshold.
#
# The following line curve is the ROC curve of the XGBoosting model.
y_score_xgb = xgb.predict_proba(X_train)[:,1]  # predicted P(>50K) on the training set
roc_curve_xgb = ROCCurve(y_true=y_train, y_score=y_score_xgb)
roc_curve_xgb.show_roc_curve()
# Usually this curve moves from (1,1) (Every customer is positive) to (0,0) (Every customer is negative) and "usually" the curve lies over the diagonal line (dotted line in the diagram). (If not, you have a wrong classifier.) The ROC curve shows the performance of your model with all possible threshold.
#
# Since the upper left point (FP=0, TP=1) corresponds the perfect classifier, the ROC curve approaches upper left corner if your model can predict correctly. We can measure "how the ROC curve approaches to upper left" as the area under the ROC curve (AUC).
#
# The following curve is the ROC curve of the random forest classifier. AUC is slightly worse than one of XGBoosting.
y_score_rf = rf.predict_proba(X_train)[:,1]  # predicted P(>50K) on the training set
roc_curve_rf = ROCCurve(y_true=y_train, y_score=y_score_rf)
roc_curve_rf.show_roc_curve()
# Let's look at the F1 scores. We recall that the F1-score is defined by the harmonic mean of $P$ and $R$:
#
# $$F = \dfrac{2PR}{P+R}$$
#
# Here $P$ is the precision and $R$ is the recall. The following heat map shows the F1-scores for various precisions and recalls.
# +
from itertools import product
# Grid of (precision, recall) pairs in 0.1 steps; F1 is their harmonic mean.
vals = np.round(0.1*np.arange(11),1)
df_f1 = pd.DataFrame(list(product(vals,vals)), columns=["precision","recall"])
df_f1["F1-score"] = 2*df_f1["precision"]*df_f1["recall"]/(df_f1["precision"]+df_f1["recall"])
# Pivot into a recall-by-precision table, recall descending for the heat map.
df_f1 = df_f1.pivot(index="recall", columns="precision", values="F1-score").sort_index(ascending=False)
sns.heatmap(df_f1, cmap="YlGnBu", annot=True)
plt.title("Table of F1-scores");
# -
# Let us look at line curves of the metrics (recall, precision and F1-score) by thresholds.
roc_curve_xgb.show_metrics() ## takes relatively long
# The precise values of metrics can be obtain by `scores` property:
roc_curve_xgb.scores.head()
# By using this it is easy to find the threshold which maximizes F1-score.
roc_curve_xgb.scores.sort_values(by="f1_score", ascending=False).head()
best_threshold = roc_curve_xgb.scores.sort_values(by="f1_score", ascending=False).index[0]
roc_curve_xgb.get_confusion_matrix(threshold=best_threshold)
# The above crosstab is the result of the threshold with the best F1-score. Of course you can choose another threshold, for example 0.7, so that you obtain a better precision.
roc_curve_xgb.get_confusion_matrix(threshold=0.7)
# Or you might want to choose the threshold which maximizes the profit.
# +
# Profit by threshold, assuming price 1000, contact cost 40 and that
# 10% of truly positive customers buy the product.
df_performance = roc_curve_xgb.scores.copy()
price = 1000
cost = 40
df_performance["success_rate"] = 0.1*df_performance["precision"]
df_performance["n_contract"] = (df_performance["success_rate"]*df_performance["n_pos_pred"]).astype(int)
df_performance["profit"] = price*df_performance["n_contract"] - cost*df_performance["n_pos_pred"]
df_performance["profit"].plot()
plt.title("Profit by threshold");
# -
# The best threshold is 0.366727 and the profit (on training set) will be €160 560.
df_performance.sort_values(by="profit", ascending=False).head(1)
# Another usage of the trained model is to use probabilities as scores. The higher the score is, the more likely the customer is positive. Therefore we can contact positive customers by contacting customers in the descending order of scores.
#
# First of all the expected value of a number of positive customer agrees with the proportion of positive customers if we contact randomly. Now we have a probability of positivity for each customer. Then the expected value of the number of positive customers is just the sum of the probabilities.
#
# Here is a problem. What is the expected value of the whole (training) dataset?
print("Expected value (random contact) : %s" % y_train.sum())
print("Expected value (predicted probabilities): %s" % y_score_xgb.sum())
# In other words, the model says that you can reach over 5000 more positive customers. This cannot happen. But this is normal because we do not train a model so that the expected values agree. One of the easiest solutions to the problem is scaling. That is, we multiply by a certain constant so that the expected value of the number of positive customers with the predicted probability agrees with the expected value with equally likely probabilities.
#
# Then the expected values of number of positive customers by the number of contacts look like following.
proportion_positive = y_train.mean()  # share of ">50K" customers in the training set
roc_curve_xgb.show_expected_value(proportion_positive=proportion_positive, scaling=True)
# According to the graph you could reach 2721 positive customers if you contact 5000 customers in the descending order of scores. (But you will actually reach 3591 positive customers.) If you contact randomly, then you can reach only 1195 positive customer.
roc_curve_xgb.optimize_expected_value(proportion_positive=proportion_positive, scaling=True).loc[5000,:]
# +
def profit_df(roc:ROCCurve, price:int, cost:int) -> pd.DataFrame:
    """Tabulate expected profit by number of contacts.

    Adds three profit columns to the scaled expected-value table of *roc*:
    contacting in descending score order, contacting at random, and the
    profit based on the actual labels.

    NOTE(review): reads the notebook-level ``proportion_positive`` global
    instead of taking it as a parameter — confirm this is intentional.
    """
    table = roc.optimize_expected_value(proportion_positive=proportion_positive,
                                        scaling=True).copy()
    n_contacts = table.index  # index = number of customers contacted
    table["following score"] = price*0.1*table["expected (score)"] - cost*n_contacts
    table["random"] = price*0.1*table["expected (random)"] - cost*n_contacts
    table["actual"] = price*0.1*table["n_true"] - cost*n_contacts
    return table
df_profit = profit_df(roc_curve_xgb, price, cost)
best_n_try = df_profit.sort_values(by="following score", ascending=False).index[0]  # contact count maximizing expected profit
def show_profit(df_profit:pd.DataFrame, xintercept:int):
    """Plot the three profit curves and mark *xintercept* with a dotted line."""
    profit_actual = df_profit["actual"]
    df_profit[["random", "following score", "actual"]].plot()
    # Vertical marker at the chosen number of contacts, spanning the
    # actual-profit range.
    plt.vlines(xintercept, ymin=profit_actual.min(), ymax=profit_actual.max(),
               linestyle=":")
    plt.title("Expected profit by number of contacts")
    plt.xlabel("Number of contacts")
    plt.ylabel("Profit")
show_profit(df_profit, best_n_try)
# -
# According to the graph the profit is maximized if we contact 3637 customers in the descending order of scores. The expected profit will be €78025. This number is much smaller than the profit we computed above. This is because of the scaling.
df_profit.loc[best_n_try,:]
# We have seen two approaches:
#
# 1. Contact all customers which are predicted as positive customers.
# 2. Use predicted probabilities as scores and contact customers in the descending order of scores.
#
# **You should not show the both approaches to the audience.** They are definitely confused by the multiple solutions and you are going to be asked which solution is correct/reliable.
#
# You should make it clear how the predicted model is going to be used? What is the goal of the analysis? If it is clear, then you should choose only one metric to optimize.
# ### Evaluation of the model on test set
# Final evaluation of the chosen XGBoost model on the held-out test set.
y_score = xgb.predict_proba(X_test)[:,1]
roc = ROCCurve(y_true=y_test, y_score=y_score)
roc.show_roc_curve()
# When computing expected value, we have to use the proportion of the positive customers in the **training set** (not the test set). This is because we do not know the actual proportion of positive customers in the test set and we also have to predict it.
roc.show_expected_value(proportion_positive=proportion_positive, scaling=True)
df_profit = profit_df(roc, price, cost)
best_n_try_test = df_profit.sort_values(by="following score", ascending=False).index[0]
show_profit(df_profit, best_n_try_test)
df_profit.loc[best_n_try_test,:]
# ## Environment
# %load_ext watermark
# %watermark -v -n -m -p numpy,scipy,sklearn,pandas,matplotlib,seaborn
| [
"matplotlib.pyplot.title",
"seaborn.heatmap",
"adhoc.modeling.show_feature_importance",
"sklearn.tree.DecisionTreeClassifier",
"matplotlib.pyplot.style.use",
"numpy.arange",
"pandas.DataFrame",
"sys.path.append",
"adhoc.modeling.ROCCurve",
"adhoc.utilities.facet_grid_scatter_plot",
"adhoc.modeli... | [((892, 924), 'matplotlib.pyplot.style.use', 'plt.style.use', (['"""fivethirtyeight"""'], {}), "('fivethirtyeight')\n", (905, 924), True, 'import matplotlib.pyplot as plt\n'), ((1077, 1110), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {}), "('ignore')\n", (1100, 1110), False, 'import warnings\n'), ((1170, 1214), 'pandas.read_pickle', 'pd.read_pickle', (['"""../data/feature_matrix.pkl"""'], {}), "('../data/feature_matrix.pkl')\n", (1184, 1214), True, 'import pandas as pd\n'), ((4437, 4466), 'pandas.DataFrame', 'pd.DataFrame', (['plr.cv_results_'], {}), '(plr.cv_results_)\n', (4449, 4466), True, 'import pandas as pd\n'), ((4618, 4641), 'adhoc.modeling.cv_results_summary', 'cv_results_summary', (['plr'], {}), '(plr)\n', (4636, 4641), False, 'from adhoc.modeling import cv_results_summary\n'), ((5436, 5460), 'adhoc.modeling.cv_results_summary', 'cv_results_summary', (['tree'], {}), '(tree)\n', (5454, 5460), False, 'from adhoc.modeling import cv_results_summary\n'), ((5587, 5619), 'adhoc.modeling.show_tree', 'show_tree', (['tree', 'X_train.columns'], {}), '(tree, X_train.columns)\n', (5596, 5619), False, 'from adhoc.modeling import show_tree\n'), ((5748, 5794), 'adhoc.modeling.show_feature_importance', 'show_feature_importance', (['tree', 'X_train.columns'], {}), '(tree, X_train.columns)\n', (5771, 5794), False, 'from adhoc.modeling import show_feature_importance\n'), ((6013, 6035), 'adhoc.modeling.cv_results_summary', 'cv_results_summary', (['rf'], {}), '(rf)\n', (6031, 6035), False, 'from adhoc.modeling import cv_results_summary\n'), ((6388, 6411), 'adhoc.modeling.cv_results_summary', 'cv_results_summary', (['xgb'], {}), '(xgb)\n', (6406, 6411), False, 'from adhoc.modeling import cv_results_summary\n'), ((7364, 7425), 'adhoc.modeling.recover_label', 'recover_label', (['df_train', 'field2columns'], {'sep': '"""_"""', 'inplace': '(True)'}), "(df_train, field2columns, sep='_', inplace=True)\n", (7377, 7425), False, 'from 
adhoc.modeling import recover_label\n'), ((8473, 8643), 'adhoc.utilities.facet_grid_scatter_plot', 'facet_grid_scatter_plot', ([], {'data': 'df_train', 'col': '"""marital-status"""', 'row': '"""education"""', 'x': '"""capital-gain"""', 'y': '"""hours-per-week"""', 'c': '"""prob"""', 'cmap': '"""RdBu_r"""', 'margin_titles': '(True)'}), "(data=df_train, col='marital-status', row=\n 'education', x='capital-gain', y='hours-per-week', c='prob', cmap=\n 'RdBu_r', margin_titles=True)\n", (8496, 8643), False, 'from adhoc.utilities import facet_grid_scatter_plot\n'), ((9262, 9428), 'adhoc.utilities.bins_heatmap', 'bins_heatmap', (['df_train'], {'cat1': '"""marital-status"""', 'cat2': '"""education"""', 'x': '"""capital-gain"""', 'y': '"""hours-per-week"""', 'target': '"""prob"""', 'center': '(0.5)', 'cmap': '"""RdBu_r"""', 'fontsize': '(14)'}), "(df_train, cat1='marital-status', cat2='education', x=\n 'capital-gain', y='hours-per-week', target='prob', center=0.5, cmap=\n 'RdBu_r', fontsize=14)\n", (9274, 9428), False, 'from adhoc.utilities import bins_heatmap\n'), ((16107, 16152), 'adhoc.modeling.ROCCurve', 'ROCCurve', ([], {'y_true': 'y_train', 'y_score': 'y_score_xgb'}), '(y_true=y_train, y_score=y_score_xgb)\n', (16115, 16152), False, 'from adhoc.modeling import ROCCurve\n'), ((16929, 16973), 'adhoc.modeling.ROCCurve', 'ROCCurve', ([], {'y_true': 'y_train', 'y_score': 'y_score_rf'}), '(y_true=y_train, y_score=y_score_rf)\n', (16937, 16973), False, 'from adhoc.modeling import ROCCurve\n'), ((17623, 17668), 'seaborn.heatmap', 'sns.heatmap', (['df_f1'], {'cmap': '"""YlGnBu"""', 'annot': '(True)'}), "(df_f1, cmap='YlGnBu', annot=True)\n", (17634, 17668), True, 'import seaborn as sns\n'), ((17669, 17700), 'matplotlib.pyplot.title', 'plt.title', (['"""Table of F1-scores"""'], {}), "('Table of F1-scores')\n", (17678, 17700), True, 'import matplotlib.pyplot as plt\n'), ((18929, 18961), 'matplotlib.pyplot.title', 'plt.title', (['"""Profit by threshold"""'], {}), "('Profit by 
threshold')\n", (18938, 18961), True, 'import matplotlib.pyplot as plt\n'), ((22932, 22972), 'adhoc.modeling.ROCCurve', 'ROCCurve', ([], {'y_true': 'y_test', 'y_score': 'y_score'}), '(y_true=y_test, y_score=y_score)\n', (22940, 22972), False, 'from adhoc.modeling import ROCCurve\n'), ((5315, 5353), 'sklearn.tree.DecisionTreeClassifier', 'DecisionTreeClassifier', ([], {'random_state': '(3)'}), '(random_state=3)\n', (5337, 5353), False, 'from sklearn.tree import DecisionTreeClassifier\n'), ((5896, 5934), 'sklearn.ensemble.RandomForestClassifier', 'RandomForestClassifier', ([], {'random_state': '(3)'}), '(random_state=3)\n', (5918, 5934), False, 'from sklearn.ensemble import RandomForestClassifier\n'), ((6280, 6324), 'xgboost.sklearn.XGBClassifier', 'XGBClassifier', ([], {'max_depth': '(10)', 'random_state': '(51)'}), '(max_depth=10, random_state=51)\n', (6293, 6324), False, 'from xgboost.sklearn import XGBClassifier\n'), ((21844, 21894), 'matplotlib.pyplot.title', 'plt.title', (['"""Expected profit by number of contacts"""'], {}), "('Expected profit by number of contacts')\n", (21853, 21894), True, 'import matplotlib.pyplot as plt\n'), ((21899, 21931), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Number of contacts"""'], {}), "('Number of contacts')\n", (21909, 21931), True, 'import matplotlib.pyplot as plt\n'), ((21936, 21956), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Profit"""'], {}), "('Profit')\n", (21946, 21956), True, 'import matplotlib.pyplot as plt\n'), ((3536, 3557), 'sys.path.append', 'sys.path.append', (['""".."""'], {}), "('..')\n", (3551, 3557), False, 'import sys\n'), ((4169, 4224), 'sklearn.linear_model.LogisticRegression', 'LogisticRegression', ([], {'penalty': '"""elasticnet"""', 'solver': '"""saga"""'}), "(penalty='elasticnet', solver='saga')\n", (4187, 4224), False, 'from sklearn.linear_model import LogisticRegression\n'), ((5115, 5154), 'adhoc.modeling.show_coefficients', 'show_coefficients', (['plr', 'X_train.columns'], {}), '(plr, 
X_train.columns)\n', (5132, 5154), False, 'from adhoc.modeling import show_coefficients\n'), ((6051, 6095), 'adhoc.modeling.show_feature_importance', 'show_feature_importance', (['rf', 'X_train.columns'], {}), '(rf, X_train.columns)\n', (6074, 6095), False, 'from adhoc.modeling import show_feature_importance\n'), ((6428, 6473), 'adhoc.modeling.show_feature_importance', 'show_feature_importance', (['xgb', 'X_train.columns'], {}), '(xgb, X_train.columns)\n', (6451, 6473), False, 'from adhoc.modeling import show_feature_importance\n'), ((17329, 17342), 'numpy.arange', 'np.arange', (['(11)'], {}), '(11)\n', (17338, 17342), True, 'import numpy as np\n'), ((17372, 17391), 'itertools.product', 'product', (['vals', 'vals'], {}), '(vals, vals)\n', (17379, 17391), False, 'from itertools import product\n')] |
from numpy.random import rand
from tensorboardX import SummaryWriter
import time
# Hyper-parameter grid demo: log one fake training run per
# (num_layers, hidden_size) combination to TensorBoard.
num_layers_options = [2, 3, 4]
hidden_size_options = [128, 256]
for num_layers in num_layers_options:
    for hidden_size in hidden_size_options:
        with SummaryWriter() as writer:
            # NOTE(review): add_hparams_start/add_hparams_end look like an old
            # experimental tensorboardX API; current releases expose
            # writer.add_hparams(...) instead — confirm the pinned version.
            writer.add_hparams_start(dict(num_layers=num_layers, hidden_size=hidden_size))
            t = rand()  # random starting value for the fake loss
            for n_iter in range(100):
                t += rand() * 0.01  # random upward drift to fake a metric curve
                writer.add_scalar('Valid loss', t + 0.1, n_iter)
                writer.add_scalar('Train Loss', t, n_iter)
            writer.add_hparams_end()
time.sleep(1) | [
"numpy.random.rand",
"tensorboardX.SummaryWriter",
"time.sleep"
] | [((626, 639), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (636, 639), False, 'import time\n'), ((243, 258), 'tensorboardX.SummaryWriter', 'SummaryWriter', ([], {}), '()\n', (256, 258), False, 'from tensorboardX import SummaryWriter\n'), ((377, 383), 'numpy.random.rand', 'rand', ([], {}), '()\n', (381, 383), False, 'from numpy.random import rand\n'), ((443, 449), 'numpy.random.rand', 'rand', ([], {}), '()\n', (447, 449), False, 'from numpy.random import rand\n')] |
import pandas as pd
from sklearn.ensemble.forest import RandomForestClassifier
import numpy as np
# Index -> human-readable feature-name lookup.  NOTE(review): the trailing
# spaces inside the strings appear intentional (they end up verbatim in the
# CSV written by RF_Features_Importance) — do not strip them without checking
# downstream consumers.
features = {
    "0":"Paralysis ", # partial body paralysis
    "1":"Voice ",
    "2":"Feeding_Tube " ,
    "3": "Vision ",
    "4": "Cognitive " ,
    "5": "Perception " ,
    "6": "Dressing " ,
    "7":"Incontinence " ,
    "8": "Emotions ",
    "9" : "Sex " ,
}
# Same names as `features`, in index order, used as the "Features" column.
header = [ "Paralysis ",
    "Voice ",
    "Feeding_Tube " ,
    "Vision ",
    "Cognitive " ,
    "Perception " ,
    "Dressing " ,
    "Incontinence " ,
    "Emotions ",
    "Sex " ]
def RF_Features_Importance(X, Y, outputfile="RF.csv", n_estimators=300):
    """Fit a random forest on (X, Y) and write per-feature importances to CSV.

    Parameters
    ----------
    X, Y : array-likes accepted by ``RandomForestClassifier.fit``.
    outputfile : path of the CSV written; columns are "Features" (from the
        module-level ``header`` list) and "Importance".
    n_estimators : number of trees; default keeps the original 300 so existing
        callers are unaffected.
    """
    forest = RandomForestClassifier(n_estimators=n_estimators)
    forest.fit(X, Y)
    # feature_importances_ is already a 1-D ndarray; the original
    # np.matrix(...).tolist()[0] was a roundabout way of getting a plain list.
    importances = forest.feature_importances_.tolist()
    df = pd.DataFrame(list(zip(header, importances)),
                 columns=["Features", "Importance"])
    df.to_csv(outputfile, index=False)
| [
"numpy.matrix",
"sklearn.ensemble.forest.RandomForestClassifier"
] | [((598, 638), 'sklearn.ensemble.forest.RandomForestClassifier', 'RandomForestClassifier', ([], {'n_estimators': '(300)'}), '(n_estimators=300)\n', (620, 638), False, 'from sklearn.ensemble.forest import RandomForestClassifier\n'), ((673, 711), 'numpy.matrix', 'np.matrix', (['forest.feature_importances_'], {}), '(forest.feature_importances_)\n', (682, 711), True, 'import numpy as np\n')] |
import pytest
from scipy.stats.distributions import poisson
import statsmodels.api as sm
import numpy as np
@pytest.fixture
def scotland_data():
    # Scotland vote dataset from statsmodels as plain numpy arrays.
    # NOTE(review): `as_pandas` is deprecated/removed in recent statsmodels
    # releases — confirm the pinned version still accepts it.
    data = sm.datasets.scotland.load(as_pandas=False)
    return data.exog, data.endog
@pytest.fixture
def poisson_data():
    """Synthetic Poisson-regression sample: X = (age, weight), y = visit counts.

    Counts are drawn from Poisson(exp(b0 + b1*age + b2*weight)) with fixed
    coefficients, so the generating model is known to the tests.
    """
    n_samples = 1000
    intercept, b_age, b_weight = -10, 0.05, 0.08
    # Draw covariates, then observed counts around the model's expected rate.
    age = np.random.uniform(30, 70, n_samples)
    weight = np.random.normal(150, 20, n_samples)
    rate = np.exp(intercept + age * b_age + weight * b_weight)
    counts = poisson.rvs(rate)
    return np.column_stack([age, weight]), counts
| [
"numpy.random.uniform",
"scipy.stats.distributions.poisson.rvs",
"numpy.exp",
"numpy.random.normal",
"statsmodels.api.datasets.scotland.load",
"numpy.vstack"
] | [((157, 199), 'statsmodels.api.datasets.scotland.load', 'sm.datasets.scotland.load', ([], {'as_pandas': '(False)'}), '(as_pandas=False)\n', (182, 199), True, 'import statsmodels.api as sm\n'), ((355, 391), 'numpy.random.uniform', 'np.random.uniform', (['(30)', '(70)', 'n_samples'], {}), '(30, 70, n_samples)\n', (372, 391), True, 'import numpy as np\n'), ((405, 441), 'numpy.random.normal', 'np.random.normal', (['(150)', '(20)', 'n_samples'], {}), '(150, 20, n_samples)\n', (421, 441), True, 'import numpy as np\n'), ((464, 520), 'numpy.exp', 'np.exp', (['(int_coef + age * age_coef + weight * weight_coef)'], {}), '(int_coef + age * age_coef + weight * weight_coef)\n', (470, 520), True, 'import numpy as np\n'), ((543, 571), 'scipy.stats.distributions.poisson.rvs', 'poisson.rvs', (['expected_visits'], {}), '(expected_visits)\n', (554, 571), False, 'from scipy.stats.distributions import poisson\n'), ((580, 604), 'numpy.vstack', 'np.vstack', (['[age, weight]'], {}), '([age, weight])\n', (589, 604), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Nov 30 15:45:49 2020
@author: peter
"""
import numpy as np
from pathlib import Path
import pandas as pd
import tifffile
from joblib import Parallel, delayed
from vsd_cancer.functions import cancer_functions as canf
def make_all_raw_tc(df_file, save_dir, redo=True, njobs=10, HPC_num=None):
    """Extract a raw time course per segmented ROI for every trial in df_file.

    For each trial row, loads the ROI segmentation and the blue-channel frames
    of the tif stack, computes one time course per ROI mask, and saves the
    result as ``<trial>_raw_tc.npy`` under ``save_dir/ratio_stacks/<trial>/``.

    Parameters
    ----------
    df_file : path to a CSV with one row per trial (needs a ``tif_file`` column).
    save_dir : root directory holding the per-trial intermediates.
    redo : when False, resume from the trial index recorded by a previous run.
    njobs : number of joblib workers per trial (unused when HPC_num is set).
    HPC_num : when not None, process only the row at this index — lets an HPC
        array job handle one trial per task.
    """
    df = pd.read_csv(df_file)
    if redo or HPC_num is not None:
        redo_from = 0
    else:
        # resume marker written at the end of each loop iteration below
        redo_from = np.load(
            Path(
                save_dir,
                f"{df_file.stem}_intermediate_files",
                f"{df_file.stem}_redo_from_make_all_raw_tc.npy",
            )
        )
    print(f"{len(df) - redo_from} to do")
    for idx, data in enumerate(df.itertuples()):
        if HPC_num is not None:  # allows running in parallel on HPC
            if idx != HPC_num:
                continue
        # trial id = path components from the "cancer" directory down to the
        # containing folder, joined with underscores
        parts = Path(data.tif_file).parts
        trial_string = "_".join(parts[parts.index("cancer") : -1])
        trial_save = Path(save_dir, "ratio_stacks", trial_string)
        if not redo and HPC_num is None:
            if idx < redo_from:
                continue
        elif not redo and HPC_num is not None:
            # on HPC: skip trials whose output already exists
            if Path(trial_save, f"{trial_string}_raw_tc.npy").is_file():
                continue
        seg = np.load(Path(trial_save, f"{trial_string}_seg.npy"))
        masks = canf.lab2masks(seg)
        try:
            LED = np.load(Path(trial_save, f"{trial_string}_LED_powers.npy"))
        except Exception as err:
            # best-effort fallback when the LED-power file is missing
            print(Path(trial_save, f"{trial_string}_LED_powers.npy"))
            LED = [0.5, 1]  # hacking in for yilin
        # the lower-powered LED marks which interleaved frame is the blue channel
        if LED[0] < LED[1]:
            blue = 0
        else:
            blue = 1
        # keep only the blue-channel frames (every other frame, starting at `blue`)
        stack = tifffile.imread(data.tif_file)[blue::2, ...]
        if HPC_num is None:
            with Parallel(n_jobs=njobs) as parallel:
                tc = parallel(
                    delayed(canf.t_course_from_roi)(stack, mask) for mask in masks
                )
        else:
            tc = [canf.t_course_from_roi(stack, mask) for mask in masks]
        tc = np.array(tc)
        np.save(Path(trial_save, f"{trial_string}_raw_tc.npy"), tc)
        print(f"Saved {trial_string}")
        redo_from += 1
        # persist the resume marker so an interrupted run can continue
        np.save(
            Path(
                save_dir,
                f"{df_file.stem}_intermediate_files",
                f"{df_file.stem}_redo_from_make_all_raw_tc.npy",
            ),
            redo_from,
        )
| [
"vsd_cancer.functions.cancer_functions.t_course_from_roi",
"pandas.read_csv",
"vsd_cancer.functions.cancer_functions.lab2masks",
"pathlib.Path",
"numpy.array",
"joblib.Parallel",
"tifffile.imread",
"joblib.delayed"
] | [((370, 390), 'pandas.read_csv', 'pd.read_csv', (['df_file'], {}), '(df_file)\n', (381, 390), True, 'import pandas as pd\n'), ((1028, 1072), 'pathlib.Path', 'Path', (['save_dir', '"""ratio_stacks"""', 'trial_string'], {}), "(save_dir, 'ratio_stacks', trial_string)\n", (1032, 1072), False, 'from pathlib import Path\n'), ((1401, 1420), 'vsd_cancer.functions.cancer_functions.lab2masks', 'canf.lab2masks', (['seg'], {}), '(seg)\n', (1415, 1420), True, 'from vsd_cancer.functions import cancer_functions as canf\n'), ((2130, 2142), 'numpy.array', 'np.array', (['tc'], {}), '(tc)\n', (2138, 2142), True, 'import numpy as np\n'), ((501, 606), 'pathlib.Path', 'Path', (['save_dir', 'f"""{df_file.stem}_intermediate_files"""', 'f"""{df_file.stem}_redo_from_make_all_raw_tc.npy"""'], {}), "(save_dir, f'{df_file.stem}_intermediate_files',\n f'{df_file.stem}_redo_from_make_all_raw_tc.npy')\n", (505, 606), False, 'from pathlib import Path\n'), ((914, 933), 'pathlib.Path', 'Path', (['data.tif_file'], {}), '(data.tif_file)\n', (918, 933), False, 'from pathlib import Path\n'), ((1340, 1383), 'pathlib.Path', 'Path', (['trial_save', 'f"""{trial_string}_seg.npy"""'], {}), "(trial_save, f'{trial_string}_seg.npy')\n", (1344, 1383), False, 'from pathlib import Path\n'), ((1769, 1799), 'tifffile.imread', 'tifffile.imread', (['data.tif_file'], {}), '(data.tif_file)\n', (1784, 1799), False, 'import tifffile\n'), ((2160, 2206), 'pathlib.Path', 'Path', (['trial_save', 'f"""{trial_string}_raw_tc.npy"""'], {}), "(trial_save, f'{trial_string}_raw_tc.npy')\n", (2164, 2206), False, 'from pathlib import Path\n'), ((2304, 2409), 'pathlib.Path', 'Path', (['save_dir', 'f"""{df_file.stem}_intermediate_files"""', 'f"""{df_file.stem}_redo_from_make_all_raw_tc.npy"""'], {}), "(save_dir, f'{df_file.stem}_intermediate_files',\n f'{df_file.stem}_redo_from_make_all_raw_tc.npy')\n", (2308, 2409), False, 'from pathlib import Path\n'), ((1461, 1511), 'pathlib.Path', 'Path', (['trial_save', 
'f"""{trial_string}_LED_powers.npy"""'], {}), "(trial_save, f'{trial_string}_LED_powers.npy')\n", (1465, 1511), False, 'from pathlib import Path\n'), ((1860, 1882), 'joblib.Parallel', 'Parallel', ([], {'n_jobs': 'njobs'}), '(n_jobs=njobs)\n', (1868, 1882), False, 'from joblib import Parallel, delayed\n'), ((2061, 2096), 'vsd_cancer.functions.cancer_functions.t_course_from_roi', 'canf.t_course_from_roi', (['stack', 'mask'], {}), '(stack, mask)\n', (2083, 2096), True, 'from vsd_cancer.functions import cancer_functions as canf\n'), ((1564, 1614), 'pathlib.Path', 'Path', (['trial_save', 'f"""{trial_string}_LED_powers.npy"""'], {}), "(trial_save, f'{trial_string}_LED_powers.npy')\n", (1568, 1614), False, 'from pathlib import Path\n'), ((1234, 1280), 'pathlib.Path', 'Path', (['trial_save', 'f"""{trial_string}_raw_tc.npy"""'], {}), "(trial_save, f'{trial_string}_raw_tc.npy')\n", (1238, 1280), False, 'from pathlib import Path\n'), ((1947, 1978), 'joblib.delayed', 'delayed', (['canf.t_course_from_roi'], {}), '(canf.t_course_from_roi)\n', (1954, 1978), False, 'from joblib import Parallel, delayed\n')] |
'''
Created on May 20, 2016
@author: josephbakarji
'''
import numpy as np
from notemidi import TrigNote, TrigNote_midinum, signswitch2note, TriggerChordTest, make_C2midi
from __init__ import settingsDir
from learning import Learn
from threading import Thread
from collections import deque
#######################################################################################################################################
#######################################################################################################################################
# def trigger_usepast(sensorq, trigon_prev, trigoff_prev, turn_state):
def triggerfun(sensvalue, thresh, dshmidt, trigon_prev, trigoff_prev, turn_state):
    """Schmitt-trigger edge detection across an array of sensor channels.

    A channel produces a +1 switch when its reading rises above
    thresh + dshmidt while not latched on, and a -1 switch when it falls below
    thresh - dshmidt while latched on; otherwise 0.

    Returns (above, below, nswitch): the new on/off trigger states to feed
    back as trigon_prev/trigoff_prev on the next call, and the per-channel
    int switch array.
    """
    offset = np.asarray(sensvalue) - thresh
    above = offset - dshmidt > 0   # past the upper hysteresis band
    below = offset + dshmidt < 0   # past the lower hysteresis band
    rising_edge = above & np.logical_not(trigon_prev) & np.logical_not(turn_state)
    falling_edge = below & np.logical_not(trigoff_prev) & turn_state
    nswitch = rising_edge.astype(int) - falling_edge.astype(int)
    return above, below, nswitch
##############################################################################
##############################################################################
## Trigger class for one sensor ##
# Thread that takes sensor queue as input and sends individual midi triggers as output
class WeighTrig(Thread):
    """Daemon thread: pulls 5-channel pressure readings from `sensorq` and
    fires MIDI note on/off events (via signswitch2note) whenever a channel
    crosses its threshold, using the Schmitt-trigger logic in `triggerfun`."""
    def __init__(self, sensorq, thresh, notearr):
        Thread.__init__(self)
        self.sensorq = sensorq   # queue of per-read sensor value arrays
        self.thresh = thresh     # per-channel trigger thresholds
        self.notearr = notearr   # notes mapped to the 5 channels
        self.daemon = True       # don't block interpreter exit
    def run(self):
        trigon_prev = False
        trigoff_prev = False
        turn_state = np.zeros(5, dtype=bool)  # latched on/off state per channel
        dshmidt = 5 # use as input
        while True:
            # blocks until a new reading arrives
            sensvalue = self.sensorq.get(block=True)
            [trigon_prev, trigoff_prev, nswitch] = triggerfun(sensvalue, self.thresh, dshmidt, trigon_prev, trigoff_prev, turn_state)
            if( not(all(i == 0 for i in nswitch)) ): # use if(nswitch.any())
                turn_state = nswitch + turn_state
                signswitch2note(nswitch, sensvalue, self.notearr)
##############################################################################
##############################################################################
## Trigger class for one sensor ##
# Thread that takes sensor queue as input and sends individual midi triggers as output
class WeighTrig_ai(Thread):
    """Daemon thread that triggers MIDI notes from pressure readings and
    chooses WHICH note to play with a learned model: a per-finger
    thumb-under classifier on the recent flex history, plus a transition
    probability matrix over note intervals."""
    def __init__(self, pressq, flexq, flexsize, thresh, dshmidt):
        Thread.__init__(self)
        self.pressq = pressq        # queue of pressure readings (5 channels)
        self.flexq = flexq          # queue of flex-sensor readings
        self.thresh = thresh        # pressure trigger thresholds
        self.dshmidt = dshmidt      # hysteresis width for triggerfun
        self.daemon = True
        self.flexsize = flexsize    # length of the flex-history window
        self.C2midi, midi2C = make_C2midi()
        print(self.C2midi)
        # Train the models from recorded data at construction time.
        L = Learn(includefile=settingsDir + 'excludeSingleNotes.txt', trainsize=0.9)
        Fpair_full, Ndiff_full, flex_array0, flex_array1, flex_full = L.stat.get_features_wtu(idxminus=flexsize, idxplus=0)
        self.tu_predictor, accuracy = L.learn_thumbunder(flex_array0, flex_array1) # Returns thumb_under predictors of length[1]
        data = L.learn_transition_prob_withThumbUnder()
        self.Ndomain = data.Ndomain       # domain of note-interval values
        self.Fdomainidx = data.Fdomainidx # feature -> row index into Tmat
        self.Tmat = data.Tmat             # transition probability matrix
    def run(self):
        trigon_prev = False
        trigoff_prev = False
        finger_prev = 0
        noteC_prev = 20
        turn_state = np.zeros(5, dtype=bool)  # latched on/off state per finger
        # rolling window of the most recent flex readings
        flexdeq = deque([0 for i in range(self.flexsize)], maxlen=self.flexsize)
        noteC_prev = 20
        NotesOn = [0, 0, 0, 0, 0]  # currently-sounding note per finger
        while True:
            pressvalue = self.pressq.get(block=True)
            flexvalue = self.flexq.get(block=True)
            flexdeq.append( flexvalue[0] )
            [trigon_prev, trigoff_prev, nswitch] = triggerfun(pressvalue, self.thresh, self.dshmidt, trigon_prev, trigoff_prev, turn_state)
            if((nswitch==-1).any()):
                # turn_state = nswitch + turn_state
                finger = np.nonzero(nswitch==-1)[0][0] # assumes only one finger released at a time
                turn_state[finger] = 0
                # note-off for whatever note this finger was holding
                TrigNote_midinum(self.C2midi[NotesOn[finger]], 0) # make C2midi
                print(NotesOn)
                print(nswitch)
                print('-------')
            if((nswitch==1).any()):
                # turn_state = nswitch + turn_state
                finger = np.nonzero(nswitch==1)[0][0] # assumes only one finger triggered at a time.
                turn_state[finger] = 1
                # pick the next note with the learned model, then note-on
                noteC = self.aifunction(finger_prev, finger, noteC_prev, flexdeq )
                TrigNote_midinum(self.C2midi[noteC], 80)
                finger_prev = finger
                noteC_prev = noteC
                NotesOn[finger] = noteC
                print(noteC_prev)
                print(NotesOn)
                print(nswitch)
                print('-------')
    def aifunction(self, finger_prev, finger, noteC_prev, flexdeq):
        """Choose the next note: classify thumb-under from the flex window
        (finger 4, the thumb, never tucks under -> forced 0), then sample a
        note interval from the learned transition distribution."""
        # print(np.asarray(flexdeq).reshape(1, -1))
        tupred = self.tu_predictor[finger].predict(np.asarray(flexdeq).reshape(1, -1)) if finger != 4 else [0.0]
        tu = int(tupred[0])
        feature = ((finger_prev, finger), tu)
        noteC_diff = np.random.choice(self.Ndomain, p=self.Tmat[self.Fdomainidx[feature], :]) # choose according to distribution
        return noteC_prev + noteC_diff
##############################################################################
##############################################################################
## 2 Hand triggering combined ##
class WeighTrig2h(Thread):
    """Per-hand daemon thread for the two-hand setup: runs the trigger logic
    on one glove's pressure queue and, instead of sending MIDI directly,
    forwards [turn_state, nswitch, hand] to a shared `collectq` consumed by
    CombHandsSendMidi."""
    def __init__(self, sensorq, collectq, hand, thresh, dshmidt):
        Thread.__init__(self)
        self.sensorq = sensorq    # this hand's sensor reading queue
        self.collectq = collectq  # shared output queue for both hands
        self.hand = hand          # hand label, e.g. 'R' or 'L'
        self.thresh = thresh
        self.dshmidt = dshmidt
        self.daemon = True
    def run(self):
        trigon_prev = False
        trigoff_prev = False
        turn_state = np.zeros(5, dtype=bool)  # latched on/off state per finger
        while True:
            sensvalue = self.sensorq.get(block=True)
            [trigon_prev, trigoff_prev, nswitch] = triggerfun(sensvalue, self.thresh, self.dshmidt, trigon_prev, trigoff_prev, turn_state)
            #print(self.hand)
            if nswitch.any():
                # only forward when something actually switched
                print(self.hand)
                turn_state = nswitch + turn_state
                state = [turn_state, nswitch, self.hand]
                self.collectq.put(state)
# Collect and combine glove readings
class CombHandsSendMidi(Thread):
    """Consumer thread for the two-hand setup: reads [turn_state, nswitch,
    hand] tuples from `collectq`.  The right hand ('R', fingers 1-3) plays
    the current 3-note window; any other hand selects a new note window via
    WindowMap.  NOTE(review): `basenote` and `key` are stored but never used
    in this visible code."""
    def __init__(self, collectq, basenote, key):
        Thread.__init__(self)
        self.collectq = collectq
        self.basenote = basenote
        self.key = key
        self.daemon = True
    def run(self):
        midnote = 'C3'
        mode = 'standard'
        scale = 'major'
        fnum = 3  # three playing fingers
        #sensarr = np.array([0, 0, 0, 0, 0]) # Arbitrary just for test
        NotesOn = ['' for i in range(fnum)]  # note currently held per finger
        [WArr, NArr] = GenerateNoteMap(midnote, scale, mode)
        # start from the all-zeros window pattern
        notearr = WindowMap(np.zeros(5), WArr, NArr)
        while True:
            state = self.collectq.get(block=True)
            ### state = [turn_state, nswitch, self.hand]
            #[TrigNote(notearr[i], vel) for i in range(state[2]) if ]
            ## Normal 5 note usage
            # if(state[2] == 'R'):
            #     for i in range(fnum):
            #         if(state[1][i] == 1):
            #             TrigNote(notearr[i], 80) # Add function to calculate velocity
            #             NotesOn[i] = notearr[i]
            #         elif(state[1][i] == -1):
            #             TrigNote(NotesOn[i], 0)
            # 3 press usage
            if(state[2] == 'R'):
                # fingers 1..3 of the right hand map to notes 0..2
                for i, j in enumerate(range(1, 4)):
                    if(state[1][j] == 1):
                        print(i, j)
                        TrigNote(notearr[i], 80) # Add function to calculate velocity
                        NotesOn[i] = notearr[i]
                        print(notearr)
                        #print('note on: ', NotesOn)
                    elif(state[1][j] == -1):
                        TrigNote(NotesOn[i], 0)
                        #print('note off: ', NotesOn)
            else:
                # other hand: its turn_state selects a new note window
                notearr = WindowMap(state[0], WArr, NArr)
                print(state[0])
def WindowMap(state, WArr, NArr):
    """Return the note set whose window pattern matches `state`.

    `WArr` holds candidate hand-state patterns (array-likes) and `NArr` the
    note set for each pattern, index-aligned.  Falls back to NArr[5] (the
    middle window) when no pattern matches.
    """
    for pattern, notes in zip(WArr, NArr):
        if (pattern == state).all():
            return notes
    return NArr[5]
def GenerateNoteMap(midnote, scale, mode):
    """Return (WArr, NArr): hand-state window patterns and the 3-note group
    mapped to each pattern, for use with WindowMap.

    Only mode == 'standard' is supported.  The old 'test' branch referenced an
    undefined `state` variable and never assigned WArr, so it always crashed
    with NameError at runtime; it now raises an explicit ValueError instead.

    `midnote` and `scale` are currently unused placeholders kept for
    interface compatibility with callers.
    """
    if mode != 'standard':
        raise ValueError(f"unsupported note-map mode: {mode!r} (only 'standard' is implemented)")
    # 10 recognizable hand shapes, from all-fingers-down sweeping to all-up.
    WArr = [[1, 1, 1, 1, 1],
            [0, 1, 1, 1, 1],
            [0, 0, 1, 1, 1],
            [0, 0, 0, 1, 1],
            [0, 0, 0, 0, 1],
            [0, 0, 0, 0, 0],
            [1, 0, 0, 0, 0],
            [1, 1, 0, 0, 0],
            [1, 1, 1, 0, 0],
            [1, 1, 1, 1, 0]]
    # Consecutive diatonic triples covering B0..C5, one per window pattern.
    NArr = [['B0', 'C1', 'D1'],
            ['E1', 'F1', 'G1'],
            ['A1', 'B1', 'C2'],
            ['D2', 'E2', 'F2'],
            ['G2', 'A2', 'B2'],
            ['C3', 'D3', 'E3'],
            ['F3', 'G3', 'A3'],
            ['B3', 'C4', 'D4'],
            ['E4', 'F4', 'G4'],
            ['A4', 'B4', 'C5']]
    return WArr, NArr
def MakeNotes(midnote, scale):
    # NOTE(review): incomplete stub.  `name_to_number` is not defined anywhere
    # in this module, so calling this raises NameError, and `f` is assigned
    # but never used or returned.  Finish or remove.
    name_to_number(midnote)
    if scale == 'major':
        f = 1
#####
################################################################################
################################################################################
# Play chords with flex (fix)
# Class plays chords by changing pitch and triggering either pressure sensors or flex sensors
class playchords(Thread):
    """Daemon thread that plays chords from IMU pitch: tilting past
    pthresh - phist triggers a chord on (voiced from whichever pressure
    sensors are above threshold at that moment); tilting back past
    pthresh + phist releases it."""
    def __init__(self, sensorqueue, sensthresh, imuq, phist, pthresh):
        Thread.__init__(self)
        self.sensorq = sensorqueue   # latest pressure readings (indexable, [-1] = newest)
        self.sensthresh = sensthresh # per-sensor pressure thresholds
        self.imq = imuq              # IMU readings; [-1][1] = current pitch
        self.phist = phist # 127 == 0 degrees
        self.pthresh = pthresh       # pitch threshold for chord on/off
        self.daemon = True
    def run(self):
        trigsens = [0, 0, 0, 0, 0]  # sensor pattern captured at chord-on
        # NOTE(review): initialized True, so an initial "on" tilt present at
        # startup produces no rising edge until the pitch first leaves the
        # on-region — confirm this is intended.
        trigonprev = True
        trigoffprev = True
        while True:
            if(len(self.imq) != 0): # REMOVE WHEN WIRE SOLDERED
                pitch = self.imq[-1][1]
                trigon = (pitch - self.pthresh) < - self.phist # check if state in region with off or on trigger
                trigoff = (pitch - self.pthresh) > self.phist
                turnon = int(trigon) > int(trigonprev) # goes from 0 to 1 (works without astype too)
                turnoff = int(trigoff) > int(trigoffprev) # 0 to 1
                trigonprev = trigon
                trigoffprev = trigoff
                #print(pitch)
                #print(self.sensorq[-1])
                if(turnon):
                    # capture which sensors are pressed and start the chord
                    print(self.sensorq[-1])
                    sensarr = np.asarray(self.sensorq[-1])
                    trigsens = (sensarr - self.sensthresh) > 0
                    TriggerChordTest(trigsens, pitch, 'on')
                if(turnoff):
                    # release the chord captured at the last chord-on
                    TriggerChordTest(trigsens, pitch, 'off')
| [
"learning.Learn",
"threading.Thread.__init__",
"numpy.asarray",
"numpy.logical_not",
"numpy.zeros",
"notemidi.TrigNote_midinum",
"numpy.nonzero",
"notemidi.make_C2midi",
"notemidi.TrigNote",
"numpy.random.choice",
"notemidi.signswitch2note",
"notemidi.TriggerChordTest"
] | [((741, 762), 'numpy.asarray', 'np.asarray', (['sensvalue'], {}), '(sensvalue)\n', (751, 762), True, 'import numpy as np\n'), ((1021, 1047), 'numpy.logical_not', 'np.logical_not', (['turn_state'], {}), '(turn_state)\n', (1035, 1047), True, 'import numpy as np\n'), ((1723, 1744), 'threading.Thread.__init__', 'Thread.__init__', (['self'], {}), '(self)\n', (1738, 1744), False, 'from threading import Thread\n'), ((1962, 1985), 'numpy.zeros', 'np.zeros', (['(5)'], {'dtype': 'bool'}), '(5, dtype=bool)\n', (1970, 1985), True, 'import numpy as np\n'), ((2825, 2846), 'threading.Thread.__init__', 'Thread.__init__', (['self'], {}), '(self)\n', (2840, 2846), False, 'from threading import Thread\n'), ((3056, 3069), 'notemidi.make_C2midi', 'make_C2midi', ([], {}), '()\n', (3067, 3069), False, 'from notemidi import TrigNote, TrigNote_midinum, signswitch2note, TriggerChordTest, make_C2midi\n'), ((3110, 3182), 'learning.Learn', 'Learn', ([], {'includefile': "(settingsDir + 'excludeSingleNotes.txt')", 'trainsize': '(0.9)'}), "(includefile=settingsDir + 'excludeSingleNotes.txt', trainsize=0.9)\n", (3115, 3182), False, 'from learning import Learn\n'), ((3749, 3772), 'numpy.zeros', 'np.zeros', (['(5)'], {'dtype': 'bool'}), '(5, dtype=bool)\n', (3757, 3772), True, 'import numpy as np\n'), ((5587, 5659), 'numpy.random.choice', 'np.random.choice', (['self.Ndomain'], {'p': 'self.Tmat[self.Fdomainidx[feature], :]'}), '(self.Ndomain, p=self.Tmat[self.Fdomainidx[feature], :])\n', (5603, 5659), True, 'import numpy as np\n'), ((6030, 6051), 'threading.Thread.__init__', 'Thread.__init__', (['self'], {}), '(self)\n', (6045, 6051), False, 'from threading import Thread\n'), ((6326, 6349), 'numpy.zeros', 'np.zeros', (['(5)'], {'dtype': 'bool'}), '(5, dtype=bool)\n', (6334, 6349), True, 'import numpy as np\n'), ((6937, 6958), 'threading.Thread.__init__', 'Thread.__init__', (['self'], {}), '(self)\n', (6952, 6958), False, 'from threading import Thread\n'), ((10703, 10724), 
'threading.Thread.__init__', 'Thread.__init__', (['self'], {}), '(self)\n', (10718, 10724), False, 'from threading import Thread\n'), ((987, 1014), 'numpy.logical_not', 'np.logical_not', (['trigon_prev'], {}), '(trigon_prev)\n', (1001, 1014), True, 'import numpy as np\n'), ((1074, 1102), 'numpy.logical_not', 'np.logical_not', (['trigoff_prev'], {}), '(trigoff_prev)\n', (1088, 1102), True, 'import numpy as np\n'), ((7399, 7410), 'numpy.zeros', 'np.zeros', (['(5)'], {}), '(5)\n', (7407, 7410), True, 'import numpy as np\n'), ((2390, 2439), 'notemidi.signswitch2note', 'signswitch2note', (['nswitch', 'sensvalue', 'self.notearr'], {}), '(nswitch, sensvalue, self.notearr)\n', (2405, 2439), False, 'from notemidi import TrigNote, TrigNote_midinum, signswitch2note, TriggerChordTest, make_C2midi\n'), ((4488, 4537), 'notemidi.TrigNote_midinum', 'TrigNote_midinum', (['self.C2midi[NotesOn[finger]]', '(0)'], {}), '(self.C2midi[NotesOn[finger]], 0)\n', (4504, 4537), False, 'from notemidi import TrigNote, TrigNote_midinum, signswitch2note, TriggerChordTest, make_C2midi\n'), ((4975, 5015), 'notemidi.TrigNote_midinum', 'TrigNote_midinum', (['self.C2midi[noteC]', '(80)'], {}), '(self.C2midi[noteC], 80)\n', (4991, 5015), False, 'from notemidi import TrigNote, TrigNote_midinum, signswitch2note, TriggerChordTest, make_C2midi\n'), ((11786, 11814), 'numpy.asarray', 'np.asarray', (['self.sensorq[-1]'], {}), '(self.sensorq[-1])\n', (11796, 11814), True, 'import numpy as np\n'), ((11898, 11937), 'notemidi.TriggerChordTest', 'TriggerChordTest', (['trigsens', 'pitch', '"""on"""'], {}), "(trigsens, pitch, 'on')\n", (11914, 11937), False, 'from notemidi import TrigNote, TrigNote_midinum, signswitch2note, TriggerChordTest, make_C2midi\n'), ((12004, 12044), 'notemidi.TriggerChordTest', 'TriggerChordTest', (['trigsens', 'pitch', '"""off"""'], {}), "(trigsens, pitch, 'off')\n", (12020, 12044), False, 'from notemidi import TrigNote, TrigNote_midinum, signswitch2note, TriggerChordTest, make_C2midi\n'), 
((4350, 4375), 'numpy.nonzero', 'np.nonzero', (['(nswitch == -1)'], {}), '(nswitch == -1)\n', (4360, 4375), True, 'import numpy as np\n'), ((4761, 4785), 'numpy.nonzero', 'np.nonzero', (['(nswitch == 1)'], {}), '(nswitch == 1)\n', (4771, 4785), True, 'import numpy as np\n'), ((5429, 5448), 'numpy.asarray', 'np.asarray', (['flexdeq'], {}), '(flexdeq)\n', (5439, 5448), True, 'import numpy as np\n'), ((8230, 8254), 'notemidi.TrigNote', 'TrigNote', (['notearr[i]', '(80)'], {}), '(notearr[i], 80)\n', (8238, 8254), False, 'from notemidi import TrigNote, TrigNote_midinum, signswitch2note, TriggerChordTest, make_C2midi\n'), ((8501, 8524), 'notemidi.TrigNote', 'TrigNote', (['NotesOn[i]', '(0)'], {}), '(NotesOn[i], 0)\n', (8509, 8524), False, 'from notemidi import TrigNote, TrigNote_midinum, signswitch2note, TriggerChordTest, make_C2midi\n')] |
"""Solvers take a mesh and return elementwise error estimate."""
import numpy as np
from skfem import (
CellBasis,
ElementTriP1,
Functional,
InteriorFacetBasis,
condense,
solve,
)
from skfem.helpers import grad
from skfem.models.poisson import laplace as laplacian
from skfem.models.poisson import unit_load
def laplace(m, **params):
    """Solve the Poisson problem (unit load, zero Dirichlet BC) with P1 FEM
    and return an elementwise a-posteriori error estimate.

    Parameters
    ----------
    m
        A Mesh object (triangular; skfem MeshTri or compatible).

    Returns
    -------
    Array of one error indicator per element: interior residual term plus
    the accumulated edge-jump terms of its facets.
    """
    e = ElementTriP1()
    basis = CellBasis(m, e)
    A = laplacian.assemble(basis)
    b = unit_load.assemble(basis)
    # solve only on interior DOFs (homogeneous Dirichlet on the boundary)
    u = solve(*condense(A, b, I=m.interior_nodes()))
    # evaluate the error estimators
    fbasis = [InteriorFacetBasis(m, e, side=i) for i in [0, 1]]
    w = {"u" + str(i + 1): fbasis[i].interpolate(u) for i in [0, 1]}
    @Functional
    def interior_residual(w):
        # h^2 * f^2 with unit load f = 1
        h = w.h
        return h ** 2
    eta_K = interior_residual.elemental(basis)
    @Functional
    def edge_jump(w):
        # h * (jump of the normal derivative across the facet)^2
        h = w.h
        n = w.n
        dw1 = grad(w["u1"])
        dw2 = grad(w["u2"])
        return h * ((dw1[0] - dw2[0]) * n[0] + (dw1[1] - dw2[1]) * n[1]) ** 2
    eta_E = edge_jump.elemental(fbasis[0], **w)
    # scatter facet contributions back to elements; each interior facet
    # contributes half its jump term to each neighboring element
    tmp = np.zeros(m.facets.shape[1])
    np.add.at(tmp, fbasis[0].find, eta_E)
    eta_E = np.sum(0.5 * tmp[m.t2f], axis=0)
    return eta_E + eta_K
| [
"numpy.sum",
"skfem.InteriorFacetBasis",
"numpy.zeros",
"skfem.ElementTriP1",
"skfem.models.poisson.unit_load.assemble",
"skfem.CellBasis",
"skfem.models.poisson.laplace.assemble",
"skfem.helpers.grad",
"numpy.add.at"
] | [((488, 502), 'skfem.ElementTriP1', 'ElementTriP1', ([], {}), '()\n', (500, 502), False, 'from skfem import CellBasis, ElementTriP1, Functional, InteriorFacetBasis, condense, solve\n'), ((515, 530), 'skfem.CellBasis', 'CellBasis', (['m', 'e'], {}), '(m, e)\n', (524, 530), False, 'from skfem import CellBasis, ElementTriP1, Functional, InteriorFacetBasis, condense, solve\n'), ((539, 564), 'skfem.models.poisson.laplace.assemble', 'laplacian.assemble', (['basis'], {}), '(basis)\n', (557, 564), True, 'from skfem.models.poisson import laplace as laplacian\n'), ((573, 598), 'skfem.models.poisson.unit_load.assemble', 'unit_load.assemble', (['basis'], {}), '(basis)\n', (591, 598), False, 'from skfem.models.poisson import unit_load\n'), ((1220, 1247), 'numpy.zeros', 'np.zeros', (['m.facets.shape[1]'], {}), '(m.facets.shape[1])\n', (1228, 1247), True, 'import numpy as np\n'), ((1252, 1289), 'numpy.add.at', 'np.add.at', (['tmp', 'fbasis[0].find', 'eta_E'], {}), '(tmp, fbasis[0].find, eta_E)\n', (1261, 1289), True, 'import numpy as np\n'), ((1302, 1334), 'numpy.sum', 'np.sum', (['(0.5 * tmp[m.t2f])'], {'axis': '(0)'}), '(0.5 * tmp[m.t2f], axis=0)\n', (1308, 1334), True, 'import numpy as np\n'), ((703, 735), 'skfem.InteriorFacetBasis', 'InteriorFacetBasis', (['m', 'e'], {'side': 'i'}), '(m, e, side=i)\n', (721, 735), False, 'from skfem import CellBasis, ElementTriP1, Functional, InteriorFacetBasis, condense, solve\n'), ((1040, 1053), 'skfem.helpers.grad', 'grad', (["w['u1']"], {}), "(w['u1'])\n", (1044, 1053), False, 'from skfem.helpers import grad\n'), ((1068, 1081), 'skfem.helpers.grad', 'grad', (["w['u2']"], {}), "(w['u2'])\n", (1072, 1081), False, 'from skfem.helpers import grad\n')] |
"""
Provides a base test class for other test classes to inherit from.
Includes the numpy testing functions as methods.
"""
import contextlib
import os.path
import sys
import unittest
from inspect import getsourcefile
import numpy as np
from numpy.testing import (
assert_allclose,
assert_almost_equal,
assert_approx_equal,
assert_array_almost_equal,
assert_array_almost_equal_nulp,
assert_array_equal,
assert_array_less,
assert_array_max_ulp,
assert_equal,
assert_raises,
assert_string_equal,
assert_warns,
)
class BaseTestCase(unittest.TestCase):
    """
    Superclass for test cases, including support for numpy.

    The assertion helpers from ``numpy.testing`` are exposed as methods,
    and ``assert_allclose`` is registered as the equality function used
    by ``self.assertEqual()`` when both operands are numpy ndarrays.
    """

    # The attribute `test_directory` provides the path to the directory
    # containing the file `base_test.py`, which is useful to obtain
    # test resources - files which are needed to run tests.
    test_directory = os.path.dirname(os.path.abspath(getsourcefile(lambda: 0)))

    def __init__(self, *args, **kw):
        """Instance initialisation."""
        # First do the __init__ associated with the parent class.
        # NB: On Python3-only code this would be the zero-argument
        # super().__init__(*args, **kw); the explicit two-argument form
        # keeps Python2 supported.
        super(BaseTestCase, self).__init__(*args, **kw)
        # Add a test to automatically use when comparing objects of
        # type numpy ndarray. This will be used for self.assertEqual().
        self.addTypeEqualityFunc(np.ndarray, self.assert_allclose)

    @contextlib.contextmanager
    def subTest(self, *args, **kwargs):
        """
        Context manager marking a sub-test.

        For backwards compatibility with Python < 3.4, where
        ``unittest.TestCase`` has no ``subTest``, this gracefully
        degrades into a no-op.
        """
        parent = super(BaseTestCase, self)
        if hasattr(parent, "subTest"):
            # Bug fix: the parent context manager must actually be
            # *entered* here. Previously it was only yielded un-entered,
            # so failures inside the block were never recorded as
            # sub-test failures.
            with parent.subTest(*args, **kwargs) as ctx:
                yield ctx
        else:
            yield None

    # Add assertions provided by numpy to this class, so they will be
    # available as methods to all subclasses when we do our tests.
    def assert_almost_equal(self, *args, **kwargs):
        """
        Check if two items are not equal up to desired precision.
        """
        return assert_almost_equal(*args, **kwargs)

    def assert_approx_equal(self, *args, **kwargs):
        """
        Check if two items are not equal up to significant digits.
        """
        return assert_approx_equal(*args, **kwargs)

    def assert_array_almost_equal(self, *args, **kwargs):
        """
        Check if two objects are not equal up to desired precision.
        """
        return assert_array_almost_equal(*args, **kwargs)

    def assert_allclose(self, *args, **kwargs):
        """
        Check if two objects are equal up to desired tolerance.
        """
        return assert_allclose(*args, **kwargs)

    def assert_array_almost_equal_nulp(self, *args, **kwargs):
        """
        Compare two arrays relatively to their spacing.
        """
        return assert_array_almost_equal_nulp(*args, **kwargs)

    def assert_array_max_ulp(self, *args, **kwargs):
        """
        Check that all items of arrays differ in at most N Units in the Last Place.
        """
        return assert_array_max_ulp(*args, **kwargs)

    def assert_array_equal(self, *args, **kwargs):
        """
        Check if two array_like objects are equal.
        """
        return assert_array_equal(*args, **kwargs)

    def assert_array_less(self, *args, **kwargs):
        """
        Check if two array_like objects are not ordered by less than.
        """
        return assert_array_less(*args, **kwargs)

    def assert_equal(self, *args, **kwargs):
        """
        Check if two objects are not equal.
        """
        return assert_equal(*args, **kwargs)

    def assert_raises(self, *args, **kwargs):
        """
        Check that an exception of class exception_class is thrown by callable.
        """
        return assert_raises(*args, **kwargs)

    def assert_warns(self, *args, **kwargs):
        """
        Check that the given callable throws the specified warning.
        """
        return assert_warns(*args, **kwargs)

    def assert_string_equal(self, *args, **kwargs):
        """
        Test if two strings are equal.
        """
        return assert_string_equal(*args, **kwargs)
| [
"numpy.testing.assert_raises",
"numpy.testing.assert_almost_equal",
"numpy.testing.assert_array_almost_equal_nulp",
"numpy.testing.assert_array_equal",
"numpy.testing.assert_approx_equal",
"numpy.testing.assert_string_equal",
"numpy.testing.assert_equal",
"numpy.testing.assert_allclose",
"numpy.test... | [((2165, 2201), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (['*args'], {}), '(*args, **kwargs)\n', (2184, 2201), False, 'from numpy.testing import assert_allclose, assert_almost_equal, assert_approx_equal, assert_array_almost_equal, assert_array_almost_equal_nulp, assert_array_equal, assert_array_less, assert_array_max_ulp, assert_equal, assert_raises, assert_string_equal, assert_warns\n'), ((2361, 2397), 'numpy.testing.assert_approx_equal', 'assert_approx_equal', (['*args'], {}), '(*args, **kwargs)\n', (2380, 2397), False, 'from numpy.testing import assert_allclose, assert_almost_equal, assert_approx_equal, assert_array_almost_equal, assert_array_almost_equal_nulp, assert_array_equal, assert_array_less, assert_array_max_ulp, assert_equal, assert_raises, assert_string_equal, assert_warns\n'), ((2564, 2606), 'numpy.testing.assert_array_almost_equal', 'assert_array_almost_equal', (['*args'], {}), '(*args, **kwargs)\n', (2589, 2606), False, 'from numpy.testing import assert_allclose, assert_almost_equal, assert_approx_equal, assert_array_almost_equal, assert_array_almost_equal_nulp, assert_array_equal, assert_array_less, assert_array_max_ulp, assert_equal, assert_raises, assert_string_equal, assert_warns\n'), ((2759, 2791), 'numpy.testing.assert_allclose', 'assert_allclose', (['*args'], {}), '(*args, **kwargs)\n', (2774, 2791), False, 'from numpy.testing import assert_allclose, assert_almost_equal, assert_approx_equal, assert_array_almost_equal, assert_array_almost_equal_nulp, assert_array_equal, assert_array_less, assert_array_max_ulp, assert_equal, assert_raises, assert_string_equal, assert_warns\n'), ((2951, 2998), 'numpy.testing.assert_array_almost_equal_nulp', 'assert_array_almost_equal_nulp', (['*args'], {}), '(*args, **kwargs)\n', (2981, 2998), False, 'from numpy.testing import assert_allclose, assert_almost_equal, assert_approx_equal, assert_array_almost_equal, assert_array_almost_equal_nulp, assert_array_equal, 
assert_array_less, assert_array_max_ulp, assert_equal, assert_raises, assert_string_equal, assert_warns\n'), ((3176, 3213), 'numpy.testing.assert_array_max_ulp', 'assert_array_max_ulp', (['*args'], {}), '(*args, **kwargs)\n', (3196, 3213), False, 'from numpy.testing import assert_allclose, assert_almost_equal, assert_approx_equal, assert_array_almost_equal, assert_array_almost_equal_nulp, assert_array_equal, assert_array_less, assert_array_max_ulp, assert_equal, assert_raises, assert_string_equal, assert_warns\n'), ((3356, 3391), 'numpy.testing.assert_array_equal', 'assert_array_equal', (['*args'], {}), '(*args, **kwargs)\n', (3374, 3391), False, 'from numpy.testing import assert_allclose, assert_almost_equal, assert_approx_equal, assert_array_almost_equal, assert_array_almost_equal_nulp, assert_array_equal, assert_array_less, assert_array_max_ulp, assert_equal, assert_raises, assert_string_equal, assert_warns\n'), ((3552, 3586), 'numpy.testing.assert_array_less', 'assert_array_less', (['*args'], {}), '(*args, **kwargs)\n', (3569, 3586), False, 'from numpy.testing import assert_allclose, assert_almost_equal, assert_approx_equal, assert_array_almost_equal, assert_array_almost_equal_nulp, assert_array_equal, assert_array_less, assert_array_max_ulp, assert_equal, assert_raises, assert_string_equal, assert_warns\n'), ((3716, 3745), 'numpy.testing.assert_equal', 'assert_equal', (['*args'], {}), '(*args, **kwargs)\n', (3728, 3745), False, 'from numpy.testing import assert_allclose, assert_almost_equal, assert_approx_equal, assert_array_almost_equal, assert_array_almost_equal_nulp, assert_array_equal, assert_array_less, assert_array_max_ulp, assert_equal, assert_raises, assert_string_equal, assert_warns\n'), ((3912, 3942), 'numpy.testing.assert_raises', 'assert_raises', (['*args'], {}), '(*args, **kwargs)\n', (3925, 3942), False, 'from numpy.testing import assert_allclose, assert_almost_equal, assert_approx_equal, assert_array_almost_equal, assert_array_almost_equal_nulp, 
assert_array_equal, assert_array_less, assert_array_max_ulp, assert_equal, assert_raises, assert_string_equal, assert_warns\n'), ((4096, 4125), 'numpy.testing.assert_warns', 'assert_warns', (['*args'], {}), '(*args, **kwargs)\n', (4108, 4125), False, 'from numpy.testing import assert_allclose, assert_almost_equal, assert_approx_equal, assert_array_almost_equal, assert_array_almost_equal_nulp, assert_array_equal, assert_array_less, assert_array_max_ulp, assert_equal, assert_raises, assert_string_equal, assert_warns\n'), ((4257, 4293), 'numpy.testing.assert_string_equal', 'assert_string_equal', (['*args'], {}), '(*args, **kwargs)\n', (4276, 4293), False, 'from numpy.testing import assert_allclose, assert_almost_equal, assert_approx_equal, assert_array_almost_equal, assert_array_almost_equal_nulp, assert_array_equal, assert_array_less, assert_array_max_ulp, assert_equal, assert_raises, assert_string_equal, assert_warns\n'), ((932, 957), 'inspect.getsourcefile', 'getsourcefile', (['(lambda : 0)'], {}), '(lambda : 0)\n', (945, 957), False, 'from inspect import getsourcefile\n')] |
import h5py, time
import numpy as np
from utility import randomData
from keras.models import Sequential
from keras.layers.core import Dense, Activation, Dropout
from keras.layers.recurrent import LSTM
from keras.layers.wrappers import TimeDistributed
def buildModel(trainXShape, trainYShape, numLayers, neuonPerLayer):
    '''
    Construct a stacked-LSTM sequence-to-sequence regression model.

    The first LSTM consumes inputs shaped after ``trainXShape``; then
    ``numLayers`` further LSTM layers are stacked (widths taken from
    ``neuonPerLayer``), followed by a time-distributed linear readout
    sized to ``trainYShape``. The model is compiled with MSE loss and
    the Adam optimizer.
    '''
    net = Sequential()
    net.add(LSTM(neuonPerLayer[0],
                 input_dim=trainXShape[2],
                 input_length=trainXShape[1],
                 return_sequences=True))
    # Equivalent to the original range(1, numLayers + 1) with index i - 1.
    for layer_idx in range(numLayers):
        net.add(LSTM(neuonPerLayer[layer_idx], return_sequences=True))
    net.add(TimeDistributed(Dense(trainYShape[2])))
    net.add(Activation('linear'))
    net.compile(loss='mean_squared_error', optimizer='adam')
    return net
def varyingNeurons(low, high, f, V=1):
    """Time one training epoch for each LSTM width in ``range(low, high)``.

    Uses a single random sample from ``f`` and a one-layer model; saves
    an (N, 2) array of [neuron count, elapsed seconds] to disk.
    """
    numSample = 1
    numLayers = 1
    trainX, trainY = randomData(f, numSample)
    widths = []
    timings = []
    for width in range(low, high):
        model = buildModel(trainX.shape, trainY.shape, numLayers, [width])
        t0 = time.time()
        model.fit(trainX, trainY, nb_epoch=1, batch_size=1, verbose=V)
        timings.append(time.time() - t0)
        widths.append(width)
    finalResult = np.column_stack((np.array(widths), np.array(timings)))
    np.save('./resultData/timeVsNumneuron', finalResult)
def varyingLayers(low, high, f, V=1):
    """Time one training epoch for each layer count in ``range(low, high)``.

    Every layer is 15 units wide; a single random sample from ``f`` is
    used. Saves an (N, 2) array of [layer count, elapsed seconds].
    """
    numSample = 1
    trainX, trainY = randomData(f, numSample)
    depths = []
    timings = []
    for depth in range(low, high):
        model = buildModel(trainX.shape, trainY.shape, depth, [15] * depth)
        t0 = time.time()
        model.fit(trainX, trainY, nb_epoch=1, batch_size=1, verbose=V)
        timings.append(time.time() - t0)
        depths.append(depth)
    finalResult = np.column_stack((np.array(depths), np.array(timings)))
    np.save('./resultData/timeVsNumLayer', finalResult)
def varyingSamples(low, high, f, V=1):
    """Time one training epoch for each sample count in ``range(low, high)``.

    Uses a one-layer, 15-unit model; fresh random data is drawn from
    ``f`` at every size. Saves an (N, 2) array of
    [sample count, elapsed seconds].
    """
    numLayer = 1
    numNeuron = [15]
    counts = []
    timings = []
    for nsamp in range(low, high):
        trainX, trainY = randomData(f, nsamp)
        model = buildModel(trainX.shape, trainY.shape, numLayer, numNeuron)
        t0 = time.time()
        model.fit(trainX, trainY, nb_epoch=1, batch_size=1, verbose=V)
        timings.append(time.time() - t0)
        counts.append(nsamp)
    finalResult = np.column_stack((np.array(counts), np.array(timings)))
    np.save('./resultData/timeVsNumSample', finalResult)
if __name__=="__main__":
    # Pre-processed training data produced upstream; opened read-only.
    f=h5py.File('processedData.hdf5','r')
    # Sweep each hyper-parameter independently, timing one epoch per
    # setting; each sweep writes its own result file under ./resultData/.
    varyingNeurons(10,51,f)
    varyingLayers(1,11,f)
    varyingSamples(1,101,f)
| [
"h5py.File",
"numpy.save",
"keras.layers.core.Dense",
"keras.layers.core.Activation",
"time.time",
"numpy.append",
"numpy.array",
"utility.randomData",
"keras.layers.recurrent.LSTM",
"keras.models.Sequential",
"numpy.concatenate"
] | [((399, 411), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (409, 411), False, 'from keras.models import Sequential\n'), ((895, 919), 'utility.randomData', 'randomData', (['f', 'numSample'], {}), '(f, numSample)\n', (905, 919), False, 'from utility import randomData\n'), ((932, 944), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (940, 944), True, 'import numpy as np\n'), ((957, 969), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (965, 969), True, 'import numpy as np\n'), ((1300, 1372), 'numpy.concatenate', 'np.concatenate', (['(resultX[:, np.newaxis], resultY[:, np.newaxis])'], {'axis': '(1)'}), '((resultX[:, np.newaxis], resultY[:, np.newaxis]), axis=1)\n', (1314, 1372), True, 'import numpy as np\n'), ((1373, 1425), 'numpy.save', 'np.save', (['"""./resultData/timeVsNumneuron"""', 'finalResult'], {}), "('./resultData/timeVsNumneuron', finalResult)\n", (1380, 1425), True, 'import numpy as np\n'), ((1502, 1526), 'utility.randomData', 'randomData', (['f', 'numSample'], {}), '(f, numSample)\n', (1512, 1526), False, 'from utility import randomData\n'), ((1539, 1551), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (1547, 1551), True, 'import numpy as np\n'), ((1564, 1576), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (1572, 1576), True, 'import numpy as np\n'), ((1935, 2007), 'numpy.concatenate', 'np.concatenate', (['(resultX[:, np.newaxis], resultY[:, np.newaxis])'], {'axis': '(1)'}), '((resultX[:, np.newaxis], resultY[:, np.newaxis]), axis=1)\n', (1949, 2007), True, 'import numpy as np\n'), ((2008, 2059), 'numpy.save', 'np.save', (['"""./resultData/timeVsNumLayer"""', 'finalResult'], {}), "('./resultData/timeVsNumLayer', finalResult)\n", (2015, 2059), True, 'import numpy as np\n'), ((2146, 2158), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (2154, 2158), True, 'import numpy as np\n'), ((2171, 2183), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (2179, 2183), True, 'import numpy as np\n'), ((2561, 2633), 
'numpy.concatenate', 'np.concatenate', (['(resultX[:, np.newaxis], resultY[:, np.newaxis])'], {'axis': '(1)'}), '((resultX[:, np.newaxis], resultY[:, np.newaxis]), axis=1)\n', (2575, 2633), True, 'import numpy as np\n'), ((2634, 2686), 'numpy.save', 'np.save', (['"""./resultData/timeVsNumSample"""', 'finalResult'], {}), "('./resultData/timeVsNumSample', finalResult)\n", (2641, 2686), True, 'import numpy as np\n'), ((2721, 2757), 'h5py.File', 'h5py.File', (['"""processedData.hdf5"""', '"""r"""'], {}), "('processedData.hdf5', 'r')\n", (2730, 2757), False, 'import h5py, time\n'), ((426, 531), 'keras.layers.recurrent.LSTM', 'LSTM', (['neuonPerLayer[0]'], {'input_dim': 'trainXShape[2]', 'input_length': 'trainXShape[1]', 'return_sequences': '(True)'}), '(neuonPerLayer[0], input_dim=trainXShape[2], input_length=trainXShape[1\n ], return_sequences=True)\n', (430, 531), False, 'from keras.layers.recurrent import LSTM\n'), ((698, 718), 'keras.layers.core.Activation', 'Activation', (['"""linear"""'], {}), "('linear')\n", (708, 718), False, 'from keras.layers.core import Dense, Activation, Dropout\n'), ((1102, 1113), 'time.time', 'time.time', ([], {}), '()\n', (1111, 1113), False, 'import h5py, time\n'), ((1254, 1283), 'numpy.append', 'np.append', (['resultX', 'numNeuron'], {}), '(resultX, numNeuron)\n', (1263, 1283), True, 'import numpy as np\n'), ((1738, 1749), 'time.time', 'time.time', ([], {}), '()\n', (1747, 1749), False, 'import h5py, time\n'), ((1890, 1918), 'numpy.append', 'np.append', (['resultX', 'numLayer'], {}), '(resultX, numLayer)\n', (1899, 1918), True, 'import numpy as np\n'), ((2249, 2273), 'utility.randomData', 'randomData', (['f', 'numSample'], {}), '(f, numSample)\n', (2259, 2273), False, 'from utility import randomData\n'), ((2363, 2374), 'time.time', 'time.time', ([], {}), '()\n', (2372, 2374), False, 'import h5py, time\n'), ((2515, 2544), 'numpy.append', 'np.append', (['resultX', 'numSample'], {}), '(resultX, numSample)\n', (2524, 2544), True, 'import 
numpy as np\n'), ((581, 630), 'keras.layers.recurrent.LSTM', 'LSTM', (['neuonPerLayer[i - 1]'], {'return_sequences': '(True)'}), '(neuonPerLayer[i - 1], return_sequences=True)\n', (585, 630), False, 'from keras.layers.recurrent import LSTM\n'), ((660, 681), 'keras.layers.core.Dense', 'Dense', (['trainYShape[2]'], {}), '(trainYShape[2])\n', (665, 681), False, 'from keras.layers.core import Dense, Activation, Dropout\n'), ((1219, 1230), 'time.time', 'time.time', ([], {}), '()\n', (1228, 1230), False, 'import h5py, time\n'), ((1855, 1866), 'time.time', 'time.time', ([], {}), '()\n', (1864, 1866), False, 'import h5py, time\n'), ((2480, 2491), 'time.time', 'time.time', ([], {}), '()\n', (2489, 2491), False, 'import h5py, time\n')] |
"""
2.8 numpy copy & deep copy
"""
import numpy as np
a = np.arange(4)
print('array a:\n',a)
b = a
c = a
d = b
# 更改a[0]
a[0] = 11
print('更改a[0]的值为11\n',a[0])
print('b is a?\n', b is a)
print('b的值也会改变\n',b)
print('c is a?\n', c is a)
print('d is a?\n', d is a)
# 如果不想关联这些变量,则需要使用深拷贝(deep copy)
b = a.copy()
print('更改b为a的深拷贝,b is a?\n',b is a)
| [
"numpy.arange"
] | [((62, 74), 'numpy.arange', 'np.arange', (['(4)'], {}), '(4)\n', (71, 74), True, 'import numpy as np\n')] |
import functools
import numpy as np
import pytest
import tensorflow as tf
from mock import mock
from tests.layers.convolutional.helper import (input_maybe_to_channels_last,
strides_tuple_to_channels_last,
output_maybe_to_channels_first)
from tfsnippet.layers import shifted_conv2d, conv2d
# Keep a handle on the real tf.nn.conv2d before it gets monkey-patched
# (the test below installs `patched_conv2d` in its place via mock.patch).
tf_conv2d = tf.nn.conv2d
def patched_conv2d(input, filter, strides, padding, data_format,
                   dilations):
    """A patched version of `tf.nn.conv2d`, emulates NCHW by NHWC.

    The input and the strides/dilations tuples are transposed to
    channels-last when `data_format` is channels-first, the real conv2d
    is run in 'NHWC', and the output is transposed back to match the
    requested `data_format`.
    """
    input = input_maybe_to_channels_last(input, data_format=data_format)
    [strides, dilations] = strides_tuple_to_channels_last(
        [strides, dilations], data_format=data_format)
    output = tf_conv2d(
        input=input, filter=filter, strides=strides, padding=padding,
        data_format='NHWC', dilations=dilations
    )
    output = output_maybe_to_channels_first(output, data_format=data_format)
    return output
class ShiftedConv2dTestCase(tf.test.TestCase):
    """Tests for the `shifted_conv2d` layer."""
    def test_shifted_conv2d(self):
        """Check name scoping, argument validation, and that each
        `spatial_shift` equals an explicitly zero-padded VALID conv2d
        (the NCHW case runs through the NHWC-emulating mock above)."""
        assert_allclose = functools.partial(
            np.testing.assert_allclose, rtol=1e-5, atol=1e-6)
        x = np.random.normal(size=[3, 11, 13, 7]).astype(np.float32)
        def my_conv2d(*args, **kwargs):
            return conv2d(*args, **kwargs)
        # test the name scope derived by shifted_conv2d
        with tf.Graph().as_default():
            y = shifted_conv2d(
                input=x, out_channels=5, kernel_size=(1, 1),
                spatial_shift=(1, -1), conv_fn=my_conv2d
            )
            self.assertTrue(y.name.startswith('shifted_conv2d/my_conv2d/'))
        # test errors
        with pytest.raises(TypeError,
                           match='`spatial_shift` must be a tuple with two '
                                 'elements, and the elements can only be '
                                 '-1, 0 or 1'):
            _ = shifted_conv2d(input=x, out_channels=5, kernel_size=(2, 3),
                               spatial_shift=(-2, 1))
        with pytest.raises(TypeError,
                           match='`spatial_shift` must be a tuple with two '
                                 'elements, and the elements can only be '
                                 '-1, 0 or 1'):
            _ = shifted_conv2d(input=x, out_channels=5, kernel_size=(2, 3),
                               spatial_shift=(-1,))
        with pytest.raises(ValueError,
                           match='`padding` argument is not supported'):
            _ = shifted_conv2d(input=x, out_channels=5, kernel_size=(2, 3),
                               spatial_shift=(-1, 1), padding='SAME')
        with self.test_session() as sess:
            ####################################################################
            # spatial_shift == (0, 0) should correspond to conv2d SAME padding #
            ####################################################################
            # kernel_size (1, 1)
            kernel = np.random.normal(size=(1, 1, 7, 5)).astype(np.float32)
            assert_allclose(
                *sess.run([
                    shifted_conv2d(
                        input=x, out_channels=5, kernel_size=(1, 1),
                        spatial_shift=(0, 0), kernel=kernel, use_bias=False
                    ),
                    conv2d(
                        input=x, out_channels=5, kernel_size=(1, 1),
                        kernel=kernel, use_bias=False
                    )
                ])
            )
            # kernel_size (2, 3)
            kernel = np.random.normal(size=(2, 3, 7, 5)).astype(np.float32)
            assert_allclose(
                *sess.run([
                    shifted_conv2d(
                        input=x, out_channels=5, kernel_size=(2, 3),
                        spatial_shift=(0, 0), kernel=kernel, use_bias=False
                    ),
                    conv2d(
                        input=x, out_channels=5, kernel_size=(2, 3),
                        kernel=kernel, use_bias=False
                    )
                ])
            )
            ############################
            # spatial_shift == (-1, 0) #
            ############################
            # kernel_size (1, 1), no shift actually
            kernel = np.random.normal(size=(1, 1, 7, 5)).astype(np.float32)
            assert_allclose(
                *sess.run([
                    shifted_conv2d(
                        input=x, out_channels=5, kernel_size=(1, 1),
                        spatial_shift=(-1, 0), kernel=kernel, use_bias=False
                    ),
                    conv2d(
                        input=x, out_channels=5, kernel_size=(1, 1),
                        kernel=kernel, use_bias=False, padding='VALID'
                    )
                ])
            )
            # kernel_size (2, 3), shift accordingly
            kernel = np.random.normal(size=(2, 3, 7, 5)).astype(np.float32)
            x2 = np.zeros([3, 12, 15, 7], dtype=np.float32)
            x2[:, :-1, 1:-1, :] = x
            assert_allclose(
                *sess.run([
                    shifted_conv2d(
                        input=x, out_channels=5, kernel_size=(2, 3),
                        spatial_shift=(-1, 0), kernel=kernel, use_bias=False
                    ),
                    conv2d(
                        input=x2, out_channels=5, kernel_size=(2, 3),
                        kernel=kernel, use_bias=False, padding='VALID'
                    )
                ])
            )
            ############################
            # spatial_shift == (0, 1) #
            ############################
            # kernel_size (1, 1), no shift actually
            kernel = np.random.normal(size=(1, 1, 7, 5)).astype(np.float32)
            assert_allclose(
                *sess.run([
                    shifted_conv2d(
                        input=x, out_channels=5, kernel_size=(1, 1),
                        spatial_shift=(0, 1), kernel=kernel, use_bias=False
                    ),
                    conv2d(
                        input=x, out_channels=5, kernel_size=(1, 1),
                        kernel=kernel, use_bias=False, padding='VALID'
                    )
                ])
            )
            # kernel_size (2, 3), shift accordingly
            kernel = np.random.normal(size=(2, 3, 7, 5)).astype(np.float32)
            x2 = np.zeros([3, 12, 15, 7], dtype=np.float32)
            x2[:, :-1, 2:, :] = x
            assert_allclose(
                *sess.run([
                    shifted_conv2d(
                        input=x, out_channels=5, kernel_size=(2, 3),
                        spatial_shift=(0, 1), kernel=kernel, use_bias=False
                    ),
                    conv2d(
                        input=x2, out_channels=5, kernel_size=(2, 3),
                        kernel=kernel, use_bias=False, padding='VALID'
                    )
                ])
            )
            ##################################
            # spatial_shift == (-1, 1), NCHW #
            ##################################
            x = np.transpose(x, [0, 3, 1, 2])
            with mock.patch('tensorflow.nn.conv2d', patched_conv2d):
                # kernel_size (1, 1), no shift actually
                kernel = np.random.normal(size=(1, 1, 7, 5)).astype(np.float32)
                assert_allclose(
                    *sess.run([
                        shifted_conv2d(
                            input=x, out_channels=5, kernel_size=(1, 1),
                            spatial_shift=(-1, 1), kernel=kernel,
                            use_bias=False, channels_last=False
                        ),
                        conv2d(
                            input=x, out_channels=5, kernel_size=(1, 1),
                            kernel=kernel, use_bias=False,
                            padding='VALID', channels_last=False
                        )
                    ])
                )
                # kernel_size (2, 3), shift accordingly
                kernel = np.random.normal(size=(2, 3, 7, 5)).astype(np.float32)
                x2 = np.zeros([3, 7, 12, 15], dtype=np.float32)
                x2[:, :, :-1, 2:] = x
                assert_allclose(
                    *sess.run([
                        shifted_conv2d(
                            input=x, out_channels=5, kernel_size=(2, 3),
                            spatial_shift=(-1, 1), kernel=kernel,
                            use_bias=False, channels_last=False
                        ),
                        conv2d(
                            input=x2, out_channels=5, kernel_size=(2, 3),
                            kernel=kernel, use_bias=False,
                            padding='VALID', channels_last=False
                        )
                    ])
                )
| [
"functools.partial",
"mock.mock.patch",
"tests.layers.convolutional.helper.strides_tuple_to_channels_last",
"numpy.zeros",
"numpy.transpose",
"pytest.raises",
"numpy.random.normal",
"tfsnippet.layers.shifted_conv2d",
"tfsnippet.layers.conv2d",
"tensorflow.Graph",
"tests.layers.convolutional.help... | [((591, 651), 'tests.layers.convolutional.helper.input_maybe_to_channels_last', 'input_maybe_to_channels_last', (['input'], {'data_format': 'data_format'}), '(input, data_format=data_format)\n', (619, 651), False, 'from tests.layers.convolutional.helper import input_maybe_to_channels_last, strides_tuple_to_channels_last, output_maybe_to_channels_first\n'), ((679, 756), 'tests.layers.convolutional.helper.strides_tuple_to_channels_last', 'strides_tuple_to_channels_last', (['[strides, dilations]'], {'data_format': 'data_format'}), '([strides, dilations], data_format=data_format)\n', (709, 756), False, 'from tests.layers.convolutional.helper import input_maybe_to_channels_last, strides_tuple_to_channels_last, output_maybe_to_channels_first\n'), ((927, 990), 'tests.layers.convolutional.helper.output_maybe_to_channels_first', 'output_maybe_to_channels_first', (['output'], {'data_format': 'data_format'}), '(output, data_format=data_format)\n', (957, 990), False, 'from tests.layers.convolutional.helper import input_maybe_to_channels_last, strides_tuple_to_channels_last, output_maybe_to_channels_first\n'), ((1120, 1189), 'functools.partial', 'functools.partial', (['np.testing.assert_allclose'], {'rtol': '(1e-05)', 'atol': '(1e-06)'}), '(np.testing.assert_allclose, rtol=1e-05, atol=1e-06)\n', (1137, 1189), False, 'import functools\n'), ((1330, 1353), 'tfsnippet.layers.conv2d', 'conv2d', (['*args'], {}), '(*args, **kwargs)\n', (1336, 1353), False, 'from tfsnippet.layers import shifted_conv2d, conv2d\n'), ((1465, 1571), 'tfsnippet.layers.shifted_conv2d', 'shifted_conv2d', ([], {'input': 'x', 'out_channels': '(5)', 'kernel_size': '(1, 1)', 'spatial_shift': '(1, -1)', 'conv_fn': 'my_conv2d'}), '(input=x, out_channels=5, kernel_size=(1, 1), spatial_shift=(\n 1, -1), conv_fn=my_conv2d)\n', (1479, 1571), False, 'from tfsnippet.layers import shifted_conv2d, conv2d\n'), ((1725, 1859), 'pytest.raises', 'pytest.raises', (['TypeError'], {'match': 
'"""`spatial_shift` must be a tuple with two elements, and the elements can only be -1, 0 or 1"""'}), "(TypeError, match=\n '`spatial_shift` must be a tuple with two elements, and the elements can only be -1, 0 or 1'\n )\n", (1738, 1859), False, 'import pytest\n'), ((1966, 2053), 'tfsnippet.layers.shifted_conv2d', 'shifted_conv2d', ([], {'input': 'x', 'out_channels': '(5)', 'kernel_size': '(2, 3)', 'spatial_shift': '(-2, 1)'}), '(input=x, out_channels=5, kernel_size=(2, 3), spatial_shift=(\n -2, 1))\n', (1980, 2053), False, 'from tfsnippet.layers import shifted_conv2d, conv2d\n'), ((2093, 2227), 'pytest.raises', 'pytest.raises', (['TypeError'], {'match': '"""`spatial_shift` must be a tuple with two elements, and the elements can only be -1, 0 or 1"""'}), "(TypeError, match=\n '`spatial_shift` must be a tuple with two elements, and the elements can only be -1, 0 or 1'\n )\n", (2106, 2227), False, 'import pytest\n'), ((2334, 2419), 'tfsnippet.layers.shifted_conv2d', 'shifted_conv2d', ([], {'input': 'x', 'out_channels': '(5)', 'kernel_size': '(2, 3)', 'spatial_shift': '(-1,)'}), '(input=x, out_channels=5, kernel_size=(2, 3), spatial_shift=(-1,)\n )\n', (2348, 2419), False, 'from tfsnippet.layers import shifted_conv2d, conv2d\n'), ((2459, 2529), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': '"""`padding` argument is not supported"""'}), "(ValueError, match='`padding` argument is not supported')\n", (2472, 2529), False, 'import pytest\n'), ((2574, 2677), 'tfsnippet.layers.shifted_conv2d', 'shifted_conv2d', ([], {'input': 'x', 'out_channels': '(5)', 'kernel_size': '(2, 3)', 'spatial_shift': '(-1, 1)', 'padding': '"""SAME"""'}), "(input=x, out_channels=5, kernel_size=(2, 3), spatial_shift=(\n -1, 1), padding='SAME')\n", (2588, 2677), False, 'from tfsnippet.layers import shifted_conv2d, conv2d\n'), ((5028, 5070), 'numpy.zeros', 'np.zeros', (['[3, 12, 15, 7]'], {'dtype': 'np.float32'}), '([3, 12, 15, 7], dtype=np.float32)\n', (5036, 5070), True, 'import numpy 
as np\n'), ((6475, 6517), 'numpy.zeros', 'np.zeros', (['[3, 12, 15, 7]'], {'dtype': 'np.float32'}), '([3, 12, 15, 7], dtype=np.float32)\n', (6483, 6517), True, 'import numpy as np\n'), ((7195, 7224), 'numpy.transpose', 'np.transpose', (['x', '[0, 3, 1, 2]'], {}), '(x, [0, 3, 1, 2])\n', (7207, 7224), True, 'import numpy as np\n'), ((1213, 1250), 'numpy.random.normal', 'np.random.normal', ([], {'size': '[3, 11, 13, 7]'}), '(size=[3, 11, 13, 7])\n', (1229, 1250), True, 'import numpy as np\n'), ((7243, 7293), 'mock.mock.patch', 'mock.patch', (['"""tensorflow.nn.conv2d"""', 'patched_conv2d'], {}), "('tensorflow.nn.conv2d', patched_conv2d)\n", (7253, 7293), False, 'from mock import mock\n'), ((8220, 8262), 'numpy.zeros', 'np.zeros', (['[3, 7, 12, 15]'], {'dtype': 'np.float32'}), '([3, 7, 12, 15], dtype=np.float32)\n', (8228, 8262), True, 'import numpy as np\n'), ((1424, 1434), 'tensorflow.Graph', 'tf.Graph', ([], {}), '()\n', (1432, 1434), True, 'import tensorflow as tf\n'), ((3045, 3080), 'numpy.random.normal', 'np.random.normal', ([], {'size': '(1, 1, 7, 5)'}), '(size=(1, 1, 7, 5))\n', (3061, 3080), True, 'import numpy as np\n'), ((3622, 3657), 'numpy.random.normal', 'np.random.normal', ([], {'size': '(2, 3, 7, 5)'}), '(size=(2, 3, 7, 5))\n', (3638, 3657), True, 'import numpy as np\n'), ((4342, 4377), 'numpy.random.normal', 'np.random.normal', ([], {'size': '(1, 1, 7, 5)'}), '(size=(1, 1, 7, 5))\n', (4358, 4377), True, 'import numpy as np\n'), ((4956, 4991), 'numpy.random.normal', 'np.random.normal', ([], {'size': '(2, 3, 7, 5)'}), '(size=(2, 3, 7, 5))\n', (4972, 4991), True, 'import numpy as np\n'), ((5790, 5825), 'numpy.random.normal', 'np.random.normal', ([], {'size': '(1, 1, 7, 5)'}), '(size=(1, 1, 7, 5))\n', (5806, 5825), True, 'import numpy as np\n'), ((6403, 6438), 'numpy.random.normal', 'np.random.normal', ([], {'size': '(2, 3, 7, 5)'}), '(size=(2, 3, 7, 5))\n', (6419, 6438), True, 'import numpy as np\n'), ((7376, 7411), 'numpy.random.normal', 
'np.random.normal', ([], {'size': '(1, 1, 7, 5)'}), '(size=(1, 1, 7, 5))\n', (7392, 7411), True, 'import numpy as np\n'), ((8144, 8179), 'numpy.random.normal', 'np.random.normal', ([], {'size': '(2, 3, 7, 5)'}), '(size=(2, 3, 7, 5))\n', (8160, 8179), True, 'import numpy as np\n'), ((3177, 3294), 'tfsnippet.layers.shifted_conv2d', 'shifted_conv2d', ([], {'input': 'x', 'out_channels': '(5)', 'kernel_size': '(1, 1)', 'spatial_shift': '(0, 0)', 'kernel': 'kernel', 'use_bias': '(False)'}), '(input=x, out_channels=5, kernel_size=(1, 1), spatial_shift=(\n 0, 0), kernel=kernel, use_bias=False)\n', (3191, 3294), False, 'from tfsnippet.layers import shifted_conv2d, conv2d\n'), ((3381, 3468), 'tfsnippet.layers.conv2d', 'conv2d', ([], {'input': 'x', 'out_channels': '(5)', 'kernel_size': '(1, 1)', 'kernel': 'kernel', 'use_bias': '(False)'}), '(input=x, out_channels=5, kernel_size=(1, 1), kernel=kernel, use_bias\n =False)\n', (3387, 3468), False, 'from tfsnippet.layers import shifted_conv2d, conv2d\n'), ((3754, 3871), 'tfsnippet.layers.shifted_conv2d', 'shifted_conv2d', ([], {'input': 'x', 'out_channels': '(5)', 'kernel_size': '(2, 3)', 'spatial_shift': '(0, 0)', 'kernel': 'kernel', 'use_bias': '(False)'}), '(input=x, out_channels=5, kernel_size=(2, 3), spatial_shift=(\n 0, 0), kernel=kernel, use_bias=False)\n', (3768, 3871), False, 'from tfsnippet.layers import shifted_conv2d, conv2d\n'), ((3958, 4045), 'tfsnippet.layers.conv2d', 'conv2d', ([], {'input': 'x', 'out_channels': '(5)', 'kernel_size': '(2, 3)', 'kernel': 'kernel', 'use_bias': '(False)'}), '(input=x, out_channels=5, kernel_size=(2, 3), kernel=kernel, use_bias\n =False)\n', (3964, 4045), False, 'from tfsnippet.layers import shifted_conv2d, conv2d\n'), ((4474, 4592), 'tfsnippet.layers.shifted_conv2d', 'shifted_conv2d', ([], {'input': 'x', 'out_channels': '(5)', 'kernel_size': '(1, 1)', 'spatial_shift': '(-1, 0)', 'kernel': 'kernel', 'use_bias': '(False)'}), '(input=x, out_channels=5, kernel_size=(1, 1), 
spatial_shift=(\n -1, 0), kernel=kernel, use_bias=False)\n', (4488, 4592), False, 'from tfsnippet.layers import shifted_conv2d, conv2d\n'), ((4679, 4783), 'tfsnippet.layers.conv2d', 'conv2d', ([], {'input': 'x', 'out_channels': '(5)', 'kernel_size': '(1, 1)', 'kernel': 'kernel', 'use_bias': '(False)', 'padding': '"""VALID"""'}), "(input=x, out_channels=5, kernel_size=(1, 1), kernel=kernel, use_bias\n =False, padding='VALID')\n", (4685, 4783), False, 'from tfsnippet.layers import shifted_conv2d, conv2d\n'), ((5184, 5302), 'tfsnippet.layers.shifted_conv2d', 'shifted_conv2d', ([], {'input': 'x', 'out_channels': '(5)', 'kernel_size': '(2, 3)', 'spatial_shift': '(-1, 0)', 'kernel': 'kernel', 'use_bias': '(False)'}), '(input=x, out_channels=5, kernel_size=(2, 3), spatial_shift=(\n -1, 0), kernel=kernel, use_bias=False)\n', (5198, 5302), False, 'from tfsnippet.layers import shifted_conv2d, conv2d\n'), ((5389, 5493), 'tfsnippet.layers.conv2d', 'conv2d', ([], {'input': 'x2', 'out_channels': '(5)', 'kernel_size': '(2, 3)', 'kernel': 'kernel', 'use_bias': '(False)', 'padding': '"""VALID"""'}), "(input=x2, out_channels=5, kernel_size=(2, 3), kernel=kernel,\n use_bias=False, padding='VALID')\n", (5395, 5493), False, 'from tfsnippet.layers import shifted_conv2d, conv2d\n'), ((5922, 6039), 'tfsnippet.layers.shifted_conv2d', 'shifted_conv2d', ([], {'input': 'x', 'out_channels': '(5)', 'kernel_size': '(1, 1)', 'spatial_shift': '(0, 1)', 'kernel': 'kernel', 'use_bias': '(False)'}), '(input=x, out_channels=5, kernel_size=(1, 1), spatial_shift=(\n 0, 1), kernel=kernel, use_bias=False)\n', (5936, 6039), False, 'from tfsnippet.layers import shifted_conv2d, conv2d\n'), ((6126, 6230), 'tfsnippet.layers.conv2d', 'conv2d', ([], {'input': 'x', 'out_channels': '(5)', 'kernel_size': '(1, 1)', 'kernel': 'kernel', 'use_bias': '(False)', 'padding': '"""VALID"""'}), "(input=x, out_channels=5, kernel_size=(1, 1), kernel=kernel, use_bias\n =False, padding='VALID')\n", (6132, 6230), False, 'from 
tfsnippet.layers import shifted_conv2d, conv2d\n'), ((6629, 6746), 'tfsnippet.layers.shifted_conv2d', 'shifted_conv2d', ([], {'input': 'x', 'out_channels': '(5)', 'kernel_size': '(2, 3)', 'spatial_shift': '(0, 1)', 'kernel': 'kernel', 'use_bias': '(False)'}), '(input=x, out_channels=5, kernel_size=(2, 3), spatial_shift=(\n 0, 1), kernel=kernel, use_bias=False)\n', (6643, 6746), False, 'from tfsnippet.layers import shifted_conv2d, conv2d\n'), ((6833, 6937), 'tfsnippet.layers.conv2d', 'conv2d', ([], {'input': 'x2', 'out_channels': '(5)', 'kernel_size': '(2, 3)', 'kernel': 'kernel', 'use_bias': '(False)', 'padding': '"""VALID"""'}), "(input=x2, out_channels=5, kernel_size=(2, 3), kernel=kernel,\n use_bias=False, padding='VALID')\n", (6839, 6937), False, 'from tfsnippet.layers import shifted_conv2d, conv2d\n'), ((7520, 7659), 'tfsnippet.layers.shifted_conv2d', 'shifted_conv2d', ([], {'input': 'x', 'out_channels': '(5)', 'kernel_size': '(1, 1)', 'spatial_shift': '(-1, 1)', 'kernel': 'kernel', 'use_bias': '(False)', 'channels_last': '(False)'}), '(input=x, out_channels=5, kernel_size=(1, 1), spatial_shift=(\n -1, 1), kernel=kernel, use_bias=False, channels_last=False)\n', (7534, 7659), False, 'from tfsnippet.layers import shifted_conv2d, conv2d\n'), ((7790, 7915), 'tfsnippet.layers.conv2d', 'conv2d', ([], {'input': 'x', 'out_channels': '(5)', 'kernel_size': '(1, 1)', 'kernel': 'kernel', 'use_bias': '(False)', 'padding': '"""VALID"""', 'channels_last': '(False)'}), "(input=x, out_channels=5, kernel_size=(1, 1), kernel=kernel, use_bias\n =False, padding='VALID', channels_last=False)\n", (7796, 7915), False, 'from tfsnippet.layers import shifted_conv2d, conv2d\n'), ((8390, 8529), 'tfsnippet.layers.shifted_conv2d', 'shifted_conv2d', ([], {'input': 'x', 'out_channels': '(5)', 'kernel_size': '(2, 3)', 'spatial_shift': '(-1, 1)', 'kernel': 'kernel', 'use_bias': '(False)', 'channels_last': '(False)'}), '(input=x, out_channels=5, kernel_size=(2, 3), spatial_shift=(\n -1, 1), 
kernel=kernel, use_bias=False, channels_last=False)\n', (8404, 8529), False, 'from tfsnippet.layers import shifted_conv2d, conv2d\n'), ((8660, 8785), 'tfsnippet.layers.conv2d', 'conv2d', ([], {'input': 'x2', 'out_channels': '(5)', 'kernel_size': '(2, 3)', 'kernel': 'kernel', 'use_bias': '(False)', 'padding': '"""VALID"""', 'channels_last': '(False)'}), "(input=x2, out_channels=5, kernel_size=(2, 3), kernel=kernel,\n use_bias=False, padding='VALID', channels_last=False)\n", (8666, 8785), False, 'from tfsnippet.layers import shifted_conv2d, conv2d\n')] |
import math
import torch
import numpy as np
from sklearn.decomposition import PCA
import logging
logger = logging.getLogger('nmtpytorch')
def center_emb(x):
    """Center the embedding matrix in place: shift every non-zero row so the
    mean of the non-zero rows becomes the zero vector.

    All-zero rows (e.g. padding vectors) are detected first and left untouched.
    Returns the (mutated) input array.
    """
    logger.info(f'Apply centering')
    is_zero_row = np.all(x == 0, axis=1)
    # mean over the genuine word vectors only, then shift everything by it
    nonzero_mean = x[~is_zero_row].mean(axis=0)
    x -= nonzero_mean
    # restore the zero rows that were shifted along with the rest
    x[is_zero_row] = 0
    return x
def norm_emb(x):
    """Return a copy of x with every row L2-normalised.

    Rows whose norm is zero are divided by 1 instead, so they come back
    unchanged rather than producing NaNs.
    """
    row_lengths = np.linalg.norm(x, axis=1, keepdims=True)
    row_lengths[row_lengths == 0] = 1.
    return x / row_lengths
def apply_abtt(embs, centering=True, n_pca=3, applys_norm=True):
    """Apply "all-but-the-top" (ABTT) post-processing to an embedding matrix.

    Row 0 is the padding embedding: it is excluded from the statistics and
    restored unchanged in the returned matrix (apply_lc below makes the same
    row-0-is-special assumption).

    Args:
        embs: (n_vocab, dim) embedding matrix, row 0 = padding.
        centering: subtract the mean of the non-zero word vectors.
        n_pca: number of dominant PCA components to remove (0 disables).
        applys_norm: L2-normalise the word vectors afterwards.

    Returns:
        (n_vocab, dim) post-processed embedding matrix.
    """
    # BUG FIX: the padding row is embs[0], not embs[1]; the old code
    # duplicated the first word vector as "padding" and dropped the real
    # padding row.
    pad_emb = embs[0]
    word_embs = embs[1:]
    # Centering (in place, on the word rows only)
    word_embs = center_emb(word_embs) if centering else word_embs
    # Subtract the top-n_pca principal components
    if n_pca > 0:
        logger.info(f'Substracting PCA components (n={n_pca})')
        pca = PCA(n_components=n_pca)
        pca.fit(word_embs)
        # project each vector on every component ...
        embs_pca = pca.transform(word_embs).reshape(-1, n_pca, 1) * \
            pca.components_.reshape(1, n_pca, -1)
        # ... and accumulate over components to get the correction to remove
        embs_pca = embs_pca.sum(axis=-2)
        word_embs -= embs_pca
    word_embs = norm_emb(word_embs) if applys_norm else word_embs
    return np.append([pad_emb], word_embs, axis=0)
def apply_lc(embs, k=10, applys_norm=True):
    """Apply localized centering: subtract from every word vector the centroid
    of its k nearest neighbours (by cosine similarity).

    Row 0 of `embs` is treated as a special (padding) embedding and is passed
    through unchanged.
    """
    assert k > 0, f'`k_nn` not found or should be a positive value'
    logger.info(f'Apply localized centering with {k}-NN')
    head = torch.from_numpy(embs[:1])
    # GPU-friendly tensors for the large similarity matrix product
    words = torch.from_numpy(embs[1:])
    n_vocab = words.shape[0]
    # cosine similarity between every pair of word vectors
    norms = words.norm(dim=-1, keepdim=True)
    sims = words @ words.t() / norms / norms.t()
    # zero the diagonal so argsort never picks a vector as its own neighbour
    sims[range(n_vocab), range(n_vocab)] = 0
    neighbours = sims.argsort(descending=True)[:, :k]
    # subtract the localized centroid of the k nearest neighbours
    words = words - words[neighbours].mean(dim=-2)
    result = torch.cat([head, words]).numpy()
    if applys_norm:
        result = norm_emb(result)
    return result
| [
"torch.cat",
"logging.getLogger",
"numpy.append",
"numpy.linalg.norm",
"sklearn.decomposition.PCA",
"torch.torch.from_numpy",
"numpy.all"
] | [((107, 138), 'logging.getLogger', 'logging.getLogger', (['"""nmtpytorch"""'], {}), "('nmtpytorch')\n", (124, 138), False, 'import logging\n'), ((211, 233), 'numpy.all', 'np.all', (['(x == 0)'], {'axis': '(1)'}), '(x == 0, axis=1)\n', (217, 233), True, 'import numpy as np\n'), ((459, 499), 'numpy.linalg.norm', 'np.linalg.norm', (['x'], {'axis': '(1)', 'keepdims': '(True)'}), '(x, axis=1, keepdims=True)\n', (473, 499), True, 'import numpy as np\n'), ((1332, 1371), 'numpy.append', 'np.append', (['[pad_emb]', 'word_embs'], {'axis': '(0)'}), '([pad_emb], word_embs, axis=0)\n', (1341, 1371), True, 'import numpy as np\n'), ((1563, 1595), 'torch.torch.from_numpy', 'torch.torch.from_numpy', (['embs[:1]'], {}), '(embs[:1])\n', (1585, 1595), False, 'import torch\n'), ((1667, 1699), 'torch.torch.from_numpy', 'torch.torch.from_numpy', (['embs[1:]'], {}), '(embs[1:])\n', (1689, 1699), False, 'import torch\n'), ((873, 896), 'sklearn.decomposition.PCA', 'PCA', ([], {'n_components': 'n_pca'}), '(n_components=n_pca)\n', (876, 896), False, 'from sklearn.decomposition import PCA\n'), ((2198, 2234), 'torch.cat', 'torch.cat', (['[special_embs, word_embs]'], {}), '([special_embs, word_embs])\n', (2207, 2234), False, 'import torch\n')] |
import numpy as np
import rospy
import cv2
from ..assignment.linear_assignment import LinearAssignment
from .monitor import Monitor
from ...utils.bbox_metrics import overlap
from ...utils.allocentric_spatial_relations import is_on_top, is_in, is_close
from ...utils.egocentric_spatial_relations import is_right_of, is_left_of
from scipy.spatial.distance import euclidean
INF = 10e3
class ActionStates(object):
    """Discrete states of a tracked object, as used by PhysicsMonitor.

    PLACED: the object rests where the physics simulation predicts.
    HELD: the object deviates from the prediction (presumably held by a person).
    RELEASED: the object is no longer tracked / was just let go.
    """
    PLACED = 0
    HELD = 1
    RELEASED = 2
def centroid_cost(track_a, track_b):
    """Euclidean distance between the centroids of two tracks.

    Falls back to INF when either track has no usable pose (or the distance
    computation itself fails), so the assignment solver treats the pair as
    unmatched.
    """
    try:
        pos_a = track_a.pose.position().to_array()
        pos_b = track_b.pose.position().to_array()
        return euclidean(pos_a, pos_b)
    except Exception:
        return INF
class PhysicsMonitor(Monitor):
    """Tabletop monitor that cross-checks perceived object poses against an
    internal physics simulation and infers human pick/place/release actions.
    """
    def __init__(self, internal_simulator=None, position_tolerance=0.04):
        """Tabletop monitor constructor.

        Args:
            internal_simulator: physics simulator holding one entity per track.
            position_tolerance: max simulated-vs-perceived centroid distance
                for an object to count as physically plausible ('placed').
        """
        super(PhysicsMonitor, self).__init__(internal_simulator=internal_simulator)
        self.previous_object_states = {}      # object id -> last ActionStates value
        self.previous_object_tracks_map = {}  # object id -> simulated entity last cycle
        self.position_tolerance = position_tolerance
        self.content_map = {}
        # used to attribute an action to the person closest to the object
        self.centroid_assignement = LinearAssignment(centroid_cost, max_distance=None)

    def monitor(self, object_tracks, person_tracks, time):
        """Monitor the physical consistency of the objects and detect human
        tabletop actions (pick / place / release).

        Returns:
            (corrected_object_tracks, relations): the simulator's non-static
            entities and the currently held fact relations.
        """
        self.cleanup_relations()
        next_object_states = {}
        object_tracks_map = {}
        # make sure every located object exists in the simulator at its perceived pose
        for object in object_tracks:
            if object.is_located() and object.has_shape():
                if not self.simulator.is_entity_loaded(object.id):
                    self.simulator.load_node(object)
                self.simulator.reset_entity_pose(object.id, object.pose)
        # perform prediction: let physics run for a short horizon
        self.generate_prediction()
        for object in object_tracks:
            if object.is_located() and object.has_shape():
                if object.is_confirmed():
                    simulated_object = self.simulator.get_entity(object.id)
                    object_tracks_map[object.id] = simulated_object
                    # plausible = the object stayed where physics predicted
                    simulated_position = simulated_object.pose.position()
                    perceived_position = object.pose.position()
                    distance = euclidean(simulated_position.to_array(), perceived_position.to_array())
                    is_physically_plausible = distance < self.position_tolerance
                    if is_physically_plausible:
                        next_object_states[object.id] = ActionStates.PLACED
                    else:
                        # implausible motion: assume it is held and re-sync the simulator
                        next_object_states[object.id] = ActionStates.HELD
                        self.simulator.reset_entity_pose(object.id, object.pose)
                    if object.id not in self.previous_object_states:
                        # first time we see this object: trigger the initial action
                        self.previous_object_states[object.id] = ActionStates.RELEASED
                        if next_object_states[object.id] == ActionStates.HELD:
                            self.assign_and_trigger_action(object, "pick", person_tracks, time)
                        if next_object_states[object.id] == ActionStates.PLACED:
                            self.assign_and_trigger_action(object, "place", person_tracks, time)
        # state-transition table for objects seen on the previous cycle
        for object_id in self.previous_object_states.keys():
            if object_id in self.previous_object_tracks_map:
                object = self.previous_object_tracks_map[object_id]
                if object_id in next_object_states:
                    if self.previous_object_states[object_id] == ActionStates.HELD and \
                            next_object_states[object_id] == ActionStates.PLACED:
                        self.assign_and_trigger_action(object, "place", person_tracks, time)
                    if self.previous_object_states[object_id] == ActionStates.RELEASED and \
                            next_object_states[object_id] == ActionStates.PLACED:
                        self.assign_and_trigger_action(object, "place", person_tracks, time)
                    if self.previous_object_states[object_id] == ActionStates.PLACED and \
                            next_object_states[object_id] == ActionStates.HELD:
                        self.assign_and_trigger_action(object, "pick", person_tracks, time)
                    if self.previous_object_states[object_id] == ActionStates.RELEASED and \
                            next_object_states[object_id] == ActionStates.HELD:
                        self.assign_and_trigger_action(object, "pick", person_tracks, time)
                else:
                    # object disappeared while held -> it was released
                    if self.previous_object_states[object_id] == ActionStates.HELD:
                        self.assign_and_trigger_action(object, "release", person_tracks, time)
                        next_object_states[object_id] = ActionStates.RELEASED
        corrected_object_tracks = self.simulator.get_not_static_entities()
        static_objects = self.simulator.get_static_entities()
        self.compute_allocentric_relations(corrected_object_tracks+static_objects+person_tracks, time)
        self.previous_object_states = next_object_states
        self.previous_object_tracks_map = object_tracks_map
        return corrected_object_tracks, self.relations

    def generate_prediction(self, prediction_horizon=(1/10.0)):
        """Step the physics simulation forward by `prediction_horizon` seconds
        (the simulator is stepped at 240 Hz internally)."""
        nb_step = int(prediction_horizon/(1/240.0))
        for i in range(0, nb_step):
            self.simulator.step_simulation()

    def assign_and_trigger_action(self, object, action, person_tracks, time):
        """Assign the action to the person closest to the given object and trigger it."""
        matches, unmatched_objects, unmatched_person = self.centroid_assignement.match(person_tracks, [object])
        # BUG FIX: was `len(matches > 0)`, which raises a TypeError for plain
        # lists and was a no-op for arrays; test the number of matches instead.
        if len(matches) > 0:
            _, person_indice = matches[0]
            person = person_tracks[person_indice]
            self.trigger_event(person, action, object, time)

    def test_occlusion(self, object, tracks, occlusion_threshold=0.8):
        """Test whether `object` is occluded by one of `tracks` via 2D bbox overlap.

        Returns (True, best_track) when the best overlap exceeds the
        threshold, (False, None) otherwise.
        """
        overlap_score = np.zeros(len(tracks))
        for idx, track in enumerate(tracks):
            overlap_score[idx] = overlap(object, track)
        idx = np.argmax(overlap_score)
        object = tracks[idx]
        # BUG FIX: was `overlap[idx]`, which indexed the imported `overlap`
        # *function* (TypeError) instead of the score array.
        score = overlap_score[idx]
        if score > occlusion_threshold:
            return True, object
        else:
            return False, None

    def compute_allocentric_relations(self, objects, time):
        """Start/end close/on/in facts for every ordered pair of located,
        shaped, non-background objects."""
        for obj1 in objects:
            if obj1.is_located() and obj1.has_shape():
                for obj2 in objects:
                    if obj1.id != obj2.id:
                        # evaluate allocentric relation
                        if obj2.is_located() and obj2.has_shape():
                            # get the 3d axis-aligned bounding boxes
                            success1, aabb1 = self.simulator.get_aabb(obj1)
                            success2, aabb2 = self.simulator.get_aabb(obj2)
                            if success1 is True and success2 is True:
                                if obj2.label != "background" and obj1.label != "background":
                                    if is_close(aabb1, aabb2):
                                        self.start_fact(obj1, "close", object=obj2, time=time)
                                    else:
                                        self.end_fact(obj1, "close", object=obj2, time=time)
                                    if is_on_top(aabb1, aabb2):
                                        self.start_fact(obj1, "on", object=obj2, time=time)
                                    else:
                                        self.end_fact(obj1, "on", object=obj2, time=time)
                                    if is_in(aabb1, aabb2):
                                        self.start_fact(obj1, "in", object=obj2, time=time)
                                    else:
                                        self.end_fact(obj1, "in", object=obj2, time=time)
| [
"numpy.argmax"
] | [((7134, 7158), 'numpy.argmax', 'np.argmax', (['overlap_score'], {}), '(overlap_score)\n', (7143, 7158), True, 'import numpy as np\n')] |
import random
import numpy as np
import torch
import torch.autograd as autograd
import torch.nn as nn
import torch.nn.functional as F
class KimCNN(nn.Module):
    """Kim (2014)-style CNN sentence classifier.

    Parallel Conv2d filters of several widths slide over the word-embedding
    matrix, are max-pooled over time, concatenated, passed through dropout
    and a final linear layer.
    """
    def __init__(self, word_model, **config):
        # word_model must expose .dim, .n_channels and .lookup();
        # recognised config keys: n_feature_maps, weight_lengths, dropout, n_labels
        super().__init__()
        n_fmaps = config.get("n_feature_maps", 100)
        weight_lengths = config.get("weight_lengths", [3, 4, 5])
        embedding_dim = word_model.dim
        self.word_model = word_model
        n_c = word_model.n_channels
        # one convolution per filter width; padding (w - 1, 0) keeps partial
        # windows at the sentence edges
        self.conv_layers = [nn.Conv2d(n_c, n_fmaps, (w, embedding_dim), padding=(w - 1, 0)) for w in weight_lengths]
        for i, conv in enumerate(self.conv_layers):
            # register each conv explicitly so its parameters are tracked
            self.add_module("conv{}".format(i), conv)
        self.dropout = nn.Dropout(config.get("dropout", 0.5))
        self.fc = nn.Linear(len(self.conv_layers) * n_fmaps, config.get("n_labels", 5))

    def preprocess(self, sentences):
        """Convert raw sentences to a tensor of word ids via the word model."""
        return torch.from_numpy(np.array(self.word_model.lookup(sentences)))

    def compute_corr_matrix(self, output):
        """Pairwise dot-product (Gram) matrix of the gradients of `output`
        w.r.t. the embeddings stored by the last forward() call."""
        grads = autograd.grad(output, self.sent_weights)
        grads = grads[0].squeeze(0).squeeze(0)
        # return np.cov(grads.cpu().data.numpy())
        # size(2) is the sentence-length axis of the (batch, channel, sent
        # length, embed dim) layout stored by forward()
        sz = self.sent_weights.size(2)
        corr_matrix = np.empty((sz, sz))
        for i, g1 in enumerate(grads):
            for j, g2 in enumerate(grads):
                corr_matrix[i, j] = torch.dot(g1, g2).cpu().data[0]
        return corr_matrix

    def compute_grad_norm(self, output):
        """Per-position L2 norms of the gradients of `output` w.r.t. the
        embeddings of the last forward() call, as a numpy array."""
        grad_norms = autograd.grad(output, self.sent_weights, create_graph=True)
        grad_norms = grad_norms[0].squeeze(0).squeeze(0)
        grad_norms = [torch.sqrt(torch.sum(g**2)) for g in grad_norms]
        return torch.cat(grad_norms).cpu().data.numpy()

    def rank(self, sentence):
        # NOTE(review): hard-codes .cuda(), so this method requires a CUDA device
        """Classify a single sentence; returns (predicted label index, scores)."""
        m_in = autograd.Variable(torch.Tensor(self.word_model.lookup([sentence])).long().cuda())
        m_out = self.forward(m_in)
        return torch.max(m_out, 1)[1], m_out

    def loss(self):
        # NOTE(review): everything after this `return 0` is unreachable — it
        # looks like a deliberately disabled conv-similarity regulariser, and
        # it references compute_sim_loss, which is not defined in this file.
        return 0
        def conv_sim_loss(weights, n=5):
            weights = random.sample(weights, 2 * n)
            weights1, weights2 = weights[:n], weights[n:]
            loss = 0
            tot = 0
            for i, w1 in enumerate(weights1):
                for j in range(i + 1, n):
                    w2 = weights2[j]
                    loss += compute_sim_loss(w1, w2, 5E-3)
                    tot += 1
            return loss / tot
        conv_layers = [c.weight for c in self.conv_layers]
        loss = conv_sim_loss(conv_layers[0].split(1, 0)) + conv_sim_loss(conv_layers[1].split(1, 0)) + \
            conv_sim_loss(conv_layers[2].split(1, 0))
        return loss

    def forward(self, x):
        self.sent_weights = x = self.word_model(x) # shape: (batch, channel, sent length, embed dim)
        # per-width convolution + ReLU, then max-pool over the time axis
        x = [F.relu(conv(x)).squeeze(3) for conv in self.conv_layers]
        x = [F.max_pool1d(c, c.size(2)).squeeze(2) for c in x]
        x = torch.cat(x, 1)
        x = self.dropout(x)
        return self.fc(x)
class MultiChannelWordModel(nn.Module):
    """Two-channel word embedding: a trainable (non-static) table and a frozen
    (static) copy of its initial weights, stacked along the channel axis.
    """
    def __init__(self, id_dict, weights, unknown_vocab=[]):
        super().__init__()
        self.n_channels = 2
        # build the trainable channel first: it extends id_dict/weights with
        # the unknown vocabulary, which the static copy then reuses
        self.non_static_model = SingleChannelWordModel(id_dict, weights, unknown_vocab, static=False)
        self.static_model = SingleChannelWordModel(id_dict, self.non_static_model.weights)
        self.dim = self.static_model.dim

    def forward(self, x):
        # concatenate the two (batch, 1, sent, dim) channels -> (batch, 2, sent, dim)
        return torch.cat((self.static_model(x), self.non_static_model(x)), dim=1)

    def lookup(self, sentences):
        """Delegate sentence-to-id conversion to the static channel."""
        return self.static_model.lookup(sentences)
class SingleChannelWordModel(nn.Module):
    """Single-channel word-embedding module backed by a word->id lookup table.

    NOTE: `id_dict` is kept by reference and extended *in place* with ids for
    `unknown_vocab`; MultiChannelWordModel relies on this side effect to build
    its static channel, so do not copy the dict here.
    """
    def __init__(self, id_dict, weights, unknown_vocab=[], static=True):
        """
        Args:
            id_dict: word -> embedding-row index (mutated in place, see above).
            weights: (len(id_dict), dim) pretrained embedding matrix.
            unknown_vocab: words missing from id_dict; they get fresh ids and
                randomly initialised embedding rows.
            static: freeze the embedding weights when True.
        """
        super().__init__()
        vocab_size = len(id_dict) + len(unknown_vocab)
        self.n_channels = 1
        self.lookup_table = id_dict
        last_id = max(id_dict.values())
        for word in unknown_vocab:
            last_id += 1
            self.lookup_table[word] = last_id
        # BUG FIX: the random rows for the unknown vocabulary must match the
        # width of `weights` (was hard-coded to 300, which broke every other
        # embedding size by making np.concatenate fail).
        dim = np.shape(weights)[1]
        self.weights = np.concatenate((weights, np.random.rand(len(unknown_vocab), dim) - 0.5))
        self.dim = self.weights.shape[1]
        # padding id 2 is the same sentinel that lookup() pads with
        self.embedding = nn.Embedding(vocab_size, self.dim, padding_idx=2)
        self.embedding.weight.data.copy_(torch.from_numpy(self.weights))
        if static:
            self.embedding.weight.requires_grad = False

    @classmethod
    def make_random_model(cls, id_dict, unknown_vocab=[], dim=300):
        """Build a fully random, trainable model (no pretrained weights)."""
        weights = np.random.rand(len(id_dict), dim) - 0.5
        return cls(id_dict, weights, unknown_vocab, static=False)

    def forward(self, x):
        # x: (batch, sent_len) word ids -> (batch, 1, sent_len, dim)
        batch = self.embedding(x)
        return batch.unsqueeze(1)

    def lookup(self, sentences):
        """Convert sentences to lists of word ids, skipping out-of-vocabulary
        words and right-padding every list with id 2 to the longest length."""
        indices_list = []
        max_len = 0
        for sentence in sentences:
            indices = []
            for word in str(sentence).split():
                try:
                    index = self.lookup_table[word]
                    indices.append(index)
                except KeyError:
                    continue
            indices_list.append(indices)
            if len(indices) > max_len:
                max_len = len(indices)
        for indices in indices_list:
            indices.extend([2] * (max_len - len(indices)))
        return indices_list
| [
"torch.dot",
"torch.autograd.grad",
"numpy.empty",
"torch.nn.Embedding",
"torch.nn.Conv2d",
"random.sample",
"torch.cat",
"torch.max",
"torch.sum",
"torch.from_numpy"
] | [((1039, 1079), 'torch.autograd.grad', 'autograd.grad', (['output', 'self.sent_weights'], {}), '(output, self.sent_weights)\n', (1052, 1079), True, 'import torch.autograd as autograd\n'), ((1242, 1260), 'numpy.empty', 'np.empty', (['(sz, sz)'], {}), '((sz, sz))\n', (1250, 1260), True, 'import numpy as np\n'), ((1508, 1567), 'torch.autograd.grad', 'autograd.grad', (['output', 'self.sent_weights'], {'create_graph': '(True)'}), '(output, self.sent_weights, create_graph=True)\n', (1521, 1567), True, 'import torch.autograd as autograd\n'), ((2976, 2991), 'torch.cat', 'torch.cat', (['x', '(1)'], {}), '(x, 1)\n', (2985, 2991), False, 'import torch\n'), ((3573, 3607), 'torch.cat', 'torch.cat', (['(batch1, batch2)'], {'dim': '(1)'}), '((batch1, batch2), dim=1)\n', (3582, 3607), False, 'import torch\n'), ((4279, 4328), 'torch.nn.Embedding', 'nn.Embedding', (['vocab_size', 'self.dim'], {'padding_idx': '(2)'}), '(vocab_size, self.dim, padding_idx=2)\n', (4291, 4328), True, 'import torch.nn as nn\n'), ((509, 572), 'torch.nn.Conv2d', 'nn.Conv2d', (['n_c', 'n_fmaps', '(w, embedding_dim)'], {'padding': '(w - 1, 0)'}), '(n_c, n_fmaps, (w, embedding_dim), padding=(w - 1, 0))\n', (518, 572), True, 'import torch.nn as nn\n'), ((2074, 2103), 'random.sample', 'random.sample', (['weights', '(2 * n)'], {}), '(weights, 2 * n)\n', (2087, 2103), False, 'import random\n'), ((4371, 4401), 'torch.from_numpy', 'torch.from_numpy', (['self.weights'], {}), '(self.weights)\n', (4387, 4401), False, 'import torch\n'), ((1660, 1677), 'torch.sum', 'torch.sum', (['(g ** 2)'], {}), '(g ** 2)\n', (1669, 1677), False, 'import torch\n'), ((1938, 1957), 'torch.max', 'torch.max', (['m_out', '(1)'], {}), '(m_out, 1)\n', (1947, 1957), False, 'import torch\n'), ((1714, 1735), 'torch.cat', 'torch.cat', (['grad_norms'], {}), '(grad_norms)\n', (1723, 1735), False, 'import torch\n'), ((1382, 1399), 'torch.dot', 'torch.dot', (['g1', 'g2'], {}), '(g1, g2)\n', (1391, 1399), False, 'import torch\n')] |
import numpy as np
import matplotlib.pyplot as plt
def identity(x):
    """Identity noise function: leaves the input unchanged."""
    return x


class DenoisingAutoencoder(object):
    """Single-hidden-layer denoising autoencoder trained with SGD + momentum.

    `layer_units` is (n_in, n_hidden, n_out) with n_out == n_in.
    """

    def sigmoid(self, x):
        """Numerically stable elementwise logistic sigmoid."""
        # split by sign so exp() is only ever called on non-positive values
        pos_mask = (x >= 0)
        neg_mask = (x < 0)
        z = np.zeros_like(x)
        z[pos_mask] = np.exp(-x[pos_mask])
        z[neg_mask] = np.exp(x[neg_mask])
        top = np.ones_like(x)
        top[neg_mask] = z[neg_mask]
        return top / (1 + z)

    def sigmoid_deriv(self, x):
        """Sigmoid derivative, given the sigmoid *output* x (x = sigmoid(a))."""
        return (x * (1 - x))

    def ac_func(self, x, function_name='SIGMOID'):
        """Apply the activation named `function_name` ('sigmoid' or 'tanh')."""
        fname_upper = function_name.upper()
        if fname_upper == 'SIGMOID':
            return self.sigmoid(x)
        elif fname_upper == 'TANH':
            return np.tanh(x)
        else:
            # BUG FIX: `raise <str>` is a TypeError in Python 3; raise a real
            # exception type instead.
            raise NotImplementedError(fname_upper + " Not implemented Yet")

    def ac_func_deriv(self, x, function_name='SIGMOID'):
        """Derivative of the activation, given the activation *output* x."""
        fname_upper = function_name.upper()
        if fname_upper == 'SIGMOID':
            return self.sigmoid_deriv(x)
        elif fname_upper == 'TANH':
            return 1 - np.square(x)
        else:
            # BUG FIX: same string-raise bug as in ac_func
            raise NotImplementedError(fname_upper + " Not implemented Yet")

    def __init__(self, layer_units, weights=None):
        """layer_units: (n_in, n_hidden, n_out); weights: optional pre-built
        structured weight list (same layout as init_weights() produces)."""
        self.weights = weights
        self.layer_units = layer_units
        self.velosities = None  # momentum buffers, lazily created on first SGD step

    def init_weights(self, seed=0):
        """Initialise weights uniformly in [-r, r) with
        r = sqrt(6) / sqrt(n_hidden + n_in); biases start at zero.

        Stores the structured weight list on the instance and returns it.
        """
        # Note layer_units[2] = layer_units[0]
        layer_units = self.layer_units
        n_layers = len(layer_units)
        assert n_layers == 3
        np.random.seed(seed)
        r = np.sqrt(6) / np.sqrt(layer_units[1] + layer_units[0])
        weights = [{} for _ in range(n_layers - 1)]
        weights[0]['W'] = np.random.random((layer_units[0], layer_units[1])) * 2.0 * r - r
        weights[1]['W'] = np.random.random((layer_units[1], layer_units[2])) * 2.0 * r - r
        weights[0]['b'] = np.zeros(layer_units[1])
        weights[1]['b'] = np.zeros(layer_units[2])
        self.weights = weights
        return self.weights

    def predict(self, X_noisy, reg=3e-3, activation_function='sigmoid'):
        """Run the forward pass and return the reconstruction scores."""
        weights = self.weights
        W0 = weights[0]['W']
        b0 = weights[0]['b']
        W1 = weights[1]['W']
        b1 = weights[1]['b']
        # forward pass: input -> hidden -> reconstruction
        h = np.dot(X_noisy, W0) + b0
        g = self.ac_func(h, activation_function)
        r = np.dot(g, W1) + b1
        scores = self.ac_func(r, activation_function)
        return scores

    def loss(self, X_noisy, X, reg=3e-3, activation_function='sigmoid', just_loss=False):
        """Mean squared-error reconstruction loss with L2 regularisation.

        Returns `loss` alone when just_loss is True, otherwise (loss, grads)
        where grads maps 'W0'/'b0'/'W1'/'b1' to arrays shaped like the
        corresponding parameters.
        """
        weights = self.weights
        W0 = weights[0]['W']
        b0 = weights[0]['b']
        W1 = weights[1]['W']
        b1 = weights[1]['b']
        # forward pass
        h = np.dot(X_noisy, W0) + b0
        g = self.ac_func(h, activation_function)
        r = np.dot(g, W1) + b1
        scores = self.ac_func(r, activation_function)
        # data loss (0.5 * mean squared error) + L2 penalty on both weight matrices
        N = X_noisy.shape[0]
        diff = scores - X
        dataloss = np.sum(np.square(diff)) * 0.5 / N
        l2 = 0.5 * reg * (np.sum(np.square(W0)) + np.sum(np.square(W1)))
        loss = dataloss + l2
        if just_loss:
            return loss
        # backward pass: propagate through the second layer, then the first
        grads = {}
        dout = diff / N
        dr = dout * self.ac_func_deriv(scores, activation_function)  # (N, F)
        dg = np.dot(dr, W1.T)  # (N, H)
        dh = dg * self.ac_func_deriv(g, activation_function)
        grads['W0'] = np.dot(X_noisy.T, dh) + reg * W0
        grads['W1'] = np.dot(g.T, dr) + reg * W1
        grads['b0'] = np.sum(dh, axis=0) + reg * b0
        grads['b1'] = np.sum(dr, axis=0) + reg * b1
        return loss, grads

    def train_with_SGD(self, X, noise=identity,
                       learning_rate=1e-3, learning_rate_decay=0.95,
                       reg=3e-3, num_iters=100,
                       batchsize=128, momentum='classic', mu=0.9, verbose=False,
                       activation_function='sigmoid'):
        """Train with mini-batch SGD and classic momentum.

        Args:
            X: (num_train, n_in) clean training data.
            noise: callable corrupting a batch (default: identity, no noise).
            momentum: kept for interface compatibility; only classic momentum
                is implemented.

        Returns:
            dict with 'loss_history', one entry per iteration.
        """
        num_train = X.shape[0]
        loss_history = []
        for it in range(num_iters):
            batch_indicies = np.random.choice(num_train, batchsize, replace=False)
            X_batch = X[batch_indicies]
            # corrupt the batch, then compute loss and gradients on it
            noisy_X_batch = noise(X_batch)
            loss, grads = self.loss(noisy_X_batch, X_batch, reg, activation_function=activation_function)
            loss_history.append(loss)
            grad_w0, grad_b0 = grads['W0'], grads['b0']
            grad_w1, grad_b1 = grads['W1'], grads['b1']
            W0 = self.weights[0]['W']
            b0 = self.weights[0]['b']
            W1 = self.weights[1]['W']
            b1 = self.weights[1]['b']
            # BUG FIX: identity check (was `== None`); also build the buffers
            # with zeros_like instead of four hand-written shapes
            if self.velosities is None:
                self.velosities = [np.zeros_like(grad_w0), np.zeros_like(grad_b0),
                                   np.zeros_like(grad_w1), np.zeros_like(grad_b1)]
            # classic momentum: v = mu * v - lr * grad ; w += v
            self.velosities[0] = mu * self.velosities[0] - learning_rate * grad_w0
            self.weights[0]['W'] = W0 + self.velosities[0]
            self.velosities[1] = mu * self.velosities[1] - learning_rate * grad_b0
            self.weights[0]['b'] = b0 + self.velosities[1]
            self.velosities[2] = mu * self.velosities[2] - learning_rate * grad_w1
            self.weights[1]['W'] = W1 + self.velosities[2]
            self.velosities[3] = mu * self.velosities[3] - learning_rate * grad_b1
            self.weights[1]['b'] = b1 + self.velosities[3]
            if verbose and it % 10 == 0:
                print('SGD: iteration %d / %d: loss %f' % (it, num_iters, loss))
            # decay the learning rate every 5 iterations
            if it % 5 == 0:
                learning_rate *= learning_rate_decay
        return {'loss_history': loss_history, }
| [
"numpy.zeros_like",
"numpy.random.seed",
"numpy.ones_like",
"numpy.sum",
"numpy.tanh",
"numpy.square",
"numpy.zeros",
"numpy.random.random",
"numpy.exp",
"numpy.random.choice",
"numpy.dot",
"numpy.sqrt"
] | [((242, 258), 'numpy.zeros_like', 'np.zeros_like', (['x'], {}), '(x)\n', (255, 258), True, 'import numpy as np\n'), ((279, 299), 'numpy.exp', 'np.exp', (['(-x[pos_mask])'], {}), '(-x[pos_mask])\n', (285, 299), True, 'import numpy as np\n'), ((320, 339), 'numpy.exp', 'np.exp', (['x[neg_mask]'], {}), '(x[neg_mask])\n', (326, 339), True, 'import numpy as np\n'), ((352, 367), 'numpy.ones_like', 'np.ones_like', (['x'], {}), '(x)\n', (364, 367), True, 'import numpy as np\n'), ((1895, 1915), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (1909, 1915), True, 'import numpy as np\n'), ((2373, 2397), 'numpy.zeros', 'np.zeros', (['layer_units[1]'], {}), '(layer_units[1])\n', (2381, 2397), True, 'import numpy as np\n'), ((2424, 2448), 'numpy.zeros', 'np.zeros', (['layer_units[2]'], {}), '(layer_units[2])\n', (2432, 2448), True, 'import numpy as np\n'), ((6044, 6060), 'numpy.dot', 'np.dot', (['dr', 'W1.T'], {}), '(dr, W1.T)\n', (6050, 6060), True, 'import numpy as np\n'), ((7195, 7237), 'numpy.zeros', 'np.zeros', (['(layer_units[0], layer_units[1])'], {}), '((layer_units[0], layer_units[1]))\n', (7203, 7237), True, 'import numpy as np\n'), ((7265, 7307), 'numpy.zeros', 'np.zeros', (['(layer_units[1], layer_units[2])'], {}), '((layer_units[1], layer_units[2]))\n', (7273, 7307), True, 'import numpy as np\n'), ((7335, 7359), 'numpy.zeros', 'np.zeros', (['layer_units[1]'], {}), '(layer_units[1])\n', (7343, 7359), True, 'import numpy as np\n'), ((7387, 7411), 'numpy.zeros', 'np.zeros', (['layer_units[2]'], {}), '(layer_units[2])\n', (7395, 7411), True, 'import numpy as np\n'), ((1992, 2002), 'numpy.sqrt', 'np.sqrt', (['(6)'], {}), '(6)\n', (1999, 2002), True, 'import numpy as np\n'), ((2005, 2045), 'numpy.sqrt', 'np.sqrt', (['(layer_units[1] + layer_units[0])'], {}), '(layer_units[1] + layer_units[0])\n', (2012, 2045), True, 'import numpy as np\n'), ((2895, 2914), 'numpy.dot', 'np.dot', (['X_noisy', 'W0'], {}), '(X_noisy, W0)\n', (2901, 2914), True, 'import 
numpy as np\n'), ((2981, 2994), 'numpy.dot', 'np.dot', (['g', 'W1'], {}), '(g, W1)\n', (2987, 2994), True, 'import numpy as np\n'), ((3895, 3914), 'numpy.dot', 'np.dot', (['X_noisy', 'W0'], {}), '(X_noisy, W0)\n', (3901, 3914), True, 'import numpy as np\n'), ((3981, 3994), 'numpy.dot', 'np.dot', (['g', 'W1'], {}), '(g, W1)\n', (3987, 3994), True, 'import numpy as np\n'), ((6163, 6184), 'numpy.dot', 'np.dot', (['X_noisy.T', 'dh'], {}), '(X_noisy.T, dh)\n', (6169, 6184), True, 'import numpy as np\n'), ((6252, 6267), 'numpy.dot', 'np.dot', (['g.T', 'dr'], {}), '(g.T, dr)\n', (6258, 6267), True, 'import numpy as np\n'), ((6335, 6353), 'numpy.sum', 'np.sum', (['dh'], {'axis': '(0)'}), '(dh, axis=0)\n', (6341, 6353), True, 'import numpy as np\n'), ((6387, 6405), 'numpy.sum', 'np.sum', (['dr'], {'axis': '(0)'}), '(dr, axis=0)\n', (6393, 6405), True, 'import numpy as np\n'), ((7479, 7532), 'numpy.random.choice', 'np.random.choice', (['num_train', 'batchsize'], {'replace': '(False)'}), '(num_train, batchsize, replace=False)\n', (7495, 7532), True, 'import numpy as np\n'), ((764, 774), 'numpy.tanh', 'np.tanh', (['x'], {}), '(x)\n', (771, 774), True, 'import numpy as np\n'), ((1157, 1169), 'numpy.square', 'np.square', (['x'], {}), '(x)\n', (1166, 1169), True, 'import numpy as np\n'), ((2191, 2241), 'numpy.random.random', 'np.random.random', (['(layer_units[0], layer_units[1])'], {}), '((layer_units[0], layer_units[1]))\n', (2207, 2241), True, 'import numpy as np\n'), ((2282, 2332), 'numpy.random.random', 'np.random.random', (['(layer_units[1], layer_units[2])'], {}), '((layer_units[1], layer_units[2]))\n', (2298, 2332), True, 'import numpy as np\n'), ((5004, 5019), 'numpy.square', 'np.square', (['diff'], {}), '(diff)\n', (5013, 5019), True, 'import numpy as np\n'), ((5065, 5078), 'numpy.square', 'np.square', (['W0'], {}), '(W0)\n', (5074, 5078), True, 'import numpy as np\n'), ((5089, 5102), 'numpy.square', 'np.square', (['W1'], {}), '(W1)\n', (5098, 5102), True, 'import numpy 
as np\n'), ((8620, 8643), 'numpy.zeros', 'np.zeros', (['grad_w0.shape'], {}), '(grad_w0.shape)\n', (8628, 8643), True, 'import numpy as np\n'), ((8679, 8702), 'numpy.zeros', 'np.zeros', (['grad_b0.shape'], {}), '(grad_b0.shape)\n', (8687, 8702), True, 'import numpy as np\n'), ((8738, 8761), 'numpy.zeros', 'np.zeros', (['grad_w1.shape'], {}), '(grad_w1.shape)\n', (8746, 8761), True, 'import numpy as np\n'), ((8797, 8820), 'numpy.zeros', 'np.zeros', (['grad_b1.shape'], {}), '(grad_b1.shape)\n', (8805, 8820), True, 'import numpy as np\n')] |
# Write the benchmarking functions here.
# See "Writing benchmarks" in the asv docs for more information.
import numpy as np
from tardis.tests import montecarlo_test_wrappers as montecarlo
LINE_SIZE = 10000000
class TimeSuite:
    """
    asv benchmark suite timing the Monte Carlo C wrappers:
    binary search and the distance-to-boundary computations.
    """

    def setup(self):
        # Descending float64 array [LINE_SIZE, ..., 2] used as the search domain.
        self.line = np.arange(LINE_SIZE, 1, -1).astype(np.float64)

    def time_binarysearch(self):
        for _iteration in range(LINE_SIZE):
            target = np.random.random() * LINE_SIZE
            montecarlo.binary_search_wrapper(self.line, target, 0, LINE_SIZE - 1)

    def time_compute_distance2outer(self):
        # (r, mu, r_outer) cases exercised on every iteration.
        cases = [
            (0.0, 0.5, 1.0),
            (1.0, 0.5, 1.0),
            (0.3, 1.0, 1.0),
            (0.3, -1.0, 1.0),
            (0.5, 0.0, 1.0),
        ]
        for _iteration in range(1000000):
            for r, mu, r_outer in cases:
                montecarlo.compute_distance2outer_wrapper(r, mu, r_outer)

    def time_compute_distance2inner(self):
        cases = [
            (1.5, -1.0, 1.0),
            (0.0, 0.0, 0.0),
            (1.2, -0.7, 1.0),
        ]
        for _iteration in range(1000000):
            for r, mu, r_inner in cases:
                montecarlo.compute_distance2inner_wrapper(r, mu, r_inner)

    def time_compute_distance2line(self):
        # Two fixed argument tuples taken from a real simulation state.
        cases = [
            (2.20866912e15, -0.251699059004, 1.05581082105e15, 1.06020910733e15,
             1693440.0, 5.90513983371e-07, 1.0602263591e15, 1.06011723237e15, 2),
            (2.23434667994e15, -0.291130548401, 1.05581082105e15, 1.06733618121e15,
             1693440.0, 5.90513983371e-07, 1.06738407486e15, 1.06732933961e15, 3),
        ]
        for _iteration in range(1000000):
            for args in cases:
                montecarlo.compute_distance2line_wrapper(*args)

    def time_compute_distance2electron(self):
        for _iteration in range(1000000):
            montecarlo.compute_distance2electron_wrapper(0.0, 0.0, 2.0, 2.0)
| [
"tardis.tests.montecarlo_test_wrappers.compute_distance2line_wrapper",
"tardis.tests.montecarlo_test_wrappers.compute_distance2electron_wrapper",
"numpy.random.random",
"numpy.arange",
"tardis.tests.montecarlo_test_wrappers.compute_distance2outer_wrapper",
"tardis.tests.montecarlo_test_wrappers.compute_di... | [((745, 801), 'tardis.tests.montecarlo_test_wrappers.compute_distance2outer_wrapper', 'montecarlo.compute_distance2outer_wrapper', (['(0.0)', '(0.5)', '(1.0)'], {}), '(0.0, 0.5, 1.0)\n', (786, 801), True, 'from tardis.tests import montecarlo_test_wrappers as montecarlo\n'), ((814, 870), 'tardis.tests.montecarlo_test_wrappers.compute_distance2outer_wrapper', 'montecarlo.compute_distance2outer_wrapper', (['(1.0)', '(0.5)', '(1.0)'], {}), '(1.0, 0.5, 1.0)\n', (855, 870), True, 'from tardis.tests import montecarlo_test_wrappers as montecarlo\n'), ((883, 939), 'tardis.tests.montecarlo_test_wrappers.compute_distance2outer_wrapper', 'montecarlo.compute_distance2outer_wrapper', (['(0.3)', '(1.0)', '(1.0)'], {}), '(0.3, 1.0, 1.0)\n', (924, 939), True, 'from tardis.tests import montecarlo_test_wrappers as montecarlo\n'), ((952, 1009), 'tardis.tests.montecarlo_test_wrappers.compute_distance2outer_wrapper', 'montecarlo.compute_distance2outer_wrapper', (['(0.3)', '(-1.0)', '(1.0)'], {}), '(0.3, -1.0, 1.0)\n', (993, 1009), True, 'from tardis.tests import montecarlo_test_wrappers as montecarlo\n'), ((1022, 1078), 'tardis.tests.montecarlo_test_wrappers.compute_distance2outer_wrapper', 'montecarlo.compute_distance2outer_wrapper', (['(0.5)', '(0.0)', '(1.0)'], {}), '(0.5, 0.0, 1.0)\n', (1063, 1078), True, 'from tardis.tests import montecarlo_test_wrappers as montecarlo\n'), ((1168, 1225), 'tardis.tests.montecarlo_test_wrappers.compute_distance2inner_wrapper', 'montecarlo.compute_distance2inner_wrapper', (['(1.5)', '(-1.0)', '(1.0)'], {}), '(1.5, -1.0, 1.0)\n', (1209, 1225), True, 'from tardis.tests import montecarlo_test_wrappers as montecarlo\n'), ((1238, 1294), 'tardis.tests.montecarlo_test_wrappers.compute_distance2inner_wrapper', 'montecarlo.compute_distance2inner_wrapper', (['(0.0)', '(0.0)', '(0.0)'], {}), '(0.0, 0.0, 0.0)\n', (1279, 1294), True, 'from tardis.tests import montecarlo_test_wrappers as montecarlo\n'), ((1307, 
1364), 'tardis.tests.montecarlo_test_wrappers.compute_distance2inner_wrapper', 'montecarlo.compute_distance2inner_wrapper', (['(1.2)', '(-0.7)', '(1.0)'], {}), '(1.2, -0.7, 1.0)\n', (1348, 1364), True, 'from tardis.tests import montecarlo_test_wrappers as montecarlo\n'), ((1453, 1653), 'tardis.tests.montecarlo_test_wrappers.compute_distance2line_wrapper', 'montecarlo.compute_distance2line_wrapper', (['(2208669120000000.0)', '(-0.251699059004)', '(1055810821050000.0)', '(1060209107330000.0)', '(1693440.0)', '(5.90513983371e-07)', '(1060226359100000.0)', '(1060117232370000.0)', '(2)'], {}), '(2208669120000000.0, -\n 0.251699059004, 1055810821050000.0, 1060209107330000.0, 1693440.0, \n 5.90513983371e-07, 1060226359100000.0, 1060117232370000.0, 2)\n', (1493, 1653), True, 'from tardis.tests import montecarlo_test_wrappers as montecarlo\n'), ((1801, 2001), 'tardis.tests.montecarlo_test_wrappers.compute_distance2line_wrapper', 'montecarlo.compute_distance2line_wrapper', (['(2234346679940000.0)', '(-0.291130548401)', '(1055810821050000.0)', '(1067336181210000.0)', '(1693440.0)', '(5.90513983371e-07)', '(1067384074860000.0)', '(1067329339610000.0)', '(3)'], {}), '(2234346679940000.0, -\n 0.291130548401, 1055810821050000.0, 1067336181210000.0, 1693440.0, \n 5.90513983371e-07, 1067384074860000.0, 1067329339610000.0, 3)\n', (1841, 2001), True, 'from tardis.tests import montecarlo_test_wrappers as montecarlo\n'), ((2233, 2297), 'tardis.tests.montecarlo_test_wrappers.compute_distance2electron_wrapper', 'montecarlo.compute_distance2electron_wrapper', (['(0.0)', '(0.0)', '(2.0)', '(2.0)'], {}), '(0.0, 0.0, 2.0, 2.0)\n', (2277, 2297), True, 'from tardis.tests import montecarlo_test_wrappers as montecarlo\n'), ((404, 431), 'numpy.arange', 'np.arange', (['LINE_SIZE', '(1)', '(-1)'], {}), '(LINE_SIZE, 1, -1)\n', (413, 431), True, 'import numpy as np\n'), ((593, 611), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (609, 611), True, 'import numpy as np\n')] |
import torch, os
from torch import nn
import numpy as np
from tqdm import tqdm
import torch.optim as optim
from torch.optim.lr_scheduler import StepLR
from losses import SSIMLoss, generator_loss, discriminator_loss, generator_loss_separately, adversarial_loss, NRMSELoss, VGGPerceptualLoss
from plotter import plotter_GAN, plotter_UNET
def binary_acc(disc_out, actual_out):
    """Return the fraction of discriminator outputs whose thresholded
    prediction matches the ground-truth label.

    disc_out   -- raw (pre-sigmoid) discriminator logits
    actual_out -- scalar label (True/False or 1/0) broadcast over the batch
    """
    # The sigmoid lives here (not in the discriminator) so the raw logits
    # remain available to the loss functions and edge cases are handled.
    probabilities = torch.sigmoid(disc_out)
    predictions = probabilities > 0.5
    # Broadcast the scalar label to the prediction shape for comparison.
    labels = actual_out * torch.ones(predictions.shape)
    matches = labels == predictions
    return torch.sum(matches) / torch.prod(torch.tensor(list(labels.size())))
def GAN_training(hparams):
    """Adversarially train the generator (UNet) against the discriminator.

    Per epoch: the discriminator is updated ``hparams.disc_epoch`` full
    passes over the training set (trained first, since it tends to be the
    weaker player here), then the generator is updated ``hparams.gen_epoch``
    passes.  Per-step losses, discriminator outputs and accuracies are
    recorded, a checkpoint is written every epoch, and summary plots are
    produced at the end.

    hparams -- configuration object carrying the models, data loaders,
               loss/learning-rate settings and the output directory.

    BUG FIX vs. previous version: ``D_optimizer.zero_grad()`` was never
    called, so discriminator gradients accumulated across every step.
    """
    # Unpack the parameters of interest.
    device = hparams.device
    epochs = hparams.epochs
    lr = hparams.learn_rate
    Lambda = hparams.Lambda
    Lambda_b = hparams.Lambda_b
    UNet1 = hparams.generator
    Discriminator1 = hparams.discriminator
    train_loader = hparams.train_loader
    val_loader = hparams.val_loader
    local_dir = hparams.local_dir + '/learning_rate_{:.4f}_epochs_{}_lambda_{}_gen_epoch_{}_disc_epoch_{}_Lambda_b_{}'.format(hparams.learn_rate,hparams.epochs,hparams.Lambda,hparams.gen_epoch,hparams.disc_epoch,Lambda_b)
    if not os.path.isdir(local_dir):
        os.makedirs(local_dir)
    # Adam betas (0.5, 0.999) as is conventional for GAN training.
    G_optimizer = optim.Adam(UNet1.parameters(), lr=lr, betas=(0.5, 0.999))
    G_scheduler = StepLR(G_optimizer, hparams.step_size, gamma=hparams.decay_gamma)
    D_optimizer = optim.Adam(Discriminator1.parameters(), lr=0.00001, betas=(0.5, 0.999))
    D_scheduler = StepLR(D_optimizer, 5, 0.5)
    train_data_len = train_loader.__len__() # length of training_generator
    # Reconstruction criterion; the VGG perceptual term is added at
    # loss-calculation time in every branch.
    if (hparams.loss_type=='SSIM'):
        main_loss = SSIMLoss().to(device)
    elif (hparams.loss_type=='L1'):
        main_loss = nn.L1Loss()
    elif (hparams.loss_type=='L2'):
        main_loss = nn.MSELoss() #same as L2 loss
    elif (hparams.loss_type=='Perc_L'):#perceptual loss based on vgg
        main_loss = nn.L1Loss()
    VGG_loss = VGGPerceptualLoss().to(device)
    # Discriminator is trained "disc_epoch" times as much as the generator
    # (and first) to avoid a weak discriminator.
    disc_epoch = hparams.disc_epoch
    gen_epoch = hparams.gen_epoch
    # Per-step history buffers.
    G_loss_l1, G_loss_adv = np.zeros((epochs,gen_epoch,train_data_len)), np.zeros((epochs,gen_epoch,train_data_len))
    D_loss_real, D_loss_fake = np.zeros((epochs,disc_epoch,train_data_len)), np.zeros((epochs,disc_epoch,train_data_len))
    D_out_real, D_out_fake = np.zeros((epochs,gen_epoch,train_data_len)), np.zeros((epochs,gen_epoch,train_data_len))
    G_loss_list, D_loss_list = np.zeros((epochs,gen_epoch,train_data_len)), np.zeros((epochs,disc_epoch,train_data_len))
    D_out_acc = np.zeros((epochs,disc_epoch,train_data_len))
    accuracy_results = np.zeros((epochs,disc_epoch))
    # Loop over epochs
    for epoch in tqdm(range(epochs), total=epochs, leave=True):
        # ---- discriminator phase ----
        for disc_epoch_idx in range(disc_epoch):
            for index, sample in (enumerate(train_loader)):
                # Move to CUDA (non-tensor entries are left as-is).
                for key in sample.keys():
                    try:
                        sample[key] = sample[key].to(device)
                    except:
                        pass
                input_img = torch.view_as_real(sample['img_motion_corrupt']).permute(0,3,1,2)
                model_out = UNet1(input_img).permute(0,2,3,1)
                out = torch.view_as_complex(model_out.contiguous())
                generated_image = torch.abs(out)
                target_img = torch.abs(sample['img_gt'])
                G = Discriminator1(generated_image[:,None,:,:])
                # ground-truth labels, real and fake
                real_target = torch.ones(list(G.size())).to(device)
                fake_target = torch.zeros(list(G.size())).to(device)
                # detach so the generator gets no gradient from the D step
                disc_inp_fake = generated_image.detach()
                D_fake = Discriminator1(disc_inp_fake[:,None,:,:])
                D_fake_loss = discriminator_loss(D_fake, fake_target)
                # discriminator real loss
                disc_inp_real = target_img
                D_real = Discriminator1(disc_inp_real[:,None,:,:])
                D_real_loss = discriminator_loss(D_real, real_target)
                # average discriminator loss
                D_total_loss = (D_real_loss + D_fake_loss) / 2
                # FIX: clear stale gradients before the backward pass —
                # previously they accumulated over every discriminator step.
                D_optimizer.zero_grad()
                D_total_loss.backward()
                D_optimizer.step()
                D_out_acc[epoch,disc_epoch_idx,index] = (binary_acc(D_real.cpu(), True) + binary_acc(D_fake.cpu(), False))
                D_loss_list[epoch,disc_epoch_idx,index] = D_total_loss.cpu().detach().numpy()
                D_loss_real[epoch,disc_epoch_idx,index] = D_real_loss.cpu().detach().numpy()
                D_loss_fake[epoch,disc_epoch_idx,index] = D_fake_loss.cpu().detach().numpy()
            # mean accuracy over the pass (binary_acc sums two half-scores)
            accuracy_results[epoch,disc_epoch_idx] = np.sum(D_out_acc[epoch,disc_epoch_idx,:])/(2*train_data_len)
        D_scheduler.step()
        # ---- generator phase ----
        for gen_epoch_idx in range(gen_epoch):
            for index, sample in (enumerate(train_loader)):
                # Move to CUDA
                for key in sample.keys():
                    try:
                        sample[key] = sample[key].to(device)
                    except:
                        pass
                input_img = torch.view_as_real(sample['img_motion_corrupt']).permute(0,3,1,2)
                model_out = UNet1(input_img).permute(0,2,3,1)
                out = torch.view_as_complex(model_out.contiguous())
                generated_image = torch.abs(out)
                target_img = torch.abs(sample['img_gt'])
                G = Discriminator1(generated_image[:,None,:,:])
                # ground-truth labels, real and fake
                real_target = (torch.ones(list(G.size())).to(device))
                fake_target = torch.zeros(list(G.size())).to(device)
                gen_loss = adversarial_loss(G, real_target)
                # NOTE: the torch.tensor([1]) data-range argument assumes
                # inputs normalized to [0, 1] — confirm against the loaders.
                if (hparams.loss_type=='SSIM'):
                    loss_val = main_loss(generated_image[:,None,:,:], target_img[:,None,:,:], torch.tensor([1]).to(device)) + Lambda_b*VGG_loss(generated_image[:,None,:,:], target_img[:,None,:,:])
                else:
                    loss_val = main_loss(generated_image[:,None,:,:], target_img[:,None,:,:]) + Lambda_b*VGG_loss(generated_image[:,None,:,:], target_img[:,None,:,:])
                G_loss = Lambda*gen_loss + loss_val
                # compute gradients and run optimizer step
                G_optimizer.zero_grad()
                G_loss.backward()
                G_optimizer.step()
                # store loss values
                G_loss_list[epoch,gen_epoch_idx,index] = G_loss.cpu().detach().numpy()
                G_loss_l1[epoch,gen_epoch_idx,index], G_loss_adv[epoch,gen_epoch_idx,index] = loss_val.cpu().detach().numpy(), gen_loss.cpu().detach().numpy()
                # store discriminator outputs on fake and real images
                D_out_fake[epoch,gen_epoch_idx,index] = np.mean(G.cpu().detach().numpy())
                G_real = Discriminator1(target_img[:,None,:,:])
                D_out_real[epoch,gen_epoch_idx,index] = np.mean(G_real.cpu().detach().numpy())
        # Scheduler
        G_scheduler.step()
        # per-epoch generator checkpoint
        torch.save({
            'epoch': epoch,
            'model_state_dict': UNet1.state_dict(),
            'optimizer': G_optimizer.state_dict()}, local_dir + '/epoch'+str(epoch)+'_last_weights.pt')
    # Save final models plus the full training history.
    tosave_weights = local_dir +'/saved_weights.pt'
    torch.save({
        'epoch': epoch,
        'model_state_dict': UNet1.state_dict(),
        'optimizer_state_dict': G_optimizer.state_dict(),
        'Discriminator_state_dict':Discriminator1.state_dict(),
        'G_loss_list': G_loss_list,
        'G_loss_l1': G_loss_l1,
        'G_loss_adv': G_loss_adv,
        'D_loss_list': D_loss_list,
        'D_loss_real': D_loss_real,
        'D_loss_fake': D_loss_fake,
        'D_out_real':D_out_real,
        'D_out_fake':D_out_fake,
        'D_out_acc':D_out_acc,
        'hparams': hparams}, tosave_weights)
    plotter_GAN(hparams,tosave_weights,local_dir,UNet1,train_loader,val_loader)
def UNET_training(hparams):
    """Supervised (non-adversarial) training of the UNet generator.

    Trains with the loss selected by ``hparams.loss_type`` plus a VGG
    perceptual term weighted by ``hparams.Lambda_b``, records per-batch
    train/validation losses, checkpoints every epoch and plots at the end.

    hparams -- configuration object carrying the model, data loaders,
               loss/learning-rate settings and the output directory.

    Improvement vs. previous version: the validation pass now runs under
    ``torch.no_grad()`` — no optimizer step happens there, so recorded
    losses are identical while autograd graph construction is skipped.
    """
    device = hparams.device
    epochs = hparams.epochs
    lr = hparams.learn_rate
    UNet1 = hparams.generator
    train_loader = hparams.train_loader
    val_loader = hparams.val_loader
    Lambda_b = hparams.Lambda_b
    local_dir = hparams.local_dir + '/learning_rate_{:.4f}_epochs_{}_lambda_{}_loss_type_{}_Lambda_b{}'.format(hparams.learn_rate,hparams.epochs,hparams.Lambda,hparams.loss_type,Lambda_b)
    if not os.path.isdir(local_dir):
        os.makedirs(local_dir)
    G_optimizer = optim.Adam(UNet1.parameters(), lr=lr)  # Adam chosen; SGD is the alternative
    scheduler = StepLR(G_optimizer, hparams.step_size, gamma=hparams.decay_gamma)
    train_data_len = train_loader.__len__() # length of training_generator
    val_data_len = val_loader.__len__()
    # Reconstruction criterion; all branches also get the VGG term below.
    if (hparams.loss_type=='SSIM'):
        main_loss = SSIMLoss().to(device)
    elif (hparams.loss_type=='L1'):
        main_loss = nn.L1Loss()
    elif (hparams.loss_type=='L2'):
        main_loss = nn.MSELoss() #same as L2 loss
    elif (hparams.loss_type=='Perc_L'):#perceptual loss based on vgg
        main_loss = VGGPerceptualLoss().to(device)
    VGG_loss = VGGPerceptualLoss().to(device)
    # Per-batch loss histories.
    train_loss = np.zeros((epochs,train_data_len))
    val_loss = np.zeros((epochs,val_data_len))
    # Loop over epochs
    for epoch in tqdm(range(epochs), total=epochs, leave=True):
        for sample_idx, sample in (enumerate(train_loader)):
            # Move to CUDA (non-tensor entries are left as-is).
            for key in sample.keys():
                try:
                    sample[key] = sample[key].to(device)
                except:
                    pass
            input_img = torch.view_as_real(sample['img_motion_corrupt']).permute(0,3,1,2)
            model_out = UNet1(input_img).permute(0,2,3,1)
            out = torch.view_as_complex(model_out.contiguous())
            generated_image = torch.abs(out)
            target_img = torch.abs(sample['img_gt'])
            # NOTE: the torch.tensor([1]) data-range argument assumes inputs
            # normalized to [0, 1] — confirm against the loaders.
            if (hparams.loss_type=='SSIM'):
                loss_val = main_loss(generated_image[:,None,:,:], target_img[:,None,:,:], torch.tensor([1]).to(device)) + Lambda_b*VGG_loss(generated_image[:,None,:,:], target_img[:,None,:,:])
            else:
                loss_val = main_loss(generated_image[:,None,:,:], target_img[:,None,:,:]) + Lambda_b*VGG_loss(generated_image[:,None,:,:], target_img[:,None,:,:])
            # compute gradients and run optimizer step
            G_optimizer.zero_grad()
            loss_val.backward()
            G_optimizer.step()
            train_loss[epoch,sample_idx] = loss_val.cpu().detach().numpy()
        # Scheduler
        scheduler.step()
        # per-epoch checkpoint
        torch.save({
            'epoch': epoch,
            'sample_idx': sample_idx,
            'model_state_dict': UNet1.state_dict(),
            'optimizer': G_optimizer.state_dict()}, local_dir + '/epoch'+str(epoch)+'_last_weights.pt')
        # Validation: no parameter updates, so disable autograd entirely.
        with torch.no_grad():
            for sample_idx, sample in (enumerate(val_loader)):
                # Move to CUDA
                for key in sample.keys():
                    try:
                        sample[key] = sample[key].to(device)
                    except:
                        pass
                input_img = torch.view_as_real(sample['img_motion_corrupt']).permute(0,3,1,2)
                model_out = UNet1(input_img).permute(0,2,3,1)
                out = torch.view_as_complex(model_out.contiguous())
                generated_image = torch.abs(out)
                target_img = torch.abs(sample['img_gt'])
                if (hparams.loss_type=='SSIM'):
                    loss_val = main_loss(generated_image[:,None,:,:], target_img[:,None,:,:], torch.tensor([1]).to(device))
                else:
                    loss_val = main_loss(generated_image[:,None,:,:], target_img[:,None,:,:])
                val_loss[epoch,sample_idx] = loss_val.cpu().detach().numpy()
    # Save final model plus the loss histories.
    tosave_weights = local_dir +'/saved_weights.pt'
    torch.save({
        'epoch': epoch,
        'model_state_dict': UNet1.state_dict(),
        'optimizer_state_dict': G_optimizer.state_dict(),
        'train_loss': train_loss,
        'val_loss': val_loss,
        'hparams': hparams}, tosave_weights)
    plotter_UNET(hparams,tosave_weights,local_dir,UNet1,train_loader,val_loader)
"torch.optim.lr_scheduler.StepLR",
"numpy.sum",
"torch.ones",
"torch.nn.MSELoss",
"losses.SSIMLoss",
"plotter.plotter_UNET",
"plotter.plotter_GAN",
"losses.adversarial_loss",
"torch.sum",
"losses.discriminator_loss",
"torch.nn.Sigmoid",
"os.makedirs",
"torch.nn.L1Loss",
"os.path.isdir",
... | [((435, 447), 'torch.nn.Sigmoid', 'nn.Sigmoid', ([], {}), '()\n', (445, 447), False, 'from torch import nn\n'), ((1641, 1706), 'torch.optim.lr_scheduler.StepLR', 'StepLR', (['G_optimizer', 'hparams.step_size'], {'gamma': 'hparams.decay_gamma'}), '(G_optimizer, hparams.step_size, gamma=hparams.decay_gamma)\n', (1647, 1706), False, 'from torch.optim.lr_scheduler import StepLR\n'), ((1815, 1842), 'torch.optim.lr_scheduler.StepLR', 'StepLR', (['D_optimizer', '(5)', '(0.5)'], {}), '(D_optimizer, 5, 0.5)\n', (1821, 1842), False, 'from torch.optim.lr_scheduler import StepLR\n'), ((3317, 3363), 'numpy.zeros', 'np.zeros', (['(epochs, disc_epoch, train_data_len)'], {}), '((epochs, disc_epoch, train_data_len))\n', (3325, 3363), True, 'import numpy as np\n'), ((3393, 3423), 'numpy.zeros', 'np.zeros', (['(epochs, disc_epoch)'], {}), '((epochs, disc_epoch))\n', (3401, 3423), True, 'import numpy as np\n'), ((9109, 9194), 'plotter.plotter_GAN', 'plotter_GAN', (['hparams', 'tosave_weights', 'local_dir', 'UNet1', 'train_loader', 'val_loader'], {}), '(hparams, tosave_weights, local_dir, UNet1, train_loader, val_loader\n )\n', (9120, 9194), False, 'from plotter import plotter_GAN, plotter_UNET\n'), ((9848, 9913), 'torch.optim.lr_scheduler.StepLR', 'StepLR', (['G_optimizer', 'hparams.step_size'], {'gamma': 'hparams.decay_gamma'}), '(G_optimizer, hparams.step_size, gamma=hparams.decay_gamma)\n', (9854, 9913), False, 'from torch.optim.lr_scheduler import StepLR\n'), ((10534, 10568), 'numpy.zeros', 'np.zeros', (['(epochs, train_data_len)'], {}), '((epochs, train_data_len))\n', (10542, 10568), True, 'import numpy as np\n'), ((10641, 10673), 'numpy.zeros', 'np.zeros', (['(epochs, val_data_len)'], {}), '((epochs, val_data_len))\n', (10649, 10673), True, 'import numpy as np\n'), ((13855, 13940), 'plotter.plotter_UNET', 'plotter_UNET', (['hparams', 'tosave_weights', 'local_dir', 'UNet1', 'train_loader', 'val_loader'], {}), '(hparams, tosave_weights, local_dir, UNet1, train_loader,\n 
val_loader)\n', (13867, 13940), False, 'from plotter import plotter_GAN, plotter_UNET\n'), ((619, 652), 'torch.ones', 'torch.ones', (['disc_prediction.shape'], {}), '(disc_prediction.shape)\n', (629, 652), False, 'import torch, os\n'), ((707, 725), 'torch.sum', 'torch.sum', (['compare'], {}), '(compare)\n', (716, 725), False, 'import torch, os\n'), ((1445, 1469), 'os.path.isdir', 'os.path.isdir', (['local_dir'], {}), '(local_dir)\n', (1458, 1469), False, 'import torch, os\n'), ((1479, 1501), 'os.makedirs', 'os.makedirs', (['local_dir'], {}), '(local_dir)\n', (1490, 1501), False, 'import torch, os\n'), ((2833, 2878), 'numpy.zeros', 'np.zeros', (['(epochs, gen_epoch, train_data_len)'], {}), '((epochs, gen_epoch, train_data_len))\n', (2841, 2878), True, 'import numpy as np\n'), ((2878, 2923), 'numpy.zeros', 'np.zeros', (['(epochs, gen_epoch, train_data_len)'], {}), '((epochs, gen_epoch, train_data_len))\n', (2886, 2923), True, 'import numpy as np\n'), ((2954, 3000), 'numpy.zeros', 'np.zeros', (['(epochs, disc_epoch, train_data_len)'], {}), '((epochs, disc_epoch, train_data_len))\n', (2962, 3000), True, 'import numpy as np\n'), ((3000, 3046), 'numpy.zeros', 'np.zeros', (['(epochs, disc_epoch, train_data_len)'], {}), '((epochs, disc_epoch, train_data_len))\n', (3008, 3046), True, 'import numpy as np\n'), ((3076, 3121), 'numpy.zeros', 'np.zeros', (['(epochs, gen_epoch, train_data_len)'], {}), '((epochs, gen_epoch, train_data_len))\n', (3084, 3121), True, 'import numpy as np\n'), ((3121, 3166), 'numpy.zeros', 'np.zeros', (['(epochs, gen_epoch, train_data_len)'], {}), '((epochs, gen_epoch, train_data_len))\n', (3129, 3166), True, 'import numpy as np\n'), ((3196, 3241), 'numpy.zeros', 'np.zeros', (['(epochs, gen_epoch, train_data_len)'], {}), '((epochs, gen_epoch, train_data_len))\n', (3204, 3241), True, 'import numpy as np\n'), ((3241, 3287), 'numpy.zeros', 'np.zeros', (['(epochs, disc_epoch, train_data_len)'], {}), '((epochs, disc_epoch, train_data_len))\n', (3249, 3287), 
True, 'import numpy as np\n'), ((9674, 9698), 'os.path.isdir', 'os.path.isdir', (['local_dir'], {}), '(local_dir)\n', (9687, 9698), False, 'import torch, os\n'), ((9708, 9730), 'os.makedirs', 'os.makedirs', (['local_dir'], {}), '(local_dir)\n', (9719, 9730), False, 'import torch, os\n'), ((2139, 2150), 'torch.nn.L1Loss', 'nn.L1Loss', ([], {}), '()\n', (2148, 2150), False, 'from torch import nn\n'), ((2420, 2439), 'losses.VGGPerceptualLoss', 'VGGPerceptualLoss', ([], {}), '()\n', (2437, 2439), False, 'from losses import SSIMLoss, generator_loss, discriminator_loss, generator_loss_separately, adversarial_loss, NRMSELoss, VGGPerceptualLoss\n'), ((10250, 10261), 'torch.nn.L1Loss', 'nn.L1Loss', ([], {}), '()\n', (10259, 10261), False, 'from torch import nn\n'), ((10486, 10505), 'losses.VGGPerceptualLoss', 'VGGPerceptualLoss', ([], {}), '()\n', (10503, 10505), False, 'from losses import SSIMLoss, generator_loss, discriminator_loss, generator_loss_separately, adversarial_loss, NRMSELoss, VGGPerceptualLoss\n'), ((11358, 11372), 'torch.abs', 'torch.abs', (['out'], {}), '(out)\n', (11367, 11372), False, 'import torch, os\n'), ((11408, 11435), 'torch.abs', 'torch.abs', (["sample['img_gt']"], {}), "(sample['img_gt'])\n", (11417, 11435), False, 'import torch, os\n'), ((13084, 13098), 'torch.abs', 'torch.abs', (['out'], {}), '(out)\n', (13093, 13098), False, 'import torch, os\n'), ((13134, 13161), 'torch.abs', 'torch.abs', (["sample['img_gt']"], {}), "(sample['img_gt'])\n", (13143, 13161), False, 'import torch, os\n'), ((2060, 2070), 'losses.SSIMLoss', 'SSIMLoss', ([], {}), '()\n', (2068, 2070), False, 'from losses import SSIMLoss, generator_loss, discriminator_loss, generator_loss_separately, adversarial_loss, NRMSELoss, VGGPerceptualLoss\n'), ((2208, 2220), 'torch.nn.MSELoss', 'nn.MSELoss', ([], {}), '()\n', (2218, 2220), False, 'from torch import nn\n'), ((4203, 4217), 'torch.abs', 'torch.abs', (['out'], {}), '(out)\n', (4212, 4217), False, 'import torch, os\n'), ((4257, 
4284), 'torch.abs', 'torch.abs', (["sample['img_gt']"], {}), "(sample['img_gt'])\n", (4266, 4284), False, 'import torch, os\n'), ((4695, 4734), 'losses.discriminator_loss', 'discriminator_loss', (['D_fake', 'fake_target'], {}), '(D_fake, fake_target)\n', (4713, 4734), False, 'from losses import SSIMLoss, generator_loss, discriminator_loss, generator_loss_separately, adversarial_loss, NRMSELoss, VGGPerceptualLoss\n'), ((4923, 4962), 'losses.discriminator_loss', 'discriminator_loss', (['D_real', 'real_target'], {}), '(D_real, real_target)\n', (4941, 4962), False, 'from losses import SSIMLoss, generator_loss, discriminator_loss, generator_loss_separately, adversarial_loss, NRMSELoss, VGGPerceptualLoss\n'), ((5666, 5709), 'numpy.sum', 'np.sum', (['D_out_acc[epoch, disc_epoch_idx, :]'], {}), '(D_out_acc[epoch, disc_epoch_idx, :])\n', (5672, 5709), True, 'import numpy as np\n'), ((6384, 6398), 'torch.abs', 'torch.abs', (['out'], {}), '(out)\n', (6393, 6398), False, 'import torch, os\n'), ((6438, 6465), 'torch.abs', 'torch.abs', (["sample['img_gt']"], {}), "(sample['img_gt'])\n", (6447, 6465), False, 'import torch, os\n'), ((6758, 6790), 'losses.adversarial_loss', 'adversarial_loss', (['G', 'real_target'], {}), '(G, real_target)\n', (6774, 6790), False, 'from losses import SSIMLoss, generator_loss, discriminator_loss, generator_loss_separately, adversarial_loss, NRMSELoss, VGGPerceptualLoss\n'), ((10171, 10181), 'losses.SSIMLoss', 'SSIMLoss', ([], {}), '()\n', (10179, 10181), False, 'from losses import SSIMLoss, generator_loss, discriminator_loss, generator_loss_separately, adversarial_loss, NRMSELoss, VGGPerceptualLoss\n'), ((10319, 10331), 'torch.nn.MSELoss', 'nn.MSELoss', ([], {}), '()\n', (10329, 10331), False, 'from torch import nn\n'), ((2328, 2339), 'torch.nn.L1Loss', 'nn.L1Loss', ([], {}), '()\n', (2337, 2339), False, 'from torch import nn\n'), ((11107, 11155), 'torch.view_as_real', 'torch.view_as_real', (["sample['img_motion_corrupt']"], {}), 
"(sample['img_motion_corrupt'])\n", (11125, 11155), False, 'import torch, os\n'), ((12833, 12881), 'torch.view_as_real', 'torch.view_as_real', (["sample['img_motion_corrupt']"], {}), "(sample['img_motion_corrupt'])\n", (12851, 12881), False, 'import torch, os\n'), ((3940, 3988), 'torch.view_as_real', 'torch.view_as_real', (["sample['img_motion_corrupt']"], {}), "(sample['img_motion_corrupt'])\n", (3958, 3988), False, 'import torch, os\n'), ((6121, 6169), 'torch.view_as_real', 'torch.view_as_real', (["sample['img_motion_corrupt']"], {}), "(sample['img_motion_corrupt'])\n", (6139, 6169), False, 'import torch, os\n'), ((10439, 10458), 'losses.VGGPerceptualLoss', 'VGGPerceptualLoss', ([], {}), '()\n', (10456, 10458), False, 'from losses import SSIMLoss, generator_loss, discriminator_loss, generator_loss_separately, adversarial_loss, NRMSELoss, VGGPerceptualLoss\n'), ((13296, 13313), 'torch.tensor', 'torch.tensor', (['[1]'], {}), '([1])\n', (13308, 13313), False, 'import torch, os\n'), ((11757, 11774), 'torch.tensor', 'torch.tensor', (['[1]'], {}), '([1])\n', (11769, 11774), False, 'import torch, os\n'), ((7121, 7138), 'torch.tensor', 'torch.tensor', (['[1]'], {}), '([1])\n', (7133, 7138), False, 'import torch, os\n')] |
#-*- coding:utf-8 -*-
import cv2
import numpy as np
###################################################
# O USO DE UM FILTRO PROS 'GOOD MATCHES' FAZ COM QUE HAJA UM DELAY DE ADAPTACAO
# (ao omitir a raposa, o programa ainda vai printar 'raposa encontrada' por alguns segundos)
# ISSO NAO É UM ERRO, É UMA CARACTERISTICA DE USAR O FILTRO PARA RESULTADOS MAIS
# CONSISTENTES AO ACHAR A RAPOSA E AO NAO ACHAR, COM ISSO VOCE PODE CHACOALHAR A
# RAPOSA E O PROGRAMA AINDA RECONHECE ELA.
###################################################
# Webcam capture plus the grayscale reference image ("madfox") to match against.
cap = cv2.VideoCapture(0)
LAST_GOODS = []  # rolling history of good-match counts (the smoothing filter)
img1 = cv2.cvtColor(cv2.imread("ala.png"), cv2.COLOR_BGR2GRAY)
def auto_canny(image, sigma=0.05):
    """Canny edge detection with thresholds derived from the image's
    median intensity (lower/upper at -/+ sigma around the median)."""
    median_value = np.median(image)
    low_threshold = int(max(0, (1.0 - sigma) * median_value))
    high_threshold = int(min(255, (1.0 + sigma) * median_value))
    return cv2.Canny(image, low_threshold, high_threshold)
def drawMatches(img1, kp1, img2, kp2, matches):
    """Debug visualisation: show both grayscale images side by side and
    draw each knn match as two circles joined by a line.

    Opens its own cv2 window ('Matched Features'), shown at half size.
    """
    rows1, cols1 = img1.shape[0], img1.shape[1]
    rows2, cols2 = img2.shape[0], img2.shape[1]
    # Canvas tall enough for both images and wide enough to hold them side by side.
    canvas = np.zeros((max([rows1, rows2]), cols1 + cols2, 3), dtype='uint8')
    canvas[:rows1, :cols1] = np.dstack([img1, img1, img1])
    canvas[:rows2, cols1:] = np.dstack([img2, img2, img2])
    for pair in matches:
        best = pair[0]  # take the best of each knn pair
        (x1, y1) = kp1[best.queryIdx].pt
        (x2, y2) = kp2[best.trainIdx].pt
        cv2.circle(canvas, (int(x1), int(y1)), 4, (255, 0, 0), 1)
        cv2.circle(canvas, (int(x2) + cols1, int(y2)), 4, (255, 0, 0), 1)
        cv2.line(canvas, (int(x1), int(y1)), (int(x2) + cols1, int(y2)), (255, 0, 0), 1)
    # Used out of curiosity, to see where the matches land.
    cv2.imshow('Matched Features', cv2.resize(canvas, (0, 0), fx=0.5, fy=0.5))
def simpledrawMatches(img2, kp2, matches):
    """Draw a circle on the live webcam frame at every knn-match keypoint.

    Draws in place on the module-level ``frame_pure`` (the current frame);
    this is the variant actually in use, unlike drawMatches above.
    """
    out = frame_pure  # alias: circles land on the original frame itself
    for mat in matches:
        mat = mat[0]  # best of each knn pair
        img2_idx = mat.trainIdx
        (x2,y2) = kp2[img2_idx].pt
        cv2.circle(out, (int(x2),int(y2)), 4, (255, 0, 0), 1) # draws the matches on the original frame itself (this is the one in use)
def findmatches(img2):
    """SIFT+FLANN match the reference image against grayscale frame img2.

    Appends the good-match count to the rolling LAST_GOODS filter; when the
    filter mean crosses the threshold, reports the fox and draws the matches
    on the live frame.  Relies on module globals: sift, flann, des1
    (reference descriptors) and LAST_GOODS.
    """
    global LAST_GOODS
    kp2, des2 = sift.detectAndCompute(img2,None)
    matches = flann.knnMatch(des1,des2,k=2)
    good = []
    # Lowe's ratio test with a strict 0.3 ratio: keep only confident matches.
    for m,n in matches:
        if m.distance < 0.3*n.distance:
            good.append(m)
    # print(np.array(LAST_GOODS).mean()) ## prints the filter value, used to find the threshold that indicates the madfox is in the image
    LAST_GOODS.append(len(good))
    if len(LAST_GOODS) > 30: # keep the filter window bounded (threshold found using the print above)
        LAST_GOODS = LAST_GOODS[10:]
    if np.array(LAST_GOODS).mean() > 1:
        print('A raposa foi encontrada')
        simpledrawMatches(img2,kp2,matches)
#############
# All of this is done once, up front, to reduce processing — the madfox
# reference image never changes, so its keypoints/descriptors are computed once.
#############
sift = cv2.xfeatures2d.SIFT_create()
kp1, des1 = sift.detectAndCompute(img1,None)
FLANN_INDEX_KDTREE = 0
index_params = dict(algorithm = FLANN_INDEX_KDTREE, trees = 5)
search_params = dict(checks = 50)
flann = cv2.FlannBasedMatcher(index_params, search_params)
############
# End of the one-time setup
############
while True:
    ret, frame_pure = cap.read()
    compare = cv2.cvtColor(frame_pure, cv2.COLOR_BGR2GRAY)
    frame = auto_canny(frame_pure)
    # NOTE(review): frame.any() returns a bool, never None, so this condition
    # is always True — `if frame is not None:` was probably intended.
    if frame.any() != None:
        circles = cv2.HoughCircles(frame,cv2.HOUGH_GRADIENT,1.4,70,param1=50,param2=100,minRadius=5,maxRadius=80)
        frame = cv2.cvtColor(frame, cv2.COLOR_GRAY2BGR)
        if circles is not None:
            circles = np.uint16(np.around(circles))
            findmatches(compare)
            for i in circles[0,:]:
                cv2.circle(frame_pure,(i[0],i[1]),i[2],(0,255,0),2) # circles are drawn on the original frame
                cv2.circle(frame_pure,(i[0],i[1]),2,(0,0,255),3)
            # print the number of circles found
            if len(circles[0,:]) > 1:
                print(str(len(circles[0,:])) + " circles were found")
            else:
                print(str(len(circles[0,:])) + " circle were found")
        else:
            print("No circles were found")
    cv2.imshow('Pure frame com circulos',frame_pure)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
cap.release()
cv2.destroyAllWindows()
| [
"numpy.dstack",
"cv2.Canny",
"cv2.HoughCircles",
"cv2.circle",
"numpy.median",
"cv2.cvtColor",
"cv2.waitKey",
"cv2.FlannBasedMatcher",
"cv2.imshow",
"cv2.VideoCapture",
"cv2.imread",
"numpy.around",
"numpy.array",
"cv2.xfeatures2d.SIFT_create",
"cv2.destroyAllWindows",
"cv2.resize"
] | [((543, 562), 'cv2.VideoCapture', 'cv2.VideoCapture', (['(0)'], {}), '(0)\n', (559, 562), False, 'import cv2\n'), ((2746, 2775), 'cv2.xfeatures2d.SIFT_create', 'cv2.xfeatures2d.SIFT_create', ([], {}), '()\n', (2773, 2775), False, 'import cv2\n'), ((2952, 3002), 'cv2.FlannBasedMatcher', 'cv2.FlannBasedMatcher', (['index_params', 'search_params'], {}), '(index_params, search_params)\n', (2973, 3002), False, 'import cv2\n'), ((4003, 4026), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (4024, 4026), False, 'import cv2\n'), ((601, 622), 'cv2.imread', 'cv2.imread', (['"""ala.png"""'], {}), "('ala.png')\n", (611, 622), False, 'import cv2\n'), ((685, 701), 'numpy.median', 'np.median', (['image'], {}), '(image)\n', (694, 701), True, 'import numpy as np\n'), ((794, 824), 'cv2.Canny', 'cv2.Canny', (['image', 'lower', 'upper'], {}), '(image, lower, upper)\n', (803, 824), False, 'import cv2\n'), ((1073, 1102), 'numpy.dstack', 'np.dstack', (['[img1, img1, img1]'], {}), '([img1, img1, img1])\n', (1082, 1102), True, 'import numpy as np\n'), ((1126, 1155), 'numpy.dstack', 'np.dstack', (['[img2, img2, img2]'], {}), '([img2, img2, img2])\n', (1135, 1155), True, 'import numpy as np\n'), ((3101, 3145), 'cv2.cvtColor', 'cv2.cvtColor', (['frame_pure', 'cv2.COLOR_BGR2GRAY'], {}), '(frame_pure, cv2.COLOR_BGR2GRAY)\n', (3113, 3145), False, 'import cv2\n'), ((1535, 1574), 'cv2.resize', 'cv2.resize', (['out', '(0, 0)'], {'fx': '(0.5)', 'fy': '(0.5)'}), '(out, (0, 0), fx=0.5, fy=0.5)\n', (1545, 1574), False, 'import cv2\n'), ((3218, 3324), 'cv2.HoughCircles', 'cv2.HoughCircles', (['frame', 'cv2.HOUGH_GRADIENT', '(1.4)', '(70)'], {'param1': '(50)', 'param2': '(100)', 'minRadius': '(5)', 'maxRadius': '(80)'}), '(frame, cv2.HOUGH_GRADIENT, 1.4, 70, param1=50, param2=100,\n minRadius=5, maxRadius=80)\n', (3234, 3324), False, 'import cv2\n'), ((3325, 3364), 'cv2.cvtColor', 'cv2.cvtColor', (['frame', 'cv2.COLOR_GRAY2BGR'], {}), '(frame, cv2.COLOR_GRAY2BGR)\n', (3337, 3364), 
False, 'import cv2\n'), ((3891, 3940), 'cv2.imshow', 'cv2.imshow', (['"""Pure frame com circulos"""', 'frame_pure'], {}), "('Pure frame com circulos', frame_pure)\n", (3901, 3940), False, 'import cv2\n'), ((3945, 3959), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (3956, 3959), False, 'import cv2\n'), ((2466, 2486), 'numpy.array', 'np.array', (['LAST_GOODS'], {}), '(LAST_GOODS)\n', (2474, 2486), True, 'import numpy as np\n'), ((3416, 3434), 'numpy.around', 'np.around', (['circles'], {}), '(circles)\n', (3425, 3434), True, 'import numpy as np\n'), ((3492, 3550), 'cv2.circle', 'cv2.circle', (['frame_pure', '(i[0], i[1])', 'i[2]', '(0, 255, 0)', '(2)'], {}), '(frame_pure, (i[0], i[1]), i[2], (0, 255, 0), 2)\n', (3502, 3550), False, 'import cv2\n'), ((3599, 3654), 'cv2.circle', 'cv2.circle', (['frame_pure', '(i[0], i[1])', '(2)', '(0, 0, 255)', '(3)'], {}), '(frame_pure, (i[0], i[1]), 2, (0, 0, 255), 3)\n', (3609, 3654), False, 'import cv2\n')] |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import random
import numpy as np
import matplotlib
from matplotlib.colors import LogNorm
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.metrics import mean_squared_error
import utils
def plot_training_valid_loss(training_loss, valid_loss, loss1_txt, loss2_txt, output_directory):
    """Draw per-epoch training vs. validation loss curves and save them as an SVG.

    training_loss / valid_loss: per-epoch loss sequences.
    loss1_txt / loss2_txt: legend labels; also used to build the output filename.
    output_directory: path prefix the figure filename is appended to.
    """
    plt.figure(figsize=(16, 9))
    epochs = np.arange(len(training_loss))
    plt.plot(epochs, training_loss)
    plt.plot(valid_loss)
    plt.xlabel('Epoch', fontsize=18)
    plt.ylabel('Loss', fontsize=18)
    plt.legend([loss1_txt, loss2_txt], fontsize=18)
    out_path = output_directory + loss1_txt + '_' + loss2_txt + '_loss.svg'
    plt.savefig(out_path)
    plt.close()
def plot_cors(obs, pred, output_directory, title='basset_cor_hist.svg'):
    """Histogram the per-peak Pearson correlations between obs and pred.

    obs, pred: 2-D arrays (peaks x cell types) of ground truth and predictions.
    output_directory: path prefix for the saved figure.
    title: filename of the histogram SVG.
    Returns the list of per-peak correlations with NaN entries removed.
    """
    correlations = []
    # Renamed from `vars`, which shadowed the builtin vars().
    variances = []
    for i in range(len(pred)):
        variances.append(np.var(obs[i, :]))
        correlations.append(np.corrcoef(pred[i, :], obs[i, :])[0, 1])
    # Variance-weighted mean correlation (printed diagnostic only; NaN
    # correlations propagate into it, matching the original behavior).
    weighted_cor = np.dot(correlations, variances) / np.sum(variances)
    print('weighted_cor is {}'.format(weighted_cor))
    nan_cors = [value for value in correlations if math.isnan(value)]
    print("number of NaN values: %d" % len(nan_cors))
    # Drop NaNs (constant obs rows give undefined correlation) before plotting.
    correlations = [value for value in correlations if not math.isnan(value)]
    plt.clf()
    plt.hist(correlations, bins=30)
    plt.axvline(np.mean(correlations), color='r', linestyle='dashed', linewidth=2)
    plt.axvline(0, color='k', linestyle='solid', linewidth=2)
    try:
        plt.title("histogram of correlation. Avg cor = {%f}" % np.mean(correlations))
    except Exception as e:
        print("could not set the title for graph")
        print(e)
    plt.ylabel("Frequency")
    plt.xlabel("correlation")
    plt.savefig(output_directory + title)
    plt.close()
    return correlations
def plot_mses(obs, pred, output_directory, title='basset_mse_hist.svg'):
    """Histogram the per-peak mean squared error between obs and pred.

    obs, pred: 2-D arrays (peaks x cell types).
    output_directory: path prefix for the saved figure.
    title: filename of the histogram SVG.
    Returns the list of per-peak MSE values with NaN entries removed.
    """
    mses = []
    var_all = []
    for row in range(len(pred)):
        var_all.append(np.var(obs[row, :]))
        mses.append(mean_squared_error(obs[row, :], pred[row, :]))
    # Variance-weighted mean MSE, reported as a diagnostic only.
    weighted_mse = np.dot(mses, var_all) / np.sum(var_all)
    print('weighted_mse is {}'.format(weighted_mse))
    nan_mses = [m for m in mses if math.isnan(m)]
    print("number of NaN values in MSE: %d" % len(nan_mses))
    mses = [m for m in mses if not math.isnan(m)]
    plt.clf()
    plt.hist(mses, bins=30)
    plt.axvline(np.mean(mses), color='r', linestyle='dashed', linewidth=2)
    plt.axvline(0, color='k', linestyle='solid', linewidth=2)
    try:
        plt.title("histogram of mse. Avg mse = {%f}" % np.mean(mses))
    except Exception as e:
        print("could not set the title for graph")
        print(e)
    plt.ylabel("Frequency")
    plt.xlabel("MSE")
    plt.savefig(output_directory + title)
    plt.close()
    return mses
def plot_cors_piechart(correlations, eval_labels, output_directory, title=None):
    """Bucket peaks by prediction correlation, plot the bucket sizes as a pie
    chart, and compare SD/IQR of the ground-truth activity across buckets.

    correlations: per-peak correlation values.
    eval_labels: ground-truth activity matrix (indexed by peak along axis 0).
    output_directory: path prefix for the saved figures.
    title: optional filename for the pie chart SVG (default basset_cor_pie.svg).
    Returns ind_collection: six lists of peak indices, [Q0..Q5], where Q0 is
    the cor>0.9 subset of Q1 and Q5 holds negative correlations.
    """
    ind_collection = []
    Q0_idx = []
    Q1_idx = []
    Q2_idx = []
    Q3_idx = []
    Q4_idx = []
    Q5_idx = []
    ind_collection.append(Q0_idx)
    ind_collection.append(Q1_idx)
    ind_collection.append(Q2_idx)
    ind_collection.append(Q3_idx)
    ind_collection.append(Q4_idx)
    ind_collection.append(Q5_idx)
    # Assign each peak to a correlation bucket. Note Q0 overlaps Q1
    # (cor>0.9 peaks appear in both); peaks with cor exactly 0 land nowhere.
    for i, x in enumerate(correlations):
        if x > 0.75:
            Q1_idx.append(i)
            if x > 0.9:
                Q0_idx.append(i)
        elif x > 0.5 and x <= 0.75:
            Q2_idx.append(i)
        elif x > 0.25 and x <= 0.5:
            Q3_idx.append(i)
        elif x > 0 and x <= 0.25:
            Q4_idx.append(i)
        elif x < 0:
            Q5_idx.append(i)
    # pie chart of correlations distribution (Q1..Q5 only; Q0 is a subset of Q1)
    pie_labels = "cor>0.75", "0.5<cor<0.75", "0.25<cor<0.5", "0<cor<0.25", 'cor<0'
    sizes = [len(Q1_idx), len(Q2_idx), len(Q3_idx), len(Q4_idx), len(Q5_idx)]
    colors = ['gold', 'yellowgreen', 'lightcoral', 'lightskyblue', 'red']
    explode = (0.1, 0, 0, 0, 0)  # explode 1st slice
    plt.pie(sizes, explode=explode, labels=pie_labels, colors=colors,
            autopct='%1.1f%%', shadow=True, startangle=140)
    plt.axis('equal')
    plt.title('correlation_pie')
    if not title:
        title = "basset_cor_pie.svg"
    plt.savefig(output_directory + title)
    plt.close()
    # Plot relation between SD/IQR vs prediction performance
    # Q0 = eval_labels[Q0_idx]
    Q1 = eval_labels[Q1_idx]
    Q2 = eval_labels[Q2_idx]
    Q3 = eval_labels[Q3_idx]
    Q4 = eval_labels[Q4_idx]
    Q5 = eval_labels[Q5_idx]
    # Per-peak standard deviation across cell types, one array per bucket.
    sd1 = np.std(Q1, axis=1)
    sd2 = np.std(Q2, axis=1)
    sd3 = np.std(Q3, axis=1)
    sd4 = np.std(Q4, axis=1)
    sd5 = np.std(Q5, axis=1)
    # Per-peak interquartile range (75th - 25th percentile) per bucket.
    qr1 = np.percentile(Q1, 75, axis=1) - np.percentile(Q1, 25, axis=1)
    qr2 = np.percentile(Q2, 75, axis=1) - np.percentile(Q2, 25, axis=1)
    qr3 = np.percentile(Q3, 75, axis=1) - np.percentile(Q3, 25, axis=1)
    qr4 = np.percentile(Q4, 75, axis=1) - np.percentile(Q4, 25, axis=1)
    qr5 = np.percentile(Q5, 75, axis=1) - np.percentile(Q5, 25, axis=1)
    mean_sds = []
    mean_sd1 = np.mean(sd1)
    mean_sd2 = np.mean(sd2)
    mean_sd3 = np.mean(sd3)
    mean_sd4 = np.mean(sd4)
    mean_sd5 = np.mean(sd5)
    mean_sds.append(mean_sd1)
    mean_sds.append(mean_sd2)
    mean_sds.append(mean_sd3)
    mean_sds.append(mean_sd4)
    mean_sds.append(mean_sd5)
    print('1st sd: {0}, 2nd sd: {1}, 3rd sd: {2}, 4th sd: {3}'.format(mean_sd1, mean_sd2, mean_sd3, mean_sd4))
    mean_qrs = []
    mean_qr1 = np.mean(qr1)
    mean_qr2 = np.mean(qr2)
    mean_qr3 = np.mean(qr3)
    mean_qr4 = np.mean(qr4)
    mean_qr5 = np.mean(qr5)
    mean_qrs.append(mean_qr1)
    mean_qrs.append(mean_qr2)
    mean_qrs.append(mean_qr3)
    mean_qrs.append(mean_qr4)
    mean_qrs.append(mean_qr5)
    print('1st qr: {0}, 2nd qr: {1}, 3rd qr: {2}, 4th qr: {3}'.format(mean_qr1, mean_qr2, mean_qr3, mean_qr4))
    # Grouped bar chart: average SD next to average IQR for each bucket.
    x_axis = np.arange(5)
    width = 0.3
    xticks = ["cor>0.75", "0.5<cor<0.75", "0.25<cor<0.5", "0<cor<0.25", 'cor<0']
    plt.figure(figsize=(16, 9))
    plt.bar(x_axis, mean_sds, width, color='#fc8d91', edgecolor='none', label='standard deviation')
    plt.bar(x_axis + width, mean_qrs, width, color='#f7d00e', edgecolor='none', label='interquartile range')
    plt.xticks(x_axis + width, xticks, fontsize=16)
    plt.title('Comparison among good and bad peaks')
    plt.xlabel('peaks class', fontsize=18)
    plt.ylabel('average', fontsize=18)
    plt.legend()
    plt.savefig(output_directory + "basset_SD_IQR.svg")
    plt.close()
    return ind_collection
def plot_corr_variance(labels, correlations, output_directory):
    """Visualize how per-peak ground-truth variance relates to prediction
    correlation.

    Saves three figures: a scatter plot, a 2-D histogram, and a 2-D histogram
    with log-scaled counts.
    """
    peak_var = np.var(labels, axis=1)

    def _draw(plot_fn, filename, with_colorbar):
        # Shared scaffolding: figure, axis labels, optional colorbar, save.
        plt.figure(figsize=(16, 9))
        plot_fn()
        plt.xlabel('Peak variance', fontsize=18)
        plt.ylabel('Prediction-ground truth correlation', fontsize=18)
        if with_colorbar:
            plt.colorbar()
        plt.savefig(output_directory + filename)
        plt.close()

    _draw(lambda: plt.scatter(peak_var, correlations),
          "variance_correlation_plot.svg", False)
    _draw(lambda: plt.hist2d(peak_var, correlations, bins=100),
          "variance_correlation_hist2D.svg", True)
    _draw(lambda: plt.hist2d(peak_var, correlations, bins=100, norm=LogNorm()),
          "variance_correlation_loghist2d.svg", True)
#get cell-wise correlations
def plot_cell_cors(obs, pred, cell_labels, output_directory,num_classes):
    """Compute and plot the per-cell-type correlation between obs and pred.

    Saves a histogram of the correlations and a per-cell-type bar graph;
    returns the list of correlations (one per column of pred).
    """
    correlations = [np.corrcoef(pred[:, col], obs[:, col])[0, 1]
                    for col in range(pred.shape[1])]
    avg_cor = np.mean(correlations)
    # Histogram of cell-wise correlations with the mean marked.
    plt.clf()
    plt.hist(correlations, bins=30)
    plt.axvline(avg_cor, color='k', linestyle='dashed', linewidth=2)
    try:
        plt.title("histogram of correlation. Avg cor = {0:.2f}".format(avg_cor))
    except Exception as e:
        print("could not set the title for graph")
        print(e)
    plt.ylabel("Frequency")
    plt.xlabel("Correlation")
    plt.savefig(output_directory + "basset_cell_wise_cor_hist.svg")
    plt.close()
    # Bar graph: one bar per cell type, labelled with cell_labels.
    plt.clf()
    plt.bar(np.arange(num_classes), correlations)
    plt.title("Correlations by Cell Type")
    plt.ylabel("Correlation")
    plt.xlabel("Cell Type")
    plt.xticks(np.arange(num_classes), cell_labels, rotation='vertical', fontsize=3.5)
    plt.savefig(output_directory + "cellwise_cor_bargraph.svg")
    plt.close()
    return correlations
# plot some predictions vs ground_truth on test set
def plot_random_predictions(eval_labels, predictions, correlations, ind_collection, eval_names, output_directory, num_classes, cell_names, title=None, scale=True):
    """Plot randomly sampled prediction-vs-truth bar charts, three rounds per
    correlation bucket from ind_collection.

    eval_labels / predictions: activity matrices indexed by peak.
    correlations: per-peak correlation values (for figure titles).
    ind_collection: buckets of peak indices (from plot_cors_piechart).
    eval_names: per-peak names used in figure titles.
    scale: if True, min-max-normalize predictions via utils.minmax_scale
        before plotting (presumably to the range of the true activity —
        TODO confirm against utils).
    """
    for n in range(3):
        mum_plt_row = 1
        mum_plt_col = 1
        num_plt = mum_plt_row * mum_plt_col
        # 3 plots for each correlation categories
        # Bucket 0 (cor>0.9) is skipped since it is a subset of bucket 1.
        for k in range(len(ind_collection) - 1):
            if len(ind_collection[k + 1]) < num_plt: continue
            idx = random.sample(ind_collection[k + 1], num_plt)
            y_samples_eval = eval_labels[idx]
            predicted_classes = predictions[idx]
            sample_names = eval_names[idx]
            # Plot
            x_axis = np.arange(num_classes)
            if cell_names==[]:
                xticks = []
            else:
                xticks = cell_names
            plt.figure(1)
            width = 0.35
            for i in range(num_plt):
                plt.figure(figsize=(16, 9))
                plt.subplot(mum_plt_row, mum_plt_col, i + 1)
                plt.bar(x_axis, y_samples_eval[i], width, color='#f99fa1', edgecolor='none', label='true activity')
                if scale:
                    plt.bar(x_axis + width, utils.minmax_scale(predicted_classes[i], y_samples_eval[i]), width, color='#014ead',
                            edgecolor='none', label='prediction')
                    scale_txt = '_normalized'
                else:
                    plt.bar(x_axis + width, predicted_classes[i], width, color='#014ead',
                            edgecolor='none', label='prediction')
                    scale_txt = '_original'
                plt.xticks(x_axis + width, xticks, rotation='vertical', fontsize=9)
                plt.title('{0}, correlation = {1:.3f}'.format(sample_names[i], correlations[idx[i]]))
                plt.xlabel('cell type', fontsize=12)
                plt.ylabel(scale_txt + ' activity', fontsize=12)
                plt.legend()
                fig = plt.gcf()
                fig.tight_layout()
                # NOTE(review): a falsy title is replaced with '' on the first
                # iteration and keeps that value for all later figures.
                if not title:
                    title = ''
                plt.savefig(output_directory + title + "basset_cor_q{0}{1}".format(k, n + 1) + scale_txt + ".svg", bbox_inches='tight')
                plt.close()
# Plot individual prediction cases
def plot_predictions(eval_labels, predictions, norm_flag, correlations, mses, eval_names, output_directory, num_classes, cell_names, file_txt):
    """Save one prediction-vs-truth bar chart per peak.

    eval_labels / predictions: activity matrices indexed by peak.
    norm_flag: if True, min-max-normalize predictions via utils.minmax_scale
        before plotting (presumably against the true activity range — TODO
        confirm against utils).
    correlations / mses: per-peak metrics shown in each figure title.
    eval_names: per-peak names; used for both titles and output filenames.
    file_txt: suffix appended to each output filename.
    """
    for idx in range(len(eval_labels)):
        y_samples_eval = eval_labels[idx]
        predicted_classes = predictions[idx]
        sample_names = eval_names[idx]
        x_axis = np.arange(num_classes)
        xticks = cell_names
        plt.figure()
        width = 0.35
        plt.figure(figsize=(16, 9))
        # plt.subplot(mum_plt_row, mum_plt_col, i + 1)
        plt.bar(x_axis, y_samples_eval, width, color='#f99fa1', edgecolor='none', label='true activity')
        if norm_flag:
            plt.bar(x_axis + width, utils.minmax_scale(predicted_classes, y_samples_eval), width, color='#014ead',
                    edgecolor='none', label='prediction')
            ylabel_text = 'Normalized activity'
        else:
            plt.bar(x_axis + width, predicted_classes, width, color='#014ead',
                    edgecolor='none', label='prediction')
            ylabel_text = 'Activity'
        plt.xticks(x_axis + width, xticks, rotation='vertical', fontsize=9)
        plt.title('{0}, correlation = {1:.4f}, mse = {2:.4f}'.format(sample_names, correlations[idx], mses[idx]))
        plt.xlabel('Cell type', fontsize=12)
        plt.ylabel(ylabel_text, fontsize=12)
        plt.legend()
        fig = plt.gcf()
        fig.tight_layout()
        plt.savefig(output_directory + sample_names + file_txt + '.svg', bbox_inches='tight')
        plt.close()
def plot_predictions_subplot(eval_labels, predictions, correlations, mses, eval_names, output_directory, num_classes, cell_names, file_txt):
    """Save, per peak, a two-row figure: true activity on top, min-max
    normalized prediction (utils.minmax_scale) below.

    correlations / mses: per-peak metrics shown in the top subplot title.
    eval_names: per-peak names; used in titles and output filenames.
    file_txt: suffix appended to each output filename.
    """
    for idx in range(len(eval_labels)):
        y_samples_eval = eval_labels[idx]
        predicted_classes = predictions[idx]
        sample_names = eval_names[idx]
        # Plot
        x_axis = np.arange(num_classes)
        xticks = cell_names
        plt.figure()
        width = 0.5
        fig, axs = plt.subplots(2, figsize=(24, 12))
        # fig.suptitle('{0}, correlation = {1:.3f}'.format(sample_names, correlations[idx]), fontsize=18)
        line_labels = ['true activity', 'prediction']
        l0 = axs[0].bar(x_axis, y_samples_eval, width, color='#f99fa1') #, edgecolor='none', label='true activity'
        l1 = axs[1].bar(x_axis, utils.minmax_scale(predicted_classes, y_samples_eval), width, color='#014ead') #, edgecolor='none', label='prediction'
        axs[0].set_title('{0}, correlation = {1:.4f}, mse = {2:.4f}'.format(sample_names, correlations[idx], mses[idx]), fontsize=30)
        fig.legend([l0, l1],  # The bar containers being labelled
                   labels=line_labels,  # The labels for each series
                   loc="upper right",  # Position of legend
                   borderaxespad=0.1,  # Small spacing around legend box
                   fontsize=12  # Legend text size
                   )
        plt.xticks(x_axis, xticks, rotation='vertical', fontsize=15)
        plt.xlabel('Cell type', fontsize=24)
        plt.ylabel('Normalized activity', fontsize=24)
        fig = plt.gcf()
        fig.tight_layout()
        plt.savefig(output_directory + sample_names + file_txt + '_subplot.svg', bbox_inches='tight')
        plt.close()
def plot_filt_corr_change(filt_pred, labels, correlations, output_directory, square_flag=True):
    """Measure how much each filter's removal changes prediction correlation.

    filt_pred: per-sample leave-one-filter-out predictions, indexed
        (sample, filter, ...) — assumed (samples x filters x cell types);
        TODO confirm against caller.
    labels: per-sample ground truth.
    correlations: per-sample correlations of the full (unablated) model.
    square_flag: if True, use squared correlation change; otherwise the
        signed drop (original - ablated).
    Returns (filt_corr, corr_change, corr_change_mean, corr_change_mean_act),
    where corr_change_mean_act averages only over samples with a nonzero
    change (masked-array mean, zeros filled back in).
    """
    filt_corr = []
    corr_change = []
    for i in range(len(filt_pred)):
        pred = filt_pred[i,:,:]
        label = labels[i]
        # Broadcast the full-model correlation to one entry per filter.
        corr_original = np.full(filt_pred.shape[1], correlations[i])
        #compute correlation between label and each prediction
        def pearson_corr(pred, label):
            return np.corrcoef(pred, label)[0,1]
        corr = np.apply_along_axis(pearson_corr, 1, pred, label)
        filt_corr.append(corr)
        #compute difference in correlation between original model and leave-one-filter-out results
        if square_flag:
            change = np.square(corr-corr_original)
        else:
            change = corr_original-corr
        corr_change.append(change)
    #convert filt_corr and corr_change from list to array
    filt_corr = np.stack(filt_corr, axis=0)
    corr_change = np.stack(corr_change, axis=0)
    # plot histogram of correlation values of all models
    plt.clf()
    plt.hist(filt_corr.flatten(), bins=30)
    plt.axvline(np.mean(filt_corr), color='k', linestyle='dashed', linewidth=2)
    try:
        plt.title("histogram of correlation. Avg cor = {0:.2f}".format(np.mean(filt_corr)))
    except Exception as e:
        print("could not set the title for graph")
        print(e)
    plt.ylabel("Frequency")
    plt.xlabel("Correlation")
    plt.savefig(output_directory + "filt_corr_hist.svg")
    plt.close()
    # corr_change = np.sum(corr_change, axis=0)
    # # Change to Average by the size of samples
    # # Keep both versions
    corr_change_mean = np.mean(corr_change, axis=0)
    # # Change to number of nonzero samples:
    # mask zero changes so the mean runs over "active" samples only,
    # then fill all-masked positions back with 0.0.
    corr_change_mean_act = np.ma.masked_equal(corr_change, 0)
    corr_change_mean_act = np.mean(corr_change_mean_act, axis=0)
    corr_change_mean_act = corr_change_mean_act.filled(0.0)
    # # Plot the distribution of correlation change
    plt.clf()
    plt.hist(corr_change_mean.flatten(), bins=30)
    plt.axvline(np.mean(corr_change_mean), color='k', linestyle='dashed', linewidth=2)
    try:
        plt.title("histogram of correlation. Avg cor = {0:.2f}".format(np.mean(corr_change_mean)))
    except Exception as e:
        print("could not set the title for graph")
        print(e)
    plt.ylabel("Frequency")
    plt.xlabel("Correlation")
    plt.savefig(output_directory + "corr_change_hist.svg")
    plt.close()
    # # Plot bar graph of correlation change
    plt.clf()
    plt.bar(np.arange(filt_pred.shape[1]), corr_change_mean)
    plt.title("Influence of filters on model predictions")
    plt.ylabel("Influence")
    plt.xlabel("Filter")
    plt.savefig(output_directory + "corr_change_bar_graph.svg")
    plt.close()
    return filt_corr, corr_change, corr_change_mean, corr_change_mean_act
def infl_celltype_by_activation(infl):
    """Average influence over only the samples where the filter had any effect.

    infl: per-sample influence array, (samples x filters x cell types).
    A (sample, filter) pair counts as "active" when its total absolute
    influence across cell types is nonzero; inactive entries are masked out
    of the mean. Returns a (filters x cell types) array with positions that
    had no active sample filled with 0.0.
    """
    # True where the sample shows any nonzero influence for the filter.
    active = np.sum(np.absolute(infl), axis=-1) > 0
    # Broadcast the per-(sample, filter) flag across the cell-type axis.
    active = np.repeat(np.expand_dims(active, -1), infl.shape[-1], axis=-1)
    # Masked entries are skipped entirely by the masked-array mean.
    masked = np.ma.masked_equal(active, 0)
    mean_infl = np.mean(np.multiply(infl, masked), axis=0).squeeze()
    return mean_infl.filled(0.0)
def plot_filt_infl(pred, filt_pred, output_directory, cell_labels=None):
    """Compute per-filter, per-cell-type influence (full-model prediction
    minus leave-one-filter-out prediction) and plot a histogram and heatmap.

    pred: full-model predictions (samples x cell types).
    filt_pred: ablated predictions (samples x filters x cell types).
    Returns (infl, infl_signed_mean, infl_signed_mean_act,
    infl_absolute_mean, infl_absolute_mean_act).
    NOTE(review): infl_absolute_mean is mutated in place below (zeros
    replaced with NaN for the heatmap) and returned in that mutated form.
    """
    # Expand pred array to be nx300x81
    pred = np.expand_dims(pred, 1)
    pred = np.repeat(pred, filt_pred.shape[1], axis=1)
    # influence per celltype with signed version; result 300x81 array of influences
    infl = pred - filt_pred
    # Average by the size of samples
    infl_signed_mean = np.mean(infl, axis=0).squeeze()
    # Change to number of nonzero samples
    infl_signed_mean_act = infl_celltype_by_activation(infl)
    # Compute the sum of absolute/squares of differences between pred and filt_pred; result 300x81 array of influences
    # infl = np.square(filt_pred - pred)
    infl_absolute_mean = np.mean(np.absolute(infl), axis=0).squeeze()
    infl_absolute_mean_act = infl_celltype_by_activation(np.absolute(infl))
    # plot histogram
    plt.clf()
    plt.hist(infl_absolute_mean.flatten(), bins=30)
    plt.axvline(np.mean(infl_absolute_mean), color='k', linestyle='dashed', linewidth=2)
    try:
        plt.title("histogram of filter influence. Avg influence = {0:.2f}".format(np.mean(infl_absolute_mean)))
    except Exception as e:
        print("could not set the title for graph")
        print(e)
    plt.ylabel("Frequency")
    plt.xlabel("")
    plt.savefig(output_directory + "filt_infl_celltype_hist.svg")
    plt.close()
    # plot heatmap (zeros turned to NaN so log10 is defined and masked out)
    plt.clf()
    infl_absolute_mean[infl_absolute_mean==0] = np.nan
    sns.heatmap(np.log10(infl_absolute_mean), mask=np.isnan(infl_absolute_mean))
    plt.title("log10 of Influence of filters on each cell type prediction")
    plt.ylabel("Filter")
    plt.xlabel("Cell Type")
    if not cell_labels:
        cell_labels = []
    plt.xticks(np.arange(len(cell_labels)), cell_labels, rotation='vertical', fontsize=3.0)
    plt.savefig(output_directory + "sns_filt_infl_celltype_log10_heatmap.svg")
    plt.close()
    return infl, infl_signed_mean, infl_signed_mean_act, infl_absolute_mean, infl_absolute_mean_act
def plot_confusion_matrix(obs, preds, output_directory, num_classes):
    """Print accuracy/classification report and plot a normalized 2x2
    confusion matrix for a binary task.

    obs: ground-truth class labels (already argmax'd / integer classes).
    preds: per-class scores; argmax is taken along axis 1.
    NOTE(review): tick positions and target_names are hard-coded for two
    classes regardless of num_classes.
    """
    from sklearn.metrics import confusion_matrix
    from sklearn.metrics import accuracy_score
    from sklearn.metrics import classification_report
    import itertools
    test_labels_max = obs
    predicted_labels_max = np.argmax(preds, axis=1)
    #print('max test labels:\n',test_labels_max.T)
    #print('max predicted labels:\n',predicted_labels_max.T)
    accuracy = accuracy_score(test_labels_max, predicted_labels_max) # , normalize=False
    print('Test set accuracy:\n',accuracy)
    target_names = ['class 0', 'class 1']
    print(classification_report(test_labels_max, predicted_labels_max, target_names=target_names))
    # TODO: Add in sample weights
    cm = confusion_matrix(test_labels_max, predicted_labels_max)
    print('Confusion matrix:\n',cm)
    # Row-normalize so each true class sums to 1.
    cm = cm.astype('float') / cm.sum(axis = 1)[:, np.newaxis]
    plt.clf()
    plt.imshow(cm, cmap=plt.cm.Blues)
    plt.title('Normalized confusion matrix')
    plt.colorbar()
    plt.xlabel('True label')
    plt.ylabel('Predicted label')
    plt.xticks([0, 1]); plt.yticks([0, 1])
    plt.grid(False)
    # Annotate each cell; white text on dark (high-value) cells.
    for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
        plt.text(j, i, format(cm[i, j], '.2f'),
                 horizontalalignment='center',
                 color='white' if cm[i, j] > 0.5 else 'black')
    plt.savefig(output_directory + "confusion_matrix.svg")
def plot_scatter(x, y, output_directory, title, xlabel, ylabel, save_title, same_limit=False, c=None):
    """Scatter x against y with labelled axes and save the figure.

    same_limit: if True, force identical x/y axis limits (useful for
        obs-vs-pred plots); otherwise fit each axis to its own data.
    c: optional per-point color values, mapped through the 'jet' colormap.
    """
    plt.clf()
    fig, ax = plt.subplots(figsize=(16, 9))
    pad = 0.01
    if same_limit:
        lo = np.minimum(np.min(x), np.min(y)) - pad
        hi = np.maximum(np.max(x), np.max(y)) + pad
        plt.xlim(lo, hi)
        plt.ylim(lo, hi)
    else:
        plt.xlim(np.min(x) - pad, np.max(x) + pad)
        plt.ylim(np.min(y) - pad, np.max(y) + pad)
    scatter_kwargs = {'cmap': plt.get_cmap('jet')}
    if c is not None:
        scatter_kwargs['c'] = c
    ax.scatter(x, y, **scatter_kwargs)
    plt.title(title, fontsize=18)
    plt.xlabel(xlabel, fontsize=18)
    plt.ylabel(ylabel, fontsize=18)
    plt.savefig(output_directory + save_title)
    plt.close()
def get_memes(activations, sequences, y, output_directory, num_classes, flag_coding_order, threshold=0.5, flag_weighted=False, num_filter=300):
    """Extract PWM/PFM motifs from first-layer filter activations and write
    them in MEME format, plus diagnostic plots.

    activations: filter activations, coerced to (samples x filters x 251).
    sequences: one-hot sequences, coerced to (samples x 4 x 251).
    y: per-sample ground-truth activity (samples x num_classes).
    flag_coding_order: 'ATGC' or 'ACGT' — channel order of `sequences`;
        output is always written in A, C, G, T order.
    threshold: fraction of the max activation that counts as "activated".
    flag_weighted: if True, threshold per (sample, filter); otherwise one
        threshold per filter over all samples.
    Returns (filter_to_ind_dict, pwm, activation_indices_array,
    total_seq_array, activated_OCRs, n_activated_OCRs, OCR_matrix).
    """
    #find the threshold value for activation
    # The last axis is assumed to be sequence position (length 251).
    if activations.shape[-1] != 251:
        activations = np.swapaxes(activations, 1, 2)
    if sequences.shape[-1] != 251:
        sequences = np.swapaxes(sequences, 1, 2)
    if flag_weighted:
        activation_threshold = threshold*np.amax(activations, axis=(2))
    else:
        activation_threshold = threshold*np.amax(activations, axis=(0, 2))
    #pad sequences: 9 zero-columns on each side so a 19 bp window can be
    #taken around activations near the sequence edges
    num_pad = 9
    npad = ((0, 0), (0, 0), (num_pad, num_pad))
    sequences = np.pad(sequences, pad_width=npad, mode='constant', constant_values=0)
    pwm = np.zeros((num_filter, 4, 19))
    pfm = np.zeros((num_filter, 4, 19))
    nsamples = activations.shape[0]
    # OCR_matrix[i, j] == 1 when filter i was activated anywhere in sample j.
    OCR_matrix = np.zeros((num_filter, y.shape[0]))
    activation_indices = []
    activated_OCRs = np.zeros((num_filter, num_classes))
    n_activated_OCRs = np.zeros(num_filter)
    total_seq = np.zeros(num_filter)
    filter_to_ind_dict = {}
    for i in range(num_filter):
        #create list to store 19 bp sequences that activated filter
        act_seqs_list = []
        act_OCRs_tmp = []
        list_seq_and_start=[]
        for j in range(nsamples):
            # find all indices where filter is activated
            if flag_weighted:
                indices = np.where(activations[j,i,:] > activation_threshold[j,i])
            else:
                indices = np.where(activations[j,i,:] > activation_threshold[i])
            #save ground truth peak heights of OCRs activated by each filter
            if indices[0].shape[0]>0:
                act_OCRs_tmp.append(y[j, :])
                OCR_matrix[i, j] = 1
            for start in indices[0]:
                activation_indices.append(start)
                end = start+19
                act_seqs_list.append(sequences[j,:,start:end])
                # Record (sample index, unpadded start position).
                list_seq_and_start.append((j, start-num_pad))
        filter_to_ind_dict[i]=list_seq_and_start
        #convert act_seqs from list to array
        if act_seqs_list:
            act_seqs = np.stack(act_seqs_list)
            pwm_tmp = np.sum(act_seqs, axis=0)
            pfm_tmp=pwm_tmp
            total = np.sum(pwm_tmp, axis=0)
            # Avoid divide by zero runtime error
            # pwm_tmp = np.nan_to_num(pwm_tmp/total) # Original way will raise runtime error
            with np.errstate(divide='ignore', invalid='ignore'):
                pwm_tmp = np.true_divide(pwm_tmp,total) # When certain position of total is 0, ignore error
                pwm_tmp[pwm_tmp == np.inf] = 0
                pwm_tmp = np.nan_to_num(pwm_tmp)
            #permute pwm from A, T, G, C order to A, C, G, T order
            if flag_coding_order == 'ATGC':
                order = [0, 3, 2, 1]
            if flag_coding_order == 'ACGT':
                order = [0, 1, 2, 3]
            pwm[i,:,:] = pwm_tmp[order, :]
            pfm[i,:,:] = pfm_tmp[order, :]
            #store total number of sequences that activated that filter
            total_seq[i] = len(act_seqs_list)
            #save mean OCR activation
            act_OCRs_tmp = np.stack(act_OCRs_tmp)
            activated_OCRs[i, :] = np.mean(act_OCRs_tmp, axis=0)
            #save the number of activated OCRs
            n_activated_OCRs[i] = act_OCRs_tmp.shape[0]
    #TODO: delete the following line: activated_OCRs is already an array
    activated_OCRs = np.stack(activated_OCRs)
    #write motifs to meme format
    #PWM file:
    if flag_weighted:
        meme_file = open(output_directory + "filter_motifs_pwm_weighted.meme", 'w')
    else:
        meme_file = open(output_directory + "filter_motifs_pwm.meme", 'w')
    meme_file.write("MEME version 4 \n")
    #PFM file:
    if flag_weighted:
        meme_file_pfm = open(output_directory + "filter_motifs_pfm_weighted.meme", 'w')
    else:
        meme_file_pfm = open(output_directory + "filter_motifs_pfm.meme", 'w')
    meme_file_pfm.write("MEME version 4 \n")
    # Emit one MOTIF record per filter that captured any sequence;
    # zero-probability columns are dropped from the matrix body.
    for i in range(0, num_filter):
        if np.sum(pwm[i,:,:]) > 0:
            meme_file.write("\n")
            meme_file.write("MOTIF filter%s \n" % i)
            meme_file.write("letter-probability matrix: alength= 4 w= %d \n" % np.count_nonzero(np.sum(pwm[i,:,:], axis=0)))
            meme_file_pfm.write("\n")
            meme_file_pfm.write("MOTIF filter%s \n" % i)
            meme_file_pfm.write("letter-probability matrix: alength= 4 w= %d \n" % np.count_nonzero(np.sum(pwm[i,:,:], axis=0)))
            for j in range(0, 19):
                if np.sum(pwm[i,:,j]) > 0:
                    meme_file.write(str(pwm[i,0,j]) + "\t" + str(pwm[i,1,j]) + "\t" + str(pwm[i,2,j]) + "\t" + str(pwm[i,3,j]) + "\n")
                    meme_file_pfm.write(str(pfm[i,0,j]) + "\t" + str(pfm[i,1,j]) + "\t" + str(pfm[i,2,j]) + "\t" + str(pfm[i,3,j]) + "\n")
    meme_file.close()
    meme_file_pfm.close()
    #plot indices of first position in sequence that activates the filters
    activation_indices_array = np.stack(activation_indices)
    plt.clf()
    plt.hist(activation_indices_array.flatten(), bins=260)
    plt.title("histogram of position indices.")
    plt.ylabel("Frequency")
    plt.xlabel("Position")
    plt.savefig(output_directory + "position_hist.svg")
    plt.close()
    #plot total sequences that activated each filter
    #TODO: delete the following line: total_seq is already an array
    total_seq_array = np.stack(total_seq)
    plt.clf()
    plt.bar(np.arange(num_filter), total_seq_array)
    plt.title("Number of sequences activating each filter")
    plt.ylabel("N sequences")
    plt.xlabel("Filter")
    plt.savefig(output_directory + "nseqs_bar_graph.svg")
    plt.close()
    return filter_to_ind_dict, pwm, activation_indices_array, total_seq_array, activated_OCRs, n_activated_OCRs, OCR_matrix
#convert mouse model predictions to human cell predictions
def mouse2human(mouse_predictions, mouse_cell_types, mapping, method='average'):
    """Convert mouse model predictions to human cell-type predictions.

    Each human cell type maps to one or more mouse cell types via
    ``mapping``; the mouse prediction columns matching a human cell type
    are reduced (averaged by default) into that human column.

    Args:
        mouse_predictions: array of shape (n_peaks, n_mouse_cell_types).
        mouse_cell_types: 1-D array of mouse cell-type names, one per
            column of ``mouse_predictions``.
        mapping: 2-D array; column 0 holds mouse cell-type names, column 1
            the corresponding human cell-type names.
        method: reduction over matched mouse columns — 'average', 'max',
            or 'median'.

    Returns:
        Tuple ``(human_predictions, human_cells)``: an array of shape
        (n_peaks, n_human_cell_types) and the sorted unique human
        cell-type names (order of the output columns).

    Raises:
        ValueError: if ``method`` is not a supported reduction. (The
            previous version silently returned all-zero predictions.)
    """
    # Dispatch table instead of three independent ifs; rejects typos loudly.
    reducers = {'average': np.mean, 'max': np.max, 'median': np.median}
    if method not in reducers:
        raise ValueError("method must be one of %s, got %r" % (sorted(reducers), method))
    reduce_fn = reducers[method]
    human_cells = np.unique(mapping[:, 1])  # np.unique sorts the names
    human_predictions = np.zeros((mouse_predictions.shape[0], human_cells.shape[0]))
    for i, celltype in enumerate(human_cells):
        # Mouse cell types mapped to this human cell type.
        matches = mapping[mapping[:, 1] == celltype][:, 0]
        # Integer column indices of those mouse cell types. Using the index
        # array directly (np.isin supersedes the deprecated np.in1d) avoids
        # the hidden extra axis the old tuple-indexing + squeeze relied on.
        idx = np.isin(mouse_cell_types, matches).nonzero()[0]
        human_predictions[:, i] = reduce_fn(mouse_predictions[:, idx], axis=1)
    return human_predictions, human_cells
| [
"matplotlib.pyplot.title",
"numpy.absolute",
"numpy.sum",
"numpy.nan_to_num",
"matplotlib.pyplot.clf",
"numpy.argmax",
"random.sample",
"matplotlib.pyplot.bar",
"sklearn.metrics.accuracy_score",
"sklearn.metrics.classification_report",
"numpy.isnan",
"matplotlib.pyplot.figure",
"numpy.mean",... | [((212, 233), 'matplotlib.use', 'matplotlib.use', (['"""Agg"""'], {}), "('Agg')\n", (226, 233), False, 'import matplotlib\n'), ((504, 531), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(16, 9)'}), '(figsize=(16, 9))\n', (514, 531), True, 'import matplotlib.pyplot as plt\n'), ((595, 615), 'matplotlib.pyplot.plot', 'plt.plot', (['valid_loss'], {}), '(valid_loss)\n', (603, 615), True, 'import matplotlib.pyplot as plt\n'), ((620, 652), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Epoch"""'], {'fontsize': '(18)'}), "('Epoch', fontsize=18)\n", (630, 652), True, 'import matplotlib.pyplot as plt\n'), ((657, 688), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Loss"""'], {'fontsize': '(18)'}), "('Loss', fontsize=18)\n", (667, 688), True, 'import matplotlib.pyplot as plt\n'), ((693, 740), 'matplotlib.pyplot.legend', 'plt.legend', (['[loss1_txt, loss2_txt]'], {'fontsize': '(18)'}), '([loss1_txt, loss2_txt], fontsize=18)\n', (703, 740), True, 'import matplotlib.pyplot as plt\n'), ((745, 818), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(output_directory + loss1_txt + '_' + loss2_txt + '_loss.svg')"], {}), "(output_directory + loss1_txt + '_' + loss2_txt + '_loss.svg')\n", (756, 818), True, 'import matplotlib.pyplot as plt\n'), ((823, 834), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (832, 834), True, 'import matplotlib.pyplot as plt\n'), ((1443, 1452), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (1450, 1452), True, 'import matplotlib.pyplot as plt\n'), ((1457, 1488), 'matplotlib.pyplot.hist', 'plt.hist', (['correlations'], {'bins': '(30)'}), '(correlations, bins=30)\n', (1465, 1488), True, 'import matplotlib.pyplot as plt\n'), ((1576, 1633), 'matplotlib.pyplot.axvline', 'plt.axvline', (['(0)'], {'color': '"""k"""', 'linestyle': '"""solid"""', 'linewidth': '(2)'}), "(0, color='k', linestyle='solid', linewidth=2)\n", (1587, 1633), True, 'import matplotlib.pyplot as plt\n'), ((1829, 1852), 
'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Frequency"""'], {}), "('Frequency')\n", (1839, 1852), True, 'import matplotlib.pyplot as plt\n'), ((1857, 1882), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""correlation"""'], {}), "('correlation')\n", (1867, 1882), True, 'import matplotlib.pyplot as plt\n'), ((1887, 1924), 'matplotlib.pyplot.savefig', 'plt.savefig', (['(output_directory + title)'], {}), '(output_directory + title)\n', (1898, 1924), True, 'import matplotlib.pyplot as plt\n'), ((1929, 1940), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (1938, 1940), True, 'import matplotlib.pyplot as plt\n'), ((2545, 2554), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (2552, 2554), True, 'import matplotlib.pyplot as plt\n'), ((2559, 2582), 'matplotlib.pyplot.hist', 'plt.hist', (['mses'], {'bins': '(30)'}), '(mses, bins=30)\n', (2567, 2582), True, 'import matplotlib.pyplot as plt\n'), ((2662, 2719), 'matplotlib.pyplot.axvline', 'plt.axvline', (['(0)'], {'color': '"""k"""', 'linestyle': '"""solid"""', 'linewidth': '(2)'}), "(0, color='k', linestyle='solid', linewidth=2)\n", (2673, 2719), True, 'import matplotlib.pyplot as plt\n'), ((2899, 2922), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Frequency"""'], {}), "('Frequency')\n", (2909, 2922), True, 'import matplotlib.pyplot as plt\n'), ((2927, 2944), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""MSE"""'], {}), "('MSE')\n", (2937, 2944), True, 'import matplotlib.pyplot as plt\n'), ((2949, 2986), 'matplotlib.pyplot.savefig', 'plt.savefig', (['(output_directory + title)'], {}), '(output_directory + title)\n', (2960, 2986), True, 'import matplotlib.pyplot as plt\n'), ((2991, 3002), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (3000, 3002), True, 'import matplotlib.pyplot as plt\n'), ((4168, 4286), 'matplotlib.pyplot.pie', 'plt.pie', (['sizes'], {'explode': 'explode', 'labels': 'pie_labels', 'colors': 'colors', 'autopct': '"""%1.1f%%"""', 'shadow': '(True)', 'startangle': '(140)'}), 
"(sizes, explode=explode, labels=pie_labels, colors=colors, autopct=\n '%1.1f%%', shadow=True, startangle=140)\n", (4175, 4286), True, 'import matplotlib.pyplot as plt\n'), ((4298, 4315), 'matplotlib.pyplot.axis', 'plt.axis', (['"""equal"""'], {}), "('equal')\n", (4306, 4315), True, 'import matplotlib.pyplot as plt\n'), ((4320, 4348), 'matplotlib.pyplot.title', 'plt.title', (['"""correlation_pie"""'], {}), "('correlation_pie')\n", (4329, 4348), True, 'import matplotlib.pyplot as plt\n'), ((4408, 4445), 'matplotlib.pyplot.savefig', 'plt.savefig', (['(output_directory + title)'], {}), '(output_directory + title)\n', (4419, 4445), True, 'import matplotlib.pyplot as plt\n'), ((4450, 4461), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (4459, 4461), True, 'import matplotlib.pyplot as plt\n'), ((4718, 4736), 'numpy.std', 'np.std', (['Q1'], {'axis': '(1)'}), '(Q1, axis=1)\n', (4724, 4736), True, 'import numpy as np\n'), ((4747, 4765), 'numpy.std', 'np.std', (['Q2'], {'axis': '(1)'}), '(Q2, axis=1)\n', (4753, 4765), True, 'import numpy as np\n'), ((4776, 4794), 'numpy.std', 'np.std', (['Q3'], {'axis': '(1)'}), '(Q3, axis=1)\n', (4782, 4794), True, 'import numpy as np\n'), ((4805, 4823), 'numpy.std', 'np.std', (['Q4'], {'axis': '(1)'}), '(Q4, axis=1)\n', (4811, 4823), True, 'import numpy as np\n'), ((4834, 4852), 'numpy.std', 'np.std', (['Q5'], {'axis': '(1)'}), '(Q5, axis=1)\n', (4840, 4852), True, 'import numpy as np\n'), ((5256, 5268), 'numpy.mean', 'np.mean', (['sd1'], {}), '(sd1)\n', (5263, 5268), True, 'import numpy as np\n'), ((5284, 5296), 'numpy.mean', 'np.mean', (['sd2'], {}), '(sd2)\n', (5291, 5296), True, 'import numpy as np\n'), ((5312, 5324), 'numpy.mean', 'np.mean', (['sd3'], {}), '(sd3)\n', (5319, 5324), True, 'import numpy as np\n'), ((5340, 5352), 'numpy.mean', 'np.mean', (['sd4'], {}), '(sd4)\n', (5347, 5352), True, 'import numpy as np\n'), ((5368, 5380), 'numpy.mean', 'np.mean', (['sd5'], {}), '(sd5)\n', (5375, 5380), True, 'import numpy as 
np\n'), ((5680, 5692), 'numpy.mean', 'np.mean', (['qr1'], {}), '(qr1)\n', (5687, 5692), True, 'import numpy as np\n'), ((5708, 5720), 'numpy.mean', 'np.mean', (['qr2'], {}), '(qr2)\n', (5715, 5720), True, 'import numpy as np\n'), ((5736, 5748), 'numpy.mean', 'np.mean', (['qr3'], {}), '(qr3)\n', (5743, 5748), True, 'import numpy as np\n'), ((5764, 5776), 'numpy.mean', 'np.mean', (['qr4'], {}), '(qr4)\n', (5771, 5776), True, 'import numpy as np\n'), ((5792, 5804), 'numpy.mean', 'np.mean', (['qr5'], {}), '(qr5)\n', (5799, 5804), True, 'import numpy as np\n'), ((6084, 6096), 'numpy.arange', 'np.arange', (['(5)'], {}), '(5)\n', (6093, 6096), True, 'import numpy as np\n'), ((6198, 6225), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(16, 9)'}), '(figsize=(16, 9))\n', (6208, 6225), True, 'import matplotlib.pyplot as plt\n'), ((6230, 6330), 'matplotlib.pyplot.bar', 'plt.bar', (['x_axis', 'mean_sds', 'width'], {'color': '"""#fc8d91"""', 'edgecolor': '"""none"""', 'label': '"""standard deviation"""'}), "(x_axis, mean_sds, width, color='#fc8d91', edgecolor='none', label=\n 'standard deviation')\n", (6237, 6330), True, 'import matplotlib.pyplot as plt\n'), ((6330, 6438), 'matplotlib.pyplot.bar', 'plt.bar', (['(x_axis + width)', 'mean_qrs', 'width'], {'color': '"""#f7d00e"""', 'edgecolor': '"""none"""', 'label': '"""interquartile range"""'}), "(x_axis + width, mean_qrs, width, color='#f7d00e', edgecolor='none',\n label='interquartile range')\n", (6337, 6438), True, 'import matplotlib.pyplot as plt\n'), ((6439, 6486), 'matplotlib.pyplot.xticks', 'plt.xticks', (['(x_axis + width)', 'xticks'], {'fontsize': '(16)'}), '(x_axis + width, xticks, fontsize=16)\n', (6449, 6486), True, 'import matplotlib.pyplot as plt\n'), ((6491, 6539), 'matplotlib.pyplot.title', 'plt.title', (['"""Comparison among good and bad peaks"""'], {}), "('Comparison among good and bad peaks')\n", (6500, 6539), True, 'import matplotlib.pyplot as plt\n'), ((6544, 6582), 'matplotlib.pyplot.xlabel', 
'plt.xlabel', (['"""peaks class"""'], {'fontsize': '(18)'}), "('peaks class', fontsize=18)\n", (6554, 6582), True, 'import matplotlib.pyplot as plt\n'), ((6587, 6621), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""average"""'], {'fontsize': '(18)'}), "('average', fontsize=18)\n", (6597, 6621), True, 'import matplotlib.pyplot as plt\n'), ((6626, 6638), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (6636, 6638), True, 'import matplotlib.pyplot as plt\n'), ((6643, 6694), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(output_directory + 'basset_SD_IQR.svg')"], {}), "(output_directory + 'basset_SD_IQR.svg')\n", (6654, 6694), True, 'import matplotlib.pyplot as plt\n'), ((6699, 6710), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (6708, 6710), True, 'import matplotlib.pyplot as plt\n'), ((6846, 6868), 'numpy.var', 'np.var', (['labels'], {'axis': '(1)'}), '(labels, axis=1)\n', (6852, 6868), True, 'import numpy as np\n'), ((6921, 6948), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(16, 9)'}), '(figsize=(16, 9))\n', (6931, 6948), True, 'import matplotlib.pyplot as plt\n'), ((6953, 6988), 'matplotlib.pyplot.scatter', 'plt.scatter', (['variance', 'correlations'], {}), '(variance, correlations)\n', (6964, 6988), True, 'import matplotlib.pyplot as plt\n'), ((6993, 7033), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Peak variance"""'], {'fontsize': '(18)'}), "('Peak variance', fontsize=18)\n", (7003, 7033), True, 'import matplotlib.pyplot as plt\n'), ((7038, 7100), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Prediction-ground truth correlation"""'], {'fontsize': '(18)'}), "('Prediction-ground truth correlation', fontsize=18)\n", (7048, 7100), True, 'import matplotlib.pyplot as plt\n'), ((7105, 7168), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(output_directory + 'variance_correlation_plot.svg')"], {}), "(output_directory + 'variance_correlation_plot.svg')\n", (7116, 7168), True, 'import matplotlib.pyplot as plt\n'), ((7173, 
7184), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (7182, 7184), True, 'import matplotlib.pyplot as plt\n'), ((7240, 7267), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(16, 9)'}), '(figsize=(16, 9))\n', (7250, 7267), True, 'import matplotlib.pyplot as plt\n'), ((7272, 7316), 'matplotlib.pyplot.hist2d', 'plt.hist2d', (['variance', 'correlations'], {'bins': '(100)'}), '(variance, correlations, bins=100)\n', (7282, 7316), True, 'import matplotlib.pyplot as plt\n'), ((7321, 7361), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Peak variance"""'], {'fontsize': '(18)'}), "('Peak variance', fontsize=18)\n", (7331, 7361), True, 'import matplotlib.pyplot as plt\n'), ((7366, 7428), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Prediction-ground truth correlation"""'], {'fontsize': '(18)'}), "('Prediction-ground truth correlation', fontsize=18)\n", (7376, 7428), True, 'import matplotlib.pyplot as plt\n'), ((7433, 7447), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {}), '()\n', (7445, 7447), True, 'import matplotlib.pyplot as plt\n'), ((7452, 7517), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(output_directory + 'variance_correlation_hist2D.svg')"], {}), "(output_directory + 'variance_correlation_hist2D.svg')\n", (7463, 7517), True, 'import matplotlib.pyplot as plt\n'), ((7522, 7533), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (7531, 7533), True, 'import matplotlib.pyplot as plt\n'), ((7605, 7632), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(16, 9)'}), '(figsize=(16, 9))\n', (7615, 7632), True, 'import matplotlib.pyplot as plt\n'), ((7702, 7742), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Peak variance"""'], {'fontsize': '(18)'}), "('Peak variance', fontsize=18)\n", (7712, 7742), True, 'import matplotlib.pyplot as plt\n'), ((7747, 7809), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Prediction-ground truth correlation"""'], {'fontsize': '(18)'}), "('Prediction-ground truth correlation', 
fontsize=18)\n", (7757, 7809), True, 'import matplotlib.pyplot as plt\n'), ((7814, 7828), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {}), '()\n', (7826, 7828), True, 'import matplotlib.pyplot as plt\n'), ((7833, 7901), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(output_directory + 'variance_correlation_loghist2d.svg')"], {}), "(output_directory + 'variance_correlation_loghist2d.svg')\n", (7844, 7901), True, 'import matplotlib.pyplot as plt\n'), ((7906, 7917), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (7915, 7917), True, 'import matplotlib.pyplot as plt\n'), ((8210, 8219), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (8217, 8219), True, 'import matplotlib.pyplot as plt\n'), ((8224, 8255), 'matplotlib.pyplot.hist', 'plt.hist', (['correlations'], {'bins': '(30)'}), '(correlations, bins=30)\n', (8232, 8255), True, 'import matplotlib.pyplot as plt\n'), ((8543, 8566), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Frequency"""'], {}), "('Frequency')\n", (8553, 8566), True, 'import matplotlib.pyplot as plt\n'), ((8571, 8596), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Correlation"""'], {}), "('Correlation')\n", (8581, 8596), True, 'import matplotlib.pyplot as plt\n'), ((8601, 8664), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(output_directory + 'basset_cell_wise_cor_hist.svg')"], {}), "(output_directory + 'basset_cell_wise_cor_hist.svg')\n", (8612, 8664), True, 'import matplotlib.pyplot as plt\n'), ((8669, 8680), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (8678, 8680), True, 'import matplotlib.pyplot as plt\n'), ((8736, 8745), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (8743, 8745), True, 'import matplotlib.pyplot as plt\n'), ((8800, 8838), 'matplotlib.pyplot.title', 'plt.title', (['"""Correlations by Cell Type"""'], {}), "('Correlations by Cell Type')\n", (8809, 8838), True, 'import matplotlib.pyplot as plt\n'), ((8843, 8868), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Correlation"""'], {}), 
"('Correlation')\n", (8853, 8868), True, 'import matplotlib.pyplot as plt\n'), ((8873, 8896), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Cell Type"""'], {}), "('Cell Type')\n", (8883, 8896), True, 'import matplotlib.pyplot as plt\n'), ((8989, 9048), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(output_directory + 'cellwise_cor_bargraph.svg')"], {}), "(output_directory + 'cellwise_cor_bargraph.svg')\n", (9000, 9048), True, 'import matplotlib.pyplot as plt\n'), ((9053, 9064), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (9062, 9064), True, 'import matplotlib.pyplot as plt\n'), ((15657, 15684), 'numpy.stack', 'np.stack', (['filt_corr'], {'axis': '(0)'}), '(filt_corr, axis=0)\n', (15665, 15684), True, 'import numpy as np\n'), ((15703, 15732), 'numpy.stack', 'np.stack', (['corr_change'], {'axis': '(0)'}), '(corr_change, axis=0)\n', (15711, 15732), True, 'import numpy as np\n'), ((15799, 15808), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (15806, 15808), True, 'import matplotlib.pyplot as plt\n'), ((16133, 16156), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Frequency"""'], {}), "('Frequency')\n", (16143, 16156), True, 'import matplotlib.pyplot as plt\n'), ((16161, 16186), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Correlation"""'], {}), "('Correlation')\n", (16171, 16186), True, 'import matplotlib.pyplot as plt\n'), ((16191, 16243), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(output_directory + 'filt_corr_hist.svg')"], {}), "(output_directory + 'filt_corr_hist.svg')\n", (16202, 16243), True, 'import matplotlib.pyplot as plt\n'), ((16248, 16259), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (16257, 16259), True, 'import matplotlib.pyplot as plt\n'), ((16413, 16441), 'numpy.mean', 'np.mean', (['corr_change'], {'axis': '(0)'}), '(corr_change, axis=0)\n', (16420, 16441), True, 'import numpy as np\n'), ((16514, 16548), 'numpy.ma.masked_equal', 'np.ma.masked_equal', (['corr_change', '(0)'], {}), '(corr_change, 0)\n', 
(16532, 16548), True, 'import numpy as np\n'), ((16576, 16613), 'numpy.mean', 'np.mean', (['corr_change_mean_act'], {'axis': '(0)'}), '(corr_change_mean_act, axis=0)\n', (16583, 16613), True, 'import numpy as np\n'), ((16731, 16740), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (16738, 16740), True, 'import matplotlib.pyplot as plt\n'), ((17086, 17109), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Frequency"""'], {}), "('Frequency')\n", (17096, 17109), True, 'import matplotlib.pyplot as plt\n'), ((17114, 17139), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Correlation"""'], {}), "('Correlation')\n", (17124, 17139), True, 'import matplotlib.pyplot as plt\n'), ((17144, 17198), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(output_directory + 'corr_change_hist.svg')"], {}), "(output_directory + 'corr_change_hist.svg')\n", (17155, 17198), True, 'import matplotlib.pyplot as plt\n'), ((17203, 17214), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (17212, 17214), True, 'import matplotlib.pyplot as plt\n'), ((17269, 17278), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (17276, 17278), True, 'import matplotlib.pyplot as plt\n'), ((17344, 17398), 'matplotlib.pyplot.title', 'plt.title', (['"""Influence of filters on model predictions"""'], {}), "('Influence of filters on model predictions')\n", (17353, 17398), True, 'import matplotlib.pyplot as plt\n'), ((17403, 17426), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Influence"""'], {}), "('Influence')\n", (17413, 17426), True, 'import matplotlib.pyplot as plt\n'), ((17431, 17451), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Filter"""'], {}), "('Filter')\n", (17441, 17451), True, 'import matplotlib.pyplot as plt\n'), ((17456, 17515), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(output_directory + 'corr_change_bar_graph.svg')"], {}), "(output_directory + 'corr_change_bar_graph.svg')\n", (17467, 17515), True, 'import matplotlib.pyplot as plt\n'), ((17520, 17531), 
'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (17529, 17531), True, 'import matplotlib.pyplot as plt\n'), ((17884, 17921), 'numpy.expand_dims', 'np.expand_dims', (['infl_sum_celltype', '(-1)'], {}), '(infl_sum_celltype, -1)\n', (17898, 17921), True, 'import numpy as np\n'), ((17946, 17999), 'numpy.repeat', 'np.repeat', (['infl_sum_celltype', 'infl.shape[-1]'], {'axis': '(-1)'}), '(infl_sum_celltype, infl.shape[-1], axis=-1)\n', (17955, 17999), True, 'import numpy as np\n'), ((18018, 18058), 'numpy.ma.masked_equal', 'np.ma.masked_equal', (['infl_sum_celltype', '(0)'], {}), '(infl_sum_celltype, 0)\n', (18036, 18058), True, 'import numpy as np\n'), ((18105, 18135), 'numpy.multiply', 'np.multiply', (['infl', 'masked_infl'], {}), '(infl, masked_infl)\n', (18116, 18135), True, 'import numpy as np\n'), ((18419, 18442), 'numpy.expand_dims', 'np.expand_dims', (['pred', '(1)'], {}), '(pred, 1)\n', (18433, 18442), True, 'import numpy as np\n'), ((18454, 18497), 'numpy.repeat', 'np.repeat', (['pred', 'filt_pred.shape[1]'], {'axis': '(1)'}), '(pred, filt_pred.shape[1], axis=1)\n', (18463, 18497), True, 'import numpy as np\n'), ((19158, 19167), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (19165, 19167), True, 'import matplotlib.pyplot as plt\n'), ((19530, 19553), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Frequency"""'], {}), "('Frequency')\n", (19540, 19553), True, 'import matplotlib.pyplot as plt\n'), ((19558, 19572), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['""""""'], {}), "('')\n", (19568, 19572), True, 'import matplotlib.pyplot as plt\n'), ((19577, 19638), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(output_directory + 'filt_infl_celltype_hist.svg')"], {}), "(output_directory + 'filt_infl_celltype_hist.svg')\n", (19588, 19638), True, 'import matplotlib.pyplot as plt\n'), ((19643, 19654), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (19652, 19654), True, 'import matplotlib.pyplot as plt\n'), ((19679, 19688), 
'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (19686, 19688), True, 'import matplotlib.pyplot as plt\n'), ((19833, 19904), 'matplotlib.pyplot.title', 'plt.title', (['"""log10 of Influence of filters on each cell type prediction"""'], {}), "('log10 of Influence of filters on each cell type prediction')\n", (19842, 19904), True, 'import matplotlib.pyplot as plt\n'), ((19909, 19929), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Filter"""'], {}), "('Filter')\n", (19919, 19929), True, 'import matplotlib.pyplot as plt\n'), ((19934, 19957), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Cell Type"""'], {}), "('Cell Type')\n", (19944, 19957), True, 'import matplotlib.pyplot as plt\n'), ((20103, 20177), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(output_directory + 'sns_filt_infl_celltype_log10_heatmap.svg')"], {}), "(output_directory + 'sns_filt_infl_celltype_log10_heatmap.svg')\n", (20114, 20177), True, 'import matplotlib.pyplot as plt\n'), ((20182, 20193), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (20191, 20193), True, 'import matplotlib.pyplot as plt\n'), ((20598, 20622), 'numpy.argmax', 'np.argmax', (['preds'], {'axis': '(1)'}), '(preds, axis=1)\n', (20607, 20622), True, 'import numpy as np\n'), ((20755, 20808), 'sklearn.metrics.accuracy_score', 'accuracy_score', (['test_labels_max', 'predicted_labels_max'], {}), '(test_labels_max, predicted_labels_max)\n', (20769, 20808), False, 'from sklearn.metrics import accuracy_score\n'), ((21066, 21121), 'sklearn.metrics.confusion_matrix', 'confusion_matrix', (['test_labels_max', 'predicted_labels_max'], {}), '(test_labels_max, predicted_labels_max)\n', (21082, 21121), False, 'from sklearn.metrics import confusion_matrix\n'), ((21234, 21243), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (21241, 21243), True, 'import matplotlib.pyplot as plt\n'), ((21248, 21281), 'matplotlib.pyplot.imshow', 'plt.imshow', (['cm'], {'cmap': 'plt.cm.Blues'}), '(cm, cmap=plt.cm.Blues)\n', (21258, 21281), True, 
'import matplotlib.pyplot as plt\n'), ((21286, 21326), 'matplotlib.pyplot.title', 'plt.title', (['"""Normalized confusion matrix"""'], {}), "('Normalized confusion matrix')\n", (21295, 21326), True, 'import matplotlib.pyplot as plt\n'), ((21331, 21345), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {}), '()\n', (21343, 21345), True, 'import matplotlib.pyplot as plt\n'), ((21350, 21374), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""True label"""'], {}), "('True label')\n", (21360, 21374), True, 'import matplotlib.pyplot as plt\n'), ((21379, 21408), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Predicted label"""'], {}), "('Predicted label')\n", (21389, 21408), True, 'import matplotlib.pyplot as plt\n'), ((21413, 21431), 'matplotlib.pyplot.xticks', 'plt.xticks', (['[0, 1]'], {}), '([0, 1])\n', (21423, 21431), True, 'import matplotlib.pyplot as plt\n'), ((21433, 21451), 'matplotlib.pyplot.yticks', 'plt.yticks', (['[0, 1]'], {}), '([0, 1])\n', (21443, 21451), True, 'import matplotlib.pyplot as plt\n'), ((21456, 21471), 'matplotlib.pyplot.grid', 'plt.grid', (['(False)'], {}), '(False)\n', (21464, 21471), True, 'import matplotlib.pyplot as plt\n'), ((21709, 21763), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(output_directory + 'confusion_matrix.svg')"], {}), "(output_directory + 'confusion_matrix.svg')\n", (21720, 21763), True, 'import matplotlib.pyplot as plt\n'), ((21881, 21890), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (21888, 21890), True, 'import matplotlib.pyplot as plt\n'), ((21905, 21934), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(16, 9)'}), '(figsize=(16, 9))\n', (21917, 21934), True, 'import matplotlib.pyplot as plt\n'), ((22403, 22432), 'matplotlib.pyplot.title', 'plt.title', (['title'], {'fontsize': '(18)'}), '(title, fontsize=18)\n', (22412, 22432), True, 'import matplotlib.pyplot as plt\n'), ((22438, 22469), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['xlabel'], {'fontsize': '(18)'}), '(xlabel, 
fontsize=18)\n', (22448, 22469), True, 'import matplotlib.pyplot as plt\n'), ((22474, 22505), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['ylabel'], {'fontsize': '(18)'}), '(ylabel, fontsize=18)\n', (22484, 22505), True, 'import matplotlib.pyplot as plt\n'), ((22511, 22553), 'matplotlib.pyplot.savefig', 'plt.savefig', (['(output_directory + save_title)'], {}), '(output_directory + save_title)\n', (22522, 22553), True, 'import matplotlib.pyplot as plt\n'), ((22558, 22569), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (22567, 22569), True, 'import matplotlib.pyplot as plt\n'), ((23242, 23311), 'numpy.pad', 'np.pad', (['sequences'], {'pad_width': 'npad', 'mode': '"""constant"""', 'constant_values': '(0)'}), "(sequences, pad_width=npad, mode='constant', constant_values=0)\n", (23248, 23311), True, 'import numpy as np\n'), ((23327, 23356), 'numpy.zeros', 'np.zeros', (['(num_filter, 4, 19)'], {}), '((num_filter, 4, 19))\n', (23335, 23356), True, 'import numpy as np\n'), ((23367, 23396), 'numpy.zeros', 'np.zeros', (['(num_filter, 4, 19)'], {}), '((num_filter, 4, 19))\n', (23375, 23396), True, 'import numpy as np\n'), ((23455, 23489), 'numpy.zeros', 'np.zeros', (['(num_filter, y.shape[0])'], {}), '((num_filter, y.shape[0]))\n', (23463, 23489), True, 'import numpy as np\n'), ((23539, 23574), 'numpy.zeros', 'np.zeros', (['(num_filter, num_classes)'], {}), '((num_filter, num_classes))\n', (23547, 23574), True, 'import numpy as np\n'), ((23598, 23618), 'numpy.zeros', 'np.zeros', (['num_filter'], {}), '(num_filter)\n', (23606, 23618), True, 'import numpy as np\n'), ((23635, 23655), 'numpy.zeros', 'np.zeros', (['num_filter'], {}), '(num_filter)\n', (23643, 23655), True, 'import numpy as np\n'), ((26132, 26156), 'numpy.stack', 'np.stack', (['activated_OCRs'], {}), '(activated_OCRs)\n', (26140, 26156), True, 'import numpy as np\n'), ((27701, 27729), 'numpy.stack', 'np.stack', (['activation_indices'], {}), '(activation_indices)\n', (27709, 27729), True, 'import numpy 
as np\n'), ((27739, 27748), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (27746, 27748), True, 'import matplotlib.pyplot as plt\n'), ((27812, 27855), 'matplotlib.pyplot.title', 'plt.title', (['"""histogram of position indices."""'], {}), "('histogram of position indices.')\n", (27821, 27855), True, 'import matplotlib.pyplot as plt\n'), ((27860, 27883), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Frequency"""'], {}), "('Frequency')\n", (27870, 27883), True, 'import matplotlib.pyplot as plt\n'), ((27888, 27910), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Position"""'], {}), "('Position')\n", (27898, 27910), True, 'import matplotlib.pyplot as plt\n'), ((27915, 27966), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(output_directory + 'position_hist.svg')"], {}), "(output_directory + 'position_hist.svg')\n", (27926, 27966), True, 'import matplotlib.pyplot as plt\n'), ((27971, 27982), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (27980, 27982), True, 'import matplotlib.pyplot as plt\n'), ((28131, 28150), 'numpy.stack', 'np.stack', (['total_seq'], {}), '(total_seq)\n', (28139, 28150), True, 'import numpy as np\n'), ((28160, 28169), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (28167, 28169), True, 'import matplotlib.pyplot as plt\n'), ((28226, 28281), 'matplotlib.pyplot.title', 'plt.title', (['"""Number of sequences activating each filter"""'], {}), "('Number of sequences activating each filter')\n", (28235, 28281), True, 'import matplotlib.pyplot as plt\n'), ((28286, 28311), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""N sequences"""'], {}), "('N sequences')\n", (28296, 28311), True, 'import matplotlib.pyplot as plt\n'), ((28316, 28336), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Filter"""'], {}), "('Filter')\n", (28326, 28336), True, 'import matplotlib.pyplot as plt\n'), ((28341, 28394), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(output_directory + 'nseqs_bar_graph.svg')"], {}), "(output_directory + 
'nseqs_bar_graph.svg')\n", (28352, 28394), True, 'import matplotlib.pyplot as plt\n'), ((28399, 28410), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (28408, 28410), True, 'import matplotlib.pyplot as plt\n'), ((28705, 28729), 'numpy.unique', 'np.unique', (['mapping[:, 1]'], {}), '(mapping[:, 1])\n', (28714, 28729), True, 'import numpy as np\n'), ((28758, 28818), 'numpy.zeros', 'np.zeros', (['(mouse_predictions.shape[0], human_cells.shape[0])'], {}), '((mouse_predictions.shape[0], human_cells.shape[0]))\n', (28766, 28818), True, 'import numpy as np\n'), ((991, 1008), 'numpy.var', 'np.var', (['obs[i, :]'], {}), '(obs[i, :])\n', (997, 1008), True, 'import numpy as np\n'), ((1138, 1164), 'numpy.dot', 'np.dot', (['correlations', 'vars'], {}), '(correlations, vars)\n', (1144, 1164), True, 'import numpy as np\n'), ((1167, 1179), 'numpy.sum', 'np.sum', (['vars'], {}), '(vars)\n', (1173, 1179), True, 'import numpy as np\n'), ((1505, 1526), 'numpy.mean', 'np.mean', (['correlations'], {}), '(correlations)\n', (1512, 1526), True, 'import numpy as np\n'), ((2117, 2134), 'numpy.var', 'np.var', (['obs[i, :]'], {}), '(obs[i, :])\n', (2123, 2134), True, 'import numpy as np\n'), ((2175, 2216), 'sklearn.metrics.mean_squared_error', 'mean_squared_error', (['obs[i, :]', 'pred[i, :]'], {}), '(obs[i, :], pred[i, :])\n', (2193, 2216), False, 'from sklearn.metrics import mean_squared_error\n'), ((2260, 2281), 'numpy.dot', 'np.dot', (['mses', 'var_all'], {}), '(mses, var_all)\n', (2266, 2281), True, 'import numpy as np\n'), ((2284, 2299), 'numpy.sum', 'np.sum', (['var_all'], {}), '(var_all)\n', (2290, 2299), True, 'import numpy as np\n'), ((2599, 2612), 'numpy.mean', 'np.mean', (['mses'], {}), '(mses)\n', (2606, 2612), True, 'import numpy as np\n'), ((4868, 4897), 'numpy.percentile', 'np.percentile', (['Q1', '(75)'], {'axis': '(1)'}), '(Q1, 75, axis=1)\n', (4881, 4897), True, 'import numpy as np\n'), ((4900, 4929), 'numpy.percentile', 'np.percentile', (['Q1', '(25)'], {'axis': 
'(1)'}), '(Q1, 25, axis=1)\n', (4913, 4929), True, 'import numpy as np\n'), ((4940, 4969), 'numpy.percentile', 'np.percentile', (['Q2', '(75)'], {'axis': '(1)'}), '(Q2, 75, axis=1)\n', (4953, 4969), True, 'import numpy as np\n'), ((4972, 5001), 'numpy.percentile', 'np.percentile', (['Q2', '(25)'], {'axis': '(1)'}), '(Q2, 25, axis=1)\n', (4985, 5001), True, 'import numpy as np\n'), ((5012, 5041), 'numpy.percentile', 'np.percentile', (['Q3', '(75)'], {'axis': '(1)'}), '(Q3, 75, axis=1)\n', (5025, 5041), True, 'import numpy as np\n'), ((5044, 5073), 'numpy.percentile', 'np.percentile', (['Q3', '(25)'], {'axis': '(1)'}), '(Q3, 25, axis=1)\n', (5057, 5073), True, 'import numpy as np\n'), ((5084, 5113), 'numpy.percentile', 'np.percentile', (['Q4', '(75)'], {'axis': '(1)'}), '(Q4, 75, axis=1)\n', (5097, 5113), True, 'import numpy as np\n'), ((5116, 5145), 'numpy.percentile', 'np.percentile', (['Q4', '(25)'], {'axis': '(1)'}), '(Q4, 25, axis=1)\n', (5129, 5145), True, 'import numpy as np\n'), ((5156, 5185), 'numpy.percentile', 'np.percentile', (['Q5', '(75)'], {'axis': '(1)'}), '(Q5, 75, axis=1)\n', (5169, 5185), True, 'import numpy as np\n'), ((5188, 5217), 'numpy.percentile', 'np.percentile', (['Q5', '(25)'], {'axis': '(1)'}), '(Q5, 25, axis=1)\n', (5201, 5217), True, 'import numpy as np\n'), ((8272, 8293), 'numpy.mean', 'np.mean', (['correlations'], {}), '(correlations)\n', (8279, 8293), True, 'import numpy as np\n'), ((8758, 8780), 'numpy.arange', 'np.arange', (['num_classes'], {}), '(num_classes)\n', (8767, 8780), True, 'import numpy as np\n'), ((8912, 8934), 'numpy.arange', 'np.arange', (['num_classes'], {}), '(num_classes)\n', (8921, 8934), True, 'import numpy as np\n'), ((11821, 11843), 'numpy.arange', 'np.arange', (['num_classes'], {}), '(num_classes)\n', (11830, 11843), True, 'import numpy as np\n'), ((11893, 11905), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (11903, 11905), True, 'import matplotlib.pyplot as plt\n'), ((11936, 11963), 
'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(16, 9)'}), '(figsize=(16, 9))\n', (11946, 11963), True, 'import matplotlib.pyplot as plt\n'), ((12026, 12126), 'matplotlib.pyplot.bar', 'plt.bar', (['x_axis', 'y_samples_eval', 'width'], {'color': '"""#f99fa1"""', 'edgecolor': '"""none"""', 'label': '"""true activity"""'}), "(x_axis, y_samples_eval, width, color='#f99fa1', edgecolor='none',\n label='true activity')\n", (12033, 12126), True, 'import matplotlib.pyplot as plt\n'), ((12559, 12626), 'matplotlib.pyplot.xticks', 'plt.xticks', (['(x_axis + width)', 'xticks'], {'rotation': '"""vertical"""', 'fontsize': '(9)'}), "(x_axis + width, xticks, rotation='vertical', fontsize=9)\n", (12569, 12626), True, 'import matplotlib.pyplot as plt\n'), ((12749, 12785), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Cell type"""'], {'fontsize': '(12)'}), "('Cell type', fontsize=12)\n", (12759, 12785), True, 'import matplotlib.pyplot as plt\n'), ((12794, 12830), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['ylabel_text'], {'fontsize': '(12)'}), '(ylabel_text, fontsize=12)\n', (12804, 12830), True, 'import matplotlib.pyplot as plt\n'), ((12839, 12851), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (12849, 12851), True, 'import matplotlib.pyplot as plt\n'), ((12867, 12876), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (12874, 12876), True, 'import matplotlib.pyplot as plt\n'), ((12912, 13001), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(output_directory + sample_names + file_txt + '.svg')"], {'bbox_inches': '"""tight"""'}), "(output_directory + sample_names + file_txt + '.svg',\n bbox_inches='tight')\n", (12923, 13001), True, 'import matplotlib.pyplot as plt\n'), ((13006, 13017), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (13015, 13017), True, 'import matplotlib.pyplot as plt\n'), ((13379, 13401), 'numpy.arange', 'np.arange', (['num_classes'], {}), '(num_classes)\n', (13388, 13401), True, 'import numpy as np\n'), ((13439, 13451), 
'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (13449, 13451), True, 'import matplotlib.pyplot as plt\n'), ((13492, 13525), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(2)'], {'figsize': '(24, 12)'}), '(2, figsize=(24, 12))\n', (13504, 13525), True, 'import matplotlib.pyplot as plt\n'), ((14395, 14455), 'matplotlib.pyplot.xticks', 'plt.xticks', (['x_axis', 'xticks'], {'rotation': '"""vertical"""', 'fontsize': '(15)'}), "(x_axis, xticks, rotation='vertical', fontsize=15)\n", (14405, 14455), True, 'import matplotlib.pyplot as plt\n'), ((14464, 14500), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Cell type"""'], {'fontsize': '(24)'}), "('Cell type', fontsize=24)\n", (14474, 14500), True, 'import matplotlib.pyplot as plt\n'), ((14509, 14555), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Normalized activity"""'], {'fontsize': '(24)'}), "('Normalized activity', fontsize=24)\n", (14519, 14555), True, 'import matplotlib.pyplot as plt\n'), ((14571, 14580), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (14578, 14580), True, 'import matplotlib.pyplot as plt\n'), ((14616, 14713), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(output_directory + sample_names + file_txt + '_subplot.svg')"], {'bbox_inches': '"""tight"""'}), "(output_directory + sample_names + file_txt + '_subplot.svg',\n bbox_inches='tight')\n", (14627, 14713), True, 'import matplotlib.pyplot as plt\n'), ((14718, 14729), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (14727, 14729), True, 'import matplotlib.pyplot as plt\n'), ((15008, 15052), 'numpy.full', 'np.full', (['filt_pred.shape[1]', 'correlations[i]'], {}), '(filt_pred.shape[1], correlations[i])\n', (15015, 15052), True, 'import numpy as np\n'), ((15220, 15269), 'numpy.apply_along_axis', 'np.apply_along_axis', (['pearson_corr', '(1)', 'pred', 'label'], {}), '(pearson_corr, 1, pred, label)\n', (15239, 15269), True, 'import numpy as np\n'), ((15868, 15886), 'numpy.mean', 'np.mean', (['filt_corr'], {}), 
'(filt_corr)\n', (15875, 15886), True, 'import numpy as np\n'), ((16807, 16832), 'numpy.mean', 'np.mean', (['corr_change_mean'], {}), '(corr_change_mean)\n', (16814, 16832), True, 'import numpy as np\n'), ((17291, 17320), 'numpy.arange', 'np.arange', (['filt_pred.shape[1]'], {}), '(filt_pred.shape[1])\n', (17300, 17320), True, 'import numpy as np\n'), ((17683, 17700), 'numpy.absolute', 'np.absolute', (['infl'], {}), '(infl)\n', (17694, 17700), True, 'import numpy as np\n'), ((19109, 19126), 'numpy.absolute', 'np.absolute', (['infl'], {}), '(infl)\n', (19120, 19126), True, 'import numpy as np\n'), ((19236, 19263), 'numpy.mean', 'np.mean', (['infl_absolute_mean'], {}), '(infl_absolute_mean)\n', (19243, 19263), True, 'import numpy as np\n'), ((19764, 19792), 'numpy.log10', 'np.log10', (['infl_absolute_mean'], {}), '(infl_absolute_mean)\n', (19772, 19792), True, 'import numpy as np\n'), ((20929, 21021), 'sklearn.metrics.classification_report', 'classification_report', (['test_labels_max', 'predicted_labels_max'], {'target_names': 'target_names'}), '(test_labels_max, predicted_labels_max, target_names=\n target_names)\n', (20950, 21021), False, 'from sklearn.metrics import classification_report\n'), ((22082, 22112), 'matplotlib.pyplot.xlim', 'plt.xlim', (['min_limit', 'max_limit'], {}), '(min_limit, max_limit)\n', (22090, 22112), True, 'import matplotlib.pyplot as plt\n'), ((22121, 22151), 'matplotlib.pyplot.ylim', 'plt.ylim', (['min_limit', 'max_limit'], {}), '(min_limit, max_limit)\n', (22129, 22151), True, 'import matplotlib.pyplot as plt\n'), ((22828, 22858), 'numpy.swapaxes', 'np.swapaxes', (['activations', '(1)', '(2)'], {}), '(activations, 1, 2)\n', (22839, 22858), True, 'import numpy as np\n'), ((22914, 22942), 'numpy.swapaxes', 'np.swapaxes', (['sequences', '(1)', '(2)'], {}), '(sequences, 1, 2)\n', (22925, 22942), True, 'import numpy as np\n'), ((28182, 28203), 'numpy.arange', 'np.arange', (['num_filter'], {}), '(num_filter)\n', (28191, 28203), True, 'import 
numpy as np\n'), ((1046, 1080), 'numpy.corrcoef', 'np.corrcoef', (['pred[i, :]', 'obs[i, :]'], {}), '(pred[i, :], obs[i, :])\n', (1057, 1080), True, 'import numpy as np\n'), ((1285, 1302), 'math.isnan', 'math.isnan', (['value'], {}), '(value)\n', (1295, 1302), False, 'import math\n'), ((2397, 2414), 'math.isnan', 'math.isnan', (['value'], {}), '(value)\n', (2407, 2414), False, 'import math\n'), ((7687, 7696), 'matplotlib.colors.LogNorm', 'LogNorm', ([], {}), '()\n', (7694, 7696), False, 'from matplotlib.colors import LogNorm\n'), ((8091, 8125), 'numpy.corrcoef', 'np.corrcoef', (['pred[:, i]', 'obs[:, i]'], {}), '(pred[:, i], obs[:, i])\n', (8102, 8125), True, 'import numpy as np\n'), ((9606, 9651), 'random.sample', 'random.sample', (['ind_collection[k + 1]', 'num_plt'], {}), '(ind_collection[k + 1], num_plt)\n', (9619, 9651), False, 'import random\n'), ((9840, 9862), 'numpy.arange', 'np.arange', (['num_classes'], {}), '(num_classes)\n', (9849, 9862), True, 'import numpy as np\n'), ((10024, 10037), 'matplotlib.pyplot.figure', 'plt.figure', (['(1)'], {}), '(1)\n', (10034, 10037), True, 'import matplotlib.pyplot as plt\n'), ((11174, 11183), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (11181, 11183), True, 'import matplotlib.pyplot as plt\n'), ((11417, 11428), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (11426, 11428), True, 'import matplotlib.pyplot as plt\n'), ((12392, 12500), 'matplotlib.pyplot.bar', 'plt.bar', (['(x_axis + width)', 'predicted_classes', 'width'], {'color': '"""#014ead"""', 'edgecolor': '"""none"""', 'label': '"""prediction"""'}), "(x_axis + width, predicted_classes, width, color='#014ead',\n edgecolor='none', label='prediction')\n", (12399, 12500), True, 'import matplotlib.pyplot as plt\n'), ((13832, 13885), 'utils.minmax_scale', 'utils.minmax_scale', (['predicted_classes', 'y_samples_eval'], {}), '(predicted_classes, y_samples_eval)\n', (13850, 13885), False, 'import utils\n'), ((15455, 15486), 'numpy.square', 'np.square', 
(['(corr - corr_original)'], {}), '(corr - corr_original)\n', (15464, 15486), True, 'import numpy as np\n'), ((18182, 18212), 'numpy.mean', 'np.mean', (['infl_mean_act'], {'axis': '(0)'}), '(infl_mean_act, axis=0)\n', (18189, 18212), True, 'import numpy as np\n'), ((18677, 18698), 'numpy.mean', 'np.mean', (['infl'], {'axis': '(0)'}), '(infl, axis=0)\n', (18684, 18698), True, 'import numpy as np\n'), ((19799, 19827), 'numpy.isnan', 'np.isnan', (['infl_absolute_mean'], {}), '(infl_absolute_mean)\n', (19807, 19827), True, 'import numpy as np\n'), ((23015, 23043), 'numpy.amax', 'np.amax', (['activations'], {'axis': '(2)'}), '(activations, axis=2)\n', (23022, 23043), True, 'import numpy as np\n'), ((23097, 23130), 'numpy.amax', 'np.amax', (['activations'], {'axis': '(0, 2)'}), '(activations, axis=(0, 2))\n', (23104, 23130), True, 'import numpy as np\n'), ((24803, 24826), 'numpy.stack', 'np.stack', (['act_seqs_list'], {}), '(act_seqs_list)\n', (24811, 24826), True, 'import numpy as np\n'), ((24847, 24871), 'numpy.sum', 'np.sum', (['act_seqs'], {'axis': '(0)'}), '(act_seqs, axis=0)\n', (24853, 24871), True, 'import numpy as np\n'), ((24916, 24939), 'numpy.sum', 'np.sum', (['pwm_tmp'], {'axis': '(0)'}), '(pwm_tmp, axis=0)\n', (24922, 24939), True, 'import numpy as np\n'), ((25847, 25869), 'numpy.stack', 'np.stack', (['act_OCRs_tmp'], {}), '(act_OCRs_tmp)\n', (25855, 25869), True, 'import numpy as np\n'), ((25903, 25932), 'numpy.mean', 'np.mean', (['act_OCRs_tmp'], {'axis': '(0)'}), '(act_OCRs_tmp, axis=0)\n', (25910, 25932), True, 'import numpy as np\n'), ((26745, 26765), 'numpy.sum', 'np.sum', (['pwm[i, :, :]'], {}), '(pwm[i, :, :])\n', (26751, 26765), True, 'import numpy as np\n'), ((1417, 1434), 'math.isnan', 'math.isnan', (['value'], {}), '(value)\n', (1427, 1434), False, 'import math\n'), ((1707, 1728), 'numpy.mean', 'np.mean', (['correlations'], {}), '(correlations)\n', (1714, 1728), True, 'import numpy as np\n'), ((2520, 2537), 'math.isnan', 'math.isnan', 
(['value'], {}), '(value)\n', (2530, 2537), False, 'import math\n'), ((2785, 2798), 'numpy.mean', 'np.mean', (['mses'], {}), '(mses)\n', (2792, 2798), True, 'import numpy as np\n'), ((8420, 8441), 'numpy.mean', 'np.mean', (['correlations'], {}), '(correlations)\n', (8427, 8441), True, 'import numpy as np\n'), ((10116, 10143), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(16, 9)'}), '(figsize=(16, 9))\n', (10126, 10143), True, 'import matplotlib.pyplot as plt\n'), ((10160, 10204), 'matplotlib.pyplot.subplot', 'plt.subplot', (['mum_plt_row', 'mum_plt_col', '(i + 1)'], {}), '(mum_plt_row, mum_plt_col, i + 1)\n', (10171, 10204), True, 'import matplotlib.pyplot as plt\n'), ((10221, 10324), 'matplotlib.pyplot.bar', 'plt.bar', (['x_axis', 'y_samples_eval[i]', 'width'], {'color': '"""#f99fa1"""', 'edgecolor': '"""none"""', 'label': '"""true activity"""'}), "(x_axis, y_samples_eval[i], width, color='#f99fa1', edgecolor='none',\n label='true activity')\n", (10228, 10324), True, 'import matplotlib.pyplot as plt\n'), ((10829, 10896), 'matplotlib.pyplot.xticks', 'plt.xticks', (['(x_axis + width)', 'xticks'], {'rotation': '"""vertical"""', 'fontsize': '(9)'}), "(x_axis + width, xticks, rotation='vertical', fontsize=9)\n", (10839, 10896), True, 'import matplotlib.pyplot as plt\n'), ((11015, 11051), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""cell type"""'], {'fontsize': '(12)'}), "('cell type', fontsize=12)\n", (11025, 11051), True, 'import matplotlib.pyplot as plt\n'), ((11068, 11116), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (["(scale_txt + ' activity')"], {'fontsize': '(12)'}), "(scale_txt + ' activity', fontsize=12)\n", (11078, 11116), True, 'import matplotlib.pyplot as plt\n'), ((11133, 11145), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (11143, 11145), True, 'import matplotlib.pyplot as plt\n'), ((12181, 12234), 'utils.minmax_scale', 'utils.minmax_scale', (['predicted_classes', 'y_samples_eval'], {}), '(predicted_classes, 
y_samples_eval)\n', (12199, 12234), False, 'import utils\n'), ((15175, 15199), 'numpy.corrcoef', 'np.corrcoef', (['pred', 'label'], {}), '(pred, label)\n', (15186, 15199), True, 'import numpy as np\n'), ((16013, 16031), 'numpy.mean', 'np.mean', (['filt_corr'], {}), '(filt_corr)\n', (16020, 16031), True, 'import numpy as np\n'), ((16959, 16984), 'numpy.mean', 'np.mean', (['corr_change_mean'], {}), '(corr_change_mean)\n', (16966, 16984), True, 'import numpy as np\n'), ((19014, 19031), 'numpy.absolute', 'np.absolute', (['infl'], {}), '(infl)\n', (19025, 19031), True, 'import numpy as np\n'), ((19401, 19428), 'numpy.mean', 'np.mean', (['infl_absolute_mean'], {}), '(infl_absolute_mean)\n', (19408, 19428), True, 'import numpy as np\n'), ((21985, 21994), 'numpy.min', 'np.min', (['x'], {}), '(x)\n', (21991, 21994), True, 'import numpy as np\n'), ((21996, 22005), 'numpy.min', 'np.min', (['y'], {}), '(y)\n', (22002, 22005), True, 'import numpy as np\n'), ((22045, 22054), 'numpy.max', 'np.max', (['x'], {}), '(x)\n', (22051, 22054), True, 'import numpy as np\n'), ((22056, 22065), 'numpy.max', 'np.max', (['y'], {}), '(y)\n', (22062, 22065), True, 'import numpy as np\n'), ((22179, 22188), 'numpy.min', 'np.min', (['x'], {}), '(x)\n', (22185, 22188), True, 'import numpy as np\n'), ((22195, 22204), 'numpy.max', 'np.max', (['x'], {}), '(x)\n', (22201, 22204), True, 'import numpy as np\n'), ((22228, 22237), 'numpy.min', 'np.min', (['y'], {}), '(y)\n', (22234, 22237), True, 'import numpy as np\n'), ((22244, 22253), 'numpy.max', 'np.max', (['y'], {}), '(y)\n', (22250, 22253), True, 'import numpy as np\n'), ((22312, 22331), 'matplotlib.pyplot.get_cmap', 'plt.get_cmap', (['"""jet"""'], {}), "('jet')\n", (22324, 22331), True, 'import matplotlib.pyplot as plt\n'), ((22378, 22397), 'matplotlib.pyplot.get_cmap', 'plt.get_cmap', (['"""jet"""'], {}), "('jet')\n", (22390, 22397), True, 'import matplotlib.pyplot as plt\n'), ((24019, 24078), 'numpy.where', 'np.where', (['(activations[j, i, :] > 
activation_threshold[j, i])'], {}), '(activations[j, i, :] > activation_threshold[j, i])\n', (24027, 24078), True, 'import numpy as np\n'), ((24120, 24176), 'numpy.where', 'np.where', (['(activations[j, i, :] > activation_threshold[i])'], {}), '(activations[j, i, :] > activation_threshold[i])\n', (24128, 24176), True, 'import numpy as np\n'), ((25102, 25148), 'numpy.errstate', 'np.errstate', ([], {'divide': '"""ignore"""', 'invalid': '"""ignore"""'}), "(divide='ignore', invalid='ignore')\n", (25113, 25148), True, 'import numpy as np\n'), ((25174, 25204), 'numpy.true_divide', 'np.true_divide', (['pwm_tmp', 'total'], {}), '(pwm_tmp, total)\n', (25188, 25204), True, 'import numpy as np\n'), ((25325, 25347), 'numpy.nan_to_num', 'np.nan_to_num', (['pwm_tmp'], {}), '(pwm_tmp)\n', (25338, 25347), True, 'import numpy as np\n'), ((28897, 28932), 'numpy.where', 'np.where', (['(mapping[:, 1] == celltype)'], {}), '(mapping[:, 1] == celltype)\n', (28905, 28932), True, 'import numpy as np\n'), ((28952, 28986), 'numpy.in1d', 'np.in1d', (['mouse_cell_types', 'matches'], {}), '(mouse_cell_types, matches)\n', (28959, 28986), True, 'import numpy as np\n'), ((10630, 10741), 'matplotlib.pyplot.bar', 'plt.bar', (['(x_axis + width)', 'predicted_classes[i]', 'width'], {'color': '"""#014ead"""', 'edgecolor': '"""none"""', 'label': '"""prediction"""'}), "(x_axis + width, predicted_classes[i], width, color='#014ead',\n edgecolor='none', label='prediction')\n", (10637, 10741), True, 'import matplotlib.pyplot as plt\n'), ((27245, 27265), 'numpy.sum', 'np.sum', (['pwm[i, :, j]'], {}), '(pwm[i, :, j])\n', (27251, 27265), True, 'import numpy as np\n'), ((29107, 29149), 'numpy.mean', 'np.mean', (['mouse_predictions[:, idx]'], {'axis': '(2)'}), '(mouse_predictions[:, idx], axis=2)\n', (29114, 29149), True, 'import numpy as np\n'), ((29226, 29267), 'numpy.max', 'np.max', (['mouse_predictions[:, idx]'], {'axis': '(2)'}), '(mouse_predictions[:, idx], axis=2)\n', (29232, 29267), True, 'import numpy as 
np\n'), ((29347, 29391), 'numpy.median', 'np.median', (['mouse_predictions[:, idx]'], {'axis': '(2)'}), '(mouse_predictions[:, idx], axis=2)\n', (29356, 29391), True, 'import numpy as np\n'), ((10391, 10450), 'utils.minmax_scale', 'utils.minmax_scale', (['predicted_classes[i]', 'y_samples_eval[i]'], {}), '(predicted_classes[i], y_samples_eval[i])\n', (10409, 10450), False, 'import utils\n'), ((26946, 26974), 'numpy.sum', 'np.sum', (['pwm[i, :, :]'], {'axis': '(0)'}), '(pwm[i, :, :], axis=0)\n', (26952, 26974), True, 'import numpy as np\n'), ((27165, 27193), 'numpy.sum', 'np.sum', (['pwm[i, :, :]'], {'axis': '(0)'}), '(pwm[i, :, :], axis=0)\n', (27171, 27193), True, 'import numpy as np\n')] |
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from sklearn.utils import check_consistent_length, check_array
import numpy
__all__ = ['concordance_index_censored']
def concordance_index_censored(event_indicator, event_time, estimate):
    """Concordance index for right-censored data
    The concordance index is defined as the proportion of all comparable pairs
    in which the predictions and outcomes are concordant.
    Samples are comparable if for at least one of them an event occurred.
    If the estimated risk is larger for the sample with a higher time of
    event/censoring, the predictions of that pair are said to be concordant.
    If an event occurred for one sample and the other is known to be
    event-free at least until the time of event of the first, the second
    sample is assumed to *outlive* the first.
    When predicted risks are identical for a pair, 0.5 rather than 1 is added
    to the count of concordant pairs.
    A pair is not comparable if an event occurred for both of them at the same
    time or an event occurred for one of them but the time of censoring is
    smaller than the time of event of the first one.
    Parameters
    ----------
    event_indicator : array-like, shape = (n_samples,)
        Boolean array denotes whether an event occurred
    event_time : array-like, shape = (n_samples,)
        Array containing the time of an event or time of censoring
    estimate : array-like, shape = (n_samples,)
        Estimated risk of experiencing an event
    Returns
    -------
    cindex : float
        Concordance index
    concordant : int
        Number of concordant pairs
    discordant : int
        Number of discordant pairs
    tied_risk : int
        Number of pairs having tied estimated risks
    tied_time : int
        Number of pairs having an event at the same time
    References
    ----------
    .. [1] <NAME>., <NAME>., <NAME>., <NAME>., <NAME>,
           "Multivariable prognostic models: issues in developing models,
           evaluating assumptions and adequacy, and measuring and reducing errors",
           Statistics in Medicine, 15(4), 361-87, 1996.
    """
    # Validate inputs: equal lengths, 1-D arrays, boolean event indicator.
    check_consistent_length(event_indicator, event_time, estimate)
    event_indicator = check_array(event_indicator, ensure_2d=False)
    event_time = check_array(event_time, ensure_2d=False)
    estimate = check_array(estimate, ensure_2d=False)
    if not numpy.issubdtype(event_indicator.dtype, numpy.bool_):
        raise ValueError(
            'only boolean arrays are supported as class labels for survival analysis, got {0}'.format(
                event_indicator.dtype))
    n_samples = len(event_time)
    if n_samples < 2:
        raise ValueError("Need a minimum of two samples")
    if not event_indicator.any():
        raise ValueError("All samples are censored")
    # Process samples in order of ascending event/censoring time so that
    # every later sample survived at least as long as the current one.
    order = numpy.argsort(event_time)
    tied_time = 0
    # comparable maps a position i (in sorted order) to a boolean mask over
    # all samples marking which samples form a comparable pair with i.
    comparable = {}
    for i in range(n_samples - 1):
        inext = i + 1
        j = inext
        time_i = event_time[order[i]]
        # Advance j past the run of samples sharing the same time as i.
        while j < n_samples and event_time[order[j]] == time_i:
            j += 1
        if event_indicator[order[i]]:
            # Sample i had an event: every later sample is comparable ...
            mask = numpy.zeros(n_samples, dtype=bool)
            mask[inext:] = True
            if j - i > 1:
                # event times are tied, need to check for coinciding events
                # ... except samples whose event occurred at the same time,
                # which form tied-time pairs instead of comparable pairs.
                event_at_same_time = event_indicator[order[inext:j]]
                mask[inext:j] = numpy.logical_not(event_at_same_time)
                tied_time += event_at_same_time.sum()
            comparable[i] = mask
        elif j - i > 1:
            # events at same time are comparable if at least one of them is positive
            mask = numpy.zeros(n_samples, dtype=bool)
            mask[inext:j] = event_indicator[order[inext:j]]
            comparable[i] = mask
    concordant = 0
    discordant = 0
    tied_risk = 0
    for ind, mask in comparable.items():
        est_i = estimate[order[ind]]
        event_i = event_indicator[order[ind]]
        est = estimate[order[mask]]
        if event_i:
            # an event should have a higher score
            con = (est < est_i).sum()
        else:
            # a non-event should have a lower score
            con = (est > est_i).sum()
        concordant += con
        # Pairs with identical risk estimates contribute 0.5 each (below).
        tie = (est == est_i).sum()
        tied_risk += tie
        discordant += est.size - con - tie
    cindex = (concordant + 0.5 * tied_risk) / (concordant + discordant + tied_risk)
    return cindex, concordant, discordant, tied_risk, tied_time
"sklearn.utils.check_array",
"numpy.logical_not",
"numpy.zeros",
"numpy.argsort",
"sklearn.utils.check_consistent_length",
"numpy.issubdtype"
] | [((2803, 2865), 'sklearn.utils.check_consistent_length', 'check_consistent_length', (['event_indicator', 'event_time', 'estimate'], {}), '(event_indicator, event_time, estimate)\n', (2826, 2865), False, 'from sklearn.utils import check_consistent_length, check_array\n'), ((2889, 2934), 'sklearn.utils.check_array', 'check_array', (['event_indicator'], {'ensure_2d': '(False)'}), '(event_indicator, ensure_2d=False)\n', (2900, 2934), False, 'from sklearn.utils import check_consistent_length, check_array\n'), ((2953, 2993), 'sklearn.utils.check_array', 'check_array', (['event_time'], {'ensure_2d': '(False)'}), '(event_time, ensure_2d=False)\n', (2964, 2993), False, 'from sklearn.utils import check_consistent_length, check_array\n'), ((3010, 3048), 'sklearn.utils.check_array', 'check_array', (['estimate'], {'ensure_2d': '(False)'}), '(estimate, ensure_2d=False)\n', (3021, 3048), False, 'from sklearn.utils import check_consistent_length, check_array\n'), ((3512, 3537), 'numpy.argsort', 'numpy.argsort', (['event_time'], {}), '(event_time)\n', (3525, 3537), False, 'import numpy\n'), ((3063, 3115), 'numpy.issubdtype', 'numpy.issubdtype', (['event_indicator.dtype', 'numpy.bool_'], {}), '(event_indicator.dtype, numpy.bool_)\n', (3079, 3115), False, 'import numpy\n'), ((3843, 3877), 'numpy.zeros', 'numpy.zeros', (['n_samples'], {'dtype': 'bool'}), '(n_samples, dtype=bool)\n', (3854, 3877), False, 'import numpy\n'), ((4118, 4155), 'numpy.logical_not', 'numpy.logical_not', (['event_at_same_time'], {}), '(event_at_same_time)\n', (4135, 4155), False, 'import numpy\n'), ((4376, 4410), 'numpy.zeros', 'numpy.zeros', (['n_samples'], {'dtype': 'bool'}), '(n_samples, dtype=bool)\n', (4387, 4410), False, 'import numpy\n')] |
import numpy as np
from stereosim.compute_coordinates import compute_coordinates
def compute_CAHVOR(pinhole_model):
    """
    Computation of CAHVOR from photogrammetric parameters.

    Parameters
    ----------
    pinhole_model : dict
        Photogrammetric camera parameters: 'center', 'f' (focal length),
        'rotation_mat', 'pixelsize', 'principal', 'image_size', optional
        radial distortion 'K', and 'az'/'el' to get back to the origin
        position of the PTU.

    Returns
    -------
    cahvor : dict
        Computed CAHVOR parameters 'C', 'A', 'H', 'V', 'O', 'R' (None when
        no distortion is given) plus the scalars 'hs', 'hc', 'vs', 'vc'.
    """
    # Horizontal/vertical scale: focal length expressed in pixels.
    hs = pinhole_model['f'] / pinhole_model['pixelsize']
    vs = pinhole_model['f'] / pinhole_model['pixelsize']
    # Image centre in pixels, shifted by the principal point offset.
    hc = (pinhole_model['image_size'][1] / 2) + \
        (pinhole_model['principal'][0] / pinhole_model['pixelsize'])
    vc = (pinhole_model['image_size'][0] / 2) - \
        (pinhole_model['principal'][1] / pinhole_model['pixelsize'])
    C = pinhole_model['center']
    # Unit vectors from the rotation matrix rows: A points along the
    # optical axis, Hn/Vn span the image plane.
    A = - pinhole_model['rotation_mat'][2, :]
    Hn = pinhole_model['rotation_mat'][0, :]
    Vn = - pinhole_model['rotation_mat'][1, :]
    H = hs * Hn + hc * A
    # BUG FIX: the vertical vector must be built from Vn, not Hn
    # (the original computed Vn but never used it).
    V = vs * Vn + vc * A
    # We assume O = A in converted CAHVOR Model.  Take a copy: with a plain
    # `O = A` both names share one array, so the in-place axis remap below
    # would be applied to the shared data twice.
    O = A.copy()
    # Fixing axis specifically for PTU unit: (x, y, z) -> (z, -x, -y).
    A[0], A[1], A[2] = A[2], -A[0], -A[1]
    H[0], H[1], H[2] = H[2], -H[0], -H[1]
    V[0], V[1], V[2] = V[2], -V[0], -V[1]
    O[0], O[1], O[2] = O[2], -O[0], -O[1]
    A = compute_coordinates(A, pinhole_model['az'], pinhole_model['el'])
    H = compute_coordinates(H, pinhole_model['az'], pinhole_model['el'])
    V = compute_coordinates(V, pinhole_model['az'], pinhole_model['el'])
    O = compute_coordinates(O, pinhole_model['az'], pinhole_model['el'])
    # Optional radial distortion terms, scaled by powers of the focal length.
    R = pinhole_model.get('K')
    if R is not None:
        R = np.array([R[0], R[1] * (pinhole_model['f']**2),
                      R[2] * (pinhole_model['f']**4)])
        R = compute_coordinates(R, pinhole_model['az'], pinhole_model['el'])
    cahvor = dict([('C', C), ('A', A), ('H', H), ('V', V), ('O', O), ('R', R),
                   ('hs', hs), ('hc', hc), ('vs', vs), ('vc', vc)])
    return cahvor
| [
"numpy.array",
"stereosim.compute_coordinates.compute_coordinates"
] | [((1481, 1545), 'stereosim.compute_coordinates.compute_coordinates', 'compute_coordinates', (['A', "pinhole_model['az']", "pinhole_model['el']"], {}), "(A, pinhole_model['az'], pinhole_model['el'])\n", (1500, 1545), False, 'from stereosim.compute_coordinates import compute_coordinates\n'), ((1554, 1618), 'stereosim.compute_coordinates.compute_coordinates', 'compute_coordinates', (['H', "pinhole_model['az']", "pinhole_model['el']"], {}), "(H, pinhole_model['az'], pinhole_model['el'])\n", (1573, 1618), False, 'from stereosim.compute_coordinates import compute_coordinates\n'), ((1627, 1691), 'stereosim.compute_coordinates.compute_coordinates', 'compute_coordinates', (['V', "pinhole_model['az']", "pinhole_model['el']"], {}), "(V, pinhole_model['az'], pinhole_model['el'])\n", (1646, 1691), False, 'from stereosim.compute_coordinates import compute_coordinates\n'), ((1700, 1764), 'stereosim.compute_coordinates.compute_coordinates', 'compute_coordinates', (['O', "pinhole_model['az']", "pinhole_model['el']"], {}), "(O, pinhole_model['az'], pinhole_model['el'])\n", (1719, 1764), False, 'from stereosim.compute_coordinates import compute_coordinates\n'), ((1881, 1966), 'numpy.array', 'np.array', (["[R[0], R[1] * pinhole_model['f'] ** 2, R[2] * pinhole_model['f'] ** 4]"], {}), "([R[0], R[1] * pinhole_model['f'] ** 2, R[2] * pinhole_model['f'] ** 4]\n )\n", (1889, 1966), True, 'import numpy as np\n'), ((1996, 2060), 'stereosim.compute_coordinates.compute_coordinates', 'compute_coordinates', (['R', "pinhole_model['az']", "pinhole_model['el']"], {}), "(R, pinhole_model['az'], pinhole_model['el'])\n", (2015, 2060), False, 'from stereosim.compute_coordinates import compute_coordinates\n')] |
import numpy as np
import pycpp_examples
from pytest import approx
import time
def test_pycpp_pose():
    """Exercise the Pose binding and the odometry between two poses."""
    print(pycpp_examples)
    print(dir(pycpp_examples))
    Pose = pycpp_examples.Pose
    start_pose = Pose(0, 0.2, 0.1)
    end_pose = Pose(x=0.0, y=0.2, yaw=0.2)
    relative = Pose.get_odom(start_pose, end_pose)
    assert relative.x == approx(0.0)
    assert relative.y == approx(0.0)
    assert relative.yaw == approx(-0.1)
def test_pycpp_vector_ops():
    """Show how PyBind works with vectors/lists.
    Generates random data and sqrt-sum's in different ways."""
    # Generate some random data
    data = np.random.random(500000)

    t0 = time.time()
    numpy_total = np.sqrt(data).sum()
    print(f"Numpy Sum Time: {time.time() - t0}")

    t0 = time.time()
    eigen_total = pycpp_examples.sqrt_sum_vec(data)
    print(f"C++ Eigen Sum Time: {time.time() - t0}")

    # Convert to a vector
    data = list(data)

    t0 = time.time()
    loop_total = 0
    for value in data:
        loop_total += np.sqrt(value)
    print(f"Python loop sum time: {time.time() - t0}")

    t0 = time.time()
    vector_total = pycpp_examples.sqrt_sum_vec(data)
    print(f"C++ vector sum time: {time.time() - t0}")

    # All four approaches must agree on the result.
    assert numpy_total == approx(eigen_total)
    assert numpy_total == approx(loop_total)
    assert numpy_total == approx(vector_total)
| [
"time.time",
"pycpp_examples.sqrt_sum_vec",
"numpy.random.random",
"pytest.approx",
"numpy.sqrt"
] | [((578, 602), 'numpy.random.random', 'np.random.random', (['(500000)'], {}), '(500000)\n', (594, 602), True, 'import numpy as np\n'), ((616, 627), 'time.time', 'time.time', ([], {}), '()\n', (625, 627), False, 'import time\n'), ((725, 736), 'time.time', 'time.time', ([], {}), '()\n', (734, 736), False, 'import time\n'), ((757, 789), 'pycpp_examples.sqrt_sum_vec', 'pycpp_examples.sqrt_sum_vec', (['vec'], {}), '(vec)\n', (784, 789), False, 'import pycpp_examples\n'), ((906, 917), 'time.time', 'time.time', ([], {}), '()\n', (915, 917), False, 'import time\n'), ((1055, 1066), 'time.time', 'time.time', ([], {}), '()\n', (1064, 1066), False, 'import time\n'), ((1085, 1117), 'pycpp_examples.sqrt_sum_vec', 'pycpp_examples.sqrt_sum_vec', (['vec'], {}), '(vec)\n', (1112, 1117), False, 'import pycpp_examples\n'), ((311, 322), 'pytest.approx', 'approx', (['(0.0)'], {}), '(0.0)\n', (317, 322), False, 'from pytest import approx\n'), ((344, 355), 'pytest.approx', 'approx', (['(0.0)'], {}), '(0.0)\n', (350, 355), False, 'from pytest import approx\n'), ((379, 391), 'pytest.approx', 'approx', (['(-0.1)'], {}), '(-0.1)\n', (385, 391), False, 'from pytest import approx\n'), ((973, 983), 'numpy.sqrt', 'np.sqrt', (['v'], {}), '(v)\n', (980, 983), True, 'import numpy as np\n'), ((1197, 1218), 'pytest.approx', 'approx', (['sum_cpp_eigen'], {}), '(sum_cpp_eigen)\n', (1203, 1218), False, 'from pytest import approx\n'), ((1240, 1256), 'pytest.approx', 'approx', (['sum_loop'], {}), '(sum_loop)\n', (1246, 1256), False, 'from pytest import approx\n'), ((1278, 1297), 'pytest.approx', 'approx', (['sum_cpp_vec'], {}), '(sum_cpp_vec)\n', (1284, 1297), False, 'from pytest import approx\n'), ((641, 653), 'numpy.sqrt', 'np.sqrt', (['vec'], {}), '(vec)\n', (648, 653), True, 'import numpy as np\n'), ((689, 700), 'time.time', 'time.time', ([], {}), '()\n', (698, 700), False, 'import time\n'), ((823, 834), 'time.time', 'time.time', ([], {}), '()\n', (832, 834), False, 'import time\n'), ((1019, 1030), 
'time.time', 'time.time', ([], {}), '()\n', (1028, 1030), False, 'import time\n'), ((1152, 1163), 'time.time', 'time.time', ([], {}), '()\n', (1161, 1163), False, 'import time\n')] |
# -*- coding: utf-8 -*-
"""
Created on Wed Nov 28 13:27:32 2018
@author: <NAME>
"""
import pandas as pd
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import calinski_harabaz_score
from sklearn.cluster import AgglomerativeClustering
import numpy as np
import pca
import seaborn as sns
import matplotlib.pyplot as plt
import read_attributes_signatures
import os
import sys
def create_cluster_labels(df: pd.DataFrame, num_groups, return_score=False):
    """
    Cluster a dataframe with Ward-linkage agglomerative clustering.

    Parameters
    ----------
    df : pd.DataFrame
        Data to cluster, one sample per row.
    num_groups : int
        Number of clusters to form.
    return_score : bool, optional
        If True, return the Calinski-Harabasz score of the clustering
        instead of the labels.

    Returns
    -------
    pd.DataFrame or float
        One-column dataframe "Cluster" indexed like ``df``, or the
        Calinski-Harabasz score when ``return_score`` is True.
    """
    np.random.seed(0)  # deterministic clustering
    agg_clust = AgglomerativeClustering(n_clusters=num_groups,
                                        linkage="ward")
    # Standardize features before clustering; fit_transform does not
    # mutate df, so no defensive copy is needed.
    X = StandardScaler().fit_transform(df)
    agg_clust.fit(X)
    if return_score:
        # BUG FIX: the original returned ``agg_clust.connectivity``, which
        # is merely the (None) constructor argument, not a quality score.
        return calinski_harabaz_score(X, agg_clust.labels_)
    labels = pd.DataFrame(agg_clust.labels_, index=df.index, columns=["Cluster"])
    return labels
def biplot(pca_df_with_labels,pca_object):
"""
Plots the clustered PCA
"""
colors= ["#e6194B", "#f58231", "#fffac8", "#bfef45", "#3cb44b",
"#42d4f4", "#4363d8", "#911eb4", "#a9a9a9", "#ffffff"]
n_list = ["1 (n=230)", "2 (n=101)", "3 (n=7)", "4 (n=52)", "5 (n=9)",
"6 (n=18)", "7 (n=23)", "8 (n=90)", "9 (n=61)", "10 (n=52)"]
# Basic set up
alpha = 0.6
fig = plt.Figure()
pca_df_with_labels["Cluster"] = pca_df_with_labels["Cluster"] + 1
# Plotting
plot = sns.lmplot(x="PC 1",
y="PC 2",
data=pca_df_with_labels,
hue="Cluster",
fit_reg=False,
palette=colors,#"gist_earth",
scatter_kws={"s": 30, "alpha":1, "edgecolor":"black", "linewidth":0.3, "zorder":2},
legend=False)
ax = plt.gca()
# Names of the factors
factors = ['Mean annual discharge', 'Mean winter discharge', 'Mean half-flow date',
'Q95 (high flow)', 'Runoff ratio', 'Mean summer discharge']
# using scaling factors to make the arrows
arrow_size, text_pos = 7.0, 8.0,
# Add the arrows
for i, v in enumerate(pca_object.components_.T):
ax.arrow(0, 0, arrow_size*v[0], arrow_size*v[1], head_width=0.2, head_length=0.2, linewidth=1.5, color="grey", zorder=3)
# Fix the overlapping text
if factors[i] == "Mean annual discharge":
txt = ax.text(v[0]*text_pos, v[1]*text_pos + 0.3, factors[i], color='black', ha='center', va='center', fontsize=9, zorder=4, alpha=0.75)
elif factors[i] == "Q95 (high flow)":
txt = ax.text(v[0]*text_pos, v[1]*text_pos -0.3, factors[i], color='black', ha='center', va='center', fontsize=9, zorder=4, alpha=0.75)
elif factors[i] == "Mean winter discharge":
txt = ax.text(v[0]*text_pos, v[1]*text_pos -0.2, factors[i], color='black', ha='center', va='center', fontsize=9, zorder=4, alpha=0.75)
elif factors[i] == "Runoff ratio":
txt = ax.text(v[0]*text_pos, v[1]*text_pos +0.2, factors[i], color='black', ha='center', va='center', fontsize=9, zorder=4, alpha=0.75)
elif factors[i] == "Mean half-flow date":
txt = ax.text(v[0]*text_pos, v[1]*text_pos -0.25, factors[i], color='black', ha='center', va='center', fontsize=9, zorder=4, alpha=0.75)
else:
txt = ax.text(v[0]*text_pos, v[1]*text_pos, factors[i], color='black', ha='center', va='center', fontsize=9, zorder=4, alpha=0.75)
txt.set_bbox(dict(facecolor="lightgrey", alpha=0.7, boxstyle="round"))
# Make plot nicer by removing the borders
ax.set_facecolor("white")
for spine in ax.spines.values():
spine.set_visible(False)
# Add correct descriptions
ax.set_ylabel("PC 2", alpha=alpha)
ax.set_xlabel("PC 1", alpha=alpha)
ax.grid(color="grey", alpha=alpha, zorder=4)
plt.setp(ax.get_yticklabels(), alpha=alpha)
plt.setp(ax.get_xticklabels(), alpha=alpha)
plt.ylim(-2.5, 7)
ax.tick_params(axis=u'both', which=u'both',length=0)
# legend = plot._legend
legend = plt.legend(bbox_to_anchor=[1,1])
for i,text in enumerate(legend.get_texts()):
text.set_text(n_list[i])
text.set_color("grey")
legend.set_title("Catchment\n Cluster")
legend.get_title().set_color("grey")
# legend.bbox_to_anchor=[0,10]
# Save the plot
fig.tight_layout()
plt.savefig("clusters.png", dpi=400, bbox_inches="tight")
def save_clusters_with_loc(labels):
# Set the cwd to the directory of the file
file_loc = os.path.dirname(sys.argv[0])
os.chdir(file_loc)
# Read in the files
os.chdir(os.pardir + os.sep + "data" + os.sep + "camels_attributes_v2.0")
# Topo
topo = pd.read_table("camels_topo.txt", sep=";", index_col=0)
labels_loc = pd.merge(topo[["gauge_lat", "gauge_lon"]], labels, left_index=True, right_index=True, how="inner")
os.chdir(file_loc)
labels_loc.to_csv("labels_loc.txt")
def elbow(min_clusters, max_clusters, df):
"""Creates an elbow plot for a dataframe to determine the number
of clusters"""
score_dict = {}
for num_clusters in range(min_clusters, max_clusters+1):
labels = create_cluster_labels(df, num_clusters)
score_dict[num_clusters] = calinski_harabaz_score(df, labels)
metrics = pd.DataFrame.from_dict(score_dict, orient="index")
metrics.plot()
if __name__ == "__main__":
variance = 0.8
pca_df = pca.pca_signatures(variance)
# metrics = elbow(5, 20, pca_df)
labels = create_cluster_labels(pca_df, 10)
# save_clusters_with_loc(labels)
pca_df_with_labels = pd.concat([pca_df, labels], axis=1)
# print(pca_df_with_labels.describe())
pca_object = pca.pca_signatures(0.80, return_pca=True)
biplot(pca_df_with_labels, pca_object)
| [
"seaborn.lmplot",
"numpy.random.seed",
"pandas.DataFrame.from_dict",
"sklearn.preprocessing.StandardScaler",
"matplotlib.pyplot.ylim",
"pandas.concat",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.Figure",
"os.path.dirname",
"pandas.merge",
"sklearn.metrics.calinski_harabaz_score",
"matplotl... | [((566, 583), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (580, 583), True, 'import numpy as np\n'), ((601, 663), 'sklearn.cluster.AgglomerativeClustering', 'AgglomerativeClustering', ([], {'n_clusters': 'num_groups', 'linkage': '"""ward"""'}), "(n_clusters=num_groups, linkage='ward')\n", (624, 663), False, 'from sklearn.cluster import AgglomerativeClustering\n'), ((1427, 1439), 'matplotlib.pyplot.Figure', 'plt.Figure', ([], {}), '()\n', (1437, 1439), True, 'import matplotlib.pyplot as plt\n'), ((1537, 1748), 'seaborn.lmplot', 'sns.lmplot', ([], {'x': '"""PC 1"""', 'y': '"""PC 2"""', 'data': 'pca_df_with_labels', 'hue': '"""Cluster"""', 'fit_reg': '(False)', 'palette': 'colors', 'scatter_kws': "{'s': 30, 'alpha': 1, 'edgecolor': 'black', 'linewidth': 0.3, 'zorder': 2}", 'legend': '(False)'}), "(x='PC 1', y='PC 2', data=pca_df_with_labels, hue='Cluster',\n fit_reg=False, palette=colors, scatter_kws={'s': 30, 'alpha': 1,\n 'edgecolor': 'black', 'linewidth': 0.3, 'zorder': 2}, legend=False)\n", (1547, 1748), True, 'import seaborn as sns\n'), ((1865, 1874), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (1872, 1874), True, 'import matplotlib.pyplot as plt\n'), ((4004, 4021), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(-2.5)', '(7)'], {}), '(-2.5, 7)\n', (4012, 4021), True, 'import matplotlib.pyplot as plt\n'), ((4119, 4152), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'bbox_to_anchor': '[1, 1]'}), '(bbox_to_anchor=[1, 1])\n', (4129, 4152), True, 'import matplotlib.pyplot as plt\n'), ((4434, 4491), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""clusters.png"""'], {'dpi': '(400)', 'bbox_inches': '"""tight"""'}), "('clusters.png', dpi=400, bbox_inches='tight')\n", (4445, 4491), True, 'import matplotlib.pyplot as plt\n'), ((4592, 4620), 'os.path.dirname', 'os.path.dirname', (['sys.argv[0]'], {}), '(sys.argv[0])\n', (4607, 4620), False, 'import os\n'), ((4625, 4643), 'os.chdir', 'os.chdir', (['file_loc'], {}), '(file_loc)\n', 
(4633, 4643), False, 'import os\n'), ((4672, 4745), 'os.chdir', 'os.chdir', (["(os.pardir + os.sep + 'data' + os.sep + 'camels_attributes_v2.0')"], {}), "(os.pardir + os.sep + 'data' + os.sep + 'camels_attributes_v2.0')\n", (4680, 4745), False, 'import os\n'), ((4769, 4823), 'pandas.read_table', 'pd.read_table', (['"""camels_topo.txt"""'], {'sep': '""";"""', 'index_col': '(0)'}), "('camels_topo.txt', sep=';', index_col=0)\n", (4782, 4823), True, 'import pandas as pd\n'), ((4841, 4943), 'pandas.merge', 'pd.merge', (["topo[['gauge_lat', 'gauge_lon']]", 'labels'], {'left_index': '(True)', 'right_index': '(True)', 'how': '"""inner"""'}), "(topo[['gauge_lat', 'gauge_lon']], labels, left_index=True,\n right_index=True, how='inner')\n", (4849, 4943), True, 'import pandas as pd\n'), ((4944, 4962), 'os.chdir', 'os.chdir', (['file_loc'], {}), '(file_loc)\n', (4952, 4962), False, 'import os\n'), ((5366, 5416), 'pandas.DataFrame.from_dict', 'pd.DataFrame.from_dict', (['score_dict'], {'orient': '"""index"""'}), "(score_dict, orient='index')\n", (5388, 5416), True, 'import pandas as pd\n'), ((5515, 5543), 'pca.pca_signatures', 'pca.pca_signatures', (['variance'], {}), '(variance)\n', (5533, 5543), False, 'import pca\n'), ((5688, 5723), 'pandas.concat', 'pd.concat', (['[pca_df, labels]'], {'axis': '(1)'}), '([pca_df, labels], axis=1)\n', (5697, 5723), True, 'import pandas as pd\n'), ((5783, 5823), 'pca.pca_signatures', 'pca.pca_signatures', (['(0.8)'], {'return_pca': '(True)'}), '(0.8, return_pca=True)\n', (5801, 5823), False, 'import pca\n'), ((5317, 5351), 'sklearn.metrics.calinski_harabaz_score', 'calinski_harabaz_score', (['df', 'labels'], {}), '(df, labels)\n', (5339, 5351), False, 'from sklearn.metrics import calinski_harabaz_score\n'), ((739, 755), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), '()\n', (753, 755), False, 'from sklearn.preprocessing import StandardScaler\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# utils.py
"""
Utility functions for deconvolution
Copyright (c) 2016, <NAME>
"""
import numpy as np
from dphutils import radial_profile
def set_pyfftw_threads(threads=1):
"""A utility to set the number of threads to use in pyfftw"""
raise NotImplementedError
def _ensure_positive(data):
"""Make sure data is positive"""
return np.fmax(data, 0)
def _zero2eps(data):
"""Replace zeros and negative numbers with machine precision"""
return np.fmax(data, np.finfo(data.dtype).eps)
def _prep_img_and_psf(image, psf):
"""Do basic data checking, convert data to float, normalize psf and make
sure data are positive"""
assert psf.ndim == image.ndim, "image and psf do not have the same number" " of dimensions"
image = image.astype(np.float)
psf = psf.astype(np.float)
# need to make sure both image and PSF are totally positive.
image = _ensure_positive(image)
# I'm not actually sure if this step is necessary or a good idea.
psf = _ensure_positive(psf)
# normalize the kernel
psf /= psf.sum()
return image, psf
def radialavg(data):
"""Radially average psf/otf
Note: it only really makes sense to radially average the OTF"""
if data.ndim < 2 or data.ndim > 3:
raise ValueError("Data has wrong number of dimensions, ndim = {}".format(data.ndim))
# find data maximum, then we use this as the center
center = np.unravel_index(data.argmax(), data.shape)
yxcenter = center[-2:]
# figure out maxsize of data that is reasonable
maxsize = max(*yxcenter, *(np.array(data.shape[-2:]) - np.array(yxcenter)))
# maxsize should be odd
maxsize += 1 - maxsize % 2
if data.ndim == 2:
return radial_profile(data, yxcenter)[0][:maxsize]
elif data.ndim == 3:
# return the radial profile for each z slice
return np.array([radial_profile(d, yxcenter)[0][:maxsize] for d in data])
else:
raise RuntimeError("Something has gone wrong!")
# fixes fft issue
def expand_radialavg(data):
"""Expand a radially averaged data set to a full 2D or 3D psf/otf
Data will have maximum at center
Assumes standard numpy ordering of axes (i.e. zyx)"""
ndim = data.ndim
if ndim < 1 or ndim > 2:
raise ValueError("Data has wrong number of dimensions, ndim = {}".format(data.ndim))
half_yxsize = data.shape[-1]
quadsize = half_yxsize + 1
datashape = (quadsize, quadsize)
# start building the coordinate system
idx = np.indices((datashape))
# calculate the radius from center
r = np.sqrt(np.sum([i ** 2 for i in idx], 0))
# figure out old r for the averaged data
oldr = np.arange(half_yxsize)
# final shape
final_shape = (2 * half_yxsize,) * 2
if ndim == 1:
lrquad = np.interp(r, oldr, data)
else:
final_shape = (data.shape[0],) + final_shape
lrquad = np.array([np.interp(r, oldr, d) for d in data])
# make final array to fill
final_ar = np.empty(final_shape, dtype=lrquad.dtype)
# fill each quadrant
final_ar[..., half_yxsize:, half_yxsize:] = lrquad[..., :-1, :-1]
final_ar[..., :half_yxsize, half_yxsize:] = lrquad[..., :0:-1, :-1]
final_ar[..., half_yxsize:, :half_yxsize] = lrquad[..., :-1, :0:-1]
final_ar[..., :half_yxsize, :half_yxsize] = lrquad[..., :0:-1, :0:-1]
return final_ar
| [
"numpy.fmax",
"numpy.sum",
"dphutils.radial_profile",
"numpy.empty",
"numpy.indices",
"numpy.finfo",
"numpy.arange",
"numpy.array",
"numpy.interp"
] | [((396, 412), 'numpy.fmax', 'np.fmax', (['data', '(0)'], {}), '(data, 0)\n', (403, 412), True, 'import numpy as np\n'), ((2541, 2562), 'numpy.indices', 'np.indices', (['datashape'], {}), '(datashape)\n', (2551, 2562), True, 'import numpy as np\n'), ((2710, 2732), 'numpy.arange', 'np.arange', (['half_yxsize'], {}), '(half_yxsize)\n', (2719, 2732), True, 'import numpy as np\n'), ((3026, 3067), 'numpy.empty', 'np.empty', (['final_shape'], {'dtype': 'lrquad.dtype'}), '(final_shape, dtype=lrquad.dtype)\n', (3034, 3067), True, 'import numpy as np\n'), ((2620, 2654), 'numpy.sum', 'np.sum', (['[(i ** 2) for i in idx]', '(0)'], {}), '([(i ** 2) for i in idx], 0)\n', (2626, 2654), True, 'import numpy as np\n'), ((2827, 2851), 'numpy.interp', 'np.interp', (['r', 'oldr', 'data'], {}), '(r, oldr, data)\n', (2836, 2851), True, 'import numpy as np\n'), ((529, 549), 'numpy.finfo', 'np.finfo', (['data.dtype'], {}), '(data.dtype)\n', (537, 549), True, 'import numpy as np\n'), ((1613, 1638), 'numpy.array', 'np.array', (['data.shape[-2:]'], {}), '(data.shape[-2:])\n', (1621, 1638), True, 'import numpy as np\n'), ((1641, 1659), 'numpy.array', 'np.array', (['yxcenter'], {}), '(yxcenter)\n', (1649, 1659), True, 'import numpy as np\n'), ((1759, 1789), 'dphutils.radial_profile', 'radial_profile', (['data', 'yxcenter'], {}), '(data, yxcenter)\n', (1773, 1789), False, 'from dphutils import radial_profile\n'), ((2942, 2963), 'numpy.interp', 'np.interp', (['r', 'oldr', 'd'], {}), '(r, oldr, d)\n', (2951, 2963), True, 'import numpy as np\n'), ((1906, 1933), 'dphutils.radial_profile', 'radial_profile', (['d', 'yxcenter'], {}), '(d, yxcenter)\n', (1920, 1933), False, 'from dphutils import radial_profile\n')] |
import numpy as np
import matplotlib.pyplot as plt
import pickle
import matplotlib as mpl
mpl.rcParams['figure.dpi'] = 160
mpl.rc('text', usetex=True)
f1 = "coef_uc.dat"
f2 = "coef_ol.dat"
f3 = "coef_cl.dat"
cases = ['uncontrolled', 'open-loop controlled', 'closed-loop controlled']
data = {cases[0]: np.loadtxt(f1, unpack=True, usecols=[0, 1, 3]),
cases[1]: np.loadtxt(f2, unpack=True, usecols=[0, 1, 3]),
cases[2]: np.loadtxt(f3, unpack=True, usecols=[0, 1, 3])}
'''
cd_mean = np.mean(data[cases[1]][1][-4000:])
cl_mean = np.mean(data[cases[1]][2][-4000:])
cd_max = np.max(data[cases[1]][1][-4000:])
cl_max = np.max(data[cases[1]][2][-4000:])
cd_min = np.min(data[cases[1]][1][-4000:])
cl_min = np.min(data[cases[1]][2][-4000:])
print(f"cd_mean = {cd_mean}, cl_mean = {cl_mean}")
print(f"cd_max = {cd_max}, cl_max = {cl_max}")
print(f"cd_min = {cd_min}, cl_min = {cl_min}")
cd_uc = [3.14741, 3.17212, 3.19653] # [cd_min, cd_mean, cd_max]
cl_uc = [-0.904919, -0.0126599, 0.878955] # [cl_min, cl_mean, cl_max]
cd_mean_percent = (cd_uc[1]-cd_mean)/cd_uc[1] * 100
cd_max_percent = (cd_uc[2]-cd_mean)/cd_uc[2] * 100
cd_min_percent = (cd_uc[0]-cd_mean)/cd_uc[0] * 100
cl_mean_percent = (cl_uc[1]-cl_mean)/cd_uc[1] * 100
cl_max_percent = (cl_uc[2]-cl_max)/cd_uc[2] * 100
cl_min_percent = (cl_uc[0]-cl_min)/cd_uc[0] * 100
'''
fig, ax = plt.subplots(figsize=(8, 4))
ax.plot(data[cases[0]][0], data[cases[0]][1], "-.", color= '#1f77b4', linewidth=1.2, markevery=70, label=cases[0])
ax.plot(data[cases[1]][0], data[cases[1]][1], ":", color= '#ff7f0e', linewidth=1.2, markevery=70, label=cases[1])
ax.plot(data[cases[2]][0], data[cases[2]][1], "-", color= '#2ca02c', linewidth=1.2, markevery=70, label=cases[2])
ax.axvline(x=2.19, color='k', linestyle='--', label='control starts for DRL')
ax.set_xlim((0, 8))
ax.set_ylim((2.38, 3.26))
ax.set_ylabel(r"$c_D$", fontsize=12)
ax.set_xlabel(r"$\tilde t$", fontsize=12)
ax.tick_params(labelsize=12)
ax.legend(loc='best', fontsize=12)
plt.savefig('cd_assessment.png')
fig, ax = plt.subplots(figsize=(8, 4))
ax.plot(data[cases[0]][0], data[cases[0]][2], "-.", color= '#1f77b4', linewidth=1.2, markevery=70, label=cases[0])
ax.plot(data[cases[1]][0], data[cases[1]][2], ":", color= '#ff7f0e', linewidth=1.2, markevery=70, label=cases[1])
ax.plot(data[cases[2]][0], data[cases[2]][2], "-", color= '#2ca02c', linewidth=1.2, markevery=70, label=cases[2])
ax.axvline(x=2.19, color='k', linestyle='--', label='control starts for DRL')
ax.set_xlim((0, 8))
ax.set_ylim((-2.2, 2.2))
ax.set_ylabel(r"$c_L$", fontsize=12)
ax.set_xlabel(r"$\tilde t$", fontsize=12)
ax.tick_params(labelsize=12)
ax.legend(loc='best', fontsize=12)
plt.savefig('cl_assessment.png')
| [
"matplotlib.rc",
"matplotlib.pyplot.subplots",
"numpy.loadtxt",
"matplotlib.pyplot.savefig"
] | [((123, 150), 'matplotlib.rc', 'mpl.rc', (['"""text"""'], {'usetex': '(True)'}), "('text', usetex=True)\n", (129, 150), True, 'import matplotlib as mpl\n'), ((1356, 1384), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(8, 4)'}), '(figsize=(8, 4))\n', (1368, 1384), True, 'import matplotlib.pyplot as plt\n'), ((2001, 2033), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""cd_assessment.png"""'], {}), "('cd_assessment.png')\n", (2012, 2033), True, 'import matplotlib.pyplot as plt\n'), ((2045, 2073), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(8, 4)'}), '(figsize=(8, 4))\n', (2057, 2073), True, 'import matplotlib.pyplot as plt\n'), ((2689, 2721), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""cl_assessment.png"""'], {}), "('cl_assessment.png')\n", (2700, 2721), True, 'import matplotlib.pyplot as plt\n'), ((304, 350), 'numpy.loadtxt', 'np.loadtxt', (['f1'], {'unpack': '(True)', 'usecols': '[0, 1, 3]'}), '(f1, unpack=True, usecols=[0, 1, 3])\n', (314, 350), True, 'import numpy as np\n'), ((370, 416), 'numpy.loadtxt', 'np.loadtxt', (['f2'], {'unpack': '(True)', 'usecols': '[0, 1, 3]'}), '(f2, unpack=True, usecols=[0, 1, 3])\n', (380, 416), True, 'import numpy as np\n'), ((436, 482), 'numpy.loadtxt', 'np.loadtxt', (['f3'], {'unpack': '(True)', 'usecols': '[0, 1, 3]'}), '(f3, unpack=True, usecols=[0, 1, 3])\n', (446, 482), True, 'import numpy as np\n')] |
"""Tests the functions to identify points in cycles work."""
import os
import numpy as np
from scipy.signal import argrelextrema
import pytest
from bycycle.cyclepoints import find_extrema, find_zerox, extrema_interpolated_phase
# Set data path
DATA_PATH = os.getcwd() + '/tutorials/data/'
###################################################################################################
###################################################################################################
@pytest.mark.parametrize("first_extrema",
[
'peak',
'trough',
None,
pytest.param('fail', marks=pytest.mark.xfail(raises=ValueError))
]
)
def test_find_extrema(first_extrema):
"""Test ability to find peaks and troughs."""
# Load signal
sig = np.load(DATA_PATH + 'sim_stationary.npy')
fs = 1000
f_range = (6, 14)
# find local maxima and minima using scipy
maxima = argrelextrema(sig, np.greater)
minima = argrelextrema(sig, np.less)
# Find peaks and troughs using bycycle and make sure match scipy
ps, ts = find_extrema(sig, fs, f_range, boundary=1, first_extrema=first_extrema)
if first_extrema == 'trough':
assert len(ps) == len(ts)
assert ts[0] < ps[0]
np.testing.assert_allclose(ps, maxima[0])
np.testing.assert_allclose(ts[:len(ps)], minima[0][:len(ps)])
elif first_extrema == 'peak':
assert ps[0] < ts[0]
def test_find_zerox():
"""Test ability to find peaks and troughs."""
# Load signal
sig = np.load(DATA_PATH + 'sim_stationary.npy')
fs = 1000
f_range = (6, 14)
# Find peaks and troughs
ps, ts = find_extrema(sig, fs, f_range, boundary=1, first_extrema='peak')
# Find zerocrossings
zerox_rise, zerox_decay = find_zerox(sig, ps, ts)
assert len(ps) == (len(zerox_rise) + 1)
assert len(ts) == len(zerox_decay)
assert ps[0] < zerox_decay[0]
assert zerox_decay[0] < ts[0]
assert ts[0] < zerox_rise[0]
assert zerox_rise[0] < ps[1]
def test_extrema_interpolated_phase():
"""Test waveform phase estimate."""
# Load signal
sig = np.load(DATA_PATH + 'sim_stationary.npy')
fs = 1000
f_range = (6, 14)
# Find peaks and troughs
ps, ts = find_extrema(sig, fs, f_range, boundary=1, first_extrema='peak')
# Find zerocrossings
zerox_rise, zerox_decay = find_zerox(sig, ps, ts)
# Compute phase
pha = extrema_interpolated_phase(sig, ps, ts, zerox_rise=zerox_rise, zerox_decay=zerox_decay)
assert len(pha) == len(sig)
assert np.all(np.isclose(pha[ps], 0))
assert np.all(np.isclose(pha[ts], -np.pi))
assert np.all(np.isclose(pha[zerox_rise], -np.pi/2))
assert np.all(np.isclose(pha[zerox_decay], np.pi/2))
| [
"numpy.load",
"bycycle.cyclepoints.find_extrema",
"bycycle.cyclepoints.extrema_interpolated_phase",
"os.getcwd",
"scipy.signal.argrelextrema",
"bycycle.cyclepoints.find_zerox",
"numpy.isclose",
"numpy.testing.assert_allclose",
"pytest.mark.xfail"
] | [((259, 270), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (268, 270), False, 'import os\n'), ((788, 829), 'numpy.load', 'np.load', (["(DATA_PATH + 'sim_stationary.npy')"], {}), "(DATA_PATH + 'sim_stationary.npy')\n", (795, 829), True, 'import numpy as np\n'), ((928, 958), 'scipy.signal.argrelextrema', 'argrelextrema', (['sig', 'np.greater'], {}), '(sig, np.greater)\n', (941, 958), False, 'from scipy.signal import argrelextrema\n'), ((972, 999), 'scipy.signal.argrelextrema', 'argrelextrema', (['sig', 'np.less'], {}), '(sig, np.less)\n', (985, 999), False, 'from scipy.signal import argrelextrema\n'), ((1083, 1154), 'bycycle.cyclepoints.find_extrema', 'find_extrema', (['sig', 'fs', 'f_range'], {'boundary': '(1)', 'first_extrema': 'first_extrema'}), '(sig, fs, f_range, boundary=1, first_extrema=first_extrema)\n', (1095, 1154), False, 'from bycycle.cyclepoints import find_extrema, find_zerox, extrema_interpolated_phase\n'), ((1540, 1581), 'numpy.load', 'np.load', (["(DATA_PATH + 'sim_stationary.npy')"], {}), "(DATA_PATH + 'sim_stationary.npy')\n", (1547, 1581), True, 'import numpy as np\n'), ((1662, 1726), 'bycycle.cyclepoints.find_extrema', 'find_extrema', (['sig', 'fs', 'f_range'], {'boundary': '(1)', 'first_extrema': '"""peak"""'}), "(sig, fs, f_range, boundary=1, first_extrema='peak')\n", (1674, 1726), False, 'from bycycle.cyclepoints import find_extrema, find_zerox, extrema_interpolated_phase\n'), ((1783, 1806), 'bycycle.cyclepoints.find_zerox', 'find_zerox', (['sig', 'ps', 'ts'], {}), '(sig, ps, ts)\n', (1793, 1806), False, 'from bycycle.cyclepoints import find_extrema, find_zerox, extrema_interpolated_phase\n'), ((2135, 2176), 'numpy.load', 'np.load', (["(DATA_PATH + 'sim_stationary.npy')"], {}), "(DATA_PATH + 'sim_stationary.npy')\n", (2142, 2176), True, 'import numpy as np\n'), ((2257, 2321), 'bycycle.cyclepoints.find_extrema', 'find_extrema', (['sig', 'fs', 'f_range'], {'boundary': '(1)', 'first_extrema': '"""peak"""'}), "(sig, fs, f_range, boundary=1, 
first_extrema='peak')\n", (2269, 2321), False, 'from bycycle.cyclepoints import find_extrema, find_zerox, extrema_interpolated_phase\n'), ((2378, 2401), 'bycycle.cyclepoints.find_zerox', 'find_zerox', (['sig', 'ps', 'ts'], {}), '(sig, ps, ts)\n', (2388, 2401), False, 'from bycycle.cyclepoints import find_extrema, find_zerox, extrema_interpolated_phase\n'), ((2433, 2525), 'bycycle.cyclepoints.extrema_interpolated_phase', 'extrema_interpolated_phase', (['sig', 'ps', 'ts'], {'zerox_rise': 'zerox_rise', 'zerox_decay': 'zerox_decay'}), '(sig, ps, ts, zerox_rise=zerox_rise, zerox_decay=\n zerox_decay)\n', (2459, 2525), False, 'from bycycle.cyclepoints import find_extrema, find_zerox, extrema_interpolated_phase\n'), ((1261, 1302), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['ps', 'maxima[0]'], {}), '(ps, maxima[0])\n', (1287, 1302), True, 'import numpy as np\n'), ((2572, 2594), 'numpy.isclose', 'np.isclose', (['pha[ps]', '(0)'], {}), '(pha[ps], 0)\n', (2582, 2594), True, 'import numpy as np\n'), ((2614, 2641), 'numpy.isclose', 'np.isclose', (['pha[ts]', '(-np.pi)'], {}), '(pha[ts], -np.pi)\n', (2624, 2641), True, 'import numpy as np\n'), ((2661, 2700), 'numpy.isclose', 'np.isclose', (['pha[zerox_rise]', '(-np.pi / 2)'], {}), '(pha[zerox_rise], -np.pi / 2)\n', (2671, 2700), True, 'import numpy as np\n'), ((2718, 2757), 'numpy.isclose', 'np.isclose', (['pha[zerox_decay]', '(np.pi / 2)'], {}), '(pha[zerox_decay], np.pi / 2)\n', (2728, 2757), True, 'import numpy as np\n'), ((625, 661), 'pytest.mark.xfail', 'pytest.mark.xfail', ([], {'raises': 'ValueError'}), '(raises=ValueError)\n', (642, 661), False, 'import pytest\n')] |
import unittest
import numpy as np
from bcipy.signal.evaluate.rules import Rule, HighVoltage, LowVoltage
from bcipy.signal.generator.generator import gen_random_data
class TestRules(unittest.TestCase):
"""Test Rules init and class methods """
def setUp(self):
"""Create rule objects to test """
# Set thresholds for testing
self.highvoltage_value = 1
self.lowvoltage_value = -1
self.highvoltage_rule = HighVoltage(self.highvoltage_value)
self.lowvoltage_rule = LowVoltage(self.lowvoltage_value)
self.channels = 32
def test_high_voltage_rule_init(self):
"""Test that high voltage inits correctly"""
self.assertEqual(self.highvoltage_rule.threshold,
self.highvoltage_value)
self.assertIsInstance(self.highvoltage_rule, Rule)
def test_low_voltage_rule_init(self):
"""Test that low voltage inits correctly"""
self.assertEqual(self.lowvoltage_rule.threshold,
self.lowvoltage_value)
self.assertIsInstance(self.lowvoltage_rule, Rule)
def test_low_voltage_failing_signal(self):
"""Test generated sub threshold signal against low voltage"""
data = gen_random_data(1, 5, self.channels)
# ascertain that at least one random datapoint is below threshold to test np.amin edgecase
data[np.random.randint(self.channels)] = -1.5
self.assertTrue(self.lowvoltage_rule.is_broken(data))
def test_low_voltage_passing_signal(self):
"""Test generated signal that is consistently above threshold"""
data = gen_random_data(-0.5, 0.5, self.channels)
self.assertFalse(self.lowvoltage_rule.is_broken(data))
def test_high_voltage_failing_signal(self):
"""Test generated signal with one data point above threshold """
data = gen_random_data(-5, 0, self.channels)
# ascertain that at least one random datapoint is above threshold to test np.amax edgecase
data[np.random.randint(self.channels)] = 1.5
self.assertTrue(self.highvoltage_rule.is_broken(data))
def test_high_voltage_passing_signal(self):
"""Test generated signal that is consistently below threshold"""
data = gen_random_data(-0.5, 0.5, self.channels)
self.assertFalse(self.highvoltage_rule.is_broken(data))
if __name__ == "__main__":
unittest.main()
| [
"unittest.main",
"bcipy.signal.evaluate.rules.LowVoltage",
"numpy.random.randint",
"bcipy.signal.evaluate.rules.HighVoltage",
"bcipy.signal.generator.generator.gen_random_data"
] | [((2399, 2414), 'unittest.main', 'unittest.main', ([], {}), '()\n', (2412, 2414), False, 'import unittest\n'), ((455, 490), 'bcipy.signal.evaluate.rules.HighVoltage', 'HighVoltage', (['self.highvoltage_value'], {}), '(self.highvoltage_value)\n', (466, 490), False, 'from bcipy.signal.evaluate.rules import Rule, HighVoltage, LowVoltage\n'), ((522, 555), 'bcipy.signal.evaluate.rules.LowVoltage', 'LowVoltage', (['self.lowvoltage_value'], {}), '(self.lowvoltage_value)\n', (532, 555), False, 'from bcipy.signal.evaluate.rules import Rule, HighVoltage, LowVoltage\n'), ((1240, 1276), 'bcipy.signal.generator.generator.gen_random_data', 'gen_random_data', (['(1)', '(5)', 'self.channels'], {}), '(1, 5, self.channels)\n', (1255, 1276), False, 'from bcipy.signal.generator.generator import gen_random_data\n'), ((1628, 1669), 'bcipy.signal.generator.generator.gen_random_data', 'gen_random_data', (['(-0.5)', '(0.5)', 'self.channels'], {}), '(-0.5, 0.5, self.channels)\n', (1643, 1669), False, 'from bcipy.signal.generator.generator import gen_random_data\n'), ((1870, 1907), 'bcipy.signal.generator.generator.gen_random_data', 'gen_random_data', (['(-5)', '(0)', 'self.channels'], {}), '(-5, 0, self.channels)\n', (1885, 1907), False, 'from bcipy.signal.generator.generator import gen_random_data\n'), ((2260, 2301), 'bcipy.signal.generator.generator.gen_random_data', 'gen_random_data', (['(-0.5)', '(0.5)', 'self.channels'], {}), '(-0.5, 0.5, self.channels)\n', (2275, 2301), False, 'from bcipy.signal.generator.generator import gen_random_data\n'), ((1389, 1421), 'numpy.random.randint', 'np.random.randint', (['self.channels'], {}), '(self.channels)\n', (1406, 1421), True, 'import numpy as np\n'), ((2020, 2052), 'numpy.random.randint', 'np.random.randint', (['self.channels'], {}), '(self.channels)\n', (2037, 2052), True, 'import numpy as np\n')] |
""" Functions to generate the set of endpoints for the time series
benchmark on the HiRID database"""
import glob
import logging
import math
import os
import os.path
import pickle
import random
import sys
import lightgbm as lgbm
import numpy as np
import pandas as pd
import skfda.preprocessing.smoothing.kernel_smoothers as skks
import skfda.representation.grid as skgrid
import sklearn.linear_model as sklm
import sklearn.metrics as skmetrics
import sklearn.preprocessing as skpproc
def load_pickle(fpath):
""" Given a file path pointing to a pickle file, yields the object pickled in this file"""
with open(fpath, 'rb') as fp:
return pickle.load(fp)
SUPPOX_TO_FIO2 = {
0: 21,
1: 26,
2: 34,
3: 39,
4: 45,
5: 49,
6: 54,
7: 57,
8: 58,
9: 63,
10: 66,
11: 67,
12: 69,
13: 70,
14: 73,
15: 75}
def mix_real_est_pao2(pao2_col, pao2_meas_cnt, pao2_est_arr, bandwidth=None):
""" Mix real PaO2 measurement and PaO2 estimates using a Gaussian kernel"""
final_pao2_arr = np.copy(pao2_est_arr)
sq_scale = 57 ** 2 # 1 hour has mass 1/3 approximately
for idx in range(final_pao2_arr.size):
meas_ref = pao2_meas_cnt[idx]
real_val = None
real_val_dist = None
# Search forward and backward with priority giving to backward if equi-distant
for sidx in range(48):
if not idx - sidx < 0 and pao2_meas_cnt[idx - sidx] < meas_ref:
real_val = pao2_col[idx - sidx + 1]
real_val_dist = 5 * sidx
break
elif not idx + sidx >= final_pao2_arr.size and pao2_meas_cnt[idx + sidx] > meas_ref:
real_val = pao2_col[idx + sidx]
real_val_dist = 5 * sidx
break
if real_val is not None:
alpha_mj = math.exp(-real_val_dist ** 2 / sq_scale)
alpha_ej = 1 - alpha_mj
final_pao2_arr[idx] = alpha_mj * real_val + alpha_ej * pao2_est_arr[idx]
return final_pao2_arr
def perf_regression_model(X_list, y_list, aux_list, configs=None):
""" Initial test of a regression model to estimate the current Pao2 based
on 6 features of the past. Also pass FiO2 to calculate resulting mistakes in
the P/F ratio"""
logging.info("Testing regression model for PaO2...")
# Partition the data into 3 sets and run SGD regressor
X_train = X_list[:int(0.6 * len(X_list))]
X_train = np.vstack(X_train)
y_train = np.concatenate(y_list[:int(0.6 * len(y_list))])
X_val = X_list[int(0.6 * len(X_list)):int(0.8 * len(X_list))]
X_val = np.vstack(X_val)
y_val = np.concatenate(y_list[int(0.6 * len(y_list)):int(0.8 * len(y_list))])
X_test = X_list[int(0.8 * len(X_list)):]
X_test = np.vstack(X_test)
y_test = np.concatenate(y_list[int(0.8 * len(y_list)):])
fio2_test = np.concatenate(aux_list[int(0.8 * len(aux_list)):])
if configs["sur_model_type"] == "linear":
scaler = skpproc.StandardScaler()
X_train_std = scaler.fit_transform(X_train)
X_val_std = scaler.transform(X_val)
X_test_std = scaler.transform(X_test)
if configs["sur_model_type"] == "linear":
alpha_cands = [0.0001, 0.001, 0.01, 0.1, 1.0]
elif configs["sur_model_type"] == "lgbm":
alpha_cands = [32]
best_alpha = None
best_score = np.inf
# Search for the best model on the validation set
for alpha in alpha_cands:
logging.info("Testing alpha: {}".format(alpha))
if configs["sur_model_type"] == "linear":
lmodel_cand = sklm.SGDRegressor(alpha=alpha, random_state=2021)
elif configs["sur_model_type"] == "lgbm":
lmodel_cand = lgbm.LGBMRegressor(num_leaves=alpha, learning_rate=0.05, n_estimators=1000,
random_state=2021)
if configs["sur_model_type"] == "linear":
lmodel_cand.fit(X_train_std, y_train)
elif configs["sur_model_type"] == "lgbm":
lmodel_cand.fit(X_train_std, y_train, eval_set=(X_val_std, y_val), early_stopping_rounds=20,
eval_metric="mae")
pred_y_val = lmodel_cand.predict(X_val_std)
mae_val = np.median(np.absolute(y_val - pred_y_val))
if mae_val < best_score:
best_score = mae_val
best_alpha = alpha
lmodel = sklm.SGDRegressor(alpha=best_alpha, random_state=2021)
lmodel.fit(X_train_std, y_train)
pred_y_test = lmodel.predict(X_test_std)
pred_pf_ratio_test = pred_y_test / fio2_test
true_pf_ratio_test = y_test / fio2_test
mae_test = skmetrics.mean_absolute_error(y_test, pred_y_test)
logging.info("Mean absolute error in test set: {:.3f}".format(mae_test))
def percentile_smooth(signal_col, percentile, win_scope_mins):
    """Smooth a signal with a sliding-window percentile filter.

    ``percentile`` is in [0, 100]; ``win_scope_mins`` is the total window
    width in minutes on the script's 5-minute time grid.
    """
    mins_per_window = 5
    half_span = int(win_scope_mins / mins_per_window / 2)
    smoothed = np.zeros_like(signal_col)
    for center in range(smoothed.size):
        lo = max(0, center - half_span)
        hi = min(smoothed.size, center + half_span)
        smoothed[center] = np.percentile(signal_col[lo:hi], percentile)
    return smoothed
def subsample_blocked(val_arr, meas_arr=None, ss_ratio=None, block_length=None, normal_value=None):
    """ Subsample blocked with ratio and block length"""
    # val_arr: value time series; meas_arr: aligned cumulative measurement
    # counts (an increment at index i marks a real measurement there).
    # Deletes roughly (1 - ss_ratio) of the measurements in contiguous
    # blocks of `block_length`, then rewrites both arrays consistently.
    val_arr_out = np.copy(val_arr)
    meas_arr_out = np.copy(meas_arr)
    # Grid indices carrying a real measurement
    meas_idxs = []
    n_meas = 0
    for idx in range(meas_arr.size):
        if meas_arr[idx] > n_meas:
            meas_idxs.append(idx)
            n_meas += 1
    if len(meas_idxs) == 0:
        return (val_arr_out, meas_arr_out)
    # Number of measurements to delete, grouped into blocks starting at
    # every block_length-th measurement index
    meas_select = int((1 - ss_ratio) * len(meas_idxs))
    begin_select = meas_select // block_length
    feas_begins = [meas_idxs[idx] for idx in np.arange(0, len(meas_idxs), block_length)]
    sel_meas_begins = sorted(random.sample(feas_begins, begin_select))
    sel_meas_delete = []
    for begin in sel_meas_begins:
        for add_idx in range(block_length):
            # NOTE(review): begin + add_idx is a grid offset, not necessarily
            # a measurement index — confirm this matches the intended blocks.
            sel_meas_delete.append(begin + add_idx)
    # Rewrite the measurement array with deleted indices
    for midx, meas_idx in enumerate(meas_idxs):
        prev_cnt = 0 if meas_idx == 0 else meas_arr_out[meas_idx - 1]
        revised_cnt = prev_cnt if meas_idx in sel_meas_delete else prev_cnt + 1
        if midx < len(meas_idxs) - 1:
            for rewrite_idx in range(meas_idx, meas_idxs[midx + 1]):
                meas_arr_out[rewrite_idx] = revised_cnt
        else:
            for rewrite_idx in range(meas_idx, len(meas_arr_out)):
                meas_arr_out[rewrite_idx] = revised_cnt
    # Rewrite the value array with deleted indices, with assuming forward filling
    for midx, meas_idx in enumerate(meas_idxs):
        prev_val = normal_value if meas_idx == 0 else val_arr_out[meas_idx - 1]
        cur_val = val_arr_out[meas_idx]
        revised_value = prev_val if meas_idx in sel_meas_delete else cur_val
        if midx < len(meas_idxs) - 1:
            for rewrite_idx in range(meas_idx, meas_idxs[midx + 1]):
                val_arr_out[rewrite_idx] = revised_value
        else:
            for rewrite_idx in range(meas_idx, len(meas_arr_out)):
                val_arr_out[rewrite_idx] = revised_value
    return (val_arr_out, meas_arr_out)
def subsample_individual(val_arr, meas_arr=None, ss_ratio=None, normal_value=None):
    """ Subsample individual measurements completely randomly with random choice"""
    # val_arr: value time series; meas_arr: aligned cumulative measurement
    # counts (an increment at index i marks a real measurement there).
    val_arr_out = np.copy(val_arr)
    meas_arr_out = np.copy(meas_arr)
    # Grid indices carrying a real measurement
    meas_idxs = []
    n_meas = 0
    for idx in range(meas_arr.size):
        if meas_arr[idx] > n_meas:
            meas_idxs.append(idx)
            n_meas += 1
    if len(meas_idxs) == 0:
        return (val_arr_out, meas_arr_out)
    # Draw the measurements to delete uniformly at random
    meas_select = int((1 - ss_ratio) * len(meas_idxs))
    sel_meas_delete = sorted(random.sample(meas_idxs, meas_select))
    # Rewrite the measurement array with deleted indices
    for midx, meas_idx in enumerate(meas_idxs):
        prev_cnt = 0 if meas_idx == 0 else meas_arr_out[meas_idx - 1]
        revised_cnt = prev_cnt if meas_idx in sel_meas_delete else prev_cnt + 1
        if midx < len(meas_idxs) - 1:
            for rewrite_idx in range(meas_idx, meas_idxs[midx + 1]):
                meas_arr_out[rewrite_idx] = revised_cnt
        else:
            for rewrite_idx in range(meas_idx, len(meas_arr_out)):
                meas_arr_out[rewrite_idx] = revised_cnt
    # Rewrite the value array with deleted indices, with assuming forward filling
    for midx, meas_idx in enumerate(meas_idxs):
        prev_val = normal_value if meas_idx == 0 else val_arr_out[meas_idx - 1]
        cur_val = val_arr_out[meas_idx]
        revised_value = prev_val if meas_idx in sel_meas_delete else cur_val
        if midx < len(meas_idxs) - 1:
            for rewrite_idx in range(meas_idx, meas_idxs[midx + 1]):
                val_arr_out[rewrite_idx] = revised_value
        else:
            for rewrite_idx in range(meas_idx, len(meas_arr_out)):
                val_arr_out[rewrite_idx] = revised_value
    return (val_arr_out, meas_arr_out)
def merge_short_vent_gaps(vent_status_arr, short_gap_hours):
    """Merge short gaps in the ventilation status array.

    A gap is a run of 0.0/NaN samples; when it is terminated by a
    ventilated (1.0) sample and lasted at most ``short_gap_hours``, the
    gap is overwritten with 1.0 in place (5-minute grid).
    """
    gap_open = False
    gap_start_idx = 0
    gap_minutes = 0
    for pos, state in enumerate(vent_status_arr):
        if state == 0.0 or np.isnan(state):
            if gap_open:
                gap_minutes += 5
            else:
                # Entering a new gap
                gap_open = True
                gap_start_idx = pos
                gap_minutes = 5
        elif gap_open and state == 1.0:
            gap_open = False
            if gap_minutes / 60. <= short_gap_hours:
                vent_status_arr[gap_start_idx:pos] = 1.0
    return vent_status_arr
def kernel_smooth_arr(input_arr, bandwidth=None):
    """Nadaraya-Watson kernel smoothing of the finite entries of an array.

    Non-finite entries are left untouched; with fewer than two finite
    observations the array is returned unsmoothed.
    """
    output_arr = np.copy(input_arr)
    finite_mask = np.isfinite(output_arr)
    observations = output_arr[finite_mask]
    # Not enough data to fit a smoother
    if observations.size < 2:
        return output_arr
    # Time stamps on the 5-minute grid, restricted to finite samples
    sample_times = (5 * np.arange(len(output_arr)))[finite_mask]
    smoother = skks.NadarayaWatsonSmoother(smoothing_parameter=bandwidth)
    smoothed_grid = smoother.fit_transform(skgrid.FDataGrid([observations], sample_times))
    output_arr[finite_mask] = smoothed_grid.data_matrix.flatten()
    return output_arr
def delete_short_vent_events(vent_status_arr, short_event_hours):
    """Delete short events in the ventilation status array.

    An event is a maximal run of 1.0 samples; when it terminates (0.0 or
    NaN) and lasted less than ``short_event_hours``, it is zeroed in
    place. A trailing, unterminated event is kept. 5-minute grid.
    """
    inside = False
    start_idx = 0
    duration_mins = 0
    for pos in range(len(vent_status_arr)):
        sample = vent_status_arr[pos]
        if sample == 1.0:
            if inside:
                duration_mins += 5
            else:
                # Entering a new event
                inside = True
                duration_mins = 5
                start_idx = pos
        elif inside and (sample == 0.0 or np.isnan(sample)):
            inside = False
            if duration_mins / 60. < short_event_hours:
                vent_status_arr[start_idx:pos] = 0.0
    return vent_status_arr
def ellis(x_orig):
    """ ELLIS model converting SpO2 in 100 % units into a PaO2 ABGA
    estimate.

    NaN entries are replaced by 98 (normal-value assumption) and exact
    100 % saturations are clipped to 99.9 % to avoid division by zero.
    NOTE: fills NaNs of *x_orig* in place before converting.
    """
    x_orig[np.isnan(x_orig)] = 98  # Normal value assumption
    sat_frac = x_orig / 100
    sat_frac[sat_frac == 1] = 0.999
    base = 11700 / (1 / sat_frac - 1)
    root_term = np.sqrt(pow(50, 3) + base ** 2)
    return np.cbrt(base + root_term) + np.cbrt(base - root_term)
def correct_left_edge_vent(vent_status_arr, etco2_meas_cnt, etco2_col):
    """ Corrects the left edge of the ventilation status array, to pin-point the exact conditions"""
    # Walks left to right; while on the left edge of a detected event,
    # samples are zeroed until the first sample that has a fresh EtCO2
    # measurement with EtCO2 > 0.5, which anchors the true event start.
    on_left_edge = False
    in_event = False
    # Correct left ventilation edges of the ventilation zone
    for idx in range(len(vent_status_arr)):
        if vent_status_arr[idx] == 1.0 and not in_event:
            in_event = True
            on_left_edge = True
        if on_left_edge and in_event:
            if vent_status_arr[idx] == 0.0:
                in_event = False
                on_left_edge = False
            # NOTE(review): the condition below relies on `and` binding
            # tighter than `or`, i.e. it reads as
            # (idx == 0 and cnt[0] > 0) or (cnt[idx] - cnt[idx-1] >= 1),
            # all gated by etco2_col[idx] > 0.5 — confirm this grouping
            # is intended. Also note `in_event` is never reset after the
            # edge is resolved, so only the first event per stay is
            # corrected — verify against callers.
            elif (idx == 0 and etco2_meas_cnt[idx] > 0 or etco2_meas_cnt[idx] - etco2_meas_cnt[idx - 1] >= 1) and \
                    etco2_col[idx] > 0.5:
                on_left_edge = False
            else:
                vent_status_arr[idx] = 0.0
    return vent_status_arr
def delete_small_continuous_blocks(event_arr, block_threshold=None):
    """ Given an event array, deletes small contiguous blocks that are sandwiched between two other blocks, one of which
    is longer, they both have the same label. For the moment we delete blocks smaller than 30 minutes. Note this
    requires only a linear pass over the array.

    Args:
        event_arr: 1D array of labels.
        block_threshold: maximum length (in samples) of a block that is
            eligible for merging into its neighbours.

    Returns:
        Tuple ``(out_arr, diff_arr)``: the relabeled array and a boolean
        array marking the positions that changed.
    """
    block_list = []
    active_block = None
    # Build a run-length block list: (label, left_idx, right_idx)
    for jdx in range(event_arr.size):
        new_block = event_arr[jdx]
        # Start a new block at the beginning
        if active_block is None:
            active_block = new_block
            left_block_idx = jdx
        # Change to a new block
        elif not active_block == new_block:
            block_list.append((active_block, left_block_idx, jdx - 1))
            left_block_idx = jdx
            active_block = new_block
        # Same last block unconditionally
        if jdx == event_arr.size - 1:
            block_list.append((new_block, left_block_idx, jdx))
    # Merge blocks until a full pass makes no changes
    while True:
        all_clean = True
        for bidx, block in enumerate(block_list):
            block_label, lidx, ridx = block
            block_len = ridx - lidx + 1
            # Candidate for merging
            if block_len <= block_threshold:
                if len(block_list) == 1:
                    all_clean = True
                    break
                # Only right block
                elif bidx == 0:
                    next_block = block_list[bidx + 1]
                    nb_label, nb_lidx, nb_ridx = next_block
                    nb_len = nb_ridx - nb_lidx + 1
                    # Merge blocks
                    if nb_len > block_len and nb_len > block_threshold:
                        block_list[bidx] = (nb_label, lidx, nb_ridx)
                        block_list.remove(next_block)
                        all_clean = False
                        break
                # Only left block
                elif bidx == len(block_list) - 1:
                    prev_block = block_list[bidx - 1]
                    pb_label, pb_lidx, pb_ridx = prev_block
                    pb_len = pb_ridx - pb_lidx + 1
                    if pb_len > block_len and pb_len > block_threshold:
                        block_list[bidx] = (pb_label, pb_lidx, ridx)
                        block_list.remove(prev_block)
                        all_clean = False
                        break
                # Interior block
                else:
                    prev_block = block_list[bidx - 1]
                    next_block = block_list[bidx + 1]
                    pb_label, pb_lidx, pb_ridx = prev_block
                    nb_label, nb_lidx, nb_ridx = next_block
                    pb_len = pb_ridx - pb_lidx + 1
                    nb_len = nb_ridx - nb_lidx + 1
                    if pb_label == nb_label and (pb_len > block_threshold or nb_len > block_threshold):
                        block_list[bidx] = (pb_label, pb_lidx, nb_ridx)
                        block_list.remove(prev_block)
                        block_list.remove(next_block)
                        all_clean = False
                        break
        # Traversed block list with no actions required
        if all_clean:
            break
    # Now back-translate the block list to the list
    out_arr = np.copy(event_arr)
    for blabel, lidx, ridx in block_list:
        out_arr[lidx:ridx + 1] = blabel
    # Additionally build an array where the two arrays are different.
    # BUGFIX: np.bool was a deprecated alias of the builtin bool (NumPy
    # 1.20) and was removed in NumPy 1.24; use the builtin directly.
    diff_arr = (out_arr != event_arr).astype(bool)
    return (out_arr, diff_arr)
def collect_regression_data(spo2_col, spo2_meas_cnt, pao2_col, pao2_meas_cnt, fio2_est_arr,
                            sao2_col, sao2_meas_cnt, ph_col, ph_meas_cnt):
    """ Collect regression data at time-stamps where we have a real PaO2 measurement, return
    partial training X,y pairs for this patient"""
    # *_meas_cnt are cumulative measurement counters aligned with the
    # value columns; an increment marks a real measurement at that index.
    X_arr_collect = []
    y_arr_collect = []
    aux_collect = []
    # Running counter values used to detect fresh measurements
    cur_pao2_cnt = 0
    cur_spo2_cnt = 0
    cur_sao2_cnt = 0
    cur_ph_cnt = 0
    # Indices of real measurements seen so far, per variable
    pao2_real_meas = []
    spo2_real_meas = []
    sao2_real_meas = []
    ph_real_meas = []
    for jdx in range(spo2_col.size):
        if spo2_meas_cnt[jdx] > cur_spo2_cnt:
            spo2_real_meas.append(jdx)
            cur_spo2_cnt = spo2_meas_cnt[jdx]
        if sao2_meas_cnt[jdx] > cur_sao2_cnt:
            sao2_real_meas.append(jdx)
            cur_sao2_cnt = sao2_meas_cnt[jdx]
        if ph_meas_cnt[jdx] > cur_ph_cnt:
            ph_real_meas.append(jdx)
            cur_ph_cnt = ph_meas_cnt[jdx]
        if pao2_meas_cnt[jdx] > cur_pao2_cnt:
            pao2_real_meas.append(jdx)
            cur_pao2_cnt = pao2_meas_cnt[jdx]
            # Only start fitting the model from the 2nd measurement onwards
            if len(pao2_real_meas) >= 2 and len(spo2_real_meas) >= 2 and len(sao2_real_meas) >= 2 and len(
                    ph_real_meas) >= 2:
                # Dimensions of features
                # 0: Last real SpO2 measurement
                # 1: Last real PaO2 measurement
                # 2: Last real SaO2 measurement
                # 3: Last real pH measurement
                # 4: Time to last real SpO2 measurement
                # 5: Time to last real PaO2 measurement
                # 6: Closest SpO2 to last real PaO2 measurement
                x_vect = np.array([spo2_col[jdx - 1], pao2_col[jdx - 1], sao2_col[jdx - 1], ph_col[jdx - 1],
                                   jdx - spo2_real_meas[-2], jdx - pao2_real_meas[-2], spo2_col[pao2_real_meas[-2]]])
                y_val = pao2_col[jdx]
                aux_val = fio2_est_arr[jdx]
                # Keep only fully observed, finite training rows
                if np.isnan(x_vect).sum() == 0 and np.isfinite(y_val) and np.isfinite(aux_val):
                    X_arr_collect.append(x_vect)
                    y_arr_collect.append(y_val)
                    aux_collect.append(aux_val)
    if len(X_arr_collect) > 0:
        X_arr = np.vstack(X_arr_collect)
        y_arr = np.array(y_arr_collect)
        aux_arr = np.array(aux_collect)
        assert (np.isnan(X_arr).sum() == 0 and np.isnan(y_arr).sum() == 0)
        return (X_arr, y_arr, aux_arr)
    else:
        return (None, None, None)
def delete_low_density_hr_gap(vent_status_arr, hr_status_arr, configs=None):
    """ Deletes gaps in ventilation which are caused by likely sensor dis-connections"""
    # A ventilation gap whose HR-measurement density stays at or below
    # configs["vent_hr_density_threshold"] is treated as a monitoring
    # dropout and re-labeled as ventilated, in place.
    inside_event = False
    inside_gap = False
    gap_start = -1
    for pos in range(len(vent_status_arr)):
        state = vent_status_arr[pos]
        if not inside_event and not inside_gap and state == 1.0:
            # Beginning of new event, not from inside gap
            inside_event = True
        elif inside_event and state == 0.0:
            # Beginning of potential gap that needs to be closed
            inside_gap = True
            gap_start = pos
            inside_event = False
        if inside_gap and state == 1.0:
            # The gap is over; close it if the HR density inside was too low
            hr_window = hr_status_arr[gap_start:pos]
            if np.sum(hr_window) / hr_window.size <= configs["vent_hr_density_threshold"]:
                vent_status_arr[gap_start:pos] = 1.0
            inside_gap = False
            inside_event = True
    return vent_status_arr
def suppox_to_fio2(suppox_val):
    """ Conversion of supplemental oxygen to FiO2 estimated value"""
    # Values above 15 are capped at an FiO2 of 75; below that the static
    # lookup table applies.
    return 75 if suppox_val > 15 else SUPPOX_TO_FIO2[suppox_val]
def conservative_state(state1, state2):
    """ Given two states, return the lower one """
    if state1 == state2:
        return state1
    # The lower event level wins; event_3 only if neither side is lower
    for severity in ("event_0", "event_1", "event_2"):
        if severity in (state1, state2):
            return severity
    return "event_3"
def endpoint_gen_benchmark(configs):
    """Generate respiratory/circulatory endpoint annotations for one batch.

    Loads the imputed and merged data for batch ``configs["batch_idx"]``,
    derives per-patient ventilation status, estimated PaO2/FiO2 ratios and
    the resulting respiratory event levels plus a circulatory failure
    status, and writes one endpoint frame per batch to
    ``configs["endpoint_path"]``.
    """
    var_map = configs["VAR_IDS"]
    raw_var_map = configs["RAW_VAR_IDS"]
    sz_window = configs["length_fw_window"]
    abga_window = configs["length_ABGA_window"]
    missing_unm = 0
    # Threshold statistics
    stat_counts_ready_and_failure = 0
    stat_counts_ready_and_success = 0
    stat_counts_nready_and_failure = 0
    stat_counts_nready_and_success = 0
    stat_counts_ready_nextube = 0
    stat_counts_nready_nextube = 0
    imputed_f = configs["imputed_path"]
    merged_f = os.path.join(configs["merged_h5"])
    out_folder = os.path.join(configs["endpoint_path"])
    if not os.path.exists(out_folder):
        os.mkdir(out_folder)
    batch_id = configs["batch_idx"]
    logging.info("Generating endpoints for batch {}".format(batch_id))
    batch_fpath = os.path.join(imputed_f, "batch_{}.parquet".format(batch_id))
    if not os.path.exists(batch_fpath):
        logging.info("WARNING: Input file does not exist, exiting...")
        sys.exit(1)
    df_batch = pd.read_parquet(os.path.join(imputed_f, "batch_{}.parquet".format(batch_id)))
    logging.info("Loaded imputed data done...")
    cand_raw_batch = glob.glob(os.path.join(merged_f, "part-{}.parquet".format(batch_id)))
    assert (len(cand_raw_batch) == 1)
    pids = list(df_batch.patientid.unique())
    logging.info("Number of patients in batch: {}".format(len(df_batch.patientid.unique())))
    first_write = True
    out_fp = os.path.join(out_folder, "batch_{}.parquet".format(batch_id))
    event_count = {"FIO2_AVAILABLE": 0, "SUPPOX_NO_MEAS_12_HOURS_LIMIT": 0, "SUPPOX_MAIN_VAR": 0, "SUPPOX_HIGH_FLOW": 0,
                   "SUPPOX_NO_FILL_STOP": 0}
    readiness_ext_count = 0
    not_ready_ext_count = 0
    readiness_and_extubated_cnt = 0
    extubated_cnt = 0
    df_static = pd.read_parquet(configs["general_data_table_path"])
    X_reg_collect = []
    y_reg_collect = []
    aux_reg_collect = []
    out_dfs = []
    for pidx, pid in enumerate(pids):
        df_pid = df_batch[df_batch["patientid"] == pid]
        if df_pid.shape[0] == 0:
            logging.info("WARNING: No input data for PID: {}".format(pid))
            continue
        df_merged_pid = pd.read_parquet(cand_raw_batch[0], filters=[("patientid", "=", pid)])
        df_merged_pid.sort_values(by="datetime", inplace=True)
        suppox_val = {}
        suppox_ts = {}
        # Main route of SuppOx
        df_suppox_red_async = df_merged_pid[[var_map["SuppOx"], "datetime"]]
        df_suppox_red_async = df_suppox_red_async.dropna(how="all", thresh=2)
        suppox_async_red_ts = np.array(df_suppox_red_async["datetime"])
        suppox_val["SUPPOX"] = np.array(df_suppox_red_async[var_map["SuppOx"]])
        # Strategy is to create an imputed SuppOx column based on the spec using
        # forward filling heuristics
        # Relevant meta-variables
        fio2_col = np.array(df_pid[var_map["FiO2"]])
        pao2_col = np.array(df_pid[var_map["PaO2"]])
        etco2_col = np.array(df_pid[var_map["etCO2"]])
        paco2_col = np.array(df_pid[var_map["PaCO2"]])
        gcs_a_col = np.array(df_pid[var_map["GCS_Antwort"]])
        gcs_m_col = np.array(df_pid[var_map["GCS_Motorik"]])
        gcs_aug_col = np.array(df_pid[var_map["GCS_Augen"]])
        weight_col = np.array(df_pid[var_map["Weight"][0]])
        noreph_col = np.array(df_pid[var_map["Norephenephrine"][0]])
        epineph_col = np.array(df_pid[var_map["Epinephrine"][0]])
        vaso_col = np.array(df_pid[var_map["Vasopressin"][0]])
        milri_col = np.array(df_pid[var_map["Milrinone"][0]])
        dobut_col = np.array(df_pid[var_map["Dobutamine"][0]])
        levosi_col = np.array(df_pid[var_map["Levosimendan"][0]])
        theo_col = np.array(df_pid[var_map["Theophyllin"][0]])
        lactate_col = np.array(df_pid[var_map["Lactate"][0]])
        peep_col = np.array(df_pid[var_map["PEEP"]])
        # Heartrate
        hr_col = np.array(df_pid[var_map["HR"]])
        hr_meas_cnt = np.array(df_pid["{}_IMPUTED_STATUS_CUM_COUNT".format(var_map["HR"])])
        # Temperature
        temp_col = np.array(df_pid[var_map["Temp"]])
        temp_meas_cnt = np.array(df_pid["{}_IMPUTED_STATUS_CUM_COUNT".format(var_map["Temp"])])
        rrate_col = np.array(df_pid[var_map["RRate"]])
        tv_col = np.array(df_pid[var_map["TV"]])
        map_col = np.array(df_pid[var_map["MAP"][0]])
        airway_col = np.array(df_pid[var_map["Airway"]])
        # Ventilator mode group columns
        vent_mode_col = np.array(df_pid[var_map["vent_mode"]])
        spo2_col = np.array(df_pid[var_map["SpO2"]])
        if configs["presmooth_spo2"]:
            spo2_col = percentile_smooth(spo2_col, configs["spo2_smooth_percentile"],
                                         configs["spo2_smooth_window_size_mins"])
        sao2_col = np.array(df_pid[var_map["SaO2"]])
        ph_col = np.array(df_pid[var_map["pH"]])
        fio2_meas_cnt = np.array(df_pid["{}_IMPUTED_STATUS_CUM_COUNT".format(var_map["FiO2"])])
        pao2_meas_cnt = np.array(df_pid["{}_IMPUTED_STATUS_CUM_COUNT".format(var_map["PaO2"])])
        etco2_meas_cnt = np.array(df_pid["{}_IMPUTED_STATUS_CUM_COUNT".format(var_map["etCO2"])])
        peep_meas_cnt = np.array(df_pid["{}_IMPUTED_STATUS_CUM_COUNT".format(var_map["PEEP"])])
        hr_meas_cnt = np.array(df_pid["{}_IMPUTED_STATUS_CUM_COUNT".format(var_map["HR"])])
        spo2_meas_cnt = np.array(df_pid["{}_IMPUTED_STATUS_CUM_COUNT".format(var_map["SpO2"])])
        sao2_meas_cnt = np.array(df_pid["{}_IMPUTED_STATUS_CUM_COUNT".format(var_map["SaO2"])])
        ph_meas_cnt = np.array(df_pid["{}_IMPUTED_STATUS_CUM_COUNT".format(var_map["pH"])])
        abs_dtime_arr = np.array(df_pid["datetime"])
        event_status_arr = np.zeros(shape=(fio2_col.size), dtype="<S10")
        # Status arrays
        pao2_avail_arr = np.zeros(shape=(fio2_col.size))
        fio2_avail_arr = np.zeros(shape=(fio2_col.size))
        fio2_suppox_arr = np.zeros(shape=(fio2_col.size))
        fio2_ambient_arr = np.zeros(shape=(fio2_col.size))
        pao2_sao2_model_arr = np.zeros(shape=(fio2_col.size))
        pao2_full_model_arr = np.zeros(shape=(fio2_col.size))
        ratio_arr = np.zeros(shape=(fio2_col.size))
        sur_ratio_arr = np.zeros(shape=(fio2_col.size))
        pao2_est_arr = np.zeros(shape=(fio2_col.size))
        fio2_est_arr = np.zeros(shape=(fio2_col.size))
        vent_status_arr = np.zeros(shape=(fio2_col.size))
        readiness_ext_arr = np.zeros(shape=(fio2_col.size))
        readiness_ext_arr[:] = np.nan
        # Votes arrays
        vent_votes_arr = np.zeros(shape=(fio2_col.size))
        vent_votes_etco2_arr = np.zeros(shape=(fio2_col.size))
        vent_votes_ventgroup_arr = np.zeros(shape=(fio2_col.size))
        vent_votes_tv_arr = np.zeros(shape=(fio2_col.size))
        vent_votes_airway_arr = np.zeros(shape=(fio2_col.size))
        peep_status_arr = np.zeros(shape=(fio2_col.size))
        peep_threshold_arr = np.zeros(shape=(fio2_col.size))
        hr_status_arr = np.zeros(shape=(fio2_col.size))
        etco2_status_arr = np.zeros(shape=(fio2_col.size))
        event_status_arr.fill("UNKNOWN")
        # Array pointers tracking the current active value of each type
        suppox_async_red_ptr = -1
        # ======================== VENTILATION ================================================================================================
        # Label each point in the 30 minute window with ventilation
        in_vent_event = False
        for jdx in range(0, len(ratio_arr)):
            low_vent_idx = max(0, jdx - configs["peep_search_bw"])
            high_vent_idx = min(len(ratio_arr), jdx + configs["peep_search_bw"])
            low_peep_idx = max(0, jdx - configs["peep_search_bw"])
            high_peep_idx = min(len(ratio_arr), jdx + configs["peep_search_bw"])
            low_hr_idx = max(0, jdx - configs["hr_vent_search_bw"])
            high_hr_idx = min(len(ratio_arr), jdx + configs["hr_vent_search_bw"])
            win_etco2 = etco2_col[low_vent_idx:high_vent_idx]
            win_etco2_meas = etco2_meas_cnt[low_vent_idx:high_vent_idx]
            win_peep = peep_col[low_peep_idx:high_peep_idx]
            win_peep_meas = peep_meas_cnt[low_peep_idx:high_peep_idx]
            win_hr_meas = hr_meas_cnt[low_hr_idx:high_hr_idx]
            etco2_meas_win = win_etco2_meas[-1] - win_etco2_meas[0] > 0
            peep_meas_win = win_peep_meas[-1] - win_peep_meas[0] > 0
            hr_meas_win = win_hr_meas[-1] - win_hr_meas[0] > 0
            current_vent_group = vent_mode_col[jdx]
            current_tv = tv_col[jdx]
            current_airway = airway_col[jdx]
            vote_score = 0
            # EtCO2 requirement (still needed)
            if etco2_meas_win and (win_etco2 > 0.5).any():
                vote_score += 2
                vent_votes_etco2_arr[jdx] = 2
            # Ventilation group requirement (still needed)
            if current_vent_group in [2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 10.0]:
                vote_score += 1
                vent_votes_ventgroup_arr[jdx] += 1
            elif current_vent_group in [1.0]:
                vote_score -= 1
                vent_votes_ventgroup_arr[jdx] -= 1
            elif current_vent_group in [11.0, 12.0, 13.0, 15.0, 17.0]:
                vote_score -= 2
                vent_votes_ventgroup_arr[jdx] -= 2
            # TV presence requirement (still needed)
            if current_tv > 0:
                vote_score += 1
                vent_votes_tv_arr[jdx] = 1
            # Airway requirement (still needed)
            if current_airway in [1, 2]:
                vote_score += 2
                vent_votes_airway_arr[jdx] = 2
            # No airway (still needed)
            if current_airway in [3, 4, 5, 6]:
                vote_score -= 1
                vent_votes_airway_arr[jdx] = -1
            vent_votes_arr[jdx] = vote_score
            if vote_score >= configs["vent_vote_threshold"]:
                in_vent_event = True
                vent_status_arr[jdx] = 1
            else:
                in_vent_event = False
            if peep_meas_win:
                peep_status_arr[jdx] = 1
            if (win_peep >= configs["peep_threshold"]).any():
                peep_threshold_arr[jdx] = 1
            if etco2_meas_win:
                etco2_status_arr[jdx] = 1
            if hr_meas_win:
                hr_status_arr[jdx] = 1
        if configs["detect_hr_gaps"]:
            vent_status_arr = delete_low_density_hr_gap(vent_status_arr, hr_status_arr, configs=configs)
        if configs["merge_short_vent_gaps"]:
            vent_status_arr = merge_short_vent_gaps(vent_status_arr, configs["short_gap_hours"])
        if configs["delete_short_vent_events"]:
            vent_status_arr = delete_short_vent_events(vent_status_arr, configs["short_event_hours"])
        # vent_status_arr=correct_left_edge_vent(vent_status_arr, etco2_meas_cnt, etco2_col)
        # vent_status_arr=correct_right_edge_vent(vent_status_arr, etco2_meas_cnt, etco2_col)
        # Ventilation period array
        vent_period_arr = np.copy(vent_status_arr)
        # Delete short ventilation periods if no HR gap before
        in_event = False
        event_length = 0
        for idx in range(len(vent_period_arr)):
            cur_state = vent_period_arr[idx]
            if in_event and cur_state == 1.0:
                event_length += 5
            if not in_event and cur_state == 1.0:
                in_event = True
                event_length = 5
                event_start_idx = idx
            if in_event and (np.isnan(cur_state) or cur_state == 0.0):
                in_event = False
                # Short event at beginning of stay shall never be deleted...
                if event_start_idx == 0:
                    delete_event = False
                else:
                    search_hr_idx = event_start_idx - 1
                    while search_hr_idx >= 0:
                        if hr_status_arr[search_hr_idx] == 1.0:
                            hr_gap_length = 5 * (event_start_idx - search_hr_idx)
                            delete_event = True
                            break
                        search_hr_idx -= 1
                    # Found no HR before event, do not delete event...
                    if search_hr_idx == -1:
                        delete_event = False
                # Delete event in principle, then check if short enough...
                if delete_event:
                    event_length += hr_gap_length
                    if event_length / 60. <= configs["short_event_hours_vent_period"]:
                        vent_period_arr[event_start_idx:idx] = 0.0
        # ============================== OXYGENATION ENDPOINTS ==================================================================
        # Label each point in the 30 minute window (except ventilation)
        for jdx in range(0, len(ratio_arr)):
            # Advance to the last SuppOx infos before grid point
            cur_time = abs_dtime_arr[jdx]
            while True:
                suppox_async_red_ptr = suppox_async_red_ptr + 1
                if suppox_async_red_ptr >= len(suppox_async_red_ts) or suppox_async_red_ts[
                        suppox_async_red_ptr] > cur_time:
                    suppox_async_red_ptr = suppox_async_red_ptr - 1
                    break
            # Estimate the current FiO2 value
            bw_fio2 = fio2_col[max(0, jdx - configs["sz_fio2_window"]):jdx + 1]
            bw_fio2_meas = fio2_meas_cnt[max(0, jdx - configs["sz_fio2_window"]):jdx + 1]
            bw_etco2_meas = etco2_meas_cnt[max(0, jdx - configs["sz_etco2_window"]):jdx + 1]
            fio2_meas = bw_fio2_meas[-1] - bw_fio2_meas[0] > 0
            etco2_meas = bw_etco2_meas[-1] - bw_etco2_meas[0] > 0
            mode_group_est = vent_mode_col[jdx]
            # FiO2 is measured since beginning of stay and EtCO2 was measured, we use FiO2 (indefinite forward filling)
            # if ventilation is active or the current estimate of ventilation mode group is NIV.
            if fio2_meas and (vent_status_arr[jdx] == 1.0 or mode_group_est == 4.0):
                event_count["FIO2_AVAILABLE"] += 1
                fio2_val = bw_fio2[-1] / 100
                fio2_avail_arr[jdx] = 1
            # Use supplemental oxygen or ambient air oxygen
            else:
                # No real measurements up to now, or the last real measurement
                # was more than 8 hours away.
                if suppox_async_red_ptr == -1 or (
                        cur_time - suppox_async_red_ts[suppox_async_red_ptr]) > np.timedelta64(
                        configs["suppox_max_ffill"], 'h'):
                    event_count["SUPPOX_NO_MEAS_12_HOURS_LIMIT"] += 1
                    fio2_val = configs["ambient_fio2"]
                    fio2_ambient_arr[jdx] = 1
                # Find the most recent source variable of SuppOx
                else:
                    suppox = suppox_val["SUPPOX"][suppox_async_red_ptr]
                    # SuppOx information from main source
                    if np.isfinite(suppox):
                        event_count["SUPPOX_MAIN_VAR"] += 1
                        fio2_val = suppox_to_fio2(int(suppox)) / 100
                        fio2_suppox_arr[jdx] = 1
                    else:
                        # BUGFIX: `assert (False, "...")` asserts a non-empty
                        # tuple, which is always truthy and never fires.
                        assert False, "Impossible condition"
            bw_pao2_meas = pao2_meas_cnt[max(0, jdx - configs["sz_pao2_window"]):jdx + 1]
            bw_pao2 = pao2_col[max(0, jdx - configs["sz_pao2_window"]):jdx + 1]
            pao2_meas = bw_pao2_meas[-1] - bw_pao2_meas[0] >= 1
            # PaO2 was just measured, just use the value
            if pao2_meas:
                pao2_estimate = bw_pao2[-1]
                pao2_avail_arr[jdx] = 1
            # Have to forecast PaO2 from a previous SpO2
            else:
                bw_spo2 = spo2_col[max(0, jdx - abga_window):jdx + 1]
                bw_spo2_meas = spo2_meas_cnt[max(0, jdx - abga_window):jdx + 1]
                spo2_meas = bw_spo2_meas[-1] - bw_spo2_meas[0] >= 1
                # Standard case, take the last SpO2 measurement
                if spo2_meas:
                    spo2_val = bw_spo2[-1]
                    pao2_estimate = ellis(np.array([spo2_val]))[0]
                # Extreme edge case, there was SpO2 measurement in the last 24 hours
                else:
                    spo2_val = 98
                    pao2_estimate = ellis(np.array([spo2_val]))[0]
            # Compute the individual components of the Horowitz index
            pao2_est_arr[jdx] = pao2_estimate
            fio2_est_arr[jdx] = fio2_val
        pao2_est_arr_orig = np.copy(pao2_est_arr)
        # Smooth individual components of the P/F ratio estimate
        if configs["kernel_smooth_estimate_pao2"]:
            pao2_est_arr = kernel_smooth_arr(pao2_est_arr, bandwidth=configs["smoothing_bandwidth"])
        if configs["kernel_smooth_estimate_fio2"]:
            fio2_est_arr = kernel_smooth_arr(fio2_est_arr, bandwidth=configs["smoothing_bandwidth"])
        # Test2 data-set for surrogate model
        pao2_sur_est = np.copy(pao2_est_arr)
        assert (np.sum(np.isnan(pao2_sur_est)) == 0)
        # Convex combination of the estimate
        if configs["mix_real_estimated_pao2"]:
            pao2_est_arr = mix_real_est_pao2(pao2_col, pao2_meas_cnt, pao2_est_arr,
                                             bandwidth=configs["smoothing_bandwidth"])
        # Compute Horowitz indices (Kernel pipeline / Surrogate model pipeline)
        for jdx in range(len(ratio_arr)):
            ratio_arr[jdx] = pao2_est_arr[jdx] / fio2_est_arr[jdx]
        # Post-smooth Horowitz index
        if configs["post_smooth_pf_ratio"]:
            ratio_arr = kernel_smooth_arr(ratio_arr, bandwidth=configs["post_smoothing_bandwidth"])
        if configs["pao2_version"] == "ellis_basic":
            pf_event_est_arr = np.copy(ratio_arr)
        elif configs["pao2_version"] == "original":
            assert (False)
        # Now label based on the array of estimated Horowitz indices
        for idx in range(0, len(event_status_arr) - configs["offset_back_windows"]):
            est_idx = pf_event_est_arr[idx:min(len(ratio_arr), idx + sz_window)]
            est_vent = vent_status_arr[idx:min(len(ratio_arr), idx + sz_window)]
            est_peep_dense = peep_status_arr[idx:min(len(ratio_arr), idx + sz_window)]
            est_peep_threshold = peep_threshold_arr[idx:min(len(ratio_arr), idx + sz_window)]
            if np.sum((est_idx <= 100) & (
                    (est_vent == 0.0) | (est_vent == 1.0) & (est_peep_dense == 0.0) | (est_vent == 1.0) & (
                    est_peep_dense == 1.0) & (est_peep_threshold == 1.0))) >= 2 / 3 * len(est_idx):
                event_status_arr[idx] = "event_3"
            elif np.sum((est_idx <= 200) & (
                    (est_vent == 0.0) | (est_vent == 1.0) & (est_peep_dense == 0.0) | (est_vent == 1.0) & (
                    est_peep_dense == 1.0) & (est_peep_threshold == 1.0))) >= 2 / 3 * len(est_idx):
                event_status_arr[idx] = "event_2"
            elif np.sum((est_idx <= 300) & (
                    (est_vent == 0.0) | (est_vent == 1.0) & (est_peep_dense == 0.0) | (est_vent == 1.0) & (
                    est_peep_dense == 1.0) & (est_peep_threshold == 1.0))) >= 2 / 3 * len(est_idx):
                event_status_arr[idx] = "event_1"
            elif np.sum(np.isnan(est_idx)) < 2 / 3 * len(est_idx):
                event_status_arr[idx] = "event_0"
        # Re-traverse the array and correct the right edges of events
        # Correct right edges of event 0 (correct level to level 0)
        on_right_edge = False
        in_event = False
        for idx in range(0, len(event_status_arr) - configs["offset_back_windows"]):
            cur_state = event_status_arr[idx].decode()
            if cur_state in ["event_0"] and not in_event:
                in_event = True
            elif in_event and cur_state not in ["event_0"]:
                in_event = False
                on_right_edge = True
            if on_right_edge:
                if pf_event_est_arr[idx] < 300:
                    on_right_edge = False
                else:
                    event_status_arr[idx] = "event_0"
        # Correct right edges of event 1 (correct to level 1)
        on_right_edge = False
        in_event = False
        for idx in range(0, len(event_status_arr) - configs["offset_back_windows"]):
            cur_state = event_status_arr[idx].decode()
            if cur_state in ["event_1"] and not in_event:
                in_event = True
            elif in_event and cur_state not in ["event_1"]:
                in_event = False
                on_right_edge = True
            if on_right_edge:
                if pf_event_est_arr[idx] < 200 or pf_event_est_arr[idx] >= 300:
                    on_right_edge = False
                else:
                    event_status_arr[idx] = "event_1"
        # Correct right edges of event 2 (correct to level 2)
        on_right_edge = False
        in_event = False
        for idx in range(0, len(event_status_arr) - configs["offset_back_windows"]):
            cur_state = event_status_arr[idx].decode()
            if cur_state in ["event_2"] and not in_event:
                in_event = True
            elif in_event and cur_state not in ["event_2"]:
                in_event = False
                on_right_edge = True
            if on_right_edge:
                if pf_event_est_arr[idx] < 100 or pf_event_est_arr[idx] >= 200:
                    on_right_edge = False
                else:
                    event_status_arr[idx] = "event_2"
        # Correct right edges of event 3 (correct to level 3)
        on_right_edge = False
        in_event = False
        for idx in range(0, len(event_status_arr) - configs["offset_back_windows"]):
            cur_state = event_status_arr[idx].decode()
            if cur_state in ["event_3"] and not in_event:
                in_event = True
            elif in_event and cur_state not in ["event_3"]:
                in_event = False
                on_right_edge = True
            if on_right_edge:
                if pf_event_est_arr[idx] >= 100:
                    on_right_edge = False
                else:
                    event_status_arr[idx] = "event_3"
        circ_status_arr = np.zeros_like(map_col)
        # Computation of the circulatory failure toy version of the endpoint
        for jdx in range(0, len(event_status_arr)):
            map_subarr = map_col[max(0, jdx - 12):min(jdx + 12, len(event_status_arr))]
            lact_subarr = lactate_col[max(0, jdx - 12):min(jdx + 12, len(event_status_arr))]
            milri_subarr = milri_col[max(0, jdx - 12):min(jdx + 12, len(event_status_arr))]
            dobut_subarr = dobut_col[max(0, jdx - 12):min(jdx + 12, len(event_status_arr))]
            levosi_subarr = levosi_col[max(0, jdx - 12):min(jdx + 12, len(event_status_arr))]
            theo_subarr = theo_col[max(0, jdx - 12):min(jdx + 12, len(event_status_arr))]
            noreph_subarr = noreph_col[max(0, jdx - 12):min(jdx + 12, len(event_status_arr))]
            epineph_subarr = epineph_col[max(0, jdx - 12):min(jdx + 12, len(event_status_arr))]
            vaso_subarr = vaso_col[max(0, jdx - 12):min(jdx + 12, len(event_status_arr))]
            map_crit_arr = ((map_subarr < 65) | (milri_subarr > 0) | (dobut_subarr > 0) | (levosi_subarr > 0) | (
                    theo_subarr > 0) | (noreph_subarr > 0) | \
                    (epineph_subarr > 0) | (vaso_subarr > 0))
            lact_crit_arr = (lact_subarr > 2)
            if np.sum(map_crit_arr) >= 2 / 3 * len(map_crit_arr) and np.sum(lact_crit_arr) >= 2 / 3 * len(map_crit_arr):
                circ_status_arr[jdx] = 1.0
        # Traverse the array and delete short gap
        event_status_arr, relabel_arr = delete_small_continuous_blocks(
            event_status_arr, block_threshold=configs["pf_event_merge_threshold"])
        time_col = np.array(df_pid["datetime"])
        rel_time_col = np.array(df_pid["rel_datetime"])
        pid_col = np.array(df_pid["patientid"])
        df_out_dict = {}
        df_out_dict["datetime"] = time_col
        df_out_dict["rel_datetime"] = rel_time_col
        df_out_dict["patientid"] = pid_col
        status_list = list(map(lambda raw_str: raw_str.decode("unicode_escape"), event_status_arr.tolist()))
        df_out_dict["resp_failure_status"] = status_list
        df_out_dict["resp_failure_status_relabel"] = relabel_arr
        # Status columns
        df_out_dict["fio2_available"] = fio2_avail_arr
        df_out_dict["fio2_suppox"] = fio2_suppox_arr
        df_out_dict["fio2_ambient"] = fio2_ambient_arr
        df_out_dict["fio2_estimated"] = fio2_est_arr
        df_out_dict["pao2_estimated"] = pao2_est_arr
        df_out_dict["pao2_estimated_sur"] = pao2_sur_est
        df_out_dict["pao2_available"] = pao2_avail_arr
        df_out_dict["pao2_sao2_model"] = pao2_sao2_model_arr
        df_out_dict["pao2_full_model"] = pao2_full_model_arr
        df_out_dict["estimated_ratio"] = ratio_arr
        df_out_dict["estimated_ratio_sur"] = sur_ratio_arr
        df_out_dict["vent_state"] = vent_status_arr
        df_out_dict["vent_period"] = vent_period_arr
        # Ventilation voting base columns
        df_out_dict["vent_votes"] = vent_votes_arr
        df_out_dict["vent_votes_etco2"] = vent_votes_etco2_arr
        df_out_dict["vent_votes_ventgroup"] = vent_votes_ventgroup_arr
        df_out_dict["vent_votes_tv"] = vent_votes_tv_arr
        df_out_dict["vent_votes_airway"] = vent_votes_airway_arr
        # Circulatory failure related
        df_out_dict["circ_failure_status"] = circ_status_arr
        df_out = pd.DataFrame(df_out_dict)
        out_dfs.append(df_out)
    all_df = pd.concat(out_dfs, axis=0)
    all_df.to_parquet(out_fp)
| [
"os.mkdir",
"numpy.absolute",
"sklearn.preprocessing.StandardScaler",
"numpy.sum",
"random.sample",
"sklearn.metrics.mean_absolute_error",
"numpy.isnan",
"pickle.load",
"os.path.join",
"pandas.DataFrame",
"numpy.zeros_like",
"numpy.copy",
"os.path.exists",
"numpy.isfinite",
"pandas.conca... | [((1061, 1082), 'numpy.copy', 'np.copy', (['pao2_est_arr'], {}), '(pao2_est_arr)\n', (1068, 1082), True, 'import numpy as np\n'), ((2304, 2356), 'logging.info', 'logging.info', (['"""Testing regression model for PaO2..."""'], {}), "('Testing regression model for PaO2...')\n", (2316, 2356), False, 'import logging\n'), ((2477, 2495), 'numpy.vstack', 'np.vstack', (['X_train'], {}), '(X_train)\n', (2486, 2495), True, 'import numpy as np\n'), ((2636, 2652), 'numpy.vstack', 'np.vstack', (['X_val'], {}), '(X_val)\n', (2645, 2652), True, 'import numpy as np\n'), ((2793, 2810), 'numpy.vstack', 'np.vstack', (['X_test'], {}), '(X_test)\n', (2802, 2810), True, 'import numpy as np\n'), ((4405, 4459), 'sklearn.linear_model.SGDRegressor', 'sklm.SGDRegressor', ([], {'alpha': 'best_alpha', 'random_state': '(2021)'}), '(alpha=best_alpha, random_state=2021)\n', (4422, 4459), True, 'import sklearn.linear_model as sklm\n'), ((4652, 4702), 'sklearn.metrics.mean_absolute_error', 'skmetrics.mean_absolute_error', (['y_test', 'pred_y_test'], {}), '(y_test, pred_y_test)\n', (4681, 4702), True, 'import sklearn.metrics as skmetrics\n'), ((4942, 4967), 'numpy.zeros_like', 'np.zeros_like', (['signal_col'], {}), '(signal_col)\n', (4955, 4967), True, 'import numpy as np\n'), ((5444, 5460), 'numpy.copy', 'np.copy', (['val_arr'], {}), '(val_arr)\n', (5451, 5460), True, 'import numpy as np\n'), ((5480, 5497), 'numpy.copy', 'np.copy', (['meas_arr'], {}), '(meas_arr)\n', (5487, 5497), True, 'import numpy as np\n'), ((7568, 7584), 'numpy.copy', 'np.copy', (['val_arr'], {}), '(val_arr)\n', (7575, 7584), True, 'import numpy as np\n'), ((7604, 7621), 'numpy.copy', 'np.copy', (['meas_arr'], {}), '(meas_arr)\n', (7611, 7621), True, 'import numpy as np\n'), ((10211, 10229), 'numpy.copy', 'np.copy', (['input_arr'], {}), '(input_arr)\n', (10218, 10229), True, 'import numpy as np\n'), ((10508, 10566), 'skfda.preprocessing.smoothing.kernel_smoothers.NadarayaWatsonSmoother', 
'skks.NadarayaWatsonSmoother', ([], {'smoothing_parameter': 'bandwidth'}), '(smoothing_parameter=bandwidth)\n', (10535, 10566), True, 'import skfda.preprocessing.smoothing.kernel_smoothers as skks\n'), ((10579, 10616), 'skfda.representation.grid.FDataGrid', 'skgrid.FDataGrid', (['[fin_arr]', 'fin_time'], {}), '([fin_arr], fin_time)\n', (10595, 10616), True, 'import skfda.representation.grid as skgrid\n'), ((11832, 11866), 'numpy.cbrt', 'np.cbrt', (['(exp_base + exp_sqrbracket)'], {}), '(exp_base + exp_sqrbracket)\n', (11839, 11866), True, 'import numpy as np\n'), ((11884, 11918), 'numpy.cbrt', 'np.cbrt', (['(exp_base - exp_sqrbracket)'], {}), '(exp_base - exp_sqrbracket)\n', (11891, 11918), True, 'import numpy as np\n'), ((16219, 16237), 'numpy.copy', 'np.copy', (['event_arr'], {}), '(event_arr)\n', (16226, 16237), True, 'import numpy as np\n'), ((21219, 21253), 'os.path.join', 'os.path.join', (["configs['merged_h5']"], {}), "(configs['merged_h5'])\n", (21231, 21253), False, 'import os\n'), ((21271, 21309), 'os.path.join', 'os.path.join', (["configs['endpoint_path']"], {}), "(configs['endpoint_path'])\n", (21283, 21309), False, 'import os\n'), ((21798, 21841), 'logging.info', 'logging.info', (['"""Loaded imputed data done..."""'], {}), "('Loaded imputed data done...')\n", (21810, 21841), False, 'import logging\n'), ((22506, 22557), 'pandas.read_parquet', 'pd.read_parquet', (["configs['general_data_table_path']"], {}), "(configs['general_data_table_path'])\n", (22521, 22557), True, 'import pandas as pd\n'), ((46821, 46847), 'pandas.concat', 'pd.concat', (['out_dfs'], {'axis': '(0)'}), '(out_dfs, axis=0)\n', (46830, 46847), True, 'import pandas as pd\n'), ((661, 676), 'pickle.load', 'pickle.load', (['fp'], {}), '(fp)\n', (672, 676), False, 'import pickle\n'), ((3005, 3029), 'sklearn.preprocessing.StandardScaler', 'skpproc.StandardScaler', ([], {}), '()\n', (3027, 3029), True, 'import sklearn.preprocessing as skpproc\n'), ((5210, 5247), 'numpy.percentile', 
'np.percentile', (['search_arr', 'percentile'], {}), '(search_arr, percentile)\n', (5223, 5247), True, 'import numpy as np\n'), ((5956, 5996), 'random.sample', 'random.sample', (['feas_begins', 'begin_select'], {}), '(feas_begins, begin_select)\n', (5969, 5996), False, 'import random\n'), ((7944, 7981), 'random.sample', 'random.sample', (['meas_idxs', 'meas_select'], {}), '(meas_idxs, meas_select)\n', (7957, 7981), False, 'import random\n'), ((10255, 10278), 'numpy.isfinite', 'np.isfinite', (['output_arr'], {}), '(output_arr)\n', (10266, 10278), True, 'import numpy as np\n'), ((10352, 10375), 'numpy.isfinite', 'np.isfinite', (['output_arr'], {}), '(output_arr)\n', (10363, 10375), True, 'import numpy as np\n'), ((10736, 10759), 'numpy.isfinite', 'np.isfinite', (['output_arr'], {}), '(output_arr)\n', (10747, 10759), True, 'import numpy as np\n'), ((11625, 11641), 'numpy.isnan', 'np.isnan', (['x_orig'], {}), '(x_orig)\n', (11633, 11641), True, 'import numpy as np\n'), ((18822, 18846), 'numpy.vstack', 'np.vstack', (['X_arr_collect'], {}), '(X_arr_collect)\n', (18831, 18846), True, 'import numpy as np\n'), ((18863, 18886), 'numpy.array', 'np.array', (['y_arr_collect'], {}), '(y_arr_collect)\n', (18871, 18886), True, 'import numpy as np\n'), ((18905, 18926), 'numpy.array', 'np.array', (['aux_collect'], {}), '(aux_collect)\n', (18913, 18926), True, 'import numpy as np\n'), ((21322, 21348), 'os.path.exists', 'os.path.exists', (['out_folder'], {}), '(out_folder)\n', (21336, 21348), False, 'import os\n'), ((21358, 21378), 'os.mkdir', 'os.mkdir', (['out_folder'], {}), '(out_folder)\n', (21366, 21378), False, 'import os\n'), ((21579, 21606), 'os.path.exists', 'os.path.exists', (['batch_fpath'], {}), '(batch_fpath)\n', (21593, 21606), False, 'import os\n'), ((21616, 21678), 'logging.info', 'logging.info', (['"""WARNING: Input file does not exist, exiting..."""'], {}), "('WARNING: Input file does not exist, exiting...')\n", (21628, 21678), False, 'import logging\n'), ((21687, 
21698), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (21695, 21698), False, 'import sys\n'), ((22897, 22966), 'pandas.read_parquet', 'pd.read_parquet', (['cand_raw_batch[0]'], {'filters': "[('patientid', '=', pid)]"}), "(cand_raw_batch[0], filters=[('patientid', '=', pid)])\n", (22912, 22966), True, 'import pandas as pd\n'), ((23295, 23336), 'numpy.array', 'np.array', (["df_suppox_red_async['datetime']"], {}), "(df_suppox_red_async['datetime'])\n", (23303, 23336), True, 'import numpy as np\n'), ((23369, 23417), 'numpy.array', 'np.array', (["df_suppox_red_async[var_map['SuppOx']]"], {}), "(df_suppox_red_async[var_map['SuppOx']])\n", (23377, 23417), True, 'import numpy as np\n'), ((23591, 23624), 'numpy.array', 'np.array', (["df_pid[var_map['FiO2']]"], {}), "(df_pid[var_map['FiO2']])\n", (23599, 23624), True, 'import numpy as np\n'), ((23644, 23677), 'numpy.array', 'np.array', (["df_pid[var_map['PaO2']]"], {}), "(df_pid[var_map['PaO2']])\n", (23652, 23677), True, 'import numpy as np\n'), ((23698, 23732), 'numpy.array', 'np.array', (["df_pid[var_map['etCO2']]"], {}), "(df_pid[var_map['etCO2']])\n", (23706, 23732), True, 'import numpy as np\n'), ((23753, 23787), 'numpy.array', 'np.array', (["df_pid[var_map['PaCO2']]"], {}), "(df_pid[var_map['PaCO2']])\n", (23761, 23787), True, 'import numpy as np\n'), ((23809, 23849), 'numpy.array', 'np.array', (["df_pid[var_map['GCS_Antwort']]"], {}), "(df_pid[var_map['GCS_Antwort']])\n", (23817, 23849), True, 'import numpy as np\n'), ((23870, 23910), 'numpy.array', 'np.array', (["df_pid[var_map['GCS_Motorik']]"], {}), "(df_pid[var_map['GCS_Motorik']])\n", (23878, 23910), True, 'import numpy as np\n'), ((23933, 23971), 'numpy.array', 'np.array', (["df_pid[var_map['GCS_Augen']]"], {}), "(df_pid[var_map['GCS_Augen']])\n", (23941, 23971), True, 'import numpy as np\n'), ((23994, 24032), 'numpy.array', 'np.array', (["df_pid[var_map['Weight'][0]]"], {}), "(df_pid[var_map['Weight'][0]])\n", (24002, 24032), True, 'import numpy as np\n'), 
((24055, 24102), 'numpy.array', 'np.array', (["df_pid[var_map['Norephenephrine'][0]]"], {}), "(df_pid[var_map['Norephenephrine'][0]])\n", (24063, 24102), True, 'import numpy as np\n'), ((24125, 24168), 'numpy.array', 'np.array', (["df_pid[var_map['Epinephrine'][0]]"], {}), "(df_pid[var_map['Epinephrine'][0]])\n", (24133, 24168), True, 'import numpy as np\n'), ((24188, 24231), 'numpy.array', 'np.array', (["df_pid[var_map['Vasopressin'][0]]"], {}), "(df_pid[var_map['Vasopressin'][0]])\n", (24196, 24231), True, 'import numpy as np\n'), ((24253, 24294), 'numpy.array', 'np.array', (["df_pid[var_map['Milrinone'][0]]"], {}), "(df_pid[var_map['Milrinone'][0]])\n", (24261, 24294), True, 'import numpy as np\n'), ((24315, 24357), 'numpy.array', 'np.array', (["df_pid[var_map['Dobutamine'][0]]"], {}), "(df_pid[var_map['Dobutamine'][0]])\n", (24323, 24357), True, 'import numpy as np\n'), ((24379, 24423), 'numpy.array', 'np.array', (["df_pid[var_map['Levosimendan'][0]]"], {}), "(df_pid[var_map['Levosimendan'][0]])\n", (24387, 24423), True, 'import numpy as np\n'), ((24443, 24486), 'numpy.array', 'np.array', (["df_pid[var_map['Theophyllin'][0]]"], {}), "(df_pid[var_map['Theophyllin'][0]])\n", (24451, 24486), True, 'import numpy as np\n'), ((24510, 24549), 'numpy.array', 'np.array', (["df_pid[var_map['Lactate'][0]]"], {}), "(df_pid[var_map['Lactate'][0]])\n", (24518, 24549), True, 'import numpy as np\n'), ((24569, 24602), 'numpy.array', 'np.array', (["df_pid[var_map['PEEP']]"], {}), "(df_pid[var_map['PEEP']])\n", (24577, 24602), True, 'import numpy as np\n'), ((24641, 24672), 'numpy.array', 'np.array', (["df_pid[var_map['HR']]"], {}), "(df_pid[var_map['HR']])\n", (24649, 24672), True, 'import numpy as np\n'), ((24807, 24840), 'numpy.array', 'np.array', (["df_pid[var_map['Temp']]"], {}), "(df_pid[var_map['Temp']])\n", (24815, 24840), True, 'import numpy as np\n'), ((24958, 24992), 'numpy.array', 'np.array', (["df_pid[var_map['RRate']]"], {}), "(df_pid[var_map['RRate']])\n", (24966, 
24992), True, 'import numpy as np\n'), ((25010, 25041), 'numpy.array', 'np.array', (["df_pid[var_map['TV']]"], {}), "(df_pid[var_map['TV']])\n", (25018, 25041), True, 'import numpy as np\n'), ((25060, 25095), 'numpy.array', 'np.array', (["df_pid[var_map['MAP'][0]]"], {}), "(df_pid[var_map['MAP'][0]])\n", (25068, 25095), True, 'import numpy as np\n'), ((25117, 25152), 'numpy.array', 'np.array', (["df_pid[var_map['Airway']]"], {}), "(df_pid[var_map['Airway']])\n", (25125, 25152), True, 'import numpy as np\n'), ((25218, 25256), 'numpy.array', 'np.array', (["df_pid[var_map['vent_mode']]"], {}), "(df_pid[var_map['vent_mode']])\n", (25226, 25256), True, 'import numpy as np\n'), ((25277, 25310), 'numpy.array', 'np.array', (["df_pid[var_map['SpO2']]"], {}), "(df_pid[var_map['SpO2']])\n", (25285, 25310), True, 'import numpy as np\n'), ((25538, 25571), 'numpy.array', 'np.array', (["df_pid[var_map['SaO2']]"], {}), "(df_pid[var_map['SaO2']])\n", (25546, 25571), True, 'import numpy as np\n'), ((25589, 25620), 'numpy.array', 'np.array', (["df_pid[var_map['pH']]"], {}), "(df_pid[var_map['pH']])\n", (25597, 25620), True, 'import numpy as np\n'), ((26409, 26437), 'numpy.array', 'np.array', (["df_pid['datetime']"], {}), "(df_pid['datetime'])\n", (26417, 26437), True, 'import numpy as np\n'), ((26465, 26508), 'numpy.zeros', 'np.zeros', ([], {'shape': 'fio2_col.size', 'dtype': '"""<S10"""'}), "(shape=fio2_col.size, dtype='<S10')\n", (26473, 26508), True, 'import numpy as np\n'), ((26561, 26590), 'numpy.zeros', 'np.zeros', ([], {'shape': 'fio2_col.size'}), '(shape=fio2_col.size)\n', (26569, 26590), True, 'import numpy as np\n'), ((26618, 26647), 'numpy.zeros', 'np.zeros', ([], {'shape': 'fio2_col.size'}), '(shape=fio2_col.size)\n', (26626, 26647), True, 'import numpy as np\n'), ((26676, 26705), 'numpy.zeros', 'np.zeros', ([], {'shape': 'fio2_col.size'}), '(shape=fio2_col.size)\n', (26684, 26705), True, 'import numpy as np\n'), ((26735, 26764), 'numpy.zeros', 'np.zeros', ([], {'shape': 
'fio2_col.size'}), '(shape=fio2_col.size)\n', (26743, 26764), True, 'import numpy as np\n'), ((26797, 26826), 'numpy.zeros', 'np.zeros', ([], {'shape': 'fio2_col.size'}), '(shape=fio2_col.size)\n', (26805, 26826), True, 'import numpy as np\n'), ((26859, 26888), 'numpy.zeros', 'np.zeros', ([], {'shape': 'fio2_col.size'}), '(shape=fio2_col.size)\n', (26867, 26888), True, 'import numpy as np\n'), ((26912, 26941), 'numpy.zeros', 'np.zeros', ([], {'shape': 'fio2_col.size'}), '(shape=fio2_col.size)\n', (26920, 26941), True, 'import numpy as np\n'), ((26968, 26997), 'numpy.zeros', 'np.zeros', ([], {'shape': 'fio2_col.size'}), '(shape=fio2_col.size)\n', (26976, 26997), True, 'import numpy as np\n'), ((27024, 27053), 'numpy.zeros', 'np.zeros', ([], {'shape': 'fio2_col.size'}), '(shape=fio2_col.size)\n', (27032, 27053), True, 'import numpy as np\n'), ((27079, 27108), 'numpy.zeros', 'np.zeros', ([], {'shape': 'fio2_col.size'}), '(shape=fio2_col.size)\n', (27087, 27108), True, 'import numpy as np\n'), ((27137, 27166), 'numpy.zeros', 'np.zeros', ([], {'shape': 'fio2_col.size'}), '(shape=fio2_col.size)\n', (27145, 27166), True, 'import numpy as np\n'), ((27197, 27226), 'numpy.zeros', 'np.zeros', ([], {'shape': 'fio2_col.size'}), '(shape=fio2_col.size)\n', (27205, 27226), True, 'import numpy as np\n'), ((27316, 27345), 'numpy.zeros', 'np.zeros', ([], {'shape': 'fio2_col.size'}), '(shape=fio2_col.size)\n', (27324, 27345), True, 'import numpy as np\n'), ((27379, 27408), 'numpy.zeros', 'np.zeros', ([], {'shape': 'fio2_col.size'}), '(shape=fio2_col.size)\n', (27387, 27408), True, 'import numpy as np\n'), ((27446, 27475), 'numpy.zeros', 'np.zeros', ([], {'shape': 'fio2_col.size'}), '(shape=fio2_col.size)\n', (27454, 27475), True, 'import numpy as np\n'), ((27506, 27535), 'numpy.zeros', 'np.zeros', ([], {'shape': 'fio2_col.size'}), '(shape=fio2_col.size)\n', (27514, 27535), True, 'import numpy as np\n'), ((27570, 27599), 'numpy.zeros', 'np.zeros', ([], {'shape': 'fio2_col.size'}), 
'(shape=fio2_col.size)\n', (27578, 27599), True, 'import numpy as np\n'), ((27629, 27658), 'numpy.zeros', 'np.zeros', ([], {'shape': 'fio2_col.size'}), '(shape=fio2_col.size)\n', (27637, 27658), True, 'import numpy as np\n'), ((27690, 27719), 'numpy.zeros', 'np.zeros', ([], {'shape': 'fio2_col.size'}), '(shape=fio2_col.size)\n', (27698, 27719), True, 'import numpy as np\n'), ((27746, 27775), 'numpy.zeros', 'np.zeros', ([], {'shape': 'fio2_col.size'}), '(shape=fio2_col.size)\n', (27754, 27775), True, 'import numpy as np\n'), ((27805, 27834), 'numpy.zeros', 'np.zeros', ([], {'shape': 'fio2_col.size'}), '(shape=fio2_col.size)\n', (27813, 27834), True, 'import numpy as np\n'), ((31851, 31875), 'numpy.copy', 'np.copy', (['vent_status_arr'], {}), '(vent_status_arr)\n', (31858, 31875), True, 'import numpy as np\n'), ((37466, 37487), 'numpy.copy', 'np.copy', (['pao2_est_arr'], {}), '(pao2_est_arr)\n', (37473, 37487), True, 'import numpy as np\n'), ((37928, 37949), 'numpy.copy', 'np.copy', (['pao2_est_arr'], {}), '(pao2_est_arr)\n', (37935, 37949), True, 'import numpy as np\n'), ((43210, 43232), 'numpy.zeros_like', 'np.zeros_like', (['map_col'], {}), '(map_col)\n', (43223, 43232), True, 'import numpy as np\n'), ((45009, 45037), 'numpy.array', 'np.array', (["df_pid['datetime']"], {}), "(df_pid['datetime'])\n", (45017, 45037), True, 'import numpy as np\n'), ((45061, 45093), 'numpy.array', 'np.array', (["df_pid['rel_datetime']"], {}), "(df_pid['rel_datetime'])\n", (45069, 45093), True, 'import numpy as np\n'), ((45112, 45141), 'numpy.array', 'np.array', (["df_pid['patientid']"], {}), "(df_pid['patientid'])\n", (45120, 45141), True, 'import numpy as np\n'), ((46750, 46775), 'pandas.DataFrame', 'pd.DataFrame', (['df_out_dict'], {}), '(df_out_dict)\n', (46762, 46775), True, 'import pandas as pd\n'), ((1853, 1893), 'math.exp', 'math.exp', (['(-real_val_dist ** 2 / sq_scale)'], {}), '(-real_val_dist ** 2 / sq_scale)\n', (1861, 1893), False, 'import math\n'), ((3611, 3660), 
'sklearn.linear_model.SGDRegressor', 'sklm.SGDRegressor', ([], {'alpha': 'alpha', 'random_state': '(2021)'}), '(alpha=alpha, random_state=2021)\n', (3628, 3660), True, 'import sklearn.linear_model as sklm\n'), ((4261, 4292), 'numpy.absolute', 'np.absolute', (['(y_val - pred_y_val)'], {}), '(y_val - pred_y_val)\n', (4272, 4292), True, 'import numpy as np\n'), ((38724, 38742), 'numpy.copy', 'np.copy', (['ratio_arr'], {}), '(ratio_arr)\n', (38731, 38742), True, 'import numpy as np\n'), ((3737, 3835), 'lightgbm.LGBMRegressor', 'lgbm.LGBMRegressor', ([], {'num_leaves': 'alpha', 'learning_rate': '(0.05)', 'n_estimators': '(1000)', 'random_state': '(2021)'}), '(num_leaves=alpha, learning_rate=0.05, n_estimators=1000,\n random_state=2021)\n', (3755, 3835), True, 'import lightgbm as lgbm\n'), ((9531, 9550), 'numpy.isnan', 'np.isnan', (['cur_state'], {}), '(cur_state)\n', (9539, 9550), True, 'import numpy as np\n'), ((11313, 11332), 'numpy.isnan', 'np.isnan', (['cur_state'], {}), '(cur_state)\n', (11321, 11332), True, 'import numpy as np\n'), ((18248, 18424), 'numpy.array', 'np.array', (['[spo2_col[jdx - 1], pao2_col[jdx - 1], sao2_col[jdx - 1], ph_col[jdx - 1], \n jdx - spo2_real_meas[-2], jdx - pao2_real_meas[-2], spo2_col[\n pao2_real_meas[-2]]]'], {}), '([spo2_col[jdx - 1], pao2_col[jdx - 1], sao2_col[jdx - 1], ph_col[\n jdx - 1], jdx - spo2_real_meas[-2], jdx - pao2_real_meas[-2], spo2_col[\n pao2_real_meas[-2]]])\n', (18256, 18424), True, 'import numpy as np\n'), ((37973, 37995), 'numpy.isnan', 'np.isnan', (['pao2_sur_est'], {}), '(pao2_sur_est)\n', (37981, 37995), True, 'import numpy as np\n'), ((39336, 39513), 'numpy.sum', 'np.sum', (['((est_idx <= 100) & ((est_vent == 0.0) | (est_vent == 1.0) & (\n est_peep_dense == 0.0) | (est_vent == 1.0) & (est_peep_dense == 1.0) &\n (est_peep_threshold == 1.0)))'], {}), '((est_idx <= 100) & ((est_vent == 0.0) | (est_vent == 1.0) & (\n est_peep_dense == 0.0) | (est_vent == 1.0) & (est_peep_dense == 1.0) &\n (est_peep_threshold == 
1.0)))\n', (39342, 39513), True, 'import numpy as np\n'), ((9630, 9649), 'numpy.isnan', 'np.isnan', (['cur_state'], {}), '(cur_state)\n', (9638, 9649), True, 'import numpy as np\n'), ((18584, 18602), 'numpy.isfinite', 'np.isfinite', (['y_val'], {}), '(y_val)\n', (18595, 18602), True, 'import numpy as np\n'), ((18607, 18627), 'numpy.isfinite', 'np.isfinite', (['aux_val'], {}), '(aux_val)\n', (18618, 18627), True, 'import numpy as np\n'), ((19996, 20014), 'numpy.sum', 'np.sum', (['hr_sub_arr'], {}), '(hr_sub_arr)\n', (20002, 20014), True, 'import numpy as np\n'), ((32345, 32364), 'numpy.isnan', 'np.isnan', (['cur_state'], {}), '(cur_state)\n', (32353, 32364), True, 'import numpy as np\n'), ((35880, 35899), 'numpy.isfinite', 'np.isfinite', (['suppox'], {}), '(suppox)\n', (35891, 35899), True, 'import numpy as np\n'), ((39639, 39816), 'numpy.sum', 'np.sum', (['((est_idx <= 200) & ((est_vent == 0.0) | (est_vent == 1.0) & (\n est_peep_dense == 0.0) | (est_vent == 1.0) & (est_peep_dense == 1.0) &\n (est_peep_threshold == 1.0)))'], {}), '((est_idx <= 200) & ((est_vent == 0.0) | (est_vent == 1.0) & (\n est_peep_dense == 0.0) | (est_vent == 1.0) & (est_peep_dense == 1.0) &\n (est_peep_threshold == 1.0)))\n', (39645, 39816), True, 'import numpy as np\n'), ((44500, 44520), 'numpy.sum', 'np.sum', (['map_crit_arr'], {}), '(map_crit_arr)\n', (44506, 44520), True, 'import numpy as np\n'), ((44554, 44575), 'numpy.sum', 'np.sum', (['lact_crit_arr'], {}), '(lact_crit_arr)\n', (44560, 44575), True, 'import numpy as np\n'), ((18943, 18958), 'numpy.isnan', 'np.isnan', (['X_arr'], {}), '(X_arr)\n', (18951, 18958), True, 'import numpy as np\n'), ((18974, 18989), 'numpy.isnan', 'np.isnan', (['y_arr'], {}), '(y_arr)\n', (18982, 18989), True, 'import numpy as np\n'), ((35396, 35444), 'numpy.timedelta64', 'np.timedelta64', (["configs['suppox_max_ffill']", '"""h"""'], {}), "(configs['suppox_max_ffill'], 'h')\n", (35410, 35444), True, 'import numpy as np\n'), ((39942, 40119), 'numpy.sum', 
'np.sum', (['((est_idx <= 300) & ((est_vent == 0.0) | (est_vent == 1.0) & (\n est_peep_dense == 0.0) | (est_vent == 1.0) & (est_peep_dense == 1.0) &\n (est_peep_threshold == 1.0)))'], {}), '((est_idx <= 300) & ((est_vent == 0.0) | (est_vent == 1.0) & (\n est_peep_dense == 0.0) | (est_vent == 1.0) & (est_peep_dense == 1.0) &\n (est_peep_threshold == 1.0)))\n', (39948, 40119), True, 'import numpy as np\n'), ((37045, 37065), 'numpy.array', 'np.array', (['[spo2_val]'], {}), '([spo2_val])\n', (37053, 37065), True, 'import numpy as np\n'), ((37254, 37274), 'numpy.array', 'np.array', (['[spo2_val]'], {}), '([spo2_val])\n', (37262, 37274), True, 'import numpy as np\n'), ((18552, 18568), 'numpy.isnan', 'np.isnan', (['x_vect'], {}), '(x_vect)\n', (18560, 18568), True, 'import numpy as np\n'), ((40252, 40269), 'numpy.isnan', 'np.isnan', (['est_idx'], {}), '(est_idx)\n', (40260, 40269), True, 'import numpy as np\n')] |
import argparse
import numpy as np
import numpy.random as npr
import os
import scipy.misc
import torch
import torch.nn as nn
from colourization import (get_torch_vars, get_cat_rgb, get_rgb_cat,
process, UNet, CNN, DilatedUNet)
from load_data import load_cifar10
if __name__ == '__main__':
    # Parse command-line options for the activation-visualization run.
    parser = argparse.ArgumentParser(description="Train colourization")
    parser.add_argument('index', default=0, type=int,
                        help="Image index to plot")
    parser.add_argument('--checkpoint', default="",
                        help="Model file to load and save")
    parser.add_argument('--outdir', default="outputs/act",
                        help="Directory to save the file")
    parser.add_argument('-m', '--model', choices=["CNN", "UNet", "DUNet"],
                        help="Model to run")
    parser.add_argument('-k', '--kernel', default=3, type=int,
                        help="Convolution kernel size")
    parser.add_argument('-f', '--filters', default=32, type=int,
                        help="Base number of convolution filters")
    parser.add_argument('-c', '--colours',
                        default='colours/colour_kmeans24_cat7.npy',
                        help="Discrete colour clusters to use")
    args = parser.parse_args()
    # Load the discrete colour clusters; the .npy file apparently stores an
    # object array whose first element is the (num_colours, 3) palette.
    # NOTE(review): on numpy >= 1.16 loading object arrays needs
    # allow_pickle=True — confirm against the file format.
    colours = np.load(args.colours)[0]
    num_colours = np.shape(colours)[0]
    # Load CIFAR-10 with a fixed RNG seed so the test split is reproducible.
    print("Loading data...")
    npr.seed(0)
    (x_train, y_train), (x_test, y_test) = load_cifar10()
    test_rgb, test_grey = process(x_test, y_test)
    test_rgb_cat = get_rgb_cat(test_rgb, colours)
    # Instantiate the requested model architecture.
    if args.model == "CNN":
        cnn = CNN(args.kernel, args.filters, num_colours)
    elif args.model == "UNet":
        cnn = UNet(args.kernel, args.filters, num_colours)
    else: # model == "DUNet":
        cnn = DilatedUNet(args.kernel, args.filters, num_colours)
    print("Loading checkpoint...")
    # map_location keeps CPU-only machines working with GPU-saved checkpoints.
    cnn.load_state_dict(torch.load(args.checkpoint, map_location=lambda storage, loc: storage))
    # Take the index of the test image.
    # NOTE(review): `id` shadows the builtin of the same name — consider
    # renaming (e.g. img_index) in a follow-up that touches all uses.
    id = args.index
    outdir = args.outdir + str(id)
    os.mkdir(outdir)
    # Run a single grey image (batch of 1) through the network.
    images, labels = get_torch_vars(np.expand_dims(test_grey[id], 0),
                                    np.expand_dims(test_rgb_cat[id], 0))
    outputs = cnn(images)
    _, predicted = torch.max(outputs.data, 1, keepdim=True)
    predcolor = get_cat_rgb(predicted.cpu().numpy()[0,0,:,:], colours)
    # NOTE(review): scipy.misc.toimage was removed in SciPy >= 1.2; this
    # script requires an old SciPy (or porting to PIL.Image) to run.
    scipy.misc.toimage(predcolor, cmin=0, cmax=1) \
            .save(os.path.join(outdir, "filter_output_%d.png" % id))
    # Save the ground-truth RGB image (CHW -> HWC for image writers).
    scipy.misc.toimage(np.transpose(test_rgb[id], [1,2,0]), cmin=0, cmax=1) \
            .save(os.path.join(outdir, "filter_input_rgb_%d.png" % id))
    # Save the greyscale network input.
    scipy.misc.toimage(test_grey[id,0,:,:], cmin=0, cmax=1) \
            .save(os.path.join(outdir, "filter_input_%d.png" % id))
def add_border(img):
return np.pad(img, 1, "constant", constant_values=1.0)
def draw_activations(path, activation, imgwidth=4):
img = np.vstack([
np.hstack([
add_border(filter) for filter in
activation[i*imgwidth:(i+1)*imgwidth,:,:]])
for i in range(activation.shape[0] // imgwidth)])
scipy.misc.imsave(path, img)
    # Dump the cached intermediate activations of each stage to disk.
    # NOTE(review): assumes the model's forward pass stored its layer
    # outputs on attributes out1..out5 — confirm in the model definitions.
    for i, tensor in enumerate([cnn.out1, cnn.out2, cnn.out3, cnn.out4, cnn.out5]):
        draw_activations(
            os.path.join(outdir, "filter_out%d_%d.png" % (i, id)),
            tensor.data.cpu().numpy()[0])
| [
"numpy.pad",
"os.mkdir",
"numpy.load",
"colourization.DilatedUNet",
"numpy.random.seed",
"argparse.ArgumentParser",
"torch.load",
"numpy.transpose",
"numpy.expand_dims",
"colourization.process",
"colourization.CNN",
"numpy.shape",
"colourization.get_rgb_cat",
"torch.max",
"colourization.... | [((332, 390), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Train colourization"""'}), "(description='Train colourization')\n", (355, 390), False, 'import argparse\n'), ((1493, 1504), 'numpy.random.seed', 'npr.seed', (['(0)'], {}), '(0)\n', (1501, 1504), True, 'import numpy.random as npr\n'), ((1548, 1562), 'load_data.load_cifar10', 'load_cifar10', ([], {}), '()\n', (1560, 1562), False, 'from load_data import load_cifar10\n'), ((1589, 1612), 'colourization.process', 'process', (['x_test', 'y_test'], {}), '(x_test, y_test)\n', (1596, 1612), False, 'from colourization import get_torch_vars, get_cat_rgb, get_rgb_cat, process, UNet, CNN, DilatedUNet\n'), ((1632, 1662), 'colourization.get_rgb_cat', 'get_rgb_cat', (['test_rgb', 'colours'], {}), '(test_rgb, colours)\n', (1643, 1662), False, 'from colourization import get_torch_vars, get_cat_rgb, get_rgb_cat, process, UNet, CNN, DilatedUNet\n'), ((2188, 2204), 'os.mkdir', 'os.mkdir', (['outdir'], {}), '(outdir)\n', (2196, 2204), False, 'import os\n'), ((2393, 2433), 'torch.max', 'torch.max', (['outputs.data', '(1)'], {'keepdim': '(True)'}), '(outputs.data, 1, keepdim=True)\n', (2402, 2433), False, 'import torch\n'), ((1353, 1374), 'numpy.load', 'np.load', (['args.colours'], {}), '(args.colours)\n', (1360, 1374), True, 'import numpy as np\n'), ((1396, 1413), 'numpy.shape', 'np.shape', (['colours'], {}), '(colours)\n', (1404, 1413), True, 'import numpy as np\n'), ((1727, 1770), 'colourization.CNN', 'CNN', (['args.kernel', 'args.filters', 'num_colours'], {}), '(args.kernel, args.filters, num_colours)\n', (1730, 1770), False, 'from colourization import get_torch_vars, get_cat_rgb, get_rgb_cat, process, UNet, CNN, DilatedUNet\n'), ((2017, 2087), 'torch.load', 'torch.load', (['args.checkpoint'], {'map_location': '(lambda storage, loc: storage)'}), '(args.checkpoint, map_location=lambda storage, loc: storage)\n', (2027, 2087), False, 'import torch\n'), ((2241, 2273), 
'numpy.expand_dims', 'np.expand_dims', (['test_grey[id]', '(0)'], {}), '(test_grey[id], 0)\n', (2255, 2273), True, 'import numpy as np\n'), ((2311, 2346), 'numpy.expand_dims', 'np.expand_dims', (['test_rgb_cat[id]', '(0)'], {}), '(test_rgb_cat[id], 0)\n', (2325, 2346), True, 'import numpy as np\n'), ((2575, 2624), 'os.path.join', 'os.path.join', (['outdir', "('filter_output_%d.png' % id)"], {}), "(outdir, 'filter_output_%d.png' % id)\n", (2587, 2624), False, 'import os\n'), ((2724, 2776), 'os.path.join', 'os.path.join', (['outdir', "('filter_input_rgb_%d.png' % id)"], {}), "(outdir, 'filter_input_rgb_%d.png' % id)\n", (2736, 2776), False, 'import os\n'), ((2858, 2906), 'os.path.join', 'os.path.join', (['outdir', "('filter_input_%d.png' % id)"], {}), "(outdir, 'filter_input_%d.png' % id)\n", (2870, 2906), False, 'import os\n'), ((2949, 2996), 'numpy.pad', 'np.pad', (['img', '(1)', '"""constant"""'], {'constant_values': '(1.0)'}), "(img, 1, 'constant', constant_values=1.0)\n", (2955, 2996), True, 'import numpy as np\n'), ((1816, 1860), 'colourization.UNet', 'UNet', (['args.kernel', 'args.filters', 'num_colours'], {}), '(args.kernel, args.filters, num_colours)\n', (1820, 1860), False, 'from colourization import get_torch_vars, get_cat_rgb, get_rgb_cat, process, UNet, CNN, DilatedUNet\n'), ((1905, 1956), 'colourization.DilatedUNet', 'DilatedUNet', (['args.kernel', 'args.filters', 'num_colours'], {}), '(args.kernel, args.filters, num_colours)\n', (1916, 1956), False, 'from colourization import get_torch_vars, get_cat_rgb, get_rgb_cat, process, UNet, CNN, DilatedUNet\n'), ((3435, 3488), 'os.path.join', 'os.path.join', (['outdir', "('filter_out%d_%d.png' % (i, id))"], {}), "(outdir, 'filter_out%d_%d.png' % (i, id))\n", (3447, 3488), False, 'import os\n'), ((2651, 2688), 'numpy.transpose', 'np.transpose', (['test_rgb[id]', '[1, 2, 0]'], {}), '(test_rgb[id], [1, 2, 0])\n', (2663, 2688), True, 'import numpy as np\n')] |
from intersim.viz import Rasta
import numpy as np
def test_world_to_raster():
    """The world fixpoint must project onto the raster fixpoint (grid center)."""
    rasterizer = Rasta(
        m_per_px=0.5,
        raster_fixpoint=(0.5, 0.5),
        world_fixpoint=(100, 100),
        camera_rotation=0,
    )
    projected = rasterizer._world_to_raster(
        raster_shape=(50, 50),
        points=np.array([[100, 100]]),
    )
    assert np.allclose(projected, np.array([[25, 25]]))
def test_world_to_raster_multiple():
    """A nested batch of points keeps its shape through the projection."""
    rasterizer = Rasta(
        m_per_px=0.5,
        raster_fixpoint=(0.5, 0.5),
        world_fixpoint=(100, 100),
        camera_rotation=0,
    )
    # Shape (1, 1, 4, 2): four copies of the world fixpoint.
    fixpoint_batch = np.array([[[[100, 100]] * 4]])
    projected = rasterizer._world_to_raster(
        raster_shape=(50, 50),
        points=fixpoint_batch,
    )
    # Every copy must land on the raster center, same nesting preserved.
    center_batch = np.array([[[[25, 25]] * 4]])
    assert fixpoint_batch.shape == center_batch.shape
    assert projected.shape == center_batch.shape
    assert np.allclose(projected, center_batch)
def test_world_to_raster_no_side_effects():
    """Projection must mutate neither the Rasta instance nor its inputs."""
    rasterizer = Rasta(
        m_per_px=0.5,
        raster_fixpoint=(0.5, 0.5),
        world_fixpoint=(100, 100),
        camera_rotation=0,
    )
    shape = (50, 50)
    query = np.array([[100, 100]])
    rasterizer._world_to_raster(raster_shape=shape, points=query)
    # Instance state is untouched.
    assert rasterizer._m_per_px == 0.5
    assert np.allclose(rasterizer._raster_fixpoint, np.array([0.5, 0.5]))
    assert np.allclose(rasterizer._world_fixpoint, np.array([100, 100]))
    assert rasterizer._camera_rotation == 0
    # Arguments are untouched.
    assert shape == (50, 50)
    assert np.allclose(query, np.array([[100, 100]]))
def test_world_to_raster_offset():
    """World offsets map to pixel offsets scaled by 1 / m_per_px."""
    rasterizer = Rasta(
        m_per_px=0.5,
        raster_fixpoint=(0.5, 0.5),
        world_fixpoint=(43, 21),
        camera_rotation=0,
    )
    projected = rasterizer._world_to_raster(
        raster_shape=(50, 50),
        points=np.array([[43 + 10, 21 - 24]]),
    )
    expected = np.array([[50 * 0.5 + 10 / 0.5, 50 * 0.5 + 24 / 0.5]])
    assert np.allclose(projected, expected)
def test_world_to_raster_rotation():
    """With a 90-degree camera rotation the offset axes swap (one negated)."""
    rasterizer = Rasta(
        m_per_px=0.5,
        raster_fixpoint=(0.5, 0.5),
        world_fixpoint=(43, 21),
        camera_rotation=np.pi / 2,
    )
    # A single bare (x, y) point, not wrapped in a batch dimension.
    projected = rasterizer._world_to_raster(
        raster_shape=(50, 50),
        points=np.array([43 + 10, 21 - 24]),
    )
    expected = np.array([50 * 0.5 - 24 / 0.5, 50 * 0.5 + 10 / 0.5])
    assert np.allclose(projected, expected)
def test_fill_poly():
    """A batched 20m x 20m square fills a 40x40-px block centered on the canvas."""
    rasterizer = Rasta(
        m_per_px=0.5,
        raster_fixpoint=(0.5, 0.5),
        world_fixpoint=(100, 100),
        camera_rotation=np.pi / 2,
    )
    square = [[90., 90.], [110., 90.], [110., 110.], [90., 110.]]
    canvas = rasterizer.fill_poly(
        canvas=np.zeros((200, 200)),
        vertices=np.array([square]),
    )
    assert (canvas[80:120, 80:120] == 1).all()
    assert (canvas[:80] == 0).all()
    assert (canvas[:, :80] == 0).all()
    assert (canvas[121:] == 0).all()
    assert (canvas[:, 121:] == 0).all()
def test_fill_poly_multiple():
    """Two identical polygons in one batch produce the same filled region."""
    rasta = Rasta(
        m_per_px=0.5,
        raster_fixpoint=(0.5, 0.5),
        world_fixpoint=(100, 100),
        camera_rotation=np.pi / 2,
    )
    square = [[90., 90.], [110., 90.], [110., 110.], [90., 110.]]
    canvas = rasta.fill_poly(
        canvas=np.zeros((200, 200)),
        vertices=np.array([square, square]),
    )
    assert (canvas[80:120, 80:120] == 1).all()
    # Overlapping fills must not exceed 1 outside either.
    for outside in (canvas[:80], canvas[:, :80], canvas[121:], canvas[:, 121:]):
        assert (outside == 0).all()
def test_fill_poly_single():
    """A single un-batched (N, 2) polygon array is accepted as well."""
    rasta = Rasta(
        m_per_px=0.5,
        raster_fixpoint=(0.5, 0.5),
        world_fixpoint=(100, 100),
        camera_rotation=np.pi / 2,
    )
    square = np.array([[90., 90.], [110., 90.], [110., 110.], [90., 110.]])
    canvas = rasta.fill_poly(canvas=np.zeros((200, 200)), vertices=square)
    assert (canvas[80:120, 80:120] == 1).all()
    for outside in (canvas[:80], canvas[:, :80], canvas[121:], canvas[:, 121:]):
        assert (outside == 0).all()
def test_fill_poly_array():
    """A plain nested list (batched) works without converting to ndarray."""
    rasta = Rasta(
        m_per_px=0.5,
        raster_fixpoint=(0.5, 0.5),
        world_fixpoint=(100, 100),
        camera_rotation=np.pi / 2,
    )
    canvas = rasta.fill_poly(
        canvas=np.zeros((200, 200)),
        vertices=[[[90., 90.], [110., 90.], [110., 110.], [90., 110.]]],
    )
    assert (canvas[80:120, 80:120] == 1).all()
    for outside in (canvas[:80], canvas[:, :80], canvas[121:], canvas[:, 121:]):
        assert (outside == 0).all()
def test_fill_poly_array_single():
    """A plain nested list for a single polygon (no batch axis) works too."""
    rasta = Rasta(
        m_per_px=0.5,
        raster_fixpoint=(0.5, 0.5),
        world_fixpoint=(100, 100),
        camera_rotation=np.pi / 2,
    )
    canvas = rasta.fill_poly(
        canvas=np.zeros((200, 200)),
        vertices=[[90., 90.], [110., 90.], [110., 110.], [90., 110.]],
    )
    assert (canvas[80:120, 80:120] == 1).all()
    for outside in (canvas[:80], canvas[:, :80], canvas[121:], canvas[:, 121:]):
        assert (outside == 0).all()
def test_fill_circle():
    """Smoke test: fill_circle accepts tuple/list centers and int/0-d radii."""
    rasta = Rasta(
        m_per_px=0.5,
        raster_fixpoint=(0.5, 0.5),
        world_fixpoint=(100, 100),
        camera_rotation=np.pi / 2,
    )
    # Tuple center with an int radius.
    rasta.fill_circle(np.zeros((2000, 2000)), (40.2, -23.572), 10)
    # List center with a 0-d ndarray radius.
    rasta.fill_circle(np.zeros((2000, 2000)), [40.2, -23.572], np.array(10.1))
def test_polylines():
    """A batch of all-zero polylines marks the raster center pixel."""
    rasta = Rasta(
        m_per_px=0.5,
        raster_fixpoint=(0.5, 0.5),
        world_fixpoint=(0, 0),
        camera_rotation=np.pi / 2,
    )
    result = rasta.polylines(
        np.zeros((100, 100)),
        vertices=np.zeros((5, 10, 2)),
    )
    # World (0, 0) is the fixpoint, i.e. the center of the 100x100 raster.
    assert result[50, 50] == 1
def test_polylines_single():
    """A single un-batched (N, 2) polyline is accepted."""
    rasta = Rasta(
        m_per_px=0.5,
        raster_fixpoint=(0.5, 0.5),
        world_fixpoint=(0, 0),
        camera_rotation=np.pi / 2,
    )
    result = rasta.polylines(
        np.zeros((100, 100)),
        vertices=np.zeros((10, 2)),
    )
    assert result[50, 50] == 1
def test_polylines_iterable():
    """A batched plain nested list of vertices is accepted."""
    rasta = Rasta(
        m_per_px=0.5,
        raster_fixpoint=(0.5, 0.5),
        world_fixpoint=(0, 0),
        camera_rotation=np.pi / 2,
    )
    result = rasta.polylines(
        np.zeros((100, 100)),
        vertices=[[[0, 0], [0, 0], [0, 0]]],
    )
    assert result[50, 50] == 1
def test_polylines_iterable_single():
    """A single plain-list polyline marks exactly one pixel (the center)."""
    rasta = Rasta(
        m_per_px=0.5,
        raster_fixpoint=(0.5, 0.5),
        world_fixpoint=(0, 0),
        camera_rotation=np.pi / 2,
    )
    result = rasta.polylines(
        np.zeros((100, 100)),
        vertices=[[0, 0], [0, 0], [0, 0]],
    )
    assert result[50, 50] == 1
    # Degenerate line: only the center pixel is set on the whole canvas.
    assert result.sum() == 1
def test_rect_vertices():
    """Corner coordinates of a single rectangle rotated by pi/2."""
    vertices = Rasta._rect_vertices(
        center=np.array([4, 3]),
        length=np.array([14]),
        width=np.array([10]),
        rotation=np.array([np.pi / 2]),
    )
    # After a pi/2 rotation, length extends along y and width along x.
    half_l, half_w = 14 / 2, 10 / 2
    expected = np.array([
        [4 - half_w, 3 + half_l],
        [4 - half_w, 3 - half_l],
        [4 + half_w, 3 - half_l],
        [4 + half_w, 3 + half_l],
    ])
    assert np.allclose(vertices, expected)
def test_rect_vertices_multiple():
    """Batched (3, ...) inputs yield the single-rectangle vertices tiled 3x.

    BUG FIX: the original rebound ``vertices`` to the expected literal right
    after calling ``Rasta._rect_vertices``, so the assertion compared the
    literal against a tile of itself and never checked the function's output.
    The computed result is now kept and compared against the tiled expectation.
    """
    center = np.array([4, 3])
    length = np.array([14])
    width = np.array([10])
    rotation = np.array([np.pi / 2])
    result = Rasta._rect_vertices(
        center=np.tile(center, (3, 1)),
        length=np.tile(length, (3, 1)),
        width=np.tile(width, (3, 1)),
        rotation=np.tile(rotation, (3, 1)),
    )
    # Expected corners of one pi/2-rotated 14x10 rectangle centered at (4, 3).
    single = np.array([
        [4 - 10 / 2, 3 + 14 / 2],
        [4 - 10 / 2, 3 - 14 / 2],
        [4 + 10 / 2, 3 - 14 / 2],
        [4 + 10 / 2, 3 + 14 / 2],
    ])
    expected = np.tile(single, (3, 1, 1))
    assert result.shape == expected.shape
    assert np.allclose(result, expected)
def test_rect_vertices_multiple_1dim():
    """1-D batched length/width/rotation also yield the tiled vertices.

    BUG FIX: as in test_rect_vertices_multiple, the original overwrote the
    ``Rasta._rect_vertices`` result with the expected literal before
    asserting, so the function output was never checked. The result is now
    kept and compared against the tiled expectation.
    """
    center = np.array([4, 3])
    length = np.array([14])
    width = np.array([10])
    rotation = np.array([np.pi / 2])
    result = Rasta._rect_vertices(
        center=np.tile(center, (3, 1)),
        length=np.tile(length, (3,)),
        width=np.tile(width, (3,)),
        rotation=np.tile(rotation, (3,)),
    )
    # Expected corners of one pi/2-rotated 14x10 rectangle centered at (4, 3).
    single = np.array([
        [4 - 10 / 2, 3 + 14 / 2],
        [4 - 10 / 2, 3 - 14 / 2],
        [4 + 10 / 2, 3 - 14 / 2],
        [4 + 10 / 2, 3 + 14 / 2],
    ])
    expected = np.tile(single, (3, 1, 1))
    assert result.shape == expected.shape
    assert np.allclose(result, expected)
def test_fill_tilted_rect():
    """A 30x20 m rect rotated pi/2 fills a 40-row by 60-col pixel patch."""
    rasta = Rasta(
        m_per_px=0.5,
        raster_fixpoint=(0.5, 0.5),
        world_fixpoint=(100, 100),
        camera_rotation=np.pi / 2,
    )
    canvas = rasta.fill_tilted_rect(
        canvas=np.zeros((200, 200)),
        center=np.array([100, 100]),
        length=np.array([30]),
        width=np.array([20]),
        rotation=np.array([np.pi / 2]),
    )
    assert (canvas[80:120, 70:130] == 1).all()
    for outside in (canvas[:80], canvas[121:], canvas[:, :70], canvas[:, 131:]):
        assert (outside == 0).all()
def test_fill_tilted_rect_multiple():
    """Ten identical tilted rectangles in one batch fill the same region once."""
    rasta = Rasta(
        m_per_px=0.5,
        raster_fixpoint=(0.5, 0.5),
        world_fixpoint=(100, 100),
        camera_rotation=np.pi / 2,
    )
    n = 10
    canvas = rasta.fill_tilted_rect(
        canvas=np.zeros((200, 200)),
        center=np.full((n, 2), 100.0),
        length=np.full((n,), 30.0),
        width=np.full((n,), 20.0),
        rotation=np.full((n,), np.pi / 2),
    )
    for outside in (canvas[:80], canvas[121:], canvas[:, :70], canvas[:, 131:]):
        assert (outside == 0).all()
    assert (canvas[80:120, 70:130] == 1).all()
def test_fill_tilted_rect_scalar():
    """0-d ndarray length/width are accepted alongside 1-d arrays."""
    rasta = Rasta(
        m_per_px=0.5,
        raster_fixpoint=(0.5, 0.5),
        world_fixpoint=(100, 100),
        camera_rotation=np.pi / 2,
    )
    canvas = rasta.fill_tilted_rect(
        canvas=np.zeros((200, 200)),
        center=np.array([100, 100]),
        length=np.array(30),
        width=np.array(20),
        rotation=np.array([np.pi / 2]),
    )
    assert (canvas[80:120, 70:130] == 1).all()
    for outside in (canvas[:80], canvas[121:], canvas[:, :70], canvas[:, 131:]):
        assert (outside == 0).all()
def test_fill_tilted_rect_list():
    """Plain Python scalars and lists are accepted for all parameters."""
    rasta = Rasta(
        m_per_px=0.5,
        raster_fixpoint=(0.5, 0.5),
        world_fixpoint=(100, 100),
        camera_rotation=np.pi / 2,
    )
    canvas = rasta.fill_tilted_rect(
        canvas=np.zeros((200, 200)),
        center=[100, 100],
        length=30,
        width=20,
        rotation=np.pi / 2,
    )
    assert (canvas[80:120, 70:130] == 1).all()
    for outside in (canvas[:80], canvas[121:], canvas[:, :70], canvas[:, 131:]):
        assert (outside == 0).all()
def test_fill_tilted_rect_list_multiple():
    """Batched plain lists of centers/lengths/widths/rotations are accepted."""
    rasta = Rasta(
        m_per_px=0.5,
        raster_fixpoint=(0.5, 0.5),
        world_fixpoint=(100, 100),
        camera_rotation=np.pi / 2,
    )
    canvas = rasta.fill_tilted_rect(
        canvas=np.zeros((200, 200)),
        center=[[100, 100], [100, 100]],
        length=[30, 30],
        width=[20, 20],
        rotation=[np.pi / 2, np.pi / 2],
    )
    assert (canvas[80:120, 70:130] == 1).all()
    for outside in (canvas[:80], canvas[121:], canvas[:, :70], canvas[:, 131:]):
        assert (outside == 0).all()
def test_fill_tilted_rect_offset():
    """A rect centered off the fixpoint lands at the shifted pixel region."""
    rasta = Rasta(
        m_per_px=0.5,
        raster_fixpoint=(0.5, 0.5),
        world_fixpoint=(100, 100),
        camera_rotation=np.pi / 2,
    )
    canvas = rasta.fill_tilted_rect(
        canvas=np.zeros((200, 200)),
        center=[100, 90],
        length=30,
        width=20,
        rotation=np.pi / 2,
    )
    # 10 m offset = 20 px: columns shift from 70:130 to 50:110.
    assert (canvas[80:120, 50:110] == 1).all()
    for outside in (canvas[:80], canvas[121:], canvas[:, :50], canvas[:, 111:]):
        assert (outside == 0).all()
"intersim.viz.Rasta",
"numpy.allclose",
"numpy.zeros",
"numpy.ones",
"numpy.array",
"numpy.tile"
] | [((91, 188), 'intersim.viz.Rasta', 'Rasta', ([], {'m_per_px': '(0.5)', 'raster_fixpoint': '(0.5, 0.5)', 'world_fixpoint': '(100, 100)', 'camera_rotation': '(0)'}), '(m_per_px=0.5, raster_fixpoint=(0.5, 0.5), world_fixpoint=(100, 100),\n camera_rotation=0)\n', (96, 188), False, 'from intersim.viz import Rasta\n'), ((357, 377), 'numpy.array', 'np.array', (['[[25, 25]]'], {}), '([[25, 25]])\n', (365, 377), True, 'import numpy as np\n'), ((389, 416), 'numpy.allclose', 'np.allclose', (['coords', 'expect'], {}), '(coords, expect)\n', (400, 416), True, 'import numpy as np\n'), ((467, 564), 'intersim.viz.Rasta', 'Rasta', ([], {'m_per_px': '(0.5)', 'raster_fixpoint': '(0.5, 0.5)', 'world_fixpoint': '(100, 100)', 'camera_rotation': '(0)'}), '(m_per_px=0.5, raster_fixpoint=(0.5, 0.5), world_fixpoint=(100, 100),\n camera_rotation=0)\n', (472, 564), False, 'from intersim.viz import Rasta\n'), ((621, 683), 'numpy.array', 'np.array', (['[[[[100, 100], [100, 100], [100, 100], [100, 100]]]]'], {}), '([[[[100, 100], [100, 100], [100, 100], [100, 100]]]])\n', (629, 683), True, 'import numpy as np\n'), ((834, 888), 'numpy.array', 'np.array', (['[[[[25, 25], [25, 25], [25, 25], [25, 25]]]]'], {}), '([[[[25, 25], [25, 25], [25, 25], [25, 25]]]])\n', (842, 888), True, 'import numpy as np\n'), ((1020, 1047), 'numpy.allclose', 'np.allclose', (['coords', 'expect'], {}), '(coords, expect)\n', (1031, 1047), True, 'import numpy as np\n'), ((1105, 1202), 'intersim.viz.Rasta', 'Rasta', ([], {'m_per_px': '(0.5)', 'raster_fixpoint': '(0.5, 0.5)', 'world_fixpoint': '(100, 100)', 'camera_rotation': '(0)'}), '(m_per_px=0.5, raster_fixpoint=(0.5, 0.5), world_fixpoint=(100, 100),\n camera_rotation=0)\n', (1110, 1202), False, 'from intersim.viz import Rasta\n'), ((1286, 1308), 'numpy.array', 'np.array', (['[[100, 100]]'], {}), '([[100, 100]])\n', (1294, 1308), True, 'import numpy as np\n'), ((1751, 1846), 'intersim.viz.Rasta', 'Rasta', ([], {'m_per_px': '(0.5)', 'raster_fixpoint': '(0.5, 0.5)', 
'world_fixpoint': '(43, 21)', 'camera_rotation': '(0)'}), '(m_per_px=0.5, raster_fixpoint=(0.5, 0.5), world_fixpoint=(43, 21),\n camera_rotation=0)\n', (1756, 1846), False, 'from intersim.viz import Rasta\n'), ((2015, 2069), 'numpy.array', 'np.array', (['[[50 * 0.5 + 10 / 0.5, 50 * 0.5 + 24 / 0.5]]'], {}), '([[50 * 0.5 + 10 / 0.5, 50 * 0.5 + 24 / 0.5]])\n', (2023, 2069), True, 'import numpy as np\n'), ((2073, 2100), 'numpy.allclose', 'np.allclose', (['coords', 'expect'], {}), '(coords, expect)\n', (2084, 2100), True, 'import numpy as np\n'), ((2151, 2254), 'intersim.viz.Rasta', 'Rasta', ([], {'m_per_px': '(0.5)', 'raster_fixpoint': '(0.5, 0.5)', 'world_fixpoint': '(43, 21)', 'camera_rotation': '(np.pi / 2)'}), '(m_per_px=0.5, raster_fixpoint=(0.5, 0.5), world_fixpoint=(43, 21),\n camera_rotation=np.pi / 2)\n', (2156, 2254), False, 'from intersim.viz import Rasta\n'), ((2419, 2471), 'numpy.array', 'np.array', (['[50 * 0.5 - 24 / 0.5, 50 * 0.5 + 10 / 0.5]'], {}), '([50 * 0.5 - 24 / 0.5, 50 * 0.5 + 10 / 0.5])\n', (2427, 2471), True, 'import numpy as np\n'), ((2475, 2502), 'numpy.allclose', 'np.allclose', (['coords', 'expect'], {}), '(coords, expect)\n', (2486, 2502), True, 'import numpy as np\n'), ((2538, 2643), 'intersim.viz.Rasta', 'Rasta', ([], {'m_per_px': '(0.5)', 'raster_fixpoint': '(0.5, 0.5)', 'world_fixpoint': '(100, 100)', 'camera_rotation': '(np.pi / 2)'}), '(m_per_px=0.5, raster_fixpoint=(0.5, 0.5), world_fixpoint=(100, 100),\n camera_rotation=np.pi / 2)\n', (2543, 2643), False, 'from intersim.viz import Rasta\n'), ((3074, 3179), 'intersim.viz.Rasta', 'Rasta', ([], {'m_per_px': '(0.5)', 'raster_fixpoint': '(0.5, 0.5)', 'world_fixpoint': '(100, 100)', 'camera_rotation': '(np.pi / 2)'}), '(m_per_px=0.5, raster_fixpoint=(0.5, 0.5), world_fixpoint=(100, 100),\n camera_rotation=np.pi / 2)\n', (3079, 3179), False, 'from intersim.viz import Rasta\n'), ((3697, 3802), 'intersim.viz.Rasta', 'Rasta', ([], {'m_per_px': '(0.5)', 'raster_fixpoint': '(0.5, 0.5)', 
'world_fixpoint': '(100, 100)', 'camera_rotation': '(np.pi / 2)'}), '(m_per_px=0.5, raster_fixpoint=(0.5, 0.5), world_fixpoint=(100, 100),\n camera_rotation=np.pi / 2)\n', (3702, 3802), False, 'from intersim.viz import Rasta\n'), ((4228, 4333), 'intersim.viz.Rasta', 'Rasta', ([], {'m_per_px': '(0.5)', 'raster_fixpoint': '(0.5, 0.5)', 'world_fixpoint': '(100, 100)', 'camera_rotation': '(np.pi / 2)'}), '(m_per_px=0.5, raster_fixpoint=(0.5, 0.5), world_fixpoint=(100, 100),\n camera_rotation=np.pi / 2)\n', (4233, 4333), False, 'from intersim.viz import Rasta\n'), ((4758, 4863), 'intersim.viz.Rasta', 'Rasta', ([], {'m_per_px': '(0.5)', 'raster_fixpoint': '(0.5, 0.5)', 'world_fixpoint': '(100, 100)', 'camera_rotation': '(np.pi / 2)'}), '(m_per_px=0.5, raster_fixpoint=(0.5, 0.5), world_fixpoint=(100, 100),\n camera_rotation=np.pi / 2)\n', (4763, 4863), False, 'from intersim.viz import Rasta\n'), ((5275, 5380), 'intersim.viz.Rasta', 'Rasta', ([], {'m_per_px': '(0.5)', 'raster_fixpoint': '(0.5, 0.5)', 'world_fixpoint': '(100, 100)', 'camera_rotation': '(np.pi / 2)'}), '(m_per_px=0.5, raster_fixpoint=(0.5, 0.5), world_fixpoint=(100, 100),\n camera_rotation=np.pi / 2)\n', (5280, 5380), False, 'from intersim.viz import Rasta\n'), ((5640, 5741), 'intersim.viz.Rasta', 'Rasta', ([], {'m_per_px': '(0.5)', 'raster_fixpoint': '(0.5, 0.5)', 'world_fixpoint': '(0, 0)', 'camera_rotation': '(np.pi / 2)'}), '(m_per_px=0.5, raster_fixpoint=(0.5, 0.5), world_fixpoint=(0, 0),\n camera_rotation=np.pi / 2)\n', (5645, 5741), False, 'from intersim.viz import Rasta\n'), ((5945, 6046), 'intersim.viz.Rasta', 'Rasta', ([], {'m_per_px': '(0.5)', 'raster_fixpoint': '(0.5, 0.5)', 'world_fixpoint': '(0, 0)', 'camera_rotation': '(np.pi / 2)'}), '(m_per_px=0.5, raster_fixpoint=(0.5, 0.5), world_fixpoint=(0, 0),\n camera_rotation=np.pi / 2)\n', (5950, 6046), False, 'from intersim.viz import Rasta\n'), ((6249, 6350), 'intersim.viz.Rasta', 'Rasta', ([], {'m_per_px': '(0.5)', 'raster_fixpoint': '(0.5, 0.5)', 
'world_fixpoint': '(0, 0)', 'camera_rotation': '(np.pi / 2)'}), '(m_per_px=0.5, raster_fixpoint=(0.5, 0.5), world_fixpoint=(0, 0),\n camera_rotation=np.pi / 2)\n', (6254, 6350), False, 'from intersim.viz import Rasta\n'), ((6569, 6670), 'intersim.viz.Rasta', 'Rasta', ([], {'m_per_px': '(0.5)', 'raster_fixpoint': '(0.5, 0.5)', 'world_fixpoint': '(0, 0)', 'camera_rotation': '(np.pi / 2)'}), '(m_per_px=0.5, raster_fixpoint=(0.5, 0.5), world_fixpoint=(0, 0),\n camera_rotation=np.pi / 2)\n', (6574, 6670), False, 'from intersim.viz import Rasta\n'), ((7079, 7198), 'numpy.array', 'np.array', (['[[4 - 10 / 2, 3 + 14 / 2], [4 - 10 / 2, 3 - 14 / 2], [4 + 10 / 2, 3 - 14 / \n 2], [4 + 10 / 2, 3 + 14 / 2]]'], {}), '([[4 - 10 / 2, 3 + 14 / 2], [4 - 10 / 2, 3 - 14 / 2], [4 + 10 / 2, \n 3 - 14 / 2], [4 + 10 / 2, 3 + 14 / 2]])\n', (7087, 7198), True, 'import numpy as np\n'), ((7228, 7259), 'numpy.allclose', 'np.allclose', (['vertices', 'expected'], {}), '(vertices, expected)\n', (7239, 7259), True, 'import numpy as np\n'), ((7309, 7325), 'numpy.array', 'np.array', (['[4, 3]'], {}), '([4, 3])\n', (7317, 7325), True, 'import numpy as np\n'), ((7339, 7353), 'numpy.array', 'np.array', (['[14]'], {}), '([14])\n', (7347, 7353), True, 'import numpy as np\n'), ((7366, 7380), 'numpy.array', 'np.array', (['[10]'], {}), '([10])\n', (7374, 7380), True, 'import numpy as np\n'), ((7396, 7417), 'numpy.array', 'np.array', (['[np.pi / 2]'], {}), '([np.pi / 2])\n', (7404, 7417), True, 'import numpy as np\n'), ((7638, 7757), 'numpy.array', 'np.array', (['[[4 - 10 / 2, 3 + 14 / 2], [4 - 10 / 2, 3 - 14 / 2], [4 + 10 / 2, 3 - 14 / \n 2], [4 + 10 / 2, 3 + 14 / 2]]'], {}), '([[4 - 10 / 2, 3 + 14 / 2], [4 - 10 / 2, 3 - 14 / 2], [4 + 10 / 2, \n 3 - 14 / 2], [4 + 10 / 2, 3 + 14 / 2]])\n', (7646, 7757), True, 'import numpy as np\n'), ((7791, 7819), 'numpy.tile', 'np.tile', (['vertices', '(3, 1, 1)'], {}), '(vertices, (3, 1, 1))\n', (7798, 7819), True, 'import numpy as np\n'), ((7836, 7867), 'numpy.allclose', 
'np.allclose', (['vertices', 'expected'], {}), '(vertices, expected)\n', (7847, 7867), True, 'import numpy as np\n'), ((7922, 7938), 'numpy.array', 'np.array', (['[4, 3]'], {}), '([4, 3])\n', (7930, 7938), True, 'import numpy as np\n'), ((7952, 7966), 'numpy.array', 'np.array', (['[14]'], {}), '([14])\n', (7960, 7966), True, 'import numpy as np\n'), ((7979, 7993), 'numpy.array', 'np.array', (['[10]'], {}), '([10])\n', (7987, 7993), True, 'import numpy as np\n'), ((8009, 8030), 'numpy.array', 'np.array', (['[np.pi / 2]'], {}), '([np.pi / 2])\n', (8017, 8030), True, 'import numpy as np\n'), ((8244, 8363), 'numpy.array', 'np.array', (['[[4 - 10 / 2, 3 + 14 / 2], [4 - 10 / 2, 3 - 14 / 2], [4 + 10 / 2, 3 - 14 / \n 2], [4 + 10 / 2, 3 + 14 / 2]]'], {}), '([[4 - 10 / 2, 3 + 14 / 2], [4 - 10 / 2, 3 - 14 / 2], [4 + 10 / 2, \n 3 - 14 / 2], [4 + 10 / 2, 3 + 14 / 2]])\n', (8252, 8363), True, 'import numpy as np\n'), ((8397, 8425), 'numpy.tile', 'np.tile', (['vertices', '(3, 1, 1)'], {}), '(vertices, (3, 1, 1))\n', (8404, 8425), True, 'import numpy as np\n'), ((8442, 8473), 'numpy.allclose', 'np.allclose', (['vertices', 'expected'], {}), '(vertices, expected)\n', (8453, 8473), True, 'import numpy as np\n'), ((8516, 8621), 'intersim.viz.Rasta', 'Rasta', ([], {'m_per_px': '(0.5)', 'raster_fixpoint': '(0.5, 0.5)', 'world_fixpoint': '(100, 100)', 'camera_rotation': '(np.pi / 2)'}), '(m_per_px=0.5, raster_fixpoint=(0.5, 0.5), world_fixpoint=(100, 100),\n camera_rotation=np.pi / 2)\n', (8521, 8621), False, 'from intersim.viz import Rasta\n'), ((9120, 9225), 'intersim.viz.Rasta', 'Rasta', ([], {'m_per_px': '(0.5)', 'raster_fixpoint': '(0.5, 0.5)', 'world_fixpoint': '(100, 100)', 'camera_rotation': '(np.pi / 2)'}), '(m_per_px=0.5, raster_fixpoint=(0.5, 0.5), world_fixpoint=(100, 100),\n camera_rotation=np.pi / 2)\n', (9125, 9225), False, 'from intersim.viz import Rasta\n'), ((9821, 9926), 'intersim.viz.Rasta', 'Rasta', ([], {'m_per_px': '(0.5)', 'raster_fixpoint': '(0.5, 0.5)', 
'world_fixpoint': '(100, 100)', 'camera_rotation': '(np.pi / 2)'}), '(m_per_px=0.5, raster_fixpoint=(0.5, 0.5), world_fixpoint=(100, 100),\n camera_rotation=np.pi / 2)\n', (9826, 9926), False, 'from intersim.viz import Rasta\n'), ((10417, 10522), 'intersim.viz.Rasta', 'Rasta', ([], {'m_per_px': '(0.5)', 'raster_fixpoint': '(0.5, 0.5)', 'world_fixpoint': '(100, 100)', 'camera_rotation': '(np.pi / 2)'}), '(m_per_px=0.5, raster_fixpoint=(0.5, 0.5), world_fixpoint=(100, 100),\n camera_rotation=np.pi / 2)\n', (10422, 10522), False, 'from intersim.viz import Rasta\n'), ((10980, 11085), 'intersim.viz.Rasta', 'Rasta', ([], {'m_per_px': '(0.5)', 'raster_fixpoint': '(0.5, 0.5)', 'world_fixpoint': '(100, 100)', 'camera_rotation': '(np.pi / 2)'}), '(m_per_px=0.5, raster_fixpoint=(0.5, 0.5), world_fixpoint=(100, 100),\n camera_rotation=np.pi / 2)\n', (10985, 11085), False, 'from intersim.viz import Rasta\n'), ((11573, 11678), 'intersim.viz.Rasta', 'Rasta', ([], {'m_per_px': '(0.5)', 'raster_fixpoint': '(0.5, 0.5)', 'world_fixpoint': '(100, 100)', 'camera_rotation': '(np.pi / 2)'}), '(m_per_px=0.5, raster_fixpoint=(0.5, 0.5), world_fixpoint=(100, 100),\n camera_rotation=np.pi / 2)\n', (11578, 11678), False, 'from intersim.viz import Rasta\n'), ((1483, 1503), 'numpy.array', 'np.array', (['[0.5, 0.5]'], {}), '([0.5, 0.5])\n', (1491, 1503), True, 'import numpy as np\n'), ((1551, 1571), 'numpy.array', 'np.array', (['[100, 100]'], {}), '([100, 100])\n', (1559, 1571), True, 'import numpy as np\n'), ((1679, 1701), 'numpy.array', 'np.array', (['[[100, 100]]'], {}), '([[100, 100]])\n', (1687, 1701), True, 'import numpy as np\n'), ((5444, 5466), 'numpy.zeros', 'np.zeros', (['(2000, 2000)'], {}), '((2000, 2000))\n', (5452, 5466), True, 'import numpy as np\n'), ((5534, 5556), 'numpy.zeros', 'np.zeros', (['(2000, 2000)'], {}), '((2000, 2000))\n', (5542, 5556), True, 'import numpy as np\n'), ((5583, 5597), 'numpy.array', 'np.array', (['(10.1)'], {}), '(10.1)\n', (5591, 5597), True, 'import 
numpy as np\n'), ((5809, 5829), 'numpy.zeros', 'np.zeros', (['(100, 100)'], {}), '((100, 100))\n', (5817, 5829), True, 'import numpy as np\n'), ((6114, 6134), 'numpy.zeros', 'np.zeros', (['(100, 100)'], {}), '((100, 100))\n', (6122, 6134), True, 'import numpy as np\n'), ((6418, 6438), 'numpy.zeros', 'np.zeros', (['(100, 100)'], {}), '((100, 100))\n', (6426, 6438), True, 'import numpy as np\n'), ((6738, 6758), 'numpy.zeros', 'np.zeros', (['(100, 100)'], {}), '((100, 100))\n', (6746, 6758), True, 'import numpy as np\n'), ((9277, 9293), 'numpy.ones', 'np.ones', (['(10, 2)'], {}), '((10, 2))\n', (9284, 9293), True, 'import numpy as np\n'), ((9312, 9326), 'numpy.ones', 'np.ones', (['(10,)'], {}), '((10,))\n', (9319, 9326), True, 'import numpy as np\n'), ((9344, 9358), 'numpy.ones', 'np.ones', (['(10,)'], {}), '((10,))\n', (9351, 9358), True, 'import numpy as np\n'), ((9384, 9398), 'numpy.ones', 'np.ones', (['(10,)'], {}), '((10,))\n', (9391, 9398), True, 'import numpy as np\n'), ((314, 336), 'numpy.array', 'np.array', (['[[100, 100]]'], {}), '([[100, 100]])\n', (322, 336), True, 'import numpy as np\n'), ((1964, 1994), 'numpy.array', 'np.array', (['[[43 + 10, 21 - 24]]'], {}), '([[43 + 10, 21 - 24]])\n', (1972, 1994), True, 'import numpy as np\n'), ((2370, 2398), 'numpy.array', 'np.array', (['[43 + 10, 21 - 24]'], {}), '([43 + 10, 21 - 24])\n', (2378, 2398), True, 'import numpy as np\n'), ((2721, 2741), 'numpy.zeros', 'np.zeros', (['(200, 200)'], {}), '((200, 200))\n', (2729, 2741), True, 'import numpy as np\n'), ((2760, 2832), 'numpy.array', 'np.array', (['[[[90.0, 90.0], [110.0, 90.0], [110.0, 110.0], [90.0, 110.0]]]'], {}), '([[[90.0, 90.0], [110.0, 90.0], [110.0, 110.0], [90.0, 110.0]]])\n', (2768, 2832), True, 'import numpy as np\n'), ((3257, 3277), 'numpy.zeros', 'np.zeros', (['(200, 200)'], {}), '((200, 200))\n', (3265, 3277), True, 'import numpy as np\n'), ((3296, 3435), 'numpy.array', 'np.array', (['[[[90.0, 90.0], [110.0, 90.0], [110.0, 110.0], [90.0, 110.0]], 
[[90.0, 90.0\n ], [110.0, 90.0], [110.0, 110.0], [90.0, 110.0]]]'], {}), '([[[90.0, 90.0], [110.0, 90.0], [110.0, 110.0], [90.0, 110.0]], [[\n 90.0, 90.0], [110.0, 90.0], [110.0, 110.0], [90.0, 110.0]]])\n', (3304, 3435), True, 'import numpy as np\n'), ((3880, 3900), 'numpy.zeros', 'np.zeros', (['(200, 200)'], {}), '((200, 200))\n', (3888, 3900), True, 'import numpy as np\n'), ((3919, 3989), 'numpy.array', 'np.array', (['[[90.0, 90.0], [110.0, 90.0], [110.0, 110.0], [90.0, 110.0]]'], {}), '([[90.0, 90.0], [110.0, 90.0], [110.0, 110.0], [90.0, 110.0]])\n', (3927, 3989), True, 'import numpy as np\n'), ((4411, 4431), 'numpy.zeros', 'np.zeros', (['(200, 200)'], {}), '((200, 200))\n', (4419, 4431), True, 'import numpy as np\n'), ((4941, 4961), 'numpy.zeros', 'np.zeros', (['(200, 200)'], {}), '((200, 200))\n', (4949, 4961), True, 'import numpy as np\n'), ((5848, 5868), 'numpy.zeros', 'np.zeros', (['(5, 10, 2)'], {}), '((5, 10, 2))\n', (5856, 5868), True, 'import numpy as np\n'), ((6153, 6170), 'numpy.zeros', 'np.zeros', (['(10, 2)'], {}), '((10, 2))\n', (6161, 6170), True, 'import numpy as np\n'), ((6941, 6957), 'numpy.array', 'np.array', (['[4, 3]'], {}), '([4, 3])\n', (6949, 6957), True, 'import numpy as np\n'), ((6974, 6988), 'numpy.array', 'np.array', (['[14]'], {}), '([14])\n', (6982, 6988), True, 'import numpy as np\n'), ((7004, 7018), 'numpy.array', 'np.array', (['[10]'], {}), '([10])\n', (7012, 7018), True, 'import numpy as np\n'), ((7037, 7058), 'numpy.array', 'np.array', (['[np.pi / 2]'], {}), '([np.pi / 2])\n', (7045, 7058), True, 'import numpy as np\n'), ((7469, 7492), 'numpy.tile', 'np.tile', (['center', '(3, 1)'], {}), '(center, (3, 1))\n', (7476, 7492), True, 'import numpy as np\n'), ((7509, 7532), 'numpy.tile', 'np.tile', (['length', '(3, 1)'], {}), '(length, (3, 1))\n', (7516, 7532), True, 'import numpy as np\n'), ((7548, 7570), 'numpy.tile', 'np.tile', (['width', '(3, 1)'], {}), '(width, (3, 1))\n', (7555, 7570), True, 'import numpy as np\n'), ((7589, 
7614), 'numpy.tile', 'np.tile', (['rotation', '(3, 1)'], {}), '(rotation, (3, 1))\n', (7596, 7614), True, 'import numpy as np\n'), ((8082, 8105), 'numpy.tile', 'np.tile', (['center', '(3, 1)'], {}), '(center, (3, 1))\n', (8089, 8105), True, 'import numpy as np\n'), ((8121, 8142), 'numpy.tile', 'np.tile', (['length', '(3,)'], {}), '(length, (3,))\n', (8128, 8142), True, 'import numpy as np\n'), ((8158, 8178), 'numpy.tile', 'np.tile', (['width', '(3,)'], {}), '(width, (3,))\n', (8165, 8178), True, 'import numpy as np\n'), ((8197, 8220), 'numpy.tile', 'np.tile', (['rotation', '(3,)'], {}), '(rotation, (3,))\n', (8204, 8220), True, 'import numpy as np\n'), ((8706, 8726), 'numpy.zeros', 'np.zeros', (['(200, 200)'], {}), '((200, 200))\n', (8714, 8726), True, 'import numpy as np\n'), ((8743, 8763), 'numpy.array', 'np.array', (['[100, 100]'], {}), '([100, 100])\n', (8751, 8763), True, 'import numpy as np\n'), ((8780, 8794), 'numpy.array', 'np.array', (['[30]'], {}), '([30])\n', (8788, 8794), True, 'import numpy as np\n'), ((8810, 8824), 'numpy.array', 'np.array', (['[20]'], {}), '([20])\n', (8818, 8824), True, 'import numpy as np\n'), ((8843, 8864), 'numpy.array', 'np.array', (['[np.pi / 2]'], {}), '([np.pi / 2])\n', (8851, 8864), True, 'import numpy as np\n'), ((9451, 9471), 'numpy.zeros', 'np.zeros', (['(200, 200)'], {}), '((200, 200))\n', (9459, 9471), True, 'import numpy as np\n'), ((10011, 10031), 'numpy.zeros', 'np.zeros', (['(200, 200)'], {}), '((200, 200))\n', (10019, 10031), True, 'import numpy as np\n'), ((10048, 10068), 'numpy.array', 'np.array', (['[100, 100]'], {}), '([100, 100])\n', (10056, 10068), True, 'import numpy as np\n'), ((10085, 10097), 'numpy.array', 'np.array', (['(30)'], {}), '(30)\n', (10093, 10097), True, 'import numpy as np\n'), ((10113, 10125), 'numpy.array', 'np.array', (['(20)'], {}), '(20)\n', (10121, 10125), True, 'import numpy as np\n'), ((10144, 10165), 'numpy.array', 'np.array', (['[np.pi / 2]'], {}), '([np.pi / 2])\n', (10152, 10165), 
True, 'import numpy as np\n'), ((10607, 10627), 'numpy.zeros', 'np.zeros', (['(200, 200)'], {}), '((200, 200))\n', (10615, 10627), True, 'import numpy as np\n'), ((11170, 11190), 'numpy.zeros', 'np.zeros', (['(200, 200)'], {}), '((200, 200))\n', (11178, 11190), True, 'import numpy as np\n'), ((11763, 11783), 'numpy.zeros', 'np.zeros', (['(200, 200)'], {}), '((200, 200))\n', (11771, 11783), True, 'import numpy as np\n')] |
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.dates as dates
import matplotlib.ticker as ticker
import matplotlib.animation as animation
import matplotlib.gridspec as gridspec
import pandas as pd
import numpy as np
import talib as ta
from matplotlib.dates import date2num
from matplotlib import style
from mpl_finance import candlestick_ohlc as candlestick
from market_maker.plot import analysis
from market_maker.utils import log
import time
from market_maker.utils.singleton import singleton_data
# Module-wide logger shared by the plotting class below.
logger = log.setup_custom_logger('root')
# NOTE(review): this rebinding shadows the `matplotlib.ticker as ticker`
# import above, making the module alias unusable from here on. The string is
# used as the chart y-label in bitmex_plot.run(); consider renaming it.
ticker = 'BTC-USD'
# Global matplotlib theme applied to all figures created by this module.
style.use('fivethirtyeight')
class bitmex_plot():
def __init__(self):
for i in range(0, 5):
logger.info("==============================================================================================================")
logger.info("[bitmex_plot][__init__]")
self.current_data_cnt = 0;
self.SMA_FAST = 50
self.SMA_SLOW = 200
self.RSI_PERIOD = 14
self.RSI_AVG_PERIOD = 15
self.MACD_FAST = 12
self.MACD_SLOW = 26
self.MACD_SIGNAL = 9
self.STOCH_K = 14
self.STOCH_D = 3
self.SIGNAL_TOL = 3
self.Y_AXIS_SIZE = 12
self.LINE_WIDTH = 1
self.update_flag = False
def run(self):
logger.info("[bitmex_plot][run]")
sec_id = singleton_data.instance().getOHLC_data()
self.sec_id_ochl = np.array(pd.DataFrame({'0':date2num(sec_id.index),#.to_pydatetime()),
'1':sec_id.open,
'2':sec_id.close,
'3':sec_id.high,
'4':sec_id.low}))
#self.analysis.Date.dt.tz_localize('UTC')
self.analysis = analysis.get_analysis(False)
# Prepare plot
self.fig, (self.ax1, self.ax2, self.ax3, self.ax4) = plt.subplots(4, 1, sharex=True)
self.ax1.set_ylabel(ticker, size=20)
self.ax1.xaxis.set_major_formatter(matplotlib.dates.DateFormatter('%H:%M:%S'))
#size plot
self.fig.set_size_inches(15,30)
# Plot candles width=.6/(24*60)
candlestick(self.ax1, self.sec_id_ochl, width=.6/(24*60), colorup='g', colordown='r', alpha=1)
# Draw Moving Averages
self.analysis.sma_f.plot(ax=self.ax1, c='r', linewidth=self.LINE_WIDTH)
self.analysis.sma_s.plot(ax=self.ax1, c='g', linewidth=self.LINE_WIDTH)
handles, labels = self.ax1.get_legend_handles_labels()
self.ax1.legend(handles, labels)
#RSI
self.ax2.set_ylabel('RSI', size=self.Y_AXIS_SIZE)
self.analysis.rsi.plot(ax = self.ax2, c='g', label = 'Period: ' + str(self.RSI_PERIOD), linewidth=self.LINE_WIDTH)
self.analysis.sma_r.plot(ax = self.ax2, c='r', label = 'MA: ' + str(self.RSI_AVG_PERIOD), linewidth=self.LINE_WIDTH)
self.ax2.axhline(y=30, c='b', linewidth=self.LINE_WIDTH)
#self.ax2.axhline(y=50, c='black', linewidth=self.LINE_WIDTH)
self.ax2.axhline(y=70, c='b', linewidth=self.LINE_WIDTH)
self.ax2.set_ylim([0,100])
handles, labels = self.ax2.get_legend_handles_labels()
self.ax2.legend(handles, labels)
# Draw MACD computed with Talib
self.ax3.set_ylabel('MACD: '+ str(self.MACD_FAST) + ', ' + str(self.MACD_SLOW) + ', ' + str(self.MACD_SIGNAL), size=self.Y_AXIS_SIZE)
self.analysis.macd.plot(ax=self.ax3, color='b', label='Macd', linewidth=self.LINE_WIDTH)
self.analysis.macdSignal.plot(ax=self.ax3, color='g', label='Signal', linewidth=self.LINE_WIDTH)
self.analysis.macdHist.plot(ax=self.ax3, color='r', label='Hist', linewidth=self.LINE_WIDTH)
self.ax3.axhline(0, lw=2, color='0', linewidth=self.LINE_WIDTH)
handles, labels = self.ax3.get_legend_handles_labels()
self.ax3.legend(handles, labels)
# Stochastic plot
self.ax4.set_ylabel('Stoch (k,d)', size=self.Y_AXIS_SIZE)
self.analysis.stoch_k.plot(ax=self.ax4, label='stoch_k:'+ str(self.STOCH_K), color='r', linewidth=self.LINE_WIDTH)
self.analysis.stoch_d.plot(ax=self.ax4, label='stoch_d:'+ str(self.STOCH_D), color='g', linewidth=self.LINE_WIDTH)
handles, labels = self.ax4.get_legend_handles_labels()
self.ax4.legend(handles, labels)
self.ax4.axhline(y=20, c='b', linewidth=self.LINE_WIDTH)
self.ax4.axhline(y=50, c='black', linewidth=self.LINE_WIDTH)
self.ax4.axhline(y=80, c='b', linewidth=self.LINE_WIDTH)
self.ani = animation.FuncAnimation(self.fig, self.animate, interval=5000)
plt.show()
def animate(self, i):
#logger.info("[plotThread][animate] self.update_flag " + str(self.update_flag))
if self.update_flag:
sec_id = singleton_data.instance().getOHLC_data()
sec_id_ochl = np.array(pd.DataFrame({'0':date2num(sec_id.index),
'1':sec_id.open,
'2':sec_id.close,
'3':sec_id.high,
'4':sec_id.low}))
#logger.info("[plotThread][animate] sec_id_ochl " + str(sec_id_ochl))
self.analysis = pd.DataFrame(index = sec_id.index)
self.analysis['sma_f'] = sec_id.close.rolling(self.SMA_FAST).mean()
self.analysis['sma_s'] = sec_id.close.rolling(self.SMA_SLOW).mean()
self.analysis['rsi'] = ta.RSI(sec_id.close.to_numpy(), self.RSI_PERIOD)
self.analysis['sma_r'] = self.analysis.rsi.rolling(self.RSI_PERIOD).mean()
self.analysis['macd'], self.analysis['macdSignal'], self.analysis['macdHist'] = ta.MACD(sec_id.close.to_numpy(), fastperiod=self.MACD_FAST, slowperiod=self.MACD_SLOW, signalperiod=self.MACD_SIGNAL)
self.analysis['stoch_k'], self.analysis['stoch_d'] = ta.STOCH(sec_id.high.to_numpy(), sec_id.low.to_numpy(), sec_id.close.to_numpy(), fastk_period=5, slowk_period=3, slowk_matype=0, slowd_period=3, slowd_matype=0)#slowk_period=self.STOCH_K, slowd_period=self.STOCH_D)
self.analysis['sma'] = np.where(self.analysis.sma_f > self.analysis.sma_s, 1, 0)
# Plot candles width=.6/(24*60)
candlestick(self.ax1, sec_id_ochl, width=.6/(24*60), colorup='g', colordown='r', alpha=1)
# Draw Moving Averages
self.analysis['sma_f'] = sec_id.close.rolling(self.SMA_FAST).mean()
self.analysis['sma_s'] = sec_id.close.rolling(self.SMA_SLOW).mean()
self.analysis.sma_f.plot(ax=self.ax1, c='r', linewidth=self.LINE_WIDTH)
self.analysis.sma_s.plot(ax=self.ax1, c='g', linewidth=self.LINE_WIDTH)
self.analysis.rsi.plot(ax = self.ax2, c='g', label = 'Period: ' + str(self.RSI_PERIOD), linewidth=self.LINE_WIDTH)
self.analysis.sma_r.plot(ax = self.ax2, c='r', label = 'MA: ' + str(self.RSI_AVG_PERIOD), linewidth=self.LINE_WIDTH)
self.analysis.macd.plot(ax=self.ax3, color='b', label='Macd', linewidth=self.LINE_WIDTH)
self.analysis.macdSignal.plot(ax=self.ax3, color='g', label='Signal', linewidth=self.LINE_WIDTH)
self.analysis.macdHist.plot(ax=self.ax3, color='r', label='Hist', linewidth=self.LINE_WIDTH)
self.analysis.stoch_k.plot(ax=self.ax4, label='stoch_k:'+ str(self.STOCH_K), color='r', linewidth=self.LINE_WIDTH)
self.analysis.stoch_d.plot(ax=self.ax4, label='stoch_d:'+ str(self.STOCH_D), color='g', linewidth=self.LINE_WIDTH)
self.update_flag = False
def plot_update(self):
    """Request a redraw and block until the animation loop services it.

    Sets `update_flag`; the plotting callback clears it after refreshing
    the figure (see the end of the drawing routine above). We poll every
    100 ms while waiting.

    Returns:
        The last row of the analysis DataFrame.
    """
    logger.info("[bitmex_plot][plot_update]")
    self.update_flag = True
    poll_count = 0
    while self.update_flag:
        poll_count += 1  # number of 100 ms waits, handy when debugging stalls
        time.sleep(0.1)
    return self.analysis.iloc[-1:]
| [
"pandas.DataFrame",
"matplotlib.pyplot.show",
"matplotlib.style.use",
"market_maker.utils.log.setup_custom_logger",
"matplotlib.animation.FuncAnimation",
"market_maker.plot.analysis.get_analysis",
"matplotlib.dates.DateFormatter",
"numpy.where",
"time.sleep",
"market_maker.utils.singleton.singleto... | [((534, 565), 'market_maker.utils.log.setup_custom_logger', 'log.setup_custom_logger', (['"""root"""'], {}), "('root')\n", (557, 565), False, 'from market_maker.utils import log\n'), ((587, 615), 'matplotlib.style.use', 'style.use', (['"""fivethirtyeight"""'], {}), "('fivethirtyeight')\n", (596, 615), False, 'from matplotlib import style\n'), ((1833, 1861), 'market_maker.plot.analysis.get_analysis', 'analysis.get_analysis', (['(False)'], {}), '(False)\n', (1854, 1861), False, 'from market_maker.plot import analysis\n'), ((1947, 1978), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(4)', '(1)'], {'sharex': '(True)'}), '(4, 1, sharex=True)\n', (1959, 1978), True, 'import matplotlib.pyplot as plt\n'), ((2221, 2324), 'mpl_finance.candlestick_ohlc', 'candlestick', (['self.ax1', 'self.sec_id_ochl'], {'width': '(0.6 / (24 * 60))', 'colorup': '"""g"""', 'colordown': '"""r"""', 'alpha': '(1)'}), "(self.ax1, self.sec_id_ochl, width=0.6 / (24 * 60), colorup='g',\n colordown='r', alpha=1)\n", (2232, 2324), True, 'from mpl_finance import candlestick_ohlc as candlestick\n'), ((4596, 4658), 'matplotlib.animation.FuncAnimation', 'animation.FuncAnimation', (['self.fig', 'self.animate'], {'interval': '(5000)'}), '(self.fig, self.animate, interval=5000)\n', (4619, 4658), True, 'import matplotlib.animation as animation\n'), ((4668, 4678), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4676, 4678), True, 'import matplotlib.pyplot as plt\n'), ((2068, 2110), 'matplotlib.dates.DateFormatter', 'matplotlib.dates.DateFormatter', (['"""%H:%M:%S"""'], {}), "('%H:%M:%S')\n", (2098, 2110), False, 'import matplotlib\n'), ((5342, 5374), 'pandas.DataFrame', 'pd.DataFrame', ([], {'index': 'sec_id.index'}), '(index=sec_id.index)\n', (5354, 5374), True, 'import pandas as pd\n'), ((6235, 6292), 'numpy.where', 'np.where', (['(self.analysis.sma_f > self.analysis.sma_s)', '(1)', '(0)'], {}), '(self.analysis.sma_f > self.analysis.sma_s, 1, 0)\n', 
(6243, 6292), True, 'import numpy as np\n'), ((6350, 6448), 'mpl_finance.candlestick_ohlc', 'candlestick', (['self.ax1', 'sec_id_ochl'], {'width': '(0.6 / (24 * 60))', 'colorup': '"""g"""', 'colordown': '"""r"""', 'alpha': '(1)'}), "(self.ax1, sec_id_ochl, width=0.6 / (24 * 60), colorup='g',\n colordown='r', alpha=1)\n", (6361, 6448), True, 'from mpl_finance import candlestick_ohlc as candlestick\n'), ((7983, 7998), 'time.sleep', 'time.sleep', (['(0.1)'], {}), '(0.1)\n', (7993, 7998), False, 'import time\n'), ((1368, 1393), 'market_maker.utils.singleton.singleton_data.instance', 'singleton_data.instance', ([], {}), '()\n', (1391, 1393), False, 'from market_maker.utils.singleton import singleton_data\n'), ((1464, 1486), 'matplotlib.dates.date2num', 'date2num', (['sec_id.index'], {}), '(sec_id.index)\n', (1472, 1486), False, 'from matplotlib.dates import date2num\n'), ((4845, 4870), 'market_maker.utils.singleton.singleton_data.instance', 'singleton_data.instance', ([], {}), '()\n', (4868, 4870), False, 'from market_maker.utils.singleton import singleton_data\n'), ((4940, 4962), 'matplotlib.dates.date2num', 'date2num', (['sec_id.index'], {}), '(sec_id.index)\n', (4948, 4962), False, 'from matplotlib.dates import date2num\n')] |
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
import numpy as np
from models.DCASE_baseline import AutoPool
from models.Time2vec import Time2Vec
from activation.mish import Mish
from torchlibrosa.augmentation import SpecAugmentation
class ConvBlock(nn.Module):
    """Conv2d -> (optional BatchNorm) -> ReLU -> (optional max-pool) block.

    Args:
        n_input_feature_maps (int): number of input channels.
        n_output_feature_maps (int): number of output channels.
        kernel_size (int or tuple): odd kernel size; an int is expanded to a
            square (k, k) kernel. Odd sizes keep 'same' padding exact.
        batch_norm (bool): if True, add BatchNorm2d after the conv and
            drop the conv bias (BN makes it redundant).
        pool_stride (tuple or None): max-pool kernel/stride, or None to skip.
    """

    def __init__(self, n_input_feature_maps, n_output_feature_maps, kernel_size, batch_norm=False, pool_stride=None):
        super(ConvBlock, self).__init__()
        # Normalize kernel_size to a (kh, kw) pair so both scalar and tuple
        # kernels work (the original mixed `for x in kernel_size` with
        # `int(kernel_size)`, which no single input type could satisfy).
        if isinstance(kernel_size, (tuple, list)):
            ksize = tuple(int(k) for k in kernel_size)
        else:
            ksize = (int(kernel_size), int(kernel_size))
        assert all(k % 2 == 1 for k in ksize)
        self.n_input = n_input_feature_maps
        self.n_output = n_output_feature_maps
        self.kernel_size = kernel_size
        self.batch_norm = batch_norm
        self.pool_stride = pool_stride
        # BUG FIX: the bias flag was `~batch_norm` -- bitwise NOT of a bool
        # yields -1/-2, which is always truthy, so a bias was ALWAYS created.
        # `not batch_norm` disables the bias exactly when BN follows.
        self.conv = nn.Conv2d(int(self.n_input), int(self.n_output), ksize,
                              padding=tuple(k // 2 for k in ksize), bias=not batch_norm)
        if batch_norm:
            self.bn = nn.BatchNorm2d(int(self.n_output))
        nn.init.xavier_uniform_(self.conv.weight)

    def forward(self, x):
        """(batch, in_ch, time, freq) -> (batch, out_ch, time', freq')."""
        x = self.conv(x)
        if self.batch_norm:
            x = self.bn(x)
        x = F.relu(x)
        if self.pool_stride is not None:
            x = F.max_pool2d(x, self.pool_stride)
        return x
class TALNet(nn.Module):
    """TALNet: stacked conv blocks + bidirectional GRU + MIL pooling.

    Code adapted from 'Polyphonic Sound Event Detection with Weak Labeling'
    (Yun Wang, https://github.com/MaigoAkisame/cmu-thesis).

    Args:
        args: namespace whose attributes are copied wholesale onto self;
            must provide n_conv_layers, n_pool_layers, embedding_size,
            kernel_size, batch_norm, dropout and pooling.
        num_mels (int): number of mel bins of the input spectrogram.
        num_classes (int): number of output classes.
    """

    def __init__(self, args, num_mels, num_classes):
        super(TALNet, self).__init__()
        self.__dict__.update(args.__dict__)  # instill all args into self
        assert self.n_conv_layers % self.n_pool_layers == 0
        self.input_n_freq_bins = n_freq_bins = num_mels
        self.output_size = num_classes
        self.conv = []
        pool_interval = self.n_conv_layers / self.n_pool_layers
        n_input = 1
        for i in range(self.n_conv_layers):
            if (i + 1) % pool_interval == 0:  # this layer has pooling
                n_freq_bins /= 2
                n_output = self.embedding_size / n_freq_bins
                # Pool time only in the first two pooling layers; after that
                # only frequency is halved so the time resolution is kept.
                pool_stride = (2, 2) if i < pool_interval * 2 else (1, 2)
            else:
                n_output = self.embedding_size * 2 / n_freq_bins
                pool_stride = None
            layer = ConvBlock(n_input, n_output, self.kernel_size, batch_norm=self.batch_norm, pool_stride=pool_stride)
            self.conv.append(layer)
            self.__setattr__('conv' + str(i + 1), layer)  # register as a submodule
            n_input = n_output
        self.gru = nn.GRU(int(self.embedding_size), int(self.embedding_size / 2), 1, batch_first=True, bidirectional=True)
        self.fc_prob = nn.Linear(self.embedding_size, self.output_size)
        if self.pooling == 'att':
            self.fc_att = nn.Linear(self.embedding_size, self.output_size)
        # Better initialization
        nn.init.orthogonal_(self.gru.weight_ih_l0); nn.init.constant_(self.gru.bias_ih_l0, 0)
        nn.init.orthogonal_(self.gru.weight_hh_l0); nn.init.constant_(self.gru.bias_hh_l0, 0)
        nn.init.orthogonal_(self.gru.weight_ih_l0_reverse); nn.init.constant_(self.gru.bias_ih_l0_reverse, 0)
        nn.init.orthogonal_(self.gru.weight_hh_l0_reverse); nn.init.constant_(self.gru.bias_hh_l0_reverse, 0)
        nn.init.xavier_uniform_(self.fc_prob.weight); nn.init.constant_(self.fc_prob.bias, 0)
        if self.pooling == 'att':
            nn.init.xavier_uniform_(self.fc_att.weight); nn.init.constant_(self.fc_att.bias, 0)
        if self.pooling == 'auto':
            self.autopool = AutoPool(self.output_size)

    def forward(self, x):
        """x: (batch, time, freq). Returns (global_prob, frame_prob[, frame_att])."""
        x = x.view((-1, 1, x.size(1), x.size(2)))  # (batch, channel, time, freq)
        for i in range(len(self.conv)):
            if self.dropout > 0: x = F.dropout(x, p=self.dropout, training=self.training)
            x = self.conv[i](x)  # (batch, channel, time, freq)
        x = x.permute(0, 2, 1, 3).contiguous()  # (batch, time, channel, freq)
        x = x.view((-1, x.size(1), x.size(2) * x.size(3)))  # (batch, time, embedding_size)
        if self.dropout > 0: x = F.dropout(x, p=self.dropout, training=self.training)
        x, _ = self.gru(x)  # (batch, time, embedding_size)
        if self.dropout > 0: x = F.dropout(x, p=self.dropout, training=self.training)
        frame_prob = torch.sigmoid(self.fc_prob(x))  # (batch, time, output_size)
        # Clamp away from {0, 1} so downstream log-losses stay finite.
        frame_prob = torch.clamp(frame_prob, 1e-7, 1 - 1e-7)
        if self.pooling == 'max':
            global_prob, _ = frame_prob.max(dim=1)
            return global_prob, frame_prob
        elif self.pooling == 'ave':
            global_prob = frame_prob.mean(dim=1)
            return global_prob, frame_prob
        elif self.pooling == 'lin':
            global_prob = (frame_prob * frame_prob).sum(dim=1) / frame_prob.sum(dim=1)
            return global_prob, frame_prob
        elif self.pooling == 'exp':
            global_prob = (frame_prob * frame_prob.exp()).sum(dim=1) / frame_prob.exp().sum(dim=1)
            return global_prob, frame_prob
        elif self.pooling == 'att':
            frame_att = F.softmax(self.fc_att(x), dim=1)
            global_prob = (frame_prob * frame_att).sum(dim=1)
            return global_prob, frame_prob, frame_att
        elif self.pooling == 'auto':
            global_prob = self.autopool(frame_prob)
            return global_prob, frame_prob

    def predict(self, x, verbose=True, batch_size=100):
        """Predict in batches. Both input and outputs are numpy arrays.

        If verbose == True, return all of (global_prob, frame_prob[, att]);
        if verbose == False, only return global_prob. Requires CUDA.
        """
        result = []
        for i in range(0, len(x), batch_size):
            with torch.no_grad():
                batch_in = Variable(torch.from_numpy(x[i: i + batch_size])).cuda()
                output = self.forward(batch_in)
            if not verbose: output = output[:1]
            result.append([var.data.cpu().numpy() for var in output])
        # BUG FIX: this line used `numpy.concatenate`, but the module is only
        # imported as `np`, so predict() always raised NameError.
        result = tuple(np.concatenate(items) for items in zip(*result))
        return result if verbose else result[0]
class ScaledDotProductAttention(nn.Module):
    """Scaled dot-product attention: softmax(q @ k^T / temperature) @ v."""

    def __init__(self, temperature, attn_dropout=0.1):
        super().__init__()
        self.temperature = temperature
        self.dropout = nn.Dropout(attn_dropout)
        self.softmax = nn.Softmax(dim=2)

    def forward(self, q, k, v, mask=None):
        """q, k, v: (batch, len, d). Returns (output, attention_weights)."""
        scores = torch.bmm(q, k.transpose(1, 2)) / self.temperature
        if mask is not None:
            # Masked positions get -inf so softmax assigns them zero weight.
            scores = scores.masked_fill(mask, -np.inf)
        weights = self.dropout(self.softmax(scores))
        return torch.bmm(weights, v), weights
class MultiHead(nn.Module):
    """Multi-head attention: q/k/v are linearly projected into `n_head`
    subspaces, attended independently, then concatenated and re-projected
    (with dropout and an in-place ReLU on the way out)."""

    def __init__(self, n_head, d_model, d_k, d_v, dropout=0.1):
        super().__init__()
        self.n_head = n_head
        self.d_k = d_k
        self.d_v = d_v
        self.w_qs = nn.Linear(d_model, n_head * d_k)
        self.w_ks = nn.Linear(d_model, n_head * d_k)
        self.w_vs = nn.Linear(d_model, n_head * d_v)
        # Normal init with variance tied to fan sizes; zero biases.
        nn.init.normal_(self.w_qs.weight, mean=0, std=np.sqrt(2.0 / (d_model + d_k)))
        nn.init.normal_(self.w_ks.weight, mean=0, std=np.sqrt(2.0 / (d_model + d_k)))
        nn.init.normal_(self.w_vs.weight, mean=0, std=np.sqrt(2.0 / (d_model + d_v)))
        for proj in (self.w_qs, self.w_ks, self.w_vs):
            proj.bias.data.fill_(0)
        self.attention = ScaledDotProductAttention(temperature=np.power(d_k, 0.5))
        self.layer_norm = nn.LayerNorm(d_model)  # registered but not applied in forward
        self.fc = nn.Linear(n_head * d_v, d_model)
        nn.init.xavier_normal_(self.fc.weight)
        self.fc.bias.data.fill_(0)
        self.dropout = nn.Dropout(dropout)

    def forward(self, q, k, v, mask=None):
        """q: (batch, len_q, d_model); returns (batch, len_q, d_model)."""
        n_head, d_k, d_v = self.n_head, self.d_k, self.d_v
        sz_b, len_q = q.size(0), q.size(1)
        len_k, len_v = k.size(1), v.size(1)

        def _split_heads(proj, t, length, depth):
            # (batch, len, n_head*depth) -> (n_head*batch, len, depth)
            t = proj(t).view(sz_b, length, n_head, depth)
            return t.permute(2, 0, 1, 3).contiguous().view(-1, length, depth)

        q_heads = _split_heads(self.w_qs, q, len_q, d_k)
        k_heads = _split_heads(self.w_ks, k, len_k, d_k)
        v_heads = _split_heads(self.w_vs, v, len_v, d_v)
        # mask would need mask.repeat(n_head, 1, 1) to match the head layout
        out, _ = self.attention(q_heads, k_heads, v_heads, mask=mask)
        # Merge heads: (n_head*batch, len_q, d_v) -> (batch, len_q, n_head*d_v)
        out = out.view(n_head, sz_b, len_q, d_v).permute(1, 2, 0, 3).contiguous().view(sz_b, len_q, -1)
        return F.relu_(self.dropout(self.fc(out)))
class Normed_Linear(nn.Linear):
    """ Linear layer with weight and input L2 normalized.

    Could lead to a better 'geometric' space and help with imbalanced
    datasets (cosine-style classifier).

    Args:
        in_features (int) : size of each input sample
        out_features (int) : size of each output sample
        bias (bool) : If False, the layer will not learn an additive bias.
    Shape:
        Input: (N, *, in_features)
        Output: (N, *, out_features)
    """
    def __init__(self, in_features, out_features, bias=True):
        # BUG FIX: `bias` was hard-coded to True in the super() call, so
        # passing bias=False had no effect. Honor the argument.
        super().__init__(in_features, out_features, bias=bias)

    def forward(self, x):
        # Normalize the weight columns and the input feature vectors
        # (1e-5 guards against division by zero).
        weight = self.weight / (torch.norm(self.weight, 2, 0) + 1e-5)
        x = x / (torch.norm(x, 2, -1) + 1e-5).unsqueeze(-1)
        return F.linear(x, weight, self.bias)
class AvgMaxPool2d(nn.Module):
    """ Average + Max pooling layer.

    Element-wise sum of average pooling and max pooling over the same window.

    Args:
        pool_stride (int, tuple) : kernel size / stride shared by both poolings
    """
    def __init__(self, pool_stride):
        super().__init__()
        self.pool_stride = pool_stride
        # BUG FIX: the two attributes were swapped (`avgpool` held an
        # nn.MaxPool2d and `maxpool` an nn.AvgPool2d). The summed forward
        # output was unaffected, but the names were wrong for anyone
        # inspecting or re-using the submodules.
        self.avgpool = nn.AvgPool2d(self.pool_stride)
        self.maxpool = nn.MaxPool2d(self.pool_stride)

    def forward(self, x):
        return self.avgpool(x) + self.maxpool(x)
class Pooling_Head(nn.Module):
    """MIL pooling head: aggregates per-frame probabilities over time into a
    clip-level probability.

    Code adapted from 'Polyphonic Sound Event Detection with Weak Labeling'
    (Yun Wang, https://github.com/MaigoAkisame/cmu-thesis).

    Args:
        in_features (int) : size of each input sample
        out_features (int) : size of each output sample
        pooling (str) : pooling strategy, one of max, ave, lin, exp, att, auto
    """

    def __init__(self, in_features, out_features, pooling):
        super().__init__()
        self.in_features = in_features
        self.out_features = out_features
        self.pooling = pooling
        if pooling == 'att':
            # Attention strategy learns per-frame weights from the features.
            self.fc_att = nn.Linear(self.in_features, self.out_features)
            nn.init.xavier_uniform_(self.fc_att.weight)
            nn.init.constant_(self.fc_att.bias, 0)
        elif pooling == 'auto':
            self.autopool = AutoPool(self.out_features)

    def forward(self, frame_prob, x):
        """Aggregate frame_prob (batch, time, classes) over the time axis.

        `x` is only consulted by the 'att' strategy. Returns
        (global_prob, frame_prob).
        """
        if self.pooling == 'max':
            return frame_prob.max(dim=1)[0], frame_prob
        if self.pooling == 'ave':
            return frame_prob.mean(dim=1), frame_prob
        if self.pooling == 'lin':
            return (frame_prob * frame_prob).sum(dim=1) / frame_prob.sum(dim=1), frame_prob
        if self.pooling == 'exp':
            weights = frame_prob.exp()
            return (frame_prob * weights).sum(dim=1) / weights.sum(dim=1), frame_prob
        if self.pooling == 'att':
            frame_att = F.softmax(self.fc_att(x), dim=1)
            return (frame_prob * frame_att).sum(dim=1), frame_prob
        if self.pooling == 'auto':
            return self.autopool(frame_prob), frame_prob
class ConvBlockTALNet(nn.Conv2d):
    """ TALNet ConvBlock with Weight Standardization (WS).

    Link to WS : https://arxiv.org/abs/1903.10520

    The block is an nn.Conv2d whose weights are z-normalized per output
    channel at every forward pass, followed by optional normalization
    ('GN'/'BN'), an activation, and optional pooling.
    """
    def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=None,
                 dilation=1, groups=1, bias=True, padding_mode='zeros',
                 norm=None, activation='relu', pool_stride=None, pool_strat='max'):
        # Default to 'same'-style padding derived from the kernel size.
        if padding is None:
            padding = tuple(int(int(x) / 2) for x in kernel_size)
        # Call __init__ of nn.Conv2d
        super(ConvBlockTALNet, self).__init__(in_channels, out_channels, kernel_size, stride,
                                              padding, dilation, groups, bias, padding_mode)
        # Initialize norm if needed (support None, Batch Norm, Group Norm)
        if norm == 'GN':
            self.norm = True
            # NOTE: num_groups=32 requires out_channels to be a multiple of 32.
            self.norm_layer = nn.GroupNorm(num_channels=self.out_channels, num_groups=32)
        elif norm == 'BN':
            self.norm = True
            # BUG FIX: was `nn.BatchNorm2d(self.n_output)` -- nn.Conv2d has no
            # attribute `n_output`, so norm='BN' always raised AttributeError.
            self.norm_layer = nn.BatchNorm2d(self.out_channels)
        else:
            self.norm = False
        # Initialize activation function
        if activation == 'relu':
            self.activation = nn.ReLU()
        elif activation == 'prelu':
            self.activation = nn.PReLU()
        elif activation == 'leaky_relu':
            self.activation = nn.LeakyReLU()
        elif activation == 'mish':
            self.activation = Mish()
        else:
            raise Exception('Incorrect argument!')
        # Initialize pooling if needed (support max, avg and avg+max pooling)
        self.pool_stride = pool_stride
        if pool_strat == 'max':
            self.pooling = nn.MaxPool2d(self.pool_stride)
        elif pool_strat == 'avg':
            self.pooling = nn.AvgPool2d(self.pool_stride)
        elif pool_strat == 'avg_max':
            self.pooling = AvgMaxPool2d(self.pool_stride)
        # Better initialization
        nn.init.orthogonal_(self.weight)

    def forward(self, x):
        # Weight Standardization: z-norm the weights per output channel
        # (mean 0, std 1) before the convolution.
        weight = self.weight - self.weight.view(self.weight.size(0), -1, 1, 1).mean(1, keepdim=True)
        std = weight.view(weight.size(0), -1, 1, 1).std(dim=1, keepdim=True) + 1e-5  # avoid 0 div
        weight = weight / std.expand_as(weight)
        # Compute conv2D with z-normed weights
        x = F.conv2d(x, weight, self.bias, self.stride,
                     self.padding, self.dilation, self.groups)
        # Apply norm if needed
        if self.norm: x = self.norm_layer(x)
        # Apply activation function
        x = self.activation(x)
        # Apply pooling if needed
        if self.pool_stride: x = self.pooling(x)
        return x
class TALNetV2(nn.Module):
    """Improved TALNet: weight-standardized conv stack, multi-head
    self-attention over frame embeddings, normalized linear classifier and
    a MIL pooling head.

    Code adapted from 'Polyphonic Sound Event Detection with Weak Labeling'
    (Yun Wang, https://github.com/MaigoAkisame/cmu-thesis).

    Args:
        num_mels (int): number of mel bins of the input spectrogram.
        num_classes (int): number of output classes.
        n_conv_layers (int): number of ConvBlockTALNet layers.
        kernel_size (tuple): kernel size of each conv layer.
        n_pool_layers (int): how many of the conv layers also pool.
        embedding_size (int): size of the per-frame embedding.
        norm (str): normalization used inside the conv blocks.
        pooling (str): clip-level pooling strategy of the head.
        dropout (float): dropout applied around the conv/attention stages.
        conv_pool_strat (str): pooling strategy inside the conv blocks.
        conv_activation (str): activation used inside the conv blocks.
        n_head (int): number of attention heads.
        d_kv (int): dimension of attention keys and values.
        dropout_transfo (float): dropout inside the attention block.
    """

    def __init__(self, num_mels, num_classes, n_conv_layers=10, kernel_size=(3, 3), n_pool_layers=5,
                 embedding_size=1024, norm='GN', pooling='att', dropout=0.0, conv_pool_strat='max',
                 conv_activation='relu', n_head=8, d_kv=128, dropout_transfo=0):
        super(TALNetV2, self).__init__()
        self.n_conv_layers = n_conv_layers
        self.kernel_size = kernel_size
        self.n_pool_layers = n_pool_layers
        self.embedding_size = embedding_size
        self.norm = norm
        self.pooling = pooling
        self.dropout = dropout
        self.conv_pool_strat = conv_pool_strat
        self.conv_activation = conv_activation
        self.n_head = n_head
        self.d_k = self.d_v = d_kv
        self.dropout_transfo = dropout_transfo
        self.input_n_freq_bins = n_freq_bins = num_mels
        self.output_size = num_classes
        assert self.n_conv_layers % self.n_pool_layers == 0
        pool_interval = self.n_conv_layers / self.n_pool_layers
        in_maps = 1
        self.conv = []
        for idx in range(self.n_conv_layers):
            if (idx + 1) % pool_interval == 0:
                # Pooling layer: halve the frequency axis and size the channel
                # count so channel * freq stays equal to embedding_size.
                n_freq_bins /= 2
                out_maps = self.embedding_size / n_freq_bins
                stride = (2, 2) if idx < pool_interval * 2 else (1, 2)
            else:
                out_maps = self.embedding_size * 2 / n_freq_bins
                stride = None
            block = ConvBlockTALNet(int(in_maps), int(out_maps), self.kernel_size, norm=self.norm,
                                    pool_stride=stride, pool_strat=self.conv_pool_strat,
                                    activation=self.conv_activation)
            self.conv.append(block)
            self.__setattr__('conv' + str(idx + 1), block)  # register as submodule
            in_maps = out_maps
        self.self_attention_1 = MultiHead(self.n_head, self.embedding_size, self.d_k, self.d_v, self.dropout_transfo)
        self.fc_prob = Normed_Linear(self.embedding_size, self.output_size)
        self.pooling_head = Pooling_Head(self.embedding_size, self.output_size, self.pooling)

    def forward(self, x):
        """x: (batch, time, freq) -> (global_prob, frame_prob)."""
        feat = x.unsqueeze(1)  # (batch, 1, time, freq)
        for block in self.conv:
            if self.dropout > 0:
                feat = F.dropout(feat, p=self.dropout, training=self.training)
            feat = block(feat)  # (batch, channel, time, freq)
        feat = feat.permute(0, 2, 1, 3).contiguous()  # (batch, time, channel, freq)
        feat = feat.view((-1, feat.size(1), feat.size(2) * feat.size(3)))  # (batch, time, embedding)
        if self.dropout > 0:
            feat = F.dropout(feat, p=self.dropout, training=self.training)
        feat = self.self_attention_1(feat, feat, feat)  # (batch, time, embedding)
        if self.dropout > 0:
            feat = F.dropout(feat, p=self.dropout, training=self.training)
        frame_prob = torch.sigmoid(self.fc_prob(feat))  # (batch, time, output_size)
        return self.pooling_head(frame_prob, feat)
class TALNetV2_meta(nn.Module):
""" Improved TALNet architecture + metadata integration (Time2vec)
Coda adapted from 'Polyphonic Sound Event Detection with Weak Labeling' Yun Wang github
Link : https://github.com/MaigoAkisame/cmu-thesis
Args:
num_mels (int) : number of mels of spectrogram
num_classes (int) : number of classes
num_meta (int) : number of metadata
meta_emb (int) : metadata embedding size
n_conv_layers (int) : number of ConvBlockTALNet
kernel_size (tuple) : kernel size of ConvBlockTALNet
n_pool_layers (int) : number of pooling layers in the ConvBlock part
embedding_size (int) : size of embeddings
norm (str) : norm to use in ConvBlockTALNet
pooling (str) : pooling strategie of the head
dropout (float) : dropout applied before ConvBlockTALNet
conv_pool_strat (str) : pooling strategie used in ConvBlockTALNet
conv_activation (str) : activation used in ConvBlockTALNet
n_head (int) : number of head inside attention block
d_kv (int) : size of key and values of attention block
dropout_transfo (float) : dropout applied inside attention block
"""
def __init__(self, num_mels, num_classes, num_meta, meta_emb=64, n_conv_layers=10, kernel_size=(3,3), n_pool_layers=5,
embedding_size=1024, norm='GN', pooling='att', dropout=0.0, conv_pool_strat='max',
conv_activation='relu', n_head=8, d_kv=128, dropout_transfo=0):
super(TALNetV2_meta, self).__init__()
self.n_conv_layers = n_conv_layers
self.kernel_size = kernel_size
self.n_pool_layers = n_pool_layers
self.embedding_size = embedding_size
self.norm = norm
self.pooling = pooling
self.dropout = dropout
self.conv_pool_strat = conv_pool_strat
self.conv_activation = conv_activation
self.n_head = n_head
self.d_k = self.d_v = d_kv
self.dropout_transfo = dropout_transfo
self.num_meta = num_meta
self.meta_emb = meta_emb
self.input_n_freq_bins = n_freq_bins = num_mels
self.output_size = num_classes
assert self.n_conv_layers % self.n_pool_layers == 0
###
# TALNet Part
###
# One pooling layer every `pool_interval` conv layers.
pool_interval = self.n_conv_layers / self.n_pool_layers
n_input = 1
self.conv = []
for i in range(self.n_conv_layers):
if (i + 1) % pool_interval == 0: # this layer has pooling
# Halve frequency; channel count chosen so channel*freq == embedding_size.
n_freq_bins /= 2
n_output = self.embedding_size / n_freq_bins
pool_stride = (2, 2) if i < pool_interval * 2 else (1, 2)
else:
n_output = self.embedding_size * 2 / n_freq_bins
pool_stride = None
layer = ConvBlockTALNet(int(n_input), int(n_output), self.kernel_size, norm = self.norm,
pool_stride = pool_stride, pool_strat=self.conv_pool_strat, activation=self.conv_activation)
self.conv.append(layer)
# Register each block as a submodule so its parameters are tracked.
self.__setattr__('conv' + str(i + 1), layer)
n_input = n_output
self.self_attention_1 = MultiHead(self.n_head, self.embedding_size, self.d_k, self.d_v, self.dropout_transfo)
###
# META Part
###
# Time2Vec embedding of the raw metadata (see models.Time2vec).
self.t2v = Time2Vec(self.num_meta, self.meta_emb)
self.self_attention_meta = MultiHead(self.n_head, self.num_meta, self.d_k, self.d_v, self.dropout_transfo)
###
# HEAD
###
# NOTE(review): self_attention_meta_talnet is created (and adds parameters)
# but is never called in forward() below -- confirm whether it is dead code.
self.self_attention_meta_talnet = MultiHead(self.n_head, self.embedding_size, self.meta_emb, self.meta_emb, self.dropout_transfo)
self.fc_prob = Normed_Linear(self.embedding_size + self.meta_emb * self.num_meta, self.output_size)
self.pooling_head = Pooling_Head(self.embedding_size + self.meta_emb * self.num_meta, self.output_size, self.pooling)
def forward(self, x, meta):
###
# TALNet Part
###
# x is (bs, time, freq)
x = x.unsqueeze(1) # x becomes (batch, channel, time, freq)
for conv_layer in self.conv:
if self.dropout > 0: x = F.dropout(x, p = self.dropout, training = self.training)
x = conv_layer(x) # x becomes (batch, channel, time, freq)
x = x.permute(0, 2, 1, 3).contiguous() # x becomes (batch, time, channel, freq)
x = x.view((-1, x.size(1), x.size(2) * x.size(3))) # x becomes (batch, time, embedding_size)
if self.dropout > 0: x = F.dropout(x, p = self.dropout, training = self.training)
x = self.self_attention_1(x,x,x) # x becomes (batch, time, embedding_size)
if self.dropout > 0: x = F.dropout(x, p = self.dropout, training = self.training)
###
# META Part
###
meta = self.t2v(meta)
meta = self.self_attention_meta(meta, meta, meta) # [bs, n_sin, n_hid=n_meta]
meta = meta.view((-1, meta.size(1) * meta.size(2))) # [bs, emb]
###
# HEAD
###
# Broadcast the clip-level meta embedding over time and concatenate per frame.
x = torch.cat([x, meta.unsqueeze(1).expand((-1,x.size(1),-1))],2)
frame_prob = torch.sigmoid(self.fc_prob(x)) # shape of frame_prob: (batch, time, output_size)
return self.pooling_head(frame_prob, x)
class TALNetV3(nn.Module):
"""TALNet v3: two parallel conv branches (classic ConvBlock with GRU, and a
weight-standardized ConvBlockTALNet branch with multi-head attention),
SpecAugment on the input, and Time2Vec metadata fused before the classifier.

Hyper-parameters are instilled from `args`; the attributes read below are
n_conv_layers, n_pool_layers, embedding_size, kernel_size, batch_norm,
transfo_head, nb_meta_emb, dropout, dropout_AS, dropout_transfo and pooling.
"""
def __init__(self, args, num_mels, num_meta, num_classes):
super(TALNetV3, self).__init__()
self.__dict__.update(args.__dict__) # Install all args into self
assert self.n_conv_layers % self.n_pool_layers == 0
self.input_n_freq_bins = n_freq_bins = num_mels
self.output_size = num_classes
self.num_meta = num_meta
self.n_head = self.transfo_head
self.d_k = self.d_v = 128
self.meta_emb = self.nb_meta_emb
# Conv
self.conv = []
self.conv_v2 = []
pool_interval = self.n_conv_layers / self.n_pool_layers
n_input = 1
for i in range(self.n_conv_layers):
if (i + 1) % pool_interval == 0: # this layer has pooling
n_freq_bins /= 2
n_output = self.embedding_size / n_freq_bins
pool_stride = (2, 2) if i < pool_interval * 2 else (1, 2)
else:
n_output = self.embedding_size * 2 / n_freq_bins
pool_stride = None
# Branch 1: plain ConvBlock; Branch 2: weight-standardized block (GN + mish).
layer = ConvBlock(n_input, n_output, self.kernel_size, batch_norm = self.batch_norm, pool_stride = pool_stride)
self.conv.append(layer)
self.__setattr__('conv' + str(i + 1), layer)
layer_v2 = ConvBlockTALNet(int(n_input), int(n_output), (int(self.kernel_size),int(self.kernel_size)), norm = 'GN',
pool_stride = pool_stride, pool_strat='max', activation='mish')
self.conv_v2.append(layer_v2)
self.__setattr__('conv_v2' + str(i + 1), layer_v2)
n_input = n_output
# Spec augmenter
self.spec_augmenter = SpecAugmentation(time_drop_width=64, time_stripes_num=2, freq_drop_width=8, freq_stripes_num=2)
# NOTE(review): bn0 has 64 channels and is applied on the transposed
# (frequency-as-channel) tensor in forward -- this assumes num_mels == 64; confirm.
self.bn0 = nn.BatchNorm2d(64)
# Metadata + fc
self.t2v = Time2Vec(self.num_meta, self.meta_emb)
# Temp (Transfo + GRU)
self.multihead_meta = MultiHead(self.n_head, self.num_meta, self.d_k, self.d_v, self.dropout_transfo)
self.gru = nn.GRU(int(self.embedding_size), int(self.embedding_size / 2), 1, batch_first = True, bidirectional = True)
self.multihead_v2 = MultiHead(self.n_head, self.embedding_size, self.d_k, self.d_v, self.dropout_transfo)
# FC
# self.att_block = AttBlock(n_in=(self.embedding_size * 2 + self.meta_emb * self.num_meta), n_out=self.output_size, activation='sigmoid')
self.fc_prob = nn.Linear(self.embedding_size * 2 + self.meta_emb * self.num_meta, self.output_size)
if self.pooling == 'att':
self.fc_att = nn.Linear(self.embedding_size * 2 + self.meta_emb * self.num_meta, self.output_size)
# Better initialization
nn.init.orthogonal_(self.gru.weight_ih_l0); nn.init.constant_(self.gru.bias_ih_l0, 0)
nn.init.orthogonal_(self.gru.weight_hh_l0); nn.init.constant_(self.gru.bias_hh_l0, 0)
nn.init.orthogonal_(self.gru.weight_ih_l0_reverse); nn.init.constant_(self.gru.bias_ih_l0_reverse, 0)
nn.init.orthogonal_(self.gru.weight_hh_l0_reverse); nn.init.constant_(self.gru.bias_hh_l0_reverse, 0)
nn.init.xavier_uniform_(self.fc_prob.weight); nn.init.constant_(self.fc_prob.bias, 0)
if self.pooling == 'att':
nn.init.xavier_uniform_(self.fc_att.weight); nn.init.constant_(self.fc_att.bias, 0)
if self.pooling == 'auto':
self.autopool = AutoPool(self.output_size)
def forward(self, x, meta):
x = x.view((-1, 1, x.size(1), x.size(2))) # x becomes (batch, channel, time, freq)
# Normalize per frequency bin: move freq to the channel axis, BN, move back.
x = x.transpose(1, 3)
x = self.bn0(x)
x = x.transpose(1, 3)
if self.training:
x = self.spec_augmenter(x)
# Keep a copy of the (augmented) input for the second conv branch.
x_v2 = x
for i in range(len(self.conv)):
if self.dropout_AS > 0: x = F.dropout(x, p = self.dropout_AS, training = self.training)
x = self.conv[i](x) # x becomes (batch, channel, time, freq)
x = x.permute(0, 2, 1, 3).contiguous() # x becomes (batch, time, channel, freq)
x = x.view((-1, x.size(1), x.size(2) * x.size(3))) # x becomes (batch, time, embedding_size)
if self.dropout_AS > 0: x = F.dropout(x, p = self.dropout_AS, training = self.training)
x, _ = self.gru(x)
for i in range(len(self.conv_v2)):
if self.dropout > 0: x_v2 = F.dropout(x_v2, p = self.dropout, training = self.training)
x_v2 = self.conv_v2[i](x_v2) # x becomes (batch, channel, time, freq)
x_v2 = x_v2.permute(0, 2, 1, 3).contiguous() # x becomes (batch, time, channel, freq)
x_v2 = x_v2.view((-1, x_v2.size(1), x_v2.size(2) * x_v2.size(3))) # x becomes (batch, time, embedding_size)
if self.dropout > 0: x_v2 = F.dropout(x_v2, p = self.dropout, training = self.training)
x_v2 = self.multihead_v2(x_v2, x_v2, x_v2)
if self.dropout > 0: meta = F.dropout(meta, p = self.dropout, training = self.training)
meta = self.t2v(meta)
meta = self.multihead_meta(meta, meta, meta) # [bs, n_sin, n_hid=n_meta]
meta = meta.view((-1, meta.size(1) * meta.size(2))) # [bs, emb]
# Fuse: GRU branch + attention branch + broadcast meta embedding, per frame.
x = torch.cat([x, x_v2, meta.unsqueeze(1).expand((-1,x.size(1),-1))],2)
# x = x.transpose(1,2)
# global_prob, norm_att, cla = self.att_block(x)
# return global_prob, cla
if self.dropout > 0: x = F.dropout(x, p = self.dropout, training = self.training)
frame_prob = torch.sigmoid(self.fc_prob(x)) # shape of frame_prob: (batch, time, output_size)
# Clamp so downstream log-losses stay finite.
frame_prob = torch.clamp(frame_prob, 1e-7, 1 - 1e-7)
if self.pooling == 'max':
global_prob, _ = frame_prob.max(dim = 1)
return global_prob, frame_prob
elif self.pooling == 'ave':
global_prob = frame_prob.mean(dim = 1)
return global_prob, frame_prob
elif self.pooling == 'lin':
global_prob = (frame_prob * frame_prob).sum(dim = 1) / frame_prob.sum(dim = 1)
return global_prob, frame_prob
elif self.pooling == 'exp':
global_prob = (frame_prob * frame_prob.exp()).sum(dim = 1) / frame_prob.exp().sum(dim = 1)
return global_prob, frame_prob
elif self.pooling == 'att':
frame_att = F.softmax(self.fc_att(x), dim = 1)
global_prob = (frame_prob * frame_att).sum(dim = 1)
return global_prob, frame_prob, frame_att
elif self.pooling == 'auto':
global_prob = self.autopool(frame_prob)
return global_prob, frame_prob | [
"torch.nn.Dropout",
"torch.bmm",
"torch.nn.functional.dropout",
"torch.nn.GroupNorm",
"torch.nn.init.constant_",
"torch.nn.Softmax",
"torchlibrosa.augmentation.SpecAugmentation",
"torch.no_grad",
"numpy.sqrt",
"numpy.power",
"torch.nn.LayerNorm",
"models.DCASE_baseline.AutoPool",
"torch.nn.L... | [((991, 1032), 'torch.nn.init.xavier_uniform_', 'nn.init.xavier_uniform_', (['self.conv.weight'], {}), '(self.conv.weight)\n', (1014, 1032), True, 'import torch.nn as nn\n'), ((1140, 1149), 'torch.nn.functional.relu', 'F.relu', (['x'], {}), '(x)\n', (1146, 1149), True, 'import torch.nn.functional as F\n'), ((2508, 2556), 'torch.nn.Linear', 'nn.Linear', (['self.embedding_size', 'self.output_size'], {}), '(self.embedding_size, self.output_size)\n', (2517, 2556), True, 'import torch.nn as nn\n'), ((2706, 2748), 'torch.nn.init.orthogonal_', 'nn.init.orthogonal_', (['self.gru.weight_ih_l0'], {}), '(self.gru.weight_ih_l0)\n', (2725, 2748), True, 'import torch.nn as nn\n'), ((2750, 2791), 'torch.nn.init.constant_', 'nn.init.constant_', (['self.gru.bias_ih_l0', '(0)'], {}), '(self.gru.bias_ih_l0, 0)\n', (2767, 2791), True, 'import torch.nn as nn\n'), ((2800, 2842), 'torch.nn.init.orthogonal_', 'nn.init.orthogonal_', (['self.gru.weight_hh_l0'], {}), '(self.gru.weight_hh_l0)\n', (2819, 2842), True, 'import torch.nn as nn\n'), ((2844, 2885), 'torch.nn.init.constant_', 'nn.init.constant_', (['self.gru.bias_hh_l0', '(0)'], {}), '(self.gru.bias_hh_l0, 0)\n', (2861, 2885), True, 'import torch.nn as nn\n'), ((2894, 2944), 'torch.nn.init.orthogonal_', 'nn.init.orthogonal_', (['self.gru.weight_ih_l0_reverse'], {}), '(self.gru.weight_ih_l0_reverse)\n', (2913, 2944), True, 'import torch.nn as nn\n'), ((2946, 2995), 'torch.nn.init.constant_', 'nn.init.constant_', (['self.gru.bias_ih_l0_reverse', '(0)'], {}), '(self.gru.bias_ih_l0_reverse, 0)\n', (2963, 2995), True, 'import torch.nn as nn\n'), ((3004, 3054), 'torch.nn.init.orthogonal_', 'nn.init.orthogonal_', (['self.gru.weight_hh_l0_reverse'], {}), '(self.gru.weight_hh_l0_reverse)\n', (3023, 3054), True, 'import torch.nn as nn\n'), ((3056, 3105), 'torch.nn.init.constant_', 'nn.init.constant_', (['self.gru.bias_hh_l0_reverse', '(0)'], {}), '(self.gru.bias_hh_l0_reverse, 0)\n', (3073, 3105), True, 'import torch.nn as 
nn\n'), ((3114, 3158), 'torch.nn.init.xavier_uniform_', 'nn.init.xavier_uniform_', (['self.fc_prob.weight'], {}), '(self.fc_prob.weight)\n', (3137, 3158), True, 'import torch.nn as nn\n'), ((3160, 3199), 'torch.nn.init.constant_', 'nn.init.constant_', (['self.fc_prob.bias', '(0)'], {}), '(self.fc_prob.bias, 0)\n', (3177, 3199), True, 'import torch.nn as nn\n'), ((4691, 4732), 'torch.clamp', 'torch.clamp', (['frame_prob', '(1e-07)', '(1 - 1e-07)'], {}), '(frame_prob, 1e-07, 1 - 1e-07)\n', (4702, 4732), False, 'import torch\n'), ((6655, 6679), 'torch.nn.Dropout', 'nn.Dropout', (['attn_dropout'], {}), '(attn_dropout)\n', (6665, 6679), True, 'import torch.nn as nn\n'), ((6703, 6720), 'torch.nn.Softmax', 'nn.Softmax', ([], {'dim': '(2)'}), '(dim=2)\n', (6713, 6720), True, 'import torch.nn as nn\n'), ((7019, 7037), 'torch.bmm', 'torch.bmm', (['attn', 'v'], {}), '(attn, v)\n', (7028, 7037), False, 'import torch\n'), ((7324, 7356), 'torch.nn.Linear', 'nn.Linear', (['d_model', '(n_head * d_k)'], {}), '(d_model, n_head * d_k)\n', (7333, 7356), True, 'import torch.nn as nn\n'), ((7377, 7409), 'torch.nn.Linear', 'nn.Linear', (['d_model', '(n_head * d_k)'], {}), '(d_model, n_head * d_k)\n', (7386, 7409), True, 'import torch.nn as nn\n'), ((7430, 7462), 'torch.nn.Linear', 'nn.Linear', (['d_model', '(n_head * d_v)'], {}), '(d_model, n_head * d_v)\n', (7439, 7462), True, 'import torch.nn as nn\n'), ((7942, 7963), 'torch.nn.LayerNorm', 'nn.LayerNorm', (['d_model'], {}), '(d_model)\n', (7954, 7963), True, 'import torch.nn as nn\n'), ((7983, 8015), 'torch.nn.Linear', 'nn.Linear', (['(n_head * d_v)', 'd_model'], {}), '(n_head * d_v, d_model)\n', (7992, 8015), True, 'import torch.nn as nn\n'), ((8024, 8062), 'torch.nn.init.xavier_normal_', 'nn.init.xavier_normal_', (['self.fc.weight'], {}), '(self.fc.weight)\n', (8046, 8062), True, 'import torch.nn as nn\n'), ((8122, 8141), 'torch.nn.Dropout', 'nn.Dropout', (['dropout'], {}), '(dropout)\n', (8132, 8141), True, 'import torch.nn as 
nn\n'), ((10111, 10141), 'torch.nn.functional.linear', 'F.linear', (['x', 'weight', 'self.bias'], {}), '(x, weight, self.bias)\n', (10119, 10141), True, 'import torch.nn.functional as F\n'), ((10458, 10488), 'torch.nn.MaxPool2d', 'nn.MaxPool2d', (['self.pool_stride'], {}), '(self.pool_stride)\n', (10470, 10488), True, 'import torch.nn as nn\n'), ((10512, 10542), 'torch.nn.AvgPool2d', 'nn.AvgPool2d', (['self.pool_stride'], {}), '(self.pool_stride)\n', (10524, 10542), True, 'import torch.nn as nn\n'), ((14538, 14570), 'torch.nn.init.orthogonal_', 'nn.init.orthogonal_', (['self.weight'], {}), '(self.weight)\n', (14557, 14570), True, 'import torch.nn as nn\n'), ((14949, 15038), 'torch.nn.functional.conv2d', 'F.conv2d', (['x', 'weight', 'self.bias', 'self.stride', 'self.padding', 'self.dilation', 'self.groups'], {}), '(x, weight, self.bias, self.stride, self.padding, self.dilation,\n self.groups)\n', (14957, 15038), True, 'import torch.nn.functional as F\n'), ((23147, 23185), 'models.Time2vec.Time2Vec', 'Time2Vec', (['self.num_meta', 'self.meta_emb'], {}), '(self.num_meta, self.meta_emb)\n', (23155, 23185), False, 'from models.Time2vec import Time2Vec\n'), ((27208, 27307), 'torchlibrosa.augmentation.SpecAugmentation', 'SpecAugmentation', ([], {'time_drop_width': '(64)', 'time_stripes_num': '(2)', 'freq_drop_width': '(8)', 'freq_stripes_num': '(2)'}), '(time_drop_width=64, time_stripes_num=2, freq_drop_width=8,\n freq_stripes_num=2)\n', (27224, 27307), False, 'from torchlibrosa.augmentation import SpecAugmentation\n'), ((27323, 27341), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(64)'], {}), '(64)\n', (27337, 27341), True, 'import torch.nn as nn\n'), ((27386, 27424), 'models.Time2vec.Time2Vec', 'Time2Vec', (['self.num_meta', 'self.meta_emb'], {}), '(self.num_meta, self.meta_emb)\n', (27394, 27424), False, 'from models.Time2vec import Time2Vec\n'), ((27989, 28078), 'torch.nn.Linear', 'nn.Linear', (['(self.embedding_size * 2 + self.meta_emb * self.num_meta)', 
'self.output_size'], {}), '(self.embedding_size * 2 + self.meta_emb * self.num_meta, self.\n output_size)\n', (27998, 28078), True, 'import torch.nn as nn\n'), ((28267, 28309), 'torch.nn.init.orthogonal_', 'nn.init.orthogonal_', (['self.gru.weight_ih_l0'], {}), '(self.gru.weight_ih_l0)\n', (28286, 28309), True, 'import torch.nn as nn\n'), ((28311, 28352), 'torch.nn.init.constant_', 'nn.init.constant_', (['self.gru.bias_ih_l0', '(0)'], {}), '(self.gru.bias_ih_l0, 0)\n', (28328, 28352), True, 'import torch.nn as nn\n'), ((28361, 28403), 'torch.nn.init.orthogonal_', 'nn.init.orthogonal_', (['self.gru.weight_hh_l0'], {}), '(self.gru.weight_hh_l0)\n', (28380, 28403), True, 'import torch.nn as nn\n'), ((28405, 28446), 'torch.nn.init.constant_', 'nn.init.constant_', (['self.gru.bias_hh_l0', '(0)'], {}), '(self.gru.bias_hh_l0, 0)\n', (28422, 28446), True, 'import torch.nn as nn\n'), ((28455, 28505), 'torch.nn.init.orthogonal_', 'nn.init.orthogonal_', (['self.gru.weight_ih_l0_reverse'], {}), '(self.gru.weight_ih_l0_reverse)\n', (28474, 28505), True, 'import torch.nn as nn\n'), ((28507, 28556), 'torch.nn.init.constant_', 'nn.init.constant_', (['self.gru.bias_ih_l0_reverse', '(0)'], {}), '(self.gru.bias_ih_l0_reverse, 0)\n', (28524, 28556), True, 'import torch.nn as nn\n'), ((28565, 28615), 'torch.nn.init.orthogonal_', 'nn.init.orthogonal_', (['self.gru.weight_hh_l0_reverse'], {}), '(self.gru.weight_hh_l0_reverse)\n', (28584, 28615), True, 'import torch.nn as nn\n'), ((28617, 28666), 'torch.nn.init.constant_', 'nn.init.constant_', (['self.gru.bias_hh_l0_reverse', '(0)'], {}), '(self.gru.bias_hh_l0_reverse, 0)\n', (28634, 28666), True, 'import torch.nn as nn\n'), ((28675, 28719), 'torch.nn.init.xavier_uniform_', 'nn.init.xavier_uniform_', (['self.fc_prob.weight'], {}), '(self.fc_prob.weight)\n', (28698, 28719), True, 'import torch.nn as nn\n'), ((28721, 28760), 'torch.nn.init.constant_', 'nn.init.constant_', (['self.fc_prob.bias', '(0)'], {}), '(self.fc_prob.bias, 0)\n', 
(28738, 28760), True, 'import torch.nn as nn\n'), ((31709, 31750), 'torch.clamp', 'torch.clamp', (['frame_prob', '(1e-07)', '(1 - 1e-07)'], {}), '(frame_prob, 1e-07, 1 - 1e-07)\n', (31720, 31750), False, 'import torch\n'), ((1195, 1228), 'torch.nn.functional.max_pool2d', 'F.max_pool2d', (['x', 'self.pool_stride'], {}), '(x, self.pool_stride)\n', (1207, 1228), True, 'import torch.nn.functional as F\n'), ((2617, 2665), 'torch.nn.Linear', 'nn.Linear', (['self.embedding_size', 'self.output_size'], {}), '(self.embedding_size, self.output_size)\n', (2626, 2665), True, 'import torch.nn as nn\n'), ((3246, 3289), 'torch.nn.init.xavier_uniform_', 'nn.init.xavier_uniform_', (['self.fc_att.weight'], {}), '(self.fc_att.weight)\n', (3269, 3289), True, 'import torch.nn as nn\n'), ((3291, 3329), 'torch.nn.init.constant_', 'nn.init.constant_', (['self.fc_att.bias', '(0)'], {}), '(self.fc_att.bias, 0)\n', (3308, 3329), True, 'import torch.nn as nn\n'), ((3393, 3419), 'models.DCASE_baseline.AutoPool', 'AutoPool', (['self.output_size'], {}), '(self.output_size)\n', (3401, 3419), False, 'from models.DCASE_baseline import AutoPool\n'), ((4211, 4263), 'torch.nn.functional.dropout', 'F.dropout', (['x'], {'p': 'self.dropout', 'training': 'self.training'}), '(x, p=self.dropout, training=self.training)\n', (4220, 4263), True, 'import torch.nn.functional as F\n'), ((4451, 4503), 'torch.nn.functional.dropout', 'F.dropout', (['x'], {'p': 'self.dropout', 'training': 'self.training'}), '(x, p=self.dropout, training=self.training)\n', (4460, 4503), True, 'import torch.nn.functional as F\n'), ((11328, 11374), 'torch.nn.Linear', 'nn.Linear', (['self.in_features', 'self.out_features'], {}), '(self.in_features, self.out_features)\n', (11337, 11374), True, 'import torch.nn as nn\n'), ((11387, 11430), 'torch.nn.init.xavier_uniform_', 'nn.init.xavier_uniform_', (['self.fc_att.weight'], {}), '(self.fc_att.weight)\n', (11410, 11430), True, 'import torch.nn as nn\n'), ((11432, 11470), 
'torch.nn.init.constant_', 'nn.init.constant_', (['self.fc_att.bias', '(0)'], {}), '(self.fc_att.bias, 0)\n', (11449, 11470), True, 'import torch.nn as nn\n'), ((13468, 13527), 'torch.nn.GroupNorm', 'nn.GroupNorm', ([], {'num_channels': 'self.out_channels', 'num_groups': '(32)'}), '(num_channels=self.out_channels, num_groups=32)\n', (13480, 13527), True, 'import torch.nn as nn\n'), ((13799, 13808), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (13806, 13808), True, 'import torch.nn as nn\n'), ((14274, 14304), 'torch.nn.MaxPool2d', 'nn.MaxPool2d', (['self.pool_stride'], {}), '(self.pool_stride)\n', (14286, 14304), True, 'import torch.nn as nn\n'), ((19335, 19387), 'torch.nn.functional.dropout', 'F.dropout', (['x'], {'p': 'self.dropout', 'training': 'self.training'}), '(x, p=self.dropout, training=self.training)\n', (19344, 19387), True, 'import torch.nn.functional as F\n'), ((19575, 19627), 'torch.nn.functional.dropout', 'F.dropout', (['x'], {'p': 'self.dropout', 'training': 'self.training'}), '(x, p=self.dropout, training=self.training)\n', (19584, 19627), True, 'import torch.nn.functional as F\n'), ((24643, 24695), 'torch.nn.functional.dropout', 'F.dropout', (['x'], {'p': 'self.dropout', 'training': 'self.training'}), '(x, p=self.dropout, training=self.training)\n', (24652, 24695), True, 'import torch.nn.functional as F\n'), ((24883, 24935), 'torch.nn.functional.dropout', 'F.dropout', (['x'], {'p': 'self.dropout', 'training': 'self.training'}), '(x, p=self.dropout, training=self.training)\n', (24892, 24935), True, 'import torch.nn.functional as F\n'), ((28134, 28223), 'torch.nn.Linear', 'nn.Linear', (['(self.embedding_size * 2 + self.meta_emb * self.num_meta)', 'self.output_size'], {}), '(self.embedding_size * 2 + self.meta_emb * self.num_meta, self.\n output_size)\n', (28143, 28223), True, 'import torch.nn as nn\n'), ((28807, 28850), 'torch.nn.init.xavier_uniform_', 'nn.init.xavier_uniform_', (['self.fc_att.weight'], {}), '(self.fc_att.weight)\n', (28830, 28850), 
True, 'import torch.nn as nn\n'), ((28852, 28890), 'torch.nn.init.constant_', 'nn.init.constant_', (['self.fc_att.bias', '(0)'], {}), '(self.fc_att.bias, 0)\n', (28869, 28890), True, 'import torch.nn as nn\n'), ((28954, 28980), 'models.DCASE_baseline.AutoPool', 'AutoPool', (['self.output_size'], {}), '(self.output_size)\n', (28962, 28980), False, 'from models.DCASE_baseline import AutoPool\n'), ((29963, 30018), 'torch.nn.functional.dropout', 'F.dropout', (['x'], {'p': 'self.dropout_AS', 'training': 'self.training'}), '(x, p=self.dropout_AS, training=self.training)\n', (29972, 30018), True, 'import torch.nn.functional as F\n'), ((30752, 30807), 'torch.nn.functional.dropout', 'F.dropout', (['x_v2'], {'p': 'self.dropout', 'training': 'self.training'}), '(x_v2, p=self.dropout, training=self.training)\n', (30761, 30807), True, 'import torch.nn.functional as F\n'), ((30903, 30958), 'torch.nn.functional.dropout', 'F.dropout', (['meta'], {'p': 'self.dropout', 'training': 'self.training'}), '(meta, p=self.dropout, training=self.training)\n', (30912, 30958), True, 'import torch.nn.functional as F\n'), ((31469, 31521), 'torch.nn.functional.dropout', 'F.dropout', (['x'], {'p': 'self.dropout', 'training': 'self.training'}), '(x, p=self.dropout, training=self.training)\n', (31478, 31521), True, 'import torch.nn.functional as F\n'), ((3673, 3725), 'torch.nn.functional.dropout', 'F.dropout', (['x'], {'p': 'self.dropout', 'training': 'self.training'}), '(x, p=self.dropout, training=self.training)\n', (3682, 3725), True, 'import torch.nn.functional as F\n'), ((6034, 6049), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (6047, 6049), False, 'import torch\n'), ((7517, 7547), 'numpy.sqrt', 'np.sqrt', (['(2.0 / (d_model + d_k))'], {}), '(2.0 / (d_model + d_k))\n', (7524, 7547), True, 'import numpy as np\n'), ((7603, 7633), 'numpy.sqrt', 'np.sqrt', (['(2.0 / (d_model + d_k))'], {}), '(2.0 / (d_model + d_k))\n', (7610, 7633), True, 'import numpy as np\n'), ((7689, 7719), 
'numpy.sqrt', 'np.sqrt', (['(2.0 / (d_model + d_v))'], {}), '(2.0 / (d_model + d_v))\n', (7696, 7719), True, 'import numpy as np\n'), ((7896, 7914), 'numpy.power', 'np.power', (['d_k', '(0.5)'], {}), '(d_k, 0.5)\n', (7904, 7914), True, 'import numpy as np\n'), ((9994, 10023), 'torch.norm', 'torch.norm', (['self.weight', '(2)', '(0)'], {}), '(self.weight, 2, 0)\n', (10004, 10023), False, 'import torch\n'), ((11536, 11563), 'models.DCASE_baseline.AutoPool', 'AutoPool', (['self.out_features'], {}), '(self.out_features)\n', (11544, 11563), False, 'from models.DCASE_baseline import AutoPool\n'), ((13613, 13642), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['self.n_output'], {}), '(self.n_output)\n', (13627, 13642), True, 'import torch.nn as nn\n'), ((13874, 13884), 'torch.nn.PReLU', 'nn.PReLU', ([], {}), '()\n', (13882, 13884), True, 'import torch.nn as nn\n'), ((14364, 14394), 'torch.nn.AvgPool2d', 'nn.AvgPool2d', (['self.pool_stride'], {}), '(self.pool_stride)\n', (14376, 14394), True, 'import torch.nn as nn\n'), ((18797, 18849), 'torch.nn.functional.dropout', 'F.dropout', (['x'], {'p': 'self.dropout', 'training': 'self.training'}), '(x, p=self.dropout, training=self.training)\n', (18806, 18849), True, 'import torch.nn.functional as F\n'), ((24105, 24157), 'torch.nn.functional.dropout', 'F.dropout', (['x'], {'p': 'self.dropout', 'training': 'self.training'}), '(x, p=self.dropout, training=self.training)\n', (24114, 24157), True, 'import torch.nn.functional as F\n'), ((29419, 29474), 'torch.nn.functional.dropout', 'F.dropout', (['x'], {'p': 'self.dropout_AS', 'training': 'self.training'}), '(x, p=self.dropout_AS, training=self.training)\n', (29428, 29474), True, 'import torch.nn.functional as F\n'), ((30190, 30245), 'torch.nn.functional.dropout', 'F.dropout', (['x_v2'], {'p': 'self.dropout', 'training': 'self.training'}), '(x_v2, p=self.dropout, training=self.training)\n', (30199, 30245), True, 'import torch.nn.functional as F\n'), ((13956, 13970), 'torch.nn.LeakyReLU', 
'nn.LeakyReLU', ([], {}), '()\n', (13968, 13970), True, 'import torch.nn as nn\n'), ((10051, 10071), 'torch.norm', 'torch.norm', (['x', '(2)', '(-1)'], {}), '(x, 2, -1)\n', (10061, 10071), False, 'import torch\n'), ((14036, 14042), 'activation.mish.Mish', 'Mish', ([], {}), '()\n', (14040, 14042), False, 'from activation.mish import Mish\n'), ((6084, 6121), 'torch.from_numpy', 'torch.from_numpy', (['x[i:i + batch_size]'], {}), '(x[i:i + batch_size])\n', (6100, 6121), False, 'import torch\n')] |
import numpy as np
from numpy.testing import (assert_almost_equal, assert_equal, assert_raises)
from statsmodels.base.transform import (BoxCox)
from statsmodels.datasets import macrodata
class TestTransform:
    """Tests for the Box-Cox transform in ``statsmodels.base.transform``."""

    @classmethod
    def setup_class(cls):
        # Use the real GDP series from the macrodata set as test data.
        dataset = macrodata.load_pandas()
        cls.x = dataset.data['realgdp'].values
        cls.bc = BoxCox()

    def test_nonpositive(self):
        """Negative or zero observations must be rejected."""
        for bad in ([1, -1, 1], [1, 0, 1]):
            assert_raises(ValueError, self.bc.transform_boxcox, bad)

    def test_invalid_bounds(self):
        """Bounds must be a (lower, upper) pair with lower < upper."""
        # a three-element bound specification is not allowed
        assert_raises(ValueError, self.bc._est_lambda, self.x, (-3, 2, 3))
        # nor is an upper bound at or below the lower bound
        assert_raises(ValueError, self.bc._est_lambda, self.x, (2, -1))

    def test_unclear_methods(self):
        """Unknown method names raise for both estimation and inversion."""
        assert_raises(ValueError, self.bc._est_lambda,
                      self.x, (-1, 2), 'test')
        assert_raises(ValueError, self.bc.untransform_boxcox,
                      self.x, 1, 'test')

    def test_unclear_scale_parameter(self):
        """Only 'mad' and 'sd' (case-insensitive) are valid scale options."""
        assert_raises(ValueError, self.bc._est_lambda,
                      self.x, scale='test')
        # every accepted spelling should run without error
        for scale in ('mad', 'MAD', 'sd', 'SD'):
            self.bc._est_lambda(self.x, scale=scale)

    def test_valid_guerrero(self):
        """Compare against R's ``BoxCox.lambda(x, method="guerrero")``."""
        # window_length=4 mirrors a quarterly ts object in R, per
        # Guerrero and Perera (2004)
        lam = self.bc._est_lambda(self.x, method='guerrero', window_length=4)
        assert_almost_equal(lam, 0.507624, 4)
        # and the default grouping parameter (window_length=2)
        lam = self.bc._est_lambda(self.x, method='guerrero', window_length=2)
        assert_almost_equal(lam, 0.513893, 4)

    def test_guerrero_robust_scale(self):
        # lambda taken from a manual check of the MAD values; compare
        # the standard-deviation result on R=4: 0.5076, i.e. almost
        # the same value
        lam = self.bc._est_lambda(self.x, scale='mad')
        assert_almost_equal(lam, 0.488621, 4)

    def test_loglik_lambda_estimation(self):
        # R's `BoxCox.lambda(x, method="loglik")` returns 0.2
        lam = self.bc._est_lambda(self.x, method='loglik')
        assert_almost_equal(lam, 0.2, 1)

    def test_boxcox_transformation_methods(self):
        """Estimated vs. provided lambda should nearly agree (value from R)."""
        auto = self.bc.transform_boxcox(self.x)
        fixed = self.bc.transform_boxcox(self.x, 0.507624)
        assert_almost_equal(auto[0], fixed[0], 3)
        # a perfectly increasing set has constant variance over the
        # entire series, so stabilising should yield lambda = 1
        _, lam = self.bc.transform_boxcox(np.arange(1, 100))
        assert_almost_equal(lam, 1., 5)

    def test_zero_lambda(self):
        """lambda = 0 reduces the Box-Cox transform to a log transform."""
        transformed, lam = self.bc.transform_boxcox(self.x, 0.)
        assert_equal(lam, 0.)
        assert_almost_equal(transformed, np.log(self.x), 5)

    def test_naive_back_transformation(self):
        """Naive untransform round-trips the data for lambda 0 and 0.5."""
        for lam in (0., .5):
            forward = self.bc.transform_boxcox(self.x, lam)
            back = self.bc.untransform_boxcox(*forward, method='naive')
            assert_almost_equal(self.x, back, 5)
| [
"numpy.log",
"numpy.testing.assert_raises",
"numpy.testing.assert_almost_equal",
"statsmodels.base.transform.BoxCox",
"numpy.arange",
"numpy.testing.assert_equal",
"statsmodels.datasets.macrodata.load_pandas"
] | [((269, 292), 'statsmodels.datasets.macrodata.load_pandas', 'macrodata.load_pandas', ([], {}), '()\n', (290, 292), False, 'from statsmodels.datasets import macrodata\n'), ((354, 362), 'statsmodels.base.transform.BoxCox', 'BoxCox', ([], {}), '()\n', (360, 362), False, 'from statsmodels.base.transform import BoxCox\n'), ((461, 515), 'numpy.testing.assert_raises', 'assert_raises', (['ValueError', 'self.bc.transform_boxcox', 'y'], {}), '(ValueError, self.bc.transform_boxcox, y)\n', (474, 515), False, 'from numpy.testing import assert_almost_equal, assert_equal, assert_raises\n'), ((573, 627), 'numpy.testing.assert_raises', 'assert_raises', (['ValueError', 'self.bc.transform_boxcox', 'y'], {}), '(ValueError, self.bc.transform_boxcox, y)\n', (586, 627), False, 'from numpy.testing import assert_almost_equal, assert_equal, assert_raises\n'), ((703, 769), 'numpy.testing.assert_raises', 'assert_raises', (['ValueError', 'self.bc._est_lambda', 'self.x', '(-3, 2, 3)'], {}), '(ValueError, self.bc._est_lambda, self.x, (-3, 2, 3))\n', (716, 769), False, 'from numpy.testing import assert_almost_equal, assert_equal, assert_raises\n'), ((816, 879), 'numpy.testing.assert_raises', 'assert_raises', (['ValueError', 'self.bc._est_lambda', 'self.x', '(2, -1)'], {}), '(ValueError, self.bc._est_lambda, self.x, (2, -1))\n', (829, 879), False, 'from numpy.testing import assert_almost_equal, assert_equal, assert_raises\n'), ((1024, 1095), 'numpy.testing.assert_raises', 'assert_raises', (['ValueError', 'self.bc._est_lambda', 'self.x', '(-1, 2)', '"""test"""'], {}), "(ValueError, self.bc._est_lambda, self.x, (-1, 2), 'test')\n", (1037, 1095), False, 'from numpy.testing import assert_almost_equal, assert_equal, assert_raises\n'), ((1126, 1198), 'numpy.testing.assert_raises', 'assert_raises', (['ValueError', 'self.bc.untransform_boxcox', 'self.x', '(1)', '"""test"""'], {}), "(ValueError, self.bc.untransform_boxcox, self.x, 1, 'test')\n", (1139, 1198), False, 'from numpy.testing import 
assert_almost_equal, assert_equal, assert_raises\n'), ((1382, 1450), 'numpy.testing.assert_raises', 'assert_raises', (['ValueError', 'self.bc._est_lambda', 'self.x'], {'scale': '"""test"""'}), "(ValueError, self.bc._est_lambda, self.x, scale='test')\n", (1395, 1450), False, 'from numpy.testing import assert_almost_equal, assert_equal, assert_raises\n'), ((2020, 2059), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (['lmbda', '(0.507624)', '(4)'], {}), '(lmbda, 0.507624, 4)\n', (2039, 2059), False, 'from numpy.testing import assert_almost_equal, assert_equal, assert_raises\n'), ((2275, 2314), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (['lmbda', '(0.513893)', '(4)'], {}), '(lmbda, 0.513893, 4)\n', (2294, 2314), False, 'from numpy.testing import assert_almost_equal, assert_equal, assert_raises\n'), ((2617, 2656), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (['lmbda', '(0.488621)', '(4)'], {}), '(lmbda, 0.488621, 4)\n', (2636, 2656), False, 'from numpy.testing import assert_almost_equal, assert_equal, assert_raises\n'), ((2847, 2881), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (['lmbda', '(0.2)', '(1)'], {}), '(lmbda, 0.2, 1)\n', (2866, 2881), False, 'from numpy.testing import assert_almost_equal, assert_equal, assert_raises\n'), ((3213, 3288), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (['y_transformed_no_lambda[0]', 'y_transformed_lambda[0]', '(3)'], {}), '(y_transformed_no_lambda[0], y_transformed_lambda[0], 3)\n', (3232, 3288), False, 'from numpy.testing import assert_almost_equal, assert_equal, assert_raises\n'), ((3546, 3580), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (['lmbda', '(1.0)', '(5)'], {}), '(lmbda, 1.0, 5)\n', (3565, 3580), False, 'from numpy.testing import assert_almost_equal, assert_equal, assert_raises\n'), ((3749, 3773), 'numpy.testing.assert_equal', 'assert_equal', (['lmbda', '(0.0)'], {}), '(lmbda, 0.0)\n', (3761, 3773), False, 'from numpy.testing 
import assert_almost_equal, assert_equal, assert_raises\n'), ((4363, 4411), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (['self.x', 'y_zero_lambda_un', '(5)'], {}), '(self.x, y_zero_lambda_un, 5)\n', (4382, 4411), False, 'from numpy.testing import assert_almost_equal, assert_equal, assert_raises\n'), ((4420, 4468), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (['self.x', 'y_half_lambda_un', '(5)'], {}), '(self.x, y_half_lambda_un, 5)\n', (4439, 4468), False, 'from numpy.testing import assert_almost_equal, assert_equal, assert_raises\n'), ((3519, 3536), 'numpy.arange', 'np.arange', (['(1)', '(100)'], {}), '(1, 100)\n', (3528, 3536), True, 'import numpy as np\n'), ((3826, 3840), 'numpy.log', 'np.log', (['self.x'], {}), '(self.x)\n', (3832, 3840), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
import scipy.linalg as la
import numpy as np
from pyyeti import expmint
from ._base_ode_class import _BaseODE
# FIXME: We need the str/repr formatting used in Numpy < 1.14.
try:
    # The ``legacy`` keyword first appeared in numpy 1.14; presumably on
    # older numpy this call raises TypeError, in which case the default
    # formatting is already the pre-1.14 style and the failure is ignored.
    np.set_printoptions(legacy="1.13")
except TypeError:
    pass
class SolveExp2(_BaseODE):
r"""
2nd order ODE time domain solver based on the matrix exponential.
This class is for solving matrix equations of motion:
.. math::
M \ddot{q} + B \dot{q} + K q = F
The 2nd order ODE set of equations are transformed into the
1st order ODE:
.. math::
\left\{
\begin{array}{c} \ddot{q} \\ \dot{q} \end{array}
\right\} - \left[
\begin{array}{cc} -M^{-1} B & -M^{-1} K \\ I & 0 \end{array}
\right] \left\{
\begin{array}{c} \dot{q} \\ q \end{array}
\right\} = \left\{
\begin{array}{c} M^{-1} F \\ 0 \end{array} \right\}
or:
.. math::
\dot{y} - A y = w
The general solution is:
.. math::
y = e^{A t} \left (
y(0) + \int_0^t {e^{-A \tau} w d\tau}
\right )
By only requiring the solution at every time step and assuming a
constant step size of `h`:
.. math::
y_{n+1} = e^{A h} \left (
y_{n} + \int_0^h {e^{-A \tau} w(t_n+\tau) d\tau}
\right )
By assuming :math:`w(t_n+\tau)` is piece-wise linear (if `order`
is 1) or piece-wise constant (if `order` is 0) for each step, an
exact, closed form solution can be calculated. The function
:func:`pyyeti.expmint.getEPQ` computes the matrix exponential
:math:`E = e^{A h}`, and solves the integral(s) needed to
compute `P` and `Q` so that a solution can be computed by:
.. math::
y_{n+1} = E y_{n} + P w_{n} + Q w_{n+1}
Unlike for the uncoupled solver :class:`SolveUnc`, this solver
doesn't need or use the `rb` input unless static initial
conditions are requested when solving equations.
Note that :class:`SolveUnc` is also an exact solver assuming
piece-wise linear or piece-wise constant forces. :class:`SolveUnc`
is often faster than :class:`SolveExp2` since it uncouples the
equations and therefore doesn't need to work with matrices in the
inner loop. However, it is recommended to experiment with both
solvers for any particular application.
.. note::
The above equations are for the non-residual-flexibility
modes. The 'rf' modes are solved statically and any initial
conditions are ignored for them.
For a static solution:
- rigid-body displacements = zeros
     - elastic displacements = inv(k[elastic]) * F[elastic]
- velocity = zeros
- rigid-body accelerations = inv(m[rigid]) * F[rigid]
- elastic accelerations = zeros
See also
--------
:class:`SolveUnc`.
Examples
--------
.. plot::
:context: close-figs
>>> from pyyeti import ode
>>> import numpy as np
>>> m = np.array([10., 30., 30., 30.]) # diag mass
>>> k = np.array([0., 6.e5, 6.e5, 6.e5]) # diag stiffness
>>> zeta = np.array([0., .05, 1., 2.]) # percent damp
>>> b = 2.*zeta*np.sqrt(k/m)*m # diag damping
>>> h = 0.001 # time step
>>> t = np.arange(0, .3001, h) # time vector
>>> c = 2*np.pi
>>> f = np.vstack((3*(1-np.cos(c*2*t)),
... 4.5*(np.cos(np.sqrt(k[1]/m[1])*t)),
... 4.5*(np.cos(np.sqrt(k[2]/m[2])*t)),
... 4.5*(np.cos(np.sqrt(k[3]/m[3])*t))))
>>> f *= 1.e4
>>> ts = ode.SolveExp2(m, b, k, h)
>>> sol = ts.tsolve(f, static_ic=1)
Solve with scipy.signal.lsim for comparison:
>>> A = ode.make_A(m, b, k)
>>> n = len(m)
>>> Z = np.zeros((n, n), float)
>>> B = np.vstack((np.eye(n), Z))
>>> C = np.vstack((A, np.hstack((Z, np.eye(n)))))
>>> D = np.vstack((B, Z))
>>> ic = np.hstack((sol.v[:, 0], sol.d[:, 0]))
>>> import scipy.signal
>>> f2 = (1./m).reshape(-1, 1) * f
>>> tl, yl, xl = scipy.signal.lsim((A, B, C, D), f2.T, t,
... X0=ic)
>>> print('acce cmp:', np.allclose(yl[:, :n], sol.a.T))
acce cmp: True
>>> print('velo cmp:', np.allclose(yl[:, n:2*n], sol.v.T))
velo cmp: True
>>> print('disp cmp:', np.allclose(yl[:, 2*n:], sol.d.T))
disp cmp: True
Plot the four accelerations:
>>> import matplotlib.pyplot as plt
>>> fig = plt.figure('Example', figsize=[8, 8])
>>> fig.clf()
>>> labels = ['Rigid-body', 'Underdamped',
... 'Critically Damped', 'Overdamped']
>>> for j, name in zip(range(4), labels):
... _ = plt.subplot(4, 1, j+1)
... _ = plt.plot(t, sol.a[j], label='SolveExp2')
... _ = plt.plot(tl, yl[:, j], label='scipy lsim')
... _ = plt.title(name)
... _ = plt.ylabel('Acceleration')
... _ = plt.xlabel('Time (s)')
... if j == 0:
... _ = plt.legend(loc='best')
>>> fig.tight_layout()
"""
def __init__(self, m, b, k, h, rb=None, rf=None, order=1, pre_eig=False):
"""
Instantiates a :class:`SolveExp2` solver.
Parameters
----------
m : 1d or 2d ndarray or None
Mass; vector (of diagonal), or full; if None, mass is
assumed identity
b : 1d or 2d ndarray
Damping; vector (of diagonal), or full
k : 1d or 2d ndarray
Stiffness; vector (of diagonal), or full
h : scalar or None
Time step; can be None if only want to solve a static
problem
rb : 1d array or None; optional
Index or bool partition vector for rigid-body modes. Set
to [] to specify no rigid-body modes. If None, the
rigid-body modes will be automatically detected by this
logic for uncoupled systems::
rb = np.nonzero(abs(k).max(0) < 0.005)[0]
And by this logic for coupled systems::
rb = ((abs(k).max(axis=0) < 0.005) &
(abs(k).max(axis=1) < 0.005) &
(abs(b).max(axis=0) < 0.005) &
(abs(b).max(axis=1) < 0.005)).nonzero()[0]
.. note::
`rb` applies only to modal-space equations. Use
`pre_eig` if necessary to convert to modal-space. This
means that if `rb` is an index vector, it specifies
the rigid-body modes *after* the `pre_eig`
operation. See also `pre_eig`.
.. note::
Unlike for the :class:`SolveUnc` solver, `rb` for this
solver is only used if using static initial conditions
in :func:`SolveExp2.tsolve`.
rf : 1d array or None; optional
Index or bool partition vector for residual-flexibility
modes; these will be solved statically. As for the `rb`
option, the `rf` option only applies to modal space
equations (possibly after the `pre_eig` operation).
order : integer; optional
Specify which solver to use:
- 0 for the zero order hold (force stays constant across
time step)
- 1 for the 1st order hold (force can vary linearly across
time step)
pre_eig : bool; optional
If True, an eigensolution will be computed with the mass
and stiffness matrices to convert the system to modal
space. This will allow the automatic detection of
rigid-body modes which is necessary for specifying
"static" initial conditions when calling the solver. No
modes are truncated. Only works if stiffness is
symmetric/hermitian and mass is positive definite (see
:func:`scipy.linalg.eigh`). Just leave it as False if the
equations are already in modal space or if not using
"static" initial conditions.
Notes
-----
The instance is populated with the following members:
========= ===================================================
Member Description
========= ===================================================
m mass for the non-rf modes
b damping for the non-rf modes
k stiffness for the non-rf modes
h time step
rb index vector or slice for the rb modes
el index vector or slice for the el modes
rf index vector or slice for the rf modes
_rb index vector or slice for the rb modes relative to
the non-rf modes
_el index vector or slice for the el modes relative to
the non-rf modes
nonrf index vector or slice for the non-rf modes
kdof index vector or slice for the non-rf modes
n number of total DOF
rbsize number of rb modes
elsize number of el modes
rfsize number of rf modes
nonrfsz number of non-rf modes
ksize number of non-rf modes
invm decomposition of m for the non-rf, non-rb modes
krf stiffness for the rf modes
ikrf inverse of stiffness for the rf modes
unc True if there are no off-diagonal terms in any
matrix; False otherwise
order order of solver (0 or 1; see above)
E_vv partition of "E" which is output of
:func:`pyyeti.expmint.getEPQ`
E_vd another partition of "E"
E_dv another partition of "E"
E_dd another partition of "E"
P, Q output of :func:`pyyeti.expmint.getEPQ`; they are
matrices used to solve the ODE
pc True if E*, P, and Q member have been calculated;
False otherwise
pre_eig True if the "pre" eigensolution was done; False
otherwise
phi the mode shape matrix from the "pre"
eigensolution; only present if `pre_eig` is True
========= ===================================================
The E, P, and Q entries are used to solve the ODE::
for j in range(1, nt):
d[:, j] = E*d[:, j-1] + P*F[:, j-1] + Q*F[:, j]
"""
self._common_precalcs(m, b, k, h, rb, rf, pre_eig)
self._inv_m()
self.order = order
ksize = self.ksize
if h and ksize > 0:
A = self._build_A()
E, P, Q = expmint.getEPQ(A, h, order, half=True)
self.P = P
self.Q = Q
# In state-space, the solution is:
# y[n+1] = E @ y[n] + pqf[n, n+1]
# Put in terms of `d` and `v`:
# y = [v; d]
# [v[n+1]; d[n+1]] = [E_v, E_d] @ [v[n]; d[n]] +
# pqf[n, n+1]
# v[n+1] = [E_vv, E_vd] @ [v[n]; d[n]] +
# pqf_v[n, n+1]
# = E_vv @ v[n] + E_vd @ d[n] + pqf_v[n, n+1]
# d[n+1] = [E_dv, E_dd] @ [v[n]; d[n]] +
# pqf_v[n, n+1]
# = E_dv @ v[n] + E_dd @ d[n] + pqf_d[n, n+1]
# copy for faster multiplies:
self.E_vv = E[:ksize, :ksize].copy()
self.E_vd = E[:ksize, ksize:].copy()
self.E_dv = E[ksize:, :ksize].copy()
self.E_dd = E[ksize:, ksize:].copy()
self.pc = True
else:
self.pc = False
self._mk_slices() # dorbel=False)
    def tsolve(self, force, d0=None, v0=None, static_ic=False):
        """
        Solve time-domain 2nd order ODE equations

        Parameters
        ----------
        force : 2d ndarray
            The force matrix; ndof x time
        d0 : 1d ndarray; optional
            Displacement initial conditions; if None, zero ic's are
            used unless `static_ic` is True.
        v0 : 1d ndarray; optional
            Velocity initial conditions; if None, zero ic's are used.
        static_ic : bool; optional
            If True and `d0` is None, then `d0` is calculated such
            that static (steady-state) initial conditions are used. Be
            sure to use the "pre_eig" option to put equations in modal
            space if necessary: for static initial conditions, the
            rigid-body part is initialized to 0.0 and the elastic part
            is computed such that the system is in static equilibrium
            (from the elastic part of ``K x = F``).

            .. note::
                `static_ic` is quietly ignored if `d0` is not None.

        Returns
        -------
        A record (SimpleNamespace class) with the members:

        d : 2d ndarray
            Displacement; ndof x time
        v : 2d ndarray
            Velocity; ndof x time
        a : 2d ndarray
            Acceleration; ndof x time
        h : scalar
            Time-step
        t : 1d ndarray
            Time vector: np.arange(d.shape[1])*h
        """
        force = np.atleast_2d(force)
        # allocate outputs, apply initial conditions (column 0) and
        # solve any rf modes statically
        d, v, a, force = self._init_dva(force, d0, v0, static_ic)
        ksize = self.ksize
        if ksize > 0:  # any non-rf modes to time-integrate?
            nt = force.shape[1]
            if nt > 1:
                kdof = self.kdof
                # Partition out the non-rf DOF. If `kdof` is a slice,
                # D and V are views into d and v (updated in place);
                # with fancy indexing they are copies and are written
                # back after the loop (see `self.slices` check below).
                D = d[kdof]
                V = v[kdof]
                # imf = M^-1 @ F for the non-rf DOF (identity mass
                # when self.m is None)
                if self.m is not None:
                    if self.unc:
                        # uncoupled (diagonal) mass: elementwise
                        # multiply by the stored inverse
                        imf = self.invm * force[kdof]
                    else:
                        # coupled mass: invm is an LU factorization
                        imf = la.lu_solve(self.invm, force[kdof], check_finite=False)
                else:
                    imf = force[kdof]
                # Exact force integrals across each step: with the 1st
                # order hold both endpoints of the step contribute
                # (P @ f[n] + Q @ f[n+1]); the zero order hold uses
                # only the step's starting force.
                if self.order == 1:
                    PQF = self.P @ imf[:, :-1] + self.Q @ imf[:, 1:]
                else:
                    PQF = self.P @ imf[:, :-1]
                # bind partitions to locals for speed inside the loop
                E_dd = self.E_dd
                E_dv = self.E_dv
                E_vd = self.E_vd
                E_vv = self.E_vv
                for i in range(nt - 1):
                    d0 = D[:, i]
                    v0 = V[:, i]
                    # state update y[n+1] = E @ y[n] + PQF[:, n] with
                    # y = [v; d]; velocity rows come first in PQF
                    D[:, i + 1] = E_dd @ d0 + E_dv @ v0 + PQF[ksize:, i]
                    V[:, i + 1] = E_vd @ d0 + E_vv @ v0 + PQF[:ksize, i]
                if not self.slices:
                    # fancy indexing produced copies; store results back
                    d[kdof] = D
                    v[kdof] = V
        self._calc_acce_kdof(d, v, a, force)
        return self._solution(d, v, a)
def generator(self, nt, F0, d0=None, v0=None, static_ic=False):
"""
Python "generator" version of :func:`SolveExp2.tsolve`;
interactively solve (or re-solve) one step at a time.
Parameters
----------
nt : integer
Number of time steps
F0 : 1d ndarray
Initial force vector
d0 : 1d ndarray or None; optional
Displacement initial conditions; if None, zero ic's are
used.
v0 : 1d ndarray or None; optional
Velocity initial conditions; if None, zero ic's are used.
static_ic : bool; optional
If True and `d0` is None, then `d0` is calculated such
that static (steady-state) initial conditions are
used. Uses the pseudo-inverse in case there are rigid-body
modes. `static_ic` is ignored if `d0` is not None.
Returns
-------
gen : generator function
Generator function for solving equations one step at a
time
d, v : 2d ndarrays
The displacement and velocity arrays. Only the first
column of `d` and `v` are set; other values are all zero.
Notes
-----
To use the generator:
1. Instantiate a :class:`SolveExp2` instance::
ts = SolveExp2(m, b, k, h)
2. Retrieve a generator and the arrays for holding the
solution::
gen, d, v = ts.generator(len(time), f0)
3. Use :func:`gen.send` to send a tuple of the next index
and corresponding force vector in a loop. Re-do
time-steps as necessary (note that index zero cannot be
redone)::
for i in range(1, len(time)):
# Do whatever to get i'th force
# - note: d[:, :i] and v[:, :i] are available
gen.send((i, fi))
There is a second usage of :func:`gen.send`: if the
index is negative, the force is treated as an addon to
forces already included for the i'th step. This is for
efficiency and only does the necessary calculations.
This feature was originally written for running
Henkel-Mar simulations, where interface forces are
computed after computing the solution with all the
other forces applied. There may be other similar
situations. To demonstrate this usage::
for i in range(1, len(time)):
# Do whatever to get i'th force
# - note: d[:, :i] and v[:, :i] are available
gen.send((i, fi))
# Do more calculations to compute an addon
# force. Then, update the i'th solution:
gen.send((-1, fi_addon))
The class instance will have the attributes `_d`, `_v`
(same objects as `d` and `v`), `_a`, and `_force`. `d`,
`v` and `ts._force` are updated on every
:func:`gen.send`. (`ts._a` is not used until step 4.)
4. Call :func:`ts.finalize` to get final solution
in standard form::
sol = ts.finalize()
The internal references `_d`, `_v`, `_a`, and `_force`
are deleted.
The generator solver currently has these limitations:
1. Unlike the normal solver, equations cannot be
interspersed. That is, each type of equation
(rigid-body, elastic, residual-flexibility) must be
contained in a contiguous group (so that `self.slices`
is True).
2. Unlike the normal solver, the `pre_eig` option is not
available.
3. The first time step cannot be redone.
Examples
--------
Set up some equations and solve both the normal way and via
the generator:
>>> from pyyeti import ode
>>> import numpy as np
>>> m = np.array([10., 30., 30., 30.]) # diag mass
>>> k = np.array([0., 6.e5, 6.e5, 6.e5]) # diag stiffness
>>> zeta = np.array([0., .05, 1., 2.]) # % damping
>>> b = 2.*zeta*np.sqrt(k/m)*m # diag damping
>>> h = 0.001 # time step
>>> t = np.arange(0, .3001, h) # time vector
>>> c = 2*np.pi
>>> f = np.vstack((3*(1-np.cos(c*2*t)), # ffn
... 4.5*(np.cos(np.sqrt(k[1]/m[1])*t)),
... 4.5*(np.cos(np.sqrt(k[2]/m[2])*t)),
... 4.5*(np.cos(np.sqrt(k[3]/m[3])*t))))
>>> f *= 1.e4
>>> ts = ode.SolveExp2(m, b, k, h)
Solve the normal way:
>>> sol = ts.tsolve(f, static_ic=1)
Solve via the generator:
>>> nt = f.shape[1]
>>> gen, d, v = ts.generator(nt, f[:, 0], static_ic=1)
>>> for i in range(1, nt):
... # Could do stuff here using d[:, :i] & v[:, :i] to
... # get next force
... gen.send((i, f[:, i]))
>>> sol2 = ts.finalize()
Confirm the solutions are the same:
>>> np.allclose(sol2.a, sol.a)
True
>>> np.allclose(sol2.v, sol.v)
True
>>> np.allclose(sol2.d, sol.d)
True
"""
if not self.slices:
raise NotImplementedError(
"generator not yet implemented for the case"
" when different types of equations are interspersed"
" (eg, a residual-flexibility DOF in the middle of"
" the elastic DOFs)"
)
d, v, a, force = self._init_dva_part(nt, F0, d0, v0, static_ic)
self._d, self._v, self._a, self._force = d, v, a, force
generator = self._solve_se2_generator(d, v, F0)
next(generator)
return generator, d, v
    def _solve_se2_generator(self, d, v, F0):
        """Generator solver for :class:`SolveExp2`"""
        nt = d.shape[1]
        if nt == 1:
            yield
        # all writes below go directly into the caller-visible arrays
        Force = self._force
        unc = self.unc
        rfsize = self.rfsize
        if self.rfsize:
            rf = self.rf
            ikrf = self.ikrf
            if unc:
                # uncoupled: inverse rf stiffness is diagonal
                ikrf = ikrf.ravel()
            else:
                # coupled: form the explicit inverse from the LU factors
                ikrf = la.lu_solve(ikrf, np.eye(rfsize), check_finite=False)
            drf = d[rf]
        ksize = self.ksize
        if not ksize:
            # only rf modes
            # NOTE: `i` is bound by the first send with j >= 0; an addon
            # send (j < 0) therefore cannot be the very first one.
            if unc:
                while True:
                    j, F1 = yield
                    if j < 0:
                        # add to previous soln
                        Force[:, i] += F1
                        d[:, i] += ikrf * F1[rf]
                    else:
                        i = j
                        Force[:, i] = F1
                        d[:, i] = ikrf * F1[rf]
            else:
                while True:
                    j, F1 = yield
                    if j < 0:
                        # add to previous soln
                        Force[:, i] += F1
                        d[:, i] += ikrf @ F1[rf]
                    else:
                        i = j
                        Force[:, i] = F1
                        d[:, i] = ikrf @ F1[rf]
        # there are rb/el modes if here
        kdof = self.kdof
        P = self.P
        Q = self.Q
        order = self.order
        if self.m is not None:
            # fold the inverse mass into the force coefficient matrices
            if unc:
                invm = self.invm.ravel()
                P = P * invm
                if order == 1:
                    Q = Q * invm
            else:
                # P @ invm = (invm.T @ P.T).T
                P = la.lu_solve(self.invm, P.T, trans=1, check_finite=False).T
                if order == 1:
                    Q = la.lu_solve(self.invm, Q.T, trans=1, check_finite=False).T
        E_dd = self.E_dd
        E_dv = self.E_dv
        E_vd = self.E_vd
        E_vv = self.E_vv
        if rfsize:
            # both rf and non-rf modes present
            D = d[kdof]
            V = v[kdof]
            drf = d[rf]
            while True:
                j, F1 = yield
                if j < 0:
                    # add new force to previous solution
                    Force[:, i] += F1
                    if self.order == 1:
                        # only the Q term involves the current-step force
                        PQF = Q @ F1[kdof]
                        D[:, i] += PQF[ksize:]
                        V[:, i] += PQF[:ksize]
                    if unc:
                        drf[:, i] += ikrf * F1[rf]
                    else:
                        drf[:, i] += ikrf @ F1[rf]
                else:
                    i = j
                    Force[:, i] = F1
                    F0 = Force[:, i - 1]
                    if self.order == 1:
                        PQF = P @ F0[kdof] + Q @ F1[kdof]
                    else:
                        PQF = P @ F0[kdof]
                    d0 = D[:, i - 1]
                    v0 = V[:, i - 1]
                    D[:, i] = E_dd @ d0 + E_dv @ v0 + PQF[ksize:]
                    V[:, i] = E_vd @ d0 + E_vv @ v0 + PQF[:ksize]
                    if unc:
                        drf[:, i] = ikrf * F1[rf]
                    else:
                        drf[:, i] = ikrf @ F1[rf]
        else:
            # only non-rf modes present
            while True:
                j, F1 = yield
                if j < 0:
                    # add new force to previous solution
                    Force[:, i] += F1
                    if self.order == 1:
                        PQF = Q @ F1
                        d[:, i] += PQF[ksize:]
                        v[:, i] += PQF[:ksize]
                else:
                    i = j
                    Force[:, i] = F1
                    F0 = Force[:, i - 1]
                    if self.order == 1:
                        PQF = P @ F0 + Q @ F1
                    else:
                        PQF = P @ F0
                    d0 = d[:, i - 1]
                    v0 = v[:, i - 1]
                    d[:, i] = E_dd @ d0 + E_dv @ v0 + PQF[ksize:]
                    v[:, i] = E_vd @ d0 + E_vv @ v0 + PQF[:ksize]
def get_f2x(self, phi, velo=False):
"""
Get force-to-displacement or force-to-velocity transform
Parameters
----------
phi : 2d ndarray
Transform from ODE coordinates to physical DOF
velo : bool; optional
If True, get force to velocity transform instead
Returns
-------
flex : 2d ndarray
Force to displacement (or velocity) transform
Notes
-----
This routine was written to support Henkel-Mar simulations;
see [#hm]_. The equations of motion for two separate bodies
are solved simultaneously while enforcing joint
compatibility. This is handy for allowing the two bodies to
separate from each other. The `flex` matrix is part of the
matrix in the upper right quadrant of equation 27 in ref
[#hm]_; the remaining part comes from the other body.
The interface DOF are those DOF that interface with the other
body. The force is the interface force and the displacement
(or velocity) is of the interface DOF.
The reference does not discuss enforcing joint velocity
compatibility. This routine however lets you choose between
the two since the velocity method is fundamentally more stable
than the displacement method.
Let (see also :func:`__init__`)::
phik = phi[:, kdof]
phirf = phi[:, rf]
If `velo` is False::
flex = phik @ Q[n:] @ phik.T + phirf @ ikrf @ phirf
If `velo` is True::
flex = phik @ Q[:n] @ phik.T
.. note::
A zeros matrix is returned if `order` is 0.
Raises
------
NotImplementedError
When `systype` is not float.
References
----------
.. [#hm] <NAME>, and <NAME> "Improved Method for
Calculating Booster to Launch Pad Interface Transient
Forces", Journal of Spacecraft and Rockets, Dated Nov-Dec,
1988, pp 433-438
"""
if self.systype is not float:
raise NotImplementedError(
":func:`get_f2x` can only handle real equations of motion"
)
flex = 0.0
unc = self.unc
if self.order == 1:
if self.ksize:
# non-rf equations:
Q = self.Q
kdof = self.kdof
phik = phi[:, kdof]
if self.m is not None:
if unc:
invm = self.invm.ravel()
Q = Q * invm
else:
Q = la.lu_solve(self.invm, Q.T, trans=1, check_finite=False).T
n = self.nonrfsz
if velo:
flex = phik @ Q[:n] @ phik.T
else:
flex = phik @ Q[n:] @ phik.T
flex = self._add_rf_flex(flex, phi, velo, unc)
return self._flex(flex, phi)
| [
"numpy.set_printoptions",
"numpy.eye",
"scipy.linalg.lu_solve",
"pyyeti.expmint.getEPQ",
"numpy.atleast_2d"
] | [((208, 242), 'numpy.set_printoptions', 'np.set_printoptions', ([], {'legacy': '"""1.13"""'}), "(legacy='1.13')\n", (227, 242), True, 'import numpy as np\n'), ((13456, 13476), 'numpy.atleast_2d', 'np.atleast_2d', (['force'], {}), '(force)\n', (13469, 13476), True, 'import numpy as np\n'), ((10929, 10967), 'pyyeti.expmint.getEPQ', 'expmint.getEPQ', (['A', 'h', 'order'], {'half': '(True)'}), '(A, h, order, half=True)\n', (10943, 10967), False, 'from pyyeti import expmint\n'), ((21266, 21280), 'numpy.eye', 'np.eye', (['rfsize'], {}), '(rfsize)\n', (21272, 21280), True, 'import numpy as np\n'), ((22592, 22648), 'scipy.linalg.lu_solve', 'la.lu_solve', (['self.invm', 'P.T'], {'trans': '(1)', 'check_finite': '(False)'}), '(self.invm, P.T, trans=1, check_finite=False)\n', (22603, 22648), True, 'import scipy.linalg as la\n'), ((13918, 13973), 'scipy.linalg.lu_solve', 'la.lu_solve', (['self.invm', 'force[kdof]'], {'check_finite': '(False)'}), '(self.invm, force[kdof], check_finite=False)\n', (13929, 13973), True, 'import scipy.linalg as la\n'), ((22706, 22762), 'scipy.linalg.lu_solve', 'la.lu_solve', (['self.invm', 'Q.T'], {'trans': '(1)', 'check_finite': '(False)'}), '(self.invm, Q.T, trans=1, check_finite=False)\n', (22717, 22762), True, 'import scipy.linalg as la\n'), ((27731, 27787), 'scipy.linalg.lu_solve', 'la.lu_solve', (['self.invm', 'Q.T'], {'trans': '(1)', 'check_finite': '(False)'}), '(self.invm, Q.T, trans=1, check_finite=False)\n', (27742, 27787), True, 'import scipy.linalg as la\n')] |
import numpy as np
class Objeto(object):
    """Base object: a 2-D position and a [vx, vy] velocity."""
    def __init__(self):
        super(Objeto, self).__init__()
        self.x, self.y = 0, 0
        self.velocidade = [0, 0]
class Circulo(Objeto):
    """Circle object with mass, center position, radius and RGB color."""
    def __init__(self, massa, x, y, raio, cor=(255, 0, 0)):
        super(Circulo, self).__init__()
        self.massa = massa
        self.raio = raio
        self.cor = cor
        self.x = x
        self.y = y
        self.velocidade = [0, 0]
        # bounding extents are taken from the radius
        self.largura = self.raio
        self.comprimento = self.raio
class Retangulo(Objeto):
    """Axis-aligned rectangle centered at (x, y), with its four corner
    points stored in ``self.pontos`` (one row per corner, CCW from the
    lower-left corner).
    """
    def __init__(self, massa, x, y, largura, comprimento, cor=(255, 0, 0)):
        super(Retangulo, self).__init__()
        self.massa = massa
        self.largura = largura
        self.comprimento = comprimento
        self.x = x
        self.y = y
        self.velocidade = [0, 0]
        self.cor = cor
        # corner points relative to the center (half-extents each way).
        # NOTE: `np.float` was an alias removed in NumPy 1.24; the builtin
        # `float` is the equivalent dtype.
        self.pontos = np.array(
            [[self.x - self.largura / 2, self.y - self.comprimento / 2],
             [self.x + self.largura / 2, self.y - self.comprimento / 2],
             [self.x + self.largura / 2, self.y + self.comprimento / 2],
             [self.x - self.largura / 2, self.y + self.comprimento / 2]],
            dtype=float)

    def rotaciona(self, arg):
        """Rotate the corner points by `arg` radians about the center.

        NOTE(review): the points are row vectors, so right-multiplying by
        [[cos, -sin], [sin, cos]] rotates them clockwise (i.e. by -arg in
        the usual CCW convention) -- confirm this is the intended sense.
        """
        matrizDeRotacao = np.array(
            [[np.cos(arg), -np.sin(arg)],
             [np.sin(arg), np.cos(arg)]], dtype=float)
        xOriginal = self.x
        yOriginal = self.y
        # rotate about the center: move to origin, rotate, move back
        self.translada(-xOriginal, -yOriginal)
        self.pontos = np.dot(self.pontos, matrizDeRotacao)
        self.translada(xOriginal, yOriginal)

    def translada(self, x, y):
        """Translate the rectangle (center and corner points) by (x, y)."""
        matrizDeTranslacao = np.array(
            [[x, y], [x, y], [x, y], [x, y]], dtype=float)
        self.x += x
        self.y += y
        self.pontos = self.pontos + matrizDeTranslacao
| [
"numpy.dot",
"numpy.sin",
"numpy.array",
"numpy.cos"
] | [((884, 1164), 'numpy.array', 'np.array', (['[[self.x - self.largura / 2, self.y - self.comprimento / 2], [self.x + self\n .largura / 2, self.y - self.comprimento / 2], [self.x + self.largura / \n 2, self.y + self.comprimento / 2], [self.x - self.largura / 2, self.y +\n self.comprimento / 2]]'], {'dtype': 'np.float'}), '([[self.x - self.largura / 2, self.y - self.comprimento / 2], [self\n .x + self.largura / 2, self.y - self.comprimento / 2], [self.x + self.\n largura / 2, self.y + self.comprimento / 2], [self.x - self.largura / 2,\n self.y + self.comprimento / 2]], dtype=np.float)\n', (892, 1164), True, 'import numpy as np\n'), ((1691, 1727), 'numpy.dot', 'np.dot', (['self.pontos', 'matrizDeRotacao'], {}), '(self.pontos, matrizDeRotacao)\n', (1697, 1727), True, 'import numpy as np\n'), ((1832, 1890), 'numpy.array', 'np.array', (['[[x, y], [x, y], [x, y], [x, y]]'], {'dtype': 'np.float'}), '([[x, y], [x, y], [x, y], [x, y]], dtype=np.float)\n', (1840, 1890), True, 'import numpy as np\n'), ((1500, 1511), 'numpy.cos', 'np.cos', (['arg'], {}), '(arg)\n', (1506, 1511), True, 'import numpy as np\n'), ((1527, 1538), 'numpy.sin', 'np.sin', (['arg'], {}), '(arg)\n', (1533, 1538), True, 'import numpy as np\n'), ((1539, 1550), 'numpy.cos', 'np.cos', (['arg'], {}), '(arg)\n', (1545, 1550), True, 'import numpy as np\n'), ((1513, 1524), 'numpy.sin', 'np.sin', (['arg'], {}), '(arg)\n', (1519, 1524), True, 'import numpy as np\n')] |
import os
import json
from glob import glob
import numpy as np
import pandas as pd
# Column-name normalization for the GSFC HPLC Excel reports:
#   - "mappings": raw spreadsheet header -> canonical column name
#     (headers not present here are dropped when parsing)
#   - "columns": canonical output column order for the parsed DataFrame
MAPPINGS = {
    "mappings": {
        "Cruise ID": "cruise",
        "Unnamed: 1": "date",
        "Latitude": "latitude",
        "Longitude": "longitude",
        "Sampling Depth (meters)": "depth",
        "Station": "cast",
        "Bottle Number": "niskin",
        "Sample Label": "sample_id",
        "GSFC Lab sample code": "alternate_sample_id",
        "Unnamed: 9": "project_id",
        "Unnamed: 10": "replicate",
        "Volume filtered (ml)": "vol_filtered",
        "[Tot_Chl_a]": "Tot_Chl_a",
        "[Tot_Chl_b]": "Tot_Chl_b",
        "[Tot_Chl_c]": "Tot_Chl_c",
        "[Alpha_beta_Car]": "alpha-beta-Car",
        "[But fuco]": "But-fuco",
        "[Hex fuco]": "Hex-fuco",
        "[Allo]": "Allo",
        "[Diadino]": "Diadino",
        "[Diato]": "Diato",
        "[Fuco]": "Fuco",
        "[Perid]": "Perid",
        "[Zea]": "Zea",
        "[MV_Chl_a]": "MV_Chl_a",
        "[DV_Chl_a]": "DV_Chl_a",
        "[Chlide_a]": "Chlide_a",
        "[MV_Chl _b]": "MV_Chl_b",
        "[DV_Chl_b]": "DV_Chl_b",
        "[Chl c1c2]": "Chl_c1c2",
        "[Chl_c3]": "Chl_c3",
        "[Lut]": "Lut",
        "[Neo]": "Neo",
        "[Viola]": "Viola",
        "[Phytin_a]": "Phytin_a",
        "[Phide_a]": "Phide_a",
        "[Pras]": "Pras",
        "[Gyro]": "Gyro",
        "[TChl]": "TChl",
        "[PPC]": "PPC",
        "[PSC]": "PSC",
        "[PSP]": "PSP",
        "[TCar]": "TCar",
        "[TAcc]": "TAcc",
        "[TPg]": "TPg",
        "[DP]": "DP",
        "[TAcc]/[Tchla]": "TAcc_TChla",
        "[PSC]/[TCar]": "PSC_TCar",
        "[PPC]/[TCar]": "PPC_TCar",
        "[TChl]/[TCar]": "TChl_TCar",
        "[PPC]/[Tpg]": "PPC_TPg",
        "[PSP]/[TPg]": "PSP_TPg",
        "[TChl a]/[TPg]": "TChla_Tpg",
        "comments": "comments",
        "other": "comments2",
        "other.1": "comments3",
        "Indicate if filters are replicates": "R",
        "date": "date"
    },
    "columns": [
        "cruise",
        "date",
        "latitude",
        "longitude",
        "depth",
        "cast",
        "niskin",
        "sample_id",
        "alternate_sample_id",
        "project_id",
        "replicate",
        "vol_filtered",
        "Tot_Chl_a",
        "Tot_Chl_b",
        "Tot_Chl_c",
        "alpha-beta-Car",
        "But-fuco",
        "Hex-fuco",
        "Allo",
        "Diadino",
        "Diato",
        "Fuco",
        "Perid",
        "Zea",
        "MV_Chl_a",
        "DV_Chl_a",
        "Chlide_a",
        "MV_Chl_b",
        "DV_Chl_b",
        "Chl_c1c2",
        "Chl_c3",
        "Lut",
        "Neo",
        "Viola",
        "Phytin_a",
        "Phide_a",
        "Pras",
        "Gyro",
        "TChl",
        "PPC",
        "PSC",
        "PSP",
        "TCar",
        "TAcc",
        "TPg",
        "DP",
        "TAcc_TChla",
        "PSC_TCar",
        "PPC_TCar",
        "TChl_TCar",
        "PPC_TPg",
        "PSP_TPg",
        "TChla_Tpg",
        "comments",
        "comments2",
        "comments3"
    ]
}
def hplc_report_paths(hplc_dir):
    """Return the paths of all ``Sosik*report.xlsx`` files in *hplc_dir*."""
    pattern = os.path.join(hplc_dir, 'Sosik*report.xlsx')
    return glob(pattern)
def parse_report(report_path):
    """Parse one HPLC Excel report into a normalized DataFrame.

    Parameters
    ----------
    report_path : str
        Path to a ``Sosik*report.xlsx`` report file.

    Returns
    -------
    pandas.DataFrame
        One row per sample, with the columns in ``MAPPINGS['columns']``
        (the three comment columns consolidated into a trailing
        ``comments`` column).

    Raises
    ------
    FileNotFoundError
        If `report_path` does not exist.
    """
    # explicit check instead of `assert` (asserts vanish under `python -O`)
    if not os.path.exists(report_path):
        raise FileNotFoundError(report_path)
    Y = 'Year'
    M = 'Month'
    D = 'Day of Gregorian Month'
    T = 'GMT Time'
    # read the date parts as strings so they can be concatenated verbatim
    report = pd.read_excel(report_path, skiprows=8, dtype={
        Y: str,
        M: str,
        D: str,
        T: str
    })
    # parse date fields
    dates = report[M] + ' ' + report[D] + ' ' + report[Y] + ' ' + report[T]
    report['date'] = pd.to_datetime(dates, utc=True)
    # map column names; iterate a snapshot since pop() mutates the columns
    mappings = MAPPINGS['mappings']
    for c in list(report.columns):
        if c not in mappings:
            report.pop(c)
    report.columns = [mappings[c] for c in report.columns]
    # produce replicate column: 'S' = single ('a'); consecutive 'D'
    # duplicates become the 'a'/'b' pair
    report.sort_values(['cruise', 'cast', 'niskin'], inplace=True)
    R = report.pop('R')
    is_a = (R == 'S') | ((R == 'D') & (R.shift(-1) == 'D'))
    report['replicate'] = np.where(is_a, 'a', 'b')
    # add project_id
    report['project_id'] = 'NESLTER'
    # reorder columns
    report = report[MAPPINGS['columns']]
    # consolidate the comments columns; fill NaN on the extracted Series
    # rather than `report['comments'].fillna(..., inplace=True)`, which is
    # chained assignment and silently does nothing under pandas copy-on-write
    report['comments'] = (
        report.pop('comments').fillna('') + ' '
        + report.pop('comments2').fillna('') + ' '
        + report.pop('comments3').fillna('')
    ).str.strip()
    return report
def parse_hplc(hplc_dir):
    """Parse every HPLC report found in *hplc_dir* into one DataFrame."""
    frames = [parse_report(path) for path in hplc_report_paths(hplc_dir)]
    combined = pd.concat(frames)
    # NOTE(review): -8888 appears to be a sentinel value in the reports,
    # mapped to zero here -- confirm its meaning against the data source
    return combined.replace(-8888, 0)
"os.path.exists",
"pandas.read_excel",
"numpy.where",
"pandas.to_datetime",
"os.path.join",
"pandas.concat"
] | [((3262, 3289), 'os.path.exists', 'os.path.exists', (['report_path'], {}), '(report_path)\n', (3276, 3289), False, 'import os\n'), ((3388, 3466), 'pandas.read_excel', 'pd.read_excel', (['report_path'], {'skiprows': '(8)', 'dtype': '{Y: str, M: str, D: str, T: str}'}), '(report_path, skiprows=8, dtype={Y: str, M: str, D: str, T: str})\n', (3401, 3466), True, 'import pandas as pd\n'), ((3627, 3658), 'pandas.to_datetime', 'pd.to_datetime', (['dates'], {'utc': '(True)'}), '(dates, utc=True)\n', (3641, 3658), True, 'import pandas as pd\n'), ((4076, 4100), 'numpy.where', 'np.where', (['is_a', '"""a"""', '"""b"""'], {}), "(is_a, 'a', 'b')\n", (4084, 4100), True, 'import numpy as np\n'), ((4758, 4772), 'pandas.concat', 'pd.concat', (['dfs'], {}), '(dfs)\n', (4767, 4772), True, 'import pandas as pd\n'), ((3134, 3177), 'os.path.join', 'os.path.join', (['hplc_dir', '"""Sosik*report.xlsx"""'], {}), "(hplc_dir, 'Sosik*report.xlsx')\n", (3146, 3177), False, 'import os\n')] |
# -*- coding: utf-8 -*-
"""
Shunt Architectures.
Copyright 2021 Christian Doppler Laboratory for Embedded Machine Learning
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
# Libs
import numpy as np
import tensorflow as tf
from tensorflow.keras.layers import Input, Conv2D, DepthwiseConv2D, BatchNormalization, ReLU, Add, Concatenate, Activation, ZeroPadding2D
from tensorflow.keras import Model
from tensorflow.keras import regularizers, initializers
from tensorflow import keras
from keras_applications import correct_pad
# Own modules
from shunt_connector.models.mobile_net_v3 import _se_block, _depth, hard_sigmoid
__author__ = '<NAME>'
__copyright__ = 'Copyright 2021, Christian Doppler Laboratory for ' \
'Embedded Machine Learning'
__credits__ = ['']
__license__ = 'Apache 2.0'
__version__ = '1.0.0'
__maintainer__ = '<NAME>'
__email__ = '<EMAIL>'
__status__ = 'Release'
def createArch1(input_shape, output_shape, num_stride_layers, use_se, dilation_rates):
    """Shunt architecture 1: two expand/depthwise/project bottleneck blocks.

    Parameters
    ----------
    input_shape : tuple
        Shape of the shunt input feature map (without batch dimension).
    output_shape : tuple
        Shape of the shunt output; only the channel count is used here.
    num_stride_layers : int
        Number of stride-2 depthwise layers (at most 2 for this arch).
    use_se : bool
        Unused by this architecture (kept for a uniform builder signature).
    dilation_rates : list of int
        Dilation rates for the two depthwise convolutions when they are
        not strided.

    Returns
    -------
    tf.keras.Model named 'shunt'.
    """
    input_net = Input(shape=input_shape)
    x = input_net
    # block 1: 1x1 expansion to 192 channels
    x = Conv2D(192, kernel_size=(1,1), strides=(1,1), padding='same', use_bias=False, activation=None, name="shunt_conv_1", kernel_initializer="he_normal", kernel_regularizer=regularizers.l2(4e-5))(x)
    x = BatchNormalization(epsilon=1e-3, momentum=0.999, name="shunt_batch_norm_1")(x)
    x = ReLU(6., name="shunt_relu_1")(x)
    # depthwise 3x3: strided (with explicit pad) when downsampling, dilated otherwise
    if num_stride_layers > 0:
        x = ZeroPadding2D(padding=correct_pad(keras.backend, x, (3,3)), name='shunt_depthwise_pad_1')(x)
        x = DepthwiseConv2D(kernel_size=(3,3), strides=(2,2), padding='valid', use_bias=False, activation=None, name="shunt_depth_conv_1", kernel_initializer="he_normal", kernel_regularizer=regularizers.l2(4e-5))(x)
    else:
        x = DepthwiseConv2D(kernel_size=(3,3), strides=(1,1), dilation_rate=(dilation_rates[0],dilation_rates[0]), padding='same', use_bias=False, activation=None, name="shunt_depth_conv_1", kernel_initializer="he_normal", kernel_regularizer=regularizers.l2(4e-5))(x)
    x = BatchNormalization(epsilon=1e-3, momentum=0.999, name="shunt_batch_norm_2")(x)
    x = ReLU(6., name="shunt_relu_2")(x)
    # block 1 projection: linear bottleneck (no activation after BN)
    x = Conv2D(64, kernel_size=(1,1), strides=(1,1), padding='same', use_bias=False, activation=None, name="shunt_conv_2", kernel_initializer="he_normal", kernel_regularizer=regularizers.l2(4e-5))(x)
    x = BatchNormalization(epsilon=1e-3, momentum=0.999, name="shunt_batch_norm_3")(x)
    # block 2: 1x1 expansion back to 192 channels
    x = Conv2D(192, kernel_size=(1,1), strides=(1,1), padding='same', use_bias=False, activation=None, name="shunt_conv_3", kernel_initializer="he_normal", kernel_regularizer=regularizers.l2(4e-5))(x)
    x = BatchNormalization(epsilon=1e-3, momentum=0.999, name="shunt_batch_norm_4")(x)
    x = ReLU(6., name="shunt_relu_3")(x)
    if num_stride_layers > 1:
        x = ZeroPadding2D(padding=correct_pad(keras.backend, x, (3,3)), name='shunt_depthwise_pad_2')(x)
        x = DepthwiseConv2D(kernel_size=(3,3), strides=(2,2), padding='valid', use_bias=False, activation=None, name="shunt_depth_conv_2", kernel_initializer="he_normal", kernel_regularizer=regularizers.l2(4e-5))(x)
    else:
        x = DepthwiseConv2D(kernel_size=(3,3), strides=(1,1), dilation_rate=(dilation_rates[1],dilation_rates[1]), padding='same', use_bias=False, activation=None, name="shunt_depth_conv_2", kernel_initializer="he_normal", kernel_regularizer=regularizers.l2(4e-5))(x)
    x = BatchNormalization(epsilon=1e-3, momentum=0.999, name="shunt_batch_norm_5")(x)
    x = ReLU(6., name="shunt_relu_4")(x)
    # final projection to the requested output channel count
    x = Conv2D(output_shape[-1], kernel_size=(1,1), strides=(1,1), padding='same', use_bias=False, activation=None, name="shunt_conv_4", kernel_initializer="he_normal", kernel_regularizer=regularizers.l2(4e-5))(x)
    x = BatchNormalization(epsilon=1e-3, momentum=0.999, name="shunt_batch_norm_6")(x)
    model = Model(inputs=input_net, outputs=x, name='shunt')
    return model
def createArch4(input_shape, output_shape, num_stride_layers, use_se, dilation_rates):
    """Shunt architecture 4: a single expand/depthwise/project bottleneck.

    Parameters
    ----------
    input_shape : tuple
        Shape of the shunt input feature map (without batch dimension).
    output_shape : tuple
        Shape of the shunt output; only the channel count is used here.
    num_stride_layers : int
        Number of stride-2 depthwise layers (at most 1 for this arch).
    use_se : bool
        Unused by this architecture (kept for a uniform builder signature).
    dilation_rates : list of int
        Dilation rate for the depthwise convolution when it is not strided.

    Returns
    -------
    tf.keras.Model named 'shunt'.
    """
    inputs = Input(shape=input_shape)

    def _reg():
        # fresh regularizer instance per layer, as Keras expects
        return regularizers.l2(4e-5)

    # 1x1 expansion to 128 channels
    x = Conv2D(128, kernel_size=(1, 1), strides=(1, 1), padding='same',
               use_bias=False, activation=None,
               kernel_initializer="he_normal", kernel_regularizer=_reg())(inputs)
    x = BatchNormalization(epsilon=1e-3, momentum=0.999)(x)
    x = ReLU(6.)(x)
    # depthwise 3x3: strided when downsampling, dilated otherwise
    if num_stride_layers > 0:
        x = DepthwiseConv2D(kernel_size=(3, 3), strides=(2, 2), padding='same',
                            use_bias=False, activation=None,
                            kernel_initializer="he_normal",
                            kernel_regularizer=_reg())(x)
    else:
        x = DepthwiseConv2D(kernel_size=(3, 3), strides=(1, 1),
                            dilation_rate=(dilation_rates[0], dilation_rates[0]),
                            padding='same', use_bias=False, activation=None,
                            kernel_initializer="he_normal",
                            kernel_regularizer=_reg())(x)
    x = BatchNormalization(epsilon=1e-3, momentum=0.999)(x)
    x = ReLU(6.)(x)
    # 1x1 linear projection to the requested output channel count
    x = Conv2D(output_shape[-1], kernel_size=(1, 1), strides=(1, 1),
               padding='same', use_bias=False, activation=None,
               kernel_initializer="he_normal", kernel_regularizer=_reg())(x)
    x = BatchNormalization(epsilon=1e-3, momentum=0.999)(x)
    return Model(inputs=inputs, outputs=x, name='shunt')
def createArch5(input_shape, output_shape, num_stride_layers, use_se, dilation_rates):
    """Shunt architecture 5: three stacked expand/depthwise/project blocks.

    Parameters
    ----------
    input_shape : tuple
        Shape of the shunt input feature map (without batch dimension).
    output_shape : tuple
        Shape of the shunt output; only the channel count is used here.
    num_stride_layers : int
        Number of stride-2 depthwise layers (at most 3 for this arch).
    use_se : bool
        Unused by this architecture (kept for a uniform builder signature).
    dilation_rates : list of int
        Dilation rates for the three depthwise convolutions when they
        are not strided.

    Returns
    -------
    tf.keras.Model named 'shunt'.
    """
    input_net = Input(shape=input_shape)
    x = input_net
    # block 1: 1x1 expansion to 192 channels
    x = Conv2D(192, kernel_size=(1,1), strides=(1,1), padding='same', use_bias=False, activation=None, kernel_initializer="he_normal", kernel_regularizer=regularizers.l2(4e-5))(x)
    x = BatchNormalization(epsilon=1e-3, momentum=0.999)(x)
    x = ReLU(6.)(x)
    if num_stride_layers > 0:
        x = DepthwiseConv2D(kernel_size=(3,3), strides=(2,2), padding='same', use_bias=False, activation=None, kernel_initializer="he_normal", kernel_regularizer=regularizers.l2(4e-5))(x)
    else:
        x = DepthwiseConv2D(kernel_size=(3,3), strides=(1,1), dilation_rate=(dilation_rates[0],dilation_rates[0]), padding='same', use_bias=False, activation=None, kernel_initializer="he_normal", kernel_regularizer=regularizers.l2(4e-5))(x)
    x = BatchNormalization(epsilon=1e-3, momentum=0.999)(x)
    x = ReLU(6.)(x)
    # block 1 projection: linear bottleneck (no activation after BN)
    x = Conv2D(64, kernel_size=(1,1), strides=(1,1), padding='same', use_bias=False, activation=None, kernel_initializer="he_normal", kernel_regularizer=regularizers.l2(4e-5))(x)
    x = BatchNormalization(epsilon=1e-3, momentum=0.999)(x)
    # block 2: expansion
    x = Conv2D(192, kernel_size=(1,1), strides=(1,1), padding='same', use_bias=False, activation=None, kernel_initializer="he_normal", kernel_regularizer=regularizers.l2(4e-5))(x)
    x = BatchNormalization(epsilon=1e-3, momentum=0.999)(x)
    x = ReLU(6.)(x)
    if num_stride_layers > 1:
        x = DepthwiseConv2D(kernel_size=(3,3), strides=(2,2), padding='same', use_bias=False, activation=None, kernel_initializer="he_normal", kernel_regularizer=regularizers.l2(4e-5))(x)
    else:
        x = DepthwiseConv2D(kernel_size=(3,3), strides=(1,1), dilation_rate=(dilation_rates[1],dilation_rates[1]), padding='same', use_bias=False, activation=None, kernel_initializer="he_normal", kernel_regularizer=regularizers.l2(4e-5))(x)
    x = BatchNormalization(epsilon=1e-3, momentum=0.999)(x)
    x = ReLU(6.)(x)
    # block 2 projection
    x = Conv2D(64, kernel_size=(1,1), strides=(1,1), padding='same', use_bias=False, activation=None, kernel_initializer="he_normal", kernel_regularizer=regularizers.l2(4e-5))(x)
    x = BatchNormalization(epsilon=1e-3, momentum=0.999)(x)
    # block 3: expansion
    x = Conv2D(192, kernel_size=(1,1), strides=(1,1), padding='same', use_bias=False, activation=None, kernel_initializer="he_normal", kernel_regularizer=regularizers.l2(4e-5))(x)
    x = BatchNormalization(epsilon=1e-3, momentum=0.999)(x)
    x = ReLU(6.)(x)
    if num_stride_layers > 2:
        x = DepthwiseConv2D(kernel_size=(3,3), strides=(2,2), padding='same', use_bias=False, activation=None, kernel_initializer="he_normal", kernel_regularizer=regularizers.l2(4e-5))(x)
    else:
        x = DepthwiseConv2D(kernel_size=(3,3), strides=(1,1), dilation_rate=(dilation_rates[2],dilation_rates[2]), padding='same', use_bias=False, activation=None, kernel_initializer="he_normal", kernel_regularizer=regularizers.l2(4e-5))(x)
    x = BatchNormalization(epsilon=1e-3, momentum=0.999)(x)
    x = ReLU(6.)(x)
    # final projection to the requested output channel count
    x = Conv2D(output_shape[-1], kernel_size=(1,1), strides=(1,1), padding='same', use_bias=False, activation=None, kernel_initializer="he_normal", kernel_regularizer=regularizers.l2(4e-5))(x)
    x = BatchNormalization(epsilon=1e-3, momentum=0.999)(x)
    model = Model(inputs=input_net, outputs=x, name='shunt')
    return model
def createShunt(input_shape, output_shape, arch, use_se=False, dilation_rate_input=1, dilation_rate_output=1, expansion_factor=6):
    """Create a shunt model for the given input/output feature-map shapes.

    Parameters
    ----------
    input_shape : tuple
        Shape of the shunt input feature map (without batch dimension).
    output_shape : tuple
        Shape of the shunt output feature map. The spatial ratio between
        input and output determines how many stride-2 layers are needed.
    arch : int
        Shunt architecture index; one of 1, 4, 5 or 6.
    use_se : bool; optional
        Passed through to the architecture builders.
    dilation_rate_input, dilation_rate_output : int; optional
        Dilation rates used at the input/output ends of the shunt.
    expansion_factor : int; optional
        Expansion factor (architecture 6 only).

    Returns
    -------
    tf.keras.Model
        The shunt model.

    Raises
    ------
    ValueError
        If `arch` is not a known architecture index.
    Exception
        If the required downsampling needs more stride layers than the
        chosen architecture supports.
    """
    # explicit validation instead of `assert` (asserts vanish under -O)
    if arch not in (1, 4, 5, 6):
        raise ValueError("Unknown shunt architecture: {}".format(arch))
    # number of 2x spatial downsampling steps between input and output
    num_stride_layers = int(np.round(np.log2(input_shape[1] / output_shape[1])))
    max_stride_list = {1: 2, 4: 1, 5: 3, 6: 1}  # maximum strides per architecture
    if max_stride_list[arch] < num_stride_layers:
        raise Exception("Chosen shunt architecture does not support {} many stride layers. Only {} are supported.".format(num_stride_layers, max_stride_list[arch]))
    # get dilation rates for given architecture
    # NOTE(review): get_dilation_rates does not handle arch 6 and raises
    # for it, even though arch 6 is accepted above -- confirm intent.
    dilation_rates = get_dilation_rates(arch, dilation_rate_input, dilation_rate_output)
    if arch == 1:
        return createArch1(input_shape, output_shape, num_stride_layers, use_se, dilation_rates)
    if arch == 4:
        return createArch4(input_shape, output_shape, num_stride_layers, use_se, dilation_rates)
    if arch == 5:
        return createArch5(input_shape, output_shape, num_stride_layers, use_se, dilation_rates)
    return createArch6(input_shape, output_shape, num_stride_layers, use_se, expansion_factor, dilation_rates)
def get_dilation_rates(arch, dilation_rate_input, dilation_rate_output):
    """Return the per-depthwise-layer dilation rates for a shunt arch.

    NOTE(review): architecture 6 is accepted by ``createShunt`` but has
    no entry here, so requesting it raises -- confirm the intended
    rates for arch 6.
    """
    rate_table = {
        1: [dilation_rate_input, dilation_rate_output],
        4: [dilation_rate_output],
        5: [dilation_rate_input, dilation_rate_input, dilation_rate_output],
    }
    if arch not in rate_table:
        raise Exception("Unknown shunt architecture: {}".format(arch))
    return rate_table[arch]
"keras_applications.correct_pad",
"tensorflow.keras.layers.BatchNormalization",
"numpy.log2",
"tensorflow.keras.layers.ReLU",
"tensorflow.keras.Model",
"tensorflow.keras.layers.Input",
"tensorflow.keras.regularizers.l2"
] | [((1482, 1506), 'tensorflow.keras.layers.Input', 'Input', ([], {'shape': 'input_shape'}), '(shape=input_shape)\n', (1487, 1506), False, 'from tensorflow.keras.layers import Input, Conv2D, DepthwiseConv2D, BatchNormalization, ReLU, Add, Concatenate, Activation, ZeroPadding2D\n'), ((4299, 4347), 'tensorflow.keras.Model', 'Model', ([], {'inputs': 'input_net', 'outputs': 'x', 'name': '"""shunt"""'}), "(inputs=input_net, outputs=x, name='shunt')\n", (4304, 4347), False, 'from tensorflow.keras import Model\n'), ((4471, 4495), 'tensorflow.keras.layers.Input', 'Input', ([], {'shape': 'input_shape'}), '(shape=input_shape)\n', (4476, 4495), False, 'from tensorflow.keras.layers import Input, Conv2D, DepthwiseConv2D, BatchNormalization, ReLU, Add, Concatenate, Activation, ZeroPadding2D\n'), ((5590, 5638), 'tensorflow.keras.Model', 'Model', ([], {'inputs': 'input_net', 'outputs': 'x', 'name': '"""shunt"""'}), "(inputs=input_net, outputs=x, name='shunt')\n", (5595, 5638), False, 'from tensorflow.keras import Model\n'), ((5762, 5786), 'tensorflow.keras.layers.Input', 'Input', ([], {'shape': 'input_shape'}), '(shape=input_shape)\n', (5767, 5786), False, 'from tensorflow.keras.layers import Input, Conv2D, DepthwiseConv2D, BatchNormalization, ReLU, Add, Concatenate, Activation, ZeroPadding2D\n'), ((8977, 9025), 'tensorflow.keras.Model', 'Model', ([], {'inputs': 'input_net', 'outputs': 'x', 'name': '"""shunt"""'}), "(inputs=input_net, outputs=x, name='shunt')\n", (8982, 9025), False, 'from tensorflow.keras import Model\n'), ((1735, 1811), 'tensorflow.keras.layers.BatchNormalization', 'BatchNormalization', ([], {'epsilon': '(0.001)', 'momentum': '(0.999)', 'name': '"""shunt_batch_norm_1"""'}), "(epsilon=0.001, momentum=0.999, name='shunt_batch_norm_1')\n", (1753, 1811), False, 'from tensorflow.keras.layers import Input, Conv2D, DepthwiseConv2D, BatchNormalization, ReLU, Add, Concatenate, Activation, ZeroPadding2D\n'), ((1822, 1852), 'tensorflow.keras.layers.ReLU', 'ReLU', 
(['(6.0)'], {'name': '"""shunt_relu_1"""'}), "(6.0, name='shunt_relu_1')\n", (1826, 1852), False, 'from tensorflow.keras.layers import Input, Conv2D, DepthwiseConv2D, BatchNormalization, ReLU, Add, Concatenate, Activation, ZeroPadding2D\n'), ((2492, 2568), 'tensorflow.keras.layers.BatchNormalization', 'BatchNormalization', ([], {'epsilon': '(0.001)', 'momentum': '(0.999)', 'name': '"""shunt_batch_norm_2"""'}), "(epsilon=0.001, momentum=0.999, name='shunt_batch_norm_2')\n", (2510, 2568), False, 'from tensorflow.keras.layers import Input, Conv2D, DepthwiseConv2D, BatchNormalization, ReLU, Add, Concatenate, Activation, ZeroPadding2D\n'), ((2579, 2609), 'tensorflow.keras.layers.ReLU', 'ReLU', (['(6.0)'], {'name': '"""shunt_relu_2"""'}), "(6.0, name='shunt_relu_2')\n", (2583, 2609), False, 'from tensorflow.keras.layers import Input, Conv2D, DepthwiseConv2D, BatchNormalization, ReLU, Add, Concatenate, Activation, ZeroPadding2D\n'), ((2820, 2896), 'tensorflow.keras.layers.BatchNormalization', 'BatchNormalization', ([], {'epsilon': '(0.001)', 'momentum': '(0.999)', 'name': '"""shunt_batch_norm_3"""'}), "(epsilon=0.001, momentum=0.999, name='shunt_batch_norm_3')\n", (2838, 2896), False, 'from tensorflow.keras.layers import Input, Conv2D, DepthwiseConv2D, BatchNormalization, ReLU, Add, Concatenate, Activation, ZeroPadding2D\n'), ((3108, 3184), 'tensorflow.keras.layers.BatchNormalization', 'BatchNormalization', ([], {'epsilon': '(0.001)', 'momentum': '(0.999)', 'name': '"""shunt_batch_norm_4"""'}), "(epsilon=0.001, momentum=0.999, name='shunt_batch_norm_4')\n", (3126, 3184), False, 'from tensorflow.keras.layers import Input, Conv2D, DepthwiseConv2D, BatchNormalization, ReLU, Add, Concatenate, Activation, ZeroPadding2D\n'), ((3195, 3225), 'tensorflow.keras.layers.ReLU', 'ReLU', (['(6.0)'], {'name': '"""shunt_relu_3"""'}), "(6.0, name='shunt_relu_3')\n", (3199, 3225), False, 'from tensorflow.keras.layers import Input, Conv2D, DepthwiseConv2D, BatchNormalization, ReLU, Add, 
Concatenate, Activation, ZeroPadding2D\n'), ((3865, 3941), 'tensorflow.keras.layers.BatchNormalization', 'BatchNormalization', ([], {'epsilon': '(0.001)', 'momentum': '(0.999)', 'name': '"""shunt_batch_norm_5"""'}), "(epsilon=0.001, momentum=0.999, name='shunt_batch_norm_5')\n", (3883, 3941), False, 'from tensorflow.keras.layers import Input, Conv2D, DepthwiseConv2D, BatchNormalization, ReLU, Add, Concatenate, Activation, ZeroPadding2D\n'), ((3952, 3982), 'tensorflow.keras.layers.ReLU', 'ReLU', (['(6.0)'], {'name': '"""shunt_relu_4"""'}), "(6.0, name='shunt_relu_4')\n", (3956, 3982), False, 'from tensorflow.keras.layers import Input, Conv2D, DepthwiseConv2D, BatchNormalization, ReLU, Add, Concatenate, Activation, ZeroPadding2D\n'), ((4207, 4283), 'tensorflow.keras.layers.BatchNormalization', 'BatchNormalization', ([], {'epsilon': '(0.001)', 'momentum': '(0.999)', 'name': '"""shunt_batch_norm_6"""'}), "(epsilon=0.001, momentum=0.999, name='shunt_batch_norm_6')\n", (4225, 4283), False, 'from tensorflow.keras.layers import Input, Conv2D, DepthwiseConv2D, BatchNormalization, ReLU, Add, Concatenate, Activation, ZeroPadding2D\n'), ((4703, 4752), 'tensorflow.keras.layers.BatchNormalization', 'BatchNormalization', ([], {'epsilon': '(0.001)', 'momentum': '(0.999)'}), '(epsilon=0.001, momentum=0.999)\n', (4721, 4752), False, 'from tensorflow.keras.layers import Input, Conv2D, DepthwiseConv2D, BatchNormalization, ReLU, Add, Concatenate, Activation, ZeroPadding2D\n'), ((4763, 4772), 'tensorflow.keras.layers.ReLU', 'ReLU', (['(6.0)'], {}), '(6.0)\n', (4767, 4772), False, 'from tensorflow.keras.layers import Input, Conv2D, DepthwiseConv2D, BatchNormalization, ReLU, Add, Concatenate, Activation, ZeroPadding2D\n'), ((5252, 5301), 'tensorflow.keras.layers.BatchNormalization', 'BatchNormalization', ([], {'epsilon': '(0.001)', 'momentum': '(0.999)'}), '(epsilon=0.001, momentum=0.999)\n', (5270, 5301), False, 'from tensorflow.keras.layers import Input, Conv2D, DepthwiseConv2D, 
BatchNormalization, ReLU, Add, Concatenate, Activation, ZeroPadding2D\n'), ((5312, 5321), 'tensorflow.keras.layers.ReLU', 'ReLU', (['(6.0)'], {}), '(6.0)\n', (5316, 5321), False, 'from tensorflow.keras.layers import Input, Conv2D, DepthwiseConv2D, BatchNormalization, ReLU, Add, Concatenate, Activation, ZeroPadding2D\n'), ((5525, 5574), 'tensorflow.keras.layers.BatchNormalization', 'BatchNormalization', ([], {'epsilon': '(0.001)', 'momentum': '(0.999)'}), '(epsilon=0.001, momentum=0.999)\n', (5543, 5574), False, 'from tensorflow.keras.layers import Input, Conv2D, DepthwiseConv2D, BatchNormalization, ReLU, Add, Concatenate, Activation, ZeroPadding2D\n'), ((5994, 6043), 'tensorflow.keras.layers.BatchNormalization', 'BatchNormalization', ([], {'epsilon': '(0.001)', 'momentum': '(0.999)'}), '(epsilon=0.001, momentum=0.999)\n', (6012, 6043), False, 'from tensorflow.keras.layers import Input, Conv2D, DepthwiseConv2D, BatchNormalization, ReLU, Add, Concatenate, Activation, ZeroPadding2D\n'), ((6054, 6063), 'tensorflow.keras.layers.ReLU', 'ReLU', (['(6.0)'], {}), '(6.0)\n', (6058, 6063), False, 'from tensorflow.keras.layers import Input, Conv2D, DepthwiseConv2D, BatchNormalization, ReLU, Add, Concatenate, Activation, ZeroPadding2D\n'), ((6543, 6592), 'tensorflow.keras.layers.BatchNormalization', 'BatchNormalization', ([], {'epsilon': '(0.001)', 'momentum': '(0.999)'}), '(epsilon=0.001, momentum=0.999)\n', (6561, 6592), False, 'from tensorflow.keras.layers import Input, Conv2D, DepthwiseConv2D, BatchNormalization, ReLU, Add, Concatenate, Activation, ZeroPadding2D\n'), ((6603, 6612), 'tensorflow.keras.layers.ReLU', 'ReLU', (['(6.0)'], {}), '(6.0)\n', (6607, 6612), False, 'from tensorflow.keras.layers import Input, Conv2D, DepthwiseConv2D, BatchNormalization, ReLU, Add, Concatenate, Activation, ZeroPadding2D\n'), ((6802, 6851), 'tensorflow.keras.layers.BatchNormalization', 'BatchNormalization', ([], {'epsilon': '(0.001)', 'momentum': '(0.999)'}), '(epsilon=0.001, 
momentum=0.999)\n', (6820, 6851), False, 'from tensorflow.keras.layers import Input, Conv2D, DepthwiseConv2D, BatchNormalization, ReLU, Add, Concatenate, Activation, ZeroPadding2D\n'), ((7042, 7091), 'tensorflow.keras.layers.BatchNormalization', 'BatchNormalization', ([], {'epsilon': '(0.001)', 'momentum': '(0.999)'}), '(epsilon=0.001, momentum=0.999)\n', (7060, 7091), False, 'from tensorflow.keras.layers import Input, Conv2D, DepthwiseConv2D, BatchNormalization, ReLU, Add, Concatenate, Activation, ZeroPadding2D\n'), ((7102, 7111), 'tensorflow.keras.layers.ReLU', 'ReLU', (['(6.0)'], {}), '(6.0)\n', (7106, 7111), False, 'from tensorflow.keras.layers import Input, Conv2D, DepthwiseConv2D, BatchNormalization, ReLU, Add, Concatenate, Activation, ZeroPadding2D\n'), ((7591, 7640), 'tensorflow.keras.layers.BatchNormalization', 'BatchNormalization', ([], {'epsilon': '(0.001)', 'momentum': '(0.999)'}), '(epsilon=0.001, momentum=0.999)\n', (7609, 7640), False, 'from tensorflow.keras.layers import Input, Conv2D, DepthwiseConv2D, BatchNormalization, ReLU, Add, Concatenate, Activation, ZeroPadding2D\n'), ((7651, 7660), 'tensorflow.keras.layers.ReLU', 'ReLU', (['(6.0)'], {}), '(6.0)\n', (7655, 7660), False, 'from tensorflow.keras.layers import Input, Conv2D, DepthwiseConv2D, BatchNormalization, ReLU, Add, Concatenate, Activation, ZeroPadding2D\n'), ((7850, 7899), 'tensorflow.keras.layers.BatchNormalization', 'BatchNormalization', ([], {'epsilon': '(0.001)', 'momentum': '(0.999)'}), '(epsilon=0.001, momentum=0.999)\n', (7868, 7899), False, 'from tensorflow.keras.layers import Input, Conv2D, DepthwiseConv2D, BatchNormalization, ReLU, Add, Concatenate, Activation, ZeroPadding2D\n'), ((8090, 8139), 'tensorflow.keras.layers.BatchNormalization', 'BatchNormalization', ([], {'epsilon': '(0.001)', 'momentum': '(0.999)'}), '(epsilon=0.001, momentum=0.999)\n', (8108, 8139), False, 'from tensorflow.keras.layers import Input, Conv2D, DepthwiseConv2D, BatchNormalization, ReLU, Add, 
Concatenate, Activation, ZeroPadding2D\n'), ((8150, 8159), 'tensorflow.keras.layers.ReLU', 'ReLU', (['(6.0)'], {}), '(6.0)\n', (8154, 8159), False, 'from tensorflow.keras.layers import Input, Conv2D, DepthwiseConv2D, BatchNormalization, ReLU, Add, Concatenate, Activation, ZeroPadding2D\n'), ((8639, 8688), 'tensorflow.keras.layers.BatchNormalization', 'BatchNormalization', ([], {'epsilon': '(0.001)', 'momentum': '(0.999)'}), '(epsilon=0.001, momentum=0.999)\n', (8657, 8688), False, 'from tensorflow.keras.layers import Input, Conv2D, DepthwiseConv2D, BatchNormalization, ReLU, Add, Concatenate, Activation, ZeroPadding2D\n'), ((8699, 8708), 'tensorflow.keras.layers.ReLU', 'ReLU', (['(6.0)'], {}), '(6.0)\n', (8703, 8708), False, 'from tensorflow.keras.layers import Input, Conv2D, DepthwiseConv2D, BatchNormalization, ReLU, Add, Concatenate, Activation, ZeroPadding2D\n'), ((8912, 8961), 'tensorflow.keras.layers.BatchNormalization', 'BatchNormalization', ([], {'epsilon': '(0.001)', 'momentum': '(0.999)'}), '(epsilon=0.001, momentum=0.999)\n', (8930, 8961), False, 'from tensorflow.keras.layers import Input, Conv2D, DepthwiseConv2D, BatchNormalization, ReLU, Add, Concatenate, Activation, ZeroPadding2D\n'), ((9369, 9410), 'numpy.log2', 'np.log2', (['(input_shape[1] / output_shape[1])'], {}), '(input_shape[1] / output_shape[1])\n', (9376, 9410), True, 'import numpy as np\n'), ((1701, 1723), 'tensorflow.keras.regularizers.l2', 'regularizers.l2', (['(4e-05)'], {}), '(4e-05)\n', (1716, 1723), False, 'from tensorflow.keras import regularizers, initializers\n'), ((2786, 2808), 'tensorflow.keras.regularizers.l2', 'regularizers.l2', (['(4e-05)'], {}), '(4e-05)\n', (2801, 2808), False, 'from tensorflow.keras import regularizers, initializers\n'), ((3074, 3096), 'tensorflow.keras.regularizers.l2', 'regularizers.l2', (['(4e-05)'], {}), '(4e-05)\n', (3089, 3096), False, 'from tensorflow.keras import regularizers, initializers\n'), ((4173, 4195), 'tensorflow.keras.regularizers.l2', 
'regularizers.l2', (['(4e-05)'], {}), '(4e-05)\n', (4188, 4195), False, 'from tensorflow.keras import regularizers, initializers\n'), ((4669, 4691), 'tensorflow.keras.regularizers.l2', 'regularizers.l2', (['(4e-05)'], {}), '(4e-05)\n', (4684, 4691), False, 'from tensorflow.keras import regularizers, initializers\n'), ((5491, 5513), 'tensorflow.keras.regularizers.l2', 'regularizers.l2', (['(4e-05)'], {}), '(4e-05)\n', (5506, 5513), False, 'from tensorflow.keras import regularizers, initializers\n'), ((5960, 5982), 'tensorflow.keras.regularizers.l2', 'regularizers.l2', (['(4e-05)'], {}), '(4e-05)\n', (5975, 5982), False, 'from tensorflow.keras import regularizers, initializers\n'), ((6768, 6790), 'tensorflow.keras.regularizers.l2', 'regularizers.l2', (['(4e-05)'], {}), '(4e-05)\n', (6783, 6790), False, 'from tensorflow.keras import regularizers, initializers\n'), ((7008, 7030), 'tensorflow.keras.regularizers.l2', 'regularizers.l2', (['(4e-05)'], {}), '(4e-05)\n', (7023, 7030), False, 'from tensorflow.keras import regularizers, initializers\n'), ((7816, 7838), 'tensorflow.keras.regularizers.l2', 'regularizers.l2', (['(4e-05)'], {}), '(4e-05)\n', (7831, 7838), False, 'from tensorflow.keras import regularizers, initializers\n'), ((8056, 8078), 'tensorflow.keras.regularizers.l2', 'regularizers.l2', (['(4e-05)'], {}), '(4e-05)\n', (8071, 8078), False, 'from tensorflow.keras import regularizers, initializers\n'), ((8878, 8900), 'tensorflow.keras.regularizers.l2', 'regularizers.l2', (['(4e-05)'], {}), '(4e-05)\n', (8893, 8900), False, 'from tensorflow.keras import regularizers, initializers\n'), ((1919, 1956), 'keras_applications.correct_pad', 'correct_pad', (['keras.backend', 'x', '(3, 3)'], {}), '(keras.backend, x, (3, 3))\n', (1930, 1956), False, 'from keras_applications import correct_pad\n'), ((2180, 2202), 'tensorflow.keras.regularizers.l2', 'regularizers.l2', (['(4e-05)'], {}), '(4e-05)\n', (2195, 2202), False, 'from tensorflow.keras import regularizers, 
initializers\n'), ((2458, 2480), 'tensorflow.keras.regularizers.l2', 'regularizers.l2', (['(4e-05)'], {}), '(4e-05)\n', (2473, 2480), False, 'from tensorflow.keras import regularizers, initializers\n'), ((3292, 3329), 'keras_applications.correct_pad', 'correct_pad', (['keras.backend', 'x', '(3, 3)'], {}), '(keras.backend, x, (3, 3))\n', (3303, 3329), False, 'from keras_applications import correct_pad\n'), ((3553, 3575), 'tensorflow.keras.regularizers.l2', 'regularizers.l2', (['(4e-05)'], {}), '(4e-05)\n', (3568, 3575), False, 'from tensorflow.keras import regularizers, initializers\n'), ((3831, 3853), 'tensorflow.keras.regularizers.l2', 'regularizers.l2', (['(4e-05)'], {}), '(4e-05)\n', (3846, 3853), False, 'from tensorflow.keras import regularizers, initializers\n'), ((4967, 4989), 'tensorflow.keras.regularizers.l2', 'regularizers.l2', (['(4e-05)'], {}), '(4e-05)\n', (4982, 4989), False, 'from tensorflow.keras import regularizers, initializers\n'), ((5218, 5240), 'tensorflow.keras.regularizers.l2', 'regularizers.l2', (['(4e-05)'], {}), '(4e-05)\n', (5233, 5240), False, 'from tensorflow.keras import regularizers, initializers\n'), ((6258, 6280), 'tensorflow.keras.regularizers.l2', 'regularizers.l2', (['(4e-05)'], {}), '(4e-05)\n', (6273, 6280), False, 'from tensorflow.keras import regularizers, initializers\n'), ((6509, 6531), 'tensorflow.keras.regularizers.l2', 'regularizers.l2', (['(4e-05)'], {}), '(4e-05)\n', (6524, 6531), False, 'from tensorflow.keras import regularizers, initializers\n'), ((7306, 7328), 'tensorflow.keras.regularizers.l2', 'regularizers.l2', (['(4e-05)'], {}), '(4e-05)\n', (7321, 7328), False, 'from tensorflow.keras import regularizers, initializers\n'), ((7557, 7579), 'tensorflow.keras.regularizers.l2', 'regularizers.l2', (['(4e-05)'], {}), '(4e-05)\n', (7572, 7579), False, 'from tensorflow.keras import regularizers, initializers\n'), ((8354, 8376), 'tensorflow.keras.regularizers.l2', 'regularizers.l2', (['(4e-05)'], {}), '(4e-05)\n', (8369, 
8376), False, 'from tensorflow.keras import regularizers, initializers\n'), ((8605, 8627), 'tensorflow.keras.regularizers.l2', 'regularizers.l2', (['(4e-05)'], {}), '(4e-05)\n', (8620, 8627), False, 'from tensorflow.keras import regularizers, initializers\n')] |
#!/usr/bin/env python3
"""
@author: <NAME>
@email: <EMAIL>
* FORCE MODULE *
Contains force calculations (and potential energies) using known potentials:
- Gravitational
- Lennard-Jones
Latest update: May 7th 2021
"""
import numpy as np
import system
from numba import jit, njit, vectorize
# Module-level Lennard-Jones parameters. They default to None and must be
# assigned by the caller (e.g. a simulation setup routine) before any of the
# force routines below are invoked.
epsilon = None  # LJ interaction strength (well depth), appears as 4*epsilon / 48*epsilon below
sigma = None  # LJ length scale
cutoff = None  # interaction cutoff radius; pairs farther apart are ignored
potential_shift = None  # V_LJ(cutoff); computed by LJ_potential_shift()
binwidth = None  # NOTE(review): unused in this chunk -- presumably an RDF bin width; verify
# Routine implementing the minimum image convention
# for computing the shortest distance between particles
@njit
def mic(xi, xj, L):
    """Minimum-image convention: shortest periodic separation along one axis.

    Given two coordinates in a periodic box of side L, return the signed
    separation whose magnitude is at most L/2.
    """
    separation = xi - xj
    if abs(separation) <= 0.5 * L:
        return separation
    # wrap across the periodic boundary toward the nearer image
    return separation - np.sign(separation) * L
@njit
def lennard_jones(force, pos, L, N, dim):
    """Accumulate pairwise Lennard-Jones forces into `force`; return (force, potential).

    Iterates over all unique pairs (i < j), applies the minimum-image
    convention per component, and skips pairs beyond the cutoff radius.
    Throughout, `r` holds the SQUARED distance, so e.g. r**7 is |rij|**14.

    NOTE(review): the force terms carry sigma**12 / sigma**6 factors but the
    potential term does not (it implicitly assumes sigma == 1) -- confirm.
    NOTE(review): the potential adds 2*4*epsilon per pair even though each
    pair is visited exactly once -- confirm the factor 2 is intended.
    """
    potential = 0
    rijz = 0  # stays 0 in 2D so the z-term drops out of r below
    for i in range(N-1):
        for j in range(i+1, N):
            # component-wise shortest separation under periodic boundaries
            rijx = mic(pos[i, 0],pos[j,0], L)
            rijy = mic(pos[i, 1],pos[j,1], L)
            if(dim==3):
                rijz = mic(pos[i, 2],pos[j,2], L)
                r = rijx*rijx + rijy*rijy + rijz*rijz
            else:
                r = rijx*rijx + rijy*rijy
            if(r<cutoff*cutoff):
                # 48*eps*(sigma^12/|rij|^13 - 0.5*sigma^6/|rij|^7) * (rij_a/|rij|),
                # expressed in terms of the squared distance r
                force[i,0] += 48*epsilon*(sigma**12*rijx/r**7 - 0.5*sigma**6*rijx/r**4)
                force[i,1] += 48*epsilon*(sigma**12*rijy/r**7 - 0.5*sigma**6*rijy/r**4)
                # Newton's third law: equal and opposite force on particle j
                force[j,0] -= 48*epsilon*(sigma**12*rijx/r**7 - 0.5*sigma**6*rijx/r**4)
                force[j,1] -= 48*epsilon*(sigma**12*rijy/r**7 - 0.5*sigma**6*rijy/r**4)
                if(dim==3):
                    force[i,2] += 48*epsilon*(sigma**12*rijz/r**7 - 0.5*sigma**6*rijz/r**4)
                    force[j,2] -= 48*epsilon*(sigma**12*rijz/r**7 - 0.5*sigma**6*rijz/r**4)
                # truncated-and-shifted potential (r is the squared distance)
                potential += 2*4*epsilon*(1/r**6 - 1/r**3) - potential_shift
    return force, potential
def radial_distribution_function(pos, L, N):
    """Compute the (2-D) radial distribution function g(r).

    Returns (rdf, compressibility, coord_number, bins) where the last three
    are cumulative integrals/edges derived from g(r). Each ordered pair
    (i, j) with i != j contributes one distance, so every pair is counted
    twice, matching the normalisation by N below.
    """
    n_bins = 100
    rijz = 0  # z-component disabled: distances are planar
    distances = []
    for i in range(N):
        for j in range(N):
            if i == j:
                continue
            rijx = mic(pos[i, 0], pos[j, 0], L)
            rijy = mic(pos[i, 1], pos[j, 1], L)
            distances.append(np.sqrt(rijx * rijx + rijy * rijy + rijz * rijz))
    d = np.asarray(distances)
    bins = np.linspace(0., np.max(d), n_bins)
    delta = bins[1] - bins[0]
    rdf = []
    for k in range(len(bins) - 1):
        # count distances falling in [bins[k], bins[k+1]) in one vectorised pass
        count = np.count_nonzero((d >= bins[k]) & (d < bins[k + 1]))
        # 2-D shell: annulus area between radius bins[k] and bins[k] + delta
        shell_area = np.pi * ((bins[k] + delta) ** 2 - bins[k] ** 2)
        rdf.append(count / N / shell_area / system.rho)
    rdf = np.asarray(rdf)
    # cumulative coordination number n(r) = 4*pi*rho * integral g(r) r^2 dr
    coord_number = 4 * np.pi * system.rho * np.cumsum(rdf * delta * bins[1:] ** 2)
    # compressibility from the cumulative integral of (g(r) - 1)
    compressibility = np.cumsum(1 / (system.T) * (rdf - 1) * delta) + 1 / (system.T * system.rho)
    return rdf, compressibility, coord_number, bins
def lennard_jones_numpy():
    """Vectorised 3-D Lennard-Jones forces and potential energy for `system`.

    Writes the per-particle net force into system.force and the total
    potential energy into system.potential; returns None.

    NOTE(review): the numba version (lennard_jones) SUBTRACTS potential_shift
    per pair, but here `+ potential_shift` is ADDED, and it is added to every
    element of the (N,N) matrix P (including the zeroed beyond-cutoff and
    diagonal entries) before np.triu sums it -- confirm both the sign and
    the per-element application.
    """
    # (N,N) matrices containing all particles' positions
    X = np.transpose(system.pos[:,0] * np.ones((system.N, system.N)))
    Y = np.transpose(system.pos[:,1] * np.ones((system.N, system.N)))
    Z = np.transpose(system.pos[:,2] * np.ones((system.N, system.N)))
    # Compute "absolute" distance between particles (no PBC and MIC)
    r_x = X - np.transpose(X)
    r_y = Y - np.transpose(Y)
    r_z = Z- np.transpose(Z)
    # Compute shortest distance according to PBC and MIC (minimum image convention)
    r_x = r_x - system.L * np.rint(np.divide(r_x, system.L))
    r_y = r_y - system.L * np.rint(np.divide(r_y, system.L))
    r_z = r_z - system.L * np.rint(np.divide(r_z, system.L))
    # Compute reciprocal of r
    # //I matrix are added and then subtracted in order to avoid divide by zero
    r_reciprocal = np.reciprocal(np.sqrt(r_x**2 + r_y**2 + r_z**2) + np.eye(system.N)) - np.eye(system.N)
    # Exclude distances longer than the cutoff radius
    # by setting r to zero
    r_reciprocal = np.where(r_reciprocal < np.reciprocal(cutoff), r_reciprocal, 0)
    # Compute force with Lennard Jones potential
    # //this evaluation already contains direction information
    # //f_x, f_y, f_z are (N,N) matrices (with zero on the diagonal)
    f_x = 4*epsilon*(-12*sigma**12*np.multiply(r_x, np.power(r_reciprocal, 14)) + 6*sigma**6*np.multiply(r_x, np.power(r_reciprocal, 8)))
    f_y = 4*epsilon*(-12*sigma**12*np.multiply(r_y, np.power(r_reciprocal, 14)) + 6*sigma**6*np.multiply(r_y, np.power(r_reciprocal, 8)))
    f_z = 4*epsilon*(-12*sigma**12*np.multiply(r_z, np.power(r_reciprocal, 14)) + 6*sigma**6*np.multiply(r_z, np.power(r_reciprocal, 8)))
    # Net force on each particle is obtained by summation over the columns
    # //returns forces in array of dimension (N,1)
    F_x = np.sum(f_x, axis = 0)
    F_y = np.sum(f_y, axis = 0)
    F_z = np.sum(f_z, axis = 0)
    # Stack forces in (N,3) array and save in net_force of system
    system.force = np.stack((F_x, F_y, F_z), axis = 1)
    # Compute the potential energy of the system taking advantage of
    # the already computed minimum distance.
    term = sigma*r_reciprocal
    P = 4*epsilon*(np.power(term, 12) - np.power(term, 6)) + potential_shift
    # Save potential energy in p_energy variable in py
    # //np.triu keeps each pair once (upper triangle incl. diagonal)
    system.potential = np.sum(np.triu(P))
def LJ_potential_shift():
    """Cache V_LJ(cutoff) in the module-global `potential_shift`.

    Subtracting this constant from the pair potential makes the truncated
    Lennard-Jones potential vanish exactly at the cutoff radius.
    """
    global potential_shift
    ratio = sigma/cutoff
    potential_shift = 4*epsilon*(np.power(ratio, 12) - np.power(ratio, 6))
| [
"numpy.stack",
"numpy.divide",
"numpy.sum",
"numpy.triu",
"numpy.power",
"numpy.asarray",
"numpy.transpose",
"numpy.ones",
"numpy.reciprocal",
"numpy.cumsum",
"numpy.linspace",
"numpy.sign",
"numpy.eye",
"numpy.sqrt"
] | [((1931, 1965), 'numpy.linspace', 'np.linspace', (['(0.0)', 'max_dist', 'n_bins'], {}), '(0.0, max_dist, n_bins)\n', (1942, 1965), True, 'import numpy as np\n'), ((4507, 4526), 'numpy.sum', 'np.sum', (['f_x'], {'axis': '(0)'}), '(f_x, axis=0)\n', (4513, 4526), True, 'import numpy as np\n'), ((4539, 4558), 'numpy.sum', 'np.sum', (['f_y'], {'axis': '(0)'}), '(f_y, axis=0)\n', (4545, 4558), True, 'import numpy as np\n'), ((4571, 4590), 'numpy.sum', 'np.sum', (['f_z'], {'axis': '(0)'}), '(f_z, axis=0)\n', (4577, 4590), True, 'import numpy as np\n'), ((4679, 4712), 'numpy.stack', 'np.stack', (['(F_x, F_y, F_z)'], {'axis': '(1)'}), '((F_x, F_y, F_z), axis=1)\n', (4687, 4712), True, 'import numpy as np\n'), ((1908, 1921), 'numpy.asarray', 'np.asarray', (['d'], {}), '(d)\n', (1918, 1921), True, 'import numpy as np\n'), ((2499, 2514), 'numpy.asarray', 'np.asarray', (['rdf'], {}), '(rdf)\n', (2509, 2514), True, 'import numpy as np\n'), ((2538, 2576), 'numpy.cumsum', 'np.cumsum', (['(1 / system.T * func * delta)'], {}), '(1 / system.T * func * delta)\n', (2547, 2576), True, 'import numpy as np\n'), ((2615, 2630), 'numpy.asarray', 'np.asarray', (['rdf'], {}), '(rdf)\n', (2625, 2630), True, 'import numpy as np\n'), ((3049, 3064), 'numpy.transpose', 'np.transpose', (['X'], {}), '(X)\n', (3061, 3064), True, 'import numpy as np\n'), ((3079, 3094), 'numpy.transpose', 'np.transpose', (['Y'], {}), '(Y)\n', (3091, 3094), True, 'import numpy as np\n'), ((3108, 3123), 'numpy.transpose', 'np.transpose', (['Z'], {}), '(Z)\n', (3120, 3123), True, 'import numpy as np\n'), ((3592, 3608), 'numpy.eye', 'np.eye', (['system.N'], {}), '(system.N)\n', (3598, 3608), True, 'import numpy as np\n'), ((5023, 5033), 'numpy.triu', 'np.triu', (['P'], {}), '(P)\n', (5030, 5033), True, 'import numpy as np\n'), ((2794, 2823), 'numpy.ones', 'np.ones', (['(system.N, system.N)'], {}), '((system.N, system.N))\n', (2801, 2823), True, 'import numpy as np\n'), ((2864, 2893), 'numpy.ones', 'np.ones', 
(['(system.N, system.N)'], {}), '((system.N, system.N))\n', (2871, 2893), True, 'import numpy as np\n'), ((2934, 2963), 'numpy.ones', 'np.ones', (['(system.N, system.N)'], {}), '((system.N, system.N))\n', (2941, 2963), True, 'import numpy as np\n'), ((3734, 3755), 'numpy.reciprocal', 'np.reciprocal', (['cutoff'], {}), '(cutoff)\n', (3747, 3755), True, 'import numpy as np\n'), ((5122, 5150), 'numpy.power', 'np.power', (['(sigma / cutoff)', '(12)'], {}), '(sigma / cutoff, 12)\n', (5130, 5150), True, 'import numpy as np\n'), ((5151, 5178), 'numpy.power', 'np.power', (['(sigma / cutoff)', '(6)'], {}), '(sigma / cutoff, 6)\n', (5159, 5178), True, 'import numpy as np\n'), ((570, 582), 'numpy.sign', 'np.sign', (['rij'], {}), '(rij)\n', (577, 582), True, 'import numpy as np\n'), ((1828, 1876), 'numpy.sqrt', 'np.sqrt', (['(rijx * rijx + rijy * rijy + rijz * rijz)'], {}), '(rijx * rijx + rijy * rijy + rijz * rijz)\n', (1835, 1876), True, 'import numpy as np\n'), ((3244, 3268), 'numpy.divide', 'np.divide', (['r_x', 'system.L'], {}), '(r_x, system.L)\n', (3253, 3268), True, 'import numpy as np\n'), ((3305, 3329), 'numpy.divide', 'np.divide', (['r_y', 'system.L'], {}), '(r_y, system.L)\n', (3314, 3329), True, 'import numpy as np\n'), ((3366, 3390), 'numpy.divide', 'np.divide', (['r_z', 'system.L'], {}), '(r_z, system.L)\n', (3375, 3390), True, 'import numpy as np\n'), ((3536, 3575), 'numpy.sqrt', 'np.sqrt', (['(r_x ** 2 + r_y ** 2 + r_z ** 2)'], {}), '(r_x ** 2 + r_y ** 2 + r_z ** 2)\n', (3543, 3575), True, 'import numpy as np\n'), ((3572, 3588), 'numpy.eye', 'np.eye', (['system.N'], {}), '(system.N)\n', (3578, 3588), True, 'import numpy as np\n'), ((4879, 4897), 'numpy.power', 'np.power', (['term', '(12)'], {}), '(term, 12)\n', (4887, 4897), True, 'import numpy as np\n'), ((4900, 4917), 'numpy.power', 'np.power', (['term', '(6)'], {}), '(term, 6)\n', (4908, 4917), True, 'import numpy as np\n'), ((2427, 2442), 'numpy.asarray', 'np.asarray', (['rdf'], {}), '(rdf)\n', (2437, 
2442), True, 'import numpy as np\n'), ((4008, 4034), 'numpy.power', 'np.power', (['r_reciprocal', '(14)'], {}), '(r_reciprocal, 14)\n', (4016, 4034), True, 'import numpy as np\n'), ((4066, 4091), 'numpy.power', 'np.power', (['r_reciprocal', '(8)'], {}), '(r_reciprocal, 8)\n', (4074, 4091), True, 'import numpy as np\n'), ((4146, 4172), 'numpy.power', 'np.power', (['r_reciprocal', '(14)'], {}), '(r_reciprocal, 14)\n', (4154, 4172), True, 'import numpy as np\n'), ((4204, 4229), 'numpy.power', 'np.power', (['r_reciprocal', '(8)'], {}), '(r_reciprocal, 8)\n', (4212, 4229), True, 'import numpy as np\n'), ((4284, 4310), 'numpy.power', 'np.power', (['r_reciprocal', '(14)'], {}), '(r_reciprocal, 14)\n', (4292, 4310), True, 'import numpy as np\n'), ((4342, 4367), 'numpy.power', 'np.power', (['r_reciprocal', '(8)'], {}), '(r_reciprocal, 8)\n', (4350, 4367), True, 'import numpy as np\n')] |
import os, argparse
import numpy as np
import torch
import cv2
from torchvision.utils import save_image, make_grid
from model import FactorVAE
from dataset import return_data
from gradcam import GradCAM
def load_checkpoint(model, ckpt_dir, ckptname, device, verbose=True):
    """Load the 'VAE' weights from ckpt_dir/ckptname into model.

    Returns True when the checkpoint file exists and was loaded,
    False otherwise. Set verbose=False to suppress the status print.
    """
    filepath = os.path.join(ckpt_dir, ckptname)
    # guard clause: bail out early when there is nothing to load
    if not os.path.isfile(filepath):
        if verbose:
            print("no checkpoint found at '{}'".format(filepath))
        return False
    with open(filepath, 'rb') as f:
        checkpoint = torch.load(f, map_location=device)
    model.load_state_dict(checkpoint['model_states']['VAE'])
    if verbose:
        print("loaded checkpoint '{}'".format(filepath))
    return True
def normalize_tensor(t):
    """Min-max scale a tensor into [0, 1] (assumes t is not constant)."""
    shifted = t - torch.min(t)
    return shifted / torch.max(shifted)
def process_imgs(input, recon, first_cam, second_cam, n_factors):
    """Turn batched image tensors into HWC numpy grids ready for display.

    input and recon are min-max normalised first; the attention maps are
    assumed to be normalised already by the caller.
    """
    def to_grid(t):
        # (B,C,H,W) batch -> single (H,W,C) numpy image grid
        g = make_grid(t, nrow=n_factors, normalize=False)
        return g.transpose(0, 2).transpose(0, 1).detach().cpu().numpy()

    input = normalize_tensor(input)
    recon = normalize_tensor(recon)
    return to_grid(input), to_grid(recon), to_grid(first_cam), to_grid(second_cam)
def add_heatmap(input, gcam):
    """Overlay a JET-colormapped attention map on an image.

    input and gcam are float arrays in [0, 1]; returns a uint8 image whose
    peak value is rescaled to 255.
    """
    gcam = cv2.applyColorMap(np.uint8(255 * gcam), cv2.COLORMAP_JET)
    # np.float was deprecated in NumPy 1.20 and removed in 1.24; plain
    # Python float is the documented drop-in replacement (same dtype).
    gcam = np.asarray(gcam, dtype=float) + np.asarray(input, dtype=float)
    gcam = 255 * gcam / np.max(gcam)
    return np.uint8(gcam)
def main(args):
    """Visualise Grad-CAM attention maps for a trained FactorVAE.

    Loads the checkpoint named by args, samples the first args.sample_count
    items from the dataset, extracts the strongest and second-strongest
    attention map per sample, and shows input/heatmap grids with OpenCV.
    Returns early (without display) if the checkpoint is missing.
    """
    np.random.seed(args.seed)
    use_cuda = args.cuda and torch.cuda.is_available()
    device = 'cuda' if use_cuda else 'cpu'

    model = FactorVAE(args.z_dim).to(device)
    model_found = load_checkpoint(model, args.dir, args.name, device)
    if not model_found:
        return

    gcam = GradCAM(model.encode, args.target_layer, device, args.image_size)
    _, dataset = return_data(args)
    input = dataset[np.arange(0, args.sample_count)][0].to(device)
    recon, mu, logvar, z = model(input)
    # repeat to 3 channels so the grayscale images can carry an RGB heatmap
    input, recon = input.repeat(1, 3, 1, 1), recon.repeat(1, 3, 1, 1)

    maps = gcam.generate(z)
    maps = maps.transpose(0, 1)

    first_cam, second_cam = [], []
    for cam_map in maps:  # renamed from `map` to avoid shadowing the builtin
        # rank maps by total response; keep the strongest and the runner-up
        response = cam_map.flatten(1).sum(1)
        argmax = torch.argmax(response).item()
        first_cam.append(normalize_tensor(cam_map[argmax]))
        response = torch.cat((response[:argmax], response[argmax+1:]))
        second = torch.argmax(response).item()
        # fix: indices at/after the removed maximum are shifted down by one,
        # so compensate before indexing back into the full map stack
        if second >= argmax:
            second += 1
        second_cam.append(normalize_tensor(cam_map[second]))
    first_cam = ((torch.stack(first_cam, axis=1)).transpose(0, 1)).unsqueeze(1)
    second_cam = ((torch.stack(second_cam, axis=1)).transpose(0, 1)).unsqueeze(1)

    input, recon, first_cam, second_cam = process_imgs(input.detach(), recon.detach(), first_cam.detach(), second_cam.detach(), args.sample_count)
    heatmap = add_heatmap(input, first_cam)
    heatmap2 = add_heatmap(input, second_cam)
    # np.float was removed in NumPy 1.24; plain float is the drop-in replacement
    input = np.uint8(np.asarray(input, dtype=float)*255)
    recon = np.uint8(np.asarray(recon, dtype=float)*255)

    grid = np.concatenate((input, heatmap, heatmap2))
    cv2.imshow('Attention Maps of ' + args.name, grid)
    cv2.waitKey(0)
if __name__ == "__main__":
    # Command-line interface for the attention-map visualizer.
    parser = argparse.ArgumentParser(description='Visualizer')
    parser.add_argument('--name', default='main', type=str, help='name of the model to be visualized')
    parser.add_argument('--dir', default='checkpoints', type=str, help='name of the directory holding the models weights')
    parser.add_argument('--output_dir', default='visualizations', type=str, help='name of the directory holding the visualizations')
    parser.add_argument('--seed', default=0, type=int, help='the seed')
    # fix: the previous `type=bool, nargs='?'` form mapped ANY explicit value
    # (including "False") to True, because bool('False') is True in Python.
    # store_true keeps `--cuda` working as a flag with default False.
    parser.add_argument('--cuda', action='store_true', help='add if the gpu should be used')
    parser.add_argument('--z_dim', default=32, type=int, help='dimension of the representation z, necessary for loading the model properly')
    parser.add_argument('--target_layer', type=str, default='0', help='target layer for the attention maps')
    parser.add_argument('--sample_count', default=5, type=int, help='amount of samples from the dataset to create the maps for')
    parser.add_argument('--dset_dir', default='data', type=str, help='dataset directory')
    parser.add_argument('--dataset', default='dsprites', type=str, help='dataset name')
    parser.add_argument('--image_size', default=64, type=int, help='image size. now only (64,64) is supported')
    parser.add_argument('--num_workers', default=1, type=int, help='dataloader num_workers')
    parser.add_argument('--batch_size', default=1, type=int, help='place holder')

    args = parser.parse_args()
    main(args)
| [
"numpy.random.seed",
"argparse.ArgumentParser",
"torch.argmax",
"torch.cat",
"os.path.isfile",
"numpy.arange",
"model.FactorVAE",
"cv2.imshow",
"gradcam.GradCAM",
"os.path.join",
"torch.load",
"numpy.max",
"dataset.return_data",
"numpy.uint8",
"cv2.waitKey",
"numpy.asarray",
"torch.m... | [((291, 323), 'os.path.join', 'os.path.join', (['ckpt_dir', 'ckptname'], {}), '(ckpt_dir, ckptname)\n', (303, 323), False, 'import os, argparse\n'), ((331, 355), 'os.path.isfile', 'os.path.isfile', (['filepath'], {}), '(filepath)\n', (345, 355), False, 'import os, argparse\n'), ((1737, 1751), 'numpy.uint8', 'np.uint8', (['gcam'], {}), '(gcam)\n', (1745, 1751), True, 'import numpy as np\n'), ((1774, 1799), 'numpy.random.seed', 'np.random.seed', (['args.seed'], {}), '(args.seed)\n', (1788, 1799), True, 'import numpy as np\n'), ((2066, 2131), 'gradcam.GradCAM', 'GradCAM', (['model.encode', 'args.target_layer', 'device', 'args.image_size'], {}), '(model.encode, args.target_layer, device, args.image_size)\n', (2073, 2131), False, 'from gradcam import GradCAM\n'), ((2150, 2167), 'dataset.return_data', 'return_data', (['args'], {}), '(args)\n', (2161, 2167), False, 'from dataset import return_data\n'), ((3292, 3334), 'numpy.concatenate', 'np.concatenate', (['(input, heatmap, heatmap2)'], {}), '((input, heatmap, heatmap2))\n', (3306, 3334), True, 'import numpy as np\n'), ((3340, 3390), 'cv2.imshow', 'cv2.imshow', (["('Attention Maps of ' + args.name)", 'grid'], {}), "('Attention Maps of ' + args.name, grid)\n", (3350, 3390), False, 'import cv2\n'), ((3395, 3409), 'cv2.waitKey', 'cv2.waitKey', (['(0)'], {}), '(0)\n', (3406, 3409), False, 'import cv2\n'), ((3452, 3501), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Visualizer"""'}), "(description='Visualizer')\n", (3475, 3501), False, 'import os, argparse\n'), ((780, 792), 'torch.min', 'torch.min', (['t'], {}), '(t)\n', (789, 792), False, 'import torch\n'), ((805, 817), 'torch.max', 'torch.max', (['t'], {}), '(t)\n', (814, 817), False, 'import torch\n'), ((1568, 1588), 'numpy.uint8', 'np.uint8', (['(255 * gcam)'], {}), '(255 * gcam)\n', (1576, 1588), True, 'import numpy as np\n'), ((1619, 1651), 'numpy.asarray', 'np.asarray', (['gcam'], {'dtype': 'np.float'}), '(gcam, 
dtype=np.float)\n', (1629, 1651), True, 'import numpy as np\n'), ((1654, 1687), 'numpy.asarray', 'np.asarray', (['input'], {'dtype': 'np.float'}), '(input, dtype=np.float)\n', (1664, 1687), True, 'import numpy as np\n'), ((1712, 1724), 'numpy.max', 'np.max', (['gcam'], {}), '(gcam)\n', (1718, 1724), True, 'import numpy as np\n'), ((1829, 1854), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (1852, 1854), False, 'import torch\n'), ((2628, 2681), 'torch.cat', 'torch.cat', (['(response[:argmax], response[argmax + 1:])'], {}), '((response[:argmax], response[argmax + 1:]))\n', (2637, 2681), False, 'import torch\n'), ((422, 456), 'torch.load', 'torch.load', (['f'], {'map_location': 'device'}), '(f, map_location=device)\n', (432, 456), False, 'import torch\n'), ((1911, 1932), 'model.FactorVAE', 'FactorVAE', (['args.z_dim'], {}), '(args.z_dim)\n', (1920, 1932), False, 'from model import FactorVAE\n'), ((3182, 3215), 'numpy.asarray', 'np.asarray', (['input'], {'dtype': 'np.float'}), '(input, dtype=np.float)\n', (3192, 3215), True, 'import numpy as np\n'), ((3242, 3275), 'numpy.asarray', 'np.asarray', (['recon'], {'dtype': 'np.float'}), '(recon, dtype=np.float)\n', (3252, 3275), True, 'import numpy as np\n'), ((2522, 2544), 'torch.argmax', 'torch.argmax', (['response'], {}), '(response)\n', (2534, 2544), False, 'import torch\n'), ((2189, 2220), 'numpy.arange', 'np.arange', (['(0)', 'args.sample_count'], {}), '(0, args.sample_count)\n', (2198, 2220), True, 'import numpy as np\n'), ((2779, 2809), 'torch.stack', 'torch.stack', (['first_cam'], {'axis': '(1)'}), '(first_cam, axis=1)\n', (2790, 2809), False, 'import torch\n'), ((2859, 2890), 'torch.stack', 'torch.stack', (['second_cam'], {'axis': '(1)'}), '(second_cam, axis=1)\n', (2870, 2890), False, 'import torch\n'), ((2727, 2749), 'torch.argmax', 'torch.argmax', (['response'], {}), '(response)\n', (2739, 2749), False, 'import torch\n'), ((985, 1034), 'torchvision.utils.make_grid', 'make_grid', 
(['input'], {'nrow': 'n_factors', 'normalize': '(False)'}), '(input, nrow=n_factors, normalize=False)\n', (994, 1034), False, 'from torchvision.utils import save_image, make_grid\n'), ((1102, 1151), 'torchvision.utils.make_grid', 'make_grid', (['recon'], {'nrow': 'n_factors', 'normalize': '(False)'}), '(recon, nrow=n_factors, normalize=False)\n', (1111, 1151), False, 'from torchvision.utils import save_image, make_grid\n'), ((1223, 1276), 'torchvision.utils.make_grid', 'make_grid', (['first_cam'], {'nrow': 'n_factors', 'normalize': '(False)'}), '(first_cam, nrow=n_factors, normalize=False)\n', (1232, 1276), False, 'from torchvision.utils import save_image, make_grid\n'), ((1349, 1403), 'torchvision.utils.make_grid', 'make_grid', (['second_cam'], {'nrow': 'n_factors', 'normalize': '(False)'}), '(second_cam, nrow=n_factors, normalize=False)\n', (1358, 1403), False, 'from torchvision.utils import save_image, make_grid\n')] |
from picket.globalvar import *
from picket.preprocessor.embeddings import load_embedding
from collections import OrderedDict
from tqdm import tqdm
import pandas as pd
import numpy as np
import json, logging
logging.basicConfig()
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
class Dataset(object):
    """A tabular dataset: the raw dataframe, one Attribute per column,
    and (optionally) functional dependencies loaded from JSON."""
    def __init__(self, env):
        # env carries at least 'dataset_config' and 'dataset_path';
        # Attribute.load_embedding later also reads env['embed_config'].
        self.env = env
        self.config = self.env['dataset_config']
        # the data-frame that holds the data
        self.df = None
        # the functional dependencies
        self.fds = None
        # column index -> Attribute object (filled by load_attributes)
        self.attributes = OrderedDict({})
        # attribute name -> column index (filled by load_dataset)
        self.attr_to_idx = {}
        # placeholders until load_dataset() sets the real shape
        self.numAttr = -1
        self.numTuple = -1
    def load_dataset(self):
        """
        Loads the data to a data-frame. Pre-processing of the data-frame,
        drop columns etc.. Create dictionary with name of attribute
        to the index of the attribute. Load the attributes
        """
        # setup header:
        if self.config['header'].lower() == 'none':
            self.config['header'] = None
        # load preprocessor from file
        self.df = pd.read_csv(self.env['dataset_path'],
                              encoding='utf8',
                              header=self.config['header'],
                              sep=self.config['sep'],
                              na_values=self.config['na_values'])
        # replace null values
        self.df = self.df.fillna('Nan')
        # pre-processing the dataset
        self.preprocess_df(self.config['dropna'], self.config['dropcol'])
        # numTuple: number of total instances/tuples in data-set
        # numAttr: number of attributes in the data-set
        self.numTuple, self.numAttr = self.df.shape[0], self.df.shape[1]
        # dictionary with name of attribute as index to number of attribute
        self.attr_to_idx = dict(zip(self.df.columns.values, list(range(self.numAttr))))
        # change column types based on the user input
        self.change_column_type()
        # load attributes to a dictionary: num of attribute to Attribute object
        self.load_attributes(self.config['dtypes'])
    def load_fds(self, path):
        """Load functional dependencies from the JSON file at `path`."""
        logger.info("Load FDs...")
        # NOTE(review): the file handle is never closed; a with-block would be safer.
        self.fds = json.load(open(path))
        return self.fds
    def preprocess_df(self, dropna, dropcol):
        """Clean the dataframe: optionally drop columns/rows, fill NaNs."""
        logger.info("Preprocessing Data...")
        # (optional) drop specified columns
        if dropcol is not None:
            self.df = self.df.drop(dropcol, axis=1)
        # (optional) drop rows with empty values
        if dropna:
            self.df.dropna(axis=0, how='any', inplace=True)
        # (optional) replace empty cells
        self.df = self.df.replace(np.nan, self.config['nan'], regex=True)
        # drop empty columns
        self.df.dropna(axis=1, how='all', inplace=True)
    def infer_column_type(self, c, data):
        """Heuristically classify a column as NUMERIC, TEXT or CATEGORICAL.

        :param c: the column's dtype
        :param data: the column's values (used to count unique cells)
        """
        if np.issubdtype(c, np.number):
            return NUMERIC
        # many distinct values -> treat as free text, otherwise categorical
        if data.unique().shape[0] >= self.config['min_categories_for_text']:
            return TEXT
        return CATEGORICAL
    def change_column_type(self):
        """Cast each column to str or numeric according to config['dtypes']."""
        for idx, attr in enumerate(self.df):
            if self.config['dtypes'][idx] == CATEGORICAL or self.config['dtypes'][idx] == TEXT:
                self.df[attr] = self.df[attr].astype(str)
                logger.info("change column type from {} to '{}'".format('Numeric', 'String'))
            elif self.config['dtypes'][idx] == NUMERIC:
                # unparseable values become NaN
                self.df[attr] = pd.to_numeric(self.df[attr], errors='coerce')
                logger.info("change column type from {} to '{}'".format('String', 'Numeric'))
    def load_attributes(self, dtypes):
        """Create one Attribute object per column, keyed by column index."""
        for idx, attr in enumerate(self.df):
            # infer column type
            # inferred_type = self.infer_column_type(self.df[attr].dtype, self.df[attr])
            # self.attributes[idx] = Attribute(self, idx, attr, inferred_type)
            self.attributes[idx] = Attribute(self, idx, attr, dtypes[idx])
    def load_embedding(self, wv=None):
        """Load embeddings for every attribute.

        All TEXT columns are first concatenated into df_all so that the
        text attributes can share a common corpus.
        """
        first = True
        df_all = None
        for attr in tqdm(self.attributes.values()):
            if attr.dtype == TEXT:
                if first:
                    df_all = self.df[attr.name]
                    first = False
                else:
                    df_all = pd.concat([df_all, self.df[attr.name]])
        for attr in tqdm(self.attributes.values()):
            attr.load_embedding(wv, df_all)
class Attribute(object):
    """One column of a Dataset plus its embedding (vocab + vectors)."""
    def __init__(self, ds, idx, name, dtype):
        # back-reference to the owning Dataset
        self.ds = ds
        self.idx = idx
        self.name = name
        self.dtype = dtype
        # dimension of the embedding
        self.dim = 0
        # the unique cells of the specified attribute
        self.vocab = None
        # the embeddings of the unique cells
        self.vec = None
    def load_embedding(self, wv, df_all):
        # replace null values with a specific value based on the type of the data
        '''
        Build the vocabulary and embedding table for this attribute.

        :param wv: optional pre-trained word vectors, forwarded to the
            module-level load_embedding helper
        :param df_all: concatenation of all TEXT columns (shared corpus)
        :return: None; sets self.vec, self.vocab and self.dim
        '''
        '''
        if self.dtype == TEXT:
            self.ds.df = self.ds.df.replace(np.nan, self.ds.env[self.dtype]['nan'], regex=True)
        elif self.dtype == NUMERIC:
            self.ds.df = self.ds.df.replace(np.nan, self.ds.env[self.dtype]['nan'], regex=True)
        elif self.dtype == CATEGORICAL:
            self.ds.df = self.ds.df.replace(np.nan, self.ds.env[self.dtype]['nan'], regex=True)
        '''
        # for the specific attribute take all the values and create vocab and lookup table with embeddings
        self.vec, self.vocab = load_embedding(self.idx, self.ds.env['embed_config'][self.dtype],
                                              self.ds.df[self.name], self.dtype, wv, df_all)
        # set the number of the dimension of the embedding
        # print(self.vec.shape)
        self.dim = self.vec.shape[1]
| [
"logging.basicConfig",
"pandas.read_csv",
"logging.getLogger",
"picket.preprocessor.embeddings.load_embedding",
"collections.OrderedDict",
"pandas.to_numeric",
"pandas.concat",
"numpy.issubdtype"
] | [((208, 229), 'logging.basicConfig', 'logging.basicConfig', ([], {}), '()\n', (227, 229), False, 'import json, logging\n'), ((239, 266), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (256, 266), False, 'import json, logging\n'), ((580, 595), 'collections.OrderedDict', 'OrderedDict', (['{}'], {}), '({})\n', (591, 595), False, 'from collections import OrderedDict\n'), ((1106, 1255), 'pandas.read_csv', 'pd.read_csv', (["self.env['dataset_path']"], {'encoding': '"""utf8"""', 'header': "self.config['header']", 'sep': "self.config['sep']", 'na_values': "self.config['na_values']"}), "(self.env['dataset_path'], encoding='utf8', header=self.config[\n 'header'], sep=self.config['sep'], na_values=self.config['na_values'])\n", (1117, 1255), True, 'import pandas as pd\n'), ((2868, 2895), 'numpy.issubdtype', 'np.issubdtype', (['c', 'np.number'], {}), '(c, np.number)\n', (2881, 2895), True, 'import numpy as np\n'), ((5663, 5780), 'picket.preprocessor.embeddings.load_embedding', 'load_embedding', (['self.idx', "self.ds.env['embed_config'][self.dtype]", 'self.ds.df[self.name]', 'self.dtype', 'wv', 'df_all'], {}), "(self.idx, self.ds.env['embed_config'][self.dtype], self.ds.\n df[self.name], self.dtype, wv, df_all)\n", (5677, 5780), False, 'from picket.preprocessor.embeddings import load_embedding\n'), ((3468, 3513), 'pandas.to_numeric', 'pd.to_numeric', (['self.df[attr]'], {'errors': '"""coerce"""'}), "(self.df[attr], errors='coerce')\n", (3481, 3513), True, 'import pandas as pd\n'), ((4297, 4336), 'pandas.concat', 'pd.concat', (['[df_all, self.df[attr.name]]'], {}), '([df_all, self.df[attr.name]])\n', (4306, 4336), True, 'import pandas as pd\n')] |
#######################################################################
# Copyright (C) 2016 <NAME>(<EMAIL>) #
# Permission given to modify the code as long as you keep this #
# declaration at the top #
#######################################################################
from __future__ import print_function
import numpy as np
import matplotlib.pyplot as plt
import functools
# 19-state random walk
N_STATES = 19
# undiscounted
GAMMA = 1
# value estimates; index 0 and N_STATES + 1 are the terminal states
stateValues = np.zeros(N_STATES + 2)
# all states except for terminal states
states = np.arange(1, N_STATES + 1)
# start from the middle
START_STATE = N_STATES // 2 + 1
END_STATES = [0, N_STATES + 1]
# add an extra action STAY besides LEFT and RIGHT
ACTIONS = [-1, 0, 1]
# probability of each action
ACTIONS_PROB = np.asarray([0.25, 0.5, 0.25])
# use DP to get the true state value
trueStateValues = np.copy(stateValues)
# seed the terminals with their rewards so value iteration can propagate them
trueStateValues[0] = -1.0
trueStateValues[-1] = 1.0
# in-place value iteration until the summed update falls below tolerance
while True:
    delta = 0.0
    for state in states:
        newStateValue = np.sum(ACTIONS_PROB * [trueStateValues[state + action] for action in ACTIONS])
        delta += np.abs(newStateValue - trueStateValues[state])
        trueStateValues[state] = newStateValue
    if delta < 1e-3:
        break
# terminal states have value 0 by definition; reset after iteration
trueStateValues[0] = trueStateValues[-1] = 0
# report the RMS error of the all-zero initial estimate
print(np.sqrt(np.mean(np.power(trueStateValues[1: -1] - stateValues[1: -1], 2))))
def nextStep(state):
    """Sample one environment transition from `state`.

    Draws a step (left / stay / right) according to ACTIONS_PROB and
    returns (successor, reward): reward is -1 at the left terminal,
    +1 at the right terminal, 0 everywhere else.
    """
    successor = state + np.random.choice(ACTIONS, p=ACTIONS_PROB)
    if successor == 0:
        return successor, -1.0
    if successor == N_STATES + 1:
        return successor, 1.0
    return successor, 0.0
# n-step TD algorithm
# @sumOfTDErrors: False if use n-step TD error
#                 True if use sum of n TD errors
def temproalDifference(stateValues, n, alpha, sumOfTDErrors=False):
    """Run one episode of n-step TD, updating `stateValues` in place.

    :param stateValues: current value estimates, mutated in place
    :param n: number of lookahead steps
    :param alpha: step size
    :param sumOfTDErrors: if True, the update target is the sum of n
        one-step TD errors computed on a shadow copy of the values;
        otherwise it is the standard n-step TD error.
    """
    currentState = START_STATE
    states = [currentState]
    rewards = [0.0]
    time = 0
    # T is the (initially unknown) episode length
    T = float('inf')
    while True:
        time += 1
        if time < T:
            newState, reward = nextStep(currentState)
            states.append(newState)
            rewards.append(reward)
            if newState in END_STATES:
                T = time
        # the state whose estimate becomes updatable at this step
        updateTime = time - n
        if updateTime >= 0:
            stateToUpdate = states[updateTime]
            if sumOfTDErrors:
                # make a copy of current state value
                shadowStateValues = np.copy(stateValues)
                errors = 0.0
                # perform n TD updates on the copy, get the cumulative TD error
                for t in range(updateTime, min(T, updateTime + n)):
                    delta = rewards[t + 1] + shadowStateValues[states[t + 1]] - \
                            shadowStateValues[states[t]]
                    errors += delta
                    shadowStateValues[states[t]] += alpha * delta
            else:
                # n-step TD error
                returns = 0.0
                returns += np.sum(rewards[updateTime + 1: min(T, updateTime + n) + 1])
                # bootstrap only if the n-step horizon stays inside the episode
                if updateTime + n <= T:
                    returns += stateValues[states[updateTime + n]]
                errors = returns - stateValues[stateToUpdate]
            # update the state value
            stateValues[stateToUpdate] += alpha * errors
        if updateTime == T - 1:
            break
        currentState = newState
def figure():
    """Compare 'sum of n TD errors' vs 'n-step TD error' updates.

    Averages the per-episode RMS error over `runs` independent runs and
    plots one curve per method (figure is created but shown by the caller).
    """
    runs = 100
    episodes = 50
    alpha = 0.1
    labels = ['sum of n TD errors', 'n-step TD error']
    methods = [functools.partial(temproalDifference, n=4, alpha=alpha, sumOfTDErrors=True),
               functools.partial(temproalDifference, n=4, alpha=alpha, sumOfTDErrors=False)]
    errors = np.zeros((len(methods), episodes))
    for run in range(runs):
        for index, method in zip(range(len(methods)), methods):
            # set random seed to make sure the trajectory is the same for both algorithms
            np.random.seed(run)
            currentStateValues = np.copy(stateValues)
            for episode in range(episodes):
                print('run:', run, 'episode:', episode)
                method(currentStateValues)
                errors[index, episode] += np.sqrt(np.mean(np.power(currentStateValues[1: -1] - trueStateValues[1: -1], 2)))
    errors /= runs
    plt.figure()
    for i in range(len(labels)):
        plt.plot(errors[i], label=labels[i])
    plt.xlabel('episodes')
    plt.ylabel('RMS error')
    plt.legend()
if __name__ == "__main__":
    # Run the experiment and show the plot only when executed as a script,
    # so importing this module does not trigger the long training loop.
    figure()
    plt.show()
| [
"functools.partial",
"matplotlib.pyplot.show",
"numpy.sum",
"numpy.copy",
"numpy.abs",
"matplotlib.pyplot.plot",
"numpy.power",
"numpy.asarray",
"matplotlib.pyplot.legend",
"numpy.zeros",
"numpy.random.seed",
"matplotlib.pyplot.figure",
"numpy.arange",
"numpy.random.choice",
"matplotlib.... | [((516, 538), 'numpy.zeros', 'np.zeros', (['(N_STATES + 2)'], {}), '(N_STATES + 2)\n', (524, 538), True, 'import numpy as np\n'), ((589, 615), 'numpy.arange', 'np.arange', (['(1)', '(N_STATES + 1)'], {}), '(1, N_STATES + 1)\n', (598, 615), True, 'import numpy as np\n'), ((820, 849), 'numpy.asarray', 'np.asarray', (['[0.25, 0.5, 0.25]'], {}), '([0.25, 0.5, 0.25])\n', (830, 849), True, 'import numpy as np\n'), ((906, 926), 'numpy.copy', 'np.copy', (['stateValues'], {}), '(stateValues)\n', (913, 926), True, 'import numpy as np\n'), ((4478, 4488), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4486, 4488), True, 'import matplotlib.pyplot as plt\n'), ((4305, 4317), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (4315, 4317), True, 'import matplotlib.pyplot as plt\n'), ((4400, 4422), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""episodes"""'], {}), "('episodes')\n", (4410, 4422), True, 'import matplotlib.pyplot as plt\n'), ((4427, 4450), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""RMS error"""'], {}), "('RMS error')\n", (4437, 4450), True, 'import matplotlib.pyplot as plt\n'), ((4455, 4467), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (4465, 4467), True, 'import matplotlib.pyplot as plt\n'), ((1056, 1134), 'numpy.sum', 'np.sum', (['(ACTIONS_PROB * [trueStateValues[state + action] for action in ACTIONS])'], {}), '(ACTIONS_PROB * [trueStateValues[state + action] for action in ACTIONS])\n', (1062, 1134), True, 'import numpy as np\n'), ((1152, 1198), 'numpy.abs', 'np.abs', (['(newStateValue - trueStateValues[state])'], {}), '(newStateValue - trueStateValues[state])\n', (1158, 1198), True, 'import numpy as np\n'), ((1472, 1513), 'numpy.random.choice', 'np.random.choice', (['ACTIONS'], {'p': 'ACTIONS_PROB'}), '(ACTIONS, p=ACTIONS_PROB)\n', (1488, 1513), True, 'import numpy as np\n'), ((3528, 3603), 'functools.partial', 'functools.partial', (['temproalDifference'], {'n': '(4)', 'alpha': 'alpha', 'sumOfTDErrors': 
'(True)'}), '(temproalDifference, n=4, alpha=alpha, sumOfTDErrors=True)\n', (3545, 3603), False, 'import functools\n'), ((3620, 3696), 'functools.partial', 'functools.partial', (['temproalDifference'], {'n': '(4)', 'alpha': 'alpha', 'sumOfTDErrors': '(False)'}), '(temproalDifference, n=4, alpha=alpha, sumOfTDErrors=False)\n', (3637, 3696), False, 'import functools\n'), ((4359, 4395), 'matplotlib.pyplot.plot', 'plt.plot', (['errors[i]'], {'label': 'labels[i]'}), '(errors[i], label=labels[i])\n', (4367, 4395), True, 'import matplotlib.pyplot as plt\n'), ((1348, 1402), 'numpy.power', 'np.power', (['(trueStateValues[1:-1] - stateValues[1:-1])', '(2)'], {}), '(trueStateValues[1:-1] - stateValues[1:-1], 2)\n', (1356, 1402), True, 'import numpy as np\n'), ((3940, 3959), 'numpy.random.seed', 'np.random.seed', (['run'], {}), '(run)\n', (3954, 3959), True, 'import numpy as np\n'), ((3993, 4013), 'numpy.copy', 'np.copy', (['stateValues'], {}), '(stateValues)\n', (4000, 4013), True, 'import numpy as np\n'), ((2445, 2465), 'numpy.copy', 'np.copy', (['stateValues'], {}), '(stateValues)\n', (2452, 2465), True, 'import numpy as np\n'), ((4215, 4276), 'numpy.power', 'np.power', (['(currentStateValues[1:-1] - trueStateValues[1:-1])', '(2)'], {}), '(currentStateValues[1:-1] - trueStateValues[1:-1], 2)\n', (4223, 4276), True, 'import numpy as np\n')] |
import torch.nn as nn
import torch
import numpy as np
from torch.autograd import Variable
import torch.nn.functional as F
from torch.nn import Parameter
import math
USE_CUDA = torch.cuda.is_available()
class CNN(nn.Module):
    """
    Five-stage residual convolutional feature extractor.

    input: [batch_size, 3, 64, 128]
    output: [batch_size, 32, 256]
    """
    def __init__(self):
        super(CNN, self).__init__()
        backbone = nn.Sequential()
        in_ch = 3
        for stage in range(5):
            # channel schedule: 32, 64, 128, 256, 256
            out_ch = 32 * 2 ** min(stage, 3)
            backbone.add_module("ResBlk-{0}".format(stage), ResBlk(in_ch, out_ch))
            in_ch = out_ch
            # early stages pool both axes, later ones only the height
            if stage < 2:
                pool = nn.MaxPool2d(kernel_size=2)
            elif stage < 4:
                pool = nn.MaxPool2d(kernel_size=(2, 1))
            else:
                pool = nn.MaxPool2d(kernel_size=(4, 1))
            backbone.add_module("MAXPOOL-{0}".format(stage), pool)
            backbone.add_module("Dropout-{0}".format(stage), nn.Dropout(0.1))
        self.main = backbone

    def forward(self, x):
        # Drop the collapsed height axis, then move channels last.
        features = self.main(x).squeeze(2)
        return features.transpose(1, 2)
class Encoder(nn.Module):
    """
    GRU sequence encoder.

    input: [batch_size, 32, 256]
    output: out [batch_size, 32, 256]
            hidden [2, batch_size, 128]
    """
    def __init__(self, num_rnn_layers=2, rnn_hidden_size=128, dropout=0.5):
        super(Encoder, self).__init__()
        self.num_rnn_layers = num_rnn_layers
        self.rnn_hidden_size = rnn_hidden_size
        self.gru = nn.GRU(256, rnn_hidden_size, num_rnn_layers,
                          batch_first=True,
                          dropout=dropout)

    def forward(self, x):
        # Fresh zero initial hidden state for every batch.
        n = x.size(0)
        h0 = Variable(torch.zeros(self.num_rnn_layers, n, self.rnn_hidden_size))
        if USE_CUDA:
            h0 = h0.cuda()
        # nn.GRU returns (outputs, final_hidden) as a tuple.
        return self.gru(x, h0)
class RNNAttnDecoder(nn.Module):
    """GRU decoder with dot-product attention over encoder outputs.

    The embedding matrix is a frozen identity, so embedding(input) simply
    maps token indices to one-hot vectors.
    """
    def __init__(self, vocab_size, hidden_size=128, num_rnn_layers=2, dropout=0.5):
        super(RNNAttnDecoder, self).__init__()
        self.hidden_size = hidden_size
        self.vocab_size = vocab_size
        self.attn = Attn(hidden_size)
        # input per step = one-hot token concatenated with the previous
        # attentional hidden state h~
        self.gru = nn.GRU(vocab_size + hidden_size, hidden_size,
                          num_rnn_layers, batch_first=True,
                          dropout=dropout)
        self.wc = nn.Linear(2 * hidden_size, hidden_size) # ,bias=False)
        self.ws = nn.Linear(hidden_size, vocab_size)
        self.tanh = nn.Tanh()
        self.embedding = nn.Embedding(vocab_size, vocab_size)
        # freeze the embedding as an identity (one-hot lookup)
        fix_embedding = torch.from_numpy(np.eye(vocab_size, vocab_size).astype(np.float32))
        self.embedding.weight = nn.Parameter(fix_embedding)
        self.embedding.weight.requires_grad = False
    def forward(self, y, encoder_outputs, encoder_hidden, is_training):
        """Decode with teacher forcing (training) or greedy feedback (eval).

        :param y: target token indices [batch, max_len]
        :param is_training: feed ground-truth y[:, di] when True, otherwise
            feed back the argmax of the previous prediction
        :return: logits [batch, max_len - 1, vocab_size]
        """
        batch_size = y.size(0)
        max_len = y.size(1)
        last_hidden = encoder_hidden
        last_ht = Variable(torch.zeros(batch_size, self.hidden_size))
        outputs = []
        if USE_CUDA:
            last_ht = last_ht.cuda()
        if not is_training:
            input = y[:, 0]
        for di in range(max_len - 1):
            if is_training:
                input = y[:, di]
            output, last_ht, last_hidden, alpha = self.forward_step(input, last_ht, last_hidden, encoder_outputs)
            if not is_training:
                # greedy decoding: take the most likely token
                input = output.max(1)[1]
            outputs.append(output.unsqueeze(1))
        return torch.cat(outputs, dim=1)
    def forward_2(self, encoder_outputs, encoder_hidden, max_len):
        """Greedy decoding without any target sequence, starting from token 0."""
        batch_size = encoder_outputs.size(0)
        last_hidden = encoder_hidden
        last_ht = Variable(torch.zeros(batch_size, self.hidden_size))
        outputs = []
        if USE_CUDA:
            last_ht = last_ht.cuda()
        input = torch.zeros([batch_size]).long()
        if USE_CUDA:
            input = input.cuda()
        for di in range(max_len - 1):
            output, last_ht, last_hidden, alpha = self.forward_step(input, last_ht, last_hidden, encoder_outputs)
            input = output.max(1)[1]
            outputs.append(output.unsqueeze(1))
        return torch.cat(outputs, dim=1)
    def forward_step(self, input, last_ht, last_hidden, encoder_outputs):
        """One decoding step (Luong-style attention: attend after the GRU)."""
        embed_input = self.embedding(input)
        rnn_input = torch.cat((embed_input, last_ht), 1)
        output, hidden = self.gru(rnn_input.unsqueeze(1), last_hidden)
        output = output.squeeze(1)
        weighted_context, alpha = self.attn(output, encoder_outputs)
        # attentional hidden state h~ = tanh(Wc [h; context])
        ht = self.tanh(self.wc(torch.cat((output, weighted_context), 1)))
        output = self.ws(ht)
        return output, ht, hidden, alpha
class ResBlk(nn.Module):
    """Residual block: two 3x3 conv+BN layers plus an identity skip
    (projected through a 1x1 conv+BN when the channel count changes)."""
    def __init__(self, ch_in, ch_out):
        super(ResBlk, self).__init__()
        self.conv1 = nn.Conv2d(ch_in, ch_out, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
        self.bn1 = nn.BatchNorm2d(ch_out)
        self.conv2 = nn.Conv2d(ch_out, ch_out, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
        self.bn2 = nn.BatchNorm2d(ch_out)
        # Shortcut path: identity unless a channel projection is needed.
        self.extra = nn.Sequential()
        if ch_out != ch_in:
            self.extra = nn.Sequential(
                nn.Conv2d(ch_in, ch_out, kernel_size=(1, 1), stride=(1, 1)),
                nn.BatchNorm2d(ch_out)
            )

    def forward(self, x):
        branch = self.bn2(self.conv2(F.relu(self.bn1(self.conv1(x)))))
        return self.extra(x) + branch
class Attn(nn.Module):
    """Dot-product attention over encoder outputs.

    Given a query hidden state [batch, hidden] and encoder outputs
    [batch, seq_len, hidden], returns the attention-weighted context
    vector [batch, hidden] and the weights alpha [batch, seq_len].
    """
    def __init__(self, hidden_size):
        super(Attn, self).__init__()
        self.hidden_size = hidden_size

    def forward(self, hidden, encoder_outputs):
        # Scores: batched dot product of each encoder step with the query.
        hidden_expanded = hidden.unsqueeze(2)
        energy = torch.bmm(encoder_outputs, hidden_expanded).squeeze(2)
        # Fix: specify dim=1 explicitly (the sequence axis). The implicit-dim
        # form is deprecated and relied on PyTorch guessing the axis; for this
        # 2-D energy the implicit choice was dim 1, so behavior is unchanged.
        alpha = nn.functional.softmax(energy, dim=1)
        weighted_context = torch.bmm(alpha.unsqueeze(1), encoder_outputs).squeeze(1)
        return weighted_context, alpha
class GraphConvolution(nn.Module):
    """Graph convolution over a fixed tridiagonal (chain) adjacency.

    The adjacency A_D is the band matrix from compute_matrix(dot_number)
    and is stored as a frozen (non-trainable) parameter.
    """
    def __init__(self, in_features, out_features, dot_number, bias=False):
        super(GraphConvolution, self).__init__()
        self.in_features = in_features
        self.out_features = out_features
        self.dot_number = dot_number
        self.weight = Parameter(torch.Tensor(in_features, out_features))
        if bias:
            self.bias = Parameter(torch.Tensor(1, 1, out_features))
        else:
            self.register_parameter('bias', None)
        self.reset_parameters()
        # fixed chain adjacency; requires_grad=False keeps it untrained
        self.A_D = Parameter(torch.from_numpy(compute_matrix(dot_number)).float(), requires_grad=False)
    def reset_parameters(self):
        """Uniform init in [-stdv, stdv], stdv = 1/sqrt(out_features)."""
        stdv = 1. / math.sqrt(self.weight.size(1))
        self.weight.data.uniform_(-stdv, stdv)
        if self.bias is not None:
            self.bias.data.uniform_(-stdv, stdv)
    def forward(self, input):
        """Compute A_D @ (input @ W) (+ bias), i.e. aggregate neighbors."""
        support = torch.matmul(input, self.weight)
        output = torch.matmul(self.A_D.detach(), support)
        if self.bias is not None:
            return output + self.bias
        else:
            return output
def compute_matrix(dot_number):
    """Return a dot_number x dot_number tridiagonal matrix: ones on the
    main, sub- and super-diagonals, zeros elsewhere."""
    return sum(np.eye(dot_number, k=offset) for offset in (-1, 0, 1))
class FeedForward(nn.Module):
    """Position-wise feed-forward block: Linear -> ReLU -> Dropout -> Linear."""
    def __init__(self, d_model, hidden, drop_prob=0.1):
        super(FeedForward, self).__init__()
        self.linear1 = nn.Linear(d_model, hidden)
        self.linear2 = nn.Linear(hidden, d_model)
        self.relu = nn.ReLU()
        self.dropout = nn.Dropout(p=drop_prob)

    def forward(self, x):
        # Expand, activate, regularize, then project back to d_model.
        expanded = self.dropout(self.relu(self.linear1(x)))
        return self.linear2(expanded)
class MultiAttention(nn.Module):
def __init__(self, dim, dim_2, n_head, mask):
super(MultiAttention, self).__init__()
self.n_head = n_head
self.dim_2 = dim_2
self.mask = mask
self.W_Q = nn.Linear(dim, dim_2 * n_head, bias=False)
self.W_K = nn.Linear(dim, dim_2 * n_head, bias=False)
self.W_V = nn.Linear(dim, dim_2 * n_head, bias=False)
self.fc = nn.Linear(dim_2 * n_head, dim, bias=False)
self.norm = nn.LayerNorm(dim)
def forward(self, x):
# x: [batch_size, len, dim]
batch_size = x.size(0)
dot_number = x.size(1)
Q = self.W_Q(x).view(batch_size, -1, self.n_head, self.dim_2).transpose(1, 2)
K = self.W_K(x).view(batch_size, -1, self.n_head, self.dim_2).transpose(1, 2)
V = self.W_V(x).view(batch_size, -1, self.n_head, self.dim_2).transpose(1, 2)
scores = torch.matmul(Q, K.transpose(-1, -2)) / np.sqrt(self.dim_2)
if self.mask:
mask = torch.from_numpy(compute_matrix(dot_number)).float().detach()
if USE_CUDA:
mask = mask.cuda()
scores = scores.masked_fill(mask == 0, -1e-9)
attn = F.softmax(scores, dim=-1)
context = torch.matmul(attn, V)
context = context.transpose(1, 2).reshape(batch_size, -1, self.n_head * self.dim_2)
output = self.fc(context)
output = self.norm(output + x)
return output
| [
"torch.nn.Dropout",
"torch.bmm",
"torch.nn.Embedding",
"torch.cat",
"torch.nn.LayerNorm",
"torch.Tensor",
"torch.nn.Linear",
"torch.zeros",
"torch.matmul",
"torch.nn.Parameter",
"torch.nn.GRU",
"torch.nn.Tanh",
"torch.nn.Conv2d",
"torch.nn.BatchNorm2d",
"torch.cuda.is_available",
"torc... | [((178, 203), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (201, 203), False, 'import torch\n'), ((414, 429), 'torch.nn.Sequential', 'nn.Sequential', ([], {}), '()\n', (427, 429), True, 'import torch.nn as nn\n'), ((1527, 1606), 'torch.nn.GRU', 'nn.GRU', (['(256)', 'rnn_hidden_size', 'num_rnn_layers'], {'batch_first': '(True)', 'dropout': 'dropout'}), '(256, rnn_hidden_size, num_rnn_layers, batch_first=True, dropout=dropout)\n', (1533, 1606), True, 'import torch.nn as nn\n'), ((2221, 2322), 'torch.nn.GRU', 'nn.GRU', (['(vocab_size + hidden_size)', 'hidden_size', 'num_rnn_layers'], {'batch_first': '(True)', 'dropout': 'dropout'}), '(vocab_size + hidden_size, hidden_size, num_rnn_layers, batch_first=\n True, dropout=dropout)\n', (2227, 2322), True, 'import torch.nn as nn\n'), ((2389, 2428), 'torch.nn.Linear', 'nn.Linear', (['(2 * hidden_size)', 'hidden_size'], {}), '(2 * hidden_size, hidden_size)\n', (2398, 2428), True, 'import torch.nn as nn\n'), ((2463, 2497), 'torch.nn.Linear', 'nn.Linear', (['hidden_size', 'vocab_size'], {}), '(hidden_size, vocab_size)\n', (2472, 2497), True, 'import torch.nn as nn\n'), ((2519, 2528), 'torch.nn.Tanh', 'nn.Tanh', ([], {}), '()\n', (2526, 2528), True, 'import torch.nn as nn\n'), ((2554, 2590), 'torch.nn.Embedding', 'nn.Embedding', (['vocab_size', 'vocab_size'], {}), '(vocab_size, vocab_size)\n', (2566, 2590), True, 'import torch.nn as nn\n'), ((2715, 2742), 'torch.nn.Parameter', 'nn.Parameter', (['fix_embedding'], {}), '(fix_embedding)\n', (2727, 2742), True, 'import torch.nn as nn\n'), ((3523, 3548), 'torch.cat', 'torch.cat', (['outputs'], {'dim': '(1)'}), '(outputs, dim=1)\n', (3532, 3548), False, 'import torch\n'), ((4208, 4233), 'torch.cat', 'torch.cat', (['outputs'], {'dim': '(1)'}), '(outputs, dim=1)\n', (4217, 4233), False, 'import torch\n'), ((4373, 4409), 'torch.cat', 'torch.cat', (['(embed_input, last_ht)', '(1)'], {}), '((embed_input, last_ht), 1)\n', (4382, 4409), False, 'import 
torch\n'), ((4857, 4932), 'torch.nn.Conv2d', 'nn.Conv2d', (['ch_in', 'ch_out'], {'kernel_size': '(3, 3)', 'stride': '(1, 1)', 'padding': '(1, 1)'}), '(ch_in, ch_out, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n', (4866, 4932), True, 'import torch.nn as nn\n'), ((4952, 4974), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['ch_out'], {}), '(ch_out)\n', (4966, 4974), True, 'import torch.nn as nn\n'), ((4996, 5072), 'torch.nn.Conv2d', 'nn.Conv2d', (['ch_out', 'ch_out'], {'kernel_size': '(3, 3)', 'stride': '(1, 1)', 'padding': '(1, 1)'}), '(ch_out, ch_out, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n', (5005, 5072), True, 'import torch.nn as nn\n'), ((5092, 5114), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['ch_out'], {}), '(ch_out)\n', (5106, 5114), True, 'import torch.nn as nn\n'), ((5137, 5152), 'torch.nn.Sequential', 'nn.Sequential', ([], {}), '()\n', (5150, 5152), True, 'import torch.nn as nn\n'), ((5841, 5870), 'torch.nn.functional.softmax', 'nn.functional.softmax', (['energy'], {}), '(energy)\n', (5862, 5870), True, 'import torch.nn as nn\n'), ((6895, 6927), 'torch.matmul', 'torch.matmul', (['input', 'self.weight'], {}), '(input, self.weight)\n', (6907, 6927), False, 'import torch\n'), ((7193, 7216), 'numpy.eye', 'np.eye', (['dot_number'], {'k': '(1)'}), '(dot_number, k=1)\n', (7199, 7216), True, 'import numpy as np\n'), ((7385, 7411), 'torch.nn.Linear', 'nn.Linear', (['d_model', 'hidden'], {}), '(d_model, hidden)\n', (7394, 7411), True, 'import torch.nn as nn\n'), ((7435, 7461), 'torch.nn.Linear', 'nn.Linear', (['hidden', 'd_model'], {}), '(hidden, d_model)\n', (7444, 7461), True, 'import torch.nn as nn\n'), ((7482, 7491), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (7489, 7491), True, 'import torch.nn as nn\n'), ((7515, 7538), 'torch.nn.Dropout', 'nn.Dropout', ([], {'p': 'drop_prob'}), '(p=drop_prob)\n', (7525, 7538), True, 'import torch.nn as nn\n'), ((7924, 7966), 'torch.nn.Linear', 'nn.Linear', (['dim', '(dim_2 * n_head)'], {'bias': 
'(False)'}), '(dim, dim_2 * n_head, bias=False)\n', (7933, 7966), True, 'import torch.nn as nn\n'), ((7986, 8028), 'torch.nn.Linear', 'nn.Linear', (['dim', '(dim_2 * n_head)'], {'bias': '(False)'}), '(dim, dim_2 * n_head, bias=False)\n', (7995, 8028), True, 'import torch.nn as nn\n'), ((8048, 8090), 'torch.nn.Linear', 'nn.Linear', (['dim', '(dim_2 * n_head)'], {'bias': '(False)'}), '(dim, dim_2 * n_head, bias=False)\n', (8057, 8090), True, 'import torch.nn as nn\n'), ((8109, 8151), 'torch.nn.Linear', 'nn.Linear', (['(dim_2 * n_head)', 'dim'], {'bias': '(False)'}), '(dim_2 * n_head, dim, bias=False)\n', (8118, 8151), True, 'import torch.nn as nn\n'), ((8172, 8189), 'torch.nn.LayerNorm', 'nn.LayerNorm', (['dim'], {}), '(dim)\n', (8184, 8189), True, 'import torch.nn as nn\n'), ((8889, 8914), 'torch.nn.functional.softmax', 'F.softmax', (['scores'], {'dim': '(-1)'}), '(scores, dim=-1)\n', (8898, 8914), True, 'import torch.nn.functional as F\n'), ((8934, 8955), 'torch.matmul', 'torch.matmul', (['attn', 'V'], {}), '(attn, V)\n', (8946, 8955), False, 'import torch\n'), ((1739, 1805), 'torch.zeros', 'torch.zeros', (['self.num_rnn_layers', 'batch_size', 'self.rnn_hidden_size'], {}), '(self.num_rnn_layers, batch_size, self.rnn_hidden_size)\n', (1750, 1805), False, 'import torch\n'), ((2992, 3033), 'torch.zeros', 'torch.zeros', (['batch_size', 'self.hidden_size'], {}), '(batch_size, self.hidden_size)\n', (3003, 3033), False, 'import torch\n'), ((3727, 3768), 'torch.zeros', 'torch.zeros', (['batch_size', 'self.hidden_size'], {}), '(batch_size, self.hidden_size)\n', (3738, 3768), False, 'import torch\n'), ((6306, 6345), 'torch.Tensor', 'torch.Tensor', (['in_features', 'out_features'], {}), '(in_features, out_features)\n', (6318, 6345), False, 'import torch\n'), ((7140, 7164), 'numpy.eye', 'np.eye', (['dot_number'], {'k': '(-1)'}), '(dot_number, k=-1)\n', (7146, 7164), True, 'import numpy as np\n'), ((7167, 7190), 'numpy.eye', 'np.eye', (['dot_number'], {'k': '(0)'}), 
'(dot_number, k=0)\n', (7173, 7190), True, 'import numpy as np\n'), ((8631, 8650), 'numpy.sqrt', 'np.sqrt', (['self.dim_2'], {}), '(self.dim_2)\n', (8638, 8650), True, 'import numpy as np\n'), ((982, 997), 'torch.nn.Dropout', 'nn.Dropout', (['(0.1)'], {}), '(0.1)\n', (992, 997), True, 'import torch.nn as nn\n'), ((3867, 3892), 'torch.zeros', 'torch.zeros', (['[batch_size]'], {}), '([batch_size])\n', (3878, 3892), False, 'import torch\n'), ((4617, 4657), 'torch.cat', 'torch.cat', (['(output, weighted_context)', '(1)'], {}), '((output, weighted_context), 1)\n', (4626, 4657), False, 'import torch\n'), ((5237, 5296), 'torch.nn.Conv2d', 'nn.Conv2d', (['ch_in', 'ch_out'], {'kernel_size': '(1, 1)', 'stride': '(1, 1)'}), '(ch_in, ch_out, kernel_size=(1, 1), stride=(1, 1))\n', (5246, 5296), True, 'import torch.nn as nn\n'), ((5314, 5336), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['ch_out'], {}), '(ch_out)\n', (5328, 5336), True, 'import torch.nn as nn\n'), ((5769, 5812), 'torch.bmm', 'torch.bmm', (['encoder_outputs', 'hidden_expanded'], {}), '(encoder_outputs, hidden_expanded)\n', (5778, 5812), False, 'import torch\n'), ((6398, 6430), 'torch.Tensor', 'torch.Tensor', (['(1)', '(1)', 'out_features'], {}), '(1, 1, out_features)\n', (6410, 6430), False, 'import torch\n'), ((676, 703), 'torch.nn.MaxPool2d', 'nn.MaxPool2d', ([], {'kernel_size': '(2)'}), '(kernel_size=2)\n', (688, 703), True, 'import torch.nn as nn\n'), ((2632, 2662), 'numpy.eye', 'np.eye', (['vocab_size', 'vocab_size'], {}), '(vocab_size, vocab_size)\n', (2638, 2662), True, 'import numpy as np\n'), ((786, 818), 'torch.nn.MaxPool2d', 'nn.MaxPool2d', ([], {'kernel_size': '(2, 1)'}), '(kernel_size=(2, 1))\n', (798, 818), True, 'import torch.nn as nn\n'), ((895, 927), 'torch.nn.MaxPool2d', 'nn.MaxPool2d', ([], {'kernel_size': '(4, 1)'}), '(kernel_size=(4, 1))\n', (907, 927), True, 'import torch.nn as nn\n')] |
"""Get an input to cmdstanpy.CmdStanModel.sample from a pd.DataFrame."""
import itertools
import logging
import re
import time
from dataclasses import dataclass
from typing import Dict, List
import numpy as np
import pandas as pd
from scipy.linalg import null_space
from .util import codify, rref, get_free_fluxes
logger = logging.getLogger(__name__)
REL_TOL = 1e-12
FUNCTION_TOL = 1e-12
MAX_NUM_STEPS = int(1e9)
# TAKEN FROM PTA (I think they fitted a lognormal to all met concentrations in all conditions)
DEFAULT_MET_CONC_MEAN = -8.3371
DEFAULT_MET_CONC_SCALE = 1.9885
# This still needs to be determined
DEFAULT_ENZ_CONC_MEAN = -8.3371
DEFAULT_ENZ_CONC_SCALE = 1.9885
DEFAULT_EXCHANGE_MEAN = 0 # mol/gDW/h. (Was more than 0 in the data but that wouldn't make sense here)
DEFAULT_EXCHANGE_SCALE = 0.00449 # From the (limited) exchange data in Gerosa et al. Room for improvement.
DEFAULT_B_MEAN = 3
DEFAULT_B_SCALE = 3
FIXED_MIC_EPSILON = 1e-5 # The standard deviation of values that considered to have no variance
FIXED_DGF_EPSILON = 1e-2 # The variance for dgf values that are considered to have no variance. High because of numerical issues.
@dataclass
class IndPrior1d:
    """Independent 1-d prior: one (location, scale) pair per target id."""
    parameter_name: str
    location: pd.Series
    scale: pd.Series
@dataclass
class IndPrior2d:
    """Independent 2-d prior: (location, scale) per (condition, target)."""
    parameter_name: str
    location: pd.DataFrame
    scale: pd.DataFrame

    def to_dataframe(self, measurement_type):
        """Flatten the location/scale tables into one long-format dataframe
        with columns parameter, condition_id, loc, scale, measurement_type."""
        long_form = pd.concat(
            [self.location.unstack(), self.scale.unstack()], axis=1
        ).reset_index()
        long_form.columns = ["parameter", "condition_id", "loc", "scale"]
        long_form["measurement_type"] = measurement_type
        return long_form
def extract_prior_1d(
    parameter: str,
    priors: pd.DataFrame,
    coords: List[str],
    default_loc: float,
    default_scale: float
) -> IndPrior1d:
    """Build a 1-d prior for `parameter` with one entry per coord.

    Coords with no explicit row in the long-format `priors` table fall
    back to the supplied defaults. Note: get_group raises KeyError if
    `parameter` has no rows at all in `priors`.
    """
    param_priors = priors.groupby("parameter").get_group(parameter).set_index("target_id")
    loc, scale = (
        param_priors[col].reindex(coords).fillna(default)
        for col, default in [("loc", default_loc), ("scale", default_scale)]
    )
    return IndPrior1d(parameter, loc, scale)
def extract_prior_2d(
    parameter: str,
    priors: pd.DataFrame,
    target_coords: List[str],
    condition_coords: List[str],
    default_loc: float,
    default_scale: float
) -> IndPrior2d:
    """Build a 2-d prior (condition x target) for `parameter`.

    If the parameter has no rows in `priors`, both tables are filled
    entirely with the defaults; otherwise explicit entries are pivoted
    into place and only the missing cells get the defaults.
    """
    if parameter not in priors["parameter"].unique():
        loc, scale = (
            pd.DataFrame(default, index=condition_coords, columns=target_coords)
            for default in [default_loc, default_scale]
        )
        return IndPrior2d(parameter, loc, scale)
    else:
        param_priors = priors.groupby("parameter").get_group(parameter)
        # pivot to condition x target, align to the requested coords,
        # then fill the gaps with the defaults
        loc, scale = (
            param_priors
            .set_index(["condition_id", "target_id"])
            [col]
            .unstack()
            .reindex(condition_coords)
            .reindex(target_coords, axis=1)
            .fillna(default)
            for col, default in [("loc", default_loc), ("scale", default_scale)]
        )
        return IndPrior2d(parameter, loc, scale)
def get_exchange_rxns(S):
    """Boolean mask over S's columns marking non-internal reactions
    (transport, exchange, sink, demand and excluded reactions)."""
    # Substrings that mark a reaction name as non-internal, escaped so they
    # are matched literally by the regex.
    markers = ("transport", "exchange", "EX_", "SK_", "DM_", "EXCL_")
    pattern = "|".join(re.escape(marker) for marker in markers)
    return S.columns.str.contains(pattern)
def get_coords(S: pd.DataFrame, measurements: pd.DataFrame, priors: pd.DataFrame, order=None):
    """Collect the coordinate dictionary for the model.

    :param S: stoichiometric matrix (metabolites x reactions)
    :param measurements: long-format measurements with a condition_id column
    :param priors: long-format priors dataframe
    :param order: optional ordering of x values from free to fixed, passed on
        to get_coords_condition_list
    :raises RuntimeError: if priors is not a pandas DataFrame
    """
    # isinstance is the idiomatic (and subclass-friendly) type check.
    if not isinstance(priors, pd.DataFrame):
        raise RuntimeError("priors must be a dataframe")
    # All conditions appearing in either the measurements or the priors.
    measurement_conditions = measurements["condition_id"]
    prior_conditions = priors["condition_id"][~priors["condition_id"].isna()]
    # Series.append was removed in pandas 2.0; concatenate instead.
    conditions = pd.concat([measurement_conditions, prior_conditions]).unique()
    return get_coords_condition_list(S, conditions, order)
def get_coords_condition_list(S: pd.DataFrame, conditions: [str], order=None):
    """Build the coordinate dictionary for a given list of conditions.

    :param S: stoichiometric matrix (metabolites x reactions)
    :param conditions: list of condition ids
    :param order: optional ordering of x values from free to fixed, passed on
        to get_free_x_and_rows
    :return: dict of coordinate lists and 1-based stan index maps
    """
    is_exchange = get_exchange_rxns(S)
    # The x vector is exchange reactions first, then metabolite concentrations.
    # (Series.append was removed in pandas 2.0; concatenate instead.)
    base_ordering = pd.concat([S.columns[is_exchange].to_series(), S.index.to_series()])
    exchanges = S.columns[is_exchange]
    internals = S.columns[~is_exchange]
    num_ex = is_exchange.sum()
    free_x_ind, free_rows_ind = get_free_x_and_rows(S, order)
    # Split the x vector into free and fixed parts
    x_names = base_ordering
    free_x_names = x_names[free_x_ind]
    fixed_x_names = x_names[~free_x_ind]
    # 1-based stan indices for each free / fixed x
    free_x = np.arange(1, len(free_x_ind) + 1)[free_x_ind]
    fixed_x = np.arange(1, len(free_x_ind) + 1)[~free_x_ind]
    # The first num_ex entries of the x vector are the exchange reactions,
    # the remainder are metabolite concentrations
    exchange_free_ind = free_x_ind[:num_ex]
    exchange_free = exchanges[exchange_free_ind]
    exchange_fixed = exchanges[~exchange_free_ind]
    conc_free_ind = free_x_ind[num_ex:]
    conc_free = S.index[conc_free_ind]
    conc_fixed = S.index[~conc_free_ind]
    reaction_ind_map = codify(S.columns)
    met_ind_map = codify(S.index)
    return {
        # Maps to stan indices
        "reaction_ind": reaction_ind_map,
        "metabolite_ind": met_ind_map,
        "reaction": list(S.columns),
        "metabolite": list(S.index),
        "x_names": list(x_names),
        "free_x_names": list(free_x_names),
        "fixed_x_names": list(fixed_x_names),
        "internal_names": list(internals),
        "exchange": list(exchanges),
        "free_exchange": list(exchange_free),
        "fixed_exchange": list(exchange_fixed),
        "free_met_conc": list(conc_free),
        "fixed_met_conc": list(conc_fixed),
        "condition": list(conditions),
        "free_x": list(free_x),
        "fixed_x": list(fixed_x),
        "free_rows": list(S.index[free_rows_ind]),
        "fixed_rows": list(S.index[~free_rows_ind])
    }
def get_free_x_and_rows(S, order):
    """
    Get the free x values of the corresponding S_c matrix.

    :param S: the stoichiometric matrix
    :param order: a list of x values in the desired order from free to fixed.
        If the list is incomplete then the rest of the x values will be filled.
    :return: (free_x_ind, free_rows_ind) boolean pandas Series indexed by
        x name and metabolite respectively
    :raises RuntimeError: if no free fluxes are detected
    """
    # Determine the exchange reactions
    is_exchange = get_exchange_rxns(S)
    exchange_names = S.columns[is_exchange]
    # Series.append was removed in pandas 2.0; concatenate instead.
    base_ordering = pd.concat([S.columns[is_exchange].to_series(), S.index.to_series()])
    met_names = base_ordering[~base_ordering.isin(exchange_names)]
    # Convert an incomplete ordering into a full ordering
    if order is None:
        order = base_ordering
    else:
        # Push the ordered columns to the front and fill in the rest
        order = pd.Series(order, index=order)
        order = pd.concat([order, base_ordering.drop(order)])
    # Now reverse the order to put the "freest" variables on the right
    order = order.iloc[::-1]
    num_ex = is_exchange.sum()
    # Calculate the final matrix and the free variables
    s_x = pd.DataFrame(0, columns=base_ordering, index=S.columns)
    s_x.loc[exchange_names, exchange_names] = np.identity(num_ex)
    s_x.loc[~is_exchange, met_names] = S.T[~is_exchange].to_numpy()
    s_total = S @ s_x
    # Reorder the columns according to the given ordering
    s_total = s_total.loc[:, order]
    free_x_ind, _ = get_free_fluxes(s_total.to_numpy())
    if not any(free_x_ind):
        raise RuntimeError("No free fluxes detected")
    fixed_rows_ind, _ = get_free_fluxes(s_total.T.to_numpy())
    # Revert back to original ordering
    free_x_ind = free_x_ind[order.index.get_indexer(base_ordering)]
    return pd.Series(free_x_ind, index=base_ordering), pd.Series(~fixed_rows_ind, s_total.index)
def check_input(measurements, priors):
    """Validate the measurements and priors dataframes.

    Checks that at least one measurement exists, that enzyme/metabolite
    concentration measurements look like log-scale values, and that
    lognormal concentration priors are in a plausible range.

    :param measurements: long-format measurements dataframe with columns
        including measurement_type and measurement
    :param priors: long-format priors dataframe with columns including
        parameter and loc
    :raises ValueError: if any check fails
    """
    if len(measurements) == 0:
        raise ValueError("At least one measurement is required")
    measurements_by_type = dict(measurements.groupby("measurement_type").__iter__())
    # Log-scale concentrations should be large negative numbers; values that
    # all fall in (0, 1) suggest raw concentrations were supplied instead.
    if "enzyme" in measurements_by_type and measurements_by_type["enzyme"]["measurement"].between(0, 1).all():
        raise ValueError("Enzyme log concentration measurements should be in the range of ~-13 to -5. "
                         "Are they maybe recorded as regular concentrations?")
    if "mic" in measurements_by_type and measurements_by_type["mic"]["measurement"].between(0, 1).all():
        raise ValueError("Metabolite log concentration measurements should be in the range of ~-13 to -5. "
                         "Are they maybe recorded as regular concentrations?")
    priors_by_type = dict(priors.groupby("parameter").__iter__())
    # Check that the lognormal priors are in the correct range
    if ("enzyme" in priors_by_type and not priors_by_type["enzyme"]["loc"].between(-20, 0).all()) \
            or ("concentration" in priors_by_type and not priors_by_type["concentration"]["loc"].between(-20, 0).all()):
        raise ValueError("Reasonable lognormal concentration priors should be between -20 and 0. "
                         "Maybe you made a mistake in the formulation?")
def get_stan_input(
        measurements: pd.DataFrame,
        S: pd.DataFrame,
        priors: pd.DataFrame,
        priors_cov: pd.DataFrame,
        likelihood: bool,
        order=None) -> Dict:
    """Get an input to cmdstanpy.CmdStanModel.sample.

    :param measurements: a pandas DataFrame whose rows represent measurements
    :param S: stoichiometric matrix (metabolites x reactions)
    :param priors: long-format dataframe of independent priors
    :param priors_cov: covariance matrix for the dgf priors
    :param likelihood: if False, the measurements are dropped (prior-only run)
    :param order: optional ordering of x values from free to fixed
    :return: dict of data ready to pass to cmdstanpy
    :raises ValueError: if a dgf mean is missing for any metabolite
    """
    check_input(measurements, priors)
    coords = get_coords(S, measurements, priors, order)
    measurements_by_type = group_measurement_types(likelihood, measurements)
    # Give concentration measurements reported with zero variance a small
    # non-zero error scale so the model stays well-behaved
    measurements_by_type["mic"].loc[measurements_by_type["mic"]["error_scale"] == 0, "error_scale"] = FIXED_MIC_EPSILON
    # Give dgf values with no variance a small diagonal variance so the
    # covariance matrix stays full rank
    zero_cols = ~priors_cov.any()
    zero_rows = ~priors_cov.any(axis=1)
    assert (zero_cols == zero_rows).all(), "The covariance matrix should be symmetric"
    priors_cov.loc[zero_cols, zero_cols] = np.diag(np.full(zero_cols.sum(), FIXED_DGF_EPSILON))
    assert np.linalg.matrix_rank(priors_cov) == len(priors_cov), "The covariance matrix should be full rank"
    # Transform into measurements for the model
    free_exchange = get_name_ordered_overlap(coords, "reaction_ind", ["exchange", "free_x_names"])
    free_met_conc = get_name_ordered_overlap(coords, "metabolite_ind", ["metabolite", "free_x_names"])
    prior_b = extract_prior_2d("b", priors, coords["internal_names"], coords["condition"], DEFAULT_B_MEAN,
                               DEFAULT_B_SCALE)
    # NOTE(review): "internal_names" looks like it should be the name of a
    # prior parameter (e.g. "enzyme"); as written, enzyme priors only come
    # from rows whose parameter column is literally "internal_names" and
    # otherwise fall back to the defaults. Verify against the priors file.
    prior_enzyme = extract_prior_2d("internal_names", priors, coords["internal_names"], coords["condition"],
                                    DEFAULT_ENZ_CONC_MEAN, DEFAULT_ENZ_CONC_SCALE)
    prior_met_conc_free = extract_prior_2d("metabolite", priors, free_met_conc, coords["condition"],
                                           DEFAULT_MET_CONC_MEAN, DEFAULT_MET_CONC_SCALE)
    prior_exchange_free = extract_prior_2d("exchange", priors, free_exchange, coords["condition"],
                                           DEFAULT_EXCHANGE_MEAN, DEFAULT_EXCHANGE_SCALE)
    # Add the fixed priors to the measurements. (DataFrame.append was removed
    # in pandas 2.0, so use pd.concat.)
    fixed_exchange_prior_df, fixed_met_prior_df = fixed_prior_to_measurements(coords, priors)
    measurements_by_type["mic"] = pd.concat([measurements_by_type["mic"], fixed_met_prior_df])
    measurements_by_type["flux"] = pd.concat([measurements_by_type["flux"], fixed_exchange_prior_df])
    # We're going to assume full prior information on dgf
    prior_dgf_mean = priors[priors["parameter"] == "dgf"]["loc"]
    if len(prior_dgf_mean) != S.shape[0]:
        raise ValueError("All dgf means must be provided in the priors file")
    return {
        # Sizes
        "N_metabolite": S.shape[0],
        "N_reaction": S.shape[1],
        "N_exchange": len(coords["exchange"]),
        "N_internal": len(coords["internal_names"]),
        "N_fixed_exchange": len(coords["fixed_exchange"]),
        "N_free_exchange": len(coords["free_exchange"]),
        "N_fixed_met_conc": len(coords["fixed_met_conc"]),
        "N_free_met_conc": len(coords["free_met_conc"]),
        "N_free_x": len(coords["free_x"]),
        "N_fixed_x": len(coords["fixed_x"]),
        "N_free_rows": len(coords["free_rows"]),
        "N_fixed_rows": len(coords["fixed_rows"]),
        "N_x": len(coords["fixed_x"] + coords["free_x"]),
        # Network
        "S": S.values.tolist(),
        # Indexing
        "ix_free_met_to_free": make_index_map(coords, "free_x_names", ["free_met_conc"]),
        "ix_free_ex_to_free": make_index_map(coords, "free_x_names", ["free_exchange"]),
        "ix_fixed_met_to_fixed": make_index_map(coords, "fixed_x_names", ["fixed_met_conc"]),
        "ix_fixed_ex_to_fixed": make_index_map(coords, "fixed_x_names", ["fixed_exchange"]),
        "ix_free_to_x": make_index_map(coords, "x_names", ["free_x_names"]),
        "ix_fixed_to_x": make_index_map(coords, "x_names", ["fixed_x_names"]),
        "ix_ex_to_x": make_index_map(coords, "x_names", ["exchange"]),
        "ix_met_to_x": make_index_map(coords, "x_names", ["metabolite"]),
        "ix_internal_to_rxn": make_index_map(coords, "reaction", ["internal_names"]),
        "ix_ex_to_rxn": make_index_map(coords, "reaction", ["exchange"]),
        "ix_free_met_to_met": make_index_map(coords, "metabolite", ["free_met_conc"]),
        "ix_fixed_met_to_met": make_index_map(coords, "metabolite", ["fixed_met_conc"]),
        "ix_free_ex_to_ex": make_index_map(coords, "exchange", ["exchange", "free_x_names"]),
        "ix_fixed_ex_to_ex": make_index_map(coords, "exchange", ["exchange", "fixed_x_names"]),
        "ix_free_row_to_met": make_index_map(coords, "metabolite", ["free_rows"]),
        "ix_fixed_row_to_met": make_index_map(coords, "metabolite", ["fixed_rows"]),
        # measurements
        "N_condition": len(coords["condition"]),
        "N_y_enzyme": len(measurements_by_type["enzyme"]),
        "N_y_metabolite": len(measurements_by_type["mic"]),
        "N_y_flux": len(measurements_by_type["flux"]),
        "y_flux": measurements_by_type["flux"]["measurement"].values.tolist(),
        "sigma_flux": measurements_by_type["flux"]["error_scale"].values.tolist(),
        "reaction_y_flux": measurements_by_type["flux"]["target_id"].map(codify(coords["reaction"])).values.tolist(),
        "condition_y_flux": measurements_by_type["flux"]["condition_id"].map(
            codify(coords["condition"])).values.tolist(),
        # Concentrations given on a log scale
        "y_enzyme": measurements_by_type["enzyme"]["measurement"].values.tolist(),
        "sigma_enzyme": measurements_by_type["enzyme"]["error_scale"].values.tolist(),
        "internal_y_enzyme": measurements_by_type["enzyme"]["target_id"].map(
            codify(coords["internal_names"])).values.tolist(),
        "condition_y_enzyme": measurements_by_type["enzyme"]["condition_id"].map(
            codify(coords["condition"])).values.tolist(),
        # Concentrations given on a log scale
        "y_metabolite": measurements_by_type["mic"]["measurement"].values.tolist(),
        "sigma_metabolite": measurements_by_type["mic"]["error_scale"].values.tolist(),
        "metabolite_y_metabolite": measurements_by_type["mic"]["target_id"].map(
            codify(coords["metabolite"])).values.tolist(),
        "condition_y_metabolite": measurements_by_type["mic"]["condition_id"].map(
            codify(coords["condition"])).values.tolist(),
        # priors
        "prior_dgf_mean": prior_dgf_mean.values.tolist(),
        "prior_dgf_cov": priors_cov.values.tolist(),
        "prior_exchange_free": [prior_exchange_free.location.values.tolist(),
                                prior_exchange_free.scale.values.tolist()],
        "prior_enzyme": [prior_enzyme.location.values.tolist(), prior_enzyme.scale.values.tolist()],
        "prior_b": [prior_b.location.values.tolist(), prior_b.scale.values.tolist()],
        "prior_free_met_conc": [prior_met_conc_free.location.values.tolist(),
                                prior_met_conc_free.scale.values.tolist()],
    }
def group_measurement_types(likelihood, measurements):
    """Split the measurements into a dict keyed by measurement type.

    The keys "mic", "flux" and "enzyme" are always present; a missing type
    gets an empty DataFrame (with a warning when the likelihood is enabled).
    When *likelihood* is False every measurement is dropped.
    """
    if not likelihood:
        # Prior-only run: keep the columns but drop every row.
        measurements = pd.DataFrame(columns=measurements.columns)
    measurements_by_type = dict(measurements.groupby("measurement_type").__iter__())
    for t in ("mic", "flux", "enzyme"):
        if t in measurements_by_type:
            continue
        if likelihood:
            # Only worth warning about when the measurements are used.
            logger.warning(f"No {t} measurements provided.")
        measurements_by_type[t] = pd.DataFrame(columns=measurements.columns)
    return measurements_by_type
def fixed_prior_to_measurements(coords, priors):
    """
    Convert the fixed exchange and met conc priors to measurements.

    Returns a (flux_df, mic_df) pair in the long measurement format.
    """
    # Both dataframes use the same column renaming into measurement format.
    rename_map = {"parameter": "target_id", "loc": "measurement", "scale": "error_scale"}
    fixed_exchange = get_name_ordered_overlap(coords, "reaction_ind", ["exchange", "fixed_x_names"])
    fixed_met_conc = get_name_ordered_overlap(coords, "metabolite_ind", ["metabolite", "fixed_x_names"])
    met_prior = extract_prior_2d("metabolite", priors, fixed_met_conc, coords["condition"],
                                 DEFAULT_MET_CONC_MEAN, DEFAULT_MET_CONC_SCALE)
    exchange_prior = extract_prior_2d("exchange", priors, fixed_exchange, coords["condition"],
                                      DEFAULT_EXCHANGE_MEAN, DEFAULT_EXCHANGE_SCALE)
    # Expand the IndPrior2d objects into long measurement dataframes
    fixed_met_prior_df = met_prior.to_dataframe("mic").rename(columns=rename_map)
    fixed_exchange_prior_df = exchange_prior.to_dataframe("flux").rename(columns=rename_map)
    return fixed_exchange_prior_df, fixed_met_prior_df
def get_name_ordered_overlap(coords: {}, order_by: str, name_list: [str]) -> [int]:
    """
    Return the indices shared by all the named vectors, ordered by *order_by*.

    :param coords: the coords dict
    :param order_by: which index dict to order by, either "reaction_ind" or
        "metabolite_ind"
    :param name_list: names of the coordinate vectors to intersect
    :return: ordered list of the shared indices
    """
    assert order_by in ["reaction_ind", "metabolite_ind"]
    selected_vectors = [coords[name] for name in name_list]
    return get_ordered_overlap(coords[order_by], selected_vectors)
def get_ordered_overlap(order_by: {}, inds_lists: [list]) -> []:
    """
    Return the indices common to every list in *inds_lists*, ordered by their
    position in *order_by*.

    :param order_by: a dict mapping each index to its sort position
    :param inds_lists: a list of lists of indices
    :return: sorted list of the shared indices
    """
    # Get the unique indices of the overlap
    unique_inds = set(inds_lists[0]).intersection(*inds_lists)
    # The original assert checked the elements themselves for None, which
    # could never detect an index missing from the sorting dict (sorted()
    # would raise TypeError first on a None sort key). Check membership in
    # the sorting dict before sorting instead.
    assert all(ind in order_by for ind in unique_inds), \
        "All indices should be in the sorting dict"
    return sorted(unique_inds, key=order_by.get)
def make_index_map(coords: dict, to_name: str, from_names: [str]):
    """
    Build a map from the overlap of the named vectors into the vector *to_name*.

    :param coords: the coords dict
    :param to_name: name of the vector being mapped into
    :param from_names: names of the vectors whose overlap is mapped from
    :return: a list of 1-based stan indices
    """
    # 1-based stan index for every entry of the target vector
    stan_index = codify(coords[to_name])
    overlap = get_ordered_overlap(stan_index, [coords[name] for name in from_names])
    # Iterating the ordered overlap preserves the target vector's ordering
    return [stan_index.get(entry, 0) for entry in overlap]
| [
"pandas.DataFrame",
"numpy.identity",
"re.escape",
"numpy.linalg.matrix_rank",
"pandas.Series",
"pandas.concat",
"logging.getLogger"
] | [((325, 352), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (342, 352), False, 'import logging\n'), ((7037, 7092), 'pandas.DataFrame', 'pd.DataFrame', (['(0)'], {'columns': 'base_ordering', 'index': 'S.columns'}), '(0, columns=base_ordering, index=S.columns)\n', (7049, 7092), True, 'import pandas as pd\n'), ((7139, 7158), 'numpy.identity', 'np.identity', (['num_ex'], {}), '(num_ex)\n', (7150, 7158), True, 'import numpy as np\n'), ((3300, 3315), 're.escape', 're.escape', (['name'], {}), '(name)\n', (3309, 3315), False, 'import re\n'), ((6746, 6775), 'pandas.Series', 'pd.Series', (['order'], {'index': 'order'}), '(order, index=order)\n', (6755, 6775), True, 'import pandas as pd\n'), ((7662, 7704), 'pandas.Series', 'pd.Series', (['free_x_ind'], {'index': 'base_ordering'}), '(free_x_ind, index=base_ordering)\n', (7671, 7704), True, 'import pandas as pd\n'), ((7706, 7747), 'pandas.Series', 'pd.Series', (['(~fixed_rows_ind)', 's_total.index'], {}), '(~fixed_rows_ind, s_total.index)\n', (7715, 7747), True, 'import pandas as pd\n'), ((10271, 10304), 'numpy.linalg.matrix_rank', 'np.linalg.matrix_rank', (['priors_cov'], {}), '(priors_cov)\n', (10292, 10304), True, 'import numpy as np\n'), ((16469, 16511), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': 'measurements.columns'}), '(columns=measurements.columns)\n', (16481, 16511), True, 'import pandas as pd\n'), ((2474, 2542), 'pandas.DataFrame', 'pd.DataFrame', (['default'], {'index': 'condition_coords', 'columns': 'target_coords'}), '(default, index=condition_coords, columns=target_coords)\n', (2486, 2542), True, 'import pandas as pd\n'), ((16907, 16949), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': 'measurements.columns'}), '(columns=measurements.columns)\n', (16919, 16949), True, 'import pandas as pd\n'), ((1501, 1537), 'pandas.concat', 'pd.concat', (['[location, scale]'], {'axis': '(1)'}), '([location, scale], axis=1)\n', (1510, 1537), True, 'import pandas as pd\n')] |
import numpy as np
from scipy.optimize import curve_fit
import matplotlib.pyplot as plt
import matplotlib.ticker as mtick
from scipy import signal
from . import exp_decay, exp_decay_sin, get_calibration_dict, get_title, \
save_fig, smooth_data_butter, smooth_data_SG, plot_cf_data, \
get_sample_name, g_from_qubit, set_calibration_val, get_calibration_val
# TODO: write fit functions: qubit_from_ssb_measure,
# qubit_from_ssb_power_sweep,
# qubit_from_ssb_volt_sweep
# TODO: write fit for resonance and use this to find resonance
# not argmin
###########################
# VNA
###########################
def find_peaks(dataset, fs, x_key="set", y_key="mag", cutoff=5e-6, order=2,
               subplot=None, widths=np.linspace(50, 150)):
    """
    Smooth a 1d trace, find resonance dips via a wavelet peak search on the
    inverted data, and plot the smoothed/unsmoothed traces with the found
    peaks marked.

    Args:
        dataset (qcodes DataSet)
        fs (float): sampling frequency, passed to smooth_data_butter
        x_key (str): substring used to locate the x-axis data array,
            default "set"
        y_key (str): substring used to locate the y-axis data array,
            default "mag"
        cutoff (float): smoothing cutoff passed to smooth_data_butter,
            default 5e-6
        order (int): smoothing order passed to smooth_data_butter, default 2
        subplot (matplotlib AxesSubplot): see NOTE below — currently ignored
        widths (array): peak widths to search for, passed to
            signal.find_peaks_cwt, default np.linspace(50, 150)

    Returns:
        peakind (array): indices of resonances found
        frequencies (array): frequencies of resonances found
        subplot (matplotlib AxesSubplot): plot of results

    NOTE(review): the ``subplot`` argument is never used — the variable is
    overwritten by the axes returned from plot_cf_data below. Confirm
    whether it should be forwarded to plot_cf_data.
    """
    try:
        setpoints = next(getattr(dataset, key)
                         for key in dataset.arrays.keys() if x_key in key)
        unsmoothed_data = next(getattr(dataset, key)
                               for key in dataset.arrays.keys() if y_key in key)
    except Exception:
        raise Exception('could not get {} and {} arrays from dataset, check dataset '
                        'has these keys array names'.format(x_key, y_key))
    # smooth data
    smoothed_data = smooth_data_butter(
        unsmoothed_data, fs, cutoff=cutoff, order=order)
    # Invert the data so resonance dips become peaks for the wavelet search
    peakind = signal.find_peaks_cwt(np.multiply(smoothed_data, -1), widths)
    try:
        num = dataset.data_num
    except AttributeError:
        # Fall back to the location counter; may not match the plot title
        num = dataset.location_provider.counter
        print('warning: check title, could be wrong datanum')
    # plot: unsmoothed data, smoothed data and add peak estimate values
    fig, subplot = plot_cf_data([unsmoothed_data, smoothed_data],
                                xdata=setpoints,
                                data_num=num,
                                axes_labels=['frequency(Hz)', 'S21'])
    subplot.plot(setpoints[peakind], smoothed_data[peakind], 'gs')
    txt = '{} resonances found at {}'.format(len(peakind), setpoints[peakind])
    fig.suptitle('{}_{}_find_peaks'.format(num, get_sample_name()),
                 fontsize=12)
    print(txt)
    return peakind, setpoints[peakind], subplot
def get_resonator_push(dataset, x_key="freq", y_key="pow", z_key="mag"):
    """
    Get the change in resonance frequency ("push") from a power sweep
    dataset, comparing the highest-power sweep (first row) with the
    lowest-power sweep (last row).

    Args:
        dataset (qcodes DataSet)
        x_key (str): substring used to locate the frequency data array,
            default "freq"
        y_key (str): substring used to locate the power data array,
            default "pow"
        z_key (str): substring used to locate the magnitude data arrays,
            default "mag"

    Returns:
        low_res (float): calculated resonance freq at low power
        high_res (float): calculated resonance freq at high power
        fig (matplotlib Figure): figure with the high/low power cuts and the
            resonance position versus power
    """
    # get data for high and low power from dataset
    try:
        freq_array = next(getattr(dataset, key)
                          for key in dataset.arrays.keys() if x_key in key)[0]
        pow_array = next(getattr(dataset, key)
                         for key in dataset.arrays.keys() if y_key in key)
        mag_arrays = next(getattr(dataset, key)
                          for key in dataset.arrays.keys() if z_key in key)
    except Exception:
        raise Exception('could not get {}, {} and {} arrays from dataset, check dataset '
                        'has these keys array names'.format(x_key, y_key, z_key))
    # First sweep is at the highest power, last at the lowest (see
    # high_pow/low_pow below)
    mag_high = mag_arrays[0]
    mag_low = mag_arrays[-1]
    smoothed_mag_low = smooth_data_SG(mag_low, 15, 6)
    smoothed_mag_high = smooth_data_SG(mag_high, 15, 6)
    # get frequency of resonances (magnitude minimum) for low and high power
    low_res = freq_array[smoothed_mag_low.argmin()]
    high_res = freq_array[smoothed_mag_high.argmin()]
    low_pow = pow_array[-1]
    high_pow = pow_array[0]
    dif = low_res - high_res
    # for all pow sweeps smooth and get resonance
    res_data = np.zeros(len(mag_arrays))
    for i, sweep in enumerate(mag_arrays):
        smoothed_data = smooth_data_SG(sweep, 15, 6)
        res_data[i] = freq_array[smoothed_data.argmin()]
    # plot
    fig, axarr = plt.subplots(2)
    # subplot 1: high and low power cuts, smoothed and unsmoothed
    plot_cf_data([smoothed_mag_high, mag_high, smoothed_mag_low, mag_low],
                 subplot=axarr[0], xdata=freq_array,
                 legend_labels=['pow={}'.format(high_pow),
                                'pow={},smoothed'.format(high_pow),
                                'pow={}'.format(low_pow),
                                'pow={}, smoothed'.format(low_pow)],
                 axes_labels=['frequency (Hz)', 'S21'])
    # subplot 2: resonance for all power sweep with max and min lines plotted
    axarr[1].plot(pow_array, res_data, 'k-')
    axarr[1].plot([high_pow, low_pow], [high_res, high_res], 'r', lw=2,
                  label='high power res = {}'.format(high_res))
    axarr[1].plot([high_pow, low_pow], [low_res, low_res], 'b', lw=2,
                  label='low power res = {}'.format(low_res))
    axarr[1].set_xlabel('power (dBm)')
    axarr[1].set_ylabel('frequency (Hz)')
    axarr[1].legend(loc='upper right', fontsize=10)
    plt.tight_layout()
    try:
        fig.data_num = dataset.data_num
        fig.suptitle('dataset {}'.format(fig.data_num), fontsize=12)
        fig.text(0, 0, 'bare res: {}, pushed res: {}, push: {}'.format(
            high_res, low_res, dif))
    except AttributeError as e:
        # Dataset has no data_num; fall back to the location counter
        fig.data_num = dataset.location_provider.counter
        print('dataset has no data_num set: {}'.format(e))
    return low_res, high_res, fig
###########################
# Alazar
###########################
def find_extreme(data, x_key="freq", y_key="mag", extr="min"):
    """
    Find the min or max along the y axis and return the x and y values at
    that point.

    Args:
        data (qcodes dataset): object with an ``arrays`` dict whose keys name
            the data arrays; the arrays are also available as attributes
        x_key (string) (default 'freq'): substring to search data array keys
            for to find the x data
        y_key (string) (default 'mag'): substring to search data array keys
            for to find the y data
        extr ('min' or 'max') (default 'min'): whether to find max or min
            along this axis

    Returns:
        extr_x, extr_y: x and y values at the extremum

    Raises:
        KeyError: if x_key or y_key matches no array name
        ValueError: if extr is not 'min' or 'max'
    """
    try:
        x_key_array_name = [v for v in data.arrays.keys() if x_key in v][0]
    except IndexError:
        raise KeyError('keys: {} not in data array '
                       'names: {}'.format(x_key,
                                          list(data.arrays.keys())))
    try:
        y_key_array_name = [v for v in data.arrays.keys() if y_key in v][0]
    except IndexError:
        raise KeyError('keys: {} not in data array '
                       'names: {}'.format(y_key,
                                          list(data.arrays.keys())))
    x_data = getattr(data, x_key_array_name)
    y_data = getattr(data, y_key_array_name)
    # Compare with ==, not `is`: identity comparison against string literals
    # relies on interning and emits a SyntaxWarning on modern Python.
    if extr == "min":
        index = np.argmin(y_data)
        extr_y = np.amin(y_data)
    elif extr == "max":
        index = np.argmax(y_data)
        extr_y = np.amax(y_data)
    else:
        raise ValueError('extr must be set to "min" or "max", given'
                         ' {}'.format(extr))
    extr_x = x_data[index]
    return extr_x, extr_y
def recalculate_g(calib_update=False):
    """
    Recompute the coupling g for the current qubit from the calibration
    dictionary's measured qubit frequency, bare resonator frequency and
    pushed resonator frequency, and print a comparison with the stored
    (old) g value and resonator push.

    Args:
        calib_update (bool): if True, store the recomputed value as the
            current qubit's 'g_value' in the calibration dictionary

    Returns:
        the recomputed g value
    """
    measured_qubit = get_calibration_val('qubit_freq')
    predicted_qubit = get_calibration_val('expected_qubit_freq')
    previous_g = get_calibration_val('g_value')
    bare = get_calibration_val('bare_res_freq')
    pushed_old = get_calibration_val('pushed_res_freq')
    pushed_new = get_calibration_val('cavity_freq')
    push_old = pushed_old - bare
    push_new = pushed_new - bare
    recomputed_g = g_from_qubit(measured_qubit, bare, pushed_new)
    if calib_update:
        set_calibration_val('g_value', recomputed_g)
    print('expected qubit freq: {}\n (from g of {}, push on resonator {})\n'
          'actual qubit freq: {}\n (gives g of {}, push on resonator {})'
          ''.format(
              predicted_qubit, previous_g, push_old,
              measured_qubit, recomputed_g, push_new))
    return recomputed_g
def qubit_from_ssb_measure(dataset, gradient_sign=1, min_res_width=4e6):
    # TODO: not yet implemented — see the fit-function TODO list at the top
    # of this module.
    raise NotImplementedError
def qubit_from_ssb_power_sweep(dataset, gradient_sign=1, min_res_width=4e6):
    # TODO: not yet implemented — see the fit-function TODO list at the top
    # of this module.
    raise NotImplementedError
def qubit_from_ssb_volt_sweep(dataset, gradient_sign=1, min_res_width=4e6,
                              high_voltage=True):
    # TODO: not yet implemented — see the fit-function TODO list at the top
    # of this module.
    raise NotImplementedError
def get_t2(data, x_name='delay', y_name='magnitude',
           plot=True, subplot=None,
           initial_fit_params=[0.003, 1e-7, 10e7, 0, 0.01]):
    """
    Fit a 1d data set to a sine wave modulated by an exponential decay,
    y = a * exp(-x / b) * sin(c * x + d) + e, and return the fit parameters
    and the one-standard-deviation errors on them. The decay constant b is
    the T2 time.

    Args:
        data (qcodes dataset): 1d sweep to be fit to
        x_name (str) (default 'delay'): x axis key used to search data.arrays
            for corresponding data
        y_name (str) (default 'magnitude'): y axis key
        plot (default True): whether to plot data, fit and the T2 estimate
        subplot (default None): subplot to plot in otherwise makes new figure
        initial_fit_params (default [0.003, 1e-7, 10e7, 0, 0.01]): initial
            values [a, b, c, d, e] passed to curve_fit as p0
            (the list default is never mutated here — curve_fit copies p0)

    Returns:
        (ax, popt, errors) if plot is True, otherwise (popt, errors)
    """
    x_data = getattr(getattr(data, x_name), 'ndarray')
    y_data = getattr(getattr(data, y_name), 'ndarray')
    x_units = getattr(getattr(data, x_name), 'unit')
    y_units = getattr(getattr(data, y_name), 'unit')
    popt, pcov = curve_fit(exp_decay_sin, x_data, y_data,
                           p0=initial_fit_params)
    # Diagonal of the covariance gives the parameter variances
    errors = np.sqrt(np.diag(pcov))
    # NOTE(review): the first two literal pieces concatenate without a space
    # ("...+ egives:"); message typo left unchanged here.
    print('fit to equation of form y = a * exp(-x / b) * sin(c * x + d) + e'
          'gives:\na {}, b {}, c {}, d {}, e{}\n'
          'with one standard deviation errors:\n'
          'a {}, b {}, c {}, d {}, e{}'.format(popt[0], popt[1], popt[2],
                                               popt[3], popt[4], errors[0],
                                               errors[1], errors[2],
                                               errors[3], errors[4]))
    if plot:
        if subplot is None:
            fig, ax = plt.subplots()
        else:
            ax = subplot
            fig = ax.figure
        num = data.data_num
        try:
            # Include the current qubit in the title/filename if available
            qubit = get_calibration_dict()['current_qubit']
            title = '{}_{}_T2'.format(get_title(num), qubit)
            name = '{}_{}_T2'.format(num, qubit)
        except Exception:
            title = '{}_T2'.format(get_title(num))
            name = '{}_T2'.format(num)
        if not hasattr(fig, "data_num"):
            fig.data_num = num
        ax.plot(x_data,
                exp_decay_sin(x_data, *popt),
                label='fit: T2 {}{}'.format(popt[1],
                                            x_units))
        ax.plot(x_data, y_data, label='data')
        ax.set_xlabel('{} ({})'.format(x_name, x_units))
        ax.set_ylabel('{} ({})'.format(y_name, y_units))
        ax.set_title(title)
        ax.legend(loc='upper right', fontsize=10)
        save_fig(ax, name=name)
        return ax, popt, errors
    else:
        return popt, errors
def get_t1(data, x_name='delay', y_name='magnitude',
           plot=True, subplot=None, initial_fit_params=[0.05, 1e-6, 0.01]):
    """
    Fit a 1d data set to an exponential decay, y = a * exp(-x / b) + c, and
    return the fit parameters and the one-standard-deviation errors on them.
    The decay constant b is the T1 time.

    Args:
        data (qcodes dataset): 1d sweep to be fit to
        x_name (str) (default 'delay'): x axis key used to search data.arrays
            for corresponding data
        y_name (str) (default 'magnitude'): y axis key
        plot (default True): whether to plot data, fit and the T1 estimate
        subplot (default None): subplot to plot in otherwise makes new figure
        initial_fit_params (default [0.05, 1e-6, 0.01]): initial values
            [a, b, c] passed to curve_fit as p0
            (the list default is never mutated here — curve_fit copies p0)

    Returns:
        (ax, popt, errors) if plot is True, otherwise (popt, errors)
    """
    x_data = getattr(getattr(data, x_name), 'ndarray')
    y_data = getattr(getattr(data, y_name), 'ndarray')
    x_units = getattr(getattr(data, x_name), 'unit')
    y_units = getattr(getattr(data, y_name), 'unit')
    popt, pcov = curve_fit(exp_decay, x_data, y_data, p0=initial_fit_params)
    # Diagonal of the covariance gives the parameter variances
    errors = np.sqrt(np.diag(pcov))
    print('fit to equation of form y = a * exp(-x / b) + c gives:\n'
          'a {}, b {}, c {}\n'
          'with one standard deviation errors:\n'
          'a {}, b {}, c {}'.format(popt[0], popt[1], popt[2],
                                    errors[0], errors[1], errors[2]))
    if plot:
        if subplot is None:
            fig, ax = plt.subplots()
        else:
            ax = subplot
            fig = ax.figure
        num = data.data_num
        try:
            # Include the current qubit in the title/filename if available
            qubit = get_calibration_dict()['current_qubit']
            title = '{}_{}_T1'.format(get_title(num), qubit)
            name = '{}_{}_T1'.format(num, qubit)
        except Exception:
            title = '{}_T1'.format(get_title(num))
            name = '{}_T1'.format(num)
        if not hasattr(fig, "data_num"):
            fig.data_num = num
        ax.plot(x_data,
                exp_decay(x_data, *popt),
                label='fit: T1 {}{}'.format(popt[1],
                                            x_units))
        ax.plot(x_data, y_data, label='data')
        ax.set_xlabel('{} ({})'.format(x_name, x_units))
        ax.set_ylabel('{} ({})'.format(y_name, y_units))
        ax.xaxis.set_major_formatter(mtick.FormatStrFormatter('%.1e'))
        ax.set_title(title)
        ax.legend(loc='upper right', fontsize=10)
        save_fig(ax, name=name)
        return ax, popt, errors
    else:
        return popt, errors
| [
"numpy.multiply",
"numpy.amin",
"numpy.argmax",
"matplotlib.pyplot.subplots",
"scipy.optimize.curve_fit",
"numpy.argmin",
"numpy.amax",
"matplotlib.ticker.FormatStrFormatter",
"numpy.linspace",
"numpy.diag",
"matplotlib.pyplot.tight_layout"
] | [((786, 806), 'numpy.linspace', 'np.linspace', (['(50)', '(150)'], {}), '(50, 150)\n', (797, 806), True, 'import numpy as np\n'), ((5418, 5433), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(2)'], {}), '(2)\n', (5430, 5433), True, 'import matplotlib.pyplot as plt\n'), ((6469, 6487), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (6485, 6487), True, 'import matplotlib.pyplot as plt\n'), ((11188, 11251), 'scipy.optimize.curve_fit', 'curve_fit', (['exp_decay_sin', 'x_data', 'y_data'], {'p0': 'initial_fit_params'}), '(exp_decay_sin, x_data, y_data, p0=initial_fit_params)\n', (11197, 11251), False, 'from scipy.optimize import curve_fit\n'), ((13804, 13863), 'scipy.optimize.curve_fit', 'curve_fit', (['exp_decay', 'x_data', 'y_data'], {'p0': 'initial_fit_params'}), '(exp_decay, x_data, y_data, p0=initial_fit_params)\n', (13813, 13863), False, 'from scipy.optimize import curve_fit\n'), ((2504, 2534), 'numpy.multiply', 'np.multiply', (['smoothed_data', '(-1)'], {}), '(smoothed_data, -1)\n', (2515, 2534), True, 'import numpy as np\n'), ((8224, 8241), 'numpy.argmin', 'np.argmin', (['y_data'], {}), '(y_data)\n', (8233, 8241), True, 'import numpy as np\n'), ((8259, 8274), 'numpy.amin', 'np.amin', (['y_data'], {}), '(y_data)\n', (8266, 8274), True, 'import numpy as np\n'), ((11300, 11313), 'numpy.diag', 'np.diag', (['pcov'], {}), '(pcov)\n', (11307, 11313), True, 'import numpy as np\n'), ((13885, 13898), 'numpy.diag', 'np.diag', (['pcov'], {}), '(pcov)\n', (13892, 13898), True, 'import numpy as np\n'), ((8315, 8332), 'numpy.argmax', 'np.argmax', (['y_data'], {}), '(y_data)\n', (8324, 8332), True, 'import numpy as np\n'), ((8350, 8365), 'numpy.amax', 'np.amax', (['y_data'], {}), '(y_data)\n', (8357, 8365), True, 'import numpy as np\n'), ((11844, 11858), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (11856, 11858), True, 'import matplotlib.pyplot as plt\n'), ((14246, 14260), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), 
'()\n', (14258, 14260), True, 'import matplotlib.pyplot as plt\n'), ((15098, 15130), 'matplotlib.ticker.FormatStrFormatter', 'mtick.FormatStrFormatter', (['"""%.1e"""'], {}), "('%.1e')\n", (15122, 15130), True, 'import matplotlib.ticker as mtick\n')] |
# -*- coding: utf-8 -*-
"""
Created on Sun Jun 11 09:56:39 2017
@author: <NAME>
"""
"""
This script visualizes data using matplotlib
``Execute``
$ python matplotlib_viz.py
"""
import numpy as np
import matplotlib.pyplot as plt
if __name__=='__main__':
    # sample plot: single sine curve with title and axis labels
    x = np.linspace(-10, 10, 50)
    y=np.sin(x)
    plt.plot(x,y)
    plt.title('Sine Curve using matplotlib')
    plt.xlabel('x-axis')
    plt.ylabel('y-axis')
    plt.show()
    # figure: numbered figures allow several independent plots
    plt.figure(1)
    plt.plot(x,y)
    plt.title('Fig1: Sine Curve')
    plt.xlabel('x-axis')
    plt.ylabel('y-axis')
    plt.show()
    plt.figure(2)
    y=np.cos(x)
    plt.plot(x,y)
    plt.title('Fig2: Cosine Curve')
    plt.xlabel('x-axis')
    plt.ylabel('y-axis')
    plt.show()
    ### subplot
    # fig.add_subplot: object-oriented API, axes added one by one to a 2x2 grid
    y = np.sin(x)
    figure_obj = plt.figure()
    ax1 = figure_obj.add_subplot(2,2,1)
    ax1.plot(x,y)
    ax2 = figure_obj.add_subplot(2,2,2)  # left empty on purpose
    ax3 = figure_obj.add_subplot(2,2,3)  # left empty on purpose
    ax4 = figure_obj.add_subplot(2,2,4)
    ax4.plot(x+10,y)
    plt.show()
    # plt.subplots: create figure and an array of axes in one call (shared x-axis)
    fig, ax_list = plt.subplots(2,1,sharex=True)
    y= np.sin(x)
    ax_list[0].plot(x,y)
    y= np.cos(x)
    ax_list[1].plot(x,y)
    plt.show()
    # plt.subplot (creates figure and axes objects automatically)
    plt.subplot(2,2,1)
    y = np.sin(x)
    plt.plot(x,y)
    plt.subplot(2,2,2)
    y = np.cos(x)
    plt.plot(x,y)
    plt.subplot(2,1,2)  # bottom axes spans the full width
    y = np.tan(x)
    plt.plot(x,y)
    plt.show()
    # subplot2grid: axes placed on a 4x3 grid with row/column spans
    y = np.abs(x)
    z = x**2
    plt.subplot2grid((4,3), (0, 0), rowspan=4, colspan=2)
    plt.plot(x, y,'b',x,z,'r')
    ax2 = plt.subplot2grid((4,3), (0, 2),rowspan=2)
    plt.plot(x, y,'b')
    plt.setp(ax2.get_xticklabels(), visible=False)  # hide x labels of the upper-right axes
    plt.subplot2grid((4,3), (2, 2), rowspan=2)
    plt.plot(x, z,'r')
    plt.show()
    ### formatting
    y = x
    # color
    ax1 = plt.subplot(611)
    plt.plot(x,y,color='green')
    ax1.set_title('Line Color')
    plt.setp(ax1.get_xticklabels(), visible=False)
    # linestyle
    # linestyles -> '-','--','-.', ':', 'steps'
    ax2 = plt.subplot(612,sharex=ax1)
    plt.plot(x,y,linestyle='--')
    ax2.set_title('Line Style')
    plt.setp(ax2.get_xticklabels(), visible=False)
    # marker
    # markers -> '+', 'o', '*', 's', ',', '.', etc
    ax3 = plt.subplot(613,sharex=ax1)
    plt.plot(x,y,marker='*')
    ax3.set_title('Point Marker')
    plt.setp(ax3.get_xticklabels(), visible=False)
    # line width: plt.plot returns a list of Line2D objects
    ax4 = plt.subplot(614,sharex=ax1)
    line = plt.plot(x,y)
    line[0].set_linewidth(3.0)
    ax4.set_title('Line Width')
    plt.setp(ax4.get_xticklabels(), visible=False)
    # alpha (transparency, 0.0 transparent .. 1.0 opaque)
    ax5 = plt.subplot(615,sharex=ax1)
    alpha = plt.plot(x,y)
    alpha[0].set_alpha(0.3)
    ax5.set_title('Line Alpha')
    plt.setp(ax5.get_xticklabels(), visible=False)
    # combine linestyle: 'b^' = blue line with triangle markers
    ax6 = plt.subplot(616,sharex=ax1)
    plt.plot(x,y,'b^')
    ax6.set_title('Styling Shorthand')
    fig = plt.gcf()
    fig.set_figheight(15)
    plt.show()
    # legends: label each line, then call plt.legend
    y = x**2
    z = x
    plt.plot(x,y,'g',label='y=x^2')
    plt.plot(x,z,'b:',label='y=x')
    plt.legend(loc="best")
    plt.title('Legend Sample')
    plt.show()
    # legend with latex formatting ($...$ is rendered as math text)
    plt.plot(x,y,'g',label='$y = x^2$')
    plt.plot(x,z,'b:',linewidth=3,label='$y = x^2$')
    plt.legend(loc="best",fontsize='x-large')
    plt.title('Legend with LaTEX formatting')
    plt.show()
    ## axis controls
    # secondary y-axis: twinx shares the x-axis, adds an independent y-axis
    fig, ax1 = plt.subplots()
    ax1.plot(x,y,'g')
    ax1.set_ylabel(r"primary y-axis", color="green")
    ax2 = ax1.twinx()
    ax2.plot(x,z,'b:',linewidth=3)
    ax2.set_ylabel(r"secondary y-axis", color="blue")
    plt.title('Secondary Y Axis')
    plt.show()
    # ticks
    y = np.log(x)
    z = np.log2(x)
    w = np.log10(x)
    plt.plot(x,y,'r',x,z,'g',x,w,'b')
    plt.title('Default Axis Ticks')
    plt.show()
    # axis-controls
    plt.plot(x,y,'r',x,z,'g',x,w,'b')
    # values: tight, scaled, equal,auto
    plt.axis('tight')
    plt.title('Tight Axis')
    plt.show()
    # manual axis range: [xmin, xmax, ymin, ymax]
    plt.plot(x,y,'r',x,z,'g',x,w,'b')
    plt.axis([0,2,-1,2])
    plt.title('Manual Axis Range')
    plt.show()
    # Manual ticks
    plt.plot(x, y)
    ax = plt.gca()
    ax.xaxis.set_ticks(np.arange(-2, 2, 1))
    plt.grid(True)
    plt.title("Manual ticks on the x-axis")
    plt.show()
    # minor ticks and custom tick labels
    plt.plot(x, z)
    plt.minorticks_on()
    ax = plt.gca()
    ax.yaxis.set_ticks(np.arange(0, 5))
    ax.yaxis.set_ticklabels(["min", 2, 4, "max"])
    plt.title("Minor ticks on the y-axis")
    plt.show()
    # scaling
    plt.plot(x, y)
    ax = plt.gca()
    # values: log, logit, symlog
    ax.set_yscale("log")
    plt.grid(True)
    plt.title("Log Scaled Axis")
    plt.show()
    # annotations: plt.text places text at data coordinates
    y = x**2
    min_x = 0
    min_y = min_x**2
    plt.plot(x, y, "b-", min_x, min_y, "ro")
    plt.axis([-10,10,-25,100])
    plt.text(0, 60, "Parabola\n$y = x^2$", fontsize=15, ha="center")
    plt.text(min_x, min_y+2, "Minima", ha="center")
    plt.text(min_x, min_y-6, "(%0.1f, %0.1f)"%(min_x, min_y), ha='center',color='gray')
    plt.title("Annotated Plot")
    plt.show()
    # global formatting params (applied to all subsequent figures)
    params = {'legend.fontsize': 'large',
              'figure.figsize': (10, 10),
              'axes.labelsize': 'large',
              'axes.titlesize':'large',
              'xtick.labelsize':'large',
              'ytick.labelsize':'large'}
    plt.rcParams.update(params)
    # saving
    #plt.savefig("sample_plot.png", transparent=True)
"matplotlib.pyplot.title",
"numpy.abs",
"matplotlib.pyplot.subplot2grid",
"matplotlib.pyplot.figure",
"numpy.sin",
"numpy.arange",
"matplotlib.pyplot.gca",
"numpy.tan",
"matplotlib.pyplot.rcParams.update",
"numpy.linspace",
"numpy.log10",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.show... | [((301, 325), 'numpy.linspace', 'np.linspace', (['(-10)', '(10)', '(50)'], {}), '(-10, 10, 50)\n', (312, 325), True, 'import numpy as np\n'), ((332, 341), 'numpy.sin', 'np.sin', (['x'], {}), '(x)\n', (338, 341), True, 'import numpy as np\n'), ((351, 365), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y'], {}), '(x, y)\n', (359, 365), True, 'import matplotlib.pyplot as plt\n'), ((369, 409), 'matplotlib.pyplot.title', 'plt.title', (['"""Sine Curve using matplotlib"""'], {}), "('Sine Curve using matplotlib')\n", (378, 409), True, 'import matplotlib.pyplot as plt\n'), ((414, 434), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""x-axis"""'], {}), "('x-axis')\n", (424, 434), True, 'import matplotlib.pyplot as plt\n'), ((439, 459), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""y-axis"""'], {}), "('y-axis')\n", (449, 459), True, 'import matplotlib.pyplot as plt\n'), ((464, 474), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (472, 474), True, 'import matplotlib.pyplot as plt\n'), ((502, 515), 'matplotlib.pyplot.figure', 'plt.figure', (['(1)'], {}), '(1)\n', (512, 515), True, 'import matplotlib.pyplot as plt\n'), ((520, 534), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y'], {}), '(x, y)\n', (528, 534), True, 'import matplotlib.pyplot as plt\n'), ((538, 567), 'matplotlib.pyplot.title', 'plt.title', (['"""Fig1: Sine Curve"""'], {}), "('Fig1: Sine Curve')\n", (547, 567), True, 'import matplotlib.pyplot as plt\n'), ((572, 592), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""x-axis"""'], {}), "('x-axis')\n", (582, 592), True, 'import matplotlib.pyplot as plt\n'), ((597, 617), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""y-axis"""'], {}), "('y-axis')\n", (607, 617), True, 'import matplotlib.pyplot as plt\n'), ((622, 632), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (630, 632), True, 'import matplotlib.pyplot as plt\n'), ((642, 655), 'matplotlib.pyplot.figure', 'plt.figure', (['(2)'], {}), '(2)\n', (652, 655), True, 'import 
matplotlib.pyplot as plt\n'), ((662, 671), 'numpy.cos', 'np.cos', (['x'], {}), '(x)\n', (668, 671), True, 'import numpy as np\n'), ((676, 690), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y'], {}), '(x, y)\n', (684, 690), True, 'import matplotlib.pyplot as plt\n'), ((694, 725), 'matplotlib.pyplot.title', 'plt.title', (['"""Fig2: Cosine Curve"""'], {}), "('Fig2: Cosine Curve')\n", (703, 725), True, 'import matplotlib.pyplot as plt\n'), ((730, 750), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""x-axis"""'], {}), "('x-axis')\n", (740, 750), True, 'import matplotlib.pyplot as plt\n'), ((755, 775), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""y-axis"""'], {}), "('y-axis')\n", (765, 775), True, 'import matplotlib.pyplot as plt\n'), ((780, 790), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (788, 790), True, 'import matplotlib.pyplot as plt\n'), ((847, 856), 'numpy.sin', 'np.sin', (['x'], {}), '(x)\n', (853, 856), True, 'import numpy as np\n'), ((874, 886), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (884, 886), True, 'import matplotlib.pyplot as plt\n'), ((1096, 1106), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1104, 1106), True, 'import matplotlib.pyplot as plt\n'), ((1154, 1185), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(2)', '(1)'], {'sharex': '(True)'}), '(2, 1, sharex=True)\n', (1166, 1185), True, 'import matplotlib.pyplot as plt\n'), ((1191, 1200), 'numpy.sin', 'np.sin', (['x'], {}), '(x)\n', (1197, 1200), True, 'import numpy as np\n'), ((1238, 1247), 'numpy.cos', 'np.cos', (['x'], {}), '(x)\n', (1244, 1247), True, 'import numpy as np\n'), ((1277, 1287), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1285, 1287), True, 'import matplotlib.pyplot as plt\n'), ((1368, 1388), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(2)', '(1)'], {}), '(2, 2, 1)\n', (1379, 1388), True, 'import matplotlib.pyplot as plt\n'), ((1395, 1404), 'numpy.sin', 'np.sin', (['x'], {}), '(x)\n', (1401, 1404), True, 'import 
numpy as np\n'), ((1413, 1427), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y'], {}), '(x, y)\n', (1421, 1427), True, 'import matplotlib.pyplot as plt\n'), ((1432, 1452), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(2)', '(2)'], {}), '(2, 2, 2)\n', (1443, 1452), True, 'import matplotlib.pyplot as plt\n'), ((1459, 1468), 'numpy.cos', 'np.cos', (['x'], {}), '(x)\n', (1465, 1468), True, 'import numpy as np\n'), ((1473, 1487), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y'], {}), '(x, y)\n', (1481, 1487), True, 'import matplotlib.pyplot as plt\n'), ((1492, 1512), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(1)', '(2)'], {}), '(2, 1, 2)\n', (1503, 1512), True, 'import matplotlib.pyplot as plt\n'), ((1519, 1528), 'numpy.tan', 'np.tan', (['x'], {}), '(x)\n', (1525, 1528), True, 'import numpy as np\n'), ((1533, 1547), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y'], {}), '(x, y)\n', (1541, 1547), True, 'import matplotlib.pyplot as plt\n'), ((1558, 1568), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1566, 1568), True, 'import matplotlib.pyplot as plt\n'), ((1606, 1615), 'numpy.abs', 'np.abs', (['x'], {}), '(x)\n', (1612, 1615), True, 'import numpy as np\n'), ((1638, 1692), 'matplotlib.pyplot.subplot2grid', 'plt.subplot2grid', (['(4, 3)', '(0, 0)'], {'rowspan': '(4)', 'colspan': '(2)'}), '((4, 3), (0, 0), rowspan=4, colspan=2)\n', (1654, 1692), True, 'import matplotlib.pyplot as plt\n'), ((1696, 1726), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y', '"""b"""', 'x', 'z', '"""r"""'], {}), "(x, y, 'b', x, z, 'r')\n", (1704, 1726), True, 'import matplotlib.pyplot as plt\n'), ((1738, 1781), 'matplotlib.pyplot.subplot2grid', 'plt.subplot2grid', (['(4, 3)', '(0, 2)'], {'rowspan': '(2)'}), '((4, 3), (0, 2), rowspan=2)\n', (1754, 1781), True, 'import matplotlib.pyplot as plt\n'), ((1784, 1803), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y', '"""b"""'], {}), "(x, y, 'b')\n", (1792, 1803), True, 'import matplotlib.pyplot as plt\n'), ((1859, 
1902), 'matplotlib.pyplot.subplot2grid', 'plt.subplot2grid', (['(4, 3)', '(2, 2)'], {'rowspan': '(2)'}), '((4, 3), (2, 2), rowspan=2)\n', (1875, 1902), True, 'import matplotlib.pyplot as plt\n'), ((1906, 1925), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'z', '"""r"""'], {}), "(x, z, 'r')\n", (1914, 1925), True, 'import matplotlib.pyplot as plt\n'), ((1934, 1944), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1942, 1944), True, 'import matplotlib.pyplot as plt\n'), ((2018, 2034), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(611)'], {}), '(611)\n', (2029, 2034), True, 'import matplotlib.pyplot as plt\n'), ((2039, 2068), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y'], {'color': '"""green"""'}), "(x, y, color='green')\n", (2047, 2068), True, 'import matplotlib.pyplot as plt\n'), ((2229, 2257), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(612)'], {'sharex': 'ax1'}), '(612, sharex=ax1)\n', (2240, 2257), True, 'import matplotlib.pyplot as plt\n'), ((2261, 2291), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y'], {'linestyle': '"""--"""'}), "(x, y, linestyle='--')\n", (2269, 2291), True, 'import matplotlib.pyplot as plt\n'), ((2452, 2480), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(613)'], {'sharex': 'ax1'}), '(613, sharex=ax1)\n', (2463, 2480), True, 'import matplotlib.pyplot as plt\n'), ((2484, 2510), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y'], {'marker': '"""*"""'}), "(x, y, marker='*')\n", (2492, 2510), True, 'import matplotlib.pyplot as plt\n'), ((2626, 2654), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(614)'], {'sharex': 'ax1'}), '(614, sharex=ax1)\n', (2637, 2654), True, 'import matplotlib.pyplot as plt\n'), ((2665, 2679), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y'], {}), '(x, y)\n', (2673, 2679), True, 'import matplotlib.pyplot as plt\n'), ((2820, 2848), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(615)'], {'sharex': 'ax1'}), '(615, sharex=ax1)\n', (2831, 2848), True, 'import matplotlib.pyplot as plt\n'), ((2860, 
2874), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y'], {}), '(x, y)\n', (2868, 2874), True, 'import matplotlib.pyplot as plt\n'), ((3024, 3052), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(616)'], {'sharex': 'ax1'}), '(616, sharex=ax1)\n', (3035, 3052), True, 'import matplotlib.pyplot as plt\n'), ((3056, 3076), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y', '"""b^"""'], {}), "(x, y, 'b^')\n", (3064, 3076), True, 'import matplotlib.pyplot as plt\n'), ((3129, 3138), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (3136, 3138), True, 'import matplotlib.pyplot as plt\n'), ((3169, 3179), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3177, 3179), True, 'import matplotlib.pyplot as plt\n'), ((3236, 3270), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y', '"""g"""'], {'label': '"""y=x^2"""'}), "(x, y, 'g', label='y=x^2')\n", (3244, 3270), True, 'import matplotlib.pyplot as plt\n'), ((3272, 3305), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'z', '"""b:"""'], {'label': '"""y=x"""'}), "(x, z, 'b:', label='y=x')\n", (3280, 3305), True, 'import matplotlib.pyplot as plt\n'), ((3307, 3329), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""best"""'}), "(loc='best')\n", (3317, 3329), True, 'import matplotlib.pyplot as plt\n'), ((3334, 3360), 'matplotlib.pyplot.title', 'plt.title', (['"""Legend Sample"""'], {}), "('Legend Sample')\n", (3343, 3360), True, 'import matplotlib.pyplot as plt\n'), ((3365, 3375), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3373, 3375), True, 'import matplotlib.pyplot as plt\n'), ((3420, 3458), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y', '"""g"""'], {'label': '"""$y = x^2$"""'}), "(x, y, 'g', label='$y = x^2$')\n", (3428, 3458), True, 'import matplotlib.pyplot as plt\n'), ((3460, 3512), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'z', '"""b:"""'], {'linewidth': '(3)', 'label': '"""$y = x^2$"""'}), "(x, z, 'b:', linewidth=3, label='$y = x^2$')\n", (3468, 3512), True, 'import matplotlib.pyplot as 
plt\n'), ((3513, 3555), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""best"""', 'fontsize': '"""x-large"""'}), "(loc='best', fontsize='x-large')\n", (3523, 3555), True, 'import matplotlib.pyplot as plt\n'), ((3559, 3600), 'matplotlib.pyplot.title', 'plt.title', (['"""Legend with LaTEX formatting"""'], {}), "('Legend with LaTEX formatting')\n", (3568, 3600), True, 'import matplotlib.pyplot as plt\n'), ((3605, 3615), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3613, 3615), True, 'import matplotlib.pyplot as plt\n'), ((3685, 3699), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (3697, 3699), True, 'import matplotlib.pyplot as plt\n'), ((3905, 3934), 'matplotlib.pyplot.title', 'plt.title', (['"""Secondary Y Axis"""'], {}), "('Secondary Y Axis')\n", (3914, 3934), True, 'import matplotlib.pyplot as plt\n'), ((3939, 3949), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3947, 3949), True, 'import matplotlib.pyplot as plt\n'), ((3979, 3988), 'numpy.log', 'np.log', (['x'], {}), '(x)\n', (3985, 3988), True, 'import numpy as np\n'), ((3997, 4007), 'numpy.log2', 'np.log2', (['x'], {}), '(x)\n', (4004, 4007), True, 'import numpy as np\n'), ((4016, 4027), 'numpy.log10', 'np.log10', (['x'], {}), '(x)\n', (4024, 4027), True, 'import numpy as np\n'), ((4037, 4078), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y', '"""r"""', 'x', 'z', '"""g"""', 'x', 'w', '"""b"""'], {}), "(x, y, 'r', x, z, 'g', x, w, 'b')\n", (4045, 4078), True, 'import matplotlib.pyplot as plt\n'), ((4075, 4106), 'matplotlib.pyplot.title', 'plt.title', (['"""Default Axis Ticks"""'], {}), "('Default Axis Ticks')\n", (4084, 4106), True, 'import matplotlib.pyplot as plt\n'), ((4112, 4122), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4120, 4122), True, 'import matplotlib.pyplot as plt\n'), ((4159, 4200), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y', '"""r"""', 'x', 'z', '"""g"""', 'x', 'w', '"""b"""'], {}), "(x, y, 'r', x, z, 'g', x, w, 'b')\n", 
(4167, 4200), True, 'import matplotlib.pyplot as plt\n'), ((4237, 4254), 'matplotlib.pyplot.axis', 'plt.axis', (['"""tight"""'], {}), "('tight')\n", (4245, 4254), True, 'import matplotlib.pyplot as plt\n'), ((4259, 4282), 'matplotlib.pyplot.title', 'plt.title', (['"""Tight Axis"""'], {}), "('Tight Axis')\n", (4268, 4282), True, 'import matplotlib.pyplot as plt\n'), ((4288, 4298), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4296, 4298), True, 'import matplotlib.pyplot as plt\n'), ((4317, 4358), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y', '"""r"""', 'x', 'z', '"""g"""', 'x', 'w', '"""b"""'], {}), "(x, y, 'r', x, z, 'g', x, w, 'b')\n", (4325, 4358), True, 'import matplotlib.pyplot as plt\n'), ((4355, 4378), 'matplotlib.pyplot.axis', 'plt.axis', (['[0, 2, -1, 2]'], {}), '([0, 2, -1, 2])\n', (4363, 4378), True, 'import matplotlib.pyplot as plt\n'), ((4380, 4410), 'matplotlib.pyplot.title', 'plt.title', (['"""Manual Axis Range"""'], {}), "('Manual Axis Range')\n", (4389, 4410), True, 'import matplotlib.pyplot as plt\n'), ((4416, 4426), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4424, 4426), True, 'import matplotlib.pyplot as plt\n'), ((4472, 4486), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y'], {}), '(x, y)\n', (4480, 4486), True, 'import matplotlib.pyplot as plt\n'), ((4496, 4505), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (4503, 4505), True, 'import matplotlib.pyplot as plt\n'), ((4554, 4568), 'matplotlib.pyplot.grid', 'plt.grid', (['(True)'], {}), '(True)\n', (4562, 4568), True, 'import matplotlib.pyplot as plt\n'), ((4573, 4612), 'matplotlib.pyplot.title', 'plt.title', (['"""Manual ticks on the x-axis"""'], {}), "('Manual ticks on the x-axis')\n", (4582, 4612), True, 'import matplotlib.pyplot as plt\n'), ((4617, 4627), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4625, 4627), True, 'import matplotlib.pyplot as plt\n'), ((4660, 4674), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'z'], {}), '(x, z)\n', (4668, 
4674), True, 'import matplotlib.pyplot as plt\n'), ((4679, 4698), 'matplotlib.pyplot.minorticks_on', 'plt.minorticks_on', ([], {}), '()\n', (4696, 4698), True, 'import matplotlib.pyplot as plt\n'), ((4708, 4717), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (4715, 4717), True, 'import matplotlib.pyplot as plt\n'), ((4812, 4850), 'matplotlib.pyplot.title', 'plt.title', (['"""Minor ticks on the y-axis"""'], {}), "('Minor ticks on the y-axis')\n", (4821, 4850), True, 'import matplotlib.pyplot as plt\n'), ((4858, 4868), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4866, 4868), True, 'import matplotlib.pyplot as plt\n'), ((4901, 4915), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y'], {}), '(x, y)\n', (4909, 4915), True, 'import matplotlib.pyplot as plt\n'), ((4925, 4934), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (4932, 4934), True, 'import matplotlib.pyplot as plt\n'), ((4997, 5011), 'matplotlib.pyplot.grid', 'plt.grid', (['(True)'], {}), '(True)\n', (5005, 5011), True, 'import matplotlib.pyplot as plt\n'), ((5016, 5044), 'matplotlib.pyplot.title', 'plt.title', (['"""Log Scaled Axis"""'], {}), "('Log Scaled Axis')\n", (5025, 5044), True, 'import matplotlib.pyplot as plt\n'), ((5049, 5059), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5057, 5059), True, 'import matplotlib.pyplot as plt\n'), ((5145, 5185), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y', '"""b-"""', 'min_x', 'min_y', '"""ro"""'], {}), "(x, y, 'b-', min_x, min_y, 'ro')\n", (5153, 5185), True, 'import matplotlib.pyplot as plt\n'), ((5190, 5219), 'matplotlib.pyplot.axis', 'plt.axis', (['[-10, 10, -25, 100]'], {}), '([-10, 10, -25, 100])\n', (5198, 5219), True, 'import matplotlib.pyplot as plt\n'), ((5226, 5293), 'matplotlib.pyplot.text', 'plt.text', (['(0)', '(60)', '"""Parabola\n$y = x^2$"""'], {'fontsize': '(15)', 'ha': '"""center"""'}), '(0, 60, """Parabola\n$y = x^2$""", fontsize=15, ha=\'center\')\n', (5234, 5293), True, 'import matplotlib.pyplot as 
plt\n'), ((5295, 5344), 'matplotlib.pyplot.text', 'plt.text', (['min_x', '(min_y + 2)', '"""Minima"""'], {'ha': '"""center"""'}), "(min_x, min_y + 2, 'Minima', ha='center')\n", (5303, 5344), True, 'import matplotlib.pyplot as plt\n'), ((5347, 5439), 'matplotlib.pyplot.text', 'plt.text', (['min_x', '(min_y - 6)', "('(%0.1f, %0.1f)' % (min_x, min_y))"], {'ha': '"""center"""', 'color': '"""gray"""'}), "(min_x, min_y - 6, '(%0.1f, %0.1f)' % (min_x, min_y), ha='center',\n color='gray')\n", (5355, 5439), True, 'import matplotlib.pyplot as plt\n'), ((5435, 5462), 'matplotlib.pyplot.title', 'plt.title', (['"""Annotated Plot"""'], {}), "('Annotated Plot')\n", (5444, 5462), True, 'import matplotlib.pyplot as plt\n'), ((5467, 5477), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5475, 5477), True, 'import matplotlib.pyplot as plt\n'), ((5767, 5794), 'matplotlib.pyplot.rcParams.update', 'plt.rcParams.update', (['params'], {}), '(params)\n', (5786, 5794), True, 'import matplotlib.pyplot as plt\n'), ((4529, 4548), 'numpy.arange', 'np.arange', (['(-2)', '(2)', '(1)'], {}), '(-2, 2, 1)\n', (4538, 4548), True, 'import numpy as np\n'), ((4741, 4756), 'numpy.arange', 'np.arange', (['(0)', '(5)'], {}), '(0, 5)\n', (4750, 4756), True, 'import numpy as np\n')] |
import os
from os.path import join
from ...Functions.FEMM.draw_FEMM import draw_FEMM
from ...Functions.Electrical.coordinate_transformation import n2dq
from ...Classes._FEMMHandler import _FEMMHandler
from ...Classes.OutMagFEMM import OutMagFEMM
from numpy import linspace, pi, split
from SciDataTool.Classes.Data1D import Data1D
def comp_fluxlinkage(obj, output):
    """Compute the flux linkage using FEMM and electrical output reference currents

    Parameters
    ----------
    obj : FluxLinkFEMM or IndMagFEMM
        a FluxLinkFEMM object or an IndMagFEMM object
    output : Output
        an Output object

    Return
    ------
    fluxdq : ndarray
        the calculated fluxlinkage, split into d and q components
    """
    # get some machine and simulation parameters
    L1 = output.simu.machine.stator.L1
    qs = output.simu.machine.stator.winding.qs
    zp = output.simu.machine.stator.get_pole_pair_number()
    Nt_tot = obj.Nt_tot
    rot_dir = output.get_rot_dir()

    # Get save path (race-free creation: avoids the exists()/makedirs() TOCTOU gap)
    str_EEC = "EEC"
    path_res = output.get_path_result()
    save_dir = join(path_res, "Femm")
    os.makedirs(save_dir, exist_ok=True)

    # Build the .fem file name from the machine name, then the simu name, else a default
    if output.simu.machine.name not in [None, ""]:
        file_name = output.simu.machine.name + "_" + str_EEC + ".fem"
    elif output.simu.name not in [None, ""]:
        file_name = output.simu.name + "_" + str_EEC + ".fem"
    else:  # Default name
        file_name = "FEMM_" + str_EEC + ".fem"
    path_save = join(save_dir, file_name)

    # Set the symmetry factor according to the machine
    if obj.is_periodicity_a:
        (
            sym,
            is_antiper_a,
            _,
            _,
        ) = obj.parent.parent.parent.parent.get_machine_periodicity()
        if is_antiper_a:
            # anti-periodicity halves the modelled angle again
            sym = sym * 2
    else:
        sym = 1
        is_antiper_a = False

    # store orignal elec and make a copy to do temp. modifications
    elec = output.elec
    output.elec = elec.copy()

    # Set rotor angle for the FEMM simulation (one symmetry period, Nt_tot steps)
    angle_offset_initial = output.get_angle_offset_initial()
    angle_rotor = (
        linspace(0, -1 * rot_dir * 2 * pi / sym, Nt_tot, endpoint=False)
        + angle_offset_initial
    )

    # modify some quantities
    output.elec.Time = Data1D(
        name="time",
        unit="s",
        values=(angle_rotor - angle_rotor[0]) / (2 * pi * output.elec.N0 / 60),
    )
    output.elec.Is = None  # to compute Is from Id_ref and Iq_ref (that are mean val.)
    output.elec.Is = output.elec.get_Is()  # TODO get_Is disregards initial rotor angle

    # Open FEMM
    femm = _FEMMHandler()
    if output.elec.internal is None:
        output.elec.internal = OutMagFEMM()
    output.elec.internal.handler_list.append(femm)

    # Setup the FEMM simulation
    # Geometry building and assigning property in FEMM
    FEMM_dict = draw_FEMM(
        femm=femm,
        output=output,
        is_mmfr=1,
        is_mmfs=1,
        sym=sym,
        is_antiper=is_antiper_a,
        type_calc_leakage=obj.type_calc_leakage,
        kgeo_fineness=obj.Kgeo_fineness,  # TODO fix inconsistent lower/upper case
        path_save=path_save,
    )

    # Solve for all time step and store all the results in output
    Phi_wind = L1 * obj.solve_FEMM(femm, output, sym, FEMM_dict)

    # Close FEMM after simulation
    femm.closefemm()
    output.elec.internal.handler_list.remove(femm)

    # Define d axis angle for the d,q transform
    d_angle = (angle_rotor - angle_offset_initial) * zp
    fluxdq = split(n2dq(Phi_wind, d_angle, n=qs), 2, axis=1)

    # restore the original elec
    output.elec = elec

    return fluxdq
| [
"SciDataTool.Classes.Data1D.Data1D",
"os.makedirs",
"os.path.exists",
"numpy.linspace",
"os.path.join"
] | [((1057, 1079), 'os.path.join', 'join', (['path_res', '"""Femm"""'], {}), "(path_res, 'Femm')\n", (1061, 1079), False, 'from os.path import join\n'), ((1466, 1491), 'os.path.join', 'join', (['save_dir', 'file_name'], {}), '(save_dir, file_name)\n', (1470, 1491), False, 'from os.path import join\n'), ((2248, 2353), 'SciDataTool.Classes.Data1D.Data1D', 'Data1D', ([], {'name': '"""time"""', 'unit': '"""s"""', 'values': '((angle_rotor - angle_rotor[0]) / (2 * pi * output.elec.N0 / 60))'}), "(name='time', unit='s', values=(angle_rotor - angle_rotor[0]) / (2 *\n pi * output.elec.N0 / 60))\n", (2254, 2353), False, 'from SciDataTool.Classes.Data1D import Data1D\n'), ((1091, 1115), 'os.path.exists', 'os.path.exists', (['save_dir'], {}), '(save_dir)\n', (1105, 1115), False, 'import os\n'), ((1125, 1146), 'os.makedirs', 'os.makedirs', (['save_dir'], {}), '(save_dir)\n', (1136, 1146), False, 'import os\n'), ((2093, 2157), 'numpy.linspace', 'linspace', (['(0)', '(-1 * rot_dir * 2 * pi / sym)', 'Nt_tot'], {'endpoint': '(False)'}), '(0, -1 * rot_dir * 2 * pi / sym, Nt_tot, endpoint=False)\n', (2101, 2157), False, 'from numpy import linspace, pi, split\n')] |
import torch
import numpy as np
from sklearn.model_selection import train_test_split
from hpbandster.core.worker import Worker
import ConfigSpace.hyperparameters as CSH
import ConfigSpace as CS
from src.probability import pMOM_loglike, diag_w_Gauss_loglike, depth_categorical_VI
from src.utils import Datafeed
from src.datasets import load_flight, gen_spirals, load_gap_UCI
from src.DUN.training_wrappers import DUN_VI
from src.DUN.stochastic_fc_models import arq_uncert_fc_resnet, arq_uncert_fc_MLP
from src.baselines.training_wrappers import regression_baseline_net, regression_baseline_net_VI
from src.baselines.SGD import SGD_regression_homo
from src.baselines.mfvi import MFVI_regression_homo
from src.baselines.dropout import dropout_regression_homo
class DUN_none_Worker(Worker):
    """hpbandster worker that tunes a Depth Uncertainty Network (DUN)
    without any explicit weight regularisation.

    The datasets (``trainset``/``valset``), dimensions (``input_dim``,
    ``output_dim``, ``N_train``), the ``regression`` flag and
    ``early_stop`` are expected to be set on the instance before
    ``compute`` is called (done elsewhere in this file).
    """

    def __init__(self, *args, network, width, batch_size, **kwargs):
        """
        Parameters
        ----------
        network : str
            Architecture family, 'MLP' or 'ResNet'.
        width : int
            Hidden layer width of the DUN.
        batch_size : int
            Mini-batch size for both train and validation loaders.
        """
        super().__init__(*args, **kwargs)
        self.width = width
        self.batch_size = batch_size
        # setup default hyper-parameter search ranges
        self.lr = CSH.UniformFloatHyperparameter('lr', lower=1e-4, upper=1, default_value=1e-2, log=True)
        self.momentum = CSH.UniformFloatHyperparameter('momentum', lower=0.0, upper=0.99, default_value=0.5, log=False)
        self.n_layers = CSH.UniformIntegerHyperparameter('n_layers', lower=1, upper=40, default_value=5)
        self.network = network

    def compute(self, config, budget, working_directory, *args, **kwargs):
        """Train a DUN with the sampled ``config`` for ``budget`` epochs
        and return the result dict produced by ``train_loop``."""
        cuda = torch.cuda.is_available()
        # pin host memory only when a GPU is available (speeds up H2D copies)
        trainloader = torch.utils.data.DataLoader(self.trainset, batch_size=self.batch_size, shuffle=True,
                                                 pin_memory=cuda, num_workers=0)
        valloader = torch.utils.data.DataLoader(self.valset, batch_size=self.batch_size, shuffle=False,
                                               pin_memory=cuda, num_workers=0)

        # uniform categorical prior over network depths 0..n_layers
        n_layers = config['n_layers']
        prior_probs = [1 / (n_layers + 1)] * (n_layers + 1)

        if self.network == 'MLP':
            model = arq_uncert_fc_MLP(self.input_dim, self.output_dim, self.width, n_layers, w_prior=None)
        elif self.network == 'ResNet':
            model = arq_uncert_fc_resnet(self.input_dim, self.output_dim, self.width, n_layers, w_prior=None)
        else:
            # fail loudly instead of raising NameError on the unbound `model` below
            raise ValueError("unknown network type: %r (expected 'MLP' or 'ResNet')" % (self.network,))

        prob_model = depth_categorical_VI(prior_probs, cuda=cuda)
        net = DUN_VI(model, prob_model, self.N_train, lr=config['lr'], momentum=config['momentum'], cuda=cuda,
                     schedule=None, regression=self.regression)

        return train_loop(net, trainloader, valloader, budget, self.early_stop)

    def get_configspace(self):
        """
        It builds the configuration space with the needed hyperparameters.
        """
        cs = CS.ConfigurationSpace()
        cs.add_hyperparameters([self.lr, self.momentum])
        cs.add_hyperparameters([self.n_layers])
        return cs
class DUN_wd_Worker(Worker):
    """hpbandster worker that tunes a Depth Uncertainty Network (DUN)
    regularised with weight decay (DUN_none + weight decay).

    The datasets (``trainset``/``valset``), dimensions (``input_dim``,
    ``output_dim``, ``N_train``), the ``regression`` flag and
    ``early_stop`` are expected to be set on the instance before
    ``compute`` is called (done elsewhere in this file).
    """

    def __init__(self, *args, network, width, batch_size, **kwargs):
        """
        Parameters
        ----------
        network : str
            Architecture family, 'MLP' or 'ResNet'.
        width : int
            Hidden layer width of the DUN.
        batch_size : int
            Mini-batch size for both train and validation loaders.
        """
        super().__init__(*args, **kwargs)
        self.width = width
        self.batch_size = batch_size
        # setup default hyper-parameter search ranges
        self.lr = CSH.UniformFloatHyperparameter('lr', lower=1e-4, upper=1, default_value=1e-2, log=True)
        self.momentum = CSH.UniformFloatHyperparameter('momentum', lower=0.0, upper=0.99, default_value=0.5, log=False)
        self.n_layers = CSH.UniformIntegerHyperparameter('n_layers', lower=1, upper=40, default_value=5)
        self.weight_decay = CSH.UniformFloatHyperparameter('weight_decay', lower=1e-6, upper=1e-1, default_value=5e-4,
                                                       log=True)
        self.network = network

    def compute(self, config, budget, working_directory, *args, **kwargs):
        """Train a weight-decay regularised DUN with the sampled ``config``
        for ``budget`` epochs and return the result dict from ``train_loop``."""
        cuda = torch.cuda.is_available()
        # pin host memory only when a GPU is available (speeds up H2D copies)
        trainloader = torch.utils.data.DataLoader(self.trainset, batch_size=self.batch_size, shuffle=True,
                                                 pin_memory=cuda, num_workers=0)
        valloader = torch.utils.data.DataLoader(self.valset, batch_size=self.batch_size, shuffle=False,
                                               pin_memory=cuda, num_workers=0)

        # uniform categorical prior over network depths 0..n_layers
        n_layers = config['n_layers']
        prior_probs = [1 / (n_layers + 1)] * (n_layers + 1)

        if self.network == 'MLP':
            model = arq_uncert_fc_MLP(self.input_dim, self.output_dim, self.width, n_layers, w_prior=None)
        elif self.network == 'ResNet':
            model = arq_uncert_fc_resnet(self.input_dim, self.output_dim, self.width, n_layers, w_prior=None)
        else:
            # fail loudly instead of raising NameError on the unbound `model` below
            raise ValueError("unknown network type: %r (expected 'MLP' or 'ResNet')" % (self.network,))

        prob_model = depth_categorical_VI(prior_probs, cuda=cuda)
        net = DUN_VI(model, prob_model, self.N_train, lr=config['lr'], momentum=config['momentum'], cuda=cuda,
                     schedule=None, regression=self.regression, weight_decay=config['weight_decay'])

        return train_loop(net, trainloader, valloader, budget, self.early_stop)

    def get_configspace(self):
        """
        It builds the configuration space with the needed hyperparameters.
        """
        cs = CS.ConfigurationSpace()
        cs.add_hyperparameters([self.lr, self.momentum])
        cs.add_hyperparameters([self.n_layers])
        cs.add_hyperparameters([self.weight_decay])
        return cs
class DUN_prior_Worker(Worker):
    """HpBandSter worker that tunes a DUN with an explicit weight prior.

    Searches over learning rate, momentum, depth and the prior family
    ('gauss' or 'pMOM') together with that family's own hyperparameters.
    Dataset-specific subclasses are expected to attach the attributes the
    training code reads (trainset, valset, N_train, input_dim, output_dim,
    early_stop, regression).
    """

    def __init__(self, *args, network, width, batch_size, **kwargs):
        super().__init__(*args, **kwargs)
        self.width = width
        self.batch_size = batch_size
        # Default hyper-parameter search ranges.
        self.lr = CSH.UniformFloatHyperparameter('lr', lower=1e-4, upper=1, default_value=1e-2, log=True)
        self.momentum = CSH.UniformFloatHyperparameter('momentum', lower=0.0, upper=0.99, default_value=0.5, log=False)
        self.n_layers = CSH.UniformIntegerHyperparameter('n_layers', lower=1, upper=40, default_value=5)
        self.prior = CSH.CategoricalHyperparameter('prior', ['gauss', 'pMOM'])
        self.BMA_prior = CSH.CategoricalHyperparameter('BMA_prior', [True, False])
        self.gauss_σ2 = CSH.UniformFloatHyperparameter('gauss_σ2', lower=1e-2, upper=10, default_value=1, log=True)
        self.pMOM_σ2 = CSH.UniformFloatHyperparameter('pMOM_σ2', lower=1e-2, upper=10, default_value=1, log=True)
        self.pMOM_r = CSH.UniformIntegerHyperparameter('pMOM_r', lower=1, upper=3)
        self.network = network

    def compute(self, config, budget, working_directory, *args, **kwargs):
        """Train a DUN with the sampled prior for ``budget`` epochs.

        Raises
        ------
        Exception
            If the sampled prior or ``self.network`` is not recognised
            (previously an unknown network fell through to a NameError
            on `model`).
        """
        cuda = torch.cuda.is_available()
        # Pin host memory only when CUDA is available.
        trainloader = torch.utils.data.DataLoader(self.trainset, batch_size=self.batch_size, shuffle=True,
                                                  pin_memory=cuda, num_workers=0)
        valloader = torch.utils.data.DataLoader(self.valset, batch_size=self.batch_size, shuffle=False,
                                                pin_memory=cuda, num_workers=0)
        # Uniform categorical prior over the n_layers + 1 possible depths.
        n_layers = config['n_layers']
        prior_probs = [1 / (n_layers + 1)] * (n_layers + 1)
        if config['prior'] == 'gauss':
            w_prior = diag_w_Gauss_loglike(μ=0, σ2=config['gauss_σ2'])
        elif config['prior'] == 'pMOM':
            w_prior = pMOM_loglike(r=config['pMOM_r'], τ=1, σ2=config['pMOM_σ2'])
        else:
            raise Exception('We should be using a prior')
        if self.network == 'MLP':
            model = arq_uncert_fc_MLP(self.input_dim, self.output_dim, self.width, n_layers, w_prior=w_prior,
                                      BMA_prior=config['BMA_prior'])
        elif self.network == 'ResNet':
            model = arq_uncert_fc_resnet(self.input_dim, self.output_dim, self.width, n_layers, w_prior=w_prior,
                                         BMA_prior=config['BMA_prior'])
        else:
            raise Exception('network not recognised')
        prob_model = depth_categorical_VI(prior_probs, cuda=cuda)
        net = DUN_VI(model, prob_model, self.N_train, lr=config['lr'], momentum=config['momentum'], cuda=cuda,
                     schedule=None, regression=self.regression)
        return train_loop(net, trainloader, valloader, budget, self.early_stop)

    def get_configspace(self):
        """Build the ConfigSpace holding this worker's hyperparameters."""
        cs = CS.ConfigurationSpace()
        cs.add_hyperparameters([self.lr, self.momentum])
        cs.add_hyperparameters([self.n_layers])
        cs.add_hyperparameters(
            [self.prior, self.gauss_σ2, self.pMOM_σ2, self.pMOM_r])
        cs.add_hyperparameters([self.BMA_prior])
        # Prior-specific hyperparameters are only active for the matching prior family.
        cs.add_condition(CS.EqualsCondition(self.gauss_σ2, self.prior, 'gauss'))
        cs.add_condition(CS.EqualsCondition(self.pMOM_σ2, self.prior, 'pMOM'))
        cs.add_condition(CS.EqualsCondition(self.pMOM_r, self.prior, 'pMOM'))
        return cs
class SGDWorker(Worker):
    """HpBandSter worker for the plain SGD regression baseline."""

    def __init__(self, *args, network, width, batch_size, **kwargs):
        super().__init__(*args, **kwargs)
        self.width = width
        self.batch_size = batch_size
        # Default hyper-parameter search ranges.
        self.lr = CSH.UniformFloatHyperparameter('lr', lower=1e-4, upper=1, default_value=1e-2, log=True)
        self.momentum = CSH.UniformFloatHyperparameter('momentum', lower=0.0, upper=0.99, default_value=0.5,
                                                       log=False)
        self.n_layers = CSH.UniformIntegerHyperparameter('n_layers', lower=1, upper=40, default_value=2)
        self.weight_decay = CSH.UniformFloatHyperparameter('weight_decay', lower=1e-6, upper=1e-1, default_value=5e-4,
                                                           log=True)

    def _make_loaders(self):
        """Return (train, val) DataLoaders, pinning host memory iff CUDA is available."""
        pin = torch.cuda.is_available()
        train = torch.utils.data.DataLoader(self.trainset, batch_size=self.batch_size, shuffle=True,
                                            pin_memory=pin, num_workers=0)
        val = torch.utils.data.DataLoader(self.valset, batch_size=self.batch_size, shuffle=False,
                                          pin_memory=pin, num_workers=0)
        return train, val

    def compute(self, config, budget, working_directory, *args, **kwargs):
        """Train the SGD baseline for ``budget`` epochs and report validation stats."""
        trainloader, valloader = self._make_loaders()
        model = SGD_regression_homo(input_dim=self.input_dim, output_dim=self.output_dim,
                                    width=self.width, n_layers=config['n_layers'])
        net = regression_baseline_net(model, self.N_train, lr=config['lr'], momentum=config['momentum'],
                                      cuda=torch.cuda.is_available(), schedule=None,
                                      weight_decay=config['weight_decay'])
        return train_loop(net, trainloader, valloader, budget, self.early_stop)

    def get_configspace(self):
        """Build the ConfigSpace holding this worker's hyperparameters."""
        space = CS.ConfigurationSpace()
        space.add_hyperparameters([self.lr, self.momentum, self.n_layers, self.weight_decay])
        return space
class MFVIWorker(Worker):
    """HpBandSter worker for the mean-field VI regression baseline."""

    def __init__(self, *args, network, width, batch_size, **kwargs):
        super().__init__(*args, **kwargs)
        self.width = width
        self.batch_size = batch_size
        # Default hyper-parameter search ranges.
        self.lr = CSH.UniformFloatHyperparameter('lr', lower=1e-4, upper=1, default_value=1e-2, log=True)
        self.momentum = CSH.UniformFloatHyperparameter('momentum', lower=0.0, upper=0.99, default_value=0.5,
                                                       log=False)
        self.n_layers = CSH.UniformIntegerHyperparameter('n_layers', lower=1, upper=40, default_value=2)
        self.prior_std = CSH.UniformFloatHyperparameter('prior_std', lower=1e-2, upper=10, default_value=1, log=True)

    def _make_loaders(self):
        """Return (train, val) DataLoaders, pinning host memory iff CUDA is available."""
        pin = torch.cuda.is_available()
        train = torch.utils.data.DataLoader(self.trainset, batch_size=self.batch_size, shuffle=True,
                                            pin_memory=pin, num_workers=0)
        val = torch.utils.data.DataLoader(self.valset, batch_size=self.batch_size, shuffle=False,
                                          pin_memory=pin, num_workers=0)
        return train, val

    def compute(self, config, budget, working_directory, *args, **kwargs):
        """Train the MFVI baseline for ``budget`` epochs and report validation stats."""
        trainloader, valloader = self._make_loaders()
        model = MFVI_regression_homo(input_dim=self.input_dim, output_dim=self.output_dim,
                                     width=self.width, n_layers=config['n_layers'],
                                     prior_sig=config['prior_std'])
        net = regression_baseline_net_VI(model, self.N_train, lr=config['lr'], momentum=config['momentum'],
                                         cuda=torch.cuda.is_available(), schedule=None,
                                         MC_samples=20, train_samples=3)
        return train_loop(net, trainloader, valloader, budget, self.early_stop)

    def get_configspace(self):
        """Build the ConfigSpace holding this worker's hyperparameters."""
        space = CS.ConfigurationSpace()
        space.add_hyperparameters([self.lr, self.momentum, self.n_layers, self.prior_std])
        return space
class DropoutWorker(Worker):
    """HpBandSter worker for the MC-dropout regression baseline."""

    def __init__(self, *args, network, width, batch_size, **kwargs):
        super().__init__(*args, **kwargs)
        self.width = width
        self.batch_size = batch_size
        # Default hyper-parameter search ranges.
        self.lr = CSH.UniformFloatHyperparameter('lr', lower=1e-4, upper=1, default_value=1e-2, log=True)
        self.momentum = CSH.UniformFloatHyperparameter('momentum', lower=0.0, upper=0.99, default_value=0.5,
                                                       log=False)
        self.n_layers = CSH.UniformIntegerHyperparameter('n_layers', lower=1, upper=40, default_value=2)
        self.weight_decay = CSH.UniformFloatHyperparameter('weight_decay', lower=1e-6, upper=1e-1, default_value=5e-4,
                                                           log=True)
        self.p_drop = CSH.UniformFloatHyperparameter('p_drop', lower=0.005, upper=0.5, default_value=0.2, log=True)

    def _make_loaders(self):
        """Return (train, val) DataLoaders, pinning host memory iff CUDA is available."""
        pin = torch.cuda.is_available()
        train = torch.utils.data.DataLoader(self.trainset, batch_size=self.batch_size, shuffle=True,
                                            pin_memory=pin, num_workers=0)
        val = torch.utils.data.DataLoader(self.valset, batch_size=self.batch_size, shuffle=False,
                                          pin_memory=pin, num_workers=0)
        return train, val

    def compute(self, config, budget, working_directory, *args, **kwargs):
        """Train the dropout baseline for ``budget`` epochs and report validation stats."""
        trainloader, valloader = self._make_loaders()
        model = dropout_regression_homo(input_dim=self.input_dim, output_dim=self.output_dim,
                                        width=self.width, n_layers=config['n_layers'],
                                        p_drop=config['p_drop'])
        net = regression_baseline_net(model, self.N_train, lr=config['lr'], momentum=config['momentum'],
                                      cuda=torch.cuda.is_available(), schedule=None,
                                      MC_samples=20, weight_decay=config['weight_decay'])
        return train_loop(net, trainloader, valloader, budget, self.early_stop)

    def get_configspace(self):
        """Build the ConfigSpace holding this worker's hyperparameters."""
        space = CS.ConfigurationSpace()
        space.add_hyperparameters([self.lr, self.momentum, self.n_layers,
                                   self.weight_decay, self.p_drop])
        return space
def assign_model_class(model_name):
    """Resolve a model-name string to the matching worker base class.

    Raises
    ------
    Exception
        If ``model_name`` is not one of the known model identifiers.
    """
    if model_name == 'DUN_none':
        return DUN_none_Worker
    if model_name == 'DUN_wd':
        return DUN_wd_Worker
    if model_name == 'DUN_prior':
        return DUN_prior_Worker
    if model_name == 'Dropout':
        return DropoutWorker
    if model_name == 'MFVI':
        return MFVIWorker
    if model_name == 'SGD':
        return SGDWorker
    raise Exception('model name not recognised')
def create_SpiralsWorker(model, network, width, batch_size):
    """Build a worker class bound to the two-armed spirals toy dataset.

    The returned class derives from the worker selected by ``model`` and
    attaches the dataset attributes its ``compute`` reads (trainset, valset,
    N_train, input_dim, output_dim, early_stop, regression).
    """
    base_class = assign_model_class(model)

    class SpiralsWorker(base_class):
        def __init__(self, *args, early_stop=None, **kwargs):
            super().__init__(*args, network=network, width=width, batch_size=batch_size, **kwargs)
            # Generate the spirals dataset; fixed random_state keeps runs reproducible.
            X, y = gen_spirals(n_samples=2000, shuffle=True, noise=0.2, random_state=1234,
                               n_arms=2, start_angle=0, stop_angle=720)
            # NOTE(review): test_size=0.8 keeps only 20% of points for training — confirm intended.
            X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.8, random_state=1234)
            # Standardise inputs using statistics from the training split only;
            # the stats must be computed before X_train is overwritten below.
            x_means, x_stds = X_train.mean(axis=0), X_train.std(axis=0)
            X_train = ((X_train - x_means) / x_stds).astype(np.float32)
            X_test = ((X_test - x_means) / x_stds).astype(np.float32)
            y_train = y_train.astype(np.float32)
            y_test = y_test.astype(np.float32)
            self.trainset = Datafeed(X_train, y_train, transform=None)
            self.valset = Datafeed(X_test, y_test, transform=None)
            self.N_train = X_train.shape[0]
            self.input_dim = 2
            self.output_dim = 2
            self.early_stop = early_stop
            # Spirals is a classification task, not regression.
            self.regression = False

    return SpiralsWorker
def create_FlightWorker(model, network, width, batch_size):
    """Build a worker class bound to the flight-delay dataset.

    The returned class derives from the worker selected by ``model`` and
    attaches the dataset attributes its ``compute`` reads (trainset, valset,
    N_train, input_dim, output_dim, early_stop, regression).
    """
    base_class = assign_model_class(model)

    class FlightWorker(base_class):
        def __init__(self, *args, base_dir='nb_dir/data/', prop_val=0.05, k800=False, early_stop=None, **kwargs):
            super().__init__(*args, network=network, width=width, batch_size=batch_size, **kwargs)
            # load_flight appears to return pre-standardised splits together with the
            # normalisation stats — undo that normalisation here so the data can be
            # re-standardised using the (smaller) training subset only.
            X_train, X_test, x_means, x_stds, y_train, y_test, y_means, y_stds = load_flight(base_dir, k800=k800)
            X_train = (X_train * x_stds) + x_means
            y_train = (y_train * y_stds) + y_means
            # Hold out the last prop_val fraction of the train split for validation.
            Ntrain = int(X_train.shape[0] * (1-prop_val))
            X_val = X_train[Ntrain:]
            y_val = y_train[Ntrain:]
            X_train = X_train[:Ntrain]
            y_train = y_train[:Ntrain]
            # Recompute standardisation stats from the reduced train set; the stats
            # must be taken before X_train / y_train are overwritten below.
            x_means, x_stds = X_train.mean(axis=0), X_train.std(axis=0)
            y_means, y_stds = y_train.mean(axis=0), y_train.std(axis=0)
            # Guard against division by ~0 for (near-)constant input features.
            x_stds[x_stds < 1e-10] = 1.
            X_train = ((X_train - x_means) / x_stds)
            y_train = ((y_train - y_means) / y_stds)
            X_val = ((X_val - x_means) / x_stds)
            y_val = ((y_val - y_means) / y_stds)
            self.trainset = Datafeed(X_train, y_train, transform=None)
            self.valset = Datafeed(X_val, y_val, transform=None)
            self.N_train = X_train.shape[0]
            self.input_dim = X_train.shape[1]
            self.output_dim = y_train.shape[1]
            self.early_stop = early_stop
            self.regression = True

    return FlightWorker
def create_UCIWorker(model, network, width, batch_size):
    """Build a worker class bound to a UCI regression dataset (optionally a 'gap' split).

    The returned class derives from the worker selected by ``model`` and
    attaches the dataset attributes its ``compute`` reads (trainset, valset,
    N_train, input_dim, output_dim, early_stop, regression).
    """
    base_class = assign_model_class(model)

    class UCI_worker(base_class):
        def __init__(self, dname, *args, base_dir='nb_dir/data/', prop_val=0.15, n_split=0, early_stop=None, **kwargs):
            super().__init__(*args, network=network, width=width, batch_size=batch_size, **kwargs)
            gap = False
            if dname in ['boston', 'concrete', 'energy', 'power', 'wine', 'yacht', 'kin8nm', 'naval', 'protein']:
                pass
            elif dname in ['boston_gap', 'concrete_gap', 'energy_gap', 'power_gap', 'wine_gap', 'yacht_gap',
                           'kin8nm_gap', 'naval_gap', 'protein_gap']:
                gap = True
                # Strip the '_gap' suffix to recover the underlying dataset name.
                dname = dname[:-4]
            # load_gap_UCI appears to return pre-standardised splits together with
            # the normalisation stats — undo that normalisation so the data can be
            # re-standardised using the (smaller) training subset only.
            X_train, X_test, x_means, x_stds, y_train, y_test, y_means, y_stds = \
                load_gap_UCI(base_dir=base_dir, dname=dname, n_split=n_split, gap=gap)
            X_train = (X_train * x_stds) + x_means
            y_train = (y_train * y_stds) + y_means
            # Hold out the last prop_val fraction of the train split for validation.
            Ntrain = int(X_train.shape[0] * (1-prop_val))
            X_val = X_train[Ntrain:]
            y_val = y_train[Ntrain:]
            X_train = X_train[:Ntrain]
            y_train = y_train[:Ntrain]
            # Recompute standardisation stats from the reduced train set; the stats
            # must be taken before X_train / y_train are overwritten below.
            x_means, x_stds = X_train.mean(axis=0), X_train.std(axis=0)
            y_means, y_stds = y_train.mean(axis=0), y_train.std(axis=0)
            # Guard against division by ~0 for (near-)constant input features.
            x_stds[x_stds < 1e-10] = 1.
            X_train = ((X_train - x_means) / x_stds)
            y_train = ((y_train - y_means) / y_stds)
            X_val = ((X_val - x_means) / x_stds)
            y_val = ((y_val - y_means) / y_stds)
            self.trainset = Datafeed(X_train, y_train, transform=None)
            self.valset = Datafeed(X_val, y_val, transform=None)
            self.N_train = X_train.shape[0]
            self.input_dim = X_train.shape[1]
            self.output_dim = y_train.shape[1]
            self.early_stop = early_stop
            self.regression = True

    return UCI_worker
def train_loop(net, trainloader, valloader, budget, early_stop=None):
    """Train ``net`` for up to ``budget`` epochs and report stats at the best epoch.

    Parameters
    ----------
    net : object
        Wrapper exposing ``fit(x, y) -> (MLL, NLL, err)``,
        ``eval(x, y) -> (NLL, err)``, ``update_lr()`` and
        ``get_nb_parameters()``.
    trainloader, valloader : iterable
        Yield ``(x, y)`` minibatches; ``x`` must expose ``shape[0]`` / ``len``.
    budget : float
        Maximum number of epochs (HpBandSter budget; truncated to int).
    early_stop : int or None
        Stop when validation NLL has not improved for this many epochs.

    Returns
    -------
    dict
        HpBandSter result: 'loss' is the best validation NLL (minimised) and
        'info' carries each tracked metric at that best epoch.
    """
    train_NLLs = []
    train_errs = []
    MLL_ests = []
    valid_NLLs = []
    valid_errs = []
    prev_best_epoch = 0
    prev_best_NLL = np.inf
    for i in range(int(budget)):
        print('it %d / %d' % (i, budget))
        # --- one training epoch, accumulating per-sample averages ---
        nb_samples = 0
        MLL_est = 0
        train_err = 0
        train_NLL = 0
        for x, y in trainloader:
            MLL, NLL, err = net.fit(x, y)
            train_NLL += NLL * x.shape[0]
            train_err += err * x.shape[0]
            MLL_est += MLL
            nb_samples += len(x)
        train_NLL /= nb_samples
        train_err /= nb_samples
        MLL_est /= nb_samples
        train_NLLs.append(train_NLL)
        train_errs.append(train_err)
        # Bug fix: record the per-sample MLL estimate computed above, not the
        # raw MLL of whichever batch happened to come last.
        MLL_ests.append(MLL_est)
        net.update_lr()
        # --- evaluate on the validation set ---
        nb_samples = 0
        valid_NLL = 0
        valid_err = 0
        for x, y in valloader:
            NLL, err = net.eval(x, y)
            valid_NLL += NLL * x.shape[0]
            valid_err += err * x.shape[0]
            nb_samples += len(x)
        valid_NLL /= nb_samples
        valid_err /= nb_samples
        valid_NLLs.append(valid_NLL)
        # Bug fix: `x == np.nan` is always False (NaN never compares equal);
        # use np.isnan so a NaN epoch is stored as +inf and can never win argmin.
        if np.isnan(valid_NLLs[-1]):
            valid_NLLs[-1] = np.inf
        valid_errs.append(valid_err)
        if i > 0 and np.isnan(valid_NLL):  # don't finish runs that NaN
            print('STOPPING DUE TO NAN')
            break
        # Track the best epoch seen so far to drive early stopping.
        if valid_NLL < prev_best_NLL:
            prev_best_NLL = valid_NLL
            prev_best_epoch = i
        if early_stop is not None and (i - prev_best_epoch) > early_stop:
            print('EARLY STOPPING due to no improvement for %d epochs' % early_stop)
            break
    best_itr = np.argmin(valid_NLLs)
    print('best_itr: %d' % best_itr)
    return ({
        'loss': valid_NLLs[best_itr],  # remember: HpBandSter always minimizes!
        'info': {
            'best iteration': float(best_itr),
            'train err': train_errs[best_itr],
            'valid err': valid_errs[best_itr],
            'train NLL': train_NLLs[best_itr],
            'valid NLL': valid_NLLs[best_itr],
            'MLL est': MLL_ests[best_itr],
            'number of parameters': net.get_nb_parameters(),
        }
    })
| [
"src.DUN.stochastic_fc_models.arq_uncert_fc_MLP",
"sklearn.model_selection.train_test_split",
"src.utils.Datafeed",
"numpy.argmin",
"src.datasets.load_flight",
"numpy.isnan",
"ConfigSpace.EqualsCondition",
"ConfigSpace.hyperparameters.UniformFloatHyperparameter",
"ConfigSpace.ConfigurationSpace",
... | [((25852, 25873), 'numpy.argmin', 'np.argmin', (['valid_NLLs'], {}), '(valid_NLLs)\n', (25861, 25873), True, 'import numpy as np\n'), ((1040, 1134), 'ConfigSpace.hyperparameters.UniformFloatHyperparameter', 'CSH.UniformFloatHyperparameter', (['"""lr"""'], {'lower': '(0.0001)', 'upper': '(1)', 'default_value': '(0.01)', 'log': '(True)'}), "('lr', lower=0.0001, upper=1, default_value=\n 0.01, log=True)\n", (1070, 1134), True, 'import ConfigSpace.hyperparameters as CSH\n'), ((1152, 1251), 'ConfigSpace.hyperparameters.UniformFloatHyperparameter', 'CSH.UniformFloatHyperparameter', (['"""momentum"""'], {'lower': '(0.0)', 'upper': '(0.99)', 'default_value': '(0.5)', 'log': '(False)'}), "('momentum', lower=0.0, upper=0.99,\n default_value=0.5, log=False)\n", (1182, 1251), True, 'import ConfigSpace.hyperparameters as CSH\n'), ((1272, 1357), 'ConfigSpace.hyperparameters.UniformIntegerHyperparameter', 'CSH.UniformIntegerHyperparameter', (['"""n_layers"""'], {'lower': '(1)', 'upper': '(40)', 'default_value': '(5)'}), "('n_layers', lower=1, upper=40, default_value=5\n )\n", (1304, 1357), True, 'import ConfigSpace.hyperparameters as CSH\n'), ((1500, 1525), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (1523, 1525), False, 'import torch\n'), ((2456, 2481), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (2479, 2481), False, 'import torch\n'), ((2795, 2839), 'src.probability.depth_categorical_VI', 'depth_categorical_VI', (['prior_probs'], {'cuda': 'cuda'}), '(prior_probs, cuda=cuda)\n', (2815, 2839), False, 'from src.probability import pMOM_loglike, diag_w_Gauss_loglike, depth_categorical_VI\n'), ((2854, 2998), 'src.DUN.training_wrappers.DUN_VI', 'DUN_VI', (['model', 'prob_model', 'self.N_train'], {'lr': "config['lr']", 'momentum': "config['momentum']", 'cuda': 'cuda', 'schedule': 'None', 'regression': 'self.regression'}), "(model, prob_model, self.N_train, lr=config['lr'], momentum=config[\n 'momentum'], cuda=cuda, 
schedule=None, regression=self.regression)\n", (2860, 2998), False, 'from src.DUN.training_wrappers import DUN_VI\n'), ((3240, 3263), 'ConfigSpace.ConfigurationSpace', 'CS.ConfigurationSpace', ([], {}), '()\n', (3261, 3263), True, 'import ConfigSpace as CS\n'), ((3698, 3792), 'ConfigSpace.hyperparameters.UniformFloatHyperparameter', 'CSH.UniformFloatHyperparameter', (['"""lr"""'], {'lower': '(0.0001)', 'upper': '(1)', 'default_value': '(0.01)', 'log': '(True)'}), "('lr', lower=0.0001, upper=1, default_value=\n 0.01, log=True)\n", (3728, 3792), True, 'import ConfigSpace.hyperparameters as CSH\n'), ((3810, 3909), 'ConfigSpace.hyperparameters.UniformFloatHyperparameter', 'CSH.UniformFloatHyperparameter', (['"""momentum"""'], {'lower': '(0.0)', 'upper': '(0.99)', 'default_value': '(0.5)', 'log': '(False)'}), "('momentum', lower=0.0, upper=0.99,\n default_value=0.5, log=False)\n", (3840, 3909), True, 'import ConfigSpace.hyperparameters as CSH\n'), ((3930, 4015), 'ConfigSpace.hyperparameters.UniformIntegerHyperparameter', 'CSH.UniformIntegerHyperparameter', (['"""n_layers"""'], {'lower': '(1)', 'upper': '(40)', 'default_value': '(5)'}), "('n_layers', lower=1, upper=40, default_value=5\n )\n", (3962, 4015), True, 'import ConfigSpace.hyperparameters as CSH\n'), ((4039, 4145), 'ConfigSpace.hyperparameters.UniformFloatHyperparameter', 'CSH.UniformFloatHyperparameter', (['"""weight_decay"""'], {'lower': '(1e-06)', 'upper': '(0.1)', 'default_value': '(0.0005)', 'log': '(True)'}), "('weight_decay', lower=1e-06, upper=0.1,\n default_value=0.0005, log=True)\n", (4069, 4145), True, 'import ConfigSpace.hyperparameters as CSH\n'), ((4345, 4370), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (4368, 4370), False, 'import torch\n'), ((5301, 5326), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (5324, 5326), False, 'import torch\n'), ((5640, 5684), 'src.probability.depth_categorical_VI', 'depth_categorical_VI', (['prior_probs'], 
{'cuda': 'cuda'}), '(prior_probs, cuda=cuda)\n', (5660, 5684), False, 'from src.probability import pMOM_loglike, diag_w_Gauss_loglike, depth_categorical_VI\n'), ((5699, 5884), 'src.DUN.training_wrappers.DUN_VI', 'DUN_VI', (['model', 'prob_model', 'self.N_train'], {'lr': "config['lr']", 'momentum': "config['momentum']", 'cuda': 'cuda', 'schedule': 'None', 'regression': 'self.regression', 'weight_decay': "config['weight_decay']"}), "(model, prob_model, self.N_train, lr=config['lr'], momentum=config[\n 'momentum'], cuda=cuda, schedule=None, regression=self.regression,\n weight_decay=config['weight_decay'])\n", (5705, 5884), False, 'from src.DUN.training_wrappers import DUN_VI\n'), ((6122, 6145), 'ConfigSpace.ConfigurationSpace', 'CS.ConfigurationSpace', ([], {}), '()\n', (6143, 6145), True, 'import ConfigSpace as CS\n'), ((6605, 6699), 'ConfigSpace.hyperparameters.UniformFloatHyperparameter', 'CSH.UniformFloatHyperparameter', (['"""lr"""'], {'lower': '(0.0001)', 'upper': '(1)', 'default_value': '(0.01)', 'log': '(True)'}), "('lr', lower=0.0001, upper=1, default_value=\n 0.01, log=True)\n", (6635, 6699), True, 'import ConfigSpace.hyperparameters as CSH\n'), ((6717, 6816), 'ConfigSpace.hyperparameters.UniformFloatHyperparameter', 'CSH.UniformFloatHyperparameter', (['"""momentum"""'], {'lower': '(0.0)', 'upper': '(0.99)', 'default_value': '(0.5)', 'log': '(False)'}), "('momentum', lower=0.0, upper=0.99,\n default_value=0.5, log=False)\n", (6747, 6816), True, 'import ConfigSpace.hyperparameters as CSH\n'), ((6838, 6923), 'ConfigSpace.hyperparameters.UniformIntegerHyperparameter', 'CSH.UniformIntegerHyperparameter', (['"""n_layers"""'], {'lower': '(1)', 'upper': '(40)', 'default_value': '(5)'}), "('n_layers', lower=1, upper=40, default_value=5\n )\n", (6870, 6923), True, 'import ConfigSpace.hyperparameters as CSH\n'), ((6941, 6998), 'ConfigSpace.hyperparameters.CategoricalHyperparameter', 'CSH.CategoricalHyperparameter', (['"""prior"""', "['gauss', 'pMOM']"], {}), 
"('prior', ['gauss', 'pMOM'])\n", (6970, 6998), True, 'import ConfigSpace.hyperparameters as CSH\n'), ((7024, 7081), 'ConfigSpace.hyperparameters.CategoricalHyperparameter', 'CSH.CategoricalHyperparameter', (['"""BMA_prior"""', '[True, False]'], {}), "('BMA_prior', [True, False])\n", (7053, 7081), True, 'import ConfigSpace.hyperparameters as CSH\n'), ((7107, 7202), 'ConfigSpace.hyperparameters.UniformFloatHyperparameter', 'CSH.UniformFloatHyperparameter', (['"""gauss_σ2"""'], {'lower': '(0.01)', 'upper': '(10)', 'default_value': '(1)', 'log': '(True)'}), "('gauss_σ2', lower=0.01, upper=10,\n default_value=1, log=True)\n", (7137, 7202), True, 'import ConfigSpace.hyperparameters as CSH\n'), ((7222, 7316), 'ConfigSpace.hyperparameters.UniformFloatHyperparameter', 'CSH.UniformFloatHyperparameter', (['"""pMOM_σ2"""'], {'lower': '(0.01)', 'upper': '(10)', 'default_value': '(1)', 'log': '(True)'}), "('pMOM_σ2', lower=0.01, upper=10,\n default_value=1, log=True)\n", (7252, 7316), True, 'import ConfigSpace.hyperparameters as CSH\n'), ((7334, 7394), 'ConfigSpace.hyperparameters.UniformIntegerHyperparameter', 'CSH.UniformIntegerHyperparameter', (['"""pMOM_r"""'], {'lower': '(1)', 'upper': '(3)'}), "('pMOM_r', lower=1, upper=3)\n", (7366, 7394), True, 'import ConfigSpace.hyperparameters as CSH\n'), ((7542, 7567), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (7565, 7567), False, 'import torch\n'), ((8500, 8525), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (8523, 8525), False, 'import torch\n'), ((9291, 9335), 'src.probability.depth_categorical_VI', 'depth_categorical_VI', (['prior_probs'], {'cuda': 'cuda'}), '(prior_probs, cuda=cuda)\n', (9311, 9335), False, 'from src.probability import pMOM_loglike, diag_w_Gauss_loglike, depth_categorical_VI\n'), ((9350, 9494), 'src.DUN.training_wrappers.DUN_VI', 'DUN_VI', (['model', 'prob_model', 'self.N_train'], {'lr': "config['lr']", 'momentum': "config['momentum']", 'cuda': 'cuda', 
'schedule': 'None', 'regression': 'self.regression'}), "(model, prob_model, self.N_train, lr=config['lr'], momentum=config[\n 'momentum'], cuda=cuda, schedule=None, regression=self.regression)\n", (9356, 9494), False, 'from src.DUN.training_wrappers import DUN_VI\n'), ((9736, 9759), 'ConfigSpace.ConfigurationSpace', 'CS.ConfigurationSpace', ([], {}), '()\n', (9757, 9759), True, 'import ConfigSpace as CS\n'), ((10034, 10088), 'ConfigSpace.EqualsCondition', 'CS.EqualsCondition', (['self.gauss_σ2', 'self.prior', '"""gauss"""'], {}), "(self.gauss_σ2, self.prior, 'gauss')\n", (10052, 10088), True, 'import ConfigSpace as CS\n'), ((10135, 10187), 'ConfigSpace.EqualsCondition', 'CS.EqualsCondition', (['self.pMOM_σ2', 'self.prior', '"""pMOM"""'], {}), "(self.pMOM_σ2, self.prior, 'pMOM')\n", (10153, 10187), True, 'import ConfigSpace as CS\n'), ((10234, 10285), 'ConfigSpace.EqualsCondition', 'CS.EqualsCondition', (['self.pMOM_r', 'self.prior', '"""pMOM"""'], {}), "(self.pMOM_r, self.prior, 'pMOM')\n", (10252, 10285), True, 'import ConfigSpace as CS\n'), ((10612, 10706), 'ConfigSpace.hyperparameters.UniformFloatHyperparameter', 'CSH.UniformFloatHyperparameter', (['"""lr"""'], {'lower': '(0.0001)', 'upper': '(1)', 'default_value': '(0.01)', 'log': '(True)'}), "('lr', lower=0.0001, upper=1, default_value=\n 0.01, log=True)\n", (10642, 10706), True, 'import ConfigSpace.hyperparameters as CSH\n'), ((10724, 10823), 'ConfigSpace.hyperparameters.UniformFloatHyperparameter', 'CSH.UniformFloatHyperparameter', (['"""momentum"""'], {'lower': '(0.0)', 'upper': '(0.99)', 'default_value': '(0.5)', 'log': '(False)'}), "('momentum', lower=0.0, upper=0.99,\n default_value=0.5, log=False)\n", (10754, 10823), True, 'import ConfigSpace.hyperparameters as CSH\n'), ((10899, 10984), 'ConfigSpace.hyperparameters.UniformIntegerHyperparameter', 'CSH.UniformIntegerHyperparameter', (['"""n_layers"""'], {'lower': '(1)', 'upper': '(40)', 'default_value': '(2)'}), "('n_layers', lower=1, upper=40, 
default_value=2\n )\n", (10931, 10984), True, 'import ConfigSpace.hyperparameters as CSH\n'), ((11008, 11114), 'ConfigSpace.hyperparameters.UniformFloatHyperparameter', 'CSH.UniformFloatHyperparameter', (['"""weight_decay"""'], {'lower': '(1e-06)', 'upper': '(0.1)', 'default_value': '(0.0005)', 'log': '(True)'}), "('weight_decay', lower=1e-06, upper=0.1,\n default_value=0.0005, log=True)\n", (11038, 11114), True, 'import ConfigSpace.hyperparameters as CSH\n'), ((11283, 11308), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (11306, 11308), False, 'import torch\n'), ((12180, 12205), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (12203, 12205), False, 'import torch\n'), ((12223, 12337), 'src.baselines.SGD.SGD_regression_homo', 'SGD_regression_homo', ([], {'input_dim': 'self.input_dim', 'output_dim': 'self.output_dim', 'width': 'self.width', 'n_layers': 'n_layers'}), '(input_dim=self.input_dim, output_dim=self.output_dim,\n width=self.width, n_layers=n_layers)\n', (12242, 12337), False, 'from src.baselines.SGD import SGD_regression_homo\n'), ((12384, 12547), 'src.baselines.training_wrappers.regression_baseline_net', 'regression_baseline_net', (['model', 'self.N_train'], {'lr': "config['lr']", 'momentum': "config['momentum']", 'cuda': 'cuda', 'schedule': 'None', 'weight_decay': "config['weight_decay']"}), "(model, self.N_train, lr=config['lr'], momentum=\n config['momentum'], cuda=cuda, schedule=None, weight_decay=config[\n 'weight_decay'])\n", (12407, 12547), False, 'from src.baselines.training_wrappers import regression_baseline_net, regression_baseline_net_VI\n'), ((12801, 12824), 'ConfigSpace.ConfigurationSpace', 'CS.ConfigurationSpace', ([], {}), '()\n', (12822, 12824), True, 'import ConfigSpace as CS\n'), ((13277, 13371), 'ConfigSpace.hyperparameters.UniformFloatHyperparameter', 'CSH.UniformFloatHyperparameter', (['"""lr"""'], {'lower': '(0.0001)', 'upper': '(1)', 'default_value': '(0.01)', 'log': '(True)'}), 
"('lr', lower=0.0001, upper=1, default_value=\n 0.01, log=True)\n", (13307, 13371), True, 'import ConfigSpace.hyperparameters as CSH\n'), ((13389, 13488), 'ConfigSpace.hyperparameters.UniformFloatHyperparameter', 'CSH.UniformFloatHyperparameter', (['"""momentum"""'], {'lower': '(0.0)', 'upper': '(0.99)', 'default_value': '(0.5)', 'log': '(False)'}), "('momentum', lower=0.0, upper=0.99,\n default_value=0.5, log=False)\n", (13419, 13488), True, 'import ConfigSpace.hyperparameters as CSH\n'), ((13564, 13649), 'ConfigSpace.hyperparameters.UniformIntegerHyperparameter', 'CSH.UniformIntegerHyperparameter', (['"""n_layers"""'], {'lower': '(1)', 'upper': '(40)', 'default_value': '(2)'}), "('n_layers', lower=1, upper=40, default_value=2\n )\n", (13596, 13649), True, 'import ConfigSpace.hyperparameters as CSH\n'), ((13670, 13766), 'ConfigSpace.hyperparameters.UniformFloatHyperparameter', 'CSH.UniformFloatHyperparameter', (['"""prior_std"""'], {'lower': '(0.01)', 'upper': '(10)', 'default_value': '(1)', 'log': '(True)'}), "('prior_std', lower=0.01, upper=10,\n default_value=1, log=True)\n", (13700, 13766), True, 'import ConfigSpace.hyperparameters as CSH\n'), ((13878, 13903), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (13901, 13903), False, 'import torch\n'), ((14775, 14800), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (14798, 14800), False, 'import torch\n'), ((14818, 14964), 'src.baselines.mfvi.MFVI_regression_homo', 'MFVI_regression_homo', ([], {'input_dim': 'self.input_dim', 'output_dim': 'self.output_dim', 'width': 'self.width', 'n_layers': 'n_layers', 'prior_sig': "config['prior_std']"}), "(input_dim=self.input_dim, output_dim=self.output_dim,\n width=self.width, n_layers=n_layers, prior_sig=config['prior_std'])\n", (14838, 14964), False, 'from src.baselines.mfvi import MFVI_regression_homo\n'), ((15013, 15173), 'src.baselines.training_wrappers.regression_baseline_net_VI', 'regression_baseline_net_VI', (['model', 
'self.N_train'], {'lr': "config['lr']", 'momentum': "config['momentum']", 'cuda': 'cuda', 'schedule': 'None', 'MC_samples': '(20)', 'train_samples': '(3)'}), "(model, self.N_train, lr=config['lr'], momentum=\n config['momentum'], cuda=cuda, schedule=None, MC_samples=20,\n train_samples=3)\n", (15039, 15173), False, 'from src.baselines.training_wrappers import regression_baseline_net, regression_baseline_net_VI\n'), ((15431, 15454), 'ConfigSpace.ConfigurationSpace', 'CS.ConfigurationSpace', ([], {}), '()\n', (15452, 15454), True, 'import ConfigSpace as CS\n'), ((15907, 16001), 'ConfigSpace.hyperparameters.UniformFloatHyperparameter', 'CSH.UniformFloatHyperparameter', (['"""lr"""'], {'lower': '(0.0001)', 'upper': '(1)', 'default_value': '(0.01)', 'log': '(True)'}), "('lr', lower=0.0001, upper=1, default_value=\n 0.01, log=True)\n", (15937, 16001), True, 'import ConfigSpace.hyperparameters as CSH\n'), ((16019, 16118), 'ConfigSpace.hyperparameters.UniformFloatHyperparameter', 'CSH.UniformFloatHyperparameter', (['"""momentum"""'], {'lower': '(0.0)', 'upper': '(0.99)', 'default_value': '(0.5)', 'log': '(False)'}), "('momentum', lower=0.0, upper=0.99,\n default_value=0.5, log=False)\n", (16049, 16118), True, 'import ConfigSpace.hyperparameters as CSH\n'), ((16194, 16279), 'ConfigSpace.hyperparameters.UniformIntegerHyperparameter', 'CSH.UniformIntegerHyperparameter', (['"""n_layers"""'], {'lower': '(1)', 'upper': '(40)', 'default_value': '(2)'}), "('n_layers', lower=1, upper=40, default_value=2\n )\n", (16226, 16279), True, 'import ConfigSpace.hyperparameters as CSH\n'), ((16304, 16410), 'ConfigSpace.hyperparameters.UniformFloatHyperparameter', 'CSH.UniformFloatHyperparameter', (['"""weight_decay"""'], {'lower': '(1e-06)', 'upper': '(0.1)', 'default_value': '(0.0005)', 'log': '(True)'}), "('weight_decay', lower=1e-06, upper=0.1,\n default_value=0.0005, log=True)\n", (16334, 16410), True, 'import ConfigSpace.hyperparameters as CSH\n'), ((16486, 16583), 
'ConfigSpace.hyperparameters.UniformFloatHyperparameter', 'CSH.UniformFloatHyperparameter', (['"""p_drop"""'], {'lower': '(0.005)', 'upper': '(0.5)', 'default_value': '(0.2)', 'log': '(True)'}), "('p_drop', lower=0.005, upper=0.5,\n default_value=0.2, log=True)\n", (16516, 16583), True, 'import ConfigSpace.hyperparameters as CSH\n'), ((16695, 16720), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (16718, 16720), False, 'import torch\n'), ((17592, 17617), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (17615, 17617), False, 'import torch\n'), ((17635, 17779), 'src.baselines.dropout.dropout_regression_homo', 'dropout_regression_homo', ([], {'input_dim': 'self.input_dim', 'output_dim': 'self.output_dim', 'width': 'self.width', 'n_layers': 'n_layers', 'p_drop': "config['p_drop']"}), "(input_dim=self.input_dim, output_dim=self.\n output_dim, width=self.width, n_layers=n_layers, p_drop=config['p_drop'])\n", (17658, 17779), False, 'from src.baselines.dropout import dropout_regression_homo\n'), ((17829, 18006), 'src.baselines.training_wrappers.regression_baseline_net', 'regression_baseline_net', (['model', 'self.N_train'], {'lr': "config['lr']", 'momentum': "config['momentum']", 'cuda': 'cuda', 'schedule': 'None', 'MC_samples': '(20)', 'weight_decay': "config['weight_decay']"}), "(model, self.N_train, lr=config['lr'], momentum=\n config['momentum'], cuda=cuda, schedule=None, MC_samples=20,\n weight_decay=config['weight_decay'])\n", (17852, 18006), False, 'from src.baselines.training_wrappers import regression_baseline_net, regression_baseline_net_VI\n'), ((18261, 18284), 'ConfigSpace.ConfigurationSpace', 'CS.ConfigurationSpace', ([], {}), '()\n', (18282, 18284), True, 'import ConfigSpace as CS\n'), ((1553, 1673), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['self.trainset'], {'batch_size': 'self.batch_size', 'shuffle': '(True)', 'pin_memory': '(True)', 'num_workers': '(0)'}), '(self.trainset, 
batch_size=self.batch_size,\n shuffle=True, pin_memory=True, num_workers=0)\n', (1580, 1673), False, 'import torch\n'), ((1748, 1867), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['self.valset'], {'batch_size': 'self.batch_size', 'shuffle': '(False)', 'pin_memory': '(True)', 'num_workers': '(0)'}), '(self.valset, batch_size=self.batch_size,\n shuffle=False, pin_memory=True, num_workers=0)\n', (1775, 1867), False, 'import torch\n'), ((1956, 2077), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['self.trainset'], {'batch_size': 'self.batch_size', 'shuffle': '(True)', 'pin_memory': '(False)', 'num_workers': '(0)'}), '(self.trainset, batch_size=self.batch_size,\n shuffle=True, pin_memory=False, num_workers=0)\n', (1983, 2077), False, 'import torch\n'), ((2152, 2272), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['self.valset'], {'batch_size': 'self.batch_size', 'shuffle': '(False)', 'pin_memory': '(False)', 'num_workers': '(0)'}), '(self.valset, batch_size=self.batch_size,\n shuffle=False, pin_memory=False, num_workers=0)\n', (2179, 2272), False, 'import torch\n'), ((2537, 2627), 'src.DUN.stochastic_fc_models.arq_uncert_fc_MLP', 'arq_uncert_fc_MLP', (['self.input_dim', 'self.output_dim', 'self.width', 'n_layers'], {'w_prior': 'None'}), '(self.input_dim, self.output_dim, self.width, n_layers,\n w_prior=None)\n', (2554, 2627), False, 'from src.DUN.stochastic_fc_models import arq_uncert_fc_resnet, arq_uncert_fc_MLP\n'), ((4398, 4518), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['self.trainset'], {'batch_size': 'self.batch_size', 'shuffle': '(True)', 'pin_memory': '(True)', 'num_workers': '(0)'}), '(self.trainset, batch_size=self.batch_size,\n shuffle=True, pin_memory=True, num_workers=0)\n', (4425, 4518), False, 'import torch\n'), ((4593, 4712), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['self.valset'], {'batch_size': 'self.batch_size', 'shuffle': '(False)', 'pin_memory': 
'(True)', 'num_workers': '(0)'}), '(self.valset, batch_size=self.batch_size,\n shuffle=False, pin_memory=True, num_workers=0)\n', (4620, 4712), False, 'import torch\n'), ((4801, 4922), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['self.trainset'], {'batch_size': 'self.batch_size', 'shuffle': '(True)', 'pin_memory': '(False)', 'num_workers': '(0)'}), '(self.trainset, batch_size=self.batch_size,\n shuffle=True, pin_memory=False, num_workers=0)\n', (4828, 4922), False, 'import torch\n'), ((4997, 5117), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['self.valset'], {'batch_size': 'self.batch_size', 'shuffle': '(False)', 'pin_memory': '(False)', 'num_workers': '(0)'}), '(self.valset, batch_size=self.batch_size,\n shuffle=False, pin_memory=False, num_workers=0)\n', (5024, 5117), False, 'import torch\n'), ((5382, 5472), 'src.DUN.stochastic_fc_models.arq_uncert_fc_MLP', 'arq_uncert_fc_MLP', (['self.input_dim', 'self.output_dim', 'self.width', 'n_layers'], {'w_prior': 'None'}), '(self.input_dim, self.output_dim, self.width, n_layers,\n w_prior=None)\n', (5399, 5472), False, 'from src.DUN.stochastic_fc_models import arq_uncert_fc_resnet, arq_uncert_fc_MLP\n'), ((7595, 7715), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['self.trainset'], {'batch_size': 'self.batch_size', 'shuffle': '(True)', 'pin_memory': '(True)', 'num_workers': '(0)'}), '(self.trainset, batch_size=self.batch_size,\n shuffle=True, pin_memory=True, num_workers=0)\n', (7622, 7715), False, 'import torch\n'), ((7790, 7909), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['self.valset'], {'batch_size': 'self.batch_size', 'shuffle': '(False)', 'pin_memory': '(True)', 'num_workers': '(0)'}), '(self.valset, batch_size=self.batch_size,\n shuffle=False, pin_memory=True, num_workers=0)\n', (7817, 7909), False, 'import torch\n'), ((7998, 8119), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['self.trainset'], {'batch_size': 
'self.batch_size', 'shuffle': '(True)', 'pin_memory': '(False)', 'num_workers': '(0)'}), '(self.trainset, batch_size=self.batch_size,\n shuffle=True, pin_memory=False, num_workers=0)\n', (8025, 8119), False, 'import torch\n'), ((8194, 8314), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['self.valset'], {'batch_size': 'self.batch_size', 'shuffle': '(False)', 'pin_memory': '(False)', 'num_workers': '(0)'}), '(self.valset, batch_size=self.batch_size,\n shuffle=False, pin_memory=False, num_workers=0)\n', (8221, 8314), False, 'import torch\n'), ((8588, 8636), 'src.probability.diag_w_Gauss_loglike', 'diag_w_Gauss_loglike', ([], {'μ': '(0)', 'σ2': "config['gauss_σ2']"}), "(μ=0, σ2=config['gauss_σ2'])\n", (8608, 8636), False, 'from src.probability import pMOM_loglike, diag_w_Gauss_loglike, depth_categorical_VI\n'), ((8886, 9010), 'src.DUN.stochastic_fc_models.arq_uncert_fc_MLP', 'arq_uncert_fc_MLP', (['self.input_dim', 'self.output_dim', 'self.width', 'n_layers'], {'w_prior': 'w_prior', 'BMA_prior': "config['BMA_prior']"}), "(self.input_dim, self.output_dim, self.width, n_layers,\n w_prior=w_prior, BMA_prior=config['BMA_prior'])\n", (8903, 9010), False, 'from src.DUN.stochastic_fc_models import arq_uncert_fc_resnet, arq_uncert_fc_MLP\n'), ((11336, 11456), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['self.trainset'], {'batch_size': 'self.batch_size', 'shuffle': '(True)', 'pin_memory': '(True)', 'num_workers': '(0)'}), '(self.trainset, batch_size=self.batch_size,\n shuffle=True, pin_memory=True, num_workers=0)\n', (11363, 11456), False, 'import torch\n'), ((11531, 11650), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['self.valset'], {'batch_size': 'self.batch_size', 'shuffle': '(False)', 'pin_memory': '(True)', 'num_workers': '(0)'}), '(self.valset, batch_size=self.batch_size,\n shuffle=False, pin_memory=True, num_workers=0)\n', (11558, 11650), False, 'import torch\n'), ((11739, 11860), 'torch.utils.data.DataLoader', 
'torch.utils.data.DataLoader', (['self.trainset'], {'batch_size': 'self.batch_size', 'shuffle': '(True)', 'pin_memory': '(False)', 'num_workers': '(0)'}), '(self.trainset, batch_size=self.batch_size,\n shuffle=True, pin_memory=False, num_workers=0)\n', (11766, 11860), False, 'import torch\n'), ((11935, 12055), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['self.valset'], {'batch_size': 'self.batch_size', 'shuffle': '(False)', 'pin_memory': '(False)', 'num_workers': '(0)'}), '(self.valset, batch_size=self.batch_size,\n shuffle=False, pin_memory=False, num_workers=0)\n', (11962, 12055), False, 'import torch\n'), ((13931, 14051), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['self.trainset'], {'batch_size': 'self.batch_size', 'shuffle': '(True)', 'pin_memory': '(True)', 'num_workers': '(0)'}), '(self.trainset, batch_size=self.batch_size,\n shuffle=True, pin_memory=True, num_workers=0)\n', (13958, 14051), False, 'import torch\n'), ((14126, 14245), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['self.valset'], {'batch_size': 'self.batch_size', 'shuffle': '(False)', 'pin_memory': '(True)', 'num_workers': '(0)'}), '(self.valset, batch_size=self.batch_size,\n shuffle=False, pin_memory=True, num_workers=0)\n', (14153, 14245), False, 'import torch\n'), ((14334, 14455), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['self.trainset'], {'batch_size': 'self.batch_size', 'shuffle': '(True)', 'pin_memory': '(False)', 'num_workers': '(0)'}), '(self.trainset, batch_size=self.batch_size,\n shuffle=True, pin_memory=False, num_workers=0)\n', (14361, 14455), False, 'import torch\n'), ((14530, 14650), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['self.valset'], {'batch_size': 'self.batch_size', 'shuffle': '(False)', 'pin_memory': '(False)', 'num_workers': '(0)'}), '(self.valset, batch_size=self.batch_size,\n shuffle=False, pin_memory=False, num_workers=0)\n', (14557, 14650), False, 'import torch\n'), 
((16748, 16868), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['self.trainset'], {'batch_size': 'self.batch_size', 'shuffle': '(True)', 'pin_memory': '(True)', 'num_workers': '(0)'}), '(self.trainset, batch_size=self.batch_size,\n shuffle=True, pin_memory=True, num_workers=0)\n', (16775, 16868), False, 'import torch\n'), ((16943, 17062), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['self.valset'], {'batch_size': 'self.batch_size', 'shuffle': '(False)', 'pin_memory': '(True)', 'num_workers': '(0)'}), '(self.valset, batch_size=self.batch_size,\n shuffle=False, pin_memory=True, num_workers=0)\n', (16970, 17062), False, 'import torch\n'), ((17151, 17272), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['self.trainset'], {'batch_size': 'self.batch_size', 'shuffle': '(True)', 'pin_memory': '(False)', 'num_workers': '(0)'}), '(self.trainset, batch_size=self.batch_size,\n shuffle=True, pin_memory=False, num_workers=0)\n', (17178, 17272), False, 'import torch\n'), ((17347, 17467), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['self.valset'], {'batch_size': 'self.batch_size', 'shuffle': '(False)', 'pin_memory': '(False)', 'num_workers': '(0)'}), '(self.valset, batch_size=self.batch_size,\n shuffle=False, pin_memory=False, num_workers=0)\n', (17374, 17467), False, 'import torch\n'), ((19355, 19471), 'src.datasets.gen_spirals', 'gen_spirals', ([], {'n_samples': '(2000)', 'shuffle': '(True)', 'noise': '(0.2)', 'random_state': '(1234)', 'n_arms': '(2)', 'start_angle': '(0)', 'stop_angle': '(720)'}), '(n_samples=2000, shuffle=True, noise=0.2, random_state=1234,\n n_arms=2, start_angle=0, stop_angle=720)\n', (19366, 19471), False, 'from src.datasets import load_flight, gen_spirals, load_gap_UCI\n'), ((19547, 19603), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X', 'y'], {'test_size': '(0.8)', 'random_state': '(1234)'}), '(X, y, test_size=0.8, random_state=1234)\n', (19563, 19603), False, 
'from sklearn.model_selection import train_test_split\n'), ((19946, 19988), 'src.utils.Datafeed', 'Datafeed', (['X_train', 'y_train'], {'transform': 'None'}), '(X_train, y_train, transform=None)\n', (19954, 19988), False, 'from src.utils import Datafeed\n'), ((20015, 20055), 'src.utils.Datafeed', 'Datafeed', (['X_test', 'y_test'], {'transform': 'None'}), '(X_test, y_test, transform=None)\n', (20023, 20055), False, 'from src.utils import Datafeed\n'), ((20734, 20766), 'src.datasets.load_flight', 'load_flight', (['base_dir'], {'k800': 'k800'}), '(base_dir, k800=k800)\n', (20745, 20766), False, 'from src.datasets import load_flight, gen_spirals, load_gap_UCI\n'), ((21574, 21616), 'src.utils.Datafeed', 'Datafeed', (['X_train', 'y_train'], {'transform': 'None'}), '(X_train, y_train, transform=None)\n', (21582, 21616), False, 'from src.utils import Datafeed\n'), ((21643, 21681), 'src.utils.Datafeed', 'Datafeed', (['X_val', 'y_val'], {'transform': 'None'}), '(X_val, y_val, transform=None)\n', (21651, 21681), False, 'from src.utils import Datafeed\n'), ((22780, 22850), 'src.datasets.load_gap_UCI', 'load_gap_UCI', ([], {'base_dir': 'base_dir', 'dname': 'dname', 'n_split': 'n_split', 'gap': 'gap'}), '(base_dir=base_dir, dname=dname, n_split=n_split, gap=gap)\n', (22792, 22850), False, 'from src.datasets import load_flight, gen_spirals, load_gap_UCI\n'), ((23656, 23698), 'src.utils.Datafeed', 'Datafeed', (['X_train', 'y_train'], {'transform': 'None'}), '(X_train, y_train, transform=None)\n', (23664, 23698), False, 'from src.utils import Datafeed\n'), ((23725, 23763), 'src.utils.Datafeed', 'Datafeed', (['X_val', 'y_val'], {'transform': 'None'}), '(X_val, y_val, transform=None)\n', (23733, 23763), False, 'from src.utils import Datafeed\n'), ((25379, 25398), 'numpy.isnan', 'np.isnan', (['valid_NLL'], {}), '(valid_NLL)\n', (25387, 25398), True, 'import numpy as np\n'), ((2683, 2776), 'src.DUN.stochastic_fc_models.arq_uncert_fc_resnet', 'arq_uncert_fc_resnet', (['self.input_dim', 
'self.output_dim', 'self.width', 'n_layers'], {'w_prior': 'None'}), '(self.input_dim, self.output_dim, self.width, n_layers,\n w_prior=None)\n', (2703, 2776), False, 'from src.DUN.stochastic_fc_models import arq_uncert_fc_resnet, arq_uncert_fc_MLP\n'), ((5528, 5621), 'src.DUN.stochastic_fc_models.arq_uncert_fc_resnet', 'arq_uncert_fc_resnet', (['self.input_dim', 'self.output_dim', 'self.width', 'n_layers'], {'w_prior': 'None'}), '(self.input_dim, self.output_dim, self.width, n_layers,\n w_prior=None)\n', (5548, 5621), False, 'from src.DUN.stochastic_fc_models import arq_uncert_fc_resnet, arq_uncert_fc_MLP\n'), ((8699, 8758), 'src.probability.pMOM_loglike', 'pMOM_loglike', ([], {'r': "config['pMOM_r']", 'τ': '(1)', 'σ2': "config['pMOM_σ2']"}), "(r=config['pMOM_r'], τ=1, σ2=config['pMOM_σ2'])\n", (8711, 8758), False, 'from src.probability import pMOM_loglike, diag_w_Gauss_loglike, depth_categorical_VI\n'), ((9104, 9231), 'src.DUN.stochastic_fc_models.arq_uncert_fc_resnet', 'arq_uncert_fc_resnet', (['self.input_dim', 'self.output_dim', 'self.width', 'n_layers'], {'w_prior': 'w_prior', 'BMA_prior': "config['BMA_prior']"}), "(self.input_dim, self.output_dim, self.width, n_layers,\n w_prior=w_prior, BMA_prior=config['BMA_prior'])\n", (9124, 9231), False, 'from src.DUN.stochastic_fc_models import arq_uncert_fc_resnet, arq_uncert_fc_MLP\n')] |
#!/usr/bin/env python
import argparse
import os
def main():
    """Average model weights over the most recent snapshot files.

    Reads configuration from the module-level ``args`` (parsed under
    ``__main__``): picks the ``args.num`` most recently modified files
    from ``args.snapshots``, sums their parameters and writes the
    element-wise mean to ``args.out``.

    Raises:
        ValueError: if ``args.backend`` is neither 'pytorch' nor 'chainer'.
    """
    last = sorted(args.snapshots, key=os.path.getmtime)
    last = last[-args.num:]
    print("average over", last)
    # Fewer than ``args.num`` snapshots may be available; divide by the
    # actual count so the result is a true mean (dividing by ``args.num``
    # would silently shrink the averaged weights).
    n_averaged = len(last)
    avg = None
    if args.backend == 'pytorch':
        import torch
        # sum
        for path in last:
            states = torch.load(path, map_location=torch.device("cpu"))["model"]
            if avg is None:
                avg = states
            else:
                for k in avg.keys():
                    avg[k] += states[k]
        # average
        for k in avg.keys():
            if avg[k] is not None:
                avg[k] /= n_averaged
        torch.save(avg, args.out)
    elif args.backend == 'chainer':
        import numpy as np
        # sum
        for path in last:
            states = np.load(path)
            if avg is None:
                # model parameters live under 'updater/model:main/<name>'
                keys = [x.split('main/')[1] for x in states if 'model' in x]
                avg = dict()
                for k in keys:
                    avg[k] = states['updater/model:main/{}'.format(k)]
            else:
                for k in keys:
                    avg[k] += states['updater/model:main/{}'.format(k)]
        # average
        for k in keys:
            if avg[k] is not None:
                avg[k] /= n_averaged
        np.savez_compressed(args.out, **avg)
        os.rename('{}.npz'.format(args.out), args.out)  # numpy save with .npz extension
    else:
        raise ValueError('Incorrect type of backend')
if __name__ == '__main__':
    # CLI entry point: parse arguments into the module-level ``args``
    # consumed by main(), then run the averaging.
    parser = argparse.ArgumentParser()
    parser.add_argument("--snapshots", nargs="+", type=str, required=True)
    parser.add_argument("--out", type=str, required=True)
    parser.add_argument("--num", type=int, default=10)
    parser.add_argument("--backend", type=str, default='chainer')
    args = parser.parse_args()
    main()
| [
"numpy.load",
"argparse.ArgumentParser",
"torch.save",
"numpy.savez_compressed",
"torch.device"
] | [((1521, 1546), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (1544, 1546), False, 'import argparse\n'), ((649, 674), 'torch.save', 'torch.save', (['avg', 'args.out'], {}), '(avg, args.out)\n', (659, 674), False, 'import torch\n'), ((1289, 1325), 'numpy.savez_compressed', 'np.savez_compressed', (['args.out'], {}), '(args.out, **avg)\n', (1308, 1325), True, 'import numpy as np\n'), ((799, 812), 'numpy.load', 'np.load', (['path'], {}), '(path)\n', (806, 812), True, 'import numpy as np\n'), ((340, 359), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (352, 359), False, 'import torch\n')] |
import cv2
import numpy as np
def apply_merge_transformations(
        image, kernels, transformations=(cv2.MORPH_OPEN, 1), plot=False):
    """
    Run a chain of OpenCV morphological transformations once per kernel and
    merge all per-kernel results into a single binary image.

    Args:
        image (numpy.ndarray):
            Input image.
        kernels (list of numpy.ndarray):
            Kernels used for the morphological transformations.
        transformations (tuple or list of tuples):
            One ``(cv2.MORPH_*, num_iterations)`` pair or a list of such
            pairs, applied in order for every kernel.
            Defaults to ``(cv2.MORPH_OPEN, 1)``.
        plot (bool, optional):
            Display the merged result.
            Defaults to False.

    Returns:
        numpy.ndarray:
            Merged (thresholded to binary) result of all transformations.
    """  # NOQA E501
    if type(transformations) is not list:
        transformations = [transformations]
    merged = np.zeros_like(image)
    for kernel in kernels:
        transformed = image
        for morph_type, n_iter in transformations:
            transformed = cv2.morphologyEx(
                transformed, morph_type, kernel, iterations=n_iter)
        merged += transformed
    # collapse the accumulated sums back to a clean 0/255 binary image
    image = cv2.threshold(merged, 0, 255, cv2.THRESH_BINARY)[1]
    if plot:  # pragma: no cover
        cv2.imshow("rectangular shape enhanced image", image)
        cv2.waitKey(0)
    return image
def apply_thresholding(image, plot=False):
    """
    Binarize the image by summing an OTSU inverse-binary threshold with a
    mean-based inverse-binary threshold.

    Args:
        image (numpy.ndarray):
            Input image.
        plot (bool, optional):
            Displays image after thresholding.
            Defaults to False.

    Returns:
        numpy.ndarray:
            Resulting image
    """  # NOQA E501
    otsu_mask = cv2.threshold(
        image, 0, 255, cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)[1]
    mean_mask = cv2.threshold(
        image, np.mean(image), 255, cv2.THRESH_BINARY_INV)[1]
    image = otsu_mask + mean_mask
    if plot:  # pragma: no cover
        cv2.imshow("thresholded image", image)
        cv2.waitKey(0)
    return image
def get_rect_kernels(
        width_range, height_range,
        wh_ratio_range=None,
        border_thickness=1,
        tolerance=0.05):
    """
    Returns a list of rectangular kernels for OpenCV morphological transformations.

    It's using `width_range`, `height_range` params to create all the possible combinations of rectangular kernels and performs filtering based on `wh_ratio_range`.

    Args:
        width_range (tuple):
            Min/max width range for rectangular kernels.
            Should be adjusted to the pixel width of boxes to be detected on the image.
        height_range (tuple):
            Min/max height range for rectangular kernels.
            Should be adjusted to the pixel height of boxes to be detected on the image.
        wh_ratio_range (tuple, optional):
            Width / Height ratio range. If None it is derived from the width
            and height ranges as
            ``(width_range[0] / height_range[1], width_range[1] / height_range[0])``.
            Defaults to None.
        border_thickness (int, optional):
            Rectangles border thickness.
            Defaults to 1.
        tolerance (float):
            Expands the width and height ranges' upper and lower boundries by tolerance value.
            Defaults to `0.05`.

    Returns:
        list of numpy.ndarray:
            List of rectangular `numpy.ndarray` kernels
    """  # NOQA E501
    # Previously a ``wh_ratio_range`` of None raised TypeError on the
    # filtering condition below despite being the documented default;
    # derive the widest consistent ratio range instead.
    if wh_ratio_range is None:
        wh_ratio_range = (width_range[0] / height_range[1],
                          width_range[1] / height_range[0])
    kernels = [
        np.pad(
            # hollow rectangle: zero interior surrounded by a 1-valued border
            np.zeros((h, w), dtype=np.uint8),
            border_thickness, mode='constant', constant_values=1)
        for w in range(
            int((1 - tolerance) * width_range[0]),
            int((1 + tolerance) * width_range[1]))
        for h in range(
            int((1 - tolerance) * height_range[0]),
            int((1 + tolerance) * height_range[1]))
        if wh_ratio_range[0] <= w / h <= wh_ratio_range[1]
    ]
    return kernels
def get_line_kernels(horizontal_length, vertical_length, thickness=1):
    """
    Build the pair of straight-line kernels used for line detection.

    The first kernel represents a vertical line and the second one a
    horizontal line.

    Args:
        horizontal_length (int):
            Length of the horizontal line kernel.
        vertical_length (int):
            Length of the vertical line kernel.
        thickness (int, optional):
            Thickness of the lines.
            Defaults to 1.

    Returns:
        list of numpy.ndarray:
            Two kernels: ``[vertical, horizontal]``.
    """  # NOQA E501
    vertical = np.ones((vertical_length, thickness), dtype=np.uint8)
    horizontal = np.ones((thickness, horizontal_length), dtype=np.uint8)
    return [vertical, horizontal]
def draw_rects(image, rects, color=(0, 255, 0), thickness=1):
    """
    Draw each ``(x, y, width, height)`` rectangle onto the input image.

    Args:
        image (numpy.ndarray):
            Input image (drawn on in place and returned).
        rects (list of tuples):
            Rectangles represented as ``(x, y, width, height)``.
        color (tuple, optional):
            Color definition in RGB.
            Defaults to (0, 255, 0).
        thickness (int, optional):
            Thickness of the bounding rectangle outline.
            Defaults to 1.

    Returns:
        numpy.ndarray:
            Output image.
    """  # NOQA E501
    for (x, y, w, h) in rects:
        top_left = (x, y)
        bottom_right = (x + w, y + h)
        cv2.rectangle(image, top_left, bottom_right, color, thickness)
    return image
def get_checkbox_crop(img, rect, border_crop_factor=0.15):
    """
    Crop a checkbox rectangle out of an image, shaving a margin off every
    border to drop any leftovers of the checkbox frame.

    Args:
        img (numpy.ndarray):
            Image as numpy array.
        rect (list, tuple or array):
            Rectangle from OpenCV with following values: `(x, y, width, height)`
        border_crop_factor (float, optional):
            Fraction of the width/height removed from each border during
            cropping.
            Defaults to 0.15.

    Returns:
        numpy.ndarray:
            Image crop as numpy array.
    """  # NOQA E501
    x, y = rect[0], rect[1]
    width, height = rect[2], rect[3]
    # margins trimmed from each side, proportional to the box size
    w_margin = int(width * border_crop_factor)
    h_margin = int(height * border_crop_factor)
    rows = slice(y + h_margin, y + height - h_margin)
    cols = slice(x + w_margin, x + width - w_margin)
    return img[rows, cols]
def contains_pixels(img, px_threshold=0.1, verbose=False):
    """
    Check whether the fraction of non-zero (white) pixels in the image
    reaches ``px_threshold``.

    As this function counts non-zero pixel values you need to make sure that
    what you're passing as an input image is well preprocessed for that
    (e.g. binarized).

    Args:
        img (numpy.ndarray):
            Image as numpy array.
        px_threshold (float, optional):
            This is the threshold used when estimating if pixels are present inside the checkbox.
            Defaults to 0.1.
        verbose (bool, optional):
            Defines if messages should be printed or not.
            Defaults to False.

    Returns:
        bool:
            True - if input image contains enough white pixels
            False - if input image does not contain enough white pixels
            (also False for zero-sized images, which previously raised
            ZeroDivisionError)
    """  # NOQA E501
    # maximum pixel count of the image
    all_px_count = img.shape[0] * img.shape[1]
    # Guard against zero-sized crops which would divide by zero below.
    if all_px_count == 0:
        return False
    nonzero_px_count = np.count_nonzero(img)
    if verbose:  # pragma: no cover
        print("----------------------------------")
        print("nonzero_px_count: ", nonzero_px_count)
        print("all_px_count: ", all_px_count)
        print(
            "nonzero_px_count / all_px_count = ",
            nonzero_px_count / all_px_count)
        print("----------------------------------")
    # Return the comparison directly (the former ``True if ... else False``
    # wrapper was redundant).
    return nonzero_px_count / all_px_count >= px_threshold
def get_image(img):
    """
    Normalize an image argument to a ``numpy.ndarray`` with ``dtype=np.uint8``.

    Args:
        img (numpy.ndarray or str):
            Image as numpy array or string file path.

    Returns:
        numpy.ndarray:
            Image as `numpy.ndarray` with `dtype=np.uint8`
    """  # NOQA E501
    assert(type(img) in [np.ndarray, str])
    if type(img) is str:
        # load from disk; original pixel data is read by OpenCV
        print("Processing file: ", img)
        image_org = cv2.imread(img)
    else:
        # copy so the caller's array is never mutated downstream
        image_org = img.copy().astype(np.uint8)
    return image_org
| [
"numpy.zeros_like",
"numpy.count_nonzero",
"cv2.waitKey",
"cv2.morphologyEx",
"cv2.threshold",
"numpy.zeros",
"numpy.ones",
"cv2.imread",
"numpy.mean",
"cv2.rectangle",
"cv2.imshow"
] | [((1185, 1205), 'numpy.zeros_like', 'np.zeros_like', (['image'], {}), '(image)\n', (1198, 1205), True, 'import numpy as np\n'), ((8412, 8433), 'numpy.count_nonzero', 'np.count_nonzero', (['img'], {}), '(img)\n', (8428, 8433), True, 'import numpy as np\n'), ((1478, 1525), 'cv2.threshold', 'cv2.threshold', (['image', '(0)', '(255)', 'cv2.THRESH_BINARY'], {}), '(image, 0, 255, cv2.THRESH_BINARY)\n', (1491, 1525), False, 'import cv2\n'), ((1571, 1624), 'cv2.imshow', 'cv2.imshow', (['"""rectangular shape enhanced image"""', 'image'], {}), "('rectangular shape enhanced image', image)\n", (1581, 1624), False, 'import cv2\n'), ((1633, 1647), 'cv2.waitKey', 'cv2.waitKey', (['(0)'], {}), '(0)\n', (1644, 1647), False, 'import cv2\n'), ((2086, 2155), 'cv2.threshold', 'cv2.threshold', (['image', '(0)', '(255)', '(cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)'], {}), '(image, 0, 255, cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)\n', (2099, 2155), False, 'import cv2\n'), ((2327, 2365), 'cv2.imshow', 'cv2.imshow', (['"""thresholded image"""', 'image'], {}), "('thresholded image', image)\n", (2337, 2365), False, 'import cv2\n'), ((2374, 2388), 'cv2.waitKey', 'cv2.waitKey', (['(0)'], {}), '(0)\n', (2385, 2388), False, 'import cv2\n'), ((5023, 5076), 'numpy.ones', 'np.ones', (['(vertical_length, thickness)'], {'dtype': 'np.uint8'}), '((vertical_length, thickness), dtype=np.uint8)\n', (5030, 5076), True, 'import numpy as np\n'), ((5086, 5141), 'numpy.ones', 'np.ones', (['(thickness, horizontal_length)'], {'dtype': 'np.uint8'}), '((thickness, horizontal_length), dtype=np.uint8)\n', (5093, 5141), True, 'import numpy as np\n'), ((5895, 5957), 'cv2.rectangle', 'cv2.rectangle', (['image', '(x, y)', '(x + w, y + h)', 'color', 'thickness'], {}), '(image, (x, y), (x + w, y + h), color, thickness)\n', (5908, 5957), False, 'import cv2\n'), ((1331, 1397), 'cv2.morphologyEx', 'cv2.morphologyEx', (['morphs', 'transform', 'kernel'], {'iterations': 'iterations'}), '(morphs, transform, kernel, 
iterations=iterations)\n', (1347, 1397), False, 'import cv2\n'), ((2211, 2225), 'numpy.mean', 'np.mean', (['image'], {}), '(image)\n', (2218, 2225), True, 'import numpy as np\n'), ((3759, 3791), 'numpy.zeros', 'np.zeros', (['(h, w)'], {'dtype': 'np.uint8'}), '((h, w), dtype=np.uint8)\n', (3767, 3791), True, 'import numpy as np\n'), ((9560, 9575), 'cv2.imread', 'cv2.imread', (['img'], {}), '(img)\n', (9570, 9575), False, 'import cv2\n')] |
import albumentations as A
import mmcv
import numpy as np
from mmcv.runner import obj_from_dict
from numpy import random
from . import transforms
class PhotoMetricDistortion(object):
    """Photometric augmentation: random brightness, contrast, saturation,
    hue shift and channel permutation applied to an image.

    NOTE(review): the image is modified in place with ``+=``/``*=`` and is
    converted through ``mmcv.bgr2hsv``, so it is presumably a float BGR
    array — confirm against callers.
    """
    def __init__(self,
                 brightness_delta=32,
                 contrast_range=(0.5, 1.5),
                 saturation_range=(0.5, 1.5),
                 hue_delta=18):
        """Store the distortion ranges.

        Args:
            brightness_delta: max absolute additive brightness shift.
            contrast_range: (lower, upper) multiplicative contrast range.
            saturation_range: (lower, upper) multiplicative saturation range.
            hue_delta: max absolute hue shift applied to the HSV hue channel.
        """
        self.brightness_delta = brightness_delta
        self.contrast_lower, self.contrast_upper = contrast_range
        self.saturation_lower, self.saturation_upper = saturation_range
        self.hue_delta = hue_delta
    def __call__(self, img, boxes, labels):
        """Apply each distortion with probability 0.5; ``boxes`` and
        ``labels`` are passed through unchanged."""
        # random brightness
        if random.randint(2):
            delta = random.uniform(-self.brightness_delta,
                                   self.brightness_delta)
            img += delta
        # mode == 0 --> do random contrast first
        # mode == 1 --> do random contrast last
        mode = random.randint(2)
        if mode == 1:
            if random.randint(2):
                alpha = random.uniform(self.contrast_lower,
                                       self.contrast_upper)
                img *= alpha
        # convert color from BGR to HSV
        img = mmcv.bgr2hsv(img)
        # random saturation
        if random.randint(2):
            img[..., 1] *= random.uniform(self.saturation_lower,
                                          self.saturation_upper)
        # random hue
        if random.randint(2):
            img[..., 0] += random.uniform(-self.hue_delta, self.hue_delta)
            # wrap hue values back into the [0, 360) range
            img[..., 0][img[..., 0] > 360] -= 360
            img[..., 0][img[..., 0] < 0] += 360
        # convert color from HSV to BGR
        img = mmcv.hsv2bgr(img)
        # random contrast
        if mode == 0:
            if random.randint(2):
                alpha = random.uniform(self.contrast_lower,
                                       self.contrast_upper)
                img *= alpha
        # randomly swap channels
        if random.randint(2):
            img = img[..., random.permutation(3)]
        return img, boxes, labels
class Expand(object):
def __init__(self, mean=(0, 0, 0), to_rgb=True, ratio_range=(1, 4)):
if to_rgb:
self.mean = mean[::-1]
else:
self.mean = mean
self.min_ratio, self.max_ratio = ratio_range
def __call__(self, img, boxes, labels):
if random.randint(2):
return img, boxes, labels
h, w, c = img.shape
ratio = random.uniform(self.min_ratio, self.max_ratio)
expand_img = np.full((int(h * ratio), int(w * ratio), c),
self.mean).astype(img.dtype)
left = int(random.uniform(0, w * ratio - w))
top = int(random.uniform(0, h * ratio - h))
expand_img[top:top + h, left:left + w] = img
img = expand_img
boxes += np.tile((left, top), 2)
return img, boxes, labels
class ExtraAugmentation(object):
    """Build an augmentation pipeline from a nested config dict.

    Transform types are resolved first against the local ``transforms``
    module and, failing that, against albumentations (``A``).
    """
    def __init__(self, **kwargs):
        self.transform = self.transform_from_dict(**kwargs)
    def transform_from_dict(self, **kwargs):
        """Recursively instantiate a transform and any nested transforms."""
        if 'transforms' in kwargs:
            kwargs['transforms'] = [
                self.transform_from_dict(**cfg)
                for cfg in kwargs['transforms']
            ]
        try:
            # Prefer the project-local transform implementations.
            return obj_from_dict(kwargs, transforms)
        except AttributeError:
            # Fall back to albumentations for standard transform names.
            return obj_from_dict(kwargs, A)
    def __call__(self, img, bboxes, labels):
        augmented = self.transform(image=img, bboxes=bboxes, labels=labels)
        out_boxes = np.array(augmented['bboxes'], dtype=np.float32)
        out_labels = np.array(augmented['labels'], dtype=np.int)
        return augmented['image'], out_boxes, out_labels
| [
"numpy.random.uniform",
"mmcv.runner.obj_from_dict",
"mmcv.bgr2hsv",
"numpy.random.randint",
"numpy.array",
"numpy.tile",
"numpy.random.permutation",
"mmcv.hsv2bgr"
] | [((675, 692), 'numpy.random.randint', 'random.randint', (['(2)'], {}), '(2)\n', (689, 692), False, 'from numpy import random\n'), ((949, 966), 'numpy.random.randint', 'random.randint', (['(2)'], {}), '(2)\n', (963, 966), False, 'from numpy import random\n'), ((1227, 1244), 'mmcv.bgr2hsv', 'mmcv.bgr2hsv', (['img'], {}), '(img)\n', (1239, 1244), False, 'import mmcv\n'), ((1285, 1302), 'numpy.random.randint', 'random.randint', (['(2)'], {}), '(2)\n', (1299, 1302), False, 'from numpy import random\n'), ((1467, 1484), 'numpy.random.randint', 'random.randint', (['(2)'], {}), '(2)\n', (1481, 1484), False, 'from numpy import random\n'), ((1714, 1731), 'mmcv.hsv2bgr', 'mmcv.hsv2bgr', (['img'], {}), '(img)\n', (1726, 1731), False, 'import mmcv\n'), ((2009, 2026), 'numpy.random.randint', 'random.randint', (['(2)'], {}), '(2)\n', (2023, 2026), False, 'from numpy import random\n'), ((2417, 2434), 'numpy.random.randint', 'random.randint', (['(2)'], {}), '(2)\n', (2431, 2434), False, 'from numpy import random\n'), ((2519, 2565), 'numpy.random.uniform', 'random.uniform', (['self.min_ratio', 'self.max_ratio'], {}), '(self.min_ratio, self.max_ratio)\n', (2533, 2565), False, 'from numpy import random\n'), ((2890, 2913), 'numpy.tile', 'np.tile', (['(left, top)', '(2)'], {}), '((left, top), 2)\n', (2897, 2913), True, 'import numpy as np\n'), ((714, 775), 'numpy.random.uniform', 'random.uniform', (['(-self.brightness_delta)', 'self.brightness_delta'], {}), '(-self.brightness_delta, self.brightness_delta)\n', (728, 775), False, 'from numpy import random\n'), ((1004, 1021), 'numpy.random.randint', 'random.randint', (['(2)'], {}), '(2)\n', (1018, 1021), False, 'from numpy import random\n'), ((1331, 1391), 'numpy.random.uniform', 'random.uniform', (['self.saturation_lower', 'self.saturation_upper'], {}), '(self.saturation_lower, self.saturation_upper)\n', (1345, 1391), False, 'from numpy import random\n'), ((1513, 1560), 'numpy.random.uniform', 'random.uniform', (['(-self.hue_delta)', 
'self.hue_delta'], {}), '(-self.hue_delta, self.hue_delta)\n', (1527, 1560), False, 'from numpy import random\n'), ((1796, 1813), 'numpy.random.randint', 'random.randint', (['(2)'], {}), '(2)\n', (1810, 1813), False, 'from numpy import random\n'), ((2709, 2741), 'numpy.random.uniform', 'random.uniform', (['(0)', '(w * ratio - w)'], {}), '(0, w * ratio - w)\n', (2723, 2741), False, 'from numpy import random\n'), ((2761, 2793), 'numpy.random.uniform', 'random.uniform', (['(0)', '(h * ratio - h)'], {}), '(0, h * ratio - h)\n', (2775, 2793), False, 'from numpy import random\n'), ((3304, 3337), 'mmcv.runner.obj_from_dict', 'obj_from_dict', (['kwargs', 'transforms'], {}), '(kwargs, transforms)\n', (3317, 3337), False, 'from mmcv.runner import obj_from_dict\n'), ((3560, 3602), 'numpy.array', 'np.array', (["data['bboxes']"], {'dtype': 'np.float32'}), "(data['bboxes'], dtype=np.float32)\n", (3568, 3602), True, 'import numpy as np\n'), ((3604, 3642), 'numpy.array', 'np.array', (["data['labels']"], {'dtype': 'np.int'}), "(data['labels'], dtype=np.int)\n", (3612, 3642), True, 'import numpy as np\n'), ((1047, 1103), 'numpy.random.uniform', 'random.uniform', (['self.contrast_lower', 'self.contrast_upper'], {}), '(self.contrast_lower, self.contrast_upper)\n', (1061, 1103), False, 'from numpy import random\n'), ((1839, 1895), 'numpy.random.uniform', 'random.uniform', (['self.contrast_lower', 'self.contrast_upper'], {}), '(self.contrast_lower, self.contrast_upper)\n', (1853, 1895), False, 'from numpy import random\n'), ((3388, 3412), 'mmcv.runner.obj_from_dict', 'obj_from_dict', (['kwargs', 'A'], {}), '(kwargs, A)\n', (3401, 3412), False, 'from mmcv.runner import obj_from_dict\n'), ((2055, 2076), 'numpy.random.permutation', 'random.permutation', (['(3)'], {}), '(3)\n', (2073, 2076), False, 'from numpy import random\n')] |
import os
import logging
import datetime
import numpy as np
import pandas as pd
from collections import OrderedDict
from .utils import (calc_num_na, load_data, inverse_logit,
choose_from_binary_probs, choose_from_multinomial_probs)
from .config import DATA_DIR, MODEL_FILES
log = logging.getLogger(__name__)
MODEL_PATHS = [os.path.join(DATA_DIR,item) for item in MODEL_FILES]
def data_file_path(modelfile):
    """Return the absolute path of *modelfile* inside the package data directory."""
    full_path = os.path.join(DATA_DIR, modelfile)
    return full_path
_default_model = (
('', {
'model': data_file_path('model_v-nv-wt.txt')
}),
('V', {
'model': data_file_path('model_f-nf.txt'),
'label_above_threshold': 'NF',
'label_below_threshold': 'F',
'threshold': 0.5
}),
('NF', {
'model': data_file_path('model_hf-hg-sc-so.txt')
})
)
def load_model(path, sep=',', index_col=[0], **kwargs):
    """Load a model for SLS HRIS LCC Prediction.

    :param path: path to input data, string or URL (e.g. http, s3)
    :param sep: input data field separator
    :param index_col: column(s) used as the model's row index
    :param kwargs: additional keyword arguments passed to `utils.load_data`
    :raises ValueError: if any model coefficient is missing (NA)
    """
    log.info('Loading model at %s' % path)
    model = load_data(path, sep=sep, index_col=index_col, **kwargs)
    log.info('Checking model for missing coefficients')
    n_missing = calc_num_na(model)
    if n_missing > 0:
        # A model with NA coefficients cannot produce valid predictions.
        raise ValueError('Model has %s missing coefficients.' % n_missing)
    return model
def load_observations(path, sep=',', **kwargs):
    """Load observations for SLS HRIS LCC Prediction.

    :param path: path to input data, string or URL (e.g. http, s3)
    :param sep: input data field separator
    :param kwargs: additional keyword arguments passed to `utils.load_data`
    :return: the loaded observations (as returned by `utils.load_data`)
    """
    log.info('Loading observations at %s' % path)
    observations = load_data(path, sep=sep, **kwargs)
    return observations
def get_features_from_model_files(paths, features_col=0,
                                  intercept_names=['(Intercept)']):
    """Collect the set of feature names used across model files.

    :param paths: paths to model files
    :param features_col: index of the column holding feature names
    :param intercept_names: names treated as intercepts and excluded
    :return: set of feature-name strings (intercepts removed)
    """
    collected = []
    for model_path in paths:
        # Only the feature-name column is needed, so restrict the read.
        model = load_model(model_path, usecols=[features_col],
                           index_col=None)
        collected.extend(model.values.flatten().tolist())
    # Deduplicate and drop intercept pseudo-features.
    return set(collected).difference(set(intercept_names))
# TODO: compatibility with custom models
def generate_fake_observations(n):
    """Generate fake data for SLS HRIS LCC Prediction.

    Columns are the union of features referenced by the packaged model
    files; values are uniform random draws in [0, 1).

    :param n: number of observations to generate
    :return: `pandas.DataFrame` with one row per observation
    """
    feature_names = get_features_from_model_files(MODEL_PATHS)
    values = np.random.uniform(size=(n, len(feature_names)))
    frame = pd.DataFrame(data=values)
    frame.columns = feature_names
    return frame
def generate_fake_observations_file(n, path):
    """Write a fake observations CSV file for SLS HRIS LCC Prediction.

    :param n: number of observations
    :param path: path to file
    """
    generate_fake_observations(n).to_csv(path)
def calculate_prob(observations, model, intercept_name='(Intercept)'):
    """Apply model to observations.

    .. warning:: for the model to be applied correctly, its row indices must be present among the column indices of the observations. If the correspondence is not meaninful (e.g. indices matched by coincidence), the result will not be meaningful or an exception will be raised!

    :param observations: `pandas.DataFrame` of observations
    :param model: `pandas.DataFrame` of model (coefficients indexed by variable name)
    :param intercept_name: name of the intercept field in the model
    :return: `pandas.DataFrame` of probabilities (inverse-logit of X.B)
    :raises ValueError: if observations lack variables required by the model
    """
    model_variables = model.index.tolist()
    intercept_index = model_variables.index(intercept_name)
    model_variables.pop(intercept_index)
    n_obs = observations.shape[0]
    # BUG FIX: validate the observation columns *before* subsetting.  The
    # original subset first, so missing variables raised a pandas KeyError
    # and the intended, clearer ValueError below was unreachable.
    model_vars_set = set(model_variables)
    obs_vars_set = set(observations.columns.tolist())
    if not model_vars_set.issubset(obs_vars_set):
        set_diff = model_vars_set.difference(obs_vars_set)
        raise ValueError(
            'Observations are missing variables: {}'.format(set_diff)
        )
    log.info('Subsetting observations to variables in model')
    # Copy so adding the intercept column below mutates our own frame, not a
    # view of the caller's data (avoids SettingWithCopyWarning / silent no-op).
    observations_for_model = observations.loc[:, model_variables].copy()
    observations_for_model.loc[:, intercept_name] = pd.Series(
        np.ones(n_obs), index=observations_for_model.index
    )
    # NOTE: the original called ``set_index([intercept_name], append=True)``
    # here and discarded the result; ``set_index`` is not in-place, so the
    # call had no effect and has been removed.
    #: multiply - note that pandas handles checking index names match.
    #: use np.dot(X,B) for non-matching index names
    log.debug('Calculating logits')
    result = observations_for_model.dot(model)
    log.info('Calculating probabilities')
    result = result.applymap(inverse_logit)
    return result
def choose_class_from_probs(df, **kwargs):
    """Choose class from data frame of probabilities.

    A single-column frame is treated as binary probabilities; anything wider
    is treated as multinomial.

    :param df: `pandas.DataFrame` of binary or multinomial class probabilities
    :param kwargs: keyword arguments to `utils.choose_from_binary_probs`
    :return: predicted class labels
    """
    is_binary = df.shape[1] == 1
    if is_binary:
        return choose_from_binary_probs(df, **kwargs)
    return choose_from_multinomial_probs(df)
class Tree:
    """Model tree.

    Construct a classification tree from logistic regression models.

    :param tuples: tuples which define classification tree - see below for example
    :param path: path to model configuration file
    :param kwargs: keyword arguments to shclassify.load_model; currently assumes all model files have same tabular formatting

    The default model is

    ::

        ('', {
            'model': data_file_path('model_v-nv-wt.txt')
        }),
        ('V', {
            'model': data_file_path('model_f-nf.txt'),
            'label_above_threshold': 'NF',
            'label_below_threshold': 'F',
            'threshold': 0.5
        }),
        ('NF', {
            'model': data_file_path('model_hf-hg-sc-so.txt')
        })
    """
    def __init__(self, *tuples, path=None, **kwargs):
        self.config_filepath = path
        if path is not None:
            # BUG FIX: check the caller's `tuples` argument *before*
            # overwriting it with the configuration-file contents; the
            # original performed the check after the assignment, so it
            # inspected the loaded config instead of the caller's input.
            if tuples:
                raise ValueError(('Only one of path or tuples '
                                  'should be provided'))
            tuples = self._config_file_to_tuples(path)
        if not tuples:
            # No explicit tree supplied: fall back to the packaged default.
            tuples = _default_model
        self._init_from_tuples(*tuples, **kwargs)

    def _config_file_to_tuples(self, path):
        # Placeholder: configuration-file parsing is not implemented yet.
        raise RuntimeError('Not yet implemented')

    def _init_from_tuples(self, *tuples, **kwargs):
        """Build the ordered class -> model mapping from (label, dict) tuples."""
        tree = OrderedDict()
        for label, model_dict in tuples:
            if label in tree.keys():
                raise ValueError(('class (%s) must not have more than'
                                  ' one model assignment'))
            # TODO: validate that true and false labels provided for
            # binary models
            tree[label] = {
                'model': load_model(model_dict['model']),
                'label_above_threshold': model_dict.get(
                    'label_above_threshold', '%s True' %label),
                'label_below_threshold': model_dict.get(
                    'label_below_threshold', '%s False' %label),
                'threshold': model_dict.get('threshold')
            }
        self.model = tree

    def predict_df(self, df):
        """Make predictions for observations in data frame.

        .. note:: predictions will have same index as `df`

        :param df: `pandas.DataFrame` of observations
        :return: single-column `pandas.DataFrame` of predicted class labels
        """
        preds = pd.DataFrame(data='', index=df.index, columns=['class'])
        for cls, model_dict in self.model.items():
            model = model_dict['model']
            # remove null to avoid conflict with next mask query
            mask = pd.notnull(preds['class'])
            if not mask.any():
                log.debug('Stopping prediction for class %s as all values are null' %cls)
                break
            # subset to parent class of model
            mask = mask & (preds['class']==cls)
            if not mask.any():
                log.debug('Stopping prediction because no observatiosn are class %s' %cls)
                break
            obs = df[mask.values]
            obs_cls_probs = calculate_prob(obs, model)
            cls_pred = choose_class_from_probs(
                obs_cls_probs,
                name_true=model_dict['label_above_threshold'],
                name_false=model_dict['label_below_threshold'],
                threshold=model_dict['threshold']
            )
            preds[mask] = cls_pred
        return preds

    def predict_file(self, obs_file, pred_file, overwrite=False,
                     sep=',', chunksize=10000, index_col=None):
        """Make predictions for observations in file.

        This automatically appends predictions to `pred_file`. To get
        predictions in a data frame in interactive python sessions, use
        `predict_df`.

        :param obs_file: path to observations file
        :param pred_file: path of file to write predictions
        :param overwrite: overwrite `pred_file` if it exists
        :param sep: observation file separator
        :param chunksize: chunksize (lines) read `pred_file` for making predictions
        :param index_col: integer index of column to use as data frame row index
        :raises ValueError: if `pred_file` exists and `overwrite` is False
        """
        reader = load_observations(obs_file, sep=sep,
                                  chunksize=chunksize,
                                  index_col=index_col)
        if os.path.exists(pred_file) and not overwrite:
            # BUG FIX: the original raised the raw '%s' template without
            # interpolating the file name into the message.
            raise ValueError('%s already exists! Specify a new file.' %pred_file)
        for i, chunk in enumerate(reader):
            log.info('Loading chunk %s' %i)
            res = self.predict_df(chunk)
            # First chunk creates the file (with header); later chunks append.
            mode = 'w' if i==0 else 'a'
            header = mode == 'w'
            res.to_csv(pred_file, header=header, mode=mode)
| [
"pandas.DataFrame",
"numpy.random.uniform",
"os.path.exists",
"numpy.ones",
"pandas.notnull",
"collections.OrderedDict",
"os.path.join",
"logging.getLogger"
] | [((303, 330), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (320, 330), False, 'import logging\n'), ((346, 374), 'os.path.join', 'os.path.join', (['DATA_DIR', 'item'], {}), '(DATA_DIR, item)\n', (358, 374), False, 'import os\n'), ((443, 476), 'os.path.join', 'os.path.join', (['DATA_DIR', 'modelfile'], {}), '(DATA_DIR, modelfile)\n', (455, 476), False, 'import os\n'), ((2758, 2792), 'numpy.random.uniform', 'np.random.uniform', ([], {'size': '(n, n_col)'}), '(size=(n, n_col))\n', (2775, 2792), True, 'import numpy as np\n'), ((2802, 2824), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': 'arr'}), '(data=arr)\n', (2814, 2824), True, 'import pandas as pd\n'), ((4408, 4422), 'numpy.ones', 'np.ones', (['n_obs'], {}), '(n_obs)\n', (4415, 4422), True, 'import numpy as np\n'), ((6649, 6662), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (6660, 6662), False, 'from collections import OrderedDict\n'), ((7637, 7693), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': '""""""', 'index': 'df.index', 'columns': "['class']"}), "(data='', index=df.index, columns=['class'])\n", (7649, 7693), True, 'import pandas as pd\n'), ((7870, 7896), 'pandas.notnull', 'pd.notnull', (["preds['class']"], {}), "(preds['class'])\n", (7880, 7896), True, 'import pandas as pd\n'), ((9683, 9708), 'os.path.exists', 'os.path.exists', (['pred_file'], {}), '(pred_file)\n', (9697, 9708), False, 'import os\n')] |
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
from bld.project_paths import project_paths_join as ppj
from src.simulation_study.data_generating_process import data_generating_process
from src.simulation_study.sim_study import fix_simulation_params
# Simulate data from three data-generating processes, bin the running
# variable, and render one RDD scatter panel per process.
np.random.seed(123)
data_dict = {}
for model in ["linear", "poly", "nonpolynomial"]:
    # Draw data from each data generating process.
    sim_params = fix_simulation_params(
        n=500, M=100, model=model, discrete=False, cutoff=0, tau=0.75, noise_var=0.25
    )
    data_temp = data_generating_process(params=sim_params)
    # Bin data.
    rmin = min(data_temp["r"])
    binsize = 0.07
    cutoff = sim_params["cutoff"]
    # Calculate midpoint of lowest bin.
    binmp_lowest = np.floor((rmin - cutoff) / binsize) * binsize + binsize / 2 + cutoff
    # Assign each running variable observation its bin.
    # Each observation's bin midpoint is shifted by the lowest midpoint and
    # divided by the bin width, yielding consecutive integer bin numbers.
    data_temp["binnum"] = round(
        (
            (
                np.floor((data_temp["r"] - cutoff) / binsize) * binsize
                + binsize / 2
                + cutoff
            )
            - binmp_lowest
        )
        / binsize
    )
    # Calculate mean of outcome and running variable for each discrete value.
    data_temp = data_temp.groupby(["binnum"], as_index=False).mean()
    # Omit first and last bins as they hold too few observations.
    data_temp = data_temp[3:-3]
    data_dict["data_" + model] = data_temp.rename({"y": "y_" + model})
# Plot binned data.
sns.set_style("whitegrid")
fig, ax = plt.subplots(figsize=(24, 6), sharex=True)
plt.subplots_adjust(wspace=0.3)
# Specify subplot arrangement and outcome by
# different data generating processes.
plot_dict = {"131": "linear", "132": "poly", "133": "nonpolynomial"}
for subplot in plot_dict.keys():
    plt.subplot(subplot)
    # Prepare data.
    data_graph = data_dict["data_" + plot_dict[subplot]]
    for d in [0, 1]:
        # Plot data and quadratic fit separately for each side of cutoff.
        p = sns.regplot(
            "r",
            "y",
            data=data_graph.loc[data_graph["d"] == d],
            fit_reg=False,
            order=2,
            ci=None,
            color="blue",
            scatter_kws={"s": 50, "alpha": 0.5},
            truncate=True,
        )
    p.tick_params(labelsize=18)
    plt.xlabel("R", size=22)
    plt.ylabel("Y", size=22)
    # Mark the treatment cutoff on each panel.
    plt.axvline(x=cutoff, color="black", alpha=0.8, linestyle="--")
    # Customize subplot's title and label for different outcomes.
    if subplot == "131":
        plt.title("Panel A", size=26, loc="left")
    elif subplot == "132":
        plt.title("Panel B", size=26, loc="left")
    elif subplot == "133":
        plt.title("Panel C", size=26, loc="left")
    else:
        pass
# Save locally and into the build output directory.
plt.savefig("a.png")
plt.savefig(ppj("OUT_FIGURES", "simulation_study", "simulated_rdd_graphs.png"))
| [
"matplotlib.pyplot.title",
"seaborn.set_style",
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.axvline",
"numpy.random.seed",
"bld.project_paths.project_paths_join",
"numpy.floor",
"src.simulation_study.sim_study.fix_simulation_params",
"seaborn.regplot",
"matplotlib.pyplot.subplots_adjust",
"m... | [((278, 297), 'numpy.random.seed', 'np.random.seed', (['(123)'], {}), '(123)\n', (292, 297), True, 'import numpy as np\n'), ((1494, 1520), 'seaborn.set_style', 'sns.set_style', (['"""whitegrid"""'], {}), "('whitegrid')\n", (1507, 1520), True, 'import seaborn as sns\n'), ((1532, 1574), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(24, 6)', 'sharex': '(True)'}), '(figsize=(24, 6), sharex=True)\n', (1544, 1574), True, 'import matplotlib.pyplot as plt\n'), ((1575, 1606), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'wspace': '(0.3)'}), '(wspace=0.3)\n', (1594, 1606), True, 'import matplotlib.pyplot as plt\n'), ((2766, 2786), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""a.png"""'], {}), "('a.png')\n", (2777, 2786), True, 'import matplotlib.pyplot as plt\n'), ((432, 536), 'src.simulation_study.sim_study.fix_simulation_params', 'fix_simulation_params', ([], {'n': '(500)', 'M': '(100)', 'model': 'model', 'discrete': '(False)', 'cutoff': '(0)', 'tau': '(0.75)', 'noise_var': '(0.25)'}), '(n=500, M=100, model=model, discrete=False, cutoff=0,\n tau=0.75, noise_var=0.25)\n', (453, 536), False, 'from src.simulation_study.sim_study import fix_simulation_params\n'), ((564, 606), 'src.simulation_study.data_generating_process.data_generating_process', 'data_generating_process', ([], {'params': 'sim_params'}), '(params=sim_params)\n', (587, 606), False, 'from src.simulation_study.data_generating_process import data_generating_process\n'), ((1799, 1819), 'matplotlib.pyplot.subplot', 'plt.subplot', (['subplot'], {}), '(subplot)\n', (1810, 1819), True, 'import matplotlib.pyplot as plt\n'), ((2325, 2349), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""R"""'], {'size': '(22)'}), "('R', size=22)\n", (2335, 2349), True, 'import matplotlib.pyplot as plt\n'), ((2354, 2378), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Y"""'], {'size': '(22)'}), "('Y', size=22)\n", (2364, 2378), True, 'import matplotlib.pyplot as plt\n'), ((2383, 2446), 
'matplotlib.pyplot.axvline', 'plt.axvline', ([], {'x': 'cutoff', 'color': '"""black"""', 'alpha': '(0.8)', 'linestyle': '"""--"""'}), "(x=cutoff, color='black', alpha=0.8, linestyle='--')\n", (2394, 2446), True, 'import matplotlib.pyplot as plt\n'), ((2799, 2865), 'bld.project_paths.project_paths_join', 'ppj', (['"""OUT_FIGURES"""', '"""simulation_study"""', '"""simulated_rdd_graphs.png"""'], {}), "('OUT_FIGURES', 'simulation_study', 'simulated_rdd_graphs.png')\n", (2802, 2865), True, 'from bld.project_paths import project_paths_join as ppj\n'), ((2005, 2178), 'seaborn.regplot', 'sns.regplot', (['"""r"""', '"""y"""'], {'data': "data_graph.loc[data_graph['d'] == d]", 'fit_reg': '(False)', 'order': '(2)', 'ci': 'None', 'color': '"""blue"""', 'scatter_kws': "{'s': 50, 'alpha': 0.5}", 'truncate': '(True)'}), "('r', 'y', data=data_graph.loc[data_graph['d'] == d], fit_reg=\n False, order=2, ci=None, color='blue', scatter_kws={'s': 50, 'alpha': \n 0.5}, truncate=True)\n", (2016, 2178), True, 'import seaborn as sns\n'), ((2547, 2588), 'matplotlib.pyplot.title', 'plt.title', (['"""Panel A"""'], {'size': '(26)', 'loc': '"""left"""'}), "('Panel A', size=26, loc='left')\n", (2556, 2588), True, 'import matplotlib.pyplot as plt\n'), ((2624, 2665), 'matplotlib.pyplot.title', 'plt.title', (['"""Panel B"""'], {'size': '(26)', 'loc': '"""left"""'}), "('Panel B', size=26, loc='left')\n", (2633, 2665), True, 'import matplotlib.pyplot as plt\n'), ((769, 804), 'numpy.floor', 'np.floor', (['((rmin - cutoff) / binsize)'], {}), '((rmin - cutoff) / binsize)\n', (777, 804), True, 'import numpy as np\n'), ((2701, 2742), 'matplotlib.pyplot.title', 'plt.title', (['"""Panel C"""'], {'size': '(26)', 'loc': '"""left"""'}), "('Panel C', size=26, loc='left')\n", (2710, 2742), True, 'import matplotlib.pyplot as plt\n'), ((968, 1013), 'numpy.floor', 'np.floor', (["((data_temp['r'] - cutoff) / binsize)"], {}), "((data_temp['r'] - cutoff) / binsize)\n", (976, 1013), True, 'import numpy as np\n')] |
"""
pip to do :
pip install QtAwesome
pip install pygmsh
pip install gmsh
"""
import sys
import os
from PyQt5.QtWidgets import *
#QMainWindow,QApplication,QRadioButton,QWidget,QPushButton,QAction,QLineEdit,QGridLayout,QGroupBox,QMessageBox,QHBoxLayout,QComboBox,QVBoxLayout,QLabel,QStatusBar,QCheckBox,QSlider,QFileDialog,QTabWidget
from PyQt5.QtCore import *
from PyQt5.QtGui import *
import qtawesome as qta
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas
import pygmsh
#from Test23 import *
#https://www.mfitzp.com/tutorials/layouts/
class Canvas(FigureCanvas):
    """Matplotlib canvas embedded in a Qt widget, showing a demo sine plot."""

    def __init__(self, parent):
        figure, self.ax = plt.subplots(figsize=(5, 4), dpi=200)
        super().__init__(figure)
        self.setParent(parent)
        # Demo content: one period-ish of a sine wave.
        xs = np.arange(0, 2, 0.1)
        ys = np.sin(xs)
        self.ax.plot(xs, ys)
        self.ax.set(xlabel="time", ylabel="sin", title="test")
        self.ax.grid()
class Color(QWidget):
    """Widget filled with a single solid color (layout placeholder)."""

    def __init__(self, color):
        super(Color, self).__init__()
        self.setAutoFillBackground(True)
        pal = self.palette()
        pal.setColor(QPalette.Window, QColor(color))
        self.setPalette(pal)
class IconLabel(QWidget):
    """Widget showing a qtawesome icon followed by a text label."""

    # Pixmap size used when rendering the icon.
    IconSize = QSize(16, 16)
    # Pixels of spacing between the icon and the text.
    HorizontalSpacing = 1

    def __init__(self, qta_id, text, final_stretch=True):
        """
        :param qta_id: qtawesome icon identifier (e.g. "fa.wrench")
        :param text: text shown to the right of the icon
        :param final_stretch: append a stretch so the content stays left-aligned
        """
        # BUG FIX: the original called ``super(QWidget, self).__init__()``,
        # which starts the MRO lookup *after* QWidget and therefore skips
        # QWidget's own initializer; ``super()`` initializes the real base.
        super().__init__()
        layout = QHBoxLayout()
        layout.setContentsMargins(0, 0, 0, 0)
        self.setLayout(layout)
        icon = QLabel()
        icon.setPixmap(qta.icon(qta_id).pixmap(self.IconSize))
        layout.addWidget(icon)
        layout.addSpacing(self.HorizontalSpacing)
        layout.addWidget(QLabel(text))
        if final_stretch:
            layout.addStretch()
class MainWindow(QMainWindow):
    """Main window of the C2A aerodynamic computation chain GUI.

    Hosts three group boxes (meshing, solver parameters, post-processing),
    a demo matplotlib canvas, a menu bar and a status bar, and wires the
    buttons to external tools (GMSH, SU2, ParaView).
    """
    def __init__(self):
        QMainWindow.__init__(self)
        self.setMinimumSize(QSize(1200, 1000))  # Window size width and height
        self.setWindowTitle("C2A - Chaine de Calcul Aerodynamique")
        self.setWindowIcon(QIcon("gears.png"))
        # Create new action
        newAction = QAction(QIcon('new.png'), '&New', self)
        newAction.setShortcut('Ctrl+N')
        newAction.setStatusTip('New document')
        newAction.triggered.connect(self.newCall)
        # Create open action
        openAction = QAction(QIcon('open.png'), '&Open', self)
        openAction.setShortcut('Ctrl+O')
        openAction.setStatusTip('Open document')
        openAction.triggered.connect(self.getFile)
        # Create exit action
        exitAction = QAction(QIcon('exit.png'), '&Exit', self)
        exitAction.setShortcut('Ctrl+Q')
        exitAction.setStatusTip('Exit application')
        exitAction.triggered.connect(self.exitCall)
        # Create menu bar and add actions
        menuBar = self.menuBar()
        fileMenu = menuBar.addMenu('&File')
        fileMenu.addAction(newAction)
        fileMenu.addAction(openAction)
        fileMenu.addAction(exitAction)
        menuBar.addMenu('Parameters')
        menuBar.addMenu('Tools')
        menuBar.addMenu('Help')
        # Add status bar
        self.statusBar = QStatusBar()
        self.setStatusBar(self.statusBar)
        self.statusBar.showMessage("Current Folder Location :" + os.getcwd())
        # Combo boxes
        # solver: spatial scheme, time scheme, governing equations
        self.cb1 = QComboBox()
        self.cb1.addItems(["JST","LAX-FRIEDRICH","CUSP","ROE","AUSM","HLLC","TURKEL_PREC","MSW"])
        self.cb1.currentIndexChanged.connect(self.selectionchange)
        self.cb2 = QComboBox()
        self.cb2.addItems(["RUNGE-KUTTA_EXPLICIT","EULER_IMPLICIT","EULER_EXPLICIT"])
        self.cb3 = QComboBox()
        self.cb3.addItems(["EULER", "NAVIER_STOKES","WAVE_EQUATION", "HEAT_EQUATION", "FEM_ELASTICITY","POISSON_EQUATION"])
        # Check boxes
        self.checkbox1 = QCheckBox("Plot", self)
        self.checkbox2 = QCheckBox("Reconstruction MUSCL", self)
        self.checkbox3 = QCheckBox("Acceleration Multigrid", self)
        # CFL slider (0..10)
        self.sl1 = QSlider(Qt.Horizontal)
        self.sl1.setFocusPolicy(Qt.StrongFocus)
        self.sl1.setTickPosition(QSlider.TicksBothSides)
        self.sl1.setTickInterval(10)
        self.sl1.setSingleStep(1)
        self.sl1.setMinimum(0)
        self.sl1.setMaximum(10)
        self.sl1.valueChanged.connect(self.value_changed)
        # Labels
        # meshing
        self.l11 = QLabel('Charger un maillage : ')
        # solver
        self.l0 = QLabel('Solver :')
        self.l1 = QLabel('Choix du schéma spacial :', self)
        self.l2 = QLabel('Choix du schéma temporel :', self)
        self.l3 = QLabel('Entrer un CFL :', self)
        self.l4 = QLabel('CFL Choisi :' + str(self.sl1.value()), self)
        self.l5 = QLabel('Mach Number :')
        self.l6 = QLabel('FREESTREAM_PRESSURE :')
        self.l7 = QLabel('FREESTREAM_Temperature :')
        # Buttons
        # meshing
        self.Load_mesh = QPushButton("Charger un maillage .su2")
        self.Load_mesh.clicked.connect(self.getFile)
        self.Launch1 = QPushButton("Lancer GMSH")
        self.Launch1.clicked.connect(self.Click_gmsh)
        # solver
        self.b1 = QPushButton("Lancer la simu")
        self.b1.clicked.connect(self.Click1)
        self.Load_cfg = QPushButton("Charger un config .cfg")
        self.Load_cfg.clicked.connect(self.getFile)
        self.Launch2 = QPushButton("Lancer SU2")
        self.Launch2.clicked.connect(self.Click_su2)
        # post-processing
        self.Launch3 = QPushButton("Lancer PARAVIEW")
        self.Launch3.clicked.connect(self.Click_paraview)
        # Text boxes
        # meshing
        self.t11 = QLineEdit()
        # solver
        self.t1 = QLineEdit()
        self.t2 = QLineEdit()
        self.t3 = QLineEdit()
        self.t4 = QLineEdit()
        # Group boxes
        self.groupbox1 = QGroupBox("Maillage :")
        self.groupbox2 = QGroupBox("Paramètre de simulation numérique :")
        self.groupbox3 = QGroupBox("Post-traitement :")
        # Horizontal top-level layout
        hbox = QHBoxLayout()
        self.setLayout(hbox)
        ######### STRUCTURE ############
        ### MESHING group
        vbox1 = QVBoxLayout()
        self.groupbox1.setLayout(vbox1)
        self.radiobutton = QRadioButton("Structuré")
        vbox1.addWidget(self.radiobutton)
        self.radiobutton = QRadioButton("Non Structuré")
        vbox1.addWidget(self.radiobutton)
        self.radiobutton = QRadioButton("Hybride")
        vbox1.addWidget(self.radiobutton)
        self.radiobutton = QRadioButton("Chimère")
        vbox1.addWidget(self.radiobutton)
        vbox1.addStretch(1)
        vbox1.addWidget(IconLabel("fa.angle-double-right", "Charger un maillage .su2 :"))
        vbox1.addWidget(self.t11)
        vbox1.addWidget(self.Load_mesh)
        vbox1.addWidget(self.Launch1)
        ### SOLVER group
        vbox2 = QVBoxLayout()
        self.groupbox2.setLayout(vbox2)
        vbox2.addWidget(IconLabel("fa.wrench", "Choix du solver :"))
        vbox2.addWidget(self.cb3)
        vbox2.addWidget(self.l1)
        vbox2.addWidget(self.cb1)
        vbox2.addStretch()
        vbox2.addWidget(self.l2)
        vbox2.addWidget(self.cb2)
        vbox2.addStretch()
        vbox2.addWidget(self.l3)
        vbox2.addWidget(self.t1)
        vbox2.addWidget(self.b1)
        vbox2.addStretch()
        vbox2.addWidget(self.l4)
        vbox2.addWidget(self.sl1)
        vbox2.addWidget(self.l5)
        vbox2.addWidget(self.t2)
        vbox2.addWidget(self.l6)
        vbox2.addWidget(self.t3)
        vbox2.addWidget(self.l7)
        vbox2.addWidget(self.t4)
        vbox2.addWidget(self.checkbox1)
        vbox2.addWidget(self.checkbox2)
        vbox2.addWidget(self.checkbox3)
        vbox2.addWidget(self.Launch2)
        ### POST-PROCESSING group
        vbox3 = QVBoxLayout()
        self.groupbox3.setLayout(vbox3)
        self.radiobutton = QRadioButton("Structuré")
        vbox3.addWidget(self.radiobutton)
        self.radiobutton = QRadioButton("Non Structuré")
        vbox3.addWidget(self.radiobutton)
        vbox3.addStretch(1)
        vbox3.addWidget(self.Launch3)
        hbox.addWidget(self.groupbox1)
        hbox.addWidget(self.groupbox2)
        hbox.addWidget(self.groupbox3)
        hbox.addWidget(Canvas(self))
        widget = QWidget()
        widget.setLayout(hbox)
        self.setCentralWidget(widget)

    def getFile(self):
        """Open a file dialog, remember the chosen path and show it in the UI."""
        self.filename = QFileDialog.getOpenFileName()[0]  # argument : filter="csv (*.csv)"
        print("File :", self.filename)
        self.statusBar.showMessage("Maillage chargé : " + self.filename)
        self.t11.setText(self.filename)

    def value_changed(self):
        """Mirror the CFL slider value in its label."""
        self.l4.setText('CFL Choisi :' + str(self.sl1.value()))

    def selectionchange(self, i):
        """Log the spatial-scheme combo-box contents and the new selection."""
        print("Items in the list are :")
        for count in range(self.cb1.count()):
            print(self.cb1.itemText(count))
        print("Current index", i, "selection changed ", self.cb1.currentText())

    def Click1(self):
        """Run the built-in demo simulation with the chosen CFL and plot flag."""
        check = self.checkbox1.isChecked()
        print(check)
        # inputCFL = float(self.t1.text())
        inputCFL = self.sl1.value()
        print(self.sl1.value())
        print("Run Simulation")
        self.statusBar.showMessage('Run Simulation')
        # NOTE(review): `simu` comes from the commented-out `from Test23
        # import *`; this call raises NameError until that import is restored.
        simu(m=201, CFL=inputCFL, plot=check)  # CFL = 0.8
        self.statusBar.showMessage('End Simulation')

    def Click_gmsh(self):
        """Launch the GMSH mesh generator (hard-coded local install path)."""
        self.statusBar.showMessage('Lancement de GMSH')
        os.system("C://Users//Gameiro//Documents//CFD//gmsh-4.8.4-Windows64//gmsh.exe")

    def Click_su2(self):
        """Run the SU2 CFD solver on the configuration file named in the UI."""
        # We must move into the case directory so SU2 finds the mesh file, or
        # alternatively put the mesh's absolute path in the config file.
        # Workaround: hard-coded QuickStart directory.
        os.chdir("C://Users//Gameiro//Documents//CFD//SU2-master//QuickStart")
        print(os.getcwd())
        self.statusBar.showMessage('Lancement de SU2')
        # Retrieve the configuration file name from the text box.
        config_name = self.t11.text()
        os.system("SU2_CFD "+ config_name)  # inv_NACA0012.cfg

    def Click_paraview(self):
        """Launch ParaView for post-processing (hard-coded install path)."""
        self.statusBar.showMessage('Lancement de PARAVIEW')
        file = 'C:\\Program Files\\ParaView 5.9.1-Windows-Python3.8-msvc2017-64bit\\bin\\paraview.exe'
        os.system('"' + file + '"')

    def openCall(self):
        print('Open')

    def newCall(self):
        print('New')

    def exitCall(self):
        """Quit the application when the Exit menu action is triggered."""
        print('Exit app')
        # BUG FIX: the original called ``sys.exit(app.exec_())`` but ``app``
        # is local to ``main()``, so triggering Exit raised NameError (and
        # re-entering the event loop would be wrong anyway).  Ask the running
        # application instance to quit instead.
        QApplication.instance().quit()
def main():
    """Create the Qt application, show the main window and run the event loop."""
    application = QApplication(sys.argv)
    window = MainWindow()
    # Available styles: 'Breeze', 'Oxygen', 'QtCurve', 'Windows', 'Fusion'.
    application.setStyle('Fusion')
    window.show()
    sys.exit(application.exec_())
if __name__ == '__main__':
    main()
| [
"os.getcwd",
"qtawesome.icon",
"os.system",
"numpy.sin",
"numpy.arange",
"matplotlib.pyplot.subplots",
"os.chdir"
] | [((708, 745), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(5, 4)', 'dpi': '(200)'}), '(figsize=(5, 4), dpi=200)\n', (720, 745), True, 'import matplotlib.pyplot as plt\n'), ((817, 837), 'numpy.arange', 'np.arange', (['(0)', '(2)', '(0.1)'], {}), '(0, 2, 0.1)\n', (826, 837), True, 'import numpy as np\n'), ((848, 857), 'numpy.sin', 'np.sin', (['t'], {}), '(t)\n', (854, 857), True, 'import numpy as np\n'), ((9728, 9807), 'os.system', 'os.system', (['"""C://Users//Gameiro//Documents//CFD//gmsh-4.8.4-Windows64//gmsh.exe"""'], {}), "('C://Users//Gameiro//Documents//CFD//gmsh-4.8.4-Windows64//gmsh.exe')\n", (9737, 9807), False, 'import os\n'), ((10018, 10088), 'os.chdir', 'os.chdir', (['"""C://Users//Gameiro//Documents//CFD//SU2-master//QuickStart"""'], {}), "('C://Users//Gameiro//Documents//CFD//SU2-master//QuickStart')\n", (10026, 10088), False, 'import os\n'), ((10257, 10292), 'os.system', 'os.system', (["('SU2_CFD ' + config_name)"], {}), "('SU2_CFD ' + config_name)\n", (10266, 10292), False, 'import os\n'), ((10512, 10539), 'os.system', 'os.system', (['(\'"\' + file + \'"\')'], {}), '(\'"\' + file + \'"\')\n', (10521, 10539), False, 'import os\n'), ((10103, 10114), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (10112, 10114), False, 'import os\n'), ((3278, 3289), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (3287, 3289), False, 'import os\n'), ((1565, 1581), 'qtawesome.icon', 'qta.icon', (['qta_id'], {}), '(qta_id)\n', (1573, 1581), True, 'import qtawesome as qta\n')] |
import numpy as np
def point_cross_cnt_arr(x_arr: np.array, y_arr: np.array, c: int) -> np.array:
    """Count mismatches between ``y_arr`` and five edits of ``x_arr`` for color ``c``.

    The five candidate edits are: delete every ``c`` cell, keep the cells
    as-is, fill whole rows containing ``c``, fill whole columns containing
    ``c``, and fill the row+column cross of each ``c`` cell.

    :param x_arr: source grid
    :param y_arr: target grid to compare against
    :param c: color value to match
    :return: array ``[del_loss, keep_loss, row_loss, col_loss, cross_loss]``
    """
    src_mask = (x_arr == c)
    dst_mask = (y_arr == c)
    # Per-row / per-column counts of color ``c`` in the source grid.
    row_hits = src_mask.sum(axis=1, keepdims=True)
    col_hits = src_mask.sum(axis=0, keepdims=True)
    # A cell is in the "cross" if its row or its column contains ``c``.
    cross_mask = np.minimum(row_hits + col_hits, 1)
    losses = [
        dst_mask.astype(int).sum(),                  # delete everything
        (src_mask != dst_mask).astype(int).sum(),    # keep unchanged
        (row_hits != dst_mask).astype(int).sum(),    # fill matching rows
        (col_hits != dst_mask).astype(int).sum(),    # fill matching columns
        (cross_mask != dst_mask).astype(int).sum(),  # fill the cross
    ]
    return np.array(losses)
def point_cross_fit_arr(x_arr: np.array, c: int, op: int) -> np.array:
    """Apply edit ``op`` for color ``c`` to ``x_arr`` and return a 0/1 grid.

    :param x_arr: 2-D integer grid
    :param c: color value to match
    :param op: 0 = delete all, 1 = keep matches, 2 = fill matching rows,
        3 = fill matching columns, 4 = fill the row+column cross
    :return: integer array of the same shape, 1 where color ``c`` is painted
    """
    if op == 0:
        # BUG FIX: ``np.int`` was deprecated in NumPy 1.20 and removed in
        # 1.24; use the builtin ``int`` (identical dtype) throughout.
        return np.zeros(x_arr.shape, dtype=int)
    base_arr = (x_arr == c)
    # Per-row / per-column presence counts of color ``c``.
    sum_i = base_arr.sum(axis=1, keepdims=True)
    sum_j = base_arr.sum(axis=0, keepdims=True)
    cross_arr = np.minimum(sum_i + sum_j, 1)
    if op == 1:
        return base_arr.astype(int)
    elif op == 4:
        return cross_arr
    elif op == 2:
        # Broadcast the row indicator across all columns.
        return sum_i @ np.ones((1, x_arr.shape[1]), dtype=int)
    else:  # op == 3
        # Broadcast the column indicator across all rows.
        return np.ones((x_arr.shape[0], 1), dtype=int) @ sum_j
if __name__ == "__main__":
# point_cross_cnt_arr
x = np.array([[0, 0, 0, 0], [0, 1, 0, 0], [0, 0, 0, 0]])
y = np.array([[0, 1, 0, 0], [1, 1, 1, 1], [0, 1, 0, 0]])
print(point_cross_cnt_arr(x, y, 1))
x = np.array([[0, 0, 0, 0], [0, 1, 0, 0], [0, 0, 0, 0]])
y = np.array([[0, 0, 0, 0], [1, 1, 1, 1], [0, 1, 0, 0]])
print(point_cross_cnt_arr(x, y, 1))
x = np.array([[0, 0, 0, 0], [0, 1, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]])
y = np.array([[0, 0, 0, 0], [1, 1, 1, 1], [0, 1, 0, 0], [0, 0, 0, 0]])
print(point_cross_cnt_arr(x, y, 1))
x = np.array([[0, 0, 0, 0], [0, 1, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]])
y = np.array([[0, 1, 0, 0], [0, 1, 0, 1], [0, 1, 0, 0], [0, 1, 0, 0]])
print(point_cross_cnt_arr(x, y, 1))
# point_cross_fit_arr
x = np.array([[0, 5, 0, 0], [0, 1, 0, 0], [0, 0, 7, 0]])
print(point_cross_fit_arr(x, 1, 0))
print(point_cross_fit_arr(x, 1, 1))
print(point_cross_fit_arr(x, 1, 2))
print(point_cross_fit_arr(x, 1, 3))
print(point_cross_fit_arr(x, 1, 4))
| [
"numpy.zeros",
"numpy.minimum",
"numpy.array",
"numpy.ones"
] | [((416, 444), 'numpy.minimum', 'np.minimum', (['(sum_i + sum_j)', '(1)'], {}), '(sum_i + sum_j, 1)\n', (426, 444), True, 'import numpy as np\n'), ((648, 711), 'numpy.array', 'np.array', (['[del_loss, keep_loss, row_loss, col_loss, cross_loss]'], {}), '([del_loss, keep_loss, row_loss, col_loss, cross_loss])\n', (656, 711), True, 'import numpy as np\n'), ((1025, 1053), 'numpy.minimum', 'np.minimum', (['(sum_i + sum_j)', '(1)'], {}), '(sum_i + sum_j, 1)\n', (1035, 1053), True, 'import numpy as np\n'), ((1387, 1439), 'numpy.array', 'np.array', (['[[0, 0, 0, 0], [0, 1, 0, 0], [0, 0, 0, 0]]'], {}), '([[0, 0, 0, 0], [0, 1, 0, 0], [0, 0, 0, 0]])\n', (1395, 1439), True, 'import numpy as np\n'), ((1448, 1500), 'numpy.array', 'np.array', (['[[0, 1, 0, 0], [1, 1, 1, 1], [0, 1, 0, 0]]'], {}), '([[0, 1, 0, 0], [1, 1, 1, 1], [0, 1, 0, 0]])\n', (1456, 1500), True, 'import numpy as np\n'), ((1549, 1601), 'numpy.array', 'np.array', (['[[0, 0, 0, 0], [0, 1, 0, 0], [0, 0, 0, 0]]'], {}), '([[0, 0, 0, 0], [0, 1, 0, 0], [0, 0, 0, 0]])\n', (1557, 1601), True, 'import numpy as np\n'), ((1610, 1662), 'numpy.array', 'np.array', (['[[0, 0, 0, 0], [1, 1, 1, 1], [0, 1, 0, 0]]'], {}), '([[0, 0, 0, 0], [1, 1, 1, 1], [0, 1, 0, 0]])\n', (1618, 1662), True, 'import numpy as np\n'), ((1711, 1777), 'numpy.array', 'np.array', (['[[0, 0, 0, 0], [0, 1, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]]'], {}), '([[0, 0, 0, 0], [0, 1, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]])\n', (1719, 1777), True, 'import numpy as np\n'), ((1786, 1852), 'numpy.array', 'np.array', (['[[0, 0, 0, 0], [1, 1, 1, 1], [0, 1, 0, 0], [0, 0, 0, 0]]'], {}), '([[0, 0, 0, 0], [1, 1, 1, 1], [0, 1, 0, 0], [0, 0, 0, 0]])\n', (1794, 1852), True, 'import numpy as np\n'), ((1901, 1967), 'numpy.array', 'np.array', (['[[0, 0, 0, 0], [0, 1, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]]'], {}), '([[0, 0, 0, 0], [0, 1, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]])\n', (1909, 1967), True, 'import numpy as np\n'), ((1976, 2042), 'numpy.array', 'np.array', (['[[0, 1, 0, 0], [0, 1, 0, 
1], [0, 1, 0, 0], [0, 1, 0, 0]]'], {}), '([[0, 1, 0, 0], [0, 1, 0, 1], [0, 1, 0, 0], [0, 1, 0, 0]])\n', (1984, 2042), True, 'import numpy as np\n'), ((2118, 2170), 'numpy.array', 'np.array', (['[[0, 5, 0, 0], [0, 1, 0, 0], [0, 0, 7, 0]]'], {}), '([[0, 5, 0, 0], [0, 1, 0, 0], [0, 0, 7, 0]])\n', (2126, 2170), True, 'import numpy as np\n'), ((817, 852), 'numpy.zeros', 'np.zeros', (['x_arr.shape'], {'dtype': 'np.int'}), '(x_arr.shape, dtype=np.int)\n', (825, 852), True, 'import numpy as np\n'), ((1194, 1236), 'numpy.ones', 'np.ones', (['(1, x_arr.shape[1])'], {'dtype': 'np.int'}), '((1, x_arr.shape[1]), dtype=np.int)\n', (1201, 1236), True, 'import numpy as np\n'), ((1273, 1315), 'numpy.ones', 'np.ones', (['(x_arr.shape[0], 1)'], {'dtype': 'np.int'}), '((x_arr.shape[0], 1), dtype=np.int)\n', (1280, 1315), True, 'import numpy as np\n')] |
"""
Test a trained autoencoder
"""
import argparse
import os
import random
import time
import cv2 # pytype: disable=import-error
import imgaug
import numpy as np
import torch as th
from ae.autoencoder import load_ae, preprocess_image
from ae.data_loader import CheckFliplrPostProcessor, get_image_augmenter
parser = argparse.ArgumentParser()
parser.add_argument("-f", "--folder", help="Log folder", type=str, default="logs/recorded_data/")
parser.add_argument("-ae", "--ae-path", help="Path to saved AE", type=str, default="")
parser.add_argument("-n", "--n-samples", help="Max number of samples", type=int, default=20)
parser.add_argument("--seed", help="Random generator seed", type=int, default=0)
parser.add_argument("-augment", "--augment", action="store_true", default=False, help="Use image augmenter")
args = parser.parse_args()
if args.seed is not None:
random.seed(args.seed)
np.random.seed(args.seed)
th.manual_seed(args.seed)
if th.cuda.is_available():
th.backends.cudnn.deterministic = True
th.backends.cudnn.benchmark = False
if not args.folder.endswith("/"):
args.folder += "/"
autoencoder = load_ae(args.ae_path)
images = [im for im in os.listdir(args.folder) if im.endswith(".jpg")]
images = np.array(images)
n_samples = len(images)
augmenter = None
if args.augment:
augmenter = get_image_augmenter()
# Small benchmark
start_time = time.time()
for _ in range(args.n_samples):
# Load test image
image_idx = np.random.randint(n_samples)
image_path = args.folder + images[image_idx]
image = cv2.imread(image_path)
input_image = image
encoded = autoencoder.encode_from_raw_image(input_image)
reconstructed_image = autoencoder.decode(encoded)[0]
time_per_image = (time.time() - start_time) / args.n_samples
print("{:.4f}s".format(time_per_image))
print("{:.4f}Hz".format(1 / time_per_image))
errors = []
for _ in range(args.n_samples):
# Load test image
image_idx = np.random.randint(n_samples)
image_path = args.folder + images[image_idx]
image = cv2.imread(image_path)
postprocessor = CheckFliplrPostProcessor()
if augmenter is not None:
input_image = augmenter.augment_image(image, hooks=imgaug.HooksImages(postprocessor=postprocessor))
else:
input_image = image
if postprocessor.flipped:
image = imgaug.augmenters.Fliplr(1).augment_image(image)
cropped_image = preprocess_image(image, convert_to_rgb=False, normalize=False)
encoded = autoencoder.encode_from_raw_image(input_image)
reconstructed_image = autoencoder.decode(encoded)[0]
error = np.mean((cropped_image - reconstructed_image) ** 2)
errors.append(error)
# Baselines error:
# error = np.mean((cropped_image - np.zeros_like(cropped_image)) ** 2)
# print("Error {:.2f}".format(error))
# Plot reconstruction
cv2.imshow("Original", image)
# TODO: plot cropped and resized image
cv2.imshow("Cropped", cropped_image)
if augmenter is not None:
cv2.imshow("Augmented", input_image)
cv2.imshow("Reconstruction", reconstructed_image)
# stop if escape is pressed
k = cv2.waitKey(0) & 0xFF
if k == 27:
break
print("Min error: {:.2f}".format(np.min(errors)))
print("Max error: {:.2f}".format(np.max(errors)))
print("Mean error: {:.2f} +/- {:.2f}".format(np.mean(errors), np.std(errors)))
print("Median error: {:.2f}".format(np.median(errors)))
| [
"numpy.random.seed",
"argparse.ArgumentParser",
"numpy.random.randint",
"numpy.mean",
"ae.data_loader.CheckFliplrPostProcessor",
"cv2.imshow",
"numpy.std",
"numpy.max",
"random.seed",
"ae.autoencoder.preprocess_image",
"cv2.waitKey",
"torch.manual_seed",
"numpy.median",
"numpy.min",
"tor... | [((320, 345), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (343, 345), False, 'import argparse\n'), ((1151, 1172), 'ae.autoencoder.load_ae', 'load_ae', (['args.ae_path'], {}), '(args.ae_path)\n', (1158, 1172), False, 'from ae.autoencoder import load_ae, preprocess_image\n'), ((1254, 1270), 'numpy.array', 'np.array', (['images'], {}), '(images)\n', (1262, 1270), True, 'import numpy as np\n'), ((1400, 1411), 'time.time', 'time.time', ([], {}), '()\n', (1409, 1411), False, 'import time\n'), ((872, 894), 'random.seed', 'random.seed', (['args.seed'], {}), '(args.seed)\n', (883, 894), False, 'import random\n'), ((899, 924), 'numpy.random.seed', 'np.random.seed', (['args.seed'], {}), '(args.seed)\n', (913, 924), True, 'import numpy as np\n'), ((929, 954), 'torch.manual_seed', 'th.manual_seed', (['args.seed'], {}), '(args.seed)\n', (943, 954), True, 'import torch as th\n'), ((962, 984), 'torch.cuda.is_available', 'th.cuda.is_available', ([], {}), '()\n', (982, 984), True, 'import torch as th\n'), ((1346, 1367), 'ae.data_loader.get_image_augmenter', 'get_image_augmenter', ([], {}), '()\n', (1365, 1367), False, 'from ae.data_loader import CheckFliplrPostProcessor, get_image_augmenter\n'), ((1482, 1510), 'numpy.random.randint', 'np.random.randint', (['n_samples'], {}), '(n_samples)\n', (1499, 1510), True, 'import numpy as np\n'), ((1572, 1594), 'cv2.imread', 'cv2.imread', (['image_path'], {}), '(image_path)\n', (1582, 1594), False, 'import cv2\n'), ((1969, 1997), 'numpy.random.randint', 'np.random.randint', (['n_samples'], {}), '(n_samples)\n', (1986, 1997), True, 'import numpy as np\n'), ((2059, 2081), 'cv2.imread', 'cv2.imread', (['image_path'], {}), '(image_path)\n', (2069, 2081), False, 'import cv2\n'), ((2103, 2129), 'ae.data_loader.CheckFliplrPostProcessor', 'CheckFliplrPostProcessor', ([], {}), '()\n', (2127, 2129), False, 'from ae.data_loader import CheckFliplrPostProcessor, get_image_augmenter\n'), ((2424, 2486), 
'ae.autoencoder.preprocess_image', 'preprocess_image', (['image'], {'convert_to_rgb': '(False)', 'normalize': '(False)'}), '(image, convert_to_rgb=False, normalize=False)\n', (2440, 2486), False, 'from ae.autoencoder import load_ae, preprocess_image\n'), ((2618, 2669), 'numpy.mean', 'np.mean', (['((cropped_image - reconstructed_image) ** 2)'], {}), '((cropped_image - reconstructed_image) ** 2)\n', (2625, 2669), True, 'import numpy as np\n'), ((2866, 2895), 'cv2.imshow', 'cv2.imshow', (['"""Original"""', 'image'], {}), "('Original', image)\n", (2876, 2895), False, 'import cv2\n'), ((2943, 2979), 'cv2.imshow', 'cv2.imshow', (['"""Cropped"""', 'cropped_image'], {}), "('Cropped', cropped_image)\n", (2953, 2979), False, 'import cv2\n'), ((3061, 3110), 'cv2.imshow', 'cv2.imshow', (['"""Reconstruction"""', 'reconstructed_image'], {}), "('Reconstruction', reconstructed_image)\n", (3071, 3110), False, 'import cv2\n'), ((1197, 1220), 'os.listdir', 'os.listdir', (['args.folder'], {}), '(args.folder)\n', (1207, 1220), False, 'import os\n'), ((1757, 1768), 'time.time', 'time.time', ([], {}), '()\n', (1766, 1768), False, 'import time\n'), ((3019, 3055), 'cv2.imshow', 'cv2.imshow', (['"""Augmented"""', 'input_image'], {}), "('Augmented', input_image)\n", (3029, 3055), False, 'import cv2\n'), ((3151, 3165), 'cv2.waitKey', 'cv2.waitKey', (['(0)'], {}), '(0)\n', (3162, 3165), False, 'import cv2\n'), ((3237, 3251), 'numpy.min', 'np.min', (['errors'], {}), '(errors)\n', (3243, 3251), True, 'import numpy as np\n'), ((3287, 3301), 'numpy.max', 'np.max', (['errors'], {}), '(errors)\n', (3293, 3301), True, 'import numpy as np\n'), ((3349, 3364), 'numpy.mean', 'np.mean', (['errors'], {}), '(errors)\n', (3356, 3364), True, 'import numpy as np\n'), ((3366, 3380), 'numpy.std', 'np.std', (['errors'], {}), '(errors)\n', (3372, 3380), True, 'import numpy as np\n'), ((3419, 3436), 'numpy.median', 'np.median', (['errors'], {}), '(errors)\n', (3428, 3436), True, 'import numpy as np\n'), ((2220, 
2267), 'imgaug.HooksImages', 'imgaug.HooksImages', ([], {'postprocessor': 'postprocessor'}), '(postprocessor=postprocessor)\n', (2238, 2267), False, 'import imgaug\n'), ((2354, 2381), 'imgaug.augmenters.Fliplr', 'imgaug.augmenters.Fliplr', (['(1)'], {}), '(1)\n', (2378, 2381), False, 'import imgaug\n')] |
import numpy as np
from scipy.fftpack import fft2,ifft2
from cqt_toolbox.icqt import icqt
def inv_mcft(mcft_in, cqt_params, fbank_sr_domain):
    """
    Reconstruct a time-domain signal from its Multi-resolution Common
    Fate Transform (MCFT).

    The intermediary time-frequency representation is the Constant-Q
    Transform (CQT), inverted here with the optimized implementation by
    Schorkhuber et al. (http://www.cs.tut.fi/sgn/arg/CQT/); python port:
    https://github.com/interactiveaudiolab/MCFT/tree/master/mcft_python/cqt_toolbox

    Inputs:
        mcft_in: 4d numpy array containing the MCFT coefficients
        cqt_params: dictionary of CQT parameters; must contain
            'num_freq_bin' (total number of frequency bins) and
            'num_time_frame' (total number of time frames) in addition
            to the parameters expected by icqt. The dictionary passed in
            is NOT modified.
        fbank_sr_domain: 4d numpy array containing the scale-rate-domain
            filterbank

    Output:
        signal_rec: numpy array containing the reconstructed time-domain
            signal
    """
    # Invert the scale-rate-domain filtering to recover the 2-D CQT.
    print('Reconstructing the CQT...')
    sig_cqt_rec = mcft_to_cqt(mcft_in, fbank_sr_domain)

    # Invert the CQT to recover the time-domain signal.
    print('Reconstructing the time-domain signal...')
    num_freq_bin = cqt_params['num_freq_bin']
    num_time_frame = cqt_params['num_time_frame']

    # BUG FIX: the original code del-eted the two bookkeeping keys from the
    # caller's dictionary (mutating a shared argument, so a second call with
    # the same dict would raise KeyError). Build a filtered shallow copy
    # instead and leave cqt_params untouched.
    cqt_dict_full = {key: value for key, value in cqt_params.items()
                     if key not in ('num_freq_bin', 'num_time_frame')}
    # Trim zero-padding before handing the CQT to the inverse transform.
    cqt_dict_full['cqt'] = sig_cqt_rec[0:num_freq_bin, 0:num_time_frame]
    signal_rec, _ = icqt(cqt_dict_full)
    return signal_rec
def mcft_to_cqt(mcft_in, fbank_sr_domain):
    """
    Invert the scale-rate-domain filtering of the MCFT and recover the
    time-frequency representation (CQT) of the signal.

    Each (scale, rate) slice of the MCFT is transformed back to the
    scale-rate domain with a 2-D FFT, multiplied by the conjugate of the
    matching filter, and the results are accumulated. Dividing by the
    accumulated filter energy (plus a small epsilon to avoid division by
    zero) yields the 2-D spectrum of the CQT, which a 2-D inverse FFT
    turns back into the time-frequency plane.

    Inputs:
        mcft_in: 4d numpy array containing the MCFT coefficients
        fbank_sr_domain: 4d numpy array containing a bank of
            scale-rate-domain filters

    Output:
        2d numpy array with the reconstructed time-frequency
        representation (complex in general)
    """
    num_scale_ctrs, num_rate_ctrs, nfft_scale, nfft_rate = mcft_in.shape

    fft_shape = [nfft_scale, nfft_rate]
    filt_energy = np.zeros((nfft_scale, nfft_rate), dtype='complex128')
    filtered_sum = np.zeros((nfft_scale, nfft_rate), dtype='complex128')

    for scale_idx in range(num_scale_ctrs):
        for rate_idx in range(num_rate_ctrs):
            spectrum = fft2(mcft_in[scale_idx, rate_idx, :, :], fft_shape)
            filt = fbank_sr_domain[scale_idx, rate_idx, :, :]
            filt_conj = np.conj(filt)
            # Accumulate filter energy first, then the inverse-filtered
            # spectrum, mirroring the per-slice update order.
            filt_energy += filt * filt_conj
            filtered_sum += spectrum * filt_conj

    # Normalize by the total filter energy and return to the CQT domain.
    return ifft2(filtered_sum / (filt_energy + 1e-16))
| [
"numpy.conj",
"cqt_toolbox.icqt.icqt",
"numpy.zeros",
"numpy.shape",
"scipy.fftpack.ifft2",
"scipy.fftpack.fft2"
] | [((1979, 1998), 'cqt_toolbox.icqt.icqt', 'icqt', (['cqt_dict_full'], {}), '(cqt_dict_full)\n', (1983, 1998), False, 'from cqt_toolbox.icqt import icqt\n'), ((2695, 2712), 'numpy.shape', 'np.shape', (['mcft_in'], {}), '(mcft_in)\n', (2703, 2712), True, 'import numpy as np\n'), ((2778, 2831), 'numpy.zeros', 'np.zeros', (['(nfft_scale, nfft_rate)'], {'dtype': '"""complex128"""'}), "((nfft_scale, nfft_rate), dtype='complex128')\n", (2786, 2831), True, 'import numpy as np\n'), ((2849, 2902), 'numpy.zeros', 'np.zeros', (['(nfft_scale, nfft_rate)'], {'dtype': '"""complex128"""'}), "((nfft_scale, nfft_rate), dtype='complex128')\n", (2857, 2902), True, 'import numpy as np\n'), ((3470, 3491), 'scipy.fftpack.ifft2', 'ifft2', (['cqt_2dft_ratio'], {}), '(cqt_2dft_ratio)\n', (3475, 3491), False, 'from scipy.fftpack import fft2, ifft2\n'), ((3046, 3086), 'scipy.fftpack.fft2', 'fft2', (['mcft_temp', '[nfft_scale, nfft_rate]'], {}), '(mcft_temp, [nfft_scale, nfft_rate])\n', (3050, 3086), False, 'from scipy.fftpack import fft2, ifft2\n'), ((3182, 3203), 'numpy.conj', 'np.conj', (['filt_sr_temp'], {}), '(filt_sr_temp)\n', (3189, 3203), True, 'import numpy as np\n'), ((3248, 3269), 'numpy.conj', 'np.conj', (['filt_sr_temp'], {}), '(filt_sr_temp)\n', (3255, 3269), True, 'import numpy as np\n')] |
import numpy as np
import utils.make_coordinates as make_coordinates
def averaged_slope_intercept(image, lines, left, right):
    """
    Fit one averaged line to the left lane markings and one to the right.

    Each detected segment is fit with a degree-1 polynomial and classified
    by the sign of its slope (negative -> left lane, positive -> right lane
    in image coordinates). The per-side (slope, intercept) pairs are
    averaged and converted to pixel coordinates by make_coordinates.

    `left` / `right` carry fits from the caller (e.g. the previous frame);
    they are used as a fallback when `lines` is None or malformed (the
    except branch). The module-level globals `left_avg` / `right_avg` cache
    the last good averages between calls.
    NOTE(review): if a side never produces a fit, its global stays
    undefined and make_coordinates raises NameError — confirm the caller
    seeds these globals.
    """
    left_fit = []
    right_fit = []
    global left_avg
    global right_avg
    try:
        for line in lines:
            x1, y1, x2, y2 = line.reshape(4)
            slope, intercept = np.polyfit((x1, x2), (y1, y2), 1)
            if slope < 0:
                left_fit.append((slope, intercept))
                left = left_fit.copy()
            elif slope > 0:
                right_fit.append((slope, intercept))
                right = right_fit.copy()
    except Exception:
        # No segments (lines is None) or malformed data: fall back to the
        # fits accumulated so far / passed in by the caller.
        left_fit = left
        right_fit = right
    # BUG FIX: only update the cached averages when this frame produced
    # fits. The original averaged unconditionally — np.average([]) emits a
    # RuntimeWarning and returns nan — and then filtered with a fragile
    # str(...) != str('nan') comparison.
    if len(left_fit) > 0:
        left_avg = np.average(left_fit, axis=0)
    if len(right_fit) > 0:
        right_avg = np.average(right_fit, axis=0)
    left_line = make_coordinates.make_coordinates(image, left_avg)
    right_line = make_coordinates.make_coordinates(image, right_avg)
    return np.array([left_line, right_line])
| [
"numpy.array",
"utils.make_coordinates.make_coordinates",
"numpy.average",
"numpy.polyfit"
] | [((751, 779), 'numpy.average', 'np.average', (['left_fit'], {'axis': '(0)'}), '(left_fit, axis=0)\n', (761, 779), True, 'import numpy as np\n'), ((804, 833), 'numpy.average', 'np.average', (['right_fit'], {'axis': '(0)'}), '(right_fit, axis=0)\n', (814, 833), True, 'import numpy as np\n'), ((1009, 1059), 'utils.make_coordinates.make_coordinates', 'make_coordinates.make_coordinates', (['image', 'left_avg'], {}), '(image, left_avg)\n', (1042, 1059), True, 'import utils.make_coordinates as make_coordinates\n'), ((1077, 1128), 'utils.make_coordinates.make_coordinates', 'make_coordinates.make_coordinates', (['image', 'right_avg'], {}), '(image, right_avg)\n', (1110, 1128), True, 'import utils.make_coordinates as make_coordinates\n'), ((1140, 1173), 'numpy.array', 'np.array', (['[left_line, right_line]'], {}), '([left_line, right_line])\n', (1148, 1173), True, 'import numpy as np\n'), ((308, 341), 'numpy.polyfit', 'np.polyfit', (['(x1, x2)', '(y1, y2)', '(1)'], {}), '((x1, x2), (y1, y2), 1)\n', (318, 341), True, 'import numpy as np\n')] |
from bimt.query.cfg import config
import xml.etree.ElementTree as ET
import pandas as pd
import numpy as np
class ProcessQuery:
    """Parse a query XML file and write the "consultas" and "esperados" CSVs.

    The XML is expected to contain QUERY elements, each with QueryNumber,
    QueryText and a Records/Item list whose `score` attribute encodes one
    digit per evaluator vote.
    """

    def __init__(self, xml_file):
        # Parsed XML root element (see ProcessQuery.get()).
        self.xml_file = xml_file

    def transform(self, raw_query):
        """Normalize a raw query: trim punctuation from both ends, upper-case.

        Note: str.strip removes the given characters only from the ends of
        the string, not from its interior.
        """
        # BUG FIX: the original used "\{" / "\}" — invalid escape sequences
        # that Python keeps literally but warns about (SyntaxWarning on
        # modern interpreters). This spelling denotes the identical
        # character set: ; , ? ! ( ) { } \ / '
        query = raw_query.strip(";,?!(){}\\/'")
        query = query.upper()
        return query

    def from_tag_to_consultas_json(self, tag):
        """Build one "consultas" row (dict) from a QUERY element."""
        query_text = tag.find("QueryText").text
        return {
            "QueryNumber": tag.find("QueryNumber").text,
            "QueryText": query_text,
            "ProcessedQuery": self.transform(query_text)
        }

    def write_consultas_file(self):
        """Write every query (raw + processed text) to the CONSULTAS CSV."""
        consulta_file_path = config["DEFAULT"]["CONSULTAS"]
        queries_tags = self.xml_file.findall('QUERY')
        consulta_data = [self.from_tag_to_consultas_json(tag) for tag in queries_tags]
        consulta_df = pd.DataFrame(consulta_data)
        consulta_df.to_csv(consulta_file_path, sep=";", index=False)

    def parse_doc_score_field(self, score_as_text):
        """Average the per-character relevance votes of a score string.

        E.g. "0012" -> mean of [0, 0, 1, 2] = 0.75.
        """
        return np.mean([int(digit) for digit in score_as_text])

    def from_tag_to_esperados_json(self, tag):
        """Build the list of expected-document rows for one QUERY element."""
        query_number = tag.find("QueryNumber").text
        records_list = tag.findall("Records/Item")
        return [{
            "QueryNumber": query_number,
            "DocNumber": t.text,
            "DocVotes": self.parse_doc_score_field(t.get("score"))}
            for t in records_list]

    def write_esperados_file(self):
        """Write the expected (query, document, votes) triples to the ESPERADOS CSV."""
        esperados_file_path = config["DEFAULT"]["ESPERADOS"]
        queries_tags = self.xml_file.findall('QUERY')
        # BUG FIX: the original flattened with np.ravel over a list of
        # per-query record lists and then extend()-ed each element. When all
        # queries happened to have the same record count, np.ravel flattened
        # the dicts themselves into a 1-d array and extend(dict) appended
        # dict *keys* instead of rows. A plain nested comprehension flattens
        # correctly in every case.
        esperados_data = [record
                          for tag in queries_tags
                          for record in self.from_tag_to_esperados_json(tag)]
        esperados_df = pd.DataFrame(esperados_data)
        esperados_df.to_csv(esperados_file_path, sep=";", index=False)

    @staticmethod
    def get():
        """Factory: parse the XML file configured under LEIA and wrap it."""
        query_xml_file_path = config["DEFAULT"]["LEIA"]
        queries_xml = ET.parse(query_xml_file_path).getroot()
        return ProcessQuery(queries_xml)
| [
"pandas.DataFrame",
"xml.etree.ElementTree.parse",
"numpy.mean",
"numpy.ravel"
] | [((856, 883), 'pandas.DataFrame', 'pd.DataFrame', (['consulta_data'], {}), '(consulta_data)\n', (868, 883), True, 'import pandas as pd\n'), ((1076, 1097), 'numpy.mean', 'np.mean', (['score_values'], {}), '(score_values)\n', (1083, 1097), True, 'import numpy as np\n'), ((1728, 1752), 'numpy.ravel', 'np.ravel', (['esperados_data'], {}), '(esperados_data)\n', (1736, 1752), True, 'import numpy as np\n'), ((1873, 1900), 'pandas.DataFrame', 'pd.DataFrame', (['unpacked_data'], {}), '(unpacked_data)\n', (1885, 1900), True, 'import pandas as pd\n'), ((2085, 2114), 'xml.etree.ElementTree.parse', 'ET.parse', (['query_xml_file_path'], {}), '(query_xml_file_path)\n', (2093, 2114), True, 'import xml.etree.ElementTree as ET\n')] |
#%% import different modules
import matplotlib
import matplotlib.pyplot as plt
from one_step_kmeans_method import *
import numpy as np
import numpy.linalg as li
from generate_data import *
from measure_error import *
from ADMM_sidel_method import *
from cvx_siedel_method import *
from ADMM_siedel_tv_method import *
from cvx_siedel_tv_method import *
#%% parameter configuration
# Experiment hyper-parameters. Their exact semantics are defined by the
# star-imported generate_data helpers (not visible here); the names suggest
# counts/sizes/variances — TODO confirm against generate_data.
num_x = 10
num_y = 100
num_z = 1000
num_pixel = 100
size_grid = 10
dimension = 10
min_dist_x = 0
variance_x = 100
variance_yz = 100
#%% Generate data
# Synthesize centroids and grid samples; y/t/z/r are the observations,
# assignments, candidate points and responsibilities consumed below
# (presumably — semantics live in generate_data).
x = generate_centroids(num_x,dimension,variance_x,min_dist_x)
x_2 = generate_2centroids(size_grid,num_x)
y,t,z,r,distribution_x = generate_grid_samples(x,x_2,num_pixel,num_z,size_grid,variance_yz)
#%% Two model
# Baseline: one-step k-means.
opts = [10**(-6)]
result_o = one_step_kmeans(y,z,num_x,opts)
x_o = result_o['x']
t_o = result_o['t']
r_o = result_o['r']
#%% admm
# ADMM (Gauss-Seidel) solver; opts = [outer iters, ADMM settings, inner settings]
# — exact meaning defined by Alter_ADMM_siedel.
opts=[100,[10**(-5),2000,4*10**(-3),10**(-2),(1/1000)**(1/10000)],[20]]
verbose_a,r_a,t_a,x_a= Alter_ADMM_siedel(y,z,num_x,opts,verbose=True)
#%% admm_tv
# Same solver with a total-variation term.
opts=[100,[10**(-5),2000,4*10**(-3),10**(-2),(1/1000)**(1/10000)],[20]]
verbose_av,r_av,t_av,x_av= Alter_ADMM_siedel_tv(y,z,num_x,opts,verbose=True)
#%% cvx
# CVX-based reference solver.
opts=[100,[20]]
verbose_c,r_c,t_c,x_c = Alter_cvx_siedel(y,z,10,opts,verbose=True)
#%% cvx_tv
opts=[100,[20]]
verbose_cv,r_cv,t_cv,x_cv = Alter_cvx_siedel_tv(y,z,10,opts,verbose=True)
#%% measure1
# Per-method x distributions: column means of stacked [r; t].
# .getA1() flattens np.matrix results to 1-d arrays for the ADMM outputs.
distribution_x_a = np.mean(np.vstack((r_a,t_a)),0).getA1()
distribution_x_av = np.mean(np.vstack((r_av,t_av)),0).getA1()
distribution_x_c = np.mean(np.vstack((r_c,t_c)),0)
distribution_x_o = np.mean(np.vstack((r_o,t_o)),0)
distribution_x_cv = np.mean(np.vstack((r_cv,t_cv)),0)
# Centroid-recovery errors against the ground truth x.
err_o = measure_X(x_o,x,distribution_x_o,distribution_x)
err_c = measure_X(x_c,x,distribution_x_c,distribution_x)
err_a = measure_X(x_a,x,distribution_x_a,distribution_x)
err_av = measure_X(x_av,x,distribution_x_av,distribution_x)
err_cv = measure_X(x_cv,x,distribution_x_cv,distribution_x)
print(err_o)
print(err_c)
print(err_a)
print(err_cv)
print(err_av)
#%% Value
def obj1(t1,x1,r1):
    """Objective value for a candidate solution (t1, x1, r1).

    Computes ||y - t1 x1||_F^2 plus the r1-weighted sum of the squared
    pairwise distances between rows of z and rows of x1; the kron terms
    expand ||z_i - x_j||^2 = ||z_i||^2 + ||x_j||^2 - 2 z_i . x_j into the
    matrix m. Reads the module-level y, z and num_x.
    """
    global y
    global z
    m=np.kron(np.ones(num_x), np.sum(np.mat(np.power(z, 2)), 1)) + \
        np.kron(np.ones([np.size(z, 0), 1]), np.sum(np.mat(np.power(x1, 2)), 1).T) - 2 * z.dot(x1.T)
    return li.norm(y-t1.dot(x1),'fro')**2+ np.sum(np.multiply(r1,m))
#%% Object value
# Objective value of each method's solution; the last line is the ground truth.
print(obj1(t_a,x_a,r_a))
print(obj1(t_o,x_o,r_o))
print(obj1(t_c,x_c,r_c))
print(obj1(t_cv,x_cv,r_cv))
print(obj1(t_av,x_av,r_av))
print(obj1(t,x,r))
#%% measure2
# Assignment-recovery errors (see measure_error for the metric definition).
err_T_av = measure_T(x_av,t_av,x,t,distribution_x_av,distribution_x)
err_T_cv = measure_T(x_cv,t_cv,x,t,distribution_x_cv,distribution_x)
err_T_a = measure_T(x_a,t_a,x,t,distribution_x_a,distribution_x)
err_T_c = measure_T(x_c,t_c,x,t,distribution_x_c,distribution_x)
print(err_T_av)
print(err_T_cv)
print(err_T_a)
print(err_T_c)
#%% measure 3
# Alternative assignment metric that also uses the observations y.
err_T_2_av = measure_T2(y,x_av,t_av,x,t)
err_T_2_cv = measure_T2(y,x_cv,t_cv,x,t)
err_T_2_a = measure_T2(y,x_a,t_a,x,t)
err_T_2_c = measure_T2(y,x_c,t_c,x,t)
print(err_T_2_av)
print(err_T_2_cv)
print(err_T_2_a)
print(err_T_2_c)
#%% give a plot
# Plot per-iteration objective traces against the ground-truth and k-means
# levels. NOTE: `iter` shadows the built-in of the same name.
size = np.size(verbose_a)
iter = np.linspace(0,size-1,size)
fig,ax = plt.subplots()
ax.line1 = plt.plot(iter,verbose_a,label='admm')
ax.line2 = plt.plot(iter,verbose_c,label='cvx')
ax.line3 = plt.plot(iter,obj1(t,x,r)*np.ones(size),label='real')
ax.line4 = plt.plot(iter,obj1(t_o,x_o,r_o)*np.ones(size),label='Kmeans')
plt.legend()
plt.show() | [
"numpy.size",
"matplotlib.pyplot.show",
"numpy.multiply",
"matplotlib.pyplot.plot",
"numpy.power",
"matplotlib.pyplot.legend",
"numpy.ones",
"numpy.linspace",
"matplotlib.pyplot.subplots",
"numpy.vstack"
] | [((3118, 3136), 'numpy.size', 'np.size', (['verbose_a'], {}), '(verbose_a)\n', (3125, 3136), True, 'import numpy as np\n'), ((3144, 3174), 'numpy.linspace', 'np.linspace', (['(0)', '(size - 1)', 'size'], {}), '(0, size - 1, size)\n', (3155, 3174), True, 'import numpy as np\n'), ((3181, 3195), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (3193, 3195), True, 'import matplotlib.pyplot as plt\n'), ((3207, 3246), 'matplotlib.pyplot.plot', 'plt.plot', (['iter', 'verbose_a'], {'label': '"""admm"""'}), "(iter, verbose_a, label='admm')\n", (3215, 3246), True, 'import matplotlib.pyplot as plt\n'), ((3256, 3294), 'matplotlib.pyplot.plot', 'plt.plot', (['iter', 'verbose_c'], {'label': '"""cvx"""'}), "(iter, verbose_c, label='cvx')\n", (3264, 3294), True, 'import matplotlib.pyplot as plt\n'), ((3431, 3443), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (3441, 3443), True, 'import matplotlib.pyplot as plt\n'), ((3444, 3454), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3452, 3454), True, 'import matplotlib.pyplot as plt\n'), ((1543, 1564), 'numpy.vstack', 'np.vstack', (['(r_c, t_c)'], {}), '((r_c, t_c))\n', (1552, 1564), True, 'import numpy as np\n'), ((1594, 1615), 'numpy.vstack', 'np.vstack', (['(r_o, t_o)'], {}), '((r_o, t_o))\n', (1603, 1615), True, 'import numpy as np\n'), ((1646, 1669), 'numpy.vstack', 'np.vstack', (['(r_cv, t_cv)'], {}), '((r_cv, t_cv))\n', (1655, 1669), True, 'import numpy as np\n'), ((3330, 3343), 'numpy.ones', 'np.ones', (['size'], {}), '(size)\n', (3337, 3343), True, 'import numpy as np\n'), ((3401, 3414), 'numpy.ones', 'np.ones', (['size'], {}), '(size)\n', (3408, 3414), True, 'import numpy as np\n'), ((1422, 1443), 'numpy.vstack', 'np.vstack', (['(r_a, t_a)'], {}), '((r_a, t_a))\n', (1431, 1443), True, 'import numpy as np\n'), ((1482, 1505), 'numpy.vstack', 'np.vstack', (['(r_av, t_av)'], {}), '((r_av, t_av))\n', (1491, 1505), True, 'import numpy as np\n'), ((2308, 2326), 'numpy.multiply', 
'np.multiply', (['r1', 'm'], {}), '(r1, m)\n', (2319, 2326), True, 'import numpy as np\n'), ((2106, 2120), 'numpy.ones', 'np.ones', (['num_x'], {}), '(num_x)\n', (2113, 2120), True, 'import numpy as np\n'), ((2136, 2150), 'numpy.power', 'np.power', (['z', '(2)'], {}), '(z, 2)\n', (2144, 2150), True, 'import numpy as np\n'), ((2182, 2195), 'numpy.size', 'np.size', (['z', '(0)'], {}), '(z, 0)\n', (2189, 2195), True, 'import numpy as np\n'), ((2216, 2231), 'numpy.power', 'np.power', (['x1', '(2)'], {}), '(x1, 2)\n', (2224, 2231), True, 'import numpy as np\n')] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.