code stringlengths 31 1.05M | apis list | extract_api stringlengths 97 1.91M |
|---|---|---|
# gates.py
import numpy as np
def NAND(x1, x2):
    """NAND gate as a single perceptron: outputs 0 only when both inputs fire."""
    inputs = np.array([x1, x2])
    weights = np.array([-0.5, -0.5])
    bias = 0.7
    activation = np.sum(weights * inputs) + bias
    return 1 if activation > 0 else 0
def AND(a, b):
    """AND built from NAND gates: NOT(a NAND b), where NOT x = NAND(x, x)."""
    nand_ab = NAND(a, b)
    return NAND(nand_ab, nand_ab)
def OR(a, b):
    """OR built from NAND gates: (NOT a) NAND (NOT b)."""
    not_a = NAND(a, a)
    not_b = NAND(b, b)
    return NAND(not_a, not_b)
def XOR(x1, x2):
    """XOR composed from the gates above: (x1 NAND x2) AND (x1 OR x2)."""
    not_both = NAND(x1, x2)
    at_least_one = OR(x1, x2)
    return AND(not_both, at_least_one)
| [
"numpy.array",
"numpy.sum"
] | [((58, 76), 'numpy.array', 'np.array', (['[x1, x2]'], {}), '([x1, x2])\n', (66, 76), True, 'import numpy as np\n'), ((85, 107), 'numpy.array', 'np.array', (['[-0.5, -0.5]'], {}), '([-0.5, -0.5])\n', (93, 107), True, 'import numpy as np\n'), ((137, 150), 'numpy.sum', 'np.sum', (['(w * x)'], {}), '(w * x)\n', (143, 150), True, 'import numpy as np\n')] |
import numpy as np

# One-off conversion script: read the normalized user vectors from TSV
# and re-save them as a binary .npy file (much faster to load later).
print('load vectors')
data = np.loadtxt('../data/all_users_normalized.tsv')
print(data.shape)
print('save npy')
np.save('../data/all_users_normalized.npy', data)
| [
"numpy.save",
"numpy.loadtxt"
] | [((49, 95), 'numpy.loadtxt', 'np.loadtxt', (['"""../data/all_users_normalized.tsv"""'], {}), "('../data/all_users_normalized.tsv')\n", (59, 95), True, 'import numpy as np\n'), ((134, 183), 'numpy.save', 'np.save', (['"""../data/all_users_normalized.npy"""', 'data'], {}), "('../data/all_users_normalized.npy', data)\n", (141, 183), True, 'import numpy as np\n')] |
import numpy as np
from pathlib import Path
from edges_io.io import S1P
def test_s1p_read(datadir: Path):
    """An .s1p file should load as complex S11 samples, one per frequency."""
    rel = "Receiver01_25C_2019_11_26_040_to_200MHz/S11/Ambient01/External01.s1p"
    s1p = S1P(datadir / rel)
    s11 = s1p.s11
    assert np.iscomplex(s11).all()
    assert len(s11) == len(s1p.freq)
def test_s1_read_db(datadir: Path):
    """An .s1p file stored in dB format should also load as complex S11."""
    s1p = S1P(datadir / "s11_db.s1p")
    s11 = s1p.s11
    assert np.iscomplex(s11).all()
    assert len(s11) == len(s1p.freq)
| [
"edges_io.io.S1P",
"numpy.iscomplex"
] | [((225, 232), 'edges_io.io.S1P', 'S1P', (['fl'], {}), '(fl)\n', (228, 232), False, 'from edges_io.io import S1P\n'), ((396, 403), 'edges_io.io.S1P', 'S1P', (['fl'], {}), '(fl)\n', (399, 403), False, 'from edges_io.io import S1P\n'), ((252, 273), 'numpy.iscomplex', 'np.iscomplex', (['s1p.s11'], {}), '(s1p.s11)\n', (264, 273), True, 'import numpy as np\n'), ((423, 444), 'numpy.iscomplex', 'np.iscomplex', (['s1p.s11'], {}), '(s1p.s11)\n', (435, 444), True, 'import numpy as np\n')] |
import matplotlib.pyplot as plt
import numpy as np
from collections import defaultdict
from datetime import datetime
from scipy.stats import ortho_group
from methods import FrankWolfe
from methods import ContrNewton
from oracles import create_log_sum_exp_oracle
def RunExperiment(n, m, mu, fw_iters, cn_iters, cn_inner_iters,
                  save=True, c=1.0):
    """
    Run Frank-Wolfe and Contracting Newton methods for a particular
    ('n', 'm', 'mu')-instance of a random log-sum-exp problem, then plot
    (and optionally save) convergence graphs vs. iterations and vs. time.

    :param n: int; problem dimension (number of variables)
    :param m: int; number of terms inside the log-sum-exp
    :param mu: float; smoothing parameter of the objective
    :param fw_iters: int; number of Frank-Wolfe iterations
    :param cn_iters: int; number of outer Contracting Newton iterations
    :param cn_inner_iters: int; inner iterations for Contracting Newton
    :param save: bool; if True, write the figures as PDF under plots/
    :param c: float; contraction coefficient passed to ContrNewton
    """
    np.random.seed(31415)  # fixed seed so experiments are reproducible
    A = np.random.rand(n, m) * 2 - 1   # entries uniform in [-1, 1)
    b = np.random.rand(m) * 2 - 1
    oracle = create_log_sum_exp_oracle(A.T, b, mu)
    x_0 = np.zeros(n)
    mu_str = str(mu)
    c_str = str(c)
    title = 'n = %d, m = %d, mu = %s (c = %s)' % (n, m, mu_str, c_str)
    filename = 'plots/%d_%d_%s_c%s' % (n, m, mu_str.split('.')[-1],
                                       '_'.join(c_str.split('.')))
    print('Experiment: %s' % title)
    print('Filename: %s' % filename)
    start_timestamp = datetime.now()
    print('FW ...', end=' ', flush=True)
    _, status, fw = FrankWolfe(oracle, x_0, n_iters=fw_iters)
    print('DONE. Time: %s' % (str(datetime.now() - start_timestamp)))
    start_timestamp = datetime.now()
    print('CN ...', end=' ', flush=True)
    _, status, cn = ContrNewton(oracle, x_0, n_iters=cn_iters,
                                inner_iters=cn_inner_iters, c=c)
    print('DONE. Time: %s' % (str(datetime.now() - start_timestamp)))
    # BUGFIX: fw_iters // 100 is 0 whenever fw_iters < 100, which made the
    # [::fw_skip] slices below raise "slice step cannot be zero".
    fw_skip = max(1, fw_iters // 100)
    # Figure 1: residual vs. iteration number.
    plt.figure(figsize=(5, 4))
    fw_func = np.array(fw['func'])
    cn_func = np.array(cn['func'])
    mn2 = min(np.min(fw_func), np.min(cn_func))  # best value seen by either method
    fw_res = fw_func - mn2   # residuals w.r.t. the best observed value
    cn_res = cn_func - mn2
    plt.semilogy(list(range(0, fw_iters+1, fw_skip)),
                 fw_res[::fw_skip], ':', label='Frank-Wolfe', linewidth=4)
    plt.semilogy(cn_res[0:-1], label='Contr.Newton', color='red', linewidth=2)
    plt.grid()
    plt.xlabel('Iterations', fontsize=14)
    plt.ylabel('Function value', fontsize=14)
    plt.title(title, fontsize=14)
    plt.legend(fontsize=14)
    plt.tight_layout()
    if save:
        plt.savefig(filename + '_iters.pdf')
    # Figure 2: residual vs. wall-clock time.
    plt.figure(figsize=(5, 4))
    plt.semilogy(fw['time'][::fw_skip], fw_res[::fw_skip], ':',
                 label='Frank-Wolfe', linewidth=4)
    plt.semilogy(cn['time'][0:-1], cn_res[0:-1], label='Contr.Newton',
                 color='red', linewidth=2)
    plt.grid()
    plt.ylabel('Function value', fontsize=14)
    plt.xlabel('Time, s', fontsize=14)
    plt.title(title, fontsize=14)
    plt.legend(fontsize=14)
    plt.tight_layout()
    if save:
        plt.savefig(filename + '_time.pdf')
    # Figure 3: raw cn['t'] series.  NOTE(review): semantics of cn['t'] is
    # defined inside ContrNewton -- presumably per-iteration statistics;
    # confirm against methods.ContrNewton.
    plt.figure(figsize=(5, 4))
    plt.plot(cn['t'])
def RunExpGroup(mu=0.1):
    """
    Run a group of experiments with a particular smoothing parameter 'mu'
    and different 'n', 'm', 'c'.
    """
    # (n, m, fw_iters, cn_iters, c) for each experiment, in run order.
    configs = [
        (100, 1000, 5000, 200, 1.0),
        (100, 1000, 5000, 80, 0.05),
        (100, 2500, 5000, 200, 1.0),
        (100, 2500, 5000, 100, 0.05),
        (500, 1000, 6000, 200, 1.0),
        (500, 1000, 6000, 100, 0.05),
        (500, 2500, 6000, 200, 1.0),
        (500, 2500, 6000, 100, 0.05),
    ]
    for n, m, fw_iters, cn_iters, c in configs:
        RunExperiment(n=n, m=m, mu=mu, fw_iters=fw_iters,
                      cn_iters=cn_iters, cn_inner_iters=3000, c=c)
# Module-level driver: run the full experiment suite for two smoothings.
RunExpGroup(mu = 0.1)
RunExpGroup(mu = 0.05)
| [
"matplotlib.pyplot.title",
"numpy.random.seed",
"matplotlib.pyplot.figure",
"methods.ContrNewton",
"matplotlib.pyplot.tight_layout",
"matplotlib.pyplot.semilogy",
"datetime.datetime.now",
"methods.FrankWolfe",
"matplotlib.pyplot.legend",
"numpy.min",
"matplotlib.pyplot.ylabel",
"matplotlib.pyp... | [((531, 552), 'numpy.random.seed', 'np.random.seed', (['(31415)'], {}), '(31415)\n', (545, 552), True, 'import numpy as np\n'), ((638, 675), 'oracles.create_log_sum_exp_oracle', 'create_log_sum_exp_oracle', (['A.T', 'b', 'mu'], {}), '(A.T, b, mu)\n', (663, 675), False, 'from oracles import create_log_sum_exp_oracle\n'), ((691, 702), 'numpy.zeros', 'np.zeros', (['n'], {}), '(n)\n', (699, 702), True, 'import numpy as np\n'), ((1057, 1071), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (1069, 1071), False, 'from datetime import datetime\n'), ((1133, 1174), 'methods.FrankWolfe', 'FrankWolfe', (['oracle', 'x_0'], {'n_iters': 'fw_iters'}), '(oracle, x_0, n_iters=fw_iters)\n', (1143, 1174), False, 'from methods import FrankWolfe\n'), ((1272, 1286), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (1284, 1286), False, 'from datetime import datetime\n'), ((1348, 1423), 'methods.ContrNewton', 'ContrNewton', (['oracle', 'x_0'], {'n_iters': 'cn_iters', 'inner_iters': 'cn_inner_iters', 'c': 'c'}), '(oracle, x_0, n_iters=cn_iters, inner_iters=cn_inner_iters, c=c)\n', (1359, 1423), False, 'from methods import ContrNewton\n'), ((1570, 1596), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(5, 4)'}), '(figsize=(5, 4))\n', (1580, 1596), True, 'import matplotlib.pyplot as plt\n'), ((1611, 1631), 'numpy.array', 'np.array', (["fw['func']"], {}), "(fw['func'])\n", (1619, 1631), True, 'import numpy as np\n'), ((1646, 1666), 'numpy.array', 'np.array', (["cn['func']"], {}), "(cn['func'])\n", (1654, 1666), True, 'import numpy as np\n'), ((1908, 1982), 'matplotlib.pyplot.semilogy', 'plt.semilogy', (['cn_res[0:-1]'], {'label': '"""Contr.Newton"""', 'color': '"""red"""', 'linewidth': '(2)'}), "(cn_res[0:-1], label='Contr.Newton', color='red', linewidth=2)\n", (1920, 1982), True, 'import matplotlib.pyplot as plt\n'), ((1987, 1997), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (1995, 1997), True, 'import matplotlib.pyplot as 
plt\n'), ((2002, 2039), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Iterations"""'], {'fontsize': '(14)'}), "('Iterations', fontsize=14)\n", (2012, 2039), True, 'import matplotlib.pyplot as plt\n'), ((2044, 2085), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Function value"""'], {'fontsize': '(14)'}), "('Function value', fontsize=14)\n", (2054, 2085), True, 'import matplotlib.pyplot as plt\n'), ((2090, 2119), 'matplotlib.pyplot.title', 'plt.title', (['title'], {'fontsize': '(14)'}), '(title, fontsize=14)\n', (2099, 2119), True, 'import matplotlib.pyplot as plt\n'), ((2128, 2151), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'fontsize': '(14)'}), '(fontsize=14)\n', (2138, 2151), True, 'import matplotlib.pyplot as plt\n'), ((2156, 2174), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (2172, 2174), True, 'import matplotlib.pyplot as plt\n'), ((2246, 2272), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(5, 4)'}), '(figsize=(5, 4))\n', (2256, 2272), True, 'import matplotlib.pyplot as plt\n'), ((2277, 2375), 'matplotlib.pyplot.semilogy', 'plt.semilogy', (["fw['time'][::fw_skip]", 'fw_res[::fw_skip]', '""":"""'], {'label': '"""Frank-Wolfe"""', 'linewidth': '(4)'}), "(fw['time'][::fw_skip], fw_res[::fw_skip], ':', label=\n 'Frank-Wolfe', linewidth=4)\n", (2289, 2375), True, 'import matplotlib.pyplot as plt\n'), ((2393, 2490), 'matplotlib.pyplot.semilogy', 'plt.semilogy', (["cn['time'][0:-1]", 'cn_res[0:-1]'], {'label': '"""Contr.Newton"""', 'color': '"""red"""', 'linewidth': '(2)'}), "(cn['time'][0:-1], cn_res[0:-1], label='Contr.Newton', color=\n 'red', linewidth=2)\n", (2405, 2490), True, 'import matplotlib.pyplot as plt\n'), ((2508, 2518), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (2516, 2518), True, 'import matplotlib.pyplot as plt\n'), ((2523, 2564), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Function value"""'], {'fontsize': '(14)'}), "('Function value', fontsize=14)\n", (2533, 2564), True, 'import 
matplotlib.pyplot as plt\n'), ((2569, 2603), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Time, s"""'], {'fontsize': '(14)'}), "('Time, s', fontsize=14)\n", (2579, 2603), True, 'import matplotlib.pyplot as plt\n'), ((2608, 2637), 'matplotlib.pyplot.title', 'plt.title', (['title'], {'fontsize': '(14)'}), '(title, fontsize=14)\n', (2617, 2637), True, 'import matplotlib.pyplot as plt\n'), ((2646, 2669), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'fontsize': '(14)'}), '(fontsize=14)\n', (2656, 2669), True, 'import matplotlib.pyplot as plt\n'), ((2674, 2692), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (2690, 2692), True, 'import matplotlib.pyplot as plt\n'), ((2759, 2785), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(5, 4)'}), '(figsize=(5, 4))\n', (2769, 2785), True, 'import matplotlib.pyplot as plt\n'), ((2790, 2807), 'matplotlib.pyplot.plot', 'plt.plot', (["cn['t']"], {}), "(cn['t'])\n", (2798, 2807), True, 'import matplotlib.pyplot as plt\n'), ((1681, 1696), 'numpy.min', 'np.min', (['fw_func'], {}), '(fw_func)\n', (1687, 1696), True, 'import numpy as np\n'), ((1698, 1713), 'numpy.min', 'np.min', (['cn_func'], {}), '(cn_func)\n', (1704, 1713), True, 'import numpy as np\n'), ((2196, 2232), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(filename + '_iters.pdf')"], {}), "(filename + '_iters.pdf')\n", (2207, 2232), True, 'import matplotlib.pyplot as plt\n'), ((2714, 2749), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(filename + '_time.pdf')"], {}), "(filename + '_time.pdf')\n", (2725, 2749), True, 'import matplotlib.pyplot as plt\n'), ((561, 581), 'numpy.random.rand', 'np.random.rand', (['n', 'm'], {}), '(n, m)\n', (575, 581), True, 'import numpy as np\n'), ((598, 615), 'numpy.random.rand', 'np.random.rand', (['m'], {}), '(m)\n', (612, 615), True, 'import numpy as np\n'), ((1209, 1223), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (1221, 1223), False, 'from datetime import datetime\n'), ((1490, 1504), 
'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (1502, 1504), False, 'from datetime import datetime\n')] |
import pathlib
import numpy as np
from scipy.constants import e as qe, c as c_light, m_p
from scipy.signal import hilbert
from scipy.stats import linregress
from PyHEADTAIL.impedances import wakes
from PyHEADTAIL.machines.synchrotron import Synchrotron
from PyHEADTAIL.particles.slicing import UniformBinSlicer
from PyHEADTAIL.particles.particles import Particles
def test_reswall_vs_waketable():
    """
    Check that the analytic resistive-wall wake and the same wake supplied
    as a tabulated wake file drive the same instability: track one bunch
    under each model and compare the exponential growth rates of the
    vertical centroid motion.  The comparison is statistical, so up to
    ``n_attempts`` independently generated bunches are tried before the
    test is declared failed.
    """
    n_attempts = 5
    rel_tolerance = 10e-2   # growth rates must agree to within 10%
    n_turns = 1000
    macroparticlenumber = int(1e5)
    # Beam and machine parameters
    intensity = 1.6e+12
    epsn_x = 2e-6  # Normalised horizontal emittance [m]
    epsn_y = 2e-6  # Normalised vertical emittance [m]
    sigma_z = 12.4  # [m]
    E_kin = 1.4e9  # [eV]
    E_rest = m_p * c_light**2 / qe  # [eV]
    gamma = 1 + E_kin/E_rest
    betagamma = np.sqrt(gamma**2 - 1)
    p0 = betagamma * m_p * c_light   # reference momentum [kg m/s]
    circumference = 2*np.pi*100
    Q_x = 6.22
    Q_y = 6.25
    xi_x = -0.1
    xi_y = -0.1
    Qp_x = Q_x * xi_x   # chromaticity Q' = Q * xi
    Qp_y = Q_y * xi_y
    beta_x = 16.  # circumference/(2*np.pi*Q_x)
    beta_y = 16.  # circumference/(2*np.pi*Q_y)
    alpha_mom = 0.027
    # Non-linear map
    h_RF = 7
    V_RF = 20e3
    dphi_RF = np.pi
    p_increment = 0.0
    # Create machine
    machine = Synchrotron(optics_mode='smooth', circumference=circumference,
                          n_segments=1, beta_x=beta_x, beta_y=beta_y,
                          D_x=0.0, D_y=0.0,
                          accQ_x=Q_x, accQ_y=Q_y, Qp_x=Qp_x, Qp_y=Qp_y,
                          alpha_mom_compaction=alpha_mom,
                          longitudinal_mode='non-linear', h_RF=h_RF, V_RF=V_RF,
                          dphi_RF=dphi_RF, p_increment=p_increment,
                          p0=p0, charge=qe, mass=m_p)
    # Create wakes: both fields share one slicer so the kicks are comparable.
    slices_for_wake = 500
    slicer_for_wake = UniformBinSlicer(slices_for_wake, n_sigma_z=2)
    # Resistive wall wake (analytic model)
    conductivity = 1e6
    pipe_sigma_y = 0.03565  # sigma_y * 8.
    wake = wakes.ParallelHorizontalPlatesResistiveWall(
        conductivity=conductivity, pipe_radius=pipe_sigma_y,
        resistive_wall_length=10000, dt_min=1e-12)
    wake_field_rw = wakes.WakeField(slicer_for_wake, wake)
    # Wake table IW2D (tabulated wake shipped with the examples directory)
    wakefile_columns = ['time', 'dipole_x', 'dipole_y',
                        'quadrupole_x', 'quadrupole_y']
    wake_folder = pathlib.Path(__file__).parent.joinpath(
        '../../examples/impedances').absolute()
    wakefile = wake_folder.joinpath("wakes/wake_PyHT.txt")
    wake_table = wakes.WakeTable(wakefile, wakefile_columns)
    wake_field_wt = wakes.WakeField(slicer_for_wake, wake_table)
    # Loop over attempts
    i_attempt = 0
    while i_attempt < n_attempts:
        print(f"Attempt {i_attempt+1}:")
        # Create particles
        bunch_rw = machine.generate_6D_Gaussian_bunch_matched(
            n_macroparticles=macroparticlenumber,
            intensity=intensity,
            epsn_x=epsn_x,
            epsn_y=epsn_y,
            sigma_z=sigma_z,
        )
        print('momentum spread =', bunch_rw.sigma_dp())
        print('synchrotron tune = ', machine.longitudinal_map.Q_s)
        # Copy particles so both wake models start from identical bunches.
        coords = {'x': bunch_rw.x, 'xp': bunch_rw.xp,
                  'y': bunch_rw.y, 'yp': bunch_rw.yp,
                  'z': bunch_rw.z, 'dp': bunch_rw.dp}
        bunch_wt = Particles(macroparticlenumber=bunch_rw.macroparticlenumber,
                              particlenumber_per_mp=bunch_rw.particlenumber_per_mp,
                              charge=bunch_rw.charge, mass=bunch_rw.mass,
                              circumference=bunch_rw.circumference,
                              gamma=bunch_rw.gamma,
                              coords_n_momenta_dict=coords)
        # Arrays for saving the per-turn vertical centroid of each bunch
        y_rw = np.zeros(n_turns, dtype=float)
        y_wt = np.zeros(n_turns, dtype=float)
        # Tracking loop
        for i in range(n_turns):
            for m in machine.one_turn_map:
                m.track(bunch_rw)
                m.track(bunch_wt)
            wake_field_rw.track(bunch_rw)
            wake_field_wt.track(bunch_wt)
            y_rw[i] = bunch_rw.mean_y()
            y_wt[i] = bunch_wt.mean_y()
        # Check results: extract the oscillation envelope via the Hilbert
        # transform magnitude and fit log-amplitude vs. turn number; the
        # slope is the exponential growth rate per turn.
        turns = np.arange(n_turns)
        iMin = 100   # skip the initial transient
        iMax = n_turns
        ampl_rw = np.abs(hilbert(y_rw))
        b_rw, a_rw, r_Rw, p_rw, stderr_rw = linregress(turns[iMin:iMax],
                                                       np.log(ampl_rw[iMin:iMax]))
        print(f"Growth rate RW {b_rw*1e4:.2f} [10^-4/turn]")
        ampl_wt = np.abs(hilbert(y_wt))
        b_wt, a_wt, r_wt, p_wt, stderr_wt = linregress(turns[iMin:iMax],
                                                       np.log(ampl_wt[iMin:iMax]))
        print(f"Growth rate WT {b_wt*1e4:.2f} [10^-4/turn]")
        # assert np.isclose(b_rw, b_wt, rtol=rel_tolerance), \
        #     "Resistive wall and wake table growth rates don't agree."
        check = np.isclose(b_rw, b_wt, rtol=rel_tolerance)
        # Only fail once all attempts have been exhausted.
        assert check or i_attempt < n_attempts-1, \
            f"After {n_attempts} attempts resistive wall and wake table \
growth rates don't agree."
        if check:
            print(f"Passed on {i_attempt + 1}. attempt.")
            break
        i_attempt += 1
| [
"numpy.log",
"PyHEADTAIL.impedances.wakes.WakeTable",
"numpy.zeros",
"numpy.isclose",
"pathlib.Path",
"PyHEADTAIL.particles.slicing.UniformBinSlicer",
"PyHEADTAIL.impedances.wakes.WakeField",
"PyHEADTAIL.particles.particles.Particles",
"numpy.arange",
"scipy.signal.hilbert",
"PyHEADTAIL.machines... | [((817, 840), 'numpy.sqrt', 'np.sqrt', (['(gamma ** 2 - 1)'], {}), '(gamma ** 2 - 1)\n', (824, 840), True, 'import numpy as np\n'), ((1265, 1609), 'PyHEADTAIL.machines.synchrotron.Synchrotron', 'Synchrotron', ([], {'optics_mode': '"""smooth"""', 'circumference': 'circumference', 'n_segments': '(1)', 'beta_x': 'beta_x', 'beta_y': 'beta_y', 'D_x': '(0.0)', 'D_y': '(0.0)', 'accQ_x': 'Q_x', 'accQ_y': 'Q_y', 'Qp_x': 'Qp_x', 'Qp_y': 'Qp_y', 'alpha_mom_compaction': 'alpha_mom', 'longitudinal_mode': '"""non-linear"""', 'h_RF': 'h_RF', 'V_RF': 'V_RF', 'dphi_RF': 'dphi_RF', 'p_increment': 'p_increment', 'p0': 'p0', 'charge': 'qe', 'mass': 'm_p'}), "(optics_mode='smooth', circumference=circumference, n_segments=1,\n beta_x=beta_x, beta_y=beta_y, D_x=0.0, D_y=0.0, accQ_x=Q_x, accQ_y=Q_y,\n Qp_x=Qp_x, Qp_y=Qp_y, alpha_mom_compaction=alpha_mom, longitudinal_mode\n ='non-linear', h_RF=h_RF, V_RF=V_RF, dphi_RF=dphi_RF, p_increment=\n p_increment, p0=p0, charge=qe, mass=m_p)\n", (1276, 1609), False, 'from PyHEADTAIL.machines.synchrotron import Synchrotron\n'), ((1842, 1888), 'PyHEADTAIL.particles.slicing.UniformBinSlicer', 'UniformBinSlicer', (['slices_for_wake'], {'n_sigma_z': '(2)'}), '(slices_for_wake, n_sigma_z=2)\n', (1858, 1888), False, 'from PyHEADTAIL.particles.slicing import UniformBinSlicer\n'), ((1995, 2138), 'PyHEADTAIL.impedances.wakes.ParallelHorizontalPlatesResistiveWall', 'wakes.ParallelHorizontalPlatesResistiveWall', ([], {'conductivity': 'conductivity', 'pipe_radius': 'pipe_sigma_y', 'resistive_wall_length': '(10000)', 'dt_min': '(1e-12)'}), '(conductivity=conductivity,\n pipe_radius=pipe_sigma_y, resistive_wall_length=10000, dt_min=1e-12)\n', (2038, 2138), False, 'from PyHEADTAIL.impedances import wakes\n'), ((2172, 2210), 'PyHEADTAIL.impedances.wakes.WakeField', 'wakes.WakeField', (['slicer_for_wake', 'wake'], {}), '(slicer_for_wake, wake)\n', (2187, 2210), False, 'from PyHEADTAIL.impedances import wakes\n'), ((2529, 2572), 
'PyHEADTAIL.impedances.wakes.WakeTable', 'wakes.WakeTable', (['wakefile', 'wakefile_columns'], {}), '(wakefile, wakefile_columns)\n', (2544, 2572), False, 'from PyHEADTAIL.impedances import wakes\n'), ((2593, 2637), 'PyHEADTAIL.impedances.wakes.WakeField', 'wakes.WakeField', (['slicer_for_wake', 'wake_table'], {}), '(slicer_for_wake, wake_table)\n', (2608, 2637), False, 'from PyHEADTAIL.impedances import wakes\n'), ((3357, 3618), 'PyHEADTAIL.particles.particles.Particles', 'Particles', ([], {'macroparticlenumber': 'bunch_rw.macroparticlenumber', 'particlenumber_per_mp': 'bunch_rw.particlenumber_per_mp', 'charge': 'bunch_rw.charge', 'mass': 'bunch_rw.mass', 'circumference': 'bunch_rw.circumference', 'gamma': 'bunch_rw.gamma', 'coords_n_momenta_dict': 'coords'}), '(macroparticlenumber=bunch_rw.macroparticlenumber,\n particlenumber_per_mp=bunch_rw.particlenumber_per_mp, charge=bunch_rw.\n charge, mass=bunch_rw.mass, circumference=bunch_rw.circumference, gamma\n =bunch_rw.gamma, coords_n_momenta_dict=coords)\n', (3366, 3618), False, 'from PyHEADTAIL.particles.particles import Particles\n'), ((3794, 3824), 'numpy.zeros', 'np.zeros', (['n_turns'], {'dtype': 'float'}), '(n_turns, dtype=float)\n', (3802, 3824), True, 'import numpy as np\n'), ((3840, 3870), 'numpy.zeros', 'np.zeros', (['n_turns'], {'dtype': 'float'}), '(n_turns, dtype=float)\n', (3848, 3870), True, 'import numpy as np\n'), ((4248, 4266), 'numpy.arange', 'np.arange', (['n_turns'], {}), '(n_turns)\n', (4257, 4266), True, 'import numpy as np\n'), ((4979, 5021), 'numpy.isclose', 'np.isclose', (['b_rw', 'b_wt'], {'rtol': 'rel_tolerance'}), '(b_rw, b_wt, rtol=rel_tolerance)\n', (4989, 5021), True, 'import numpy as np\n'), ((4336, 4349), 'scipy.signal.hilbert', 'hilbert', (['y_rw'], {}), '(y_rw)\n', (4343, 4349), False, 'from scipy.signal import hilbert\n'), ((4479, 4505), 'numpy.log', 'np.log', (['ampl_rw[iMin:iMax]'], {}), '(ampl_rw[iMin:iMax])\n', (4485, 4505), True, 'import numpy as np\n'), ((4594, 4607), 
'scipy.signal.hilbert', 'hilbert', (['y_wt'], {}), '(y_wt)\n', (4601, 4607), False, 'from scipy.signal import hilbert\n'), ((4737, 4763), 'numpy.log', 'np.log', (['ampl_wt[iMin:iMax]'], {}), '(ampl_wt[iMin:iMax])\n', (4743, 4763), True, 'import numpy as np\n'), ((2364, 2386), 'pathlib.Path', 'pathlib.Path', (['__file__'], {}), '(__file__)\n', (2376, 2386), False, 'import pathlib\n')] |
'''
Author: <NAME>
Date: 8/17/2018
Description: Creates a dataframe with moving averages and MACD oscillator
'''
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from datetime import datetime, timedelta
from iexfinance import get_historical_data
# MACD configuration: fast/slow moving-average window sizes and the ticker.
moving_avg1 = 10
moving_avg2 = 20
ticker = "BABA"
# Fetch roughly the last 90 days of daily bars ending today.
now = datetime.now()
start = now - timedelta(days=90)
df = get_historical_data(ticker, start=start, end=now, output_format='pandas')
def macd(dat, window1=None, window2=None):
    """
    Add fast and slow simple moving averages of the close price to *dat*.

    Generalized so the windows can be passed explicitly; with no arguments
    the module-level ``moving_avg1`` / ``moving_avg2`` are used, exactly
    as before.

    :param dat: pandas.DataFrame with a 'close' column; modified in place.
    :param window1: int or None; fast window (default: moving_avg1).
    :param window2: int or None; slow window (default: moving_avg2).
    :return: the same DataFrame with '10dma' and '20dma' columns added.
    """
    # Resolve defaults lazily so existing callers keep reading the
    # module-level configuration.
    w1 = moving_avg1 if window1 is None else window1
    w2 = moving_avg2 if window2 is None else window2
    dat['10dma'] = dat['close'].rolling(window=w1, min_periods=1).mean()
    dat['20dma'] = dat['close'].rolling(window=w2, min_periods=1).mean()
    return dat
def add_macd(df):
    """
    Append MACD-style crossover columns to *df*.

    Adds 'position' (1 on rows after the warm-up where the fast MA is at
    or above the slow MA, else 0), 'signals' (diff of position: +1 on an
    upward crossover, -1 on a downward one) and 'oscillator' (fast MA
    minus slow MA).

    :param df: pandas.DataFrame with a 'close' column; modified in place.
    :return: the same DataFrame with the extra columns.
    """
    df = macd(df)
    df['position'] = 0
    # BUGFIX: the original chained assignment df['position'][n:] = ...
    # writes through an intermediate Series; under pandas copy-on-write it
    # silently leaves df unchanged.  .loc assigns on df directly.
    df.loc[df.index[moving_avg1:], 'position'] = np.where(
        df['10dma'][moving_avg1:] >= df['20dma'][moving_avg1:], 1, 0)
    df['signals'] = df['position'].diff()
    df['oscillator'] = df['10dma'] - df['20dma']
    return df
df = add_macd(df)
# print(df)
# Show the rows where a crossover fired: +1 (fast MA crossed above slow),
# -1 (fast MA crossed below slow).
print(df.loc[df['signals'] == 1])
print(df.loc[df['signals'] == -1])
| [
"numpy.where",
"datetime.datetime.now",
"datetime.timedelta",
"iexfinance.get_historical_data"
] | [((326, 340), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (338, 340), False, 'from datetime import datetime, timedelta\n'), ((379, 452), 'iexfinance.get_historical_data', 'get_historical_data', (['ticker'], {'start': 'start', 'end': 'now', 'output_format': '"""pandas"""'}), "(ticker, start=start, end=now, output_format='pandas')\n", (398, 452), False, 'from iexfinance import get_historical_data\n'), ((355, 373), 'datetime.timedelta', 'timedelta', ([], {'days': '(90)'}), '(days=90)\n', (364, 373), False, 'from datetime import datetime, timedelta\n'), ((736, 806), 'numpy.where', 'np.where', (["(df['10dma'][moving_avg1:] >= df['20dma'][moving_avg1:])", '(1)', '(0)'], {}), "(df['10dma'][moving_avg1:] >= df['20dma'][moving_avg1:], 1, 0)\n", (744, 806), True, 'import numpy as np\n')] |
import numpy as np
from scipy import interpolate
import astropy.units as u
import astropy.constants as const
from nexoclom.atomicdata import atomicmass
from nexoclom.modelcode.surface_temperature import surface_temperature
# from nexoclom.math.distributions import MaxwellianDist
def surface_interaction_setup(inputs):
    """
    Pre-compute the surface-interaction lookup used for re-emission.

    When the accommodation factor is non-zero, samples the surface
    temperature on a 1-degree longitude/latitude grid and builds a 2-D
    spline over (temperature, probability) -- presumably used to draw
    thermally accommodated re-emission speeds; verify against callers.
    Otherwise no interpolator is built.

    :param inputs: model inputs; reads inputs.surfaceinteraction,
        inputs.geometry and inputs.options.species.
    :return: dict with key 'v_accom' holding the
        scipy RectBivariateSpline, or None when accomfactor == 0.
    """
    # Set up accommodation factor
    if inputs.surfaceinteraction.accomfactor != 0:
        # 1-degree grid over the whole surface to sample the temperature range
        longitude = np.radians(np.arange(361))
        latitude = np.radians(np.arange(181)) - np.pi/2.
        longrid, latgrid = np.meshgrid(longitude, latitude)
        tsurf = surface_temperature(inputs.geometry, longrid.flatten(),
                                    latgrid.flatten())
        nt, nv, nprob = 201, 101, 101
        temperature = np.linspace(min(tsurf), max(tsurf), nt)*u.K
        # Thermal speed scale sqrt(2 k_B T / m) for the tracked species
        v_temp = np.sqrt(2*temperature*const.k_B/
                         atomicmass(inputs.options.species))
        v_temp = v_temp.to(u.km/u.s)
        probability = np.linspace(0, 1, nprob)
        probgrid = np.ndarray((nt,nprob))
        for i,t in enumerate(temperature):
            # Velocity range from 0 up to 3x the thermal speed at this T
            vrange = np.linspace(0*u.km/u.s, np.max(v_temp[i]*3), nv)
            # NOTE(review): the MaxwellianDist import is commented out at the
            # top of this module, so this line raises NameError whenever the
            # accommodation factor is non-zero -- confirm and restore the
            # import from nexoclom.math.distributions.
            f_v = MaxwellianDist(vrange, t, inputs.options.species)
            # Normalize the cumulative distribution to [0, 1]
            cumdist = f_v.cumsum()
            cumdist -= cumdist.min()
            cumdist /= cumdist.max()
            # NOTE(review): argument order looks suspect -- np.interp(x, xp, fp)
            # here evaluates probability-of-cumdist at *velocity* values; an
            # inverse CDF would be np.interp(probability, cumdist,
            # vrange.value).  Verify the intended behavior.
            probgrid[i,:] = np.interp(vrange.value, cumdist, probability)
        # v_interp = interpolate.interp2d(probability, temperature,
        #                                 probgrid)
        v_interp = interpolate.RectBivariateSpline(temperature.value,
                                                    probability, probgrid)
    else:
        v_interp = None
    surfaceint = {'v_accom': v_interp}
    return surfaceint
| [
"numpy.meshgrid",
"nexoclom.atomicdata.atomicmass",
"numpy.max",
"scipy.interpolate.RectBivariateSpline",
"numpy.arange",
"numpy.linspace",
"numpy.interp",
"numpy.ndarray"
] | [((536, 568), 'numpy.meshgrid', 'np.meshgrid', (['longitude', 'latitude'], {}), '(longitude, latitude)\n', (547, 568), True, 'import numpy as np\n'), ((971, 995), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', 'nprob'], {}), '(0, 1, nprob)\n', (982, 995), True, 'import numpy as np\n'), ((1015, 1038), 'numpy.ndarray', 'np.ndarray', (['(nt, nprob)'], {}), '((nt, nprob))\n', (1025, 1038), True, 'import numpy as np\n'), ((1554, 1627), 'scipy.interpolate.RectBivariateSpline', 'interpolate.RectBivariateSpline', (['temperature.value', 'probability', 'probgrid'], {}), '(temperature.value, probability, probgrid)\n', (1585, 1627), False, 'from scipy import interpolate\n'), ((436, 450), 'numpy.arange', 'np.arange', (['(361)'], {}), '(361)\n', (445, 450), True, 'import numpy as np\n'), ((1356, 1401), 'numpy.interp', 'np.interp', (['vrange.value', 'cumdist', 'probability'], {}), '(vrange.value, cumdist, probability)\n', (1365, 1401), True, 'import numpy as np\n'), ((482, 496), 'numpy.arange', 'np.arange', (['(181)'], {}), '(181)\n', (491, 496), True, 'import numpy as np\n'), ((876, 910), 'nexoclom.atomicdata.atomicmass', 'atomicmass', (['inputs.options.species'], {}), '(inputs.options.species)\n', (886, 910), False, 'from nexoclom.atomicdata import atomicmass\n'), ((1126, 1147), 'numpy.max', 'np.max', (['(v_temp[i] * 3)'], {}), '(v_temp[i] * 3)\n', (1132, 1147), True, 'import numpy as np\n')] |
import numpy as np
from copy import copy
from elisa import const as c, BinarySystem
from elisa.binary_system.model import (
potential_value_primary,
potential_value_secondary,
pre_calculate_for_potential_value_primary,
pre_calculate_for_potential_value_secondary
)
from elisa.binary_system.radius import calculate_side_radius
from .. utils.default_binary_model import DEFAULT_SYSTEM
from .. import config
def back_radius_potential_primary(radius, mass_ratio, synchronicity=1.0, distance=1.0):
"""
Returns potential for given side radius.
:param distance: float;
:param synchronicity: float;
:param radius: float;
:param mass_ratio: float;
:return: float;
"""
# (F, q, d, phi, theta)
args = (synchronicity, mass_ratio, distance, c.PI, c.HALF_PI)
pot_args = pre_calculate_for_potential_value_primary(*args, return_as_tuple=True)
return potential_value_primary(radius, mass_ratio, *pot_args)
def back_radius_potential_secondary(radius, mass_ratio, synchronicity=1.0, distance=1.0):
"""
Returns potential for given side radius.
:param radius: float; side radius of the secondary component
:param mass_ratio: float;
:return: float;
"""
# (F, q, d, phi, theta)
args = (synchronicity, mass_ratio, distance, c.PI, c.HALF_PI)
pot_args = pre_calculate_for_potential_value_secondary(*args, return_as_tuple=True)
return potential_value_secondary(radius, mass_ratio, *pot_args)
def secondary_side_radius(mass_ratio, surface_potential):
"""
Side radius of secondary component
:param mass_ratio: float;
:param surface_potential: float;
:return: float; side radius
"""
return calculate_side_radius(1.0, mass_ratio, 1.0, surface_potential, 'secondary')
def critical_inclination(r1, r2, distance=None):
"""
Returns minimum inclination for occurence of eclipses.
:param r1: float;
:param r2: float;
:param distance: float;
:return: Union[float; numpy.array]
"""
summ = r1+r2
if np.isscalar(summ):
distance = 1.0 if distance is None else float(distance)
return np.degrees(np.arccos((r1+r2)/distance))
else:
distance = np.full(summ.shape, 1.0) if distance is None else \
(np.full(summ.shape, distance) if np.isscalar(distance) else distance)
result = np.full(summ.shape, np.nan)
summ = summ / distance
mask = summ < 1
result[mask] = np.degrees(np.arccos(summ[mask]/distance[mask]))
return result
def correct_sma(mass_ratio, r1, r2):
"""
Function will provide a values for sma and period that will create binary models with surface g within table
coverage.
:param mass_ratio: float;
:param r1: float;
:param r2: float;
:return: tuple; sma in sol rad, period in days
"""
mid_g = 270
m1 = 4e30
m2 = mass_ratio * m1
sma1 = np.sqrt(c.G * m1 / (r1**2 * mid_g))
sma2 = np.sqrt(c.G * m2 / (r2**2 * mid_g))
sma = 0.5 * (sma1 + sma2)
period = np.sqrt(c.FULL_ARC**2 * sma**3 / (c.G * (m1 + m2)))
return 1.4374e-9 * sma, period / 86400
def initialize_system(mass_ratio, r1, r2, t1, t2, inclination, omega1, omega2, overcontact):
"""
Initializing binary system based on grid params.
:param mass_ratio: float;
:param r1: float;
:param r2: float;
:param t1: int;
:param t2: int;
:param inclination: float;
:param omega1: float;
:param omega2: float;
:param overcontact: bool;
:return: elisa.BinarySystem
"""
dt = t1 - t2
if overcontact and np.abs(dt) > config.MAX_DIFF_T_OVERCONTACT:
t2 = t1 - config.MAX_DIFF_T_OVERCONTACT if dt > 0.0 else t1 + config.MAX_DIFF_T_OVERCONTACT
sma, period = correct_sma(mass_ratio, r1, r2)
params = copy(DEFAULT_SYSTEM)
params["system"].update({
'inclination': inclination, 'mass_ratio': mass_ratio,
'semi_major_axis': sma, 'period': period,
})
params["primary"].update({
'surface_potential': omega1, 't_eff': t1
})
params["secondary"].update({
'surface_potential': omega2, 't_eff': t2
})
return BinarySystem.from_json(params)
def invert_potential(potential, mass_ratio):
return potential / mass_ratio + 0.5 * (mass_ratio - 1) / mass_ratio
def switch_components(mass_ratio, r1, r2, t1, t2, inclination, omega1, omega2):
def inversion():
new_omega_1 = invert_potential(omega2, mass_ratio)
new_omega_2 = invert_potential(omega1, mass_ratio)
return [1.0 / mass_ratio, r2, r1, t2, t1, inclination], new_omega_1, new_omega_2
if t2 < t1:
return [mass_ratio, r1, r2, t1, t2, inclination], omega1, omega2
return inversion()
def return_closest_distance(binary):
"""
Returns the closest component distance during eclipses.
:param binary: BinarySystem;
:return: float; distance in SMA units
"""
conjunctions = binary.orbit.conjunctions
conj_phases = [conj['true_phase'] for conj in conjunctions.values()]
distances = binary.calculate_orbital_motion(conj_phases, return_nparray=True, calculate_from='phase')[:, 1]
return distances.min()
| [
"numpy.full",
"numpy.abs",
"elisa.binary_system.model.potential_value_secondary",
"elisa.binary_system.model.potential_value_primary",
"elisa.binary_system.model.pre_calculate_for_potential_value_primary",
"numpy.isscalar",
"copy.copy",
"elisa.binary_system.model.pre_calculate_for_potential_value_seco... | [((819, 889), 'elisa.binary_system.model.pre_calculate_for_potential_value_primary', 'pre_calculate_for_potential_value_primary', (['*args'], {'return_as_tuple': '(True)'}), '(*args, return_as_tuple=True)\n', (860, 889), False, 'from elisa.binary_system.model import potential_value_primary, potential_value_secondary, pre_calculate_for_potential_value_primary, pre_calculate_for_potential_value_secondary\n'), ((901, 955), 'elisa.binary_system.model.potential_value_primary', 'potential_value_primary', (['radius', 'mass_ratio', '*pot_args'], {}), '(radius, mass_ratio, *pot_args)\n', (924, 955), False, 'from elisa.binary_system.model import potential_value_primary, potential_value_secondary, pre_calculate_for_potential_value_primary, pre_calculate_for_potential_value_secondary\n'), ((1334, 1406), 'elisa.binary_system.model.pre_calculate_for_potential_value_secondary', 'pre_calculate_for_potential_value_secondary', (['*args'], {'return_as_tuple': '(True)'}), '(*args, return_as_tuple=True)\n', (1377, 1406), False, 'from elisa.binary_system.model import potential_value_primary, potential_value_secondary, pre_calculate_for_potential_value_primary, pre_calculate_for_potential_value_secondary\n'), ((1418, 1474), 'elisa.binary_system.model.potential_value_secondary', 'potential_value_secondary', (['radius', 'mass_ratio', '*pot_args'], {}), '(radius, mass_ratio, *pot_args)\n', (1443, 1474), False, 'from elisa.binary_system.model import potential_value_primary, potential_value_secondary, pre_calculate_for_potential_value_primary, pre_calculate_for_potential_value_secondary\n'), ((1700, 1775), 'elisa.binary_system.radius.calculate_side_radius', 'calculate_side_radius', (['(1.0)', 'mass_ratio', '(1.0)', 'surface_potential', '"""secondary"""'], {}), "(1.0, mass_ratio, 1.0, surface_potential, 'secondary')\n", (1721, 1775), False, 'from elisa.binary_system.radius import calculate_side_radius\n'), ((2038, 2055), 
'numpy.isscalar', 'np.isscalar', (['summ'], {}), '(summ)\n', (2049, 2055), True, 'import numpy as np\n'), ((2909, 2946), 'numpy.sqrt', 'np.sqrt', (['(c.G * m1 / (r1 ** 2 * mid_g))'], {}), '(c.G * m1 / (r1 ** 2 * mid_g))\n', (2916, 2946), True, 'import numpy as np\n'), ((2956, 2993), 'numpy.sqrt', 'np.sqrt', (['(c.G * m2 / (r2 ** 2 * mid_g))'], {}), '(c.G * m2 / (r2 ** 2 * mid_g))\n', (2963, 2993), True, 'import numpy as np\n'), ((3036, 3091), 'numpy.sqrt', 'np.sqrt', (['(c.FULL_ARC ** 2 * sma ** 3 / (c.G * (m1 + m2)))'], {}), '(c.FULL_ARC ** 2 * sma ** 3 / (c.G * (m1 + m2)))\n', (3043, 3091), True, 'import numpy as np\n'), ((3804, 3824), 'copy.copy', 'copy', (['DEFAULT_SYSTEM'], {}), '(DEFAULT_SYSTEM)\n', (3808, 3824), False, 'from copy import copy\n'), ((4162, 4192), 'elisa.BinarySystem.from_json', 'BinarySystem.from_json', (['params'], {}), '(params)\n', (4184, 4192), False, 'from elisa import const as c, BinarySystem\n'), ((2357, 2384), 'numpy.full', 'np.full', (['summ.shape', 'np.nan'], {}), '(summ.shape, np.nan)\n', (2364, 2384), True, 'import numpy as np\n'), ((2147, 2178), 'numpy.arccos', 'np.arccos', (['((r1 + r2) / distance)'], {}), '((r1 + r2) / distance)\n', (2156, 2178), True, 'import numpy as np\n'), ((2205, 2229), 'numpy.full', 'np.full', (['summ.shape', '(1.0)'], {}), '(summ.shape, 1.0)\n', (2212, 2229), True, 'import numpy as np\n'), ((2474, 2512), 'numpy.arccos', 'np.arccos', (['(summ[mask] / distance[mask])'], {}), '(summ[mask] / distance[mask])\n', (2483, 2512), True, 'import numpy as np\n'), ((3596, 3606), 'numpy.abs', 'np.abs', (['dt'], {}), '(dt)\n', (3602, 3606), True, 'import numpy as np\n'), ((2303, 2324), 'numpy.isscalar', 'np.isscalar', (['distance'], {}), '(distance)\n', (2314, 2324), True, 'import numpy as np\n'), ((2270, 2299), 'numpy.full', 'np.full', (['summ.shape', 'distance'], {}), '(summ.shape, distance)\n', (2277, 2299), True, 'import numpy as np\n')] |
import numpy as np
import pandas as pd;
# Demonstrates the same integer sequence held three ways: a plain Python
# list, a NumPy array, and a pandas Series (default, labeled, and dict-built).
data = list(range(1, 6))
ndata = np.array(data)
pdata = pd.Series(data)
print(pdata[0])
print(pdata.values)
print(pdata.index)

# Series with explicit string labels instead of the default RangeIndex.
pdata2 = pd.Series(data, index=list('ABCDE'))
print(pdata2)
print(pdata2['C'])

# A dict becomes a Series keyed by the dict's keys.
data2 = {'name':'kim','ko':100,'en':90,'ma':80, 'si':100}
pdata3 = pd.Series(data2)
pdata3.name = 'Score Name'
pdata3.index.name = 'Score Index name'
print(pdata3)
print(pdata3['name'])
print(pdata3.ko)
| [
"numpy.array",
"pandas.Series"
] | [((92, 106), 'numpy.array', 'np.array', (['data'], {}), '(data)\n', (100, 106), True, 'import numpy as np\n'), ((126, 141), 'pandas.Series', 'pd.Series', (['data'], {}), '(data)\n', (135, 141), True, 'import pandas as pd\n'), ((211, 259), 'pandas.Series', 'pd.Series', (['data'], {'index': "['A', 'B', 'C', 'D', 'E']"}), "(data, index=['A', 'B', 'C', 'D', 'E'])\n", (220, 259), True, 'import pandas as pd\n'), ((367, 383), 'pandas.Series', 'pd.Series', (['data2'], {}), '(data2)\n', (376, 383), True, 'import pandas as pd\n')] |
import os
import time
import torch
import queue
import argparse
import numpy as np
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from utils.drivers import train, test, get_dataloader
from model.MobileNetV2 import MobileNetV2, InvertedResidual
from pruner.fp_mbnetv2 import FilterPrunerMBNetV2
from pruner.fp_resnet import FilterPrunerResNet
class LeGR:
    """LeGR filter-pruning driver.

    Learns per-layer affine transformations (scale, shift) of filter ranks
    with a regularized evolutionary algorithm, then prunes the network to a
    FLOPs target and fine-tunes it.

    NOTE(review): besides its constructor arguments, this class also reads
    the module-level ``args`` (``args.dataset``, ``args.no_val``).
    """
    def __init__(self, dataset, datapath, model, pruner, rank_type='l2_weight', batch_size=32, lr=1e-3, safeguard=0, global_random_rank=False, lub='', device='cuda'):
        self.device = device
        # Weight/BN-norm criteria are data-free, so one sample suffices for
        # ranking; other criteria use 5000 samples.
        self.sample_for_ranking = 1 if rank_type in ['l1_weight', 'l2_weight', 'l2_bn', 'l1_bn', 'l2_bn_param'] else 5000
        self.safeguard = safeguard
        self.lub = lub  # path to learned affine transformations ('' = identity)
        self.lr = lr
        self.img_size = 32 if 'CIFAR' in args.dataset else 224
        self.batch_size = batch_size
        self.rank_type = rank_type

        self.train_loader, self.val_loader, self.test_loader = get_dataloader(self.img_size, dataset, datapath, batch_size, args.no_val)

        if 'CIFAR100' in dataset:
            num_classes = 100
        elif 'CIFAR10' in dataset:
            num_classes = 10
        elif 'ImageNet' in dataset:
            num_classes = 1000
        elif 'CUB200' in dataset:
            num_classes = 200
        self.model = model
        self.criterion = torch.nn.CrossEntropyLoss()

        # HACK: `pruner` is a class name supplied on the CLI; eval() on an
        # externally supplied string is unsafe for untrusted input.
        self.pruner = eval(pruner)(self.model, rank_type, num_classes, safeguard, random=global_random_rank, device=device)

        self.model.train()

    def learn_ranking_ea(self, name, model_desc, tau_hat, long_ft, target):
        """Evolutionary search for the per-layer (scale, shift) rank transform.

        Args:
            name: experiment name used in log/checkpoint file names.
            model_desc: checkpoint path used to restore the model after each
                candidate evaluation.
            tau_hat: fine-tuning steps before measuring a candidate's fitness.
            long_ft: unused in this method (kept for interface symmetry).
            target: fraction of the original FLOPs to keep.
        """
        name = name  # no-op assignment kept from the original
        start_t = time.time()
        # Measure the unpruned model's FLOPs/size with a dummy forward pass.
        self.pruner.reset()
        self.pruner.model.eval()
        self.pruner.forward(torch.zeros((1,3,self.img_size,self.img_size), device=self.device))
        original_flops = self.pruner.cur_flops
        original_size = self.pruner.cur_size

        print('Before Pruning, FLOPs: {:.3f}M, Size: {:.3f}M'.format(original_flops/1e6, original_size/1e6))

        mean_loss = []
        num_layers = len(self.pruner.filter_ranks)
        minimum_loss = 10  # best (lowest) validation loss seen so far
        best_perturbation = None
        # Evolutionary-algorithm hyper-parameters.
        POPULATIONS = 64
        SAMPLES = 16
        GENERATIONS = 400
        SCALE_SIGMA = 1
        MUTATE_PERCENT = 0.1
        index_queue = queue.Queue(POPULATIONS)  # ages individuals; oldest is replaced
        population_loss = np.zeros(0)
        population_data = []

        original_dist = self.pruner.filter_ranks.copy()
        # Per-layer statistics of the original ranks; std scales the mutations.
        original_dist_stat = {}
        for k in sorted(original_dist):
            a = original_dist[k].cpu().numpy()
            original_dist_stat[k] = {'mean': np.mean(a), 'std': np.std(a)}

        # Initialize Population
        for i in range(GENERATIONS):
            step_size = 1-(float(i)/(GENERATIONS*1.25))  # annealed mutation scale
            # Perturn distribution
            perturbation = []

            if i == POPULATIONS-1:
                # Last seed individual: the identity transformation.
                for k in sorted(self.pruner.filter_ranks.keys()):
                    perturbation.append((1,0))
            elif i < POPULATIONS-1:
                # Seeding phase: random (scale, shift) per layer.
                for k in sorted(self.pruner.filter_ranks.keys()):
                    scale = np.exp(float(np.random.normal(0, SCALE_SIGMA)))
                    shift = float(np.random.normal(0, original_dist_stat[k]['std']))
                    perturbation.append((scale, shift))
            else:
                # Evolution phase: tournament-select a winner and mutate it.
                mean_loss.append(np.mean(population_loss))
                sampled_idx = np.random.choice(POPULATIONS, SAMPLES)
                sampled_loss = population_loss[sampled_idx]
                winner_idx_ = np.argmin(sampled_loss)
                winner_idx = sampled_idx[winner_idx_]
                oldest_index = index_queue.get()

                # Mutate winner
                base = population_data[winner_idx]
                # Perturb distribution
                mnum = int(MUTATE_PERCENT * len(self.pruner.filter_ranks))
                mutate_candidate = np.random.choice(len(self.pruner.filter_ranks), mnum)
                for k in sorted(self.pruner.filter_ranks.keys()):
                    scale = 1
                    shift = 0
                    if k in mutate_candidate:
                        scale = np.exp(float(np.random.normal(0, SCALE_SIGMA*step_size)))
                        shift = float(np.random.normal(0, original_dist_stat[k]['std']))
                    perturbation.append((scale*base[k][0], shift+base[k][1]))

            # Given affine transformations, rank and prune
            self.pruner.pruning_with_transformations(original_dist, perturbation, target)

            # Re-measure the pruned model in terms of FLOPs and size
            self.pruner.reset()
            self.pruner.model.eval()
            self.pruner.forward(torch.zeros((1,3,self.img_size,self.img_size), device=self.device))
            cur_flops = self.pruner.cur_flops
            cur_size = self.pruner.cur_size

            self.pruner.model = self.pruner.model.to(self.device)

            print('Density: {:.3f}% ({:.3f}M/{:.3f}M) | FLOPs: {:.3f}% ({:.3f}M/{:.3f}M)'.format(float(cur_size)/original_size*100, cur_size/1e6, original_size/1e6,
                float(cur_flops)/original_flops*100, cur_flops/1e6, original_flops/1e6))
            print('Fine tuning to recover from pruning iteration.')
            optimizer = optim.SGD(self.pruner.model.parameters(), lr=self.lr, momentum=0.9, weight_decay=5e-4)
            if tau_hat > 0:
                train(self.model, self.train_loader, self.val_loader, optimizer, epochs=1, steps=tau_hat, run_test=False, device=self.device)
            # Fitness of the candidate = mean validation loss after pruning.
            acc, loss = test(self.model, self.val_loader, device=self.device, get_loss=True)
            if np.mean(loss) < minimum_loss:
                minimum_loss = np.mean(loss)
                best_perturbation = perturbation

            if i < POPULATIONS:
                # Still seeding: grow the population.
                index_queue.put(i)
                population_data.append(perturbation)
                population_loss = np.append(population_loss, [np.mean(loss)])
            else:
                # Steady state: the mutant replaces the oldest individual.
                index_queue.put(oldest_index)
                population_data[oldest_index] = perturbation
                population_loss[oldest_index] = np.mean(loss)

            # Restore the model back to origin
            model = torch.load(model_desc)
            if isinstance(model, nn.DataParallel):
                model = model.module
            model.eval()
            model = model.to(self.device)
            self.pruner.model = model
            self.model = model
            self.pruner.reset()
            self.pruner.model.eval()
            self.pruner.forward(torch.zeros((1,3,self.img_size,self.img_size), device=self.device))
            print('Generation {}, Step: {:.2f}, Min Loss: {:.3f}'.format(i, step_size, np.min(population_loss)))

        total_t = time.time() - start_t
        print('Finished. Use {:.2f} hours. Minimum Loss: {:.3f}'.format(float(total_t) / 3600, minimum_loss))
        if not os.path.exists('./log'):
            os.makedirs('./log')
        np.savetxt(os.path.join('./log', '{}_ea_loss.txt'.format(name)), np.array(mean_loss))
        np.savetxt(os.path.join('./log', '{}_ea_min.data'.format(name)), best_perturbation)

        # Use the best affine transformation to obtain the resulting model
        self.pruner.pruning_with_transformations(original_dist, best_perturbation, target)
        if not os.path.exists('./ckpt'):
            os.makedirs('./ckpt')
        torch.save(self.pruner.model, os.path.join('ckpt', '{}_bestarch_init.pt'.format(name)))

    def prune(self, name, model_name, long_ft, target=-1):
        """Prune once with the learned (or identity) transformation, then
        fine-tune for `long_ft` epochs and log accuracy/size/FLOPs.

        NOTE(review): `model_name` is unused in this method.
        """
        test_acc = []
        b4ft_test_acc = []  # accuracy before fine-tuning
        density = []
        flops = []

        # Get the accuracy before pruning
        acc = test(self.model, self.test_loader, device=self.device)
        test_acc.append(acc)
        b4ft_test_acc.append(acc)

        self.pruner.reset()
        self.model.eval()
        self.pruner.forward(torch.zeros((1,3,self.img_size,self.img_size), device=self.device))
        b4prune_size = self.pruner.cur_size
        b4prune_flops = self.pruner.cur_flops
        density.append(self.pruner.cur_size)
        flops.append(self.pruner.cur_flops)

        print('Before Pruning, Acc: {:.2f}%, FLOPs: {:.3f}M, Size: {:.3f}M'.format(acc, b4prune_flops/1e6, b4prune_size/1e6))

        # If there is learned affine transformation, load it.
        if self.lub != '':
            perturbation = np.loadtxt(self.lub)
        else:
            perturbation = np.array([[1., 0.] for _ in range(len(self.pruner.filter_ranks))])

        self.pruner.pruning_with_transformations(self.pruner.filter_ranks, perturbation, target)

        self.pruner.reset()
        self.model.eval()
        self.pruner.forward(torch.zeros((1,3,self.img_size,self.img_size), device=self.device))
        cur_flops = self.pruner.cur_flops
        cur_size = self.pruner.cur_size
        density.append(cur_size)
        flops.append(cur_flops)

        print('Density: {:.3f}% ({:.3f}M/{:.3f}M) | FLOPs: {:.3f}% ({:.3f}M/{:.3f}M)'.format(cur_size/b4prune_size*100, cur_size/1e6, b4prune_size/1e6,
            cur_flops/b4prune_flops*100, cur_flops/1e6, b4prune_flops/1e6))
        print('Fine tuning to recover from pruning iteration.')
        if not os.path.exists('./ckpt'):
            os.makedirs('./ckpt')
        print('Saving untrained pruned model...')
        torch.save(self.pruner.model, os.path.join('ckpt', '{}_init.t7'.format(name)))
        acc = test(self.model, self.test_loader, device=self.device)
        b4ft_test_acc.append(acc)

        if not os.path.exists('./log'):
            os.makedirs('./log')
        print('Finished. Going to fine tune the model a bit more')
        if long_ft > 0:
            optimizer = optim.SGD(self.model.parameters(), lr=self.lr, momentum=0.9, weight_decay=5e-4, nesterov=True)
            #scheduler = optim.lr_scheduler.CosineAnnealingLR(optimizer, long_ft)
            # Step LR decay at 30%/60%/80% of the fine-tuning schedule.
            scheduler = optim.lr_scheduler.MultiStepLR(optimizer, [int(long_ft*0.3), int(long_ft*0.6), int(long_ft*0.8)], gamma=0.2)
            if args.no_val:
                train(self.model, self.train_loader, self.test_loader, optimizer, epochs=long_ft, scheduler=scheduler, device=self.device, name=name)
            else:
                train(self.model, self.train_loader, self.val_loader, optimizer, epochs=long_ft, scheduler=scheduler, device=self.device, name=name)
            acc = test(self.model, self.test_loader, device=self.device)
            test_acc.append(acc)
        else:
            acc = test(self.model, self.test_loader, device=self.device)
            test_acc.append(acc)

        # Columns: acc before fine-tune, acc after, model size, FLOPs.
        log = np.stack([np.array(b4ft_test_acc), np.array(test_acc), np.array(density), np.array(flops)], axis=1)
        np.savetxt(os.path.join('./log', '{}_test_acc.txt'.format(name)), log)

        print('Summary')
        print('Before Pruning- Accuracy: {:.3f}, Cost: {:.3f}M'.format(test_acc[0], b4prune_flops/1e6))
        print('After Pruning- Accuracy: {:.3f}, Cost: {:.3f}M'.format(test_acc[-1], cur_flops/1e6))
def get_args():
    """Build and parse the command-line arguments for a LeGR pruning run."""
    p = argparse.ArgumentParser()
    # Experiment bookkeeping and inputs.
    p.add_argument("--name", type=str, default='pruned_mbnetv2', help='Name for the experiments, the resulting model and logs will use this')
    p.add_argument("--datapath", type=str, default='./data', help='Path toward the dataset that is used for this experiment')
    p.add_argument("--dataset", type=str, default='torchvision.datasets.CIFAR10', help='The class name of the dataset that is used, please find available classes under the dataset folder')
    p.add_argument("--model", type=str, default='./ckpt/resnet56_cifar10.t7', help='The pre-trained model that pruning starts from')
    p.add_argument("--pruner", type=str, default='FilterPrunerResNet', help='Different network require differnt pruner implementation')
    # Pruning configuration.
    p.add_argument("--rank_type", type=str, default='l2_weight', help='The ranking criteria for filter pruning')
    p.add_argument("--lub", type=str, default='', help='The affine transformations')
    p.add_argument("--global_random_rank", action='store_true', default=False, help='When this is specified, none of the rank_type matters, it will randomly prune the filters')
    p.add_argument("--tau_hat", type=int, default=0, help='The number of updates before evaluating for fitness (used in EA).')
    p.add_argument("--long_ft", type=int, default=60, help='It specifies how many epochs to fine-tune the network once the pruning is done')
    p.add_argument("--prune_away", type=float, default=90, help='How many percentage of constraints should be pruned away. E.g., 50 means 50% of FLOPs will be pruned away')
    p.add_argument("--safeguard", type=float, default=0, help='A floating point number that represent at least how many percentage of the original number of channel should be preserved. E.g., 0.10 means no matter what ranking, each layer should have at least 10% of the number of original channels.')
    # Training / runtime configuration.
    p.add_argument("--batch_size", type=int, default=32, help='Batch size for training.')
    p.add_argument("--min_lub", action='store_true', default=False, help='Use Evolutionary Algorithm to solve latent variable for minimizing Lipschitz upper bound')
    p.add_argument("--uniform_pruning", action='store_true', default=False, help='Use Evolutionary Algorithm to solve latent variable for minimizing Lipschitz upper bound')
    p.add_argument("--no_val", action='store_true', default=False, help='Use full dataset to train (use to compare with prior art in CIFAR-10)')
    p.add_argument("--cpu", action='store_true', default=False, help='Use CPU')
    p.add_argument("--lr", type=float, default=0.001, help='The learning rate for fine-tuning')
    return p.parse_args()
if __name__ == '__main__':
    args = get_args()  # module-global: also read inside class LeGR
    print(args)
    print('Pruning {}'.format(args.name))
    img_size = 32
    device = 'cpu' if args.cpu else 'cuda'
    prune_till = -1
    prune_away = args.prune_away

    # Load the pre-trained model; unwrap a DataParallel checkpoint if needed.
    model = torch.load(args.model)
    if isinstance(model, nn.DataParallel):
        model = model.module
    model = model.to(device)

    legr = LeGR(args.dataset, args.datapath, model, args.pruner, args.rank_type, args.batch_size, args.lr, safeguard=args.safeguard, global_random_rank=args.global_random_rank, lub=args.lub, device=device)

    if prune_away > 0:
        # Convert the "prune away X%" request into an absolute FLOPs budget.
        dummy_size = 32 if 'CIFAR' in args.dataset else 224
        legr.pruner.reset()
        legr.model.eval()
        legr.pruner.forward(torch.zeros((1,3,dummy_size, dummy_size), device=device))
        b4prune_flops = legr.pruner.cur_flops
        prune_till = b4prune_flops * (1-(prune_away)/100.)
        print('Pruned untill {:.3f}M'.format(prune_till/1000000.))
        if args.uniform_pruning:
            # Uniform baseline: same keep-ratio for every layer.
            ratio = legr.pruner.get_uniform_ratio(prune_till)
            legr.pruner.safeguard = ratio
            prune_away = 99

    if args.min_lub:
        # Learn the affine transformations with the evolutionary algorithm.
        legr.learn_ranking_ea(args.name, args.model, args.tau_hat, args.long_ft, (1-(prune_away)/100.))
    else:
        # Prune directly (optionally using transformations loaded via --lub).
        legr.prune(args.name, args.model, args.long_ft, (1-(prune_away)/100.))
| [
"argparse.ArgumentParser",
"numpy.argmin",
"numpy.mean",
"numpy.random.normal",
"numpy.std",
"torch.load",
"os.path.exists",
"utils.drivers.test",
"numpy.loadtxt",
"numpy.random.choice",
"torch.zeros",
"numpy.min",
"utils.drivers.train",
"queue.Queue",
"os.makedirs",
"torch.nn.CrossEnt... | [((11236, 11261), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (11259, 11261), False, 'import argparse\n'), ((14228, 14250), 'torch.load', 'torch.load', (['args.model'], {}), '(args.model)\n', (14238, 14250), False, 'import torch\n'), ((991, 1064), 'utils.drivers.get_dataloader', 'get_dataloader', (['self.img_size', 'dataset', 'datapath', 'batch_size', 'args.no_val'], {}), '(self.img_size, dataset, datapath, batch_size, args.no_val)\n', (1005, 1064), False, 'from utils.drivers import train, test, get_dataloader\n'), ((1377, 1404), 'torch.nn.CrossEntropyLoss', 'torch.nn.CrossEntropyLoss', ([], {}), '()\n', (1402, 1404), False, 'import torch\n'), ((1673, 1684), 'time.time', 'time.time', ([], {}), '()\n', (1682, 1684), False, 'import time\n'), ((2326, 2350), 'queue.Queue', 'queue.Queue', (['POPULATIONS'], {}), '(POPULATIONS)\n', (2337, 2350), False, 'import queue\n'), ((2377, 2388), 'numpy.zeros', 'np.zeros', (['(0)'], {}), '(0)\n', (2385, 2388), True, 'import numpy as np\n'), ((7794, 7848), 'utils.drivers.test', 'test', (['self.model', 'self.test_loader'], {'device': 'self.device'}), '(self.model, self.test_loader, device=self.device)\n', (7798, 7848), False, 'from utils.drivers import train, test, get_dataloader\n'), ((9587, 9641), 'utils.drivers.test', 'test', (['self.model', 'self.test_loader'], {'device': 'self.device'}), '(self.model, self.test_loader, device=self.device)\n', (9591, 9641), False, 'from utils.drivers import train, test, get_dataloader\n'), ((1775, 1844), 'torch.zeros', 'torch.zeros', (['(1, 3, self.img_size, self.img_size)'], {'device': 'self.device'}), '((1, 3, self.img_size, self.img_size), device=self.device)\n', (1786, 1844), False, 'import torch\n'), ((5635, 5703), 'utils.drivers.test', 'test', (['self.model', 'self.val_loader'], {'device': 'self.device', 'get_loss': '(True)'}), '(self.model, self.val_loader, device=self.device, get_loss=True)\n', (5639, 5703), False, 'from utils.drivers 
import train, test, get_dataloader\n'), ((6310, 6332), 'torch.load', 'torch.load', (['model_desc'], {}), '(model_desc)\n', (6320, 6332), False, 'import torch\n'), ((6859, 6870), 'time.time', 'time.time', ([], {}), '()\n', (6868, 6870), False, 'import time\n'), ((7006, 7029), 'os.path.exists', 'os.path.exists', (['"""./log"""'], {}), "('./log')\n", (7020, 7029), False, 'import os\n'), ((7043, 7063), 'os.makedirs', 'os.makedirs', (['"""./log"""'], {}), "('./log')\n", (7054, 7063), False, 'import os\n'), ((7137, 7156), 'numpy.array', 'np.array', (['mean_loss'], {}), '(mean_loss)\n', (7145, 7156), True, 'import numpy as np\n'), ((7432, 7456), 'os.path.exists', 'os.path.exists', (['"""./ckpt"""'], {}), "('./ckpt')\n", (7446, 7456), False, 'import os\n'), ((7470, 7491), 'os.makedirs', 'os.makedirs', (['"""./ckpt"""'], {}), "('./ckpt')\n", (7481, 7491), False, 'import os\n'), ((7996, 8065), 'torch.zeros', 'torch.zeros', (['(1, 3, self.img_size, self.img_size)'], {'device': 'self.device'}), '((1, 3, self.img_size, self.img_size), device=self.device)\n', (8007, 8065), False, 'import torch\n'), ((8487, 8507), 'numpy.loadtxt', 'np.loadtxt', (['self.lub'], {}), '(self.lub)\n', (8497, 8507), True, 'import numpy as np\n'), ((8798, 8867), 'torch.zeros', 'torch.zeros', (['(1, 3, self.img_size, self.img_size)'], {'device': 'self.device'}), '((1, 3, self.img_size, self.img_size), device=self.device)\n', (8809, 8867), False, 'import torch\n'), ((9376, 9400), 'os.path.exists', 'os.path.exists', (['"""./ckpt"""'], {}), "('./ckpt')\n", (9390, 9400), False, 'import os\n'), ((9414, 9435), 'os.makedirs', 'os.makedirs', (['"""./ckpt"""'], {}), "('./ckpt')\n", (9425, 9435), False, 'import os\n'), ((9692, 9715), 'os.path.exists', 'os.path.exists', (['"""./log"""'], {}), "('./log')\n", (9706, 9715), False, 'import os\n'), ((9729, 9749), 'os.makedirs', 'os.makedirs', (['"""./log"""'], {}), "('./log')\n", (9740, 9749), False, 'import os\n'), ((10571, 10625), 'utils.drivers.test', 'test', 
(['self.model', 'self.test_loader'], {'device': 'self.device'}), '(self.model, self.test_loader, device=self.device)\n', (10575, 10625), False, 'from utils.drivers import train, test, get_dataloader\n'), ((10695, 10749), 'utils.drivers.test', 'test', (['self.model', 'self.test_loader'], {'device': 'self.device'}), '(self.model, self.test_loader, device=self.device)\n', (10699, 10749), False, 'from utils.drivers import train, test, get_dataloader\n'), ((14726, 14784), 'torch.zeros', 'torch.zeros', (['(1, 3, dummy_size, dummy_size)'], {'device': 'device'}), '((1, 3, dummy_size, dummy_size), device=device)\n', (14737, 14784), False, 'import torch\n'), ((2639, 2649), 'numpy.mean', 'np.mean', (['a'], {}), '(a)\n', (2646, 2649), True, 'import numpy as np\n'), ((2658, 2667), 'numpy.std', 'np.std', (['a'], {}), '(a)\n', (2664, 2667), True, 'import numpy as np\n'), ((4729, 4798), 'torch.zeros', 'torch.zeros', (['(1, 3, self.img_size, self.img_size)'], {'device': 'self.device'}), '((1, 3, self.img_size, self.img_size), device=self.device)\n', (4740, 4798), False, 'import torch\n'), ((5485, 5614), 'utils.drivers.train', 'train', (['self.model', 'self.train_loader', 'self.val_loader', 'optimizer'], {'epochs': '(1)', 'steps': 'tau_hat', 'run_test': '(False)', 'device': 'self.device'}), '(self.model, self.train_loader, self.val_loader, optimizer, epochs=1,\n steps=tau_hat, run_test=False, device=self.device)\n', (5490, 5614), False, 'from utils.drivers import train, test, get_dataloader\n'), ((5720, 5733), 'numpy.mean', 'np.mean', (['loss'], {}), '(loss)\n', (5727, 5733), True, 'import numpy as np\n'), ((5781, 5794), 'numpy.mean', 'np.mean', (['loss'], {}), '(loss)\n', (5788, 5794), True, 'import numpy as np\n'), ((6228, 6241), 'numpy.mean', 'np.mean', (['loss'], {}), '(loss)\n', (6235, 6241), True, 'import numpy as np\n'), ((6659, 6728), 'torch.zeros', 'torch.zeros', (['(1, 3, self.img_size, self.img_size)'], {'device': 'self.device'}), '((1, 3, self.img_size, self.img_size), 
device=self.device)\n', (6670, 6728), False, 'import torch\n'), ((10240, 10378), 'utils.drivers.train', 'train', (['self.model', 'self.train_loader', 'self.test_loader', 'optimizer'], {'epochs': 'long_ft', 'scheduler': 'scheduler', 'device': 'self.device', 'name': 'name'}), '(self.model, self.train_loader, self.test_loader, optimizer, epochs=\n long_ft, scheduler=scheduler, device=self.device, name=name)\n', (10245, 10378), False, 'from utils.drivers import train, test, get_dataloader\n'), ((10416, 10553), 'utils.drivers.train', 'train', (['self.model', 'self.train_loader', 'self.val_loader', 'optimizer'], {'epochs': 'long_ft', 'scheduler': 'scheduler', 'device': 'self.device', 'name': 'name'}), '(self.model, self.train_loader, self.val_loader, optimizer, epochs=\n long_ft, scheduler=scheduler, device=self.device, name=name)\n', (10421, 10553), False, 'from utils.drivers import train, test, get_dataloader\n'), ((10808, 10831), 'numpy.array', 'np.array', (['b4ft_test_acc'], {}), '(b4ft_test_acc)\n', (10816, 10831), True, 'import numpy as np\n'), ((10833, 10851), 'numpy.array', 'np.array', (['test_acc'], {}), '(test_acc)\n', (10841, 10851), True, 'import numpy as np\n'), ((10853, 10870), 'numpy.array', 'np.array', (['density'], {}), '(density)\n', (10861, 10870), True, 'import numpy as np\n'), ((10872, 10887), 'numpy.array', 'np.array', (['flops'], {}), '(flops)\n', (10880, 10887), True, 'import numpy as np\n'), ((3435, 3473), 'numpy.random.choice', 'np.random.choice', (['POPULATIONS', 'SAMPLES'], {}), '(POPULATIONS, SAMPLES)\n', (3451, 3473), True, 'import numpy as np\n'), ((3564, 3587), 'numpy.argmin', 'np.argmin', (['sampled_loss'], {}), '(sampled_loss)\n', (3573, 3587), True, 'import numpy as np\n'), ((6814, 6837), 'numpy.min', 'np.min', (['population_loss'], {}), '(population_loss)\n', (6820, 6837), True, 'import numpy as np\n'), ((3379, 3403), 'numpy.mean', 'np.mean', (['population_loss'], {}), '(population_loss)\n', (3386, 3403), True, 'import numpy as np\n'), 
((6039, 6052), 'numpy.mean', 'np.mean', (['loss'], {}), '(loss)\n', (6046, 6052), True, 'import numpy as np\n'), ((3221, 3270), 'numpy.random.normal', 'np.random.normal', (['(0)', "original_dist_stat[k]['std']"], {}), "(0, original_dist_stat[k]['std'])\n", (3237, 3270), True, 'import numpy as np\n'), ((3152, 3184), 'numpy.random.normal', 'np.random.normal', (['(0)', 'SCALE_SIGMA'], {}), '(0, SCALE_SIGMA)\n', (3168, 3184), True, 'import numpy as np\n'), ((4278, 4327), 'numpy.random.normal', 'np.random.normal', (['(0)', "original_dist_stat[k]['std']"], {}), "(0, original_dist_stat[k]['std'])\n", (4294, 4327), True, 'import numpy as np\n'), ((4195, 4239), 'numpy.random.normal', 'np.random.normal', (['(0)', '(SCALE_SIGMA * step_size)'], {}), '(0, SCALE_SIGMA * step_size)\n', (4211, 4239), True, 'import numpy as np\n')] |
from niscv_v2.experiments.garch_truth import garch_model
from niscv_v2.basics.qtl import Qtl
import numpy as np
import multiprocessing
import os
from functools import partial
from datetime import datetime as dt
import pickle
def experiment(D, alpha, size_est, show, size_kn, ratio):
    """Run one quantile-estimation experiment on a D-dimensional GARCH model.

    Builds the (target, statistic, proposal) triple from the GARCH model,
    drives a Qtl estimator through the full pipeline, and returns its result.
    """
    target, statistic, proposal = garch_model(D)
    estimator = Qtl(D + 3, target, statistic, alpha, proposal, size_est=size_est, show=show)
    estimator.initial_estimation()
    estimator.resampling(size_kn, ratio)
    estimator.density_estimation(mode=2, local=True, gamma=0.3, bdwth=1.5, alpha0=0.1)
    # Nonparametric estimation for modes 0, 1 and 2, in order.
    for mode in (0, 1, 2):
        estimator.nonparametric_estimation(mode=mode)
    estimator.control_calculation()
    estimator.regression_estimation()
    estimator.likelihood_estimation()
    return estimator.result
def run(it, num):
    """Run the full experiment grid once for repetition `it` of batch `num`."""
    # Deterministic seed per (batch, repetition) pair.
    np.random.seed(1997 * num + 1107 + it)
    grid = [(1, 500), (2, 1000), (5, 2000)]  # (dimension D, resampling ratio)
    levels = [0.05, 0.01]
    results = []
    for dim, ratio in grid:
        for level in levels:
            print(num, it, dim, level)
            results.append(experiment(dim, level, size_est=400000, show=False, size_kn=2000, ratio=ratio))
    return results
def main(num):
    """Run 10 repetitions of the experiment grid in parallel and pickle them.

    `num` selects the batch: it feeds the per-run RNG seed and names the
    output file.
    """
    # Limit BLAS/OpenMP threads so the 10 worker processes do not oversubscribe.
    os.environ['OMP_NUM_THREADS'] = '3'
    with multiprocessing.Pool(processes=10) as pool:
        begin = dt.now()
        its = np.arange(1, 11)  # repetition indices 1..10
        # partial() keeps `run` picklable for pool.map (a lambda would not be).
        R = pool.map(partial(run, num=num), its)
        end = dt.now()
        print((end - begin).seconds)
    with open('../data/real/garch_estimate_{}'.format(num), 'wb') as file:
        pickle.dump(R, file)
if __name__ == '__main__':
    # Batches 25..30; each writes ../data/real/garch_estimate_<n>.
    for n in np.arange(25, 31):
        main(n)
| [
"functools.partial",
"niscv_v2.experiments.garch_truth.garch_model",
"pickle.dump",
"numpy.random.seed",
"niscv_v2.basics.qtl.Qtl",
"numpy.arange",
"multiprocessing.Pool",
"datetime.datetime.now"
] | [((319, 333), 'niscv_v2.experiments.garch_truth.garch_model', 'garch_model', (['D'], {}), '(D)\n', (330, 333), False, 'from niscv_v2.experiments.garch_truth import garch_model\n'), ((344, 420), 'niscv_v2.basics.qtl.Qtl', 'Qtl', (['(D + 3)', 'target', 'statistic', 'alpha', 'proposal'], {'size_est': 'size_est', 'show': 'show'}), '(D + 3, target, statistic, alpha, proposal, size_est=size_est, show=show)\n', (347, 420), False, 'from niscv_v2.basics.qtl import Qtl\n'), ((829, 867), 'numpy.random.seed', 'np.random.seed', (['(1997 * num + 1107 + it)'], {}), '(1997 * num + 1107 + it)\n', (843, 867), True, 'import numpy as np\n'), ((1606, 1623), 'numpy.arange', 'np.arange', (['(25)', '(31)'], {}), '(25, 31)\n', (1615, 1623), True, 'import numpy as np\n'), ((1250, 1284), 'multiprocessing.Pool', 'multiprocessing.Pool', ([], {'processes': '(10)'}), '(processes=10)\n', (1270, 1284), False, 'import multiprocessing\n'), ((1310, 1318), 'datetime.datetime.now', 'dt.now', ([], {}), '()\n', (1316, 1318), True, 'from datetime import datetime as dt\n'), ((1333, 1349), 'numpy.arange', 'np.arange', (['(1)', '(11)'], {}), '(1, 11)\n', (1342, 1349), True, 'import numpy as np\n'), ((1413, 1421), 'datetime.datetime.now', 'dt.now', ([], {}), '()\n', (1419, 1421), True, 'from datetime import datetime as dt\n'), ((1543, 1563), 'pickle.dump', 'pickle.dump', (['R', 'file'], {}), '(R, file)\n', (1554, 1563), False, 'import pickle\n'), ((1371, 1392), 'functools.partial', 'partial', (['run'], {'num': 'num'}), '(run, num=num)\n', (1378, 1392), False, 'from functools import partial\n')] |
import argparse
from pathlib import Path
import cv2
import matplotlib
import numpy as np
import torch
from tqdm import tqdm
from data.datasets import get_dataloaders
from utils.conf import Conf
from utils.saver import Saver
matplotlib.use('Agg')
from matplotlib import pyplot as plt
from torch.nn.functional import adaptive_avg_pool2d
from torch.nn.functional import relu
from torch.nn.functional import normalize
from model.net import TriNet
class Hook:
    """Callable forward hook that records every module output it observes."""

    def __init__(self):
        self.buffer = []  # collected outputs, in call order

    def __call__(self, module, _inputs, output):
        # Signature follows the forward-hook protocol: (module, input, output).
        self.buffer.append(output)

    def reset(self):
        # Rebind (rather than mutate) so previously handed-out lists keep
        # their contents.
        self.buffer = []
def parse(conf: Conf):
    """Parse CLI options for comparing heatmaps of two TriNet checkpoints."""
    p = argparse.ArgumentParser(description='Train img to video model')
    p = conf.add_default_args(p)
    # The two networks to compare, plus their checkpoint names.
    p.add_argument('net1', type=str, help='Path to TriNet base folder.')
    p.add_argument('--chk_net1', type=str, help='checkpoint name', default='chk_end')
    p.add_argument('net2', type=str, help='Path to TriNet base folder.')
    p.add_argument('--chk_net2', type=str, help='checkpoint name', default='chk_end')
    p.add_argument('--dest_path', type=Path, default='/tmp/heatmaps_out')
    opts = p.parse_args()
    # Fixed evaluation-time settings, not exposed on the CLI.
    opts.train_strategy = 'multiview'
    opts.use_random_erasing = False
    opts.num_train_images = 0
    opts.img_test_batch = 32
    return opts
def extract_grad_cam(net: TriNet, inputs: torch.Tensor, device: torch.device,
                     hook: Hook):
    """Grad-CAM-style attention maps for a batch of multi-view inputs.

    Returns a tensor viewed as (*inputs.shape[:2], h, w), where (h, w) is the
    spatial size of the hooked feature maps.
    """
    _, logits = net(inputs, return_logits=True)  # forward calls hooks
    # Score to explain: the maximum logit of each sample.
    logits_max = torch.max(logits, 1)[0]
    conv_features = hook.buffer[0]  # feature maps captured by the forward hook
    # d(max logit)/d(features); grad_outputs of ones backprops all samples at once.
    grads = torch.autograd.grad(logits_max, conv_features,
                                grad_outputs=torch.ones(len(conv_features)).to(device))[0]
    with torch.no_grad():
        # Channel weights = spatially averaged gradients (Grad-CAM weighting).
        weights = adaptive_avg_pool2d(grads, (1, 1))
        attn = relu(torch.sum(conv_features * weights, 1))
        # L2-normalize each map over its flattened spatial positions.
        old_shape = attn.shape
        attn = normalize(attn.view(attn.shape[0], -1))
        attn = attn.view(old_shape)
        return attn.view(*inputs.shape[:2], *attn.shape[1:])
def save_img(img, attn, dest_path):
    """Write `img` to `dest_path` at native resolution, optionally overlaying
    the attention map `attn` (pass None to save the plain image)."""
    height, width = img.shape[0], img.shape[1]
    figure = plt.figure()
    # A 1-inch-tall canvas saved at dpi=height reproduces the pixel size.
    figure.set_size_inches(width / height, 1, forward=False)
    axes = plt.Axes(figure, [0., 0., 1., 1.])
    axes.set_axis_off()
    figure.add_axes(axes)
    axes.imshow(img, origin='upper')
    if attn is not None:
        # Semi-transparent jet colormap stretched over the whole image.
        axes.imshow(attn, origin='upper', extent=[0, width, height, 0],
                    alpha=0.4, cmap=plt.cm.get_cmap('jet'))
    figure.canvas.draw()
    plt.savefig(dest_path, dpi=height)
    plt.close()
def main():
    """Compare two TriNet checkpoints by saving Grad-CAM heatmaps side by side."""
    conf = Conf()
    conf.suppress_random()
    device = conf.get_device()
    args = parse(conf)

    # Output layout: <dest>/<net1>__vs__<net2>/{orig, both, <net1>, <net2>}.
    dest_path = args.dest_path / (Path(args.net1).name + '__vs__' + Path(args.net2).name)
    dest_path.mkdir(exist_ok=True, parents=True)
    both_path = dest_path / 'both'
    both_path.mkdir(exist_ok=True, parents=True)
    net1_path = dest_path / Path(args.net1).name
    net1_path.mkdir(exist_ok=True, parents=True)
    net2_path = dest_path / Path(args.net2).name
    net2_path.mkdir(exist_ok=True, parents=True)
    orig_path = dest_path / 'orig'
    orig_path.mkdir(exist_ok=True, parents=True)

    # ---- Restore net
    net1 = Saver.load_net(args.net1, args.chk_net1, args.dataset_name).to(device)
    net2 = Saver.load_net(args.net2, args.chk_net2, args.dataset_name).to(device)
    net1.eval()
    net2.eval()

    train_loader, query_loader, gallery_loader, queryimg_loader, galleryimg_loader = \
        get_dataloaders(args.dataset_name, conf.nas_path, device, args)

    # register hooks
    # Both hooks tap the same layer index (backbone.features_layers[4]).
    hook_net_1, hook_net_2 = Hook(), Hook()
    net1.backbone.features_layers[4].register_forward_hook(hook_net_1)
    net2.backbone.features_layers[4].register_forward_hook(hook_net_2)

    dst_idx = 0
    for idx_batch, (vids, *_) in enumerate(tqdm(galleryimg_loader, 'iterating..')):
        # Only the last 50 batches are processed.
        if idx_batch < len(galleryimg_loader) - 50:
            continue
        # Clear stale gradients and previously hooked activations.
        net1.zero_grad()
        net2.zero_grad()
        hook_net_1.reset()
        hook_net_2.reset()
        vids = vids.to(device)
        attn_1 = extract_grad_cam(net1, vids, device, hook_net_1)
        attn_2 = extract_grad_cam(net2, vids, device, hook_net_2)
        B, N_VIEWS = attn_1.shape[0], attn_1.shape[1]
        for idx_b in range(B):
            for idx_v in range(N_VIEWS):
                el_img = vids[idx_b, idx_v]
                el_attn_1 = attn_1[idx_b, idx_v]
                el_attn_2 = attn_2[idx_b, idx_v]
                el_img = el_img.cpu().numpy().transpose(1, 2, 0)
                el_attn_1 = el_attn_1.cpu().numpy()
                el_attn_2 = el_attn_2.cpu().numpy()
                # Undo the normalization (presumably ImageNet statistics) for display.
                mean, var = [0.485, 0.456, 0.406], [0.229, 0.224, 0.225]
                el_img = (el_img * var) + mean
                el_img = np.clip(el_img, 0, 1)
                # Smooth and upsample each attention map to the image size.
                el_attn_1 = cv2.blur(el_attn_1, (3, 3))
                el_attn_1 = cv2.resize(el_attn_1, (el_img.shape[1], el_img.shape[0]),
                                       interpolation=cv2.INTER_CUBIC)
                el_attn_2 = cv2.blur(el_attn_2, (3, 3))
                el_attn_2 = cv2.resize(el_attn_2, (el_img.shape[1], el_img.shape[0]),
                                       interpolation=cv2.INTER_CUBIC)
                save_img(el_img, el_attn_1, net1_path / f'{dst_idx}.png')
                save_img(el_img, el_attn_2, net2_path / f'{dst_idx}.png')
                save_img(el_img, None, orig_path / f'{dst_idx}.png')
                # Side-by-side comparison of both networks' attention.
                save_img(np.concatenate([el_img, el_img], 1),
                         np.concatenate([el_attn_1, el_attn_2], 1), both_path / f'{dst_idx}.png')
                dst_idx += 1


if __name__ == '__main__':
    main()
| [
"argparse.ArgumentParser",
"numpy.clip",
"matplotlib.pyplot.figure",
"pathlib.Path",
"torch.no_grad",
"matplotlib.pyplot.close",
"torch.nn.functional.adaptive_avg_pool2d",
"utils.conf.Conf",
"matplotlib.pyplot.Axes",
"cv2.resize",
"tqdm.tqdm",
"matplotlib.use",
"torch.max",
"torch.sum",
... | [((227, 248), 'matplotlib.use', 'matplotlib.use', (['"""Agg"""'], {}), "('Agg')\n", (241, 248), False, 'import matplotlib\n'), ((676, 739), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Train img to video model"""'}), "(description='Train img to video model')\n", (699, 739), False, 'import argparse\n'), ((2216, 2228), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (2226, 2228), True, 'from matplotlib import pyplot as plt\n'), ((2296, 2331), 'matplotlib.pyplot.Axes', 'plt.Axes', (['fig', '[0.0, 0.0, 1.0, 1.0]'], {}), '(fig, [0.0, 0.0, 1.0, 1.0])\n', (2304, 2331), True, 'from matplotlib import pyplot as plt\n'), ((2585, 2619), 'matplotlib.pyplot.savefig', 'plt.savefig', (['dest_path'], {'dpi': 'height'}), '(dest_path, dpi=height)\n', (2596, 2619), True, 'from matplotlib import pyplot as plt\n'), ((2624, 2635), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (2633, 2635), True, 'from matplotlib import pyplot as plt\n'), ((2661, 2667), 'utils.conf.Conf', 'Conf', ([], {}), '()\n', (2665, 2667), False, 'from utils.conf import Conf\n'), ((3575, 3638), 'data.datasets.get_dataloaders', 'get_dataloaders', (['args.dataset_name', 'conf.nas_path', 'device', 'args'], {}), '(args.dataset_name, conf.nas_path, device, args)\n', (3590, 3638), False, 'from data.datasets import get_dataloaders\n'), ((1591, 1611), 'torch.max', 'torch.max', (['logits', '(1)'], {}), '(logits, 1)\n', (1600, 1611), False, 'import torch\n'), ((1812, 1827), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (1825, 1827), False, 'import torch\n'), ((1847, 1881), 'torch.nn.functional.adaptive_avg_pool2d', 'adaptive_avg_pool2d', (['grads', '(1, 1)'], {}), '(grads, (1, 1))\n', (1866, 1881), False, 'from torch.nn.functional import adaptive_avg_pool2d\n'), ((3909, 3947), 'tqdm.tqdm', 'tqdm', (['galleryimg_loader', '"""iterating.."""'], {}), "(galleryimg_loader, 'iterating..')\n", (3913, 3947), False, 'from tqdm import tqdm\n'), ((1902, 1939), 
'torch.sum', 'torch.sum', (['(conv_features * weights)', '(1)'], {}), '(conv_features * weights, 1)\n', (1911, 1939), False, 'import torch\n'), ((3004, 3019), 'pathlib.Path', 'Path', (['args.net1'], {}), '(args.net1)\n', (3008, 3019), False, 'from pathlib import Path\n'), ((3103, 3118), 'pathlib.Path', 'Path', (['args.net2'], {}), '(args.net2)\n', (3107, 3118), False, 'from pathlib import Path\n'), ((3293, 3352), 'utils.saver.Saver.load_net', 'Saver.load_net', (['args.net1', 'args.chk_net1', 'args.dataset_name'], {}), '(args.net1, args.chk_net1, args.dataset_name)\n', (3307, 3352), False, 'from utils.saver import Saver\n'), ((3375, 3434), 'utils.saver.Saver.load_net', 'Saver.load_net', (['args.net2', 'args.chk_net2', 'args.dataset_name'], {}), '(args.net2, args.chk_net2, args.dataset_name)\n', (3389, 3434), False, 'from utils.saver import Saver\n'), ((2535, 2557), 'matplotlib.pyplot.cm.get_cmap', 'plt.cm.get_cmap', (['"""jet"""'], {}), "('jet')\n", (2550, 2557), True, 'from matplotlib import pyplot as plt\n'), ((2819, 2834), 'pathlib.Path', 'Path', (['args.net2'], {}), '(args.net2)\n', (2823, 2834), False, 'from pathlib import Path\n'), ((4879, 4900), 'numpy.clip', 'np.clip', (['el_img', '(0)', '(1)'], {}), '(el_img, 0, 1)\n', (4886, 4900), True, 'import numpy as np\n'), ((4930, 4957), 'cv2.blur', 'cv2.blur', (['el_attn_1', '(3, 3)'], {}), '(el_attn_1, (3, 3))\n', (4938, 4957), False, 'import cv2\n'), ((4986, 5079), 'cv2.resize', 'cv2.resize', (['el_attn_1', '(el_img.shape[1], el_img.shape[0])'], {'interpolation': 'cv2.INTER_CUBIC'}), '(el_attn_1, (el_img.shape[1], el_img.shape[0]), interpolation=cv2\n .INTER_CUBIC)\n', (4996, 5079), False, 'import cv2\n'), ((5143, 5170), 'cv2.blur', 'cv2.blur', (['el_attn_2', '(3, 3)'], {}), '(el_attn_2, (3, 3))\n', (5151, 5170), False, 'import cv2\n'), ((5199, 5292), 'cv2.resize', 'cv2.resize', (['el_attn_2', '(el_img.shape[1], el_img.shape[0])'], {'interpolation': 'cv2.INTER_CUBIC'}), '(el_attn_2, (el_img.shape[1], 
el_img.shape[0]), interpolation=cv2\n .INTER_CUBIC)\n', (5209, 5292), False, 'import cv2\n'), ((2785, 2800), 'pathlib.Path', 'Path', (['args.net1'], {}), '(args.net1)\n', (2789, 2800), False, 'from pathlib import Path\n'), ((5572, 5607), 'numpy.concatenate', 'np.concatenate', (['[el_img, el_img]', '(1)'], {}), '([el_img, el_img], 1)\n', (5586, 5607), True, 'import numpy as np\n'), ((5634, 5675), 'numpy.concatenate', 'np.concatenate', (['[el_attn_1, el_attn_2]', '(1)'], {}), '([el_attn_1, el_attn_2], 1)\n', (5648, 5675), True, 'import numpy as np\n')] |
# Train a small dense classifier on pre-computed BERT sentence embeddings
# and save the predicted class probabilities for the test set.
from keras import layers
from keras.models import Sequential
import numpy as np
import pickle as pkl
from keras.layers import Conv1D, GlobalMaxPooling1D, Dense, Dropout, Flatten, MaxPooling1D, Input, Concatenate
from keras.utils import np_utils
from keras.optimizers import RMSprop
# Pre-extracted BERT feature vectors and integer labels (0/1).
train_vec = np.load('./datasets/train_bert.npy')
train_label = np.load('./datasets/train_label.npy')
test_vec = np.load('./datasets/test_bert.npy')
test_label = np.load('./datasets/test_label.npy')
# One-hot encode the labels for the softmax/categorical-crossentropy head.
train_label = np_utils.to_categorical(train_label,num_classes=2)
test_label = np_utils.to_categorical(test_label,num_classes=2)
# Three hidden ReLU layers (512 -> 256 -> 128) + 2-way softmax output.
model = Sequential()
model.add(Dense(512,activation='relu'))
model.add(Dense(256,activation='relu'))
model.add(Dense(128, activation='relu'))
model.add(Dense(2,activation='softmax'))
# NOTE(review): this optimizer is created but never used -- compile() below
# passes the string 'adam' instead. Confirm which optimizer was intended.
rmpprop = RMSprop(lr = 0.0001)
model.compile(optimizer='adam',\
        loss='categorical_crossentropy',\
        metrics=['accuracy'])
# 11% of the training data is held out for validation during fitting.
model.fit(train_vec, train_label, epochs=6, batch_size=16,validation_split=0.11)
loss, accuracy = model.evaluate(test_vec, test_label)
result = model.predict(test_vec)
# Persist the raw class probabilities for later analysis.
np.save( 'test_3layer_bert.npy',result)
print('test loss:', loss)
print('test accuracy:', accuracy)
| [
"numpy.load",
"numpy.save",
"keras.utils.np_utils.to_categorical",
"keras.layers.Dense",
"keras.models.Sequential",
"keras.optimizers.RMSprop"
] | [((305, 341), 'numpy.load', 'np.load', (['"""./datasets/train_bert.npy"""'], {}), "('./datasets/train_bert.npy')\n", (312, 341), True, 'import numpy as np\n'), ((357, 394), 'numpy.load', 'np.load', (['"""./datasets/train_label.npy"""'], {}), "('./datasets/train_label.npy')\n", (364, 394), True, 'import numpy as np\n'), ((407, 442), 'numpy.load', 'np.load', (['"""./datasets/test_bert.npy"""'], {}), "('./datasets/test_bert.npy')\n", (414, 442), True, 'import numpy as np\n'), ((457, 493), 'numpy.load', 'np.load', (['"""./datasets/test_label.npy"""'], {}), "('./datasets/test_label.npy')\n", (464, 493), True, 'import numpy as np\n'), ((509, 560), 'keras.utils.np_utils.to_categorical', 'np_utils.to_categorical', (['train_label'], {'num_classes': '(2)'}), '(train_label, num_classes=2)\n', (532, 560), False, 'from keras.utils import np_utils\n'), ((576, 626), 'keras.utils.np_utils.to_categorical', 'np_utils.to_categorical', (['test_label'], {'num_classes': '(2)'}), '(test_label, num_classes=2)\n', (599, 626), False, 'from keras.utils import np_utils\n'), ((637, 649), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (647, 649), False, 'from keras.models import Sequential\n'), ((827, 845), 'keras.optimizers.RMSprop', 'RMSprop', ([], {'lr': '(0.0001)'}), '(lr=0.0001)\n', (834, 845), False, 'from keras.optimizers import RMSprop\n'), ((1120, 1159), 'numpy.save', 'np.save', (['"""test_3layer_bert.npy"""', 'result'], {}), "('test_3layer_bert.npy', result)\n", (1127, 1159), True, 'import numpy as np\n'), ((661, 690), 'keras.layers.Dense', 'Dense', (['(512)'], {'activation': '"""relu"""'}), "(512, activation='relu')\n", (666, 690), False, 'from keras.layers import Conv1D, GlobalMaxPooling1D, Dense, Dropout, Flatten, MaxPooling1D, Input, Concatenate\n'), ((702, 731), 'keras.layers.Dense', 'Dense', (['(256)'], {'activation': '"""relu"""'}), "(256, activation='relu')\n", (707, 731), False, 'from keras.layers import Conv1D, GlobalMaxPooling1D, Dense, Dropout, Flatten, 
MaxPooling1D, Input, Concatenate\n'), ((743, 772), 'keras.layers.Dense', 'Dense', (['(128)'], {'activation': '"""relu"""'}), "(128, activation='relu')\n", (748, 772), False, 'from keras.layers import Conv1D, GlobalMaxPooling1D, Dense, Dropout, Flatten, MaxPooling1D, Input, Concatenate\n'), ((785, 815), 'keras.layers.Dense', 'Dense', (['(2)'], {'activation': '"""softmax"""'}), "(2, activation='softmax')\n", (790, 815), False, 'from keras.layers import Conv1D, GlobalMaxPooling1D, Dense, Dropout, Flatten, MaxPooling1D, Input, Concatenate\n')] |
"""
Takes an input NIST file and a Propellant, and creates polynomials for all the \
relevant variables
"""
from numpy import polyfit, poly1d
from phase import Phase
import numpy as np
import os.path
import warnings
import csv
# High-degree fits on few samples raise RankWarning; silence it globally
# because get_fit() deliberately caps the degree at the sample count.
warnings.simplefilter('ignore', np.RankWarning)
# Default polynomial degree used for all propellant property fits.
POLY_DEG = 10
def get_data(filename):
    """Read a CSV data file (relative to this module) and return its rows,
    skipping the header row.

    NOTE(review): the f-string below contains no placeholder, so *filename*
    is currently unused -- this looks like a redaction/garbling of
    something like f"{filename}.csv"; confirm against the original source.
    """
    rel_path = f"(unknown).csv"
    script_dir = os.path.dirname(__file__) # <-- absolute dir the script is in
    abs_file_path = os.path.join(script_dir, rel_path)
    with open(abs_file_path) as csvfile:
        split_data = list(csv.reader(csvfile))
    # Drop the header row.
    return split_data[1:]
def get_fit(data, position, degree):
    """Fit a polynomial of temperature (column 0) to the column at *position*.

    Rows that are too short or contain the literal string "undefined" are
    skipped. Returns a callable ``numpy.poly1d``, or ``None`` when no usable
    rows remain. The requested degree is capped at the number of samples
    (the resulting RankWarning is silenced at module import time).
    """
    temps = []
    vals = []
    for row in data:
        # A usable row needs both a temperature and a defined value.
        if len(row) <= position:
            continue
        if "undefined" in (row[0], row[position]):
            continue
        temps.append(float(row[0]))
        vals.append(float(row[position]))
    if not temps:
        return None
    coeffs = polyfit(temps, vals, min(degree, len(temps)))
    return poly1d(coeffs)
def propellant_data(propellant, filename):
    """Attach pressure/density fit polynomials to *propellant* (in place).

    Reads the propellant's NIST CSV and assigns fitted ``poly1d`` callables
    (or ``None`` when a column has no usable data) as attributes.
    Column positions are hard coded to the known file layout:
    1 = pressure, 2 = liquid density, 14 = vapour density.
    """
    # "I don't like this being a function with side effects but I don't want to
    # refactor it so I'll let it slide" - <NAME>, unsatisfied code reviewer
    split_data = get_data(f"prop-data/(unknown)")
    # values for position of each data type are hard coded and manually read
    # from file
    if propellant.phase == Phase.TWO_PHASE:
        propellant.F_pressure = get_fit(split_data, 1, POLY_DEG)
        propellant.F_density_liquid = get_fit(split_data, 2, POLY_DEG)
        propellant.F_density_vapour = get_fit(split_data, 14, POLY_DEG)
    elif propellant.phase == Phase.LIQUID:
        propellant.F_pressure = get_fit(split_data, 1, POLY_DEG)
        propellant.F_density_liquid = get_fit(split_data, 2, POLY_DEG)
def combo_data(propellant_mix, filename):
    """Populate *propellant_mix* with performance data for its ox/fuel pair.

    Looks up the row whose first two columns match the mix's oxidiser and
    fuel names and copies ISP, O/F molar ratio and temperatures onto the
    mix object.

    Raises:
        ValueError: if no row matches the oxidiser/fuel combination.
    """
    split_data = get_data(f"combo-data/(unknown)")
    matched = None
    for row in split_data:
        if row[0] == propellant_mix.oxidiser_name and row[1] == propellant_mix.fuel_name:
            matched = row
            break
    if matched is None:
        # BUGFIX: the original read the attributes from the loop variable
        # `row`, so a missing combination silently used the *last* row of
        # the file. Fail loudly instead.
        raise ValueError(
            f"No combo data for {propellant_mix.oxidiser_name}/"
            f"{propellant_mix.fuel_name}")
    # Column layout: 2 = O/F molar ratio, 3 = sea-level ISP,
    # 4 = chamber temperature, 5 = exhaust temperature.
    propellant_mix.ISP_sea_level = float(matched[3])
    propellant_mix.OF_molar_ratio = float(matched[2])
    propellant_mix.chamber_temp = float(matched[4])
    propellant_mix.exhaust_temp = float(matched[5])
| [
"numpy.poly1d",
"csv.reader",
"warnings.simplefilter",
"numpy.polyfit"
] | [((228, 275), 'warnings.simplefilter', 'warnings.simplefilter', (['"""ignore"""', 'np.RankWarning'], {}), "('ignore', np.RankWarning)\n", (249, 275), False, 'import warnings\n'), ((1186, 1200), 'numpy.poly1d', 'poly1d', (['coeffs'], {}), '(coeffs)\n', (1192, 1200), False, 'from numpy import polyfit, poly1d\n'), ((1136, 1173), 'numpy.polyfit', 'polyfit', (['temperatures', 'values', 'degree'], {}), '(temperatures, values, degree)\n', (1143, 1173), False, 'from numpy import polyfit, poly1d\n'), ((552, 571), 'csv.reader', 'csv.reader', (['csvfile'], {}), '(csvfile)\n', (562, 571), False, 'import csv\n')] |
import numpy as np
import cv2
import g2o
from threading import Lock, Thread
from queue import Queue
from enum import Enum
from collections import defaultdict
from .covisibility import GraphKeyFrame
from .covisibility import GraphMapPoint
from .covisibility import GraphMeasurement
class Camera(object):
    """Pinhole stereo camera: intrinsic matrix, image size and frustum range."""

    def __init__(self, fx, fy, cx, cy, width, height,
                 frustum_near, frustum_far, baseline):
        # Focal lengths / principal point (pixels) and stereo baseline.
        self.fx, self.fy = fx, fy
        self.cx, self.cy = cx, cy
        self.baseline = baseline
        # 3x3 calibration matrix K.
        self.intrinsic = np.array([[fx, 0, cx],
                                   [0, fy, cy],
                                   [0, 0, 1]])
        # Depth range of the viewing frustum used for culling.
        self.frustum_near = frustum_near
        self.frustum_far = frustum_far
        # Image dimensions in pixels.
        self.width = width
        self.height = height

    def compute_right_camera_pose(self, pose):
        """Pose of the right camera: the left pose shifted by the baseline
        along the camera's +x axis."""
        shifted = pose * np.array([self.baseline, 0, 0])
        return g2o.Isometry3d(pose.orientation(), shifted)
class Frame(object):
    """A single camera frame: pose, extracted features, projection helpers.

    NOTE(review): ``pose_covariance=np.identity(6)`` is a mutable default
    argument -- the same array object is shared by every Frame constructed
    without an explicit covariance. Safe only if callers never mutate it
    in place; confirm.
    """
    def __init__(self, idx, pose, feature, cam, timestamp=None,
            pose_covariance=np.identity(6)):
        self.idx = idx
        self.pose = pose    # g2o.Isometry3d
        self.feature = feature
        self.cam = cam
        self.timestamp = timestamp
        self.image = feature.image
        self.orientation = pose.orientation()
        self.position = pose.position()
        self.pose_covariance = pose_covariance
        # World->camera rigid transform: top 3 rows of the 4x4 inverse pose.
        self.transform_matrix = pose.inverse().matrix()[:3]  # shape: (3, 4)
        # K * [R|t]: maps world points directly to pixel coordinates.
        self.projection_matrix = (
            self.cam.intrinsic.dot(self.transform_matrix))  # from world frame to image
    # batch version
    def can_view(self, points, ground=False, margin=20):  # Frustum Culling
        """Boolean mask of which world points project inside the image
        (with *margin* pixels of slack); vertical bounds are skipped when
        *ground* is True."""
        points = np.transpose(points)
        (u, v), depth = self.project(self.transform(points))
        if ground:
            return np.logical_and.reduce([
                depth >= self.cam.frustum_near,
                depth <= self.cam.frustum_far,
                u >= - margin,
                u <= self.cam.width + margin])
        else:
            return np.logical_and.reduce([
                depth >= self.cam.frustum_near,
                depth <= self.cam.frustum_far,
                u >= - margin,
                u <= self.cam.width + margin,
                v >= - margin,
                v <= self.cam.height + margin])
    def update_pose(self, pose):
        """Replace the frame pose and refresh all derived matrices."""
        if isinstance(pose, g2o.SE3Quat):
            # Normalize SE3Quat input to the Isometry3d representation.
            self.pose = g2o.Isometry3d(pose.orientation(), pose.position())
        else:
            self.pose = pose
        self.orientation = self.pose.orientation()
        self.position = self.pose.position()
        self.transform_matrix = self.pose.inverse().matrix()[:3]
        self.projection_matrix = (
            self.cam.intrinsic.dot(self.transform_matrix))
    def transform(self, points):  # from world coordinates
        '''
        Transform points from world coordinates frame to camera frame.
        Args:
            points: a point or an array of points, of shape (3,) or (3, N).
        '''
        R = self.transform_matrix[:3, :3]
        if points.ndim == 1:
            t = self.transform_matrix[:3, 3]
        else:
            # Keep t as a column vector so it broadcasts over N points.
            t = self.transform_matrix[:3, 3:]
        return R.dot(points) + t
    def project(self, points):
        '''
        Project points from camera frame to image's pixel coordinates.
        Args:
            points: a point or an array of points, of shape (3,) or (3, N).
        Returns:
            Projected pixel coordinates, and respective depth.
        '''
        # Perspective division by z, then apply the intrinsic matrix.
        projection = self.cam.intrinsic.dot(points / points[-1:])
        return projection[:2], points[-1]
    def find_matches(self, points, descriptors):
        '''
        Match to points from world frame.
        Args:
            points: a list/array of points. shape: (N, 3)
            descriptors: a list of feature descriptors. length: N
        Returns:
            List of successfully matched (queryIdx, trainIdx) pairs.
        '''
        points = np.transpose(points)
        proj, _ = self.project(self.transform(points))
        proj = proj.transpose()
        return self.feature.find_matches(proj, descriptors)
    # Thin delegation to the feature container below.
    def get_keypoint(self, i):
        return self.feature.get_keypoint(i)
    def get_descriptor(self, i):
        return self.feature.get_descriptor(i)
    def get_color(self, pt):
        return self.feature.get_color(pt)
    def set_matched(self, i):
        self.feature.set_matched(i)
    def get_unmatched_keypoints(self):
        return self.feature.get_unmatched_keypoints()
class StereoFrame(Frame):
    """A synchronized left/right camera frame pair.

    ``self`` carries the left-camera pose (as in ``__init__``); ``self.left``
    and ``self.right`` are per-camera Frame objects used for matching and
    triangulation.
    """
    def __init__(self, idx, pose, feature, right_feature, cam,
            right_cam=None, timestamp=None, pose_covariance=None):
        # BUGFIX: avoid a mutable default argument (np.identity(6) would be
        # shared by every StereoFrame constructed without a covariance).
        if pose_covariance is None:
            pose_covariance = np.identity(6)
        super().__init__(idx, pose, feature, cam, timestamp, pose_covariance)
        self.left = Frame(idx, pose, feature, cam, timestamp, pose_covariance)
        # Right camera pose = left pose shifted by the stereo baseline.
        self.right = Frame(idx,
            cam.compute_right_camera_pose(pose),
            right_feature, right_cam or cam,
            timestamp, pose_covariance)
    def find_matches(self, source, points, descriptors):
        """Match map points against both images (right match runs in a
        worker thread) and fuse the results into Measurement objects."""
        q2 = Queue()
        def find_right(points, descriptors, q):
            m = dict(self.right.find_matches(points, descriptors))
            q.put(m)
        t2 = Thread(target=find_right, args=(points, descriptors, q2))
        t2.start()
        matches_left = dict(self.left.find_matches(points, descriptors))
        t2.join()
        matches_right = q2.get()
        measurements = []
        for i, j in matches_left.items():
            if i in matches_right:
                j2 = matches_right[i]
                # Rectified stereo: matching rows must (almost) agree.
                y1 = self.left.get_keypoint(j).pt[1]
                y2 = self.right.get_keypoint(j2).pt[1]
                if abs(y1 - y2) > 2.5:  # epipolar constraint
                    continue   # TODO: choose one
                meas = Measurement(
                    Measurement.Type.STEREO,
                    source,
                    [self.left.get_keypoint(j),
                        self.right.get_keypoint(j2)],
                    [self.left.get_descriptor(j),
                        self.right.get_descriptor(j2)])
                measurements.append((i, meas))
                self.left.set_matched(j)
                self.right.set_matched(j2)
            else:
                meas = Measurement(
                    Measurement.Type.LEFT,
                    source,
                    [self.left.get_keypoint(j)],
                    [self.left.get_descriptor(j)])
                measurements.append((i, meas))
                self.left.set_matched(j)
        for i, j in matches_right.items():
            if i not in matches_left:
                meas = Measurement(
                    Measurement.Type.RIGHT,
                    source,
                    [self.right.get_keypoint(j)],
                    [self.right.get_descriptor(j)])
                measurements.append((i, meas))
                self.right.set_matched(j)
        return measurements
    def match_mappoints(self, mappoints, source):
        """Match the given map points and attach each matched point to its
        Measurement."""
        points = []
        descriptors = []
        for mappoint in mappoints:
            points.append(mappoint.position)
            descriptors.append(mappoint.descriptor)
        matched_measurements = self.find_matches(source, points, descriptors)
        measurements = []
        for i, meas in matched_measurements:
            meas.mappoint = mappoints[i]
            measurements.append(meas)
        return measurements
    def triangulate(self):
        """Triangulate new map points from the still-unmatched keypoints of
        both images. Returns (mappoints, measurements)."""
        kps_left, desps_left, idx_left = self.left.get_unmatched_keypoints()
        kps_right, desps_right, idx_right = self.right.get_unmatched_keypoints()
        mappoints, matches = self.triangulate_points(
            kps_left, desps_left, kps_right, desps_right)
        measurements = []
        for mappoint, (i, j) in zip(mappoints, matches):
            meas = Measurement(
                Measurement.Type.STEREO,
                Measurement.Source.TRIANGULATION,
                [kps_left[i], kps_right[j]],
                [desps_left[i], desps_right[j]])
            meas.mappoint = mappoint
            meas.view = self.transform(mappoint.position)
            measurements.append(meas)
            self.left.set_matched(idx_left[i])
            self.right.set_matched(idx_right[j])
        return mappoints, measurements
    def triangulate_points(self, kps_left, desps_left, kps_right, desps_right):
        """DLT-triangulate row-matched keypoint pairs; keep only points that
        lie in both frustums. Returns (mappoints, (queryIdx, trainIdx) list)."""
        matches = self.feature.row_match(
            kps_left, desps_left, kps_right, desps_right)
        assert len(matches) > 0
        px_left = np.array([kps_left[m.queryIdx].pt for m in matches])
        px_right = np.array([kps_right[m.trainIdx].pt for m in matches])
        points = cv2.triangulatePoints(
            self.left.projection_matrix,
            self.right.projection_matrix,
            px_left.transpose(),
            px_right.transpose()
            ).transpose()  # shape: (N, 4)
        # Dehomogenize.
        points = points[:, :3] / points[:, 3:]
        can_view = np.logical_and(
            self.left.can_view(points),
            self.right.can_view(points))
        mappoints = []
        matchs = []
        for i, point in enumerate(points):
            if not can_view[i]:
                continue
            # Viewing direction from the camera towards the point.
            normal = point - self.position
            normal = normal / np.linalg.norm(normal)
            color = self.left.get_color(px_left[i])
            mappoint = MapPoint(
                point, normal, desps_left[matches[i].queryIdx], color)
            mappoints.append(mappoint)
            matchs.append((matches[i].queryIdx, matches[i].trainIdx))
        return mappoints, matchs
    def update_pose(self, pose):
        """Update the pose of this frame and of both sub-frames."""
        super().update_pose(pose)
        # BUGFIX: consistently with __init__, the *left* frame carries the
        # frame pose and the *right* frame is offset by the stereo baseline
        # (the two assignments were previously swapped).
        self.left.update_pose(pose)
        self.right.update_pose(
            self.cam.compute_right_camera_pose(pose))
    # batch version
    def can_view(self, mappoints):
        """Boolean mask of map points visible in at least one camera AND
        whose stored normal is within 45 degrees of the viewing direction."""
        points = []
        point_normals = []
        for i, p in enumerate(mappoints):
            points.append(p.position)
            point_normals.append(p.normal)
        points = np.asarray(points)
        point_normals = np.asarray(point_normals)
        normals = points - self.position
        normals /= np.linalg.norm(normals, axis=-1, keepdims=True)
        cos = np.clip(np.sum(point_normals * normals, axis=1), -1, 1)
        parallel = np.arccos(cos) < (np.pi / 4)
        can_view = np.logical_or(
            self.left.can_view(points),
            self.right.can_view(points))
        return np.logical_and(parallel, can_view)
    def to_keyframe(self):
        """Promote this frame into a covisibility-graph KeyFrame."""
        return KeyFrame(
            self.idx, self.pose,
            self.left.feature, self.right.feature,
            self.cam, self.right.cam,
            self.pose_covariance)
class KeyFrame(GraphKeyFrame, StereoFrame):
    """A StereoFrame promoted into the covisibility graph.

    Keyframes receive a process-wide unique id (allocated under a lock)
    and hold relative-pose constraints to their reference, preceding and
    loop-closure keyframes for pose-graph optimization.
    """
    _id = 0
    _id_lock = Lock()
    def __init__(self, *args, **kwargs):
        GraphKeyFrame.__init__(self)
        StereoFrame.__init__(self, *args, **kwargs)
        # Allocate a unique id; the lock makes this thread-safe.
        with KeyFrame._id_lock:
            self.id = KeyFrame._id
            KeyFrame._id += 1
        self.reference_keyframe = None      # keyframe this one is expressed against
        self.reference_constraint = None    # relative pose: reference^-1 * self
        self.preceding_keyframe = None      # previous keyframe in time
        self.preceding_constraint = None
        self.loop_keyframe = None           # set when a loop closure is detected
        self.loop_constraint = None
        self.fixed = False                  # fixed keyframes are not optimized
    def update_reference(self, reference=None):
        """Set (optionally) and recompute the reference-pose constraint."""
        if reference is not None:
            self.reference_keyframe = reference
        self.reference_constraint = (
            self.reference_keyframe.pose.inverse() * self.pose)
    def update_preceding(self, preceding=None):
        """Set (optionally) and recompute the preceding-pose constraint."""
        if preceding is not None:
            self.preceding_keyframe = preceding
        self.preceding_constraint = (
            self.preceding_keyframe.pose.inverse() * self.pose)
    def set_loop(self, keyframe, constraint):
        # Record a detected loop closure and its relative-pose constraint.
        self.loop_keyframe = keyframe
        self.loop_constraint = constraint
    def is_fixed(self):
        return self.fixed
    def set_fixed(self, fixed=True):
        self.fixed = fixed
class MapPoint(GraphMapPoint):
    """A 3D landmark with a viewing normal, a descriptor, and observation
    counters used to decide whether the point has gone bad."""
    _id = 0
    _id_lock = Lock()
    def __init__(self, position, normal, descriptor,
            color=None,
            covariance=None):
        """
        Args:
            position: 3D position in world coordinates.
            normal: mean viewing direction of the point.
            descriptor: feature descriptor associated with the point.
            color: RGB color; defaults to black (zeros).
            covariance: 3x3 position covariance; defaults to 1e-4 * identity.
        """
        super().__init__()
        # BUGFIX: create fresh default arrays per instance. Mutable defaults
        # in the signature (np.zeros(3) / np.identity(3) * 1e-4) are
        # evaluated once and shared by every MapPoint, so an in-place
        # mutation of one point's color/covariance would affect all of them.
        if color is None:
            color = np.zeros(3)
        if covariance is None:
            covariance = np.identity(3) * 1e-4
        # Allocate a unique id; the lock makes this thread-safe.
        with MapPoint._id_lock:
            self.id = MapPoint._id
            MapPoint._id += 1
        self.position = position
        self.normal = normal
        self.descriptor = descriptor
        self.covariance = covariance
        self.color = color
        # self.owner = None
        # Observation statistics consumed by is_bad().
        self.count = defaultdict(int)
    def update_position(self, position):
        self.position = position
    def update_normal(self, normal):
        self.normal = normal
    def update_descriptor(self, descriptor):
        self.descriptor = descriptor
    def set_color(self, color):
        self.color = color
    def is_bad(self):
        """A point is bad when it has no measurements, or its outlier or
        projection counts dominate its inlier/measurement counts."""
        with self._lock:
            status = (
                self.count['meas'] == 0
                or (self.count['outlier'] > 20
                    and self.count['outlier'] > self.count['inlier'])
                or (self.count['proj'] > 20
                    and self.count['proj'] > self.count['meas'] * 10))
        return status
    def increase_outlier_count(self):
        with self._lock:
            self.count['outlier'] += 1
    def increase_inlier_count(self):
        with self._lock:
            self.count['inlier'] += 1
    def increase_projection_count(self):
        with self._lock:
            self.count['proj'] += 1
    def increase_measurement_count(self):
        with self._lock:
            self.count['meas'] += 1
class Measurement(GraphMeasurement):
    """One observation of a map point: keypoint(s) and descriptor(s) from
    the left image, the right image, or both (stereo)."""
    Source = Enum('Measurement.Source', ['TRIANGULATION', 'TRACKING', 'REFIND'])
    Type = Enum('Measurement.Type', ['STEREO', 'LEFT', 'RIGHT'])
    def __init__(self, type, source, keypoints, descriptors):
        super().__init__()
        self.type = type
        self.source = source
        self.keypoints = keypoints
        self.descriptors = descriptors
        self.view = None    # mappoint's position in current coordinates frame
        # Pixel position in the (left) image.
        self.xy = np.array(self.keypoints[0].pt)
        if self.is_stereo():
            # Stereo measurement vector: (u_left, v_left, u_right).
            self.xyx = np.array([
                *keypoints[0].pt, keypoints[1].pt[0]])
        self.triangulation = (source == self.Source.TRIANGULATION)
    def get_descriptor(self, i=0):
        return self.descriptors[i]
    def get_keypoint(self, i=0):
        return self.keypoints[i]
    def get_descriptors(self):
        return self.descriptors
    def get_keypoints(self):
        return self.keypoints
    def is_stereo(self):
        return self.type == Measurement.Type.STEREO
    def is_left(self):
        return self.type == Measurement.Type.LEFT
    def is_right(self):
        return self.type == Measurement.Type.RIGHT
    def from_triangulation(self):
        return self.triangulation
    def from_tracking(self):
        return self.source == Measurement.Source.TRACKING
    def from_refind(self):
        return self.source == Measurement.Source.REFIND
"threading.Thread",
"numpy.sum",
"numpy.logical_and",
"numpy.asarray",
"enum.Enum",
"numpy.transpose",
"numpy.identity",
"numpy.zeros",
"numpy.logical_and.reduce",
"threading.Lock",
"collections.defaultdict",
"numpy.array",
"numpy.linalg.norm",
"numpy.arccos",
"queue.Queue"
] | [((10908, 10914), 'threading.Lock', 'Lock', ([], {}), '()\n', (10912, 10914), False, 'from threading import Lock, Thread\n'), ((12172, 12178), 'threading.Lock', 'Lock', ([], {}), '()\n', (12176, 12178), False, 'from threading import Lock, Thread\n'), ((13791, 13858), 'enum.Enum', 'Enum', (['"""Measurement.Source"""', "['TRIANGULATION', 'TRACKING', 'REFIND']"], {}), "('Measurement.Source', ['TRIANGULATION', 'TRACKING', 'REFIND'])\n", (13795, 13858), False, 'from enum import Enum\n'), ((13870, 13923), 'enum.Enum', 'Enum', (['"""Measurement.Type"""', "['STEREO', 'LEFT', 'RIGHT']"], {}), "('Measurement.Type', ['STEREO', 'LEFT', 'RIGHT'])\n", (13874, 13923), False, 'from enum import Enum\n'), ((558, 605), 'numpy.array', 'np.array', (['[[fx, 0, cx], [0, fy, cy], [0, 0, 1]]'], {}), '([[fx, 0, cx], [0, fy, cy], [0, 0, 1]])\n', (566, 605), True, 'import numpy as np\n'), ((1064, 1078), 'numpy.identity', 'np.identity', (['(6)'], {}), '(6)\n', (1075, 1078), True, 'import numpy as np\n'), ((1731, 1751), 'numpy.transpose', 'np.transpose', (['points'], {}), '(points)\n', (1743, 1751), True, 'import numpy as np\n'), ((4041, 4061), 'numpy.transpose', 'np.transpose', (['points'], {}), '(points)\n', (4053, 4061), True, 'import numpy as np\n'), ((4747, 4761), 'numpy.identity', 'np.identity', (['(6)'], {}), '(6)\n', (4758, 4761), True, 'import numpy as np\n'), ((5172, 5179), 'queue.Queue', 'Queue', ([], {}), '()\n', (5177, 5179), False, 'from queue import Queue\n'), ((5329, 5386), 'threading.Thread', 'Thread', ([], {'target': 'find_right', 'args': '(points, descriptors, q2)'}), '(target=find_right, args=(points, descriptors, q2))\n', (5335, 5386), False, 'from threading import Lock, Thread\n'), ((8655, 8707), 'numpy.array', 'np.array', (['[kps_left[m.queryIdx].pt for m in matches]'], {}), '([kps_left[m.queryIdx].pt for m in matches])\n', (8663, 8707), True, 'import numpy as np\n'), ((8727, 8780), 'numpy.array', 'np.array', (['[kps_right[m.trainIdx].pt for m in matches]'], {}), 
'([kps_right[m.trainIdx].pt for m in matches])\n', (8735, 8780), True, 'import numpy as np\n'), ((10158, 10176), 'numpy.asarray', 'np.asarray', (['points'], {}), '(points)\n', (10168, 10176), True, 'import numpy as np\n'), ((10201, 10226), 'numpy.asarray', 'np.asarray', (['point_normals'], {}), '(point_normals)\n', (10211, 10226), True, 'import numpy as np\n'), ((10288, 10335), 'numpy.linalg.norm', 'np.linalg.norm', (['normals'], {'axis': '(-1)', 'keepdims': '(True)'}), '(normals, axis=-1, keepdims=True)\n', (10302, 10335), True, 'import numpy as np\n'), ((10587, 10621), 'numpy.logical_and', 'np.logical_and', (['parallel', 'can_view'], {}), '(parallel, can_view)\n', (10601, 10621), True, 'import numpy as np\n'), ((12252, 12263), 'numpy.zeros', 'np.zeros', (['(3)'], {}), '(3)\n', (12260, 12263), True, 'import numpy as np\n'), ((12652, 12668), 'collections.defaultdict', 'defaultdict', (['int'], {}), '(int)\n', (12663, 12668), False, 'from collections import defaultdict\n'), ((14241, 14271), 'numpy.array', 'np.array', (['self.keypoints[0].pt'], {}), '(self.keypoints[0].pt)\n', (14249, 14271), True, 'import numpy as np\n'), ((860, 891), 'numpy.array', 'np.array', (['[self.baseline, 0, 0]'], {}), '([self.baseline, 0, 0])\n', (868, 891), True, 'import numpy as np\n'), ((1852, 1987), 'numpy.logical_and.reduce', 'np.logical_and.reduce', (['[depth >= self.cam.frustum_near, depth <= self.cam.frustum_far, u >= -\n margin, u <= self.cam.width + margin]'], {}), '([depth >= self.cam.frustum_near, depth <= self.cam.\n frustum_far, u >= -margin, u <= self.cam.width + margin])\n', (1873, 1987), True, 'import numpy as np\n'), ((2082, 2267), 'numpy.logical_and.reduce', 'np.logical_and.reduce', (['[depth >= self.cam.frustum_near, depth <= self.cam.frustum_far, u >= -\n margin, u <= self.cam.width + margin, v >= -margin, v <= self.cam.\n height + margin]'], {}), '([depth >= self.cam.frustum_near, depth <= self.cam.\n frustum_far, u >= -margin, u <= self.cam.width + margin, v >= 
-margin, \n v <= self.cam.height + margin])\n', (2103, 2267), True, 'import numpy as np\n'), ((10358, 10397), 'numpy.sum', 'np.sum', (['(point_normals * normals)'], {'axis': '(1)'}), '(point_normals * normals, axis=1)\n', (10364, 10397), True, 'import numpy as np\n'), ((10425, 10439), 'numpy.arccos', 'np.arccos', (['cos'], {}), '(cos)\n', (10434, 10439), True, 'import numpy as np\n'), ((12289, 12303), 'numpy.identity', 'np.identity', (['(3)'], {}), '(3)\n', (12300, 12303), True, 'import numpy as np\n'), ((14324, 14372), 'numpy.array', 'np.array', (['[*keypoints[0].pt, keypoints[1].pt[0]]'], {}), '([*keypoints[0].pt, keypoints[1].pt[0]])\n', (14332, 14372), True, 'import numpy as np\n'), ((9401, 9423), 'numpy.linalg.norm', 'np.linalg.norm', (['normal'], {}), '(normal)\n', (9415, 9423), True, 'import numpy as np\n')] |
import unittest
from setup.settings import *
from numpy.testing import *
import numpy as np
import dolphindb_numpy as dnp
import pandas as pd
import orca
class TopicOnesZerosTest(unittest.TestCase):
    """Checks that dolphindb_numpy.asarray agrees with numpy.asarray.

    Requires a reachable DolphinDB server; HOST/PORT come from the
    ``setup.settings`` star import above.
    """
    @classmethod
    def setUpClass(cls):
        # connect to a DolphinDB server
        orca.connect(HOST, PORT, "admin", "123456")
    def test_function_asarray(self):
        x = [1, 2, 3]
        npa = np.asarray(x)
        dnpa = dnp.asarray(x)
        # assert_equal comes from the numpy.testing star import above.
        assert_equal(dnpa, npa)
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
| [
"unittest.main",
"numpy.asarray",
"dolphindb_numpy.asarray",
"orca.connect"
] | [((519, 534), 'unittest.main', 'unittest.main', ([], {}), '()\n', (532, 534), False, 'import unittest\n'), ((292, 335), 'orca.connect', 'orca.connect', (['HOST', 'PORT', '"""admin"""', '"""123456"""'], {}), "(HOST, PORT, 'admin', '123456')\n", (304, 335), False, 'import orca\n'), ((410, 423), 'numpy.asarray', 'np.asarray', (['x'], {}), '(x)\n', (420, 423), True, 'import numpy as np\n'), ((439, 453), 'dolphindb_numpy.asarray', 'dnp.asarray', (['x'], {}), '(x)\n', (450, 453), True, 'import dolphindb_numpy as dnp\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import (division, print_function, absolute_import,
unicode_literals)
"""
Utilities for observation planning
"""
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
import pandas as pd
import logging
from astropy.coordinates import SkyCoord
from astropy.io import fits, ascii
from astropy.table import Table
from astropy import units as u
from astropy.time import Time
from imp import reload
from alexmods import utils
# Convenience re-export of the interactive box-selection helper.
box_select = utils.box_select
# NOTE(review): add_month_lines is defined below but not listed here --
# confirm whether it should be part of the public API.
__all__ = ["mike_snr_calculator","mike_texp_calculator"]
def mike_snr_calculator(bp, rp, texp, blue0=19.4, red0=18.5, slitloss=0.7):
    """Estimate MIKE blue/red-arm S/N for an exposure of ``texp`` seconds.

    ``bp``/``rp`` are the blue/red magnitudes with zero points ``blue0``/
    ``red0`` (treated as AB mags); ``slitloss`` is the fractional slit
    throughput. Returns ``(snr_blue, snr_red)``.
    """
    # Count rates per Angstrom-pixel, scaled from the zero-point magnitude.
    rate_blue = 0.02 * 10 ** (-0.4 * (bp - blue0)) * slitloss
    rate_red = 0.05 * 10 ** (-0.4 * (rp - red0)) * slitloss
    # Poisson-limited: S/N = sqrt(total counts).
    return np.sqrt(rate_blue * texp), np.sqrt(rate_red * texp)
def mike_texp_calculator(bp, rp, snr, blue0=19.4, red0=18.5, slitloss=0.7):
    """Exposure time (seconds) needed to reach ``snr`` on each MIKE arm.

    Inverse of ``mike_snr_calculator``: t = snr**2 / count_rate.
    Returns ``(texp_blue, texp_red)``.
    """
    rate_blue = 0.02 * 10 ** (-0.4 * (bp - blue0)) * slitloss
    rate_red = 0.05 * 10 ** (-0.4 * (rp - red0)) * slitloss
    return snr ** 2 / rate_blue, snr ** 2 / rate_red
def add_month_lines(ax, text_y=0, radeg=False):
    """Overlay vertical month-boundary lines and labels on *ax*.

    Args:
        ax: matplotlib axes whose x-axis is RA in hours (or degrees).
        text_y: y position at which the month labels are drawn.
        radeg: if True, the x-axis is RA in degrees instead of hours.

    NOTE(review): the month -> RA mapping below is hard coded; confirm it
    matches the intended observability convention for the site/season.
    """
    ylim = ax.get_ylim()
    # RA (hours) assigned to each month, January first.
    month_ra = [7,9,11,13,15,17,19,21,23,1,3,5]
    if radeg: month_ra = [x*360/24 for x in month_ra]
    months = ["Jan","Feb","Mar","Apr","May","Jun","Jul","Aug","Sep","Oct","Nov","Dec"]
    ax.vlines(month_ra,ylim[0],ylim[1],alpha=.3)
    for x,month in zip(month_ra, months):
        ax.text(x+.5,text_y,month,rotation='vertical',ha='left',va='center')
    # Tick spacing: 60/15 degrees in degree mode, 4/1 hours otherwise.
    if radeg:
        ax.xaxis.set_major_locator(plt.MultipleLocator(60))
        ax.xaxis.set_minor_locator(plt.MultipleLocator(15))
    else:
        ax.xaxis.set_major_locator(plt.MultipleLocator(4))
        ax.xaxis.set_minor_locator(plt.MultipleLocator(1))
| [
"matplotlib.pyplot.MultipleLocator",
"numpy.sqrt"
] | [((841, 869), 'numpy.sqrt', 'np.sqrt', (['(count_rate_B * texp)'], {}), '(count_rate_B * texp)\n', (848, 869), True, 'import numpy as np\n'), ((973, 1001), 'numpy.sqrt', 'np.sqrt', (['(count_rate_R * texp)'], {}), '(count_rate_R * texp)\n', (980, 1001), True, 'import numpy as np\n'), ((1866, 1889), 'matplotlib.pyplot.MultipleLocator', 'plt.MultipleLocator', (['(60)'], {}), '(60)\n', (1885, 1889), True, 'import matplotlib.pyplot as plt\n'), ((1926, 1949), 'matplotlib.pyplot.MultipleLocator', 'plt.MultipleLocator', (['(15)'], {}), '(15)\n', (1945, 1949), True, 'import matplotlib.pyplot as plt\n'), ((1996, 2018), 'matplotlib.pyplot.MultipleLocator', 'plt.MultipleLocator', (['(4)'], {}), '(4)\n', (2015, 2018), True, 'import matplotlib.pyplot as plt\n'), ((2055, 2077), 'matplotlib.pyplot.MultipleLocator', 'plt.MultipleLocator', (['(1)'], {}), '(1)\n', (2074, 2077), True, 'import matplotlib.pyplot as plt\n')] |
# Copyright 2014-2018 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import numpy as np
from ctypes import POINTER, c_double, c_int64
from pyscf.nao.m_libnao import libnao
# Declare the C signature of aos_libnao so ctypes marshals arguments
# correctly when the Fortran/C routine is called below.
libnao.aos_libnao.argtypes = (
  POINTER(c_int64), # ncoords
  POINTER(c_double), # coords
  POINTER(c_int64), # norbs
  POINTER(c_double), # res[icoord, orb]
  POINTER(c_int64)) # ldres leading dimension (fastest changing dimension) of res (norbs)
""" The purpose of this is to evaluate the atomic orbitals at a given set of atomic coordinates """
def aos_libnao(coords, norbs):
  """Evaluate the atomic orbitals at the given Cartesian coordinates.

  Args:
    coords: (ncoords, 3) array of points.
    norbs: number of atomic orbitals (must be > 0).
  Returns:
    (ncoords, norbs) float64 array of orbital values per point.
  """
  assert len(coords.shape) == 2
  assert coords.shape[1] == 3
  assert norbs>0
  ncoords = coords.shape[0]
  # Output buffer: C-contiguous, writeable float64 as the C side expects.
  co2val = np.require( np.zeros((ncoords,norbs)), dtype=c_double, requirements='CW')
  # Ensure the coordinates are contiguous float64 before passing pointers.
  crd_copy = np.require(coords, dtype=c_double, requirements='C')
  # Last argument (ldres) equals norbs: each row of res is laid out
  # contiguously with norbs entries.
  libnao.aos_libnao(
    c_int64(ncoords),
    crd_copy.ctypes.data_as(POINTER(c_double)),
    c_int64(norbs),
    co2val.ctypes.data_as(POINTER(c_double)),
    c_int64(norbs))
  return co2val
| [
"ctypes.c_int64",
"numpy.zeros",
"numpy.require",
"ctypes.POINTER"
] | [((787, 803), 'ctypes.POINTER', 'POINTER', (['c_int64'], {}), '(c_int64)\n', (794, 803), False, 'from ctypes import POINTER, c_double, c_int64\n'), ((818, 835), 'ctypes.POINTER', 'POINTER', (['c_double'], {}), '(c_double)\n', (825, 835), False, 'from ctypes import POINTER, c_double, c_int64\n'), ((848, 864), 'ctypes.POINTER', 'POINTER', (['c_int64'], {}), '(c_int64)\n', (855, 864), False, 'from ctypes import POINTER, c_double, c_int64\n'), ((877, 894), 'ctypes.POINTER', 'POINTER', (['c_double'], {}), '(c_double)\n', (884, 894), False, 'from ctypes import POINTER, c_double, c_int64\n'), ((917, 933), 'ctypes.POINTER', 'POINTER', (['c_int64'], {}), '(c_int64)\n', (924, 933), False, 'from ctypes import POINTER, c_double, c_int64\n'), ((1345, 1397), 'numpy.require', 'np.require', (['coords'], {'dtype': 'c_double', 'requirements': '"""C"""'}), "(coords, dtype=c_double, requirements='C')\n", (1355, 1397), True, 'import numpy as np\n'), ((1270, 1296), 'numpy.zeros', 'np.zeros', (['(ncoords, norbs)'], {}), '((ncoords, norbs))\n', (1278, 1296), True, 'import numpy as np\n'), ((1424, 1440), 'ctypes.c_int64', 'c_int64', (['ncoords'], {}), '(ncoords)\n', (1431, 1440), False, 'from ctypes import POINTER, c_double, c_int64\n'), ((1494, 1508), 'ctypes.c_int64', 'c_int64', (['norbs'], {}), '(norbs)\n', (1501, 1508), False, 'from ctypes import POINTER, c_double, c_int64\n'), ((1560, 1574), 'ctypes.c_int64', 'c_int64', (['norbs'], {}), '(norbs)\n', (1567, 1574), False, 'from ctypes import POINTER, c_double, c_int64\n'), ((1470, 1487), 'ctypes.POINTER', 'POINTER', (['c_double'], {}), '(c_double)\n', (1477, 1487), False, 'from ctypes import POINTER, c_double, c_int64\n'), ((1536, 1553), 'ctypes.POINTER', 'POINTER', (['c_double'], {}), '(c_double)\n', (1543, 1553), False, 'from ctypes import POINTER, c_double, c_int64\n')] |
import os
import numpy as np
import matplotlib
matplotlib.use('Qt5Agg')
from matplotlib import pyplot as plt
from qtpy.QtWidgets import QWidget, QVBoxLayout, QCheckBox
from glue.config import qt_client
from glue.core.data_combo_helper import ComponentIDComboHelper
from glue.external.echo import CallbackProperty, SelectionCallbackProperty
from glue.external.echo.qt import (connect_checkable_button,
autoconnect_callbacks_to_qt)
from glue.viewers.common.layer_artist import LayerArtist
from glue.viewers.common.state import ViewerState, LayerState
from glue.viewers.common.qt.data_viewer import DataViewer
from glue.utils.qt import load_ui
class TutorialViewerState(ViewerState):
    """Viewer-level state: which data attributes are shown on each axis."""

    x_att = SelectionCallbackProperty(docstring='The attribute to use on the x-axis')
    y_att = SelectionCallbackProperty(docstring='The attribute to use on the y-axis')

    def __init__(self, *args, **kwargs):
        super(TutorialViewerState, self).__init__(*args, **kwargs)
        # Combo helpers keep the x/y attribute choices in sync with the data.
        self._x_att_helper = ComponentIDComboHelper(self, 'x_att')
        self._y_att_helper = ComponentIDComboHelper(self, 'y_att')
        self.add_callback('layers', self._on_layers_change)

    def _on_layers_change(self, value):
        # self.layers_data is a shortcut for
        # [layer_state.layer for layer_state in self.layers]
        datasets = self.layers_data
        for helper in (self._x_att_helper, self._y_att_helper):
            helper.set_multiple_data(datasets)
class TutorialLayerState(LayerState):
fill = CallbackProperty(False, docstring='Whether to show the markers as filled or not')
class TutorialLayerArtist(LayerArtist):
_layer_state_cls = TutorialLayerState
def __init__(self, axes, *args, **kwargs):
super(TutorialLayerArtist, self).__init__(*args, **kwargs)
self.axes = axes
self.artist = self.axes.plot([], [], 'o', color=self.state.layer.style.color)[0]
self.state.add_callback('fill', self._on_fill_change)
self.state.add_callback('visible', self._on_visible_change)
self.state.add_callback('zorder', self._on_zorder_change)
self._viewer_state.add_callback('x_att', self._on_attribute_change)
self._viewer_state.add_callback('y_att', self._on_attribute_change)
def _on_fill_change(self, value=None):
if self.state.fill:
self.artist.set_markerfacecolor(self.state.layer.style.color)
else:
self.artist.set_markerfacecolor('none')
self.redraw()
def _on_visible_change(self, value=None):
self.artist.set_visible(self.state.visible)
self.redraw()
def _on_zorder_change(self, value=None):
self.artist.set_zorder(self.state.zorder)
self.redraw()
def _on_attribute_change(self, value=None):
if self._viewer_state.x_att is None or self._viewer_state.y_att is None:
return
x = self.state.layer[self._viewer_state.x_att]
y = self.state.layer[self._viewer_state.y_att]
self.artist.set_data(x, y)
self.axes.set_xlim(np.nanmin(x), np.nanmax(x))
self.axes.set_ylim(np.nanmin(y), np.nanmax(y))
self.redraw()
def clear(self):
self.artist.set_visible(False)
def remove(self):
self.artist.remove()
def redraw(self):
self.axes.figure.canvas.draw_idle()
def update(self):
self._on_fill_change()
self._on_attribute_change()
class TutorialViewerStateWidget(QWidget):
def __init__(self, viewer_state=None, session=None):
super(TutorialViewerStateWidget, self).__init__()
self.ui = load_ui('viewer_state.ui', self,
directory=os.path.dirname(__file__))
self.viewer_state = viewer_state
self._connections = autoconnect_callbacks_to_qt(self.viewer_state, self.ui)
class TutorialLayerStateWidget(QWidget):
def __init__(self, layer_artist):
super(TutorialLayerStateWidget, self).__init__()
self.checkbox = QCheckBox('Fill markers')
layout = QVBoxLayout()
layout.addWidget(self.checkbox)
self.setLayout(layout)
self.layer_state = layer_artist.state
connect_checkable_button(self.layer_state, 'fill', self.checkbox)
class TutorialDataViewer(DataViewer):
LABEL = 'Tutorial viewer'
_state_cls = TutorialViewerState
_options_cls = TutorialViewerStateWidget
_layer_style_widget_cls = TutorialLayerStateWidget
_data_artist_cls = TutorialLayerArtist
_subset_artist_cls = TutorialLayerArtist
def __init__(self, *args, **kwargs):
super(TutorialDataViewer, self).__init__(*args, **kwargs)
self.axes = plt.subplot(1, 1, 1)
self.setCentralWidget(self.axes.figure.canvas)
def get_layer_artist(self, cls, layer=None, layer_state=None):
return cls(self.axes, self.state, layer=layer, layer_state=layer_state)
qt_client.add(TutorialDataViewer)
| [
"matplotlib.pyplot.subplot",
"qtpy.QtWidgets.QCheckBox",
"glue.external.echo.CallbackProperty",
"glue.external.echo.SelectionCallbackProperty",
"glue.external.echo.qt.autoconnect_callbacks_to_qt",
"numpy.nanmax",
"os.path.dirname",
"qtpy.QtWidgets.QVBoxLayout",
"numpy.nanmin",
"matplotlib.use",
... | [((48, 72), 'matplotlib.use', 'matplotlib.use', (['"""Qt5Agg"""'], {}), "('Qt5Agg')\n", (62, 72), False, 'import matplotlib\n'), ((4912, 4945), 'glue.config.qt_client.add', 'qt_client.add', (['TutorialDataViewer'], {}), '(TutorialDataViewer)\n', (4925, 4945), False, 'from glue.config import qt_client\n'), ((736, 809), 'glue.external.echo.SelectionCallbackProperty', 'SelectionCallbackProperty', ([], {'docstring': '"""The attribute to use on the x-axis"""'}), "(docstring='The attribute to use on the x-axis')\n", (761, 809), False, 'from glue.external.echo import CallbackProperty, SelectionCallbackProperty\n'), ((822, 895), 'glue.external.echo.SelectionCallbackProperty', 'SelectionCallbackProperty', ([], {'docstring': '"""The attribute to use on the y-axis"""'}), "(docstring='The attribute to use on the y-axis')\n", (847, 895), False, 'from glue.external.echo import CallbackProperty, SelectionCallbackProperty\n'), ((1523, 1609), 'glue.external.echo.CallbackProperty', 'CallbackProperty', (['(False)'], {'docstring': '"""Whether to show the markers as filled or not"""'}), "(False, docstring=\n 'Whether to show the markers as filled or not')\n", (1539, 1609), False, 'from glue.external.echo import CallbackProperty, SelectionCallbackProperty\n'), ((1034, 1071), 'glue.core.data_combo_helper.ComponentIDComboHelper', 'ComponentIDComboHelper', (['self', '"""x_att"""'], {}), "(self, 'x_att')\n", (1056, 1071), False, 'from glue.core.data_combo_helper import ComponentIDComboHelper\n'), ((1101, 1138), 'glue.core.data_combo_helper.ComponentIDComboHelper', 'ComponentIDComboHelper', (['self', '"""y_att"""'], {}), "(self, 'y_att')\n", (1123, 1138), False, 'from glue.core.data_combo_helper import ComponentIDComboHelper\n'), ((3792, 3847), 'glue.external.echo.qt.autoconnect_callbacks_to_qt', 'autoconnect_callbacks_to_qt', (['self.viewer_state', 'self.ui'], {}), '(self.viewer_state, self.ui)\n', (3819, 3847), False, 'from glue.external.echo.qt import connect_checkable_button, 
autoconnect_callbacks_to_qt\n'), ((4013, 4038), 'qtpy.QtWidgets.QCheckBox', 'QCheckBox', (['"""Fill markers"""'], {}), "('Fill markers')\n", (4022, 4038), False, 'from qtpy.QtWidgets import QWidget, QVBoxLayout, QCheckBox\n'), ((4056, 4069), 'qtpy.QtWidgets.QVBoxLayout', 'QVBoxLayout', ([], {}), '()\n', (4067, 4069), False, 'from qtpy.QtWidgets import QWidget, QVBoxLayout, QCheckBox\n'), ((4196, 4261), 'glue.external.echo.qt.connect_checkable_button', 'connect_checkable_button', (['self.layer_state', '"""fill"""', 'self.checkbox'], {}), "(self.layer_state, 'fill', self.checkbox)\n", (4220, 4261), False, 'from glue.external.echo.qt import connect_checkable_button, autoconnect_callbacks_to_qt\n'), ((4686, 4706), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', '(1)', '(1)'], {}), '(1, 1, 1)\n', (4697, 4706), True, 'from matplotlib import pyplot as plt\n'), ((3070, 3082), 'numpy.nanmin', 'np.nanmin', (['x'], {}), '(x)\n', (3079, 3082), True, 'import numpy as np\n'), ((3084, 3096), 'numpy.nanmax', 'np.nanmax', (['x'], {}), '(x)\n', (3093, 3096), True, 'import numpy as np\n'), ((3125, 3137), 'numpy.nanmin', 'np.nanmin', (['y'], {}), '(y)\n', (3134, 3137), True, 'import numpy as np\n'), ((3139, 3151), 'numpy.nanmax', 'np.nanmax', (['y'], {}), '(y)\n', (3148, 3151), True, 'import numpy as np\n'), ((3695, 3720), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (3710, 3720), False, 'import os\n')] |
################# INSTRUCTIONS ##########################
#########################################################
# it returns output as a tuple containing two elements (integers).
# first element - if 0 then it doesn't contain a pothole.
# if 1 then it contains a pothole.
# second element - if 1 then the severity is low.
# if 2 then the severity is medium.
# if 3 then the severity is high.
#########################################################
#########################################################
import numpy as np
from PIL import Image
from keras.models import model_from_json
from keras.preprocessing.image import ImageDataGenerator, array_to_img, img_to_array, load_img
from keras.preprocessing import image
from keras.utils import layer_utils
from keras.utils.data_utils import get_file
from keras.applications.imagenet_utils import preprocess_input
import keras.backend as K
class Pothole:
def __init__(self, model0_path, weights0_path, model1_path, weights1_path, model2_path, weights2_path, model3_path, weights3_path):
self.loaded_models_dict = {}
self.models_and_weights_path_list = [[model0_path, weights0_path, "for_first_filter"], [model1_path, weights1_path, "for_road"], [model2_path, weights2_path, "for_pothole"], [model3_path, weights3_path, "severity"]]
for model_and_weight in self.models_and_weights_path_list:
json_file = open(model_and_weight[0], 'r')
loaded_model_json = json_file.read()
json_file.close()
loaded_model = model_from_json(loaded_model_json)
loaded_model.load_weights(model_and_weight[1])
self.loaded_models_dict[model_and_weight[2]] = loaded_model
def img_preprocessing(self, img):
img = img.resize((480, 640))
img = np.asarray(img)
img = img.reshape((1, img.shape[0], img.shape[1], img.shape[2]))
img = (img * 1.) / 255
return img
def for_first_filter_prediction(self, img):
# print(np.max(self.loaded_models_dict["for_first_filter"].predict(img))* 100)
if np.max(self.loaded_models_dict["for_first_filter"].predict(img)) * 100 > 28:
return 1
return 0
def for_road_prediction(self, img):
if np.max(self.loaded_models_dict["for_road"].predict(img)) * 100 > 75:
return 1
return 0
def for_pothole_prediction(self, img):
# print(self.loaded_models_dict["for_pothole"].predict(img))
if np.max(self.loaded_models_dict["for_pothole"].predict(img)) * 100 > 58:
return 1
return 0
def for_severity_prediction(self, img):
return np.argmax(self.loaded_models_dict["severity"].predict(img)) + 1
def prediction(self, img):
img = self.img_preprocessing(img)
first_filter_result = self.for_first_filter_prediction(img)
is_road = self.for_road_prediction(img)
is_pothole = self.for_pothole_prediction(img)
severity = self.for_severity_prediction(img)
# print(is_road)
# print(is_pothole)
if first_filter_result and is_road and is_pothole:
K.clear_session()
return (1, severity)
else:
K.clear_session()
return (0, 0) | [
"numpy.asarray",
"keras.models.model_from_json",
"keras.backend.clear_session"
] | [((1866, 1881), 'numpy.asarray', 'np.asarray', (['img'], {}), '(img)\n', (1876, 1881), True, 'import numpy as np\n'), ((1608, 1642), 'keras.models.model_from_json', 'model_from_json', (['loaded_model_json'], {}), '(loaded_model_json)\n', (1623, 1642), False, 'from keras.models import model_from_json\n'), ((3227, 3244), 'keras.backend.clear_session', 'K.clear_session', ([], {}), '()\n', (3242, 3244), True, 'import keras.backend as K\n'), ((3321, 3338), 'keras.backend.clear_session', 'K.clear_session', ([], {}), '()\n', (3336, 3338), True, 'import keras.backend as K\n')] |
#
# Guess Manager Management
#
# <NAME>, August 10, 2021
#
# From the 20 runs, extract all of the pickled seeds with
# two, three, or four parts. Try to guess which part is
# the manager by running many one-on-one competitions
# between two parts. The part that wins the most competitions
# is the best guess for the manager. Score is based on green
# versus orange.
#
import golly as g
import model_classes as mclass
import model_functions as mfunc
import model_parameters as mparam
import numpy as np
import scipy.stats as st
import copy
import time
import pickle
import os
import re
import sys
#
# Parameter values for the experiments.
#
num_files = 20 # number of folders of runs of Model-S
possible_num_parts = [2, 3, 4] # possible numbers of parts in the seed
#
# Location of fusion_storage.bin files -- the input pickles.
#
fusion_dir = "C:/Users/peter/Peter's Projects" + \
"/management-theory-revised/Experiments"
# list of pickle files
fusion_files = []
# loop through the fusion files and record the file paths
# -- we assume the folders have the form "run1", "run2", ...
for i in range(num_files):
fusion_files.append(fusion_dir + "/run" + str(i + 1) + \
"/fusion_storage.bin")
#
# Loop through the pickles, loading them into fusion_list.
# Each fusion file will contain several pickles.
#
fusion_list = mfunc.read_fusion_pickles(fusion_files)
#
# Output file path.
#
results_file = fusion_dir + "/guess_manager_management.txt"
results_handle = open(results_file, "w")
#
# Process fusion_list in batches, where each batch has the
# same number of parts (see possible_num_parts above).
#
for current_num_parts in possible_num_parts:
# note the number of parts in the current group of seeds
results_handle.write("\n\n" + str(current_num_parts) + " parts in seed\n\n")
# some variables for calculating conditional probabilities
# - if there is exactly one manager, what is the probability that
# the manager has the most Immigration Game fitness, compared to the
# fitness of the workers? -- p(manager max fitness | exactly one manager)
count_one_manager = 0
count_one_manager_max_fitness = 0
total_sample_size = 0
# iterate through fusion_list skipping over cases that don't have
# exactly part_num parts
for seed in fusion_list:
# get a map of the regions in the seed
seed_map = mfunc.region_map(seed)
num_regions = np.amax(seed_map)
# make sure seed has current_num_parts parts
if (num_regions != current_num_parts):
continue
# update sample size for current_num_parts parts
total_sample_size += 1
# extract the parts from the seed, converting each part
# to a Game of Life pattern, composed entirely of zeros
# and ones; the new part will be reduced in size to match
# the size of the chosen region
part_list = []
for target_region in range(1, current_num_parts + 1):
target_part = mfunc.extract_parts(seed, seed_map, target_region)
target_part.num_living = target_part.count_ones()
part_list.append(target_part)
# measure the fitness of each part by one-on-one competitions
# in the Immigration Game
fitness_list = []
num_trials = 500
for i in range(current_num_parts):
part1 = part_list[i]
scores = []
for j in range(current_num_parts):
part2 = part_list[j]
if (i == j):
continue
else:
[score1, score2] = mfunc.score_management(g, part1, part2, \
mparam.width_factor, mparam.height_factor, \
mparam.time_factor, num_trials)
scores.append(score1)
average_fitness = sum(scores) / len(scores)
fitness_list.append(average_fitness)
# output fitness numbers from the Immigration Game
fitness_list_string = ", ".join(map(str, fitness_list))
results_handle.write("fitness of parts: " + fitness_list_string + "\n")
# calculate the tensor using the Management Game
seed_num = 0 # only one seed
seed_list = [seed] # only one seed
step_size = 1000 # one giant step
max_seeds = 10 # more than we'll need here
num_steps = 1001 # number of time steps, from 0 to 1000
num_colours = 5 # white, red, blue, orange, green
num_parts = current_num_parts # 2, 3, or 4 parts
step_num = 1000 # this is the number of the final step
[tensor, num_seeds] = mfunc.growth_tensor(g, seed_list, step_size, \
max_seeds, num_steps, num_colours, num_parts)
# classify each part as a manager or worker
manager_list = []
for part_num in range(num_parts):
# extract colours
red = tensor[seed_num, step_num, 1, part_num]
blue = tensor[seed_num, step_num, 2, part_num]
orange = tensor[seed_num, step_num, 3, part_num]
green = tensor[seed_num, step_num, 4, part_num]
# we focus on the current part (part_num) only
# -- the current part is always red, by convention
red_manager = int(orange > green) # true or false -> 1 or 0
manager_list.append(red_manager)
#
# output management status from the Management Game
manager_list_string = ", ".join(map(str, manager_list))
results_handle.write("manager status: " + manager_list_string + "\n\n")
# some variables for calculating conditional probabilities
# - if there is exactly one manager, what is the probability that
# the manager has the most Immigration Game fitness, compared to the
# fitness of the workers? -- p(manager max fitness | exactly one manager)
if (sum(manager_list) == 1): # if exactly one manager
count_one_manager += 1
manager_fitness = max(np.multiply(manager_list, fitness_list))
sorted_fitness = sorted(fitness_list, reverse=True)
# if the highest fitness matches the manager's fitness and the second
# highest fitness does not match the manager's fitness ...
if ((manager_fitness == sorted_fitness[0]) and \
(manager_fitness != sorted_fitness[1])):
count_one_manager_max_fitness += 1
#
#
# print out conditional probabilities
# - p(manager max fitness | exactly one manager)
prob_manager_max_fitness_given_one_manager = \
count_one_manager_max_fitness / count_one_manager
#
results_handle.write(
"p(manager max fitness | exactly one manager) = " + \
str(prob_manager_max_fitness_given_one_manager) + "\n" + \
" = " + str(count_one_manager_max_fitness) + " / " + \
str(count_one_manager) + "\n\n" + \
"p(one specific part | " + str(current_num_parts) + \
" parts to choose from) = " + str(1 / current_num_parts) + \
"\n\ntotal sample size = " + str(total_sample_size) + "\n\n")
#
#
results_handle.close()
#
# | [
"model_functions.read_fusion_pickles",
"numpy.multiply",
"model_functions.score_management",
"model_functions.extract_parts",
"model_functions.growth_tensor",
"numpy.amax",
"model_functions.region_map"
] | [((1396, 1435), 'model_functions.read_fusion_pickles', 'mfunc.read_fusion_pickles', (['fusion_files'], {}), '(fusion_files)\n', (1421, 1435), True, 'import model_functions as mfunc\n'), ((2430, 2452), 'model_functions.region_map', 'mfunc.region_map', (['seed'], {}), '(seed)\n', (2446, 2452), True, 'import model_functions as mfunc\n'), ((2472, 2489), 'numpy.amax', 'np.amax', (['seed_map'], {}), '(seed_map)\n', (2479, 2489), True, 'import numpy as np\n'), ((4491, 4585), 'model_functions.growth_tensor', 'mfunc.growth_tensor', (['g', 'seed_list', 'step_size', 'max_seeds', 'num_steps', 'num_colours', 'num_parts'], {}), '(g, seed_list, step_size, max_seeds, num_steps,\n num_colours, num_parts)\n', (4510, 4585), True, 'import model_functions as mfunc\n'), ((3004, 3054), 'model_functions.extract_parts', 'mfunc.extract_parts', (['seed', 'seed_map', 'target_region'], {}), '(seed, seed_map, target_region)\n', (3023, 3054), True, 'import model_functions as mfunc\n'), ((5786, 5825), 'numpy.multiply', 'np.multiply', (['manager_list', 'fitness_list'], {}), '(manager_list, fitness_list)\n', (5797, 5825), True, 'import numpy as np\n'), ((3538, 3657), 'model_functions.score_management', 'mfunc.score_management', (['g', 'part1', 'part2', 'mparam.width_factor', 'mparam.height_factor', 'mparam.time_factor', 'num_trials'], {}), '(g, part1, part2, mparam.width_factor, mparam.\n height_factor, mparam.time_factor, num_trials)\n', (3560, 3657), True, 'import model_functions as mfunc\n')] |
import numpy as np
from ..utils import hist_vec_by_r
from ..utils import hist_vec_by_r_cu
def scatter_xy(x, y=None, x_range=None, r_cut=0.5, q_bin=0.1, q_max=6.3, zero_padding=1, expand=0, use_gpu=False):
r"""Calculate static structure factor.
:param x: np.ndarray, coordinates of component 1
:param y: np.ndarray, coordinates of component 2
:param x_range: np.ndarray, range of positions
:param r_cut: double, bin size of rho
:param q_bin: double, bin size of wave vector q
:param q_max: double, max value of wave vector q
:param zero_padding: int (periods), whether pad density matrix with 0
:param expand: int or np.ndarray (periods), extend density matrix by its period
:param use_gpu: bool or int, use gpu code to summing vector S(Q) to S(q)
:return: np.ndarray, S(q) value
"""
mode = 'ab' if x is not y else 'aa'
# Using `x_range' rather than `box' for the unknown origin of the box
box = np.array(np.array([_[1] - _[0] for _ in x_range]))
bins = np.asarray(box / r_cut, dtype=np.int)
x_range = np.asarray(x_range)
expand = np.asarray(expand)
n_dim = x.shape[1]
if x_range.shape[0] != n_dim:
raise ValueError("Dimension of coordinates is %d and"
"dimension of x_range is %d" % (n_dim, x_range.shape[0]))
if bins.ndim < 1:
bins = np.asarray([bins] * n_dim)
if not (isinstance(use_gpu, bool) or isinstance(use_gpu, int)):
raise ValueError(
"`use_gpu' should be bool: False for not using GPU or an integer of GPU id!"
)
rho_x, _ = np.histogramdd(x, bins=bins, range=x_range)
if expand.ndim < 1:
expand = np.asarray([expand] * rho_x.ndim)
z_bins = (np.asarray(rho_x.shape) * zero_padding).astype(np.int64)
rho_x = np.pad(rho_x, [(0, _ * __) for _, __ in zip(rho_x.shape, expand)], 'wrap')
z_bins = np.where(
z_bins > np.asarray(rho_x.shape[0]), z_bins, np.asarray(rho_x.shape[0])
)
_rft_sq_x = np.fft.rfftn(rho_x, s=z_bins)
# expand density with periodic data, enlarge sample periods.
_rft_sq_y = _rft_sq_x
if mode == 'ab':
rho_y, _ = np.histogramdd(y, bins=bins, range=x_range)
rho_y = np.pad(rho_y, [(0, _ * __) for _, __ in zip(rho_y.shape, expand)], 'wrap')
_rft_sq_y = np.fft.rfftn(rho_y, s=z_bins)
_rft_sq_xy = _rft_sq_x.conj() * _rft_sq_y # circular correlation.
fslice = tuple([slice(0, _) for _ in z_bins])
lslice = np.arange(z_bins[-1] - z_bins[-1] // 2 - 1, 0, -1)
pad_axes = [(0, 1)] * (n_dim - 1) + [(0, 0)]
flip_axes = tuple(range(n_dim - 1))
# fftn(a) = np.concatenate([rfftn(a),
# conj(rfftn(a))[-np.arange(i),-np.arange(j)...,np.arange(k-k//2-1,0,-1)]], axis=-1)
# numpy >= 1.15
# The pad is to ensure arr -> arr[0,-1,-2,...] (arr[0, N-1...1] not flip(arr)->arr[-1,-2,...]
# (arr[N-1,N-2,...0]
_sq_xy = np.concatenate(
[_rft_sq_xy, np.flip(
np.pad(_rft_sq_xy.conj(), pad_axes, 'wrap'), axis=flip_axes
)[fslice][..., lslice]], axis=-1
)
# np.fft.rfftfreq does not work here, it has be the complete fft result.
_d = box / bins
# q = np.vstack([np.fft.fftfreq(_sq_xy.shape[_], _d[_]) for _ in range(_d.shape[0])])
q0 = np.fft.fftfreq(_sq_xy.shape[0], _d[0])
# _d is same in all directions, i.e., r_cut of sampling is same in all directions
# so that dq is same in all directions
dq = q0[1] - q0[0]
dq = dq * 2 * np.pi
middle = np.asarray(_sq_xy.shape, dtype=np.float64) // 2
_sq_xy = np.fft.fftshift(_sq_xy) # shift 0-freq to middle
# _sq_xy[0, 0, ..., 0] = np.fft.fftshift(_sq_xy)[middle]
if use_gpu is False:
return hist_vec_by_r(_sq_xy, dq, q_bin, q_max, middle=middle)
return hist_vec_by_r_cu(_sq_xy, dq, q_bin, q_max, gpu=use_gpu, middle=middle)
| [
"numpy.asarray",
"numpy.histogramdd",
"numpy.fft.rfftn",
"numpy.fft.fftfreq",
"numpy.fft.fftshift",
"numpy.arange",
"numpy.array"
] | [((1020, 1057), 'numpy.asarray', 'np.asarray', (['(box / r_cut)'], {'dtype': 'np.int'}), '(box / r_cut, dtype=np.int)\n', (1030, 1057), True, 'import numpy as np\n'), ((1072, 1091), 'numpy.asarray', 'np.asarray', (['x_range'], {}), '(x_range)\n', (1082, 1091), True, 'import numpy as np\n'), ((1105, 1123), 'numpy.asarray', 'np.asarray', (['expand'], {}), '(expand)\n', (1115, 1123), True, 'import numpy as np\n'), ((1598, 1641), 'numpy.histogramdd', 'np.histogramdd', (['x'], {'bins': 'bins', 'range': 'x_range'}), '(x, bins=bins, range=x_range)\n', (1612, 1641), True, 'import numpy as np\n'), ((2000, 2029), 'numpy.fft.rfftn', 'np.fft.rfftn', (['rho_x'], {'s': 'z_bins'}), '(rho_x, s=z_bins)\n', (2012, 2029), True, 'import numpy as np\n'), ((2480, 2530), 'numpy.arange', 'np.arange', (['(z_bins[-1] - z_bins[-1] // 2 - 1)', '(0)', '(-1)'], {}), '(z_bins[-1] - z_bins[-1] // 2 - 1, 0, -1)\n', (2489, 2530), True, 'import numpy as np\n'), ((3268, 3306), 'numpy.fft.fftfreq', 'np.fft.fftfreq', (['_sq_xy.shape[0]', '_d[0]'], {}), '(_sq_xy.shape[0], _d[0])\n', (3282, 3306), True, 'import numpy as np\n'), ((3557, 3580), 'numpy.fft.fftshift', 'np.fft.fftshift', (['_sq_xy'], {}), '(_sq_xy)\n', (3572, 3580), True, 'import numpy as np\n'), ((967, 1009), 'numpy.array', 'np.array', (['[(_[1] - _[0]) for _ in x_range]'], {}), '([(_[1] - _[0]) for _ in x_range])\n', (975, 1009), True, 'import numpy as np\n'), ((1363, 1389), 'numpy.asarray', 'np.asarray', (['([bins] * n_dim)'], {}), '([bins] * n_dim)\n', (1373, 1389), True, 'import numpy as np\n'), ((1683, 1716), 'numpy.asarray', 'np.asarray', (['([expand] * rho_x.ndim)'], {}), '([expand] * rho_x.ndim)\n', (1693, 1716), True, 'import numpy as np\n'), ((1951, 1977), 'numpy.asarray', 'np.asarray', (['rho_x.shape[0]'], {}), '(rho_x.shape[0])\n', (1961, 1977), True, 'import numpy as np\n'), ((2161, 2204), 'numpy.histogramdd', 'np.histogramdd', (['y'], {'bins': 'bins', 'range': 'x_range'}), '(y, bins=bins, range=x_range)\n', (2175, 2204), 
True, 'import numpy as np\n'), ((2316, 2345), 'numpy.fft.rfftn', 'np.fft.rfftn', (['rho_y'], {'s': 'z_bins'}), '(rho_y, s=z_bins)\n', (2328, 2345), True, 'import numpy as np\n'), ((3496, 3538), 'numpy.asarray', 'np.asarray', (['_sq_xy.shape'], {'dtype': 'np.float64'}), '(_sq_xy.shape, dtype=np.float64)\n', (3506, 3538), True, 'import numpy as np\n'), ((1915, 1941), 'numpy.asarray', 'np.asarray', (['rho_x.shape[0]'], {}), '(rho_x.shape[0])\n', (1925, 1941), True, 'import numpy as np\n'), ((1731, 1754), 'numpy.asarray', 'np.asarray', (['rho_x.shape'], {}), '(rho_x.shape)\n', (1741, 1754), True, 'import numpy as np\n')] |
import numpy as np
import pandas as pd
import time
N_STATES = 6 # number of states
ACTIONS = ['left','right']
MAX_EPISODES = 13
REFRESH_TIME = 0.3
LR = 0.1 # learning rate
EPSILON = 0.9 # to select either using exploration or using exploitation
GAMMA = 0.9
def build_q_table(states, actions):
'''
@states : int
@actions : list of strings, (basically list of names of all the actions)
'''
# q table is of the form states_actions
q_table = pd.DataFrame(
np.zeros((states, len(actions))),
columns=actions,
)
return q_table
def choose_action(state , q_table):
if (np.random.uniform() > EPSILON ) or ((q_table.iloc[state,:] == 0).all()):
action_name = np.random.choice(ACTIONS)
else :
action_name = q_table.iloc[state,:].idxmax()
return action_name
def get_feedback_from_env(current_S, action):
reward = 0
next_S = 0
if action == 'left':
if current_S == 0 :
next_S = 0
else:
next_S = current_S-1
else:
if current_S == N_STATES-2:
next_S = 'terminated'
reward=1
else:
next_S = current_S+1
return next_S,reward
def update_the_env(S, episode,step_counter):
disp_str = ['-']*(N_STATES-1)+['G']
if S=='terminated':
print('\rEpisode = {} and Steps = {}'.format(episode,step_counter))
else:
disp_str[S] = 'o'
disp_str = ''.join(disp_str)
print('\r{}'.format(disp_str),end='')
time.sleep(REFRESH_TIME)
def rl():
# main part of the code
q_table = build_q_table(N_STATES, ACTIONS)
for episode in range(MAX_EPISODES):
S = 0
is_terminated = False
step_counter = 0
update_the_env(S, episode,step_counter);
while not is_terminated:
A = choose_action(S,q_table)
next_S,R = get_feedback_from_env(S, A)
q_predict = q_table.loc[S, A]
if next_S == 'terminated':
q_target = R
is_terminated = True
else:
# NOTE : updating the table using both the action is not required.
q_target = R + GAMMA * q_table.iloc[next_S, :].max() # next state is not terminal
q_table.loc[S, A] += LR * (q_target - q_predict) # update
S = next_S
step_counter = step_counter+1
update_the_env(S, episode,step_counter)
return q_table
rl()
| [
"numpy.random.uniform",
"numpy.random.choice",
"time.sleep"
] | [((1336, 1360), 'time.sleep', 'time.sleep', (['REFRESH_TIME'], {}), '(REFRESH_TIME)\n', (1346, 1360), False, 'import time\n'), ((673, 698), 'numpy.random.choice', 'np.random.choice', (['ACTIONS'], {}), '(ACTIONS)\n', (689, 698), True, 'import numpy as np\n'), ((584, 603), 'numpy.random.uniform', 'np.random.uniform', ([], {}), '()\n', (601, 603), True, 'import numpy as np\n')] |
import matplotlib.pyplot as plt
import matplotlib.colors as colors
import numpy as np
import pandas as pd
from os.path import join
import os
import matplotlib as mpl
from scseirx import analysis_functions as af
import matplotlib.gridspec as gridspec
from matplotlib.lines import Line2D
from matplotlib.patches import Rectangle
from mpl_toolkits.axes_grid1 import make_axes_locatable
school_types = ['primary', 'primary_dc', 'lower_secondary',
'lower_secondary_dc', 'upper_secondary', 'secondary']
def q25(x):
return x.quantile(0.25)
def q75(x):
return x.quantile(0.75)
def hex_to_rgb(value):
'''
Converts hex to rgb colours
value: string of 6 characters representing a hex colour.
Returns: list length 3 of RGB values'''
value = value.strip("#") # removes hash symbol if present
lv = len(value)
return tuple(int(value[i:i + lv // 3], 16) for i in range(0, lv, lv // 3))
def rgb_to_dec(value):
'''
Converts rgb to decimal colours (i.e. divides each value by 256)
value: list (length 3) of RGB values
Returns: list (length 3) of decimal values'''
return [v/256 for v in value]
def get_continuous_cmap(hex_list, float_list=None):
'''
Creates and returns a color map that can be used in heat map figures. If
float_list is not provided, colour map graduates linearly between each color
in hex_list. If float_list is provided, each color in hex_list is mapped to
the respective location in float_list.
Parameters
----------
hex_list: list
List of hex code strings
float_list: list
List of floats between 0 and 1, same length as hex_list. Must start with
0 and end with 1.
Returns
----------
colour map
'''
rgb_list = [rgb_to_dec(hex_to_rgb(i)) for i in hex_list]
if float_list:
pass
else:
float_list = list(np.linspace(0,1,len(rgb_list)))
cdict = dict()
for num, col in enumerate(['red', 'green', 'blue']):
col_list = [[float_list[i], rgb_list[i][num], rgb_list[i][num]] for i in range(len(float_list))]
cdict[col] = col_list
cmp = mpl.colors.LinearSegmentedColormap('my_cmp', segmentdata=cdict, N=256)
return cmp
# set the colormap and centre the colorbar
class MidpointNormalize(colors.Normalize):
"""
Normalise the colorbar so that diverging bars work there way either side
from a prescribed midpoint value) e.g. im=ax1.imshow(array,
norm=MidpointNormalize(midpoint=0.,vmin=-100, vmax=100)).
"""
def __init__(self, vmin=None, vmax=None, midpoint=None, clip=False):
self.midpoint = midpoint
colors.Normalize.__init__(self, vmin, vmax, clip)
def __call__(self, value, clip=None):
# I'm ignoring masked values and all kinds of edge cases to make a
# simple example...
x, y = [self.vmin, self.midpoint, self.vmax], [0, 0.5, 1]
return np.ma.masked_array(np.interp(value, x, y), np.isnan(value))
def get_data(stype, src_path, vaccinations=False):
'''
Convenience function to read all ensembles from different measures
of a given school type and return one single data frame
'''
data = pd.DataFrame()
stype_path = join(src_path, stype)
files = os.listdir(stype_path)
for f in files:
params, agents, half = get_measures(f.strip('.csv'),
vaccinations=vaccinations)
if vaccinations:
params['student_test_rate'] = 1
params['teacher_test_rate'] = 1
params['mask_efficiency_exhale'] = 0.7
params['mask_efficiency_inhale'] = 0.5
params['base_transmission_risk_multiplier'] = 1.0
ensmbl = pd.read_csv(join(stype_path, f))
try:
ensmbl = ensmbl.drop(columns=['Unnamed: 0'])
except KeyError:
pass
ensmbl['preventive_test_type'] = params['preventive_test_type']
ensmbl['index_case'] = params['index_case']
ensmbl['transmission_risk_ventilation_modifier'] = \
params['transmission_risk_ventilation_modifier']
if ('class_size_reduction' in params.keys()) and not\
('half_classes' in params.keys()):
if params['class_size_reduction'] > 0:
params['half_classes'] = True
ensmbl['half_classes'] = True
else:
params['half_classes'] = False
ensmbl['half_classes'] = False
if ('half_classes' in params.keys()) and not\
('class_size_reduction' in params.keys()):
if params['half_classes']:
params['class_size_reduction'] = 0.5
ensmbl['class_size_reduction'] = 0.5
else:
params['class_size_reduction'] = 0.0
ensmbl['class_size_reduction'] = 0.0
ensmbl['half_classes'] = params['half_classes']
ensmbl['class_size_reduction'] = params['class_size_reduction']
ensmbl['student_testing_rate'] = params['student_test_rate']
ensmbl['teacher_testing_rate'] = params['teacher_test_rate']
ensmbl['mask_efficiency_inhale'] = params['mask_efficiency_inhale']
ensmbl['mask_efficiency_exhale'] = params['mask_efficiency_exhale']
ensmbl['base_transmission_risk_multiplier'] = \
params['base_transmission_risk_multiplier']
ensmbl['student_mask'] = agents['student']['mask']
ensmbl['teacher_mask'] = agents['teacher']['mask']
ensmbl['student_screening_interval'] = agents['student']\
['screening_interval']
ensmbl['teacher_screening_interval'] = agents['teacher']\
['screening_interval']
ensmbl['teacher_vaccination_ratio'] = agents['teacher']\
['vaccination_ratio']
ensmbl['student_vaccination_ratio'] = agents['student']\
['vaccination_ratio']
ensmbl['family_member_vaccination_ratio'] = agents['family_member']\
['vaccination_ratio']
data = pd.concat([data, ensmbl])
data = data.reset_index(drop=True)
data['teacher_screening_interval'] = data['teacher_screening_interval']\
.replace({None:'never'})
data['student_screening_interval'] = data['student_screening_interval']\
.replace({None:'never'})
return data
def get_measures(measure_string, vaccinations=False):
    '''
    Parse the individual measures encoded in a measure string (filename).

    Parameters
    ----------
    measure_string : str
        String of the form '<school_type>_<token>_<token>_...' where every
        token is '<key>-<value>' (e.g. 'tf-7' or 'tmask-T'). The expected
        token list differs between the vaccination scenario files and the
        (longer) sensitivity-analysis scenario files.
    vaccinations : bool
        If True, parse the token list used by the vaccination scenario
        files, which also implies a fixed family member vaccination ratio
        of 0.6 and contains a 'half' token.

    Returns
    -------
    params : dict
        Simulation parameters (preventive test type, turnover, index case,
        ventilation modifier, ...).
    agents : dict
        Per agent group ('student', 'teacher', 'family_member') settings:
        screening interval, index probability, mask and (if present in the
        measure string) vaccination ratio.
    half : bool
        Whether classes are halved ('half-T' token present).
    '''
    agents = {
        'student':{
            'screening_interval': None,
            'index_probability': 0,
            'mask':False},
        'teacher':{
            'screening_interval': None,
            'index_probability': 0,
            'mask':False},
        'family_member':{
            'screening_interval': None,
            'index_probability': 0,
            'mask':False}
    }
    # maps from the encoded token values to the values used in the tables
    turnovers = {0:'same', 1:'one', 2:'two', 3:'three'}
    bmap = {'T':True, 'F':False}
    interval_map = {'0':0, '3':3, '7':7, '14':14, 'None':None}
    index_map = {'s':'student', 't':'teacher'}

    # the school type is everything before the first '_test' occurrence;
    # the remainder is the '_'-separated token list
    stype, _ = measure_string.split('_test')
    rest = measure_string.split(stype + '_')[1]

    if vaccinations:
        ttpype, turnover, index, tf, sf, tmask, smask, half, vent, tvacc,\
                        svacc = rest.split('_')
        # the family member vaccination ratio is fixed in these scenarios
        fvacc = 'fvacc-0.6'
        tmp = [ttpype, turnover, index, tf, sf, tmask, smask, half, vent,\
               tvacc, svacc, fvacc]
    else:
        ttpype, turnover, index, tf, sf, tmask, smask, vent, stestrate, \
        ttestrate, trisk, meffexh, meffinh, csizered, fratio, svacc, tvacc, \
        fvacc = rest.split('_')
        tmp = [ttpype, turnover, index, tf, sf, tmask, smask, vent, stestrate,\
               ttestrate, trisk, meffexh, meffinh, csizered, fratio, svacc, tvacc,\
               fvacc]

    tmp = [m.split('-') for m in tmp]

    params = {}
    half = False
    for m in tmp:
        if len(m) == 1:
            # token without a value -> nothing to parse
            pass
        elif m[0] == 'test':
            params['preventive_test_type'] = m[1]
        elif m[0] == 'turnover':
            params['turnover'] = int(m[1])
        elif m[0] == 'index':
            params['index_case'] = index_map[m[1]]
        elif m[0] == 'tf':
            agents['teacher']['screening_interval'] = interval_map[m[1]]
        elif m[0] == 'sf':
            agents['student']['screening_interval'] = interval_map[m[1]]
        elif m[0] == 'tmask':
            agents['teacher']['mask'] = bmap[m[1]]
        elif m[0] == 'smask':
            agents['student']['mask'] = bmap[m[1]]
        elif m[0] == 'half':
            params['half_classes'] = bmap[m[1]]
            # BUG FIX: 'half' was never assigned in this loop before, so
            # the function always returned False, even for 'half-T' tokens
            half = bmap[m[1]]
        elif m[0] == 'vent':
            params['transmission_risk_ventilation_modifier'] = float(m[1])
        elif m[0] == 'csizered':
            params['class_size_reduction'] = float(m[1])
        elif m[0] == 'stestrate':
            params['student_test_rate'] = float(m[1])
        elif m[0] == 'ttestrate':
            params['teacher_test_rate'] = float(m[1])
        elif m[0] == 'fratio':
            params['added_friendship_contacts'] = float(m[1])
        elif m[0] == 'meffexh':
            params['mask_efficiency_exhale'] = float(m[1])
        elif m[0] == 'meffinh':
            params['mask_efficiency_inhale'] = float(m[1])
        elif m[0] == 'trisk':
            params['base_transmission_risk_multiplier'] = float(m[1])
        elif m[0] == 'tvacc':
            agents['teacher']['vaccination_ratio'] = float(m[1])
        elif m[0] == 'svacc':
            agents['student']['vaccination_ratio'] = float(m[1])
        elif m[0] == 'fvacc':
            agents['family_member']['vaccination_ratio'] = float(m[1])
        elif m[0] == 'atd':
            params['age_transmission_discount'] = float(m[1])
        elif m[0] == 'cw':
            params['contact_weight'] = float(m[1])
        else:
            print('unknown measure type ', m[0])

    # expand e.g. turnover 0 + 'antigen' into the table value
    # 'same_day_antigen'
    params['preventive_test_type'] = '{}_day_{}'\
        .format(turnovers[params['turnover']], params['preventive_test_type'])

    return params, agents, half
def get_baseline_data(src_path,
                school_types=['primary', 'primary_dc', 'lower_secondary',
                'lower_secondary_dc', 'upper_secondary', 'secondary'],
                observables_of_interest=None):
    '''
    Load and combine the baseline observables of all given school types
    from src_path into one DataFrame, adding the parameter columns that
    are constant in the baseline scenario.
    '''
    # one observable table per school type, tagged with its school type
    frames = []
    for school_type in school_types:
        frame = pd.read_csv(
            join(src_path, '{}_observables.csv'.format(school_type)))
        frame['school_type'] = school_type
        frames.append(frame)
    baseline_data = pd.concat(frames)

    # parameters that are fixed in the baseline scenario
    constant_columns = {
        'test_sensitivity':1.0,
        'student_testing_rate':1.0,
        'teacher_testing_rate':1.0,
        'mask_efficiency_inhale':0.7,
        'mask_efficiency_exhale':0.5,
        'base_transmission_risk_multiplier':1.0,
        'friendship_ratio':0.0,
        'student_vaccination_ratio':0.0,
        'teacher_vaccination_ratio':0.0,
        'family_member_vaccination_ratio':0.0,
    }
    for column, value in constant_columns.items():
        baseline_data[column] = value

    # half classes correspond to a class size reduction of 50%
    baseline_data['class_size_reduction'] = 0
    half_class_rows = baseline_data[baseline_data['half_classes'] == True].index
    baseline_data.loc[half_class_rows, 'class_size_reduction'] = 0.5

    baseline_data = baseline_data.drop(columns=['Unnamed: 0'])
    baseline_data = baseline_data.reset_index(drop=True)

    # agents that are never screened are marked by NaN in the raw tables
    for column in ['student_screen_interval', 'teacher_screen_interval']:
        baseline_data[column] = baseline_data[column].replace({np.nan:'never'})

    return baseline_data
def get_test_sensitivity_data(src_path, params, baseline_data,
                school_types=['primary', 'primary_dc', 'lower_secondary',
                'lower_secondary_dc', 'upper_secondary', 'secondary'],
                observables_of_interest=['infected_agents', 'R0']):
    '''
    Collect ensemble statistics of the test sensitivity parameter scan.

    For every school type, loads the ensemble data from src_path (via
    get_data), selects the runs matching each screening parameter
    combination in `params`, aggregates the observables of interest (via
    af.get_statistics) into one row per combination, writes the table to
    'test_sensitivity_observables.csv' in src_path and finally appends the
    matching baseline rows (sensitivity 1.0) from `baseline_data`.

    Parameters
    ----------
    src_path : str
        Path to the folder holding the ensemble results.
    params : pandas.DataFrame
        One screening parameter combination per row; column order must be
        (index_case, s_screen_interval, t_screen_interval, student_mask,
        teacher_mask, class_size_reduction, vent_mod, ttype).
    baseline_data : pandas.DataFrame
        Baseline observables as returned by get_baseline_data().
    school_types : list of str
        School types to process.
    observables_of_interest : list of str
        Observable columns for which ensemble statistics are calculated.

    Returns
    -------
    pandas.DataFrame
        One row per (school type, parameter combination) plus the
        matching baseline rows.
    '''
    test_sensitivity_data = pd.DataFrame()
    for stype in school_types:
        print('\t{}'.format(stype))
        stype_data = get_data(stype, src_path)
        for i, screening_params in params.iterrows():
            # positional unpack -> the column order of `params` matters
            index_case, s_screen_interval, t_screen_interval, student_mask, \
            teacher_mask, class_size_reduction, vent_mod, ttype = \
            screening_params
            # ttype is e.g. 'same_day_antigen0.4' -> the number after
            # 'antigen' is the test sensitivity
            turnover, _, test = ttype.split('_')
            sensitivity = float(test.split('antigen')[1])
            # calculate the ensemble statistics for each measure combination
            measure_data = stype_data[\
                (stype_data['preventive_test_type'] == ttype) &\
                (stype_data['index_case'] == index_case) &\
                (stype_data['student_screening_interval'] == s_screen_interval) &\
                (stype_data['teacher_screening_interval'] == t_screen_interval) &\
                (stype_data['student_mask'] == student_mask) &\
                (stype_data['teacher_mask'] == teacher_mask) &\
                (stype_data['class_size_reduction'] == class_size_reduction) &\
                (stype_data['transmission_risk_ventilation_modifier'] == vent_mod)]
            if len(measure_data) == 0:
                print('WARNING: empty measure data for {}'\
                      .format(screening_params))
            # one result row per parameter combination; parameters that are
            # not scanned here are recorded with their fixed default values
            row = {'school_type':stype,
                   'test_type':test,
                   'turnover':turnover,
                   'index_case':index_case,
                   'student_screen_interval':s_screen_interval,
                   'teacher_screen_interval':t_screen_interval,
                   'student_mask':student_mask,
                   'teacher_mask':teacher_mask,
                   'ventilation_modification':vent_mod,
                   'test_sensitivity':sensitivity,
                   'class_size_reduction':class_size_reduction,
                   'student_testing_rate':1.0,
                   'teacher_testing_rate':1.0,
                   'mask_efficiency_inhale':0.7,
                   'mask_efficiency_exhale':0.5,
                   'base_transmission_risk_multiplier':1.0,
                   'friendship_ratio':0,
                   'student_vaccination_ratio':0,
                   'teacher_vaccination_ratio':0,
                   'family_member_vaccination_ratio':0}
            for col in observables_of_interest:
                row.update(af.get_statistics(measure_data, col))
            test_sensitivity_data = test_sensitivity_data.append(row,
                            ignore_index=True)
        # NOTE(review): written inside the school type loop -> the file is
        # re-written every iteration with all rows accumulated so far
        test_sensitivity_data.to_csv(join(src_path, 'test_sensitivity_observables.csv'),
                    index=False)
        # combine the sensitivity analysis data with the baseline data
        # (only relevant columns)
        # NOTE(review): s_screen_interval etc. are loop variables leaking
        # out of the inner params loop -> the baseline is matched against
        # the LAST parameter combination; presumably all combinations
        # share these screening settings -- verify against the callers.
        baseline_chunk = baseline_data[\
            (baseline_data['test_type'] == 'antigen') &\
            (baseline_data['turnover'] == 0) &\
            (baseline_data['student_screen_interval'] == s_screen_interval) &\
            (baseline_data['teacher_screen_interval'] == t_screen_interval) &\
            (baseline_data['student_mask'] == student_mask) &\
            (baseline_data['teacher_mask'] == teacher_mask) &\
            (baseline_data['class_size_reduction'] == class_size_reduction) &\
            (baseline_data['ventilation_modification'] == vent_mod)]
    test_sensitivity_data = pd.concat([test_sensitivity_data, \
        baseline_chunk[test_sensitivity_data.columns].copy()])
    return test_sensitivity_data
def get_testing_rate_data(src_path, params, baseline_data,
                school_types=['primary', 'primary_dc', 'lower_secondary',
                'lower_secondary_dc', 'upper_secondary', 'secondary'],
                observables_of_interest=['infected_agents', 'R0']):
    '''
    Collect ensemble statistics of the testing rate parameter scan.

    Analogous to get_test_sensitivity_data(): aggregates the ensemble
    observables for every (school type, screening parameter combination),
    writes them to 'testing_rate_data_observables.csv' in src_path and
    appends the matching baseline rows (testing rates 1.0).

    The column order of `params` must be (index_case, s_screen_interval,
    t_screen_interval, student_mask, teacher_mask, class_size_reduction,
    vent_mod, s_testing_rate, t_testing_rate).
    '''
    testing_rate_data = pd.DataFrame()
    for stype in school_types:
        print('\t{}'.format(stype))
        stype_data = get_data(stype, src_path)
        for i, screening_params in params.iterrows():
            # positional unpack -> the column order of `params` matters
            index_case, s_screen_interval, t_screen_interval, student_mask, \
            teacher_mask, class_size_reduction, vent_mod, s_testing_rate, \
            t_testing_rate = screening_params
            # select the ensemble runs matching this measure combination
            measure_data = stype_data[\
                (stype_data['preventive_test_type'] == 'same_day_antigen') &\
                (stype_data['index_case'] == index_case) &\
                (stype_data['student_screening_interval'] == s_screen_interval) &\
                (stype_data['teacher_screening_interval'] == t_screen_interval) &\
                (stype_data['student_mask'] == student_mask) &\
                (stype_data['teacher_mask'] == teacher_mask) &\
                (stype_data['class_size_reduction'] == class_size_reduction) &\
                (stype_data['transmission_risk_ventilation_modifier'] == vent_mod) &\
                (stype_data['student_testing_rate'] == s_testing_rate) &\
                (stype_data['teacher_testing_rate'] == t_testing_rate)
                                     ]
            if len(measure_data) == 0:
                print('WARNING: empty measure data for {}'.format(screening_params))
            # one result row per parameter combination; parameters that are
            # not scanned here are recorded with their fixed default values
            row = {'school_type':stype,
                   'test_type':'antigen',
                   'turnover':0,
                   'index_case':index_case,
                   'student_screen_interval':s_screen_interval,
                   'teacher_screen_interval':t_screen_interval,
                   'student_mask':student_mask,
                   'teacher_mask':teacher_mask,
                   'ventilation_modification':vent_mod,
                   'test_sensitivity':1.0,
                   'class_size_reduction':class_size_reduction,
                   'student_testing_rate':s_testing_rate,
                   'teacher_testing_rate':t_testing_rate,
                   'mask_efficiency_inhale':0.7,
                   'mask_efficiency_exhale':0.5,
                   'base_transmission_risk_multiplier':1.0,
                   'friendship_ratio':0,
                   'student_vaccination_ratio':0,
                   'teacher_vaccination_ratio':0,
                   'family_member_vaccination_ratio':0}
            for col in observables_of_interest:
                row.update(af.get_statistics(measure_data, col))
            testing_rate_data = testing_rate_data.append(row, ignore_index=True)
        # NOTE(review): written inside the school type loop -> the file is
        # re-written every iteration with all rows accumulated so far
        testing_rate_data.to_csv(join(src_path, 'testing_rate_data_observables.csv'),
                    index=False)
        # combine the sensitivity analysis data with the baseline data
        # (only relevant columns)
        # NOTE(review): uses loop variables leaking out of the inner params
        # loop -> the baseline is matched against the LAST combination;
        # presumably all combinations share these settings -- verify.
        baseline_chunk = baseline_data[\
            (baseline_data['test_type'] == 'antigen') &\
            (baseline_data['turnover'] == 0) &\
            (baseline_data['student_screen_interval'] == s_screen_interval) &\
            (baseline_data['teacher_screen_interval'] == t_screen_interval) &\
            (baseline_data['student_mask'] == student_mask) &\
            (baseline_data['teacher_mask'] == teacher_mask) &\
            (baseline_data['class_size_reduction'] == class_size_reduction) &\
            (baseline_data['ventilation_modification'] == vent_mod)]
    testing_rate_data = pd.concat([testing_rate_data, \
        baseline_chunk[testing_rate_data.columns].copy()])
    return testing_rate_data
def get_class_size_reduction_data(src_path, params,
                school_types=['primary', 'primary_dc', 'lower_secondary',
                'lower_secondary_dc', 'upper_secondary', 'secondary'],
                observables_of_interest=['infected_agents', 'R0']):
    '''
    Aggregate the ensemble statistics of the class size reduction scan.

    For every school type and every screening parameter combination in
    `params`, the matching ensemble runs are selected and the observables
    of interest are aggregated (via af.get_statistics) into a single row.
    The combined table is written to
    'class_size_reduction_observables.csv' next to the ensemble folder
    and returned.
    '''
    class_size_reduction_data = pd.DataFrame()

    for stype in school_types:
        print('\t{}'.format(stype))
        stype_data = get_data(stype, src_path)

        for _, screening_params in params.iterrows():
            # positional unpack -> the column order of `params` matters
            (index_case, s_screen_interval, t_screen_interval, student_mask,
             teacher_mask, vent_mod, class_size_reduction) = screening_params

            # select the ensemble runs belonging to this combination
            combination_mask = (
                (stype_data['preventive_test_type'] == 'same_day_antigen') &
                (stype_data['index_case'] == index_case) &
                (stype_data['student_screening_interval'] == s_screen_interval) &
                (stype_data['teacher_screening_interval'] == t_screen_interval) &
                (stype_data['student_mask'] == student_mask) &
                (stype_data['teacher_mask'] == teacher_mask) &
                (stype_data['class_size_reduction'] == class_size_reduction) &
                (stype_data['transmission_risk_ventilation_modifier'] == vent_mod)
            )
            measure_data = stype_data[combination_mask]
            if len(measure_data) == 0:
                print('WARNING: empty measure data for {}'.format(screening_params))

            # one result row per combination; parameters that are not
            # scanned here are recorded with their fixed default values
            row = {'school_type':stype,
                   'test_type':'antigen',
                   'turnover':0,
                   'index_case':index_case,
                   'student_screen_interval':s_screen_interval,
                   'teacher_screen_interval':t_screen_interval,
                   'student_mask':student_mask,
                   'teacher_mask':teacher_mask,
                   'ventilation_modification':vent_mod,
                   'test_sensitivity':1.0,
                   'class_size_reduction':class_size_reduction,
                   'student_testing_rate':1.0,
                   'teacher_testing_rate':1.0,
                   'mask_efficiency_inhale':0.7,
                   'mask_efficiency_exhale':0.5,
                   'base_transmission_risk_multiplier':1.0,
                   'friendship_ratio':0,
                   'student_vaccination_ratio':0,
                   'teacher_vaccination_ratio':0,
                   'family_member_vaccination_ratio':0}
            for observable in observables_of_interest:
                row.update(af.get_statistics(measure_data, observable))

            class_size_reduction_data = class_size_reduction_data.append(
                row, ignore_index=True)

    class_size_reduction_data.to_csv(
        join(src_path.split('/ensembles')[0],
             'class_size_reduction_observables.csv'), index=False)
    return class_size_reduction_data
def get_ventilation_efficiency_data(src_path, params, baseline_data,
                school_types=['primary', 'primary_dc', 'lower_secondary',
                'lower_secondary_dc', 'upper_secondary', 'secondary'],
                observables_of_interest=['infected_agents', 'R0']):
    '''
    Collect ensemble statistics of the ventilation efficiency scan.

    Aggregates the ensemble observables for every (school type, screening
    parameter combination), writes them to
    'ventilation_efficiency_observables.csv' next to the ensemble folder
    and appends the matching baseline rows (ventilation modifier 0.36).

    The column order of `params` must be (index_case, s_screen_interval,
    t_screen_interval, student_mask, teacher_mask, class_size_reduction,
    vent_mod).
    '''
    ventilation_efficiency_data = pd.DataFrame()
    for stype in school_types:
        print('\t{}'.format(stype))
        stype_data = get_data(stype, src_path)
        for i, screening_params in params.iterrows():
            # positional unpack -> the column order of `params` matters
            index_case, s_screen_interval, t_screen_interval, student_mask, \
            teacher_mask, class_size_reduction, vent_mod = screening_params
            # calculate the ensemble statistics for each measure combination
            measure_data = stype_data[\
                (stype_data['preventive_test_type'] == 'same_day_antigen') &\
                (stype_data['index_case'] == index_case) &\
                (stype_data['student_screening_interval'] == s_screen_interval) &\
                (stype_data['teacher_screening_interval'] == t_screen_interval) &\
                (stype_data['student_mask'] == student_mask) &\
                (stype_data['teacher_mask'] == teacher_mask) &\
                (stype_data['class_size_reduction'] == class_size_reduction) &\
                (stype_data['transmission_risk_ventilation_modifier'] == vent_mod)
                                     ]
            if len(measure_data) == 0:
                print('WARNING: empty measure data for {}'.format(screening_params))
            # one result row per parameter combination; parameters that are
            # not scanned here are recorded with their fixed default values
            row = {'school_type':stype,
                   'test_type':'antigen',
                   'turnover':0,
                   'index_case':index_case,
                   'student_screen_interval':s_screen_interval,
                   'teacher_screen_interval':t_screen_interval,
                   'student_mask':student_mask,
                   'teacher_mask':teacher_mask,
                   'ventilation_modification':vent_mod,
                   'test_sensitivity':1.0,
                   'class_size_reduction':class_size_reduction,
                   'student_testing_rate':1.0,
                   'teacher_testing_rate':1.0,
                   'mask_efficiency_inhale':0.7,
                   'mask_efficiency_exhale':0.5,
                   'base_transmission_risk_multiplier':1.0,
                   'friendship_ratio':0,
                   'student_vaccination_ratio':0,
                   'teacher_vaccination_ratio':0,
                   'family_member_vaccination_ratio':0}
            for col in observables_of_interest:
                row.update(af.get_statistics(measure_data, col))
            ventilation_efficiency_data = \
                ventilation_efficiency_data.append(row, ignore_index=True)
    ventilation_efficiency_data.to_csv(join(src_path.split('/ensembles')[0],
        'ventilation_efficiency_observables.csv'), index=False)
    # combine the sensitivity analysis data with the baseline data
    # (only relevant columns)
    # NOTE(review): the screening variables below leak out of the loops
    # above (last iteration's values); the baseline ventilation modifier
    # is fixed at 0.36 here -- presumably the calibrated baseline value.
    baseline_chunk = baseline_data[\
        (baseline_data['test_type'] == 'antigen') &\
        (baseline_data['turnover'] == 0) &\
        (baseline_data['student_screen_interval'] == s_screen_interval) &\
        (baseline_data['teacher_screen_interval'] == t_screen_interval) &\
        (baseline_data['student_mask'] == student_mask) &\
        (baseline_data['teacher_mask'] == teacher_mask) &\
        (baseline_data['class_size_reduction'] == class_size_reduction) &\
        (baseline_data['ventilation_modification'] == 0.36)]
    ventilation_efficiency_data = pd.concat([ventilation_efficiency_data, \
        baseline_chunk[ventilation_efficiency_data.columns].copy()])
    return ventilation_efficiency_data
def get_mask_efficiency_data(src_path, params,
                school_types=['primary', 'primary_dc', 'lower_secondary',
                'lower_secondary_dc', 'upper_secondary', 'secondary'],
                observables_of_interest=['infected_agents', 'R0']):
    '''
    Collect ensemble statistics of the mask efficiency parameter scan.

    Aggregates the ensemble observables for every (school type, screening
    parameter combination), writes them to
    'mask_efficiency_observables.csv' next to the ensemble folder and
    returns the combined table.

    The column order of `params` must be (index_case, s_screen_interval,
    t_screen_interval, student_mask, teacher_mask, class_size_reduction,
    vent_mod, m_efficiency_exhale, m_efficiency_inhale).
    '''
    mask_efficiency_data = pd.DataFrame()
    for stype in school_types:
        print('\t{}'.format(stype))
        stype_data = get_data(stype, src_path)
        for i, screening_params in params.iterrows():
            # positional unpack -> the column order of `params` matters
            index_case, s_screen_interval, t_screen_interval, student_mask, \
            teacher_mask, class_size_reduction, vent_mod, m_efficiency_exhale, \
            m_efficiency_inhale = screening_params
            # calculate the ensemble statistics for each measure combination
            measure_data = stype_data[\
                (stype_data['preventive_test_type'] == 'same_day_antigen') &\
                (stype_data['index_case'] == index_case) &\
                (stype_data['student_screening_interval'] == s_screen_interval) &\
                (stype_data['teacher_screening_interval'] == t_screen_interval) &\
                (stype_data['student_mask'] == student_mask) &\
                (stype_data['teacher_mask'] == teacher_mask) &\
                (stype_data['class_size_reduction'] == class_size_reduction) &\
                (stype_data['transmission_risk_ventilation_modifier'] == vent_mod) &\
                (stype_data['mask_efficiency_inhale'] == m_efficiency_inhale) &\
                (stype_data['mask_efficiency_exhale'] == m_efficiency_exhale)
                ]
            if len(measure_data) == 0:
                print('WARNING: empty measure data for {}'.format(screening_params))
            # one result row per parameter combination; parameters that are
            # not scanned here are recorded with their fixed default values
            # NOTE(review): class_size_reduction is hard-coded to 0.0 here
            # although the filter above uses the value from `params` --
            # presumably the scan only contains 0.0; verify.
            row = {'school_type':stype,
                   'test_type':'antigen',
                   'turnover':0,
                   'index_case':index_case,
                   'student_screen_interval':s_screen_interval,
                   'teacher_screen_interval':t_screen_interval,
                   'student_mask':student_mask,
                   'teacher_mask':teacher_mask,
                   'ventilation_modification':vent_mod,
                   'test_sensitivity':1.0,
                   'class_size_reduction':0.0,
                   'student_testing_rate':1.0,
                   'teacher_testing_rate':1.0,
                   # BUG FIX: inhale/exhale were transposed here before
                   # (inhale was recorded as m_efficiency_exhale and vice
                   # versa), mislabelling every row of the output table
                   'mask_efficiency_inhale':m_efficiency_inhale,
                   'mask_efficiency_exhale':m_efficiency_exhale,
                   'base_transmission_risk_multiplier':1.0,
                   'friendship_ratio':0,
                   'student_vaccination_ratio':0,
                   'teacher_vaccination_ratio':0,
                   'family_member_vaccination_ratio':0}
            for col in observables_of_interest:
                row.update(af.get_statistics(measure_data, col))
            mask_efficiency_data = \
                mask_efficiency_data.append(row, ignore_index=True)
    mask_efficiency_data.to_csv(join(src_path.split('/ensembles')[0],
        'mask_efficiency_observables.csv'), index=False)
    return mask_efficiency_data
def get_added_friendship_contacts_data(src_path, params, baseline_data,
                school_types=['primary', 'primary_dc', 'lower_secondary',
                'lower_secondary_dc', 'upper_secondary', 'secondary'],
                observables_of_interest=['infected_agents', 'R0']):
    '''
    Collect ensemble statistics of the added friendship contacts scan.

    Aggregates the ensemble observables for every (school type, screening
    parameter combination), writes them to
    'added_friendship_contacts_observables.csv' next to the ensemble
    folder and appends the matching baseline rows (friendship ratio 0.0).

    The column order of `params` must be (index_case, s_screen_interval,
    t_screen_interval, student_mask, teacher_mask, class_size_reduction,
    vent_mod, friendship_ratio).
    '''
    added_friendship_contacts_data = pd.DataFrame()
    for stype in school_types:
        print('\t{}'.format(stype))
        stype_data = get_data(stype, src_path)
        for i, screening_params in params.iterrows():
            # positional unpack -> the column order of `params` matters
            index_case, s_screen_interval, t_screen_interval, student_mask, \
            teacher_mask, class_size_reduction, vent_mod, friendship_ratio = screening_params
            # calculate the ensemble statistics for each measure combination
            measure_data = stype_data[\
                (stype_data['preventive_test_type'] == 'same_day_antigen') &\
                (stype_data['index_case'] == index_case) &\
                (stype_data['student_screening_interval'] == s_screen_interval) &\
                (stype_data['teacher_screening_interval'] == t_screen_interval) &\
                (stype_data['student_mask'] == student_mask) &\
                (stype_data['teacher_mask'] == teacher_mask) &\
                (stype_data['class_size_reduction'] == class_size_reduction) &\
                (stype_data['transmission_risk_ventilation_modifier'] == vent_mod) &\
                (stype_data['friendship_ratio'] == friendship_ratio)
                ]
            if len(measure_data) == 0:
                print('WARNING: empty measure data for {}'.format(screening_params))
            # one result row per parameter combination; parameters that are
            # not scanned here are recorded with their fixed default values
            row = {'school_type':stype,
                   'test_type':'antigen',
                   'turnover':0,
                   'index_case':index_case,
                   'student_screen_interval':s_screen_interval,
                   'teacher_screen_interval':t_screen_interval,
                   'student_mask':student_mask,
                   'teacher_mask':teacher_mask,
                   'ventilation_modification':vent_mod,
                   'test_sensitivity':1.0,
                   'class_size_reduction':class_size_reduction,
                   'student_testing_rate':1.0,
                   'teacher_testing_rate':1.0,
                   'mask_efficiency_inhale':0.7,
                   'mask_efficiency_exhale':0.5,
                   'base_transmission_risk_multiplier':1.0,
                   'friendship_ratio':friendship_ratio,
                   'student_vaccination_ratio':0,
                   'teacher_vaccination_ratio':0,
                   'family_member_vaccination_ratio':0}
            for col in observables_of_interest:
                row.update(af.get_statistics(measure_data, col))
            added_friendship_contacts_data = \
                added_friendship_contacts_data.append(row, ignore_index=True)
    added_friendship_contacts_data.to_csv(join(src_path.split('/ensembles')[0],
        'added_friendship_contacts_observables.csv'), index=False)
    # NOTE(review): the screening variables below leak out of the loops
    # above (last iteration's values); the baseline corresponds to no
    # added friendship contacts (friendship_ratio == 0.0).
    baseline_chunk = baseline_data[\
        (baseline_data['test_type'] == 'antigen') &\
        (baseline_data['turnover'] == 0) &\
        (baseline_data['student_screen_interval'] == s_screen_interval) &\
        (baseline_data['teacher_screen_interval'] == t_screen_interval) &\
        (baseline_data['student_mask'] == student_mask) &\
        (baseline_data['teacher_mask'] == teacher_mask) &\
        (baseline_data['class_size_reduction'] == class_size_reduction) &\
        (baseline_data['ventilation_modification'] == 1.0) &\
        (baseline_data['friendship_ratio'] == 0.0)]
    added_friendship_contacts_data = pd.concat([added_friendship_contacts_data, \
        baseline_chunk[added_friendship_contacts_data.columns].copy()])
    return added_friendship_contacts_data
def get_worst_case_data(src_path, params,
                school_types=['primary', 'primary_dc', 'lower_secondary',
                'lower_secondary_dc', 'upper_secondary', 'secondary'],
                observables_of_interest=['infected_agents', 'R0']):
    '''
    Aggregate the ensemble statistics of the conservative ("worst case")
    scenario for all school types and screening parameter combinations.
    The returned table is tagged with scenario = 'conservative'.
    '''
    worst_case_data = pd.DataFrame()

    for stype in school_types:
        stype_data = get_data(stype, src_path)

        for _, screening_params in params.iterrows():
            # positional unpack -> the column order of `params` matters
            (index_case, s_screen_interval, t_screen_interval, student_mask,
             teacher_mask, class_size_reduction, vent_mod, m_efficiency_exhale,
             m_efficiency_inhale, s_test_rate, t_test_rate, ttype,
             friendship_ratio) = screening_params

            # the test sensitivity is encoded in the test type string,
            # e.g. 'same_day_antigen0.4'
            turnover, _unused, test = ttype.split('_')
            sensitivity = float(test.split('antigen')[1])

            # select the ensemble runs belonging to this combination
            combination_mask = (
                (stype_data['preventive_test_type'] == ttype) &
                (stype_data['index_case'] == index_case) &
                (stype_data['student_screening_interval'] == s_screen_interval) &
                (stype_data['teacher_screening_interval'] == t_screen_interval) &
                (stype_data['student_mask'] == student_mask) &
                (stype_data['teacher_mask'] == teacher_mask) &
                (stype_data['class_size_reduction'] == class_size_reduction) &
                (stype_data['transmission_risk_ventilation_modifier'] == vent_mod)
            )
            measure_data = stype_data[combination_mask]
            if len(measure_data) == 0:
                print('WARNING: empty measure data for {}'.format(screening_params))

            # any class size reduction > 0 is realised via half classes
            half = True if class_size_reduction > 0 else False

            row = {'school_type':stype,
                   'test_type':test,
                   'turnover':turnover,
                   'index_case':index_case,
                   'student_screen_interval':s_screen_interval,
                   'teacher_screen_interval':t_screen_interval,
                   'student_mask':student_mask,
                   'teacher_mask':teacher_mask,
                   'ventilation_modification':vent_mod,
                   'test_sensitivity':sensitivity,
                   'class_size_reduction':class_size_reduction,
                   'half_classes':half,
                   'student_testing_rate':s_test_rate,
                   'teacher_testing_rate':t_test_rate,
                   'mask_efficiency_inhale':m_efficiency_inhale,
                   'mask_efficiency_exhale':m_efficiency_exhale,
                   'base_transmission_risk_multiplier':1.0,
                   'friendship_ratio':friendship_ratio,
                   'student_vaccination_ratio':0,
                   'teacher_vaccination_ratio':0,
                   'family_member_vaccination_ratio':0}
            for observable in observables_of_interest:
                row.update(af.get_statistics(measure_data, observable))

            worst_case_data = worst_case_data.append(row, ignore_index=True)

    worst_case_data['scenario'] = 'conservative'
    return worst_case_data
def get_worst_case_and_vaccinations_data(src_path, params,
                school_types=['primary', 'primary_dc', 'lower_secondary',
                'lower_secondary_dc', 'upper_secondary', 'secondary'],
                observables_of_interest=['infected_agents', 'R0']):
    '''
    Aggregate the ensemble statistics of the conservative ("worst case")
    scenario combined with vaccinations.

    For every school type and every screening parameter combination in
    `params`, selects the matching ensemble runs and aggregates the
    observables of interest (via af.get_statistics) into one row.

    The column order of `params` must be (index_case, s_screen_interval,
    t_screen_interval, student_mask, teacher_mask, class_size_reduction,
    vent_mod, m_efficiency_exhale, m_efficiency_inhale, s_test_rate,
    t_test_rate, ttype, friendship_ratio, student_vaccination_ratio,
    teacher_vaccination_ratio, family_member_vaccination_ratio).
    '''
    worst_case_and_vaccinations_data = pd.DataFrame()
    for stype in school_types:
        print('\t{}'.format(stype))
        stype_data = get_data(stype, src_path)
        for i, screening_params in params.iterrows():
            # positional unpack -> the column order of `params` matters
            index_case, s_screen_interval, t_screen_interval, student_mask, \
            teacher_mask, class_size_reduction, vent_mod, m_efficiency_exhale, \
            m_efficiency_inhale, s_test_rate, t_test_rate, ttype, \
            friendship_ratio, student_vaccination_ratio, \
            teacher_vaccination_ratio, family_member_vaccination_ratio \
            = screening_params
            # the test sensitivity is encoded in the test type string,
            # e.g. 'same_day_antigen0.4'
            turnover, _, test = ttype.split('_')
            sensitivity = float(test.split('antigen')[1])
            # calculate the ensemble statistics for each measure combination
            measure_data = stype_data[\
                (stype_data['preventive_test_type'] == ttype) &\
                (stype_data['index_case'] == index_case) &\
                (stype_data['student_screening_interval'] \
                            == s_screen_interval) &\
                (stype_data['teacher_screening_interval'] \
                            == t_screen_interval) &\
                (stype_data['student_mask'] == student_mask) &\
                (stype_data['teacher_mask'] == teacher_mask) &\
                (stype_data['class_size_reduction'] == class_size_reduction) &\
                (stype_data['transmission_risk_ventilation_modifier'] \
                            == vent_mod) &\
                (stype_data['student_vaccination_ratio']\
                            == student_vaccination_ratio) &\
                (stype_data['teacher_vaccination_ratio']\
                            == teacher_vaccination_ratio) &\
                (stype_data['family_member_vaccination_ratio']\
                            == family_member_vaccination_ratio)]
            if len(measure_data) == 0:
                print('WARNING: empty measure data for {}'.format(screening_params))
            # any class size reduction > 0 is realised via half classes
            half = False
            if class_size_reduction > 0:
                half = True
            row = {'school_type':stype,
                   'test_type':test,
                   'turnover':turnover,
                   'index_case':index_case,
                   'student_screen_interval':s_screen_interval,
                   'teacher_screen_interval':t_screen_interval,
                   'student_mask':student_mask,
                   'teacher_mask':teacher_mask,
                   'ventilation_modification':vent_mod,
                   'test_sensitivity':sensitivity,
                   'class_size_reduction':class_size_reduction,
                   'half_classes':half,
                   'student_testing_rate':s_test_rate,
                   'teacher_testing_rate':t_test_rate,
                   'mask_efficiency_inhale':m_efficiency_inhale,
                   'mask_efficiency_exhale':m_efficiency_exhale,
                   'base_transmission_risk_multiplier':1.0,
                   'friendship_ratio':friendship_ratio,
                   'student_vaccination_ratio':student_vaccination_ratio,
                   'teacher_vaccination_ratio':teacher_vaccination_ratio,
                   'family_member_vaccination_ratio':\
                            family_member_vaccination_ratio}
            for col in observables_of_interest:
                row.update(af.get_statistics(measure_data, col))
            worst_case_and_vaccinations_data = \
                worst_case_and_vaccinations_data.append(row, ignore_index=True)
    return worst_case_and_vaccinations_data
def get_vaccination_data(src_path, params,
                school_types=['primary', 'primary_dc', 'lower_secondary',
                'lower_secondary_dc', 'upper_secondary', 'secondary'],
                observables_of_interest=['infected_agents', 'R0']):
    '''
    Aggregate the ensemble statistics of the vaccination scenario scan.

    For every school type (loaded with vaccinations=True) and every
    screening parameter combination in `params`, selects the matching
    ensemble runs and aggregates the observables of interest (via
    af.get_statistics) into one row.

    The column order of `params` must be (index_case, s_screen_interval,
    t_screen_interval, student_mask, teacher_mask, half, vent_mod,
    student_vaccination_ratio, teacher_vaccination_ratio,
    family_member_vaccination_ratio).
    '''
    vaccination_data = pd.DataFrame()
    for stype in school_types:
        print('\t{}'.format(stype))
        stype_data = get_data(stype, src_path, vaccinations=True)
        for i, screening_params in params.iterrows():
            # positional unpack -> the column order of `params` matters
            index_case, s_screen_interval, t_screen_interval, student_mask, \
            teacher_mask, half, vent_mod, student_vaccination_ratio, \
            teacher_vaccination_ratio, family_member_vaccination_ratio \
            = screening_params
            # the vaccination scenarios all use the same (perfect) same-day
            # antigen test
            test = 'antigen'
            turnover = 0
            sensitivity = 1.0
            # calculate the ensemble statistics for each measure combination
            measure_data = stype_data[\
                (stype_data['preventive_test_type'] == 'same_day_antigen') &\
                (stype_data['index_case'] == index_case) &\
                (stype_data['student_screening_interval'] \
                            == s_screen_interval) &\
                (stype_data['teacher_screening_interval'] \
                            == t_screen_interval) &\
                (stype_data['student_mask'] == student_mask) &\
                (stype_data['teacher_mask'] == teacher_mask) &\
                (stype_data['half_classes'] == half) &\
                (stype_data['transmission_risk_ventilation_modifier'] == \
                             vent_mod) &\
                (stype_data['student_vaccination_ratio'] == \
                             student_vaccination_ratio) &\
                (stype_data['teacher_vaccination_ratio'] == \
                             teacher_vaccination_ratio) &\
                (stype_data['family_member_vaccination_ratio'] == \
                             family_member_vaccination_ratio)]
            if len(measure_data) == 0:
                print('WARNING: empty measure data for {}'.format(screening_params))
            # half classes correspond to a class size reduction of 50%
            class_size_reduction = 0.0
            if half:
                class_size_reduction = 0.5
            row = {'school_type':stype,
                   'test_type':test,
                   'turnover':turnover,
                   'index_case':index_case,
                   'student_screen_interval':s_screen_interval,
                   'teacher_screen_interval':t_screen_interval,
                   'student_mask':student_mask,
                   'teacher_mask':teacher_mask,
                   'ventilation_modification':vent_mod,
                   'test_sensitivity':sensitivity,
                   'class_size_reduction':class_size_reduction,
                   'half_classes':half,
                   'student_testing_rate':1.0,
                   'teacher_testing_rate':1.0,
                   # BUG FIX: the two values below were transposed
                   # (inhale 0.5 / exhale 0.7) -- every other table in this
                   # module records inhale 0.7 / exhale 0.5
                   'mask_efficiency_inhale':0.7,
                   'mask_efficiency_exhale':0.5,
                   'base_transmission_risk_multiplier':1.0,
                   'friendship_ratio':0.0,
                   'student_vaccination_ratio':student_vaccination_ratio,
                   'teacher_vaccination_ratio':teacher_vaccination_ratio,
                   'family_member_vaccination_ratio':\
                            family_member_vaccination_ratio}
            for col in observables_of_interest:
                row.update(af.get_statistics(measure_data, col))
            vaccination_data = \
                vaccination_data.append(row, ignore_index=True)
    return vaccination_data
def build_test_sensitivity_heatmap(test_sensitivity_data, sensitivities,
                                   school_types=school_types):
    '''
    Build heatmap matrices (test sensitivity x school type) of the number
    of infected agents -- normalised by the perfect-sensitivity (1.0)
    baseline -- and of the absolute R0, separately for student and
    teacher index cases.

    Returns a dict of the form {'N_infected':{index_case:ndarray},
    'R0':{index_case:ndarray}}.
    '''
    # restrict to the scenario in which only preventive testing is active
    data = test_sensitivity_data[\
        (test_sensitivity_data['student_screen_interval'] == 7) &\
        (test_sensitivity_data['teacher_screen_interval'] == 7) &\
        (test_sensitivity_data['student_mask'] == False) &\
        (test_sensitivity_data['teacher_mask'] == False) &\
        (test_sensitivity_data['class_size_reduction'] == 0.0) &\
        (test_sensitivity_data['ventilation_modification'] == 1.0)]
    data = data.set_index(\
        ['school_type', 'test_sensitivity', 'index_case'])
    hmaps_test_sensitivity = {'N_infected':{'student':np.nan, 'teacher':np.nan},
                              'R0':{'student':np.nan, 'teacher':np.nan}}
    for index_case in ['student', 'teacher']:
        cmap = np.zeros((len(sensitivities), len(school_types)))
        cmap_R0 = np.zeros((len(sensitivities), len(school_types)))
        for i, s in enumerate(sensitivities):
            for j, st in enumerate(school_types):
                # infections are normalised by the sensitivity-1.0 baseline
                bl_infected_agents = data.loc[st, 1.0, index_case]\
                    ['infected_agents_mean']
                cmap[i, j] = data.loc[st, s, index_case]['infected_agents_mean'] / \
                    bl_infected_agents
                # R0 is reported as an absolute value (not normalised);
                # removed the unused baseline R0 lookup that was here
                cmap_R0[i, j] = data.loc[st, s, index_case]['R0_mean']
        hmaps_test_sensitivity['N_infected'][index_case] = cmap
        hmaps_test_sensitivity['R0'][index_case] = cmap_R0
    return hmaps_test_sensitivity
def build_testing_rate_heatmaps(testing_rate_data, testing_rates,
                                school_types=school_types):
    '''
    Build heatmap matrices (testing rate x school type) of the number of
    infected agents -- normalised by the full-participation (rate 1.0)
    baseline -- and of the absolute R0, separately for student and
    teacher index cases. Student and teacher testing rates are varied
    together (same rate for both).

    Returns a dict of the form {'N_infected':{index_case:ndarray},
    'R0':{index_case:ndarray}}.
    '''
    # restrict to the scenario in which only preventive testing is active
    data = testing_rate_data[\
        (testing_rate_data['student_screen_interval'] == 7) &\
        (testing_rate_data['teacher_screen_interval'] == 7) &\
        (testing_rate_data['student_mask'] == False) &\
        (testing_rate_data['teacher_mask'] == False) &\
        (testing_rate_data['class_size_reduction'] == 0.0) &\
        (testing_rate_data['ventilation_modification'] == 1.0)]
    data = data.set_index(\
        ['school_type', 'student_testing_rate', 'teacher_testing_rate',
         'index_case'])
    hmaps_testing_rate = {'N_infected':{'student':np.nan, 'teacher':np.nan},
                          'R0':{'student':np.nan, 'teacher':np.nan}}
    for index_case in ['student', 'teacher']:
        cmap = np.zeros((len(testing_rates), len(school_types)))
        cmap_R0 = np.zeros((len(testing_rates), len(school_types)))
        for i, tpr in enumerate(testing_rates):
            for j, st in enumerate(school_types):
                # infections are normalised by the testing-rate-1.0 baseline
                bl_infected_agents = data.loc[st, 1.0, 1.0, index_case]\
                    ['infected_agents_mean']
                cmap[i, j] = data.loc[st, tpr, tpr, index_case]\
                    ['infected_agents_mean'] / bl_infected_agents
                # R0 is reported as an absolute value (not normalised);
                # removed the unused baseline R0 lookup that was here
                cmap_R0[i, j] = data.loc[st, tpr, tpr, index_case]['R0_mean']
        hmaps_testing_rate['N_infected'][index_case] = cmap
        hmaps_testing_rate['R0'][index_case] = cmap_R0
    return hmaps_testing_rate
def build_class_size_reduction_heatmaps(class_size_reduction_data,
            class_size_reductions, school_types=school_types):
    '''
    Build heatmap matrices (class size reduction x school type) of the
    number of infected agents -- normalised by the half-classes (0.5)
    baseline -- and of the absolute R0, separately for student and
    teacher index cases.

    Returns a dict of the form {'N_infected':{index_case:ndarray},
    'R0':{index_case:ndarray}}.
    '''
    # restrict to the scenario in which only class size reduction is active
    data = class_size_reduction_data[\
        (class_size_reduction_data['student_screen_interval'] == 'never') &\
        (class_size_reduction_data['teacher_screen_interval'] == 'never') &\
        (class_size_reduction_data['student_mask'] == False) &\
        (class_size_reduction_data['teacher_mask'] == False) &\
        (class_size_reduction_data['ventilation_modification'] == 1.0)]
    data = data.set_index(\
        ['school_type', 'class_size_reduction', 'index_case'])
    hmap_class_size_reduction = {'N_infected':{'student':np.nan, 'teacher':np.nan},
                                 'R0':{'student':np.nan, 'teacher':np.nan}}
    for index_case in ['student', 'teacher']:
        cmap = np.zeros((len(class_size_reductions), len(school_types)))
        cmap_R0 = np.zeros((len(class_size_reductions), len(school_types)))
        for i, csr in enumerate(class_size_reductions):
            for j, st in enumerate(school_types):
                # infections are normalised by the half-classes (0.5) baseline
                bl_infected_agents = data.loc[st, 0.5, index_case]\
                    ['infected_agents_mean']
                cmap[i, j] = data.loc[st, csr, index_case]['infected_agents_mean'] /\
                    bl_infected_agents
                # R0 is reported as an absolute value (not normalised);
                # removed the unused baseline R0 lookup that was here
                cmap_R0[i, j] = data.loc[st, csr, index_case]['R0_mean']
        hmap_class_size_reduction['N_infected'][index_case] = cmap
        hmap_class_size_reduction['R0'][index_case] = cmap_R0
    return hmap_class_size_reduction
def build_ventilation_efficiency_heatmaps(ventilation_efficiency_data,
        ventilation_efficiencies, school_types=school_types):
    """Build heatmap matrices for the ventilation-efficiency measure screen.

    Filters the ensemble data down to runs where only the ventilation was
    varied (no screening, no masks, full class size) and, for every
    (ventilation_modification, school_type) cell, computes the number of
    infected agents relative to the 0.36 baseline plus the raw R0.

    Parameters
    ----------
    ventilation_efficiency_data : pandas.DataFrame
        Ensemble observables of the ventilation runs.
    ventilation_efficiencies : sequence
        Ventilation modification values screened (heatmap rows).
    school_types : sequence, optional
        School types (heatmap columns).

    Returns
    -------
    dict
        {'N_infected': {...}, 'R0': {...}}, each keyed by index case
        and holding a 2D ndarray of shape
        (len(ventilation_efficiencies), len(school_types)).
    """
    # keep only runs in which ventilation is the single varied measure
    data = ventilation_efficiency_data[\
        (ventilation_efficiency_data['student_screen_interval'] == 'never') &\
        (ventilation_efficiency_data['teacher_screen_interval'] == 'never') &\
        (ventilation_efficiency_data['student_mask'] == False) &\
        (ventilation_efficiency_data['teacher_mask'] == False) &\
        (ventilation_efficiency_data['class_size_reduction'] == 0.0)]
    data = data.set_index(
        ['school_type', 'ventilation_modification', 'index_case'])

    hmaps_ventilation_efficiency = {
        'N_infected': {'student': np.nan, 'teacher': np.nan},
        'R0': {'student': np.nan, 'teacher': np.nan}
    }

    for index_case in ['student', 'teacher']:
        cmap = np.zeros((len(ventilation_efficiencies), len(school_types)))
        cmap_R0 = np.zeros((len(ventilation_efficiencies), len(school_types)))
        for i, ve in enumerate(ventilation_efficiencies):
            for j, st in enumerate(school_types):
                # normalise infections by the ventilation = 0.36 baseline
                bl_infected_agents = data.loc[st, 0.36, index_case]\
                    ['infected_agents_mean']
                cmap[i, j] = data.loc[st, ve, index_case]\
                    ['infected_agents_mean'] / bl_infected_agents
                cmap_R0[i, j] = data.loc[st, ve, index_case]['R0_mean']

        hmaps_ventilation_efficiency['N_infected'][index_case] = cmap
        hmaps_ventilation_efficiency['R0'][index_case] = cmap_R0

    return hmaps_ventilation_efficiency
def build_mask_efficiency_heatmaps(mask_efficiency_data,
        mask_efficiencies_exhale, mask_efficiencies_inhale,
        school_types=school_types):
    """Build heatmap matrices for the mask-efficiency measure screen.

    Filters the ensemble data down to runs where both students and
    teachers wear masks and nothing else is varied, then for every
    ((exhale, inhale) efficiency pair, school_type) cell computes the
    number of infected agents relative to the (0.7, 0.5) baseline plus
    the raw R0. The two efficiency lists are iterated in lockstep.

    Parameters
    ----------
    mask_efficiency_data : pandas.DataFrame
        Ensemble observables of the mask-efficiency runs.
    mask_efficiencies_exhale, mask_efficiencies_inhale : sequence
        Paired exhale/inhale filter efficiencies (heatmap rows).
    school_types : sequence, optional
        School types (heatmap columns).

    Returns
    -------
    dict
        {'N_infected': {...}, 'R0': {...}}, each keyed by index case
        and holding a 2D ndarray of shape
        (len(mask_efficiencies_exhale), len(school_types)).
    """
    # keep only runs in which mask efficiency is the single varied measure
    data = mask_efficiency_data[\
        (mask_efficiency_data['student_screen_interval'] == 'never') &\
        (mask_efficiency_data['teacher_screen_interval'] == 'never') &\
        (mask_efficiency_data['student_mask'] == True) &\
        (mask_efficiency_data['teacher_mask'] == True) &\
        (mask_efficiency_data['class_size_reduction'] == 0.0) &\
        (mask_efficiency_data['ventilation_modification'] == 1.0)]
    data = data.set_index(
        ['school_type', 'mask_efficiency_exhale', 'mask_efficiency_inhale',
         'index_case'])

    hmaps_mask_efficiency = {
        'N_infected': {'student': np.nan, 'teacher': np.nan},
        'R0': {'student': np.nan, 'teacher': np.nan}
    }

    for index_case in ['student', 'teacher']:
        cmap = np.zeros((len(mask_efficiencies_exhale), len(school_types)))
        cmap_R0 = np.zeros((len(mask_efficiencies_exhale), len(school_types)))
        # exhale and inhale efficiencies are screened as matched pairs
        for i, (mee, mei) in enumerate(zip(mask_efficiencies_exhale,
                                           mask_efficiencies_inhale)):
            for j, st in enumerate(school_types):
                # normalise infections by the (exhale=0.7, inhale=0.5) baseline
                bl_infected_agents = data.loc[st, 0.7, 0.5, index_case]\
                    ['infected_agents_mean']
                cmap[i, j] = data.loc[st, mee, mei, index_case]\
                    ['infected_agents_mean'] / bl_infected_agents
                cmap_R0[i, j] = data.loc[st, mee, mei, index_case]['R0_mean']

        hmaps_mask_efficiency['N_infected'][index_case] = cmap
        hmaps_mask_efficiency['R0'][index_case] = cmap_R0

    return hmaps_mask_efficiency
def build_added_friendship_contacts_heatmaps(added_friendship_contacts_data,
        friendship_ratios, school_types=school_types):
    """Build heatmap matrices for the added-friendship-contacts screen.

    Filters the ensemble data down to runs where only the friendship
    ratio was varied (no screening, no masks, full class size, default
    ventilation) and, for every (friendship_ratio, school_type) cell,
    computes the number of infected agents relative to the ratio = 0.0
    baseline plus the raw R0.

    Parameters
    ----------
    added_friendship_contacts_data : pandas.DataFrame
        Ensemble observables of the friendship-contact runs.
    friendship_ratios : sequence
        Friendship ratios screened (heatmap rows).
    school_types : sequence, optional
        School types (heatmap columns).

    Returns
    -------
    dict
        {'N_infected': {...}, 'R0': {...}}, each keyed by index case
        and holding a 2D ndarray of shape
        (len(friendship_ratios), len(school_types)).
    """
    # keep only runs in which the friendship ratio is the single varied measure
    data = added_friendship_contacts_data[\
        (added_friendship_contacts_data['student_screen_interval'] == 'never') &\
        (added_friendship_contacts_data['teacher_screen_interval'] == 'never') &\
        (added_friendship_contacts_data['student_mask'] == False) &\
        (added_friendship_contacts_data['teacher_mask'] == False) &\
        (added_friendship_contacts_data['class_size_reduction'] == 0.0) &\
        (added_friendship_contacts_data['ventilation_modification'] == 1.0)]
    data = data.set_index(
        ['school_type', 'friendship_ratio', 'index_case'])

    hmaps_added_friendship_contacts = {
        'N_infected': {'student': np.nan, 'teacher': np.nan},
        'R0': {'student': np.nan, 'teacher': np.nan}
    }

    for index_case in ['student', 'teacher']:
        cmap = np.zeros((len(friendship_ratios), len(school_types)))
        cmap_R0 = np.zeros((len(friendship_ratios), len(school_types)))
        for i, fr in enumerate(friendship_ratios):
            for j, st in enumerate(school_types):
                # normalise infections by the ratio = 0.0 baseline scenario
                bl_infected_agents = data.loc[st, 0.0, index_case]\
                    ['infected_agents_mean']
                cmap[i, j] = data.loc[st, fr, index_case]\
                    ['infected_agents_mean'] / bl_infected_agents
                cmap_R0[i, j] = data.loc[st, fr, index_case]['R0_mean']

        hmaps_added_friendship_contacts['N_infected'][index_case] = cmap
        hmaps_added_friendship_contacts['R0'][index_case] = cmap_R0

    return hmaps_added_friendship_contacts
def plot_heatmaps(axes, heatmaps, ylabel, yticklabels, indicator_ypos, colors,
        X_min=0, X_max=10, R_min=0, R_max=9, title=False, xticks=False,
        school_types=school_types):
    """Draw the N_infected and R0 heatmaps for both index cases onto 4 axes.

    axes[0..1] show 'N_infected' (student / teacher index case) with a
    custom continuous colormap, axes[2..3] show 'R0' with a diverging
    'coolwarm' map centred on 1 via MidpointNormalize. A red rectangle is
    drawn on every axis at row ``indicator_ypos`` to mark the
    conservative estimate. Returns the dict of AxesImage handles.
    """
    # NOTE(review): 'teacher' sits at the top level of this dict instead of
    # inside the 'R0' sub-dict; harmless, because the assignment below
    # (images[observable][index_case] = img) creates the 'teacher' key in
    # images['R0'] on the fly — but the init is misleading. Confirm intent.
    images = {'N_infected':{'student':0, 'teacher':0},
             'R0':{'student':0}, 'teacher':0}
    # NOTE(review): this local shadows nothing but is never used — the
    # x tick labels below are set from a literal list instead.
    xticklabels = ['primary', '+ daycare', 'low. sec.', '+ daycare', 'up. sec.',
               'secondary']
    # one colormap per observable: custom gradient for infections,
    # diverging map for R0
    for i, observable, cmap in zip(range(2), ['N_infected', 'R0'],
                        [get_continuous_cmap(colors), plt.get_cmap('coolwarm')]):
        for j, index_case in enumerate(['student', 'teacher']):
            # axes are laid out [N_inf/student, N_inf/teacher, R0/student, R0/teacher]
            ax = axes[2*i + j]
            if observable == 'N_infected':
                img = ax.imshow(heatmaps[observable][index_case],
                            vmin=X_min, vmax=X_max, cmap=cmap)
            else:
                # centre the diverging colormap on R0 = 1
                img = ax.imshow(heatmaps[observable][index_case],
                        clim = (R_min, R_max),
                        norm = MidpointNormalize(midpoint=1, vmin=R_min, vmax=R_max),
                        cmap = cmap)
            images[observable][index_case] = img
            # y axis labels only on the leftmost panel
            if 2*i + j == 0:
                ax.set_yticks(range(len(yticklabels)))
                ax.set_yticklabels(yticklabels)
                ax.set_ylabel(ylabel, fontsize=16)
            else:
                ax.set_yticks([])
                ax.set_yticklabels([])
            if xticks:
                ax.set_xticks(range(len(school_types)))
                ax.set_xticklabels(['primary', '+ daycare', 'low. sec.',
                    '+ daycare', 'up. sec.', 'secondary'], fontsize=12)
                ax.tick_params(axis='x', rotation=90)
            else:
                ax.set_xticks([])
            if title:
                ax.set_title('index case: {}'.format(index_case), fontsize=11)
    # draw a box around the conservative estimate
    for ax in axes:
        rect = Rectangle((-0.41, indicator_ypos), 5.85, 1,
                linewidth=2, edgecolor='r', facecolor='none')
        ax.add_patch(rect)
    return images | [
"pandas.DataFrame",
"matplotlib.colors.Normalize.__init__",
"matplotlib.colors.LinearSegmentedColormap",
"matplotlib.pyplot.get_cmap",
"scseirx.analysis_functions.get_statistics",
"matplotlib.patches.Rectangle",
"numpy.isnan",
"numpy.interp",
"pandas.concat",
"os.path.join",
"os.listdir"
] | [((2167, 2237), 'matplotlib.colors.LinearSegmentedColormap', 'mpl.colors.LinearSegmentedColormap', (['"""my_cmp"""'], {'segmentdata': 'cdict', 'N': '(256)'}), "('my_cmp', segmentdata=cdict, N=256)\n", (2201, 2237), True, 'import matplotlib as mpl\n'), ((3231, 3245), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (3243, 3245), True, 'import pandas as pd\n'), ((3263, 3284), 'os.path.join', 'join', (['src_path', 'stype'], {}), '(src_path, stype)\n', (3267, 3284), False, 'from os.path import join\n'), ((3297, 3319), 'os.listdir', 'os.listdir', (['stype_path'], {}), '(stype_path)\n', (3307, 3319), False, 'import os\n'), ((10472, 10486), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (10484, 10486), True, 'import pandas as pd\n'), ((12081, 12095), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (12093, 12095), True, 'import pandas as pd\n'), ((15995, 16009), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (16007, 16009), True, 'import pandas as pd\n'), ((19831, 19845), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (19843, 19845), True, 'import pandas as pd\n'), ((22798, 22812), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (22810, 22812), True, 'import pandas as pd\n'), ((26605, 26619), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (26617, 26619), True, 'import pandas as pd\n'), ((29785, 29799), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (29797, 29799), True, 'import pandas as pd\n'), ((33677, 33691), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (33689, 33691), True, 'import pandas as pd\n'), ((36871, 36885), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (36883, 36885), True, 'import pandas as pd\n'), ((40705, 40719), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (40717, 40719), True, 'import pandas as pd\n'), ((2675, 2724), 'matplotlib.colors.Normalize.__init__', 'colors.Normalize.__init__', (['self', 'vmin', 'vmax', 'clip'], {}), '(self, vmin, vmax, clip)\n', (2700, 2724), True, 
'import matplotlib.colors as colors\n'), ((6140, 6165), 'pandas.concat', 'pd.concat', (['[data, ensmbl]'], {}), '([data, ensmbl])\n', (6149, 6165), True, 'import pandas as pd\n'), ((10656, 10687), 'pandas.concat', 'pd.concat', (['[baseline_data, tmp]'], {}), '([baseline_data, tmp])\n', (10665, 10687), True, 'import pandas as pd\n'), ((14707, 14757), 'os.path.join', 'join', (['src_path', '"""test_sensitivity_observables.csv"""'], {}), "(src_path, 'test_sensitivity_observables.csv')\n", (14711, 14757), False, 'from os.path import join\n'), ((18571, 18622), 'os.path.join', 'join', (['src_path', '"""testing_rate_data_observables.csv"""'], {}), "(src_path, 'testing_rate_data_observables.csv')\n", (18575, 18622), False, 'from os.path import join\n'), ((56234, 56327), 'matplotlib.patches.Rectangle', 'Rectangle', (['(-0.41, indicator_ypos)', '(5.85)', '(1)'], {'linewidth': '(2)', 'edgecolor': '"""r"""', 'facecolor': '"""none"""'}), "((-0.41, indicator_ypos), 5.85, 1, linewidth=2, edgecolor='r',\n facecolor='none')\n", (56243, 56327), False, 'from matplotlib.patches import Rectangle\n'), ((2971, 2993), 'numpy.interp', 'np.interp', (['value', 'x', 'y'], {}), '(value, x, y)\n', (2980, 2993), True, 'import numpy as np\n'), ((2995, 3010), 'numpy.isnan', 'np.isnan', (['value'], {}), '(value)\n', (3003, 3010), True, 'import numpy as np\n'), ((3789, 3808), 'os.path.join', 'join', (['stype_path', 'f'], {}), '(stype_path, f)\n', (3793, 3808), False, 'from os.path import join\n'), ((54814, 54838), 'matplotlib.pyplot.get_cmap', 'plt.get_cmap', (['"""coolwarm"""'], {}), "('coolwarm')\n", (54826, 54838), True, 'import matplotlib.pyplot as plt\n'), ((14485, 14521), 'scseirx.analysis_functions.get_statistics', 'af.get_statistics', (['measure_data', 'col'], {}), '(measure_data, col)\n', (14502, 14521), True, 'from scseirx import analysis_functions as af\n'), ((18421, 18457), 'scseirx.analysis_functions.get_statistics', 'af.get_statistics', (['measure_data', 'col'], {}), '(measure_data, 
col)\n', (18438, 18457), True, 'from scseirx import analysis_functions as af\n'), ((22124, 22160), 'scseirx.analysis_functions.get_statistics', 'af.get_statistics', (['measure_data', 'col'], {}), '(measure_data, col)\n', (22141, 22160), True, 'from scseirx import analysis_functions as af\n'), ((25091, 25127), 'scseirx.analysis_functions.get_statistics', 'af.get_statistics', (['measure_data', 'col'], {}), '(measure_data, col)\n', (25108, 25127), True, 'from scseirx import analysis_functions as af\n'), ((29131, 29167), 'scseirx.analysis_functions.get_statistics', 'af.get_statistics', (['measure_data', 'col'], {}), '(measure_data, col)\n', (29148, 29167), True, 'from scseirx import analysis_functions as af\n'), ((32183, 32219), 'scseirx.analysis_functions.get_statistics', 'af.get_statistics', (['measure_data', 'col'], {}), '(measure_data, col)\n', (32200, 32219), True, 'from scseirx import analysis_functions as af\n'), ((36336, 36372), 'scseirx.analysis_functions.get_statistics', 'af.get_statistics', (['measure_data', 'col'], {}), '(measure_data, col)\n', (36353, 36372), True, 'from scseirx import analysis_functions as af\n'), ((40200, 40236), 'scseirx.analysis_functions.get_statistics', 'af.get_statistics', (['measure_data', 'col'], {}), '(measure_data, col)\n', (40217, 40236), True, 'from scseirx import analysis_functions as af\n'), ((43829, 43865), 'scseirx.analysis_functions.get_statistics', 'af.get_statistics', (['measure_data', 'col'], {}), '(measure_data, col)\n', (43846, 43865), True, 'from scseirx import analysis_functions as af\n')] |
import numpy as np
import pandas as pd
from sklearn.metrics import roc_auc_score, auc, roc_curve, confusion_matrix, fbeta_score
from imblearn.over_sampling import BorderlineSMOTE
from collections import Counter
import gc as gc
from sklearn.feature_selection import RFE
#-------------------------------------------------------------------------------------------------------------------------
def kfold_smote_RFE(features_num, classifier, folds, df_train_filtered_std, y_train, smote='y'):
    """K-fold cross-validated recursive feature elimination (RFE).

    For every fold produced by ``folds`` and every feature count from 2 to
    ``features_num``, fits an RFE wrapper around ``classifier`` on the
    (optionally SMOTE-resampled) training part and records the validation
    ROC-AUC. Results are printed and shown as a boxplot; the per-fold AUC
    matrix of shape (n_folds, features_num - 1) is returned.

    features_num -> largest number of features to try with RFE
    smote        -> 'y' to apply BorderlineSMOTE to each fold's train part
    """
    # build one RFE model per candidate feature count (2 .. features_num)
    def get_models():
        models = dict()
        for i in range(2, features_num+1):
            models[str(i)] = RFE(estimator=classifier, n_features_to_select=i)
        return models
    # per-fold list of AUC scores, one entry per candidate feature count
    fold_results = list()
    for n_fold, (train_idx, valid_idx) in enumerate(folds.split(df_train_filtered_std, y_train)):
        train_x, train_y = df_train_filtered_std.iloc[train_idx], y_train.iloc[train_idx]
        valid_x, valid_y = df_train_filtered_std.iloc[valid_idx], y_train.iloc[valid_idx]
        # summarize class distribution
        counter = Counter(train_y)
        print('\n-----------------------------------------------------')
        print('Fold %2d, original distribution: ' % (n_fold + 1))
        print(counter)
        if smote=='y':
            # oversample the minority class in the train part only
            # (validation part stays untouched to avoid leakage)
            oversample = BorderlineSMOTE()
            train_x, train_y = oversample.fit_resample(train_x, train_y)
            # summarize the new class distribution
            counter = Counter(train_y)
            print('Fold %2d, re-sampled distribution: ' % (n_fold + 1))
            print(counter)
        # get the models to evaluate
        models = get_models()
        # evaluate the models and store results
        models_results, names = list(), list()
        for name, model in models.items():
            # Print the number of features of the model
            print('\nFeatures:%s' % (name))
            # fit RFE
            model.fit(train_x, train_y)
            # positive-class probabilities on the held-out fold
            probas = model.predict_proba(valid_x)[:, 1]
            # ROC-AUC per model
            AUC = roc_auc_score(valid_y, probas)
            # Collecting results
            models_results.append(AUC)
            names.append(name)
            # show which features RFE kept and their elimination rank
            for i in range(train_x.shape[1]):
                print('Column: %d, Selected %s, Rank: %.3f' % (i, model.support_[i], model.ranking_[i]))
            # Print AUC score
            print(f'\nAUC: {AUC}')
        print('\nModels results')
        print(models_results)
        fold_results.append(models_results)
    print('\nFolds results')
    print(fold_results)
    fold_results = np.asarray(fold_results)
    # plot model performance for comparison
    # NOTE(review): relies on a module-level 'plt' (matplotlib.pyplot),
    # which is not among this module's visible imports — verify.
    plt.figure(figsize=(15,10))
    plt.boxplot(fold_results, labels=range(2,features_num+1), showmeans=True)
    plt.title('RECURSIVE FEATURE ELIMINATION'
        f'\n\ntrain re-sampling (SMOTE):"{smote}"',fontsize=20)
    plt.xlabel('Numbers of features selected',fontsize=15)
    plt.ylabel('Crossvalidation AUC',fontsize=15)
    plt.ylim((0.5, 0.8))
    # NOTE(review): 'smote_case' and 'class_weigh_case' are not defined in
    # this function or its visible module scope — this savefig raises
    # NameError unless they exist as globals elsewhere; confirm.
    plt.savefig(f'projets\\07_loan_customer_scoring\\production\\savefig\\model_test_{smote_case}\\feature_selection\\{class_weigh_case}\\feature_selection_RFE_feature_number.png', transparent=True)
    plt.show()
    return fold_results
return fold_results
#-------------------------------------------------------------------------------------------------------------------------
# Classification with kfold available for several algorithms
def kfold_classif(classifier, folds, df_train_std, target_train, df_val_std, target_val, custom_loss, fbeta, fbeta_number=0, logistic_regression=False, train_resampling='n', eval_set=False, scorer='auc', early_stopping_rounds=None, verbose=200):
    """K_fold training/validation for DecisionTree/RandomForest/LightGBM/XGBoost/CATBoost/LogisticRegression,
    train_resampling-> borderline smote re-sampling on the train part,
    fbetanumber-> for function to optimize"""
    """"num_iteration=clf.best_iteration_ can be added in the predict_proba() when callable """
    # Out-of-fold probabilities (train) and fold-averaged probabilities (val)
    crossvalid_probas = np.zeros(df_train_std.shape[0])
    valid_probas = np.zeros(df_val_std.shape[0])
    fold_AUC_list = []
    feature_importance_df = pd.DataFrame()
    # drop id / target columns from the feature set
    feats = [f for f in df_train_std.columns if f not in ['TARGET','SK_ID_CURR','SK_ID_BUREAU','SK_ID_PREV','index']]
    # Sanitize column names (some boosters reject non-alphanumeric chars)
    df_train_std_2 = df_train_std[feats]
    df_val_std_2 = df_val_std[feats]
    df_train_std_2.columns = ["".join (c if c.isalnum() else "_" for c in str(x)) for x in df_train_std_2.columns]
    df_val_std_2.columns = ["".join (c if c.isalnum() else "_" for c in str(x)) for x in df_val_std_2.columns]
    # candidate decision thresholds for the post-hoc optimisation below
    thresholds = np.arange(0, 1, 0.001)
    # apply threshold to positive probabilities to create labels
    def to_labels(pos_probs, threshold):
        return (pos_probs >= threshold).astype('int')
    def custom_cost_function(testy, yhat):
        # get the fn and the fp from the confusion matrix
        tn, fp, fn, tp = confusion_matrix(testy, yhat).ravel()
        # business cost: a false negative is 10x worse than a false positive
        y = 10*fn + fp
        return y
    # data from each fold
    for n_fold, (train_idx, valid_idx) in enumerate(folds.split(df_train_std_2, target_train)):
        train_x, train_y = df_train_std_2.iloc[train_idx], target_train.iloc[train_idx]
        valid_x, valid_y = df_train_std_2.iloc[valid_idx], target_train.iloc[valid_idx]
        # Re-sampling
        if train_resampling=='y':
            # summarize class distribution
            counter = Counter(train_y)
            print('Fold %2d, original distribution: ' % (n_fold + 1))
            print(counter)
            # oversample the minority class (train part only, no leakage)
            oversample = BorderlineSMOTE()
            train_x, train_y = oversample.fit_resample(train_x, train_y)
            # summarize the new class distribution
            counter = Counter(train_y)
            print('Fold %2d, re-sampled distribution: ' % (n_fold + 1))
            print(counter)
        # NOTE(review): the same estimator object is re-fit on every fold
        # (no clone), so 'clf' after the loop is the last fold's model.
        clf = classifier
        # fitting
        if eval_set==True:
            clf.fit(train_x, train_y, eval_set=[(train_x, train_y), (valid_x, valid_y)],
                eval_metric=scorer, verbose=verbose, early_stopping_rounds=early_stopping_rounds)
        if eval_set==False:
            clf.fit(train_x, train_y)
        # out-of-fold probabilities for this fold's validation rows
        crossvalid_probas[valid_idx] = clf.predict_proba(valid_x)[:, 1]
        # ROC-AUC
        AUC = roc_auc_score(valid_y, crossvalid_probas[valid_idx])
        fold_AUC_list.append(AUC)
        # showing results from each fold
        print('Fold %2d AUC : %.6f' % (n_fold + 1, AUC))
        # Collecting results
        fold_importance_df = pd.DataFrame()
        fold_importance_df["feature"] = feats
        # linear models expose coef_, tree ensembles feature_importances_
        if logistic_regression==True:
            fold_importance_df["importance"] = clf.coef_[0]
        if logistic_regression==False:
            fold_importance_df["importance"] = clf.feature_importances_
        fold_importance_df["fold"] = n_fold + 1
        fold_importance_df["val_fold_AUC"] = AUC
        feature_importance_df = pd.concat([feature_importance_df, fold_importance_df], axis=0)
        feature_importance_df.sort_values(by='importance', ascending=False, inplace=True)
        #validation_ROC_AUC = roc_auc_score(target_train, crossvalid_probas)
        # NOTE(review): predictions use df_val_std (original column names)
        # although the model was fit on the sanitized df_val_std_2 columns —
        # verify column alignment for the chosen classifier.
        valid_probas += clf.predict_proba(df_val_std)[:, 1] / folds.n_splits
        del train_x, train_y, valid_x, valid_y
        gc.collect()
    # Final performance
    mean_crossvalid_fold_ROC_AUC = sum(fold_AUC_list)/len(fold_AUC_list)
    print('Mean cross-validation ROC-AUC score %.6f' % mean_crossvalid_fold_ROC_AUC)
    #validation_ROC_AUC = roc_auc_score(target_train, crossvalid_probas)
    validation_ROC_AUC = roc_auc_score(target_val, valid_probas)
    print('Validation ROC-AUC score %.6f' % validation_ROC_AUC)
    # Optimising the threshold
    # NOTE(review): if neither branch below runs (fbeta falsy or
    # fbeta_number == 0, and custom_loss != 'y'), 'best_score' and
    # 'threshold' are never bound and the return raises NameError.
    if (fbeta==True)&(fbeta_number!=0):
        # evaluate each threshold with f-beta loss function
        scores = [fbeta_score(target_val.values, to_labels(valid_probas, t), average='weighted', beta=fbeta_number) for t in thresholds]
        # get best threshold
        ix = np.argmax(scores)
        print(f'Threshold=%.3f, F-{fbeta_number} score_max=%.5f' % (thresholds[ix], scores[ix]))
        best_score = scores[ix]
        threshold = thresholds[ix]
    if custom_loss=='y':
        # evaluate each threshold with custom loss function
        scores = [custom_cost_function(target_val.values, to_labels(valid_probas, t)) for t in thresholds]
        # get best threshold
        ix = np.argmin(scores)
        print(f'Threshold=%.3f, Custom loss function (10*fn + fp) score_min=%.5f' % (thresholds[ix], scores[ix]))
        best_score = scores[ix]
        threshold = thresholds[ix]
    return clf, feature_importance_df, mean_crossvalid_fold_ROC_AUC, validation_ROC_AUC, best_score, threshold
#-------------------------------------------------------------------------------------------------------------------------
# One hot encoder (avec récupération des labels)
from sklearn.preprocessing import OneHotEncoder as SklearnOneHotEncoder
import pandas as pd
import numpy as np
class OneHotEncoder(SklearnOneHotEncoder):
    """One-hot encoder returning a labelled ``pandas.DataFrame``.

    Thin wrapper around scikit-learn's ``OneHotEncoder`` whose
    ``transform`` yields a dense DataFrame with columns named
    ``<original>_<category>`` and the same index as the input.
    """

    def __init__(self, **kwargs):
        super(OneHotEncoder, self).__init__(**kwargs)
        # records whether fit() has been called at least once
        self.fit_flag = False

    def fit(self, X, **kwargs):
        fitted = super().fit(X)
        self.fit_flag = True
        return fitted

    def transform(self, X, **kwargs):
        encoded = super(OneHotEncoder, self).transform(X)
        labels = self.get_new_columns(X=X)
        # densify the sparse matrix and attach readable labels / input index
        return pd.DataFrame(encoded.toarray(), columns=labels, index=X.index)

    def fit_transform(self, X, **kwargs):
        self.fit(X)
        return self.transform(X)

    def get_new_columns(self, X):
        # one output column per (input column, learned category) pair
        return [f'{column}_<{category}>'
                for i, column in enumerate(X.columns)
                for category in self.categories_[i]]
#-------------------------------------------------------------------------------------------------------------------------
# Targer Encoding ou One Hot Encoding (1 nouvelle colonne crée)
def encoding_transform_with_merge(dataframe, column, fix_column, trained_model, column_new_name):
    """Transform one column with a fitted encoder and merge the result back.

    Parameters
    ----------
    dataframe : pandas.DataFrame
        Frame containing ``column`` and the key column ``fix_column``.
    column : str
        Name of the column to encode.
    fix_column : str
        Key column used to merge the encoded values back.
    trained_model : object
        Fitted encoder exposing ``transform(series)``.
    column_new_name : str
        Name of the new encoded column.

    Returns
    -------
    pandas.DataFrame
        Copy of ``dataframe`` with ``column_new_name`` appended.
    """
    # Work on a two-column copy keyed by fix_column so a possible
    # re-indexing inside transform() cannot scramble row order.
    dataframe_work = pd.DataFrame(dataframe[[column, fix_column]],
                                  columns=[column, fix_column])
    dataframe_work.set_index([fix_column], inplace=True)
    # Encode the column with the already-fitted model
    dataframe_work[column_new_name] = trained_model.transform(dataframe_work[column])
    dataframe_work.drop(column, axis=1, inplace=True)
    # The key column was moved to the index; bring it back as a column
    dataframe_work.reset_index(inplace=True)
    # Merge the encoded column back on the shared key column
    dataframe = pd.merge(dataframe, dataframe_work, on=fix_column)
    return dataframe
# Label Encoding ou One Hot Encoding (1 nouvelle colonne crée)
def label_encoding_transform_with_merge(dataframe, column, fix_column, trained_model, column_new_name):
    """Label-encode one column with a fitted encoder and merge the result back.

    Missing values (NaN/None) are kept as NaN instead of being passed to
    the encoder (which would raise on unseen/missing labels).

    Parameters
    ----------
    dataframe : pandas.DataFrame
        Frame containing ``column`` and the key column ``fix_column``.
    column : str
        Name of the column to encode.
    fix_column : str
        Key column used to merge the encoded values back.
    trained_model : object
        Fitted label encoder exposing ``transform(list)`` (e.g.
        ``sklearn.preprocessing.LabelEncoder``).
    column_new_name : str
        Name of the new encoded column.

    Returns
    -------
    pandas.DataFrame
        Copy of ``dataframe`` with ``column_new_name`` appended.
    """
    # Work on a two-column copy keyed by fix_column so a possible
    # re-indexing inside transform() cannot scramble row order.
    dataframe_work = pd.DataFrame(dataframe[[column, fix_column]],
                                  columns=[column, fix_column])
    dataframe_work.set_index([fix_column], inplace=True)
    # Encode element-wise, skipping missing values. np.nan (not the
    # np.NaN alias, removed in NumPy 2.0) keeps this forward-compatible.
    dataframe_work[column_new_name] = dataframe_work[column].apply(
        lambda x: trained_model.transform([x])[0] if pd.notna(x) else np.nan)
    dataframe_work.drop(column, axis=1, inplace=True)
    # The key column was moved to the index; bring it back as a column
    dataframe_work.reset_index(inplace=True)
    # Merge the encoded column back on the shared key column
    dataframe = pd.merge(dataframe, dataframe_work, on=fix_column)
    return dataframe
# Targer Encoding ou One Hot Encoding (1 nouvelle colonne crée)
def target_encoding_transform_with_merge(dataframe, column, fix_column, trained_model, column_new_name):
    """Apply a fitted target encoder to ``column`` and merge the values back.

    ``fix_column`` acts as the merge key so that row order survives a
    possible re-indexing inside ``trained_model.transform``.
    """
    # Isolate the target column, keyed by the merge column.
    encoded = pd.DataFrame(dataframe[[column, fix_column]],
                         columns=[column, fix_column]).set_index([fix_column])
    # Encode with the pre-fitted model, then keep only the new column.
    encoded[column_new_name] = trained_model.transform(encoded[column])
    encoded = encoded.drop(column, axis=1).reset_index()
    # Join the encoded values back on the shared key.
    return pd.merge(dataframe, encoded, on=fix_column)
# ONE-HOT-ENCODING (plusieurs nouvelles colonnes crées)
def vector_encoding_transform_with_merge(dataframe, column, fix_column, trained_model):
    """One-hot encode ``column`` with a fitted encoder and merge the new columns back.

    ``trained_model.transform`` is expected to return a frame-like object
    indexed like its input, so the merge key (``fix_column``) is preserved.
    """
    # Key the working copy by the merge column to keep row identity.
    work = pd.DataFrame(dataframe[[column, fix_column]]).set_index([fix_column])
    # The encoder may emit several new columns (one per category).
    encoded = pd.DataFrame(trained_model.transform(work))
    # Index -> column, so the merge key is available again.
    encoded.reset_index(inplace=True)
    # Join all encoded columns back on the shared key.
    return pd.merge(dataframe, encoded, on=fix_column)
#----------------------------------------------------------------------------------------------
def SAVE_encoding_transform_with_merge(dataframe, column, fix_column, trained_model, column_new_name):
    """Encode ``column`` with a fitted model and merge the result back (legacy).

    Same contract as ``encoding_transform_with_merge`` except the working
    copy is built without an explicit column list.
    """
    # Keep only the column to encode plus the merge key.
    work = pd.DataFrame(dataframe[[column, fix_column]])
    work.set_index([fix_column], inplace=True)
    # Apply the fitted encoder, then drop the raw column.
    work[column_new_name] = trained_model.transform(work[column])
    work = work.drop(column, axis=1)
    # Restore the merge key as a regular column.
    work = work.reset_index()
    # Join the encoded column back on the shared key.
    return pd.merge(dataframe, work, on=fix_column)
| [
"pandas.DataFrame",
"sklearn.metrics.confusion_matrix",
"pandas.notna",
"numpy.argmax",
"numpy.asarray",
"pandas.merge",
"numpy.zeros",
"sklearn.feature_selection.RFE",
"numpy.argmin",
"sklearn.metrics.roc_auc_score",
"imblearn.over_sampling.BorderlineSMOTE",
"gc.collect",
"numpy.arange",
... | [((2925, 2949), 'numpy.asarray', 'np.asarray', (['fold_results'], {}), '(fold_results)\n', (2935, 2949), True, 'import numpy as np\n'), ((4449, 4480), 'numpy.zeros', 'np.zeros', (['df_train_std.shape[0]'], {}), '(df_train_std.shape[0])\n', (4457, 4480), True, 'import numpy as np\n'), ((4500, 4529), 'numpy.zeros', 'np.zeros', (['df_val_std.shape[0]'], {}), '(df_val_std.shape[0])\n', (4508, 4529), True, 'import numpy as np\n'), ((4581, 4595), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (4593, 4595), True, 'import pandas as pd\n'), ((5091, 5113), 'numpy.arange', 'np.arange', (['(0)', '(1)', '(0.001)'], {}), '(0, 1, 0.001)\n', (5100, 5113), True, 'import numpy as np\n'), ((8320, 8359), 'sklearn.metrics.roc_auc_score', 'roc_auc_score', (['target_val', 'valid_probas'], {}), '(target_val, valid_probas)\n', (8333, 8359), False, 'from sklearn.metrics import roc_auc_score, auc, roc_curve, confusion_matrix, fbeta_score\n'), ((11446, 11521), 'pandas.DataFrame', 'pd.DataFrame', (['dataframe[[column, fix_column]]'], {'columns': '[column, fix_column]'}), '(dataframe[[column, fix_column]], columns=[column, fix_column])\n', (11458, 11521), True, 'import pandas as pd\n'), ((11928, 11978), 'pandas.merge', 'pd.merge', (['dataframe', 'dataframe_work'], {'on': 'fix_column'}), '(dataframe, dataframe_work, on=fix_column)\n', (11936, 11978), True, 'import pandas as pd\n'), ((12586, 12661), 'pandas.DataFrame', 'pd.DataFrame', (['dataframe[[column, fix_column]]'], {'columns': '[column, fix_column]'}), '(dataframe[[column, fix_column]], columns=[column, fix_column])\n', (12598, 12661), True, 'import pandas as pd\n'), ((13119, 13169), 'pandas.merge', 'pd.merge', (['dataframe', 'dataframe_work'], {'on': 'fix_column'}), '(dataframe, dataframe_work, on=fix_column)\n', (13127, 13169), True, 'import pandas as pd\n'), ((13777, 13852), 'pandas.DataFrame', 'pd.DataFrame', (['dataframe[[column, fix_column]]'], {'columns': '[column, fix_column]'}), '(dataframe[[column, fix_column]], 
columns=[column, fix_column])\n', (13789, 13852), True, 'import pandas as pd\n'), ((14259, 14309), 'pandas.merge', 'pd.merge', (['dataframe', 'dataframe_work'], {'on': 'fix_column'}), '(dataframe, dataframe_work, on=fix_column)\n', (14267, 14309), True, 'import pandas as pd\n'), ((14893, 14938), 'pandas.DataFrame', 'pd.DataFrame', (['dataframe[[column, fix_column]]'], {}), '(dataframe[[column, fix_column]])\n', (14905, 14938), True, 'import pandas as pd\n'), ((15305, 15367), 'pandas.merge', 'pd.merge', (['dataframe', 'dataframe_work_transformed'], {'on': 'fix_column'}), '(dataframe, dataframe_work_transformed, on=fix_column)\n', (15313, 15367), True, 'import pandas as pd\n'), ((16006, 16051), 'pandas.DataFrame', 'pd.DataFrame', (['dataframe[[column, fix_column]]'], {}), '(dataframe[[column, fix_column]])\n', (16018, 16051), True, 'import pandas as pd\n'), ((16459, 16509), 'pandas.merge', 'pd.merge', (['dataframe', 'dataframe_work'], {'on': 'fix_column'}), '(dataframe, dataframe_work, on=fix_column)\n', (16467, 16509), True, 'import pandas as pd\n'), ((1292, 1308), 'collections.Counter', 'Counter', (['train_y'], {}), '(train_y)\n', (1299, 1308), False, 'from collections import Counter\n'), ((6916, 6968), 'sklearn.metrics.roc_auc_score', 'roc_auc_score', (['valid_y', 'crossvalid_probas[valid_idx]'], {}), '(valid_y, crossvalid_probas[valid_idx])\n', (6929, 6968), False, 'from sklearn.metrics import roc_auc_score, auc, roc_curve, confusion_matrix, fbeta_score\n'), ((7172, 7186), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (7184, 7186), True, 'import pandas as pd\n'), ((7619, 7681), 'pandas.concat', 'pd.concat', (['[feature_importance_df, fold_importance_df]'], {'axis': '(0)'}), '([feature_importance_df, fold_importance_df], axis=0)\n', (7628, 7681), True, 'import pandas as pd\n'), ((8021, 8033), 'gc.collect', 'gc.collect', ([], {}), '()\n', (8031, 8033), True, 'import gc as gc\n'), ((8751, 8768), 'numpy.argmax', 'np.argmax', (['scores'], {}), '(scores)\n', 
(8760, 8768), True, 'import numpy as np\n'), ((9194, 9211), 'numpy.argmin', 'np.argmin', (['scores'], {}), '(scores)\n', (9203, 9211), True, 'import numpy as np\n'), ((830, 879), 'sklearn.feature_selection.RFE', 'RFE', ([], {'estimator': 'classifier', 'n_features_to_select': 'i'}), '(estimator=classifier, n_features_to_select=i)\n', (833, 879), False, 'from sklearn.feature_selection import RFE\n'), ((1556, 1573), 'imblearn.over_sampling.BorderlineSMOTE', 'BorderlineSMOTE', ([], {}), '()\n', (1571, 1573), False, 'from imblearn.over_sampling import BorderlineSMOTE\n'), ((1720, 1736), 'collections.Counter', 'Counter', (['train_y'], {}), '(train_y)\n', (1727, 1736), False, 'from collections import Counter\n'), ((2351, 2381), 'sklearn.metrics.roc_auc_score', 'roc_auc_score', (['valid_y', 'probas'], {}), '(valid_y, probas)\n', (2364, 2381), False, 'from sklearn.metrics import roc_auc_score, auc, roc_curve, confusion_matrix, fbeta_score\n'), ((5943, 5959), 'collections.Counter', 'Counter', (['train_y'], {}), '(train_y)\n', (5950, 5959), False, 'from collections import Counter\n'), ((6131, 6148), 'imblearn.over_sampling.BorderlineSMOTE', 'BorderlineSMOTE', ([], {}), '()\n', (6146, 6148), False, 'from imblearn.over_sampling import BorderlineSMOTE\n'), ((6295, 6311), 'collections.Counter', 'Counter', (['train_y'], {}), '(train_y)\n', (6302, 6311), False, 'from collections import Counter\n'), ((5405, 5434), 'sklearn.metrics.confusion_matrix', 'confusion_matrix', (['testy', 'yhat'], {}), '(testy, yhat)\n', (5421, 5434), False, 'from sklearn.metrics import roc_auc_score, auc, roc_curve, confusion_matrix, fbeta_score\n'), ((12848, 12859), 'pandas.notna', 'pd.notna', (['x'], {}), '(x)\n', (12856, 12859), True, 'import pandas as pd\n')] |
# Copyright 2021 Sony Group Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import nnabla as nn
import nnabla.solvers as S
import nnabla.functions as F
import os
import functools
from tqdm import tqdm
from sgd_influence_utils.model import setup_model
from sgd_influence_utils.dataset import get_batch_data, init_dataset, get_data, get_image_size, get_batch_indices
from sgd_influence_utils.utils import get_indices, save_to_csv, is_proto_graph
from sgd_influence_utils.infl import compute_gradient, save_infl_for_analysis
def infl_icml(model_info_dict, file_dir_dict, use_all_params, need_evaluate, alpha):
    """Compute per-training-sample influence scores and write them to CSV.

    Pipeline (three stages, mirroring the tqdm progress labels):
      1. Load the final trained weights and compute the validation-loss
         gradient ``u`` via ``compute_gradient``.
      2. Approximate ``v = H^-1 u`` by running SGD on a quadratic surrogate
         loss (see the ``# Hinv * u with SGD`` section); ``alpha`` is the
         damping coefficient in the ``alpha * vv * vv`` term.
      3. Score each training example by the inner product of its loss
         gradient with ``v``; save the sorted scores with ``save_to_csv``.

    Args:
        model_info_dict (dict): model/training configuration — keys include
            'lr', 'seed', 'net_func', 'batch_size', 'num_epochs',
            'network_info', 'net_name_dict', 'bs_adjuster', and (for proto
            graphs) 'resize_size_train' / 'resize_size_val'.
        file_dir_dict (dict): file locations — 'save_dir', 'infl_filename',
            'train_csv', 'val_csv', 'weight_name_dict'.
        use_all_params (bool): forwarded to ``save_infl_for_analysis``.
        need_evaluate (bool): when True, also save per-epoch influence files
            for later analysis.
        alpha (float): damping/regularization coefficient for the
            Hessian-inverse surrogate objective.
    """
    # Number of SGD passes used to approximate H^-1 u (stage 2).
    num_epochs = 2
    # params
    lr = model_info_dict['lr']
    seed = model_info_dict['seed']
    net_func = model_info_dict['net_func']
    batch_size = model_info_dict['batch_size']
    target_epoch = model_info_dict['num_epochs']
    # files and dirs
    save_dir = file_dir_dict['save_dir']
    infl_filename = file_dir_dict['infl_filename']
    network_info = model_info_dict['network_info']
    net_name_dict = model_info_dict['net_name_dict']
    bsa = model_info_dict['bs_adjuster']
    final_model_name = file_dir_dict['weight_name_dict']['final']
    # Weights saved at the last training epoch (epochs are 0-indexed on disk).
    final_model_path = os.path.join(save_dir, 'epoch%02d' % (
        target_epoch - 1), 'weights', final_model_name)
    input_dir_name = os.path.dirname(file_dir_dict['train_csv'])
    # setup
    trainset, valset, image_shape, n_classes, ntr, nval = init_dataset(
        file_dir_dict['train_csv'], file_dir_dict['val_csv'], seed)
    n_channels, _h, _w = image_shape
    idx_train = get_indices(ntr, seed)
    idx_val = get_indices(nval, seed)
    # Proto-graph networks carry their own optimizer/resize config; otherwise
    # fall back to a plain momentum solver and image-derived resize size.
    if is_proto_graph(net_func):
        resize_size_train = model_info_dict['resize_size_train']
        resize_size_val = model_info_dict['resize_size_val']
        solver = network_info.optimizers['Optimizer'].solver
        _setup_model = functools.partial(
            setup_model, net_name_dict=net_name_dict)
    else:
        resize_size_train = get_image_size((_h, _w))
        resize_size_val = resize_size_train
        solver = S.Momentum(lr=lr, momentum=0.9)
        _setup_model = setup_model
    nn.load_parameters(final_model_path)
    trained_params = nn.get_parameters(grad_only=False)
    test = True
    # reduction='sum' so per-batch gradients add up across samples.
    grad_model = functools.partial(
        _setup_model, network=net_func, n_classes=n_classes, n_channels=n_channels, resize_size=resize_size_val, test=test, reduction='sum')
    solver.set_parameters(trained_params)
    # gradient
    u = compute_gradient(grad_model, bsa, solver, valset,
                         batch_size, idx_val, resize_size_val)
    # Hinv * u with SGD
    seed_train = 0
    # v holds the running estimate of H^-1 u, one variable per parameter,
    # initialized to zero below.
    v = {
        f'{k}': nn.Variable(p.d.shape, need_grad=True)
        for k, p in nn.get_parameters(grad_only=False).items()
    }
    for k, vv in v.items():
        vv.d = 0
    solver.set_parameters(v)
    loss_train = []
    loss_fn = None
    test = False
    for epoch in tqdm(range(num_epochs)):
        # training
        seed_train = 0
        np.random.seed(epoch)
        idx = get_batch_indices(ntr, batch_size, seed=epoch)
        for j, i in enumerate(idx):
            # One deterministic augmentation seed per sample in the batch.
            seeds = list(range(seed_train, seed_train + i.size))
            seed_train += i.size
            X, y = get_batch_data(trainset, idx_train, i,
                                   resize_size_train, test=test, seeds=seeds)
            _, loss_fn, input_image = bsa.adjust_batch_size(
                grad_model, len(X), loss_fn, test=test)
            input_image["image"].d = X
            input_image["label"].d = y
            loss_fn.forward()
            # vg = <v, grad(loss)> — scalar used for the Hessian-vector
            # product via double backprop below.
            vg = 0
            params = nn.get_parameters(grad_only=False)
            for k, gp in zip(params.keys(), nn.grad([loss_fn], params.values())):
                # for k, g in grad_params.items():
                vv = v.get(k, None)
                if vv is not None:
                    vg += F.sum(vv * gp)
            for k, p in nn.get_parameters(grad_only=False).items():
                p.grad.zero()
            # Surrogate objective 0.5 v^T (H + alpha I) v - u^T v, whose
            # minimizer is v = (H + alpha I)^-1 u.
            loss_i = 0
            params = nn.get_parameters(grad_only=False)
            for k, vgp in zip(params.keys(), nn.grad([vg], params.values())):
                vv = v.get(k, None)
                uu = u.get(k, None)
                # NOTE: bitwise '&' on bools — equivalent to 'and' here.
                if (vv is not None) & (uu is not None):
                    loss_i += 0.5 * \
                        F.sum(vgp * vv + alpha * vv * vv) - F.sum(uu * vv)
            loss_i.forward()
            solver.zero_grad()
            loss_i.backward(clear_buffer=True)
            solver.update()
            loss_train.append(loss_i.d.copy())
    # influence
    infl_dict = dict()
    infl = np.zeros(ntr)
    for i in tqdm(range(ntr), desc='calc influence (3/3 steps)'):
        csv_idx = idx_train[i]
        file_name = trainset.get_filepath_to_data(csv_idx)
        file_name = os.path.join(input_dir_name, file_name)
        file_name = os.path.normpath(file_name)
        X, y = get_data(
            trainset, idx_train[i], resize_size_train, True, seed=i)
        _, loss_fn, input_image = bsa.adjust_batch_size(
            grad_model, len(X), loss_fn, test=test)
        input_image["image"].d = X
        input_image["label"].d = y
        loss_fn.forward()
        for p in nn.get_parameters(grad_only=False).values():
            p.grad.zero()
        loss_fn.backward(clear_buffer=True)
        # Influence of sample i: -<grad(loss_i), v> / ntr.
        infl_i = 0
        for k, p in nn.get_parameters(grad_only=False).items():
            vv = v[k]
            if vv is not None:
                infl_i += (p.g * vv.d).sum()
        infl[i] = -infl_i / ntr
        infl_dict[csv_idx] = [file_name, y, infl[i]]
    infl_list = [val + [key] for key, val in infl_dict.items()]
    # Sort ascending by influence (second-to-last column).
    infl_list = sorted(infl_list, key=lambda x: (x[-2]))
    # save
    header = ['x:image', 'y:label', 'influence', 'datasource_index']
    data_type = 'object,int,float,int'
    if need_evaluate:
        save_infl_for_analysis(infl_list, use_all_params,
                               save_dir, infl_filename, epoch, header, data_type)
    save_to_csv(filename=infl_filename, header=header,
                list_to_save=infl_list, data_type=data_type)
| [
"numpy.random.seed",
"nnabla.get_parameters",
"sgd_influence_utils.utils.save_to_csv",
"sgd_influence_utils.utils.is_proto_graph",
"os.path.join",
"sgd_influence_utils.dataset.get_image_size",
"sgd_influence_utils.dataset.get_batch_indices",
"os.path.dirname",
"sgd_influence_utils.infl.save_infl_for... | [((1724, 1813), 'os.path.join', 'os.path.join', (['save_dir', "('epoch%02d' % (target_epoch - 1))", '"""weights"""', 'final_model_name'], {}), "(save_dir, 'epoch%02d' % (target_epoch - 1), 'weights',\n final_model_name)\n", (1736, 1813), False, 'import os\n'), ((1840, 1883), 'os.path.dirname', 'os.path.dirname', (["file_dir_dict['train_csv']"], {}), "(file_dir_dict['train_csv'])\n", (1855, 1883), False, 'import os\n'), ((1954, 2026), 'sgd_influence_utils.dataset.init_dataset', 'init_dataset', (["file_dir_dict['train_csv']", "file_dir_dict['val_csv']", 'seed'], {}), "(file_dir_dict['train_csv'], file_dir_dict['val_csv'], seed)\n", (1966, 2026), False, 'from sgd_influence_utils.dataset import get_batch_data, init_dataset, get_data, get_image_size, get_batch_indices\n'), ((2090, 2112), 'sgd_influence_utils.utils.get_indices', 'get_indices', (['ntr', 'seed'], {}), '(ntr, seed)\n', (2101, 2112), False, 'from sgd_influence_utils.utils import get_indices, save_to_csv, is_proto_graph\n'), ((2127, 2150), 'sgd_influence_utils.utils.get_indices', 'get_indices', (['nval', 'seed'], {}), '(nval, seed)\n', (2138, 2150), False, 'from sgd_influence_utils.utils import get_indices, save_to_csv, is_proto_graph\n'), ((2158, 2182), 'sgd_influence_utils.utils.is_proto_graph', 'is_proto_graph', (['net_func'], {}), '(net_func)\n', (2172, 2182), False, 'from sgd_influence_utils.utils import get_indices, save_to_csv, is_proto_graph\n'), ((2662, 2698), 'nnabla.load_parameters', 'nn.load_parameters', (['final_model_path'], {}), '(final_model_path)\n', (2680, 2698), True, 'import nnabla as nn\n'), ((2720, 2754), 'nnabla.get_parameters', 'nn.get_parameters', ([], {'grad_only': '(False)'}), '(grad_only=False)\n', (2737, 2754), True, 'import nnabla as nn\n'), ((2788, 2946), 'functools.partial', 'functools.partial', (['_setup_model'], {'network': 'net_func', 'n_classes': 'n_classes', 'n_channels': 'n_channels', 'resize_size': 'resize_size_val', 'test': 
'test', 'reduction': '"""sum"""'}), "(_setup_model, network=net_func, n_classes=n_classes,\n n_channels=n_channels, resize_size=resize_size_val, test=test,\n reduction='sum')\n", (2805, 2946), False, 'import functools\n'), ((3014, 3105), 'sgd_influence_utils.infl.compute_gradient', 'compute_gradient', (['grad_model', 'bsa', 'solver', 'valset', 'batch_size', 'idx_val', 'resize_size_val'], {}), '(grad_model, bsa, solver, valset, batch_size, idx_val,\n resize_size_val)\n', (3030, 3105), False, 'from sgd_influence_utils.infl import compute_gradient, save_infl_for_analysis\n'), ((5156, 5169), 'numpy.zeros', 'np.zeros', (['ntr'], {}), '(ntr)\n', (5164, 5169), True, 'import numpy as np\n'), ((6538, 6637), 'sgd_influence_utils.utils.save_to_csv', 'save_to_csv', ([], {'filename': 'infl_filename', 'header': 'header', 'list_to_save': 'infl_list', 'data_type': 'data_type'}), '(filename=infl_filename, header=header, list_to_save=infl_list,\n data_type=data_type)\n', (6549, 6637), False, 'from sgd_influence_utils.utils import get_indices, save_to_csv, is_proto_graph\n'), ((2394, 2453), 'functools.partial', 'functools.partial', (['setup_model'], {'net_name_dict': 'net_name_dict'}), '(setup_model, net_name_dict=net_name_dict)\n', (2411, 2453), False, 'import functools\n'), ((2505, 2529), 'sgd_influence_utils.dataset.get_image_size', 'get_image_size', (['(_h, _w)'], {}), '((_h, _w))\n', (2519, 2529), False, 'from sgd_influence_utils.dataset import get_batch_data, init_dataset, get_data, get_image_size, get_batch_indices\n'), ((2591, 2622), 'nnabla.solvers.Momentum', 'S.Momentum', ([], {'lr': 'lr', 'momentum': '(0.9)'}), '(lr=lr, momentum=0.9)\n', (2601, 2622), True, 'import nnabla.solvers as S\n'), ((3197, 3235), 'nnabla.Variable', 'nn.Variable', (['p.d.shape'], {'need_grad': '(True)'}), '(p.d.shape, need_grad=True)\n', (3208, 3235), True, 'import nnabla as nn\n'), ((3528, 3549), 'numpy.random.seed', 'np.random.seed', (['epoch'], {}), '(epoch)\n', (3542, 3549), True, 'import numpy 
as np\n'), ((3564, 3610), 'sgd_influence_utils.dataset.get_batch_indices', 'get_batch_indices', (['ntr', 'batch_size'], {'seed': 'epoch'}), '(ntr, batch_size, seed=epoch)\n', (3581, 3610), False, 'from sgd_influence_utils.dataset import get_batch_data, init_dataset, get_data, get_image_size, get_batch_indices\n'), ((5346, 5385), 'os.path.join', 'os.path.join', (['input_dir_name', 'file_name'], {}), '(input_dir_name, file_name)\n', (5358, 5385), False, 'import os\n'), ((5406, 5433), 'os.path.normpath', 'os.path.normpath', (['file_name'], {}), '(file_name)\n', (5422, 5433), False, 'import os\n'), ((5449, 5514), 'sgd_influence_utils.dataset.get_data', 'get_data', (['trainset', 'idx_train[i]', 'resize_size_train', '(True)'], {'seed': 'i'}), '(trainset, idx_train[i], resize_size_train, True, seed=i)\n', (5457, 5514), False, 'from sgd_influence_utils.dataset import get_batch_data, init_dataset, get_data, get_image_size, get_batch_indices\n'), ((6402, 6506), 'sgd_influence_utils.infl.save_infl_for_analysis', 'save_infl_for_analysis', (['infl_list', 'use_all_params', 'save_dir', 'infl_filename', 'epoch', 'header', 'data_type'], {}), '(infl_list, use_all_params, save_dir, infl_filename,\n epoch, header, data_type)\n', (6424, 6506), False, 'from sgd_influence_utils.infl import compute_gradient, save_infl_for_analysis\n'), ((3764, 3850), 'sgd_influence_utils.dataset.get_batch_data', 'get_batch_data', (['trainset', 'idx_train', 'i', 'resize_size_train'], {'test': 'test', 'seeds': 'seeds'}), '(trainset, idx_train, i, resize_size_train, test=test, seeds=\n seeds)\n', (3778, 3850), False, 'from sgd_influence_utils.dataset import get_batch_data, init_dataset, get_data, get_image_size, get_batch_indices\n'), ((4146, 4180), 'nnabla.get_parameters', 'nn.get_parameters', ([], {'grad_only': '(False)'}), '(grad_only=False)\n', (4163, 4180), True, 'import nnabla as nn\n'), ((4569, 4603), 'nnabla.get_parameters', 'nn.get_parameters', ([], {'grad_only': '(False)'}), '(grad_only=False)\n', 
(4586, 4603), True, 'import nnabla as nn\n'), ((5750, 5784), 'nnabla.get_parameters', 'nn.get_parameters', ([], {'grad_only': '(False)'}), '(grad_only=False)\n', (5767, 5784), True, 'import nnabla as nn\n'), ((5904, 5938), 'nnabla.get_parameters', 'nn.get_parameters', ([], {'grad_only': '(False)'}), '(grad_only=False)\n', (5921, 5938), True, 'import nnabla as nn\n'), ((3256, 3290), 'nnabla.get_parameters', 'nn.get_parameters', ([], {'grad_only': '(False)'}), '(grad_only=False)\n', (3273, 3290), True, 'import nnabla as nn\n'), ((4411, 4425), 'nnabla.functions.sum', 'F.sum', (['(vv * gp)'], {}), '(vv * gp)\n', (4416, 4425), True, 'import nnabla.functions as F\n'), ((4450, 4484), 'nnabla.get_parameters', 'nn.get_parameters', ([], {'grad_only': '(False)'}), '(grad_only=False)\n', (4467, 4484), True, 'import nnabla as nn\n'), ((4908, 4922), 'nnabla.functions.sum', 'F.sum', (['(uu * vv)'], {}), '(uu * vv)\n', (4913, 4922), True, 'import nnabla.functions as F\n'), ((4872, 4905), 'nnabla.functions.sum', 'F.sum', (['(vgp * vv + alpha * vv * vv)'], {}), '(vgp * vv + alpha * vv * vv)\n', (4877, 4905), True, 'import nnabla.functions as F\n')] |
import matplotlib
from matplotlib import pyplot as plt
import numpy as np
import pandas as pd
import statsmodels.api as sm
from patsy import dmatrices
from patsy import dmatrix
from scipy.optimize import minimize, curve_fit
import itertools
from matplotlib.ticker import PercentFormatter
import math
# test function
def test_function(data, a, b, c):
x = data[0]
y = data[1]
return a * (x**b) * (y**c)
def test_function2(data, a, b, c, d, e):
x = data[0]
y = data[1]
return a + b * x + c * y + d*x*y + e
def nonliner_function(data, a, b, c, d, e, f, g, h, k):
    """Quadratic response surface at ``data = [x, y]``.

    Evaluates ``a + b*x + c*y + d*x*y + f*x^2 + h*y^2 + g*(x*y)^2 + e``.
    Parameter ``k`` is accepted for signature compatibility but unused, and
    ``a``/``e`` are redundant additive constants.
    """
    x, y = data[0], data[1]
    # Accumulate term by term in the same order as the original expression.
    result = a
    result = result + b * x
    result = result + c * y
    result = result + d * x * y
    result = result + f * (x ** 2)
    result = result + h * (y ** 2)
    result = result + g * (x * y) ** 2
    result = result + e
    return result
def fit_data(fn, X, Y, Z):
    """Fit ``fn`` to scattered (X, Y, Z) samples and evaluate it on a grid.

    Returns ``(X_fit, Y_fit, Z_fit)``: 50x50 meshgrid coordinate arrays
    spanning the data range, plus the fitted surface evaluated on them.
    """
    # Least-squares parameter estimates from scipy; covariance is discarded.
    popt, _covariance = curve_fit(fn, [X, Y], Z)
    # Regular 50-point axes covering the observed data range.
    grid_x = np.linspace(min(X), max(X), 50)
    grid_y = np.linspace(min(Y), max(Y), 50)
    X_fit, Y_fit = np.meshgrid(grid_x, grid_y)
    # Evaluate the fitted model over the whole grid in one vectorized call.
    Z_fit = fn(np.array([X_fit, Y_fit]), *popt)
    return X_fit, Y_fit, Z_fit
def calc_main_effect(data, factor, response):
    """Main effect of *factor* on *response*.

    Computed as the mean response at the factor's maximum level minus the
    mean response at its minimum level.
    """
    hi_level = data[factor].max()
    lo_level = data[factor].min()

    def mean_response_at(level):
        # Average the response over rows where the factor sits at `level`.
        return data[data[factor] == level][response].mean()

    return mean_response_at(hi_level) - mean_response_at(lo_level)
def calc_interaction_effect(data, factor1, factor2, response):
    """Two-factor interaction effect of *factor1* x *factor2* on *response*.

    For each level of factor2, responses are signed by factor1 (+1 at its
    high level, -1 at its low level, 0 otherwise) and averaged; the effect
    is the signed mean at factor2's high level minus that at its low level.
    """
    hi1, lo1 = data[factor1].max(), data[factor1].min()
    hi2, lo2 = data[factor2].max(), data[factor2].min()

    def signed_mean(level2):
        # Rows where factor2 sits at `level2`.
        subset = data[data[factor2] == level2]
        # +1 / -1 signs from factor1 (anything else maps to 0).
        signs = np.select(
            [subset[factor1] == hi1, subset[factor1] == lo1], [1, -1])
        return (subset[response] * signs).mean()

    return signed_mean(hi2) - signed_mean(lo2)
def calc_effects(data, response_label, factor_labels):
    """All main effects followed by all pairwise interaction effects.

    Returns a flat list: one main effect per factor (in `factor_labels`
    order), then one interaction effect per factor pair (in
    itertools.combinations order).
    """
    main_effects = [
        calc_main_effect(data, label, response_label)
        for label in factor_labels
    ]
    interaction_effects = [
        calc_interaction_effect(data, first, second, response_label)
        for first, second in itertools.combinations(factor_labels, 2)
    ]
    return main_effects + interaction_effects
def _plot_response_surfaces(data, response_label, factor_labels):
    """Show an n x n grid of 3D response-surface plots, one per factor pair.

    Each axes scatters the raw (factor_i, factor_j, response) points and
    overlays a bilinear surface fitted with ``fit_data(test_function2, ...)``.
    Diagonal cells plot a factor against itself. Blocks until the matplotlib
    window is closed (``plt.show``).
    """
    # First assignment is dead — "viridis" immediately overrides "cool".
    cmap = "cool"
    cmap = "viridis"
    label_pad = 10
    axis_font_size = 12
    tick_font_size = 14
    n = len(factor_labels)
    fig_scale = 4
    fig, axs = plt.subplots(n, n, figsize=(fig_scale*n,fig_scale*n), constrained_layout=True, subplot_kw={'projection': '3d'})
    plt.suptitle(response_label + " Response Surfaces", fontsize=22)
    # build a rectangle in axes coords
    left, width = -0.33, 1.66
    bottom, height = -0.2, 1.4
    right = left + width
    top = bottom + height
    # Response values double as both z-coordinates and marker colors
    # (log-scaled for the colormap).
    Z = data[response_label]
    C = data[response_label]
    ln_C = np.log(C)
    S = np.full((len(Z),), 100)
    for i, row in enumerate(axs):
        for j, ax in enumerate(row):
            factor_A_label = factor_labels[i]
            factor_B_label = factor_labels[j]
            X = data[factor_A_label]
            Y = data[factor_B_label]
            # Bilinear surface fit over this factor pair.
            fit = fit_data(test_function2, X, Y, Z)
            ax.plot_surface(*fit, alpha=0.5)
            ax.scatter(X, Y, Z, s=S, c=ln_C, label = C, cmap=cmap, alpha=1.0)
            # handles1, labels1 = scatter1.legend_elements(prop="colors")
            # legend1 = ax.legend(handles1, labels1, loc="lower right", title=c_label)
            # ax.set_title("Maximum CNTF Height Response", fontsize=18, pad=20)
            ax.view_init(15, 60)
            ax.tick_params(axis='both', which='major', labelsize=tick_font_size)
            ax.set_xlabel(factor_A_label, fontsize=axis_font_size, labelpad=label_pad)
            ax.set_ylabel(factor_B_label, fontsize=axis_font_size, labelpad=label_pad)
            ax.set_zlabel(response_label, fontsize=axis_font_size)
            ax.invert_xaxis()
            # handles, labels = scatter.legend_elements(prop="sizes", alpha=0.6)
            # legend2 = ax.legend(handles, labels, loc="upper right", title="Sizes")
            # for i in range(len(X)): #plot each point + it's index as text above
            # ax.text(X[i]+0.5, Y[i]+0.5, Z[i], f'Flow: {C[i]}', size=16, zorder=1, color='k', weight='bold')
    plt.show()
def linear(x, m, b):
    """Straight-line model ``m*x + b`` (used as a curve_fit target)."""
    return b + m * x
def _plot_effect_grid(data, response_label, factor_labels):
    """Show an n x n grid of effect plots for a factorial experiment.

    Diagonal cells (i == j): scatter of response vs. the factor with a
    dashed least-squares line. Off-diagonal cells: interaction plot of mean
    responses at the four high/low level combinations of the two factors.
    Blocks until the matplotlib window is closed (``plt.show``).
    """
    fontsize = 16
    n = len(factor_labels)
    fig_scale = 4
    fig, axs = plt.subplots(n, n, figsize=(fig_scale*n,fig_scale*n))
    y = data[response_label]
    C = data[response_label]
    # build a rectangle in axes coords
    left, width = -0.33, 1.66
    bottom, height = -0.2, 1.4
    right = left + width
    top = bottom + height
    for i, row in enumerate(axs):
        for j, ax in enumerate(row):
            factor = factor_labels[i]
            if i == j:
                # main effects
                x = data[factor]
                # linear fit
                param, param_cov = curve_fit(linear, x, y)
                # ans stores the new y-data according to
                # the coefficients given by curve-fit() function
                # NOTE(review): `linear` computes m*x + b with (x, m, b), so
                # param[0] is the slope here.
                ans = param[0]*x + param[1]
                # print(param[0], param[1])
                # z = np.polyfit(x, y1, 1)
                # p = np.poly1d(z)
                # print(z)
                scatter = ax.scatter(x, y, s=100, c=C)
                ax.plot(x, ans, linestyle='dashed', color ='black', label='linear fit')
                # ax.text(1.0, 0.2, f'Intercept = {param[0]:.2}, Slope = {param[1]:.2}', fontsize = 11)
                # ax.set_ylim(ymin=0, ymax=200)
                ax.set_xlabel(factor)
                ax.set_ylabel(response_label)
                ax.set_title(f'{factor}')
                ax.legend(loc='best')
                # fig.colorbar(scatter, label=response_2_label)
                # Row/column header text placed outside the axes.
                if i == 0:
                    ax.text(0.5*(left+right), top, factor,
                        horizontalalignment='center',
                        verticalalignment='center',
                        transform=ax.transAxes,
                        fontsize=fontsize,
                        fontweight='bold')
                    ax.text(left, 0.5*(bottom+top), factor,
                        horizontalalignment='center',
                        verticalalignment='center',
                        rotation=90,
                        transform=ax.transAxes,
                        fontsize=fontsize,
                        fontweight='bold')
            else:
                # interaction effects
                factor1 = factor
                factor2 = factor_labels[j]
                high1 = data[factor1].max()
                low1 = data[factor1].min()
                high2 = data[factor2].max()
                low2 = data[factor2].min()
                # Mean response in each of the four corner cells of the
                # 2x2 (factor1 level) x (factor2 level) design.
                maskHighAHighB = (data[factor1] == high1) & (data[factor2] == high2)
                maskHighALowB = (data[factor1] == high1) & (data[factor2] == low2)
                maskLowAHighB = (data[factor1] == low1) & (data[factor2] == high2)
                maskLowALowB = (data[factor1] == low1) & (data[factor2] == low2)
                y1 = data[maskLowAHighB][response_label].mean()
                y2 = data[maskLowALowB][response_label].mean()
                y3 = data[maskHighAHighB][response_label].mean()
                y4 = data[maskHighALowB][response_label].mean()
                # Non-parallel lines indicate an interaction.
                ax.plot([low1, high1], [y1,y3], label=f'+ {factor2}')
                ax.plot([low1, high1], [y2,y4], label=f'- {factor2}')
                ax.set_xlabel(f'{factor1}')
                ax.set_ylabel(response_label)
                ax.set_title(f'{factor1}:{factor2}')
                ax.legend(loc='best')
                if i == 0:
                    ax.text(0.5*(left+right), top, factor2,
                        horizontalalignment='center',
                        verticalalignment='center',
                        transform=ax.transAxes,
                        fontsize=fontsize,
                        fontweight='bold')
                if j == 0:
                    ax.text(left, 0.5*(bottom+top), factor,
                        horizontalalignment='center',
                        verticalalignment='center',
                        rotation=90,
                        transform=ax.transAxes,
                        fontsize=fontsize,
                        fontweight='bold')
    fig.tight_layout()
    plt.show()
def _Pareto_plot(effects, factor_labels, xlabel, ylabel):
    """Show a Pareto chart of effect magnitudes with a cumulative-% line.

    Bars are |effect| sorted descending, colored royalblue for positive and
    gray for negative effects; a second axis plots the cumulative share of
    total magnitude. Blocks until the window is closed (``plt.show``).
    """
    # Color/sign bookkeeping: one entry per effect, by sign.
    colors = []
    signs = []
    for effect in effects:
        if effect < 0:
            colors.append('gray')
            signs.append(-1)
        else:
            colors.append('royalblue')
            signs.append(1)
    effects_df = pd.DataFrame({
        'factors': factor_labels,
        'effects': np.abs(effects),
        'signs': signs,
        'colors': colors
    })
    # Largest-magnitude effects first (Pareto ordering).
    effects_df_sorted = effects_df.sort_values('effects', ascending=False)
    x = effects_df_sorted['factors'].values
    y = effects_df_sorted['effects'].values
    c = effects_df_sorted['colors'].values
    # NOTE(review): `s` is assigned but never used below.
    s = effects_df_sorted['signs'].values
    # Cumulative share of total effect magnitude for the Pareto line.
    weights = y / y.sum()
    cumsum = weights.cumsum()
    fig, ax1 = plt.subplots()
    legend_labels = ['Positive Sign','Negative Sign']
    handles = [plt.Rectangle((0,0), 1,1, color='royalblue'), plt.Rectangle((0,0), 1,1, color='gray')]
    plt.legend(handles, legend_labels, loc='center right')
    ax1.bar(x, y, color = c)
    ax1.set_xlabel(xlabel)
    ax1.set_ylabel(ylabel)
    # Twin axis carries the cumulative-percentage line.
    ax2 = ax1.twinx()
    ax2.plot(x, cumsum, '-ro', alpha=0.5)
    ax2.set_ylabel('', color='r')
    ax2.tick_params('y', colors='r')
    # NOTE(review): `vals` is assigned but never used below.
    vals = ax2.get_yticks()
    ax2.yaxis.set_major_formatter(PercentFormatter())
    # hide y-labels on right side
    show_pct_y = False
    if not show_pct_y:
        ax2.set_yticks([])
    # Annotate each point with its cumulative percentage instead of ticks.
    pct_format='{0:.0%}'
    formatted_weights = [pct_format.format(x) for x in cumsum]
    for i, txt in enumerate(formatted_weights):
        ax2.annotate(txt, (x[i], cumsum[i]), fontweight='heavy')
    title = f'Pareto plot of Main and Interaction Effects on Response'
    if title:
        plt.title(title)
    plt.tight_layout()
    plt.show()
class factorial_doe:
    """
    This class facilitates the analysis of full factorial and/or central composite design experiments.

    Workflow: construct with a DataFrame, call ``select_data`` and
    ``select_responses`` to build the coded design matrix, ``define_model``
    and ``fit_models`` to fit OLS response models, then ``model_predict`` /
    ``model_optimize`` for evaluation and the ``plot_*`` / ``pareto_plot``
    methods for graphical analysis.
    """
    def __init__(self, data):
        """
        Inputs:
            data: Pandas DataFrame. Contains the data for the experiments, including the factors and the responses.
        """
        # Generic single-letter labels assigned to factors in selection order.
        self.letters = ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S']
        self.data_raw = data
        # Coded (design-space) copy of selected factors plus coded responses.
        self.data = pd.DataFrame()
        self.factor_names = []
        self.factor_labels = []
        self.factor_lookup = dict()
        self.factor_ranges = []
        self.models = []
        self.results = []
        self.results_details = []
    def encode(self, value, lower, upper):
        """
        Converts factor values into their design space equivalent values.

        Maps [lower, upper] linearly onto [-1, +1].
        """
        # Convert tuples to numpy arrays, perform elementwise operations, then convert result back to a tuple to preserve typing.
        if type(value) is tuple:
            value = np.array(value)
            out = (value - (lower+upper)/2) / ((upper-lower)/2)
            out = tuple(out)
        # Perform elementwise operations on single values or numpy arrays and preserve typing.
        else:
            out = (value - (lower+upper)/2) / ((upper-lower)/2)
        return out
    def decode(self, value_coded, lower, upper):
        """
        Converts factor values from design space to experimental values.

        Inverse of ``encode``: maps [-1, +1] back onto [lower, upper].
        """
        # Convert tuples to numpy arrays, perform elementwise operations, then convert result back to a tuple to preserve typing.
        if type(value_coded) is tuple:
            value_coded = np.array(value_coded)
            out = value_coded * (upper-lower)/2 + (upper+lower)/2
            out = tuple(out)
        # Perform elementwise operations on single values or numpy arrays and preserve typing.
        else:
            out = value_coded * (upper-lower)/2 + (upper+lower)/2
        return out
    def select_data(self, factor_names, data_bounds = None):
        """
        Converts user-specified columns of data to their design space values and creates a new dataframe to hold their values.
        Inputs:
            factor_names: list of strings. Contains the column headers that correspond to the desired factors to be analyzed.
            data_bounds: dict. Specifies the upper and lower bounds for the values each factor can take. Contains a key:value pair for each factor of the form: 'factor_name': [lower_bound, upper_bound]. When omitted, the observed min/max of each column are used.
        """
        self.factor_names = factor_names
        # Convert each factor's values to design space values and append to dataframe, with generic factor label.
        for i, factor_name in enumerate(factor_names):
            if data_bounds is None:
                bound_low = np.min(self.data_raw[factor_name].values)
                bound_high = np.max(self.data_raw[factor_name].values)
            else:
                bound_low = data_bounds[factor_name][0]
                bound_high = data_bounds[factor_name][1]
            # Add a new column to dataframe containing the encoded factor values.
            self.data[self.letters[i]] = self.encode(self.data_raw[factor_name].values, bound_low, bound_high)
            # Append factor label to list of labels for future reference.
            self.factor_labels.append(self.letters[i])
            # Add factor name to a dictionary for later use in translating the generic label.
            self.factor_lookup[self.letters[i]] = factor_name
            # Record the min and max ranges for the factor, for use in conversion later.
            self.factor_ranges.append((bound_low, bound_high))
    def select_responses(self, response_names):
        """
        Specifies which column(s) of data can be used as the response for the analysis.
        Input:
            response_name: list of strings. Strings should be the column headers for the desired data. Responses are stored under generic labels 'Y0', 'Y1', ...
        """
        for i, response_name in enumerate(response_names):
            self.data[f'Y{i}'] = self.data_raw[response_name].values
    def encode_factors(self, factors):
        """
        Convert a complete set of factors from experimental values to their corresponding design space values.
        Inputs:
            factors: list-like. Contains the set of factors to be converted. Each element should represent a separate factor.
        """
        encoded_values = []
        for factor, bounds in zip(factors, self.factor_ranges):
            encoded_values.append(self.encode(factor, bounds[0], bounds[1]))
        return encoded_values
    def decode_factors(self, factors):
        """
        Convert a complete set of factors from design space values to their corresponding experimental values.
        Inputs:
            factors: list-like. Contains the set of factors to be converted. Each element should represent a separate factor.
        """
        decoded_values = []
        for factor, bounds in zip(factors, self.factor_ranges):
            decoded_values.append(self.decode(factor, bounds[0], bounds[1]))
        return decoded_values
    def define_model(self, model_types = ['linear'], response = 'Y0'):
        """
        Generates a model string and formula for use in least squares fitting and subsequent model evaluation.
        Inputs:
            model_types: list of strings. Options are: 'linear', 'interaction', and 'quadratic'.
            response: string. Specifies which coded response should be used for fitting the model under construction.
        """
        # NOTE(review): mutable default argument `model_types=['linear']` —
        # safe only because the list is never mutated here.
        # Initialize a dictionary to hold model info
        model_dict = dict()
        # Initialize the model string.
        model_string = ''
        # Generate the model string
        for model_type in model_types:
            # Add terms that are linear in the factors (example: 'A')
            if model_type == 'linear':
                for index, label in enumerate(self.factor_labels):
                    model_string = model_string + f'{label} + '
            # Add interaction terms (example: A:B)
            if model_type == 'interaction':
                for index, label in enumerate(self.factor_labels):
                    for i in range(index+1, len(self.factor_labels)):
                        model_string = model_string + f'{self.factor_labels[index]}:{self.factor_labels[i]} + '
            # Add terms that are quadratic in the factors (example: A^2)
            if model_type == 'quadratic':
                for index, label in enumerate(self.factor_labels):
                    model_string = model_string + f'np.power({label},2) + '
        # Remove the extra ' + ' from the end of the model string.
        model_string = model_string[:-3]
        # Store for use in model evaluation in the future.
        model_dict['model_string'] = model_string
        # Store for model fitting in the future.
        model_dict['formula'] = f'{response} ~ {model_string}'
        self.models.append(model_dict)
    def fit_models(self):
        """
        Performs least-squares fitting of the model to previously-specified data.

        Uses patsy to build design matrices from each stored formula and
        statsmodels OLS for the fit; the fitted result is stored under the
        'model_fit' key of each model dict.
        """
        for model in self.models:
            y, X = dmatrices(model['formula'], data=self.data, return_type='dataframe')
            mod = sm.OLS(y, X)
            model['model_fit'] = mod.fit()
    def model_predict(self,factor_values, model_index = 0, guess = None):
        """
        Evaluates a fitted model at a specified set of input values.
        Inputs:
            factor_values: list. Contains the input values to feed into the model. Should be the same length as the number of independent variables in the model.
            model_index: int. Index into ``self.models`` selecting which fitted model to evaluate.
            guess: unused. See review note below.
        """
        # NOTE(review): `guess` is computed here but never used afterwards,
        # and `guess == None` should be `guess is None` — confirm intent.
        if guess == None:
            guess = np.zeros_like(factor_values)
        x1 = pd.DataFrame(data=[factor_values],columns=self.factor_labels)
        X = dmatrix(self.models[model_index]['model_string'], data=x1, return_type='dataframe')
        return self.models[model_index]['model_fit'].predict(exog =X).values[0]
    def model_optimize(self, guess = None, maximize = False, bounds = None):
        """
        Find a factor values that give a local optimum of the model for the response surface. The default behavior is to find the minimum value of the function.
        Inputs:
            guess: list-like containing ints or floats. Optional. Specifies an initial guess for the optimizer. Should contain one element for each of the factors.
            maximize: bool. Specifies that the optimizer should find the maximum value rather than the minimum.
            bounds: list-like of (low, high) pairs in experimental units; converted to design-space values before optimizing.
        """
        # Provide an initial guess of the optimum coordinates if one is given.
        # NOTE(review): `guess == None` should be `guess is None`.
        if guess == None:
            guess = np.zeros((1,len(self.factor_labels)))
        # Convert the bounds for optimization parameters to design space value equivalents if bounds are given.
        if bounds is not None:
            bounds = self.encode_factors(bounds)
        for index in range(len(self.models)):
            # (-1)**maximize flips the sign so minimize() maximizes when asked.
            opt = minimize(
                lambda factors: (-1)**(maximize) * self.model_predict(factors, model_index = index),
                guess,
                bounds = bounds)
            self.results_details.append(opt)
            # Store the optimum in experimental (decoded) units.
            self.results.append(self.decode_factors(opt.x))
    # ANALYSIS
    def plot_response_surfaces(self, response_name):
        # Delegates to the module-level grid-of-3D-surfaces plot.
        _plot_response_surfaces(self.data_raw, response_name, self.factor_names)
    def plot_effect_grid(self, response_name):
        # Delegates to the module-level main/interaction effect grid plot.
        _plot_effect_grid(self.data_raw, response_name, self.factor_names)
    def pareto_plot(self, response_name):
        # Pareto chart of main + pairwise interaction effect magnitudes.
        effects = calc_effects(self.data_raw, response_name, self.factor_names)
        xlabel = 'Factors'
        ylabel = f'Magnitude of Effect on {response_name}'
        factor_letter_pairs = itertools.combinations(self.factor_labels, 2)
        factor_letters = self.factor_labels + list(factor_letter_pairs)
        _Pareto_plot(effects, factor_letters, xlabel, ylabel)
"matplotlib.pyplot.title",
"numpy.abs",
"statsmodels.api.OLS",
"matplotlib.pyplot.suptitle",
"matplotlib.pyplot.tight_layout",
"pandas.DataFrame",
"patsy.dmatrices",
"numpy.meshgrid",
"numpy.zeros_like",
"numpy.max",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.show",
"matplotlib.pyplot.... | [((797, 821), 'scipy.optimize.curve_fit', 'curve_fit', (['fn', '[X, Y]', 'Z'], {}), '(fn, [X, Y], Z)\n', (806, 821), False, 'from scipy.optimize import minimize, curve_fit\n'), ((1092, 1131), 'numpy.meshgrid', 'np.meshgrid', (['model_x_data', 'model_y_data'], {}), '(model_x_data, model_y_data)\n', (1103, 1131), True, 'import numpy as np\n'), ((2676, 2716), 'itertools.combinations', 'itertools.combinations', (['factor_labels', '(2)'], {}), '(factor_labels, 2)\n', (2698, 2716), False, 'import itertools\n'), ((3316, 3436), 'matplotlib.pyplot.subplots', 'plt.subplots', (['n', 'n'], {'figsize': '(fig_scale * n, fig_scale * n)', 'constrained_layout': '(True)', 'subplot_kw': "{'projection': '3d'}"}), "(n, n, figsize=(fig_scale * n, fig_scale * n),\n constrained_layout=True, subplot_kw={'projection': '3d'})\n", (3328, 3436), True, 'from matplotlib import pyplot as plt\n'), ((3436, 3500), 'matplotlib.pyplot.suptitle', 'plt.suptitle', (["(response_label + ' Response Surfaces')"], {'fontsize': '(22)'}), "(response_label + ' Response Surfaces', fontsize=22)\n", (3448, 3500), True, 'from matplotlib import pyplot as plt\n'), ((3755, 3764), 'numpy.log', 'np.log', (['C'], {}), '(C)\n', (3761, 3764), True, 'import numpy as np\n'), ((5302, 5312), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5310, 5312), True, 'from matplotlib import pyplot as plt\n'), ((5493, 5551), 'matplotlib.pyplot.subplots', 'plt.subplots', (['n', 'n'], {'figsize': '(fig_scale * n, fig_scale * n)'}), '(n, n, figsize=(fig_scale * n, fig_scale * n))\n', (5505, 5551), True, 'from matplotlib import pyplot as plt\n'), ((9582, 9592), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (9590, 9592), True, 'from matplotlib import pyplot as plt\n'), ((10421, 10435), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (10433, 10435), True, 'from matplotlib import pyplot as plt\n'), ((10596, 10650), 'matplotlib.pyplot.legend', 'plt.legend', (['handles', 'legend_labels'], 
{'loc': '"""center right"""'}), "(handles, legend_labels, loc='center right')\n", (10606, 10650), True, 'from matplotlib import pyplot as plt\n'), ((11384, 11402), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (11400, 11402), True, 'from matplotlib import pyplot as plt\n'), ((11407, 11417), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (11415, 11417), True, 'from matplotlib import pyplot as plt\n'), ((1182, 1206), 'numpy.array', 'np.array', (['[X_fit, Y_fit]'], {}), '([X_fit, Y_fit])\n', (1190, 1206), True, 'import numpy as np\n'), ((2203, 2233), 'numpy.select', 'np.select', (['conditions', 'choices'], {}), '(conditions, choices)\n', (2212, 2233), True, 'import numpy as np\n'), ((10505, 10551), 'matplotlib.pyplot.Rectangle', 'plt.Rectangle', (['(0, 0)', '(1)', '(1)'], {'color': '"""royalblue"""'}), "((0, 0), 1, 1, color='royalblue')\n", (10518, 10551), True, 'from matplotlib import pyplot as plt\n'), ((10551, 10592), 'matplotlib.pyplot.Rectangle', 'plt.Rectangle', (['(0, 0)', '(1)', '(1)'], {'color': '"""gray"""'}), "((0, 0), 1, 1, color='gray')\n", (10564, 10592), True, 'from matplotlib import pyplot as plt\n'), ((10934, 10952), 'matplotlib.ticker.PercentFormatter', 'PercentFormatter', ([], {}), '()\n', (10950, 10952), False, 'from matplotlib.ticker import PercentFormatter\n'), ((11362, 11378), 'matplotlib.pyplot.title', 'plt.title', (['title'], {}), '(title)\n', (11371, 11378), True, 'from matplotlib import pyplot as plt\n'), ((11922, 11936), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (11934, 11936), True, 'import pandas as pd\n'), ((19326, 19388), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': '[factor_values]', 'columns': 'self.factor_labels'}), '(data=[factor_values], columns=self.factor_labels)\n', (19338, 19388), True, 'import pandas as pd\n'), ((19400, 19488), 'patsy.dmatrix', 'dmatrix', (["self.models[model_index]['model_string']"], {'data': 'x1', 'return_type': '"""dataframe"""'}), 
"(self.models[model_index]['model_string'], data=x1, return_type=\n 'dataframe')\n", (19407, 19488), False, 'from patsy import dmatrix\n'), ((21381, 21426), 'itertools.combinations', 'itertools.combinations', (['self.factor_labels', '(2)'], {}), '(self.factor_labels, 2)\n', (21403, 21426), False, 'import itertools\n'), ((9987, 10002), 'numpy.abs', 'np.abs', (['effects'], {}), '(effects)\n', (9993, 10002), True, 'import numpy as np\n'), ((12487, 12502), 'numpy.array', 'np.array', (['value'], {}), '(value)\n', (12495, 12502), True, 'import numpy as np\n'), ((13131, 13152), 'numpy.array', 'np.array', (['value_coded'], {}), '(value_coded)\n', (13139, 13152), True, 'import numpy as np\n'), ((18736, 18804), 'patsy.dmatrices', 'dmatrices', (["model['formula']"], {'data': 'self.data', 'return_type': '"""dataframe"""'}), "(model['formula'], data=self.data, return_type='dataframe')\n", (18745, 18804), False, 'from patsy import dmatrices\n'), ((18823, 18835), 'statsmodels.api.OLS', 'sm.OLS', (['y', 'X'], {}), '(y, X)\n', (18829, 18835), True, 'import statsmodels.api as sm\n'), ((19284, 19312), 'numpy.zeros_like', 'np.zeros_like', (['factor_values'], {}), '(factor_values)\n', (19297, 19312), True, 'import numpy as np\n'), ((6020, 6043), 'scipy.optimize.curve_fit', 'curve_fit', (['linear', 'x', 'y'], {}), '(linear, x, y)\n', (6029, 6043), False, 'from scipy.optimize import minimize, curve_fit\n'), ((14281, 14322), 'numpy.min', 'np.min', (['self.data_raw[factor_name].values'], {}), '(self.data_raw[factor_name].values)\n', (14287, 14322), True, 'import numpy as np\n'), ((14352, 14393), 'numpy.max', 'np.max', (['self.data_raw[factor_name].values'], {}), '(self.data_raw[factor_name].values)\n', (14358, 14393), True, 'import numpy as np\n')] |
import random
import time
import unittest
import numpy as np
def add_scalar(writer, mode, tag, num_steps, skip):
    """Record a random scalar under `tag`, once every `skip` steps.

    Steps run from 0 up to (but excluding) `num_steps`; only multiples
    of `skip` receive a record.
    """
    with writer.mode(mode) as mode_writer:
        scalar_writer = mode_writer.scalar(tag)
        # Equivalent to iterating all steps and keeping those with step % skip == 0.
        for step in range(0, num_steps, skip):
            scalar_writer.add_record(step, random.random())
def add_image(writer,
              mode,
              tag,
              num_samples,
              num_passes,
              step_cycle,
              shape=[50, 50, 3]):
    """Write `num_passes` rounds of randomly generated image samples.

    Each pass opens a sampling window, feeds 2 * num_samples random
    images of the given `shape`, then closes the window.

    NOTE: `shape` keeps its original mutable list default for backward
    compatibility; it is never mutated here.
    """
    with writer.mode(mode) as writer_:
        image_writer = writer_.image(tag, num_samples, step_cycle)
        # BUGFIX: the original used Python-2-only `xrange`, which raises
        # NameError under Python 3; `range` is the drop-in replacement.
        for pass_ in range(num_passes):
            image_writer.start_sampling()
            # Feed twice as many candidates as reservoir slots so that
            # sampling actually has to select among them.
            for ins in range(2 * num_samples):
                # Random pixel data scaled to the usual 8-bit intensity range.
                data = np.random.random(shape) * 256
                data = np.ndarray.flatten(data)
                image_writer.add_sample(shape, list(data))
            image_writer.finish_sampling()
def add_histogram(writer, mode, tag, num_buckets):
    """Record ten normally distributed samples into a histogram `tag`.

    Step i draws 1000 values from N(0.1 + 0.01 * i, 1).
    """
    # Bind the mode writer to its own name instead of shadowing `writer`.
    with writer.mode(mode) as mode_writer:
        histogram = mode_writer.histogram(tag, num_buckets)
        for step in range(10):
            samples = np.random.normal(0.1 + step * 0.01, size=1000)
            histogram.add_record(step, samples)
| [
"random.random",
"numpy.random.random",
"numpy.random.normal",
"numpy.ndarray.flatten"
] | [((805, 829), 'numpy.ndarray.flatten', 'np.ndarray.flatten', (['data'], {}), '(data)\n', (823, 829), True, 'import numpy as np\n'), ((1142, 1185), 'numpy.random.normal', 'np.random.normal', (['(0.1 + i * 0.01)'], {'size': '(1000)'}), '(0.1 + i * 0.01, size=1000)\n', (1158, 1185), True, 'import numpy as np\n'), ((298, 313), 'random.random', 'random.random', ([], {}), '()\n', (311, 313), False, 'import random\n'), ((752, 775), 'numpy.random.random', 'np.random.random', (['shape'], {}), '(shape)\n', (768, 775), True, 'import numpy as np\n')] |
# Lint as: python3
"""Tests for epi_forecast_stat_mech.sparse_estimator."""
import functools
from absl.testing import absltest
from epi_forecast_stat_mech import sparse
from epi_forecast_stat_mech import sparse_estimator
from epi_forecast_stat_mech.tests import test_high_level
import numpy as np
class TestHighLevelSparseEstimator(absltest.TestCase):
  """Tests for Sparse high_level module."""

  def test_SparseEstimator(self):
    """Verify we can fit and predict from SparseEstimator."""
    # 11 'roll out' samples keeps the prediction check cheap but non-trivial.
    sample_count = 11
    train_data, test_data = test_high_level.create_synthetic_dataset()
    # Small optimizer budget and a coarse penalty grid make the test fast.
    fast_optimizer = functools.partial(sparse._adam_optim, max_iter=10)
    penalty_grid = np.exp(np.linspace(np.log(.1), np.log(1000.), num=5))
    estimator = sparse_estimator.SparseEstimator(
        initializer=sparse.predefined_constant_initializer,
        optimizer=fast_optimizer,
        penalty_factor_grid=penalty_grid)
    estimator = estimator.fit(train_data)
    predictions = estimator.predict(test_data, sample_count)
    self.assertCountEqual(['location', 'sample', 'time'], predictions.dims)
    np.testing.assert_array_equal(predictions.time, test_data.time)
    np.testing.assert_array_equal(train_data.location, predictions.location)
    self.assertLen(predictions.sample, sample_count)
    _ = estimator.alpha.to_netcdf()
    _ = estimator.intercept.to_netcdf()
    _ = estimator.mech_params.to_netcdf()
    _ = estimator.mech_params_hat.to_netcdf()
if __name__ == '__main__':
  # Run the absl test runner when executed as a script.
  absltest.main()
| [
"absl.testing.absltest.main",
"functools.partial",
"numpy.log",
"numpy.testing.assert_array_equal",
"epi_forecast_stat_mech.tests.test_high_level.create_synthetic_dataset"
] | [((1502, 1517), 'absl.testing.absltest.main', 'absltest.main', ([], {}), '()\n', (1515, 1517), False, 'from absl.testing import absltest\n'), ((581, 623), 'epi_forecast_stat_mech.tests.test_high_level.create_synthetic_dataset', 'test_high_level.create_synthetic_dataset', ([], {}), '()\n', (621, 623), False, 'from epi_forecast_stat_mech.tests import test_high_level\n'), ((1114, 1177), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['predictions.time', 'test_data.time'], {}), '(predictions.time, test_data.time)\n', (1143, 1177), True, 'import numpy as np\n'), ((1182, 1254), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['train_data.location', 'predictions.location'], {}), '(train_data.location, predictions.location)\n', (1211, 1254), True, 'import numpy as np\n'), ((810, 860), 'functools.partial', 'functools.partial', (['sparse._adam_optim'], {'max_iter': '(10)'}), '(sparse._adam_optim, max_iter=10)\n', (827, 860), False, 'import functools\n'), ((922, 933), 'numpy.log', 'np.log', (['(0.1)'], {}), '(0.1)\n', (928, 933), True, 'import numpy as np\n'), ((934, 948), 'numpy.log', 'np.log', (['(1000.0)'], {}), '(1000.0)\n', (940, 948), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
import xlsxwriter
import numpy as np
import argparse
import gemmi
import yaml
import glob
import os
import mdtraj as md
from collections import OrderedDict
from rdkit import Chem
from . import analysis_engine
_KJ_2_KCAL = 1./4.184  # kJ/mol -> kcal/mol conversion factor
_NM_2_ANG = 10.  # nanometers -> Angstrom
_RAD_2_DEG = 180./np.pi  # radians -> degrees
# Gas constant R expressed in kcal/(mol*K): 8.314... J/(mol*K) converted to kcal.
_GASCONST_KCAL = 8.31446261815324 * _KJ_2_KCAL / 1000.
def parse_arguments():
    """Build the CLI parser and return the parsed command line arguments.

    Requires an input yaml path (`--input`) and an output xlsx path
    (`--output`).
    """
    parser = argparse.ArgumentParser(
        description="Python script for merging simulation data for xtal MD project."
    )
    parser.add_argument(
        "--input", "-i", type=str, required=True, help="Input yaml file.")
    parser.add_argument(
        "--output", "-o", type=str, required=True, help="Output xlsx MS Excel file")
    return parser.parse_args()
def read_csv(csv_path):
    """Read a csv file; save each column as key:value pair in a dict.

    The first line is the header (an optional leading '#' is stripped);
    its comma-separated fields become the dict keys. All data values are
    parsed as floats and stored as numpy arrays.

    Two bookkeeping entries are added:
      "N_rows"    : number of data rows per column
      "N_columns" : number of columns
    Both are 0 for an empty file.
    """
    data_dict = OrderedDict()
    header_dict = OrderedDict()
    with open(csv_path, "r") as fopen:
        read_first_line = True
        for line in fopen:
            if read_first_line:
                if line[0] == "#":
                    line = line[1:]
                line = line.lstrip().rstrip().split(",")
                for column, key in enumerate(line):
                    data_dict[key] = list()
                    header_dict[column] = key
                read_first_line = False
                continue
            else:
                line = line.lstrip().rstrip().split(",")
                for column in header_dict:
                    key = header_dict[column]
                    value = line[column]
                    data_dict[key].append(float(value))
    ### BUGFIX: the original inserted "N_rows" into data_dict while
    ### iterating over it, which raises "dictionary changed size during
    ### iteration" in Python 3. Snapshot the column keys first, convert,
    ### then add the bookkeeping entries.
    column_keys = list(data_dict.keys())
    for key in column_keys:
        data_dict[key] = np.array(data_dict[key])
    if column_keys:
        ### All columns have equal length; use the first one for the row count.
        data_dict["N_rows"] = data_dict[column_keys[0]].size
        ### BUGFIX: N_columns is the number of columns, not the length of a
        ### column array (which the original reported).
        data_dict["N_columns"] = len(column_keys)
    else:
        data_dict["N_rows"] = 0
        data_dict["N_columns"] = 0
    return data_dict
class WorkbookWrapper(object):
    """Convenience wrapper around an xlsxwriter Workbook.

    One worksheet is created per crystal. Each force field occupies two
    columns (mean value and % deviation); a final "Expt." column pair is
    (re)written whenever a force field is added. The worksheet row for
    every reported property is fixed in `labels_dict_row`.
    """

    def __init__(self, output):
        """
        Constructor for the whole thing. `output` is the xlsx output path on disk.
        """
        self.workbook = xlsxwriter.Workbook(output)

        ### Define some formats for later use
        ### =================================
        # Bold header with a thick top/bottom border (crystal / FF titles).
        self.header_format_1 = self.workbook.add_format(
            {
                'bold'   : 1,
                'border' : 2,
                'align'  : 'center',
                'valign' : 'vcenter',
                'bottom' : True,
                'top'    : True,
                'left'   : False,
                'right'  : False,
            }
        )
        # Bold header with a thin border (sub-header row: "< >", "% Dev").
        self.header_format_2 = self.workbook.add_format(
            {
                'bold'   : 1,
                'border' : 1,
                'align'  : 'center',
                'valign' : 'vcenter',
                'bottom' : True,
                'top'    : True,
                'left'   : False,
                'right'  : False,
            }
        )
        # Numeric data cell, two decimal places.
        self.data_format_1 = self.workbook.add_format(
            {
                'align'      : 'center',
                'valign'     : 'vcenter',
                'num_format' : '#,##0.00',
            }
        )
        # Italic cell used for section sub-titles.
        self.data_format_2 = self.workbook.add_format(
            {
                'align'  : 'center',
                'valign' : 'vcenter',
                'italic' : 1,
            }
        )

        # crystal name -> worksheet object
        self.worksheet_dict = OrderedDict()
        # crystal name -> ordered list of force field names added so far
        self.force_field_dict = OrderedDict()

        ### Fixed worksheet row for each reported property. Note that the
        ### Greek and spelled-out unit-cell-angle labels map to the same rows.
        self.labels_dict_row = OrderedDict()
        self.labels_dict_row["Sublimation Energy"] = 3
        self.labels_dict_row["a"] = 4
        self.labels_dict_row["b"] = 5
        self.labels_dict_row["c"] = 6
        self.labels_dict_row["α"] = 7
        self.labels_dict_row["β"] = 8
        self.labels_dict_row["γ"] = 9
        self.labels_dict_row["alpha"] = 7
        self.labels_dict_row["beta"] = 8
        self.labels_dict_row["gamma"] = 9
        self.labels_dict_row["<[Δ(d < 4Å)]>"] = 10
        self.labels_dict_row["Max <[Δ(d < 4Å)]>"] = 11
        self.labels_dict_row["H-bond geometry"] = 12
        self.labels_dict_row["<[d(X-H•••O)]>"] = 13
        self.labels_dict_row["<[d(X-H•••O=C)]>"] = 14
        self.labels_dict_row["<[d(X-H•••N)]>"] = 15
        self.labels_dict_row["<[d(X-H•••N=C)]>"] = 16
        self.labels_dict_row["Max <[d(X-H•••O)]>"] = 17
        self.labels_dict_row["Max <[d(X-H•••O=C)]>"] = 18
        self.labels_dict_row["Max <[d(X-H•••N)]>"] = 19
        self.labels_dict_row["Max <[d(X-H•••N=C)]>"] = 20
        self.labels_dict_row["<[∠(X-H•••O)]>"] = 21
        self.labels_dict_row["<[∠(X-H•••O=C)]>"] = 22
        self.labels_dict_row["<[∠(X-H•••N)]>"] = 23
        self.labels_dict_row["<[∠(X-H•••N=C)]>"] = 24
        self.labels_dict_row["Max <[∠(X-H•••O)]>"] = 25
        self.labels_dict_row["Max <[∠(X-H•••O=C)]>"] = 26
        self.labels_dict_row["Max <[∠(X-H•••N)]>"] = 27
        self.labels_dict_row["Max <[∠(X-H•••N=C)]>"] = 28
        self.labels_dict_row["Translation/Rotation"] = 29
        self.labels_dict_row["<[Δ(dCOM)]>"] = 30
        self.labels_dict_row["Max <[Δ(dCOM)]>"] = 31
        self.labels_dict_row["<{∠PA1-PA1(expt)}>"] = 32
        self.labels_dict_row["<{∠PA2-PA2(expt)}>"] = 33
        self.labels_dict_row["<{∠PA3-PA3(expt)}>"] = 34
        self.labels_dict_row["Max <{∠PA1-PA1(expt)}>"] = 35
        self.labels_dict_row["Max <{∠PA2-PA2(expt)}>"] = 36
        self.labels_dict_row["Max <{∠PA3-PA3(expt)}>"] = 37
        self.labels_dict_row["Density"] = 38

        ### Rows rendered as italic section sub-titles instead of data rows.
        self.sub_titles_row = [
            "H-bond geometry",
            "Translation/Rotation",
        ]

    def add_xtal(self, crystal_name):
        """
        Add a new crystal with name `crystal_name` to the workbook. This will come as a new
        worksheet in the xlsx file.
        """
        worksheet = self.workbook.add_worksheet(crystal_name)
        # Wide first column for the row labels, tall first row for the title.
        worksheet.set_column(
            0,
            0,
            20.0
        )
        worksheet.set_row(
            0,
            30.0
        )
        ### Make header
        ### ===========
        worksheet.write(
            0,
            0,
            "Crystal/Property",
            self.header_format_1
        )
        ### Write row labels
        ### ================
        worksheet.write(
            1,
            0,
            crystal_name,
            self.header_format_1
        )
        worksheet.write(
            2,
            0,
            "",
            self.header_format_2
        )
        for label, row_idx in self.labels_dict_row.items():
            # Sub-titles are italic; ordinary labels use the data format.
            if label in self.sub_titles_row:
                worksheet.write(
                    row_idx,
                    0,
                    label,
                    self.data_format_2
                )
            else:
                worksheet.write(
                    row_idx,
                    0,
                    label,
                    self.data_format_1
                )
        self.worksheet_dict[crystal_name] = worksheet
        self.force_field_dict[crystal_name] = list()

    def add_forcefield(self, forcefield_name, crystal_name):
        """
        Add a force field with name `forcefield_name` to worksheet
        of xtal with name `crystal_name`.
        """
        self.force_field_dict[crystal_name].append(forcefield_name)
        N_force_fields = len(self.force_field_dict[crystal_name])
        force_field_idx = N_force_fields - 1
        # The "Expt." column pair always sits right after the last force field
        # and is rewritten each time a new force field is appended.
        expt_col = 1 + N_force_fields * 2
        worksheet = self.worksheet_dict[crystal_name]
        worksheet.merge_range(
            first_row=1,
            first_col=1 + force_field_idx * 2,
            last_row=1,
            last_col=2 + force_field_idx * 2,
            data=forcefield_name,
            cell_format=self.header_format_1
        )
        worksheet.write(
            2,
            1 + force_field_idx * 2,
            "< >",
            self.header_format_2
        )
        worksheet.write(
            2,
            2 + force_field_idx * 2,
            "% Dev",
            self.header_format_2
        )
        worksheet.write(
            2,
            expt_col,
            "< >",
            self.header_format_2
        )
        worksheet.write(
            2,
            1 + expt_col,
            "% Dev",
            self.header_format_2
        )
        worksheet.merge_range(
            first_row=0,
            first_col=1,
            last_row=0,
            last_col=2 * N_force_fields,
            data="Force Field",
            cell_format=self.header_format_1
        )
        worksheet.merge_range(
            first_row=1,
            first_col=expt_col,
            last_row=1,
            last_col=1 + expt_col,
            data="Expt.",
            cell_format=self.header_format_1
        )

    def _write_cell(self, worksheet, row, col, value):
        """Write a single data cell; missing or non-finite values render as '--'.

        Accepts strings (written verbatim), None, NaN/inf (all written as
        '--'), and finite numbers (written with the numeric data format).
        """
        if isinstance(value, str):
            worksheet.write(row, col, value, self.data_format_1)
        elif value is None:
            worksheet.write(row, col, "--", self.data_format_1)
        elif np.isnan(value) or np.isinf(value):
            worksheet.write(row, col, "--", self.data_format_1)
        else:
            worksheet.write(row, col, value, self.data_format_1)

    def add_data(
        self,
        data_value,
        data_std,
        data_name,
        forcefield_name,
        crystal_name):

        """
        Add data of value `data_value` and name `data_name` to worksheet
        of crystal `crystal_name` in column of force field `forcefield_name`.

        `data_value` goes in the force field's mean column, `data_std` in
        its % deviation column. `forcefield_name == "experiment"` targets
        the trailing "Expt." column pair.
        """
        if forcefield_name == "experiment":
            # Experiment sits one slot past the last registered force field.
            force_field_idx = len(self.force_field_dict[crystal_name])
        else:
            force_field_idx = self.force_field_dict[crystal_name].index(forcefield_name)
        worksheet = self.worksheet_dict[crystal_name]
        row = self.labels_dict_row[data_name]
        # The same str/None/NaN handling applies to both cells; the original
        # duplicated this cascade verbatim for value and std.
        self._write_cell(worksheet, row, 1 + force_field_idx * 2, data_value)
        self._write_cell(worksheet, row, 2 + force_field_idx * 2, data_std)

    def close(self):
        """
        Close the workbook.
        """
        self.workbook.close()
def main():
"""
Main routine to run the analysis workflow.
"""
args = parse_arguments()
with open(args.input, "r") as fopen:
input_dict = yaml.safe_load(fopen)
workbook_wrap = WorkbookWrapper(args.output)
### Loop over each xtal and put in new workbook
### ===========================================
for crystal_name in input_dict:
workbook_wrap.add_xtal(crystal_name)
ref_strc = md.load(input_dict[crystal_name]["experiment"]["supercell-pdb"])
with open(input_dict[crystal_name]["experiment"]["supercell-rdkit"], "r") as fopen:
rdmol = Chem.JSONToMols(fopen.read())[0]
ucinfo = read_csv(input_dict[crystal_name]["experiment"]["supercell-ucinfo"])
a_idxs = [ucinfo["unitcell_in_supercell_a"][mol_idx] for mol_idx in range(ucinfo["N_rows"])]
b_idxs = [ucinfo["unitcell_in_supercell_b"][mol_idx] for mol_idx in range(ucinfo["N_rows"])]
c_idxs = [ucinfo["unitcell_in_supercell_c"][mol_idx] for mol_idx in range(ucinfo["N_rows"])]
N_unitcells_a = np.max(a_idxs) - np.min(a_idxs) + 1
N_unitcells_b = np.max(b_idxs) - np.min(b_idxs) + 1
N_unitcells_c = np.max(c_idxs) - np.min(c_idxs) + 1
residue_classes_list = np.array(ucinfo["mol_in_unitcell"], dtype=int)
### N_rows is number of molecules in supercell info csv
N_molecules = ucinfo["N_rows"]
dist_pair_list = analysis_engine.build_pair_list(
traj = ref_strc,
rdmol = rdmol,
distance_cutoff = 0.4,
bond_cutoff = 4,
exclude_hydrogen=True,
)
dist_pair_rank_list = analysis_engine.build_pair_ranks(
topology = ref_strc.topology,
pair_list = dist_pair_list,
residue_classes_list = residue_classes_list
)
ref_distances = analysis_engine.compute_pairwise_distances(ref_strc, dist_pair_list)
acc_O_single_list,\
acc_O_double_list,\
acc_N_single_list,\
acc_N_double_list = analysis_engine.get_hbond_indices(
ref_strc,
rdmol
)
if acc_O_single_list.size > 0:
ref_hbond_O_single_diffs = analysis_engine.compute_pairwise_distances(
ref_strc,
acc_O_single_list[:,[0,2]]
)
ref_hbond_O_single_diffs *= _NM_2_ANG
ref_hbond_O_single_angles = analysis_engine.compute_tuplewise_angles(
ref_strc,
acc_O_single_list
)
ref_hbond_O_single_angles *= _RAD_2_DEG
acc_O_single_pair_rank_list = analysis_engine.build_pair_ranks(
topology = ref_strc.topology,
pair_list = acc_O_single_list[:,[0,2]],
residue_classes_list = residue_classes_list
)
if acc_O_double_list.size > 0:
ref_hbond_O_double_diffs = analysis_engine.compute_pairwise_distances(
ref_strc,
acc_O_double_list[:,[0,2]]
)
ref_hbond_O_double_diffs *= _NM_2_ANG
ref_hbond_O_double_angles = analysis_engine.compute_tuplewise_angles(
ref_strc,
acc_O_double_list
)
ref_hbond_O_double_angles *= _RAD_2_DEG
acc_O_double_pair_rank_list = analysis_engine.build_pair_ranks(
topology = ref_strc.topology,
pair_list = acc_O_double_list[:,[0,2]],
residue_classes_list = residue_classes_list
)
if acc_N_single_list.size > 0:
ref_hbond_N_single_diffs = analysis_engine.compute_pairwise_distances(
ref_strc,
acc_N_single_list[:,[0,2]]
)
ref_hbond_N_single_diffs *= _NM_2_ANG
ref_hbond_N_single_angles = analysis_engine.compute_tuplewise_angles(
ref_strc,
acc_N_single_list
)
ref_hbond_N_single_angles *= _RAD_2_DEG
acc_N_single_pair_rank_list = analysis_engine.build_pair_ranks(
topology = ref_strc.topology,
pair_list = acc_N_single_list[:,[0,2]],
residue_classes_list = residue_classes_list
)
if acc_N_double_list.size > 0:
ref_hbond_N_double_diffs = analysis_engine.compute_pairwise_distances(
ref_strc,
acc_N_double_list[:,[0,2]]
)
ref_hbond_N_double_diffs *= _NM_2_ANG
ref_hbond_N_double_angles = analysis_engine.compute_tuplewise_angles(
ref_strc,
acc_N_double_list
)
ref_hbond_N_double_angles *= _RAD_2_DEG
acc_N_double_pair_rank_list = analysis_engine.build_pair_ranks(
topology = ref_strc.topology,
pair_list = acc_N_double_list[:,[0,2]],
residue_classes_list = residue_classes_list
)
### Carry out analysis for each forcefield
### ======================================
for forcefield_name in input_dict[crystal_name]:
if forcefield_name.lower() == "experiment":
continue
print(f"Processing {crystal_name} / {forcefield_name}")
workbook_wrap.add_forcefield(forcefield_name, crystal_name)
xtal_topology = input_dict[crystal_name][forcefield_name]["xtal-topology"]
xtal_trajectory = input_dict[crystal_name][forcefield_name]["xtal-trajectory"]
xtal_output = input_dict[crystal_name][forcefield_name]["xtal-output"]
gas_output = input_dict[crystal_name][forcefield_name]["gas-output"]
a_len = list()
b_len = list()
c_len = list()
alpha = list()
beta = list()
gamma = list()
ene_xtal = list()
ene_gas = list()
density_xtal = list()
distance_diffs = list()
com_diffs = list()
pc1_diffs = list()
pc2_diffs = list()
pc3_diffs = list()
hbond_O_single_diffs = list()
hbond_O_double_diffs = list()
hbond_N_single_diffs = list()
hbond_N_double_diffs = list()
hbond_O_single_angles = list()
hbond_O_double_angles = list()
hbond_N_single_angles = list()
hbond_N_double_angles = list()
bad_xtal_list = list()
bad_gas_list = list()
for output_csv in glob.glob(xtal_output):
data = read_csv(output_csv)
if data["N_rows"] == 0 or data["N_columns"] == 0:
basename, _ = os.path.splitext(output_csv)
bad_xtal_list.append(basename)
continue
_potential = data["Potential"] # Potential energy in kJ/mol
if np.isnan(_potential).any():
basename, _ = os.path.splitext(output_csv)
bad_xtal_list.append(basename)
continue
ene_xtal.extend(_potential.tolist())
for output_csv in glob.glob(gas_output):
data = read_csv(output_csv)
if data["N_rows"] == 0 or data["N_columns"] == 0:
basename, _ = os.path.splitext(output_csv)
bad_gas_list.append(basename)
continue
_potential = data["Potential"] # Potential energy in kJ/mol
if np.isnan(_potential).any():
basename, _ = os.path.splitext(output_csv)
bad_gas_list.append(basename)
continue
ene_gas.extend(_potential)
for output_traj in glob.glob(xtal_trajectory):
basename, _ = os.path.splitext(output_traj)
if basename in bad_xtal_list:
continue
query_traj = md.load(
output_traj,
top=ref_strc.topology
)
_unitcell_angles = query_traj.unitcell_angles
alpha.extend(_unitcell_angles[:,0].tolist())
beta.extend(_unitcell_angles[:,1].tolist())
gamma.extend(_unitcell_angles[:,2].tolist())
### Multiply by 10 to get Ang
_unitcell_lengths = query_traj.unitcell_lengths * 10.
### Correct for number of unitcells along each direction
_unitcell_lengths[:,0] /= N_unitcells_a
_unitcell_lengths[:,1] /= N_unitcells_b
_unitcell_lengths[:,2] /= N_unitcells_c
a_len.extend(_unitcell_lengths[:,0].tolist())
b_len.extend(_unitcell_lengths[:,1].tolist())
c_len.extend(_unitcell_lengths[:,2].tolist())
### Devide by 1000 to get g/cm^3
_density = md.density(query_traj) / 1000.
density_xtal.extend(_density)
_com_diffs = analysis_engine.compute_com_diff_per_residue(
query_traj,
ref_strc,
rdmol,
residue_classes_list,
)
_pc_diffs = analysis_engine.compute_pc_diff_per_residue(
query_traj,
ref_strc,
rdmol
)
_distance_diffs = ref_distances - analysis_engine.compute_pairwise_distances(
query_traj,
dist_pair_list
)
_distance_diffs = np.abs(_distance_diffs) * _NM_2_ANG
_com_diffs = np.abs(_com_diffs) * _NM_2_ANG
if acc_O_single_list.size > 0:
_hbond_O_single_diffs = analysis_engine.compute_pairwise_distances(
query_traj,
acc_O_single_list[:,[0,2]]
)
_hbond_O_single_diffs *= _NM_2_ANG
_hbond_O_single_diffs = ref_hbond_O_single_diffs - _hbond_O_single_diffs
_hbond_O_single_angles = analysis_engine.compute_tuplewise_angles(
query_traj,
acc_O_single_list,
)
_hbond_O_single_angles *= _RAD_2_DEG
_hbond_O_single_angles = ref_hbond_O_single_angles - _hbond_O_single_angles
if acc_O_double_list.size > 0:
_hbond_O_double_diffs = analysis_engine.compute_pairwise_distances(
query_traj,
acc_O_double_list[:,[0,2]]
)
_hbond_O_double_diffs *= _NM_2_ANG
_hbond_O_double_diffs = ref_hbond_O_double_diffs - _hbond_O_double_diffs
_hbond_O_double_angles = analysis_engine.compute_tuplewise_angles(
query_traj,
acc_O_double_list,
)
_hbond_O_double_angles *= _RAD_2_DEG
_hbond_O_double_angles = ref_hbond_O_double_angles - _hbond_O_double_angles
if acc_N_single_list.size > 0:
_hbond_N_single_diffs = analysis_engine.compute_pairwise_distances(
query_traj,
acc_N_single_list[:,[0,2]]
)
_hbond_N_single_diffs *= _NM_2_ANG
_hbond_N_single_diffs = ref_hbond_N_single_diffs - _hbond_N_single_diffs
_hbond_N_single_angles = analysis_engine.compute_tuplewise_angles(
query_traj,
acc_N_single_list,
)
_hbond_N_single_angles *= _RAD_2_DEG
_hbond_N_single_angles = ref_hbond_N_single_angles - _hbond_N_single_angles
if acc_N_double_list.size > 0:
_hbond_N_double_diffs = analysis_engine.compute_pairwise_distances(
query_traj,
acc_N_double_list[:,[0,2]]
)
_hbond_N_double_diffs *= _NM_2_ANG
_hbond_N_double_diffs = ref_hbond_N_double_diffs - _hbond_N_double_diffs
_hbond_N_double_angles = analysis_engine.compute_tuplewise_angles(
query_traj,
acc_N_double_list,
)
_hbond_N_double_angles *= _RAD_2_DEG
_hbond_N_double_angles = ref_hbond_N_double_angles - _hbond_N_double_angles
### This means, we only do this the first iteration
if len(com_diffs) == 0:
com_diffs = _com_diffs
distance_diffs = _distance_diffs
pc1_diffs = _pc_diffs[:,:,0]
pc2_diffs = _pc_diffs[:,:,1]
pc3_diffs = _pc_diffs[:,:,2]
if acc_O_single_list.size > 0:
hbond_O_single_diffs = _hbond_O_single_diffs
hbond_O_single_angles = _hbond_O_single_angles
if acc_O_double_list.size > 0:
hbond_O_double_diffs = _hbond_O_double_diffs
hbond_O_double_angles = _hbond_O_double_angles
if acc_N_single_list.size > 0:
hbond_N_single_diffs = _hbond_N_single_diffs
hbond_N_single_angles = _hbond_N_single_angles
if acc_N_double_list.size > 0:
hbond_N_double_diffs = _hbond_N_double_diffs
hbond_N_double_angles = _hbond_N_double_angles
else:
com_diffs = np.vstack((com_diffs, _com_diffs))
distance_diffs = np.vstack((distance_diffs, _distance_diffs))
pc1_diffs = np.vstack((pc1_diffs, _pc_diffs[:,:,0]))
pc2_diffs = np.vstack((pc2_diffs, _pc_diffs[:,:,1]))
pc3_diffs = np.vstack((pc3_diffs, _pc_diffs[:,:,2]))
if acc_O_single_list.size > 0:
hbond_O_single_diffs = np.vstack((hbond_O_single_diffs, _hbond_O_single_diffs))
hbond_O_single_angles = np.vstack((hbond_O_single_angles, _hbond_O_single_angles))
if acc_O_double_list.size > 0:
hbond_O_double_diffs = np.vstack((hbond_O_double_diffs, _hbond_O_double_diffs))
hbond_O_double_angles = np.vstack((hbond_O_double_angles, _hbond_O_double_angles))
if acc_N_single_list.size > 0:
hbond_N_single_diffs = np.vstack((hbond_N_single_diffs, _hbond_N_single_diffs))
hbond_N_single_angles = np.vstack((hbond_N_single_angles, _hbond_N_single_angles))
if acc_N_double_list.size > 0:
hbond_N_double_diffs = np.vstack((hbond_N_double_diffs, _hbond_N_double_diffs))
hbond_N_double_angles = np.vstack((hbond_N_double_angles, _hbond_N_double_angles))
### Make sure everything is np.ndarray
a_len = np.array(a_len)
b_len = np.array(b_len)
c_len = np.array(c_len)
alpha = np.array(alpha)
beta = np.array(beta)
gamma = np.array(gamma)
ene_xtal = np.array(ene_xtal)
ene_gas = np.array(ene_gas)
density_xtal = np.array(density_xtal)
distance_diffs = np.array(distance_diffs)
com_diffs = np.array(com_diffs)
pc1_diffs = np.array(pc1_diffs)
pc2_diffs = np.array(pc2_diffs)
pc3_diffs = np.array(pc3_diffs)
hbond_O_single_diffs = np.array(hbond_O_single_diffs)
hbond_O_double_diffs = np.array(hbond_O_double_diffs)
hbond_N_single_diffs = np.array(hbond_N_single_diffs)
hbond_N_double_diffs = np.array(hbond_N_double_diffs)
hbond_O_single_angles = np.array(hbond_O_single_angles)
hbond_O_double_angles = np.array(hbond_O_double_angles)
hbond_N_single_angles = np.array(hbond_N_single_angles)
hbond_N_double_angles = np.array(hbond_N_double_angles)
### If we don't have any data:
if distance_diffs.size < 2:
continue
### Write distance diff data ###
### ======================== ###
avg = np.mean(distance_diffs)
std = np.std(distance_diffs)
workbook_wrap.add_data(
avg,
std/avg*100.,
"<[Δ(d < 4Å)]>",
forcefield_name,
crystal_name
)
max_avg = 0.
max_std = 0.
for unique_rank in np.unique(dist_pair_rank_list):
valids = np.where(unique_rank == dist_pair_rank_list)[0]
_max_avg = np.mean(distance_diffs[:,valids])
if _max_avg > max_avg:
max_avg = _max_avg
max_std = np.std(distance_diffs[:,valids])
workbook_wrap.add_data(
max_avg,
max_std/max_avg*100.,
"Max <[Δ(d < 4Å)]>",
forcefield_name,
crystal_name
)
### Write com diff data ###
### =================== ###
avg = np.mean(com_diffs)
std = np.std(com_diffs)
workbook_wrap.add_data(
avg,
std/avg*100.,
"<[Δ(dCOM)]>",
forcefield_name,
crystal_name
)
res_avg = np.mean(com_diffs, axis=0)
max_idx = np.argmax(res_avg)
max_avg = res_avg[max_idx]
max_std = np.std(com_diffs[:,max_idx])
workbook_wrap.add_data(
max_avg,
max_std/max_avg*100.,
"Max <[Δ(dCOM)]>",
forcefield_name,
crystal_name
)
### Write pc diff data ###
### ================== ###
avg = np.mean(pc1_diffs)
std = np.std(pc1_diffs)
workbook_wrap.add_data(
avg,
std/avg*100.,
"<{∠PA1-PA1(expt)}>",
forcefield_name,
crystal_name
)
res_avg = np.mean(pc1_diffs, axis=0)
max_idx = np.argmax(res_avg)
max_avg = res_avg[max_idx]
max_std = np.std(pc1_diffs[:,max_idx])
workbook_wrap.add_data(
max_avg,
max_std/max_avg*100.,
"Max <{∠PA1-PA1(expt)}>",
forcefield_name,
crystal_name
)
avg = np.mean(pc2_diffs)
std = np.std(pc2_diffs)
workbook_wrap.add_data(
avg,
std/avg*100.,
"<{∠PA2-PA2(expt)}>",
forcefield_name,
crystal_name
)
res_avg = np.mean(pc2_diffs, axis=0)
max_idx = np.argmax(res_avg)
max_avg = res_avg[max_idx]
max_std = np.std(pc2_diffs[:,max_idx])
workbook_wrap.add_data(
max_avg,
max_std/max_avg*100.,
"Max <{∠PA2-PA2(expt)}>",
forcefield_name,
crystal_name
)
avg = np.mean(pc3_diffs)
std = np.std(pc3_diffs)
workbook_wrap.add_data(
avg,
std/avg*100.,
"<{∠PA3-PA3(expt)}>",
forcefield_name,
crystal_name
)
res_avg = np.mean(pc3_diffs, axis=0)
max_idx = np.argmax(res_avg)
max_avg = res_avg[max_idx]
max_std = np.std(pc3_diffs[:,max_idx])
workbook_wrap.add_data(
max_avg,
max_std/max_avg*100.,
"Max <{∠PA3-PA3(expt)}>",
forcefield_name,
crystal_name
)
### Write hbond data ###
### ================ ###
### X-H•••O
### -------
if len(hbond_O_single_diffs) > 0:
avg = np.mean(hbond_O_single_diffs)
std = np.std(hbond_O_single_diffs)
std = std/avg*100.
max_avg = 0.
max_std = 0.
for unique_rank in np.unique(acc_O_single_pair_rank_list):
valids = np.where(unique_rank == acc_O_single_pair_rank_list)[0]
_max_avg = np.mean(hbond_O_single_diffs[:,valids])
if _max_avg > max_avg:
max_avg = _max_avg
max_std = np.std(hbond_O_single_diffs[:,valids])
else:
avg = "--"
std = "--"
max_avg = "--"
max_std = "--"
workbook_wrap.add_data(
avg,
std,
"<[d(X-H•••O)]>",
forcefield_name,
crystal_name
)
workbook_wrap.add_data(
max_avg,
max_std,
"Max <[d(X-H•••O)]>",
forcefield_name,
crystal_name
)
if len(hbond_O_single_angles) > 0:
avg = np.mean(hbond_O_single_angles)
std = np.std(hbond_O_single_angles)
std = std/avg*100.
max_avg = -np.inf
max_std = 0.
min_avg = np.inf
min_std = 0.
for unique_rank in np.unique(acc_O_single_pair_rank_list):
valids = np.where(unique_rank == acc_O_single_pair_rank_list)[0]
rank_avg = np.mean(hbond_O_single_angles[:,valids])
if rank_avg > max_avg:
max_avg = rank_avg
max_std = np.std(hbond_O_single_angles[:,valids])
if rank_avg < min_avg:
min_avg = rank_avg
min_std = np.std(hbond_O_single_angles[:,valids])
else:
avg = "--"
std = "--"
max_avg = "--"
max_std = "--"
workbook_wrap.add_data(
avg,
std,
"<[∠(X-H•••O)]>",
forcefield_name,
crystal_name
)
workbook_wrap.add_data(
max_avg,
max_std,
"Max <[∠(X-H•••O)]>",
forcefield_name,
crystal_name
)
### X-H•••O=C
### ---------
if len(hbond_O_double_diffs) > 0:
avg = np.mean(hbond_O_double_diffs)
std = np.std(hbond_O_double_diffs)
std = std/avg*100.
max_avg = 0.
max_std = 0.
for unique_rank in np.unique(acc_O_double_pair_rank_list):
valids = np.where(unique_rank == acc_O_double_pair_rank_list)[0]
_max_avg = np.mean(hbond_O_double_diffs[:,valids])
if _max_avg > max_avg:
max_avg = _max_avg
max_std = np.std(hbond_O_double_diffs[:,valids])
else:
avg = "--"
std = "--"
max_avg = "--"
max_std = "--"
workbook_wrap.add_data(
avg,
std,
"<[d(X-H•••O=C)]>",
forcefield_name,
crystal_name
)
workbook_wrap.add_data(
max_avg,
max_std,
"Max <[d(X-H•••O=C)]>",
forcefield_name,
crystal_name
)
if len(hbond_O_double_angles) > 0:
avg = np.mean(hbond_O_double_angles)
std = np.std(hbond_O_double_angles)
std = std/avg*100.
max_avg = -np.inf
max_std = 0.
min_avg = np.inf
min_std = 0.
for unique_rank in np.unique(acc_O_double_pair_rank_list):
valids = np.where(unique_rank == acc_O_double_pair_rank_list)[0]
rank_avg = np.mean(hbond_O_double_angles[:,valids])
if rank_avg > max_avg:
max_avg = rank_avg
max_std = np.std(hbond_O_double_angles[:,valids])
if rank_avg < min_avg:
min_avg = rank_avg
min_std = np.std(hbond_O_double_angles[:,valids])
else:
avg = "--"
std = "--"
max_avg = "--"
max_std = "--"
workbook_wrap.add_data(
avg,
std,
"<[∠(X-H•••O=C)]>",
forcefield_name,
crystal_name
)
workbook_wrap.add_data(
max_avg,
max_std,
"Max <[∠(X-H•••O=C)]>",
forcefield_name,
crystal_name
)
### X-H•••N
### -------
if len(hbond_N_single_diffs) > 0:
avg = np.mean(hbond_N_single_diffs)
std = np.std(hbond_N_single_diffs)
std = std/avg*100.
max_avg = 0.
max_std = 0.
for unique_rank in np.unique(acc_N_single_pair_rank_list):
valids = np.where(unique_rank == acc_N_single_pair_rank_list)[0]
_max_avg = np.mean(hbond_N_single_diffs[:,valids])
if _max_avg > max_avg:
max_avg = _max_avg
max_std = np.std(hbond_N_single_diffs[:,valids])
else:
avg = "--"
std = "--"
max_avg = "--"
max_std = "--"
workbook_wrap.add_data(
avg,
std,
"<[d(X-H•••N)]>",
forcefield_name,
crystal_name
)
workbook_wrap.add_data(
max_avg,
max_std,
"Max <[d(X-H•••N)]>",
forcefield_name,
crystal_name
)
if len(hbond_N_single_angles) > 0:
avg = np.mean(hbond_N_single_angles)
std = np.std(hbond_N_single_angles)
std = std/avg*100.
max_avg = -np.inf
max_std = 0.
min_avg = np.inf
min_std = 0.
for unique_rank in np.unique(acc_N_single_pair_rank_list):
valids = np.where(unique_rank == acc_N_single_pair_rank_list)[0]
rank_avg = np.mean(hbond_N_single_angles[:,valids])
if rank_avg > max_avg:
max_avg = rank_avg
max_std = np.std(hbond_N_single_angles[:,valids])
if rank_avg < min_avg:
min_avg = rank_avg
min_std = np.std(hbond_N_single_angles[:,valids])
else:
avg = "--"
std = "--"
max_avg = "--"
max_std = "--"
workbook_wrap.add_data(
avg,
std,
"<[∠(X-H•••N)]>",
forcefield_name,
crystal_name
)
workbook_wrap.add_data(
max_avg,
max_std,
"Max <[∠(X-H•••N)]>",
forcefield_name,
crystal_name
)
### X-H•••N=C
### ---------
if len(hbond_N_double_diffs) > 0:
avg = np.mean(hbond_N_double_diffs)
std = np.std(hbond_N_double_diffs)
std = std/avg*100.
max_avg = 0.
max_std = 0.
for unique_rank in np.unique(acc_N_double_pair_rank_list):
valids = np.where(unique_rank == acc_N_double_pair_rank_list)[0]
_max_avg = np.mean(hbond_N_double_diffs[:,valids])
if _max_avg > max_avg:
max_avg = _max_avg
max_std = np.std(hbond_N_double_diffs[:,valids])
else:
avg = "--"
std = "--"
max_avg = "--"
max_std = "--"
workbook_wrap.add_data(
avg,
std,
"<[d(X-H•••N=C)]>",
forcefield_name,
crystal_name
)
workbook_wrap.add_data(
max_avg,
max_std,
"Max <[d(X-H•••N=C)]>",
forcefield_name,
crystal_name
)
if len(hbond_N_double_angles) > 0:
avg = np.mean(hbond_N_double_angles)
std = np.std(hbond_N_double_angles)
std = std/avg*100.
max_avg = -np.inf
max_std = 0.
min_avg = np.inf
min_std = 0.
for unique_rank in np.unique(acc_N_double_pair_rank_list):
valids = np.where(unique_rank == acc_N_double_pair_rank_list)[0]
rank_avg = np.mean(hbond_N_double_angles[:,valids])
if rank_avg > max_avg:
max_avg = rank_avg
max_std = np.std(hbond_N_double_angles[:,valids])
if rank_avg < min_avg:
min_avg = rank_avg
min_std = np.std(hbond_N_double_angles[:,valids])
else:
avg = "--"
std = "--"
max_avg = "--"
max_std = "--"
workbook_wrap.add_data(
avg,
std,
"<[∠(X-H•••N=C)]>",
forcefield_name,
crystal_name
)
workbook_wrap.add_data(
max_avg,
max_std,
"Max <[∠(X-H•••N=C)]>",
forcefield_name,
crystal_name
)
### Write box vector length ###
### ======================= ###
avg = np.mean(a_len)
std = np.std(a_len)
workbook_wrap.add_data(
avg,
std/avg*100.,
"a",
forcefield_name,
crystal_name
)
avg = np.mean(b_len)
std = np.std(b_len)
workbook_wrap.add_data(
avg,
std/avg*100.,
"b",
forcefield_name,
crystal_name
)
avg = np.mean(c_len)
std = np.std(c_len)
workbook_wrap.add_data(
avg,
std/avg*100.,
"c",
forcefield_name,
crystal_name
)
### Write box vector angles ###
### ======================= ###
avg = np.mean(alpha)
std = np.std(alpha)
workbook_wrap.add_data(
avg,
std/avg*100.,
"alpha",
forcefield_name,
crystal_name
)
avg = np.mean(beta)
std = np.std(beta)
workbook_wrap.add_data(
avg,
std/avg*100.,
"beta",
forcefield_name,
crystal_name
)
avg = np.mean(gamma)
std = np.std(gamma)
workbook_wrap.add_data(
avg,
std/avg*100.,
"gamma",
forcefield_name,
crystal_name
)
### Write sublimation enthalpy ###
### ========================== ###
ene_xtal = np.array(ene_xtal) * _KJ_2_KCAL
ene_gas = np.array(ene_gas) * _KJ_2_KCAL
if ene_xtal.size > 1 and ene_gas.size > 1:
ene_xtal /= float(N_molecules)
sublimation_avg = np.mean(ene_gas) - np.mean(ene_xtal)/float(N_molecules)
sublimation_avg += (_GASCONST_KCAL * input_dict[crystal_name]["experiment"]["temperature"])
sublimation_std = np.var(ene_xtal) + np.var(ene_gas)
sublimation_std = np.sqrt(sublimation_std)
workbook_wrap.add_data(
sublimation_avg,
sublimation_std/sublimation_avg * 100.,
"Sublimation Energy",
forcefield_name,
crystal_name
)
else:
workbook_wrap.add_data(
"--",
"--",
"Sublimation Energy",
forcefield_name,
crystal_name
)
### Write density ###
### ============= ###
avg = np.mean(density_xtal)
std = np.std(density_xtal)
workbook_wrap.add_data(
avg,
std/avg*100.,
"Density",
forcefield_name,
crystal_name
)
### Parse in experimental data ###
### ========================== ###
doc = gemmi.cif.read(input_dict[crystal_name]["experiment"]["experiment-cif"])[0]
strc = gemmi.make_small_structure_from_block(doc)
density = doc.find_value("_exptl_crystal_density_diffrn")
if density != None:
try:
density = float(density)
except:
density = "--"
elif "density" in input_dict[crystal_name]["experiment"]:
density = input_dict[crystal_name]["experiment"]["density"]
else:
### Then we don't have density
density = "--"
workbook_wrap.add_data(
strc.cell.a,
"--",
"a",
"experiment",
crystal_name
)
workbook_wrap.add_data(
strc.cell.b,
"--",
"b",
"experiment",
crystal_name
)
workbook_wrap.add_data(
strc.cell.c,
"--",
"c",
"experiment",
crystal_name
)
workbook_wrap.add_data(
strc.cell.alpha,
"--",
"alpha",
"experiment",
crystal_name
)
workbook_wrap.add_data(
strc.cell.beta,
"--",
"beta",
"experiment",
crystal_name
)
workbook_wrap.add_data(
strc.cell.gamma,
"--",
"gamma",
"experiment",
crystal_name
)
### Write hbond data ###
### ================ ###
# ### X-H•••O
# ### -------
# if acc_O_single_list.size > 0:
# avg = np.mean(ref_hbond_O_single_diffs)
# std = np.std(ref_hbond_O_single_diffs)
# std = std/avg*100.
# max_avg = 0.
# max_std = "--"
# for unique_rank in np.unique(acc_O_single_pair_rank_list):
# valids = np.where(unique_rank == acc_O_single_pair_rank_list)[0]
# _max_avg = np.mean(ref_hbond_O_single_diffs[:,valids])
# if _max_avg > max_avg:
# max_avg = _max_avg
# else:
# avg = "--"
# std = "--"
# max_avg = "--"
# max_std = "--"
# workbook_wrap.add_data(
# avg,
# std,
# "<[d(X-H•••O)]>",
# "experiment",
# crystal_name
# )
# workbook_wrap.add_data(
# max_avg,
# max_std,
# "Max <[d(X-H•••O)]>",
# "experiment",
# crystal_name
# )
#
# if acc_O_single_list.size > 0:
# avg = np.mean(ref_hbond_O_single_angles)
# std = np.std(ref_hbond_O_single_angles)
# std = std/avg*100.
# max_avg = -np.inf
# max_std = 0.
# min_avg = np.inf
# min_std = 0.
# for unique_rank in np.unique(acc_O_single_pair_rank_list):
# valids = np.where(unique_rank == acc_O_single_pair_rank_list)[0]
# rank_avg = np.mean(ref_hbond_O_single_angles[:,valids])
# if rank_avg > max_avg:
# max_avg = rank_avg
# max_std = np.std(ref_hbond_O_single_angles[:,valids])
# if rank_avg < min_avg:
# min_avg = rank_avg
# min_std = np.std(ref_hbond_O_single_angles[:,valids])
# else:
# avg = "--"
# std = "--"
# max_avg = "--"
# max_std = "--"
# workbook_wrap.add_data(
# avg,
# std,
# "<[∠(X-H•••O)]>",
# "experiment",
# crystal_name
# )
# workbook_wrap.add_data(
# max_avg,
# max_std,
# "Max <[∠(X-H•••O)]>",
# "experiment",
# crystal_name
# )
#
# ### X-H•••O=C
# ### ---------
# if acc_O_double_list.size > 0:
# avg = np.mean(ref_hbond_O_double_diffs)
# std = np.std(ref_hbond_O_double_diffs)
# std = std/avg*100.
# max_avg = 0.
# max_std = "--"
# for unique_rank in np.unique(acc_O_double_pair_rank_list):
# valids = np.where(unique_rank == acc_O_double_pair_rank_list)[0]
# _max_avg = np.mean(ref_hbond_O_double_diffs[:,valids])
# if _max_avg > max_avg:
# max_avg = _max_avg
# else:
# avg = "--"
# std = "--"
# max_avg = "--"
# max_std = "--"
# workbook_wrap.add_data(
# avg,
# std,
# "<[d(X-H•••O=C)]>",
# "experiment",
# crystal_name
# )
# workbook_wrap.add_data(
# max_avg,
# max_std,
# "Max <[d(X-H•••O=C)]>",
# "experiment",
# crystal_name
# )
#
# if acc_O_double_list.size > 0:
# avg = np.mean(ref_hbond_O_double_angles)
# std = np.std(ref_hbond_O_double_angles)
# std = std/avg*100.
# max_avg = -np.inf
# max_std = 0.
# min_avg = np.inf
# min_std = 0.
# for unique_rank in np.unique(acc_O_double_pair_rank_list):
# valids = np.where(unique_rank == acc_O_double_pair_rank_list)[0]
# rank_avg = np.mean(ref_hbond_O_double_angles[:,valids])
# if rank_avg > max_avg:
# max_avg = rank_avg
# max_std = np.std(ref_hbond_O_double_angles[:,valids])
# if rank_avg < min_avg:
# min_avg = rank_avg
# min_std = np.std(ref_hbond_O_double_angles[:,valids])
# else:
# avg = "--"
# std = "--"
# max_avg = "--"
# max_std = "--"
# workbook_wrap.add_data(
# avg,
# std,
# "<[∠(X-H•••O=C)]>",
# "experiment",
# crystal_name
# )
# workbook_wrap.add_data(
# max_avg,
# max_std,
# "Max <[∠(X-H•••O=C)]>",
# "experiment",
# crystal_name
# )
#
# ### X-H•••N
# ### -------
# if acc_N_single_list.size > 0:
# avg = np.mean(ref_hbond_N_single_diffs)
# std = np.std(ref_hbond_N_single_diffs)
# std = std/avg*100.
# max_avg = 0.
# max_std = "--"
# for unique_rank in np.unique(acc_N_single_pair_rank_list):
# valids = np.where(unique_rank == acc_N_single_pair_rank_list)[0]
# _max_avg = np.mean(ref_hbond_N_single_diffs[:,valids])
# if _max_avg > max_avg:
# max_avg = _max_avg
# else:
# avg = "--"
# std = "--"
# max_avg = "--"
# max_std = "--"
# workbook_wrap.add_data(
# avg,
# std,
# "<[d(X-H•••N)]>",
# "experiment",
# crystal_name
# )
# workbook_wrap.add_data(
# max_avg,
# max_std,
# "Max <[d(X-H•••N)]>",
# "experiment",
# crystal_name
# )
#
# if acc_N_single_list.size > 0:
# avg = np.mean(ref_hbond_N_single_angles)
# std = np.std(ref_hbond_N_single_angles)
# std = std/avg*100.
# max_avg = -np.inf
# max_std = 0.
# min_avg = np.inf
# min_std = 0.
# for unique_rank in np.unique(acc_N_single_pair_rank_list):
# valids = np.where(unique_rank == acc_N_single_pair_rank_list)[0]
# rank_avg = np.mean(ref_hbond_N_single_angles[:,valids])
# if rank_avg > max_avg:
# max_avg = rank_avg
# max_std = np.std(ref_hbond_N_single_angles[:,valids])
# if rank_avg < min_avg:
# min_avg = rank_avg
# min_std = np.std(ref_hbond_N_single_angles[:,valids])
# else:
# avg = "--"
# std = "--"
# max_avg = "--"
# max_std = "--"
# workbook_wrap.add_data(
# avg,
# std,
# "<[∠(X-H•••N)]>",
# "experiment",
# crystal_name
# )
# workbook_wrap.add_data(
# max_avg,
# max_std,
# "Max <[∠(X-H•••N)]>",
# "experiment",
# crystal_name
# )
#
# ### X-H•••N=C
# ### ---------
# if acc_N_double_list.size > 0:
# avg = np.mean(ref_hbond_N_double_diffs)
# std = np.std(ref_hbond_N_double_diffs)
# std = std/avg*100.
# max_avg = 0.
# max_std = "--"
# for unique_rank in np.unique(acc_N_double_pair_rank_list):
# valids = np.where(unique_rank == acc_N_double_pair_rank_list)[0]
# _max_avg = np.mean(ref_hbond_N_double_diffs[:,valids])
# if _max_avg > max_avg:
# max_avg = _max_avg
# else:
# avg = "--"
# std = "--"
# max_avg = "--"
# max_std = "--"
# workbook_wrap.add_data(
# avg,
# std,
# "<[d(X-H•••N=C)]>",
# "experiment",
# crystal_name
# )
# workbook_wrap.add_data(
# max_avg,
# max_std,
# "Max <[d(X-H•••N=C)]>",
# "experiment",
# crystal_name
# )
#
# if acc_N_double_list.size > 0:
# avg = np.mean(ref_hbond_N_double_angles)
# std = np.std(ref_hbond_N_double_angles)
# std = std/avg*100.
# max_avg = -np.inf
# max_std = 0.
# min_avg = np.inf
# min_std = 0.
# for unique_rank in np.unique(acc_N_double_pair_rank_list):
# valids = np.where(unique_rank == acc_N_double_pair_rank_list)[0]
# rank_avg = np.mean(ref_hbond_N_double_angles[:,valids])
# if rank_avg > max_avg:
# max_avg = rank_avg
# max_std = np.std(ref_hbond_N_double_angles[:,valids])
# if rank_avg < min_avg:
# min_avg = rank_avg
# min_std = np.std(ref_hbond_N_double_angles[:,valids])
# else:
# avg = "--"
# std = "--"
# max_avg = "--"
# max_std = "--"
# workbook_wrap.add_data(
# avg,
# std,
# "<[∠(X-H•••N=C)]>",
# "experiment",
# crystal_name
# )
# workbook_wrap.add_data(
# max_avg,
# max_std,
# "Max <[∠(X-H•••N=C)]>",
# "experiment",
# crystal_name
# )
### Sublimation Enthalpy ###
### -------------------- ###
if "sublimation-enthalpy" in input_dict[crystal_name]["experiment"]:
workbook_wrap.add_data(
input_dict[crystal_name]["experiment"]["sublimation-enthalpy"],
input_dict[crystal_name]["experiment"]["sublimation-enthalpy-std"],
"Sublimation Energy",
"experiment",
crystal_name
)
workbook_wrap.add_data(
density,
"--",
"Density",
"experiment",
crystal_name
)
workbook_wrap.close()
def entry_point():
    """Console-script entry point; delegates all work to ``main``."""
    main()
if __name__ == "__main__":
entry_point() | [
"numpy.abs",
"argparse.ArgumentParser",
"numpy.argmax",
"gemmi.cif.read",
"numpy.isnan",
"mdtraj.load",
"numpy.mean",
"yaml.safe_load",
"glob.glob",
"numpy.unique",
"mdtraj.density",
"gemmi.make_small_structure_from_block",
"numpy.std",
"numpy.max",
"numpy.var",
"numpy.isinf",
"numpy... | [((392, 498), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Python script for merging simulation data for xtal MD project."""'}), "(description=\n 'Python script for merging simulation data for xtal MD project.')\n", (415, 498), False, 'import argparse\n'), ((863, 876), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (874, 876), False, 'from collections import OrderedDict\n'), ((895, 908), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (906, 908), False, 'from collections import OrderedDict\n'), ((2152, 2179), 'xlsxwriter.Workbook', 'xlsxwriter.Workbook', (['output'], {}), '(output)\n', (2171, 2179), False, 'import xlsxwriter\n'), ((3446, 3459), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (3457, 3459), False, 'from collections import OrderedDict\n'), ((3492, 3505), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (3503, 3505), False, 'from collections import OrderedDict\n'), ((3538, 3551), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (3549, 3551), False, 'from collections import OrderedDict\n'), ((11982, 12003), 'yaml.safe_load', 'yaml.safe_load', (['fopen'], {}), '(fopen)\n', (11996, 12003), False, 'import yaml\n'), ((12262, 12326), 'mdtraj.load', 'md.load', (["input_dict[crystal_name]['experiment']['supercell-pdb']"], {}), "(input_dict[crystal_name]['experiment']['supercell-pdb'])\n", (12269, 12326), True, 'import mdtraj as md\n'), ((13076, 13122), 'numpy.array', 'np.array', (["ucinfo['mol_in_unitcell']"], {'dtype': 'int'}), "(ucinfo['mol_in_unitcell'], dtype=int)\n", (13084, 13122), True, 'import numpy as np\n'), ((46577, 46619), 'gemmi.make_small_structure_from_block', 'gemmi.make_small_structure_from_block', (['doc'], {}), '(doc)\n', (46614, 46619), False, 'import gemmi\n'), ((1722, 1746), 'numpy.array', 'np.array', (['data_dict[key]'], {}), '(data_dict[key])\n', (1730, 1746), True, 'import numpy as np\n'), ((18496, 18518), 'glob.glob', 'glob.glob', 
(['xtal_output'], {}), '(xtal_output)\n', (18505, 18518), False, 'import glob\n'), ((19145, 19166), 'glob.glob', 'glob.glob', (['gas_output'], {}), '(gas_output)\n', (19154, 19166), False, 'import glob\n'), ((19762, 19788), 'glob.glob', 'glob.glob', (['xtal_trajectory'], {}), '(xtal_trajectory)\n', (19771, 19788), False, 'import glob\n'), ((27443, 27458), 'numpy.array', 'np.array', (['a_len'], {}), '(a_len)\n', (27451, 27458), True, 'import numpy as np\n'), ((27479, 27494), 'numpy.array', 'np.array', (['b_len'], {}), '(b_len)\n', (27487, 27494), True, 'import numpy as np\n'), ((27515, 27530), 'numpy.array', 'np.array', (['c_len'], {}), '(c_len)\n', (27523, 27530), True, 'import numpy as np\n'), ((27551, 27566), 'numpy.array', 'np.array', (['alpha'], {}), '(alpha)\n', (27559, 27566), True, 'import numpy as np\n'), ((27587, 27601), 'numpy.array', 'np.array', (['beta'], {}), '(beta)\n', (27595, 27601), True, 'import numpy as np\n'), ((27622, 27637), 'numpy.array', 'np.array', (['gamma'], {}), '(gamma)\n', (27630, 27637), True, 'import numpy as np\n'), ((27661, 27679), 'numpy.array', 'np.array', (['ene_xtal'], {}), '(ene_xtal)\n', (27669, 27679), True, 'import numpy as np\n'), ((27703, 27720), 'numpy.array', 'np.array', (['ene_gas'], {}), '(ene_gas)\n', (27711, 27720), True, 'import numpy as np\n'), ((27748, 27770), 'numpy.array', 'np.array', (['density_xtal'], {}), '(density_xtal)\n', (27756, 27770), True, 'import numpy as np\n'), ((27800, 27824), 'numpy.array', 'np.array', (['distance_diffs'], {}), '(distance_diffs)\n', (27808, 27824), True, 'import numpy as np\n'), ((27854, 27873), 'numpy.array', 'np.array', (['com_diffs'], {}), '(com_diffs)\n', (27862, 27873), True, 'import numpy as np\n'), ((27903, 27922), 'numpy.array', 'np.array', (['pc1_diffs'], {}), '(pc1_diffs)\n', (27911, 27922), True, 'import numpy as np\n'), ((27952, 27971), 'numpy.array', 'np.array', (['pc2_diffs'], {}), '(pc2_diffs)\n', (27960, 27971), True, 'import numpy as np\n'), ((28001, 28020), 
'numpy.array', 'np.array', (['pc3_diffs'], {}), '(pc3_diffs)\n', (28009, 28020), True, 'import numpy as np\n'), ((28056, 28086), 'numpy.array', 'np.array', (['hbond_O_single_diffs'], {}), '(hbond_O_single_diffs)\n', (28064, 28086), True, 'import numpy as np\n'), ((28122, 28152), 'numpy.array', 'np.array', (['hbond_O_double_diffs'], {}), '(hbond_O_double_diffs)\n', (28130, 28152), True, 'import numpy as np\n'), ((28188, 28218), 'numpy.array', 'np.array', (['hbond_N_single_diffs'], {}), '(hbond_N_single_diffs)\n', (28196, 28218), True, 'import numpy as np\n'), ((28254, 28284), 'numpy.array', 'np.array', (['hbond_N_double_diffs'], {}), '(hbond_N_double_diffs)\n', (28262, 28284), True, 'import numpy as np\n'), ((28321, 28352), 'numpy.array', 'np.array', (['hbond_O_single_angles'], {}), '(hbond_O_single_angles)\n', (28329, 28352), True, 'import numpy as np\n'), ((28389, 28420), 'numpy.array', 'np.array', (['hbond_O_double_angles'], {}), '(hbond_O_double_angles)\n', (28397, 28420), True, 'import numpy as np\n'), ((28457, 28488), 'numpy.array', 'np.array', (['hbond_N_single_angles'], {}), '(hbond_N_single_angles)\n', (28465, 28488), True, 'import numpy as np\n'), ((28525, 28556), 'numpy.array', 'np.array', (['hbond_N_double_angles'], {}), '(hbond_N_double_angles)\n', (28533, 28556), True, 'import numpy as np\n'), ((28776, 28799), 'numpy.mean', 'np.mean', (['distance_diffs'], {}), '(distance_diffs)\n', (28783, 28799), True, 'import numpy as np\n'), ((28819, 28841), 'numpy.std', 'np.std', (['distance_diffs'], {}), '(distance_diffs)\n', (28825, 28841), True, 'import numpy as np\n'), ((29126, 29156), 'numpy.unique', 'np.unique', (['dist_pair_rank_list'], {}), '(dist_pair_rank_list)\n', (29135, 29156), True, 'import numpy as np\n'), ((29753, 29771), 'numpy.mean', 'np.mean', (['com_diffs'], {}), '(com_diffs)\n', (29760, 29771), True, 'import numpy as np\n'), ((29791, 29808), 'numpy.std', 'np.std', (['com_diffs'], {}), '(com_diffs)\n', (29797, 29808), True, 'import numpy as 
np\n'), ((30031, 30057), 'numpy.mean', 'np.mean', (['com_diffs'], {'axis': '(0)'}), '(com_diffs, axis=0)\n', (30038, 30057), True, 'import numpy as np\n'), ((30080, 30098), 'numpy.argmax', 'np.argmax', (['res_avg'], {}), '(res_avg)\n', (30089, 30098), True, 'import numpy as np\n'), ((30160, 30189), 'numpy.std', 'np.std', (['com_diffs[:, max_idx]'], {}), '(com_diffs[:, max_idx])\n', (30166, 30189), True, 'import numpy as np\n'), ((30502, 30520), 'numpy.mean', 'np.mean', (['pc1_diffs'], {}), '(pc1_diffs)\n', (30509, 30520), True, 'import numpy as np\n'), ((30540, 30557), 'numpy.std', 'np.std', (['pc1_diffs'], {}), '(pc1_diffs)\n', (30546, 30557), True, 'import numpy as np\n'), ((30788, 30814), 'numpy.mean', 'np.mean', (['pc1_diffs'], {'axis': '(0)'}), '(pc1_diffs, axis=0)\n', (30795, 30814), True, 'import numpy as np\n'), ((30837, 30855), 'numpy.argmax', 'np.argmax', (['res_avg'], {}), '(res_avg)\n', (30846, 30855), True, 'import numpy as np\n'), ((30917, 30946), 'numpy.std', 'np.std', (['pc1_diffs[:, max_idx]'], {}), '(pc1_diffs[:, max_idx])\n', (30923, 30946), True, 'import numpy as np\n'), ((31189, 31207), 'numpy.mean', 'np.mean', (['pc2_diffs'], {}), '(pc2_diffs)\n', (31196, 31207), True, 'import numpy as np\n'), ((31227, 31244), 'numpy.std', 'np.std', (['pc2_diffs'], {}), '(pc2_diffs)\n', (31233, 31244), True, 'import numpy as np\n'), ((31475, 31501), 'numpy.mean', 'np.mean', (['pc2_diffs'], {'axis': '(0)'}), '(pc2_diffs, axis=0)\n', (31482, 31501), True, 'import numpy as np\n'), ((31524, 31542), 'numpy.argmax', 'np.argmax', (['res_avg'], {}), '(res_avg)\n', (31533, 31542), True, 'import numpy as np\n'), ((31604, 31633), 'numpy.std', 'np.std', (['pc2_diffs[:, max_idx]'], {}), '(pc2_diffs[:, max_idx])\n', (31610, 31633), True, 'import numpy as np\n'), ((31876, 31894), 'numpy.mean', 'np.mean', (['pc3_diffs'], {}), '(pc3_diffs)\n', (31883, 31894), True, 'import numpy as np\n'), ((31914, 31931), 'numpy.std', 'np.std', (['pc3_diffs'], {}), '(pc3_diffs)\n', (31920, 
31931), True, 'import numpy as np\n'), ((32162, 32188), 'numpy.mean', 'np.mean', (['pc3_diffs'], {'axis': '(0)'}), '(pc3_diffs, axis=0)\n', (32169, 32188), True, 'import numpy as np\n'), ((32211, 32229), 'numpy.argmax', 'np.argmax', (['res_avg'], {}), '(res_avg)\n', (32220, 32229), True, 'import numpy as np\n'), ((32291, 32320), 'numpy.std', 'np.std', (['pc3_diffs[:, max_idx]'], {}), '(pc3_diffs[:, max_idx])\n', (32297, 32320), True, 'import numpy as np\n'), ((43247, 43261), 'numpy.mean', 'np.mean', (['a_len'], {}), '(a_len)\n', (43254, 43261), True, 'import numpy as np\n'), ((43282, 43295), 'numpy.std', 'np.std', (['a_len'], {}), '(a_len)\n', (43288, 43295), True, 'import numpy as np\n'), ((43506, 43520), 'numpy.mean', 'np.mean', (['b_len'], {}), '(b_len)\n', (43513, 43520), True, 'import numpy as np\n'), ((43540, 43553), 'numpy.std', 'np.std', (['b_len'], {}), '(b_len)\n', (43546, 43553), True, 'import numpy as np\n'), ((43764, 43778), 'numpy.mean', 'np.mean', (['c_len'], {}), '(c_len)\n', (43771, 43778), True, 'import numpy as np\n'), ((43798, 43811), 'numpy.std', 'np.std', (['c_len'], {}), '(c_len)\n', (43804, 43811), True, 'import numpy as np\n'), ((44110, 44124), 'numpy.mean', 'np.mean', (['alpha'], {}), '(alpha)\n', (44117, 44124), True, 'import numpy as np\n'), ((44144, 44157), 'numpy.std', 'np.std', (['alpha'], {}), '(alpha)\n', (44150, 44157), True, 'import numpy as np\n'), ((44372, 44385), 'numpy.mean', 'np.mean', (['beta'], {}), '(beta)\n', (44379, 44385), True, 'import numpy as np\n'), ((44405, 44417), 'numpy.std', 'np.std', (['beta'], {}), '(beta)\n', (44411, 44417), True, 'import numpy as np\n'), ((44631, 44645), 'numpy.mean', 'np.mean', (['gamma'], {}), '(gamma)\n', (44638, 44645), True, 'import numpy as np\n'), ((44665, 44678), 'numpy.std', 'np.std', (['gamma'], {}), '(gamma)\n', (44671, 44678), True, 'import numpy as np\n'), ((46121, 46142), 'numpy.mean', 'np.mean', (['density_xtal'], {}), '(density_xtal)\n', (46128, 46142), True, 'import numpy as 
np\n'), ((46161, 46181), 'numpy.std', 'np.std', (['density_xtal'], {}), '(density_xtal)\n', (46167, 46181), True, 'import numpy as np\n'), ((46483, 46555), 'gemmi.cif.read', 'gemmi.cif.read', (["input_dict[crystal_name]['experiment']['experiment-cif']"], {}), "(input_dict[crystal_name]['experiment']['experiment-cif'])\n", (46497, 46555), False, 'import gemmi\n'), ((12888, 12902), 'numpy.max', 'np.max', (['a_idxs'], {}), '(a_idxs)\n', (12894, 12902), True, 'import numpy as np\n'), ((12905, 12919), 'numpy.min', 'np.min', (['a_idxs'], {}), '(a_idxs)\n', (12911, 12919), True, 'import numpy as np\n'), ((12948, 12962), 'numpy.max', 'np.max', (['b_idxs'], {}), '(b_idxs)\n', (12954, 12962), True, 'import numpy as np\n'), ((12965, 12979), 'numpy.min', 'np.min', (['b_idxs'], {}), '(b_idxs)\n', (12971, 12979), True, 'import numpy as np\n'), ((13008, 13022), 'numpy.max', 'np.max', (['c_idxs'], {}), '(c_idxs)\n', (13014, 13022), True, 'import numpy as np\n'), ((13025, 13039), 'numpy.min', 'np.min', (['c_idxs'], {}), '(c_idxs)\n', (13031, 13039), True, 'import numpy as np\n'), ((19821, 19850), 'os.path.splitext', 'os.path.splitext', (['output_traj'], {}), '(output_traj)\n', (19837, 19850), False, 'import os\n'), ((19956, 19999), 'mdtraj.load', 'md.load', (['output_traj'], {'top': 'ref_strc.topology'}), '(output_traj, top=ref_strc.topology)\n', (19963, 19999), True, 'import mdtraj as md\n'), ((29260, 29294), 'numpy.mean', 'np.mean', (['distance_diffs[:, valids]'], {}), '(distance_diffs[:, valids])\n', (29267, 29294), True, 'import numpy as np\n'), ((32736, 32765), 'numpy.mean', 'np.mean', (['hbond_O_single_diffs'], {}), '(hbond_O_single_diffs)\n', (32743, 32765), True, 'import numpy as np\n'), ((32789, 32817), 'numpy.std', 'np.std', (['hbond_O_single_diffs'], {}), '(hbond_O_single_diffs)\n', (32795, 32817), True, 'import numpy as np\n'), ((32947, 32985), 'numpy.unique', 'np.unique', (['acc_O_single_pair_rank_list'], {}), '(acc_O_single_pair_rank_list)\n', (32956, 32985), True, 
'import numpy as np\n'), ((33909, 33939), 'numpy.mean', 'np.mean', (['hbond_O_single_angles'], {}), '(hbond_O_single_angles)\n', (33916, 33939), True, 'import numpy as np\n'), ((33963, 33992), 'numpy.std', 'np.std', (['hbond_O_single_angles'], {}), '(hbond_O_single_angles)\n', (33969, 33992), True, 'import numpy as np\n'), ((34189, 34227), 'numpy.unique', 'np.unique', (['acc_O_single_pair_rank_list'], {}), '(acc_O_single_pair_rank_list)\n', (34198, 34227), True, 'import numpy as np\n'), ((35364, 35393), 'numpy.mean', 'np.mean', (['hbond_O_double_diffs'], {}), '(hbond_O_double_diffs)\n', (35371, 35393), True, 'import numpy as np\n'), ((35417, 35445), 'numpy.std', 'np.std', (['hbond_O_double_diffs'], {}), '(hbond_O_double_diffs)\n', (35423, 35445), True, 'import numpy as np\n'), ((35575, 35613), 'numpy.unique', 'np.unique', (['acc_O_double_pair_rank_list'], {}), '(acc_O_double_pair_rank_list)\n', (35584, 35613), True, 'import numpy as np\n'), ((36541, 36571), 'numpy.mean', 'np.mean', (['hbond_O_double_angles'], {}), '(hbond_O_double_angles)\n', (36548, 36571), True, 'import numpy as np\n'), ((36595, 36624), 'numpy.std', 'np.std', (['hbond_O_double_angles'], {}), '(hbond_O_double_angles)\n', (36601, 36624), True, 'import numpy as np\n'), ((36821, 36859), 'numpy.unique', 'np.unique', (['acc_O_double_pair_rank_list'], {}), '(acc_O_double_pair_rank_list)\n', (36830, 36859), True, 'import numpy as np\n'), ((37996, 38025), 'numpy.mean', 'np.mean', (['hbond_N_single_diffs'], {}), '(hbond_N_single_diffs)\n', (38003, 38025), True, 'import numpy as np\n'), ((38049, 38077), 'numpy.std', 'np.std', (['hbond_N_single_diffs'], {}), '(hbond_N_single_diffs)\n', (38055, 38077), True, 'import numpy as np\n'), ((38207, 38245), 'numpy.unique', 'np.unique', (['acc_N_single_pair_rank_list'], {}), '(acc_N_single_pair_rank_list)\n', (38216, 38245), True, 'import numpy as np\n'), ((39169, 39199), 'numpy.mean', 'np.mean', (['hbond_N_single_angles'], {}), '(hbond_N_single_angles)\n', (39176, 
39199), True, 'import numpy as np\n'), ((39223, 39252), 'numpy.std', 'np.std', (['hbond_N_single_angles'], {}), '(hbond_N_single_angles)\n', (39229, 39252), True, 'import numpy as np\n'), ((39449, 39487), 'numpy.unique', 'np.unique', (['acc_N_single_pair_rank_list'], {}), '(acc_N_single_pair_rank_list)\n', (39458, 39487), True, 'import numpy as np\n'), ((40624, 40653), 'numpy.mean', 'np.mean', (['hbond_N_double_diffs'], {}), '(hbond_N_double_diffs)\n', (40631, 40653), True, 'import numpy as np\n'), ((40677, 40705), 'numpy.std', 'np.std', (['hbond_N_double_diffs'], {}), '(hbond_N_double_diffs)\n', (40683, 40705), True, 'import numpy as np\n'), ((40835, 40873), 'numpy.unique', 'np.unique', (['acc_N_double_pair_rank_list'], {}), '(acc_N_double_pair_rank_list)\n', (40844, 40873), True, 'import numpy as np\n'), ((41801, 41831), 'numpy.mean', 'np.mean', (['hbond_N_double_angles'], {}), '(hbond_N_double_angles)\n', (41808, 41831), True, 'import numpy as np\n'), ((41855, 41884), 'numpy.std', 'np.std', (['hbond_N_double_angles'], {}), '(hbond_N_double_angles)\n', (41861, 41884), True, 'import numpy as np\n'), ((42081, 42119), 'numpy.unique', 'np.unique', (['acc_N_double_pair_rank_list'], {}), '(acc_N_double_pair_rank_list)\n', (42090, 42119), True, 'import numpy as np\n'), ((44991, 45009), 'numpy.array', 'np.array', (['ene_xtal'], {}), '(ene_xtal)\n', (44999, 45009), True, 'import numpy as np\n'), ((45046, 45063), 'numpy.array', 'np.array', (['ene_gas'], {}), '(ene_gas)\n', (45054, 45063), True, 'import numpy as np\n'), ((45490, 45514), 'numpy.sqrt', 'np.sqrt', (['sublimation_std'], {}), '(sublimation_std)\n', (45497, 45514), True, 'import numpy as np\n'), ((10336, 10356), 'numpy.isnan', 'np.isnan', (['data_value'], {}), '(data_value)\n', (10344, 10356), True, 'import numpy as np\n'), ((10360, 10380), 'numpy.isinf', 'np.isinf', (['data_value'], {}), '(data_value)\n', (10368, 10380), True, 'import numpy as np\n'), ((11265, 11283), 'numpy.isnan', 'np.isnan', (['data_std'], 
{}), '(data_std)\n', (11273, 11283), True, 'import numpy as np\n'), ((11287, 11305), 'numpy.isinf', 'np.isinf', (['data_std'], {}), '(data_std)\n', (11295, 11305), True, 'import numpy as np\n'), ((18665, 18693), 'os.path.splitext', 'os.path.splitext', (['output_csv'], {}), '(output_csv)\n', (18681, 18693), False, 'import os\n'), ((18952, 18980), 'os.path.splitext', 'os.path.splitext', (['output_csv'], {}), '(output_csv)\n', (18968, 18980), False, 'import os\n'), ((19313, 19341), 'os.path.splitext', 'os.path.splitext', (['output_csv'], {}), '(output_csv)\n', (19329, 19341), False, 'import os\n'), ((19579, 19607), 'os.path.splitext', 'os.path.splitext', (['output_csv'], {}), '(output_csv)\n', (19595, 19607), False, 'import os\n'), ((20928, 20950), 'mdtraj.density', 'md.density', (['query_traj'], {}), '(query_traj)\n', (20938, 20950), True, 'import mdtraj as md\n'), ((21648, 21671), 'numpy.abs', 'np.abs', (['_distance_diffs'], {}), '(_distance_diffs)\n', (21654, 21671), True, 'import numpy as np\n'), ((21718, 21736), 'numpy.abs', 'np.abs', (['_com_diffs'], {}), '(_com_diffs)\n', (21724, 21736), True, 'import numpy as np\n'), ((25982, 26016), 'numpy.vstack', 'np.vstack', (['(com_diffs, _com_diffs)'], {}), '((com_diffs, _com_diffs))\n', (25991, 26016), True, 'import numpy as np\n'), ((26054, 26098), 'numpy.vstack', 'np.vstack', (['(distance_diffs, _distance_diffs)'], {}), '((distance_diffs, _distance_diffs))\n', (26063, 26098), True, 'import numpy as np\n'), ((26131, 26173), 'numpy.vstack', 'np.vstack', (['(pc1_diffs, _pc_diffs[:, :, 0])'], {}), '((pc1_diffs, _pc_diffs[:, :, 0]))\n', (26140, 26173), True, 'import numpy as np\n'), ((26204, 26246), 'numpy.vstack', 'np.vstack', (['(pc2_diffs, _pc_diffs[:, :, 1])'], {}), '((pc2_diffs, _pc_diffs[:, :, 1]))\n', (26213, 26246), True, 'import numpy as np\n'), ((26277, 26319), 'numpy.vstack', 'np.vstack', (['(pc3_diffs, _pc_diffs[:, :, 2])'], {}), '((pc3_diffs, _pc_diffs[:, :, 2]))\n', (26286, 26319), True, 'import numpy as 
np\n'), ((29185, 29229), 'numpy.where', 'np.where', (['(unique_rank == dist_pair_rank_list)'], {}), '(unique_rank == dist_pair_rank_list)\n', (29193, 29229), True, 'import numpy as np\n'), ((29402, 29435), 'numpy.std', 'np.std', (['distance_diffs[:, valids]'], {}), '(distance_diffs[:, valids])\n', (29408, 29435), True, 'import numpy as np\n'), ((33105, 33145), 'numpy.mean', 'np.mean', (['hbond_O_single_diffs[:, valids]'], {}), '(hbond_O_single_diffs[:, valids])\n', (33112, 33145), True, 'import numpy as np\n'), ((34347, 34388), 'numpy.mean', 'np.mean', (['hbond_O_single_angles[:, valids]'], {}), '(hbond_O_single_angles[:, valids])\n', (34354, 34388), True, 'import numpy as np\n'), ((35733, 35773), 'numpy.mean', 'np.mean', (['hbond_O_double_diffs[:, valids]'], {}), '(hbond_O_double_diffs[:, valids])\n', (35740, 35773), True, 'import numpy as np\n'), ((36979, 37020), 'numpy.mean', 'np.mean', (['hbond_O_double_angles[:, valids]'], {}), '(hbond_O_double_angles[:, valids])\n', (36986, 37020), True, 'import numpy as np\n'), ((38365, 38405), 'numpy.mean', 'np.mean', (['hbond_N_single_diffs[:, valids]'], {}), '(hbond_N_single_diffs[:, valids])\n', (38372, 38405), True, 'import numpy as np\n'), ((39607, 39648), 'numpy.mean', 'np.mean', (['hbond_N_single_angles[:, valids]'], {}), '(hbond_N_single_angles[:, valids])\n', (39614, 39648), True, 'import numpy as np\n'), ((40993, 41033), 'numpy.mean', 'np.mean', (['hbond_N_double_diffs[:, valids]'], {}), '(hbond_N_double_diffs[:, valids])\n', (41000, 41033), True, 'import numpy as np\n'), ((42239, 42280), 'numpy.mean', 'np.mean', (['hbond_N_double_angles[:, valids]'], {}), '(hbond_N_double_angles[:, valids])\n', (42246, 42280), True, 'import numpy as np\n'), ((45221, 45237), 'numpy.mean', 'np.mean', (['ene_gas'], {}), '(ene_gas)\n', (45228, 45237), True, 'import numpy as np\n'), ((45420, 45436), 'numpy.var', 'np.var', (['ene_xtal'], {}), '(ene_xtal)\n', (45426, 45436), True, 'import numpy as np\n'), ((45439, 45454), 'numpy.var', 
'np.var', (['ene_gas'], {}), '(ene_gas)\n', (45445, 45454), True, 'import numpy as np\n'), ((18890, 18910), 'numpy.isnan', 'np.isnan', (['_potential'], {}), '(_potential)\n', (18898, 18910), True, 'import numpy as np\n'), ((19517, 19537), 'numpy.isnan', 'np.isnan', (['_potential'], {}), '(_potential)\n', (19525, 19537), True, 'import numpy as np\n'), ((26417, 26473), 'numpy.vstack', 'np.vstack', (['(hbond_O_single_diffs, _hbond_O_single_diffs)'], {}), '((hbond_O_single_diffs, _hbond_O_single_diffs))\n', (26426, 26473), True, 'import numpy as np\n'), ((26523, 26581), 'numpy.vstack', 'np.vstack', (['(hbond_O_single_angles, _hbond_O_single_angles)'], {}), '((hbond_O_single_angles, _hbond_O_single_angles))\n', (26532, 26581), True, 'import numpy as np\n'), ((26681, 26737), 'numpy.vstack', 'np.vstack', (['(hbond_O_double_diffs, _hbond_O_double_diffs)'], {}), '((hbond_O_double_diffs, _hbond_O_double_diffs))\n', (26690, 26737), True, 'import numpy as np\n'), ((26786, 26844), 'numpy.vstack', 'np.vstack', (['(hbond_O_double_angles, _hbond_O_double_angles)'], {}), '((hbond_O_double_angles, _hbond_O_double_angles))\n', (26795, 26844), True, 'import numpy as np\n'), ((26944, 27000), 'numpy.vstack', 'np.vstack', (['(hbond_N_single_diffs, _hbond_N_single_diffs)'], {}), '((hbond_N_single_diffs, _hbond_N_single_diffs))\n', (26953, 27000), True, 'import numpy as np\n'), ((27049, 27107), 'numpy.vstack', 'np.vstack', (['(hbond_N_single_angles, _hbond_N_single_angles)'], {}), '((hbond_N_single_angles, _hbond_N_single_angles))\n', (27058, 27107), True, 'import numpy as np\n'), ((27207, 27263), 'numpy.vstack', 'np.vstack', (['(hbond_N_double_diffs, _hbond_N_double_diffs)'], {}), '((hbond_N_double_diffs, _hbond_N_double_diffs))\n', (27216, 27263), True, 'import numpy as np\n'), ((27312, 27370), 'numpy.vstack', 'np.vstack', (['(hbond_N_double_angles, _hbond_N_double_angles)'], {}), '((hbond_N_double_angles, _hbond_N_double_angles))\n', (27321, 27370), True, 'import numpy as np\n'), 
((33018, 33070), 'numpy.where', 'np.where', (['(unique_rank == acc_O_single_pair_rank_list)'], {}), '(unique_rank == acc_O_single_pair_rank_list)\n', (33026, 33070), True, 'import numpy as np\n'), ((33265, 33304), 'numpy.std', 'np.std', (['hbond_O_single_diffs[:, valids]'], {}), '(hbond_O_single_diffs[:, valids])\n', (33271, 33304), True, 'import numpy as np\n'), ((34260, 34312), 'numpy.where', 'np.where', (['(unique_rank == acc_O_single_pair_rank_list)'], {}), '(unique_rank == acc_O_single_pair_rank_list)\n', (34268, 34312), True, 'import numpy as np\n'), ((34508, 34548), 'numpy.std', 'np.std', (['hbond_O_single_angles[:, valids]'], {}), '(hbond_O_single_angles[:, valids])\n', (34514, 34548), True, 'import numpy as np\n'), ((34668, 34708), 'numpy.std', 'np.std', (['hbond_O_single_angles[:, valids]'], {}), '(hbond_O_single_angles[:, valids])\n', (34674, 34708), True, 'import numpy as np\n'), ((35646, 35698), 'numpy.where', 'np.where', (['(unique_rank == acc_O_double_pair_rank_list)'], {}), '(unique_rank == acc_O_double_pair_rank_list)\n', (35654, 35698), True, 'import numpy as np\n'), ((35893, 35932), 'numpy.std', 'np.std', (['hbond_O_double_diffs[:, valids]'], {}), '(hbond_O_double_diffs[:, valids])\n', (35899, 35932), True, 'import numpy as np\n'), ((36892, 36944), 'numpy.where', 'np.where', (['(unique_rank == acc_O_double_pair_rank_list)'], {}), '(unique_rank == acc_O_double_pair_rank_list)\n', (36900, 36944), True, 'import numpy as np\n'), ((37140, 37180), 'numpy.std', 'np.std', (['hbond_O_double_angles[:, valids]'], {}), '(hbond_O_double_angles[:, valids])\n', (37146, 37180), True, 'import numpy as np\n'), ((37300, 37340), 'numpy.std', 'np.std', (['hbond_O_double_angles[:, valids]'], {}), '(hbond_O_double_angles[:, valids])\n', (37306, 37340), True, 'import numpy as np\n'), ((38278, 38330), 'numpy.where', 'np.where', (['(unique_rank == acc_N_single_pair_rank_list)'], {}), '(unique_rank == acc_N_single_pair_rank_list)\n', (38286, 38330), True, 'import numpy as 
np\n'), ((38525, 38564), 'numpy.std', 'np.std', (['hbond_N_single_diffs[:, valids]'], {}), '(hbond_N_single_diffs[:, valids])\n', (38531, 38564), True, 'import numpy as np\n'), ((39520, 39572), 'numpy.where', 'np.where', (['(unique_rank == acc_N_single_pair_rank_list)'], {}), '(unique_rank == acc_N_single_pair_rank_list)\n', (39528, 39572), True, 'import numpy as np\n'), ((39768, 39808), 'numpy.std', 'np.std', (['hbond_N_single_angles[:, valids]'], {}), '(hbond_N_single_angles[:, valids])\n', (39774, 39808), True, 'import numpy as np\n'), ((39928, 39968), 'numpy.std', 'np.std', (['hbond_N_single_angles[:, valids]'], {}), '(hbond_N_single_angles[:, valids])\n', (39934, 39968), True, 'import numpy as np\n'), ((40906, 40958), 'numpy.where', 'np.where', (['(unique_rank == acc_N_double_pair_rank_list)'], {}), '(unique_rank == acc_N_double_pair_rank_list)\n', (40914, 40958), True, 'import numpy as np\n'), ((41153, 41192), 'numpy.std', 'np.std', (['hbond_N_double_diffs[:, valids]'], {}), '(hbond_N_double_diffs[:, valids])\n', (41159, 41192), True, 'import numpy as np\n'), ((42152, 42204), 'numpy.where', 'np.where', (['(unique_rank == acc_N_double_pair_rank_list)'], {}), '(unique_rank == acc_N_double_pair_rank_list)\n', (42160, 42204), True, 'import numpy as np\n'), ((42400, 42440), 'numpy.std', 'np.std', (['hbond_N_double_angles[:, valids]'], {}), '(hbond_N_double_angles[:, valids])\n', (42406, 42440), True, 'import numpy as np\n'), ((42560, 42600), 'numpy.std', 'np.std', (['hbond_N_double_angles[:, valids]'], {}), '(hbond_N_double_angles[:, valids])\n', (42566, 42600), True, 'import numpy as np\n'), ((45240, 45257), 'numpy.mean', 'np.mean', (['ene_xtal'], {}), '(ene_xtal)\n', (45247, 45257), True, 'import numpy as np\n')] |
import sys
import json
import numpy as np
import argparse
from pathlib import Path
import logging
from logging.config import fileConfig
import cv2
import pickle
from deeptennis.vision.transforms import BoundingBox
def dilate_image(image, thresh_low=180):
resized = image
gray = cv2.cvtColor(resized, cv2.COLOR_BGR2GRAY)
kernel = cv2.getStructuringElement(cv2.MORPH_CROSS, (3,3))
opening = cv2.morphologyEx(gray, cv2.MORPH_OPEN, kernel, iterations=2)
gray = gray - opening
ret, mask = cv2.threshold(gray, thresh_low, 255, cv2.THRESH_BINARY)
image_final = cv2.bitwise_and(gray, gray, mask=mask)
ret, new_img = cv2.threshold(image_final, thresh_low, 255, cv2.THRESH_BINARY)
kernel = cv2.getStructuringElement(cv2.MORPH_CROSS, (3, 3))
dilated = cv2.dilate(new_img, kernel, iterations=2)
return dilated, gray
def find_text(dilated, min_w=5, min_h=5):
_, contours, hierarchy = cv2.findContours(dilated, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
mx_right = 0
for contour in contours:
[x, y, w, h] = cv2.boundingRect(contour)
if w < min_w or h < min_h:
continue
mx_right = max(mx_right, x + w)
return mx_right
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--frames-path", type=str)
parser.add_argument("--save-path", type=str, default=None)
parser.add_argument("--meta-file", type=str, default=None)
parser.add_argument("--outline-threshold", type=int, default=150)
parser.add_argument("--segmentation", type=int, default=0)
args = parser.parse_args()
fileConfig('logging_config.ini')
with open(args.meta_file, 'r') as f:
match_metas = json.load(f)
frames_path = Path(args.frames_path)
save_path = Path(args.save_path)
if not save_path.parent.exists():
save_path.parent.mkdir()
match_name = frames_path.stem
match_meta = match_metas.get(match_name, None)
if match_meta is None:
sys.exit(0)
frame_list = np.array(list(sorted(frames_path.iterdir())))
x, y, w, h = match_meta['box']
invert = match_meta['invert']
min_w, min_h = match_meta['min_score_text_width'], match_meta['min_score_text_height']
thresh_low = match_meta['score_thresh_low']
min_score_width = match_meta['min_score_width']
logging.debug(f"Begin bounding box detection for {match_name}")
score_boxes = []
im_sizes = []
for frame in frame_list:
img = cv2.imread(str(frame))
im_sizes.append(img.shape)
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
img = img[y:y + h, x:x + w].astype(np.int32)
img = 255 - img if invert else img
img = img.astype(np.uint8)
dilated, g = dilate_image(img, thresh_low=thresh_low)
score_width = find_text(dilated, min_w=min_w, min_h=min_h)
if score_width < min_score_width:
score_boxes.append([0, 0, 0, 0])
else:
score_boxes.append([x, y, score_width, h])
if args.segmentation:
if not save_path.exists():
save_path.mkdir(parents=True)
for i, (imsize, box) in enumerate(zip(im_sizes, score_boxes)):
mask = np.zeros((imsize[0], imsize[1]), dtype=np.uint8)
bbox = BoundingBox.from_box(box)
mask = cv2.fillConvexPoly(mask, np.array(bbox.as_list(), dtype=np.int64).reshape(4, 2), 1)
filename = (save_path / frame_list[i].name).with_suffix(".png")
cv2.imwrite(str(filename), mask)
else:
save_list = list(zip([f.name for f in frame_list], score_boxes))
with open(save_path, 'wb') as save_file:
pickle.dump(save_list, save_file)
| [
"pickle.dump",
"json.load",
"logging.debug",
"argparse.ArgumentParser",
"cv2.bitwise_and",
"cv2.dilate",
"cv2.cvtColor",
"cv2.getStructuringElement",
"cv2.threshold",
"cv2.morphologyEx",
"numpy.zeros",
"pathlib.Path",
"deeptennis.vision.transforms.BoundingBox.from_box",
"sys.exit",
"cv2.... | [((290, 331), 'cv2.cvtColor', 'cv2.cvtColor', (['resized', 'cv2.COLOR_BGR2GRAY'], {}), '(resized, cv2.COLOR_BGR2GRAY)\n', (302, 331), False, 'import cv2\n'), ((345, 395), 'cv2.getStructuringElement', 'cv2.getStructuringElement', (['cv2.MORPH_CROSS', '(3, 3)'], {}), '(cv2.MORPH_CROSS, (3, 3))\n', (370, 395), False, 'import cv2\n'), ((409, 469), 'cv2.morphologyEx', 'cv2.morphologyEx', (['gray', 'cv2.MORPH_OPEN', 'kernel'], {'iterations': '(2)'}), '(gray, cv2.MORPH_OPEN, kernel, iterations=2)\n', (425, 469), False, 'import cv2\n'), ((512, 567), 'cv2.threshold', 'cv2.threshold', (['gray', 'thresh_low', '(255)', 'cv2.THRESH_BINARY'], {}), '(gray, thresh_low, 255, cv2.THRESH_BINARY)\n', (525, 567), False, 'import cv2\n'), ((586, 624), 'cv2.bitwise_and', 'cv2.bitwise_and', (['gray', 'gray'], {'mask': 'mask'}), '(gray, gray, mask=mask)\n', (601, 624), False, 'import cv2\n'), ((644, 706), 'cv2.threshold', 'cv2.threshold', (['image_final', 'thresh_low', '(255)', 'cv2.THRESH_BINARY'], {}), '(image_final, thresh_low, 255, cv2.THRESH_BINARY)\n', (657, 706), False, 'import cv2\n'), ((720, 770), 'cv2.getStructuringElement', 'cv2.getStructuringElement', (['cv2.MORPH_CROSS', '(3, 3)'], {}), '(cv2.MORPH_CROSS, (3, 3))\n', (745, 770), False, 'import cv2\n'), ((785, 826), 'cv2.dilate', 'cv2.dilate', (['new_img', 'kernel'], {'iterations': '(2)'}), '(new_img, kernel, iterations=2)\n', (795, 826), False, 'import cv2\n'), ((925, 992), 'cv2.findContours', 'cv2.findContours', (['dilated', 'cv2.RETR_EXTERNAL', 'cv2.CHAIN_APPROX_NONE'], {}), '(dilated, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)\n', (941, 992), False, 'import cv2\n'), ((1245, 1270), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (1268, 1270), False, 'import argparse\n'), ((1617, 1649), 'logging.config.fileConfig', 'fileConfig', (['"""logging_config.ini"""'], {}), "('logging_config.ini')\n", (1627, 1649), False, 'from logging.config import fileConfig\n'), ((1746, 1768), 'pathlib.Path', 'Path', 
(['args.frames_path'], {}), '(args.frames_path)\n', (1750, 1768), False, 'from pathlib import Path\n'), ((1785, 1805), 'pathlib.Path', 'Path', (['args.save_path'], {}), '(args.save_path)\n', (1789, 1805), False, 'from pathlib import Path\n'), ((2338, 2401), 'logging.debug', 'logging.debug', (['f"""Begin bounding box detection for {match_name}"""'], {}), "(f'Begin bounding box detection for {match_name}')\n", (2351, 2401), False, 'import logging\n'), ((1062, 1087), 'cv2.boundingRect', 'cv2.boundingRect', (['contour'], {}), '(contour)\n', (1078, 1087), False, 'import cv2\n'), ((1714, 1726), 'json.load', 'json.load', (['f'], {}), '(f)\n', (1723, 1726), False, 'import json\n'), ((1998, 2009), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (2006, 2009), False, 'import sys\n'), ((2556, 2592), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_BGR2RGB'], {}), '(img, cv2.COLOR_BGR2RGB)\n', (2568, 2592), False, 'import cv2\n'), ((3203, 3251), 'numpy.zeros', 'np.zeros', (['(imsize[0], imsize[1])'], {'dtype': 'np.uint8'}), '((imsize[0], imsize[1]), dtype=np.uint8)\n', (3211, 3251), True, 'import numpy as np\n'), ((3271, 3296), 'deeptennis.vision.transforms.BoundingBox.from_box', 'BoundingBox.from_box', (['box'], {}), '(box)\n', (3291, 3296), False, 'from deeptennis.vision.transforms import BoundingBox\n'), ((3665, 3698), 'pickle.dump', 'pickle.dump', (['save_list', 'save_file'], {}), '(save_list, save_file)\n', (3676, 3698), False, 'import pickle\n')] |
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import numpy as np
import paddle
from paddle.incubate.optimizer.functional import bfgs_iterates, bfgs_minimize
from paddle.incubate.optimizer.functional.bfgs import (
SearchState,
verify_symmetric_positive_definite_matrix,
update_approx_inverse_hessian)
from paddle.incubate.optimizer.functional.bfgs_utils import (
vjp, vnorm_inf
)
import tensorflow as tf
def jacfn_gen(f, create_graph=False):
r"""Returns a helper function for computing the jacobians.
Requires `f` to be single valued function. Batch input is allowed.
The returned function, when called, returns a tensor of [Batched] gradients.
"""
def jac(x):
fval, grads = vjp(f, x, create_graph=create_graph)
return grads
return jac
def hesfn_gen(f):
r"""Returns a helper function for computing the hessians.
Requires `f` to be single valued function. Batch input is allowed.
The returned function, when called, returns a tensor of [Batched]
second order partial derivatives.
"""
def hess(x):
y = f(x)
batch_mode = len(x.shape) > 1
dim = x.shape[-1]
vs = []
for i in range(dim):
v = paddle.zeros_like(x)
if batch_mode:
v[..., i] = 1
else:
v[i] = 1
v = v.detach()
vs.append(v)
jacfn = jacfn_gen(f, create_graph=True)
rows = [vjp(jacfn, x, v)[1] for v in vs]
h = paddle.stack(rows, axis=-2)
return h
return hess
def verify_symmetric(H):
perm = list(range(len(H.shape)))
perm[-2:] = perm[-1], perm[-2]
# batch_deltas = paddle.norm(H - H.transpose(perm), axis=perm[-2:])
# is_symmetic = paddle.sum(batch_deltas) == 0.0
assert paddle.allclose(H, H.transpose(perm)), (
f"(Batched) matrix {H} is not symmetric."
)
@paddle.no_grad()
def update_inv_hessian_strict(bat, H, s, y):
dtype = H.dtype
dim = s.shape[-1]
if bat:
rho = 1. / paddle.einsum('...i, ...i', s, y)
else:
rho = 1. / paddle.dot(s, y)
rho = rho.unsqueeze(-1).unsqueeze(-1) if bat else rho.unsqueeze(-1)
I = paddle.eye(dim, dtype=dtype)
sy = paddle.einsum('...ij,...i,...j->...ij', rho, s, y)
l = I - sy
L = paddle.cholesky(H)
lL = paddle.matmul(l, L)
Lr = paddle.einsum('...ij->...ji', lL)
lHr = paddle.matmul(lL, Lr)
verify_symmetric(lHr)
rsTs = paddle.einsum('...ij,...i,...j->...ij', rho, s, s)
H_next = lHr + rsTs
verify_symmetric_positive_definite_matrix(H_next)
return H_next
def quadratic_gen(shape, dtype):
center = paddle.rand(shape, dtype=dtype)
hessian_shape = shape + shape[-1:]
rotation = paddle.rand(hessian_shape, dtype=dtype)
# hessian = paddle.einsum('...ik, ...jk', rotation, rotation)
hessian = paddle.matmul(rotation, rotation, transpose_y=True)
if shape[-1] > 1:
verify_symmetric_positive_definite_matrix(hessian)
else:
hessian = paddle.abs(hessian)
f = lambda x: paddle.sum((x - center) * hessian.squeeze(-1) * (x - center), axis=-1)
return f, center
def f(x):
# (TODO:Tongxin) einsum may internally rely on dy2static which
# does not support higher order gradients.
# y = paddle.einsum('...i, ...ij, ...j',
# x - center,
# hessian,
# x - center)
leftprod = paddle.matmul(hessian, (x - center).unsqueeze(-1))
y = paddle.matmul((x - center).unsqueeze(-2), leftprod)
if len(shape) > 1:
y = y.reshape(shape[:-1])
else:
y = y.reshape([1])
return y
return f, center
class TestBFGS(unittest.TestCase):
def setUp(self):
pass
def gen_configs(self):
dtypes = ['float32', 'float64']
shapes = {
# '1d2v': [2],
'2d2v': [2, 2],
'1d50v': [50],
'10d10v': [10, 10],
'1d1v': [1],
'2d1v': [2, 1],
}
for dtype in dtypes:
for shape in shapes.values():
yield shape, dtype
def test_update_approx_inverse_hessian(self):
paddle.seed(1234)
for shape, dtype in self.gen_configs():
bat = len(shape) > 1
# only supports shapes with up to 2 dims.
f, center = quadratic_gen(shape, dtype)
# x0 = paddle.ones(shape, dtype=dtype)
x0 = paddle.rand(shape, dtype=dtype)
# The true inverse hessian value at x0
hess = hesfn_gen(f)(x0)
verify_symmetric_positive_definite_matrix(hess)
hess_np = hess.numpy()
hess_np_inv = np.linalg.inv(hess_np)
h0 = paddle.to_tensor(hess_np_inv)
verify_symmetric_positive_definite_matrix(h0)
f0, g0 = vjp(f, x0)
gnorm = vnorm_inf(f0)
state = SearchState(bat, x0, f0, g0, h0, gnorm)
# Verifies the two estimated invese Hessians are close
for _ in range(5):
s = paddle.rand(shape, dtype=dtype)
x1 = x0 + s
f1, g1 = vjp(f, x1)
y = g1 - g0
h1 = update_approx_inverse_hessian(state, h0, s, y)
h1_strict = update_inv_hessian_strict(bat, h0, s, y)
verify_symmetric_positive_definite_matrix(h1)
verify_symmetric_positive_definite_matrix(h1_strict)
self.assertTrue(True)
def test_quadratic(self):
paddle.seed(12345)
for shape, dtype in self.gen_configs():
f, center = quadratic_gen(shape, dtype)
print(f'center {center}')
print(f'f {f(center)}')
x0 = paddle.ones(shape, dtype=dtype)
result = bfgs_minimize(f, x0, dtype=dtype, iters=100, ls_iters=100)
print(result)
self.assertTrue(paddle.all(result.converged))
self.assertTrue(paddle.allclose(result.x_location, center))
# shape = [2]
# dtype = 'float32'
# center = np.random.rand(2)
# scales = np.array([0.2, 1.5])
# x0 = np.ones_like(center)
# s = np.array([-0.5, -0.5])
# # scales = paddle.square(paddle.rand(shape, dtype=dtype))
# # # TF results as reference
# # center_tf = tf.convert_to_tensor(center)
# # x0_tf = tf.convert_to_tensor(x0)
# # s_tf = tf.convert_to_tensor(s)
# # def f_tf(x):
# # return tf.reduce_sum(tf.square(x - center_tf), axis=-1)
# # with tf.GradientTape() as tape:
# # tape.watch(x0_tf)
# # y = f_tf(x0_tf)
# # g0_tf = tape.gradient(y, x0_tf)
# # x1_tf = x0_tf + s_tf
# # with tf.GradientTape() as tape:
# # tape.watch(x1_tf)
# # y = f_tf(x1_tf)
# # g1_tf = tape.gradient(y, x1_tf)
# # y_tf = g1_tf - g0_tf
# # h0_tf = tf.linalg.inv(tf.linalg.diag(g0_tf))
# # normalization_factor = tf.tensordot(s_tf, y_tf, 1)
# # h1_tf = tf_inv_hessian_update(y_tf, s_tf, normalization_factor, h0_tf)
# # Applies the proper update rules.
# center_pp = paddle.to_tensor(center)
# scales_pp = paddle.to_tensor(scales)
# x0_pp = paddle.to_tensor(x0)
# s_pp = paddle.to_tensor(s)
# def f(x):
# return paddle.sum(paddle.square(x - center_pp), axis=-1)
# f0_pp, g0_pp = vjp(f, x0_pp)
# h0_pp = paddle.inverse(paddle.diag(g0_pp))
# state = SearchState(x0_pp, f0_pp, g0_pp, h0_pp)
# x1_pp = x0_pp + s_pp
# f1_pp, g1_pp = vjp(f, x1_pp)
# y_pp = g1_pp - g0_pp
# h1_pp = update_approx_inverse_hessian(state, h0_pp, s_pp, y_pp)
# h1_pp_proper = update_inv_hessian_strict(h0_pp, s_pp, y_pp)
if __name__ == "__main__":
unittest.main()
| [
"paddle.incubate.optimizer.functional.bfgs_minimize",
"paddle.stack",
"paddle.incubate.optimizer.functional.bfgs_utils.vnorm_inf",
"paddle.incubate.optimizer.functional.bfgs.verify_symmetric_positive_definite_matrix",
"paddle.allclose",
"paddle.cholesky",
"paddle.rand",
"unittest.main",
"paddle.incu... | [((2495, 2511), 'paddle.no_grad', 'paddle.no_grad', ([], {}), '()\n', (2509, 2511), False, 'import paddle\n'), ((2790, 2818), 'paddle.eye', 'paddle.eye', (['dim'], {'dtype': 'dtype'}), '(dim, dtype=dtype)\n', (2800, 2818), False, 'import paddle\n'), ((2828, 2878), 'paddle.einsum', 'paddle.einsum', (['"""...ij,...i,...j->...ij"""', 'rho', 's', 'y'], {}), "('...ij,...i,...j->...ij', rho, s, y)\n", (2841, 2878), False, 'import paddle\n'), ((2902, 2920), 'paddle.cholesky', 'paddle.cholesky', (['H'], {}), '(H)\n', (2917, 2920), False, 'import paddle\n'), ((2930, 2949), 'paddle.matmul', 'paddle.matmul', (['l', 'L'], {}), '(l, L)\n', (2943, 2949), False, 'import paddle\n'), ((2959, 2992), 'paddle.einsum', 'paddle.einsum', (['"""...ij->...ji"""', 'lL'], {}), "('...ij->...ji', lL)\n", (2972, 2992), False, 'import paddle\n'), ((3003, 3024), 'paddle.matmul', 'paddle.matmul', (['lL', 'Lr'], {}), '(lL, Lr)\n', (3016, 3024), False, 'import paddle\n'), ((3062, 3112), 'paddle.einsum', 'paddle.einsum', (['"""...ij,...i,...j->...ij"""', 'rho', 's', 's'], {}), "('...ij,...i,...j->...ij', rho, s, s)\n", (3075, 3112), False, 'import paddle\n'), ((3142, 3191), 'paddle.incubate.optimizer.functional.bfgs.verify_symmetric_positive_definite_matrix', 'verify_symmetric_positive_definite_matrix', (['H_next'], {}), '(H_next)\n', (3183, 3191), False, 'from paddle.incubate.optimizer.functional.bfgs import SearchState, verify_symmetric_positive_definite_matrix, update_approx_inverse_hessian\n'), ((3262, 3293), 'paddle.rand', 'paddle.rand', (['shape'], {'dtype': 'dtype'}), '(shape, dtype=dtype)\n', (3273, 3293), False, 'import paddle\n'), ((3348, 3387), 'paddle.rand', 'paddle.rand', (['hessian_shape'], {'dtype': 'dtype'}), '(hessian_shape, dtype=dtype)\n', (3359, 3387), False, 'import paddle\n'), ((3468, 3519), 'paddle.matmul', 'paddle.matmul', (['rotation', 'rotation'], {'transpose_y': '(True)'}), '(rotation, rotation, transpose_y=True)\n', (3481, 3519), False, 'import 
paddle\n'), ((8276, 8291), 'unittest.main', 'unittest.main', ([], {}), '()\n', (8289, 8291), False, 'import unittest\n'), ((1308, 1344), 'paddle.incubate.optimizer.functional.bfgs_utils.vjp', 'vjp', (['f', 'x'], {'create_graph': 'create_graph'}), '(f, x, create_graph=create_graph)\n', (1311, 1344), False, 'from paddle.incubate.optimizer.functional.bfgs_utils import vjp, vnorm_inf\n'), ((2097, 2124), 'paddle.stack', 'paddle.stack', (['rows'], {'axis': '(-2)'}), '(rows, axis=-2)\n', (2109, 2124), False, 'import paddle\n'), ((3551, 3601), 'paddle.incubate.optimizer.functional.bfgs.verify_symmetric_positive_definite_matrix', 'verify_symmetric_positive_definite_matrix', (['hessian'], {}), '(hessian)\n', (3592, 3601), False, 'from paddle.incubate.optimizer.functional.bfgs import SearchState, verify_symmetric_positive_definite_matrix, update_approx_inverse_hessian\n'), ((3630, 3649), 'paddle.abs', 'paddle.abs', (['hessian'], {}), '(hessian)\n', (3640, 3649), False, 'import paddle\n'), ((4868, 4885), 'paddle.seed', 'paddle.seed', (['(1234)'], {}), '(1234)\n', (4879, 4885), False, 'import paddle\n'), ((6255, 6273), 'paddle.seed', 'paddle.seed', (['(12345)'], {}), '(12345)\n', (6266, 6273), False, 'import paddle\n'), ((1815, 1835), 'paddle.zeros_like', 'paddle.zeros_like', (['x'], {}), '(x)\n', (1832, 1835), False, 'import paddle\n'), ((2630, 2663), 'paddle.einsum', 'paddle.einsum', (['"""...i, ...i"""', 's', 'y'], {}), "('...i, ...i', s, y)\n", (2643, 2663), False, 'import paddle\n'), ((2693, 2709), 'paddle.dot', 'paddle.dot', (['s', 'y'], {}), '(s, y)\n', (2703, 2709), False, 'import paddle\n'), ((5141, 5172), 'paddle.rand', 'paddle.rand', (['shape'], {'dtype': 'dtype'}), '(shape, dtype=dtype)\n', (5152, 5172), False, 'import paddle\n'), ((5273, 5320), 'paddle.incubate.optimizer.functional.bfgs.verify_symmetric_positive_definite_matrix', 'verify_symmetric_positive_definite_matrix', (['hess'], {}), '(hess)\n', (5314, 5320), False, 'from 
paddle.incubate.optimizer.functional.bfgs import SearchState, verify_symmetric_positive_definite_matrix, update_approx_inverse_hessian\n'), ((5382, 5404), 'numpy.linalg.inv', 'np.linalg.inv', (['hess_np'], {}), '(hess_np)\n', (5395, 5404), True, 'import numpy as np\n'), ((5422, 5451), 'paddle.to_tensor', 'paddle.to_tensor', (['hess_np_inv'], {}), '(hess_np_inv)\n', (5438, 5451), False, 'import paddle\n'), ((5465, 5510), 'paddle.incubate.optimizer.functional.bfgs.verify_symmetric_positive_definite_matrix', 'verify_symmetric_positive_definite_matrix', (['h0'], {}), '(h0)\n', (5506, 5510), False, 'from paddle.incubate.optimizer.functional.bfgs import SearchState, verify_symmetric_positive_definite_matrix, update_approx_inverse_hessian\n'), ((5532, 5542), 'paddle.incubate.optimizer.functional.bfgs_utils.vjp', 'vjp', (['f', 'x0'], {}), '(f, x0)\n', (5535, 5542), False, 'from paddle.incubate.optimizer.functional.bfgs_utils import vjp, vnorm_inf\n'), ((5563, 5576), 'paddle.incubate.optimizer.functional.bfgs_utils.vnorm_inf', 'vnorm_inf', (['f0'], {}), '(f0)\n', (5572, 5576), False, 'from paddle.incubate.optimizer.functional.bfgs_utils import vjp, vnorm_inf\n'), ((5597, 5636), 'paddle.incubate.optimizer.functional.bfgs.SearchState', 'SearchState', (['bat', 'x0', 'f0', 'g0', 'h0', 'gnorm'], {}), '(bat, x0, f0, g0, h0, gnorm)\n', (5608, 5636), False, 'from paddle.incubate.optimizer.functional.bfgs import SearchState, verify_symmetric_positive_definite_matrix, update_approx_inverse_hessian\n'), ((6465, 6496), 'paddle.ones', 'paddle.ones', (['shape'], {'dtype': 'dtype'}), '(shape, dtype=dtype)\n', (6476, 6496), False, 'import paddle\n'), ((6518, 6576), 'paddle.incubate.optimizer.functional.bfgs_minimize', 'bfgs_minimize', (['f', 'x0'], {'dtype': 'dtype', 'iters': '(100)', 'ls_iters': '(100)'}), '(f, x0, dtype=dtype, iters=100, ls_iters=100)\n', (6531, 6576), False, 'from paddle.incubate.optimizer.functional import bfgs_iterates, bfgs_minimize\n'), ((2052, 2068), 
'paddle.incubate.optimizer.functional.bfgs_utils.vjp', 'vjp', (['jacfn', 'x', 'v'], {}), '(jacfn, x, v)\n', (2055, 2068), False, 'from paddle.incubate.optimizer.functional.bfgs_utils import vjp, vnorm_inf\n'), ((5768, 5799), 'paddle.rand', 'paddle.rand', (['shape'], {'dtype': 'dtype'}), '(shape, dtype=dtype)\n', (5779, 5799), False, 'import paddle\n'), ((5853, 5863), 'paddle.incubate.optimizer.functional.bfgs_utils.vjp', 'vjp', (['f', 'x1'], {}), '(f, x1)\n', (5856, 5863), False, 'from paddle.incubate.optimizer.functional.bfgs_utils import vjp, vnorm_inf\n'), ((5930, 5976), 'paddle.incubate.optimizer.functional.bfgs.update_approx_inverse_hessian', 'update_approx_inverse_hessian', (['state', 'h0', 's', 'y'], {}), '(state, h0, s, y)\n', (5959, 5976), False, 'from paddle.incubate.optimizer.functional.bfgs import SearchState, verify_symmetric_positive_definite_matrix, update_approx_inverse_hessian\n'), ((6062, 6107), 'paddle.incubate.optimizer.functional.bfgs.verify_symmetric_positive_definite_matrix', 'verify_symmetric_positive_definite_matrix', (['h1'], {}), '(h1)\n', (6103, 6107), False, 'from paddle.incubate.optimizer.functional.bfgs import SearchState, verify_symmetric_positive_definite_matrix, update_approx_inverse_hessian\n'), ((6124, 6176), 'paddle.incubate.optimizer.functional.bfgs.verify_symmetric_positive_definite_matrix', 'verify_symmetric_positive_definite_matrix', (['h1_strict'], {}), '(h1_strict)\n', (6165, 6176), False, 'from paddle.incubate.optimizer.functional.bfgs import SearchState, verify_symmetric_positive_definite_matrix, update_approx_inverse_hessian\n'), ((6631, 6659), 'paddle.all', 'paddle.all', (['result.converged'], {}), '(result.converged)\n', (6641, 6659), False, 'import paddle\n'), ((6689, 6731), 'paddle.allclose', 'paddle.allclose', (['result.x_location', 'center'], {}), '(result.x_location, center)\n', (6704, 6731), False, 'import paddle\n')] |
from enum import Enum
from types import SimpleNamespace
import numpy as np
from os.path import dirname, normpath
import modelinter
class Const(Enum):
TRADING_YEAR = 252 # length of a trading year
WHOLE_YEAR = 365 #length of an actual year
ANNUALIZE = np.sqrt(TRADING_YEAR) # to ANNUALIZE daily volatility
N_YEARS_KEEP = 5.5 # previous years of data to keep
RISK_FREE_RATE = 0.0025 # assumed constant
SIGMA_WINDOW = 20 # time window for volatility estimation
# the following parameter is justified by notebook 0-preliminary_analysis.ipynb
SIGMA_SHIFT = 10 # by how much to delay the rolling volatility estimation
class ConstE(Enum):
TIMESTAMP = 'timestamp'
DATE = 'date'
DATE_FORMAT = '%Y-%m-%d'
ALT_DATE_FORMAT = '%d-%m-%Y'
class TimeseriesVariablesE(Enum):
SP500 = 'SP500'
VIX = 'VIX'
# ugly hack, but it should work on all OSs
absolute = dirname(modelinter.__file__)
class Paths(Enum):
SAVE_DIR = normpath(absolute + '/resources/data/interim/') + '/'
FIGURES_DIR = normpath(absolute + '/resources/plots/') + '/'
DATA_DIR = normpath(absolute + '/resources/data/raw/') + '/'
FREE_DATA_DIR = normpath(absolute + '/resources/data/processed/free_subset/') + '/'
PKL_EXT = '.pkl'
Slices = SimpleNamespace(
# to slice the pandas dataframe for stock data
stocks_subset=slice(2, None),
indices_subset=slice(0, 2)
) | [
"os.path.dirname",
"os.path.normpath",
"numpy.sqrt"
] | [((908, 936), 'os.path.dirname', 'dirname', (['modelinter.__file__'], {}), '(modelinter.__file__)\n', (915, 936), False, 'from os.path import dirname, normpath\n'), ((266, 287), 'numpy.sqrt', 'np.sqrt', (['TRADING_YEAR'], {}), '(TRADING_YEAR)\n', (273, 287), True, 'import numpy as np\n'), ((971, 1018), 'os.path.normpath', 'normpath', (["(absolute + '/resources/data/interim/')"], {}), "(absolute + '/resources/data/interim/')\n", (979, 1018), False, 'from os.path import dirname, normpath\n'), ((1043, 1083), 'os.path.normpath', 'normpath', (["(absolute + '/resources/plots/')"], {}), "(absolute + '/resources/plots/')\n", (1051, 1083), False, 'from os.path import dirname, normpath\n'), ((1105, 1148), 'os.path.normpath', 'normpath', (["(absolute + '/resources/data/raw/')"], {}), "(absolute + '/resources/data/raw/')\n", (1113, 1148), False, 'from os.path import dirname, normpath\n'), ((1175, 1236), 'os.path.normpath', 'normpath', (["(absolute + '/resources/data/processed/free_subset/')"], {}), "(absolute + '/resources/data/processed/free_subset/')\n", (1183, 1236), False, 'from os.path import dirname, normpath\n')] |
import numpy as np
import torch
class SpecAugment:
def __init__(self, T=8, F=8, mT=8, mF=2):
self.T = T
self.F = F
self.mT = mT
self.mF = mF
def __call__(self, x):
width, height = x.shape[-2:]
mask = torch.ones_like(x, requires_grad=False)
for _ in range(self.mT):
t_delta = np.random.randint(low=0, high=self.T)
t0 = np.random.randint(low=0, high=width - t_delta)
mask[:, t0:t0 + t_delta, :] = 0
for _ in range(self.mF):
f_delta = np.random.randint(low=0, high=self.F)
f0 = np.random.randint(low=0, high=height - f_delta)
mask[:, :, f0:f0 + f_delta] = 0
return x * mask
| [
"torch.ones_like",
"numpy.random.randint"
] | [((259, 298), 'torch.ones_like', 'torch.ones_like', (['x'], {'requires_grad': '(False)'}), '(x, requires_grad=False)\n', (274, 298), False, 'import torch\n'), ((355, 392), 'numpy.random.randint', 'np.random.randint', ([], {'low': '(0)', 'high': 'self.T'}), '(low=0, high=self.T)\n', (372, 392), True, 'import numpy as np\n'), ((410, 456), 'numpy.random.randint', 'np.random.randint', ([], {'low': '(0)', 'high': '(width - t_delta)'}), '(low=0, high=width - t_delta)\n', (427, 456), True, 'import numpy as np\n'), ((557, 594), 'numpy.random.randint', 'np.random.randint', ([], {'low': '(0)', 'high': 'self.F'}), '(low=0, high=self.F)\n', (574, 594), True, 'import numpy as np\n'), ((612, 659), 'numpy.random.randint', 'np.random.randint', ([], {'low': '(0)', 'high': '(height - f_delta)'}), '(low=0, high=height - f_delta)\n', (629, 659), True, 'import numpy as np\n')] |
import numpy as np
import pandas as pd
from sklearn import metrics
from glob import glob
import sys
subname1=sys.argv[1]
subname2=sys.argv[2]
def myauc(y,pred):
fpr, tpr, thresholds = metrics.roc_curve(y, pred, pos_label=1)
return metrics.auc(fpr, tpr)
sub1=pd.read_csv(subname1,index_col=0)
sub2=pd.read_csv(subname2,index_col=0)
print('pred', sub1.shape)
subjects = list(range(1,13))
real=[]
for subject in subjects:
fnames = ['../../data/train/subj%d_series%d_events.csv' % (subject,i) for i in range(7,9)]
for fname in fnames:
labels= pd.read_csv(fname,index_col=0)
real.append(labels)
print(fname,labels.shape)
real=pd.concat(real)
print('combined', real.shape)
xx=[]
bestweight=[]
bestscore=[]
for name in real.columns.values:
y=np.array(real[name])
yr1=np.array(sub1[name])
yr2=np.array(sub2[name])
bests=-1
bestf=-1
for j in range(11):
yr=yr1*j+yr2*(10-j)
yr/=10
m=myauc(y,yr)
print(name,j,m)
if bests<m:
bests=m
bestf=j
bestweight.append(bestf)
bestscore.append(bests)
#xx.append(myauc(y,yr))
#print name, xx[-1]
#print 'average',np.mean(xx)
print(bestweight)
print(bestscore)
print(np.mean(bestscore))
| [
"sklearn.metrics.roc_curve",
"pandas.read_csv",
"sklearn.metrics.auc",
"numpy.mean",
"numpy.array",
"pandas.concat"
] | [((278, 312), 'pandas.read_csv', 'pd.read_csv', (['subname1'], {'index_col': '(0)'}), '(subname1, index_col=0)\n', (289, 312), True, 'import pandas as pd\n'), ((318, 352), 'pandas.read_csv', 'pd.read_csv', (['subname2'], {'index_col': '(0)'}), '(subname2, index_col=0)\n', (329, 352), True, 'import pandas as pd\n'), ((685, 700), 'pandas.concat', 'pd.concat', (['real'], {}), '(real)\n', (694, 700), True, 'import pandas as pd\n'), ((196, 235), 'sklearn.metrics.roc_curve', 'metrics.roc_curve', (['y', 'pred'], {'pos_label': '(1)'}), '(y, pred, pos_label=1)\n', (213, 235), False, 'from sklearn import metrics\n'), ((248, 269), 'sklearn.metrics.auc', 'metrics.auc', (['fpr', 'tpr'], {}), '(fpr, tpr)\n', (259, 269), False, 'from sklearn import metrics\n'), ((817, 837), 'numpy.array', 'np.array', (['real[name]'], {}), '(real[name])\n', (825, 837), True, 'import numpy as np\n'), ((847, 867), 'numpy.array', 'np.array', (['sub1[name]'], {}), '(sub1[name])\n', (855, 867), True, 'import numpy as np\n'), ((877, 897), 'numpy.array', 'np.array', (['sub2[name]'], {}), '(sub2[name])\n', (885, 897), True, 'import numpy as np\n'), ((1294, 1312), 'numpy.mean', 'np.mean', (['bestscore'], {}), '(bestscore)\n', (1301, 1312), True, 'import numpy as np\n'), ((584, 615), 'pandas.read_csv', 'pd.read_csv', (['fname'], {'index_col': '(0)'}), '(fname, index_col=0)\n', (595, 615), True, 'import pandas as pd\n')] |
import cv2
import numpy as np
import matplotlib.pyplot as plt
import region_grow
import get_boundary
def extract_vein_by_region_grow(edges_canny, image, threshold_perimeter, threshold_kernel_boundary):
"""
edges_canny, image, threshold_perimeter, threshold_kernel_boundary -> vein, main_vein, vein_points, main_vein_points
:param edges_canny: edges_canny.
:param image: path_name of leave.
:param threshold_perimeter: tolerated threshold of perimeter of contours, to get rid of the fractions of boundary.
:param threshold_kernel_boundary: width of dilated boundary, to get rid of the boundary.
:return: vein, main_vein, vein_points, main_vein_points.
"""
img_ori_gray = cv2.imread(image, 0)
# cut out boundary
boundary = get_boundary.get_boundary(image)
canvas_boundary = np.zeros(edges_canny.shape[:2], dtype=np.uint8)
for i in boundary:
canvas_boundary[int(i[0]), int(i[1])] = 255
kernel_boundary = cv2.getStructuringElement(cv2.MORPH_RECT, threshold_kernel_boundary)
canvas_boundary = cv2.dilate(canvas_boundary, kernel_boundary) # 膨胀后的边框
opened = cv2.bitwise_or(edges_canny, canvas_boundary)
res_all = region_grow.region_grow(opened, 'all')
# 得到叶脉并依区域周长去噪
vein = cv2.subtract(res_all, canvas_boundary)
# plt.imshow(vein, plt.cm.gray)
# plt.suptitle('vein')
# plt.show()
# 连接断裂的主叶脉
h, w = vein.shape
# denoise
vein[:, round(w / 2) - 20:round(w / 2) + 20], contours, hierarchy = \
cv2.findContours(vein[:, round(w/2)-20:round(w/2)+20], cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
small_perimeters = [i for i in contours if len(i) < 10] # 删短周长的区域
cv2.fillPoly(vein[:, round(w/2)-20:round(w/2)+20], small_perimeters, 0)
# plt.imshow(vein[:, round(w/2)-20:round(w/2)+20], plt.cm.gray)
# plt.suptitle('First One')
# plt.show()
# temporary end
vein_end = [0, 0]
start_point_prev = [-1, -1]
# get the bottom index
for end_idx in range(len(vein[::-1, round(w/2)-20:round(w/2)+20])):
if vein[end_idx, :].any():
for j in range(len(vein[end_idx, round(w/2)-20:round(w/2)+20])):
if vein[:, round(w/2)-20:round(w/2)+20][end_idx][j] == 255:
vein_end = [end_idx, j+1]
for i in range(len(vein[:vein_end[0], round(w/2)-20:round(w/2)+20])):
if i != 0:
if start_point and end_point and i in list(range(0, end_point[0])):
continue
# print('i:{}'.format(i))
start_point = []
end_point = []
flag_end = 'go'
flag_continue_for_i = 'go'
# get start_point and end_point
for j in range(len(vein[0:vein_end[0], round(w/2)-20:round(w/2)+20])):
if flag_end == 'brk':
break
if vein[:, round(w/2)-20:round(w/2)+20][j].any() and start_point == []:
if not vein[:, round(w/2)-20:round(w/2)+20][j+1].any():
for k in range(len(vein[:, round(w/2)-20:round(w/2)+20][j])):
if vein[:, round(w/2)-20:round(w/2)+20][j][k] == 255:
start_point = [j, (k+round(w/2)-20)+1]
# print('start_point:', start_point)
if start_point[0] == start_point_prev[0]:
flag_continue_for_i = 'cnt'
start_point_prev = start_point.copy()
break
if flag_continue_for_i == 'cnt':
break
if not vein[:, round(w/2)-20:round(w/2)+20][j].any() and start_point != [] and end_point == []:
# print("All zeros in %d-th line." % j)
if vein[:, round(w/2)-20:round(w/2)+20][j+1].any():
for k in range(len(vein[:, round(w/2)-20:round(w/2)+20][j])):
if vein[:, round(w/2)-20:round(w/2)+20][j+1][k] == 255:
end_point = [j+1, (k+round(w/2)-20)+1]
# print('end_point:', end_point)
flag_end = 'brk'
break
else:
continue
# get points end
if not start_point or not end_point or flag_continue_for_i == 'cnt':
continue
canny_threshold_enhanced_locally = [30, 60]
# print(start_point, end_point)
# print([start_point[0], end_point[0], min(start_point[1], end_point[1]), max(start_point[1], end_point[1])])
vein_enhanced_locally = img_ori_gray[start_point[0]:end_point[0],
min(start_point[1], end_point[1]):max(start_point[1], end_point[1])+1]
# print("vein_enhanced_locally", vein_enhanced_locally.shape, type(vein_enhanced_locally))
# vein_enhanced_locally_GB = cv2.bilateralFilter(vein_enhanced_locally, 9, 75, 75)
# print('rect:', [start_point[0], end_point[0],
# min(start_point[1], end_point[1]), max(start_point[1], end_point[1])+1])
# plt.imshow(vein_enhanced_locally, cmap="gray")
# plt.show()
edge_enhanced_locally = cv2.Canny(vein_enhanced_locally, *canny_threshold_enhanced_locally, apertureSize=3)
# for i in vein_enhanced_locally:
# print(i)
# for i in range(10):
# print("")
# for i in vein_enhanced_locally_GB:
# print(i)
white_pixel_percentage = list(edge_enhanced_locally.ravel() == 255).count(1) /\
len(list(edge_enhanced_locally.ravel()))
start_point_check = [-1, -1]
counter_canny_adjustment = 0
counter_prevent_dead_loop = 2
white_pixel_percentage_prev = 0
while not 1/40 < white_pixel_percentage < 1/20 and (start_point_check == [-1, -1] or
start_point_check[1] >= start_point[0]):
if not counter_prevent_dead_loop:
# print('final white_pixel_percentage: {}'.format(white_pixel_percentage))
break
# code of adjustment on threshold of canny
if white_pixel_percentage <= 1/40:
canny_threshold_enhanced_locally = [canny_threshold_enhanced_locally[0] - 1,
canny_threshold_enhanced_locally[1] - 1]
else:
canny_threshold_enhanced_locally = [canny_threshold_enhanced_locally[0] + 1,
canny_threshold_enhanced_locally[1] + 1]
# print("vein_enhanced_locally", vein_enhanced_locally.shape, type(vein_enhanced_locally))
# vein_enhanced_locally_GB = cv2.bilateralFilter(vein_enhanced_locally, 9, 75, 75)
edge_enhanced_locally = cv2.Canny(vein_enhanced_locally, *canny_threshold_enhanced_locally, apertureSize=3)
# print('vein_enhanced_locally_GB:', vein_enhanced_locally_GB.shape, type(vein_enhanced_locally_GB))
# print("vein_enhanced_locally_GB.shape_in_while:", vein_enhanced_locally_GB.shape)
white_pixel_percentage = (np.sum(edge_enhanced_locally==1) /
len(list(edge_enhanced_locally.ravel())))
if white_pixel_percentage == white_pixel_percentage_prev:
counter_prevent_dead_loop -= 1
white_pixel_percentage_prev = white_pixel_percentage
# print('{}-th white_pixel_percentage:{}'.format(counter_canny_adjustment,
# white_pixel_percentage))
counter_canny_adjustment += 1
# CHECK IF THE BRANCH HAS EXTENDED
for k in range(len(vein[:, round(w / 2) - 20:round(w / 2) + 20])):
if vein[:, round(w / 2) - 20:round(w / 2) + 20][k].any():
if not vein[:, round(w / 2) - 20:round(w / 2) + 20][min(vein.shape[0]-1, k + 1)].any():
for j in range(len(vein[:, round(w / 2) - 20:round(w / 2) + 20][k])):
if vein[:, round(w / 2) - 20:round(w / 2) + 20][k][j] == 255:
start_point_check = [k, j + 1]
break
# CHECK END
# print('final white_pixel_percentage: {}'.format(white_pixel_percentage))
# while end
vein[start_point[0]:end_point[0], min(start_point[1], end_point[1]):max(start_point[1], end_point[1])+1] = \
edge_enhanced_locally
# plt.imshow(vein, plt.cm.gray)
# plt.suptitle('In Iteration')
# plt.show()
# for end
# denoise
vein[:, round(w / 2) - 20:round(w / 2) + 20], contours, hierarchy = \
cv2.findContours(vein[:, round(w/2)-20:round(w/2)+20], cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
small_perimeters = [i for i in contours if len(i) < 10] # 删短周长的区域
cv2.fillPoly(vein[:, round(w/2)-20:round(w/2)+20], small_perimeters, 0)
# plt.imshow(vein[:, round(w/2)-20:round(w/2)+20], plt.cm.gray)
# plt.suptitle('Last One')
# plt.show()
vein, contours, hierarchy = cv2.findContours(vein, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
small_perimeters = [i for i in contours if len(i) < threshold_perimeter] # 删短周长的区域
cv2.fillPoly(vein, small_perimeters, 0)
# 上 -> 下
res_top = region_grow.region_grow(vein, 'top')
kernel_main_vein = cv2.getStructuringElement(cv2.MORPH_RECT, (5, 5))
res_top = cv2.dilate(res_top, kernel_main_vein)
res_top = cv2.dilate(res_top, kernel_main_vein)
main_vein = cv2.bitwise_and(vein, res_top)
# fig_1, axes = plt.subplots(1, 3, figsize=(16, 8))
# ax1, ax2, ax3 = axes.ravel()
# ax1.imshow(vein, plt.cm.gray)
# ax1.set_title('vein')
# ax2.imshow(res_top, plt.cm.gray)
# ax2.set_title('grow_from_top')
# ax3.imshow(main_vein, plt.cm.gray)
# ax3.set_title('main_vein')
# plt.show()
# main_vein, contours, hierarchy = cv2.findContours(main_vein, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
# small_perimeters = [i for i in contours if len(i) < 0] # 删短周长的区域
# cv2.fillPoly(main_vein, small_perimeters, 0)
# save points
vein_points = []
for i in range(vein.shape[0]):
for j in range(vein.shape[1]):
if vein[i, j] == 255:
vein_points.append([i, j])
main_vein_points = []
for i in range(main_vein.shape[0]):
for j in range(main_vein.shape[1]):
if main_vein[i, j] == 255:
main_vein_points.append([i, j])
main_vein_points = np.array(main_vein_points)
# for i in main_vein_points:
# # print(i)
# plt.scatter(i[1], i[0], c="red")
# ax = plt.gca()
# ax.set_aspect(1)
# plt.figure()
# plt.imshow(main_vein, cmap="gray")
# plt.show()
return vein, main_vein, vein_points, main_vein_points
| [
"cv2.Canny",
"cv2.subtract",
"numpy.sum",
"region_grow.region_grow",
"cv2.dilate",
"cv2.bitwise_and",
"cv2.getStructuringElement",
"numpy.zeros",
"cv2.fillPoly",
"cv2.imread",
"cv2.bitwise_or",
"numpy.array",
"get_boundary.get_boundary",
"cv2.findContours"
] | [((724, 744), 'cv2.imread', 'cv2.imread', (['image', '(0)'], {}), '(image, 0)\n', (734, 744), False, 'import cv2\n'), ((785, 817), 'get_boundary.get_boundary', 'get_boundary.get_boundary', (['image'], {}), '(image)\n', (810, 817), False, 'import get_boundary\n'), ((841, 888), 'numpy.zeros', 'np.zeros', (['edges_canny.shape[:2]'], {'dtype': 'np.uint8'}), '(edges_canny.shape[:2], dtype=np.uint8)\n', (849, 888), True, 'import numpy as np\n'), ((989, 1057), 'cv2.getStructuringElement', 'cv2.getStructuringElement', (['cv2.MORPH_RECT', 'threshold_kernel_boundary'], {}), '(cv2.MORPH_RECT, threshold_kernel_boundary)\n', (1014, 1057), False, 'import cv2\n'), ((1081, 1125), 'cv2.dilate', 'cv2.dilate', (['canvas_boundary', 'kernel_boundary'], {}), '(canvas_boundary, kernel_boundary)\n', (1091, 1125), False, 'import cv2\n'), ((1150, 1194), 'cv2.bitwise_or', 'cv2.bitwise_or', (['edges_canny', 'canvas_boundary'], {}), '(edges_canny, canvas_boundary)\n', (1164, 1194), False, 'import cv2\n'), ((1210, 1248), 'region_grow.region_grow', 'region_grow.region_grow', (['opened', '"""all"""'], {}), "(opened, 'all')\n", (1233, 1248), False, 'import region_grow\n'), ((1283, 1321), 'cv2.subtract', 'cv2.subtract', (['res_all', 'canvas_boundary'], {}), '(res_all, canvas_boundary)\n', (1295, 1321), False, 'import cv2\n'), ((9322, 9384), 'cv2.findContours', 'cv2.findContours', (['vein', 'cv2.RETR_TREE', 'cv2.CHAIN_APPROX_SIMPLE'], {}), '(vein, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\n', (9338, 9384), False, 'import cv2\n'), ((9480, 9519), 'cv2.fillPoly', 'cv2.fillPoly', (['vein', 'small_perimeters', '(0)'], {}), '(vein, small_perimeters, 0)\n', (9492, 9519), False, 'import cv2\n'), ((9551, 9587), 'region_grow.region_grow', 'region_grow.region_grow', (['vein', '"""top"""'], {}), "(vein, 'top')\n", (9574, 9587), False, 'import region_grow\n'), ((9612, 9661), 'cv2.getStructuringElement', 'cv2.getStructuringElement', (['cv2.MORPH_RECT', '(5, 5)'], {}), '(cv2.MORPH_RECT, (5, 5))\n', (9637, 9661), 
False, 'import cv2\n'), ((9677, 9714), 'cv2.dilate', 'cv2.dilate', (['res_top', 'kernel_main_vein'], {}), '(res_top, kernel_main_vein)\n', (9687, 9714), False, 'import cv2\n'), ((9730, 9767), 'cv2.dilate', 'cv2.dilate', (['res_top', 'kernel_main_vein'], {}), '(res_top, kernel_main_vein)\n', (9740, 9767), False, 'import cv2\n'), ((9785, 9815), 'cv2.bitwise_and', 'cv2.bitwise_and', (['vein', 'res_top'], {}), '(vein, res_top)\n', (9800, 9815), False, 'import cv2\n'), ((10807, 10833), 'numpy.array', 'np.array', (['main_vein_points'], {}), '(main_vein_points)\n', (10815, 10833), True, 'import numpy as np\n'), ((5254, 5341), 'cv2.Canny', 'cv2.Canny', (['vein_enhanced_locally', '*canny_threshold_enhanced_locally'], {'apertureSize': '(3)'}), '(vein_enhanced_locally, *canny_threshold_enhanced_locally,\n apertureSize=3)\n', (5263, 5341), False, 'import cv2\n'), ((6955, 7042), 'cv2.Canny', 'cv2.Canny', (['vein_enhanced_locally', '*canny_threshold_enhanced_locally'], {'apertureSize': '(3)'}), '(vein_enhanced_locally, *canny_threshold_enhanced_locally,\n apertureSize=3)\n', (6964, 7042), False, 'import cv2\n'), ((7289, 7323), 'numpy.sum', 'np.sum', (['(edge_enhanced_locally == 1)'], {}), '(edge_enhanced_locally == 1)\n', (7295, 7323), True, 'import numpy as np\n')] |
#MDL_QUADCOPTER Dynamic parameters for a quadrotor.
#
# MDL_QUADCOPTER is a script creates the workspace variable quad which
# describes the dynamic characterstics of a quadrotor flying robot.
#
# Properties::
#
# This is a structure with the following elements:
#
# nrotors Number of rotors (1x1)
# J Flyer rotational inertia matrix (3x3)
# h Height of rotors above CoG (1x1)
# d Length of flyer arms (1x1)
# nb Number of blades per rotor (1x1)
# r Rotor radius (1x1)
# c Blade chord (1x1)
# e Flapping hinge offset (1x1)
# Mb Rotor blade mass (1x1)
# Mc Estimated hub clamp mass (1x1)
# ec Blade root clamp displacement (1x1)
# Ib Rotor blade rotational inertia (1x1)
# Ic Estimated root clamp inertia (1x1)
# mb Static blade moment (1x1)
# Ir Total rotor inertia (1x1)
# Ct Non-dim. thrust coefficient (1x1)
# Cq Non-dim. torque coefficient (1x1)
# sigma Rotor solidity ratio (1x1)
# thetat Blade tip angle (1x1)
# theta0 Blade root angle (1x1)
# theta1 Blade twist angle (1x1)
# theta75 3/4 blade angle (1x1)
# thetai Blade ideal root approximation (1x1)
# a Lift slope gradient (1x1)
# A Rotor disc area (1x1)
# gamma Lock number (1x1)
#
#
# Notes::
# - SI units are used.
#
# References::
# - Design, Construction and Control of a Large Quadrotor micro air vehicle.
# P.Pounds, PhD thesis,
# Australian National University, 2007.
# http://www.eng.yale.edu/pep5/P_Pounds_Thesis_2008.pdf
# - This is a heavy lift quadrotor
#
import numpy as np
from math import pi, sqrt, inf
quadrotor = {}
quadrotor['nrotors'] = 4 # 4 rotors
quadrotor['g'] = 9.81 # g Gravity
quadrotor['rho'] = 1.184 # rho Density of air
quadrotor['muv'] = 1.5e-5 # muv Viscosity of air
# Airframe
quadrotor['M'] = 4 # M Mass
Ixx = 0.082
Iyy = 0.082
Izz = 0.149 #0.160
quadrotor['J'] = np.diag([Ixx, Iyy, Izz]) # I Flyer rotational inertia matrix 3x3
quadrotor['h'] = -0.007 # h Height of rotors above CoG
quadrotor['d'] = 0.315 # d Length of flyer arms
#Rotor
quadrotor['nb'] = 2 # b Number of blades per rotor
quadrotor['r'] = 0.165 # r Rotor radius
quadrotor['c'] = 0.018 # c Blade chord
quadrotor['e'] = 0.0 # e Flapping hinge offset
quadrotor['Mb'] = 0.005 # Mb Rotor blade mass
quadrotor['Mc'] = 0.010 # Mc Estimated hub clamp mass
quadrotor['ec'] = 0.004 # ec Blade root clamp displacement
quadrotor['Ib'] = quadrotor['Mb'] * (quadrotor['r'] - quadrotor['ec'])**2 / 4 # Ib Rotor blade rotational inertia
quadrotor['Ic'] = quadrotor['Mc'] * (quadrotor['ec'])**2 / 4 # Ic Estimated root clamp inertia
quadrotor['mb'] = quadrotor['g'] * (quadrotor['Mc'] * quadrotor['ec'] / 2 + quadrotor['Mb'] * quadrotor['r'] /2) # mb Static blade moment
quadrotor['Ir'] = quadrotor['nb'] * (quadrotor['Ib'] + quadrotor['Ic']) # Ir Total rotor inertia
quadrotor['Ct'] = 0.0048 # Ct Non-dim. thrust coefficient
quadrotor['Cq'] = quadrotor['Ct'] * sqrt(quadrotor['Ct']/2) # Cq Non-dim. torque coefficient
quadrotor['sigma'] = quadrotor['c'] * quadrotor['nb'] / (pi * quadrotor['r']) # sigma Rotor solidity ratio
quadrotor['thetat'] = 6.8 * (pi / 180) # thetat Blade tip angle
quadrotor['theta0'] = 14.6 * (pi / 180) # theta0 Blade root angle
quadrotor['theta1'] = quadrotor['thetat'] - quadrotor['theta0'] # theta1 Blade twist angle
quadrotor['theta75'] = quadrotor['theta0'] + 0.75 * quadrotor['theta1'] # theta76 3/4 blade angle
try:
quadrotor['thetai'] = quadrotor['thetat'] * (quadrotor['r'] / quadrotor['e']) # thetai Blade ideal root approximation
except ZeroDivisionError:
quadrotor['thetai'] = inf
quadrotor['a'] = 5.5 # a Lift slope gradient
# derived constants
quadrotor['A'] = pi*quadrotor['r']**2 # A Rotor disc area
quadrotor['gamma'] = quadrotor['rho'] * quadrotor['a'] * quadrotor['c'] * quadrotor['r']**4 / (quadrotor['Ib'] + quadrotor['Ic']) # gamma Lock number
quadrotor['b'] = quadrotor['Ct'] * quadrotor['rho']*quadrotor['A']*quadrotor['r']**2 # T = b w^2
quadrotor['k'] = quadrotor['Cq'] * quadrotor['rho']*quadrotor['A']*quadrotor['r']**3 # Q = k w^2
quadrotor['verbose'] = False | [
"numpy.diag",
"math.sqrt"
] | [((2020, 2044), 'numpy.diag', 'np.diag', (['[Ixx, Iyy, Izz]'], {}), '([Ixx, Iyy, Izz])\n', (2027, 2044), True, 'import numpy as np\n'), ((3387, 3412), 'math.sqrt', 'sqrt', (["(quadrotor['Ct'] / 2)"], {}), "(quadrotor['Ct'] / 2)\n", (3391, 3412), False, 'from math import pi, sqrt, inf\n')] |
"""Data structures."""
from __future__ import annotations
from abc import abstractmethod
from dataclasses import asdict, dataclass, field, fields, is_dataclass
from typing import (
Any,
Dict,
Generic,
Iterator,
List,
Optional,
Protocol,
Tuple,
TypeVar,
Union,
cast,
overload,
)
from PIL import Image
import attr
import numpy as np
import numpy.typing as npt
from ranzen.decorators import implements
from ranzen.misc import gcopy
import torch
from torch import Tensor
from typing_extensions import Self, TypeAlias, runtime_checkable
from conduit.types import Addable, IndexType, Sized
# Public names exported by this module.
__all__ = [
    "BinarySample",
    "BinarySampleIW",
    "Dataset",
    "DatasetWrapper",
    "ImageSize",
    "InputContainer",
    "LoadedData",
    "MultiCropOutput",
    "NamedSample",
    "PseudoCdtDataset",
    "RawImage",
    "SampleBase",
    "SizedDataset",
    "SubgroupSample",
    "SubgroupSampleIW",
    "TargetData",
    "TernarySample",
    "TernarySampleIW",
    "TrainTestSplit",
    "TrainValTestSplit",
    "UnloadedData",
    "concatenate_inputs",
    "shallow_asdict",
    "shallow_astuple",
]
# An image as it comes off disk: an integer array or a PIL image.
RawImage: TypeAlias = Union[npt.NDArray[np.integer], Image.Image]
# Data as held by a dataset before being loaded/decoded into sample form.
UnloadedData: TypeAlias = Union[
    npt.NDArray[np.floating],
    npt.NDArray[np.integer],
    npt.NDArray[np.string_],
    Tensor,
]
# Data after loading: tensors/arrays/images, dicts or lists thereof, or any
# container implementing the ``InputContainer`` protocol.
LoadedData: TypeAlias = Union[
    Tensor,
    Image.Image,
    npt.NDArray[np.floating],
    npt.NDArray[np.integer],
    npt.NDArray[np.string_],
    Dict[str, Tensor],
    Dict[str, Image.Image],
    Dict[str, npt.NDArray[np.floating]],
    Dict[str, npt.NDArray[np.integer]],
    Dict[str, npt.NDArray[np.string_]],
    List[Image.Image],
    "InputContainer[X_co]",
]
# The subset of ``LoadedData`` that supports integer/slice indexing.
IndexabledData: TypeAlias = Union[
    Tensor,
    npt.NDArray[np.floating],
    npt.NDArray[np.integer],
    npt.NDArray[np.string_],
]
X = TypeVar("X", bound=LoadedData)  # invariant loaded-data type
X_co = TypeVar("X_co", bound=LoadedData, covariant=True)  # covariant counterpart for protocols
XI = TypeVar("XI", bound=IndexabledData)  # loaded data that can be indexed
# Valid types for target (label) data.
TargetData: TypeAlias = Union[Tensor, npt.NDArray[np.floating], npt.NDArray[np.integer]]
def concatenate_inputs(x1: X, x2: X, *, is_batched: bool) -> X:
    """Combine two pieces of loaded data of the same type into one.

    Tensors and arrays are stacked along a new leading axis, or concatenated
    along the existing batch axis when ``is_batched``; PIL images are collected
    into a list; dictionaries are merged key-wise (recursively concatenating
    values present in both); anything else addable is combined with ``+``.

    :param x1: First input.
    :param x2: Second input (must be the same type as ``x1``).
    :param is_batched: Whether the inputs already carry a batch dimension.
    :raises AttributeError: If the inputs are not of the same type.
    """
    same_type = type(x1) == type(x2)
    if same_type and isinstance(x1, list):
        # For lists, the element types must agree as well.
        same_type = type(x1[0]) == type(cast(List, x2)[0])  # type: ignore
    if not same_type:
        raise AttributeError("Only data of the same type can be concatenated (added) together.")

    if isinstance(x1, Tensor):
        # Align ranks: if exactly one input lacks the batch dimension, add it.
        rank_gap = x1.ndim - x2.ndim  # type: ignore
        if rank_gap == 1:
            x2 = x2.unsqueeze(0)  # type: ignore
        elif rank_gap == -1:
            x1 = x1.unsqueeze(0)
        if is_batched:
            return torch.cat([x1, x2], dim=0)  # type: ignore
        return torch.stack([x1, x2], dim=0)  # type: ignore
    if isinstance(x1, np.ndarray):
        rank_gap = x1.ndim - x2.ndim  # type: ignore
        if rank_gap == 1:
            x2 = np.expand_dims(x2, axis=0)  # type: ignore
        elif rank_gap == -1:
            x1 = np.expand_dims(x1, axis=0)  # type: ignore
        if is_batched:
            return np.concatenate([x1, x2], axis=0)  # type: ignore
        return np.stack([x1, x2], axis=0)  # type: ignore
    if isinstance(x1, Image.Image):
        return [x1, x2]  # type: ignore
    if isinstance(x1, dict):
        for key, value in x2.items():  # type: ignore
            if key in x1:
                # Key present in both: concatenate the values recursively.
                x1[key] = concatenate_inputs(x1[key], value, is_batched=is_batched)  # type: ignore
            else:
                x1[key] = value  # type: ignore
        return x1
    # Lists, InputContainers and any other addable type.
    return x1 + x2  # type: ignore
@runtime_checkable
class InputContainer(Sized, Addable, Protocol[X_co]):
    """Protocol for sized, addable containers of loaded data.

    Implementors report their length, combine with ``+``, and can be moved
    between devices via :meth:`to`.
    """
    @implements(Sized)
    def __len__(self) -> int:
        """Total number of samples in the container."""
        ...
    @implements(Addable)
    def __add__(self, other: Self) -> Self:
        """Combine two containers of the same type into a new one."""
        ...
    def to(
        self,
        device: Optional[Union[torch.device, str]],
        *,
        non_blocking: bool = False,
    ) -> Self:
        """Move all ``Tensor``/``InputContainer`` fields to ``device`` in place.

        Fields of any other type are left untouched. Returns ``self`` to allow
        chaining.
        """
        # Only dataclass implementors are supported here: shallow_asdict
        # raises TypeError on non-dataclass instances.
        for name, value in shallow_asdict(self).items():
            if isinstance(value, (Tensor, InputContainer)):
                setattr(self, name, value.to(device, non_blocking=non_blocking))
        return self
@dataclass
class MultiCropOutput(InputContainer[Tensor]):
    """Container for the global and local crops produced by multi-crop augmentation."""

    global_crops: List[Tensor]
    local_crops: List[Tensor] = field(default_factory=list)

    @property
    def all_crops(self) -> List[Tensor]:
        """All crops in a single list, global crops first."""
        return [*self.global_crops, *self.local_crops]

    @property
    def global_crop_sizes(self):
        """Spatial (C, H, W) sizes of the global crops."""
        return [c.shape[-3:] for c in self.global_crops]

    @property
    def local_crop_sizes(self):
        """Spatial (C, H, W) sizes of the local crops."""
        return [c.shape[-3:] for c in self.local_crops]

    @property
    def shape(self):
        """Shape of the global crops - for compatibility with DMs."""
        return self.global_crops[0].shape

    @implements(InputContainer)
    def __len__(self) -> int:
        """Total number of crops."""
        return len(self.global_crops) + len(self.local_crops)

    def __iadd__(self, other: Self) -> Self:
        # In-place merge: extend both crop lists with the other container's crops.
        self.global_crops.extend(other.global_crops)
        self.local_crops.extend(other.local_crops)
        return self

    @implements(InputContainer)
    def __add__(self, other: Self) -> Self:
        # Out-of-place merge: shallow-copy, then rebind fresh concatenated lists.
        merged = gcopy(self, deep=False)
        merged.global_crops = [*merged.global_crops, *other.global_crops]
        merged.local_crops = [*merged.local_crops, *other.local_crops]
        return merged
IS = TypeVar("IS", bound="SampleBase[IndexabledData]")


@dataclass
class SampleBase(InputContainer[X]):
    """Base class for all sample containers; carries the input ``x``."""

    x: X

    @implements(InputContainer)
    def __len__(self) -> int:
        # Number of dataclass fields, i.e. the arity of the sample.
        return len(self.__dataclass_fields__)  # type: ignore[attr-defined]

    @abstractmethod
    def __iter__(self) -> Iterator[X]:
        ...

    @implements(InputContainer)
    def __add__(self, other: Self) -> Self:
        """Concatenate two samples' inputs along the batch dimension."""
        copy = gcopy(self, deep=False)
        copy.x = concatenate_inputs(copy.x, other.x, is_batched=True)
        return copy

    def astuple(self, deep=False) -> Tuple[X]:
        """Return the sample's field values as a tuple.

        :param deep: If ``True``, deep-copy the values instead of returning
            references to them.
        """
        tuple_ = tuple(iter(self))
        if deep:
            tuple_ = gcopy(tuple_, deep=True)
        return tuple_

    def asdict(self, deep=False) -> Dict[str, X]:
        """Return the sample's fields as a ``field-name -> value`` dict.

        :param deep: If ``True``, recursively convert/copy the values
            (via :func:`dataclasses.asdict`) instead of returning references.
        """
        if deep:
            # Fixed: the result of ``asdict`` was previously computed and
            # discarded, making ``deep=True`` behave like ``deep=False``.
            return asdict(self)
        return shallow_asdict(self)

    def __getitem__(self: "SampleBase[XI]", index: IndexType) -> "SampleBase[XI]":
        """Return a shallow copy of the sample with ``x`` indexed by ``index``."""
        return gcopy(self, deep=False, x=self.x[index])
@dataclass
class NamedSample(SampleBase[X]):
    """A sample holding only an input ``x``."""

    @overload
    def add_field(self, *, y: None = ..., s: None = ..., iw: None = ...) -> Self:
        ...

    @overload
    def add_field(self, *, y: Tensor = ..., s: None = ..., iw: None = ...) -> "BinarySample":
        ...

    @overload
    def add_field(self, *, y: Tensor = ..., s: None = ..., iw: Tensor = ...) -> "BinarySampleIW":
        ...

    @overload
    def add_field(self, *, y: Tensor = ..., s: Tensor = ..., iw: None = ...) -> "TernarySample":
        ...

    @overload
    def add_field(self, *, y: Tensor = ..., s: Tensor = ..., iw: Tensor = ...) -> "TernarySampleIW":
        ...

    def add_field(
        self, y: Optional[Tensor] = None, s: Optional[Tensor] = None, iw: Optional[Tensor] = None
    ) -> Union[Self, "BinarySample", "BinarySampleIW", "TernarySample", "TernarySampleIW"]:
        """Return a new sample enriched with whichever of ``y``/``s``/``iw`` are given."""
        if y is None:
            # Without a target, ``s``/``iw`` have no richer sample type to attach to.
            return self
        if s is None:
            if iw is None:
                return BinarySample(x=self.x, y=y)
            return BinarySampleIW(x=self.x, y=y, iw=iw)
        if iw is None:
            return TernarySample(x=self.x, s=s, y=y)
        return TernarySampleIW(x=self.x, s=s, y=y, iw=iw)

    @implements(SampleBase)
    def __iter__(self) -> Iterator[X]:
        yield from (self.x,)

    @implements(SampleBase)
    def __getitem__(self: "NamedSample[XI]", index: IndexType) -> "NamedSample[XI]":
        """Return a shallow copy of the sample with ``x`` indexed by ``index``."""
        return gcopy(self, deep=False, x=self.x[index])
@dataclass
class _BinarySampleMixin:
    # Target (class label) tensor.
    y: Tensor
@dataclass
class _SubgroupSampleMixin:
    # Subgroup (sensitive-attribute) label tensor.
    s: Tensor
@dataclass
class BinarySample(NamedSample[X], _BinarySampleMixin):
    """A sample consisting of an input ``x`` and a target ``y``."""

    @overload
    def add_field(self, *, s: None = ..., iw: None = ...) -> Self:
        ...

    @overload
    def add_field(self, *, s: None = ..., iw: Tensor = ...) -> "BinarySampleIW":
        ...

    @overload
    def add_field(self, *, s: Tensor = ..., iw: None = ...) -> "TernarySample":
        ...

    @overload
    def add_field(self, *, s: Tensor = ..., iw: Tensor = ...) -> "TernarySampleIW":
        ...

    def add_field(
        self, *, s: Optional[Tensor] = None, iw: Optional[Tensor] = None
    ) -> Union[Self, "BinarySampleIW", "TernarySample", "TernarySampleIW"]:
        """Return a new sample enriched with whichever of ``s``/``iw`` are given."""
        if s is None:
            return self if iw is None else BinarySampleIW(x=self.x, y=self.y, iw=iw)
        if iw is None:
            return TernarySample(x=self.x, s=s, y=self.y)
        return TernarySampleIW(x=self.x, s=s, y=self.y, iw=iw)

    @implements(SampleBase)
    def __iter__(self) -> Iterator[LoadedData]:
        yield from (self.x, self.y)

    @implements(NamedSample)
    def __add__(self, other: Self) -> Self:
        """Concatenate two samples along the batch dimension."""
        merged = gcopy(self, deep=False)
        merged.y = torch.cat([merged.y, other.y], dim=0)
        batched = len(merged.y) > 1
        merged.x = concatenate_inputs(merged.x, other.x, is_batched=batched)
        return merged

    @implements(SampleBase)
    def __getitem__(self: "BinarySample[XI]", index: IndexType) -> "BinarySample[XI]":
        """Return a shallow copy with ``x`` and ``y`` indexed by ``index``."""
        return gcopy(self, deep=False, x=self.x[index], y=self.y[index])
@dataclass
class SubgroupSample(NamedSample[X], _SubgroupSampleMixin):
    """A sample consisting of an input ``x`` and a subgroup label ``s``."""

    @overload
    def add_field(self, *, y: None = ..., iw: None = ...) -> Self:
        ...

    @overload
    def add_field(self, *, y: None = ..., iw: Tensor = ...) -> "SubgroupSampleIW":
        ...

    @overload
    def add_field(self, *, y: Tensor = ..., iw: None = ...) -> "TernarySample":
        ...

    @overload
    def add_field(self, *, y: Tensor = ..., iw: Tensor = ...) -> "TernarySampleIW":
        ...

    def add_field(
        self, *, y: Optional[Tensor] = None, iw: Optional[Tensor] = None
    ) -> Union[Self, "SubgroupSampleIW", "TernarySample", "TernarySampleIW"]:
        """Return a new sample enriched with whichever of ``y``/``iw`` are given."""
        if y is None:
            return self if iw is None else SubgroupSampleIW(x=self.x, s=self.s, iw=iw)
        if iw is None:
            return TernarySample(x=self.x, s=self.s, y=y)
        return TernarySampleIW(x=self.x, s=self.s, y=y, iw=iw)

    @implements(SampleBase)
    def __iter__(self) -> Iterator[LoadedData]:
        yield from (self.x, self.s)

    @implements(NamedSample)
    def __add__(self, other: Self) -> Self:
        """Concatenate two samples along the batch dimension."""
        merged = gcopy(self, deep=False)
        merged.s = torch.cat([merged.s, other.s], dim=0)
        batched = len(merged.s) > 1
        merged.x = concatenate_inputs(merged.x, other.x, is_batched=batched)
        return merged

    @implements(SampleBase)
    def __getitem__(self: "SubgroupSample[XI]", index: IndexType) -> "SubgroupSample[XI]":
        """Return a shallow copy with ``x`` and ``s`` indexed by ``index``."""
        return gcopy(self, deep=False, x=self.x[index], s=self.s[index])
@dataclass
class _IwMixin:
    # Per-instance importance weight tensor.
    iw: Tensor
@dataclass
class BinarySampleIW(BinarySample[X], _BinarySampleMixin, _IwMixin):
    """A sample with input ``x``, target ``y`` and instance weight ``iw``."""

    @overload
    def add_field(self, s: None = ...) -> Self:
        ...

    @overload
    def add_field(self, s: Tensor = ...) -> "TernarySampleIW":
        ...

    def add_field(self, s: Optional[Tensor] = None) -> Union[Self, "TernarySampleIW"]:
        """Return a new sample enriched with a subgroup label, if one is given."""
        if s is None:
            return self
        return TernarySampleIW(x=self.x, s=s, y=self.y, iw=self.iw)

    @implements(SampleBase)
    def __iter__(self) -> Iterator[LoadedData]:
        yield from (self.x, self.y, self.iw)

    @implements(BinarySample)
    def __add__(self, other: Self) -> Self:
        """Concatenate two samples along the batch dimension."""
        merged = super().__add__(other)
        merged.iw = torch.cat([merged.iw, other.iw], dim=0)
        return merged

    @implements(SampleBase)
    def __getitem__(self: "BinarySampleIW[XI]", index: IndexType) -> "BinarySampleIW[XI]":
        """Return a shallow copy with ``x``, ``y`` and ``iw`` indexed by ``index``."""
        return gcopy(self, deep=False, x=self.x[index], y=self.y[index], iw=self.iw[index])
@dataclass
class SubgroupSampleIW(SubgroupSample[X], _IwMixin):
    """A sample with input ``x``, subgroup label ``s`` and instance weight ``iw``."""

    @overload
    def add_field(self, y: None = ...) -> Self:
        ...

    @overload
    def add_field(self, y: Tensor = ...) -> "TernarySampleIW":
        ...

    def add_field(self, y: Optional[Tensor] = None) -> Union[Self, "TernarySampleIW"]:
        """Return a new sample enriched with a target, if one is given."""
        if y is None:
            return self
        return TernarySampleIW(x=self.x, s=self.s, y=y, iw=self.iw)

    @implements(SampleBase)
    def __iter__(self) -> Iterator[LoadedData]:
        yield from (self.x, self.s, self.iw)

    @implements(SubgroupSample)
    def __add__(self, other: Self) -> Self:
        """Concatenate two samples along the batch dimension."""
        merged = super().__add__(other)
        merged.iw = torch.cat([merged.iw, other.iw], dim=0)
        return merged

    @implements(SampleBase)
    def __getitem__(self: "SubgroupSampleIW[XI]", index: IndexType) -> "SubgroupSampleIW[XI]":
        """Return a shallow copy with ``x``, ``s`` and ``iw`` indexed by ``index``."""
        return gcopy(self, deep=False, x=self.x[index], s=self.s[index], iw=self.iw[index])
@dataclass
class TernarySample(BinarySample[X], _SubgroupSampleMixin):
    """A sample with input ``x``, target ``y`` and subgroup label ``s``."""

    @overload
    def add_field(self, iw: None = ...) -> Self:
        ...

    @overload
    def add_field(self, iw: Tensor) -> Self:
        ...

    def add_field(self, iw: Optional[Tensor] = None) -> Union[Self, "TernarySampleIW"]:
        """Return a new sample enriched with an instance weight, if one is given."""
        if iw is None:
            return self
        return TernarySampleIW(x=self.x, s=self.s, y=self.y, iw=iw)

    @implements(SampleBase)
    def __iter__(self) -> Iterator[LoadedData]:
        yield from (self.x, self.y, self.s)

    @implements(BinarySample)
    def __add__(self, other: Self) -> Self:
        """Concatenate two samples along the batch dimension."""
        merged = super().__add__(other)
        merged.s = torch.cat([merged.s, other.s], dim=0)
        return merged

    @implements(SampleBase)
    def __getitem__(self: "TernarySample[XI]", index: IndexType) -> "TernarySample[XI]":
        """Return a shallow copy with ``x``, ``y`` and ``s`` indexed by ``index``."""
        return gcopy(self, deep=False, x=self.x[index], y=self.y[index], s=self.s[index])
@dataclass
class TernarySampleIW(TernarySample[X], _IwMixin):
    """A sample with input ``x``, target ``y``, subgroup label ``s`` and instance weight ``iw``."""

    def add_field(self) -> Self:
        """No further fields can be added; return ``self`` unchanged."""
        return self

    @implements(SampleBase)
    def __iter__(self) -> Iterator[LoadedData]:
        yield from (self.x, self.y, self.s, self.iw)

    @implements(TernarySample)
    def __add__(self, other: Self) -> Self:
        """Concatenate two samples along the batch dimension."""
        merged = super().__add__(other)
        merged.iw = torch.cat([merged.iw, other.iw], dim=0)
        return merged

    @implements(SampleBase)
    def __getitem__(self: "TernarySampleIW[XI]", index: IndexType) -> "TernarySampleIW[XI]":
        """Return a shallow copy with all four fields indexed by ``index``."""
        return gcopy(
            self, deep=False, x=self.x[index], y=self.y[index], s=self.s[index], iw=self.iw[index]
        )
def shallow_astuple(dataclass: object) -> Tuple[Any, ...]:
    """Like :func:`dataclasses.astuple` but without deep-copying or recursing.

    :param dataclass: A dataclass *instance*.
    :raises TypeError: If ``dataclass`` is not a dataclass instance.
    """
    if not is_dataclass(dataclass):
        raise TypeError("shallow_astuple() should be called on dataclass instances")
    field_names = (f.name for f in fields(dataclass))
    return tuple(getattr(dataclass, name) for name in field_names)
def shallow_asdict(dataclass: object) -> Dict[str, Any]:
    """Like :func:`dataclasses.asdict` but without deep-copying or recursing.

    :param dataclass: A dataclass *instance*.
    :raises TypeError: If ``dataclass`` is not a dataclass instance.
    """
    if not is_dataclass(dataclass):
        raise TypeError("shallow_asdict() should be called on dataclass instances")
    return {f.name: getattr(dataclass, f.name) for f in fields(dataclass)}
@attr.define
class ImageSize:
    """(channels, height, width) dimensions of an image."""
    c: int  # number of channels
    h: int  # height in pixels
    w: int  # width in pixels
    def __mul__(self, other: Union[Self, float]) -> Self:
        """Scale by a float scalar (each dim rounded) or elementwise by another size."""
        copy = gcopy(self, deep=False)
        # NOTE(review): an ``int`` scalar fails this check and falls into the
        # elementwise branch, raising AttributeError on ``other.c`` — confirm
        # whether int scalars should also be treated as uniform scales.
        if isinstance(other, float):
            copy.c = round(copy.c * other)
            copy.h = round(copy.h * other)
            copy.w = round(copy.w * other)
        else:
            copy.c *= other.c
            copy.h *= other.h
            copy.w *= other.w
        return copy
    def __iter__(self) -> Iterator[int]:
        # Iterate dimensions in (c, h, w) order.
        yield from (self.c, self.h, self.w)
    @property
    def numel(self) -> int:
        # NOTE(review): this returns the SUM of the dimensions; "numel"
        # conventionally means the number of elements (c * h * w) — confirm
        # that summing is the intended behavior before relying on this.
        return sum(iter(self))
@attr.define(kw_only=True)
class MeanStd:
    """Per-channel normalization statistics (mean and standard deviation)."""

    mean: Union[Tuple[float, ...], List[float]]
    std: Union[Tuple[float, ...], List[float]]

    def __iter__(self) -> Iterator[Union[Tuple[float, ...], List[float]]]:
        """Yield ``mean`` then ``std``.

        Fixed: previously ``mean`` was yielded twice and ``std`` was never
        produced.
        """
        yield from (self.mean, self.std)

    def __imul__(self, value: float) -> Self:
        """Scale both statistics by ``value`` in place."""
        self.mean = [value * elem for elem in self.mean]
        self.std = [value * elem for elem in self.std]
        return self

    def __mul__(self, value: float) -> Self:
        """Return a deep copy with both statistics scaled by ``value``."""
        copy = gcopy(self, deep=True)
        copy *= value
        return copy

    def __itruediv__(self, value: float) -> Self:
        """Divide both statistics by ``value`` in place.

        Fixed: the previous ``__idiv__`` name is a Python-2-only hook that is
        never invoked by ``/=`` in Python 3.
        """
        self *= 1 / value
        return self

    def __truediv__(self, value: float) -> Self:
        """Return a deep copy with both statistics divided by ``value``."""
        copy = gcopy(self, deep=True)
        copy *= 1 / value
        return copy

    # Backwards-compatible aliases for any code that called the old names directly.
    __idiv__ = __itruediv__
    __div__ = __truediv__
R_co = TypeVar("R_co", covariant=True)  # sample type returned by a dataset
@runtime_checkable
class Dataset(Protocol[R_co]):
    """Minimal dataset protocol: anything indexable by an integer."""
    def __getitem__(self, index: int) -> R_co:
        """Return the sample at ``index``."""
        ...
@runtime_checkable
class SizedDataset(Dataset[R_co], Sized, Protocol):
    """A :class:`Dataset` that additionally knows its own length."""
    @implements(Dataset)
    def __getitem__(self, index: int) -> R_co:
        """Return the sample at ``index``."""
        ...
    @implements(Sized)
    def __len__(self) -> int:
        """Number of samples in the dataset."""
        ...
X2 = TypeVar("X2", bound=UnloadedData)  # unloaded-input type held by the dataset
Y = TypeVar("Y", Tensor, None)  # target data, or None if targets are absent
S = TypeVar("S", Tensor, None)  # subgroup data, or None if subgroups are absent
@runtime_checkable
class PseudoCdtDataset(Protocol[R_co, X2, Y, S]):
    """Structural stand-in for a conduit dataset: exposes ``x``/``y``/``s``
    attributes plus indexing and length."""
    x: X2  # unloaded inputs
    y: Y  # targets (may be None)
    s: S  # subgroup labels (may be None)
    def __getitem__(self, index: int) -> R_co:
        """Return the sample at ``index``."""
        ...
    def __len__(self) -> int:
        """Number of samples in the dataset."""
        ...
D = TypeVar("D", bound=Dataset)  # concrete dataset type held by a split/wrapper
@runtime_checkable
class DatasetWrapper(SizedDataset[R_co], Protocol):
    """A dataset that wraps (delegates to) another dataset."""
    dataset: Dataset  # the wrapped dataset
    @implements(SizedDataset)
    def __getitem__(self, index: int) -> R_co:
        """Return the sample at ``index``."""
        ...
    @implements(SizedDataset)
    def __len__(self) -> Optional[int]:
        # NOTE(review): returning ``None`` from ``__len__`` breaks the
        # ``len()`` contract (TypeError at call time) — confirm callers guard
        # against wrapping unsized datasets.
        if isinstance(self.dataset, SizedDataset):
            return len(self.dataset)
        return None
@attr.define(kw_only=True)
class TrainTestSplit(Generic[D]):
    """A dataset partitioned into train and test subsets."""

    train: D
    test: D

    def __iter__(self) -> Iterator[D]:
        # Iterate the partitions in canonical (train, test) order.
        yield self.train
        yield self.test
@attr.define(kw_only=True)
class TrainValTestSplit(TrainTestSplit[D]):
    """A dataset partitioned into train, validation and test subsets."""

    val: D

    def __iter__(self) -> Iterator[D]:
        # Iterate the partitions in canonical (train, val, test) order.
        yield self.train
        yield self.val
        yield self.test
| [
"numpy.stack",
"ranzen.decorators.implements",
"torch.stack",
"ranzen.misc.gcopy",
"typing.cast",
"attr.define",
"torch.cat",
"numpy.expand_dims",
"dataclasses.is_dataclass",
"dataclasses.field",
"dataclasses.fields",
"typing.TypeVar",
"dataclasses.asdict",
"numpy.concatenate"
] | [((1868, 1898), 'typing.TypeVar', 'TypeVar', (['"""X"""'], {'bound': 'LoadedData'}), "('X', bound=LoadedData)\n", (1875, 1898), False, 'from typing import Any, Dict, Generic, Iterator, List, Optional, Protocol, Tuple, TypeVar, Union, cast, overload\n'), ((1906, 1955), 'typing.TypeVar', 'TypeVar', (['"""X_co"""'], {'bound': 'LoadedData', 'covariant': '(True)'}), "('X_co', bound=LoadedData, covariant=True)\n", (1913, 1955), False, 'from typing import Any, Dict, Generic, Iterator, List, Optional, Protocol, Tuple, TypeVar, Union, cast, overload\n'), ((1961, 1996), 'typing.TypeVar', 'TypeVar', (['"""XI"""'], {'bound': 'IndexabledData'}), "('XI', bound=IndexabledData)\n", (1968, 1996), False, 'from typing import Any, Dict, Generic, Iterator, List, Optional, Protocol, Tuple, TypeVar, Union, cast, overload\n'), ((5603, 5652), 'typing.TypeVar', 'TypeVar', (['"""IS"""'], {'bound': '"""SampleBase[IndexabledData]"""'}), "('IS', bound='SampleBase[IndexabledData]')\n", (5610, 5652), False, 'from typing import Any, Dict, Generic, Iterator, List, Optional, Protocol, Tuple, TypeVar, Union, cast, overload\n'), ((16134, 16159), 'attr.define', 'attr.define', ([], {'kw_only': '(True)'}), '(kw_only=True)\n', (16145, 16159), False, 'import attr\n'), ((16925, 16956), 'typing.TypeVar', 'TypeVar', (['"""R_co"""'], {'covariant': '(True)'}), "('R_co', covariant=True)\n", (16932, 16956), False, 'from typing import Any, Dict, Generic, Iterator, List, Optional, Protocol, Tuple, TypeVar, Union, cast, overload\n'), ((17298, 17331), 'typing.TypeVar', 'TypeVar', (['"""X2"""'], {'bound': 'UnloadedData'}), "('X2', bound=UnloadedData)\n", (17305, 17331), False, 'from typing import Any, Dict, Generic, Iterator, List, Optional, Protocol, Tuple, TypeVar, Union, cast, overload\n'), ((17336, 17362), 'typing.TypeVar', 'TypeVar', (['"""Y"""', 'Tensor', 'None'], {}), "('Y', Tensor, None)\n", (17343, 17362), False, 'from typing import Any, Dict, Generic, Iterator, List, Optional, Protocol, Tuple, TypeVar, 
Union, cast, overload\n'), ((17367, 17393), 'typing.TypeVar', 'TypeVar', (['"""S"""', 'Tensor', 'None'], {}), "('S', Tensor, None)\n", (17374, 17393), False, 'from typing import Any, Dict, Generic, Iterator, List, Optional, Protocol, Tuple, TypeVar, Union, cast, overload\n'), ((17602, 17629), 'typing.TypeVar', 'TypeVar', (['"""D"""'], {'bound': 'Dataset'}), "('D', bound=Dataset)\n", (17609, 17629), False, 'from typing import Any, Dict, Generic, Iterator, List, Optional, Protocol, Tuple, TypeVar, Union, cast, overload\n'), ((17996, 18021), 'attr.define', 'attr.define', ([], {'kw_only': '(True)'}), '(kw_only=True)\n', (18007, 18021), False, 'import attr\n'), ((18168, 18193), 'attr.define', 'attr.define', ([], {'kw_only': '(True)'}), '(kw_only=True)\n', (18179, 18193), False, 'import attr\n'), ((3823, 3840), 'ranzen.decorators.implements', 'implements', (['Sized'], {}), '(Sized)\n', (3833, 3840), False, 'from ranzen.decorators import implements\n'), ((3945, 3964), 'ranzen.decorators.implements', 'implements', (['Addable'], {}), '(Addable)\n', (3955, 3964), False, 'from ranzen.decorators import implements\n'), ((4503, 4530), 'dataclasses.field', 'field', ([], {'default_factory': 'list'}), '(default_factory=list)\n', (4508, 4530), False, 'from dataclasses import asdict, dataclass, field, fields, is_dataclass\n'), ((5013, 5039), 'ranzen.decorators.implements', 'implements', (['InputContainer'], {}), '(InputContainer)\n', (5023, 5039), False, 'from ranzen.decorators import implements\n'), ((5335, 5361), 'ranzen.decorators.implements', 'implements', (['InputContainer'], {}), '(InputContainer)\n', (5345, 5361), False, 'from ranzen.decorators import implements\n'), ((5718, 5744), 'ranzen.decorators.implements', 'implements', (['InputContainer'], {}), '(InputContainer)\n', (5728, 5744), False, 'from ranzen.decorators import implements\n'), ((5929, 5955), 'ranzen.decorators.implements', 'implements', (['InputContainer'], {}), '(InputContainer)\n', (5939, 5955), False, 'from 
ranzen.decorators import implements\n'), ((7811, 7833), 'ranzen.decorators.implements', 'implements', (['SampleBase'], {}), '(SampleBase)\n', (7821, 7833), False, 'from ranzen.decorators import implements\n'), ((7900, 7922), 'ranzen.decorators.implements', 'implements', (['SampleBase'], {}), '(SampleBase)\n', (7910, 7922), False, 'from ranzen.decorators import implements\n'), ((9129, 9151), 'ranzen.decorators.implements', 'implements', (['SampleBase'], {}), '(SampleBase)\n', (9139, 9151), False, 'from ranzen.decorators import implements\n'), ((9242, 9265), 'ranzen.decorators.implements', 'implements', (['NamedSample'], {}), '(NamedSample)\n', (9252, 9265), False, 'from ranzen.decorators import implements\n'), ((9509, 9531), 'ranzen.decorators.implements', 'implements', (['SampleBase'], {}), '(SampleBase)\n', (9519, 9531), False, 'from ranzen.decorators import implements\n'), ((10659, 10681), 'ranzen.decorators.implements', 'implements', (['SampleBase'], {}), '(SampleBase)\n', (10669, 10681), False, 'from ranzen.decorators import implements\n'), ((10772, 10795), 'ranzen.decorators.implements', 'implements', (['NamedSample'], {}), '(NamedSample)\n', (10782, 10795), False, 'from ranzen.decorators import implements\n'), ((11039, 11061), 'ranzen.decorators.implements', 'implements', (['SampleBase'], {}), '(SampleBase)\n', (11049, 11061), False, 'from ranzen.decorators import implements\n'), ((11728, 11750), 'ranzen.decorators.implements', 'implements', (['SampleBase'], {}), '(SampleBase)\n', (11738, 11750), False, 'from ranzen.decorators import implements\n'), ((11850, 11874), 'ranzen.decorators.implements', 'implements', (['BinarySample'], {}), '(BinarySample)\n', (11860, 11874), False, 'from ranzen.decorators import implements\n'), ((12039, 12061), 'ranzen.decorators.implements', 'implements', (['SampleBase'], {}), '(SampleBase)\n', (12049, 12061), False, 'from ranzen.decorators import implements\n'), ((12687, 12709), 'ranzen.decorators.implements', 'implements', 
(['SampleBase'], {}), '(SampleBase)\n', (12697, 12709), False, 'from ranzen.decorators import implements\n'), ((12809, 12835), 'ranzen.decorators.implements', 'implements', (['SubgroupSample'], {}), '(SubgroupSample)\n', (12819, 12835), False, 'from ranzen.decorators import implements\n'), ((13000, 13022), 'ranzen.decorators.implements', 'implements', (['SampleBase'], {}), '(SampleBase)\n', (13010, 13022), False, 'from ranzen.decorators import implements\n'), ((13644, 13666), 'ranzen.decorators.implements', 'implements', (['SampleBase'], {}), '(SampleBase)\n', (13654, 13666), False, 'from ranzen.decorators import implements\n'), ((13765, 13789), 'ranzen.decorators.implements', 'implements', (['BinarySample'], {}), '(BinarySample)\n', (13775, 13789), False, 'from ranzen.decorators import implements\n'), ((13951, 13973), 'ranzen.decorators.implements', 'implements', (['SampleBase'], {}), '(SampleBase)\n', (13961, 13973), False, 'from ranzen.decorators import implements\n'), ((14276, 14298), 'ranzen.decorators.implements', 'implements', (['SampleBase'], {}), '(SampleBase)\n', (14286, 14298), False, 'from ranzen.decorators import implements\n'), ((14406, 14431), 'ranzen.decorators.implements', 'implements', (['TernarySample'], {}), '(TernarySample)\n', (14416, 14431), False, 'from ranzen.decorators import implements\n'), ((14596, 14618), 'ranzen.decorators.implements', 'implements', (['SampleBase'], {}), '(SampleBase)\n', (14606, 14618), False, 'from ranzen.decorators import implements\n'), ((17146, 17165), 'ranzen.decorators.implements', 'implements', (['Dataset'], {}), '(Dataset)\n', (17156, 17165), False, 'from ranzen.decorators import implements\n'), ((17231, 17248), 'ranzen.decorators.implements', 'implements', (['Sized'], {}), '(Sized)\n', (17241, 17248), False, 'from ranzen.decorators import implements\n'), ((17730, 17754), 'ranzen.decorators.implements', 'implements', (['SizedDataset'], {}), '(SizedDataset)\n', (17740, 17754), False, 'from ranzen.decorators 
import implements\n'), ((17820, 17844), 'ranzen.decorators.implements', 'implements', (['SizedDataset'], {}), '(SizedDataset)\n', (17830, 17844), False, 'from ranzen.decorators import implements\n'), ((2785, 2813), 'torch.stack', 'torch.stack', (['[x1, x2]'], {'dim': '(0)'}), '([x1, x2], dim=0)\n', (2796, 2813), False, 'import torch\n'), ((5421, 5444), 'ranzen.misc.gcopy', 'gcopy', (['self'], {'deep': '(False)'}), '(self, deep=False)\n', (5426, 5444), False, 'from ranzen.misc import gcopy\n'), ((6015, 6038), 'ranzen.misc.gcopy', 'gcopy', (['self'], {'deep': '(False)'}), '(self, deep=False)\n', (6020, 6038), False, 'from ranzen.misc import gcopy\n'), ((6525, 6565), 'ranzen.misc.gcopy', 'gcopy', (['self'], {'deep': '(False)', 'x': 'self.x[index]'}), '(self, deep=False, x=self.x[index])\n', (6530, 6565), False, 'from ranzen.misc import gcopy\n'), ((8023, 8063), 'ranzen.misc.gcopy', 'gcopy', (['self'], {'deep': '(False)', 'x': 'self.x[index]'}), '(self, deep=False, x=self.x[index])\n', (8028, 8063), False, 'from ranzen.misc import gcopy\n'), ((9325, 9348), 'ranzen.misc.gcopy', 'gcopy', (['self'], {'deep': '(False)'}), '(self, deep=False)\n', (9330, 9348), False, 'from ranzen.misc import gcopy\n'), ((9366, 9401), 'torch.cat', 'torch.cat', (['[copy.y, other.y]'], {'dim': '(0)'}), '([copy.y, other.y], dim=0)\n', (9375, 9401), False, 'import torch\n'), ((9634, 9691), 'ranzen.misc.gcopy', 'gcopy', (['self'], {'deep': '(False)', 'x': 'self.x[index]', 'y': 'self.y[index]'}), '(self, deep=False, x=self.x[index], y=self.y[index])\n', (9639, 9691), False, 'from ranzen.misc import gcopy\n'), ((10855, 10878), 'ranzen.misc.gcopy', 'gcopy', (['self'], {'deep': '(False)'}), '(self, deep=False)\n', (10860, 10878), False, 'from ranzen.misc import gcopy\n'), ((10896, 10931), 'torch.cat', 'torch.cat', (['[copy.s, other.s]'], {'dim': '(0)'}), '([copy.s, other.s], dim=0)\n', (10905, 10931), False, 'import torch\n'), ((11168, 11225), 'ranzen.misc.gcopy', 'gcopy', (['self'], {'deep': 
'(False)', 'x': 'self.x[index]', 's': 'self.s[index]'}), '(self, deep=False, x=self.x[index], s=self.s[index])\n', (11173, 11225), False, 'from ranzen.misc import gcopy\n'), ((11975, 12012), 'torch.cat', 'torch.cat', (['[copy.iw, other.iw]'], {'dim': '(0)'}), '([copy.iw, other.iw], dim=0)\n', (11984, 12012), False, 'import torch\n'), ((12168, 12244), 'ranzen.misc.gcopy', 'gcopy', (['self'], {'deep': '(False)', 'x': 'self.x[index]', 'y': 'self.y[index]', 'iw': 'self.iw[index]'}), '(self, deep=False, x=self.x[index], y=self.y[index], iw=self.iw[index])\n', (12173, 12244), False, 'from ranzen.misc import gcopy\n'), ((12936, 12973), 'torch.cat', 'torch.cat', (['[copy.iw, other.iw]'], {'dim': '(0)'}), '([copy.iw, other.iw], dim=0)\n', (12945, 12973), False, 'import torch\n'), ((13133, 13209), 'ranzen.misc.gcopy', 'gcopy', (['self'], {'deep': '(False)', 'x': 'self.x[index]', 's': 'self.s[index]', 'iw': 'self.iw[index]'}), '(self, deep=False, x=self.x[index], s=self.s[index], iw=self.iw[index])\n', (13138, 13209), False, 'from ranzen.misc import gcopy\n'), ((13889, 13924), 'torch.cat', 'torch.cat', (['[copy.s, other.s]'], {'dim': '(0)'}), '([copy.s, other.s], dim=0)\n', (13898, 13924), False, 'import torch\n'), ((14078, 14152), 'ranzen.misc.gcopy', 'gcopy', (['self'], {'deep': '(False)', 'x': 'self.x[index]', 'y': 'self.y[index]', 's': 'self.s[index]'}), '(self, deep=False, x=self.x[index], y=self.y[index], s=self.s[index])\n', (14083, 14152), False, 'from ranzen.misc import gcopy\n'), ((14532, 14569), 'torch.cat', 'torch.cat', (['[copy.iw, other.iw]'], {'dim': '(0)'}), '([copy.iw, other.iw], dim=0)\n', (14541, 14569), False, 'import torch\n'), ((14727, 14824), 'ranzen.misc.gcopy', 'gcopy', (['self'], {'deep': '(False)', 'x': 'self.x[index]', 'y': 'self.y[index]', 's': 'self.s[index]', 'iw': 'self.iw[index]'}), '(self, deep=False, x=self.x[index], y=self.y[index], s=self.s[index],\n iw=self.iw[index])\n', (14732, 14824), False, 'from ranzen.misc import gcopy\n'), ((14989, 
15012), 'dataclasses.is_dataclass', 'is_dataclass', (['dataclass'], {}), '(dataclass)\n', (15001, 15012), False, 'from dataclasses import asdict, dataclass, field, fields, is_dataclass\n'), ((15322, 15345), 'dataclasses.is_dataclass', 'is_dataclass', (['dataclass'], {}), '(dataclass)\n', (15334, 15345), False, 'from dataclasses import asdict, dataclass, field, fields, is_dataclass\n'), ((15657, 15680), 'ranzen.misc.gcopy', 'gcopy', (['self'], {'deep': '(False)'}), '(self, deep=False)\n', (15662, 15680), False, 'from ranzen.misc import gcopy\n'), ((16628, 16650), 'ranzen.misc.gcopy', 'gcopy', (['self'], {'deep': '(True)'}), '(self, deep=True)\n', (16633, 16650), False, 'from ranzen.misc import gcopy\n'), ((16847, 16869), 'ranzen.misc.gcopy', 'gcopy', (['self'], {'deep': '(True)'}), '(self, deep=True)\n', (16852, 16869), False, 'from ranzen.misc import gcopy\n'), ((2727, 2753), 'torch.cat', 'torch.cat', (['[x1, x2]'], {'dim': '(0)'}), '([x1, x2], dim=0)\n', (2736, 2753), False, 'import torch\n'), ((3288, 3314), 'numpy.stack', 'np.stack', (['[x1, x2]'], {'axis': '(0)'}), '([x1, x2], axis=0)\n', (3296, 3314), True, 'import numpy as np\n'), ((6250, 6274), 'ranzen.misc.gcopy', 'gcopy', (['tuple_'], {'deep': '(True)'}), '(tuple_, deep=True)\n', (6255, 6274), False, 'from ranzen.misc import gcopy\n'), ((6377, 6389), 'dataclasses.asdict', 'asdict', (['self'], {}), '(self)\n', (6383, 6389), False, 'from dataclasses import asdict, dataclass, field, fields, is_dataclass\n'), ((15499, 15516), 'dataclasses.fields', 'fields', (['dataclass'], {}), '(dataclass)\n', (15505, 15516), False, 'from dataclasses import asdict, dataclass, field, fields, is_dataclass\n'), ((3049, 3075), 'numpy.expand_dims', 'np.expand_dims', (['x2'], {'axis': '(0)'}), '(x2, axis=0)\n', (3063, 3075), True, 'import numpy as np\n'), ((3224, 3256), 'numpy.concatenate', 'np.concatenate', (['[x1, x2]'], {'axis': '(0)'}), '([x1, x2], axis=0)\n', (3238, 3256), True, 'import numpy as np\n'), ((15160, 15177), 
'dataclasses.fields', 'fields', (['dataclass'], {}), '(dataclass)\n', (15166, 15177), False, 'from dataclasses import asdict, dataclass, field, fields, is_dataclass\n'), ((3139, 3165), 'numpy.expand_dims', 'np.expand_dims', (['x1'], {'axis': '(0)'}), '(x1, axis=0)\n', (3153, 3165), True, 'import numpy as np\n'), ((2239, 2253), 'typing.cast', 'cast', (['List', 'x2'], {}), '(List, x2)\n', (2243, 2253), False, 'from typing import Any, Dict, Generic, Iterator, List, Optional, Protocol, Tuple, TypeVar, Union, cast, overload\n')] |
# Copyright 2022 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import numpy as np
import pytest
import mindspore.context as context
import mindspore.nn as nn
from mindspore import Tensor
from mindspore.ops import operations as P
class LrnNet(nn.Cell):
    """Thin ``nn.Cell`` wrapper around the ``P.LRN`` operator.

    Args:
        depth_radius: half-width of the 1-D normalization window.
        bias: offset added to the squared sum (usually positive to avoid
            dividing by zero).
        alpha: scale factor applied to the squared sum.
        beta: exponent of the normalization term.
        norm_region: normalization region; only "ACROSS_CHANNELS" is passed
            through here.
    """

    def __init__(self, depth_radius=5, bias=1.0, alpha=1.0, beta=0.5, norm_region="ACROSS_CHANNELS"):
        super(LrnNet, self).__init__()
        # Kept as attributes for inspection; the values are baked into
        # ``self.lrn`` below.
        self.depth_radius = depth_radius
        self.bias = bias
        self.alpha = alpha
        self.beta = beta
        self.norm_region = norm_region
        self.lrn = P.LRN(depth_radius, bias, alpha, beta, norm_region)

    def construct(self, input_x):
        """Apply local response normalization to ``input_x`` and return it."""
        output = self.lrn(input_x)
        return output
def lrn_np_bencmark(data_type):
    """
    Feature: generate a lrn numpy benchmark.
    Description: Returns the precomputed expected LRN output (shape
        ``(1, 3, 2, 2)``) cast to ``data_type``.
    Expectation: match to np mindspore LRN.
    """
    expected = [[[[1.6239204, -0.61149347],
                  [-0.5279556, -1.0724881]],
                 [[0.86518127, -2.3005495],
                  [1.7440975, -0.760866]],
                 [[0.31895563, -0.2492632],
                  [1.4615093, -2.059218]]]]
    return np.array(expected).astype(data_type)
@pytest.mark.level0
@pytest.mark.env_onecard
@pytest.mark.platform_x86_cpu
@pytest.mark.parametrize("data_type", [np.float32, np.float16])
def test_lrn(data_type):
    """
    Feature: Test LRN.
    Description: The input shape need to match to output shape.
    Expectation: match to np benchmark.
    """
    # First run the operator under graph (compiled) mode.
    context.set_context(mode=context.GRAPH_MODE)
    input_data = np.array([[[[1.6243454, -0.6117564],
                             [-0.5281718, -1.0729686]],
                            [[0.86540765, -2.3015387],
                             [1.7448118, -0.7612069]],
                            [[0.3190391, -0.24937038],
                             [1.4621079, -2.0601406]]]]).astype(data_type)
    # fp16 carries much less precision, so relax the tolerance for it.
    error = 1e-6
    if data_type == np.float16:
        error = 1e-3
    benchmark_output = lrn_np_bencmark(data_type)
    lrn = LrnNet(depth_radius=2, bias=1.0, alpha=0.0001, beta=0.75)
    output = lrn(Tensor(input_data))
    np.testing.assert_allclose(output.asnumpy(), benchmark_output, rtol=error)
    # Re-run the identical check in pynative (eager) mode.
    context.set_context(mode=context.PYNATIVE_MODE)
    output = lrn(Tensor(input_data))
    np.testing.assert_allclose(output.asnumpy(), benchmark_output, rtol=error)
| [
"mindspore.context.set_context",
"mindspore.Tensor",
"numpy.array",
"pytest.mark.parametrize",
"mindspore.ops.operations.LRN"
] | [((1938, 2000), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""data_type"""', '[np.float32, np.float16]'], {}), "('data_type', [np.float32, np.float16])\n", (1961, 2000), False, 'import pytest\n'), ((2173, 2217), 'mindspore.context.set_context', 'context.set_context', ([], {'mode': 'context.GRAPH_MODE'}), '(mode=context.GRAPH_MODE)\n', (2192, 2217), True, 'import mindspore.context as context\n'), ((2876, 2923), 'mindspore.context.set_context', 'context.set_context', ([], {'mode': 'context.PYNATIVE_MODE'}), '(mode=context.PYNATIVE_MODE)\n', (2895, 2923), True, 'import mindspore.context as context\n'), ((1176, 1227), 'mindspore.ops.operations.LRN', 'P.LRN', (['depth_radius', 'bias', 'alpha', 'beta', 'norm_region'], {}), '(depth_radius, bias, alpha, beta, norm_region)\n', (1181, 1227), True, 'from mindspore.ops import operations as P\n'), ((2773, 2791), 'mindspore.Tensor', 'Tensor', (['input_data'], {}), '(input_data)\n', (2779, 2791), False, 'from mindspore import Tensor\n'), ((2941, 2959), 'mindspore.Tensor', 'Tensor', (['input_data'], {}), '(input_data)\n', (2947, 2959), False, 'from mindspore import Tensor\n'), ((1536, 1716), 'numpy.array', 'np.array', (['[[[[1.6239204, -0.61149347], [-0.5279556, -1.0724881]], [[0.86518127, -\n 2.3005495], [1.7440975, -0.760866]], [[0.31895563, -0.2492632], [\n 1.4615093, -2.059218]]]]'], {}), '([[[[1.6239204, -0.61149347], [-0.5279556, -1.0724881]], [[\n 0.86518127, -2.3005495], [1.7440975, -0.760866]], [[0.31895563, -\n 0.2492632], [1.4615093, -2.059218]]]])\n', (1544, 1716), True, 'import numpy as np\n'), ((2235, 2416), 'numpy.array', 'np.array', (['[[[[1.6243454, -0.6117564], [-0.5281718, -1.0729686]], [[0.86540765, -\n 2.3015387], [1.7448118, -0.7612069]], [[0.3190391, -0.24937038], [\n 1.4621079, -2.0601406]]]]'], {}), '([[[[1.6243454, -0.6117564], [-0.5281718, -1.0729686]], [[\n 0.86540765, -2.3015387], [1.7448118, -0.7612069]], [[0.3190391, -\n 0.24937038], [1.4621079, -2.0601406]]]])\n', (2243, 2416), 
True, 'import numpy as np\n')] |
import math
import numpy as np
import numpy.linalg as la
def point_from_angle(x, y, angle, length):
    """Return the endpoint of a line starting in x,y using the given angle and length"""
    end_x = x + length * math.cos(angle)
    end_y = y + length * math.sin(angle)
    return end_x, end_y
def distance(point_1, point_2):
    """Return the Euclidean distance between the two 2-D points.

    Uses ``math.hypot``, which is equivalent to the explicit
    ``sqrt(dx**2 + dy**2)`` form but numerically more robust (it avoids
    intermediate overflow/underflow when squaring the deltas).
    """
    return math.hypot(point_1[0] - point_2[0], point_1[1] - point_2[1])
def get_line_angle(point_1, point_2):
    """Returns the angle of a line that goes from point_1 to point_2, in degrees [0, 360).

    NOTE(review): the deltas are computed as ``point_1 - point_2``, i.e. this
    is the angle of the vector pointing from point_2 towards point_1 —
    confirm against callers before relying on the direction.
    """
    degrees = math.degrees(math.atan2(point_1[1] - point_2[1], point_1[0] - point_2[0]))
    return (degrees + 360) % 360
def py_ang(v1, v2):
    """ Returns the angle in radians between vectors 'v1' and 'v2' """
    # atan2(|v1 x v2|, v1 . v2) is stable even for nearly parallel vectors.
    dot = np.dot(v1, v2)
    cross_magnitude = la.norm(np.cross(v1, v2))
    return np.arctan2(cross_magnitude, dot)
def check_periodicity(angle):
    """Remove one full turn from ``angle`` so it stays within (-2*pi, 2*pi)."""
    full_turn = 2 * math.pi
    if angle >= full_turn:
        return angle - full_turn
    if angle <= -full_turn:
        return angle + full_turn
    return angle
def get_point_from_angle(point, angle, lenght):
    """
    Return a point given a starting point, the angle that it has to form with respect to X-axis and how far from that
    point it has to be.
    :param point: Initial point
    :param angle: angle with respect to X-axis
    :param lenght: How far from the initial point the final point must be
    :return: array with [x, y]
    """
    delta_x = lenght * math.cos(angle)
    delta_y = lenght * math.sin(angle)
    return [point[0] + delta_x, point[1] + delta_y]
| [
"numpy.arctan2",
"math.sqrt",
"math.atan2",
"numpy.cross",
"math.sin",
"math.cos",
"numpy.dot"
] | [((366, 440), 'math.sqrt', 'math.sqrt', (['((point_1[0] - point_2[0]) ** 2 + (point_1[1] - point_2[1]) ** 2)'], {}), '((point_1[0] - point_2[0]) ** 2 + (point_1[1] - point_2[1]) ** 2)\n', (375, 440), False, 'import math\n'), ((772, 786), 'numpy.dot', 'np.dot', (['v1', 'v2'], {}), '(v1, v2)\n', (778, 786), True, 'import numpy as np\n'), ((837, 863), 'numpy.arctan2', 'np.arctan2', (['sinang', 'cosang'], {}), '(sinang, cosang)\n', (847, 863), True, 'import numpy as np\n'), ((808, 824), 'numpy.cross', 'np.cross', (['v1', 'v2'], {}), '(v1, v2)\n', (816, 824), True, 'import numpy as np\n'), ((212, 227), 'math.cos', 'math.cos', (['angle'], {}), '(angle)\n', (220, 227), False, 'import math\n'), ((249, 264), 'math.sin', 'math.sin', (['angle'], {}), '(angle)\n', (257, 264), False, 'import math\n'), ((1509, 1524), 'math.cos', 'math.cos', (['angle'], {}), '(angle)\n', (1517, 1524), False, 'import math\n'), ((1546, 1561), 'math.sin', 'math.sin', (['angle'], {}), '(angle)\n', (1554, 1561), False, 'import math\n'), ((566, 626), 'math.atan2', 'math.atan2', (['(point_1[1] - point_2[1])', '(point_1[0] - point_2[0])'], {}), '(point_1[1] - point_2[1], point_1[0] - point_2[0])\n', (576, 626), False, 'import math\n')] |
from keras.layers import Input, Dense
from keras.models import Model
from keras.datasets import mnist
from keras import backend as K
import numpy as np
import matplotlib.pyplot as plt
import pickle
# Deep (stacked) autoencoder for MNIST: 784 -> 128 -> 64 -> 32 -> 64 -> 128 -> 784.
# Output paths for the pickled latent features and their labels.
features_path = 'deep_autoe_features.pickle'
labels_path = 'deep_autoe_labels.pickle'
# this is the size of our encoded representations
encoding_dim = 32  # 32 floats -> compression factor 24.5, assuming the input is 784 floats
# this is our input placeholder; 784 = 28 x 28
input_img = Input(shape=(784, ))
my_epochs = 100
# "encoded" is the encoded representation of the inputs
encoded = Dense(encoding_dim * 4, activation='relu')(input_img)
encoded = Dense(encoding_dim * 2, activation='relu')(encoded)
encoded = Dense(encoding_dim, activation='relu')(encoded)
# "decoded" is the lossy reconstruction of the input
decoded = Dense(encoding_dim * 2, activation='relu')(encoded)
decoded = Dense(encoding_dim * 4, activation='relu')(decoded)
decoded = Dense(784, activation='sigmoid')(decoded)
# this model maps an input to its reconstruction
autoencoder = Model(input_img, decoded)
# Separate Encoder model
# this model maps an input to its encoded representation
encoder = Model(input_img, encoded)
# Separate Decoder model
# create a placeholder for an encoded (32-dimensional) input
encoded_input = Input(shape=(encoding_dim, ))
# retrieve the last three layers of the autoencoder model (the decoder stack)
decoder_layer1 = autoencoder.layers[-3]
decoder_layer2 = autoencoder.layers[-2]
decoder_layer3 = autoencoder.layers[-1]
# create the decoder model by chaining those layers onto the encoded input
decoder = Model(encoded_input, decoder_layer3(decoder_layer2(decoder_layer1(encoded_input))))
# Train to reconstruct MNIST digits
# configure model to use a per-pixel binary crossentropy loss, and the Adadelta optimizer
autoencoder.compile(optimizer='adadelta', loss='binary_crossentropy')
# prepare input data (the training targets are the inputs themselves)
(x_train, _), (x_test, y_test) = mnist.load_data()
# normalize all values between 0 and 1 and flatten the 28x28 images into vectors of size 784
x_train = x_train.astype('float32') / 255.
x_test = x_test.astype('float32') / 255.
x_train = x_train.reshape((len(x_train), np.prod(x_train.shape[1:])))
x_test = x_test.reshape((len(x_test), np.prod(x_test.shape[1:])))
print(x_train.shape)
print(x_test.shape)
# Train the autoencoder for `my_epochs` (100) epochs
autoencoder.fit(x_train, x_train, epochs=my_epochs, batch_size=256, shuffle=True, validation_data=(x_test, x_test),
                verbose=2)
# after 100 epochs the autoencoder seems to reach a stable train/test lost value
# Visualize the reconstructed encoded representations
# encode and decode some digits
# note that we take them from the *test* set
encoded_imgs = encoder.predict(x_test)
decoded_imgs = decoder.predict(encoded_imgs)
# save latent space features 32-d vector
pickle.dump(encoded_imgs, open(features_path, 'wb'))
pickle.dump(y_test, open(labels_path, 'wb'))
n = 10  # how many digits we will display
plt.figure(figsize=(10, 2), dpi=100)
for i in range(n):
    # display original
    ax = plt.subplot(2, n, i + 1)
    plt.imshow(x_test[i].reshape(28, 28))
    plt.gray()
    ax.set_axis_off()

    # display reconstruction
    ax = plt.subplot(2, n, i + n + 1)
    plt.imshow(decoded_imgs[i].reshape(28, 28))
    plt.gray()
    ax.set_axis_off()

plt.show()
# release TF/Keras graph resources held by the backend session
K.clear_session()
| [
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.gray",
"matplotlib.pyplot.show",
"keras.datasets.mnist.load_data",
"keras.models.Model",
"numpy.prod",
"matplotlib.pyplot.figure",
"keras.layers.Dense",
"keras.layers.Input",
"keras.backend.clear_session"
] | [((509, 528), 'keras.layers.Input', 'Input', ([], {'shape': '(784,)'}), '(shape=(784,))\n', (514, 528), False, 'from keras.layers import Input, Dense\n'), ((1082, 1107), 'keras.models.Model', 'Model', (['input_img', 'decoded'], {}), '(input_img, decoded)\n', (1087, 1107), False, 'from keras.models import Model\n'), ((1202, 1227), 'keras.models.Model', 'Model', (['input_img', 'encoded'], {}), '(input_img, encoded)\n', (1207, 1227), False, 'from keras.models import Model\n'), ((1332, 1360), 'keras.layers.Input', 'Input', ([], {'shape': '(encoding_dim,)'}), '(shape=(encoding_dim,))\n', (1337, 1360), False, 'from keras.layers import Input, Dense\n'), ((1903, 1920), 'keras.datasets.mnist.load_data', 'mnist.load_data', ([], {}), '()\n', (1918, 1920), False, 'from keras.datasets import mnist\n'), ((2937, 2973), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 2)', 'dpi': '(100)'}), '(figsize=(10, 2), dpi=100)\n', (2947, 2973), True, 'import matplotlib.pyplot as plt\n'), ((3283, 3293), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3291, 3293), True, 'import matplotlib.pyplot as plt\n'), ((3295, 3312), 'keras.backend.clear_session', 'K.clear_session', ([], {}), '()\n', (3310, 3312), True, 'from keras import backend as K\n'), ((614, 656), 'keras.layers.Dense', 'Dense', (['(encoding_dim * 4)'], {'activation': '"""relu"""'}), "(encoding_dim * 4, activation='relu')\n", (619, 656), False, 'from keras.layers import Input, Dense\n'), ((678, 720), 'keras.layers.Dense', 'Dense', (['(encoding_dim * 2)'], {'activation': '"""relu"""'}), "(encoding_dim * 2, activation='relu')\n", (683, 720), False, 'from keras.layers import Input, Dense\n'), ((740, 778), 'keras.layers.Dense', 'Dense', (['encoding_dim'], {'activation': '"""relu"""'}), "(encoding_dim, activation='relu')\n", (745, 778), False, 'from keras.layers import Input, Dense\n'), ((852, 894), 'keras.layers.Dense', 'Dense', (['(encoding_dim * 2)'], {'activation': '"""relu"""'}), "(encoding_dim * 2, 
activation='relu')\n", (857, 894), False, 'from keras.layers import Input, Dense\n'), ((914, 956), 'keras.layers.Dense', 'Dense', (['(encoding_dim * 4)'], {'activation': '"""relu"""'}), "(encoding_dim * 4, activation='relu')\n", (919, 956), False, 'from keras.layers import Input, Dense\n'), ((976, 1008), 'keras.layers.Dense', 'Dense', (['(784)'], {'activation': '"""sigmoid"""'}), "(784, activation='sigmoid')\n", (981, 1008), False, 'from keras.layers import Input, Dense\n'), ((3025, 3049), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', 'n', '(i + 1)'], {}), '(2, n, i + 1)\n', (3036, 3049), True, 'import matplotlib.pyplot as plt\n'), ((3096, 3106), 'matplotlib.pyplot.gray', 'plt.gray', ([], {}), '()\n', (3104, 3106), True, 'import matplotlib.pyplot as plt\n'), ((3168, 3196), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', 'n', '(i + n + 1)'], {}), '(2, n, i + n + 1)\n', (3179, 3196), True, 'import matplotlib.pyplot as plt\n'), ((3249, 3259), 'matplotlib.pyplot.gray', 'plt.gray', ([], {}), '()\n', (3257, 3259), True, 'import matplotlib.pyplot as plt\n'), ((2140, 2166), 'numpy.prod', 'np.prod', (['x_train.shape[1:]'], {}), '(x_train.shape[1:])\n', (2147, 2166), True, 'import numpy as np\n'), ((2207, 2232), 'numpy.prod', 'np.prod', (['x_test.shape[1:]'], {}), '(x_test.shape[1:])\n', (2214, 2232), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import unittest
import numpy as np
from pytext.utils import label
class LabelUtilTest(unittest.TestCase):
    """Unit tests for the label-weight helpers in ``pytext.utils.label``."""

    def test_get_label_weights(self):
        vocab = {"foo": 0, "bar": 1}
        weights = {"foo": 3.2, "foobar": 2.1}
        result = label.get_label_weights(vocab, weights)
        expected = np.array([3.2, 1])
        np.testing.assert_array_almost_equal(expected, result.detach().numpy())

    def test_get_auto_label_weights(self):
        vocab_dict = {"foo": 0, "bar": 1}
        label_counts = {"foo": 4, "bar": 1}
        result = label.get_auto_label_weights(vocab_dict, label_counts)
        expected = np.array([0.25, 4])
        np.testing.assert_array_almost_equal(expected, result[0].detach().numpy())

    def test_get_normalized_sqrt_label_weights(self):
        vocab_dict = {"foo": 0, "bar": 1}
        label_counts = {"foo": 4, "bar": 1}
        result = label.get_normalized_sqrt_label_weights(vocab_dict, label_counts)
        expected = np.array([0.5, 2])
        np.testing.assert_array_almost_equal(expected, result[0].detach().numpy())

    def test_get_normalized_cap_label_weights(self):
        vocab_dict = {"foo": 0, "bar": 1}
        label_counts = {"foo": 4, "bar": 1}
        result = label.get_normalized_cap_label_weights(vocab_dict, label_counts)
        expected = np.array([0.625, 1])
        np.testing.assert_array_almost_equal(expected, result[0].detach().numpy())
| [
"pytext.utils.label.get_normalized_cap_label_weights",
"pytext.utils.label.get_auto_label_weights",
"pytext.utils.label.get_label_weights",
"numpy.array",
"pytext.utils.label.get_normalized_sqrt_label_weights"
] | [((350, 389), 'pytext.utils.label.get_label_weights', 'label.get_label_weights', (['vocab', 'weights'], {}), '(vocab, weights)\n', (373, 389), False, 'from pytext.utils import label\n'), ((665, 719), 'pytext.utils.label.get_auto_label_weights', 'label.get_auto_label_weights', (['vocab_dict', 'label_counts'], {}), '(vocab_dict, label_counts)\n', (693, 719), False, 'from pytext.utils import label\n'), ((1010, 1075), 'pytext.utils.label.get_normalized_sqrt_label_weights', 'label.get_normalized_sqrt_label_weights', (['vocab_dict', 'label_counts'], {}), '(vocab_dict, label_counts)\n', (1049, 1075), False, 'from pytext.utils import label\n'), ((1386, 1450), 'pytext.utils.label.get_normalized_cap_label_weights', 'label.get_normalized_cap_label_weights', (['vocab_dict', 'label_counts'], {}), '(vocab_dict, label_counts)\n', (1424, 1450), False, 'from pytext.utils import label\n'), ((448, 466), 'numpy.array', 'np.array', (['[3.2, 1]'], {}), '([3.2, 1])\n', (456, 466), True, 'import numpy as np\n'), ((778, 797), 'numpy.array', 'np.array', (['[0.25, 4]'], {}), '([0.25, 4])\n', (786, 797), True, 'import numpy as np\n'), ((1156, 1174), 'numpy.array', 'np.array', (['[0.5, 2]'], {}), '([0.5, 2])\n', (1164, 1174), True, 'import numpy as np\n'), ((1531, 1551), 'numpy.array', 'np.array', (['[0.625, 1]'], {}), '([0.625, 1])\n', (1539, 1551), True, 'import numpy as np\n')] |
import tensorflow as tf
import numpy as np
# Add parent directory to path
from mnist_util import (
load_pb_file,
print_nodes,
)
def get_predict_labels(model_file, input_node, output_node, input_data):
# Load saved model
tf.import_graph_def(load_pb_file(model_file))
print(f"predict labels - loaded model from file: {model_file}")
print_nodes()
# Get input / output tensors
x_input = tf.compat.v1.get_default_graph().get_tensor_by_name(
input_node)
y_output = tf.compat.v1.get_default_graph().get_tensor_by_name(
output_node)
with tf.compat.v1.Session() as sess:
sess.run(tf.compat.v1.global_variables_initializer())
predicted_labels = sess.run(y_output,
feed_dict={x_input: input_data})
return np.argmax(predicted_labels)
if __name__ == "__main__":
from mnist.example import x_test as mnist_x_test
from mnist.example import y_test as mnist_y_test
import argparse
import numpy as np
parser = argparse.ArgumentParser()
parser.add_argument(
"--input_node",
type=str,
default="import/input:0",
help="Tensor name of data input",
)
parser.add_argument(
"--output_node",
type=str,
default="import/output/BiasAdd:0",
help="Tensor name of model output",
)
parser.add_argument(
"--model_file",
type=str,
default="./models/cryptonets-relu.pb",
help="Filename of saved protobuf model")
FLAGS, unparsed = parser.parse_known_args()
if unparsed:
print("Unparsed flags:", unparsed)
exit(1)
# Get input / output tensors
input_node = FLAGS.input_node
output_node = FLAGS.output_node
model_file = FLAGS.model_file
input_data = mnist_x_test.reshape((1, 28, 28, 1))
predic_labels = get_predict_labels(model_file=model_file,
input_node=input_node,
output_node=output_node,
input_data=input_data)
correct_labels = np.argmax(mnist_y_test)
print('correct labels: ', correct_labels)
print('predict labels: ', predic_labels)
np.testing.assert_equal(actual=predic_labels,
desired=correct_labels)
| [
"argparse.ArgumentParser",
"numpy.argmax",
"tensorflow.compat.v1.get_default_graph",
"mnist_util.load_pb_file",
"tensorflow.compat.v1.Session",
"mnist.example.x_test.reshape",
"numpy.testing.assert_equal",
"mnist_util.print_nodes",
"tensorflow.compat.v1.global_variables_initializer"
] | [((358, 371), 'mnist_util.print_nodes', 'print_nodes', ([], {}), '()\n', (369, 371), False, 'from mnist_util import load_pb_file, print_nodes\n'), ((813, 840), 'numpy.argmax', 'np.argmax', (['predicted_labels'], {}), '(predicted_labels)\n', (822, 840), True, 'import numpy as np\n'), ((1033, 1058), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (1056, 1058), False, 'import argparse\n'), ((1811, 1847), 'mnist.example.x_test.reshape', 'mnist_x_test.reshape', (['(1, 28, 28, 1)'], {}), '((1, 28, 28, 1))\n', (1831, 1847), True, 'from mnist.example import x_test as mnist_x_test\n'), ((2121, 2144), 'numpy.argmax', 'np.argmax', (['mnist_y_test'], {}), '(mnist_y_test)\n', (2130, 2144), True, 'import numpy as np\n'), ((2240, 2309), 'numpy.testing.assert_equal', 'np.testing.assert_equal', ([], {'actual': 'predic_labels', 'desired': 'correct_labels'}), '(actual=predic_labels, desired=correct_labels)\n', (2263, 2309), True, 'import numpy as np\n'), ((259, 283), 'mnist_util.load_pb_file', 'load_pb_file', (['model_file'], {}), '(model_file)\n', (271, 283), False, 'from mnist_util import load_pb_file, print_nodes\n'), ((592, 614), 'tensorflow.compat.v1.Session', 'tf.compat.v1.Session', ([], {}), '()\n', (612, 614), True, 'import tensorflow as tf\n'), ((420, 452), 'tensorflow.compat.v1.get_default_graph', 'tf.compat.v1.get_default_graph', ([], {}), '()\n', (450, 452), True, 'import tensorflow as tf\n'), ((508, 540), 'tensorflow.compat.v1.get_default_graph', 'tf.compat.v1.get_default_graph', ([], {}), '()\n', (538, 540), True, 'import tensorflow as tf\n'), ((641, 684), 'tensorflow.compat.v1.global_variables_initializer', 'tf.compat.v1.global_variables_initializer', ([], {}), '()\n', (682, 684), True, 'import tensorflow as tf\n')] |
import neural_network_lyapunov.examples.car.unicycle as unicycle
import neural_network_lyapunov.utils as utils
import neural_network_lyapunov.gurobi_torch_mip as gurobi_torch_mip
import unittest
import numpy as np
import torch
import scipy.integrate
import scipy.linalg
import gurobipy
class TestUnicycle(unittest.TestCase):
def test_dynamics(self):
plant = unicycle.Unicycle(torch.float64)
# Test with pytorch tensor.
x = torch.tensor([2., 3., 0.5], dtype=torch.float64)
u = torch.tensor([0.5, -0.2], dtype=torch.float64)
xdot_torch = plant.dynamics(x, u)
np.testing.assert_allclose(
xdot_torch.detach().numpy(),
np.array([u[0] * torch.cos(x[2]), u[0] * torch.sin(x[2]), u[1]]))
xdot_np = plant.dynamics(x.detach().numpy(), u.detach().numpy())
np.testing.assert_allclose(xdot_torch.detach().numpy(), xdot_np)
def test_dynamics_gradient(self):
plant = unicycle.Unicycle(torch.float64)
def tester(x_val: np.ndarray, u_val: np.ndarray):
A, B = plant.dynamics_gradient(x_val, u_val)
A_torch, B_torch = plant.dynamics_gradient(torch.from_numpy(x_val),
torch.from_numpy(u_val))
np.testing.assert_allclose(A, A_torch.detach().numpy())
np.testing.assert_allclose(B, B_torch.detach().numpy())
"""
Compute gradint through pytorch autograd.
"""
x_torch = torch.from_numpy(x_val)
x_torch.requires_grad = True
u_torch = torch.from_numpy(u_val)
u_torch.requires_grad = True
for i in range(3):
if x_torch.grad is not None:
x_torch.grad.zero_()
if u_torch.grad is not None:
u_torch.grad.zero_()
xdot = plant.dynamics(x_torch, u_torch)
xdot[i].backward()
np.testing.assert_allclose(A_torch[i].detach().numpy(),
x_torch.grad.detach().numpy())
np.testing.assert_allclose(B_torch[i].detach().numpy(),
u_torch.grad.detach().numpy())
tester(np.array([0.5, 0.4, 0.2]), np.array([-0.3, 0.8]))
tester(np.array([-0.5, 0.7, -2.2]), np.array([-1.3, -.8]))
tester(np.array([-2.5, 0.7, -1.5]), np.array([-1.9, -.8]))
def test_next_pose(self):
plant = unicycle.Unicycle(torch.float64)
x = torch.tensor([2., 3., 0.5], dtype=torch.float64)
u = torch.tensor([0.5, -0.2], dtype=torch.float64)
x_next = plant.next_pose(x, u, 0.1)
result = scipy.integrate.solve_ivp(
lambda t, x_val: plant.dynamics(x_val,
u.detach().numpy()), [0, 0.1],
x.detach().numpy())
np.testing.assert_allclose(x_next, result.y[:, -1])
class TestUnicycleReLUModel(unittest.TestCase):
def setUp(self):
self.dtype = torch.float64
# Arbitrarily initialize the relu network. All the tests should pass
# even if the network doesn't approximate the unicycle dynamics.
dynamics_relu_no_thetadot = utils.setup_relu((2, 4, 3, 2),
params=None,
negative_slope=0.1,
bias=True,
dtype=self.dtype)
dynamics_relu_no_thetadot[0].weight.data = torch.tensor(
[[0.2, 0.5], [-1.3, 0.5], [-0.3, -0.2], [-0.4, -1.4]],
dtype=self.dtype)
dynamics_relu_no_thetadot[0].bias.data = torch.tensor(
[0.4, -1.2, 0.1, 2.3], dtype=self.dtype)
dynamics_relu_no_thetadot[2].weight.data = torch.tensor(
[[0.4, 0.1, -1.4, 0.2], [0.1, -0.2, -0.5, -1.1],
[0.3, 0.5, 1.1, -0.2]],
dtype=self.dtype)
dynamics_relu_no_thetadot[2].bias.data = torch.tensor([0.2, 0.1, -0.3],
dtype=self.dtype)
dynamics_relu_no_thetadot[4].weight.data = torch.tensor(
[[0.1, -0.3, 0.5], [0.3, -0.2, 2.1]], dtype=self.dtype)
dynamics_relu_no_thetadot[4].bias.data = torch.tensor([0.4, -1.2],
dtype=self.dtype)
self.dut_thetadot_not_input = unicycle.UnicycleReLUModel(
self.dtype,
x_lo=torch.tensor([-3, -3, -np.pi], dtype=self.dtype),
x_up=torch.tensor([3, 3, np.pi], dtype=self.dtype),
u_lo=torch.tensor([-2, -0.5], dtype=self.dtype),
u_up=torch.tensor([5, 0.5], dtype=self.dtype),
dynamics_relu=dynamics_relu_no_thetadot,
dt=0.01,
thetadot_as_input=False)
dynamics_relu_thetadot = utils.setup_relu((3, 4, 3, 2),
params=None,
negative_slope=0.1,
bias=True,
dtype=self.dtype)
dynamics_relu_thetadot[0].weight.data = torch.tensor(
[[0.2, 0.5, 0.1], [-1.3, 0.5, -1.2], [-0.3, -0.2, 0.4],
[-0.4, -1.4, 0.5]],
dtype=self.dtype)
dynamics_relu_no_thetadot[0].bias.data = torch.tensor(
[0.4, -1.2, 0.1, 2.3], dtype=self.dtype)
dynamics_relu_thetadot[2].weight.data = dynamics_relu_no_thetadot[
2].weight.data
dynamics_relu_thetadot[2].bias.data = dynamics_relu_no_thetadot[
2].bias.data
dynamics_relu_thetadot[4].weight.data = dynamics_relu_no_thetadot[
4].weight.data
dynamics_relu_thetadot[4].bias.data = dynamics_relu_thetadot[
4].bias.data
self.dut_thetadot_input = unicycle.UnicycleReLUModel(
self.dtype,
x_lo=torch.tensor([-3, -3, -np.pi], dtype=self.dtype),
x_up=torch.tensor([3, 3, np.pi], dtype=self.dtype),
u_lo=torch.tensor([-2, -0.5], dtype=self.dtype),
u_up=torch.tensor([5, 0.5], dtype=self.dtype),
dynamics_relu=dynamics_relu_thetadot,
dt=0.01,
thetadot_as_input=True)
def step_forward_tester(self, dut):
# First test a single x_start and u_start
x_start = torch.tensor([0.2, 0.5, -0.1], dtype=self.dtype)
u_start = torch.tensor([2.1, 0.3], dtype=self.dtype)
x_next = dut.step_forward(x_start, u_start)
def eval_next_state(x_val, u_val):
if dut.thetadot_as_input:
network_input = torch.tensor([x_val[2], u_val[0], u_val[1]],
dtype=self.dtype)
network_input_zero = torch.zeros((3,), dtype=self.dtype)
else:
network_input = torch.tensor([x_val[2], u_val[0]],
dtype=self.dtype)
network_input_zero = torch.zeros((2,), dtype=self.dtype)
position_next = x_val[:2] + \
dut.dynamics_relu(network_input) - dut.dynamics_relu(
network_input_zero)
theta_next = x_val[2] + u_val[1] * dut.dt
return np.array([
position_next[0].item(), position_next[1].item(),
theta_next.item()
])
np.testing.assert_allclose(x_next.detach().numpy(),
eval_next_state(x_start, u_start))
# Now test a batch of x_start and u_start
x_start = torch.tensor([[0.2, 0.5, -0.1], [0.4, 0.3, 0.5]],
dtype=self.dtype)
u_start = torch.tensor([[2.1, 0.3], [-0.3, 0.4]], dtype=self.dtype)
x_next = dut.step_forward(x_start, u_start)
self.assertEqual(x_next.shape, (2, 3))
for i in range(x_start.shape[0]):
np.testing.assert_allclose(x_next[i].detach().numpy(),
eval_next_state(x_start[i], u_start[i]))
def test_step_forward_thetadot_not_input(self):
self.step_forward_tester(self.dut_thetadot_not_input)
def test_step_forward_thetadot_as_input(self):
self.step_forward_tester(self.dut_thetadot_input)
def add_dynamics_constraint_tester(self, dut):
def tester(x_val, u_val):
# Setup an MILP with fixed x_var and u_var, check if x_next_var is
# solved to the right value.
mip = gurobi_torch_mip.GurobiTorchMILP(self.dtype)
x_var = mip.addVars(3, lb=-gurobipy.GRB.INFINITY)
u_var = mip.addVars(2, lb=-gurobipy.GRB.INFINITY)
x_next_var = mip.addVars(3, lb=-gurobipy.GRB.INFINITY)
dut.add_dynamics_constraint(mip, x_var, x_next_var, u_var, "slack",
"binary")
# Fix x_var to x_val, u_var to u_val
mip.addMConstrs([torch.eye(3, dtype=self.dtype)], [x_var],
sense=gurobipy.GRB.EQUAL,
b=x_val)
mip.addMConstrs([torch.eye(2, dtype=self.dtype)], [u_var],
sense=gurobipy.GRB.EQUAL,
b=u_val)
mip.gurobi_model.setParam(gurobipy.GRB.Param.OutputFlag, False)
mip.gurobi_model.optimize()
self.assertEqual(mip.gurobi_model.status,
gurobipy.GRB.Status.OPTIMAL)
x_next_val = np.array([var.xn for var in x_next_var])
x_next_val_expected = dut.step_forward(x_val, u_val)
np.testing.assert_allclose(x_next_val,
x_next_val_expected.detach().numpy(),
atol=1e-8)
tester(torch.tensor([0., 0., 0.], dtype=self.dtype),
torch.tensor([0., 0.], dtype=self.dtype))
tester(torch.tensor([0.5, -0.3, 0.4], dtype=self.dtype),
torch.tensor([0., 0.], dtype=self.dtype))
tester(torch.tensor([0.6, -1.3, 0.4], dtype=self.dtype),
torch.tensor([4., 0.3], dtype=self.dtype))
tester(torch.tensor([0.6, -1.3, 0.4], dtype=self.dtype),
torch.tensor([-2., 0.3], dtype=self.dtype))
def test_add_dynamics_constraint_thetadot_not_input(self):
self.add_dynamics_constraint_tester(self.dut_thetadot_not_input)
def test_add_dynamics_constraint_thetadot_as_input(self):
self.add_dynamics_constraint_tester(self.dut_thetadot_input)
class TestUnicycleReLUZeroVelModel(unittest.TestCase):
def setUp(self):
self.dtype = torch.float64
# Arbitrarily initialize the relu network. All the tests should pass
# even if the network doesn't approximate the unicycle dynamics.
dynamics_relu_no_thetadot = utils.setup_relu((2, 4, 3, 2),
params=None,
negative_slope=0.1,
bias=True,
dtype=self.dtype)
dynamics_relu_no_thetadot[0].weight.data = torch.tensor(
[[0.2, 0.5], [-1.3, 0.5], [-0.3, -0.2], [-0.4, -1.4]],
dtype=self.dtype)
dynamics_relu_no_thetadot[0].bias.data = torch.tensor(
[0.4, -1.2, 0.1, 2.3], dtype=self.dtype)
dynamics_relu_no_thetadot[2].weight.data = torch.tensor(
[[0.4, 0.1, -1.4, 0.2], [0.1, -0.2, -0.5, -1.1],
[0.3, 0.5, 1.1, -0.2]],
dtype=self.dtype)
dynamics_relu_no_thetadot[2].bias.data = torch.tensor([0.2, 0.1, -0.3],
dtype=self.dtype)
dynamics_relu_no_thetadot[4].weight.data = torch.tensor(
[[0.1, -0.3, 0.5], [0.3, -0.2, 2.1]], dtype=self.dtype)
dynamics_relu_no_thetadot[4].bias.data = torch.tensor([0.4, -1.2],
dtype=self.dtype)
self.dut_thetadot_not_input = unicycle.UnicycleReLUZeroVelModel(
self.dtype,
x_lo=torch.tensor([-3, -3, -np.pi], dtype=self.dtype),
x_up=torch.tensor([3, 3, np.pi], dtype=self.dtype),
u_lo=torch.tensor([-2, -0.5], dtype=self.dtype),
u_up=torch.tensor([5, 0.5], dtype=self.dtype),
dynamics_relu=dynamics_relu_no_thetadot,
dt=0.01,
thetadot_as_input=False)
dynamics_relu_thetadot = utils.setup_relu((3, 4, 3, 2),
params=None,
negative_slope=0.1,
bias=True,
dtype=self.dtype)
dynamics_relu_thetadot[0].weight.data = torch.tensor(
[[0.2, 0.5, 0.1], [-1.3, 0.5, -1.2], [-0.3, -0.2, 0.4],
[-0.4, -1.4, 0.5]],
dtype=self.dtype)
dynamics_relu_no_thetadot[0].bias.data = torch.tensor(
[0.4, -1.2, 0.1, 2.3], dtype=self.dtype)
dynamics_relu_thetadot[2].weight.data = dynamics_relu_no_thetadot[
2].weight.data
dynamics_relu_thetadot[2].bias.data = dynamics_relu_no_thetadot[
2].bias.data
dynamics_relu_thetadot[4].weight.data = dynamics_relu_no_thetadot[
4].weight.data
dynamics_relu_thetadot[4].bias.data = dynamics_relu_thetadot[
4].bias.data
self.dut_thetadot_input = unicycle.UnicycleReLUZeroVelModel(
self.dtype,
x_lo=torch.tensor([-3, -3, -np.pi], dtype=self.dtype),
x_up=torch.tensor([3, 3, np.pi], dtype=self.dtype),
u_lo=torch.tensor([-2, -0.5], dtype=self.dtype),
u_up=torch.tensor([5, 0.5], dtype=self.dtype),
dynamics_relu=dynamics_relu_thetadot,
dt=0.01,
thetadot_as_input=True)
def step_forward_tester(self, dut):
# First make sure that if vel = 0, then pos[n+1] = pos[n]
x_start = torch.tensor([0.5, 0.3, -1.2], dtype=self.dtype)
u_start = torch.tensor([0, 0.5], dtype=self.dtype)
np.testing.assert_allclose(
dut.step_forward(x_start, u_start)[:2].detach().numpy(),
x_start[:2].detach().numpy())
# First test a single x_start and u_start
x_start = torch.tensor([0.2, 0.5, -0.1], dtype=self.dtype)
u_start = torch.tensor([2.1, 0.3], dtype=self.dtype)
x_next = dut.step_forward(x_start, u_start)
def eval_next_state(x_val, u_val):
if dut.thetadot_as_input:
network_input = torch.tensor([x_val[2], u_val[0], u_val[1]],
dtype=self.dtype)
network_input_zero_vel = torch.tensor([x_val[2], 0, u_val[1]],
dtype=self.dtype)
else:
network_input = torch.tensor([x_val[2], u_val[0]],
dtype=self.dtype)
network_input_zero_vel = torch.tensor([x_val[2], 0],
dtype=self.dtype)
position_next = x_val[:2] + \
dut.dynamics_relu(network_input) - dut.dynamics_relu(
network_input_zero_vel)
theta_next = x_val[2] + u_val[1] * dut.dt
return np.array([
position_next[0].item(), position_next[1].item(),
theta_next.item()
])
np.testing.assert_allclose(x_next.detach().numpy(),
eval_next_state(x_start, u_start))
# Now test a batch of x_start and u_start
x_start = torch.tensor([[0.2, 0.5, -0.1], [0.4, 0.3, 0.5]],
dtype=self.dtype)
u_start = torch.tensor([[2.1, 0.3], [-0.3, 0.4]], dtype=self.dtype)
x_next = dut.step_forward(x_start, u_start)
self.assertEqual(x_next.shape, (2, 3))
for i in range(x_start.shape[0]):
np.testing.assert_allclose(x_next[i].detach().numpy(),
eval_next_state(x_start[i], u_start[i]))
def test_step_forward_thetadot_not_input(self):
self.step_forward_tester(self.dut_thetadot_not_input)
def test_step_forward_thetadot_as_input(self):
self.step_forward_tester(self.dut_thetadot_input)
def add_dynamics_constraint_tester(self, dut):
def tester(x_val, u_val):
# Setup an MILP with fixed x_var and u_var, check if x_next_var is
# solved to the right value.
mip = gurobi_torch_mip.GurobiTorchMILP(self.dtype)
x_var = mip.addVars(3, lb=-gurobipy.GRB.INFINITY)
u_var = mip.addVars(2, lb=-gurobipy.GRB.INFINITY)
x_next_var = mip.addVars(3, lb=-gurobipy.GRB.INFINITY)
dut.add_dynamics_constraint(mip, x_var, x_next_var, u_var, "slack",
"binary")
# Fix x_var to x_val, u_var to u_val
mip.addMConstrs([torch.eye(3, dtype=self.dtype)], [x_var],
sense=gurobipy.GRB.EQUAL,
b=x_val)
mip.addMConstrs([torch.eye(2, dtype=self.dtype)], [u_var],
sense=gurobipy.GRB.EQUAL,
b=u_val)
mip.gurobi_model.setParam(gurobipy.GRB.Param.OutputFlag, False)
mip.gurobi_model.optimize()
self.assertEqual(mip.gurobi_model.status,
gurobipy.GRB.Status.OPTIMAL)
x_next_val = np.array([var.xn for var in x_next_var])
x_next_val_expected = dut.step_forward(x_val, u_val)
np.testing.assert_allclose(x_next_val,
x_next_val_expected.detach().numpy(),
atol=1e-8)
tester(torch.tensor([0., 0., 0.], dtype=self.dtype),
torch.tensor([0., 0.], dtype=self.dtype))
tester(torch.tensor([0.5, -0.3, 0.4], dtype=self.dtype),
torch.tensor([0., 0.], dtype=self.dtype))
tester(torch.tensor([0.6, -1.3, 0.4], dtype=self.dtype),
torch.tensor([4., 0.3], dtype=self.dtype))
tester(torch.tensor([0.6, -1.3, 0.4], dtype=self.dtype),
torch.tensor([-2., 0.3], dtype=self.dtype))
def test_add_dynamics_constraint_thetadot_not_input(self):
self.add_dynamics_constraint_tester(self.dut_thetadot_not_input)
def test_add_dynamics_constraint_thetadot_as_input(self):
self.add_dynamics_constraint_tester(self.dut_thetadot_input)
if __name__ == "__main__":
unittest.main()
| [
"unittest.main",
"neural_network_lyapunov.utils.setup_relu",
"neural_network_lyapunov.gurobi_torch_mip.GurobiTorchMILP",
"torch.eye",
"neural_network_lyapunov.examples.car.unicycle.Unicycle",
"torch.cos",
"numpy.array",
"torch.zeros",
"numpy.testing.assert_allclose",
"torch.sin",
"torch.tensor",... | [((18962, 18977), 'unittest.main', 'unittest.main', ([], {}), '()\n', (18975, 18977), False, 'import unittest\n'), ((375, 407), 'neural_network_lyapunov.examples.car.unicycle.Unicycle', 'unicycle.Unicycle', (['torch.float64'], {}), '(torch.float64)\n', (392, 407), True, 'import neural_network_lyapunov.examples.car.unicycle as unicycle\n'), ((456, 506), 'torch.tensor', 'torch.tensor', (['[2.0, 3.0, 0.5]'], {'dtype': 'torch.float64'}), '([2.0, 3.0, 0.5], dtype=torch.float64)\n', (468, 506), False, 'import torch\n'), ((517, 563), 'torch.tensor', 'torch.tensor', (['[0.5, -0.2]'], {'dtype': 'torch.float64'}), '([0.5, -0.2], dtype=torch.float64)\n', (529, 563), False, 'import torch\n'), ((962, 994), 'neural_network_lyapunov.examples.car.unicycle.Unicycle', 'unicycle.Unicycle', (['torch.float64'], {}), '(torch.float64)\n', (979, 994), True, 'import neural_network_lyapunov.examples.car.unicycle as unicycle\n'), ((2500, 2532), 'neural_network_lyapunov.examples.car.unicycle.Unicycle', 'unicycle.Unicycle', (['torch.float64'], {}), '(torch.float64)\n', (2517, 2532), True, 'import neural_network_lyapunov.examples.car.unicycle as unicycle\n'), ((2545, 2595), 'torch.tensor', 'torch.tensor', (['[2.0, 3.0, 0.5]'], {'dtype': 'torch.float64'}), '([2.0, 3.0, 0.5], dtype=torch.float64)\n', (2557, 2595), False, 'import torch\n'), ((2606, 2652), 'torch.tensor', 'torch.tensor', (['[0.5, -0.2]'], {'dtype': 'torch.float64'}), '([0.5, -0.2], dtype=torch.float64)\n', (2618, 2652), False, 'import torch\n'), ((2908, 2959), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['x_next', 'result.y[:, -1]'], {}), '(x_next, result.y[:, -1])\n', (2934, 2959), True, 'import numpy as np\n'), ((3252, 3348), 'neural_network_lyapunov.utils.setup_relu', 'utils.setup_relu', (['(2, 4, 3, 2)'], {'params': 'None', 'negative_slope': '(0.1)', 'bias': '(True)', 'dtype': 'self.dtype'}), '((2, 4, 3, 2), params=None, negative_slope=0.1, bias=True,\n dtype=self.dtype)\n', (3268, 
3348), True, 'import neural_network_lyapunov.utils as utils\n'), ((3608, 3698), 'torch.tensor', 'torch.tensor', (['[[0.2, 0.5], [-1.3, 0.5], [-0.3, -0.2], [-0.4, -1.4]]'], {'dtype': 'self.dtype'}), '([[0.2, 0.5], [-1.3, 0.5], [-0.3, -0.2], [-0.4, -1.4]], dtype=\n self.dtype)\n', (3620, 3698), False, 'import torch\n'), ((3768, 3821), 'torch.tensor', 'torch.tensor', (['[0.4, -1.2, 0.1, 2.3]'], {'dtype': 'self.dtype'}), '([0.4, -1.2, 0.1, 2.3], dtype=self.dtype)\n', (3780, 3821), False, 'import torch\n'), ((3886, 3994), 'torch.tensor', 'torch.tensor', (['[[0.4, 0.1, -1.4, 0.2], [0.1, -0.2, -0.5, -1.1], [0.3, 0.5, 1.1, -0.2]]'], {'dtype': 'self.dtype'}), '([[0.4, 0.1, -1.4, 0.2], [0.1, -0.2, -0.5, -1.1], [0.3, 0.5, \n 1.1, -0.2]], dtype=self.dtype)\n', (3898, 3994), False, 'import torch\n'), ((4077, 4125), 'torch.tensor', 'torch.tensor', (['[0.2, 0.1, -0.3]'], {'dtype': 'self.dtype'}), '([0.2, 0.1, -0.3], dtype=self.dtype)\n', (4089, 4125), False, 'import torch\n'), ((4239, 4307), 'torch.tensor', 'torch.tensor', (['[[0.1, -0.3, 0.5], [0.3, -0.2, 2.1]]'], {'dtype': 'self.dtype'}), '([[0.1, -0.3, 0.5], [0.3, -0.2, 2.1]], dtype=self.dtype)\n', (4251, 4307), False, 'import torch\n'), ((4370, 4413), 'torch.tensor', 'torch.tensor', (['[0.4, -1.2]'], {'dtype': 'self.dtype'}), '([0.4, -1.2], dtype=self.dtype)\n', (4382, 4413), False, 'import torch\n'), ((4962, 5058), 'neural_network_lyapunov.utils.setup_relu', 'utils.setup_relu', (['(3, 4, 3, 2)'], {'params': 'None', 'negative_slope': '(0.1)', 'bias': '(True)', 'dtype': 'self.dtype'}), '((3, 4, 3, 2), params=None, negative_slope=0.1, bias=True,\n dtype=self.dtype)\n', (4978, 5058), True, 'import neural_network_lyapunov.utils as utils\n'), ((5303, 5413), 'torch.tensor', 'torch.tensor', (['[[0.2, 0.5, 0.1], [-1.3, 0.5, -1.2], [-0.3, -0.2, 0.4], [-0.4, -1.4, 0.5]]'], {'dtype': 'self.dtype'}), '([[0.2, 0.5, 0.1], [-1.3, 0.5, -1.2], [-0.3, -0.2, 0.4], [-0.4,\n -1.4, 0.5]], dtype=self.dtype)\n', (5315, 5413), False, 'import 
torch\n'), ((5497, 5550), 'torch.tensor', 'torch.tensor', (['[0.4, -1.2, 0.1, 2.3]'], {'dtype': 'self.dtype'}), '([0.4, -1.2, 0.1, 2.3], dtype=self.dtype)\n', (5509, 5550), False, 'import torch\n'), ((6515, 6563), 'torch.tensor', 'torch.tensor', (['[0.2, 0.5, -0.1]'], {'dtype': 'self.dtype'}), '([0.2, 0.5, -0.1], dtype=self.dtype)\n', (6527, 6563), False, 'import torch\n'), ((6582, 6624), 'torch.tensor', 'torch.tensor', (['[2.1, 0.3]'], {'dtype': 'self.dtype'}), '([2.1, 0.3], dtype=self.dtype)\n', (6594, 6624), False, 'import torch\n'), ((7744, 7811), 'torch.tensor', 'torch.tensor', (['[[0.2, 0.5, -0.1], [0.4, 0.3, 0.5]]'], {'dtype': 'self.dtype'}), '([[0.2, 0.5, -0.1], [0.4, 0.3, 0.5]], dtype=self.dtype)\n', (7756, 7811), False, 'import torch\n'), ((7861, 7918), 'torch.tensor', 'torch.tensor', (['[[2.1, 0.3], [-0.3, 0.4]]'], {'dtype': 'self.dtype'}), '([[2.1, 0.3], [-0.3, 0.4]], dtype=self.dtype)\n', (7873, 7918), False, 'import torch\n'), ((10988, 11084), 'neural_network_lyapunov.utils.setup_relu', 'utils.setup_relu', (['(2, 4, 3, 2)'], {'params': 'None', 'negative_slope': '(0.1)', 'bias': '(True)', 'dtype': 'self.dtype'}), '((2, 4, 3, 2), params=None, negative_slope=0.1, bias=True,\n dtype=self.dtype)\n', (11004, 11084), True, 'import neural_network_lyapunov.utils as utils\n'), ((11344, 11434), 'torch.tensor', 'torch.tensor', (['[[0.2, 0.5], [-1.3, 0.5], [-0.3, -0.2], [-0.4, -1.4]]'], {'dtype': 'self.dtype'}), '([[0.2, 0.5], [-1.3, 0.5], [-0.3, -0.2], [-0.4, -1.4]], dtype=\n self.dtype)\n', (11356, 11434), False, 'import torch\n'), ((11504, 11557), 'torch.tensor', 'torch.tensor', (['[0.4, -1.2, 0.1, 2.3]'], {'dtype': 'self.dtype'}), '([0.4, -1.2, 0.1, 2.3], dtype=self.dtype)\n', (11516, 11557), False, 'import torch\n'), ((11622, 11730), 'torch.tensor', 'torch.tensor', (['[[0.4, 0.1, -1.4, 0.2], [0.1, -0.2, -0.5, -1.1], [0.3, 0.5, 1.1, -0.2]]'], {'dtype': 'self.dtype'}), '([[0.4, 0.1, -1.4, 0.2], [0.1, -0.2, -0.5, -1.1], [0.3, 0.5, \n 1.1, -0.2]], 
dtype=self.dtype)\n', (11634, 11730), False, 'import torch\n'), ((11813, 11861), 'torch.tensor', 'torch.tensor', (['[0.2, 0.1, -0.3]'], {'dtype': 'self.dtype'}), '([0.2, 0.1, -0.3], dtype=self.dtype)\n', (11825, 11861), False, 'import torch\n'), ((11975, 12043), 'torch.tensor', 'torch.tensor', (['[[0.1, -0.3, 0.5], [0.3, -0.2, 2.1]]'], {'dtype': 'self.dtype'}), '([[0.1, -0.3, 0.5], [0.3, -0.2, 2.1]], dtype=self.dtype)\n', (11987, 12043), False, 'import torch\n'), ((12106, 12149), 'torch.tensor', 'torch.tensor', (['[0.4, -1.2]'], {'dtype': 'self.dtype'}), '([0.4, -1.2], dtype=self.dtype)\n', (12118, 12149), False, 'import torch\n'), ((12705, 12801), 'neural_network_lyapunov.utils.setup_relu', 'utils.setup_relu', (['(3, 4, 3, 2)'], {'params': 'None', 'negative_slope': '(0.1)', 'bias': '(True)', 'dtype': 'self.dtype'}), '((3, 4, 3, 2), params=None, negative_slope=0.1, bias=True,\n dtype=self.dtype)\n', (12721, 12801), True, 'import neural_network_lyapunov.utils as utils\n'), ((13046, 13156), 'torch.tensor', 'torch.tensor', (['[[0.2, 0.5, 0.1], [-1.3, 0.5, -1.2], [-0.3, -0.2, 0.4], [-0.4, -1.4, 0.5]]'], {'dtype': 'self.dtype'}), '([[0.2, 0.5, 0.1], [-1.3, 0.5, -1.2], [-0.3, -0.2, 0.4], [-0.4,\n -1.4, 0.5]], dtype=self.dtype)\n', (13058, 13156), False, 'import torch\n'), ((13240, 13293), 'torch.tensor', 'torch.tensor', (['[0.4, -1.2, 0.1, 2.3]'], {'dtype': 'self.dtype'}), '([0.4, -1.2, 0.1, 2.3], dtype=self.dtype)\n', (13252, 13293), False, 'import torch\n'), ((14281, 14329), 'torch.tensor', 'torch.tensor', (['[0.5, 0.3, -1.2]'], {'dtype': 'self.dtype'}), '([0.5, 0.3, -1.2], dtype=self.dtype)\n', (14293, 14329), False, 'import torch\n'), ((14348, 14388), 'torch.tensor', 'torch.tensor', (['[0, 0.5]'], {'dtype': 'self.dtype'}), '([0, 0.5], dtype=self.dtype)\n', (14360, 14388), False, 'import torch\n'), ((14605, 14653), 'torch.tensor', 'torch.tensor', (['[0.2, 0.5, -0.1]'], {'dtype': 'self.dtype'}), '([0.2, 0.5, -0.1], dtype=self.dtype)\n', (14617, 14653), False, 'import 
torch\n'), ((14672, 14714), 'torch.tensor', 'torch.tensor', (['[2.1, 0.3]'], {'dtype': 'self.dtype'}), '([2.1, 0.3], dtype=self.dtype)\n', (14684, 14714), False, 'import torch\n'), ((15984, 16051), 'torch.tensor', 'torch.tensor', (['[[0.2, 0.5, -0.1], [0.4, 0.3, 0.5]]'], {'dtype': 'self.dtype'}), '([[0.2, 0.5, -0.1], [0.4, 0.3, 0.5]], dtype=self.dtype)\n', (15996, 16051), False, 'import torch\n'), ((16101, 16158), 'torch.tensor', 'torch.tensor', (['[[2.1, 0.3], [-0.3, 0.4]]'], {'dtype': 'self.dtype'}), '([[2.1, 0.3], [-0.3, 0.4]], dtype=self.dtype)\n', (16113, 16158), False, 'import torch\n'), ((1515, 1538), 'torch.from_numpy', 'torch.from_numpy', (['x_val'], {}), '(x_val)\n', (1531, 1538), False, 'import torch\n'), ((1602, 1625), 'torch.from_numpy', 'torch.from_numpy', (['u_val'], {}), '(u_val)\n', (1618, 1625), False, 'import torch\n'), ((2269, 2294), 'numpy.array', 'np.array', (['[0.5, 0.4, 0.2]'], {}), '([0.5, 0.4, 0.2])\n', (2277, 2294), True, 'import numpy as np\n'), ((2296, 2317), 'numpy.array', 'np.array', (['[-0.3, 0.8]'], {}), '([-0.3, 0.8])\n', (2304, 2317), True, 'import numpy as np\n'), ((2334, 2361), 'numpy.array', 'np.array', (['[-0.5, 0.7, -2.2]'], {}), '([-0.5, 0.7, -2.2])\n', (2342, 2361), True, 'import numpy as np\n'), ((2363, 2385), 'numpy.array', 'np.array', (['[-1.3, -0.8]'], {}), '([-1.3, -0.8])\n', (2371, 2385), True, 'import numpy as np\n'), ((2401, 2428), 'numpy.array', 'np.array', (['[-2.5, 0.7, -1.5]'], {}), '([-2.5, 0.7, -1.5])\n', (2409, 2428), True, 'import numpy as np\n'), ((2430, 2452), 'numpy.array', 'np.array', (['[-1.9, -0.8]'], {}), '([-1.9, -0.8])\n', (2438, 2452), True, 'import numpy as np\n'), ((8656, 8700), 'neural_network_lyapunov.gurobi_torch_mip.GurobiTorchMILP', 'gurobi_torch_mip.GurobiTorchMILP', (['self.dtype'], {}), '(self.dtype)\n', (8688, 8700), True, 'import neural_network_lyapunov.gurobi_torch_mip as gurobi_torch_mip\n'), ((9648, 9688), 'numpy.array', 'np.array', (['[var.xn for var in x_next_var]'], {}), '([var.xn 
for var in x_next_var])\n', (9656, 9688), True, 'import numpy as np\n'), ((9948, 9995), 'torch.tensor', 'torch.tensor', (['[0.0, 0.0, 0.0]'], {'dtype': 'self.dtype'}), '([0.0, 0.0, 0.0], dtype=self.dtype)\n', (9960, 9995), False, 'import torch\n'), ((10009, 10051), 'torch.tensor', 'torch.tensor', (['[0.0, 0.0]'], {'dtype': 'self.dtype'}), '([0.0, 0.0], dtype=self.dtype)\n', (10021, 10051), False, 'import torch\n'), ((10066, 10114), 'torch.tensor', 'torch.tensor', (['[0.5, -0.3, 0.4]'], {'dtype': 'self.dtype'}), '([0.5, -0.3, 0.4], dtype=self.dtype)\n', (10078, 10114), False, 'import torch\n'), ((10131, 10173), 'torch.tensor', 'torch.tensor', (['[0.0, 0.0]'], {'dtype': 'self.dtype'}), '([0.0, 0.0], dtype=self.dtype)\n', (10143, 10173), False, 'import torch\n'), ((10188, 10236), 'torch.tensor', 'torch.tensor', (['[0.6, -1.3, 0.4]'], {'dtype': 'self.dtype'}), '([0.6, -1.3, 0.4], dtype=self.dtype)\n', (10200, 10236), False, 'import torch\n'), ((10253, 10295), 'torch.tensor', 'torch.tensor', (['[4.0, 0.3]'], {'dtype': 'self.dtype'}), '([4.0, 0.3], dtype=self.dtype)\n', (10265, 10295), False, 'import torch\n'), ((10311, 10359), 'torch.tensor', 'torch.tensor', (['[0.6, -1.3, 0.4]'], {'dtype': 'self.dtype'}), '([0.6, -1.3, 0.4], dtype=self.dtype)\n', (10323, 10359), False, 'import torch\n'), ((10376, 10419), 'torch.tensor', 'torch.tensor', (['[-2.0, 0.3]'], {'dtype': 'self.dtype'}), '([-2.0, 0.3], dtype=self.dtype)\n', (10388, 10419), False, 'import torch\n'), ((16896, 16940), 'neural_network_lyapunov.gurobi_torch_mip.GurobiTorchMILP', 'gurobi_torch_mip.GurobiTorchMILP', (['self.dtype'], {}), '(self.dtype)\n', (16928, 16940), True, 'import neural_network_lyapunov.gurobi_torch_mip as gurobi_torch_mip\n'), ((17888, 17928), 'numpy.array', 'np.array', (['[var.xn for var in x_next_var]'], {}), '([var.xn for var in x_next_var])\n', (17896, 17928), True, 'import numpy as np\n'), ((18188, 18235), 'torch.tensor', 'torch.tensor', (['[0.0, 0.0, 0.0]'], {'dtype': 'self.dtype'}), 
'([0.0, 0.0, 0.0], dtype=self.dtype)\n', (18200, 18235), False, 'import torch\n'), ((18249, 18291), 'torch.tensor', 'torch.tensor', (['[0.0, 0.0]'], {'dtype': 'self.dtype'}), '([0.0, 0.0], dtype=self.dtype)\n', (18261, 18291), False, 'import torch\n'), ((18306, 18354), 'torch.tensor', 'torch.tensor', (['[0.5, -0.3, 0.4]'], {'dtype': 'self.dtype'}), '([0.5, -0.3, 0.4], dtype=self.dtype)\n', (18318, 18354), False, 'import torch\n'), ((18371, 18413), 'torch.tensor', 'torch.tensor', (['[0.0, 0.0]'], {'dtype': 'self.dtype'}), '([0.0, 0.0], dtype=self.dtype)\n', (18383, 18413), False, 'import torch\n'), ((18428, 18476), 'torch.tensor', 'torch.tensor', (['[0.6, -1.3, 0.4]'], {'dtype': 'self.dtype'}), '([0.6, -1.3, 0.4], dtype=self.dtype)\n', (18440, 18476), False, 'import torch\n'), ((18493, 18535), 'torch.tensor', 'torch.tensor', (['[4.0, 0.3]'], {'dtype': 'self.dtype'}), '([4.0, 0.3], dtype=self.dtype)\n', (18505, 18535), False, 'import torch\n'), ((18551, 18599), 'torch.tensor', 'torch.tensor', (['[0.6, -1.3, 0.4]'], {'dtype': 'self.dtype'}), '([0.6, -1.3, 0.4], dtype=self.dtype)\n', (18563, 18599), False, 'import torch\n'), ((18616, 18659), 'torch.tensor', 'torch.tensor', (['[-2.0, 0.3]'], {'dtype': 'self.dtype'}), '([-2.0, 0.3], dtype=self.dtype)\n', (18628, 18659), False, 'import torch\n'), ((1166, 1189), 'torch.from_numpy', 'torch.from_numpy', (['x_val'], {}), '(x_val)\n', (1182, 1189), False, 'import torch\n'), ((1246, 1269), 'torch.from_numpy', 'torch.from_numpy', (['u_val'], {}), '(u_val)\n', (1262, 1269), False, 'import torch\n'), ((4583, 4631), 'torch.tensor', 'torch.tensor', (['[-3, -3, -np.pi]'], {'dtype': 'self.dtype'}), '([-3, -3, -np.pi], dtype=self.dtype)\n', (4595, 4631), False, 'import torch\n'), ((4650, 4695), 'torch.tensor', 'torch.tensor', (['[3, 3, np.pi]'], {'dtype': 'self.dtype'}), '([3, 3, np.pi], dtype=self.dtype)\n', (4662, 4695), False, 'import torch\n'), ((4714, 4756), 'torch.tensor', 'torch.tensor', (['[-2, -0.5]'], {'dtype': 
'self.dtype'}), '([-2, -0.5], dtype=self.dtype)\n', (4726, 4756), False, 'import torch\n'), ((4775, 4815), 'torch.tensor', 'torch.tensor', (['[5, 0.5]'], {'dtype': 'self.dtype'}), '([5, 0.5], dtype=self.dtype)\n', (4787, 4815), False, 'import torch\n'), ((6065, 6113), 'torch.tensor', 'torch.tensor', (['[-3, -3, -np.pi]'], {'dtype': 'self.dtype'}), '([-3, -3, -np.pi], dtype=self.dtype)\n', (6077, 6113), False, 'import torch\n'), ((6132, 6177), 'torch.tensor', 'torch.tensor', (['[3, 3, np.pi]'], {'dtype': 'self.dtype'}), '([3, 3, np.pi], dtype=self.dtype)\n', (6144, 6177), False, 'import torch\n'), ((6196, 6238), 'torch.tensor', 'torch.tensor', (['[-2, -0.5]'], {'dtype': 'self.dtype'}), '([-2, -0.5], dtype=self.dtype)\n', (6208, 6238), False, 'import torch\n'), ((6257, 6297), 'torch.tensor', 'torch.tensor', (['[5, 0.5]'], {'dtype': 'self.dtype'}), '([5, 0.5], dtype=self.dtype)\n', (6269, 6297), False, 'import torch\n'), ((6791, 6853), 'torch.tensor', 'torch.tensor', (['[x_val[2], u_val[0], u_val[1]]'], {'dtype': 'self.dtype'}), '([x_val[2], u_val[0], u_val[1]], dtype=self.dtype)\n', (6803, 6853), False, 'import torch\n'), ((6936, 6971), 'torch.zeros', 'torch.zeros', (['(3,)'], {'dtype': 'self.dtype'}), '((3,), dtype=self.dtype)\n', (6947, 6971), False, 'import torch\n'), ((7022, 7074), 'torch.tensor', 'torch.tensor', (['[x_val[2], u_val[0]]'], {'dtype': 'self.dtype'}), '([x_val[2], u_val[0]], dtype=self.dtype)\n', (7034, 7074), False, 'import torch\n'), ((7157, 7192), 'torch.zeros', 'torch.zeros', (['(2,)'], {'dtype': 'self.dtype'}), '((2,), dtype=self.dtype)\n', (7168, 7192), False, 'import torch\n'), ((12326, 12374), 'torch.tensor', 'torch.tensor', (['[-3, -3, -np.pi]'], {'dtype': 'self.dtype'}), '([-3, -3, -np.pi], dtype=self.dtype)\n', (12338, 12374), False, 'import torch\n'), ((12393, 12438), 'torch.tensor', 'torch.tensor', (['[3, 3, np.pi]'], {'dtype': 'self.dtype'}), '([3, 3, np.pi], dtype=self.dtype)\n', (12405, 12438), False, 'import torch\n'), ((12457, 
12499), 'torch.tensor', 'torch.tensor', (['[-2, -0.5]'], {'dtype': 'self.dtype'}), '([-2, -0.5], dtype=self.dtype)\n', (12469, 12499), False, 'import torch\n'), ((12518, 12558), 'torch.tensor', 'torch.tensor', (['[5, 0.5]'], {'dtype': 'self.dtype'}), '([5, 0.5], dtype=self.dtype)\n', (12530, 12558), False, 'import torch\n'), ((13815, 13863), 'torch.tensor', 'torch.tensor', (['[-3, -3, -np.pi]'], {'dtype': 'self.dtype'}), '([-3, -3, -np.pi], dtype=self.dtype)\n', (13827, 13863), False, 'import torch\n'), ((13882, 13927), 'torch.tensor', 'torch.tensor', (['[3, 3, np.pi]'], {'dtype': 'self.dtype'}), '([3, 3, np.pi], dtype=self.dtype)\n', (13894, 13927), False, 'import torch\n'), ((13946, 13988), 'torch.tensor', 'torch.tensor', (['[-2, -0.5]'], {'dtype': 'self.dtype'}), '([-2, -0.5], dtype=self.dtype)\n', (13958, 13988), False, 'import torch\n'), ((14007, 14047), 'torch.tensor', 'torch.tensor', (['[5, 0.5]'], {'dtype': 'self.dtype'}), '([5, 0.5], dtype=self.dtype)\n', (14019, 14047), False, 'import torch\n'), ((14881, 14943), 'torch.tensor', 'torch.tensor', (['[x_val[2], u_val[0], u_val[1]]'], {'dtype': 'self.dtype'}), '([x_val[2], u_val[0], u_val[1]], dtype=self.dtype)\n', (14893, 14943), False, 'import torch\n'), ((15030, 15085), 'torch.tensor', 'torch.tensor', (['[x_val[2], 0, u_val[1]]'], {'dtype': 'self.dtype'}), '([x_val[2], 0, u_val[1]], dtype=self.dtype)\n', (15042, 15085), False, 'import torch\n'), ((15190, 15242), 'torch.tensor', 'torch.tensor', (['[x_val[2], u_val[0]]'], {'dtype': 'self.dtype'}), '([x_val[2], u_val[0]], dtype=self.dtype)\n', (15202, 15242), False, 'import torch\n'), ((15329, 15374), 'torch.tensor', 'torch.tensor', (['[x_val[2], 0]'], {'dtype': 'self.dtype'}), '([x_val[2], 0], dtype=self.dtype)\n', (15341, 15374), False, 'import torch\n'), ((9100, 9130), 'torch.eye', 'torch.eye', (['(3)'], {'dtype': 'self.dtype'}), '(3, dtype=self.dtype)\n', (9109, 9130), False, 'import torch\n'), ((9262, 9292), 'torch.eye', 'torch.eye', (['(2)'], {'dtype': 
'self.dtype'}), '(2, dtype=self.dtype)\n', (9271, 9292), False, 'import torch\n'), ((17340, 17370), 'torch.eye', 'torch.eye', (['(3)'], {'dtype': 'self.dtype'}), '(3, dtype=self.dtype)\n', (17349, 17370), False, 'import torch\n'), ((17502, 17532), 'torch.eye', 'torch.eye', (['(2)'], {'dtype': 'self.dtype'}), '(2, dtype=self.dtype)\n', (17511, 17532), False, 'import torch\n'), ((712, 727), 'torch.cos', 'torch.cos', (['x[2]'], {}), '(x[2])\n', (721, 727), False, 'import torch\n'), ((736, 751), 'torch.sin', 'torch.sin', (['x[2]'], {}), '(x[2])\n', (745, 751), False, 'import torch\n')] |
from pathlib import Path
import numpy as np
import pandas as pd
from pandas.core.base import PandasObject
import geopandas as gpd
FILE_TPL = 'hybas_as_lev{level:02}_v1c.shp'
def load_hydrobasins_geodataframe(hydrobasins_dir, continent, levels=range(1, 13)):
gdfs = []
for level in levels:
print(f'Loading level: {level}')
filepath = Path(hydrobasins_dir, FILE_TPL.format(level=level))
gdf = gpd.read_file(str(filepath))
gdf['LEVEL'] = level
gdfs.append(gdf)
gdf = gpd.GeoDataFrame(pd.concat(gdfs, ignore_index=True))
gdf['PFAF_STR'] = gdf.PFAF_ID.apply(str)
return gdf
# Added to the GeoDataFrame class using:
# https://stackoverflow.com/a/53630084/54557
def _find_downstream(gdf, start_basin_idx):
"""Find all downstream basins at the same level as the start basin"""
start_row = gdf.loc[start_basin_idx]
gdf_lev = gdf[gdf.LEVEL == start_row.LEVEL]
next_row = start_row
downstream = np.array(gdf_lev.HYBAS_ID == start_row.HYBAS_ID, dtype=bool)
while next_row is not None:
next_downstream = np.array(next_row['NEXT_DOWN'] == gdf_lev['HYBAS_ID'], dtype=bool)
downstream |= next_downstream
next_gdf = gdf_lev[next_downstream]
if len(next_gdf):
assert len(next_gdf) == 1
next_row = next_gdf.iloc[0]
else:
next_row = None
return gdf_lev[downstream]
def _find_upstream(gdf, start_basin_idx):
start_row = gdf.loc[start_basin_idx]
gdf_lev = gdf[gdf.LEVEL == start_row.LEVEL]
next_hops = np.array(gdf_lev['NEXT_DOWN'] == start_row['HYBAS_ID'], dtype=bool)
all_hops = next_hops.copy()
while next_hops.sum():
next_gdf = gdf_lev.loc[next_hops]
next_hops = np.zeros_like(next_hops)
for i, row in next_gdf.iterrows():
next_hops |= np.array(gdf_lev['NEXT_DOWN'] == row['HYBAS_ID'], dtype=bool)
all_hops |= next_hops
return gdf_lev[all_hops > 0]
PandasObject.find_downstream = _find_downstream
PandasObject.find_upstream = _find_upstream
| [
"numpy.zeros_like",
"numpy.array",
"pandas.concat"
] | [((970, 1030), 'numpy.array', 'np.array', (['(gdf_lev.HYBAS_ID == start_row.HYBAS_ID)'], {'dtype': 'bool'}), '(gdf_lev.HYBAS_ID == start_row.HYBAS_ID, dtype=bool)\n', (978, 1030), True, 'import numpy as np\n'), ((1566, 1633), 'numpy.array', 'np.array', (["(gdf_lev['NEXT_DOWN'] == start_row['HYBAS_ID'])"], {'dtype': 'bool'}), "(gdf_lev['NEXT_DOWN'] == start_row['HYBAS_ID'], dtype=bool)\n", (1574, 1633), True, 'import numpy as np\n'), ((537, 571), 'pandas.concat', 'pd.concat', (['gdfs'], {'ignore_index': '(True)'}), '(gdfs, ignore_index=True)\n', (546, 571), True, 'import pandas as pd\n'), ((1090, 1156), 'numpy.array', 'np.array', (["(next_row['NEXT_DOWN'] == gdf_lev['HYBAS_ID'])"], {'dtype': 'bool'}), "(next_row['NEXT_DOWN'] == gdf_lev['HYBAS_ID'], dtype=bool)\n", (1098, 1156), True, 'import numpy as np\n'), ((1755, 1779), 'numpy.zeros_like', 'np.zeros_like', (['next_hops'], {}), '(next_hops)\n', (1768, 1779), True, 'import numpy as np\n'), ((1848, 1909), 'numpy.array', 'np.array', (["(gdf_lev['NEXT_DOWN'] == row['HYBAS_ID'])"], {'dtype': 'bool'}), "(gdf_lev['NEXT_DOWN'] == row['HYBAS_ID'], dtype=bool)\n", (1856, 1909), True, 'import numpy as np\n')] |
import numpy as np
import matplotlib.pyplot as plt
import itertools
def recursive_elm():
#Set seed for repeatibility
np.random.seed(10)
#Data and model constants
num_total_features = 11 #These are the number of features to be ranked
num_outputs = 2 #Number of outputs
num_hidden_neurons = 20 #Number of neurons in ELM
num_samples = 2000 #Number of samples in training
num_val_samples = 1000 #Number of samples in validation
acceptance_ratio = 0.1 #This fraction of solutions will be accepted for rankings
#Generate total data
input_data = np.random.normal(size=(num_samples+num_val_samples,num_total_features))
output_data = np.zeros(shape=(num_samples+num_val_samples, num_outputs), dtype='double')
output_data[:, 0] = np.sin(input_data[:,1]+input_data[:,2]+input_data[:,3])
output_data[:, 1] = np.cos(input_data[:,1]*input_data[:,3])*np.sin(input_data[:,3])
#Training inputs
training_inputs = input_data[0:num_samples,:]
training_outputs = output_data[0:num_samples,:]
#Training inputs
validation_inputs = input_data[num_samples:,:]
validation_outputs = output_data[num_samples:,:]
#Feature list
counter = np.zeros(shape=(num_total_features,),dtype='int')
feature_range = np.arange(start=0,stop=num_total_features,dtype='int')
#print(list(itertools.combinations(feature_range,2)))
#Making a tracker of the total possible combinations of inputs
combination_list = []
for combination in range(1,len(feature_range)+1):#Atleast one feature must be used
for subset in itertools.combinations(feature_range, combination):
combination_list.append(np.asarray(subset))
num_retain = int(acceptance_ratio*len(combination_list))
#These combinations can be used as masks for the total input sampling (i.e. the columns)
# print(training_inputs[:,combination_list[7]])
# print(np.shape(training_inputs[:, combination_list[7]]))
error_list = []
for choice in range(len(combination_list)):
choice_inputs = training_inputs[:,combination_list[choice]]
#Set layer 1 weights
num_inputs = np.shape(choice_inputs)[1]
w1 = np.random.randn(num_inputs,num_hidden_neurons)
b1 = np.random.randn(1,num_hidden_neurons)
#multiply to get linear transform
a1 = np.matmul(choice_inputs,w1)
hidden_range = np.arange(0,num_hidden_neurons,dtype='int')
a1[:,hidden_range] = a1[:,hidden_range] + b1[0,hidden_range]
#Activate with tan sigmoid
a1 = np.tanh(a1)
#Use ELM (i.e., pseudoinverse projection to obtain w2)
w2_opt = np.matmul(np.linalg.pinv(a1),training_outputs)
#Find validation MSE
validation_choice = validation_inputs[:,combination_list[choice]]
preds = np.matmul(validation_choice,w1)
preds[:,hidden_range] = preds[:,hidden_range] + b1[0,hidden_range]
preds = np.tanh(preds)
preds = np.matmul(preds,w2_opt)
error = np.sum((preds - validation_outputs)**2)
error_list.append(error)
indices = np.array(error_list).argsort()
indices = indices[0:num_retain]
accepted_combinations = np.asarray(combination_list)[indices]
for combination in range(0,len(accepted_combinations)):
for count_val in range(num_total_features):
if count_val in accepted_combinations[combination]:
counter[count_val] = counter[count_val] + 1
print(counter)
y_pos = np.arange(len(counter))
objects = []
objects.append(str(feature_range)[:])
plt.figure()
plt.bar(y_pos, counter, align='center', alpha=0.5)
plt.xticks(y_pos, feature_range)
plt.ylabel('Feature occurence')
plt.xlabel('Feature labels')
plt.show()
recursive_elm()
| [
"numpy.random.seed",
"numpy.sum",
"matplotlib.pyplot.bar",
"numpy.shape",
"matplotlib.pyplot.figure",
"numpy.sin",
"numpy.arange",
"numpy.random.normal",
"numpy.linalg.pinv",
"numpy.random.randn",
"matplotlib.pyplot.xticks",
"matplotlib.pyplot.show",
"numpy.tanh",
"numpy.asarray",
"itert... | [((126, 144), 'numpy.random.seed', 'np.random.seed', (['(10)'], {}), '(10)\n', (140, 144), True, 'import numpy as np\n'), ((616, 690), 'numpy.random.normal', 'np.random.normal', ([], {'size': '(num_samples + num_val_samples, num_total_features)'}), '(size=(num_samples + num_val_samples, num_total_features))\n', (632, 690), True, 'import numpy as np\n'), ((707, 783), 'numpy.zeros', 'np.zeros', ([], {'shape': '(num_samples + num_val_samples, num_outputs)', 'dtype': '"""double"""'}), "(shape=(num_samples + num_val_samples, num_outputs), dtype='double')\n", (715, 783), True, 'import numpy as np\n'), ((806, 868), 'numpy.sin', 'np.sin', (['(input_data[:, 1] + input_data[:, 2] + input_data[:, 3])'], {}), '(input_data[:, 1] + input_data[:, 2] + input_data[:, 3])\n', (812, 868), True, 'import numpy as np\n'), ((1233, 1283), 'numpy.zeros', 'np.zeros', ([], {'shape': '(num_total_features,)', 'dtype': '"""int"""'}), "(shape=(num_total_features,), dtype='int')\n", (1241, 1283), True, 'import numpy as np\n'), ((1303, 1359), 'numpy.arange', 'np.arange', ([], {'start': '(0)', 'stop': 'num_total_features', 'dtype': '"""int"""'}), "(start=0, stop=num_total_features, dtype='int')\n", (1312, 1359), True, 'import numpy as np\n'), ((3627, 3639), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (3637, 3639), True, 'import matplotlib.pyplot as plt\n'), ((3644, 3694), 'matplotlib.pyplot.bar', 'plt.bar', (['y_pos', 'counter'], {'align': '"""center"""', 'alpha': '(0.5)'}), "(y_pos, counter, align='center', alpha=0.5)\n", (3651, 3694), True, 'import matplotlib.pyplot as plt\n'), ((3699, 3731), 'matplotlib.pyplot.xticks', 'plt.xticks', (['y_pos', 'feature_range'], {}), '(y_pos, feature_range)\n', (3709, 3731), True, 'import matplotlib.pyplot as plt\n'), ((3736, 3767), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Feature occurence"""'], {}), "('Feature occurence')\n", (3746, 3767), True, 'import matplotlib.pyplot as plt\n'), ((3772, 3800), 'matplotlib.pyplot.xlabel', 
'plt.xlabel', (['"""Feature labels"""'], {}), "('Feature labels')\n", (3782, 3800), True, 'import matplotlib.pyplot as plt\n'), ((3806, 3816), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3814, 3816), True, 'import matplotlib.pyplot as plt\n'), ((886, 929), 'numpy.cos', 'np.cos', (['(input_data[:, 1] * input_data[:, 3])'], {}), '(input_data[:, 1] * input_data[:, 3])\n', (892, 929), True, 'import numpy as np\n'), ((926, 950), 'numpy.sin', 'np.sin', (['input_data[:, 3]'], {}), '(input_data[:, 3])\n', (932, 950), True, 'import numpy as np\n'), ((1620, 1670), 'itertools.combinations', 'itertools.combinations', (['feature_range', 'combination'], {}), '(feature_range, combination)\n', (1642, 1670), False, 'import itertools\n'), ((2227, 2274), 'numpy.random.randn', 'np.random.randn', (['num_inputs', 'num_hidden_neurons'], {}), '(num_inputs, num_hidden_neurons)\n', (2242, 2274), True, 'import numpy as np\n'), ((2287, 2325), 'numpy.random.randn', 'np.random.randn', (['(1)', 'num_hidden_neurons'], {}), '(1, num_hidden_neurons)\n', (2302, 2325), True, 'import numpy as np\n'), ((2381, 2409), 'numpy.matmul', 'np.matmul', (['choice_inputs', 'w1'], {}), '(choice_inputs, w1)\n', (2390, 2409), True, 'import numpy as np\n'), ((2432, 2477), 'numpy.arange', 'np.arange', (['(0)', 'num_hidden_neurons'], {'dtype': '"""int"""'}), "(0, num_hidden_neurons, dtype='int')\n", (2441, 2477), True, 'import numpy as np\n'), ((2594, 2605), 'numpy.tanh', 'np.tanh', (['a1'], {}), '(a1)\n', (2601, 2605), True, 'import numpy as np\n'), ((2854, 2886), 'numpy.matmul', 'np.matmul', (['validation_choice', 'w1'], {}), '(validation_choice, w1)\n', (2863, 2886), True, 'import numpy as np\n'), ((2977, 2991), 'numpy.tanh', 'np.tanh', (['preds'], {}), '(preds)\n', (2984, 2991), True, 'import numpy as np\n'), ((3008, 3032), 'numpy.matmul', 'np.matmul', (['preds', 'w2_opt'], {}), '(preds, w2_opt)\n', (3017, 3032), True, 'import numpy as np\n'), ((3049, 3090), 'numpy.sum', 'np.sum', (['((preds - 
validation_outputs) ** 2)'], {}), '((preds - validation_outputs) ** 2)\n', (3055, 3090), True, 'import numpy as np\n'), ((3232, 3260), 'numpy.asarray', 'np.asarray', (['combination_list'], {}), '(combination_list)\n', (3242, 3260), True, 'import numpy as np\n'), ((2187, 2210), 'numpy.shape', 'np.shape', (['choice_inputs'], {}), '(choice_inputs)\n', (2195, 2210), True, 'import numpy as np\n'), ((2697, 2715), 'numpy.linalg.pinv', 'np.linalg.pinv', (['a1'], {}), '(a1)\n', (2711, 2715), True, 'import numpy as np\n'), ((3137, 3157), 'numpy.array', 'np.array', (['error_list'], {}), '(error_list)\n', (3145, 3157), True, 'import numpy as np\n'), ((1708, 1726), 'numpy.asarray', 'np.asarray', (['subset'], {}), '(subset)\n', (1718, 1726), True, 'import numpy as np\n')] |
from distutils.core import setup
from distutils.extension import Extension
from Cython.Build import cythonize
import numpy
import os
import re
import sys
import time
print(time.ctime())
os.chdir(os.path.join(
os.path.dirname(os.path.abspath(__file__))))
extra_compile_args = []
extra_link_args = []
if os.name == 'posix':
extra_compile_args = ['-fopenmp', '-O3', '-ffast-math', '-march=native']
extra_link_args = ['-fopenmp']
def get_extensions():
extensions = []
for root, subFolders, files in os.walk('.'):
for _f in files:
if _f.endswith('.pyx'):
f = os.path.join(root, _f)
sources = [f]
name = re.sub(r'(.pyx)$', '', f)\
.replace('./', '')\
.replace('/', '.')\
.replace('\\', '.')\
.replace('..', '')
extensions.append(
Extension(name, sources, extra_compile_args=extra_compile_args, extra_link_args=extra_link_args))
return extensions
setup(
name = 'CM compiled',
ext_modules = cythonize(get_extensions(), compiler_directives={'language_level' : sys.version_info[0]}),
include_dirs=[numpy.get_include()]
)
| [
"os.path.abspath",
"os.walk",
"time.ctime",
"distutils.extension.Extension",
"numpy.get_include",
"os.path.join",
"re.sub"
] | [((172, 184), 'time.ctime', 'time.ctime', ([], {}), '()\n', (182, 184), False, 'import time\n'), ((520, 532), 'os.walk', 'os.walk', (['"""."""'], {}), "('.')\n", (527, 532), False, 'import os\n'), ((231, 256), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (246, 256), False, 'import os\n'), ((1236, 1255), 'numpy.get_include', 'numpy.get_include', ([], {}), '()\n', (1253, 1255), False, 'import numpy\n'), ((615, 637), 'os.path.join', 'os.path.join', (['root', '_f'], {}), '(root, _f)\n', (627, 637), False, 'import os\n'), ((955, 1055), 'distutils.extension.Extension', 'Extension', (['name', 'sources'], {'extra_compile_args': 'extra_compile_args', 'extra_link_args': 'extra_link_args'}), '(name, sources, extra_compile_args=extra_compile_args,\n extra_link_args=extra_link_args)\n', (964, 1055), False, 'from distutils.extension import Extension\n'), ((692, 716), 're.sub', 're.sub', (['"""(.pyx)$"""', '""""""', 'f'], {}), "('(.pyx)$', '', f)\n", (698, 716), False, 'import re\n')] |
#!/usr/bin/env python3
import argparse
from ddsketch.ddsketch import LogCollapsingLowestDenseDDSketch
import numpy as np
import os
def main():
parser = argparse.ArgumentParser(description='Process some integers.')
parser.add_argument('input', type=argparse.FileType('r'))
parser.add_argument('output', type=argparse.FileType('w'))
parser.add_argument('alpha', type=float, nargs='?', default=0.0001)
parser.add_argument('max_bins', type=int, nargs='?', default=32768)
args = parser.parse_args()
input_floats = []
for line in args.input.readlines():
input_floats += [float(i) for i in line.split(",") if i.strip()]
sketch = LogCollapsingLowestDenseDDSketch(relative_accuracy=args.alpha, bin_limit=args.max_bins)
for v in input_floats:
sketch.add(v)
output_quantiles = [(x, sketch.get_quantile_value(x)) for x in np.linspace(0, 1, 1000)]
for quantile, value in output_quantiles:
args.output.write(f"{quantile:.3},{value:.9}\n")
args.output.flush()
os.fsync(args.output)
if __name__ == "__main__":
main()
| [
"argparse.ArgumentParser",
"os.fsync",
"ddsketch.ddsketch.LogCollapsingLowestDenseDDSketch",
"numpy.linspace",
"argparse.FileType"
] | [((158, 219), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Process some integers."""'}), "(description='Process some integers.')\n", (181, 219), False, 'import argparse\n'), ((670, 762), 'ddsketch.ddsketch.LogCollapsingLowestDenseDDSketch', 'LogCollapsingLowestDenseDDSketch', ([], {'relative_accuracy': 'args.alpha', 'bin_limit': 'args.max_bins'}), '(relative_accuracy=args.alpha, bin_limit=\n args.max_bins)\n', (702, 762), False, 'from ddsketch.ddsketch import LogCollapsingLowestDenseDDSketch\n'), ((1035, 1056), 'os.fsync', 'os.fsync', (['args.output'], {}), '(args.output)\n', (1043, 1056), False, 'import os\n'), ((258, 280), 'argparse.FileType', 'argparse.FileType', (['"""r"""'], {}), "('r')\n", (275, 280), False, 'import argparse\n'), ((321, 343), 'argparse.FileType', 'argparse.FileType', (['"""w"""'], {}), "('w')\n", (338, 343), False, 'import argparse\n'), ((879, 902), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', '(1000)'], {}), '(0, 1, 1000)\n', (890, 902), True, 'import numpy as np\n')] |
import tensorflow as tf
import numpy as np
sess = tf.Session()
my_array = np.array([[1., 3., 5., 7., 9.],
[-2., 0., 2., 4., 6.],
[-6., -3., 0., 3., 6.]])
x_vals = np.array([my_array, my_array + 1])
x_data = tf.placeholder(tf.float32, shape=(3, 5))
m1 = tf.constant([[1.],[0.],[-1.],[2.],[4.]])
m2 = tf.constant([[2.]])
a1 = tf.constant([[10.]])
prod1 = tf.matmul(x_data, m1)
prod2 = tf.matmul(prod1, m2)
add1 = tf.add(prod2, a1)
for x_val in x_vals:
print(sess.run(add1, feed_dict={x_data: x_val}))
writer = tf.summary.FileWriter('/home/amardeep/tensorflow_learning',sess.graph)
writer.close() | [
"tensorflow.Session",
"tensorflow.add",
"tensorflow.constant",
"tensorflow.placeholder",
"tensorflow.matmul",
"tensorflow.summary.FileWriter",
"numpy.array"
] | [((51, 63), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (61, 63), True, 'import tensorflow as tf\n'), ((76, 175), 'numpy.array', 'np.array', (['[[1.0, 3.0, 5.0, 7.0, 9.0], [-2.0, 0.0, 2.0, 4.0, 6.0], [-6.0, -3.0, 0.0, \n 3.0, 6.0]]'], {}), '([[1.0, 3.0, 5.0, 7.0, 9.0], [-2.0, 0.0, 2.0, 4.0, 6.0], [-6.0, -\n 3.0, 0.0, 3.0, 6.0]])\n', (84, 175), True, 'import numpy as np\n'), ((165, 199), 'numpy.array', 'np.array', (['[my_array, my_array + 1]'], {}), '([my_array, my_array + 1])\n', (173, 199), True, 'import numpy as np\n'), ((209, 249), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '(3, 5)'}), '(tf.float32, shape=(3, 5))\n', (223, 249), True, 'import tensorflow as tf\n'), ((257, 306), 'tensorflow.constant', 'tf.constant', (['[[1.0], [0.0], [-1.0], [2.0], [4.0]]'], {}), '([[1.0], [0.0], [-1.0], [2.0], [4.0]])\n', (268, 306), True, 'import tensorflow as tf\n'), ((303, 323), 'tensorflow.constant', 'tf.constant', (['[[2.0]]'], {}), '([[2.0]])\n', (314, 323), True, 'import tensorflow as tf\n'), ((328, 349), 'tensorflow.constant', 'tf.constant', (['[[10.0]]'], {}), '([[10.0]])\n', (339, 349), True, 'import tensorflow as tf\n'), ((359, 380), 'tensorflow.matmul', 'tf.matmul', (['x_data', 'm1'], {}), '(x_data, m1)\n', (368, 380), True, 'import tensorflow as tf\n'), ((389, 409), 'tensorflow.matmul', 'tf.matmul', (['prod1', 'm2'], {}), '(prod1, m2)\n', (398, 409), True, 'import tensorflow as tf\n'), ((417, 434), 'tensorflow.add', 'tf.add', (['prod2', 'a1'], {}), '(prod2, a1)\n', (423, 434), True, 'import tensorflow as tf\n'), ((519, 590), 'tensorflow.summary.FileWriter', 'tf.summary.FileWriter', (['"""/home/amardeep/tensorflow_learning"""', 'sess.graph'], {}), "('/home/amardeep/tensorflow_learning', sess.graph)\n", (540, 590), True, 'import tensorflow as tf\n')] |
import unittest
import numpy as np
import math
from edge.model.inference.symmetric7 import SymmetricMaternCosGP
def get_gp(x, y):
return SymmetricMaternCosGP(
x, y,
noise_prior=(1, 0.1),
noise_constraint=(1e-3, 1e4),
lengthscale_prior=(1.5, 0.1),
lengthscale_constraint=(1e-3, 10),
outputscale_prior=(1, 0.1),
outputscale_constraint=(1e-3, 1e2),
hyperparameters_initialization={
'matern_0.lengthscale': 2
},
value_structure_discount_factor=0.5
)
class SymmetricMaternCosGPTest(unittest.TestCase):
def test_init(self):
x = np.arange(14).reshape(-1, 7)
x[:, -1] = [0, 1]
y = x.sum(axis=1)
model = get_gp(x, y)
self.assertTrue(
(
model.covar_module.base_kernel.base_kernel.kernels[0].
kernels[0].lengthscale == 2
).all()
)
self.assertEqual(
model.covar_module.base_kernel.base_kernel.kernels[0].\
kernels[0].lengthscale.shape[1],
4
)
self.assertEqual(
model.covar_module.base_kernel.base_kernel.kernels[1].\
period_length[0],
math.pi
)
self.assertTrue(
(
(model.covar_module.base_kernel.base_kernel.kernels[2].
kernels[0].lengthscale - 1.5).abs() < 1e-6
).all()
)
self.assertEqual(
model.covar_module.base_kernel.base_kernel.kernels[2].\
kernels[0].lengthscale.shape[1],
1
)
def test_forward(self):
z = np.arange(21).reshape(-1, 7)
z[:, -1] = [0, 1, 2]
x = z[(0, 1), :]
x_test = z[2, :].reshape(1, 7)
y = x.sum(axis=1)
model = get_gp(x, y)
y_test = model.predict(x_test)
mean = y_test.mean
covar = y_test.covariance_matrix
self.assertEqual(mean.shape[0], 1)
self.assertEqual(tuple(covar.shape), (1, 1))
if __name__ == '__main__':
unittest.main()
| [
"edge.model.inference.symmetric7.SymmetricMaternCosGP",
"unittest.main",
"numpy.arange"
] | [((144, 475), 'edge.model.inference.symmetric7.SymmetricMaternCosGP', 'SymmetricMaternCosGP', (['x', 'y'], {'noise_prior': '(1, 0.1)', 'noise_constraint': '(0.001, 10000.0)', 'lengthscale_prior': '(1.5, 0.1)', 'lengthscale_constraint': '(0.001, 10)', 'outputscale_prior': '(1, 0.1)', 'outputscale_constraint': '(0.001, 100.0)', 'hyperparameters_initialization': "{'matern_0.lengthscale': 2}", 'value_structure_discount_factor': '(0.5)'}), "(x, y, noise_prior=(1, 0.1), noise_constraint=(0.001, \n 10000.0), lengthscale_prior=(1.5, 0.1), lengthscale_constraint=(0.001, \n 10), outputscale_prior=(1, 0.1), outputscale_constraint=(0.001, 100.0),\n hyperparameters_initialization={'matern_0.lengthscale': 2},\n value_structure_discount_factor=0.5)\n", (164, 475), False, 'from edge.model.inference.symmetric7 import SymmetricMaternCosGP\n'), ((2078, 2093), 'unittest.main', 'unittest.main', ([], {}), '()\n', (2091, 2093), False, 'import unittest\n'), ((639, 652), 'numpy.arange', 'np.arange', (['(14)'], {}), '(14)\n', (648, 652), True, 'import numpy as np\n'), ((1664, 1677), 'numpy.arange', 'np.arange', (['(21)'], {}), '(21)\n', (1673, 1677), True, 'import numpy as np\n')] |
# Copyright 2020 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Tests for util_tfp."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# Dependency imports
from absl.testing import parameterized
import numpy as np
import tensorflow.compat.v2 as real_tf
from discussion import fun_mcmc
from discussion.fun_mcmc import backend
tf = backend.tf
tfp = backend.tfp
util = backend.util
util_tfp = fun_mcmc.util_tfp
real_tf.enable_v2_behavior()
class UtilTFPTestTensorFlow(real_tf.test.TestCase, parameterized.TestCase):
def setUp(self):
super(UtilTFPTestTensorFlow, self).setUp()
backend.set_backend(backend.TENSORFLOW, backend.MANUAL_TRANSFORMS)
def testWrapTransitionKernel(self):
class TestKernel(tfp.mcmc.TransitionKernel):
def one_step(self, current_state, previous_kernel_results):
return [x + 1 for x in current_state], previous_kernel_results + 1
def bootstrap_results(self, current_state):
return sum(current_state)
def is_calibrated(self):
return True
def kernel(state, pkr):
return util_tfp.transition_kernel_wrapper(state, pkr, TestKernel())
state = {'x': 0., 'y': 1.}
kr = 1.
(final_state, final_kr), _ = fun_mcmc.trace(
(state, kr),
kernel,
2,
trace_fn=lambda *args: (),
)
self.assertAllEqual({
'x': 2.,
'y': 3.
}, util.map_tree(np.array, final_state))
self.assertAllEqual(1. + 2., final_kr)
def testBijectorToTransformFn(self):
bijectors = [
tfp.bijectors.Identity(),
tfp.bijectors.Scale([
[1., 2.],
[3., 4.],
])
]
state = [tf.ones([2, 1]), tf.ones([2, 2])]
transform_fn = util_tfp.bijector_to_transform_fn(
bijectors, state_structure=state, batch_ndims=1)
fwd, (_, fwd_ldj1), fwd_ldj2 = fun_mcmc.call_transport_map_with_ldj(
transform_fn, state)
self.assertAllClose(
[np.ones([2, 1]), np.array([
[1., 2.],
[3., 4],
])], fwd)
true_fwd_ldj = np.array([
np.log(1) + np.log(2),
np.log(3) + np.log(4),
])
self.assertAllClose(true_fwd_ldj, fwd_ldj1)
self.assertAllClose(true_fwd_ldj, fwd_ldj2)
inverse_transform_fn = backend.util.inverse_fn(transform_fn)
inv, (_, inv_ldj1), inv_ldj2 = fun_mcmc.call_transport_map_with_ldj(
inverse_transform_fn, state)
self.assertAllClose(
[np.ones([2, 1]),
np.array([
[1., 1. / 2.],
[1. / 3., 1. / 4.],
])], inv)
self.assertAllClose(-true_fwd_ldj, inv_ldj1)
self.assertAllClose(-true_fwd_ldj, inv_ldj2)
class UtilTFPTestJAX(UtilTFPTestTensorFlow):
def setUp(self):
super(UtilTFPTestJAX, self).setUp()
backend.set_backend(backend.JAX, backend.MANUAL_TRANSFORMS)
if __name__ == '__main__':
real_tf.test.main()
| [
"discussion.fun_mcmc.call_transport_map_with_ldj",
"numpy.log",
"tensorflow.compat.v2.test.main",
"tensorflow.compat.v2.enable_v2_behavior",
"discussion.fun_mcmc.trace",
"numpy.ones",
"discussion.fun_mcmc.backend.set_backend",
"numpy.array",
"discussion.fun_mcmc.backend.util.inverse_fn"
] | [((1092, 1120), 'tensorflow.compat.v2.enable_v2_behavior', 'real_tf.enable_v2_behavior', ([], {}), '()\n', (1118, 1120), True, 'import tensorflow.compat.v2 as real_tf\n'), ((3528, 3547), 'tensorflow.compat.v2.test.main', 'real_tf.test.main', ([], {}), '()\n', (3545, 3547), True, 'import tensorflow.compat.v2 as real_tf\n'), ((1270, 1336), 'discussion.fun_mcmc.backend.set_backend', 'backend.set_backend', (['backend.TENSORFLOW', 'backend.MANUAL_TRANSFORMS'], {}), '(backend.TENSORFLOW, backend.MANUAL_TRANSFORMS)\n', (1289, 1336), False, 'from discussion.fun_mcmc import backend\n'), ((1885, 1950), 'discussion.fun_mcmc.trace', 'fun_mcmc.trace', (['(state, kr)', 'kernel', '(2)'], {'trace_fn': '(lambda *args: ())'}), '((state, kr), kernel, 2, trace_fn=lambda *args: ())\n', (1899, 1950), False, 'from discussion import fun_mcmc\n'), ((2514, 2571), 'discussion.fun_mcmc.call_transport_map_with_ldj', 'fun_mcmc.call_transport_map_with_ldj', (['transform_fn', 'state'], {}), '(transform_fn, state)\n', (2550, 2571), False, 'from discussion import fun_mcmc\n'), ((2929, 2966), 'discussion.fun_mcmc.backend.util.inverse_fn', 'backend.util.inverse_fn', (['transform_fn'], {}), '(transform_fn)\n', (2952, 2966), False, 'from discussion.fun_mcmc import backend\n'), ((3002, 3067), 'discussion.fun_mcmc.call_transport_map_with_ldj', 'fun_mcmc.call_transport_map_with_ldj', (['inverse_transform_fn', 'state'], {}), '(inverse_transform_fn, state)\n', (3038, 3067), False, 'from discussion import fun_mcmc\n'), ((3437, 3496), 'discussion.fun_mcmc.backend.set_backend', 'backend.set_backend', (['backend.JAX', 'backend.MANUAL_TRANSFORMS'], {}), '(backend.JAX, backend.MANUAL_TRANSFORMS)\n', (3456, 3496), False, 'from discussion.fun_mcmc import backend\n'), ((2615, 2630), 'numpy.ones', 'np.ones', (['[2, 1]'], {}), '([2, 1])\n', (2622, 2630), True, 'import numpy as np\n'), ((2632, 2664), 'numpy.array', 'np.array', (['[[1.0, 2.0], [3.0, 4]]'], {}), '([[1.0, 2.0], [3.0, 4]])\n', (2640, 2664), True, 
'import numpy as np\n'), ((3111, 3126), 'numpy.ones', 'np.ones', (['[2, 1]'], {}), '([2, 1])\n', (3118, 3126), True, 'import numpy as np\n'), ((3137, 3189), 'numpy.array', 'np.array', (['[[1.0, 1.0 / 2.0], [1.0 / 3.0, 1.0 / 4.0]]'], {}), '([[1.0, 1.0 / 2.0], [1.0 / 3.0, 1.0 / 4.0]])\n', (3145, 3189), True, 'import numpy as np\n'), ((2743, 2752), 'numpy.log', 'np.log', (['(1)'], {}), '(1)\n', (2749, 2752), True, 'import numpy as np\n'), ((2755, 2764), 'numpy.log', 'np.log', (['(2)'], {}), '(2)\n', (2761, 2764), True, 'import numpy as np\n'), ((2774, 2783), 'numpy.log', 'np.log', (['(3)'], {}), '(3)\n', (2780, 2783), True, 'import numpy as np\n'), ((2786, 2795), 'numpy.log', 'np.log', (['(4)'], {}), '(4)\n', (2792, 2795), True, 'import numpy as np\n')] |
from __future__ import division
import struct
import numpy as np
def unpack_floats(batch_labels):
shape = batch_labels[..., 0].shape
floats = np.empty(shape, np.float32)
for index, _ in np.ndenumerate(floats):
floats[index] = struct.unpack('f', batch_labels[index + (slice(0, 4),)])[0]
return floats
def unpack_float64s(batch_labels):
shape = batch_labels[..., 0].shape
floats = np.empty(shape, np.float64)
for index, _ in np.ndenumerate(floats):
floats[index] = struct.unpack('d', batch_labels[index + (slice(0, 8),)])[0]
return floats
def calculate_labels(batch_labels):
floats = unpack_floats(batch_labels)
return np.mean(floats, axis=-1)
def one_hot_encoding(vector, nb_classes):
"""
Converts an input 1-D vector of integers into an output
2-D array of one-hot vectors, where an i'th input value
of j will set a '1' in the i'th row, j'th column of the
output array.
Example:
v = np.array((1, 0, 4))
one_hot_v = one_hot_encoding(v)
print one_hot_v
[[0 1 0 0 0]
[1 0 0 0 0]
[0 0 0 0 1]]
"""
assert isinstance(vector, np.ndarray)
assert len(vector) > 0
result = np.zeros(shape=(len(vector), nb_classes), dtype=np.float32)
result[np.arange(len(vector)), vector] = 1.0
return result
| [
"numpy.empty",
"numpy.mean",
"numpy.ndenumerate"
] | [((154, 181), 'numpy.empty', 'np.empty', (['shape', 'np.float32'], {}), '(shape, np.float32)\n', (162, 181), True, 'import numpy as np\n'), ((202, 224), 'numpy.ndenumerate', 'np.ndenumerate', (['floats'], {}), '(floats)\n', (216, 224), True, 'import numpy as np\n'), ((419, 446), 'numpy.empty', 'np.empty', (['shape', 'np.float64'], {}), '(shape, np.float64)\n', (427, 446), True, 'import numpy as np\n'), ((467, 489), 'numpy.ndenumerate', 'np.ndenumerate', (['floats'], {}), '(floats)\n', (481, 489), True, 'import numpy as np\n'), ((684, 708), 'numpy.mean', 'np.mean', (['floats'], {'axis': '(-1)'}), '(floats, axis=-1)\n', (691, 708), True, 'import numpy as np\n')] |
#!/usr/bin/env python
"""
Batch output depth map images by <NAME>.
Copyright 2019, <NAME>, HKUST.
Depth map visualization.
"""
import numpy as np
import cv2
import argparse
import matplotlib.pyplot as plt
from preprocess import load_pfm
from depthfusion import read_gipuma_dmb
import os, re
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('depth_dir')
args = parser.parse_args()
depth_dir = args.depth_dir
for filename in os.listdir(depth_dir):
if not re.match(r'^\d{8}_init.pfm$', filename):
continue
depth_path = os.path.join(depth_dir, filename)
if depth_path.endswith('npy'):
depth_image = np.load(depth_path)
depth_image = np.squeeze(depth_image)
print('value range: ', depth_image.min(), depth_image.max())
plt.imshow(depth_image, 'rainbow')
# plt.show()
elif depth_path.endswith('pfm'):
depth_image = load_pfm(open(depth_path, 'rb'))
ma = np.ma.masked_equal(depth_image, 0.0, copy=False)
print('value range: ', ma.min(), ma.max())
plt.imshow(depth_image, 'rainbow')
# plt.show()
elif depth_path.endswith('dmb'):
depth_image = read_gipuma_dmb(depth_path)
ma = np.ma.masked_equal(depth_image, 0.0, copy=False)
print('value range: ', ma.min(), ma.max())
plt.imshow(depth_image, 'rainbow')
# plt.show()
else:
depth_image = cv2.imread(depth_path)
ma = np.ma.masked_equal(depth_image, 0.0, copy=False)
print('value range: ', ma.min(), ma.max())
plt.imshow(depth_image)
# plt.show()
plt.savefig(os.path.join(depth_dir, filename.split('.')[0] + '.jpg'))
| [
"depthfusion.read_gipuma_dmb",
"numpy.load",
"argparse.ArgumentParser",
"matplotlib.pyplot.imshow",
"numpy.ma.masked_equal",
"re.match",
"cv2.imread",
"numpy.squeeze",
"os.path.join",
"os.listdir"
] | [((334, 359), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (357, 359), False, 'import argparse\n'), ((480, 501), 'os.listdir', 'os.listdir', (['depth_dir'], {}), '(depth_dir)\n', (490, 501), False, 'import os, re\n'), ((610, 643), 'os.path.join', 'os.path.join', (['depth_dir', 'filename'], {}), '(depth_dir, filename)\n', (622, 643), False, 'import os, re\n'), ((518, 557), 're.match', 're.match', (['"""^\\\\d{8}_init.pfm$"""', 'filename'], {}), "('^\\\\d{8}_init.pfm$', filename)\n", (526, 557), False, 'import os, re\n'), ((709, 728), 'numpy.load', 'np.load', (['depth_path'], {}), '(depth_path)\n', (716, 728), True, 'import numpy as np\n'), ((755, 778), 'numpy.squeeze', 'np.squeeze', (['depth_image'], {}), '(depth_image)\n', (765, 778), True, 'import numpy as np\n'), ((864, 898), 'matplotlib.pyplot.imshow', 'plt.imshow', (['depth_image', '"""rainbow"""'], {}), "(depth_image, 'rainbow')\n", (874, 898), True, 'import matplotlib.pyplot as plt\n'), ((1041, 1089), 'numpy.ma.masked_equal', 'np.ma.masked_equal', (['depth_image', '(0.0)'], {'copy': '(False)'}), '(depth_image, 0.0, copy=False)\n', (1059, 1089), True, 'import numpy as np\n'), ((1157, 1191), 'matplotlib.pyplot.imshow', 'plt.imshow', (['depth_image', '"""rainbow"""'], {}), "(depth_image, 'rainbow')\n", (1167, 1191), True, 'import matplotlib.pyplot as plt\n'), ((1284, 1311), 'depthfusion.read_gipuma_dmb', 'read_gipuma_dmb', (['depth_path'], {}), '(depth_path)\n', (1299, 1311), False, 'from depthfusion import read_gipuma_dmb\n'), ((1329, 1377), 'numpy.ma.masked_equal', 'np.ma.masked_equal', (['depth_image', '(0.0)'], {'copy': '(False)'}), '(depth_image, 0.0, copy=False)\n', (1347, 1377), True, 'import numpy as np\n'), ((1445, 1479), 'matplotlib.pyplot.imshow', 'plt.imshow', (['depth_image', '"""rainbow"""'], {}), "(depth_image, 'rainbow')\n", (1455, 1479), True, 'import matplotlib.pyplot as plt\n'), ((1545, 1567), 'cv2.imread', 'cv2.imread', (['depth_path'], {}), '(depth_path)\n', 
(1555, 1567), False, 'import cv2\n'), ((1585, 1633), 'numpy.ma.masked_equal', 'np.ma.masked_equal', (['depth_image', '(0.0)'], {'copy': '(False)'}), '(depth_image, 0.0, copy=False)\n', (1603, 1633), True, 'import numpy as np\n'), ((1701, 1724), 'matplotlib.pyplot.imshow', 'plt.imshow', (['depth_image'], {}), '(depth_image)\n', (1711, 1724), True, 'import matplotlib.pyplot as plt\n')] |
from __future__ import print_function
from __future__ import absolute_import
from __future__ import unicode_literals
from SimPEG import Mesh, Maps, SolverLU, Utils
from SimPEG.Utils import ExtractCoreMesh
import numpy as np
from SimPEG.EM.Static import DC
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.pylab as pylab
from matplotlib.ticker import LogFormatter
from matplotlib.path import Path
import matplotlib.patches as patches
from scipy.constants import epsilon_0
import copy
from ipywidgets import interact, interact_manual, IntSlider, FloatSlider, FloatText, ToggleButtons, fixed, Widget
from .Base import widgetify
# Mesh, sigmaMap can be globals global
npad = 12
growrate = 2.
cs = 20.
hx = [(cs, npad, -growrate), (cs, 100), (cs, npad, growrate)]
hy = [(cs, npad, -growrate), (cs, 50)]
mesh = Mesh.TensorMesh([hx, hy], "CN")
expmap = Maps.ExpMap(mesh)
mapping = expmap
xmin = -1000.
xmax = 1000.
ymin = -1000.
ymax = 100.
xylim = np.c_[[xmin, ymin], [xmax, ymax]]
indCC, meshcore = ExtractCoreMesh(xylim, mesh)
indx = (mesh.gridFx[:, 0] >= xmin) & (mesh.gridFx[:, 0] <= xmax) & (mesh.gridFx[:, 1] >= ymin) & (mesh.gridFx[:, 1] <= ymax)
indy = (mesh.gridFy[:, 0] >= xmin) & (mesh.gridFy[:, 0] <= xmax) & (mesh.gridFy[:, 1] >= ymin) & (mesh.gridFy[:, 1] <= ymax)
indF = np.concatenate((indx, indy))
def model_valley(lnsig_air=np.log(1e-8), ln_sigback=np.log(1e-4), ln_over=np.log(1e-2),
ln_sigtarget=np.log(1e-3), overburden_thick=200., overburden_wide=1000.,
target_thick=200., target_wide=400.,
a=1000., b=500., xc=0., zc=250.):
mtrue = ln_sigback*np.ones(mesh.nC)
mhalf = copy.deepcopy(mtrue)
ellips = (((mesh.gridCC[:, 0]-xc)**2.)/a**2. + ((mesh.gridCC[:, 1]-zc)**2.)/b**2.) <1.
mtrue[ellips] = lnsig_air
mair = copy.deepcopy(mtrue)
#overb = (mesh.gridCC[:, 1] >-overburden_thick) & (mesh.gridCC[:, 1]<=0)&(mesh.gridCC[:, 0] >-overburden_wide/2.)&(mesh.gridCC[:, 0] <overburden_wide/2.)
#mtrue[overb] = ln_over*np.ones_like(mtrue[overb])
bottom_valley = mesh.gridCC[ellips, 1].min()
overb = (mesh.gridCC[:, 1] >= bottom_valley) & (mesh.gridCC[:, 1] < bottom_valley+overburden_thick) & ellips
mtrue[overb] = ln_over*np.ones_like(mtrue[overb])
mair[overb] = ln_sigback
mover = copy.deepcopy(mtrue)
target = (mesh.gridCC[:, 1] > bottom_valley-target_thick) & (mesh.gridCC[:, 1] < bottom_valley) & (mesh.gridCC[:, 0] > -target_wide/2.) & (mesh.gridCC[:, 0] < target_wide/2.)
mtrue[target] = ln_sigtarget*np.ones_like(mtrue[target])
mtrue = Utils.mkvc(mtrue)
return mtrue, mhalf, mair, mover
def findnearest(A):
idx = np.abs(mesh.gridCC[:, 0, None]-A).argmin(axis=0)
return mesh.gridCC[idx, 0]
def get_Surface(mtrue, A):
active = (mtrue > (np.log(1e-8)))
nearpoint = findnearest(A)
columns = mesh.gridCC[:, 0, None] == nearpoint
ind = np.logical_and(columns.T, active).T
idm = []
surface = []
for i in range(ind.shape[1]):
idm.append(np.where(np.all(mesh.gridCC == np.r_[nearpoint[i], np.max(mesh.gridCC[ind[:, i], 1])], axis=1)))
surface.append(mesh.gridCC[idm[-1], 1])
return Utils.mkvc(np.r_[idm]), Utils.mkvc(np.r_[surface])
def model_fields(A, B, mtrue, mhalf, mair, mover, whichprimary='air'):
idA, surfaceA = get_Surface(mtrue, A)
idB, surfaceB = get_Surface(mtrue, B)
Mx = mesh.gridCC
# Nx = np.empty(shape =(mesh.nC, 2))
rx = DC.Rx.Pole_ky(Mx)
# rx = DC.Rx.Dipole(Mx, Nx)
if(B == []):
src = DC.Src.Pole([rx], np.r_[A, surfaceA])
else:
src = DC.Src.Dipole([rx], np.r_[A, surfaceA], np.r_[B, surfaceB])
# src = DC.Src.Dipole_ky([rx], np.r_[A, 0.], np.r_[B, 0.])
survey = DC.Survey_ky([src])
# survey = DC.Survey([src])
# survey_prim = DC.Survey([src])
survey_prim = DC.Survey_ky([src])
survey_air = DC.Survey_ky([src])
#problem = DC.Problem3D_CC(mesh, sigmaMap = mapping)
problem = DC.Problem2D_CC(mesh, sigmaMap=mapping)
# problem_prim = DC.Problem3D_CC(mesh, sigmaMap = mapping)
problem_prim = DC.Problem2D_CC(mesh, sigmaMap=mapping)
problem_air = DC.Problem2D_CC(mesh, sigmaMap=mapping)
problem.Solver = SolverLU
problem_prim.Solver = SolverLU
problem_air.Solver = SolverLU
problem.pair(survey)
problem_prim.pair(survey_prim)
problem_air.pair(survey_air)
mesh.setCellGradBC("neumann")
cellGrad = mesh.cellGrad
faceDiv = mesh.faceDiv
if whichprimary == 'air':
phi_primary = survey_prim.dpred(mair)
elif whichprimary == 'half':
phi_primary = survey_prim.dpred(mhalf)
elif whichprimary == 'overburden':
phi_primary = survey_prim.dpred(mover)
e_primary = -cellGrad*phi_primary
j_primary = problem_prim.MfRhoI*problem_prim.Grad*phi_primary
q_primary = epsilon_0*problem_prim.Vol*(faceDiv*e_primary)
primary_field = {'phi': phi_primary, 'e': e_primary, 'j': j_primary, 'q': q_primary}
phi_total = survey.dpred(mtrue)
e_total = -cellGrad*phi_total
j_total = problem.MfRhoI*problem.Grad*phi_total
q_total = epsilon_0*problem.Vol*(faceDiv*e_total)
total_field = {'phi': phi_total, 'e': e_total, 'j': j_total, 'q': q_total}
phi_air = survey.dpred(mair)
e_air = -cellGrad*phi_air
j_air = problem.MfRhoI*problem.Grad*phi_air
q_air = epsilon_0*problem.Vol*(faceDiv*e_air)
air_field = {'phi': phi_air, 'e': e_air, 'j': j_air, 'q': q_air}
return src, primary_field, air_field, total_field
def get_Surface_Potentials(mtrue, survey, src, field_obj):
phi = field_obj['phi']
CCLoc = mesh.gridCC
XLoc = np.unique(mesh.gridCC[:, 0])
surfaceInd, zsurfaceLoc = get_Surface(mtrue, XLoc)
phiSurface = phi[surfaceInd]
phiScale = 0.
if(survey == "Pole-Dipole" or survey == "Pole-Pole"):
refInd = Utils.closestPoints(mesh, [xmax+60., 0.], gridLoc='CC')
# refPoint = CCLoc[refInd]
# refSurfaceInd = np.where(xSurface == refPoint[0])
# phiScale = np.median(phiSurface)
phiScale = phi[refInd]
phiSurface = phiSurface - phiScale
return XLoc, phiSurface, phiScale
def getCylinderPoints(xc, zc, a, b):
xLocOrig1 = np.arange(-a, a+a/10., a/10.)
xLocOrig2 = np.arange(a, -a-a/10., -a/10.)
# Top half of cylinder
zLoc1 = b*np.sqrt(1.-(xLocOrig1/a)**2)+zc
# Bottom half of cylinder
zLoc2 = -b*np.sqrt(1.-(xLocOrig2/a)**2)+zc
# Shift from x = 0 to xc
xLoc1 = xLocOrig1 + xc*np.ones_like(xLocOrig1)
xLoc2 = xLocOrig2 + xc*np.ones_like(xLocOrig2)
cylinderPoints = np.vstack([np.vstack([xLoc1, zLoc1]).T, np.vstack([xLoc2, zLoc2]).T])
return cylinderPoints
def get_OverburdenPoints(cylinderPoints, overburden_thick):
bottom = cylinderPoints[:, 1].min()
indb = np.where(cylinderPoints[:, 1] < 0.)
overburdenPoints = [np.maximum(cylinderPoints[i, 1], bottom+overburden_thick) for i in indb]
return np.vstack([cylinderPoints[indb, 0], overburdenPoints]).T
# In[30]:
def getPlateCorners(target_thick, target_wide, cylinderPoints):
bottom = cylinderPoints[:, 1].min()
xc = 0.
zc = bottom-0.5*target_thick
rotPlateCorners = np.array([[-0.5*target_wide, 0.5*target_thick], [0.5*target_wide, 0.5*target_thick],
[-0.5*target_wide, -0.5*target_thick], [0.5*target_wide, -0.5*target_thick]])
plateCorners = rotPlateCorners + np.hstack([np.repeat(xc, 4).reshape([4, 1]), np.repeat(zc, 4).reshape([4, 1])])
return plateCorners
def get_TargetPoints(target_thick, target_wide, ellips_b, ellips_zc):
xLocOrig1 = np.arange(-target_wide/2., target_wide/2.+target_wide/10., target_wide/10.)
xLocOrig2 = np.arange(target_wide/2., -target_wide/2.-target_wide/10., -target_wide/10.)
zloc1 = np.ones_like(xLocOrig1)*(ellips_b+ellips_zc)
zloc1 = np.ones_like(xLocOrig1)*(ellips_b+ellips_zc-target_thick)
corner
targetpoint = np.vstack([np.vstack([xLoc1, zLoc1]).T, np.vstack([xLoc2, zLoc2]).T])
def getSensitivity(survey, A, B, M, N, model):
if(survey == "Dipole-Dipole"):
rx = DC.Rx.Dipole_ky(np.r_[M, 0.], np.r_[N, 0.])
src = DC.Src.Dipole([rx], np.r_[A, 0.], np.r_[B, 0.])
elif(survey == "Pole-Dipole"):
rx = DC.Rx.Dipole_ky(np.r_[M, 0.], np.r_[N, 0.])
src = DC.Src.Pole([rx], np.r_[A, 0.])
elif(survey == "Dipole-Pole"):
rx = DC.Rx.Pole_ky(np.r_[M, 0.])
src = DC.Src.Dipole([rx], np.r_[A, 0.], np.r_[B, 0.])
elif(survey == "Pole-Pole"):
rx = DC.Rx.Pole_ky(np.r_[M, 0.])
src = DC.Src.Pole([rx], np.r_[A, 0.])
survey = DC.Survey_ky([src])
problem = DC.Problem2D_CC(mesh, sigmaMap=mapping)
problem.Solver = SolverLU
problem.pair(survey)
fieldObj = problem.fields(model)
J = problem.Jtvec(model, np.array([1.]), f=fieldObj)
return J
def calculateRhoA(survey, VM, VN, A, B, M, N):
#to stabilize division
eps = 1e-9
if(survey == "Dipole-Dipole"):
G = 1. / (1./(np.abs(A-M)+eps) - 1./(np.abs(M-B)+eps) - 1./(np.abs(N-A)+eps) + 1./(np.abs(N-B)+eps))
rho_a = (VM-VN)*2.*np.pi*G
elif(survey == "Pole-Dipole"):
G = 1. / (1./(np.abs(A-M)+eps) - 1./(np.abs(N-A)+eps))
rho_a = (VM-VN)*2.*np.pi*G
elif(survey == "Dipole-Pole"):
G = 1. / (1./(np.abs(A-M)+eps) - 1./(np.abs(M-B)+eps))
rho_a = (VM)*2.*np.pi*G
elif(survey == "Pole-Pole"):
G = 1. / (1./(np.abs(A-M)+eps))
rho_a = (VM)*2.*np.pi*G
return rho_a
def PLOT(survey, A, B, M, N, rhohalf, rholayer, rhoTarget, overburden_thick, overburden_wide,
target_thick, target_wide, whichprimary,
ellips_a, ellips_b, xc, zc, Field, Type, Scale):
labelsize = 12.
ticksize = 10.
if(survey == "Pole-Dipole" or survey == "Pole-Pole"):
B = []
ln_sigTarget = np.log(1./rhoTarget)
ln_sigLayer = np.log(1./rholayer)
ln_sigHalf = np.log(1./rhohalf)
mtrue, mhalf, mair, mover = model_valley(lnsig_air=np.log(1e-8), ln_sigback=ln_sigHalf, ln_over=ln_sigLayer,
ln_sigtarget=ln_sigTarget , overburden_thick =overburden_thick,
target_thick=target_thick, target_wide =target_wide,
a=ellips_a, b=ellips_b, xc=xc, zc=zc)
src, primary_field, air_field, total_field = model_fields(A, B, mtrue, mhalf, mair, mover, whichprimary=whichprimary)
fig, ax = plt.subplots(2, 1, figsize=(9*1.5, 9*1.5), sharex=True)
fig.subplots_adjust(right=0.8)
xSurface, phiTotalSurface, phiScaleTotal = get_Surface_Potentials(mtrue, survey, src, total_field)
xSurface, phiPrimSurface, phiScalePrim = get_Surface_Potentials(mtrue, survey, src, primary_field)
xSurface, phiAirSurface, phiScaleAir = get_Surface_Potentials(mtrue, survey, src, air_field)
ylim = np.r_[-1., 1.]*np.max(np.abs(phiTotalSurface))
xlim = np.array([-1000., 1000.])
if(survey == "Dipole-Pole" or survey == "Pole-Pole"):
MInd = np.where(xSurface == findnearest(M))
N = []
VM = phiTotalSurface[MInd[0]]
VN = 0.
VMprim = phiPrimSurface[MInd[0]]
VNprim = 0.
VMair = phiAirSurface[MInd[0]]
VNair = 0.
else:
MInd = np.where(xSurface == findnearest(M))
NInd = np.where(xSurface == findnearest(N))
VM = phiTotalSurface[MInd[0]]
VN = phiTotalSurface[NInd[0]]
VMprim = phiPrimSurface[MInd[0]]
VNprim = phiPrimSurface[NInd[0]]
VMair = phiAirSurface[MInd[0]]
VNair = phiAirSurface[NInd[0]]
#2D geometric factor
G2D = rhohalf/(calculateRhoA(survey, VMair, VNair, A, B, M, N))
#print G2D
# Subplot 1: Full set of surface potentials
ax[0].plot(xSurface, phiPrimSurface, linestyle='dashed', linewidth=2., color='k')
ax[0].plot(xSurface, phiTotalSurface, color=[0.1, 0.5, 0.1], linewidth=1.)
ax[0].grid(which='both', linestyle='-', linewidth=0.5, color=[0.2, 0.2, 0.2], alpha=0.5)
if(survey == "Pole-Dipole" or survey == "Pole-Pole"):
ax[0].plot(A, 0, '+', markersize=12, markeredgewidth=3, color=[1., 0., 0.])
else:
ax[0].plot(A, 0, '+', markersize=12, markeredgewidth=3, color=[1., 0., 0.])
ax[0].plot(B, 0, '_', markersize=12, markeredgewidth=3, color=[0., 0., 1.])
ax[0].set_ylabel('Potential, (V)', fontsize=labelsize)
ax[0].set_xlabel('x (m)', fontsize=labelsize)
ax[0].set_xlim(xlim)
ax[0].set_ylim(ylim)
if(survey == "Dipole-Pole" or survey == "Pole-Pole"):
ax[0].plot(M, VM, 'o', color='k')
xytextM = (M+0.5, np.max([np.min([VM, ylim.max()]), ylim.min()])+0.5)
ax[0].annotate('%2.1e'%(VM), xy=xytextM, xytext=xytextM, fontsize=labelsize)
else:
ax[0].plot(M, VM, 'o', color='k')
ax[0].plot(N, VN, 'o', color='k')
xytextM = (M+0.5, np.max([np.min([VM, ylim.max()]), ylim.min()])+0.5)
xytextN = (N+0.5, np.max([np.min([VN, ylim.max()]), ylim.min()])+0.5)
ax[0].annotate('%2.1e'%(VM), xy=xytextM, xytext=xytextM, fontsize=labelsize)
ax[0].annotate('%2.1e'%(VN), xy=xytextN, xytext=xytextN, fontsize=labelsize)
ax[0].tick_params(axis='both', which='major', labelsize=ticksize)
props = dict(boxstyle='round', facecolor='grey', alpha=0.4)
ax[0].text(xlim.max()+1, ylim.max()-0.1*ylim.max(), '$\\rho_a$ = %2.2f'%(G2D*calculateRhoA(survey, VM, VN, A, B, M, N)),
verticalalignment='bottom', bbox=props, fontsize=14)
ax[0].legend(['Reference Potential', 'Model Potential'], loc=3, fontsize=labelsize)
if Scale == 'Log':
ax[0].set_yscale('symlog', linthreshy=1e-5)
if Field == 'Model':
label = 'Resisitivity (ohm-m)'
xtype = 'CC'
view = 'real'
streamOpts = None
ind = indCC
formatter = "%.1e"
pcolorOpts = {"cmap": "jet_r"}
if Scale == 'Log':
pcolorOpts = {'norm': matplotlib.colors.LogNorm(), "cmap": "jet_r"}
if whichprimary == 'air':
mprimary = mair
elif whichprimary == 'overburden':
mprimary = mover
elif whichprimary == 'half':
mprimary = mhalf
if Type == 'Total':
u = 1./(mapping*mtrue)
elif Type == 'Primary':
u = 1./(mapping*mprimary)
elif Type == 'Secondary':
u = 1./(mapping*mtrue) - 1./(mapping*mprimary)
if Scale == 'Log':
linthresh = 10.
pcolorOpts = {'norm':matplotlib.colors.SymLogNorm(linthresh=linthresh, linscale=0.2), "cmap": "jet_r"}
#prepare for masking arrays - 'conventional' arrays won't do it
u = np.ma.array(u)
#mask values below a certain threshold
u = np.ma.masked_where(mtrue <= np.log(1e-8) , u)
elif Field == 'Potential':
label = 'Potential (V)'
xtype = 'CC'
view = 'real'
streamOpts = None
ind = indCC
formatter = "%.1e"
pcolorOpts = {"cmap": "viridis"}
if Scale == 'Log':
linthresh = 10.
pcolorOpts = {'norm': matplotlib.colors.SymLogNorm(linthresh=linthresh, linscale=0.2), "cmap": "viridis"}
if Type == 'Total':
# formatter = LogFormatter(10, labelOnlyBase =False)
# pcolorOpts = {'norm':matplotlib.colors.SymLogNorm(linthresh =10, linscale =0.1)}
u = total_field['phi'] - phiScaleTotal
elif Type == 'Primary':
# formatter = LogFormatter(10, labelOnlyBase =False)
# pcolorOpts = {'norm':matplotlib.colors.SymLogNorm(linthresh =10, linscale =0.1)}
u = primary_field['phi'] - phiScalePrim
elif Type == 'Secondary':
# formatter = None
# pcolorOpts = {"cmap":"viridis"}
uTotal = total_field['phi'] - phiScaleTotal
uPrim = primary_field['phi'] - phiScalePrim
u = uTotal - uPrim
elif Field == 'E':
label = 'Electric Field (V/m)'
xtype = 'F'
view = 'vec'
streamOpts = {'color': 'w'}
ind = indF
#formatter = LogFormatter(10, labelOnlyBase =False)
pcolorOpts = {"cmap": "viridis"}
if Scale == 'Log':
pcolorOpts = {'norm': matplotlib.colors.LogNorm(), "cmap": "viridis"}
formatter = "%.1e"
if Type == 'Total':
u = total_field['e']
elif Type == 'Primary':
u = primary_field['e']
elif Type == 'Secondary':
uTotal = total_field['e']
uPrim = primary_field['e']
u = uTotal - uPrim
elif Field == 'J':
label = 'Current density ($A/m^2$)'
xtype = 'F'
view = 'vec'
streamOpts = {'color': 'w'}
ind = indF
#formatter = LogFormatter(10, labelOnlyBase =False)
pcolorOpts = {"cmap": "viridis"}
if Scale == 'Log':
pcolorOpts = {'norm':matplotlib.colors.LogNorm(), "cmap": "viridis"}
formatter = "%.1e"
if Type == 'Total':
u = total_field['j']
elif Type == 'Primary':
u = primary_field['j']
elif Type == 'Secondary':
uTotal = total_field['j']
uPrim = primary_field['j']
u = uTotal - uPrim
elif Field == 'Charge':
label = 'Charge Density ($C/m^2$)'
xtype = 'CC'
view = 'real'
streamOpts = None
ind = indCC
# formatter = LogFormatter(10, labelOnlyBase =False)
pcolorOpts = {"cmap": "RdBu_r"}
if Scale == 'Log':
linthresh = 1e-12
pcolorOpts = {'norm':matplotlib.colors.SymLogNorm(linthresh=linthresh, linscale=0.2), "cmap": "RdBu_r"}
formatter = "%.1e"
if Type == 'Total':
u = total_field['q']
elif Type == 'Primary':
u = primary_field['q']
elif Type == 'Secondary':
uTotal = total_field['q']
uPrim = primary_field['q']
u = uTotal - uPrim
elif Field == 'Sensitivity':
label = 'Sensitivity'
xtype = 'CC'
view = 'real'
streamOpts = None
ind = indCC
# formatter = None
# pcolorOpts = {"cmap":"viridis"}
# formatter = LogFormatter(10, labelOnlyBase =False)
pcolorOpts = {"cmap": "viridis"}
if Scale == 'Log':
linthresh = 1e-4
pcolorOpts = {'norm': matplotlib.colors.SymLogNorm(linthresh=linthresh, linscale=0.2), "cmap": "viridis"}
# formatter = formatter = "$10^{%.1f}$"
formatter = "%.1e"
if Type == 'Total':
u = getSensitivity(survey, A, B, M, N, mtrue)
elif Type == 'Primary':
u = getSensitivity(survey, A, B, M, N, mhalf)
elif Type == 'Secondary':
uTotal = getSensitivity(survey, A, B, M, N, mtrue)
uPrim = getSensitivity(survey, A, B, M, N, mhalf)
u = uTotal - uPrim
# u = np.log10(abs(u))
if Scale == 'Log':
eps = 1e-16
else:
eps = 0.
#print ind.shape
#print u.shape
#print xtype
dat = meshcore.plotImage(u[ind]+eps, vType=xtype, ax=ax[1], grid=False, view=view, streamOpts=streamOpts, pcolorOpts=pcolorOpts) #gridOpts ={'color':'k', 'alpha':0.5}
# Get cylinder outline
cylinderPoints = getCylinderPoints(xc, zc, ellips_a, ellips_b)
if(rhoTarget != rhohalf):
# Get plate corners
plateCorners = getPlateCorners(target_thick, target_wide, cylinderPoints)
# plot top of plate outline
ax[1].plot(plateCorners[[0, 1], 0], plateCorners[[0, 1], 1], linestyle='dashed', color='k')
# plot east side of plate outline
ax[1].plot(plateCorners[[1, 3], 0], plateCorners[[1, 3], 1], linestyle='dashed', color='k')
# plot bottom of plate outline
ax[1].plot(plateCorners[[2, 3], 0], plateCorners[[2, 3], 1], linestyle='dashed', color='k')
# plot west side of plate outline
ax[1].plot(plateCorners[[0, 2], 0], plateCorners[[0, 2], 1], linestyle='dashed', color='k')
if(rholayer != rhohalf):
OverburdenPoints = get_OverburdenPoints(cylinderPoints, overburden_thick)
if np.all(OverburdenPoints[:, 1]<=0.):
ax[1].plot(OverburdenPoints[:, 0], OverburdenPoints[:, 1], linestyle='dashed', color='k')
ax[1].plot(OverburdenPoints[:, 0], OverburdenPoints[:, 1], linestyle='dashed', color='k')
idcyl = cylinderPoints[:, 1]<=0.
ax[1].plot(cylinderPoints[idcyl, 0], cylinderPoints[idcyl, 1], linestyle='dashed', color='k')
#if (Field == 'Charge') and (Type != 'Primary') and (Type != 'Total'):
# qTotal = total_field['q']
# qPrim = primary_field['q']
# qSecondary = qTotal - qPrim
# qPosSum, qNegSum, qPosAvgLoc, qNegAvgLoc = sumCylinderCharges(xc, zc, r, qSecondary)
# ax[1].plot(qPosAvgLoc[0], qPosAvgLoc[1], marker = '.', color ='black', markersize = labelsize)
# ax[1].plot(qNegAvgLoc[0], qNegAvgLoc[1], marker = '.', color ='black', markersize = labelsize)
# if(qPosAvgLoc[0] > qNegAvgLoc[0]):
# xytext_qPos = (qPosAvgLoc[0] + 1., qPosAvgLoc[1] - 0.5)
# xytext_qNeg = (qNegAvgLoc[0] - 15., qNegAvgLoc[1] - 0.5)
# else:
# xytext_qPos = (qPosAvgLoc[0] - 15., qPosAvgLoc[1] - 0.5)
# xytext_qNeg = (qNegAvgLoc[0] + 1., qNegAvgLoc[1] - 0.5)
# ax[1].annotate('+Q = %2.1e'%(qPosSum), xy =xytext_qPos, xytext =xytext_qPos , fontsize = labelsize)
# ax[1].annotate('-Q = %2.1e'%(qNegSum), xy =xytext_qNeg, xytext =xytext_qNeg , fontsize = labelsize)
ax[1].set_xlabel('x (m)', fontsize=labelsize)
ax[1].set_ylabel('z (m)', fontsize=labelsize)
_, surfaceA = get_Surface(mtrue, A)
_, surfaceB = get_Surface(mtrue, B)
_, surfaceM = get_Surface(mtrue, M)
_, surfaceN = get_Surface(mtrue, N)
if(survey == "Dipole-Dipole"):
ax[1].plot(A, surfaceA+1., marker='v', color='red', markersize=labelsize)
ax[1].plot(B, surfaceB+1., marker='v', color='blue', markersize=labelsize)
ax[1].plot(M, surfaceM+1., marker='^', color='yellow', markersize=labelsize)
ax[1].plot(N, surfaceN+1., marker='^', color='green', markersize=labelsize)
xytextA1 = (A-0.5, surfaceA+2.)
xytextB1 = (B-0.5, surfaceB+2.)
xytextM1 = (M-0.5, surfaceM+2.)
xytextN1 = (N-0.5, surfaceN+2.)
ax[1].annotate('A', xy=xytextA1, xytext=xytextA1, fontsize=labelsize)
ax[1].annotate('B', xy=xytextB1, xytext=xytextB1, fontsize=labelsize)
ax[1].annotate('M', xy=xytextM1, xytext=xytextM1, fontsize=labelsize)
ax[1].annotate('N', xy=xytextN1, xytext=xytextN1, fontsize=labelsize)
elif(survey == "Pole-Dipole"):
ax[1].plot(A, surfaceA+1., marker='v', color='red', markersize=labelsize)
ax[1].plot(M, surfaceM+1., marker='^', color='yellow', markersize=labelsize)
ax[1].plot(N, surfaceN+1., marker='^', color='green', markersize=labelsize)
xytextA1 = (A-0.5, surfaceA+2.)
xytextM1 = (M-0.5, surfaceM+2.)
xytextN1 = (N-0.5, surfaceN+2.)
ax[1].annotate('A', xy=xytextA1, xytext=xytextA1, fontsize=labelsize)
ax[1].annotate('M', xy=xytextM1, xytext=xytextM1, fontsize=labelsize)
ax[1].annotate('N', xy=xytextN1, xytext=xytextN1, fontsize=labelsize)
elif(survey == "Dipole-Pole"):
ax[1].plot(A, surfaceA+1., marker='v', color='red', markersize=labelsize)
ax[1].plot(B, surfaceB+1., marker='v', color='blue', markersize=labelsize)
ax[1].plot(M, surfaceM+1., marker='^', color='yellow', markersize=labelsize)
xytextA1 = (A-0.5, surfaceA+2.)
xytextB1 = (B-0.5, surfaceB+2.)
xytextM1 = (M-0.5, surfaceM+2.)
ax[1].annotate('A', xy=xytextA1, xytext=xytextA1, fontsize=labelsize)
ax[1].annotate('B', xy=xytextB1, xytext=xytextB1, fontsize=labelsize)
ax[1].annotate('M', xy=xytextM1, xytext=xytextM1, fontsize=labelsize)
elif(survey == "Pole-Pole"):
ax[1].plot(A, surfaceA+1., marker='v', color='red', markersize=labelsize)
ax[1].plot(M, surfaceM+1., marker='^', color='yellow', markersize=labelsize)
xytextA1 = (A-0.5, surfaceA+2.)
xytextM1 = (M-0.5, surfaceM+2.)
ax[1].annotate('A', xy=xytextA1, xytext=xytextA1, fontsize=labelsize)
ax[1].annotate('M', xy=xytextM1, xytext=xytextM1, fontsize=labelsize)
ax[1].tick_params(axis='both', which='major', labelsize=ticksize)
cbar_ax = fig.add_axes([0.8, 0.05, 0.08, 0.5])
cbar_ax.axis('off')
vmin, vmax = dat[0].get_clim()
#if Field == 'Model':
# vmax =(np.r_[rhohalf, rholayer, rhoTarget]).max()
if Scale == 'Log':
if (Field == 'E') or (Field == 'J'):
cb = plt.colorbar(dat[0], ax=cbar_ax, format=formatter, ticks=np.logspace(np.log10(vmin), np.log10(vmax), 5))
elif (Field == 'Model'):
if (Type == 'Secondary'):
cb = plt.colorbar(dat[0], ax=cbar_ax, format=formatter, ticks=np.r_[np.minimum(0., vmin), np.maximum(0., vmax)])
else:
cb = plt.colorbar(dat[0], ax=cbar_ax, format=formatter, ticks=np.logspace(np.log10(vmin), np.log10(vmax), 5))
else:
cb = plt.colorbar(dat[0], ax=cbar_ax, format=formatter, ticks=np.r_[-1.*np.logspace(np.log10(-vmin-eps), np.log10(linthresh), 3)[:-1], 0., np.logspace(np.log10(linthresh), np.log10(vmax), 3)[1:]])
else:
if (Field == 'Model') and (Type == 'Secondary'):
cb = plt.colorbar(dat[0], ax=cbar_ax, format=formatter, ticks=np.r_[np.minimum(0., vmin), np.maximum(0., vmax)])
else:
cb = plt.colorbar(dat[0], ax=cbar_ax, format=formatter, ticks=np.linspace(vmin, vmax, 5))
#t_logloc = matplotlib.ticker.LogLocator(base =10.0, subs =[1.0, 2.], numdecs =4, numticks =8)
#tick_locator = matplotlib.ticker.SymmetricalLogLocator(t_logloc)
#cb.locator = tick_locator
#cb.ax.yaxis.set_major_locator(matplotlib.ticker.AutoLocator())
#cb.update_ticks()
cb.ax.tick_params(labelsize=ticksize)
cb.set_label(label, fontsize=labelsize)
ax[1].set_xlim([xmin, xmax])
ax[1].set_ylim([ymin, ymax])
#ax[1].set_aspect('equal')
plt.show()
def valley_app():
app = widgetify(PLOT, manual=True,
survey=ToggleButtons(options=['Dipole-Dipole', 'Dipole-Pole', 'Pole-Dipole', 'Pole-Pole'], value='Dipole-Dipole'),
xc=FloatSlider(min=-1005., max=1000., step=10., value=0.), #, continuous_update=False),
zc=FloatSlider(min=-1000., max=1000., step=10., value=250.), #, continuous_update=False),
ellips_a=FloatSlider(min=10., max=10000., step=100., value=1000.), #, continuous_update=False),
ellips_b=FloatSlider(min=10., max=10000., step=100., value=500.), #, continuous_update=False),
rhohalf=FloatText(min=1e-8, max=1e8, value=1000., description='$\\rho_1$'), #, continuous_update=False, description='$\\rho_1$'),
rholayer=FloatText(min=1e-8, max=1e8, value=100., description='$\\rho_2$'), #, continuous_update=False, description='$\\rho_2$'),
rhoTarget=FloatText(min=1e-8, max=1e8, value=500., description='$\\rho_3$'), #, continuous_update=False, description='$\\rho_3$'),
overburden_thick=FloatSlider(min=0., max=1000., step= 10., value=200.), #, continuous_update=False),
overburden_wide=fixed(2000.), #, continuous_update=False),
target_thick=FloatSlider(min=0., max=1000., step= 10., value=200.), #, continuous_update=False),
target_wide=FloatSlider(min=0., max=1000., step= 10., value=200.), #, continuous_update=False),
A=FloatSlider(min=-1010., max=1010., step=20., value=-510.), #, continuous_update=False),
B=FloatSlider(min=-1010., max=1010., step=20., value=510.), #, continuous_update=False),
M=FloatSlider(min=-1010., max=1010., step=20., value=-210.), #, continuous_update=False),
N=FloatSlider(min=-1010., max=1010., step=20., value=210.), #, continuous_update=False),
Field=ToggleButtons(options=['Model', 'Potential', 'E', 'J', 'Charge', 'Sensitivity'], value='J'),
whichprimary=ToggleButtons(options=['air', 'overburden'], value='overburden'),
Type=ToggleButtons(options=['Total', 'Primary', 'Secondary'], value='Total'),
Scale=ToggleButtons(options=['Linear', 'Log'], value ='Log')
)
return app
if __name__ == '__main__':
app = valley_app()
| [
"numpy.maximum",
"numpy.abs",
"SimPEG.Utils.ExtractCoreMesh",
"numpy.ones",
"SimPEG.EM.Static.DC.Src.Pole",
"ipywidgets.fixed",
"matplotlib.colors.LogNorm",
"numpy.arange",
"numpy.sqrt",
"numpy.unique",
"matplotlib.colors.SymLogNorm",
"SimPEG.EM.Static.DC.Rx.Pole_ky",
"numpy.max",
"SimPEG.... | [((828, 859), 'SimPEG.Mesh.TensorMesh', 'Mesh.TensorMesh', (['[hx, hy]', '"""CN"""'], {}), "([hx, hy], 'CN')\n", (843, 859), False, 'from SimPEG import Mesh, Maps, SolverLU, Utils\n'), ((869, 886), 'SimPEG.Maps.ExpMap', 'Maps.ExpMap', (['mesh'], {}), '(mesh)\n', (880, 886), False, 'from SimPEG import Mesh, Maps, SolverLU, Utils\n'), ((1017, 1045), 'SimPEG.Utils.ExtractCoreMesh', 'ExtractCoreMesh', (['xylim', 'mesh'], {}), '(xylim, mesh)\n', (1032, 1045), False, 'from SimPEG.Utils import ExtractCoreMesh\n'), ((1303, 1331), 'numpy.concatenate', 'np.concatenate', (['(indx, indy)'], {}), '((indx, indy))\n', (1317, 1331), True, 'import numpy as np\n'), ((1360, 1373), 'numpy.log', 'np.log', (['(1e-08)'], {}), '(1e-08)\n', (1366, 1373), True, 'import numpy as np\n'), ((1385, 1399), 'numpy.log', 'np.log', (['(0.0001)'], {}), '(0.0001)\n', (1391, 1399), True, 'import numpy as np\n'), ((1407, 1419), 'numpy.log', 'np.log', (['(0.01)'], {}), '(0.01)\n', (1413, 1419), True, 'import numpy as np\n'), ((1451, 1464), 'numpy.log', 'np.log', (['(0.001)'], {}), '(0.001)\n', (1457, 1464), True, 'import numpy as np\n'), ((1669, 1689), 'copy.deepcopy', 'copy.deepcopy', (['mtrue'], {}), '(mtrue)\n', (1682, 1689), False, 'import copy\n'), ((1823, 1843), 'copy.deepcopy', 'copy.deepcopy', (['mtrue'], {}), '(mtrue)\n', (1836, 1843), False, 'import copy\n'), ((2315, 2335), 'copy.deepcopy', 'copy.deepcopy', (['mtrue'], {}), '(mtrue)\n', (2328, 2335), False, 'import copy\n'), ((2590, 2607), 'SimPEG.Utils.mkvc', 'Utils.mkvc', (['mtrue'], {}), '(mtrue)\n', (2600, 2607), False, 'from SimPEG import Mesh, Maps, SolverLU, Utils\n'), ((3470, 3487), 'SimPEG.EM.Static.DC.Rx.Pole_ky', 'DC.Rx.Pole_ky', (['Mx'], {}), '(Mx)\n', (3483, 3487), False, 'from SimPEG.EM.Static import DC\n'), ((3749, 3768), 'SimPEG.EM.Static.DC.Survey_ky', 'DC.Survey_ky', (['[src]'], {}), '([src])\n', (3761, 3768), False, 'from SimPEG.EM.Static import DC\n'), ((3856, 3875), 'SimPEG.EM.Static.DC.Survey_ky', 
'DC.Survey_ky', (['[src]'], {}), '([src])\n', (3868, 3875), False, 'from SimPEG.EM.Static import DC\n'), ((3893, 3912), 'SimPEG.EM.Static.DC.Survey_ky', 'DC.Survey_ky', (['[src]'], {}), '([src])\n', (3905, 3912), False, 'from SimPEG.EM.Static import DC\n'), ((3984, 4023), 'SimPEG.EM.Static.DC.Problem2D_CC', 'DC.Problem2D_CC', (['mesh'], {'sigmaMap': 'mapping'}), '(mesh, sigmaMap=mapping)\n', (3999, 4023), False, 'from SimPEG.EM.Static import DC\n'), ((4106, 4145), 'SimPEG.EM.Static.DC.Problem2D_CC', 'DC.Problem2D_CC', (['mesh'], {'sigmaMap': 'mapping'}), '(mesh, sigmaMap=mapping)\n', (4121, 4145), False, 'from SimPEG.EM.Static import DC\n'), ((4164, 4203), 'SimPEG.EM.Static.DC.Problem2D_CC', 'DC.Problem2D_CC', (['mesh'], {'sigmaMap': 'mapping'}), '(mesh, sigmaMap=mapping)\n', (4179, 4203), False, 'from SimPEG.EM.Static import DC\n'), ((5653, 5681), 'numpy.unique', 'np.unique', (['mesh.gridCC[:, 0]'], {}), '(mesh.gridCC[:, 0])\n', (5662, 5681), True, 'import numpy as np\n'), ((6226, 6263), 'numpy.arange', 'np.arange', (['(-a)', '(a + a / 10.0)', '(a / 10.0)'], {}), '(-a, a + a / 10.0, a / 10.0)\n', (6235, 6263), True, 'import numpy as np\n'), ((6272, 6310), 'numpy.arange', 'np.arange', (['a', '(-a - a / 10.0)', '(-a / 10.0)'], {}), '(a, -a - a / 10.0, -a / 10.0)\n', (6281, 6310), True, 'import numpy as np\n'), ((6814, 6850), 'numpy.where', 'np.where', (['(cylinderPoints[:, 1] < 0.0)'], {}), '(cylinderPoints[:, 1] < 0.0)\n', (6822, 6850), True, 'import numpy as np\n'), ((7200, 7386), 'numpy.array', 'np.array', (['[[-0.5 * target_wide, 0.5 * target_thick], [0.5 * target_wide, 0.5 *\n target_thick], [-0.5 * target_wide, -0.5 * target_thick], [0.5 *\n target_wide, -0.5 * target_thick]]'], {}), '([[-0.5 * target_wide, 0.5 * target_thick], [0.5 * target_wide, 0.5 *\n target_thick], [-0.5 * target_wide, -0.5 * target_thick], [0.5 *\n target_wide, -0.5 * target_thick]])\n', (7208, 7386), True, 'import numpy as np\n'), ((7621, 7715), 'numpy.arange', 'np.arange', 
(['(-target_wide / 2.0)', '(target_wide / 2.0 + target_wide / 10.0)', '(target_wide / 10.0)'], {}), '(-target_wide / 2.0, target_wide / 2.0 + target_wide / 10.0, \n target_wide / 10.0)\n', (7630, 7715), True, 'import numpy as np\n'), ((7713, 7808), 'numpy.arange', 'np.arange', (['(target_wide / 2.0)', '(-target_wide / 2.0 - target_wide / 10.0)', '(-target_wide / 10.0)'], {}), '(target_wide / 2.0, -target_wide / 2.0 - target_wide / 10.0, -\n target_wide / 10.0)\n', (7722, 7808), True, 'import numpy as np\n'), ((8631, 8650), 'SimPEG.EM.Static.DC.Survey_ky', 'DC.Survey_ky', (['[src]'], {}), '([src])\n', (8643, 8650), False, 'from SimPEG.EM.Static import DC\n'), ((8665, 8704), 'SimPEG.EM.Static.DC.Problem2D_CC', 'DC.Problem2D_CC', (['mesh'], {'sigmaMap': 'mapping'}), '(mesh, sigmaMap=mapping)\n', (8680, 8704), False, 'from SimPEG.EM.Static import DC\n'), ((9871, 9894), 'numpy.log', 'np.log', (['(1.0 / rhoTarget)'], {}), '(1.0 / rhoTarget)\n', (9877, 9894), True, 'import numpy as np\n'), ((9910, 9932), 'numpy.log', 'np.log', (['(1.0 / rholayer)'], {}), '(1.0 / rholayer)\n', (9916, 9932), True, 'import numpy as np\n'), ((9947, 9968), 'numpy.log', 'np.log', (['(1.0 / rhohalf)'], {}), '(1.0 / rhohalf)\n', (9953, 9968), True, 'import numpy as np\n'), ((10424, 10483), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(2)', '(1)'], {'figsize': '(9 * 1.5, 9 * 1.5)', 'sharex': '(True)'}), '(2, 1, figsize=(9 * 1.5, 9 * 1.5), sharex=True)\n', (10436, 10483), True, 'import matplotlib.pyplot as plt\n'), ((10888, 10915), 'numpy.array', 'np.array', (['[-1000.0, 1000.0]'], {}), '([-1000.0, 1000.0])\n', (10896, 10915), True, 'import numpy as np\n'), ((26273, 26283), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (26281, 26283), True, 'import matplotlib.pyplot as plt\n'), ((1640, 1656), 'numpy.ones', 'np.ones', (['mesh.nC'], {}), '(mesh.nC)\n', (1647, 1656), True, 'import numpy as np\n'), ((2247, 2273), 'numpy.ones_like', 'np.ones_like', (['mtrue[overb]'], {}), 
'(mtrue[overb])\n', (2259, 2273), True, 'import numpy as np\n'), ((2549, 2576), 'numpy.ones_like', 'np.ones_like', (['mtrue[target]'], {}), '(mtrue[target])\n', (2561, 2576), True, 'import numpy as np\n'), ((2808, 2821), 'numpy.log', 'np.log', (['(1e-08)'], {}), '(1e-08)\n', (2814, 2821), True, 'import numpy as np\n'), ((2915, 2948), 'numpy.logical_and', 'np.logical_and', (['columns.T', 'active'], {}), '(columns.T, active)\n', (2929, 2948), True, 'import numpy as np\n'), ((3190, 3212), 'SimPEG.Utils.mkvc', 'Utils.mkvc', (['np.r_[idm]'], {}), '(np.r_[idm])\n', (3200, 3212), False, 'from SimPEG import Mesh, Maps, SolverLU, Utils\n'), ((3214, 3240), 'SimPEG.Utils.mkvc', 'Utils.mkvc', (['np.r_[surface]'], {}), '(np.r_[surface])\n', (3224, 3240), False, 'from SimPEG import Mesh, Maps, SolverLU, Utils\n'), ((3551, 3588), 'SimPEG.EM.Static.DC.Src.Pole', 'DC.Src.Pole', (['[rx]', 'np.r_[A, surfaceA]'], {}), '([rx], np.r_[A, surfaceA])\n', (3562, 3588), False, 'from SimPEG.EM.Static import DC\n'), ((3613, 3672), 'SimPEG.EM.Static.DC.Src.Dipole', 'DC.Src.Dipole', (['[rx]', 'np.r_[A, surfaceA]', 'np.r_[B, surfaceB]'], {}), '([rx], np.r_[A, surfaceA], np.r_[B, surfaceB])\n', (3626, 3672), False, 'from SimPEG.EM.Static import DC\n'), ((5864, 5923), 'SimPEG.Utils.closestPoints', 'Utils.closestPoints', (['mesh', '[xmax + 60.0, 0.0]'], {'gridLoc': '"""CC"""'}), "(mesh, [xmax + 60.0, 0.0], gridLoc='CC')\n", (5883, 5923), False, 'from SimPEG import Mesh, Maps, SolverLU, Utils\n'), ((6874, 6933), 'numpy.maximum', 'np.maximum', (['cylinderPoints[i, 1]', '(bottom + overburden_thick)'], {}), '(cylinderPoints[i, 1], bottom + overburden_thick)\n', (6884, 6933), True, 'import numpy as np\n'), ((6958, 7012), 'numpy.vstack', 'np.vstack', (['[cylinderPoints[indb, 0], overburdenPoints]'], {}), '([cylinderPoints[indb, 0], overburdenPoints])\n', (6967, 7012), True, 'import numpy as np\n'), ((7802, 7825), 'numpy.ones_like', 'np.ones_like', (['xLocOrig1'], {}), '(xLocOrig1)\n', (7814, 7825), True, 
'import numpy as np\n'), ((7859, 7882), 'numpy.ones_like', 'np.ones_like', (['xLocOrig1'], {}), '(xLocOrig1)\n', (7871, 7882), True, 'import numpy as np\n'), ((8115, 8160), 'SimPEG.EM.Static.DC.Rx.Dipole_ky', 'DC.Rx.Dipole_ky', (['np.r_[M, 0.0]', 'np.r_[N, 0.0]'], {}), '(np.r_[M, 0.0], np.r_[N, 0.0])\n', (8130, 8160), False, 'from SimPEG.EM.Static import DC\n'), ((8173, 8222), 'SimPEG.EM.Static.DC.Src.Dipole', 'DC.Src.Dipole', (['[rx]', 'np.r_[A, 0.0]', 'np.r_[B, 0.0]'], {}), '([rx], np.r_[A, 0.0], np.r_[B, 0.0])\n', (8186, 8222), False, 'from SimPEG.EM.Static import DC\n'), ((8827, 8842), 'numpy.array', 'np.array', (['[1.0]'], {}), '([1.0])\n', (8835, 8842), True, 'import numpy as np\n'), ((14671, 14685), 'numpy.ma.array', 'np.ma.array', (['u'], {}), '(u)\n', (14682, 14685), True, 'import numpy as np\n'), ((20207, 20244), 'numpy.all', 'np.all', (['(OverburdenPoints[:, 1] <= 0.0)'], {}), '(OverburdenPoints[:, 1] <= 0.0)\n', (20213, 20244), True, 'import numpy as np\n'), ((2677, 2712), 'numpy.abs', 'np.abs', (['(mesh.gridCC[:, 0, None] - A)'], {}), '(mesh.gridCC[:, 0, None] - A)\n', (2683, 2712), True, 'import numpy as np\n'), ((6344, 6379), 'numpy.sqrt', 'np.sqrt', (['(1.0 - (xLocOrig1 / a) ** 2)'], {}), '(1.0 - (xLocOrig1 / a) ** 2)\n', (6351, 6379), True, 'import numpy as np\n'), ((6421, 6456), 'numpy.sqrt', 'np.sqrt', (['(1.0 - (xLocOrig2 / a) ** 2)'], {}), '(1.0 - (xLocOrig2 / a) ** 2)\n', (6428, 6456), True, 'import numpy as np\n'), ((6509, 6532), 'numpy.ones_like', 'np.ones_like', (['xLocOrig1'], {}), '(xLocOrig1)\n', (6521, 6532), True, 'import numpy as np\n'), ((6560, 6583), 'numpy.ones_like', 'np.ones_like', (['xLocOrig2'], {}), '(xLocOrig2)\n', (6572, 6583), True, 'import numpy as np\n'), ((8269, 8314), 'SimPEG.EM.Static.DC.Rx.Dipole_ky', 'DC.Rx.Dipole_ky', (['np.r_[M, 0.0]', 'np.r_[N, 0.0]'], {}), '(np.r_[M, 0.0], np.r_[N, 0.0])\n', (8284, 8314), False, 'from SimPEG.EM.Static import DC\n'), ((8327, 8359), 'SimPEG.EM.Static.DC.Src.Pole', 'DC.Src.Pole', 
(['[rx]', 'np.r_[A, 0.0]'], {}), '([rx], np.r_[A, 0.0])\n', (8338, 8359), False, 'from SimPEG.EM.Static import DC\n'), ((10022, 10035), 'numpy.log', 'np.log', (['(1e-08)'], {}), '(1e-08)\n', (10028, 10035), True, 'import numpy as np\n'), ((10852, 10875), 'numpy.abs', 'np.abs', (['phiTotalSurface'], {}), '(phiTotalSurface)\n', (10858, 10875), True, 'import numpy as np\n'), ((26365, 26475), 'ipywidgets.ToggleButtons', 'ToggleButtons', ([], {'options': "['Dipole-Dipole', 'Dipole-Pole', 'Pole-Dipole', 'Pole-Pole']", 'value': '"""Dipole-Dipole"""'}), "(options=['Dipole-Dipole', 'Dipole-Pole', 'Pole-Dipole',\n 'Pole-Pole'], value='Dipole-Dipole')\n", (26378, 26475), False, 'from ipywidgets import interact, interact_manual, IntSlider, FloatSlider, FloatText, ToggleButtons, fixed, Widget\n'), ((26492, 26550), 'ipywidgets.FloatSlider', 'FloatSlider', ([], {'min': '(-1005.0)', 'max': '(1000.0)', 'step': '(10.0)', 'value': '(0.0)'}), '(min=-1005.0, max=1000.0, step=10.0, value=0.0)\n', (26503, 26550), False, 'from ipywidgets import interact, interact_manual, IntSlider, FloatSlider, FloatText, ToggleButtons, fixed, Widget\n'), ((26596, 26656), 'ipywidgets.FloatSlider', 'FloatSlider', ([], {'min': '(-1000.0)', 'max': '(1000.0)', 'step': '(10.0)', 'value': '(250.0)'}), '(min=-1000.0, max=1000.0, step=10.0, value=250.0)\n', (26607, 26656), False, 'from ipywidgets import interact, interact_manual, IntSlider, FloatSlider, FloatText, ToggleButtons, fixed, Widget\n'), ((26708, 26768), 'ipywidgets.FloatSlider', 'FloatSlider', ([], {'min': '(10.0)', 'max': '(10000.0)', 'step': '(100.0)', 'value': '(1000.0)'}), '(min=10.0, max=10000.0, step=100.0, value=1000.0)\n', (26719, 26768), False, 'from ipywidgets import interact, interact_manual, IntSlider, FloatSlider, FloatText, ToggleButtons, fixed, Widget\n'), ((26820, 26879), 'ipywidgets.FloatSlider', 'FloatSlider', ([], {'min': '(10.0)', 'max': '(10000.0)', 'step': '(100.0)', 'value': '(500.0)'}), '(min=10.0, max=10000.0, step=100.0, 
value=500.0)\n', (26831, 26879), False, 'from ipywidgets import interact, interact_manual, IntSlider, FloatSlider, FloatText, ToggleButtons, fixed, Widget\n'), ((26930, 27006), 'ipywidgets.FloatText', 'FloatText', ([], {'min': '(1e-08)', 'max': '(100000000.0)', 'value': '(1000.0)', 'description': '"""$\\\\rho_1$"""'}), "(min=1e-08, max=100000000.0, value=1000.0, description='$\\\\rho_1$')\n", (26939, 27006), False, 'from ipywidgets import interact, interact_manual, IntSlider, FloatSlider, FloatText, ToggleButtons, fixed, Widget\n'), ((27077, 27152), 'ipywidgets.FloatText', 'FloatText', ([], {'min': '(1e-08)', 'max': '(100000000.0)', 'value': '(100.0)', 'description': '"""$\\\\rho_2$"""'}), "(min=1e-08, max=100000000.0, value=100.0, description='$\\\\rho_2$')\n", (27086, 27152), False, 'from ipywidgets import interact, interact_manual, IntSlider, FloatSlider, FloatText, ToggleButtons, fixed, Widget\n'), ((27224, 27299), 'ipywidgets.FloatText', 'FloatText', ([], {'min': '(1e-08)', 'max': '(100000000.0)', 'value': '(500.0)', 'description': '"""$\\\\rho_3$"""'}), "(min=1e-08, max=100000000.0, value=500.0, description='$\\\\rho_3$')\n", (27233, 27299), False, 'from ipywidgets import interact, interact_manual, IntSlider, FloatSlider, FloatText, ToggleButtons, fixed, Widget\n'), ((27378, 27434), 'ipywidgets.FloatSlider', 'FloatSlider', ([], {'min': '(0.0)', 'max': '(1000.0)', 'step': '(10.0)', 'value': '(200.0)'}), '(min=0.0, max=1000.0, step=10.0, value=200.0)\n', (27389, 27434), False, 'from ipywidgets import interact, interact_manual, IntSlider, FloatSlider, FloatText, ToggleButtons, fixed, Widget\n'), ((27494, 27507), 'ipywidgets.fixed', 'fixed', (['(2000.0)'], {}), '(2000.0)\n', (27499, 27507), False, 'from ipywidgets import interact, interact_manual, IntSlider, FloatSlider, FloatText, ToggleButtons, fixed, Widget\n'), ((27566, 27622), 'ipywidgets.FloatSlider', 'FloatSlider', ([], {'min': '(0.0)', 'max': '(1000.0)', 'step': '(10.0)', 'value': '(200.0)'}), '(min=0.0, 
max=1000.0, step=10.0, value=200.0)\n', (27577, 27622), False, 'from ipywidgets import interact, interact_manual, IntSlider, FloatSlider, FloatText, ToggleButtons, fixed, Widget\n'), ((27678, 27734), 'ipywidgets.FloatSlider', 'FloatSlider', ([], {'min': '(0.0)', 'max': '(1000.0)', 'step': '(10.0)', 'value': '(200.0)'}), '(min=0.0, max=1000.0, step=10.0, value=200.0)\n', (27689, 27734), False, 'from ipywidgets import interact, interact_manual, IntSlider, FloatSlider, FloatText, ToggleButtons, fixed, Widget\n'), ((27780, 27841), 'ipywidgets.FloatSlider', 'FloatSlider', ([], {'min': '(-1010.0)', 'max': '(1010.0)', 'step': '(20.0)', 'value': '(-510.0)'}), '(min=-1010.0, max=1010.0, step=20.0, value=-510.0)\n', (27791, 27841), False, 'from ipywidgets import interact, interact_manual, IntSlider, FloatSlider, FloatText, ToggleButtons, fixed, Widget\n'), ((27886, 27946), 'ipywidgets.FloatSlider', 'FloatSlider', ([], {'min': '(-1010.0)', 'max': '(1010.0)', 'step': '(20.0)', 'value': '(510.0)'}), '(min=-1010.0, max=1010.0, step=20.0, value=510.0)\n', (27897, 27946), False, 'from ipywidgets import interact, interact_manual, IntSlider, FloatSlider, FloatText, ToggleButtons, fixed, Widget\n'), ((27991, 28052), 'ipywidgets.FloatSlider', 'FloatSlider', ([], {'min': '(-1010.0)', 'max': '(1010.0)', 'step': '(20.0)', 'value': '(-210.0)'}), '(min=-1010.0, max=1010.0, step=20.0, value=-210.0)\n', (28002, 28052), False, 'from ipywidgets import interact, interact_manual, IntSlider, FloatSlider, FloatText, ToggleButtons, fixed, Widget\n'), ((28097, 28157), 'ipywidgets.FloatSlider', 'FloatSlider', ([], {'min': '(-1010.0)', 'max': '(1010.0)', 'step': '(20.0)', 'value': '(210.0)'}), '(min=-1010.0, max=1010.0, step=20.0, value=210.0)\n', (28108, 28157), False, 'from ipywidgets import interact, interact_manual, IntSlider, FloatSlider, FloatText, ToggleButtons, fixed, Widget\n'), ((28206, 28301), 'ipywidgets.ToggleButtons', 'ToggleButtons', ([], {'options': "['Model', 'Potential', 'E', 'J', 
'Charge', 'Sensitivity']", 'value': '"""J"""'}), "(options=['Model', 'Potential', 'E', 'J', 'Charge',\n 'Sensitivity'], value='J')\n", (28219, 28301), False, 'from ipywidgets import interact, interact_manual, IntSlider, FloatSlider, FloatText, ToggleButtons, fixed, Widget\n'), ((28328, 28392), 'ipywidgets.ToggleButtons', 'ToggleButtons', ([], {'options': "['air', 'overburden']", 'value': '"""overburden"""'}), "(options=['air', 'overburden'], value='overburden')\n", (28341, 28392), False, 'from ipywidgets import interact, interact_manual, IntSlider, FloatSlider, FloatText, ToggleButtons, fixed, Widget\n'), ((28415, 28486), 'ipywidgets.ToggleButtons', 'ToggleButtons', ([], {'options': "['Total', 'Primary', 'Secondary']", 'value': '"""Total"""'}), "(options=['Total', 'Primary', 'Secondary'], value='Total')\n", (28428, 28486), False, 'from ipywidgets import interact, interact_manual, IntSlider, FloatSlider, FloatText, ToggleButtons, fixed, Widget\n'), ((28510, 28563), 'ipywidgets.ToggleButtons', 'ToggleButtons', ([], {'options': "['Linear', 'Log']", 'value': '"""Log"""'}), "(options=['Linear', 'Log'], value='Log')\n", (28523, 28563), False, 'from ipywidgets import interact, interact_manual, IntSlider, FloatSlider, FloatText, ToggleButtons, fixed, Widget\n'), ((6617, 6642), 'numpy.vstack', 'np.vstack', (['[xLoc1, zLoc1]'], {}), '([xLoc1, zLoc1])\n', (6626, 6642), True, 'import numpy as np\n'), ((6646, 6671), 'numpy.vstack', 'np.vstack', (['[xLoc2, zLoc2]'], {}), '([xLoc2, zLoc2])\n', (6655, 6671), True, 'import numpy as np\n'), ((7959, 7984), 'numpy.vstack', 'np.vstack', (['[xLoc1, zLoc1]'], {}), '([xLoc1, zLoc1])\n', (7968, 7984), True, 'import numpy as np\n'), ((7988, 8013), 'numpy.vstack', 'np.vstack', (['[xLoc2, zLoc2]'], {}), '([xLoc2, zLoc2])\n', (7997, 8013), True, 'import numpy as np\n'), ((8407, 8435), 'SimPEG.EM.Static.DC.Rx.Pole_ky', 'DC.Rx.Pole_ky', (['np.r_[M, 0.0]'], {}), '(np.r_[M, 0.0])\n', (8420, 8435), False, 'from SimPEG.EM.Static import DC\n'), 
((8449, 8498), 'SimPEG.EM.Static.DC.Src.Dipole', 'DC.Src.Dipole', (['[rx]', 'np.r_[A, 0.0]', 'np.r_[B, 0.0]'], {}), '([rx], np.r_[A, 0.0], np.r_[B, 0.0])\n', (8462, 8498), False, 'from SimPEG.EM.Static import DC\n'), ((13930, 13957), 'matplotlib.colors.LogNorm', 'matplotlib.colors.LogNorm', ([], {}), '()\n', (13955, 13957), False, 'import matplotlib\n'), ((14773, 14786), 'numpy.log', 'np.log', (['(1e-08)'], {}), '(1e-08)\n', (14779, 14786), True, 'import numpy as np\n'), ((8543, 8571), 'SimPEG.EM.Static.DC.Rx.Pole_ky', 'DC.Rx.Pole_ky', (['np.r_[M, 0.0]'], {}), '(np.r_[M, 0.0])\n', (8556, 8571), False, 'from SimPEG.EM.Static import DC\n'), ((8585, 8617), 'SimPEG.EM.Static.DC.Src.Pole', 'DC.Src.Pole', (['[rx]', 'np.r_[A, 0.0]'], {}), '([rx], np.r_[A, 0.0])\n', (8596, 8617), False, 'from SimPEG.EM.Static import DC\n'), ((15103, 15166), 'matplotlib.colors.SymLogNorm', 'matplotlib.colors.SymLogNorm', ([], {'linthresh': 'linthresh', 'linscale': '(0.2)'}), '(linthresh=linthresh, linscale=0.2)\n', (15131, 15166), False, 'import matplotlib\n'), ((25766, 25792), 'numpy.linspace', 'np.linspace', (['vmin', 'vmax', '(5)'], {}), '(vmin, vmax, 5)\n', (25777, 25792), True, 'import numpy as np\n'), ((7441, 7457), 'numpy.repeat', 'np.repeat', (['xc', '(4)'], {}), '(xc, 4)\n', (7450, 7457), True, 'import numpy as np\n'), ((7475, 7491), 'numpy.repeat', 'np.repeat', (['zc', '(4)'], {}), '(zc, 4)\n', (7484, 7491), True, 'import numpy as np\n'), ((9087, 9100), 'numpy.abs', 'np.abs', (['(N - B)'], {}), '(N - B)\n', (9093, 9100), True, 'import numpy as np\n'), ((16253, 16280), 'matplotlib.colors.LogNorm', 'matplotlib.colors.LogNorm', ([], {}), '()\n', (16278, 16280), False, 'import matplotlib\n'), ((24881, 24895), 'numpy.log10', 'np.log10', (['vmin'], {}), '(vmin)\n', (24889, 24895), True, 'import numpy as np\n'), ((24897, 24911), 'numpy.log10', 'np.log10', (['vmax'], {}), '(vmax)\n', (24905, 24911), True, 'import numpy as np\n'), ((9064, 9077), 'numpy.abs', 'np.abs', (['(N - A)'], {}), 
'(N - A)\n', (9070, 9077), True, 'import numpy as np\n'), ((9197, 9210), 'numpy.abs', 'np.abs', (['(A - M)'], {}), '(A - M)\n', (9203, 9210), True, 'import numpy as np\n'), ((9220, 9233), 'numpy.abs', 'np.abs', (['(N - A)'], {}), '(N - A)\n', (9226, 9233), True, 'import numpy as np\n'), ((14504, 14567), 'matplotlib.colors.SymLogNorm', 'matplotlib.colors.SymLogNorm', ([], {'linthresh': 'linthresh', 'linscale': '(0.2)'}), '(linthresh=linthresh, linscale=0.2)\n', (14532, 14567), False, 'import matplotlib\n'), ((16928, 16955), 'matplotlib.colors.LogNorm', 'matplotlib.colors.LogNorm', ([], {}), '()\n', (16953, 16955), False, 'import matplotlib\n'), ((25633, 25654), 'numpy.minimum', 'np.minimum', (['(0.0)', 'vmin'], {}), '(0.0, vmin)\n', (25643, 25654), True, 'import numpy as np\n'), ((25655, 25676), 'numpy.maximum', 'np.maximum', (['(0.0)', 'vmax'], {}), '(0.0, vmax)\n', (25665, 25676), True, 'import numpy as np\n'), ((3085, 3118), 'numpy.max', 'np.max', (['mesh.gridCC[ind[:, i], 1]'], {}), '(mesh.gridCC[ind[:, i], 1])\n', (3091, 3118), True, 'import numpy as np\n'), ((9018, 9031), 'numpy.abs', 'np.abs', (['(A - M)'], {}), '(A - M)\n', (9024, 9031), True, 'import numpy as np\n'), ((9041, 9054), 'numpy.abs', 'np.abs', (['(M - B)'], {}), '(M - B)\n', (9047, 9054), True, 'import numpy as np\n'), ((9330, 9343), 'numpy.abs', 'np.abs', (['(A - M)'], {}), '(A - M)\n', (9336, 9343), True, 'import numpy as np\n'), ((9353, 9366), 'numpy.abs', 'np.abs', (['(M - B)'], {}), '(M - B)\n', (9359, 9366), True, 'import numpy as np\n'), ((9458, 9471), 'numpy.abs', 'np.abs', (['(A - M)'], {}), '(A - M)\n', (9464, 9471), True, 'import numpy as np\n'), ((17631, 17694), 'matplotlib.colors.SymLogNorm', 'matplotlib.colors.SymLogNorm', ([], {'linthresh': 'linthresh', 'linscale': '(0.2)'}), '(linthresh=linthresh, linscale=0.2)\n', (17659, 17694), False, 'import matplotlib\n'), ((25226, 25240), 'numpy.log10', 'np.log10', (['vmin'], {}), '(vmin)\n', (25234, 25240), True, 'import numpy as np\n'), 
((25242, 25256), 'numpy.log10', 'np.log10', (['vmax'], {}), '(vmax)\n', (25250, 25256), True, 'import numpy as np\n'), ((18430, 18493), 'matplotlib.colors.SymLogNorm', 'matplotlib.colors.SymLogNorm', ([], {'linthresh': 'linthresh', 'linscale': '(0.2)'}), '(linthresh=linthresh, linscale=0.2)\n', (18458, 18493), False, 'import matplotlib\n'), ((25073, 25094), 'numpy.minimum', 'np.minimum', (['(0.0)', 'vmin'], {}), '(0.0, vmin)\n', (25083, 25094), True, 'import numpy as np\n'), ((25095, 25116), 'numpy.maximum', 'np.maximum', (['(0.0)', 'vmax'], {}), '(0.0, vmax)\n', (25105, 25116), True, 'import numpy as np\n'), ((25440, 25459), 'numpy.log10', 'np.log10', (['linthresh'], {}), '(linthresh)\n', (25448, 25459), True, 'import numpy as np\n'), ((25461, 25475), 'numpy.log10', 'np.log10', (['vmax'], {}), '(vmax)\n', (25469, 25475), True, 'import numpy as np\n'), ((25373, 25394), 'numpy.log10', 'np.log10', (['(-vmin - eps)'], {}), '(-vmin - eps)\n', (25381, 25394), True, 'import numpy as np\n'), ((25394, 25413), 'numpy.log10', 'np.log10', (['linthresh'], {}), '(linthresh)\n', (25402, 25413), True, 'import numpy as np\n')] |
import os
from keras.preprocessing.image import ImageDataGenerator
import numpy as np
def get_stereo_image_generators(train_folder, img_rows=256, img_cols=832, batch_size=16, shuffle=True):
    """Return an endless generator of synchronized stereo image batches.

    Reads left images from the ``image_2`` sub-folder and right images from
    ``image_3`` under ``train_folder``, applies identical light augmentation
    to both streams, and yields ``(stereo, [stereo, zeros, zeros])`` pairs,
    where ``stereo`` is the left/right pair concatenated along the channel
    axis and the zero arrays have shape
    ``(batch, img_rows - 4, img_cols - 4)``.

    Parameters
    ----------
    train_folder : str
        Directory containing the ``image_2`` and ``image_3`` class folders.
    img_rows, img_cols : int
        Target height and width each image is resized to.
    batch_size : int
        Number of image pairs per yielded batch.
    shuffle : bool
        Whether the underlying directory iterators shuffle their files.
    """
    augmenter = ImageDataGenerator(rescale=1.0 / 255.0,
                                   rotation_range=5,
                                   shear_range=0.01,
                                   zoom_range=0.01,
                                   height_shift_range=0.01,
                                   width_shift_range=0.01)
    # Shared flow settings; the identical seed keeps the left and right
    # iterators aligned so each yielded pair is a true stereo pair.
    flow_kwargs = dict(target_size=(img_rows, img_cols),
                       batch_size=batch_size,
                       seed=10,
                       shuffle=shuffle,
                       class_mode=None)
    left_flow = augmenter.flow_from_directory(train_folder,
                                              classes=['image_2'],
                                              **flow_kwargs)
    right_flow = augmenter.flow_from_directory(train_folder,
                                               classes=['image_3'],
                                               **flow_kwargs)

    def _paired_batches():
        while True:
            stereo = np.concatenate((left_flow.next(), right_flow.next()), axis=2)
            # Zero targets sized to the (cropped) output; batch dimension is
            # taken from the actual batch, which may be smaller at the end.
            zeros_shape = (stereo.shape[0], img_rows - 4, img_cols - 4)
            yield stereo, [stereo, np.zeros(shape=zeros_shape),
                           np.zeros(shape=zeros_shape)]

    return _paired_batches()
| [
"keras.preprocessing.image.ImageDataGenerator",
"numpy.zeros",
"numpy.concatenate"
] | [((213, 358), 'keras.preprocessing.image.ImageDataGenerator', 'ImageDataGenerator', ([], {'rescale': '(1.0 / 255.0)', 'rotation_range': '(5)', 'shear_range': '(0.01)', 'zoom_range': '(0.01)', 'height_shift_range': '(0.01)', 'width_shift_range': '(0.01)'}), '(rescale=1.0 / 255.0, rotation_range=5, shear_range=0.01,\n zoom_range=0.01, height_shift_range=0.01, width_shift_range=0.01)\n', (231, 358), False, 'from keras.preprocessing.image import ImageDataGenerator\n'), ((1885, 1934), 'numpy.concatenate', 'np.concatenate', (['(left_image, right_image)'], {'axis': '(2)'}), '((left_image, right_image), axis=2)\n', (1899, 1934), True, 'import numpy as np\n'), ((1971, 2032), 'numpy.zeros', 'np.zeros', ([], {'shape': '(output.shape[0], img_rows - 4, img_cols - 4)'}), '(shape=(output.shape[0], img_rows - 4, img_cols - 4))\n', (1979, 2032), True, 'import numpy as np\n'), ((2065, 2126), 'numpy.zeros', 'np.zeros', ([], {'shape': '(output.shape[0], img_rows - 4, img_cols - 4)'}), '(shape=(output.shape[0], img_rows - 4, img_cols - 4))\n', (2073, 2126), True, 'import numpy as np\n')] |
import itertools
import numpy as np
from scipy.stats import entropy
from scipy.sparse import csc_matrix
from scipy.special import logsumexp, digamma, betaln
from .vireo_base import normalize, loglik_amplify, beta_entropy
from .vireo_base import get_binom_coeff, logbincoeff
__docformat__ = "restructuredtext en"
class Vireo():
    """Vireo model: Variational Inference for reconstruction of ensemble origin

    Coordinate-ascent variational Bayes model that, given per-cell counts of
    the alternative allele (AD) and total depth (DP) at each variant, jointly
    infers the donor identity of each cell (`ID_prob`), the genotype of each
    donor at each variant (`GT_prob`), and the Beta posterior of the allelic
    ratio ``theta`` per genotype category (`beta_mu`, `beta_sum`).

    The prior can be set via set_prior() before fitting the model.

    Key properties
    --------------
    beta_mu: numpy array (1, n_GT) or (n_var, n_GT)
        Beta mean parameter of theta's posterior
    beta_sum: numpy array (1, n_GT) or (n_var, n_GT), same as beta_mu
        Beta concentration parameter of theta's posterior
    ID_prob: numpy array (n_cell, n_donor)
        Posterior cell assignment probability to each donor
    GT_prob: numpy array (n_var, n_donor, n_GT)
        Posterior genotype probability per variant per donor
    """
    def __init__(self, n_cell, n_var, n_donor, n_GT=3, learn_GT=True,
        learn_theta=True, ASE_mode=False, fix_beta_sum=False,
        beta_mu_init=None, beta_sum_init=None, ID_prob_init=None,
        GT_prob_init=None):
        """Initialise Vireo model

        Note, multiple initializations are highly recomended to avoid local
        optima.

        Parameters
        ----------
        n_cell : int.
            Number of cells
        n_var : int.
            Number of variants
        n_donor : int.
            Number of donors
        n_GT : int.
            Number of genotype categories
        learn_GT: bool.
            Whether updating `GT_prob`; otherwise using the initial
        learn_theta: bool.
            Whether updating theta's Beta posterior (`beta_mu`/`beta_sum`);
            otherwise keeping the initial values
        ASE_mode: bool.
            Whether setting allelic ratio `theta` to be variant specific
        fix_beta_sum: bool.
            Whether fixing the concentration parameter of theta's posterior
        beta_mu_init: numpy array (1, n_GT) or (n_var, n_GT)
            Initial value of beta_mu, the mean parameter of theta
        beta_sum_init: numpy array (1, n_GT) or (n_var, n_GT), same as beta_mu
            Initial value of beta_sum, the concetration parameter of theta
        ID_prob_init: numpy array (n_cell, n_donor)
            Initial value of ID_prob, cell assignment probability to each donor
        GT_prob_init: numpy array (n_var, n_donor, n_GT)
            Initial value of GT_prob, genotype probability per variant and donor
        """
        self.n_GT = n_GT
        self.n_var = n_var
        self.n_cell = n_cell
        self.n_donor = n_donor
        self.learn_GT = learn_GT
        self.ASE_mode = ASE_mode
        self.learn_theta = learn_theta
        self.fix_beta_sum = fix_beta_sum
        # ELBO trace; every call to fit() appends its per-iteration values
        self.ELBO_ = np.zeros((0))

        # initial key parameters
        self.set_initial(beta_mu_init, beta_sum_init, ID_prob_init, GT_prob_init)

        # set hyper parameters for prior
        self.set_prior()

    def set_initial(self, beta_mu_init=None, beta_sum_init=None,
        ID_prob_init=None, GT_prob_init=None):
        """Set initial values for theta's posterior, ID_prob and GT_prob.

        Any argument left as None is replaced by a default: evenly spaced
        allelic-ratio means for beta_mu, a concentration of 50 for beta_sum,
        and normalized uniform-random matrices for ID_prob and GT_prob.
        """
        # theta is per-variant in ASE mode, otherwise shared across variants
        theta_len = self.n_var if self.ASE_mode else 1
        if beta_mu_init is not None:
            self.beta_mu = beta_mu_init
        else:
            self.beta_mu = (np.ones((theta_len, self.n_GT)) *
                np.linspace(0.01, 0.99, self.n_GT).reshape(1, -1))

        if beta_sum_init is not None:
            self.beta_sum = beta_sum_init
        else:
            self.beta_sum = np.ones((theta_len, self.n_GT)) * 50

        if ID_prob_init is not None:
            self.ID_prob = normalize(ID_prob_init, axis=1)
        else:
            self.ID_prob = normalize(np.random.rand(self.n_cell, self.n_donor))

        if GT_prob_init is not None:
            self.GT_prob = normalize(GT_prob_init)
        else:
            _GT_val = np.random.rand(self.n_var, self.n_donor, self.n_GT)
            self.GT_prob = normalize(_GT_val)

    def set_prior(self, GT_prior=None, ID_prior=None, beta_mu_prior=None,
        beta_sum_prior=None, min_GP=0.00001):
        """Set prior for key variables: theta, GT_prob and ID_prob.
        The priors are in the same shape as its according variables.

        min_GP: float. Minimum genotype probability in GT_prior; GT_prior
        values are clipped into [min_GP, 1 - min_GP] before normalization.
        """
        if beta_mu_prior is None:
            beta_mu_prior = np.expand_dims(
                np.linspace(0.01, 0.99, self.beta_mu.shape[1]), axis=0)
        if beta_sum_prior is None:
            beta_sum_prior = np.ones(beta_mu_prior.shape) * 50.0
        # Convert (mean, concentration) to the two Beta shape parameters
        self.theta_s1_prior = beta_mu_prior * beta_sum_prior
        self.theta_s2_prior = (1 - beta_mu_prior) * beta_sum_prior

        if ID_prior is not None:
            if len(ID_prior.shape) == 1:
                # allow a single shared prior row over donors
                ID_prior = np.expand_dims(ID_prior, axis=0)
            self.ID_prior = ID_prior
        else:
            self.ID_prior = normalize(np.ones(self.ID_prob.shape))

        if GT_prior is not None:
            if len(GT_prior.shape) == 2:
                # allow a shared (n_donor, n_GT) prior over all variants
                GT_prior = np.expand_dims(GT_prior, axis=0)
            # clip away hard 0/1 probabilities to keep log(GT_prior) finite
            GT_prior[GT_prior < min_GP] = min_GP
            GT_prior[GT_prior > 1 - min_GP] = 1 - min_GP
            GT_prior = normalize(GT_prior)
            self.GT_prior = GT_prior
        else:
            self.GT_prior = normalize(np.ones(self.GT_prob.shape))

    @property
    def theta_s1(self):
        """Beta concetration1 parameter for theta posterior"""
        return self.beta_mu * self.beta_sum

    @property
    def theta_s2(self):
        """Beta concetration2 parameter for theta posterior"""
        return (1 - self.beta_mu) * self.beta_sum

    @property
    def digamma1_(self):
        """Digamma of Beta concetration1 parameter.

        Expanded to shape (n_theta, 1, n_GT) so it broadcasts against
        per-donor quantities in the update steps.
        """
        return np.expand_dims(digamma(self.theta_s1), 1)

    @property
    def digamma2_(self):
        """Digamma of Beta concetration2 parameter.

        Expanded to shape (n_theta, 1, n_GT) for broadcasting.
        """
        return np.expand_dims(digamma(self.theta_s2), 1)

    @property
    def digammas_(self):
        """Digamma of Beta concetration summary parameter.

        Expanded to shape (n_theta, 1, n_GT) for broadcasting.
        """
        return np.expand_dims(digamma(self.theta_s1 + self.theta_s2), 1)

    def update_theta_size(self, AD, DP):
        """Coordinate ascent for updating theta posterior parameters.

        Accumulates the expected alternative (S1) and reference (S2) counts
        assigned to each genotype category on top of the prior
        pseudo-counts, then converts back to (beta_mu, beta_sum).
        """
        BD = DP - AD
        S1_gt = AD @ self.ID_prob #(n_var, n_donor)
        S2_gt = BD @ self.ID_prob #(n_var, n_donor)

        _theta_s1 = np.zeros(self.beta_mu.shape)
        _theta_s2 = np.zeros(self.beta_mu.shape)
        _theta_s1 += self.theta_s1_prior.copy()
        _theta_s2 += self.theta_s2_prior.copy()
        for ig in range(self.n_GT):
            # in ASE mode keep one theta per variant (sum over donors only);
            # otherwise pool counts over all variants and donors
            _axis = 1 if self.ASE_mode else None
            _theta_s1[:, ig:(ig+1)] += np.sum(
                S1_gt * self.GT_prob[:, :, ig], axis=_axis, keepdims=True)
            _theta_s2[:, ig:(ig+1)] += np.sum(
                S2_gt * self.GT_prob[:, :, ig], axis=_axis, keepdims=True)

        self.beta_mu = _theta_s1 / (_theta_s1 + _theta_s2)
        if self.fix_beta_sum == False:
            self.beta_sum = _theta_s1 + _theta_s2

    def update_ID_prob(self, AD, DP):
        """Coordinate ascent for updating assignment probability.

        Returns
        -------
        logLik_ID: numpy array (n_cell, n_donor)
            Expected log-likelihood of each cell under each donor, before
            adding the prior; reused by get_ELBO().
        """
        BD = DP - AD
        logLik_ID = np.zeros((AD.shape[1], self.n_donor))
        for ig in range(self.n_GT):
            # digamma terms give the expectation of log(theta) /
            # log(1 - theta) under the current Beta posterior
            S1 = AD.T @ (self.GT_prob[:, :, ig] * self.digamma1_[:, :, ig])
            S2 = BD.T @ (self.GT_prob[:, :, ig] * self.digamma2_[:, :, ig])
            SS = DP.T @ (self.GT_prob[:, :, ig] * self.digammas_[:, :, ig])
            logLik_ID += (S1 + S2 - SS)

        # loglik_amplify rescales before exp() to avoid numerical underflow
        self.ID_prob = normalize(np.exp(loglik_amplify(
            logLik_ID + np.log(self.ID_prior))))

        return logLik_ID

    def update_GT_prob(self, AD, DP):
        """Coordinate ascent for updating genotype probability.

        Mirrors update_ID_prob but aggregates counts per (variant, donor)
        and normalizes over genotype categories.
        """
        S1_gt = AD @ self.ID_prob
        SS_gt = DP @ self.ID_prob
        S2_gt = SS_gt - S1_gt

        logLik_GT = np.zeros(self.GT_prior.shape)
        for ig in range(self.n_GT):
            logLik_GT[:, :, ig] = (
                S1_gt * self.digamma1_[:, :, ig] +
                S2_gt * self.digamma2_[:, :, ig] -
                SS_gt * self.digammas_[:, :, ig])

        # loglik_amplify rescales before exp() to avoid numerical underflow
        self.GT_prob = normalize(np.exp(loglik_amplify(
            logLik_GT + np.log(self.GT_prior))))

    def get_ELBO(self, logLik_ID, AD=None, DP=None):
        """Calculating variational evidence lower bound with current parameters

        logLik_ID: numpy array (n_cell, n_donor), the output from
        update_ID_prob. If None, it is recomputed from AD and DP (both
        must then be provided).
        """
        if logLik_ID is None:
            BD = DP - AD
            logLik_ID = np.zeros((AD.shape[1], self.n_donor))
            for ig in range(self.n_GT):
                S1 = AD.T @ (self.GT_prob[:, :, ig] * self.digamma1_[:, :, ig])
                S2 = BD.T @ (self.GT_prob[:, :, ig] * self.digamma2_[:, :, ig])
                SS = DP.T @ (self.GT_prob[:, :, ig] * self.digammas_[:, :, ig])
                logLik_ID += (S1 + S2 - SS)

        # expected log-likelihood term under q(ID)
        LB_p = np.sum(logLik_ID * self.ID_prob)
        # scipy's entropy(pk, qk) computes KL(pk || qk)
        KL_ID = np.sum(entropy(self.ID_prob, self.ID_prior, axis=-1))
        KL_GT = np.sum(entropy(self.GT_prob, self.GT_prior, axis=-1))
        # KL divergence between posterior and prior Beta distributions of
        # theta; parameters stacked as (..., 2, n_GT) pairs
        KL_theta = beta_entropy(
            np.append(
                np.expand_dims(self.theta_s1, 1),
                np.expand_dims(self.theta_s2, 1), axis = 1),
            np.append(
                np.expand_dims(self.theta_s1_prior, 1),
                np.expand_dims(self.theta_s2_prior, 1), axis = 1))
        # print(LB_p, KL_ID, KL_GT, KL_theta)
        return LB_p - KL_ID - KL_GT - KL_theta

    def _fit_VB(self, AD, DP, max_iter=200, min_iter=5, epsilon_conv=1e-2,
        delay_fit_theta=0, verbose=True):
        """Fit Vireo model with coordinate ascent.

        Iterates the three update steps until the ELBO improvement drops
        below epsilon_conv (checked only after min_iter iterations) or
        max_iter is reached. Returns the ELBO values up to the last
        completed iteration.
        """
        ELBO = np.zeros(max_iter)
        numerical_minimal = 1e-6  # tolerance for "ELBO decreased" warnings
        for it in range(max_iter):
            if self.learn_theta and it >= delay_fit_theta:
                self.update_theta_size(AD, DP)
            if self.learn_GT:
                self.update_GT_prob(AD, DP)
            _logLik_ID = self.update_ID_prob(AD, DP)
            ELBO[it] = self.get_ELBO(_logLik_ID) #+ _binom_coeff_log

            if it > min_iter:
                if ELBO[it] < ELBO[it - 1] - numerical_minimal:
                    if verbose:
                        print("Warning: Lower bound decreases!\n")
                elif it == max_iter - 1:
                    if verbose:
                        print("Warning: VB did not converge!\n")
                elif ELBO[it] - ELBO[it - 1] < epsilon_conv:
                    break

        return ELBO[:it]

    def fit(self, AD, DP, max_iter=200, min_iter=5, epsilon_conv=1e-2,
        delay_fit_theta=0, verbose=True, n_inits=50, nproc=1):
        """Fit Vireo model with coordinate ascent

        Parameters
        ----------
        AD : scipy.sparse.csc_matrix (n_var, n_cell)
            Sparse count matrix for alternative allele
        DP : scipy.sparse.csc_matrix (n_var, n_cell)
            Sparse count matrix for depths, alternative + refeerence alleles
        max_iter : int
            Maximum number of iterations
        min_iter :
            Minimum number of iterations
        epsilon_conv : float
            Threshold for detecting convergence
        delay_fit_theta : int
            Number of steps to delay updating theta. This can be very useful
            for common genetics when there is good prior on allelic ratio.
        verbose : bool
            Whether print out log info
        n_inits : int
            Accepted for API compatibility but not used by this method.
        nproc : int
            Accepted for API compatibility but not used by this method.
        """
        # Dense inputs that are mostly zeros are converted to sparse to
        # speed up the matrix products in the update steps.
        if type(DP) is np.ndarray and np.mean(DP > 0) < 0.3:
            print("Warning: input matrices is %.1f%% sparse, "
                  %(100 - np.mean(DP > 0) * 100) +
                  "change to scipy.sparse.csc_matrix" )
            AD = csc_matrix(AD)
            DP = csc_matrix(DP)

        ELBO = self._fit_VB(AD, DP, max_iter, min_iter, epsilon_conv,
            delay_fit_theta, verbose)

        # _binom_coeff_log = np.sum(logbincoeff(DP, AD, is_sparse=True))
        # _binom_coeff_log = np.sum(get_binom_coeff(AD, DP))
        # add the data-dependent (parameter-free) binomial coefficient term
        ELBO += np.sum(get_binom_coeff(AD, DP))
        self.ELBO_ = np.append(self.ELBO_, ELBO)
| [
"numpy.sum",
"numpy.log",
"scipy.stats.entropy",
"numpy.zeros",
"numpy.ones",
"numpy.expand_dims",
"numpy.append",
"scipy.special.digamma",
"scipy.sparse.csc_matrix",
"numpy.mean",
"numpy.linspace",
"numpy.random.rand"
] | [((2735, 2746), 'numpy.zeros', 'np.zeros', (['(0)'], {}), '(0)\n', (2743, 2746), True, 'import numpy as np\n'), ((6403, 6431), 'numpy.zeros', 'np.zeros', (['self.beta_mu.shape'], {}), '(self.beta_mu.shape)\n', (6411, 6431), True, 'import numpy as np\n'), ((6452, 6480), 'numpy.zeros', 'np.zeros', (['self.beta_mu.shape'], {}), '(self.beta_mu.shape)\n', (6460, 6480), True, 'import numpy as np\n'), ((7220, 7257), 'numpy.zeros', 'np.zeros', (['(AD.shape[1], self.n_donor)'], {}), '((AD.shape[1], self.n_donor))\n', (7228, 7257), True, 'import numpy as np\n'), ((7968, 7997), 'numpy.zeros', 'np.zeros', (['self.GT_prior.shape'], {}), '(self.GT_prior.shape)\n', (7976, 7997), True, 'import numpy as np\n'), ((9032, 9064), 'numpy.sum', 'np.sum', (['(logLik_ID * self.ID_prob)'], {}), '(logLik_ID * self.ID_prob)\n', (9038, 9064), True, 'import numpy as np\n'), ((9818, 9836), 'numpy.zeros', 'np.zeros', (['max_iter'], {}), '(max_iter)\n', (9826, 9836), True, 'import numpy as np\n'), ((12229, 12256), 'numpy.append', 'np.append', (['self.ELBO_', 'ELBO'], {}), '(self.ELBO_, ELBO)\n', (12238, 12256), True, 'import numpy as np\n'), ((3865, 3916), 'numpy.random.rand', 'np.random.rand', (['self.n_var', 'self.n_donor', 'self.n_GT'], {}), '(self.n_var, self.n_donor, self.n_GT)\n', (3879, 3916), True, 'import numpy as np\n'), ((5769, 5791), 'scipy.special.digamma', 'digamma', (['self.theta_s1'], {}), '(self.theta_s1)\n', (5776, 5791), False, 'from scipy.special import logsumexp, digamma, betaln\n'), ((5921, 5943), 'scipy.special.digamma', 'digamma', (['self.theta_s2'], {}), '(self.theta_s2)\n', (5928, 5943), False, 'from scipy.special import logsumexp, digamma, betaln\n'), ((6080, 6118), 'scipy.special.digamma', 'digamma', (['(self.theta_s1 + self.theta_s2)'], {}), '(self.theta_s1 + self.theta_s2)\n', (6087, 6118), False, 'from scipy.special import logsumexp, digamma, betaln\n'), ((6701, 6766), 'numpy.sum', 'np.sum', (['(S1_gt * self.GT_prob[:, :, ig])'], {'axis': '_axis', 'keepdims': 
'(True)'}), '(S1_gt * self.GT_prob[:, :, ig], axis=_axis, keepdims=True)\n', (6707, 6766), True, 'import numpy as np\n'), ((6823, 6888), 'numpy.sum', 'np.sum', (['(S2_gt * self.GT_prob[:, :, ig])'], {'axis': '_axis', 'keepdims': '(True)'}), '(S2_gt * self.GT_prob[:, :, ig], axis=_axis, keepdims=True)\n', (6829, 6888), True, 'import numpy as np\n'), ((8654, 8691), 'numpy.zeros', 'np.zeros', (['(AD.shape[1], self.n_donor)'], {}), '((AD.shape[1], self.n_donor))\n', (8662, 8691), True, 'import numpy as np\n'), ((9088, 9133), 'scipy.stats.entropy', 'entropy', (['self.ID_prob', 'self.ID_prior'], {'axis': '(-1)'}), '(self.ID_prob, self.ID_prior, axis=-1)\n', (9095, 9133), False, 'from scipy.stats import entropy\n'), ((9158, 9203), 'scipy.stats.entropy', 'entropy', (['self.GT_prob', 'self.GT_prior'], {'axis': '(-1)'}), '(self.GT_prob, self.GT_prior, axis=-1)\n', (9165, 9203), False, 'from scipy.stats import entropy\n'), ((11842, 11856), 'scipy.sparse.csc_matrix', 'csc_matrix', (['AD'], {}), '(AD)\n', (11852, 11856), False, 'from scipy.sparse import csc_matrix\n'), ((11874, 11888), 'scipy.sparse.csc_matrix', 'csc_matrix', (['DP'], {}), '(DP)\n', (11884, 11888), False, 'from scipy.sparse import csc_matrix\n'), ((3279, 3310), 'numpy.ones', 'np.ones', (['(theta_len, self.n_GT)'], {}), '((theta_len, self.n_GT))\n', (3286, 3310), True, 'import numpy as np\n'), ((3504, 3535), 'numpy.ones', 'np.ones', (['(theta_len, self.n_GT)'], {}), '((theta_len, self.n_GT))\n', (3511, 3535), True, 'import numpy as np\n'), ((3689, 3730), 'numpy.random.rand', 'np.random.rand', (['self.n_cell', 'self.n_donor'], {}), '(self.n_cell, self.n_donor)\n', (3703, 3730), True, 'import numpy as np\n'), ((4399, 4445), 'numpy.linspace', 'np.linspace', (['(0.01)', '(0.99)', 'self.beta_mu.shape[1]'], {}), '(0.01, 0.99, self.beta_mu.shape[1])\n', (4410, 4445), True, 'import numpy as np\n'), ((4519, 4547), 'numpy.ones', 'np.ones', (['beta_mu_prior.shape'], {}), '(beta_mu_prior.shape)\n', (4526, 4547), True, 
'import numpy as np\n'), ((4785, 4817), 'numpy.expand_dims', 'np.expand_dims', (['ID_prior'], {'axis': '(0)'}), '(ID_prior, axis=0)\n', (4799, 4817), True, 'import numpy as np\n'), ((4907, 4934), 'numpy.ones', 'np.ones', (['self.ID_prob.shape'], {}), '(self.ID_prob.shape)\n', (4914, 4934), True, 'import numpy as np\n'), ((5038, 5070), 'numpy.expand_dims', 'np.expand_dims', (['GT_prior'], {'axis': '(0)'}), '(GT_prior, axis=0)\n', (5052, 5070), True, 'import numpy as np\n'), ((5317, 5344), 'numpy.ones', 'np.ones', (['self.GT_prob.shape'], {}), '(self.GT_prob.shape)\n', (5324, 5344), True, 'import numpy as np\n'), ((9277, 9309), 'numpy.expand_dims', 'np.expand_dims', (['self.theta_s1', '(1)'], {}), '(self.theta_s1, 1)\n', (9291, 9309), True, 'import numpy as np\n'), ((9328, 9360), 'numpy.expand_dims', 'np.expand_dims', (['self.theta_s2', '(1)'], {}), '(self.theta_s2, 1)\n', (9342, 9360), True, 'import numpy as np\n'), ((9412, 9450), 'numpy.expand_dims', 'np.expand_dims', (['self.theta_s1_prior', '(1)'], {}), '(self.theta_s1_prior, 1)\n', (9426, 9450), True, 'import numpy as np\n'), ((9469, 9507), 'numpy.expand_dims', 'np.expand_dims', (['self.theta_s2_prior', '(1)'], {}), '(self.theta_s2_prior, 1)\n', (9483, 9507), True, 'import numpy as np\n'), ((11631, 11646), 'numpy.mean', 'np.mean', (['(DP > 0)'], {}), '(DP > 0)\n', (11638, 11646), True, 'import numpy as np\n'), ((3330, 3364), 'numpy.linspace', 'np.linspace', (['(0.01)', '(0.99)', 'self.n_GT'], {}), '(0.01, 0.99, self.n_GT)\n', (3341, 3364), True, 'import numpy as np\n'), ((7651, 7672), 'numpy.log', 'np.log', (['self.ID_prior'], {}), '(self.ID_prior)\n', (7657, 7672), True, 'import numpy as np\n'), ((8305, 8326), 'numpy.log', 'np.log', (['self.GT_prior'], {}), '(self.GT_prior)\n', (8311, 8326), True, 'import numpy as np\n'), ((11744, 11759), 'numpy.mean', 'np.mean', (['(DP > 0)'], {}), '(DP > 0)\n', (11751, 11759), True, 'import numpy as np\n')] |
import h5py
import numpy as np
import silx.math.fit
import silx.math.fit.peaks
# fileRead = '/home/esrf/slim/data/ihme10/id15/TiC_Calib/ihme10_TiC_calib.h5'
# filesave = '/home/esrf/slim/easistrain/easistrain/EDD/Results_ihme10_TiC_calib.h5'
# sample = 'TiC_calib'
# dataset = '0001'
# scanNumber = '4'
# horizontalDetector = 'mca2_det0'
# verticalDetector = 'mca2_det1'
# numberOfPeaks = 1
# rangeFit = [680,820]
# doublet = [1]
def splitPseudoVoigt(xData, *params):
return silx.math.fit.sum_splitpvoigt(xData, *params)
def guessParameters(yData, counterOfPeak, doublet):
fwhmGuess = silx.math.fit.peaks.guess_fwhm(yData)
peaksGuess = silx.math.fit.peaks.peak_search(
yData,
fwhmGuess,
sensitivity=1,
begin_index=None,
end_index=None,
debug=False,
relevance_info=False,
) ## index of the peak
if np.size(peaksGuess) > doublet[counterOfPeak]:
# print(peaksGuess[np.argsort(yData[peaksGuess[:].astype(int)])])
peaksGuess = peaksGuess[np.argsort(yData[peaksGuess[:].astype(int)])][
-doublet[counterOfPeak] :
]
print(peaksGuess)
return fwhmGuess, peaksGuess
def angleCalibrationEDD(
fileRead,
fileSave,
sample,
dataset,
scanNumber,
horizontalDetector,
verticalDetector,
numberOfPeaks,
doublet,
rangeFit,
):
with h5py.File(fileRead, "r") as h5Read: ## Read the h5 file of raw data
patternHorizontalDetector = h5Read[
sample
+ "_"
+ str(dataset)
+ "_"
+ str(scanNumber)
+ ".1/measurement/"
+ horizontalDetector
][
()
] ## pattern of horizontal detector
patternVerticalDetector = h5Read[
sample
+ "_"
+ str(dataset)
+ "_"
+ str(scanNumber)
+ ".1/measurement/"
+ verticalDetector
][
()
] ## pattern of vertical detector
h5Save = h5py.File(fileSave, "a") ## create/append h5 file to save in
if not "angleCalibration" in h5Save.keys():
angleCalibrationLevel1 = h5Save.create_group(
"angleCalibration"
) ## angleCalibration group
else:
angleCalibrationLevel1 = h5Save["angleCalibration"]
rawDataLevel1_1 = angleCalibrationLevel1.create_group(
"rawData" + "_" + str(dataset) + "_" + str(scanNumber)
) ## rawData subgroup in calibration group
fitLevel1_2 = angleCalibrationLevel1.create_group(
"fit" + "_" + str(dataset) + "_" + str(scanNumber)
) ## fit subgroup in calibration group
fitLevel1_2.create_group("fitParams") ## fit results group for the two detector
fitParamsHD = np.array(())
fitParamsVD = np.array(())
uncertaintyFitParamsHD = np.array(())
uncertaintyFitParamsVD = np.array(())
for i in range(numberOfPeaks):
peakHorizontalDetector = np.transpose(
(
np.arange(rangeFit[2 * i], rangeFit[(2 * i) + 1]),
patternHorizontalDetector[rangeFit[2 * i] : rangeFit[(2 * i) + 1]],
)
) ## peak of the horizontal detector
peakVerticalDetector = np.transpose(
(
np.arange(rangeFit[2 * i], rangeFit[(2 * i) + 1]),
patternVerticalDetector[rangeFit[2 * i] : rangeFit[(2 * i) + 1]],
)
) ## peak of the vertical detector
backgroundHorizontalDetector = silx.math.fit.strip(
data=peakHorizontalDetector[:, 1],
w=5,
niterations=4000,
factor=1,
anchors=None,
) ## background of the horizontal detector
backgroundVerticalDetector = silx.math.fit.strip(
data=peakVerticalDetector[:, 1],
w=5,
niterations=4000,
factor=1,
anchors=None,
) ## background of the vertical detector
fitLevel1_2.create_group(
f"fitLine_{str(i)}"
) ## create group for each calibration peak
fitLevel1_2[f"fitLine_{str(i)}"].create_dataset(
"rawHorizontalDetector", dtype="f", data=peakHorizontalDetector
) ## create dataset for raw data of each calibration peak
fitLevel1_2[f"fitLine_{str(i)}"].create_dataset(
"rawVerticalDetector", dtype="f", data=peakVerticalDetector
) ## create dataset for raw data of each calibration peak
fitLevel1_2[f"fitLine_{str(i)}"].create_dataset(
"backgroundHorizontalDetector",
dtype="f",
data=np.transpose(
(peakHorizontalDetector[:, 0], backgroundHorizontalDetector)
),
) ## create dataset for background of each calibration peak
fitLevel1_2[f"fitLine_{str(i)}"].create_dataset(
"backgroundVerticalDetector",
dtype="f",
data=np.transpose((peakVerticalDetector[:, 0], backgroundVerticalDetector)),
) ## create dataset for background of each calibration peak
fitLevel1_2[f"fitLine_{str(i)}"].create_dataset(
"bgdSubsDataHorizontalDetector",
dtype="f",
data=np.transpose(
(
peakHorizontalDetector[:, 0],
peakHorizontalDetector[:, 1] - backgroundHorizontalDetector,
)
),
) ## create dataset for HD raw data after subst of background
fitLevel1_2[f"fitLine_{str(i)}"].create_dataset(
"bgdSubsDataVerticalDetector",
dtype="f",
data=np.transpose(
(
peakVerticalDetector[:, 0],
peakVerticalDetector[:, 1] - backgroundVerticalDetector,
)
),
) ## create dataset for VD raw data after subst of background
fwhmGuessHD, peaksGuessHD = guessParameters(
peakHorizontalDetector[:, 1] - backgroundHorizontalDetector, i, doublet
) ## guess fit parameters for HD
fwhmGuessVD, peaksGuessVD = guessParameters(
peakVerticalDetector[:, 1] - backgroundVerticalDetector, i, doublet
) ## guess fit parameters for VD
initialGuessHD = np.zeros(5 * doublet[i])
initialGuessVD = np.zeros(5 * doublet[i])
for n in range(doublet[i]):
initialGuessHD[5 * n] = peakHorizontalDetector[:, 1][int(peaksGuessHD[n])]
-backgroundHorizontalDetector[int(peaksGuessHD[n])]
initialGuessHD[5 * n + 1] = peakHorizontalDetector[:, 0][
int(peaksGuessHD[n])
]
initialGuessHD[5 * n + 2] = fwhmGuessHD
initialGuessHD[5 * n + 3] = fwhmGuessHD
initialGuessHD[5 * n + 4] = 0.5
initialGuessVD[5 * n] = peakVerticalDetector[:, 1][int(peaksGuessVD[n])]
-backgroundVerticalDetector[int(peaksGuessVD[n])]
initialGuessVD[5 * n + 1] = peakVerticalDetector[:, 0][int(peaksGuessVD[n])]
initialGuessVD[5 * n + 2] = fwhmGuessVD
initialGuessVD[5 * n + 3] = fwhmGuessVD
initialGuessVD[5 * n + 4] = 0.5
optimal_parametersHD, covarianceHD, infodictHD = silx.math.fit.leastsq(
model=splitPseudoVoigt,
xdata=peakHorizontalDetector[:, 0],
ydata=peakHorizontalDetector[:, 1] - backgroundHorizontalDetector,
p0=initialGuessHD,
sigma=np.sqrt(
np.abs(peakHorizontalDetector[:, 1] - backgroundHorizontalDetector) + 1
),
full_output=True,
max_iter=1000,
) ## fit of the peak of the Horizontal detector
optimal_parametersVD, covarianceVD, infodictVD = silx.math.fit.leastsq(
model=splitPseudoVoigt,
xdata=peakVerticalDetector[:, 0],
ydata=peakVerticalDetector[:, 1] - backgroundVerticalDetector,
p0=initialGuessVD,
sigma=np.sqrt(
np.abs(peakVerticalDetector[:, 1] - backgroundVerticalDetector) + 1
),
full_output=True,
max_iter=1000,
) ## fit of the peak of the Vertical detector
fitLevel1_2[f"fitLine_{str(i)}"].create_dataset(
"fitHorizontalDetector",
dtype="f",
data=np.transpose(
(
peakHorizontalDetector[:, 0],
splitPseudoVoigt(peakHorizontalDetector[:, 0], optimal_parametersHD)
+ backgroundHorizontalDetector,
)
),
) ## fitted data of the horizontal detector
fitLevel1_2[f"fitLine_{str(i)}"].create_dataset(
"fitVerticalDetector",
dtype="f",
data=np.transpose(
(
peakVerticalDetector[:, 0],
splitPseudoVoigt(peakVerticalDetector[:, 0], optimal_parametersVD)
+ backgroundVerticalDetector,
)
),
) ## fitted data of the vertical detector
fitLevel1_2[f"fitLine_{str(i)}"].create_dataset(
"errorHorizontalDetector",
dtype="f",
data=np.transpose(
(
peakHorizontalDetector[:, 0],
np.absolute(
splitPseudoVoigt(
peakHorizontalDetector[:, 0], optimal_parametersHD
)
+ backgroundHorizontalDetector
- peakHorizontalDetector[:, 1]
),
)
),
) ## error of the horizontal detector
fitLevel1_2[f"fitLine_{str(i)}"].create_dataset(
"errorVerticalDetector",
dtype="f",
data=np.transpose(
(
peakVerticalDetector[:, 0],
np.absolute(
splitPseudoVoigt(
peakVerticalDetector[:, 0], optimal_parametersVD
)
+ backgroundVerticalDetector
- peakVerticalDetector[:, 1]
),
)
),
) ## error of the vertical detector
for n in range(doublet[i]):
fitParamsHD = np.append(
fitParamsHD,
np.append(
optimal_parametersHD[5 * n : 5 * n + 5],
[
infodictHD["reduced_chisq"],
100
* np.sum(
np.absolute(
splitPseudoVoigt(
peakHorizontalDetector[:, 0], optimal_parametersHD
)
+ backgroundHorizontalDetector
- peakHorizontalDetector[:, 1]
)
)
/ np.sum(peakHorizontalDetector[:, 1]),
],
),
axis=0,
) ##
fitParamsVD = np.append(
fitParamsVD,
np.append(
optimal_parametersVD[5 * n : 5 * n + 5],
[
infodictVD["reduced_chisq"],
100
* np.sum(
np.absolute(
splitPseudoVoigt(
peakVerticalDetector[:, 0], optimal_parametersVD
)
+ backgroundVerticalDetector
- peakVerticalDetector[:, 1]
)
)
/ np.sum(peakVerticalDetector[:, 1]),
],
),
axis=0,
) ##
uncertaintyFitParamsHD = np.append(
uncertaintyFitParamsHD, infodictHD["uncertainties"], axis=0
) ##
uncertaintyFitParamsVD = np.append(
uncertaintyFitParamsVD, infodictVD["uncertainties"], axis=0
) ##
rawDataLevel1_1.create_dataset(
"horizontalDetector", dtype="f", data=patternHorizontalDetector
) ## save raw data of the horizontal detector
rawDataLevel1_1.create_dataset(
"verticalDetector", dtype="f", data=patternVerticalDetector
) ## save raw data of the vertical detector
fitLevel1_2["fitParams"].create_dataset(
"fitParamsHD",
dtype="f",
data=np.reshape(fitParamsHD, (int(np.size(fitParamsHD) / 7), 7)),
) ## save parameters of the fit of HD
fitLevel1_2["fitParams"].create_dataset(
"fitParamsVD",
dtype="f",
data=np.reshape(fitParamsVD, (int(np.size(fitParamsVD) / 7), 7)),
) ## save parameters of the fit of VD
fitLevel1_2["fitParams"].create_dataset(
"uncertaintyParamsHD", dtype="f", data=uncertaintyFitParamsHD
) ## save uncertainty on the parameters of the fit of HD
fitLevel1_2["fitParams"].create_dataset(
"uncertaintyParamsVD", dtype="f", data=uncertaintyFitParamsVD
) ## save uncertainty on the parameters of the fit of VD
h5Save.close()
return
| [
"h5py.File",
"numpy.size",
"numpy.abs",
"numpy.sum",
"numpy.zeros",
"numpy.transpose",
"numpy.append",
"numpy.array",
"numpy.arange"
] | [((2043, 2067), 'h5py.File', 'h5py.File', (['fileSave', '"""a"""'], {}), "(fileSave, 'a')\n", (2052, 2067), False, 'import h5py\n'), ((2776, 2788), 'numpy.array', 'np.array', (['()'], {}), '(())\n', (2784, 2788), True, 'import numpy as np\n'), ((2807, 2819), 'numpy.array', 'np.array', (['()'], {}), '(())\n', (2815, 2819), True, 'import numpy as np\n'), ((2849, 2861), 'numpy.array', 'np.array', (['()'], {}), '(())\n', (2857, 2861), True, 'import numpy as np\n'), ((2891, 2903), 'numpy.array', 'np.array', (['()'], {}), '(())\n', (2899, 2903), True, 'import numpy as np\n'), ((880, 899), 'numpy.size', 'np.size', (['peaksGuess'], {}), '(peaksGuess)\n', (887, 899), True, 'import numpy as np\n'), ((1382, 1406), 'h5py.File', 'h5py.File', (['fileRead', '"""r"""'], {}), "(fileRead, 'r')\n", (1391, 1406), False, 'import h5py\n'), ((6279, 6303), 'numpy.zeros', 'np.zeros', (['(5 * doublet[i])'], {}), '(5 * doublet[i])\n', (6287, 6303), True, 'import numpy as np\n'), ((6329, 6353), 'numpy.zeros', 'np.zeros', (['(5 * doublet[i])'], {}), '(5 * doublet[i])\n', (6337, 6353), True, 'import numpy as np\n'), ((12035, 12105), 'numpy.append', 'np.append', (['uncertaintyFitParamsHD', "infodictHD['uncertainties']"], {'axis': '(0)'}), "(uncertaintyFitParamsHD, infodictHD['uncertainties'], axis=0)\n", (12044, 12105), True, 'import numpy as np\n'), ((12177, 12247), 'numpy.append', 'np.append', (['uncertaintyFitParamsVD', "infodictVD['uncertainties']"], {'axis': '(0)'}), "(uncertaintyFitParamsVD, infodictVD['uncertainties'], axis=0)\n", (12186, 12247), True, 'import numpy as np\n'), ((3016, 3063), 'numpy.arange', 'np.arange', (['rangeFit[2 * i]', 'rangeFit[2 * i + 1]'], {}), '(rangeFit[2 * i], rangeFit[2 * i + 1])\n', (3025, 3063), True, 'import numpy as np\n'), ((3286, 3333), 'numpy.arange', 'np.arange', (['rangeFit[2 * i]', 'rangeFit[2 * i + 1]'], {}), '(rangeFit[2 * i], rangeFit[2 * i + 1])\n', (3295, 3333), True, 'import numpy as np\n'), ((4635, 4709), 'numpy.transpose', 'np.transpose', 
(['(peakHorizontalDetector[:, 0], backgroundHorizontalDetector)'], {}), '((peakHorizontalDetector[:, 0], backgroundHorizontalDetector))\n', (4647, 4709), True, 'import numpy as np\n'), ((4949, 5019), 'numpy.transpose', 'np.transpose', (['(peakVerticalDetector[:, 0], backgroundVerticalDetector)'], {}), '((peakVerticalDetector[:, 0], backgroundVerticalDetector))\n', (4961, 5019), True, 'import numpy as np\n'), ((5232, 5341), 'numpy.transpose', 'np.transpose', (['(peakHorizontalDetector[:, 0], peakHorizontalDetector[:, 1] -\n backgroundHorizontalDetector)'], {}), '((peakHorizontalDetector[:, 0], peakHorizontalDetector[:, 1] -\n backgroundHorizontalDetector))\n', (5244, 5341), True, 'import numpy as np\n'), ((5639, 5742), 'numpy.transpose', 'np.transpose', (['(peakVerticalDetector[:, 0], peakVerticalDetector[:, 1] -\n backgroundVerticalDetector)'], {}), '((peakVerticalDetector[:, 0], peakVerticalDetector[:, 1] -\n backgroundVerticalDetector))\n', (5651, 5742), True, 'import numpy as np\n'), ((7511, 7578), 'numpy.abs', 'np.abs', (['(peakHorizontalDetector[:, 1] - backgroundHorizontalDetector)'], {}), '(peakHorizontalDetector[:, 1] - backgroundHorizontalDetector)\n', (7517, 7578), True, 'import numpy as np\n'), ((8023, 8086), 'numpy.abs', 'np.abs', (['(peakVerticalDetector[:, 1] - backgroundVerticalDetector)'], {}), '(peakVerticalDetector[:, 1] - backgroundVerticalDetector)\n', (8029, 8086), True, 'import numpy as np\n'), ((11051, 11087), 'numpy.sum', 'np.sum', (['peakHorizontalDetector[:, 1]'], {}), '(peakHorizontalDetector[:, 1])\n', (11057, 11087), True, 'import numpy as np\n'), ((11878, 11912), 'numpy.sum', 'np.sum', (['peakVerticalDetector[:, 1]'], {}), '(peakVerticalDetector[:, 1])\n', (11884, 11912), True, 'import numpy as np\n'), ((12724, 12744), 'numpy.size', 'np.size', (['fitParamsHD'], {}), '(fitParamsHD)\n', (12731, 12744), True, 'import numpy as np\n'), ((12928, 12948), 'numpy.size', 'np.size', (['fitParamsVD'], {}), '(fitParamsVD)\n', (12935, 12948), True, 
'import numpy as np\n')] |
# -*- coding: utf-8 -*-
from __future__ import print_function
import argparse
import pandas as pd
import numpy as np
import os.path
from scipy.io import FortranFile
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
import glob
from pybloomfilter import BloomFilter
from multiprocessing import Pool
try:
from tqdm import tqdm
except:
print('Missing tqdm library, install it (fallbacks to dummy function).')
def tqdm(foo):
return foo
from mpl_toolkits.mplot3d import Axes3D
import tools
parser = argparse.ArgumentParser(description='FIXME')
parser.add_argument('--galaxy-list', type=str,
default='lists/list_kingal_00782.dat')
parser.add_argument('--halo-list', type=str,
default='lists/list_halo.dat.bin')
parser.add_argument('--association-list', type=str,
default='lists/associated_halogal_782.dat.bin')
parser.add_argument('--ramses-output-start', type=str,
default='/data52/Horizon-AGN/OUTPUT_DIR/output_00032/')
parser.add_argument('--ramses-output-end', type=str,
default='/data52/Horizon-AGN/OUTPUT_DIR/output_00782/')
parser.add_argument('--DM-tree-bricks', '-dtb', type=str,
default='/data40b/Horizon-AGN/TREE_DM_raw/tree_bricks752')
parser.add_argument('--process', '-p', type=int, default=1)
def particles_in_halo(tree_brick, start=0, end=None, fun_filter=lambda x: True):
''' Open a tree bricks file and associate to each halo the corresponding particles.
'''
# Open file
f = FortranFile(tree_brick, 'r')
# Give a value to end, by default start + 1
if end == None:
end = start + 1
# Read headers
nbodies = f.read_ints()[0]
f.read_reals(dtype=np.float32)
aexp = f.read_reals(dtype=np.float32)
f.read_reals(dtype=np.float32)
age = f.read_reals(dtype=np.float32)
nhalo, nsubhalo = f.read_ints()
halo_tot = nhalo + nsubhalo
halos = {}
for i in tqdm(range(halo_tot)):
parts = f.read_ints()[0]
members = f.read_ints()
this_id = f.read_ints()[0]
if (start <= this_id and this_id < end and fun_filter(this_id)):
for dm_particle_id in members:
if not halos.has_key(this_id):
halos[this_id] = []
halos[this_id].append(dm_particle_id)
elif this_id >= end:
break
f.read_ints()
# Irrelevant
level, hosthalo, hostsub, nbsub, nextsub = f.read_ints()
mstar = 1e11 * f.read_reals(dtype=np.float32)
px, py, pz = f.read_reals(dtype=np.float32)
f.read_reals(dtype=np.float32)
f.read_reals(dtype=np.float32)
rad = f.read_reals(dtype=np.float32)[0]
f.read_reals(dtype=np.float32)
f.read_reals(dtype=np.float32)
rvir, mvir, tvir, cvel = f.read_reals(dtype=np.float32)
f.read_reals(dtype=np.float32)
f.close()
return halos
def read_galaxy_list(listfile):
galFile = FortranFile(listfile, 'r')
print(listfile)
ngal, columns = galFile.read_ints()
_tmp = (galFile.read_reals(dtype=np.float32)).reshape((columns, ngal)).transpose()
galaxies = pd.DataFrame(_tmp,
columns=['id', 'vt', 'dvz', 'dvr', 'dvtheta', 'mass', 'x', 'y', 'z'])
galaxies.id.astype(int)
galaxies['sigma'] = 1/3.*np.sqrt(galaxies.dvz**2 + galaxies.dvtheta**2 + galaxies.dvr**2)
galaxies['sigmaoverv'] = galaxies.sigma / galaxies.vt
galaxies['elliptic'] = galaxies.sigmaoverv > 1.5
galaxies['spiral'] = galaxies.sigmaoverv < 0.8
return galaxies
def read_halo_list(listfile):
haloFile = FortranFile(listfile, 'r')
nhalos, columns = haloFile.read_ints()
_tmp = (haloFile.read_reals(dtype=np.float32)).reshape((columns, nhalos)).transpose()
halos = pd.DataFrame(_tmp,
columns=['id', 'level', 'mass', 'x', 'y', 'z', 'rvir'])
halos[['id', 'level']] = halos[['id', 'level']].astype(int)
return halos
def read_infos(path):
with open(path, 'r') as f:
ncpu = int(f.readline().replace('\n', '').split('=')[1])
ndim = int(f.readline().replace('\n', '').split('=')[1])
levelmin = int(f.readline().replace('\n', '').split('=')[1])
levelmax = int(f.readline().replace('\n', '').split('=')[1])
ngridmax = int(f.readline().replace('\n', '').split('=')[1])
nstep_coarse = int(f.readline().replace('\n', '').split('=')[1])
f.readline()
boxlen = float(f.readline().replace('\n', '').split('=')[1])
time = float(f.readline().replace('\n', '').split('=')[1])
aexp= float(f.readline().replace('\n', '').split('=')[1])
H0 = float(f.readline().replace('\n', '').split('=')[1])
omega_m = float(f.readline().replace('\n', '').split('=')[1])
omega_l = float(f.readline().replace('\n', '').split('=')[1])
omega_k = float(f.readline().replace('\n', '').split('=')[1])
omega_b = float(f.readline().replace('\n', '').split('=')[1])
unit_l = float(f.readline().replace('\n', '').split('=')[1])
unit_d = float(f.readline().replace('\n', '').split('=')[1])
unit_t = float(f.readline().replace('\n', '').split('=')[1])
f.readline()
ordering_type = f.readline().replace('\n', '').split('=')[1].strip()
headers = f.readline().replace('\n', '').split()
infos = pd.read_csv(f, names=headers, delim_whitespace=True)
infos.DOMAIN = infos.DOMAIN.astype(np.int32)
metadata = {
"ncpu": ncpu,
"ndim": ndim,
"levelmin": levelmin,
"levelmax": levelmax,
"time": time,
"aexp": aexp,
"unit_l": unit_l,
"unit_d": unit_d,
"unit_t": unit_t
}
infos.metadata = metadata
return infos
def read_association(listfile):
assocFile = FortranFile(listfile, 'r')
nassoc, columns = assocFile.read_ints()
_tmp = (assocFile.read_reals(dtype=np.float32)).reshape((columns, nassoc)).transpose()
assoc = pd.DataFrame(_tmp,
columns=['halo_id', 'level', 'halo_mass', 'gal_id', 'gal_mass'])
assoc[['halo_id', 'level', 'gal_id']] = assoc[['halo_id', 'level', 'gal_id']].astype(np.int32)
return assoc
def read_output(path, header_only=True):
f = FortranFile(path, 'r')
ncpu = f.read_ints()
dim = f.read_ints()
nparts = f.read_ints()
if header_only:
f.close()
return ncpu, dim, nparts
f.read_ints()
f.read_ints()
f.read_ints()
f.read_ints()
f.read_ints()
x = f.read_reals(dtype=np.float64)
y = f.read_reals(dtype=np.float64)
z = f.read_reals(dtype=np.float64)
vx = f.read_reals(dtype=np.float64)
vy = f.read_reals(dtype=np.float64)
vz = f.read_reals(dtype=np.float64)
m = f.read_reals(dtype=np.float64)
part_ids = f.read_ints()
birth = f.read_reals(dtype=np.float32)
f.close()
return ncpu, dim, nparts, x, y, z, part_ids
def _process_one(data_file):
''' Process one output file to generate a bloom filter'''
path, dump_name = os.path.split(data_file)
_, parent_dir = os.path.split(path)
# ensure the containing folder exists
bf_dir_path = os.path.join('bloom_filters', parent_dir)
if not os.path.isdir(bf_dir_path):
os.mkdir(bf_dir_path)
bf_file_path = os.path.join(bf_dir_path, dump_name)
if not os.path.isfile(bf_file_path):
ncpu, _, nparts, _, _, _, ids = read_output(data_file, header_only=False)
bf = BloomFilter(nparts, 1./ncpu, bf_file_path)
bf.update(ids)
return bf_file_path
def _process_all(data_file_list, use_tqdm=True):
'''Simple wrapper for a looper'''
if not use_tqdm:
iterator = data_file_list
else:
iterator = tqdm(data_file_list)
return [_process_one(data_file) for data_file in iterator]
def build_bloom_filter(basepath):
''' Build a bloom filter for each outputs' dump, so that it is very fast to know
whether a particle is part of a dump.'''
allfiles = glob.glob(os.path.join(basepath, 'part*.out*'))
allfiles.sort()
if args.process > 1:
p = Pool(args.process)
# split the list of files into 10-files chunks
chunk_l = 100
splitted = [allfiles[i*chunk_l:(i+1)*chunk_l]
for i in range(int(len(allfiles) / (chunk_l*1.))+1)]
return reduce(lambda prev, curr: prev + curr, p.map(_process_all, splitted))
else:
return _process_all(allfiles, use_tqdm=True)
def cpu_containing(particles, bloom_filters, yieldAll=True):
''' Iterate over all bloom filter and yield the one containing the particle'''
for cpu in tqdm(range(len(bloom_filters))):
bf = BloomFilter.open(bloom_filters[cpu])
yieldCPU = False
cpu_contains = []
for p in particles:
if p in bf:
yieldCPU = True
cpu_contains.append(p)
if yieldAll or yieldCPU:
yield cpu+1, cpu_contains
def run_all_galaxies(galaxies, halo_list, associations, information, bf, ramses_output_dir):
_tmp = associations.gal_id[associations.gal_id > 0]
n_galaxies = _tmp.size
_galaxies_treated = 0
for _, gal_id in _tmp.iteritems():
_galaxies_treated += 1
print(('Get information of all particles of halo associated to galaxy {}'.format(gal_id) +
' ({}/{} - {:.2f}%)'.format(_galaxies_treated, n_galaxies,
100.*_galaxies_treated/n_galaxies)))
yield v0(gal_id, halo_list, associations, information, bf, ramses_output_dir)
def v0(gal_id, halo_list, associations, information, bf, ramses_output_dir):
ramses_dump = int(ramses_output_dir.split('/')[-2].split('_')[-1])
gal_halo = associations[associations.gal_id == gal_id]
halo_id = int(gal_halo.halo_id)
particles = pd.DataFrame({
'id': particles_in_halo(args.DM_tree_bricks, start=halo_id)[halo_id]
})
particles_as_arr = particles.as_matrix().flatten()
cpus = list(cpu_containing(particles_as_arr, bf))
data = pd.DataFrame(columns=['x', 'y', 'z', 'ids'])
cpu_visited = {}
# create a set of found particles
particles_found = set()
particles_as_set = set(particles.id)
# routine to filter and fill particles_found
def filter_out(x, y, z, ids):
# list of particles we're looking for
particles_list = particles_as_set - particles_found
for i in range(len(ids)):
if ids[i] in particles_list:
particles_found.add(ids[i])
yield x[i], y[i], z[i], ids[i]
# Sort CPUs by number of particles in them (first has most particles)
def sortFun(x, y):
lx, ly = len(x[1]), len(y[1])
if (lx < ly):
return 1
elif (lx > ly):
return -1
else:
return 0
cpus.sort(sortFun)
print('Read {} cpus containing the {} particles'.format(len(cpus), len(particles)))
for cpu, part_in_cpu_raw in cpus:
# remove already visited particles from part_in_cpu
# part_in_cpu = set(part_in_cpu_raw) - particles_found
part_remaining = particles_as_set - particles_found
if len(part_remaining) == 0:
print('\t\tFound everything at cpu {}!'.format(cpu))
break
print('\t~{} parts in cpu {} ({} remaining)'.format(len(part_in_cpu_raw),
cpu, len(part_remaining)))
ncpu, dim, nparts, x, y, z, part_ids = read_output(
os.path.join(ramses_output_dir,
'part_{:0>5}.out{:0>5}'.format(ramses_dump, cpu)), header_only=False)
_tmp = list(filter_out(x, y, z, part_ids))
# no elements found, go to next process
if len(_tmp) == 0:
continue
arr = np.array(_tmp)
data_tmp = pd.DataFrame(arr, columns=['x', 'y', 'z', 'ids'])
data = data.append(data_tmp)
# data_halo = data[[_d.ids in particles_as_set for k, _d in tqdm(data.iterrows())]]
return gal_id, halo_id, data
if __name__ == '__main__':
global args
args = parser.parse_args()
print('Reading lists…')
def get_ramses_output(ramses_output_path):
# get the part in "output_00xxx"
output = os.path.dirname(ramses_output_path).split('/')[-1]
num = output.split('_')[-1]
return num
nsim = get_ramses_output(args.ramses_output_start)
path = os.path.join(args.ramses_output_start, 'info_' + nsim + '.txt')
infos_start = read_infos(path)
nsim = get_ramses_output(args.ramses_output_end)
print(nsim)
path = os.path.join(args.ramses_output_end, 'info_' + nsim + '.txt')
infos_end = read_infos(path)
galaxies = read_galaxy_list(args.galaxy_list)
_tmp_rows, _tmp_cols = tools.io.read_list_header(args.halo_list)
halo_list = tools.io.read_list_data(_tmp_rows, _tmp_cols)
associations = read_association(args.association_list)
# print('Reading tree…')
# halos_particles = particles_in_halo(args.DM_tree_bricks_start, start=181102)
print('Loading particles index…')
bf_end = build_bloom_filter(args.ramses_output_end)
bf_start = build_bloom_filter(args.ramses_output_start)
genstart = run_all_galaxies(galaxies, halo_list, associations, infos_start, bf_start,
args.ramses_output_start)
genend = run_all_galaxies(galaxies, halo_list, associations, infos_end, bf_end,
args.ramses_output_end)
def gen_plot(parts_start, parts_end, gal_id, halo_id):
fig = plt.figure(figsize=(16, 9))
fig.set
ax = fig.add_subplot(121, projection='3d')
ax.set_title('Start')
ax.scatter3D(parts_start.x, parts_start.y, parts_start.z)
ax = fig.add_subplot(122, projection='3d')
ax.set_title('End')
ax.scatter3D(parts_end.x, parts_end.y, parts_end.z)
fig.suptitle('Halo {}, galaxy {}'.format(halo_id, gal_id))
plt.show()
def loop():
gal_id, halo_id, parts_start = next(genstart)
gal_id, halo_id, parts_end = next(genend)
gen_plot(parts_start, parts_end, gal_id, halo_id)
def plot(gal_id):
_, _, parts_start = v0(gal_id, halo_list, associations, infos_start, bf_start,
args.ramses_output_start)
_, _, parts_end = v0(gal_id, halo_list, associations, infos_end, bf_end,
args.ramses_output_end)
halo_halo = int(associations[associations.gal_id == gal_id].halo_id)
gen_plot(parts_start, parts_end, gal_id, halo_id)
| [
"pandas.DataFrame",
"tqdm.tqdm",
"matplotlib.pyplot.show",
"argparse.ArgumentParser",
"pandas.read_csv",
"pybloomfilter.BloomFilter.open",
"tools.io.read_list_header",
"pybloomfilter.BloomFilter",
"matplotlib.pyplot.figure",
"numpy.array",
"multiprocessing.Pool",
"scipy.io.FortranFile",
"too... | [((539, 583), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""FIXME"""'}), "(description='FIXME')\n", (562, 583), False, 'import argparse\n'), ((1569, 1597), 'scipy.io.FortranFile', 'FortranFile', (['tree_brick', '"""r"""'], {}), "(tree_brick, 'r')\n", (1580, 1597), False, 'from scipy.io import FortranFile\n'), ((3021, 3047), 'scipy.io.FortranFile', 'FortranFile', (['listfile', '"""r"""'], {}), "(listfile, 'r')\n", (3032, 3047), False, 'from scipy.io import FortranFile\n'), ((3210, 3302), 'pandas.DataFrame', 'pd.DataFrame', (['_tmp'], {'columns': "['id', 'vt', 'dvz', 'dvr', 'dvtheta', 'mass', 'x', 'y', 'z']"}), "(_tmp, columns=['id', 'vt', 'dvz', 'dvr', 'dvtheta', 'mass',\n 'x', 'y', 'z'])\n", (3222, 3302), True, 'import pandas as pd\n'), ((3679, 3705), 'scipy.io.FortranFile', 'FortranFile', (['listfile', '"""r"""'], {}), "(listfile, 'r')\n", (3690, 3705), False, 'from scipy.io import FortranFile\n'), ((3851, 3925), 'pandas.DataFrame', 'pd.DataFrame', (['_tmp'], {'columns': "['id', 'level', 'mass', 'x', 'y', 'z', 'rvir']"}), "(_tmp, columns=['id', 'level', 'mass', 'x', 'y', 'z', 'rvir'])\n", (3863, 3925), True, 'import pandas as pd\n'), ((5893, 5919), 'scipy.io.FortranFile', 'FortranFile', (['listfile', '"""r"""'], {}), "(listfile, 'r')\n", (5904, 5919), False, 'from scipy.io import FortranFile\n'), ((6067, 6154), 'pandas.DataFrame', 'pd.DataFrame', (['_tmp'], {'columns': "['halo_id', 'level', 'halo_mass', 'gal_id', 'gal_mass']"}), "(_tmp, columns=['halo_id', 'level', 'halo_mass', 'gal_id',\n 'gal_mass'])\n", (6079, 6154), True, 'import pandas as pd\n'), ((6344, 6366), 'scipy.io.FortranFile', 'FortranFile', (['path', '"""r"""'], {}), "(path, 'r')\n", (6355, 6366), False, 'from scipy.io import FortranFile\n'), ((10154, 10198), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': "['x', 'y', 'z', 'ids']"}), "(columns=['x', 'y', 'z', 'ids'])\n", (10166, 10198), True, 'import pandas as pd\n'), ((12904, 12945), 
'tools.io.read_list_header', 'tools.io.read_list_header', (['args.halo_list'], {}), '(args.halo_list)\n', (12929, 12945), False, 'import tools\n'), ((12962, 13007), 'tools.io.read_list_data', 'tools.io.read_list_data', (['_tmp_rows', '_tmp_cols'], {}), '(_tmp_rows, _tmp_cols)\n', (12985, 13007), False, 'import tools\n'), ((3385, 3455), 'numpy.sqrt', 'np.sqrt', (['(galaxies.dvz ** 2 + galaxies.dvtheta ** 2 + galaxies.dvr ** 2)'], {}), '(galaxies.dvz ** 2 + galaxies.dvtheta ** 2 + galaxies.dvr ** 2)\n', (3392, 3455), True, 'import numpy as np\n'), ((5445, 5497), 'pandas.read_csv', 'pd.read_csv', (['f'], {'names': 'headers', 'delim_whitespace': '(True)'}), '(f, names=headers, delim_whitespace=True)\n', (5456, 5497), True, 'import pandas as pd\n'), ((7564, 7609), 'pybloomfilter.BloomFilter', 'BloomFilter', (['nparts', '(1.0 / ncpu)', 'bf_file_path'], {}), '(nparts, 1.0 / ncpu, bf_file_path)\n', (7575, 7609), False, 'from pybloomfilter import BloomFilter\n'), ((7827, 7847), 'tqdm.tqdm', 'tqdm', (['data_file_list'], {}), '(data_file_list)\n', (7831, 7847), False, 'from tqdm import tqdm\n'), ((8200, 8218), 'multiprocessing.Pool', 'Pool', (['args.process'], {}), '(args.process)\n', (8204, 8218), False, 'from multiprocessing import Pool\n'), ((8777, 8813), 'pybloomfilter.BloomFilter.open', 'BloomFilter.open', (['bloom_filters[cpu]'], {}), '(bloom_filters[cpu])\n', (8793, 8813), False, 'from pybloomfilter import BloomFilter\n'), ((11926, 11940), 'numpy.array', 'np.array', (['_tmp'], {}), '(_tmp)\n', (11934, 11940), True, 'import numpy as np\n'), ((11961, 12010), 'pandas.DataFrame', 'pd.DataFrame', (['arr'], {'columns': "['x', 'y', 'z', 'ids']"}), "(arr, columns=['x', 'y', 'z', 'ids'])\n", (11973, 12010), True, 'import pandas as pd\n'), ((13668, 13695), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(16, 9)'}), '(figsize=(16, 9))\n', (13678, 13695), True, 'import matplotlib.pyplot as plt\n'), ((14074, 14084), 'matplotlib.pyplot.show', 'plt.show', ([], {}), 
'()\n', (14082, 14084), True, 'import matplotlib.pyplot as plt\n')] |
import numpy as np
import matplotlib.pyplot as plt
datos= np.genfromtxt("data.txt")
plt.hist(datos,bins=100)
plt.savefig("histograma.pdf") | [
"matplotlib.pyplot.savefig",
"numpy.genfromtxt",
"matplotlib.pyplot.hist"
] | [((59, 84), 'numpy.genfromtxt', 'np.genfromtxt', (['"""data.txt"""'], {}), "('data.txt')\n", (72, 84), True, 'import numpy as np\n'), ((85, 110), 'matplotlib.pyplot.hist', 'plt.hist', (['datos'], {'bins': '(100)'}), '(datos, bins=100)\n', (93, 110), True, 'import matplotlib.pyplot as plt\n'), ((110, 139), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""histograma.pdf"""'], {}), "('histograma.pdf')\n", (121, 139), True, 'import matplotlib.pyplot as plt\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Aug 28 10:57:28 2018
@author: jack.lingheng.meng
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import logging
import os
import glob
import tensorflow as tf
from tensorflow.contrib.tensorboard.plugins import projector
from tensorflow import layers
import numpy as np
import pandas as pd
import pprint as pp
import time
from IPython.core.debugger import Tracer
from LASAgent.replay_buffer import ReplayBuffer
from LASAgent.noise import AdaptiveParamNoiseSpec,NormalActionNoise,OrnsteinUhlenbeckActionNoise
from LASAgent.environment_model.multilayer_nn_env_model import MultilayerNNEnvModel
from LASAgent.intrinsic_motivation_model.knowledge_based_intrinsic_motivation import KnowledgeBasedIntrinsicMotivationComponent
# ===========================
# Actor and Critic DNNs
# ===========================
class ActorNetwork(object):
    """
    DDPG-style deterministic policy network plus a soft-updated target copy.

    Input to the network is the state, output is the action
    under a deterministic policy.
    The output layer activation is a tanh to keep the action
    between -action_bound and action_bound
    """
    def __init__(self, name, sess, observation_space, action_space,
                 learning_rate, tau, batch_size,
                 actor_model_save_path = 'results/models',
                 target_actor_model_save_path = 'results/models',
                 restore_model_flag=False,
                 restore_model_version = 0):
        """
        Build the actor graph, its target copy, and the training ops.

        Parameters
        ----------
        name: str
            unique variable-scope name for this actor; uniqueness matters
            because tf.trainable_variables(scope=name) is used below to
            collect exactly this model's parameters.
        sess: tf.Session
        observation_space: gym.spaces.Box
        action_space: gym.spaces.Box
        learning_rate: float
            Adam learning rate for the actor update.
        tau: float
            soft-update coefficient for the target network.
        batch_size: int
            used to normalize the accumulated policy gradients.
        restore_model_flag: bool default=False
        restore_model_version: int default = 0
            NOTE(review): this argument is currently ignored -- the version
            restored is always the newest one found on disk by
            _find_the_most_recent_model_version(); confirm this is intended.
        actor_model_save_path: str default = 'results/models'
        target_actor_model_save_path: str default = 'results/models'
        """
        self.name = name
        self.sess = sess
        self.s_dim = observation_space.shape[0]
        self.a_dim = action_space.shape[0]
        # NOTE(review): the action bounds are stored but never applied to the
        # network output below; the tanh output stays in [-1, 1] -- confirm
        # that actions are expected to live in that range.
        self.action_bound_high = action_space.high
        self.action_bound_low = action_space.low
        self.learning_rate = learning_rate
        self.tau = tau
        self.batch_size = batch_size
        # Info for load pre-trained actor models
        self.actor_model_save_path = actor_model_save_path
        self.target_actor_model_save_path = target_actor_model_save_path
        self.restore_model_flag = restore_model_flag
        # Overwrites the restore_model_version argument with the newest
        # version found on disk (-1 if nothing is saved yet).
        self.restore_model_version = self._find_the_most_recent_model_version()
        if self.restore_model_flag and self.restore_model_version == -1:
            # NOTE(review): only logs and continues here, whereas CriticNetwork
            # raises in the same situation -- confirm which is intended.
            logging.error('You do not have pretrained models.\nPlease set "load_pretrained_agent_flag = False".')
        with tf.name_scope(self.name):
            with tf.variable_scope(self.name) as self.scope:
                # Create Actor Model
                self.inputs, self.out = self.create_actor_network()
                self.network_params = tf.trainable_variables(scope=self.name)
                self.actor_model_saver = tf.train.Saver(self.network_params) # Saver to save and restore model variables
                # Create Target Actor Model. The target's variables are the
                # ones created in this scope *after* network_params, hence
                # the slice below.
                self.target_inputs, self.target_out = self.create_actor_network()
                self.target_network_params = tf.trainable_variables(scope=self.name)[len(self.network_params):]
                self.target_actor_model_saver = tf.train.Saver(self.target_network_params) # Saver to save and restore model variables
                # Op for periodically updating target network:
                # target <- tau * online + (1 - tau) * target
                self.update_target_network_params = \
                    [self.target_network_params[i].assign(tf.multiply(self.network_params[i], self.tau) +
                                                          tf.multiply(self.target_network_params[i], 1. - self.tau))
                     for i in range(len(self.target_network_params))]
                # This gradient will be provided by the critic network: d[Q(s,a)]/d[a]
                self.action_gradient = tf.placeholder(tf.float32, [None, self.a_dim])
                # Combine the gradients here
                # The reason of negative self.action_gradient here is we want to do
                # gradient ascent, and AdamOptimizer will do gradient descent when applying
                # a gradient.
                self.unnormalized_actor_gradients = tf.gradients(self.out,
                                                                 self.network_params,
                                                                 -self.action_gradient)
                # Normalized actor gradient (average over the minibatch)
                self.actor_gradients = list(map(lambda x: tf.divide(x, self.batch_size), self.unnormalized_actor_gradients))
                # Optimization Op
                self.optimize = tf.train.AdamOptimizer(self.learning_rate).apply_gradients(zip(self.actor_gradients, self.network_params))
        # Initialize variables in variable_scope: self.name
        # Note: make sure initialize variables **after** defining all variable
        self.sess.run(tf.variables_initializer(tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope = self.name)))
        # Restore Actor and Target-Actor Models
        if self.restore_model_flag == True:
            actor_filepath = os.path.join(self.actor_model_save_path,
                                          self.name + '_' + str(self.restore_model_version)+'.ckpt')
            target_actor_filepath = os.path.join(self.actor_model_save_path,
                                                 self.name + '_target_' + str(self.restore_model_version)+'.ckpt')
            self.restore_actor_and_target_actor_network(actor_filepath,
                                                        target_actor_filepath)

    def create_actor_network(self):
        """
        Build a 100-50 fully connected policy network with batch-norm and
        dropout (rate 0.5) after each hidden layer.

        Returns
        -------
        inputs: tf.placeholder of shape (None, s_dim)
        out: output tensor; tanh-activated, so each component is in [-1, 1]
        """
        inputs = tf.placeholder(tf.float32, shape=(None, self.s_dim), name = 'ActorInput')
        h1 = layers.Dense(units = 100, activation = tf.nn.relu,
                          kernel_initializer = tf.initializers.truncated_normal)(inputs)
        h1 = layers.BatchNormalization()(h1)
        h1 = layers.Dropout(0.5)(h1)
        h2 = layers.Dense(units = 50, activation = tf.nn.relu,
                          kernel_initializer = tf.initializers.truncated_normal)(h1)
        h2 = layers.BatchNormalization()(h2)
        h2 = layers.Dropout(0.5)(h2)
        # Final layer weights are init to Uniform[-3e-3, 3e-3]
        out = layers.Dense(units = self.a_dim, activation = tf.tanh,
                           kernel_initializer=tf.initializers.random_uniform(minval = -0.003, maxval = 0.003),
                           name = 'ActorOutput')(h2)
        return inputs, out

    def save_actor_network(self, version_number = 0):
        """
        Save actor and target-actor checkpoints as
        <name>_<version>.ckpt and <name>_target_<version>.ckpt.
        """
        actor_filepath = os.path.join(self.actor_model_save_path,
                                      self.name + '_' + str(version_number)+'.ckpt')
        target_actor_filepath = os.path.join(self.target_actor_model_save_path,
                                             self.name +'_target_' + str(version_number)+'.ckpt')
        self.actor_model_saver.save(self.sess, actor_filepath)
        self.target_actor_model_saver.save(self.sess, target_actor_filepath)
        logging.info('Actor model saved in path: {}.'.format(actor_filepath))
        logging.info('Target Actor model saved in path: {}.'.format(target_actor_filepath))

    def restore_actor_and_target_actor_network(self, actor_filepath, target_actor_filepath):
        """
        Restore actor and target-actor variables from the given checkpoints.

        The following code is to inspect variables in a checkpoint:
            from tensorflow.python.tools import inspect_checkpoint as chkp
            chkp.print_tensors_in_checkpoint_file(file_path, tensor_name='', all_tensors=True, all_tensor_names=True)
        """
        # Initialize variables first; the restores below then overwrite them.
        self.sess.run(tf.variables_initializer(self.network_params, name='init_network_params'))
        self.sess.run(tf.variables_initializer(self.target_network_params, name='init_target_network_params'))
        self.actor_model_saver.restore(self.sess, actor_filepath)
        self.target_actor_model_saver.restore(self.sess, target_actor_filepath)
        logging.info('Restored acotor: {}'.format(actor_filepath))
        logging.info('Restored target acotor: {}'.format(target_actor_filepath))

    def train(self, inputs, a_gradient):
        """Run one actor update step given observations and dQ/da from the critic."""
        self.sess.run(self.optimize,
                      feed_dict={self.inputs: inputs,
                                 self.action_gradient: a_gradient})

    def predict(self, inputs):
        """
        Prediction of Actor Model.
        """
        return self.sess.run(self.out,
                             feed_dict={self.inputs: inputs})

    def predict_target(self, inputs):
        """
        Prediction of Target Actor Model.
        """
        return self.sess.run(self.target_out,
                             feed_dict={self.target_inputs: inputs})

    def update_target_network(self):
        """Soft-update Target Actor Model toward the online actor (weight tau)."""
        self.sess.run(self.update_target_network_params)

    def _find_the_most_recent_model_version(self):
        """
        Scan actor_model_save_path for files named <name>_target_<version>.*
        and return the largest version number found.

        Returns
        -------
        the_most_recent_model_version: int
            the most recent model version. If no saved model, return -1.
        """
        # Find the most recent version
        model_version = []
        for file_name_temp in os.listdir(self.actor_model_save_path):
            if self.name+'_target_' in file_name_temp:
                _, version_temp = file_name_temp.split('.')[0].split(self.name+'_target_')
                model_version.append(version_temp)
        if len(model_version) != 0:
            the_most_recent_model_version = max([int(i) for i in model_version])
        else:
            the_most_recent_model_version = -1
        return the_most_recent_model_version
class CriticNetwork(object):
    """
    DDPG-style state-action value network Q(s, a) plus a soft-updated
    target copy.

    Input to the network is the state and action, output is Q(s,a).
    The action must be obtained from the output of the Actor network.
    """
    def __init__(self, name, sess, observation_space, action_space,
                 learning_rate, tau, gamma,
                 critic_model_save_path = 'results/models',
                 target_critic_model_save_path = 'results/models',
                 restore_model_flag=False,
                 restore_model_version = 0):
        """
        Parameters
        ----------
        name: str
            The name of this cirtic network. Giving a name to object of CriticNetwork
            is necessary to avoid messing up trainable variables together.
        sess: tf.Session
            tf.Session to run computational graph
        observation_space: gym.spaces.Box
            observation space of environment
        action_space: gym.spaces.Box
            action space of environment
        learning_rate: float
            learning rate to train CriticNetwork
        tau: float
            hyper-parameter weighting the update of target network
        gamma: float
            discount rate (stored on self; not used inside this class itself)
        critic_model_save_path: str default = 'results/models'
            path of critic model we are going to save
        target_critic_model_save: str default = 'results/models/target_critic_model.ckpt'
            path of target critic model we are going to save
        restore_model_flag: bool default=False:
            indicator of whether to restore a pre-trained critic network
        restore_model_version: int default = 0
            if restore model, this parameter gives the number of specific version
            of models we are going to restore.
            NOTE(review): this argument is currently ignored -- the version
            restored is always the newest one found on disk by
            _find_the_most_recent_model_version(); confirm this is intended.
        """
        # name is necessary, since we will reuse this graph multiple times.
        self.name = name
        self.sess = sess
        self.s_dim = observation_space.shape[0]
        self.a_dim = action_space.shape[0]
        self.learning_rate = learning_rate
        self.tau = tau
        self.gamma = gamma
        # Info for save and load pre-trained critic models
        self.critic_model_save_path = critic_model_save_path
        self.target_critic_model_save_path = target_critic_model_save_path
        self.restore_model_flag = restore_model_flag
        # Overwrites the restore_model_version argument with the newest
        # version found on disk (-1 if nothing is saved yet).
        self.restore_model_version = self._find_the_most_recent_model_version()
        if self.restore_model_flag and self.restore_model_version == -1:
            raise Exception('You do not have pretrained models.\nPlease set "load_pretrained_agent_flag = False".')
        with tf.name_scope(self.name):
            with tf.variable_scope(self.name) as self.scope:
                # Create Critic Model
                self.inputs, self.action, self.out = self.create_critic_network()
                self.network_params = tf.trainable_variables(scope=self.name)
                self.critic_model_saver = tf.train.Saver(self.network_params) # Saver to save and restore model variables
                # Create Target Critic Model. The target's variables are the
                # ones created in this scope *after* network_params, hence
                # the slice below.
                self.target_inputs, self.target_action, self.target_out = self.create_critic_network()
                self.target_network_params = tf.trainable_variables(scope=self.name)[len(self.network_params):]
                self.target_critic_model_saver = tf.train.Saver(self.target_network_params)
                #Tracer()()
                # Op for periodically updating target network with online network
                # weights with regularization:
                # target <- tau * online + (1 - tau) * target
                self.update_target_network_params = \
                    [self.target_network_params[i].assign(tf.multiply(self.network_params[i], self.tau) \
                    + tf.multiply(self.target_network_params[i], 1. - self.tau))
                        for i in range(len(self.target_network_params))]
                # Network target: placeholder for training targets (y values)
                # computed and fed by the caller of train().
                self.target_q_value = tf.placeholder(tf.float32, [None, 1])
                # Define loss and optimization Op
                self.loss = tf.losses.mean_squared_error(labels = self.target_q_value,
                                                         predictions = self.out)
                self.optimize = tf.train.AdamOptimizer(self.learning_rate).minimize(self.loss)
                # Get the gradient of the net w.r.t. the action.
                # For each action in the minibatch (i.e., for each x in xs),
                # this will sum up the gradients of each critic output in the minibatch
                # w.r.t. that action. Each output is independent of all
                # actions except for one.
                self.action_grads = tf.gradients(self.out, self.action)
        # Initialize variables in variable_scope: self.name
        # Note: make sure initialize variables **after** defining all variable
        self.sess.run(tf.variables_initializer(tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope = self.name)))
        # Restore Critic and Target Critic Models
        if self.restore_model_flag == True:
            critic_filepath = os.path.join(self.critic_model_save_path,
                                           self.name + '_' + str(self.restore_model_version)+'.ckpt')
            target_critic_filepath = os.path.join(self.critic_model_save_path,
                                                  self.name + '_target_' + str(self.restore_model_version)+'.ckpt')
            self.restore_critic_and_target_critic_network(critic_filepath,
                                                          target_critic_filepath)

    def create_critic_network(self):
        """
        Build the critic network: state and action each pass through their
        own 400-unit dense layer (with batch-norm and dropout 0.5), are
        concatenated, and feed a 300-unit layer before the scalar Q output.

        Returns
        -------
        obs: tf.placeholder of shape (None, s_dim)
        act: tf.placeholder of shape (None, a_dim)
        out: Q(s, a) output tensor of shape (None, 1)
        """
        obs = tf.placeholder(tf.float32, shape=(None, self.s_dim), name = 'CriticInputState')
        act = tf.placeholder(tf.float32, shape=(None, self.a_dim), name = 'CriticInputAction')
        h1_obs = layers.Dense(units = 400, activation = tf.nn.relu,
                              kernel_initializer = tf.initializers.truncated_normal)(obs)
        h1_obs = layers.BatchNormalization()(h1_obs)
        h1_obs = layers.Dropout(0.5)(h1_obs)
        h1_act = layers.Dense(units = 400, activation = tf.nn.relu,
                              kernel_initializer = tf.initializers.truncated_normal)(act)
        h1_act = layers.BatchNormalization()(h1_act)
        h1_act = layers.Dropout(0.5)(h1_act)
        merged = tf.concat([h1_obs, h1_act], axis=1)
        h2 = layers.Dense(units = 300, activation = tf.nn.relu,
                          kernel_initializer = tf.initializers.truncated_normal)(merged)
        h2 = layers.BatchNormalization()(h2)
        h2 = layers.Dropout(0.5)(h2)
        # Linear layer connected to 1 output representing Q(s,a)
        # Final layer weights are init to Uniform[-3e-3, 3e-3]
        out = layers.Dense(units = 1,
                           kernel_initializer=tf.initializers.random_uniform(minval = -0.003, maxval = 0.003),
                           name = 'CriticOutput')(h2)
        return obs, act, out

    def save_critic_network(self, version_number = 0):
        """
        Function used to save critic and target critic model, as
        <name>_<version>.ckpt and <name>_target_<version>.ckpt.

        Parameters
        ----------
        version_number: int default = 0
            the time when save this ciritic and its target critic models.
        """
        critic_filepath = os.path.join(self.critic_model_save_path,
                                       self.name + '_' + str(version_number)+'.ckpt')
        target_critic_filepath = os.path.join(self.target_critic_model_save_path,
                                              self.name +'_target_' + str(version_number)+'.ckpt')
        self.critic_model_saver.save(self.sess, critic_filepath)
        self.target_critic_model_saver.save(self.sess, target_critic_filepath)
        logging.info('Critic model saved in path: {}.'.format(critic_filepath))
        logging.info('Target Critic model saved in path: {}.'.format(target_critic_filepath))

    def restore_critic_and_target_critic_network(self, critic_filepath, target_critic_filepath):
        """
        Restore critic and target-critic variables from the given checkpoints.

        The following code is to inspect variables in a checkpoint:
            from tensorflow.python.tools import inspect_checkpoint as chkp
            chkp.print_tensors_in_checkpoint_file(file_path, tensor_name='', all_tensors=True, all_tensor_names=True)
        """
        self.critic_model_saver.restore(self.sess, critic_filepath)
        self.target_critic_model_saver.restore(self.sess, target_critic_filepath)
        logging.info('Restored acotor: {}'.format(critic_filepath))
        logging.info('Restored target acotor: {}'.format(target_critic_filepath))

    def train(self, observation, action, target_q_value):
        """
        Run one critic update step toward the given target Q values.

        Returns
        -------
        loss: mean square error
        out: output of Critic_Network
        optimize: tf.operation
        """
        return self.sess.run([self.loss, self.out, self.optimize],
                             feed_dict={self.inputs: observation,
                                        self.action: action,
                                        self.target_q_value: target_q_value})

    def predict(self, observation, action):
        """Q(s, a) from the online critic."""
        return self.sess.run(self.out,
                             feed_dict={self.inputs: observation,
                                        self.action: action})

    def predict_target(self, observation, action):
        """
        Prediction of Target-Critic Model
        """
        return self.sess.run(self.target_out,
                             feed_dict={self.target_inputs: observation,
                                        self.target_action: action})

    def action_gradients(self, inputs, actions):
        """Return d[Q(s,a)]/d[a], used by the actor's policy-gradient step."""
        return self.sess.run(self.action_grads,
                             feed_dict={
                                     self.inputs: inputs,
                                     self.action: actions})

    def update_target_network(self):
        # Soft-update Target Critic toward the online critic (weight tau).
        self.sess.run(self.update_target_network_params)

    def _find_the_most_recent_model_version(self):
        """
        Scan critic_model_save_path for files named <name>_target_<version>.*
        and return the largest version number found.

        Returns
        -------
        the_most_recent_model_version: int
            the most recent model version (-1 if no saved model exists)
        """
        # Find the most recent version
        model_version = []
        for file_name_temp in os.listdir(self.critic_model_save_path):
            if self.name+'_target_' in file_name_temp:
                _, version_temp = file_name_temp.split('.')[0].split(self.name+'_target_')
                model_version.append(version_temp)
        if len(model_version) != 0:
            the_most_recent_model_version = max([int(i) for i in model_version])
        else:
            the_most_recent_model_version = -1
        return the_most_recent_model_version
# ===========================
# Living Architecture System Agent
# ===========================
class LASAgent_Actor_Critic():
"""
LASAgent is the learning agent of Living Architecture System. Basically, it
consists of three main components coding policies and value-functions:
1. Extrinsically motivated actor-critic model
2. Knowledge-based Intrinsically motivated actor-critic model
3. Competence-based Intrinsically motivated actor-critic model
And two main components producing intrinsic motivation
1. Knowledge-based intrinsic motivation
2. Competence-based intrinsic motivation
"""
def __init__(self, sess, agent_name,
observation_space, action_space,
actor_lr = 0.0001, actor_tau = 0.001,
critic_lr = 0.0001, critic_tau = 0.001, gamma = 0.99,
minibatch_size = 64,
max_episodes = 50000, max_episode_len = 1000,
# Exploration Strategies
exploration_action_noise_type = 'ou_0.2',
exploration_epsilon_greedy_type = 'none',
# Save Summaries
save_dir = '',
experiment_runs = '',
# Save and Restore Actor-Critic Model
restore_actor_model_flag = False,
restore_critic_model_flag = False,
restore_env_model_flag = False,
restore_model_version = 0):
"""
Intialize LASAgent.
Parameters
----------
actor_lr: float default = 0.0001
actor model learning rate
actor_tau: float default = 0.001
target actor model updating weight
critic_lr: float default = 0.0001
critic model learning rate
critic_tau: float default = 0.001
target critic model updating weight
gamma default:int = 0.99
future reward discounting paramter
minibatch_size:int default = 64
size of minibabtch
max_episodes:int default = 50000
maximum number of episodes
max_episode_len: int default = 1000
maximum lenght of each episode
exploration_action_noise_type: str default = 'ou_0.2',
set up action noise. Options:
1. 'none' (no action noise)
2. 'adaptive-param_0.2'
3. 'normal_0.2'
4. 'ou_0.2'
exploration_epsilon_greedy_type: str default = 'none',
set up epsilon-greedy.
1. If exploration_epsilon_greedy_type == 'none', no epsilon-greedy.
2. 'epsilon-greedy-max_1_min_0.05_decay_0.999'
save_dir: string default='')
directory to save tensorflow summaries and pre-trained models
experiment_runs: str default = ''
directory to save summaries of a specific run
restore_actor_model_flag: bool default = False
indicate whether load pre-trained actor model
restore_critic_model_flag: bool default = False
indicate whetther load pre-trained critic model
"""
# Produce a string describes experiment setting
self.experiment_setting = ['LAS Environment:' + '<br />' +\
'1. action_space: ' + str(action_space.shape) + '<br />' +\
'2. observation_space: ' + str(observation_space.shape) + '<br /><br />' +\
'LASAgent Hyper-parameters: ' + '<br />' +\
'1. actor_lr: ' + str(actor_lr) + '<br />' +\
'2. actor_tau: ' + str(actor_tau) + '<br />' +\
'3. critic_lr: ' + str(critic_lr) + '<br />' +\
'4. critic_tau: ' + str(critic_tau) + '<br />' +\
'5. gamma: ' + str(gamma) + '<br />' +\
'6. minibatch_size: ' + str(minibatch_size) + '<br />' +\
'7. max_episodes: ' + str(max_episodes) + '<br />' +\
'8. max_episode_len: ' + str(max_episode_len) + '<br />' +\
'9. action_noise_type: ' + str(exploration_action_noise_type) + '<br />' +\
'10.epsilon_greedy_type: ' + str(exploration_epsilon_greedy_type) + '<br />' +\
'11.restore_actor_model_flag: ' + str(restore_actor_model_flag) + '<br />' +\
'12.restore_critic_model_flag: ' + str(restore_critic_model_flag)][0]
# Init Environment Related Parameters
self.sess = sess
self.agent_name = agent_name
self.action_space = action_space
self.observation_space = observation_space
self.save_dir = save_dir
self.experiment_runs = experiment_runs
# Temporary Memory
self.first_experience = True
self.observation_old = []
self.action_old = []
self.reward_new = []
self.observation_new = []
# =================================================================== #
# Initialize Global Hyper-parameters #
# =================================================================== #
self.max_episodes = max_episodes
self.max_episode_len = max_episode_len
self.episode_counter = 1
self.steps_counter = 1 # Steps elapsed in one episode
self.total_step_counter = 1 # Steps elapsed in whole life
self.render_env = False
# =================================================================== #
# Initialize Replay Buffers for #
# Extrinsic and Intrinsic Policy, and Environment Model #
# =================================================================== #
# ********************************************* #
# Replay Buffer for Extrinsic Policy #
# ********************************************* #
self.buffer_size = 1000000
self.random_seed = 1234
self.replay_buffer = ReplayBuffer(self.buffer_size, self.random_seed)
# ********************************************* #
# Replay Buffer for Environment Model #
# ********************************************* #
# 5% experience will be saved in test buffer
self.env_model_buffer_test_ratio = 0.2
# 1. Training Buffer
self.env_model_train_buffer_size = 100000
self.env_model_train_buffer = ReplayBuffer(self.env_model_train_buffer_size, self.random_seed)
# 2. Test Buffer:
# For examing whether our environemnt model is converged, we can
# save a small set of testing samples that will not be used to
# training environment. Note that this test set should not have too
# much past experiences either too much recent experiences.
self.env_model_test_buffer_size = 10000
self.env_model_test_samples_size = 1000
self.env_model_test_buffer = ReplayBuffer(self.env_model_test_buffer_size, self.random_seed)
# ****************************************************** #
# Replay Buffer for Knowledge-based Intrinsic Policy #
# ****************************************************** #
self.knowledge_based_intrinsic_policy_buffer_size = 1000000
self.knowledge_based_intrinsic_policy_replay_buffer = ReplayBuffer(self.knowledge_based_intrinsic_policy_buffer_size,
self.random_seed)
# ****************************************************** #
# Replay Buffer for Competence-based Intrinsic Policy #
# ****************************************************** #
self.competence_based_intrinsic_policy_buffer_size = 1000000
self.competence_based_intrinsic_policy_replay_buffer = ReplayBuffer(self.competence_based_intrinsic_policy_buffer_size,
self.random_seed)
# =================================================================== #
# Initialize Parameters for Both Actor and Critic Model #
# =================================================================== #
self.minibatch_size = 64
# Common Saving Directory (we should use os.path.join(), change to it later)
self.models_dir = os.path.join(self.save_dir,'models',self.experiment_runs)
if not os.path.exists(self.models_dir):
os.makedirs(self.models_dir)
# =================================================================== #
# Initialize Extrinsically Motivated Actor-Critic Model #
# =================================================================== #
# Extrinsically Motivated Actor
self.extrinsically_motivated_actor_name = self.agent_name+'_extrinsically_motivated_actor_name'
self.extrinsic_actor_lr = actor_lr
self.extrinsic_actor_tau = actor_tau
# Restore Pre-trained Actor Modles
self.extrinsic_actor_model_save_path = self.models_dir
self.target_extrinsic_actor_model_save_path = self.models_dir
self.restore_extrinsic_actor_model_flag = restore_actor_model_flag
self.restore_extrinsic_actor_model_version = restore_model_version
self.extrinsic_actor_model = ActorNetwork(self.extrinsically_motivated_actor_name,
self.sess,
self.observation_space,
self.action_space,
self.extrinsic_actor_lr,
self.extrinsic_actor_tau,
self.minibatch_size,
self.extrinsic_actor_model_save_path,
self.target_extrinsic_actor_model_save_path,
self.restore_extrinsic_actor_model_flag,
self.restore_extrinsic_actor_model_version)
# Extrinsically Motivated Critic
self.extrinsically_motivated_critic_name = self.agent_name+'_extrinsically_motivated_critic_name'
self.extrinsic_critic_lr = critic_lr
self.extrinsic_critic_tau = critic_tau
self.extrinsic_gamma = gamma
# Restore Pre-trained Critic Model
self.extrinsic_critic_model_save_path = self.models_dir
self.target_extrinsic_critic_model_save_path = self.models_dir
self.restore_extrinsic_critic_model_flag = restore_critic_model_flag
self.restore_extrinsic_critic_model_version = restore_model_version
self.extrinsic_critic_model = CriticNetwork(self.extrinsically_motivated_critic_name,
self.sess,
self.observation_space,
self.action_space,
self.extrinsic_critic_lr,
self.extrinsic_critic_tau,
self.extrinsic_gamma,
self.extrinsic_critic_model_save_path,
self.target_extrinsic_critic_model_save_path,
self.restore_extrinsic_critic_model_flag,
self.restore_extrinsic_critic_model_version)
# =================================================================== #
# Initialize Environment Model #
# =================================================================== #
self.environment_model_name = self.agent_name+'_current_environment_model_name'
self.env_model_lr = 0.0001
self.env_model_minibatch_size = 200
self.env_model_save_path = self.models_dir
self.save_env_model_every_xxx_episodes = 5
self.saved_env_model_version_number = 0
self.env_load_flag = restore_env_model_flag
self.environment_model = MultilayerNNEnvModel(self.environment_model_name,
self.sess,
self.observation_space,
self.action_space,
self.env_model_lr,
self.env_model_save_path,
self.env_load_flag)
# =================================================================== #
# Initialize Knowledge-based #
# Intrinsically Motivated Actor-Critic Model #
# =================================================================== #
# Initialize Knowledge-based Intrinsic Motivation Component
self.knowledge_based_intrinsic_reward = 0
# Note; actual window size = sliding window size * save_env_model_every_xxx_steps
self.knowledge_based_intrinsic_reward_sliding_window_size = 4
self.update_newest_env_model_every_xxx_steps = 200
self.knowledge_based_intrinsic_motivation_model = KnowledgeBasedIntrinsicMotivationComponent(self.environment_model,
self.knowledge_based_intrinsic_reward_sliding_window_size)
# Intrinsically Motivated Actor
self.knowledge_based_intrinsic_actor_name = self.agent_name+'_knowledge_based_intrinsic_actor_name'
self.knowledge_based_intrinsic_actor_lr = actor_lr
self.knowledge_based_intrinsic_actor_tau = actor_tau
# Restore Pre-trained Actor Motivated by Knowledge-based Intrinsic Motivation
self.knowledge_based_intrinsic_actor_model_save_path = self.models_dir
self.target_knowledge_based_intrinsic_actor_model_save_path = self.models_dir
self.restore_knowledge_based_intrinsic_actor_model_flag = False
self.restore_knowledge_based_intrinsic_actor_model_version = restore_model_version
self.knowledge_based_intrinsic_actor_model = ActorNetwork(self.knowledge_based_intrinsic_actor_name,
self.sess,
self.observation_space,
self.action_space,
self.knowledge_based_intrinsic_actor_lr,
self.knowledge_based_intrinsic_actor_tau,
self.minibatch_size,
self.knowledge_based_intrinsic_actor_model_save_path,
self.target_knowledge_based_intrinsic_actor_model_save_path,
self.restore_knowledge_based_intrinsic_actor_model_flag,
self.restore_knowledge_based_intrinsic_actor_model_version)
# Intrinsically Motivated Critic
self.knowledge_based_intrinsic_critic_name = self.agent_name+'_knowledge_based_intrinsic_critic_name'
self.knowledge_based_intrinsic_critic_lr = critic_lr
self.knowledge_based_intrinsic_critic_tau = critic_tau
self.knowledge_based_intrinsic_critic_gamma = gamma
# Restore Pre-trained Critic Model
self.knowledge_based_intrinsic_critic_model_save_path = self.models_dir
self.target_knowledge_based_intrinsic_critic_model_save_path = self.models_dir
self.restore_knowledge_based_intrinsic_critic_model_flag = False
self.restore_knowledge_based_intrinsic_critic_model_version = restore_model_version
self.knowledge_based_intrinsic_critic_model = CriticNetwork(self.knowledge_based_intrinsic_critic_name,
self.sess,
self.observation_space,
self.action_space,
self.knowledge_based_intrinsic_critic_lr,
self.knowledge_based_intrinsic_critic_tau,
self.knowledge_based_intrinsic_critic_gamma,
self.knowledge_based_intrinsic_critic_model_save_path,
self.target_knowledge_based_intrinsic_critic_model_save_path,
self.restore_knowledge_based_intrinsic_critic_model_flag,
self.restore_knowledge_based_intrinsic_critic_model_version)
# =================================================================== #
# Initialize Competence-based #
# Intrinsically Motivated Actor-Critic Model #
# =================================================================== #
# Competence-based Intrinsic Motivation
self.competence_based_intrinsic_reward = 0
# =================================================================== #
# Initialize Exploration Strategies #
# =================================================================== #
# 1. Action Noise to Maintain Exploration
self.exploration_action_noise_type = exploration_action_noise_type
self.actor_noise = self._init_action_noise(self.exploration_action_noise_type, self.action_space.shape[0])
# 2. Epsilon-Greedy
self.exploration_epsilon_greedy_type = exploration_epsilon_greedy_type # 'epsilon-greedy-max_1_min_0.05_decay_0.999'
self.epsilon_max, self.epsilon_min, self.epsilon_decay = self._init_epsilon_greedy(self.exploration_epsilon_greedy_type)
self.epsilon = self.epsilon_max
# 3. Knowledge-based Intrinsic Motivation (for future implementation)
# 4. Competence-based Intrinsic Motivation
# =================================================================== #
# Initialize Summary Ops #
# =================================================================== #
# TODO: Make sure when restore pretrained models, summary will be writen
# to new summary directory. (Maybe not necessary, because
# tensorboard can choose Relative Horizontal Axis.)
self.summary_dir = os.path.join(self.save_dir,'summary',self.experiment_runs)
if not os.path.isdir(self.summary_dir):
os.makedirs(self.summary_dir)
self.episode_rewards = 0
self.writer = tf.summary.FileWriter(self.summary_dir, self.sess.graph)
# Summarize Extrinsically Motivated Actor-Critic Training
# 1. accumulated reward in one episode
# TODO: should summarize (obs, act, r, obs_new)??
# 2. observation, action, reward and
# 3. loss of critic model
self.summary_ops_accu_rewards, self.summary_vars_accu_rewards = self._init_summarize_accumulated_rewards()
self.summary_ops_action_reward, self.summary_action, self.summary_reward = self._init_summarize_action_and_reward()
self.summary_ops_critic_loss, self.summary_critic_loss = self._init_summarize_actor_critic()
# Summarize Knowledge-based Intrinsic Motivation Component
self.summary_ops_kb_reward, self.sum_kb_reward = self._init_summarize_knowledge_based_intrinsic_reward()
# Summarize Environment Model Training
self.summary_ops_env_loss, self.summary_env_loss = self._init_summarize_environment_model()
# Summarize Experiment Setting
self.summary_ops_experiment_setting, self.summary_experiment_setting = self._init_summarize_experiment_setting()
summary_str_experiment_setting = self.sess.run(self.summary_ops_experiment_setting,
feed_dict = {self.summary_experiment_setting: self.experiment_setting})
self.writer.add_summary(summary_str_experiment_setting)
# Initialize hyper-parameters for visualize extrinsic state action value
self._init_visualize_extrinsic_actor_critic()
# self._init_visualize_extrinsic_state_action_value_function()
# self._init_visualize_extrinsic_action_value_given_a_specific_state()
# =================================================================== #
# Initialize Tranable Variables #
# =================================================================== #
# Note: Don't call self.sess.run(tf.global_variables_initializer()).
# Otherwise, restoring pretrained models will fail.
self.extrinsic_actor_model.update_target_network()
self.extrinsic_critic_model.update_target_network()
# =================================================================== #
# Main Interaction Functions #
# =================================================================== #
def perceive_and_act(self, observation, reward, done):
"""
Perceive observation and reward, then return action based on current
observation.
Parameters
----------
observation: np.shape(observation) = (obs_dim,)
observation
reward: float
reward of previous action
done: bool
whether current simulation is done
Returns
-------
action: np.shape(action) = (act_dim, )
action generated by agent
"""
self.observation_new = observation
self.reward_new = reward
self.done = done
# *********************************** #
# Produce Action #
# *********************************** #
# If this is the first action, no complete experience to remember.
if self.first_experience:
action = self._act(self.observation_new)
self.action_old = action
self.observation_old = self.observation_new
self.first_experience = False
self.total_step_counter += 1
return action
# Choose an action
action = self._act(self.observation_new)
# Add summary date
self._summary_meta_data()
# Memorize experiencs
self._memorize_experience()
# Train Models
self._train()
# Reset Temporary Variables
# Note: Before return, set observation and action as old.
self.observation_old = self.observation_new
self.action_old = action
self.total_step_counter += 1
self.writer.flush()
return action
def _memorize_experience(self):
"""Remember Experiences"""
# 1. Extrinsic Policy Replay Buffer
self.replay_buffer.add(self.observation_old, self.action_old, self.reward_new, self.done, self.observation_new)
# 2. Environment Model Replay Buffer
if np.random.rand(1) <= self.env_model_buffer_test_ratio:
self.env_model_test_buffer.add(self.observation_old, self.action_old, self.reward_new, self.done, self.observation_new)
else:
self.env_model_train_buffer.add(self.observation_old, self.action_old,
self.reward_new, self.done,
self.observation_new)
# 3. Intrinsc Policy Replay Buffer
# The Learning Progress plays the role of intrinsic reward
# a. knowledge-based intirnsic motivation
self.k_based_intrinsic_r, _ = self.knowledge_based_intrinsic_motivation_model.knowledge_based_intrinsic_reward(self.observation_old,
self.action_old,
self.reward_new,
self.observation_new)
self.knowledge_based_intrinsic_policy_replay_buffer.add(self.observation_old, self.action_old,
self.k_based_intrinsic_r, self.done,
self.observation_new)
# Summarize Knowledge-based Intrinsic Reward
self.writer.add_summary(self.sess.run(self.summary_ops_kb_reward,
feed_dict={self.sum_kb_reward:self.k_based_intrinsic_r}),
self.total_step_counter)
# TODO: b. competence-based intrinsic motivation
def _summary_meta_data(self):
"""Write Summaries for Analysis"""
# 1. Save Step Summaries
self.writer.add_summary(self.sess.run(self.summary_ops_action_reward,
feed_dict = {self.summary_action: self.action_old,
self.summary_reward: self.reward_new}),
self.total_step_counter)
# 2. Save Episode Summaries
self.episode_rewards += self.reward_new
if self.steps_counter == self.max_episode_len or self.done == True:
# # Save data for visualize extrinsic state action value
# if self.episode_counter % self.embedding_extrinsic_state_action_value_episodic_frequency == 0:
# self.peoriodically_save_extrinsic_state_action_value_embedding(self.episode_counter)
# # Save data for visualize extrinsic action values given a state
# if self.episode_counter % self.embedding_extrinsic_action_value_given_a_state_episodic_frequency == 0:
# self.peoriodically_save_extrinsic_action_value_given_a_state(self.observation_new,
# self.episode_counter)
# Episodic Summary
self.writer.add_summary(self.sess.run(self.summary_ops_accu_rewards,
feed_dict = {self.summary_vars_accu_rewards: self.episode_rewards}),
self.episode_counter)
# Reset Summary Data
self.steps_counter = 1
self.episode_rewards = 0
self.episode_counter += 1
else:
self.steps_counter += 1
    def _act(self, observation_new):
        """
        Produce an action for the current observation.

        Exploration order: (1) if epsilon-greedy is enabled, with
        probability epsilon a uniformly random action is returned (and
        epsilon is decayed); (2) otherwise the deterministic actor output
        is used, optionally perturbed by the configured action noise.

        Parameters
        ----------
        observation_new: np.shape(observation) = (obs_dim,)
            current observation
        Returns
        -------
        action: np.shape(action) = (act_dim, )
            action to execute
        """
        # Epsilon-Greedy
        if self.exploration_epsilon_greedy_type != 'none':
            if np.random.rand(1) <= self.epsilon:
                action = self.action_space.sample()
                # Epsilon decays only on steps where a random action was
                # actually taken.
                if self.epsilon > self.epsilon_min:
                    self.epsilon *= self.epsilon_decay
                # Progress print roughly every 2000 total steps (only fires
                # when the modulo lands on an exploratory step).
                if self.total_step_counter % 2000 == 0:
                    print("epsilon:{}".format(self.epsilon))
                return action
        # Action Noise
        if self.exploration_action_noise_type != 'none':
            # NOTE(review): additive noise was observed to be large relative
            # to the action scale ("The noise is too huge") — revisit scaling.
            action = self.extrinsic_actor_model.predict(np.reshape(observation_new, [1, self.observation_space.shape[0]])) + self.actor_noise() #The noise is too huge.
        else:
            action = self.extrinsic_actor_model.predict(np.reshape(observation_new, [1, self.observation_space.shape[0]]))
        # predict() returns a batch of one action; unwrap it.
        return action[0]
    def _train(self):
        """
        Run one training step for every learnable component.

        1. Extrinsic actor-critic (DDPG-style) once the extrinsic replay
           buffer holds more than one minibatch.
        2. Environment model, once its train buffer holds a minibatch;
           periodically pushes the newest model weights into the
           knowledge-based intrinsic-motivation window and evaluates on a
           held-out test buffer.
        """
        # ******************************************************************* #
        #          Train Extrinsically Motivated Actor-Critic Model           #
        # ******************************************************************* #
        if self.replay_buffer.size() > self.minibatch_size:
            # Random Samples
            s_batch, a_batch, r_batch, t_batch, s2_batch = \
                self.replay_buffer.sample_batch(int(self.minibatch_size))
            # Prioritized Samples
            # Calculate targets: r + gamma * Q'(s', mu'(s')); the bootstrap
            # term is dropped on terminal transitions.
            target_q = self.extrinsic_critic_model.predict_target(
                s2_batch, self.extrinsic_actor_model.predict_target(s2_batch))
            target_q_value = []
            for k in range(int(self.minibatch_size)):
                if t_batch[k]:
                    target_q_value.append(r_batch[k])
                else:
                    target_q_value.append(r_batch[k] + self.extrinsic_critic_model.gamma * target_q[k])
            # Update the critic given the targets
            critic_loss, _, _ = self.extrinsic_critic_model.train(s_batch,a_batch,
                                                                  np.reshape(target_q_value, (int(self.minibatch_size), 1)))
            # Summarize critic training loss
            self.writer.add_summary(self.sess.run(self.summary_ops_critic_loss,
                                                  feed_dict = {self.summary_critic_loss: critic_loss}),
                                    self.total_step_counter)
            # Optimize the actor using the gradient of Q-value with respect
            # to action in sampled experiences batch
            a_outs = self.extrinsic_actor_model.predict(s_batch)
            grads = self.extrinsic_critic_model.action_gradients(s_batch, a_outs)
            self.extrinsic_actor_model.train(s_batch, grads[0])
            # Update target networks
            self.extrinsic_actor_model.update_target_network()
            self.extrinsic_critic_model.update_target_network()
        # ******************************************************************* #
        #                       Train Environment Model                       #
        # ******************************************************************* #
        if self.env_model_train_buffer.size() > self.env_model_minibatch_size:
            # Train env model every step
            s_batch, a_batch, r_batch, t_batch, s2_batch = self.env_model_train_buffer.sample_batch(int(self.env_model_minibatch_size))
            self.environment_model.train_env_model(s_batch,a_batch,
                                                   s2_batch,
                                                   np.reshape(r_batch, (int(self.env_model_minibatch_size), 1)))
            # Replace the oldest env model in the knowledge-based intrinsic
            # motivation component with the newest env model, every
            # "update_newest_env_model_every_xxx_steps" steps.
            if (self.total_step_counter % self.update_newest_env_model_every_xxx_steps) == 0:
                self.knowledge_based_intrinsic_motivation_model.update_env_model_window(self.environment_model.get_env_model_weights())
            # Evaluate on test buffer every step
            if self.env_model_test_buffer.size() > self.env_model_test_samples_size:
                s_batch_test, a_batch_test, r_batch_test, t_batch_test, s2_batch_test =\
                    self.env_model_test_buffer.sample_batch(int(self.env_model_test_samples_size))
                env_obs_transition_model_loss, _ = self.environment_model.evaluate_env_model(s_batch_test, a_batch_test,
                                                                                            s2_batch_test,
                                                                                            np.reshape(r_batch_test, (int(self.env_model_test_samples_size), 1)))
                # Summaries of Training Environment Model
                self.writer.add_summary(self.sess.run(self.summary_ops_env_loss,
                                                      feed_dict = {self.summary_env_loss: env_obs_transition_model_loss}),
                                        self.total_step_counter)
        # ********************************************************************* #
        #    Train Knowledge-based Intrinsically Motivated Actor-Critic Model   #
        # ********************************************************************* #
    def _save_learned_model(self, version_number):
        """
        Persist all learned networks to disk.

        Parameters
        ----------
        version_number:
            version tag passed through to each model's save routine
        """
        # Save extrinsically motivated actor-critic model
        self.extrinsic_actor_model.save_actor_network(version_number)
        self.extrinsic_critic_model.save_critic_network(version_number)
        logging.info('Save extrinsic_actor_model and extrinsic_critic_model: done.')
        # Save Environment Model
        self.environment_model.save_env_model(version_number)
        logging.info('Save environment_model: done.')
# =================================================================== #
# Intrinsic Motivation Components #
# =================================================================== #
    def competence_based_intrinsic_motivation_component(self):
        """
        Placeholder for the competence-based intrinsic motivation component.

        NOTE(review): not implemented yet — the body contains only this
        docstring, so calling it currently returns None.

        Returns
        -------
        competence_based_intrinsic_reward: float
            (intended) intrinsic reward; not produced yet.
        """
# =================================================================== #
# Initialization Exploratory Strageties #
# =================================================================== #
def _init_epsilon_greedy(self, exploration_epsilon_greedy_type):
"""
Initialize hyper-parameters for epsilon-greedy.
Parameters
----------
exploration_epsilon_greedy_type: str default = 'epsilon-greedy-max_1_min_0.05_decay_0.999'
str for setting epsilon greedy. Please keep the format and just change float numbers.
For default 'epsilon-greedy-max_1_min_0.05_decay_0.999', it means:
maximum epsilon = 1
minimum spsilom = 0.05
epsilon decay = 0.999
If exploration_epsilon_greedy_type == 'none', no epsilon-greedy.
Returns
-------
epsilon_max: float
maximum epsilon
epsilon_min: float
minimum spsilom
epsilon_decay: float
epsilon decay
"""
if exploration_epsilon_greedy_type == 'none':
epsilon_max=0
epsilon_min=0
epsilon_decay=0
else:
_, epsilon_max, _, epsilon_min, _, epsilon_decay = exploration_epsilon_greedy_type.split('_')
return float(epsilon_max), float(epsilon_min), float(epsilon_decay)
def _init_action_noise(self, action_noise_type='ou_0.2', nb_actions=1):
"""
Initialize action noise object.
Parameters
----------
action_noise_type: str default = 'ou_0.2'
type of action noise:
1. 'none' (no action noise)
2. 'adaptive-param_0.2'
3. 'normal_0.2'
4. 'ou_0.2'
nb_actions: int default = 1
dimension of action space
Returns
-------
action_noise: object of ActionNoise class.
"""
if action_noise_type == 'none':
pass
elif 'adaptive-param' in action_noise_type:
_, stddev = action_noise_type.split('_')
param_noise = AdaptiveParamNoiseSpec(initial_stddev=float(stddev), desired_action_stddev=float(stddev))
return param_noise
elif 'normal' in action_noise_type:
_, stddev = action_noise_type.split('_')
action_noise = NormalActionNoise(mu=np.zeros(nb_actions), sigma=float(stddev) * np.ones(nb_actions))
return action_noise
elif 'ou' in action_noise_type:
_, stddev = action_noise_type.split('_')
action_noise = OrnsteinUhlenbeckActionNoise(mu=np.zeros(nb_actions), sigma=float(stddev) * np.ones(nb_actions))
return action_noise
else:
raise RuntimeError('unknown noise type "{}"'.format(action_noise_type))
# =================================================================== #
# Initialization Summary Functions #
# =================================================================== #
def _init_summarize_accumulated_rewards(self):
"""
Function used for building summaries.
"""
episode_rewards = tf.Variable(0.)
episode_rewards_sum = tf.summary.scalar("Accumulated_Rewards", episode_rewards)
summary_ops = tf.summary.merge([episode_rewards_sum])
return summary_ops, episode_rewards
def _init_summarize_action_and_reward(self):
"""
Histogram summaries of action and reward
Returns
-------
summary_ops: tf.ops
ops to summarize action and reward
action: tf.placeholder
placeholder for feeding action
reward: tf.placeholder
placeholder for feeding reward
"""
action = tf.placeholder(tf.float32, shape=self.action_space.shape)
reward = tf.placeholder(tf.float32)
action_sum = tf.summary.histogram("action", action)
reward_sum = tf.summary.scalar("reward", reward)
summary_ops = tf.summary.merge([action_sum, reward_sum])
return summary_ops, action, reward
def _init_summarize_actor_critic(self):
"""
Summarize data from actor-critic model.
"""
loss_critic = tf.placeholder(dtype = tf.float32)
loss_critic_sum = tf.summary.scalar('loss_critic', loss_critic)
loss_critic_sum_op = tf.summary.merge([loss_critic_sum])
return loss_critic_sum_op, loss_critic
def _init_summarize_experiment_setting(self):
"""
Summarize experiment setting
"""
experiemnt_setting = tf.placeholder(tf.string)
experiemnt_setting_sum = tf.summary.text('Actor_Critic_Agent_Experiment_Setting', experiemnt_setting)
summary_ops = tf.summary.merge([experiemnt_setting_sum])
return summary_ops, experiemnt_setting
def _init_summarize_environment_model(self):
"""
Summarize environment model training
"""
loss = tf.placeholder(dtype = tf.float32)
loss_sum = tf.summary.scalar('loss_env_model', loss)
loss_sum_op = tf.summary.merge([loss_sum])
return loss_sum_op, loss
def _init_summarize_knowledge_based_intrinsic_reward(self):
""" """
knowledge_based_intrinsic_reward = tf.placeholder(tf.float32)
summary = tf.summary.scalar('knowledge_based_intrinsic_reward', knowledge_based_intrinsic_reward)
sum_op = tf.summary.merge([summary])
return sum_op, knowledge_based_intrinsic_reward
# =================================================================== #
# Visualize Extrinsic State-Action Value #
# =================================================================== #
    def _init_visualize_extrinsic_actor_critic(self):
        """
        Prepare TensorBoard-projector embeddings for the extrinsic critic:
        (1) sampled (state, action) pairs and (2) a pool of sampled actions
        for a single fixed state; both variables are saved to one checkpoint.
        """
        self.embeded_vars_of_extrinsic_actor_critic_file_name = 'embeded_vars_of_extrinsic_actor_critic.ckpt'
        self.embeded_vars_of_extrinsic_actor_critic_config = projector.ProjectorConfig()
        """
        Visualize state-action value of sampled (state,action) pair.
        """
        self.embedding_extrinsic_state_action_value_sample_size = 10000
        # save every 2 episode
        self.embedding_extrinsic_state_action_value_episodic_frequency = 2
        # Generate embeded data: (sample_size, state_dim + action_dim)
        # Note: embedded data only need to save once, while metadata need to save
        # several times.
        act_dim = self.action_space.shape[0]
        obs_dim = self.observation_space.shape[0]
        embeded_data = np.zeros((self.embedding_extrinsic_state_action_value_sample_size, obs_dim+act_dim))
        self.embeded_extrinsic_action_samples = np.zeros((self.embedding_extrinsic_state_action_value_sample_size, act_dim))
        self.embeded_extrinsic_state_samples = np.zeros((self.embedding_extrinsic_state_action_value_sample_size, obs_dim))
        for i in range(self.embedding_extrinsic_state_action_value_sample_size):
            act_sample = self.action_space.sample()
            obs_sample = self.observation_space.sample()
            embeded_data[i,:] = np.concatenate((obs_sample,act_sample))
            self.embeded_extrinsic_action_samples[i,:] = act_sample
            self.embeded_extrinsic_state_samples[i,:] = obs_sample
        # Initialize embedding variable
        self.embedding_extrinsic_state_action_value_var = tf.Variable(embeded_data,
                                                                      dtype=tf.float32,
                                                                      name = 'extrinsic_state_action_value')
        self.sess.run(self.embedding_extrinsic_state_action_value_var.initializer)
        """
        Visualize state-action value of sampled action given a specific state.
        """
        self.embedding_extrinsic_action_value_given_a_state_sample_size = 10000
        self.embedding_extrinsic_action_value_given_a_state_episodic_frequency = 2
        # Generate embedding data: (sample_size, action_dim)
        act_dim = self.action_space.shape[0]
        # NOTE(review): this OVERWRITES the action samples drawn above for the
        # (state, action) embedding. peoriodically_save_extrinsic_state_action_value_embedding
        # later reads self.embeded_extrinsic_action_samples, so its metadata is
        # computed from these re-drawn actions rather than the actions actually
        # embedded in embeded_data — confirm whether that mismatch is intended;
        # a fix would require a separate attribute and coordinated changes in
        # the two peoriodically_save_* methods.
        self.embeded_extrinsic_action_samples = np.zeros((self.embedding_extrinsic_action_value_given_a_state_sample_size,act_dim))
        for i in range(self.embedding_extrinsic_action_value_given_a_state_sample_size):
            self.embeded_extrinsic_action_samples[i,:] = self.action_space.sample()
        # Initialize embedding variable
        self.embeded_extrinsic_action_value_given_a_state_var = tf.Variable(self.embeded_extrinsic_action_samples,
                                                                            dtype = tf.float32,
                                                                            name = 'extrinsic_action_value_given_a_state')
        self.sess.run(self.embeded_extrinsic_action_value_given_a_state_var.initializer)
        # Save embedding vars
        saver_embed = tf.train.Saver([self.embedding_extrinsic_state_action_value_var,
                                        self.embeded_extrinsic_action_value_given_a_state_var])
        saver_embed.save(self.sess,
                         os.path.join(self.summary_dir, self.embeded_vars_of_extrinsic_actor_critic_file_name))
def _init_visualize_extrinsic_state_action_value_function(self):
"""
Visualize state-action value of sampled (state,action) pair.
"""
self.embedding_extrinsic_state_action_value_sample_size = 10000
# save every 2 episode
self.embedding_extrinsic_state_action_value_episodic_frequency = 2
# Generate embeded data: (sample_size, state_dim + action_dim)
# Note: embedded data only need to save once, while metadata need to save
# several times.
act_dim = self.action_space.shape[0]
obs_dim = self.observation_space.shape[0]
embeded_data = np.zeros((self.embedding_extrinsic_state_action_value_sample_size, obs_dim+act_dim))
self.embeded_extrinsic_action_samples = np.zeros((self.embedding_extrinsic_state_action_value_sample_size, act_dim))
self.embeded_extrinsic_state_samples = np.zeros((self.embedding_extrinsic_state_action_value_sample_size, obs_dim))
for i in range(self.embedding_extrinsic_state_action_value_sample_size):
act_sample = self.action_space.sample()
obs_sample = self.observation_space.sample()
embeded_data[i,:] = np.concatenate((obs_sample,act_sample))
self.embeded_extrinsic_action_samples[i,:] = act_sample
self.embeded_extrinsic_state_samples[i,:] = obs_sample
# Initialize embedding variable
self.embedding_extrinsic_state_action_value_var = tf.Variable(embeded_data,
dtype=tf.float32,
name = 'extrinsic_state_action_value')
self.sess.run(self.embedding_extrinsic_state_action_value_var.initializer)
# Save embedding
saver_embed = tf.train.Saver([self.embedding_extrinsic_state_action_value_var])
saver_embed.save(self.sess,
os.path.join(self.summary_dir,self.embeded_vars_of_extrinsic_actor_critic_file_name))
def peoriodically_save_extrinsic_state_action_value_embedding(self, version_num):
"""
Preparing embeded data and metadata, then call function peoriodically
to write these data into tensorflow summary.
"""
# metadata should be saved peoriodically and separatively to associate with differetn embedding
metadata_file_name = 'embedding_extrinsic_state_action_value_meta_' + str(version_num) +'.tsv'
# Preparing metadate which will be associated with embedding when write
# these data to summary. Thus, the file name of metadata is:
# (embeded_data_name+'_metadata.tsv')
action_value_batch = self.extrinsic_critic_model.predict(self.embeded_extrinsic_state_samples,
self.embeded_extrinsic_action_samples)
metadata = pd.DataFrame(action_value_batch)
metadata.columns = ['embedding_extrinsic_state_action_value_'+str(version_num)]
metadata.to_csv(os.path.join(self.summary_dir, metadata_file_name),
sep = '\t')
# Associate metadata with embedding:
# embeddings {
# tensor_name: 'word_embedding'
# metadata_path: '$LOG_DIR/metadata.tsv'}
embedding = self.embeded_vars_of_extrinsic_actor_critic_config.embeddings.add()
embedding.tensor_name = self.embedding_extrinsic_state_action_value_var.name
embedding.metadata_path = metadata_file_name
projector.visualize_embeddings(self.writer,
self.embeded_vars_of_extrinsic_actor_critic_config)
def _init_visualize_extrinsic_action_value_given_a_specific_state(self):
"""
Visualize state-action value of sampled action given a specific state.
"""
self.embedding_extrinsic_action_value_given_a_state_sample_size = 10000
self.embedding_extrinsic_action_value_given_a_state_episodic_frequency = 2
# Generate embedding data: (sample_size, action_dim)
act_dim = self.action_space.shape[0]
self.embeded_extrinsic_action_samples = np.zeros((self.embedding_extrinsic_action_value_given_a_state_sample_size,act_dim))
for i in range(self.embedding_extrinsic_action_value_given_a_state_sample_size):
self.embeded_extrinsic_action_samples[i,:] = self.action_space.sample()
# Initialize embedding variable
self.embeded_extrinsic_action_value_given_a_state_var = tf.Variable(self.embeded_extrinsic_action_samples,
dtype = tf.float32,
name = 'extrinsic_action_value_given_a_state')
self.sess.run(self.embeded_extrinsic_action_value_given_a_state_var.initializer)
# Save embedding var
saver_embed = tf.train.Saver([self.embeded_extrinsic_action_value_given_a_state_var])
saver_embed.save(self.sess,
os.path.join(self.summary_dir, self.embeded_vars_of_extrinsic_actor_critic_file_name))
def peoriodically_save_extrinsic_action_value_given_a_state(self,
state,
version_num):
"""
Peoriodically called to visualize action value of a given state
Parameters
----------
state:
version_num:
"""
metadata_file_name = 'embedding_extrinsic_action_value_given_a_state_meta_' + str(version_num) + '.tsv'
# Generate metadata
state_samples = np.tile(state, [self.embedding_extrinsic_action_value_given_a_state_sample_size,1])
action_value_batch = self.extrinsic_critic_model.predict(state_samples,
self.embeded_extrinsic_action_samples)
# Save metadata
metadata = pd.DataFrame(action_value_batch)
metadata.columns = ['embedding_extrinsic_action_value_given_a_state'+str(version_num)]
metadata.to_csv(os.path.join(self.summary_dir, metadata_file_name),
sep = '\t')
# Associate metadata with embedding
embedding = self.embeded_vars_of_extrinsic_actor_critic_config.embeddings.add()
embedding.tensor_name = self.embeded_extrinsic_action_value_given_a_state_var.name
embedding.metadata_path = metadata_file_name
projector.visualize_embeddings(self.writer,
self.embeded_vars_of_extrinsic_actor_critic_config)
| [
"tensorflow.layers.Dropout",
"tensorflow.trainable_variables",
"LASAgent.environment_model.multilayer_nn_env_model.MultilayerNNEnvModel",
"tensorflow.get_collection",
"LASAgent.replay_buffer.ReplayBuffer",
"tensorflow.variables_initializer",
"numpy.ones",
"tensorflow.multiply",
"tensorflow.contrib.t... | [((6233, 6304), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '(None, self.s_dim)', 'name': '"""ActorInput"""'}), "(tf.float32, shape=(None, self.s_dim), name='ActorInput')\n", (6247, 6304), True, 'import tensorflow as tf\n'), ((9935, 9973), 'os.listdir', 'os.listdir', (['self.actor_model_save_path'], {}), '(self.actor_model_save_path)\n', (9945, 9973), False, 'import os\n'), ((16154, 16231), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '(None, self.s_dim)', 'name': '"""CriticInputState"""'}), "(tf.float32, shape=(None, self.s_dim), name='CriticInputState')\n", (16168, 16231), True, 'import tensorflow as tf\n'), ((16248, 16326), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '(None, self.a_dim)', 'name': '"""CriticInputAction"""'}), "(tf.float32, shape=(None, self.a_dim), name='CriticInputAction')\n", (16262, 16326), True, 'import tensorflow as tf\n'), ((16879, 16914), 'tensorflow.concat', 'tf.concat', (['[h1_obs, h1_act]'], {'axis': '(1)'}), '([h1_obs, h1_act], axis=1)\n', (16888, 16914), True, 'import tensorflow as tf\n'), ((20874, 20913), 'os.listdir', 'os.listdir', (['self.critic_model_save_path'], {}), '(self.critic_model_save_path)\n', (20884, 20913), False, 'import os\n'), ((27773, 27821), 'LASAgent.replay_buffer.ReplayBuffer', 'ReplayBuffer', (['self.buffer_size', 'self.random_seed'], {}), '(self.buffer_size, self.random_seed)\n', (27785, 27821), False, 'from LASAgent.replay_buffer import ReplayBuffer\n'), ((28213, 28277), 'LASAgent.replay_buffer.ReplayBuffer', 'ReplayBuffer', (['self.env_model_train_buffer_size', 'self.random_seed'], {}), '(self.env_model_train_buffer_size, self.random_seed)\n', (28225, 28277), False, 'from LASAgent.replay_buffer import ReplayBuffer\n'), ((28739, 28802), 'LASAgent.replay_buffer.ReplayBuffer', 'ReplayBuffer', (['self.env_model_test_buffer_size', 'self.random_seed'], {}), '(self.env_model_test_buffer_size, 
self.random_seed)\n', (28751, 28802), False, 'from LASAgent.replay_buffer import ReplayBuffer\n'), ((29134, 29220), 'LASAgent.replay_buffer.ReplayBuffer', 'ReplayBuffer', (['self.knowledge_based_intrinsic_policy_buffer_size', 'self.random_seed'], {}), '(self.knowledge_based_intrinsic_policy_buffer_size, self.\n random_seed)\n', (29146, 29220), False, 'from LASAgent.replay_buffer import ReplayBuffer\n'), ((29624, 29711), 'LASAgent.replay_buffer.ReplayBuffer', 'ReplayBuffer', (['self.competence_based_intrinsic_policy_buffer_size', 'self.random_seed'], {}), '(self.competence_based_intrinsic_policy_buffer_size, self.\n random_seed)\n', (29636, 29711), False, 'from LASAgent.replay_buffer import ReplayBuffer\n'), ((30175, 30234), 'os.path.join', 'os.path.join', (['self.save_dir', '"""models"""', 'self.experiment_runs'], {}), "(self.save_dir, 'models', self.experiment_runs)\n", (30187, 30234), False, 'import os\n'), ((34026, 34204), 'LASAgent.environment_model.multilayer_nn_env_model.MultilayerNNEnvModel', 'MultilayerNNEnvModel', (['self.environment_model_name', 'self.sess', 'self.observation_space', 'self.action_space', 'self.env_model_lr', 'self.env_model_save_path', 'self.env_load_flag'], {}), '(self.environment_model_name, self.sess, self.\n observation_space, self.action_space, self.env_model_lr, self.\n env_model_save_path, self.env_load_flag)\n', (34046, 34204), False, 'from LASAgent.environment_model.multilayer_nn_env_model import MultilayerNNEnvModel\n'), ((35244, 35374), 'LASAgent.intrinsic_motivation_model.knowledge_based_intrinsic_motivation.KnowledgeBasedIntrinsicMotivationComponent', 'KnowledgeBasedIntrinsicMotivationComponent', (['self.environment_model', 'self.knowledge_based_intrinsic_reward_sliding_window_size'], {}), '(self.environment_model, self.\n knowledge_based_intrinsic_reward_sliding_window_size)\n', (35286, 35374), False, 'from LASAgent.intrinsic_motivation_model.knowledge_based_intrinsic_motivation import 
KnowledgeBasedIntrinsicMotivationComponent\n'), ((41196, 41256), 'os.path.join', 'os.path.join', (['self.save_dir', '"""summary"""', 'self.experiment_runs'], {}), "(self.save_dir, 'summary', self.experiment_runs)\n", (41208, 41256), False, 'import os\n'), ((41409, 41465), 'tensorflow.summary.FileWriter', 'tf.summary.FileWriter', (['self.summary_dir', 'self.sess.graph'], {}), '(self.summary_dir, self.sess.graph)\n', (41430, 41465), True, 'import tensorflow as tf\n'), ((55510, 55586), 'logging.info', 'logging.info', (['"""Save extrinsic_actor_model and extrinsic_critic_model: done."""'], {}), "('Save extrinsic_actor_model and extrinsic_critic_model: done.')\n", (55522, 55586), False, 'import logging\n'), ((55690, 55735), 'logging.info', 'logging.info', (['"""Save environment_model: done."""'], {}), "('Save environment_model: done.')\n", (55702, 55735), False, 'import logging\n'), ((59408, 59424), 'tensorflow.Variable', 'tf.Variable', (['(0.0)'], {}), '(0.0)\n', (59419, 59424), True, 'import tensorflow as tf\n'), ((59454, 59511), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""Accumulated_Rewards"""', 'episode_rewards'], {}), "('Accumulated_Rewards', episode_rewards)\n", (59471, 59511), True, 'import tensorflow as tf\n'), ((59543, 59582), 'tensorflow.summary.merge', 'tf.summary.merge', (['[episode_rewards_sum]'], {}), '([episode_rewards_sum])\n', (59559, 59582), True, 'import tensorflow as tf\n'), ((60040, 60097), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': 'self.action_space.shape'}), '(tf.float32, shape=self.action_space.shape)\n', (60054, 60097), True, 'import tensorflow as tf\n'), ((60115, 60141), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {}), '(tf.float32)\n', (60129, 60141), True, 'import tensorflow as tf\n'), ((60172, 60210), 'tensorflow.summary.histogram', 'tf.summary.histogram', (['"""action"""', 'action'], {}), "('action', action)\n", (60192, 60210), True, 'import tensorflow as tf\n'), ((60232, 60267), 
'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""reward"""', 'reward'], {}), "('reward', reward)\n", (60249, 60267), True, 'import tensorflow as tf\n'), ((60299, 60341), 'tensorflow.summary.merge', 'tf.summary.merge', (['[action_sum, reward_sum]'], {}), '([action_sum, reward_sum])\n', (60315, 60341), True, 'import tensorflow as tf\n'), ((60528, 60560), 'tensorflow.placeholder', 'tf.placeholder', ([], {'dtype': 'tf.float32'}), '(dtype=tf.float32)\n', (60542, 60560), True, 'import tensorflow as tf\n'), ((60589, 60634), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""loss_critic"""', 'loss_critic'], {}), "('loss_critic', loss_critic)\n", (60606, 60634), True, 'import tensorflow as tf\n'), ((60664, 60699), 'tensorflow.summary.merge', 'tf.summary.merge', (['[loss_critic_sum]'], {}), '([loss_critic_sum])\n', (60680, 60699), True, 'import tensorflow as tf\n'), ((60892, 60917), 'tensorflow.placeholder', 'tf.placeholder', (['tf.string'], {}), '(tf.string)\n', (60906, 60917), True, 'import tensorflow as tf\n'), ((60951, 61027), 'tensorflow.summary.text', 'tf.summary.text', (['"""Actor_Critic_Agent_Experiment_Setting"""', 'experiemnt_setting'], {}), "('Actor_Critic_Agent_Experiment_Setting', experiemnt_setting)\n", (60966, 61027), True, 'import tensorflow as tf\n'), ((61050, 61092), 'tensorflow.summary.merge', 'tf.summary.merge', (['[experiemnt_setting_sum]'], {}), '([experiemnt_setting_sum])\n', (61066, 61092), True, 'import tensorflow as tf\n'), ((61281, 61313), 'tensorflow.placeholder', 'tf.placeholder', ([], {'dtype': 'tf.float32'}), '(dtype=tf.float32)\n', (61295, 61313), True, 'import tensorflow as tf\n'), ((61335, 61376), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""loss_env_model"""', 'loss'], {}), "('loss_env_model', loss)\n", (61352, 61376), True, 'import tensorflow as tf\n'), ((61399, 61427), 'tensorflow.summary.merge', 'tf.summary.merge', (['[loss_sum]'], {}), '([loss_sum])\n', (61415, 61427), True, 'import tensorflow as tf\n'), ((61589, 
61615), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {}), '(tf.float32)\n', (61603, 61615), True, 'import tensorflow as tf\n'), ((61634, 61725), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""knowledge_based_intrinsic_reward"""', 'knowledge_based_intrinsic_reward'], {}), "('knowledge_based_intrinsic_reward',\n knowledge_based_intrinsic_reward)\n", (61651, 61725), True, 'import tensorflow as tf\n'), ((61739, 61766), 'tensorflow.summary.merge', 'tf.summary.merge', (['[summary]'], {}), '([summary])\n', (61755, 61766), True, 'import tensorflow as tf\n'), ((62268, 62295), 'tensorflow.contrib.tensorboard.plugins.projector.ProjectorConfig', 'projector.ProjectorConfig', ([], {}), '()\n', (62293, 62295), False, 'from tensorflow.contrib.tensorboard.plugins import projector\n'), ((62873, 62963), 'numpy.zeros', 'np.zeros', (['(self.embedding_extrinsic_state_action_value_sample_size, obs_dim + act_dim)'], {}), '((self.embedding_extrinsic_state_action_value_sample_size, obs_dim +\n act_dim))\n', (62881, 62963), True, 'import numpy as np\n'), ((63006, 63082), 'numpy.zeros', 'np.zeros', (['(self.embedding_extrinsic_state_action_value_sample_size, act_dim)'], {}), '((self.embedding_extrinsic_state_action_value_sample_size, act_dim))\n', (63014, 63082), True, 'import numpy as np\n'), ((63130, 63206), 'numpy.zeros', 'np.zeros', (['(self.embedding_extrinsic_state_action_value_sample_size, obs_dim)'], {}), '((self.embedding_extrinsic_state_action_value_sample_size, obs_dim))\n', (63138, 63206), True, 'import numpy as np\n'), ((63702, 63787), 'tensorflow.Variable', 'tf.Variable', (['embeded_data'], {'dtype': 'tf.float32', 'name': '"""extrinsic_state_action_value"""'}), "(embeded_data, dtype=tf.float32, name='extrinsic_state_action_value'\n )\n", (63713, 63787), True, 'import tensorflow as tf\n'), ((64438, 64526), 'numpy.zeros', 'np.zeros', (['(self.embedding_extrinsic_action_value_given_a_state_sample_size, act_dim)'], {}), 
'((self.embedding_extrinsic_action_value_given_a_state_sample_size,\n act_dim))\n', (64446, 64526), True, 'import numpy as np\n'), ((64799, 64917), 'tensorflow.Variable', 'tf.Variable', (['self.embeded_extrinsic_action_samples'], {'dtype': 'tf.float32', 'name': '"""extrinsic_action_value_given_a_state"""'}), "(self.embeded_extrinsic_action_samples, dtype=tf.float32, name=\n 'extrinsic_action_value_given_a_state')\n", (64810, 64917), True, 'import tensorflow as tf\n'), ((65228, 65353), 'tensorflow.train.Saver', 'tf.train.Saver', (['[self.embedding_extrinsic_state_action_value_var, self.\n embeded_extrinsic_action_value_given_a_state_var]'], {}), '([self.embedding_extrinsic_state_action_value_var, self.\n embeded_extrinsic_action_value_given_a_state_var])\n', (65242, 65353), True, 'import tensorflow as tf\n'), ((66181, 66271), 'numpy.zeros', 'np.zeros', (['(self.embedding_extrinsic_state_action_value_sample_size, obs_dim + act_dim)'], {}), '((self.embedding_extrinsic_state_action_value_sample_size, obs_dim +\n act_dim))\n', (66189, 66271), True, 'import numpy as np\n'), ((66314, 66390), 'numpy.zeros', 'np.zeros', (['(self.embedding_extrinsic_state_action_value_sample_size, act_dim)'], {}), '((self.embedding_extrinsic_state_action_value_sample_size, act_dim))\n', (66322, 66390), True, 'import numpy as np\n'), ((66438, 66514), 'numpy.zeros', 'np.zeros', (['(self.embedding_extrinsic_state_action_value_sample_size, obs_dim)'], {}), '((self.embedding_extrinsic_state_action_value_sample_size, obs_dim))\n', (66446, 66514), True, 'import numpy as np\n'), ((67010, 67095), 'tensorflow.Variable', 'tf.Variable', (['embeded_data'], {'dtype': 'tf.float32', 'name': '"""extrinsic_state_action_value"""'}), "(embeded_data, dtype=tf.float32, name='extrinsic_state_action_value'\n )\n", (67021, 67095), True, 'import tensorflow as tf\n'), ((67363, 67428), 'tensorflow.train.Saver', 'tf.train.Saver', (['[self.embedding_extrinsic_state_action_value_var]'], {}), 
'([self.embedding_extrinsic_state_action_value_var])\n', (67377, 67428), True, 'import tensorflow as tf\n'), ((68457, 68489), 'pandas.DataFrame', 'pd.DataFrame', (['action_value_batch'], {}), '(action_value_batch)\n', (68469, 68489), True, 'import pandas as pd\n'), ((69095, 69195), 'tensorflow.contrib.tensorboard.plugins.projector.visualize_embeddings', 'projector.visualize_embeddings', (['self.writer', 'self.embeded_vars_of_extrinsic_actor_critic_config'], {}), '(self.writer, self.\n embeded_vars_of_extrinsic_actor_critic_config)\n', (69125, 69195), False, 'from tensorflow.contrib.tensorboard.plugins import projector\n'), ((69746, 69834), 'numpy.zeros', 'np.zeros', (['(self.embedding_extrinsic_action_value_given_a_state_sample_size, act_dim)'], {}), '((self.embedding_extrinsic_action_value_given_a_state_sample_size,\n act_dim))\n', (69754, 69834), True, 'import numpy as np\n'), ((70107, 70225), 'tensorflow.Variable', 'tf.Variable', (['self.embeded_extrinsic_action_samples'], {'dtype': 'tf.float32', 'name': '"""extrinsic_action_value_given_a_state"""'}), "(self.embeded_extrinsic_action_samples, dtype=tf.float32, name=\n 'extrinsic_action_value_given_a_state')\n", (70118, 70225), True, 'import tensorflow as tf\n'), ((70517, 70588), 'tensorflow.train.Saver', 'tf.train.Saver', (['[self.embeded_extrinsic_action_value_given_a_state_var]'], {}), '([self.embeded_extrinsic_action_value_given_a_state_var])\n', (70531, 70588), True, 'import tensorflow as tf\n'), ((71330, 71419), 'numpy.tile', 'np.tile', (['state', '[self.embedding_extrinsic_action_value_given_a_state_sample_size, 1]'], {}), '(state, [self.\n embedding_extrinsic_action_value_given_a_state_sample_size, 1])\n', (71337, 71419), True, 'import numpy as np\n'), ((71641, 71673), 'pandas.DataFrame', 'pd.DataFrame', (['action_value_batch'], {}), '(action_value_batch)\n', (71653, 71673), True, 'import pandas as pd\n'), ((72165, 72265), 'tensorflow.contrib.tensorboard.plugins.projector.visualize_embeddings', 
'projector.visualize_embeddings', (['self.writer', 'self.embeded_vars_of_extrinsic_actor_critic_config'], {}), '(self.writer, self.\n embeded_vars_of_extrinsic_actor_critic_config)\n', (72195, 72265), False, 'from tensorflow.contrib.tensorboard.plugins import projector\n'), ((2890, 3004), 'logging.error', 'logging.error', (['"""You do not have pretrained models.\nPlease set "load_pretrained_agent_flag = False"."""'], {}), '(\n """You do not have pretrained models.\nPlease set "load_pretrained_agent_flag = False"."""\n )\n', (2903, 3004), False, 'import logging\n'), ((3014, 3038), 'tensorflow.name_scope', 'tf.name_scope', (['self.name'], {}), '(self.name)\n', (3027, 3038), True, 'import tensorflow as tf\n'), ((4352, 4398), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[None, self.a_dim]'], {}), '(tf.float32, [None, self.a_dim])\n', (4366, 4398), True, 'import tensorflow as tf\n'), ((4688, 4754), 'tensorflow.gradients', 'tf.gradients', (['self.out', 'self.network_params', '(-self.action_gradient)'], {}), '(self.out, self.network_params, -self.action_gradient)\n', (4700, 4754), True, 'import tensorflow as tf\n'), ((6320, 6424), 'tensorflow.layers.Dense', 'layers.Dense', ([], {'units': '(100)', 'activation': 'tf.nn.relu', 'kernel_initializer': 'tf.initializers.truncated_normal'}), '(units=100, activation=tf.nn.relu, kernel_initializer=tf.\n initializers.truncated_normal)\n', (6332, 6424), False, 'from tensorflow import layers\n'), ((6474, 6501), 'tensorflow.layers.BatchNormalization', 'layers.BatchNormalization', ([], {}), '()\n', (6499, 6501), False, 'from tensorflow import layers\n'), ((6519, 6538), 'tensorflow.layers.Dropout', 'layers.Dropout', (['(0.5)'], {}), '(0.5)\n', (6533, 6538), False, 'from tensorflow import layers\n'), ((6565, 6668), 'tensorflow.layers.Dense', 'layers.Dense', ([], {'units': '(50)', 'activation': 'tf.nn.relu', 'kernel_initializer': 'tf.initializers.truncated_normal'}), '(units=50, activation=tf.nn.relu, 
kernel_initializer=tf.\n initializers.truncated_normal)\n', (6577, 6668), False, 'from tensorflow import layers\n'), ((6714, 6741), 'tensorflow.layers.BatchNormalization', 'layers.BatchNormalization', ([], {}), '()\n', (6739, 6741), False, 'from tensorflow import layers\n'), ((6759, 6778), 'tensorflow.layers.Dropout', 'layers.Dropout', (['(0.5)'], {}), '(0.5)\n', (6773, 6778), False, 'from tensorflow import layers\n'), ((8318, 8391), 'tensorflow.variables_initializer', 'tf.variables_initializer', (['self.network_params'], {'name': '"""init_network_params"""'}), "(self.network_params, name='init_network_params')\n", (8342, 8391), True, 'import tensorflow as tf\n'), ((8415, 8507), 'tensorflow.variables_initializer', 'tf.variables_initializer', (['self.target_network_params'], {'name': '"""init_target_network_params"""'}), "(self.target_network_params, name=\n 'init_target_network_params')\n", (8439, 8507), True, 'import tensorflow as tf\n'), ((13030, 13054), 'tensorflow.name_scope', 'tf.name_scope', (['self.name'], {}), '(self.name)\n', (13043, 13054), True, 'import tensorflow as tf\n'), ((14382, 14419), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[None, 1]'], {}), '(tf.float32, [None, 1])\n', (14396, 14419), True, 'import tensorflow as tf\n'), ((14495, 14573), 'tensorflow.losses.mean_squared_error', 'tf.losses.mean_squared_error', ([], {'labels': 'self.target_q_value', 'predictions': 'self.out'}), '(labels=self.target_q_value, predictions=self.out)\n', (14523, 14573), True, 'import tensorflow as tf\n'), ((15083, 15118), 'tensorflow.gradients', 'tf.gradients', (['self.out', 'self.action'], {}), '(self.out, self.action)\n', (15095, 15118), True, 'import tensorflow as tf\n'), ((16355, 16459), 'tensorflow.layers.Dense', 'layers.Dense', ([], {'units': '(400)', 'activation': 'tf.nn.relu', 'kernel_initializer': 'tf.initializers.truncated_normal'}), '(units=400, activation=tf.nn.relu, kernel_initializer=tf.\n initializers.truncated_normal)\n', (16367, 
16459), False, 'from tensorflow import layers\n'), ((16510, 16537), 'tensorflow.layers.BatchNormalization', 'layers.BatchNormalization', ([], {}), '()\n', (16535, 16537), False, 'from tensorflow import layers\n'), ((16563, 16582), 'tensorflow.layers.Dropout', 'layers.Dropout', (['(0.5)'], {}), '(0.5)\n', (16577, 16582), False, 'from tensorflow import layers\n'), ((16617, 16721), 'tensorflow.layers.Dense', 'layers.Dense', ([], {'units': '(400)', 'activation': 'tf.nn.relu', 'kernel_initializer': 'tf.initializers.truncated_normal'}), '(units=400, activation=tf.nn.relu, kernel_initializer=tf.\n initializers.truncated_normal)\n', (16629, 16721), False, 'from tensorflow import layers\n'), ((16772, 16799), 'tensorflow.layers.BatchNormalization', 'layers.BatchNormalization', ([], {}), '()\n', (16797, 16799), False, 'from tensorflow import layers\n'), ((16825, 16844), 'tensorflow.layers.Dropout', 'layers.Dropout', (['(0.5)'], {}), '(0.5)\n', (16839, 16844), False, 'from tensorflow import layers\n'), ((16937, 17041), 'tensorflow.layers.Dense', 'layers.Dense', ([], {'units': '(300)', 'activation': 'tf.nn.relu', 'kernel_initializer': 'tf.initializers.truncated_normal'}), '(units=300, activation=tf.nn.relu, kernel_initializer=tf.\n initializers.truncated_normal)\n', (16949, 17041), False, 'from tensorflow import layers\n'), ((17091, 17118), 'tensorflow.layers.BatchNormalization', 'layers.BatchNormalization', ([], {}), '()\n', (17116, 17118), False, 'from tensorflow import layers\n'), ((17136, 17155), 'tensorflow.layers.Dropout', 'layers.Dropout', (['(0.5)'], {}), '(0.5)\n', (17150, 17155), False, 'from tensorflow import layers\n'), ((30248, 30279), 'os.path.exists', 'os.path.exists', (['self.models_dir'], {}), '(self.models_dir)\n', (30262, 30279), False, 'import os\n'), ((30293, 30321), 'os.makedirs', 'os.makedirs', (['self.models_dir'], {}), '(self.models_dir)\n', (30304, 30321), False, 'import os\n'), ((41270, 41301), 'os.path.isdir', 'os.path.isdir', (['self.summary_dir'], 
{}), '(self.summary_dir)\n', (41283, 41301), False, 'import os\n'), ((41315, 41344), 'os.makedirs', 'os.makedirs', (['self.summary_dir'], {}), '(self.summary_dir)\n', (41326, 41344), False, 'import os\n'), ((45935, 45952), 'numpy.random.rand', 'np.random.rand', (['(1)'], {}), '(1)\n', (45949, 45952), True, 'import numpy as np\n'), ((63429, 63469), 'numpy.concatenate', 'np.concatenate', (['(obs_sample, act_sample)'], {}), '((obs_sample, act_sample))\n', (63443, 63469), True, 'import numpy as np\n'), ((65448, 65538), 'os.path.join', 'os.path.join', (['self.summary_dir', 'self.embeded_vars_of_extrinsic_actor_critic_file_name'], {}), '(self.summary_dir, self.\n embeded_vars_of_extrinsic_actor_critic_file_name)\n', (65460, 65538), False, 'import os\n'), ((66737, 66777), 'numpy.concatenate', 'np.concatenate', (['(obs_sample, act_sample)'], {}), '((obs_sample, act_sample))\n', (66751, 66777), True, 'import numpy as np\n'), ((67490, 67580), 'os.path.join', 'os.path.join', (['self.summary_dir', 'self.embeded_vars_of_extrinsic_actor_critic_file_name'], {}), '(self.summary_dir, self.\n embeded_vars_of_extrinsic_actor_critic_file_name)\n', (67502, 67580), False, 'import os\n'), ((68602, 68652), 'os.path.join', 'os.path.join', (['self.summary_dir', 'metadata_file_name'], {}), '(self.summary_dir, metadata_file_name)\n', (68614, 68652), False, 'import os\n'), ((70650, 70740), 'os.path.join', 'os.path.join', (['self.summary_dir', 'self.embeded_vars_of_extrinsic_actor_critic_file_name'], {}), '(self.summary_dir, self.\n embeded_vars_of_extrinsic_actor_critic_file_name)\n', (70662, 70740), False, 'import os\n'), ((71793, 71843), 'os.path.join', 'os.path.join', (['self.summary_dir', 'metadata_file_name'], {}), '(self.summary_dir, metadata_file_name)\n', (71805, 71843), False, 'import os\n'), ((3070, 3098), 'tensorflow.variable_scope', 'tf.variable_scope', (['self.name'], {}), '(self.name)\n', (3087, 3098), True, 'import tensorflow as tf\n'), ((3257, 3296), 
'tensorflow.trainable_variables', 'tf.trainable_variables', ([], {'scope': 'self.name'}), '(scope=self.name)\n', (3279, 3296), True, 'import tensorflow as tf\n'), ((3338, 3373), 'tensorflow.train.Saver', 'tf.train.Saver', (['self.network_params'], {}), '(self.network_params)\n', (3352, 3373), True, 'import tensorflow as tf\n'), ((3721, 3763), 'tensorflow.train.Saver', 'tf.train.Saver', (['self.target_network_params'], {}), '(self.target_network_params)\n', (3735, 3763), True, 'import tensorflow as tf\n'), ((13086, 13114), 'tensorflow.variable_scope', 'tf.variable_scope', (['self.name'], {}), '(self.name)\n', (13103, 13114), True, 'import tensorflow as tf\n'), ((13288, 13327), 'tensorflow.trainable_variables', 'tf.trainable_variables', ([], {'scope': 'self.name'}), '(scope=self.name)\n', (13310, 13327), True, 'import tensorflow as tf\n'), ((13370, 13405), 'tensorflow.train.Saver', 'tf.train.Saver', (['self.network_params'], {}), '(self.network_params)\n', (13384, 13405), True, 'import tensorflow as tf\n'), ((13778, 13820), 'tensorflow.train.Saver', 'tf.train.Saver', (['self.target_network_params'], {}), '(self.target_network_params)\n', (13792, 13820), True, 'import tensorflow as tf\n'), ((49890, 49907), 'numpy.random.rand', 'np.random.rand', (['(1)'], {}), '(1)\n', (49904, 49907), True, 'import numpy as np\n'), ((50549, 50614), 'numpy.reshape', 'np.reshape', (['observation_new', '[1, self.observation_space.shape[0]]'], {}), '(observation_new, [1, self.observation_space.shape[0]])\n', (50559, 50614), True, 'import numpy as np\n'), ((3606, 3645), 'tensorflow.trainable_variables', 'tf.trainable_variables', ([], {'scope': 'self.name'}), '(scope=self.name)\n', (3628, 3645), True, 'import tensorflow as tf\n'), ((5104, 5146), 'tensorflow.train.AdamOptimizer', 'tf.train.AdamOptimizer', (['self.learning_rate'], {}), '(self.learning_rate)\n', (5126, 5146), True, 'import tensorflow as tf\n'), ((5422, 5487), 'tensorflow.get_collection', 'tf.get_collection', 
(['tf.GraphKeys.GLOBAL_VARIABLES'], {'scope': 'self.name'}), '(tf.GraphKeys.GLOBAL_VARIABLES, scope=self.name)\n', (5439, 5487), True, 'import tensorflow as tf\n'), ((6971, 7030), 'tensorflow.initializers.random_uniform', 'tf.initializers.random_uniform', ([], {'minval': '(-0.003)', 'maxval': '(0.003)'}), '(minval=-0.003, maxval=0.003)\n', (7001, 7030), True, 'import tensorflow as tf\n'), ((13662, 13701), 'tensorflow.trainable_variables', 'tf.trainable_variables', ([], {'scope': 'self.name'}), '(scope=self.name)\n', (13684, 13701), True, 'import tensorflow as tf\n'), ((14659, 14701), 'tensorflow.train.AdamOptimizer', 'tf.train.AdamOptimizer', (['self.learning_rate'], {}), '(self.learning_rate)\n', (14681, 14701), True, 'import tensorflow as tf\n'), ((15330, 15395), 'tensorflow.get_collection', 'tf.get_collection', (['tf.GraphKeys.GLOBAL_VARIABLES'], {'scope': 'self.name'}), '(tf.GraphKeys.GLOBAL_VARIABLES, scope=self.name)\n', (15347, 15395), True, 'import tensorflow as tf\n'), ((17381, 17440), 'tensorflow.initializers.random_uniform', 'tf.initializers.random_uniform', ([], {'minval': '(-0.003)', 'maxval': '(0.003)'}), '(minval=-0.003, maxval=0.003)\n', (17411, 17440), True, 'import tensorflow as tf\n'), ((50367, 50432), 'numpy.reshape', 'np.reshape', (['observation_new', '[1, self.observation_space.shape[0]]'], {}), '(observation_new, [1, self.observation_space.shape[0]])\n', (50377, 50432), True, 'import numpy as np\n'), ((3987, 4032), 'tensorflow.multiply', 'tf.multiply', (['self.network_params[i]', 'self.tau'], {}), '(self.network_params[i], self.tau)\n', (3998, 4032), True, 'import tensorflow as tf\n'), ((4089, 4147), 'tensorflow.multiply', 'tf.multiply', (['self.target_network_params[i]', '(1.0 - self.tau)'], {}), '(self.target_network_params[i], 1.0 - self.tau)\n', (4100, 4147), True, 'import tensorflow as tf\n'), ((4974, 5003), 'tensorflow.divide', 'tf.divide', (['x', 'self.batch_size'], {}), '(x, self.batch_size)\n', (4983, 5003), True, 'import tensorflow 
as tf\n'), ((14082, 14127), 'tensorflow.multiply', 'tf.multiply', (['self.network_params[i]', 'self.tau'], {}), '(self.network_params[i], self.tau)\n', (14093, 14127), True, 'import tensorflow as tf\n'), ((14186, 14244), 'tensorflow.multiply', 'tf.multiply', (['self.target_network_params[i]', '(1.0 - self.tau)'], {}), '(self.target_network_params[i], 1.0 - self.tau)\n', (14197, 14244), True, 'import tensorflow as tf\n'), ((58600, 58620), 'numpy.zeros', 'np.zeros', (['nb_actions'], {}), '(nb_actions)\n', (58608, 58620), True, 'import numpy as np\n'), ((58644, 58663), 'numpy.ones', 'np.ones', (['nb_actions'], {}), '(nb_actions)\n', (58651, 58663), True, 'import numpy as np\n'), ((58849, 58869), 'numpy.zeros', 'np.zeros', (['nb_actions'], {}), '(nb_actions)\n', (58857, 58869), True, 'import numpy as np\n'), ((58893, 58912), 'numpy.ones', 'np.ones', (['nb_actions'], {}), '(nb_actions)\n', (58900, 58912), True, 'import numpy as np\n')] |
import sys
# GFX imports
#
from glfw import *
import pygloo
from pygloo import *
# Math
#
from math import *
import random
import numpy as np
from geometry import Geometry, mat4, _flatten_list
gl = None
test_model = None
model_distance = 10
model_rotate_x = 0
model_rotate_y = 0
mouse_xpos = 0
mouse_ypos = 0
mouse_down_xpos = 0
mouse_down_ypos = 0
class Action:
none, select, add_select, camera, translate, rotate = range(6)
user_action = Action.none
def on_key(window, key, scancode, action, mods):
global test_model
if action is GLFW_PRESS:
if key is GLFW_KEY_ESCAPE:
glfwSetWindowShouldClose(window, 1)
if key is GLFW_KEY_C:
test_model.constrained = list(set(test_model.constrained + test_model.selected))
if key is GLFW_KEY_X:
test_model.constrained = [i for i in test_model.constrained if i not in test_model.selected]
def on_mouse(window, button, action, mods):
global user_action
global test_model
global mouse_xpos
global mouse_ypos
global mouse_down_xpos
global mouse_down_ypos
if button is GLFW_MOUSE_BUTTON_LEFT:
if (user_action in [Action.select, Action.add_select, Action.translate]) and action is GLFW_RELEASE:
if user_action in [Action.select, Action.add_select]:
# do the selection
w, h = glfwGetFramebufferSize(window)
x1 = (2*mouse_xpos/w)-1
x2 = (2*mouse_down_xpos/w)-1
y1 = -((2*mouse_ypos/h)-1)
y2 = -((2*mouse_down_ypos/h)-1)
xmin = min(x1, x2)
ymin = min(y1, y2)
xmax = max(x1, x2)
ymax = max(y1, y2)
screen_mat = np.dot(get_projmatrix(w, h), get_viewmatrix())
screen_vert = [[v[0]/v[3], v[1]/v[3], v[2]/v[3]] for v in
[np.dot(screen_mat, [v[0], v[1], v[2], 1.0]) for v in test_model.verts]]
screen_select = [i for i in range(len(screen_vert)) if
screen_vert[i][0] > xmin and screen_vert[i][0] < xmax and
screen_vert[i][1] > ymin and screen_vert[i][1] < ymax]
if user_action is Action.select:
test_model.selected = screen_select
else:
test_model.selected = list(set(test_model.selected + screen_select))
user_action = Action.none
elif user_action is Action.none and action is GLFW_PRESS:
if glfwGetKey(window, GLFW_KEY_LEFT_CONTROL) is GLFW_PRESS:
user_action = Action.translate
else:
user_action = Action.add_select if glfwGetKey(window, GLFW_KEY_LEFT_SHIFT) is GLFW_PRESS else Action.select
mouse_down_xpos = mouse_xpos
mouse_down_ypos = mouse_ypos
elif button is GLFW_MOUSE_BUTTON_RIGHT:
if (user_action in [Action.camera, Action.rotate]) and action is GLFW_RELEASE:
user_action = Action.none
elif user_action is Action.none and action is GLFW_PRESS:
if glfwGetKey(window, GLFW_KEY_LEFT_CONTROL) is GLFW_PRESS:
user_action = Action.rotate
else:
user_action = Action.camera
def on_scroll(window, xoffset, yoffset):
global model_distance
model_distance = max(model_distance * (1.0 - (0.1 * yoffset)), 0)
def on_mouse_move(window, xpos, ypos):
global model_rotate_x
global model_rotate_y
global mouse_xpos
global mouse_ypos
global user_action
if user_action is Action.select:
pass
elif user_action is Action.camera:
model_rotate_y += pi*(mouse_xpos - xpos)/180
model_rotate_x += pi*(mouse_ypos - ypos)/180
model_rotate_x = min(max(model_rotate_x, -pi/2),pi/2)
elif user_action is Action.translate:
pass
elif user_action is Action.rotate:
pass
mouse_xpos = xpos
mouse_ypos = ypos
def render(w,h):
global gl
global test_model
global model_rotate_x
global model_rotate_y
global user_action
# render model
test_model.update(gl)
test_model.render(gl, get_viewmatrix(), get_projmatrix(w,h), wireframe=True)
# Draw the seelection box
if user_action in [Action.select, Action.add_select]:
gl.glUseProgram(Geometry.flat_shader)
i = pygloo.c_array(GLfloat, _flatten_list(mat4.identity()))
gl.glUniformMatrix4fv(gl.glGetUniformLocation(Geometry.flat_shader, "modelViewMatrix"), 1, True, i)
gl.glUniformMatrix4fv(gl.glGetUniformLocation(Geometry.flat_shader, "projectionMatrix"), 1, True, i)
gl.glUniform3fv(gl.glGetUniformLocation(Geometry.flat_shader, "color"), 1, pygloo.c_array(GLfloat, [0.0, 0.0, 0.0]))
global mouse_xpos
global mouse_ypos
global mouse_down_xpos
global mouse_down_ypos
x1 = (2*mouse_xpos/w)-1
x2 = (2*mouse_down_xpos/w)-1
y1 = -((2*mouse_ypos/h)-1)
y2 = -((2*mouse_down_ypos/h)-1)
gl.glColor3f(0.1, 0.1, 0.1);
gl.glBegin(GL_LINE_LOOP)
gl.glVertex3f( x1, y1, 0.0)
gl.glVertex3f( x1, y2, 0.0)
gl.glVertex3f( x2, y2, 0.0)
gl.glVertex3f( x2, y1, 0.0)
gl.glEnd()
def get_viewmatrix():
camera = np.dot(mat4.rotateY(model_rotate_y), np.dot(mat4.rotateX(model_rotate_x), mat4.translate(0,0,model_distance)))
return np.linalg.inv(camera)
def get_projmatrix(w, h):
zfar = 1000
znear = 0.1
return mat4.perspectiveProjection(pi / 3, float(w)/h, znear, zfar)
def main():
global gl
global test_model
# Initialize the library
if not glfwInit():
sys.exit()
# Initilize GL
gl = pygloo.init()
if not gl:
sys.exit()
# Create a windowed mode window and its OpenGL context
window = glfwCreateWindow(640, 480, "Hello World", None, None)
if not window:
glfwTerminate()
sys.exit()
# Make the window's context current
glfwMakeContextCurrent(window)
# Install a input handlers
glfwSetKeyCallback(window, on_key)
glfwSetMouseButtonCallback(window, on_mouse)
glfwSetCursorPosCallback(window, on_mouse_move)
glfwSetScrollCallback(window, on_scroll)
# Load an obj
#
test_model = Geometry.from_OBJ(gl, "assets/sphere.obj")
# Loop until the user closes the window
while not glfwWindowShouldClose(window):
# Render
width, height = glfwGetFramebufferSize(window)
gl.glViewport(0, 0, width, height)
gl.glBindFramebuffer(GL_DRAW_FRAMEBUFFER, 0);
gl.glClearColor(1.0, 1.0, 1.0, 1.0) # white
gl.glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)
gl.glEnable(GL_DEPTH_TEST);
# gl.glDepthFunc(GL_LESS);
gl.glDepthFunc(GL_LEQUAL);
# Render
#
render(width, height)
# Poll for and process events
glfwPollEvents()
# Swap front and back buffers
glfwSwapBuffers(window)
glfwTerminate()
if __name__ == '__main__':
main()
| [
"geometry.mat4.identity",
"pygloo.init",
"geometry.Geometry.from_OBJ",
"pygloo.c_array",
"geometry.mat4.rotateX",
"geometry.mat4.rotateY",
"numpy.linalg.inv",
"geometry.mat4.translate",
"numpy.dot",
"sys.exit"
] | [((4736, 4757), 'numpy.linalg.inv', 'np.linalg.inv', (['camera'], {}), '(camera)\n', (4749, 4757), True, 'import numpy as np\n'), ((5009, 5022), 'pygloo.init', 'pygloo.init', ([], {}), '()\n', (5020, 5022), False, 'import pygloo\n'), ((5524, 5566), 'geometry.Geometry.from_OBJ', 'Geometry.from_OBJ', (['gl', '"""assets/sphere.obj"""'], {}), "(gl, 'assets/sphere.obj')\n", (5541, 5566), False, 'from geometry import Geometry, mat4, _flatten_list\n'), ((4624, 4652), 'geometry.mat4.rotateY', 'mat4.rotateY', (['model_rotate_y'], {}), '(model_rotate_y)\n', (4636, 4652), False, 'from geometry import Geometry, mat4, _flatten_list\n'), ((4975, 4985), 'sys.exit', 'sys.exit', ([], {}), '()\n', (4983, 4985), False, 'import sys\n'), ((5037, 5047), 'sys.exit', 'sys.exit', ([], {}), '()\n', (5045, 5047), False, 'import sys\n'), ((5206, 5216), 'sys.exit', 'sys.exit', ([], {}), '()\n', (5214, 5216), False, 'import sys\n'), ((4136, 4176), 'pygloo.c_array', 'pygloo.c_array', (['GLfloat', '[0.0, 0.0, 0.0]'], {}), '(GLfloat, [0.0, 0.0, 0.0])\n', (4150, 4176), False, 'import pygloo\n'), ((4661, 4689), 'geometry.mat4.rotateX', 'mat4.rotateX', (['model_rotate_x'], {}), '(model_rotate_x)\n', (4673, 4689), False, 'from geometry import Geometry, mat4, _flatten_list\n'), ((4691, 4727), 'geometry.mat4.translate', 'mat4.translate', (['(0)', '(0)', 'model_distance'], {}), '(0, 0, model_distance)\n', (4705, 4727), False, 'from geometry import Geometry, mat4, _flatten_list\n'), ((3836, 3851), 'geometry.mat4.identity', 'mat4.identity', ([], {}), '()\n', (3849, 3851), False, 'from geometry import Geometry, mat4, _flatten_list\n'), ((1653, 1696), 'numpy.dot', 'np.dot', (['screen_mat', '[v[0], v[1], v[2], 1.0]'], {}), '(screen_mat, [v[0], v[1], v[2], 1.0])\n', (1659, 1696), True, 'import numpy as np\n')] |
# Copyright (c) 2019 <NAME> <<EMAIL>>
# -*- coding: utf-8 -*-
"""
This standalone module is intended to launch independent of the main brain application with the purpose of reading the
contents of connectome and visualizing various aspects.
"""
from pyqtgraph.Qt import QtCore, QtGui
import pyqtgraph.opengl as gl
import pyqtgraph as pg
import os
import json
import time
import random
import numpy as np
global connectome_file_path
try:
# connectome_file_path = sys.argv[1]
connectome_file_path = './connectome_4/'
if connectome_file_path:
print("Connectome path is:", connectome_file_path)
except IndexError or NameError:
print("Error occurred while setting the connectome path")
with open(connectome_file_path + 'genome_tmp.json', "r") as genome_file:
genome_data = json.load(genome_file)
genome = genome_data
blueprint = genome["blueprint"]
cortical_list = []
for key in blueprint:
cortical_list.append(key)
def load_brain():
global connectome_file_path
brain = {}
for item in cortical_list:
if os.path.isfile(connectome_file_path + item + '.json'):
with open(connectome_file_path + item + '.json', "r") as data_file:
data = json.load(data_file)
brain[item] = data
return brain
class Visualizer(object):
def __init__(self):
self.traces = dict()
self.app = QtGui.QApplication(sys.argv)
self.w = gl.GLViewWidget()
self.w.opts['distance'] = 40
self.w.setWindowTitle('pyqtgraph example: GLLinePlotItem')
self.w.setGeometry(0, 110, 1920, 1080)
self.w.show()
# create the background grids
gx = gl.GLGridItem()
gx.rotate(90, 0, 1, 0)
gx.translate(-10, 0, 0)
self.w.addItem(gx)
gy = gl.GLGridItem()
gy.rotate(90, 1, 0, 0)
gy.translate(0, -10, 0)
self.w.addItem(gy)
gz = gl.GLGridItem()
gz.translate(0, 0, -10)
self.w.addItem(gz)
# self.n = 1
# self.m = 1000
# self.y = np.linspace(-10, 10, self.n)
# self.x = np.linspace(-10, 10, self.m)
# self.phase = 0
#
# for i in range(self.n):
# yi = np.array([self.y[i]] * self.m)
# d = np.sqrt(self.x ** 2 + yi ** 2)
# z = 10 * np.cos(d + self.phase) / (d + 1)
# pts = np.vstack([self.x, yi, z]).transpose()
# self.traces[i] = gl.GLLinePlotItem(pos=pts, color=pg.glColor(
# (i, self.n * 1.3)), width=(i + 1) / 10, antialias=True)
# self.w.addItem(self.traces[i])
def start(self):
if (sys.flags.interactive != 1) or not hasattr(QtCore, 'PYQT_VERSION'):
QtGui.QApplication.instance().exec_()
def set_plotdata(self, name, points, color, width):
self.traces[name].setData(pos=points, color=color, width=width)
# def update(self, pos):
# for i in range(self.n):
# # yi = np.array([self.y[i]] * self.m)
# # d = np.sqrt(self.x ** 2 + yi ** 2)
# # z = 10 * np.cos(d + self.phase) / (d + 1)
# # pts = np.vstack([self.x, yi, z]).transpose()
# self.set_plotdata(
# name=i, points=pos,
# color=pg.glColor((i, self.n * 1.3)),
# width=(i + 1) / 10
# )
# self.phase -= .003
def animation(self, pos):
timer = QtCore.QTimer()
timer.timeout.connect(self.set_plotdata(
name=0, points=pos,
color=pg.glColor((1, 1.3)),
width=0.1
))
timer.start(20)
self.start()
def connectome_visualizer(cortical_area, neuron_show=False, neighbor_show=False, threshold=0):
"""Visualizes the Neurons in the connectome"""
print('1')
cortical_file_path = connectome_file_path + cortical_area + '.json'
latest_modification_date = os.path.getmtime(cortical_file_path)
print('2')
brain = load_brain()
neuron_locations = []
for key in brain[cortical_area]:
location_data = brain[cortical_area][key]["location"]
location_data.append(brain[cortical_area][key]["cumulative_fire_count"])
neuron_locations.append(location_data)
print('3')
print('4')
color = ["r", "b", "g", "c", "m", "y", "k", "w"]
# todo: Figure how to determine the delta between previous data-set and new one to solve cumulative plot issue
plot_data = []
while 1 == 1:
new_modification_date = os.path.getmtime(cortical_file_path)
print('5')
if latest_modification_date != new_modification_date:
print('6')
print(new_modification_date)
random_color = color[random.randrange(0, len(color))]
latest_modification_date = new_modification_date
old_brain = brain
previous_plot_data = plot_data
brain = load_brain()
# Displays the Axon-Dendrite connections when True is set
if neighbor_show:
data = brain[cortical_area]
# todo: need to compile a matrix with all source and destinations first before plotting them all
plot_data = []
for neuron_id in data:
if data[neuron_id]["neighbors"].keys():
source_location = data[neuron_id]["location"]
for subkey in data[neuron_id]["neighbors"]:
if (data[neuron_id]['neighbors'][subkey]['cortical_area'] == cortical_area) and (
data[neuron_id]['neighbors'][subkey]['postsynaptic_current'] >= threshold):
destination_location = data[subkey]["location"]
plot_data.append(source_location)
plot_data.append(destination_location)
# print(plot_data)
# todo: build the delta between previous plot data and new plot data
plot_delta = []
for item in plot_data:
if item not in previous_plot_data:
plot_delta.append(item)
pos = np.array(plot_delta)
if pos != []:
print("POS:\n", pos)
v.animation(pos)
time.sleep(5)
if __name__ == '__main__':
v = Visualizer()
# connectome_visualizer(sys.argv[2], neuron_show=False, neighbor_show=True)
connectome_visualizer('vision_memory', neighbor_show=True, neuron_show=False)
# connectome_visualizer_xxx(sys.argv[2], sys.argv[3], neuron_show=False, neighbor_show=True)
# if sys.flags.interactive != 1:
# app.run()
| [
"pyqtgraph.glColor",
"json.load",
"pyqtgraph.Qt.QtGui.QApplication.instance",
"pyqtgraph.opengl.GLGridItem",
"time.sleep",
"os.path.isfile",
"pyqtgraph.Qt.QtCore.QTimer",
"numpy.array",
"os.path.getmtime",
"pyqtgraph.opengl.GLViewWidget",
"pyqtgraph.Qt.QtGui.QApplication"
] | [((802, 824), 'json.load', 'json.load', (['genome_file'], {}), '(genome_file)\n', (811, 824), False, 'import json\n'), ((3940, 3976), 'os.path.getmtime', 'os.path.getmtime', (['cortical_file_path'], {}), '(cortical_file_path)\n', (3956, 3976), False, 'import os\n'), ((1062, 1115), 'os.path.isfile', 'os.path.isfile', (["(connectome_file_path + item + '.json')"], {}), "(connectome_file_path + item + '.json')\n", (1076, 1115), False, 'import os\n'), ((1393, 1421), 'pyqtgraph.Qt.QtGui.QApplication', 'QtGui.QApplication', (['sys.argv'], {}), '(sys.argv)\n', (1411, 1421), False, 'from pyqtgraph.Qt import QtCore, QtGui\n'), ((1439, 1456), 'pyqtgraph.opengl.GLViewWidget', 'gl.GLViewWidget', ([], {}), '()\n', (1454, 1456), True, 'import pyqtgraph.opengl as gl\n'), ((1682, 1697), 'pyqtgraph.opengl.GLGridItem', 'gl.GLGridItem', ([], {}), '()\n', (1695, 1697), True, 'import pyqtgraph.opengl as gl\n'), ((1801, 1816), 'pyqtgraph.opengl.GLGridItem', 'gl.GLGridItem', ([], {}), '()\n', (1814, 1816), True, 'import pyqtgraph.opengl as gl\n'), ((1920, 1935), 'pyqtgraph.opengl.GLGridItem', 'gl.GLGridItem', ([], {}), '()\n', (1933, 1935), True, 'import pyqtgraph.opengl as gl\n'), ((3441, 3456), 'pyqtgraph.Qt.QtCore.QTimer', 'QtCore.QTimer', ([], {}), '()\n', (3454, 3456), False, 'from pyqtgraph.Qt import QtCore, QtGui\n'), ((4542, 4578), 'os.path.getmtime', 'os.path.getmtime', (['cortical_file_path'], {}), '(cortical_file_path)\n', (4558, 4578), False, 'import os\n'), ((6367, 6380), 'time.sleep', 'time.sleep', (['(5)'], {}), '(5)\n', (6377, 6380), False, 'import time\n'), ((6241, 6261), 'numpy.array', 'np.array', (['plot_delta'], {}), '(plot_delta)\n', (6249, 6261), True, 'import numpy as np\n'), ((1220, 1240), 'json.load', 'json.load', (['data_file'], {}), '(data_file)\n', (1229, 1240), False, 'import json\n'), ((2729, 2758), 'pyqtgraph.Qt.QtGui.QApplication.instance', 'QtGui.QApplication.instance', ([], {}), '()\n', (2756, 2758), False, 'from pyqtgraph.Qt import QtCore, QtGui\n'), 
((3564, 3584), 'pyqtgraph.glColor', 'pg.glColor', (['(1, 1.3)'], {}), '((1, 1.3))\n', (3574, 3584), True, 'import pyqtgraph as pg\n')] |
import os
import cv2
import numpy as np
# import printj
# from annotation_utils.coco.structs import COCO_Dataset
# from common_utils.common_types.segmentation import Segmentation
from tqdm import tqdm
def merge_categories(json_path :str, output_json_path :str, merge_from :list,
merge_to :int = None):
"""# merge categories
- json_path :str = input json file path,
- output_json_path :str = output json file path,
- merge_from :list = list of categories id to merge/combine into one category,
- merge_to :int = category id to merge to, default is the first elemnt of 'merge_from'
"""
if merge_to is None:
merge_to = merge_from[0]
coco_dataset = COCO_Dataset.load_from_path(
json_path=json_path,
check_paths=False,
strict=False,
)
pbar = tqdm(coco_dataset.images, colour='#44aa44')
for image in pbar:
pbar.set_description("Combining Categories")
pbar.set_postfix({'file_name': image.file_name})
mask = np.zeros((image.width, image.height), np.uint8)
for ann in coco_dataset.annotations:
if ann.image_id == image.id:
if ann.category_id in merge_from:
seg = ann.segmentation
contours = seg.to_contour()
mask0 = np.zeros((image.width, image.height), np.uint8)
cv2.drawContours(mask0, contours, -1, (255, 255, 255), -1)
mask = mask + mask0
# if show_image(mask):
# return
color_contours, _ = cv2.findContours(
mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
seg = Segmentation.from_contour(
contour_list=color_contours, exclude_invalid_polygons=True)
for ann in coco_dataset.annotations:
if ann.image_id == image.id:
if ann.category_id == merge_to:
ann.segmentation = seg
ann.bbox = seg.to_bbox()
coco_dataset.categories.remove(
[cat_id for cat_id in merge_from if cat_id is not merge_to])
ouput_dir = os.path.abspath(f'{output_json_path}/..')
if not os.path.exists(ouput_dir):
os.makedirs(ouput_dir)
coco_dataset.save_to_path(output_json_path, overwrite=True)
if __name__ == "__main__":
json_path = f'/home/jitesh/3d/data/coco_data/bolt/bp8_0/img/coco_annotations.json'
output_json_path = os.path.abspath(
f'{json_path}/../../json') + '/bolt.json'
merge_from = [0, 1, 2, 3]
merge_to = 0
merge_categories(json_path, output_json_path, merge_from, merge_to)
| [
"tqdm.tqdm",
"os.path.abspath",
"os.makedirs",
"numpy.zeros",
"os.path.exists",
"cv2.drawContours",
"cv2.findContours"
] | [((823, 866), 'tqdm.tqdm', 'tqdm', (['coco_dataset.images'], {'colour': '"""#44aa44"""'}), "(coco_dataset.images, colour='#44aa44')\n", (827, 866), False, 'from tqdm import tqdm\n'), ((2127, 2168), 'os.path.abspath', 'os.path.abspath', (['f"""{output_json_path}/.."""'], {}), "(f'{output_json_path}/..')\n", (2142, 2168), False, 'import os\n'), ((1015, 1062), 'numpy.zeros', 'np.zeros', (['(image.width, image.height)', 'np.uint8'], {}), '((image.width, image.height), np.uint8)\n', (1023, 1062), True, 'import numpy as np\n'), ((1590, 1656), 'cv2.findContours', 'cv2.findContours', (['mask', 'cv2.RETR_EXTERNAL', 'cv2.CHAIN_APPROX_SIMPLE'], {}), '(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\n', (1606, 1656), False, 'import cv2\n'), ((2180, 2205), 'os.path.exists', 'os.path.exists', (['ouput_dir'], {}), '(ouput_dir)\n', (2194, 2205), False, 'import os\n'), ((2215, 2237), 'os.makedirs', 'os.makedirs', (['ouput_dir'], {}), '(ouput_dir)\n', (2226, 2237), False, 'import os\n'), ((2442, 2484), 'os.path.abspath', 'os.path.abspath', (['f"""{json_path}/../../json"""'], {}), "(f'{json_path}/../../json')\n", (2457, 2484), False, 'import os\n'), ((1318, 1365), 'numpy.zeros', 'np.zeros', (['(image.width, image.height)', 'np.uint8'], {}), '((image.width, image.height), np.uint8)\n', (1326, 1365), True, 'import numpy as np\n'), ((1386, 1444), 'cv2.drawContours', 'cv2.drawContours', (['mask0', 'contours', '(-1)', '(255, 255, 255)', '(-1)'], {}), '(mask0, contours, -1, (255, 255, 255), -1)\n', (1402, 1444), False, 'import cv2\n')] |
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===========================================================================
"""DSCNN export."""
import argparse
import numpy as np
from mindspore import Tensor
from mindspore.train.serialization import export
from src.config import eval_config
from src.ds_cnn import DSCNN
from src.models import load_ckpt

# Build the DS-CNN exactly as configured for evaluation and restore weights.
parser = argparse.ArgumentParser()
args, model_settings = eval_config(parser)
network = DSCNN(model_settings, args.model_size_info)
load_ckpt(network, args.model_dir, False)

# Trace the network with one random spectrogram-shaped input and export to AIR.
dummy_shape = [1, 1, model_settings['spectrogram_length'],
               model_settings['dct_coefficient_count']]
x = np.random.uniform(0.0, 1.0, size=dummy_shape).astype(np.float32)
export(network, Tensor(x), file_name=args.model_dir.replace('.ckpt', '.air'), file_format='AIR')
| [
"src.models.load_ckpt",
"src.ds_cnn.DSCNN",
"numpy.random.uniform",
"argparse.ArgumentParser",
"mindspore.Tensor",
"src.config.eval_config"
] | [((908, 933), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (931, 933), False, 'import argparse\n'), ((958, 977), 'src.config.eval_config', 'eval_config', (['parser'], {}), '(parser)\n', (969, 977), False, 'from src.config import eval_config\n'), ((988, 1031), 'src.ds_cnn.DSCNN', 'DSCNN', (['model_settings', 'args.model_size_info'], {}), '(model_settings, args.model_size_info)\n', (993, 1031), False, 'from src.ds_cnn import DSCNN\n'), ((1032, 1073), 'src.models.load_ckpt', 'load_ckpt', (['network', 'args.model_dir', '(False)'], {}), '(network, args.model_dir, False)\n', (1041, 1073), False, 'from src.models import load_ckpt\n'), ((1271, 1280), 'mindspore.Tensor', 'Tensor', (['x'], {}), '(x)\n', (1277, 1280), False, 'from mindspore import Tensor\n'), ((1078, 1202), 'numpy.random.uniform', 'np.random.uniform', (['(0.0)', '(1.0)'], {'size': "[1, 1, model_settings['spectrogram_length'], model_settings[\n 'dct_coefficient_count']]"}), "(0.0, 1.0, size=[1, 1, model_settings['spectrogram_length'\n ], model_settings['dct_coefficient_count']])\n", (1095, 1202), True, 'import numpy as np\n')] |
import numpy as np
import sklearn
"""
input: NxTxFxD tensor
output: NxCxFxT tensor
"""
def from_embedding(embedding, n_channels, n_jobs=-1):
    """Cluster an NxTxFxD embedding into `n_channels` sources.

    Flattens the embedding to (N*T*F, D), runs KMeans, one-hot encodes the
    resulting labels, and returns the mask in NxCxFxT layout.
    """
    dim = embedding.shape[-1]
    flat = embedding.reshape(embedding.size // dim, dim)
    kmeans = sklearn.cluster.KMeans(n_clusters=n_channels, n_jobs=n_jobs)
    labels = kmeans.fit(flat).labels_
    one_hot = np.eye(n_channels)[labels]
    mask = one_hot.reshape(list(embedding.shape[:-1]) + [n_channels])
    return mask.transpose((0, 3, 2, 1))
"""
input: NxTxFxC tensor
output: NxCxFxT tensor
"""
def from_mask(mask):
    """Reorder an NxTxFxC mask into NxCxFxT layout (swap time and channel axes)."""
    n_ax, t_ax, f_ax, c_ax = range(4)
    return mask.transpose((n_ax, c_ax, f_ax, t_ax))
| [
"sklearn.cluster.KMeans",
"numpy.eye"
] | [((196, 256), 'sklearn.cluster.KMeans', 'sklearn.cluster.KMeans', ([], {'n_clusters': 'n_channels', 'n_jobs': 'n_jobs'}), '(n_clusters=n_channels, n_jobs=n_jobs)\n', (218, 256), False, 'import sklearn\n'), ((375, 393), 'numpy.eye', 'np.eye', (['n_channels'], {}), '(n_channels)\n', (381, 393), True, 'import numpy as np\n')] |
# 12. Create a four-dimensional array and get the sum over the last two axes at once.
import numpy as np

np_array = np.random.random(16).reshape(2, 2, 2, 2)
print(np_array)
# Bug fix: the original summed over axis 0 (the FIRST axis) and discarded the
# result. Summing over the last two axes in one call uses a tuple `axis`;
# the result has shape (2, 2).
print(np.sum(np_array, axis=(-2, -1)))
| [
"numpy.random.random",
"numpy.sum"
] | [((166, 190), 'numpy.sum', 'np.sum', (['np_array'], {'axis': '(0)'}), '(np_array, axis=0)\n', (172, 190), True, 'import numpy as np\n'), ((109, 129), 'numpy.random.random', 'np.random.random', (['(16)'], {}), '(16)\n', (125, 129), True, 'import numpy as np\n')] |
import json
import warnings
from collections import defaultdict
from io import StringIO
import numpy as np
from .base import (
UniformCountScoringModelBase,
DecayRateCountScoringModelBase,
LogarithmicCountScoringModelBase,
MassScalingCountScoringModel,
ScoringFeatureBase)
class ChargeStateDistributionScoringModelBase(ScoringFeatureBase):
    """Scoring feature describing how a chromatogram's signal is spread
    over its observed charge states."""

    feature_type = "charge_count"

    def get_state_count(self, chromatogram):
        """Number of distinct charge states observed."""
        return chromatogram.n_charge_states

    def get_states(self, chromatogram):
        """The observed charge states themselves."""
        return chromatogram.charge_states

    def get_signal_proportions(self, chromatogram):
        """Fraction of total signal carried by each charge state.

        Signal from charge states with too little support to appear in
        `get_states` is deliberately left out of the normalization.
        """
        remainder = chromatogram
        raw = {}
        for state in self.get_states(chromatogram):
            piece, remainder = remainder.bisect_charge(state)
            raw[state] = piece.total_signal
        grand_total = sum(raw.values())
        for state in raw:
            raw[state] /= grand_total
        return raw

    def reject(self, score_components, solution):
        """Reject solutions whose charge-count score falls below 0.05."""
        return score_components[self.feature_type] < 0.05
# Shorthand alias used as the shared mixin base for the concrete models below.
_CHARGE_MODEL = ChargeStateDistributionScoringModelBase
# Concrete scoring models: each pairs the charge-state feature with a
# different count-scoring strategy from `.base`.
class UniformChargeStateScoringModel(
        _CHARGE_MODEL, UniformCountScoringModelBase):
    pass
class DecayRateChargeStateScoringModel(
        _CHARGE_MODEL, DecayRateCountScoringModelBase):
    pass
class LogarithmicChargeStateScoringModel(
        _CHARGE_MODEL, LogarithmicCountScoringModelBase):
    pass
def decay(x, step=0.4, rate=1.5):
    """Partial sum of the series step / (i + rate) for i = 0 .. x - 1."""
    return sum(step / (i + rate) for i in range(x))
def ones(x):
    """Remainder of x after stripping whole (floored) multiples of ten."""
    tens = np.floor(x / 10.)
    return x - tens * 10
def neighborhood_of(x, scale=100.):
    """Bucket a mass `x` into its (10 * scale)-wide neighborhood.

    With the default scale of 100 this maps e.g. 950 -> 1000 and
    1050 -> 2000: values are grouped by the next multiple of
    ``10 * scale`` strictly above (or at) their decade.

    NOTE(review): the original implementation branched on ``ones(n) > 5``,
    but both branches computed the *identical* expression, so the test was
    dead code; it has been removed with behavior unchanged. If rounding was
    meant to differ for the upper half of a decade, the else-branch was
    probably intended to omit the ``+ 1`` — confirm against callers.
    """
    n = x / scale
    return (np.floor(n / 10.) + 1) * 10 * scale
# Module-level singleton scorers; `uniform_model` doubles as the fallback in
# MassScalingChargeStateScoringModel.handle_missing_neighborhood.
uniform_model = UniformChargeStateScoringModel()
decay_model = DecayRateChargeStateScoringModel()
class MassScalingChargeStateScoringModel(_CHARGE_MODEL, MassScalingCountScoringModel):
    """Charge-state scoring model whose probability table is keyed by
    mass neighborhood (see ``MassScalingCountScoringModel``)."""

    def __init__(self, table, neighborhood_width=100., fit_information=None):
        # self.table = table
        # self.neighborhood_width = neighborhood_width
        _CHARGE_MODEL.__init__(self)
        MassScalingCountScoringModel.__init__(self, table, neighborhood_width)
        self.fit_information = fit_information or {}

    def handle_missing_neighborhood(self, chromatogram, neighborhood, *args, **kwargs):
        """Fall back to the uniform model when a mass neighborhood is absent
        from the fitted table."""
        warnings.warn(
            ("%f was not found for this charge state "
             "scoring model. Defaulting to uniform model") % neighborhood)
        return uniform_model.score(chromatogram, *args, **kwargs)

    def handle_missing_bin(self, chromatogram, bins, key, neighborhood, *args, **kwargs):
        """Fall back to the average probability of the neighborhood's bins
        when a specific charge state is absent."""
        warnings.warn("%d not found for this mass range (%f). Using bin average (%r)" % (
            key, neighborhood, chromatogram.charge_states))
        return sum(bins.values()) / float(len(bins))

    def transform_state(self, state):
        # Fold positive and negative polarities onto the same key.
        return abs(state)

    @classmethod
    def fit(cls, observations, missing=0.01, neighborhood_width=100.,
            ignore_singly_charged=False):
        """Estimate a per-neighborhood charge-state frequency table.

        Args:
            observations: Iterable of chromatogram-like solutions exposing
                ``neutral_mass`` plus the attributes used by
                ``get_signal_proportions``.
            missing: Pseudocount added to every (neighborhood, charge) cell
                so unseen charges keep non-zero probability.
            neighborhood_width: Passed through to the constructed model.
            ignore_singly_charged: If True, skip charge-1 observations.

        Returns:
            A fitted ``cls`` instance.
        """
        bins = defaultdict(lambda: defaultdict(float))
        fit_info = {
            "ignore_singly_charged": ignore_singly_charged,
            "missing": missing,
        }
        # Temporary instance used only for its neighborhood_of() helper.
        self = cls({}, neighborhood_width=neighborhood_width)
        for sol in observations:
            neighborhood = self.neighborhood_of(sol.neutral_mass)
            for c, val in self.get_signal_proportions(sol).items():
                c = self.transform_state(c)
                if ignore_singly_charged and c == 1:
                    continue
                # Unweighted counting: each observed charge state adds 1,
                # regardless of its signal proportion `val` (contrast with
                # the Weighted subclass).
                bins[neighborhood][c] += 1
        model_table = {}
        all_states = set()
        for level in bins.values():
            all_states.update(level.keys())
        # Always include charge +/-1 (sign taken from the smallest observed
        # state); the division makes the added key a float, but 1 == 1.0 so
        # set/dict lookups are unaffected.
        all_states.add(1 * (min(all_states) / abs(min(all_states))))
        for neighborhood, counts in bins.items():
            for c in all_states:
                counts[c] += missing
            total = sum(counts.values())
            entry = {k: v / total for k, v in counts.items()}
            model_table[neighborhood] = entry
        return cls(model_table, neighborhood_width, fit_information=fit_info)

    def dump(self, file_obj, include_fit_information=True):
        """Serialize the model to `file_obj` as JSON."""
        json.dump(
            {
                "neighborhood_width": self.neighborhood_width,
                "table": self.table,
                "fit_information": self.fit_information if include_fit_information else {}
            },
            file_obj, indent=4, sort_keys=True)

    @classmethod
    def load(cls, file_obj):
        """Deserialize a model previously written by :meth:`dump`.

        JSON stringifies the numeric keys, so both table levels are
        converted back (neighborhoods to float, charges to int).
        """
        data = json.load(file_obj)
        table = data.pop("table")
        width = float(data.pop("neighborhood_width"))

        def numeric_keys(table, dtype=float, convert_value=lambda x: x):
            return {abs(dtype(k)): convert_value(v) for k, v in table.items()}

        table = numeric_keys(table, convert_value=lambda x: numeric_keys(x, int))
        return cls(table=table, neighborhood_width=width)

    def clone(self):
        """Deep-copy the model by round-tripping through its JSON form."""
        text_buffer = StringIO()
        self.dump(text_buffer)
        text_buffer.seek(0)
        return self.load(text_buffer)
class WeightedMassScalingChargeStateScoringModel(MassScalingChargeStateScoringModel):
    """Variant of :class:`MassScalingChargeStateScoringModel` that weights
    each charge state by its signal proportion instead of counting
    occurrences, with optional smoothing toward the dominant charge."""

    @classmethod
    def fit(cls, observations, missing=0.01, neighborhood_width=100.,
            ignore_singly_charged=False, smooth=0):
        """Fit a signal-weighted per-neighborhood charge-state table.

        Args:
            observations: Iterable of chromatogram-like solutions.
            missing: Pseudocount added to every cell.
            neighborhood_width: Passed through to the constructed model.
            ignore_singly_charged: If True, skip charge-1 observations.
            smooth: If > 0, every *observed* non-dominant charge gets an
                extra ``smooth`` fraction of the dominant charge's total
                added before normalization.

        Returns:
            A fitted ``cls`` instance; raw per-state observations are kept
            in ``fit_information['track']``.
        """
        bins = defaultdict(lambda: defaultdict(float))
        fit_info = {
            "ignore_singly_charged": ignore_singly_charged,
            "missing": missing,
            "smooth": smooth,
            "track": defaultdict(lambda: defaultdict(list)),
            "count": defaultdict(int)
        }
        # Temporary instance used only for its neighborhood_of() helper.
        self = cls({}, neighborhood_width=neighborhood_width)
        for sol in observations:
            neighborhood = self.neighborhood_of(sol.neutral_mass)
            fit_info['count'][neighborhood] += 1
            for c, val in self.get_signal_proportions(sol).items():
                c = self.transform_state(c)
                if ignore_singly_charged and c == 1:
                    continue
                fit_info['track'][neighborhood][c].append(val)
                # Weighted: accumulate the signal proportion itself.
                bins[neighborhood][c] += val
        model_table = {}
        all_states = set()
        for level in bins.values():
            all_states.update(level.keys())
        # Always include charge +/-1 (sign taken from the smallest observed
        # state); the division yields a float key, but 1 == 1.0 in sets/dicts.
        all_states.add(1 * (min(all_states) / abs(min(all_states))))
        for neighborhood, counts in bins.items():
            largest_charge = None
            largest_charge_total = 0
            for c in all_states:
                counts[c] += missing
                if counts[c] > largest_charge_total:
                    largest_charge = c
                    largest_charge_total = counts[c]
            if smooth > 0:
                smooth_shift = largest_charge_total * smooth
                for c in all_states:
                    # Only smooth charges that were actually observed
                    # (count above the bare pseudocount).
                    if c != largest_charge and counts[c] > missing:
                        counts[c] += smooth_shift
            total = sum(counts.values())
            entry = {k: v / total for k, v in counts.items()}
            model_table[neighborhood] = entry
        return cls(model_table, neighborhood_width, fit_information=fit_info)
| [
"json.dump",
"io.StringIO",
"json.load",
"numpy.floor",
"collections.defaultdict",
"warnings.warn"
] | [((2641, 2764), 'warnings.warn', 'warnings.warn', (["('%f was not found for this charge state scoring model. Defaulting to uniform model'\n % neighborhood)"], {}), "(\n '%f was not found for this charge state scoring model. Defaulting to uniform model'\n % neighborhood)\n", (2654, 2764), False, 'import warnings\n'), ((2951, 3088), 'warnings.warn', 'warnings.warn', (["('%d not found for this mass range (%f). Using bin average (%r)' % (key,\n neighborhood, chromatogram.charge_states))"], {}), "(\n '%d not found for this mass range (%f). Using bin average (%r)' % (key,\n neighborhood, chromatogram.charge_states))\n", (2964, 3088), False, 'import warnings\n'), ((4542, 4742), 'json.dump', 'json.dump', (["{'neighborhood_width': self.neighborhood_width, 'table': self.table,\n 'fit_information': self.fit_information if include_fit_information else {}}", 'file_obj'], {'indent': '(4)', 'sort_keys': '(True)'}), "({'neighborhood_width': self.neighborhood_width, 'table': self.\n table, 'fit_information': self.fit_information if\n include_fit_information else {}}, file_obj, indent=4, sort_keys=True)\n", (4551, 4742), False, 'import json\n'), ((4883, 4902), 'json.load', 'json.load', (['file_obj'], {}), '(file_obj)\n', (4892, 4902), False, 'import json\n'), ((5330, 5340), 'io.StringIO', 'StringIO', ([], {}), '()\n', (5338, 5340), False, 'from io import StringIO\n'), ((1765, 1783), 'numpy.floor', 'np.floor', (['(x / 10.0)'], {}), '(x / 10.0)\n', (1773, 1783), True, 'import numpy as np\n'), ((5946, 5962), 'collections.defaultdict', 'defaultdict', (['int'], {}), '(int)\n', (5957, 5962), False, 'from collections import defaultdict\n'), ((1902, 1920), 'numpy.floor', 'np.floor', (['(n / 10.0)'], {}), '(n / 10.0)\n', (1910, 1920), True, 'import numpy as np\n'), ((1964, 1982), 'numpy.floor', 'np.floor', (['(n / 10.0)'], {}), '(n / 10.0)\n', (1972, 1982), True, 'import numpy as np\n'), ((3376, 3394), 'collections.defaultdict', 'defaultdict', (['float'], {}), '(float)\n', (3387, 3394), 
False, 'from collections import defaultdict\n'), ((5700, 5718), 'collections.defaultdict', 'defaultdict', (['float'], {}), '(float)\n', (5711, 5718), False, 'from collections import defaultdict\n'), ((5905, 5922), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (5916, 5922), False, 'from collections import defaultdict\n')] |
from functools import partial
import numpy as np
def _generate_jitted_eigsh_lanczos(jax):
  """
  Helper function to generate jitted lanczos function used
  in JaxBackend.eigsh_lanczos. The function `jax_lanczos`
  returned by this higher-order function has the following
  call signature:
  ```
  eigenvalues, eigenvectors = jax_lanczos(matvec:Callable,
                                       arguments: List[Tensor],
                                       init: Tensor,
                                       ncv: int,
                                       neig: int,
                                       landelta: float,
                                       reortho: bool)
  ```
  `matvec`: A callable implementing the matrix-vector product of a
  linear operator. `arguments`: Arguments to `matvec` additional to
  an input vector. `matvec` will be called as `matvec(init, *args)`.
  `init`: An initial input state to `matvec`.
  `ncv`: Number of krylov iterations (i.e. dimension of the Krylov space).
  `neig`: Number of eigenvalue-eigenvector pairs to be computed.
  `landelta`: Convergence parameter: if the norm of the current Lanczos vector
    falls below `landelta`, iteration is stopped.
  `reortho`: If `True`, reorthogonalize all krylov vectors at each step.
     This should be used if `neig>1`.
  Args:
    jax: The `jax` module.
  Returns:
    Callable: A jitted function that does a lanczos iteration.
  """

  @partial(jax.jit, static_argnums=(3, 4, 5, 6))
  def jax_lanczos(matvec, arguments, init, ncv, neig, landelta, reortho):
    """
    Jitted lanczos routine.
    Args:
      matvec: A callable implementing the matrix-vector product of a
        linear operator.
      arguments: Arguments to `matvec` additional to an input vector.
        `matvec` will be called as `matvec(init, *args)`.
      init: An initial input state to `matvec`.
      ncv: Number of krylov iterations (i.e. dimension of the Krylov space).
      neig: Number of eigenvalue-eigenvector pairs to be computed.
      landelta: Convergence parameter: if the norm of the current Lanczos vector
        falls below `landelta`, iteration is stopped.
      reortho: If `True`, reorthogonalize all krylov vectors at each step.
        This should be used if `neig>1`.
    Returns:
      list: Eigen values
      list: Eigen vectors
    """

    # One modified-Gram-Schmidt step: project krylov vector `i` out of `vector`.
    def body_modified_gram_schmidt(i, vals):
      vector, krylov_vectors = vals
      v = krylov_vectors[i, :]
      vector -= jax.numpy.vdot(v, vector) * jax.numpy.reshape(v, vector.shape)
      return [vector, krylov_vectors]

    # One Lanczos iteration, run under jax.lax.while_loop below.
    def body_lanczos(vals):
      current_vector, krylov_vectors, vector_norms = vals[0:3]
      diagonal_elements, matvec, args, _ = vals[3:7]
      threshold, i, maxiteration = vals[7:]
      norm = jax.numpy.linalg.norm(current_vector)
      normalized_vector = current_vector / norm
      # Optional full reorthogonalization against all stored krylov vectors.
      normalized_vector, krylov_vectors = jax.lax.cond(
          reortho, True,
          lambda x: jax.lax.fori_loop(0, i, body_modified_gram_schmidt,
                                      [normalized_vector, krylov_vectors]),
          False, lambda x: [normalized_vector, krylov_vectors])
      Av = matvec(normalized_vector, *args)
      diag_element = jax.numpy.vdot(normalized_vector, Av)
      # Three-term Lanczos recurrence; at i == 1 the `i - 1` row of
      # krylov_vectors is still all zeros, so the subtraction is harmless.
      res = jax.numpy.reshape(
          jax.numpy.ravel(Av) -
          jax.numpy.ravel(normalized_vector) * diag_element -
          krylov_vectors[i - 1] * norm, Av.shape)
      krylov_vectors = jax.ops.index_update(krylov_vectors, jax.ops.index[i, :],
                                           jax.numpy.ravel(normalized_vector))
      vector_norms = jax.ops.index_update(vector_norms, jax.ops.index[i - 1],
                                         norm)
      diagonal_elements = jax.ops.index_update(diagonal_elements,
                                              jax.ops.index[i - 1],
                                              diag_element)

      return [
          res, krylov_vectors, vector_norms, diagonal_elements, matvec, args,
          norm, threshold, i + 1, maxiteration
      ]

    # Continue while iteration <= maxiteration AND norm >= threshold.
    def cond_fun(vals):
      _, _, _, _, _, _, norm, threshold, iteration, maxiteration = vals

      def check_thresh(check_vals):
        val, thresh = check_vals
        return jax.lax.cond(val < thresh, False, lambda x: x, True, lambda x: x)

      return jax.lax.cond(iteration <= maxiteration, [norm, threshold],
                          check_thresh, False, lambda x: x)

    # Row 0 of krylov_vecs intentionally stays zero (see recurrence above).
    numel = jax.numpy.prod(init.shape)
    krylov_vecs = jax.numpy.zeros((ncv + 1, numel), dtype=init.dtype)
    norms = jax.numpy.zeros(ncv, dtype=init.dtype)
    diag_elems = jax.numpy.zeros(ncv, dtype=init.dtype)

    norms = jax.ops.index_update(norms, jax.ops.index[0], 1.0)

    norms_dtype = np.real(init.dtype).dtype
    initvals = [
        init, krylov_vecs, norms, diag_elems, matvec, arguments,
        norms_dtype.type(1.0), landelta, 1, ncv
    ]
    output = jax.lax.while_loop(cond_fun, body_lanczos, initvals)
    final_state, krylov_vecs, norms, diags, _, _, _, _, it, _ = output
    krylov_vecs = jax.ops.index_update(krylov_vecs, jax.ops.index[it, :],
                                      jax.numpy.ravel(final_state))

    # Assemble the (hermitian) tridiagonal Lanczos matrix and diagonalize it.
    A_tridiag = jax.numpy.diag(diags) + jax.numpy.diag(
        norms[1:], 1) + jax.numpy.diag(jax.numpy.conj(norms[1:]), -1)
    eigvals, U = jax.numpy.linalg.eigh(A_tridiag)
    eigvals = eigvals.astype(A_tridiag.dtype)

    # Accumulate Ritz vectors: states[n, :] += krylov_vecs[m + 1, :] * U[m, n],
    # with (n, m) recovered from the flat loop index.
    def body_vector(i, vals):
      krv, unitary, states = vals
      dim = unitary.shape[1]
      n, m = jax.numpy.divmod(i, dim)
      states = jax.ops.index_add(states, jax.ops.index[n, :],
                                krv[m + 1, :] * unitary[m, n])
      return [krv, unitary, states]

    state_vectors = jax.numpy.zeros([neig, numel], dtype=init.dtype)
    _, _, vectors = jax.lax.fori_loop(0, neig * (krylov_vecs.shape[0] - 1),
                                      body_vector,
                                      [krylov_vecs, U, state_vectors])

    # Return the `neig` smallest eigenvalues and their normalized Ritz vectors.
    return jax.numpy.array(eigvals[0:neig]), [
        jax.numpy.reshape(vectors[n, :], init.shape) /
        jax.numpy.linalg.norm(vectors[n, :]) for n in range(neig)
    ]

  return jax_lanczos
def _generate_arnoldi_factorization(jax):
  """
  Helper function to create a jitted arnoldi factorization.
  The function returns a function `_arnoldi_fact` which
  performs an m-step arnoldi factorization.
  `_arnoldi_fact` computes an m-step arnoldi factorization
  of an input callable `matvec`, with m = min(`it`,`num_krylov_vecs`).
  `_arnoldi_fact` will do at most `num_krylov_vecs` steps.
  `_arnoldi_fact` returns arrays `kv` and `H` which satisfy
  the Arnoldi recurrence relation
  ```
  matrix @ Vm - Vm @ Hm - fm * em = 0
  ```
  with `matrix` the matrix representation of `matvec` and
  `Vm = jax.numpy.transpose(kv[:it, :])`,
  `Hm = H[:it, :it]`, `fm = np.expand_dims(kv[it, :] * H[it, it - 1]`,1)
  and `em` a cartesian basis vector of shape `(1, kv.shape[1])`
  with `em[0, -1] == 1` and 0 elsewhere.
  Note that the caller is responsible for dtype consistency between
  the inputs, i.e. dtypes between all input arrays have to match.
  Args:
    matvec: The matrix vector product. This function has to be wrapped into
      `jax.tree_util.Partial`. `matvec` will be called as `matvec(x, *args)`
      for an input vector `x`.
    args: List of arguments to `matvec`.
    v0: Initial state to `matvec`.
    krylov_vectors: An array for storing the krylov vectors. The individual
      vectors are stored as columns. The shape of `krylov_vecs` has to be
      (num_krylov_vecs + 1, np.ravel(v0).shape[0]).
    H: Matrix of overlaps. The shape has to be
      (num_krylov_vecs + 1,num_krylov_vecs + 1).
    start: Integer denoting the start position where the first
      produced krylov_vector should be inserted into `krylov_vectors`
    num_krylov_vecs: Number of krylov iterations, should be identical to
      `krylov_vectors.shape[0] - 1`
    eps: Convergence parameter. Iteration is terminated if the norm of a
      krylov-vector falls below `eps`.
  Returns:
    kv: An array of krylov vectors
    H: A matrix of overlaps
    it: The number of performed iterations.
  """

  @jax.jit
  def modified_gram_schmidt_step_arnoldi(j, vals):
    """
    Single step of a modified gram-schmidt orthogonalization.
    Args:
      j: Integer value denoting the vector to be orthogonalized.
      vals: A list of variables:
        `vector`: The current vector to be orthogonalized
          to all previous ones
        `krylov_vectors`: jax.array of collected krylov vectors
        `n`: integer denoting the column-position of the overlap
          <`krylov_vector`|`vector`> within `H`.
    Returns:
      updated vals.
    """
    vector, krylov_vectors, n, H = vals
    v = krylov_vectors[j, :]
    h = jax.numpy.vdot(v, vector)
    # Record the overlap in H and subtract the projection from `vector`.
    H = jax.ops.index_update(H, jax.ops.index[j, n], h)
    vector = vector - h * jax.numpy.reshape(v, vector.shape)
    return [vector, krylov_vectors, n, H]

  @partial(jax.jit, static_argnums=(5, 6, 7))
  def _arnoldi_fact(matvec, args, v0, krylov_vectors, H, start, num_krylov_vecs,
                    eps):
    """
    Compute an m-step arnoldi factorization of `matvec`, with
    m = min(`it`,`num_krylov_vecs`). The factorization will
    do at most `num_krylov_vecs` steps. The returned arrays
    `kv` and `H` will satisfy the Arnoldi recurrence relation
    ```
    matrix @ Vm - Vm @ Hm - fm * em = 0
    ```
    with `matrix` the matrix representation of `matvec` and
    `Vm = jax.numpy.transpose(kv[:it, :])`,
    `Hm = H[:it, :it]`, `fm = np.expand_dims(kv[it, :] * H[it, it - 1]`,1)
    and `em` a cartesian basis vector of shape `(1, kv.shape[1])`
    with `em[0, -1] == 1` and 0 elsewhere.
    Note that the caller is responsible for dtype consistency between
    the inputs, i.e. dtypes between all input arrays have to match.
    Args:
      matvec: The matrix vector product.
      args: List of arguments to `matvec`.
      v0: Initial state to `matvec`.
      krylov_vectors: An array for storing the krylov vectors. The individual
        vectors are stored as columns. The shape of `krylov_vecs` has to be
        (num_krylov_vecs + 1, np.ravel(v0).shape[0]).
      H: Matrix of overlaps. The shape has to be
        (num_krylov_vecs + 1,num_krylov_vecs + 1).
      start: Integer denoting the start position where the first
        produced krylov_vector should be inserted into `krylov_vectors`
      num_krylov_vecs: Number of krylov iterations, should be identical to
        `krylov_vectors.shape[0] - 1`
      eps: Convergence parameter. Iteration is terminated if the norm of a
        krylov-vector falls below `eps`.
    Returns:
      kv: An array of krylov vectors
      H: A matrix of overlaps
      it: The number of performed iterations.
    """
    Z = jax.numpy.linalg.norm(v0)
    v = v0 / Z
    krylov_vectors = jax.ops.index_update(krylov_vectors,
                                         jax.ops.index[start, :],
                                         jax.numpy.ravel(v))
    # When restarting (start > 0), record |v0| as the subdiagonal entry
    # connecting the restarted block to the previous one.
    H = jax.lax.cond(
        start > 0, start,
        lambda x: jax.ops.index_update(H, jax.ops.index[x, x - 1], Z), None,
        lambda x: H)

    # body of the arnoldi iteration
    def body(vals):
      krylov_vectors, H, matvec, vector, _, threshold, i, maxiter = vals
      Av = matvec(vector, *args)
      initial_vals = [Av, krylov_vectors, i, H]
      # Orthogonalize Av against krylov vectors 0..i (modified Gram-Schmidt).
      Av, krylov_vectors, _, H = jax.lax.fori_loop(
          0, i + 1, modified_gram_schmidt_step_arnoldi, initial_vals)
      norm = jax.numpy.linalg.norm(Av)
      Av /= norm
      H = jax.ops.index_update(H, jax.ops.index[i + 1, i], norm)
      krylov_vectors = jax.ops.index_update(krylov_vectors,
                                           jax.ops.index[i + 1, :],
                                           jax.numpy.ravel(Av))
      return [krylov_vectors, H, matvec, Av, norm, threshold, i + 1, maxiter]

    # Continue while iteration < maxiter AND norm >= threshold (an invariant
    # subspace has been found once the residual norm drops below threshold).
    def cond_fun(vals):
      _, _, _, _, norm, threshold, iteration, maxiter = vals

      # check if an invariant subspace has been found
      def check_thresh(check_vals):
        val, thresh = check_vals
        return jax.lax.cond(val < thresh, False, lambda x: x, True, lambda x: x)

      return jax.lax.cond(iteration < maxiter, [norm, threshold], check_thresh,
                          False, lambda x: x)

    norms_dtype = np.real(v0.dtype).dtype
    kvfinal, Hfinal, _, _, norm, _, it, _ = jax.lax.while_loop(
        cond_fun, body, [
            krylov_vectors, H, matvec, v,
            norms_dtype.type(1E3), eps, start, num_krylov_vecs
        ])
    return kvfinal, Hfinal, it, norm < eps

  return _arnoldi_fact
| [
"functools.partial",
"numpy.real"
] | [((1443, 1488), 'functools.partial', 'partial', (['jax.jit'], {'static_argnums': '(3, 4, 5, 6)'}), '(jax.jit, static_argnums=(3, 4, 5, 6))\n', (1450, 1488), False, 'from functools import partial\n'), ((9025, 9067), 'functools.partial', 'partial', (['jax.jit'], {'static_argnums': '(5, 6, 7)'}), '(jax.jit, static_argnums=(5, 6, 7))\n', (9032, 9067), False, 'from functools import partial\n'), ((4752, 4771), 'numpy.real', 'np.real', (['init.dtype'], {}), '(init.dtype)\n', (4759, 4771), True, 'import numpy as np\n'), ((12416, 12433), 'numpy.real', 'np.real', (['v0.dtype'], {}), '(v0.dtype)\n', (12423, 12433), True, 'import numpy as np\n')] |
import numpy as np
import lunarsky.tests as ltest
from astropy.coordinates import ICRS, GCRS, EarthLocation, AltAz
from astropy.time import Time
from lunarsky import MoonLocation, SkyCoord, LunarTopo, MCMF
# Check that the changes to SkyCoord don't cause unexpected behavior.
def test_skycoord_transforms():
    """The EarthLocation attached to a frame must survive transformations."""
    site = EarthLocation.from_geodetic(0.0, 10.0)
    catalog = ltest.get_catalog()
    topo = catalog.transform_to(AltAz(location=site, obstime=Time.now()))
    assert topo.location == site
    for target in (GCRS(), ICRS()):
        assert topo.transform_to(target).location == site
assert icrs.location == eloc
def test_skycoord_with_lunar_frames():
    """SkyCoord built with frame names 'lunartopo' and 'mcmf' round-trips."""
    n_srcs = 10
    alt = np.random.uniform(0, np.pi / 2, n_srcs)
    az = np.random.uniform(0, 2 * np.pi, n_srcs)
    obstime = Time.now()
    site = MoonLocation.from_selenodetic(0, 0)

    topo = SkyCoord(alt=alt, az=az, unit='rad', frame='lunartopo',
                    obstime=obstime, location=site)
    assert topo.location == site
    assert isinstance(topo.frame, LunarTopo)

    x, y, z = topo.transform_to('mcmf').cartesian.xyz
    mcmf = SkyCoord(x=x, y=y, z=z, frame='mcmf',
                    obstime=obstime, location=site)
    assert isinstance(mcmf.frame, MCMF)

    # ICRS positions must agree whether reached directly or via MCMF.
    via_mcmf = mcmf.transform_to('icrs')
    direct = topo.transform_to('icrs')
    assert np.allclose(via_mcmf.ra.deg, direct.ra.deg, atol=1e-5)
    assert np.allclose(via_mcmf.dec.deg, direct.dec.deg, atol=1e-5)
def test_earth_and_moon():
    """Mixing Moon- and Earth-based topocentric frames must not break the
    transform graph."""
    n_srcs = 10
    alt = np.random.uniform(0, np.pi / 2, n_srcs)
    az = np.random.uniform(0, 2 * np.pi, n_srcs)
    obstime = Time.now()

    moon_site = MoonLocation.from_selenodetic(0, 0)
    earth_site = EarthLocation.from_geodetic(0, 0)

    moon_src = SkyCoord(alt=alt, az=az, unit='rad', frame='lunartopo',
                        obstime=obstime, location=moon_site)
    earth_src = SkyCoord(alt=alt, az=az, unit='rad', frame='altaz',
                         obstime=obstime, location=earth_site)

    # Both conversions must succeed without raising.
    moon_src.transform_to('icrs')
    earth_src.transform_to('icrs')
| [
"astropy.coordinates.EarthLocation.from_geodetic",
"numpy.random.uniform",
"lunarsky.tests.get_catalog",
"lunarsky.MoonLocation.from_selenodetic",
"astropy.coordinates.GCRS",
"astropy.coordinates.ICRS",
"numpy.allclose",
"astropy.time.Time.now",
"lunarsky.SkyCoord"
] | [((414, 452), 'astropy.coordinates.EarthLocation.from_geodetic', 'EarthLocation.from_geodetic', (['(0.0)', '(10.0)'], {}), '(0.0, 10.0)\n', (441, 452), False, 'from astropy.coordinates import ICRS, GCRS, EarthLocation, AltAz\n'), ((466, 485), 'lunarsky.tests.get_catalog', 'ltest.get_catalog', ([], {}), '()\n', (483, 485), True, 'import lunarsky.tests as ltest\n'), ((901, 939), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(np.pi / 2)', 'Nsrcs'], {}), '(0, np.pi / 2, Nsrcs)\n', (918, 939), True, 'import numpy as np\n'), ((950, 988), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(2 * np.pi)', 'Nsrcs'], {}), '(0, 2 * np.pi, Nsrcs)\n', (967, 988), True, 'import numpy as np\n'), ((998, 1008), 'astropy.time.Time.now', 'Time.now', ([], {}), '()\n', (1006, 1008), False, 'from astropy.time import Time\n'), ((1019, 1054), 'lunarsky.MoonLocation.from_selenodetic', 'MoonLocation.from_selenodetic', (['(0)', '(0)'], {}), '(0, 0)\n', (1048, 1054), False, 'from lunarsky import MoonLocation, SkyCoord, LunarTopo, MCMF\n'), ((1065, 1152), 'lunarsky.SkyCoord', 'SkyCoord', ([], {'alt': 'alts', 'az': 'azs', 'unit': '"""rad"""', 'frame': '"""lunartopo"""', 'obstime': 't0', 'location': 'loc'}), "(alt=alts, az=azs, unit='rad', frame='lunartopo', obstime=t0,\n location=loc)\n", (1073, 1152), False, 'from lunarsky import MoonLocation, SkyCoord, LunarTopo, MCMF\n'), ((1308, 1371), 'lunarsky.SkyCoord', 'SkyCoord', ([], {'x': 'x', 'y': 'y', 'z': 'z', 'frame': '"""mcmf"""', 'obstime': 't0', 'location': 'loc'}), "(x=x, y=y, z=z, frame='mcmf', obstime=t0, location=loc)\n", (1316, 1371), False, 'from lunarsky import MoonLocation, SkyCoord, LunarTopo, MCMF\n'), ((1520, 1571), 'numpy.allclose', 'np.allclose', (['icrs2.ra.deg', 'icrs1.ra.deg'], {'atol': '(1e-05)'}), '(icrs2.ra.deg, icrs1.ra.deg, atol=1e-05)\n', (1531, 1571), True, 'import numpy as np\n'), ((1582, 1635), 'numpy.allclose', 'np.allclose', (['icrs2.dec.deg', 'icrs1.dec.deg'], {'atol': '(1e-05)'}), '(icrs2.dec.deg, 
icrs1.dec.deg, atol=1e-05)\n', (1593, 1635), True, 'import numpy as np\n'), ((1804, 1842), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(np.pi / 2)', 'Nsrcs'], {}), '(0, np.pi / 2, Nsrcs)\n', (1821, 1842), True, 'import numpy as np\n'), ((1853, 1891), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(2 * np.pi)', 'Nsrcs'], {}), '(0, 2 * np.pi, Nsrcs)\n', (1870, 1891), True, 'import numpy as np\n'), ((1901, 1911), 'astropy.time.Time.now', 'Time.now', ([], {}), '()\n', (1909, 1911), False, 'from astropy.time import Time\n'), ((1922, 1957), 'lunarsky.MoonLocation.from_selenodetic', 'MoonLocation.from_selenodetic', (['(0)', '(0)'], {}), '(0, 0)\n', (1951, 1957), False, 'from lunarsky import MoonLocation, SkyCoord, LunarTopo, MCMF\n'), ((1968, 2055), 'lunarsky.SkyCoord', 'SkyCoord', ([], {'alt': 'alts', 'az': 'azs', 'unit': '"""rad"""', 'frame': '"""lunartopo"""', 'obstime': 't0', 'location': 'loc'}), "(alt=alts, az=azs, unit='rad', frame='lunartopo', obstime=t0,\n location=loc)\n", (1976, 2055), False, 'from lunarsky import MoonLocation, SkyCoord, LunarTopo, MCMF\n'), ((2083, 2116), 'astropy.coordinates.EarthLocation.from_geodetic', 'EarthLocation.from_geodetic', (['(0)', '(0)'], {}), '(0, 0)\n', (2110, 2116), False, 'from astropy.coordinates import ICRS, GCRS, EarthLocation, AltAz\n'), ((2128, 2213), 'lunarsky.SkyCoord', 'SkyCoord', ([], {'alt': 'alts', 'az': 'azs', 'unit': '"""rad"""', 'frame': '"""altaz"""', 'obstime': 't0', 'location': 'eloc'}), "(alt=alts, az=azs, unit='rad', frame='altaz', obstime=t0, location=eloc\n )\n", (2136, 2213), False, 'from lunarsky import MoonLocation, SkyCoord, LunarTopo, MCMF\n'), ((627, 633), 'astropy.coordinates.GCRS', 'GCRS', ([], {}), '()\n', (631, 633), False, 'from astropy.coordinates import ICRS, GCRS, EarthLocation, AltAz\n'), ((700, 706), 'astropy.coordinates.ICRS', 'ICRS', ([], {}), '()\n', (704, 706), False, 'from astropy.coordinates import ICRS, GCRS, EarthLocation, AltAz\n'), ((548, 558), 
'astropy.time.Time.now', 'Time.now', ([], {}), '()\n', (556, 558), False, 'from astropy.time import Time\n')] |
from .models import NeuropowerModel
from crispy_forms.layout import Submit, Layout, Field, Div, HTML, Fieldset, ButtonHolder
from crispy_forms.bootstrap import PrependedAppendedText
from crispy_forms.helper import FormHelper
from django.core import exceptions
from django import forms
import numpy as np
class ParameterForm(forms.ModelForm):
class Meta:
model = NeuropowerModel
fields = ['map_url','spmfile','maskfile','ZorT','Exc','Subj','Samples',
'alpha','SmoothEst','Smoothx','Smoothy','Smoothz','Voxx','Voxy','Voxz']
def __init__(self,*args,**kwargs):
self.default_url = kwargs.pop('default_url')
self.err = kwargs.pop('err')
super(ParameterForm,self).__init__(*args,**kwargs)
self.fields['map_url'].widget = forms.URLInput(attrs={'placeholder':self.default_url})
self.fields['map_url'].label = "URL"
self.fields['map_url'].required = False
self.fields['spmfile'].label = "Upload"
self.fields['spmfile'].required = False
#self.fields['maskfile'].label = "Upload a full brain mask or a Region-of-Interest mask."
self.fields['maskfile'].required = False
self.fields['ZorT'].label = "Are the data Z- or T-values?"
self.fields['Exc'].label = "What is the screening threshold, also known as the clusterforming threshold or the excursion threshold (either p-value or z-value units)?"
self.fields['Exc'].required = True
self.fields['Subj'].label = "How many subjects does the group map represent?"
self.fields['Samples'].label = "Is this a one-sample or a two-sample test?"
self.fields['alpha'].label = "At which alpha-level are the statistical tests carried out?"
self.fields['SmoothEst'].label = "Do you want to manually specify the smoothness or estimate from the data? <br> Note though that estimating smoothness on statistical maps leads to <a href='http://www.fil.ion.ucl.ac.uk/spm/doc/papers/sjk_robust.pdf'>biases</a>. It is preferable to manually specify the data."
self.fields['SmoothEst'].widget = forms.RadioSelect()
self.fields['Smoothx'].label = ""
self.fields['Smoothx'].widget = forms.TextInput(attrs={'placeholder':'x'})
self.fields['Smoothx'].required = False
self.fields['Smoothy'].label = ""
self.fields['Smoothy'].widget = forms.TextInput(attrs={'placeholder':'y'})
self.fields['Smoothy'].required = False
self.fields['Smoothz'].label = ""
self.fields['Smoothz'].widget = forms.TextInput(attrs={'placeholder':'z'})
self.fields['Smoothz'].required = False
self.fields['Voxx'].label = ""
self.fields['Voxx'].widget = forms.TextInput(attrs={'placeholder':'x'})
self.fields['Voxx'].required = False
self.fields['Voxy'].label = ""
self.fields['Voxy'].widget = forms.TextInput(attrs={'placeholder':'y'})
self.fields['Voxy'].required = False
self.fields['Voxz'].label = ""
self.fields['Voxz'].widget = forms.TextInput(attrs={'placeholder':'z'})
self.fields['Voxz'].required = False
def clean(self):
cleaned_data = super(ParameterForm,self).clean()
map_url = cleaned_data.get('map_url')
spmfile = cleaned_data.get('spmfile')
maskfile = cleaned_data.get('maskfile')
exc = cleaned_data.get('Exc')
subj = cleaned_data.get("Subj")
alpha = cleaned_data.get("alpha")
smoothest = cleaned_data.get("SmoothEst")
smooth = np.array([cleaned_data.get('Smoothx'),cleaned_data.get('Smoothy'),cleaned_data.get('Smoothz'),cleaned_data.get('Voxx'),cleaned_data.get('Voxy'),cleaned_data.get('Voxz')])
if smoothest == 2:
pass
else:
print(smooth)
if np.sum(np.equal(smooth,None))>0:
raise forms.ValidationError("For manual selection of smoothness, please fill out the smoothness and voxelsize in all three dimensions.")
if self.err == "dim":
raise forms.ValidationError("The selected statistical map and mask do not have the same dimensions.")
if self.err == "median":
raise forms.ValidationError("Are you sure this is a statistical map? The interquartile range is extremely large.")
if self.err == "shape":
raise forms.ValidationError("Are you sure this is a statistical map? Your map has more than 3 dimensions.")
if map_url and not spmfile == None:
raise forms.ValidationError("Please choose: either paste a link to the data or upload your map. Not both.")
if not map_url and spmfile == None:
raise forms.ValidationError("Please tell us where to find the data: either paste a link to the data or upload your map.")
if map_url and spmfile == None:
if not (map_url.endswith('.nii.gz') or map_url.endswith('.nii.gz')):
raise forms.ValidationError("The statistical map has the wrong format: please choose a nifti-file")
if not spmfile == None:
if spmfile.name:
if " " in spmfile.name:
raise forms.ValidationError("The app currently can't handle filenames that have spaces. Please rename the statistical map without spaces.")
if not map_url and not spmfile == None:
if not (spmfile.name.endswith('.nii') or spmfile.name.endswith('.nii.gz')):
raise forms.ValidationError("The statistical map has the wrong format: please choose a nifti-file")
if spmfile.size > 10**9:
raise forms.ValidationError("Maximum file size for the statistical map: 100 MB")
if not maskfile == None:
if " " in maskfile.name:
raise forms.ValidationError("The app currently can't handle filenames that have spaces. Please rename the mask without spaces.")
if not (maskfile.name.endswith('.nii') or maskfile.name.endswith('.nii.gz')):
raise forms.ValidationError("The mask has the wrong format: please choose a nifti-file")
if maskfile.size > 10**7:
raise forms.ValidationError("Maximum mask file size: 10 MB")
if 0.5 < exc < 2:
raise forms.ValidationError("For a p-value, that screening threshold is too big; for a t-value it's too small.")
if exc > 5:
raise forms.ValidationError("Your screening threshold is impossibly high.")
if subj < 10:
raise forms.ValidationError("We found that our power estimations are not valid for sample sizes smaller than 10!")
if alpha > 0.20:
raise forms.ValidationError("Are you sure about that alpha level? Your tests have a high chance of producing false positives.")
return cleaned_data
helper = FormHelper()
helper.form_method = 'POST'
helper.field_class = 'col-lg-12'
helper.label_class = 'col-lg-12'
helper.layout = Layout(
Fieldset(
'Data location',
HTML("""<h6 style="margin-left: 15px">Either paste a link to the online nifti-file <b>OR</b> upload your statistical map.</h6>"""),
'map_url',
'spmfile'
),
HTML("""<br><br>"""),
Fieldset(
'Mask location (optional)',
HTML("""<h6 style="margin-left: 15px">Upload a full brain mask or a Region-of-Interest mask. If no mask is selected, all non-null voxels are used.</h6>"""),
'maskfile'
),
HTML("""<br><br>"""),
Fieldset(
'Design specifications',
'ZorT','Exc','Subj','Samples','alpha','SmoothEst'
),
HTML("""<p style="margin-left: 15px"><b> \n What is the smoothness of the data in mm? </b></p>"""),
Div(
Div(Field('Smoothx'), css_class='col-xs-4'),
Div(Field('Smoothy'), css_class='col-xs-4'),
Div(Field('Smoothz'), css_class='col-xs-4'),
css_class='row-xs-12'
),
HTML("""<p style="margin-left: 15px"><b> \n What is the voxel size in mm? </b></p>"""),
Div(
Div(Field('Voxx'), css_class='col-xs-4'),
Div(Field('Voxy'), css_class='col-xs-4'),
Div(Field('Voxz'), css_class='col-xs-4'),
css_class='row-xs-12'
),
HTML("""<br><br><br><br><br>"""),
ButtonHolder(Submit('Submit', 'Submit parameters', css_class='btn-black')),
HTML("""<br><br><br><br><br>"""),
)
class PeakTableForm(forms.ModelForm):
class Meta:
model = NeuropowerModel
fields = '__all__'
class MixtureForm(forms.ModelForm):
class Meta:
model = NeuropowerModel
fields = '__all__'
class PowerTableForm(forms.ModelForm):
class Meta:
model = NeuropowerModel
fields = '__all__'
class PowerForm(forms.ModelForm):
SID = forms.CharField(required=False)
reqPow = forms.DecimalField(required=False,label = "Power")
reqSS = forms.IntegerField(required=False,label = "Sample size")
class Meta:
model = NeuropowerModel
fields = ['MCP','reqSS','reqPow']
helper = FormHelper()
helper.form_method = 'POST'
helper.field_class = 'col-lg-12'
helper.label_class = 'col-lg-12'
helper.layout = Layout(
Fieldset(
'Power',
HTML("To see the power for a certain sample size or vice versa, please fill out either the minimal power or the sample size."),
HTML("""<br><br><br>"""),
'MCP','reqSS','reqPow'
),
HTML("""<br>"""),
ButtonHolder(Submit('Submit', 'Submit parameters', css_class='btn-secondary'))
)
def clean(self):
super(forms.ModelForm,self).clean()
reqPow = self.cleaned_data['reqPow']
reqSS = self.cleaned_data['reqSS']
if reqPow > 1:
raise exceptions.ValidationError("Power cannot exceed 1.")
if reqPow and reqSS:
raise exceptions.ValidationError("Please fill out only either the power or the sample size, not both.")
if not reqPow:
self.cleaned_data['reqPow'] = 0
if not reqSS:
self.cleaned_data['reqSS'] = 0
return self.cleaned_data
| [
"crispy_forms.layout.HTML",
"django.forms.RadioSelect",
"django.core.exceptions.ValidationError",
"django.forms.IntegerField",
"django.forms.URLInput",
"crispy_forms.helper.FormHelper",
"django.forms.TextInput",
"crispy_forms.layout.Fieldset",
"crispy_forms.layout.Field",
"numpy.equal",
"django.... | [((6833, 6845), 'crispy_forms.helper.FormHelper', 'FormHelper', ([], {}), '()\n', (6843, 6845), False, 'from crispy_forms.helper import FormHelper\n'), ((8897, 8928), 'django.forms.CharField', 'forms.CharField', ([], {'required': '(False)'}), '(required=False)\n', (8912, 8928), False, 'from django import forms\n'), ((8942, 8991), 'django.forms.DecimalField', 'forms.DecimalField', ([], {'required': '(False)', 'label': '"""Power"""'}), "(required=False, label='Power')\n", (8960, 8991), False, 'from django import forms\n'), ((9005, 9060), 'django.forms.IntegerField', 'forms.IntegerField', ([], {'required': '(False)', 'label': '"""Sample size"""'}), "(required=False, label='Sample size')\n", (9023, 9060), False, 'from django import forms\n'), ((9165, 9177), 'crispy_forms.helper.FormHelper', 'FormHelper', ([], {}), '()\n', (9175, 9177), False, 'from crispy_forms.helper import FormHelper\n'), ((790, 845), 'django.forms.URLInput', 'forms.URLInput', ([], {'attrs': "{'placeholder': self.default_url}"}), "(attrs={'placeholder': self.default_url})\n", (804, 845), False, 'from django import forms\n'), ((2096, 2115), 'django.forms.RadioSelect', 'forms.RadioSelect', ([], {}), '()\n', (2113, 2115), False, 'from django import forms\n'), ((2199, 2242), 'django.forms.TextInput', 'forms.TextInput', ([], {'attrs': "{'placeholder': 'x'}"}), "(attrs={'placeholder': 'x'})\n", (2214, 2242), False, 'from django import forms\n'), ((2372, 2415), 'django.forms.TextInput', 'forms.TextInput', ([], {'attrs': "{'placeholder': 'y'}"}), "(attrs={'placeholder': 'y'})\n", (2387, 2415), False, 'from django import forms\n'), ((2545, 2588), 'django.forms.TextInput', 'forms.TextInput', ([], {'attrs': "{'placeholder': 'z'}"}), "(attrs={'placeholder': 'z'})\n", (2560, 2588), False, 'from django import forms\n'), ((2712, 2755), 'django.forms.TextInput', 'forms.TextInput', ([], {'attrs': "{'placeholder': 'x'}"}), "(attrs={'placeholder': 'x'})\n", (2727, 2755), False, 'from django import 
forms\n'), ((2876, 2919), 'django.forms.TextInput', 'forms.TextInput', ([], {'attrs': "{'placeholder': 'y'}"}), "(attrs={'placeholder': 'y'})\n", (2891, 2919), False, 'from django import forms\n'), ((3040, 3083), 'django.forms.TextInput', 'forms.TextInput', ([], {'attrs': "{'placeholder': 'z'}"}), "(attrs={'placeholder': 'z'})\n", (3055, 3083), False, 'from django import forms\n'), ((7239, 7255), 'crispy_forms.layout.HTML', 'HTML', (['"""<br><br>"""'], {}), "('<br><br>')\n", (7243, 7255), False, 'from crispy_forms.layout import Submit, Layout, Field, Div, HTML, Fieldset, ButtonHolder\n'), ((7535, 7551), 'crispy_forms.layout.HTML', 'HTML', (['"""<br><br>"""'], {}), "('<br><br>')\n", (7539, 7551), False, 'from crispy_forms.layout import Submit, Layout, Field, Div, HTML, Fieldset, ButtonHolder\n'), ((7565, 7658), 'crispy_forms.layout.Fieldset', 'Fieldset', (['"""Design specifications"""', '"""ZorT"""', '"""Exc"""', '"""Subj"""', '"""Samples"""', '"""alpha"""', '"""SmoothEst"""'], {}), "('Design specifications', 'ZorT', 'Exc', 'Subj', 'Samples', 'alpha',\n 'SmoothEst')\n", (7573, 7658), False, 'from crispy_forms.layout import Submit, Layout, Field, Div, HTML, Fieldset, ButtonHolder\n'), ((7696, 7803), 'crispy_forms.layout.HTML', 'HTML', (['"""<p style="margin-left: 15px"><b> \n What is the smoothness of the data in mm? </b></p>"""'], {}), '(\n """<p style="margin-left: 15px"><b> \n What is the smoothness of the data in mm? </b></p>"""\n )\n', (7700, 7803), False, 'from crispy_forms.layout import Submit, Layout, Field, Div, HTML, Fieldset, ButtonHolder\n'), ((8032, 8127), 'crispy_forms.layout.HTML', 'HTML', (['"""<p style="margin-left: 15px"><b> \n What is the voxel size in mm? </b></p>"""'], {}), '(\n """<p style="margin-left: 15px"><b> \n What is the voxel size in mm? 
</b></p>"""\n )\n', (8036, 8127), False, 'from crispy_forms.layout import Submit, Layout, Field, Div, HTML, Fieldset, ButtonHolder\n'), ((8345, 8373), 'crispy_forms.layout.HTML', 'HTML', (['"""<br><br><br><br><br>"""'], {}), "('<br><br><br><br><br>')\n", (8349, 8373), False, 'from crispy_forms.layout import Submit, Layout, Field, Div, HTML, Fieldset, ButtonHolder\n'), ((8471, 8499), 'crispy_forms.layout.HTML', 'HTML', (['"""<br><br><br><br><br>"""'], {}), "('<br><br><br><br><br>')\n", (8475, 8499), False, 'from crispy_forms.layout import Submit, Layout, Field, Div, HTML, Fieldset, ButtonHolder\n'), ((9591, 9603), 'crispy_forms.layout.HTML', 'HTML', (['"""<br>"""'], {}), "('<br>')\n", (9595, 9603), False, 'from crispy_forms.layout import Submit, Layout, Field, Div, HTML, Fieldset, ButtonHolder\n'), ((4041, 4141), 'django.forms.ValidationError', 'forms.ValidationError', (['"""The selected statistical map and mask do not have the same dimensions."""'], {}), "(\n 'The selected statistical map and mask do not have the same dimensions.')\n", (4062, 4141), False, 'from django import forms\n'), ((4189, 4308), 'django.forms.ValidationError', 'forms.ValidationError', (['"""Are you sure this is a statistical map? The interquartile range is extremely large."""'], {}), "(\n 'Are you sure this is a statistical map? The interquartile range is extremely large.'\n )\n", (4210, 4308), False, 'from django import forms\n'), ((4350, 4462), 'django.forms.ValidationError', 'forms.ValidationError', (['"""Are you sure this is a statistical map? Your map has more than 3 dimensions."""'], {}), "(\n 'Are you sure this is a statistical map? Your map has more than 3 dimensions.'\n )\n", (4371, 4462), False, 'from django import forms\n'), ((4516, 4628), 'django.forms.ValidationError', 'forms.ValidationError', (['"""Please choose: either paste a link to the data or upload your map. Not both."""'], {}), "(\n 'Please choose: either paste a link to the data or upload your map. 
Not both.'\n )\n", (4537, 4628), False, 'from django import forms\n'), ((4682, 4807), 'django.forms.ValidationError', 'forms.ValidationError', (['"""Please tell us where to find the data: either paste a link to the data or upload your map."""'], {}), "(\n 'Please tell us where to find the data: either paste a link to the data or upload your map.'\n )\n", (4703, 4807), False, 'from django import forms\n'), ((6258, 6374), 'django.forms.ValidationError', 'forms.ValidationError', (['"""For a p-value, that screening threshold is too big; for a t-value it\'s too small."""'], {}), '(\n "For a p-value, that screening threshold is too big; for a t-value it\'s too small."\n )\n', (6279, 6374), False, 'from django import forms\n'), ((6404, 6473), 'django.forms.ValidationError', 'forms.ValidationError', (['"""Your screening threshold is impossibly high."""'], {}), "('Your screening threshold is impossibly high.')\n", (6425, 6473), False, 'from django import forms\n'), ((6515, 6633), 'django.forms.ValidationError', 'forms.ValidationError', (['"""We found that our power estimations are not valid for sample sizes smaller than 10!"""'], {}), "(\n 'We found that our power estimations are not valid for sample sizes smaller than 10!'\n )\n", (6536, 6633), False, 'from django import forms\n'), ((6668, 6799), 'django.forms.ValidationError', 'forms.ValidationError', (['"""Are you sure about that alpha level? Your tests have a high chance of producing false positives."""'], {}), "(\n 'Are you sure about that alpha level? 
Your tests have a high chance of producing false positives.'\n )\n", (6689, 6799), False, 'from django import forms\n'), ((7039, 7175), 'crispy_forms.layout.HTML', 'HTML', (['"""<h6 style="margin-left: 15px">Either paste a link to the online nifti-file <b>OR</b> upload your statistical map.</h6>"""'], {}), '(\n \'<h6 style="margin-left: 15px">Either paste a link to the online nifti-file <b>OR</b> upload your statistical map.</h6>\'\n )\n', (7043, 7175), False, 'from crispy_forms.layout import Submit, Layout, Field, Div, HTML, Fieldset, ButtonHolder\n'), ((7331, 7493), 'crispy_forms.layout.HTML', 'HTML', (['"""<h6 style="margin-left: 15px">Upload a full brain mask or a Region-of-Interest mask. If no mask is selected, all non-null voxels are used.</h6>"""'], {}), '(\n \'<h6 style="margin-left: 15px">Upload a full brain mask or a Region-of-Interest mask. If no mask is selected, all non-null voxels are used.</h6>\'\n )\n', (7335, 7493), False, 'from crispy_forms.layout import Submit, Layout, Field, Div, HTML, Fieldset, ButtonHolder\n'), ((8400, 8460), 'crispy_forms.layout.Submit', 'Submit', (['"""Submit"""', '"""Submit parameters"""'], {'css_class': '"""btn-black"""'}), "('Submit', 'Submit parameters', css_class='btn-black')\n", (8406, 8460), False, 'from crispy_forms.layout import Submit, Layout, Field, Div, HTML, Fieldset, ButtonHolder\n'), ((9363, 9499), 'crispy_forms.layout.HTML', 'HTML', (['"""To see the power for a certain sample size or vice versa, please fill out either the minimal power or the sample size."""'], {}), "(\n 'To see the power for a certain sample size or vice versa, please fill out either the minimal power or the sample size.'\n )\n", (9367, 9499), False, 'from crispy_forms.layout import Submit, Layout, Field, Div, HTML, Fieldset, ButtonHolder\n'), ((9503, 9523), 'crispy_forms.layout.HTML', 'HTML', (['"""<br><br><br>"""'], {}), "('<br><br><br>')\n", (9507, 9523), False, 'from crispy_forms.layout import Submit, Layout, Field, Div, HTML, Fieldset, 
ButtonHolder\n'), ((9634, 9698), 'crispy_forms.layout.Submit', 'Submit', (['"""Submit"""', '"""Submit parameters"""'], {'css_class': '"""btn-secondary"""'}), "('Submit', 'Submit parameters', css_class='btn-secondary')\n", (9640, 9698), False, 'from crispy_forms.layout import Submit, Layout, Field, Div, HTML, Fieldset, ButtonHolder\n'), ((9900, 9952), 'django.core.exceptions.ValidationError', 'exceptions.ValidationError', (['"""Power cannot exceed 1."""'], {}), "('Power cannot exceed 1.')\n", (9926, 9952), False, 'from django.core import exceptions\n'), ((10000, 10102), 'django.core.exceptions.ValidationError', 'exceptions.ValidationError', (['"""Please fill out only either the power or the sample size, not both."""'], {}), "(\n 'Please fill out only either the power or the sample size, not both.')\n", (10026, 10102), False, 'from django.core import exceptions\n'), ((3860, 4000), 'django.forms.ValidationError', 'forms.ValidationError', (['"""For manual selection of smoothness, please fill out the smoothness and voxelsize in all three dimensions."""'], {}), "(\n 'For manual selection of smoothness, please fill out the smoothness and voxelsize in all three dimensions.'\n )\n", (3881, 4000), False, 'from django import forms\n'), ((4942, 5040), 'django.forms.ValidationError', 'forms.ValidationError', (['"""The statistical map has the wrong format: please choose a nifti-file"""'], {}), "(\n 'The statistical map has the wrong format: please choose a nifti-file')\n", (4963, 5040), False, 'from django import forms\n'), ((5458, 5556), 'django.forms.ValidationError', 'forms.ValidationError', (['"""The statistical map has the wrong format: please choose a nifti-file"""'], {}), "(\n 'The statistical map has the wrong format: please choose a nifti-file')\n", (5479, 5556), False, 'from django import forms\n'), ((5611, 5685), 'django.forms.ValidationError', 'forms.ValidationError', (['"""Maximum file size for the statistical map: 100 MB"""'], {}), "('Maximum file size for the 
statistical map: 100 MB')\n", (5632, 5685), False, 'from django import forms\n'), ((5779, 5912), 'django.forms.ValidationError', 'forms.ValidationError', (['"""The app currently can\'t handle filenames that have spaces. Please rename the mask without spaces."""'], {}), '(\n "The app currently can\'t handle filenames that have spaces. Please rename the mask without spaces."\n )\n', (5800, 5912), False, 'from django import forms\n'), ((6015, 6102), 'django.forms.ValidationError', 'forms.ValidationError', (['"""The mask has the wrong format: please choose a nifti-file"""'], {}), "(\n 'The mask has the wrong format: please choose a nifti-file')\n", (6036, 6102), False, 'from django import forms\n'), ((6158, 6212), 'django.forms.ValidationError', 'forms.ValidationError', (['"""Maximum mask file size: 10 MB"""'], {}), "('Maximum mask file size: 10 MB')\n", (6179, 6212), False, 'from django import forms\n'), ((7824, 7840), 'crispy_forms.layout.Field', 'Field', (['"""Smoothx"""'], {}), "('Smoothx')\n", (7829, 7840), False, 'from crispy_forms.layout import Submit, Layout, Field, Div, HTML, Fieldset, ButtonHolder\n'), ((7881, 7897), 'crispy_forms.layout.Field', 'Field', (['"""Smoothy"""'], {}), "('Smoothy')\n", (7886, 7897), False, 'from crispy_forms.layout import Submit, Layout, Field, Div, HTML, Fieldset, ButtonHolder\n'), ((7938, 7954), 'crispy_forms.layout.Field', 'Field', (['"""Smoothz"""'], {}), "('Smoothz')\n", (7943, 7954), False, 'from crispy_forms.layout import Submit, Layout, Field, Div, HTML, Fieldset, ButtonHolder\n'), ((8148, 8161), 'crispy_forms.layout.Field', 'Field', (['"""Voxx"""'], {}), "('Voxx')\n", (8153, 8161), False, 'from crispy_forms.layout import Submit, Layout, Field, Div, HTML, Fieldset, ButtonHolder\n'), ((8201, 8214), 'crispy_forms.layout.Field', 'Field', (['"""Voxy"""'], {}), "('Voxy')\n", (8206, 8214), False, 'from crispy_forms.layout import Submit, Layout, Field, Div, HTML, Fieldset, ButtonHolder\n'), ((8254, 8267), 
'crispy_forms.layout.Field', 'Field', (['"""Voxz"""'], {}), "('Voxz')\n", (8259, 8267), False, 'from crispy_forms.layout import Submit, Layout, Field, Div, HTML, Fieldset, ButtonHolder\n'), ((3812, 3834), 'numpy.equal', 'np.equal', (['smooth', 'None'], {}), '(smooth, None)\n', (3820, 3834), True, 'import numpy as np\n'), ((5164, 5308), 'django.forms.ValidationError', 'forms.ValidationError', (['"""The app currently can\'t handle filenames that have spaces. Please rename the statistical map without spaces."""'], {}), '(\n "The app currently can\'t handle filenames that have spaces. Please rename the statistical map without spaces."\n )\n', (5185, 5308), False, 'from django import forms\n')] |
import numpy as np
import matplotlib.pyplot as plt
from skimage.draw import ellipse
import sys
from scipy.ndimage.measurements import center_of_mass
from numpy import unravel_index
import scipy
from .core import mfi
plt.close("all")
plt.rcParams.update({'font.size': 18})
def find_nearest(array, value):
array = np.asarray(array)
idx = (np.abs(array - value)).argmin()
return idx, array[idx]
def reconstruction(rec_times, alpha_1, alpha_2, alpha_3, alpha_4):
"""
Perform a tomographic reconstruction using the current directory files for the designated time instants.
The algorithm will use each reconstruction as the initial guess for the next one.
For this reason you should only provide consecutive time instants.
input:
rec_times: iterable
times to use for reconstruction make sure it's coherent with the time axis provided in signals.npy
output:
first_guess: 2D numpy array
matrix with the emissivity profile. Last index is the x coordinate
reconstruction result: 2D numpy array
matrix with the emissivity profile. Last index is the x coordinate
x_array: numpy array
array with x positions according to the provided resolution
y_array: numpy array
array with y positions according to the provided resolution
"""
#########################################################################
# #
# PREPARATION SPECIFIC #
# #
#########################################################################
# Projections vector p ------------------------------------------------------
fname = 'projections.npy'
print('Reading:', fname)
projections = np.load(fname)
print('projections:', projections.shape, projections.dtype)
P = projections.reshape((projections.shape[0], -1))
print('P:', P.shape, P.dtype)
# Signals and vector f -----------------------------------------------------
fname = 'signals_data.npy'
print('Reading:', fname)
signals_data = np.load(fname)
print('signals_data:', signals_data.shape, signals_data.dtype)
fname = 'signals_time.npy'
print('Reading:', fname)
signals_time = np.load(fname)
print('signals_time:', signals_time.shape, signals_time.dtype)
# time=18000.
# time_index,time=find_nearest(signals_time[0],time)
# f=signals_data[:,time_index]
# signals=[]
# times=np.arange(18000,19001,500)
# for time in times:
# time_index,time=find_nearest(signals_time[0],time)
# signals.append(signals_data[:,time_index])
signals = []
for time in rec_times:
time_index, time = find_nearest(signals_time[0], time)
signals.append(signals_data[:, time_index])
# Reconstruction Resolution -----------------------------------------------
n_rows = projections.shape[1]
n_cols = projections.shape[2]
res = [4.4444, 4.4444] # x,y (mm)
# x and y arrays for plotting purposes. Coordinates represent the top left corner of each pixel
x_array_plot = (np.arange(n_cols + 1) - n_cols / 2.) * res[0]
y_array_plot = (n_rows / 2. - np.arange(n_rows + 1)) * res[1]
# x and y arrays for calculation purposes. Coordinates represent the center of each pixel
x_array = np.arange(n_cols) * res[0] - n_cols / 2. * res[0]
y_array = n_rows / 2. * res[1] - np.arange(n_rows) * res[1]
# Convergence parameters --------------------------------------------------
stop_criteria = 1e-10
max_iterations = 20
# Overwrite Regularization parameters -----------------------------------------------
# alpha_1 = 0.00005
# alpha_2 = alpha_1
# alpha_3 = 1
g_list, first_g = mfi(signals=signals,
projections=projections,
stop_criteria=stop_criteria,
alpha_1=alpha_1,
alpha_2=alpha_2,
alpha_3=alpha_3,
alpha_4=alpha_4,
max_iterations=max_iterations)
# Evaluate chi square
P_tilde = P / 1.0
f_tilde = signals[-1] / 1.0
number_sensors = 32.
chi_square = np.sum((np.dot(P_tilde, g_list[-1].flatten()) - f_tilde) ** 2) / number_sensors
print("Chi Squared: %f" % chi_square)
return first_g, g_list, x_array_plot, y_array_plot
| [
"numpy.load",
"numpy.abs",
"matplotlib.pyplot.close",
"numpy.asarray",
"matplotlib.pyplot.rcParams.update",
"numpy.arange"
] | [((219, 235), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (228, 235), True, 'import matplotlib.pyplot as plt\n'), ((237, 275), 'matplotlib.pyplot.rcParams.update', 'plt.rcParams.update', (["{'font.size': 18}"], {}), "({'font.size': 18})\n", (256, 275), True, 'import matplotlib.pyplot as plt\n'), ((322, 339), 'numpy.asarray', 'np.asarray', (['array'], {}), '(array)\n', (332, 339), True, 'import numpy as np\n'), ((1961, 1975), 'numpy.load', 'np.load', (['fname'], {}), '(fname)\n', (1968, 1975), True, 'import numpy as np\n'), ((2295, 2309), 'numpy.load', 'np.load', (['fname'], {}), '(fname)\n', (2302, 2309), True, 'import numpy as np\n'), ((2458, 2472), 'numpy.load', 'np.load', (['fname'], {}), '(fname)\n', (2465, 2472), True, 'import numpy as np\n'), ((351, 372), 'numpy.abs', 'np.abs', (['(array - value)'], {}), '(array - value)\n', (357, 372), True, 'import numpy as np\n'), ((3333, 3354), 'numpy.arange', 'np.arange', (['(n_cols + 1)'], {}), '(n_cols + 1)\n', (3342, 3354), True, 'import numpy as np\n'), ((3413, 3434), 'numpy.arange', 'np.arange', (['(n_rows + 1)'], {}), '(n_rows + 1)\n', (3422, 3434), True, 'import numpy as np\n'), ((3554, 3571), 'numpy.arange', 'np.arange', (['n_cols'], {}), '(n_cols)\n', (3563, 3571), True, 'import numpy as np\n'), ((3641, 3658), 'numpy.arange', 'np.arange', (['n_rows'], {}), '(n_rows)\n', (3650, 3658), True, 'import numpy as np\n')] |
from rlkit.envs.remote import RemoteRolloutEnv
from rlkit.misc import eval_util
from rlkit.samplers.rollout_functions import rollout
from rlkit.torch.core import PyTorchModule
import rlkit.torch.pytorch_util as ptu
import argparse
import pickle
import uuid
from rlkit.core import logger
import torch
from sawyer_control.envs.sawyer_grip import SawyerGripEnv
import matplotlib.pyplot as plt
filename = str(uuid.uuid4())
import numpy as np
import ipdb
import pandas as pd
import numpy as np
import argparse
# from rlkit.torch.conv_networks import CNN, ConcatCNN, ConcatBottleneckCNN, TwoHeadCNN, VQVAEEncoderCNN
import torchvision.transforms.functional as F
from PIL import Image
import matplotlib.pyplot as plt
import matplotlib
import torch
import rlkit.torch.pytorch_util as ptu
import seaborn as sns
import os
import pickle
from rlkit.data_management.load_buffer_real import *
ptu.set_gpu_mode(True)
parser = argparse.ArgumentParser()
parser.add_argument('--buffer', default=1)
parser.add_argument('--local', action='store_false', default=True)
args = parser.parse_args()
if not args.local:
data_path = '/nfs/kun1/users/ashvin/data/val_data'
observation_key = 'image'
paths = []
args.buffer = int(args.buffer)
if args.buffer == 0:
print('lid on')
paths.append((os.path.join(data_path, 'fixed_pot_demos.npy'), os.path.join(data_path, 'fixed_pot_demos_putlidon_rew.pkl')))
elif args.buffer == 1:
print('lid off')
paths.append((os.path.join(data_path, 'fixed_pot_demos.npy'), os.path.join(data_path, 'fixed_pot_demos_takeofflid_rew.pkl')))
elif args.buffer == 2:
print('tray')
paths.append((os.path.join(data_path, 'fixed_tray_demos.npy'), os.path.join(data_path, 'fixed_tray_demos_rew.pkl')))
elif args.buffer == 3:
print('drawer')
paths.append((os.path.join(data_path, 'fixed_drawer_demos.npy'), os.path.join(data_path, 'fixed_drawer_demos_rew.pkl')))
elif args.buffer == 4:
print('Stephen Tool Use')
path = '/nfs/kun1/users/stephentian/on_policy_longer_1_26_buffers/move_tool_obj_together_fixed_6_2_train.pkl'
elif args.buffer == 5:
print('General Demos')
paths.append((os.path.join(data_path, 'general_demos.npy'), None))
else:
assert False
if args.buffer in [4]:
replay_buffer = pickle.load(open(path, 'rb'))
else:
replay_buffer = get_buffer(observation_key=observation_key, image_shape=(64, 64, 3))
for path, rew_path in paths:
load_path(path, rew_path, replay_buffer, bc=False)
else:
img = Image.open('/home/ashvin/ros_ws/src/sawyer_control/src/frame.png')
trans = transforms.ToPILImage()
trans1 = transforms.ToTensor()
obs_local = trans1(img.convert("RGB"))
import time
import numpy as np
import matplotlib.pyplot as plt
import torch
import torchvision.transforms.functional as F
from PIL import Image
def tellme(s):
print(s)
plt.title(s, fontsize=16)
plt.draw()
plt.clf()
plt.setp(plt.gca(), autoscale_on=True)
from sawyer_control.envs.sawyer_grip import SawyerGripEnv
env = SawyerGripEnv(action_mode='position',
config_name='ashvin_config',
reset_free=False,
position_action_scale=0.05,
max_speed=0.4,
step_sleep_time=0.2,
crop_version_str="crop_val_torch")
def plot(obs_img):
if type(obs_img) == torch.Tensor:
from torchvision import transforms
im_new = transforms.ToPILImage()(obs_img.cpu())
else:
im_new = obs_img
plt.imshow(im_new)
def crop(img, img_dim = (64,64)):
from matplotlib import cm
img = img.astype(float)
img /= 255.
img = img[:, 50:530, :]
img = Image.fromarray(np.uint8(img*255))
img = F.resize(img, img_dim, Image.ANTIALIAS)
img = np.array(img)
img = img*1.0/255
img = img.transpose([2,0,1]) #.flatten()
return torch.from_numpy(img).float()
while True:
pts = []
while len(pts) < 4:
observation = env._get_obs()
if args.local:
obs_img_curr = np.flip(observation['hires_image_observation'], axis=-1)
else:
obs_img_curr = crop(np.flip(observation['hires_image_observation'], axis=-1), img_dim=(64,64))
plot(obs_img_curr)
tellme('Select 4 corners with mouse')
pts = np.asarray(plt.ginput(4, timeout=-1))
if len(pts) < 4:
tellme('Too few points, starting over')
time.sleep(1) # Wait a second
ph = plt.fill(pts[:, 0], pts[:, 1], 'r', lw=2)
tellme('Happy? Key click for yes, mouse click for no')
if plt.waitforbuttonpress():
for p in ph:
p.remove()
break
for p in ph:
p.remove()
src = pts
while True:
pts = []
while len(pts) < 4:
observation = obs_local if args.local else torch.from_numpy(replay_buffer._obs['image'][0].reshape(3,64,64))
obs_img = observation if args.local else torch.from_numpy(observation.numpy().swapaxes(-2,-1))
plot(obs_img)
tellme('Select 4 corners with mouse')
pts = np.asarray(plt.ginput(4, timeout=-1))
if len(pts) < 4:
tellme('Too few points, starting over')
time.sleep(1) # Wait a second
ph = plt.fill(pts[:, 0], pts[:, 1], 'r', lw=2)
tellme('Happy? Key click for yes, mouse click for no')
if plt.waitforbuttonpress():
break
plt.clf()
dest = pts
plt.close()
import cv2
# from torchgeometry.core.imgwarp import warp_perspective
matrix = cv2.getPerspectiveTransform(src.astype(np.float32),dest.astype(np.float32)) #Try this SWAP
obsnp = obs_img_curr if args.local else obs_img_curr.permute(1,2,0).cpu().numpy()
# cv2.imshow('original', obsnp)
# cv2.waitKey(0)
warped = cv2.warpPerspective(obsnp, matrix, (obsnp.shape[1], obsnp.shape[0])).squeeze()
# cv2.imshow('warped', warped)
# cv2.waitKey(0)
def plot_two_img(obs_img, obs_img2):
plt.figure()
plt.subplot(1,2,1)
if type(obs_img) == torch.Tensor:
from torchvision import transforms
im_new = transforms.ToPILImage()(obs_img)
else:
im_new = obs_img
plt.imshow(im_new)
plt.subplot(1,2,2)
if type(obs_img2) == torch.Tensor:
from torchvision import transforms
im_new = transforms.ToPILImage()(obs_img2)
else:
im_new = obs_img2
plt.imshow(im_new)
plt.show()
obs = env._get_obs()['hires_image_observation']
warped = cv2.warpPerspective(obs, matrix, (obs.shape[1], obs.shape[0])).squeeze()
obs = np.flip(obs, axis=-1)
img_curr = np.flip(warped, axis=-1)
plot_two_img(obs, img_curr)
if args.local:
img1, img2 = crop(obs,img_dim=(64,64)), crop(img_curr, img_dim=(64,64))
plot_two_img(img1, img2)
np.save('/home/ashvin/ros_ws/src/railrl-private_anikait/scripts/matrix.npy', matrix)
import ipdb; ipdb.set_trace() | [
"matplotlib.pyplot.title",
"argparse.ArgumentParser",
"matplotlib.pyplot.clf",
"ipdb.set_trace",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.gca",
"os.path.join",
"cv2.warpPerspective",
"matplotlib.pyplot.close",
"matplotlib.pyplot.imshow",
"torchvision.transforms.ToPILImage",
"matplotlib.p... | [((881, 903), 'rlkit.torch.pytorch_util.set_gpu_mode', 'ptu.set_gpu_mode', (['(True)'], {}), '(True)\n', (897, 903), True, 'import rlkit.torch.pytorch_util as ptu\n'), ((914, 939), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (937, 939), False, 'import argparse\n'), ((2999, 3008), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (3006, 3008), True, 'import matplotlib.pyplot as plt\n'), ((3113, 3304), 'sawyer_control.envs.sawyer_grip.SawyerGripEnv', 'SawyerGripEnv', ([], {'action_mode': '"""position"""', 'config_name': '"""ashvin_config"""', 'reset_free': '(False)', 'position_action_scale': '(0.05)', 'max_speed': '(0.4)', 'step_sleep_time': '(0.2)', 'crop_version_str': '"""crop_val_torch"""'}), "(action_mode='position', config_name='ashvin_config',\n reset_free=False, position_action_scale=0.05, max_speed=0.4,\n step_sleep_time=0.2, crop_version_str='crop_val_torch')\n", (3126, 3304), False, 'from sawyer_control.envs.sawyer_grip import SawyerGripEnv\n'), ((5467, 5478), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (5476, 5478), True, 'import matplotlib.pyplot as plt\n'), ((6554, 6575), 'numpy.flip', 'np.flip', (['obs'], {'axis': '(-1)'}), '(obs, axis=-1)\n', (6561, 6575), True, 'import numpy as np\n'), ((6587, 6611), 'numpy.flip', 'np.flip', (['warped'], {'axis': '(-1)'}), '(warped, axis=-1)\n', (6594, 6611), True, 'import numpy as np\n'), ((6760, 6848), 'numpy.save', 'np.save', (['"""/home/ashvin/ros_ws/src/railrl-private_anikait/scripts/matrix.npy"""', 'matrix'], {}), "('/home/ashvin/ros_ws/src/railrl-private_anikait/scripts/matrix.npy',\n matrix)\n", (6767, 6848), True, 'import numpy as np\n'), ((6858, 6874), 'ipdb.set_trace', 'ipdb.set_trace', ([], {}), '()\n', (6872, 6874), False, 'import ipdb\n'), ((405, 417), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (415, 417), False, 'import uuid\n'), ((2597, 2663), 'PIL.Image.open', 'Image.open', 
(['"""/home/ashvin/ros_ws/src/sawyer_control/src/frame.png"""'], {}), "('/home/ashvin/ros_ws/src/sawyer_control/src/frame.png')\n", (2607, 2663), False, 'from PIL import Image\n'), ((2676, 2699), 'torchvision.transforms.ToPILImage', 'transforms.ToPILImage', ([], {}), '()\n', (2697, 2699), False, 'from torchvision import transforms\n'), ((2713, 2734), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (2732, 2734), False, 'from torchvision import transforms\n'), ((2957, 2982), 'matplotlib.pyplot.title', 'plt.title', (['s'], {'fontsize': '(16)'}), '(s, fontsize=16)\n', (2966, 2982), True, 'import matplotlib.pyplot as plt\n'), ((2987, 2997), 'matplotlib.pyplot.draw', 'plt.draw', ([], {}), '()\n', (2995, 2997), True, 'import matplotlib.pyplot as plt\n'), ((3018, 3027), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (3025, 3027), True, 'import matplotlib.pyplot as plt\n'), ((3565, 3583), 'matplotlib.pyplot.imshow', 'plt.imshow', (['im_new'], {}), '(im_new)\n', (3575, 3583), True, 'import matplotlib.pyplot as plt\n'), ((3782, 3821), 'torchvision.transforms.functional.resize', 'F.resize', (['img', 'img_dim', 'Image.ANTIALIAS'], {}), '(img, img_dim, Image.ANTIALIAS)\n', (3790, 3821), True, 'import torchvision.transforms.functional as F\n'), ((3832, 3845), 'numpy.array', 'np.array', (['img'], {}), '(img)\n', (3840, 3845), True, 'import numpy as np\n'), ((4526, 4567), 'matplotlib.pyplot.fill', 'plt.fill', (['pts[:, 0]', 'pts[:, 1]', '"""r"""'], {'lw': '(2)'}), "(pts[:, 0], pts[:, 1], 'r', lw=2)\n", (4534, 4567), True, 'import matplotlib.pyplot as plt\n'), ((4636, 4660), 'matplotlib.pyplot.waitforbuttonpress', 'plt.waitforbuttonpress', ([], {}), '()\n', (4658, 4660), True, 'import matplotlib.pyplot as plt\n'), ((5289, 5330), 'matplotlib.pyplot.fill', 'plt.fill', (['pts[:, 0]', 'pts[:, 1]', '"""r"""'], {'lw': '(2)'}), "(pts[:, 0], pts[:, 1], 'r', lw=2)\n", (5297, 5330), True, 'import matplotlib.pyplot as plt\n'), ((5399, 5423), 
'matplotlib.pyplot.waitforbuttonpress', 'plt.waitforbuttonpress', ([], {}), '()\n', (5421, 5423), True, 'import matplotlib.pyplot as plt\n'), ((5444, 5453), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (5451, 5453), True, 'import matplotlib.pyplot as plt\n'), ((5959, 5971), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (5969, 5971), True, 'import matplotlib.pyplot as plt\n'), ((5976, 5996), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', '(2)', '(1)'], {}), '(1, 2, 1)\n', (5987, 5996), True, 'import matplotlib.pyplot as plt\n'), ((6165, 6183), 'matplotlib.pyplot.imshow', 'plt.imshow', (['im_new'], {}), '(im_new)\n', (6175, 6183), True, 'import matplotlib.pyplot as plt\n'), ((6189, 6209), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', '(2)', '(2)'], {}), '(1, 2, 2)\n', (6200, 6209), True, 'import matplotlib.pyplot as plt\n'), ((6381, 6399), 'matplotlib.pyplot.imshow', 'plt.imshow', (['im_new'], {}), '(im_new)\n', (6391, 6399), True, 'import matplotlib.pyplot as plt\n'), ((6405, 6415), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (6413, 6415), True, 'import matplotlib.pyplot as plt\n'), ((3748, 3767), 'numpy.uint8', 'np.uint8', (['(img * 255)'], {}), '(img * 255)\n', (3756, 3767), True, 'import numpy as np\n'), ((5790, 5858), 'cv2.warpPerspective', 'cv2.warpPerspective', (['obsnp', 'matrix', '(obsnp.shape[1], obsnp.shape[0])'], {}), '(obsnp, matrix, (obsnp.shape[1], obsnp.shape[0]))\n', (5809, 5858), False, 'import cv2\n'), ((6474, 6536), 'cv2.warpPerspective', 'cv2.warpPerspective', (['obs', 'matrix', '(obs.shape[1], obs.shape[0])'], {}), '(obs, matrix, (obs.shape[1], obs.shape[0]))\n', (6493, 6536), False, 'import cv2\n'), ((3487, 3510), 'torchvision.transforms.ToPILImage', 'transforms.ToPILImage', ([], {}), '()\n', (3508, 3510), False, 'from torchvision import transforms\n'), ((3925, 3946), 'torch.from_numpy', 'torch.from_numpy', (['img'], {}), '(img)\n', (3941, 3946), False, 'import torch\n'), ((4092, 4148), 
'numpy.flip', 'np.flip', (["observation['hires_image_observation']"], {'axis': '(-1)'}), "(observation['hires_image_observation'], axis=-1)\n", (4099, 4148), True, 'import numpy as np\n'), ((4368, 4393), 'matplotlib.pyplot.ginput', 'plt.ginput', (['(4)'], {'timeout': '(-1)'}), '(4, timeout=-1)\n', (4378, 4393), True, 'import matplotlib.pyplot as plt\n'), ((4484, 4497), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (4494, 4497), False, 'import time\n'), ((5131, 5156), 'matplotlib.pyplot.ginput', 'plt.ginput', (['(4)'], {'timeout': '(-1)'}), '(4, timeout=-1)\n', (5141, 5156), True, 'import matplotlib.pyplot as plt\n'), ((5247, 5260), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (5257, 5260), False, 'import time\n'), ((6093, 6116), 'torchvision.transforms.ToPILImage', 'transforms.ToPILImage', ([], {}), '()\n', (6114, 6116), False, 'from torchvision import transforms\n'), ((6307, 6330), 'torchvision.transforms.ToPILImage', 'transforms.ToPILImage', ([], {}), '()\n', (6328, 6330), False, 'from torchvision import transforms\n'), ((1303, 1349), 'os.path.join', 'os.path.join', (['data_path', '"""fixed_pot_demos.npy"""'], {}), "(data_path, 'fixed_pot_demos.npy')\n", (1315, 1349), False, 'import os\n'), ((1351, 1410), 'os.path.join', 'os.path.join', (['data_path', '"""fixed_pot_demos_putlidon_rew.pkl"""'], {}), "(data_path, 'fixed_pot_demos_putlidon_rew.pkl')\n", (1363, 1410), False, 'import os\n'), ((4195, 4251), 'numpy.flip', 'np.flip', (["observation['hires_image_observation']"], {'axis': '(-1)'}), "(observation['hires_image_observation'], axis=-1)\n", (4202, 4251), True, 'import numpy as np\n'), ((1487, 1533), 'os.path.join', 'os.path.join', (['data_path', '"""fixed_pot_demos.npy"""'], {}), "(data_path, 'fixed_pot_demos.npy')\n", (1499, 1533), False, 'import os\n'), ((1535, 1596), 'os.path.join', 'os.path.join', (['data_path', '"""fixed_pot_demos_takeofflid_rew.pkl"""'], {}), "(data_path, 'fixed_pot_demos_takeofflid_rew.pkl')\n", (1547, 1596), False, 'import 
os\n'), ((1670, 1717), 'os.path.join', 'os.path.join', (['data_path', '"""fixed_tray_demos.npy"""'], {}), "(data_path, 'fixed_tray_demos.npy')\n", (1682, 1717), False, 'import os\n'), ((1719, 1770), 'os.path.join', 'os.path.join', (['data_path', '"""fixed_tray_demos_rew.pkl"""'], {}), "(data_path, 'fixed_tray_demos_rew.pkl')\n", (1731, 1770), False, 'import os\n'), ((1846, 1895), 'os.path.join', 'os.path.join', (['data_path', '"""fixed_drawer_demos.npy"""'], {}), "(data_path, 'fixed_drawer_demos.npy')\n", (1858, 1895), False, 'import os\n'), ((1897, 1950), 'os.path.join', 'os.path.join', (['data_path', '"""fixed_drawer_demos_rew.pkl"""'], {}), "(data_path, 'fixed_drawer_demos_rew.pkl')\n", (1909, 1950), False, 'import os\n'), ((2212, 2256), 'os.path.join', 'os.path.join', (['data_path', '"""general_demos.npy"""'], {}), "(data_path, 'general_demos.npy')\n", (2224, 2256), False, 'import os\n')] |
import warnings
from abc import ABC, abstractmethod
from typing import Any, Dict, Generator, List, Optional, Union
import numpy as np
import torch as th
from gym import spaces
from stable_baselines3.common.preprocessing import get_action_dim, get_obs_shape
from stable_baselines3.common.type_aliases import (
DictReplayBufferSamples,
DictRolloutBufferSamples,
ReplayBufferSamples,
RolloutBufferSamples,
)
from stable_baselines3.common.vec_env import VecNormalize
try:
import psutil
except ImportError:
psutil = None
class BaseBuffer(ABC):
def __init__(
self,
buffer_size: int,
observation_space: spaces.Space,
action_space: spaces.Space,
device: Union[th.device, str] = "cpu",
n_envs: int = 1,
):
super(BaseBuffer, self).__init__()
self.buffer_size = buffer_size
self.observation_space = observation_space
self.action_space = action_space
self.obs_shape = get_obs_shape(observation_space)
self.action_dim = get_action_dim(action_space)
self.pos = 0
self.full = False
self.device = device
self.n_envs = n_envs
@staticmethod
def swap_and_flatten(arr: np.ndarray) -> np.ndarray:
shape = arr.shape
if len(shape) < 3:
shape = shape + (1,)
return arr.swapaxes(0, 1).reshape(shape[0] * shape[1], *shape[2:])
def size(self) -> int:
if self.full:
return self.buffer_size
return self.pos
def add(self, *args, **kwargs) -> None:
raise NotImplementedError()
def extend(self, *args, **kwargs) -> None:
for data in zip(*args):
self.add(*data)
def reset(self) -> None:
self.pos = 0
self.full = False
def sample(self, batch_size: int, env: Optional[VecNormalize] = None):
upper_bound = self.buffer_size if self.full else self.pos
batch_inds = np.random.randint(0, upper_bound, size=batch_size)
return self._get_samples(batch_inds, env=env)
@abstractmethod
def _get_samples(
self, batch_inds: np.ndarray, env: Optional[VecNormalize] = None
) -> Union[ReplayBufferSamples, RolloutBufferSamples]:
raise NotImplementedError()
def to_torch(self, array: np.ndarray, copy: bool = True) -> th.Tensor:
if copy:
return th.tensor(array).to(self.device)
return th.as_tensor(array).to(self.device)
@staticmethod
def _normalize_obs(
obs: Union[np.ndarray, Dict[str, np.ndarray]],
env: Optional[VecNormalize] = None,
) -> Union[np.ndarray, Dict[str, np.ndarray]]:
if env is not None:
return env.normalize_obs(obs)
return obs
@staticmethod
def _normalize_reward(
reward: np.ndarray, env: Optional[VecNormalize] = None
) -> np.ndarray:
if env is not None:
return env.normalize_reward(reward).astype(np.float32)
return reward
class ReplayBuffer(BaseBuffer):
def __init__(
self,
buffer_size: int,
observation_space: spaces.Space,
action_space: spaces.Space,
device: Union[th.device, str] = "cpu",
n_envs: int = 1,
optimize_memory_usage: bool = False,
handle_timeout_termination: bool = True,
):
super(ReplayBuffer, self).__init__(
buffer_size, observation_space, action_space, device, n_envs=n_envs
)
assert n_envs == 1, "Replay buffer only support single environment for now"
if psutil is not None:
mem_available = psutil.virtual_memory().available
self.optimize_memory_usage = optimize_memory_usage
self.observations = np.zeros(
(self.buffer_size, self.n_envs) + self.obs_shape,
dtype=observation_space.dtype,
)
if optimize_memory_usage:
self.next_observations = None
else:
self.next_observations = np.zeros(
(self.buffer_size, self.n_envs) + self.obs_shape,
dtype=observation_space.dtype,
)
self.actions = np.zeros(
(self.buffer_size, self.n_envs, self.action_dim), dtype=action_space.dtype
)
self.rewards = np.zeros((self.buffer_size, self.n_envs), dtype=np.float32)
self.dones = np.zeros((self.buffer_size, self.n_envs), dtype=np.float32)
self.handle_timeout_termination = handle_timeout_termination
self.timeouts = np.zeros((self.buffer_size, self.n_envs), dtype=np.float32)
if psutil is not None:
total_memory_usage = (
self.observations.nbytes
+ self.actions.nbytes
+ self.rewards.nbytes
+ self.dones.nbytes
)
if self.next_observations is not None:
total_memory_usage += self.next_observations.nbytes
if total_memory_usage > mem_available:
total_memory_usage /= 1e9
mem_available /= 1e9
warnings.warn(
"This system does not have apparently enough memory to store the complete "
f"replay buffer {total_memory_usage:.2f}GB > {mem_available:.2f}GB"
)
def add(
self,
obs: np.ndarray,
next_obs: np.ndarray,
action: np.ndarray,
reward: np.ndarray,
done: np.ndarray,
infos: List[Dict[str, Any]],
) -> None:
self.observations[self.pos] = np.array(obs).copy()
if self.optimize_memory_usage:
self.observations[(self.pos + 1) % self.buffer_size] = np.array(
next_obs
).copy()
else:
self.next_observations[self.pos] = np.array(next_obs).copy()
self.actions[self.pos] = np.array(action).copy()
self.rewards[self.pos] = np.array(reward).copy()
self.dones[self.pos] = np.array(done).copy()
if self.handle_timeout_termination:
self.timeouts[self.pos] = np.array(
[info.get("TimeLimit.truncated", False) for info in infos]
)
self.pos += 1
if self.pos == self.buffer_size:
self.full = True
self.pos = 0
def sample(
self, batch_size: int, env: Optional[VecNormalize] = None
) -> ReplayBufferSamples:
if not self.optimize_memory_usage:
return super().sample(batch_size=batch_size, env=env)
if self.full:
batch_inds = (
np.random.randint(1, self.buffer_size, size=batch_size) + self.pos
) % self.buffer_size
else:
batch_inds = np.random.randint(0, self.pos, size=batch_size)
return self._get_samples(batch_inds, env=env)
def _get_samples(
self, batch_inds: np.ndarray, env: Optional[VecNormalize] = None
) -> ReplayBufferSamples:
if self.optimize_memory_usage:
next_obs = self._normalize_obs(
self.observations[(batch_inds + 1) % self.buffer_size, 0, :], env
)
else:
next_obs = self._normalize_obs(
self.next_observations[batch_inds, 0, :], env
)
data = (
self._normalize_obs(self.observations[batch_inds, 0, :], env),
self.actions[batch_inds, 0, :],
next_obs,
self.dones[batch_inds] * (1 - self.timeouts[batch_inds]),
self._normalize_reward(self.rewards[batch_inds], env),
)
return ReplayBufferSamples(*tuple(map(self.to_torch, data)))
class RolloutBuffer(BaseBuffer):
def __init__(
self,
buffer_size: int,
observation_space: spaces.Space,
action_space: spaces.Space,
device: Union[th.device, str] = "cpu",
gae_lambda: float = 1,
gamma: float = 0.99,
n_envs: int = 1,
):
super(RolloutBuffer, self).__init__(
buffer_size, observation_space, action_space, device, n_envs=n_envs
)
self.gae_lambda = gae_lambda
self.gamma = gamma
self.observations, self.actions, self.rewards, self.advantages = (
None,
None,
None,
None,
)
self.returns, self.episode_starts, self.values, self.log_probs = (
None,
None,
None,
None,
)
self.generator_ready = False
self.reset()
def reset(self) -> None:
self.observations = np.zeros(
(self.buffer_size, self.n_envs) + self.obs_shape, dtype=np.float32
)
self.actions = np.zeros(
(self.buffer_size, self.n_envs, self.action_dim), dtype=np.float32
)
self.rewards = np.zeros((self.buffer_size, self.n_envs), dtype=np.float32)
self.returns = np.zeros((self.buffer_size, self.n_envs), dtype=np.float32)
self.episode_starts = np.zeros(
(self.buffer_size, self.n_envs), dtype=np.float32
)
self.values = np.zeros((self.buffer_size, self.n_envs), dtype=np.float32)
self.log_probs = np.zeros((self.buffer_size, self.n_envs), dtype=np.float32)
self.advantages = np.zeros((self.buffer_size, self.n_envs), dtype=np.float32)
self.generator_ready = False
super(RolloutBuffer, self).reset()
def compute_returns_and_advantage(
self, last_values: th.Tensor, dones: np.ndarray
) -> None:
last_values = last_values.clone().cpu().numpy().flatten()
last_gae_lam = 0
for step in reversed(range(self.buffer_size)):
if step == self.buffer_size - 1:
next_non_terminal = 1.0 - dones
next_values = last_values
else:
next_non_terminal = 1.0 - self.episode_starts[step + 1]
next_values = self.values[step + 1]
delta = (
self.rewards[step]
+ self.gamma * next_values * next_non_terminal
- self.values[step]
)
last_gae_lam = (
delta + self.gamma * self.gae_lambda * next_non_terminal * last_gae_lam
)
self.advantages[step] = last_gae_lam
self.returns = self.advantages + self.values
def add(
self,
obs: np.ndarray,
action: np.ndarray,
reward: np.ndarray,
episode_start: np.ndarray,
value: th.Tensor,
log_prob: th.Tensor,
) -> None:
if len(log_prob.shape) == 0:
log_prob = log_prob.reshape(-1, 1)
if isinstance(self.observation_space, spaces.Discrete):
obs = obs.reshape((self.n_envs,) + self.obs_shape)
self.observations[self.pos] = np.array(obs).copy()
self.actions[self.pos] = np.array(action).copy()
self.rewards[self.pos] = np.array(reward).copy()
self.episode_starts[self.pos] = np.array(episode_start).copy()
self.values[self.pos] = value.clone().cpu().numpy().flatten()
self.log_probs[self.pos] = log_prob.clone().cpu().numpy()
self.pos += 1
if self.pos == self.buffer_size:
self.full = True
def get(
self, batch_size: Optional[int] = None
) -> Generator[RolloutBufferSamples, None, None]:
assert self.full, ""
indices = np.random.permutation(self.buffer_size * self.n_envs)
if not self.generator_ready:
_tensor_names = [
"observations",
"actions",
"values",
"log_probs",
"advantages",
"returns",
]
for tensor in _tensor_names:
self.__dict__[tensor] = self.swap_and_flatten(self.__dict__[tensor])
self.generator_ready = True
if batch_size is None:
batch_size = self.buffer_size * self.n_envs
start_idx = 0
while start_idx < self.buffer_size * self.n_envs:
yield self._get_samples(indices[start_idx : start_idx + batch_size])
start_idx += batch_size
def _get_samples(
self, batch_inds: np.ndarray, env: Optional[VecNormalize] = None
) -> RolloutBufferSamples:
data = (
self.observations[batch_inds],
self.actions[batch_inds],
self.values[batch_inds].flatten(),
self.log_probs[batch_inds].flatten(),
self.advantages[batch_inds].flatten(),
self.returns[batch_inds].flatten(),
)
return RolloutBufferSamples(*tuple(map(self.to_torch, data)))
class DictReplayBuffer(ReplayBuffer):
def __init__(
self,
buffer_size: int,
observation_space: spaces.Space,
action_space: spaces.Space,
device: Union[th.device, str] = "cpu",
n_envs: int = 1,
optimize_memory_usage: bool = False,
handle_timeout_termination: bool = True,
):
super(ReplayBuffer, self).__init__(
buffer_size, observation_space, action_space, device, n_envs=n_envs
)
assert isinstance(
self.obs_shape, dict
), "DictReplayBuffer must be used with Dict obs space only"
assert n_envs == 1, "Replay buffer only support single environment for now"
if psutil is not None:
mem_available = psutil.virtual_memory().available
assert (
optimize_memory_usage is False
), "DictReplayBuffer does not support optimize_memory_usage"
self.optimize_memory_usage = optimize_memory_usage
self.observations = {
key: np.zeros((self.buffer_size, self.n_envs) + _obs_shape)
for key, _obs_shape in self.obs_shape.items()
}
self.next_observations = {
key: np.zeros((self.buffer_size, self.n_envs) + _obs_shape)
for key, _obs_shape in self.obs_shape.items()
}
self.actions = np.zeros(
(self.buffer_size, self.action_dim), dtype=action_space.dtype
)
self.rewards = np.zeros((self.buffer_size, self.n_envs), dtype=np.float32)
self.dones = np.zeros((self.buffer_size, self.n_envs), dtype=np.float32)
self.handle_timeout_termination = handle_timeout_termination
self.timeouts = np.zeros((self.buffer_size, self.n_envs), dtype=np.float32)
if psutil is not None:
obs_nbytes = 0
for _, obs in self.observations.items():
obs_nbytes += obs.nbytes
total_memory_usage = (
obs_nbytes
+ self.actions.nbytes
+ self.rewards.nbytes
+ self.dones.nbytes
)
if self.next_observations is not None:
next_obs_nbytes = 0
for _, obs in self.observations.items():
next_obs_nbytes += obs.nbytes
total_memory_usage += next_obs_nbytes
if total_memory_usage > mem_available:
total_memory_usage /= 1e9
mem_available /= 1e9
warnings.warn(
"This system does not have apparently enough memory to store the complete "
f"replay buffer {total_memory_usage:.2f}GB > {mem_available:.2f}GB"
)
def add(
self,
obs: Dict[str, np.ndarray],
next_obs: Dict[str, np.ndarray],
action: np.ndarray,
reward: np.ndarray,
done: np.ndarray,
infos: List[Dict[str, Any]],
) -> None:
for key in self.observations.keys():
self.observations[key][self.pos] = np.array(obs[key]).copy()
for key in self.next_observations.keys():
self.next_observations[key][self.pos] = np.array(next_obs[key]).copy()
self.actions[self.pos] = np.array(action).copy()
self.rewards[self.pos] = np.array(reward).copy()
self.dones[self.pos] = np.array(done).copy()
if self.handle_timeout_termination:
self.timeouts[self.pos] = np.array(
[info.get("TimeLimit.truncated", False) for info in infos]
)
self.pos += 1
if self.pos == self.buffer_size:
self.full = True
self.pos = 0
def sample(
self, batch_size: int, env: Optional[VecNormalize] = None
) -> DictReplayBufferSamples:
return super(ReplayBuffer, self).sample(batch_size=batch_size, env=env)
def _get_samples(
self, batch_inds: np.ndarray, env: Optional[VecNormalize] = None
) -> DictReplayBufferSamples:
obs_ = self._normalize_obs(
{key: obs[batch_inds, 0, :] for key, obs in self.observations.items()}
)
next_obs_ = self._normalize_obs(
{key: obs[batch_inds, 0, :] for key, obs in self.next_observations.items()}
)
observations = {key: self.to_torch(obs) for key, obs in obs_.items()}
next_observations = {key: self.to_torch(obs) for key, obs in next_obs_.items()}
return DictReplayBufferSamples(
observations=observations,
actions=self.to_torch(self.actions[batch_inds]),
next_observations=next_observations,
dones=self.to_torch(
self.dones[batch_inds] * (1 - self.timeouts[batch_inds])
),
rewards=self.to_torch(
self._normalize_reward(self.rewards[batch_inds], env)
),
)
class DictRolloutBuffer(RolloutBuffer):
def __init__(
self,
buffer_size: int,
observation_space: spaces.Space,
action_space: spaces.Space,
device: Union[th.device, str] = "cpu",
gae_lambda: float = 1,
gamma: float = 0.99,
n_envs: int = 1,
):
super(RolloutBuffer, self).__init__(
buffer_size, observation_space, action_space, device, n_envs=n_envs
)
assert isinstance(
self.obs_shape, dict
), "DictRolloutBuffer must be used with Dict obs space only"
self.gae_lambda = gae_lambda
self.gamma = gamma
self.observations, self.actions, self.rewards, self.advantages = (
None,
None,
None,
None,
)
self.returns, self.episode_starts, self.values, self.log_probs = (
None,
None,
None,
None,
)
self.generator_ready = False
self.reset()
def reset(self) -> None:
assert isinstance(
self.obs_shape, dict
), "DictRolloutBuffer must be used with Dict obs space only"
self.observations = {}
for key, obs_input_shape in self.obs_shape.items():
self.observations[key] = np.zeros(
(self.buffer_size, self.n_envs) + obs_input_shape, dtype=np.float32
)
self.actions = np.zeros(
(self.buffer_size, self.n_envs, self.action_dim), dtype=np.float32
)
self.rewards = np.zeros((self.buffer_size, self.n_envs), dtype=np.float32)
self.returns = np.zeros((self.buffer_size, self.n_envs), dtype=np.float32)
self.episode_starts = np.zeros(
(self.buffer_size, self.n_envs), dtype=np.float32
)
self.values = np.zeros((self.buffer_size, self.n_envs), dtype=np.float32)
self.log_probs = np.zeros((self.buffer_size, self.n_envs), dtype=np.float32)
self.advantages = np.zeros((self.buffer_size, self.n_envs), dtype=np.float32)
self.generator_ready = False
super(RolloutBuffer, self).reset()
def add(
self,
obs: Dict[str, np.ndarray],
action: np.ndarray,
reward: np.ndarray,
episode_start: np.ndarray,
value: th.Tensor,
log_prob: th.Tensor,
) -> None:
if len(log_prob.shape) == 0:
log_prob = log_prob.reshape(-1, 1)
for key in self.observations.keys():
obs_ = np.array(obs[key]).copy()
if isinstance(self.observation_space.spaces[key], spaces.Discrete):
obs_ = obs_.reshape((self.n_envs,) + self.obs_shape[key])
self.observations[key][self.pos] = obs_
self.actions[self.pos] = np.array(action).copy()
self.rewards[self.pos] = np.array(reward).copy()
self.episode_starts[self.pos] = np.array(episode_start).copy()
self.values[self.pos] = value.clone().cpu().numpy().flatten()
self.log_probs[self.pos] = log_prob.clone().cpu().numpy()
self.pos += 1
if self.pos == self.buffer_size:
self.full = True
def get(
self, batch_size: Optional[int] = None
) -> Generator[DictRolloutBufferSamples, None, None]:
assert self.full, ""
indices = np.random.permutation(self.buffer_size * self.n_envs)
if not self.generator_ready:
for key, obs in self.observations.items():
self.observations[key] = self.swap_and_flatten(obs)
_tensor_names = ["actions", "values", "log_probs", "advantages", "returns"]
for tensor in _tensor_names:
self.__dict__[tensor] = self.swap_and_flatten(self.__dict__[tensor])
self.generator_ready = True
if batch_size is None:
batch_size = self.buffer_size * self.n_envs
start_idx = 0
while start_idx < self.buffer_size * self.n_envs:
yield self._get_samples(indices[start_idx : start_idx + batch_size])
start_idx += batch_size
def _get_samples(
self, batch_inds: np.ndarray, env: Optional[VecNormalize] = None
) -> DictRolloutBufferSamples:
return DictRolloutBufferSamples(
observations={
key: self.to_torch(obs[batch_inds])
for (key, obs) in self.observations.items()
},
actions=self.to_torch(self.actions[batch_inds]),
old_values=self.to_torch(self.values[batch_inds].flatten()),
old_log_prob=self.to_torch(self.log_probs[batch_inds].flatten()),
advantages=self.to_torch(self.advantages[batch_inds].flatten()),
returns=self.to_torch(self.returns[batch_inds].flatten()),
)
| [
"psutil.virtual_memory",
"numpy.zeros",
"stable_baselines3.common.preprocessing.get_obs_shape",
"stable_baselines3.common.preprocessing.get_action_dim",
"numpy.random.randint",
"numpy.array",
"numpy.random.permutation",
"torch.as_tensor",
"warnings.warn",
"torch.tensor"
] | [((981, 1013), 'stable_baselines3.common.preprocessing.get_obs_shape', 'get_obs_shape', (['observation_space'], {}), '(observation_space)\n', (994, 1013), False, 'from stable_baselines3.common.preprocessing import get_action_dim, get_obs_shape\n'), ((1040, 1068), 'stable_baselines3.common.preprocessing.get_action_dim', 'get_action_dim', (['action_space'], {}), '(action_space)\n', (1054, 1068), False, 'from stable_baselines3.common.preprocessing import get_action_dim, get_obs_shape\n'), ((1950, 2000), 'numpy.random.randint', 'np.random.randint', (['(0)', 'upper_bound'], {'size': 'batch_size'}), '(0, upper_bound, size=batch_size)\n', (1967, 2000), True, 'import numpy as np\n'), ((3731, 3825), 'numpy.zeros', 'np.zeros', (['((self.buffer_size, self.n_envs) + self.obs_shape)'], {'dtype': 'observation_space.dtype'}), '((self.buffer_size, self.n_envs) + self.obs_shape, dtype=\n observation_space.dtype)\n', (3739, 3825), True, 'import numpy as np\n'), ((4143, 4232), 'numpy.zeros', 'np.zeros', (['(self.buffer_size, self.n_envs, self.action_dim)'], {'dtype': 'action_space.dtype'}), '((self.buffer_size, self.n_envs, self.action_dim), dtype=\n action_space.dtype)\n', (4151, 4232), True, 'import numpy as np\n'), ((4273, 4332), 'numpy.zeros', 'np.zeros', (['(self.buffer_size, self.n_envs)'], {'dtype': 'np.float32'}), '((self.buffer_size, self.n_envs), dtype=np.float32)\n', (4281, 4332), True, 'import numpy as np\n'), ((4354, 4413), 'numpy.zeros', 'np.zeros', (['(self.buffer_size, self.n_envs)'], {'dtype': 'np.float32'}), '((self.buffer_size, self.n_envs), dtype=np.float32)\n', (4362, 4413), True, 'import numpy as np\n'), ((4507, 4566), 'numpy.zeros', 'np.zeros', (['(self.buffer_size, self.n_envs)'], {'dtype': 'np.float32'}), '((self.buffer_size, self.n_envs), dtype=np.float32)\n', (4515, 4566), True, 'import numpy as np\n'), ((8551, 8627), 'numpy.zeros', 'np.zeros', (['((self.buffer_size, self.n_envs) + self.obs_shape)'], {'dtype': 'np.float32'}), '((self.buffer_size, 
self.n_envs) + self.obs_shape, dtype=np.float32)\n', (8559, 8627), True, 'import numpy as np\n'), ((8673, 8749), 'numpy.zeros', 'np.zeros', (['(self.buffer_size, self.n_envs, self.action_dim)'], {'dtype': 'np.float32'}), '((self.buffer_size, self.n_envs, self.action_dim), dtype=np.float32)\n', (8681, 8749), True, 'import numpy as np\n'), ((8795, 8854), 'numpy.zeros', 'np.zeros', (['(self.buffer_size, self.n_envs)'], {'dtype': 'np.float32'}), '((self.buffer_size, self.n_envs), dtype=np.float32)\n', (8803, 8854), True, 'import numpy as np\n'), ((8878, 8937), 'numpy.zeros', 'np.zeros', (['(self.buffer_size, self.n_envs)'], {'dtype': 'np.float32'}), '((self.buffer_size, self.n_envs), dtype=np.float32)\n', (8886, 8937), True, 'import numpy as np\n'), ((8968, 9027), 'numpy.zeros', 'np.zeros', (['(self.buffer_size, self.n_envs)'], {'dtype': 'np.float32'}), '((self.buffer_size, self.n_envs), dtype=np.float32)\n', (8976, 9027), True, 'import numpy as np\n'), ((9072, 9131), 'numpy.zeros', 'np.zeros', (['(self.buffer_size, self.n_envs)'], {'dtype': 'np.float32'}), '((self.buffer_size, self.n_envs), dtype=np.float32)\n', (9080, 9131), True, 'import numpy as np\n'), ((9157, 9216), 'numpy.zeros', 'np.zeros', (['(self.buffer_size, self.n_envs)'], {'dtype': 'np.float32'}), '((self.buffer_size, self.n_envs), dtype=np.float32)\n', (9165, 9216), True, 'import numpy as np\n'), ((9243, 9302), 'numpy.zeros', 'np.zeros', (['(self.buffer_size, self.n_envs)'], {'dtype': 'np.float32'}), '((self.buffer_size, self.n_envs), dtype=np.float32)\n', (9251, 9302), True, 'import numpy as np\n'), ((11379, 11432), 'numpy.random.permutation', 'np.random.permutation', (['(self.buffer_size * self.n_envs)'], {}), '(self.buffer_size * self.n_envs)\n', (11400, 11432), True, 'import numpy as np\n'), ((13979, 14050), 'numpy.zeros', 'np.zeros', (['(self.buffer_size, self.action_dim)'], {'dtype': 'action_space.dtype'}), '((self.buffer_size, self.action_dim), dtype=action_space.dtype)\n', (13987, 14050), True, 
'import numpy as np\n'), ((14096, 14155), 'numpy.zeros', 'np.zeros', (['(self.buffer_size, self.n_envs)'], {'dtype': 'np.float32'}), '((self.buffer_size, self.n_envs), dtype=np.float32)\n', (14104, 14155), True, 'import numpy as np\n'), ((14177, 14236), 'numpy.zeros', 'np.zeros', (['(self.buffer_size, self.n_envs)'], {'dtype': 'np.float32'}), '((self.buffer_size, self.n_envs), dtype=np.float32)\n', (14185, 14236), True, 'import numpy as np\n'), ((14330, 14389), 'numpy.zeros', 'np.zeros', (['(self.buffer_size, self.n_envs)'], {'dtype': 'np.float32'}), '((self.buffer_size, self.n_envs), dtype=np.float32)\n', (14338, 14389), True, 'import numpy as np\n'), ((18931, 19007), 'numpy.zeros', 'np.zeros', (['(self.buffer_size, self.n_envs, self.action_dim)'], {'dtype': 'np.float32'}), '((self.buffer_size, self.n_envs, self.action_dim), dtype=np.float32)\n', (18939, 19007), True, 'import numpy as np\n'), ((19053, 19112), 'numpy.zeros', 'np.zeros', (['(self.buffer_size, self.n_envs)'], {'dtype': 'np.float32'}), '((self.buffer_size, self.n_envs), dtype=np.float32)\n', (19061, 19112), True, 'import numpy as np\n'), ((19136, 19195), 'numpy.zeros', 'np.zeros', (['(self.buffer_size, self.n_envs)'], {'dtype': 'np.float32'}), '((self.buffer_size, self.n_envs), dtype=np.float32)\n', (19144, 19195), True, 'import numpy as np\n'), ((19226, 19285), 'numpy.zeros', 'np.zeros', (['(self.buffer_size, self.n_envs)'], {'dtype': 'np.float32'}), '((self.buffer_size, self.n_envs), dtype=np.float32)\n', (19234, 19285), True, 'import numpy as np\n'), ((19330, 19389), 'numpy.zeros', 'np.zeros', (['(self.buffer_size, self.n_envs)'], {'dtype': 'np.float32'}), '((self.buffer_size, self.n_envs), dtype=np.float32)\n', (19338, 19389), True, 'import numpy as np\n'), ((19415, 19474), 'numpy.zeros', 'np.zeros', (['(self.buffer_size, self.n_envs)'], {'dtype': 'np.float32'}), '((self.buffer_size, self.n_envs), dtype=np.float32)\n', (19423, 19474), True, 'import numpy as np\n'), ((19501, 19560), 'numpy.zeros', 
'np.zeros', (['(self.buffer_size, self.n_envs)'], {'dtype': 'np.float32'}), '((self.buffer_size, self.n_envs), dtype=np.float32)\n', (19509, 19560), True, 'import numpy as np\n'), ((20825, 20878), 'numpy.random.permutation', 'np.random.permutation', (['(self.buffer_size * self.n_envs)'], {}), '(self.buffer_size * self.n_envs)\n', (20846, 20878), True, 'import numpy as np\n'), ((3983, 4077), 'numpy.zeros', 'np.zeros', (['((self.buffer_size, self.n_envs) + self.obs_shape)'], {'dtype': 'observation_space.dtype'}), '((self.buffer_size, self.n_envs) + self.obs_shape, dtype=\n observation_space.dtype)\n', (3991, 4077), True, 'import numpy as np\n'), ((6698, 6745), 'numpy.random.randint', 'np.random.randint', (['(0)', 'self.pos'], {'size': 'batch_size'}), '(0, self.pos, size=batch_size)\n', (6715, 6745), True, 'import numpy as np\n'), ((13658, 13712), 'numpy.zeros', 'np.zeros', (['((self.buffer_size, self.n_envs) + _obs_shape)'], {}), '((self.buffer_size, self.n_envs) + _obs_shape)\n', (13666, 13712), True, 'import numpy as np\n'), ((13833, 13887), 'numpy.zeros', 'np.zeros', (['((self.buffer_size, self.n_envs) + _obs_shape)'], {}), '((self.buffer_size, self.n_envs) + _obs_shape)\n', (13841, 13887), True, 'import numpy as np\n'), ((18800, 18877), 'numpy.zeros', 'np.zeros', (['((self.buffer_size, self.n_envs) + obs_input_shape)'], {'dtype': 'np.float32'}), '((self.buffer_size, self.n_envs) + obs_input_shape, dtype=np.float32)\n', (18808, 18877), True, 'import numpy as np\n'), ((2426, 2445), 'torch.as_tensor', 'th.as_tensor', (['array'], {}), '(array)\n', (2438, 2445), True, 'import torch as th\n'), ((3610, 3633), 'psutil.virtual_memory', 'psutil.virtual_memory', ([], {}), '()\n', (3631, 3633), False, 'import psutil\n'), ((5065, 5230), 'warnings.warn', 'warnings.warn', (['f"""This system does not have apparently enough memory to store the complete replay buffer {total_memory_usage:.2f}GB > {mem_available:.2f}GB"""'], {}), "(\n f'This system does not have apparently enough 
memory to store the complete replay buffer {total_memory_usage:.2f}GB > {mem_available:.2f}GB'\n )\n", (5078, 5230), False, 'import warnings\n'), ((5537, 5550), 'numpy.array', 'np.array', (['obs'], {}), '(obs)\n', (5545, 5550), True, 'import numpy as np\n'), ((5840, 5856), 'numpy.array', 'np.array', (['action'], {}), '(action)\n', (5848, 5856), True, 'import numpy as np\n'), ((5897, 5913), 'numpy.array', 'np.array', (['reward'], {}), '(reward)\n', (5905, 5913), True, 'import numpy as np\n'), ((5952, 5966), 'numpy.array', 'np.array', (['done'], {}), '(done)\n', (5960, 5966), True, 'import numpy as np\n'), ((10783, 10796), 'numpy.array', 'np.array', (['obs'], {}), '(obs)\n', (10791, 10796), True, 'import numpy as np\n'), ((10837, 10853), 'numpy.array', 'np.array', (['action'], {}), '(action)\n', (10845, 10853), True, 'import numpy as np\n'), ((10894, 10910), 'numpy.array', 'np.array', (['reward'], {}), '(reward)\n', (10902, 10910), True, 'import numpy as np\n'), ((10958, 10981), 'numpy.array', 'np.array', (['episode_start'], {}), '(episode_start)\n', (10966, 10981), True, 'import numpy as np\n'), ((13389, 13412), 'psutil.virtual_memory', 'psutil.virtual_memory', ([], {}), '()\n', (13410, 13412), False, 'import psutil\n'), ((15124, 15289), 'warnings.warn', 'warnings.warn', (['f"""This system does not have apparently enough memory to store the complete replay buffer {total_memory_usage:.2f}GB > {mem_available:.2f}GB"""'], {}), "(\n f'This system does not have apparently enough memory to store the complete replay buffer {total_memory_usage:.2f}GB > {mem_available:.2f}GB'\n )\n", (15137, 15289), False, 'import warnings\n'), ((15864, 15880), 'numpy.array', 'np.array', (['action'], {}), '(action)\n', (15872, 15880), True, 'import numpy as np\n'), ((15921, 15937), 'numpy.array', 'np.array', (['reward'], {}), '(reward)\n', (15929, 15937), True, 'import numpy as np\n'), ((15976, 15990), 'numpy.array', 'np.array', (['done'], {}), '(done)\n', (15984, 15990), True, 'import numpy 
as np\n'), ((20279, 20295), 'numpy.array', 'np.array', (['action'], {}), '(action)\n', (20287, 20295), True, 'import numpy as np\n'), ((20336, 20352), 'numpy.array', 'np.array', (['reward'], {}), '(reward)\n', (20344, 20352), True, 'import numpy as np\n'), ((20400, 20423), 'numpy.array', 'np.array', (['episode_start'], {}), '(episode_start)\n', (20408, 20423), True, 'import numpy as np\n'), ((2378, 2394), 'torch.tensor', 'th.tensor', (['array'], {}), '(array)\n', (2387, 2394), True, 'import torch as th\n'), ((5664, 5682), 'numpy.array', 'np.array', (['next_obs'], {}), '(next_obs)\n', (5672, 5682), True, 'import numpy as np\n'), ((5781, 5799), 'numpy.array', 'np.array', (['next_obs'], {}), '(next_obs)\n', (5789, 5799), True, 'import numpy as np\n'), ((6559, 6614), 'numpy.random.randint', 'np.random.randint', (['(1)', 'self.buffer_size'], {'size': 'batch_size'}), '(1, self.buffer_size, size=batch_size)\n', (6576, 6614), True, 'import numpy as np\n'), ((15672, 15690), 'numpy.array', 'np.array', (['obs[key]'], {}), '(obs[key])\n', (15680, 15690), True, 'import numpy as np\n'), ((15800, 15823), 'numpy.array', 'np.array', (['next_obs[key]'], {}), '(next_obs[key])\n', (15808, 15823), True, 'import numpy as np\n'), ((20014, 20032), 'numpy.array', 'np.array', (['obs[key]'], {}), '(obs[key])\n', (20022, 20032), True, 'import numpy as np\n')] |
import sys
import os
import time
import progressbar
import numpy as np
import pickle
import pandas as pd
from joblib import Parallel, delayed
def timeseries_as_many2one(d, nb_timesteps_in, columns, timelag=0):
    """Join lagged windows of selected columns onto a time-indexed frame.

    For each output row, the previous `nb_timesteps_in` values of every column
    in `columns` are flattened into features named "<col>_<k>"; the resulting
    feature frame (indexed from position nb_timesteps_in+timelag onward) is
    joined back onto the original frame `d`.
    """
    series = {c: d[c].values for c in columns}
    n_windows = len(d) - nb_timesteps_in - timelag
    rows = []
    for start in range(n_windows):
        window = []
        for c in columns:
            window.extend(series[c][start:start + nb_timesteps_in])
        rows.append(window)
    # one feature name per (column, step), column-major like the row layout
    feature_names = ["%s_%d" % (c, k) for c in columns for k in range(nb_timesteps_in)]
    features = np.r_[rows].reshape(-1, nb_timesteps_in * len(columns))
    features = pd.DataFrame(features, index=d.index[nb_timesteps_in + timelag:], columns=feature_names)
    return features.join(d)
def lstm_as_many2one_timeseries_dataset(dl, nb_timestep_in, target_column="target"):
    """Build a many-to-one LSTM dataset from a time-indexed frame.

    Sample i is the block of rows dl[i:i+nb_timestep_in] with `target_column`
    removed; its label is the target value of the LAST row of that block
    (window end and label share the same timestamp, as asserted by callers).

    Returns a tuple (X, indices, targets, target_indices, feature_columns):
      X               : 3-D array (n_samples, nb_timestep_in, n_features)
      indices         : timestamp of the last row of each window
      targets         : (n_samples, 1) array of labels
      target_indices  : timestamps of the label rows (same as `indices`)
      feature_columns : names of the feature columns
    """
    # Hoisted out of the loop: the condition is loop-invariant, and the
    # original never checked it at all when the frame had no full window.
    assert nb_timestep_in > 0, 'Error values loock'
    indices = []
    targets = []
    indices_target = []
    lstm_data = []
    # Computed once up front; the original bound `columns` inside the loop and
    # raised NameError at the return for inputs with no complete window.
    columns = dl.columns.drop(target_column).values
    nfolds = dl.shape[0]
    for i in pbar(maxval=nfolds)(range(dl.shape[0])):
        if nb_timestep_in+i <= dl.index.shape[0]:
            t_aux = dl.iloc[nb_timestep_in+i-1:nb_timestep_in+i]
            indices_target.append(t_aux.index.min())
            targets.append(t_aux[target_column].values)
            # .drop(columns=...) returns a new frame; the original popped the
            # target off a row slice, a SettingWithCopy hazard in pandas.
            aux = dl.iloc[i:nb_timestep_in+i].drop(columns=[target_column])
            indices.append(aux.index.max())
            lstm_data.append(aux.values.reshape((nb_timestep_in, aux.shape[1])))
    return np.r_[lstm_data], indices, np.array(targets), indices_target, columns
def to_timedelta(t):
    """Coerce t to a pandas time offset.

    BusinessDay offsets and Timedelta instances are returned untouched;
    anything else (e.g. a string like "1d") is converted via pd.Timedelta.
    """
    if type(t) in [pd.tseries.offsets.BusinessDay, pd.Timedelta]:
        return t
    return pd.Timedelta(t)
def pbar(**kwargs):
    """Return a fresh progressbar.ProgressBar after flushing stdout/stderr.

    The flush plus short pause keeps the progress bar from interleaving with
    pending output already written to either stream.
    """
    for stream in (sys.stdout, sys.stderr):
        stream.flush()
    time.sleep(.2)
    return progressbar.ProgressBar(**kwargs)
class mParallel(Parallel):
    """joblib.Parallel variant that rewrites progress messages on one line.

    NOTE(review): this overrides joblib's private `_print(msg, msg_args)`
    hook; the signature must match the installed joblib version — verify
    against the pinned dependency.
    """
    def _print(self, msg, msg_args):
        # Only emit when joblib verbosity is high; leading '\r' rewinds the
        # cursor so successive messages overwrite each other on one line.
        if self.verbose > 10:
            fmsg = '[%s]: %s' % (self, msg % msg_args)
            sys.stdout.write('\r ' + fmsg)
            sys.stdout.flush()
class Timeseries_Experiment:
    """Walk-forward (rolling-origin) backtesting harness for time-indexed data.

    Splits `data` into consecutive train/test folds of fixed calendar length,
    fits the configured estimator on each fold (serially or via joblib) and
    collects per-fold predictions in Timeseries_Experiment_Resultset objects,
    concatenated into `results_test` / `results_train` after `run()`.
    """
    def __init__(self, data, train_period, test_period, metrics_funcs, metrics_funcs_args={},
                 gap_between_train_and_test = "1s", target_col="target",
                 predict_on_train=True, n_jobs=-1,
                 input_cols_to_results = [], ignore_columns=[],
                 as_many2one=False, nb_timesteps_in=None,
                 max_folds=None, metadata=None, description=None,
                 align_folds_to_weekstart = False,
                 target_mode = "vector",
                 loss_on_validation_data=False):
        """Configure the experiment and materialize feature/target matrices.

        `train_period`/`test_period`/`gap_between_train_and_test` accept
        anything `to_timedelta` accepts (strings, Timedelta, BusinessDay).
        Feature matrix `self.X`, target `self.y` and `self.index` are built
        immediately via set_as_many2one()/set_target_mode().
        """
        assert as_many2one in [False, 'linearized', '3D'] , "invalid as_many2one only None, 'linearized' or '3D' allowed"
        assert target_mode in ["vector", "column", "onehot"] , "invalid target_mode only 'vector', 'column', 'onehot' allowed"
        assert type(data.index[0]) == pd.Timestamp, "data must be a time indexed dataframe"
        assert len(input_cols_to_results)==0 or np.alltrue([i in data.columns for i in input_cols_to_results]), "all input_cols_to_results must exist in data"
        assert metrics_funcs is not None, "must set metrics functions"
        assert not as_many2one or (nb_timesteps_in and nb_timesteps_in>0), "must set nb_timesteps_in>0 if using as_many2one"
        assert np.alltrue([i in metrics_funcs for i in metrics_funcs_args.keys()]), "function name in metrics_funcs_args not existing in metrics_funcs"
        self.estimator = None
        self.data = data
        self.train_period = to_timedelta(train_period)
        self.test_period = to_timedelta(test_period)
        self.gap_between_train_and_test = to_timedelta(gap_between_train_and_test)
        self.target_col = target_col
        self.predict_on_train = predict_on_train
        self.metrics_funcs = metrics_funcs
        self.metrics_funcs_args = metrics_funcs_args
        self.input_cols_to_results = input_cols_to_results
        self.ignore_columns = ignore_columns
        self.n_jobs = n_jobs
        self.fold_results_test = {}
        self.fold_results_train = {}
        self.as_many2one = as_many2one
        self.nb_timesteps_in = nb_timesteps_in
        self.max_folds = max_folds
        self.metadata = metadata
        self.description = description if description is not None else "saved experiment"
        self.target_mode = target_mode
        self.loss_on_validation_data = loss_on_validation_data
        self.align_folds_to_weekstart = align_folds_to_weekstart
        # feature mode
        self.set_as_many2one()
        # target mode
        self.set_target_mode()
    def set_as_many2one(self):
        """Build self.X and self.index according to `as_many2one`.

        'linearized' flattens each window to 2-D, '3D' keeps the
        (samples, timesteps, features) tensor, False uses the raw columns.
        """
        if self.as_many2one:
            dirname = "" if not "dir" in dir(self.data) else self.data.dir+"/"
            name = "tseries" if not "name" in dir(self.data) else self.data.name
            # NOTE(review): filename says .hd5 but the cache is read with
            # pickle below — confirm the intended serialization format.
            self.m2o_pkl_fname = dirname + name +"_%d_timesteps_in.hd5"%self.nb_timesteps_in
            if os.path.isfile(self.m2o_pkl_fname):
                print ("using cached many2one dataset")
                d, di, t, ti, cols = pickle.load(open(self.m2o_pkl_fname, "rb"))
            else:
                print ("creating many2one dataset")
                d = self.data[[i for i in self.data.columns if i==self.target_col or i not in self.ignore_columns]]
                d, di, t, ti, cols = lstm_as_many2one_timeseries_dataset(d,
                                                                    nb_timestep_in=self.nb_timesteps_in,
                                                                    target_column=self.target_col)
                self.m2o = (d,di,t,ti,cols)
                #pickle.dump((d,di,t,ti,cols), open(self.m2o_pkl_fname, "wb" ))
            # window-end timestamps must coincide with target timestamps
            assert len(di)==len(ti)==len(d)==len(t) and np.alltrue([di[i]==ti[i] for i in range(len(di))]), "error in many2one dataset generation"
            self.m2o_columns = cols
            if self.as_many2one == 'linearized':
                self.index = np.r_[di]
                self.X = d.reshape(-1, d.shape[1]*d.shape[2])
            if self.as_many2one == '3D':
                self.index = np.r_[di]
                self.X = d
        else:
            self.index = np.r_[[ pd.Timestamp(date) for date in self.data.index.values]]
            cols = [c for c in self.data.columns if c!=self.target_col and not c in self.ignore_columns]
            self.X = self.data[cols].values
    def set_target_mode(self):
        """Build self.y as a flat vector, an (n,1) column, or a one-hot matrix."""
        if self.target_mode == 'vector':
            self.y = self.data.loc[self.index][self.target_col].values
        if self.target_mode == 'column':
            self.y = self.data.loc[self.index][[self.target_col]].values
        if self.target_mode == 'onehot':
            # assert 'target_class' == self.target_col, "a 'target_class' column is necessary"
            # class order is the sorted set of observed target values
            list_class = np.sort(np.unique(self.data[[self.target_col]].values))
            onehot_target = []
            for onehot in self.data.loc[self.index][self.target_col].values:
                onehot_target.append(1*(onehot==list_class))
            self.y = np.array(onehot_target)
    def set_estimator(self, estimator, fit_params={}):
        """Attach an sklearn-style estimator and the kwargs passed to fit()."""
        self.estimator = estimator
        self.fit_params = fit_params
    def get_fold_limits(self, test_start):
        """Return (test_start, test_end, train_start, train_end) for one fold."""
        test_start = pd.Timestamp(test_start)
        test_end = test_start + pd.Timedelta(self.test_period) - self.gap_between_train_and_test
        train_start = test_start - pd.Timedelta(self.train_period)
        train_end = test_start - self.gap_between_train_and_test
        # fix to mondays
        # (a Sat/Sun train start is pulled back to the preceding Friday)
        if train_start.weekday() ==5:
            train_start = train_start - pd.Timedelta('1d')
        elif train_start.weekday() ==6:
            train_start = train_start - pd.Timedelta('2d')
        return test_start, test_end, train_start, train_end
    def extract_train_test_data(self, dates):
        """Slice X/y and the extra-info columns into train/test partitions.

        `dates` is the 4-tuple produced by get_fold_limits; boundaries are
        inclusive on both ends.
        """
        test_start, test_end, train_start, train_end = dates
        trix = np.r_[[(i>=train_start) & (i<=train_end) for i in self.index]]
        tsix = np.r_[[(i>=test_start) & (i<=test_end) for i in self.index]]
        Xtr, ytr = self.X[trix], self.y[trix]
        Xts, yts = self.X[tsix], self.y[tsix]
        tr_index = self.index[trix]
        ts_index = self.index[tsix]
        train_input_cols_to_results = self.data.loc[tr_index][[i for i in self.input_cols_to_results]]
        test_input_cols_to_results = self.data.loc[ts_index][[i for i in self.input_cols_to_results]]
        return (Xtr, Xts, ytr, yts, tr_index, ts_index,
                train_input_cols_to_results, test_input_cols_to_results)
    def run_fold(self, test_start):
        """Fit on one fold's train window and predict its test window.

        Returns (test_resultset, train_resultset), or None when either
        partition is empty (nothing to fit or score).
        """
        assert self.estimator is not None, "must call set_estimator before running experiments"
        dates = self.get_fold_limits(test_start)
        test_start, test_end, train_start, train_end = dates
        # print('dates:')
        # print('train', train_start, train_end)
        # print('test', test_start, test_end)
        (Xtr, Xts, ytr, yts, tr_index, ts_index,
         train_input_cols_to_results, test_input_cols_to_results) = self.extract_train_test_data(dates)
        k = {i:test_input_cols_to_results[i].values for i in self.input_cols_to_results}
        if len(Xts)>0 and len(Xtr)>0:
            results_tr = Timeseries_Experiment_Resultset(metrics_funcs=self.metrics_funcs,
                                                           metrics_funcs_args=self.metrics_funcs_args,
                                                           extra_info_names=self.input_cols_to_results)
            results_ts = Timeseries_Experiment_Resultset(metrics_funcs=self.metrics_funcs,
                                                           metrics_funcs_args=self.metrics_funcs_args,
                                                           extra_info_names=self.input_cols_to_results)
            # optionally let the estimator monitor loss on the test window
            v = {"validation_data": (Xts, yts)} if self.loss_on_validation_data else {}
            self.estimator.fit(Xtr,ytr, **self.fit_params, **v)
            predsts = self.estimator.predict(Xts)
            # print(yts)
            # print(type(yts))
            tmp = yts#[:,0]
            if self.target_mode == 'column':
                predsts = predsts[:,0]
                tmp = yts[:,0]
            if self.target_mode == 'onehot':
                # collapse one-hot rows back to class indices for the resultset
                predsts = [(aux).argmax() for aux in predsts]
                tmp = [(aux).argmax() for aux in yts]
            probsts = {"probs": self.estimator.predict_proba(Xts)} if "predict_proba" in dir(self.estimator) else {}
            #print('probs', len(probsts))
            results_ts.ladd(ts_index, tmp, predsts, **probsts, **{i:test_input_cols_to_results[i].values for i in self.input_cols_to_results})
            results_ts.add_metainfo(test_start = test_start, test_end=test_end,
                                    train_start = train_start, train_end = train_end)
            if hasattr(self.estimator, "feature_importances_"):
                results_ts.add_metainfo(feature_importances=self.estimator.feature_importances_)
            if self.predict_on_train:
                predstr = self.estimator.predict(Xtr)
                predstr = predstr[:,0] if len(predstr.shape)==2 else predstr
                probstr = {"probs": self.estimator.predict_proba(Xtr)} if "predict_proba" in dir(self.estimator) else {}
                tmp = ytr[:,0] if len(ytr.shape)==2 else ytr
                results_tr.ladd(tr_index, tmp, predstr, **probstr, **{i:train_input_cols_to_results[i].values for i in self.input_cols_to_results})
                results_tr.add_metainfo(test_start = test_start, test_end=test_end,
                                        train_start = train_start, train_end = train_end)
                if hasattr(self.estimator, "feature_importances_"):
                    results_tr.add_metainfo(feature_importances=self.estimator.feature_importances_)
            results_ts.close()
            results_tr.close()
            return results_ts, results_tr
        else:
            return None
    def get_folds_info(self, test_start=None, test_end=None):
        """List {test_start, test_end, train_start, train_end} dicts per fold.

        Defaults span the whole dataset (first fold starts train_period after
        the earliest timestamp). Also resets fold_results_{test,train}.
        """
        test_start = pd.Timestamp(test_start) if test_start is not None else None
        test_end = pd.Timestamp(test_end) if test_end is not None else None
        test_start = np.min(self.data.index) + self.train_period if test_start is None else test_start
        test_start = pd.Timestamp(test_start)
        test_end = np.max(self.data.index) if test_end is None else test_end
        test_end = pd.Timestamp(test_end)
        assert test_end>test_start, "test_start %s must be before test_end %s"%(str(test_start), str(test_end))
        self.fold_results_test = {}
        self.fold_results_train = {}
        r = []
        n_folds = 0
        ftest_start = test_start
        while (ftest_start<=test_end):
            ftest_start, ftest_end, ftrain_start, ftrain_end = self.get_fold_limits(ftest_start)
            # fix to mondays
            if self.align_folds_to_weekstart and ftrain_start.weekday() == 5:
                ftrain_start = ftrain_start - pd.Timedelta('1d')
            elif self.align_folds_to_weekstart and ftrain_start.weekday() ==6:
                ftrain_start = ftrain_start - pd.Timedelta('2d')
            # weekend test starts are skipped entirely when aligning to weeks
            if not self.align_folds_to_weekstart or ftest_start.weekday() <= 4:
                r.append( {"test_start": ftest_start, "test_end": ftest_end,
                           "train_start": ftrain_start, "train_end": ftrain_end})
                n_folds += 1
            ftest_start += self.test_period
        return r
    def print_folds_info(self, date_fmt="%Y-%m-%d %H:%M"):
        """Pretty-print the train/test date ranges of every fold."""
        f = self.get_folds_info()
        print ("experiment has %d time based folds"%len(f))
        print ("------------------------------------")
        print ("train start          train end            test start           test end")
        r = [i["train_start"].strftime(date_fmt)+" -- "+i["train_end"].strftime(date_fmt)+"     " +\
             i["test_start"].strftime(date_fmt)+" -- "+i["test_end"].strftime(date_fmt) for i in f]
        print ("\n".join(r))
    def run(self, test_start=None, test_end=None):
        """Run every fold (serially if n_jobs==1, else via joblib) and
        concatenate per-fold resultsets into results_test/results_train.
        Wall-clock duration is stored in self.run_time."""
        from time import time
        start_t = time()
        from joblib import delayed
        folds_info = self.get_folds_info(test_start, test_end)
        folds_info = folds_info[:self.max_folds] if self.max_folds else folds_info
        # now run the folds
        if self.n_jobs==1:
            for fold_info in pbar()(folds_info):
                #print('test_start', fold_info["test_start"])
                resu = self.run_fold(fold_info["test_start"])
                if resu is not None:
                    results_ts, results_tr = resu
                    self.fold_results_test[fold_info["test_start"]] = results_ts
                    self.fold_results_train[fold_info["test_start"]] = results_tr
        else:
            #print(folds_info)
            f = lambda x: (x["test_start"], self.run_fold(x["test_start"]))
            r = mParallel(n_jobs=self.n_jobs, verbose=30)(delayed(f)(i) for i in folds_info)
            for test_start, resu in r:
                if resu is not None:
                    results_ts, results_tr = resu
                    self.fold_results_test[test_start] = results_ts
                    self.fold_results_train[test_start] = results_tr
        self.results_test = None
        for v in self.fold_results_test.values():
            self.results_test = v if self.results_test is None else self.results_test.append(v)
        self.results_train = None
        for v in self.fold_results_train.values():
            self.results_train = v if self.results_train is None else self.results_train.append(v)
        # if self.autosave_dir is not None:
        #     self.save(self.autosave_dir)
        self.run_time = time()-start_t
    def save(self, dir_name):
        """Pickle a copy of the experiment (data replaced by a small spec,
        per-fold results dropped) to a bz2 file under dir_name."""
        import pickle, datetime
        r = {i: self.__getattribute__(i) for i in self.__dict__ if not i in ["data"]}
        from copy import copy
        # replace the full dataframe with a lightweight spec for reloading
        dr = {}
        dr["data_start_date"] = np.min(self.data.index)
        dr["data_end_date"] = np.max(self.data.index)
        dr["data_len"] = len(self.data)
        dr["data_columns"] = self.data.columns
        r = copy(self)
        r.data = dr
        r.fold_results_test = None
        r.fold_results_train = None
        now = str(datetime.datetime.now()).replace(" ", "__")
        fname="%s/%s_%s_%d.pkl.bz"%(dir_name, self.estimator.__class__.__name__, str(now).split(".")[0], id(self))
        import bz2, pickle
        pickle.dump(r, bz2.BZ2File(fname, "w"))
        print ("\nexperiment config saved to", fname)
    @staticmethod
    def load(fname, with_data=None):
        """Unpickle a saved experiment; if with_data is given, re-attach the
        matching slice of that dataframe using the saved data spec."""
        import bz2, pickle
        r = pickle.load(bz2.BZ2File(fname, "r"))
        if with_data is not None:
            data_spec = r.data
            r.data = with_data.loc[data_spec["data_start_date"]:data_spec["data_end_date"]][data_spec["data_columns"]]
        return r
def fix_outrange_price_predictions(results):
    """Clamp the 'pred' column of *results* into the valid price range [0, 180].

    Mutates *results* in place and returns the same object, matching the
    original contract.
    """
    # Vectorized equivalent of the original per-element map/lambda
    # (x<0 -> 0, x>180 -> 180, else x); NaNs pass through unchanged either way.
    results["pred"] = results["pred"].clip(lower=0, upper=180)
    return results
def filter_outrange_price_predictions(results):
    """Return a copy of the rows whose prediction lies within [0, 180]
    (inclusive) and whose target is nonzero."""
    pred_in_range = results.pred.between(0, 180)
    target_nonzero = results.target != 0
    return results[pred_in_range & target_nonzero].copy()
class Timeseries_Experiment_Resultset:
    """Accumulates per-timestamp (target, prediction) records for one fold.

    Records are appended with add()/ladd() while open, frozen into the
    `details` DataFrame by close(), and summarized by get_metrics(), which
    dispatches to the `metrics_*` static methods named in `metrics_funcs`.
    """
    def __init__(self, metrics_funcs, metrics_funcs_args={}, extra_info_names = []):
        """
        extra_info_names: variable names for extra info at each result report
        metrics_funcs: set of functions to be called upon get_metrics below on resampled result dataframes
                       holding at least "target" and "pred" columns. If "binary", automatically include
                       metrics for binary classification.
        """
        self.dates = []
        self.targets = []
        self.preds = []
        self.probs = []
        self.extra_info = {i:[] for i in extra_info_names}
        self.is_closed = False
        self.metainfo = {}
        # "count" is always computed so every metrics table includes it
        self.metrics_funcs = ["count"]+metrics_funcs if not "count" in metrics_funcs else metrics_funcs
        self.metrics_funcs_args = metrics_funcs_args
    def add(self, date, target, pred, **kwargs):
        """Append a single (date, target, prediction) record; kwargs must
        supply every declared extra-info name (and optionally 'probs')."""
        assert not self.is_closed, "this resultset has already been closed"
        self.dates.append(date)
        self.targets.append(target)
        self.preds.append(pred)
        if "probs" in kwargs.keys():
            self.probs.append(kwargs["probs"])
        for k in self.extra_info.keys():
            assert k in kwargs.keys(), "extra info %s not reported for date %s"%(k, str(date))
            self.extra_info[k].append(kwargs[k])
    def ladd(self, dates, targets, preds, **kwargs):
        """Append many records at once (list version of add); all lists,
        including each extra-info list, must have the same length."""
        dates = list(dates)
        targets = list(targets)
        preds = list(preds)
        assert not self.is_closed, "this resultset has already been closed"
        n = len(dates)
        assert len(targets)==n and len(preds)==n, "all lists must have the same number of items"
        self.dates += list(dates)
        self.targets += list(targets)
        self.preds += list(preds)
        if "probs" in kwargs.keys():
            self.probs += list(kwargs["probs"])
        for k in self.extra_info.keys():
            # BUGFIX: the original message referenced an undefined name `date`
            # here, so a missing extra-info key raised NameError instead of
            # the intended AssertionError.
            assert k in kwargs.keys(), "extra info %s not reported for dates %s"%(k, str(dates))
            tmp = list(kwargs[k])
            assert len(tmp)==n, "all lists must have the same number of items"
            self.extra_info[k] += tmp
    def add_metainfo(self, **kwargs):
        """Attach arbitrary fold metadata (fold dates, feature importances, ...)."""
        for k,v in kwargs.items():
            self.metainfo[k] = v
    def close(self):
        """Freeze accumulated records into the `details` DataFrame."""
        r = pd.DataFrame(np.r_[[self.targets, self.preds] + list(self.extra_info.values())].T,
                         index = self.dates,
                         columns = ["target", "pred"] + list(self.extra_info.keys()))
        if len(self.probs)>0:
            r["probs"] = self.probs
        self.details = r
        self.is_closed = True
        # check if is necesary save the dataset
    def get_metrics(self, groupby=None, resampling_period=None):
        """Compute each configured metrics function over `details`, grouped
        by `groupby` OR resampled by `resampling_period` (not both); with
        neither, a single group covering the whole index is used.
        Returns a DataFrame joining all metric columns."""
        assert self.metrics_funcs is not None, "must set metrics functions for this experiment"
        a = 1 if groupby else 0
        b = 1 if resampling_period else 0
        assert a+b!=2, "cannot set both groupby and resampling period"
        # get all data if no grouping or sampling
        if a+b==0:
            resampling_period = pd.Timedelta(np.max(self.details.index) - np.min(self.details.index)) + pd.Timedelta("1s")
        g=pd.Grouper(freq=resampling_period) if resampling_period else groupby
        r = None
        for fname in self.metrics_funcs:
            fargs = {} if fname not in self.metrics_funcs_args.keys() else self.metrics_funcs_args[fname]
            fname = "metrics_"+fname
            f = lambda x: self.__getattribute__(fname)(x, **fargs)
            k = self.details.groupby(g).apply(f)
            r = k if r is None else r.join(k)
        return r
    def append(self, other):
        """Return a new closed resultset whose details concatenate self's
        and other's rows (column structures must match)."""
        assert self.is_closed and other.is_closed, "resultsets must be closed"
        # print(self.details.columns)
        # print(other.details.columns)
        assert len(self.details.columns)==len(other.details.columns) and \
               np.alltrue([self.details.columns[i]==other.details.columns[i] for i in range(len(self.details.columns))]), \
               "result sets must have the same column structre"
        r = self.__class__(metrics_funcs = self.metrics_funcs, metrics_funcs_args=self.metrics_funcs_args, extra_info_names=list(self.extra_info.keys()))
        r.details = self.details.append(other.details)
        r.is_closed = True
        return r
    def plot(self, **fig_kwargs):
        """Plot target vs prediction over time with bokeh."""
        from bokeh.plotting import figure, show
        k = self.details
        bfig = figure(y_axis_label='price', x_axis_type='datetime', **fig_kwargs)
        bfig.line(k.index, k.target, color="navy", line_width=2, legend="target", alpha=.5)
        bfig.line(k.index, k.pred, color="red", line_width=2, legend="prediction", alpha=.5)
        show(bfig)
    @staticmethod
    def metrics_binary(x):
        """Accuracy and tpr/fnr/tnr/fpr assuming 0/1 targets and predictions."""
        y = x.target
        p = x.pred
        acc = np.mean(y==p) if len(y)>0 else 0
        tpr = np.mean(p[y==1]) if sum(y==1)>0 else 1
        tnr = np.mean(1-p[y==0]) if sum(y==0)>0 else 1
        fpr = np.mean(p[y==0]) if sum(y==0)>0 else 1
        fnr = np.mean(1-p[y==1]) if sum(y==1)>0 else 1
        return pd.Series([acc,tpr, fnr, tnr, fpr], index=["accuracy", "tpr", "fnr", "tnr", "fpr"])
    @staticmethod
    def metrics_multiclass_ignore_nones(x):
        """Accuracy over rows with a non-None prediction, plus the fraction predicted."""
        y = x.target.values
        p = x.pred.values
        acc = np.mean( y[p!=None]==p[p!=None])
        pct = np.mean(p!=None)
        return pd.Series([acc, pct], index=["accuracy", "pct_predicted"])
    @staticmethod
    def metrics_n_classes(x, class_labels):
        """Global accuracy plus per-class precision/recall for the given labels."""
        y = x.target
        p = x.pred
        global_acc = np.mean(y==p) if len(y)>0 else 0
        class_prec = [np.mean(y[p==i]==i) for i in class_labels]
        class_rec = [np.mean(p[y==i]==i) for i in class_labels]
        return pd.Series([global_acc]+\
                          class_prec+class_rec,
                          index=["global_acc"]+\
                                ["%d_prec"%i for i in class_labels]+\
                                ["%d_recall"%i for i in class_labels]).sort_index()
    @staticmethod
    def metrics_mape(x):
        """Mean absolute error normalized by the mean target value."""
        y = x.target
        p = x.pred
        mape = np.mean(np.abs(y-p)/np.mean(y))
        return pd.Series([mape], index=["mape"])
    @staticmethod
    def metrics_trend(x, include_class_distribution=False):
        """Sign-agreement rates between prediction and target (<0, =0, >0),
        optionally with the target's sign distribution."""
        y = x.target
        p = x.pred
        gt = np.mean(p[y>0]>0)
        lt = np.mean(p[y<0]<0)
        eq = np.mean(p[y==0]==0)
        if include_class_distribution:
            gtd = np.mean(y>0)
            ltd = np.mean(y<0)
            eqd = np.mean(y==0)
            return pd.Series([lt,eq,gt, ltd, eqd, gtd],
                             index=["<0", "=0", ">0", "<0(%)", "=0(%)", ">0(%)"])
        else:
            return pd.Series([lt,eq,gt], index=["<0", "=0", ">0"])
    @staticmethod
    def metrics_count(x):
        """Number of rows in the group."""
        return pd.Series([len(x)], index=["count"])
    @staticmethod
    def metrics_rmse(x):
        """Root mean squared error between target and prediction."""
        y = x.target.values
        p = x.pred.values
        return pd.Series([np.sqrt(np.mean((y-p)**2))], index=["rmse"])
    @staticmethod
    def metrics_pnlexpectation(x, L0_value=0):
        """Expected profit/loss decomposition of long/short signals.

        Requires a `delta_price` column; predictions above/below L0_value are
        treated as long/short positions respectively.
        """
        pred = x.pred.values
        y = x.delta_price.values
        y_dn = y[pred<L0_value]
        #p_dn = pred[pred<L0_value]
        y_up = y[pred>L0_value]
        #p_up = pred[pred>L0_value]
        eloss_dn, eprof_dn = (np.mean(y_dn[y_dn>0])*np.mean(y_dn>0), -np.mean(y_dn[y_dn<0])*np.mean(y_dn<0)) if np.sum(y_dn>0)>0 else (0.,0.)
        eloss_up, eprof_up = (-np.mean(y_up[y_up<0])*np.mean(y_up<0), np.mean(y_up[y_up>0])*np.mean(y_up>0)) if np.sum(y_up<0)>0 else (0.,0.)
        epnl_dn = eprof_dn - eloss_dn
        epnl_up = eprof_up - eloss_up
        eprof = eprof_dn*np.mean(pred<L0_value) + eprof_up*np.mean(pred>L0_value)
        eloss = eloss_dn*np.mean(pred<L0_value) + eloss_up*np.mean(pred>L0_value)
        dpup = np.mean(pred > L0_value)
        dpdn = np.mean(pred < L0_value)
        dyup = np.mean(y > 0)
        dydn = np.mean(y < 0)
        acc_dn = np.mean(pred[y < 0] < L0_value)
        acc_up = np.mean(pred[y > 0]>L0_value)
        acc_zr = np.mean(pred[y==0]==L0_value)
        return pd.Series([eloss_up, eloss_dn, eprof_up, eprof_dn,
                          epnl_up, epnl_dn, eprof, eloss, eprof-eloss, eprof/eloss,
                          dpup, dpdn, dyup, dydn, acc_dn, acc_up, acc_zr],
                         index=["E_loss_Lp+","E_loss_Lp-","E_profit_Lp+","E_profit_Lp-",
                                "PNL_Lp+", "PNL_Lp-", "E_profit", "E_loss", "PNL", "PL_rate",
                                "P(Lp+)", "P(Lp-)","P(y+)", "P(y-)", "acc-", "acc+", "acc_0"
                               ])
    @staticmethod
    def metrics_riskprofit(x, class_spec=None, n_classes=None):
        """
        class_spec: i.e. {"-":[0,1], "0":[2,3], "+": [4,5,6]} details what classes stand for
                    a positive/negative/zero price difference
                    if None, n_classes must be given and be of odd length so that the center
                    class is taken as "0", and above/below as positive/negative. For instance,
                    n_classes=5 results in {"-":[0,1], "0":[2], "+": [3,4]}
        """
        assert n_classes is not None or class_spec is not None, "must set n_classes or class_spec"
        if class_spec is None:
            assert n_classes%2==1, "n_classes must be odd so that the center class is considered the zero class"
            classes_zero = [int(n_classes/2)]
            classes_negative = [i for i in range(classes_zero[0])]
            classes_positive = [i for i in range(classes_zero[0]+1, n_classes)]
            class_spec = {"-":classes_negative, "0": classes_zero, "+": classes_positive}
        z = class_spec["0"]
        po = class_spec["+"]
        no = class_spec["-"]
        zp = z + po
        zn = z + no
        p = x.pred.values
        valid_preds = np.logical_not(pd.isna(p))
        valid_preds_pct = np.mean(valid_preds)
        p = x.pred.values[valid_preds]
        y = x.target.values[valid_preds]
        # risk free
        rfree = np.mean([ (y[i] in zp and p[i] in zp) or (y[i] in zn and p[i] in zn) for i in range(len(y))])
        # profit only
        # NOTE(review): the filter below repeats "p[i] not in z" twice;
        # possibly one occurrence was meant to test y[i] — confirm before
        # changing, as it would alter reported statistics.
        ponly = np.mean([ ((y[i] in po and p[i] in po) or \
                            (y[i] in no and p[i] in no)) for i in range(len(y)) if p[i] not in z and p[i] not in z])
        puponly = np.mean([ (y[i] in po) for i in range(len(y)) if p[i] in po])
        pdnonly = np.mean([ (y[i] in no) for i in range(len(y)) if p[i] in no])
        lonly = np.mean([ ((y[i] in po and p[i] in no) or \
                           (y[i] in no and p[i] in po)) for i in range(len(y)) if p[i] not in z])
        ldnonly = np.mean([ (y[i] in po) for i in range(len(y)) if p[i] in no])
        luponly = np.mean([ (y[i] in no) for i in range(len(y)) if p[i] in po])
        return pd.Series([rfree, ponly, pdnonly, puponly, lonly, ldnonly, luponly, valid_preds_pct],
                         index=["risk_free_accuracy",
                                "profit_only_accuracy", "profitdn_only_accuracy", "profitup_only_accuracy",
                                "loss_only_accuracy", "lossdn_only_accuracy", "lossup_only_accuracy",
                                "prediction_pct "])
| [
"sys.stdout.write",
"numpy.abs",
"numpy.sum",
"bz2.BZ2File",
"os.path.isfile",
"numpy.mean",
"sys.stdout.flush",
"pandas.Grouper",
"numpy.unique",
"pandas.DataFrame",
"numpy.max",
"bokeh.plotting.show",
"pandas.Timedelta",
"pandas.isna",
"sys.stderr.flush",
"datetime.datetime.now",
"... | [((614, 690), 'pandas.DataFrame', 'pd.DataFrame', (['X'], {'index': 'd.index[nb_timesteps_in + timelag:]', 'columns': 'colnames'}), '(X, index=d.index[nb_timesteps_in + timelag:], columns=colnames)\n', (626, 690), True, 'import pandas as pd\n'), ((1883, 1901), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (1899, 1901), False, 'import sys\n'), ((1906, 1924), 'sys.stderr.flush', 'sys.stderr.flush', ([], {}), '()\n', (1922, 1924), False, 'import sys\n'), ((1929, 1944), 'time.time.sleep', 'time.sleep', (['(0.2)'], {}), '(0.2)\n', (1939, 1944), False, 'from time import time\n'), ((1955, 1988), 'progressbar.ProgressBar', 'progressbar.ProgressBar', ([], {}), '(**kwargs)\n', (1978, 1988), False, 'import progressbar\n'), ((1673, 1690), 'numpy.array', 'np.array', (['targets'], {}), '(targets)\n', (1681, 1690), True, 'import numpy as np\n'), ((1842, 1857), 'pandas.Timedelta', 'pd.Timedelta', (['t'], {}), '(t)\n', (1854, 1857), True, 'import pandas as pd\n'), ((7555, 7579), 'pandas.Timestamp', 'pd.Timestamp', (['test_start'], {}), '(test_start)\n', (7567, 7579), True, 'import pandas as pd\n'), ((12640, 12664), 'pandas.Timestamp', 'pd.Timestamp', (['test_start'], {}), '(test_start)\n', (12652, 12664), True, 'import pandas as pd\n'), ((12762, 12784), 'pandas.Timestamp', 'pd.Timestamp', (['test_end'], {}), '(test_end)\n', (12774, 12784), True, 'import pandas as pd\n'), ((14474, 14480), 'time.time', 'time', ([], {}), '()\n', (14478, 14480), False, 'from time import time\n'), ((16341, 16364), 'numpy.min', 'np.min', (['self.data.index'], {}), '(self.data.index)\n', (16347, 16364), True, 'import numpy as np\n'), ((16395, 16418), 'numpy.max', 'np.max', (['self.data.index'], {}), '(self.data.index)\n', (16401, 16418), True, 'import numpy as np\n'), ((16520, 16530), 'copy.copy', 'copy', (['self'], {}), '(self)\n', (16524, 16530), False, 'from copy import copy\n'), ((22074, 22140), 'bokeh.plotting.figure', 'figure', ([], {'y_axis_label': '"""price"""', 'x_axis_type': 
'"""datetime"""'}), "(y_axis_label='price', x_axis_type='datetime', **fig_kwargs)\n", (22080, 22140), False, 'from bokeh.plotting import figure, show\n'), ((22334, 22344), 'bokeh.plotting.show', 'show', (['bfig'], {}), '(bfig)\n', (22338, 22344), False, 'from bokeh.plotting import figure, show\n'), ((22710, 22798), 'pandas.Series', 'pd.Series', (['[acc, tpr, fnr, tnr, fpr]'], {'index': "['accuracy', 'tpr', 'fnr', 'tnr', 'fpr']"}), "([acc, tpr, fnr, tnr, fpr], index=['accuracy', 'tpr', 'fnr', 'tnr',\n 'fpr'])\n", (22719, 22798), True, 'import pandas as pd\n'), ((22926, 22963), 'numpy.mean', 'np.mean', (['(y[p != None] == p[p != None])'], {}), '(y[p != None] == p[p != None])\n', (22933, 22963), True, 'import numpy as np\n'), ((22973, 22991), 'numpy.mean', 'np.mean', (['(p != None)'], {}), '(p != None)\n', (22980, 22991), True, 'import numpy as np\n'), ((23005, 23063), 'pandas.Series', 'pd.Series', (['[acc, pct]'], {'index': "['accuracy', 'pct_predicted']"}), "([acc, pct], index=['accuracy', 'pct_predicted'])\n", (23014, 23063), True, 'import pandas as pd\n'), ((23777, 23810), 'pandas.Series', 'pd.Series', (['[mape]'], {'index': "['mape']"}), "([mape], index=['mape'])\n", (23786, 23810), True, 'import pandas as pd\n'), ((23943, 23964), 'numpy.mean', 'np.mean', (['(p[y > 0] > 0)'], {}), '(p[y > 0] > 0)\n', (23950, 23964), True, 'import numpy as np\n'), ((23974, 23995), 'numpy.mean', 'np.mean', (['(p[y < 0] < 0)'], {}), '(p[y < 0] < 0)\n', (23981, 23995), True, 'import numpy as np\n'), ((24005, 24028), 'numpy.mean', 'np.mean', (['(p[y == 0] == 0)'], {}), '(p[y == 0] == 0)\n', (24012, 24028), True, 'import numpy as np\n'), ((25455, 25479), 'numpy.mean', 'np.mean', (['(pred > L0_value)'], {}), '(pred > L0_value)\n', (25462, 25479), True, 'import numpy as np\n'), ((25495, 25519), 'numpy.mean', 'np.mean', (['(pred < L0_value)'], {}), '(pred < L0_value)\n', (25502, 25519), True, 'import numpy as np\n'), ((25535, 25549), 'numpy.mean', 'np.mean', (['(y > 0)'], {}), '(y > 
0)\n', (25542, 25549), True, 'import numpy as np\n'), ((25565, 25579), 'numpy.mean', 'np.mean', (['(y < 0)'], {}), '(y < 0)\n', (25572, 25579), True, 'import numpy as np\n'), ((25597, 25628), 'numpy.mean', 'np.mean', (['(pred[y < 0] < L0_value)'], {}), '(pred[y < 0] < L0_value)\n', (25604, 25628), True, 'import numpy as np\n'), ((25646, 25677), 'numpy.mean', 'np.mean', (['(pred[y > 0] > L0_value)'], {}), '(pred[y > 0] > L0_value)\n', (25653, 25677), True, 'import numpy as np\n'), ((25693, 25726), 'numpy.mean', 'np.mean', (['(pred[y == 0] == L0_value)'], {}), '(pred[y == 0] == L0_value)\n', (25700, 25726), True, 'import numpy as np\n'), ((25740, 26110), 'pandas.Series', 'pd.Series', (['[eloss_up, eloss_dn, eprof_up, eprof_dn, epnl_up, epnl_dn, eprof, eloss, \n eprof - eloss, eprof / eloss, dpup, dpdn, dyup, dydn, acc_dn, acc_up,\n acc_zr]'], {'index': "['E_loss_Lp+', 'E_loss_Lp-', 'E_profit_Lp+', 'E_profit_Lp-', 'PNL_Lp+',\n 'PNL_Lp-', 'E_profit', 'E_loss', 'PNL', 'PL_rate', 'P(Lp+)', 'P(Lp-)',\n 'P(y+)', 'P(y-)', 'acc-', 'acc+', 'acc_0']"}), "([eloss_up, eloss_dn, eprof_up, eprof_dn, epnl_up, epnl_dn, eprof,\n eloss, eprof - eloss, eprof / eloss, dpup, dpdn, dyup, dydn, acc_dn,\n acc_up, acc_zr], index=['E_loss_Lp+', 'E_loss_Lp-', 'E_profit_Lp+',\n 'E_profit_Lp-', 'PNL_Lp+', 'PNL_Lp-', 'E_profit', 'E_loss', 'PNL',\n 'PL_rate', 'P(Lp+)', 'P(Lp-)', 'P(y+)', 'P(y-)', 'acc-', 'acc+', 'acc_0'])\n", (25749, 26110), True, 'import pandas as pd\n'), ((27526, 27546), 'numpy.mean', 'np.mean', (['valid_preds'], {}), '(valid_preds)\n', (27533, 27546), True, 'import numpy as np\n'), ((28454, 28753), 'pandas.Series', 'pd.Series', (['[rfree, ponly, pdnonly, puponly, lonly, ldnonly, luponly, valid_preds_pct]'], {'index': "['risk_free_accuracy', 'profit_only_accuracy', 'profitdn_only_accuracy',\n 'profitup_only_accuracy', 'loss_only_accuracy', 'lossdn_only_accuracy',\n 'lossup_only_accuracy', 'prediction_pct ']"}), "([rfree, ponly, pdnonly, puponly, lonly, ldnonly, luponly,\n 
valid_preds_pct], index=['risk_free_accuracy', 'profit_only_accuracy',\n 'profitdn_only_accuracy', 'profitup_only_accuracy',\n 'loss_only_accuracy', 'lossdn_only_accuracy', 'lossup_only_accuracy',\n 'prediction_pct '])\n", (28463, 28753), True, 'import pandas as pd\n'), ((2149, 2179), 'sys.stdout.write', 'sys.stdout.write', (["('\\r ' + fmsg)"], {}), "('\\r ' + fmsg)\n", (2165, 2179), False, 'import sys\n'), ((2191, 2209), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (2207, 2209), False, 'import sys\n'), ((3226, 3290), 'numpy.alltrue', 'np.alltrue', (['[(i in data.columns) for i in input_cols_to_results]'], {}), '([(i in data.columns) for i in input_cols_to_results])\n', (3236, 3290), True, 'import numpy as np\n'), ((5183, 5217), 'os.path.isfile', 'os.path.isfile', (['self.m2o_pkl_fname'], {}), '(self.m2o_pkl_fname)\n', (5197, 5217), False, 'import os\n'), ((7336, 7359), 'numpy.array', 'np.array', (['onehot_target'], {}), '(onehot_target)\n', (7344, 7359), True, 'import numpy as np\n'), ((7717, 7748), 'pandas.Timedelta', 'pd.Timedelta', (['self.train_period'], {}), '(self.train_period)\n', (7729, 7748), True, 'import pandas as pd\n'), ((12377, 12401), 'pandas.Timestamp', 'pd.Timestamp', (['test_start'], {}), '(test_start)\n', (12389, 12401), True, 'import pandas as pd\n'), ((12459, 12481), 'pandas.Timestamp', 'pd.Timestamp', (['test_end'], {}), '(test_end)\n', (12471, 12481), True, 'import pandas as pd\n'), ((12685, 12708), 'numpy.max', 'np.max', (['self.data.index'], {}), '(self.data.index)\n', (12691, 12708), True, 'import numpy as np\n'), ((16098, 16104), 'time.time', 'time', ([], {}), '()\n', (16102, 16104), False, 'from time import time\n'), ((16850, 16873), 'bz2.BZ2File', 'bz2.BZ2File', (['fname', '"""w"""'], {}), "(fname, 'w')\n", (16861, 16873), False, 'import bz2, pickle\n'), ((17036, 17059), 'bz2.BZ2File', 'bz2.BZ2File', (['fname', '"""r"""'], {}), "(fname, 'r')\n", (17047, 17059), False, 'import bz2, pickle\n'), ((20795, 20829), 
'pandas.Grouper', 'pd.Grouper', ([], {'freq': 'resampling_period'}), '(freq=resampling_period)\n', (20805, 20829), True, 'import pandas as pd\n'), ((22446, 22461), 'numpy.mean', 'np.mean', (['(y == p)'], {}), '(y == p)\n', (22453, 22461), True, 'import numpy as np\n'), ((22493, 22511), 'numpy.mean', 'np.mean', (['p[y == 1]'], {}), '(p[y == 1])\n', (22500, 22511), True, 'import numpy as np\n'), ((22546, 22568), 'numpy.mean', 'np.mean', (['(1 - p[y == 0])'], {}), '(1 - p[y == 0])\n', (22553, 22568), True, 'import numpy as np\n'), ((22601, 22619), 'numpy.mean', 'np.mean', (['p[y == 0]'], {}), '(p[y == 0])\n', (22608, 22619), True, 'import numpy as np\n'), ((22654, 22676), 'numpy.mean', 'np.mean', (['(1 - p[y == 1])'], {}), '(1 - p[y == 1])\n', (22661, 22676), True, 'import numpy as np\n'), ((23188, 23203), 'numpy.mean', 'np.mean', (['(y == p)'], {}), '(y == p)\n', (23195, 23203), True, 'import numpy as np\n'), ((23243, 23266), 'numpy.mean', 'np.mean', (['(y[p == i] == i)'], {}), '(y[p == i] == i)\n', (23250, 23266), True, 'import numpy as np\n'), ((23308, 23331), 'numpy.mean', 'np.mean', (['(p[y == i] == i)'], {}), '(p[y == i] == i)\n', (23315, 23331), True, 'import numpy as np\n'), ((24082, 24096), 'numpy.mean', 'np.mean', (['(y > 0)'], {}), '(y > 0)\n', (24089, 24096), True, 'import numpy as np\n'), ((24113, 24127), 'numpy.mean', 'np.mean', (['(y < 0)'], {}), '(y < 0)\n', (24120, 24127), True, 'import numpy as np\n'), ((24144, 24159), 'numpy.mean', 'np.mean', (['(y == 0)'], {}), '(y == 0)\n', (24151, 24159), True, 'import numpy as np\n'), ((24177, 24272), 'pandas.Series', 'pd.Series', (['[lt, eq, gt, ltd, eqd, gtd]'], {'index': "['<0', '=0', '>0', '<0(%)', '=0(%)', '>0(%)']"}), "([lt, eq, gt, ltd, eqd, gtd], index=['<0', '=0', '>0', '<0(%)',\n '=0(%)', '>0(%)'])\n", (24186, 24272), True, 'import pandas as pd\n'), ((24330, 24379), 'pandas.Series', 'pd.Series', (['[lt, eq, gt]'], {'index': "['<0', '=0', '>0']"}), "([lt, eq, gt], index=['<0', '=0', '>0'])\n", (24339, 
24379), True, 'import pandas as pd\n'), ((27488, 27498), 'pandas.isna', 'pd.isna', (['p'], {}), '(p)\n', (27495, 27498), True, 'import pandas as pd\n'), ((7096, 7142), 'numpy.unique', 'np.unique', (['self.data[[self.target_col]].values'], {}), '(self.data[[self.target_col]].values)\n', (7105, 7142), True, 'import numpy as np\n'), ((7616, 7646), 'pandas.Timedelta', 'pd.Timedelta', (['self.test_period'], {}), '(self.test_period)\n', (7628, 7646), True, 'import pandas as pd\n'), ((7921, 7939), 'pandas.Timedelta', 'pd.Timedelta', (['"""1d"""'], {}), "('1d')\n", (7933, 7939), True, 'import pandas as pd\n'), ((12537, 12560), 'numpy.min', 'np.min', (['self.data.index'], {}), '(self.data.index)\n', (12543, 12560), True, 'import numpy as np\n'), ((20765, 20783), 'pandas.Timedelta', 'pd.Timedelta', (['"""1s"""'], {}), "('1s')\n", (20777, 20783), True, 'import pandas as pd\n'), ((23366, 23531), 'pandas.Series', 'pd.Series', (['([global_acc] + class_prec + class_rec)'], {'index': "(['global_acc'] + [('%d_prec' % i) for i in class_labels] + [('%d_recall' %\n i) for i in class_labels])"}), "([global_acc] + class_prec + class_rec, index=['global_acc'] + [(\n '%d_prec' % i) for i in class_labels] + [('%d_recall' % i) for i in\n class_labels])\n", (23375, 23531), True, 'import pandas as pd\n'), ((23738, 23751), 'numpy.abs', 'np.abs', (['(y - p)'], {}), '(y - p)\n', (23744, 23751), True, 'import numpy as np\n'), ((23750, 23760), 'numpy.mean', 'np.mean', (['y'], {}), '(y)\n', (23757, 23760), True, 'import numpy as np\n'), ((25027, 25043), 'numpy.sum', 'np.sum', (['(y_dn > 0)'], {}), '(y_dn > 0)\n', (25033, 25043), True, 'import numpy as np\n'), ((25169, 25185), 'numpy.sum', 'np.sum', (['(y_up < 0)'], {}), '(y_up < 0)\n', (25175, 25185), True, 'import numpy as np\n'), ((25300, 25324), 'numpy.mean', 'np.mean', (['(pred < L0_value)'], {}), '(pred < L0_value)\n', (25307, 25324), True, 'import numpy as np\n'), ((25334, 25358), 'numpy.mean', 'np.mean', (['(pred > L0_value)'], {}), '(pred > 
L0_value)\n', (25341, 25358), True, 'import numpy as np\n'), ((25382, 25406), 'numpy.mean', 'np.mean', (['(pred < L0_value)'], {}), '(pred < L0_value)\n', (25389, 25406), True, 'import numpy as np\n'), ((25416, 25440), 'numpy.mean', 'np.mean', (['(pred > L0_value)'], {}), '(pred > L0_value)\n', (25423, 25440), True, 'import numpy as np\n'), ((6457, 6475), 'pandas.Timestamp', 'pd.Timestamp', (['date'], {}), '(date)\n', (6469, 6475), True, 'import pandas as pd\n'), ((8020, 8038), 'pandas.Timedelta', 'pd.Timedelta', (['"""2d"""'], {}), "('2d')\n", (8032, 8038), True, 'import pandas as pd\n'), ((13330, 13348), 'pandas.Timedelta', 'pd.Timedelta', (['"""1d"""'], {}), "('1d')\n", (13342, 13348), True, 'import pandas as pd\n'), ((16640, 16663), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (16661, 16663), False, 'import pickle, datetime\n'), ((24607, 24628), 'numpy.mean', 'np.mean', (['((y - p) ** 2)'], {}), '((y - p) ** 2)\n', (24614, 24628), True, 'import numpy as np\n'), ((24945, 24968), 'numpy.mean', 'np.mean', (['y_dn[y_dn > 0]'], {}), '(y_dn[y_dn > 0])\n', (24952, 24968), True, 'import numpy as np\n'), ((24967, 24984), 'numpy.mean', 'np.mean', (['(y_dn > 0)'], {}), '(y_dn > 0)\n', (24974, 24984), True, 'import numpy as np\n'), ((25007, 25024), 'numpy.mean', 'np.mean', (['(y_dn < 0)'], {}), '(y_dn < 0)\n', (25014, 25024), True, 'import numpy as np\n'), ((25110, 25127), 'numpy.mean', 'np.mean', (['(y_up < 0)'], {}), '(y_up < 0)\n', (25117, 25127), True, 'import numpy as np\n'), ((25127, 25150), 'numpy.mean', 'np.mean', (['y_up[y_up > 0]'], {}), '(y_up[y_up > 0])\n', (25134, 25150), True, 'import numpy as np\n'), ((25149, 25166), 'numpy.mean', 'np.mean', (['(y_up > 0)'], {}), '(y_up > 0)\n', (25156, 25166), True, 'import numpy as np\n'), ((13474, 13492), 'pandas.Timedelta', 'pd.Timedelta', (['"""2d"""'], {}), "('2d')\n", (13486, 13492), True, 'import pandas as pd\n'), ((15322, 15332), 'joblib.delayed', 'delayed', (['f'], {}), '(f)\n', (15329, 
15332), False, 'from joblib import delayed\n'), ((20706, 20732), 'numpy.max', 'np.max', (['self.details.index'], {}), '(self.details.index)\n', (20712, 20732), True, 'import numpy as np\n'), ((20735, 20761), 'numpy.min', 'np.min', (['self.details.index'], {}), '(self.details.index)\n', (20741, 20761), True, 'import numpy as np\n'), ((24985, 25008), 'numpy.mean', 'np.mean', (['y_dn[y_dn < 0]'], {}), '(y_dn[y_dn < 0])\n', (24992, 25008), True, 'import numpy as np\n'), ((25088, 25111), 'numpy.mean', 'np.mean', (['y_up[y_up < 0]'], {}), '(y_up[y_up < 0])\n', (25095, 25111), True, 'import numpy as np\n')] |
import numpy as np
import pandas as pd
def angSepVincenty(ra1, dec1, ra2, dec2):
    """Return the angular separation between two sky positions, in degrees.

    Uses the Vincenty formulation of the great-circle distance, which is
    numerically well-behaved for both very small and near-antipodal
    separations.

    Parameters
    ----------
    ra1, dec1 : scalar or array-like
        Right ascension and declination of the first point (degrees).
    ra2, dec2 : scalar or array-like
        Right ascension and declination of the second point (degrees).
    """
    phi1, phi2 = np.radians(dec1), np.radians(dec2)
    dlam = np.radians(ra2) - np.radians(ra1)
    sphi1, cphi1 = np.sin(phi1), np.cos(phi1)
    sphi2, cphi2 = np.sin(phi2), np.cos(phi2)
    cdlam, sdlam = np.cos(dlam), np.sin(dlam)
    # Numerator: magnitude of the cross product of the two unit vectors.
    num = np.sqrt((cphi2 * sdlam) ** 2
                  + (cphi1 * sphi2 - sphi1 * cphi2 * cdlam) ** 2)
    # Denominator: dot product of the two unit vectors.
    den = sphi1 * sphi2 + cphi1 * cphi2 * cdlam
    return np.degrees(np.arctan2(num, den))
if __name__ == '__main__':
    # Local folder holding the TESS Input Catalog (TIC) CTL csv chunks.
    tic_dir = '/Users/tom/Dropbox/TIC4/CTL/'
    # Build a subset of the TIC centred on ra = 50 deg, dec = -30 deg.
    # NOTE(review): the cut below keeps stars within 12 deg of the centre,
    # while the original notes mention a "6 degrees radius circle" — confirm
    # which radius is intended.
    # The chunk file covering the relevant sky region:
    tic_file = '02-04.csv'
    usecols = [0, 1, 2, 3, 4, 5, 6, 7, 20]
    header_file = 'header.txt'
    # Column names are stored in a separate header file.
    h = pd.read_csv(tic_dir + header_file, usecols=usecols)
    # Rename the coordinate columns to more explicit names.
    h.rename(columns={'RA': 'RA_DEG',
                      'DEC': 'DEC_DEG'}, inplace=True)
    TIC = pd.read_csv(tic_dir + tic_file, header=0, usecols=usecols)
    TIC.columns = h.columns
    # Angular distance of every star from the field centre (degrees).
    deg_from_center = angSepVincenty(TIC.RA_DEG, TIC.DEC_DEG, 50, -30)
    savetic = TIC[deg_from_center < 12.].copy()
    # NOTE(review): DataFrame.size counts cells (rows * columns), not rows;
    # len(savetic) would report the number of stars instead.
    print('saving file with {} lines'.format(savetic.size))
    savetic.to_hdf('tic_at_50_minus30.h5', key='data', mode='w')
| [
"numpy.radians",
"numpy.degrees",
"pandas.read_csv",
"numpy.sin",
"numpy.cos",
"numpy.sqrt"
] | [((159, 174), 'numpy.radians', 'np.radians', (['ra1'], {}), '(ra1)\n', (169, 174), True, 'import numpy as np\n'), ((190, 206), 'numpy.radians', 'np.radians', (['dec1'], {}), '(dec1)\n', (200, 206), True, 'import numpy as np\n'), ((221, 236), 'numpy.radians', 'np.radians', (['ra2'], {}), '(ra2)\n', (231, 236), True, 'import numpy as np\n'), ((252, 268), 'numpy.radians', 'np.radians', (['dec2'], {}), '(dec2)\n', (262, 268), True, 'import numpy as np\n'), ((794, 813), 'numpy.degrees', 'np.degrees', (['diffpos'], {}), '(diffpos)\n', (804, 813), True, 'import numpy as np\n'), ((1241, 1292), 'pandas.read_csv', 'pd.read_csv', (['(tic_dir + header_file)'], {'usecols': 'usecols'}), '(tic_dir + header_file, usecols=usecols)\n', (1252, 1292), True, 'import pandas as pd\n'), ((1435, 1493), 'pandas.read_csv', 'pd.read_csv', (['(tic_dir + tic_file)'], {'header': '(0)', 'usecols': 'usecols'}), '(tic_dir + tic_file, header=0, usecols=usecols)\n', (1446, 1493), True, 'import pandas as pd\n'), ((295, 311), 'numpy.sin', 'np.sin', (['dec1_rad'], {}), '(dec1_rad)\n', (301, 311), True, 'import numpy as np\n'), ((313, 329), 'numpy.cos', 'np.cos', (['dec1_rad'], {}), '(dec1_rad)\n', (319, 329), True, 'import numpy as np\n'), ((355, 371), 'numpy.sin', 'np.sin', (['dec2_rad'], {}), '(dec2_rad)\n', (361, 371), True, 'import numpy as np\n'), ((373, 389), 'numpy.cos', 'np.cos', (['dec2_rad'], {}), '(dec2_rad)\n', (379, 389), True, 'import numpy as np\n'), ((456, 472), 'numpy.cos', 'np.cos', (['delta_ra'], {}), '(delta_ra)\n', (462, 472), True, 'import numpy as np\n'), ((474, 490), 'numpy.sin', 'np.sin', (['delta_ra'], {}), '(delta_ra)\n', (480, 490), True, 'import numpy as np\n'), ((517, 626), 'numpy.sqrt', 'np.sqrt', (['((cos_dec2 * sin_delta_ra) ** 2 + (cos_dec1 * sin_dec2 - sin_dec1 *\n cos_dec2 * cos_delta_ra) ** 2)'], {}), '((cos_dec2 * sin_delta_ra) ** 2 + (cos_dec1 * sin_dec2 - sin_dec1 *\n cos_dec2 * cos_delta_ra) ** 2)\n', (524, 626), True, 'import numpy as np\n')] |
import os
from pyTSEB import TSEB
from pyTSEB import MO_similarity as mo
from pyTSEB import wind_profile as wind
from pyTSEB import resistances as res
from pyTSEB import energy_combination_ET as pet
from pyTSEB import meteo_utils as met
from pyTSEB import net_radiation as rad
from pypro4sail import four_sail as fs
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
import pandas as pd
import ipywidgets as w
from IPython.display import display, clear_output
# --- Shared configuration and constants for the interactive exercises --------
print("Gracias! librerías correctamente importadas")
print("Puedes continuar con las siguientes tareas")
# Passed to every slider so callbacks fire only when the handle is released.
slide_kwargs = {"continuous_update": False}
FIGSIZE = (12.0, 6.0)  # default figure size for every plot (inches)
np.seterr(all="ignore")  # silence numpy floating-point warnings interactively
# Input/output folders, relative to the package root.
INPUT_FOLDER = os.path.join(os.path.dirname(os.path.dirname(__file__)), "input")
OUTPUT_FOLDER = os.path.join(os.path.dirname(os.path.dirname(__file__)), "output")
CIMIS_FILE_PATH = os.path.join(INPUT_FOLDER, "meteo", "meteo_daily.csv")
N_SIM = 50  # number of points per simulation sweep
LAIS = np.linspace(0, 6, N_SIM)  # LAI range explored by the simulations
pet.ITERATIONS = 5  # iterations of the combination-equation solvers
# Generate the list with VZAs (0 to 88 degrees inclusive)
VZAS = np.arange(0, 89)
# Axis limits for the different plot types.
DELTA_T_LIMS = -5, 20
FLUX_LIMS = 0, 500
ET_LIMS = 0, 10
U = 5. # set wind speed (measured at 10m above the canopy)
Z_U = 10.  # wind measurement height (m)
Z_T = 10.  # air temperature measurement height (m)
LAI_REF = 24 * 0.12  # effective LAI of the FAO56 reference grass surface
H_C_REF = 0.12 # Canopy height
LEAF_WIDTH = 0.05 # Leaf width (m)
# Create list of heights
ZS = np.linspace(0, Z_U, N_SIM)
US = np.linspace(0.50, 20, N_SIM)  # wind speeds used in resistance sweeps (m/s)
EMIS_C = 0.98  # canopy emissivity
# NOTE(review): a soil emissivity of 0.05 is physically implausible (typical
# soils are ~0.95); this looks like 1 - emissivity — confirm intent.
EMIS_S = 0.05
RES_FORM = [TSEB.KUSTAS_NORMAN_1999, {}]
# mean stomatal resistance, rsT, is taken as 400sm-1. It follows from Eq. (19)
# that, for a leaf area index, L , of 4, the bulk stomatal resistance is 50 sm-1
RST_MIN = 400
GST_REF = 0.415
# Calculations are carried out for the following meteorological conditions: R, =
# 400 W m-2; D = 0,10,20 mb; T, = 25 "C; and u = 2 m s-1. Such meteorological
# conditions might be considered typical for midday in the middle of a growing
# season at a subtropical site. However, the objective is not to make detailed
# predictions for particular meteorological conditions, it is rather to
# illustrate the general features of the theoretical treatment described.
SDN = 300. # We assume sn=400 to try to approach rn=400 when computing ln in the module
TAIR = 25 + 273.15 # K
PRESS = 1013. # mb
VPD = 0.5 * met.calc_vapor_pressure(TAIR) # mb
# For bare soil zb is commonly taken as 0.01 m (see Van Bavel and Hillel 1976)
Z0_SOIL = 0.01
# Daily CIMIS meteorological records used for the crop-coefficient exercise.
CIMIS_DATA = pd.read_csv(CIMIS_FILE_PATH)
CIMIS_DATA['Date'] = pd.to_datetime(CIMIS_DATA['Date'], format="%m/%d/%Y")
# Vapour pressure deficit (mb): saturation pressure minus actual (kPa -> mb).
CIMIS_DATA["VPD_mean"] = TSEB.met.calc_vapor_pressure(CIMIS_DATA["Avg Air Temp (C)"] + 273.15) \
                         - 10 * CIMIS_DATA["Avg Vap Pres (kPa)"]
# Interactive sliders shared by the notebook exercises.
w_lai = w.FloatSlider(value=LAI_REF, min=0, max=10, step=0.1, description='LAI (m²/m²)',
                      **slide_kwargs)
w_leaf_angle = w.FloatSlider(value=57, min=1, max=90, step=1,
                             description='Leaf Angle (deg.)', **slide_kwargs)
w_zol = w.FloatSlider(min=-1, max=1, value=0, step=0.01, description='Estabilidad',
                      **slide_kwargs)
w_u = w.FloatSlider(min=0.1, max=20, value=U, step=0.1, description='Velocidad del viento (m/s)',
                    **slide_kwargs)
w_hc = w.FloatSlider(min=0.01, max=8, value=H_C_REF, step=0.01, description='Altura dosel',
                     **slide_kwargs)
w_hb_ratio = w.FloatSlider(min=0.0, max=0.9, value=0.5, step=0.01,
                           description='Inicio del dosel', **slide_kwargs)
w_r_ss = w.FloatSlider(min=0, max=10000, value=2000, step=100,
                       description='R$_{ss}$ (s/m)', **slide_kwargs)
w_g_st = w.FloatSlider(min=0, max=0.5, value=GST_REF, step=0.001,
                       description='g$_{st}$ (mmol/m²s¹)', **slide_kwargs)
w_lai_range = w.FloatRangeSlider(min=0, max=10, value=[0, 4], step=0.1,
                                 description='LAI', **slide_kwargs)
w_vza = w.FloatSlider(min=0, max=89, value=0, step=1,
                      description='VZA (deg.)', **slide_kwargs)
w_ev = w.FloatSlider(min=0.97, max=1, value=0.99, step=0.001,
                     description='$\epsilon_V$', readout_format='.3f',
                     **slide_kwargs)
w_es = w.FloatSlider(min=0.90, max=1, value=0.97, step=0.001,
                     description='$\epsilon_S$', readout_format='.3f',
                     **slide_kwargs)
def plot_fveg(lai, leaf_angle=57):
    """Plot the vegetation fraction seen by a sensor versus view zenith angle.

    The fraction is computed for the requested leaf-angle distribution and,
    as a reference, for a spherical distribution (x_LAD = 1).

    Parameters
    ----------
    lai : float
        Leaf area index (m2/m2).
    leaf_angle : float
        Mean leaf inclination angle (degrees).

    Returns
    -------
    ndarray
        Observed vegetation fraction for each angle in VZAS.
    """
    x_lad = rad.leafangle_2_chi(leaf_angle)
    f_obs = TSEB.calc_F_theta_campbell(VZAS, lai, x_LAD=x_lad)
    f_obs_spherical = TSEB.calc_F_theta_campbell(VZAS, lai, x_LAD=1)
    fig, ax = plt.subplots(figsize=FIGSIZE)
    ax.plot(VZAS, f_obs, 'r')
    ax.plot(VZAS, f_obs_spherical, 'k--', label='Spherical')
    ax.set_xlabel('VZA (degrees)')
    ax.set_ylabel('Crop Fraction Observed by the Sensor')
    ax.set_ylim((0, 1.05))
    ax.set_xlim((0, 90))
    ax.legend(loc='lower right')
    plt.tight_layout()
    plt.show()
    return f_obs
def calc_roughness(h_c):
    """Return (z_0m, d_0): roughness length and zero-plane displacement (m).

    Uses the common rule-of-thumb ratios z_0m = h_c / 8 and d_0 = 2 h_c / 3.
    """
    return h_c / 8.0, 2.0 * h_c / 3.0
def l_2_zol(z_0m, zol):
    """Convert the stability parameter z_0m / L back into the Obukhov length L.

    A ``zol`` of exactly zero denotes neutral stratification, for which the
    Obukhov length is infinite.
    """
    return np.inf if zol == 0.0 else z_0m / zol
def wind_profile(zol, lai, h_c=0.12):
    """Compute the wind-speed profile above and inside a homogeneous canopy.

    Parameters
    ----------
    zol : float
        Stability parameter z_0m / L (0 means neutral).
    lai : float
        Leaf area index (m2/m2).
    h_c : float
        Canopy height (m).

    Returns
    -------
    u_z : ndarray
        Wind speed at each height of the global ZS array (m/s).
    u_c : float
        Wind speed at the canopy top (m/s).
    """
    z0m, d0 = calc_roughness(h_c)
    mol = l_2_zol(z0m, zol)
    ustar = mo.calc_u_star(U, Z_U, mol, d0, z0m)
    u_top = wind.calc_u_C_star(ustar, h_c, d0, z0m, mol)
    above = ZS >= h_c
    profile = np.ones(ZS.shape)
    # Diabatic log profile above the canopy; Goudriaan exponential decay inside.
    profile[above] = wind.calc_u_C_star(ustar, ZS[above], d0, z0m, mol)
    profile[~above] = wind.calc_u_Goudriaan(u_top, h_c, lai, LEAF_WIDTH, ZS[~above])
    return profile, u_top
def wind_profile_homogeneous(zol, lai, hc):
    """Plot a canopy wind profile against the neutral FAO56 reference profile.

    Computes the profile for the requested stability (``zol``), LAI and
    canopy height, plus the neutral reference-grass profile, draws both with
    plot_profile() and returns the requested profile and canopy-top speed.
    """
    profile, u_top = wind_profile(zol, lai, h_c=hc)
    reference, _ = wind_profile(0., LAI_REF, h_c=H_C_REF)
    plot_profile(profile, u_top, hc, u_z_ref=reference)
    return profile, u_top
def wind_profile_heterogeneous(zol, lai, hc, hb_ratio=0.5):
    """Compute and plot the wind profile for a vertically heterogeneous canopy.

    Above the canopy the diabatic log profile is used; inside the canopy the
    Massman wind-attenuation model is driven by an asymmetric foliage-density
    distribution whose base starts at ``hb_ratio * hc``.  Also plots the
    foliage-density profile used.

    Parameters
    ----------
    zol : float
        Stability parameter z_0m / L (0 means neutral).
    lai : float
        Leaf area index (m2/m2).
    hc : float
        Canopy height (m).
    hb_ratio : float
        Height of the canopy base as a fraction of ``hc``.

    Returns
    -------
    u_z : ndarray
        Wind speed at each height in ZS (m/s).
    u_c : float
        Wind speed at the canopy top (m/s).
    """
    # Estimate surface roughness
    z_0m, d_0 = calc_roughness(hc)
    l_mo = l_2_zol(z_0m, zol)
    # Calculate the friction velocity
    u_friction = mo.calc_u_star(U, Z_U, l_mo, d_0, z_0m)
    upper = ZS >= hc
    u_z = np.ones(ZS.shape)
    u_c = wind.calc_u_C_star(u_friction, hc, d_0, z_0m, l_mo)
    # Log profile above the canopy top.
    u_z[upper] = wind.calc_u_C_star(u_friction, ZS[upper], d_0, z_0m, l_mo)
    h_b = hb_ratio * hc
    # Shape parameters of the foliage density distribution (peak at mid-canopy).
    Xi_max, sigma_u, sigma_l = wind.canopy_shape(hc,
                                                h_b=h_b,
                                                h_max=0.5)
    f_a = wind.calc_canopy_distribution(Xi_max, sigma_u, sigma_l)
    f_a_cum = wind.calc_cummulative_canopy_distribution(f_a)
    # Massman attenuation inside the canopy; all inputs broadcast to the
    # number of below-canopy heights.
    u_z[~upper] = wind.calc_u_Massman(np.full(np.sum(~upper), u_c),
                                      np.full(np.sum(~upper), hc),
                                      np.full(np.sum(~upper), lai),
                                      ZS[~upper],
                                      f_a_cum,
                                      xi_soil=Z0_SOIL/hc)
    plot_profile(u_z, u_c, hc)
    # Second figure: the foliage-density profile driving the attenuation.
    plt.figure(figsize=FIGSIZE)
    plt.plot(f_a, np.linspace(0, hc, np.size(f_a)))
    plt.ylim((0, hc))
    plt.xlim((0, None))
    plt.xlabel('Foliar density')
    plt.ylabel('Height above ground (m)')
    plt.tight_layout()
    plt.show()
    return u_z, u_c
def plot_profile(u_z, u_c, hc, u_z_ref=None):
    """Draw a wind profile (speed versus height), optionally with a reference.

    Parameters
    ----------
    u_z : array-like
        Wind speed at each height of the global ZS array (m/s).
    u_c : float
        Wind speed at the canopy top (m/s), drawn as a star marker.
    hc : float
        Canopy height (m).
    u_z_ref : array-like, optional
        Neutral FAO56 reference profile to overlay as a dashed line.
    """
    fig, ax = plt.subplots(figsize=FIGSIZE)
    ax.plot(u_z, ZS, 'b', label="Wind profile")
    # Mark the wind speed at the top of the canopy.
    ax.plot(u_c, hc, marker='*', markerfacecolor="none",
            markeredgecolor="blue", ms=12, ls="None", label='$u_c$')
    if u_z_ref is not None:
        ax.plot(u_z_ref, ZS, 'k--', label="FAO56 reference profile")
    ax.legend(loc='upper left')
    ax.set_xlim((0, U))
    ax.set_ylim((0, Z_U))
    ax.set_xlabel('Wind Speed (m)')
    ax.set_ylabel('Height above ground (m)')
    plt.tight_layout()
    plt.show()
def plot_aerodynamic_resistance(zol, hc):
    """Plot the aerodynamic resistance R_A against wind speed.

    Parameters
    ----------
    zol : float
        Stability parameter z_0m / L (0 means neutral).
    hc : float
        Canopy height (m).
    """
    z0m, d0 = calc_roughness(np.full_like(ZS, hc))
    # Neutral conditions are represented by an infinite Obukhov length.
    mol = z0m / zol if zol != 0 else np.inf
    ustar = mo.calc_u_star(US, np.full_like(US, Z_U), mol, d0, z0m)
    r_a = res.calc_R_A(Z_U, ustar, mol, d0, z0m)
    fig, ax = plt.subplots(figsize=FIGSIZE)
    ax.plot(US, r_a, 'k')
    ax.set_ylabel('Aerodynamic Resistance (s/m)')
    ax.set_ylim((0, 200))
    ax.set_xlabel('Wind speed (m/s)')
    ax.set_xlim((0, None))
    plt.tight_layout()
    plt.show()
def plot_resistances(lai, hc, l_mo, leaf_width, z0_soil, delta_t):
    """Plot R_A, R_x and R_S versus wind speed for four resistance schemes.

    Compares the Kustas & Norman 1999 (KN99), Choudhury & Monteith 1988
    (CM88), McNaughton & van den Hurk 1995 (MH95) and Goudriaan-attenuated
    Choudhury (N14) formulations of the aerodynamic (R_A), canopy
    boundary-layer (R_x) and soil boundary-layer (R_S) resistances.

    Parameters
    ----------
    lai : float
        Leaf area index (m2/m2).
    hc : float
        Canopy height (m).
    l_mo : float
        Monin-Obukhov length (m).
    leaf_width : float
        Effective leaf width (m).
    z0_soil : float
        Aerodynamic roughness length of the soil surface (m).
    delta_t : float
        Soil-air temperature difference (K), used by the KN99 soil resistance.
    """

    def calc_resistances(lai, hc, l_mo, leaf_width, z0_soil, delta_t,
                         resistance_flag=0):
        # One (R_A, R_x, R_S) triplet per wind speed in the global US array.
        rs_list = []
        rx_list = []
        ra_list = []
        z_0m = np.full_like(ZS, hc) / 8.
        # Zero-plane displacement at 2/3 of the canopy height, consistent
        # with calc_roughness() (the original used 2/2, placing d_0 at the
        # canopy top).
        d_0 = 2. * np.full_like(ZS, hc) / 3.
        for u in US:
            u_friction = mo.calc_u_star(u, Z_U, l_mo, d_0, z_0m)
            u_c = wind.calc_u_C_star(u_friction, hc, d_0, z_0m, l_mo)
            # Resistances
            if resistance_flag == 0:
                # Kustas & Norman 1999.
                u_S = wind.calc_u_Goudriaan(u_c, hc, lai, leaf_width, z0_soil)
                u_d_zm = wind.calc_u_Goudriaan(u_c, hc, lai, leaf_width, d_0 + z_0m)
                rx = res.calc_R_x_Norman(lai, leaf_width, u_d_zm)
                rs = res.calc_R_S_Kustas(u_S, delta_t)
            elif resistance_flag == 1:
                # Choudhury & Monteith 1988.
                rx = res.calc_R_x_Choudhury(u_c, lai, leaf_width)
                rs = res.calc_R_S_Choudhury(u_friction, hc, z_0m, d_0, Z_U,
                                            z0_soil)
            elif resistance_flag == 2:
                # McNaughton & van den Hurk 1995.
                rx = res.calc_R_x_McNaughton(lai, leaf_width, u_friction)
                rs = res.calc_R_S_McNaughton(u_friction)
            elif resistance_flag == 3:
                # Choudhury forms with a Goudriaan attenuation coefficient.
                alpha_k = wind.calc_A_Goudriaan(hc, lai, leaf_width)
                alpha_prime = float(alpha_k)
                rx = res.calc_R_x_Choudhury(u_c, lai, leaf_width,
                                            alpha_prime=alpha_prime)
                rs = res.calc_R_S_Choudhury(u_friction, hc, z_0m, d_0, Z_U,
                                            z0_soil, alpha_k=alpha_k)
            ra = res.calc_R_A(Z_U, u_friction, l_mo, d_0, z_0m)
            # Add the results to the output lists.
            ra_list.append(ra)
            rx_list.append(rx)
            rs_list.append(rs)
        return ra_list, rx_list, rs_list

    # Fixed: the original referenced an undefined FIG_SIZE (the module
    # constant is FIGSIZE), raising a NameError.
    fig, axs = plt.subplots(3, 1, sharex=True, figsize=FIGSIZE)
    for flag, color, label in ((0, 'k', 'KN99'), (1, 'r', 'CM88'),
                               (2, 'b', 'MH95'), (3, 'g', 'N14')):
        ra, rx, rs = calc_resistances(lai, hc, l_mo, leaf_width, z0_soil,
                                      delta_t, resistance_flag=flag)
        axs[0].plot(US, ra, color, label=label)
        axs[1].plot(US, rx, color, label=label)
        # Fixed: the original plotted rx on the soil axis for CM88 and MH95.
        axs[2].plot(US, rs, color, label=label)
    axs[0].legend(bbox_to_anchor=(0, 1), loc=3, ncol=4)
    axs[0].set_ylabel('Aerodynamic Resistance')
    # Booleans instead of the 'off' strings removed in matplotlib 3.0.
    axs[0].tick_params(axis='x', which='both', bottom=False, top=False,
                       labelbottom=False)
    axs[1].set_ylabel('Canopy Resistance')
    axs[1].tick_params(axis='x', which='both', bottom=False, top=False,
                       labelbottom=False)
    axs[2].set_ylabel('Soil Resistance')
    axs[2].set_xlabel('Wind speed')
    axs[0].set_ylim((0, 200))
    axs[1].set_ylim((0, 200))
    axs[2].set_ylim((0, 200))
    plt.tight_layout()
    plt.subplots_adjust(hspace=0)
    plt.show()
def plot_flux_variation(values, le, le_c, le_pm, le_fao, t_c, t_s, t_0,
                        var="LAI"):
    """Plot daily ET, its canopy/soil partitioning, and temperature deltas.

    Parameters
    ----------
    values : array-like
        Values of the driving variable along the x axis (e.g. LAI).
    le, le_c : array-like
        Total and canopy daily ET from Shuttleworth-Wallace (mm/day).
    le_pm : array-like
        Daily ET from Penman-Monteith (mm/day).
    le_fao : float
        FAO56 reference ET (mm/day), drawn as a single star at 0.5 * LAI_REF.
    t_c, t_s, t_0 : array-like
        Canopy, soil and aerodynamic-surface temperature minus air
        temperature (K).
    var : str
        Label for the x axis.
    """
    fig, axs = plt.subplots(3, figsize=FIGSIZE, sharex=True)
    axs[0].plot(values, le, linestyle="-", color="blue", label="ET$_{SW}$")
    axs[0].plot(values, le_pm, linestyle="-", color="red", label="ET$_{PM}$")
    axs[0].plot(0.5 * LAI_REF, le_fao, color="black", markersize=12, marker="*",
                ls="none", label="ET$_{FAO56}$")
    # Fraction of total ET coming from the canopy and from the soil.
    axs[1].plot(values, le_c / le, linestyle="-", color="green",
                label=r"$\Delta$ET$_{C}$")
    # Fixed: the soil label was missing its leading '$' and rendered literally.
    axs[1].plot(values, 1 - le_c / le, linestyle="-", color="orange",
                label=r"$\Delta$ET$_{S}$")
    axs[2].plot(values, t_0, linestyle="-", color="black", label="T$_0$ - T$_a$")
    axs[2].plot(values, t_c, linestyle="-", color="green", label="T$_c$ - T$_a$")
    axs[2].plot(values, t_s, linestyle="-", color="orange", label="T$_s$ - T$_a$")
    axs[2].axhline(0, c="silver", ls=":")
    value_lims = np.min(values), np.max(values)
    axs[0].legend()
    axs[1].legend()  # added: labels were defined but no legend was drawn
    axs[2].legend()
    axs[0].set_ylabel("ET (mm / day)")  # fixed stray '$)' in the label
    axs[0].set_ylim(ET_LIMS)
    axs[1].set_ylabel("LAYER FRACTION")
    axs[1].set_ylim((0, 1))
    axs[2].set_ylabel("T$_x$ - T$_a$ (K)")
    axs[2].set_ylim(DELTA_T_LIMS)
    axs[0].set_xlim(value_lims)
    axs[1].set_xlim(value_lims)
    axs[2].set_xlim(value_lims)
    axs[2].set_xlabel(var)
    plt.tight_layout()
    plt.subplots_adjust(hspace=0)
    plt.show()
def fluxes_and_resistances(g_st=GST_REF, r_ss=2000, h_c=H_C_REF):
    """Run Shuttleworth-Wallace, Penman-Monteith and FAO56 ET over a LAI sweep.

    Simulates daily ET (mm/day) over the global LAI array ``LAIS`` under the
    fixed meteorology defined at module level (TAIR, VPD, SDN, U, ...),
    plots the results with plot_flux_variation() and returns the component
    temperatures.

    Parameters
    ----------
    g_st : float
        Leaf stomatal conductance (mol m-2 s-1).
    r_ss : float
        Resistance to vapour transport at the soil surface (s m-1).
    h_c : float
        Canopy height (m).

    Returns
    -------
    t_c, t_s, t_0 : ndarray
        Canopy, soil (Shuttleworth-Wallace) and aerodynamic-surface
        (Penman-Monteith) temperatures (K), one value per LAI.
    """
    tair = np.full(N_SIM, TAIR)
    # Bulk leaf stomatal resistance (s m-1) from the molar conductance.
    r_st = 1. / (TSEB.res.molm2s1_2_ms1(tair, PRESS) * g_st)
    # Net shortwave with albedo 0.23, split with a Beer-Lambert extinction law.
    sn = np.full(N_SIM, SDN) * (1. - 0.23)
    sn_s = sn * np.exp(-0.5 * LAIS)
    sn_c = sn - sn_s
    es = met.calc_vapor_pressure(tair)
    ea = es - VPD
    # Incoming longwave from atmospheric emissivity and air temperature.
    ldn = rad.calc_emiss_atm(ea, tair) * met.calc_stephan_boltzmann(tair)
    z_0m, d_0 = calc_roughness(np.full(N_SIM, h_c))
    [_, t_s, t_c, _, _, _, le, _, le_c, *_] = pet.shuttleworth_wallace(
        tair,
        U,
        ea,
        PRESS,
        sn_c,
        sn_s,
        ldn,
        LAIS,
        h_c,
        EMIS_C,
        EMIS_S,
        z_0m,
        d_0,
        Z_U,
        Z_T,
        leaf_width=LEAF_WIDTH,
        z0_soil=Z0_SOIL,
        Rst_min=r_st,
        R_ss=r_ss,
        resistance_form=RES_FORM,
        calcG_params=[[0], np.zeros(N_SIM)],
        leaf_type=1,
        verbose=False)
    # At LAI = 0 there is no canopy: zero canopy ET, undefined canopy temp.
    le_c[0] = 0
    t_c[0] = np.nan
    [_, t_0, _, le_pm, *_] = pet.penman_monteith(tair,
                                                U,
                                                ea,
                                                PRESS,
                                                sn,
                                                ldn,
                                                EMIS_C,
                                                LAIS,
                                                z_0m,
                                                d_0,
                                                Z_U,
                                                Z_T,
                                                Rst_min=r_st,
                                                calcG_params=[[0],
                                                              np.zeros(N_SIM)],
                                                leaf_type=1,
                                                verbose=False)
    le_fao = pet.pet_fao56(TAIR,
                          U,
                          ea[0],
                          es[0],
                          PRESS,
                          np.asarray(SDN),
                          Z_U,
                          Z_T,
                          f_cd=1,
                          is_daily=True)
    # Convert latent heat fluxes (W m-2) into daily water depths (mm/day).
    le_pm = met.flux_2_evaporation(le_pm, t_k=TAIR, time_domain=24)
    le_fao = met.flux_2_evaporation(le_fao, t_k=TAIR, time_domain=24)
    le = met.flux_2_evaporation(le, t_k=TAIR, time_domain=24)
    le_c = met.flux_2_evaporation(le_c, t_k=TAIR, time_domain=24)
    plot_flux_variation(LAIS, le, le_c, le_pm, le_fao,
                        t_c - TAIR, t_s - TAIR, t_0 - TAIR,
                        var="LAI")
    return t_c, t_s, t_0
def get_land_surface_temperature(vza, leaf_angle, temperatures, e_v=0.98, e_s=0.95):
    """Compare simplified and 4SAIL-based directional LST against T_0.

    Parameters
    ----------
    vza : float
        View zenith angle (degrees).
    leaf_angle : float
        Mean leaf inclination angle (degrees).
    temperatures : object
        Holds (t_c, t_s, t_0) in its ``result`` attribute — presumably the
        output of an earlier interactive computation (TODO confirm).
    e_v, e_s : float
        Leaf and soil broadband emissivities.
    """
    t_c, t_s, t_0 = temperatures.result
    # Analytical directional brightness temperature from the 4SAIL RTM.
    bt_obs, emiss = lst_from_4sail(e_v, e_s, t_c, t_s, LAIS, vza, leaf_angle, t_atm=243.)
    chi = rad.leafangle_2_chi(leaf_angle)
    fc = TSEB.calc_F_theta_campbell(vza, LAIS, x_LAD=chi)
    # Simplified LST: emissivity-free 4th-power mixing of t_c and t_s.
    lst = (fc * t_c**4 + (1. - fc) * t_s**4)**0.25
    # With no canopy, both estimates reduce to the soil temperature.
    bt_obs[LAIS == 0] = t_s[LAIS == 0]
    lst[LAIS == 0] = t_s[LAIS == 0]
    fig, axs = plt.subplots(nrows=2, figsize=FIGSIZE, sharex=True)
    axs[0].plot(LAIS, fc, 'k-')
    axs[0].set_ylabel('Crop Fraction Observed by the Sensor')
    axs[0].set_ylim((0, 1))
    axs[1].plot(LAIS, t_0, 'k-', label='T$_0$')
    axs[1].plot(LAIS, lst, 'r-', label='LST simplified')
    axs[1].plot(LAIS, bt_obs, 'r:', label='LST analytical')
    axs[1].set_xlabel('LAI')
    axs[1].set_ylabel('LST')
    axs[1].legend(loc='upper right')
    axs[1].set_ylim((TAIR - 5, TAIR + 20))
    axs[1].set_xlim((0, np.max(LAIS)))
    plt.tight_layout()
    plt.subplots_adjust(hspace=0)
    plt.show()
def plot_kcs(dates, lais, et_ref, et, kcs):
    """Plot a seasonal LAI/ET time series plus a crop-coefficient scatter.

    Left column: LAI (top) and reference vs actual ET (bottom) over time.
    Right (merged) panel: crop coefficient Kc as a function of LAI.

    Parameters
    ----------
    dates : array-like of datetime
        Observation dates.
    lais : array-like
        Daily leaf area index.
    et_ref, et : array-like
        Reference and actual daily ET (mm/day).
    kcs : array-like
        Crop coefficient (et / et_ref) per day.
    """
    fig, axs = plt.subplots(nrows=2, ncols=3, figsize=FIGSIZE, sharex="col")
    # Merge the two right-hand columns into one large panel for the scatter.
    gs = axs[0, 1].get_gridspec()
    for ax in axs[:, 1]:
        ax.remove()
    for ax in axs[:, 2]:
        ax.remove()
    axbig = fig.add_subplot(gs[0:, 1:])
    axs[0, 0].plot(dates, lais, linestyle="-", color="green", lw=0.5)
    axs[1, 0].plot(dates, et_ref, linestyle="-", color="black", lw=0.5,
                   label="ET$_{ref}$")
    axs[1, 0].plot(dates, et, linestyle="-", color="blue", lw=0.5,
                   label="ET$_{a}$")
    axbig.scatter(lais, kcs, c="black", s=3, label="$Kc_{SW}$")
    axs[1, 0].legend()
    axs[0, 0].set_ylabel("LAI")
    axs[0, 0].set_ylim((0, 4))
    axs[1, 0].set_ylabel("ET (mm/day)")
    axs[1, 0].set_ylim(ET_LIMS)
    # Month abbreviations on the time axis, one tick every three months.
    axs[1, 0].xaxis.set_major_formatter(mdates.DateFormatter('%b'))
    axs[1, 0].xaxis.set_major_locator(mdates.MonthLocator(bymonth=range(1, 13, 3)))
    axbig.set_ylabel("Crop coefficient")
    axbig.set_ylim((0., 2))
    axbig.set_xlabel("LAI")
    plt.tight_layout()
    plt.subplots_adjust(hspace=0)
    plt.show()
def build_day(doys, lai_range):
    """Build a synthetic seasonal LAI curve over days of year.

    LAI peaks at ``lai_range[1]`` on DOY 180 and decays towards
    ``lai_range[0]`` following a cos**4 of the (scaled) distance to DOY 180;
    the result is clipped at zero.

    Parameters
    ----------
    doys : scalar or array-like
        Day(s) of year.
    lai_range : sequence of two floats
        (minimum, maximum) LAI.
    """
    lai_min, lai_max = lai_range[0], lai_range[1]
    season = np.cos(np.pi * (doys - 180.) / 365) ** 4
    return np.maximum(0, lai_min + (lai_max - lai_min) * season)
def crop_coefficients(g_st=GST_REF, r_ss=2000, h_c=H_C_REF, lai_range=(0, 5)):
    """Derive daily crop coefficients from Shuttleworth-Wallace ET and CIMIS data.

    Builds a synthetic seasonal LAI curve, runs the Shuttleworth-Wallace
    model with the daily CIMIS meteorology, computes Kc = ET_a / ETo, saves
    a LAI-vs-Kc table to OUTPUT_FOLDER/lai_vs_kc.csv and plots the results.

    Parameters
    ----------
    g_st : float
        Leaf stomatal conductance (mol m-2 s-1).
    r_ss : float
        Resistance to vapour transport at the soil surface (s m-1).
    h_c : float
        Canopy height (m).
    lai_range : sequence of two floats
        (minimum, maximum) LAI of the synthetic seasonal curve.
    """
    lais = build_day(CIMIS_DATA["Jul"], lai_range)
    # Net shortwave with albedo 0.23, split with a Beer-Lambert law.
    sn = CIMIS_DATA["Sol Rad (W/sq.m)"].values * (1. - 0.23)
    sn_s = sn * np.exp(-0.5 * lais)
    sn_c = sn - sn_s
    tair = CIMIS_DATA["Avg Air Temp (C)"].values + 273.15
    # Bulk leaf stomatal resistance (s m-1) from the molar conductance.
    r_st = 1. / (TSEB.res.molm2s1_2_ms1(tair, PRESS) * g_st)
    # Actual vapour pressure, kPa converted to mb.
    ea = 10 * CIMIS_DATA["Avg Vap Pres (kPa)"].values
    ldn = rad.calc_emiss_atm(ea, tair) * met.calc_stephan_boltzmann(tair)
    z_0m, d_0 = calc_roughness(np.full_like(sn, h_c))
    [_, t_s, t_c, _, _, _, le, _, le_c, *_] = pet.shuttleworth_wallace(
        tair,
        CIMIS_DATA["Avg Wind Speed (m/s)"].values,
        ea,
        PRESS,
        sn_c,
        sn_s,
        ldn,
        lais,
        h_c,
        EMIS_C,
        EMIS_S,
        z_0m,
        d_0,
        Z_U,
        Z_T,
        leaf_width=LEAF_WIDTH,
        z0_soil=Z0_SOIL,
        Rst_min=r_st,
        R_ss=r_ss,
        resistance_form=RES_FORM,
        calcG_params=[[0], np.zeros(sn.shape)],
        leaf_type=1,
        verbose=False)
    # NOTE(review): conversion uses the constant TAIR rather than the daily
    # air temperature array `tair` — confirm this is intentional.
    et = met.flux_2_evaporation(le, t_k=TAIR, time_domain=24)
    kcs_sw = et / CIMIS_DATA["ETo (mm)"].values
    out_file = os.path.join(OUTPUT_FOLDER, "lai_vs_kc.csv")
    if not os.path.isdir(OUTPUT_FOLDER):
        os.makedirs(OUTPUT_FOLDER)
    result = pd.DataFrame({"LAI": lais, "Kc": kcs_sw})
    result.to_csv(out_file, index=False)
    plot_kcs(CIMIS_DATA["Date"], lais, CIMIS_DATA["ETo (mm)"],
             et, kcs_sw)
def lst_from_4sail(e_v, e_s, t_c, t_s, lais, vza, leaf_angle, t_atm=0):
    """Compute directional LST and emissivity with the 4SAIL thermal RTM.

    Parameters
    ----------
    e_v, e_s : float
        Leaf and soil broadband emissivities.
    t_c, t_s : array-like
        Canopy and soil temperatures (K), one value per LAI.
    lais : array-like
        Leaf area index values (length N_SIM).
    vza : float
        View zenith angle (degrees).
    leaf_angle : float
        Mean leaf inclination angle (degrees).
    t_atm : float
        Effective atmospheric radiometric temperature (K).

    Returns
    -------
    lst_obs : ndarray
        Directional radiometric temperature (K).
    emiss : ndarray
        Apparent directional emissivity.
    """
    # Apply Kirchoff's law to get the soil and leaf bihemispherical reflectances
    rsoil = np.full((1, N_SIM), 1 - e_s)
    rho_leaf = np.full((1, N_SIM), 1 - e_v)
    tau_leaf = np.zeros((1, N_SIM))
    # Leaf inclination distribution function from the mean leaf angle.
    lidf = fs.calc_lidf_campbell_vec(np.full(N_SIM, leaf_angle))
    # 4SAIL for canopy reflectance and transmittance factors
    [tss, too, tsstoo, rdd, tdd, rsd, tsd, rdo, tdo, rso, rsos, rsod, rddt,
     rsdt, rdot,
     rsodt, rsost, rsot, gammasdf, gammasdb,
     gammaso] = fs.foursail_vec(lais,
                                np.full(N_SIM, 0.01),
                                lidf,
                                np.full(N_SIM, 37),
                                np.full(N_SIM, vza),
                                np.zeros(N_SIM),
                                rho_leaf, tau_leaf, rsoil)
    # Canopy/observer absorption terms derived from the SAIL factors.
    gammad = 1 - rdd - tdd
    gammao = 1 - rdo - tdo - too
    ttot = (too + tdo) / (1. - rsoil * rdd)
    gammaot = gammao + ttot * rsoil * gammad
    aeev = gammaot
    aees = ttot * e_v
    # Get the different canopy broadband emission components.
    h_vc = met.calc_stephan_boltzmann(t_c)
    h_gc = met.calc_stephan_boltzmann(t_s)
    h_sky = met.calc_stephan_boltzmann(t_atm)
    # Total outgoing longwave radiance: reflected sky plus surface emission.
    lw = (rdot * h_sky + (aeev * h_vc + aees * h_gc )) / np.pi
    # Invert Stefan-Boltzmann for the blackbody emission temperature.
    lst_obs = (np.pi * lw / rad.SB) ** (0.25)
    # Estimate the apparent surface directional emissivity
    emiss = 1 - rdot
    return lst_obs.reshape(-1), emiss.reshape(-1)
def rc_to_gst(rc, lai=0.5 * LAI_REF):
    """Convert a bulk canopy resistance (s m-1) into leaf stomatal conductance.

    The bulk resistance is first scaled by the effective LAI to obtain a
    per-leaf stomatal resistance, which is then inverted via rst_to_gst().
    """
    return rst_to_gst(rc * lai)
def rst_to_gst(rst, t_c=293.15, p=1013.25):
    """Convert a stomatal resistance (s m-1) into a molar stomatal conductance.

    Parameters
    ----------
    rst : float
        Stomatal resistance (s m-1).
    t_c : float
        Air temperature (K) used in the unit conversion.
    p : float
        Atmospheric pressure (mb) used in the unit conversion.
    """
    unit_factor = res.molm2s1_2_ms1(t_c, p=p)
    return 1. / (rst * unit_factor)
| [
"numpy.maximum",
"numpy.sum",
"pandas.read_csv",
"pyTSEB.resistances.calc_R_A",
"pyTSEB.wind_profile.calc_A_Goudriaan",
"numpy.ones",
"pyTSEB.wind_profile.calc_canopy_distribution",
"matplotlib.pyplot.figure",
"numpy.arange",
"pyTSEB.meteo_utils.calc_vapor_pressure",
"numpy.exp",
"pyTSEB.wind_... | [((667, 690), 'numpy.seterr', 'np.seterr', ([], {'all': '"""ignore"""'}), "(all='ignore')\n", (676, 690), True, 'import numpy as np\n'), ((919, 973), 'os.path.join', 'os.path.join', (['INPUT_FOLDER', '"""meteo"""', '"""meteo_daily.csv"""'], {}), "(INPUT_FOLDER, 'meteo', 'meteo_daily.csv')\n", (931, 973), False, 'import os\n'), ((993, 1017), 'numpy.linspace', 'np.linspace', (['(0)', '(6)', 'N_SIM'], {}), '(0, 6, N_SIM)\n', (1004, 1017), True, 'import numpy as np\n'), ((1089, 1105), 'numpy.arange', 'np.arange', (['(0)', '(89)'], {}), '(0, 89)\n', (1098, 1105), True, 'import numpy as np\n'), ((1361, 1387), 'numpy.linspace', 'np.linspace', (['(0)', 'Z_U', 'N_SIM'], {}), '(0, Z_U, N_SIM)\n', (1372, 1387), True, 'import numpy as np\n'), ((1393, 1420), 'numpy.linspace', 'np.linspace', (['(0.5)', '(20)', 'N_SIM'], {}), '(0.5, 20, N_SIM)\n', (1404, 1420), True, 'import numpy as np\n'), ((2434, 2462), 'pandas.read_csv', 'pd.read_csv', (['CIMIS_FILE_PATH'], {}), '(CIMIS_FILE_PATH)\n', (2445, 2462), True, 'import pandas as pd\n'), ((2484, 2537), 'pandas.to_datetime', 'pd.to_datetime', (["CIMIS_DATA['Date']"], {'format': '"""%m/%d/%Y"""'}), "(CIMIS_DATA['Date'], format='%m/%d/%Y')\n", (2498, 2537), True, 'import pandas as pd\n'), ((2712, 2813), 'ipywidgets.FloatSlider', 'w.FloatSlider', ([], {'value': 'LAI_REF', 'min': '(0)', 'max': '(10)', 'step': '(0.1)', 'description': '"""LAI (m²/m²)"""'}), "(value=LAI_REF, min=0, max=10, step=0.1, description=\n 'LAI (m²/m²)', **slide_kwargs)\n", (2725, 2813), True, 'import ipywidgets as w\n'), ((2847, 2947), 'ipywidgets.FloatSlider', 'w.FloatSlider', ([], {'value': '(57)', 'min': '(1)', 'max': '(90)', 'step': '(1)', 'description': '"""Leaf Angle (deg.)"""'}), "(value=57, min=1, max=90, step=1, description=\n 'Leaf Angle (deg.)', **slide_kwargs)\n", (2860, 2947), True, 'import ipywidgets as w\n'), ((2981, 3076), 'ipywidgets.FloatSlider', 'w.FloatSlider', ([], {'min': '(-1)', 'max': '(1)', 'value': '(0)', 'step': 
'(0.01)', 'description': '"""Estabilidad"""'}), "(min=-1, max=1, value=0, step=0.01, description='Estabilidad',\n **slide_kwargs)\n", (2994, 3076), True, 'import ipywidgets as w\n'), ((3101, 3213), 'ipywidgets.FloatSlider', 'w.FloatSlider', ([], {'min': '(0.1)', 'max': '(20)', 'value': 'U', 'step': '(0.1)', 'description': '"""Velocidad del viento (m/s)"""'}), "(min=0.1, max=20, value=U, step=0.1, description=\n 'Velocidad del viento (m/s)', **slide_kwargs)\n", (3114, 3213), True, 'import ipywidgets as w\n'), ((3237, 3342), 'ipywidgets.FloatSlider', 'w.FloatSlider', ([], {'min': '(0.01)', 'max': '(8)', 'value': 'H_C_REF', 'step': '(0.01)', 'description': '"""Altura dosel"""'}), "(min=0.01, max=8, value=H_C_REF, step=0.01, description=\n 'Altura dosel', **slide_kwargs)\n", (3250, 3342), True, 'import ipywidgets as w\n'), ((3374, 3480), 'ipywidgets.FloatSlider', 'w.FloatSlider', ([], {'min': '(0.0)', 'max': '(0.9)', 'value': '(0.5)', 'step': '(0.01)', 'description': '"""Inicio del dosel"""'}), "(min=0.0, max=0.9, value=0.5, step=0.01, description=\n 'Inicio del dosel', **slide_kwargs)\n", (3387, 3480), True, 'import ipywidgets as w\n'), ((3512, 3616), 'ipywidgets.FloatSlider', 'w.FloatSlider', ([], {'min': '(0)', 'max': '(10000)', 'value': '(2000)', 'step': '(100)', 'description': '"""R$_{ss}$ (s/m)"""'}), "(min=0, max=10000, value=2000, step=100, description=\n 'R$_{ss}$ (s/m)', **slide_kwargs)\n", (3525, 3616), True, 'import ipywidgets as w\n'), ((3644, 3757), 'ipywidgets.FloatSlider', 'w.FloatSlider', ([], {'min': '(0)', 'max': '(0.5)', 'value': 'GST_REF', 'step': '(0.001)', 'description': '"""g$_{st}$ (mmol/m²s¹)"""'}), "(min=0, max=0.5, value=GST_REF, step=0.001, description=\n 'g$_{st}$ (mmol/m²s¹)', **slide_kwargs)\n", (3657, 3757), True, 'import ipywidgets as w\n'), ((3790, 3886), 'ipywidgets.FloatRangeSlider', 'w.FloatRangeSlider', ([], {'min': '(0)', 'max': '(10)', 'value': '[0, 4]', 'step': '(0.1)', 'description': '"""LAI"""'}), "(min=0, max=10, value=[0, 
4], step=0.1, description='LAI',\n **slide_kwargs)\n", (3808, 3886), True, 'import ipywidgets as w\n'), ((3924, 4016), 'ipywidgets.FloatSlider', 'w.FloatSlider', ([], {'min': '(0)', 'max': '(89)', 'value': '(0)', 'step': '(1)', 'description': '"""VZA (deg.)"""'}), "(min=0, max=89, value=0, step=1, description='VZA (deg.)', **\n slide_kwargs)\n", (3937, 4016), True, 'import ipywidgets as w\n'), ((4042, 4168), 'ipywidgets.FloatSlider', 'w.FloatSlider', ([], {'min': '(0.97)', 'max': '(1)', 'value': '(0.99)', 'step': '(0.001)', 'description': '"""$\\\\epsilon_V$"""', 'readout_format': '""".3f"""'}), "(min=0.97, max=1, value=0.99, step=0.001, description=\n '$\\\\epsilon_V$', readout_format='.3f', **slide_kwargs)\n", (4055, 4168), True, 'import ipywidgets as w\n'), ((4212, 4337), 'ipywidgets.FloatSlider', 'w.FloatSlider', ([], {'min': '(0.9)', 'max': '(1)', 'value': '(0.97)', 'step': '(0.001)', 'description': '"""$\\\\epsilon_S$"""', 'readout_format': '""".3f"""'}), "(min=0.9, max=1, value=0.97, step=0.001, description=\n '$\\\\epsilon_S$', readout_format='.3f', **slide_kwargs)\n", (4225, 4337), True, 'import ipywidgets as w\n'), ((2290, 2319), 'pyTSEB.meteo_utils.calc_vapor_pressure', 'met.calc_vapor_pressure', (['TAIR'], {}), '(TAIR)\n', (2313, 2319), True, 'from pyTSEB import meteo_utils as met\n'), ((2563, 2632), 'pyTSEB.TSEB.met.calc_vapor_pressure', 'TSEB.met.calc_vapor_pressure', (["(CIMIS_DATA['Avg Air Temp (C)'] + 273.15)"], {}), "(CIMIS_DATA['Avg Air Temp (C)'] + 273.15)\n", (2591, 2632), False, 'from pyTSEB import TSEB\n'), ((4421, 4452), 'pyTSEB.net_radiation.leafangle_2_chi', 'rad.leafangle_2_chi', (['leaf_angle'], {}), '(leaf_angle)\n', (4440, 4452), True, 'from pyTSEB import net_radiation as rad\n'), ((4528, 4576), 'pyTSEB.TSEB.calc_F_theta_campbell', 'TSEB.calc_F_theta_campbell', (['VZAS', 'lai'], {'x_LAD': 'chi'}), '(VZAS, lai, x_LAD=chi)\n', (4554, 4576), False, 'from pyTSEB import TSEB\n'), ((4590, 4636), 'pyTSEB.TSEB.calc_F_theta_campbell', 
'TSEB.calc_F_theta_campbell', (['VZAS', 'lai'], {'x_LAD': '(1)'}), '(VZAS, lai, x_LAD=1)\n', (4616, 4636), False, 'from pyTSEB import TSEB\n'), ((4675, 4704), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': 'FIGSIZE'}), '(figsize=FIGSIZE)\n', (4687, 4704), True, 'import matplotlib.pyplot as plt\n'), ((4973, 4991), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (4989, 4991), True, 'import matplotlib.pyplot as plt\n'), ((4996, 5006), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5004, 5006), True, 'import matplotlib.pyplot as plt\n'), ((5426, 5465), 'pyTSEB.MO_similarity.calc_u_star', 'mo.calc_u_star', (['U', 'Z_U', 'l_mo', 'd_0', 'z_0m'], {}), '(U, Z_U, l_mo, d_0, z_0m)\n', (5440, 5465), True, 'from pyTSEB import MO_similarity as mo\n'), ((5498, 5515), 'numpy.ones', 'np.ones', (['ZS.shape'], {}), '(ZS.shape)\n', (5505, 5515), True, 'import numpy as np\n'), ((5526, 5578), 'pyTSEB.wind_profile.calc_u_C_star', 'wind.calc_u_C_star', (['u_friction', 'h_c', 'd_0', 'z_0m', 'l_mo'], {}), '(u_friction, h_c, d_0, z_0m, l_mo)\n', (5544, 5578), True, 'from pyTSEB import wind_profile as wind\n'), ((5596, 5654), 'pyTSEB.wind_profile.calc_u_C_star', 'wind.calc_u_C_star', (['u_friction', 'ZS[upper]', 'd_0', 'z_0m', 'l_mo'], {}), '(u_friction, ZS[upper], d_0, z_0m, l_mo)\n', (5614, 5654), True, 'from pyTSEB import wind_profile as wind\n'), ((5673, 5733), 'pyTSEB.wind_profile.calc_u_Goudriaan', 'wind.calc_u_Goudriaan', (['u_c', 'h_c', 'lai', 'LEAF_WIDTH', 'ZS[~upper]'], {}), '(u_c, h_c, lai, LEAF_WIDTH, ZS[~upper])\n', (5694, 5733), True, 'from pyTSEB import wind_profile as wind\n'), ((6185, 6224), 'pyTSEB.MO_similarity.calc_u_star', 'mo.calc_u_star', (['U', 'Z_U', 'l_mo', 'd_0', 'z_0m'], {}), '(U, Z_U, l_mo, d_0, z_0m)\n', (6199, 6224), True, 'from pyTSEB import MO_similarity as mo\n'), ((6256, 6273), 'numpy.ones', 'np.ones', (['ZS.shape'], {}), '(ZS.shape)\n', (6263, 6273), True, 'import numpy as np\n'), ((6284, 6335), 
'pyTSEB.wind_profile.calc_u_C_star', 'wind.calc_u_C_star', (['u_friction', 'hc', 'd_0', 'z_0m', 'l_mo'], {}), '(u_friction, hc, d_0, z_0m, l_mo)\n', (6302, 6335), True, 'from pyTSEB import wind_profile as wind\n'), ((6353, 6411), 'pyTSEB.wind_profile.calc_u_C_star', 'wind.calc_u_C_star', (['u_friction', 'ZS[upper]', 'd_0', 'z_0m', 'l_mo'], {}), '(u_friction, ZS[upper], d_0, z_0m, l_mo)\n', (6371, 6411), True, 'from pyTSEB import wind_profile as wind\n'), ((6467, 6508), 'pyTSEB.wind_profile.canopy_shape', 'wind.canopy_shape', (['hc'], {'h_b': 'h_b', 'h_max': '(0.5)'}), '(hc, h_b=h_b, h_max=0.5)\n', (6484, 6508), True, 'from pyTSEB import wind_profile as wind\n'), ((6617, 6672), 'pyTSEB.wind_profile.calc_canopy_distribution', 'wind.calc_canopy_distribution', (['Xi_max', 'sigma_u', 'sigma_l'], {}), '(Xi_max, sigma_u, sigma_l)\n', (6646, 6672), True, 'from pyTSEB import wind_profile as wind\n'), ((6687, 6733), 'pyTSEB.wind_profile.calc_cummulative_canopy_distribution', 'wind.calc_cummulative_canopy_distribution', (['f_a'], {}), '(f_a)\n', (6728, 6733), True, 'from pyTSEB import wind_profile as wind\n'), ((7127, 7154), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': 'FIGSIZE'}), '(figsize=FIGSIZE)\n', (7137, 7154), True, 'import matplotlib.pyplot as plt\n'), ((7211, 7228), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(0, hc)'], {}), '((0, hc))\n', (7219, 7228), True, 'import matplotlib.pyplot as plt\n'), ((7233, 7252), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(0, None)'], {}), '((0, None))\n', (7241, 7252), True, 'import matplotlib.pyplot as plt\n'), ((7257, 7285), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Foliar density"""'], {}), "('Foliar density')\n", (7267, 7285), True, 'import matplotlib.pyplot as plt\n'), ((7290, 7327), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Height above ground (m)"""'], {}), "('Height above ground (m)')\n", (7300, 7327), True, 'import matplotlib.pyplot as plt\n'), ((7332, 7350), 'matplotlib.pyplot.tight_layout', 
'plt.tight_layout', ([], {}), '()\n', (7348, 7350), True, 'import matplotlib.pyplot as plt\n'), ((7355, 7365), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (7363, 7365), True, 'import matplotlib.pyplot as plt\n'), ((7538, 7567), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': 'FIGSIZE'}), '(figsize=FIGSIZE)\n', (7550, 7567), True, 'import matplotlib.pyplot as plt\n'), ((8124, 8142), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (8140, 8142), True, 'import matplotlib.pyplot as plt\n'), ((8147, 8157), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (8155, 8157), True, 'import matplotlib.pyplot as plt\n'), ((8751, 8780), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': 'FIGSIZE'}), '(figsize=FIGSIZE)\n', (8763, 8780), True, 'import matplotlib.pyplot as plt\n'), ((8983, 9001), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (8999, 9001), True, 'import matplotlib.pyplot as plt\n'), ((9006, 9016), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (9014, 9016), True, 'import matplotlib.pyplot as plt\n'), ((11010, 11059), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(3)', '(1)'], {'sharex': '(True)', 'figsize': 'FIG_SIZE'}), '(3, 1, sharex=True, figsize=FIG_SIZE)\n', (11022, 11059), True, 'import matplotlib.pyplot as plt\n'), ((12647, 12665), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (12663, 12665), True, 'import matplotlib.pyplot as plt\n'), ((12670, 12699), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'hspace': '(0)'}), '(hspace=0)\n', (12689, 12699), True, 'import matplotlib.pyplot as plt\n'), ((12704, 12714), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (12712, 12714), True, 'import matplotlib.pyplot as plt\n'), ((12840, 12885), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(3)'], {'figsize': 'FIGSIZE', 'sharex': '(True)'}), '(3, figsize=FIGSIZE, sharex=True)\n', (12852, 12885), True, 'import 
matplotlib.pyplot as plt\n'), ((14080, 14098), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (14096, 14098), True, 'import matplotlib.pyplot as plt\n'), ((14103, 14132), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'hspace': '(0)'}), '(hspace=0)\n', (14122, 14132), True, 'import matplotlib.pyplot as plt\n'), ((14137, 14147), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (14145, 14147), True, 'import matplotlib.pyplot as plt\n'), ((14227, 14247), 'numpy.full', 'np.full', (['N_SIM', 'TAIR'], {}), '(N_SIM, TAIR)\n', (14234, 14247), True, 'import numpy as np\n'), ((14419, 14448), 'pyTSEB.meteo_utils.calc_vapor_pressure', 'met.calc_vapor_pressure', (['tair'], {}), '(tair)\n', (14442, 14448), True, 'from pyTSEB import meteo_utils as met\n'), ((16482, 16537), 'pyTSEB.meteo_utils.flux_2_evaporation', 'met.flux_2_evaporation', (['le_pm'], {'t_k': 'TAIR', 'time_domain': '(24)'}), '(le_pm, t_k=TAIR, time_domain=24)\n', (16504, 16537), True, 'from pyTSEB import meteo_utils as met\n'), ((16551, 16607), 'pyTSEB.meteo_utils.flux_2_evaporation', 'met.flux_2_evaporation', (['le_fao'], {'t_k': 'TAIR', 'time_domain': '(24)'}), '(le_fao, t_k=TAIR, time_domain=24)\n', (16573, 16607), True, 'from pyTSEB import meteo_utils as met\n'), ((16617, 16669), 'pyTSEB.meteo_utils.flux_2_evaporation', 'met.flux_2_evaporation', (['le'], {'t_k': 'TAIR', 'time_domain': '(24)'}), '(le, t_k=TAIR, time_domain=24)\n', (16639, 16669), True, 'from pyTSEB import meteo_utils as met\n'), ((16681, 16735), 'pyTSEB.meteo_utils.flux_2_evaporation', 'met.flux_2_evaporation', (['le_c'], {'t_k': 'TAIR', 'time_domain': '(24)'}), '(le_c, t_k=TAIR, time_domain=24)\n', (16703, 16735), True, 'from pyTSEB import meteo_utils as met\n'), ((17140, 17171), 'pyTSEB.net_radiation.leafangle_2_chi', 'rad.leafangle_2_chi', (['leaf_angle'], {}), '(leaf_angle)\n', (17159, 17171), True, 'from pyTSEB import net_radiation as rad\n'), ((17181, 17229), 
'pyTSEB.TSEB.calc_F_theta_campbell', 'TSEB.calc_F_theta_campbell', (['vza', 'LAIS'], {'x_LAD': 'chi'}), '(vza, LAIS, x_LAD=chi)\n', (17207, 17229), False, 'from pyTSEB import TSEB\n'), ((17371, 17422), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'nrows': '(2)', 'figsize': 'FIGSIZE', 'sharex': '(True)'}), '(nrows=2, figsize=FIGSIZE, sharex=True)\n', (17383, 17422), True, 'import matplotlib.pyplot as plt\n'), ((17891, 17909), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (17907, 17909), True, 'import matplotlib.pyplot as plt\n'), ((17914, 17943), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'hspace': '(0)'}), '(hspace=0)\n', (17933, 17943), True, 'import matplotlib.pyplot as plt\n'), ((17948, 17958), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (17956, 17958), True, 'import matplotlib.pyplot as plt\n'), ((18020, 18081), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'nrows': '(2)', 'ncols': '(3)', 'figsize': 'FIGSIZE', 'sharex': '"""col"""'}), "(nrows=2, ncols=3, figsize=FIGSIZE, sharex='col')\n", (18032, 18081), True, 'import matplotlib.pyplot as plt\n'), ((19010, 19028), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (19026, 19028), True, 'import matplotlib.pyplot as plt\n'), ((19033, 19062), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'hspace': '(0)'}), '(hspace=0)\n', (19052, 19062), True, 'import matplotlib.pyplot as plt\n'), ((19067, 19077), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (19075, 19077), True, 'import matplotlib.pyplot as plt\n'), ((19248, 19267), 'numpy.maximum', 'np.maximum', (['(0)', 'lais'], {}), '(0, lais)\n', (19258, 19267), True, 'import numpy as np\n'), ((20370, 20422), 'pyTSEB.meteo_utils.flux_2_evaporation', 'met.flux_2_evaporation', (['le'], {'t_k': 'TAIR', 'time_domain': '(24)'}), '(le, t_k=TAIR, time_domain=24)\n', (20392, 20422), True, 'from pyTSEB import meteo_utils as met\n'), ((20486, 20530), 
'os.path.join', 'os.path.join', (['OUTPUT_FOLDER', '"""lai_vs_kc.csv"""'], {}), "(OUTPUT_FOLDER, 'lai_vs_kc.csv')\n", (20498, 20530), False, 'import os\n'), ((20620, 20661), 'pandas.DataFrame', 'pd.DataFrame', (["{'LAI': lais, 'Kc': kcs_sw}"], {}), "({'LAI': lais, 'Kc': kcs_sw})\n", (20632, 20661), True, 'import pandas as pd\n'), ((20959, 20987), 'numpy.full', 'np.full', (['(1, N_SIM)', '(1 - e_s)'], {}), '((1, N_SIM), 1 - e_s)\n', (20966, 20987), True, 'import numpy as np\n'), ((21003, 21031), 'numpy.full', 'np.full', (['(1, N_SIM)', '(1 - e_v)'], {}), '((1, N_SIM), 1 - e_v)\n', (21010, 21031), True, 'import numpy as np\n'), ((21047, 21067), 'numpy.zeros', 'np.zeros', (['(1, N_SIM)'], {}), '((1, N_SIM))\n', (21055, 21067), True, 'import numpy as np\n'), ((21966, 21997), 'pyTSEB.meteo_utils.calc_stephan_boltzmann', 'met.calc_stephan_boltzmann', (['t_c'], {}), '(t_c)\n', (21992, 21997), True, 'from pyTSEB import meteo_utils as met\n'), ((22009, 22040), 'pyTSEB.meteo_utils.calc_stephan_boltzmann', 'met.calc_stephan_boltzmann', (['t_s'], {}), '(t_s)\n', (22035, 22040), True, 'from pyTSEB import meteo_utils as met\n'), ((22053, 22086), 'pyTSEB.meteo_utils.calc_stephan_boltzmann', 'met.calc_stephan_boltzmann', (['t_atm'], {}), '(t_atm)\n', (22079, 22086), True, 'from pyTSEB import meteo_utils as met\n'), ((781, 806), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (796, 806), False, 'import os\n'), ((863, 888), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (878, 888), False, 'import os\n'), ((8616, 8662), 'pyTSEB.resistances.calc_R_A', 'res.calc_R_A', (['Z_U', 'u_friction', 'l_mo', 'd_0', 'z_0m'], {}), '(Z_U, u_friction, l_mo, d_0, z_0m)\n', (8628, 8662), True, 'from pyTSEB import resistances as res\n'), ((13666, 13680), 'numpy.min', 'np.min', (['values'], {}), '(values)\n', (13672, 13680), True, 'import numpy as np\n'), ((13682, 13696), 'numpy.max', 'np.max', (['values'], {}), '(values)\n', (13688, 13696), True, 
'import numpy as np\n'), ((14319, 14338), 'numpy.full', 'np.full', (['N_SIM', 'SDN'], {}), '(N_SIM, SDN)\n', (14326, 14338), True, 'import numpy as np\n'), ((14369, 14388), 'numpy.exp', 'np.exp', (['(-0.5 * LAIS)'], {}), '(-0.5 * LAIS)\n', (14375, 14388), True, 'import numpy as np\n'), ((14477, 14505), 'pyTSEB.net_radiation.calc_emiss_atm', 'rad.calc_emiss_atm', (['ea', 'tair'], {}), '(ea, tair)\n', (14495, 14505), True, 'from pyTSEB import net_radiation as rad\n'), ((14508, 14540), 'pyTSEB.meteo_utils.calc_stephan_boltzmann', 'met.calc_stephan_boltzmann', (['tair'], {}), '(tair)\n', (14534, 14540), True, 'from pyTSEB import meteo_utils as met\n'), ((14572, 14591), 'numpy.full', 'np.full', (['N_SIM', 'h_c'], {}), '(N_SIM, h_c)\n', (14579, 14591), True, 'import numpy as np\n'), ((16311, 16326), 'numpy.asarray', 'np.asarray', (['SDN'], {}), '(SDN)\n', (16321, 16326), True, 'import numpy as np\n'), ((18795, 18821), 'matplotlib.dates.DateFormatter', 'mdates.DateFormatter', (['"""%b"""'], {}), "('%b')\n", (18815, 18821), True, 'import matplotlib.dates as mdates\n'), ((19477, 19496), 'numpy.exp', 'np.exp', (['(-0.5 * lais)'], {}), '(-0.5 * lais)\n', (19483, 19496), True, 'import numpy as np\n'), ((19701, 19729), 'pyTSEB.net_radiation.calc_emiss_atm', 'rad.calc_emiss_atm', (['ea', 'tair'], {}), '(ea, tair)\n', (19719, 19729), True, 'from pyTSEB import net_radiation as rad\n'), ((19732, 19764), 'pyTSEB.meteo_utils.calc_stephan_boltzmann', 'met.calc_stephan_boltzmann', (['tair'], {}), '(tair)\n', (19758, 19764), True, 'from pyTSEB import meteo_utils as met\n'), ((19796, 19817), 'numpy.full_like', 'np.full_like', (['sn', 'h_c'], {}), '(sn, h_c)\n', (19808, 19817), True, 'import numpy as np\n'), ((20542, 20570), 'os.path.isdir', 'os.path.isdir', (['OUTPUT_FOLDER'], {}), '(OUTPUT_FOLDER)\n', (20555, 20570), False, 'import os\n'), ((20580, 20606), 'os.makedirs', 'os.makedirs', (['OUTPUT_FOLDER'], {}), '(OUTPUT_FOLDER)\n', (20591, 20606), False, 'import os\n'), ((21131, 21157), 
'numpy.full', 'np.full', (['N_SIM', 'leaf_angle'], {}), '(N_SIM, leaf_angle)\n', (21138, 21157), True, 'import numpy as np\n'), ((21429, 21449), 'numpy.full', 'np.full', (['N_SIM', '(0.01)'], {}), '(N_SIM, 0.01)\n', (21436, 21449), True, 'import numpy as np\n'), ((21521, 21539), 'numpy.full', 'np.full', (['N_SIM', '(37)'], {}), '(N_SIM, 37)\n', (21528, 21539), True, 'import numpy as np\n'), ((21573, 21592), 'numpy.full', 'np.full', (['N_SIM', 'vza'], {}), '(N_SIM, vza)\n', (21580, 21592), True, 'import numpy as np\n'), ((21626, 21641), 'numpy.zeros', 'np.zeros', (['N_SIM'], {}), '(N_SIM)\n', (21634, 21641), True, 'import numpy as np\n'), ((6780, 6794), 'numpy.sum', 'np.sum', (['(~upper)'], {}), '(~upper)\n', (6786, 6794), True, 'import numpy as np\n'), ((6848, 6862), 'numpy.sum', 'np.sum', (['(~upper)'], {}), '(~upper)\n', (6854, 6862), True, 'import numpy as np\n'), ((6915, 6929), 'numpy.sum', 'np.sum', (['(~upper)'], {}), '(~upper)\n', (6921, 6929), True, 'import numpy as np\n'), ((7192, 7204), 'numpy.size', 'np.size', (['f_a'], {}), '(f_a)\n', (7199, 7204), True, 'import numpy as np\n'), ((8265, 8286), 'numpy.full_like', 'np.full_like', (['ZS', 'h_c'], {}), '(ZS, h_c)\n', (8277, 8286), True, 'import numpy as np\n'), ((8455, 8476), 'numpy.full_like', 'np.full_like', (['US', 'Z_U'], {}), '(US, Z_U)\n', (8467, 8476), True, 'import numpy as np\n'), ((9278, 9298), 'numpy.full_like', 'np.full_like', (['ZS', 'hc'], {}), '(ZS, hc)\n', (9290, 9298), True, 'import numpy as np\n'), ((9395, 9434), 'pyTSEB.MO_similarity.calc_u_star', 'mo.calc_u_star', (['u', 'Z_U', 'l_mo', 'd_0', 'z_0m'], {}), '(u, Z_U, l_mo, d_0, z_0m)\n', (9409, 9434), True, 'from pyTSEB import MO_similarity as mo\n'), ((9453, 9504), 'pyTSEB.wind_profile.calc_u_C_star', 'wind.calc_u_C_star', (['u_friction', 'hc', 'd_0', 'z_0m', 'l_mo'], {}), '(u_friction, hc, d_0, z_0m, l_mo)\n', (9471, 9504), True, 'from pyTSEB import wind_profile as wind\n'), ((10710, 10756), 'pyTSEB.resistances.calc_R_A', 
'res.calc_R_A', (['Z_U', 'u_friction', 'l_mo', 'd_0', 'z_0m'], {}), '(Z_U, u_friction, l_mo, d_0, z_0m)\n', (10722, 10756), True, 'from pyTSEB import resistances as res\n'), ((14266, 14301), 'pyTSEB.TSEB.res.molm2s1_2_ms1', 'TSEB.res.molm2s1_2_ms1', (['tair', 'PRESS'], {}), '(tair, PRESS)\n', (14288, 14301), False, 'from pyTSEB import TSEB\n'), ((17872, 17884), 'numpy.max', 'np.max', (['LAIS'], {}), '(LAIS)\n', (17878, 17884), True, 'import numpy as np\n'), ((19593, 19628), 'pyTSEB.TSEB.res.molm2s1_2_ms1', 'TSEB.res.molm2s1_2_ms1', (['tair', 'PRESS'], {}), '(tair, PRESS)\n', (19615, 19628), False, 'from pyTSEB import TSEB\n'), ((22545, 22572), 'pyTSEB.resistances.molm2s1_2_ms1', 'res.molm2s1_2_ms1', (['t_c'], {'p': 'p'}), '(t_c, p=p)\n', (22562, 22572), True, 'from pyTSEB import resistances as res\n'), ((9323, 9343), 'numpy.full_like', 'np.full_like', (['ZS', 'hc'], {}), '(ZS, hc)\n', (9335, 9343), True, 'import numpy as np\n'), ((9590, 9646), 'pyTSEB.wind_profile.calc_u_Goudriaan', 'wind.calc_u_Goudriaan', (['u_c', 'hc', 'lai', 'leaf_width', 'z0_soil'], {}), '(u_c, hc, lai, leaf_width, z0_soil)\n', (9611, 9646), True, 'from pyTSEB import wind_profile as wind\n'), ((9672, 9731), 'pyTSEB.wind_profile.calc_u_Goudriaan', 'wind.calc_u_Goudriaan', (['u_c', 'hc', 'lai', 'leaf_width', '(d_0 + z_0m)'], {}), '(u_c, hc, lai, leaf_width, d_0 + z_0m)\n', (9693, 9731), True, 'from pyTSEB import wind_profile as wind\n'), ((9753, 9797), 'pyTSEB.resistances.calc_R_x_Norman', 'res.calc_R_x_Norman', (['lai', 'leaf_width', 'u_d_zm'], {}), '(lai, leaf_width, u_d_zm)\n', (9772, 9797), True, 'from pyTSEB import resistances as res\n'), ((9819, 9852), 'pyTSEB.resistances.calc_R_S_Kustas', 'res.calc_R_S_Kustas', (['u_S', 'delta_t'], {}), '(u_S, delta_t)\n', (9838, 9852), True, 'from pyTSEB import resistances as res\n'), ((15028, 15043), 'numpy.zeros', 'np.zeros', (['N_SIM'], {}), '(N_SIM)\n', (15036, 15043), True, 'import numpy as np\n'), ((15974, 15989), 'numpy.zeros', 'np.zeros', 
(['N_SIM'], {}), '(N_SIM)\n', (15982, 15989), True, 'import numpy as np\n'), ((19214, 19231), 'numpy.cos', 'np.cos', (['doy_angle'], {}), '(doy_angle)\n', (19220, 19231), True, 'import numpy as np\n'), ((20295, 20313), 'numpy.zeros', 'np.zeros', (['sn.shape'], {}), '(sn.shape)\n', (20303, 20313), True, 'import numpy as np\n'), ((9913, 9957), 'pyTSEB.resistances.calc_R_x_Choudhury', 'res.calc_R_x_Choudhury', (['u_c', 'lai', 'leaf_width'], {}), '(u_c, lai, leaf_width)\n', (9935, 9957), True, 'from pyTSEB import resistances as res\n'), ((9979, 10042), 'pyTSEB.resistances.calc_R_S_Choudhury', 'res.calc_R_S_Choudhury', (['u_friction', 'hc', 'z_0m', 'd_0', 'Z_U', 'z0_soil'], {}), '(u_friction, hc, z_0m, d_0, Z_U, z0_soil)\n', (10001, 10042), True, 'from pyTSEB import resistances as res\n'), ((10148, 10200), 'pyTSEB.resistances.calc_R_x_McNaughton', 'res.calc_R_x_McNaughton', (['lai', 'leaf_width', 'u_friction'], {}), '(lai, leaf_width, u_friction)\n', (10171, 10200), True, 'from pyTSEB import resistances as res\n'), ((10222, 10257), 'pyTSEB.resistances.calc_R_S_McNaughton', 'res.calc_R_S_McNaughton', (['u_friction'], {}), '(u_friction)\n', (10245, 10257), True, 'from pyTSEB import resistances as res\n'), ((10323, 10365), 'pyTSEB.wind_profile.calc_A_Goudriaan', 'wind.calc_A_Goudriaan', (['hc', 'lai', 'leaf_width'], {}), '(hc, lai, leaf_width)\n', (10344, 10365), True, 'from pyTSEB import wind_profile as wind\n'), ((10432, 10501), 'pyTSEB.resistances.calc_R_x_Choudhury', 'res.calc_R_x_Choudhury', (['u_c', 'lai', 'leaf_width'], {'alpha_prime': 'alpha_prime'}), '(u_c, lai, leaf_width, alpha_prime=alpha_prime)\n', (10454, 10501), True, 'from pyTSEB import resistances as res\n'), ((10567, 10652), 'pyTSEB.resistances.calc_R_S_Choudhury', 'res.calc_R_S_Choudhury', (['u_friction', 'hc', 'z_0m', 'd_0', 'Z_U', 'z0_soil'], {'alpha_k': 'alpha_k'}), '(u_friction, hc, z_0m, d_0, Z_U, z0_soil, alpha_k=alpha_k\n )\n', (10589, 10652), True, 'from pyTSEB import resistances as res\n')] |
import numpy as np
class Data_Source( object ):
    """In-memory train/test data provider with epoch shuffling and mini-batching.

    Samples and labels are accumulated row-wise through the ``append_*``
    methods (or loaded from ``.npy`` files via :meth:`load_unsplit_samples`)
    and then served in mini-batches by :meth:`next_batch`.
    """

    def __init__( self, opts ):
        # ``opts`` must expose ``batch_size``; load_unsplit_samples() also
        # reads the four ``*_path`` attributes from it.
        self.batch_size = opts.batch_size
        self.train_sample = None
        self.test_sample = None
        self.train_label = None
        self.test_label = None
        self.num_train = 0
        self.num_test = 0
        self.cur_train = 0
        self.cur_test = 0
        self.opts = opts

    def append_train_sample( self, sample_matrix ):
        """Row-append a 2-D matrix of training samples."""
        if self.train_sample is None:
            self.train_sample = sample_matrix
        else:
            self.train_sample = np.concatenate( ( self.train_sample, sample_matrix ), axis = 0 )
        self.num_train += sample_matrix.shape[0]

    def append_train_label( self, label_matrix ):
        """Row-append a 2-D matrix of training labels."""
        if self.train_label is None:
            self.train_label = label_matrix
        else:
            self.train_label = np.concatenate( ( self.train_label, label_matrix ), axis = 0 )

    def append_test_sample( self, sample_matrix ):
        """Row-append a 2-D matrix of test samples."""
        if self.test_sample is None:
            self.test_sample = sample_matrix
        else:
            self.test_sample = np.concatenate( ( self.test_sample, sample_matrix ), axis = 0 )
        self.num_test += sample_matrix.shape[0]

    def append_test_label( self, label_matrix ):
        """Row-append a 2-D matrix of test labels."""
        if self.test_label is None:
            self.test_label = label_matrix
        else:
            self.test_label = np.concatenate( ( self.test_label, label_matrix ), axis = 0 )

    def _shuffle_train( self ):
        # One shared permutation keeps samples and labels aligned.
        # (``np.int`` was removed in NumPy 1.24; plain ``int`` is supported.)
        index = np.arange( self.num_train, dtype = int )
        np.random.shuffle( index )
        self.train_label = self.train_label[ index ]
        self.train_sample = self.train_sample[ index ]

    def _shuffle_test( self ):
        # Same shared-permutation shuffle for the test split.
        index = np.arange( self.num_test, dtype = int )
        np.random.shuffle( index )
        self.test_label = self.test_label[ index ]
        self.test_sample = self.test_sample[ index ]

    def load_unsplit_samples( self ):
        """Load the four ``.npy`` arrays named in ``opts`` and shuffle once.

        Raises
        ------
        EnvironmentError
            If sample and label row counts disagree for either split.
        """
        self.append_train_sample( np.load( self.opts.train_sample_path ) )
        self.append_test_sample( np.load( self.opts.test_sample_path ) )
        self.append_train_label( np.load( self.opts.train_label_path ) )
        self.append_test_label( np.load( self.opts.test_label_path ) )
        # Initial shuffle so batches are not served in file order.
        self._shuffle_train()
        self._shuffle_test()
        print(self.num_train, self.train_label.shape[0], self.num_test, self.test_label.shape[0])
        if ( self.num_train != self.train_label.shape[0] or self.num_test != self.test_label.shape[0]):
            raise EnvironmentError( "Train or test label or sample does not match" )

    def next_batch( self, batch_size = -1 ):
        """Return the next ``(sample, label)`` mini-batch; reshuffle near epoch end."""
        if batch_size == -1:
            batch_size = self.batch_size
        sample = self.train_sample[ self.cur_train: self.cur_train + batch_size, : ]
        label = self.train_label[ self.cur_train: self.cur_train + batch_size, : ]
        # Advance by the batch size actually served.  The original always
        # advanced by self.batch_size, which skipped or repeated rows when
        # the caller passed an explicit batch_size.
        self.cur_train += batch_size
        if self.cur_train + batch_size >= self.num_train:
            self._shuffle_train()
            self.cur_train = 0
        return sample, label

    def get_test( self, batch_size = -1 ):
        """Return the test set, optionally truncated to the first ``batch_size`` rows."""
        if batch_size == -1:
            return self.test_sample, self.test_label
        return self.test_sample[ :batch_size ], self.test_label[ :batch_size ]
"numpy.load",
"numpy.arange",
"numpy.concatenate",
"numpy.random.shuffle"
] | [((2008, 2047), 'numpy.arange', 'np.arange', (['self.num_train'], {'dtype': 'np.int'}), '(self.num_train, dtype=np.int)\n', (2017, 2047), True, 'import numpy as np\n'), ((2060, 2084), 'numpy.random.shuffle', 'np.random.shuffle', (['index'], {}), '(index)\n', (2077, 2084), True, 'import numpy as np\n'), ((2212, 2250), 'numpy.arange', 'np.arange', (['self.num_test'], {'dtype': 'np.int'}), '(self.num_test, dtype=np.int)\n', (2221, 2250), True, 'import numpy as np\n'), ((2263, 2287), 'numpy.random.shuffle', 'np.random.shuffle', (['index'], {}), '(index)\n', (2280, 2287), True, 'import numpy as np\n'), ((579, 637), 'numpy.concatenate', 'np.concatenate', (['(self.train_sample, sample_matrix)'], {'axis': '(0)'}), '((self.train_sample, sample_matrix), axis=0)\n', (593, 637), True, 'import numpy as np\n'), ((870, 926), 'numpy.concatenate', 'np.concatenate', (['(self.train_label, label_matrix)'], {'axis': '(0)'}), '((self.train_label, label_matrix), axis=0)\n', (884, 926), True, 'import numpy as np\n'), ((1113, 1170), 'numpy.concatenate', 'np.concatenate', (['(self.test_sample, sample_matrix)'], {'axis': '(0)'}), '((self.test_sample, sample_matrix), axis=0)\n', (1127, 1170), True, 'import numpy as np\n'), ((1398, 1453), 'numpy.concatenate', 'np.concatenate', (['(self.test_label, label_matrix)'], {'axis': '(0)'}), '((self.test_label, label_matrix), axis=0)\n', (1412, 1453), True, 'import numpy as np\n'), ((1749, 1775), 'numpy.load', 'np.load', (['train_sample_path'], {}), '(train_sample_path)\n', (1756, 1775), True, 'import numpy as np\n'), ((1813, 1838), 'numpy.load', 'np.load', (['test_sample_path'], {}), '(test_sample_path)\n', (1820, 1838), True, 'import numpy as np\n'), ((1875, 1900), 'numpy.load', 'np.load', (['train_label_path'], {}), '(train_label_path)\n', (1882, 1900), True, 'import numpy as np\n'), ((1937, 1961), 'numpy.load', 'np.load', (['test_label_path'], {}), '(test_label_path)\n', (1944, 1961), True, 'import numpy as np\n'), ((3126, 3165), 'numpy.arange', 
'np.arange', (['self.num_train'], {'dtype': 'np.int'}), '(self.num_train, dtype=np.int)\n', (3135, 3165), True, 'import numpy as np\n'), ((3182, 3206), 'numpy.random.shuffle', 'np.random.shuffle', (['index'], {}), '(index)\n', (3199, 3206), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
"""
@author: <NAME>
Harmonize the features between the target and the source data so that:
- same feature space is considered between the source and the target.
- features are odered in the same way, avoiding permutation issue.
"""
import numpy as np
import pandas as pd
def harmonize_feature_naming(target_data,
                             source_data,
                             target_gene_names,
                             source_gene_names,
                             remove_mytochondria=False,
                             gene_lookup_file=None):
    """Restrict target and source matrices to their shared genes, aligned.

    Both matrices are reduced to the intersection of their gene-name
    vectors and reordered so that column ``j`` refers to the same gene
    (``common_genes[j]``) in BOTH outputs.  The original
    ``np.where(np.isin(...))`` indexing kept each matrix's own column
    order, so the two outputs (and ``common_genes``) could end up mutually
    permuted -- defeating the module's stated purpose.

    Parameters
    ----------
    target_data, source_data : 2-D arrays (samples x genes)
    target_gene_names, source_gene_names : 1-D arrays
        One gene identifier per column of the corresponding matrix.
        Assumed unique within each array (duplicates already broke the
        original count alignment).
    remove_mytochondria : bool
        If True (and a lookup file is given), keep only genes on nuclear
        chromosomes 1-22, X or Y.
    gene_lookup_file : str or None
        CSV with at least 'ENSEMBL' and 'chromosome_name' columns.

    Returns
    -------
    (target_data, source_data, common_genes)
    """
    # return_indices yields, for each common gene (sorted), its column in
    # each original matrix -- this aligns both matrices to common_genes.
    common_genes, target_idx, source_idx = np.intersect1d(
        target_gene_names, source_gene_names, return_indices=True)
    target_data = target_data[:, target_idx]
    source_data = source_data[:, source_idx]
    if remove_mytochondria and gene_lookup_file is not None:
        # Load the annotation table and drop ambiguous (duplicated) ENSEMBL
        # entries entirely.
        genes_lookup_table = pd.read_csv(gene_lookup_file, delimiter=',')
        print(genes_lookup_table.shape)
        genes_lookup_table = genes_lookup_table.drop_duplicates(subset=['ENSEMBL'], keep=False)
        print(genes_lookup_table.shape)
        # Map every common gene to its chromosome (NaN when not found).
        df_genes_name = pd.DataFrame(common_genes, columns=['ENSEMBL'])
        df_genes_name = df_genes_name.merge(genes_lookup_table, on='ENSEMBL', how='left')
        chromosome_name = df_genes_name['chromosome_name'].values
        # Keep nuclear chromosomes only; this drops mitochondrial genes and
        # unplaced scaffolds.
        accepted_chromosomes = np.array(list(range(1, 23)) + ['X', 'Y'])
        keep = np.isin(chromosome_name, accepted_chromosomes)
        target_data = target_data[:, keep]
        source_data = source_data[:, keep]
        common_genes = common_genes[keep]
    return target_data, source_data, common_genes
"pandas.read_csv",
"numpy.isin",
"numpy.intersect1d",
"pandas.DataFrame"
] | [((631, 683), 'numpy.intersect1d', 'np.intersect1d', (['target_gene_names', 'source_gene_names'], {}), '(target_gene_names, source_gene_names)\n', (645, 683), True, 'import numpy as np\n'), ((1138, 1182), 'pandas.read_csv', 'pd.read_csv', (['gene_lookup_file'], {'delimiter': '""","""'}), "(gene_lookup_file, delimiter=',')\n", (1149, 1182), True, 'import pandas as pd\n'), ((1414, 1461), 'pandas.DataFrame', 'pd.DataFrame', (['common_genes'], {'columns': "['ENSEMBL']"}), "(common_genes, columns=['ENSEMBL'])\n", (1426, 1461), True, 'import pandas as pd\n'), ((760, 800), 'numpy.isin', 'np.isin', (['target_gene_names', 'common_genes'], {}), '(target_gene_names, common_genes)\n', (767, 800), True, 'import numpy as np\n'), ((845, 885), 'numpy.isin', 'np.isin', (['source_gene_names', 'common_genes'], {}), '(source_gene_names, common_genes)\n', (852, 885), True, 'import numpy as np\n'), ((1767, 1813), 'numpy.isin', 'np.isin', (['chromosome_name', 'accepted_chromosomes'], {}), '(chromosome_name, accepted_chromosomes)\n', (1774, 1813), True, 'import numpy as np\n')] |
from typing import List
from abc import ABC, abstractmethod
import numpy as np
from nodes import *
class Operation(Node, ABC):
    """Base class for computation-graph operations.

    On construction the operation registers itself as a consumer of every
    input node, wiring the graph for the backward pass.
    """

    @abstractmethod
    def symbol(self):
        """Printable symbol; concrete subclasses override this with a class attribute."""
        ...

    def __init__(self, input_nodes: List[Node] = None):
        # Guard the None default: the original iterated input_nodes
        # unconditionally and raised TypeError when it was omitted.
        if input_nodes is None:
            input_nodes = []
        super().__init__(input_nodes)
        for node in input_nodes:
            node.consumers.append(self)

    def __str__(self):
        return self.symbol
class Unary(Operation, ABC):
    """An operation consuming exactly one input node."""

    def __init__(self, i):
        super().__init__([i])

    @abstractmethod
    def forward_op(self, i):
        """Compute this operation's value from the single input value."""
        ...

    def forward(self, *inputs, **placeholders):
        # Route the lone positional input to the concrete implementation.
        return self.forward_op(inputs[0])
class Negative(Unary):
    """Unary minus: flips the sign of its input."""

    symbol = 'neg'

    def forward_op(self, i):
        return -i

    def backward(self, gradient, ctx):
        # d(-x)/dx = -1, so the upstream gradient is simply negated.
        return -gradient
class Log(Unary):
    """Natural logarithm (base e)."""

    symbol = 'log'

    def forward_op(self, i):
        return np.log(i)

    def backward(self, gradient, ctx):
        # d log(x)/dx = 1/x, with x read from the cached forward context.
        return gradient / ctx[self.inputs[0]]
class Sigmoid(Unary):
    """Logistic sigmoid: 1 / (1 + exp(-x))."""

    symbol = '𝜎'

    def forward_op(self, i):
        # Written as 1/(1+exp(-i)) rather than exp(i)/(exp(i)+1): the
        # original overflowed to inf/inf = NaN for large positive i,
        # while this form correctly saturates to 1.0 there (and to 0.0
        # for large negative i).
        return 1 / (1 + np.exp(-i))

    def backward(self, gradient, ctx):
        sigmoid = ctx[self]
        # d sigma/dx = sigma * (1 - sigma), reusing the cached forward value.
        return gradient * sigmoid * (1 - sigmoid)
class ReLU(Unary):
    """Rectified linear unit: max(0, x)."""

    symbol = 'relu'

    def forward_op(self, i):
        # np.maximum generalizes the original builtin max(0, i), which
        # raised ValueError ("truth value ... ambiguous") on array inputs.
        return np.maximum(0, i)

    def backward(self, gradient, ctx):
        out = ctx[self]
        # Pass the gradient through wherever the output is positive; the
        # boolean mask generalizes the original scalar-only conditional.
        return gradient * (out > 0)
class Binary(Operation, ABC):
    """An operation consuming exactly two input nodes."""

    def __init__(self, l, r):
        super().__init__([l, r])

    def forward(self, *inputs, **placeholders):
        # Bug fix: the original dropped the result (no ``return``), so
        # every binary op's forward pass evaluated to None, unlike
        # Unary.forward which returns its value.
        return self.forward_op(inputs[0], inputs[1])

    @abstractmethod
    def forward_op(self, l, r):
        ...
class Add(Binary):
    """Elementwise addition of two nodes."""

    symbol = '+'

    def forward_op(self, l, r):
        return np.add(l, r)

    def backward(self, gradient, ctx):
        # Addition routes the upstream gradient unchanged to both operands.
        return gradient, gradient
class Sub(Binary):
    """Elementwise subtraction: l - r."""

    symbol = '-'

    def forward_op(self, l, r):
        return np.subtract(l, r)

    def backward(self, gradient, ctx):
        # d(l-r)/dl = 1 and d(l-r)/dr = -1.
        return gradient, -gradient
class Mul(Binary):
    """Elementwise product of two nodes."""

    symbol = '*'

    def forward_op(self, l, r):
        return np.multiply(l, r)

    def backward(self, gradient, ctx):
        # Product rule: each operand's gradient is scaled by the other's
        # cached value.
        l_node, r_node = self.inputs
        return gradient * ctx[r_node], gradient * ctx[l_node]
class Div(Binary):
    """Elementwise division: l / r."""

    symbol = '/'

    def forward_op(self, l, r):
        return np.divide(l, r)

    def backward(self, gradient, ctx):
        num, den = self.inputs
        # d(l/r)/dl = 1/r and d(l/r)/dr = -l / r^2.
        return gradient * (1 / ctx[den]), gradient * (-ctx[num] / (ctx[den] ** 2))
class Pow(Binary):
    """Elementwise power: l ** r."""

    symbol = '^'

    def forward_op(self, l, r):
        return np.power(l, r)

    def backward(self, gradient, ctx):
        b_node, e_node = self.inputs
        b, e = ctx[b_node], ctx[e_node]
        # d(b^e)/db = e * b^(e-1)  and  d(b^e)/de = b^e * ln(b).
        grad_base = gradient * e * np.power(b, e - 1)
        grad_exponent = gradient * ctx[self] * np.log(b)
        return grad_base, grad_exponent
class Matmul(Binary):
    """Matrix multiplication: l @ r."""

    symbol = '@'

    def forward_op(self, l, r):
        return np.dot(l, r)

    def backward(self, gradient, ctx):
        # Bug fix: the original transposed the input *nodes* (left.T /
        # right.T) instead of their cached numeric values.  Every other
        # backward() in this file reads values through ctx; the standard
        # matmul gradients are G @ R^T and L^T @ G.
        left, right = self.inputs
        return np.dot(gradient, ctx[right].T), np.dot(ctx[left].T, gradient)
| [
"numpy.divide",
"numpy.multiply",
"numpy.subtract",
"numpy.log",
"numpy.power",
"numpy.exp",
"numpy.dot",
"numpy.add"
] | [((887, 896), 'numpy.log', 'np.log', (['i'], {}), '(i)\n', (893, 896), True, 'import numpy as np\n'), ((1087, 1096), 'numpy.exp', 'np.exp', (['i'], {}), '(i)\n', (1093, 1096), True, 'import numpy as np\n'), ((1784, 1796), 'numpy.add', 'np.add', (['l', 'r'], {}), '(l, r)\n', (1790, 1796), True, 'import numpy as np\n'), ((1957, 1974), 'numpy.subtract', 'np.subtract', (['l', 'r'], {}), '(l, r)\n', (1968, 1974), True, 'import numpy as np\n'), ((2136, 2153), 'numpy.multiply', 'np.multiply', (['l', 'r'], {}), '(l, r)\n', (2147, 2153), True, 'import numpy as np\n'), ((2373, 2388), 'numpy.divide', 'np.divide', (['l', 'r'], {}), '(l, r)\n', (2382, 2388), True, 'import numpy as np\n'), ((2635, 2649), 'numpy.power', 'np.power', (['l', 'r'], {}), '(l, r)\n', (2643, 2649), True, 'import numpy as np\n'), ((3007, 3019), 'numpy.dot', 'np.dot', (['l', 'r'], {}), '(l, r)\n', (3013, 3019), True, 'import numpy as np\n'), ((3109, 3134), 'numpy.dot', 'np.dot', (['gradient', 'right.T'], {}), '(gradient, right.T)\n', (3115, 3134), True, 'import numpy as np\n'), ((3136, 3160), 'numpy.dot', 'np.dot', (['left.T', 'gradient'], {}), '(left.T, gradient)\n', (3142, 3160), True, 'import numpy as np\n'), ((2835, 2863), 'numpy.power', 'np.power', (['base', '(exponent - 1)'], {}), '(base, exponent - 1)\n', (2843, 2863), True, 'import numpy as np\n'), ((2904, 2916), 'numpy.log', 'np.log', (['base'], {}), '(base)\n', (2910, 2916), True, 'import numpy as np\n')] |
from superdifferentiator.forward.functions import X
import numpy as np
import math
def bgfs(f, init_x, accuracy = 1e-8, alphas =[.00001,.00005,.0001,.0005,0.001,0.005,0.01,0.05,0.1,0.5,1,5],
         max_iter = 1000,verbose = False):
    """Minimise ``f`` with a BFGS-style quasi-Newton iteration.

    ("bgfs" keeps the historical spelling of the original author.)

    :param f: objective taking a list of autodiff variables (``X`` instances)
    :param init_x: starting point, a plain list of numbers
    :param accuracy: stop once the step length drops below this threshold
    :param alphas: candidate step sizes tried by the crude line search
    :param max_iter: maximum number of iterations
    :param verbose: print progress after each iteration
    :return: list of floats, the final estimate of the minimiser

    NOTE(review): ``alphas`` is a mutable default argument; it is only read
    here, but callers should not mutate it.
    """
    # Wrap the starting point into named autodiff variables x0, x1, ...
    x_current = [X(init_x[i],'x'+str(i)) for i in range(len(init_x))]
    # Initial approximation of the Hessian (identity).
    B = np.eye(len(init_x))
    for i_iter in range(max_iter):
        func = f(x_current)
        # Gradient of f at the current point, as a column vector.
        f_jac = func.jacobian()[1][0].T
        # Quasi-Newton search direction p = -B^{-1} grad.
        p = -np.linalg.pinv(B) @f_jac
        # Crude line search: evaluate f at every candidate step and keep the best.
        f_minmin = 10**13
        alpha_minmin = .00001
        for alpha in alphas:
            f_min = f([x_current[i] + alpha * p[i,0] for i in range(len(x_current))])
            if f_min.val[0] < f_minmin:
                f_minmin = f_min.val[0]
                alpha_minmin = alpha
        # Divergence guard: a NaN direction means the iteration broke down.
        if math.isnan(float(p[0])):
            print("It doesn't converge using bgfs! Please use some other methods or check if a maximum/minimum exists!")
            return [x_current[i].val[0] for i in range(len(x_current))]
        s = alpha_minmin * p
        # Converged: the step taken is smaller than the requested accuracy.
        if np.linalg.norm(s) <= accuracy:
            print('Since the shifted distance is smaller than {}, we stop the loop at {}th iteration.'.format(accuracy,i_iter))
            break
        x_current = [x_current[i] + alpha_minmin * p[i,0] for i in range(len(x_current))]
        if verbose:
            print('At {}th iteration, current x value is: '.format(i_iter),[x_current[i].val[0] for i in range(len(x_current))],
                  ', the step taken is: ',s,'.\n' )
        # Gradient difference y and the standard BFGS update of B.
        y_current = f(x_current).jacobian()[1][0].T - f_jac
        B += y_current.dot(y_current.T)/(y_current.T.dot(s)) - B.dot(s).dot(s.T).dot(B.T)/(s.T.dot(B).dot(s))
    final_del = [x_current[i].val[0] for i in range(len(x_current))]
    if i_iter == max_iter-1:
        print("Notice that the change in distance of x is still larger than input accuracy! It probably doesn't converge using bgfs! ")
    if verbose:
        print('The final value we get is',final_del,'.')
    return final_del
| [
"numpy.linalg.pinv",
"numpy.linalg.norm"
] | [((1084, 1101), 'numpy.linalg.norm', 'np.linalg.norm', (['s'], {}), '(s)\n', (1098, 1101), True, 'import numpy as np\n'), ((462, 479), 'numpy.linalg.pinv', 'np.linalg.pinv', (['B'], {}), '(B)\n', (476, 479), True, 'import numpy as np\n')] |
"""Defining a set of classes that represent causal functions/ mechanisms.
Author: <NAME>
Modified by <NAME>, July 24th 2019
.. MIT License
..
.. Copyright (c) 2018 <NAME>
..
.. Permission is hereby granted, free of charge, to any person obtaining a copy
.. of this software and associated documentation files (the "Software"), to deal
.. in the Software without restriction, including without limitation the rights
.. to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
.. copies of the Software, and to permit persons to whom the Software is
.. furnished to do so, subject to the following conditions:
..
.. The above copyright notice and this permission notice shall be included in all
.. copies or substantial portions of the Software.
..
.. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
.. IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
.. FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
.. AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
.. LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
.. OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
.. SOFTWARE.
"""
import random
import numpy as np
from scipy.stats import bernoulli
from sklearn.mixture import GaussianMixture as GMM
from sklearn.metrics.pairwise import euclidean_distances
from sklearn.gaussian_process import GaussianProcessRegressor
import torch as th
import copy
class LinearMechanism(object):
    """Linear mechanism, where Effect = alpha*Cause + Noise."""

    def __init__(self, ncauses, points, noise_function, d=4, noise_coeff=.4):
        """Draw one random linear coefficient per cause (magnitude in [0.25, 1), random sign)."""
        super(LinearMechanism, self).__init__()
        self.n_causes = ncauses
        self.points = points
        self.coefflist = []
        self.other_coefflist = []
        self.noise_coeff = noise_coeff
        self.noise_function = noise_function
        for _ in range(ncauses):
            weight = np.random.uniform(0.25, 1)
            flip = np.random.randint(2) == 0
            self.coefflist.append(-weight if flip else weight)
        # Pristine copy used by reinit() and as the base for interventions.
        self.old_coefflist = list(self.coefflist)

    def parametric_intervention(self):
        """Shift every coefficient away from zero by a fresh random amount."""
        for idx, base in enumerate(self.old_coefflist):
            shift = np.random.uniform(0.5, 1)
            self.coefflist[idx] = base + shift if base > 0 else base - shift

    def unique_parametric_intervention(self):
        """Apply one fixed intervention, drawn the first time and replayed afterwards."""
        if not self.other_coefflist:
            for base in self.old_coefflist:
                shift = np.random.uniform(2, 5)
                if np.random.randint(2) == 0:
                    shift = -shift
                self.other_coefflist.append(base + shift if base > 0 else base - shift)
        self.coefflist = list(self.other_coefflist)

    def reinit(self):
        """Restore the original (pre-intervention) coefficients."""
        self.coefflist = list(self.old_coefflist)

    def __call__(self, causes):
        """Run the mechanism: weighted sum of the causes plus additive noise."""
        effect = np.zeros((self.points, 1))
        self.noise = self.noise_coeff * self.noise_function(self.points)
        for par in range(causes.shape[1]):
            effect[:, 0] = effect[:, 0] + self.coefflist[par] * causes[:, par]
        effect[:, 0] = effect[:, 0] + self.noise[:, 0]
        return effect
class SigmoidMix_Mechanism(object):
    """Saturating sigmoid of the noisy sum of causes:
    effect = a*b*(s + c) / (1 + |b*(s + c)|) with s = sum(causes) + noise."""

    def __init__(self, ncauses, points, noise_function, d=4, noise_coeff=.4):
        """Draw the random sigmoid parameters (b is kept away from 0)."""
        super(SigmoidMix_Mechanism, self).__init__()
        self.n_causes = ncauses
        self.points = points
        self.a = np.random.exponential(1 / 4) + 1
        coin = bernoulli.rvs(0.5)
        neg_slope = np.random.uniform(-2, -0.5)
        pos_slope = np.random.uniform(0.5, 2)
        self.b = coin * neg_slope + (1 - coin) * pos_slope
        self.c = np.random.uniform(-2, 2)
        self.noise_coeff = noise_coeff
        self.noise_function = noise_function
        # Saved state for undoing / replaying interventions.
        self.old_b = self.b
        self.old_c = self.c
        self.other_b = None
        self.other_c = None

    def parametric_intervention(self):
        """Push b further from 0 and jitter c (fresh random draws)."""
        shift = np.random.uniform(0.5, 1)
        self.b = self.b - shift if self.b <= -0.5 else self.b + shift
        self.c = self.c + np.random.uniform(-1, 1)

    def unique_parametric_intervention(self):
        """Apply one fixed intervention, drawn once and replayed afterwards."""
        if self.other_b is None and self.other_c is None:
            self.parametric_intervention()
            self.other_b = self.b
            self.other_c = self.c
        self.b = self.other_b
        self.c = self.other_c

    def reinit(self):
        """Restore the pre-intervention parameters."""
        self.b = self.old_b
        self.c = self.old_c

    def mechanism(self, causes):
        """Apply the saturating sigmoid to the noisy column-sum of the causes."""
        self.noise = self.noise_coeff * self.noise_function(self.points)
        total = np.zeros(self.points)
        for col in range(causes.shape[1]):
            total = total + causes[:, col]
        total = total + self.noise[:, 0]
        shifted = total + self.c
        out = np.zeros((self.points, 1))
        out[:, 0] = self.a * self.b * shifted / (1 + np.abs(self.b * shifted))
        return out

    def __call__(self, causes):
        """Run the mechanism."""
        effect = np.zeros((self.points, 1))
        effect[:, 0] = self.mechanism(causes)[:, 0]
        return effect
class SigmoidAM_Mechanism(object):
    """Additive sigmoid mechanism: effect = sum_par f(cause_par) + noise,
    with f(x) = a*b*(x + c) / (1 + |b*(x + c)|)."""

    def __init__(self, ncauses, points, noise_function, d=4, noise_coeff=.4):
        """Init the mechanism.

        :param ncauses: number of parent causes
        :param points: number of samples produced per call
        :param noise_function: callable mapping `points` to a (points, 1) sample
        :param d: unused, kept for interface compatibility with siblings
        :param noise_coeff: scale applied to the noise sample
        """
        super(SigmoidAM_Mechanism, self).__init__()
        self.n_causes = ncauses
        self.points = points
        # Random sigmoid parameters; b is kept away from 0.
        self.a = np.random.exponential(1/4) + 1
        ber = bernoulli.rvs(0.5)
        self.b = ber * np.random.uniform(-2, -0.5) + (1-ber)*np.random.uniform(0.5, 2)
        self.c = np.random.uniform(-2, 2)
        self.noise_coeff = noise_coeff
        self.noise_function = noise_function
        # Saved state for (un)doing interventions. These attributes were
        # already stored but the intervention methods themselves were
        # missing; they are added here mirroring SigmoidMix_Mechanism.
        self.old_b = self.b
        self.old_c = self.c
        self.other_b = None
        self.other_c = None

    def parametric_intervention(self):
        """Randomly push b away from 0 and jitter c (fresh draws each call)."""
        change = np.random.uniform(0.5, 1)
        if self.b <= -0.5:
            self.b -= change
        else:
            self.b += change
        change = np.random.uniform(-1, 1)
        self.c += change

    def unique_parametric_intervention(self):
        """Apply one fixed intervention, drawn once and replayed afterwards."""
        if self.other_b is None and self.other_c is None:
            self.parametric_intervention()
            self.other_b = self.b
            self.other_c = self.c
        self.b = self.other_b
        self.c = self.other_c

    def reinit(self):
        """Restore the pre-intervention parameters."""
        self.b = self.old_b
        self.c = self.old_c

    def mechanism(self, x):
        """Saturating sigmoid applied elementwise to one cause column."""
        result = np.zeros((self.points, 1))
        for i in range(self.points):
            result[i, 0] = self.a * self.b * (x[i] + self.c) / (1 + abs(self.b * (x[i] + self.c)))
        return result

    def __call__(self, causes):
        """Run the mechanism (additive in the causes and in the noise)."""
        self.noise = self.noise_coeff * self.noise_function(self.points)
        effect = np.zeros((self.points, 1))
        for par in range(causes.shape[1]):
            effect[:, 0] = effect[:, 0] + self.mechanism(causes[:, par])[:, 0]
        effect[:, 0] = effect[:, 0] + self.noise[:, 0]
        return effect
class ANM_Mechanism(object):
    """Additive-noise-model mechanism: effect = f(causes) + noise, where f
    is a single function drawn from a Gaussian-process prior (RBF kernel)."""

    def __init__(self, ncauses, points, noise_function, noise_coeff=.4):
        """Init the mechanism.

        :param ncauses: number of parent causes
        :param points: number of samples produced per call
        :param noise_function: callable mapping `points` to a (points, 1) sample
        :param noise_coeff: scale applied to the noise sample
        """
        super(ANM_Mechanism, self).__init__()
        self.n_causes = ncauses
        self.points = points
        self.noise_function = noise_function
        self.noise_coeff = noise_coeff
        # Counts calls to mechanism(); f is drawn and fitted on the first
        # call only and reused afterwards, so f stays fixed across calls.
        self.nb_step = 0

    def mechanism(self, x):
        """Evaluate f(x); on the first call, draw f from the GP prior."""
        self.nb_step += 1
        x = np.reshape(x, (x.shape[0], x.shape[1]))
        if(self.nb_step == 1):
            # Sample f at the observed inputs from the GP prior, then fit
            # a regressor so later calls interpolate the same function.
            cov = computeGaussKernel(x)
            mean = np.zeros((1, self.points))[0, :]
            y = np.random.multivariate_normal(mean, cov)
            self.gpr = GaussianProcessRegressor()
            self.gpr.fit(x, y)
        else:
            y = self.gpr.predict(x)
        return y

    def __call__(self, causes):
        """Run the mechanism: f(causes) + noise (f of the noise for root nodes)."""
        effect = np.zeros((self.points, 1))
        self.noise = self.noise_coeff * self.noise_function(self.points)
        # Compute each cause's contribution
        if(causes.shape[1] > 0):
            effect[:, 0] = self.mechanism(causes)
        else:
            # Root node: the GP is applied to the noise itself.
            effect[:, 0] = self.mechanism(self.noise)
        effect[:, 0] = effect[:, 0] + self.noise[:, 0]
        return effect
class NN_Mechanism_Add(object):
    """Mechanism given by a small randomly-initialised MLP applied to the
    causes, with noise added to the network output (additive-noise model)."""

    def __init__(self, ncauses, points, noise_function, nh=10, noise_coeff=.4):
        """Init the mechanism.

        :param ncauses: number of parent causes (input width of the MLP)
        :param points: number of samples produced per call
        :param noise_function: callable mapping `points` to a (points, 1) sample
        :param nh: hidden-layer width
        :param noise_coeff: scale applied to the noise sample
        """
        super(NN_Mechanism_Add, self).__init__()
        self.n_causes = ncauses
        self.points = points
        self.noise_coeff = noise_coeff
        self.noise_function = noise_function
        # NOTE(review): nb_step is never read in this class.
        self.nb_step = 0
        self.nh = nh
        self.layers = self.initialize()
        # Deep copies keep pristine / intervened weights for reinit().
        self.old_layers = copy.deepcopy(self.layers)
        self.other_layers = None

    def weight_init(self, model):
        # Re-initialise every Linear layer's weights from N(0, 1).
        if isinstance(model, th.nn.modules.Linear):
            th.nn.init.normal_(model.weight.data, mean=0., std=1)

    def initialize(self):
        """Build the ncauses -> nh -> 1 PReLU network with N(0, 1) weights."""
        layers = []
        layers.append(th.nn.modules.Linear(self.n_causes, self.nh))
        layers.append(th.nn.PReLU())
        layers.append(th.nn.modules.Linear(self.nh, 1))
        layers = th.nn.Sequential(*layers)
        layers.apply(self.weight_init)
        return layers

    def parametric_intervention(self):
        """Perturb all Linear weights with fresh N(0, 0.1) jitter."""
        for i,layer in enumerate(self.layers):
            if isinstance(layer, th.nn.modules.Linear):
                with th.no_grad():
                    layer.weight += th.empty_like(layer.weight).normal_(mean=0, std=.1)

    def unique_parametric_intervention(self):
        """Apply one fixed weight perturbation, drawn once and replayed afterwards.

        NOTE(review): only layers with index > 0 are perturbed here (the
        first Linear is skipped), unlike parametric_intervention which
        perturbs all Linear layers, and std is 1 rather than 0.1 --
        confirm whether this asymmetry is intentional.
        """
        if self.other_layers is None:
            self.other_layers = copy.deepcopy(self.layers)
            for i,layer in enumerate(self.other_layers):
                if isinstance(layer, th.nn.modules.Linear) and i > 0:
                    with th.no_grad():
                        layer.weight += th.empty_like(layer.weight).normal_(mean=0, std=1)
        self.layers = copy.deepcopy(self.other_layers)

    def reinit(self):
        """Restore the pristine (pre-intervention) network."""
        self.layers = copy.deepcopy(self.old_layers)

    def apply_nn(self, x):
        # Forward pass; returns a flat array of length x.shape[0].
        data = x.astype('float32')
        data = th.from_numpy(data)
        return np.reshape(self.layers(data).data, (x.shape[0],))

    def __call__(self, causes):
        """Run the mechanism: MLP(causes) + noise."""
        effect = np.zeros((self.points, 1))
        self.noise = self.noise_coeff * self.noise_function(self.points)
        # Compute each cause's contribution
        if (causes.shape[1] > 0):
            effect[:, 0] = self.apply_nn(causes)
        else:
            # Root node unsupported here (the MLP input width is n_causes).
            print("abnormal")
        effect[:, 0] = effect[:, 0] + self.noise[:, 0]
        return effect
class NN_Mechanism(object):
    """Mechanism given by a small randomly-initialised MLP applied to the
    concatenation [causes, noise] (noise fed as an extra input column)."""

    def __init__(self, ncauses, points, noise_function, nh=20, noise_coeff=.4):
        """Init the mechanism.

        :param ncauses: number of parent causes (MLP input width is ncauses + 1)
        :param points: number of samples produced per call
        :param noise_function: callable mapping `points` to a (points, 1) sample
        :param nh: hidden-layer width
        :param noise_coeff: scale applied to the noise sample
        """
        super(NN_Mechanism, self).__init__()
        self.n_causes = ncauses
        self.points = points
        self.noise_coeff = noise_coeff
        self.noise_function = noise_function
        # NOTE(review): nb_step is never read in this class.
        self.nb_step = 0
        self.nh = nh
        self.layers = self.initialize()
        # Deep copies keep pristine / intervened weights for reinit().
        self.old_layers = copy.deepcopy(self.layers)
        self.other_layers = None

    def weight_init(self, model):
        # Re-initialise every Linear layer's weights from N(0, 1).
        if isinstance(model, th.nn.modules.Linear):
            th.nn.init.normal_(model.weight.data, mean=0., std=1)

    def initialize(self):
        """Build the (ncauses+1) -> nh -> 1 tanh network with N(0, 1) weights."""
        layers = []
        layers.append(th.nn.modules.Linear(self.n_causes+1, self.nh))
        layers.append(th.nn.Tanh())
        layers.append(th.nn.modules.Linear(self.nh, 1))
        layers = th.nn.Sequential(*layers)
        layers.apply(self.weight_init)
        return layers

    def parametric_intervention(self):
        """Perturb all Linear weights with fresh N(0, 0.1) jitter."""
        for i,layer in enumerate(self.layers):
            if isinstance(layer, th.nn.modules.Linear):
                with th.no_grad():
                    layer.weight += th.empty_like(layer.weight).normal_(mean=0, std=.1)

    def unique_parametric_intervention(self):
        """Apply one fixed weight perturbation, drawn once and replayed afterwards.

        NOTE(review): only layers with index > 0 are perturbed here (the
        first Linear is skipped), unlike parametric_intervention which
        perturbs all Linear layers, and std is 1 rather than 0.1 --
        confirm whether this asymmetry is intentional.
        """
        if self.other_layers is None:
            self.other_layers = copy.deepcopy(self.layers)
            for i,layer in enumerate(self.other_layers):
                if isinstance(layer, th.nn.modules.Linear) and i > 0:
                    with th.no_grad():
                        layer.weight += th.empty_like(layer.weight).normal_(mean=0, std=1)
        self.layers = copy.deepcopy(self.other_layers)

    def reinit(self):
        """Restore the pristine (pre-intervention) network."""
        self.layers = copy.deepcopy(self.old_layers)

    def apply_nn(self, x):
        # Forward pass; returns a flat array of length x.shape[0].
        data = x.astype('float32')
        data = th.from_numpy(data)
        return np.reshape(self.layers(data).data, (x.shape[0],))

    def __call__(self, causes):
        """Run the mechanism: MLP([causes, noise]) -- the noise is an input,
        not an additive term, so the model is not an ANM."""
        effect = np.zeros((self.points, 1))
        self.noise = self.noise_coeff * self.noise_function(self.points)
        # Compute each cause's contribution
        if (causes.shape[1] > 0):
            mix = np.hstack((causes, self.noise))
            effect[:, 0] = self.apply_nn(mix)
        else:
            effect[:, 0] = self.apply_nn(self.noise)
        return effect
# === Multimodal Mechanisms ===
class Multimodal_X_Mechanism(object):
    """Linear mechanism whose overall sign flips per sample, producing an
    X-shaped (multimodal) scatter:
    effect_i = s_i * sum_par coeff_par * cause[i, par] + noise_i,
    with s_i drawn uniformly from {-1, +1}."""

    def __init__(self, ncauses, points, noise_function, d=4, noise_coeff=.4):
        """Draw one random coefficient per cause (magnitude in [0.5, 1), random sign)."""
        super(Multimodal_X_Mechanism, self).__init__()
        self.n_causes = ncauses
        self.points = points
        self.coefflist = []
        self.other_coefflist = []
        self.noise_coeff = noise_coeff
        self.noise_function = noise_function
        for _ in range(ncauses):
            weight = np.random.uniform(0.5, 1)
            flip = np.random.randint(2) == 0
            self.coefflist.append(-weight if flip else weight)
        # Pristine copy used by reinit() and as the base for interventions.
        self.old_coefflist = list(self.coefflist)

    def parametric_intervention(self):
        """Shift every coefficient away from zero by a fresh random amount."""
        for idx, base in enumerate(self.old_coefflist):
            shift = np.random.uniform(0.5, 1)
            self.coefflist[idx] = base + shift if base > 0 else base - shift

    def unique_parametric_intervention(self):
        """Apply one fixed intervention, drawn once and replayed afterwards."""
        if not self.other_coefflist:
            for base in self.old_coefflist:
                shift = np.random.uniform(0.5, 1)
                self.other_coefflist.append(base + shift if base > 0 else base - shift)
        self.coefflist = list(self.other_coefflist)

    def reinit(self):
        """Restore the original (pre-intervention) coefficients."""
        self.coefflist = list(self.old_coefflist)

    def __call__(self, causes):
        """Run the mechanism."""
        effect = np.zeros((self.points, 1))
        self.noise = self.noise_coeff * self.noise_function(self.points)
        signs = np.random.choice([-1, 1], size=self.points)
        for par in range(causes.shape[1]):
            effect[:, 0] = effect[:, 0] + signs * self.coefflist[par] * causes[:, par]
        effect[:, 0] = effect[:, 0] + self.noise[:, 0]
        return effect
class Multimodal_Circle_Mechanism(object):
    """Mechanism producing a circle-like (bimodal) scatter: each sample gets
    either +sin_scale*sin(...) or -sin_scale*sin(...) at random."""

    def __init__(self, ncauses, points, noise_function, d=4, noise_coeff=.4):
        """Init the mechanism (random sinusoid amplitude and period)."""
        super(Multimodal_Circle_Mechanism, self).__init__()
        self.n_causes = ncauses
        self.points = points
        self.noise_coeff = noise_coeff
        self.noise_function = noise_function
        self.sin_scale = np.random.uniform(0.5, 1.5) #1
        self.period = np.random.uniform(0.5, 1.5) #1
        self.phase_shift = np.pi/2
        # make copy of initial parameters
        self.old_sin_scale = self.sin_scale
        self.old_period = self.period
        self.old_phase_shift = self.phase_shift
        self.other_sin_scale = None
        self.other_period = None
        self.other_phase_shift = None

    def parametric_intervention(self):
        """Redraw the sinusoid parameters (intervention on the mechanism).

        NOTE(review): `change` is drawn but never used -- possibly
        vestigial; removing the draw would shift the global RNG stream.
        NOTE(review): sin_scale is set to the saved *phase shift* (pi/2),
        not to a scale -- looks like a typo for old_sin_scale; confirm.
        """
        change = np.random.uniform(0.5, 1.5)
        self.sin_scale = self.old_phase_shift
        self.period = np.random.uniform(0.5, 1.5) #1
        self.phase_shift = np.pi/2

    def unique_parametric_intervention(self):
        """Apply one fixed intervention, drawn once and replayed afterwards."""
        if self.other_sin_scale is None:
            self.parametric_intervention()
            self.other_sin_scale = self.sin_scale
            self.other_period = self.period
            self.other_phase_shift = self.phase_shift
        self.sin_scale = self.other_sin_scale
        self.period = self.other_period
        self.phase_shift = self.other_phase_shift

    def reinit(self):
        """Restore the pre-intervention parameters."""
        self.sin_scale = self.old_sin_scale
        self.period = self.old_period
        self.phase_shift = self.old_phase_shift

    def mechanism(self, sel, x):
        # Per-sample sign flip of the sinusoid, selected by `sel`.
        if sel:
            sin_scale = -self.sin_scale
        else:
            sin_scale = self.sin_scale
        return sin_scale * np.sin(self.period * (x + self.phase_shift))

    def __call__(self, causes):
        """Run the mechanism."""
        effect = np.zeros((self.points, 1))
        self.noise = self.noise_coeff * self.noise_function(self.points)
        selector = np.random.choice([0,1], size=self.points)
        # Compute each cause's contribution
        for par in range(causes.shape[1]):
            for i, sel in enumerate(selector):
                effect[i, 0] = effect[i, 0] + self.mechanism(sel, causes[i, par])
        effect[:, 0] = effect[:, 0] + self.noise[:, 0]
        return effect
class Multimodal_ADN_Mechanism(object):
    """Mechanism producing a DNA-helix-like (bimodal) scatter: each sample
    gets either +sin_scale*sin(...) or -sin_scale*sin(...) at random."""

    def __init__(self, ncauses, points, noise_function, d=4, noise_coeff=.4):
        """Init the mechanism (random sinusoid amplitude and period)."""
        super(Multimodal_ADN_Mechanism, self).__init__()
        self.n_causes = ncauses
        self.points = points
        self.noise_coeff = noise_coeff
        self.noise_function = noise_function
        self.sin_scale = np.random.uniform(0.5, 1.5) #1
        self.period = np.random.uniform(1, 2) #1
        self.phase_shift = np.pi/2
        # make copy of initial parameters
        self.old_sin_scale = self.sin_scale
        self.old_period = self.period
        self.old_phase_shift = self.phase_shift
        self.other_sin_scale = None
        self.other_period = None
        self.other_phase_shift = None

    def parametric_intervention(self):
        """Redraw the sinusoid parameters (intervention on the mechanism).

        NOTE(review): sin_scale is set to the saved *phase shift* (pi/2),
        not to a scale -- looks like a typo for old_sin_scale; confirm.
        """
        # change = np.random.uniform(1, 2)
        self.sin_scale = self.old_phase_shift
        change = np.random.uniform(1, 2)
        self.period = self.old_period + change
        self.phase_shift = np.pi/2

    def unique_parametric_intervention(self):
        """Apply one fixed intervention, drawn once and replayed afterwards."""
        if self.other_sin_scale is None:
            self.parametric_intervention()
            self.other_sin_scale = self.sin_scale
            self.other_period = self.period
            self.other_phase_shift = self.phase_shift
        self.sin_scale = self.other_sin_scale
        self.period = self.other_period
        self.phase_shift = self.other_phase_shift

    def reinit(self):
        """Restore the pre-intervention parameters."""
        self.sin_scale = self.old_sin_scale
        self.period = self.old_period
        self.phase_shift = self.old_phase_shift

    def mechanism(self, sel, x):
        # Per-sample sign flip of the sinusoid, selected by `sel`.
        if sel:
            sin_scale = -self.sin_scale
        else:
            sin_scale = self.sin_scale
        return sin_scale * np.sin(self.period * (x + self.phase_shift))

    def __call__(self, causes):
        """Run the mechanism."""
        effect = np.zeros((self.points, 1))
        self.noise = self.noise_coeff * self.noise_function(self.points)
        selector = np.random.choice([0,1], size=self.points)
        # Compute each cause's contribution
        for par in range(causes.shape[1]):
            for i, sel in enumerate(selector):
                effect[i, 0] = effect[i, 0] + self.mechanism(sel, causes[i, par])
        effect[:, 0] = effect[:, 0] + self.noise[:, 0]
        return effect
class Function_Template:
    """Affine-plus-sinusoid function:
    f(x) = sign*slope*x + intercept + sin_scale*sin(period*(x + phase_shift))."""

    def __init__(self, sign, slope, intercept, sin_scale, period, phase_shift):
        self.sign = sign
        self.slope = slope
        self.intercept = intercept
        self.sin_scale = sin_scale
        self.period = period
        self.phase_shift = phase_shift

    def __call__(self, x):
        """Evaluate the function at x (scalar or array)."""
        linear_part = self.sign * self.slope * x + self.intercept
        wave_part = self.sin_scale * np.sin(self.period * (x + self.phase_shift))
        return linear_part + wave_part
# ====================================
class Polynomial_Mechanism(object):
    """Random polynomial mechanism: each cause goes through its own degree-d
    polynomial (clipped to [-1, 1]); contributions are summed and the noise
    is applied either multiplicatively or additively (coin flip at init)."""

    def __init__(self, ncauses, points, noise_function, d=2, noise_coeff=.4):
        """Draw d+1 uniform coefficients per cause and the noise sample."""
        super(Polynomial_Mechanism, self).__init__()
        self.n_causes = ncauses
        self.points = points
        self.d = d
        self.polycause = []
        for _ in range(ncauses):
            # NOTE: self.coefflist keeps holding the last cause's
            # coefficients, matching the historical behaviour.
            self.coefflist = [random.random() for _ in range(self.d + 1)]
            self.polycause.append(self.coefflist)
        self.ber = bernoulli.rvs(0.5)  # 1 -> multiplicative noise
        self.noise = noise_coeff * noise_function(points)

    def mechanism(self, x, par):
        """Evaluate cause `par`'s polynomial at every point, clipped to [-1, 1]."""
        coeffs = self.polycause[par]
        result = np.zeros((self.points, 1))
        for i in range(self.points):
            value = 0
            for power in range(self.d + 1):
                value += coeffs[power] * np.power(x[i], power)
            result[i, 0] = max(min(value, 1), -1)
        return result

    def __call__(self, causes):
        """Run the mechanism."""
        effect = np.zeros((self.points, 1))
        for par in range(causes.shape[1]):
            effect[:, 0] = effect[:, 0] + self.mechanism(causes[:, par], par)[:, 0]
        if self.ber > 0 and causes.shape[1] > 0:
            effect[:, 0] = effect[:, 0] * self.noise[:, 0]
        else:
            effect[:, 0] = effect[:, 0] + self.noise[:, 0]
        return effect
def computeGaussKernel(x):
    """Compute the Gaussian (RBF) kernel matrix of a sample.

    :param x: array of shape (n, m) -- n points in m dimensions
    :return: (n, n) matrix K with K[i, j] = exp(-||x_i - x_j||^2 / 2)
    """
    x = np.asarray(x)
    # Squared pairwise distances computed directly in numpy; this avoids
    # the sqrt-then-square round trip (and the dot-product cancellation
    # errors) of euclidean_distances(x, x) ** 2.
    sq_dists = np.sum((x[:, None, :] - x[None, :, :]) ** 2, axis=-1)
    return np.exp(-sq_dists / 2.0)
class GaussianProcessAdd_Mechanism(object):
    """Additive Gaussian-process mechanism: each cause contributes a fresh
    draw from a GP prior (RBF kernel); contributions and noise are summed."""

    def __init__(self, ncauses, points, noise_function, noise_coeff=.4):
        """Init the mechanism and draw the noise sample once.

        :param ncauses: number of parent causes
        :param points: number of samples produced per call
        :param noise_function: callable mapping `points` to a (points, 1) sample
        :param noise_coeff: scale applied to the noise sample
        """
        super(GaussianProcessAdd_Mechanism, self).__init__()
        self.n_causes = ncauses
        self.points = points
        self.noise = noise_coeff * noise_function(points)
        self.nb_step = 0  # number of times mechanism() has been called

    def mechanism(self, x):
        """Draw one GP-prior sample evaluated at the cause values `x`.

        Each call draws an independent function from the prior; no
        regressor is fitted (the historical dead code doing so was removed).
        """
        self.nb_step += 1
        x = np.reshape(x, (x.shape[0], 1))
        cov = computeGaussKernel(x)
        mean = np.zeros((1, self.points))[0, :]
        return np.random.multivariate_normal(mean, cov)

    def __call__(self, causes):
        """Run the mechanism (additive in the causes and in the noise)."""
        effect = np.zeros((self.points, 1))
        for par in range(causes.shape[1]):
            effect[:, 0] = effect[:, 0] + self.mechanism(causes[:, par])
        effect[:, 0] = effect[:, 0] + self.noise[:, 0]
        return effect
class GaussianProcessMix_Mechanism(object):
    """GP mechanism applied jointly to [causes, noise]; the function is
    frozen (fitted) on the second call and reused afterwards."""

    def __init__(self, ncauses, points, noise_function, noise_coeff=.4):
        """Init the mechanism and draw the noise sample once."""
        super(GaussianProcessMix_Mechanism, self).__init__()
        self.n_causes = ncauses
        self.points = points
        self.noise = noise_coeff * noise_function(points)
        # Counts calls to mechanism(); controls draw / fit / predict phases.
        self.nb_step = 0

    def mechanism(self, x):
        """Evaluate the GP at x.

        Call 1 draws a fresh function from the prior; call 2 draws again
        and fits a regressor on it; later calls predict with that fitted
        regressor, so the function is fixed from call 2 onwards.
        NOTE(review): the first call thus uses a different (throwaway)
        draw than subsequent calls -- presumably intentional; confirm.
        """
        self.nb_step += 1
        x = np.reshape(x, (x.shape[0], x.shape[1]))
        if(self.nb_step < 2):
            cov = computeGaussKernel(x)
            mean = np.zeros((1, self.points))[0, :]
            y = np.random.multivariate_normal(mean, cov)
        elif(self.nb_step == 2):
            cov = computeGaussKernel(x)
            mean = np.zeros((1, self.points))[0, :]
            y = np.random.multivariate_normal(mean, cov)
            self.gpr = GaussianProcessRegressor()
            self.gpr.fit(x, y)
            y = self.gpr.predict(x)
        else:
            y = self.gpr.predict(x)
        return y

    def __call__(self, causes):
        """Run the mechanism: GP([causes, noise]) -- the noise is an input,
        not an additive term."""
        effect = np.zeros((self.points, 1))
        # Compute each cause's contribution
        if(causes.shape[1] > 0):
            mix = np.hstack((causes, self.noise))
            effect[:, 0] = self.mechanism(mix)
        else:
            effect[:, 0] = self.mechanism(self.noise)
        return effect
class pnl_gp_mechanism(object):
    """ Post-Nonlinear model using a GP with additive noise. The
    second non-linearity is a sigmoid:
    effect = sigmoid(GP(causes) + noise). """

    def __init__(self, ncauses, points, noise_function, noise_coeff=.4):
        """Init the mechanism and draw the noise sample once."""
        super(pnl_gp_mechanism, self).__init__()
        self.n_causes = ncauses
        self.points = points
        self.noise = noise_coeff * noise_function(points)
        # Counts calls to mechanism(); the GP is fitted on the first call.
        self.nb_step = 0
        # Second non-linearity: logistic sigmoid.
        self.f2 = lambda x: 1 / (1 + np.exp(-x))

    def mechanism(self, x):
        """Evaluate the GP at x; on the first call, draw and fit the GP."""
        self.nb_step += 1
        x = np.reshape(x, (x.shape[0], x.shape[1]))
        if(self.nb_step == 1):
            # Sample the function from the prior, then freeze it with a
            # fitted regressor so later calls reuse the same function.
            cov = computeGaussKernel(x)
            mean = np.zeros((1, self.points))[0, :]
            y = np.random.multivariate_normal(mean, cov)
            self.gpr = GaussianProcessRegressor()
            self.gpr.fit(x, y)
        else:
            y = self.gpr.predict(x)
        return y

    def __call__(self, causes):
        """Run the mechanism: sigmoid(GP(causes) + noise).

        NOTE(review): root nodes (no causes) apply the GP to the noise and
        skip the additive-noise step -- possibly intentional since the
        noise is already the GP input there; confirm.
        """
        effect = np.zeros((self.points, 1))
        # Compute each cause's contribution
        if(causes.shape[1] > 0):
            effect[:, 0] = self.mechanism(causes)
            effect[:, 0] = effect[:, 0] + self.noise[:, 0]
        else:
            effect[:, 0] = self.mechanism(self.noise)
        effect[:, 0] = self.f2(effect[:, 0])
        return effect
class pnl_mult_mechanism(object):
    """ Post-Nonlinear model using exp and log as the non-linearities:
    effect = exp(log(sum(causes)) + noise), i.e. a multiplicative model.
    NOTE: log() requires the row-sums of the causes to be positive. """

    def __init__(self, ncauses, points, noise_function, noise_coeff=.4):
        """Init the mechanism."""
        super(pnl_mult_mechanism, self).__init__()
        self.n_causes = ncauses
        self.points = points
        self.noise_function = noise_function
        self.noise_coeff = noise_coeff
        # First non-linearity: log of the summed causes; second: exp.
        self.f1 = lambda x: np.log(np.sum(x, axis=1))
        self.f2 = lambda x: np.exp(x)

    def __call__(self, causes):
        """Run the mechanism."""
        out = np.zeros((self.points, 1))
        self.noise = self.noise_coeff * self.noise_function(self.points)
        # Root nodes feed the noise itself through f1.
        source = causes if causes.shape[1] > 0 else self.noise
        out[:, 0] = self.f1(source)
        out[:, 0] = out[:, 0] + self.noise[:, 0]
        out[:, 0] = self.f2(out[:, 0])
        return out
class PostNonLinear_Mechanism:
    """Post-nonlinear model: effect = f2(f1(causes) + noise).

    If f1 is omitted, a GaussianProcessAdd_Mechanism is used as f1.
    f2 is mandatory; previously a missing f2 only failed later with an
    opaque TypeError at call time, and the GP was built even when f1 was
    supplied -- both fixed here.
    """

    def __init__(self, ncauses, points, noise_function, f1=None, f2=None, noise_coeff=.4):
        self.points = points
        self.noise = noise_coeff * noise_function(points)
        if f2 is None:
            raise ValueError("f1 and f2 have to be defined!")
        self.f2 = f2
        if f1 is None:
            # Default first non-linearity: a noiseless GP draw; only built
            # when actually needed.
            self.gp = GaussianProcessAdd_Mechanism(ncauses, points, noise_function,
                                                   noise_coeff=0)
            self.f1 = self.gp
        else:
            self.gp = None
            self.f1 = f1

    def __call__(self, causes):
        """Run the mechanism."""
        effect = np.zeros((self.points, 1))
        if causes.shape[1] > 0:
            first = self.f1(causes)
        else:
            # Root node: f1 is applied to the noise itself.
            first = self.f1(self.noise)
        # Accept f1 outputs of shape (points,) or (points, 1).
        first = np.asarray(first).reshape(self.points)
        effect[:, 0] = self.f2(first + self.noise[:, 0])
        return effect
def gmm_cause(points, k=4, p1=2, p2=2):
    """Init a root cause with a spherical k-component Gaussian mixture.

    Means ~ p1*N(0,1), stds = |p2*N(0,1) + 1|, weights ~ U(0,1) renormalised.

    Fixed: the previous implementation assigned GaussianMixture.covars_,
    an attribute name from pre-0.18 scikit-learn (now `covariances_`), so
    the drawn covariances were silently ignored by sample(); the mixture
    is now sampled directly with numpy.

    :param points: number of samples to draw
    :return: array of shape (points,)
    """
    means = p1 * np.random.randn(k)
    stds = np.abs(p2 * np.random.randn(k) + 1)
    weights = np.abs(np.random.rand(k))
    weights = weights / weights.sum()
    # Pick a component per sample, then draw from that component.
    labels = np.random.choice(k, size=points, p=weights)
    return means[labels] + stds[labels] * np.random.randn(points)
def gaussian_cause(points):
    """Init a root cause with a standard Gaussian; returns shape (points,)."""
    draws = np.random.randn(points, 1)
    return draws.ravel()
def variable_gaussian_cause(points):
    """Gaussian root cause with random std in [1, sqrt(2)); shape (points,).

    Similar to gaussian_cause but with variable variance (identical to
    J. Peters with default value, set noise_coeff=0.2).
    """
    std = np.sqrt(np.random.rand(1) + 1)
    return std * np.random.randn(points, 1)[:, 0]
def uniform_cause(points):
    """Init a root cause uniform on [-1, 1); returns shape (points,)."""
    draws = np.random.rand(points, 1).ravel()
    return draws * 2 - 1
def uniform_cause_positive(points):
    """Init a root cause uniform on [0, 2); returns shape (points,)."""
    return 2 * np.random.rand(points, 1).ravel()
def normal_noise(points):
    """Gaussian noise with a random scale and a random +/-2 offset; shape (points, 1)."""
    scale = np.random.rand(1)
    draws = np.random.randn(points, 1)
    offset = random.sample([2, -2], 1)  # one-element list; broadcasts as a scalar
    return scale * draws + offset
def variable_normal_noise(points):
    """Gaussian noise whose std is drawn from [1, sqrt(2)); shape (points, 1).

    Similar to normal_noise but guarantees a std of at least 1 (identical
    to J. Peters with default value, set noise_coeff=0.2).
    """
    std = np.sqrt(np.random.rand(1) + 1)
    return std * np.random.randn(points, 1)
def absolute_gaussian_noise(points):
    """Init an absolute Gaussian noise variable: |scale * N(0, 1)|; shape (points, 1).

    Fixed: the previous version drew np.random.rand (uniform) for the
    sample despite the name and docstring promising Gaussian noise; it
    now draws np.random.randn, scaled by a single uniform factor.
    """
    return np.abs(np.random.randn(points, 1) * np.random.rand(1))
def laplace_noise(points):
    """Laplace noise with a random scale parameter; shape (points, 1)."""
    scale = np.random.rand(1)  # Laplace scale (diversity) parameter b
    return np.random.laplace(0, scale, (points, 1))
def uniform_noise(points):
    """Uniform noise with a random scale and a random +/-2 offset; shape (points, 1)."""
    scale = np.random.rand(1)
    draws = np.random.uniform(size=(points, 1))
    offset = random.sample([2, -2], 1)  # one-element list; broadcasts as a scalar
    return scale * draws + offset
class NormalCause(object):
    """Root-cause sampler drawing from N(mean, std).

    If std_min/std_max are given, std is itself drawn once, uniformly
    from [std_min, std_max), at construction time.
    """

    def __init__(self, mean=0, std=1, std_min=None, std_max=None):
        self.mean = mean
        fixed_std = std_min is None and std_max is None
        self.std = std if fixed_std else np.random.uniform(std_min, std_max)

    def __call__(self, points):
        """Draw `points` i.i.d. samples; returns shape (points,)."""
        return np.random.normal(self.mean, self.std, size=points)
class UniformCause(object):
    """Root-cause sampler drawing uniformly from [_min, _max)."""

    def __init__(self, _min=-1, _max=1):
        self._min = _min
        self._max = _max

    def __call__(self, points):
        """Draw `points` i.i.d. samples; returns shape (points,)."""
        low, high = self._min, self._max
        return np.random.uniform(low, high, size=points)
class nn_noise(object):
    """Noise source passed through a small randomly-initialised MLP
    (1 -> n_hidden -> 1 with tanh, default torch initialisation)."""

    def __init__(self, noise=variable_normal_noise, n_hidden=20):
        """Init the mechanism."""
        super(nn_noise, self).__init__()
        self.noise = noise
        self.n_hidden = n_hidden
        self.initialize_nn()

    def initialize_nn(self):
        """Build the 1 -> n_hidden -> 1 tanh network (default torch init)."""
        self.layers = th.nn.Sequential(
            th.nn.modules.Linear(1, self.n_hidden),
            th.nn.Tanh(),
            th.nn.modules.Linear(self.n_hidden, 1),
        )
        # A normal re-initialisation (weight_init) exists but is not
        # applied, matching the historical behaviour.

    def weight_init(self, model):
        # Optional N(0, 0.5) re-initialisation for Linear layers; unused.
        if isinstance(model, th.nn.modules.Linear):
            th.nn.init.normal_(model.weight.data, mean=0., std=0.5)

    def __call__(self, points):
        """Draw `points` noise values and push them through the network."""
        raw = self.noise(points)
        tensor = th.from_numpy(raw.astype('float32'))
        return self.layers(tensor).data.numpy()
| [
"numpy.sum",
"random.sample",
"numpy.random.exponential",
"sklearn.mixture.GaussianMixture",
"numpy.sin",
"numpy.random.randint",
"numpy.exp",
"numpy.random.normal",
"torch.empty_like",
"torch.no_grad",
"numpy.random.randn",
"numpy.power",
"sklearn.metrics.pairwise.euclidean_distances",
"n... | [((22034, 22054), 'numpy.exp', 'np.exp', (['(-xnorm / 2.0)'], {}), '(-xnorm / 2.0)\n', (22040, 22054), True, 'import numpy as np\n'), ((28683, 28718), 'sklearn.mixture.GaussianMixture', 'GMM', (['k'], {'covariance_type': '"""spherical"""'}), "(k, covariance_type='spherical')\n", (28686, 28718), True, 'from sklearn.mixture import GaussianMixture as GMM\n'), ((30369, 30386), 'numpy.random.rand', 'np.random.rand', (['(1)'], {}), '(1)\n', (30383, 30386), True, 'import numpy as np\n'), ((30398, 30440), 'numpy.random.laplace', 'np.random.laplace', (['(0)', 'lambda_', '(points, 1)'], {}), '(0, lambda_, (points, 1))\n', (30415, 30440), True, 'import numpy as np\n'), ((3197, 3223), 'numpy.zeros', 'np.zeros', (['(self.points, 1)'], {}), '((self.points, 1))\n', (3205, 3223), True, 'import numpy as np\n'), ((3867, 3885), 'scipy.stats.bernoulli.rvs', 'bernoulli.rvs', (['(0.5)'], {}), '(0.5)\n', (3880, 3885), False, 'from scipy.stats import bernoulli\n'), ((3990, 4014), 'numpy.random.uniform', 'np.random.uniform', (['(-2)', '(2)'], {}), '(-2, 2)\n', (4007, 4014), True, 'import numpy as np\n'), ((4269, 4294), 'numpy.random.uniform', 'np.random.uniform', (['(0.5)', '(1)'], {}), '(0.5, 1)\n', (4286, 4294), True, 'import numpy as np\n'), ((4412, 4436), 'numpy.random.uniform', 'np.random.uniform', (['(-1)', '(1)'], {}), '(-1, 1)\n', (4429, 4436), True, 'import numpy as np\n'), ((4976, 5002), 'numpy.zeros', 'np.zeros', (['(self.points, 1)'], {}), '((self.points, 1))\n', (4984, 5002), True, 'import numpy as np\n'), ((5447, 5473), 'numpy.zeros', 'np.zeros', (['(self.points, 1)'], {}), '((self.points, 1))\n', (5455, 5473), True, 'import numpy as np\n'), ((5918, 5936), 'scipy.stats.bernoulli.rvs', 'bernoulli.rvs', (['(0.5)'], {}), '(0.5)\n', (5931, 5936), False, 'from scipy.stats import bernoulli\n'), ((6041, 6065), 'numpy.random.uniform', 'np.random.uniform', (['(-2)', '(2)'], {}), '(-2, 2)\n', (6058, 6065), True, 'import numpy as np\n'), ((6344, 6370), 'numpy.zeros', 'np.zeros', 
(['(self.points, 1)'], {}), '((self.points, 1))\n', (6352, 6370), True, 'import numpy as np\n'), ((6710, 6736), 'numpy.zeros', 'np.zeros', (['(self.points, 1)'], {}), '((self.points, 1))\n', (6718, 6736), True, 'import numpy as np\n'), ((7437, 7476), 'numpy.reshape', 'np.reshape', (['x', '(x.shape[0], x.shape[1])'], {}), '(x, (x.shape[0], x.shape[1]))\n', (7447, 7476), True, 'import numpy as np\n'), ((7890, 7916), 'numpy.zeros', 'np.zeros', (['(self.points, 1)'], {}), '((self.points, 1))\n', (7898, 7916), True, 'import numpy as np\n'), ((8719, 8745), 'copy.deepcopy', 'copy.deepcopy', (['self.layers'], {}), '(self.layers)\n', (8732, 8745), False, 'import copy\n'), ((9193, 9218), 'torch.nn.Sequential', 'th.nn.Sequential', (['*layers'], {}), '(*layers)\n', (9209, 9218), True, 'import torch as th\n'), ((9970, 10002), 'copy.deepcopy', 'copy.deepcopy', (['self.other_layers'], {}), '(self.other_layers)\n', (9983, 10002), False, 'import copy\n'), ((10048, 10078), 'copy.deepcopy', 'copy.deepcopy', (['self.old_layers'], {}), '(self.old_layers)\n', (10061, 10078), False, 'import copy\n'), ((10157, 10176), 'torch.from_numpy', 'th.from_numpy', (['data'], {}), '(data)\n', (10170, 10176), True, 'import torch as th\n'), ((10326, 10352), 'numpy.zeros', 'np.zeros', (['(self.points, 1)'], {}), '((self.points, 1))\n', (10334, 10352), True, 'import numpy as np\n'), ((11122, 11148), 'copy.deepcopy', 'copy.deepcopy', (['self.layers'], {}), '(self.layers)\n', (11135, 11148), False, 'import copy\n'), ((11597, 11622), 'torch.nn.Sequential', 'th.nn.Sequential', (['*layers'], {}), '(*layers)\n', (11613, 11622), True, 'import torch as th\n'), ((12374, 12406), 'copy.deepcopy', 'copy.deepcopy', (['self.other_layers'], {}), '(self.other_layers)\n', (12387, 12406), False, 'import copy\n'), ((12452, 12482), 'copy.deepcopy', 'copy.deepcopy', (['self.old_layers'], {}), '(self.old_layers)\n', (12465, 12482), False, 'import copy\n'), ((12561, 12580), 'torch.from_numpy', 'th.from_numpy', (['data'], {}), 
'(data)\n', (12574, 12580), True, 'import torch as th\n'), ((12730, 12756), 'numpy.zeros', 'np.zeros', (['(self.points, 1)'], {}), '((self.points, 1))\n', (12738, 12756), True, 'import numpy as np\n'), ((14737, 14763), 'numpy.zeros', 'np.zeros', (['(self.points, 1)'], {}), '((self.points, 1))\n', (14745, 14763), True, 'import numpy as np\n'), ((14857, 14900), 'numpy.random.choice', 'np.random.choice', (['[-1, 1]'], {'size': 'self.points'}), '([-1, 1], size=self.points)\n', (14873, 14900), True, 'import numpy as np\n'), ((15586, 15613), 'numpy.random.uniform', 'np.random.uniform', (['(0.5)', '(1.5)'], {}), '(0.5, 1.5)\n', (15603, 15613), True, 'import numpy as np\n'), ((15639, 15666), 'numpy.random.uniform', 'np.random.uniform', (['(0.5)', '(1.5)'], {}), '(0.5, 1.5)\n', (15656, 15666), True, 'import numpy as np\n'), ((16042, 16069), 'numpy.random.uniform', 'np.random.uniform', (['(0.5)', '(1.5)'], {}), '(0.5, 1.5)\n', (16059, 16069), True, 'import numpy as np\n'), ((16138, 16165), 'numpy.random.uniform', 'np.random.uniform', (['(0.5)', '(1.5)'], {}), '(0.5, 1.5)\n', (16155, 16165), True, 'import numpy as np\n'), ((17070, 17096), 'numpy.zeros', 'np.zeros', (['(self.points, 1)'], {}), '((self.points, 1))\n', (17078, 17096), True, 'import numpy as np\n'), ((17190, 17232), 'numpy.random.choice', 'np.random.choice', (['[0, 1]'], {'size': 'self.points'}), '([0, 1], size=self.points)\n', (17206, 17232), True, 'import numpy as np\n'), ((17908, 17935), 'numpy.random.uniform', 'np.random.uniform', (['(0.5)', '(1.5)'], {}), '(0.5, 1.5)\n', (17925, 17935), True, 'import numpy as np\n'), ((17961, 17984), 'numpy.random.uniform', 'np.random.uniform', (['(1)', '(2)'], {}), '(1, 2)\n', (17978, 17984), True, 'import numpy as np\n'), ((18449, 18472), 'numpy.random.uniform', 'np.random.uniform', (['(1)', '(2)'], {}), '(1, 2)\n', (18466, 18472), True, 'import numpy as np\n'), ((19421, 19447), 'numpy.zeros', 'np.zeros', (['(self.points, 1)'], {}), '((self.points, 1))\n', (19429, 19447), 
True, 'import numpy as np\n'), ((19541, 19583), 'numpy.random.choice', 'np.random.choice', (['[0, 1]'], {'size': 'self.points'}), '([0, 1], size=self.points)\n', (19557, 19583), True, 'import numpy as np\n'), ((20913, 20931), 'scipy.stats.bernoulli.rvs', 'bernoulli.rvs', (['(0.5)'], {}), '(0.5)\n', (20926, 20931), False, 'from scipy.stats import bernoulli\n'), ((21116, 21142), 'numpy.zeros', 'np.zeros', (['(self.points, 1)'], {}), '((self.points, 1))\n', (21124, 21142), True, 'import numpy as np\n'), ((21485, 21511), 'numpy.zeros', 'np.zeros', (['(self.points, 1)'], {}), '((self.points, 1))\n', (21493, 21511), True, 'import numpy as np\n'), ((21993, 22018), 'sklearn.metrics.pairwise.euclidean_distances', 'euclidean_distances', (['x', 'x'], {}), '(x, x)\n', (22012, 22018), False, 'from sklearn.metrics.pairwise import euclidean_distances\n'), ((22518, 22548), 'numpy.reshape', 'np.reshape', (['x', '(x.shape[0], 1)'], {}), '(x, (x.shape[0], 1))\n', (22528, 22548), True, 'import numpy as np\n'), ((22646, 22686), 'numpy.random.multivariate_normal', 'np.random.multivariate_normal', (['mean', 'cov'], {}), '(mean, cov)\n', (22675, 22686), True, 'import numpy as np\n'), ((23367, 23393), 'numpy.zeros', 'np.zeros', (['(self.points, 1)'], {}), '((self.points, 1))\n', (23375, 23393), True, 'import numpy as np\n'), ((24093, 24132), 'numpy.reshape', 'np.reshape', (['x', '(x.shape[0], x.shape[1])'], {}), '(x, (x.shape[0], x.shape[1]))\n', (24103, 24132), True, 'import numpy as np\n'), ((24763, 24789), 'numpy.zeros', 'np.zeros', (['(self.points, 1)'], {}), '((self.points, 1))\n', (24771, 24789), True, 'import numpy as np\n'), ((25649, 25688), 'numpy.reshape', 'np.reshape', (['x', '(x.shape[0], x.shape[1])'], {}), '(x, (x.shape[0], x.shape[1]))\n', (25659, 25688), True, 'import numpy as np\n'), ((26138, 26164), 'numpy.zeros', 'np.zeros', (['(self.points, 1)'], {}), '((self.points, 1))\n', (26146, 26164), True, 'import numpy as np\n'), ((27124, 27150), 'numpy.zeros', 'np.zeros', 
(['(self.points, 1)'], {}), '((self.points, 1))\n', (27132, 27150), True, 'import numpy as np\n'), ((28192, 28218), 'numpy.zeros', 'np.zeros', (['(self.points, 1)'], {}), '((self.points, 1))\n', (28200, 28218), True, 'import numpy as np\n'), ((28729, 28752), 'numpy.random.randn', 'np.random.randn', (['(300)', '(1)'], {}), '(300, 1)\n', (28744, 28752), True, 'import numpy as np\n'), ((28775, 28796), 'numpy.random.randn', 'np.random.randn', (['k', '(1)'], {}), '(k, 1)\n', (28790, 28796), True, 'import numpy as np\n'), ((28883, 28900), 'numpy.random.rand', 'np.random.rand', (['k'], {}), '(k)\n', (28897, 28900), True, 'import numpy as np\n'), ((29076, 29102), 'numpy.random.randn', 'np.random.randn', (['points', '(1)'], {}), '(points, 1)\n', (29091, 29102), True, 'import numpy as np\n'), ((29822, 29847), 'random.sample', 'random.sample', (['[2, -2]', '(1)'], {}), '([2, -2], 1)\n', (29835, 29847), False, 'import random\n'), ((30106, 30132), 'numpy.random.randn', 'np.random.randn', (['points', '(1)'], {}), '(points, 1)\n', (30121, 30132), True, 'import numpy as np\n'), ((30589, 30614), 'random.sample', 'random.sample', (['[2, -2]', '(1)'], {}), '([2, -2], 1)\n', (30602, 30614), False, 'import random\n'), ((30931, 30981), 'numpy.random.normal', 'np.random.normal', (['self.mean', 'self.std'], {'size': 'points'}), '(self.mean, self.std, size=points)\n', (30947, 30981), True, 'import numpy as np\n'), ((31186, 31238), 'numpy.random.uniform', 'np.random.uniform', (['self._min', 'self._max'], {'size': 'points'}), '(self._min, self._max, size=points)\n', (31203, 31238), True, 'import numpy as np\n'), ((31731, 31756), 'torch.nn.Sequential', 'th.nn.Sequential', (['*layers'], {}), '(*layers)\n', (31747, 31756), True, 'import torch as th\n'), ((32110, 32129), 'torch.from_numpy', 'th.from_numpy', (['data'], {}), '(data)\n', (32123, 32129), True, 'import torch as th\n'), ((2041, 2067), 'numpy.random.uniform', 'np.random.uniform', (['(0.25)', '(1)'], {}), '(0.25, 1)\n', (2058, 2067), 
True, 'import numpy as np\n'), ((2338, 2363), 'numpy.random.uniform', 'np.random.uniform', (['(0.5)', '(1)'], {}), '(0.5, 1)\n', (2355, 2363), True, 'import numpy as np\n'), ((3822, 3850), 'numpy.random.exponential', 'np.random.exponential', (['(1 / 4)'], {}), '(1 / 4)\n', (3843, 3850), True, 'import numpy as np\n'), ((5873, 5901), 'numpy.random.exponential', 'np.random.exponential', (['(1 / 4)'], {}), '(1 / 4)\n', (5894, 5901), True, 'import numpy as np\n'), ((7617, 7657), 'numpy.random.multivariate_normal', 'np.random.multivariate_normal', (['mean', 'cov'], {}), '(mean, cov)\n', (7646, 7657), True, 'import numpy as np\n'), ((7681, 7707), 'sklearn.gaussian_process.GaussianProcessRegressor', 'GaussianProcessRegressor', ([], {}), '()\n', (7705, 7707), False, 'from sklearn.gaussian_process import GaussianProcessRegressor\n'), ((8878, 8932), 'torch.nn.init.normal_', 'th.nn.init.normal_', (['model.weight.data'], {'mean': '(0.0)', 'std': '(1)'}), '(model.weight.data, mean=0.0, std=1)\n', (8896, 8932), True, 'import torch as th\n'), ((9036, 9080), 'torch.nn.modules.Linear', 'th.nn.modules.Linear', (['self.n_causes', 'self.nh'], {}), '(self.n_causes, self.nh)\n', (9056, 9080), True, 'import torch as th\n'), ((9104, 9117), 'torch.nn.PReLU', 'th.nn.PReLU', ([], {}), '()\n', (9115, 9117), True, 'import torch as th\n'), ((9141, 9173), 'torch.nn.modules.Linear', 'th.nn.modules.Linear', (['self.nh', '(1)'], {}), '(self.nh, 1)\n', (9161, 9173), True, 'import torch as th\n'), ((9664, 9690), 'copy.deepcopy', 'copy.deepcopy', (['self.layers'], {}), '(self.layers)\n', (9677, 9690), False, 'import copy\n'), ((11281, 11335), 'torch.nn.init.normal_', 'th.nn.init.normal_', (['model.weight.data'], {'mean': '(0.0)', 'std': '(1)'}), '(model.weight.data, mean=0.0, std=1)\n', (11299, 11335), True, 'import torch as th\n'), ((11439, 11487), 'torch.nn.modules.Linear', 'th.nn.modules.Linear', (['(self.n_causes + 1)', 'self.nh'], {}), '(self.n_causes + 1, self.nh)\n', (11459, 11487), True, 
'import torch as th\n'), ((11509, 11521), 'torch.nn.Tanh', 'th.nn.Tanh', ([], {}), '()\n', (11519, 11521), True, 'import torch as th\n'), ((11545, 11577), 'torch.nn.modules.Linear', 'th.nn.modules.Linear', (['self.nh', '(1)'], {}), '(self.nh, 1)\n', (11565, 11577), True, 'import torch as th\n'), ((12068, 12094), 'copy.deepcopy', 'copy.deepcopy', (['self.layers'], {}), '(self.layers)\n', (12081, 12094), False, 'import copy\n'), ((12926, 12957), 'numpy.hstack', 'np.hstack', (['(causes, self.noise)'], {}), '((causes, self.noise))\n', (12935, 12957), True, 'import numpy as np\n'), ((13693, 13718), 'numpy.random.uniform', 'np.random.uniform', (['(0.5)', '(1)'], {}), '(0.5, 1)\n', (13710, 13718), True, 'import numpy as np\n'), ((13989, 14014), 'numpy.random.uniform', 'np.random.uniform', (['(0.5)', '(1)'], {}), '(0.5, 1)\n', (14006, 14014), True, 'import numpy as np\n'), ((16942, 16986), 'numpy.sin', 'np.sin', (['(self.period * (x + self.phase_shift))'], {}), '(self.period * (x + self.phase_shift))\n', (16948, 16986), True, 'import numpy as np\n'), ((19293, 19337), 'numpy.sin', 'np.sin', (['(self.period * (x + self.phase_shift))'], {}), '(self.period * (x + self.phase_shift))\n', (19299, 19337), True, 'import numpy as np\n'), ((22601, 22627), 'numpy.zeros', 'np.zeros', (['(1, self.points)'], {}), '((1, self.points))\n', (22609, 22627), True, 'import numpy as np\n'), ((24272, 24312), 'numpy.random.multivariate_normal', 'np.random.multivariate_normal', (['mean', 'cov'], {}), '(mean, cov)\n', (24301, 24312), True, 'import numpy as np\n'), ((24885, 24916), 'numpy.hstack', 'np.hstack', (['(causes, self.noise)'], {}), '((causes, self.noise))\n', (24894, 24916), True, 'import numpy as np\n'), ((25829, 25869), 'numpy.random.multivariate_normal', 'np.random.multivariate_normal', (['mean', 'cov'], {}), '(mean, cov)\n', (25858, 25869), True, 'import numpy as np\n'), ((25893, 25919), 'sklearn.gaussian_process.GaussianProcessRegressor', 'GaussianProcessRegressor', ([], {}), '()\n', 
(25917, 25919), False, 'from sklearn.gaussian_process import GaussianProcessRegressor\n'), ((27031, 27040), 'numpy.exp', 'np.exp', (['x'], {}), '(x)\n', (27037, 27040), True, 'import numpy as np\n'), ((29401, 29427), 'numpy.random.randn', 'np.random.randn', (['points', '(1)'], {}), '(points, 1)\n', (29416, 29427), True, 'import numpy as np\n'), ((29649, 29674), 'numpy.random.rand', 'np.random.rand', (['points', '(1)'], {}), '(points, 1)\n', (29663, 29674), True, 'import numpy as np\n'), ((29763, 29780), 'numpy.random.rand', 'np.random.rand', (['(1)'], {}), '(1)\n', (29777, 29780), True, 'import numpy as np\n'), ((29783, 29809), 'numpy.random.randn', 'np.random.randn', (['points', '(1)'], {}), '(points, 1)\n', (29798, 29809), True, 'import numpy as np\n'), ((30239, 30264), 'numpy.random.rand', 'np.random.rand', (['points', '(1)'], {}), '(points, 1)\n', (30253, 30264), True, 'import numpy as np\n'), ((30267, 30284), 'numpy.random.rand', 'np.random.rand', (['(1)'], {}), '(1)\n', (30281, 30284), True, 'import numpy as np\n'), ((30521, 30538), 'numpy.random.rand', 'np.random.rand', (['(1)'], {}), '(1)\n', (30535, 30538), True, 'import numpy as np\n'), ((30541, 30576), 'numpy.random.uniform', 'np.random.uniform', ([], {'size': '(points, 1)'}), '(size=(points, 1))\n', (30558, 30576), True, 'import numpy as np\n'), ((30847, 30882), 'numpy.random.uniform', 'np.random.uniform', (['std_min', 'std_max'], {}), '(std_min, std_max)\n', (30864, 30882), True, 'import numpy as np\n'), ((31570, 31608), 'torch.nn.modules.Linear', 'th.nn.modules.Linear', (['(1)', 'self.n_hidden'], {}), '(1, self.n_hidden)\n', (31590, 31608), True, 'import torch as th\n'), ((31632, 31644), 'torch.nn.Tanh', 'th.nn.Tanh', ([], {}), '()\n', (31642, 31644), True, 'import torch as th\n'), ((31668, 31706), 'torch.nn.modules.Linear', 'th.nn.modules.Linear', (['self.n_hidden', '(1)'], {}), '(self.n_hidden, 1)\n', (31688, 31706), True, 'import torch as th\n'), ((31940, 31996), 'torch.nn.init.normal_', 
'th.nn.init.normal_', (['model.weight.data'], {'mean': '(0.0)', 'std': '(0.5)'}), '(model.weight.data, mean=0.0, std=0.5)\n', (31958, 31996), True, 'import torch as th\n'), ((2083, 2103), 'numpy.random.randint', 'np.random.randint', (['(2)'], {}), '(2)\n', (2100, 2103), True, 'import numpy as np\n'), ((2681, 2704), 'numpy.random.uniform', 'np.random.uniform', (['(2)', '(5)'], {}), '(2, 5)\n', (2698, 2704), True, 'import numpy as np\n'), ((3909, 3936), 'numpy.random.uniform', 'np.random.uniform', (['(-2)', '(-0.5)'], {}), '(-2, -0.5)\n', (3926, 3936), True, 'import numpy as np\n'), ((3947, 3972), 'numpy.random.uniform', 'np.random.uniform', (['(0.5)', '(2)'], {}), '(0.5, 2)\n', (3964, 3972), True, 'import numpy as np\n'), ((5960, 5987), 'numpy.random.uniform', 'np.random.uniform', (['(-2)', '(-0.5)'], {}), '(-2, -0.5)\n', (5977, 5987), True, 'import numpy as np\n'), ((5998, 6023), 'numpy.random.uniform', 'np.random.uniform', (['(0.5)', '(2)'], {}), '(0.5, 2)\n', (6015, 6023), True, 'import numpy as np\n'), ((7568, 7594), 'numpy.zeros', 'np.zeros', (['(1, self.points)'], {}), '((1, self.points))\n', (7576, 7594), True, 'import numpy as np\n'), ((13734, 13754), 'numpy.random.randint', 'np.random.randint', (['(2)'], {}), '(2)\n', (13751, 13754), True, 'import numpy as np\n'), ((14332, 14357), 'numpy.random.uniform', 'np.random.uniform', (['(0.5)', '(1)'], {}), '(0.5, 1)\n', (14349, 14357), True, 'import numpy as np\n'), ((20288, 20332), 'numpy.sin', 'np.sin', (['(self.period * (x + self.phase_shift))'], {}), '(self.period * (x + self.phase_shift))\n', (20294, 20332), True, 'import numpy as np\n'), ((24223, 24249), 'numpy.zeros', 'np.zeros', (['(1, self.points)'], {}), '((1, self.points))\n', (24231, 24249), True, 'import numpy as np\n'), ((24454, 24494), 'numpy.random.multivariate_normal', 'np.random.multivariate_normal', (['mean', 'cov'], {}), '(mean, cov)\n', (24483, 24494), True, 'import numpy as np\n'), ((24518, 24544), 
'sklearn.gaussian_process.GaussianProcessRegressor', 'GaussianProcessRegressor', ([], {}), '()\n', (24542, 24544), False, 'from sklearn.gaussian_process import GaussianProcessRegressor\n'), ((25780, 25806), 'numpy.zeros', 'np.zeros', (['(1, self.points)'], {}), '((1, self.points))\n', (25788, 25806), True, 'import numpy as np\n'), ((26984, 27001), 'numpy.sum', 'np.sum', (['x'], {'axis': '(1)'}), '(x, axis=1)\n', (26990, 27001), True, 'import numpy as np\n'), ((29376, 29393), 'numpy.random.rand', 'np.random.rand', (['(1)'], {}), '(1)\n', (29390, 29393), True, 'import numpy as np\n'), ((29517, 29542), 'numpy.random.rand', 'np.random.rand', (['points', '(1)'], {}), '(points, 1)\n', (29531, 29542), True, 'import numpy as np\n'), ((30081, 30098), 'numpy.random.rand', 'np.random.rand', (['(1)'], {}), '(1)\n', (30095, 30098), True, 'import numpy as np\n'), ((2724, 2744), 'numpy.random.randint', 'np.random.randint', (['(2)'], {}), '(2)\n', (2741, 2744), True, 'import numpy as np\n'), ((9445, 9457), 'torch.no_grad', 'th.no_grad', ([], {}), '()\n', (9455, 9457), True, 'import torch as th\n'), ((11849, 11861), 'torch.no_grad', 'th.no_grad', ([], {}), '()\n', (11859, 11861), True, 'import torch as th\n'), ((20826, 20841), 'random.random', 'random.random', ([], {}), '()\n', (20839, 20841), False, 'import random\n'), ((21264, 21281), 'numpy.power', 'np.power', (['x[i]', 'j'], {}), '(x[i], j)\n', (21272, 21281), True, 'import numpy as np\n'), ((24405, 24431), 'numpy.zeros', 'np.zeros', (['(1, self.points)'], {}), '((1, self.points))\n', (24413, 24431), True, 'import numpy as np\n'), ((25536, 25546), 'numpy.exp', 'np.exp', (['(-x)'], {}), '(-x)\n', (25542, 25546), True, 'import numpy as np\n'), ((28831, 28852), 'numpy.random.randn', 'np.random.randn', (['k', '(1)'], {}), '(k, 1)\n', (28846, 28852), True, 'import numpy as np\n'), ((9843, 9855), 'torch.no_grad', 'th.no_grad', ([], {}), '()\n', (9853, 9855), True, 'import torch as th\n'), ((12247, 12259), 'torch.no_grad', 
'th.no_grad', ([], {}), '()\n', (12257, 12259), True, 'import torch as th\n'), ((9495, 9522), 'torch.empty_like', 'th.empty_like', (['layer.weight'], {}), '(layer.weight)\n', (9508, 9522), True, 'import torch as th\n'), ((11899, 11926), 'torch.empty_like', 'th.empty_like', (['layer.weight'], {}), '(layer.weight)\n', (11912, 11926), True, 'import torch as th\n'), ((9897, 9924), 'torch.empty_like', 'th.empty_like', (['layer.weight'], {}), '(layer.weight)\n', (9910, 9924), True, 'import torch as th\n'), ((12301, 12328), 'torch.empty_like', 'th.empty_like', (['layer.weight'], {}), '(layer.weight)\n', (12314, 12328), True, 'import torch as th\n')] |
import numpy as np
import os
from os.path import join
import glob
import matplotlib
import matplotlib.pyplot as plt
import torch
import pandas as pd
from random import randint
import SimpleITK as sitk
from torchio.transforms import RandomMotion, RandomSpike, RandomGhosting, RandomBiasField
from medseg.common_utils.basic_operations import check_dir, rescale_intensity, crop_or_pad, load_img_label_from_path, recover_image
from medseg.common_utils.save import save_medical_image
def preprocess3D(image, min_val=0, max_val=1):
    """Rescale each 2D slice of a 3D volume to the range [min_val, max_val].

    Every slice along axis 0 is normalised independently using its own
    intensity extrema, so slices with different dynamic ranges all end up
    spanning the same target interval.

    :param image: 3D array of shape (slices, H, W).
    :param min_val: lower bound of the target intensity range.
    :param max_val: upper bound of the target intensity range.
    :return: new array (same shape and dtype) with per-slice rescaled
        intensities; the input array is left unmodified.
    """
    output = np.zeros_like(image, dtype=image.dtype)
    for idx in range(image.shape[0]):
        slice_data = image[idx]
        # The (0, 100) percentiles are simply the slice minimum and maximum.
        a_min_val, a_max_val = np.percentile(slice_data, (0, 100))
        # Restrict the intensity range on a copy so the caller's array is
        # never mutated in place (the original assigned through a view).
        clipped = np.clip(slice_data, a_min_val, a_max_val)
        intensity_range = a_max_val - a_min_val
        if intensity_range == 0:
            # Constant slice: avoid a division by zero and map it to min_val.
            output[idx] = min_val
            continue
        # Linear map [a_min_val, a_max_val] -> [min_val, max_val].
        scale = (max_val - min_val) / intensity_range
        bias = max_val - scale * a_max_val
        output[idx] = clipped * scale + bias
    return output
if __name__ == '__main__':
    # Generate artefact-corrupted copies of cardiac MR volumes: every image in
    # id_list is corrupted by each torchio transform in `attackers`,
    # `n_augmented` times, and written to save_dir/<attack>/<pid>_<run>/ along
    # with a symlink to the untouched segmentation label.
    # hyperparameters
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    save_internal_output = True  # NOTE(review): never read below -- confirm before removing
    # set up test image data path
    dataset_root = '/vol/biomedic3/cc215/Project/DeformADA/Data/bias_corrected_and_normalized'
    frame = 'ES'  # cardiac phase tag; also used in the output file names
    sub_dir = join(dataset_root, frame)
    # patient ids; zero-padded to 3 digits to match the on-disk naming scheme
    id_list = [7, 8, 9, 10, 27, 28, 29, 30, 47,
               48, 49, 50, 67, 68, 69, 70, 87, 88, 89, 90]
    test_image_paths = [[str(pid).zfill(3), join(
        sub_dir, '{}_img.nrrd').format(str(pid).zfill(3))] for pid in id_list]
    test_label_paths = [[str(pid).zfill(3), join(
        sub_dir, '{}_seg.nrrd').format(str(pid).zfill(3))] for pid in id_list]
    fix_img_size = [192, 192]  # fixed in-plane size fed to the transforms
    # torchio MRI artefact simulators, keyed by the output sub-directory name
    attackers = {
        'RandomMotion': RandomMotion(degrees=30, translation=10),
        'RandomSpike': RandomSpike(),
        'RandomGhosting': RandomGhosting(),
        'RandomBias': RandomBiasField()
    }
    n_augmented = 3  # corrupted copies per (image, transform) pair
    # output
    save_dir = '/vol/biomedic3/cc215/Project/DeformADA/Data/ACDC_artefacted'
    check_dir(save_dir, create=True)
    for attack_name, attacker in attackers.items():
        for j in range(n_augmented):
            for i in range(len(test_image_paths)):
                # load the volume plus its SimpleITK handle (kept so meta-data
                # can be copied onto the saved output)
                image, sitkImage = load_img_label_from_path(
                    img_path=test_image_paths[i][1])
                # crop/pad to the fixed in-plane size; offsets and original
                # extents are kept so the result can be mapped back later
                image, label, h_s, w_s, original_h, original_w = crop_or_pad(
                    image=image, label=None, crop_size=fix_img_size)
                image = preprocess3D(image)  # per-slice intensity normalisation
                origin_image3D_tensor = torch.tensor(
                    image[:, np.newaxis, :, :], requires_grad=False).float()
                # the transform expects channels first over the slice axis
                attacked_image = attacker(
                    origin_image3D_tensor.permute(1, 0, 2, 3))  # NCHW->CNHW
                attacked_image = attacked_image.permute(
                    1, 0, 2, 3)  # CNHW->NCHW
                attacked_image = rescale_intensity(
                    attacked_image, new_min=0, new_max=1)
                attacked_image = attacked_image.to(device)
                pid = test_label_paths[i][0]
                print('pid: {} n:{}'.format(pid, j))
                # save 3D images to the dir:
                patient_dir = join(
                    save_dir, *[attack_name, str(pid) + '_' + str(j)])
                check_dir(patient_dir, create=True)
                print('save to', patient_dir)
                # recover size:
                file_path = join(patient_dir, '{}_img.nrrd'.format(frame))
                print(file_path)
                # presumably recover_image undoes the earlier crop/pad using
                # the stored offsets -- confirm against its implementation
                adv_image = recover_image(attacked_image.data.cpu().numpy()[
                    :, 0, :, :], h_s, w_s, original_h, original_w)
                save_medical_image(sitk.GetImageFromArray(
                    adv_image), output_path=file_path, refImage=sitkImage)
                # label path: the segmentation is reused via a symlink rather
                # than copied
                label_path = test_label_paths[i][1]
                target_label_path = join(
                    patient_dir, '{}_label.nrrd'.format(frame))
                if os.path.islink(target_label_path):
                    os.unlink(target_label_path)  # replace a stale link from a previous run
                os.symlink(label_path, target_label_path)
| [
"medseg.common_utils.basic_operations.crop_or_pad",
"numpy.zeros_like",
"torchio.transforms.RandomMotion",
"torchio.transforms.RandomGhosting",
"medseg.common_utils.basic_operations.load_img_label_from_path",
"os.unlink",
"medseg.common_utils.basic_operations.rescale_intensity",
"numpy.percentile",
... | [((653, 692), 'numpy.zeros_like', 'np.zeros_like', (['image'], {'dtype': 'image.dtype'}), '(image, dtype=image.dtype)\n', (666, 692), True, 'import numpy as np\n'), ((1505, 1530), 'os.path.join', 'join', (['dataset_root', 'frame'], {}), '(dataset_root, frame)\n', (1509, 1530), False, 'from os.path import join\n'), ((2254, 2286), 'medseg.common_utils.basic_operations.check_dir', 'check_dir', (['save_dir'], {'create': '(True)'}), '(save_dir, create=True)\n', (2263, 2286), False, 'from medseg.common_utils.basic_operations import check_dir, rescale_intensity, crop_or_pad, load_img_label_from_path, recover_image\n'), ((797, 832), 'numpy.percentile', 'np.percentile', (['slice_data', '(0, 100)'], {}), '(slice_data, (0, 100))\n', (810, 832), True, 'import numpy as np\n'), ((1970, 2010), 'torchio.transforms.RandomMotion', 'RandomMotion', ([], {'degrees': '(30)', 'translation': '(10)'}), '(degrees=30, translation=10)\n', (1982, 2010), False, 'from torchio.transforms import RandomMotion, RandomSpike, RandomGhosting, RandomBiasField\n'), ((2035, 2048), 'torchio.transforms.RandomSpike', 'RandomSpike', ([], {}), '()\n', (2046, 2048), False, 'from torchio.transforms import RandomMotion, RandomSpike, RandomGhosting, RandomBiasField\n'), ((2076, 2092), 'torchio.transforms.RandomGhosting', 'RandomGhosting', ([], {}), '()\n', (2090, 2092), False, 'from torchio.transforms import RandomMotion, RandomSpike, RandomGhosting, RandomBiasField\n'), ((2116, 2133), 'torchio.transforms.RandomBiasField', 'RandomBiasField', ([], {}), '()\n', (2131, 2133), False, 'from torchio.transforms import RandomMotion, RandomSpike, RandomGhosting, RandomBiasField\n'), ((1274, 1299), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (1297, 1299), False, 'import torch\n'), ((2462, 2519), 'medseg.common_utils.basic_operations.load_img_label_from_path', 'load_img_label_from_path', ([], {'img_path': 'test_image_paths[i][1]'}), '(img_path=test_image_paths[i][1])\n', (2486, 2519), False, 
'from medseg.common_utils.basic_operations import check_dir, rescale_intensity, crop_or_pad, load_img_label_from_path, recover_image\n'), ((2606, 2666), 'medseg.common_utils.basic_operations.crop_or_pad', 'crop_or_pad', ([], {'image': 'image', 'label': 'None', 'crop_size': 'fix_img_size'}), '(image=image, label=None, crop_size=fix_img_size)\n', (2617, 2666), False, 'from medseg.common_utils.basic_operations import check_dir, rescale_intensity, crop_or_pad, load_img_label_from_path, recover_image\n'), ((3121, 3176), 'medseg.common_utils.basic_operations.rescale_intensity', 'rescale_intensity', (['attacked_image'], {'new_min': '(0)', 'new_max': '(1)'}), '(attacked_image, new_min=0, new_max=1)\n', (3138, 3176), False, 'from medseg.common_utils.basic_operations import check_dir, rescale_intensity, crop_or_pad, load_img_label_from_path, recover_image\n'), ((3525, 3560), 'medseg.common_utils.basic_operations.check_dir', 'check_dir', (['patient_dir'], {'create': '(True)'}), '(patient_dir, create=True)\n', (3534, 3560), False, 'from medseg.common_utils.basic_operations import check_dir, rescale_intensity, crop_or_pad, load_img_label_from_path, recover_image\n'), ((4234, 4267), 'os.path.islink', 'os.path.islink', (['target_label_path'], {}), '(target_label_path)\n', (4248, 4267), False, 'import os\n'), ((4334, 4375), 'os.symlink', 'os.symlink', (['label_path', 'target_label_path'], {}), '(label_path, target_label_path)\n', (4344, 4375), False, 'import os\n'), ((1683, 1711), 'os.path.join', 'join', (['sub_dir', '"""{}_img.nrrd"""'], {}), "(sub_dir, '{}_img.nrrd')\n", (1687, 1711), False, 'from os.path import join\n'), ((1812, 1840), 'os.path.join', 'join', (['sub_dir', '"""{}_seg.nrrd"""'], {}), "(sub_dir, '{}_seg.nrrd')\n", (1816, 1840), False, 'from os.path import join\n'), ((3927, 3960), 'SimpleITK.GetImageFromArray', 'sitk.GetImageFromArray', (['adv_image'], {}), '(adv_image)\n', (3949, 3960), True, 'import SimpleITK as sitk\n'), ((4289, 4317), 'os.unlink', 'os.unlink', 
(['target_label_path'], {}), '(target_label_path)\n', (4298, 4317), False, 'import os\n'), ((2772, 2833), 'torch.tensor', 'torch.tensor', (['image[:, np.newaxis, :, :]'], {'requires_grad': '(False)'}), '(image[:, np.newaxis, :, :], requires_grad=False)\n', (2784, 2833), False, 'import torch\n')] |
import os
import shutil
import sys
from pathlib import Path
import matplotlib.pyplot as plt
import numpy
import numpy as np
from keract import get_activations
from tensorflow.keras import Input
from tensorflow.keras.callbacks import Callback
from tensorflow.keras.layers import Dense, Dropout, LSTM, Flatten, Conv1D
from tensorflow.keras.models import load_model, Model
# from tensorflow.python.keras.utils.vis_utils import plot_model
from keras.utils.vis_utils import plot_model
# KERAS_ATTENTION_DEBUG: If set to 1. Will switch to debug mode.
# In debug mode, the class Attention is no longer a Keras layer.
# What it means in practice is that we can have access to the internal values
# of each tensor. If we don't use debug, Keras treats the object
# as a layer and we can only get the final output.
# In this example we need it because we want to extract all the intermediate output values.
os.environ['KERAS_ATTENTION_DEBUG'] = '1'
from attention import Attention
def task_add_two_numbers_after_delimiter(
        n: int, seq_length: int, delimiter: float = 0.0,
        index_1: int = None, index_2: int = None) -> (np.array, np.array):
    """Generate samples for the "average the two numbers after the delimiters" task.

    Each sample is a sequence of 2-channel values drawn uniformly from [0, 1).
    Two positions a and b are chosen (randomly, or fixed via index_1/index_2),
    the entries directly before them are overwritten with the delimiter value,
    and the target is the mean of channel 0 at those two positions:

        y = 0.5 * x[a, 0] + 0.5 * x[b, 0]

    @param n: number of samples in (x, y).
    @param seq_length: length of each input sequence.
    @param delimiter: value marking the positions to attend to. Default is 0.0.
    @param index_1: optional fixed index of the first target number.
    @param index_2: optional fixed index of the second target number.
    @return: two numpy arrays, x of shape (n, seq_length, 2) and y of shape (n, 1).
    """
    x = np.random.uniform(0, 1, (n, seq_length, 2))
    y = np.zeros(shape=(n, 1))
    for i in range(len(x)):
        if index_1 is None and index_2 is None:
            # Indices start at 1 so a delimiter can always be written just
            # before each chosen position.
            a, b = np.random.choice(range(1, seq_length), size=2, replace=False)
        else:
            a, b = index_1, index_2
        y[i, 0] = 0.5 * x[i, a, 0] + 0.5 * x[i, b, 0]
        # The delimiter overwrites both channels at the marker positions.
        x[i, a - 1:a] = delimiter
        x[i, b - 1:b] = delimiter
    return x, y
def main():
    """Train a small LSTM + Attention model on the two-number averaging task
    and save a picture of the attention map after every epoch.

    The optional first CLI argument overrides the epoch count (default 150).
    Attention-map PNGs are written to task_add_two_numbers/.
    """
    numpy.random.seed(7)  # reproducible data generation
    max_epoch = int(sys.argv[1]) if len(sys.argv) > 1 else 150
    # data. definition of the problem.
    seq_length = 20
    x_train, y_train = task_add_two_numbers_after_delimiter(20_000, seq_length)
    x_val, y_val = task_add_two_numbers_after_delimiter(4_000, seq_length)
    # just arbitrary values. it's for visual purposes. easy to see than random values.
    test_index_1 = 1
    test_index_2 = 19
    x_test, _ = task_add_two_numbers_after_delimiter(10, seq_length, 0, test_index_1, test_index_2)
    # x_test_mask is just a mask that, if applied to x_test, would still contain the information to solve the problem.
    # we expect the attention map to look like this mask.
    x_test_mask = np.zeros_like(x_test[..., 0])
    x_test_mask[:, test_index_1:test_index_1 + 1] = 1
    x_test_mask[:, test_index_2:test_index_2 + 1] = 1
    # Define/compile the model.
    model_input = Input(shape=( seq_length,2))
    x = LSTM(100, return_sequences=True, name='encoder_')(model_input)
    # alternative encoders kept for experimentation:
    # x = Conv1D(100,3, padding='same', name='encoder_')(model_input)
    # x = Flatten()(x)
    # x = Dense(20, use_bias=False, activation='tanh', name='attention_weight') (x)
    x = Attention()(x)
    x = Dropout(0.2)(x)
    x = Dense(1, activation='linear')(x)
    model = Model(model_input, x)
    model.compile(loss='mae', optimizer='adam')
    # Visualize the model.
    model.summary()
    plot_model(model,show_dtype=True,show_shapes=True,expand_nested=True,show_layer_activations=True)
    # Will display the activation map in task_add_two_numbers/
    output_dir = Path('task_add_two_numbers')
    if output_dir.exists():
        shutil.rmtree(str(output_dir))  # start every run from a clean output dir
    output_dir.mkdir(parents=True, exist_ok=True)
    class VisualiseAttentionMap(Callback):
        # After each epoch, plot the attention weights for x_test above the
        # ground-truth mask so their convergence can be inspected visually.
        # Intermediate 'attention_weight' activations are reachable because
        # KERAS_ATTENTION_DEBUG=1 was set before importing Attention.
        def on_epoch_end(self, epoch, logs=None):
            attention_map = get_activations(model, x_test)['attention_weight']
            # attention_map = get_activations(model, x_test)['encoder_']
            print("x_test")
            print(x_test.shape)
            # print(x_test)
            print("attention_map")
            print(attention_map.shape)
            print(model.output.get_shape())
            # exit()
            # top is attention map, bottom is ground truth.
            plt.imshow(np.concatenate([attention_map, x_test_mask]), cmap='hot')
            iteration_no = str(epoch).zfill(3)
            plt.axis('off')
            plt.title(f'Iteration {iteration_no} / {max_epoch}')
            output_filename = f'{output_dir}/epoch_{iteration_no}.png'
            print(f'Saving to {output_filename}.')
            plt.savefig(output_filename)
            plt.close()
    # train.
    print('x_train:',x_train.shape)
    print('y_train:',y_train.shape)
    print('x_val:',x_val.shape)
    print('y_val:',y_val.shape)
    model.fit(
        x_train, y_train, validation_data=(x_val, y_val),
        epochs=max_epoch, verbose=2, batch_size=64,
        callbacks=[VisualiseAttentionMap()]
    )
    # test save/reload model: predictions must survive a round trip to disk.
    pred1 = model.predict(x_val)
    model.save('test_model.h5')
    model_h5 = load_model('test_model.h5')
    pred2 = model_h5.predict(x_val)
    np.testing.assert_almost_equal(pred1, pred2)
    print('Success.')
if __name__ == '__main__':
    # Extra runtime dependencies for this demo:
    #   pip install pydot   (needed by plot_model)
    #   pip install keract  (needed to extract intermediate activations)
    main()
| [
"matplotlib.pyplot.title",
"numpy.random.seed",
"attention.Attention",
"tensorflow.keras.layers.Dense",
"pathlib.Path",
"numpy.zeros_like",
"numpy.testing.assert_almost_equal",
"tensorflow.keras.Input",
"keras.utils.vis_utils.plot_model",
"matplotlib.pyplot.close",
"tensorflow.keras.models.load_... | [((1689, 1732), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(1)', '(n, seq_length, 2)'], {}), '(0, 1, (n, seq_length, 2))\n', (1706, 1732), True, 'import numpy as np\n'), ((1740, 1762), 'numpy.zeros', 'np.zeros', ([], {'shape': '(n, 1)'}), '(shape=(n, 1))\n', (1748, 1762), True, 'import numpy as np\n'), ((2293, 2313), 'numpy.random.seed', 'numpy.random.seed', (['(7)'], {}), '(7)\n', (2310, 2313), False, 'import numpy\n'), ((3018, 3047), 'numpy.zeros_like', 'np.zeros_like', (['x_test[..., 0]'], {}), '(x_test[..., 0])\n', (3031, 3047), True, 'import numpy as np\n'), ((3207, 3235), 'tensorflow.keras.Input', 'Input', ([], {'shape': '(seq_length, 2)'}), '(shape=(seq_length, 2))\n', (3212, 3235), False, 'from tensorflow.keras import Input\n'), ((3585, 3606), 'tensorflow.keras.models.Model', 'Model', (['model_input', 'x'], {}), '(model_input, x)\n', (3590, 3606), False, 'from tensorflow.keras.models import load_model, Model\n'), ((3707, 3812), 'keras.utils.vis_utils.plot_model', 'plot_model', (['model'], {'show_dtype': '(True)', 'show_shapes': '(True)', 'expand_nested': '(True)', 'show_layer_activations': '(True)'}), '(model, show_dtype=True, show_shapes=True, expand_nested=True,\n show_layer_activations=True)\n', (3717, 3812), False, 'from keras.utils.vis_utils import plot_model\n'), ((3886, 3914), 'pathlib.Path', 'Path', (['"""task_add_two_numbers"""'], {}), "('task_add_two_numbers')\n", (3890, 3914), False, 'from pathlib import Path\n'), ((5409, 5436), 'tensorflow.keras.models.load_model', 'load_model', (['"""test_model.h5"""'], {}), "('test_model.h5')\n", (5419, 5436), False, 'from tensorflow.keras.models import load_model, Model\n'), ((5477, 5521), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['pred1', 'pred2'], {}), '(pred1, pred2)\n', (5507, 5521), True, 'import numpy as np\n'), ((3244, 3293), 'tensorflow.keras.layers.LSTM', 'LSTM', (['(100)'], {'return_sequences': '(True)', 'name': 
'"""encoder_"""'}), "(100, return_sequences=True, name='encoder_')\n", (3248, 3293), False, 'from tensorflow.keras.layers import Dense, Dropout, LSTM, Flatten, Conv1D\n'), ((3493, 3504), 'attention.Attention', 'Attention', ([], {}), '()\n', (3502, 3504), False, 'from attention import Attention\n'), ((3516, 3528), 'tensorflow.keras.layers.Dropout', 'Dropout', (['(0.2)'], {}), '(0.2)\n', (3523, 3528), False, 'from tensorflow.keras.layers import Dense, Dropout, LSTM, Flatten, Conv1D\n'), ((3540, 3569), 'tensorflow.keras.layers.Dense', 'Dense', (['(1)'], {'activation': '"""linear"""'}), "(1, activation='linear')\n", (3545, 3569), False, 'from tensorflow.keras.layers import Dense, Dropout, LSTM, Flatten, Conv1D\n'), ((4705, 4720), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (4713, 4720), True, 'import matplotlib.pyplot as plt\n'), ((4733, 4785), 'matplotlib.pyplot.title', 'plt.title', (['f"""Iteration {iteration_no} / {max_epoch}"""'], {}), "(f'Iteration {iteration_no} / {max_epoch}')\n", (4742, 4785), True, 'import matplotlib.pyplot as plt\n'), ((4920, 4948), 'matplotlib.pyplot.savefig', 'plt.savefig', (['output_filename'], {}), '(output_filename)\n', (4931, 4948), True, 'import matplotlib.pyplot as plt\n'), ((4961, 4972), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (4970, 4972), True, 'import matplotlib.pyplot as plt\n'), ((4154, 4184), 'keract.get_activations', 'get_activations', (['model', 'x_test'], {}), '(model, x_test)\n', (4169, 4184), False, 'from keract import get_activations\n'), ((4588, 4632), 'numpy.concatenate', 'np.concatenate', (['[attention_map, x_test_mask]'], {}), '([attention_map, x_test_mask])\n', (4602, 4632), True, 'import numpy as np\n')] |
# Copyright 2017-2019 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#==============================================================================
import sys
import argparse
import textwrap
import numpy as np
from scipy.linalg import norm
from vezda.plot_utils import FontColor
from vezda.data_utils import load_data, load_impulse_responses
from vezda.LinearSamplingClass import LinearSamplingProblem
def info():
    """Return the one-line help string shown for the 'vzsolve' command."""
    name = FontColor.BOLD + 'vzsolve:' + FontColor.END
    summary = ' solve for the unknown source function to obtain an image'
    return name + summary
def cli():
    """Parse command-line options, build the sampling problem, and solve it.

    Depending on --nfe/--lse (or an interactive prompt when neither flag is
    given), sets up either the near-field operator or the Lippmann-Schwinger
    operator, solves the linear system for the unknown source, and writes
    'solution{NFE,LSE}.npz' and 'image{NFE,LSE}.npz' to the working directory.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--nfe', action='store_true',
                        help='Solve the near-field equation (NFE).')
    parser.add_argument('--lse', action='store_true',
                        help='Solve the Lippmann-Schwinger equation (LSE).')
    parser.add_argument('--ngs', '-n', action='store_true',
                        help='''Normalize the impulse response by its energy. (Only used when
                        solving the near-field equation (NFE).''')
    parser.add_argument('--domain', '-d', type=str, default='freq', choices=['time', 'freq'],
                        help='''Specify whether to solve the linear system in the time domain
                        or frequency domain. Default is set to frequency domain for faster
                        performance.''')
    parser.add_argument('--method', '-m', type=str, default='lsmr', choices=['lsmr', 'lsqr', 'svd'],
                        help='''Specify the method for solving the linear system of equations:
                        iterative least-squares (lsmr/lsqr) or singular-value decomposition (svd).''')
    parser.add_argument('--fly', '-f', action='store_true',
                        help='''Solve on the fly. Default behavior is to load full array 'B' of right-hand side
                        vectors for bulk processing before solution of a linear systems Ax=b, where each vector
                        'b' is a column of 'B'. This defualt behavior can be slow for large arrays B. Solving on
                        the flly pulls and processes a single vector 'b' at a time for rapid compute.''')
    parser.add_argument('--nproc', type=int,
                        help='''Specify the number of processors to parallelize over. Default is serial
                        (i.e., one processor). nproc=-1 uses all available processors. nproc=-2 uses all
                        but one available processors.''')
    parser.add_argument('--numVals', '-k', type=int,
                        help='''Specify the number of singular values/vectors to compute.
                        Must a positive integer between 1 and the order of the linear operator.''')
    parser.add_argument('--regPar', '--alpha', type=float,
                        help='''Specify the value of the regularization parameter. Default is set
                        to zero.''')
    parser.add_argument('--atol', type=float,
                        help='''Specify the error tolerance of the linear operator A. (1e-q roughly
                        corresponds to 'q' correct decimal digits. Default is set to 1e-8.''')
    parser.add_argument('--btol', type=float,
                        help='''Specify the error tolerance of the right-hand side vectors b. (1e-q roughly
                        corresponds to 'q' correct decimal digits. Default is set to 1e-8.''')
    parser.add_argument('--medium', type=str, default='constant', choices=['constant', 'variable'],
                        help='''Specify whether the background medium is constant or variable
                        (inhomogeneous). If argument is set to 'constant', the velocity defined in
                        the required 'pulsesFun.py' file is used. Default is set to 'constant'.''')
    args = parser.parse_args()

    #==========================================================================
    # Check the value of the regularization parameter
    #==========================================================================
    # Each optional scalar argument is validated the same way: reject bad
    # values with sys.exit, otherwise fall back to a documented default.
    if args.regPar is not None:
        if args.regPar >= 0.0:
            alpha = args.regPar
        else:
            sys.exit(textwrap.dedent(
                    '''
                    Error: Optional argument '--regPar/--alpha' cannot be negative.
                    The regularization parameter must be greater than or equal to zero.
                    '''))
    else:
        # if args.regPar is None
        alpha = 0.0

    #==========================================================================
    # Check the value of the error tolerance of the linear operator
    #==========================================================================
    if args.atol is not None:
        if args.atol > 0.0:
            atol = args.atol
        else:
            sys.exit(textwrap.dedent(
                    '''
                    Error: Optional argument '--atol' must be positive.
                    '''))
    else:
        # if args.atol is None
        atol = 1.0e-8

    #==========================================================================
    # Check the value of the tolerance for convergence of iterative methods
    #==========================================================================
    if args.btol is not None:
        if args.btol > 0.0:
            btol = args.btol
        else:
            sys.exit(textwrap.dedent(
                    '''
                    Error: Optional argument '--btol' must be positive.
                    '''))
    else:
        # if args.btol is None
        btol = 1.0e-8

    #==========================================================================
    # Check the number of processors specified
    #==========================================================================
    if args.nproc is not None:
        if args.nproc != 0:
            nproc = args.nproc
        else:
            sys.exit(textwrap.dedent(
                    '''
                    Error: Optional argument '--nproc' must be nonzero.
                    '''))
    else:
        # if args.nproc is None
        nproc = 1

    #==========================================================================
    # determine whether to solve near-field equation or Lippmann-Schwinger equation
    # load data, impulseResponses
    #==========================================================================
    if args.nfe:
        # Solve using the linear sampling method
        # data form the kernel of the linear operator A
        data = load_data(args.domain, taper=True, verbose=True, skip_fft=False)
        # impulse responses are the right-hand side vectors b
        impulseResponses = load_impulse_responses(args.domain, args.medium, skip_fft=args.fly)
        if args.ngs:
            print('Normalizing impulse responses by their energy...')
            for k in range(impulseResponses.shape[2]):
                impulseResponses[:, :, k] /= norm(impulseResponses[:, :, k])
        p = LinearSamplingProblem(operatorName='nfo', kernel=data, rhs_vectors=impulseResponses)
    elif args.lse:
        # Solve using Lippmann-Schwinger inversion
        # data are the right-hand side vectors b
        data = load_data(args.domain, taper=True, verbose=True, skip_fft=args.fly)
        # impulse responses form the kernel of the linear operator A
        impulseResponses = load_impulse_responses(args.domain, args.medium, skip_fft=False)
        if args.domain == 'time':
            # This is particular to solving the Lippmann-Schwinger equation in the time domain
            # Pad data in the time domain to length 2*Nt-1 (length of circular convolution)
            N = data.shape[1] - 1
            npad = ((0, 0), (N, 0), (0, 0))
            data = np.pad(data, pad_width=npad, mode='constant', constant_values=0)
        p = LinearSamplingProblem(operatorName='lso', kernel=impulseResponses, rhs_vectors=data)
    else:
        # Neither flag given: ask interactively. The loop below repeats the
        # nfe/lse setup verbatim and only exits once a valid choice is made.
        userResponded = False
        print(textwrap.dedent(
              '''
              Please specify which linear equation you would like to solve:

              Enter 'nfe' to solve the near-field equation. (Default)
              Enter 'lse' to solve the Lippmann-Schwinger equation.
              Enter 'q/quit' to exit.
              '''))
        while userResponded == False:
            answer = input('Action: ')
            if answer == '' or answer == 'nfe':
                args.nfe = True
                print('Solving the near-field equation...')
                # data form the kernel of the linear operator A
                data = load_data(args.domain, taper=True, verbose=True, skip_fft=False)
                # impulse responses are the right-hand side vectors b
                impulseResponses = load_impulse_responses(args.domain, args.medium, skip_fft=args.fly)
                if args.ngs:
                    print('Normalizing impulse responses by their energy...')
                    for k in range(impulseResponses.shape[2]):
                        impulseResponses[:, :, k] /= norm(impulseResponses[:, :, k])
                p = LinearSamplingProblem(operatorName='nfo', kernel=data, rhs_vectors=impulseResponses)
                userResponded = True
                break
            elif answer == 'lse':
                args.lse = True
                print('Solving the Lippmann-Schwinger equation...')
                # data are the right-hand side vectors b
                data = load_data(args.domain, taper=True, verbose=True, skip_fft=args.fly)
                # impulse responses form the kernel of the linear operator A
                impulseResponses = load_impulse_responses(args.domain, args.medium, skip_fft=False)
                if args.domain == 'time':
                    # This is particular to solving the Lippmann-Schwinger equation in the time domain
                    # Pad data in the time domain to length 2*Nt-1 (length of circular convolution)
                    N = data.shape[1] - 1
                    npad = ((0, 0), (N, 0), (0, 0))
                    data = np.pad(data, pad_width=npad, mode='constant', constant_values=0)
                p = LinearSamplingProblem(operatorName='lso', kernel=impulseResponses, rhs_vectors=data)
                userResponded = True
                break
            elif answer == 'q' or answer == 'quit':
                sys.exit('Exiting program.\n')
            else:
                print('Invalid response. Please enter \'nfe\', \'lse\', or \'q/quit\'.')

    #==========================================================================
    # Solve the problem using the specified method, regularization parameter,
    # and tolerance.
    # Construct images from solutions.
    # Save solutions and images to file.
    #==========================================================================
    # define extension for saving files
    # By this point exactly one of args.nfe/args.lse is True (the interactive
    # loop above guarantees it), so 'extension' is always bound.
    if args.nfe:
        extension = 'NFE.npz'
    elif args.lse:
        extension = 'LSE.npz'
    X = p.solve(args.method, args.fly, nproc, alpha, atol, btol, args.numVals)
    Image = p.construct_image(X)
    np.savez('solution'+extension, X=X, alpha=alpha, domain=args.domain)
    np.savez('image'+extension, Image=Image, method=args.method,
             alpha=alpha, atol=atol, btol=btol, domain=args.domain) | [
"textwrap.dedent",
"numpy.pad",
"argparse.ArgumentParser",
"vezda.LinearSamplingClass.LinearSamplingProblem",
"vezda.data_utils.load_impulse_responses",
"scipy.linalg.norm",
"vezda.data_utils.load_data",
"numpy.savez",
"sys.exit"
] | [((1138, 1163), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (1161, 1163), False, 'import argparse\n'), ((11925, 11995), 'numpy.savez', 'np.savez', (["('solution' + extension)"], {'X': 'X', 'alpha': 'alpha', 'domain': 'args.domain'}), "('solution' + extension, X=X, alpha=alpha, domain=args.domain)\n", (11933, 11995), True, 'import numpy as np\n'), ((11998, 12119), 'numpy.savez', 'np.savez', (["('image' + extension)"], {'Image': 'Image', 'method': 'args.method', 'alpha': 'alpha', 'atol': 'atol', 'btol': 'btol', 'domain': 'args.domain'}), "('image' + extension, Image=Image, method=args.method, alpha=alpha,\n atol=atol, btol=btol, domain=args.domain)\n", (12006, 12119), True, 'import numpy as np\n'), ((7188, 7252), 'vezda.data_utils.load_data', 'load_data', (['args.domain'], {'taper': '(True)', 'verbose': '(True)', 'skip_fft': '(False)'}), '(args.domain, taper=True, verbose=True, skip_fft=False)\n', (7197, 7252), False, 'from vezda.data_utils import load_data, load_impulse_responses\n'), ((7351, 7418), 'vezda.data_utils.load_impulse_responses', 'load_impulse_responses', (['args.domain', 'args.medium'], {'skip_fft': 'args.fly'}), '(args.domain, args.medium, skip_fft=args.fly)\n', (7373, 7418), False, 'from vezda.data_utils import load_data, load_impulse_responses\n'), ((7672, 7761), 'vezda.LinearSamplingClass.LinearSamplingProblem', 'LinearSamplingProblem', ([], {'operatorName': '"""nfo"""', 'kernel': 'data', 'rhs_vectors': 'impulseResponses'}), "(operatorName='nfo', kernel=data, rhs_vectors=\n impulseResponses)\n", (7693, 7761), False, 'from vezda.LinearSamplingClass import LinearSamplingProblem\n'), ((7905, 7972), 'vezda.data_utils.load_data', 'load_data', (['args.domain'], {'taper': '(True)', 'verbose': '(True)', 'skip_fft': 'args.fly'}), '(args.domain, taper=True, verbose=True, skip_fft=args.fly)\n', (7914, 7972), False, 'from vezda.data_utils import load_data, load_impulse_responses\n'), ((8078, 8142), 
'vezda.data_utils.load_impulse_responses', 'load_impulse_responses', (['args.domain', 'args.medium'], {'skip_fft': '(False)'}), '(args.domain, args.medium, skip_fft=False)\n', (8100, 8142), False, 'from vezda.data_utils import load_data, load_impulse_responses\n'), ((8556, 8644), 'vezda.LinearSamplingClass.LinearSamplingProblem', 'LinearSamplingProblem', ([], {'operatorName': '"""lso"""', 'kernel': 'impulseResponses', 'rhs_vectors': 'data'}), "(operatorName='lso', kernel=impulseResponses,\n rhs_vectors=data)\n", (8577, 8644), False, 'from vezda.LinearSamplingClass import LinearSamplingProblem\n'), ((4773, 5000), 'textwrap.dedent', 'textwrap.dedent', (['"""\n Error: Optional argument \'--regPar/--alpha\' cannot be negative. \n The regularization parameter must be greater than or equal to zero.\n """'], {}), '(\n """\n Error: Optional argument \'--regPar/--alpha\' cannot be negative. \n The regularization parameter must be greater than or equal to zero.\n """\n )\n', (4788, 5000), False, 'import textwrap\n'), ((5435, 5562), 'textwrap.dedent', 'textwrap.dedent', (['"""\n Error: Optional argument \'--atol\' must be positive. \n """'], {}), '(\n """\n Error: Optional argument \'--atol\' must be positive. \n """\n )\n', (5450, 5562), False, 'import textwrap\n'), ((6005, 6132), 'textwrap.dedent', 'textwrap.dedent', (['"""\n Error: Optional argument \'--btol\' must be positive. \n """'], {}), '(\n """\n Error: Optional argument \'--btol\' must be positive. \n """\n )\n', (6020, 6132), False, 'import textwrap\n'), ((6549, 6676), 'textwrap.dedent', 'textwrap.dedent', (['"""\n Error: Optional argument \'--nproc\' must be nonzero. \n """'], {}), '(\n """\n Error: Optional argument \'--nproc\' must be nonzero. 
\n """\n )\n', (6564, 6676), False, 'import textwrap\n'), ((7619, 7650), 'scipy.linalg.norm', 'norm', (['impulseResponses[:, :, k]'], {}), '(impulseResponses[:, :, k])\n', (7623, 7650), False, 'from scipy.linalg import norm\n'), ((8470, 8534), 'numpy.pad', 'np.pad', (['data'], {'pad_width': 'npad', 'mode': '"""constant"""', 'constant_values': '(0)'}), "(data, pad_width=npad, mode='constant', constant_values=0)\n", (8476, 8534), True, 'import numpy as np\n'), ((8704, 9023), 'textwrap.dedent', 'textwrap.dedent', (['"""\n Please specify which linear equation you would like to solve:\n \n Enter \'nfe\' to solve the near-field equation. (Default)\n Enter \'lse\' to solve the Lippmann-Schwinger equation.\n Enter \'q/quit\' to exit.\n """'], {}), '(\n """\n Please specify which linear equation you would like to solve:\n \n Enter \'nfe\' to solve the near-field equation. (Default)\n Enter \'lse\' to solve the Lippmann-Schwinger equation.\n Enter \'q/quit\' to exit.\n """\n )\n', (8719, 9023), False, 'import textwrap\n'), ((9334, 9398), 'vezda.data_utils.load_data', 'load_data', (['args.domain'], {'taper': '(True)', 'verbose': '(True)', 'skip_fft': '(False)'}), '(args.domain, taper=True, verbose=True, skip_fft=False)\n', (9343, 9398), False, 'from vezda.data_utils import load_data, load_impulse_responses\n'), ((9504, 9571), 'vezda.data_utils.load_impulse_responses', 'load_impulse_responses', (['args.domain', 'args.medium'], {'skip_fft': 'args.fly'}), '(args.domain, args.medium, skip_fft=args.fly)\n', (9526, 9571), False, 'from vezda.data_utils import load_data, load_impulse_responses\n'), ((9847, 9936), 'vezda.LinearSamplingClass.LinearSamplingProblem', 'LinearSamplingProblem', ([], {'operatorName': '"""nfo"""', 'kernel': 'data', 'rhs_vectors': 'impulseResponses'}), "(operatorName='nfo', kernel=data, rhs_vectors=\n impulseResponses)\n", (9868, 9936), False, 'from vezda.LinearSamplingClass import LinearSamplingProblem\n'), ((10218, 10285), 'vezda.data_utils.load_data', 
'load_data', (['args.domain'], {'taper': '(True)', 'verbose': '(True)', 'skip_fft': 'args.fly'}), '(args.domain, taper=True, verbose=True, skip_fft=args.fly)\n', (10227, 10285), False, 'from vezda.data_utils import load_data, load_impulse_responses\n'), ((10398, 10462), 'vezda.data_utils.load_impulse_responses', 'load_impulse_responses', (['args.domain', 'args.medium'], {'skip_fft': '(False)'}), '(args.domain, args.medium, skip_fft=False)\n', (10420, 10462), False, 'from vezda.data_utils import load_data, load_impulse_responses\n'), ((10931, 11019), 'vezda.LinearSamplingClass.LinearSamplingProblem', 'LinearSamplingProblem', ([], {'operatorName': '"""lso"""', 'kernel': 'impulseResponses', 'rhs_vectors': 'data'}), "(operatorName='lso', kernel=impulseResponses,\n rhs_vectors=data)\n", (10952, 11019), False, 'from vezda.LinearSamplingClass import LinearSamplingProblem\n'), ((9795, 9826), 'scipy.linalg.norm', 'norm', (['impulseResponses[:, :, k]'], {}), '(impulseResponses[:, :, k])\n', (9799, 9826), False, 'from scipy.linalg import norm\n'), ((10829, 10893), 'numpy.pad', 'np.pad', (['data'], {'pad_width': 'npad', 'mode': '"""constant"""', 'constant_values': '(0)'}), "(data, pad_width=npad, mode='constant', constant_values=0)\n", (10835, 10893), True, 'import numpy as np\n'), ((11156, 11186), 'sys.exit', 'sys.exit', (['"""Exiting program.\n"""'], {}), "('Exiting program.\\n')\n", (11164, 11186), False, 'import sys\n')] |
import rospy
import cv2
import cv2.aruco as aruco
import sys
import numpy as np
import time
class TrackByAruco:
    """Detect a single ArUco marker in camera frames and report its centre.

    NOTE(review): the arithmetic such as self.imageSize[0]/2 relies on
    truncating integer division and the commented-out "print self.targetSize"
    is Python-2 syntax; under Python 3 the drawing calls would receive float
    coordinates -- confirm the target interpreter before porting.
    """

    def __init__(self, imageSize):
        # imageSize is also used as a fallback centre (self.imageSize/2 in
        # returnCenterOfArcuo), so it is presumably array-like -- verify.
        self.imageSize = imageSize
        self.ideatToTrack = 1   # marker id to follow (default id 1)
        self.targetSize = None  # apparent marker width, set on detection

    def setIdeaToTrack(self, ideatToTrack):
        # Select which ArUco marker id should be tracked.
        self.ideatToTrack = ideatToTrack

    def getTemplateSize(self):
        # Last measured marker width in pixels (None until first detection).
        return self.targetSize

    def trackObject(self, image):
        # Detect all 4x4 ArUco markers in the frame, draw them plus a centre
        # crosshair, show the result, and return the tracked marker's centre
        # (or None when the tracked id is not visible).
        gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        aruco_dict = aruco.Dictionary_get(aruco.DICT_4X4_100)
        parameters = aruco.DetectorParameters_create()
        # lists of ids and the corners belonging to each id
        corners, ids, rejectedImgPoints = aruco.detectMarkers(gray, aruco_dict, parameters=parameters)
        image = aruco.drawDetectedMarkers(image, corners)
        centerPoint = self.returnPointForSelectedIde(corners,ids,self.ideatToTrack)
        if centerPoint is not None:
            # Mark the tracked marker's centre with a filled magenta dot.
            image = cv2.circle(image, (centerPoint[0],centerPoint[1]) , 5, (255,0,255) , -1)
        # Red crosshair at the frame centre (drawn whether or not a marker
        # was found).
        lineSize = 20
        image = cv2.line(image,(self.imageSize[0]/2, self.imageSize[1]/2-lineSize),(self.imageSize[0]/2, self.imageSize[1]/2+lineSize),(0,0,255),2)
        image = cv2.line(image,(self.imageSize[0]/2-lineSize, self.imageSize[1]/2),(self.imageSize[0]/2+lineSize, self.imageSize[1]/2),(0,0,255),2)
        self.createWindows("Aruco", image,(900,600))
        return centerPoint

    def returnCenterOfArcuo(self, markerCorners,ide = 0):
        # Average the four corner coordinates of marker index *ide* to get
        # its centre; fall back to the frame centre when nothing is detected.
        centerPoint = np.array([0, 0])
        if len(markerCorners) > 0:
            centerPoint[0] = int((markerCorners[ide][0][1][0] + markerCorners[ide][0][2][0] + markerCorners[ide][0][3][0] + markerCorners[ide][0][0][0])/4)
            centerPoint[1] = int((markerCorners[ide][0][1][1] + markerCorners[ide][0][2][1] + markerCorners[ide][0][3][1] + markerCorners[ide][0][0][1])/4)
            # Horizontal distance between two opposite corners: the marker's
            # apparent width (a proxy for distance to the camera).
            self.targetSize = abs(markerCorners[ide][0][0][0] - markerCorners[ide][0][2][0])
            # print self.targetSize
        else:
            # NOTE(review): requires self.imageSize to support element-wise
            # division (e.g. a numpy array) -- confirm against callers.
            centerPoint = self.imageSize/2
        return centerPoint

    def returnPointForSelectedIde(self,corners,ids,number):
        # Scan detected ids for *number*; return its centre, else None.
        if ids is not None:
            for x, ide in enumerate(ids,0):
                if ide == number:
                    centerPoint = self.returnCenterOfArcuo(corners, x)
                    return centerPoint
        return None

    def createWindows(self, imageName, imageToShow, WindowSize = (640,320)):
        # Show *imageToShow* in a resizable window and pump the GUI event
        # loop briefly so the frame is actually rendered.
        cv2.namedWindow(imageName, cv2.WINDOW_NORMAL)
        cv2.resizeWindow(imageName, WindowSize)
        cv2.imshow(imageName, imageToShow)
        cv2.waitKey(10)
| [
"cv2.line",
"cv2.aruco.drawDetectedMarkers",
"cv2.circle",
"cv2.aruco.DetectorParameters_create",
"cv2.cvtColor",
"cv2.waitKey",
"cv2.aruco.Dictionary_get",
"cv2.aruco.detectMarkers",
"numpy.array",
"cv2.resizeWindow",
"cv2.imshow",
"cv2.namedWindow"
] | [((445, 484), 'cv2.cvtColor', 'cv2.cvtColor', (['image', 'cv2.COLOR_BGR2GRAY'], {}), '(image, cv2.COLOR_BGR2GRAY)\n', (457, 484), False, 'import cv2\n'), ((506, 546), 'cv2.aruco.Dictionary_get', 'aruco.Dictionary_get', (['aruco.DICT_4X4_100'], {}), '(aruco.DICT_4X4_100)\n', (526, 546), True, 'import cv2.aruco as aruco\n'), ((569, 602), 'cv2.aruco.DetectorParameters_create', 'aruco.DetectorParameters_create', ([], {}), '()\n', (600, 602), True, 'import cv2.aruco as aruco\n'), ((704, 764), 'cv2.aruco.detectMarkers', 'aruco.detectMarkers', (['gray', 'aruco_dict'], {'parameters': 'parameters'}), '(gray, aruco_dict, parameters=parameters)\n', (723, 764), True, 'import cv2.aruco as aruco\n'), ((781, 822), 'cv2.aruco.drawDetectedMarkers', 'aruco.drawDetectedMarkers', (['image', 'corners'], {}), '(image, corners)\n', (806, 822), True, 'import cv2.aruco as aruco\n'), ((1075, 1228), 'cv2.line', 'cv2.line', (['image', '(self.imageSize[0] / 2, self.imageSize[1] / 2 - lineSize)', '(self.imageSize[0] / 2, self.imageSize[1] / 2 + lineSize)', '(0, 0, 255)', '(2)'], {}), '(image, (self.imageSize[0] / 2, self.imageSize[1] / 2 - lineSize),\n (self.imageSize[0] / 2, self.imageSize[1] / 2 + lineSize), (0, 0, 255), 2)\n', (1083, 1228), False, 'import cv2\n'), ((1223, 1376), 'cv2.line', 'cv2.line', (['image', '(self.imageSize[0] / 2 - lineSize, self.imageSize[1] / 2)', '(self.imageSize[0] / 2 + lineSize, self.imageSize[1] / 2)', '(0, 0, 255)', '(2)'], {}), '(image, (self.imageSize[0] / 2 - lineSize, self.imageSize[1] / 2),\n (self.imageSize[0] / 2 + lineSize, self.imageSize[1] / 2), (0, 0, 255), 2)\n', (1231, 1376), False, 'import cv2\n'), ((1516, 1532), 'numpy.array', 'np.array', (['[0, 0]'], {}), '([0, 0])\n', (1524, 1532), True, 'import numpy as np\n'), ((2431, 2476), 'cv2.namedWindow', 'cv2.namedWindow', (['imageName', 'cv2.WINDOW_NORMAL'], {}), '(imageName, cv2.WINDOW_NORMAL)\n', (2446, 2476), False, 'import cv2\n'), ((2485, 2524), 'cv2.resizeWindow', 'cv2.resizeWindow', 
(['imageName', 'WindowSize'], {}), '(imageName, WindowSize)\n', (2501, 2524), False, 'import cv2\n'), ((2533, 2567), 'cv2.imshow', 'cv2.imshow', (['imageName', 'imageToShow'], {}), '(imageName, imageToShow)\n', (2543, 2567), False, 'import cv2\n'), ((2576, 2591), 'cv2.waitKey', 'cv2.waitKey', (['(10)'], {}), '(10)\n', (2587, 2591), False, 'import cv2\n'), ((964, 1037), 'cv2.circle', 'cv2.circle', (['image', '(centerPoint[0], centerPoint[1])', '(5)', '(255, 0, 255)', '(-1)'], {}), '(image, (centerPoint[0], centerPoint[1]), 5, (255, 0, 255), -1)\n', (974, 1037), False, 'import cv2\n')] |
import pyfere as pf
import numpy as np
import time
def execute(BloomFilter, n, m, k, in_parallel=True, universe=2**63 - 1):
    """Benchmark insert/query speed and error rates of a Bloom filter class.

    Args:
        BloomFilter: filter class; instantiated as BloomFilter(m, k, in_parallel).
        n: number of member (x) and non-member (y) keys to generate.
        m: number of bits in the filter.
        k: number of hash functions.
        in_parallel: forwarded to the filter constructor.
        universe: exclusive upper bound for the random integer keys
            (parameterized; defaults to the original hard-coded 2**63 - 1).

    Prints the elapsed wall-clock time of each operation, the true-positive
    rate over x and the false-positive rate over y. Returns None.
    """
    def sample_keys(count):
        # Draws may repeat; the caller below resamples until the sets are
        # duplicate-free and disjoint.
        return list(np.random.choice(universe, count))

    def benchmark(func, tag):
        # Time a single call and report it; pass through its result.
        start = time.time()
        result = func()
        end = time.time()
        print ("%ss (%s)" % (end-start, tag))
        return result

    Filter = BloomFilter(m, k, in_parallel)
    print ("Benchmark %s" % Filter, n, m, k)

    # Draw two disjoint sets of n distinct keys each: x goes into the filter,
    # y stays out so any query hit on y is a false positive.
    while True:
        x = sample_keys(n)
        y = sample_keys(n)
        if len(set(x)) == n and len(set(y)) == n and not set(x) & set(y):
            break

    benchmark(lambda : Filter.insert(x), "insert x")

    result = benchmark(lambda : Filter.query(x), "query x")
    # Every x was inserted, so a correct filter yields TPR == 1.0.
    print ("true positive rate: TPR = %s" % (sum(result)/float(n)))

    result = benchmark(lambda : Filter.query(y), "query y")
    print ("false positive rate: FPR = %s" % (sum(result)/float(n)))
# Benchmark parameters: n keys, 8 filter bits per key, and the classic
# optimal hash count k = (m/n) * ln 2 for that bits-per-key ratio.
n = 10**7
m = 8*n
k = int(float(m)/float(n)*np.log(2))
# Compare the plain and the partitioned Bloom filter implementations.
execute(pf.BloomFilter, n, m, k)
execute(pf.PartitionedBloomFilter, n, m, k)
| [
"numpy.random.choice",
"numpy.log",
"time.time"
] | [((225, 236), 'time.time', 'time.time', ([], {}), '()\n', (234, 236), False, 'import time\n'), ((275, 286), 'time.time', 'time.time', ([], {}), '()\n', (284, 286), False, 'import time\n'), ((1058, 1067), 'numpy.log', 'np.log', (['(2)'], {}), '(2)\n', (1064, 1067), True, 'import numpy as np\n'), ((147, 179), 'numpy.random.choice', 'np.random.choice', (['(2 ** 63 - 1)', 'n'], {}), '(2 ** 63 - 1, n)\n', (163, 179), True, 'import numpy as np\n')] |
import numpy as np
import matplotlib.pyplot as plt
# x-values 1..5.
x=np.arange(1,6)
# NOTE(review): y ([2, 4, 6, 8, 10]) is computed but never used below --
# confirm it is an intentional leftover.
y=np.arange(2,11,2)
# One figure with a single manually-placed axes; the rectangle is
# [left, bottom, width, height] in figure-fraction coordinates.
fig = plt.figure()
axes1=fig.add_axes([0.1,0.1,0.8,0.8])
# Plot y = x**2 as a red curve with large circular black markers.
axes1.plot(x,x**2,color="red",marker="o",markersize=20,markerfacecolor="black")
plt.show() | [
"matplotlib.pyplot.figure",
"numpy.arange",
"matplotlib.pyplot.show"
] | [((54, 69), 'numpy.arange', 'np.arange', (['(1)', '(6)'], {}), '(1, 6)\n', (63, 69), True, 'import numpy as np\n'), ((71, 90), 'numpy.arange', 'np.arange', (['(2)', '(11)', '(2)'], {}), '(2, 11, 2)\n', (80, 90), True, 'import numpy as np\n'), ((96, 108), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (106, 108), True, 'import matplotlib.pyplot as plt\n'), ((229, 239), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (237, 239), True, 'import matplotlib.pyplot as plt\n')] |
from typing import List, Dict
import numpy as np
from rlo import utils
def by_rep(es):
    """Group events by their integer "repetition" field.

    Events without a "repetition" key are dropped entirely.
    """
    with_rep = [event for event in es if "repetition" in event]
    return utils.group_by(with_rep, lambda event: int(event["repetition"]))
def filter_event_fields(events):
    """Strip volatile bookkeeping fields from each event dict.

    Fields such as timings, repetition index, device id and file paths vary
    between otherwise-identical runs, so they are removed before events are
    compared for equivalence.

    Args:
        events: iterable of event dicts.

    Returns:
        A list of new dicts with the volatile keys removed; the input dicts
        are not mutated.
    """
    # Frozenset membership is O(1); the original rebuilt a list literal and
    # scanned it linearly for every key of every event.
    excluded = frozenset([
        "total_train_time",
        "total_train_cpu",
        "repetition",
        "device_id",
        "gnn_eval_time",
        "saved_state_file",
        "model_file",
    ])
    return [{k: v for k, v in e.items() if k not in excluded} for e in events]
def sort_order(events):
    """Return *events* sorted into a deterministic comparison order.

    Events are ordered by generation, then event name, then expression
    (falling back from "expr" to "eval_expr"), then distilled-network name.
    Missing fields sort as 0 / empty strings.
    """
    def event_key(event):
        expr = event.get("expr", event.get("eval_expr", ""))
        return (
            event.get("generation", 0),
            event["event"],
            expr,
            event.get("distill_net", ""),
        )

    return sorted(events, key=event_key)
def compare_two_reps(rep1, rep2, desc):
    """Check that two repetitions produced equivalent event streams.

    Events are compared pairwise after stripping volatile fields; excess
    events from the longer repetition are ignored (one run may simply have
    lasted longer). On the first mismatch the agreeing and differing fields
    of the offending pair are printed and ValueError is raised.
    """
    print(
        "Comparing repetition {} with {} events vs {}".format(
            desc, len(rep1), len(rep2)
        )
    )
    # Discard excess events, as one run could go for longer than the other
    for i, (e1, e2) in enumerate(
        zip(filter_event_fields(rep1), filter_event_fields(rep2))
    ):
        same_keys, diff_keys = same_and_different_keys(e1, e2)
        if diff_keys:
            # Show context: the matching fields first, then each side's
            # values for the differing keys.
            print("same: " + str({k: e1[k] for k in same_keys}))
            print("first: " + str({k: e1[k] for k in diff_keys if k in e1}))
            print("second: " + str({k: e2[k] for k in diff_keys if k in e2}))
            raise ValueError(f"Repetitions {desc} differ at index {i}.")
def is_similar(v1, v2) -> bool:
    """Recursively decide whether two values are "similar".

    Floats may differ within an absolute tolerance; lists are compared
    element-wise and dicts key-wise (mutually recursing through
    same_and_different_keys); any other type must compare exactly equal.
    The two values must also have exactly the same type.
    """
    # Check if two (possibly nested) values are similar. Float values can differ by some tolerance; other data types must match exactly.
    # TODO #19704 instead of having this complicated logic and generous tolerance for float values, use torch.backends.cudnn.deterministic for determinism tests.
    if type(v1) != type(v2):
        # Deliberately strict type equality: e.g. bool vs int counts as a
        # difference even though isinstance would accept it.
        print(f"{v1} ({type(v1)}) and {v2} ({type(v2)}) have different types.")
        return False
    if isinstance(v1, List):
        if len(v1) != len(v2):
            print(
                f"Lists have different lengths: first has {len(v1)} items, second has {len(v2)} items."
            )
            return False
        return all(is_similar(x1, x2) for x1, x2 in zip(v1, v2))
    elif isinstance(v1, Dict):
        # Similar iff no key differs (per same_and_different_keys).
        _, diff_keys = same_and_different_keys(v1, v2)
        if diff_keys:
            return False
        return True
    elif isinstance(v1, float):
        # Floats don't have to match exactly
        return np.allclose(v1, v2, atol=1e-6)
    else:
        # Any other type does have to match exactly
        return v1 == v2
def same_and_different_keys(v1, v2):
    """Partition the keys of two dicts by value similarity.

    Returns a pair (same_keys, diff_keys): keys present in both dicts whose
    values are similar (per is_similar), and every remaining key from either
    dict (missing on one side, or present with dissimilar values).
    """
    shared = frozenset(v1.keys()) & frozenset(v2.keys())
    same_keys = {k for k in shared if is_similar(v1[k], v2[k])}
    all_keys = frozenset(v1.keys()) | frozenset(v2.keys())
    diff_keys = all_keys - same_keys
    return same_keys, diff_keys
def compare_runs(run1, run2, sort=False):
    """Verify two runs have identical repetitions with equivalent events.

    Args:
        run1, run2: flat lists of event dicts from two separate runs.
        sort: if True, canonicalize each repetition's event order with
            sort_order before comparing.

    Raises:
        ValueError: if the sets of repetition indices differ, or any
            repetition's events differ (via compare_two_reps).
    """
    sort_fn = sort_order if sort else lambda evs: evs

    def list_reps(evs):
        # The set of repetition indices present in a run's events.
        return frozenset([e["repetition"] for e in evs if "repetition" in e])

    if list_reps(run1) != list_reps(run2):
        raise ValueError("Runs have different sets of repetitions")
    for rep, (es1, es2) in sorted(utils.zip_values(by_rep(run1), by_rep(run2)).items()):
        compare_two_reps(sort_fn(es1), sort_fn(es2), desc=rep)
def compare_reps_within_run(events, sort=False):
    """Check every repetition in a single run against the first one.

    Groups *events* by repetition, then compares each later repetition's
    event stream to the lowest-numbered repetition via compare_two_reps.
    Optionally canonicalizes event order with sort_order first.
    """
    order = sort_order if sort else (lambda evs: evs)
    grouped = sorted(by_rep(events).items())
    base_rep, base_events = grouped[0]
    for other_rep, other_events in grouped[1:]:
        compare_two_reps(
            order(base_events), order(other_events), "{} vs {}".format(base_rep, other_rep)
        )
| [
"numpy.allclose"
] | [((2609, 2640), 'numpy.allclose', 'np.allclose', (['v1', 'v2'], {'atol': '(1e-06)'}), '(v1, v2, atol=1e-06)\n', (2620, 2640), True, 'import numpy as np\n')] |
"""
This code implements a perceptron algorithm (PLA).
First, we visualise the dataset which contains 2 features. We can see that the dataset can be clearly separated by drawing a straight line between them. The goal is to write an algorithm that finds that line and classifies all of these data points correctly.
The output file (e.g. 'output1_f.csv') contains the values of w1, w2 and b which define the 'threshold' line. The last row will be the most accurate one. Each time it goes through each of the examples in 'input1.csv', it adds a new line to the output file containing a comma-separated list of the weights w_1, w_2, and b (bias) in that order.
Upon convergence, the program stops, and the final values of w_1, w_2, and b are printed to the output file (output1.csv). This defines the decision boundary that your PLA has computed for the given dataset.
Note: When implementing your PLA, in case of tie (sum of w_jx_ij = 0), please follow the lecture note and classify the datapoint as -1.
Ensure this file can be executed as:
$ python3 problem1.py input1.csv output1.csv
The code includes plotting functions. However those are disabled when executing the code from the command line in the format specified immediately above.
"""
# builtin modules
import os
import psutil
import requests
import sys
# 3rd party modules
import pandas as pd
import numpy as np
import plotly.graph_objects as go
class problem1:
    """Perceptron Learning Algorithm (PLA) on a two-feature dataset.

    Loads samples from datasets/in/<file>, runs the PLA until convergence
    (or a maximum number of passes), plots the decision boundary with
    Plotly, and writes the weight history (w1, w2, b) to datasets/out/<file>.
    """

    @staticmethod
    def get_data(source_file, names_in: list = None):
        """Load the input CSV into a DataFrame.

        Args:
            source_file: file name inside datasets/in/.
            names_in: column names to assign; defaults to
                ['feature1', 'feature2', 'labels'].

        Returns:
            pandas.DataFrame with the given column names.
        """
        # None sentinel instead of a mutable list default argument.
        if names_in is None:
            names_in = ['feature1', 'feature2', 'labels']
        # Define input and output filepaths
        input_path = os.path.join(os.getcwd(), 'datasets', 'in', source_file)
        # Read input data
        return pd.read_csv(input_path, names=names_in)

    @staticmethod
    def perceptron_classify(df, n: int = 200, names_in: list = None):
        """Run the PLA on *df* and return the weight history.

        1. Start with b = w = 0.
        2. For at most *n* passes over the data (stopping early once a full
           pass makes no update): for each sample x with label y, if
           y * f(x) <= 0 (ties count as misclassified, per the assignment),
           update w += y * x.

        Args:
            df: DataFrame whose last column holds the +/-1 labels and whose
                remaining columns hold the features.
            n: maximum number of passes over the data.
            names_in: unused; kept for backward compatibility.

        Returns:
            (history, final): *history* has one row [b, w1, ..., wd] per
            sample visited; *final* is its last row (the converged weights).
        """
        # transform the dataframe to an array
        data = np.asmatrix(df, dtype='float64')
        # get the feature columns as pairs of values, and the label column
        features = data[:, :-1]
        labels = data[:, -1]
        # w[0] is the bias b; w[1:] are the feature weights.
        w = np.zeros(shape=(1, features.shape[1] + 1))
        # Weight snapshot after every visited sample. Generalized to any
        # feature count (the original hard-coded 3 columns).
        history = np.empty(shape=[0, features.shape[1] + 1])
        for _ in range(n):
            updated = False
            for x, label in zip(features, labels):
                x = np.insert(x, 0, 1)  # prepend 1 so w[0] acts as the bias
                activation = np.dot(w, x.transpose())
                if activation * label <= 0:
                    # .item(0, 0) unwraps the 1x1 label matrix to a scalar.
                    w += (x * label.item(0, 0)).tolist()
                    updated = True
                history = np.vstack((history, w))
            if not updated:
                # Converged: a full pass made no update. (The original's
                # "iteration = n" rebinding never stopped the loop, so it
                # always ran all n passes despite the docstring's promise.)
                break
        return (history, history[-1])

    @staticmethod
    def plot_results(df, weights, names_in: list = None):
        """Plot the samples and the learned decision boundary with Plotly.

        Args:
            df: DataFrame with two feature columns and a label column.
            weights: final weight vector (b, w1, w2).
            names_in: column names; defaults to
                ['feature1', 'feature2', 'labels'].

        Side effects: opens the figure in the default browser and writes
        images/fig1.png (the return value of fig.write_image).
        """
        if names_in is None:
            names_in = ['feature1', 'feature2', 'labels']
        # Scatter of the raw samples, colored by label.
        fig = go.Figure(data=go.Scatter(x=df[names_in[0]],
                                        y=df[names_in[1]],
                                        mode='markers',
                                        marker=dict(
                                            color=df[names_in[2]],
                                            colorscale='Viridis',
                                            line_width=1,
                                            size=16),
                                        text=df[names_in[2]],  # hover text goes here
                                        showlegend=False))  # turn off legend only for this item
        # 1D span of X values covering the feature range, just to draw a line.
        X = np.linspace(0, max(df[names_in[0]].max(), df[names_in[1]].max()))
        Y = []
        b, w1, w2 = weights
        for x in X:
            if w2 == 0:
                y = 0.0  # degenerate boundary: fall back to y = 0
            else:
                # Boundary w1*x + w2*y + b = 0  =>  y = -(w1/w2)*x - b/w2.
                # (The original computed the slope as -(b/w2)/(b/w1), which
                # is algebraically identical but yields NaN when b == 0.)
                y = -(w1 / w2) * x + (-b / w2)
            Y.append(y)
        # Add the threshold line to the plot
        fig.add_trace(go.Scatter(x=X, y=Y,
                                 mode='lines',
                                 name='Threshold'))
        fig.update_layout(title='Perceptron Algorithm | Problem 1')
        # Show the figure; by default this opens a browser window.
        fig.show()
        # Export the plot as PNG, creating the images directory if needed.
        if not os.path.exists("images"):
            os.mkdir("images")
        return fig.write_image("images/fig1.png")

    @staticmethod
    def write_csv(filename, weights):
        """Write the weight history to datasets/out/<filename> as CSV.

        Columns are ordered w1, w2, b per the assignment specification;
        the last row holds the final (most accurate) weights.
        """
        filepath = os.path.join(os.getcwd(), 'datasets', 'out', filename)
        dataframe = pd.DataFrame(data=weights, columns=('b', 'w1', 'w2'))
        # Reorder columns: the two weights first, the bias last.
        order = [1, 2, 0]
        dataframe = dataframe[[dataframe.columns[i] for i in order]]
        dataframe.to_csv(filepath)
        return print("New Outputs file saved to: <<", filename, ">>", sep='', end='\n')
def main():
in_data = 'input1.csv'
out_data = 'output1.csv'
df = get_data(in_data)
(w_, w) = perceptron_classify(df)
plot_results(df, w)
write_csv(out_data, w_)
if __name__ == '__main__':
main() | [
"pandas.DataFrame",
"plotly.graph_objects.Scatter",
"os.mkdir",
"pandas.read_csv",
"numpy.empty",
"os.getcwd",
"numpy.zeros",
"os.path.exists",
"numpy.insert",
"numpy.asmatrix",
"numpy.vstack"
] | [((1678, 1717), 'pandas.read_csv', 'pd.read_csv', (['input_path'], {'names': 'names_in'}), '(input_path, names=names_in)\n', (1689, 1717), True, 'import pandas as pd\n'), ((2185, 2217), 'numpy.asmatrix', 'np.asmatrix', (['df'], {'dtype': '"""float64"""'}), "(df, dtype='float64')\n", (2196, 2217), True, 'import numpy as np\n'), ((2452, 2494), 'numpy.zeros', 'np.zeros', ([], {'shape': '(1, features.shape[1] + 1)'}), '(shape=(1, features.shape[1] + 1))\n', (2460, 2494), True, 'import numpy as np\n'), ((2532, 2554), 'numpy.empty', 'np.empty', ([], {'shape': '[0, 3]'}), '(shape=[0, 3])\n', (2540, 2554), True, 'import numpy as np\n'), ((5906, 5959), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': 'weights', 'columns': "('b', 'w1', 'w2')"}), "(data=weights, columns=('b', 'w1', 'w2'))\n", (5918, 5959), True, 'import pandas as pd\n'), ((1596, 1607), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (1605, 1607), False, 'import os\n'), ((3086, 3104), 'numpy.vstack', 'np.vstack', (['(w_, w)'], {}), '((w_, w))\n', (3095, 3104), True, 'import numpy as np\n'), ((5127, 5179), 'plotly.graph_objects.Scatter', 'go.Scatter', ([], {'x': 'X', 'y': 'Y', 'mode': '"""lines"""', 'name': '"""Threshold"""'}), "(x=X, y=Y, mode='lines', name='Threshold')\n", (5137, 5179), True, 'import plotly.graph_objects as go\n'), ((5575, 5599), 'os.path.exists', 'os.path.exists', (['"""images"""'], {}), "('images')\n", (5589, 5599), False, 'import os\n'), ((5613, 5631), 'os.mkdir', 'os.mkdir', (['"""images"""'], {}), "('images')\n", (5621, 5631), False, 'import os\n'), ((5846, 5857), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (5855, 5857), False, 'import os\n'), ((2714, 2732), 'numpy.insert', 'np.insert', (['x', '(0)', '(1)'], {}), '(x, 0, 1)\n', (2723, 2732), True, 'import numpy as np\n')] |
from sklearn import svm, datasets
from sklearn.model_selection import train_test_split
import numpy as np
iris = datasets.load_iris()
X = iris.data
y = iris.target
# Add noisy features
random_state = np.random.RandomState(0)
n_samples, n_features = X.shape
X = np.c_[X, random_state.randn(n_samples, 200 * n_features)]
# Limit to the two first classes, and split into training and test
X_train, X_test, y_train, y_test = train_test_split(X[y < 2], y[y < 2],
test_size=.5,
random_state=random_state)
# Create a simple classifier
classifier = svm.LinearSVC(random_state=random_state)
classifier.fit(X_train, y_train)
y_score = classifier.decision_function(X_test)
from sklearn.metrics import average_precision_score
average_precision = average_precision_score(y_test, y_score)
print('Average precision-recall score: {0:0.2f}'.format(
average_precision))
from sklearn.metrics import precision_recall_curve
import matplotlib.pyplot as plt
from inspect import signature
precision, recall, _ = precision_recall_curve(y_test, y_score)
# In matplotlib < 1.5, plt.fill_between does not have a 'step' argument
step_kwargs = ({'step': 'post'}
if 'step' in signature(plt.fill_between).parameters
else {})
plt.step(recall, precision, color='b', alpha=0.2,
where='post')
plt.fill_between(recall, precision, alpha=0.2, color='b', **step_kwargs)
plt.xlabel('Recall')
plt.ylabel('Precision')
plt.ylim([0.0, 1.05])
plt.xlim([0.0, 1.0])
plt.title('2-class Precision-Recall curve: AP={0:0.2f}'.format(
average_precision)) | [
"sklearn.datasets.load_iris",
"matplotlib.pyplot.xlim",
"matplotlib.pyplot.ylim",
"sklearn.model_selection.train_test_split",
"matplotlib.pyplot.ylabel",
"numpy.random.RandomState",
"sklearn.metrics.precision_recall_curve",
"matplotlib.pyplot.step",
"inspect.signature",
"sklearn.svm.LinearSVC",
... | [((114, 134), 'sklearn.datasets.load_iris', 'datasets.load_iris', ([], {}), '()\n', (132, 134), False, 'from sklearn import svm, datasets\n'), ((202, 226), 'numpy.random.RandomState', 'np.random.RandomState', (['(0)'], {}), '(0)\n', (223, 226), True, 'import numpy as np\n'), ((424, 502), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X[y < 2]', 'y[y < 2]'], {'test_size': '(0.5)', 'random_state': 'random_state'}), '(X[y < 2], y[y < 2], test_size=0.5, random_state=random_state)\n', (440, 502), False, 'from sklearn.model_selection import train_test_split\n'), ((649, 689), 'sklearn.svm.LinearSVC', 'svm.LinearSVC', ([], {'random_state': 'random_state'}), '(random_state=random_state)\n', (662, 689), False, 'from sklearn import svm, datasets\n'), ((843, 883), 'sklearn.metrics.average_precision_score', 'average_precision_score', (['y_test', 'y_score'], {}), '(y_test, y_score)\n', (866, 883), False, 'from sklearn.metrics import average_precision_score\n'), ((1106, 1145), 'sklearn.metrics.precision_recall_curve', 'precision_recall_curve', (['y_test', 'y_score'], {}), '(y_test, y_score)\n', (1128, 1145), False, 'from sklearn.metrics import precision_recall_curve\n'), ((1342, 1405), 'matplotlib.pyplot.step', 'plt.step', (['recall', 'precision'], {'color': '"""b"""', 'alpha': '(0.2)', 'where': '"""post"""'}), "(recall, precision, color='b', alpha=0.2, where='post')\n", (1350, 1405), True, 'import matplotlib.pyplot as plt\n'), ((1415, 1487), 'matplotlib.pyplot.fill_between', 'plt.fill_between', (['recall', 'precision'], {'alpha': '(0.2)', 'color': '"""b"""'}), "(recall, precision, alpha=0.2, color='b', **step_kwargs)\n", (1431, 1487), True, 'import matplotlib.pyplot as plt\n'), ((1489, 1509), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Recall"""'], {}), "('Recall')\n", (1499, 1509), True, 'import matplotlib.pyplot as plt\n'), ((1510, 1533), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Precision"""'], {}), "('Precision')\n", (1520, 1533), True, 
'import matplotlib.pyplot as plt\n'), ((1534, 1555), 'matplotlib.pyplot.ylim', 'plt.ylim', (['[0.0, 1.05]'], {}), '([0.0, 1.05])\n', (1542, 1555), True, 'import matplotlib.pyplot as plt\n'), ((1556, 1576), 'matplotlib.pyplot.xlim', 'plt.xlim', (['[0.0, 1.0]'], {}), '([0.0, 1.0])\n', (1564, 1576), True, 'import matplotlib.pyplot as plt\n'), ((1279, 1306), 'inspect.signature', 'signature', (['plt.fill_between'], {}), '(plt.fill_between)\n', (1288, 1306), False, 'from inspect import signature\n')] |
from FICUS import MagnetReader as mr
from FICUS import AnalyticForce as af
import numpy as np
import matplotlib.pyplot as plt
import sys
'''
This program reads the output of sample_fields.py
and evaluates forces and torques from the magnetic field
Exports a Mx30 .csv file
head = 'Xn [m] (N face center), Yn [m], Zn [m], \
Xs [m] (S face center), Ys [m], Zs [m], \
Xc [m] (COM coordinate), Yc [m], Zc [m], \
Bcx [T] (avg com B), Bcy [T], Bcz [T], \
Fcx [N] (COM force), Fcy [N], Fcz [N], \
Tx [N m] (Torque), Ty [N m], Tz [N m] \
fnx [N] (torque force couple N), fny [N], fnz [N], \
fsx [N] (torque force couple S), fsy [N], fsz [N], \
Fnx [N] (Sum forces N), Fny [N], Fnz [N], \
Fsx [N] (Sum forces S), Fsy [N], Fsz [N], \
\n'
Updated 31 May 2021
'''
path = './'
f_mag = 'block_zot80_3sq_1.csv'
try:
f_field = sys.argv[1]
except:
print('usage: python analysis.py f_field.csv')
f_field = 'B-field-n2-fix2.csv'
# load field data
with open(path+f_field) as f:
datain = f.readlines()
data = np.array([ line.strip().split(',') for line in datain[1:]] , float)
N_samples = len(data)
print('read data shape:', data.shape)
rx,ry,rz,bx,by,bz = data.T
bmag = np.sqrt(bx*bx + by*by + bz*bz)
Bvec = np.array([bx,by,bz]).T
# load magnet file
mag = mr.Magnet_3D(path+f_mag)
X,Y,Z = mag.com.T
s = mag.sgn
M = mag.M[0]
L = np.mean(mag.L)
Q = M*L*L
print('M', M)
print('L', L)
print('Q', Q)
# Compute forces and torques
N_magnets = mag.N_magnets
N_charges = int( N_samples / N_magnets / 2 )
print('N samples:', N_samples)
print('N magnets:', N_magnets)
print('N charges:', N_charges) # per face
B_arr = np.transpose( np.reshape(Bvec,(N_magnets,2*N_charges,3)), axes=(1,0,2) )
#B_arr = np.reshape(Bvec,(2*N_charges,N_magnets,3))
Bp = np.mean(B_arr[:N_charges],axis=0) # positive face samples
Bm = np.mean(B_arr[N_charges:],axis=0) # negative face samples
Bc = (Bp + Bm)/2
Fp = Q * Bp
Fm = -Q * Bm
Fc = Fp + Fm
rvec = np.array([rx,ry,rz]).T
r_arr = np.transpose( np.reshape(rvec,(N_magnets,2*N_charges,3)), axes=(1,0,2) )
#r_arr = np.reshape(rvec,(N_charges*2, N_magnets,3))
rn = np.mean(r_arr[:N_charges],axis=0)
rs = np.mean(r_arr[N_charges:],axis=0)
rcom = (rn + rs)/2 # average
rp = rn - rcom
rm = rs - rcom
tau = np.cross(rp,Fp) + np.cross(rm, Fm)
# export forces
Fn = np.cross(tau,rp) / (np.linalg.norm(rp,axis=1)**2)[:,np.newaxis] / 2
Fs = np.cross(tau,rm) / (np.linalg.norm(rm,axis=1)**2)[:,np.newaxis] / 2
Fpn = Fc/2 + Fn
Fps = Fc/2 + Fs
print('peak field:', np.max(bmag))
print('peak face force:', np.max( np.linalg.norm(Fpn,axis=1) ))
print('peak torque:', np.max( np.linalg.norm(tau,axis=1) ))
# make plot
plt.figure(figsize=(9,7))
plt.subplot(3,3,1)
plt.hist(np.linalg.norm(Bvec,axis=1),100, color = 'C2')
plt.title('B element [T]')
plt.subplot(3,3,2)
plt.hist(np.linalg.norm(Bp,axis=1),100, color = 'C2')
plt.title('B face [T]')
plt.subplot(3,3,3)
plt.hist(np.linalg.norm(Bc,axis=1),100, color = 'C2')
plt.title('B center [T]')
plt.subplot(3,3,4)
plt.hist(np.linalg.norm(tau,axis=1),100, color = 'C4')
plt.title('Torque [N m]')
plt.subplot(3,3,5)
plt.hist(np.linalg.norm(Fp,axis=1),100, color = 'C1')
plt.title('F face [N]')
plt.subplot(3,3,6)
plt.hist(np.linalg.norm(Fc,axis=1),100, color = 'C1')
plt.title('F center [N]')
plt.subplot(3,3,7)
# removed /2
plt.hist(np.linalg.norm(Fn,axis=1), 100, color = 'C0')
plt.title('Torque Force (+)')
plt.subplot(3,3,8)
plt.hist(np.linalg.norm(Fc/2,axis=1),100, color = 'C0')
plt.title('COM Force (+)')
plt.subplot(3,3,9)
plt.hist(np.linalg.norm(Fpn,axis=1),100, color = 'C0')
plt.title('Total Pole Force (+)')
plt.suptitle(f_field)
f_plot = f_field[:-4] + '.png'
print('saving:', f_plot)
plt.savefig(f_plot)
plt.tight_layout()
plt.draw()
plt.show()
def export_1():
# prepare data for export
xn,yn,zn = rn.T
xs,ys,zs = rs.T
xc,yc,zc = rcom.T
fnx,fny,fnz = Fpn.T
fsx,fsy,fsz = Fps.T
tx, ty, tz = tau.T
data = np.array([xn,yn,zn,fnx,fny,fnz,
xs,ys,zs,fsx,fsy,fsz,
xc,yc,zc,tx,ty,tz] ).T
# write
f_write = 'ForceTorque-v6-n%i.csv' % (N_charges*2)
with open(f_write,'w') as f:
head = 'Xn [m], Yn [m], Zn [m], Fnx [N], Fny [N], Fnz [N], \
Xs [m], Ys [m], Zs [m], Fsx [N], Fsy [N], Fsz [N], \
Xc [m], Yc [m], Zc [m], Tx [N m], Ty [N m], Tz [N m] \n'
f.write(head)
for line in data:
#x,y,z,fx,fy,fz = line
out = '{:.6e}, {:.6e}, {:.6e}, {:.6e}, {:.6e}, {:.6e}, {:.6e}, {:.6e}, {:.6e}, {:.6e}, {:.6e}, {:.6e}, {:.6e}, {:.6e}, {:.6e}, {:.6e}, {:.6e}, {:.6e}\n'.format(*line)
f.write(out)
print('wrote to file:', f_write)
def export_2():
# prepare data for export
xn,yn,zn = rn.T
xs,ys,zs = rs.T
xc,yc,zc = rcom.T
fcx,fcy,fcz = Fc.T / 2
bcx,bcy,bcz = Bc.T
# torque forces
tx, ty, tz = tau.T
fnx,fny,fnz = Fn.T
fsx,fsy,fsz = Fs.T
# sum of torques and COM forces
Fnx,Fny,Fnz = Fpn.T
Fsx,Fsy,Fsz = Fps.T
data = np.array([xn,yn,zn,
xs,ys,zs,
xc,yc,zc,
bcx,bcy,bcz,
fcx,fcy,fcz,
tx,ty,tz,
fnx,fny,fnz,
fsx,fsy,fsz,
Fnx,Fny,Fnz,
Fsx,Fsy,Fsz
] ).T
# write
f_write = 'Field-Force-Torque-v6-n%i.csv' % (N_charges*2)
with open(f_write,'w') as f:
head = 'Xn [m] (N face center), Yn [m], Zn [m], \
Xs [m] (S face center), Ys [m], Zs [m], \
Xc [m] (COM coordinate), Yc [m], Zc [m], \
Bcx [T] (avg com B), Bcy [T], Bcz [T], \
Fcx [N] (COM force), Fcy [N], Fcz [N], \
Tx [N m] (Torque), Ty [N m], Tz [N m] \
fnx [N] (torque force couple N), fny [N], fnz [N], \
fsx [N] (torque force couple S), fsy [N], fsz [N], \
Fnx [N] (Sum forces N), Fny [N], Fnz [N], \
Fsx [N] (Sum forces S), Fsy [N], Fsz [N], \
\n'
f.write(head)
for line in data:
out = '{:.6e}, {:.6e}, {:.6e}, {:.6e}, {:.6e}, {:.6e}, {:.6e}, {:.6e}, {:.6e}, {:.6e}, {:.6e}, {:.6e}, {:.6e}, {:.6e}, {:.6e}, {:.6e}, {:.6e}, {:.6e}, {:.6e}, {:.6e}, {:.6e}, {:.6e}, {:.6e}, {:.6e}, {:.6e}, {:.6e}, {:.6e}, {:.6e}, {:.6e}, {:.6e}\n'.format(*line)
f.write(out)
print('wrote to file:', f_write)
export_2()
| [
"matplotlib.pyplot.title",
"matplotlib.pyplot.subplot",
"FICUS.MagnetReader.Magnet_3D",
"matplotlib.pyplot.show",
"matplotlib.pyplot.suptitle",
"numpy.cross",
"matplotlib.pyplot.draw",
"matplotlib.pyplot.figure",
"numpy.mean",
"numpy.array",
"numpy.reshape",
"numpy.max",
"numpy.linalg.norm",... | [((1294, 1330), 'numpy.sqrt', 'np.sqrt', (['(bx * bx + by * by + bz * bz)'], {}), '(bx * bx + by * by + bz * bz)\n', (1301, 1330), True, 'import numpy as np\n'), ((1381, 1407), 'FICUS.MagnetReader.Magnet_3D', 'mr.Magnet_3D', (['(path + f_mag)'], {}), '(path + f_mag)\n', (1393, 1407), True, 'from FICUS import MagnetReader as mr\n'), ((1455, 1469), 'numpy.mean', 'np.mean', (['mag.L'], {}), '(mag.L)\n', (1462, 1469), True, 'import numpy as np\n'), ((1870, 1904), 'numpy.mean', 'np.mean', (['B_arr[:N_charges]'], {'axis': '(0)'}), '(B_arr[:N_charges], axis=0)\n', (1877, 1904), True, 'import numpy as np\n'), ((1934, 1968), 'numpy.mean', 'np.mean', (['B_arr[N_charges:]'], {'axis': '(0)'}), '(B_arr[N_charges:], axis=0)\n', (1941, 1968), True, 'import numpy as np\n'), ((2222, 2256), 'numpy.mean', 'np.mean', (['r_arr[:N_charges]'], {'axis': '(0)'}), '(r_arr[:N_charges], axis=0)\n', (2229, 2256), True, 'import numpy as np\n'), ((2261, 2295), 'numpy.mean', 'np.mean', (['r_arr[N_charges:]'], {'axis': '(0)'}), '(r_arr[N_charges:], axis=0)\n', (2268, 2295), True, 'import numpy as np\n'), ((2767, 2793), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(9, 7)'}), '(figsize=(9, 7))\n', (2777, 2793), True, 'import matplotlib.pyplot as plt\n'), ((2794, 2814), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(3)', '(3)', '(1)'], {}), '(3, 3, 1)\n', (2805, 2814), True, 'import matplotlib.pyplot as plt\n'), ((2869, 2895), 'matplotlib.pyplot.title', 'plt.title', (['"""B element [T]"""'], {}), "('B element [T]')\n", (2878, 2895), True, 'import matplotlib.pyplot as plt\n'), ((2897, 2917), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(3)', '(3)', '(2)'], {}), '(3, 3, 2)\n', (2908, 2917), True, 'import matplotlib.pyplot as plt\n'), ((2970, 2993), 'matplotlib.pyplot.title', 'plt.title', (['"""B face [T]"""'], {}), "('B face [T]')\n", (2979, 2993), True, 'import matplotlib.pyplot as plt\n'), ((2995, 3015), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(3)', 
'(3)', '(3)'], {}), '(3, 3, 3)\n', (3006, 3015), True, 'import matplotlib.pyplot as plt\n'), ((3068, 3093), 'matplotlib.pyplot.title', 'plt.title', (['"""B center [T]"""'], {}), "('B center [T]')\n", (3077, 3093), True, 'import matplotlib.pyplot as plt\n'), ((3095, 3115), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(3)', '(3)', '(4)'], {}), '(3, 3, 4)\n', (3106, 3115), True, 'import matplotlib.pyplot as plt\n'), ((3169, 3194), 'matplotlib.pyplot.title', 'plt.title', (['"""Torque [N m]"""'], {}), "('Torque [N m]')\n", (3178, 3194), True, 'import matplotlib.pyplot as plt\n'), ((3196, 3216), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(3)', '(3)', '(5)'], {}), '(3, 3, 5)\n', (3207, 3216), True, 'import matplotlib.pyplot as plt\n'), ((3269, 3292), 'matplotlib.pyplot.title', 'plt.title', (['"""F face [N]"""'], {}), "('F face [N]')\n", (3278, 3292), True, 'import matplotlib.pyplot as plt\n'), ((3294, 3314), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(3)', '(3)', '(6)'], {}), '(3, 3, 6)\n', (3305, 3314), True, 'import matplotlib.pyplot as plt\n'), ((3367, 3392), 'matplotlib.pyplot.title', 'plt.title', (['"""F center [N]"""'], {}), "('F center [N]')\n", (3376, 3392), True, 'import matplotlib.pyplot as plt\n'), ((3394, 3414), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(3)', '(3)', '(7)'], {}), '(3, 3, 7)\n', (3405, 3414), True, 'import matplotlib.pyplot as plt\n'), ((3481, 3510), 'matplotlib.pyplot.title', 'plt.title', (['"""Torque Force (+)"""'], {}), "('Torque Force (+)')\n", (3490, 3510), True, 'import matplotlib.pyplot as plt\n'), ((3512, 3532), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(3)', '(3)', '(8)'], {}), '(3, 3, 8)\n', (3523, 3532), True, 'import matplotlib.pyplot as plt\n'), ((3587, 3613), 'matplotlib.pyplot.title', 'plt.title', (['"""COM Force (+)"""'], {}), "('COM Force (+)')\n", (3596, 3613), True, 'import matplotlib.pyplot as plt\n'), ((3615, 3635), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(3)', '(3)', '(9)'], {}), '(3, 3, 9)\n', 
(3626, 3635), True, 'import matplotlib.pyplot as plt\n'), ((3689, 3722), 'matplotlib.pyplot.title', 'plt.title', (['"""Total Pole Force (+)"""'], {}), "('Total Pole Force (+)')\n", (3698, 3722), True, 'import matplotlib.pyplot as plt\n'), ((3724, 3745), 'matplotlib.pyplot.suptitle', 'plt.suptitle', (['f_field'], {}), '(f_field)\n', (3736, 3745), True, 'import matplotlib.pyplot as plt\n'), ((3803, 3822), 'matplotlib.pyplot.savefig', 'plt.savefig', (['f_plot'], {}), '(f_plot)\n', (3814, 3822), True, 'import matplotlib.pyplot as plt\n'), ((3823, 3841), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (3839, 3841), True, 'import matplotlib.pyplot as plt\n'), ((3842, 3852), 'matplotlib.pyplot.draw', 'plt.draw', ([], {}), '()\n', (3850, 3852), True, 'import matplotlib.pyplot as plt\n'), ((3853, 3863), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3861, 3863), True, 'import matplotlib.pyplot as plt\n'), ((1332, 1354), 'numpy.array', 'np.array', (['[bx, by, bz]'], {}), '([bx, by, bz])\n', (1340, 1354), True, 'import numpy as np\n'), ((1754, 1801), 'numpy.reshape', 'np.reshape', (['Bvec', '(N_magnets, 2 * N_charges, 3)'], {}), '(Bvec, (N_magnets, 2 * N_charges, 3))\n', (1764, 1801), True, 'import numpy as np\n'), ((2059, 2081), 'numpy.array', 'np.array', (['[rx, ry, rz]'], {}), '([rx, ry, rz])\n', (2067, 2081), True, 'import numpy as np\n'), ((2105, 2152), 'numpy.reshape', 'np.reshape', (['rvec', '(N_magnets, 2 * N_charges, 3)'], {}), '(rvec, (N_magnets, 2 * N_charges, 3))\n', (2115, 2152), True, 'import numpy as np\n'), ((2362, 2378), 'numpy.cross', 'np.cross', (['rp', 'Fp'], {}), '(rp, Fp)\n', (2370, 2378), True, 'import numpy as np\n'), ((2380, 2396), 'numpy.cross', 'np.cross', (['rm', 'Fm'], {}), '(rm, Fm)\n', (2388, 2396), True, 'import numpy as np\n'), ((2615, 2627), 'numpy.max', 'np.max', (['bmag'], {}), '(bmag)\n', (2621, 2627), True, 'import numpy as np\n'), ((2822, 2850), 'numpy.linalg.norm', 'np.linalg.norm', (['Bvec'], {'axis': 
'(1)'}), '(Bvec, axis=1)\n', (2836, 2850), True, 'import numpy as np\n'), ((2925, 2951), 'numpy.linalg.norm', 'np.linalg.norm', (['Bp'], {'axis': '(1)'}), '(Bp, axis=1)\n', (2939, 2951), True, 'import numpy as np\n'), ((3023, 3049), 'numpy.linalg.norm', 'np.linalg.norm', (['Bc'], {'axis': '(1)'}), '(Bc, axis=1)\n', (3037, 3049), True, 'import numpy as np\n'), ((3123, 3150), 'numpy.linalg.norm', 'np.linalg.norm', (['tau'], {'axis': '(1)'}), '(tau, axis=1)\n', (3137, 3150), True, 'import numpy as np\n'), ((3224, 3250), 'numpy.linalg.norm', 'np.linalg.norm', (['Fp'], {'axis': '(1)'}), '(Fp, axis=1)\n', (3238, 3250), True, 'import numpy as np\n'), ((3322, 3348), 'numpy.linalg.norm', 'np.linalg.norm', (['Fc'], {'axis': '(1)'}), '(Fc, axis=1)\n', (3336, 3348), True, 'import numpy as np\n'), ((3435, 3461), 'numpy.linalg.norm', 'np.linalg.norm', (['Fn'], {'axis': '(1)'}), '(Fn, axis=1)\n', (3449, 3461), True, 'import numpy as np\n'), ((3540, 3570), 'numpy.linalg.norm', 'np.linalg.norm', (['(Fc / 2)'], {'axis': '(1)'}), '(Fc / 2, axis=1)\n', (3554, 3570), True, 'import numpy as np\n'), ((3643, 3670), 'numpy.linalg.norm', 'np.linalg.norm', (['Fpn'], {'axis': '(1)'}), '(Fpn, axis=1)\n', (3657, 3670), True, 'import numpy as np\n'), ((2419, 2436), 'numpy.cross', 'np.cross', (['tau', 'rp'], {}), '(tau, rp)\n', (2427, 2436), True, 'import numpy as np\n'), ((2492, 2509), 'numpy.cross', 'np.cross', (['tau', 'rm'], {}), '(tau, rm)\n', (2500, 2509), True, 'import numpy as np\n'), ((2663, 2690), 'numpy.linalg.norm', 'np.linalg.norm', (['Fpn'], {'axis': '(1)'}), '(Fpn, axis=1)\n', (2677, 2690), True, 'import numpy as np\n'), ((2723, 2750), 'numpy.linalg.norm', 'np.linalg.norm', (['tau'], {'axis': '(1)'}), '(tau, axis=1)\n', (2737, 2750), True, 'import numpy as np\n'), ((4063, 4155), 'numpy.array', 'np.array', (['[xn, yn, zn, fnx, fny, fnz, xs, ys, zs, fsx, fsy, fsz, xc, yc, zc, tx, ty, tz]'], {}), '([xn, yn, zn, fnx, fny, fnz, xs, ys, zs, fsx, fsy, fsz, xc, yc, zc,\n tx, ty, tz])\n', 
(4071, 4155), True, 'import numpy as np\n'), ((5171, 5323), 'numpy.array', 'np.array', (['[xn, yn, zn, xs, ys, zs, xc, yc, zc, bcx, bcy, bcz, fcx, fcy, fcz, tx, ty,\n tz, fnx, fny, fnz, fsx, fsy, fsz, Fnx, Fny, Fnz, Fsx, Fsy, Fsz]'], {}), '([xn, yn, zn, xs, ys, zs, xc, yc, zc, bcx, bcy, bcz, fcx, fcy, fcz,\n tx, ty, tz, fnx, fny, fnz, fsx, fsy, fsz, Fnx, Fny, Fnz, Fsx, Fsy, Fsz])\n', (5179, 5323), True, 'import numpy as np\n'), ((2439, 2465), 'numpy.linalg.norm', 'np.linalg.norm', (['rp'], {'axis': '(1)'}), '(rp, axis=1)\n', (2453, 2465), True, 'import numpy as np\n'), ((2512, 2538), 'numpy.linalg.norm', 'np.linalg.norm', (['rm'], {'axis': '(1)'}), '(rm, axis=1)\n', (2526, 2538), True, 'import numpy as np\n')] |
import time
from tools.montecarlo_python import get_equity as py_get_equity
import tools.nn_equity as nn_equity
from gym_env.env import HoldemTable
from tools.nn_equity import sample_cards
import numpy as np
import matplotlib.pyplot as plt
import cppimport
def test_model(get_equity_func, my_cards, cards_on_table, players, runs):
start_time = time.time()
res = get_equity_func(my_cards, cards_on_table, players, runs)
end_time = time.time()
execution_time = end_time - start_time
return res, execution_time
def sample_scenario():
table = HoldemTable()
# Create deck
table._create_card_deck()
# Sample player cards
p1_cards = sample_cards(table.deck, 2)
p2_cards = sample_cards(table.deck, 2)
# Sample table cards from either preflop,
# flop, river or turn
stage_card_nums = [0, 3, 4, 5]
num_table_samples = np.random.choice(stage_card_nums)
cards_on_table = sample_cards(table.deck, num_table_samples)
my_cards = set(p1_cards)
cards_on_table = set(cards_on_table)
return my_cards, cards_on_table
def speed_and_error_comparison(number_of_samples):
# python_equity_calculator = montecarlo_python.MonteCarlo()
# py_equity_func = python_equity_calculator.run_montecarlo
cpp_calculator = cppimport.imp("tools.montecarlo_cpp.pymontecarlo")
cpp_equity_func = cpp_calculator.montecarlo
players = 2
load_model = "equity_optuna_4_17"
nn_equity_calculator = nn_equity.PredictEquity(load_model_name=load_model, load_model_dir='./tools/nn_equity_model/')
nn_equity_func = nn_equity_calculator.get_equity
model_data_dict = {'cpp_montecarlo_10k': {'equity_function': cpp_equity_func, 'runs': 10000, 'error_data': [], 'time_data': []},
'cpp_montecarlo_1k': {'equity_function': cpp_equity_func, 'runs': 1000, 'error_data': [], 'time_data': []},
# 'py_montecarlo_1k': {'equity_function': py_get_equity, 'runs': 1000, 'error_data': [], 'time_data': []},
'neural_network': {'equity_function': nn_equity_func, 'runs': 1, 'error_data': [], 'time_data': []}}
for i in range(number_of_samples):
print("Sample number {}".format(i))
my_cards, cards_on_table = sample_scenario()
base_line_res, base_line_time = test_model(cpp_equity_func, my_cards, cards_on_table, players, runs=100000)
for key in model_data_dict.keys():
equity_func = model_data_dict[key]['equity_function']
runs = model_data_dict[key]['runs']
res, ex_time = test_model(equity_func, my_cards, cards_on_table, players, runs=runs)
error = abs(res - base_line_res)
if error > 0.1 and key=='neural_network':
print(my_cards)
print(cards_on_table)
# Don't save first results. Tensorflow is slow on first prediction use
# of a model
if i == 0:
continue
print("Name: {}, \tError: {}, \tTime: {}".format(key, error, ex_time))
model_data_dict[key]['error_data'].append(error)
model_data_dict[key]['time_data'].append(ex_time)
print(model_data_dict['neural_network']['time_data'])
return model_data_dict
def plot_model_comparison(model_data_dict):
model_names = model_data_dict.keys()
# Calculate statistics
for key in model_names:
model_errors = model_data_dict[key]['error_data']
model_times = model_data_dict[key]['time_data']
model_data_dict[key]['mean_error'] = np.mean(model_errors)
model_data_dict[key]['std_error'] = np.std(model_errors)
model_data_dict[key]['mean_time'] = np.mean(model_times)
model_data_dict[key]['std_time'] = np.std(model_times)
model_mean_errors = [model_data_dict[key]['mean_error'] for key in model_names]
model_error_stdevs = [model_data_dict[key]['std_error'] for key in model_names]
ind = np.arange(len(model_mean_errors)) # the x locations for the groups
width = 0.35 # the width of the bars
fig, ax = plt.subplots()
rects1 = ax.bar(ind, model_mean_errors, width, yerr=model_error_stdevs,
color='SkyBlue', label='time')
ax.set_ylabel('Absolute equity approximation error (compared to montecarlo 100k runs)')
ax.set_xlabel('Model')
ax.set_xticks(ind)
ax.set_xticklabels(model_names)
plt.savefig('./error_comparison.png', dpi=100)
plt.show()
plt.close()
model_mean_time = [model_data_dict[key]['mean_time'] for key in model_names]
model_time_stdevs = [model_data_dict[key]['std_time'] for key in model_names]
fig, ax = plt.subplots()
rects2 = ax.bar(ind, model_mean_time, width, yerr=model_time_stdevs,
color='IndianRed', label='error')
ax.set_xticks(ind)
ax.set_xticklabels(model_names)
ax.set_ylabel('Average computation time (s)')
ax.set_xlabel('Model')
plt.savefig('./time_comparison.png', dpi=100)
plt.show()
plt.close()
def main():
model_data_dict = speed_and_error_comparison(100)
plot_model_comparison(model_data_dict)
if __name__ == "__main__":
main() | [
"gym_env.env.HoldemTable",
"matplotlib.pyplot.show",
"numpy.std",
"cppimport.imp",
"matplotlib.pyplot.close",
"time.time",
"numpy.mean",
"tools.nn_equity.sample_cards",
"numpy.random.choice",
"tools.nn_equity.PredictEquity",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.savefig"
] | [((350, 361), 'time.time', 'time.time', ([], {}), '()\n', (359, 361), False, 'import time\n'), ((444, 455), 'time.time', 'time.time', ([], {}), '()\n', (453, 455), False, 'import time\n'), ((568, 581), 'gym_env.env.HoldemTable', 'HoldemTable', ([], {}), '()\n', (579, 581), False, 'from gym_env.env import HoldemTable\n'), ((673, 700), 'tools.nn_equity.sample_cards', 'sample_cards', (['table.deck', '(2)'], {}), '(table.deck, 2)\n', (685, 700), False, 'from tools.nn_equity import sample_cards\n'), ((716, 743), 'tools.nn_equity.sample_cards', 'sample_cards', (['table.deck', '(2)'], {}), '(table.deck, 2)\n', (728, 743), False, 'from tools.nn_equity import sample_cards\n'), ((876, 909), 'numpy.random.choice', 'np.random.choice', (['stage_card_nums'], {}), '(stage_card_nums)\n', (892, 909), True, 'import numpy as np\n'), ((931, 974), 'tools.nn_equity.sample_cards', 'sample_cards', (['table.deck', 'num_table_samples'], {}), '(table.deck, num_table_samples)\n', (943, 974), False, 'from tools.nn_equity import sample_cards\n'), ((1285, 1335), 'cppimport.imp', 'cppimport.imp', (['"""tools.montecarlo_cpp.pymontecarlo"""'], {}), "('tools.montecarlo_cpp.pymontecarlo')\n", (1298, 1335), False, 'import cppimport\n'), ((1467, 1566), 'tools.nn_equity.PredictEquity', 'nn_equity.PredictEquity', ([], {'load_model_name': 'load_model', 'load_model_dir': '"""./tools/nn_equity_model/"""'}), "(load_model_name=load_model, load_model_dir=\n './tools/nn_equity_model/')\n", (1490, 1566), True, 'import tools.nn_equity as nn_equity\n'), ((4094, 4108), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (4106, 4108), True, 'import matplotlib.pyplot as plt\n'), ((4418, 4464), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""./error_comparison.png"""'], {'dpi': '(100)'}), "('./error_comparison.png', dpi=100)\n", (4429, 4464), True, 'import matplotlib.pyplot as plt\n'), ((4469, 4479), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4477, 4479), True, 'import matplotlib.pyplot 
as plt\n'), ((4484, 4495), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (4493, 4495), True, 'import matplotlib.pyplot as plt\n'), ((4676, 4690), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (4688, 4690), True, 'import matplotlib.pyplot as plt\n'), ((4958, 5003), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""./time_comparison.png"""'], {'dpi': '(100)'}), "('./time_comparison.png', dpi=100)\n", (4969, 5003), True, 'import matplotlib.pyplot as plt\n'), ((5008, 5018), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5016, 5018), True, 'import matplotlib.pyplot as plt\n'), ((5028, 5039), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (5037, 5039), True, 'import matplotlib.pyplot as plt\n'), ((3573, 3594), 'numpy.mean', 'np.mean', (['model_errors'], {}), '(model_errors)\n', (3580, 3594), True, 'import numpy as np\n'), ((3639, 3659), 'numpy.std', 'np.std', (['model_errors'], {}), '(model_errors)\n', (3645, 3659), True, 'import numpy as np\n'), ((3705, 3725), 'numpy.mean', 'np.mean', (['model_times'], {}), '(model_times)\n', (3712, 3725), True, 'import numpy as np\n'), ((3769, 3788), 'numpy.std', 'np.std', (['model_times'], {}), '(model_times)\n', (3775, 3788), True, 'import numpy as np\n')] |
from __future__ import print_function
"""
mini_summary_plots.py
Simple plots of data points for mini analysis from mini_analysis.py
<NAME>, 3/2018
"""
import os
import sys
import re
import pickle
import numpy as np
from collections import OrderedDict
from matplotlib import rc
import matplotlib.pyplot as mpl
import pandas as pd
import seaborn as sns
import scipy.stats
rc('text', usetex=False)
#rc('font',**{'family':'sans-serif','sans-serif':['Helvetica']})
import pylibrary.plotting.plothelpers as PH
class MiniSummarize():
# each mouse m entry in d has the following keys:
# ['amplitudes', 'genotype', 'intervals', 'mouse', 'protocols', 'eventcounts']
def __init__(self, id):
self.experiment_id = id
def load_file(self, fn):
fh = open(fn, 'rb')
self.d = pickle.load(fh) # , encoding='latin1') # encoding needed for python 2 file written, to read in python 3
fh.close()
self.filename = fn
print ('Mice/cells: ', self.d.keys())
def set_groups(self, groups):
self.group_names = groups
# ['WT', 'CHL1']
#self.groups = ['F/+', 'F/F']
def average_taus(self, d, t):
tau = np.zeros(len(d))
for i in range(len(d)):
tau[i] = d[i]['fit'][t]
return np.mean(tau)
def compute_means(self):
self.gtypes = []
self.holding = []
self.amps = []
self.meanamps = []
self.intvls = []
self.mouse = []
self.nevents = []
self.tau1 = []
self.tau2 = []
for i, m in enumerate(self.d.keys()):
#print ('m: ', m)
gt = self.d[m]['genotype']
#print('gt: ', gt)
# if gt not in self.gtypes:
self.gtypes.append(gt)
# if i == 0:
# print( self.d[m].keys())
self.holding.append(self.d[m]['holding'])
self.amps.append(np.nanmean(self.d[m]['amplitude_midpoint']))
self.meanamps.append(np.nanmean(self.d[m]['amplitudes']))
self.intvls.append(np.nanmean(self.d[m]['intervals']))
self.nevents.append(len(self.d[m]['intervals']))
self.tau1.append(self.average_taus(d[m]['averaged'], 'tau1'))
self.tau2.append(self.average_taus(d[m]['averaged'], 'tau2'))
self.mouse.append(m)
self.pddata = pd.DataFrame({'Genotype': pd.Categorical(self.gtypes),
'Holding': np.array(self.holding),
'Amp': np.array(self.amps),
'MeanAmp': np.array(self.meanamps),
'Intvls': 1000./np.array(self.intvls),
'Nevents': np.array(self.nevents),
'tau1' : np.array(self.tau1),
'tau2' : np.array(self.tau2),
'Mouse': pd.Categorical(self.mouse)
})
ps = str(self.pddata)
ps = re.sub(' +', '\t', ps)
# print(ps)
df = self.pddata
g1 = []
g2 = []
for i in range(len(self.gtypes)):
if self.gtypes[i] == "F/+":
g1.append(self.amps[i])
else:
g2.append(self.amps[i])
gm1 = []
gm2 = []
for i in range(len(self.gtypes)):
if self.gtypes[i] == "F/+":
gm1.append(self.meanamps[i])
else:
gm2.append(self.meanamps[i])
# print( g1, g2)
t, p = scipy.stats.ttest_ind(g1, g2, axis=0, equal_var=False, nan_policy='propagate')
print('F/+: u = {0:.3f} (SD={1:.3f})'.format(np.mean(g1), np.std(g1)))
print('F/F: u = {0:.3f} (SD={1:.3f})'.format(np.mean(g2), np.std(g2)))
print('t= {0:.3f}, p={1:.3f}'.format(t, p))
tm, pm = scipy.stats.ttest_ind(gm1, gm2, axis=0, equal_var=False, nan_policy='propagate')
print('F/+: u = {0:.3f} (SD={1:.3f})'.format(np.mean(gm1), np.std(gm1)))
print('F/F: u = {0:.3f} (SD={1:.3f})'.format(np.mean(gm2), np.std(gm2)))
print('t= {0:.3f}, p={1:.3f}'.format(tm, pm))
# print (self.amps)
# print (self.meanamps)
# print (self.intvls)
self.plot()
# compute stats on the ampitudes and intervals
def plot(self):
sizer = OrderedDict([ ('A', {'pos': [0.12, 0.2, 0.15, 0.5]}),
('B', {'pos': [0.45, 0.2, 0.15, 0.5]}),
('C', {'pos': [0.72, 0.2, 0.15, 0.5]}),
]) # dict elements are [left, width, bottom, height] for the axes in the plot.
n_panels = len(sizer.keys())
gr = [(a, a+1, 0, 1) for a in range(0, n_panels)] # just generate subplots - shape does not matter
axmap = OrderedDict(zip(sizer.keys(), gr))
P = PH.Plotter((n_panels, 1), axmap=axmap, label=True, labeloffset=[-0.15, 0.08],
fontsize={'tick': 8, 'label': 10, 'panel': 14}, figsize=(5., 3.))
P.resize(sizer) # perform positioning magic
# P.axdict['A'].plot(np.ones(len(self.intvls[self.group_names[0]])), 1000./np.array(self.intvls[self.group_names[0]]),
# 'ko', markersize=4.0, label=self.group_names[0])
# P.axdict['A'].plot(1+np.ones(len(self.intvls[self.group_names[1]])), 1000./np.array(self.intvls[self.group_names[1]]),
# 'bs', markersize=4.0, label=self.group_names[1])
# P.axdict['B'].plot(np.ones(len(self.amps[self.group_names[0]])), self.amps[self.group_names[0]],
# 'ko', markerfacecolor='k', markeredgecolor='k', markersize=4.0, markeredgewidth=1)
# P.axdict['B'].plot(1.0 + np.ones(len(self.amps[self.group_names[1]])), self.amps[self.group_names[1]],
# 'bs', markerfacecolor='b', markeredgecolor='b', markersize=4.0, markeredgewidth=1)
# P.axdict['B'].plot(0.2 + np.ones(len(self.meanamps[self.group_names[0]])), self.meanamps[self.group_names[0]],
# 'ko', markerfacecolor='w', markeredgecolor='k', markersize=4.0, markeredgewidth=1)
# P.axdict['B'].plot(1.2 + np.ones(len(self.meanamps[self.group_names[1]])), self.meanamps[self.group_names[1]],
# 'bs', markerfacecolor='w', markeredgecolor='b', markersize=4.0, markeredgewidth=1)
for a in ['A', 'B', 'C']:
PH.formatTicks(P.axdict[a], font='Helvetica')
sns.swarmplot(x='Genotype', y='Intvls', data=self.pddata, ax=P.axdict['A'])
sns.boxplot(x='Genotype', y='Intvls', data=self.pddata, ax=P.axdict['A'], color="0.8")
sns.swarmplot(x='Genotype', y='Amp', data=self.pddata, ax=P.axdict['B'])
sns.boxplot(x='Genotype', y='Amp', data=self.pddata, ax=P.axdict['B'], color="0.8")
sns.swarmplot(x='Genotype', y='MeanAmp', data=self.pddata, ax=P.axdict['C'])
sns.boxplot(x='Genotype', y='MeanAmp', data=self.pddata, ax=P.axdict['C'], color="0.8")
P.axdict['A'].set_ylim(0.0, 25.)
P.axdict['A'].set_ylabel('Event Frequency (Hz)')
P.axdict['B'].set_ylim(0.0, 30.)
P.axdict['B'].set_ylabel('Median Amplitude (pA)')
P.axdict['C'].set_ylabel('Mean Amplitude (pA)')
P.axdict['C'].set_ylim(0.0, 30.)
# P.axdict['A'].set_xlabel('Group')
# P.axdict['B'].set_xlabel('Group')
# P.axdict['A'].set_xlim(0.5, 2.5)
# P.axdict['B'].set_xlim(0.5, 2.5)
# P.axdict['B'].set_xlim(0.5, 2.5)
# P.axdict['A'].legend()
P.figure_handle.suptitle(self.filename.replace(r'_', r'\_'))
mpl.savefig('msummary_%s.pdf' % self.experiment_id)
mpl.show()
if __name__ == '__main__':
fn = 'summarydata_%s.p' % sys.argv[1] # data file to plot from - should hold a dict of mouse entries
MS = MiniSummarize(sys.argv[1])
MS.load_file(fn)
MS.set_groups(['F/+', 'F/F'])
#MS.set_groups(['WT', 'CHL1'])
MS.compute_means()
| [
"matplotlib.rc",
"matplotlib.pyplot.show",
"numpy.std",
"pylibrary.plotting.plothelpers.formatTicks",
"seaborn.swarmplot",
"numpy.mean",
"pickle.load",
"seaborn.boxplot",
"pandas.Categorical",
"numpy.array",
"collections.OrderedDict",
"pylibrary.plotting.plothelpers.Plotter",
"re.sub",
"ma... | [((374, 398), 'matplotlib.rc', 'rc', (['"""text"""'], {'usetex': '(False)'}), "('text', usetex=False)\n", (376, 398), False, 'from matplotlib import rc\n'), ((810, 825), 'pickle.load', 'pickle.load', (['fh'], {}), '(fh)\n', (821, 825), False, 'import pickle\n'), ((1297, 1309), 'numpy.mean', 'np.mean', (['tau'], {}), '(tau)\n', (1304, 1309), True, 'import numpy as np\n'), ((3075, 3097), 're.sub', 're.sub', (['""" +"""', '"""\t"""', 'ps'], {}), "(' +', '\\t', ps)\n", (3081, 3097), False, 'import re\n'), ((4417, 4555), 'collections.OrderedDict', 'OrderedDict', (["[('A', {'pos': [0.12, 0.2, 0.15, 0.5]}), ('B', {'pos': [0.45, 0.2, 0.15, \n 0.5]}), ('C', {'pos': [0.72, 0.2, 0.15, 0.5]})]"], {}), "([('A', {'pos': [0.12, 0.2, 0.15, 0.5]}), ('B', {'pos': [0.45, \n 0.2, 0.15, 0.5]}), ('C', {'pos': [0.72, 0.2, 0.15, 0.5]})])\n", (4428, 4555), False, 'from collections import OrderedDict\n'), ((4929, 5079), 'pylibrary.plotting.plothelpers.Plotter', 'PH.Plotter', (['(n_panels, 1)'], {'axmap': 'axmap', 'label': '(True)', 'labeloffset': '[-0.15, 0.08]', 'fontsize': "{'tick': 8, 'label': 10, 'panel': 14}", 'figsize': '(5.0, 3.0)'}), "((n_panels, 1), axmap=axmap, label=True, labeloffset=[-0.15, 0.08\n ], fontsize={'tick': 8, 'label': 10, 'panel': 14}, figsize=(5.0, 3.0))\n", (4939, 5079), True, 'import pylibrary.plotting.plothelpers as PH\n'), ((6489, 6564), 'seaborn.swarmplot', 'sns.swarmplot', ([], {'x': '"""Genotype"""', 'y': '"""Intvls"""', 'data': 'self.pddata', 'ax': "P.axdict['A']"}), "(x='Genotype', y='Intvls', data=self.pddata, ax=P.axdict['A'])\n", (6502, 6564), True, 'import seaborn as sns\n'), ((6573, 6663), 'seaborn.boxplot', 'sns.boxplot', ([], {'x': '"""Genotype"""', 'y': '"""Intvls"""', 'data': 'self.pddata', 'ax': "P.axdict['A']", 'color': '"""0.8"""'}), "(x='Genotype', y='Intvls', data=self.pddata, ax=P.axdict['A'],\n color='0.8')\n", (6584, 6663), True, 'import seaborn as sns\n'), ((6669, 6741), 'seaborn.swarmplot', 'sns.swarmplot', ([], {'x': 
'"""Genotype"""', 'y': '"""Amp"""', 'data': 'self.pddata', 'ax': "P.axdict['B']"}), "(x='Genotype', y='Amp', data=self.pddata, ax=P.axdict['B'])\n", (6682, 6741), True, 'import seaborn as sns\n'), ((6750, 6837), 'seaborn.boxplot', 'sns.boxplot', ([], {'x': '"""Genotype"""', 'y': '"""Amp"""', 'data': 'self.pddata', 'ax': "P.axdict['B']", 'color': '"""0.8"""'}), "(x='Genotype', y='Amp', data=self.pddata, ax=P.axdict['B'],\n color='0.8')\n", (6761, 6837), True, 'import seaborn as sns\n'), ((6843, 6919), 'seaborn.swarmplot', 'sns.swarmplot', ([], {'x': '"""Genotype"""', 'y': '"""MeanAmp"""', 'data': 'self.pddata', 'ax': "P.axdict['C']"}), "(x='Genotype', y='MeanAmp', data=self.pddata, ax=P.axdict['C'])\n", (6856, 6919), True, 'import seaborn as sns\n'), ((6928, 7019), 'seaborn.boxplot', 'sns.boxplot', ([], {'x': '"""Genotype"""', 'y': '"""MeanAmp"""', 'data': 'self.pddata', 'ax': "P.axdict['C']", 'color': '"""0.8"""'}), "(x='Genotype', y='MeanAmp', data=self.pddata, ax=P.axdict['C'],\n color='0.8')\n", (6939, 7019), True, 'import seaborn as sns\n'), ((7674, 7725), 'matplotlib.pyplot.savefig', 'mpl.savefig', (["('msummary_%s.pdf' % self.experiment_id)"], {}), "('msummary_%s.pdf' % self.experiment_id)\n", (7685, 7725), True, 'import matplotlib.pyplot as mpl\n'), ((7734, 7744), 'matplotlib.pyplot.show', 'mpl.show', ([], {}), '()\n', (7742, 7744), True, 'import matplotlib.pyplot as mpl\n'), ((6427, 6472), 'pylibrary.plotting.plothelpers.formatTicks', 'PH.formatTicks', (['P.axdict[a]'], {'font': '"""Helvetica"""'}), "(P.axdict[a], font='Helvetica')\n", (6441, 6472), True, 'import pylibrary.plotting.plothelpers as PH\n'), ((1935, 1978), 'numpy.nanmean', 'np.nanmean', (["self.d[m]['amplitude_midpoint']"], {}), "(self.d[m]['amplitude_midpoint'])\n", (1945, 1978), True, 'import numpy as np\n'), ((2013, 2048), 'numpy.nanmean', 'np.nanmean', (["self.d[m]['amplitudes']"], {}), "(self.d[m]['amplitudes'])\n", (2023, 2048), True, 'import numpy as np\n'), ((2081, 2115), 
'numpy.nanmean', 'np.nanmean', (["self.d[m]['intervals']"], {}), "(self.d[m]['intervals'])\n", (2091, 2115), True, 'import numpy as np\n'), ((2407, 2434), 'pandas.Categorical', 'pd.Categorical', (['self.gtypes'], {}), '(self.gtypes)\n', (2421, 2434), True, 'import pandas as pd\n'), ((2483, 2505), 'numpy.array', 'np.array', (['self.holding'], {}), '(self.holding)\n', (2491, 2505), True, 'import numpy as np\n'), ((2550, 2569), 'numpy.array', 'np.array', (['self.amps'], {}), '(self.amps)\n', (2558, 2569), True, 'import numpy as np\n'), ((2618, 2641), 'numpy.array', 'np.array', (['self.meanamps'], {}), '(self.meanamps)\n', (2626, 2641), True, 'import numpy as np\n'), ((2765, 2787), 'numpy.array', 'np.array', (['self.nevents'], {}), '(self.nevents)\n', (2773, 2787), True, 'import numpy as np\n'), ((2834, 2853), 'numpy.array', 'np.array', (['self.tau1'], {}), '(self.tau1)\n', (2842, 2853), True, 'import numpy as np\n'), ((2900, 2919), 'numpy.array', 'np.array', (['self.tau2'], {}), '(self.tau2)\n', (2908, 2919), True, 'import numpy as np\n'), ((2966, 2992), 'pandas.Categorical', 'pd.Categorical', (['self.mouse'], {}), '(self.mouse)\n', (2980, 2992), True, 'import pandas as pd\n'), ((3750, 3761), 'numpy.mean', 'np.mean', (['g1'], {}), '(g1)\n', (3757, 3761), True, 'import numpy as np\n'), ((3763, 3773), 'numpy.std', 'np.std', (['g1'], {}), '(g1)\n', (3769, 3773), True, 'import numpy as np\n'), ((3829, 3840), 'numpy.mean', 'np.mean', (['g2'], {}), '(g2)\n', (3836, 3840), True, 'import numpy as np\n'), ((3842, 3852), 'numpy.std', 'np.std', (['g2'], {}), '(g2)\n', (3848, 3852), True, 'import numpy as np\n'), ((4059, 4071), 'numpy.mean', 'np.mean', (['gm1'], {}), '(gm1)\n', (4066, 4071), True, 'import numpy as np\n'), ((4073, 4084), 'numpy.std', 'np.std', (['gm1'], {}), '(gm1)\n', (4079, 4084), True, 'import numpy as np\n'), ((4140, 4152), 'numpy.mean', 'np.mean', (['gm2'], {}), '(gm2)\n', (4147, 4152), True, 'import numpy as np\n'), ((4154, 4165), 'numpy.std', 'np.std', 
(['gm2'], {}), '(gm2)\n', (4160, 4165), True, 'import numpy as np\n'), ((2695, 2716), 'numpy.array', 'np.array', (['self.intvls'], {}), '(self.intvls)\n', (2703, 2716), True, 'import numpy as np\n')] |
# Copyright 2020 Makani Technologies LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Helper functions for additional type-checking."""
import collections
import numpy as np
class MatrixShapeOrTypeException(Exception):
"""Exception indicating a non-matrix argument."""
pass
def CheckIsMatrix(arg, shape=None):
return isinstance(arg, np.matrix) and (shape is None or arg.shape == shape)
def RequireMatrixArguments(*shapes):
"""A decorator that ensures arguments are np.matrix objects of given shapes.
Args:
*shapes: A list whose elements are either None or two element tuples (n, m).
There should be one element per argument to the decorated function. The
non-None arguments will be required to be n-by-m numpy.matrix objects.
Returns:
Decorator.
"""
def _CheckArguments(f):
assert len(shapes) == f.func_code.co_argcount
def _Wrapped(*args, **kwargs):
for (arg, shape) in zip(args, shapes):
if shape is not None and not CheckIsMatrix(arg, shape=shape):
raise MatrixShapeOrTypeException(shape)
return f(*args, **kwargs)
return _Wrapped
return _CheckArguments
def MakeNamedVectorClass(name, field_indices):
"""Generate a class for handling vectors with named sub-components.
Returns a class which extends collections.namedtuple so that each
element is a "slice" of a dim-by-1 np.matrix. The field_indices
argument is a list of pairs. The first entry of each pair is the
field name, the second entry is a list of vector indices, e.g.:
FooClass = MakeNamedVectorClass('Foo', [('r', [0]), ('i', [1, 2, 3])])
foo_instance = Foo(r=np.matrix[[1.0]], i=np.matrix[[2.0], [3.0], [4.0]])
Here, the total dimension is 4, and foo_instance.ToVector() will be
np.matrix([[0.0], [1.0], [2.0], [3.0]]).
Args:
name: Name to give the class.
field_indices: List of tuples defining the class as above.
Returns:
Named vector class defined as above.
"""
keys = [key for (key, _) in field_indices]
indices = [index for (_, index) in field_indices]
all_indices = []
for index in indices:
all_indices += index
dim = len(all_indices)
assert set(all_indices) == set(range(dim))
tuple_type = collections.namedtuple(name + 'Repr', keys)
class NamedVector(tuple_type):
"""Class representing a dim-by-1 np.matrix with named slices."""
def __init__(self, *args, **kwargs):
indices_dict = {key: index for (key, index) in field_indices}
for (key, value) in kwargs.iteritems():
if not CheckIsMatrix(value, shape=(len(indices_dict[key]), 1)):
raise MatrixShapeOrTypeException((key, value))
super(NamedVector, self).__init__(*args, **kwargs)
def ToVector(self):
"""Return the dim-by-1 np.matrix combining the named component vectors."""
vector = np.matrix(np.zeros((dim, 1)))
for i, index in enumerate(indices):
vector[index] = self[i]
return vector
@classmethod
@RequireMatrixArguments(None, (dim, 1))
def FromVector(cls, vector):
"""Inverse of ToVector()."""
values = [None for _ in keys]
for i, index in enumerate(indices):
values[i] = vector[index]
return cls(*values)
@classmethod
def GetIndices(cls):
"""Get a namedtuple whose elements are the component indices."""
return tuple_type(*indices)
@classmethod
def GetDim(cls):
return dim
@classmethod
def StepVector(cls, step_sizes):
"""Maps a {field_name: step_size} dict to a vector of step sizes."""
step_vector = np.matrix(np.zeros((cls.GetDim(), 1)))
indices = cls.GetIndices()
for field_name, size in step_sizes.iteritems():
step_vector[getattr(indices, field_name), 0] = size
assert (step_vector > 0.0).all
return step_vector
return NamedVector
def MakeStateClass(name, field_indices):
"""Creates a class for representing system state.
Generates a class for representing the state of a system where some
components of the state lie on manifolds such as SO(3). This
involves constructing two classes. The first is a class that
behaves like a namedtuple with each entry being a component of the
state. The second class behaves like a NamedVector and represents a
tangent vector for this space. The user must make a subclass of
this StateClass returned by this method to handle moving states
along tangent directions and recovering tangent directions from
pairs of states. An example is given below:
class AttitudeState(MakeStateClass(
'AttitudeState, [('omega', range(0, 3)),
('dcm_g2b', range(3, 6))])):
def Increment(self, tangent, step=1.0):
...
def Decrement(self, other_state):
...
state = AttitudeState(omega=np.matrix(np.zeros((3, 1))),
dcm_g2b=np.matrix(np.eye(3)))
tangent = AttitudeState.Tangent(domega=np.matrix([[1.0], [2.0], [3.0]]),
ddcm_g2b=np.matrix([[4.0], [5.0], [6.0]]))
# This is equivalent to np.matrix([[1.0], [2.0], [3.0], [4.0], [5.0], [6.0]]).
tangent.ToVector()
The structure of the state is given by field_indices which is a list
of pairs (field_name, tangent_indices). The string field_name gives
a name to this component of the state.
The Tangent class is a NamedVector with fields named 'd' +
field_name which are stored in the tangent_indices components of the
vector.
Args:
name: Name of the class to create.
field_indices: List of pairs (field_name, tangent_indices) describing
the structure of the class to create.
Returns:
A new class as described above.
"""
keys = [key for (key, _) in field_indices]
class StateClass(collections.namedtuple(name, keys)):
"""Class representing the state of a system."""
Tangent = MakeNamedVectorClass( # pylint: disable=invalid-name
name + 'Tangent',
[('d' + key, value) for (key, value) in field_indices])
def Increment(self, tangent, step=1.0):
raise NotImplementedError
def Difference(self, other_state):
raise NotImplementedError
return StateClass
def MakeFlatStateClass(name, field_indices):
"""Creates a class for representing system state in R^n.
Generates a class for representing the state of a system where
the Tangent vectors can be defined by element-wise addition
and subtraction of the states.
Args:
name: See MakeStateClass.
field_indices: See MakeStateClass.
Returns:
A new class as described above.
"""
class FlatStateClass(MakeStateClass(name, field_indices)):
"""StateClass representing a state in R^n."""
def Increment(self, tangent, step=1.0):
assert isinstance(tangent, FlatStateClass.Tangent)
return FlatStateClass(
*[value + step * tangent_value
for (value, tangent_value) in zip(self, tangent)])
def Difference(self, other_state):
return FlatStateClass.Tangent(
*[other_value - value
for (other_value, value) in zip(other_state, self)])
return FlatStateClass
| [
"numpy.zeros",
"collections.namedtuple"
] | [((2728, 2771), 'collections.namedtuple', 'collections.namedtuple', (["(name + 'Repr')", 'keys'], {}), "(name + 'Repr', keys)\n", (2750, 2771), False, 'import collections\n'), ((6259, 6293), 'collections.namedtuple', 'collections.namedtuple', (['name', 'keys'], {}), '(name, keys)\n', (6281, 6293), False, 'import collections\n'), ((3348, 3366), 'numpy.zeros', 'np.zeros', (['(dim, 1)'], {}), '((dim, 1))\n', (3356, 3366), True, 'import numpy as np\n')] |
import os
import h5py
import time
import numpy as np
from pathlib import Path
from sklearn.model_selection import train_test_split
import json
import argparse
def read_json(path):
with open(path) as json_data:
return json.load(json_data)
# Add arguments to parser
parser = argparse.ArgumentParser(description='Generate MLM entities')
parser.add_argument('--dataset', default='MLM_v1_eu', type=str,
choices=['MLM_v1', 'MLM_v1_sample', 'MLM_v1_eu', 'MLM_v2'], help='dataset')
args = parser.parse_args()
# define paths
ROOT_PATH = Path(os.path.dirname(__file__)).parent.parent
train = h5py.File(os.path.join(ROOT_PATH, f'dataset/{args.dataset}/train.h5'), 'a')
test = h5py.File(os.path.join(ROOT_PATH, f'dataset/{args.dataset}/test.h5'), 'a')
val = h5py.File(os.path.join(ROOT_PATH, f'dataset/{args.dataset}/val.h5'), 'a')
all_hdf5 = {
'train': [train, read_json(ROOT_PATH.parent / 'clusters_train')],
'val': [val, read_json(ROOT_PATH.parent / 'clusters_val')],
'test': [test, read_json(ROOT_PATH.parent / 'clusters_test')]
}
all_ids = [('train', train['ids']), ('val', val['ids']), ('test', test['ids'])]
tic = time.perf_counter()
for part, ids in all_ids:
# read hdf5 and clusters
h5f = all_hdf5[part][0]
clusters = all_hdf5[part][1]
for i, id in enumerate(ids):
onehot = h5f[f'{id}_onehot'][()]
cell = str(np.argmax(onehot))
cluster = set(clusters[cell]).copy() # get cluster
cluster.remove(id) # remove id from cluster
if f'{id}_cluster' not in h5f:
h5f.create_dataset(name=f'{id}_cluster', data=np.array(list(cluster), dtype=np.int), compression="gzip", compression_opts=9)
toc = time.perf_counter()
print(f'====> Finished id {id} -- {((i + 1) / len(ids)) * 100:.2f}% -- {toc - tic:0.2f}s -- {part}')
# close hdf5 files
h5f.close()
| [
"json.load",
"argparse.ArgumentParser",
"numpy.argmax",
"os.path.dirname",
"time.perf_counter",
"os.path.join"
] | [((287, 347), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Generate MLM entities"""'}), "(description='Generate MLM entities')\n", (310, 347), False, 'import argparse\n'), ((1164, 1183), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (1181, 1183), False, 'import time\n'), ((632, 691), 'os.path.join', 'os.path.join', (['ROOT_PATH', 'f"""dataset/{args.dataset}/train.h5"""'], {}), "(ROOT_PATH, f'dataset/{args.dataset}/train.h5')\n", (644, 691), False, 'import os\n'), ((715, 773), 'os.path.join', 'os.path.join', (['ROOT_PATH', 'f"""dataset/{args.dataset}/test.h5"""'], {}), "(ROOT_PATH, f'dataset/{args.dataset}/test.h5')\n", (727, 773), False, 'import os\n'), ((796, 853), 'os.path.join', 'os.path.join', (['ROOT_PATH', 'f"""dataset/{args.dataset}/val.h5"""'], {}), "(ROOT_PATH, f'dataset/{args.dataset}/val.h5')\n", (808, 853), False, 'import os\n'), ((230, 250), 'json.load', 'json.load', (['json_data'], {}), '(json_data)\n', (239, 250), False, 'import json\n'), ((1714, 1733), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (1731, 1733), False, 'import time\n'), ((572, 597), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (587, 597), False, 'import os\n'), ((1393, 1410), 'numpy.argmax', 'np.argmax', (['onehot'], {}), '(onehot)\n', (1402, 1410), True, 'import numpy as np\n')] |
#!/home/wanghongwei/anaconda3/envs/tf114/bin/python
# -*- coding: utf-8 -*-
import numpy as np
import cv2
def degree_compute(image, joints):
base_vec = joints[0] - joints[5]
l_edge_vec = joints[1] - joints[5]
r_edge_vec = joints[2] - joints[5]
arrow_vec = joints[4] - joints[5]
base_len = np.sqrt(base_vec[0]**2 + base_vec[1]**2)
l_edge_len = np.sqrt(l_edge_vec[0]**2 + l_edge_vec[1]**2)
r_edge_len = np.sqrt(r_edge_vec[0]**2 + r_edge_vec[1]**2)
arrow_len = np.sqrt(arrow_vec[0]**2 + arrow_vec[1]**2)
cos_theta0 = np.dot(base_vec, l_edge_vec) / (base_len * l_edge_len)
cos_theta1 = np.dot(base_vec, r_edge_vec) / (base_len * r_edge_len)
cos_theta2 = np.dot(base_vec, arrow_vec) / (base_len * arrow_len)
cos_theta3 = np.dot(arrow_vec, l_edge_vec) / (arrow_len * l_edge_len)
# cos_theta4 = np.dot(arrow_vec, r_edge_vec) / (arrow_len * r_edge_len)
degree_flag = 5
if cos_theta2 >= cos_theta0: # theta2 <= theta0
if cos_theta3 < cos_theta0: # theta3 > theta0
cv2.putText(image, 'high pressure', (0, 20), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2)
degree_flag = 4
else:
if cos_theta2 == cos_theta0:
cv2.putText(image, 'low pressure attention', (0, 20), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2)
degree_flag = 1
else:
cv2.putText(image, 'low pressure', (0, 20), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 0, 0), 2)
degree_flag = 0
elif cos_theta2 < cos_theta0 and cos_theta2 >= cos_theta1:
if cos_theta3 > cos_theta2:
if cos_theta2 - cos_theta1 < 1e-1:
cv2.putText(image, 'high pressure attention', (0, 20), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 10, 255), 2)
degree_flag = 3
elif cos_theta0 - cos_theta2 < 1e-1:
cv2.putText(image, 'low pressure attention', (0, 20), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 2)
degree_flag = 1
else:
cv2.putText(image, 'OK', (0, 20), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 2)
degree_flag = 2
else:
cv2.putText(image, 'high pressure', (0, 20), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2)
degree_flag = 4
else:
cv2.putText(image, 'high pressure', (0, 20), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2)
degree_flag = 4
return degree_flag
def write_image(Full_Img, degree_flag, path_list):
# cv2.imwrite('')
pass
| [
"numpy.dot",
"cv2.putText",
"numpy.sqrt"
] | [((312, 356), 'numpy.sqrt', 'np.sqrt', (['(base_vec[0] ** 2 + base_vec[1] ** 2)'], {}), '(base_vec[0] ** 2 + base_vec[1] ** 2)\n', (319, 356), True, 'import numpy as np\n'), ((370, 418), 'numpy.sqrt', 'np.sqrt', (['(l_edge_vec[0] ** 2 + l_edge_vec[1] ** 2)'], {}), '(l_edge_vec[0] ** 2 + l_edge_vec[1] ** 2)\n', (377, 418), True, 'import numpy as np\n'), ((432, 480), 'numpy.sqrt', 'np.sqrt', (['(r_edge_vec[0] ** 2 + r_edge_vec[1] ** 2)'], {}), '(r_edge_vec[0] ** 2 + r_edge_vec[1] ** 2)\n', (439, 480), True, 'import numpy as np\n'), ((493, 539), 'numpy.sqrt', 'np.sqrt', (['(arrow_vec[0] ** 2 + arrow_vec[1] ** 2)'], {}), '(arrow_vec[0] ** 2 + arrow_vec[1] ** 2)\n', (500, 539), True, 'import numpy as np\n'), ((553, 581), 'numpy.dot', 'np.dot', (['base_vec', 'l_edge_vec'], {}), '(base_vec, l_edge_vec)\n', (559, 581), True, 'import numpy as np\n'), ((625, 653), 'numpy.dot', 'np.dot', (['base_vec', 'r_edge_vec'], {}), '(base_vec, r_edge_vec)\n', (631, 653), True, 'import numpy as np\n'), ((697, 724), 'numpy.dot', 'np.dot', (['base_vec', 'arrow_vec'], {}), '(base_vec, arrow_vec)\n', (703, 724), True, 'import numpy as np\n'), ((767, 796), 'numpy.dot', 'np.dot', (['arrow_vec', 'l_edge_vec'], {}), '(arrow_vec, l_edge_vec)\n', (773, 796), True, 'import numpy as np\n'), ((1040, 1134), 'cv2.putText', 'cv2.putText', (['image', '"""high pressure"""', '(0, 20)', 'cv2.FONT_HERSHEY_SIMPLEX', '(1)', '(0, 0, 255)', '(2)'], {}), "(image, 'high pressure', (0, 20), cv2.FONT_HERSHEY_SIMPLEX, 1, (\n 0, 0, 255), 2)\n", (1051, 1134), False, 'import cv2\n'), ((2317, 2411), 'cv2.putText', 'cv2.putText', (['image', '"""high pressure"""', '(0, 20)', 'cv2.FONT_HERSHEY_SIMPLEX', '(1)', '(0, 0, 255)', '(2)'], {}), "(image, 'high pressure', (0, 20), cv2.FONT_HERSHEY_SIMPLEX, 1, (\n 0, 0, 255), 2)\n", (2328, 2411), False, 'import cv2\n'), ((1229, 1332), 'cv2.putText', 'cv2.putText', (['image', '"""low pressure attention"""', '(0, 20)', 'cv2.FONT_HERSHEY_SIMPLEX', '(1)', '(0, 0, 255)', '(2)'], {}), 
"(image, 'low pressure attention', (0, 20), cv2.\n FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2)\n", (1240, 1332), False, 'import cv2\n'), ((1394, 1487), 'cv2.putText', 'cv2.putText', (['image', '"""low pressure"""', '(0, 20)', 'cv2.FONT_HERSHEY_SIMPLEX', '(1)', '(255, 0, 0)', '(2)'], {}), "(image, 'low pressure', (0, 20), cv2.FONT_HERSHEY_SIMPLEX, 1, (\n 255, 0, 0), 2)\n", (1405, 1487), False, 'import cv2\n'), ((2181, 2275), 'cv2.putText', 'cv2.putText', (['image', '"""high pressure"""', '(0, 20)', 'cv2.FONT_HERSHEY_SIMPLEX', '(1)', '(0, 0, 255)', '(2)'], {}), "(image, 'high pressure', (0, 20), cv2.FONT_HERSHEY_SIMPLEX, 1, (\n 0, 0, 255), 2)\n", (2192, 2275), False, 'import cv2\n'), ((1677, 1784), 'cv2.putText', 'cv2.putText', (['image', '"""high pressure attention"""', '(0, 20)', 'cv2.FONT_HERSHEY_SIMPLEX', '(1)', '(255, 10, 255)', '(2)'], {}), "(image, 'high pressure attention', (0, 20), cv2.\n FONT_HERSHEY_SIMPLEX, 1, (255, 10, 255), 2)\n", (1688, 1784), False, 'import cv2\n'), ((1877, 1982), 'cv2.putText', 'cv2.putText', (['image', '"""low pressure attention"""', '(0, 20)', 'cv2.FONT_HERSHEY_SIMPLEX', '(1)', '(0, 255, 255)', '(2)'], {}), "(image, 'low pressure attention', (0, 20), cv2.\n FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 2)\n", (1888, 1982), False, 'import cv2\n'), ((2044, 2122), 'cv2.putText', 'cv2.putText', (['image', '"""OK"""', '(0, 20)', 'cv2.FONT_HERSHEY_SIMPLEX', '(1)', '(0, 255, 0)', '(2)'], {}), "(image, 'OK', (0, 20), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 2)\n", (2055, 2122), False, 'import cv2\n')] |
from colour import Color
from mobject.mobject import Mobject
from pytest import approx
from unittest.mock import call
from unittest.mock import create_autospec
import camera.camera
import constants as const
import inspect
import mobject.mobject
import numpy as np
import os
import pytest
SEED = 386735
np.random.seed(SEED)
def get_random_mobject(num_points=10, depth=False):
m = Mobject()
m.points = np.random.rand(num_points, 3)
if not depth:
m.points[:, 2] = 0
return m
def test_init():
m = Mobject()
# test default instance variables
default_config = Mobject.CONFIG
assert m.color == Color(default_config["color"])
assert m.name == m.__class__.__name__
assert m.dim == default_config["dim"]
assert m.target == default_config["target"]
assert m.submobjects == []
# All submobjects must be of type Mobject
with pytest.raises(Exception):
m = Mobject(5)
def test_str():
m = Mobject()
assert str(m) == "Mobject"
def test_reset_points():
m = Mobject()
m.reset_points()
assert np.all(m.points == np.zeros((0, m.dim)))
def test_add():
m = Mobject()
s1 = Mobject()
s2 = Mobject()
# All submobjects must be of type Mobject
with pytest.raises(Exception):
m.add(5)
# Mobject cannot contain self
with pytest.raises(Exception):
m.add(m)
m.add(s1)
assert m.submobjects == [s1]
# Mobject.submobjects cannot contain duplicates
m.add(s1)
assert m.submobjects == [s1]
# Newly added Mobjects become the last elements of Mobject.submobjects
m.add(s2)
assert m.submobjects == [s1, s2]
# Repeated additions move mobjects to the end
m.add(s1)
assert m.submobjects == [s2, s1]
def test_add_to_back():
s1 = Mobject()
s2 = Mobject()
s3 = Mobject()
m = Mobject(s1, s2, s3)
# All submobjects must be of type Mobject
with pytest.raises(Exception):
m.add_to_back(5)
# Mobject cannot contain self
with pytest.raises(Exception):
m.add_to_back(m)
# Newly added Mobjects become the first elements of Mobject.submobjects
s4 = Mobject()
m.add_to_back(s4)
assert m.submobjects == [s4, s1, s2, s3]
m.add_to_back(s1, s2)
assert m.submobjects == [s1, s2, s4, s3]
def test_remove():
s1 = Mobject()
s2 = Mobject()
s3 = Mobject()
m = Mobject(s1, s2, s3)
m.remove(s2)
assert m.submobjects == [s1, s3]
m.remove(s1, s3)
assert m.submobjects == []
m.remove(s1)
assert m.submobjects == []
def test_get_array_attrs():
assert Mobject().get_array_attrs() == ["points"]
def test_digest_mobject_attrs():
m = Mobject()
a = Mobject()
m.attr = a
m.digest_mobject_attrs()
assert m.submobjects == [a]
m.attr = m
with pytest.raises(Exception):
m.digest_mobject_attrs()
def test_apply_over_attr_arrays():
m = Mobject()
for attr in m.get_array_attrs():
setattr(m, attr, np.zeros((3, 3)))
m.apply_over_attr_arrays(lambda x: x + 1)
for attr in m.get_array_attrs():
assert getattr(m, attr) == approx(np.ones((3, 3)))
def test_get_image(mocker):
mock_get_image = mocker.patch.object(camera.camera.Camera, "get_image")
mock_capture_mobject = mocker.patch.object(
camera.camera.Camera,
"capture_mobject",
)
m = Mobject()
m.get_image()
mock_capture_mobject.assert_called_once_with(m)
mock_get_image.assert_called_once_with()
def test_show(mocker):
mocker.patch.object(mobject.mobject.Mobject, "get_image")
m = Mobject()
m.show()
expected = call(camera=None).show().call_list()
assert m.get_image.mock_calls == expected
def test_save_image(mocker, monkeypatch):
m = Mobject()
mock_animations_dir = "test_dir"
mock_path = os.path.join(mock_animations_dir, f"{str(m)}.png")
mocker.patch.object(mobject.mobject.Mobject, "get_image")
monkeypatch.setattr(mobject.mobject, "ANIMATIONS_DIR", mock_animations_dir)
mocker.spy(os.path, "join")
m.save_image()
os.path.join.assert_called_once_with(mock_animations_dir, f"{str(m)}.png")
expected_get_image_calls = call().save(mock_path).call_list()
assert m.get_image.mock_calls == expected_get_image_calls
def test_copy():
m1 = Mobject()
for attr in m1.get_array_attrs():
setattr(m1, attr, np.zeros((3, 3)))
m1.submobjects = [Mobject()]
m1.mob_attr = m1.submobjects[0]
m1.submobjects[0].points = np.zeros((3, 3))
m1.arr = [0]
m2 = m1.copy()
for attr in m2.get_array_attrs():
setattr(m2, attr, np.ones((3, 3)))
m2.add(Mobject())
m2.mob_attr = None
m2.arr[0] = 1
# mobjects attributes, (nd)array attributes, and
# submobjects should not be shared
assert np.allclose(m1.points, np.zeros((3, 3)))
assert len(m1.submobjects) == 1
assert m1.mob_attr is not None
# other attributes are shared
assert m1.arr[0] == 1
def test_deepcopy():
m1 = Mobject()
for attr in m1.get_array_attrs():
setattr(m1, attr, np.zeros((3, 3)))
m1.submobjects = [Mobject()]
m1.mob_attr = m1.submobjects[0]
m1.submobjects[0].points = np.zeros((3, 3))
m1.arr = [0]
m2 = m1.deepcopy()
for attr in m2.get_array_attrs():
setattr(m2, attr, np.ones((3, 3)))
m2.add(Mobject())
m2.mob_attr = None
m2.arr[0] = 1
# no attributes are shared
assert np.allclose(m1.points, np.zeros((3, 3)))
assert len(m1.submobjects) == 1
assert m1.mob_attr is not None
assert m1.arr[0] == 0
def test_generate_target():
m1 = Mobject()
for attr in m1.get_array_attrs():
setattr(m1, attr, np.zeros((3, 3)))
m1.submobjects = [Mobject()]
m1.mob_attr = m1.submobjects[0]
m1.submobjects[0].points = np.zeros((3, 3))
m1.arr = [0]
# test shallow copy
m1.generate_target()
for attr in m1.target.get_array_attrs():
setattr(m1.target, attr, np.ones((3, 3)))
m1.target.add(Mobject())
m1.target.mob_attr = None
m1.target.arr[0] = 1
assert np.allclose(m1.points, np.zeros((3, 3)))
assert len(m1.submobjects) == 1
assert m1.mob_attr is not None
assert m1.arr[0] == 1
# test deep copy
m1.generate_target(use_deepcopy=True)
m1.target = m1.deepcopy()
for attr in m1.target.get_array_attrs():
setattr(m1.target, attr, np.full((3, 3), 2))
m1.target.add(Mobject())
m1.target.mob_attr = None
m1.target.arr[0] = 2
assert np.allclose(m1.points, np.zeros((3, 3)))
assert len(m1.submobjects) == 1
assert m1.mob_attr is not None
assert m1.arr[0] == 1
def test_update(monkeypatch):
    """update(dt) calls 1-arg updaters with (mob) and 2-arg ones with (mob, dt).

    Also covers add/remove/clear bookkeeping and the error path for an
    unsupported argument count.
    """
    # patch get_num_args since it always returns 2 on Mocks
    mock_updater_1 = create_autospec(lambda x: None)
    mock_updater_2 = create_autospec(lambda x, y: None)
    mock_updater_3 = create_autospec(lambda x, y, z: None)
    def mock_get_num_args(func):
        # map each mock back to the arity of the lambda it was specced from
        if func == mock_updater_1:
            return 1
        elif func == mock_updater_2:
            return 2
        elif func == mock_updater_3:
            return 3
        else:
            raise Exception()
    monkeypatch.setattr(mobject.mobject, "get_num_args", mock_get_num_args)
    m = Mobject()
    m.add_updater(mock_updater_1, call_updater=False)
    m.add_updater(mock_updater_2, call_updater=False)
    # only the 2-arg updater is time-based
    assert len(m.get_updaters()) == 2
    assert len(m.get_time_based_updaters()) == 1
    m.update(1)
    mock_updater_1.assert_called_once_with(m)
    mock_updater_2.assert_called_once_with(m, 1)
    mock_updater_1.reset_mock()
    mock_updater_2.reset_mock()
    # removing an updater stops it from being called
    m.remove_updater(mock_updater_2)
    assert len(m.get_updaters()) == 1
    assert len(m.get_time_based_updaters()) == 0
    m.update(1)
    mock_updater_1.assert_called_once_with(m)
    mock_updater_2.assert_not_called()
    mock_updater_1.reset_mock()
    mock_updater_2.reset_mock()
    # a 3-arg updater is unsupported and makes update() raise
    m.add_updater(mock_updater_2, call_updater=False)
    m.add_updater(mock_updater_3, call_updater=False)
    assert len(m.get_updaters()) == 3
    assert len(m.get_time_based_updaters()) == 1
    with pytest.raises(Exception):
        m.update(1)
    mock_updater_1.reset_mock()
    mock_updater_2.reset_mock()
    mock_updater_3.reset_mock()
    # clear_updaters() drops everything
    m.clear_updaters()
    assert len(m.get_updaters()) == 0
    assert len(m.get_time_based_updaters()) == 0
def test_apply_to_family():
    """apply_to_family() runs the callback on every family member with points."""
    children = [Mobject(), Mobject()]
    for child in children:
        child.points = np.zeros((3, 3))
    parent = Mobject(*children)
    def set_to_ones(mob):
        mob.points = np.ones((3, 3))
    parent.apply_to_family(set_to_ones)
    for member in parent.family_members_with_points():
        assert member.points == approx(np.ones((3, 3)))
def test_shift():
    """shift() translates the parent's and every submobject's points."""
    child = Mobject()
    parent = Mobject(child)
    parent_points = np.random.rand(10, 3)
    child_points = np.random.rand(5, 3)
    parent.points = parent_points
    child.points = child_points
    # 2 * RIGHT - 1 * UP is the vector (2, -1, 0)
    delta = np.array([2, -1, 0])
    parent.shift(2 * const.RIGHT - 1 * const.UP)
    assert parent.points == approx(parent_points + delta)
    assert child.points == approx(child_points + delta)
def test_scale(mocker):
    """scale() delegates to apply_points_function_about_point with a
    pointwise scalar multiply; the lambda's source text is pinned exactly.
    """
    mocker.patch.object(
        mobject.mobject.Mobject,
        "apply_points_function_about_point",
        autospec=True,
    )
    m = Mobject()
    m.scale(3)
    m.apply_points_function_about_point.assert_called_once()
    args, kwargs = m.apply_points_function_about_point.call_args
    assert args[0] is m
    assert callable(args[1])
    # getsource reads the lambda as written in the mobject module, so this
    # test is coupled to the implementation's exact source text
    assert (
        inspect.getsource(args[1]).strip() ==
        "lambda points: scale_factor * points, **kwargs"
    )
    assert kwargs == {}
def test_rotate_about_origin(mocker):
    """rotate_about_origin() delegates to rotate() with the ORIGIN pivot."""
    mocker.patch.object(mobject.mobject.Mobject, "rotate")
    mob = Mobject()
    rotation_angle = 3 * const.PI / 4
    mob.rotate_about_origin(rotation_angle)
    mob.rotate.assert_called_once_with(
        rotation_angle, const.OUT, about_point=const.ORIGIN)
def test_rotate(mocker):
    """rotate() builds a rotation matrix about OUT and applies points . R.T."""
    mock_rotation_matrix = mocker.patch("mobject.mobject.rotation_matrix")
    mocker.patch.object(
        mobject.mobject.Mobject,
        "apply_points_function_about_point",
        autospec=True,
    )
    m = Mobject()
    angle = 3 * const.PI / 4
    m.rotate(angle)
    # the default rotation axis is OUT
    mock_rotation_matrix.assert_called_once_with(angle, const.OUT)
    m.apply_points_function_about_point.assert_called_once()
    args, kwargs = m.apply_points_function_about_point.call_args
    assert args[0] is m
    assert callable(args[1])
    # pins the exact lambda source in the mobject module (newlines collapsed
    # because the lambda may wrap in the implementation)
    assert (
        inspect.getsource(args[1]).strip().replace("\n", " ") ==
        "lambda points: np.dot(points, rot_matrix.T),"
    )
    assert kwargs == {}
def test_flip(mocker):
    """flip() mirrors points left/right via a half-turn about the UP axis."""
    mocker.spy(mobject.mobject.Mobject, "rotate")
    mob = Mobject()
    mob.points = np.array(
        [[1, 1, 0], [-1, -1, 0], [2, 2, 0], [-2, -2, 0]])
    mob.flip()
    # flip is implemented as a TAU/2 rotation about UP
    mob.rotate.assert_called_once_with(mob, const.TAU / 2, const.UP)
    mirrored = np.array(
        [[-1, 1, 0], [1, -1, 0], [-2, 2, 0], [2, -2, 0]])
    assert np.allclose(mob.points, mirrored)
def test_stretch(mocker):
    """stretch(factor, dim) scales one coordinate axis of the points in place.

    Uses a spy (not a stub) so the real implementation still runs and the
    final point values can be checked too.
    """
    mocker.spy(
        mobject.mobject.Mobject, "apply_points_function_about_point"
    )
    m = Mobject()
    m.points = np.array([
        [0, 1, 0],
        [0, 0, 0],
        [0, -1, 0],
    ])
    m.stretch(3, 1)
    m.apply_points_function_about_point.assert_called_once()
    args, kwargs = m.apply_points_function_about_point.call_args
    assert args[0] is m
    assert callable(args[1])
    # pins the exact helper-function source in the mobject module
    assert (
        inspect.getsource(args[1]).strip() ==
        "def func(points):\n"
        "    points[:, dim] *= factor\n"
        "    return points"
    )
    assert kwargs == {}
    # y column multiplied by 3, other columns untouched
    assert(np.allclose(m.points, [[0, 3, 0],
                           [0, 0, 0],
                           [0, -3, 0]]))
def test_apply_function(mocker):
    """apply_function(f) maps f over rows of points, pivoting at the ORIGIN."""
    mocker.patch.object(
        mobject.mobject.Mobject,
        "apply_points_function_about_point",
        autospec=True,
    )
    mock_func = mocker.Mock()
    m = Mobject()
    m.apply_function(mock_func)
    m.apply_points_function_about_point.assert_called_once()
    args, kwargs = m.apply_points_function_about_point.call_args
    assert args[0] is m
    assert callable(args[1])
    # pins the exact lambda source in the mobject module
    assert (
        inspect.getsource(args[1]).strip() ==
        "lambda points: np.apply_along_axis(function, 1, points),"
    )
    # unlike scale(), the pivot is passed explicitly as about_point
    assert kwargs == {"about_point": const.ORIGIN}
def test_apply_function_to_position(mocker):
    """apply_function_to_position() feeds get_center() through func, then moves."""
    center_value = np.random.randint(1000)
    moved_value = np.random.randint(1000)
    mocker.patch.object(
        mobject.mobject.Mobject, 'get_center',
        autospec=True, return_value=center_value)
    mocker.patch.object(mobject.mobject.Mobject, 'move_to', autospec=True)
    position_func = mocker.Mock(return_value=moved_value)
    mob = Mobject()
    mob.apply_function_to_position(position_func)
    mob.get_center.assert_called_once_with(mob)
    position_func.assert_called_once_with(center_value)
    mob.move_to.assert_called_once_with(mob, moved_value)
def test_apply_function_to_submobject_positions(mocker):
    """Every submobject receives apply_function_to_position(func)."""
    mocker.patch.object(mobject.mobject.Mobject, 'apply_function_to_position')
    position_func = mocker.Mock()
    parent = Mobject(Mobject(), Mobject(), Mobject())
    parent.apply_function_to_submobject_positions(position_func)
    for child in parent.submobjects:
        child.apply_function_to_position.assert_called_with(position_func)
def test_apply_matrix():
    """apply_matrix() computes (points - pivot) @ matrix.T + pivot.

    The default pivot is the origin; an explicit ``about_point`` translates
    to the pivot, transforms, then translates back.
    """
    points = np.random.rand(10, 3)
    matrix = np.random.rand(3, 3)
    m = Mobject()
    m.points = points.copy()
    m.apply_matrix(matrix)
    # default pivot: the origin
    expected = points.copy()
    expected -= const.ORIGIN
    # BUG FIX: transform the translated copy.  This previously read
    # np.dot(points, matrix.T), silently discarding the -= const.ORIGIN step
    # above (benign only because ORIGIN is the zero vector).
    expected = np.dot(expected, matrix.T)
    expected += const.ORIGIN
    assert(np.allclose(m.points, expected))
    # explicit pivot
    m.points = points
    about_point = np.random.rand(1, 3)
    expected = points.copy()
    expected -= about_point
    expected = np.dot(expected, matrix.T)
    expected += about_point
    m.apply_matrix(matrix, about_point=about_point)
    assert(np.allclose(m.points, expected))
def test_apply_complex_function(mocker):
    """apply_complex_function() wraps f in a complex->R3 adapter for
    apply_function()."""
    mocker.patch.object(
        mobject.mobject.Mobject,
        'apply_function',
        autospec=True,
    )
    mock_func = mocker.Mock()
    m = Mobject()
    m.apply_complex_function(mock_func)
    m.apply_function.assert_called_once()
    args, kwargs = m.apply_function.call_args
    # autospec passes self explicitly, so the call is (m, adapter)
    assert(len(args) == 2)
    assert(args[0] is m)
    # pins the exact adapter-lambda source in the mobject module
    assert (
        inspect.getsource(args[1]).strip() ==
        "lambda x_y_z: complex_to_R3(function(complex(x_y_z[0], x_y_z[1]))),"
    )
    assert(kwargs == {})
# this is a rather odd function. the current contract is simply that wag() will
# do what it does in this version (0c3e1308cd40e12f795e0f8e753acca02874c2b3).
def test_wag():
    """Lock in the exact default point transformation performed by wag()."""
    start = np.random.rand(10, 3)
    mob = Mobject()
    mob.points = start.copy()
    mob.wag()
    # reproduce the expected computation: project onto DOWN, normalise the
    # projections to [0, 1], then push each point along RIGHT by its alpha
    expected = start.copy()
    alphas = np.dot(expected, np.transpose(const.DOWN))
    alphas -= min(alphas)
    alphas /= max(alphas)
    # alphas = alphas**wag_factor
    expected += alphas.reshape((len(alphas), 1)) * \
        np.array(const.RIGHT).reshape((1, mob.dim))
    assert np.allclose(mob.points, expected)
def test_reverse_points():
    """reverse_points() reverses the row order of the points array."""
    original_points = np.random.rand(10, 3)
    mob = Mobject()
    mob.points = original_points.copy()
    mob.reverse_points()
    assert np.allclose(mob.points, original_points[::-1])
def test_repeat():
    """repeat(n) tiles the points of the mobject and every submobject n times."""
    parent_points = np.random.rand(10, 3)
    child_points = np.random.rand(10, 3)
    child = Mobject()
    child.points = child_points.copy()
    parent = Mobject(child)
    parent.points = parent_points.copy()
    parent.repeat(3)
    assert np.allclose(parent.points, np.tile(parent_points, (3, 1)))
    assert np.allclose(child.points, np.tile(child_points, (3, 1)))
def test_apply_points_function_about_point(mocker):
    """The core transform helper asks for a critical point and maps func over
    the points array."""
    mocker.patch.object(
        mobject.mobject.Mobject,
        'get_critical_point',
        return_value=const.ORIGIN,
    )
    m_points = np.random.rand(10, 3)
    func_return_points = np.random.rand(10, 3)
    mock_func = mocker.Mock(return_value=func_return_points)
    m = Mobject()
    m.points = m_points.copy()
    m.apply_points_function_about_point(mock_func, about_edge=const.ORIGIN)
    # about_edge is resolved into a point via get_critical_point
    m.get_critical_point.assert_called_once()
    args, kwargs = m.get_critical_point.call_args
    assert(np.allclose(args, const.ORIGIN) and kwargs == {})
    # func receives the (untranslated, since the pivot is the origin) points
    mock_func.assert_called_once()
    args, kwargs = mock_func.call_args
    assert(np.allclose(args, m_points) and kwargs == {})
# deprecated methods
# def rotate_in_place():
# def scale_in_place():
# def scale_about_point():
def test_pose_at_angle(mocker):
    """pose_at_angle() rotates by TAU/14 about the RIGHT + UP diagonal."""
    # BUG FIX: the original line ended with a stray comma, wrapping the patch
    # call in a throwaway 1-tuple; removed (behavior unchanged).
    mocker.patch.object(mobject.mobject.Mobject, 'rotate', autospec=True)
    m = Mobject()
    m.pose_at_angle()
    m.rotate.assert_called_once()
    args, kwargs = m.rotate.call_args
    # autospec passes self, so args are (m, angle, axis)
    assert args[0] == m
    assert args[1] == const.TAU / 14
    assert np.allclose(args[2], const.RIGHT + const.UP)
def test_center(mocker):
    """center() shifts the mobject by the negated center point."""
    fake_center = 5
    mocker.patch.object(mobject.mobject.Mobject, 'shift')
    mocker.patch.object(
        mobject.mobject.Mobject, 'get_center', return_value=fake_center)
    mob = Mobject()
    mob.center()
    mob.shift.assert_called_once_with(-fake_center)
def test_align_on_border(mocker):
    """align_on_border() shifts the critical point onto the frame border.

    Expected shift: (sign(dir) * frame_radii - point_to_align - buff * dir),
    masked so only the components where dir is nonzero contribute, plus the
    initial offset.
    """
    mock_dir = np.random.rand(3)
    mock_point_to_align = np.random.rand(3)
    mock_buff = np.random.rand(3)
    mock_offset = np.random.rand(3)
    mocker.patch.object(mobject.mobject.Mobject, 'get_critical_point', return_value=mock_point_to_align)
    mocker.patch.object(mobject.mobject.Mobject, 'shift')
    m = Mobject()
    m.points = np.random.rand(10, 3)
    m.align_on_border(mock_dir, buff=mock_buff, initial_offset=mock_offset)
    # the border point in direction dir, on the frame rectangle (z stays 0)
    mock_target_point = \
        np.sign(mock_dir) * (const.FRAME_X_RADIUS, const.FRAME_Y_RADIUS, 0)
    mock_shift_val = mock_target_point - mock_point_to_align - mock_buff * np.array(mock_dir)
    # abs(sign(dir)) zeroes components where dir is 0 (no alignment there)
    mock_shift_val = mock_shift_val * abs(np.sign(mock_dir))
    m.get_critical_point.assert_called()
    args, kwargs = m.get_critical_point.call_args
    assert np.allclose(args[0], mock_dir)
    assert kwargs == {}
    m.shift.assert_called()
    args, kwargs = m.shift.call_args
    assert np.allclose(args[0], mock_shift_val + mock_offset)
    assert kwargs == {}
def test_to_corner(mocker):
    """to_corner() forwards corner, buff and offset to align_on_border()."""
    mocker.patch.object(mobject.mobject.Mobject, 'align_on_border')
    corner = np.random.rand(1, 3)
    buff = np.random.rand(1, 3)
    offset = np.random.rand(1, 3)
    mob = Mobject()
    mob.to_corner(corner, buff=buff, initial_offset=offset)
    mob.align_on_border.assert_called_once()
    args, kwargs = mob.align_on_border.call_args
    for got, want in zip(args, (corner, buff, offset)):
        assert np.allclose(got, want)
    assert kwargs == {}
def test_to_edge(mocker):
    """to_edge() forwards edge, buff and offset to align_on_border()."""
    mocker.patch.object(mobject.mobject.Mobject, 'align_on_border')
    edge = np.random.rand(1, 3)
    buff = np.random.rand(1, 3)
    offset = np.random.rand(1, 3)
    mob = Mobject()
    mob.to_edge(edge, buff=buff, initial_offset=offset)
    mob.align_on_border.assert_called_once()
    args, kwargs = mob.align_on_border.call_args
    for got, want in zip(args, (edge, buff, offset)):
        assert np.allclose(got, want)
    assert kwargs == {}
def test_next_to(mocker):
    """next_to() shifts by (target - aligned point) + buff * direction."""
    aligned_point = np.random.rand(1, 3)
    mocker.patch.object(
        mobject.mobject.Mobject, 'get_critical_point',
        return_value=aligned_point)
    mocker.patch.object(mobject.mobject.Mobject, 'shift')
    target = np.random.rand(1, 3)
    direction = np.random.rand(1, 3)
    buff = np.random.rand(1, 3)
    aligned_edge = np.random.rand(1, 3)
    mob = Mobject()
    mob.next_to(target, direction, buff, aligned_edge)
    mob.shift.assert_called_once()
    args, _ = mob.shift.call_args
    assert np.allclose(args[0], target - aligned_point + buff * direction)
def test_align_to():
    """After align_to(), the two mobjects share their top y coordinate."""
    first = Mobject()
    first.points = np.random.rand(1, 3)
    second = Mobject()
    second.points = np.random.rand(1, 3)
    first.align_to(second)
    top_of_first = first.get_critical_point(const.UP)[1]
    top_of_second = second.get_critical_point(const.UP)[1]
    assert top_of_first == top_of_second
def test_shift_onto_screen():
    """An off-screen mobject is pulled back inside the edge buffer."""
    mob = Mobject()
    mob.points = np.array(
        [[const.FRAME_X_RADIUS + 1, const.FRAME_Y_RADIUS + 1, 0]])
    mob.shift_onto_screen()
    top = np.dot(mob.get_top(), const.UP)
    right = np.dot(mob.get_right(), const.RIGHT)
    assert top == const.FRAME_Y_RADIUS - const.DEFAULT_MOBJECT_TO_EDGE_BUFFER
    assert right == const.FRAME_X_RADIUS - const.DEFAULT_MOBJECT_TO_EDGE_BUFFER
def test_is_off_screen():
    """is_off_screen() detects points beyond either frame radius."""
    m = Mobject()
    m.points = np.array([[3, 4, 0]])
    assert not m.is_off_screen()
    # beyond the vertical frame radius
    m.points = np.array([[0, const.FRAME_Y_RADIUS + 1, 0]])
    # BUG FIX: this assignment was immediately overwritten with no assert,
    # so the vertical off-screen branch was never actually tested
    assert m.is_off_screen()
    # beyond the horizontal frame radius
    m.points = np.array([[const.FRAME_X_RADIUS + 1, 0, 0]])
    assert m.is_off_screen()
def test_stretch_about_point(mocker):
    """stretch_about_point() forwards factor/dim plus the pivot keyword."""
    mocker.patch.object(mobject.mobject.Mobject, 'stretch')
    pivot = np.random.rand(1, 3)
    mob = Mobject()
    mob.stretch_about_point(3, 1, pivot)
    mob.stretch.assert_called_once()
    args, kwargs = mob.stretch.call_args
    assert args[0] == 3
    assert args[1] == 1
    assert np.allclose(kwargs["about_point"], pivot)
# def stretch_in_place():
def test_rescale_to_fit(mocker):
    """rescale_to_fit(length, dim, stretch=True) stretches by length/current.

    length_over_dim is spied (not stubbed) so the real measurement runs and
    can be re-invoked to compute the expected stretch factor.
    """
    mocker.spy(mobject.mobject.Mobject, 'length_over_dim')
    mocker.patch.object(mobject.mobject.Mobject, 'stretch')
    mocker.patch.object(mobject.mobject.Mobject, 'scale')
    mock_dim = 1
    mock_length = 3
    points = np.random.rand(10, 3)
    m = Mobject()
    m.points = points
    m.rescale_to_fit(mock_length, mock_dim, stretch=True)
    m.length_over_dim.assert_called_once()
    # the spy records self explicitly, hence call(m, mock_dim)
    assert m.length_over_dim.call_args == call(m, mock_dim)
    # re-measure to derive the factor rescale_to_fit should have used
    length = m.length_over_dim(mock_dim)
    m.stretch.assert_called_once_with(mock_length / length, mock_dim)
def test_stretch_to_fit_width(mocker):
    """stretch_to_fit_width() rescales along dim 0 with stretch=True."""
    mocker.patch.object(mobject.mobject.Mobject, 'rescale_to_fit')
    target_width = np.random.randint(10)
    mob = Mobject()
    mob.stretch_to_fit_width(target_width)
    mob.rescale_to_fit.assert_called_once_with(target_width, 0, stretch=True)
def test_stretch_to_fit_height(mocker):
    """stretch_to_fit_height() rescales along dim 1 with stretch=True."""
    mocker.patch.object(mobject.mobject.Mobject, 'rescale_to_fit')
    target_height = np.random.randint(10)
    mob = Mobject()
    mob.stretch_to_fit_height(target_height)
    mob.rescale_to_fit.assert_called_once_with(target_height, 1, stretch=True)
def test_stretch_to_fit_depth(mocker):
    """stretch_to_fit_depth() rescales along dim 2 with stretch=True."""
    mocker.patch.object(mobject.mobject.Mobject, 'rescale_to_fit')
    target_depth = np.random.randint(10)
    mob = Mobject()
    mob.stretch_to_fit_depth(target_depth)
    mob.rescale_to_fit.assert_called_once_with(target_depth, 2, stretch=True)
def test_set_width(mocker):
    """set_width() forwards to rescale_to_fit(dim=0), passing stretch through."""
    mocker.patch.object(mobject.mobject.Mobject, 'rescale_to_fit')
    width = np.random.randint(10)
    mob = Mobject()
    # keyword form, stretch=True
    mob.set_width(width, stretch=True)
    mob.rescale_to_fit.assert_called_once_with(width, 0, stretch=True)
    mob.rescale_to_fit.reset_mock()
    # positional form, stretch=False
    mob.set_width(width, False)
    mob.rescale_to_fit.assert_called_once_with(width, 0, stretch=False)
def test_set_height(mocker):
    """set_height() forwards to rescale_to_fit(dim=1), passing stretch through."""
    mocker.patch.object(mobject.mobject.Mobject, 'rescale_to_fit')
    height = np.random.randint(10)
    mob = Mobject()
    # keyword form, stretch=True
    mob.set_height(height, stretch=True)
    mob.rescale_to_fit.assert_called_once_with(height, 1, stretch=True)
    mob.rescale_to_fit.reset_mock()
    # positional form, stretch=False
    mob.set_height(height, False)
    mob.rescale_to_fit.assert_called_once_with(height, 1, stretch=False)
def test_set_depth(mocker):
    """set_depth() forwards to rescale_to_fit(dim=2), passing stretch through."""
    mocker.patch.object(mobject.mobject.Mobject, 'rescale_to_fit')
    depth = np.random.randint(10)
    mob = Mobject()
    # keyword form, stretch=True
    mob.set_depth(depth, stretch=True)
    mob.rescale_to_fit.assert_called_once_with(depth, 2, stretch=True)
    mob.rescale_to_fit.reset_mock()
    # positional form, stretch=False
    mob.set_depth(depth, False)
    mob.rescale_to_fit.assert_called_once_with(depth, 2, stretch=False)
def test_space_out_submobjects(mocker):
    """space_out_submobjects(factor) is meant to scale the parent by factor
    and each submobject by 1/factor."""
    mocker.patch.object(mobject.mobject.Mobject, 'scale')
    mock_factor = np.random.randint(10)
    s1 = Mobject()
    s2 = Mobject()
    s3 = Mobject()
    m = Mobject(s1, s2, s3)
    m.space_out_submobjects(mock_factor)
    # NOTE(review): `called_once_with(...)` is not a Mock assertion method —
    # attribute access just creates a truthy child mock, so both asserts below
    # always pass regardless of what was called.  The intended methods are
    # `assert_called_once_with` / `assert_any_call`.  Also note the patch is on
    # the class, so `m.scale` and every `s.scale` are the SAME mock object; a
    # real fix needs to account for that shared call history.
    assert m.scale.called_once_with(mock_factor)
    for s in m.submobjects:
        assert s.scale.called_once_with(1. / mock_factor)
def test_move_to(mocker):
    """move_to() shifts by the masked difference from the critical point."""
    anchor = np.random.rand(1, 3)
    mask = np.random.rand(1, 3)
    destination = np.random.rand(1, 3)
    mocker.patch.object(
        mobject.mobject.Mobject, 'get_critical_point', return_value=anchor)
    mocker.patch.object(mobject.mobject.Mobject, 'shift')
    mob = Mobject()
    mob.move_to(destination, coor_mask=mask)
    # the reference point is measured in the ORIGIN direction
    mob.get_critical_point.assert_called_once()
    args, kwargs = mob.get_critical_point.call_args
    assert np.allclose(args[0], const.ORIGIN)
    assert kwargs == {}
    mob.shift.assert_called_once()
    args, kwargs = mob.shift.call_args
    assert np.allclose(args[0], (destination - anchor) * mask)
def test_replace():
    """replace() matches width and center; stretch=True also matches shape.

    Without stretch the width/height ratio is preserved; with stretch=True
    the ratio changes to match the target.
    """
    m1_points = np.random.rand(10, 3)
    m1 = Mobject()
    m1.points = m1_points.copy()
    m2_points = np.random.rand(10, 3)
    m2 = Mobject()
    m2.points = m2_points.copy()
    def get_ratio(mob):
        # width / height aspect ratio of the mobject's bounding box
        return mob.length_over_dim(0) / mob.length_over_dim(1)
    m1_orig_ratio = get_ratio(m1)
    # sanity: the random mobjects start out with different width and center
    assert m1.length_over_dim(0) != m2.length_over_dim(0)
    assert(not np.allclose(m1.get_center(), m2.get_center()))
    m1.replace(m2)
    # uniform replace keeps the aspect ratio but adopts width and center
    assert get_ratio(m1) == approx(m1_orig_ratio)
    assert m1.length_over_dim(0) == approx(m2.length_over_dim(0))
    assert np.allclose(m1.get_center(), m2.get_center())
    m1.points = m1_points.copy()
    m1.replace(m2, stretch=True)
    # stretching replace changes the aspect ratio to fit the target
    assert get_ratio(m1) != approx(m1_orig_ratio)
    assert m1.length_over_dim(0) == approx(m2.length_over_dim(0))
    assert np.allclose(m1.get_center(), m2.get_center())
def test_surround(mocker):
    """surround() replaces onto the target and then scales up in place."""
    mocker.patch.object(mobject.mobject.Mobject, 'replace')
    mocker.patch.object(mobject.mobject.Mobject, 'scale_in_place')
    outer = Mobject()
    inner = Mobject()
    outer.surround(inner)
    outer.replace.assert_called_once_with(inner, 0, False)
    outer.scale_in_place.assert_called_once_with(1.2)
def test_position_endpoints_on(mocker):
    """position_endpoints_on() maps first/last points via scale+rotate+shift."""
    for method_name in ('scale', 'rotate', 'shift'):
        mocker.spy(mobject.mobject.Mobject, method_name)
    mob = get_random_mobject()
    # random endpoints in the z=0 plane
    start = np.append(np.random.rand(2), 0)
    end = np.append(np.random.rand(2), 0)
    mob.position_endpoints_on(start, end)
    assert np.allclose(mob.points[0], start)
    assert np.allclose(mob.points[-1], end)
    # exactly one scale, one rotation and one shift are used
    mob.scale.assert_called_once()
    mob.rotate.assert_called_once()
    mob.shift.assert_called_once()
# def add_background_rectangle():
# def add_background_rectangle_to_submobjects():
# def add_background_rectangle_to_family_members_with_points():
# def match_color():
# def match_dim():
# def match_width():
# def match_height():
# def match_depth():
# def set_color():
# def set_color_by_gradient():
# def set_colors_by_radial_gradient():
# def set_submobject_colors_by_gradient():
# def set_submobject_colors_by_radial_gradient():
# def to_original_color():
# # used by default for fade()ing
# def fade_to_no_recurse():
# def fade_to():
# def fade_no_recurse():
# def fade():
# def get_color():
# def save_state():
# def restore():
# def reduce_across_dimension():
# # Note, this default means things like empty VGroups
# def nonempty_submobjects():
# def get_merged_array():
# def get_all_points():
# def get_points_defining_boundary():
# def get_num_points():
# def get_critical_point():
# def get_edge_center():
# def get_corner():
# def get_center():
# def get_center_of_mass():
# def get_boundary_point():
# def get_top():
# def get_bottom():
# def get_right():
# def get_left():
# def get_zenith():
# def get_nadir():
# def length_over_dim():
# def get_width():
# def get_height():
# def get_depth():
# def point_from_proportion():
# def __getitem__():
# def __iter__():
# def __len__():
# def get_group_class():
# def split():
# def submobject_family():
# def family_members_with_points():
# def arrange_submobjects():
# def arrange_submobjects_in_grid():
# def sort_submobjects():
# def shuffle_submobjects():
# def print_submobject_family():
# def align_data():
# def get_point_mobject():
# def align_points():
# def align_points_with_larger():
# def align_submobjects():
# def null_point_align():
# def push_self_into_submobjects():
# def add_n_more_submobjects():
# def repeat_submobject():
# def interpolate(self, mobject1, mobject2,
# def interpolate_color():
# def become_partial():
# def pointwise_become_partial():
| [
"unittest.mock.create_autospec",
"numpy.random.seed",
"numpy.allclose",
"numpy.ones",
"numpy.random.randint",
"numpy.tile",
"numpy.full",
"numpy.transpose",
"pytest.raises",
"inspect.getsource",
"mobject.mobject.Mobject",
"numpy.dot",
"pytest.approx",
"colour.Color",
"numpy.flip",
"num... | [((305, 325), 'numpy.random.seed', 'np.random.seed', (['SEED'], {}), '(SEED)\n', (319, 325), True, 'import numpy as np\n'), ((388, 397), 'mobject.mobject.Mobject', 'Mobject', ([], {}), '()\n', (395, 397), False, 'from mobject.mobject import Mobject\n'), ((413, 442), 'numpy.random.rand', 'np.random.rand', (['num_points', '(3)'], {}), '(num_points, 3)\n', (427, 442), True, 'import numpy as np\n'), ((528, 537), 'mobject.mobject.Mobject', 'Mobject', ([], {}), '()\n', (535, 537), False, 'from mobject.mobject import Mobject\n'), ((960, 969), 'mobject.mobject.Mobject', 'Mobject', ([], {}), '()\n', (967, 969), False, 'from mobject.mobject import Mobject\n'), ((1036, 1045), 'mobject.mobject.Mobject', 'Mobject', ([], {}), '()\n', (1043, 1045), False, 'from mobject.mobject import Mobject\n'), ((1145, 1154), 'mobject.mobject.Mobject', 'Mobject', ([], {}), '()\n', (1152, 1154), False, 'from mobject.mobject import Mobject\n'), ((1164, 1173), 'mobject.mobject.Mobject', 'Mobject', ([], {}), '()\n', (1171, 1173), False, 'from mobject.mobject import Mobject\n'), ((1183, 1192), 'mobject.mobject.Mobject', 'Mobject', ([], {}), '()\n', (1190, 1192), False, 'from mobject.mobject import Mobject\n'), ((1791, 1800), 'mobject.mobject.Mobject', 'Mobject', ([], {}), '()\n', (1798, 1800), False, 'from mobject.mobject import Mobject\n'), ((1810, 1819), 'mobject.mobject.Mobject', 'Mobject', ([], {}), '()\n', (1817, 1819), False, 'from mobject.mobject import Mobject\n'), ((1829, 1838), 'mobject.mobject.Mobject', 'Mobject', ([], {}), '()\n', (1836, 1838), False, 'from mobject.mobject import Mobject\n'), ((1847, 1866), 'mobject.mobject.Mobject', 'Mobject', (['s1', 's2', 's3'], {}), '(s1, s2, s3)\n', (1854, 1866), False, 'from mobject.mobject import Mobject\n'), ((2155, 2164), 'mobject.mobject.Mobject', 'Mobject', ([], {}), '()\n', (2162, 2164), False, 'from mobject.mobject import Mobject\n'), ((2334, 2343), 'mobject.mobject.Mobject', 'Mobject', ([], {}), '()\n', (2341, 2343), False, 'from 
mobject.mobject import Mobject\n'), ((2353, 2362), 'mobject.mobject.Mobject', 'Mobject', ([], {}), '()\n', (2360, 2362), False, 'from mobject.mobject import Mobject\n'), ((2372, 2381), 'mobject.mobject.Mobject', 'Mobject', ([], {}), '()\n', (2379, 2381), False, 'from mobject.mobject import Mobject\n'), ((2390, 2409), 'mobject.mobject.Mobject', 'Mobject', (['s1', 's2', 's3'], {}), '(s1, s2, s3)\n', (2397, 2409), False, 'from mobject.mobject import Mobject\n'), ((2693, 2702), 'mobject.mobject.Mobject', 'Mobject', ([], {}), '()\n', (2700, 2702), False, 'from mobject.mobject import Mobject\n'), ((2711, 2720), 'mobject.mobject.Mobject', 'Mobject', ([], {}), '()\n', (2718, 2720), False, 'from mobject.mobject import Mobject\n'), ((2926, 2935), 'mobject.mobject.Mobject', 'Mobject', ([], {}), '()\n', (2933, 2935), False, 'from mobject.mobject import Mobject\n'), ((3383, 3392), 'mobject.mobject.Mobject', 'Mobject', ([], {}), '()\n', (3390, 3392), False, 'from mobject.mobject import Mobject\n'), ((3603, 3612), 'mobject.mobject.Mobject', 'Mobject', ([], {}), '()\n', (3610, 3612), False, 'from mobject.mobject import Mobject\n'), ((3776, 3785), 'mobject.mobject.Mobject', 'Mobject', ([], {}), '()\n', (3783, 3785), False, 'from mobject.mobject import Mobject\n'), ((4319, 4328), 'mobject.mobject.Mobject', 'Mobject', ([], {}), '()\n', (4326, 4328), False, 'from mobject.mobject import Mobject\n'), ((4511, 4527), 'numpy.zeros', 'np.zeros', (['(3, 3)'], {}), '((3, 3))\n', (4519, 4527), True, 'import numpy as np\n'), ((5018, 5027), 'mobject.mobject.Mobject', 'Mobject', ([], {}), '()\n', (5025, 5027), False, 'from mobject.mobject import Mobject\n'), ((5210, 5226), 'numpy.zeros', 'np.zeros', (['(3, 3)'], {}), '((3, 3))\n', (5218, 5226), True, 'import numpy as np\n'), ((5632, 5641), 'mobject.mobject.Mobject', 'Mobject', ([], {}), '()\n', (5639, 5641), False, 'from mobject.mobject import Mobject\n'), ((5824, 5840), 'numpy.zeros', 'np.zeros', (['(3, 3)'], {}), '((3, 3))\n', (5832, 5840), 
True, 'import numpy as np\n'), ((6774, 6805), 'unittest.mock.create_autospec', 'create_autospec', (['(lambda x: None)'], {}), '(lambda x: None)\n', (6789, 6805), False, 'from unittest.mock import create_autospec\n'), ((6827, 6861), 'unittest.mock.create_autospec', 'create_autospec', (['(lambda x, y: None)'], {}), '(lambda x, y: None)\n', (6842, 6861), False, 'from unittest.mock import create_autospec\n'), ((6883, 6920), 'unittest.mock.create_autospec', 'create_autospec', (['(lambda x, y, z: None)'], {}), '(lambda x, y, z: None)\n', (6898, 6920), False, 'from unittest.mock import create_autospec\n'), ((7257, 7266), 'mobject.mobject.Mobject', 'Mobject', ([], {}), '()\n', (7264, 7266), False, 'from mobject.mobject import Mobject\n'), ((8429, 8438), 'mobject.mobject.Mobject', 'Mobject', ([], {}), '()\n', (8436, 8438), False, 'from mobject.mobject import Mobject\n'), ((8460, 8476), 'numpy.zeros', 'np.zeros', (['(3, 3)'], {}), '((3, 3))\n', (8468, 8476), True, 'import numpy as np\n'), ((8491, 8500), 'mobject.mobject.Mobject', 'Mobject', ([], {}), '()\n', (8498, 8500), False, 'from mobject.mobject import Mobject\n'), ((8522, 8538), 'numpy.zeros', 'np.zeros', (['(3, 3)'], {}), '((3, 3))\n', (8530, 8538), True, 'import numpy as np\n'), ((8547, 8572), 'mobject.mobject.Mobject', 'Mobject', (['submob1', 'submob2'], {}), '(submob1, submob2)\n', (8554, 8572), False, 'from mobject.mobject import Mobject\n'), ((8792, 8801), 'mobject.mobject.Mobject', 'Mobject', ([], {}), '()\n', (8799, 8801), False, 'from mobject.mobject import Mobject\n'), ((8810, 8825), 'mobject.mobject.Mobject', 'Mobject', (['submob'], {}), '(submob)\n', (8817, 8825), False, 'from mobject.mobject import Mobject\n'), ((8843, 8864), 'numpy.random.rand', 'np.random.rand', (['(10)', '(3)'], {}), '(10, 3)\n', (8857, 8864), True, 'import numpy as np\n'), ((8885, 8905), 'numpy.random.rand', 'np.random.rand', (['(5)', '(3)'], {}), '(5, 3)\n', (8899, 8905), True, 'import numpy as np\n'), ((9314, 9323), 
'mobject.mobject.Mobject', 'Mobject', ([], {}), '()\n', (9321, 9323), False, 'from mobject.mobject import Mobject\n'), ((9771, 9780), 'mobject.mobject.Mobject', 'Mobject', ([], {}), '()\n', (9778, 9780), False, 'from mobject.mobject import Mobject\n'), ((10197, 10206), 'mobject.mobject.Mobject', 'Mobject', ([], {}), '()\n', (10204, 10206), False, 'from mobject.mobject import Mobject\n'), ((10748, 10757), 'mobject.mobject.Mobject', 'Mobject', ([], {}), '()\n', (10755, 10757), False, 'from mobject.mobject import Mobject\n'), ((10773, 10831), 'numpy.array', 'np.array', (['[[1, 1, 0], [-1, -1, 0], [2, 2, 0], [-2, -2, 0]]'], {}), '([[1, 1, 0], [-1, -1, 0], [2, 2, 0], [-2, -2, 0]])\n', (10781, 10831), True, 'import numpy as np\n'), ((10964, 11022), 'numpy.array', 'np.array', (['[[-1, 1, 0], [1, -1, 0], [-2, 2, 0], [2, -2, 0]]'], {}), '([[-1, 1, 0], [1, -1, 0], [-2, 2, 0], [2, -2, 0]])\n', (10972, 11022), True, 'import numpy as np\n'), ((11109, 11140), 'numpy.allclose', 'np.allclose', (['m.points', 'expected'], {}), '(m.points, expected)\n', (11120, 11140), True, 'import numpy as np\n'), ((11269, 11278), 'mobject.mobject.Mobject', 'Mobject', ([], {}), '()\n', (11276, 11278), False, 'from mobject.mobject import Mobject\n'), ((11294, 11338), 'numpy.array', 'np.array', (['[[0, 1, 0], [0, 0, 0], [0, -1, 0]]'], {}), '([[0, 1, 0], [0, 0, 0], [0, -1, 0]])\n', (11302, 11338), True, 'import numpy as np\n'), ((11784, 11841), 'numpy.allclose', 'np.allclose', (['m.points', '[[0, 3, 0], [0, 0, 0], [0, -3, 0]]'], {}), '(m.points, [[0, 3, 0], [0, 0, 0], [0, -3, 0]])\n', (11795, 11841), True, 'import numpy as np\n'), ((12116, 12125), 'mobject.mobject.Mobject', 'Mobject', ([], {}), '()\n', (12123, 12125), False, 'from mobject.mobject import Mobject\n'), ((12596, 12619), 'numpy.random.randint', 'np.random.randint', (['(1000)'], {}), '(1000)\n', (12613, 12619), True, 'import numpy as np\n'), ((12797, 12820), 'numpy.random.randint', 'np.random.randint', (['(1000)'], {}), '(1000)\n', (12814, 
12820), True, 'import numpy as np\n'), ((12963, 12972), 'mobject.mobject.Mobject', 'Mobject', ([], {}), '()\n', (12970, 12972), False, 'from mobject.mobject import Mobject\n'), ((13251, 13260), 'mobject.mobject.Mobject', 'Mobject', ([], {}), '()\n', (13258, 13260), False, 'from mobject.mobject import Mobject\n'), ((13270, 13279), 'mobject.mobject.Mobject', 'Mobject', ([], {}), '()\n', (13277, 13279), False, 'from mobject.mobject import Mobject\n'), ((13289, 13298), 'mobject.mobject.Mobject', 'Mobject', ([], {}), '()\n', (13296, 13298), False, 'from mobject.mobject import Mobject\n'), ((13307, 13326), 'mobject.mobject.Mobject', 'Mobject', (['s1', 's2', 's3'], {}), '(s1, s2, s3)\n', (13314, 13326), False, 'from mobject.mobject import Mobject\n'), ((13638, 13659), 'numpy.random.rand', 'np.random.rand', (['(10)', '(3)'], {}), '(10, 3)\n', (13652, 13659), True, 'import numpy as np\n'), ((13673, 13693), 'numpy.random.rand', 'np.random.rand', (['(3)', '(3)'], {}), '(3, 3)\n', (13687, 13693), True, 'import numpy as np\n'), ((13702, 13711), 'mobject.mobject.Mobject', 'Mobject', ([], {}), '()\n', (13709, 13711), False, 'from mobject.mobject import Mobject\n'), ((13842, 13866), 'numpy.dot', 'np.dot', (['points', 'matrix.T'], {}), '(points, matrix.T)\n', (13848, 13866), True, 'import numpy as np\n'), ((13907, 13938), 'numpy.allclose', 'np.allclose', (['m.points', 'expected'], {}), '(m.points, expected)\n', (13918, 13938), True, 'import numpy as np\n'), ((13981, 14001), 'numpy.random.rand', 'np.random.rand', (['(1)', '(3)'], {}), '(1, 3)\n', (13995, 14001), True, 'import numpy as np\n'), ((14075, 14101), 'numpy.dot', 'np.dot', (['expected', 'matrix.T'], {}), '(expected, matrix.T)\n', (14081, 14101), True, 'import numpy as np\n'), ((14194, 14225), 'numpy.allclose', 'np.allclose', (['m.points', 'expected'], {}), '(m.points, expected)\n', (14205, 14225), True, 'import numpy as np\n'), ((14421, 14430), 'mobject.mobject.Mobject', 'Mobject', ([], {}), '()\n', (14428, 14430), False, 
'from mobject.mobject import Mobject\n'), ((14968, 14989), 'numpy.random.rand', 'np.random.rand', (['(10)', '(3)'], {}), '(10, 3)\n', (14982, 14989), True, 'import numpy as np\n'), ((14998, 15007), 'mobject.mobject.Mobject', 'Mobject', ([], {}), '()\n', (15005, 15007), False, 'from mobject.mobject import Mobject\n'), ((15354, 15385), 'numpy.allclose', 'np.allclose', (['m.points', 'expected'], {}), '(m.points, expected)\n', (15365, 15385), True, 'import numpy as np\n'), ((15429, 15450), 'numpy.random.rand', 'np.random.rand', (['(10)', '(3)'], {}), '(10, 3)\n', (15443, 15450), True, 'import numpy as np\n'), ((15459, 15468), 'mobject.mobject.Mobject', 'Mobject', ([], {}), '()\n', (15466, 15468), False, 'from mobject.mobject import Mobject\n'), ((15648, 15669), 'numpy.random.rand', 'np.random.rand', (['(10)', '(3)'], {}), '(10, 3)\n', (15662, 15669), True, 'import numpy as np\n'), ((15685, 15706), 'numpy.random.rand', 'np.random.rand', (['(10)', '(3)'], {}), '(10, 3)\n', (15699, 15706), True, 'import numpy as np\n'), ((15715, 15724), 'mobject.mobject.Mobject', 'Mobject', ([], {}), '()\n', (15722, 15724), False, 'from mobject.mobject import Mobject\n'), ((15764, 15774), 'mobject.mobject.Mobject', 'Mobject', (['s'], {}), '(s)\n', (15771, 15774), False, 'from mobject.mobject import Mobject\n'), ((15847, 15872), 'numpy.tile', 'np.tile', (['m_points', '(3, 1)'], {}), '(m_points, (3, 1))\n', (15854, 15872), True, 'import numpy as np\n'), ((15897, 15922), 'numpy.tile', 'np.tile', (['s_points', '(3, 1)'], {}), '(s_points, (3, 1))\n', (15904, 15922), True, 'import numpy as np\n'), ((15934, 15974), 'numpy.allclose', 'np.allclose', (['m.points', 'm_points_expected'], {}), '(m.points, m_points_expected)\n', (15945, 15974), True, 'import numpy as np\n'), ((15986, 16026), 'numpy.allclose', 'np.allclose', (['s.points', 's_points_expected'], {}), '(s.points, s_points_expected)\n', (15997, 16026), True, 'import numpy as np\n'), ((16225, 16246), 'numpy.random.rand', 'np.random.rand', 
(['(10)', '(3)'], {}), '(10, 3)\n', (16239, 16246), True, 'import numpy as np\n'), ((16272, 16293), 'numpy.random.rand', 'np.random.rand', (['(10)', '(3)'], {}), '(10, 3)\n', (16286, 16293), True, 'import numpy as np\n'), ((16364, 16373), 'mobject.mobject.Mobject', 'Mobject', ([], {}), '()\n', (16371, 16373), False, 'from mobject.mobject import Mobject\n'), ((16986, 16995), 'mobject.mobject.Mobject', 'Mobject', ([], {}), '()\n', (16993, 16995), False, 'from mobject.mobject import Mobject\n'), ((17163, 17207), 'numpy.allclose', 'np.allclose', (['args[2]', '(const.RIGHT + const.UP)'], {}), '(args[2], const.RIGHT + const.UP)\n', (17174, 17207), True, 'import numpy as np\n'), ((17441, 17450), 'mobject.mobject.Mobject', 'Mobject', ([], {}), '()\n', (17448, 17450), False, 'from mobject.mobject import Mobject\n'), ((17567, 17584), 'numpy.random.rand', 'np.random.rand', (['(3)'], {}), '(3)\n', (17581, 17584), True, 'import numpy as np\n'), ((17611, 17628), 'numpy.random.rand', 'np.random.rand', (['(3)'], {}), '(3)\n', (17625, 17628), True, 'import numpy as np\n'), ((17645, 17662), 'numpy.random.rand', 'np.random.rand', (['(3)'], {}), '(3)\n', (17659, 17662), True, 'import numpy as np\n'), ((17681, 17698), 'numpy.random.rand', 'np.random.rand', (['(3)'], {}), '(3)\n', (17695, 17698), True, 'import numpy as np\n'), ((17871, 17880), 'mobject.mobject.Mobject', 'Mobject', ([], {}), '()\n', (17878, 17880), False, 'from mobject.mobject import Mobject\n'), ((17896, 17917), 'numpy.random.rand', 'np.random.rand', (['(10)', '(3)'], {}), '(10, 3)\n', (17910, 17917), True, 'import numpy as np\n'), ((18355, 18385), 'numpy.allclose', 'np.allclose', (['args[0]', 'mock_dir'], {}), '(args[0], mock_dir)\n', (18366, 18385), True, 'import numpy as np\n'), ((18487, 18537), 'numpy.allclose', 'np.allclose', (['args[0]', '(mock_shift_val + mock_offset)'], {}), '(args[0], mock_shift_val + mock_offset)\n', (18498, 18537), True, 'import numpy as np\n'), ((18678, 18698), 'numpy.random.rand', 
'np.random.rand', (['(1)', '(3)'], {}), '(1, 3)\n', (18692, 18698), True, 'import numpy as np\n'), ((18715, 18735), 'numpy.random.rand', 'np.random.rand', (['(1)', '(3)'], {}), '(1, 3)\n', (18729, 18735), True, 'import numpy as np\n'), ((18754, 18774), 'numpy.random.rand', 'np.random.rand', (['(1)', '(3)'], {}), '(1, 3)\n', (18768, 18774), True, 'import numpy as np\n'), ((18784, 18793), 'mobject.mobject.Mobject', 'Mobject', ([], {}), '()\n', (18791, 18793), False, 'from mobject.mobject import Mobject\n'), ((18970, 19003), 'numpy.allclose', 'np.allclose', (['args[0]', 'mock_corner'], {}), '(args[0], mock_corner)\n', (18981, 19003), True, 'import numpy as np\n'), ((19015, 19046), 'numpy.allclose', 'np.allclose', (['args[1]', 'mock_buff'], {}), '(args[1], mock_buff)\n', (19026, 19046), True, 'import numpy as np\n'), ((19058, 19091), 'numpy.allclose', 'np.allclose', (['args[2]', 'mock_offset'], {}), '(args[2], mock_offset)\n', (19069, 19091), True, 'import numpy as np\n'), ((19228, 19248), 'numpy.random.rand', 'np.random.rand', (['(1)', '(3)'], {}), '(1, 3)\n', (19242, 19248), True, 'import numpy as np\n'), ((19265, 19285), 'numpy.random.rand', 'np.random.rand', (['(1)', '(3)'], {}), '(1, 3)\n', (19279, 19285), True, 'import numpy as np\n'), ((19304, 19324), 'numpy.random.rand', 'np.random.rand', (['(1)', '(3)'], {}), '(1, 3)\n', (19318, 19324), True, 'import numpy as np\n'), ((19334, 19343), 'mobject.mobject.Mobject', 'Mobject', ([], {}), '()\n', (19341, 19343), False, 'from mobject.mobject import Mobject\n'), ((19516, 19547), 'numpy.allclose', 'np.allclose', (['args[0]', 'mock_edge'], {}), '(args[0], mock_edge)\n', (19527, 19547), True, 'import numpy as np\n'), ((19559, 19590), 'numpy.allclose', 'np.allclose', (['args[1]', 'mock_buff'], {}), '(args[1], mock_buff)\n', (19570, 19590), True, 'import numpy as np\n'), ((19602, 19635), 'numpy.allclose', 'np.allclose', (['args[2]', 'mock_offset'], {}), '(args[2], mock_offset)\n', (19613, 19635), True, 'import numpy as 
np\n'), ((19712, 19732), 'numpy.random.rand', 'np.random.rand', (['(1)', '(3)'], {}), '(1, 3)\n', (19726, 19732), True, 'import numpy as np\n'), ((19924, 19944), 'numpy.random.rand', 'np.random.rand', (['(1)', '(3)'], {}), '(1, 3)\n', (19938, 19944), True, 'import numpy as np\n'), ((19965, 19985), 'numpy.random.rand', 'np.random.rand', (['(1)', '(3)'], {}), '(1, 3)\n', (19979, 19985), True, 'import numpy as np\n'), ((20000, 20020), 'numpy.random.rand', 'np.random.rand', (['(1)', '(3)'], {}), '(1, 3)\n', (20014, 20020), True, 'import numpy as np\n'), ((20043, 20063), 'numpy.random.rand', 'np.random.rand', (['(1)', '(3)'], {}), '(1, 3)\n', (20057, 20063), True, 'import numpy as np\n'), ((20073, 20082), 'mobject.mobject.Mobject', 'Mobject', ([], {}), '()\n', (20080, 20082), False, 'from mobject.mobject import Mobject\n'), ((20286, 20385), 'numpy.allclose', 'np.allclose', (['args[0]', '(mock_mobject_or_point - mock_point_to_align + mock_buff * mock_direction)'], {}), '(args[0], mock_mobject_or_point - mock_point_to_align + \n mock_buff * mock_direction)\n', (20297, 20385), True, 'import numpy as np\n'), ((20445, 20465), 'numpy.random.rand', 'np.random.rand', (['(1)', '(3)'], {}), '(1, 3)\n', (20459, 20465), True, 'import numpy as np\n'), ((20475, 20484), 'mobject.mobject.Mobject', 'Mobject', ([], {}), '()\n', (20482, 20484), False, 'from mobject.mobject import Mobject\n'), ((20527, 20547), 'numpy.random.rand', 'np.random.rand', (['(1)', '(3)'], {}), '(1, 3)\n', (20541, 20547), True, 'import numpy as np\n'), ((20557, 20566), 'mobject.mobject.Mobject', 'Mobject', ([], {}), '()\n', (20564, 20566), False, 'from mobject.mobject import Mobject\n'), ((20737, 20746), 'mobject.mobject.Mobject', 'Mobject', ([], {}), '()\n', (20744, 20746), False, 'from mobject.mobject import Mobject\n'), ((20762, 20829), 'numpy.array', 'np.array', (['[[const.FRAME_X_RADIUS + 1, const.FRAME_Y_RADIUS + 1, 0]]'], {}), '([[const.FRAME_X_RADIUS + 1, const.FRAME_Y_RADIUS + 1, 0]])\n', (20770, 20829), 
True, 'import numpy as np\n'), ((21105, 21114), 'mobject.mobject.Mobject', 'Mobject', ([], {}), '()\n', (21112, 21114), False, 'from mobject.mobject import Mobject\n'), ((21130, 21151), 'numpy.array', 'np.array', (['[[3, 4, 0]]'], {}), '([[3, 4, 0]])\n', (21138, 21151), True, 'import numpy as np\n'), ((21200, 21244), 'numpy.array', 'np.array', (['[[0, const.FRAME_Y_RADIUS + 1, 0]]'], {}), '([[0, const.FRAME_Y_RADIUS + 1, 0]])\n', (21208, 21244), True, 'import numpy as np\n'), ((21260, 21304), 'numpy.array', 'np.array', (['[[const.FRAME_X_RADIUS + 1, 0, 0]]'], {}), '([[const.FRAME_X_RADIUS + 1, 0, 0]])\n', (21268, 21304), True, 'import numpy as np\n'), ((21446, 21466), 'numpy.random.rand', 'np.random.rand', (['(1)', '(3)'], {}), '(1, 3)\n', (21460, 21466), True, 'import numpy as np\n'), ((21475, 21484), 'mobject.mobject.Mobject', 'Mobject', ([], {}), '()\n', (21482, 21484), False, 'from mobject.mobject import Mobject\n'), ((21658, 21699), 'numpy.allclose', 'np.allclose', (["kwargs['about_point']", 'point'], {}), "(kwargs['about_point'], point)\n", (21669, 21699), True, 'import numpy as np\n'), ((21990, 22011), 'numpy.random.rand', 'np.random.rand', (['(10)', '(3)'], {}), '(10, 3)\n', (22004, 22011), True, 'import numpy as np\n'), ((22020, 22029), 'mobject.mobject.Mobject', 'Mobject', ([], {}), '()\n', (22027, 22029), False, 'from mobject.mobject import Mobject\n'), ((22450, 22471), 'numpy.random.randint', 'np.random.randint', (['(10)'], {}), '(10)\n', (22467, 22471), True, 'import numpy as np\n'), ((22480, 22489), 'mobject.mobject.Mobject', 'Mobject', ([], {}), '()\n', (22487, 22489), False, 'from mobject.mobject import Mobject\n'), ((22730, 22751), 'numpy.random.randint', 'np.random.randint', (['(10)'], {}), '(10)\n', (22747, 22751), True, 'import numpy as np\n'), ((22760, 22769), 'mobject.mobject.Mobject', 'Mobject', ([], {}), '()\n', (22767, 22769), False, 'from mobject.mobject import Mobject\n'), ((23011, 23032), 'numpy.random.randint', 'np.random.randint', 
(['(10)'], {}), '(10)\n', (23028, 23032), True, 'import numpy as np\n'), ((23041, 23050), 'mobject.mobject.Mobject', 'Mobject', ([], {}), '()\n', (23048, 23050), False, 'from mobject.mobject import Mobject\n'), ((23278, 23299), 'numpy.random.randint', 'np.random.randint', (['(10)'], {}), '(10)\n', (23295, 23299), True, 'import numpy as np\n'), ((23332, 23341), 'mobject.mobject.Mobject', 'Mobject', ([], {}), '()\n', (23339, 23341), False, 'from mobject.mobject import Mobject\n'), ((23759, 23780), 'numpy.random.randint', 'np.random.randint', (['(10)'], {}), '(10)\n', (23776, 23780), True, 'import numpy as np\n'), ((23813, 23822), 'mobject.mobject.Mobject', 'Mobject', ([], {}), '()\n', (23820, 23822), False, 'from mobject.mobject import Mobject\n'), ((24244, 24265), 'numpy.random.randint', 'np.random.randint', (['(10)'], {}), '(10)\n', (24261, 24265), True, 'import numpy as np\n'), ((24298, 24307), 'mobject.mobject.Mobject', 'Mobject', ([], {}), '()\n', (24305, 24307), False, 'from mobject.mobject import Mobject\n'), ((24727, 24748), 'numpy.random.randint', 'np.random.randint', (['(10)'], {}), '(10)\n', (24744, 24748), True, 'import numpy as np\n'), ((24758, 24767), 'mobject.mobject.Mobject', 'Mobject', ([], {}), '()\n', (24765, 24767), False, 'from mobject.mobject import Mobject\n'), ((24777, 24786), 'mobject.mobject.Mobject', 'Mobject', ([], {}), '()\n', (24784, 24786), False, 'from mobject.mobject import Mobject\n'), ((24796, 24805), 'mobject.mobject.Mobject', 'Mobject', ([], {}), '()\n', (24803, 24805), False, 'from mobject.mobject import Mobject\n'), ((24814, 24833), 'mobject.mobject.Mobject', 'Mobject', (['s1', 's2', 's3'], {}), '(s1, s2, s3)\n', (24821, 24833), False, 'from mobject.mobject import Mobject\n'), ((25064, 25084), 'numpy.random.rand', 'np.random.rand', (['(1)', '(3)'], {}), '(1, 3)\n', (25078, 25084), True, 'import numpy as np\n'), ((25106, 25126), 'numpy.random.rand', 'np.random.rand', (['(1)', '(3)'], {}), '(1, 3)\n', (25120, 25126), True, 'import 
numpy as np\n'), ((25329, 25338), 'mobject.mobject.Mobject', 'Mobject', ([], {}), '()\n', (25336, 25338), False, 'from mobject.mobject import Mobject\n'), ((25367, 25387), 'numpy.random.rand', 'np.random.rand', (['(1)', '(3)'], {}), '(1, 3)\n', (25381, 25387), True, 'import numpy as np\n'), ((25559, 25593), 'numpy.allclose', 'np.allclose', (['args[0]', 'const.ORIGIN'], {}), '(args[0], const.ORIGIN)\n', (25570, 25593), True, 'import numpy as np\n'), ((25700, 25788), 'numpy.allclose', 'np.allclose', (['args[0]', '((mock_point_or_mobject - mock_point_to_align) * mock_coor_mask)'], {}), '(args[0], (mock_point_or_mobject - mock_point_to_align) *\n mock_coor_mask)\n', (25711, 25788), True, 'import numpy as np\n'), ((25846, 25867), 'numpy.random.rand', 'np.random.rand', (['(10)', '(3)'], {}), '(10, 3)\n', (25860, 25867), True, 'import numpy as np\n'), ((25877, 25886), 'mobject.mobject.Mobject', 'Mobject', ([], {}), '()\n', (25884, 25886), False, 'from mobject.mobject import Mobject\n'), ((25936, 25957), 'numpy.random.rand', 'np.random.rand', (['(10)', '(3)'], {}), '(10, 3)\n', (25950, 25957), True, 'import numpy as np\n'), ((25967, 25976), 'mobject.mobject.Mobject', 'Mobject', ([], {}), '()\n', (25974, 25976), False, 'from mobject.mobject import Mobject\n'), ((26850, 26859), 'mobject.mobject.Mobject', 'Mobject', ([], {}), '()\n', (26857, 26859), False, 'from mobject.mobject import Mobject\n'), ((26869, 26878), 'mobject.mobject.Mobject', 'Mobject', ([], {}), '()\n', (26876, 26878), False, 'from mobject.mobject import Mobject\n'), ((27381, 27417), 'numpy.allclose', 'np.allclose', (['m.points[0]', 'mock_start'], {}), '(m.points[0], mock_start)\n', (27392, 27417), True, 'import numpy as np\n'), ((27429, 27464), 'numpy.allclose', 'np.allclose', (['m.points[-1]', 'mock_end'], {}), '(m.points[-1], mock_end)\n', (27440, 27464), True, 'import numpy as np\n'), ((635, 665), 'colour.Color', 'Color', (["default_config['color']"], {}), "(default_config['color'])\n", (640, 665), False, 
'from colour import Color\n'), ((885, 909), 'pytest.raises', 'pytest.raises', (['Exception'], {}), '(Exception)\n', (898, 909), False, 'import pytest\n'), ((923, 933), 'mobject.mobject.Mobject', 'Mobject', (['(5)'], {}), '(5)\n', (930, 933), False, 'from mobject.mobject import Mobject\n'), ((1249, 1273), 'pytest.raises', 'pytest.raises', (['Exception'], {}), '(Exception)\n', (1262, 1273), False, 'import pytest\n'), ((1336, 1360), 'pytest.raises', 'pytest.raises', (['Exception'], {}), '(Exception)\n', (1349, 1360), False, 'import pytest\n'), ((1923, 1947), 'pytest.raises', 'pytest.raises', (['Exception'], {}), '(Exception)\n', (1936, 1947), False, 'import pytest\n'), ((2018, 2042), 'pytest.raises', 'pytest.raises', (['Exception'], {}), '(Exception)\n', (2031, 2042), False, 'import pytest\n'), ((2822, 2846), 'pytest.raises', 'pytest.raises', (['Exception'], {}), '(Exception)\n', (2835, 2846), False, 'import pytest\n'), ((4433, 4442), 'mobject.mobject.Mobject', 'Mobject', ([], {}), '()\n', (4440, 4442), False, 'from mobject.mobject import Mobject\n'), ((4657, 4666), 'mobject.mobject.Mobject', 'Mobject', ([], {}), '()\n', (4664, 4666), False, 'from mobject.mobject import Mobject\n'), ((4836, 4852), 'numpy.zeros', 'np.zeros', (['(3, 3)'], {}), '((3, 3))\n', (4844, 4852), True, 'import numpy as np\n'), ((5132, 5141), 'mobject.mobject.Mobject', 'Mobject', ([], {}), '()\n', (5139, 5141), False, 'from mobject.mobject import Mobject\n'), ((5360, 5369), 'mobject.mobject.Mobject', 'Mobject', ([], {}), '()\n', (5367, 5369), False, 'from mobject.mobject import Mobject\n'), ((5478, 5494), 'numpy.zeros', 'np.zeros', (['(3, 3)'], {}), '((3, 3))\n', (5486, 5494), True, 'import numpy as np\n'), ((5746, 5755), 'mobject.mobject.Mobject', 'Mobject', ([], {}), '()\n', (5753, 5755), False, 'from mobject.mobject import Mobject\n'), ((6021, 6030), 'mobject.mobject.Mobject', 'Mobject', ([], {}), '()\n', (6028, 6030), False, 'from mobject.mobject import Mobject\n'), ((6121, 6137), 
'numpy.zeros', 'np.zeros', (['(3, 3)'], {}), '((3, 3))\n', (6129, 6137), True, 'import numpy as np\n'), ((6446, 6455), 'mobject.mobject.Mobject', 'Mobject', ([], {}), '()\n', (6453, 6455), False, 'from mobject.mobject import Mobject\n'), ((6546, 6562), 'numpy.zeros', 'np.zeros', (['(3, 3)'], {}), '((3, 3))\n', (6554, 6562), True, 'import numpy as np\n'), ((8132, 8156), 'pytest.raises', 'pytest.raises', (['Exception'], {}), '(Exception)\n', (8145, 8156), False, 'import pytest\n'), ((8614, 8629), 'numpy.ones', 'np.ones', (['(3, 3)'], {}), '((3, 3))\n', (8621, 8629), True, 'import numpy as np\n'), ((15109, 15133), 'numpy.transpose', 'np.transpose', (['const.DOWN'], {}), '(const.DOWN)\n', (15121, 15133), True, 'import numpy as np\n'), ((15584, 15609), 'numpy.flip', 'np.flip', (['expected'], {'axis': '(0)'}), '(expected, axis=0)\n', (15591, 15609), True, 'import numpy as np\n'), ((16589, 16620), 'numpy.allclose', 'np.allclose', (['args', 'const.ORIGIN'], {}), '(args, const.ORIGIN)\n', (16600, 16620), True, 'import numpy as np\n'), ((16724, 16751), 'numpy.allclose', 'np.allclose', (['args', 'm_points'], {}), '(args, m_points)\n', (16735, 16751), True, 'import numpy as np\n'), ((18029, 18046), 'numpy.sign', 'np.sign', (['mock_dir'], {}), '(mock_dir)\n', (18036, 18046), True, 'import numpy as np\n'), ((22196, 22213), 'unittest.mock.call', 'call', (['m', 'mock_dim'], {}), '(m, mock_dim)\n', (22200, 22213), False, 'from unittest.mock import call\n'), ((26300, 26321), 'pytest.approx', 'approx', (['m1_orig_ratio'], {}), '(m1_orig_ratio)\n', (26306, 26321), False, 'from pytest import approx\n'), ((26540, 26561), 'pytest.approx', 'approx', (['m1_orig_ratio'], {}), '(m1_orig_ratio)\n', (26546, 26561), False, 'from pytest import approx\n'), ((27250, 27267), 'numpy.random.rand', 'np.random.rand', (['(2)'], {}), '(2)\n', (27264, 27267), True, 'import numpy as np\n'), ((27297, 27314), 'numpy.random.rand', 'np.random.rand', (['(2)'], {}), '(2)\n', (27311, 27314), True, 'import numpy 
as np\n'), ((1097, 1117), 'numpy.zeros', 'np.zeros', (['(0, m.dim)'], {}), '((0, m.dim))\n', (1105, 1117), True, 'import numpy as np\n'), ((2998, 3014), 'numpy.zeros', 'np.zeros', (['(3, 3)'], {}), '((3, 3))\n', (3006, 3014), True, 'import numpy as np\n'), ((4393, 4409), 'numpy.zeros', 'np.zeros', (['(3, 3)'], {}), '((3, 3))\n', (4401, 4409), True, 'import numpy as np\n'), ((4629, 4644), 'numpy.ones', 'np.ones', (['(3, 3)'], {}), '((3, 3))\n', (4636, 4644), True, 'import numpy as np\n'), ((5092, 5108), 'numpy.zeros', 'np.zeros', (['(3, 3)'], {}), '((3, 3))\n', (5100, 5108), True, 'import numpy as np\n'), ((5332, 5347), 'numpy.ones', 'np.ones', (['(3, 3)'], {}), '((3, 3))\n', (5339, 5347), True, 'import numpy as np\n'), ((5706, 5722), 'numpy.zeros', 'np.zeros', (['(3, 3)'], {}), '((3, 3))\n', (5714, 5722), True, 'import numpy as np\n'), ((5986, 6001), 'numpy.ones', 'np.ones', (['(3, 3)'], {}), '((3, 3))\n', (5993, 6001), True, 'import numpy as np\n'), ((6408, 6426), 'numpy.full', 'np.full', (['(3, 3)', '(2)'], {}), '((3, 3), 2)\n', (6415, 6426), True, 'import numpy as np\n'), ((18172, 18190), 'numpy.array', 'np.array', (['mock_dir'], {}), '(mock_dir)\n', (18180, 18190), True, 'import numpy as np\n'), ((18233, 18250), 'numpy.sign', 'np.sign', (['mock_dir'], {}), '(mock_dir)\n', (18240, 18250), True, 'import numpy as np\n'), ((2608, 2617), 'mobject.mobject.Mobject', 'Mobject', ([], {}), '()\n', (2615, 2617), False, 'from mobject.mobject import Mobject\n'), ((3141, 3156), 'numpy.ones', 'np.ones', (['(3, 3)'], {}), '((3, 3))\n', (3148, 3156), True, 'import numpy as np\n'), ((8742, 8757), 'numpy.ones', 'np.ones', (['(3, 3)'], {}), '((3, 3))\n', (8749, 8757), True, 'import numpy as np\n'), ((9053, 9073), 'numpy.array', 'np.array', (['[2, -1, 0]'], {}), '([2, -1, 0])\n', (9061, 9073), True, 'import numpy as np\n'), ((9126, 9146), 'numpy.array', 'np.array', (['[2, -1, 0]'], {}), '([2, -1, 0])\n', (9134, 9146), True, 'import numpy as np\n'), ((9539, 9565), 
'inspect.getsource', 'inspect.getsource', (['args[1]'], {}), '(args[1])\n', (9556, 9565), False, 'import inspect\n'), ((11590, 11616), 'inspect.getsource', 'inspect.getsource', (['args[1]'], {}), '(args[1])\n', (11607, 11616), False, 'import inspect\n'), ((12358, 12384), 'inspect.getsource', 'inspect.getsource', (['args[1]'], {}), '(args[1])\n', (12375, 12384), False, 'import inspect\n'), ((14632, 14658), 'inspect.getsource', 'inspect.getsource', (['args[1]'], {}), '(args[1])\n', (14649, 14658), False, 'import inspect\n'), ((15295, 15316), 'numpy.array', 'np.array', (['const.RIGHT'], {}), '(const.RIGHT)\n', (15303, 15316), True, 'import numpy as np\n'), ((3641, 3658), 'unittest.mock.call', 'call', ([], {'camera': 'None'}), '(camera=None)\n', (3645, 3658), False, 'from unittest.mock import call\n'), ((4194, 4200), 'unittest.mock.call', 'call', ([], {}), '()\n', (4198, 4200), False, 'from unittest.mock import call\n'), ((10523, 10549), 'inspect.getsource', 'inspect.getsource', (['args[1]'], {}), '(args[1])\n', (10540, 10549), False, 'import inspect\n')] |
from deap import base, creator, tools
import random
import numpy as np
import statsmodels.api as sm
import pandas as pd
from tqdm import tqdm
class Patient_opt:
    """Genetic-algorithm search for a two-group split of patients that
    maximises the log-rank survival statistic while keeping the two groups
    balanced in size.

    Parameters
    ----------
    patients : pandas.DataFrame
        Must contain the columns ``'survival (months)'`` and ``'DiedvsAlive'``.
    mutpb : float
        Probability that an offspring is mutated.
    copb : float
        Probability that a pair of offspring is mated (crossover).
    n_indviduals : int
        Population size per generation.
    n_gens : int
        Number of generations to evolve.
    """

    def __init__(self, patients, mutpb=0.05, copb=0.5, n_indviduals=100, n_gens=100):
        super().__init__()
        self.patients = patients
        self.mutpb = mutpb
        self.copb = copb
        self.n_indviduals = n_indviduals
        self.n_gens = n_gens
        # Two objectives: maximise the log-rank statistic (+1.0) and
        # minimise the group-size imbalance (-1.0).
        creator.create("FitnessMax", base.Fitness, weights=(1.0, -1.0))
        creator.create("solution", list, fitness=creator.FitnessMax)
        self.toolbox = base.Toolbox()
        # Each gene is a 0/1 group assignment for one patient.
        self.toolbox.register('grouping', np.random.randint, 0, 2)
        # A solution assigns every patient (one gene per row) to a group.
        self.toolbox.register('solution', tools.initRepeat, creator.solution,
                              self.toolbox.grouping, n=self.patients.shape[0])
        # A population is a list of candidate solutions.
        self.toolbox.register("population", tools.initRepeat, list, self.toolbox.solution)
        self.toolbox.register("select", tools.selTournament, tournsize=5)
        self.toolbox.register("mate", tools.cxTwoPoint)
        self.toolbox.register("mutate", tools.mutFlipBit, indpb=0.2)

    def create_population(self):
        """Initialise a fresh random population of candidate groupings."""
        self.population = self.toolbox.population(n=self.n_indviduals)

    def evaluate_individual(self, individual):
        """Return (log-rank statistic, group-size imbalance) for one grouping."""
        duration = self.patients['survival (months)'].values
        death_obs = (self.patients['DiedvsAlive'] == 'Died').values
        groups = individual
        # Log-rank test between the two groups; a larger statistic means a
        # stronger survival separation.
        stat, p = sm.duration.survdiff(duration, death_obs, groups)
        # Distance from a perfect 50/50 split (to be minimised).
        bal = np.abs((len(groups) / 2) - np.sum(groups))
        return (stat.astype(np.float16), bal)

    def select_individuals(self):
        """Tournament-select the next generation and clone it into offspring."""
        new_gen = self.toolbox.select(self.population, len(self.population))
        # Clone so that variation operators do not mutate selected parents.
        self.offspring = [self.toolbox.clone(child) for child in new_gen]

    def crossover_individuals(self):
        """Mate adjacent offspring pairs with probability ``copb``."""
        for child1, child2 in zip(self.offspring[::2], self.offspring[1::2]):
            if random.random() < self.copb:
                self.toolbox.mate(child1, child2)
                # Fitness is stale after crossover; force re-evaluation.
                del child1.fitness.values
                del child2.fitness.values

    def mutate_individuals(self):
        """Flip-bit mutate each offspring with probability ``mutpb``."""
        for mutant in self.offspring:
            # BUG FIX: mutation must be gated by the mutation probability
            # ``mutpb`` (the original compared against ``copb``, the
            # crossover probability, making ``mutpb`` dead).
            if random.random() < self.mutpb:
                self.toolbox.mutate(mutant)
                del mutant.fitness.values

    def check_fitness(self):
        """Re-evaluate invalidated offspring and promote them to the population."""
        # Only individuals touched by crossover/mutation need re-evaluation.
        invalid_ind = [ind for ind in self.offspring if not ind.fitness.valid]
        fitnesses = [self.evaluate_individual(indv) for indv in invalid_ind]
        for ind, fit in zip(invalid_ind, fitnesses):
            ind.fitness.values = fit
        # The population is entirely replaced by the offspring.
        self.population = self.offspring
        self.fitnesses = np.array([fitness[0] for fitness in fitnesses])

    def run_optimisation(self):
        """Run the full GA loop, recording population and fitnesses per generation."""
        self.create_population()
        self.results = []
        for i in tqdm(range(self.n_gens)):
            self.select_individuals()
            self.crossover_individuals()
            self.mutate_individuals()
            self.check_fitness()
            self.results.append({'Individuals': self.population,
                                 'Fitnesses': self.fitnesses})
| [
"numpy.sum",
"statsmodels.api.duration.survdiff",
"deap.base.Toolbox",
"random.random",
"deap.creator.create",
"numpy.array"
] | [((459, 473), 'deap.base.Toolbox', 'base.Toolbox', ([], {}), '()\n', (471, 473), False, 'from deap import base, creator, tools\n'), ((483, 546), 'deap.creator.create', 'creator.create', (['"""FitnessMax"""', 'base.Fitness'], {'weights': '(1.0, -1.0)'}), "('FitnessMax', base.Fitness, weights=(1.0, -1.0))\n", (497, 546), False, 'from deap import base, creator, tools\n'), ((554, 614), 'deap.creator.create', 'creator.create', (['"""solution"""', 'list'], {'fitness': 'creator.FitnessMax'}), "('solution', list, fitness=creator.FitnessMax)\n", (568, 614), False, 'from deap import base, creator, tools\n'), ((639, 653), 'deap.base.Toolbox', 'base.Toolbox', ([], {}), '()\n', (651, 653), False, 'from deap import base, creator, tools\n'), ((1988, 2037), 'statsmodels.api.duration.survdiff', 'sm.duration.survdiff', (['duration', 'death_obs', 'groups'], {}), '(duration, death_obs, groups)\n', (2008, 2037), True, 'import statsmodels.api as sm\n'), ((3442, 3489), 'numpy.array', 'np.array', (['[fitness[0] for fitness in fitnesses]'], {}), '([fitness[0] for fitness in fitnesses])\n', (3450, 3489), True, 'import numpy as np\n'), ((2125, 2139), 'numpy.sum', 'np.sum', (['groups'], {}), '(groups)\n', (2131, 2139), True, 'import numpy as np\n'), ((2597, 2612), 'random.random', 'random.random', ([], {}), '()\n', (2610, 2612), False, 'import random\n'), ((2856, 2871), 'random.random', 'random.random', ([], {}), '()\n', (2869, 2871), False, 'import random\n')] |
# -*- coding: utf-8 -*-
"""
Created on Fri Feb 15 22:57:52 2019
@author: Kellin
"""
import numpy as np
from scipy import optimize
import matplotlib.pyplot as plt
# Model calibration parameters (used by Viter / Ksim / Kupdate / postprocess / dist)
eta = 0.2        # low labour-productivity state; the shock grid is [eta, 1 - eta]
epsilon = 0.36   # labour-supply elasticity exponent in the utility/FOC terms
gamma = 0.7      # labour-disutility scale; enters as (1/gamma)**epsilon
beta = 0.96      # discount factor used in the value-function iteration
delta = 0.1      # depreciation rate, subtracted from the marginal product of capital
theta = 1.0      # NOTE(review): not referenced anywhere in the visible code
alpha = 0.36     # capital exponent in the production terms (K**alpha)
gridsize = 50    # number of points on the capital grid
K = 2.5 #initial value (starting guess for the aggregate-capital fixed point)
def Viter(V, U, gridsize, beta=0.96):
    """Value-function iteration with a symmetric two-state shock process.

    Parameters
    ----------
    V : ndarray, shape (gridsize, 2)
        Initial guess of the value function (capital grid x shock state).
    U : ndarray, shape (gridsize, gridsize, 2)
        Flow utility of choosing next-period capital k' (axis 1) given
        current capital k (axis 0) and shock state (axis 2).
    gridsize : int
        Number of points on the capital grid.
    beta : float, optional
        Discount factor. Defaults to 0.96, matching the module-level
        calibration, so existing callers are unaffected; previously this
        value was read from the module global.

    Returns
    -------
    V : ndarray, shape (gridsize, 2)
        Converged value function.
    pol : ndarray of int, shape (gridsize, 2)
        Index of the optimal next-period capital choice.
    """
    maxiter = 1000
    tol = 1.0e-6
    # Shock transition matrix: both states equally likely next period.
    P = [[1 / 2, 1 / 2], [1 / 2, 1 / 2]]

    def continuation(V):
        # E[V(k', z')] replicated across the current-capital axis so it
        # broadcasts against U.  (Shared by the update and the policy step,
        # which previously duplicated this expression.)
        return np.tile(V @ P, [gridsize, 1, 1])

    diff = 1
    i = 0
    while diff > tol and i < maxiter:
        Vold = V
        V = np.nanmax(U + beta * continuation(V), 1)
        diff = np.linalg.norm(Vold - V, np.inf)
        i += 1
    pol = np.nanargmax(U + beta * continuation(V), 1)
    return V, pol
def Ksim(pol, kgrid):
    """Aggregate capital implied by a policy function.

    Builds the Markov transition over (capital index, shock) states induced
    by ``pol``, computes the stationary distribution by taking a high power
    of the transition matrix, and returns the implied mean capital stock.

    Parameters
    ----------
    pol : ndarray of int, shape (n, 2)
        Optimal next-capital index for each (capital, shock) state.
    kgrid : ndarray, shape (n,)
        Capital grid values.

    Returns
    -------
    float
        Expected capital under the stationary distribution.
    """
    # Infer the grid size from the grid itself instead of relying on the
    # module-level ``gridsize`` global (self-contained; identical in-file
    # behaviour since len(kgrid) == gridsize there).
    n = len(kgrid)
    T = np.zeros([n, 2, n, 2])
    for i in range(n):
        for j in range(2):
            # From state (k_i, z_j) the agent moves to capital pol[i, j];
            # the next shock is equally likely to be either state.
            T[i, j, pol[i, j], :] = [1 / 2, 1 / 2]
    T = T.reshape([n * 2, n * 2])
    # Stationary distribution via a long power of the transition matrix.
    p0 = np.ones([1, n * 2]) / (n * 2)
    p_inf = p0 @ np.linalg.matrix_power(T, 10000)
    p_inf = p_inf.reshape([n, 2])
    K2 = np.sum(kgrid @ p_inf)
    return K2
def Kupdate(K):
    """Fixed-point residual for aggregate capital.

    Given a guess ``K``, computes the implied wage and interest rate,
    solves the household problem on the capital grid, simulates the
    stationary capital stock, and returns the squared gap between the
    implied and guessed capital.
    """
    # Aggregate labour supply consistent with the guess K.
    labour = ((1 / 2) * (eta ** (1 + epsilon) + (1 - eta) ** (1 + epsilon))
              / (gamma ** epsilon)
              * ((1 - alpha) * K ** alpha) ** epsilon) ** (1 / (1 + alpha * epsilon))
    # Competitive factor prices at (K, labour).
    wage = (1 - alpha) * K ** alpha * labour ** (-alpha)
    rate = alpha * K ** (alpha - 1) * labour ** (1 - alpha) - delta
    # Capital grid and the two labour-productivity states.
    kgrid = np.linspace(0, 20, gridsize)
    zgrid = np.array([eta, 1 - eta])
    # Broadcast shocks and capital onto (current k, next k', shock) cubes.
    zcube = np.tile(zgrid, [gridsize, gridsize, 1])
    kcube = np.tile(kgrid, [gridsize, 2, 1]).transpose(2, 0, 1)
    # Consumption for every (k, k', z) combination; infeasible choices are
    # clipped to zero so their log-utility becomes -inf.
    consumption = ((1 / gamma) ** epsilon * (1 / (1 + epsilon))
                   * (wage * zcube) ** (1 + epsilon)
                   + (1 + rate) * kcube - kcube.transpose(1, 0, 2))
    U = np.log(consumption.clip(0.0))
    # Solve the household problem and simulate the implied capital stock.
    V, pol = Viter(np.zeros([gridsize, 2]), U, gridsize)
    Knew = Ksim(pol, kgrid)
    return (Knew - K) ** 2
# Search for the steady-state aggregate capital on [2, 7] by minimising the
# squared fixed-point residual returned by Kupdate.
Ksteady = optimize.minimize(Kupdate,2.5,bounds = [(2,7)])
def postprocess(K):
    """Re-solve the household problem at capital level ``K`` and report results.

    Parameters
    ----------
    K : array-like
        Aggregate capital level (typically ``Ksteady["x"]`` from the optimiser).

    Returns
    -------
    list
        ``[V, pol, cpol, w, r]``: value function, policy indices,
        consumption policy, wage and interest rate.
    """
    # Labour supply and factor prices implied by K (same formulas as Kupdate).
    L = ( (1/2) * (eta**(1 + epsilon) + (1 - eta)**(1 + epsilon)) / (gamma**epsilon) * ((1 - alpha)*K**alpha)**epsilon )**(1/(1 + alpha*epsilon))
    w = (1 - alpha)*K**alpha * L**(-alpha)
    r = alpha*K**(alpha - 1)*L**(1 - alpha) - delta
    kgrid = np.linspace(0,20,gridsize)
    zgrid = np.array([eta, 1 - eta])
    zcube = np.tile(zgrid,[gridsize,gridsize,1])
    kcube= np.tile(kgrid,[gridsize,2,1]).transpose(2,0,1)
    temp = ( (1/gamma)**epsilon*(1/(1+epsilon))*(w*zcube)**(1+epsilon)+(1+r)*kcube-kcube.transpose(1,0,2))
    U = np.log(temp.clip(0))
    V = np.ones([gridsize,2])
    [V,pol] = Viter(V,U,gridsize)
    print("eta = " + str(eta))
    # BUG FIX: report the capital level passed in as the argument, not the
    # module-global Ksteady, so the function works for any K. Output is
    # unchanged for existing callers, which pass Ksteady["x"].
    print("Steady state capital: " + str(K))
    print("Wage: " + str(w))
    print("r: " + str(r))
    print("Labor:" + str(L))
    # NOTE(review): unlike ``temp`` above, this consumption policy omits the
    # 1/(1+epsilon) factor on the labour-income term — confirm intended.
    cpol = (kgrid*(1+r)*np.ones([2,1])).T - kgrid[pol] + np.ones([gridsize,1])*((1/gamma)**epsilon*(w*np.array([eta, 1 - eta]))**(1+epsilon))
    return [V, pol, cpol, w, r]
# Solve and report at the steady-state capital level found above.
[V,pol,cpol, w, r] = postprocess(Ksteady["x"])
def dist(pol, w, r, kgrid, gridsize):
    """Simulate panel paths of capital indices and consumption under ``pol``.

    Parameters
    ----------
    pol : ndarray of int, shape (gridsize, 2)
        Optimal next-capital index for each (capital, shock) state.
    w, r : float
        Wage and interest rate.
    kgrid : ndarray, shape (gridsize,)
        Capital grid values.
    gridsize : int
        Number of capital grid points (also the number of simulated agents).

    Returns
    -------
    list
        ``[kdist, cdist]``: simulated capital-index paths and the
        corresponding consumption paths.
    """
    simsize = 1000
    kdist = np.zeros([simsize, gridsize])
    # Each simulated agent starts at a distinct grid index (1..gridsize).
    kdist[0, :] = np.linspace(1, gridsize, gridsize)
    cdist = np.zeros([simsize - 1, gridsize])
    # BUG FIX: np.random.randint(0, 1, ...) always returns 0 (the upper
    # bound is exclusive), so the shock state never varied; draw from the
    # intended two states {0, 1}.
    shocks = np.random.randint(0, 2, [simsize, gridsize])
    zgrid = [eta, 1 - eta]
    for i in range(gridsize - 1):
        ind = int(kdist[0, i])
        for j in range(1, simsize - 1):
            kdist[j, i] = pol[ind, shocks[j, i]]
            # NOTE(review): at i == 0 this writes cdist[:, -1] (the last
            # column) — the i - 1 offset looks like an off-by-one; confirm.
            cdist[j, i - 1] = kgrid[ind]*(1+r) - kgrid[int(kdist[j, i])] + (zgrid[shocks[j, i]]*w)**(1+epsilon)*(1/gamma)**epsilon
            ind = int(kdist[j, i])
    return [kdist, cdist]
# Simulate the stationary distributions at the first calibration (eta = 0.2).
kgrid = np.linspace(0, 20, gridsize)
[kdist, cdist] = dist(pol, w, r, kgrid, gridsize)
kgrid = np.linspace(0, 20, gridsize)

# Plot the value function (left panel) and the consumption policy (right).
fig, ax = plt.subplots(1, 2, figsize=(9, 7))
ax[0].set_title("Value function")
ax[0].plot(kgrid, V[:, 0], 'blue')
ax[0].plot(kgrid, V[:, 1], 'red')
# BUG FIX: the "Consumption" title belongs on the right-hand panel; the
# original call re-titled panel 0 (overwriting "Value function") and left
# panel 1 untitled.
ax[1].set_title("Consumption")
ax[1].plot(kgrid, cpol[:, 0], 'blue')
ax[1].plot(kgrid, cpol[:, 1], 'red')

# Re-run the whole fixed-point exercise with a much smaller low shock state.
eta = 0.01
Ksteady = optimize.minimize(Kupdate, 2.5, bounds=[(2, 7)])
[V, pol, cpol, w, r] = postprocess(Ksteady["x"])
fig2, ax2 = plt.subplots(1, 2, figsize=(9, 7))
ax2[0].plot(kgrid, V[:, 0], 'green')
ax2[0].plot(kgrid, V[:, 1], 'yellow')
ax2[1].plot(kgrid, cpol[:, 0], 'green')
ax2[1].plot(kgrid, cpol[:, 1], 'yellow')
| [
"scipy.optimize.minimize",
"numpy.sum",
"numpy.zeros",
"numpy.ones",
"numpy.random.randint",
"numpy.array",
"numpy.tile",
"numpy.linspace",
"numpy.linalg.norm",
"numpy.linalg.matrix_power",
"matplotlib.pyplot.subplots"
] | [((1898, 1946), 'scipy.optimize.minimize', 'optimize.minimize', (['Kupdate', '(2.5)'], {'bounds': '[(2, 7)]'}), '(Kupdate, 2.5, bounds=[(2, 7)])\n', (1915, 1946), False, 'from scipy import optimize\n'), ((3667, 3695), 'numpy.linspace', 'np.linspace', (['(0)', '(20)', 'gridsize'], {}), '(0, 20, gridsize)\n', (3678, 3695), True, 'import numpy as np\n'), ((3760, 3788), 'numpy.linspace', 'np.linspace', (['(0)', '(20)', 'gridsize'], {}), '(0, 20, gridsize)\n', (3771, 3788), True, 'import numpy as np\n'), ((3798, 3832), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(2)'], {'figsize': '(9, 7)'}), '(1, 2, figsize=(9, 7))\n', (3810, 3832), True, 'import matplotlib.pyplot as plt\n'), ((4067, 4115), 'scipy.optimize.minimize', 'optimize.minimize', (['Kupdate', '(2.5)'], {'bounds': '[(2, 7)]'}), '(Kupdate, 2.5, bounds=[(2, 7)])\n', (4084, 4115), False, 'from scipy import optimize\n'), ((4178, 4212), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(2)'], {'figsize': '(9, 7)'}), '(1, 2, figsize=(9, 7))\n', (4190, 4212), True, 'import matplotlib.pyplot as plt\n'), ((781, 817), 'numpy.zeros', 'np.zeros', (['[gridsize, 2, gridsize, 2]'], {}), '([gridsize, 2, gridsize, 2])\n', (789, 817), True, 'import numpy as np\n'), ((1118, 1139), 'numpy.sum', 'np.sum', (['(kgrid @ p_inf)'], {}), '(kgrid @ p_inf)\n', (1124, 1139), True, 'import numpy as np\n'), ((1439, 1467), 'numpy.linspace', 'np.linspace', (['(0)', '(20)', 'gridsize'], {}), '(0, 20, gridsize)\n', (1450, 1467), True, 'import numpy as np\n'), ((1479, 1503), 'numpy.array', 'np.array', (['[eta, 1 - eta]'], {}), '([eta, 1 - eta])\n', (1487, 1503), True, 'import numpy as np\n'), ((1519, 1558), 'numpy.tile', 'np.tile', (['zgrid', '[gridsize, gridsize, 1]'], {}), '(zgrid, [gridsize, gridsize, 1])\n', (1526, 1558), True, 'import numpy as np\n'), ((1767, 1790), 'numpy.zeros', 'np.zeros', (['[gridsize, 2]'], {}), '([gridsize, 2])\n', (1775, 1790), True, 'import numpy as np\n'), ((2234, 2262), 'numpy.linspace', 
'np.linspace', (['(0)', '(20)', 'gridsize'], {}), '(0, 20, gridsize)\n', (2245, 2262), True, 'import numpy as np\n'), ((2274, 2298), 'numpy.array', 'np.array', (['[eta, 1 - eta]'], {}), '([eta, 1 - eta])\n', (2282, 2298), True, 'import numpy as np\n'), ((2314, 2353), 'numpy.tile', 'np.tile', (['zgrid', '[gridsize, gridsize, 1]'], {}), '(zgrid, [gridsize, gridsize, 1])\n', (2321, 2353), True, 'import numpy as np\n'), ((2560, 2582), 'numpy.ones', 'np.ones', (['[gridsize, 2]'], {}), '([gridsize, 2])\n', (2567, 2582), True, 'import numpy as np\n'), ((3095, 3124), 'numpy.zeros', 'np.zeros', (['[simsize, gridsize]'], {}), '([simsize, gridsize])\n', (3103, 3124), True, 'import numpy as np\n'), ((3143, 3177), 'numpy.linspace', 'np.linspace', (['(1)', 'gridsize', 'gridsize'], {}), '(1, gridsize, gridsize)\n', (3154, 3177), True, 'import numpy as np\n'), ((3189, 3222), 'numpy.zeros', 'np.zeros', (['[simsize - 1, gridsize]'], {}), '([simsize - 1, gridsize])\n', (3197, 3222), True, 'import numpy as np\n'), ((3237, 3281), 'numpy.random.randint', 'np.random.randint', (['(0)', '(1)', '[simsize, gridsize]'], {}), '(0, 1, [simsize, gridsize])\n', (3254, 3281), True, 'import numpy as np\n'), ((584, 616), 'numpy.linalg.norm', 'np.linalg.norm', (['(Vold - V)', 'np.inf'], {}), '(Vold - V, np.inf)\n', (598, 616), True, 'import numpy as np\n'), ((978, 1004), 'numpy.ones', 'np.ones', (['[1, gridsize * 2]'], {}), '([1, gridsize * 2])\n', (985, 1004), True, 'import numpy as np\n'), ((1034, 1066), 'numpy.linalg.matrix_power', 'np.linalg.matrix_power', (['T', '(10000)'], {}), '(T, 10000)\n', (1056, 1066), True, 'import numpy as np\n'), ((1568, 1600), 'numpy.tile', 'np.tile', (['kgrid', '[gridsize, 2, 1]'], {}), '(kgrid, [gridsize, 2, 1])\n', (1575, 1600), True, 'import numpy as np\n'), ((2363, 2395), 'numpy.tile', 'np.tile', (['kgrid', '[gridsize, 2, 1]'], {}), '(kgrid, [gridsize, 2, 1])\n', (2370, 2395), True, 'import numpy as np\n'), ((2857, 2879), 'numpy.ones', 'np.ones', (['[gridsize, 
1]'], {}), '([gridsize, 1])\n', (2864, 2879), True, 'import numpy as np\n'), ((670, 733), 'numpy.tile', 'np.tile', (['(V @ [[1 / 2, 1 / 2], [1 / 2, 1 / 2]])', '[gridsize, 1, 1]'], {}), '(V @ [[1 / 2, 1 / 2], [1 / 2, 1 / 2]], [gridsize, 1, 1])\n', (677, 733), True, 'import numpy as np\n'), ((509, 572), 'numpy.tile', 'np.tile', (['(V @ [[1 / 2, 1 / 2], [1 / 2, 1 / 2]])', '[gridsize, 1, 1]'], {}), '(V @ [[1 / 2, 1 / 2], [1 / 2, 1 / 2]], [gridsize, 1, 1])\n', (516, 572), True, 'import numpy as np\n'), ((2824, 2839), 'numpy.ones', 'np.ones', (['[2, 1]'], {}), '([2, 1])\n', (2831, 2839), True, 'import numpy as np\n'), ((2902, 2926), 'numpy.array', 'np.array', (['[eta, 1 - eta]'], {}), '([eta, 1 - eta])\n', (2910, 2926), True, 'import numpy as np\n')] |
import re
from html_table_parser.parser import HTMLTableParser
from lxml.html import fromstring, tostring
from pandas import DataFrame
from numpy import array
import pandas as pd
from data_utils import get_matches_info, get_players_data_from_matches_stats
# Sample/fixture statistics: metric name -> team -> player -> probability (%).
# NOTE(review): the player keys are anonymized '<NAME>' placeholders; a Python
# dict literal keeps only the LAST occurrence of a duplicate key, so each inner
# team dict collapses to a single entry at runtime. Restore the real player
# names before using this fixture for anything beyond smoke-testing.
data = {
    'hits_count_mult_success_percent': {
        'Team1':
            {
                '<NAME>': 61.5429,
                '<NAME>': 55.9860,
                '<NAME>': 79.9362,
            },
        'Team2':
            {
                '<NAME>': 58.3758,
                '<NAME>': 55.5128,
                '<NAME>': 50.0100,
            }
    },
    'match_based_stat': {
        'Team1':
            {
                '<NAME>': 61.5429,
                '<NAME>': 55.9860,
                '<NAME>': 79.9362,
            },
        'Team2':
            {
                '<NAME>': 58.3758,
                '<NAME>': 55.5128,
                '<NAME>': 50.0100,
            }
    }
}
if __name__ == '__main__':
# df = get_players_data_from_matches_stats(a, headers)
# print(df.to_dict())
# print(df)
# with open(r'D:\GitHub\hockey_prediction_app\sandbox\table_example.html', encoding='utf-8') as file_:
# s = file_.read()
# p = HTMLTableParser()
# p.feed(s)
# table_set = array([table[1:] for table_group in p.tables for table in table_group])
# df2 = DataFrame(table_set[1:], columns=table_set[0])
# df2 = df2.convert_objects(convert_numeric=True)
# multiplication_result = df2.get('S/Z') * df2.get('RÚS')
# df3 = DataFrame({'Name': df2.get('Jméno'), 'Team': df2.get('Tým'), 'Probability': multiplication_result})
#
# result_df = df.merge(df3, left_index=True, right_on='Name')
#
# print(result_df)
# print(result_df.get('Name'))
# print(result_df.get('Team'))
# r = requests.get(a, headers=headers)
#
# # print(r.content)
# doc = fromstring(r.content)
# c = doc.find_class('col-soupisky-home') + doc.find_class('col-soupisky-visitor')
#
# for el in c:
# for table in el.findall('table'):
# table_string = tostring(table, encoding='utf-8')
# p = HTMLTableParser()
# decoded_str = table_string.decode('utf-8')
# # print(decoded_str)
# p.feed(decoded_str)
# for row in p.tables[0][1:]:
# if row[0]:
# print(extract_player_name(row[2], cut_last_element=False))
# print('=' * 40)
# for el in c:
# for sub_el in el.findall('ul/li'):
# pattern = re.compile(r'(\d+\.\d+\.\d+)')
# search_result = pattern.search(sub_el.text_content())
# if search_result is not None:
# print(search_result.group())
#
#
# p = HTMLTableParser()
# p.feed(table_string.decode('utf-8'))
# print(p.tables)
#
# for table_set in p.tables:
# for table in table_set[1:]:
# scoring_probability = float(table[7]) * float(table[-1])
# if scoring_probability > 15.0:
# a = '%s %f | %s %f' % (table[1], scoring_probability, table[1], scoring_probability)
# print(a)
with open('table_example.html', encoding='utf-8') as file_:
s = file_.read()
p = HTMLTableParser()
p.feed(s)
table_set = array([table[1:] for table_group in p.tables for table in table_group])
#
df = DataFrame(table_set[1:], columns=table_set[0])
df = df.convert_objects(convert_numeric=True)
df['ZS'] = 9
for team, indexes in df.groupby('Tým').groups.items():
team_data_set = df.loc[indexes, ['Z', 'ZS']]
print((team_data_set['ZS'] / team_data_set['Z']).sum())
# # for team in df.get('Tým').unique():
# # print(team)
# multiplication_result = df.get('S/Z') * df.get('RÚS')
# result_df = DataFrame({'Name': df.get('Jméno'), 'Team': df.get('Tým'), 'Probability':multiplication_result})
# for team, indexes in result_df.groupby('Team').groups.items():
# print(result_df.loc[indexes, ['Name', 'Probability']].set_index('Name').to_dict()['Probability'])
# for table_set in p.tables:
# for table in table_set[1:]:
# scoring_probability = float(table[7]) * float(table[-1])
# if scoring_probability > 50.0:
#
# a = '%20s %3.4f | %20s %3.4f' %(table[1], scoring_probability, table[1], scoring_probability)
# print(a)
| [
"pandas.DataFrame",
"html_table_parser.parser.HTMLTableParser",
"numpy.array"
] | [((3331, 3348), 'html_table_parser.parser.HTMLTableParser', 'HTMLTableParser', ([], {}), '()\n', (3346, 3348), False, 'from html_table_parser.parser import HTMLTableParser\n'), ((3387, 3458), 'numpy.array', 'array', (['[table[1:] for table_group in p.tables for table in table_group]'], {}), '([table[1:] for table_group in p.tables for table in table_group])\n', (3392, 3458), False, 'from numpy import array\n'), ((3478, 3524), 'pandas.DataFrame', 'DataFrame', (['table_set[1:]'], {'columns': 'table_set[0]'}), '(table_set[1:], columns=table_set[0])\n', (3487, 3524), False, 'from pandas import DataFrame\n')] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.