code stringlengths 31 1.05M | apis list | extract_api stringlengths 97 1.91M |
|---|---|---|
# To import required modules:
import numpy as np
import time
import os
import sys
import matplotlib
import matplotlib.cm as cm #for color maps
import matplotlib.pyplot as plt
from matplotlib.gridspec import GridSpec #for specifying plot attributes
from matplotlib import ticker #for setting contour plots to log scale
import scipy.integrate #for numerical integration
import scipy.misc #for factorial function
from scipy.special import erf #error function, used in computing CDF of normal distribution
import scipy.interpolate #for interpolation functions
import corner #corner.py package for corner plots
#matplotlib.rc('text', usetex=True)
sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(os.path.realpath(__file__)))))
from src.functions_general import *
from src.functions_compare_kepler import *
from src.functions_load_sims import *
from src.functions_plot_catalogs import *
from src.functions_plot_params import *
# Output control: when True, figures are written into `savefigures_directory`
# as PDFs; otherwise they are only displayed interactively.
savefigures = False
savefigures_directory = '/Users/hematthi/Documents/GradSchool/Research/ExoplanetsSysSim_Clusters/Figures/Model_Optimization/AMD_system/Split_stars/Singles_ecc/Params11_KS/durations_norm_circ_singles_multis_GF2020_KS/Best_models/GP_med/'
##### To load the underlying populations:
loadfiles_directory = '/Users/hematthi/Documents/GradSchool/Research/ACI/Simulated_Data/AMD_system/Split_stars/Singles_ecc/Params11_KS/Distribute_AMD_per_mass/durations_norm_circ_singles_multis_GF2020_KS/GP_med/' #Lognormal_mass_Earthlike_rocky/
run_number = ''
# Load the simulated catalog's bounds, parameters, and per-system summary
# statistics via the project helpers (src.functions_load_sims); exact return
# semantics are defined there.
N_sim, cos_factor, P_min, P_max, radii_min, radii_max = read_targets_period_radius_bounds(loadfiles_directory + 'periods%s.out' % run_number)
param_vals_all = read_sim_params(loadfiles_directory + 'periods%s.out' % run_number)
sssp_per_sys, sssp = compute_summary_stats_from_cat_phys(file_name_path=loadfiles_directory, run_number=run_number, load_full_tables=True)
##### To load some mass-radius tables:
# NWG-2018 model:
MR_table_file = '../../data/MRpredict_table_weights3025_R1001_Q1001.txt'
with open(MR_table_file, 'r') as file:
    # Strip '#' comment lines before handing the stream to genfromtxt.
    lines = (line for line in file if not line.startswith('#'))
    MR_table = np.genfromtxt(lines, names=True, delimiter=', ')
# Li Zeng models:
MR_earthlike_rocky = np.genfromtxt('../../data/MR_earthlike_rocky.txt', names=['mass','radius']) # mass and radius are in Earth units
MR_pure_iron = np.genfromtxt('../../data/MR_pure_iron.txt', names=['mass','radius']) # mass and radius are in Earth units
# To construct an interpolation function for each MR relation:
# NOTE: the NWG2018 table stores log10 values, hence the 10.** conversions.
MR_NWG2018_interp = scipy.interpolate.interp1d(10.**MR_table['log_R'], 10.**MR_table['05'])
MR_earthlike_rocky_interp = scipy.interpolate.interp1d(MR_earthlike_rocky['radius'], MR_earthlike_rocky['mass'])
MR_pure_iron_interp = scipy.interpolate.interp1d(MR_pure_iron['radius'], MR_pure_iron['mass'])
# To find where the Earth-like rocky relation intersects with the NWG2018 mean relation (between 1.4-1.5 R_earth):
def diff_MR(R):
    """Absolute mass difference between the NWG2018 mean M-R relation and
    the Earth-like rocky M-R relation, evaluated at radius ``R``
    (Earth units). Minimizing this locates the crossing radius.
    """
    return np.abs(MR_NWG2018_interp(R) - MR_earthlike_rocky_interp(R))
# The intersection is approximately 1.472 R_earth
radii_switch = 1.472
# IDEA 1: Normal distribution for rho centered around Earth-like rocky, with a sigma_rho that grows with radius
# To define sigma_rho such that log10(sigma_rho) is a linear function of radius:
rho_earthlike_rocky = rho_from_M_R(MR_earthlike_rocky['mass'], MR_earthlike_rocky['radius']) # mean density (g/cm^3) for Earth-like rocky as a function of radius
rho_pure_iron = rho_from_M_R(MR_pure_iron['mass'], MR_pure_iron['radius']) # mean density (g/cm^3) for pure iron as a function of radius
sigma_rho_at_radii_switch = 3. # std of mean density (g/cm^3) at radii_switch
sigma_rho_at_radii_min = 1. # std of mean density (g/cm^3) at radii_min
# Slope of log10(sigma_rho) vs radius, anchored at the two endpoints above.
rho_radius_slope = (np.log10(sigma_rho_at_radii_switch)-np.log10(sigma_rho_at_radii_min)) / (radii_switch - radii_min) # dlog(rho)/dR; slope between radii_min and radii_switch in log(rho)
sigma_rho = 10.**( rho_radius_slope*(MR_earthlike_rocky['radius'] - radii_min) + np.log10(sigma_rho_at_radii_min) )
# IDEA 2: Lognormal distribution for mass centered around Earth-like rocky, with a sigma_log_M that grows with radius
# To define sigma_log_M as a linear function of radius:
sigma_log_M_at_radii_switch = 0.3 # std of log_M (Earth masses) at radii_switch
sigma_log_M_at_radii_min = 0.04 # std of log_M (Earth masses) at radii_min
sigma_log_M_radius_slope = (sigma_log_M_at_radii_switch - sigma_log_M_at_radii_min) / (radii_switch - radii_min)
sigma_log_M = sigma_log_M_radius_slope*(MR_earthlike_rocky['radius'] - radii_min) + sigma_log_M_at_radii_min
##### To make mass-radius plots:
afs = 20 #axes labels font size
tfs = 20 #text labels font size
lfs = 16 #legend labels font size
bins = 100
# Density vs. radius for new model based on Li Zeng's Earth-like rocky:
fig = plt.figure(figsize=(8,8))
plot = GridSpec(4, 1, left=0.15, bottom=0.1, right=0.98, top=0.98, wspace=0, hspace=0)
ax = plt.subplot(plot[0,:]) # sigma_rho vs. radius
plt.plot(MR_earthlike_rocky['radius'], sigma_rho, color='orange', ls='-', lw=3, label=r'Linear $\log(\sigma_\rho)$ vs $R_p$')
plt.gca().set_yscale("log")
ax.tick_params(axis='both', labelsize=afs)
plt.xticks([])
plt.yticks([1., 2., 3., 4., 5.])
# Force plain (non-scientific, no-offset) tick labels on the log y-axis.
ax.yaxis.set_major_formatter(ticker.ScalarFormatter())
ax.yaxis.get_major_formatter().set_scientific(False)
ax.yaxis.get_major_formatter().set_useOffset(False)
plt.xlim([radii_min, radii_switch])
plt.ylim([0.9, 4.])
plt.ylabel(r'$\sigma_\rho$ ($g/cm^3$)', fontsize=tfs)
plt.legend(loc='upper left', bbox_to_anchor=(0.01,0.99), ncol=1, frameon=False, fontsize=lfs)
ax = plt.subplot(plot[1:,:]) # rho vs. radius
plt.plot(MR_pure_iron['radius'], rho_pure_iron, color='r', ls='--', lw=3, label='Pure iron')
plt.plot(MR_earthlike_rocky['radius'], rho_earthlike_rocky, color='orange', ls='--', lw=3, label='Earth-like rocky')
# Shaded 1/2/3-sigma bands around the Earth-like rocky mean density.
plt.fill_between(MR_earthlike_rocky['radius'], rho_earthlike_rocky - sigma_rho, rho_earthlike_rocky + sigma_rho, color='orange', alpha=0.5, label=r'Earth-like rocky $\pm \sigma_\rho$')
plt.fill_between(MR_earthlike_rocky['radius'], rho_earthlike_rocky - 2.*sigma_rho, rho_earthlike_rocky + 2.*sigma_rho, color='orange', alpha=0.3, label=r'Earth-like rocky $\pm 2\sigma_\rho$')
plt.fill_between(MR_earthlike_rocky['radius'], rho_earthlike_rocky - 3.*sigma_rho, rho_earthlike_rocky + 3.*sigma_rho, color='orange', alpha=0.1, label=r'Earth-like rocky $\pm 3\sigma_\rho$')
plt.axhline(y=1., color='c', lw=3, label='Water density (1 g/cm^3)')
plt.gca().set_yscale("log")
ax.tick_params(axis='both', labelsize=afs)
plt.minorticks_off()
plt.yticks([1., 2., 3., 4., 5., 7., 10., 15.])
# Plain numeric labels on both minor and major ticks of the log y-axis.
ax.yaxis.set_minor_formatter(ticker.ScalarFormatter())
ax.yaxis.set_major_formatter(ticker.ScalarFormatter())
ax.yaxis.get_major_formatter().set_scientific(False)
ax.yaxis.get_major_formatter().set_useOffset(False)
plt.xlim([radii_min, radii_switch])
plt.ylim([0.9, 20.])
plt.xlabel(r'$R_p$ ($R_\oplus$)', fontsize=tfs)
plt.ylabel(r'$\rho$ ($g/cm^3$)', fontsize=tfs)
plt.legend(loc='lower right', bbox_to_anchor=(0.99,0.01), ncol=1, frameon=False, fontsize=lfs)
if savefigures:
    plt.savefig(savefigures_directory + 'Density_radius.pdf')
    plt.close()
plt.show()
# Mass vs. radius:
fig = plt.figure(figsize=(16,8))
plot = GridSpec(5, 5, left=0.1, bottom=0.1, right=0.98, top=0.98, wspace=0, hspace=0)
ax = plt.subplot(plot[1:,:4])
# Flatten the per-system tables, dropping zero-padding entries.
masses_all = sssp_per_sys['mass_all'][sssp_per_sys['mass_all'] > 0.]
radii_all = sssp_per_sys['radii_all'][sssp_per_sys['radii_all'] > 0.]
# 2-D density of the simulated planets in log-log mass-radius space.
corner.hist2d(np.log10(radii_all), np.log10(masses_all), bins=50, plot_density=True, contour_kwargs={'colors': ['0.6','0.4','0.2','0']}, data_kwargs={'color': 'k'})
plt.plot(MR_table['log_R'], MR_table['05'], '-', color='g', label='Mean prediction (NWG2018)')
plt.fill_between(MR_table['log_R'], MR_table['016'], MR_table['084'], color='g', alpha=0.5, label=r'16%-84% (NWG2018)')
# Constant-density reference curves (M_from_R_rho is a project helper).
plt.plot(MR_table['log_R'], np.log10(M_from_R_rho(10.**MR_table['log_R'], rho=5.51)), color='b', label='Earth density (5.51 g/cm^3)')
plt.plot(MR_table['log_R'], np.log10(M_from_R_rho(10.**MR_table['log_R'], rho=3.9)), color='m', label='Mars density (3.9 g/cm^3)')
plt.plot(MR_table['log_R'], np.log10(M_from_R_rho(10.**MR_table['log_R'], rho=1.)), color='c', label='Water density (1 g/cm^3)')
plt.plot(MR_table['log_R'], np.log10(M_from_R_rho(10.**MR_table['log_R'], rho=7.9)), color='r', label='Iron density (7.9 g/cm^3)')
plt.plot(MR_table['log_R'], np.log10(M_from_R_rho(10.**MR_table['log_R'], rho=100.)), color='k', label='100 g/cm^3')
plt.plot(np.log10(MR_earthlike_rocky['radius']), np.log10(MR_earthlike_rocky['mass']), color='orange', ls='--', lw=3, label='Earth-like rocky')
#plt.fill_between(np.log10(MR_earthlike_rocky['radius']), np.log10(M_from_R_rho(MR_earthlike_rocky['radius'], rho=rho_earthlike_rocky-sigma_rho)), np.log10(M_from_R_rho(MR_earthlike_rocky['radius'], rho=rho_earthlike_rocky+sigma_rho)), color='orange', alpha=0.5, label=r'16%-84% ($\rho \sim \mathcal{N}(\rho_{\rm Earthlike\:rocky}, \sigma_\rho(R_p))$)') #label=r'$\rho \sim \mathcal{N}(\rho_{\rm Earthlike\:rocky}, 10^{[\frac{d\log\rho}{dR_p}(R_p - 0.5) + \log{\rho_0}]})$'
plt.fill_between(np.log10(MR_earthlike_rocky['radius']), np.log10(MR_earthlike_rocky['mass']) - sigma_log_M, np.log10(MR_earthlike_rocky['mass']) + sigma_log_M, color='orange', alpha=0.5, label=r'16%-84% ($\log{M_p} \sim \mathcal{N}(M_{p,\rm Earthlike\:rocky}, \sigma_{\log{M_p}})$)')
plt.plot(np.log10(MR_pure_iron['radius']), np.log10(MR_pure_iron['mass']), color='r', ls='--', lw=3, label='Pure iron')
#plt.axvline(x=np.log10(0.7), color='k', ls='--', lw=3)
# Mark the radius where the model switches M-R relations.
plt.axvline(x=np.log10(radii_switch), color='k', ls='--', lw=3)
ax.tick_params(axis='both', labelsize=afs)
xtick_vals = np.array([0.5, 1., 2., 4., 10.])
ytick_vals = np.array([1e-1, 1., 10., 1e2])
plt.xticks(np.log10(xtick_vals), xtick_vals)
plt.yticks(np.log10(ytick_vals), ytick_vals)
plt.xlim([np.log10(radii_min), np.log10(radii_max)])
plt.ylim([np.log10(0.07), 2.])
plt.xlabel(r'$R_p$ ($R_\oplus$)', fontsize=tfs)
plt.ylabel(r'$M_p$ ($M_\oplus$)', fontsize=tfs)
plt.legend(loc='lower right', bbox_to_anchor=(0.99,0.01), ncol=1, frameon=False, fontsize=lfs)
ax = plt.subplot(plot[0,:4]) # top histogram
plt.hist(radii_all, bins=np.logspace(np.log10(radii_min), np.log10(radii_max), bins+1), histtype='step', color='k', ls='-', label=r'All')
#plt.axvline(x=0.7, color='k', ls='--', lw=3)
plt.axvline(x=radii_switch, color='k', ls='--', lw=3)
plt.gca().set_xscale("log")
plt.xlim([radii_min, radii_max])
plt.xticks([])
plt.yticks([])
plt.legend(loc='upper right', bbox_to_anchor=(0.99,0.99), ncol=1, frameon=False, fontsize=lfs)
ax = plt.subplot(plot[1:,4]) # side histogram
plt.hist(masses_all, bins=np.logspace(np.log10(0.07), 2., bins+1), histtype='step', orientation='horizontal', color='k', ls='-', label='All')
# Split the mass histogram above/below the switch radius.
radii_cut = radii_switch
plt.hist(masses_all[radii_all > radii_cut], bins=np.logspace(np.log10(0.07), 2., bins+1), histtype='step', orientation='horizontal', color='b', ls='-', label=r'$R_p > %s R_\oplus$' % radii_cut)
plt.hist(masses_all[radii_all < radii_cut], bins=np.logspace(np.log10(0.07), 2., bins+1), histtype='step', orientation='horizontal', color='r', ls='-', label=r'$R_p < %s R_\oplus$' % radii_cut)
plt.gca().set_yscale("log")
plt.ylim([0.07, 1e2])
plt.xticks([])
plt.yticks([])
plt.legend(loc='upper right', bbox_to_anchor=(0.99,0.99), ncol=1, frameon=False, fontsize=lfs)
if savefigures:
    plt.savefig(savefigures_directory + 'MR_diagram.pdf')
    plt.close()
plt.show()
| [
"numpy.log10",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.fill_between",
"numpy.array",
"matplotlib.ticker.ScalarFormatter",
"numpy.genfromtxt",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.axhline",
"matplotlib.pyplot.close",
"matplotlib.gridspec.GridSpec",
"ma... | [((2221, 2297), 'numpy.genfromtxt', 'np.genfromtxt', (['"""../../data/MR_earthlike_rocky.txt"""'], {'names': "['mass', 'radius']"}), "('../../data/MR_earthlike_rocky.txt', names=['mass', 'radius'])\n", (2234, 2297), True, 'import numpy as np\n'), ((2349, 2419), 'numpy.genfromtxt', 'np.genfromtxt', (['"""../../data/MR_pure_iron.txt"""'], {'names': "['mass', 'radius']"}), "('../../data/MR_pure_iron.txt', names=['mass', 'radius'])\n", (2362, 2419), True, 'import numpy as np\n'), ((4893, 4919), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(8, 8)'}), '(figsize=(8, 8))\n', (4903, 4919), True, 'import matplotlib.pyplot as plt\n'), ((4926, 5005), 'matplotlib.gridspec.GridSpec', 'GridSpec', (['(4)', '(1)'], {'left': '(0.15)', 'bottom': '(0.1)', 'right': '(0.98)', 'top': '(0.98)', 'wspace': '(0)', 'hspace': '(0)'}), '(4, 1, left=0.15, bottom=0.1, right=0.98, top=0.98, wspace=0, hspace=0)\n', (4934, 5005), False, 'from matplotlib.gridspec import GridSpec\n'), ((5012, 5035), 'matplotlib.pyplot.subplot', 'plt.subplot', (['plot[0, :]'], {}), '(plot[0, :])\n', (5023, 5035), True, 'import matplotlib.pyplot as plt\n'), ((5058, 5189), 'matplotlib.pyplot.plot', 'plt.plot', (["MR_earthlike_rocky['radius']", 'sigma_rho'], {'color': '"""orange"""', 'ls': '"""-"""', 'lw': '(3)', 'label': '"""Linear $\\\\log(\\\\sigma_\\\\rho)$ vs $R_p$"""'}), "(MR_earthlike_rocky['radius'], sigma_rho, color='orange', ls='-',\n lw=3, label='Linear $\\\\log(\\\\sigma_\\\\rho)$ vs $R_p$')\n", (5066, 5189), True, 'import matplotlib.pyplot as plt\n'), ((5255, 5269), 'matplotlib.pyplot.xticks', 'plt.xticks', (['[]'], {}), '([])\n', (5265, 5269), True, 'import matplotlib.pyplot as plt\n'), ((5270, 5307), 'matplotlib.pyplot.yticks', 'plt.yticks', (['[1.0, 2.0, 3.0, 4.0, 5.0]'], {}), '([1.0, 2.0, 3.0, 4.0, 5.0])\n', (5280, 5307), True, 'import matplotlib.pyplot as plt\n'), ((5463, 5498), 'matplotlib.pyplot.xlim', 'plt.xlim', (['[radii_min, radii_switch]'], {}), '([radii_min, 
radii_switch])\n', (5471, 5498), True, 'import matplotlib.pyplot as plt\n'), ((5499, 5519), 'matplotlib.pyplot.ylim', 'plt.ylim', (['[0.9, 4.0]'], {}), '([0.9, 4.0])\n', (5507, 5519), True, 'import matplotlib.pyplot as plt\n'), ((5519, 5573), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""$\\\\sigma_\\\\rho$ ($g/cm^3$)"""'], {'fontsize': 'tfs'}), "('$\\\\sigma_\\\\rho$ ($g/cm^3$)', fontsize=tfs)\n", (5529, 5573), True, 'import matplotlib.pyplot as plt\n'), ((5573, 5672), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""upper left"""', 'bbox_to_anchor': '(0.01, 0.99)', 'ncol': '(1)', 'frameon': '(False)', 'fontsize': 'lfs'}), "(loc='upper left', bbox_to_anchor=(0.01, 0.99), ncol=1, frameon=\n False, fontsize=lfs)\n", (5583, 5672), True, 'import matplotlib.pyplot as plt\n'), ((5673, 5697), 'matplotlib.pyplot.subplot', 'plt.subplot', (['plot[1:, :]'], {}), '(plot[1:, :])\n', (5684, 5697), True, 'import matplotlib.pyplot as plt\n'), ((5714, 5810), 'matplotlib.pyplot.plot', 'plt.plot', (["MR_pure_iron['radius']", 'rho_pure_iron'], {'color': '"""r"""', 'ls': '"""--"""', 'lw': '(3)', 'label': '"""Pure iron"""'}), "(MR_pure_iron['radius'], rho_pure_iron, color='r', ls='--', lw=3,\n label='Pure iron')\n", (5722, 5810), True, 'import matplotlib.pyplot as plt\n'), ((5807, 5927), 'matplotlib.pyplot.plot', 'plt.plot', (["MR_earthlike_rocky['radius']", 'rho_earthlike_rocky'], {'color': '"""orange"""', 'ls': '"""--"""', 'lw': '(3)', 'label': '"""Earth-like rocky"""'}), "(MR_earthlike_rocky['radius'], rho_earthlike_rocky, color='orange',\n ls='--', lw=3, label='Earth-like rocky')\n", (5815, 5927), True, 'import matplotlib.pyplot as plt\n'), ((5924, 6118), 'matplotlib.pyplot.fill_between', 'plt.fill_between', (["MR_earthlike_rocky['radius']", '(rho_earthlike_rocky - sigma_rho)', '(rho_earthlike_rocky + sigma_rho)'], {'color': '"""orange"""', 'alpha': '(0.5)', 'label': '"""Earth-like rocky $\\\\pm \\\\sigma_\\\\rho$"""'}), "(MR_earthlike_rocky['radius'], 
rho_earthlike_rocky -\n sigma_rho, rho_earthlike_rocky + sigma_rho, color='orange', alpha=0.5,\n label='Earth-like rocky $\\\\pm \\\\sigma_\\\\rho$')\n", (5940, 6118), True, 'import matplotlib.pyplot as plt\n'), ((6109, 6317), 'matplotlib.pyplot.fill_between', 'plt.fill_between', (["MR_earthlike_rocky['radius']", '(rho_earthlike_rocky - 2.0 * sigma_rho)', '(rho_earthlike_rocky + 2.0 * sigma_rho)'], {'color': '"""orange"""', 'alpha': '(0.3)', 'label': '"""Earth-like rocky $\\\\pm 2\\\\sigma_\\\\rho$"""'}), "(MR_earthlike_rocky['radius'], rho_earthlike_rocky - 2.0 *\n sigma_rho, rho_earthlike_rocky + 2.0 * sigma_rho, color='orange', alpha\n =0.3, label='Earth-like rocky $\\\\pm 2\\\\sigma_\\\\rho$')\n", (6125, 6317), True, 'import matplotlib.pyplot as plt\n'), ((6301, 6509), 'matplotlib.pyplot.fill_between', 'plt.fill_between', (["MR_earthlike_rocky['radius']", '(rho_earthlike_rocky - 3.0 * sigma_rho)', '(rho_earthlike_rocky + 3.0 * sigma_rho)'], {'color': '"""orange"""', 'alpha': '(0.1)', 'label': '"""Earth-like rocky $\\\\pm 3\\\\sigma_\\\\rho$"""'}), "(MR_earthlike_rocky['radius'], rho_earthlike_rocky - 3.0 *\n sigma_rho, rho_earthlike_rocky + 3.0 * sigma_rho, color='orange', alpha\n =0.1, label='Earth-like rocky $\\\\pm 3\\\\sigma_\\\\rho$')\n", (6317, 6509), True, 'import matplotlib.pyplot as plt\n'), ((6493, 6562), 'matplotlib.pyplot.axhline', 'plt.axhline', ([], {'y': '(1.0)', 'color': '"""c"""', 'lw': '(3)', 'label': '"""Water density (1 g/cm^3)"""'}), "(y=1.0, color='c', lw=3, label='Water density (1 g/cm^3)')\n", (6504, 6562), True, 'import matplotlib.pyplot as plt\n'), ((6633, 6653), 'matplotlib.pyplot.minorticks_off', 'plt.minorticks_off', ([], {}), '()\n', (6651, 6653), True, 'import matplotlib.pyplot as plt\n'), ((6654, 6708), 'matplotlib.pyplot.yticks', 'plt.yticks', (['[1.0, 2.0, 3.0, 4.0, 5.0, 7.0, 10.0, 15.0]'], {}), '([1.0, 2.0, 3.0, 4.0, 5.0, 7.0, 10.0, 15.0])\n', (6664, 6708), True, 'import matplotlib.pyplot as plt\n'), ((6916, 6951), 
'matplotlib.pyplot.xlim', 'plt.xlim', (['[radii_min, radii_switch]'], {}), '([radii_min, radii_switch])\n', (6924, 6951), True, 'import matplotlib.pyplot as plt\n'), ((6952, 6973), 'matplotlib.pyplot.ylim', 'plt.ylim', (['[0.9, 20.0]'], {}), '([0.9, 20.0])\n', (6960, 6973), True, 'import matplotlib.pyplot as plt\n'), ((6973, 7020), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""$R_p$ ($R_\\\\oplus$)"""'], {'fontsize': 'tfs'}), "('$R_p$ ($R_\\\\oplus$)', fontsize=tfs)\n", (6983, 7020), True, 'import matplotlib.pyplot as plt\n'), ((7021, 7067), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""$\\\\rho$ ($g/cm^3$)"""'], {'fontsize': 'tfs'}), "('$\\\\rho$ ($g/cm^3$)', fontsize=tfs)\n", (7031, 7067), True, 'import matplotlib.pyplot as plt\n'), ((7068, 7168), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""lower right"""', 'bbox_to_anchor': '(0.99, 0.01)', 'ncol': '(1)', 'frameon': '(False)', 'fontsize': 'lfs'}), "(loc='lower right', bbox_to_anchor=(0.99, 0.01), ncol=1, frameon=\n False, fontsize=lfs)\n", (7078, 7168), True, 'import matplotlib.pyplot as plt\n'), ((7258, 7268), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (7266, 7268), True, 'import matplotlib.pyplot as plt\n'), ((7298, 7325), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(16, 8)'}), '(figsize=(16, 8))\n', (7308, 7325), True, 'import matplotlib.pyplot as plt\n'), ((7332, 7410), 'matplotlib.gridspec.GridSpec', 'GridSpec', (['(5)', '(5)'], {'left': '(0.1)', 'bottom': '(0.1)', 'right': '(0.98)', 'top': '(0.98)', 'wspace': '(0)', 'hspace': '(0)'}), '(5, 5, left=0.1, bottom=0.1, right=0.98, top=0.98, wspace=0, hspace=0)\n', (7340, 7410), False, 'from matplotlib.gridspec import GridSpec\n'), ((7417, 7442), 'matplotlib.pyplot.subplot', 'plt.subplot', (['plot[1:, :4]'], {}), '(plot[1:, :4])\n', (7428, 7442), True, 'import matplotlib.pyplot as plt\n'), ((7746, 7845), 'matplotlib.pyplot.plot', 'plt.plot', (["MR_table['log_R']", "MR_table['05']", '"""-"""'], {'color': '"""g"""', 
'label': '"""Mean prediction (NWG2018)"""'}), "(MR_table['log_R'], MR_table['05'], '-', color='g', label=\n 'Mean prediction (NWG2018)')\n", (7754, 7845), True, 'import matplotlib.pyplot as plt\n'), ((7841, 7964), 'matplotlib.pyplot.fill_between', 'plt.fill_between', (["MR_table['log_R']", "MR_table['016']", "MR_table['084']"], {'color': '"""g"""', 'alpha': '(0.5)', 'label': '"""16%-84% (NWG2018)"""'}), "(MR_table['log_R'], MR_table['016'], MR_table['084'], color\n ='g', alpha=0.5, label='16%-84% (NWG2018)')\n", (7857, 7964), True, 'import matplotlib.pyplot as plt\n'), ((9804, 9840), 'numpy.array', 'np.array', (['[0.5, 1.0, 2.0, 4.0, 10.0]'], {}), '([0.5, 1.0, 2.0, 4.0, 10.0])\n', (9812, 9840), True, 'import numpy as np\n'), ((9850, 9883), 'numpy.array', 'np.array', (['[0.1, 1.0, 10.0, 100.0]'], {}), '([0.1, 1.0, 10.0, 100.0])\n', (9858, 9883), True, 'import numpy as np\n'), ((10055, 10102), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""$R_p$ ($R_\\\\oplus$)"""'], {'fontsize': 'tfs'}), "('$R_p$ ($R_\\\\oplus$)', fontsize=tfs)\n", (10065, 10102), True, 'import matplotlib.pyplot as plt\n'), ((10103, 10150), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""$M_p$ ($M_\\\\oplus$)"""'], {'fontsize': 'tfs'}), "('$M_p$ ($M_\\\\oplus$)', fontsize=tfs)\n", (10113, 10150), True, 'import matplotlib.pyplot as plt\n'), ((10151, 10251), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""lower right"""', 'bbox_to_anchor': '(0.99, 0.01)', 'ncol': '(1)', 'frameon': '(False)', 'fontsize': 'lfs'}), "(loc='lower right', bbox_to_anchor=(0.99, 0.01), ncol=1, frameon=\n False, fontsize=lfs)\n", (10161, 10251), True, 'import matplotlib.pyplot as plt\n'), ((10252, 10276), 'matplotlib.pyplot.subplot', 'plt.subplot', (['plot[0, :4]'], {}), '(plot[0, :4])\n', (10263, 10276), True, 'import matplotlib.pyplot as plt\n'), ((10476, 10529), 'matplotlib.pyplot.axvline', 'plt.axvline', ([], {'x': 'radii_switch', 'color': '"""k"""', 'ls': '"""--"""', 'lw': '(3)'}), "(x=radii_switch, color='k', 
ls='--', lw=3)\n", (10487, 10529), True, 'import matplotlib.pyplot as plt\n'), ((10558, 10590), 'matplotlib.pyplot.xlim', 'plt.xlim', (['[radii_min, radii_max]'], {}), '([radii_min, radii_max])\n', (10566, 10590), True, 'import matplotlib.pyplot as plt\n'), ((10591, 10605), 'matplotlib.pyplot.xticks', 'plt.xticks', (['[]'], {}), '([])\n', (10601, 10605), True, 'import matplotlib.pyplot as plt\n'), ((10606, 10620), 'matplotlib.pyplot.yticks', 'plt.yticks', (['[]'], {}), '([])\n', (10616, 10620), True, 'import matplotlib.pyplot as plt\n'), ((10621, 10721), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""upper right"""', 'bbox_to_anchor': '(0.99, 0.99)', 'ncol': '(1)', 'frameon': '(False)', 'fontsize': 'lfs'}), "(loc='upper right', bbox_to_anchor=(0.99, 0.99), ncol=1, frameon=\n False, fontsize=lfs)\n", (10631, 10721), True, 'import matplotlib.pyplot as plt\n'), ((10722, 10746), 'matplotlib.pyplot.subplot', 'plt.subplot', (['plot[1:, 4]'], {}), '(plot[1:, 4])\n', (10733, 10746), True, 'import matplotlib.pyplot as plt\n'), ((11346, 11369), 'matplotlib.pyplot.ylim', 'plt.ylim', (['[0.07, 100.0]'], {}), '([0.07, 100.0])\n', (11354, 11369), True, 'import matplotlib.pyplot as plt\n'), ((11368, 11382), 'matplotlib.pyplot.xticks', 'plt.xticks', (['[]'], {}), '([])\n', (11378, 11382), True, 'import matplotlib.pyplot as plt\n'), ((11383, 11397), 'matplotlib.pyplot.yticks', 'plt.yticks', (['[]'], {}), '([])\n', (11393, 11397), True, 'import matplotlib.pyplot as plt\n'), ((11398, 11498), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""upper right"""', 'bbox_to_anchor': '(0.99, 0.99)', 'ncol': '(1)', 'frameon': '(False)', 'fontsize': 'lfs'}), "(loc='upper right', bbox_to_anchor=(0.99, 0.99), ncol=1, frameon=\n False, fontsize=lfs)\n", (11408, 11498), True, 'import matplotlib.pyplot as plt\n'), ((11584, 11594), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (11592, 11594), True, 'import matplotlib.pyplot as plt\n'), ((2132, 2180), 'numpy.genfromtxt', 
'np.genfromtxt', (['lines'], {'names': '(True)', 'delimiter': '""", """'}), "(lines, names=True, delimiter=', ')\n", (2145, 2180), True, 'import numpy as np\n'), ((3053, 3090), 'numpy.abs', 'np.abs', (['(M_NWG2018 - M_earthlike_rocky)'], {}), '(M_NWG2018 - M_earthlike_rocky)\n', (3059, 3090), True, 'import numpy as np\n'), ((5332, 5356), 'matplotlib.ticker.ScalarFormatter', 'ticker.ScalarFormatter', ([], {}), '()\n', (5354, 5356), False, 'from matplotlib import ticker\n'), ((6730, 6754), 'matplotlib.ticker.ScalarFormatter', 'ticker.ScalarFormatter', ([], {}), '()\n', (6752, 6754), False, 'from matplotlib import ticker\n'), ((6785, 6809), 'matplotlib.ticker.ScalarFormatter', 'ticker.ScalarFormatter', ([], {}), '()\n', (6807, 6809), False, 'from matplotlib import ticker\n'), ((7184, 7241), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(savefigures_directory + 'Density_radius.pdf')"], {}), "(savefigures_directory + 'Density_radius.pdf')\n", (7195, 7241), True, 'import matplotlib.pyplot as plt\n'), ((7246, 7257), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (7255, 7257), True, 'import matplotlib.pyplot as plt\n'), ((7595, 7614), 'numpy.log10', 'np.log10', (['radii_all'], {}), '(radii_all)\n', (7603, 7614), True, 'import numpy as np\n'), ((7616, 7636), 'numpy.log10', 'np.log10', (['masses_all'], {}), '(masses_all)\n', (7624, 7636), True, 'import numpy as np\n'), ((8614, 8652), 'numpy.log10', 'np.log10', (["MR_earthlike_rocky['radius']"], {}), "(MR_earthlike_rocky['radius'])\n", (8622, 8652), True, 'import numpy as np\n'), ((8654, 8690), 'numpy.log10', 'np.log10', (["MR_earthlike_rocky['mass']"], {}), "(MR_earthlike_rocky['mass'])\n", (8662, 8690), True, 'import numpy as np\n'), ((9240, 9278), 'numpy.log10', 'np.log10', (["MR_earthlike_rocky['radius']"], {}), "(MR_earthlike_rocky['radius'])\n", (9248, 9278), True, 'import numpy as np\n'), ((9517, 9549), 'numpy.log10', 'np.log10', (["MR_pure_iron['radius']"], {}), "(MR_pure_iron['radius'])\n", (9525, 9549), 
True, 'import numpy as np\n'), ((9551, 9581), 'numpy.log10', 'np.log10', (["MR_pure_iron['mass']"], {}), "(MR_pure_iron['mass'])\n", (9559, 9581), True, 'import numpy as np\n'), ((9892, 9912), 'numpy.log10', 'np.log10', (['xtick_vals'], {}), '(xtick_vals)\n', (9900, 9912), True, 'import numpy as np\n'), ((9937, 9957), 'numpy.log10', 'np.log10', (['ytick_vals'], {}), '(ytick_vals)\n', (9945, 9957), True, 'import numpy as np\n'), ((11514, 11567), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(savefigures_directory + 'MR_diagram.pdf')"], {}), "(savefigures_directory + 'MR_diagram.pdf')\n", (11525, 11567), True, 'import matplotlib.pyplot as plt\n'), ((11572, 11583), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (11581, 11583), True, 'import matplotlib.pyplot as plt\n'), ((3826, 3861), 'numpy.log10', 'np.log10', (['sigma_rho_at_radii_switch'], {}), '(sigma_rho_at_radii_switch)\n', (3834, 3861), True, 'import numpy as np\n'), ((3862, 3894), 'numpy.log10', 'np.log10', (['sigma_rho_at_radii_min'], {}), '(sigma_rho_at_radii_min)\n', (3870, 3894), True, 'import numpy as np\n'), ((4075, 4107), 'numpy.log10', 'np.log10', (['sigma_rho_at_radii_min'], {}), '(sigma_rho_at_radii_min)\n', (4083, 4107), True, 'import numpy as np\n'), ((5184, 5193), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (5191, 5193), True, 'import matplotlib.pyplot as plt\n'), ((6562, 6571), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (6569, 6571), True, 'import matplotlib.pyplot as plt\n'), ((9280, 9316), 'numpy.log10', 'np.log10', (["MR_earthlike_rocky['mass']"], {}), "(MR_earthlike_rocky['mass'])\n", (9288, 9316), True, 'import numpy as np\n'), ((9332, 9368), 'numpy.log10', 'np.log10', (["MR_earthlike_rocky['mass']"], {}), "(MR_earthlike_rocky['mass'])\n", (9340, 9368), True, 'import numpy as np\n'), ((9698, 9720), 'numpy.log10', 'np.log10', (['radii_switch'], {}), '(radii_switch)\n', (9706, 9720), True, 'import numpy as np\n'), ((9981, 10000), 'numpy.log10', 'np.log10', 
(['radii_min'], {}), '(radii_min)\n', (9989, 10000), True, 'import numpy as np\n'), ((10002, 10021), 'numpy.log10', 'np.log10', (['radii_max'], {}), '(radii_max)\n', (10010, 10021), True, 'import numpy as np\n'), ((10034, 10048), 'numpy.log10', 'np.log10', (['(0.07)'], {}), '(0.07)\n', (10042, 10048), True, 'import numpy as np\n'), ((10530, 10539), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (10537, 10539), True, 'import matplotlib.pyplot as plt\n'), ((11318, 11327), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (11325, 11327), True, 'import matplotlib.pyplot as plt\n'), ((10329, 10348), 'numpy.log10', 'np.log10', (['radii_min'], {}), '(radii_min)\n', (10337, 10348), True, 'import numpy as np\n'), ((10350, 10369), 'numpy.log10', 'np.log10', (['radii_max'], {}), '(radii_max)\n', (10358, 10369), True, 'import numpy as np\n'), ((10801, 10815), 'numpy.log10', 'np.log10', (['(0.07)'], {}), '(0.07)\n', (10809, 10815), True, 'import numpy as np\n'), ((10991, 11005), 'numpy.log10', 'np.log10', (['(0.07)'], {}), '(0.07)\n', (10999, 11005), True, 'import numpy as np\n'), ((11185, 11199), 'numpy.log10', 'np.log10', (['(0.07)'], {}), '(0.07)\n', (11193, 11199), True, 'import numpy as np\n'), ((707, 733), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (723, 733), False, 'import os\n')] |
import cv2 as cv
import numpy as np
# Load and display the source image (path is machine-specific).
img = cv.imread("D:/Programming/Python_Projects/Computer_Vision/Resources/Image/park.jfif")
cv.imshow('Park', img)
# Single-channel black canvas with the image's height and width.
blank = np.zeros(img.shape[:2], dtype='uint8')
# cv.imshow('Blank Image', blank)
# Filled white circle (value 255) offset 45 px right of centre, radius 100 px.
circle = cv.circle(blank.copy(), (img.shape[1]//2 + 45,img.shape[0]//2), 100, 255, -1)
rectangle = cv.rectangle(blank.copy(), (70,150), (370,370), 255,-1)
# Union of the two shapes forms the final mask.
weird_shape = cv.bitwise_or(circle,rectangle)
# cv.imshow('Weird Shape', weird_shape)
# Keep only the image pixels where the mask is non-zero.
masked = cv.bitwise_and(img,img,mask=weird_shape)
cv.imshow('Weird Shaped Masked Image', masked)
cv.waitKey(0) | [
"cv2.bitwise_and",
"cv2.imshow",
"numpy.zeros",
"cv2.waitKey",
"cv2.bitwise_or",
"cv2.imread"
] | [((46, 136), 'cv2.imread', 'cv.imread', (['"""D:/Programming/Python_Projects/Computer_Vision/Resources/Image/park.jfif"""'], {}), "(\n 'D:/Programming/Python_Projects/Computer_Vision/Resources/Image/park.jfif')\n", (55, 136), True, 'import cv2 as cv\n'), ((133, 155), 'cv2.imshow', 'cv.imshow', (['"""Park"""', 'img'], {}), "('Park', img)\n", (142, 155), True, 'import cv2 as cv\n'), ((167, 205), 'numpy.zeros', 'np.zeros', (['img.shape[:2]'], {'dtype': '"""uint8"""'}), "(img.shape[:2], dtype='uint8')\n", (175, 205), True, 'import numpy as np\n'), ((419, 451), 'cv2.bitwise_or', 'cv.bitwise_or', (['circle', 'rectangle'], {}), '(circle, rectangle)\n', (432, 451), True, 'import cv2 as cv\n'), ((504, 546), 'cv2.bitwise_and', 'cv.bitwise_and', (['img', 'img'], {'mask': 'weird_shape'}), '(img, img, mask=weird_shape)\n', (518, 546), True, 'import cv2 as cv\n'), ((546, 592), 'cv2.imshow', 'cv.imshow', (['"""Weird Shaped Masked Image"""', 'masked'], {}), "('Weird Shaped Masked Image', masked)\n", (555, 592), True, 'import cv2 as cv\n'), ((596, 609), 'cv2.waitKey', 'cv.waitKey', (['(0)'], {}), '(0)\n', (606, 609), True, 'import cv2 as cv\n')] |
import numpy as np
import math
from numpy import *
import copy
from MST_Direction import *
def CalM(CtgDirection,DFlag,weight_mat):
    """Weighted fraction of flagged contig pairs whose relative orientation
    contradicts the expected flag.

    For each flagged pair (i, j) with i < j (DFlag[i][j] != 0), the pair
    contributes weight_mat[i][j] * |d_i*d_j - DFlag[i][j]| / 2 to the
    mismatch total; the denominator starts at 1 (avoids division by zero)
    and accumulates the same weights. Returns mismatch / total_weight.
    """
    n = len(CtgDirection)
    mismatch = 0
    total_weight = 1
    for i in range(n - 1):
        for j in range(i, n):
            if DFlag[i][j] == 0:
                continue  # unflagged pair: contributes nothing
            w = weight_mat[i][j]
            total_weight += w
            mismatch += w * abs(CtgDirection[i] * CtgDirection[j] - DFlag[i][j]) / 2
    return mismatch / total_weight
def CalML(CtgDirection,DFlag,weight_mat):
    """Per-contig version of CalM: returns an array where entry i is the
    weighted orientation-mismatch ratio of contig i against all contigs
    whose pair flag is non-zero (denominator starts at 1 per contig).
    """
    d = np.asarray(CtgDirection, dtype=float)
    flags = np.asarray(DFlag, dtype=float)
    w = np.asarray(weight_mat, dtype=float)
    active = flags != 0  # only flagged pairs contribute
    denom = 1.0 + np.where(active, w, 0.0).sum(axis=1)
    # |d_i*d_j - flag_ij| / 2 is 0 for agreement, 1 for contradiction.
    pair_mismatch = np.abs(np.outer(d, d) - flags) / 2.0
    numer = np.where(active, w * pair_mismatch, 0.0).sum(axis=1)
    return numer / denom
def CalTotalContradict(CtgDirection,J_mat):
    """Sum of J_mat[i, j] * d_i * d_j over the upper triangle (i <= j).

    `J_mat` is expected to be a 2-D numpy array; `CtgDirection` holds the
    +/-1 orientation of each contig.
    """
    d = np.asarray(CtgDirection)
    # Upper triangle (diagonal included) matches the original i<=j loop.
    upper = np.triu(np.asarray(J_mat))
    return np.dot(d, np.dot(upper, d))
def CalH(CtgDirection, J_mat):
    """Ising-style energy of an orientation assignment: -d^T J d.

    Lower values mean the orientations agree better with the pairwise
    direction evidence in J_mat.

    Fix: removed the unused local `size`.
    """
    DVector = np.array(CtgDirection)
    # Quadratic form d^T J d, negated so better assignments score lower.
    H = np.dot(np.dot(DVector.T,J_mat),DVector)
    return (-H)
def DeltaH(CtgDirection, J_mat):
    """Per-contig energy gradient: element i is d_i * (J d)_i.

    A negative entry means flipping contig i's orientation would lower the
    energy computed by CalH.

    Fix: removed the unused local `size` and a dead commented-out print.
    """
    DVector = np.array(CtgDirection)
    Deltah = np.dot(J_mat,DVector)
    # Multiply element i by d_i via the diagonal matrix of directions.
    Deltah = np.dot(np.diag(DVector),Deltah)
    return Deltah
def DeltaH_W(CtgDirection, J_mat):
    """Row-weight-normalized version of DeltaH.

    Each gradient entry is divided by the L1 norm of the corresponding row of
    J_mat, so heavily-linked contigs do not dominate the flip selection.

    Fix: removed the unused local `size`.
    """
    DVector = np.array(CtgDirection)
    Deltah = np.dot(J_mat,DVector)
    Deltah = np.dot(np.diag(DVector),Deltah)
    # Per-row normalizer; rows with no links would divide by zero here.
    J_rowsum = np.abs(J_mat).sum(axis=1)
    return Deltah/J_rowsum
def DeltaTC(CtgDirection,DFlag):
    """Per-contig contribution to the total contradiction score.

    Entry i is d_i * sum_{j >= i} DFlag[i][j] * d_j (upper triangle only).
    """
    n = len(CtgDirection)
    partial = np.zeros(n)
    for i in range(n):
        for j in range(i, n):
            partial[i] += DFlag[i][j] * CtgDirection[j]
    # Elementwise product with the direction vector.
    return partial * CtgDirection
def DDFS(visit,n1,DFlag,weight_mat,CtgDirection,contig):
    """Depth-first propagation of contig orientations along the heaviest links.

    Starting from contig n1 (whose orientation is already set), visit
    unvisited neighbors in decreasing link-weight order and assign each the
    orientation implied by the pairwise sign constraint DFlag[n1][neighbor].

    Mutates `visit` (str-keyed 0/1 flags) and `CtgDirection` in place;
    `contig` is only used by the commented-out debug prints.
    """
    # Work on a copy of this contig's weight row so neighbors can be zeroed
    # out as they are consumed without touching weight_mat itself.
    weight = copy.deepcopy(weight_mat[n1,])
    max_nei = np.argmax(weight)
    link_weight = weight[max_nei]
    weight[max_nei] = 0
    while link_weight>0:
        # Only descend into unvisited neighbors with a decided direction flag.
        if visit[str(max_nei)]==0 and DFlag[n1][max_nei]!=0 :
            CtgDirection[max_nei] = DFlag[n1][max_nei]*CtgDirection[n1]
            visit[str(max_nei)]=1
            #print("from",contig[n1],'visit',contig[max_nei],CtgDirection[max_nei],link_weight)
            DDFS(visit,max_nei,DFlag,weight_mat,CtgDirection,contig)
        # Advance to the next-heaviest remaining neighbor; loop ends when the
        # heaviest remaining weight is 0.
        max_nei = np.argmax(weight)
        #print('from ',n1,'to the next',max_nei,weight_mat[n1,max_nei],weight[max_nei])
        link_weight = weight[max_nei]
        weight[max_nei] = 0
def GetDW(direct_shortdict,direct_longdict,contig,truc):
    """Build pairwise direction-evidence matrices from read-link dictionaries.

    For each contig pair keyed as 'c1-c2', the dictionaries hold lists of
    +/-1 orientation votes (short- and long-insert libraries are pooled).

    Returns (DFlag, weight_mat, J_mat):
      - weight_mat: majority vote count per pair (symmetric),
      - J_mat: signed vote sum per pair (symmetric),
      - DFlag: sign of the vote sum, but only for pairs whose majority count
        exceeds the threshold `truc`; 0 elsewhere.
    """
    ContigCount = len(contig)
    DFlag = np.zeros((ContigCount,ContigCount))
    weight_mat = np.zeros((ContigCount,ContigCount))
    J_mat = np.zeros((ContigCount,ContigCount))
    for n1 in range(ContigCount):
        for n2 in range(n1,ContigCount):
            c1 = contig[n1]
            c2 = contig[n2]
            key = str(c1)+'-'+str(c2)
            d_list = []
            direct_sum = 0
            if key in direct_shortdict:
                d_list = direct_shortdict[key]
            if key in direct_longdict:
                d_list += direct_longdict[key]
            direct_sum = sum(d_list)
            # Votes are +/-1, so counts follow from the list length and sum.
            posi_count = (len(d_list)+direct_sum)/2
            nega_count = (len(d_list)-direct_sum)/2
            w = max(posi_count,nega_count)
            weight_mat[n1][n2] += w
            weight_mat[n2][n1] += w
            J_mat[n1][n2] = direct_sum
            J_mat[n2][n1] = direct_sum
            if w > truc :#and min(posi_count,nega_count)<truc: #or (int(c1) in OnlyNeighbor) or (int(c2) in OnlyNeighbor)
                # Tie vote with enough evidence: arbitrarily break toward +1
                # so DFlag is non-zero for this pair.
                if direct_sum==0:
                    print("direct_sum==0!",len(d_list),direct_sum,posi_count,nega_count)
                    direct_sum = 1
                DFlag[n1][n2] = np.sign(direct_sum)
                DFlag[n2][n1] = np.sign(direct_sum)
            '''
            if key in direct_shortdict: #direct_shortdict is first considered to decide DFlag,for more error in long libraries.
                d_list = direct_shortdict[key]
                direct_sum = sum(d_list)
                posi_count = (len(d_list)+direct_sum)/2
                nega_count = (len(d_list)-direct_sum)/2
                w = max(posi_count,nega_count)
                weight_mat[n1][n2] += w
                weight_mat[n2][n1] += w
                J_mat[n1][n2] = direct_sum
                J_mat[n2][n1] = direct_sum
                if w > truc :#
                    DFlag[n1][n2] = np.sign(direct_sum)
                    DFlag[n2][n1] = np.sign(direct_sum)
                    continue #If short lib link is enough to decide the DFlag, stop here
            if key in direct_longdict: #If short lib link is not enough to decide the DFlag,take long lib into consideration at the same time
                d_list += direct_longdict[key]
                direct_sum += sum(d_list)
                posi_count = (len(d_list)+direct_sum)/2
                nega_count = (len(d_list)-direct_sum)/2
                w = max(posi_count,nega_count)
                weight_mat[n1][n2] += w
                weight_mat[n2][n1] += w
                J_mat[n1][n2] = direct_sum
                J_mat[n2][n1] = direct_sum
                if w > truc:
                    DFlag[n1][n2] = np.sign(direct_sum)
                    DFlag[n2][n1] = np.sign(direct_sum)'''
    return (DFlag,weight_mat,J_mat) #DFlag/100 np.sign(DFlag)*np.log(np.abs(DFlag)+1
def DirectC_W(direct_shortdict,direct_longdict,contig,truc):
    """Orient contigs by greedy descent on the weight-normalized gradient.

    Seeds orientations by propagating contig 0's orientation along the
    heaviest links (DDFS), then repeatedly flips the contig with the most
    negative normalized gradient (DeltaH_W) as long as the energy H keeps
    decreasing. If a flip does not improve H it is undone and the loop stops.

    Returns the list of +/-1 orientations, one per contig.
    """
    ContigCount = len(contig)
    (DFlag,weight_mat,J_mat) = GetDW(direct_shortdict,direct_longdict,contig,truc)
    ##Initialize:
    CtgDirection = [0 for i in range(ContigCount)]
    CtgDirection[0] = 1
    visit = {}
    for i in range(ContigCount):
        visit[str(i)]=0
    visit[str(0)]=1
    DDFS(visit,0,DFlag,weight_mat,CtgDirection,contig)
    for i in range(ContigCount): #for contigs not initialized
        if CtgDirection[i]==0:
            print("Not Initialized: ", contig[i])
            CtgDirection[i] = 1
    # Negative sentinel so the loop runs at least once.
    min_DeltaH = -10
    print("here 0")
    H = CalH(CtgDirection,J_mat)
    print("here H:",H)
    while min_DeltaH<0 :
        H_old = H
        deltaH = list(DeltaH_W(CtgDirection,J_mat))#list(DeltaH(CtgDirection,J_mat))
        min_DeltaH = min(deltaH)
        min_id = deltaH.index(min_DeltaH)
        print("min:",contig[min_id],min_DeltaH)
        if min_DeltaH >=0:
            print("Perfect orientation!")
            break
        # Tentatively flip the worst contig and re-evaluate the energy.
        CtgDirection[min_id]=-CtgDirection[min_id]
        H = CalH(CtgDirection,J_mat)
        print("H_new:", H, "Old:",H_old)
        if H>=H_old: #change back!
            CtgDirection[min_id] = -CtgDirection[min_id]
            break
    return(CtgDirection)
def DirectC_array(direct_shortdict,direct_longdict,contig,truc):#,Contiglen_list
    """Orient contigs by greedy energy descent on the raw gradient (DeltaH).

    Seeds orientations by propagating contig 0's orientation along the
    heaviest links (DDFS), then repeatedly flips the contig with the most
    negative gradient while the total-contradiction score improves.

    Returns the list of +/-1 orientations, one per contig.

    Bug fix: the "change back" branch referenced the undefined name `max_1`
    (a guaranteed NameError, copied from DirectC) and did not terminate the
    loop; it now undoes the flip of `min_id` and breaks, matching DirectC_W.
    """
    ContigCount = len(contig)
    (DFlag,weight_mat,J_mat) = GetDW(direct_shortdict,direct_longdict,contig,truc)
    ##Initialize:
    CtgDirection = [0 for i in range(ContigCount)]
    CtgDirection[0] = 1
    visit = {}
    for i in range(ContigCount):
        visit[str(i)]=0
    visit[str(0)]=1
    DDFS(visit,0,DFlag,weight_mat,CtgDirection,contig)
    for i in range(ContigCount): # contigs unreachable from contig 0 default to +1
        if CtgDirection[i]==0:
            print("Not Initialized: ", contig[i])
            CtgDirection[i] = 1
    #CtgDirection = DirectMST(direct_shortdict,direct_longdict,contig,Contiglen_list)
    # Negative sentinel so the loop runs at least once.
    min_DeltaH = -10
    TC = CalTotalContradict(CtgDirection,J_mat)
    while min_DeltaH<0 :
        TC_old = TC
        deltaH = list(DeltaH(CtgDirection,J_mat))
        min_DeltaH = min(deltaH)
        min_id = deltaH.index(min_DeltaH)
        print("min:",contig[min_id],min_DeltaH)
        if min_DeltaH >=0:
            print("Perfect orientation!")
            break
        # Tentatively flip the worst contig and re-check total contradiction.
        CtgDirection[min_id]=-CtgDirection[min_id]
        TC = CalTotalContradict(CtgDirection,J_mat)
        print("TC_new:", TC, "Old:",TC_old)
        if TC<=TC_old: #change back!
            CtgDirection[min_id] = -CtgDirection[min_id]
            break
    return(CtgDirection)
def DirectC(direct_shortdict,direct_longdict,contig,truc):
    """Orient contigs by greedy descent on the per-contig contradiction (DeltaTC).

    Seeds orientations by propagating contig 0's orientation along the
    heaviest links (DDFS), then repeatedly flips the contig with the largest
    negative DeltaTC entry while the total-contradiction score increases.
    A non-improving flip is undone; the loop condition then terminates.

    Returns the list of +/-1 orientations, one per contig.
    """
    ContigCount = len(contig)
    (DFlag,weight_mat,J_mat) = GetDW(direct_shortdict,direct_longdict,contig,truc) #,OnlyNeighbor
    #np.savetxt("Weight_Mat",weight_mat,fmt="%d", delimiter=",")
    ##Initialize:
    CtgDirection = [0 for i in range(ContigCount)]
    CtgDirection[0] = 1
    visit = {}
    for i in range(ContigCount):
        visit[str(i)]=0
    visit[str(0)]=1
    DDFS(visit,0,DFlag,weight_mat,CtgDirection,contig)
    for i in range(ContigCount): #for contigs not initialized
        if CtgDirection[i]==0:
            print("Not Initialized: ", contig[i])
            CtgDirection[i] = 1
    ##optimization
    TC = CalTotalContradict(CtgDirection,J_mat)
    # Sentinels chosen so the loop condition holds on the first iteration.
    min_ML = -10
    TC_old = -100
    while min_ML<0 and TC>TC_old:
        TC_old = TC
        ML = DeltaTC(CtgDirection,J_mat)
        min_ML = min(ML)
        # Index of the most negative entry (argmax of the negated vector).
        max_1 = np.argmax(-ML)
        print("max_1:",contig[max_1],ML[max_1])
        if min_ML==0:
            print("Perfect orientation!")
            break
        #if max_1 ==0: break
        CtgDirection[max_1]=-CtgDirection[max_1]
        TC = CalTotalContradict(CtgDirection,J_mat)
        print("TC_new:", TC, TC_old)
        if TC<=TC_old: #change back!
            CtgDirection[max_1] = -CtgDirection[max_1]
    return(CtgDirection)
| [
"numpy.abs",
"numpy.ones",
"numpy.argmax",
"numpy.diag",
"numpy.array",
"numpy.dot",
"numpy.zeros",
"numpy.sign",
"copy.deepcopy"
] | [((495, 516), 'numpy.zeros', 'np.zeros', (['ContigCount'], {}), '(ContigCount)\n', (503, 516), True, 'import numpy as np\n'), ((524, 544), 'numpy.ones', 'np.ones', (['ContigCount'], {}), '(ContigCount)\n', (531, 544), True, 'import numpy as np\n'), ((1096, 1118), 'numpy.array', 'np.array', (['CtgDirection'], {}), '(CtgDirection)\n', (1104, 1118), True, 'import numpy as np\n'), ((1253, 1275), 'numpy.array', 'np.array', (['CtgDirection'], {}), '(CtgDirection)\n', (1261, 1275), True, 'import numpy as np\n'), ((1287, 1309), 'numpy.dot', 'np.dot', (['J_mat', 'DVector'], {}), '(J_mat, DVector)\n', (1293, 1309), True, 'import numpy as np\n'), ((1469, 1491), 'numpy.array', 'np.array', (['CtgDirection'], {}), '(CtgDirection)\n', (1477, 1491), True, 'import numpy as np\n'), ((1503, 1525), 'numpy.dot', 'np.dot', (['J_mat', 'DVector'], {}), '(J_mat, DVector)\n', (1509, 1525), True, 'import numpy as np\n'), ((1709, 1730), 'numpy.zeros', 'np.zeros', (['ContigCount'], {}), '(ContigCount)\n', (1717, 1730), True, 'import numpy as np\n'), ((1941, 1971), 'copy.deepcopy', 'copy.deepcopy', (['weight_mat[n1,]'], {}), '(weight_mat[n1,])\n', (1954, 1971), False, 'import copy\n'), ((1984, 2001), 'numpy.argmax', 'np.argmax', (['weight'], {}), '(weight)\n', (1993, 2001), True, 'import numpy as np\n'), ((2643, 2679), 'numpy.zeros', 'np.zeros', (['(ContigCount, ContigCount)'], {}), '((ContigCount, ContigCount))\n', (2651, 2679), True, 'import numpy as np\n'), ((2694, 2730), 'numpy.zeros', 'np.zeros', (['(ContigCount, ContigCount)'], {}), '((ContigCount, ContigCount))\n', (2702, 2730), True, 'import numpy as np\n'), ((2740, 2776), 'numpy.zeros', 'np.zeros', (['(ContigCount, ContigCount)'], {}), '((ContigCount, ContigCount))\n', (2748, 2776), True, 'import numpy as np\n'), ((765, 777), 'numpy.array', 'np.array', (['ML'], {}), '(ML)\n', (773, 777), True, 'import numpy as np\n'), ((778, 790), 'numpy.array', 'np.array', (['DL'], {}), '(DL)\n', (786, 790), True, 'import numpy as np\n'), ((1132, 
1156), 'numpy.dot', 'np.dot', (['DVector.T', 'J_mat'], {}), '(DVector.T, J_mat)\n', (1138, 1156), True, 'import numpy as np\n'), ((1327, 1343), 'numpy.diag', 'np.diag', (['DVector'], {}), '(DVector)\n', (1334, 1343), True, 'import numpy as np\n'), ((1543, 1559), 'numpy.diag', 'np.diag', (['DVector'], {}), '(DVector)\n', (1550, 1559), True, 'import numpy as np\n'), ((2388, 2405), 'numpy.argmax', 'np.argmax', (['weight'], {}), '(weight)\n', (2397, 2405), True, 'import numpy as np\n'), ((8125, 8139), 'numpy.argmax', 'np.argmax', (['(-ML)'], {}), '(-ML)\n', (8134, 8139), True, 'import numpy as np\n'), ((1581, 1594), 'numpy.abs', 'np.abs', (['J_mat'], {}), '(J_mat)\n', (1587, 1594), True, 'import numpy as np\n'), ((3609, 3628), 'numpy.sign', 'np.sign', (['direct_sum'], {}), '(direct_sum)\n', (3616, 3628), True, 'import numpy as np\n'), ((3650, 3669), 'numpy.sign', 'np.sign', (['direct_sum'], {}), '(direct_sum)\n', (3657, 3669), True, 'import numpy as np\n')] |
# Data loading based on https://github.com/NVIDIA/flownet2-pytorch
from ast import NotEq
import numpy as np
import torch
import torch.utils.data as data
from torch.utils.data import DistributedSampler
import torch.nn.functional as F
import os
import math
import random
from glob import glob
import os.path as osp
import re
from utils import frame_utils
from utils.augmentor import FlowAugmentor, SparseFlowAugmentor
from utils.utils import print0
from sklearn.model_selection import train_test_split
# Module-level guard so the shift-augmentation banner is printed only once
# per process (set in FlowDataset.__init__).
shift_info_printed = False
# sparse: sparse (kitti .png) format of flow data
class FlowDataset(data.Dataset):
    """Base class for optical-flow datasets.

    Subclasses populate self.image_list (pairs of image paths) and
    self.flow_list (ground-truth flow paths); this class handles file
    reading, augmentation and conversion to CHW float tensors.
    """
    def __init__(self, aug_params=None, sparse=False):
        # aug_params: dict of augmentation settings forwarded to the
        #             augmentor; None disables augmentation entirely.
        # sparse: whether flow ground truth is sparse (KITTI .png format).
        self.augmentor = None
        self.sparse = sparse
        if aug_params is not None:
            if sparse:
                # Only KITTI, HD1k, VIPER are sparse.
                self.augmentor = SparseFlowAugmentor(self.ds_name, **aug_params)
            else:
                self.augmentor = FlowAugmentor(self.ds_name, **aug_params)
                global shift_info_printed
                # Announce the shift-augmentation settings once per process.
                if not shift_info_printed and aug_params['shift_prob']:
                    print("Shift aug: {}, prob {}".format( \
                        self.augmentor.shift_sigmas, self.augmentor.shift_prob))
                    shift_info_printed = True

        # if is_test, do not return flow (only for LB submission).
        self.is_test = False
        self.init_seed = False
        self.flow_list = []
        self.image_list = []
        self.extra_info = None
        self.occ_list = None
        self.seg_list = None
        self.seg_inv_list = None

    def __getitem__(self, index):
        """Return one sample; the tuple layout depends on flags set in __init__."""
        if self.extra_info is not None:
            extra_info = self.extra_info[index]
        else:
            extra_info = 0

        # if is_test, do not return flow (only for LB submission).
        # If there's groundtruth flow, then is_test=False, e.g. on chairs and things.
        if self.is_test:
            img1 = frame_utils.read_gen(self.image_list[index][0])
            img2 = frame_utils.read_gen(self.image_list[index][1])
            img1 = np.array(img1).astype(np.uint8)[..., :3]
            img2 = np.array(img2).astype(np.uint8)[..., :3]
            img1 = torch.from_numpy(img1).permute(2, 0, 1).float()
            img2 = torch.from_numpy(img2).permute(2, 0, 1).float()
            return img1, img2, extra_info

        if not self.init_seed:
            # Seed each dataloader worker differently but deterministically.
            worker_info = torch.utils.data.get_worker_info()
            if worker_info is not None:
                torch.manual_seed(worker_info.id)
                np.random.seed(worker_info.id)
                random.seed(worker_info.id)
                self.init_seed = True

        index = index % len(self.image_list)
        valid = None
        # KITTI flow is saved as image files.
        # KITTI, HD1K, VIPER are sparse format.
        if self.sparse:
            flow, valid = frame_utils.readFlowKITTI(self.flow_list[index])
        else:
            # read_gen: general read? choose reader according to the file extension.
            flow = frame_utils.read_gen(self.flow_list[index])

        if self.occ_list is not None:
            # Occlusion masks are stored as 0/255 images; convert to bool.
            occ = frame_utils.read_gen(self.occ_list[index])
            occ = np.array(occ).astype(np.uint8)
            occ = torch.from_numpy(occ // 255).bool()

        if self.seg_list is not None:
            # Segmentation maps pack an integer id into the R, G, B channels.
            f_in = np.array(frame_utils.read_gen(self.seg_list[index]))
            seg_r = f_in[:, :, 0].astype('int32')
            seg_g = f_in[:, :, 1].astype('int32')
            seg_b = f_in[:, :, 2].astype('int32')
            seg_map = (seg_r * 256 + seg_g) * 256 + seg_b
            seg_map = torch.from_numpy(seg_map)

        if self.seg_inv_list is not None:
            seg_inv = frame_utils.read_gen(self.seg_inv_list[index])
            seg_inv = np.array(seg_inv).astype(np.uint8)
            seg_inv = torch.from_numpy(seg_inv // 255).bool()

        img1 = frame_utils.read_gen(self.image_list[index][0])
        img2 = frame_utils.read_gen(self.image_list[index][1])

        flow = np.array(flow).astype(np.float32)
        img1 = np.array(img1).astype(np.uint8)
        img2 = np.array(img2).astype(np.uint8)

        # grayscale images
        if len(img1.shape) == 2:
            img1 = np.tile(img1[...,None], (1, 1, 3))
            img2 = np.tile(img2[...,None], (1, 1, 3))
        else:
            # Remove alpha?
            img1 = img1[..., :3]
            img2 = img2[..., :3]

        if self.augmentor is not None:
            if self.sparse:
                img1, img2, flow, valid = self.augmentor(img1, img2, flow, valid)
            else:
                # shift augmentation will return valid. Otherwise valid is None.
                img1, img2, flow, valid = self.augmentor(img1, img2, flow)

        img1 = torch.from_numpy(img1).permute(2, 0, 1).float()
        img2 = torch.from_numpy(img2).permute(2, 0, 1).float()
        flow = torch.from_numpy(flow).permute(2, 0, 1).float()

        if valid is not None:
            valid = torch.from_numpy(valid)
        else:
            # No mask provided: treat extreme flow magnitudes as invalid.
            valid = (flow[0].abs() < 1000) & (flow[1].abs() < 1000)

        if self.occ_list is not None:
            return img1, img2, flow, valid.float(), occ, self.occ_list[index]
        elif self.seg_list is not None and self.seg_inv_list is not None:
            return img1, img2, flow, valid.float(), seg_map, seg_inv
        else:
            return img1, img2, flow, valid.float(), extra_info

    def __rmul__(self, v):
        """Repeat the dataset v times (used to re-weight datasets when mixing)."""
        self.flow_list = v * self.flow_list
        self.image_list = v * self.image_list
        if self.extra_info is not None:
            self.extra_info = v * self.extra_info
        return self

    def __len__(self):
        return len(self.image_list)
class MpiSintel(FlowDataset):
    """MPI-Sintel dataset (clean or final pass), with optional occlusion and
    segmentation side channels."""
    def __init__(self, aug_params=None, split='training', root='datasets/Sintel', dstype='clean',
                 occlusion=False, segmentation=False, debug=False):
        self.ds_name = f'sintel-{split}-{dstype}'
        super(MpiSintel, self).__init__(aug_params)
        flow_root = osp.join(root, split, 'flow')
        image_root = osp.join(root, split, dstype)
        occ_root = osp.join(root, split, 'occlusions')
        # occ_root = osp.join(root, split, 'occ_plus_out')
        # occ_root = osp.join(root, split, 'in_frame_occ')
        # occ_root = osp.join(root, split, 'out_of_frame')
        if debug:
            self.extra_info = []
        seg_root = osp.join(root, split, 'segmentation')
        seg_inv_root = osp.join(root, split, 'segmentation_invalid')
        self.segmentation = segmentation
        self.occlusion = occlusion
        if self.occlusion:
            self.occ_list = []
        if self.segmentation:
            self.seg_list = []
            self.seg_inv_list = []

        if split == 'test':
            self.is_test = True

        # One pair per pair of consecutive frames within each scene.
        for scene in sorted(os.listdir(image_root)):
            image_list = sorted(glob(osp.join(image_root, scene, '*.png')))
            for i in range(len(image_list)-1):
                self.image_list += [ [image_list[i], image_list[i+1]] ]
                # i: frame_id, the sequence number of the image.
                # The first image in this folder is numbered 0.
                if debug:
                    self.extra_info += [ (scene, i) ] # scene and frame_id

            if split != 'test':
                self.flow_list += sorted(glob(osp.join(flow_root, scene, '*.flo')))
                if self.occlusion:
                    self.occ_list += sorted(glob(osp.join(occ_root, scene, '*.png')))
                if self.segmentation:
                    self.seg_list += sorted(glob(osp.join(seg_root, scene, '*.png')))
                    self.seg_inv_list += sorted(glob(osp.join(seg_inv_root, scene, '*.png')))
        print(f"{self.ds_name}: {len(self.image_list)} image pairs.")
class FlyingChairs(FlowDataset):
    """FlyingChairs dataset; train/val membership follows the official split file."""

    def __init__(self, aug_params=None, split='training', root='datasets/FlyingChairs_release/data'):
        self.ds_name = f'chairs-{split}'
        super(FlyingChairs, self).__init__(aug_params)

        images = sorted(glob(osp.join(root, '*.ppm')))
        flows = sorted(glob(osp.join(root, '*.flo')))
        # Each flow field pairs two consecutive .ppm frames.
        assert (len(images)//2 == len(flows))

        # Split file: 1 marks a training sample, 2 a validation sample.
        split_list = np.loadtxt('datasets/FlyingChairs_release/FlyingChairs_train_val.txt', dtype=np.int32)
        wanted = 1 if split == 'training' else 2
        for i, flow_path in enumerate(flows):
            if split in ('training', 'validation') and split_list[i] == wanted:
                self.flow_list.append(flow_path)
                self.image_list.append([images[2 * i], images[2 * i + 1]])

        print(f"{self.ds_name}: {len(self.image_list)} image pairs.")
class FlyingThings3D(FlowDataset):
    """FlyingThings3D dataset (clean/final pass); pairs are built in both
    temporal directions (forward and backward flow)."""
    def __init__(self, aug_params=None, root='datasets/FlyingThings3D', split='training', dstype='frames_cleanpass'):
        ds_type_short = { 'frames_cleanpass': 'clean',
                          'frames_finalpass': 'final' }
        self.ds_name = f'things-{split}-{ds_type_short[dstype]}'
        super(FlyingThings3D, self).__init__(aug_params)

        if split == 'training':
            for cam in ['left']:
                for direction in ['into_future', 'into_past']:
                    image_dirs = sorted(glob(osp.join(root, dstype, 'TRAIN/*/*')))
                    image_dirs = sorted([osp.join(f, cam) for f in image_dirs])
                    flow_dirs = sorted(glob(osp.join(root, 'optical_flow/TRAIN/*/*')))
                    flow_dirs = sorted([osp.join(f, direction, cam) for f in flow_dirs])
                    for idir, fdir in zip(image_dirs, flow_dirs):
                        images = sorted(glob(osp.join(idir, '*.png')) )
                        # We converted pfm to flo to reduce half of the space.
                        flows = sorted(glob(osp.join(fdir, '*.flo')) )
                        if len(flows) == 0:
                            flows = sorted(glob(osp.join(fdir, '*.pfm')) )
                        for i in range(len(flows)-1):
                            # into_future: (frame i, frame i+1) with forward flow;
                            # into_past: the reversed pair with backward flow.
                            if direction == 'into_future':
                                self.image_list += [ [images[i], images[i+1]] ]
                                self.flow_list += [ flows[i] ]
                            elif direction == 'into_past':
                                self.image_list += [ [images[i+1], images[i]] ]
                                self.flow_list += [ flows[i+1] ]
        elif split == 'validation':
            for cam in ['left']:
                for direction in ['into_future', 'into_past']:
                    image_dirs = sorted(glob(osp.join(root, dstype, 'TEST/*/*')))
                    image_dirs = sorted([osp.join(f, cam) for f in image_dirs])
                    flow_dirs = sorted(glob(osp.join(root, 'optical_flow/TEST/*/*')))
                    flow_dirs = sorted([osp.join(f, direction, cam) for f in flow_dirs])
                    for idir, fdir in zip(image_dirs, flow_dirs):
                        images = sorted(glob(osp.join(idir, '*.png')))
                        # We converted pfm to flo to reduce half of the space.
                        flows = sorted(glob(osp.join(fdir, '*.flo')))
                        if len(flows) == 0:
                            flows = sorted(glob(osp.join(fdir, '*.pfm')))
                        for i in range(len(flows) - 1):
                            if direction == 'into_future':
                                self.image_list += [[images[i], images[i + 1]]]
                                self.flow_list += [flows[i]]
                            elif direction == 'into_past':
                                self.image_list += [[images[i + 1], images[i]]]
                                self.flow_list += [flows[i + 1]]

            # Keep only the frames flagged (non-zero) in the validation subset file.
            valid_list = np.loadtxt('things_val_test_set.txt', dtype=np.int32)
            self.image_list = [self.image_list[ind] for ind, sel in enumerate(valid_list) if sel]
            self.flow_list = [self.flow_list[ind] for ind, sel in enumerate(valid_list) if sel]
        print(f"{self.ds_name}: {len(self.image_list)} image pairs.")
class KITTI(FlowDataset):
    """KITTI optical-flow dataset (sparse ground truth stored as .png)."""

    def __init__(self, aug_params=None, split='training', root='datasets/KITTI',
                 debug=False):
        self.ds_name = f'kitti-{split}'
        super(KITTI, self).__init__(aug_params, sparse=True)
        if debug:
            self.extra_info = []
        if split == 'testing':
            # No ground-truth flow: __getitem__ returns image pairs only.
            self.is_test = True

        root = osp.join(root, split)
        first_frames = sorted(glob(osp.join(root, 'image_2/*_10.png')))
        second_frames = sorted(glob(osp.join(root, 'image_2/*_11.png')))

        for first, second in zip(first_frames, second_frames):
            self.image_list.append([first, second])
            if debug:
                # frame_id: file name of the first image of the pair.
                self.extra_info.append([first.split('/')[-1]])

        if split == 'training':
            self.flow_list = sorted(glob(osp.join(root, 'flow_occ/*_10.png')))
        print(f"{self.ds_name}: {len(self.image_list)} image pairs.")
# Further split KITTI training data into training and testing sets.
class KITTITrain(FlowDataset):
    """KITTI 'training' data re-split 70/30 into train and held-out test sets
    with a fixed random seed, so models can be evaluated with ground truth."""
    def __init__(self, aug_params=None, split='training', root='datasets/KITTI',
                 debug=False):
        self.ds_name = f'kittitrain-{split}'
        super(KITTITrain, self).__init__(aug_params, sparse=True)

        root = osp.join(root, "training")
        images1 = sorted(glob(osp.join(root, 'image_2/*_10.png')))
        images2 = sorted(glob(osp.join(root, 'image_2/*_11.png')))
        flow_list = sorted(glob(osp.join(root, 'flow_occ/*_10.png')))

        extra_info = []
        image_list = []
        for img1, img2 in zip(images1, images2):
            # frame_id: file name of the first image of the pair.
            frame_id = img1.split('/')[-1]
            image_list += [ [img1, img2] ]
            extra_info += [ [frame_id] ]

        # Fixed random_state keeps the split reproducible across runs.
        image_list_train, image_list_test, flow_list_train, flow_list_test, \
            extra_info_train, extra_info_test = \
                train_test_split(image_list, flow_list, extra_info, test_size=0.3, random_state=42)

        if split == 'training':
            self.image_list = image_list_train
            self.flow_list = flow_list_train
            if debug:
                self.extra_info = extra_info_train
        else:
            self.image_list = image_list_test
            self.flow_list = flow_list_test
            if debug:
                self.extra_info = extra_info_test
        print(f"{self.ds_name}: {len(self.image_list)} image pairs.")
class HD1K(FlowDataset):
    """HD1K dataset: consecutive frames within each numbered sequence,
    sparse .png flow ground truth."""

    def __init__(self, aug_params=None, root='datasets/HD1k'):
        self.ds_name = f'hd1k'
        super(HD1K, self).__init__(aug_params, sparse=True)

        seq_ix = 0
        while True:
            flows = sorted(glob(os.path.join(root, 'hd1k_flow_gt', 'flow_occ/%06d_*.png' % seq_ix)))
            images = sorted(glob(os.path.join(root, 'hd1k_input', 'image_2/%06d_*.png' % seq_ix)))
            if not flows:
                # Sequences are numbered consecutively; first missing one ends the scan.
                break

            for i in range(len(flows) - 1):
                self.flow_list.append(flows[i])
                self.image_list.append([images[i], images[i + 1]])

            seq_ix += 1
        print(f"{self.ds_name}: {len(self.image_list)} image pairs.")
class Autoflow(FlowDataset):
    """AutoFlow dataset with a deterministic ~90/10 train/test split by
    sorted scene index."""

    def __init__(self, aug_params=None, split='training', root='datasets/autoflow',
                 debug=False):
        self.ds_name = f'autoflow-{split}'
        super(Autoflow, self).__init__(aug_params)

        training_size = int(len(os.listdir(root)) * 0.9)
        if debug:
            self.extra_info = []

        for i, scene in enumerate(sorted(os.listdir(root))):
            in_training_range = i <= training_size
            selected = (split == 'training' and in_training_range) or \
                       (split == 'test' and not in_training_range)
            if not selected:
                continue
            scene_dir = osp.join(root, scene)
            # Each scene folder holds one image pair and its forward flow.
            self.image_list.append([osp.join(scene_dir, 'im0.png'),
                                    osp.join(scene_dir, 'im1.png')])
            self.flow_list.append(osp.join(scene_dir, 'forward.flo'))
            if debug:
                self.extra_info.append([scene])
        print(f"{self.ds_name}: {len(self.image_list)} image pairs.")
# The VIPER .npz flow files have been converted to KITTI .png format.
class VIPER(FlowDataset):
    """VIPER dataset; flow .npz files were pre-converted to KITTI sparse .png.

    Frames are named SSS_FFFFF.{jpg,png}. For train/val, frames whose index
    ends in '0' are first frames; for test, the first frames come from
    test_frames.txt. Pairs with a missing second image or flow are skipped.
    """
    def __init__(self, aug_params=None, split='training', root='datasets/viper/', filetype='jpg',
                 debug=False):
        self.ds_name = f'viper-{split}'
        super(VIPER, self).__init__(aug_params, sparse=True)
        split_map = { 'training': 'train', 'validation': 'val', 'test': 'test' }
        split = split_map[split]
        split_img_root = osp.join(root, filetype, split, 'img')
        split_flow_root = osp.join(root, filetype, split, 'flow')
        skip_count = 0
        if debug:
            self.extra_info = []

        if split == 'test':
            # 001_00001, 001_00076, ...
            TEST_FRAMES = open(osp.join(root, "test_frames.txt"))
            test_frames_dict = {}
            for frame_trunk in TEST_FRAMES:
                frame_trunk = frame_trunk.strip()
                test_frames_dict[frame_trunk] = 1
            print0("{} test frame names loaded".format(len(test_frames_dict)))
            self.is_test = True

        for i, scene in enumerate(sorted(os.listdir(split_img_root))):
            # scene: 001, 002, ...
            # dir: viper/train/img/001
            # img0_name: 001_00001.png, 001_00010.png, ...
            for img0_name in sorted(os.listdir(osp.join(split_img_root, scene))):
                matches = re.match(r"(\d{3})_(\d{5}).(jpg|png)", img0_name)
                if not matches:
                    breakpoint()
                scene0 = matches.group(1)
                img0_idx = matches.group(2)
                suffix = matches.group(3)
                assert scene == scene0
                # img0_trunk: img0_name without suffix.
                img0_trunk = f"{scene}_{img0_idx}"
                if (split == 'train' or split == 'val') and img0_idx[-1] == '0' \
                  or split == 'test' and img0_trunk in test_frames_dict:
                    # The partner frame is the next index; flow shares img0's stem.
                    img1_idx = "{:05d}".format(int(img0_idx) + 1)
                    img1_name = f"{scene}_{img1_idx}.{suffix}"
                    flow_name = img0_name[:-3] + "png"
                    image0_path = osp.join(split_img_root, scene, img0_name)
                    image1_path = osp.join(split_img_root, scene, img1_name)
                    flow_path = osp.join(split_flow_root, scene, flow_name)
                    # Sometimes image1 is missing. Skip this pair.
                    if not os.path.isfile(image1_path):
                        # In the test set, image1 should always be there.
                        if split == 'test':
                            breakpoint()
                        skip_count += 1
                        continue
                    # if both image0_path and image1_path exist, then flow_path should exist.
                    if split != 'test' and not os.path.isfile(flow_path):
                        skip_count += 1
                        continue
                # This file is not considered as the first frame. Skip.
                else:
                    skip_count += 1
                    continue

                self.image_list += [ [image0_path, image1_path] ]
                self.flow_list += [ flow_path ]
                if debug:
                    self.extra_info += [ [img0_trunk] ]

        print0(f"{self.ds_name}: {len(self.image_list)} image pairs. {skip_count} files skipped.")
class SlowFlow(FlowDataset):
    """SlowFlow dataset; frames are named seq<S>_<IDX>.png and only frames
    whose index ends in '0' start a pair (the partner ends in '1').

    blur_mag selects the blur-magnitude subfolder; blur_num_frames selects
    the 'sequence_R0<n>' variant (0 means the plain 'sequence' folder).
    """
    def __init__(self, aug_params=None, split='test', root='datasets/slowflow/', filetype='png',
                 blur_mag=100, blur_num_frames=0, debug=True):
        self.ds_name = f'slowflow-{split}-{blur_mag}-{blur_num_frames}'
        super(SlowFlow, self).__init__(aug_params, sparse=False)
        sequence_folder = "sequence" if blur_num_frames == 0 else f"sequence_R0{blur_num_frames}"
        sequence_root = osp.join(root, str(blur_mag), sequence_folder)
        print0(sequence_root)
        flow_root = osp.join(root, str(blur_mag), 'flow')
        skip_count = 0
        if debug:
            self.extra_info = []

        for i, scene in enumerate(sorted(os.listdir(sequence_root))):
            # scene: Animals, Ball...
            # img0_name: seq5_0000000.png, seq5_0000001.png, ...
            for img0_name in sorted(os.listdir(osp.join(sequence_root, scene))):
                matches = re.match(r"seq(\d+)_(\d+).png", img0_name)
                if not matches:
                    breakpoint()
                subseq_idx = matches.group(1)
                img0_idx = matches.group(2)
                # This image is img1. Skip.
                if img0_idx[-1] == '1':
                    continue
                if img0_idx[-1] != '0':
                    breakpoint()
                # img0_trunk: img0_name without suffix.
                img0_trunk = f"seq{subseq_idx}_{img0_idx}"
                # Partner frame flips the trailing 0 to 1; flow shares the stem.
                img1_idx = img0_idx[:-1] + '1'
                img1_name = f"seq{subseq_idx}_{img1_idx}.png"
                flow_name = img0_trunk + ".flo"
                image0_path = osp.join(sequence_root, scene, img0_name)
                image1_path = osp.join(sequence_root, scene, img1_name)
                flow_path = osp.join(flow_root, scene, flow_name)
                if not os.path.isfile(flow_path):
                    skip_count += 1
                    continue

                self.image_list += [ [image0_path, image1_path] ]
                self.flow_list += [ flow_path ]
                if debug:
                    self.extra_info += [ [scene, img0_trunk] ]

        print0(f"{self.ds_name}: {len(self.image_list)} image pairs. {skip_count} skipped")
# 'crop_size' is first used to bound the minimal size of images after resizing. Then it's used to crop the image.
def fetch_dataloader(args, SINTEL_TRAIN_DS='C+T+K+S+H'):
    """Create the data loader for the corresponding training set.

    args.stage selects the dataset mix and its augmentation parameters;
    SINTEL_TRAIN_DS selects the mixture recipe for the 'sintel' stage.

    Returns a torch DataLoader over the selected (possibly re-weighted)
    dataset mix; uses a DistributedSampler when args.ddp is set.

    Fix: an unrecognized args.stage now raises ValueError immediately
    instead of falling through and crashing later with a NameError on
    train_dataset.
    """

    if args.stage == 'chairs':
        aug_params = {'crop_size': args.image_size, 'min_scale': -0.1, 'max_scale': 1.0,
                      'do_flip': True,
                      'shift_prob': args.shift_aug_prob, 'shift_sigmas': args.shift_sigmas }
        train_dataset = FlyingChairs(aug_params, split='training')

    elif args.stage == 'things':
        # For experiments to understand inborn vs. acquired robustness against image shifting,
        # only do image shifting augmentation on Things.
        # Things is non-sparse. So only need to work on FlowAugmentor
        # (no need to work on SparseFlowAugmentor).
        aug_params = {'crop_size': args.image_size, 'min_scale': -0.4, 'max_scale': 0.8, 'do_flip': True,
                      'shift_prob': args.shift_aug_prob, 'shift_sigmas': args.shift_sigmas }
        things_clean = FlyingThings3D(aug_params, dstype='frames_cleanpass', split='training')
        things_final = FlyingThings3D(aug_params, dstype='frames_finalpass', split='training')
        train_dataset = things_clean + things_final

    elif args.stage == 'autoflow':
        # autoflow image size: (488, 576)
        # minimal scale = 2**0.42 = 1.338. 576*1.338=770.6 > 768. Otherwise there'll be exceptions.
        train_dataset = Autoflow({'crop_size': args.image_size, 'min_scale': -0.2, 'max_scale': 0.8,
                                  'spatial_aug_prob': 1, 'do_flip': True,
                                  'shift_prob': args.shift_aug_prob, 'shift_sigmas': args.shift_sigmas })

    elif args.stage == 'sintel':
        aug_params = {'crop_size': args.image_size, 'min_scale': -0.2, 'max_scale': 0.6, 'do_flip': True,
                      'shift_prob': args.shift_aug_prob, 'shift_sigmas': args.shift_sigmas }
        things_clean = FlyingThings3D(aug_params, dstype='frames_cleanpass')
        sintel_clean = MpiSintel(aug_params, split='training', dstype='clean')
        sintel_final = MpiSintel(aug_params, split='training', dstype='final')

        if SINTEL_TRAIN_DS == 'C+T+K+S+H':
            kitti = KITTI({'crop_size': args.image_size, 'min_scale': -0.3, 'max_scale': 0.5, 'do_flip': True,
                           'shift_prob': args.shift_aug_prob, 'shift_sigmas': args.shift_sigmas })
            hd1k = HD1K({'crop_size': args.image_size, 'min_scale': -0.5, 'max_scale': 0.2, 'do_flip': True,
                         'shift_prob': args.shift_aug_prob, 'shift_sigmas': args.shift_sigmas })
            # Integer multipliers re-weight datasets in the mix (see __rmul__).
            train_dataset = 100*sintel_clean + 100*sintel_final + 200*kitti + 5*hd1k + things_clean

        elif SINTEL_TRAIN_DS == 'C+T+K/S':
            train_dataset = 100*sintel_clean + 100*sintel_final + things_clean

    elif args.stage == 'kitti':
        aug_params = {'crop_size': args.image_size, 'min_scale': -0.2, 'max_scale': 0.4, 'do_flip': False,
                      'shift_prob': args.shift_aug_prob, 'shift_sigmas': args.shift_sigmas }
        train_dataset = KITTI(aug_params, split='training')

    elif args.stage == 'kittitrain':
        aug_params = {'crop_size': args.image_size, 'min_scale': -0.2, 'max_scale': 0.4, 'do_flip': False,
                      'shift_prob': args.shift_aug_prob, 'shift_sigmas': args.shift_sigmas }
        train_dataset = KITTITrain(aug_params, split='training')

    elif args.stage == 'viper':
        aug_params = {'crop_size': args.image_size, 'min_scale': -1, 'max_scale': -0.5,
                      'spatial_aug_prob': 1, 'do_flip': False,
                      'shift_prob': args.shift_aug_prob, 'shift_sigmas': args.shift_sigmas }
        train_dataset = VIPER(aug_params, split='training')

    else:
        # Fail fast: previously this fell through to a NameError below.
        raise ValueError(f"Unknown training stage: {args.stage}")

    if args.ddp:
        train_sampler = DistributedSampler(train_dataset, shuffle=True)
        shuffle = False
    else:
        train_sampler = None
        shuffle = True

    train_loader = data.DataLoader(train_dataset, batch_size=args.batch_size, sampler=train_sampler,
                                   pin_memory=True, shuffle=shuffle, num_workers=args.num_workers, drop_last=True)

    print0('Training with %d image pairs' % len(train_dataset))
    return train_loader
| [
"torch.utils.data.DistributedSampler",
"utils.augmentor.FlowAugmentor",
"torch.from_numpy",
"utils.frame_utils.read_gen",
"numpy.array",
"utils.utils.print0",
"os.listdir",
"numpy.random.seed",
"utils.augmentor.SparseFlowAugmentor",
"numpy.tile",
"sklearn.model_selection.train_test_split",
"to... | [((26076, 26247), 'torch.utils.data.DataLoader', 'data.DataLoader', (['train_dataset'], {'batch_size': 'args.batch_size', 'sampler': 'train_sampler', 'pin_memory': '(True)', 'shuffle': 'shuffle', 'num_workers': 'args.num_workers', 'drop_last': '(True)'}), '(train_dataset, batch_size=args.batch_size, sampler=\n train_sampler, pin_memory=True, shuffle=shuffle, num_workers=args.\n num_workers, drop_last=True)\n', (26091, 26247), True, 'import torch.utils.data as data\n'), ((3944, 3991), 'utils.frame_utils.read_gen', 'frame_utils.read_gen', (['self.image_list[index][0]'], {}), '(self.image_list[index][0])\n', (3964, 3991), False, 'from utils import frame_utils\n'), ((4007, 4054), 'utils.frame_utils.read_gen', 'frame_utils.read_gen', (['self.image_list[index][1]'], {}), '(self.image_list[index][1])\n', (4027, 4054), False, 'from utils import frame_utils\n'), ((6102, 6131), 'os.path.join', 'osp.join', (['root', 'split', '"""flow"""'], {}), "(root, split, 'flow')\n", (6110, 6131), True, 'import os.path as osp\n'), ((6153, 6182), 'os.path.join', 'osp.join', (['root', 'split', 'dstype'], {}), '(root, split, dstype)\n', (6161, 6182), True, 'import os.path as osp\n'), ((6202, 6237), 'os.path.join', 'osp.join', (['root', 'split', '"""occlusions"""'], {}), "(root, split, 'occlusions')\n", (6210, 6237), True, 'import os.path as osp\n'), ((6486, 6523), 'os.path.join', 'osp.join', (['root', 'split', '"""segmentation"""'], {}), "(root, split, 'segmentation')\n", (6494, 6523), True, 'import os.path as osp\n'), ((6547, 6592), 'os.path.join', 'osp.join', (['root', 'split', '"""segmentation_invalid"""'], {}), "(root, split, 'segmentation_invalid')\n", (6555, 6592), True, 'import os.path as osp\n'), ((8309, 8399), 'numpy.loadtxt', 'np.loadtxt', (['"""datasets/FlyingChairs_release/FlyingChairs_train_val.txt"""'], {'dtype': 'np.int32'}), "('datasets/FlyingChairs_release/FlyingChairs_train_val.txt',\n dtype=np.int32)\n", (8319, 8399), True, 'import numpy as np\n'), ((12531, 
12552), 'os.path.join', 'osp.join', (['root', 'split'], {}), '(root, split)\n', (12539, 12552), True, 'import os.path as osp\n'), ((13422, 13448), 'os.path.join', 'osp.join', (['root', '"""training"""'], {}), "(root, 'training')\n", (13430, 13448), True, 'import os.path as osp\n'), ((14026, 14113), 'sklearn.model_selection.train_test_split', 'train_test_split', (['image_list', 'flow_list', 'extra_info'], {'test_size': '(0.3)', 'random_state': '(42)'}), '(image_list, flow_list, extra_info, test_size=0.3,\n random_state=42)\n', (14042, 14113), False, 'from sklearn.model_selection import train_test_split\n'), ((16789, 16827), 'os.path.join', 'osp.join', (['root', 'filetype', 'split', '"""img"""'], {}), "(root, filetype, split, 'img')\n", (16797, 16827), True, 'import os.path as osp\n'), ((16854, 16893), 'os.path.join', 'osp.join', (['root', 'filetype', 'split', '"""flow"""'], {}), "(root, filetype, split, 'flow')\n", (16862, 16893), True, 'import os.path as osp\n'), ((20274, 20295), 'utils.utils.print0', 'print0', (['sequence_root'], {}), '(sequence_root)\n', (20280, 20295), False, 'from utils.utils import print0\n'), ((25922, 25969), 'torch.utils.data.DistributedSampler', 'DistributedSampler', (['train_dataset'], {'shuffle': '(True)'}), '(train_dataset, shuffle=True)\n', (25940, 25969), False, 'from torch.utils.data import DistributedSampler\n'), ((1981, 2028), 'utils.frame_utils.read_gen', 'frame_utils.read_gen', (['self.image_list[index][0]'], {}), '(self.image_list[index][0])\n', (2001, 2028), False, 'from utils import frame_utils\n'), ((2048, 2095), 'utils.frame_utils.read_gen', 'frame_utils.read_gen', (['self.image_list[index][1]'], {}), '(self.image_list[index][1])\n', (2068, 2095), False, 'from utils import frame_utils\n'), ((2450, 2484), 'torch.utils.data.get_worker_info', 'torch.utils.data.get_worker_info', ([], {}), '()\n', (2482, 2484), False, 'import torch\n'), ((2916, 2964), 'utils.frame_utils.readFlowKITTI', 'frame_utils.readFlowKITTI', 
(['self.flow_list[index]'], {}), '(self.flow_list[index])\n', (2941, 2964), False, 'from utils import frame_utils\n'), ((3083, 3126), 'utils.frame_utils.read_gen', 'frame_utils.read_gen', (['self.flow_list[index]'], {}), '(self.flow_list[index])\n', (3103, 3126), False, 'from utils import frame_utils\n'), ((3184, 3226), 'utils.frame_utils.read_gen', 'frame_utils.read_gen', (['self.occ_list[index]'], {}), '(self.occ_list[index])\n', (3204, 3226), False, 'from utils import frame_utils\n'), ((3671, 3696), 'torch.from_numpy', 'torch.from_numpy', (['seg_map'], {}), '(seg_map)\n', (3687, 3696), False, 'import torch\n'), ((3762, 3808), 'utils.frame_utils.read_gen', 'frame_utils.read_gen', (['self.seg_inv_list[index]'], {}), '(self.seg_inv_list[index])\n', (3782, 3808), False, 'from utils import frame_utils\n'), ((4279, 4314), 'numpy.tile', 'np.tile', (['img1[..., None]', '(1, 1, 3)'], {}), '(img1[..., None], (1, 1, 3))\n', (4286, 4314), True, 'import numpy as np\n'), ((4333, 4368), 'numpy.tile', 'np.tile', (['img2[..., None]', '(1, 1, 3)'], {}), '(img2[..., None], (1, 1, 3))\n', (4340, 4368), True, 'import numpy as np\n'), ((5042, 5065), 'torch.from_numpy', 'torch.from_numpy', (['valid'], {}), '(valid)\n', (5058, 5065), False, 'import torch\n'), ((6922, 6944), 'os.listdir', 'os.listdir', (['image_root'], {}), '(image_root)\n', (6932, 6944), False, 'import os\n'), ((15541, 15557), 'os.listdir', 'os.listdir', (['root'], {}), '(root)\n', (15551, 15557), False, 'import os\n'), ((874, 921), 'utils.augmentor.SparseFlowAugmentor', 'SparseFlowAugmentor', (['self.ds_name'], {}), '(self.ds_name, **aug_params)\n', (893, 921), False, 'from utils.augmentor import FlowAugmentor, SparseFlowAugmentor\n'), ((973, 1014), 'utils.augmentor.FlowAugmentor', 'FlowAugmentor', (['self.ds_name'], {}), '(self.ds_name, **aug_params)\n', (986, 1014), False, 'from utils.augmentor import FlowAugmentor, SparseFlowAugmentor\n'), ((2541, 2574), 'torch.manual_seed', 'torch.manual_seed', 
(['worker_info.id'], {}), '(worker_info.id)\n', (2558, 2574), False, 'import torch\n'), ((2591, 2621), 'numpy.random.seed', 'np.random.seed', (['worker_info.id'], {}), '(worker_info.id)\n', (2605, 2621), True, 'import numpy as np\n'), ((2638, 2665), 'random.seed', 'random.seed', (['worker_info.id'], {}), '(worker_info.id)\n', (2649, 2665), False, 'import random\n'), ((3397, 3439), 'utils.frame_utils.read_gen', 'frame_utils.read_gen', (['self.seg_list[index]'], {}), '(self.seg_list[index])\n', (3417, 3439), False, 'from utils import frame_utils\n'), ((4071, 4085), 'numpy.array', 'np.array', (['flow'], {}), '(flow)\n', (4079, 4085), True, 'import numpy as np\n'), ((4120, 4134), 'numpy.array', 'np.array', (['img1'], {}), '(img1)\n', (4128, 4134), True, 'import numpy as np\n'), ((4167, 4181), 'numpy.array', 'np.array', (['img2'], {}), '(img2)\n', (4175, 4181), True, 'import numpy as np\n'), ((8161, 8184), 'os.path.join', 'osp.join', (['root', '"""*.ppm"""'], {}), "(root, '*.ppm')\n", (8169, 8184), True, 'import os.path as osp\n'), ((8215, 8238), 'os.path.join', 'osp.join', (['root', '"""*.flo"""'], {}), "(root, '*.flo')\n", (8223, 8238), True, 'import os.path as osp\n'), ((12583, 12617), 'os.path.join', 'osp.join', (['root', '"""image_2/*_10.png"""'], {}), "(root, 'image_2/*_10.png')\n", (12591, 12617), True, 'import os.path as osp\n'), ((12650, 12684), 'os.path.join', 'osp.join', (['root', '"""image_2/*_11.png"""'], {}), "(root, 'image_2/*_11.png')\n", (12658, 12684), True, 'import os.path as osp\n'), ((13479, 13513), 'os.path.join', 'osp.join', (['root', '"""image_2/*_10.png"""'], {}), "(root, 'image_2/*_10.png')\n", (13487, 13513), True, 'import os.path as osp\n'), ((13546, 13580), 'os.path.join', 'osp.join', (['root', '"""image_2/*_11.png"""'], {}), "(root, 'image_2/*_11.png')\n", (13554, 13580), True, 'import os.path as osp\n'), ((13615, 13650), 'os.path.join', 'osp.join', (['root', '"""flow_occ/*_10.png"""'], {}), "(root, 'flow_occ/*_10.png')\n", (13623, 13650), 
True, 'import os.path as osp\n'), ((15707, 15723), 'os.listdir', 'os.listdir', (['root'], {}), '(root)\n', (15717, 15723), False, 'import os\n'), ((15878, 15910), 'os.path.join', 'osp.join', (['root', 'scene', '"""im0.png"""'], {}), "(root, scene, 'im0.png')\n", (15886, 15910), True, 'import os.path as osp\n'), ((15941, 15973), 'os.path.join', 'osp.join', (['root', 'scene', '"""im1.png"""'], {}), "(root, scene, 'im1.png')\n", (15949, 15973), True, 'import os.path as osp\n'), ((16004, 16040), 'os.path.join', 'osp.join', (['root', 'scene', '"""forward.flo"""'], {}), "(root, scene, 'forward.flo')\n", (16012, 16040), True, 'import os.path as osp\n'), ((17068, 17101), 'os.path.join', 'osp.join', (['root', '"""test_frames.txt"""'], {}), "(root, 'test_frames.txt')\n", (17076, 17101), True, 'import os.path as osp\n'), ((17446, 17472), 'os.listdir', 'os.listdir', (['split_img_root'], {}), '(split_img_root)\n', (17456, 17472), False, 'import os\n'), ((17717, 17767), 're.match', 're.match', (['"""(\\\\d{3})_(\\\\d{5}).(jpg|png)"""', 'img0_name'], {}), "('(\\\\d{3})_(\\\\d{5}).(jpg|png)', img0_name)\n", (17725, 17767), False, 'import re\n'), ((20470, 20495), 'os.listdir', 'os.listdir', (['sequence_root'], {}), '(sequence_root)\n', (20480, 20495), False, 'import os\n'), ((20709, 20752), 're.match', 're.match', (['"""seq(\\\\d+)_(\\\\d+).png"""', 'img0_name'], {}), "('seq(\\\\d+)_(\\\\d+).png', img0_name)\n", (20717, 20752), False, 'import re\n'), ((21407, 21448), 'os.path.join', 'osp.join', (['sequence_root', 'scene', 'img0_name'], {}), '(sequence_root, scene, img0_name)\n', (21415, 21448), True, 'import os.path as osp\n'), ((21479, 21520), 'os.path.join', 'osp.join', (['sequence_root', 'scene', 'img1_name'], {}), '(sequence_root, scene, img1_name)\n', (21487, 21520), True, 'import os.path as osp\n'), ((21551, 21588), 'os.path.join', 'osp.join', (['flow_root', 'scene', 'flow_name'], {}), '(flow_root, scene, flow_name)\n', (21559, 21588), True, 'import os.path as osp\n'), 
((3245, 3258), 'numpy.array', 'np.array', (['occ'], {}), '(occ)\n', (3253, 3258), True, 'import numpy as np\n'), ((3294, 3322), 'torch.from_numpy', 'torch.from_numpy', (['(occ // 255)'], {}), '(occ // 255)\n', (3310, 3322), False, 'import torch\n'), ((3831, 3848), 'numpy.array', 'np.array', (['seg_inv'], {}), '(seg_inv)\n', (3839, 3848), True, 'import numpy as np\n'), ((3888, 3920), 'torch.from_numpy', 'torch.from_numpy', (['(seg_inv // 255)'], {}), '(seg_inv // 255)\n', (3904, 3920), False, 'import torch\n'), ((6984, 7020), 'os.path.join', 'osp.join', (['image_root', 'scene', '"""*.png"""'], {}), "(image_root, scene, '*.png')\n", (6992, 7020), True, 'import os.path as osp\n'), ((11826, 11879), 'numpy.loadtxt', 'np.loadtxt', (['"""things_val_test_set.txt"""'], {'dtype': 'np.int32'}), "('things_val_test_set.txt', dtype=np.int32)\n", (11836, 11879), True, 'import numpy as np\n'), ((12974, 13009), 'os.path.join', 'osp.join', (['root', '"""flow_occ/*_10.png"""'], {}), "(root, 'flow_occ/*_10.png')\n", (12982, 13009), True, 'import os.path as osp\n'), ((14804, 14870), 'os.path.join', 'os.path.join', (['root', '"""hd1k_flow_gt"""', "('flow_occ/%06d_*.png' % seq_ix)"], {}), "(root, 'hd1k_flow_gt', 'flow_occ/%06d_*.png' % seq_ix)\n", (14816, 14870), False, 'import os\n'), ((14906, 14969), 'os.path.join', 'os.path.join', (['root', '"""hd1k_input"""', "('image_2/%06d_*.png' % seq_ix)"], {}), "(root, 'hd1k_input', 'image_2/%06d_*.png' % seq_ix)\n", (14918, 14969), False, 'import os\n'), ((17656, 17687), 'os.path.join', 'osp.join', (['split_img_root', 'scene'], {}), '(split_img_root, scene)\n', (17664, 17687), True, 'import os.path as osp\n'), ((18491, 18533), 'os.path.join', 'osp.join', (['split_img_root', 'scene', 'img0_name'], {}), '(split_img_root, scene, img0_name)\n', (18499, 18533), True, 'import os.path as osp\n'), ((18569, 18611), 'os.path.join', 'osp.join', (['split_img_root', 'scene', 'img1_name'], {}), '(split_img_root, scene, img1_name)\n', (18577, 18611), True, 
'import os.path as osp\n'), ((18647, 18690), 'os.path.join', 'osp.join', (['split_flow_root', 'scene', 'flow_name'], {}), '(split_flow_root, scene, flow_name)\n', (18655, 18690), True, 'import os.path as osp\n'), ((20649, 20679), 'os.path.join', 'osp.join', (['sequence_root', 'scene'], {}), '(sequence_root, scene)\n', (20657, 20679), True, 'import os.path as osp\n'), ((21616, 21641), 'os.path.isfile', 'os.path.isfile', (['flow_path'], {}), '(flow_path)\n', (21630, 21641), False, 'import os\n'), ((2115, 2129), 'numpy.array', 'np.array', (['img1'], {}), '(img1)\n', (2123, 2129), True, 'import numpy as np\n'), ((2175, 2189), 'numpy.array', 'np.array', (['img2'], {}), '(img2)\n', (2183, 2189), True, 'import numpy as np\n'), ((4817, 4839), 'torch.from_numpy', 'torch.from_numpy', (['img1'], {}), '(img1)\n', (4833, 4839), False, 'import torch\n'), ((4880, 4902), 'torch.from_numpy', 'torch.from_numpy', (['img2'], {}), '(img2)\n', (4896, 4902), False, 'import torch\n'), ((4943, 4965), 'torch.from_numpy', 'torch.from_numpy', (['flow'], {}), '(flow)\n', (4959, 4965), False, 'import torch\n'), ((7451, 7486), 'os.path.join', 'osp.join', (['flow_root', 'scene', '"""*.flo"""'], {}), "(flow_root, scene, '*.flo')\n", (7459, 7486), True, 'import os.path as osp\n'), ((18785, 18812), 'os.path.isfile', 'os.path.isfile', (['image1_path'], {}), '(image1_path)\n', (18799, 18812), False, 'import os\n'), ((2235, 2257), 'torch.from_numpy', 'torch.from_numpy', (['img1'], {}), '(img1)\n', (2251, 2257), False, 'import torch\n'), ((2302, 2324), 'torch.from_numpy', 'torch.from_numpy', (['img2'], {}), '(img2)\n', (2318, 2324), False, 'import torch\n'), ((7573, 7607), 'os.path.join', 'osp.join', (['occ_root', 'scene', '"""*.png"""'], {}), "(occ_root, scene, '*.png')\n", (7581, 7607), True, 'import os.path as osp\n'), ((7697, 7731), 'os.path.join', 'osp.join', (['seg_root', 'scene', '"""*.png"""'], {}), "(seg_root, scene, '*.png')\n", (7705, 7731), True, 'import os.path as osp\n'), ((7787, 7825), 
'os.path.join', 'osp.join', (['seg_inv_root', 'scene', '"""*.png"""'], {}), "(seg_inv_root, scene, '*.png')\n", (7795, 7825), True, 'import os.path as osp\n'), ((9295, 9330), 'os.path.join', 'osp.join', (['root', 'dstype', '"""TRAIN/*/*"""'], {}), "(root, dstype, 'TRAIN/*/*')\n", (9303, 9330), True, 'import os.path as osp\n'), ((9374, 9390), 'os.path.join', 'osp.join', (['f', 'cam'], {}), '(f, cam)\n', (9382, 9390), True, 'import os.path as osp\n'), ((9458, 9498), 'os.path.join', 'osp.join', (['root', '"""optical_flow/TRAIN/*/*"""'], {}), "(root, 'optical_flow/TRAIN/*/*')\n", (9466, 9498), True, 'import os.path as osp\n'), ((9541, 9568), 'os.path.join', 'osp.join', (['f', 'direction', 'cam'], {}), '(f, direction, cam)\n', (9549, 9568), True, 'import os.path as osp\n'), ((19187, 19212), 'os.path.isfile', 'os.path.isfile', (['flow_path'], {}), '(flow_path)\n', (19201, 19212), False, 'import os\n'), ((9702, 9725), 'os.path.join', 'osp.join', (['idir', '"""*.png"""'], {}), "(idir, '*.png')\n", (9710, 9725), True, 'import os.path as osp\n'), ((9852, 9875), 'os.path.join', 'osp.join', (['fdir', '"""*.flo"""'], {}), "(fdir, '*.flo')\n", (9860, 9875), True, 'import os.path as osp\n'), ((10637, 10671), 'os.path.join', 'osp.join', (['root', 'dstype', '"""TEST/*/*"""'], {}), "(root, dstype, 'TEST/*/*')\n", (10645, 10671), True, 'import os.path as osp\n'), ((10715, 10731), 'os.path.join', 'osp.join', (['f', 'cam'], {}), '(f, cam)\n', (10723, 10731), True, 'import os.path as osp\n'), ((10799, 10838), 'os.path.join', 'osp.join', (['root', '"""optical_flow/TEST/*/*"""'], {}), "(root, 'optical_flow/TEST/*/*')\n", (10807, 10838), True, 'import os.path as osp\n'), ((10881, 10908), 'os.path.join', 'osp.join', (['f', 'direction', 'cam'], {}), '(f, direction, cam)\n', (10889, 10908), True, 'import os.path as osp\n'), ((9971, 9994), 'os.path.join', 'osp.join', (['fdir', '"""*.pfm"""'], {}), "(fdir, '*.pfm')\n", (9979, 9994), True, 'import os.path as osp\n'), ((11042, 11065), 
'os.path.join', 'osp.join', (['idir', '"""*.png"""'], {}), "(idir, '*.png')\n", (11050, 11065), True, 'import os.path as osp\n'), ((11191, 11214), 'os.path.join', 'osp.join', (['fdir', '"""*.flo"""'], {}), "(fdir, '*.flo')\n", (11199, 11214), True, 'import os.path as osp\n'), ((11309, 11332), 'os.path.join', 'osp.join', (['fdir', '"""*.pfm"""'], {}), "(fdir, '*.pfm')\n", (11317, 11332), True, 'import os.path as osp\n')] |
import pandas as pd
from sklearn import preprocessing
from sklearn.feature_extraction.text import ENGLISH_STOP_WORDS
from sklearn.feature_extraction.text import TfidfVectorizer
import os
from sklearn.decomposition import TruncatedSVD
from sklearn.metrics import f1_score
from sklearn.metrics import accuracy_score
from sklearn.linear_model import LogisticRegression
from sklearn.svm import SVC
from sklearn.neighbors import KNeighborsClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.metrics import confusion_matrix
import matplotlib.pyplot as plt
import numpy as np
from sklearn.model_selection import validation_curve
# Load the training set: binary labels ('label') and raw article text ('text').
train_frame = pd.read_csv("train_gossipcop_vol2.csv", sep=",")
Y_train = train_frame["label"].values
X_train_origin = train_frame["text"].values
print("Train set read.")
# Standard English stop words, fed to every TF-IDF vectorizer below.
stopwords = set(ENGLISH_STOP_WORDS)
# --- SVM base classifier: TF-IDF -> truncated SVD -> RBF-kernel SVC ---
print("SVM Classifier training and results:")
tfidf_svm = TfidfVectorizer(sublinear_tf=True, max_df=0.29, stop_words=stopwords)
X_train = tfidf_svm.fit_transform(X_train_origin)
print("Vectorized.")
# Latent semantic analysis: project the sparse TF-IDF matrix onto 150 components.
svd = TruncatedSVD(n_components=150, algorithm='arpack', random_state=42)
print("SVD prepared.")
X_train = svd.fit_transform(X_train)
print("SVD finished.")
svm = SVC(C=10, kernel='rbf', gamma='scale', probability=True, random_state=42)
svm.fit(X_train, Y_train)
print("Trained.")
# Training-set class probabilities, consumed later by the stacking ensemble.
Y_probas_train_svm = svm.predict_proba(X_train)
print("Probabilities predicted.")
# --- KNN base classifier: TF-IDF -> truncated SVD -> distance-weighted k-NN ---
print("KNN Classifier training and results:")
tfidf_knn = TfidfVectorizer(sublinear_tf=True, max_df=0.56, stop_words=stopwords)
X_train = tfidf_knn.fit_transform(X_train_origin)
print("Vectorized.")
svd = TruncatedSVD(n_components=150, algorithm='arpack', random_state=42)
print("SVD prepared.")
X_train = svd.fit_transform(X_train)
print("SVD finished.")
knn = KNeighborsClassifier(n_neighbors=7, metric='euclidean', weights='distance')
knn.fit(X_train, Y_train)
print("Trained.")
# Training-set class probabilities, consumed later by the stacking ensemble.
Y_probas_train_knn = knn.predict_proba(X_train)
print("Probabilities predicted.")
# --- Logistic-regression base classifier: TF-IDF -> truncated SVD -> L1 LR ---
print("LR Classifier training and results:")
tfidf_lr = TfidfVectorizer(sublinear_tf=True, max_df=0.65, stop_words=stopwords)
X_train = tfidf_lr.fit_transform(X_train_origin)
print("Vectorized.")
svd = TruncatedSVD(n_components=150, algorithm='arpack', random_state=42)
print("SVD prepared.")
X_train = svd.fit_transform(X_train)
print("SVD finished.")
LR = LogisticRegression(penalty='l1', C=100, solver='liblinear', max_iter=1000, random_state=42)
LR.fit(X_train, Y_train)
print("Trained.")
# Training-set class probabilities, consumed later by the stacking ensemble.
Y_probas_train_LR = LR.predict_proba(X_train)
print("Probabilities predicted.")
# --- Decision-tree base classifier: TF-IDF -> truncated SVD -> entropy tree ---
print("DT Classifier training and results:")
tfidf_dt = TfidfVectorizer(sublinear_tf=True, max_df=0.25, stop_words=stopwords)
X_train = tfidf_dt.fit_transform(X_train_origin)
print("Vectorized.")
svd = TruncatedSVD(n_components=150, algorithm='arpack', random_state=42)
print("SVD prepared.")
X_train = svd.fit_transform(X_train)
print("SVD finished.")
DT = DecisionTreeClassifier(criterion='entropy', min_samples_split=420, max_depth=7, random_state=42)
DT.fit(X_train, Y_train)
print("Trained.")
# Training-set class probabilities, consumed later by the stacking ensemble.
Y_probas_train_DT = DT.predict_proba(X_train)
print("Probabilities predicted.")
# --- Random-forest base classifier: TF-IDF -> truncated SVD -> 180-tree forest ---
print("RF Classifier training and results:")
tfidf_rf = TfidfVectorizer(sublinear_tf=True, max_df=0.21, stop_words=stopwords)
X_train = tfidf_rf.fit_transform(X_train_origin)
print("Vectorized.")
svd = TruncatedSVD(n_components=150, algorithm='arpack', random_state=42)
print("SVD prepared.")
X_train = svd.fit_transform(X_train)
print("SVD finished.")
RF = RandomForestClassifier(n_estimators=180, criterion='gini', max_depth=None, min_samples_split=2, random_state=42)
RF.fit(X_train, Y_train)
print("Trained.")
# Training-set class probabilities, consumed later by the stacking ensemble.
Y_probas_train_RF = RF.predict_proba(X_train)
print("Probabilities predicted.")
# --- Stacking ensemble: assemble the meta-level feature matrix ---
# One column per base learner, holding its P(class == 1) on the training set;
# each column has shape (m, 1), m = number of training instances.
Y_class1_train_svm = Y_probas_train_svm[:, 1].reshape(-1, 1)
Y_class1_train_knn = Y_probas_train_knn[:, 1].reshape(-1, 1)
Y_class1_train_LR = Y_probas_train_LR[:, 1].reshape(-1, 1)
Y_class1_train_DT = Y_probas_train_DT[:, 1].reshape(-1, 1)
Y_class1_train_RF = Y_probas_train_RF[:, 1].reshape(-1, 1)
# Stack the five probability columns side by side -> final shape (m, 5).
X_meta_train = np.hstack((Y_class1_train_svm, Y_class1_train_knn, Y_class1_train_LR, Y_class1_train_DT, Y_class1_train_RF))
Y_meta_train = Y_train
# Validation curve over the meta-classifier's regularization strength C
# (accuracy scoring).
# BUG FIX: this curve previously trained on X_train, which at this point still
# holds the RF section's SVD features, instead of the stacked meta-features
# X_meta_train used by all three sibling validation curves below.
# param_name/param_range are passed as keywords (keyword-only in sklearn >= 1.0).
x = [0.1, 1, 10, 100, 1000, 1100, 1300, 1500]
train_scores, valid_scores = validation_curve(LogisticRegression(random_state=42), X_meta_train, Y_meta_train, param_name="C", param_range=x, cv=3, verbose=1000, n_jobs=-1, scoring='accuracy')
# Average the per-fold scores for plotting.
train_scores = np.mean(train_scores, axis=1)
valid_scores = np.mean(valid_scores, axis=1)
plt.plot(x, train_scores, label="Train score")
plt.plot(x, valid_scores, label="Validation score")
plt.grid(True)
plt.xlabel("C values")
plt.ylabel("accuracy Score")
# Clamp the axes to the observed score range.
plt.axis([0, max(x) + 0.001, min(min(train_scores), min(valid_scores)), max(max(train_scores), max(valid_scores))])
plt.title("Validation Curve LR-C")
plt.legend()
plt.show()
plt.clf()
# Validation curve: meta-LR solver choice, scored by accuracy.
x = [1, 2, 3, 4, 5]
solver_grid = ['newton-cg', 'lbfgs', 'liblinear', 'sag', 'saga']
train_scores, valid_scores = validation_curve(LogisticRegression(random_state=42), X_meta_train, Y_meta_train, param_name="solver", param_range=solver_grid, cv=3, verbose=1000, n_jobs=-1, scoring='accuracy')
# Fold-averaged scores.
train_scores = np.mean(train_scores, axis=1)
valid_scores = np.mean(valid_scores, axis=1)
plt.plot(x, train_scores, label="Train score")
plt.plot(x, valid_scores, label="Validation score")
plt.grid(True)
plt.xlabel("Solver")
plt.ylabel("accuracy Score")
score_lo = min(min(train_scores), min(valid_scores))
score_hi = max(max(train_scores), max(valid_scores))
plt.axis([0, max(x) + 0.001, score_lo, score_hi])
plt.title("Validation Curve LR-Solver")
plt.legend()
plt.show()
plt.clf()
# Validation curve: meta-LR regularization strength C, scored by F1.
x = [0.1, 1, 10, 100, 1000, 1100, 1300, 1500]
train_scores, valid_scores = validation_curve(LogisticRegression(random_state=42), X_meta_train, Y_meta_train, param_name="C", param_range=x, cv=3, verbose=1000, n_jobs=-1, scoring='f1')
# Fold-averaged scores.
train_scores = np.mean(train_scores, axis=1)
valid_scores = np.mean(valid_scores, axis=1)
plt.plot(x, train_scores, label="Train score")
plt.plot(x, valid_scores, label="Validation score")
plt.grid(True)
plt.xlabel("C values")
plt.ylabel("F1 Score")
score_lo = min(min(train_scores), min(valid_scores))
score_hi = max(max(train_scores), max(valid_scores))
plt.axis([0, max(x) + 0.001, score_lo, score_hi])
plt.title("Validation Curve LR-C")
plt.legend()
plt.show()
plt.clf()
# Validation curve: meta-LR solver choice, scored by F1.
x = [1, 2, 3, 4, 5]
solver_grid = ['newton-cg', 'lbfgs', 'liblinear', 'sag', 'saga']
train_scores, valid_scores = validation_curve(LogisticRegression(random_state=42), X_meta_train, Y_meta_train, param_name="solver", param_range=solver_grid, cv=3, verbose=1000, n_jobs=-1, scoring='f1')
# Fold-averaged scores.
train_scores = np.mean(train_scores, axis=1)
valid_scores = np.mean(valid_scores, axis=1)
plt.plot(x, train_scores, label="Train score")
plt.plot(x, valid_scores, label="Validation score")
plt.grid(True)
plt.xlabel("Solver")
plt.ylabel("F1 Score")
score_lo = min(min(train_scores), min(valid_scores))
score_hi = max(max(train_scores), max(valid_scores))
plt.axis([0, max(x) + 0.001, score_lo, score_hi])
plt.title("Validation Curve LR-Solver")
plt.legend()
plt.show()
plt.clf()
"numpy.mean",
"matplotlib.pyplot.grid",
"pandas.read_csv",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.show",
"matplotlib.pyplot.xlabel",
"sklearn.tree.DecisionTreeClassifier",
"sklearn.neighbors.KNeighborsClassifier",
"sklearn.ensemble.RandomForestClassifier",
"sklearn.decomposition.TruncatedS... | [((730, 774), 'pandas.read_csv', 'pd.read_csv', (['"""train_gossipcop_vol2.csv"""', '""","""'], {}), "('train_gossipcop_vol2.csv', ',')\n", (741, 774), True, 'import pandas as pd\n'), ((995, 1064), 'sklearn.feature_extraction.text.TfidfVectorizer', 'TfidfVectorizer', ([], {'sublinear_tf': '(True)', 'max_df': '(0.29)', 'stop_words': 'stopwords'}), '(sublinear_tf=True, max_df=0.29, stop_words=stopwords)\n', (1010, 1064), False, 'from sklearn.feature_extraction.text import TfidfVectorizer\n'), ((1153, 1220), 'sklearn.decomposition.TruncatedSVD', 'TruncatedSVD', ([], {'n_components': '(150)', 'algorithm': '"""arpack"""', 'random_state': '(42)'}), "(n_components=150, algorithm='arpack', random_state=42)\n", (1165, 1220), False, 'from sklearn.decomposition import TruncatedSVD\n'), ((1312, 1385), 'sklearn.svm.SVC', 'SVC', ([], {'C': '(10)', 'gamma': '"""scale"""', 'kernel': '"""rbf"""', 'random_state': '(42)', 'probability': '(True)'}), "(C=10, gamma='scale', kernel='rbf', random_state=42, probability=True)\n", (1315, 1385), False, 'from sklearn.svm import SVC\n'), ((1600, 1669), 'sklearn.feature_extraction.text.TfidfVectorizer', 'TfidfVectorizer', ([], {'sublinear_tf': '(True)', 'max_df': '(0.56)', 'stop_words': 'stopwords'}), '(sublinear_tf=True, max_df=0.56, stop_words=stopwords)\n', (1615, 1669), False, 'from sklearn.feature_extraction.text import TfidfVectorizer\n'), ((1758, 1825), 'sklearn.decomposition.TruncatedSVD', 'TruncatedSVD', ([], {'n_components': '(150)', 'algorithm': '"""arpack"""', 'random_state': '(42)'}), "(n_components=150, algorithm='arpack', random_state=42)\n", (1770, 1825), False, 'from sklearn.decomposition import TruncatedSVD\n'), ((1917, 1992), 'sklearn.neighbors.KNeighborsClassifier', 'KNeighborsClassifier', ([], {'n_neighbors': '(7)', 'weights': '"""distance"""', 'metric': '"""euclidean"""'}), "(n_neighbors=7, weights='distance', metric='euclidean')\n", (1937, 1992), False, 'from sklearn.neighbors import 
KNeighborsClassifier\n'), ((2204, 2273), 'sklearn.feature_extraction.text.TfidfVectorizer', 'TfidfVectorizer', ([], {'sublinear_tf': '(True)', 'max_df': '(0.65)', 'stop_words': 'stopwords'}), '(sublinear_tf=True, max_df=0.65, stop_words=stopwords)\n', (2219, 2273), False, 'from sklearn.feature_extraction.text import TfidfVectorizer\n'), ((2361, 2428), 'sklearn.decomposition.TruncatedSVD', 'TruncatedSVD', ([], {'n_components': '(150)', 'algorithm': '"""arpack"""', 'random_state': '(42)'}), "(n_components=150, algorithm='arpack', random_state=42)\n", (2373, 2428), False, 'from sklearn.decomposition import TruncatedSVD\n'), ((2519, 2614), 'sklearn.linear_model.LogisticRegression', 'LogisticRegression', ([], {'C': '(100)', 'penalty': '"""l1"""', 'solver': '"""liblinear"""', 'max_iter': '(1000)', 'random_state': '(42)'}), "(C=100, penalty='l1', solver='liblinear', max_iter=1000,\n random_state=42)\n", (2537, 2614), False, 'from sklearn.linear_model import LogisticRegression\n'), ((2825, 2894), 'sklearn.feature_extraction.text.TfidfVectorizer', 'TfidfVectorizer', ([], {'sublinear_tf': '(True)', 'max_df': '(0.25)', 'stop_words': 'stopwords'}), '(sublinear_tf=True, max_df=0.25, stop_words=stopwords)\n', (2840, 2894), False, 'from sklearn.feature_extraction.text import TfidfVectorizer\n'), ((2982, 3049), 'sklearn.decomposition.TruncatedSVD', 'TruncatedSVD', ([], {'n_components': '(150)', 'algorithm': '"""arpack"""', 'random_state': '(42)'}), "(n_components=150, algorithm='arpack', random_state=42)\n", (2994, 3049), False, 'from sklearn.decomposition import TruncatedSVD\n'), ((3140, 3241), 'sklearn.tree.DecisionTreeClassifier', 'DecisionTreeClassifier', ([], {'criterion': '"""entropy"""', 'max_depth': '(7)', 'min_samples_split': '(420)', 'random_state': '(42)'}), "(criterion='entropy', max_depth=7, min_samples_split=\n 420, random_state=42)\n", (3162, 3241), False, 'from sklearn.tree import DecisionTreeClassifier\n'), ((3450, 3519), 
'sklearn.feature_extraction.text.TfidfVectorizer', 'TfidfVectorizer', ([], {'sublinear_tf': '(True)', 'max_df': '(0.21)', 'stop_words': 'stopwords'}), '(sublinear_tf=True, max_df=0.21, stop_words=stopwords)\n', (3465, 3519), False, 'from sklearn.feature_extraction.text import TfidfVectorizer\n'), ((3607, 3674), 'sklearn.decomposition.TruncatedSVD', 'TruncatedSVD', ([], {'n_components': '(150)', 'algorithm': '"""arpack"""', 'random_state': '(42)'}), "(n_components=150, algorithm='arpack', random_state=42)\n", (3619, 3674), False, 'from sklearn.decomposition import TruncatedSVD\n'), ((3765, 3882), 'sklearn.ensemble.RandomForestClassifier', 'RandomForestClassifier', ([], {'criterion': '"""gini"""', 'max_depth': 'None', 'min_samples_split': '(2)', 'n_estimators': '(180)', 'random_state': '(42)'}), "(criterion='gini', max_depth=None, min_samples_split=\n 2, n_estimators=180, random_state=42)\n", (3787, 3882), False, 'from sklearn.ensemble import RandomForestClassifier\n'), ((4400, 4525), 'numpy.concatenate', 'np.concatenate', (['(Y_class1_train_svm, Y_class1_train_knn, Y_class1_train_LR,\n Y_class1_train_DT, Y_class1_train_RF)'], {'axis': '(1)'}), '((Y_class1_train_svm, Y_class1_train_knn, Y_class1_train_LR,\n Y_class1_train_DT, Y_class1_train_RF), axis=1)\n', (4414, 4525), True, 'import numpy as np\n'), ((4827, 4856), 'numpy.mean', 'np.mean', (['train_scores'], {'axis': '(1)'}), '(train_scores, axis=1)\n', (4834, 4856), True, 'import numpy as np\n'), ((4872, 4901), 'numpy.mean', 'np.mean', (['valid_scores'], {'axis': '(1)'}), '(valid_scores, axis=1)\n', (4879, 4901), True, 'import numpy as np\n'), ((4903, 4949), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'train_scores'], {'label': '"""Train score"""'}), "(x, train_scores, label='Train score')\n", (4911, 4949), True, 'import matplotlib.pyplot as plt\n'), ((4950, 5001), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'valid_scores'], {'label': '"""Validation score"""'}), "(x, valid_scores, label='Validation score')\n", 
(4958, 5001), True, 'import matplotlib.pyplot as plt\n'), ((5004, 5018), 'matplotlib.pyplot.grid', 'plt.grid', (['(True)'], {}), '(True)\n', (5012, 5018), True, 'import matplotlib.pyplot as plt\n'), ((5019, 5041), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""C values"""'], {}), "('C values')\n", (5029, 5041), True, 'import matplotlib.pyplot as plt\n'), ((5042, 5070), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""accuracy Score"""'], {}), "('accuracy Score')\n", (5052, 5070), True, 'import matplotlib.pyplot as plt\n'), ((5187, 5221), 'matplotlib.pyplot.title', 'plt.title', (['"""Validation Curve LR-C"""'], {}), "('Validation Curve LR-C')\n", (5196, 5221), True, 'import matplotlib.pyplot as plt\n'), ((5222, 5234), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (5232, 5234), True, 'import matplotlib.pyplot as plt\n'), ((5236, 5246), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5244, 5246), True, 'import matplotlib.pyplot as plt\n'), ((5248, 5257), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (5255, 5257), True, 'import matplotlib.pyplot as plt\n'), ((5523, 5552), 'numpy.mean', 'np.mean', (['train_scores'], {'axis': '(1)'}), '(train_scores, axis=1)\n', (5530, 5552), True, 'import numpy as np\n'), ((5568, 5597), 'numpy.mean', 'np.mean', (['valid_scores'], {'axis': '(1)'}), '(valid_scores, axis=1)\n', (5575, 5597), True, 'import numpy as np\n'), ((5599, 5645), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'train_scores'], {'label': '"""Train score"""'}), "(x, train_scores, label='Train score')\n", (5607, 5645), True, 'import matplotlib.pyplot as plt\n'), ((5646, 5697), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'valid_scores'], {'label': '"""Validation score"""'}), "(x, valid_scores, label='Validation score')\n", (5654, 5697), True, 'import matplotlib.pyplot as plt\n'), ((5700, 5714), 'matplotlib.pyplot.grid', 'plt.grid', (['(True)'], {}), '(True)\n', (5708, 5714), True, 'import matplotlib.pyplot as plt\n'), ((5715, 5735), 
'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Solver"""'], {}), "('Solver')\n", (5725, 5735), True, 'import matplotlib.pyplot as plt\n'), ((5736, 5764), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""accuracy Score"""'], {}), "('accuracy Score')\n", (5746, 5764), True, 'import matplotlib.pyplot as plt\n'), ((5881, 5920), 'matplotlib.pyplot.title', 'plt.title', (['"""Validation Curve LR-Solver"""'], {}), "('Validation Curve LR-Solver')\n", (5890, 5920), True, 'import matplotlib.pyplot as plt\n'), ((5921, 5933), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (5931, 5933), True, 'import matplotlib.pyplot as plt\n'), ((5935, 5945), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5943, 5945), True, 'import matplotlib.pyplot as plt\n'), ((5947, 5956), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (5954, 5956), True, 'import matplotlib.pyplot as plt\n'), ((6192, 6221), 'numpy.mean', 'np.mean', (['train_scores'], {'axis': '(1)'}), '(train_scores, axis=1)\n', (6199, 6221), True, 'import numpy as np\n'), ((6237, 6266), 'numpy.mean', 'np.mean', (['valid_scores'], {'axis': '(1)'}), '(valid_scores, axis=1)\n', (6244, 6266), True, 'import numpy as np\n'), ((6268, 6314), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'train_scores'], {'label': '"""Train score"""'}), "(x, train_scores, label='Train score')\n", (6276, 6314), True, 'import matplotlib.pyplot as plt\n'), ((6315, 6366), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'valid_scores'], {'label': '"""Validation score"""'}), "(x, valid_scores, label='Validation score')\n", (6323, 6366), True, 'import matplotlib.pyplot as plt\n'), ((6369, 6383), 'matplotlib.pyplot.grid', 'plt.grid', (['(True)'], {}), '(True)\n', (6377, 6383), True, 'import matplotlib.pyplot as plt\n'), ((6384, 6406), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""C values"""'], {}), "('C values')\n", (6394, 6406), True, 'import matplotlib.pyplot as plt\n'), ((6407, 6429), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""F1 
Score"""'], {}), "('F1 Score')\n", (6417, 6429), True, 'import matplotlib.pyplot as plt\n'), ((6546, 6580), 'matplotlib.pyplot.title', 'plt.title', (['"""Validation Curve LR-C"""'], {}), "('Validation Curve LR-C')\n", (6555, 6580), True, 'import matplotlib.pyplot as plt\n'), ((6581, 6593), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (6591, 6593), True, 'import matplotlib.pyplot as plt\n'), ((6595, 6605), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (6603, 6605), True, 'import matplotlib.pyplot as plt\n'), ((6607, 6616), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (6614, 6616), True, 'import matplotlib.pyplot as plt\n'), ((6876, 6905), 'numpy.mean', 'np.mean', (['train_scores'], {'axis': '(1)'}), '(train_scores, axis=1)\n', (6883, 6905), True, 'import numpy as np\n'), ((6921, 6950), 'numpy.mean', 'np.mean', (['valid_scores'], {'axis': '(1)'}), '(valid_scores, axis=1)\n', (6928, 6950), True, 'import numpy as np\n'), ((6952, 6998), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'train_scores'], {'label': '"""Train score"""'}), "(x, train_scores, label='Train score')\n", (6960, 6998), True, 'import matplotlib.pyplot as plt\n'), ((6999, 7050), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'valid_scores'], {'label': '"""Validation score"""'}), "(x, valid_scores, label='Validation score')\n", (7007, 7050), True, 'import matplotlib.pyplot as plt\n'), ((7053, 7067), 'matplotlib.pyplot.grid', 'plt.grid', (['(True)'], {}), '(True)\n', (7061, 7067), True, 'import matplotlib.pyplot as plt\n'), ((7068, 7088), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Solver"""'], {}), "('Solver')\n", (7078, 7088), True, 'import matplotlib.pyplot as plt\n'), ((7089, 7111), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""F1 Score"""'], {}), "('F1 Score')\n", (7099, 7111), True, 'import matplotlib.pyplot as plt\n'), ((7228, 7267), 'matplotlib.pyplot.title', 'plt.title', (['"""Validation Curve LR-Solver"""'], {}), "('Validation Curve LR-Solver')\n", (7237, 
7267), True, 'import matplotlib.pyplot as plt\n'), ((7268, 7280), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (7278, 7280), True, 'import matplotlib.pyplot as plt\n'), ((7282, 7292), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (7290, 7292), True, 'import matplotlib.pyplot as plt\n'), ((7294, 7303), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (7301, 7303), True, 'import matplotlib.pyplot as plt\n'), ((4688, 4723), 'sklearn.linear_model.LogisticRegression', 'LogisticRegression', ([], {'random_state': '(42)'}), '(random_state=42)\n', (4706, 4723), False, 'from sklearn.linear_model import LogisticRegression\n'), ((5325, 5360), 'sklearn.linear_model.LogisticRegression', 'LogisticRegression', ([], {'random_state': '(42)'}), '(random_state=42)\n', (5343, 5360), False, 'from sklearn.linear_model import LogisticRegression\n'), ((6054, 6089), 'sklearn.linear_model.LogisticRegression', 'LogisticRegression', ([], {'random_state': '(42)'}), '(random_state=42)\n', (6072, 6089), False, 'from sklearn.linear_model import LogisticRegression\n'), ((6684, 6719), 'sklearn.linear_model.LogisticRegression', 'LogisticRegression', ([], {'random_state': '(42)'}), '(random_state=42)\n', (6702, 6719), False, 'from sklearn.linear_model import LogisticRegression\n')] |
# Implementation of various helper functions
import matplotlib.pyplot as plt
import numpy as np
from numpy import pi
from scipy.integrate import quad
from scipy.special import kve
import sys
# Electron rest energy in eV
mc2 = 0.51099895000e6
def averagedIonizationCrossSection(T, C, DI_eV, betaStar):
    """
    Average the kinetic ionization cross-section over a unit-density
    Maxwell-Jüttner distribution for each temperature in ``T``, i.e.
    evaluate 4*pi*c * integral( p^3/gamma * sigma(p) * fMe(p), 0, inf ).

    :param T: numpy array of temperatures (eV, same normalization as mc2).
    :param C: cross-section pre-factor (see kineticIonizationContribution).
    :param DI_eV: ionization energy in eV.
    :param betaStar: near-threshold shape parameter.
    :return: numpy array with one averaged value per entry of ``T``.
    """
    c = 299792458.0  # speed of light (m/s)
    nT = T.size
    I_i = np.zeros(nT)
    def intg(p, temperature):
        # Integrand: p^2 * (p/gamma) * sigma(p) * fMe(p); combined with the
        # leading factor c below, p/gamma supplies the particle speed.
        pf = p**3 / np.sqrt(1+p**2)
        kic = kineticIonizationContribution(p, C, DI_eV, betaStar)
        fMe = maxwellJuttnerDistribution(p, 1, temperature)
        return pf*kic*fMe
    for k in range(nT):
        # SciPy's "quad()" seems to sometimes have problems with this integrand
        # over the infinite interval, so we split the integral into two parts:
        # one over an interval which should contain most, if not all, of the
        # interesting bits of the integrand, and one part covering the rest.
        # pmax corresponds to a kinetic energy of 800*T[k].
        pmax = np.sqrt((800*T[k]/mc2 + 1)**2 - 1)
        q = quad(lambda p : intg(p, T[k]), 0, pmax, epsabs=0)[0] + quad(lambda p : intg(p, T[k]), pmax, np.inf, epsabs=0)[0]
        # 4*pi from the angular integral; c converts p/gamma (= beta) to speed.
        I_i[k] = 4*pi*c*q
    return I_i
def maxwellJuttnerDistribution(p, n, T):
    """
    Maxwell-Jüttner momentum distribution at momentum ``p`` (units of mc),
    density ``n`` and temperature ``T`` (eV), normalized such that
        integral( 4*pi*p^2 * fMe, 0, inf )
    yields ``n`` for all values of ``T``.
    """
    Theta = T / mc2
    # Normalization 4*pi*Theta*K_2(1/Theta). kve() is the exponentially
    # scaled Bessel function K_2(x)*exp(x); its extra exp(1/Theta) factor is
    # matched by using (gamma-1) rather than gamma in the exponent below.
    normalization = 4*pi*Theta*kve(2, 1/Theta)
    gamma = np.sqrt(1+p**2)
    # gamma-1 written as p^2/(gamma+1) to avoid cancellation at small p.
    gammaMinusOne = p*p/(gamma+1)
    return n/normalization * np.exp(-gammaMinusOne/Theta)
def kineticIonizationContribution(p, C, DI_eV, betaStar):
    """
    Evaluates the total electron impact ionization cross-section.

    A non-relativistic expression and an ultra-relativistic expression are
    both evaluated and blended with a smooth logistic switch in the incident
    kinetic energy. Accepts scalar or numpy-array momenta; the cross-section
    is zero below the ionization threshold (U <= 1).

    :param p: Incident electron momentum in units mc2.
    :param C: Pre-factor (undetermined by the theory, or order ~1-10).
    :param DI_eV: Ionization energy in eV.
    :param betaStar: Parameter which sets the near-threshold modification to
                     the cross-section.
    :return: Cross-section (scales as a0^2, presumably m^2 -- a0 looks like
             the Bohr radius in metres; verify against the source model).
    """
    global mc2
    # a0: Bohr radius (m); Ry: Rydberg energy normalized to mc2.
    a0 = 5.29e-11
    Ry = 13.6 / mc2
    gamma = np.sqrt(1+p**2)
    # Kinetic energy in units of mc2; p^2/(gamma+1) avoids cancellation for small p.
    Ek = p*p/(gamma+1)
    DI = DI_eV/mc2
    if DI <= 0:
        raise Exception('Invalid ionization energy provided (<=0)')
    # U: kinetic energy in threshold units; ionization requires U > 1.
    U = Ek/DI
    if np.isscalar(U): # scalar momentum input
        if U > 1:
            I_nonRel = pi*a0**2*C*(Ry/DI)**2 * np.log(U)**(1+betaStar/U)/U
        else:
            I_nonRel = 0
    else:
        # Vector momentum input: evaluate everywhere, then zero out U <= 1.
        I_nonRel = pi*a0**2*C*(Ry/DI)**2 * np.log(U)**(1+betaStar/U)/U
        I_nonRel[np.where(U<=1)] = 0
    # v/c
    beta = p/gamma
    # Fine structure constant
    alpha = 1/137
    # Expression appearing inside the log term of
    # the ultra-relativistic formula
    logArg = p**2/(2*DI)
    if np.isscalar(U):
        if U > 1:
            I_rel = pi*a0**2*alpha**2 * C*(Ry/DI) * (np.log(logArg) - beta**2)
        else:
            I_rel = 0
    else:
        I_rel = pi*a0**2*alpha**2 * C*(Ry/DI) * (np.log(logArg) - beta**2)
        I_rel[np.where(U<=1)] = 0
    Ek_eV = Ek*mc2
    # Logistic switch centered at Ek_eV = 1e5 (i.e. 100 keV): S ~ 0 selects
    # the non-relativistic branch, S ~ 1 the relativistic one.
    S = 1/(1+np.exp(1-Ek_eV*1e-5))
    I_kinetic = (1-S)*I_nonRel + S*I_rel
    return I_kinetic
if __name__ == '__main__':
    # Spot-check: tabulate and plot the integrand of the averaged ionization
    # rate for a single temperature, printing it as a MATLAB-style array.
    C = 3.024126380275995
    DI_eV = 13.598434490   # ionization energy in eV (hydrogen value)
    betaStar = 0.291350946928159
    T = 2.5113659405400823  # temperature in eV
    # Same momentum cut as in averagedIonizationCrossSection: kinetic energy 800*T.
    pmax = np.sqrt((800*T/mc2 + 1)**2 - 1)
    p = np.linspace(0, pmax, 1000)
    pf = p**3 / np.sqrt(1+p**2)
    kic, fMe = np.zeros(p.shape), np.zeros(p.shape)
    for i in range(p.size):
        kic[i] = kineticIonizationContribution(p[i], C, DI_eV, betaStar)
        fMe[i] = maxwellJuttnerDistribution(p[i], 1, T)
    print('II = [',end="")
    for i in range(p.size):
        print('{:.12e},'.format(pf[i]*kic[i]*fMe[i]), end="")
    print('];')
    plt.semilogy(p, pf*kic*fMe)
    plt.show()
| [
"matplotlib.pyplot.semilogy",
"numpy.sqrt",
"numpy.isscalar",
"numpy.where",
"numpy.log",
"numpy.exp",
"numpy.zeros",
"numpy.linspace",
"scipy.special.kve",
"matplotlib.pyplot.show"
] | [((368, 380), 'numpy.zeros', 'np.zeros', (['nT'], {}), '(nT)\n', (376, 380), True, 'import numpy as np\n'), ((1537, 1556), 'numpy.sqrt', 'np.sqrt', (['(1 + p ** 2)'], {}), '(1 + p ** 2)\n', (1544, 1556), True, 'import numpy as np\n'), ((2157, 2176), 'numpy.sqrt', 'np.sqrt', (['(1 + p ** 2)'], {}), '(1 + p ** 2)\n', (2164, 2176), True, 'import numpy as np\n'), ((2333, 2347), 'numpy.isscalar', 'np.isscalar', (['U'], {}), '(U)\n', (2344, 2347), True, 'import numpy as np\n'), ((2839, 2853), 'numpy.isscalar', 'np.isscalar', (['U'], {}), '(U)\n', (2850, 2853), True, 'import numpy as np\n'), ((3389, 3426), 'numpy.sqrt', 'np.sqrt', (['((800 * T / mc2 + 1) ** 2 - 1)'], {}), '((800 * T / mc2 + 1) ** 2 - 1)\n', (3396, 3426), True, 'import numpy as np\n'), ((3429, 3455), 'numpy.linspace', 'np.linspace', (['(0)', 'pmax', '(1000)'], {}), '(0, pmax, 1000)\n', (3440, 3455), True, 'import numpy as np\n'), ((3847, 3878), 'matplotlib.pyplot.semilogy', 'plt.semilogy', (['p', '(pf * kic * fMe)'], {}), '(p, pf * kic * fMe)\n', (3859, 3878), True, 'import matplotlib.pyplot as plt\n'), ((3879, 3889), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3887, 3889), True, 'import matplotlib.pyplot as plt\n'), ((954, 994), 'numpy.sqrt', 'np.sqrt', (['((800 * T[k] / mc2 + 1) ** 2 - 1)'], {}), '((800 * T[k] / mc2 + 1) ** 2 - 1)\n', (961, 994), True, 'import numpy as np\n'), ((1508, 1525), 'scipy.special.kve', 'kve', (['(2)', '(1 / Theta)'], {}), '(2, 1 / Theta)\n', (1511, 1525), False, 'from scipy.special import kve\n'), ((1603, 1627), 'numpy.exp', 'np.exp', (['(-gMinus1 / Theta)'], {}), '(-gMinus1 / Theta)\n', (1609, 1627), True, 'import numpy as np\n'), ((3473, 3492), 'numpy.sqrt', 'np.sqrt', (['(1 + p ** 2)'], {}), '(1 + p ** 2)\n', (3480, 3492), True, 'import numpy as np\n'), ((3504, 3521), 'numpy.zeros', 'np.zeros', (['p.shape'], {}), '(p.shape)\n', (3512, 3521), True, 'import numpy as np\n'), ((3523, 3540), 'numpy.zeros', 'np.zeros', (['p.shape'], {}), '(p.shape)\n', (3531, 
3540), True, 'import numpy as np\n'), ((432, 451), 'numpy.sqrt', 'np.sqrt', (['(1 + p ** 2)'], {}), '(1 + p ** 2)\n', (439, 451), True, 'import numpy as np\n'), ((2618, 2634), 'numpy.where', 'np.where', (['(U <= 1)'], {}), '(U <= 1)\n', (2626, 2634), True, 'import numpy as np\n'), ((3087, 3103), 'numpy.where', 'np.where', (['(U <= 1)'], {}), '(U <= 1)\n', (3095, 3103), True, 'import numpy as np\n'), ((3152, 3177), 'numpy.exp', 'np.exp', (['(1 - Ek_eV * 1e-05)'], {}), '(1 - Ek_eV * 1e-05)\n', (3158, 3177), True, 'import numpy as np\n'), ((3047, 3061), 'numpy.log', 'np.log', (['logArg'], {}), '(logArg)\n', (3053, 3061), True, 'import numpy as np\n'), ((2573, 2582), 'numpy.log', 'np.log', (['U'], {}), '(U)\n', (2579, 2582), True, 'import numpy as np\n'), ((2926, 2940), 'numpy.log', 'np.log', (['logArg'], {}), '(logArg)\n', (2932, 2940), True, 'import numpy as np\n'), ((2453, 2462), 'numpy.log', 'np.log', (['U'], {}), '(U)\n', (2459, 2462), True, 'import numpy as np\n')] |
import numpy as np
# Extend each example's label vectors with an extra "all zero" class slot:
# column 1 grows from 4 to 5 entries and column 2 from 2 to 3 entries; the
# appended final entry is 1 exactly when the original vector was all zeros.
raw = np.load('data/training_data_bounty_attack_mobilenet.npy')
converted_data = []
for data in raw:
    frame = data[0]
    if data[1] == [0, 0, 0, 0]:
        actions = [0, 0, 0, 0, 1]
    else:
        actions = data[1] + [0]
    if data[2] == [0, 0]:
        buttons = [0, 0, 1]
    else:
        buttons = data[2] + [0]
    converted_data.append([frame, actions, buttons])
np.save('data/converted2.npy', converted_data)
"numpy.load",
"numpy.save"
] | [((27, 84), 'numpy.load', 'np.load', (['"""data/training_data_bounty_attack_mobilenet.npy"""'], {}), "('data/training_data_bounty_attack_mobilenet.npy')\n", (34, 84), True, 'import numpy as np\n'), ((379, 425), 'numpy.save', 'np.save', (['"""data/converted2.npy"""', 'converted_data'], {}), "('data/converted2.npy', converted_data)\n", (386, 425), True, 'import numpy as np\n')] |
from flask import Flask, request, url_for, redirect, render_template, jsonify, abort
import pandas as pd
import pickle
import numpy as np
import json
from sklearn.preprocessing import StandardScaler
# Flask service exposing a water-potability classifier.
app = Flask(__name__)
# Unpickle the trained classifier once, at import time.
model = pickle.load(open('model.pkl', 'rb'))
# Feature columns in the order the model expects them.
cols = ['ph', 'Hardness', 'Solids', 'Chloramines', 'Sulfate', 'Conductivity', 'Organic_carbon', 'Trihalomethanes', 'Turbidity']


@app.route('/')
def home():
    """Serve the landing page with the input form."""
    return render_template("index.html")


@app.route('/predict', methods=['POST'])
def predict():
    """Score the submitted form values and return a JSON potability verdict."""
    scaler = StandardScaler()
    training_df = pd.read_csv("static/data/water_potability.csv")
    # Re-fit the scaler on the training features each request -- presumably
    # to reproduce the preprocessing used at training time (verify).
    scaler.fit(training_df.drop(columns="Potability", axis=1))
    form_values = list(request.form.values())
    sample = pd.DataFrame([np.array(form_values)], columns=cols)
    prediction = int(model.predict(scaler.transform(sample))[0])
    return json.dumps({'potable': prediction})


if __name__ == '__main__':
    app.run(debug=True)
"flask.render_template",
"pandas.read_csv",
"flask.Flask",
"json.dumps",
"sklearn.preprocessing.StandardScaler",
"numpy.array",
"flask.request.form.values",
"pandas.DataFrame"
] | [((206, 221), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (211, 221), False, 'from flask import Flask, request, url_for, redirect, render_template, jsonify, abort\n'), ((436, 465), 'flask.render_template', 'render_template', (['"""index.html"""'], {}), "('index.html')\n", (451, 465), False, 'from flask import Flask, request, url_for, redirect, render_template, jsonify, abort\n'), ((535, 551), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), '()\n', (549, 551), False, 'from sklearn.preprocessing import StandardScaler\n'), ((561, 608), 'pandas.read_csv', 'pd.read_csv', (['"""static/data/water_potability.csv"""'], {}), "('static/data/water_potability.csv')\n", (572, 608), True, 'import pandas as pd\n'), ((733, 755), 'numpy.array', 'np.array', (['int_features'], {}), '(int_features)\n', (741, 755), True, 'import numpy as np\n'), ((774, 809), 'pandas.DataFrame', 'pd.DataFrame', (['[final]'], {'columns': 'cols'}), '([final], columns=cols)\n', (786, 809), True, 'import pandas as pd\n'), ((1024, 1059), 'json.dumps', 'json.dumps', (["{'potable': prediction}"], {}), "({'potable': prediction})\n", (1034, 1059), False, 'import json\n'), ((698, 719), 'flask.request.form.values', 'request.form.values', ([], {}), '()\n', (717, 719), False, 'from flask import Flask, request, url_for, redirect, render_template, jsonify, abort\n')] |
import os
import sys
import os.path as op
import random
import torch
import torch.nn as nn
import six
import torch.optim as optim
from torch.autograd import Variable
import pandas as pd
import numpy as np
import pickle
import csv
from torch.utils.data import Dataset
from torch.utils.data import DataLoader
import filenames
from fullgram_model import Agreement_model
import POS_Tagger
from utils import deps_from_tsv, dump_template_waveforms
import time
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
def pad_sequences(sequences, maxlen=None, dtype='int32', padding='pre', truncating='pre', value=0.):
    """Pad a list of variable-length sequences into a 2D+ numpy array.

    Each sequence is truncated/padded to exactly ``maxlen`` entries (the
    longest sequence's length when ``maxlen`` is None); short sequences are
    filled with ``value``. ``padding`` / ``truncating`` select whether the
    start ('pre') or the end ('post') of a sequence is padded / cut.

    Fixes vs. the original: ``np.unicode_`` (removed in NumPy 2.0; it has
    been an alias of ``np.str_`` since NumPy 1.20) is no longer referenced,
    and the ``six.string_types`` check is replaced by plain ``str`` (they
    are equivalent on Python 3).

    :raises ValueError: for non-iterable input, incompatible dtype/value,
        inconsistent sample shapes, or unknown padding/truncating modes.
    """
    if not hasattr(sequences, '__len__'):
        raise ValueError('`sequences` must be iterable.')
    lengths = []
    for x in sequences:
        if not hasattr(x, '__len__'):
            raise ValueError('`sequences` must be a list of iterables. '
                             'Found non-iterable: ' + str(x))
        lengths.append(len(x))
    num_samples = len(sequences)
    if maxlen is None:
        maxlen = np.max(lengths)
    # Take the sample shape from the first non-empty sequence; consistency
    # for all others is verified in the main loop below.
    sample_shape = tuple()
    for s in sequences:
        if len(s) > 0:
            sample_shape = np.asarray(s).shape[1:]
            break
    is_dtype_str = np.issubdtype(dtype, np.str_)
    if isinstance(value, str) and dtype != object and not is_dtype_str:
        raise ValueError("`dtype` {} is not compatible with `value`'s type: {}\n"
                         "You should set `dtype=object` for variable length strings."
                         .format(dtype, type(value)))
    x = np.full((num_samples, maxlen) + sample_shape, value, dtype=dtype)
    for idx, s in enumerate(sequences):
        if not len(s):
            continue  # empty list/array keeps a fully padded row
        if truncating == 'pre':
            trunc = s[-maxlen:]
        elif truncating == 'post':
            trunc = s[:maxlen]
        else:
            raise ValueError('Truncating type "%s" '
                             'not understood' % truncating)
        # check `trunc` has expected shape
        trunc = np.asarray(trunc, dtype=dtype)
        if trunc.shape[1:] != sample_shape:
            raise ValueError('Shape of sample %s of sequence at position %s '
                             'is different from expected shape %s' %
                             (trunc.shape[1:], idx, sample_shape))
        if padding == 'post':
            x[idx, :len(trunc)] = trunc
        elif padding == 'pre':
            x[idx, -len(trunc):] = trunc
        else:
            raise ValueError('Padding type "%s" not understood' % padding)
    return x
class BatchedDataset(Dataset):
    """
    Map-style dataset over pre-padded inputs, labels and verb positions,
    used to generate batched training data through a DataLoader.
    """
    def __init__(self, x_train, y_train, verb_loc):
        super(BatchedDataset, self).__init__()
        # Inputs and labels must line up one-to-one.
        assert x_train.shape[0] == y_train.shape[0]
        self.x_train = x_train
        self.y_train = y_train
        self.verb_loc = verb_loc
        self.length = x_train.shape[0]

    def __getitem__(self, index):
        # One example: (padded token ids, label, verb position).
        return self.x_train[index], self.y_train[index], self.verb_loc[index]

    def __len__(self):
        return self.length
class ood_dataset(Dataset):
    """
    Map-style dataset for out-of-distribution evaluation: pairs each
    encoded input and its label with the original sentence string.
    """
    def __init__(self, x_sent, x_train, y_train):
        super(ood_dataset, self).__init__()
        # Encoded inputs and labels must line up one-to-one.
        assert x_train.shape[0] == y_train.shape[0]
        self.x_sent = x_sent
        self.x_train = x_train
        self.y_train = y_train
        self.length = x_train.shape[0]

    def __getitem__(self, index):
        # One example: (raw sentence, padded token ids, label).
        return self.x_sent[index], self.x_train[index], self.y_train[index]

    def __len__(self):
        return self.length
class Demarcated_dataset(Dataset):
    """
    Minimal (input, label) map-style dataset, used for the per-bucket
    ("demarcated") evaluation loaders.
    """
    def __init__(self, x_train, y_train):
        super(Demarcated_dataset, self).__init__()
        # Inputs and labels must line up one-to-one.
        assert x_train.shape[0] == y_train.shape[0]
        self.x_train = x_train
        self.y_train = y_train
        self.length = x_train.shape[0]

    def __getitem__(self, index):
        return self.x_train[index], self.y_train[index]

    def __len__(self):
        return self.length
class DECAY_RNN_Model(object):
serialized_attributes = ['vocab_to_ints', 'ints_to_vocab', 'filename',
'X_train', 'Y_train', 'deps_train',
'X_test', 'Y_test', 'deps_test']
def log(self, message):
with open('logs/' + self.output_filename, 'a') as file:
file.write(str(message) + '\n')
def log_demarcate_train(self, message):
with open('logs/demarcated_train_acc_' + self.output_filename, 'a') as file:
file.write(str(message) + '\n')
def log_demarcate_val(self, message):
with open('logs/demarcated_val_acc_' + self.output_filename, 'a') as file:
file.write(str(message) + '\n')
def log_val(self, message):
with open('logs/val_' + self.output_filename, 'a') as file:
file.write(str(message) + '\n')
def log_grad(self, message):
with open('logs/grad_' + self.output_filename, 'a') as file:
file.write(message + '\n')
def log_alpha(self,message):
with open('logs/alpha_' + self.output_filename, 'a') as file:
file.write(message + '\n')
def __init__(self, rnn_arch, filename=None, embedding_size=50, hidden_size = 50, output_size=10, num_layers=1, dropout=0.2,
maxlen=50, prop_train=0.9, mode='infreq_pos', vocab_file=filenames.vocab_file,
equalize_classes=False, criterion=None, len_after_verb=0,
output_filename='default.txt'):
'''
filename: TSV file with positive examples, or None if unserializing
criterion: dependencies that don't meet this criterion are excluded
(set to None to keep all dependencies)
verbose: passed to Keras (0 = no, 1 = progress bar, 2 = line per epoch)
'''
self.rnn_arch = rnn_arch
self.filename = filename
self.num_layers=num_layers
self.dropout=dropout
self.vocab_file = vocab_file
self.embedding_size = embedding_size
self.hidden_size = hidden_size
self.output_size = output_size
self.prop_train = prop_train
self.mode = mode
self.maxlen = maxlen
self.equalize_classes = equalize_classes
self.criterion = (lambda x: True) if criterion is None else criterion
self.len_after_verb = len_after_verb
self.output_filename = output_filename
    def create_train_test_dataloader(self):
        """Build ``self.Train_DataGenerator`` / ``self.Test_DataGenerator``.

        Moves the padded inputs, labels and per-example verb indices (taken
        from the dependency dicts) to ``device`` and wraps each split in a
        shuffled DataLoader over a BatchedDataset.
        """
        x_train = torch.tensor(self.X_train, dtype=torch.long).to(device)
        y_train = torch.tensor(self.Y_train).to(device)
        self.deps_train = list(self.deps_train)
        # One verb index per example, stacked into a single 1-D tensor.
        verb_loc_train = torch.stack([torch.tensor(elem["verb_index"]) for elem in self.deps_train]).to(device).squeeze()
        self.Train_DataGenerator = DataLoader(BatchedDataset(x_train, y_train, verb_loc_train), batch_size= self.train_bsz, shuffle=True, drop_last=False)
        x_test = torch.tensor(self.X_test, dtype=torch.long).to(device)
        y_test = torch.tensor(self.Y_test).to(device)
        self.deps_test = list(self.deps_test)
        verb_loc_test = torch.stack([torch.tensor(elem["verb_index"]) for elem in self.deps_test]).to(device).squeeze()
        self.Test_DataGenerator = DataLoader(BatchedDataset(x_test, y_test, verb_loc_test), batch_size= self.test_bsz, shuffle=True, drop_last=False)
    def create_train_test_dataloader_POS(self):
        """POS-tagging variant of ``create_train_test_dataloader``.

        Identical plumbing, but the targets are the per-token POS sequences
        (``self.POS_train`` / ``self.POS_test``) instead of the agreement labels.
        """
        x_train = torch.tensor(self.X_train, dtype=torch.long).to(device)
        pos_train = torch.tensor(self.POS_train).to(device)
        self.deps_train = list(self.deps_train)
        # One verb index per example, stacked into a single 1-D tensor.
        verb_loc_train = torch.stack([torch.tensor(elem["verb_index"]) for elem in self.deps_train]).to(device).squeeze()
        self.Train_DataGenerator = DataLoader(BatchedDataset(x_train, pos_train, verb_loc_train), batch_size= self.train_bsz, shuffle=True, drop_last=False)
        x_test = torch.tensor(self.X_test, dtype=torch.long).to(device)
        pos_test = torch.tensor(self.POS_test).to(device)
        self.deps_test = list(self.deps_test)
        verb_loc_test = torch.stack([torch.tensor(elem["verb_index"]) for elem in self.deps_test]).to(device).squeeze()
        self.Test_DataGenerator = DataLoader(BatchedDataset(x_test, pos_test, verb_loc_test), batch_size= self.test_bsz, shuffle=True, drop_last=False)
    def create_demarcated_dataloader(self):
        """Build one DataLoader per (n_intervening, n_diff_intervening) bucket.

        Consumes ``self.testing_dict`` / ``self.training_dict`` (produced by
        ``demark_testing``) and stores dicts of loaders on
        ``self.testing_demarcated_dataloader`` / ``self.training_demarcated_dataloader``.
        Buckets missing from ``self`` are silently skipped.
        """
        training_demarcated_dataloader={}
        testing_demarcated_dataloader={}
        if hasattr(self, 'testing_dict'):
            for key in self.testing_dict.keys():
                # Each bucket holds a list of (X, Y) pairs; unzip into columns.
                x_test, y_test = zip(*self.testing_dict[key])
                x_test=torch.tensor(list(x_test), dtype=torch.long).to(device)
                y_test=torch.tensor(list(y_test)).to(device)
                testing_demarcated_dataloader[key] = DataLoader(Demarcated_dataset(x_test, y_test), batch_size= self.test_bsz, shuffle=True, drop_last=False)
            self.testing_demarcated_dataloader = testing_demarcated_dataloader
        if hasattr(self, 'training_dict'):
            for key in self.training_dict.keys():
                x_train, y_train = zip(*self.training_dict[key])
                x_train=torch.tensor(list(x_train), dtype=torch.long).to(device)
                y_train=torch.tensor(list(y_train)).to(device)
                training_demarcated_dataloader[key] = DataLoader(Demarcated_dataset(x_train, y_train), batch_size= self.test_bsz, shuffle=True, drop_last=False)
            self.training_demarcated_dataloader = training_demarcated_dataloader
def create_OOD_dataloader(self, final_dict_testing):
# for every key, create a dict of dataloaders...
ood_dataloader_dict={}
for key in final_dict_testing.keys():
x_sent = final_dict_testing[key][0]
x = torch.tensor(final_dict_testing[key][1], dtype=torch.long).to(device)
y = torch.tensor(final_dict_testing[key][2]).to(device)
if len(x)==0: continue
ood_dataloader_dict[key]=DataLoader(ood_dataset(x_sent, x, y), batch_size= self.train_bsz, shuffle=True, drop_last=False)
return ood_dataloader_dict
def demark_testing(self):
X_test=self.X_test
Y_test=self.Y_test
deps_test=self.deps_test
print("Size of X_test is {}".format(len(X_test)))
testing_dict={}
assert len(X_test)==len(Y_test) and len(Y_test)==len(deps_test)
for i in (range(len(X_test))):
key = (deps_test[i]['n_intervening'], deps_test[i]["n_diff_intervening"])
if not key in testing_dict.keys():
testing_dict[key]=[]
testing_dict[key].append((X_test[i], Y_test[i]))
self.testing_dict=testing_dict
X_train =self.X_train
Y_train =self.Y_train
deps_train=self.deps_train
print("Size of X_train is {}".format(len(X_train)))
training_dict={}
assert len(X_train)==len(Y_train) and len(Y_train)==len(deps_train)
for i in (range(len(X_train))):
key = (deps_train[i]['n_intervening'], deps_train[i]["n_diff_intervening"])
if not key in training_dict.keys():
training_dict[key]=[]
training_dict[key].append((X_train[i], Y_train[i]))
self.training_dict=training_dict
    def result_demarcated(self):
        """Evaluate accuracy per (n_intervening, n_diff_intervening) bucket.

        Uses the demarcated loaders for the training split when
        ``self.demarcate_train`` is set, otherwise for the test split.
        Returns (overall accuracy, dict mapping bucket -> (accuracy, count))
        and logs the overall accuracy.
        """
        # Lazily build the bucketed dicts/loaders on first use.
        if not hasattr(self, "testing_dict") or not hasattr(self, 'training_dict'):
            print('creating demarcated dict!')
            self.demark_testing()
            self.create_demarcated_dataloader()
        result_dict={}
        self.model.eval()
        if not self.demarcate_train:
            loader_dict= self.testing_demarcated_dataloader
        else:
            loader_dict= self.training_demarcated_dataloader
        hidden_state_dict={}
        with torch.no_grad():
            for key in loader_dict.keys():
                loader = loader_dict[key]
                correct=0
                example_processed=0
                hidden_state_dict[key]=[]
                for x_test, y_test in loader:
                    bsz = x_test.size(0)
                    x_test = x_test.view(bsz, self.maxlen)
                    y_test = y_test.view(bsz, )
                    example_processed+=bsz
                    # predict() returns an extra attention-weights entry when
                    # the model was built with attention.
                    if self.act_attention or self.max_attn:
                        _, pred, _, acc, attention_weights, h_n, h_last = self.model.predict(x_test, y_test, compute_loss=True)
                    else:
                        _, pred, _, acc, h_n, h_last = self.model.predict(x_test, y_test, compute_loss=True)
                    correct+=acc
                    hidden_state_dict[key].append((h_n, pred, y_test))
                result_dict[key] = (float(correct)/float(example_processed), float(example_processed))
        # Overall accuracy: example-count-weighted mean of the bucket accuracies.
        correct=0
        total=0
        for key in result_dict.keys():
            correct+= result_dict[key][0]*result_dict[key][1]
            total+=result_dict[key][1]
        acc = float(correct)/float(total)
        # self.log(str(result_dict))
        self.log(str(acc))
        # Restore training mode after evaluation.
        self.model.train()
        # with open('hidden_dict.pkl', 'wb') as f :
        #     pickle.dump(hidden_state_dict, f)
        return acc, result_dict
    def create_model(self):
        """Instantiate the network on ``device`` and store it on ``self.model``.

        Builds the POS-tagger variant when ``self.train_tagger`` is set,
        otherwise the agreement model. Exits the process when the number of
        attention heads does not divide the embedding size.
        """
        self.log('Creating model')
        self.log('vocab size : ' + str(len(self.vocab_to_ints)))
        print('Creating model')
        print('vocab size : ' + str(len(self.vocab_to_ints)))
        if self.embedding_size%self.nheads !=0:
            print("Num heads should divide embedding size. Exiting !!!")
            sys.exit()
        # vocab_size is len(vocab)+1 because index 0 is reserved for padding.
        if not self.train_tagger:
            self.model = Agreement_model(rnn_arch= self.rnn_arch, embedding_size = self.embedding_size, hidden_size=self.hidden_size, vocab_size = len(self.vocab_to_ints)+1, num_layers=self.num_layers, output_size=self.output_size, dropout=self.dropout, act_attention=self.act_attention, max_attn=self.max_attn, num_heads=self.nheads, activation=self.activation)
        else:
            self.model = POS_Tagger.Agreement_model(rnn_arch= self.rnn_arch, embedding_size = self.embedding_size, hidden_size=self.hidden_size, vocab_size = len(self.vocab_to_ints)+1, num_layers=self.num_layers, output_size=self.output_size, dropout=self.dropout, act_attention=self.act_attention, max_attn=self.max_attn, num_heads=self.nheads, activation=self.activation)
        self.model = self.model.to(device)
##########################################################
#### ADDITION DONE TO GET EXTERNAL TESTED #########
##########################################################
    def test_ood(self, final_dict_testing):
        """Evaluate the model on out-of-distribution example sets.

        ``final_dict_testing`` maps keys to (sentences, inputs, labels)
        triples; they are wrapped into DataLoaders, evaluated, and the
        per-key accuracies logged/printed. Final hidden states, labels and
        predictions for each key are pickled to ``<key>.pkl``.
        """
        final_dict_testing = self.create_OOD_dataloader(final_dict_testing)
        result={}
        hidden_state_dict={}
        with torch.no_grad():
            self.model.eval()
            for key in final_dict_testing:
                loss_=0
                correct=0
                hidden_last=[]
                y_out=[]
                y_pred=[]
                total = 0
                for x_sent, x_test, y_test in final_dict_testing[key]:
                    bsz = x_test.size(0)
                    x_test = x_test.view(bsz, self.maxlen)
                    y_test = y_test.view(bsz, )
                    # predict() returns an extra attention-weights entry when
                    # the model was built with attention.
                    if self.act_attention or self.max_attn:
                        loss, pred, _, acc, _, h_n, h_last = self.model.predict(x_test, y_test, compute_loss=True)
                    else:
                        loss, pred, _, acc, h_n, h_last = self.model.predict(x_test, y_test, compute_loss=True)
                    loss_+=loss*bsz
                    correct+=acc
                    total+=bsz
                    # Keep only the final layer's hidden state for inspection.
                    hidden_last.append((h_n[-1]))
                    y_out.append(y_test)
                    y_pred.append(pred)
                result[key] = (float(correct)/float(total), total)
                hidden_state_dict[key] = (hidden_last, y_out, y_pred)
        self.log(result)
        print(result)
        # Dump per-key hidden states for offline analysis.
        for key in hidden_state_dict.keys():
            with open(str(key)+'.pkl', 'wb') as f:
                pickle.dump(hidden_state_dict[key], f)
    def ood_genset_creation(self, filename, save_processed_data = False):
        """Load pickled sentence templates from directory ``filename``,
        flatten them into (sentence, label) pairs, encode/filter them with
        ``valid_input`` and run ``test_ood`` on the result.

        Each template pickle maps a property key to a list of pairs
        (grammatical sentence, ungrammatical sentence); the grammatical
        variant gets label 0 and the ungrammatical one label 1.
        """
        value = lambda key : int(key.split("_")[0]!="sing") # only required in PVN
        ex_list = [] # (loaded template dict, source filename) pairs
        for files in os.listdir(filename):
            loaded_template = pickle.load(open(os.path.join(filename, files), 'rb'))
            ex_list.append((loaded_template, files))
        test_example={}
        for i in range(len(ex_list)):
            for keys in ex_list[i][0].keys():
                list1= ex_list[i][0][keys]
                if len(list1[0]) > 2: #ignoring the 3 tuples in the templates
                    continue
                if (ex_list[i][1], keys) in test_example.keys(): # this just means if (file, singular) is a key to test_example dictionary, if not then initialize the key
                    pass
                else:
                    test_example[(ex_list[i][1], keys)]=[]
                for X in list1:
                    # x, _ = X
                    # # x = correct sentence, x_neg is ungrammatical sentence due to inflection (but for pvn we need to focus only on grammatical sentence)
                    # # note that for pvn task, where we want to make the waveforms, here the label will be 0 for singulars and 1 for plural
                    # test_example[(ex_list[i][1], keys)].append((x, value(keys))) # we need to by pass this step for y waveform construction
                    x, x_neg = X
                    test_example[(ex_list[i][1], keys)].append((x, 0))
                    test_example[(ex_list[i][1], keys)].append((x_neg, 1))
        external_testing_dict={}
        for keys in test_example.keys():
            x_test_, y_test_ = zip(*test_example[keys])
            external_testing_dict[keys] = (x_test_, y_test_)
        # At this time we have a dictionary that has key -->(filename, property) and value a tuple (X_test(string form), y_test)
        final_dict_testing = self.valid_input(external_testing_dict)
        if save_processed_data:
            # NOTE(review): mkdir fails if "Testing_data" already exists, and
            # pickle_out is never closed -- confirm whether that is intended.
            os.mkdir("Testing_data")
            for keys in final_dict_testing.keys():
                pickle_out = open(os.path.join("Testing_data", str(keys))+".pkl", "wb")
                pickle.dump(final_dict_testing[keys], pickle_out)
        self.test_ood(final_dict_testing)
def valid_input(self, external_testing_dict):
final_dict_testing={}
for keys in external_testing_dict.keys():
x = []
y = []
x_sentences = []
X_test, Y_test = external_testing_dict[keys]
for i in range(len(X_test)):
x_ex = []
flag=True
example = X_test[i]
token_list = example.split()
if len(token_list)>self.maxlen: #ignore big sentences than max len
continue
for tokens in token_list:
if not tokens in self.vocab_to_ints.keys(): #if unknown character, leave the example
flag=False
break
x_ex.append(self.vocab_to_ints[tokens])
if not flag:
continue
x.append(x_ex)
x_sentences.append(X_test[i])
y.append(Y_test[i])
x = pad_sequences(x, self.maxlen)
final_dict_testing[keys]=(x_sentences, x, y)
assert len(x_sentences) == len(x) == len(y), "assert failed! length of sentences is different from length of actual testing sentence per template per sing/plur"
return final_dict_testing
    def pipeline(self, train, train_bsz =128, test_bsz=32,load = False, model = '', test_size=7000, model_prefix='_', num_epochs=20, load_data=False, save_data=False, test_linzen_template_pvn=False, linzen_template_filename=None,
                train_size=None, data_name='Not', lr=0.001, annealing=False, nheads=1, activation='relu', act_attention=False, max_attn=False, use_hidden=False, K=5, L=1, train_tagger=False, compare_models=False, m1=None, m2=None, domain_adaption = False,
                test_demarcated=False, demarcate_train=False, ood=False, augment_train=True, augment_test=True, augment_ratio=1, verb_embedding=False):
        """Top-level driver: stash every option on ``self``, prepare data,
        build or load a model, then dispatch to exactly one of: POS-tagger
        training, agreement training + full evaluation, model comparison,
        verb-embedding extraction, demarcated evaluation, OOD evaluation,
        or plain validation.
        """
        # Record every option on self so the helper methods can reach them.
        self.train_bsz = train_bsz
        self.test_bsz = test_bsz
        self.train=train
        self.test_size = test_size
        self.num_epochs = num_epochs
        self.test_linzen_template_pvn = test_linzen_template_pvn
        self.linzen_template_filename =linzen_template_filename
        self.model_name = model
        self.model_prefix = model_prefix
        self.lr = lr
        self.annealing=annealing
        self.nheads= nheads
        self.activation=activation
        self.act_attention=act_attention
        self.use_hidden=use_hidden
        self.max_attn = max_attn
        self.K = K
        self.L = L
        self.train_tagger=train_tagger
        # NOTE(review): self.train is assigned twice (also above) -- redundant.
        self.train=train
        self.domain_adaption = domain_adaption
        self.test_demarcated=test_demarcated
        self.demarcate_train=demarcate_train
        self.ood = ood
        self.augment_train=augment_train
        self.augment_test=augment_test
        self.augment_ratio=augment_ratio
        self.verb_embedding =verb_embedding
        # Either load a pre-built split from disk or create it from scratch.
        if load_data:
            self.load_train_and_test(test_size, data_name, self.domain_adaption)
            print("Loading Data!")
        else :
            self.log('creating data')
            print("Creating data")
            examples = self.load_examples(data_name, save_data, None if train_size is None else train_size*10)
            self.create_train_and_test(examples, test_size, data_name, save_data)
        # at this time we have the train and testing data in our hand !!
        if train_tagger:
            self.create_train_test_dataloader_POS()
        else:
            self.create_train_test_dataloader()
        # Build a fresh model unless asked to load one (comparison loads later).
        if load :
            if compare_models:
                pass
            else:
                self.load_model()
        else:
            self.create_model()
        if self.train_tagger:
            self.train_tagging()
        elif self.train:
            self.train_model()
            print("Training Done!!! Now evaluating the full model!")
            # Re-load the full dataset (test_size=0) and the best checkpoint
            # for the final evaluation pass.
            self.test_size=0
            self.load_train_and_test(test_size, data_name, self.domain_adaption)
            self.load_model()
            self.create_train_test_dataloader()
            acc = self.validate()
            print("Testing complete!!!")
            print(acc)
            self.log(acc)
            # print("Following is average attention weight distribution for the testing examples! 0 key means verb loc")
            # print(self.attn_dist)
            # NOTE(review): accuracy is logged twice here -- looks redundant.
            self.log(acc)
        elif compare_models:
            m1 = torch.load(m1)
            m2 = torch.load(m2)
            self.compare_models(m1, m2)
        elif self.verb_embedding :
            self.verb_emb()
        else:
            # Evaluation-only modes: per-bucket, out-of-distribution, or plain.
            if self.test_demarcated:
                acc, result_dict = self.result_demarcated()
                print("Testing complete!!!")
                print(acc)
                print(result_dict)
                self.log(str(result_dict))
                self.log(str(acc))
            elif self.ood:
                self.ood_genset_creation(filenames.external_file)
            else:
                acc = self.validate()
                print("Testing complete!!!")
                print(acc)
                self.log(acc)
        print('Data : ', data_name)
        self.log(data_name)
        print("Done!")
    def load_examples(self,data_name='Not',save_data=False, n_examples=None):
        '''
        Read dependencies from ``self.filename`` (TSV), build the token and
        POS vocabularies on the fly, and return a list of examples
        (label code, token ids, POS ids, dependency dict).

        Sentences longer than ``self.maxlen`` or rejected by
        ``self.criterion`` are skipped. For 'ungrammatical' examples the
        verb's POS tag is flipped between VBZ and VBP.

        Set n_examples to some positive integer to only load (up to) that
        number of examples
        '''
        self.log('Loading examples')
        if self.filename is None:
            raise ValueError('Filename argument to constructor can\'t be None')
        self.vocab_to_ints = {}
        self.ints_to_vocab = {}
        self.pos_to_ints={}
        self.ints_to_pos={}
        # Mapping used to invert the verb's number marking.
        self.opp_POS={'VBZ':'VBP', 'VBP':'VBZ'}
        # note that 0 in the ints of POS means that the class is reject! that is off NULL TOKEN!
        examples = []
        n = 0
        deps = deps_from_tsv(self.filename, limit=n_examples)
        for dep in deps:
            tokens = dep['sentence'].split()
            if len(tokens) > self.maxlen or not self.criterion(dep):
                continue
            tokens, POS_tags = self.process_single_dependency(dep, True)
            if dep['label'] == 'ungrammatical': # this will only operate in the case of Gram. In case of PVN, it wont be there automatticaly
                POS_tags[int(dep['verb_index']) - 1] = self.opp_POS[POS_tags[int(dep['verb_index']) - 1]]
            ints = []
            sent_POS = []
            for token in tokens:
                if token not in self.vocab_to_ints:
                    # zero is for pad
                    x = self.vocab_to_ints[token] = len(self.vocab_to_ints) + 1
                    self.ints_to_vocab[x] = token
                ints.append(self.vocab_to_ints[token])
            for pos in POS_tags:
                if pos not in self.pos_to_ints:
                    x = self.pos_to_ints[pos] = len(self.pos_to_ints) + 1
                    self.ints_to_pos[x] = pos
                sent_POS.append(self.pos_to_ints[pos])
            examples.append((self.class_to_code[dep['label']], ints, sent_POS, dep))
            n += 1
            if n_examples is not None and n >= n_examples:
                break
        # Optionally persist the four vocab mappings for later reuse.
        if (save_data) :
            with open('plus5_v2i.pkl', 'wb') as f:
                pickle.dump(self.vocab_to_ints, f)
            with open('plus5_i2v.pkl', 'wb') as f:
                pickle.dump(self.ints_to_vocab, f)
            with open('plus5_p2i.pkl', 'wb') as f:
                pickle.dump(self.pos_to_ints, f)
            with open('plus5_i2p.pkl', 'wb') as f:
                pickle.dump(self.ints_to_pos, f)
        return examples
def load_model(self):
self.model = torch.load(self.model_name)
def input_to_string(self, x_input):
#x_input is the example we want to convert to the string
#x_input should be in the form of 1D list.
example_string = ""
for token in x_input:
if token == 0:
continue
str_tok = self.ints_to_vocab[token]
example_string+=str_tok+" "
return example_string
def topK_acc(self, score):
# score will be a sorted list, and we will take first K entries.
locs, _ = zip(*score)
for k in self.K_dict.keys():
a = list(locs[0:k])
for loc in a:
if loc in self.L_dict.keys():
self.K_dict[k]+=1
break
    def update_attn_dict(self, attention_weights, verb_loc, x_test):
        """Fold one batch of attention weights into the running statistics.

        For every example: take the attention weights over the real (non-pad)
        tokens, rank them by probability, re-index each position relative to
        the verb, then (a) update the top-K hit counters via ``topK_acc`` and
        (b) update ``self.attn_dist``, a dict mapping position-relative-to-verb
        to a running ``(mean probability, count)`` pair.

        Args:
            attention_weights: tensor of shape (bsz, input_sent_len) —
                per-token attention probabilities for each example.
            verb_loc: tensor of 1-based verb positions for each example.
            x_test: (bsz, maxlen) tensor of padded token ids.
        """
        # attention_weights (bsz, input_sent_len) Get the list for every exampple in batch Having a tuple
        # Move everything to CPU / numpy so the bookkeeping below is plain Python.
        bsz = x_test.size(0)
        verb_index = verb_loc -1  # 1-based verb position -> 0-based index
        verb_index = verb_index.squeeze().cpu().numpy()
        attention_weights = attention_weights.cpu().numpy() # (bsz, len)
        x_test = x_test.cpu()
        # Real sentence length per example (padding id 0 is dropped by input_to_string).
        input_len = [len(self.input_to_string(x_test[i].tolist()).split()) for i in range(bsz)]
        per_batch_corr=0 # NOTE(review): unused — if top-K contains element from the set {0, 1, ... L-1}, then we increase the count
        for i in range(bsz):
            # flag=False
            # Keep only the last input_len[i] weights — presumably the sentence
            # is right-aligned in the padded input (TODO confirm against the model).
            with_indices = list(attention_weights[i, -input_len[i]:])
            for j in range(len(with_indices)):
                with_indices[j] = (j, with_indices[j])
            with_indices.sort(key = lambda x: x[1] , reverse=True) # sorting in descending order with respect to attention weights
            with_indices = [(j[0]-verb_index[i], j[1]) for j in with_indices] # a list containing (location wrt verb and probablity)
            self.topK_acc(with_indices)
            for j in with_indices:
                if j[0] in self.attn_dist.keys():
                    # Incremental running mean: new_mean = (mean*n + p) / (n + 1).
                    avg_prob, tot = self.attn_dist[j[0]]
                    avg_prob = float(avg_prob*tot + j[1])/float(tot+1)
                    tot+=1
                    self.attn_dist[j[0]] = (avg_prob, tot)
                else:
                    self.attn_dist[j[0]] = (j[1], 1)
    def compare_models(self, m1, m2):
        """Compare two trained models on the test set and dump disagreements.

        Runs both models over ``self.Test_DataGenerator``, collects every
        example on which their predictions differ, writes those examples
        (sentence, ground truth, verb location, both predictions) to
        ``different_sent_dump.csv``, and logs both models' overall accuracy.

        Args:
            m1, m2: model objects exposing ``eval()`` and
                ``predict(x, y)`` returning a tuple whose [1] element is the
                predictions and [2] element is the batch correct-count
                (inferred from usage below — TODO confirm against the model API).
        """
        # to compare the accuracies of 2 models and log where the 2 are producing different outputs.
        x_diff_pred = []
        ground_truth=[]
        predicted1= []
        predicted2= []
        verb_location=[]
        accuracy_1 = 0
        accuracy_2 = 0
        batches_processed=0
        example_processed=0
        with torch.no_grad():
            m1.eval()
            m2.eval()
            for x_test, y_test, verb_loc_train in self.Test_DataGenerator:
                bsz = x_test.size(0)
                x_test = x_test.view(bsz, self.maxlen)
                y_test = y_test.view(bsz, )
                example_processed+=bsz
                output1 = m1.predict(x_test, y_test)
                output2 = m2.predict(x_test, y_test)
                pred1 = output1[1].squeeze()
                pred2 = output2[1].squeeze()
                accuracy_1+=output1[2]
                accuracy_2+=output2[2]
                # Record every example where the two models disagree.
                for j in range(len(pred1)):
                    if pred1[j]!=pred2[j]:
                        x_diff_pred.append(self.input_to_string(x_test[j].tolist()))
                        ground_truth.append(y_test[j].tolist())
                        predicted1.append(pred1[j].tolist())
                        predicted2.append(pred2[j].tolist())
                        verb_location.append(verb_loc_train[j].tolist())
                batches_processed+=1
                if batches_processed%50==0:
                    print("batches processed {}/{}".format(batches_processed, len(self.Test_DataGenerator)))
        # Dump the disagreement table as CSV: one column per dict key.
        d= {'sent':x_diff_pred, 'ground_truth':ground_truth, 'verbLoc': verb_location, 'pred1':predicted1, 'pred2':predicted2}
        with open("different_sent_dump.csv", "w") as outfile:
            writer = csv.writer(outfile)
            writer.writerow(d.keys())
            writer.writerows(zip(*d.values()))
        msg = "Accuracy of Model1 is {} \t Accuracy for Model 2 is {}".format(float(accuracy_1)/float(example_processed), float(accuracy_2)/float(example_processed))
        print(msg)
        self.log(msg)
    def validate(self):
        """Evaluate ``self.model`` on the test set.

        Returns:
            tuple: ``(accuracy, mean_loss)`` where accuracy is
            correct / examples and mean_loss is total loss / dataset size.

        Side effects: resets and (in attention test mode) populates
        ``self.attn_dist`` / ``self.K_dict``, pickles the attention
        distribution and last hidden states to disk, and restores the model
        to train mode before returning.
        """
        self.attn_dist={} # A dict to be used to update the average distribution over the testing examples!
        # self.per_loc_probab_dist={}
        self.L_dict = {i:"" for i in range(self.L)}
        self.K_dict = {i:0 for i in range(1, self.K+1)}
        if not self.train:
            print("Total testing Dataset size {}".format(len(self.Test_DataGenerator.dataset)))
        verbose=True
        correct=0
        self.specific = []
        hidden_last=[]
        y_out=[]
        example_processed=0
        self.model.eval()
        with torch.no_grad():
            loss_ = 0
            counter=0
            for x_test, y_test, verb_loc_test in self.Test_DataGenerator:
                bsz = x_test.size(0)
                x_test = x_test.view(bsz, self.maxlen)
                y_test = y_test.view(bsz, )
                example_processed+=bsz
                # Attention models return one extra output (the attention weights).
                if self.act_attention or self.max_attn:
                    loss, _, _, acc, attention_weights, h_n, h_last = self.model.predict(x_test, y_test, compute_loss=True)
                else:
                    loss, _, _, acc, h_n, h_last = self.model.predict(x_test, y_test, compute_loss=True)
                loss_+=loss*bsz
                correct+=acc
                hidden_last.append((h_n[-1]))
                y_out.append(y_test)
                # Only accumulate attention statistics when evaluating (not during training).
                if not self.train and (self.act_attention or self.max_attn):
                    self.update_attn_dict(attention_weights, verb_loc_test, x_test)
                counter+=1
        # dump distributions!!
        if not self.train and (self.act_attention or self.max_attn) :
            with open('attn_dist.pickle', 'wb') as f:
                pickle.dump(self.attn_dist, f)
            # Normalize top-K hit counts into fractions of processed examples.
            self.K_dict = {k: float(self.K_dict[k])/float(example_processed) for k in self.K_dict.keys()}
            print("Top k results for L = {}".format(self.L))
            print(str(self.K_dict))
            self.log("Top k results for L = {}".format(self.L))
            self.log(str(self.K_dict))
            self.log(str(self.attn_dist))
            print("Following is average attention weight distribution for the testing examples! 0 key means verb loc")
            print(self.attn_dist)
            hidden_last = torch.cat(hidden_last, dim=0).tolist()
            y_out = torch.cat(y_out, dim=0).tolist()
            with open('hidden_last.pkl', 'wb') as f:
                pickle.dump((hidden_last, y_out), f)
        self.model.train()
        return float(correct)/float(example_processed), float(loss_)/len(self.Test_DataGenerator.dataset)
def detach_states(self, states):
return torch.stack([states[i].detach() for i in range(states.size(0))]) # per layer detach the hidden states
# def save_weights(self):
# #saves the recurrent weights
# weight_list=[]
# weight_dict={}
# for layer in range(self.num_layers):
# if self.rnn_arch!= 'LSTM' or self.rnn_arch!='GRU'or self.rnn_arch!='RNN':
# weight_list.append(self.model.recurrent_layer.weight_hh_l[layer].view(self.hidden_size,))
# else:
# weight_list.append(self.model.recurrent_layer.get_cell(layer).weight_hh.view(self.hidden_size,))
# with open('weight_hh.pkl', 'wb') as f:
# pickle.dump(weight_list, f)
def train_model(self):
self.demarcate_train=True
self.log('Training')
print('Training')
if not hasattr(self, 'model'):
self.create_model()
self.log("Total size of training set {}".format(len(self.X_train)))
print("Total size of training set {}".format(len(self.X_train)))
self.log("Total Training epochs {}".format(self.num_epochs))
print("Total Training epochs {}".format(self.num_epochs))
max_acc= 0; min_loss = float("Inf")
optimizer = optim.Adam(self.model.parameters(), lr = self.lr)
if self.annealing:
scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, factor=0.5, patience=2, verbose=True, eps=1e-6)
for epoch in range(self.num_epochs) :
h_n = None
flag=True
self.log('epoch : ' + str(epoch))
self.log_grad('epoch : ' + str(epoch))
self.log_alpha('epoch : ' + str(epoch))
index=0
total_batches = len(self.Train_DataGenerator)
epoch_loss = 0
start_time = time.time()
for x_batch, y_batch, _ in self.Train_DataGenerator :
bsz = x_batch.size(0)
x_batch = x_batch.view(bsz, self.maxlen).to(device)
y_batch = y_batch.view(bsz,).to(device)
self.model.zero_grad()
if self.act_attention or self.max_attn:
if self.use_hidden:
if flag:
loss, _, _, _, attention_weights, h_n, h_last = self.model.predict(x_batch, y_batch, h_n, compute_loss=True)
flag = False
else:
loss, _, _, _, attention_weights, h_n, h_last = self.model.predict(x_batch, y_batch, h_n[:, 0:bsz, :], compute_loss=True)
else:
loss, _, _, _, _, _, _ = self.model.predict(x_batch, y_batch, compute_loss=True)
else:
if self.use_hidden:
if flag:
loss, _, _, _, h_n, h_last = self.model.predict(x_batch, y_batch, h_n, compute_loss=True)
flag = False
else:
loss, _, _, _, h_n, h_last = self.model.predict(x_batch, y_batch, h_n[:, 0:bsz, :], compute_loss=True)
else:
loss, _, _, _, _, _ = self.model.predict(x_batch, y_batch, compute_loss=True)
epoch_loss+=loss.item()
loss.backward()
if self.use_hidden:
h_n = self.detach_states(h_n)
optimizer.step()
index+=1
if (index)%30 == 0:
elapsed_time=time.time()-start_time
acc, val_loss = self.validate()
if (acc >= max_acc) :
model_name = self.model_prefix + '.pkl'
torch.save(self.model, model_name)
max_acc = acc
if val_loss < min_loss:
min_loss = val_loss
acc, result_dict = self.result_demarcated()
self.log_demarcate_train(acc)
# self.log_demarcate_train(result_dict)
self.demarcate_train = not self.demarcate_train
acc, result_dict = self.result_demarcated()
self.log_demarcate_val(acc)
# self.log_demarcate_val(result_dict)
self.demarcate_train = not self.demarcate_train
counter = 0
args=[index, total_batches, epoch, self.num_epochs, max_acc, epoch_loss/index, min_loss, float(index)/float(elapsed_time)]
self.log("Total bsz done {}/{} || Total Epochs Done {}/{} || Max Validation Accuracy {:.4f} || Epoch loss {:.4f} || Min loss {:.4f} || Speed {} b/s".format(*args))
self.log_grad("Total bsz done {}/{} || Total Epochs Done {}/{} || Max Validation Accuracy {:.4f} || Epoch loss {:.4f} || Min loss {:.4f} || Speed {} b/s".format(*args))
print("Total bsz done {}/{} || Total Epochs Done {}/{} || Max Validation Accuracy {:.4f} || Epoch loss {:.4f} || Min loss {:.4f} || Speed {} b/s".format(*args))
for param in self.model.parameters():
if param.grad is not None:
self.log_grad(str(counter) + ' : ' + str(param.grad.norm().item()))
counter += 1
for name,param in self.model.named_parameters():
for i in range(self.num_layers):
if name=="recurrent_layer.cell_{}.rgate".format(i):
self.log_alpha(str((param.data)))
acc, val_loss = self.validate() # this is fraction based accuracy
if self.annealing:
scheduler.step(val_loss)
if val_loss < min_loss:
min_loss = val_loss
args=[index, total_batches, epoch, self.num_epochs, max_acc, epoch_loss/index, min_loss, float(index)/float(elapsed_time)]
self.log("Total bsz done {}/{} || Total Epochs Done {}/{} || Max Validation Accuracy {:.4f} || Epoch loss {:.4f} || Min loss {:.4f} || Speed {} b/s".format(*args))
self.log_grad("Total bsz done {}/{} || Total Epochs Done {}/{} || Max Validation Accuracy {:.4f} || Epoch loss {:.4f} || Min loss {:.4f} || Speed {} b/s".format(*args))
print("Total bsz done {}/{} || Total Epochs Done {}/{} || Max Validation Accuracy {:.4f} || Epoch loss {:.4f} || Min loss {:.4f} || Speed {} b/s".format(*args))
if (acc > max_acc) :
model_name = self.model_prefix + '.pkl'
torch.save(self.model, model_name)
max_acc = acc
acc, result_dict = self.result_demarcated()
self.log_demarcate_train(acc)
# self.log_demarcate_train(result_dict)
self.demarcate_train = not self.demarcate_train
acc, result_dict = self.result_demarcated()
self.log_demarcate_val(acc)
# self.log_demarcate_val(result_dict)
self.demarcate_train = not self.demarcate_train
index=0; epoch_loss=0
print("End of training !!!")
def create_model_POS(self):
self.log('Creating model POS')
self.log('vocab size : ' + str(len(self.vocab_to_ints)))
print('Creating model POS')
print('vocab size : ' + str(len(self.vocab_to_ints)))
self.model = POS_Tagger(rnn_arch= self.rnn_arch, embedding_size = self.embedding_size, hidden_size=self.hidden_size, vocab_size = len(self.vocab_to_ints)+1, num_layers=self.num_layers, output_size=len(self.pos_to_ints)+1, dropout=self.dropout, act_attention=self.act_attention, max_attn=self.max_attn, num_heads=self.nheads, activation=self.activation)
self.model = self.model.to(device)
    def validate_tagger(self):
        """Evaluate the POS tagger on the test set at token granularity.

        Returns:
            tuple: ``(final_accuracy, loss)`` — correct tokens / total
            tokens, and total loss / total tokens. Unlike ``validate``,
            counts are per token (bsz * maxlen per batch), since tagging
            predicts one label per input position.
        """
        correct=0
        tokens_processed=0
        self.model.eval()
        with torch.no_grad():
            loss_ = 0
            counter=0
            for x_test, y_test, verb_loc_test in self.Test_DataGenerator:
                bsz = x_test.size(0)
                x_test = x_test.view(bsz, self.maxlen)
                # Tagging targets are per-token, hence (bsz, maxlen) rather than (bsz,).
                y_test = y_test.view(bsz, self.maxlen)
                tokens_processed+=bsz*self.maxlen
                loss, _, _, acc, _, _ = self.model.predict(x_test, y_test, compute_loss=True)
                loss_+=loss*bsz*self.maxlen
                correct+=acc
        final_accuracy = float(correct)/float(tokens_processed)
        loss = float(loss_)/float(tokens_processed)
        return final_accuracy, loss
    def train_tagging(self):
        """Train the POS-tagging model.

        Mirrors ``train_model`` but with per-token (bsz, maxlen) targets and
        no attention branch. Validates every 30 batches and at the end of
        each epoch, checkpointing to ``self.model_prefix + '.pkl'`` on a new
        best accuracy.

        NOTE(review): this calls ``self.validate()`` (the classifier
        validator), not ``self.validate_tagger()`` — confirm that is
        intended, since the two unpack different predict() outputs and count
        examples vs tokens differently.
        """
        self.log('Training POS tagger!')
        print('Training')
        if not hasattr(self, 'model'):
            self.create_model_POS()
        self.log("Total size of training set {}".format(len(self.X_train)))
        print("Total size of training set {}".format(len(self.X_train)))
        self.log("Total Training epochs {}".format(self.num_epochs))
        print("Total Training epochs {}".format(self.num_epochs))
        max_acc= 0; min_loss = float("Inf")
        optimizer = optim.Adam(self.model.parameters(), lr = self.lr)
        if self.annealing:
            # Halve the LR after 2 validation checks without loss improvement.
            scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, factor=0.5, patience=2, verbose=True, eps=1e-6)
        for epoch in range(self.num_epochs) :
            h_n = None
            flag=True  # True only until the first batch supplies an initial hidden state
            self.log('epoch : ' + str(epoch))
            self.log_grad('epoch : ' + str(epoch))
            self.log_alpha('epoch : ' + str(epoch))
            index=0
            total_batches = len(self.Train_DataGenerator)
            epoch_loss = 0
            for x_batch, y_batch, _ in self.Train_DataGenerator :
                bsz = x_batch.size(0)
                x_batch = x_batch.view(bsz, self.maxlen).to(device)
                y_batch = y_batch.view(bsz, self.maxlen).to(device)
                self.model.zero_grad()
                # When carrying hidden state across batches, the last (possibly
                # smaller) batch slices h_n down to bsz.
                if self.use_hidden:
                    if flag:
                        loss, _, _, _, h_n, h_last = self.model.predict(x_batch, y_batch, h_n, compute_loss=True)
                        flag = False
                    else:
                        loss, _, _, _, h_n, h_last = self.model.predict(x_batch, y_batch, h_n[:, 0:bsz, :], compute_loss=True)
                else:
                    loss, _, _, _, _, _ = self.model.predict(x_batch, y_batch, compute_loss=True)
                epoch_loss+=loss.item()*bsz
                loss.backward()
                if self.use_hidden:
                    # Cut the graph so the next batch does not backprop through this one.
                    h_n = self.detach_states(h_n)
                optimizer.step()
                index+=1
                if (index)%30 == 0:
                    acc, val_loss = self.validate()
                    if (acc >= max_acc) :
                        model_name = self.model_prefix + '.pkl'
                        torch.save(self.model, model_name)
                        max_acc = acc
                    if val_loss < min_loss:
                        min_loss = val_loss
                    counter = 0
                    self.log("Total Epochs Done {}/{} || Max Validation Accuracy {:.4f} || Epoch loss {:.4f} || Min loss {:.4f}".format(epoch, self.num_epochs, max_acc, epoch_loss/(index*bsz), min_loss))
                    self.log_grad("Total Epochs Done {}/{} || Max Validation Accuracy {:.4f} || Epoch loss {:.4f} || Min loss {:.4f}".format(epoch, self.num_epochs, max_acc, epoch_loss/(index*bsz), min_loss))
                    print("Total Epochs Done {}/{} || Max Validation Accuracy {:.4f} || Epoch loss {:.4f} || Min loss {:.4f}".format(epoch, self.num_epochs, max_acc, epoch_loss/(index*bsz), min_loss))
                    for param in self.model.parameters():
                        if param.grad is not None:
                            self.log_grad(str(counter) + ' : ' + str(param.grad.norm().item()))
                            counter += 1
                    # Log the rgate parameters (alpha) of each recurrent layer, if present.
                    for name,param in self.model.named_parameters():
                        for i in range(self.num_layers):
                            if name=="recurrent_layer.cell_{}.rgate".format(i):
                                self.log_alpha(str((param.data)))
            acc, val_loss = self.validate() # this is fraction based accuracy
            if self.annealing:
                scheduler.step(val_loss)
            if val_loss < min_loss:
                min_loss = val_loss
            print("Total Epochs Done {}/{} || Max Validation Accuracy {:.4f} || Epoch loss {:.4f} || Min loss {:.4f}".format(epoch, self.num_epochs, max_acc, epoch_loss/len(self.Train_DataGenerator.dataset), min_loss))
            self.log("Total Epochs Done {}/{} || Max Validation Accuracy {:.4f} || Epoch loss {:.4f} || Min loss {:.4f}".format(epoch, self.num_epochs, max_acc, epoch_loss/len(self.Train_DataGenerator.dataset), min_loss))
            self.log_grad("Total Epochs Done {}/{} || Max Validation Accuracy {:.4f} || Epoch loss {:.4f} || Min loss {:.4f}".format(epoch, self.num_epochs, max_acc, epoch_loss/len(self.Train_DataGenerator.dataset), min_loss))
            if (acc > max_acc) :
                model_name = self.model_prefix + '.pkl'
                torch.save(self.model, model_name)
                max_acc = acc
            index=0; epoch_loss=0
        print("End of training !!!")
| [
"torch.cuda.is_available",
"sys.exit",
"os.listdir",
"numpy.asarray",
"numpy.max",
"numpy.issubdtype",
"os.mkdir",
"torch.optim.lr_scheduler.ReduceLROnPlateau",
"numpy.full",
"csv.writer",
"torch.save",
"time.time",
"torch.cat",
"utils.deps_from_tsv",
"pickle.dump",
"torch.load",
"os... | [((1572, 1637), 'numpy.full', 'np.full', (['((num_samples, maxlen) + sample_shape)', 'value'], {'dtype': 'dtype'}), '((num_samples, maxlen) + sample_shape, value, dtype=dtype)\n', (1579, 1637), True, 'import numpy as np\n'), ((488, 513), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (511, 513), False, 'import torch\n'), ((979, 994), 'numpy.max', 'np.max', (['lengths'], {}), '(lengths)\n', (985, 994), True, 'import numpy as np\n'), ((1237, 1266), 'numpy.issubdtype', 'np.issubdtype', (['dtype', 'np.str_'], {}), '(dtype, np.str_)\n', (1250, 1266), True, 'import numpy as np\n'), ((1270, 1303), 'numpy.issubdtype', 'np.issubdtype', (['dtype', 'np.unicode_'], {}), '(dtype, np.unicode_)\n', (1283, 1303), True, 'import numpy as np\n'), ((1973, 2003), 'numpy.asarray', 'np.asarray', (['trunc'], {'dtype': 'dtype'}), '(trunc, dtype=dtype)\n', (1983, 2003), True, 'import numpy as np\n'), ((14562, 14582), 'os.listdir', 'os.listdir', (['filename'], {}), '(filename)\n', (14572, 14582), False, 'import os\n'), ((21167, 21213), 'utils.deps_from_tsv', 'deps_from_tsv', (['self.filename'], {'limit': 'n_examples'}), '(self.filename, limit=n_examples)\n', (21180, 21213), False, 'from utils import deps_from_tsv, dump_template_waveforms\n'), ((22645, 22672), 'torch.load', 'torch.load', (['self.model_name'], {}), '(self.model_name)\n', (22655, 22672), False, 'import torch\n'), ((10787, 10802), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (10800, 10802), False, 'import torch\n'), ((12210, 12220), 'sys.exit', 'sys.exit', ([], {}), '()\n', (12218, 12220), False, 'import sys\n'), ((13339, 13354), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (13352, 13354), False, 'import torch\n'), ((16099, 16123), 'os.mkdir', 'os.mkdir', (['"""Testing_data"""'], {}), "('Testing_data')\n", (16107, 16123), False, 'import os\n'), ((24907, 24922), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (24920, 24922), False, 'import torch\n'), ((26013, 26032), 
'csv.writer', 'csv.writer', (['outfile'], {}), '(outfile)\n', (26023, 26032), False, 'import csv\n'), ((26781, 26796), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (26794, 26796), False, 'import torch\n'), ((28270, 28306), 'pickle.dump', 'pickle.dump', (['(hidden_last, y_out)', 'f'], {}), '((hidden_last, y_out), f)\n', (28281, 28306), False, 'import pickle\n'), ((29641, 29748), 'torch.optim.lr_scheduler.ReduceLROnPlateau', 'torch.optim.lr_scheduler.ReduceLROnPlateau', (['optimizer'], {'factor': '(0.5)', 'patience': '(2)', 'verbose': '(True)', 'eps': '(1e-06)'}), '(optimizer, factor=0.5, patience=\n 2, verbose=True, eps=1e-06)\n', (29683, 29748), False, 'import torch\n'), ((30029, 30040), 'time.time', 'time.time', ([], {}), '()\n', (30038, 30040), False, 'import time\n'), ((34967, 34982), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (34980, 34982), False, 'import torch\n'), ((36044, 36151), 'torch.optim.lr_scheduler.ReduceLROnPlateau', 'torch.optim.lr_scheduler.ReduceLROnPlateau', (['optimizer'], {'factor': '(0.5)', 'patience': '(2)', 'verbose': '(True)', 'eps': '(1e-06)'}), '(optimizer, factor=0.5, patience=\n 2, verbose=True, eps=1e-06)\n', (36086, 36151), False, 'import torch\n'), ((6073, 6117), 'torch.tensor', 'torch.tensor', (['self.X_train'], {'dtype': 'torch.long'}), '(self.X_train, dtype=torch.long)\n', (6085, 6117), False, 'import torch\n'), ((6141, 6167), 'torch.tensor', 'torch.tensor', (['self.Y_train'], {}), '(self.Y_train)\n', (6153, 6167), False, 'import torch\n'), ((6499, 6542), 'torch.tensor', 'torch.tensor', (['self.X_test'], {'dtype': 'torch.long'}), '(self.X_test, dtype=torch.long)\n', (6511, 6542), False, 'import torch\n'), ((6565, 6590), 'torch.tensor', 'torch.tensor', (['self.Y_test'], {}), '(self.Y_test)\n', (6577, 6590), False, 'import torch\n'), ((6963, 7007), 'torch.tensor', 'torch.tensor', (['self.X_train'], {'dtype': 'torch.long'}), '(self.X_train, dtype=torch.long)\n', (6975, 7007), False, 'import torch\n'), ((7033, 7061), 
'torch.tensor', 'torch.tensor', (['self.POS_train'], {}), '(self.POS_train)\n', (7045, 7061), False, 'import torch\n'), ((7395, 7438), 'torch.tensor', 'torch.tensor', (['self.X_test'], {'dtype': 'torch.long'}), '(self.X_test, dtype=torch.long)\n', (7407, 7438), False, 'import torch\n'), ((7463, 7490), 'torch.tensor', 'torch.tensor', (['self.POS_test'], {}), '(self.POS_test)\n', (7475, 7490), False, 'import torch\n'), ((14291, 14329), 'pickle.dump', 'pickle.dump', (['hidden_state_dict[key]', 'f'], {}), '(hidden_state_dict[key], f)\n', (14302, 14329), False, 'import pickle\n'), ((16251, 16300), 'pickle.dump', 'pickle.dump', (['final_dict_testing[keys]', 'pickle_out'], {}), '(final_dict_testing[keys], pickle_out)\n', (16262, 16300), False, 'import pickle\n'), ((22313, 22347), 'pickle.dump', 'pickle.dump', (['self.vocab_to_ints', 'f'], {}), '(self.vocab_to_ints, f)\n', (22324, 22347), False, 'import pickle\n'), ((22394, 22428), 'pickle.dump', 'pickle.dump', (['self.ints_to_vocab', 'f'], {}), '(self.ints_to_vocab, f)\n', (22405, 22428), False, 'import pickle\n'), ((22475, 22507), 'pickle.dump', 'pickle.dump', (['self.pos_to_ints', 'f'], {}), '(self.pos_to_ints, f)\n', (22486, 22507), False, 'import pickle\n'), ((22554, 22586), 'pickle.dump', 'pickle.dump', (['self.ints_to_pos', 'f'], {}), '(self.ints_to_pos, f)\n', (22565, 22586), False, 'import pickle\n'), ((27664, 27694), 'pickle.dump', 'pickle.dump', (['self.attn_dist', 'f'], {}), '(self.attn_dist, f)\n', (27675, 27694), False, 'import pickle\n'), ((28142, 28171), 'torch.cat', 'torch.cat', (['hidden_last'], {'dim': '(0)'}), '(hidden_last, dim=0)\n', (28151, 28171), False, 'import torch\n'), ((28191, 28214), 'torch.cat', 'torch.cat', (['y_out'], {'dim': '(0)'}), '(y_out, dim=0)\n', (28200, 28214), False, 'import torch\n'), ((33820, 33854), 'torch.save', 'torch.save', (['self.model', 'model_name'], {}), '(self.model, model_name)\n', (33830, 33854), False, 'import torch\n'), ((39269, 39303), 'torch.save', 'torch.save', 
(['self.model', 'model_name'], {}), '(self.model, model_name)\n', (39279, 39303), False, 'import torch\n'), ((1187, 1200), 'numpy.asarray', 'np.asarray', (['s'], {}), '(s)\n', (1197, 1200), True, 'import numpy as np\n'), ((9075, 9133), 'torch.tensor', 'torch.tensor', (['final_dict_testing[key][1]'], {'dtype': 'torch.long'}), '(final_dict_testing[key][1], dtype=torch.long)\n', (9087, 9133), False, 'import torch\n'), ((9153, 9193), 'torch.tensor', 'torch.tensor', (['final_dict_testing[key][2]'], {}), '(final_dict_testing[key][2])\n', (9165, 9193), False, 'import torch\n'), ((14622, 14651), 'os.path.join', 'os.path.join', (['filename', 'files'], {}), '(filename, files)\n', (14634, 14651), False, 'import os\n'), ((20023, 20037), 'torch.load', 'torch.load', (['m1'], {}), '(m1)\n', (20033, 20037), False, 'import torch\n'), ((20046, 20060), 'torch.load', 'torch.load', (['m2'], {}), '(m2)\n', (20056, 20060), False, 'import torch\n'), ((31296, 31307), 'time.time', 'time.time', ([], {}), '()\n', (31305, 31307), False, 'import time\n'), ((31435, 31469), 'torch.save', 'torch.save', (['self.model', 'model_name'], {}), '(self.model, model_name)\n', (31445, 31469), False, 'import torch\n'), ((37303, 37337), 'torch.save', 'torch.save', (['self.model', 'model_name'], {}), '(self.model, model_name)\n', (37313, 37337), False, 'import torch\n'), ((6253, 6285), 'torch.tensor', 'torch.tensor', (["elem['verb_index']"], {}), "(elem['verb_index'])\n", (6265, 6285), False, 'import torch\n'), ((6673, 6705), 'torch.tensor', 'torch.tensor', (["elem['verb_index']"], {}), "(elem['verb_index'])\n", (6685, 6705), False, 'import torch\n'), ((7147, 7179), 'torch.tensor', 'torch.tensor', (["elem['verb_index']"], {}), "(elem['verb_index'])\n", (7159, 7179), False, 'import torch\n'), ((7573, 7605), 'torch.tensor', 'torch.tensor', (["elem['verb_index']"], {}), "(elem['verb_index'])\n", (7585, 7605), False, 'import torch\n')] |
import numpy as np
class Gene:
    """Permutation-encoded gene for a route-optimization genetic algorithm.

    Attributes:
    -----------
        CITY_NUM {int} -- the number of cities
        GENE_SIZE {int} -- gene size
        gene {np.ndarray} -- inversion-sequence encoding of the route
        route {list[int]} -- visiting order decoded from ``gene``
    """

    def __init__(self, gene_size, city_num):
        """
        Arguments:
        ----------
            gene_size {int} -- size of gene
            city_num {int} -- the number of cities
        """
        self.CITY_NUM = city_num
        self.GENE_SIZE = gene_size
        self._initialize_gene()

    def _initialize_gene(self):
        """Draw a random gene: slot i selects one of the GENE_SIZE - i remaining cities."""
        self.gene = np.zeros(self.GENE_SIZE, dtype=np.int32)
        for i in range(self.GENE_SIZE):
            choice = np.random.randint(0, self.GENE_SIZE - i, 1)[0]
            self.gene[i] = int(choice)

    def get_route_pair(self):
        """Yield consecutive (city, next_city) pairs along the closed tour."""
        self._convert_to_route()
        for position in range(self.CITY_NUM):
            yield self.route[position], self.route[(position + 1) % self.CITY_NUM]

    def _convert_to_route(self):
        """Decode ``self.gene`` by repeatedly popping from the list of unvisited cities."""
        unvisited = list(range(self.CITY_NUM))
        self.route = [unvisited.pop(g) for g in self.gene]
"numpy.zeros",
"numpy.random.randint"
] | [((680, 720), 'numpy.zeros', 'np.zeros', (['self.GENE_SIZE'], {'dtype': 'np.int32'}), '(self.GENE_SIZE, dtype=np.int32)\n', (688, 720), True, 'import numpy as np\n'), ((781, 824), 'numpy.random.randint', 'np.random.randint', (['(0)', '(self.GENE_SIZE - i)', '(1)'], {}), '(0, self.GENE_SIZE - i, 1)\n', (798, 824), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
# @Time : 2019-05-21 19:55
# @Author : LeeHW
# @File : Prepare_data.py
# @Software: PyCharm
from glob import glob
from flags import *
import os
from scipy import misc
import numpy as np
import datetime
import imageio
from multiprocessing.dummy import Pool as ThreadPool
import argparse
# Command-line configuration for the HR/LR pair preparation script.
parser = argparse.ArgumentParser()
parser.add_argument('--save-dir', type=str, default='/eva_data/zchin/srfbn_data',
                    help='save directory of images after pre-processing')
parser.add_argument('--dataroot', type=str, default='/eva_data/zchin/vrdl_hw4_data', help='raw hr training images')
parser.add_argument('--mode', type=str, default='train', help='all_train, train or val')
args = parser.parse_args()

starttime = datetime.datetime.now()
# exist_ok=True replaces the racy isdir()-then-makedirs() check and matches
# the style of the two makedirs calls below.
os.makedirs(args.save_dir, exist_ok=True)
# Output layout: <save_dir>/<mode>/{HR_x3,LR_x3}
save_HR_path = os.path.join(args.save_dir, args.mode, 'HR_x3')
save_LR_path = os.path.join(args.save_dir, args.mode, 'LR_x3')
os.makedirs(save_HR_path, exist_ok=True)
os.makedirs(save_LR_path, exist_ok=True)

train_HR_dir = os.path.join(args.dataroot, args.mode)
# Sort for a deterministic processing order across runs.
file_list = sorted(glob(os.path.join(train_HR_dir, '*.png')))
# print(file_list)
# Downscale factors applied to each HR image before generating its LR pair.
HR_size = [100, 0.8, 0.7, 0.6, 0.5]
def save_HR_LR(img, size, path, idx):
    """Write one HR/LR training pair (plus 180-degree-rotated copies) to disk.

    The HR image is resized by ``size``, cropped to a multiple of 3, and
    saved together with its 180-degree rotation; 1/3-scale bicubic
    downsamples of both are saved as the matching LR images.

    NOTE(review): scipy.misc.imresize/imrotate/imsave were removed in
    SciPy 1.3.0, so this code requires an old SciPy (+Pillow) environment —
    confirm the pinned versions, or port to PIL/imageio.

    Args:
        img: HR image array as loaded by imageio.
        size: resize factor/size passed straight to misc.imresize.
        path: source file path; its basename seeds the output filenames.
        idx: index of the downscale factor, embedded in the filenames.
    """
    HR_img = misc.imresize(img, size, interp='bicubic')
    HR_img = modcrop(HR_img, 3)
    rot180_img = misc.imrotate(HR_img, 180)
    # Despite the "x4" names, the LR images are 1/3 scale (x3 SR task).
    x4_img = misc.imresize(HR_img, 1 / 3, interp='bicubic')
    x4_rot180_img = misc.imresize(rot180_img, 1 / 3, interp='bicubic')
    # HR and LR filenames are identical strings; they only differ by the
    # directory (save_HR_path vs save_LR_path) they are written into.
    img_path = path.split('/')[-1].split('.')[0] + '_rot0_' + 'ds' + str(idx) + '.png'
    rot180img_path = path.split('/')[-1].split('.')[0] + '_rot180_' + 'ds' + str(idx) + '.png'
    x4_img_path = path.split('/')[-1].split('.')[0] + '_rot0_' + 'ds' + str(idx) + '.png'
    x4_rot180img_path = path.split('/')[-1].split('.')[0] + '_rot180_' + 'ds' + str(idx) + '.png'
    misc.imsave(save_HR_path + '/' + img_path, HR_img)
    misc.imsave(save_HR_path + '/' + rot180img_path, rot180_img)
    misc.imsave(save_LR_path + '/' + x4_img_path, x4_img)
    misc.imsave(save_LR_path + '/' + x4_rot180img_path, x4_rot180_img)
def modcrop(image, scale=3):
    """Crop ``image`` so its height and width are exact multiples of ``scale``.

    Args:
        image (np.ndarray): input image with at least two leading spatial
            dimensions (H x W grayscale, H x W x C colour, ...).
        scale (int): factor the spatial dimensions must be divisible by.

    Returns:
        np.ndarray: ``image`` cropped to (H - H % scale, W - W % scale);
        any trailing channel axes are kept intact.
    """
    h, w = image.shape[0], image.shape[1]
    h -= np.mod(h, scale)
    w -= np.mod(w, scale)
    # Slicing only the first two axes leaves trailing axes untouched, so the
    # previous duplicated 2-D / 3-D branches collapse into a single return
    # (and arrays of any ndim >= 2 now work).
    return image[0:h, 0:w]
def main(path):
    """Process one HR image: report progress, then write every scaled variant.

    Args:
        path: filesystem path of the HR source image.
    """
    print('Processing-----{}/0800'.format(path.split('/')[-1].split('.')[0]))
    img = imageio.imread(path)
    # One HR/LR pair per configured downscale factor.
    for idx, size in enumerate(HR_size):
        save_HR_LR(img, size, path, idx)
# Fan the per-image work out over a thread pool; the underlying image
# read/write calls are I/O-bound, so threads still give a speedup here.
items = file_list
pool = ThreadPool()
pool.map(main, items)
pool.close()
pool.join()
endtime = datetime.datetime.now()
# Report total wall-clock preprocessing time in whole seconds.
print((endtime - starttime).seconds)
| [
"scipy.misc.imrotate",
"os.makedirs",
"argparse.ArgumentParser",
"scipy.misc.imsave",
"os.path.join",
"datetime.datetime.now",
"os.path.isdir",
"scipy.misc.imresize",
"imageio.imread",
"numpy.mod",
"multiprocessing.dummy.Pool"
] | [((327, 352), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (350, 352), False, 'import argparse\n'), ((754, 777), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (775, 777), False, 'import datetime\n'), ((862, 909), 'os.path.join', 'os.path.join', (['args.save_dir', 'args.mode', '"""HR_x3"""'], {}), "(args.save_dir, args.mode, 'HR_x3')\n", (874, 909), False, 'import os\n'), ((925, 972), 'os.path.join', 'os.path.join', (['args.save_dir', 'args.mode', '"""LR_x3"""'], {}), "(args.save_dir, args.mode, 'LR_x3')\n", (937, 972), False, 'import os\n'), ((973, 1013), 'os.makedirs', 'os.makedirs', (['save_HR_path'], {'exist_ok': '(True)'}), '(save_HR_path, exist_ok=True)\n', (984, 1013), False, 'import os\n'), ((1014, 1054), 'os.makedirs', 'os.makedirs', (['save_LR_path'], {'exist_ok': '(True)'}), '(save_LR_path, exist_ok=True)\n', (1025, 1054), False, 'import os\n'), ((1071, 1109), 'os.path.join', 'os.path.join', (['args.dataroot', 'args.mode'], {}), '(args.dataroot, args.mode)\n', (1083, 1109), False, 'import os\n'), ((2744, 2756), 'multiprocessing.dummy.Pool', 'ThreadPool', ([], {}), '()\n', (2754, 2756), True, 'from multiprocessing.dummy import Pool as ThreadPool\n'), ((2814, 2837), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (2835, 2837), False, 'import datetime\n'), ((786, 814), 'os.path.isdir', 'os.path.isdir', (['args.save_dir'], {}), '(args.save_dir)\n', (799, 814), False, 'import os\n'), ((820, 846), 'os.makedirs', 'os.makedirs', (['args.save_dir'], {}), '(args.save_dir)\n', (831, 846), False, 'import os\n'), ((1280, 1322), 'scipy.misc.imresize', 'misc.imresize', (['img', 'size'], {'interp': '"""bicubic"""'}), "(img, size, interp='bicubic')\n", (1293, 1322), False, 'from scipy import misc\n'), ((1372, 1398), 'scipy.misc.imrotate', 'misc.imrotate', (['HR_img', '(180)'], {}), '(HR_img, 180)\n', (1385, 1398), False, 'from scipy import misc\n'), ((1412, 1458), 'scipy.misc.imresize', 
'misc.imresize', (['HR_img', '(1 / 3)'], {'interp': '"""bicubic"""'}), "(HR_img, 1 / 3, interp='bicubic')\n", (1425, 1458), False, 'from scipy import misc\n'), ((1479, 1529), 'scipy.misc.imresize', 'misc.imresize', (['rot180_img', '(1 / 3)'], {'interp': '"""bicubic"""'}), "(rot180_img, 1 / 3, interp='bicubic')\n", (1492, 1529), False, 'from scipy import misc\n'), ((1906, 1956), 'scipy.misc.imsave', 'misc.imsave', (["(save_HR_path + '/' + img_path)", 'HR_img'], {}), "(save_HR_path + '/' + img_path, HR_img)\n", (1917, 1956), False, 'from scipy import misc\n'), ((1961, 2021), 'scipy.misc.imsave', 'misc.imsave', (["(save_HR_path + '/' + rot180img_path)", 'rot180_img'], {}), "(save_HR_path + '/' + rot180img_path, rot180_img)\n", (1972, 2021), False, 'from scipy import misc\n'), ((2026, 2079), 'scipy.misc.imsave', 'misc.imsave', (["(save_LR_path + '/' + x4_img_path)", 'x4_img'], {}), "(save_LR_path + '/' + x4_img_path, x4_img)\n", (2037, 2079), False, 'from scipy import misc\n'), ((2084, 2150), 'scipy.misc.imsave', 'misc.imsave', (["(save_LR_path + '/' + x4_rot180img_path)", 'x4_rot180_img'], {}), "(save_LR_path + '/' + x4_rot180img_path, x4_rot180_img)\n", (2095, 2150), False, 'from scipy import misc\n'), ((2601, 2621), 'imageio.imread', 'imageio.imread', (['path'], {}), '(path)\n', (2615, 2621), False, 'import imageio\n'), ((1134, 1169), 'os.path.join', 'os.path.join', (['train_HR_dir', '"""*.png"""'], {}), "(train_HR_dir, '*.png')\n", (1146, 1169), False, 'import os\n'), ((2258, 2274), 'numpy.mod', 'np.mod', (['h', 'scale'], {}), '(h, scale)\n', (2264, 2274), True, 'import numpy as np\n'), ((2291, 2307), 'numpy.mod', 'np.mod', (['w', 'scale'], {}), '(w, scale)\n', (2297, 2307), True, 'import numpy as np\n'), ((2396, 2412), 'numpy.mod', 'np.mod', (['h', 'scale'], {}), '(h, scale)\n', (2402, 2412), True, 'import numpy as np\n'), ((2429, 2445), 'numpy.mod', 'np.mod', (['w', 'scale'], {}), '(w, scale)\n', (2435, 2445), True, 'import numpy as np\n')] |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Routine for decoding the CIFAR-10 binary file format."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from six.moves import xrange # pylint: disable=redefined-builtin
import tensorflow as tf
import numpy as np
# Global constants describing the CIFAR-10 data set.
IMAGE_SIZE = 32             # images are 32x32 pixels
IMAGE_DEPTH = 3             # RGB channels
NUM_CLASSES_CIFAR10 = 10    # CIFAR-10 label count
NUM_CLASSES_CIFAR20 = 20    # CIFAR-100 coarse label count
NUM_CLASSES_CIFAR100 = 100  # CIFAR-100 fine label count
NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN = 50000
NUM_EXAMPLES_PER_EPOCH_FOR_EVAL = 10000
def read_cifar10(filename_queue, coarse_or_fine=None):
    """Read and parse one example from the CIFAR-10 binary files.

    Call this function N times for N-way read parallelism: each call creates
    an independent reader with its own position in the files, which gives
    better mixing of examples.

    Args:
      filename_queue: a queue of filename strings to read from.
      coarse_or_fine: unused; kept for signature parity with read_cifar100.

    Returns:
      (image, label): a [32, 32, 3] float32 image tensor and an int32 label
      tensor of shape [1] with values in 0..9.
    """
    # CIFAR-10 binary record layout: <1 x label><3072 x pixel>, 3072 = 32*32*3.
    label_bytes = 1  # 2 for CIFAR-100
    height = 32
    width = 32
    depth = 3
    image_bytes = height * width * depth
    # Each record is a fixed-size label + image pair.
    record_bytes = label_bytes + image_bytes
    # CIFAR-10 records carry no per-record header or footer bytes.
    reader = tf.FixedLengthRecordReader(record_bytes=record_bytes, header_bytes=0, footer_bytes=0)
    _, raw_record = reader.read(filename_queue)
    # Reinterpret the record string as a uint8 vector of length record_bytes.
    record = tf.decode_raw(raw_record, tf.uint8)
    # The first byte is the label; cast uint8 -> int32.
    label = tf.cast(tf.strided_slice(record, [0], [label_bytes]), tf.int32)
    label.set_shape([1])
    # The remaining bytes are the image, stored depth-major as
    # [depth, height, width].
    depth_major = tf.reshape(
        tf.strided_slice(record, [label_bytes], [record_bytes]),
        [depth, height, width])
    # Reorder to [height, width, depth] and convert to float32.
    image = tf.cast(tf.transpose(depth_major, [1, 2, 0]), tf.float32)
    return image, label
def read_cifar100(filename_queue, coarse_or_fine='fine'):
    """Read and parse one example from the CIFAR-100 binary files.

    Call this function N times for N-way read parallelism: each call creates
    an independent reader with its own position in the files.

    Args:
      filename_queue: a queue of filename strings to read from.
      coarse_or_fine: 'fine' selects the 100-class fine label; anything else
        selects the 20-class coarse label.

    Returns:
      (image, label): a [32, 32, 3] float32 image tensor and an int32 label
      tensor of shape [1].
    """
    height = 32
    width = 32
    depth = 3
    # CIFAR-100 records carry two labels, one byte each:
    # <1 x coarse label><1 x fine label><3072 x pixel>
    coarse_label_bytes = 1
    fine_label_bytes = 1
    image_bytes = height * width * depth
    # Each record is a fixed-size labels + image blob.
    record_bytes = coarse_label_bytes + fine_label_bytes + image_bytes
    # CIFAR-100 records carry no per-record header or footer bytes.
    reader = tf.FixedLengthRecordReader(record_bytes=record_bytes, header_bytes=0, footer_bytes=0)
    _, byte_data = reader.read(filename_queue)
    # Reinterpret the record string as a uint8 vector of length record_bytes.
    uint_data = tf.decode_raw(byte_data, tf.uint8)
    # BUG FIX: the label slices must be taken from the decoded tensor
    # `uint_data`, not from the Python int `record_bytes` (a copy-paste
    # error in the original).
    coarse_label = tf.cast(tf.strided_slice(uint_data, [0], [coarse_label_bytes]), tf.int32)
    fine_label = tf.cast(
        tf.strided_slice(uint_data, [coarse_label_bytes], [coarse_label_bytes + fine_label_bytes]),
        tf.int32)
    if coarse_or_fine == 'fine':
        label = fine_label  # 100 fine-grained classes
    else:
        label = coarse_label  # 20 coarse classes
    label.set_shape([1])
    # The remaining bytes are the image, stored depth-major as
    # [depth, height, width].
    depth_major = tf.reshape(
        tf.strided_slice(uint_data, [coarse_label_bytes + fine_label_bytes], [record_bytes]),
        [depth, height, width])
    # Reorder to [height, width, depth] and convert to float32.
    image = tf.cast(tf.transpose(depth_major, [1, 2, 0]), tf.float32)
    return image, label
def _generate_image_and_label_batch(image, label, min_queue_examples,
                                    batch_size, height, shuffle, channels_last=True):
    """Construct a queued batch of images and labels.

    Args:
      image: 3-D Tensor of [height, width, 3] of type float32.
      label: 1-D Tensor of type int32.
      min_queue_examples: int32, minimum number of samples to retain
        in the queue that provides batches of examples.
      batch_size: number of images per batch.
      height: image height, recorded in the output features.
      shuffle: whether to draw from a shuffling queue.
      channels_last: if False, transpose images to channels-first layout.

    Returns:
      A dict of batched feature tensors ('images', 'labels', 'recons_image',
      'recons_label') plus scalar metadata entries.
    """
    if not channels_last:
        image = tf.transpose(image, [2, 0, 1])
    features = {
        'images': image,
        'labels': tf.one_hot(label, 10),
        'recons_image': image,
        'recons_label': label,
    }
    queue_capacity = min_queue_examples + 3 * batch_size
    if shuffle:
        batched = tf.train.shuffle_batch(
            features,
            batch_size=batch_size,
            num_threads=16,
            capacity=queue_capacity,
            min_after_dequeue=min_queue_examples)
    else:
        batched = tf.train.batch(
            features,
            batch_size=batch_size,
            num_threads=1,
            capacity=queue_capacity)
    # Flatten batching artifacts back to the expected shapes.
    batched['labels'] = tf.reshape(batched['labels'], [batch_size, 10])
    batched['recons_label'] = tf.reshape(batched['recons_label'], [batch_size])
    batched['height'] = height
    batched['depth'] = 3
    batched['num_targets'] = 1
    batched['num_classes'] = 10
    # Display the training images in the visualizer.
    tf.summary.image('images', batched['images'])
    return batched
def _distort_resize(image, height, width):
    """Apply training-time distortions to a CIFAR image.

    Randomly crops, flips horizontally, and jitters brightness and contrast.

    Args:
      image: a float32 tensor with last dimension equal to 3.
      height: output image height after cropping.
      width: output image width after cropping.

    Returns:
      A float32 tensor with shape [height, width, 3].
    """
    out = tf.random_crop(image, [height, width, 3])
    out = tf.image.random_flip_left_right(out)
    out = tf.image.random_brightness(out, max_delta=63)
    out = tf.image.random_contrast(out, lower=0.2, upper=1.8)
    out.set_shape([height, width, 3])
    return out
def inputs(cifar10or20or100, eval_data, data_dir, batch_size, distort=False):
    """Read a CIFAR dataset with Reader ops.

    Args:
      cifar10or20or100: 10 for CIFAR-10, 20 for coarse CIFAR-100,
        100 for fine CIFAR-100.
      eval_data: if True read the test split, otherwise the training split.
      data_dir: directory containing the CIFAR binary files.
      batch_size: number of images per batch.
      distort: whether to apply data-augmentation distortions (training only).

    Returns:
      A dict of batched feature tensors (see _generate_image_and_label_batch).

    Raises:
      ValueError: if cifar10or20or100 is unsupported or a data file is missing.
    """
    if cifar10or20or100 == 10:
        read_cifar = read_cifar10
        coarse_or_fine = None
        if not eval_data:
            filenames = [os.path.join(data_dir, 'data_batch_%d.bin' % i) for i in xrange(1, 6)]
            num_examples_per_epoch = NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN
        else:
            filenames = [os.path.join(data_dir, 'test_batch.bin')]
            num_examples_per_epoch = NUM_EXAMPLES_PER_EPOCH_FOR_EVAL
    elif cifar10or20or100 in (20, 100):
        read_cifar = read_cifar100
        # 100 -> fine (100-class) labels, 20 -> coarse (20-class) labels.
        coarse_or_fine = 'fine' if cifar10or20or100 == 100 else 'coarse'
        if not eval_data:
            filenames = [os.path.join(data_dir, 'train.bin')]
            num_examples_per_epoch = NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN
        else:
            filenames = [os.path.join(data_dir, 'test.bin')]
            num_examples_per_epoch = NUM_EXAMPLES_PER_EPOCH_FOR_EVAL
    else:
        # Robustness fix: the original silently fell through here, leaving
        # `filenames` unbound and failing later with UnboundLocalError.
        raise ValueError('cifar10or20or100 must be 10, 20, or 100, got %r' % (cifar10or20or100,))
    for f in filenames:
        if not tf.gfile.Exists(f):
            raise ValueError('Failed to find file: ' + f)
    # Build a filename queue from the file list.
    filename_queue = tf.train.string_input_producer(filenames)
    # Read one example from the files in the queue.
    float_image, label = read_cifar(filename_queue, coarse_or_fine=coarse_or_fine)
    # Output image size; here it matches the source image size.
    height = IMAGE_SIZE
    width = IMAGE_SIZE
    if distort:
        # Pad by 4 pixels on each side so the subsequent random crop has room
        # to translate the image.
        padded_image = tf.image.resize_image_with_crop_or_pad(float_image, width + 4, height + 4)
        # BUG FIX: distort the *padded* image. The original passed float_image,
        # which left padded_image unused and made random_crop a no-op
        # (cropping 32x32 out of a 32x32 image).
        resized_image = _distort_resize(padded_image, height, width)
    else:
        # Crop the central [height, width] of the image (identity here).
        resized_image = tf.image.resize_image_with_crop_or_pad(float_image, width, height)
    # Standardize: subtract the mean and divide by the stddev per image.
    image = tf.image.per_image_standardization(resized_image)
    # Ensure that the random shuffling has good mixing properties.
    min_fraction_of_examples_in_queue = 0.4
    min_queue_examples = int(num_examples_per_epoch *
                             min_fraction_of_examples_in_queue)
    print('Filling queue with %d CIFAR images before starting to train. '
          'This will take a few minutes.' % min_queue_examples)
    # Generate a batch of images and labels by building up a queue of examples.
    return _generate_image_and_label_batch(image, label,
                                           min_queue_examples, batch_size, height,
                                           shuffle=not eval_data)
### load data by python ###
LABEL_SIZE = 1      # bytes used for the label in each CIFAR-10 record
PIXEL_DEPTH = 255   # maximum pixel intensity
NUM_CLASSES = 10    # CIFAR-10 has ten classes
TRAIN_NUM = 10000   # examples per training batch file
TRAIN_NUMS = 50000  # total training examples across the five batch files
TEST_NUM = 10000    # examples in the test batch file
def extract_data(filenames):
    """Read CIFAR-10 binary batch files into numpy arrays.

    Each file is expected to hold TRAIN_NUM records laid out as
    <1 x label><32*32*3 x pixel>.

    Args:
      filenames: list of paths to CIFAR-10 binary batch files.

    Returns:
      (labels, images): a (N,) uint8 label array and a
      (N, 32, 32, 3) uint8 image array, N = TRAIN_NUM * len(filenames).

    Raises:
      ValueError: if any file does not exist.
    """
    # Verify all files exist before reading any of them.
    for f in filenames:
        if not tf.gfile.Exists(f):
            raise ValueError('Failed to find file: ' + f)
    labels = None
    images = None
    for f in filenames:
        # Bug fix: use a context manager so the file handle is closed
        # (the original opened the file and never closed it).
        with open(f, 'rb') as bytestream:
            buf = bytestream.read(TRAIN_NUM * (IMAGE_SIZE * IMAGE_SIZE * IMAGE_DEPTH + LABEL_SIZE))
        # Reinterpret the byte stream as a uint8 array.
        data = np.frombuffer(buf, dtype=np.uint8)
        # One row per record: label byte(s) followed by the flattened image.
        data = data.reshape(TRAIN_NUM, LABEL_SIZE + IMAGE_SIZE * IMAGE_SIZE * IMAGE_DEPTH)
        # Split off the label column(s) from the pixel columns.
        labels_images = np.hsplit(data, [LABEL_SIZE])
        label = labels_images[0].reshape(TRAIN_NUM)
        image = labels_images[1].reshape(TRAIN_NUM, IMAGE_SIZE, IMAGE_SIZE, IMAGE_DEPTH)
        if labels is None:
            labels = label
            images = image
        else:
            # Concatenate across files; '+' would not work on arrays here.
            labels = np.concatenate((labels, label))
            images = np.concatenate((images, image))
    # Normalization is deliberately deferred to the compute graph.
    return labels, images
def extract_train_data(files_dir, valid_size=0.1):
    """Load the five CIFAR-10 training batches and split train/validation.

    Args:
      files_dir: directory containing data_batch_1.bin .. data_batch_5.bin.
      valid_size: fraction of the training set held out for validation.

    Returns:
      (train_images, valid_images, train_labels, valid_labels), randomly
      partitioned.
    """
    filenames = [os.path.join(files_dir, 'data_batch_%d.bin' % i) for i in xrange(1, 6)]
    labels, images = extract_data(filenames)
    indices = np.random.permutation(TRAIN_NUMS)
    # Bug fix: TRAIN_NUMS * valid_size is a float, and slice bounds must be
    # integers — the original raised TypeError here.
    n_valid = int(TRAIN_NUMS * valid_size)
    valid_idx, train_idx = indices[:n_valid], indices[n_valid:]
    return images[train_idx], images[valid_idx], labels[train_idx], labels[valid_idx]
def extract_test_data(files_dir):
    """Load the CIFAR-10 test batch from files_dir."""
    test_batch = os.path.join(files_dir, 'test_batch.bin')
    return extract_data([test_batch])
def dense_to_one_hot(labels_dense, num_classes):
    """Convert dense labels like [1, 5, ...] into one-hot rows.

    Args:
      labels_dense: 1-D integer array of class indices.
      num_classes: number of columns in the one-hot encoding.

    Returns:
      A (len(labels_dense), num_classes) float array with a single 1 per row.
    """
    num_labels = labels_dense.shape[0]
    one_hot = np.zeros((num_labels, num_classes))
    # Set exactly one column per row using fancy indexing.
    one_hot[np.arange(num_labels), labels_dense.ravel()] = 1
    return one_hot
class Cifar10DataSet(object):
    """In-memory CIFAR-10 dataset: loads train/valid/test splits from disk
    and serves shuffled mini-batches with epoch bookkeeping."""
    def __init__(self, data_dir):
        super(Cifar10DataSet, self).__init__()
        # Load and split the five training batch files.
        self.train_images, self.valid_images, self.train_labels, self.valid_labels = extract_train_data(
            os.path.join(data_dir, 'cifar10/cifar-10-batches-bin'))
        self.test_labels, self.test_images = extract_test_data(os.path.join(data_dir, 'cifar10/cifar-10-batches-bin'))
        print(self.train_labels.size)
        # Convert dense integer labels to one-hot vectors.
        self.train_labels = dense_to_one_hot(self.train_labels, NUM_CLASSES)
        self.test_labels = dense_to_one_hot(self.test_labels, NUM_CLASSES)
        # Number of completed passes over the training data.
        self.epochs_completed = 0
        # Cursor marking how far into the current epoch batching has advanced.
        self.index_in_epoch = 0
    def next_train_batch(self, batch_size):
        """Return the next (images, labels) training batch, reshuffling the
        training data whenever an epoch boundary is crossed."""
        # Start position of this batch within the (possibly shuffled) data.
        start = self.index_in_epoch
        self.index_in_epoch += batch_size
        # An epoch has been completed.
        if self.index_in_epoch > TRAIN_NUMS:
            self.epochs_completed += 1
            # Shuffle so each epoch sees a different example order.
            perm = np.arange(TRAIN_NUMS)
            np.random.shuffle(perm)
            self.train_images = self.train_images[perm]
            self.train_labels = self.train_labels[perm]
            start = 0
            self.index_in_epoch = batch_size
            # Fails loudly if a single batch exceeds the training set size.
            assert batch_size <= TRAIN_NUMS
        end = self.index_in_epoch
        return self.train_images[start:end], self.train_labels[start:end]
    def valid_data(self):
        """Return the held-out validation split as (images, labels)."""
        return self.valid_images, self.valid_labels
    def test_data(self):
        """Return the test split as (images, labels)."""
        return self.test_images, self.test_labels
| [
"tensorflow.transpose",
"tensorflow.FixedLengthRecordReader",
"six.moves.xrange",
"tensorflow.train.shuffle_batch",
"numpy.arange",
"tensorflow.summary.image",
"tensorflow.gfile.Exists",
"tensorflow.decode_raw",
"tensorflow.random_crop",
"numpy.concatenate",
"numpy.frombuffer",
"tensorflow.tra... | [((3084, 3173), 'tensorflow.FixedLengthRecordReader', 'tf.FixedLengthRecordReader', ([], {'record_bytes': 'record_bytes', 'header_bytes': '(0)', 'footer_bytes': '(0)'}), '(record_bytes=record_bytes, header_bytes=0,\n footer_bytes=0)\n', (3110, 3173), True, 'import tensorflow as tf\n'), ((3399, 3433), 'tensorflow.decode_raw', 'tf.decode_raw', (['byte_data', 'tf.uint8'], {}), '(byte_data, tf.uint8)\n', (3412, 3433), True, 'import tensorflow as tf\n'), ((5651, 5740), 'tensorflow.FixedLengthRecordReader', 'tf.FixedLengthRecordReader', ([], {'record_bytes': 'record_bytes', 'header_bytes': '(0)', 'footer_bytes': '(0)'}), '(record_bytes=record_bytes, header_bytes=0,\n footer_bytes=0)\n', (5677, 5740), True, 'import tensorflow as tf\n'), ((5891, 5925), 'tensorflow.decode_raw', 'tf.decode_raw', (['byte_data', 'tf.uint8'], {}), '(byte_data, tf.uint8)\n', (5904, 5925), True, 'import tensorflow as tf\n'), ((8349, 8405), 'tensorflow.reshape', 'tf.reshape', (["batched_features['labels']", '[batch_size, 10]'], {}), "(batched_features['labels'], [batch_size, 10])\n", (8359, 8405), True, 'import tensorflow as tf\n'), ((8489, 8547), 'tensorflow.reshape', 'tf.reshape', (["batched_features['recons_label']", '[batch_size]'], {}), "(batched_features['recons_label'], [batch_size])\n", (8499, 8547), True, 'import tensorflow as tf\n'), ((8770, 8824), 'tensorflow.summary.image', 'tf.summary.image', (['"""images"""', "batched_features['images']"], {}), "('images', batched_features['images'])\n", (8786, 8824), True, 'import tensorflow as tf\n'), ((9286, 9327), 'tensorflow.random_crop', 'tf.random_crop', (['image', '[height, width, 3]'], {}), '(image, [height, width, 3])\n', (9300, 9327), True, 'import tensorflow as tf\n'), ((9350, 9398), 'tensorflow.image.random_flip_left_right', 'tf.image.random_flip_left_right', (['distorted_image'], {}), '(distorted_image)\n', (9381, 9398), True, 'import tensorflow as tf\n'), ((9421, 9478), 'tensorflow.image.random_brightness', 
'tf.image.random_brightness', (['distorted_image'], {'max_delta': '(63)'}), '(distorted_image, max_delta=63)\n', (9447, 9478), True, 'import tensorflow as tf\n'), ((9501, 9564), 'tensorflow.image.random_contrast', 'tf.image.random_contrast', (['distorted_image'], {'lower': '(0.2)', 'upper': '(1.8)'}), '(distorted_image, lower=0.2, upper=1.8)\n', (9525, 9564), True, 'import tensorflow as tf\n'), ((11304, 11345), 'tensorflow.train.string_input_producer', 'tf.train.string_input_producer', (['filenames'], {}), '(filenames)\n', (11334, 11345), True, 'import tensorflow as tf\n'), ((12059, 12108), 'tensorflow.image.per_image_standardization', 'tf.image.per_image_standardization', (['resized_image'], {}), '(resized_image)\n', (12093, 12108), True, 'import tensorflow as tf\n'), ((14286, 14319), 'numpy.random.permutation', 'np.random.permutation', (['TRAIN_NUMS'], {}), '(TRAIN_NUMS)\n', (14307, 14319), True, 'import numpy as np\n'), ((14924, 14959), 'numpy.zeros', 'np.zeros', (['(num_labels, num_classes)'], {}), '((num_labels, num_classes))\n', (14932, 14959), True, 'import numpy as np\n'), ((3571, 3618), 'tensorflow.strided_slice', 'tf.strided_slice', (['uint_data', '[0]', '[label_bytes]'], {}), '(uint_data, [0], [label_bytes])\n', (3587, 3618), True, 'import tensorflow as tf\n'), ((3907, 3965), 'tensorflow.strided_slice', 'tf.strided_slice', (['uint_data', '[label_bytes]', '[record_bytes]'], {}), '(uint_data, [label_bytes], [record_bytes])\n', (3923, 3965), True, 'import tensorflow as tf\n'), ((4159, 4195), 'tensorflow.transpose', 'tf.transpose', (['depth_major', '[1, 2, 0]'], {}), '(depth_major, [1, 2, 0])\n', (4171, 4195), True, 'import tensorflow as tf\n'), ((5994, 6051), 'tensorflow.strided_slice', 'tf.strided_slice', (['record_bytes', '[0]', '[coarse_label_bytes]'], {}), '(record_bytes, [0], [coarse_label_bytes])\n', (6010, 6051), True, 'import tensorflow as tf\n'), ((6129, 6226), 'tensorflow.strided_slice', 'tf.strided_slice', (['record_bytes', 
'[coarse_label_bytes]', '[coarse_label_bytes + fine_label_bytes]'], {}), '(record_bytes, [coarse_label_bytes], [coarse_label_bytes +\n fine_label_bytes])\n', (6145, 6226), True, 'import tensorflow as tf\n'), ((6536, 6625), 'tensorflow.strided_slice', 'tf.strided_slice', (['uint_data', '[coarse_label_bytes + fine_label_bytes]', '[record_bytes]'], {}), '(uint_data, [coarse_label_bytes + fine_label_bytes], [\n record_bytes])\n', (6552, 6625), True, 'import tensorflow as tf\n'), ((6746, 6782), 'tensorflow.transpose', 'tf.transpose', (['depth_major', '[1, 2, 0]'], {}), '(depth_major, [1, 2, 0])\n', (6758, 6782), True, 'import tensorflow as tf\n'), ((7677, 7707), 'tensorflow.transpose', 'tf.transpose', (['image', '[2, 0, 1]'], {}), '(image, [2, 0, 1])\n', (7689, 7707), True, 'import tensorflow as tf\n'), ((7768, 7789), 'tensorflow.one_hot', 'tf.one_hot', (['label', '(10)'], {}), '(label, 10)\n', (7778, 7789), True, 'import tensorflow as tf\n'), ((7903, 8067), 'tensorflow.train.shuffle_batch', 'tf.train.shuffle_batch', (['features'], {'batch_size': 'batch_size', 'num_threads': '(16)', 'capacity': '(min_queue_examples + 3 * batch_size)', 'min_after_dequeue': 'min_queue_examples'}), '(features, batch_size=batch_size, num_threads=16,\n capacity=min_queue_examples + 3 * batch_size, min_after_dequeue=\n min_queue_examples)\n', (7925, 8067), True, 'import tensorflow as tf\n'), ((8157, 8270), 'tensorflow.train.batch', 'tf.train.batch', (['features'], {'batch_size': 'batch_size', 'num_threads': '(1)', 'capacity': '(min_queue_examples + 3 * batch_size)'}), '(features, batch_size=batch_size, num_threads=1, capacity=\n min_queue_examples + 3 * batch_size)\n', (8171, 8270), True, 'import tensorflow as tf\n'), ((11651, 11725), 'tensorflow.image.resize_image_with_crop_or_pad', 'tf.image.resize_image_with_crop_or_pad', (['float_image', '(width + 4)', '(height + 4)'], {}), '(float_image, width + 4, height + 4)\n', (11689, 11725), True, 'import tensorflow as tf\n'), ((11951, 12017), 
'tensorflow.image.resize_image_with_crop_or_pad', 'tf.image.resize_image_with_crop_or_pad', (['float_image', 'width', 'height'], {}), '(float_image, width, height)\n', (11989, 12017), True, 'import tensorflow as tf\n'), ((13382, 13416), 'numpy.frombuffer', 'np.frombuffer', (['buf'], {'dtype': 'np.uint8'}), '(buf, dtype=np.uint8)\n', (13395, 13416), True, 'import numpy as np\n'), ((13564, 13593), 'numpy.hsplit', 'np.hsplit', (['data', '[LABEL_SIZE]'], {}), '(data, [LABEL_SIZE])\n', (13573, 13593), True, 'import numpy as np\n'), ((14154, 14202), 'os.path.join', 'os.path.join', (['files_dir', "('data_batch_%d.bin' % i)"], {}), "(files_dir, 'data_batch_%d.bin' % i)\n", (14166, 14202), False, 'import os\n'), ((14569, 14610), 'os.path.join', 'os.path.join', (['files_dir', '"""test_batch.bin"""'], {}), "(files_dir, 'test_batch.bin')\n", (14581, 14610), False, 'import os\n'), ((14850, 14871), 'numpy.arange', 'np.arange', (['num_labels'], {}), '(num_labels)\n', (14859, 14871), True, 'import numpy as np\n'), ((11181, 11199), 'tensorflow.gfile.Exists', 'tf.gfile.Exists', (['f'], {}), '(f)\n', (11196, 11199), True, 'import tensorflow as tf\n'), ((13048, 13066), 'tensorflow.gfile.Exists', 'tf.gfile.Exists', (['f'], {}), '(f)\n', (13063, 13066), True, 'import tensorflow as tf\n'), ((13878, 13909), 'numpy.concatenate', 'np.concatenate', (['(labels, label)'], {}), '((labels, label))\n', (13892, 13909), True, 'import numpy as np\n'), ((13931, 13962), 'numpy.concatenate', 'np.concatenate', (['(images, image)'], {}), '((images, image))\n', (13945, 13962), True, 'import numpy as np\n'), ((14212, 14224), 'six.moves.xrange', 'xrange', (['(1)', '(6)'], {}), '(1, 6)\n', (14218, 14224), False, 'from six.moves import xrange\n'), ((15361, 15415), 'os.path.join', 'os.path.join', (['data_dir', '"""cifar10/cifar-10-batches-bin"""'], {}), "(data_dir, 'cifar10/cifar-10-batches-bin')\n", (15373, 15415), False, 'import os\n'), ((15480, 15534), 'os.path.join', 'os.path.join', (['data_dir', 
'"""cifar10/cifar-10-batches-bin"""'], {}), "(data_dir, 'cifar10/cifar-10-batches-bin')\n", (15492, 15534), False, 'import os\n'), ((16283, 16304), 'numpy.arange', 'np.arange', (['TRAIN_NUMS'], {}), '(TRAIN_NUMS)\n', (16292, 16304), True, 'import numpy as np\n'), ((16317, 16340), 'numpy.random.shuffle', 'np.random.shuffle', (['perm'], {}), '(perm)\n', (16334, 16340), True, 'import numpy as np\n'), ((10324, 10371), 'os.path.join', 'os.path.join', (['data_dir', "('data_batch_%d.bin' % i)"], {}), "(data_dir, 'data_batch_%d.bin' % i)\n", (10336, 10371), False, 'import os\n'), ((10504, 10544), 'os.path.join', 'os.path.join', (['data_dir', '"""test_batch.bin"""'], {}), "(data_dir, 'test_batch.bin')\n", (10516, 10544), False, 'import os\n'), ((10760, 10795), 'os.path.join', 'os.path.join', (['data_dir', '"""train.bin"""'], {}), "(data_dir, 'train.bin')\n", (10772, 10795), False, 'import os\n'), ((10906, 10940), 'os.path.join', 'os.path.join', (['data_dir', '"""test.bin"""'], {}), "(data_dir, 'test.bin')\n", (10918, 10940), False, 'import os\n'), ((10381, 10393), 'six.moves.xrange', 'xrange', (['(1)', '(6)'], {}), '(1, 6)\n', (10387, 10393), False, 'from six.moves import xrange\n')] |
from abc import abstractmethod
from typing import Any, Dict, Optional, Sequence, Tuple
import gym
import numpy as np
from gym import spaces
from gym.utils import seeding
from gym_simplifiedtetris.envs._simplified_tetris_engine import _SimplifiedTetrisEngine
class _SimplifiedTetrisBaseEnv(gym.Env):
    """
    All custom envs inherit from gym.Env and implement the essential methods
    and spaces.
    :param grid_dims: the grid dimensions.
    :param piece_size: the size of every piece.
    :param seed: the rng seed.
    """

    metadata = {"render.modes": ["human", "rgb_array"]}
    reward_range = (0, 4)

    @property
    def action_space(self) -> spaces.Discrete:
        # The discrete action space; its size depends on grid width and piece size.
        return spaces.Discrete(self._num_actions_)

    @property
    @abstractmethod
    def observation_space(self):
        raise NotImplementedError()

    def __init__(
        self, *, grid_dims: Sequence[int], piece_size: int, seed: Optional[int] = 8191
    ) -> None:
        # BUG FIX: the original checked isinstance(..., np.array); np.array is
        # a factory function, not a type, so isinstance() raised TypeError for
        # every input. np.ndarray is the intended type.
        if not isinstance(grid_dims, (list, tuple, np.ndarray)) or len(grid_dims) != 2:
            raise TypeError(
                "Inappropriate format provided for grid_dims. It should be a list, tuple or numpy array of length 2 containing integers."
            )
        assert piece_size in [
            1,
            2,
            3,
            4,
        ], "piece_size should be either 1, 2, 3, or 4."
        assert list(grid_dims) in [
            [20, 10],
            [10, 10],
            [8, 6],
            [7, 4],
        ], f"Grid dimensions must be one of (20, 10), (10, 10), (8, 6), or (7, 4)."
        self._height_, self._width_ = grid_dims
        self._piece_size_ = piece_size
        # Number of available actions and distinct pieces, keyed by piece size.
        self._num_actions_, self._num_pieces_ = {
            1: (grid_dims[1], 1),
            2: (2 * grid_dims[1] - 1, 1),
            3: (4 * grid_dims[1] - 4, 2),
            4: (4 * grid_dims[1] - 6, 7),
        }[piece_size]
        self._seed(seed)
        self._engine = _SimplifiedTetrisEngine(
            grid_dims=grid_dims,
            piece_size=piece_size,
            num_pieces=self._num_pieces_,
            num_actions=self._num_actions_,
        )

    def __str__(self) -> str:
        return np.array(self._engine._grid.T, dtype=int).__str__()

    def __repr__(self) -> str:
        # BUG FIX: the original split an f-string replacement field across
        # lines ({self.\n_width_!r}), a SyntaxError before Python 3.12.
        return (
            f"{self.__class__.__name__}(({self._height_!r}, "
            f"{self._width_!r}), {self._piece_size_!r})"
        )

    def reset(self) -> np.array:
        """
        Reset the env.
        :return: the current obs.
        """
        self._engine._reset()
        return self._get_obs()

    def step(self, action: int, /) -> Tuple[np.array, float, bool, Dict[str, Any]]:
        """
        Hard drop the current piece according to the action. Terminate the
        game if the piece cannot fit into the bottom 'height-piece_size' rows.
        Otherwise, select a new piece and reset the anchor.
        :param action: the action to be taken.
        :return: the next observation, reward, game termination indicator, and env info.
        """
        info = {}
        translation, rotation = self._engine._get_translation_rotation(action)
        self._engine._rotate_piece(rotation)
        self._engine._anchor = [translation, self._piece_size_ - 1]
        info["anchor"] = (translation, rotation)
        self._engine._hard_drop()
        self._engine._update_grid(True)
        # The game terminates when any of the dropped piece's blocks occupies
        # any of the top 'piece_size' rows, before any full rows are cleared.
        if np.any(self._engine._grid[:, : self._piece_size_]):
            info["num_rows_cleared"] = 0
            self._engine._final_scores = np.append(
                self._engine._final_scores, self._engine._score
            )
            return self._get_obs(), self._get_terminal_reward(), True, info
        reward, num_rows_cleared = self._get_reward()
        self._engine._score += num_rows_cleared
        self._engine._update_coords_and_anchor()
        info["num_rows_cleared"] = num_rows_cleared
        return self._get_obs(), reward, False, info

    def render(self, mode: Optional[str] = "human", /) -> np.ndarray:
        """
        Render the env.
        :param mode: the render mode.
        :return: the image pixel values.
        """
        return self._engine._render(mode)

    def close(self) -> None:
        """Close the open windows."""
        return self._engine._close()

    def _seed(self, seed: Optional[int] = 8191, /) -> None:
        """
        Seed the env.
        :param seed: an optional seed to seed the rng with.
        """
        self._np_random, _ = seeding.np_random(seed)

    def _get_reward(self) -> Tuple[float, int]:
        """
        Return the reward.
        :return: the reward and the number of lines cleared.
        """
        return self._engine._get_reward()

    @staticmethod
    def _get_terminal_reward() -> float:
        """
        Return the terminal reward.
        :return: the terminal reward.
        """
        return 0.0

    @abstractmethod
    def _get_obs(self):
        raise NotImplementedError()
| [
"gym_simplifiedtetris.envs._simplified_tetris_engine._SimplifiedTetrisEngine",
"gym.spaces.Discrete",
"numpy.any",
"numpy.append",
"numpy.array",
"gym.utils.seeding.np_random"
] | [((736, 771), 'gym.spaces.Discrete', 'spaces.Discrete', (['self._num_actions_'], {}), '(self._num_actions_)\n', (751, 771), False, 'from gym import spaces\n'), ((1989, 2122), 'gym_simplifiedtetris.envs._simplified_tetris_engine._SimplifiedTetrisEngine', '_SimplifiedTetrisEngine', ([], {'grid_dims': 'grid_dims', 'piece_size': 'piece_size', 'num_pieces': 'self._num_pieces_', 'num_actions': 'self._num_actions_'}), '(grid_dims=grid_dims, piece_size=piece_size,\n num_pieces=self._num_pieces_, num_actions=self._num_actions_)\n', (2012, 2122), False, 'from gym_simplifiedtetris.envs._simplified_tetris_engine import _SimplifiedTetrisEngine\n'), ((3569, 3618), 'numpy.any', 'np.any', (['self._engine._grid[:, :self._piece_size_]'], {}), '(self._engine._grid[:, :self._piece_size_])\n', (3575, 3618), True, 'import numpy as np\n'), ((4673, 4696), 'gym.utils.seeding.np_random', 'seeding.np_random', (['seed'], {}), '(seed)\n', (4690, 4696), False, 'from gym.utils import seeding\n'), ((3705, 3763), 'numpy.append', 'np.append', (['self._engine._final_scores', 'self._engine._score'], {}), '(self._engine._final_scores, self._engine._score)\n', (3714, 3763), True, 'import numpy as np\n'), ((2224, 2265), 'numpy.array', 'np.array', (['self._engine._grid.T'], {'dtype': 'int'}), '(self._engine._grid.T, dtype=int)\n', (2232, 2265), True, 'import numpy as np\n')] |
"quantify shape and depth diversity of FHIR data"
# conda create -n py39 python=3.9
# conda activate py39
# pip install rich, numpy
# python fhir.py
from dataclasses import dataclass
from itertools import chain
from typing import Dict, List, Optional, Tuple
import json
import os
from plotly.subplots import make_subplots
import plotly.graph_objects as go
from rich import print
import numpy as np
Array = np.ndarray  # alias for readability in type hints
# data from https://synthetichealth.github.io/synthea/
VERSIONS = ("dstu2", "stu3", "r4")  # FHIR releases, one data folder each
N_PATIENTS = 200  # use None to analyze all patient bundles in each folder
def test_get_paths():
    """Unit checks for get_paths covering dicts, sequences, scalars, and
    shape equality of structurally identical trees."""
    nested_dict = {
        "a": "bion",
        "b": {"c": "is", "d": "cool"},
    }
    assert get_paths(nested_dict) == (("a",), ("b", "c"), ("b", "d"))
    tuple_valued = {"a": ("bion", "is", "cool")}
    assert get_paths(tuple_valued) == (("a", 0), ("a", 1), ("a", 2))
    doubly_nested = {"a": {"b": ("bion", "is", "cool")}}
    assert get_paths(doubly_nested) == (("a", "b", 0), ("a", "b", 1), ("a", "b", 2))
    # Every scalar leaf yields a single empty path.
    for scalar in ("bion", b"cool", True, None, 1):
        assert get_paths(scalar) == ((),)
    # Trees with the same structure but different leaf values share paths.
    shape1 = {"a": "bion", "b": {"c": "is", "d": "cool"}}
    shape2 = {"a": "stuff", "b": {"c": True, "d": False}}
    assert get_paths(shape1) == get_paths(shape2)
def get_paths(
    item: any, path: Tuple[any, ...] = ()
) -> Tuple[Tuple[any, ...]]:  # (()) or ((step0,), (step0, step1) ...)
    """
    given a PyTree / collection, returns a tuple of path tuples, one per leaf.
    leaves are non-collection types (str, int, float, bool, bytes, None)
    get_paths({"a": "bion", "b": {"c": "is", "d": "cool"}}) = (("a",), ("b", "c"), ("b", "d"))
    get_paths({"a": ("bion", "is", "cool")}) = (("a", 0), ("a", 1), ("a", 2))
    {"a": {"b": ("bion", "is", "cool")}} = (("a", "b", 0), ("a", "b", 1), ("a", "b", 2))
    """
    # A scalar leaf contributes exactly one path: the path taken to reach it.
    if isinstance(item, (str, int, float, bool, bytes, type(None))):
        return (path,)
    # Collections recurse per child; dicts step by key, sequences by index.
    if isinstance(item, dict):
        children = item.items()
    elif isinstance(item, (list, tuple)):
        children = enumerate(item)
    else:
        raise TypeError(f"unsupported type: {type(item)}")
    return tuple(
        chain.from_iterable(
            get_paths(child, path + (step,)) for step, child in children
        )
    )
def get_data(folder: str, n_patients: Optional[int] = N_PATIENTS) -> List[dict]:
    """Load up to n_patients JSON bundles from data/<folder>.

    A value of None for n_patients loads every .json file in the folder.
    """
    bundles: List[dict] = []
    folder_path = os.path.join("data", folder)
    for filename in os.listdir(folder_path):
        if not filename.endswith(".json"):
            continue
        with open(os.path.join("data", folder, filename)) as handle:
            bundles.append(json.load(handle))
        if n_patients is not None and len(bundles) == n_patients:
            break
    return bundles
def group_by_resource_type(bundles: List[dict]) -> Dict[str, List[dict]]:
    """Group FHIR bundle entries by their resourceType.

    Args:
        bundles: list of FHIR bundle dicts, each with an "entry" list whose
            items carry a "resource" dict with a "resourceType" key.

    Returns:
        dict mapping resourceType -> list of resource dicts, in input order.
    """
    grouped: Dict[str, List[dict]] = {}
    for bundle in bundles:
        for entry in bundle["entry"]:
            resource = entry["resource"]
            # setdefault replaces the explicit membership check + list init
            grouped.setdefault(resource["resourceType"], []).append(resource)
    return grouped
def get_shapes_and_depths(grouped: dict) -> Tuple[Dict[str, tuple], Dict[str, Array]]:
    """Compute shape and leaf-depth statistics per resource type.

    Args:
        grouped: {resource_type: [resource dict, ...]} as returned by
            group_by_resource_type.

    Returns:
        (shapes, depths): shapes maps resource_type -> tuple of the distinct
        leaf counts seen across its instances; depths maps resource_type ->
        numpy array of every leaf depth observed.
    """
    shapes, depths = {}, {}
    for resource_type, instances in grouped.items():
        # grouped keys are unique, so each resource_type is seen exactly once;
        # the previous "not in" membership checks were dead code.
        n_leaves = []
        leaf_depths = []
        for instance in instances:
            paths = get_paths(instance)
            n_leaves.append(len(paths))
            leaf_depths.extend(len(path) for path in paths)
        shapes[resource_type] = tuple(set(n_leaves))
        depths[resource_type] = np.array(leaf_depths)
    return shapes, depths
@dataclass(frozen=True)
class VersionStats:
    "statistics for a FHIR version"
    # FHIR version label this record summarizes (used as a dict key elsewhere)
    version: str
    # number of patient bundles sampled for this version
    n_patients: int
    counts: Dict[str, int] # {resource_type: count}
    depths: Dict[str, Array] # {resource_type: leaf_depths}
    shapes: Dict[str, tuple] # {resource_type: (shape, ...)}
def get_version_stats(version: str, n_patients: int = N_PATIENTS) -> Dict[str, tuple]:
    """Load one FHIR version's sample and summarize it as a VersionStats."""
    grouped = group_by_resource_type(get_data(version, n_patients))
    shapes, depths = get_shapes_and_depths(grouped)
    counts = {resource_type: len(instances)
              for resource_type, instances in grouped.items()}
    return VersionStats(
        version=version,
        n_patients=n_patients,
        counts=counts,
        depths=depths,
        shapes=shapes,
    )
def show_version(stats: VersionStats) -> None:
    """Render a VersionStats to stdout.

    Prints the version header, the sample size, and a per-resource-type
    count / n_shapes / depth summary.
    """
    # BUG FIX: the next two prints were missing the f-prefix, so the literal
    # text "{stats.version}" / "{stats.n_patients}" was printed verbatim.
    print(f"FHIR Version {stats.version}")
    print(f" n_patients: {stats.n_patients}")
    for key in sorted(stats.counts.keys()):
        print(f" {key}: ")
        print(f" count: {stats.counts[key]}")
        print(f" n_shapes: {len(stats.shapes[key])}")
        print(f" avg_depth: {stats.depths[key].mean()}")
        print(f" max_depth: {stats.depths[key].max()}")
@dataclass(frozen=True)
class ResourceTypeStats:
    "statistics for a resource type"
    # FHIR resource type name (e.g. the "resourceType" field of a resource)
    resource_type: str
    # number of patient bundles sampled per version
    n_patients: int
    counts: Dict[str, int] # {version: count}
    depths: Dict[str, Array] # {version: depths}
    shapes: Dict[str, tuple] # {version: shapes}
def get_resource_stats(
    stats: Dict[str, VersionStats] # {version: version_stats}
) -> Dict[str, ResourceTypeStats]: # {resource_type: resource_type_stats}
    """Pivot per-version statistics into per-resource-type statistics."""
    by_type: Dict[str, ResourceTypeStats] = {}
    for version, version_stats in stats.items():
        for resource_type, count in version_stats.counts.items():
            if resource_type not in by_type:
                # first sighting of this resource type: start empty maps
                by_type[resource_type] = ResourceTypeStats(
                    resource_type=resource_type,
                    n_patients=version_stats.n_patients,
                    counts={},
                    depths={},
                    shapes={},
                )
            type_stats = by_type[resource_type]
            type_stats.counts[version] = count
            type_stats.depths[version] = version_stats.depths[resource_type]
            type_stats.shapes[version] = version_stats.shapes[resource_type]
    return by_type
def show_resource_stats(stats: ResourceTypeStats) -> None:
    """Print one resource type's per-version count/shape/depth summary."""
    print(f"Resource Type: {stats.resource_type}")
    for version, count in stats.counts.items():
        version_shapes = stats.shapes[version]
        version_depths = stats.depths[version]
        print(f" {version}: ")
        print(f" count: {count}")
        print(f" n_shapes: {len(version_shapes)}")
        print(f" avg_depth: {version_depths.mean()}")
        print(f" max_depth: {version_depths.max()}")
def plot_lines_and_violins(
    all_version_stats: Dict[str, VersionStats], # {version: version_stats}
    all_resource_type_stats: Dict[
        str, ResourceTypeStats
    ], # {resource_type: resource_type_stats}
) -> go.Figure:
    """
    Plots 2 subfigures in 1 column
    top row: a (version, n_shapes) line per resource type
    top row: a (version, n_shapes) violin per version
    bottom row: a (version, depths) violin per version

    Returns the assembled plotly Figure; the caller shows or saves it.
    """
    # make a figure with 2 rows and 1 column
    fig = make_subplots(
        rows=2,
        cols=1,
        shared_xaxes=True,
        vertical_spacing=0.05,
        subplot_titles=("", ""),
    )
    # label the figure
    fig.update_layout(
        title_text="Resource Polymorphism & Nesting Of FHIR Versions",
        xaxis_title="--- FHIR Version ---> ",
        yaxis_title="Count",
        width=1000,
        height=800,
    )
    # make the top row
    # add a (version, n_shapes) line per resource_type
    for resource_type, resource_type_stats in all_resource_type_stats.items():
        fig.add_trace(
            go.Scatter(
                x=list(resource_type_stats.counts.keys()),
                y=list(
                    len(resource_type_stats.shapes[version])
                    for version in resource_type_stats.counts.keys()
                ),
                mode="lines",
                name=resource_type,
            ),
            row=1,
            col=1,
        )
    # label the top row
    fig.update_yaxes(
        title_text="Polymorphism / # Unique Shapes (lower is better)", row=1, col=1
    )
    # add a (version, n_shapes) violin per version
    # NOTE(review): any version key absent from this dict raises KeyError below
    colors = {"dstu2": "red", "stu3": "green", "r4": "blue"}
    for version, version_stats in all_version_stats.items():
        # group by version
        n_shapes = list(map(len, version_stats.shapes.values()))
        fig.add_trace(
            go.Violin(
                x=[version] * len(n_shapes),
                y=n_shapes,
                name=version,
                marker_color=colors[version],
                showlegend=False,
                legendgroup=version,
            ),
            row=1,
            col=1,
        )
    # make the bottom row
    # add a (version, depths) violin per version
    for version, version_stats in all_version_stats.items():
        # group all the depths for all the resource_types of this version
        depths = np.concatenate(
            [
                version_stats.depths[resource_type]
                for resource_type in version_stats.counts.keys()
            ]
        )
        fig.add_trace(
            go.Violin(
                x=[version] * len(depths),
                y=depths,
                name=version,
                marker_color=colors[version],
                legendgroup=version,
            ),
            row=2,
            col=1,
        )
    # label the bottom row
    fig.update_yaxes(title_text="Nesting / Leaf Depth (lower is better)", row=2, col=1)
    return fig
def plot_bars(
    all_resource_type_stats: Dict[
        str, ResourceTypeStats
    ], # {resource_type: resource_type_stats}
) -> go.Figure:
    """
    Plots 2 subfigures in 1 column
    top row: a (resource_type, n_shapes) bar group per resource_type, one bar per version
    bottom row: a (resource_type, depths) box plot group per resource_type, one box per version

    Returns the assembled plotly Figure; the caller shows or saves it.
    """
    # make a figure with 2 rows and 1 column
    fig = make_subplots(
        rows=2,
        cols=1,
        shared_xaxes=True,
        vertical_spacing=0.05,
        subplot_titles=("", ""),
    )
    # label the figure
    fig.update_layout(
        title_text="Polymorphism & Nesting Of FHIR Resource Types",
        xaxis_title="Resource Type",
        yaxis_title="Count",
        barmode="group",
        width=1000,
        height=800,
    )
    colors = {"dstu2": "red", "stu3": "green", "r4": "blue"}
    # make the top row
    # add a (resource_type, n_shapes) bar group per resource_type, one bar per version
    for version in colors.keys():
        # only resource types that actually occur in this version;
        # x and y filter on the same condition so they stay aligned
        x = [
            resource_type
            for resource_type in all_resource_type_stats.keys()
            if version in all_resource_type_stats[resource_type].counts
        ]
        y = [
            len(resource_type_stats.shapes[version])
            for resource_type_stats in all_resource_type_stats.values()
            if version in resource_type_stats.counts
        ]
        fig.add_trace(
            go.Bar(
                name=version,
                x=x,
                y=y,
                marker_color=colors[version],
                legendgroup=version,
            ),
            row=1,
            col=1,
        )
    # label the top row
    fig.update_yaxes(
        title_text="Polymorphism / # Unique Shapes (lower is better)", row=1, col=1
    )
    # add a (resource_type, depths) bar group per resource_type, one box per version
    for version in colors.keys():
        x = [
            resource_type
            for resource_type in all_resource_type_stats.keys()
            if version in all_resource_type_stats[resource_type].counts
        ]
        y = [
            resource_type_stats.depths[version].mean()
            for resource_type_stats in all_resource_type_stats.values()
            if version in resource_type_stats.counts
        ]
        fig.add_trace(
            go.Bar(
                name=version,
                x=x,
                y=y,
                marker_color=colors[version],
                legendgroup=version,
                showlegend=False,
            ),
            row=2,
            col=1,
        )
    # label the bottom row
    fig.update_yaxes(
        title_text="Average Nesting / Leaf Depth (lower is better)", row=2, col=1
    )
    return fig
def find_worst_offenders(
    all_resource_type_stats: Dict[str, ResourceTypeStats],
    version: str,
) -> Dict[str, ResourceTypeStats]:
    """
    Find the resource types with the worst polymorphism and nesting for
    *version*: most unique shapes, deepest mean leaf, deepest single leaf.
    Ties keep the first resource type encountered.
    """
    worst_poly = None  # most distinct shapes
    worst_mean = None  # deepest on average
    worst_max = None   # deepest single leaf
    for resource_type, type_stats in all_resource_type_stats.items():
        if version not in type_stats.counts:
            continue
        n_shapes = len(type_stats.shapes[version])
        depths = type_stats.depths[version]
        if worst_poly is None or n_shapes > len(
            all_resource_type_stats[worst_poly].shapes[version]
        ):
            worst_poly = resource_type
        if (
            worst_mean is None
            or depths.mean()
            > all_resource_type_stats[worst_mean].depths[version].mean()
        ):
            worst_mean = resource_type
        if (
            worst_max is None
            or depths.max()
            > all_resource_type_stats[worst_max].depths[version].max()
        ):
            worst_max = resource_type
    return {
        "version": version,
        "most_polymorphic": all_resource_type_stats[worst_poly],
        "deepest_by_mean": all_resource_type_stats[worst_mean],
        "deepest_by_max": all_resource_type_stats[worst_max],
    }
# - the resource type with the most inconsistent data:
# ImagingStudy with 177 different shapes in a sample of 977 ImagingStudy instances
# - the resource type with most deeply nested data (on average):
# ImagingStudy, which requires an average of 5.3 operations to access each leaf
# - the resource type with most deeply nested data (worst case):
# ExplanationOfBenefit has a leaf which requires 8 operations to access
def show_worst_offenders(worst_offenders: dict) -> None:
    """Print a human-readable report of the worst-offender resource types,
    as produced by find_worst_offenders."""
    version = worst_offenders["version"]
    print(f"\nworst offenders in FHIR {version}\n")
    most_polymorphic = worst_offenders["most_polymorphic"]
    n_shapes = len(most_polymorphic.shapes[version])
    count = most_polymorphic.counts[version]
    print("the resource type with the most inconsistent data:")
    print(
        f"{most_polymorphic.resource_type}, with {n_shapes} unique shapes in a sample of {count} {most_polymorphic.resource_type} instances"
    )
    print()
    deepest_by_mean = worst_offenders["deepest_by_mean"]
    mean_depth = deepest_by_mean.depths[version].mean()
    print("the resource type with most deeply nested data (on average):")
    print(
        f"{deepest_by_mean.resource_type}, which requires an average of {mean_depth} operations to access each leaf"
    )
    print()
    deepest_by_max = worst_offenders["deepest_by_max"]
    max_depth = deepest_by_max.depths[version].max()
    print("the resource type with most deeply nested data (worst case):")
    print(
        f"{deepest_by_max.resource_type}, which has a leaf which requires {max_depth} operations to access"
    )
if __name__ == "__main__":
    # gather per-version statistics for every FHIR version under test
    # (VERSIONS and N_PATIENTS are module-level constants defined elsewhere)
    version_stats = {
        version: get_version_stats(version, n_patients=N_PATIENTS)
        for version in VERSIONS
    }
    # pivot the same statistics so they are keyed by resource type
    resource_stats = get_resource_stats(version_stats)
    # to make output.txt, uncomment this and run `python fhir.py > output.txt`
    for resource_type, resource_type_stats in resource_stats.items():
        show_resource_stats(resource_type_stats)
    # to make worst.txt, uncomment this and run `python fhir.py > worst.txt`
    worst_offenders = find_worst_offenders(resource_stats, "r4")
    show_worst_offenders(worst_offenders)
    # to make plots, uncomment this and run `python fhir.py`
    # warning: violin plots are slow if you have a lot of data
    lines_and_violins = plot_lines_and_violins(version_stats, resource_stats)
    lines_and_violins.show()
    # lines_and_violins.write_image("by_fhir_version.png")
    bars = plot_bars(resource_stats)
    bars.show()
# bars.write_image("by_resource_type.png") | [
"plotly.graph_objects.Bar",
"os.listdir",
"plotly.subplots.make_subplots",
"os.path.join",
"dataclasses.dataclass",
"numpy.array",
"rich.print",
"itertools.chain.from_iterable",
"json.load"
] | [((4384, 4406), 'dataclasses.dataclass', 'dataclass', ([], {'frozen': '(True)'}), '(frozen=True)\n', (4393, 4406), False, 'from dataclasses import dataclass\n'), ((5640, 5662), 'dataclasses.dataclass', 'dataclass', ([], {'frozen': '(True)'}), '(frozen=True)\n', (5649, 5662), False, 'from dataclasses import dataclass\n'), ((2842, 2870), 'os.path.join', 'os.path.join', (['"""data"""', 'folder'], {}), "('data', folder)\n", (2854, 2870), False, 'import os\n'), ((2887, 2903), 'os.listdir', 'os.listdir', (['path'], {}), '(path)\n', (2897, 2903), False, 'import os\n'), ((5246, 5283), 'rich.print', 'print', (['"""FHIR Version {stats.version}"""'], {}), "('FHIR Version {stats.version}')\n", (5251, 5283), False, 'from rich import print\n'), ((5288, 5329), 'rich.print', 'print', (['""" n_patients: {stats.n_patients}"""'], {}), "(' n_patients: {stats.n_patients}')\n", (5293, 5329), False, 'from rich import print\n'), ((6961, 7007), 'rich.print', 'print', (['f"""Resource Type: {stats.resource_type}"""'], {}), "(f'Resource Type: {stats.resource_type}')\n", (6966, 7007), False, 'from rich import print\n'), ((7839, 7939), 'plotly.subplots.make_subplots', 'make_subplots', ([], {'rows': '(2)', 'cols': '(1)', 'shared_xaxes': '(True)', 'vertical_spacing': '(0.05)', 'subplot_titles': "('', '')"}), "(rows=2, cols=1, shared_xaxes=True, vertical_spacing=0.05,\n subplot_titles=('', ''))\n", (7852, 7939), False, 'from plotly.subplots import make_subplots\n'), ((10773, 10873), 'plotly.subplots.make_subplots', 'make_subplots', ([], {'rows': '(2)', 'cols': '(1)', 'shared_xaxes': '(True)', 'vertical_spacing': '(0.05)', 'subplot_titles': "('', '')"}), "(rows=2, cols=1, shared_xaxes=True, vertical_spacing=0.05,\n subplot_titles=('', ''))\n", (10786, 10873), False, 'from plotly.subplots import make_subplots\n'), ((15383, 15432), 'rich.print', 'print', (['f"""\nworst offenders in FHIR {version}\n"""'], {}), '(f"""\nworst offenders in FHIR {version}\n""")\n', (15388, 15432), False, 'from rich 
import print\n'), ((15643, 15702), 'rich.print', 'print', (['"""the resource type with the most inconsistent data:"""'], {}), "('the resource type with the most inconsistent data:')\n", (15648, 15702), False, 'from rich import print\n'), ((15707, 15822), 'rich.print', 'print', (['f"""{resource_type}, with {n_shapes} unique shapes in a sample of {count} {resource_type} instances"""'], {}), "(\n f'{resource_type}, with {n_shapes} unique shapes in a sample of {count} {resource_type} instances'\n )\n", (15712, 15822), False, 'from rich import print\n'), ((15831, 15838), 'rich.print', 'print', ([], {}), '()\n', (15836, 15838), False, 'from rich import print\n'), ((16006, 16075), 'rich.print', 'print', (['"""the resource type with most deeply nested data (on average):"""'], {}), "('the resource type with most deeply nested data (on average):')\n", (16011, 16075), False, 'from rich import print\n'), ((16080, 16189), 'rich.print', 'print', (['f"""{resource_type}, which requires an average of {mean_depth} operations to access each leaf"""'], {}), "(\n f'{resource_type}, which requires an average of {mean_depth} operations to access each leaf'\n )\n", (16085, 16189), False, 'from rich import print\n'), ((16198, 16205), 'rich.print', 'print', ([], {}), '()\n', (16203, 16205), False, 'from rich import print\n'), ((16367, 16436), 'rich.print', 'print', (['"""the resource type with most deeply nested data (worst case):"""'], {}), "('the resource type with most deeply nested data (worst case):')\n", (16372, 16436), False, 'from rich import print\n'), ((16441, 16542), 'rich.print', 'print', (['f"""{resource_type}, which has a leaf which requires {max_depth} operations to access"""'], {}), "(\n f'{resource_type}, which has a leaf which requires {max_depth} operations to access'\n )\n", (16446, 16542), False, 'from rich import print\n'), ((4315, 4326), 'numpy.array', 'np.array', (['v'], {}), '(v)\n', (4323, 4326), True, 'import numpy as np\n'), ((5382, 5403), 'rich.print', 'print', 
(['f""" {key}: """'], {}), "(f' {key}: ')\n", (5387, 5403), False, 'from rich import print\n'), ((5412, 5454), 'rich.print', 'print', (['f""" count: {stats.counts[key]}"""'], {}), "(f' count: {stats.counts[key]}')\n", (5417, 5454), False, 'from rich import print\n'), ((7056, 7081), 'rich.print', 'print', (['f""" {version}: """'], {}), "(f' {version}: ')\n", (7061, 7081), False, 'from rich import print\n'), ((7090, 7136), 'rich.print', 'print', (['f""" count: {stats.counts[version]}"""'], {}), "(f' count: {stats.counts[version]}')\n", (7095, 7136), False, 'from rich import print\n'), ((2383, 2410), 'itertools.chain.from_iterable', 'chain.from_iterable', (['nested'], {}), '(nested)\n', (2402, 2410), False, 'from itertools import chain\n'), ((2620, 2647), 'itertools.chain.from_iterable', 'chain.from_iterable', (['nested'], {}), '(nested)\n', (2639, 2647), False, 'from itertools import chain\n'), ((2963, 2997), 'os.path.join', 'os.path.join', (['"""data"""', 'folder', 'file'], {}), "('data', folder, file)\n", (2975, 2997), False, 'import os\n'), ((11796, 11882), 'plotly.graph_objects.Bar', 'go.Bar', ([], {'name': 'version', 'x': 'x', 'y': 'y', 'marker_color': 'colors[version]', 'legendgroup': 'version'}), '(name=version, x=x, y=y, marker_color=colors[version], legendgroup=\n version)\n', (11802, 11882), True, 'import plotly.graph_objects as go\n'), ((12702, 12806), 'plotly.graph_objects.Bar', 'go.Bar', ([], {'name': 'version', 'x': 'x', 'y': 'y', 'marker_color': 'colors[version]', 'legendgroup': 'version', 'showlegend': '(False)'}), '(name=version, x=x, y=y, marker_color=colors[version], legendgroup=\n version, showlegend=False)\n', (12708, 12806), True, 'import plotly.graph_objects as go\n'), ((3064, 3076), 'json.load', 'json.load', (['f'], {}), '(f)\n', (3073, 3076), False, 'import json\n')] |
## PSYC493 - Directed Studies
## Jack 'jryzkns' Zhou 2018
## code used for calculating a range of
## tolerable values of approximate the sine ratio with the identity
from matplotlib.pyplot import plot, show, xlabel, ylabel, title, axis
from math import sin, pi, log
from numpy import arange
def diff(x):
    """Relative error of approximating sin(x) by x: (x - sin(x)) / sin(x).

    Args:
        x: angle in radians; must not make sin(x) zero (e.g. x = 0 or k*pi).

    Returns:
        The fractional (not percentage) error of the small-angle approximation.
    """
    s = sin(x)  # evaluate sin(x) once instead of twice
    return (x - s) / s
threshold = 0.05  # maximum tolerable relative error (5%)
## TOP
x = []   # log of the angle (radians)
y = []   # relative error of the x = sin(x) approximation
yt = []  # constant threshold line for the plot
for i in arange(0.01, pi/2, 0.01):
    err = diff(i)  # compute the error once; it was evaluated twice before
    x.append(log(i))
    y.append(err)
    # reuse the named threshold instead of repeating the 0.05 literal
    yt.append(threshold)
    if err >= threshold:
        print(i*180/pi) #threshold value converted to degrees
plot(x,y)
plot(x,yt)
title("Accuracy of using the approximation x = sin(x)")
xlabel("log(x) in radians")
ylabel("percentage error")
show()
# 30 degrees both ways for threshold 0.05 | [
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"math.log",
"matplotlib.pyplot.title",
"math.sin",
"numpy.arange",
"matplotlib.pyplot.show"
] | [((397, 423), 'numpy.arange', 'arange', (['(0.01)', '(pi / 2)', '(0.01)'], {}), '(0.01, pi / 2, 0.01)\n', (403, 423), False, 'from numpy import arange\n'), ((599, 609), 'matplotlib.pyplot.plot', 'plot', (['x', 'y'], {}), '(x, y)\n', (603, 609), False, 'from matplotlib.pyplot import plot, show, xlabel, ylabel, title, axis\n'), ((609, 620), 'matplotlib.pyplot.plot', 'plot', (['x', 'yt'], {}), '(x, yt)\n', (613, 620), False, 'from matplotlib.pyplot import plot, show, xlabel, ylabel, title, axis\n'), ((620, 675), 'matplotlib.pyplot.title', 'title', (['"""Accuracy of using the approximation x = sin(x)"""'], {}), "('Accuracy of using the approximation x = sin(x)')\n", (625, 675), False, 'from matplotlib.pyplot import plot, show, xlabel, ylabel, title, axis\n'), ((676, 703), 'matplotlib.pyplot.xlabel', 'xlabel', (['"""log(x) in radians"""'], {}), "('log(x) in radians')\n", (682, 703), False, 'from matplotlib.pyplot import plot, show, xlabel, ylabel, title, axis\n'), ((704, 730), 'matplotlib.pyplot.ylabel', 'ylabel', (['"""percentage error"""'], {}), "('percentage error')\n", (710, 730), False, 'from matplotlib.pyplot import plot, show, xlabel, ylabel, title, axis\n'), ((731, 737), 'matplotlib.pyplot.show', 'show', ([], {}), '()\n', (735, 737), False, 'from matplotlib.pyplot import plot, show, xlabel, ylabel, title, axis\n'), ((337, 343), 'math.sin', 'sin', (['x'], {}), '(x)\n', (340, 343), False, 'from math import sin, pi, log\n'), ((438, 444), 'math.log', 'log', (['i'], {}), '(i)\n', (441, 444), False, 'from math import sin, pi, log\n'), ((328, 334), 'math.sin', 'sin', (['x'], {}), '(x)\n', (331, 334), False, 'from math import sin, pi, log\n')] |
"""
Calibration and image printing utility functions
"""
import matplotlib.pyplot as plt
import torchvision
import numpy as np
__all__ = ['make_image', 'show_batch', 'write_calibration']
def write_calibration(
    avg_confs_in_bins,
    acc_in_bin_list,
    prop_bin,
    min_bin,
    max_bin,
    min_pred=0,
    write_file=None,
    suffix=""
):
    """
    Utility function to show calibration through a histogram of classifier confidences

    Args:
        avg_confs_in_bins: mean predicted confidence per bin (x of the curve)
        acc_in_bin_list: per-bin accuracies (y of the curve); assumed to be a
            Python list, since it is concatenated with [min_pred] below
        prop_bin: proportion of samples in each bin (right-hand axis)
        min_bin, max_bin: x-extent of the proportion lines
        min_pred: lower bound of the predicted-value axis / identity line
        write_file: if given, the figure is saved to this path instead of
            shown; the first "_"-token of its basename is prepended to suffix
        suffix: text appended to the x-axis label
    """
    fig, ax1 = plt.subplots()
    # identity line: a perfectly calibrated classifier lies on it
    ax1.plot([min_pred, 1], [min_pred, 1], "k:", label="Perfectly calibrated")
    ax1.plot(avg_confs_in_bins,
             acc_in_bin_list,
             "s-",
             label="%s" % ("Discriminator Calibration"))
    if write_file:
        # derive a label suffix from the output filename's first "_" token
        suffix = write_file.split("/")[-1].split(".")[0].split("_")[0] + "_" + suffix
    ax1.set_xlabel(f"Mean predicted value {suffix}")
    ax1.set_ylabel("Accuracy")
    ymin = min(acc_in_bin_list + [min_pred])
    ax1.set_ylim([ymin, 1.0])
    ax1.legend(loc="lower right")
    # second y-axis for the per-bin sample proportions
    ax2 = ax1.twinx()
    ax2.hlines(prop_bin, min_bin, max_bin, label="%s" % ("Proportion in each bin"), color="r")
    ax2.set_ylabel("Proportion")
    ax2.legend(loc="upper center")
    if not write_file:
        plt.tight_layout()
        plt.show()
    else:
        fig.savefig(
            write_file
        )
def make_image(img, mean=None, std=None, normalize=True):
"""
Transform a CIFAR numpy image into a pytorch image (need to swap dimensions)
"""
if mean is None and std is None:
from mixmo.augmentations.standard_augmentations import cifar_mean, cifar_std
mean = cifar_mean
std = cifar_std
npimg = img.numpy().copy()
if normalize:
for i in range(0, 3):
npimg[i] = npimg[i] * std[i] + mean[i] # unnormalize
return np.transpose(npimg, (1, 2, 0))
def show_batch(images, normalize=True):
    """
    Display a batch of images as a single grid and return the grid image.
    """
    grid = torchvision.utils.make_grid(images)
    grid_image = make_image(grid, normalize=normalize)
    plt.imshow(grid_image)
    plt.show()
    return grid_image
| [
"matplotlib.pyplot.imshow",
"matplotlib.pyplot.tight_layout",
"torchvision.utils.make_grid",
"numpy.transpose",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.show"
] | [((470, 484), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (482, 484), True, 'import matplotlib.pyplot as plt\n'), ((1811, 1841), 'numpy.transpose', 'np.transpose', (['npimg', '(1, 2, 0)'], {}), '(npimg, (1, 2, 0))\n', (1823, 1841), True, 'import numpy as np\n'), ((2023, 2041), 'matplotlib.pyplot.imshow', 'plt.imshow', (['images'], {}), '(images)\n', (2033, 2041), True, 'import matplotlib.pyplot as plt\n'), ((2046, 2056), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2054, 2056), True, 'import matplotlib.pyplot as plt\n'), ((1222, 1240), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (1238, 1240), True, 'import matplotlib.pyplot as plt\n'), ((1249, 1259), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1257, 1259), True, 'import matplotlib.pyplot as plt\n'), ((1961, 1996), 'torchvision.utils.make_grid', 'torchvision.utils.make_grid', (['images'], {}), '(images)\n', (1988, 1996), False, 'import torchvision\n')] |
# Licensed under a 3-clause BSD style license - see LICENSE.rst
# -*- coding: utf-8 -*-
"""
fiberassign.targets
=====================
Functions for loading the target list
"""
from __future__ import absolute_import, division, print_function
import numpy as np
import fitsio
# FIXME: If / when SV bit names diverge from main survey names, we
# should import the SV bitmasks here.
# AR duplicating what s required for sv2;
# AR should definitely be re-written to handle sv3, etc
# AR and duplicating for sv3...
from desitarget.targetmask import desi_mask
from desitarget.cmx.cmx_targetmask import cmx_mask
from desitarget.sv1.sv1_targetmask import desi_mask as sv1_mask
from desitarget.sv1.sv1_targetmask import scnd_mask as sv1_scnd_mask
# AR
from desitarget.sv2.sv2_targetmask import desi_mask as sv2_mask
from desitarget.sv3.sv3_targetmask import desi_mask as sv3_mask
from desitarget.targets import main_cmx_or_sv
from .utils import Logger, Timer
from .hardware import radec2xy, cs52xy
from ._internal import (TARGET_TYPE_SCIENCE, TARGET_TYPE_SKY,
TARGET_TYPE_STANDARD, TARGET_TYPE_SAFE,
TARGET_TYPE_SUPPSKY,
Target, Targets, TargetsAvailable,
LocationsAvailable)
class TargetTagalong(object):
    '''
    This class holds data from the targeting input files that we want
    to propagate to the output fiberassign files, and that are not
    needed by the C++ layer.
    '''
    # NOTE(review): outnames/aliases are mutable default arguments; they are
    # only read here, but callers must not mutate the shared defaults.
    def __init__(self, columns, outnames={}, aliases={}):
        '''
        Create a new tag-along object.
        Args:
            *columns*: list of strings: the column names that will be saved.
            *outnames*: dict, string to string: mapping from 'columns' to the name
                the column will be given in the output file; None to omit
                from the output file.
            *aliases*: dict, string to string: for get_for_ids(), column aliases.
        '''
        self.columns = columns
        self.outnames = outnames
        self.aliases = aliases
        # Internally, we store one tuple for each targeting file read
        # (to avoid manipulating/reformatting the arrays too much),
        # where each tuple starts with the TARGETID of the targets, followed
        # by the data arrays for each column in *columns*.
        self.data = []
    def get_default(self, column):
        '''
        Returns the default value to return for a given *column*, or
        None if not set.
        '''
        # Base implementation has no defaults; subclasses may override.
        return None
    def get_output_name(self, column):
        '''
        Returns the column name to use in the output file for the
        given input column name.
        '''
        return self.outnames.get(column, column)
    def add_data(self, targetids, tabledata, fake={}):
        '''
        Stores data from an input (targeting file) table.
        Arguments:
        *targetids*: numpy array of TARGETID values that must be in the same
            order as the data arrays in *tabledata*.
        *tabledata*: numpy record-array / table from which this tagalong's
            *columns* will be read.
        *fake*: dict from string column name to numpy array, containing column
            data that will be used in place of reading from *tabledata*.
        '''
        # First element of the stored tuple is always the TARGETID array.
        tgarrays = [targetids]
        for k in self.columns:
            if k in fake:
                assert(len(fake[k]) == len(targetids))
                tgarrays.append(fake[k])
            else:
                assert(len(tabledata[k]) == len(targetids))
                tgarrays.append(tabledata[k])
        self.data.append(tgarrays)
    def set_data(self, targetids, tabledata):
        '''
        Sets *ALL* rows of the given *tabledata* object to defaults,
        and then fills in values for the given targetids, if they are found.
        '''
        # Set defaults, and grab output arrays
        outarrs = []
        for c in self.columns:
            defval = self.get_default(c)
            outname = self.get_output_name(c)
            if outname is None:
                # We're omitting this column from the output
                outarrs.append(None)
            else:
                outarr = tabledata[outname]
                if defval is not None:
                    outarr[:] = defval
                outarrs.append(outarr)
        # Build output targetid-to-index map
        outmap = dict([(tid,i) for i,tid in enumerate(targetids)])
        # Go through my many data arrays
        for thedata in self.data:
            # TARGETIDs are the first element in the tuple
            tids = thedata[0]
            # Search for output array indices for these targetids
            outinds = np.array([outmap.get(tid, -1) for tid in tids])
            # Keep only the indices of targetids that were found
            ininds = np.flatnonzero(outinds >= 0)
            outinds = outinds[ininds]
            for outarr,inarr in zip(outarrs, thedata[1:]):
                if outarr is None:
                    continue
                # Write in place; rows not matched above keep their defaults.
                outarr[outinds] = inarr[ininds]
    def get_for_ids(self, targetids, names):
        '''
        Fetch arrays for the given columns names and given targetids.
        Returns one numpy array per requested name, aligned with *targetids*;
        ids never seen by add_data() keep the np.zeros() fill value.
        '''
        # Create output arrays
        outarrs = []
        colinds = []
        for name in names:
            # Resolve aliases (e.g. RA -> PLATE_RA) before the column lookup
            name = self.aliases.get(name, name)
            ic = self.columns.index(name)
            # Look at the data saved for my first dataset to determine
            # the output type.
            dtype = self.data[0][ic+1].dtype
            outarrs.append(np.zeros(len(targetids), dtype))
            colinds.append(ic+1)
        # Build output targetid-to-index map
        outmap = dict([(tid,i) for i,tid in enumerate(targetids)])
        # Go through my many data arrays
        for thedata in self.data:
            tids = thedata[0]
            # Search for output array indices for these targetids
            outinds = np.array([outmap.get(tid, -1) for tid in tids])
            ininds = np.flatnonzero(outinds >= 0)
            outinds = outinds[ininds]
            for outarr,ic in zip(outarrs, colinds):
                outarr[outinds] = thedata[ic][ininds]
        return outarrs
def create_tagalong(plate_radec=True):
    """
    Build the standard TargetTagalong used by fiberassign.

    Args:
        plate_radec (bool): when True, also carry the PLATE_RA/PLATE_DEC
            columns and alias RA/DEC to them; otherwise alias RA/DEC to
            TARGET_RA/TARGET_DEC.

    Returns:
        TargetTagalong carrying the standard columns, with OBSCOND excluded
        from the output table.
    """
    columns = ['TARGET_RA', 'TARGET_DEC', 'OBSCOND', 'FA_TARGET']
    if plate_radec:
        # If PLATE_{RA,DEC} exist in the input target tables, use those
        # when converting RA,DEC to focal-plane coords.
        columns += ['PLATE_RA', 'PLATE_DEC']
        aliases = {'RA': 'PLATE_RA', 'DEC': 'PLATE_DEC'}
    else:
        aliases = {'RA': 'TARGET_RA', 'DEC': 'TARGET_DEC'}
    # (OBSCOND doesn't appear in all the fiberassign output HDUs,
    # so we handle it specially)
    return TargetTagalong(columns, outnames={'OBSCOND': None}, aliases=aliases)
def str_to_target_type(input):
    """Convert a target-type name to its TARGET_TYPE_* integer constant.

    Args:
        input (str): one of "science", "sky", "suppsky", "standard", "safe".

    Returns:
        The matching TARGET_TYPE_* constant.

    Raises:
        ValueError: if the name is not recognized.
    """
    # Dispatch table replaces the if/elif chain; the trailing "return None"
    # after the raise was unreachable and has been removed.
    mapping = {
        "science": TARGET_TYPE_SCIENCE,
        "sky": TARGET_TYPE_SKY,
        "suppsky": TARGET_TYPE_SUPPSKY,
        "standard": TARGET_TYPE_STANDARD,
        "safe": TARGET_TYPE_SAFE,
    }
    try:
        return mapping[input]
    except KeyError:
        raise ValueError("unknown target type '{}'".format(input)) from None
def default_main_sciencemask():
    """Return the default bitmask of science targets for the main survey."""
    mask = 0
    for name in ("LRG", "ELG", "QSO", "BGS_ANY", "MWS_ANY"):
        mask |= desi_mask[name].mask
    # Older desitarget releases name the secondary bit SECONDARY_ANY.
    if "SCND_ANY" in desi_mask.names():
        mask |= desi_mask["SCND_ANY"].mask
    else:
        mask |= desi_mask["SECONDARY_ANY"].mask
    return mask
def default_main_stdmask():
    """Return the default bitmask of standard stars for the main survey."""
    mask = 0
    for name in ("STD_FAINT", "STD_WD", "STD_BRIGHT"):
        mask |= desi_mask[name].mask
    return mask
def default_main_skymask():
    """Return the default bitmask of sky targets for the main survey."""
    # Single bit, so no accumulator is needed.
    return desi_mask["SKY"].mask
def default_main_suppskymask():
    """Return the default bitmask of supplemental-sky targets for the main survey."""
    # Single bit, so no accumulator is needed.
    return desi_mask["SUPP_SKY"].mask
def default_main_safemask():
    """Return the default bitmask of 'safe' targets for the main survey.

    Note: these are targets of last resort; they are safe locations where
    we won't saturate the detector, but aren't good for anything else.
    """
    return desi_mask["BAD_SKY"].mask
def default_main_excludemask():
    """Return the default bitmask of main-survey targets to NOT observe."""
    # Exclude BRIGHT_OBJECT and IN_BRIGHT_OBJECT, but not NEAR_BRIGHT_OBJECT
    return desi_mask.BRIGHT_OBJECT | desi_mask.IN_BRIGHT_OBJECT
def default_sv3_sciencemask():
    """Return the default bitmask of science targets for the SV3 survey."""
    mask = 0
    for name in ("LRG", "ELG", "QSO", "BGS_ANY", "MWS_ANY", "SCND_ANY"):
        mask |= sv3_mask[name].mask
    return mask
def default_sv3_stdmask():
    """Return the default bitmask of standard stars for the SV3 survey."""
    mask = 0
    for name in ("STD_FAINT", "STD_WD", "STD_BRIGHT"):
        mask |= sv3_mask[name].mask
    return mask
def default_sv3_skymask():
    """Return the default bitmask of sky targets for the SV3 survey."""
    # Single bit, so no accumulator is needed.
    return sv3_mask["SKY"].mask
def default_sv3_suppskymask():
    """Return the default bitmask of supplemental-sky targets for the SV3 survey."""
    # Single bit, so no accumulator is needed.
    return sv3_mask["SUPP_SKY"].mask
def default_sv3_safemask():
    """Return the default bitmask of 'safe' targets for the SV3 survey.

    Note: these are targets of last resort; they are safe locations where
    we won't saturate the detector, but aren't good for anything else.
    """
    return sv3_mask["BAD_SKY"].mask
def default_sv3_excludemask():
    """Return the default bitmask of SV3 targets to NOT observe."""
    # Exclude BRIGHT_OBJECT and IN_BRIGHT_OBJECT, but not NEAR_BRIGHT_OBJECT
    return sv3_mask.BRIGHT_OBJECT | sv3_mask.IN_BRIGHT_OBJECT
def default_sv2_sciencemask():
    """Returns default mask of bits for science targets in SV2 survey.
    """
    science_bits = ("LRG", "ELG", "QSO", "BGS_ANY", "MWS_ANY", "SCND_ANY")
    sciencemask = 0
    for bitname in science_bits:
        sciencemask |= sv2_mask[bitname].mask
    return sciencemask
def default_sv2_stdmask():
    """Returns default mask of bits for standards in SV2 survey.
    """
    stdmask = 0
    for bitname in ("STD_FAINT", "STD_WD", "STD_BRIGHT"):
        stdmask |= sv2_mask[bitname].mask
    return stdmask
def default_sv2_skymask():
    """Returns default mask of bits for sky targets in SV2 survey.
    """
    # Only the SKY bit marks a plain sky target.
    return sv2_mask["SKY"].mask
def default_sv2_suppskymask():
    """Returns default mask of bits for suppsky targets in SV2 survey.
    """
    # Only the SUPP_SKY bit marks a supplemental sky target.
    return sv2_mask["SUPP_SKY"].mask
def default_sv2_safemask():
    """Returns default mask of bits for 'safe' targets in SV2 survey.
    Note: these are targets of last resort; they are safe locations where
    we won't saturate the detector, but aren't good for anything else.
    """
    return sv2_mask["BAD_SKY"].mask
def default_sv2_excludemask():
    """Returns default mask of bits for SV2 survey targets to NOT observe.
    """
    # Exclude BRIGHT_OBJECT and IN_BRIGHT_OBJECT, but not NEAR_BRIGHT_OBJECT
    excluded = 0
    for bitname in ("BRIGHT_OBJECT", "IN_BRIGHT_OBJECT"):
        excluded |= getattr(sv2_mask, bitname)
    return excluded
def default_sv1_sciencemask():
    """Returns default mask of bits for science targets in SV1 survey.
    """
    sciencemask = 0
    for bitname in ("LRG", "ELG", "QSO", "BGS_ANY", "MWS_ANY"):
        sciencemask |= sv1_mask[bitname].mask
    # The secondary-target bit was renamed between desitarget versions;
    # use whichever name the installed version defines.
    if "SCND_ANY" in desi_mask.names():
        sciencemask |= desi_mask["SCND_ANY"].mask
    else:
        sciencemask |= desi_mask["SECONDARY_ANY"].mask
    return sciencemask
def default_sv1_stdmask():
    """Returns default mask of bits for standards in SV1 survey.
    """
    stdmask = 0
    for bitname in ("STD_FAINT", "STD_WD", "STD_BRIGHT"):
        stdmask |= sv1_mask[bitname].mask
    return stdmask
def default_sv1_skymask():
    """Returns default mask of bits for sky targets in SV1 survey.
    """
    # Only the SKY bit marks a plain sky target.
    return sv1_mask["SKY"].mask
def default_sv1_suppskymask():
    """Returns default mask of bits for suppsky targets in SV1 survey.
    """
    # Only the SUPP_SKY bit marks a supplemental sky target.
    return sv1_mask["SUPP_SKY"].mask
def default_sv1_safemask():
    """Returns default mask of bits for 'safe' targets in SV1 survey.
    Note: these are targets of last resort; they are safe locations where
    we won't saturate the detector, but aren't good for anything else.
    """
    return sv1_mask["BAD_SKY"].mask
def default_sv1_excludemask():
    """Returns default mask of bits for SV1 survey targets to NOT observe.
    """
    # Exclude BRIGHT_OBJECT and IN_BRIGHT_OBJECT, but not NEAR_BRIGHT_OBJECT
    excluded = 0
    for bitname in ("BRIGHT_OBJECT", "IN_BRIGHT_OBJECT"):
        excluded |= getattr(sv1_mask, bitname)
    return excluded
def default_cmx_sciencemask():
    """Returns default mask of bits for science targets in CMX survey.
    """
    science_bits = (
        "STD_GAIA", "SV0_STD_BRIGHT", "STD_TEST", "STD_CALSPEC",
        "STD_DITHER", "STD_FAINT",
        "SV0_BGS", "SV0_MWS", "SV0_LRG", "SV0_ELG", "SV0_QSO", "SV0_WD",
        "BACKUP_BRIGHT", "BACKUP_FAINT",
        "M31_STD_BRIGHT", "M31_H2PN", "M31_GC", "M31_VAR", "M31_QSO",
        "M31_BSPL", "M31_M31cen", "M31_M31out",
        "ORI_STD_BRIGHT", "ORI_QSO", "ORI_ORI", "ORI_HA",
        # NEW bits for SV0- March 2020- desitarget 0.37.0
        "SV0_QSO_Z5", "SV0_MWS_CLUSTER", "SV0_MWS_CLUSTER_VERYBRIGHT",
        "ROS_STD_BRIGHT", "ROS_QSO", "ROS_ROSM17", "ROS_ROS1", "ROS_HA",
        "ROS_ROS2",
        "M33_STD_BRIGHT", "M33_H2PN", "M33_GC", "M33_QSO",
        "M33_M33cen", "M33_M33out",
        "MINI_SV_LRG", "MINI_SV_ELG", "MINI_SV_QSO", "MINI_SV_BGS_BRIGHT",
    )
    sciencemask = 0
    for bitname in science_bits:
        sciencemask |= cmx_mask[bitname].mask
    return sciencemask
def default_cmx_stdmask():
    """Returns default mask of bits for standards in CMX survey.
    """
    # Nothing in a CMX file is currently treated as a "standard". The
    # objects are all things which should be assigned as science targets.
    stdmask = 0
    for bitname in ("STD_FAINT", "STD_BRIGHT"):
        stdmask |= cmx_mask[bitname].mask
    return stdmask
def default_cmx_skymask():
    """Returns default mask of bits for sky targets in CMX survey.
    """
    # Only the SKY bit marks a plain sky target.
    return cmx_mask["SKY"].mask
def default_cmx_suppskymask():
    """Returns default mask of bits for suppsky targets in CMX survey.
    """
    # Only the SUPP_SKY bit marks a supplemental sky target.
    return cmx_mask["SUPP_SKY"].mask
def default_cmx_safemask():
    """Returns default mask of bits for 'safe' targets in CMX survey.
    Note: these are targets of last resort; they are safe locations where
    we won't saturate the detector, but aren't good for anything else.
    """
    return cmx_mask["BAD_SKY"].mask
def default_cmx_excludemask():
    """Returns default mask of bits for CMX survey targets to NOT observe.
    """
    # No CMX bits are excluded by default.
    return 0
def desi_target_type(desi_target, sciencemask, stdmask,
                     skymask, suppskymask, safemask, excludemask):
    """Determine fiber assign type from the data column.
    Args:
        desi_target (iterable): Scalar or array-like integer values.
        sciencemask (int): Integer value to bitwise-and when checking for
            science targets.
        stdmask (int): Integer value to bitwise-and when checking for
            standards targets.
        skymask (int): Integer value to bitwise-and when checking for
            sky targets.
        suppskymask (int): Integer value to bitwise-and when checking for
            suppsky targets.
        safemask (int): Integer value to bitwise-and when checking for
            safe targets.
        excludemask (int): Integer value to bitwise-and when checking for
            targets to exclude.
    Returns:
        (array): The fiberassign target types.
    """
    # Each (mask, type) pair sets the corresponding type bit when any of
    # the mask bits are present in the target bits.
    checks = (
        (sciencemask, TARGET_TYPE_SCIENCE),
        (stdmask, TARGET_TYPE_STANDARD),
        (skymask, TARGET_TYPE_SKY),
        (suppskymask, TARGET_TYPE_SUPPSKY),
        (safemask, TARGET_TYPE_SAFE),
    )
    if np.isscalar(desi_target):
        ttype = 0
        for mask, ttype_bit in checks:
            if desi_target & mask != 0:
                ttype |= ttype_bit
        # Exclusion always wins: clear every type bit.
        if desi_target & excludemask != 0:
            ttype = 0
        return ttype
    bits = np.asarray(desi_target)
    ttype = np.zeros(len(bits), dtype=np.uint8)
    for mask, ttype_bit in checks:
        ttype[bits & mask != 0] |= ttype_bit
    # Exclusion always wins: clear every type bit.
    ttype[bits & excludemask != 0] = 0
    return ttype
def default_survey_target_masks(survey):
    """Return the default masks for the survey.
    Args:
        survey (str): The survey name.
    Returns:
        (tuple): The science mask, standard mask, sky mask, suppsky mask,
            safe mask, and exclude mask for the data.  Every entry is None
            when the survey name is not recognized.
    """
    # Dispatch table: survey name -> the six default-mask builders, in the
    # same order as the returned tuple.
    mask_builders = {
        "main": (default_main_sciencemask, default_main_stdmask,
                 default_main_skymask, default_main_suppskymask,
                 default_main_safemask, default_main_excludemask),
        "cmx": (default_cmx_sciencemask, default_cmx_stdmask,
                default_cmx_skymask, default_cmx_suppskymask,
                default_cmx_safemask, default_cmx_excludemask),
        "sv1": (default_sv1_sciencemask, default_sv1_stdmask,
                default_sv1_skymask, default_sv1_suppskymask,
                default_sv1_safemask, default_sv1_excludemask),
        # AR duplicating for sv2...
        "sv2": (default_sv2_sciencemask, default_sv2_stdmask,
                default_sv2_skymask, default_sv2_suppskymask,
                default_sv2_safemask, default_sv2_excludemask),
        # AR duplicating for sv3...
        "sv3": (default_sv3_sciencemask, default_sv3_stdmask,
                default_sv3_skymask, default_sv3_suppskymask,
                default_sv3_safemask, default_sv3_excludemask),
    }
    builders = mask_builders.get(survey)
    if builders is None:
        return (None, None, None, None, None, None)
    return tuple(builder() for builder in builders)
def default_target_masks(data):
    """Return the column name and default mask values for the data table.
    This identifies the type of target data and returns the defaults for
    the program type.
    Args:
        data (Table): A Table or recarray.
    Returns:
        (tuple): The survey, column name, science mask, standard mask,
            sky mask, suppsky mask, safe mask, and exclude mask for the data.
    """
    # Target-bit column used by each non-CMX survey flavor.
    survey_columns = {
        "main": "DESI_TARGET",
        "sv1": "SV1_DESI_TARGET",
        "sv2": "SV2_DESI_TARGET",
        "sv3": "SV3_DESI_TARGET",
    }
    filecols, filemasks, filesurvey = main_cmx_or_sv(data)
    if filesurvey == "cmx":
        # CMX files report their target column in the first entry.
        col = filecols[0]
    else:
        # None when the survey type could not be identified.
        col = survey_columns.get(filesurvey)
    masks = default_survey_target_masks(filesurvey)
    return (filesurvey, col) + masks
def append_target_table(tgs, tagalong, tgdata, survey, typeforce, typecol,
                        sciencemask,
                        stdmask, skymask, suppskymask, safemask, excludemask):
    """Append a target recarray / table to a Targets object.
    This function is used to take a slice of targets table (as read from a
    file) and extract the columns containing properties which are stored
    internally in a Targets object. These targets and their properties are
    added to the Targets object.
    Args:
        tgs (Targets): The targets object to modify.
        tagalong: Per-target catalog-column store; its add_data() is called
            with the extracted coordinate / bit arrays.
        tgdata (Table): The table or recarray containing the input data.
        survey (str): The survey type.
        typeforce (int): If not None, all targets are considered to be this
            type.
        typecol (str): The name of the column to use for bitmask operations.
        sciencemask (int): Integer value to bitwise-and when checking for
            science targets.
        stdmask (int): Integer value to bitwise-and when checking for
            standards targets.
        skymask (int): Integer value to bitwise-and when checking for
            sky targets.
        suppskymask (int): Integer value to bitwise-and when checking for
            suppsky targets.
        safemask (int): Integer value to bitwise-and when checking for
            safe targets.
        excludemask (int): Integer value to bitwise-and when checking for
            targets to exclude.
    Returns:
        None
    """
    validtypes = [
        TARGET_TYPE_SCIENCE,
        TARGET_TYPE_SKY,
        TARGET_TYPE_SUPPSKY,
        TARGET_TYPE_STANDARD,
        TARGET_TYPE_SAFE
    ]
    if typeforce is not None:
        if typeforce not in validtypes:
            raise RuntimeError("Cannot force objects to be an invalid type")
    # Create buffers for column data
    nrows = len(tgdata["TARGETID"][:])
    # Arrays needed by C++
    d_targetid = np.zeros(nrows, dtype=np.int64)
    d_type = np.zeros(nrows, dtype=np.uint8)
    d_nobs = np.zeros(nrows, dtype=np.int32)
    d_prior = np.zeros(nrows, dtype=np.int32)
    d_subprior = np.zeros(nrows, dtype=np.float64)
    # Arrays that have special handling
    d_ra = np.zeros(nrows, dtype=np.float64)
    d_dec = np.zeros(nrows, dtype=np.float64)
    d_bits = np.zeros(nrows, dtype=np.int64)
    d_obscond = np.zeros(nrows, dtype=np.int32)
    d_targetid[:] = tgdata["TARGETID"][:]
    # Coordinates may be named TARGET_RA/TARGET_DEC or plain RA/DEC
    # depending on the catalog flavor.
    if "TARGET_RA" in tgdata.dtype.names:
        d_ra[:] = tgdata["TARGET_RA"][:]
    else:
        d_ra[:] = tgdata["RA"][:]
    if "TARGET_DEC" in tgdata.dtype.names:
        d_dec[:] = tgdata["TARGET_DEC"][:]
    else:
        d_dec[:] = tgdata["DEC"][:]
    if typeforce is not None:
        d_type[:] = typeforce
        # In this case we leave the targets bits at zero since we are
        # forcibly assigning a type. In this case, the target bits cannot
        # be used to determine anything about the object for QA, etc.
    else:
        if typecol == "FA_TYPE":
            # We are using the pre-established target categories.
            d_type[:] = tgdata["FA_TYPE"][:]
            d_bits[:] = tgdata["FA_TARGET"][:]
        else:
            d_bits[:] = tgdata[typecol][:]
            d_type[:] = desi_target_type(
                tgdata[typecol], sciencemask, stdmask, skymask, suppskymask,
                safemask, excludemask)
    if "OBSCONDITIONS" in tgdata.dtype.fields:
        d_obscond[:] = tgdata["OBSCONDITIONS"][:]
    else:
        # Set obs conditions mask to be all bits
        d_obscond[:] = np.invert(np.zeros(nrows, dtype=np.int32))
    # Observation count / priority columns fall back from the "live"
    # column to the "_INIT" column, then to zeros.
    if "NUMOBS_MORE" in tgdata.dtype.fields:
        d_nobs[:] = tgdata["NUMOBS_MORE"][:]
    elif "NUMOBS_INIT" in tgdata.dtype.fields:
        d_nobs[:] = tgdata["NUMOBS_INIT"][:]
    else:
        d_nobs[:] = np.zeros(nrows, dtype=np.int32)
    if "PRIORITY" in tgdata.dtype.fields:
        d_prior[:] = tgdata["PRIORITY"][:]
    elif "PRIORITY_INIT" in tgdata.dtype.fields:
        d_prior[:] = tgdata["PRIORITY_INIT"][:]
    else:
        d_prior[:] = np.zeros(nrows, dtype=np.int32)
    if "SUBPRIORITY" in tgdata.dtype.fields:
        d_subprior[:] = tgdata["SUBPRIORITY"][:]
    else:
        d_subprior[:] = np.zeros(nrows, dtype=np.float64)
    # Already-extracted arrays handed to the tagalong store under
    # standardized names (used when tgdata lacks these columns).
    fake = {'TARGET_RA': d_ra,
            'TARGET_DEC': d_dec,
            'FA_TARGET': d_bits,
            'OBSCOND': d_obscond}
    if not 'PLATE_RA' in tgdata.dtype.fields:
        print('Warning: no PLATE_RA, PLATE_DEC in target file; using RA,DEC or TARGET_RA,DEC')
        fake.update({'PLATE_RA': d_ra,
                     'PLATE_DEC': d_dec,})
    tagalong.add_data(d_targetid, tgdata, fake=fake)
    # Append the data to our targets list. This will print a
    # warning if there are duplicate target IDs.
    tgs.append(survey, d_targetid, d_nobs, d_prior, d_subprior, d_type)
    return
def load_target_table(tgs, tagalong, tgdata, survey=None, typeforce=None, typecol=None,
                      sciencemask=None, stdmask=None, skymask=None,
                      suppskymask=None, safemask=None, excludemask=None):
    """Append targets from a table.
    Use the table data to append targets to the input Targets object.
    A subset of the columns in the file will be stored in each Target added
    to the Targets object. Each target is classified into one or more of the
    4 types used internally in assignment (science, standard, sky, safe).
    This classification is controlled by applying bitmasks to the specified
    data column. Alternatively, all targets in the file can be forced to one
    type.
    Args:
        tgs (Targets): The targets object on which to append this data.
        tagalong: Per-target catalog-column store, forwarded to
            append_target_table().
        tgdata (Table): A table or recarray with the target properties.
        survey (str): The survey type. If None, query from columns.
        typeforce (int): If specified, it must equal one of the TARGET_TYPE_*
            values. All targets read from the file will be assigned this type.
        typecol (str): Optional column to use for bitmask matching (default
            uses the result of main_cmx_or_sv from desitarget).
        sciencemask (int): Bitmask for classifying targets as science.
        stdmask (int): Bitmask for classifying targets as a standard.
        skymask (int): Bitmask for classifying targets as sky.
        suppskymask (int): Bitmask for classifying targets as suppsky.
        safemask (int): Bitmask for classifying targets as a safe location.
        excludemask (int): Bitmask for excluding targets.
    Returns:
        None
    """
    log = Logger.get()
    # Validate required columns and their dtypes before any processing.
    if "TARGETID" not in tgdata.dtype.names:
        msg = "TARGETID column is required"
        log.error(msg)
        raise RuntimeError(msg)
    if tgdata.dtype["TARGETID"].char != "l":
        msg = "TARGETID column should be int64"
        log.error(msg)
        raise RuntimeError(msg)
    if "PRIORITY" in tgdata.dtype.names:
        if tgdata.dtype["PRIORITY"].char not in ["i", "l"]:
            msg = "PRIORITY column should be an integer type"
            log.error(msg)
            raise RuntimeError(msg)
    if "SUBPRIORITY" not in tgdata.dtype.names:
        msg = "SUBPRIORITY column is required"
        log.error(msg)
        raise RuntimeError(msg)
    if tgdata.dtype["SUBPRIORITY"].char != "d":
        msg = "SUBPRIORITY column should be float64"
        log.error(msg)
        raise RuntimeError(msg)
    if "NUMOBS_MORE" in tgdata.dtype.names:
        if tgdata.dtype["NUMOBS_MORE"].char not in ["i", "l"]:
            msg = "NUMOBS_MORE column should be an integer type"
            log.error(msg)
            raise RuntimeError(msg)
    if "NUMOBS_INIT" in tgdata.dtype.names:
        if tgdata.dtype["NUMOBS_INIT"].char not in ["i", "l"]:
            msg = "NUMOBS_INIT column should be an integer type"
            log.error(msg)
            raise RuntimeError(msg)
    if "OBSCONDITIONS" not in tgdata.dtype.names:
        msg = "OBSCONDITIONS column is required"
        log.error(msg)
        raise RuntimeError(msg)
    if tgdata.dtype["OBSCONDITIONS"].char not in ["i", "l"]:
        msg = "OBSCONDITIONS column should be an integer type"
        log.error(msg)
        raise RuntimeError(msg)
    # Are we loading raw output? If so, we require the survey key to get
    # the default masks.
    fsurvey = None
    fcol = None
    fsciencemask = None
    fstdmask = None
    fskymask = None
    fsuppskymask = None
    fsafemask = None
    fexcludemask = None
    if typecol == "FA_TYPE":
        if survey is None:
            msg = "When loading raw fiberassign tables, the survey must be \
                specified"
            log.error(msg)
            raise RuntimeError(msg)
        fsciencemask, fstdmask, fskymask, fsuppskymask, fsafemask, \
            fexcludemask = default_survey_target_masks(survey)
    else:
        fsurvey, fcol, fsciencemask, fstdmask, fskymask, fsuppskymask, \
            fsafemask, fexcludemask = default_target_masks(tgdata)
        if fcol is None:
            # File could not be identified. In this case, the user must
            # completely specify the bitmask and column to use.
            if typeforce is None:
                if (typecol is None) or (sciencemask is None) \
                    or (stdmask is None) or (skymask is None) \
                    or (suppskymask is None) or (safemask is None) \
                    or (excludemask is None):
                    msg = "Unknown survey type. To use this table, \
                    specify the column name and every bitmask."
                    log.error(msg)
                    raise RuntimeError(msg)
    # Caller-supplied values take precedence; fall back to the defaults
    # derived above for anything left as None.
    if survey is None:
        survey = fsurvey
    if typecol is None:
        typecol = fcol
    if sciencemask is None:
        sciencemask = fsciencemask
    if stdmask is None:
        stdmask = fstdmask
    if skymask is None:
        skymask = fskymask
    if suppskymask is None:
        suppskymask = fsuppskymask
    if safemask is None:
        safemask = fsafemask
    if excludemask is None:
        excludemask = fexcludemask
    log.debug("Target table using survey '{}', column {}:".format(
        survey, typecol))
    # Log the human-readable bit names for the masks actually in use,
    # using the bitmask definitions matching the survey flavor.
    if survey == "main":
        log.debug("  sciencemask {}".format(
            "|".join(desi_mask.names(sciencemask))))
        log.debug("  stdmask    {}".format(
            "|".join(desi_mask.names(stdmask))))
        log.debug("  skymask    {}".format(
            "|".join(desi_mask.names(skymask))))
        log.debug("  suppskymask    {}".format(
            "|".join(desi_mask.names(suppskymask))))
        log.debug("  safemask   {}".format(
            "|".join(desi_mask.names(safemask))))
        log.debug("  excludemask {}".format(
            "|".join(desi_mask.names(excludemask))))
    elif survey == "cmx":
        log.debug("  sciencemask {}".format(
            "|".join(cmx_mask.names(sciencemask))))
        log.debug("  stdmask    {}".format(
            "|".join(cmx_mask.names(stdmask))))
        log.debug("  skymask    {}".format(
            "|".join(cmx_mask.names(skymask))))
        log.debug("  suppskymask    {}".format(
            "|".join(cmx_mask.names(suppskymask))))
        log.debug("  safemask   {}".format(
            "|".join(cmx_mask.names(safemask))))
        log.debug("  excludemask {}".format(
            "|".join(cmx_mask.names(excludemask))))
    elif survey == "sv1":
        log.debug("  sciencemask {}".format(
            "|".join(sv1_mask.names(sciencemask))))
        log.debug("  stdmask    {}".format(
            "|".join(sv1_mask.names(stdmask))))
        log.debug("  skymask    {}".format(
            "|".join(sv1_mask.names(skymask))))
        log.debug("  suppskymask    {}".format(
            "|".join(sv1_mask.names(suppskymask))))
        log.debug("  safemask   {}".format(
            "|".join(sv1_mask.names(safemask))))
        log.debug("  excludemask {}".format(
            "|".join(sv1_mask.names(excludemask))))
    # AR adding sv2...
    elif survey == "sv2":
        log.debug("  sciencemask {}".format(
            "|".join(sv2_mask.names(sciencemask))))
        log.debug("  stdmask    {}".format(
            "|".join(sv2_mask.names(stdmask))))
        log.debug("  skymask    {}".format(
            "|".join(sv2_mask.names(skymask))))
        log.debug("  suppskymask    {}".format(
            "|".join(sv2_mask.names(suppskymask))))
        log.debug("  safemask   {}".format(
            "|".join(sv2_mask.names(safemask))))
        log.debug("  excludemask {}".format(
            "|".join(sv2_mask.names(excludemask))))
    # AR adding sv3...
    elif survey == "sv3":
        log.debug("  sciencemask {}".format(
            "|".join(sv3_mask.names(sciencemask))))
        log.debug("  stdmask    {}".format(
            "|".join(sv3_mask.names(stdmask))))
        log.debug("  skymask    {}".format(
            "|".join(sv3_mask.names(skymask))))
        log.debug("  suppskymask    {}".format(
            "|".join(sv3_mask.names(suppskymask))))
        log.debug("  safemask   {}".format(
            "|".join(sv3_mask.names(safemask))))
        log.debug("  excludemask {}".format(
            "|".join(sv3_mask.names(excludemask))))
    else:
        raise RuntimeError("unknown survey type, should never get here!")
    append_target_table(tgs, tagalong, tgdata, survey, typeforce, typecol, sciencemask,
                        stdmask, skymask, suppskymask, safemask, excludemask)
    return
def load_target_file(tgs, tagalong, tfile, survey=None, typeforce=None, typecol=None,
                     sciencemask=None, stdmask=None, skymask=None,
                     suppskymask=None, safemask=None, excludemask=None,
                     rowbuffer=1000000):
    """Append targets from a file.
    Read the specified file and append targets to the input Targets object.
    A subset of the columns in the file will be stored in each Target added
    to the Targets object. Each target is classified into one or more of the
    4 types used internally in assignment (science, standard, sky, safe).
    This classification is controlled by applying bitmasks to the specified
    data column. Alternatively, all targets in the file can be forced to one
    type.
    Args:
        tgs (Targets): The targets object on which to append this data.
        tagalong: Per-target catalog-column store, forwarded to
            load_target_table().
        tfile (str): The path to the target catalog.
        survey (str): The survey type. If None, query from columns and
            the FITS header.
        typeforce (int): If specified, it must equal one of the TARGET_TYPE_*
            values. All targets read from the file will be assigned this type.
        typecol (str): Optional column to use for bitmask matching (default
            uses the result of main_cmx_or_sv from desitarget).
        sciencemask (int): Bitmask for classifying targets as science.
        stdmask (int): Bitmask for classifying targets as a standard.
        skymask (int): Bitmask for classifying targets as sky.
        suppskymask (int): Bitmask for classifying targets as suppsky.
        safemask (int): Bitmask for classifying targets as a safe location.
        excludemask (int): Bitmask for excluding targets.
        rowbuffer (int): Optional number of rows to read at once when loading
            very large files.
    Returns:
        (str): The survey type.
    """
    tm = Timer()
    tm.start()
    log = Logger.get()
    # Open file
    fits = fitsio.FITS(tfile, mode="r")
    # Total number of rows
    nrows = fits[1].get_nrows()
    log.info("Target file {} has {} rows. Reading in chunks of {}"
             .format(tfile, nrows, rowbuffer))
    header = fits[1].read_header()
    if survey is None:
        # Fall back to the FA_SURV header keyword for the survey type.
        if "FA_SURV" in header:
            survey = str(header["FA_SURV"]).rstrip()
    # Read in chunks of at most rowbuffer rows to bound memory use.
    offset = 0
    n = rowbuffer
    while offset < nrows:
        # Clamp the final chunk to the remaining rows.
        if offset + n > nrows:
            n = nrows - offset
        data = fits[1].read(rows=np.arange(offset, offset+n, dtype=np.int64))
        log.debug("Target file {} read rows {} - {}"
                  .format(tfile, offset, offset+n-1))
        load_target_table(tgs, tagalong, data, survey=survey,
                          typeforce=typeforce,
                          typecol=typecol,
                          sciencemask=sciencemask,
                          stdmask=stdmask,
                          skymask=skymask,
                          suppskymask=suppskymask,
                          safemask=safemask,
                          excludemask=excludemask)
        offset += n
    tm.stop()
    tm.report("Read target file {}".format(tfile))
    return survey
def targets_in_tiles(hw, tgs, tiles, tagalong):
    '''
    Finds, for each tile, the targets within its focal-plane radius whose
    observing conditions match the tile, and computes their focal-plane
    positions.

    Args:
        hw: hardware model; focalplane_radius_deg is read here.
        tgs (Targets): the targets; only ids() is used.
        tiles: tile list providing id, ra, dec, obscond, obshourang,
            obstheta and obstime per tile.
        tagalong: per-target catalog-column store; RA, DEC and OBSCOND
            are fetched from it.

    Returns tile_targetids, tile_x, tile_y, tile_xy_cs5,
    which are maps from tileid to numpy arrays; tile_xy_cs5 maps tileid
    to a dict of target id -> (x, y) in CS5 coordinates.
    '''
    tile_targetids = {}
    tile_x = {}
    tile_y = {}
    tile_xy_cs5 = {}
    target_ids = tgs.ids()
    target_ra, target_dec, target_obscond = tagalong.get_for_ids(
        target_ids, ['RA', 'DEC', 'OBSCOND'])
    # Spatial index over all targets (unit-sphere KDTree).
    kd = _radec2kd(target_ra, target_dec)
    for (tile_id, tile_ra, tile_dec, tile_obscond, tile_ha, tile_obstheta,
         tile_obstime) in zip(
             tiles.id, tiles.ra, tiles.dec, tiles.obscond, tiles.obshourang,
             tiles.obstheta, tiles.obstime):
        print('Tile', tile_id, 'at RA,Dec', tile_ra, tile_dec, 'obscond:', tile_obscond, 'HA', tile_ha, 'obstime', tile_obstime)
        # All targets within the focal-plane radius of the tile center.
        inds = _kd_query_radec(kd, tile_ra, tile_dec, hw.focalplane_radius_deg)
        # Keep targets whose OBSCOND bits overlap the tile's conditions.
        match = np.flatnonzero(target_obscond[inds] & tile_obscond)
        inds = inds[match]
        del match
        ras = target_ra [inds]
        decs = target_dec[inds]
        tids = target_ids[inds]
        del inds
        print('Found', len(tids), 'targets near tile and matching obscond')
        x, y = radec2xy(hw, tile_ra, tile_dec, tile_obstime, tile_obstheta,
                        tile_ha, ras, decs, True)
        # Save CS5 mapping
        tile_xy_cs5[tile_id] = dict((tid,(xi,yi)) for tid,xi,yi in zip(tids, x, y))
        # Convert CS5 positions to the coordinates stored per tile.
        x, y = cs52xy(x, y)
        tile_targetids[tile_id] = tids
        tile_x[tile_id] = x
        tile_y[tile_id] = y
    return tile_targetids, tile_x, tile_y, tile_xy_cs5
def _radec2kd(ra, dec):
"""
Creates a scipy KDTree from the given *ra*, *dec* arrays (in deg).
"""
from scipy.spatial import KDTree
xyz = _radec2xyz(ra, dec)
return KDTree(xyz)
def _radec2xyz(ra, dec):
"""
Converts arrays from *ra*, *dec* (in deg) to XYZ unit-sphere
coordinates.
"""
rr = np.deg2rad(ra)
dd = np.deg2rad(dec)
return np.vstack((np.cos(rr) * np.cos(dd),
np.sin(rr) * np.cos(dd),
np.sin(dd))).T
def _kd_query_radec(kd, ra, dec, radius_deg):
searchrad = np.deg2rad(radius_deg)
# Convert from radius to (tangent) distance on the unit sphere.
searchrad = np.sqrt(2. * (1. - np.cos(searchrad)))
xyz = _radec2xyz([ra], [dec])
inds = kd.query_ball_point(xyz[0, :], searchrad)
inds = np.array(inds)
return inds
| [
"numpy.isscalar",
"desitarget.targets.main_cmx_or_sv",
"desitarget.targetmask.desi_mask.names",
"scipy.spatial.KDTree",
"fitsio.FITS",
"numpy.asarray",
"numpy.flatnonzero",
"desitarget.sv3.sv3_targetmask.desi_mask.names",
"desitarget.cmx.cmx_targetmask.cmx_mask.names",
"numpy.array",
"numpy.zero... | [((19268, 19292), 'numpy.isscalar', 'np.isscalar', (['desi_target'], {}), '(desi_target)\n', (19279, 19292), True, 'import numpy as np\n'), ((22804, 22824), 'desitarget.targets.main_cmx_or_sv', 'main_cmx_or_sv', (['data'], {}), '(data)\n', (22818, 22824), False, 'from desitarget.targets import main_cmx_or_sv\n'), ((25291, 25322), 'numpy.zeros', 'np.zeros', (['nrows'], {'dtype': 'np.int64'}), '(nrows, dtype=np.int64)\n', (25299, 25322), True, 'import numpy as np\n'), ((25336, 25367), 'numpy.zeros', 'np.zeros', (['nrows'], {'dtype': 'np.uint8'}), '(nrows, dtype=np.uint8)\n', (25344, 25367), True, 'import numpy as np\n'), ((25381, 25412), 'numpy.zeros', 'np.zeros', (['nrows'], {'dtype': 'np.int32'}), '(nrows, dtype=np.int32)\n', (25389, 25412), True, 'import numpy as np\n'), ((25427, 25458), 'numpy.zeros', 'np.zeros', (['nrows'], {'dtype': 'np.int32'}), '(nrows, dtype=np.int32)\n', (25435, 25458), True, 'import numpy as np\n'), ((25476, 25509), 'numpy.zeros', 'np.zeros', (['nrows'], {'dtype': 'np.float64'}), '(nrows, dtype=np.float64)\n', (25484, 25509), True, 'import numpy as np\n'), ((25561, 25594), 'numpy.zeros', 'np.zeros', (['nrows'], {'dtype': 'np.float64'}), '(nrows, dtype=np.float64)\n', (25569, 25594), True, 'import numpy as np\n'), ((25607, 25640), 'numpy.zeros', 'np.zeros', (['nrows'], {'dtype': 'np.float64'}), '(nrows, dtype=np.float64)\n', (25615, 25640), True, 'import numpy as np\n'), ((25654, 25685), 'numpy.zeros', 'np.zeros', (['nrows'], {'dtype': 'np.int64'}), '(nrows, dtype=np.int64)\n', (25662, 25685), True, 'import numpy as np\n'), ((25702, 25733), 'numpy.zeros', 'np.zeros', (['nrows'], {'dtype': 'np.int32'}), '(nrows, dtype=np.int32)\n', (25710, 25733), True, 'import numpy as np\n'), ((38926, 38954), 'fitsio.FITS', 'fitsio.FITS', (['tfile'], {'mode': '"""r"""'}), "(tfile, mode='r')\n", (38937, 38954), False, 'import fitsio\n'), ((41888, 41899), 'scipy.spatial.KDTree', 'KDTree', (['xyz'], {}), '(xyz)\n', (41894, 41899), False, 
'from scipy.spatial import KDTree\n'), ((42034, 42048), 'numpy.deg2rad', 'np.deg2rad', (['ra'], {}), '(ra)\n', (42044, 42048), True, 'import numpy as np\n'), ((42058, 42073), 'numpy.deg2rad', 'np.deg2rad', (['dec'], {}), '(dec)\n', (42068, 42073), True, 'import numpy as np\n'), ((42268, 42290), 'numpy.deg2rad', 'np.deg2rad', (['radius_deg'], {}), '(radius_deg)\n', (42278, 42290), True, 'import numpy as np\n'), ((42512, 42526), 'numpy.array', 'np.array', (['inds'], {}), '(inds)\n', (42520, 42526), True, 'import numpy as np\n'), ((7856, 7873), 'desitarget.targetmask.desi_mask.names', 'desi_mask.names', ([], {}), '()\n', (7871, 7873), False, 'from desitarget.targetmask import desi_mask\n'), ((13074, 13091), 'desitarget.targetmask.desi_mask.names', 'desi_mask.names', ([], {}), '()\n', (13089, 13091), False, 'from desitarget.targetmask import desi_mask\n'), ((19812, 19835), 'numpy.asarray', 'np.asarray', (['desi_target'], {}), '(desi_target)\n', (19822, 19835), True, 'import numpy as np\n'), ((27571, 27604), 'numpy.zeros', 'np.zeros', (['nrows'], {'dtype': 'np.float64'}), '(nrows, dtype=np.float64)\n', (27579, 27604), True, 'import numpy as np\n'), ((40993, 41044), 'numpy.flatnonzero', 'np.flatnonzero', (['(target_obscond[inds] & tile_obscond)'], {}), '(target_obscond[inds] & tile_obscond)\n', (41007, 41044), True, 'import numpy as np\n'), ((4883, 4911), 'numpy.flatnonzero', 'np.flatnonzero', (['(outinds >= 0)'], {}), '(outinds >= 0)\n', (4897, 4911), True, 'import numpy as np\n'), ((6065, 6093), 'numpy.flatnonzero', 'np.flatnonzero', (['(outinds >= 0)'], {}), '(outinds >= 0)\n', (6079, 6093), True, 'import numpy as np\n'), ((26918, 26949), 'numpy.zeros', 'np.zeros', (['nrows'], {'dtype': 'np.int32'}), '(nrows, dtype=np.int32)\n', (26926, 26949), True, 'import numpy as np\n'), ((27164, 27195), 'numpy.zeros', 'np.zeros', (['nrows'], {'dtype': 'np.int32'}), '(nrows, dtype=np.int32)\n', (27172, 27195), True, 'import numpy as np\n'), ((27410, 27441), 'numpy.zeros', 
'np.zeros', (['nrows'], {'dtype': 'np.int32'}), '(nrows, dtype=np.int32)\n', (27418, 27441), True, 'import numpy as np\n'), ((39429, 39474), 'numpy.arange', 'np.arange', (['offset', '(offset + n)'], {'dtype': 'np.int64'}), '(offset, offset + n, dtype=np.int64)\n', (39438, 39474), True, 'import numpy as np\n'), ((42190, 42200), 'numpy.sin', 'np.sin', (['dd'], {}), '(dd)\n', (42196, 42200), True, 'import numpy as np\n'), ((42394, 42411), 'numpy.cos', 'np.cos', (['searchrad'], {}), '(searchrad)\n', (42400, 42411), True, 'import numpy as np\n'), ((33625, 33653), 'desitarget.targetmask.desi_mask.names', 'desi_mask.names', (['sciencemask'], {}), '(sciencemask)\n', (33640, 33653), False, 'from desitarget.targetmask import desi_mask\n'), ((33723, 33747), 'desitarget.targetmask.desi_mask.names', 'desi_mask.names', (['stdmask'], {}), '(stdmask)\n', (33738, 33747), False, 'from desitarget.targetmask import desi_mask\n'), ((33817, 33841), 'desitarget.targetmask.desi_mask.names', 'desi_mask.names', (['skymask'], {}), '(skymask)\n', (33832, 33841), False, 'from desitarget.targetmask import desi_mask\n'), ((33915, 33943), 'desitarget.targetmask.desi_mask.names', 'desi_mask.names', (['suppskymask'], {}), '(suppskymask)\n', (33930, 33943), False, 'from desitarget.targetmask import desi_mask\n'), ((34013, 34038), 'desitarget.targetmask.desi_mask.names', 'desi_mask.names', (['safemask'], {}), '(safemask)\n', (34028, 34038), False, 'from desitarget.targetmask import desi_mask\n'), ((34108, 34136), 'desitarget.targetmask.desi_mask.names', 'desi_mask.names', (['excludemask'], {}), '(excludemask)\n', (34123, 34136), False, 'from desitarget.targetmask import desi_mask\n'), ((42096, 42106), 'numpy.cos', 'np.cos', (['rr'], {}), '(rr)\n', (42102, 42106), True, 'import numpy as np\n'), ((42109, 42119), 'numpy.cos', 'np.cos', (['dd'], {}), '(dd)\n', (42115, 42119), True, 'import numpy as np\n'), ((42143, 42153), 'numpy.sin', 'np.sin', (['rr'], {}), '(rr)\n', (42149, 42153), True, 'import numpy 
as np\n'), ((42156, 42166), 'numpy.cos', 'np.cos', (['dd'], {}), '(dd)\n', (42162, 42166), True, 'import numpy as np\n'), ((34232, 34259), 'desitarget.cmx.cmx_targetmask.cmx_mask.names', 'cmx_mask.names', (['sciencemask'], {}), '(sciencemask)\n', (34246, 34259), False, 'from desitarget.cmx.cmx_targetmask import cmx_mask\n'), ((34329, 34352), 'desitarget.cmx.cmx_targetmask.cmx_mask.names', 'cmx_mask.names', (['stdmask'], {}), '(stdmask)\n', (34343, 34352), False, 'from desitarget.cmx.cmx_targetmask import cmx_mask\n'), ((34422, 34445), 'desitarget.cmx.cmx_targetmask.cmx_mask.names', 'cmx_mask.names', (['skymask'], {}), '(skymask)\n', (34436, 34445), False, 'from desitarget.cmx.cmx_targetmask import cmx_mask\n'), ((34519, 34546), 'desitarget.cmx.cmx_targetmask.cmx_mask.names', 'cmx_mask.names', (['suppskymask'], {}), '(suppskymask)\n', (34533, 34546), False, 'from desitarget.cmx.cmx_targetmask import cmx_mask\n'), ((34616, 34640), 'desitarget.cmx.cmx_targetmask.cmx_mask.names', 'cmx_mask.names', (['safemask'], {}), '(safemask)\n', (34630, 34640), False, 'from desitarget.cmx.cmx_targetmask import cmx_mask\n'), ((34710, 34737), 'desitarget.cmx.cmx_targetmask.cmx_mask.names', 'cmx_mask.names', (['excludemask'], {}), '(excludemask)\n', (34724, 34737), False, 'from desitarget.cmx.cmx_targetmask import cmx_mask\n'), ((34833, 34860), 'desitarget.sv1.sv1_targetmask.desi_mask.names', 'sv1_mask.names', (['sciencemask'], {}), '(sciencemask)\n', (34847, 34860), True, 'from desitarget.sv1.sv1_targetmask import desi_mask as sv1_mask\n'), ((34930, 34953), 'desitarget.sv1.sv1_targetmask.desi_mask.names', 'sv1_mask.names', (['stdmask'], {}), '(stdmask)\n', (34944, 34953), True, 'from desitarget.sv1.sv1_targetmask import desi_mask as sv1_mask\n'), ((35023, 35046), 'desitarget.sv1.sv1_targetmask.desi_mask.names', 'sv1_mask.names', (['skymask'], {}), '(skymask)\n', (35037, 35046), True, 'from desitarget.sv1.sv1_targetmask import desi_mask as sv1_mask\n'), ((35120, 35147), 
'desitarget.sv1.sv1_targetmask.desi_mask.names', 'sv1_mask.names', (['suppskymask'], {}), '(suppskymask)\n', (35134, 35147), True, 'from desitarget.sv1.sv1_targetmask import desi_mask as sv1_mask\n'), ((35217, 35241), 'desitarget.sv1.sv1_targetmask.desi_mask.names', 'sv1_mask.names', (['safemask'], {}), '(safemask)\n', (35231, 35241), True, 'from desitarget.sv1.sv1_targetmask import desi_mask as sv1_mask\n'), ((35311, 35338), 'desitarget.sv1.sv1_targetmask.desi_mask.names', 'sv1_mask.names', (['excludemask'], {}), '(excludemask)\n', (35325, 35338), True, 'from desitarget.sv1.sv1_targetmask import desi_mask as sv1_mask\n'), ((35457, 35484), 'desitarget.sv2.sv2_targetmask.desi_mask.names', 'sv2_mask.names', (['sciencemask'], {}), '(sciencemask)\n', (35471, 35484), True, 'from desitarget.sv2.sv2_targetmask import desi_mask as sv2_mask\n'), ((35554, 35577), 'desitarget.sv2.sv2_targetmask.desi_mask.names', 'sv2_mask.names', (['stdmask'], {}), '(stdmask)\n', (35568, 35577), True, 'from desitarget.sv2.sv2_targetmask import desi_mask as sv2_mask\n'), ((35647, 35670), 'desitarget.sv2.sv2_targetmask.desi_mask.names', 'sv2_mask.names', (['skymask'], {}), '(skymask)\n', (35661, 35670), True, 'from desitarget.sv2.sv2_targetmask import desi_mask as sv2_mask\n'), ((35744, 35771), 'desitarget.sv2.sv2_targetmask.desi_mask.names', 'sv2_mask.names', (['suppskymask'], {}), '(suppskymask)\n', (35758, 35771), True, 'from desitarget.sv2.sv2_targetmask import desi_mask as sv2_mask\n'), ((35841, 35865), 'desitarget.sv2.sv2_targetmask.desi_mask.names', 'sv2_mask.names', (['safemask'], {}), '(safemask)\n', (35855, 35865), True, 'from desitarget.sv2.sv2_targetmask import desi_mask as sv2_mask\n'), ((35935, 35962), 'desitarget.sv2.sv2_targetmask.desi_mask.names', 'sv2_mask.names', (['excludemask'], {}), '(excludemask)\n', (35949, 35962), True, 'from desitarget.sv2.sv2_targetmask import desi_mask as sv2_mask\n'), ((36081, 36108), 'desitarget.sv3.sv3_targetmask.desi_mask.names', 
'sv3_mask.names', (['sciencemask'], {}), '(sciencemask)\n', (36095, 36108), True, 'from desitarget.sv3.sv3_targetmask import desi_mask as sv3_mask\n'), ((36178, 36201), 'desitarget.sv3.sv3_targetmask.desi_mask.names', 'sv3_mask.names', (['stdmask'], {}), '(stdmask)\n', (36192, 36201), True, 'from desitarget.sv3.sv3_targetmask import desi_mask as sv3_mask\n'), ((36271, 36294), 'desitarget.sv3.sv3_targetmask.desi_mask.names', 'sv3_mask.names', (['skymask'], {}), '(skymask)\n', (36285, 36294), True, 'from desitarget.sv3.sv3_targetmask import desi_mask as sv3_mask\n'), ((36478, 36505), 'desitarget.sv3.sv3_targetmask.desi_mask.names', 'sv3_mask.names', (['suppskymask'], {}), '(suppskymask)\n', (36492, 36505), True, 'from desitarget.sv3.sv3_targetmask import desi_mask as sv3_mask\n'), ((36575, 36599), 'desitarget.sv3.sv3_targetmask.desi_mask.names', 'sv3_mask.names', (['safemask'], {}), '(safemask)\n', (36589, 36599), True, 'from desitarget.sv3.sv3_targetmask import desi_mask as sv3_mask\n'), ((36669, 36696), 'desitarget.sv3.sv3_targetmask.desi_mask.names', 'sv3_mask.names', (['excludemask'], {}), '(excludemask)\n', (36683, 36696), True, 'from desitarget.sv3.sv3_targetmask import desi_mask as sv3_mask\n')] |
# coding: utf-8
# # Extract NECOFS data using NetCDF4-Python and analyze/visualize with Pandas
# In[1]:
# Plot forecast water levels from NECOFS model from list of lon,lat locations
# (uses the nearest point, no interpolation)
import netCDF4
import datetime as dt
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from StringIO import StringIO
get_ipython().magic(u'matplotlib inline')
# In[2]:
#model='NECOFS Massbay'
#url='http://www.smast.umassd.edu:8080/thredds/dodsC/FVCOM/NECOFS/Forecasts/NECOFS_FVCOM_OCEAN_MASSBAY_FORECAST.nc'
# GOM3 Grid
#model='NECOFS GOM3'
#url='http://www.smast.umassd.edu:8080/thredds/dodsC/FVCOM/NECOFS/Forecasts/NECOFS_GOM3_FORECAST.nc'
model = 'NECOFS GOM3 Wave'
# forecast
#url = 'http://www.smast.umassd.edu:8080/thredds/dodsC/FVCOM/NECOFS/Forecasts/NECOFS_WAVE_FORECAST.nc'
# archive
url = 'http://www.smast.umassd.edu:8080/thredds/dodsC/fvcom/archives/necofs_gom3_wave'
# In[3]:
# Desired time for snapshot
# ....right now (or some number of hours from now) ...
start = dt.datetime.utcnow() + dt.timedelta(hours=-72)
stop = dt.datetime.utcnow() + dt.timedelta(hours=+72)
# ... or specific time (UTC)
start = dt.datetime(1991,1,1,0,0,0) + dt.timedelta(hours=+0)
start = dt.datetime(1992,7,1,0,0,0) + dt.timedelta(hours=+0)
start = dt.datetime(1992,8,1,0,0,0) + dt.timedelta(hours=+0)
start = dt.datetime(2016,1,1,0,0,0) + dt.timedelta(hours=+0)
stop = dt.datetime(2016,6,1,0,0,0) + dt.timedelta(hours=+0)
# In[4]:
def dms2dd(d, m, s):
    """Convert an angle from degrees/minutes/seconds to decimal degrees.

    For a negative angle, pass positive components and negate the result
    (that is how the callers below use it).
    """
    minutes = m + s / 60.
    return d + minutes / 60.
# In[5]:
dms2dd(41,33,15.7)
# In[6]:
-dms2dd(70,30,20.2)
# In[7]:
x = '''
Station, Lat, Lon
Falmouth Harbor, 41.541575, -70.608020
Sage Lot Pond, 41.554361, -70.505611
'''
# In[8]:
x = '''
Station, Lat, Lon
Boston, 42.368186, -71.047984
Carolyn Seep Spot, 39.8083, -69.5917
Falmouth Harbor, 41.541575, -70.608020
'''
# In[9]:
# Enter desired (Station, Lat, Lon) values here:
x = '''
Station, Lat, Lon
Boston, 42.368186, -71.047984
Scituate Harbor, 42.199447, -70.720090
Scituate Beach, 42.209973, -70.724523
Falmouth Harbor, 41.541575, -70.608020
Marion, 41.689008, -70.746576
Marshfield, 42.108480, -70.648691
Provincetown, 42.042745, -70.171180
Sandwich, 41.767990, -70.466219
Hampton Bay, 42.900103, -70.818510
Gloucester, 42.610253, -70.660570
'''
# In[10]:
# Create a Pandas DataFrame
obs=pd.read_csv(StringIO(x.strip()), sep=",\s*",index_col='Station')
# In[11]:
obs
# In[12]:
# find the indices of the points in (x,y) closest to the points in (xi,yi)
def nearxy(x, y, xi, yi):
    """Find, for each query point (xi[i], yi[i]), the index of the nearest
    point in (x, y) (nearest neighbour, no interpolation).

    Args:
        x, y: 1-D arrays of candidate coordinates (e.g. model grid nodes).
        xi, yi: 1-D sequences of query coordinates (e.g. station positions).

    Returns:
        Integer array of shape [len(xi)] with the index of the closest
        candidate for each query point.

    Improvements over the original: drops the pointless `np.ones`
    initialisation and the per-station Python loop, and compares squared
    distances (the `sqrt` is monotonic, so it cannot change the argmin).
    """
    x = np.asarray(x)
    y = np.asarray(y)
    xi = np.asarray(xi)
    yi = np.asarray(yi)
    # Broadcast to a [n_queries, n_candidates] matrix of squared distances.
    d2 = (x[None, :] - xi[:, None]) ** 2 + (y[None, :] - yi[:, None]) ** 2
    return d2.argmin(axis=1)
# In[13]:
# open NECOFS remote OPeNDAP dataset
nc=netCDF4.Dataset(url).variables
# In[14]:
# find closest NECOFS nodes to station locations
obs['0-Based Index'] = nearxy(nc['lon'][:],nc['lat'][:],obs['Lon'],obs['Lat'])
obs
# In[15]:
# Get desired time step
time_var = nc['time']
istart = netCDF4.date2index(start,time_var,select='nearest')
istop = netCDF4.date2index(stop,time_var,select='nearest')
# In[16]:
# get time values and convert to datetime objects
jd = netCDF4.num2date(time_var[istart:istop],time_var.units)
# In[17]:
# get all time steps of water level from each station
nsta = len(obs)
z = np.ones((len(jd),nsta))
for i in range(nsta):
z[:,i] = nc['hs'][istart:istop,obs['0-Based Index'][i]]
# In[18]:
# make a DataFrame out of the interpolated time series at each location
zvals=pd.DataFrame(z,index=jd,columns=obs.index)
# In[19]:
# list out a few values
zvals.head()
# In[20]:
# model blew up producing very high waves on Jan 21, 2016
# eliminate unrealistically high values
mask = zvals>10.
zvals[mask] = np.NaN
# In[21]:
# plotting at DataFrame is easy!
ax=zvals.plot(figsize=(16,4),grid=True,title=('Wave Height from %s Forecast' % model),legend=False);
# read units from dataset for ylabel
plt.ylabel(nc['hs'].units)
# plotting the legend outside the axis is a bit tricky
box = ax.get_position()
ax.set_position([box.x0, box.y0, box.width * 0.8, box.height])
ax.legend(loc='center left', bbox_to_anchor=(1, 0.5));
# In[22]:
# what is the maximum over the whole record at a specific location
zvals['Boston'].max()
# In[23]:
# make a new DataFrame of maximum water levels at all stations
b=pd.DataFrame(zvals.idxmax(),columns=['time of max value (UTC)'])
# create heading for new column containing max water level
zmax_heading='zmax (%s)' % nc['hs'].units
# Add new column to DataFrame
b[zmax_heading]=zvals.max()
# In[24]:
b
# In[ ]:
# In[ ]:
# In[ ]:
| [
"datetime.datetime",
"numpy.sqrt",
"matplotlib.pyplot.ylabel",
"datetime.datetime.utcnow",
"netCDF4.date2index",
"netCDF4.num2date",
"netCDF4.Dataset",
"pandas.DataFrame",
"datetime.timedelta"
] | [((3103, 3156), 'netCDF4.date2index', 'netCDF4.date2index', (['start', 'time_var'], {'select': '"""nearest"""'}), "(start, time_var, select='nearest')\n", (3121, 3156), False, 'import netCDF4\n'), ((3163, 3215), 'netCDF4.date2index', 'netCDF4.date2index', (['stop', 'time_var'], {'select': '"""nearest"""'}), "(stop, time_var, select='nearest')\n", (3181, 3215), False, 'import netCDF4\n'), ((3282, 3338), 'netCDF4.num2date', 'netCDF4.num2date', (['time_var[istart:istop]', 'time_var.units'], {}), '(time_var[istart:istop], time_var.units)\n', (3298, 3338), False, 'import netCDF4\n'), ((3627, 3671), 'pandas.DataFrame', 'pd.DataFrame', (['z'], {'index': 'jd', 'columns': 'obs.index'}), '(z, index=jd, columns=obs.index)\n', (3639, 3671), True, 'import pandas as pd\n'), ((4053, 4079), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (["nc['hs'].units"], {}), "(nc['hs'].units)\n", (4063, 4079), True, 'import matplotlib.pyplot as plt\n'), ((1042, 1062), 'datetime.datetime.utcnow', 'dt.datetime.utcnow', ([], {}), '()\n', (1060, 1062), True, 'import datetime as dt\n'), ((1065, 1088), 'datetime.timedelta', 'dt.timedelta', ([], {'hours': '(-72)'}), '(hours=-72)\n', (1077, 1088), True, 'import datetime as dt\n'), ((1096, 1116), 'datetime.datetime.utcnow', 'dt.datetime.utcnow', ([], {}), '()\n', (1114, 1116), True, 'import datetime as dt\n'), ((1119, 1142), 'datetime.timedelta', 'dt.timedelta', ([], {'hours': '(+72)'}), '(hours=+72)\n', (1131, 1142), True, 'import datetime as dt\n'), ((1181, 1213), 'datetime.datetime', 'dt.datetime', (['(1991)', '(1)', '(1)', '(0)', '(0)', '(0)'], {}), '(1991, 1, 1, 0, 0, 0)\n', (1192, 1213), True, 'import datetime as dt\n'), ((1211, 1233), 'datetime.timedelta', 'dt.timedelta', ([], {'hours': '(+0)'}), '(hours=+0)\n', (1223, 1233), True, 'import datetime as dt\n'), ((1242, 1274), 'datetime.datetime', 'dt.datetime', (['(1992)', '(7)', '(1)', '(0)', '(0)', '(0)'], {}), '(1992, 7, 1, 0, 0, 0)\n', (1253, 1274), True, 'import datetime as dt\n'), ((1272, 
1294), 'datetime.timedelta', 'dt.timedelta', ([], {'hours': '(+0)'}), '(hours=+0)\n', (1284, 1294), True, 'import datetime as dt\n'), ((1303, 1335), 'datetime.datetime', 'dt.datetime', (['(1992)', '(8)', '(1)', '(0)', '(0)', '(0)'], {}), '(1992, 8, 1, 0, 0, 0)\n', (1314, 1335), True, 'import datetime as dt\n'), ((1333, 1355), 'datetime.timedelta', 'dt.timedelta', ([], {'hours': '(+0)'}), '(hours=+0)\n', (1345, 1355), True, 'import datetime as dt\n'), ((1364, 1396), 'datetime.datetime', 'dt.datetime', (['(2016)', '(1)', '(1)', '(0)', '(0)', '(0)'], {}), '(2016, 1, 1, 0, 0, 0)\n', (1375, 1396), True, 'import datetime as dt\n'), ((1394, 1416), 'datetime.timedelta', 'dt.timedelta', ([], {'hours': '(+0)'}), '(hours=+0)\n', (1406, 1416), True, 'import datetime as dt\n'), ((1425, 1457), 'datetime.datetime', 'dt.datetime', (['(2016)', '(6)', '(1)', '(0)', '(0)', '(0)'], {}), '(2016, 6, 1, 0, 0, 0)\n', (1436, 1457), True, 'import datetime as dt\n'), ((1455, 1477), 'datetime.timedelta', 'dt.timedelta', ([], {'hours': '(+0)'}), '(hours=+0)\n', (1467, 1477), True, 'import datetime as dt\n'), ((2857, 2877), 'netCDF4.Dataset', 'netCDF4.Dataset', (['url'], {}), '(url)\n', (2872, 2877), False, 'import netCDF4\n'), ((2722, 2766), 'numpy.sqrt', 'np.sqrt', (['((x - xi[i]) ** 2 + (y - yi[i]) ** 2)'], {}), '((x - xi[i]) ** 2 + (y - yi[i]) ** 2)\n', (2729, 2766), True, 'import numpy as np\n')] |
"""
Simple ICP localisation demo
Compute position of each scan using ICP
with respect to the previous one
author: <NAME>
"""
import readDatasets as datasets
import matplotlib.pyplot as plt
import icp
import numpy as np
import copy
# Reading data: each scan is a dict carrying the laser points ("x", "y")
# and the odometry pose estimate ("pose") — see readDatasets.
#scanList = datasets.read_fr079(0)
scanList = datasets.read_u2is(0)
# Keep an untouched copy so the raw-odometry trajectory can be shown side
# by side with the ICP-corrected one.
odomScanList = copy.deepcopy(scanList)
# Parameters for scan processing
minScan = 1  # first scan to use
step = 5     # register every 5th scan against the previous kept scan
maxScan = len(scanList)-step
# Init displays: left axis = raw odometry, right axis = ICP-corrected
f, (ax1, ax2) = plt.subplots(1, 2, sharey=True,figsize=(14, 7))
c = np.random.rand(3,)  # one random colour per scan
ax1.scatter(odomScanList[minScan]["x"], odomScanList[minScan]["y"], color=c, s=1)
ax1.scatter(odomScanList[minScan]["pose"][0], odomScanList[minScan]["pose"][1], color=c, s=3)
ax1.axis([-5.5, 12.5, -12.5, 6.5])
ax1.set_title('Pose from raw odometry')
ax2.scatter(scanList[minScan]["x"], scanList[minScan]["y"], color=c, s=1)
ax2.scatter(scanList[minScan]["pose"][0], scanList[minScan]["pose"][1], color=c, s=3)
ax2.axis([-5.5, 12.5, -12.5, 6.5])
ax2.set_title('Pose after ICP correction')
plt.pause(0.1)
for a in range(minScan, maxScan, step):
    s1 = scanList[a]
    s2 = scanList[a+step]
    # Perform ICP between consecutive kept scans; returns rotation R,
    # translation t and a residual error. The numeric arguments are
    # presumably (max iterations, tolerance, ...) — TODO confirm against
    # the icp.icp signature.
    R, t, error = icp.icp(s1, s2, 200, 1e-7,0.4,0.85)
    # Propagate the correction to all future scans so each new ICP is
    # computed in the already-corrected frame.
    for b in range((a+step), maxScan, step):
        scanList[b] = datasets.transform_scan(scanList[b], R, t)
    # Display the newly registered scan in both views with a fresh colour.
    c = np.random.rand(3,)
    ax1.scatter(odomScanList[a+step]["x"], odomScanList[a+step]["y"], color=c, s=1)
    ax1.scatter(odomScanList[a+step]["pose"][0], odomScanList[a+step]["pose"][1], color=c, s=3)
    ax2.scatter(scanList[a+step]["x"], scanList[a+step]["y"], color=c, s=1)
    ax2.scatter(scanList[a+step]["pose"][0], scanList[a+step]["pose"][1], color=c, s=3)
    plt.pause(0.1)
plt.savefig('ICPLocalization.png')
print("Press Q in figure to finish...")
plt.show()
| [
"matplotlib.pyplot.savefig",
"numpy.random.rand",
"icp.icp",
"readDatasets.read_u2is",
"readDatasets.transform_scan",
"copy.deepcopy",
"matplotlib.pyplot.pause",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.show"
] | [((298, 319), 'readDatasets.read_u2is', 'datasets.read_u2is', (['(0)'], {}), '(0)\n', (316, 319), True, 'import readDatasets as datasets\n'), ((365, 388), 'copy.deepcopy', 'copy.deepcopy', (['scanList'], {}), '(scanList)\n', (378, 388), False, 'import copy\n'), ((506, 554), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(2)'], {'sharey': '(True)', 'figsize': '(14, 7)'}), '(1, 2, sharey=True, figsize=(14, 7))\n', (518, 554), True, 'import matplotlib.pyplot as plt\n'), ((559, 576), 'numpy.random.rand', 'np.random.rand', (['(3)'], {}), '(3)\n', (573, 576), True, 'import numpy as np\n'), ((1067, 1081), 'matplotlib.pyplot.pause', 'plt.pause', (['(0.1)'], {}), '(0.1)\n', (1076, 1081), True, 'import matplotlib.pyplot as plt\n'), ((1787, 1821), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""ICPLocalization.png"""'], {}), "('ICPLocalization.png')\n", (1798, 1821), True, 'import matplotlib.pyplot as plt\n'), ((1862, 1872), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1870, 1872), True, 'import matplotlib.pyplot as plt\n'), ((1207, 1245), 'icp.icp', 'icp.icp', (['s1', 's2', '(200)', '(1e-07)', '(0.4)', '(0.85)'], {}), '(s1, s2, 200, 1e-07, 0.4, 0.85)\n', (1214, 1245), False, 'import icp\n'), ((1404, 1421), 'numpy.random.rand', 'np.random.rand', (['(3)'], {}), '(3)\n', (1418, 1421), True, 'import numpy as np\n'), ((1771, 1785), 'matplotlib.pyplot.pause', 'plt.pause', (['(0.1)'], {}), '(0.1)\n', (1780, 1785), True, 'import matplotlib.pyplot as plt\n'), ((1338, 1380), 'readDatasets.transform_scan', 'datasets.transform_scan', (['scanList[b]', 'R', 't'], {}), '(scanList[b], R, t)\n', (1361, 1380), True, 'import readDatasets as datasets\n')] |
from typing import Tuple, List
import numpy as np
from math import ceil
def create_node(coordinates:Tuple[int, float, float]) -> dict:
    """
    Build a city dictionary from an (index, x, y) tuple.

    The input index is 1-based (as read from the instance file); the stored
    index is 0-based. A freshly created city always starts with capacity 0.
    """
    raw_index, raw_x, raw_y = coordinates
    return {
        'index': int(raw_index) - 1,
        'x': float(raw_x),
        'y': float(raw_y),
        'capacity': 0,  # filled in later from the demand section
    }
def get_route(value: int, routes: List[np.array]) -> int:
    """
    Return the index of the route that contains the given city, or -1 if
    the city appears in none of the routes.
    """
    return next((idx for idx, route in enumerate(routes) if value in route), -1)
def distance(node1: dict, node2: dict) -> float:
    """
    Euclidean distance between two city nodes (dicts with 'x' and 'y').
    """
    dx = node1['x'] - node2['x']
    dy = node1['y'] - node2['y']
    return (dx * dx + dy * dy) ** 0.5
def route_distance(dist_array: np.array, route: np.array, nodes: List[dict], capacity: int) -> Tuple[float, bool]:
    """
    Length of one route (depot -> cities -> depot) together with a
    feasibility flag. When the route demand exceeds the vehicle capacity,
    the distance is multiplied by 1 + (excess / capacity) as a penalty and
    the flag is False.

    dist_array is upper-triangular, so every leg is read with the smaller
    city index first.
    """
    # Depot (node 0) to the first city.
    total = dist_array[0][route[0]]
    # Legs between consecutive cities.
    for a, b in zip(route[:-1], route[1:]):
        lo, hi = (a, b) if a < b else (b, a)
        total += dist_array[lo][hi]
    # Last city back to the depot.
    total += dist_array[0][route[-1]]
    # Feasibility check against the vehicle capacity.
    demand = sum_route_capacity(route, nodes)
    if demand > capacity:
        return total * (1 + (demand - capacity) / capacity), False
    return total, True
def total_distance(dist_array: np.array, routes: List[np.array], nodes: np.array, capacity: int) -> Tuple[float, bool]:
    """
    Accumulate the (possibly penalised) distance of every route in a
    solution and report whether all routes respect the vehicle capacity.
    """
    total = 0
    all_feasible = True
    for route in routes:
        route_dist, route_ok = route_distance(dist_array, route, nodes, capacity)
        total += route_dist
        all_feasible = all_feasible and route_ok
    return total, all_feasible
def clients_distance(nodes: List[dict], clients: int) -> np.array:
    """
    Build an upper-triangular clients x clients matrix with the pairwise
    Euclidean distances between clients; entries on and below the diagonal
    stay 0.
    """
    distances = np.zeros((clients, clients), dtype=float)
    for i in range(clients):
        for j in range(i + 1, clients):
            distances[i, j] = distance(nodes[i], nodes[j])
    return distances
def sum_route_capacity(route: np.array, nodes: List[dict]) -> float:
    """
    Total demand (capacity) of all cities visited by a route.
    """
    total = 0
    for city in route:
        total += nodes[int(city)]['capacity']
    return total
def show_routes(distances:np.array, sol: List[np.array], nodes:np.array, capacity:int, file_writer) -> None:
    """
    Write every route of a solution to file_writer, together with the route
    demand (capacity) and its (possibly penalised) distance.
    """
    dist = 0  # last computed route distance; re-assigned inside the loop
    for i, route in enumerate(sol):
        # Print the route: 1-based route number followed by its cities.
        route_aux = route.flatten()
        file_writer.write(f'Rota #{i+1} - ')
        route_city = ""
        for route_i in route_aux.astype(int):
            route_city += f"{route_i} "
        file_writer.write(f'{route_city}\n')
        # Print the total demand served by this route.
        file_writer.write(f'Capacidade: {sum_route_capacity(route, nodes)}\n')
        # Print the route distance (penalised when over capacity).
        dist, _ = route_distance(distances, route, nodes, capacity)
        file_writer.write(f'Distancia da rota: {dist}\n')
file_writer.write('\n') | [
"numpy.zeros"
] | [((2773, 2814), 'numpy.zeros', 'np.zeros', (['(clients, clients)'], {'dtype': 'float'}), '((clients, clients), dtype=float)\n', (2781, 2814), True, 'import numpy as np\n')] |
"""Defines metrics used to evaluate uncertainty."""
import numpy as np
from scipy.stats import norm
from utils.util import to_one_hot
def gaussian_nll(y, mu, var):
    """Average negative log likelihood of y under elementwise N(mu, var).

    Args:
        y: numpy array, shape [batch_size], the true labels.
        mu: numpy array, shape [batch_size], the predicted means.
        var: numpy array, shape [batch_size], the predicted variances.

    Returns:
        float, the mean negative log likelihood.
    """
    log_probs = norm.logpdf(
        y.squeeze(), loc=mu.squeeze(), scale=np.sqrt(var.squeeze()))
    return float(-np.mean(log_probs))
def rmse(y_pred, y):
    """Root mean squared error between predictions and labels.

    Args:
        y_pred: numpy array, shape [batch_size], the predictions.
        y: numpy array, shape [batch_size], the corresponding labels.

    Returns:
        float, the root mean squared error.
    """
    diff = y_pred.squeeze() - y.squeeze()
    return float(np.sqrt(np.mean(diff * diff)))
def compute_regression_calibration(pred_mean, pred_var, target, num_bins=10):
    """Compute regression calibration for Gaussian probabilistic forecasts.

    The forecast for each point is assumed to be N(pred_mean, pred_var).

    References:
        [1] https://arxiv.org/abs/1807.00263

    Args:
        pred_mean: numpy array, shape [num_data, ], predicted means.
        pred_var: numpy array, shape [num_data, ], predicted variances.
        target: numpy array, shape [num_data, ], ground truths.
        num_bins: number of confidence bins.

    Returns:
        dict with keys 'reliability_diag', 'calibration_error', 'sharpness'.
    """
    pred_mean = pred_mean.flatten()
    pred_var = pred_var.flatten()
    target = target.flatten()
    # CDF value of every observation under its own predicted Gaussian.
    predicted_cdf = norm.cdf(target, loc=pred_mean, scale=np.sqrt(pred_var))
    # Expected confidence levels: right bin edges in (0, 1].
    expected_conf_levels = np.linspace(0, 1, num_bins + 1)[1:]
    # Observed confidence levels, Eq (8) in [1].
    observed_conf_levels = np.array(
        [np.mean(predicted_cdf < p) for p in expected_conf_levels])
    # Calibration error, Eq (9) in [1].
    calibration_error = float(
        np.sum((expected_conf_levels - observed_conf_levels) ** 2))
    # Sharpness, Eq (10) in [1].
    sharpness = np.mean(pred_var)
    reliability_diag = {
        "expected_conf_levels": expected_conf_levels,
        "observed_conf_levels": observed_conf_levels,
    }
    return {
        'reliability_diag': reliability_diag,
        'calibration_error': calibration_error,
        'sharpness': sharpness,
    }
def filter_top_k(probabilities, labels, top_k):
    """Keep only the top_k predicted probabilities per example, with the
    matching entries of the one-hot ground truth.

    If top_k is None, the full probability matrix and one-hot labels are
    returned unchanged.
    """
    one_hot = np.zeros(probabilities.shape)
    one_hot[np.arange(probabilities.shape[0]), labels] = 1
    if top_k is None:
        return probabilities, one_hot
    # Indices of the top_k largest probabilities via a partition on the
    # negated values (order inside the top_k is not guaranteed).
    neg = -1. * probabilities
    top_k_ind = np.argpartition(neg, top_k - 1, axis=-1)[:, :top_k]
    rows = np.arange(probabilities.shape[0])[:, None]
    return -1. * neg[rows, top_k_ind], one_hot[rows, top_k_ind]
def get_multiclass_predictions_and_correctness(probabilities, labels, top_k=1):
    """Return the top-k predicted probabilities and a correctness indicator.

    For top_k == 1 the indicator is a boolean vector; otherwise it is the
    one-hot ground truth restricted to the top-k predictions.
    """
    if top_k != 1:
        return filter_top_k(probabilities, labels, top_k)
    predicted = np.argmax(probabilities, -1)
    top_probs = probabilities[np.arange(len(labels)), predicted]
    return top_probs, np.equal(predicted, labels)
def nll(probabilities, labels):
    """Mean cross-entropy (negative log-likelihood) of the true labels.

    Args:
        probabilities: Array of probabilities of shape
            [num_samples, num_classes].
        labels: Integer array labels of shape [num_samples].

    Returns:
        float: computed NLL.
    """
    picked = probabilities[np.arange(labels.shape[0]), labels]
    return -np.log(picked).mean()
def brier_score(probabilities, labels):
    """Brier score: mean over examples of the squared difference to the
    one-hot labels, divided by the number of classes.

    Args:
        probabilities: Array of probabilities of shape
            [num_samples, num_classes].
        labels: Integer array labels of shape [num_samples].

    Returns:
        float: computed Brier score.
    """
    num_classes = probabilities.shape[1]
    one_hot = np.zeros_like(probabilities)
    one_hot[np.arange(labels.shape[0]), labels] = 1.
    per_example = ((one_hot - probabilities) ** 2).sum(axis=1)
    return np.mean(per_example / num_classes)
def accuracy(probabilities, labels):
    """Computes the top-1 accuracy of predictions.

    Thin wrapper around accuracy_top_k with top_k=1.

    Args:
        probabilities: Array of probabilities of shape
            [num_samples, num_classes].
        labels: Integer array labels of shape [num_samples].
    Returns:
        float: Top-1 accuracy of predictions.
    """
    return accuracy_top_k(probabilities, labels, 1)
def accuracy_top_k(probabilities, labels, top_k):
    """Fraction of examples whose true label is among the top_k predictions.

    Args:
        probabilities: Array of probabilities of shape
            [num_samples, num_classes].
        labels: Integer array labels of shape [num_samples].
        top_k: Integer. Number of highest-probability classes to consider.

    Returns:
        float: Top-k accuracy of predictions.
    """
    _, hits = filter_top_k(probabilities, labels, top_k)
    return np.mean(hits.any(axis=-1))
def bin_predictions_and_accuracies(probabilities, ground_truth, bins=10):
    """Bin predicted probabilities and compute per-bin accuracy.

    Args:
        probabilities: numpy vector of N predicted probabilities.
        ground_truth: numpy vector of N labels in {0, 1, True, False}.
        bins: int number of equal-width bins over [0, 1], or explicit edges.

    Returns:
        bin_edges: bin edges, including leftmost and rightmost.
        accuracies: per-bin mean of the ground truth (NaN for empty bins).
        counts: number of examples per bin.
    """
    num_bins = bins if isinstance(bins, int) else bins.size - 1
    # Nudge exact zeros so they land in the first bin under the
    # right-closed digitize below.
    probabilities = np.where(probabilities == 0, 1e-8, probabilities)
    counts, bin_edges = np.histogram(probabilities, bins=bins, range=[0., 1.])
    indices = np.digitize(probabilities, bin_edges, right=True)
    accuracies = np.array(
        [np.mean(ground_truth[indices == b]) for b in range(1, num_bins + 1)])
    return bin_edges, accuracies, counts
def bin_centers_of_mass(probabilities, bin_edges):
    """Mean predicted probability within each bin (NaN for empty bins)."""
    probabilities = np.where(probabilities == 0, 1e-8, probabilities)
    bin_ids = np.digitize(probabilities, bin_edges, right=True)
    return np.array([np.mean(probabilities[bin_ids == b])
                     for b in range(1, len(bin_edges))])
def ece(probabilities, ground_truth, bins=10):
    """Expected calibration error of a set of predictions in [0, 1].

    Args:
        probabilities: numpy vector of N probabilities assigned to
            each prediction.
        ground_truth: numpy vector of N ground-truth labels in
            {0, 1, True, False}.
        bins: number of equal-width bins in [0, 1], or an array of bin edges.

    Returns:
        float: the expected calibration error.
    """
    bin_edges, accuracies, counts = bin_predictions_and_accuracies(
        probabilities, ground_truth, bins)
    bin_centers = bin_centers_of_mass(probabilities, bin_edges)
    num_examples = float(np.sum(counts))
    # Count-weighted average of |confidence - accuracy| over non-empty bins.
    return np.sum([(counts[b] / num_examples) *
                   np.sum(np.abs(bin_centers[b] - accuracies[b]))
                   for b in range(bin_centers.size) if counts[b] > 0])
def ece_multiclass(probabilities, labels, bins=10,
                   top_k=1):
    """Expected calibration error (Guo et al. 2017) for multiclass outputs.

    Args:
        probabilities: Array of probabilities of shape
            [num_samples, num_classes].
        labels: Integer array labels of shape [num_samples].
        bins: number of equal-width bins in [0, 1], or an array of bin edges.
        top_k: Integer or None. If an integer, only the top-k predicted
            probabilities enter the ECE computation; if None, all classes
            are used.

    Returns:
        float: Expected calibration error.
    """
    top_probs, correct = get_multiclass_predictions_and_correctness(
        probabilities, labels, top_k)
    return ece(top_probs.flatten(), correct.flatten(), bins)
def compute_accuracies_at_confidences(labels, probs, thresholds):
    """Accuracy and count of samples above each confidence threshold.

    Args:
        labels: Array of integer categorical labels.
        probs: Array of categorical probabilities.
        thresholds: Array of floating point probability thresholds in [0, 1).

    Returns:
        accuracies: accuracy over examples with confidence >= T for each T.
        counts: number of examples with confidence >= T for each T.
    """
    assert probs.shape[:-1] == labels.shape
    pred_class = probs.argmax(-1)
    pred_conf = probs.max(-1)
    out_shape = (len(thresholds),) + probs.shape[:-2]
    accuracies = np.zeros(out_shape)
    counts = np.zeros(out_shape)
    correct = np.equal(pred_class, labels)
    for i, thresh in enumerate(thresholds):
        keep = pred_conf >= thresh
        counts[i] = keep.sum(-1)
        # Masked mean: entries below the threshold do not contribute.
        accuracies[i] = np.ma.masked_array(correct, mask=~keep).mean(-1)
    return accuracies, counts
def compute_calibration(y, p_mean, num_bins=10):
    """Compute the calibration.

    References:
        https://arxiv.org/abs/1706.04599
        https://arxiv.org/abs/1807.00263

    Args:
        y: numpy array, shape [num_samples], the true labels (one-hot
            encoded and decoded internally, matching the original API).
        p_mean: numpy array, size (?, num_classes)
            containing the mean output predicted probabilities
        num_bins: number of bins

    Returns:
        cal: a dictionary
            {reliability_diag: reliability diagram
            ece: Expected Calibration Error
            mce: Maximum Calibration Error
            }
    """
    # Predicted class and its confidence (max predicted probability).
    class_pred = np.argmax(p_mean, axis=1)
    # Convert labels to one-hot encoding (kept for API parity), then back.
    y = to_one_hot(y)
    conf = np.max(p_mean, axis=1)
    y = np.argmax(y, axis=1)
    # Per-bin storage.
    acc_tab = np.zeros(num_bins)  # empirical (true) confidence
    mean_conf = np.zeros(num_bins)  # predicted confidence
    nb_items_bin = np.zeros(num_bins)  # number of items in the bins
    tau_tab = np.linspace(0, 1, num_bins+1)  # confidence bin edges
    for i in np.arange(num_bins):  # iterate over the bins
        # Items whose confidence falls in [tau_i, tau_{i+1}).
        # NOTE(review): predictions with conf == 1.0 exactly fall in no bin;
        # confirm this matches the intended binning.
        sec = (tau_tab[i + 1] > conf) & (conf >= tau_tab[i])
        nb_items_bin[i] = np.sum(sec)
        # Predicted classes and true classes of the selected items.
        class_pred_sec, y_sec = class_pred[sec], y[sec]
        # Average predicted confidence and empirical accuracy of the bin.
        mean_conf[i] = np.mean(conf[sec]) if nb_items_bin[i] > 0 else np.nan
        acc_tab[i] = np.mean(
            class_pred_sec == y_sec) if nb_items_bin[i] > 0 else np.nan
    # Drop empty bins.
    mean_conf = mean_conf[nb_items_bin > 0]
    acc_tab = acc_tab[nb_items_bin > 0]
    nb_items_bin = nb_items_bin[nb_items_bin > 0]
    # Expected Calibration Error: count-weighted mean confidence/accuracy gap.
    # BUG FIX: the deprecated alias np.float was removed in NumPy 1.24 and
    # crashed here; the builtin float is the documented replacement.
    ece = np.average(
        np.absolute(mean_conf - acc_tab),
        weights=nb_items_bin.astype(float) / np.sum(nb_items_bin))
    # Maximum Calibration Error: worst-bin gap.
    mce = np.max(np.absolute(mean_conf - acc_tab))
    # Reliability diagram
    reliability_diag = (mean_conf, acc_tab)
    # Saving
    cal = {'reliability_diag': reliability_diag,
           'ece': ece,
           'mce': mce}
    return cal
| [
"numpy.sqrt",
"numpy.log",
"numpy.equal",
"utils.util.to_one_hot",
"numpy.arange",
"numpy.mean",
"numpy.histogram",
"numpy.where",
"numpy.max",
"numpy.linspace",
"numpy.ma.masked_array",
"numpy.abs",
"numpy.digitize",
"numpy.argmax",
"numpy.argpartition",
"numpy.absolute",
"numpy.sum... | [((2492, 2527), 'numpy.zeros_like', 'np.zeros_like', (['expected_conf_levels'], {}), '(expected_conf_levels)\n', (2505, 2527), True, 'import numpy as np\n'), ((2898, 2915), 'numpy.mean', 'np.mean', (['pred_var'], {}), '(pred_var)\n', (2905, 2915), True, 'import numpy as np\n'), ((3406, 3435), 'numpy.zeros', 'np.zeros', (['probabilities.shape'], {}), '(probabilities.shape)\n', (3414, 3435), True, 'import numpy as np\n'), ((3622, 3672), 'numpy.argpartition', 'np.argpartition', (['negative_prob', '(top_k - 1)'], {'axis': '(-1)'}), '(negative_prob, top_k - 1, axis=-1)\n', (3637, 3672), True, 'import numpy as np\n'), ((5264, 5292), 'numpy.zeros_like', 'np.zeros_like', (['probabilities'], {}), '(probabilities)\n', (5277, 5292), True, 'import numpy as np\n'), ((7311, 7361), 'numpy.where', 'np.where', (['(probabilities == 0)', '(1e-08)', 'probabilities'], {}), '(probabilities == 0, 1e-08, probabilities)\n', (7319, 7361), True, 'import numpy as np\n'), ((7385, 7441), 'numpy.histogram', 'np.histogram', (['probabilities'], {'bins': 'bins', 'range': '[0.0, 1.0]'}), '(probabilities, bins=bins, range=[0.0, 1.0])\n', (7397, 7441), True, 'import numpy as np\n'), ((7454, 7503), 'numpy.digitize', 'np.digitize', (['probabilities', 'bin_edges'], {'right': '(True)'}), '(probabilities, bin_edges, right=True)\n', (7465, 7503), True, 'import numpy as np\n'), ((7742, 7792), 'numpy.where', 'np.where', (['(probabilities == 0)', '(1e-08)', 'probabilities'], {}), '(probabilities == 0, 1e-08, probabilities)\n', (7750, 7792), True, 'import numpy as np\n'), ((7806, 7855), 'numpy.digitize', 'np.digitize', (['probabilities', 'bin_edges'], {'right': '(True)'}), '(probabilities, bin_edges, right=True)\n', (7817, 7855), True, 'import numpy as np\n'), ((8693, 8707), 'numpy.sum', 'np.sum', (['counts'], {}), '(counts)\n', (8699, 8707), True, 'import numpy as np\n'), ((10638, 10653), 'numpy.zeros', 'np.zeros', (['shape'], {}), '(shape)\n', (10646, 10653), True, 'import numpy as np\n'), 
((10667, 10682), 'numpy.zeros', 'np.zeros', (['shape'], {}), '(shape)\n', (10675, 10682), True, 'import numpy as np\n'), ((10693, 10724), 'numpy.equal', 'np.equal', (['predict_class', 'labels'], {}), '(predict_class, labels)\n', (10701, 10724), True, 'import numpy as np\n'), ((11631, 11656), 'numpy.argmax', 'np.argmax', (['p_mean'], {'axis': '(1)'}), '(p_mean, axis=1)\n', (11640, 11656), True, 'import numpy as np\n'), ((11707, 11720), 'utils.util.to_one_hot', 'to_one_hot', (['y'], {}), '(y)\n', (11717, 11720), False, 'from utils.util import to_one_hot\n'), ((11796, 11818), 'numpy.max', 'np.max', (['p_mean'], {'axis': '(1)'}), '(p_mean, axis=1)\n', (11802, 11818), True, 'import numpy as np\n'), ((11893, 11913), 'numpy.argmax', 'np.argmax', (['y'], {'axis': '(1)'}), '(y, axis=1)\n', (11902, 11913), True, 'import numpy as np\n'), ((11943, 11961), 'numpy.zeros', 'np.zeros', (['num_bins'], {}), '(num_bins)\n', (11951, 11961), True, 'import numpy as np\n'), ((12009, 12027), 'numpy.zeros', 'np.zeros', (['num_bins'], {}), '(num_bins)\n', (12017, 12027), True, 'import numpy as np\n'), ((12071, 12089), 'numpy.zeros', 'np.zeros', (['num_bins'], {}), '(num_bins)\n', (12079, 12089), True, 'import numpy as np\n'), ((12135, 12166), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', '(num_bins + 1)'], {}), '(0, 1, num_bins + 1)\n', (12146, 12166), True, 'import numpy as np\n'), ((12198, 12217), 'numpy.arange', 'np.arange', (['num_bins'], {}), '(num_bins)\n', (12207, 12217), True, 'import numpy as np\n'), ((1038, 1064), 'numpy.mean', 'np.mean', (['((y - y_pred) ** 2)'], {}), '((y - y_pred) ** 2)\n', (1045, 1064), True, 'import numpy as np\n'), ((2369, 2400), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', '(num_bins + 1)'], {}), '(0, 1, num_bins + 1)\n', (2380, 2400), True, 'import numpy as np\n'), ((2611, 2637), 'numpy.mean', 'np.mean', (['(predicted_cdf < p)'], {}), '(predicted_cdf < p)\n', (2618, 2637), True, 'import numpy as np\n'), ((2721, 2779), 'numpy.sum', 'np.sum', 
(['((expected_conf_levels - observed_conf_levels) ** 2)'], {}), '((expected_conf_levels - observed_conf_levels) ** 2)\n', (2727, 2779), True, 'import numpy as np\n'), ((3728, 3761), 'numpy.arange', 'np.arange', (['probabilities.shape[0]'], {}), '(probabilities.shape[0])\n', (3737, 3761), True, 'import numpy as np\n'), ((4172, 4200), 'numpy.argmax', 'np.argmax', (['probabilities', '(-1)'], {}), '(probabilities, -1)\n', (4181, 4200), True, 'import numpy as np\n'), ((4301, 4336), 'numpy.equal', 'np.equal', (['class_predictions', 'labels'], {}), '(class_predictions, labels)\n', (4309, 4336), True, 'import numpy as np\n'), ((12411, 12422), 'numpy.sum', 'np.sum', (['sec'], {}), '(sec)\n', (12417, 12422), True, 'import numpy as np\n'), ((13062, 13094), 'numpy.absolute', 'np.absolute', (['(mean_conf - acc_tab)'], {}), '(mean_conf - acc_tab)\n', (13073, 13094), True, 'import numpy as np\n'), ((13216, 13248), 'numpy.absolute', 'np.absolute', (['(mean_conf - acc_tab)'], {}), '(mean_conf - acc_tab)\n', (13227, 13248), True, 'import numpy as np\n'), ((2058, 2075), 'numpy.sqrt', 'np.sqrt', (['pred_var'], {}), '(pred_var)\n', (2065, 2075), True, 'import numpy as np\n'), ((3455, 3488), 'numpy.arange', 'np.arange', (['probabilities.shape[0]'], {}), '(probabilities.shape[0])\n', (3464, 3488), True, 'import numpy as np\n'), ((5313, 5339), 'numpy.arange', 'np.arange', (['labels.shape[0]'], {}), '(labels.shape[0])\n', (5322, 5339), True, 'import numpy as np\n'), ((5433, 5461), 'numpy.sum', 'np.sum', (['squared_diff'], {'axis': '(1)'}), '(squared_diff, axis=1)\n', (5439, 5461), True, 'import numpy as np\n'), ((7531, 7566), 'numpy.mean', 'np.mean', (['ground_truth[indices == i]'], {}), '(ground_truth[indices == i])\n', (7538, 7566), True, 'import numpy as np\n'), ((7877, 7913), 'numpy.mean', 'np.mean', (['probabilities[indices == i]'], {}), '(probabilities[indices == i])\n', (7884, 7913), True, 'import numpy as np\n'), ((12648, 12666), 'numpy.mean', 'np.mean', (['conf[sec]'], {}), 
'(conf[sec])\n', (12655, 12666), True, 'import numpy as np\n'), ((12767, 12799), 'numpy.mean', 'np.mean', (['(class_pred_sec == y_sec)'], {}), '(class_pred_sec == y_sec)\n', (12774, 12799), True, 'import numpy as np\n'), ((10870, 10904), 'numpy.ma.masked_array', 'np.ma.masked_array', (['eq'], {'mask': '(~mask)'}), '(eq, mask=~mask)\n', (10888, 10904), True, 'import numpy as np\n'), ((13144, 13164), 'numpy.sum', 'np.sum', (['nb_items_bin'], {}), '(nb_items_bin)\n', (13150, 13164), True, 'import numpy as np\n'), ((636, 648), 'numpy.sqrt', 'np.sqrt', (['var'], {}), '(var)\n', (643, 648), True, 'import numpy as np\n'), ((4821, 4842), 'numpy.log', 'np.log', (['probabilities'], {}), '(probabilities)\n', (4827, 4842), True, 'import numpy as np\n'), ((8779, 8817), 'numpy.abs', 'np.abs', (['(bin_centers[i] - accuracies[i])'], {}), '(bin_centers[i] - accuracies[i])\n', (8785, 8817), True, 'import numpy as np\n')] |
import abc
import numpy as np
class Waveform(abc.ABC):
    """Abstract base class for a sampled waveform stored as a 1-D numpy array."""

    _waveform = None

    @property
    @abc.abstractmethod
    def waveform(self) -> np.ndarray:
        """Return the samples as a 1-D numpy array."""
        raise NotImplementedError

    def __len__(self):
        return len(self.waveform)

    def shift(self, shift=0):
        """Shift the waveform in time by zero-padding.

        :param shift: shift > 0 means shifting right, i.e. pad ``shift``
            zeros on the left; e.g., shift = 3: [1 2 3 4] => [0 0 0 1 2 3 4].
            shift < 0 pads ``-shift`` zeros on the right.
        :return:
        """
        assert isinstance(self.waveform, np.ndarray)
        if shift > 0:
            self._waveform = np.concatenate((np.zeros(shift), self._waveform))
        if shift < 0:
            # Bug fix: the original passed the negative shift straight to
            # np.zeros, which raises "ValueError: negative dimensions are
            # not allowed". Pad with -shift zeros instead.
            # NOTE(review): this extends the waveform rather than dropping
            # leading samples — confirm the intended left-shift semantics.
            self._waveform = np.concatenate((self._waveform, np.zeros(-shift)))

    def __add__(self, other: "Waveform"):
        """Element-wise sum; the result is as long as the longer operand."""
        assert isinstance(other, Waveform), f"{other} is not a Waveform"
        w = ArbitraryWaveform(length=max(len(self), len(other)))
        w.waveform[:len(self)] = self.waveform
        w.waveform[:len(other)] += other.waveform
        return w

    def __radd__(self, other):
        # Support sum(...) which starts from the integer 0.
        if other == 0:
            return self
        return self.__add__(other)
class ArbitraryWaveform(Waveform):
    """Concrete waveform backed by a plain numpy sample buffer.

    Starts as ``length`` zeros unless an explicit 1-D ``waveform`` array
    is supplied.
    """

    def __init__(self, length=10, waveform=None):
        if waveform is None:
            # No samples supplied: allocate an all-zero buffer.
            self._waveform = np.zeros(length)
            return
        # An explicit buffer must be one-dimensional.
        assert waveform.ndim == 1
        self._waveform = waveform

    @property
    def waveform(self) -> np.ndarray:
        """The underlying sample buffer."""
        return self._waveform

    @waveform.setter
    def waveform(self, value):
        # Overwrite the leading samples in place; the buffer keeps its size.
        self._waveform[:len(value)] = value
| [
"numpy.zeros"
] | [((1324, 1340), 'numpy.zeros', 'np.zeros', (['length'], {}), '(length)\n', (1332, 1340), True, 'import numpy as np\n'), ((541, 556), 'numpy.zeros', 'np.zeros', (['shift'], {}), '(shift)\n', (549, 556), True, 'import numpy as np\n'), ((658, 673), 'numpy.zeros', 'np.zeros', (['shift'], {}), '(shift)\n', (666, 673), True, 'import numpy as np\n')] |
"""
Collection of Data Science helper functions
"""
import pandas as pd
import numpy as np
from sklearn.metrics import confusion_matrix
from sklearn.utils.multiclass import unique_labels
import seaborn as sns
def confusion_plot(y_true, y_pred, cmap='viridis'):
    """
    Plots a confusion matrix using the Seaborn library.

    Args:
        y_true: array-like of ground-truth labels.
        y_pred: array-like of predicted labels, same length as y_true.
        cmap: matplotlib colormap name for the heatmap.

    Returns:
        The matplotlib Axes produced by seaborn.heatmap.
    """
    # Bug fix: the original called unique_labels(y_val), which raised a
    # NameError (y_val is undefined in this scope). Derive the label set
    # from both the true and predicted labels so rows/columns cover every
    # class that appears in either.
    labels = unique_labels(y_true, y_pred)
    columns = [f'Predicted {label}' for label in labels]
    index = [f'Actual {label}' for label in labels]
    # Pass labels= explicitly so the matrix rows/columns line up with the
    # headers built above.
    table = pd.DataFrame(confusion_matrix(y_true, y_pred, labels=labels),
                 columns=columns,
                 index=index)
    return sns.heatmap(table, annot=True, fmt='d', cmap=cmap)
# Small example DataFrames — presumably fixture/demo data; confirm usage.
ONES = pd.DataFrame(np.ones(10))  # single column of ten 1.0s
ZEROS = pd.DataFrame(np.zeros(50))  # single column of fifty 0.0s
| [
"numpy.ones",
"seaborn.heatmap",
"numpy.zeros",
"sklearn.utils.multiclass.unique_labels",
"sklearn.metrics.confusion_matrix"
] | [((349, 369), 'sklearn.utils.multiclass.unique_labels', 'unique_labels', (['y_val'], {}), '(y_val)\n', (362, 369), False, 'from sklearn.utils.multiclass import unique_labels\n'), ((629, 679), 'seaborn.heatmap', 'sns.heatmap', (['table'], {'annot': '(True)', 'fmt': '"""d"""', 'cmap': 'cmap'}), "(table, annot=True, fmt='d', cmap=cmap)\n", (640, 679), True, 'import seaborn as sns\n'), ((702, 713), 'numpy.ones', 'np.ones', (['(10)'], {}), '(10)\n', (709, 713), True, 'import numpy as np\n'), ((737, 749), 'numpy.zeros', 'np.zeros', (['(50)'], {}), '(50)\n', (745, 749), True, 'import numpy as np\n'), ((504, 536), 'sklearn.metrics.confusion_matrix', 'confusion_matrix', (['y_true', 'y_pred'], {}), '(y_true, y_pred)\n', (520, 536), False, 'from sklearn.metrics import confusion_matrix\n')] |
'''
TODO:
Median trimmer
'''
import numpy as np
def mad(arr, axis=None):
    """Median absolute deviation of ``arr`` about its median along ``axis``."""
    center = np.median(arr, axis=axis)
    deviations = abs(arr - center)
    return np.median(deviations, axis=axis)
def bin_median(x, y, nbin):
    """Bin ``y`` by ``x`` into ``nbin`` equal-width bins; per-bin median and error.

    Bin centers sit on a uniform grid over [x.min(), x.max()]; each bin
    spans +/- one half bin width (adjacent bins share their boundary
    point, which is then counted in both).

    Returns:
        (bin_centers, binned, error): arrays of length ``nbin``.  ``error``
        is the standard error std/sqrt(count).  Empty bins yield np.nan.
    """
    binsize = (x.max() - x.min()) / (2 * nbin)
    bin_centers = np.linspace(x.min() + binsize, x.max() - binsize, nbin)
    binned = np.empty(nbin)
    error = np.empty(nbin)
    for c in range(bin_centers.size):
        mask = (x >= bin_centers[c] - binsize) & (x <= bin_centers[c] + binsize)
        n = np.sum(mask)
        if n == 0:
            # Robustness fix: an empty bin previously triggered numpy
            # warnings (median/std of empty slice) and a 0/0 division;
            # report nan explicitly and silently instead.
            binned[c] = np.nan
            error[c] = np.nan
            continue
        binned[c] = np.median(y[mask])
        error[c] = np.std(y[mask]) / np.sqrt(n)
    return bin_centers, binned, error
def bin_mean(x, y, nbin):
    """Bin ``y`` by ``x`` into ``nbin`` equal-width bins; per-bin mean and error.

    Same binning scheme as bin_median: uniform centers over
    [x.min(), x.max()], each bin spanning +/- one half bin width
    (adjacent bins share their boundary point).

    Returns:
        (bin_centers, binned, error): arrays of length ``nbin``.  ``error``
        is the standard error std/sqrt(count).  Empty bins yield np.nan.
    """
    binsize = (x.max() - x.min()) / (2 * nbin)
    bin_centers = np.linspace(x.min() + binsize, x.max() - binsize, nbin)
    binned = np.empty(nbin)
    error = np.empty(nbin)
    for c in range(bin_centers.size):
        mask = (x >= bin_centers[c] - binsize) & (x <= bin_centers[c] + binsize)
        n = np.sum(mask)
        if n == 0:
            # Robustness fix: an empty bin previously triggered numpy
            # warnings (mean/std of empty slice) and a 0/0 division;
            # report nan explicitly and silently instead.
            binned[c] = np.nan
            error[c] = np.nan
            continue
        binned[c] = np.mean(y[mask])
        error[c] = np.std(y[mask]) / np.sqrt(n)
    return bin_centers, binned, error
def bin_sum(data, nbin):
    """Count how many samples of ``data`` fall into each of ``nbin`` bins.

    Returns:
        (bin_centers, counts): ``counts[c]`` is the number of points within
        +/- one half bin width of ``bin_centers[c]`` (boundaries inclusive).
    """
    half_width = (data.max() - data.min()) / (2 * nbin)
    bin_centers = np.linspace(data.min() + half_width, data.max() - half_width, nbin)
    binned = np.empty(nbin)
    for idx, center in enumerate(bin_centers):
        in_bin = (data >= center - half_width) & (data <= center + half_width)
        binned[idx] = np.sum(in_bin)
    return bin_centers, binned
def csmooth(x, y, interval, eval=None):
    """Cubic sliding-window smoother.

    For every sample x[i], fits a cubic over the points strictly inside
    (x[i], x[i] + interval) when more than four points fall there, then
    averages the fitted values over all windows covering each evaluation
    point.  Points covered by no window pass through as zero.

    :param eval: points at which to evaluate the smooth; defaults to x.
    """
    if eval is None:
        eval = x
    window_hits = np.zeros(eval.size)
    fit_totals = np.zeros(eval.size)
    for i in range(y.size):
        lo = x[i]
        hi = x[i] + interval
        in_window = (x > lo) & (x < hi)
        if np.sum(in_window) <= 4:
            continue  # too few points for a stable cubic fit
        coeffs = np.polyfit(x[in_window], y[in_window], 3)
        at_eval = (eval > lo) & (eval < hi)
        fit_totals[at_eval] += np.polyval(coeffs, eval[at_eval])
        window_hits[at_eval] += 1
    # Avoid division by zero where no window covered an evaluation point.
    window_hits[window_hits == 0] = 1
    return fit_totals / window_hits
def jackknife_variance(func, data, N=None, args=()):
    """Jackknife estimate of the variance of the statistic ``func(data, *args)``.

    :param func: statistic taking (data, *args) and returning a scalar.
    :param data: 1-D numpy array of samples.
    :param N: if None, run the full leave-one-out jackknife over all
        ``data.size`` points; otherwise delete ``N`` randomly chosen
        samples, one at a time.
    :param args: extra positional arguments forwarded to ``func``.
    """
    estimate = func(data, *args)
    if N is None:
        N = data.size
        omit_points = np.arange(N)
    else:
        # Bug fix: the original called np.randint, which does not exist;
        # the integer sampler lives in np.random.
        omit_points = np.random.randint(0, data.size, N)
    other_estimates = np.empty(N)
    for i in range(N):
        # Bug fix: delete the chosen sample omit_points[i]; the original
        # deleted index i, silently ignoring the random selection above.
        other_estimates[i] = func(np.delete(data, omit_points[i]), *args)
    return (data.size - 1) / N * np.sum((estimate - other_estimates) ** 2)
'''
under construction
'''
def median_filter(x, y, width=1, debug=False):
    """Sigma-clip ``y`` against a running local median.

    For each point, fits a quadratic over a window of +/- ``width``
    samples, takes the smaller of the MADs about the fit and about the
    plain window median, and keeps points strictly within 3 sigma of the
    window median (sigma = 1.4826 * MAD).

    Bug fix: the original unconditionally plotted the clipping envelope
    and called exit(), so the return statement was unreachable and the
    process was killed.  The plotting is now opt-in via ``debug`` and no
    longer terminates the process.

    :returns: (x[mask], y[mask]) with outliers removed.
    """
    lbound = np.zeros(y.size)
    ubound = np.zeros(x.size)
    cen = np.zeros(x.size)
    for i in range(y.size):
        lo = max(i - width, 0)
        hi = min(i + width, y.size)
        tsec = x[lo:hi]
        fsec = y[lo:hi]
        fitmid = np.polyval(np.polyfit(tsec, fsec, 2), tsec)
        normid = np.median(fsec)
        # Use the smaller MAD so a poor quadratic fit cannot inflate sigma.
        mad = min(np.median(abs(fsec - fitmid)), np.median(abs(fsec - normid)))
        cen[i] = normid
        sigma = 1.4826 * mad  # MAD -> Gaussian-equivalent sigma
        ubound[i] = normid + 3 * sigma
        lbound[i] = normid - 3 * sigma
    if debug:
        # Optional visual check of the clipping envelope.
        from matplotlib import pyplot as plt
        plt.plot(x, lbound, 'g-')
        plt.plot(x, ubound, 'g-')
        plt.plot(x, cen, 'b-')
        plt.ylim(y.min(), y.max())
        plt.show()
    mask = ((y < ubound) & (y > lbound))
    return x[mask], y[mask]
'''
Median filter test code
'''
if __name__ == '__main__':
    # Smoke test: run the median filter on one Kepler long-cadence light
    # curve from a hard-coded local data directory.
    import os
    from astropy.io import fits
    os.chdir('/home/ben/research/kepler_llc/007500161')
    files = [f for f in os.listdir('.') if f.endswith('_llc.fits')]
    # The first FITS extension holds the light-curve table.
    contents = fits.open(files[0])[1].data
    x = contents['TIME']
    y = contents['PDCSAP_FLUX']
    # Drop cadences where either the time or the flux is NaN.
    mask = np.invert(np.isnan(x) | np.isnan(y))
    x = x[mask]
    y = y[mask]
    median_filter(x,y,11)
| [
"numpy.randint",
"numpy.mean",
"numpy.median",
"os.listdir",
"numpy.polyfit",
"numpy.delete",
"matplotlib.pyplot.plot",
"os.chdir",
"numpy.sum",
"numpy.zeros",
"numpy.empty",
"numpy.polyval",
"numpy.isnan",
"numpy.std",
"astropy.io.fits.open",
"numpy.arange",
"matplotlib.pyplot.show"... | [((84, 109), 'numpy.median', 'np.median', (['arr'], {'axis': 'axis'}), '(arr, axis=axis)\n', (93, 109), True, 'import numpy as np\n'), ((311, 325), 'numpy.empty', 'np.empty', (['nbin'], {}), '(nbin)\n', (319, 325), True, 'import numpy as np\n'), ((338, 352), 'numpy.empty', 'np.empty', (['nbin'], {}), '(nbin)\n', (346, 352), True, 'import numpy as np\n'), ((768, 782), 'numpy.empty', 'np.empty', (['nbin'], {}), '(nbin)\n', (776, 782), True, 'import numpy as np\n'), ((795, 809), 'numpy.empty', 'np.empty', (['nbin'], {}), '(nbin)\n', (803, 809), True, 'import numpy as np\n'), ((1226, 1240), 'numpy.empty', 'np.empty', (['nbin'], {}), '(nbin)\n', (1234, 1240), True, 'import numpy as np\n'), ((1524, 1543), 'numpy.zeros', 'np.zeros', (['eval.size'], {}), '(eval.size)\n', (1532, 1543), True, 'import numpy as np\n'), ((1555, 1574), 'numpy.zeros', 'np.zeros', (['eval.size'], {}), '(eval.size)\n', (1563, 1574), True, 'import numpy as np\n'), ((2201, 2212), 'numpy.empty', 'np.empty', (['N'], {}), '(N)\n', (2209, 2212), True, 'import numpy as np\n'), ((2453, 2469), 'numpy.zeros', 'np.zeros', (['y.size'], {}), '(y.size)\n', (2461, 2469), True, 'import numpy as np\n'), ((2483, 2499), 'numpy.zeros', 'np.zeros', (['x.size'], {}), '(x.size)\n', (2491, 2499), True, 'import numpy as np\n'), ((2510, 2526), 'numpy.zeros', 'np.zeros', (['x.size'], {}), '(x.size)\n', (2518, 2526), True, 'import numpy as np\n'), ((3020, 3045), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'lbound', '"""g-"""'], {}), "(x, lbound, 'g-')\n", (3028, 3045), True, 'from matplotlib import pyplot as plt\n'), ((3048, 3073), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'ubound', '"""g-"""'], {}), "(x, ubound, 'g-')\n", (3056, 3073), True, 'from matplotlib import pyplot as plt\n'), ((3076, 3098), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'cen', '"""b-"""'], {}), "(x, cen, 'b-')\n", (3084, 3098), True, 'from matplotlib import pyplot as plt\n'), ((3155, 3165), 'matplotlib.pyplot.show', 
'plt.show', ([], {}), '()\n', (3163, 3165), True, 'from matplotlib import pyplot as plt\n'), ((3367, 3418), 'os.chdir', 'os.chdir', (['"""/home/ben/research/kepler_llc/007500161"""'], {}), "('/home/ben/research/kepler_llc/007500161')\n", (3375, 3418), False, 'import os\n'), ((492, 510), 'numpy.median', 'np.median', (['y[mask]'], {}), '(y[mask])\n', (501, 510), True, 'import numpy as np\n'), ((945, 961), 'numpy.mean', 'np.mean', (['y[mask]'], {}), '(y[mask])\n', (952, 961), True, 'import numpy as np\n'), ((1384, 1396), 'numpy.sum', 'np.sum', (['mask'], {}), '(mask)\n', (1390, 1396), True, 'import numpy as np\n'), ((2103, 2115), 'numpy.arange', 'np.arange', (['N'], {}), '(N)\n', (2112, 2115), True, 'import numpy as np\n'), ((2148, 2175), 'numpy.randint', 'np.randint', (['(0)', 'data.size', 'N'], {}), '(0, data.size, N)\n', (2158, 2175), True, 'import numpy as np\n'), ((2342, 2383), 'numpy.sum', 'np.sum', (['((estimate - other_estimates) ** 2)'], {}), '((estimate - other_estimates) ** 2)\n', (2348, 2383), True, 'import numpy as np\n'), ((2756, 2771), 'numpy.median', 'np.median', (['fsec'], {}), '(fsec)\n', (2765, 2771), True, 'import numpy as np\n'), ((530, 545), 'numpy.std', 'np.std', (['y[mask]'], {}), '(y[mask])\n', (536, 545), True, 'import numpy as np\n'), ((981, 996), 'numpy.std', 'np.std', (['y[mask]'], {}), '(y[mask])\n', (987, 996), True, 'import numpy as np\n'), ((1664, 1676), 'numpy.sum', 'np.sum', (['mask'], {}), '(mask)\n', (1670, 1676), True, 'import numpy as np\n'), ((1698, 1729), 'numpy.polyfit', 'np.polyfit', (['x[mask]', 'y[mask]', '(3)'], {}), '(x[mask], y[mask], 3)\n', (1708, 1729), True, 'import numpy as np\n'), ((1826, 1856), 'numpy.polyval', 'np.polyval', (['p', 'eval[eval_mask]'], {}), '(p, eval[eval_mask])\n', (1836, 1856), True, 'import numpy as np\n'), ((2279, 2297), 'numpy.delete', 'np.delete', (['data', 'i'], {}), '(data, i)\n', (2288, 2297), True, 'import numpy as np\n'), ((2706, 2731), 'numpy.polyfit', 'np.polyfit', (['tsec', 'fsec', 
'(2)'], {}), '(tsec, fsec, 2)\n', (2716, 2731), True, 'import numpy as np\n'), ((3443, 3458), 'os.listdir', 'os.listdir', (['"""."""'], {}), "('.')\n", (3453, 3458), False, 'import os\n'), ((3503, 3522), 'astropy.io.fits.open', 'fits.open', (['files[0]'], {}), '(files[0])\n', (3512, 3522), False, 'from astropy.io import fits\n'), ((3609, 3620), 'numpy.isnan', 'np.isnan', (['x'], {}), '(x)\n', (3617, 3620), True, 'import numpy as np\n'), ((3623, 3634), 'numpy.isnan', 'np.isnan', (['y'], {}), '(y)\n', (3631, 3634), True, 'import numpy as np\n'), ((554, 566), 'numpy.sum', 'np.sum', (['mask'], {}), '(mask)\n', (560, 566), True, 'import numpy as np\n'), ((1005, 1017), 'numpy.sum', 'np.sum', (['mask'], {}), '(mask)\n', (1011, 1017), True, 'import numpy as np\n')] |
import numpy as np
from matplotlib import pyplot as plt
class SMForward:
    """Forward model of soil-moisture phase contributions.

    Maps volumetric soil moisture to a complex soil dielectric constant
    (quadratic real part, linear imaginary part) and propagates it to
    interferometric phase via the closed-form model adapted from
    De Zan et al., 2014.
    """

    # Cached moisture array and its dielectric curves; set by set_moistures().
    mvs: np.ndarray = None
    de_real = None
    de_imag = None

    def __init__(self, imag_slope, r_A, r_B, r_C, omega=5.405e9):
        """
        :param imag_slope: slope of the imaginary dielectric part vs. moisture.
        :param r_A: quadratic coefficient of the real dielectric part.
        :param r_B: linear coefficient of the real dielectric part.
        :param r_C: constant term of the real dielectric part.
        :param omega: frequency term used in k_prime.  NOTE(review):
            5.405e9 is the C-band frequency in Hz but is used directly as
            an angular frequency — confirm whether a 2*pi factor is intended.
        """
        self.imag_slope = imag_slope
        self.real_A = r_A
        self.real_B = r_B
        self.real_C = r_C
        self.omega = omega

    def mv2eps_real(self, sm):
        """Real part of the relative dielectric constant at moisture ``sm``."""
        return self.real_C + self.real_B * sm + self.real_A * sm**2

    def mv2eps_imag(self, sm):
        """Imaginary part of the relative dielectric constant (linear in sm)."""
        return sm * self.imag_slope

    def set_moistures(self, mvs):
        """Store a soil-moisture array and cache its dielectric curves."""
        self.mvs = mvs
        self.de_real = self.mv2eps_real(self.mvs)
        self.de_imag = self.mv2eps_imag(self.mvs)

    def plot_dielectric(self):
        """Plot the cached real/imaginary dielectric curves vs. moisture."""
        plt.plot(self.mvs, self.de_real, label='Real Part')
        plt.plot(self.mvs, self.de_imag, label='imaginary part')
        plt.legend(loc='lower left')
        plt.show()

    def sm2eps(self, sm) -> complex:
        """Complex relative dielectric constant for a moisture array ``sm``.

        Bug fix: the return annotation was ``np.complex``, an alias that
        was removed in NumPy 1.24 and raises AttributeError when the class
        is defined; the builtin ``complex`` is the replacement.
        """
        complex_array = np.zeros(sm.shape, dtype=np.complex64)
        complex_array += self.mv2eps_real(sm)
        complex_array += (self.mv2eps_imag(sm) * 1j)
        return complex_array

    def k_prime(self, epsilon: complex) -> complex:
        """Complex wavenumber for a medium of relative permittivity ``epsilon``."""
        μ0 = 1.25663706212e-6  # magnetic permeability of free space
        eps0 = 8.854187e-12  # electric permittivity of free space
        epsilon = epsilon * eps0  # convert relative permittivity to absolute
        return -np.sqrt(self.omega**2 * epsilon * μ0)

    def I_dezan(self, sm):
        """Single-state intensity-like term of the De Zan model."""
        eps1 = self.sm2eps(sm)
        k1 = self.k_prime(eps1)
        return 1/((2j * k1) - (2j * k1.conj()))

    def reflected_I(self, sm, theta):
        """Reflection-coefficient-style ratio at incidence angle ``theta`` (rad).

        NOTE(review): presumably a Fresnel reflection coefficient for the
        air/soil interface — confirm the polarization convention.
        """
        n1 = 1  # refractive index of the upper medium (air)
        eps = self.sm2eps(sm).real
        alpha = np.sqrt(1 - ((n1/eps) * np.sin(theta))**2) / np.cos(theta)
        beta = eps/n1
        return (alpha - beta) / (alpha + beta)

    def dubois_I_dif(self, sm1, sm2, theta=10):
        """Intensity difference between two moisture states (theta in degrees).

        NOTE(review): presumably after the Dubois backscatter model — confirm.
        """
        eps1 = self.mv2eps_real(sm1)
        eps2 = self.mv2eps_real(sm2)
        return np.tan(np.radians(theta)) * (eps2 - eps1)

    def get_phases_dezan(self, ref, sec, use_epsilon=False):
        """
        Convert a pair of soil-moisture maps into an interferogram via an
        analytical forward model.

        Adapted from De Zan et al., 2014.

        :param ref: reference soil-moisture map (or dielectric map when
            ``use_epsilon`` is True).
        :param sec: secondary soil-moisture map (or dielectric map).
        :param use_epsilon: if True, ``ref``/``sec`` are already complex
            dielectric constants and the moisture conversion is skipped.
        :returns: complex interferogram with NaNs replaced by zero.
        """
        if not use_epsilon:
            eps1 = self.sm2eps(ref)
            eps2 = self.sm2eps(sec)
        else:
            eps1 = ref
            eps2 = sec
        k1 = self.k_prime(eps1)
        k2 = self.k_prime(eps2)
        phi = (2j * np.sqrt(k2.imag * k1.imag)) / (k2.conj() - k1)
        return np.nan_to_num(phi)
| [
"numpy.radians",
"numpy.sqrt",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.plot",
"numpy.zeros",
"numpy.cos",
"numpy.sin",
"numpy.nan_to_num",
"matplotlib.pyplot.show"
] | [((955, 1006), 'matplotlib.pyplot.plot', 'plt.plot', (['self.mvs', 'self.de_real'], {'label': '"""Real Part"""'}), "(self.mvs, self.de_real, label='Real Part')\n", (963, 1006), True, 'from matplotlib import pyplot as plt\n'), ((1015, 1071), 'matplotlib.pyplot.plot', 'plt.plot', (['self.mvs', 'self.de_imag'], {'label': '"""imaginary part"""'}), "(self.mvs, self.de_imag, label='imaginary part')\n", (1023, 1071), True, 'from matplotlib import pyplot as plt\n'), ((1080, 1108), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""lower left"""'}), "(loc='lower left')\n", (1090, 1108), True, 'from matplotlib import pyplot as plt\n'), ((1117, 1127), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1125, 1127), True, 'from matplotlib import pyplot as plt\n'), ((1193, 1231), 'numpy.zeros', 'np.zeros', (['sm.shape'], {'dtype': 'np.complex64'}), '(sm.shape, dtype=np.complex64)\n', (1201, 1231), True, 'import numpy as np\n'), ((2776, 2794), 'numpy.nan_to_num', 'np.nan_to_num', (['phi'], {}), '(phi)\n', (2789, 2794), True, 'import numpy as np\n'), ((1643, 1682), 'numpy.sqrt', 'np.sqrt', (['(self.omega ** 2 * epsilon * μ0)'], {}), '(self.omega ** 2 * epsilon * μ0)\n', (1650, 1682), True, 'import numpy as np\n'), ((1970, 1983), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (1976, 1983), True, 'import numpy as np\n'), ((2198, 2215), 'numpy.radians', 'np.radians', (['theta'], {}), '(theta)\n', (2208, 2215), True, 'import numpy as np\n'), ((2713, 2739), 'numpy.sqrt', 'np.sqrt', (['(k2.imag * k1.imag)'], {}), '(k2.imag * k1.imag)\n', (2720, 2739), True, 'import numpy as np\n'), ((1949, 1962), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (1955, 1962), True, 'import numpy as np\n')] |
import numpy as np
from layers import (
FullyConnectedLayer, ReLULayer,
ConvolutionalLayer, MaxPoolingLayer, Flattener,
softmax_with_cross_entropy, l2_regularization, softmax
)
class ConvNet:
    """
    Implements a very simple conv net

    Input -> Conv[3x3] -> Relu -> Maxpool[4x4] ->
    Conv[3x3] -> Relu -> MaxPool[4x4] ->
    Flatten -> FC -> Softmax
    """
    def __init__(self, input_shape, n_output_classes, conv1_channels, conv2_channels):
        """
        Initializes the neural network

        Arguments:
        input_shape, tuple of 3 ints - image_width, image_height, n_channels
                                         Will be equal to (32, 32, 3)
        n_output_classes, int - number of classes to predict
        conv1_channels, int - number of filters in the 1st conv layer
        conv2_channels, int - number of filters in the 2nd conv layer
        """
        image_width, image_height, image_channels = input_shape
        maxpool1_size = 4
        maxpool2_size = 4
        # The 3x3 convs use padding=1, so they preserve spatial size; only
        # the two max-pool layers shrink the feature map.
        pool_factor = maxpool1_size * maxpool2_size
        flattener_width = int(image_width / pool_factor)
        # Bug fix: the original derived the flattened height from
        # image_width as well, which silently breaks for non-square inputs.
        flattener_height = int(image_height / pool_factor)
        self.layers = [
            ConvolutionalLayer(in_channels=image_channels, out_channels=conv1_channels, filter_size=3, padding=1),
            ReLULayer(),
            MaxPoolingLayer(maxpool1_size, maxpool1_size),
            ConvolutionalLayer(in_channels=conv1_channels, out_channels=conv2_channels, filter_size=3, padding=1),
            ReLULayer(),
            MaxPoolingLayer(maxpool2_size, maxpool2_size),
            Flattener(),
            FullyConnectedLayer(flattener_width * flattener_height * conv2_channels, n_output_classes)
        ]
    def compute_loss_and_gradients(self, X, y):
        """
        Computes total loss and updates parameter gradients on a batch of training examples

        Arguments:
        :param X, np array (batch_size, height, width, input_features) - input data
        :param y, np array of int (batch_size) - classes
        """
        assert X.ndim == 4
        assert y.ndim == 1
        assert X.shape[0] == y.shape[0]
        # Clear gradients accumulated by any previous backward pass.
        for _, param in self.params().items():
            param.reset_grad()
        # forward pass
        out = X
        for layer in self.layers:
            out = layer.forward(out)
        # backward pass: softmax/cross-entropy gradient through the stack
        loss, d_out = softmax_with_cross_entropy(out, y)
        for layer in reversed(self.layers):
            d_out = layer.backward(d_out)
        return loss
    def predict(self, X):
        """Return the predicted class index for every sample in X."""
        out = X
        for layer in self.layers:
            out = layer.forward(out)
        out = softmax(out)
        pred = np.argmax(out, axis=1)
        return pred  # y_hat
    def params(self):
        """Collect all trainable parameters keyed by '<layer index>_<name>'."""
        result = {}
        for index, layer in enumerate(self.layers):
            for name, param in layer.params().items():
                result['%s_%s' % (index, name)] = param
        return result
| [
"layers.FullyConnectedLayer",
"layers.ReLULayer",
"layers.ConvolutionalLayer",
"numpy.argmax",
"layers.softmax_with_cross_entropy",
"layers.softmax",
"layers.Flattener",
"layers.MaxPoolingLayer"
] | [((2511, 2545), 'layers.softmax_with_cross_entropy', 'softmax_with_cross_entropy', (['out', 'y'], {}), '(out, y)\n', (2537, 2545), False, 'from layers import FullyConnectedLayer, ReLULayer, ConvolutionalLayer, MaxPoolingLayer, Flattener, softmax_with_cross_entropy, l2_regularization, softmax\n'), ((2804, 2816), 'layers.softmax', 'softmax', (['out'], {}), '(out)\n', (2811, 2816), False, 'from layers import FullyConnectedLayer, ReLULayer, ConvolutionalLayer, MaxPoolingLayer, Flattener, softmax_with_cross_entropy, l2_regularization, softmax\n'), ((2833, 2855), 'numpy.argmax', 'np.argmax', (['out'], {'axis': '(1)'}), '(out, axis=1)\n', (2842, 2855), True, 'import numpy as np\n'), ((1323, 1428), 'layers.ConvolutionalLayer', 'ConvolutionalLayer', ([], {'in_channels': 'image_channels', 'out_channels': 'conv1_channels', 'filter_size': '(3)', 'padding': '(1)'}), '(in_channels=image_channels, out_channels=conv1_channels,\n filter_size=3, padding=1)\n', (1341, 1428), False, 'from layers import FullyConnectedLayer, ReLULayer, ConvolutionalLayer, MaxPoolingLayer, Flattener, softmax_with_cross_entropy, l2_regularization, softmax\n'), ((1438, 1449), 'layers.ReLULayer', 'ReLULayer', ([], {}), '()\n', (1447, 1449), False, 'from layers import FullyConnectedLayer, ReLULayer, ConvolutionalLayer, MaxPoolingLayer, Flattener, softmax_with_cross_entropy, l2_regularization, softmax\n'), ((1463, 1508), 'layers.MaxPoolingLayer', 'MaxPoolingLayer', (['maxpool1_size', 'maxpool1_size'], {}), '(maxpool1_size, maxpool1_size)\n', (1478, 1508), False, 'from layers import FullyConnectedLayer, ReLULayer, ConvolutionalLayer, MaxPoolingLayer, Flattener, softmax_with_cross_entropy, l2_regularization, softmax\n'), ((1523, 1628), 'layers.ConvolutionalLayer', 'ConvolutionalLayer', ([], {'in_channels': 'conv1_channels', 'out_channels': 'conv2_channels', 'filter_size': '(3)', 'padding': '(1)'}), '(in_channels=conv1_channels, out_channels=conv2_channels,\n filter_size=3, padding=1)\n', (1541, 1628), 
False, 'from layers import FullyConnectedLayer, ReLULayer, ConvolutionalLayer, MaxPoolingLayer, Flattener, softmax_with_cross_entropy, l2_regularization, softmax\n'), ((1638, 1649), 'layers.ReLULayer', 'ReLULayer', ([], {}), '()\n', (1647, 1649), False, 'from layers import FullyConnectedLayer, ReLULayer, ConvolutionalLayer, MaxPoolingLayer, Flattener, softmax_with_cross_entropy, l2_regularization, softmax\n'), ((1663, 1708), 'layers.MaxPoolingLayer', 'MaxPoolingLayer', (['maxpool2_size', 'maxpool2_size'], {}), '(maxpool2_size, maxpool2_size)\n', (1678, 1708), False, 'from layers import FullyConnectedLayer, ReLULayer, ConvolutionalLayer, MaxPoolingLayer, Flattener, softmax_with_cross_entropy, l2_regularization, softmax\n'), ((1723, 1734), 'layers.Flattener', 'Flattener', ([], {}), '()\n', (1732, 1734), False, 'from layers import FullyConnectedLayer, ReLULayer, ConvolutionalLayer, MaxPoolingLayer, Flattener, softmax_with_cross_entropy, l2_regularization, softmax\n'), ((1748, 1842), 'layers.FullyConnectedLayer', 'FullyConnectedLayer', (['(flattener_width * flattener_height * conv2_channels)', 'n_output_classes'], {}), '(flattener_width * flattener_height * conv2_channels,\n n_output_classes)\n', (1767, 1842), False, 'from layers import FullyConnectedLayer, ReLULayer, ConvolutionalLayer, MaxPoolingLayer, Flattener, softmax_with_cross_entropy, l2_regularization, softmax\n')] |
# start, prepare data, data ready, finish, FMP
# Each inner list is one measurement run; the five columns are cumulative
# timestamps (ms) at the stages named above.
FP_DATA = {
    'Chart.JS': [
        [32.4, 45.1, 297.6, 1042.4, 1054.8],
        [33.6, 43.4, 285.1, 1041.6, 1064.5],
        [30.9, 40.9, 292.7, 1036.2, 1056.8],
    ],
    'TimeChart': [
        [29.9, 57.2, 255.2, 288.3, 853.2],
        [36.2, 43.9, 236.9, 271.5, 837.4],
        [34.9, 42.7, 252.4, 285.7, 871.9],
    ],
    'μPlot': [
        [29.6, 31.4, 189.8, 285.6, 335.3],
        [30.6, 32.8, 197.3, 305.5, 350.0],
        [37.9, 40.7, 209.7, 311.7, 351.7],
    ],
}
import matplotlib.pyplot as plt
import numpy as np
# Per-library mean duration of each stage, measured from 'start'.
y = []
prepare_data = []
data_ready = []
finish = []
fmp = []
for lib in FP_DATA:
    data = np.array(FP_DATA[lib])
    # Make every timestamp relative to the 'start' column, then average runs.
    data = data[:, 1:] - data[:, 0:1]
    data = data.mean(axis=0)
    y.append(lib)
    prepare_data.append(data[0])
    data_ready.append(data[1])
    finish.append(data[2])
    fmp.append(data[3])
plt.title('First Paint Time')
plt.xlabel('ms')
# All bars start at x=0; since each series is cumulative (load <= prepare
# <= scripting <= paint), drawing the longest series first and overlaying
# the shorter ones produces a stacked-looking chart.
plt.barh(y, fmp, label='paint')
plt.barh(y, finish, label='scripting')
plt.barh(y, data_ready, label='prepare data')
plt.barh(y, prepare_data, label='load script')
plt.legend()
plt.tight_layout()
plt.savefig('docs/first_paint.png')
| [
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.barh",
"numpy.array",
"matplotlib.pyplot.tight_layout",
"matplotlib.pyplot.title",
"matplotlib.pyplot.legend"
] | [((897, 926), 'matplotlib.pyplot.title', 'plt.title', (['"""First Paint Time"""'], {}), "('First Paint Time')\n", (906, 926), True, 'import matplotlib.pyplot as plt\n'), ((927, 943), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""ms"""'], {}), "('ms')\n", (937, 943), True, 'import matplotlib.pyplot as plt\n'), ((944, 975), 'matplotlib.pyplot.barh', 'plt.barh', (['y', 'fmp'], {'label': '"""paint"""'}), "(y, fmp, label='paint')\n", (952, 975), True, 'import matplotlib.pyplot as plt\n'), ((976, 1014), 'matplotlib.pyplot.barh', 'plt.barh', (['y', 'finish'], {'label': '"""scripting"""'}), "(y, finish, label='scripting')\n", (984, 1014), True, 'import matplotlib.pyplot as plt\n'), ((1015, 1060), 'matplotlib.pyplot.barh', 'plt.barh', (['y', 'data_ready'], {'label': '"""prepare data"""'}), "(y, data_ready, label='prepare data')\n", (1023, 1060), True, 'import matplotlib.pyplot as plt\n'), ((1061, 1107), 'matplotlib.pyplot.barh', 'plt.barh', (['y', 'prepare_data'], {'label': '"""load script"""'}), "(y, prepare_data, label='load script')\n", (1069, 1107), True, 'import matplotlib.pyplot as plt\n'), ((1108, 1120), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (1118, 1120), True, 'import matplotlib.pyplot as plt\n'), ((1121, 1139), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (1137, 1139), True, 'import matplotlib.pyplot as plt\n'), ((1140, 1175), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""docs/first_paint.png"""'], {}), "('docs/first_paint.png')\n", (1151, 1175), True, 'import matplotlib.pyplot as plt\n'), ((673, 695), 'numpy.array', 'np.array', (['FP_DATA[lib]'], {}), '(FP_DATA[lib])\n', (681, 695), True, 'import numpy as np\n')] |
#!/usr/local/bin/python3
"""
Script to generate a fixture of a chain with N complex links.
Usage: python generate_complex_chainmail_fixture.py N
"""
import json
import pathlib
import numpy
import shapely.geometry
import shapely.ops
from fixture_utils import *
def generate_link_polygons() -> tuple:
    """Generate the rectangles making up one chain link.

    The link is a stick-figure-like closed outline built from seven thin
    axis-aligned rectangles (head, neck, torso, two legs, two feet).

    Naming convention: ``*_hx``/``*_hy`` are half-extents and ``*_cy`` is a
    center y-coordinate; presumably ``generate_rectangle(hx, hy, center,
    angle)`` uses the same convention — confirm against fixture_utils.

    :return: (list of rectangle polygons, approximate material area of the
        link used later to derive its density)
    """
    half_thickness = 1e-2
    width = 6
    height = 5
    head_hx = width / 2 - 4 * half_thickness - 0.5
    foot_hx = width / 4 - 4 * half_thickness
    leg_hy = 2 * height / 7 - half_thickness
    leg_cy = leg_hy + half_thickness
    torso_hx = width / 2
    torso_cy = 2 * leg_cy - half_thickness
    neck_hy = (height - torso_cy - half_thickness) / 2
    neck_cy = height - neck_hy - half_thickness
    # NOTE(review): sums half-extents scaled by one half-thickness, then
    # subtracts an overlap correction; looks like an approximation of the
    # union area of the thin rectangles — verify against the geometry.
    area = half_thickness * (head_hx + neck_hy + torso_hx + leg_hy + foot_hx +
                             leg_hy + foot_hx) - 6 * half_thickness
    return [
        # Head
        generate_rectangle(head_hx, half_thickness,
                           numpy.array([width / 2, height - half_thickness]),
                           0),
        # Neck
        generate_rectangle(half_thickness, neck_hy,
                           numpy.array([width / 2, neck_cy]), 0),
        # Torso
        generate_rectangle(torso_hx, half_thickness,
                           numpy.array([torso_hx, torso_cy]), 0),
        # Left leg
        generate_rectangle(half_thickness, leg_hy,
                           numpy.array([half_thickness, leg_cy]), 0),
        # Left foot
        generate_rectangle(foot_hx, half_thickness,
                           numpy.array([foot_hx, half_thickness]), 0),
        # Right leg
        generate_rectangle(half_thickness, leg_hy,
                           numpy.array([width - half_thickness, leg_cy]), 0),
        # Right foot
        generate_rectangle(foot_hx, half_thickness,
                           numpy.array([width - foot_hx, half_thickness]), 0),
    ], area
def generate_fixture(args: argparse.Namespace) -> dict:
    """Generate a fixture of a chain with N complex links.

    Builds one link outline by unioning the rectangles from
    generate_link_polygons(), then appends ``args.num_links`` copies of it to
    the rigid-body problem, stacked along a rotated vertical axis.  Only the
    first link has its degrees of freedom fixed.

    :param args: parsed CLI arguments; uses ``args.num_links``
    :return: the fixture dictionary ready to be serialized
    """
    fixture = generate_custom_fixture(args)
    rigid_bodies = fixture["rigid_body_problem"]["rigid_bodies"]

    link_polygons, link_area = generate_link_polygons()
    # FIX: shapely.ops.cascaded_union is deprecated and was removed in
    # Shapely 2.0; unary_union is the drop-in, behavior-equivalent API.
    link = shapely.ops.unary_union(link_polygons)
    # ensure counter-clockwise orientation of the exterior ring
    link = shapely.geometry.polygon.orient(link, 1)
    link_polygons = [
        list(polygon.exterior.coords) for polygon in link_polygons
    ]
    # drop the closing vertex duplicated by shapely
    vertices = numpy.array(list(link.exterior.coords)[:-1])
    angle = 90 + 45
    theta = numpy.radians(angle)
    R = create_2D_rotation_matrix(theta)
    edges = generate_ngon_edges(vertices.shape[0])
    link_mass = 0.1  # Kg
    link_density = link_mass / link_area
    for i in range(args.num_links):
        rigid_bodies.append({
            "vertices":
            vertices.tolist(),
            "polygons":
            link_polygons,
            "edges":
            edges.tolist(),
            # links are spaced 3.5 units apart along the rotated axis
            "position": (R @ numpy.array([0, -3.5 * i])).tolist(),
            "theta":
            angle,
            "velocity": [0.0, 0.0, 0.0],
            # anchor the first link; all others are free
            "is_dof_fixed":
            numpy.full(3, i == 0, dtype=bool).tolist(),
            "oriented":
            True,
            # mass is spread uniformly over the vertices
            "masses":
            numpy.full(vertices.shape[0],
                       link_mass / vertices.shape[0]).tolist(),
            "density":
            link_density
        })
    return fixture
def main() -> None:
    """Parse command-line arguments to generate the desired fixture."""
    parser = create_argument_parser("generate a chain fixture",
                                   default_initial_epsilon=1e-3,
                                   default_gravity=[0, -9.81, 0],
                                   default_num_steps=5000)
    parser.add_argument("--num-links",
                        type=int,
                        default=10,
                        help="number of links in the chain")
    args = parser.parse_args()
    if args.out_path is None:
        # default output: <repo_root>/fixtures/chain/<N>_link_chain.json
        directory = (pathlib.Path(__file__).resolve().parents[1] / "fixtures" /
                     "chain")
        args.out_path = directory / f"{args.num_links:d}_link_chain.json"
    args.out_path.parent.mkdir(parents=True, exist_ok=True)
    print_args(args)
    fixture = generate_fixture(args)
    save_fixture(fixture, args.out_path)


if __name__ == "__main__":
    main()
| [
"numpy.radians",
"numpy.array",
"numpy.full",
"pathlib.Path"
] | [((2548, 2568), 'numpy.radians', 'numpy.radians', (['angle'], {}), '(angle)\n', (2561, 2568), False, 'import numpy\n'), ((1023, 1072), 'numpy.array', 'numpy.array', (['[width / 2, height - half_thickness]'], {}), '([width / 2, height - half_thickness])\n', (1034, 1072), False, 'import numpy\n'), ((1199, 1232), 'numpy.array', 'numpy.array', (['[width / 2, neck_cy]'], {}), '([width / 2, neck_cy])\n', (1210, 1232), False, 'import numpy\n'), ((1334, 1367), 'numpy.array', 'numpy.array', (['[torso_hx, torso_cy]'], {}), '([torso_hx, torso_cy])\n', (1345, 1367), False, 'import numpy\n'), ((1470, 1507), 'numpy.array', 'numpy.array', (['[half_thickness, leg_cy]'], {}), '([half_thickness, leg_cy])\n', (1481, 1507), False, 'import numpy\n'), ((1612, 1650), 'numpy.array', 'numpy.array', (['[foot_hx, half_thickness]'], {}), '([foot_hx, half_thickness])\n', (1623, 1650), False, 'import numpy\n'), ((1754, 1799), 'numpy.array', 'numpy.array', (['[width - half_thickness, leg_cy]'], {}), '([width - half_thickness, leg_cy])\n', (1765, 1799), False, 'import numpy\n'), ((1905, 1951), 'numpy.array', 'numpy.array', (['[width - foot_hx, half_thickness]'], {}), '([width - foot_hx, half_thickness])\n', (1916, 1951), False, 'import numpy\n'), ((3140, 3173), 'numpy.full', 'numpy.full', (['(3)', '(i == 0)'], {'dtype': 'bool'}), '(3, i == 0, dtype=bool)\n', (3150, 3173), False, 'import numpy\n'), ((3260, 3320), 'numpy.full', 'numpy.full', (['vertices.shape[0]', '(link_mass / vertices.shape[0])'], {}), '(vertices.shape[0], link_mass / vertices.shape[0])\n', (3270, 3320), False, 'import numpy\n'), ((2981, 3007), 'numpy.array', 'numpy.array', (['[0, -3.5 * i]'], {}), '([0, -3.5 * i])\n', (2992, 3007), False, 'import numpy\n'), ((4039, 4061), 'pathlib.Path', 'pathlib.Path', (['__file__'], {}), '(__file__)\n', (4051, 4061), False, 'import pathlib\n')] |
import torch
import networkx as nx
import numpy as np
import pickle as pkl
import scipy.sparse as sp
def Graph_load_batch(
    min_num_nodes=20,
    max_num_nodes=1000,
    name="ENZYMES",
    node_attributes=True,
    graph_labels=True,
):
    """Load a multi-graph benchmark dataset (e.g. ENZYMES, PROTEINS, DD).

    Reads the TU-style text files under ``dataset/<name>/`` (edge list,
    node labels, graph indicator, optional node attributes / graph labels),
    builds one big networkx graph, and splits it into its connected
    per-graph subgraphs.

    :param min_num_nodes: discard graphs with fewer nodes than this
    :param max_num_nodes: discard graphs with more nodes than this
    :param name: dataset directory / file-name prefix
    :param node_attributes: whether a ``*_node_attributes.txt`` file exists
    :param graph_labels: whether a ``*_graph_labels.txt`` file exists
    :return: a list of graphs
    """
    print("Loading graph dataset: " + str(name))
    G = nx.Graph()
    # load data
    path = "dataset/" + name + "/"
    data_adj = np.loadtxt(path + name + "_A.txt", delimiter=",").astype(int)
    if node_attributes:
        data_node_att = np.loadtxt(path + name + "_node_attributes.txt", delimiter=",")
    data_node_label = np.loadtxt(
        path + name + "_node_labels.txt", delimiter=","
    ).astype(int)
    data_graph_indicator = np.loadtxt(
        path + name + "_graph_indicator.txt", delimiter=","
    ).astype(int)
    if graph_labels:
        data_graph_labels = np.loadtxt(
            path + name + "_graph_labels.txt", delimiter=","
        ).astype(int)
    data_tuple = list(map(tuple, data_adj))
    # print(len(data_tuple))
    # print(data_tuple[0])
    # add edges
    G.add_edges_from(data_tuple)
    # add node attributes; node ids in the raw files are 1-based
    for i in range(data_node_label.shape[0]):
        if node_attributes:
            G.add_node(i + 1, feature=data_node_att[i])
        G.add_node(i + 1, label=data_node_label[i])
    G.remove_nodes_from(list(nx.isolates(G)))
    # print(G.number_of_nodes())
    # print(G.number_of_edges())
    # split into graphs
    graph_num = data_graph_indicator.max()
    node_list = np.arange(data_graph_indicator.shape[0]) + 1
    graphs = []
    max_nodes = 0
    for i in range(graph_num):
        # find the nodes for each graph
        nodes = node_list[data_graph_indicator == i + 1]
        G_sub = G.subgraph(nodes)
        if graph_labels:
            G_sub.graph["label"] = data_graph_labels[i]
        # print('nodes', G_sub.number_of_nodes())
        # print('edges', G_sub.number_of_edges())
        # print('label', G_sub.graph)
        if (
            G_sub.number_of_nodes() >= min_num_nodes
            and G_sub.number_of_nodes() <= max_num_nodes
        ):
            graphs.append(G_sub)
            if G_sub.number_of_nodes() > max_nodes:
                max_nodes = G_sub.number_of_nodes()
            # print(G_sub.number_of_nodes(), 'i', i)
    # print('Graph dataset name: {}, total graph num: {}'.format(name, len(graphs)))
    # logging.warning('Graphs loaded, total num: {}'.format(len(graphs)))
    print("Loaded")
    return graphs
def parse_index_file(filename):
    """Read one integer per line from *filename*.

    Used for the Planetoid-style ``dataset/ind.<name>.test.index`` files.

    :param filename: path to a text file with one integer per line
    :return: list of ints in file order
    """
    # `with` guarantees the handle is closed (the original iterated an
    # open() result directly and leaked the file descriptor).
    with open(filename) as f:
        return [int(line.strip()) for line in f]
# load cora, citeseer and pubmed dataset
def Graph_load(dataset="cora"):
    """
    Load a single graph dataset (cora, citeseer or pubmed).

    Reads the pickled Planetoid-style splits ``dataset/ind.<dataset>.*``
    and reassembles the full feature matrix and citation graph.

    :param dataset: dataset name
    :return: (sparse adjacency matrix, lil feature matrix, networkx graph)
    """
    names = ["x", "tx", "allx", "graph"]
    objects = []
    for i in range(len(names)):
        # latin1 encoding is needed to unpickle the Python-2 era files
        load = pkl.load(
            open("dataset/ind.{}.{}".format(dataset, names[i]), "rb"), encoding="latin1"
        )
        # print('loaded')
        objects.append(load)
        # print(load)
    x, tx, allx, graph = tuple(objects)
    test_idx_reorder = parse_index_file("dataset/ind.{}.test.index".format(dataset))
    test_idx_range = np.sort(test_idx_reorder)
    if dataset == "citeseer":
        # Fix citeseer dataset (there are some isolated nodes in the graph)
        # Find isolated nodes, add them as zero-vecs into the right position
        test_idx_range_full = range(min(test_idx_reorder), max(test_idx_reorder) + 1)
        tx_extended = sp.lil_matrix((len(test_idx_range_full), x.shape[1]))
        tx_extended[test_idx_range - min(test_idx_range), :] = tx
        tx = tx_extended
    # stack train + test features, then put test rows back in graph order
    features = sp.vstack((allx, tx)).tolil()
    features[test_idx_reorder, :] = features[test_idx_range, :]
    G = nx.from_dict_of_lists(graph)
    adj = nx.adjacency_matrix(G)
    return adj, features, G
def bfs_seq(G, start_id):
    """
    Get a BFS node sequence of G starting from start_id.

    Children of each frontier node are appended in the order networkx's
    bfs_successors reports them (deliberately NOT shuffled).

    :param G: networkx graph
    :param start_id: node id to start the traversal from
    :return: list of node ids in BFS order
    """
    successors = dict(nx.bfs_successors(G, start_id))
    frontier = [start_id]
    order = [start_id]
    while frontier:
        # collect all children of the current frontier, level by level
        # (renamed from `next`, which shadowed the builtin, and avoids
        # the O(n) list.pop(0) of the original)
        next_level = []
        for node in frontier:
            children = successors.get(node)
            if children is not None:
                next_level.extend(children)
        order.extend(next_level)
        frontier = next_level
    return order
def encode_adj(adj, max_prev_node=10, is_full=False):
    """Encode a (BFS-ordered) adjacency matrix as a fixed-width sequence.

    Row t of the result describes node t+1's connections to its at most
    ``max_prev_node`` predecessors, most recent predecessor first.

    :param adj: n*n adjacency matrix; rows are time steps
    :param max_prev_node: how many previous nodes each row keeps
    :param is_full: if True, keep all n-1 predecessors instead
    :return: (n-1) x max_prev_node array
    """
    if is_full:
        max_prev_node = adj.shape[0] - 1

    num_nodes = adj.shape[0]
    # strictly-lower triangle: row i lists edges to earlier nodes only
    lower = np.tril(adj, k=-1)[1:num_nodes, 0:num_nodes - 1]

    encoded = np.zeros((lower.shape[0], max_prev_node))
    for row in range(lower.shape[0]):
        first = max(0, row - max_prev_node + 1)
        last = row + 1
        width = last - first
        # right-align the window, then flip so the newest edge comes first
        encoded[row, max_prev_node - width:max_prev_node] = lower[row, first:last]
        encoded[row, :] = encoded[row, :][::-1]
    return encoded
def decode_adj(adj_output):
    """Rebuild the full symmetric adjacency matrix from its encoding.

    Inverse of encode_adj; adj_output has shape (n-1) x max_prev_node.
    """
    max_prev_node = adj_output.shape[1]
    num_rows = adj_output.shape[0]

    lower = np.zeros((num_rows, num_rows))
    for row in range(num_rows):
        first = max(0, row - max_prev_node + 1)
        last = row + 1
        width = last - first
        # undo the reversal + right-alignment applied by encode_adj
        lower[row, first:last] = adj_output[row, ::-1][
            max_prev_node - width:max_prev_node
        ]

    size = num_rows + 1
    full = np.zeros((size, size))
    full[1:size, 0:size - 1] = np.tril(lower, 0)
    return full + full.T
def encode_adj_flexible(adj):
    """Encode adj as a list of variable-length rows (lossless).

    Each row spans from the node's oldest recorded connection up to the
    previous node, so no truncation occurs.

    :param adj: adj matrix (assumed BFS-ordered; each later row must have
        at least one connection to an earlier node)
    :return: list of 1-D arrays of varying length
    """
    # keep only edges to strictly earlier nodes
    lower = np.tril(adj, k=-1)
    size = lower.shape[0]
    lower = lower[1:size, 0:size - 1]

    rows = []
    start = 0
    for i in range(lower.shape[0]):
        end = i + 1
        piece = lower[i, start:end]
        rows.append(piece)
        # next row starts at this row's first (oldest) connection
        start = end - len(piece) + np.amin(np.nonzero(piece)[0])
    return rows
def decode_adj_flexible(adj_output):
    """Rebuild the full symmetric adjacency matrix from a flexible encoding.

    Inverse of encode_adj_flexible; lossless.

    :param adj_output: list of variable-length rows
    :return: (n x n) symmetric adjacency matrix
    """
    size = len(adj_output)
    lower = np.zeros((size, size))
    for i, row in enumerate(adj_output):
        # each row ends at column i (node i+1's most recent predecessor)
        lower[i, i + 1 - len(row):i + 1] = row

    full = np.zeros((size + 1, size + 1))
    full[1:size + 1, 0:size] = np.tril(lower, 0)
    return full + full.T
def test_encode_decode_adj():
    """Manual smoke test: encode/decode round trips on a small graph.

    Prints the min/max reconstruction error, which should be 0 for both
    the fixed-width and the flexible encodings.
    """
    ######## code test ###########
    # the original built several throwaway graphs (ladder, grid, karate)
    # and only kept the last one; construct the caveman graph directly
    G = nx.connected_caveman_graph(2, 3)
    print(G.number_of_nodes())

    adj = np.asarray(nx.to_numpy_matrix(G))
    G = nx.from_numpy_matrix(adj)
    # permute into a random BFS ordering before encoding
    start_idx = np.random.randint(adj.shape[0])
    x_idx = np.array(bfs_seq(G, start_idx))
    adj = adj[np.ix_(x_idx, x_idx)]
    print("adj\n", adj)

    adj_output = encode_adj(adj, max_prev_node=5)
    print("adj_output\n", adj_output)
    # BUG FIX: decode_adj() takes no max_prev_node argument (it reads the
    # width from adj_output.shape[1]); the old call raised TypeError.
    adj_recover = decode_adj(adj_output)
    print("adj_recover\n", adj_recover)
    print("error\n", np.amin(adj_recover - adj), np.amax(adj_recover - adj))

    adj_output = encode_adj_flexible(adj)
    for i in range(len(adj_output)):
        print(len(adj_output[i]))
    adj_recover = decode_adj_flexible(adj_output)
    print(adj_recover)
    print(np.amin(adj_recover - adj), np.amax(adj_recover - adj))
def encode_adj_full(adj):
    """Encode adj as an (n-1, n-1, 2) tensor plus per-row lengths.

    Channel 0 holds each row's adjacency slice in reverse (most recent
    predecessor first); channel 1 flags which entries are valid (a 0 acts
    as the stop token).

    :param adj: adj matrix
    :return: (encoded tensor, array of per-row valid lengths)
    """
    # keep only edges to strictly earlier nodes
    lower = np.tril(adj, k=-1)
    size = lower.shape[0]
    lower = lower[1:size, 0:size - 1]

    encoded = np.zeros((lower.shape[0], lower.shape[1], 2))
    lengths = np.zeros(lower.shape[0])
    for i in range(lower.shape[0]):
        # slice runs from the oldest connection up to the previous node
        first = np.amin(np.nonzero(lower[i, :])[0])
        piece = lower[i, first:i + 1]
        k = piece.shape[0]
        encoded[i, 0:k, 0] = piece[::-1]  # put in reverse order
        encoded[i, 0:k, 1] = 1            # validity / stop mask
        lengths[i] = k
    return encoded, lengths
def decode_adj_full(adj_output):
    """Rebuild the symmetric adjacency matrix from encode_adj_full output.

    :param adj_output: (n-1, n-1, 2) tensor; channel 1 marks valid entries
    :return: (n x n) symmetric adjacency matrix
    """
    adj = np.zeros((adj_output.shape[0] + 1, adj_output.shape[1] + 1))
    for i in range(adj_output.shape[0]):
        # channel 1 marks which positions in this row are valid
        valid = np.nonzero(adj_output[i, :, 1])[0]
        last = np.amax(valid)
        piece = adj_output[i, 0:last + 1, 0]
        # entries were stored most-recent-first; flip back into place
        adj[i + 1, i - last:i + 1] = piece[::-1]
    return adj + adj.T
def test_encode_decode_adj_full():
    """Manual smoke test for encode_adj_full/decode_adj_full.

    Prints the reconstruction error of a round trip on the karate-club
    graph (should be all zeros).  Debug harness, not a unit test.
    """
    ########### code test #############
    # G = nx.ladder_graph(10)
    G = nx.karate_club_graph()
    # get bfs adj
    adj = np.asarray(nx.to_numpy_matrix(G))
    G = nx.from_numpy_matrix(adj)
    # permute into a random BFS ordering before encoding
    start_idx = np.random.randint(adj.shape[0])
    x_idx = np.array(bfs_seq(G, start_idx))
    adj = adj[np.ix_(x_idx, x_idx)]

    adj_output, adj_len = encode_adj_full(adj)
    print("adj\n", adj)
    print("adj_output[0]\n", adj_output[:, :, 0])
    print("adj_output[1]\n", adj_output[:, :, 1])
    # print('adj_len\n',adj_len)

    adj_recover = decode_adj_full(adj_output)
    print("adj_recover\n", adj_recover)
    print("error\n", adj_recover - adj)
    print("error_sum\n", np.amax(adj_recover - adj), np.amin(adj_recover - adj))
class Graph_sequence_sampler_pytorch(torch.utils.data.Dataset):
    """
    PyTorch dataset of BFS-ordered graph sequences.

    Each sample randomly permutes a graph, re-orders it with a random-start
    BFS, then encodes it via encode_adj into fixed-width (x, y) teacher-
    forcing pairs padded to the largest graph.
    """

    def __init__(self, G_list, max_num_node=None, max_prev_node=None, iteration=20000):
        # dense adjacency matrix and node count of every graph
        self.adj_all = []
        self.len_all = []
        for G in G_list:
            self.adj_all.append(np.asarray(nx.to_numpy_matrix(G)))
            self.len_all.append(G.number_of_nodes())
        if max_num_node is None:
            self.n = max(self.len_all)
        else:
            self.n = max_num_node
        if max_prev_node is None:
            # estimate the BFS bandwidth empirically by random sampling
            print(
                "calculating max previous node, total iteration: {}".format(iteration)
            )
            self.max_prev_node = max(self.calc_max_prev_node(iter=iteration))
            print("max previous node: {}".format(self.max_prev_node))
        else:
            self.max_prev_node = max_prev_node

    def __len__(self):
        return len(self.adj_all)

    def __getitem__(self, idx):
        """Return one randomly-ordered sample as {'x', 'y', 'len'}."""
        adj_copy = self.adj_all[idx].copy()
        x_batch = np.zeros(
            (self.n, self.max_prev_node)
        )  # here zeros are padded for small graph
        x_batch[0, :] = 1  # the first input token is all ones
        y_batch = np.zeros(
            (self.n, self.max_prev_node)
        )  # here zeros are padded for small graph
        # generate input x, y pairs
        len_batch = adj_copy.shape[0]
        # random node permutation for data augmentation
        x_idx = np.random.permutation(adj_copy.shape[0])
        adj_copy = adj_copy[np.ix_(x_idx, x_idx)]
        adj_copy_matrix = np.asmatrix(adj_copy)
        G = nx.from_numpy_matrix(adj_copy_matrix)
        # then do bfs in the permuted G
        start_idx = np.random.randint(adj_copy.shape[0])
        x_idx = np.array(bfs_seq(G, start_idx))
        adj_copy = adj_copy[np.ix_(x_idx, x_idx)]
        adj_encoded = encode_adj(adj_copy.copy(), max_prev_node=self.max_prev_node)
        # get x and y and adj
        # for small graph the rest are zero padded
        # y is the target, x is the same sequence shifted right by one step
        y_batch[0 : adj_encoded.shape[0], :] = adj_encoded
        x_batch[1 : adj_encoded.shape[0] + 1, :] = adj_encoded
        return {"x": x_batch, "y": y_batch, "len": len_batch}

    def calc_max_prev_node(self, iter=20000, topk=10):
        """Sample random BFS orderings and return the topk largest bandwidths.

        :param iter: number of random (graph, permutation, BFS start) samples
        :param topk: how many of the largest encoded lengths to return
        :return: sorted list of the topk largest row lengths observed
        """
        max_prev_node = []
        for i in range(iter):
            if i % (iter / 5) == 0:
                print("iter {} times".format(i))
            adj_idx = np.random.randint(len(self.adj_all))
            adj_copy = self.adj_all[adj_idx].copy()
            # print('Graph size', adj_copy.shape[0])
            x_idx = np.random.permutation(adj_copy.shape[0])
            adj_copy = adj_copy[np.ix_(x_idx, x_idx)]
            adj_copy_matrix = np.asmatrix(adj_copy)
            G = nx.from_numpy_matrix(adj_copy_matrix)
            # then do bfs in the permuted G
            start_idx = np.random.randint(adj_copy.shape[0])
            x_idx = np.array(bfs_seq(G, start_idx))
            adj_copy = adj_copy[np.ix_(x_idx, x_idx)]
            # encode adj
            adj_encoded = encode_adj_flexible(adj_copy.copy())
            max_encoded_len = max(
                [len(adj_encoded[i]) for i in range(len(adj_encoded))]
            )
            max_prev_node.append(max_encoded_len)
        max_prev_node = sorted(max_prev_node)[-1 * topk :]
        return max_prev_node
class Graph_sequence_sampler_pytorch_nobfs(torch.utils.data.Dataset):
    """
    PyTorch dataset of graph sequences without BFS re-ordering.

    Samples are only randomly permuted; the encoding width is fixed to
    n - 1 (the full number of possible predecessors).
    """

    def __init__(self, G_list, max_num_node=None):
        # dense adjacency matrix and node count of every graph
        self.adj_all = []
        self.len_all = []
        for G in G_list:
            self.adj_all.append(np.asarray(nx.to_numpy_matrix(G)))
            self.len_all.append(G.number_of_nodes())
        if max_num_node is None:
            self.n = max(self.len_all)
        else:
            self.n = max_num_node

    def __len__(self):
        return len(self.adj_all)

    def __getitem__(self, idx):
        """Return one randomly-permuted sample as {'x', 'y', 'len'}."""
        adj_copy = self.adj_all[idx].copy()
        x_batch = np.zeros(
            (self.n, self.n - 1)
        )  # here zeros are padded for small graph
        x_batch[0, :] = 1  # the first input token is all ones
        y_batch = np.zeros(
            (self.n, self.n - 1)
        )  # here zeros are padded for small graph
        # generate input x, y pairs
        len_batch = adj_copy.shape[0]
        # random node permutation for data augmentation (no BFS here)
        x_idx = np.random.permutation(adj_copy.shape[0])
        adj_copy = adj_copy[np.ix_(x_idx, x_idx)]
        adj_encoded = encode_adj(adj_copy.copy(), max_prev_node=self.n - 1)
        # get x and y and adj
        # for small graph the rest are zero padded
        # y is the target, x is the same sequence shifted right by one step
        y_batch[0 : adj_encoded.shape[0], :] = adj_encoded
        x_batch[1 : adj_encoded.shape[0] + 1, :] = adj_encoded
        return {"x": x_batch, "y": y_batch, "len": len_batch}
class Graph_sequence_sampler_pytorch_canonical(torch.utils.data.Dataset):
    """
    PyTorch dataset of graph sequences in their stored (canonical) order.

    Unlike Graph_sequence_sampler_pytorch, __getitem__ applies no random
    permutation or BFS: the adjacency matrix is encoded exactly as stored.
    """

    def __init__(self, G_list, max_num_node=None, max_prev_node=None, iteration=20000):
        # dense adjacency matrix and node count of every graph
        self.adj_all = []
        self.len_all = []
        for G in G_list:
            self.adj_all.append(np.asarray(nx.to_numpy_matrix(G)))
            self.len_all.append(G.number_of_nodes())
        if max_num_node is None:
            self.n = max(self.len_all)
        else:
            self.n = max_num_node
        if max_prev_node is None:
            # default to the full width (no bandwidth truncation)
            self.max_prev_node = self.n - 1
        else:
            self.max_prev_node = max_prev_node

    def __len__(self):
        return len(self.adj_all)

    def __getitem__(self, idx):
        """Return one sample in stored order as {'x', 'y', 'len'}."""
        adj_copy = self.adj_all[idx].copy()
        x_batch = np.zeros(
            (self.n, self.max_prev_node)
        )  # here zeros are padded for small graph
        x_batch[0, :] = 1  # the first input token is all ones
        y_batch = np.zeros(
            (self.n, self.max_prev_node)
        )  # here zeros are padded for small graph
        # generate input x, y pairs
        len_batch = adj_copy.shape[0]
        adj_encoded = encode_adj(adj_copy, max_prev_node=self.max_prev_node)
        # get x and y and adj
        # for small graph the rest are zero padded
        # y is the target, x is the same sequence shifted right by one step
        y_batch[0 : adj_encoded.shape[0], :] = adj_encoded
        x_batch[1 : adj_encoded.shape[0] + 1, :] = adj_encoded
        return {"x": x_batch, "y": y_batch, "len": len_batch}

    def calc_max_prev_node(self, iter=20000, topk=10):
        """Sample random BFS orderings and return the topk largest bandwidths.

        NOTE(review): duplicated from Graph_sequence_sampler_pytorch and
        not called by __init__ in this class.
        """
        max_prev_node = []
        for i in range(iter):
            if i % (iter / 5) == 0:
                print("iter {} times".format(i))
            adj_idx = np.random.randint(len(self.adj_all))
            adj_copy = self.adj_all[adj_idx].copy()
            # print('Graph size', adj_copy.shape[0])
            x_idx = np.random.permutation(adj_copy.shape[0])
            adj_copy = adj_copy[np.ix_(x_idx, x_idx)]
            adj_copy_matrix = np.asmatrix(adj_copy)
            G = nx.from_numpy_matrix(adj_copy_matrix)
            # then do bfs in the permuted G
            start_idx = np.random.randint(adj_copy.shape[0])
            x_idx = np.array(bfs_seq(G, start_idx))
            adj_copy = adj_copy[np.ix_(x_idx, x_idx)]
            # encode adj
            adj_encoded = encode_adj_flexible(adj_copy.copy())
            max_encoded_len = max(
                [len(adj_encoded[i]) for i in range(len(adj_encoded))]
            )
            max_prev_node.append(max_encoded_len)
        max_prev_node = sorted(max_prev_node)[-1 * topk :]
        return max_prev_node
##########
class Graph_sequence_sampler_pytorch_nll(torch.utils.data.Dataset):
    """
    PyTorch dataset that enumerates distinct BFS orderings per graph.

    For NLL evaluation: __init__ expands each graph into the set of
    distinct permuted-then-BFS adjacency matrices found by random search
    (calc_adj); __getitem__ then encodes a stored matrix as-is.
    """

    def __init__(self, G_list, max_num_node=None, max_prev_node=None, iteration=20000):
        # one entry per distinct BFS-ordered adjacency matrix (not per graph)
        self.adj_all = []
        self.len_all = []
        for G in G_list:
            adj = np.asarray(nx.to_numpy_matrix(G))
            adj_temp = self.calc_adj(adj)
            self.adj_all.extend(adj_temp)
            self.len_all.append(G.number_of_nodes())
        if max_num_node is None:
            self.n = max(self.len_all)
        else:
            self.n = max_num_node
        if max_prev_node is None:
            # print('calculating max previous node, total iteration: {}'.format(iteration))
            # self.max_prev_node = max(self.calc_max_prev_node(iter=iteration))
            # print('max previous node: {}'.format(self.max_prev_node))
            self.max_prev_node = self.n - 1
        else:
            self.max_prev_node = max_prev_node

    def __len__(self):
        return len(self.adj_all)

    def __getitem__(self, idx):
        """Return one stored ordering encoded as {'x', 'y', 'len'}."""
        adj_copy = self.adj_all[idx].copy()
        x_batch = np.zeros(
            (self.n, self.max_prev_node)
        )  # here zeros are padded for small graph
        x_batch[0, :] = 1  # the first input token is all ones
        y_batch = np.zeros(
            (self.n, self.max_prev_node)
        )  # here zeros are padded for small graph
        # generate input x, y pairs
        len_batch = adj_copy.shape[0]
        # adj_copy_matrix = np.asmatrix(adj_copy)
        # G = nx.from_numpy_matrix(adj_copy_matrix)
        # then do bfs in the permuted G
        # start_idx = G.number_of_nodes()-1
        # x_idx = np.array(bfs_seq(G, start_idx))
        # adj_copy = adj_copy[np.ix_(x_idx, x_idx)]
        adj_encoded = encode_adj(adj_copy, max_prev_node=self.max_prev_node)
        # get x and y and adj
        # for small graph the rest are zero padded
        y_batch[0 : adj_encoded.shape[0], :] = adj_encoded
        x_batch[1 : adj_encoded.shape[0] + 1, :] = adj_encoded
        return {"x": x_batch, "y": y_batch, "len": len_batch}

    def calc_adj(self, adj):
        """Collect distinct BFS-ordered variants of adj by random search.

        :param adj: dense adjacency matrix of one graph
        :return: list of distinct adjacency matrices (includes adj itself)
        """
        max_iter = 10000
        adj_all = [adj]
        adj_all_len = 1
        for i in range(max_iter):
            adj_copy = adj.copy()
            # random permutation followed by a random-start BFS re-ordering
            x_idx = np.random.permutation(adj_copy.shape[0])
            adj_copy = adj_copy[np.ix_(x_idx, x_idx)]
            adj_copy_matrix = np.asmatrix(adj_copy)
            G = nx.from_numpy_matrix(adj_copy_matrix)
            # then do bfs in the permuted G
            start_idx = np.random.randint(adj_copy.shape[0])
            x_idx = np.array(bfs_seq(G, start_idx))
            adj_copy = adj_copy[np.ix_(x_idx, x_idx)]
            # keep only orderings not seen before (O(len) linear scan)
            add_flag = True
            for adj_exist in adj_all:
                if np.array_equal(adj_exist, adj_copy):
                    add_flag = False
                    break
            if add_flag:
                adj_all.append(adj_copy)
                adj_all_len += 1
                if adj_all_len % 10 == 0:
                    print("adj found:", adj_all_len, "iter used", i)
        return adj_all
| [
"numpy.asmatrix",
"networkx.bfs_successors",
"numpy.arange",
"networkx.grid_2d_graph",
"networkx.from_dict_of_lists",
"numpy.sort",
"numpy.ix_",
"networkx.karate_club_graph",
"numpy.random.permutation",
"networkx.ladder_graph",
"numpy.amin",
"networkx.adjacency_matrix",
"numpy.nonzero",
"n... | [((424, 434), 'networkx.Graph', 'nx.Graph', ([], {}), '()\n', (432, 434), True, 'import networkx as nx\n'), ((3325, 3350), 'numpy.sort', 'np.sort', (['test_idx_reorder'], {}), '(test_idx_reorder)\n', (3332, 3350), True, 'import numpy as np\n'), ((3906, 3934), 'networkx.from_dict_of_lists', 'nx.from_dict_of_lists', (['graph'], {}), '(graph)\n', (3927, 3934), True, 'import networkx as nx\n'), ((3945, 3967), 'networkx.adjacency_matrix', 'nx.adjacency_matrix', (['G'], {}), '(G)\n', (3964, 3967), True, 'import networkx as nx\n'), ((4952, 4970), 'numpy.tril', 'np.tril', (['adj'], {'k': '(-1)'}), '(adj, k=-1)\n', (4959, 4970), True, 'import numpy as np\n'), ((5120, 5159), 'numpy.zeros', 'np.zeros', (['(adj.shape[0], max_prev_node)'], {}), '((adj.shape[0], max_prev_node))\n', (5128, 5159), True, 'import numpy as np\n'), ((5715, 5767), 'numpy.zeros', 'np.zeros', (['(adj_output.shape[0], adj_output.shape[0])'], {}), '((adj_output.shape[0], adj_output.shape[0]))\n', (5723, 5767), True, 'import numpy as np\n'), ((6140, 6200), 'numpy.zeros', 'np.zeros', (['(adj_output.shape[0] + 1, adj_output.shape[0] + 1)'], {}), '((adj_output.shape[0] + 1, adj_output.shape[0] + 1))\n', (6148, 6200), True, 'import numpy as np\n'), ((6258, 6273), 'numpy.tril', 'np.tril', (['adj', '(0)'], {}), '(adj, 0)\n', (6265, 6273), True, 'import numpy as np\n'), ((6566, 6584), 'numpy.tril', 'np.tril', (['adj'], {'k': '(-1)'}), '(adj, k=-1)\n', (6573, 6584), True, 'import numpy as np\n'), ((7517, 7532), 'numpy.tril', 'np.tril', (['adj', '(0)'], {}), '(adj, 0)\n', (7524, 7532), True, 'import numpy as np\n'), ((7666, 7684), 'networkx.ladder_graph', 'nx.ladder_graph', (['(5)'], {}), '(5)\n', (7681, 7684), True, 'import networkx as nx\n'), ((7693, 7717), 'networkx.grid_2d_graph', 'nx.grid_2d_graph', (['(20)', '(20)'], {}), '(20, 20)\n', (7709, 7717), True, 'import networkx as nx\n'), ((7726, 7746), 'networkx.ladder_graph', 'nx.ladder_graph', (['(200)'], {}), '(200)\n', (7741, 7746), True, 'import 
networkx as nx\n'), ((7755, 7777), 'networkx.karate_club_graph', 'nx.karate_club_graph', ([], {}), '()\n', (7775, 7777), True, 'import networkx as nx\n'), ((7786, 7818), 'networkx.connected_caveman_graph', 'nx.connected_caveman_graph', (['(2)', '(3)'], {}), '(2, 3)\n', (7812, 7818), True, 'import networkx as nx\n'), ((7903, 7928), 'networkx.from_numpy_matrix', 'nx.from_numpy_matrix', (['adj'], {}), '(adj)\n', (7923, 7928), True, 'import networkx as nx\n'), ((7951, 7982), 'numpy.random.randint', 'np.random.randint', (['adj.shape[0]'], {}), '(adj.shape[0])\n', (7968, 7982), True, 'import numpy as np\n'), ((8830, 8848), 'numpy.tril', 'np.tril', (['adj'], {'k': '(-1)'}), '(adj, k=-1)\n', (8837, 8848), True, 'import numpy as np\n'), ((8917, 8958), 'numpy.zeros', 'np.zeros', (['(adj.shape[0], adj.shape[1], 2)'], {}), '((adj.shape[0], adj.shape[1], 2))\n', (8925, 8958), True, 'import numpy as np\n'), ((8973, 8995), 'numpy.zeros', 'np.zeros', (['adj.shape[0]'], {}), '(adj.shape[0])\n', (8981, 8995), True, 'import numpy as np\n'), ((9705, 9765), 'numpy.zeros', 'np.zeros', (['(adj_output.shape[0] + 1, adj_output.shape[1] + 1)'], {}), '((adj_output.shape[0] + 1, adj_output.shape[1] + 1))\n', (9713, 9765), True, 'import numpy as np\n'), ((10325, 10347), 'networkx.karate_club_graph', 'nx.karate_club_graph', ([], {}), '()\n', (10345, 10347), True, 'import networkx as nx\n'), ((10418, 10443), 'networkx.from_numpy_matrix', 'nx.from_numpy_matrix', (['adj'], {}), '(adj)\n', (10438, 10443), True, 'import networkx as nx\n'), ((10460, 10491), 'numpy.random.randint', 'np.random.randint', (['adj.shape[0]'], {}), '(adj.shape[0])\n', (10477, 10491), True, 'import numpy as np\n'), ((611, 674), 'numpy.loadtxt', 'np.loadtxt', (["(path + name + '_node_attributes.txt')"], {'delimiter': '""","""'}), "(path + name + '_node_attributes.txt', delimiter=',')\n", (621, 674), True, 'import numpy as np\n'), ((1600, 1640), 'numpy.arange', 'np.arange', (['data_graph_indicator.shape[0]'], {}), 
'(data_graph_indicator.shape[0])\n', (1609, 1640), True, 'import numpy as np\n'), ((4138, 4168), 'networkx.bfs_successors', 'nx.bfs_successors', (['G', 'start_id'], {}), '(G, start_id)\n', (4155, 4168), True, 'import networkx as nx\n'), ((7872, 7893), 'networkx.to_numpy_matrix', 'nx.to_numpy_matrix', (['G'], {}), '(G)\n', (7890, 7893), True, 'import networkx as nx\n'), ((8041, 8061), 'numpy.ix_', 'np.ix_', (['x_idx', 'x_idx'], {}), '(x_idx, x_idx)\n', (8047, 8061), True, 'import numpy as np\n'), ((8295, 8321), 'numpy.amin', 'np.amin', (['(adj_recover - adj)'], {}), '(adj_recover - adj)\n', (8302, 8321), True, 'import numpy as np\n'), ((8323, 8349), 'numpy.amax', 'np.amax', (['(adj_recover - adj)'], {}), '(adj_recover - adj)\n', (8330, 8349), True, 'import numpy as np\n'), ((8548, 8574), 'numpy.amin', 'np.amin', (['(adj_recover - adj)'], {}), '(adj_recover - adj)\n', (8555, 8574), True, 'import numpy as np\n'), ((8576, 8602), 'numpy.amax', 'np.amax', (['(adj_recover - adj)'], {}), '(adj_recover - adj)\n', (8583, 8602), True, 'import numpy as np\n'), ((9097, 9114), 'numpy.amin', 'np.amin', (['non_zero'], {}), '(non_zero)\n', (9104, 9114), True, 'import numpy as np\n'), ((9904, 9921), 'numpy.amax', 'np.amax', (['non_zero'], {}), '(non_zero)\n', (9911, 9921), True, 'import numpy as np\n'), ((10387, 10408), 'networkx.to_numpy_matrix', 'nx.to_numpy_matrix', (['G'], {}), '(G)\n', (10405, 10408), True, 'import networkx as nx\n'), ((10550, 10570), 'numpy.ix_', 'np.ix_', (['x_idx', 'x_idx'], {}), '(x_idx, x_idx)\n', (10556, 10570), True, 'import numpy as np\n'), ((10929, 10955), 'numpy.amax', 'np.amax', (['(adj_recover - adj)'], {}), '(adj_recover - adj)\n', (10936, 10955), True, 'import numpy as np\n'), ((10957, 10983), 'numpy.amin', 'np.amin', (['(adj_recover - adj)'], {}), '(adj_recover - adj)\n', (10964, 10983), True, 'import numpy as np\n'), ((12013, 12051), 'numpy.zeros', 'np.zeros', (['(self.n, self.max_prev_node)'], {}), '((self.n, self.max_prev_node))\n', (12021, 
12051), True, 'import numpy as np\n'), ((12196, 12234), 'numpy.zeros', 'np.zeros', (['(self.n, self.max_prev_node)'], {}), '((self.n, self.max_prev_node))\n', (12204, 12234), True, 'import numpy as np\n'), ((12388, 12428), 'numpy.random.permutation', 'np.random.permutation', (['adj_copy.shape[0]'], {}), '(adj_copy.shape[0])\n', (12409, 12428), True, 'import numpy as np\n'), ((12505, 12526), 'numpy.asmatrix', 'np.asmatrix', (['adj_copy'], {}), '(adj_copy)\n', (12516, 12526), True, 'import numpy as np\n'), ((12539, 12576), 'networkx.from_numpy_matrix', 'nx.from_numpy_matrix', (['adj_copy_matrix'], {}), '(adj_copy_matrix)\n', (12559, 12576), True, 'import networkx as nx\n'), ((12637, 12673), 'numpy.random.randint', 'np.random.randint', (['adj_copy.shape[0]'], {}), '(adj_copy.shape[0])\n', (12654, 12673), True, 'import numpy as np\n'), ((14896, 14926), 'numpy.zeros', 'np.zeros', (['(self.n, self.n - 1)'], {}), '((self.n, self.n - 1))\n', (14904, 14926), True, 'import numpy as np\n'), ((15071, 15101), 'numpy.zeros', 'np.zeros', (['(self.n, self.n - 1)'], {}), '((self.n, self.n - 1))\n', (15079, 15101), True, 'import numpy as np\n'), ((15255, 15295), 'numpy.random.permutation', 'np.random.permutation', (['adj_copy.shape[0]'], {}), '(adj_copy.shape[0])\n', (15276, 15295), True, 'import numpy as np\n'), ((16502, 16540), 'numpy.zeros', 'np.zeros', (['(self.n, self.max_prev_node)'], {}), '((self.n, self.max_prev_node))\n', (16510, 16540), True, 'import numpy as np\n'), ((16685, 16723), 'numpy.zeros', 'np.zeros', (['(self.n, self.max_prev_node)'], {}), '((self.n, self.max_prev_node))\n', (16693, 16723), True, 'import numpy as np\n'), ((19477, 19515), 'numpy.zeros', 'np.zeros', (['(self.n, self.max_prev_node)'], {}), '((self.n, self.max_prev_node))\n', (19485, 19515), True, 'import numpy as np\n'), ((19660, 19698), 'numpy.zeros', 'np.zeros', (['(self.n, self.max_prev_node)'], {}), '((self.n, self.max_prev_node))\n', (19668, 19698), True, 'import numpy as np\n'), ((501, 550), 
'numpy.loadtxt', 'np.loadtxt', (["(path + name + '_A.txt')"], {'delimiter': '""","""'}), "(path + name + '_A.txt', delimiter=',')\n", (511, 550), True, 'import numpy as np\n'), ((697, 756), 'numpy.loadtxt', 'np.loadtxt', (["(path + name + '_node_labels.txt')"], {'delimiter': '""","""'}), "(path + name + '_node_labels.txt', delimiter=',')\n", (707, 756), True, 'import numpy as np\n'), ((810, 873), 'numpy.loadtxt', 'np.loadtxt', (["(path + name + '_graph_indicator.txt')"], {'delimiter': '""","""'}), "(path + name + '_graph_indicator.txt', delimiter=',')\n", (820, 873), True, 'import numpy as np\n'), ((1432, 1446), 'networkx.isolates', 'nx.isolates', (['G'], {}), '(G)\n', (1443, 1446), True, 'import networkx as nx\n'), ((3804, 3825), 'scipy.sparse.vstack', 'sp.vstack', (['(allx, tx)'], {}), '((allx, tx))\n', (3813, 3825), True, 'import scipy.sparse as sp\n'), ((6843, 6864), 'numpy.nonzero', 'np.nonzero', (['adj_slice'], {}), '(adj_slice)\n', (6853, 6864), True, 'import numpy as np\n'), ((6919, 6936), 'numpy.amin', 'np.amin', (['non_zero'], {}), '(non_zero)\n', (6926, 6936), True, 'import numpy as np\n'), ((9050, 9071), 'numpy.nonzero', 'np.nonzero', (['adj[i, :]'], {}), '(adj[i, :])\n', (9060, 9071), True, 'import numpy as np\n'), ((9827, 9858), 'numpy.nonzero', 'np.nonzero', (['adj_output[i, :, 1]'], {}), '(adj_output[i, :, 1])\n', (9837, 9858), True, 'import numpy as np\n'), ((12457, 12477), 'numpy.ix_', 'np.ix_', (['x_idx', 'x_idx'], {}), '(x_idx, x_idx)\n', (12463, 12477), True, 'import numpy as np\n'), ((12750, 12770), 'numpy.ix_', 'np.ix_', (['x_idx', 'x_idx'], {}), '(x_idx, x_idx)\n', (12756, 12770), True, 'import numpy as np\n'), ((13503, 13543), 'numpy.random.permutation', 'np.random.permutation', (['adj_copy.shape[0]'], {}), '(adj_copy.shape[0])\n', (13524, 13543), True, 'import numpy as np\n'), ((13628, 13649), 'numpy.asmatrix', 'np.asmatrix', (['adj_copy'], {}), '(adj_copy)\n', (13639, 13649), True, 'import numpy as np\n'), ((13666, 13703), 
'networkx.from_numpy_matrix', 'nx.from_numpy_matrix', (['adj_copy_matrix'], {}), '(adj_copy_matrix)\n', (13686, 13703), True, 'import networkx as nx\n'), ((13772, 13808), 'numpy.random.randint', 'np.random.randint', (['adj_copy.shape[0]'], {}), '(adj_copy.shape[0])\n', (13789, 13808), True, 'import numpy as np\n'), ((15324, 15344), 'numpy.ix_', 'np.ix_', (['x_idx', 'x_idx'], {}), '(x_idx, x_idx)\n', (15330, 15344), True, 'import numpy as np\n'), ((17585, 17625), 'numpy.random.permutation', 'np.random.permutation', (['adj_copy.shape[0]'], {}), '(adj_copy.shape[0])\n', (17606, 17625), True, 'import numpy as np\n'), ((17710, 17731), 'numpy.asmatrix', 'np.asmatrix', (['adj_copy'], {}), '(adj_copy)\n', (17721, 17731), True, 'import numpy as np\n'), ((17748, 17785), 'networkx.from_numpy_matrix', 'nx.from_numpy_matrix', (['adj_copy_matrix'], {}), '(adj_copy_matrix)\n', (17768, 17785), True, 'import networkx as nx\n'), ((17854, 17890), 'numpy.random.randint', 'np.random.randint', (['adj_copy.shape[0]'], {}), '(adj_copy.shape[0])\n', (17871, 17890), True, 'import numpy as np\n'), ((20657, 20697), 'numpy.random.permutation', 'np.random.permutation', (['adj_copy.shape[0]'], {}), '(adj_copy.shape[0])\n', (20678, 20697), True, 'import numpy as np\n'), ((20782, 20803), 'numpy.asmatrix', 'np.asmatrix', (['adj_copy'], {}), '(adj_copy)\n', (20793, 20803), True, 'import numpy as np\n'), ((20820, 20857), 'networkx.from_numpy_matrix', 'nx.from_numpy_matrix', (['adj_copy_matrix'], {}), '(adj_copy_matrix)\n', (20840, 20857), True, 'import networkx as nx\n'), ((20926, 20962), 'numpy.random.randint', 'np.random.randint', (['adj_copy.shape[0]'], {}), '(adj_copy.shape[0])\n', (20943, 20962), True, 'import numpy as np\n'), ((949, 1009), 'numpy.loadtxt', 'np.loadtxt', (["(path + name + '_graph_labels.txt')"], {'delimiter': '""","""'}), "(path + name + '_graph_labels.txt', delimiter=',')\n", (959, 1009), True, 'import numpy as np\n'), ((13576, 13596), 'numpy.ix_', 'np.ix_', (['x_idx', 
'x_idx'], {}), '(x_idx, x_idx)\n', (13582, 13596), True, 'import numpy as np\n'), ((13893, 13913), 'numpy.ix_', 'np.ix_', (['x_idx', 'x_idx'], {}), '(x_idx, x_idx)\n', (13899, 13913), True, 'import numpy as np\n'), ((17658, 17678), 'numpy.ix_', 'np.ix_', (['x_idx', 'x_idx'], {}), '(x_idx, x_idx)\n', (17664, 17678), True, 'import numpy as np\n'), ((17975, 17995), 'numpy.ix_', 'np.ix_', (['x_idx', 'x_idx'], {}), '(x_idx, x_idx)\n', (17981, 17995), True, 'import numpy as np\n'), ((18662, 18683), 'networkx.to_numpy_matrix', 'nx.to_numpy_matrix', (['G'], {}), '(G)\n', (18680, 18683), True, 'import networkx as nx\n'), ((20730, 20750), 'numpy.ix_', 'np.ix_', (['x_idx', 'x_idx'], {}), '(x_idx, x_idx)\n', (20736, 20750), True, 'import numpy as np\n'), ((21047, 21067), 'numpy.ix_', 'np.ix_', (['x_idx', 'x_idx'], {}), '(x_idx, x_idx)\n', (21053, 21067), True, 'import numpy as np\n'), ((21154, 21189), 'numpy.array_equal', 'np.array_equal', (['adj_exist', 'adj_copy'], {}), '(adj_exist, adj_copy)\n', (21168, 21189), True, 'import numpy as np\n'), ((11301, 11322), 'networkx.to_numpy_matrix', 'nx.to_numpy_matrix', (['G'], {}), '(G)\n', (11319, 11322), True, 'import networkx as nx\n'), ((14547, 14568), 'networkx.to_numpy_matrix', 'nx.to_numpy_matrix', (['G'], {}), '(G)\n', (14565, 14568), True, 'import networkx as nx\n'), ((16014, 16035), 'networkx.to_numpy_matrix', 'nx.to_numpy_matrix', (['G'], {}), '(G)\n', (16032, 16035), True, 'import networkx as nx\n')] |
# -*- coding: utf-8 -*-
import hashlib
import logging
import os
import warnings
from collections.abc import MutableMapping
from pathlib import Path
from typing import Callable, Dict, List, Optional, Union
import imageio
import numpy as np
from matplotlib import cm, colors, patches
from matplotlib import pyplot as plt
from mpl_toolkits.axes_grid1 import make_axes_locatable
from skimage import transform
from eyepy.core import config
from eyepy.core.drusen import DefaultDrusenFinder, DrusenFinder
from eyepy.core.quantifier import DefaultEyeQuantifier, EyeQuantifier
# Module-level logger following the standard `logging.getLogger(__name__)` pattern.
logger = logging.getLogger(__name__)
class Meta(MutableMapping):
    """Dictionary-like container for meta data with lazy value resolution.

    Behaves like an ordinary ``dict`` with two extras:

    1. A string representation suitable for printing the meta information.
    2. Values that are callables are invoked on first access and replaced
       by their return value. This enables lazy loading: the meta
       information of an OCT and its B-Scans is only read from file when
       it is actually accessed.

    Construction works exactly like ``dict``::

        Meta({"SizeX": 512})
        Meta(SizeX=512)
        Meta([("SizeX", 512), ("SizeY", 512)])
    """

    def __init__(self, *args, **kwargs):
        self._store = dict()
        # Delegate argument handling to dict; update() routes through
        # __setitem__ so all entries end up in self._store.
        self.update(dict(*args, **kwargs))

    def __getitem__(self, key):
        entry = self._store[key]
        # Lazy loading: a callable entry is evaluated once and cached.
        if callable(entry):
            self[key] = entry()
        return self._store[key]

    def __setitem__(self, key, value):
        self._store[key] = value

    def __delitem__(self, key):
        del self._store[key]

    def __iter__(self):
        return iter(self._store)

    def __len__(self):
        return len(self._store)

    def __str__(self):
        # One "field: value" line per entry; accessing self[field] resolves
        # lazy values, so printing triggers loading.
        lines = [f"{field}: {self[field]}" for field in self if field != "__empty"]
        return os.linesep.join(lines)

    def __repr__(self):
        return str(self)
class EnfaceImage:
    """Wrapper for an enface image supporting lazy loading.

    The image may be handed in directly or as a callable returning it;
    the callable is evaluated on first access to :attr:`data`.
    """

    def __init__(self, data, name=None):
        self._data = data
        self._name = name

    @property
    def data(self):
        """Return the enface image as numpy array."""
        if callable(self._data):
            # Resolve the lazy loader exactly once and cache the result.
            self._data = self._data()
        return self._data

    @property
    def name(self):
        # Filename of the image, if it was read from an individual file.
        if self._name is not None:
            return self._name
        raise ValueError("This EnfaceImage has no respective filename")
class Annotation(MutableMapping):
    """Dictionary-like container for B-Scan annotations with lazy loading.

    Callable values are resolved on first access by calling them with the
    associated :class:`Bscan`; the result then replaces the callable.
    Accessing a lazy entry before :attr:`bscan` is set raises
    ``AttributeError``.
    """

    def __init__(self, *args, **kwargs):
        self._store = dict()
        # Delegate argument handling to dict; update() routes through
        # __setitem__ so all entries end up in self._store.
        self.update(dict(*args, **kwargs))
        self._bscan = None

    def __getitem__(self, key):
        entry = self._store[key]
        if callable(entry):
            # Lazy loading: the annotation may need the B-Scan to compute.
            self[key] = entry(self.bscan)
        return self._store[key]

    def __setitem__(self, key, value):
        self._store[key] = value

    def __delitem__(self, key):
        del self._store[key]

    def __iter__(self):
        return iter(self._store)

    def __len__(self):
        return len(self._store)

    # TODO: Make the annotation printable to get an overview

    @property
    def bscan(self):
        """The Bscan this annotation belongs to; raises if not yet set."""
        if self._bscan is None:
            raise AttributeError("bscan is not set for this Annotation.")
        return self._bscan

    @bscan.setter
    def bscan(self, value: "Bscan"):
        self._bscan = value
class LayerAnnotation(MutableMapping):
    """Mapping from layer names to per-column layer heights of one B-Scan.

    Parameters
    ----------
    data : 2D array of shape (n_layers, width), or a callable returning
        one (loaded lazily on first access). One row of heights per layer.
    layername_mapping : dict mapping layer names to row indices in `data`.
        Defaults to ``config.SEG_MAPPING``.
    max_height : Heights outside the range [0, max_height] are treated as
        missing and returned as NaN.
    """

    def __init__(self, data, layername_mapping=None, max_height=2000):
        self._data = data
        self.max_height = max_height
        if layername_mapping is None:
            self.mapping = config.SEG_MAPPING
        else:
            self.mapping = layername_mapping

    @property
    def data(self):
        """The raw height array; resolves a lazy loader on first access."""
        if callable(self._data):
            self._data = self._data()
        return self._data

    def __getitem__(self, key):
        """Return heights for layer `key`, invalid values replaced by NaN.

        Raises
        ------
        KeyError
            If the layer contains no valid data at all.
        """
        data = self.data[self.mapping[key]]
        nans = np.isnan(data)
        # Mark heights outside [0, max_height] as empty; `where=~nans`
        # skips NaN entries to avoid comparisons on invalid values.
        empty = np.nonzero(
            np.logical_or(
                np.less(data, 0, where=~nans),
                np.greater(data, self.max_height, where=~nans),
            )
        )
        data = np.copy(data)
        data[empty] = np.nan
        if np.nansum(data) > 0:
            return data
        else:
            raise KeyError(f"There is no data given for the {key} layer")

    def __setitem__(self, key, value):
        self.data[self.mapping[key]] = value

    def __delitem__(self, key):
        self.data[self.mapping[key], :] = np.nan

    def __iter__(self):
        # Iterate layer names in row order of the underlying array.
        inv_map = {v: k for k, v in self.mapping.items()}
        return iter(inv_map.values())

    def __len__(self):
        # Bugfix: was `len(self.data.shape[0])`, which calls len() on an
        # int and raised TypeError. Return the number of layer rows.
        return self.data.shape[0]

    def layer_indices(self, key):
        """Return (row, col) integer index arrays of the layer's valid pixels."""
        layer = self[key]
        nan_indices = np.isnan(layer)
        col_indices = np.arange(len(layer))[~nan_indices]
        row_indices = np.rint(layer).astype(int)[~nan_indices]
        return (row_indices, col_indices)
class Bscan:
    """A single B-Scan of an OCT volume.

    The raw image data, the meta data and the annotations may be given as
    callables, in which case they are loaded lazily on first access.
    Every meta field additionally becomes a read-only attribute of the
    class (see ``__new__``).
    """
    def __new__(
        cls,
        data,
        annotation=None,
        meta=None,
        data_processing=None,
        oct_obj=None,
        name=None,
        *args,
        **kwargs,
    ):
        # Make all meta fields accessible as attributes of the BScan without
        # reading them. Set a property instead
        def meta_func_builder(x):
            return lambda self: self.meta[x]
        if meta is not None:
            for key in meta:
                # NOTE: properties are installed on the class object, so all
                # instances of this (sub)class share the injected properties.
                setattr(cls, key, property(meta_func_builder(key)))
        return object.__new__(cls, *args, **kwargs)
    def __init__(
        self,
        data: Union[np.ndarray, Callable],
        annotation: Optional[Annotation] = None,
        meta: Optional[Union[Dict, Meta]] = None,
        data_processing: Optional[Callable] = None,
        oct_obj: Optional["Oct"] = None,
        name: Optional[str] = None,
    ):
        """
        Parameters
        ----------
        data : A numpy array holding the raw B-Scan data or a callable which
            returns a raw B-Scan. Raw means that it represents the unprocessed
            stored data. The actual dtype and value range depends on the storage
            format.
        annotation : Annotation object holding B-Scan annotations
        meta : A dictionary or Meta object holding the B-Scans meta information
        data_processing : Callable mapping the raw B-Scan to the display
            contrast; defaults to the identity function
        oct_obj : Reference to the OCT Volume holding the B-Scan
        name : Filename of the B-Scan if the B-Scan is saved as individual file
        """
        self._scan_raw = data
        self._scan = None
        self._meta = meta
        self._oct_obj = oct_obj
        self._annotation = annotation
        # Default to the identity so `scan` works without a processing step.
        if data_processing is None:
            self._data_processing = lambda x: x
        else:
            self._data_processing = data_processing
        self._name = name
    @property
    def oct_obj(self) -> "Oct":
        """The OCT volume this B-Scan belongs to; raises if not set."""
        if self._oct_obj is None:
            raise AttributeError("oct_obj is not set for this Bscan object")
        return self._oct_obj
    @oct_obj.setter
    def oct_obj(self, value):
        self._oct_obj = value
    @property
    def name(self) -> str:
        # Fall back to the index in the volume if no filename was given.
        if self._name is None:
            self._name = str(self.index)
        return self._name
    @property
    def index(self) -> int:
        """Position of this B-Scan in the OCT volume's bscans list."""
        return self.oct_obj.bscans.index(self)
    @property
    def meta(self):
        """A dict holding all Bscan meta data."""
        return self._meta
    @property
    def annotation(self) -> Annotation:
        """A dict holding all Bscan annotation data."""
        # Create an empty Annotation on demand or resolve a lazy loader,
        # then (re)attach the back-reference to this B-Scan.
        if self._annotation is None:
            self._annotation = Annotation({})
        elif callable(self._annotation):
            self._annotation = self._annotation()
        self._annotation.bscan = self
        return self._annotation
    @property
    def scan_raw(self):
        """An array holding a single raw B-Scan.
        The dtype is not changed after the import. If available this is
        the unprocessed output of the OCT device. In any case this is
        the unprocessed data imported by eyepy.
        """
        # Lazy loading: resolve the callable once and cache the array.
        if callable(self._scan_raw):
            self._scan_raw = self._scan_raw()
        return self._scan_raw
    @property
    def scan(self):
        """An array holding a single B-Scan with the commonly used contrast.
        The array is of dtype <ubyte> and encodes the intensities as
        values between 0 and 255.
        """
        # Processed version of scan_raw, computed once and cached.
        if self._scan is None:
            self._scan = self._data_processing(self.scan_raw)
        return self._scan
    @property
    def shape(self):
        """Shape (height, width) of the processed B-Scan."""
        return self.scan.shape
    @property
    def layers(self) -> LayerAnnotation:
        """The LayerAnnotation of this B-Scan, created empty on demand."""
        if "layers" not in self.annotation:
            # Initialize with one all-zero row per layer in the config mapping.
            l_shape = np.zeros(
                (max(config.SEG_MAPPING.values()) + 1, self.oct_obj.SizeX)
            )
            self.annotation["layers"] = LayerAnnotation(l_shape)
        if callable(self.annotation["layers"]):
            self.annotation["layers"] = self.annotation["layers"]()
        return self.annotation["layers"]
    @property
    def drusen_raw(self):
        """Return drusen computed from the RPE and BM layer segmentation.
        The raw drusen are computed based on single B-Scans
        """
        return self._oct_obj.drusen_raw[..., self.index]
    @property
    def drusen(self):
        """Return filtered drusen.
        Drusen are filtered based on the complete volume
        """
        return self._oct_obj.drusen[..., self.index]
    def plot(
        self,
        ax=None,
        layers=None,
        drusen=False,
        layers_kwargs=None,
        layers_color=None,
        annotation_only=False,
        region=np.s_[:, :],
    ):
        """Plot B-Scan with segmented Layers.

        Parameters
        ----------
        ax : matplotlib axes to draw on; defaults to the current axes
        layers : list of layer names to plot, or "all"
        drusen : whether to overlay the drusen of this B-Scan
        layers_kwargs : extra kwargs merged over config.layers_kwargs
        layers_color : per-layer colors merged over config.layers_color
        annotation_only : if True, skip drawing the B-Scan image itself
        region : 2D slice expression selecting the plotted sub-region
        """
        if ax is None:
            ax = plt.gca()
        # Complete region index expression
        # (replace open-ended slice bounds by explicit numbers so the
        # layer heights can be shifted/clipped below).
        if region[0].start is None:
            r0_start = 0
        else:
            r0_start = region[0].start
        if region[1].start is None:
            r1_start = 0
        else:
            r1_start = region[1].start
        if region[0].stop is None:
            r0_stop = self.shape[0]
        else:
            r0_stop = region[0].stop
        if region[1].stop is None:
            r1_stop = self.shape[1]
        else:
            r1_stop = region[1].stop
        region = np.s_[r0_start:r0_stop, r1_start:r1_stop]
        if layers is None:
            layers = []
        elif layers == "all":
            layers = self.layers.keys()
        # User kwargs/colors override the configured defaults.
        if layers_kwargs is None:
            layers_kwargs = config.layers_kwargs
        else:
            layers_kwargs = {**config.layers_kwargs, **layers_kwargs}
        if layers_color is None:
            layers_color = config.layers_color
        else:
            layers_color = {**config.layers_color, **layers_color}
        if not annotation_only:
            ax.imshow(self.scan[region], cmap="gray")
        if drusen:
            # Use an alpha mask so only drusen pixels are drawn over the scan.
            visible = np.zeros(self.drusen.shape)
            visible[self.drusen] = 1.0
            ax.imshow(self.drusen[region], alpha=visible[region], cmap="Reds")
        for layer in layers:
            color = layers_color[layer]
            try:
                layer_data = self.layers[layer]
                # Adjust layer height to plotted region
                layer_data = layer_data - region[0].start
                # Remove layer if outside of region
                layer_data = layer_data[region[1].start : region[1].stop]
                layer_data[layer_data < 0] = 0
                region_height = region[0].stop - region[0].start
                layer_data[layer_data > region_height] = region_height
                ax.plot(
                    layer_data, color=color, label=layer, **layers_kwargs,
                )
            except KeyError:
                # Layer has no valid data (see LayerAnnotation.__getitem__).
                warnings.warn(f"Layer '{layer}' has no Segmentation", UserWarning)
class Oct:
    """.vol header
    -----------
    All fields from the .vol header (the oct meta data) can be accessed as attributes of the
    HeyexOct object.
    SLO
    ---
    The attribute `slo` of the HeyexOct object gives access to the IR SLO image
    and returns it as a numpy.ndarray of dtype `uint8`.
    B-Scans
    -------
    Individual B-Scans can be accessed using `oct_scan[index]`. The returned
    HeyexBscan object exposes all B-Scan header fields as attributes and the
    raw B-Scan image as `numpy.ndarray` of type `float32` under the attribute
    `scan_raw`. A transformed version of the raw B-Scan which is more similar to
    the Heyex experience can be accessed with the attribute `scan` and returns
    the 4th root of the raw B-Scan scaled to [0,255] as `uint8`.
    Segmentations
    -------------
    B-Scan segmentations can be accessed for individual B-Scans like
    `bscan.segmentation`. This return a numpy.ndarray of shape (NumSeg, SizeX)
    The `segmentation` attribute of the HeyexOct object returns a dictionary,
    where the key is a number and the value is a numpy.ndarray of shape
    (NumBScans, SizeX)."""

    def __new__(
        cls,
        bscans: List[Bscan],
        localizer=None,
        meta=None,
        data_path=None,
        *args,
        **kwargs,
    ):
        # Set all the meta fields as attributes
        if meta is not None:

            def meta_func_builder(x):
                return lambda self: self.meta[x]

            for key in meta:
                # Every key in meta becomes a property for the new class. The
                # property calls self.meta[key] to retrieve the keys value
                # This lambda func returns a lambda func which accesses the meta
                # object for the key specified in the first lambda. Like this we
                # read the file only on access.
                setattr(cls, key, property(meta_func_builder(key)))
        return object.__new__(cls, *args, **kwargs)

    def __init__(
        self,
        bscans: List[Union[Callable, Bscan]],
        localizer: Optional[Union[Callable, EnfaceImage]] = None,
        meta: Optional[Meta] = None,
        drusenfinder: Optional[DrusenFinder] = None,
        eyequantifier: Optional[EyeQuantifier] = None,
        data_path: Optional[str] = None,
    ):
        """
        Parameters
        ----------
        bscans : List of Bscan objects, or callables returning them (lazy)
        localizer : Localizer enface image, or a callable returning it
        meta : Meta object holding the OCT meta data
        drusenfinder : DrusenFinder used to compute drusen. Defaults to a
            fresh DefaultDrusenFinder per instance.
        eyequantifier : EyeQuantifier used for quantification. Defaults to
            a fresh DefaultEyeQuantifier per instance.
        data_path : Folder used for eyepy data such as the cached drusen
        """
        self.bscans = bscans
        self._localizer = localizer
        self._meta = meta
        # Instantiate the defaults per object. Previously they were created
        # in the signature defaults, which evaluates once at class definition
        # and shares a single finder/quantifier across all Oct instances.
        self._drusenfinder = DefaultDrusenFinder() if drusenfinder is None else drusenfinder
        self._eyequantifier = DefaultEyeQuantifier() if eyequantifier is None else eyequantifier
        self._tform_localizer_to_oct = None
        self._drusen = None
        self._drusen_raw = None
        self._eyepy_id = None
        # Bugfix: the home-directory default was previously clobbered by an
        # unconditional `Path(data_path)`, which raises TypeError for None.
        if data_path is None:
            data_path = Path.home() / ".eyepy"
        self.data_path = Path(data_path)
        self.drusen_path = self.data_path / ".eyepy" / f"{self.eyepy_id}_drusen_map.npy"

    def __getitem__(self, index) -> Bscan:
        """The B-Scan at the given index."""
        if isinstance(index, slice):
            return [self[i] for i in range(*index.indices(len(self)))]
        else:
            bscan = self.bscans[index]
            if callable(bscan):
                # Lazy loading: replace the loader by the loaded B-Scan.
                self.bscans[index] = bscan()
            self.bscans[index].oct_obj = self
            return self.bscans[index]

    def __len__(self):
        """The number of B-Scans."""
        return len(self.bscans)

    @classmethod
    def from_heyex_xml(cls, path):
        """Alternate constructor reading a Heyex XML export."""
        from eyepy.io.heyex import HeyexXmlReader

        reader = HeyexXmlReader(path)
        return cls(
            bscans=reader.bscans,
            localizer=reader.localizer,
            meta=reader.oct_meta,
            data_path=reader.path,
        )

    @classmethod
    def from_heyex_vol(cls, path):
        """Alternate constructor reading a Heyex .vol file."""
        from eyepy.io.heyex import HeyexVolReader

        reader = HeyexVolReader(path)
        return cls(
            bscans=reader.bscans,
            localizer=reader.localizer,
            meta=reader.oct_meta,
            data_path=Path(path).parent,
        )

    @classmethod
    def from_duke_mat(cls, path):
        """Alternate constructor reading a Duke dataset .mat file."""
        import scipy.io as sio

        loaded = sio.loadmat(path)
        data = np.moveaxis(loaded["images"], -1, 0)
        label = np.swapaxes(loaded["layerMaps"], 1, 2)
        bscans = []
        mapping = {"BM": 2, "RPE": 1, "ILM": 0}
        for d, l in zip(data, label):
            annotation = Annotation({"layers": LayerAnnotation(l, mapping)})
            bscans.append(Bscan(d, annotation=annotation))
        return cls(
            bscans=bscans,
            meta=Meta(**{"Age": loaded["Age"]}),
            data_path=Path(path).parent,
        )

    @classmethod
    def from_folder(cls, path):
        """Alternate constructor reading one image file per B-Scan."""
        path = Path(path)
        img_paths = sorted(list(path.iterdir()))

        # Bind `p` via the factory to avoid the late-binding closure pitfall.
        def read_func(p):
            return lambda: imageio.imread(p)

        bscans = [Bscan(read_func(p), name=p.name) for p in img_paths]
        return cls(bscans=bscans, data_path=path)

    def estimate_bscan_distance(self):
        # Try to estimate B-Scan distances. Can be used if Bscan Positions
        # but not their distance is in the meta information
        # Pythagoras in case B-Scans are rotated with respect to the localizer
        a = self[-1].StartY - self[0].StartY
        b = self[-1].StartX - self[0].StartX
        self.meta["Distance"] = np.sqrt(a ** 2 + b ** 2) / (len(self.bscans) - 1)
        return self.Distance

    @property
    def eyepy_id(self):
        """Visit ID for saving visit related files."""
        if self._eyepy_id is None:
            # Compute a hash of the first B-Scan as ID
            sha1 = hashlib.sha1()
            sha1.update(self[0].scan.tobytes())
            self._eyepy_id = sha1.hexdigest()
        return self._eyepy_id

    @property
    def fovea_pos(self):
        # Not yet implemented; kept for interface stability.
        return None

    @property
    def shape(self):
        """(SizeZ, SizeX, NumBScans) of the volume."""
        return (self.SizeZ, self.SizeX, self.NumBScans)

    @property
    def SizeX(self):
        """B-Scan width; falls back to the first B-Scan if meta is missing."""
        try:
            return self.meta["SizeX"]
        except (AttributeError, KeyError):
            # AttributeError: no meta at all; KeyError: field not in meta.
            return self[0].scan.shape[1]

    @property
    def SizeZ(self):
        """B-Scan height; falls back to the first B-Scan if meta is missing."""
        try:
            return self.meta["SizeZ"]
        except (AttributeError, KeyError):
            return self[0].scan.shape[0]

    @property
    def NumBScans(self):
        """Number of B-Scans; falls back to len(self) if meta is missing."""
        try:
            return self.meta["NumBScans"]
        except (AttributeError, KeyError):
            return len(self)

    @property
    def localizer(self):
        """A numpy array holding the OCTs localizer enface if available."""
        try:
            return self._localizer.data
        except AttributeError:
            raise AttributeError("This OCT object has no localizer image.")

    @property
    def volume_raw(self):
        """An array holding the OCT volume.
        The dtype is not changed after the import. If available this is
        the unprocessed output of the OCT device. In any case this is
        the unprocessed data imported by eyepy.
        """
        return np.stack([x.scan_raw for x in self.bscans], axis=-1)

    @property
    def volume(self):
        """An array holding the OCT volume with the commonly used contrast.
        The array is of dtype <ubyte> and encodes the intensities as
        values between 0 and 255.
        """
        return np.stack([x.scan for x in self.bscans], axis=-1)

    @property
    def layers_raw(self):
        """Height maps for all layers combined into one volume.
        Layers for all B-Scans are stacked such that we get a volume L x B x W
        where L are different Layers, B are the B-Scans and W is the Width of
        the B-Scans.
        A flip on the B-Scan axis is needed to locate the first B-Scan at the
        bottom of the height map.
        """
        return np.flip(np.stack([x.layers.data for x in self], axis=1), axis=1)

    @property
    def layers(self):
        """Height maps for all layers accessible by the layers name."""
        nans = np.isnan(self.layers_raw)
        # Mark heights outside [0, SizeZ] as empty; `where=~nans` skips NaN
        # entries to avoid comparisons on invalid values.
        empty = np.nonzero(
            np.logical_or(
                np.less(self.layers_raw, 0, where=~nans),
                np.greater(self.layers_raw, self.SizeZ, where=~nans),
            )
        )
        data = self.layers_raw.copy()
        data[empty] = np.nan
        # Only expose layers that contain at least one valid value.
        return {
            name: data[i, ...]
            for name, i in self[0].layers.mapping.items()
            if np.nansum(data[i, ...]) != 0
        }

    @property
    def meta(self):
        """A dict holding all OCT meta data.
        The object can be printed to see all available meta data.
        """
        if self._meta is None:
            raise AttributeError("This volume has no meta data")
        return self._meta

    @property
    def drusen(self):
        """Final drusen after post processing the initial raw drusen.
        Here the `filter` function of the DrusenFinder has been applied
        """
        if self._drusen is None:
            # Try to load the drusen from the default location
            try:
                self._drusen = np.load(self.drusen_path)
            except (NotADirectoryError, FileNotFoundError):
                # Not cached yet: compute, then optionally cache to disk.
                self._drusen = self._drusenfinder.filter(self.drusen_raw)
                if config.SAVE_DRUSEN:
                    self.drusen_path.parent.mkdir(parents=True, exist_ok=True)
                    np.save(self.drusen_path, self._drusen)
        return self._drusen

    def drusen_recompute(self, drusenfinder=None):
        """Recompute Drusen optionally with a custom DrusenFinder.
        Use this if you do not like the computed / loaded drusen
        """
        if drusenfinder is not None:
            self._drusenfinder = drusenfinder
        self._drusen_raw = self._drusenfinder.find(self)
        self._drusen = self._drusenfinder.filter(self.drusen_raw)
        if config.SAVE_DRUSEN:
            np.save(self.drusen_path, self._drusen)
        return self._drusen

    @property
    def drusen_raw(self):
        """Initial drusen before post procssing.
        The initial drusen found by the DrusenFinders `find` function.
        """
        if self._drusen_raw is None:
            self._drusen_raw = self._drusenfinder.find(self)
        return self._drusen_raw

    @property
    def quantification(self):
        """Quantification of the drusen as computed by the EyeQuantifier."""
        return self._eyequantifier.quantify(self)

    @property
    def tform_localizer_to_oct(self):
        """Affine transform from localizer space to OCT projection space."""
        if self._tform_localizer_to_oct is None:
            self._tform_localizer_to_oct = self._estimate_localizer_to_oct_tform()
        return self._tform_localizer_to_oct

    @property
    def tform_oct_to_localizer(self):
        """Inverse transform, from OCT projection space to localizer space."""
        return self.tform_localizer_to_oct.inverse

    @property
    def localizer_shape(self):
        """Shape of the localizer; assumes a (SizeX, SizeX) square if absent."""
        try:
            return self.localizer.shape
        except AttributeError:
            return (self.SizeX, self.SizeX)

    def _estimate_localizer_to_oct_tform(self):
        """Estimate the affine OCT-projection -> localizer transform.

        Raises
        ------
        ValueError
            If the estimated transform is not invertible on the corners.
        """
        oct_projection_shape = (self.NumBScans, self.SizeX)
        # Corners of the OCT projection in (row, col) order. (A duplicate,
        # immediately overwritten `src` assignment was removed as dead code.)
        src = np.array(
            [
                0,
                0,  # Top left
                0,
                oct_projection_shape[1] - 1,  # Top right
                oct_projection_shape[0] - 1,
                0,  # Bottom left
                oct_projection_shape[0] - 1,
                oct_projection_shape[1] - 1,  # Bottom right
            ]
        ).reshape((-1, 2))
        try:
            # Try to map the oct projection to the localizer image
            dst = np.array(
                [
                    self[-1].StartY / self.ScaleXSlo,
                    self[-1].StartX / self.ScaleYSlo,
                    self[-1].EndY / self.ScaleXSlo,
                    self[-1].EndX / self.ScaleYSlo,
                    self[0].StartY / self.ScaleXSlo,
                    self[0].StartX / self.ScaleYSlo,
                    self[0].EndY / self.ScaleXSlo,
                    self[0].EndX / self.ScaleYSlo,
                ]
            ).reshape((-1, 2))
        except AttributeError:
            # Map the oct projection to a square area of shape (bscan_width, bscan_width)
            warnings.warn(
                f"Bscan positions on localizer image or the scale of the "
                f"localizer image is missing. We assume that the B-Scans cover "
                f"a square area and are equally spaced.",
                UserWarning,
            )
            b_width = self[0].shape[1]
            dst = np.array(
                [
                    0,
                    0,  # Top left
                    0,
                    b_width - 1,  # Top right
                    b_width - 1,
                    0,  # Bottom left
                    b_width - 1,
                    b_width - 1,  # Bottom right
                ]
            ).reshape((-1, 2))
        # estimate_transform expects (x, y) point order -> swap (row, col).
        src = src[:, [1, 0]]
        dst = dst[:, [1, 0]]
        tform = transform.estimate_transform("affine", src, dst)
        if not np.allclose(tform.inverse(tform(src)), src):
            msg = f"Problem with transformation of OCT Projection to the localizer image space."
            raise ValueError(msg)
        return tform

    @property
    def drusen_projection(self):
        # Sum the all B-Scans along their first axis (B-Scan height)
        # Swap axis such that the volume depth becomes the projections height not width
        # We want the first B-Scan to be located at the bottom hence flip along axis 0
        return np.flip(np.swapaxes(np.sum(self.drusen, axis=0), 0, 1), axis=0)

    @property
    def drusen_enface(self):
        """Drusen projection warped into the localizer space."""
        return transform.warp(
            self.drusen_projection.astype(float),
            self.tform_oct_to_localizer,
            output_shape=self.localizer_shape,
            order=0,
        )

    @property
    def drusenfinder(self):
        """Get and set the DrusenFinder object.
        When the DrusenFinder object is set all drusen are removed.
        """
        return self._drusenfinder

    @drusenfinder.setter
    def drusenfinder(self, drusenfinder):
        # Invalidate cached drusen so they are recomputed with the new finder.
        self._drusen = None
        self._drusen_raw = None
        self._drusenfinder = drusenfinder

    def plot(
        self,
        ax=None,
        localizer=True,
        drusen=False,
        bscan_region=False,
        bscan_positions=None,
        masks=False,
        region=np.s_[...],
        drusen_kwargs=None,
    ):
        """Plot an overview of the OCT volume on the localizer image.

        Parameters
        ----------
        ax : matplotlib axes to draw on; defaults to the current axes
        localizer : whether to draw the localizer enface image
        drusen : whether to overlay the drusen enface projection
        bscan_region : whether to outline the area covered by the B-Scans
        bscan_positions : None, "all"/True, or an iterable of B-Scan indices
        masks : whether to draw the quantification mask primitives
        region : slice object selecting the plotted sub-region
        drusen_kwargs : extra kwargs forwarded to `plot_drusen`
        """
        if ax is None:
            ax = plt.gca()
        if localizer:
            self.plot_localizer(ax=ax, region=region)
        if drusen:
            if drusen_kwargs is None:
                drusen_kwargs = {}
            self.plot_drusen(ax=ax, region=region, **drusen_kwargs)
        if bscan_positions is not None:
            self.plot_bscan_positions(
                ax=ax,
                bscan_positions=bscan_positions,
                region=region,
                line_kwargs={"linewidth": 0.5, "color": "green"},
            )
        if bscan_region:
            self.plot_bscan_region(region=region, ax=ax)
        if masks:
            self.plot_masks(region=region, ax=ax)
        # if quantification:
        #    self.plot_quantification(space=space, region=region, ax=ax,
        #    q_kwargs=q_kwargs)

    def plot_bscan_ticks(self, ax=None):
        if ax is None:
            ax = plt.gca()
        # NOTE(review): `Axes` has no `yticks` method (that is `plt.yticks`);
        # this call raises AttributeError. Intent unclear -- confirm whether
        # `ax.set_yticks(...)` or `ax.get_yticks()` was meant.
        ax.yticks()

    def plot_layer_distance(
        self,
        region=np.s_[...],
        ax=None,
        bot_layer="BM",
        top_layer="RPE",
        vmin=None,
        vmax=None,
    ):
        """Plot the height difference of two layers in localizer space."""
        if ax is None:
            ax = plt.gca()
        # Bugfix: the `bot_layer`/`top_layer` parameters were previously
        # ignored; "BM"/"RPE" were hardcoded (identical to the defaults).
        dist = self.layers[bot_layer] - self.layers[top_layer]
        img = transform.warp(
            dist.astype(float),
            self.tform_oct_to_localizer,
            output_shape=self.localizer_shape,
            order=0,
        )
        ax.imshow(img[region], cmap="gray", vmin=vmin, vmax=vmax)

    def plot_masks(self, region=np.s_[...], ax=None, color="r", linewidth=0.5):
        """Plot the quantification mask primitives (circles and lines).

        Parameters
        ----------
        region : slice object selecting the plotted sub-region
        ax : matplotlib axes to draw on; defaults to the current axes
        color : edge/line color
        linewidth : edge/line width
        """
        primitives = self._eyequantifier.plot_primitives(self)
        if ax is None:
            ax = plt.gca()
        for circle in primitives["circles"]:
            c = patches.Circle(
                circle["center"],
                circle["radius"],
                facecolor="none",
                edgecolor=color,
                linewidth=linewidth,
            )
            ax.add_patch(c)
        for line in primitives["lines"]:
            x = [line["start"][0], line["end"][0]]
            y = [line["start"][1], line["end"][1]]
            ax.plot(x, y, color=color, linewidth=linewidth)

    def plot_localizer(self, ax=None, region=np.s_[...]):
        """Plot the localizer enface image."""
        if ax is None:
            ax = plt.gca()
        ax.imshow(self.localizer[region], cmap="gray")

    def plot_bscan_positions(
        self, bscan_positions="all", ax=None, region=np.s_[...], line_kwargs=None
    ):
        """Draw one line per B-Scan at its position on the localizer."""
        if bscan_positions is None:
            bscan_positions = []
        elif bscan_positions == "all" or bscan_positions is True:
            bscan_positions = range(0, len(self))
        if line_kwargs is None:
            line_kwargs = config.line_kwargs
        else:
            line_kwargs = {**config.line_kwargs, **line_kwargs}
        for i in bscan_positions:
            bscan = self[i]
            # Convert mm positions to localizer pixel coordinates.
            x = np.array([bscan.StartX, bscan.EndX]) / self.ScaleXSlo
            y = np.array([bscan.StartY, bscan.EndY]) / self.ScaleYSlo
            ax.plot(x, y, **line_kwargs)

    def plot_bscan_region(self, region=np.s_[...], ax=None):
        """Outline the area covered by the B-Scans on the localizer."""
        if ax is None:
            ax = plt.gca()
        up_right_corner = (
            self[-1].EndX / self.ScaleXSlo,
            self[-1].EndY / self.ScaleYSlo,
        )
        width = (self[0].StartX - self[0].EndX) / self.ScaleXSlo
        height = (self[0].StartY - self[-1].EndY) / self.ScaleYSlo
        # Create a Rectangle patch
        rect = patches.Rectangle(
            up_right_corner, width, height, linewidth=1, edgecolor="r", facecolor="none"
        )
        # Add the patch to the Axes
        ax.add_patch(rect)

    def plot_drusen(
        self,
        ax=None,
        region=np.s_[...],
        cmap="Reds",
        vmin=None,
        vmax=None,
        cbar=True,
        alpha=1,
    ):
        """Plot the drusen enface projection with an optional colorbar."""
        drusen = self.drusen_enface
        if ax is None:
            ax = plt.gca()
        if vmax is None:
            vmax = drusen.max()
        if vmin is None:
            vmin = 1
        # Alpha mask: only draw pixels whose value lies strictly in (vmin, vmax).
        visible = np.zeros(drusen[region].shape)
        visible[np.logical_and(vmin < drusen[region], drusen[region] < vmax)] = 1
        if cbar:
            divider = make_axes_locatable(ax)
            cax = divider.append_axes("right", size="5%", pad=0.05)
            plt.colorbar(
                cm.ScalarMappable(colors.Normalize(vmin=vmin, vmax=vmax), cmap=cmap),
                cax=cax,
            )
        ax.imshow(
            drusen[region],
            alpha=visible[region] * alpha,
            cmap=cmap,
            vmin=vmin,
            vmax=vmax,
        )

    def plot_localizer_bscan(self, ax=None, n_bscan=0):
        """Plot Slo with one selected B-Scan."""
        raise NotImplementedError()

    def plot_bscans(
        self, bs_range=range(0, 8), cols=4, layers=None, layers_kwargs=None
    ):
        """Plot a grid with B-Scans."""
        rows = int(np.ceil(len(bs_range) / cols))
        if layers is None:
            layers = []
        # NOTE(review): subplots receives (cols, rows); the flattened indexing
        # below works either way, but confirm the intended grid orientation.
        fig, axes = plt.subplots(cols, rows, figsize=(rows * 4, cols * 4))
        with np.errstate(invalid="ignore"):
            for i in bs_range:
                bscan = self[i]
                ax = axes.flatten()[i]
                bscan.plot(ax=ax, layers=layers, layers_kwargs=layers_kwargs)
| [
"logging.getLogger",
"eyepy.core.config.SEG_MAPPING.values",
"numpy.sqrt",
"pathlib.Path.home",
"scipy.io.loadmat",
"eyepy.core.drusen.DefaultDrusenFinder",
"numpy.array",
"numpy.moveaxis",
"hashlib.sha1",
"numpy.save",
"numpy.less",
"numpy.greater",
"pathlib.Path",
"numpy.stack",
"numpy... | [((581, 608), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (598, 608), False, 'import logging\n'), ((4282, 4296), 'numpy.isnan', 'np.isnan', (['data'], {}), '(data)\n', (4290, 4296), True, 'import numpy as np\n'), ((4502, 4515), 'numpy.copy', 'np.copy', (['data'], {}), '(data)\n', (4509, 4515), True, 'import numpy as np\n'), ((5123, 5138), 'numpy.isnan', 'np.isnan', (['layer'], {}), '(layer)\n', (5131, 5138), True, 'import numpy as np\n'), ((14355, 14376), 'eyepy.core.drusen.DefaultDrusenFinder', 'DefaultDrusenFinder', ([], {}), '()\n', (14374, 14376), False, 'from eyepy.core.drusen import DefaultDrusenFinder, DrusenFinder\n'), ((14417, 14439), 'eyepy.core.quantifier.DefaultEyeQuantifier', 'DefaultEyeQuantifier', ([], {}), '()\n', (14437, 14439), False, 'from eyepy.core.quantifier import DefaultEyeQuantifier, EyeQuantifier\n'), ((15027, 15042), 'pathlib.Path', 'Path', (['data_path'], {}), '(data_path)\n', (15031, 15042), False, 'from pathlib import Path\n'), ((15753, 15773), 'eyepy.io.heyex.HeyexXmlReader', 'HeyexXmlReader', (['path'], {}), '(path)\n', (15767, 15773), False, 'from eyepy.io.heyex import HeyexXmlReader\n'), ((16068, 16088), 'eyepy.io.heyex.HeyexVolReader', 'HeyexVolReader', (['path'], {}), '(path)\n', (16082, 16088), False, 'from eyepy.io.heyex import HeyexVolReader\n'), ((16369, 16386), 'scipy.io.loadmat', 'sio.loadmat', (['path'], {}), '(path)\n', (16380, 16386), True, 'import scipy.io as sio\n'), ((16402, 16438), 'numpy.moveaxis', 'np.moveaxis', (["loaded['images']", '(-1)', '(0)'], {}), "(loaded['images'], -1, 0)\n", (16413, 16438), True, 'import numpy as np\n'), ((16455, 16493), 'numpy.swapaxes', 'np.swapaxes', (["loaded['layerMaps']", '(1)', '(2)'], {}), "(loaded['layerMaps'], 1, 2)\n", (16466, 16493), True, 'import numpy as np\n'), ((16949, 16959), 'pathlib.Path', 'Path', (['path'], {}), '(path)\n', (16953, 16959), False, 'from pathlib import Path\n'), ((19161, 19213), 'numpy.stack', 'np.stack', 
(['[x.scan_raw for x in self.bscans]'], {'axis': '(-1)'}), '([x.scan_raw for x in self.bscans], axis=-1)\n', (19169, 19213), True, 'import numpy as np\n'), ((19458, 19506), 'numpy.stack', 'np.stack', (['[x.scan for x in self.bscans]'], {'axis': '(-1)'}), '([x.scan for x in self.bscans], axis=-1)\n', (19466, 19506), True, 'import numpy as np\n'), ((20120, 20145), 'numpy.isnan', 'np.isnan', (['self.layers_raw'], {}), '(self.layers_raw)\n', (20128, 20145), True, 'import numpy as np\n'), ((25351, 25399), 'skimage.transform.estimate_transform', 'transform.estimate_transform', (['"""affine"""', 'src', 'dst'], {}), "('affine', src, dst)\n", (25379, 25399), False, 'from skimage import transform\n'), ((30781, 30881), 'matplotlib.patches.Rectangle', 'patches.Rectangle', (['up_right_corner', 'width', 'height'], {'linewidth': '(1)', 'edgecolor': '"""r"""', 'facecolor': '"""none"""'}), "(up_right_corner, width, height, linewidth=1, edgecolor=\n 'r', facecolor='none')\n", (30798, 30881), False, 'from matplotlib import cm, colors, patches\n'), ((31355, 31385), 'numpy.zeros', 'np.zeros', (['drusen[region].shape'], {}), '(drusen[region].shape)\n', (31363, 31385), True, 'import numpy as np\n'), ((32330, 32384), 'matplotlib.pyplot.subplots', 'plt.subplots', (['cols', 'rows'], {'figsize': '(rows * 4, cols * 4)'}), '(cols, rows, figsize=(rows * 4, cols * 4))\n', (32342, 32384), True, 'from matplotlib import pyplot as plt\n'), ((4556, 4571), 'numpy.nansum', 'np.nansum', (['data'], {}), '(data)\n', (4565, 4571), True, 'import numpy as np\n'), ((10030, 10039), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (10037, 10039), True, 'from matplotlib import pyplot as plt\n'), ((11195, 11222), 'numpy.zeros', 'np.zeros', (['self.drusen.shape'], {}), '(self.drusen.shape)\n', (11203, 11222), True, 'import numpy as np\n'), ((17580, 17604), 'numpy.sqrt', 'np.sqrt', (['(a ** 2 + b ** 2)'], {}), '(a ** 2 + b ** 2)\n', (17587, 17604), True, 'import numpy as np\n'), ((17862, 17876), 
'hashlib.sha1', 'hashlib.sha1', ([], {}), '()\n', (17874, 17876), False, 'import hashlib\n'), ((19939, 19986), 'numpy.stack', 'np.stack', (['[x.layers.data for x in self]'], {'axis': '(1)'}), '([x.layers.data for x in self], axis=1)\n', (19947, 19986), True, 'import numpy as np\n'), ((22011, 22050), 'numpy.save', 'np.save', (['self.drusen_path', 'self._drusen'], {}), '(self.drusen_path, self._drusen)\n', (22018, 22050), True, 'import numpy as np\n'), ((27196, 27205), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (27203, 27205), True, 'from matplotlib import pyplot as plt\n'), ((28072, 28081), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (28079, 28081), True, 'from matplotlib import pyplot as plt\n'), ((28324, 28333), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (28331, 28333), True, 'from matplotlib import pyplot as plt\n'), ((28983, 28992), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (28990, 28992), True, 'from matplotlib import pyplot as plt\n'), ((29055, 29165), 'matplotlib.patches.Circle', 'patches.Circle', (["circle['center']", "circle['radius']"], {'facecolor': '"""none"""', 'edgecolor': 'color', 'linewidth': 'linewidth'}), "(circle['center'], circle['radius'], facecolor='none',\n edgecolor=color, linewidth=linewidth)\n", (29069, 29165), False, 'from matplotlib import cm, colors, patches\n'), ((29588, 29597), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (29595, 29597), True, 'from matplotlib import pyplot as plt\n'), ((30462, 30471), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (30469, 30471), True, 'from matplotlib import pyplot as plt\n'), ((31222, 31231), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (31229, 31231), True, 'from matplotlib import pyplot as plt\n'), ((31402, 31462), 'numpy.logical_and', 'np.logical_and', (['(vmin < drusen[region])', '(drusen[region] < vmax)'], {}), '(vmin < drusen[region], drusen[region] < vmax)\n', (31416, 31462), True, 'import numpy as np\n'), ((31508, 31531), 
'mpl_toolkits.axes_grid1.make_axes_locatable', 'make_axes_locatable', (['ax'], {}), '(ax)\n', (31527, 31531), False, 'from mpl_toolkits.axes_grid1 import make_axes_locatable\n'), ((32399, 32428), 'numpy.errstate', 'np.errstate', ([], {'invalid': '"""ignore"""'}), "(invalid='ignore')\n", (32410, 32428), True, 'import numpy as np\n'), ((4368, 4397), 'numpy.less', 'np.less', (['data', '(0)'], {'where': '(~nans)'}), '(data, 0, where=~nans)\n', (4375, 4397), True, 'import numpy as np\n'), ((4415, 4461), 'numpy.greater', 'np.greater', (['data', 'self.max_height'], {'where': '(~nans)'}), '(data, self.max_height, where=~nans)\n', (4425, 4461), True, 'import numpy as np\n'), ((14979, 14990), 'pathlib.Path.home', 'Path.home', ([], {}), '()\n', (14988, 14990), False, 'from pathlib import Path\n'), ((17063, 17080), 'imageio.imread', 'imageio.imread', (['p'], {}), '(p)\n', (17077, 17080), False, 'import imageio\n'), ((20217, 20257), 'numpy.less', 'np.less', (['self.layers_raw', '(0)'], {'where': '(~nans)'}), '(self.layers_raw, 0, where=~nans)\n', (20224, 20257), True, 'import numpy as np\n'), ((20275, 20327), 'numpy.greater', 'np.greater', (['self.layers_raw', 'self.SizeZ'], {'where': '(~nans)'}), '(self.layers_raw, self.SizeZ, where=~nans)\n', (20285, 20327), True, 'import numpy as np\n'), ((21198, 21223), 'numpy.load', 'np.load', (['self.drusen_path'], {}), '(self.drusen_path)\n', (21205, 21223), True, 'import numpy as np\n'), ((23093, 23236), 'numpy.array', 'np.array', (['[oct_projection_shape[0] - 1, 0, oct_projection_shape[0] - 1, \n oct_projection_shape[1] - 1, 0, 0, 0, oct_projection_shape[1] - 1]'], {}), '([oct_projection_shape[0] - 1, 0, oct_projection_shape[0] - 1, \n oct_projection_shape[1] - 1, 0, 0, 0, oct_projection_shape[1] - 1])\n', (23101, 23236), True, 'import numpy as np\n'), ((23484, 23626), 'numpy.array', 'np.array', (['[0, 0, 0, oct_projection_shape[1] - 1, oct_projection_shape[0] - 1, 0, \n oct_projection_shape[0] - 1, oct_projection_shape[1] - 1]'], {}), 
'([0, 0, 0, oct_projection_shape[1] - 1, oct_projection_shape[0] - 1,\n 0, oct_projection_shape[0] - 1, oct_projection_shape[1] - 1])\n', (23492, 23626), True, 'import numpy as np\n'), ((24590, 24784), 'warnings.warn', 'warnings.warn', (['f"""Bscan positions on localizer image or the scale of the localizer image is missing. We assume that the B-Scans cover a square area and are equally spaced."""', 'UserWarning'], {}), "(\n f'Bscan positions on localizer image or the scale of the localizer image is missing. We assume that the B-Scans cover a square area and are equally spaced.'\n , UserWarning)\n", (24603, 24784), False, 'import warnings\n'), ((25941, 25968), 'numpy.sum', 'np.sum', (['self.drusen'], {'axis': '(0)'}), '(self.drusen, axis=0)\n', (25947, 25968), True, 'import numpy as np\n'), ((30193, 30229), 'numpy.array', 'np.array', (['[bscan.StartX, bscan.EndX]'], {}), '([bscan.StartX, bscan.EndX])\n', (30201, 30229), True, 'import numpy as np\n'), ((30263, 30299), 'numpy.array', 'np.array', (['[bscan.StartY, bscan.EndY]'], {}), '([bscan.StartY, bscan.EndY])\n', (30271, 30299), True, 'import numpy as np\n'), ((5219, 5233), 'numpy.rint', 'np.rint', (['layer'], {}), '(layer)\n', (5226, 5233), True, 'import numpy as np\n'), ((12062, 12128), 'warnings.warn', 'warnings.warn', (['f"""Layer \'{layer}\' has no Segmentation"""', 'UserWarning'], {}), '(f"Layer \'{layer}\' has no Segmentation", UserWarning)\n', (12075, 12128), False, 'import warnings\n'), ((16239, 16249), 'pathlib.Path', 'Path', (['path'], {}), '(path)\n', (16243, 16249), False, 'from pathlib import Path\n'), ((16855, 16865), 'pathlib.Path', 'Path', (['path'], {}), '(path)\n', (16859, 16865), False, 'from pathlib import Path\n'), ((20542, 20565), 'numpy.nansum', 'np.nansum', (['data[i, ...]'], {}), '(data[i, ...])\n', (20551, 20565), True, 'import numpy as np\n'), ((23960, 24245), 'numpy.array', 'np.array', (['[self[-1].StartY / self.ScaleXSlo, self[-1].StartX / self.ScaleYSlo, self[-\n 1].EndY / 
self.ScaleXSlo, self[-1].EndX / self.ScaleYSlo, self[0].\n StartY / self.ScaleXSlo, self[0].StartX / self.ScaleYSlo, self[0].EndY /\n self.ScaleXSlo, self[0].EndX / self.ScaleYSlo]'], {}), '([self[-1].StartY / self.ScaleXSlo, self[-1].StartX / self.\n ScaleYSlo, self[-1].EndY / self.ScaleXSlo, self[-1].EndX / self.\n ScaleYSlo, self[0].StartY / self.ScaleXSlo, self[0].StartX / self.\n ScaleYSlo, self[0].EndY / self.ScaleXSlo, self[0].EndX / self.ScaleYSlo])\n', (23968, 24245), True, 'import numpy as np\n'), ((31660, 31698), 'matplotlib.colors.Normalize', 'colors.Normalize', ([], {'vmin': 'vmin', 'vmax': 'vmax'}), '(vmin=vmin, vmax=vmax)\n', (31676, 31698), False, 'from matplotlib import cm, colors, patches\n'), ((21496, 21535), 'numpy.save', 'np.save', (['self.drusen_path', 'self._drusen'], {}), '(self.drusen_path, self._drusen)\n', (21503, 21535), True, 'import numpy as np\n'), ((24919, 24993), 'numpy.array', 'np.array', (['[0, 0, 0, b_width - 1, b_width - 1, 0, b_width - 1, b_width - 1]'], {}), '([0, 0, 0, b_width - 1, b_width - 1, 0, b_width - 1, b_width - 1])\n', (24927, 24993), True, 'import numpy as np\n'), ((9001, 9028), 'eyepy.core.config.SEG_MAPPING.values', 'config.SEG_MAPPING.values', ([], {}), '()\n', (9026, 9028), False, 'from eyepy.core import config\n')] |
import numpy as np
import pytest
from doddle.words import Dictionary, Word, WordSeries, load_dictionary
class TestWords:
    """Tests for the Word value type: comparison, splitting, iteration, hashing."""

    def test_word_compares_correctly(self) -> None:
        """Rich comparisons are case-insensitive and copies compare equal."""
        # Arrange
        snake_upper = Word("SNAKE")
        shark = Word("SHARK")
        snake_lower = Word("snake")
        snake_copy = Word(snake_upper)

        # Act + Assert
        assert snake_upper > shark
        assert snake_upper >= shark
        assert snake_upper == snake_lower
        assert shark < snake_lower
        assert shark <= snake_lower
        assert snake_copy == snake_upper
        assert len(snake_lower) == 5

    def test_split_word(self) -> None:
        """Splitting on a delimiter yields one Word per token."""
        # Arrange
        compound = Word("SNAKE,RAISE")

        # Act
        pieces = compound.split(",")

        # Assert
        assert len(pieces) == 2
        assert pieces[0] == Word("snake")
        assert pieces[1] == Word("raise")

    def test_word_iteration(self) -> None:
        """Iterating a Word yields the characters of its repr in order."""
        # Arrange
        word = Word("snake")

        # Act
        rebuilt = "".join(word)

        # Assert
        assert rebuilt == repr(word)

    def test_word_hashing(self) -> None:
        """Equal words hash identically, so a set de-duplicates them."""
        # Arrange
        originals = [Word("SNAKE"), Word("SHARK"), Word("snake")]
        originals.append(Word(originals[0]))

        # Act
        unique = set(originals)

        # Assert
        assert len(unique) == 2
        for word in originals:
            assert word in unique
class TestWordSeries:
    """Tests for WordSeries slicing, lookup, containment and display."""

    def test_wordseries_regular_index_slice(self) -> None:
        """Slicing a fresh series keeps the original positional index."""
        # Arrange
        alphabet = [chr(i + ord("A")) for i in range(26)]
        series = WordSeries(alphabet)
        expected_index = np.array([2, 3, 4])
        expected_words = np.array([Word("C"), Word("D"), Word("E")])

        # Act
        sliced = series[2:5]

        # Assert
        assert np.all(sliced.index == expected_index)
        assert np.all(sliced.words == expected_words)

    def test_wordseries_irregular_index_slice(self) -> None:
        """Slicing a subsetted series preserves the parent's index values."""
        # Arrange
        alphabet = [chr(i + ord("A")) for i in range(26)]
        series = WordSeries(alphabet)[np.arange(2, 26, 3)]
        expected_index = np.array([8, 11, 14])
        expected_words = np.array([Word(c) for c in list("ILO")])

        # Act
        sliced = series[2:5]

        # Assert
        assert np.all(sliced.index == expected_index)
        assert np.all(sliced.words == expected_words)

    def test_wordseries_find_index(self) -> None:
        """find_index returns the position, -1 when absent, and accepts arrays."""
        # Arrange
        alphabet = [chr(i + ord("A")) for i in range(26)]
        series = WordSeries(alphabet)

        # Act
        index1 = series.find_index("C")
        index2 = series.find_index("N/A")
        index3 = series.find_index(np.array(["C", "E"]))

        # Assert
        assert index1 == +2
        assert index2 == -1
        assert np.all(index3 == np.array([2, 4]))

    def test_wordseries_contains(self) -> None:
        """Membership works for str and Word alike, case-insensitively."""
        # Arrange
        series = WordSeries(["XYZ", "ABC", "PQR"])

        # Act
        contains1 = "XYZ" in series
        contains2 = "abc" in series
        contains3 = "PQR" in series  # fix: removed an accidental duplicate of this line
        contains4 = "nah" in series

        contains1b = Word("XYZ") in series
        contains2b = Word("abc") in series
        contains3b = Word("PQR") in series
        contains4b = Word("nah") in series

        # Assert
        assert contains1
        assert contains2
        assert contains3
        assert not contains4

        assert contains1b
        assert contains2b
        assert contains3b
        assert not contains4b

    def test_wordseries_iloc(self) -> None:
        """iloc[1] returns the word at sorted position 1."""
        # Arrange
        series = WordSeries(["XYZ", "ABC", "PQR"])
        expected = Word("PQR")

        # Act
        actual = series.iloc[1]

        # Assert
        assert actual == expected

    def test_wordseries_repr(self) -> None:
        """repr and str agree and show the sorted, indexed word list."""
        # Arrange
        series = WordSeries(["XYZ", "ABC", "PQR"])
        expected = "[0] ABC\n[1] PQR\n[2] XYZ"
        alphabet_long = [chr(i + ord("A")) for i in range(26)] * 5
        series_long = WordSeries(alphabet_long)

        # Act
        actual_repr = repr(series)
        actual_str = str(series)

        # Assert
        assert actual_repr == expected
        assert actual_str == actual_repr
        assert repr(series_long) == str(series_long)

    def test_wordseries_iloc_raises_if_not_integer(self) -> None:
        """iloc rejects non-integer keys with ValueError."""
        # Arrange
        series = WordSeries(["XYZ", "ABC", "PQR"])

        # Act + Assert
        with pytest.raises(ValueError):
            series.iloc["ABC"]

    def test_wordseries_indexing_raises(self) -> None:
        """Indexing with a list of strings raises ValueError."""
        # Arrange
        series = WordSeries(["XYZ", "ABC", "PQR"])

        # Act + Assert
        with pytest.raises(ValueError):
            series[["ABC", "XYZ"]]
class TestDictionary:
    """Tests for Dictionary construction and the bundled word lists."""

    def test_wordseries_regular_index_slice(self) -> None:
        """word_length is derived from the stored word lists.

        NOTE(review): the name looks copy-pasted from TestWordSeries — it
        actually exercises Dictionary.word_length; consider renaming.
        """
        # Arrange
        all_words = WordSeries(["XYZ", "ABC", "PQR", "DEF"])
        common_words = WordSeries(["XYZ", "ABC", "DEF"])
        dictionary = Dictionary(all_words, common_words)

        # Act + Assert
        assert dictionary.word_length == 3

    def test_load_dictionary_official(self) -> None:
        """The official 5-letter dictionary has the expected word counts."""
        # Arrange + Act
        dictionary = load_dictionary(5)
        all_words, common_words = dictionary.words

        # Assert
        assert len(all_words) == 12972
        assert len(common_words) == 2315

    def test_load_dictionary_unofficial(self) -> None:
        """The unofficial 6-letter dictionary has the expected word counts."""
        # Arrange + Act
        dictionary = load_dictionary(6)
        all_words, common_words = dictionary.words

        # Assert
        assert len(all_words) == 15787
        assert len(common_words) == 4563
| [
"doddle.words.load_dictionary",
"doddle.words.Dictionary",
"doddle.words.WordSeries",
"numpy.array",
"doddle.words.Word",
"pytest.raises",
"numpy.all",
"numpy.arange"
] | [((210, 223), 'doddle.words.Word', 'Word', (['"""SNAKE"""'], {}), "('SNAKE')\n", (214, 223), False, 'from doddle.words import Dictionary, Word, WordSeries, load_dictionary\n'), ((240, 253), 'doddle.words.Word', 'Word', (['"""SHARK"""'], {}), "('SHARK')\n", (244, 253), False, 'from doddle.words import Dictionary, Word, WordSeries, load_dictionary\n'), ((270, 283), 'doddle.words.Word', 'Word', (['"""snake"""'], {}), "('snake')\n", (274, 283), False, 'from doddle.words import Dictionary, Word, WordSeries, load_dictionary\n'), ((300, 311), 'doddle.words.Word', 'Word', (['word1'], {}), '(word1)\n', (304, 311), False, 'from doddle.words import Dictionary, Word, WordSeries, load_dictionary\n'), ((619, 638), 'doddle.words.Word', 'Word', (['"""SNAKE,RAISE"""'], {}), "('SNAKE,RAISE')\n", (623, 638), False, 'from doddle.words import Dictionary, Word, WordSeries, load_dictionary\n'), ((659, 672), 'doddle.words.Word', 'Word', (['"""snake"""'], {}), "('snake')\n", (663, 672), False, 'from doddle.words import Dictionary, Word, WordSeries, load_dictionary\n'), ((693, 706), 'doddle.words.Word', 'Word', (['"""raise"""'], {}), "('raise')\n", (697, 706), False, 'from doddle.words import Dictionary, Word, WordSeries, load_dictionary\n'), ((955, 968), 'doddle.words.Word', 'Word', (['"""snake"""'], {}), "('snake')\n", (959, 968), False, 'from doddle.words import Dictionary, Word, WordSeries, load_dictionary\n'), ((1185, 1198), 'doddle.words.Word', 'Word', (['"""SNAKE"""'], {}), "('SNAKE')\n", (1189, 1198), False, 'from doddle.words import Dictionary, Word, WordSeries, load_dictionary\n'), ((1215, 1228), 'doddle.words.Word', 'Word', (['"""SHARK"""'], {}), "('SHARK')\n", (1219, 1228), False, 'from doddle.words import Dictionary, Word, WordSeries, load_dictionary\n'), ((1245, 1258), 'doddle.words.Word', 'Word', (['"""snake"""'], {}), "('snake')\n", (1249, 1258), False, 'from doddle.words import Dictionary, Word, WordSeries, load_dictionary\n'), ((1275, 1286), 'doddle.words.Word', 
'Word', (['word1'], {}), '(word1)\n', (1279, 1286), False, 'from doddle.words import Dictionary, Word, WordSeries, load_dictionary\n'), ((1699, 1719), 'doddle.words.WordSeries', 'WordSeries', (['alphabet'], {}), '(alphabet)\n', (1709, 1719), False, 'from doddle.words import Dictionary, Word, WordSeries, load_dictionary\n'), ((1745, 1764), 'numpy.array', 'np.array', (['[2, 3, 4]'], {}), '([2, 3, 4])\n', (1753, 1764), True, 'import numpy as np\n'), ((1911, 1949), 'numpy.all', 'np.all', (['(sliced.index == expected_index)'], {}), '(sliced.index == expected_index)\n', (1917, 1949), True, 'import numpy as np\n'), ((1965, 2003), 'numpy.all', 'np.all', (['(sliced.words == expected_words)'], {}), '(sliced.words == expected_words)\n', (1971, 2003), True, 'import numpy as np\n'), ((2233, 2254), 'numpy.array', 'np.array', (['[8, 11, 14]'], {}), '([8, 11, 14])\n', (2241, 2254), True, 'import numpy as np\n'), ((2398, 2436), 'numpy.all', 'np.all', (['(sliced.index == expected_index)'], {}), '(sliced.index == expected_index)\n', (2404, 2436), True, 'import numpy as np\n'), ((2452, 2490), 'numpy.all', 'np.all', (['(sliced.words == expected_words)'], {}), '(sliced.words == expected_words)\n', (2458, 2490), True, 'import numpy as np\n'), ((2642, 2662), 'doddle.words.WordSeries', 'WordSeries', (['alphabet'], {}), '(alphabet)\n', (2652, 2662), False, 'from doddle.words import Dictionary, Word, WordSeries, load_dictionary\n'), ((3025, 3058), 'doddle.words.WordSeries', 'WordSeries', (["['XYZ', 'ABC', 'PQR']"], {}), "(['XYZ', 'ABC', 'PQR'])\n", (3035, 3058), False, 'from doddle.words import Dictionary, Word, WordSeries, load_dictionary\n'), ((3736, 3769), 'doddle.words.WordSeries', 'WordSeries', (["['XYZ', 'ABC', 'PQR']"], {}), "(['XYZ', 'ABC', 'PQR'])\n", (3746, 3769), False, 'from doddle.words import Dictionary, Word, WordSeries, load_dictionary\n'), ((3789, 3800), 'doddle.words.Word', 'Word', (['"""PQR"""'], {}), "('PQR')\n", (3793, 3800), False, 'from doddle.words import Dictionary, 
Word, WordSeries, load_dictionary\n'), ((3980, 4013), 'doddle.words.WordSeries', 'WordSeries', (["['XYZ', 'ABC', 'PQR']"], {}), "(['XYZ', 'ABC', 'PQR'])\n", (3990, 4013), False, 'from doddle.words import Dictionary, Word, WordSeries, load_dictionary\n'), ((4170, 4195), 'doddle.words.WordSeries', 'WordSeries', (['alphabet_long'], {}), '(alphabet_long)\n', (4180, 4195), False, 'from doddle.words import Dictionary, Word, WordSeries, load_dictionary\n'), ((4532, 4565), 'doddle.words.WordSeries', 'WordSeries', (["['XYZ', 'ABC', 'PQR']"], {}), "(['XYZ', 'ABC', 'PQR'])\n", (4542, 4565), False, 'from doddle.words import Dictionary, Word, WordSeries, load_dictionary\n'), ((4752, 4785), 'doddle.words.WordSeries', 'WordSeries', (["['XYZ', 'ABC', 'PQR']"], {}), "(['XYZ', 'ABC', 'PQR'])\n", (4762, 4785), False, 'from doddle.words import Dictionary, Word, WordSeries, load_dictionary\n'), ((5006, 5046), 'doddle.words.WordSeries', 'WordSeries', (["['XYZ', 'ABC', 'PQR', 'DEF']"], {}), "(['XYZ', 'ABC', 'PQR', 'DEF'])\n", (5016, 5046), False, 'from doddle.words import Dictionary, Word, WordSeries, load_dictionary\n'), ((5070, 5103), 'doddle.words.WordSeries', 'WordSeries', (["['XYZ', 'ABC', 'DEF']"], {}), "(['XYZ', 'ABC', 'DEF'])\n", (5080, 5103), False, 'from doddle.words import Dictionary, Word, WordSeries, load_dictionary\n'), ((5118, 5153), 'doddle.words.Dictionary', 'Dictionary', (['all_words', 'common_words'], {}), '(all_words, common_words)\n', (5128, 5153), False, 'from doddle.words import Dictionary, Word, WordSeries, load_dictionary\n'), ((5357, 5378), 'doddle.words.load_dictionary', 'load_dictionary', (['size'], {}), '(size)\n', (5372, 5378), False, 'from doddle.words import Dictionary, Word, WordSeries, load_dictionary\n'), ((5655, 5676), 'doddle.words.load_dictionary', 'load_dictionary', (['size'], {}), '(size)\n', (5670, 5676), False, 'from doddle.words import Dictionary, Word, WordSeries, load_dictionary\n'), ((2166, 2186), 'doddle.words.WordSeries', 'WordSeries', 
(['alphabet'], {}), '(alphabet)\n', (2176, 2186), False, 'from doddle.words import Dictionary, Word, WordSeries, load_dictionary\n'), ((2187, 2206), 'numpy.arange', 'np.arange', (['(2)', '(26)', '(3)'], {}), '(2, 26, 3)\n', (2196, 2206), True, 'import numpy as np\n'), ((2795, 2815), 'numpy.array', 'np.array', (["['C', 'E']"], {}), "(['C', 'E'])\n", (2803, 2815), True, 'import numpy as np\n'), ((3275, 3286), 'doddle.words.Word', 'Word', (['"""XYZ"""'], {}), "('XYZ')\n", (3279, 3286), False, 'from doddle.words import Dictionary, Word, WordSeries, load_dictionary\n'), ((3318, 3329), 'doddle.words.Word', 'Word', (['"""abc"""'], {}), "('abc')\n", (3322, 3329), False, 'from doddle.words import Dictionary, Word, WordSeries, load_dictionary\n'), ((3361, 3372), 'doddle.words.Word', 'Word', (['"""PQR"""'], {}), "('PQR')\n", (3365, 3372), False, 'from doddle.words import Dictionary, Word, WordSeries, load_dictionary\n'), ((3404, 3415), 'doddle.words.Word', 'Word', (['"""nah"""'], {}), "('nah')\n", (3408, 3415), False, 'from doddle.words import Dictionary, Word, WordSeries, load_dictionary\n'), ((4603, 4628), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (4616, 4628), False, 'import pytest\n'), ((4823, 4848), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (4836, 4848), False, 'import pytest\n'), ((1664, 1680), 'numpy.arange', 'np.arange', (['(0)', '(26)'], {}), '(0, 26)\n', (1673, 1680), True, 'import numpy as np\n'), ((1800, 1809), 'doddle.words.Word', 'Word', (['"""C"""'], {}), "('C')\n", (1804, 1809), False, 'from doddle.words import Dictionary, Word, WordSeries, load_dictionary\n'), ((1811, 1820), 'doddle.words.Word', 'Word', (['"""D"""'], {}), "('D')\n", (1815, 1820), False, 'from doddle.words import Dictionary, Word, WordSeries, load_dictionary\n'), ((1822, 1831), 'doddle.words.Word', 'Word', (['"""E"""'], {}), "('E')\n", (1826, 1831), False, 'from doddle.words import Dictionary, Word, WordSeries, 
load_dictionary\n'), ((2131, 2147), 'numpy.arange', 'np.arange', (['(0)', '(26)'], {}), '(0, 26)\n', (2140, 2147), True, 'import numpy as np\n'), ((2290, 2297), 'doddle.words.Word', 'Word', (['c'], {}), '(c)\n', (2294, 2297), False, 'from doddle.words import Dictionary, Word, WordSeries, load_dictionary\n'), ((2607, 2623), 'numpy.arange', 'np.arange', (['(0)', '(26)'], {}), '(0, 26)\n', (2616, 2623), True, 'import numpy as np\n'), ((2923, 2939), 'numpy.array', 'np.array', (['[2, 4]'], {}), '([2, 4])\n', (2931, 2939), True, 'import numpy as np\n'), ((4126, 4142), 'numpy.arange', 'np.arange', (['(0)', '(26)'], {}), '(0, 26)\n', (4135, 4142), True, 'import numpy as np\n')] |
import numpy as np
from ..optics import OpticalElement, Wavefront
class MonochromaticPropagator(OpticalElement):
    """Base class for optical propagators tied to a single wavelength.

    Polychromatic use is handled by make_propagator(), which builds and
    caches one monochromatic propagator per wavelength it encounters.
    """
    def __init__(self, wavelength):
        # The single wavelength this propagator is specialized for.
        self.wavelength = wavelength
def make_propagator(monochromatic_propagator):
    """Wrap a monochromatic propagator into a polychromatic OpticalElement.

    The returned class lazily instantiates one monochromatic propagator per
    wavelength it encounters, caching up to 50 of them and evicting the
    oldest entry first.
    """
    class Propagator(OpticalElement):
        def __init__(self, *args, **kwargs):
            # Parallel lists acting as the wavelength -> propagator cache.
            self.wavelengths = []
            self.monochromatic_propagators = []
            # Constructor arguments forwarded to every monochromatic instance.
            self.monochromatic_args = args
            self.monochromatic_kwargs = kwargs

        def get_monochromatic_propagator(self, wavelength):
            # Reuse a cached propagator when one lies within a relative
            # wavelength tolerance of 1e-6.
            if self.wavelengths:
                closest = np.argmin(np.abs(wavelength - np.array(self.wavelengths)))
                delta = np.abs(wavelength - self.wavelengths[closest])
                if (delta / wavelength) < 1e-6:
                    return self.monochromatic_propagators[closest]

            propagator = monochromatic_propagator(
                *self.monochromatic_args,
                wavelength=wavelength,
                **self.monochromatic_kwargs)

            self.wavelengths.append(wavelength)
            self.monochromatic_propagators.append(propagator)

            # Bound the cache size by dropping the oldest entry.
            if len(self.monochromatic_propagators) > 50:
                self.wavelengths.pop(0)
                self.monochromatic_propagators.pop(0)

            return propagator

        def __call__(self, wavefront):
            return self.forward(wavefront)

        def forward(self, wavefront):
            return self.get_monochromatic_propagator(wavefront.wavelength).forward(wavefront)

        def backward(self, wavefront):
            return self.get_monochromatic_propagator(wavefront.wavelength).backward(wavefront)

        def get_transformation_matrix_forward(self, wavelength=1):
            return self.get_monochromatic_propagator(wavelength).get_transformation_matrix_forward()

        def get_transformation_matrix_backward(self, wavelength=1):
            return self.get_monochromatic_propagator(wavelength).get_transformation_matrix_backward()

    return Propagator
"numpy.abs",
"numpy.array"
] | [((662, 701), 'numpy.abs', 'np.abs', (['(wavelength - wavelength_closest)'], {}), '(wavelength - wavelength_closest)\n', (668, 701), True, 'import numpy as np\n'), ((564, 590), 'numpy.array', 'np.array', (['self.wavelengths'], {}), '(self.wavelengths)\n', (572, 590), True, 'import numpy as np\n')] |
# Copyright 2021 <NAME>
# All rights reserved.
import numpy
import pyaudio
from snyth import Settings
from snyth.debug.samples import get_voice_samples
def play_voice_sync(audio, voice, realtime=True, seconds=1):
    """Synthesize a voice and stream it to an output device, blocking.

    Opens a mono float32 PyAudio output stream and writes each generated
    sample buffer, waiting for device buffer space as needed.
    """
    sample_rate = Settings.instance().sample_rate
    frames_per_buffer = sample_rate // voice.frame_rate
    stream = audio.open(rate=sample_rate,
                        channels=1,
                        frames_per_buffer=frames_per_buffer,
                        format=pyaudio.paFloat32,
                        output=True)

    for samples in voice.generate(realtime=realtime, seconds=seconds):
        offset = 0
        total = len(samples)
        while offset < total:
            # Only write as much as the device can currently accept.
            chunk = min(total - offset, stream.get_write_available())
            stream.write(samples[offset:offset + chunk])
            offset += chunk

    stream.close()
def play_voice_async(audio, voice, realtime=True, seconds=1):
    """Stream a voice to an output device without blocking.

    Opens a callback-driven PyAudio output stream that pulls sample buffers
    from the voice generator on demand.  Returns the started stream; the
    caller is responsible for stopping/closing it.
    """
    generator = voice.generate(realtime=realtime, seconds=seconds)
    # Leftover samples from the most recent generator buffer, plus the read
    # offset into it.  Both are shared with the callback via 'nonlocal'.
    samples = numpy.array([], dtype=numpy.float32)
    samples_offset = 0
    def fill_stream(in_data, frame_count, time_info, status_flags):
        # PyAudio stream callback: must return exactly frame_count samples
        # with paContinue, or a short buffer with paComplete to end playback.
        nonlocal samples
        nonlocal samples_offset
        result = samples[samples_offset:samples_offset + frame_count]
        samples_offset += frame_count
        while len(result) < frame_count:
            try:
                samples = next(generator)
            except StopIteration:
                # Generator exhausted: return the short buffer and signal
                # completion.
                return result, pyaudio.paComplete
            to_take = frame_count - len(result)
            result = numpy.concatenate((result, samples[:to_take]))
            samples_offset = to_take
        return result, pyaudio.paContinue
    frames_per_buffer = Settings.instance().sample_rate//voice.frame_rate
    stream = audio.open(rate=Settings.instance().sample_rate, channels=1, format=pyaudio.paFloat32, output=True, frames_per_buffer=frames_per_buffer, stream_callback=fill_stream, start=True)
    return stream
| [
"numpy.array",
"snyth.Settings.instance",
"numpy.concatenate"
] | [((963, 999), 'numpy.array', 'numpy.array', (['[]'], {'dtype': 'numpy.float32'}), '([], dtype=numpy.float32)\n', (974, 999), False, 'import numpy\n'), ((240, 259), 'snyth.Settings.instance', 'Settings.instance', ([], {}), '()\n', (257, 259), False, 'from snyth import Settings\n'), ((1526, 1572), 'numpy.concatenate', 'numpy.concatenate', (['(result, samples[:to_take])'], {}), '((result, samples[:to_take]))\n', (1543, 1572), False, 'import numpy\n'), ((1686, 1705), 'snyth.Settings.instance', 'Settings.instance', ([], {}), '()\n', (1703, 1705), False, 'from snyth import Settings\n'), ((319, 338), 'snyth.Settings.instance', 'Settings.instance', ([], {}), '()\n', (336, 338), False, 'from snyth import Settings\n'), ((1765, 1784), 'snyth.Settings.instance', 'Settings.instance', ([], {}), '()\n', (1782, 1784), False, 'from snyth import Settings\n')] |
# Author : <NAME>
# Date : 5/31/19
# License: MIT
# Purpose:
#
# Notes :
#
# Questions:
#
# References :
#
import sys
import re
import datetime
import random
import copy
import operator
import numpy
from classes import GTF_ENTRY
from classes import FASTQ_READ
from classes import CHROMOSOME
from classes import GENE
from classes import TRANSCRIPT
from classes import INSERT
from classes import EXON
from classes import reverse_complement
from error import exit_with_error
#********************************************************************************
#********************************************************************************
#****************************** FUNCTIONS **************************************
#********************************************************************************
#********************************************************************************
def read_gtf(PathToGtf = None):
    """
    ARGS:
        PathToGtf = path to gene transfer file (must end in '.gtf')
    RETURN:
        A list of GTF_ENTRYs, one per data line.
    DESCRIPTION:
        Reads in a gtf file, skipping '#' comment lines.  Exits with an
        error if the path does not end in '.gtf' (the old check only looked
        at the last 3 characters, so e.g. 'foogtf' was wrongly accepted) or
        if a line does not have exactly 9 tab separated columns.
    DEBUG:
        Can reproduce input gtf file. Works as expected.
    FUTURE:
    """
    if(not PathToGtf.endswith(".gtf")):
        exit_with_error("ERROR! You did not pass a file with the .gtf extention\n")
    gtfList = []
    timeBegin = datetime.datetime.now()
    # 'with' guarantees the file is closed even if exit_with_error raises
    with open(PathToGtf, 'r') as gtfFile:
        for line in gtfFile:
            if(line[0] == "#"):         # header / comment line
                continue
            line = line.split("\t")
            # Format check : a GTF data line has exactly 9 tab separated fields
            if(len(line) != 9):
                exit_with_error("ERROR! There should be 9 tab separated columns in a"
                                " GTF file\nYou only have %i\n"%(len(line)))
            gtfEntry = GTF_ENTRY(Chromosome = line[0], Source = line[1], EntryType = line[2],
                                  Start = line[3], Stop = line[4], Score = line[5], Strand = line[6],
                                  Frame = line[7], Attribute = line[8])
            gtfList.append(gtfEntry)
    timeEnd = datetime.datetime.now()
    print("read_gtf() run time = %s"%(timeEnd - timeBegin))
    return gtfList
def get_exon_list(gtfList):
    """
    ARGS:
        gtfList = list of GTF_ENTRYs
    RETURN:
        A list of EXONs, one per 'exon' entry in gtfList.
    DESCRIPTION:
        Note : this list is not unique. I.e. there are exons that will be duplicated.
        I make no effort to correct / eliminate duplicates b/c it would complicate
        mapping trans.exonIdxList for all the TRANSCRIPTs
    DEBUG:
    FUTURE:
    """
    timeBegin = datetime.datetime.now()
    # Comprehension replaces the original manual append loop
    exonList = [EXON(gtfEntry) for gtfEntry in gtfList if gtfEntry.etype == "exon"]
    timeEnd = datetime.datetime.now()
    print("get_exon_list() run time = %s"%(timeEnd - timeBegin))
    return exonList
def get_transcript_list(gtfList, allExonList):
    """
    ARGS:
        gtfList     = list of GTF_ENTRYs
        allExonList = list of EXON entries.  NOTE(review): this parameter is
                      never used in the body; kept for backward compatibility
                      with existing callers.
    RETURN:
        A list of TRANSCRIPTs, one per 'transcript' entry in gtfList.
    DESCRIPTION:
        Builds TRANSCRIPT objects from the gtf entries.  The original
        O(N**2) concern no longer applies: this is a single O(N) pass.
    DEBUG:
        Appears to correctly read in transcripts and associated exons.
        I checked by eye on first 87 lines of grch38.83.gtf
    FUTURE:
    """
    timeBegin = datetime.datetime.now()
    transList = [TRANSCRIPT(gtfEntry) for gtfEntry in gtfList
                 if gtfEntry.etype == "transcript"]
    timeEnd = datetime.datetime.now()
    print("get_transcript_list() run time = %s"%(timeEnd - timeBegin))
    return transList
def get_gene_list(gtfList, allTransList):
    """
    ARGS:
        gtfList      = list of GTF_ENTRYs
        allTransList = list of TRANSCRIPT entries.  NOTE(review): this
                       parameter is never used in the body; kept for backward
                       compatibility with existing callers.
    RETURN:
        A list of GENEs, one per 'gene' entry in gtfList.
    DESCRIPTION:
        Builds GENE objects from the gtf entries.  The original O(N**2)
        concern no longer applies: this is a single O(N) pass.
    DEBUG:
        Appears to correctly read in genes and associated transcripts.
        I checked by eye on first 87 lines of grch38.83.gtf
    FUTURE:
    """
    timeBegin = datetime.datetime.now()
    geneList = [GENE(gtfEntry) for gtfEntry in gtfList if gtfEntry.etype == "gene"]
    timeEnd = datetime.datetime.now()
    print("get_gene_list() run time = %s"%(timeEnd - timeBegin))
    return geneList
def read_genome(seqPath):
    """
    ARGS:
        seqPath = path to a whole genome fasta file (ensembl format)
    RETURN:
        A list of CHROMOSOMEs.
    DESCRIPTION:
        Reads in a _whole_ genome fasta file and returns a list of
        CHROMOSOMEs.  All sequences are stored upper case.
        Fixes vs. original:
          1. sequence is accumulated in a list and joined once; the old
             'seq += line' was quadratic in sequence length
          2. blank lines are skipped instead of being treated as record
             delimiters (the old code emitted bogus empty chromosomes)
          3. the file handle is closed via 'with' even on error
    DEBUG:
        For a shortened whole genome fasta file, it correctly reads it in.
    FUTURE:
    """
    chrmList = []
    chrm = None
    seqParts = []
    timeBegin = datetime.datetime.now()
    with open(seqPath, "r") as seqFile:
        for line in seqFile:
            line = line.strip()
            if(line == "" or line[0] == "#"):   # skip blanks and comments
                continue
            if(line[0] == '>'):
                # Finish the previous chromosome before starting a new one
                if(chrm is not None):
                    chrmList.append(CHROMOSOME(chrm, "".join(seqParts)))
                    seqParts = []
                chrm = line.split(' ')[0][1:]    # chromosome name, skip '>'
            else:
                seqParts.append(line.upper())
    if(chrm is not None):
        chrmList.append(CHROMOSOME(chrm, "".join(seqParts)))   # final chromosome
    timeEnd = datetime.datetime.now()
    print("read_genome() run time = %s"%(timeEnd - timeBegin))
    return chrmList
def get_exon_seq(exonList, chrmList):
    """
    ARGS:
        exonList = list of EXONs (modified in place)
        chrmList = list of CHROMOSOMEs
    RETURN:
        None.  Sets exon.seq for every exon whose chromosome is found.
    DESCRIPTION:
        Gets sequences for exons from the chromosome list.  For '-' strand
        exons the reverse complement is stored and start/stop are swapped.
        Improvement: chromosomes are indexed by name in a dict once, so the
        pass is O(len(exonList)) instead of the original
        O(len(exonList) * len(chrmList)).  Assumes chromosome names are
        unique (as in a genome fasta).
    DEBUG:
        Spot checked 3 exons, are all ok. More testing needed, however it is
        challenging to get a list of all the exons (incl. seqs) in a single file
    FUTURE:
    """
    timeBegin = datetime.datetime.now()
    chrmByName = {chrm.chrm : chrm for chrm in chrmList}
    for exon in exonList:
        chrm = chrmByName.get(exon.chrm)
        if(chrm is None):
            continue                    # exon on a chromosome we did not read
        chrmLen = len(chrm.seq)
        start = exon.start - 1          # -1 b/c python is 0 indexed, gtf file is not
        end = exon.stop
        if(start >= chrmLen or end >= chrmLen):
            exit_with_error("ERROR!! start (%i) or stop (%i) Position > "
                            "chromosome length (%i)\n"%(start, end, chrmLen))
        if(exon.strand == '+'):
            exon.seq = chrm.seq[start:end]
        elif(exon.strand == '-'):
            # Store reverse complement and swap coords so the exon reads in
            # transcript orientation
            exon.seq = reverse_complement(chrm.seq[start:end])
            exon.start, exon.stop = exon.stop, exon.start
        else:
            exit_with_error("ERROR! strand char = %s is invalid", exon.strand)
    timeEnd = datetime.datetime.now()
    print("get_exon_seq() run time = %s"%(timeEnd - timeBegin))
def get_trans_seq(transList, exonList):
    """
    ARGS:
        transList = list of TRANSCRIPTs (modified in place)
        exonList  = list of EXONs, numbered 1..N within each transcript
    RETURN:
        None.  Sets trans.seq to the concatenation of its exon sequences.
    DESCRIPTION:
        Gets sequences for transcripts by concatenating exon sequences in
        exon-number order.  Exits with an error if a transcript's exons are
        not stored consecutively (1, 2, 3, ...).
        Improvement: exons are grouped by transcript ID in one pass, so this
        runs in O(len(exonList) + len(transList)) instead of the original
        O(len(transList) * len(exonList)); per-transcript sequences are
        joined once instead of repeated '+=' concatenation.
    DEBUG:
        Tested on 2 transcripts, more testing required. Getting a transcript
        file with the transcripts and sequences is challenging though
    FUTURE:
    """
    timeBegin = datetime.datetime.now()
    # Group exons by transcript, preserving their file order
    exonsByTrans = {}
    for exon in exonList:
        exonsByTrans.setdefault(exon.transID, []).append(exon)
    for trans in transList:
        prevExonNum = 0     # verifies exon numbers arrive as 1, 2, 3, ...
        seqParts = []
        for exon in exonsByTrans.get(trans.transID, []):
            exonNum = int(exon.exonNum)
            if(exonNum - prevExonNum != 1):
                exit_with_error("ERROR! exon numbers for %s are loaded out of "
                                "order!\n"%(trans.transID))
            seqParts.append(exon.seq)
            prevExonNum = exonNum
        if(seqParts):
            joined = "".join(seqParts)
            # Preserve the original append-if-already-set behavior
            trans.seq = joined if trans.seq is None else trans.seq + joined
    timeEnd = datetime.datetime.now()
    print("get_trans_seq() run time = %s"%(timeEnd - timeBegin))
def get_list_of_unique_gtf_features(gtfList):
    """
    ARGS:
        gtfList : list of all GTF_ENTRYs
    RETURN:
        uniqueFeatureList : list of unique features, in first-seen order
    DESCRIPTION:
        Finds all the unique features (ie column 3) of the GTF file.
        Membership is tracked in a set for O(1) lookups; the previous
        version scanned the output list per entry, i.e. O(n^2) on a
        multi-million-line GTF.
    DEBUG:
    FUTURE:
    """
    seen = set()
    uniqueFeatureList = []
    for gtfEntry in gtfList:
        if gtfEntry.etype not in seen:
            seen.add(gtfEntry.etype)
            uniqueFeatureList.append(gtfEntry.etype)
    return uniqueFeatureList
def link_exons_trans_and_genes(gtfList, exonList, transList, geneList):
    """
    ARGS:
        gtfList : list of all GTF_ENTRYs
        exonList : list of all EXONS
        transList: list of all TRANSCRIPTS
        geneList : list of all GENES
    RETURN:
        None. geneList[*].transList / transIdxList and transList[*].exonList /
        exonIdxList are filled in, and every TRANSCRIPT.seq is (re)built by
        concatenating its exons' sequences.
    DESCRIPTION:
        Loops through gtfList and captures the indices of exons in exonList and passes
        it to the transcripts in transList. Also captures indices of transcripts in
        transList and passes it to genes in geneList.
        Relies on gtfList, geneList, transList and exonList all being in the
        same (file) order; any mismatch is a fatal error.
        Does this in one pass through gtfList and scales roughly O(N). Should be faster
        than previous versions.
    DEBUG:
        1. I validated by using print_transcripts_with_seqs() and comparing against the
           biomart download for chromosome 1. My data file was _identical_ to biomart's.
           For how this was done, see the debug comment in print_transcripts_with_seqs()
        2. Checked Transcript.seq for reverse strand ('-') transcript. Used
           ENST00000488147 it is correct.
    FUTURE:
    """
    gIdx = 0        # Gene index,  for geneList
    tIdx = 0        # Transcript index, for transList
    eIdx = 0        # Exon index, for exonList
    gtfListLen = len(gtfList)
    timeBegin = datetime.datetime.now()
    # Ugly / non-pythonic b/c cant find cleaner way of accessing the next gtfEntry in the list
    for i in range(len(gtfList)):
        if(gtfList[i].etype == "gene"):
            # Check that genes in geneList are same order as gtfList
            if(gtfList[i].geneID != geneList[gIdx].geneID):
                # NB : this diagnostic used to reference 'gtfEntry', a name not
                # defined in this scope, so the error path itself raised NameError.
                exit_with_error(
                    "ERROR! gtfList[%i].geneID = %s and geneList[%i].geneID = %s"%(
                    i, gtfList[i].geneID, gIdx, geneList[gIdx].geneID)
                )
            j = i + 1
            # Get constituent transcripts between gene entries
            while(gtfList[j].etype != "gene"):
                if(gtfList[j].etype == "transcript"):
                    # Check that transcripts in transList are same order as gtfList
                    # Checking transcripts after gene in gtf _actually_ are members of the gene
                    # Add trans info to appropriate geneList[]
                    if(gtfList[j].transID == transList[tIdx].transID and
                       gtfList[i].geneID  == transList[tIdx].geneID and
                       gtfList[i].geneID  == geneList[gIdx].geneID):
                        geneList[gIdx].transList.append(transList[tIdx].transID)
                        geneList[gIdx].transIdxList.append(tIdx)
                        k = j + 1
                        # Get constituent exons between transcript entries
                        while(gtfList[k].etype != "transcript"):
                            if(gtfList[k].etype == "exon"):
                                # Check exons in exonList are same order as gtfList
                                # Checking exons after trans in gtf are members trans
                                # Add exon info to appropriate transList[]
                                if(gtfList[k].transID == exonList[eIdx].transID and
                                   gtfList[i].geneID  == exonList[eIdx].geneID and
                                   gtfList[i].geneID  == geneList[gIdx].geneID):
                                    transList[tIdx].exonList.append(exonList[eIdx].exonID)
                                    transList[tIdx].exonIdxList.append(eIdx)
                                    eIdx += 1
                                else:
                                    exit_with_error(
                                        "ERROR! gtfList[%i].transID = %s and exonList[%i]."
                                        "transID = %s\n\tgtfList[%i].geneID = %s and "
                                        "transList[%i].geneID = "
                                        "%s\n\tand geneList[%i].geneID = %s\n"%
                                        (k, gtfList[k].transID,eIdx, exonList[eIdx].transID,
                                        k, gtfList[k].geneID, tIdx, transList[tIdx].geneID,
                                        gIdx, geneList[gIdx].geneID)
                                    )
                            k += 1
                            if(k == gtfListLen):
                                break
                        tIdx += 1
                    else:
                        exit_with_error(
                            "ERROR! gtfList[%i].transID= %s and transList[%i].transID = "
                            "%s\n\tgtfList[%i].geneID = %s and transList[%i].geneID = "
                            "%s\n\tand geneList[%i].geneID = %s\n"%
                            (j, gtfList[j].transID, tIdx, transList[tIdx].transID,
                             j, gtfList[j].geneID, tIdx, transList[tIdx].geneID, gIdx,
                             geneList[gIdx].geneID))
                j += 1
                if(j == gtfListLen):
                    break
            gIdx += 1
    # Now get transcript sequences.
    for trans in transList:
        trans.seq = ""
        for eIdx in trans.exonIdxList:
            trans.seq += exonList[eIdx].seq
    timeEnd = datetime.datetime.now()
    print("link_exons_trans_and_genes() run time = %s"%(timeEnd - timeBegin))
def create_gene_and_trans_lookup_dict(geneList, transList):
    """
    ARGS:
        geneList  : list of GENEs
        transList : list of TRANSCRIPTS
    RETURN:
        geneDict  : keys = geneID (quotes stripped), values = geneList indices
        transDict : keys = transID (quotes stripped), values = transList indices
    DESCRIPTION:
        Builds associative arrays mapping each geneID / transID to its index
        in geneList / transList. The [1:-1] slice strips the leading and
        trailing '"' characters stored on the IDs.
    DEBUG:
        Spot checked the resulting dictionaries, appears to be correct
    FUTURE:
    """
    timeBegin = datetime.datetime.now()
    geneDict  = {gene.geneID[1:-1]: idx for idx, gene in enumerate(geneList)}
    transDict = {trans.transID[1:-1]: idx for idx, trans in enumerate(transList)}
    timeEnd = datetime.datetime.now()
    print("create_gene_and_trans_lookup_dict() run time = %s"%(timeEnd - timeBegin))
    return geneDict, transDict
def print_gtf_statistics(exonList, transList, geneList):
    """
    ARGS:
        exonList  : list of EXONs
        transList : list of TRANSCRIPTs
        geneList  : list of GENEs
    RETURN:
        None. Statistics are printed to stdout.
    DESCRIPTION:
        Reports length statistics (mean, sample std. dev., min / max and the
        IDs of the extreme entries) for exons and transcripts, plus the mean
        number of exons per transcript. Standard deviations divide by N - 1,
        so exonList and transList must each hold at least two entries.
        Fixed : the old version seeded min with 10000 and max with 0, so e.g.
        a data set whose shortest transcript exceeded 10000 nt reported
        min = 10000 with an empty ID. min()/max() over the real data cannot
        mis-report. (It also initialized sigmaExonLen twice.)
    DEBUG:
        Using a short.gtf, checked mean/sigma/min/max for exons and
        transcripts against Excel (identical).
    FUTURE:
    """
    timeBegin = datetime.datetime.now()
    # Genes : mean number of transcripts per gene (computed, currently not printed)
    meanTransNum = sum(len(g.transIdxList) for g in geneList) / float(len(geneList))
    # Transcripts
    transLens    = [len(t.seq) for t in transList]
    meanTransLen = sum(transLens) / float(len(transList))
    minTrans     = min(transList, key=lambda t: len(t.seq))
    maxTrans     = max(transList, key=lambda t: len(t.seq))
    meanExonNum  = sum(len(t.exonIdxList) for t in transList) / float(len(transList))
    # Sample standard deviation (divide by N - 1)
    sigmaTransLen = (sum((l - meanTransLen)**2 for l in transLens) /
                     float(len(transList) - 1))**0.5
    # Exons
    exonLens     = [len(e.seq) for e in exonList]
    meanExonLen  = sum(exonLens) / float(len(exonList))
    minExon      = min(exonList, key=lambda e: len(e.seq))
    maxExon      = max(exonList, key=lambda e: len(e.seq))
    sigmaExonLen = (sum((l - meanExonLen)**2 for l in exonLens) /
                    float(len(exonList) - 1))**0.5
    print("\nMean exon length : %f"%(meanExonLen))
    print("Std. Dev. exon length : %f"%(sigmaExonLen))
    print("Exon w/ Min length : %s, len : %i"%(minExon.exonID, len(minExon.seq)))
    print("Exon w/ Max length : %s, len : %i\n"%(maxExon.exonID, len(maxExon.seq)))
    print("Mean trans length : %f"%(meanTransLen))
    print("Std. Dev. trans Length : %s"%(sigmaTransLen))
    print("Trans w/ Min length : %s, len : %i"%(minTrans.transID, len(minTrans.seq)))
    print("Trans w/ Max length : %s, len : %i"%(maxTrans.transID, len(maxTrans.seq)))
    print("Mean num of exons per trans : %f\n"%(meanExonNum))
    timeEnd = datetime.datetime.now()
    print("print_gtf_statistics() run time = %s"%(timeEnd - timeBegin))
def find_trans_that_differ_by_1_exon(geneList, transList):
    """
    ARGS:
        geneList  : list of GENEs
        transList : list of TRANSCRIPTs
    RETURN:
        None. Writes transPairsDiffFile.txt in the working directory.
    DESCRIPTION:
        For every gene, compares each pair of its transcripts and records the
        pairs whose exon sets differ by exactly one exon (one transcript has a
        single extra exon, all remaining exons are shared).
    DEBUG:
        I spot checked the results of transPairsDiffFile.txt and it appears to
        return sets of transcripts that actually differ by 1 exon. I checked
        by searching in ensembl.
    FUTURE:
    """
    outFile = open("transPairsDiffFile.txt", "w+")
    outFile.write("# This file contains a list of transcripts that differ only by 1 exon\n")
    outFile.write("# exon transcript1 transcript2 exonDiff\n")
    for gene in geneList:
        transIndices = gene.transIdxList
        for pos1 in range(len(transIndices)):
            trans1 = transList[transIndices[pos1]]
            for pos2 in range(pos1 + 1, len(transIndices)):
                trans2 = transList[transIndices[pos2]]
                # Exon counts may differ by at most one for the pair to qualify
                if(abs(len(trans1.exonList) - len(trans2.exonList)) > 1):
                    continue
                # Exons unique to each side. Exactly one unique exon on one
                # side and none on the other means "differ by 1 exon".
                onlyInFirst  = list(set(trans1.exonList) - set(trans2.exonList))
                onlyInSecond = list(set(trans2.exonList) - set(trans1.exonList))
                if(len(onlyInFirst) == 1 and len(onlyInSecond) == 0):
                    outFile.write("%s %s %s %s\n"%(gene.geneID, trans1.transID,
                                                   trans2.transID, onlyInFirst))
                if(len(onlyInFirst) == 0 and len(onlyInSecond) == 1):
                    outFile.write("%s %s %s %s\n"%(gene.geneID, trans1.transID,
                                                   trans2.transID, onlyInSecond))
    outFile.close()
def read_config(pathToConfig):
    """
    ARGS:
        pathToConfig : str, path to configuration file
    RETURN:
        readLength       : int, read length desired
        desiredTransList : list of transcript IDs to use
        abundanceList    : list of ints, relative abundances of transcripts
        numOfReads       : int, rough total number of reads
    DESCRIPTION:
        Config file format :
        1. Comment lines begin with '#'; blank lines are ignored
        2. Exactly one 'ReadLength' line and one 'NumberOfReads' line required
        3. All subsequent lines must be transcripts with relative abundance
           The relative abundance can be any integer. Scaling is done
           automatically.
        E.g.
            ENST00000488147 10
            ENST00000473358 5
        Fields are space separated; tabs are a fatal error.
    DEBUG:
        For small config files it reads in all the fields correctly.
    FUTURE:
    """
    desiredTransList = []
    abundanceList = []      # integers used to get relative abundance of transcripts
    readLength = 0
    numOfReads = 0
    # 'with' closes the handle (previously leaked)
    with open(pathToConfig, 'r') as configFile:
        for line in configFile:
            if(line[0] == "#"):
                continue
            line = (line.split("\n"))[0]    # remove trailing \n
            if(line == ""):                 # skip blank lines instead of dying on them
                continue
            # Check for tabs, only spaces permitted
            if(re.search('\t',line)):
                exit_with_error("ERROR! Tabs not permitted in config file!\n")
            line = line.split(" ")
            # ReadLength
            if(line[0] == "ReadLength"):
                if(readLength == 0):
                    readLength = int(line[1])
                    continue
                else:
                    exit_with_error("ERROR! multiple instances of ReadLength in config "
                                    "file\n")
            # NumberOfReads
            if(line[0] == "NumberOfReads"):
                if(numOfReads == 0):
                    numOfReads = int(line[1])
                    continue
                else:
                    # Was a copy/paste of the ReadLength message; name the right key
                    exit_with_error("ERROR! multiple instances of NumberOfReads in config "
                                    "file\n")
            # Transcripts
            if(re.search('ENST', line[0])):
                desiredTransList.append(line[0])
                abundanceList.append(int(line[1]))
            else:
                exit_with_error("ERROR! Incorrect transcript entry : %s\n"
                                " All entries should begin with 'ENST'\n"%(line))
    if(readLength == 0 or numOfReads == 0):
        exit_with_error("ERROR! ReadLength or NumberOfReads not specified in "
                        "config.txt\n")
    print("Config File Parameters : \nReadLength : %i\nNumberOfReads : %i"%(readLength,
          numOfReads))
    for trans, abundance in zip(desiredTransList, abundanceList):
        print("%s %i"%(trans, abundance))
    print("\n")
    return readLength, desiredTransList, abundanceList, numOfReads
def print_transcripts_with_seqs(transList):
    """
    ARGS:
        transList : list of TRANSCRIPTs (seq fields already populated)
    RETURN:
        None. Writes trans_list_and_seq.fa in the working directory.
    DESCRIPTION:
        Writes a fasta file of all transcripts sorted numerically by transNum,
        sequences wrapped at 100 characters per line. Works on a deep copy so
        sorting does not disturb transList -- GENE.transIdxList entries depend
        on maintaining transList's order.
        This function is primarily used to debug link_exons_trans_and_genes()
    DEBUG:
        1. Created chr1.fa and chr1.gtf, passed as args to this program
        2. Ran this function to print out _sorted_ transcript fasta file, called
           trans_list_and_seq.fa
        3. Went to http://useast.ensembl.org/biomart/martview, selected
           chr1, sequences->cdna and downloaded the file. Downloaded as
           biomart_export_cdna.fa
        4. Created a program, validate/sort_biomart.py to read in biomart_export_cdna.fa
           and generated sorted_biomart_export_cdna.fa
        5. ran 'diff trans_list_and_seq.fa validate/sorted_biomart_export_cdna.fa', it
           was identical.
        CONCLUSION:
            I correctly handle mapping the sequences to exons, and exons to
            transcripts (incl. reverse complements) -- the whole pipeline from
            read_gtf() through link_exons_trans_and_genes() must be working.
    FUTURE:
    """
    transCopyList = copy.deepcopy(transList)
    outFile = open("trans_list_and_seq.fa", "w+")
    transCopyList.sort(key=operator.attrgetter('transNum'))
    for trans in transCopyList:
        outFile.write(">%s|%s\n"%(trans.geneID[1:-1], trans.transID[1:-1]))
        # Write 100-char slices instead of one write() call per character.
        # Output is byte-identical to the old per-char loop, including the
        # trailing newline after the final (possibly partial) line.
        seq = trans.seq
        for i in range(0, len(seq), 100):
            chunk = seq[i:i + 100]
            outFile.write(chunk)
            if(len(chunk) == 100):
                outFile.write("\n")
        outFile.write("\n")
    outFile.close()
def create_insert(Transcript = None, ReadLength = None, Mu = None, Sigma = None,
                  ExonList = None):
    """
    ARGS:
        Transcript : a TRANSCRIPT instance
        ReadLength : length of reads. Different from insert length
        Mu         : the mean of fragment length distribution
        Sigma      : the standard deviation of fragment length distribution
        ExonList   : list of EXONs, forwarded to INSERT (read metadata)
    RETURN:
        AN INSERT of length n, where n fall in a distribution of rnorm(Mu,sigma)
    DESCRIPTION:
        Repeatedly samples a random start and a normally distributed fragment
        length until the resulting insert fits on the transcript and is at
        least one read long.
    DEBUG:
    FUTURE:
        1. Implement Proper solution where insert going into the Illumina adapter when
           stop - start < ReadLength
    """
    # Validate the argument _before_ touching Transcript.seq; previously the
    # guard ran after len(Transcript.seq), so a bad argument died with an
    # AttributeError instead of the intended error message.
    if(not isinstance(Transcript, TRANSCRIPT)):
        exit_with_error("ERROR! Transcript is not of class type TRANSCRIPT 1\n")
    start = 0
    stop = 0
    timeBegin = datetime.datetime.now()
    transLength = len(Transcript.seq)
    insertLength = 0
    # Ensure inserts are at least as long as the readlength
    while(insertLength < ReadLength):
        start = random.randint(0, transLength - 1)
        stop = start + int(numpy.random.normal(Mu, Sigma))
        # Avoid unrealistically short inserts
        if(stop - start < ReadLength):
            continue
        # Avoid inserts that are past end of transcripts.
        if(stop > transLength - 1):
            # Proper solution here would have insert going into the Illumina adapter
            stop = transLength - 1
        if(stop - start < ReadLength):  # Insert must be at least as large as a read
            continue
        insert = INSERT(Transcript = Transcript, StartWrtTrans = start,
                        StopWrtTrans = stop, ReadLength = ReadLength,
                        ExonList = ExonList)
        insertLength = len(insert.seq)
    timeEnd = datetime.datetime.now()
    # print("get_insert_list() run time = %s"%(timeEnd - timeBegin))
    return insert
def create_fastq_file(pathToFastq, desiredTransList, abundanceList, nReads,
                      readLength, transDict, transList, exonList, readType):
    """
    ARGS:
        pathToFastq      : Path to output fastq file (extension added per readType)
        desiredTransList : transcripts read from config file
        abundanceList    : list of integers that sum to a value used to normalize
                           the number of reads.
                           E.g. trans1 has 5 and trans2 has 10,
                           the ratio of trans1 : trans2 = 1 : 2
        nReads           : Rough number of desired reads, the ratios from abundanceList
                           is maintained at the expense of nReads.
        readLength       : length of reads
        transDict        : Dictionary mapping transID quickly to the correct
                           transcript index in transList
        transList        : List of TRANSCRIPTs. Contains sequence to pass to
                           instances of FASTQ_READ()
        exonList         : List of EXONs. Passed to FASTQ_READ() to get metadata for
                           each fastq read (start position, exons a read spans).
        readType         : either : 'single', 'paired-fr-first', 'paired-fr-second'
    RETURN:
        None. File(s) written : <pathToFastq>.fq for single end reads, or
        <pathToFastq>-R1.fq / -R2.fq for paired end reads.
    DESCRIPTION:
        Writes fastq file. Inserts are drawn by create_insert() with a
        N(150, 15) fragment length distribution; each insert yields one read
        (single) or an R1/R2 pair in the requested orientation.
    DEBUG:
        1. Blasted reads against the ensembl database and reconstructed each
           read's start position with grep/awk from the transcript sequence;
           positions and exon annotations matched exactly (transcript
           ENST00000473358, all three exons checked).
        2. See debug comments of the INSERT class.
        CONCLUSION : single end reads of transcripts/inserts on the '+' strand
                     in the sense direction work.
    FUTURE:
        Include more error checking for goofy parameters, e.g. not enough reads for
        the ratios, etc.
    """
    transIdx = 0
    readIdx = 0
    abundanceSum = sum(abundanceList)
    if(abundanceSum < 1):
        # Fixed : this message formatted 'abundanceNormalization', an undefined
        # name, so reaching this branch raised NameError instead of exiting.
        exit_with_error("ERROR! abundanceSum = {}\nPlease enter abundance "
                        "values > 1\n".format(abundanceSum))
    if(readType == 'single'):
        pathToFastqR1 = pathToFastq + ".fq"
        fastqFileR1 = open(pathToFastqR1, "w+")
        fastqListR1 = []
    elif(readType == 'paired-fr-first' or readType == 'paired-fr-second'):
        pathToFastqR1 = pathToFastq + "-R1.fq"
        pathToFastqR2 = pathToFastq + "-R2.fq"
        fastqFileR1 = open(pathToFastqR1, "w+")
        fastqFileR2 = open(pathToFastqR2, "w+")
        fastqListR1 = []
        fastqListR2 = []
    else:
        exit_with_error("ERROR!!! Incorrect value for {}".format(readType))
    for transName in desiredTransList:
        try:
            trans = transList[transDict[transName]]
        except KeyError:
            exit_with_error("ERROR! {} is not a transcript annotated in your "
                            "gtf file\n".format(transName))
        # Number of reads for this transcript : its share of abundanceSum
        for i in range(int(float(abundanceList[transIdx])/float(abundanceSum) * nReads)):
            insert = create_insert(trans, readLength, 150, 15, exonList)
            if(readType == 'single'):
                fastqEntry = FASTQ_READ(Insert = insert, ReadLength = readLength,
                                        MetaData = "@Read_num:%i"%(readIdx),
                                        ExonList=exonList, Direction = "forward")
                fastqListR1.append(fastqEntry)
            elif(readType == 'paired-fr-first'):
                # fr-first : R1 is reverse (antisense), R2 forward (sense)
                fastqEntry = FASTQ_READ(Insert = insert, ReadLength = readLength,
                                        MetaData = "@Read_num:%i"%(readIdx),
                                        ExonList = exonList, Direction = "reverse")
                fastqListR1.append(fastqEntry)
                fastqEntry = FASTQ_READ(Insert = insert, ReadLength = readLength,
                                        MetaData = "@Read_num:%i"%(readIdx),
                                        ExonList = exonList, Direction = "forward")
                fastqListR2.append(fastqEntry)
            elif(readType == 'paired-fr-second'):
                # fr-second : R1 is forward (sense), R2 reverse (antisense)
                fastqEntry = FASTQ_READ(Insert = insert, ReadLength = readLength,
                                        MetaData = "@Read_num:%i"%(readIdx),
                                        ExonList = exonList, Direction = "forward")
                fastqListR1.append(fastqEntry)
                fastqEntry = FASTQ_READ(Insert = insert, ReadLength = readLength,
                                        MetaData = "@Read_num:%i"%(readIdx),
                                        ExonList = exonList, Direction = "reverse")
                fastqListR2.append(fastqEntry)
            readIdx += 1
        transIdx += 1
    if(readType == 'single'):
        for fastqEntry in fastqListR1:
            fastqFileR1.write("%s\n%s\n+\n%s\n"%(fastqEntry.metadata,
                              fastqEntry.seq, fastqEntry.qual))
        fastqFileR1.close()
    else:
        for fastqEntry in fastqListR1:
            fastqFileR1.write("%s\n%s\n+\n%s\n"%(fastqEntry.metadata,
                              fastqEntry.seq, fastqEntry.qual))
        for fastqEntry in fastqListR2:
            fastqFileR2.write("%s\n%s\n+\n%s\n"%(fastqEntry.metadata,
                              fastqEntry.seq, fastqEntry.qual))
        fastqFileR1.close()
        fastqFileR2.close()
| [
"operator.attrgetter",
"re.split",
"classes.GENE",
"classes.TRANSCRIPT",
"error.exit_with_error",
"numpy.random.normal",
"classes.reverse_complement",
"classes.INSERT",
"classes.FASTQ_READ",
"classes.EXON",
"datetime.datetime.now",
"copy.deepcopy",
"classes.GTF_ENTRY",
"classes.CHROMOSOME"... | [((1370, 1393), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (1391, 1393), False, 'import datetime\n'), ((2029, 2052), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (2050, 2052), False, 'import datetime\n'), ((2666, 2689), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (2687, 2689), False, 'import datetime\n'), ((2839, 2862), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (2860, 2862), False, 'import datetime\n'), ((3437, 3460), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (3458, 3460), False, 'import datetime\n'), ((3625, 3648), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (3646, 3648), False, 'import datetime\n'), ((4226, 4249), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (4247, 4249), False, 'import datetime\n'), ((4399, 4422), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (4420, 4422), False, 'import datetime\n'), ((5007, 5030), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (5028, 5030), False, 'import datetime\n'), ((6267, 6290), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (6288, 6290), False, 'import datetime\n'), ((6743, 6766), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (6764, 6766), False, 'import datetime\n'), ((7693, 7716), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (7714, 7716), False, 'import datetime\n'), ((8149, 8172), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (8170, 8172), False, 'import datetime\n'), ((8813, 8836), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (8834, 8836), False, 'import datetime\n'), ((10747, 10770), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (10768, 10770), False, 'import datetime\n'), ((14697, 14720), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), 
'()\n', (14718, 14720), False, 'import datetime\n'), ((15438, 15461), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (15459, 15461), False, 'import datetime\n'), ((15718, 15741), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (15739, 15741), False, 'import datetime\n'), ((17093, 17116), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (17114, 17116), False, 'import datetime\n'), ((19204, 19227), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (19225, 19227), False, 'import datetime\n'), ((26987, 27011), 'copy.deepcopy', 'copy.deepcopy', (['transList'], {}), '(transList)\n', (27000, 27011), False, 'import copy\n'), ((28185, 28208), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (28206, 28208), False, 'import datetime\n'), ((29316, 29339), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (29337, 29339), False, 'import datetime\n'), ((1205, 1280), 'error.exit_with_error', 'exit_with_error', (['"""ERROR! You did not pass a file with the .gtf extention\n"""'], {}), "('ERROR! 
You did not pass a file with the .gtf extention\\n')\n", (1220, 1280), False, 'from error import exit_with_error\n'), ((1726, 1893), 'classes.GTF_ENTRY', 'GTF_ENTRY', ([], {'Chromosome': 'line[0]', 'Source': 'line[1]', 'EntryType': 'line[2]', 'Start': 'line[3]', 'Stop': 'line[4]', 'Score': 'line[5]', 'Strand': 'line[6]', 'Frame': 'line[7]', 'Attribute': 'line[8]'}), '(Chromosome=line[0], Source=line[1], EntryType=line[2], Start=line\n [3], Stop=line[4], Score=line[5], Strand=line[6], Frame=line[7],\n Attribute=line[8])\n', (1735, 1893), False, 'from classes import GTF_ENTRY\n'), ((5807, 5828), 'classes.CHROMOSOME', 'CHROMOSOME', (['chrm', 'seq'], {}), '(chrm, seq)\n', (5817, 5828), False, 'from classes import CHROMOSOME\n'), ((23310, 23331), 're.search', 're.search', (['"""\t"""', 'line'], {}), "('\\t', line)\n", (23319, 23331), False, 'import re\n'), ((24095, 24121), 're.search', 're.search', (['"""ENST"""', 'line[0]'], {}), "('ENST', line[0])\n", (24104, 24121), False, 'import re\n'), ((24444, 24532), 'error.exit_with_error', 'exit_with_error', (['"""ERROR! ReadLength or NumberOfReads not specified in config.txt\n"""'], {}), "(\n 'ERROR! ReadLength or NumberOfReads not specified in config.txt\\n')\n", (24459, 24532), False, 'from error import exit_with_error\n'), ((28321, 28393), 'error.exit_with_error', 'exit_with_error', (['"""ERROR! Transcript is not of class type TRANSCRIPT 1\n"""'], {}), "('ERROR! 
Transcript is not of class type TRANSCRIPT 1\\n')\n", (28336, 28393), False, 'from error import exit_with_error\n'), ((28531, 28565), 'random.randint', 'random.randint', (['(0)', '(transLength - 1)'], {}), '(0, transLength - 1)\n', (28545, 28565), False, 'import random\n'), ((29082, 29197), 'classes.INSERT', 'INSERT', ([], {'Transcript': 'Transcript', 'StartWrtTrans': 'start', 'StopWrtTrans': 'stop', 'ReadLength': 'ReadLength', 'ExonList': 'ExonList'}), '(Transcript=Transcript, StartWrtTrans=start, StopWrtTrans=stop,\n ReadLength=ReadLength, ExonList=ExonList)\n', (29088, 29197), False, 'from classes import INSERT\n'), ((2776, 2790), 'classes.EXON', 'EXON', (['gtfEntry'], {}), '(gtfEntry)\n', (2780, 2790), False, 'from classes import EXON\n'), ((3554, 3574), 'classes.TRANSCRIPT', 'TRANSCRIPT', (['gtfEntry'], {}), '(gtfEntry)\n', (3564, 3574), False, 'from classes import TRANSCRIPT\n'), ((4336, 4350), 'classes.GENE', 'GENE', (['gtfEntry'], {}), '(gtfEntry)\n', (4340, 4350), False, 'from classes import GENE\n'), ((5529, 5548), 're.split', 're.split', (['""" """', 'line'], {}), "(' ', line)\n", (5537, 5548), False, 'import re\n'), ((5695, 5715), 're.split', 're.split', (['"""\n"""', 'line'], {}), "('\\n', line)\n", (5703, 5715), False, 'import re\n'), ((23345, 23407), 'error.exit_with_error', 'exit_with_error', (['"""ERROR! Tabs not permitted in config file!\n"""'], {}), "('ERROR! Tabs not permitted in config file!\\n')\n", (23360, 23407), False, 'from error import exit_with_error\n'), ((24242, 24359), 'error.exit_with_error', 'exit_with_error', (['("""ERROR! Incorrect transcript entry : %s\n All entries should begin with \'ENST\'\n"""\n % line)'], {}), '(\n """ERROR! 
Incorrect transcript entry : %s\n All entries should begin with \'ENST\'\n"""\n % line)\n', (24257, 24359), False, 'from error import exit_with_error\n'), ((27089, 27120), 'operator.attrgetter', 'operator.attrgetter', (['"""transNum"""'], {}), "('transNum')\n", (27108, 27120), False, 'import operator\n'), ((11091, 11230), 'error.exit_with_error', 'exit_with_error', (["('ERROR! gtfList[%i].geneID = %s and geneList[%i].geneID = %s' % (i,\n gtfEntry.geneID, gIdx, geneList[gIdx].geneID))"], {}), "(\n 'ERROR! gtfList[%i].geneID = %s and geneList[%i].geneID = %s' % (i,\n gtfEntry.geneID, gIdx, geneList[gIdx].geneID))\n", (11106, 11230), False, 'from error import exit_with_error\n'), ((23632, 23707), 'error.exit_with_error', 'exit_with_error', (['"""ERROR! multiple instances of ReadLength in config file\n"""'], {}), "('ERROR! multiple instances of ReadLength in config file\\n')\n", (23647, 23707), False, 'from error import exit_with_error\n'), ((23942, 24017), 'error.exit_with_error', 'exit_with_error', (['"""ERROR! multiple instances of ReadLength in config file\n"""'], {}), "('ERROR! multiple instances of ReadLength in config file\\n')\n", (23957, 24017), False, 'from error import exit_with_error\n'), ((28598, 28628), 'numpy.random.normal', 'numpy.random.normal', (['Mu', 'Sigma'], {}), '(Mu, Sigma)\n', (28617, 28628), False, 'import numpy\n'), ((35004, 35131), 'classes.FASTQ_READ', 'FASTQ_READ', ([], {'Insert': 'insert', 'ReadLength': 'readLength', 'MetaData': "('@Read_num:%i' % readIdx)", 'ExonList': 'exonList', 'Direction': '"""forward"""'}), "(Insert=insert, ReadLength=readLength, MetaData='@Read_num:%i' %\n readIdx, ExonList=exonList, Direction='forward')\n", (35014, 35131), False, 'from classes import FASTQ_READ\n'), ((5397, 5418), 'classes.CHROMOSOME', 'CHROMOSOME', (['chrm', 'seq'], {}), '(chrm, seq)\n', (5407, 5418), False, 'from classes import CHROMOSOME\n'), ((7101, 7220), 'error.exit_with_error', 'exit_with_error', (["('ERROR!! 
start (%i) or stop (%i) Position > chromosome length (%i)\\n' % (\n start, end, chrmLen))"], {}), "(\n 'ERROR!! start (%i) or stop (%i) Position > chromosome length (%i)\\n' %\n (start, end, chrmLen))\n", (7116, 7220), False, 'from error import exit_with_error\n'), ((8489, 8581), 'error.exit_with_error', 'exit_with_error', (["('ERROR! exon numbers for %s are loaded out of order!\\n' % trans.transID)"], {}), "('ERROR! exon numbers for %s are loaded out of order!\\n' %\n trans.transID)\n", (8504, 8581), False, 'from error import exit_with_error\n'), ((35342, 35469), 'classes.FASTQ_READ', 'FASTQ_READ', ([], {'Insert': 'insert', 'ReadLength': 'readLength', 'MetaData': "('@Read_num:%i' % readIdx)", 'ExonList': 'exonList', 'Direction': '"""reverse"""'}), "(Insert=insert, ReadLength=readLength, MetaData='@Read_num:%i' %\n readIdx, ExonList=exonList, Direction='reverse')\n", (35352, 35469), False, 'from classes import FASTQ_READ\n'), ((35636, 35763), 'classes.FASTQ_READ', 'FASTQ_READ', ([], {'Insert': 'insert', 'ReadLength': 'readLength', 'MetaData': "('@Read_num:%i' % readIdx)", 'ExonList': 'exonList', 'Direction': '"""forward"""'}), "(Insert=insert, ReadLength=readLength, MetaData='@Read_num:%i' %\n readIdx, ExonList=exonList, Direction='forward')\n", (35646, 35763), False, 'from classes import FASTQ_READ\n'), ((7413, 7452), 'classes.reverse_complement', 'reverse_complement', (['chrm.seq[start:end]'], {}), '(chrm.seq[start:end])\n', (7431, 7452), False, 'from classes import reverse_complement\n'), ((7612, 7678), 'error.exit_with_error', 'exit_with_error', (['"""ERROR! strand char = %s is invalid"""', 'exon.strand'], {}), "('ERROR! strand char = %s is invalid', exon.strand)\n", (7627, 7678), False, 'from error import exit_with_error\n'), ((13907, 14230), 'error.exit_with_error', 'exit_with_error', (['("""ERROR! 
gtfList[%i].transID= %s and transList[%i].transID = %s\n\tgtfList[%i].geneID = %s and transList[%i].geneID = %s\n\tand geneList[%i].geneID = %s\n"""\n % (j, gtfList[j].transID, tIdx, transList[tIdx].transID, j, gtfList[j]\n .geneID, tIdx, transList[tIdx].geneID, gIdx, geneList[gIdx].geneID))'], {}), '(\n """ERROR! gtfList[%i].transID= %s and transList[%i].transID = %s\n\tgtfList[%i].geneID = %s and transList[%i].geneID = %s\n\tand geneList[%i].geneID = %s\n"""\n % (j, gtfList[j].transID, tIdx, transList[tIdx].transID, j, gtfList[j]\n .geneID, tIdx, transList[tIdx].geneID, gIdx, geneList[gIdx].geneID))\n', (13922, 14230), False, 'from error import exit_with_error\n'), ((35980, 36107), 'classes.FASTQ_READ', 'FASTQ_READ', ([], {'Insert': 'insert', 'ReadLength': 'readLength', 'MetaData': "('@Read_num:%i' % readIdx)", 'ExonList': 'exonList', 'Direction': '"""forward"""'}), "(Insert=insert, ReadLength=readLength, MetaData='@Read_num:%i' %\n readIdx, ExonList=exonList, Direction='forward')\n", (35990, 36107), False, 'from classes import FASTQ_READ\n'), ((36274, 36401), 'classes.FASTQ_READ', 'FASTQ_READ', ([], {'Insert': 'insert', 'ReadLength': 'readLength', 'MetaData': "('@Read_num:%i' % readIdx)", 'ExonList': 'exonList', 'Direction': '"""reverse"""'}), "(Insert=insert, ReadLength=readLength, MetaData='@Read_num:%i' %\n readIdx, ExonList=exonList, Direction='reverse')\n", (36284, 36401), False, 'from classes import FASTQ_READ\n'), ((13085, 13407), 'error.exit_with_error', 'exit_with_error', (['("""ERROR! gtfList[%i].transID = %s and exonList[%i].transID = %s\n\tgtfList[%i].geneID = %s and transList[%i].geneID = %s\n\tand geneList[%i].geneID = %s\n"""\n % (k, gtfList[k].transID, eIdx, exonList[eIdx].transID, k, gtfList[k].\n geneID, tIdx, transList[tIdx].geneID, gIdx, geneList[gIdx].geneID))'], {}), '(\n """ERROR! 
gtfList[%i].transID = %s and exonList[%i].transID = %s\n\tgtfList[%i].geneID = %s and transList[%i].geneID = %s\n\tand geneList[%i].geneID = %s\n"""\n % (k, gtfList[k].transID, eIdx, exonList[eIdx].transID, k, gtfList[k].\n geneID, tIdx, transList[tIdx].geneID, gIdx, geneList[gIdx].geneID))\n', (13100, 13407), False, 'from error import exit_with_error\n')] |
from collections import namedtuple
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import ray
import stats
# Lightweight stand-ins for the Kaggle multi-armed-bandit environment API.
Configuration = namedtuple('Configuration', ['banditCount'])
Observation = namedtuple('Observation', ['step', 'reward', 'agentIndex', 'lastActions'])


def smoke_test(agent):
    """Drive ``agent`` through three scripted steps to catch crashes early.

    The agent's own first two choices are fed back via ``lastActions``,
    mimicking how the real environment reports the previous round.
    """
    config = Configuration(banditCount=100)
    action = agent.step(
        Observation(step=0, reward=0, agentIndex=0, lastActions=[]), config)
    action = agent.step(
        Observation(step=1, reward=0, agentIndex=0, lastActions=[action, 2]),
        config)
    agent.step(
        Observation(step=2, reward=1, agentIndex=0, lastActions=[action, 5]),
        config)
@ray.remote
def simulate_mab(agent_lambdas, num_steps=2000, num_bandits=100, game_id=0):
    """Play one decaying-bandit game between two agents (as a ray task).

    Returns ``(totals, df)`` where ``totals`` holds each agent's final
    score and ``df`` logs the running totals per step for this game.
    """
    config = Configuration(banditCount=num_bandits)
    probs = np.random.rand(num_bandits)
    agents = [make(num_bandits) for make in agent_lambdas]
    last_actions = [0, 0]
    totals = [0, 0]
    log = {'step': [], 'p1_total': [], 'p2_total': []}
    for step in range(num_steps):
        for idx, agent in enumerate(agents):
            obs = Observation(step=step, reward=totals[idx], agentIndex=idx,
                              lastActions=last_actions)
            choice = agent.step(obs, config)
            # One Bernoulli draw per agent-step, as in the real environment.
            totals[idx] += np.random.rand() < probs[choice]
            last_actions[idx] = choice
        log['step'].append(step)
        log['p1_total'].append(totals[0])
        log['p2_total'].append(totals[1])
        # Every pulled arm decays towards zero payout.
        for action in last_actions:
            probs[action] *= 0.97
    df = pd.DataFrame(data=log)
    df['diff'] = df.p1_total - df.p2_total
    df['game_id'] = game_id
    return totals, df
def compare_agents(agent_lambdas, num_games=50, num_bandits=100, num_steps=2000, min_games=20):
    """Play head-to-head games between two agents, batched across CPUs.

    Stops early once at least ``min_games`` have been played and the
    likelihood-of-superiority is conclusive (> 0.99 or < 0.01).

    Returns:
        (scores, df): the per-game final score pairs and the concatenated
        per-step log frames of every game played.
    """
    names = [a(num_bandits).description() for a in agent_lambdas]
    print(f"{names[0]}\n{names[1]}")
    num_cpus = 6
    scores = []
    frames = []  # collect per-game frames; concatenated once at the end
    for batch_start in range(0, num_games, num_cpus):
        # NOTE: the comprehension variable no longer shadows the loop index.
        result_ids = [simulate_mab.remote(
            agent_lambdas,
            num_steps=num_steps,
            num_bandits=num_bandits,
            game_id=game_id)
            for game_id in range(batch_start, batch_start + num_cpus)]
        batch_results = ray.get(result_ids)
        for score, game_df in batch_results:
            scores.append(score)
            frames.append(game_df)
        stats.print_inline_stats(scores)
        p1_wins, p2_wins = stats.get_wins(scores)
        p1_los = stats.get_los(p1_wins, p2_wins)
        if len(scores) >= min_games and (p1_los > 0.99 or p1_los < 0.01):
            break
    print('\n')
    # DataFrame.append was removed in pandas 2.0; build the frame in one go.
    df = pd.concat(frames) if frames else pd.DataFrame()
    return scores, df
def rank_agents(agents, num_games=50, min_games=20):
    """Sort agent factories by head-to-head strength and print the order."""
    ordering = _rank_agents(agents, num_games=num_games, min_games=min_games)
    print("\nFinal Ranking:")
    for agent_lambda in ordering:
        print(agent_lambda(100).description())
def _rank_agents(agents, num_games=50, min_games=20):
    """Recursively merge-sort agent factories by head-to-head wins.

    Each comparison plays actual games via :func:`compare_agents`; the
    winner of a pairing is ranked ahead of the loser.
    """
    if len(agents) < 2:
        return agents
    if len(agents) == 2:
        scores, _ = compare_agents(agents, num_games=num_games, min_games=min_games)
        p1_wins, p2_wins = stats.get_wins(scores)
        return agents if p1_wins > p2_wins else agents[::-1]
    mid = len(agents) // 2
    left = _rank_agents(agents[:mid], num_games=num_games, min_games=min_games)
    right = _rank_agents(agents[mid:], num_games=num_games, min_games=min_games)
    # Merge step: pit the current heads against each other.
    merged = []
    while left and right:
        scores, _ = compare_agents([left[0], right[0]],
                                   num_games=num_games, min_games=min_games)
        p1_wins, p2_wins = stats.get_wins(scores)
        if p1_wins > p2_wins:
            merged.append(left.pop(0))
        else:
            merged.append(right.pop(0))
    # Whichever sublist is non-empty is already internally ranked.
    return merged + left + right
def round_robin(agent_lambdas, num_games=50, num_bandits=100, num_steps=2000, min_games=20):
    """Play every pair of agents, print win/loss/tie records, plot LOS."""
    n = len(agent_lambdas)
    total_rounds = n * (n - 1) // 2
    agent_names = [make(num_bandits).description() for make in agent_lambdas]
    records = np.zeros((n, 3), dtype='int32')  # per agent: wins, losses, ties
    los_matrix = np.full((n, n), 0.5)  # likelihood of row beating column
    print(f"Starting round robin with {n} agents:")
    for name in agent_names:
        print(name)
    print("")
    round_num = 1
    for i in range(n - 1):
        for j in range(i + 1, n):
            print(f"Starting round {round_num} of {total_rounds}:")
            scores, _ = compare_agents(
                [agent_lambdas[i], agent_lambdas[j]],
                num_games=num_games,
                num_bandits=num_bandits,
                num_steps=num_steps,
                min_games=min_games)
            p1_wins, p2_wins = stats.get_wins(scores)
            ties = len(scores) - p1_wins - p2_wins
            records[i] += [p1_wins, p2_wins, ties]
            records[j] += [p2_wins, p1_wins, ties]
            p, _ = stats.get_mean_and_ci(p1_wins, p2_wins)
            los_matrix[i, j] = p
            los_matrix[j, i] = 1 - p
            round_num += 1
    for i in range(n):
        print(f"{agent_names[i]}: {'-'.join(map(str, records[i]))}")
    plot_los_heatmap(agent_names, los_matrix)
def graph_game_results(df):
    """Plot the 10-step rolling mean of the score gap, averaged over games."""
    mean_diff = df.groupby('step').mean()['diff']
    ax = mean_diff.rolling(window=10).mean().plot(
        title="Point difference averages over all games")
    ax.set_xlabel("step")
    ax.set_ylabel("P1 - P2")
def plot_los_heatmap(agent_names, los_matrix):
    """Render the pairwise likelihood-of-superiority matrix as a heatmap.

    Agents are ordered by overall strength (row sums of the LOS matrix).
    """
    count = len(agent_names)
    # Strongest agents first: descending total likelihood of superiority.
    order = np.argsort(-np.sum(los_matrix, axis=1))
    sorted_names = [agent_names[idx] for idx in order]
    sorted_los = los_matrix[order][:, order]
    fig, ax = plt.subplots()
    ax.imshow(sorted_los, cmap='gray', vmin=0, vmax=1.5)
    # Label every row and column with the (sorted) agent descriptions.
    ax.set_xticks(np.arange(count))
    ax.set_yticks(np.arange(count))
    ax.set_xticklabels(sorted_names)
    ax.set_yticklabels(sorted_names)
    plt.setp(ax.get_xticklabels(), rotation=45, ha="right",
             rotation_mode="anchor")
    for row in range(count):
        for col in range(count):
            if row == col:
                continue  # the diagonal (self vs self) carries no information
            ax.text(col, row, "{:.2f}".format(sorted_los[row, col]),
                    ha="center", va="center", color="w")
    plt.show()
| [
"stats.get_los",
"stats.print_inline_stats",
"collections.namedtuple",
"numpy.random.rand",
"ray.get",
"stats.get_wins",
"stats.get_mean_and_ci",
"numpy.sum",
"numpy.zeros",
"pandas.DataFrame",
"numpy.full",
"matplotlib.pyplot.subplots",
"numpy.arange",
"matplotlib.pyplot.show"
] | [((149, 193), 'collections.namedtuple', 'namedtuple', (['"""Configuration"""', "['banditCount']"], {}), "('Configuration', ['banditCount'])\n", (159, 193), False, 'from collections import namedtuple\n'), ((208, 282), 'collections.namedtuple', 'namedtuple', (['"""Observation"""', "['step', 'reward', 'agentIndex', 'lastActions']"], {}), "('Observation', ['step', 'reward', 'agentIndex', 'lastActions'])\n", (218, 282), False, 'from collections import namedtuple\n'), ((848, 875), 'numpy.random.rand', 'np.random.rand', (['num_bandits'], {}), '(num_bandits)\n', (862, 875), True, 'import numpy as np\n'), ((1530, 1550), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': 'd'}), '(data=d)\n', (1542, 1550), True, 'import pandas as pd\n'), ((1890, 1904), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (1902, 1904), True, 'import pandas as pd\n'), ((4212, 4252), 'numpy.zeros', 'np.zeros', (['(num_agents, 3)'], {'dtype': '"""int32"""'}), "((num_agents, 3), dtype='int32')\n", (4220, 4252), True, 'import numpy as np\n'), ((4270, 4308), 'numpy.full', 'np.full', (['(num_agents, num_agents)', '(0.5)'], {}), '((num_agents, num_agents), 0.5)\n', (4277, 4308), True, 'import numpy as np\n'), ((5880, 5894), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (5892, 5894), True, 'import matplotlib.pyplot as plt\n'), ((6574, 6584), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (6582, 6584), True, 'import matplotlib.pyplot as plt\n'), ((2171, 2190), 'ray.get', 'ray.get', (['result_ids'], {}), '(result_ids)\n', (2178, 2190), False, 'import ray\n'), ((2315, 2347), 'stats.print_inline_stats', 'stats.print_inline_stats', (['scores'], {}), '(scores)\n', (2339, 2347), False, 'import stats\n'), ((2375, 2397), 'stats.get_wins', 'stats.get_wins', (['scores'], {}), '(scores)\n', (2389, 2397), False, 'import stats\n'), ((2415, 2446), 'stats.get_los', 'stats.get_los', (['p1_wins', 'p2_wins'], {}), '(p1_wins, p2_wins)\n', (2428, 2446), False, 'import stats\n'), ((3066, 
3088), 'stats.get_wins', 'stats.get_wins', (['scores'], {}), '(scores)\n', (3080, 3088), False, 'import stats\n'), ((3728, 3750), 'stats.get_wins', 'stats.get_wins', (['scores'], {}), '(scores)\n', (3742, 3750), False, 'import stats\n'), ((6006, 6027), 'numpy.arange', 'np.arange', (['num_agents'], {}), '(num_agents)\n', (6015, 6027), True, 'import numpy as np\n'), ((6047, 6068), 'numpy.arange', 'np.arange', (['num_agents'], {}), '(num_agents)\n', (6056, 6068), True, 'import numpy as np\n'), ((4932, 4954), 'stats.get_wins', 'stats.get_wins', (['scores'], {}), '(scores)\n', (4946, 4954), False, 'import stats\n'), ((5128, 5167), 'stats.get_mean_and_ci', 'stats.get_mean_and_ci', (['p1_wins', 'p2_wins'], {}), '(p1_wins, p2_wins)\n', (5149, 5167), False, 'import stats\n'), ((5741, 5767), 'numpy.sum', 'np.sum', (['los_matrix'], {'axis': '(1)'}), '(los_matrix, axis=1)\n', (5747, 5767), True, 'import numpy as np\n'), ((1270, 1286), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (1284, 1286), True, 'import numpy as np\n')] |
"""
Tests the LIME wrapper.
"""
# Author: <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
# License: new BSD
import pytest
# LIME is an optional dependency: skip this whole test module at collection
# time when it is absent, instead of erroring on import.
try:
    import lime
except ImportError:  # pragma: no cover
    pytest.skip(
        'Skipping lime wrapper tests -- lime missing.',
        allow_module_level=True)
else:
    # The import was only an availability probe; the wrapper imports it itself.
    del lime
import importlib
import sys
import numpy as np
import fatf.transparency.lime as ftl
import fatf.utils.models as fum
import fatf.utils.testing.imports as futi
import fatf.utils.testing.transparency as futt
from fatf.exceptions import IncompatibleModelError, IncorrectShapeError
# yapf: disable
# Deprecation notice the wrapper is expected to emit on every instantiation.
FUTURE_WARNING = (
    'The LIME wrapper will be deprecated in FAT Forensics version '
    '0.0.3. Please consider using the TabularBlimeyLime explainer '
    'class implemented in the fatf.transparency.predictions.'
    'surrogate_explainers module instead. Alternatively, you may '
    'consider building a custom surrogate explainer using the '
    'functionality implemented in FAT Forensics -- see the *Tabular '
    'Surrogates* how-to guide for more details.')
# Instances to be explained: a plain numpy vector and the equivalent
# structured-array row.
SAMPLE = np.array([0, 1, 0.08, 0.54])
SAMPLE_STRUCT = np.array(
    [(0, 1, 0.08, 0.54)],
    dtype=[('a', 'i'), ('b', 'i'), ('c', 'f'), ('d', 'f')])[0]
# Probabilistic classifier fitted on the shared test fixtures.
CLF = fum.KNN()
CLF.fit(futt.NUMERICAL_NP_ARRAY, futt.LABELS)
CLASS_NAMES = ['class0', 'class1', 'class2']
FEATURE_NAMES = ['feat0', 'feat1', 'feat2', 'feat3']
# Reference classification explanations: per-class lists of
# (feature condition, weight) pairs, compared fuzzily by the test helpers.
NUMERICAL_RESULTS = {
    'class0': [('feat0 <= 0.00', -0.415),
               ('0.50 < feat1 <= 1.00', -0.280),
               ('0.07 < feat2 <= 0.22', 0.037),
               ('0.34 < feat3 <= 0.58', -0.007)],
    'class1': [('0.50 < feat1 <= 1.00', 0.202),
               ('0.07 < feat2 <= 0.22', -0.076),
               ('feat0 <= 0.00', 0.019),
               ('0.34 < feat3 <= 0.58', -0.018)],
    'class2': [('feat0 <= 0.00', 0.395),
               ('0.50 < feat1 <= 1.00', 0.077),
               ('0.07 < feat2 <= 0.22', 0.039),
               ('0.34 < feat3 <= 0.58', 0.025)]
}
# Expected explanations when feat0/feat1 are declared categorical.
CATEGORICAL_RESULTS = {
    'class0': [('feat0=0', -0.413),
               ('feat1=1', -0.282),
               ('0.07 < feat2 <= 0.22', 0.0366),
               ('0.34 < feat3 <= 0.58', -0.00717)],
    'class1': [('feat1=1', 0.2048),
               ('0.07 < feat2 <= 0.22', -0.0767),
               ('feat0=0', 0.0179),
               ('0.34 < feat3 <= 0.58', -0.018)],
    'class2': [('feat0=0', 0.395),
               ('feat1=1', 0.077),
               ('0.07 < feat2 <= 0.22', 0.039),
               ('0.34 < feat3 <= 0.58', 0.025)]
}
# Expected explanation in regression mode (single list, no classes).
REGRESSION_RESULTS = [
    ('feat0 <= 0.00', 1.332),
    ('0.50 < feat1 <= 1.00', 0.767),
    ('0.34 < feat3 <= 0.58', 0.149),
    ('0.07 < feat2 <= 0.22', -0.048)
]
# yapf: enable
USER_WARNING_MODEL_PRED = ('Since both, a model and a predictive function, '
                           'are provided only the latter will be used.')
LOG_WARNING = 'The model can only be used for LIME in a regressor mode.'
# Regression-style wrapper exposing only the classifier's hard predictions.
CLF_NON_PROBA = futt.NonProbabilisticModel(CLF.predict)
def test_import_when_missing():
    """
    Tests importing :mod:`fatf.transparency.lime` module when LIME is missing.

    Reloading the module with lime hidden must emit a single ImportWarning
    with the installation hint, and the module must remain importable.
    """
    assert 'fatf.transparency.lime' in sys.modules
    expected_message = (
        'Lime package is not installed on your system. You must install it in '
        'order to use the fatf.transparency.lime module. One possibility is '
        'to install LIME alongside this package with: pip install fatf[lime].')
    with futi.module_import_tester('lime', when_missing=True):
        with pytest.warns(ImportWarning) as w:
            importlib.reload(ftl)
        assert len(w) == 1
        assert str(w[0].message) == expected_message
    assert 'fatf.transparency.lime' in sys.modules
def test_lime_init():
    """
    Tests :mod:`fatf.transparency.lime.Lime` object initialisation.
    This only looks into cases where the initialisation would fail.

    Every instantiation attempt is wrapped in ``pytest.warns(FutureWarning)``
    because the wrapper emits its deprecation notice before validating input.
    """
    # Expected error messages, verbatim, so the asserts can compare exact text.
    attribute_error = 'The following named parameters are not valid: {}.'
    shape_error_data = ('The data parameter must be a 2-dimensional numpy '
                        'array.')
    value_error_cat = 'LIME does not support non-numerical data arrays.'
    value_error = ("The mode must be either 'classification' or 'regression'. "
                   "'{}' given.")
    incompatible_model_error = ('LIME requires a model object to have a fit '
                                'method and optionally a predict_proba '
                                'method.')
    type_error_predictor = ('The predict_fn parameter is not callable -- it '
                            'has to be a function.')
    type_error_struct_indices = ('The categorical_features parameter either '
                                 'has to be a list, a numpy array or None.')
    incorrect_shape_struct_indices = ('categorical_features array/list is not '
                                      '1-dimensional.')
    value_error_struct_indices = ('Since categorical_features is an array of '
                                  'indices for a structured array, all of its '
                                  'elements should be strings.')
    value_error_struct_incorrect_indices = (
        'Indices given in the categorical_features parameter are not valid '
        'for the input data array.')
    #
    # Messages raised by explain_instance rather than the constructor.
    attribute_error_explain = ('The following named parameters are not valid: '
                               '{}.')
    incorrect_shape_error_explain = ('The instance to be explained should be '
                                     '1-dimensional.')
    value_error_explain = ('The instance to be explained should be purely '
                           'numerical -- LIME does not support categorical '
                           'features.')
    # Wrong named parameter
    with pytest.warns(FutureWarning) as warning:
        with pytest.raises(AttributeError) as exin:
            ftl.Lime(futt.NUMERICAL_NP_ARRAY, model=CLF, lorem='ipsum')
    assert str(exin.value) == attribute_error.format("{'lorem'}")
    assert len(warning) == 1
    assert str(warning[0].message) == FUTURE_WARNING
    # Not a 2-dimensional array
    with pytest.warns(FutureWarning) as warning:
        with pytest.raises(IncorrectShapeError) as exin:
            ftl.Lime(np.ones((6, 4, 4)))
    assert str(exin.value) == shape_error_data
    assert len(warning) == 1
    assert str(warning[0].message) == FUTURE_WARNING
    # Not a numerical array
    with pytest.warns(FutureWarning) as warning:
        with pytest.raises(ValueError) as exin:
            lime = ftl.Lime(np.ones((6, 4), dtype='U1'))
    assert str(exin.value) == value_error_cat
    assert len(warning) == 1
    assert str(warning[0].message) == FUTURE_WARNING
    # A structured data array with weird categorical indices type
    with pytest.warns(FutureWarning) as warning:
        with pytest.raises(TypeError) as exin:
            ftl.Lime(futt.NUMERICAL_STRUCT_ARRAY, categorical_features='')
    assert str(exin.value) == type_error_struct_indices
    assert len(warning) == 1
    assert str(warning[0].message) == FUTURE_WARNING
    # A structured data array with weird categorical indices shape
    with pytest.warns(FutureWarning) as warning:
        with pytest.raises(IncorrectShapeError) as exin:
            ftl.Lime(futt.NUMERICAL_STRUCT_ARRAY, categorical_features=[['a']])
    assert str(exin.value) == incorrect_shape_struct_indices
    assert len(warning) == 1
    assert str(warning[0].message) == FUTURE_WARNING
    # A structured data array with non-textual categorical indices
    with pytest.warns(FutureWarning) as warning:
        with pytest.raises(ValueError) as exin:
            ftl.Lime(
                futt.NUMERICAL_STRUCT_ARRAY,
                categorical_features=np.array([3, 2]))
    assert str(exin.value) == value_error_struct_indices
    assert len(warning) == 1
    assert str(warning[0].message) == FUTURE_WARNING
    # A structured data array with incorrect categorical indices
    with pytest.warns(FutureWarning) as warning:
        with pytest.raises(ValueError) as exin:
            ftl.Lime(
                futt.NUMERICAL_STRUCT_ARRAY,
                categorical_features=['a', 'e', 'b'])
    assert str(exin.value) == value_error_struct_incorrect_indices
    assert len(warning) == 1
    assert str(warning[0].message) == FUTURE_WARNING
    # Wrong operation mode
    with pytest.warns(FutureWarning) as warning:
        with pytest.raises(ValueError) as exin:
            ftl.Lime(futt.NUMERICAL_NP_ARRAY, mode='c')
    assert str(exin.value) == value_error.format('c')
    assert len(warning) == 1
    assert str(warning[0].message) == FUTURE_WARNING
    # Invalid model
    invalid_model = futt.InvalidModel()
    with pytest.warns(FutureWarning) as warning:
        with pytest.raises(IncompatibleModelError) as exin:
            ftl.Lime(
                futt.NUMERICAL_NP_ARRAY,
                model=invalid_model,
                mode='classification')
    assert str(exin.value) == incompatible_model_error
    assert len(warning) == 1
    assert str(warning[0].message) == FUTURE_WARNING
    with pytest.warns(FutureWarning) as warning:
        with pytest.raises(IncompatibleModelError) as exin:
            ftl.Lime(futt.NUMERICAL_NP_ARRAY, model='a', mode='classification')
    assert str(exin.value) == incompatible_model_error
    assert len(warning) == 1
    assert str(warning[0].message) == FUTURE_WARNING
    # Invalid predictive function
    with pytest.warns(FutureWarning) as warning:
        with pytest.raises(TypeError) as exin:
            ftl.Lime(
                futt.NUMERICAL_NP_ARRAY, predict_fn='a', mode='regression')
    assert str(exin.value) == type_error_predictor
    assert len(warning) == 1
    assert str(warning[0].message) == FUTURE_WARNING
    ###########################################################################
    # Test explain_instance for exceptions and errors
    with pytest.warns(FutureWarning) as warning:
        lime = ftl.Lime(futt.NUMERICAL_NP_ARRAY)
    assert len(warning) == 1
    assert str(warning[0].message) == FUTURE_WARNING
    # Incorrect parameter
    with pytest.raises(AttributeError) as exin:
        lime.explain_instance(SAMPLE, weird_named_argument='yes')
    assert str(exin.value) == attribute_error_explain.format(
        "{'weird_named_argument'}")
    # Incorrect shape
    with pytest.raises(IncorrectShapeError) as exin:
        lime.explain_instance(futt.NUMERICAL_STRUCT_ARRAY)
    assert str(exin.value) == incorrect_shape_error_explain
    # Not numerical
    with pytest.raises(ValueError) as exin:
        lime.explain_instance(np.ones((5, ), dtype='U1'))
    assert str(exin.value) == value_error_explain
def test_explain_instance_classification(caplog):
    """
    Tests :mod:`fatf.transparency.lime.Lime.explain_instance` method.
    These tests are for a classification task.

    The cases enumerate the combinations of (model / no model) x
    (constructor predict_fn / none) x (explain-time predict_fn / none).
    A non-probabilistic model additionally logs ``LOG_WARNING`` (regressor
    mode only); the log record counts below track exactly those events.
    """
    runtime_error_no_predictor = 'A predictive function is not available.'
    runtime_error_non_prob = ('The predictive model is not probabilistic. '
                              'Please specify a predictive function instead.')
    # Check logging
    assert len(caplog.records) == 0
    # Non-probabilistic model -- function -- probabilistic function
    with pytest.warns(None) as warning:
        lime = ftl.Lime(
            futt.NUMERICAL_NP_ARRAY,
            model=CLF_NON_PROBA,
            predict_fn=CLF.predict_proba,
            class_names=CLASS_NAMES,
            feature_names=FEATURE_NAMES)
    assert len(warning) == 2
    assert str(warning[0].message) == FUTURE_WARNING
    assert str(warning[1].message) == USER_WARNING_MODEL_PRED
    explained = lime.explain_instance(SAMPLE, predict_fn=CLF.predict_proba)
    assert futt.is_explanation_equal_list(explained, NUMERICAL_RESULTS)
    # Non-probabilistic model -- function -- no function
    with pytest.warns(None) as warning:
        lime = ftl.Lime(
            futt.NUMERICAL_NP_ARRAY,
            model=CLF_NON_PROBA,
            predict_fn=CLF.predict_proba,
            class_names=CLASS_NAMES,
            feature_names=FEATURE_NAMES)
    assert len(warning) == 2
    assert str(warning[0].message) == FUTURE_WARNING
    assert str(warning[1].message) == USER_WARNING_MODEL_PRED
    explained = lime.explain_instance(SAMPLE)
    assert futt.is_explanation_equal_list(explained, NUMERICAL_RESULTS)
    # Non-probabilistic model -- no function -- probabilistic function
    with pytest.warns(FutureWarning) as warning:
        lime = ftl.Lime(
            futt.NUMERICAL_NP_ARRAY,
            model=CLF_NON_PROBA,
            class_names=CLASS_NAMES,
            feature_names=FEATURE_NAMES)
    assert len(warning) == 1
    assert str(warning[0].message) == FUTURE_WARNING
    explained = lime.explain_instance(SAMPLE, predict_fn=CLF.predict_proba)
    assert futt.is_explanation_equal_list(explained, NUMERICAL_RESULTS)
    # Non-probabilistic model -- no function -- no function
    with pytest.warns(FutureWarning) as warning:
        lime = ftl.Lime(
            futt.NUMERICAL_NP_ARRAY,
            model=CLF_NON_PROBA,
            class_names=CLASS_NAMES,
            feature_names=FEATURE_NAMES)
    assert len(warning) == 1
    assert str(warning[0].message) == FUTURE_WARNING
    # Without any probabilistic predictor, classification cannot proceed.
    with pytest.raises(RuntimeError) as exin:
        lime.explain_instance(SAMPLE_STRUCT)
    assert str(exin.value) == runtime_error_non_prob
    # Check logging
    # Each of the four non-probabilistic-model constructions above logged once.
    assert len(caplog.records) == 4
    for i in range(4):
        assert caplog.records[i].levelname == 'WARNING'
        assert caplog.records[i].getMessage() == LOG_WARNING
    # No model -- function -- probabilistic function
    with pytest.warns(FutureWarning) as warning:
        lime = ftl.Lime(
            futt.NUMERICAL_STRUCT_ARRAY,
            predict_fn=CLF.predict_proba,
            class_names=CLASS_NAMES,
            feature_names=FEATURE_NAMES)
    assert len(warning) == 1
    assert str(warning[0].message) == FUTURE_WARNING
    explained = lime.explain_instance(SAMPLE, predict_fn=CLF.predict_proba)
    assert futt.is_explanation_equal_list(explained, NUMERICAL_RESULTS)
    # No model -- function -- no function
    with pytest.warns(FutureWarning) as warning:
        lime = ftl.Lime(
            futt.NUMERICAL_STRUCT_ARRAY,
            predict_fn=CLF.predict_proba,
            class_names=CLASS_NAMES,
            feature_names=FEATURE_NAMES)
    assert len(warning) == 1
    assert str(warning[0].message) == FUTURE_WARNING
    explained = lime.explain_instance(SAMPLE)
    assert futt.is_explanation_equal_list(explained, NUMERICAL_RESULTS)
    # No model -- no function -- probabilistic function
    with pytest.warns(FutureWarning) as warning:
        lime = ftl.Lime(
            futt.NUMERICAL_NP_ARRAY,
            class_names=CLASS_NAMES,
            feature_names=FEATURE_NAMES)
    assert len(warning) == 1
    assert str(warning[0].message) == FUTURE_WARNING
    explained = lime.explain_instance(SAMPLE, predict_fn=CLF.predict_proba)
    assert futt.is_explanation_equal_list(explained, NUMERICAL_RESULTS)
    # No model -- no function -- no function
    with pytest.warns(FutureWarning) as warning:
        lime = ftl.Lime(
            futt.NUMERICAL_NP_ARRAY,
            class_names=CLASS_NAMES,
            feature_names=FEATURE_NAMES)
    assert len(warning) == 1
    assert str(warning[0].message) == FUTURE_WARNING
    with pytest.raises(RuntimeError) as exin:
        lime.explain_instance(SAMPLE)
    assert str(exin.value) == runtime_error_no_predictor
    # Check logging
    # No new log records: none of the model-less constructions warn via logging.
    assert len(caplog.records) == 4
    # Probabilistic model -- probabilistic function -- empty call
    with pytest.warns(None) as warning:
        lime = ftl.Lime(
            futt.NUMERICAL_NP_ARRAY,
            model=CLF,
            predict_fn=CLF.predict_proba,
            class_names=CLASS_NAMES,
            feature_names=FEATURE_NAMES)
    assert len(warning) == 2
    assert str(warning[0].message) == FUTURE_WARNING
    assert str(warning[1].message) == USER_WARNING_MODEL_PRED
    explained = lime.explain_instance(SAMPLE_STRUCT)
    assert futt.is_explanation_equal_list(explained, NUMERICAL_RESULTS)
    #
    # Probabilistic model -- probabilistic function -- non-empty call
    with pytest.warns(None) as warning:
        lime = ftl.Lime(
            futt.NUMERICAL_NP_ARRAY,
            model=CLF,
            predict_fn=CLF.predict_proba,
            class_names=CLASS_NAMES,
            feature_names=FEATURE_NAMES)
    assert len(warning) == 2
    assert str(warning[0].message) == FUTURE_WARNING
    assert str(warning[1].message) == USER_WARNING_MODEL_PRED
    explained = lime.explain_instance(SAMPLE, predict_fn=CLF.predict_proba)
    assert futt.is_explanation_equal_list(explained, NUMERICAL_RESULTS)
    #
    # Probabilistic model -- no function -- empty call
    with pytest.warns(FutureWarning) as warning:
        lime = ftl.Lime(
            futt.NUMERICAL_STRUCT_ARRAY,
            model=CLF,
            class_names=CLASS_NAMES,
            feature_names=FEATURE_NAMES)
    assert len(warning) == 1
    assert str(warning[0].message) == FUTURE_WARNING
    explained = lime.explain_instance(SAMPLE)
    assert futt.is_explanation_equal_list(explained, NUMERICAL_RESULTS)
    #
    # Probabilistic model -- no function -- non-empty call
    with pytest.warns(FutureWarning) as warning:
        lime = ftl.Lime(
            futt.NUMERICAL_STRUCT_ARRAY,
            model=CLF,
            class_names=CLASS_NAMES,
            feature_names=FEATURE_NAMES)
    assert len(warning) == 1
    assert str(warning[0].message) == FUTURE_WARNING
    explained = lime.explain_instance(
        SAMPLE_STRUCT, predict_fn=CLF.predict_proba)
    assert futt.is_explanation_equal_list(explained, NUMERICAL_RESULTS)
    # Check logging
    assert len(caplog.records) == 4
    ###########################################################################
    # Test with categorical features: feat0 and feat1
    cat_feat = [0, 1]
    with pytest.warns(FutureWarning) as warning:
        lime = ftl.Lime(
            futt.NUMERICAL_NP_ARRAY,
            model=CLF,
            class_names=CLASS_NAMES,
            feature_names=FEATURE_NAMES,
            categorical_features=cat_feat)
    assert len(warning) == 1
    assert str(warning[0].message) == FUTURE_WARNING
    explained = lime.explain_instance(SAMPLE_STRUCT)
    assert futt.is_explanation_equal_list(CATEGORICAL_RESULTS, explained)
    # Same categorical features, addressed by name on the structured array.
    cat_feat = ['a', 'b']
    with pytest.warns(FutureWarning) as warning:
        lime = ftl.Lime(
            futt.NUMERICAL_STRUCT_ARRAY,
            model=CLF,
            class_names=CLASS_NAMES,
            feature_names=FEATURE_NAMES,
            categorical_features=cat_feat)
    assert len(warning) == 1
    assert str(warning[0].message) == FUTURE_WARNING
    explained = lime.explain_instance(SAMPLE)
    assert futt.is_explanation_equal_list(CATEGORICAL_RESULTS, explained)
    # Check logging
    assert len(caplog.records) == 4
def test_explain_instance_regression(caplog):
    """
    Tests :mod:`fatf.transparency.lime.Lime.explain_instance` method.
    These tests are for a regression task.

    In regression mode a non-probabilistic model is valid; it is only the
    first such construction that logs the regressor-mode warning.
    """
    # Check logging
    assert len(caplog.records) == 0
    # Regression a non-probabilistic model
    with pytest.warns(FutureWarning) as warning:
        lime = ftl.Lime(
            futt.NUMERICAL_STRUCT_ARRAY,
            mode='regression',
            model=CLF_NON_PROBA,
            class_names=CLASS_NAMES,
            feature_names=FEATURE_NAMES)
    assert len(warning) == 1
    assert str(warning[0].message) == FUTURE_WARNING
    explained = lime.explain_instance(SAMPLE)
    # Regression returns a flat list; wrap it so the dict-based helper applies.
    assert futt.is_explanation_equal_list({'a': explained},
                                          {'a': REGRESSION_RESULTS})
    # Check logging
    assert len(caplog.records) == 1
    assert caplog.records[0].levelname == 'WARNING'
    assert caplog.records[0].getMessage() == LOG_WARNING
    # Regression a probabilistic model
    with pytest.warns(FutureWarning) as warning:
        lime = ftl.Lime(
            futt.NUMERICAL_NP_ARRAY,
            mode='regression',
            model=CLF,
            class_names=CLASS_NAMES,
            feature_names=FEATURE_NAMES)
    assert len(warning) == 1
    assert str(warning[0].message) == FUTURE_WARNING
    explained = lime.explain_instance(SAMPLE_STRUCT)
    assert futt.is_explanation_equal_list({'a': explained},
                                          {'a': REGRESSION_RESULTS})
    # Regression with a model and function
    with pytest.warns(None) as warning:
        lime = ftl.Lime(
            futt.NUMERICAL_STRUCT_ARRAY,
            mode='regression',
            model=CLF,
            predict_fn=CLF_NON_PROBA.predict,
            class_names=CLASS_NAMES,
            feature_names=FEATURE_NAMES)
    assert len(warning) == 2
    assert str(warning[0].message) == FUTURE_WARNING
    assert str(warning[1].message) == USER_WARNING_MODEL_PRED
    explained = lime.explain_instance(SAMPLE_STRUCT)
    assert futt.is_explanation_equal_list({'a': explained},
                                          {'a': REGRESSION_RESULTS})
    # Regression without a model
    with pytest.warns(FutureWarning) as warning:
        lime = ftl.Lime(
            futt.NUMERICAL_NP_ARRAY,
            mode='regression',
            class_names=CLASS_NAMES,
            feature_names=FEATURE_NAMES)
    assert len(warning) == 1
    assert str(warning[0].message) == FUTURE_WARNING
    explained = lime.explain_instance(SAMPLE, predict_fn=CLF_NON_PROBA.predict)
    assert futt.is_explanation_equal_list({'a': explained},
                                          {'a': REGRESSION_RESULTS})
    # Check logging
    assert len(caplog.records) == 1
| [
"fatf.transparency.lime.Lime",
"fatf.utils.testing.transparency.InvalidModel",
"numpy.ones",
"fatf.utils.testing.imports.module_import_tester",
"fatf.utils.models.KNN",
"pytest.warns",
"numpy.array",
"pytest.raises",
"importlib.reload",
"fatf.utils.testing.transparency.NonProbabilisticModel",
"p... | [((1083, 1111), 'numpy.array', 'np.array', (['[0, 1, 0.08, 0.54]'], {}), '([0, 1, 0.08, 0.54])\n', (1091, 1111), True, 'import numpy as np\n'), ((1233, 1242), 'fatf.utils.models.KNN', 'fum.KNN', ([], {}), '()\n', (1240, 1242), True, 'import fatf.utils.models as fum\n'), ((2933, 2972), 'fatf.utils.testing.transparency.NonProbabilisticModel', 'futt.NonProbabilisticModel', (['CLF.predict'], {}), '(CLF.predict)\n', (2959, 2972), True, 'import fatf.utils.testing.transparency as futt\n'), ((1128, 1219), 'numpy.array', 'np.array', (['[(0, 1, 0.08, 0.54)]'], {'dtype': "[('a', 'i'), ('b', 'i'), ('c', 'f'), ('d', 'f')]"}), "([(0, 1, 0.08, 0.54)], dtype=[('a', 'i'), ('b', 'i'), ('c', 'f'), (\n 'd', 'f')])\n", (1136, 1219), True, 'import numpy as np\n'), ((8671, 8690), 'fatf.utils.testing.transparency.InvalidModel', 'futt.InvalidModel', ([], {}), '()\n', (8688, 8690), True, 'import fatf.utils.testing.transparency as futt\n'), ((11655, 11714), 'lime.explain_instance', 'lime.explain_instance', (['SAMPLE'], {'predict_fn': 'CLF.predict_proba'}), '(SAMPLE, predict_fn=CLF.predict_proba)\n', (11676, 11714), False, 'import lime\n'), ((11726, 11786), 'fatf.utils.testing.transparency.is_explanation_equal_list', 'futt.is_explanation_equal_list', (['explained', 'NUMERICAL_RESULTS'], {}), '(explained, NUMERICAL_RESULTS)\n', (11756, 11786), True, 'import fatf.utils.testing.transparency as futt\n'), ((12259, 12288), 'lime.explain_instance', 'lime.explain_instance', (['SAMPLE'], {}), '(SAMPLE)\n', (12280, 12288), False, 'import lime\n'), ((12300, 12360), 'fatf.utils.testing.transparency.is_explanation_equal_list', 'futt.is_explanation_equal_list', (['explained', 'NUMERICAL_RESULTS'], {}), '(explained, NUMERICAL_RESULTS)\n', (12330, 12360), True, 'import fatf.utils.testing.transparency as futt\n'), ((12752, 12811), 'lime.explain_instance', 'lime.explain_instance', (['SAMPLE'], {'predict_fn': 'CLF.predict_proba'}), '(SAMPLE, predict_fn=CLF.predict_proba)\n', (12773, 12811), False, 
'import lime\n'), ((12823, 12883), 'fatf.utils.testing.transparency.is_explanation_equal_list', 'futt.is_explanation_equal_list', (['explained', 'NUMERICAL_RESULTS'], {}), '(explained, NUMERICAL_RESULTS)\n', (12853, 12883), True, 'import fatf.utils.testing.transparency as futt\n'), ((13976, 14035), 'lime.explain_instance', 'lime.explain_instance', (['SAMPLE'], {'predict_fn': 'CLF.predict_proba'}), '(SAMPLE, predict_fn=CLF.predict_proba)\n', (13997, 14035), False, 'import lime\n'), ((14047, 14107), 'fatf.utils.testing.transparency.is_explanation_equal_list', 'futt.is_explanation_equal_list', (['explained', 'NUMERICAL_RESULTS'], {}), '(explained, NUMERICAL_RESULTS)\n', (14077, 14107), True, 'import fatf.utils.testing.transparency as futt\n'), ((14483, 14512), 'lime.explain_instance', 'lime.explain_instance', (['SAMPLE'], {}), '(SAMPLE)\n', (14504, 14512), False, 'import lime\n'), ((14524, 14584), 'fatf.utils.testing.transparency.is_explanation_equal_list', 'futt.is_explanation_equal_list', (['explained', 'NUMERICAL_RESULTS'], {}), '(explained, NUMERICAL_RESULTS)\n', (14554, 14584), True, 'import fatf.utils.testing.transparency as futt\n'), ((14928, 14987), 'lime.explain_instance', 'lime.explain_instance', (['SAMPLE'], {'predict_fn': 'CLF.predict_proba'}), '(SAMPLE, predict_fn=CLF.predict_proba)\n', (14949, 14987), False, 'import lime\n'), ((14999, 15059), 'fatf.utils.testing.transparency.is_explanation_equal_list', 'futt.is_explanation_equal_list', (['explained', 'NUMERICAL_RESULTS'], {}), '(explained, NUMERICAL_RESULTS)\n', (15029, 15059), True, 'import fatf.utils.testing.transparency as futt\n'), ((16046, 16082), 'lime.explain_instance', 'lime.explain_instance', (['SAMPLE_STRUCT'], {}), '(SAMPLE_STRUCT)\n', (16067, 16082), False, 'import lime\n'), ((16094, 16154), 'fatf.utils.testing.transparency.is_explanation_equal_list', 'futt.is_explanation_equal_list', (['explained', 'NUMERICAL_RESULTS'], {}), '(explained, NUMERICAL_RESULTS)\n', (16124, 16154), True, 'import 
fatf.utils.testing.transparency as futt\n'), ((16636, 16695), 'lime.explain_instance', 'lime.explain_instance', (['SAMPLE'], {'predict_fn': 'CLF.predict_proba'}), '(SAMPLE, predict_fn=CLF.predict_proba)\n', (16657, 16695), False, 'import lime\n'), ((16707, 16767), 'fatf.utils.testing.transparency.is_explanation_equal_list', 'futt.is_explanation_equal_list', (['explained', 'NUMERICAL_RESULTS'], {}), '(explained, NUMERICAL_RESULTS)\n', (16737, 16767), True, 'import fatf.utils.testing.transparency as futt\n'), ((17143, 17172), 'lime.explain_instance', 'lime.explain_instance', (['SAMPLE'], {}), '(SAMPLE)\n', (17164, 17172), False, 'import lime\n'), ((17184, 17244), 'fatf.utils.testing.transparency.is_explanation_equal_list', 'futt.is_explanation_equal_list', (['explained', 'NUMERICAL_RESULTS'], {}), '(explained, NUMERICAL_RESULTS)\n', (17214, 17244), True, 'import fatf.utils.testing.transparency as futt\n'), ((17624, 17690), 'lime.explain_instance', 'lime.explain_instance', (['SAMPLE_STRUCT'], {'predict_fn': 'CLF.predict_proba'}), '(SAMPLE_STRUCT, predict_fn=CLF.predict_proba)\n', (17645, 17690), False, 'import lime\n'), ((17711, 17771), 'fatf.utils.testing.transparency.is_explanation_equal_list', 'futt.is_explanation_equal_list', (['explained', 'NUMERICAL_RESULTS'], {}), '(explained, NUMERICAL_RESULTS)\n', (17741, 17771), True, 'import fatf.utils.testing.transparency as futt\n'), ((18340, 18376), 'lime.explain_instance', 'lime.explain_instance', (['SAMPLE_STRUCT'], {}), '(SAMPLE_STRUCT)\n', (18361, 18376), False, 'import lime\n'), ((18388, 18450), 'fatf.utils.testing.transparency.is_explanation_equal_list', 'futt.is_explanation_equal_list', (['CATEGORICAL_RESULTS', 'explained'], {}), '(CATEGORICAL_RESULTS, explained)\n', (18418, 18450), True, 'import fatf.utils.testing.transparency as futt\n'), ((18835, 18864), 'lime.explain_instance', 'lime.explain_instance', (['SAMPLE'], {}), '(SAMPLE)\n', (18856, 18864), False, 'import lime\n'), ((18876, 18938), 
'fatf.utils.testing.transparency.is_explanation_equal_list', 'futt.is_explanation_equal_list', (['CATEGORICAL_RESULTS', 'explained'], {}), '(CATEGORICAL_RESULTS, explained)\n', (18906, 18938), True, 'import fatf.utils.testing.transparency as futt\n'), ((19629, 19658), 'lime.explain_instance', 'lime.explain_instance', (['SAMPLE'], {}), '(SAMPLE)\n', (19650, 19658), False, 'import lime\n'), ((19670, 19745), 'fatf.utils.testing.transparency.is_explanation_equal_list', 'futt.is_explanation_equal_list', (["{'a': explained}", "{'a': REGRESSION_RESULTS}"], {}), "({'a': explained}, {'a': REGRESSION_RESULTS})\n", (19700, 19745), True, 'import fatf.utils.testing.transparency as futt\n'), ((20335, 20371), 'lime.explain_instance', 'lime.explain_instance', (['SAMPLE_STRUCT'], {}), '(SAMPLE_STRUCT)\n', (20356, 20371), False, 'import lime\n'), ((20383, 20458), 'fatf.utils.testing.transparency.is_explanation_equal_list', 'futt.is_explanation_equal_list', (["{'a': explained}", "{'a': REGRESSION_RESULTS}"], {}), "({'a': explained}, {'a': REGRESSION_RESULTS})\n", (20413, 20458), True, 'import fatf.utils.testing.transparency as futt\n'), ((20989, 21025), 'lime.explain_instance', 'lime.explain_instance', (['SAMPLE_STRUCT'], {}), '(SAMPLE_STRUCT)\n', (21010, 21025), False, 'import lime\n'), ((21037, 21112), 'fatf.utils.testing.transparency.is_explanation_equal_list', 'futt.is_explanation_equal_list', (["{'a': explained}", "{'a': REGRESSION_RESULTS}"], {}), "({'a': explained}, {'a': REGRESSION_RESULTS})\n", (21067, 21112), True, 'import fatf.utils.testing.transparency as futt\n'), ((21507, 21570), 'lime.explain_instance', 'lime.explain_instance', (['SAMPLE'], {'predict_fn': 'CLF_NON_PROBA.predict'}), '(SAMPLE, predict_fn=CLF_NON_PROBA.predict)\n', (21528, 21570), False, 'import lime\n'), ((21582, 21657), 'fatf.utils.testing.transparency.is_explanation_equal_list', 'futt.is_explanation_equal_list', (["{'a': explained}", "{'a': REGRESSION_RESULTS}"], {}), "({'a': explained}, {'a': 
REGRESSION_RESULTS})\n", (21612, 21657), True, 'import fatf.utils.testing.transparency as futt\n'), ((186, 274), 'pytest.skip', 'pytest.skip', (['"""Skipping lime wrapper tests -- lime missing."""'], {'allow_module_level': '(True)'}), "('Skipping lime wrapper tests -- lime missing.',\n allow_module_level=True)\n", (197, 274), False, 'import pytest\n'), ((3420, 3472), 'fatf.utils.testing.imports.module_import_tester', 'futi.module_import_tester', (['"""lime"""'], {'when_missing': '(True)'}), "('lime', when_missing=True)\n", (3445, 3472), True, 'import fatf.utils.testing.imports as futi\n'), ((5717, 5744), 'pytest.warns', 'pytest.warns', (['FutureWarning'], {}), '(FutureWarning)\n', (5729, 5744), False, 'import pytest\n'), ((6075, 6102), 'pytest.warns', 'pytest.warns', (['FutureWarning'], {}), '(FutureWarning)\n', (6087, 6102), False, 'import pytest\n'), ((6384, 6411), 'pytest.warns', 'pytest.warns', (['FutureWarning'], {}), '(FutureWarning)\n', (6396, 6411), False, 'import pytest\n'), ((6737, 6764), 'pytest.warns', 'pytest.warns', (['FutureWarning'], {}), '(FutureWarning)\n', (6749, 6764), False, 'import pytest\n'), ((7118, 7145), 'pytest.warns', 'pytest.warns', (['FutureWarning'], {}), '(FutureWarning)\n', (7130, 7145), False, 'import pytest\n'), ((7519, 7546), 'pytest.warns', 'pytest.warns', (['FutureWarning'], {}), '(FutureWarning)\n', (7531, 7546), False, 'import pytest\n'), ((7947, 7974), 'pytest.warns', 'pytest.warns', (['FutureWarning'], {}), '(FutureWarning)\n', (7959, 7974), False, 'import pytest\n'), ((8346, 8373), 'pytest.warns', 'pytest.warns', (['FutureWarning'], {}), '(FutureWarning)\n', (8358, 8373), False, 'import pytest\n'), ((8700, 8727), 'pytest.warns', 'pytest.warns', (['FutureWarning'], {}), '(FutureWarning)\n', (8712, 8727), False, 'import pytest\n'), ((9089, 9116), 'pytest.warns', 'pytest.warns', (['FutureWarning'], {}), '(FutureWarning)\n', (9101, 9116), False, 'import pytest\n'), ((9454, 9481), 'pytest.warns', 'pytest.warns', 
(['FutureWarning'], {}), '(FutureWarning)\n', (9466, 9481), False, 'import pytest\n'), ((9920, 9947), 'pytest.warns', 'pytest.warns', (['FutureWarning'], {}), '(FutureWarning)\n', (9932, 9947), False, 'import pytest\n'), ((9975, 10008), 'fatf.transparency.lime.Lime', 'ftl.Lime', (['futt.NUMERICAL_NP_ARRAY'], {}), '(futt.NUMERICAL_NP_ARRAY)\n', (9983, 10008), True, 'import fatf.transparency.lime as ftl\n'), ((10127, 10156), 'pytest.raises', 'pytest.raises', (['AttributeError'], {}), '(AttributeError)\n', (10140, 10156), False, 'import pytest\n'), ((10174, 10231), 'lime.explain_instance', 'lime.explain_instance', (['SAMPLE'], {'weird_named_argument': '"""yes"""'}), "(SAMPLE, weird_named_argument='yes')\n", (10195, 10231), False, 'import lime\n'), ((10362, 10396), 'pytest.raises', 'pytest.raises', (['IncorrectShapeError'], {}), '(IncorrectShapeError)\n', (10375, 10396), False, 'import pytest\n'), ((10414, 10464), 'lime.explain_instance', 'lime.explain_instance', (['futt.NUMERICAL_STRUCT_ARRAY'], {}), '(futt.NUMERICAL_STRUCT_ARRAY)\n', (10435, 10464), False, 'import lime\n'), ((10555, 10580), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (10568, 10580), False, 'import pytest\n'), ((11249, 11267), 'pytest.warns', 'pytest.warns', (['None'], {}), '(None)\n', (11261, 11267), False, 'import pytest\n'), ((11295, 11438), 'fatf.transparency.lime.Lime', 'ftl.Lime', (['futt.NUMERICAL_NP_ARRAY'], {'model': 'CLF_NON_PROBA', 'predict_fn': 'CLF.predict_proba', 'class_names': 'CLASS_NAMES', 'feature_names': 'FEATURE_NAMES'}), '(futt.NUMERICAL_NP_ARRAY, model=CLF_NON_PROBA, predict_fn=CLF.\n predict_proba, class_names=CLASS_NAMES, feature_names=FEATURE_NAMES)\n', (11303, 11438), True, 'import fatf.transparency.lime as ftl\n'), ((11853, 11871), 'pytest.warns', 'pytest.warns', (['None'], {}), '(None)\n', (11865, 11871), False, 'import pytest\n'), ((11899, 12042), 'fatf.transparency.lime.Lime', 'ftl.Lime', (['futt.NUMERICAL_NP_ARRAY'], {'model': 
'CLF_NON_PROBA', 'predict_fn': 'CLF.predict_proba', 'class_names': 'CLASS_NAMES', 'feature_names': 'FEATURE_NAMES'}), '(futt.NUMERICAL_NP_ARRAY, model=CLF_NON_PROBA, predict_fn=CLF.\n predict_proba, class_names=CLASS_NAMES, feature_names=FEATURE_NAMES)\n', (11907, 12042), True, 'import fatf.transparency.lime as ftl\n'), ((12441, 12468), 'pytest.warns', 'pytest.warns', (['FutureWarning'], {}), '(FutureWarning)\n', (12453, 12468), False, 'import pytest\n'), ((12496, 12609), 'fatf.transparency.lime.Lime', 'ftl.Lime', (['futt.NUMERICAL_NP_ARRAY'], {'model': 'CLF_NON_PROBA', 'class_names': 'CLASS_NAMES', 'feature_names': 'FEATURE_NAMES'}), '(futt.NUMERICAL_NP_ARRAY, model=CLF_NON_PROBA, class_names=\n CLASS_NAMES, feature_names=FEATURE_NAMES)\n', (12504, 12609), True, 'import fatf.transparency.lime as ftl\n'), ((12953, 12980), 'pytest.warns', 'pytest.warns', (['FutureWarning'], {}), '(FutureWarning)\n', (12965, 12980), False, 'import pytest\n'), ((13008, 13121), 'fatf.transparency.lime.Lime', 'ftl.Lime', (['futt.NUMERICAL_NP_ARRAY'], {'model': 'CLF_NON_PROBA', 'class_names': 'CLASS_NAMES', 'feature_names': 'FEATURE_NAMES'}), '(futt.NUMERICAL_NP_ARRAY, model=CLF_NON_PROBA, class_names=\n CLASS_NAMES, feature_names=FEATURE_NAMES)\n', (13016, 13121), True, 'import fatf.transparency.lime as ftl\n'), ((13257, 13284), 'pytest.raises', 'pytest.raises', (['RuntimeError'], {}), '(RuntimeError)\n', (13270, 13284), False, 'import pytest\n'), ((13302, 13338), 'lime.explain_instance', 'lime.explain_instance', (['SAMPLE_STRUCT'], {}), '(SAMPLE_STRUCT)\n', (13323, 13338), False, 'import lime\n'), ((13652, 13679), 'pytest.warns', 'pytest.warns', (['FutureWarning'], {}), '(FutureWarning)\n', (13664, 13679), False, 'import pytest\n'), ((13707, 13832), 'fatf.transparency.lime.Lime', 'ftl.Lime', (['futt.NUMERICAL_STRUCT_ARRAY'], {'predict_fn': 'CLF.predict_proba', 'class_names': 'CLASS_NAMES', 'feature_names': 'FEATURE_NAMES'}), '(futt.NUMERICAL_STRUCT_ARRAY, 
predict_fn=CLF.predict_proba,\n class_names=CLASS_NAMES, feature_names=FEATURE_NAMES)\n', (13715, 13832), True, 'import fatf.transparency.lime as ftl\n'), ((14159, 14186), 'pytest.warns', 'pytest.warns', (['FutureWarning'], {}), '(FutureWarning)\n', (14171, 14186), False, 'import pytest\n'), ((14214, 14339), 'fatf.transparency.lime.Lime', 'ftl.Lime', (['futt.NUMERICAL_STRUCT_ARRAY'], {'predict_fn': 'CLF.predict_proba', 'class_names': 'CLASS_NAMES', 'feature_names': 'FEATURE_NAMES'}), '(futt.NUMERICAL_STRUCT_ARRAY, predict_fn=CLF.predict_proba,\n class_names=CLASS_NAMES, feature_names=FEATURE_NAMES)\n', (14222, 14339), True, 'import fatf.transparency.lime as ftl\n'), ((14650, 14677), 'pytest.warns', 'pytest.warns', (['FutureWarning'], {}), '(FutureWarning)\n', (14662, 14677), False, 'import pytest\n'), ((14705, 14797), 'fatf.transparency.lime.Lime', 'ftl.Lime', (['futt.NUMERICAL_NP_ARRAY'], {'class_names': 'CLASS_NAMES', 'feature_names': 'FEATURE_NAMES'}), '(futt.NUMERICAL_NP_ARRAY, class_names=CLASS_NAMES, feature_names=\n FEATURE_NAMES)\n', (14713, 14797), True, 'import fatf.transparency.lime as ftl\n'), ((15114, 15141), 'pytest.warns', 'pytest.warns', (['FutureWarning'], {}), '(FutureWarning)\n', (15126, 15141), False, 'import pytest\n'), ((15169, 15261), 'fatf.transparency.lime.Lime', 'ftl.Lime', (['futt.NUMERICAL_NP_ARRAY'], {'class_names': 'CLASS_NAMES', 'feature_names': 'FEATURE_NAMES'}), '(futt.NUMERICAL_NP_ARRAY, class_names=CLASS_NAMES, feature_names=\n FEATURE_NAMES)\n', (15177, 15261), True, 'import fatf.transparency.lime as ftl\n'), ((15385, 15412), 'pytest.raises', 'pytest.raises', (['RuntimeError'], {}), '(RuntimeError)\n', (15398, 15412), False, 'import pytest\n'), ((15430, 15459), 'lime.explain_instance', 'lime.explain_instance', (['SAMPLE'], {}), '(SAMPLE)\n', (15451, 15459), False, 'import lime\n'), ((15650, 15668), 'pytest.warns', 'pytest.warns', (['None'], {}), '(None)\n', (15662, 15668), False, 'import pytest\n'), ((15696, 15828), 
'fatf.transparency.lime.Lime', 'ftl.Lime', (['futt.NUMERICAL_NP_ARRAY'], {'model': 'CLF', 'predict_fn': 'CLF.predict_proba', 'class_names': 'CLASS_NAMES', 'feature_names': 'FEATURE_NAMES'}), '(futt.NUMERICAL_NP_ARRAY, model=CLF, predict_fn=CLF.predict_proba,\n class_names=CLASS_NAMES, feature_names=FEATURE_NAMES)\n', (15704, 15828), True, 'import fatf.transparency.lime as ftl\n'), ((16240, 16258), 'pytest.warns', 'pytest.warns', (['None'], {}), '(None)\n', (16252, 16258), False, 'import pytest\n'), ((16286, 16418), 'fatf.transparency.lime.Lime', 'ftl.Lime', (['futt.NUMERICAL_NP_ARRAY'], {'model': 'CLF', 'predict_fn': 'CLF.predict_proba', 'class_names': 'CLASS_NAMES', 'feature_names': 'FEATURE_NAMES'}), '(futt.NUMERICAL_NP_ARRAY, model=CLF, predict_fn=CLF.predict_proba,\n class_names=CLASS_NAMES, feature_names=FEATURE_NAMES)\n', (16294, 16418), True, 'import fatf.transparency.lime as ftl\n'), ((16838, 16865), 'pytest.warns', 'pytest.warns', (['FutureWarning'], {}), '(FutureWarning)\n', (16850, 16865), False, 'import pytest\n'), ((16893, 16999), 'fatf.transparency.lime.Lime', 'ftl.Lime', (['futt.NUMERICAL_STRUCT_ARRAY'], {'model': 'CLF', 'class_names': 'CLASS_NAMES', 'feature_names': 'FEATURE_NAMES'}), '(futt.NUMERICAL_STRUCT_ARRAY, model=CLF, class_names=CLASS_NAMES,\n feature_names=FEATURE_NAMES)\n', (16901, 16999), True, 'import fatf.transparency.lime as ftl\n'), ((17319, 17346), 'pytest.warns', 'pytest.warns', (['FutureWarning'], {}), '(FutureWarning)\n', (17331, 17346), False, 'import pytest\n'), ((17374, 17480), 'fatf.transparency.lime.Lime', 'ftl.Lime', (['futt.NUMERICAL_STRUCT_ARRAY'], {'model': 'CLF', 'class_names': 'CLASS_NAMES', 'feature_names': 'FEATURE_NAMES'}), '(futt.NUMERICAL_STRUCT_ARRAY, model=CLF, class_names=CLASS_NAMES,\n feature_names=FEATURE_NAMES)\n', (17382, 17480), True, 'import fatf.transparency.lime as ftl\n'), ((17996, 18023), 'pytest.warns', 'pytest.warns', (['FutureWarning'], {}), '(FutureWarning)\n', (18008, 18023), False, 'import 
pytest\n'), ((18051, 18184), 'fatf.transparency.lime.Lime', 'ftl.Lime', (['futt.NUMERICAL_NP_ARRAY'], {'model': 'CLF', 'class_names': 'CLASS_NAMES', 'feature_names': 'FEATURE_NAMES', 'categorical_features': 'cat_feat'}), '(futt.NUMERICAL_NP_ARRAY, model=CLF, class_names=CLASS_NAMES,\n feature_names=FEATURE_NAMES, categorical_features=cat_feat)\n', (18059, 18184), True, 'import fatf.transparency.lime as ftl\n'), ((18487, 18514), 'pytest.warns', 'pytest.warns', (['FutureWarning'], {}), '(FutureWarning)\n', (18499, 18514), False, 'import pytest\n'), ((18542, 18679), 'fatf.transparency.lime.Lime', 'ftl.Lime', (['futt.NUMERICAL_STRUCT_ARRAY'], {'model': 'CLF', 'class_names': 'CLASS_NAMES', 'feature_names': 'FEATURE_NAMES', 'categorical_features': 'cat_feat'}), '(futt.NUMERICAL_STRUCT_ARRAY, model=CLF, class_names=CLASS_NAMES,\n feature_names=FEATURE_NAMES, categorical_features=cat_feat)\n', (18550, 18679), True, 'import fatf.transparency.lime as ftl\n'), ((19283, 19310), 'pytest.warns', 'pytest.warns', (['FutureWarning'], {}), '(FutureWarning)\n', (19295, 19310), False, 'import pytest\n'), ((19338, 19474), 'fatf.transparency.lime.Lime', 'ftl.Lime', (['futt.NUMERICAL_STRUCT_ARRAY'], {'mode': '"""regression"""', 'model': 'CLF_NON_PROBA', 'class_names': 'CLASS_NAMES', 'feature_names': 'FEATURE_NAMES'}), "(futt.NUMERICAL_STRUCT_ARRAY, mode='regression', model=\n CLF_NON_PROBA, class_names=CLASS_NAMES, feature_names=FEATURE_NAMES)\n", (19346, 19474), True, 'import fatf.transparency.lime as ftl\n'), ((20003, 20030), 'pytest.warns', 'pytest.warns', (['FutureWarning'], {}), '(FutureWarning)\n', (20015, 20030), False, 'import pytest\n'), ((20058, 20180), 'fatf.transparency.lime.Lime', 'ftl.Lime', (['futt.NUMERICAL_NP_ARRAY'], {'mode': '"""regression"""', 'model': 'CLF', 'class_names': 'CLASS_NAMES', 'feature_names': 'FEATURE_NAMES'}), "(futt.NUMERICAL_NP_ARRAY, mode='regression', model=CLF, class_names\n =CLASS_NAMES, feature_names=FEATURE_NAMES)\n", (20066, 20180), True, 
'import fatf.transparency.lime as ftl\n'), ((20554, 20572), 'pytest.warns', 'pytest.warns', (['None'], {}), '(None)\n', (20566, 20572), False, 'import pytest\n'), ((20600, 20763), 'fatf.transparency.lime.Lime', 'ftl.Lime', (['futt.NUMERICAL_STRUCT_ARRAY'], {'mode': '"""regression"""', 'model': 'CLF', 'predict_fn': 'CLF_NON_PROBA.predict', 'class_names': 'CLASS_NAMES', 'feature_names': 'FEATURE_NAMES'}), "(futt.NUMERICAL_STRUCT_ARRAY, mode='regression', model=CLF,\n predict_fn=CLF_NON_PROBA.predict, class_names=CLASS_NAMES,\n feature_names=FEATURE_NAMES)\n", (20608, 20763), True, 'import fatf.transparency.lime as ftl\n'), ((21198, 21225), 'pytest.warns', 'pytest.warns', (['FutureWarning'], {}), '(FutureWarning)\n', (21210, 21225), False, 'import pytest\n'), ((21253, 21364), 'fatf.transparency.lime.Lime', 'ftl.Lime', (['futt.NUMERICAL_NP_ARRAY'], {'mode': '"""regression"""', 'class_names': 'CLASS_NAMES', 'feature_names': 'FEATURE_NAMES'}), "(futt.NUMERICAL_NP_ARRAY, mode='regression', class_names=\n CLASS_NAMES, feature_names=FEATURE_NAMES)\n", (21261, 21364), True, 'import fatf.transparency.lime as ftl\n'), ((3487, 3514), 'pytest.warns', 'pytest.warns', (['ImportWarning'], {}), '(ImportWarning)\n', (3499, 3514), False, 'import pytest\n'), ((3533, 3554), 'importlib.reload', 'importlib.reload', (['ftl'], {}), '(ftl)\n', (3549, 3554), False, 'import importlib\n'), ((5770, 5799), 'pytest.raises', 'pytest.raises', (['AttributeError'], {}), '(AttributeError)\n', (5783, 5799), False, 'import pytest\n'), ((5821, 5880), 'fatf.transparency.lime.Lime', 'ftl.Lime', (['futt.NUMERICAL_NP_ARRAY'], {'model': 'CLF', 'lorem': '"""ipsum"""'}), "(futt.NUMERICAL_NP_ARRAY, model=CLF, lorem='ipsum')\n", (5829, 5880), True, 'import fatf.transparency.lime as ftl\n'), ((6128, 6162), 'pytest.raises', 'pytest.raises', (['IncorrectShapeError'], {}), '(IncorrectShapeError)\n', (6141, 6162), False, 'import pytest\n'), ((6437, 6462), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), 
'(ValueError)\n', (6450, 6462), False, 'import pytest\n'), ((6790, 6814), 'pytest.raises', 'pytest.raises', (['TypeError'], {}), '(TypeError)\n', (6803, 6814), False, 'import pytest\n'), ((6836, 6898), 'fatf.transparency.lime.Lime', 'ftl.Lime', (['futt.NUMERICAL_STRUCT_ARRAY'], {'categorical_features': '""""""'}), "(futt.NUMERICAL_STRUCT_ARRAY, categorical_features='')\n", (6844, 6898), True, 'import fatf.transparency.lime as ftl\n'), ((7171, 7205), 'pytest.raises', 'pytest.raises', (['IncorrectShapeError'], {}), '(IncorrectShapeError)\n', (7184, 7205), False, 'import pytest\n'), ((7227, 7294), 'fatf.transparency.lime.Lime', 'ftl.Lime', (['futt.NUMERICAL_STRUCT_ARRAY'], {'categorical_features': "[['a']]"}), "(futt.NUMERICAL_STRUCT_ARRAY, categorical_features=[['a']])\n", (7235, 7294), True, 'import fatf.transparency.lime as ftl\n'), ((7572, 7597), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (7585, 7597), False, 'import pytest\n'), ((8000, 8025), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (8013, 8025), False, 'import pytest\n'), ((8047, 8122), 'fatf.transparency.lime.Lime', 'ftl.Lime', (['futt.NUMERICAL_STRUCT_ARRAY'], {'categorical_features': "['a', 'e', 'b']"}), "(futt.NUMERICAL_STRUCT_ARRAY, categorical_features=['a', 'e', 'b'])\n", (8055, 8122), True, 'import fatf.transparency.lime as ftl\n'), ((8399, 8424), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (8412, 8424), False, 'import pytest\n'), ((8446, 8489), 'fatf.transparency.lime.Lime', 'ftl.Lime', (['futt.NUMERICAL_NP_ARRAY'], {'mode': '"""c"""'}), "(futt.NUMERICAL_NP_ARRAY, mode='c')\n", (8454, 8489), True, 'import fatf.transparency.lime as ftl\n'), ((8753, 8790), 'pytest.raises', 'pytest.raises', (['IncompatibleModelError'], {}), '(IncompatibleModelError)\n', (8766, 8790), False, 'import pytest\n'), ((8812, 8889), 'fatf.transparency.lime.Lime', 'ftl.Lime', (['futt.NUMERICAL_NP_ARRAY'], {'model': 'invalid_model', 'mode': 
'"""classification"""'}), "(futt.NUMERICAL_NP_ARRAY, model=invalid_model, mode='classification')\n", (8820, 8889), True, 'import fatf.transparency.lime as ftl\n'), ((9142, 9179), 'pytest.raises', 'pytest.raises', (['IncompatibleModelError'], {}), '(IncompatibleModelError)\n', (9155, 9179), False, 'import pytest\n'), ((9201, 9268), 'fatf.transparency.lime.Lime', 'ftl.Lime', (['futt.NUMERICAL_NP_ARRAY'], {'model': '"""a"""', 'mode': '"""classification"""'}), "(futt.NUMERICAL_NP_ARRAY, model='a', mode='classification')\n", (9209, 9268), True, 'import fatf.transparency.lime as ftl\n'), ((9507, 9531), 'pytest.raises', 'pytest.raises', (['TypeError'], {}), '(TypeError)\n', (9520, 9531), False, 'import pytest\n'), ((9553, 9621), 'fatf.transparency.lime.Lime', 'ftl.Lime', (['futt.NUMERICAL_NP_ARRAY'], {'predict_fn': '"""a"""', 'mode': '"""regression"""'}), "(futt.NUMERICAL_NP_ARRAY, predict_fn='a', mode='regression')\n", (9561, 9621), True, 'import fatf.transparency.lime as ftl\n'), ((10620, 10645), 'numpy.ones', 'np.ones', (['(5,)'], {'dtype': '"""U1"""'}), "((5,), dtype='U1')\n", (10627, 10645), True, 'import numpy as np\n'), ((6193, 6211), 'numpy.ones', 'np.ones', (['(6, 4, 4)'], {}), '((6, 4, 4))\n', (6200, 6211), True, 'import numpy as np\n'), ((6500, 6527), 'numpy.ones', 'np.ones', (['(6, 4)'], {'dtype': '"""U1"""'}), "((6, 4), dtype='U1')\n", (6507, 6527), True, 'import numpy as np\n'), ((7711, 7727), 'numpy.array', 'np.array', (['[3, 2]'], {}), '([3, 2])\n', (7719, 7727), True, 'import numpy as np\n')] |
import os
import sys
from datetime import datetime, timedelta
import numpy as np
# Display switches: when True the corresponding figure is shown on screen
# (plt.show) instead of being saved under png/ (see plot_hist / plot_bar).
quick_hist = False
quick_bar = True
#quick_bar = False
def d4_computation_time_nparray( top='' ):
    """Parse the SCALE-LETKF ``job.o`` logs under *top* and return per-cycle
    timing data aligned on NaN-padded arrays.

    Parameters
    ----------
    top : str
        Parent directory; each sub-directory is expected to hold one
        ``job.o`` log file.

    Returns
    -------
    (ftimes, ctimes, DETAIL)
        ftimes : list of forecast wall-clock times (s), from
            '[Info:fcst] End forecast' lines.
        ctimes : list of DA-cycle times (s), from '[Info:DA]' lines.
        DETAIL : dict mapping timer-category name -> np.ndarray with one
            slot per SCALE cycle; categories absent from a cycle stay NaN.
    """
    dirs = [ f.name for f in os.scandir( top ) ] #if f.is_file() ]
    path_l = []
    ftimes = []
    ctimes = []
    # Prepare file path list
    for dir_ in dirs:
        fname = 'job.o' #[ f.name for f in os.scandir( os.path.join( top, dir_ ) ) ] #if f.is_file() ]
        path_l.append( os.path.join( top, dir_, fname ) )
    scale_l = []
    # First pass: collect SCALE timer values so the number of cycles
    # (= scale_l size) is known before the NaN arrays are allocated.
    for path in path_l:
        # NOTE(review): 'break' stops at the FIRST missing log and skips any
        # later directories that do exist -- 'continue' may be intended.
        if not os.path.isfile( path ):
            break
        with open( path ) as f:
            lines = f.readlines()
            for l in lines:
                if '[Info:fcst] End forecast' in l:
                    data = l.split()
                    try:
                        ftimes.append( float( data[7] ) )
                    except:
                        print( "Failed", data )
                elif '[Info:DA]' in l:
                    data = l.split()
                    try:
                        ctimes.append( float( data[6] ) )
                    except:
                        print( "Failed", data )
                elif '##### TIMER' in l:
                    data = l.split()
                    try:
                        tit_ = data[3]
                        dat_ = float( data[5] )
                        if tit_ == 'SCALE':
                            scale_l.append( dat_ )
                    except:
                        print( "Failed", data )
    scale_l = np.array( scale_l )
    # Timer categories recognised in the logs; anything else is filed
    # under "OTHERS".
    key_l = [ "SCALE", "READ_OBS",
              "OBS_OPERATOR",
              "INITIALIZE",
              "INITIALIZE_OTHERS",
              "INIT_LETKF",
              "PROCESS_OBS",
              "SET_GRID",
              "READ_GUES",
              "GUES_MEAN",
              "WRITE RESTART/GRADS(GUES)",
              "DAS_LETKF",
              "ANAL_MEAN",
              "WRITE_ANAL",
              "DEALLOCATE",
              "WRITE RESTART/GRADS(ANAL)",
              "OTHERS",
              "FINALIZE",
              "JIT_GET",
             ]
    # Prepare a NaN-filled template array, one slot per SCALE cycle.
    iarray = np.zeros( scale_l.shape )
    iarray[:] = np.nan
    DETAIL = {}
    for key in key_l:
        if key == 'SCALE':
            DETAIL[key] = scale_l
        else:
            DETAIL[key] = np.copy( iarray )
    # Second pass: fill every category, using the running SCALE count 'i'
    # as the cycle index.
    i = -1
    for path in path_l:
        if not os.path.isfile( path ):
            break
        with open( path ) as f:
            lines = f.readlines()
            for l in lines:
                if '##### TIMER' in l:
                    data = l.split()
                    try:
                        tit_ = data[3]
                        tit4_ = data[4]
                        dat_ = float( data[5] )
                        if tit_ == 'SCALE':
                            # A SCALE entry marks the start of a new cycle.
                            i += 1
                        if tit_ == "WRITE":
                            # WRITE lines carry their value one token later.
                            dat_ = float( data[6] )
                            if tit4_ == "RESTART/GRADS(ANAL)":
                                tit_ = "WRITE RESTART/GRADS(ANAL)"
                            elif tit4_ == "RESTART/GRADS(GUES)":
                                tit_ = "WRITE RESTART/GRADS(GUES)"
                        # Clamp to 0 for entries seen before the first SCALE
                        # line of the run.
                        i_ = i
                        if i_ < 0:
                            i_ = 0
                        if tit_ in DETAIL:
                            DETAIL[tit_][i_] = dat_
                        else:
                            DETAIL["OTHERS"][i_] = dat_
                    except:
                        print( "Failed", data )
                elif '......jitdt_read_toshiba:jitget:' in l:
                    data = l.split()
                    try:
                        tit_ = "JIT_GET"
                        dat_ = float( data[1] )
                        # NOTE(review): uses the unclamped 'i'; a JIT line
                        # before the first SCALE entry would write to [-1].
                        DETAIL[tit_][i] = dat_
                    except:
                        print( "Failed", data )
    return( ftimes, ctimes, DETAIL )
def d4_computation_time( top='', ctmax=600 ):
    """Collect wall-clock timing statistics from the SCALE-LETKF ``job.o``
    logs found in the sub-directories of *top*.

    Parameters
    ----------
    top : str
        Parent directory; every sub-directory is expected to hold one
        ``job.o`` log.  Scanning stops at the first sub-directory without
        one.
    ctmax : int
        Unused; kept for call-site compatibility.

    Returns
    -------
    tuple
        ``(ftimes, ctimes, DETAIL)`` where *ftimes* and *ctimes* are lists
        of forecast / DA-cycle times (s) and *DETAIL* maps each timer
        category to an ``np.ndarray`` of its measured durations.
    """
    categories = [
        "SCALE", "READ_OBS", "OBS_OPERATOR", "INITIALIZE",
        "INITIALIZE_OTHERS", "INIT_LETKF", "PROCESS_OBS", "SET_GRID",
        "READ_GUES", "GUES_MEAN", "WRITE RESTART/GRADS(GUES)", "DAS_LETKF",
        "ANAL_MEAN", "WRITE_ANAL", "DEALLOCATE", "WRITE RESTART/GRADS(ANAL)",
        "OTHERS", "FINALIZE", "JIT_GET",
    ]
    DETAIL = {name: [] for name in categories}
    ftimes = []
    ctimes = []

    # One log file per sub-directory of *top*.
    log_paths = [os.path.join(top, entry.name, 'job.o')
                 for entry in os.scandir(top)]

    for log_path in log_paths:
        if not os.path.isfile(log_path):
            break  # stop at the first missing log
        with open(log_path) as handle:
            for line in handle:
                tokens = line.split()
                if '[Info:fcst] End forecast' in line:
                    # Forecast wall-clock time.
                    try:
                        ftimes.append(float(tokens[7]))
                    except:
                        print("Failed", tokens)
                elif '[Info:DA]' in line:
                    # Data-assimilation cycle time.
                    try:
                        ctimes.append(float(tokens[6]))
                    except:
                        print("Failed", tokens)
                elif '##### TIMER' in line:
                    # Per-component timer: name at tokens[3], duration at
                    # tokens[5] (tokens[6] for WRITE lines, which also get
                    # their category renamed from tokens[4]).
                    try:
                        label = tokens[3]
                        value = float(tokens[5])
                        if label == "WRITE":
                            value = float(tokens[6])
                            if tokens[4] == "RESTART/GRADS(ANAL)":
                                label = "WRITE RESTART/GRADS(ANAL)"
                            elif tokens[4] == "RESTART/GRADS(GUES)":
                                label = "WRITE RESTART/GRADS(GUES)"
                        target = label if label in DETAIL else "OTHERS"
                        DETAIL[target].append(value)
                    except:
                        print("Failed", tokens)
                elif '......jitdt_read_toshiba:jitget:' in line:
                    # JIT-DT data-transfer time.
                    try:
                        DETAIL["JIT_GET"].append(float(tokens[1]))
                    except:
                        print("Failed", tokens)

    return (ftimes, ctimes,
            {name: np.array(values) for name, values in DETAIL.items()})
def plot_hist( key="", dat=None ):
    """Plot a histogram of the computation times for one timer category
    and save it under png/ (or show it when ``quick_hist`` is True).

    Parameters
    ----------
    key : str
        Timer-category name; used as the plot title and in the output
        file name.
    dat : np.ndarray or None
        1-D array of computation times (s).  Defaults to an empty array.

    Returns
    -------
    (mode, mean)
        Mode (centre of the most populated bin) and mean of *dat*.
    """
    import matplotlib.pyplot as plt

    # Fix: avoid an array-valued default argument (evaluated once at
    # definition time); use a None sentinel instead.
    if dat is None:
        dat = np.array( [] )

    xmin = 0
    xmax = 60
    # Scott's choice (disabled):
    #h = 3.5 * np.std( dat, ddof=1 ) / np.power( dat.size, 1.0/3.0)
    #bins = int( ( xmax - xmin ) / h )
    # Square-root choice for the bin count
    bins = int( np.sqrt( dat.size ) )

    fig, ax = plt.subplots( 1, 1, figsize=(6,4) )
    fig.subplots_adjust( left=0.15, bottom=0.15, right=0.95, top=0.92, )

    rn, rbins, rpatches = ax.hist( dat, range=(xmin, xmax), bins=bins, alpha=0.6 )

    # Mode = centre of the most populated bin.
    imode = np.argmax( rn )
    mode = np.mean( rbins[imode:imode+2] )
    mean = np.mean( dat )

    lw = 1.0
    ymin = 0.0
    ymax = 4000  # fixed frequency-axis ceiling for cross-panel comparability
    ls = 'dashed'

    # Vertical guide lines: blue = mode, black = mean.
    ax.vlines( x=mode, ymin=ymin, ymax=ymax,
               linewidths=lw, linestyles=ls, color='b' )
    ax.vlines( x=mean, ymin=ymin, ymax=ymax,
               linewidths=lw, linestyles=ls, color='k' )

    text_ = 'Mean:{0:.3f} s\nMode:{1:.3f} s\nN={2:}'.format( mean, mode, dat.size )
    ax.text( 0.99, 0.99, text_,
             fontsize=12, transform=ax.transAxes,
             ha='right',
             va='top' )
    ax.text( 0.5, 1.01, key,
             fontsize=12, transform=ax.transAxes,
             ha='center',
             va='bottom' )

    ax.set_xlim( xmin, xmax )
    ax.set_ylim( ymin, ymax )
    ax.set_xlabel( 'Computation time (s)', fontsize=11)
    ax.set_ylabel( 'Frequency', fontsize=11)

    # Sanitize the category name for use in a file name.
    key_ = key.replace( ' ', '_' ).replace( '/', '_' )
    ofig = 'png/1p_d4_{0:}.png'.format( key_ )
    print( ofig )
    if quick_hist:
        plt.show()
    else:
        plt.savefig( ofig,
                    bbox_inches="tight", pad_inches = 0.1)
        plt.clf()
        plt.close('all')
    return( mode, mean )
def plot_bar( dic=None ):
    """Draw one stacked bar summarising the mean computation time of each
    processing component, and save it under png/ (or show it when
    ``quick_bar`` is True).

    Parameters
    ----------
    dic : dict or None
        Ordered mapping of component name -> time (s); insertion order
        determines the stacking order.  Defaults to an empty dict.
    """
    import matplotlib.pyplot as plt

    # Fix: avoid the mutable default argument {}; use a None sentinel.
    if dic is None:
        dic = {}

    fig, ax = plt.subplots( 1, 1, figsize=(4,5) )
    fig.subplots_adjust( left=0.15, bottom=0.05, right=0.5, top=0.92, )

    # One color per stacked segment, in insertion order of *dic*.
    c_l = [ 'dodgerblue', 'firebrick', 'gray', 'goldenrod', 'k' ]

    acm = 0.0  # running height of the stack
    for i, key in enumerate( dic.keys() ):
        lab = key
        if lab == 'OBS':
            lab = 'Obs pre-\nprocessing'
        elif lab == 'DATA TRANSFER':
            lab = 'Memory copy'
        ax.bar( '', dic[key], bottom=acm,
                label=lab, color=c_l[i] )
        acm += dic[key]

    # Reverse the legend so its order matches the stack top-to-bottom.
    handles, labels = ax.get_legend_handles_labels()
    ax.legend( reversed(handles), reversed(labels), bbox_to_anchor=(1.01, 1.00),
               fontsize=13 )
    ax.set_ylabel( 'Computation time (s)', fontsize=12 )
    ax.set_ylim( 0, 31.0 )
    ax.set_yticks( np.arange( 0, 32, 2 ) )

    ofig = 'png/1p_d4_bar.png'
    print( ofig )
    if quick_bar:
        plt.show()
    else:
        plt.savefig( ofig,
                    bbox_inches="tight", pad_inches = 0.1)
        plt.clf()
        plt.close('all')
####
# Driver: parse the d4 (500 m) experiment logs, plot per-category
# histograms, then a stacked bar of the grouped mean times.
top = '/data_ballantine02/miyoshi-t/honda/SCALE-LETKF/AIP_SAFE/realtime_20200825/log_from_amemiya/d4_500m/exp'
#dtime_max = 1000
ftimes, ctimes, DETAIL = d4_computation_time_nparray( top=top, )
#print( DETAIL["DAS_LETKF"][0:5], DETAIL["WRITE_ANAL"][0:5])
#ftimes, ctimes, DETAIL = d4_computation_time( top=top, )
ctimes = np.array( ctimes )
print( '{0:} average: {1:} (N: {2:})'.format( "cycle", np.nanmean( ctimes ), len(ctimes) ) )
print( '{0:} average: {1:} (N: {2:})'.format( "fcst ", np.mean( ftimes ), len(ftimes) ) )
print("")
DETAIL_MODE = { }
# Keep only JIT-DT samples that were actually measured.
dat_jit = DETAIL['JIT_GET']
dat_jit_ = dat_jit[ ~np.isnan(dat_jit) ]
for key in DETAIL.keys():
    time_ = np.mean( DETAIL[key] )
    dat = DETAIL[key]
    # Use only cycles where both this category and JIT-DT are present.
    dat_ = dat[ ~np.isnan(dat) & ~np.isnan( dat_jit ) ]
    num = len( dat_ )
    if key == "READ_OBS":
        # Subtract the JIT-DT transfer time from the obs-read time.
        # NOTE(review): assumes dat_ and dat_jit_ have equal length, i.e.
        # READ_OBS is NaN exactly where JIT_GET is -- verify on real logs.
        dat_ -= dat_jit_
    print( key, time_, num )
    if num > 100:
        mode_, mean_ = plot_hist( key=key, dat=dat_ )
        #DETAIL_MODE[key] = mode_
        DETAIL_MODE[key] = mean_
    else:
        print( 'Not plot ', key)
# Group the per-category means into four stacked-bar segments.
SUM = { "SCALE": 0.0,
        "LETKF": 0.0,
        "OBS": 0.0,
#        "DATA TRANSFER": 0.0,
        "JIT-DT": 0.0,
       }
for key in DETAIL_MODE.keys():
    if key == "SCALE":
        SUM["SCALE"] += DETAIL_MODE[key]
    elif key == "READ_OBS":
        SUM["OBS"] += DETAIL_MODE[key]
#    elif key == "READ_GUES" or key == "WRITE_ANAL":
#        SUM["DATA TRANSFER"] += DETAIL_MODE[key]
    elif key == "JIT_GET":
        SUM["JIT-DT"] += DETAIL_MODE[key]
    #elif key == "DAS_LETKF":
    else:
        SUM["LETKF"] += DETAIL_MODE[key]
        print( key )
print( SUM )
print( DETAIL_MODE )
plot_bar( dic=SUM )
| [
"numpy.mean",
"numpy.copy",
"numpy.sqrt",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.clf",
"os.scandir",
"numpy.argmax",
"os.path.join",
"matplotlib.pyplot.close",
"numpy.array",
"numpy.zeros",
"numpy.nanmean",
"numpy.isnan",
"os.path.isfile",
"matplotlib.pyplot.subplots",
"numpy.... | [((11344, 11360), 'numpy.array', 'np.array', (['ctimes'], {}), '(ctimes)\n', (11352, 11360), True, 'import numpy as np\n'), ((1623, 1640), 'numpy.array', 'np.array', (['scale_l'], {}), '(scale_l)\n', (1631, 1640), True, 'import numpy as np\n'), ((2255, 2278), 'numpy.zeros', 'np.zeros', (['scale_l.shape'], {}), '(scale_l.shape)\n', (2263, 2278), True, 'import numpy as np\n'), ((7716, 7728), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (7724, 7728), True, 'import numpy as np\n'), ((8045, 8079), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(1)'], {'figsize': '(6, 4)'}), '(1, 1, figsize=(6, 4))\n', (8057, 8079), True, 'import matplotlib.pyplot as plt\n'), ((8259, 8272), 'numpy.argmax', 'np.argmax', (['rn'], {}), '(rn)\n', (8268, 8272), True, 'import numpy as np\n'), ((8286, 8317), 'numpy.mean', 'np.mean', (['rbins[imode:imode + 2]'], {}), '(rbins[imode:imode + 2])\n', (8293, 8317), True, 'import numpy as np\n'), ((8329, 8341), 'numpy.mean', 'np.mean', (['dat'], {}), '(dat)\n', (8336, 8341), True, 'import numpy as np\n'), ((9727, 9761), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(1)'], {'figsize': '(4, 5)'}), '(1, 1, figsize=(4, 5))\n', (9739, 9761), True, 'import matplotlib.pyplot as plt\n'), ((10711, 10730), 'numpy.arange', 'np.arange', (['(0)', '(32)', '(2)'], {}), '(0, 32, 2)\n', (10720, 10730), True, 'import numpy as np\n'), ((11684, 11704), 'numpy.mean', 'np.mean', (['DETAIL[key]'], {}), '(DETAIL[key])\n', (11691, 11704), True, 'import numpy as np\n'), ((7626, 7647), 'numpy.array', 'np.array', (['DETAIL[key]'], {}), '(DETAIL[key])\n', (7634, 7647), True, 'import numpy as np\n'), ((8004, 8021), 'numpy.sqrt', 'np.sqrt', (['dat.size'], {}), '(dat.size)\n', (8011, 8021), True, 'import numpy as np\n'), ((9471, 9481), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (9479, 9481), True, 'import matplotlib.pyplot as plt\n'), ((9499, 9553), 'matplotlib.pyplot.savefig', 'plt.savefig', (['ofig'], {'bbox_inches': 
'"""tight"""', 'pad_inches': '(0.1)'}), "(ofig, bbox_inches='tight', pad_inches=0.1)\n", (9510, 9553), True, 'import matplotlib.pyplot as plt\n'), ((9584, 9593), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (9591, 9593), True, 'import matplotlib.pyplot as plt\n'), ((9601, 9617), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (9610, 9617), True, 'import matplotlib.pyplot as plt\n'), ((10863, 10873), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (10871, 10873), True, 'import matplotlib.pyplot as plt\n'), ((10891, 10945), 'matplotlib.pyplot.savefig', 'plt.savefig', (['ofig'], {'bbox_inches': '"""tight"""', 'pad_inches': '(0.1)'}), "(ofig, bbox_inches='tight', pad_inches=0.1)\n", (10902, 10945), True, 'import matplotlib.pyplot as plt\n'), ((10976, 10985), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (10983, 10985), True, 'import matplotlib.pyplot as plt\n'), ((10993, 11009), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (11002, 11009), True, 'import matplotlib.pyplot as plt\n'), ((11418, 11436), 'numpy.nanmean', 'np.nanmean', (['ctimes'], {}), '(ctimes)\n', (11428, 11436), True, 'import numpy as np\n'), ((11511, 11526), 'numpy.mean', 'np.mean', (['ftimes'], {}), '(ftimes)\n', (11518, 11526), True, 'import numpy as np\n'), ((11625, 11642), 'numpy.isnan', 'np.isnan', (['dat_jit'], {}), '(dat_jit)\n', (11633, 11642), True, 'import numpy as np\n'), ((212, 227), 'os.scandir', 'os.scandir', (['top'], {}), '(top)\n', (222, 227), False, 'import os\n'), ((477, 507), 'os.path.join', 'os.path.join', (['top', 'dir_', 'fname'], {}), '(top, dir_, fname)\n', (489, 507), False, 'import os\n'), ((619, 639), 'os.path.isfile', 'os.path.isfile', (['path'], {}), '(path)\n', (633, 639), False, 'import os\n'), ((2441, 2456), 'numpy.copy', 'np.copy', (['iarray'], {}), '(iarray)\n', (2448, 2456), True, 'import numpy as np\n'), ((2557, 2577), 'os.path.isfile', 'os.path.isfile', (['path'], {}), '(path)\n', (2571, 
2577), False, 'import os\n'), ((4187, 4202), 'os.scandir', 'os.scandir', (['top'], {}), '(top)\n', (4197, 4202), False, 'import os\n'), ((5622, 5652), 'os.path.join', 'os.path.join', (['top', 'dir_', 'fname'], {}), '(top, dir_, fname)\n', (5634, 5652), False, 'import os\n'), ((5736, 5756), 'os.path.isfile', 'os.path.isfile', (['path'], {}), '(path)\n', (5750, 5756), False, 'import os\n'), ((11747, 11760), 'numpy.isnan', 'np.isnan', (['dat'], {}), '(dat)\n', (11755, 11760), True, 'import numpy as np\n'), ((11764, 11781), 'numpy.isnan', 'np.isnan', (['dat_jit'], {}), '(dat_jit)\n', (11772, 11781), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
import codecs as cs
import nltk
import numpy as np
from utils import sample_token4,LoadGoldEntity
from keras.utils import np_utils
import pickle
# Sentinel vocabulary tokens used when indexing words.
SPARSE = 'Sparse_word'
PADDING= 'padding'
# Marker token names for entity boundaries (reserved identifiers).
E1_B = 'entity1begin'
E1_E = 'entity1end'
E2_B = 'entity2begin'
E2_E = 'entity2end'
# Lookup tables used throughout this module.
# `small`: lowercase ASCII letters, used to detect sentence-split errors.
small = {'a','b','c','d','e','f','g','h','i','j','k','l','m','n','o','p','q','r','s','t','u','v','w','x','y','z'}
# `eAbbr`: ChemProt entity-type names -> short labels.
eAbbr = {'CHEMICAL':'chem','GENE-Y':'geneY','GENE-N':'geneN'}
# `RelationAbbr`: evaluated ChemProt relation groups -> short labels.
RelationAbbr = {'CPR:3':'C3','CPR:4':'C4','CPR:5':'C5','CPR:6':'C6','CPR:9':'C9'}
class RepresentationLayer(object):
    """Builds the numeric feature representation (word indexes and relative
    position indexes) for ChemProt chemical-gene relation extraction.

    On construction it loads word embeddings, reads one corpus split, aligns
    the gold entity annotations with the tokenized sentences, and generates
    one candidate sample per (chemical, gene) entity pair in a sentence.
    """
    def __init__(self, wordvec_file=None, frequency=20000,
                 len_s=150, pattern ='test',use_pre_e = False):
        # len_s: maximum sentence length in tokens; frequency: vocabulary cutoff.
        self.len_sentence = len_s
        self.frequency = frequency
        # relation label <-> integer index (0 = no relation)
        self.l2i_dic = {'none':0,'C3':1,'C4':2,'C5':3,'C6':4,'C9':5}
        self.i2l_dic = {0:'none',1:'C3',2:'C4',3:'C5',4:'C6',5:'C9'}
        self.vec_table, self.word_2_index, self.index_2_word, self.vec_size = self.load_wordvecs(wordvec_file)
        self.distance_2_index = self.load_dis_index_table()
        self.pattern = pattern
        # The members below differ between the train/dev/test splits.
        self.SentencesList = self.LoadCorpus(self.pattern)
        self.gold_e,self.gold_r = self.GetGoldAnwer(self.SentencesList)
        self.tokens,self.eindex2tindex_C,self.eindex2tindex_G,self.rindex2eindex = self.GetMap()
        if use_pre_e:
            # Use entity mentions predicted by an upstream NER model instead
            # of the gold annotations (test-time pipeline mode).
            self.eindex2tindex_C = pickle.load(open('../data/e2t_c.pkl','rb'))
            self.eindex2tindex_G = pickle.load(open('../data/e2t_g.pkl','rb'))
            self.gold_e = LoadGoldEntity('../data/predictE_test.txt')
        self.samples,self.EntityPos,self.indexmap = self.GenerateSample()

    def load_dis_index_table(self):
        """Map every possible token distance in [-len_sentence, len_sentence)
        to a positive index; index 0 is reserved for padding."""
        distance_2_index = {}
        index = 1
        for i in range(-self.len_sentence, self.len_sentence):
            distance_2_index[i] = index
            index += 1
        return distance_2_index

    def LoadCorpus(self,pattern):
        """Read the abstracts/entities/relations TSV files of one ChemProt
        split and return a list of sentence dicts with keys:
        'text' (str), 'entity' ([left, right, type] with inclusive,
        whitespace-ignoring character offsets) and 'pair'
        ([e1_index, e2_index, relation]).

        NOTE(review): 'vaild' (sic) is the expected spelling for selecting the
        development split -- confirm against callers before fixing.
        """
        if pattern == 'train':
            abdir = '../../ChemProt_Corpus/chemprot_train_new/chemprot_training_abstracts.tsv'
            edir = '../../ChemProt_Corpus/chemprot_train_new/chemprot_training_entities.tsv'
            rdir = '../../ChemProt_Corpus/chemprot_train_new/chemprot_training_relations.tsv'
        elif pattern == 'vaild':
            abdir = '../../ChemProt_Corpus/chemprot_development_new/chemprot_development_abstracts.tsv'
            edir = '../../ChemProt_Corpus/chemprot_development_new/chemprot_development_entities.tsv'
            rdir = '../../ChemProt_Corpus/chemprot_development_new/chemprot_development_relations.tsv'
        else:
            abdir = '../../ChemProt_Corpus/chemprot_test_gs/chemprot_test_abstracts_gs.tsv'
            edir = '../../ChemProt_Corpus/chemprot_test_gs/chemprot_test_entities_gs.tsv'
            rdir = '../../ChemProt_Corpus/chemprot_test_gs/chemprot_test_relations_gs.tsv'
        fp = cs.open(abdir,'r','utf-8')
        text = fp.read().split('\n')[:-1]
        fp.close()
        # Read all entities into a dict: key = article id, value = list of
        # [entity id, entity type, left boundary, right boundary, entity name]
        fp1 = cs.open(edir,'r','utf-8')
        entitys = fp1.read().split('\n')[:-1]
        fp1.close()
        edic = {}
        for line in entitys:
            # one entity per TSV row: article id \t entity id \t type \t start \t end \t name
            elements = line.split('\t')
            id = elements[0]
            if id in edic:
                edic[id].append([elements[1],elements[2],int(elements[3]),int(elements[4]),elements[5]])
            else:
                edic[id] = []
                edic[id].append([elements[1],elements[2],int(elements[3]),int(elements[4]),elements[5]])
        # Read all relations into a dict: key = article id, value = list of
        # [relation group, is-positive flag, fine-grained type, entity1 id, entity2 id]
        fp2 = cs.open(rdir,'r','utf-8')
        relations = fp2.read().split('\n')[:-1]
        fp2.close()
        rdic = {}
        for line in relations:
            elements = line.split('\t')
            id = elements[0]
            if id in rdic:
                rdic[id].append([elements[1],elements[2],elements[3],elements[4][5:],elements[5][5:]])
            else:
                rdic[id] = []
                rdic[id].append([elements[1],elements[2],elements[3],elements[4][5:],elements[5][5:]])
        max_len = 0
        all_len = 0
        Senslist = []
        for line in text:
            article_id = line.split('\t')[0]
            abstract = line.split('\t')[1]
            text = line.split('\t')[2]
            # Split the body into sentences on '. '
            sentences = []
            for s in text.split('. '):
                if s[0] in small and len(sentences)>0:# starts lowercase with a previous sentence: glue onto it
                    sentences[-1] = sentences[-1] + s + '. '
                else:# otherwise it stands as its own sentence
                    sentences.append(s + '. ')
            sentences[-1] = sentences[-1][:-2]# the last sentence needs no trailing ". "
            sens = []
            sens.append(abstract+' ')
            sens.extend(sentences)
            # Track the maximum and cumulative sentence length (statistics only)
            for each in sens:
                all_len += len(each)
                if len(each)>max_len:
                    #print each
                    max_len = len(each)
            # Compute the [begin, end) character offset of every sentence
            sens_len = []
            begin = 0
            end = 0
            for i in range(len(sens)):
                end = begin + len(sens[i])
                sens_len.append([begin,end])
                begin = end
            # For each sentence span, collect the entities and relations inside it
            rnum = 0
            for i in range(len(sens_len)):
                sendic = {}# holds one sentence's text, entities and relations
                sendic['text'] = sens[i]
                sendic['entity'] = []
                sendic['pair'] = []
                begin = sens_len[i][0]
                end = sens_len[i][1]
                sen_e_index = []# article-level ids of the entities in this sentence
                # pull out the entities belonging to this sentence
                for e in edic[article_id]:
                    if e[2] >= begin and e[3] <= end:
                        sendic['entity'].append([e[2]-begin,e[3]-begin-1,e[1]])# span becomes inclusive at both ends
                        sen_e_index.append(e[0])
                # collect this sentence's relations
                if article_id in rdic:# only if the article has any relations
                    for r in rdic[article_id]:
                        if r[1] == u'Y ':# note the trailing space after 'Y' in the TSV
                            e1 = r[3]
                            e2 = r[4]
                            if e1 in sen_e_index and e2 in sen_e_index:
                                e1_index = sen_e_index.index(e1)
                                e2_index = sen_e_index.index(e2)
                                sendic['pair'].append([e1_index,e2_index,r[0]])
                                rnum +=1
                Senslist.append(sendic)
        # Rewrite entity offsets to a whitespace-ignoring scheme: remove spaces
        # one at a time and shift/shrink every affected entity span accordingly.
        for sentence in Senslist:# each sentence (a dict)
            text = sentence['text']# local working copy of the text
            while(' ' in text):# while any space remains
                for i in range(len(text)):# i ends up at the first remaining space
                    if text[i] == ' ':
                        break
                for entity in sentence['entity']:# each entity [left, right, label]
                    if entity[0] > i:# entity lies after the space: shift both boundaries left
                        entity[0] -= 1
                        entity[1] -= 1
                    if entity[0] < i and entity[1] > i:# entity spans the space: shrink the right boundary
                        entity[1] -= 1
                text = text[0:i] + text[i+1:]
        return Senslist

    def GetGoldAnwer(self,SentencesList):# save the gold entity spans and relation spans/labels
        """Return (gold_entity, gold_relation): per sentence, the entity spans
        [left, right, type-abbr] and the relation tuples
        [e1_left, e1_right, e2_left, e2_right, relation-abbr]."""
        gold_entity = []# entity spans of every sentence
        gold_relation = []# entity spans + label of every relation
        for sentence in SentencesList:# each sentence
            text = sentence['text']
            text = sample_token4(text)
            text = nltk.tokenize.word_tokenize(text)
            entity_s = []# entities of this sentence
            relation_s = []# relations of this sentence
            for entity in sentence['entity']:# record the spans of this sentence's entities
                entity_s.append([entity[0],entity[1],eAbbr[entity[2]]])
            for pair in sentence['pair']:
                e1_index = pair[0]# index of entity 1 within the sentence
                e2_index = pair[1]# index of entity 2 within the sentence
                label = pair[2]# relation label
                e1_dir = sentence['entity'][e1_index][:2]# boundaries of entity 1 (whitespace ignored)
                e2_dir = sentence['entity'][e2_index][:2]# boundaries of entity 2 (whitespace ignored)
                labelAbbr = RelationAbbr[label]# abbreviation of this relation
                relation_s.append([e1_dir[0],e1_dir[1],e2_dir[0],e2_dir[1],labelAbbr])
            gold_entity.append(entity_s)
            gold_relation.append(relation_s)
        return gold_entity,gold_relation

    def GetMap(self):
        '''
        Generate, for every sentence:
        tokens: [[token11,token12...]]
        eindex2tindex = [{0:[2,3,4],1:[7]...},...]  (entity index -> token indexes)
        rindex2eindex = [[[0,1,'C1'],...],...]      (relation -> entity indexes + label)
        '''
        alltokens = []# tokens of all sentences (2-D list)
        eindex2tindex_C = []# per sentence: entity index -> token indexes, chemical entities only
        eindex2tindex_G = []# per sentence: entity index -> token indexes, gene entities only
        rindex2eindex = []# per sentence: relation index -> entity indexes
        for i in range(len(self.SentencesList)):
            # tokenize the sentence (lowercased copy kept in `tokens`)
            text = self.SentencesList[i]['text']# the sentence text
            text = sample_token4(text)
            text = nltk.tokenize.word_tokenize(text)
            tokens = []
            for token in text:
                tokens.append(token.lower())
            alltokens.append(tokens)
            # build the entity -> token mapping
            entitys = self.SentencesList[i]['entity']
            e2t_c = {}# sentence-level chemical-entity -> token mapping
            e2t_g = {}# sentence-level gene-entity -> token mapping
            for j in range(len(entitys)):# every entity must map to >=1 token, otherwise no position index can be generated!
                if entitys[j][2] == 'CHEMICAL':
                    e2t_c[j] = []
                    entity = entitys[j]
                    left = -1
                    right = -1
                    for k in range(len(text)):
                        token = text[k]
                        left = right + 1# boundaries of the current token (both inclusive)
                        right = right + len(token)
                        if left == entity[0] or left > entity[0] and left <= entity[1] or left < entity[0] and right >= entity[0]:
                            e2t_c[j].append(k)
                    if len(e2t_c[j]) == 0:
                        print (text)
                        print (u'this entity cannot find tokens %s'%entitys[j])
                else:
                    e2t_g[j] = []
                    entity = entitys[j]
                    left = -1
                    right = -1
                    for k in range(len(text)):
                        token = text[k]
                        left = right + 1# boundaries of the current token (both inclusive)
                        right = right + len(token)
                        if left == entity[0] or left > entity[0] and left <= entity[1] or left < entity[0] and right >= entity[0]:
                            e2t_g[j].append(k)
                    if len(e2t_g[j]) == 0:
                        print (text)
                        print (u'this entity cannot find tokens %s'%entitys[j])
            eindex2tindex_C.append(e2t_c)
            eindex2tindex_G.append(e2t_g)
            # relation -> entity mapping is already stored on the sentence
            relations = self.SentencesList[i]['pair']
            rindex2eindex.append(relations)
        return alltokens,eindex2tindex_C,eindex2tindex_G,rindex2eindex

    def GenerateSample(self):
        """Create one candidate sample per (chemical, gene) pair in each
        sentence; label it from the gold relations (0 = no relation).
        Returns (samples, ENTITYPOS, sample-index -> sentence-index map)."""
        Samples = []
        sampleindex2sentence = []# maps sample index -> sentence index
        ENTITYPOS = []# character-span info of every sample's entity pair
        for k in range(len(self.eindex2tindex_C)):# iterate over the entities of each sentence
            num_c = len(self.eindex2tindex_C[k])
            num_g = len(self.eindex2tindex_G[k])
            if num_c == 0 or num_g == 0 or len(self.tokens[k]) > self.len_sentence:
                continue
            for chem_index in self.eindex2tindex_C[k]:# sentence-level index of a chemical entity
                for gene_index in self.eindex2tindex_G[k]:# sentence-level index of a gene entity
                    # build a sample from the two entities' token indexes;
                    # nested/overlapping entity pairs are discarded here
                    chem = self.eindex2tindex_C[k][chem_index]
                    gene = self.eindex2tindex_G[k][gene_index]
                    jiaoji = [val for val in chem if val in gene]# token intersection; skip overlapping entities
                    if len(jiaoji) > 0:
                        continue
                    sample = {}
                    sample['tokens'] = self.tokens[k]
                    sample['e1b'] = chem[0]
                    sample['e1e'] = chem[-1]# index of entity 1's last token
                    sample['e2b'] = gene[0]
                    sample['e2e'] = gene[-1]
                    if_rela = 0
                    for rela in self.rindex2eindex[k]:
                        if chem_index == rela[0] and gene_index ==rela[1]:
                            sample['label'] = self.l2i_dic[RelationAbbr[rela[2]]]
                            if_rela = 1
                            break
                    if if_rela == 0:
                        sample['label'] = 0
                    # build entitypos, used later to merge predictions back to character offsets
                    entitypos = []
                    entitypos.extend(self.gold_e[k][chem_index][0:2])
                    entitypos.extend(self.gold_e[k][gene_index][0:2])
                    ENTITYPOS.append(entitypos)
                    # record which sentence this sample came from
                    sampleindex2sentence.append(k)
                    Samples.append(sample)
        return Samples,ENTITYPOS,sampleindex2sentence

    def represent_instances(self):
        """Vectorize every generated sample. Returns
        (one-hot label array, word-index array, entity-1 distance array,
        entity-2 distance array), each with one row per sample."""
        label_list = []
        word_index_list = []
        distance_e1_index_list = []
        distance_e2_index_list = []
        for i in range(len(self.samples)):
            sample = self.samples[i]
            label, word_indexs, distance_e1_indexs, distance_e2_indexs = self.represent_instance(sample)
            # guard: truncate over-long feature rows back to len_sentence
            # (original note: "there is 2 bug, e1 is superposition of e2")
            if len(distance_e1_indexs) >self.len_sentence:
                distance_e1_indexs = distance_e1_indexs[0:self.len_sentence]
                distance_e2_indexs = distance_e2_indexs[0:self.len_sentence]
                word_indexs = word_indexs[0:self.len_sentence]
            label_list.append([label])
            word_index_list.append(word_indexs)
            distance_e1_index_list.append(distance_e1_indexs)
            distance_e2_index_list.append(distance_e2_indexs)
        label_array = np.array(label_list)
        label_array = np_utils.to_categorical(label_array, len(self.l2i_dic))# one-hot encode the integer labels
        label_array = np.reshape(label_array, (len(word_index_list),len(self.l2i_dic)))
        #label_array = label_array.reshape((len(label_array)/self.y_dim, self.y_dim))
        word_array = np.array(word_index_list)
        #word_array = word_array.reshape((word_array.shape[0]/self.max_sent_len, self.max_sent_len))
        dis_e1_array = np.array(distance_e1_index_list)
        #dis_e1_array = dis_e1_array.reshape((dis_e1_array.shape[0]/self.max_sent_len, self.max_sent_len))
        dis_e2_array = np.array(distance_e2_index_list)
        #dis_e2_array = dis_e2_array.reshape((dis_e2_array.shape[0]/self.max_sent_len, self.max_sent_len))
        return label_array, word_array, dis_e1_array, dis_e2_array

    def represent_instance(self,sample):
        """Vectorize a single sample: order the two entities by position, pad
        the token list to len_sentence, and build word/distance index lists."""
        tokens = ''
        tokens = sample['tokens']
        # e1 is always the left entity, e2 the right one
        if sample['e1b'] < sample['e2b']:
            e1_b,e1_e,e2_b,e2_e = sample['e1b'],sample['e1e'],sample['e2b'],sample['e2e']
        else:
            e1_b,e1_e,e2_b,e2_e = sample['e2b'],sample['e2e'],sample['e1b'],sample['e1e']
        label = sample['label']
        # the max length sentence won't contain the
        # two entities
        left_part = tokens[:e1_b]
        e1 = tokens[e1_b:e1_e+1]
        middle_part = tokens[e1_e+1:e2_b]
        e2 = tokens[e2_b:e2_e+1]
        right_part = tokens[e2_e+1:] + [PADDING for i in range(self.len_sentence - len(tokens))]
        distance_e1, distance_e2 = self.generate_distance_features(left_part, e1, middle_part, e2, right_part)
        distance_e1_index_list = self.replace_distances_with_indexs(distance_e1)
        distance_e2_index_list = self.replace_distances_with_indexs(distance_e2)
        # NOTE(review): `tokens` aliases sample['tokens'], so this extend pads
        # the stored token list in place -- padding would accumulate if the
        # same sample were represented twice; verify callers run this once.
        tokens.extend([PADDING for i in range(self.len_sentence - len(tokens))])
        word_index_list = self.replace_words_with_indexs(tokens)
        return label, word_index_list, distance_e1_index_list, distance_e2_index_list

    def replace_words_with_indexs(self, words):
        """Map tokens to vocabulary indexes; unknown words map to SPARSE."""
        word_indexs = []
        for word in words:
            # known word or the padding token
            if word.lower() in self.word_2_index:
                word_indexs.append(self.word_2_index[word.lower()])
            # out-of-vocabulary word
            else:
                word_indexs.append(self.word_2_index[SPARSE])
        return word_indexs

    '''
    replace distance list with corresponding indexs
    '''
    def replace_distances_with_indexs(self, distances):
        distance_indexs = []
        for distance in distances:
            if distance == 0:# padding position: keep index 0
                distance_indexs.append(0)
                continue
            if distance in self.distance_2_index:# otherwise map through the distance table
                distance_indexs.append(self.distance_2_index[distance])
            else:
                print (distance)
                print ('Impossible! This program will stop!')
                # sys.exit(0)
        return distance_indexs

    def generate_distance_features(self, left_part, e1, middle_part, e2, right_part):
        """Build, for every token, its signed distance to entity 1 and to
        entity 2; padding positions get distance 0."""
        distance_e1 = []
        distance_e2 = []
        len_left = len(left_part)
        len_middle = len(middle_part)
        len_right = len(right_part)
        ### left part
        for i in range(len_left):
            distance_e1.append(i - len_left)
            distance_e2.append(i - len_left - 1 - len_middle)
        ### entry1 part
        for e in e1:
            distance_e1.append(-self.len_sentence)
            distance_e2.append(-len_middle)
        ### middle part
        for i in range(len_middle):
            distance_e1.append(i + 1)
            distance_e2.append(i - len_middle)
        ### entry2 part
        for e in e2:
            distance_e1.append(len_middle)
            distance_e2.append(-self.len_sentence)
        ### right part
        for i in range(len_right):
            if right_part[i] == PADDING:
                distance_e1.append(0)
                distance_e2.append(0)
            else:
                distance_e1.append(len_middle + 1 + i + 1)
                distance_e2.append(i + 1)
        return distance_e1, distance_e2

    def load_wordvecs(self, wordvec_file):
        """Load word2vec-text-format embeddings. Row 0 is the PADDING vector;
        only the `frequency` most frequent words are kept individually.
        Returns (vec_table, word_2_index, index_2_word, dimension)."""
        file = cs.open(wordvec_file,'r','utf-8')
        first_line = file.readline()
        word_count = int(first_line.split()[0])
        dimension = int(first_line.split()[1])
        vec_table = np.zeros((word_count, dimension))
        word_2_index = {PADDING:0}
        index_2_word = {0:PADDING}
        padding_vector = np.zeros(dimension)
        for col in range(dimension):
            vec_table[0][col] = padding_vector[col]
        row = 1
        for line in file:
            if row < self.frequency:
                line_split = line[:-1].split()
                word_2_index[line_split[0]] = row
                index_2_word[row] = line_split[0]
                for col in range(dimension):
                    vec_table[row][col] = float(line_split[col + 1])
                row += 1
            else:
                break
        # Words rarer than `frequency` are dropped and collectively represented
        # by SPARSE, whose vector is the average of those remaining vectors.
        word_2_index[SPARSE] = row
        index_2_word[row] = SPARSE
        sparse_vectors = np.zeros(dimension)
        for line in file:
            line_split = line[:-1].split()[1:]
            for i in range(dimension):
                sparse_vectors[i] += float(line_split[i])
        sparse_vectors /= (word_count - self.frequency)
        for col in range(dimension):
            vec_table[row][col] = sparse_vectors[col]
        file.close()
        return vec_table, word_2_index, index_2_word, dimension
# Driver: build the representation for the test split and vectorize it.
rep = RepresentationLayer(wordvec_file = '../../token2vec/chemdner_pubmed_biov5_drug.token4_d100_CPR', pattern ='test',frequency=20000)
# Convenience aliases onto the layer's intermediate structures.
dis2index,gold_e,gold_r,tokens,e2t_c,e2t_g,r2e = rep.distance_2_index,rep.gold_e,rep.gold_r,rep.tokens,rep.eindex2tindex_C,rep.eindex2tindex_G,rep.rindex2eindex
label, word_index_list, distance_e1_index_list, distance_e2_index_list= rep.represent_instances()
EntityPos,indexmap= rep.EntityPos,rep.indexmap | [
"utils.sample_token4",
"nltk.tokenize.word_tokenize",
"numpy.array",
"numpy.zeros",
"utils.LoadGoldEntity",
"codecs.open"
] | [((2962, 2990), 'codecs.open', 'cs.open', (['abdir', '"""r"""', '"""utf-8"""'], {}), "(abdir, 'r', 'utf-8')\n", (2969, 2990), True, 'import codecs as cs\n'), ((3139, 3166), 'codecs.open', 'cs.open', (['edir', '"""r"""', '"""utf-8"""'], {}), "(edir, 'r', 'utf-8')\n", (3146, 3166), True, 'import codecs as cs\n'), ((3752, 3779), 'codecs.open', 'cs.open', (['rdir', '"""r"""', '"""utf-8"""'], {}), "(rdir, 'r', 'utf-8')\n", (3759, 3779), True, 'import codecs as cs\n'), ((14126, 14146), 'numpy.array', 'np.array', (['label_list'], {}), '(label_list)\n', (14134, 14146), True, 'import numpy as np\n'), ((14436, 14461), 'numpy.array', 'np.array', (['word_index_list'], {}), '(word_index_list)\n', (14444, 14461), True, 'import numpy as np\n'), ((14587, 14619), 'numpy.array', 'np.array', (['distance_e1_index_list'], {}), '(distance_e1_index_list)\n', (14595, 14619), True, 'import numpy as np\n'), ((14751, 14783), 'numpy.array', 'np.array', (['distance_e2_index_list'], {}), '(distance_e2_index_list)\n', (14759, 14783), True, 'import numpy as np\n'), ((18426, 18461), 'codecs.open', 'cs.open', (['wordvec_file', '"""r"""', '"""utf-8"""'], {}), "(wordvec_file, 'r', 'utf-8')\n", (18433, 18461), True, 'import codecs as cs\n'), ((18612, 18645), 'numpy.zeros', 'np.zeros', (['(word_count, dimension)'], {}), '((word_count, dimension))\n', (18620, 18645), True, 'import numpy as np\n'), ((18741, 18760), 'numpy.zeros', 'np.zeros', (['dimension'], {}), '(dimension)\n', (18749, 18760), True, 'import numpy as np\n'), ((19414, 19433), 'numpy.zeros', 'np.zeros', (['dimension'], {}), '(dimension)\n', (19422, 19433), True, 'import numpy as np\n'), ((1599, 1642), 'utils.LoadGoldEntity', 'LoadGoldEntity', (['"""../data/predictE_test.txt"""'], {}), "('../data/predictE_test.txt')\n", (1613, 1642), False, 'from utils import sample_token4, LoadGoldEntity\n'), ((7584, 7603), 'utils.sample_token4', 'sample_token4', (['text'], {}), '(text)\n', (7597, 7603), False, 'from utils import sample_token4, 
LoadGoldEntity\n'), ((7623, 7656), 'nltk.tokenize.word_tokenize', 'nltk.tokenize.word_tokenize', (['text'], {}), '(text)\n', (7650, 7656), False, 'import nltk\n'), ((9047, 9066), 'utils.sample_token4', 'sample_token4', (['text'], {}), '(text)\n', (9060, 9066), False, 'from utils import sample_token4, LoadGoldEntity\n'), ((9086, 9119), 'nltk.tokenize.word_tokenize', 'nltk.tokenize.word_tokenize', (['text'], {}), '(text)\n', (9113, 9119), False, 'import nltk\n')] |
import numpy as np
import cv2
import glob
import matplotlib.pyplot as plt
import pickle
from pipeline import pipeline
from tracker import LineTracker
# ## Apply a perspective transform to rectify binary image to create a "birds-eye view"
def warper(img, src, dst):
    """Apply the perspective transform mapping ``src`` points onto ``dst``
    points, producing the bird's-eye view. Output size equals input size."""
    height, width = img.shape[0], img.shape[1]
    transform = cv2.getPerspectiveTransform(src, dst)
    return cv2.warpPerspective(img, transform, (width, height), flags=cv2.INTER_NEAREST)
def map_lane(img, src, dst):
    """Warp ``img`` back from bird's-eye view to the camera view using the
    inverse perspective transform (``dst`` -> ``src``)."""
    height, width = img.shape[0], img.shape[1]
    inverse = cv2.getPerspectiveTransform(dst, src)
    return cv2.warpPerspective(img, inverse, (width, height), flags=cv2.INTER_LINEAR)
# This expects RGB images
def process_image(img, src, dst, thresholds, tracker):
    """Lane-detection pipeline for one RGB frame: threshold, warp to
    bird's-eye view, fit the lane lines, project the lane area back onto the
    frame, and annotate curvature / offset.

    Returns (annotated image, cross-track error in meters, curve radius in
    meters; the latter two are 0 when the lines were not both detected).
    """
    result = pipeline(img, thresholds['l_thresh'], thresholds['b_thresh'], thresholds['grad_thresh'], thresholds['dir_thresh'])
    warped = warper(result,src,dst)
    left_line, right_line = tracker.find_lines(warped)
    road_img = tracker.get_road_img(warped)
    road_warped = map_lane(road_img,src,dst)
    # overlay the detected lane area (50% alpha) on the original frame
    result = cv2.addWeighted(img,1.0,road_warped,0.5,0.0)
    curverad = 0
    cte = 0
    if left_line.detected and right_line.detected:
        ym_per_pix = tracker.ym_per_pix # meters per pixel in y dimension
        xm_per_pix = tracker.xm_per_pix # meters per pixel in x dimension
        # fit a world-space polynomial through the midpoint of both lane lines
        curve_fit_cr = np.polyfit(np.array(left_line.yvals,np.float32)*ym_per_pix,np.array(left_line.bestx+right_line.bestx,np.float32)*xm_per_pix/2.0,2)
        curverad = ((1 + (2*curve_fit_cr[0]*left_line.yvals[-1]*ym_per_pix + curve_fit_cr[1])**2)**1.5)/np.absolute(2*curve_fit_cr[0])
        # calculate the offset of the car on the road
        center_diff = (left_line.line_base_pos + right_line.line_base_pos)/2
        cte = 0-center_diff
        side_pos = 'left'
        if center_diff <= 0:
            side_pos = 'right'
        # draw the text showing curvature, offset, and speed
        cv2.putText(result, str(int(curverad))+'(m)',(0,25),cv2.FONT_HERSHEY_SIMPLEX,1,(255,255,255),1)
        cv2.putText(result, str(abs(round(center_diff,2)))+'m '+side_pos,(0,70),cv2.FONT_HERSHEY_SIMPLEX,1,(255,255,255),1)
        if left_line.line_base_pos > - 0.9 or right_line.line_base_pos < 0.9: #Approx half of average width of a car
            cv2.putText(result,'Lane Departure',(0,115),cv2.FONT_HERSHEY_SIMPLEX,1,(255,255,255),1)
            # Force detecting new lane positions
            left_line.detected = False
            right_line.detected = False
            left_line.recent_xfitted = []
            right_line.recent_xfitted = []
            left_line.allx = []
            right_line.allx = []
            left_line.ally = []
            right_line.ally = []
    return result,cte,curverad | [
"cv2.getPerspectiveTransform",
"numpy.absolute",
"cv2.putText",
"cv2.addWeighted",
"cv2.warpPerspective",
"numpy.array",
"pipeline.pipeline"
] | [((364, 401), 'cv2.getPerspectiveTransform', 'cv2.getPerspectiveTransform', (['src', 'dst'], {}), '(src, dst)\n', (391, 401), False, 'import cv2\n'), ((415, 477), 'cv2.warpPerspective', 'cv2.warpPerspective', (['img', 'M', 'img_size'], {'flags': 'cv2.INTER_NEAREST'}), '(img, M, img_size, flags=cv2.INTER_NEAREST)\n', (434, 477), False, 'import cv2\n'), ((661, 698), 'cv2.getPerspectiveTransform', 'cv2.getPerspectiveTransform', (['dst', 'src'], {}), '(dst, src)\n', (688, 698), False, 'import cv2\n'), ((712, 776), 'cv2.warpPerspective', 'cv2.warpPerspective', (['img', 'Minv', 'img_size'], {'flags': 'cv2.INTER_LINEAR'}), '(img, Minv, img_size, flags=cv2.INTER_LINEAR)\n', (731, 776), False, 'import cv2\n'), ((925, 1044), 'pipeline.pipeline', 'pipeline', (['img', "thresholds['l_thresh']", "thresholds['b_thresh']", "thresholds['grad_thresh']", "thresholds['dir_thresh']"], {}), "(img, thresholds['l_thresh'], thresholds['b_thresh'], thresholds[\n 'grad_thresh'], thresholds['dir_thresh'])\n", (933, 1044), False, 'from pipeline import pipeline\n'), ((1251, 1299), 'cv2.addWeighted', 'cv2.addWeighted', (['img', '(1.0)', 'road_warped', '(0.5)', '(0.0)'], {}), '(img, 1.0, road_warped, 0.5, 0.0)\n', (1266, 1299), False, 'import cv2\n'), ((1785, 1817), 'numpy.absolute', 'np.absolute', (['(2 * curve_fit_cr[0])'], {}), '(2 * curve_fit_cr[0])\n', (1796, 1817), True, 'import numpy as np\n'), ((2482, 2582), 'cv2.putText', 'cv2.putText', (['result', '"""Lane Departure"""', '(0, 115)', 'cv2.FONT_HERSHEY_SIMPLEX', '(1)', '(255, 255, 255)', '(1)'], {}), "(result, 'Lane Departure', (0, 115), cv2.FONT_HERSHEY_SIMPLEX, 1,\n (255, 255, 255), 1)\n", (2493, 2582), False, 'import cv2\n'), ((1561, 1598), 'numpy.array', 'np.array', (['left_line.yvals', 'np.float32'], {}), '(left_line.yvals, np.float32)\n', (1569, 1598), True, 'import numpy as np\n'), ((1609, 1665), 'numpy.array', 'np.array', (['(left_line.bestx + right_line.bestx)', 'np.float32'], {}), '(left_line.bestx + right_line.bestx, 
np.float32)\n', (1617, 1665), True, 'import numpy as np\n')] |
# Copyright 2017 reinforce.io. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""
The `Model` class coordinates the creation and execution of all TensorFlow operations within a model.
It implements the `reset`, `act` and `update` functions, which form the interface the `Agent` class
communicates with, and which should not need to be overwritten. Instead, the following TensorFlow
functions need to be implemented:
* `tf_actions_and_internals(states, internals, deterministic)` returning the batch of
actions and successor internal states.
* `tf_loss_per_instance(states, internals, actions, terminal, reward)` returning the loss
per instance for a batch.
Further, the following TensorFlow functions should be extended accordingly:
* `initialize(custom_getter)` defining TensorFlow placeholders/functions and adding internal states.
* `get_variables()` returning the list of TensorFlow variables (to be optimized) of this model.
* `tf_regularization_losses(states, internals)` returning a dict of regularization losses.
* `get_optimizer_kwargs(states, internals, actions, terminal, reward)` returning a dict of potential
arguments (argument-free functions) to the optimizer.
Finally, the following TensorFlow functions can be useful in some cases:
* `preprocess_states(states)` for state preprocessing, returning the processed batch of states.
* `tf_action_exploration(action, exploration, action_spec)` for action postprocessing (e.g. exploration),
returning the processed batch of actions.
* `tf_preprocess_reward(states, internals, terminal, reward)` for reward preprocessing (e.g. reward normalization),
returning the processed batch of rewards.
* `create_output_operations(states, internals, actions, terminal, reward, deterministic)` for further output operations,
similar to the two above for `Model.act` and `Model.update`.
* `tf_optimization(states, internals, actions, terminal, reward)` for further optimization operations
(e.g. the baseline update in a `PGModel` or the target network update in a `QModel`),
returning a single grouped optimization operation.
"""
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
from copy import deepcopy
import os
import numpy as np
import tensorflow as tf
from tensorforce import TensorForceError, util
from tensorforce.core.explorations import Exploration
from tensorforce.core.optimizers import Optimizer, GlobalOptimizer
from tensorforce.core.preprocessing import PreprocessorStack
class Model(object):
"""
Base class for all (TensorFlow-based) models.
"""
    def __init__(
        self,
        states_spec,
        actions_spec,
        device=None,
        session_config=None,
        scope='base_model',
        saver_spec=None,
        summary_spec=None,
        distributed_spec=None,
        optimizer=None,
        discount=0.0,
        variable_noise=None,
        states_preprocessing_spec=None,
        explorations_spec=None,
        reward_preprocessing_spec=None
    ):
        """
        Args:
            states_spec (dict): The state-space description dictionary.
            actions_spec (dict): The action-space description dictionary.
            device (str): The name of the device to run the graph of this model on.
            session_config (dict): Dict specifying the tf monitored session to create when calling `setup`.
            scope (str): The root scope str to use for tf variable scoping.
            saver_spec (dict): Dict specifying whether and how to save the model's parameters.
            summary_spec (dict): Dict specifying which tensorboard summaries should be created and added to the graph.
            distributed_spec (dict): Dict specifying whether and how to do distributed training on the model's graph.
            optimizer (dict): Dict specifying the tf optimizer to use for tuning the model's trainable parameters.
            discount (float): The RL reward discount factor (gamma).
            variable_noise (float): The stddev value of a Normal distribution used for adding random
                noise to the model's output (for each batch, noise can be toggled and - if active - will be resampled).
                Use None for not adding any noise.
            states_preprocessing_spec (dict): Dict specifying whether and how to preprocess state signals
                (e.g. normalization, greyscale, etc..).
            explorations_spec (dict): Dict specifying whether and how to add exploration to the model's
                "action outputs" (e.g. epsilon-greedy).
            reward_preprocessing_spec (dict): Dict specifying whether and how to preprocess rewards coming
                from the Environment (e.g. reward normalization).
        """
        # States and actions specifications
        self.states_spec = states_spec
        self.actions_spec = actions_spec
        # TensorFlow device, managed-session and scope specs
        self.device = device
        self.session_config = session_config
        self.scope = scope
        # Saver/distributed specifications
        self.saver_spec = saver_spec
        self.distributed_spec = distributed_spec
        # TensorFlow summaries: summary_labels is the set of enabled labels
        # (empty when no summary spec was given).
        self.summary_spec = summary_spec
        if summary_spec is None:
            self.summary_labels = set()
        else:
            self.summary_labels = set(summary_spec.get('labels', ()))
        self.optimizer = optimizer
        # NOTE(review): default discount=0.0 fully discounts future rewards --
        # confirm this is intentional (0.99 is the more common default).
        self.discount = discount
        # Variable noise
        assert variable_noise is None or variable_noise > 0.0
        self.variable_noise = variable_noise
        # Preprocessing and exploration
        self.states_preprocessing_spec = states_preprocessing_spec
        self.reward_preprocessing_spec = reward_preprocessing_spec
        self.explorations_spec = explorations_spec
        # Define all other variables that will be initialized later
        # (in calls to `setup` and `initialize` directly following __init__).
        # The Network object to use to finish constructing our graph
        self.network = None
        # Global (proxy)-model
        self.global_model = None
        # TensorFlow Graph of this model
        self.graph = None
        # Dict of trainable tf Variables of this model (keys = names of Variables).
        self.variables = None
        # Dict of all tf Variables of this model (keys = names of Variables).
        self.all_variables = None
        self.registered_variables = None  # set of registered tf Variable names (str)
        # The tf.train.Scaffold object used to create important pieces of this model's graph
        self.scaffold = None
        # Directory used for default export of model parameters
        self.saver_directory = None
        # The tf MonitoredSession object (Session wrapper handling common hooks)
        self.monitored_session = None
        # The actual tf.Session object (part of our MonitoredSession object)
        self.session = None
        # A list of tf.summary.Summary objects defined for our Graph (for tensorboard)
        self.summaries = None
        # TensorFlow FileWriter object that writes summaries (histograms, images, etc..) to disk
        self.summary_writer = None
        # Summary hook to use by the MonitoredSession
        self.summary_writer_hook = None
        # Inputs and internals
        # Current episode number as int Tensor
        self.episode = None
        # TensorFlow op incrementing `self.episode` depending on True is-terminal signals
        self.increment_episode = None
        # Int Tensor representing the total timestep (over all episodes)
        self.timestep = None
        # Dict holding placeholders for each (original/unprocessed) state component input
        self.states_input = None
        # Dict holding the PreprocessorStack objects (if any) for each state component
        self.states_preprocessing = None
        # Dict holding placeholders for each (original/unprocessed) action component input
        self.actions_input = None
        # Dict holding the Exploration objects (if any) for each action component
        self.explorations = None
        # The bool-type placeholder for a batch of is-terminal signals from the environment
        self.terminal_input = None
        # The float-type placeholder for a batch of reward signals from the environment
        self.reward_input = None
        # PreprocessorStack object (if any) for the reward
        self.reward_preprocessing = None
        # A list of all the Model's internal/hidden state (e.g. RNNs) initialization Tensors
        self.internals_init = None
        # A list of placeholders for incoming internal/hidden states (e.g. RNNs)
        self.internals_input = None
        # Single-bool placeholder for determining whether to not apply exploration
        self.deterministic_input = None
        # Single bool Tensor specifying whether sess.run should update parameters (train)
        self.update_input = None
        # Outputs
        # Dict of action output Tensors (returned by fn_actions_and_internals)
        self.actions_output = None
        # Dict of internal state output Tensors (returned by fn_actions_and_internals)
        self.internals_output = None
        # Int that keeps track of how many actions have been "executed" using `act`
        self.timestep_output = None
        # Tf template functions created in `initialize` from `tf_` methods.
        # Template function calculating cumulated discounted rewards
        self.fn_discounted_cumulative_reward = None
        # Template function returning the actual action/internal state outputs
        self.fn_actions_and_internals = None
        # Template function returning the loss-per-instance Tensor (axis 0 is the batch axis)
        self.fn_loss_per_instance = None
        # Tensor of the loss value per instance (batch sample). Axis 0 is the batch axis.
        self.loss_per_instance = None
        # Returns tf op for calculating the regularization losses per state comp
        self.fn_regularization_losses = None
        # Template function returning the single float value total loss tensor.
        self.fn_loss = None
        # Template function returning the optimization op used by the model to learn
        self.fn_optimization = None
        # Tf optimization op (e.g. `minimize`) used as 1st fetch in sess.run in self.update
        self.optimization = None
        # Template function applying pre-processing to a batch of states
        self.fn_preprocess_states = None
        # Template function applying exploration to a batch of actions
        self.fn_action_exploration = None
        # Template function applying pre-processing to a batch of rewards
        self.fn_preprocess_reward = None
        self.summary_configuration_op = None
        # Setup TensorFlow graph and session
        self.setup()
def setup(self):
    """
    Sets up the TensorFlow model graph and initializes (and enters) the TensorFlow session.

    Depending on `self.distributed_spec` this either builds a plain single-process graph,
    runs as a distributed parameter server (blocks forever in `server.join()`), builds a
    replica model placed round-robin on the worker devices, or builds a worker model that
    first constructs and sets up a deep-copied global model to sync against.
    """
    # Create our Graph or figure out, which shared/global one to use.
    default_graph = None
    # No parallel RL or ThreadedRunner with Hogwild! shared network updates:
    # Build single graph and work with that from here on. In the case of threaded RL, the central
    # and already initialized model is handed to the worker Agents via the ThreadedRunner's
    # WorkerAgentGenerator factory.
    if self.distributed_spec is None:
        self.global_model = None
        self.graph = tf.Graph()
        default_graph = self.graph.as_default()
        default_graph.__enter__()
    # Distributed tensorflow setup (each process gets its own (identical) graph).
    # We are the parameter server.
    elif self.distributed_spec.get('parameter_server'):
        if self.distributed_spec.get('replica_model'):
            raise TensorForceError("Invalid config value for distributed mode.")
        self.global_model = None
        self.graph = tf.Graph()
        default_graph = self.graph.as_default()
        default_graph.__enter__()
    # We are a worker's replica model.
    # Place our ops round-robin on all worker devices.
    elif self.distributed_spec.get('replica_model'):
        self.device = tf.train.replica_device_setter(
            worker_device=self.device,
            cluster=self.distributed_spec['cluster_spec']
        )
        # The graph is the parent model's graph, hence no new graph here.
        self.global_model = None
        self.graph = tf.get_default_graph()
    # We are a worker:
    # Construct the global model (deepcopy of ourselves), set it up via `setup` and link to it (global_model).
    else:
        graph = tf.Graph()
        default_graph = graph.as_default()
        default_graph.__enter__()
        self.global_model = deepcopy(self)
        self.global_model.distributed_spec['replica_model'] = True
        self.global_model.setup()
        self.graph = graph

    with tf.device(device_name_or_function=self.device):
        # Variables and summaries
        self.variables = dict()
        self.all_variables = dict()
        self.registered_variables = set()
        self.summaries = list()

        # Variable getter passed to tf.make_template: records every created variable into
        # self.all_variables (and trainables into self.variables), optionally adding a
        # histogram summary per variable.
        def custom_getter(getter, name, registered=False, second=False, **kwargs):
            if registered:
                self.registered_variables.add(name)
            elif name in self.registered_variables:
                registered = True
            variable = getter(name=name, **kwargs)  # Top-level, hence no 'registered'
            if not registered:
                self.all_variables[name] = variable
                if kwargs.get('trainable', True) and not name.startswith('optimization'):
                    self.variables[name] = variable
                    if 'variables' in self.summary_labels:
                        summary = tf.summary.histogram(name=name, values=variable)
                        self.summaries.append(summary)
            return variable

        # Episode counter: reuse an existing one from the graph collection (replica case)
        # or create and register a fresh non-trainable variable.
        collection = self.graph.get_collection(name='episode')
        if len(collection) == 0:
            self.episode = tf.Variable(
                name='episode',
                dtype=util.tf_dtype('int'),
                trainable=False,
                initial_value=0
            )
            self.graph.add_to_collection(name='episode', value=self.episode)
        else:
            assert len(collection) == 1
            self.episode = collection[0]

        # Timestep counter (also registered as the graph's GLOBAL_STEP when created here).
        collection = self.graph.get_collection(name='timestep')
        if len(collection) == 0:
            self.timestep = tf.Variable(
                name='timestep',
                dtype=util.tf_dtype('int'),
                trainable=False,
                initial_value=0
            )
            self.graph.add_to_collection(name='timestep', value=self.timestep)
            self.graph.add_to_collection(name=tf.GraphKeys.GLOBAL_STEP, value=self.timestep)
        else:
            assert len(collection) == 1
            self.timestep = collection[0]

        # Create placeholders, tf functions, internals, etc
        self.initialize(custom_getter=custom_getter)

        # Input tensors: identity-copy the placeholders, preprocess states/reward, and
        # stop gradients so losses cannot backpropagate into the inputs.
        states = {name: tf.identity(input=state) for name, state in self.states_input.items()}
        states = self.fn_preprocess_states(states=states)
        states = {name: tf.stop_gradient(input=state) for name, state in states.items()}
        internals = [tf.identity(input=internal) for internal in self.internals_input]
        actions = {name: tf.identity(input=action) for name, action in self.actions_input.items()}
        terminal = tf.identity(input=self.terminal_input)
        reward = tf.identity(input=self.reward_input)
        reward = self.fn_preprocess_reward(states=states, internals=internals, terminal=terminal, reward=reward)
        reward = tf.stop_gradient(input=reward)

        # Optimizer
        # No optimizer (non-learning model)
        if self.optimizer is None:
            pass
        # Optimizer will be a global_optimizer
        elif self.distributed_spec is not None and \
                not self.distributed_spec.get('parameter_server') and \
                not self.distributed_spec.get('replica_model'):
            # If not internal global model
            self.optimizer = GlobalOptimizer(optimizer=self.optimizer)
        else:
            kwargs_opt = dict(
                summaries=self.summaries,
                summary_labels=self.summary_labels
            )
            self.optimizer = Optimizer.from_spec(spec=self.optimizer, kwargs=kwargs_opt)

        # Create output fetch operations
        self.create_output_operations(
            states=states,
            internals=internals,
            actions=actions,
            terminal=terminal,
            reward=reward,
            update=self.update_input,
            deterministic=self.deterministic_input
        )

        # Add exploration post-processing (only if deterministic is set to False).
        for name, action in self.actions_output.items():
            if name in self.explorations:
                self.actions_output[name] = tf.cond(
                    pred=self.deterministic_input,
                    true_fn=(lambda: action),
                    false_fn=(lambda: self.fn_action_exploration(
                        action=action,
                        exploration=self.explorations[name],
                        action_spec=self.actions_spec[name]
                    ))
                )

        # Add all summaries specified in summary_labels
        if any(k in self.summary_labels for k in ['inputs', 'states']):
            for name, state in states.items():
                summary = tf.summary.histogram(name=(self.scope + '/inputs/states/' + name), values=state)
                self.summaries.append(summary)
        if any(k in self.summary_labels for k in ['inputs', 'actions']):
            for name, action in actions.items():
                summary = tf.summary.histogram(name=(self.scope + '/inputs/actions/' + name), values=action)
                self.summaries.append(summary)
        if any(k in self.summary_labels for k in ['inputs', 'rewards']):
            summary = tf.summary.histogram(name=(self.scope + '/inputs/rewards'), values=reward)
            self.summaries.append(summary)

        if self.distributed_spec is not None:
            # We are just a replica model: Return.
            if self.distributed_spec.get('replica_model'):
                return
            # We are the parameter server: Start and wait.
            elif self.distributed_spec.get('parameter_server'):
                server = tf.train.Server(
                    server_or_cluster_def=self.distributed_spec['cluster_spec'],
                    job_name='ps',
                    task_index=self.distributed_spec['task_index'],
                    protocol=self.distributed_spec.get('protocol'),
                    config=None,
                    start=True
                )
                # Param server does nothing actively
                server.join()
                return
            # Global trainables (from global_model)
            global_variables = self.global_model.get_variables(include_non_trainable=True) +\
                [self.episode, self.timestep]
            # Local counterparts
            local_variables = self.get_variables(include_non_trainable=True) + [self.episode, self.timestep]
            init_op = tf.variables_initializer(var_list=global_variables)
            ready_op = tf.report_uninitialized_variables(var_list=(global_variables + local_variables))
            ready_for_local_init_op = tf.report_uninitialized_variables(var_list=global_variables)
            # Op to assign values from the global model to local counterparts
            local_init_op = tf.group(*(local_var.assign(value=global_var)
                                       for local_var, global_var in zip(local_variables, global_variables)))
        # Local variables initialize operations (no global_model).
        else:
            global_variables = self.get_variables(include_non_trainable=True) + [self.episode, self.timestep]
            init_op = tf.variables_initializer(var_list=global_variables)
            ready_op = tf.report_uninitialized_variables(var_list=global_variables)
            # TODO(Michael) TensorFlow template hotfix following 1.5.0rc0
            global_variables = list(set(global_variables))
            ready_for_local_init_op = None
            local_init_op = None

        # Scaffold init_fn: restore from the latest (or an explicitly named) checkpoint
        # when a saver_spec with load enabled is configured.
        def init_fn(scaffold, session):
            if self.saver_spec is not None and self.saver_spec.get('load', True):
                directory = self.saver_spec['directory']
                file = self.saver_spec.get('file')
                if file is None:
                    file = tf.train.latest_checkpoint(
                        checkpoint_dir=directory,
                        latest_filename=None  # Corresponds to argument of saver.save() in Model.save().
                    )
                elif not os.path.isfile(file):
                    file = os.path.join(directory, file)
                if file is not None:
                    scaffold.saver.restore(sess=session, save_path=file)

        # Summary operation
        summaries = self.get_summaries()
        if len(summaries) > 0:
            summary_op = tf.summary.merge(inputs=summaries)
        else:
            summary_op = None

        # TensorFlow saver object
        saver = tf.train.Saver(
            var_list=global_variables,  # should be given?
            reshape=False,
            sharded=False,  # should be true?
            max_to_keep=5,
            keep_checkpoint_every_n_hours=10000.0,
            name=None,
            restore_sequentially=False,
            saver_def=None,
            builder=None,
            defer_build=False,
            allow_empty=True,
            write_version=tf.train.SaverDef.V2,
            pad_step_number=False,
            save_relative_paths=True
            # filename=None
        )

        # TensorFlow scaffold object
        self.scaffold = tf.train.Scaffold(
            init_op=init_op,
            init_feed_dict=None,
            init_fn=init_fn,
            ready_op=ready_op,
            ready_for_local_init_op=ready_for_local_init_op,
            local_init_op=local_init_op,
            summary_op=summary_op,
            saver=saver,
            copy_from_scaffold=None
        )

        hooks = list()
        # Checkpoint saver hook (only on the chief / single process).
        if self.saver_spec is not None and (self.distributed_spec is None or self.distributed_spec['task_index'] == 0):
            self.saver_directory = self.saver_spec['directory']
            hooks.append(tf.train.CheckpointSaverHook(
                checkpoint_dir=self.saver_directory,
                save_secs=self.saver_spec.get('seconds', None if 'steps' in self.saver_spec else 600),
                save_steps=self.saver_spec.get('steps'),  # Either one or the other has to be set.
                saver=None,  # None since given via 'scaffold' argument.
                checkpoint_basename=self.saver_spec.get('basename', 'model.ckpt'),
                scaffold=self.scaffold,
                listeners=None
            ))
        else:
            self.saver_directory = None

        # Summary saver hook
        if self.summary_spec is None:
            self.summary_writer_hook = None
        else:
            # TensorFlow summary writer object
            self.summary_writer = tf.summary.FileWriter(
                logdir=self.summary_spec['directory'],
                graph=self.graph,
                max_queue=10,
                flush_secs=120,
                filename_suffix=None
            )
            self.summary_writer_hook = util.UpdateSummarySaverHook(
                update_input=self.update_input,
                save_steps=self.summary_spec.get('steps'),  # Either one or the other has to be set.
                save_secs=self.summary_spec.get('seconds', None if 'steps' in self.summary_spec else 120),
                output_dir=None,  # None since given via 'summary_writer' argument.
                summary_writer=self.summary_writer,
                scaffold=self.scaffold,
                summary_op=None  # None since given via 'scaffold' argument.
            )
            hooks.append(self.summary_writer_hook)

        # Stop at step hook
        # hooks.append(tf.train.StopAtStepHook(
        #     num_steps=???,  # This makes more sense, if load and continue training.
        #     last_step=None  # Either one or the other has to be set.
        # ))

        # # Step counter hook
        # hooks.append(tf.train.StepCounterHook(
        #     every_n_steps=counter_config.get('steps', 100),  # Either one or the other has to be set.
        #     every_n_secs=counter_config.get('secs'),  # Either one or the other has to be set.
        #     output_dir=None,  # None since given via 'summary_writer' argument.
        #     summary_writer=summary_writer
        # ))

        # Other available hooks:
        # tf.train.FinalOpsHook(final_ops, final_ops_feed_dict=None)
        # tf.train.GlobalStepWaiterHook(wait_until_step)
        # tf.train.LoggingTensorHook(tensors, every_n_iter=None, every_n_secs=None)
        # tf.train.NanTensorHook(loss_tensor, fail_on_nan_loss=True)
        # tf.train.ProfilerHook(save_steps=None, save_secs=None, output_dir='', show_dataflow=True, show_memory=False)

        if self.distributed_spec is None:
            # TensorFlow non-distributed monitored session object
            self.monitored_session = tf.train.SingularMonitoredSession(
                hooks=hooks,
                scaffold=self.scaffold,
                master='',  # Default value.
                config=self.session_config,  # always the same?
                checkpoint_dir=None
            )
        else:
            server = tf.train.Server(
                server_or_cluster_def=self.distributed_spec['cluster_spec'],
                job_name='worker',
                task_index=self.distributed_spec['task_index'],
                protocol=self.distributed_spec.get('protocol'),
                config=self.session_config,
                start=True
            )
            if self.distributed_spec['task_index'] == 0:
                # TensorFlow chief session creator object
                session_creator = tf.train.ChiefSessionCreator(
                    scaffold=self.scaffold,
                    master=server.target,
                    config=self.session_config,
                    checkpoint_dir=None,
                    checkpoint_filename_with_path=None
                )
            else:
                # TensorFlow worker session creator object
                session_creator = tf.train.WorkerSessionCreator(
                    scaffold=self.scaffold,
                    master=server.target,
                    config=self.session_config,
                )
            # TensorFlow monitored session object
            self.monitored_session = tf.train.MonitoredSession(
                session_creator=session_creator,
                hooks=hooks,
                stop_grace_period_secs=120  # Default value.
            )

    if default_graph:
        default_graph.__exit__(None, None, None)
    self.graph.finalize()
    self.monitored_session.__enter__()
    self.session = self.monitored_session._tf_sess()

    # # tf.ConfigProto(device_filters=['/job:ps', '/job:worker/task:{}/cpu:0'.format(self.task_index)])
    # # config=tf.ConfigProto(device_filters=["/job:ps"])
    # # config=tf.ConfigProto(
    # #     inter_op_parallelism_threads=2,
    # #     log_device_placement=True
    # # )
def close(self):
    """Persist the model (when a saver directory is configured) and close the session."""
    needs_final_save = self.saver_directory is not None
    if needs_final_save:
        self.save(append_timestep=True)
    self.monitored_session.close()
def initialize(self, custom_getter):
    """
    Creates the TensorFlow placeholders and functions for this model. Moreover adds the
    internal state placeholders and initialization values to the model.

    Args:
        custom_getter: The `custom_getter_` object to use for `tf.make_template` when creating TensorFlow functions.
    """
    # States preprocessing
    self.states_preprocessing = dict()
    if self.states_preprocessing_spec is None:
        # No preprocessing: processed shape is the raw state shape.
        for name, state in self.states_spec.items():
            state['processed_shape'] = state['shape']
    elif isinstance(self.states_preprocessing_spec, list):
        # One shared preprocessing-stack spec applied to every state component.
        for name, state in self.states_spec.items():
            preprocessing = PreprocessorStack.from_spec(spec=self.states_preprocessing_spec)
            self.states_preprocessing[name] = preprocessing
            state['processed_shape'] = preprocessing.processed_shape(shape=state['shape'])
    else:
        # Dict of per-component preprocessing specs keyed by state name.
        for name, state in self.states_spec.items():
            if self.states_preprocessing_spec.get(name) is not None:
                preprocessing = PreprocessorStack.from_spec(spec=self.states_preprocessing_spec[name])
                self.states_preprocessing[name] = preprocessing
                state['processed_shape'] = preprocessing.processed_shape(shape=state['shape'])
            else:
                state['processed_shape'] = state['shape']

    # States: one placeholder per state component, with a leading batch dimension.
    self.states_input = dict()
    for name, state in self.states_spec.items():
        self.states_input[name] = tf.placeholder(
            dtype=util.tf_dtype(state['type']),
            shape=(None,) + tuple(state['shape']),
            name=name
        )

    # Actions: one placeholder per action component, with a leading batch dimension.
    self.actions_input = dict()
    for name, action in self.actions_spec.items():
        self.actions_input[name] = tf.placeholder(
            dtype=util.tf_dtype(action['type']),
            shape=(None,) + tuple(action['shape']),
            name=name
        )

    # Explorations
    self.explorations = dict()
    if self.explorations_spec is None:
        pass
    elif isinstance(self.explorations_spec, list):
        for name, state in self.actions_spec.items():
            self.explorations[name] = Exploration.from_spec(spec=self.explorations_spec)
    # single spec for all components of our action space
    elif "type" in self.explorations_spec:
        for name, state in self.actions_spec.items():
            self.explorations[name] = Exploration.from_spec(spec=self.explorations_spec)
    # different spec for different components of our action space
    else:
        for name, state in self.actions_spec.items():
            if self.explorations_spec.get(name) is not None:
                self.explorations[name] = Exploration.from_spec(spec=self.explorations_spec[name])

    # Terminal
    self.terminal_input = tf.placeholder(dtype=util.tf_dtype('bool'), shape=(None,), name='terminal')

    # Reward preprocessing
    if self.reward_preprocessing_spec is None:
        self.reward_preprocessing = None
    else:
        self.reward_preprocessing = PreprocessorStack.from_spec(spec=self.reward_preprocessing_spec)
        # Reward preprocessing must keep the per-sample reward a scalar.
        if self.reward_preprocessing.processed_shape(shape=()) != ():
            raise TensorForceError("Invalid reward preprocessing!")

    # Reward
    self.reward_input = tf.placeholder(dtype=util.tf_dtype('float'), shape=(None,), name='reward')

    # Internal states (subclasses with recurrent state append to these).
    self.internals_input = list()
    self.internals_init = list()

    # Deterministic action flag
    self.deterministic_input = tf.placeholder(dtype=util.tf_dtype('bool'), shape=(), name='deterministic')

    # Update flag
    self.update_input = tf.placeholder(dtype=util.tf_dtype('bool'), shape=(), name='update')

    # TensorFlow functions: wrap each tf_* method in a tf.make_template so variables are
    # created once and shared across calls (with our custom_getter recording them).
    self.fn_discounted_cumulative_reward = tf.make_template(
        name_=(self.scope + '/discounted-cumulative-reward'),
        func_=self.tf_discounted_cumulative_reward,
        custom_getter_=custom_getter
    )
    self.fn_actions_and_internals = tf.make_template(
        name_=(self.scope + '/actions-and-internals'),
        func_=self.tf_actions_and_internals,
        custom_getter_=custom_getter
    )
    self.fn_loss_per_instance = tf.make_template(
        name_=(self.scope + '/loss-per-instance'),
        func_=self.tf_loss_per_instance,
        custom_getter_=custom_getter
    )
    self.fn_regularization_losses = tf.make_template(
        name_=(self.scope + '/regularization-losses'),
        func_=self.tf_regularization_losses,
        custom_getter_=custom_getter
    )
    self.fn_loss = tf.make_template(
        name_=(self.scope + '/loss'),
        func_=self.tf_loss,
        custom_getter_=custom_getter
    )
    self.fn_optimization = tf.make_template(
        name_=(self.scope + '/optimization'),
        func_=self.tf_optimization,
        custom_getter_=custom_getter
    )
    self.fn_preprocess_states = tf.make_template(
        name_=(self.scope + '/preprocess-states'),
        func_=self.tf_preprocess_states,
        custom_getter_=custom_getter
    )
    self.fn_action_exploration = tf.make_template(
        name_=(self.scope + '/action-exploration'),
        func_=self.tf_action_exploration,
        custom_getter_=custom_getter
    )
    self.fn_preprocess_reward = tf.make_template(
        name_=(self.scope + '/preprocess-reward'),
        func_=self.tf_preprocess_reward,
        custom_getter_=custom_getter
    )

    self.summary_configuration_op = None
    if self.summary_spec and 'meta_param_recorder_class' in self.summary_spec:
        self.summary_configuration_op = self.summary_spec['meta_param_recorder_class'].build_metagraph_list()

    # self.fn_summarization = tf.make_template(
    #     name_='summarization',
    #     func_=self.tf_summarization,
    #     custom_getter_=custom_getter
    # )
def tf_preprocess_states(self, states):
    """
    Runs each state tensor through its configured preprocessor stack, if one exists
    for that state name; otherwise passes the tensor through tf.identity unchanged.
    Mutates and returns the given states dict.
    """
    for name in list(states):
        preprocessor = self.states_preprocessing.get(name)
        if preprocessor is not None:
            states[name] = preprocessor.process(tensor=states[name])
        else:
            states[name] = tf.identity(input=states[name])
    return states
def tf_action_exploration(self, action, exploration, action_spec):
    """
    Applies optional exploration to the action (post-processor for action outputs).

    Args:
        action (tf.Tensor): The original output action tensor (to be post-processed).
        exploration (Exploration): The Exploration object to use.
        action_spec (dict): Dict specifying the action space.

    Returns:
        The post-processed action output tensor.
    """
    action_shape = tf.shape(input=action)
    exploration_value = exploration.tf_explore(
        episode=self.episode,
        timestep=self.timestep,
        action_shape=action_shape
    )
    if action_spec['type'] == 'bool':
        # With probability exploration_value, replace the bool action by a coin flip.
        # NOTE(review): `shape=action_shape[0]` passes a scalar tensor as the shape argument;
        # the int branch below uses the full `action_shape` — confirm this scalar shape is
        # accepted/intended (a 1-D shape like `action_shape[:1]` looks like what was meant).
        action = tf.where(
            condition=(tf.random_uniform(shape=action_shape[0]) < exploration_value),
            x=(tf.random_uniform(shape=action_shape) < 0.5),
            y=action
        )
    elif action_spec['type'] == 'int':
        # With probability exploration_value (element-wise), replace by a uniformly random action.
        action = tf.where(
            condition=(tf.random_uniform(shape=action_shape) < exploration_value),
            x=tf.random_uniform(shape=action_shape, maxval=action_spec['num_actions'], dtype=util.tf_dtype('int')),
            y=action
        )
    elif action_spec['type'] == 'float':
        # Additive exploration noise, broadcast over all action dimensions.
        action += tf.reshape(tensor=exploration_value,
                             shape=tuple(1 for _ in range(action_shape.get_shape().as_list()[0])))
        # Clip back into the valid range when bounds are specified.
        if 'min_value' in action_spec:
            action = tf.clip_by_value(
                t=action,
                clip_value_min=action_spec['min_value'],
                clip_value_max=action_spec['max_value']
            )
    return action
def tf_preprocess_reward(self, states, internals, terminal, reward):
    """
    Runs the reward through the configured reward preprocessor, if any;
    otherwise passes it through tf.identity unchanged.
    """
    if self.reward_preprocessing is not None:
        return self.reward_preprocessing.process(tensor=reward)
    return tf.identity(input=reward)
# TODO: this could be a utility helper function if we remove self.discount and only allow external discount-value input
def tf_discounted_cumulative_reward(self, terminal, reward, discount=None, final_reward=0.0, horizon=0):
    """
    Creates and returns the TensorFlow operations for calculating the sequence of discounted cumulative rewards
    for a given sequence of single rewards.

    Example:
    single rewards = 2.0 1.0 0.0 0.5 1.0 -1.0
    terminal = False, False, False, False True False
    gamma = 0.95
    final_reward = 100.0 (only matters for last episode (r=-1.0) as this episode has no terminal signal)
    horizon=3
    output = 2.95 1.45 1.38 1.45 1.0 94.0

    Args:
        terminal: Tensor (bool) holding the is-terminal sequence. This sequence may contain more than one
            True value. If its very last element is False (not terminating), the given `final_reward` value
            is assumed to follow the last value in the single rewards sequence (see below).
        reward: Tensor (float) holding the sequence of single rewards. If the last element of `terminal` is False,
            an assumed last reward of the value of `final_reward` will be used.
        discount (float): The discount factor (gamma). By default, take the Model's discount factor.
        final_reward (float): Reward value to use if last episode in sequence does not terminate (terminal sequence
            ends with False). This value will be ignored if horizon == 1 or discount == 0.0.
        horizon (int): The length of the horizon (e.g. for n-step cumulative rewards in continuous tasks
            without terminal signals). Use 0 (default) for an infinite horizon. Note that horizon=1 leads to the
            exact same results as a discount factor of 0.0.

    Returns:
        Discounted cumulative reward tensor with the same shape as `reward`.
    """
    # By default -> take Model's gamma value
    if discount is None:
        discount = self.discount

    # Accumulates discounted (n-step) reward (start new if terminal)
    def cumulate(cumulative, reward_terminal_horizon_subtract):
        rew, is_terminal, is_over_horizon, sub = reward_terminal_horizon_subtract
        return tf.where(
            # If terminal, start new cumulation.
            condition=is_terminal,
            x=rew,
            y=tf.where(
                # If we are above the horizon length (H) -> subtract discounted value from H steps back.
                condition=is_over_horizon,
                x=(rew + cumulative * discount - sub),
                y=(rew + cumulative * discount)
            )
        )

    # Accumulates length of episodes (starts new if terminal)
    def len_(cumulative, term):
        return tf.where(
            condition=term,
            # Start counting from 1 after is-terminal signal
            x=tf.ones(shape=(), dtype=tf.int32),
            # Otherwise, increase length by 1
            y=cumulative + 1
        )

    # Reverse, since reward cumulation is calculated right-to-left, but tf.scan only works left-to-right.
    reward = tf.reverse(tensor=reward, axis=(0,))
    # e.g. -1.0 1.0 0.5 0.0 1.0 2.0
    terminal = tf.reverse(tensor=terminal, axis=(0,))
    # e.g. F T F F F F

    # Store the steps until end of the episode(s) determined by the input terminal signals (True starts new count).
    lengths = tf.scan(fn=len_, elems=terminal, initializer=0)
    # e.g. 1 1 2 3 4 5
    off_horizon = tf.greater(lengths, tf.fill(dims=tf.shape(lengths), value=horizon))
    # e.g. F F F F T T

    # Calculate the horizon-subtraction value for each step.
    if horizon > 0:
        horizon_subtractions = tf.map_fn(lambda x: (discount ** horizon) * x, reward, dtype=tf.float32)
        # Shift right by size of horizon (fill rest with 0.0).
        # NOTE(review): np.zeros defaults to float64 while the mapped rewards are float32 —
        # confirm tf.concat accepts this mix here (an explicit dtype may be needed).
        horizon_subtractions = tf.concat([np.zeros(shape=(horizon,)), horizon_subtractions], axis=0)
        horizon_subtractions = tf.slice(horizon_subtractions, begin=(0,), size=tf.shape(reward))
        # e.g. 0.0, 0.0, 0.0, -1.0*g^3, 1.0*g^3, 0.5*g^3
    # all 0.0 if infinite horizon (special case: horizon=0)
    else:
        horizon_subtractions = tf.zeros(shape=tf.shape(reward))

    # Now do the scan, each time summing up the previous step (discounted by gamma) and
    # subtracting the respective `horizon_subtraction`.
    reward = tf.scan(
        fn=cumulate,
        elems=(reward, terminal, off_horizon, horizon_subtractions),
        initializer=final_reward if horizon != 1 else 0.0
    )
    # Re-reverse again to match input sequences.
    return tf.reverse(tensor=reward, axis=(0,))
def tf_actions_and_internals(self, states, internals, update, deterministic):
    """
    Abstract method — must be implemented by subclasses.

    Builds the TensorFlow operations mapping a batch of (preprocessed) input states and the
    prior internal states to the output actions and the posterior internal states.

    Args:
        states (dict): Dict of state tensors (one per state space component).
        internals: List of prior internal state tensors.
        update: Single boolean tensor indicating whether this call happens during an update.
        deterministic: Boolean tensor; when true, exploration is not applied to the actions.

    Returns:
        tuple:
            1) dict of output action tensors (with or without exploration, see `deterministic`)
            2) list of posterior internal state tensors (empty for models without internal state)
    """
    raise NotImplementedError
def tf_loss_per_instance(self, states, internals, actions, terminal, reward, update):
    """
    Abstract method — must be implemented by subclasses.

    Builds the TensorFlow operations computing the loss for each sample of the given
    batch of states and actions.

    Args:
        states (dict): Dict of state tensors (one per state space component).
        internals: List of prior internal state tensors.
        actions (dict): Dict of action tensors (one per action space component).
        terminal: Terminal boolean tensor (shape=(batch-size,)).
        reward: Reward float tensor (shape=(batch-size,)).
        update: Single boolean tensor indicating whether this call happens during an update.

    Returns:
        Loss tensor whose first rank is the batch size (one loss value per sample).
    """
    raise NotImplementedError
def tf_regularization_losses(self, states, internals, update):
    """
    Builds the TensorFlow operations for the model's regularization losses.
    The base implementation has no regularization and returns an empty dict;
    subclasses override this to add entries.

    Args:
        states (dict): Dict of state tensors (one per state space component).
        internals: List of prior internal state tensors.
        update: Single boolean tensor indicating whether this call happens during an update.

    Returns:
        Dict of regularization loss tensors keyed by regularization type (e.g. 'entropy').
    """
    return {}
def tf_loss(self, states, internals, actions, terminal, reward, update):
    """
    Builds the single total-loss tensor for a batch: the mean per-sample loss plus
    all regularization losses, with optional summaries along the way.

    Args:
        states (dict): Dict of state tensors (one per state space component).
        internals: List of prior internal state tensors.
        actions (dict): Dict of action tensors (one per action space component).
        terminal: Terminal boolean tensor (shape=(batch-size,)).
        reward: Reward float tensor (shape=(batch-size,)).
        update: Single boolean tensor indicating whether this call happens during an update.

    Returns:
        Single float-value loss tensor.
    """
    per_instance = self.fn_loss_per_instance(
        states=states,
        internals=internals,
        actions=actions,
        terminal=terminal,
        reward=reward,
        update=update
    )
    # Mean over the batch axis.
    total_loss = tf.reduce_mean(input_tensor=per_instance, axis=0)

    # Summary of the mean loss before regularization is added.
    if 'losses' in self.summary_labels:
        self.summaries.append(
            tf.summary.scalar(name='loss-without-regularization', tensor=total_loss)
        )

    # Fold in the different regularization losses, if any.
    regularization = self.fn_regularization_losses(states=states, internals=internals, update=update)
    if len(regularization) > 0:
        total_loss += tf.add_n(inputs=list(regularization.values()))
        if 'regularization' in self.summary_labels:
            for reg_name, reg_loss in regularization.items():
                self.summaries.append(
                    tf.summary.scalar(name="regularization/" + reg_name, tensor=reg_loss)
                )

    # Summary of the total loss (including regularization).
    if 'losses' in self.summary_labels or 'total-loss' in self.summary_labels:
        self.summaries.append(tf.summary.scalar(name='total-loss', tensor=total_loss))

    return total_loss
def get_optimizer_kwargs(self, states, internals, actions, terminal, reward, update):
    """
    Builds the optimizer arguments: the current time, the list of variables to optimize,
    and an argument-free `fn_loss` closure returning the combined 0-dim batch loss tensor,
    which the optimizer may call to perform an update step.

    Args:
        states (dict): Dict of state tensors (one per state space component).
        internals: List of prior internal state tensors.
        actions (dict): Dict of action tensors (one per action space component).
        terminal: Terminal boolean tensor (shape=(batch-size,)).
        reward: Reward float tensor (shape=(batch-size,)).
        update: Single boolean tensor indicating whether this call happens during an update.

    Returns:
        Dict to be passed into the optimizer op (e.g. 'minimize') as kwargs.
    """
    def deferred_loss():
        # Evaluated lazily by the optimizer, closing over the given batch tensors.
        return self.fn_loss(states=states, internals=internals, actions=actions,
                            terminal=terminal, reward=reward, update=update)

    kwargs = dict(
        time=self.timestep,
        variables=self.get_variables(),
        fn_loss=deferred_loss
    )
    if self.global_model is not None:
        # Distributed worker: optimizer also needs the global model's variables.
        kwargs['global_variables'] = self.global_model.get_variables()
    return kwargs
def tf_optimization(self, states, internals, actions, terminal, reward, update):
    """
    Builds the TensorFlow operation performing one optimization update step on the
    given batch of states and actions.

    Args:
        states (dict): Dict of state tensors (one per state space component).
        internals: List of prior internal state tensors.
        actions (dict): Dict of action tensors (one per action space component).
        terminal: Terminal boolean tensor (shape=(batch-size,)).
        reward: Reward float tensor (shape=(batch-size,)).
        update: Single boolean tensor indicating whether this call happens during an update.

    Returns:
        The optimization operation.
    """
    if self.optimizer is None:
        # Non-learning model: nothing to optimize.
        return tf.no_op()
    return self.optimizer.minimize(**self.get_optimizer_kwargs(
        states=states,
        internals=internals,
        actions=actions,
        terminal=terminal,
        reward=reward,
        update=update
    ))
def create_output_operations(self, states, internals, actions, terminal, reward, update, deterministic):
    """
    Calls all the relevant TensorFlow functions for this model and hence creates all the
    TensorFlow operations involved.

    Args:
        states (dict): Dict of state tensors (each key represents one state space component).
        internals: List of prior internal state tensors.
        actions (dict): Dict of action tensors (each key represents one action space component).
        terminal: Terminal boolean tensor (shape=(batch-size,)).
        reward: Reward float tensor (shape=(batch-size,)).
        update: Single boolean tensor indicating whether this call happens during an update.
        deterministic: Boolean Tensor indicating, whether we will not apply exploration when actions
            are calculated.
    """
    # Create graph by calling the functions corresponding to model.act() / model.update(), to initialize variables.
    # TODO: Could call reset here, but would have to move other methods below reset.
    self.fn_actions_and_internals(
        states=states,
        internals=internals,
        update=update,
        deterministic=deterministic
    )
    self.fn_loss_per_instance(
        states=states,
        internals=internals,
        actions=actions,
        terminal=terminal,
        reward=reward,
        update=update
    )

    # Tensor fetched for model.act()
    operations = list()
    if self.variable_noise is not None and self.variable_noise > 0.0:
        # Add variable noise: sample a noise delta per variable and add it in-place
        # before the action computation (subtracted again below).
        noise_deltas = list()
        for variable in self.get_variables():
            noise_delta = tf.random_normal(shape=util.shape(variable), mean=0.0, stddev=self.variable_noise)
            noise_deltas.append(noise_delta)
            operations.append(variable.assign_add(delta=noise_delta))

    # Retrieve actions and internals (only after noise, if any, has been applied).
    with tf.control_dependencies(control_inputs=operations):
        self.actions_output, self.internals_output = self.fn_actions_and_internals(
            states=states,
            internals=internals,
            update=update,
            deterministic=deterministic
        )

    # Increment timestep by the batch size of the incoming states.
    increment_timestep = tf.shape(input=next(iter(states.values())))[0]
    increment_timestep = self.timestep.assign_add(delta=increment_timestep)
    operations = [increment_timestep]

    # Subtract variable noise again so the variables are restored after acting.
    if self.variable_noise is not None and self.variable_noise > 0.0:
        for variable, noise_delta in zip(self.get_variables(), noise_deltas):
            operations.append(variable.assign_sub(delta=noise_delta))

    with tf.control_dependencies(control_inputs=operations):
        # Trivial operation to enforce control dependency
        self.timestep_output = self.timestep + 0

    # Tensor fetched for model.observe(): episode counter advances by the number of
    # terminal=True entries in the batch.
    increment_episode = tf.count_nonzero(input_tensor=terminal, dtype=util.tf_dtype('int'))
    increment_episode = self.episode.assign_add(delta=increment_episode)
    with tf.control_dependencies(control_inputs=(increment_episode,)):
        self.increment_episode = self.episode + 0

    # TODO: add up rewards per episode and add summary_label 'episode-reward'

    # Tensor(s) fetched for model.update()
    self.optimization = self.fn_optimization(
        states=states,
        internals=internals,
        actions=actions,
        terminal=terminal,
        reward=reward,
        update=update
    )
    self.loss_per_instance = self.fn_loss_per_instance(
        states=states,
        internals=internals,
        actions=actions,
        terminal=terminal,
        reward=reward,
        update=update
    )
def get_variables(self, include_non_trainable=False):
    """
    Returns the TensorFlow variables used by the model.

    Args:
        include_non_trainable: If True, additionally include state/reward
            preprocessing, exploration, optimizer and timestep/episode
            variables; otherwise only the trainable model variables.

    Returns:
        List of variables (de-duplicated, model variables first).
    """
    if not include_non_trainable:
        return [self.variables[key] for key in sorted(self.variables)]

    # Optimizer variables and timestep/episode only included if 'include_non_trainable' set
    collected = [self.all_variables[key] for key in sorted(self.all_variables)]

    preprocessing_vars = [
        var for name in self.states_preprocessing.keys()
        for var in self.states_preprocessing[name].get_variables()
    ]
    exploration_vars = [
        var for name in self.explorations.keys()
        for var in self.explorations[name].get_variables()
    ]
    if self.reward_preprocessing is not None:
        reward_vars = self.reward_preprocessing.get_variables()
    else:
        reward_vars = list()
    if self.optimizer is None:
        optimizer_vars = list()
    else:
        optimizer_vars = self.optimizer.get_variables()

    # Append each group, skipping variables already collected (preserves order).
    for group in (preprocessing_vars, exploration_vars, reward_vars, optimizer_vars):
        additions = [var for var in group if var not in collected]
        collected.extend(additions)
    return collected
def get_summaries(self):
    """
    Returns the TensorFlow summaries reported by the model.

    Returns:
        List of summaries (the model's own list, not a copy).
    """
    reported = self.summaries
    return reported
def reset(self):
    """
    Resets the model to its initial state on episode start.

    Returns:
        tuple: Current episode counter, current timestep counter, and a
        shallow copy of the list of internal-state initialization tensors.
    """
    # NOTE(review): the preprocessing reset call was moved here from the agent
    # per the original TODO — confirm this is still intended.
    counters = self.monitored_session.run(fetches=(self.episode, self.timestep))
    episode, timestep = counters
    return episode, timestep, list(self.internals_init)
def act(self, states, internals, deterministic=False):
    """
    Does a forward pass through the model to retrieve action (outputs) given inputs for state (and internal
    state, if applicable (e.g. RNNs)).

    Args:
        states (dict): Dict of state tensors (each key represents one state space component).
        internals: List of incoming internal state tensors.
        deterministic (bool): If True, will not apply exploration after actions are calculated.

    Returns:
        tuple:
            - Actual action-outputs (batched if state input is a batch).
            - Actual values of internal states (if applicable) (batched if state input is a batch).
            - The timestep (int) after calculating the (batch of) action(s).
    """
    fetches = [self.actions_output, self.internals_output, self.timestep_output]
    # Use an arbitrary state component to decide whether the input is batched:
    # an extra leading dimension beyond the declared state shape implies a batch.
    name = next(iter(self.states_spec))
    batched = (np.asarray(states[name]).ndim != len(self.states_spec[name]['shape']))
    if batched:
        # Batched inputs are fed as-is. Note: the comprehension's `name`
        # deliberately shadows the outer `name` used for batch detection.
        feed_dict = {state_input: states[name] for name, state_input in self.states_input.items()}
        feed_dict.update({internal_input: internals[n] for n, internal_input in enumerate(self.internals_input)})
    else:
        # Single observations are wrapped into length-one batches.
        feed_dict = {state_input: (states[name],) for name, state_input in self.states_input.items()}
        feed_dict.update({internal_input: (internals[n],) for n, internal_input in enumerate(self.internals_input)})
    feed_dict[self.deterministic_input] = deterministic
    feed_dict[self.update_input] = False  # acting only; no parameter update
    actions, internals, timestep = self.monitored_session.run(fetches=fetches, feed_dict=feed_dict)
    # Extract the first (and only) action/internal from the batch to make return values non-batched
    if not batched:
        actions = {name: action[0] for name, action in actions.items()}
        internals = [internal[0] for internal in internals]
    if self.summary_configuration_op is not None:
        summary_values = self.session.run(self.summary_configuration_op)
        self.summary_writer.add_summary(summary_values)
        self.summary_writer.flush()
        # Only do this operation once to reduce duplicate data in Tensorboard
        self.summary_configuration_op = None
    return actions, internals, timestep
def observe(self, terminal, reward):
    """
    Adds an observation (reward and is-terminal flag) to the model without
    updating its trainable variables.

    Args:
        terminal (bool): Whether the episode has terminated.
        reward (float): The observed reward value.

    Returns:
        The value of the model-internal episode counter.
    """
    terminal = np.asarray(terminal)
    if terminal.ndim == 1:
        # Batched input: feed the sequences straight through.
        feed_dict = {
            self.terminal_input: terminal,
            self.reward_input: reward,
        }
    else:
        # Single observation: wrap values into length-one batches.
        feed_dict = {
            self.terminal_input: (terminal,),
            self.reward_input: (reward,),
        }
    feed_dict[self.update_input] = False  # don't update, just "observe"
    return self.monitored_session.run(fetches=self.increment_episode, feed_dict=feed_dict)
def update(self, states, internals, actions, terminal, reward, return_loss_per_instance=False):
    """
    Runs self.optimization in the session to update the model's parameters.

    Optionally also evaluates the `loss_per_instance` calculation and returns
    its result.

    Args:
        states (dict): Dict of state tensors (each key represents one state space component).
        internals: List of prior internal state tensors.
        actions (dict): Dict of action tensors (each key represents one action space component).
        terminal: Terminal boolean tensor (shape=(batch-size,)).
        reward: Reward float tensor (shape=(batch-size,)).
        return_loss_per_instance (bool): Whether to also run and return the `loss_per_instance` tensor.

    Returns:
        void or - if return_loss_per_instance is True - the value of the `loss_per_instance` tensor.
    """
    fetches = [self.optimization]
    # Optionally fetch loss per instance
    if return_loss_per_instance:
        fetches.append(self.loss_per_instance)

    terminal = np.asarray(terminal)
    batched = (terminal.ndim == 1)

    def feed_value(value):
        # Batched inputs are fed as-is; single observations become length-one batches.
        return value if batched else (value,)

    feed_dict = dict()
    for name, state_input in self.states_input.items():
        feed_dict[state_input] = feed_value(states[name])
    for n, internal_input in enumerate(self.internals_input):
        feed_dict[internal_input] = feed_value(internals[n])
    for name, action_input in self.actions_input.items():
        feed_dict[action_input] = feed_value(actions[name])
    feed_dict[self.terminal_input] = feed_value(terminal)
    feed_dict[self.reward_input] = feed_value(reward)
    feed_dict[self.deterministic_input] = True
    feed_dict[self.update_input] = True

    fetched = self.monitored_session.run(fetches=fetches, feed_dict=feed_dict)
    if return_loss_per_instance:
        return fetched[1]
def save(self, directory=None, append_timestep=True):
    """
    Saves the TensorFlow model. If no checkpoint directory is given, the
    model's default saver directory is used. Optionally appends the current
    timestep to prevent overwriting previous checkpoint files; turn off to be
    able to load the model from the same path argument as given here.

    Args:
        directory: Optional checkpoint directory.
        append_timestep: Appends the current timestep to the checkpoint file if true.

    Returns:
        Checkpoint path where the model was saved.
    """
    # Flush pending summaries before checkpointing.
    if self.summary_writer_hook is not None:
        self.summary_writer_hook._summary_writer.flush()

    save_path = self.saver_directory if directory is None else directory
    global_step = self.timestep if append_timestep else None
    return self.scaffold.saver.save(
        sess=self.session,
        save_path=save_path,
        global_step=global_step,
        # latest_filename=None, # Defaults to 'checkpoint'.
        meta_graph_suffix='meta',
        write_meta_graph=True,
        write_state=True
    )
def restore(self, directory=None, file=None):
    """
    Restores a TensorFlow model checkpoint.

    If no checkpoint file is given, the latest checkpoint is restored. If no
    checkpoint directory is given, the model's default saver directory is used
    (unless `file` already specifies the entire path).

    Args:
        directory: Optional checkpoint directory.
        file: Optional checkpoint file, or path if directory not given.
    """
    if file is None:
        checkpoint_dir = self.saver_directory if directory is None else directory
        file = tf.train.latest_checkpoint(checkpoint_dir=checkpoint_dir)
    elif directory is None:
        file = os.path.join(self.saver_directory, file)
    elif not os.path.isfile(file):
        # `file` is a bare filename: resolve it inside the given directory.
        file = os.path.join(directory, file)
    self.scaffold.saver.restore(sess=self.session, save_path=file)
| [
"tensorflow.shape",
"tensorflow.train.SingularMonitoredSession",
"tensorforce.util.shape",
"tensorflow.train.Scaffold",
"tensorflow.control_dependencies",
"tensorforce.core.optimizers.GlobalOptimizer",
"copy.deepcopy",
"tensorflow.reduce_mean",
"tensorflow.scan",
"tensorflow.variables_initializer"... | [((22742, 23075), 'tensorflow.train.Saver', 'tf.train.Saver', ([], {'var_list': 'global_variables', 'reshape': '(False)', 'sharded': '(False)', 'max_to_keep': '(5)', 'keep_checkpoint_every_n_hours': '(10000.0)', 'name': 'None', 'restore_sequentially': '(False)', 'saver_def': 'None', 'builder': 'None', 'defer_build': '(False)', 'allow_empty': '(True)', 'write_version': 'tf.train.SaverDef.V2', 'pad_step_number': '(False)', 'save_relative_paths': '(True)'}), '(var_list=global_variables, reshape=False, sharded=False,\n max_to_keep=5, keep_checkpoint_every_n_hours=10000.0, name=None,\n restore_sequentially=False, saver_def=None, builder=None, defer_build=\n False, allow_empty=True, write_version=tf.train.SaverDef.V2,\n pad_step_number=False, save_relative_paths=True)\n', (22756, 23075), True, 'import tensorflow as tf\n'), ((23366, 23608), 'tensorflow.train.Scaffold', 'tf.train.Scaffold', ([], {'init_op': 'init_op', 'init_feed_dict': 'None', 'init_fn': 'init_fn', 'ready_op': 'ready_op', 'ready_for_local_init_op': 'ready_for_local_init_op', 'local_init_op': 'local_init_op', 'summary_op': 'summary_op', 'saver': 'saver', 'copy_from_scaffold': 'None'}), '(init_op=init_op, init_feed_dict=None, init_fn=init_fn,\n ready_op=ready_op, ready_for_local_init_op=ready_for_local_init_op,\n local_init_op=local_init_op, summary_op=summary_op, saver=saver,\n copy_from_scaffold=None)\n', (23383, 23608), True, 'import tensorflow as tf\n'), ((33317, 33464), 'tensorflow.make_template', 'tf.make_template', ([], {'name_': "(self.scope + '/discounted-cumulative-reward')", 'func_': 'self.tf_discounted_cumulative_reward', 'custom_getter_': 'custom_getter'}), "(name_=self.scope + '/discounted-cumulative-reward', func_=\n self.tf_discounted_cumulative_reward, custom_getter_=custom_getter)\n", (33333, 33464), True, 'import tensorflow as tf\n'), ((33548, 33681), 'tensorflow.make_template', 'tf.make_template', ([], {'name_': "(self.scope + 
'/actions-and-internals')", 'func_': 'self.tf_actions_and_internals', 'custom_getter_': 'custom_getter'}), "(name_=self.scope + '/actions-and-internals', func_=self.\n tf_actions_and_internals, custom_getter_=custom_getter)\n", (33564, 33681), True, 'import tensorflow as tf\n'), ((33761, 33886), 'tensorflow.make_template', 'tf.make_template', ([], {'name_': "(self.scope + '/loss-per-instance')", 'func_': 'self.tf_loss_per_instance', 'custom_getter_': 'custom_getter'}), "(name_=self.scope + '/loss-per-instance', func_=self.\n tf_loss_per_instance, custom_getter_=custom_getter)\n", (33777, 33886), True, 'import tensorflow as tf\n'), ((33970, 34103), 'tensorflow.make_template', 'tf.make_template', ([], {'name_': "(self.scope + '/regularization-losses')", 'func_': 'self.tf_regularization_losses', 'custom_getter_': 'custom_getter'}), "(name_=self.scope + '/regularization-losses', func_=self.\n tf_regularization_losses, custom_getter_=custom_getter)\n", (33986, 34103), True, 'import tensorflow as tf\n'), ((34170, 34268), 'tensorflow.make_template', 'tf.make_template', ([], {'name_': "(self.scope + '/loss')", 'func_': 'self.tf_loss', 'custom_getter_': 'custom_getter'}), "(name_=self.scope + '/loss', func_=self.tf_loss,\n custom_getter_=custom_getter)\n", (34186, 34268), True, 'import tensorflow as tf\n'), ((34344, 34459), 'tensorflow.make_template', 'tf.make_template', ([], {'name_': "(self.scope + '/optimization')", 'func_': 'self.tf_optimization', 'custom_getter_': 'custom_getter'}), "(name_=self.scope + '/optimization', func_=self.\n tf_optimization, custom_getter_=custom_getter)\n", (34360, 34459), True, 'import tensorflow as tf\n'), ((34539, 34664), 'tensorflow.make_template', 'tf.make_template', ([], {'name_': "(self.scope + '/preprocess-states')", 'func_': 'self.tf_preprocess_states', 'custom_getter_': 'custom_getter'}), "(name_=self.scope + '/preprocess-states', func_=self.\n tf_preprocess_states, custom_getter_=custom_getter)\n", (34555, 34664), True, 'import 
tensorflow as tf\n'), ((34745, 34872), 'tensorflow.make_template', 'tf.make_template', ([], {'name_': "(self.scope + '/action-exploration')", 'func_': 'self.tf_action_exploration', 'custom_getter_': 'custom_getter'}), "(name_=self.scope + '/action-exploration', func_=self.\n tf_action_exploration, custom_getter_=custom_getter)\n", (34761, 34872), True, 'import tensorflow as tf\n'), ((34952, 35077), 'tensorflow.make_template', 'tf.make_template', ([], {'name_': "(self.scope + '/preprocess-reward')", 'func_': 'self.tf_preprocess_reward', 'custom_getter_': 'custom_getter'}), "(name_=self.scope + '/preprocess-reward', func_=self.\n tf_preprocess_reward, custom_getter_=custom_getter)\n", (34968, 35077), True, 'import tensorflow as tf\n'), ((36481, 36503), 'tensorflow.shape', 'tf.shape', ([], {'input': 'action'}), '(input=action)\n', (36489, 36503), True, 'import tensorflow as tf\n'), ((41411, 41447), 'tensorflow.reverse', 'tf.reverse', ([], {'tensor': 'reward', 'axis': '(0,)'}), '(tensor=reward, axis=(0,))\n', (41421, 41447), True, 'import tensorflow as tf\n'), ((41507, 41545), 'tensorflow.reverse', 'tf.reverse', ([], {'tensor': 'terminal', 'axis': '(0,)'}), '(tensor=terminal, axis=(0,))\n', (41517, 41545), True, 'import tensorflow as tf\n'), ((41712, 41759), 'tensorflow.scan', 'tf.scan', ([], {'fn': 'len_', 'elems': 'terminal', 'initializer': '(0)'}), '(fn=len_, elems=terminal, initializer=0)\n', (41719, 41759), True, 'import tensorflow as tf\n'), ((42752, 42888), 'tensorflow.scan', 'tf.scan', ([], {'fn': 'cumulate', 'elems': '(reward, terminal, off_horizon, horizon_subtractions)', 'initializer': '(final_reward if horizon != 1 else 0.0)'}), '(fn=cumulate, elems=(reward, terminal, off_horizon,\n horizon_subtractions), initializer=final_reward if horizon != 1 else 0.0)\n', (42759, 42888), True, 'import tensorflow as tf\n'), ((42999, 43035), 'tensorflow.reverse', 'tf.reverse', ([], {'tensor': 'reward', 'axis': '(0,)'}), '(tensor=reward, axis=(0,))\n', (43009, 43035), 
True, 'import tensorflow as tf\n'), ((46786, 46840), 'tensorflow.reduce_mean', 'tf.reduce_mean', ([], {'input_tensor': 'loss_per_instance', 'axis': '(0)'}), '(input_tensor=loss_per_instance, axis=0)\n', (46800, 46840), True, 'import tensorflow as tf\n'), ((59778, 59798), 'numpy.asarray', 'np.asarray', (['terminal'], {}), '(terminal)\n', (59788, 59798), True, 'import numpy as np\n'), ((61384, 61404), 'numpy.asarray', 'np.asarray', (['terminal'], {}), '(terminal)\n', (61394, 61404), True, 'import numpy as np\n'), ((12175, 12185), 'tensorflow.Graph', 'tf.Graph', ([], {}), '()\n', (12183, 12185), True, 'import tensorflow as tf\n'), ((13740, 13786), 'tensorflow.device', 'tf.device', ([], {'device_name_or_function': 'self.device'}), '(device_name_or_function=self.device)\n', (13749, 13786), True, 'import tensorflow as tf\n'), ((16620, 16658), 'tensorflow.identity', 'tf.identity', ([], {'input': 'self.terminal_input'}), '(input=self.terminal_input)\n', (16631, 16658), True, 'import tensorflow as tf\n'), ((16680, 16716), 'tensorflow.identity', 'tf.identity', ([], {'input': 'self.reward_input'}), '(input=self.reward_input)\n', (16691, 16716), True, 'import tensorflow as tf\n'), ((16855, 16885), 'tensorflow.stop_gradient', 'tf.stop_gradient', ([], {'input': 'reward'}), '(input=reward)\n', (16871, 16885), True, 'import tensorflow as tf\n'), ((20700, 20751), 'tensorflow.variables_initializer', 'tf.variables_initializer', ([], {'var_list': 'global_variables'}), '(var_list=global_variables)\n', (20724, 20751), True, 'import tensorflow as tf\n'), ((20775, 20853), 'tensorflow.report_uninitialized_variables', 'tf.report_uninitialized_variables', ([], {'var_list': '(global_variables + local_variables)'}), '(var_list=global_variables + local_variables)\n', (20808, 20853), True, 'import tensorflow as tf\n'), ((20894, 20954), 'tensorflow.report_uninitialized_variables', 'tf.report_uninitialized_variables', ([], {'var_list': 'global_variables'}), '(var_list=global_variables)\n', (20927, 
20954), True, 'import tensorflow as tf\n'), ((21430, 21481), 'tensorflow.variables_initializer', 'tf.variables_initializer', ([], {'var_list': 'global_variables'}), '(var_list=global_variables)\n', (21454, 21481), True, 'import tensorflow as tf\n'), ((21505, 21565), 'tensorflow.report_uninitialized_variables', 'tf.report_uninitialized_variables', ([], {'var_list': 'global_variables'}), '(var_list=global_variables)\n', (21538, 21565), True, 'import tensorflow as tf\n'), ((22612, 22646), 'tensorflow.summary.merge', 'tf.summary.merge', ([], {'inputs': 'summaries'}), '(inputs=summaries)\n', (22628, 22646), True, 'import tensorflow as tf\n'), ((24769, 24904), 'tensorflow.summary.FileWriter', 'tf.summary.FileWriter', ([], {'logdir': "self.summary_spec['directory']", 'graph': 'self.graph', 'max_queue': '(10)', 'flush_secs': '(120)', 'filename_suffix': 'None'}), "(logdir=self.summary_spec['directory'], graph=self.\n graph, max_queue=10, flush_secs=120, filename_suffix=None)\n", (24790, 24904), True, 'import tensorflow as tf\n'), ((26881, 27015), 'tensorflow.train.SingularMonitoredSession', 'tf.train.SingularMonitoredSession', ([], {'hooks': 'hooks', 'scaffold': 'self.scaffold', 'master': '""""""', 'config': 'self.session_config', 'checkpoint_dir': 'None'}), "(hooks=hooks, scaffold=self.scaffold,\n master='', config=self.session_config, checkpoint_dir=None)\n", (26914, 27015), True, 'import tensorflow as tf\n'), ((28332, 28435), 'tensorflow.train.MonitoredSession', 'tf.train.MonitoredSession', ([], {'session_creator': 'session_creator', 'hooks': 'hooks', 'stop_grace_period_secs': '(120)'}), '(session_creator=session_creator, hooks=hooks,\n stop_grace_period_secs=120)\n', (28357, 28435), True, 'import tensorflow as tf\n'), ((32536, 32600), 'tensorforce.core.preprocessing.PreprocessorStack.from_spec', 'PreprocessorStack.from_spec', ([], {'spec': 'self.reward_preprocessing_spec'}), '(spec=self.reward_preprocessing_spec)\n', (32563, 32600), False, 'from 
tensorforce.core.preprocessing import PreprocessorStack\n'), ((37968, 37993), 'tensorflow.identity', 'tf.identity', ([], {'input': 'reward'}), '(input=reward)\n', (37979, 37993), True, 'import tensorflow as tf\n'), ((42029, 42099), 'tensorflow.map_fn', 'tf.map_fn', (['(lambda x: discount ** horizon * x)', 'reward'], {'dtype': 'tf.float32'}), '(lambda x: discount ** horizon * x, reward, dtype=tf.float32)\n', (42038, 42099), True, 'import tensorflow as tf\n'), ((46971, 47037), 'tensorflow.summary.scalar', 'tf.summary.scalar', ([], {'name': '"""loss-without-regularization"""', 'tensor': 'loss'}), "(name='loss-without-regularization', tensor=loss)\n", (46988, 47037), True, 'import tensorflow as tf\n'), ((47766, 47815), 'tensorflow.summary.scalar', 'tf.summary.scalar', ([], {'name': '"""total-loss"""', 'tensor': 'loss'}), "(name='total-loss', tensor=loss)\n", (47783, 47815), True, 'import tensorflow as tf\n'), ((50258, 50268), 'tensorflow.no_op', 'tf.no_op', ([], {}), '()\n', (50266, 50268), True, 'import tensorflow as tf\n'), ((52630, 52680), 'tensorflow.control_dependencies', 'tf.control_dependencies', ([], {'control_inputs': 'operations'}), '(control_inputs=operations)\n', (52653, 52680), True, 'import tensorflow as tf\n'), ((53434, 53484), 'tensorflow.control_dependencies', 'tf.control_dependencies', ([], {'control_inputs': 'operations'}), '(control_inputs=operations)\n', (53457, 53484), True, 'import tensorflow as tf\n'), ((53833, 53893), 'tensorflow.control_dependencies', 'tf.control_dependencies', ([], {'control_inputs': '(increment_episode,)'}), '(control_inputs=(increment_episode,))\n', (53856, 53893), True, 'import tensorflow as tf\n'), ((64462, 64565), 'tensorflow.train.latest_checkpoint', 'tf.train.latest_checkpoint', ([], {'checkpoint_dir': '(self.saver_directory if directory is None else directory)'}), '(checkpoint_dir=self.saver_directory if directory is\n None else directory)\n', (64488, 64565), True, 'import tensorflow as tf\n'), ((12667, 12677), 
'tensorflow.Graph', 'tf.Graph', ([], {}), '()\n', (12675, 12677), True, 'import tensorflow as tf\n'), ((16177, 16201), 'tensorflow.identity', 'tf.identity', ([], {'input': 'state'}), '(input=state)\n', (16188, 16201), True, 'import tensorflow as tf\n'), ((16338, 16367), 'tensorflow.stop_gradient', 'tf.stop_gradient', ([], {'input': 'state'}), '(input=state)\n', (16354, 16367), True, 'import tensorflow as tf\n'), ((16428, 16455), 'tensorflow.identity', 'tf.identity', ([], {'input': 'internal'}), '(input=internal)\n', (16439, 16455), True, 'import tensorflow as tf\n'), ((16523, 16548), 'tensorflow.identity', 'tf.identity', ([], {'input': 'action'}), '(input=action)\n', (16534, 16548), True, 'import tensorflow as tf\n'), ((19421, 19493), 'tensorflow.summary.histogram', 'tf.summary.histogram', ([], {'name': "(self.scope + '/inputs/rewards')", 'values': 'reward'}), "(name=self.scope + '/inputs/rewards', values=reward)\n", (19441, 19493), True, 'import tensorflow as tf\n'), ((27672, 27839), 'tensorflow.train.ChiefSessionCreator', 'tf.train.ChiefSessionCreator', ([], {'scaffold': 'self.scaffold', 'master': 'server.target', 'config': 'self.session_config', 'checkpoint_dir': 'None', 'checkpoint_filename_with_path': 'None'}), '(scaffold=self.scaffold, master=server.target,\n config=self.session_config, checkpoint_dir=None,\n checkpoint_filename_with_path=None)\n', (27700, 27839), True, 'import tensorflow as tf\n'), ((28061, 28168), 'tensorflow.train.WorkerSessionCreator', 'tf.train.WorkerSessionCreator', ([], {'scaffold': 'self.scaffold', 'master': 'server.target', 'config': 'self.session_config'}), '(scaffold=self.scaffold, master=server.target,\n config=self.session_config)\n', (28090, 28168), True, 'import tensorflow as tf\n'), ((32299, 32320), 'tensorforce.util.tf_dtype', 'util.tf_dtype', (['"""bool"""'], {}), "('bool')\n", (32312, 32320), False, 'from tensorforce import TensorForceError, util\n'), ((32697, 32746), 'tensorforce.TensorForceError', 'TensorForceError', 
(['"""Invalid reward preprocessing!"""'], {}), "('Invalid reward preprocessing!')\n", (32713, 32746), False, 'from tensorforce import TensorForceError, util\n'), ((32814, 32836), 'tensorforce.util.tf_dtype', 'util.tf_dtype', (['"""float"""'], {}), "('float')\n", (32827, 32836), False, 'from tensorforce import TensorForceError, util\n'), ((33063, 33084), 'tensorforce.util.tf_dtype', 'util.tf_dtype', (['"""bool"""'], {}), "('bool')\n", (33076, 33084), False, 'from tensorforce import TensorForceError, util\n'), ((33190, 33211), 'tensorforce.util.tf_dtype', 'util.tf_dtype', (['"""bool"""'], {}), "('bool')\n", (33203, 33211), False, 'from tensorforce import TensorForceError, util\n'), ((35912, 35936), 'tensorflow.identity', 'tf.identity', ([], {'input': 'state'}), '(input=state)\n', (35923, 35936), True, 'import tensorflow as tf\n'), ((53721, 53741), 'tensorforce.util.tf_dtype', 'util.tf_dtype', (['"""int"""'], {}), "('int')\n", (53734, 53741), False, 'from tensorforce import TensorForceError, util\n'), ((57930, 57954), 'numpy.asarray', 'np.asarray', (['states[name]'], {}), '(states[name])\n', (57940, 57954), True, 'import numpy as np\n'), ((64745, 64785), 'os.path.join', 'os.path.join', (['self.saver_directory', 'file'], {}), '(self.saver_directory, file)\n', (64757, 64785), False, 'import os\n'), ((12542, 12604), 'tensorforce.TensorForceError', 'TensorForceError', (['"""Invalid config value for distributed mode."""'], {}), "('Invalid config value for distributed mode.')\n", (12558, 12604), False, 'from tensorforce import TensorForceError, util\n'), ((12953, 13062), 'tensorflow.train.replica_device_setter', 'tf.train.replica_device_setter', ([], {'worker_device': 'self.device', 'cluster': "self.distributed_spec['cluster_spec']"}), "(worker_device=self.device, cluster=self.\n distributed_spec['cluster_spec'])\n", (12983, 13062), True, 'import tensorflow as tf\n'), ((13244, 13266), 'tensorflow.get_default_graph', 'tf.get_default_graph', ([], {}), '()\n', (13264, 13266), 
True, 'import tensorflow as tf\n'), ((13443, 13453), 'tensorflow.Graph', 'tf.Graph', ([], {}), '()\n', (13451, 13453), True, 'import tensorflow as tf\n'), ((13571, 13585), 'copy.deepcopy', 'deepcopy', (['self'], {}), '(self)\n', (13579, 13585), False, 'from copy import deepcopy\n'), ((17351, 17392), 'tensorforce.core.optimizers.GlobalOptimizer', 'GlobalOptimizer', ([], {'optimizer': 'self.optimizer'}), '(optimizer=self.optimizer)\n', (17366, 17392), False, 'from tensorforce.core.optimizers import Optimizer, GlobalOptimizer\n'), ((17598, 17657), 'tensorforce.core.optimizers.Optimizer.from_spec', 'Optimizer.from_spec', ([], {'spec': 'self.optimizer', 'kwargs': 'kwargs_opt'}), '(spec=self.optimizer, kwargs=kwargs_opt)\n', (17617, 17657), False, 'from tensorforce.core.optimizers import Optimizer, GlobalOptimizer\n'), ((18892, 18970), 'tensorflow.summary.histogram', 'tf.summary.histogram', ([], {'name': "(self.scope + '/inputs/states/' + name)", 'values': 'state'}), "(name=self.scope + '/inputs/states/' + name, values=state)\n", (18912, 18970), True, 'import tensorflow as tf\n'), ((19184, 19269), 'tensorflow.summary.histogram', 'tf.summary.histogram', ([], {'name': "(self.scope + '/inputs/actions/' + name)", 'values': 'action'}), "(name=self.scope + '/inputs/actions/' + name, values=action\n )\n", (19204, 19269), True, 'import tensorflow as tf\n'), ((22067, 22141), 'tensorflow.train.latest_checkpoint', 'tf.train.latest_checkpoint', ([], {'checkpoint_dir': 'directory', 'latest_filename': 'None'}), '(checkpoint_dir=directory, latest_filename=None)\n', (22093, 22141), True, 'import tensorflow as tf\n'), ((29987, 30051), 'tensorforce.core.preprocessing.PreprocessorStack.from_spec', 'PreprocessorStack.from_spec', ([], {'spec': 'self.states_preprocessing_spec'}), '(spec=self.states_preprocessing_spec)\n', (30014, 30051), False, 'from tensorforce.core.preprocessing import PreprocessorStack\n'), ((30895, 30923), 'tensorforce.util.tf_dtype', 'util.tf_dtype', (["state['type']"], 
{}), "(state['type'])\n", (30908, 30923), False, 'from tensorforce import TensorForceError, util\n'), ((31207, 31236), 'tensorforce.util.tf_dtype', 'util.tf_dtype', (["action['type']"], {}), "(action['type'])\n", (31220, 31236), False, 'from tensorforce import TensorForceError, util\n'), ((31608, 31658), 'tensorforce.core.explorations.Exploration.from_spec', 'Exploration.from_spec', ([], {'spec': 'self.explorations_spec'}), '(spec=self.explorations_spec)\n', (31629, 31658), False, 'from tensorforce.core.explorations import Exploration\n'), ((40595, 40703), 'tensorflow.where', 'tf.where', ([], {'condition': 'is_over_horizon', 'x': '(rew + cumulative * discount - sub)', 'y': '(rew + cumulative * discount)'}), '(condition=is_over_horizon, x=rew + cumulative * discount - sub, y=\n rew + cumulative * discount)\n', (40603, 40703), True, 'import tensorflow as tf\n'), ((41151, 41184), 'tensorflow.ones', 'tf.ones', ([], {'shape': '()', 'dtype': 'tf.int32'}), '(shape=(), dtype=tf.int32)\n', (41158, 41184), True, 'import tensorflow as tf\n'), ((41842, 41859), 'tensorflow.shape', 'tf.shape', (['lengths'], {}), '(lengths)\n', (41850, 41859), True, 'import tensorflow as tf\n'), ((42215, 42241), 'numpy.zeros', 'np.zeros', ([], {'shape': '(horizon,)'}), '(shape=(horizon,))\n', (42223, 42241), True, 'import numpy as np\n'), ((42357, 42373), 'tensorflow.shape', 'tf.shape', (['reward'], {}), '(reward)\n', (42365, 42373), True, 'import tensorflow as tf\n'), ((42564, 42580), 'tensorflow.shape', 'tf.shape', (['reward'], {}), '(reward)\n', (42572, 42580), True, 'import tensorflow as tf\n'), ((47480, 47545), 'tensorflow.summary.scalar', 'tf.summary.scalar', ([], {'name': "('regularization/' + name)", 'tensor': 'loss_val'}), "(name='regularization/' + name, tensor=loss_val)\n", (47497, 47545), True, 'import tensorflow as tf\n'), ((64803, 64823), 'os.path.isfile', 'os.path.isfile', (['file'], {}), '(file)\n', (64817, 64823), False, 'import os\n'), ((64844, 64873), 'os.path.join', 
'os.path.join', (['directory', 'file'], {}), '(directory, file)\n', (64856, 64873), False, 'import os\n'), ((15059, 15079), 'tensorforce.util.tf_dtype', 'util.tf_dtype', (['"""int"""'], {}), "('int')\n", (15072, 15079), False, 'from tensorforce import TensorForceError, util\n'), ((15597, 15617), 'tensorforce.util.tf_dtype', 'util.tf_dtype', (['"""int"""'], {}), "('int')\n", (15610, 15617), False, 'from tensorforce import TensorForceError, util\n'), ((22297, 22317), 'os.path.isfile', 'os.path.isfile', (['file'], {}), '(file)\n', (22311, 22317), False, 'import os\n'), ((22346, 22375), 'os.path.join', 'os.path.join', (['directory', 'file'], {}), '(directory, file)\n', (22358, 22375), False, 'import os\n'), ((30391, 30461), 'tensorforce.core.preprocessing.PreprocessorStack.from_spec', 'PreprocessorStack.from_spec', ([], {'spec': 'self.states_preprocessing_spec[name]'}), '(spec=self.states_preprocessing_spec[name])\n', (30418, 30461), False, 'from tensorforce.core.preprocessing import PreprocessorStack\n'), ((31867, 31917), 'tensorforce.core.explorations.Exploration.from_spec', 'Exploration.from_spec', ([], {'spec': 'self.explorations_spec'}), '(spec=self.explorations_spec)\n', (31888, 31917), False, 'from tensorforce.core.explorations import Exploration\n'), ((36775, 36815), 'tensorflow.random_uniform', 'tf.random_uniform', ([], {'shape': 'action_shape[0]'}), '(shape=action_shape[0])\n', (36792, 36815), True, 'import tensorflow as tf\n'), ((36857, 36894), 'tensorflow.random_uniform', 'tf.random_uniform', ([], {'shape': 'action_shape'}), '(shape=action_shape)\n', (36874, 36894), True, 'import tensorflow as tf\n'), ((37539, 37651), 'tensorflow.clip_by_value', 'tf.clip_by_value', ([], {'t': 'action', 'clip_value_min': "action_spec['min_value']", 'clip_value_max': "action_spec['max_value']"}), "(t=action, clip_value_min=action_spec['min_value'],\n clip_value_max=action_spec['max_value'])\n", (37555, 37651), True, 'import tensorflow as tf\n'), ((52392, 52412), 
'tensorforce.util.shape', 'util.shape', (['variable'], {}), '(variable)\n', (52402, 52412), False, 'from tensorforce import TensorForceError, util\n'), ((14686, 14734), 'tensorflow.summary.histogram', 'tf.summary.histogram', ([], {'name': 'name', 'values': 'variable'}), '(name=name, values=variable)\n', (14706, 14734), True, 'import tensorflow as tf\n'), ((32171, 32227), 'tensorforce.core.explorations.Exploration.from_spec', 'Exploration.from_spec', ([], {'spec': 'self.explorations_spec[name]'}), '(spec=self.explorations_spec[name])\n', (32192, 32227), False, 'from tensorforce.core.explorations import Exploration\n'), ((37044, 37081), 'tensorflow.random_uniform', 'tf.random_uniform', ([], {'shape': 'action_shape'}), '(shape=action_shape)\n', (37061, 37081), True, 'import tensorflow as tf\n'), ((37201, 37221), 'tensorforce.util.tf_dtype', 'util.tf_dtype', (['"""int"""'], {}), "('int')\n", (37214, 37221), False, 'from tensorforce import TensorForceError, util\n')] |
import os
import argparse
import numpy as np
import tokenization
from run_classifier import ColaProcessor, SstProcessor, MrpcProcessor, QqpProcessor, QnliProcessor, MnliProcessor, RteProcessor, SnliProcessor, WnliProcessor
from run_classifier import file_based_convert_examples_to_features
def grade(basepath):
task = 'SST-2'
data_dir = '%s/glue_data/%s' % (basepath, task)
problems = '%s/test.tsv' % data_dir
answers = '%s/bert/cased_L-12_H-768_A-12/test_results.tsv' % data_dir
truths = '../../../data/sentiment_analysis/sstb/tst'
gold = {}
with open(truths, 'rt') as handle:
line = handle.read()
for item in line.split('\n'):
if len(item)>0:
label = item[0]
string = item[2:]
if string=='no. .':
string='no . .'
# if '\\/' in string:
# print(1)
string = string.replace('\\/', '/')
string = string.replace('-lrb-','(').replace('-rrb-',')')
gold[string[:10]] = int(label)
answer_list = []
with open(answers, 'rt') as handle:
all_line = handle.read()
for line in all_line.split('\n'):
if len(line) > 0:
probs = [float(item) for item in line.split('\t')]
pred = 1 if probs[1]>probs[0] else 0
answer_list.append(pred)
y_hat = []
y = []
with open(problems, 'rt') as handle:
all_line = handle.read()
for idx, line in enumerate(all_line.split('\n')):
if idx==0:
continue
if len(line) > 0:
y.append(gold[line.split('\t')[1][:10]])
y_hat.append(answer_list[idx - 1])
y = np.array(y)
y_hat = np.array(y_hat)
print(1)
if __name__=='__main__':
parser = argparse.ArgumentParser()
parser.add_argument('-p', default='../data') # base path
args = parser.parse_args()
grade(args.p)
| [
"numpy.array",
"argparse.ArgumentParser"
] | [((1767, 1778), 'numpy.array', 'np.array', (['y'], {}), '(y)\n', (1775, 1778), True, 'import numpy as np\n'), ((1791, 1806), 'numpy.array', 'np.array', (['y_hat'], {}), '(y_hat)\n', (1799, 1806), True, 'import numpy as np\n'), ((1861, 1886), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (1884, 1886), False, 'import argparse\n')] |
import numpy as np
from denoising.utils import *
# from denoising.bm3d import BM3D
# from denoising.non_local_means import NLM
# from denoising.bilateral_filter import bilateral_filter
# from denoising.wavelet import wavelet_soft_thresholding, \
# wavelet_hard_thresholding
# from denoising.wiener import wiener_filter
from tqdm import tqdm
def _batch_algorithm_implementation(single_image_denoising_algorithm, noisy_images: np.ndarray, noise_std_dev: float, show_progress:bool = False, *args, **kwargs):
validate_array_input(noisy_images)
validate_if_noise_std_dev_is_a_float(noise_std_dev)
filtered_images = []
if show_progress:
for i in tqdm(range(noisy_images.shape[0])):
filtered_images.append(
single_image_denoising_algorithm(
noisy_images[i, :,:,:],
*args,
**kwargs
)
)
else:
for i in range(noisy_images.shape[0]):
filtered_images.append(
single_image_denoising_algorithm(
noisy_images[i, :,:,:],
*args,
**kwargs
)
)
filtered_images = np.array(filtered_images)
return filtered_images
| [
"numpy.array"
] | [((1231, 1256), 'numpy.array', 'np.array', (['filtered_images'], {}), '(filtered_images)\n', (1239, 1256), True, 'import numpy as np\n')] |
from skimage import data, filters
from skimage.color import rgb2gray
from matplotlib import pyplot as plt
from skimage.transform import rescale, resize, downscale_local_mean
from skimage.filters import threshold_otsu, try_all_threshold, threshold_multiotsu
import numpy as np
from skimage.filters.thresholding import _cross_entropy
import os
# --- EDGES ---
image = data.cat()
edges = filters.sobel(image)
fig, axes = plt.subplots(1, 2, figsize=(8, 4))
# ax = axes.ravel()
axes[0].imshow(image)
axes[0].set_title("Original")
axes[0].set_axis_off()
axes[1].imshow(edges)
axes[1].set_title("Edges")
axes[1].set_axis_off()
fig.tight_layout()
plt.show()
# --- RGB TO GRAY ---
grayscale = rgb2gray(image) # transform into grayscale
fig, axes = plt.subplots(1, 2, figsize=(8, 4))
axes[0].imshow(image)
axes[0].set_title("Original")
axes[0].set_axis_off()
axes[1].imshow(grayscale, cmap='gray') # equal to cmap=plt.cm.gray
axes[1].set_title("Grayscale")
axes[1].axis('off')
fig.tight_layout()
plt.show()
# --- RESCALE ---
# Rescale resizes an image by a given scaling factor. Resize serves the same purpose, but allows to specify an output
# image shape instead of a scaling factor. Downscale serves the purpose of down-sampling an n-dimensional image by
# integer factors using the local mean on the elements of each block of the size factors given as a parameter to the
# function.
# Anti_aliasing: apply a Gaussian filter to smooth the image.
grayscale_rescaled = rescale(grayscale, 0.25, anti_aliasing=False)
grayscale_resized = resize(grayscale, (grayscale.shape[0] // 4, grayscale.shape[1] // 4), anti_aliasing=False)
grayscale_downscaled = downscale_local_mean(grayscale, (4, 3))
fig, axes = plt.subplots(nrows=2, ncols=2)
ax = axes.ravel()
ax[0].imshow(grayscale, cmap='gray')
ax[0].set_title("Original")
ax[1].imshow(grayscale_rescaled, cmap='gray')
ax[1].set_title("Rescaled image (with aliasing)")
ax[2].imshow(grayscale_resized, cmap='gray')
ax[2].set_title("Resized image (with aliasing)")
ax[3].imshow(grayscale_downscaled, cmap='gray')
ax[3].set_title("Downscaled image (no aliasing)")
ax[0].set_xlim(0, 512)
ax[0].set_ylim(512, 0)
for a in ax:
a.set_axis_off()
plt.tight_layout()
plt.show()
# --- THRESHOLDING ---
# Thresholding is used to create a binary image from a grayscale image.
# If I don't know which thresholding method to choose.
# We specify a radius for local thresholding algorithms. If it is not specified, only global algorithms are called.
image = data.camera()
fig, ax = try_all_threshold(image, figsize=(10, 8), verbose=False)
plt.figure()
fig.tight_layout()
plt.show()
# Otsu's Method
thresh = threshold_otsu(image)
binary = image > thresh # every value bigger than 'thresh' turns True
fig, axes = plt.subplots(ncols=3, figsize=(8, 2.5))
axt = axes.ravel()
axt[0] = plt.subplot(1, 3, 1)
axt[0].imshow(image, cmap=plt.cm.gray)
axt[0].set_title('Original')
axt[0].axis('off')
axt[1] = plt.subplot(1, 3, 2)
axt[1].hist(image.ravel(), bins=256)
axt[1].set_title('Histogram')
axt[1].axvline(thresh, color='r')
axt[2] = plt.subplot(1, 3, 3, sharex=ax[0], sharey=ax[0]) # share their axes
axt[2].imshow(binary, cmap=plt.cm.gray)
axt[2].set_title("Thresholded with Otsu's Method")
axt[2].axis('off')
plt.show()
# --- Li Thresholding Method ---
# They proposed that minimizing the cross-entropy between the foreground and the foreground mean, and the background
# and the background mean, would give the best threshold in most situations.
thresh_li = np.arange(np.min(image) + 1.5, np.max(image) - 1.5)
entropies = [_cross_entropy(image, t) for t in thresh_li]
optimal_camera_threshold = thresh_li[np.argmin(entropies)] # Returns the indices of the minimum values along an axis.
fig_li, ax_li = plt.subplots(1, 3, figsize=(8, 3))
ax_li[0].imshow(image, cmap='gray')
ax_li[0].set_title('image')
ax_li[0].set_axis_off()
ax_li[1].imshow(image > optimal_camera_threshold, cmap='gray')
ax_li[1].set_title('Thresholded with Li´s Method')
ax_li[1].set_axis_off()
ax_li[2].plot(thresh_li, entropies)
ax_li[2].set_xlabel('thresholds')
ax_li[2].set_ylabel('cross-entropy')
ax_li[2].vlines(optimal_camera_threshold, ymin=np.min(entropies) - 0.05 * np.ptp(entropies),
ymax=np.max(entropies) - 0.05 * np.ptp(entropies))
ax_li[2].set_title('optimal threshold')
fig_li.tight_layout()
print('The brute force optimal threshold is:', optimal_camera_threshold)
print('The computed optimal threshold is:', filters.threshold_li(image))
plt.show()
# --- Multi-Otsu Method ---
# Thresholding algorithm that is used to separate the pixels of an input image into several different classes,
# each one obtained according to the intensity of the gray levels within the image.
cat = rgb2gray(data.cat())
thresh_multi = threshold_multiotsu(cat) # Creates 3 classes
regions = np.digitize(cat, bins=thresh_multi)
fig_multi, ax_multi = plt.subplots(1, 3, figsize=(10, 3.5))
ax_multi[0].imshow(cat, cmap='jet')
ax_multi[0].set_title('Original')
ax_multi[0].axis('off')
ax_multi[1].hist(cat.ravel(), bins=255)
ax_multi[1].set_title('Histogram')
ax_multi[2].imshow(regions, cmap='jet')
ax_multi[2].set_title('Multi-Otsu Result')
ax_multi[2].axis('off')
plt.subplots_adjust()
plt.show()
# Covariancia
# std
# SNR
# Normalized mean squared error ou mean squared error
# Normalized standard deviation
# PET: Recovery Coefficient (CRC ou RC)
diretorio = os.path.dirname(os.path.abspath(__file__))
image = plt.imread(os.path.join(diretorio, image, "fruta-1024x676.jpg"))
| [
"skimage.filters.try_all_threshold",
"numpy.ptp",
"skimage.filters.threshold_otsu",
"skimage.data.camera",
"skimage.transform.downscale_local_mean",
"numpy.max",
"skimage.data.cat",
"numpy.min",
"numpy.argmin",
"skimage.filters.thresholding._cross_entropy",
"skimage.transform.rescale",
"skimag... | [((367, 377), 'skimage.data.cat', 'data.cat', ([], {}), '()\n', (375, 377), False, 'from skimage import data, filters\n'), ((386, 406), 'skimage.filters.sobel', 'filters.sobel', (['image'], {}), '(image)\n', (399, 406), False, 'from skimage import data, filters\n'), ((420, 454), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(2)'], {'figsize': '(8, 4)'}), '(1, 2, figsize=(8, 4))\n', (432, 454), True, 'from matplotlib import pyplot as plt\n'), ((644, 654), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (652, 654), True, 'from matplotlib import pyplot as plt\n'), ((690, 705), 'skimage.color.rgb2gray', 'rgb2gray', (['image'], {}), '(image)\n', (698, 705), False, 'from skimage.color import rgb2gray\n'), ((746, 780), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(2)'], {'figsize': '(8, 4)'}), '(1, 2, figsize=(8, 4))\n', (758, 780), True, 'from matplotlib import pyplot as plt\n'), ((997, 1007), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1005, 1007), True, 'from matplotlib import pyplot as plt\n'), ((1473, 1518), 'skimage.transform.rescale', 'rescale', (['grayscale', '(0.25)'], {'anti_aliasing': '(False)'}), '(grayscale, 0.25, anti_aliasing=False)\n', (1480, 1518), False, 'from skimage.transform import rescale, resize, downscale_local_mean\n'), ((1539, 1633), 'skimage.transform.resize', 'resize', (['grayscale', '(grayscale.shape[0] // 4, grayscale.shape[1] // 4)'], {'anti_aliasing': '(False)'}), '(grayscale, (grayscale.shape[0] // 4, grayscale.shape[1] // 4),\n anti_aliasing=False)\n', (1545, 1633), False, 'from skimage.transform import rescale, resize, downscale_local_mean\n'), ((1653, 1692), 'skimage.transform.downscale_local_mean', 'downscale_local_mean', (['grayscale', '(4, 3)'], {}), '(grayscale, (4, 3))\n', (1673, 1692), False, 'from skimage.transform import rescale, resize, downscale_local_mean\n'), ((1706, 1736), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'nrows': '(2)', 'ncols': '(2)'}), '(nrows=2, 
ncols=2)\n', (1718, 1736), True, 'from matplotlib import pyplot as plt\n'), ((2194, 2212), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (2210, 2212), True, 'from matplotlib import pyplot as plt\n'), ((2213, 2223), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2221, 2223), True, 'from matplotlib import pyplot as plt\n'), ((2499, 2512), 'skimage.data.camera', 'data.camera', ([], {}), '()\n', (2510, 2512), False, 'from skimage import data, filters\n'), ((2523, 2579), 'skimage.filters.try_all_threshold', 'try_all_threshold', (['image'], {'figsize': '(10, 8)', 'verbose': '(False)'}), '(image, figsize=(10, 8), verbose=False)\n', (2540, 2579), False, 'from skimage.filters import threshold_otsu, try_all_threshold, threshold_multiotsu\n'), ((2581, 2593), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (2591, 2593), True, 'from matplotlib import pyplot as plt\n'), ((2613, 2623), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2621, 2623), True, 'from matplotlib import pyplot as plt\n'), ((2650, 2671), 'skimage.filters.threshold_otsu', 'threshold_otsu', (['image'], {}), '(image)\n', (2664, 2671), False, 'from skimage.filters import threshold_otsu, try_all_threshold, threshold_multiotsu\n'), ((2756, 2795), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'ncols': '(3)', 'figsize': '(8, 2.5)'}), '(ncols=3, figsize=(8, 2.5))\n', (2768, 2795), True, 'from matplotlib import pyplot as plt\n'), ((2825, 2845), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', '(3)', '(1)'], {}), '(1, 3, 1)\n', (2836, 2845), True, 'from matplotlib import pyplot as plt\n'), ((2943, 2963), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', '(3)', '(2)'], {}), '(1, 3, 2)\n', (2954, 2963), True, 'from matplotlib import pyplot as plt\n'), ((3075, 3123), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', '(3)', '(3)'], {'sharex': 'ax[0]', 'sharey': 'ax[0]'}), '(1, 3, 3, sharex=ax[0], sharey=ax[0])\n', (3086, 3123), True, 'from matplotlib 
import pyplot as plt\n'), ((3256, 3266), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3264, 3266), True, 'from matplotlib import pyplot as plt\n'), ((3754, 3788), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(3)'], {'figsize': '(8, 3)'}), '(1, 3, figsize=(8, 3))\n', (3766, 3788), True, 'from matplotlib import pyplot as plt\n'), ((4495, 4505), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4503, 4505), True, 'from matplotlib import pyplot as plt\n'), ((4772, 4796), 'skimage.filters.threshold_multiotsu', 'threshold_multiotsu', (['cat'], {}), '(cat)\n', (4791, 4796), False, 'from skimage.filters import threshold_otsu, try_all_threshold, threshold_multiotsu\n'), ((4828, 4863), 'numpy.digitize', 'np.digitize', (['cat'], {'bins': 'thresh_multi'}), '(cat, bins=thresh_multi)\n', (4839, 4863), True, 'import numpy as np\n'), ((4887, 4924), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(3)'], {'figsize': '(10, 3.5)'}), '(1, 3, figsize=(10, 3.5))\n', (4899, 4924), True, 'from matplotlib import pyplot as plt\n'), ((5205, 5226), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {}), '()\n', (5224, 5226), True, 'from matplotlib import pyplot as plt\n'), ((5227, 5237), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5235, 5237), True, 'from matplotlib import pyplot as plt\n'), ((3573, 3597), 'skimage.filters.thresholding._cross_entropy', '_cross_entropy', (['image', 't'], {}), '(image, t)\n', (3587, 3597), False, 'from skimage.filters.thresholding import _cross_entropy\n'), ((3655, 3675), 'numpy.argmin', 'np.argmin', (['entropies'], {}), '(entropies)\n', (3664, 3675), True, 'import numpy as np\n'), ((4465, 4492), 'skimage.filters.threshold_li', 'filters.threshold_li', (['image'], {}), '(image)\n', (4485, 4492), False, 'from skimage import data, filters\n'), ((4745, 4755), 'skimage.data.cat', 'data.cat', ([], {}), '()\n', (4753, 4755), False, 'from skimage import data, filters\n'), ((5421, 5446), 'os.path.abspath', 
'os.path.abspath', (['__file__'], {}), '(__file__)\n', (5436, 5446), False, 'import os\n'), ((5467, 5519), 'os.path.join', 'os.path.join', (['diretorio', 'image', '"""fruta-1024x676.jpg"""'], {}), "(diretorio, image, 'fruta-1024x676.jpg')\n", (5479, 5519), False, 'import os\n'), ((3518, 3531), 'numpy.min', 'np.min', (['image'], {}), '(image)\n', (3524, 3531), True, 'import numpy as np\n'), ((3539, 3552), 'numpy.max', 'np.max', (['image'], {}), '(image)\n', (3545, 3552), True, 'import numpy as np\n'), ((4171, 4188), 'numpy.min', 'np.min', (['entropies'], {}), '(entropies)\n', (4177, 4188), True, 'import numpy as np\n'), ((4238, 4255), 'numpy.max', 'np.max', (['entropies'], {}), '(entropies)\n', (4244, 4255), True, 'import numpy as np\n'), ((4198, 4215), 'numpy.ptp', 'np.ptp', (['entropies'], {}), '(entropies)\n', (4204, 4215), True, 'import numpy as np\n'), ((4265, 4282), 'numpy.ptp', 'np.ptp', (['entropies'], {}), '(entropies)\n', (4271, 4282), True, 'import numpy as np\n')] |
import os
import shutil
from aidapy.hist import total_systematic_histogram
from aidapy.hist import hist2array
#from .style_mpl import atlas_mpl_style
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.patches as patches
from matplotlib.ticker import AutoMinorLocator, MultipleLocator
import matplotlib as mpl
import matplotlib.gridspec as gsc
#plt.style.use('classic')
from pylab import setp
#sty = atlas_mpl_style()
#for key, val in sty.items():
# mpl.rcParams[key] = val
from matplotlib.font_manager import FontProperties
fontBase = FontProperties()
fontATLAS = fontBase.copy()
fontATLAS.set_size(16)
fontATLAS.set_style('italic')
fontATLAS.set_weight('bold')
def canvas_with_ratio(figsize=(8,7),height_ratios=[3.65,1],
xtitle='x title',ytitle='ytitle',ratio_title='Ratio'):
fig = plt.figure(figsize=figsize)
gs = gsc.GridSpec(2,1,height_ratios=height_ratios)
gs.update(hspace=0.075)
ax0 = fig.add_subplot(gs[0])
ax1 = fig.add_subplot(gs[1],sharex=ax0)
ax0.xaxis.set_minor_locator(AutoMinorLocator())
ax0.yaxis.set_minor_locator(AutoMinorLocator())
setp(ax0.get_xticklabels(),visible=False)
ax0.set_ylabel(ytitle)
ax1.set_ylabel(ratio_title)
ax1.set_xlabel(xtitle)
return fig, ax0, ax1
def hplot_mpl(root_file, hist_name='met_1pj', outdir='outs', xtitle='', ytitle='',logy=False,
proc_names=['Wt','ttbar','Fakes','WW','Diboson','Ztautau','RareSM']):
if os.path.exists(outdir):
pass
else:
os.makedirs(outdir)
nominals = { pname : root_file.Get(pname+'_FULL_main_nominal_'+hist_name) for pname in proc_names }
nominals = { pname : hist2array(h,return_edges=True) for pname, h in nominals.items() }
data = root_file.Get('Data_'+hist_name)
data = hist2array(data)
nom_h, total_band, edges, staterr = total_systematic_histogram(root_file,hist_name,proc_names,
return_stat_error=True)
centers = np.delete(edges,[0])-(np.ediff1d(edges)/2.0)
to_stack = [nominals[name][0] for name in ['RareSM','Diboson','Fakes','WW','Wt','Ztautau','ttbar']]
cols = ['darkred','black','gray','green','blue','orange','white']
labels = [r'Rare SM',r'Diboson',r'Fake/NP (MC)',r'WW',r'Wt',r'$Z\rightarrow\tau\tau$',r'$t\bar{t}$']
#to_stack = [nominals[name][0] for name in ['RareSM','Diboson','Fakes','WW','Ztautau','ttbar','Wt']]
#cols = ['darkred','black','gray','green','orange','white','blue']
#labels = [r'Rare SM',r'Diboson',r'Fake/NP (MC)',r'WW',r'$Z\rightarrow\tau\tau$',r'$t\bar{t}$',r'Wt']
fig,ax,axerr = canvas_with_ratio()
ax.errorbar(centers,data,yerr=np.sqrt(data),fmt='ko',label=r'Data')
ax.hist([centers for _ in to_stack],weights=to_stack,bins=edges,stacked=True,
color=cols,histtype='stepfilled',label=labels, ls='solid', lw=1, edgecolor='black')
syspatches = []
syspatches = [patches.Rectangle((c-w/2,v-err),w,err*2,hatch='\\\\\\\\',fill=False,edgecolor='none')
for c, v, err, w in zip(centers,nom_h,total_band,np.ediff1d(edges))]
for p in syspatches: ax.add_patch(p)
trashpatch = patches.Rectangle((0,0),0,0,hatch='\\\\\\\\',fill=False,edgecolor='none',
label=r'Systematics')
ax.add_patch(trashpatch)
ax.errorbar(centers,data,yerr=np.sqrt(data),fmt='ko')
ax.legend(loc='upper right')
l_handles, l_labels = ax.get_legend_handles_labels()
l_handles = [l_handles[-1]] + l_handles[:-1]
l_labels = [l_labels[-1]] + l_labels[:-1]
ax.legend(l_handles,l_labels,loc='upper right',fontsize=12)
ax.set_ylim([0,np.max(data)*1.3])
ax.text(.05,.92,'ATLAS',transform=ax.transAxes,style='oblique',size=14,fontproperties=fontATLAS)
ax.text(.185,.92,r'Internal, AIDA OS $e\mu$, pre-fit',transform=ax.transAxes,size=14)
ax.text(.05,.845,r'$\sqrt{s}$ = 13 TeV, $\int \mathcal{L}$dt = 36.1 fb$^{-1}$',
transform=ax.transAxes,size=14)
ax.text(.05,.75,'',transform=ax.transAxes,size=14)
domcErr = np.sqrt(1.0/(nom_h*nom_h)*data + data*data*staterr*staterr/(nom_h*nom_h*nom_h*nom_h))
axerr.errorbar(centers,data/nom_h,yerr=domcErr,fmt='ko')#data/(nom_h*nom_h)*total_band
errpatches = []
errpatches = [patches.Rectangle((c-w/2,1-err),w,err*2,hatch='\\\\\\\\',fill=False,edgecolor='none')
for c, v, err, w in zip(centers,data/nom_h,data/(nom_h*nom_h)*total_band,np.ediff1d(edges))]
for p in errpatches: axerr.add_patch(p)
axerr.set_ylim([0.5,1.5])
axerr.set_xlim([edges[0],edges[-1]])
axerr.plot(edges,np.array([1 for _ in edges]),'k-')
log_axes = ['pT','_2bins','_3bins']
if any(term in hist_name for term in log_axes):
logy = True
axerr.set_xlabel(xtitle,fontsize=14)
if 'njets' in hist_name:
axerr.xaxis.set_ticks(np.array([i for i in centers]))
newxticklabels = [str(int(i)) for i in centers]
newxticklabels[-1] = r'$\geq '+str(int(centers[-1]))+'$'
axerr.set_xticklabels(newxticklabels)
ax.set_ylabel(ytitle,fontsize=14)
if logy: ax.set_yscale('log'), ax.set_ylim([np.min(data)*.01,np.max(data)*500])
fig.savefig(outdir+'/'+hist_name+'.pdf')
fig.savefig(outdir+'/'+hist_name+'.png')
#plt.show()
| [
"os.path.exists",
"matplotlib.patches.Rectangle",
"numpy.sqrt",
"os.makedirs",
"matplotlib.font_manager.FontProperties",
"aidapy.hist.total_systematic_histogram",
"numpy.delete",
"numpy.ediff1d",
"numpy.max",
"numpy.array",
"matplotlib.pyplot.figure",
"matplotlib.gridspec.GridSpec",
"numpy.m... | [((560, 576), 'matplotlib.font_manager.FontProperties', 'FontProperties', ([], {}), '()\n', (574, 576), False, 'from matplotlib.font_manager import FontProperties\n'), ((835, 862), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': 'figsize'}), '(figsize=figsize)\n', (845, 862), True, 'import matplotlib.pyplot as plt\n'), ((873, 920), 'matplotlib.gridspec.GridSpec', 'gsc.GridSpec', (['(2)', '(1)'], {'height_ratios': 'height_ratios'}), '(2, 1, height_ratios=height_ratios)\n', (885, 920), True, 'import matplotlib.gridspec as gsc\n'), ((1471, 1493), 'os.path.exists', 'os.path.exists', (['outdir'], {}), '(outdir)\n', (1485, 1493), False, 'import os\n'), ((1805, 1821), 'aidapy.hist.hist2array', 'hist2array', (['data'], {}), '(data)\n', (1815, 1821), False, 'from aidapy.hist import hist2array\n'), ((1862, 1950), 'aidapy.hist.total_systematic_histogram', 'total_systematic_histogram', (['root_file', 'hist_name', 'proc_names'], {'return_stat_error': '(True)'}), '(root_file, hist_name, proc_names,\n return_stat_error=True)\n', (1888, 1950), False, 'from aidapy.hist import total_systematic_histogram\n'), ((3205, 3310), 'matplotlib.patches.Rectangle', 'patches.Rectangle', (['(0, 0)', '(0)', '(0)'], {'hatch': '"""\\\\\\\\\\\\\\\\"""', 'fill': '(False)', 'edgecolor': '"""none"""', 'label': '"""Systematics"""'}), "((0, 0), 0, 0, hatch='\\\\\\\\\\\\\\\\', fill=False, edgecolor=\n 'none', label='Systematics')\n", (3222, 3310), True, 'import matplotlib.patches as patches\n'), ((4100, 4210), 'numpy.sqrt', 'np.sqrt', (['(1.0 / (nom_h * nom_h) * data + data * data * staterr * staterr / (nom_h *\n nom_h * nom_h * nom_h))'], {}), '(1.0 / (nom_h * nom_h) * data + data * data * staterr * staterr / (\n nom_h * nom_h * nom_h * nom_h))\n', (4107, 4210), True, 'import numpy as np\n'), ((1056, 1074), 'matplotlib.ticker.AutoMinorLocator', 'AutoMinorLocator', ([], {}), '()\n', (1072, 1074), False, 'from matplotlib.ticker import AutoMinorLocator, MultipleLocator\n'), ((1108, 
1126), 'matplotlib.ticker.AutoMinorLocator', 'AutoMinorLocator', ([], {}), '()\n', (1124, 1126), False, 'from matplotlib.ticker import AutoMinorLocator, MultipleLocator\n'), ((1526, 1545), 'os.makedirs', 'os.makedirs', (['outdir'], {}), '(outdir)\n', (1537, 1545), False, 'import os\n'), ((1675, 1707), 'aidapy.hist.hist2array', 'hist2array', (['h'], {'return_edges': '(True)'}), '(h, return_edges=True)\n', (1685, 1707), False, 'from aidapy.hist import hist2array\n'), ((2027, 2048), 'numpy.delete', 'np.delete', (['edges', '[0]'], {}), '(edges, [0])\n', (2036, 2048), True, 'import numpy as np\n'), ((2974, 3078), 'matplotlib.patches.Rectangle', 'patches.Rectangle', (['(c - w / 2, v - err)', 'w', '(err * 2)'], {'hatch': '"""\\\\\\\\\\\\\\\\"""', 'fill': '(False)', 'edgecolor': '"""none"""'}), "((c - w / 2, v - err), w, err * 2, hatch='\\\\\\\\\\\\\\\\', fill=\n False, edgecolor='none')\n", (2991, 3078), True, 'import matplotlib.patches as patches\n'), ((4315, 4419), 'matplotlib.patches.Rectangle', 'patches.Rectangle', (['(c - w / 2, 1 - err)', 'w', '(err * 2)'], {'hatch': '"""\\\\\\\\\\\\\\\\"""', 'fill': '(False)', 'edgecolor': '"""none"""'}), "((c - w / 2, 1 - err), w, err * 2, hatch='\\\\\\\\\\\\\\\\', fill=\n False, edgecolor='none')\n", (4332, 4419), True, 'import matplotlib.patches as patches\n'), ((4648, 4678), 'numpy.array', 'np.array', (['[(1) for _ in edges]'], {}), '([(1) for _ in edges])\n', (4656, 4678), True, 'import numpy as np\n'), ((2049, 2066), 'numpy.ediff1d', 'np.ediff1d', (['edges'], {}), '(edges)\n', (2059, 2066), True, 'import numpy as np\n'), ((2720, 2733), 'numpy.sqrt', 'np.sqrt', (['data'], {}), '(data)\n', (2727, 2733), True, 'import numpy as np\n'), ((3399, 3412), 'numpy.sqrt', 'np.sqrt', (['data'], {}), '(data)\n', (3406, 3412), True, 'import numpy as np\n'), ((4895, 4925), 'numpy.array', 'np.array', (['[i for i in centers]'], {}), '([i for i in centers])\n', (4903, 4925), True, 'import numpy as np\n'), ((3127, 3144), 'numpy.ediff1d', 
'np.ediff1d', (['edges'], {}), '(edges)\n', (3137, 3144), True, 'import numpy as np\n'), ((3693, 3705), 'numpy.max', 'np.max', (['data'], {}), '(data)\n', (3699, 3705), True, 'import numpy as np\n'), ((4492, 4509), 'numpy.ediff1d', 'np.ediff1d', (['edges'], {}), '(edges)\n', (4502, 4509), True, 'import numpy as np\n'), ((5180, 5192), 'numpy.min', 'np.min', (['data'], {}), '(data)\n', (5186, 5192), True, 'import numpy as np\n'), ((5197, 5209), 'numpy.max', 'np.max', (['data'], {}), '(data)\n', (5203, 5209), True, 'import numpy as np\n')] |
import unittest
import hail as hl
import hail.expr.aggregators as agg
from subprocess import DEVNULL, call as syscall
import numpy as np
from struct import unpack
import hail.utils as utils
from hail.linalg import BlockMatrix
from math import sqrt
from .utils import resource, doctest_resource, startTestHailContext, stopTestHailContext
setUpModule = startTestHailContext
tearDownModule = stopTestHailContext
class Tests(unittest.TestCase):
_dataset = None
def get_dataset(self):
if Tests._dataset is None:
Tests._dataset = hl.split_multi_hts(hl.import_vcf(resource('sample.vcf')))
return Tests._dataset
def test_ibd(self):
dataset = self.get_dataset()
def plinkify(ds, min=None, max=None):
vcf = utils.new_temp_file(prefix="plink", suffix="vcf")
plinkpath = utils.new_temp_file(prefix="plink")
hl.export_vcf(ds, vcf)
threshold_string = "{} {}".format("--min {}".format(min) if min else "",
"--max {}".format(max) if max else "")
plink_command = "plink --double-id --allow-extra-chr --vcf {} --genome full --out {} {}" \
.format(utils.uri_path(vcf),
utils.uri_path(plinkpath),
threshold_string)
result_file = utils.uri_path(plinkpath + ".genome")
syscall(plink_command, shell=True, stdout=DEVNULL, stderr=DEVNULL)
### format of .genome file is:
# _, fid1, iid1, fid2, iid2, rt, ez, z0, z1, z2, pihat, phe,
# dst, ppc, ratio, ibs0, ibs1, ibs2, homhom, hethet (+ separated)
### format of ibd is:
# i (iid1), j (iid2), ibd: {Z0, Z1, Z2, PI_HAT}, ibs0, ibs1, ibs2
results = {}
with open(result_file) as f:
f.readline()
for line in f:
row = line.strip().split()
results[(row[1], row[3])] = (list(map(float, row[6:10])),
list(map(int, row[14:17])))
return results
def compare(ds, min=None, max=None):
plink_results = plinkify(ds, min, max)
hail_results = hl.identity_by_descent(ds, min=min, max=max).collect()
for row in hail_results:
key = (row.i, row.j)
self.assertAlmostEqual(plink_results[key][0][0], row.ibd.Z0, places=4)
self.assertAlmostEqual(plink_results[key][0][1], row.ibd.Z1, places=4)
self.assertAlmostEqual(plink_results[key][0][2], row.ibd.Z2, places=4)
self.assertAlmostEqual(plink_results[key][0][3], row.ibd.PI_HAT, places=4)
self.assertEqual(plink_results[key][1][0], row.ibs0)
self.assertEqual(plink_results[key][1][1], row.ibs1)
self.assertEqual(plink_results[key][1][2], row.ibs2)
compare(dataset)
compare(dataset, min=0.0, max=1.0)
dataset = dataset.annotate_rows(dummy_maf=0.01)
hl.identity_by_descent(dataset, dataset['dummy_maf'], min=0.0, max=1.0)
hl.identity_by_descent(dataset, hl.float32(dataset['dummy_maf']), min=0.0, max=1.0)
def test_impute_sex_same_as_plink(self):
ds = hl.import_vcf(resource('x-chromosome.vcf'))
sex = hl.impute_sex(ds.GT, include_par=True)
vcf_file = utils.uri_path(utils.new_temp_file(prefix="plink", suffix="vcf"))
out_file = utils.uri_path(utils.new_temp_file(prefix="plink"))
hl.export_vcf(ds, vcf_file)
utils.run_command(["plink", "--vcf", vcf_file, "--const-fid",
"--check-sex", "--silent", "--out", out_file])
plink_sex = hl.import_table(out_file + '.sexcheck',
delimiter=' +',
types={'SNPSEX': hl.tint32,
'F': hl.tfloat64})
plink_sex = plink_sex.select('IID', 'SNPSEX', 'F')
plink_sex = plink_sex.select(
s=plink_sex.IID,
is_female=hl.cond(plink_sex.SNPSEX == 2,
True,
hl.cond(plink_sex.SNPSEX == 1,
False,
hl.null(hl.tbool))),
f_stat=plink_sex.F).key_by('s')
sex = sex.select('is_female', 'f_stat')
self.assertTrue(plink_sex._same(sex.select_globals(), tolerance=1e-3))
ds = ds.annotate_rows(aaf=(agg.call_stats(ds.GT, ds.alleles)).AF[1])
self.assertTrue(hl.impute_sex(ds.GT)._same(hl.impute_sex(ds.GT, aaf='aaf')))
def test_linreg(self):
phenos = hl.import_table(resource('regressionLinear.pheno'),
types={'Pheno': hl.tfloat64},
key='Sample')
covs = hl.import_table(resource('regressionLinear.cov'),
types={'Cov1': hl.tfloat64, 'Cov2': hl.tfloat64},
key='Sample')
mt = hl.import_vcf(resource('regressionLinear.vcf'))
mt = mt.annotate_cols(pheno=phenos[mt.s].Pheno, cov=covs[mt.s])
mt = mt.annotate_entries(x = mt.GT.n_alt_alleles()).cache()
t1 = hl.linear_regression(
y=mt.pheno, x=mt.GT.n_alt_alleles(), covariates=[mt.cov.Cov1, mt.cov.Cov2 + 1 - 1]).rows()
t1 = t1.select(p=t1.linreg.p_value)
t2 = hl.linear_regression(
y=mt.pheno, x=mt.x, covariates=[mt.cov.Cov1, mt.cov.Cov2]).rows()
t2 = t2.select(p=t2.linreg.p_value)
t3 = hl.linear_regression(
y=[mt.pheno], x=mt.x, covariates=[mt.cov.Cov1, mt.cov.Cov2]).rows()
t3 = t3.select(p=t3.linreg.p_value[0])
t4 = hl.linear_regression(
y=[mt.pheno, mt.pheno], x=mt.x, covariates=[mt.cov.Cov1, mt.cov.Cov2]).rows()
t4a = t4.select(p=t4.linreg.p_value[0])
t4b = t4.select(p=t4.linreg.p_value[1])
self.assertTrue(t1._same(t2))
self.assertTrue(t1._same(t3))
self.assertTrue(t1._same(t4a))
self.assertTrue(t1._same(t4b))
def test_linear_regression_with_two_cov(self):
covariates = hl.import_table(resource('regressionLinear.cov'),
key='Sample',
types={'Cov1': hl.tfloat, 'Cov2': hl.tfloat})
pheno = hl.import_table(resource('regressionLinear.pheno'),
key='Sample',
missing='0',
types={'Pheno': hl.tfloat})
mt = hl.import_vcf(resource('regressionLinear.vcf'))
mt = hl.linear_regression(y=pheno[mt.s].Pheno,
x=mt.GT.n_alt_alleles(),
covariates=list(covariates[mt.s].values()))
results = dict(mt.aggregate_rows(hl.agg.collect((mt.locus.position, mt.linreg))))
self.assertAlmostEqual(results[1].beta, -0.28589421, places=6)
self.assertAlmostEqual(results[1].standard_error, 1.2739153, places=6)
self.assertAlmostEqual(results[1].t_stat, -0.22442167, places=6)
self.assertAlmostEqual(results[1].p_value, 0.84327106, places=6)
self.assertAlmostEqual(results[2].beta, -0.5417647, places=6)
self.assertAlmostEqual(results[2].standard_error, 0.3350599, places=6)
self.assertAlmostEqual(results[2].t_stat, -1.616919, places=6)
self.assertAlmostEqual(results[2].p_value, 0.24728705, places=6)
self.assertAlmostEqual(results[3].beta, 1.07367185, places=6)
self.assertAlmostEqual(results[3].standard_error, 0.6764348, places=6)
self.assertAlmostEqual(results[3].t_stat, 1.5872510, places=6)
self.assertAlmostEqual(results[3].p_value, 0.2533675, places=6)
self.assertTrue(np.isnan(results[6].standard_error))
self.assertTrue(np.isnan(results[6].t_stat))
self.assertTrue(np.isnan(results[6].p_value))
self.assertTrue(np.isnan(results[7].standard_error))
self.assertTrue(np.isnan(results[8].standard_error))
self.assertTrue(np.isnan(results[9].standard_error))
self.assertTrue(np.isnan(results[10].standard_error))
def test_linear_regression_with_two_cov_pl(self):
    """Same regression as the two-covariate test, but with x derived from PL
    (phred-scaled likelihood) dosages instead of hard genotype calls; the
    t-statistics and p-values match the hard-call case while the betas and
    standard errors differ."""
    covariates = hl.import_table(resource('regressionLinear.cov'),
                                 key='Sample',
                                 types={'Cov1': hl.tfloat, 'Cov2': hl.tfloat})
    pheno = hl.import_table(resource('regressionLinear.pheno'),
                            key='Sample',
                            missing='0',
                            types={'Pheno': hl.tfloat})
    mt = hl.import_vcf(resource('regressionLinear.vcf'))
    mt = hl.linear_regression(y=pheno[mt.s].Pheno,
                              x=hl.pl_dosage(mt.PL),
                              covariates=list(covariates[mt.s].values()))
    results = dict(mt.aggregate_rows(hl.agg.collect((mt.locus.position, mt.linreg))))
    self.assertAlmostEqual(results[1].beta, -0.29166985, places=6)
    self.assertAlmostEqual(results[1].standard_error, 1.2996510, places=6)
    self.assertAlmostEqual(results[1].t_stat, -0.22442167, places=6)
    self.assertAlmostEqual(results[1].p_value, 0.84327106, places=6)
    self.assertAlmostEqual(results[2].beta, -0.5499320, places=6)
    self.assertAlmostEqual(results[2].standard_error, 0.3401110, places=6)
    self.assertAlmostEqual(results[2].t_stat, -1.616919, places=6)
    self.assertAlmostEqual(results[2].p_value, 0.24728705, places=6)
    self.assertAlmostEqual(results[3].beta, 1.09536219, places=6)
    self.assertAlmostEqual(results[3].standard_error, 0.6901002, places=6)
    self.assertAlmostEqual(results[3].t_stat, 1.5872510, places=6)
    self.assertAlmostEqual(results[3].p_value, 0.2533675, places=6)
def test_linear_regression_with_two_cov_dosage(self):
    """Same regression with x from GP (genotype-probability) dosages on a GEN
    import. Expected values equal the PL-dosage test but beta/standard_error
    are only checked to 4 places — GEN probabilities are stored with limited
    precision."""
    covariates = hl.import_table(resource('regressionLinear.cov'),
                                 key='Sample',
                                 types={'Cov1': hl.tfloat, 'Cov2': hl.tfloat})
    pheno = hl.import_table(resource('regressionLinear.pheno'),
                            key='Sample',
                            missing='0',
                            types={'Pheno': hl.tfloat})
    mt = hl.import_gen(resource('regressionLinear.gen'), sample_file=resource('regressionLinear.sample'))
    mt = hl.linear_regression(y=pheno[mt.s].Pheno,
                              x=hl.gp_dosage(mt.GP),
                              covariates=list(covariates[mt.s].values()))
    results = dict(mt.aggregate_rows(hl.agg.collect((mt.locus.position, mt.linreg))))
    self.assertAlmostEqual(results[1].beta, -0.29166985, places=4)
    self.assertAlmostEqual(results[1].standard_error, 1.2996510, places=4)
    self.assertAlmostEqual(results[1].t_stat, -0.22442167, places=6)
    self.assertAlmostEqual(results[1].p_value, 0.84327106, places=6)
    self.assertAlmostEqual(results[2].beta, -0.5499320, places=4)
    self.assertAlmostEqual(results[2].standard_error, 0.3401110, places=4)
    self.assertAlmostEqual(results[2].t_stat, -1.616919, places=6)
    self.assertAlmostEqual(results[2].p_value, 0.24728705, places=6)
    self.assertAlmostEqual(results[3].beta, 1.09536219, places=4)
    self.assertAlmostEqual(results[3].standard_error, 0.6901002, places=4)
    self.assertAlmostEqual(results[3].t_stat, 1.5872510, places=6)
    self.assertAlmostEqual(results[3].p_value, 0.2533675, places=6)
    self.assertTrue(np.isnan(results[6].standard_error))
def test_linear_regression_with_no_cov(self):
    """Linear regression with no covariates (intercept-only model plus x);
    checks per-locus stats and NaN standard errors for degenerate loci."""
    pheno = hl.import_table(resource('regressionLinear.pheno'),
                            key='Sample',
                            missing='0',
                            types={'Pheno': hl.tfloat})
    mt = hl.import_vcf(resource('regressionLinear.vcf'))
    mt = hl.linear_regression(y=pheno[mt.s].Pheno,
                              x=mt.GT.n_alt_alleles())
    results = dict(mt.aggregate_rows(hl.agg.collect((mt.locus.position, mt.linreg))))
    self.assertAlmostEqual(results[1].beta, -0.25, places=6)
    self.assertAlmostEqual(results[1].standard_error, 0.4841229, places=6)
    self.assertAlmostEqual(results[1].t_stat, -0.5163978, places=6)
    self.assertAlmostEqual(results[1].p_value, 0.63281250, places=6)
    self.assertAlmostEqual(results[2].beta, -0.250000, places=6)
    self.assertAlmostEqual(results[2].standard_error, 0.2602082, places=6)
    self.assertAlmostEqual(results[2].t_stat, -0.9607689, places=6)
    self.assertAlmostEqual(results[2].p_value, 0.391075888, places=6)
    self.assertTrue(np.isnan(results[6].standard_error))
    self.assertTrue(np.isnan(results[7].standard_error))
    self.assertTrue(np.isnan(results[8].standard_error))
    self.assertTrue(np.isnan(results[9].standard_error))
    self.assertTrue(np.isnan(results[10].standard_error))
def test_linear_regression_with_import_fam_boolean(self):
    """Linear regression with y taken from a PLINK .fam boolean phenotype
    (is_case); expected stats match the tabular-phenotype two-covariate test,
    implying the .fam encodes the same phenotype values."""
    covariates = hl.import_table(resource('regressionLinear.cov'),
                                 key='Sample',
                                 types={'Cov1': hl.tfloat, 'Cov2': hl.tfloat})
    fam = hl.import_fam(resource('regressionLinear.fam'))
    mt = hl.import_vcf(resource('regressionLinear.vcf'))
    mt = hl.linear_regression(y=fam[mt.s].is_case,
                              x=mt.GT.n_alt_alleles(),
                              covariates=list(covariates[mt.s].values()))
    results = dict(mt.aggregate_rows(hl.agg.collect((mt.locus.position, mt.linreg))))
    self.assertAlmostEqual(results[1].beta, -0.28589421, places=6)
    self.assertAlmostEqual(results[1].standard_error, 1.2739153, places=6)
    self.assertAlmostEqual(results[1].t_stat, -0.22442167, places=6)
    self.assertAlmostEqual(results[1].p_value, 0.84327106, places=6)
    self.assertAlmostEqual(results[2].beta, -0.5417647, places=6)
    self.assertAlmostEqual(results[2].standard_error, 0.3350599, places=6)
    self.assertAlmostEqual(results[2].t_stat, -1.616919, places=6)
    self.assertAlmostEqual(results[2].p_value, 0.24728705, places=6)
    self.assertTrue(np.isnan(results[6].standard_error))
    self.assertTrue(np.isnan(results[7].standard_error))
    self.assertTrue(np.isnan(results[8].standard_error))
    self.assertTrue(np.isnan(results[9].standard_error))
    self.assertTrue(np.isnan(results[10].standard_error))
def test_linear_regression_with_import_fam_quant(self):
    """Linear regression with y from a PLINK .fam quantitative phenotype
    (quant_pheno=True, '0' treated as missing); expected stats again match the
    tabular-phenotype two-covariate test."""
    covariates = hl.import_table(resource('regressionLinear.cov'),
                                 key='Sample',
                                 types={'Cov1': hl.tfloat, 'Cov2': hl.tfloat})
    fam = hl.import_fam(resource('regressionLinear.fam'),
                        quant_pheno=True,
                        missing='0')
    mt = hl.import_vcf(resource('regressionLinear.vcf'))
    mt = hl.linear_regression(y=fam[mt.s].quant_pheno,
                              x=mt.GT.n_alt_alleles(),
                              covariates=list(covariates[mt.s].values()))
    results = dict(mt.aggregate_rows(hl.agg.collect((mt.locus.position, mt.linreg))))
    self.assertAlmostEqual(results[1].beta, -0.28589421, places=6)
    self.assertAlmostEqual(results[1].standard_error, 1.2739153, places=6)
    self.assertAlmostEqual(results[1].t_stat, -0.22442167, places=6)
    self.assertAlmostEqual(results[1].p_value, 0.84327106, places=6)
    self.assertAlmostEqual(results[2].beta, -0.5417647, places=6)
    self.assertAlmostEqual(results[2].standard_error, 0.3350599, places=6)
    self.assertAlmostEqual(results[2].t_stat, -1.616919, places=6)
    self.assertAlmostEqual(results[2].p_value, 0.24728705, places=6)
    self.assertTrue(np.isnan(results[6].standard_error))
    self.assertTrue(np.isnan(results[7].standard_error))
    self.assertTrue(np.isnan(results[8].standard_error))
    self.assertTrue(np.isnan(results[9].standard_error))
    self.assertTrue(np.isnan(results[10].standard_error))
def test_linear_regression_multi_pheno_same(self):
    """Running linear regression with a list of two identical phenotypes must
    produce, at every index, the same per-locus stats as the single-phenotype
    run (and the two multi-pheno entries must equal each other)."""
    covariates = hl.import_table(resource('regressionLinear.cov'),
                                 key='Sample',
                                 types={'Cov1': hl.tfloat, 'Cov2': hl.tfloat})
    pheno = hl.import_table(resource('regressionLinear.pheno'),
                            key='Sample',
                            missing='0',
                            types={'Pheno': hl.tfloat})
    mt = hl.import_vcf(resource('regressionLinear.vcf'))
    # Single-phenotype run, results rooted at mt.single.
    mt = hl.linear_regression(y=pheno[mt.s].Pheno,
                              x=mt.GT.n_alt_alleles(),
                              covariates=list(covariates[mt.s].values()),
                              root='single')
    # Same phenotype passed twice, results (arrays) rooted at mt.multi.
    mt = hl.linear_regression(y=[pheno[mt.s].Pheno, pheno[mt.s].Pheno],
                              x=mt.GT.n_alt_alleles(),
                              covariates=list(covariates[mt.s].values()),
                              root='multi')
    def eq(x1, x2):
        # Equal within 1e-4, treating NaN == NaN as equal.
        return (hl.is_nan(x1) & hl.is_nan(x2)) | (hl.abs(x1 - x2) < 1e-4)
    self.assertTrue(mt.aggregate_rows(hl.agg.all((eq(mt.single.p_value, mt.multi.p_value[0]) &
                                                  eq(mt.single.standard_error, mt.multi.standard_error[0]) &
                                                  eq(mt.single.t_stat, mt.multi.t_stat[0]) &
                                                  eq(mt.single.beta, mt.multi.beta[0]) &
                                                  eq(mt.single.y_transpose_x, mt.multi.y_transpose_x[0])))))
    self.assertTrue(mt.aggregate_rows(hl.agg.all(eq(mt.multi.p_value[1], mt.multi.p_value[0]) &
                                                 eq(mt.multi.standard_error[1], mt.multi.standard_error[0]) &
                                                 eq(mt.multi.t_stat[1], mt.multi.t_stat[0]) &
                                                 eq(mt.multi.beta[1], mt.multi.beta[0]) &
                                                 eq(mt.multi.y_transpose_x[1], mt.multi.y_transpose_x[0]))))
def test_logistic_regression_wald_test_two_cov(self):
    """Logistic regression (Wald test) of a boolean phenotype on hard-call
    genotypes with two covariates; loci with non-converged or constant fits
    must come out as 'constant' per is_constant below."""
    covariates = hl.import_table(resource('regressionLogistic.cov'),
                                 key='Sample',
                                 types={'Cov1': hl.tfloat, 'Cov2': hl.tfloat})
    pheno = hl.import_table(resource('regressionLogisticBoolean.pheno'),
                            key='Sample',
                            missing='0',
                            types={'isCase': hl.tbool})
    mt = hl.import_vcf(resource('regressionLogistic.vcf'))
    mt = hl.logistic_regression('wald',
                                y=pheno[mt.s].isCase,
                                x=mt.GT.n_alt_alleles(),
                                covariates=[covariates[mt.s].Cov1, covariates[mt.s].Cov2])
    results = dict(mt.aggregate_rows(hl.agg.collect((mt.locus.position, mt.logreg))))
    self.assertAlmostEqual(results[1].beta, -0.81226793796, places=6)
    self.assertAlmostEqual(results[1].standard_error, 2.1085483421, places=6)
    self.assertAlmostEqual(results[1].z_stat, -0.3852261396, places=6)
    self.assertAlmostEqual(results[1].p_value, 0.7000698784, places=6)
    self.assertAlmostEqual(results[2].beta, -0.43659460858, places=6)
    self.assertAlmostEqual(results[2].standard_error, 1.0296902941, places=6)
    self.assertAlmostEqual(results[2].z_stat, -0.4240057531, places=6)
    self.assertAlmostEqual(results[2].p_value, 0.6715616176, places=6)
    def is_constant(r):
        # "Constant" locus: fit did not converge, or p-value is NaN / ~1.
        return (not r.fit.converged) or np.isnan(r.p_value) or abs(r.p_value - 1) < 1e-4
    self.assertTrue(is_constant(results[3]))
    self.assertTrue(is_constant(results[6]))
    self.assertTrue(is_constant(results[7]))
    self.assertTrue(is_constant(results[8]))
    self.assertTrue(is_constant(results[9]))
    self.assertTrue(is_constant(results[10]))
def test_logistic_regression_wald_test_two_cov_pl(self):
    """Logistic regression (Wald) with x from PL dosages; z-stats and p-values
    match the hard-call Wald test while betas/standard errors differ. Locus 3
    is additionally pinned as non-converged."""
    covariates = hl.import_table(resource('regressionLogistic.cov'),
                                 key='Sample',
                                 types={'Cov1': hl.tfloat, 'Cov2': hl.tfloat})
    pheno = hl.import_table(resource('regressionLogisticBoolean.pheno'),
                            key='Sample',
                            missing='0',
                            types={'isCase': hl.tbool})
    mt = hl.import_vcf(resource('regressionLogistic.vcf'))
    mt = hl.logistic_regression('wald',
                                y=pheno[mt.s].isCase,
                                x=hl.pl_dosage(mt.PL),
                                covariates=[covariates[mt.s].Cov1, covariates[mt.s].Cov2])
    results = dict(mt.aggregate_rows(hl.agg.collect((mt.locus.position, mt.logreg))))
    self.assertAlmostEqual(results[1].beta, -0.8286774, places=6)
    self.assertAlmostEqual(results[1].standard_error, 2.151145, places=6)
    self.assertAlmostEqual(results[1].z_stat, -0.3852261, places=6)
    self.assertAlmostEqual(results[1].p_value, 0.7000699, places=6)
    self.assertAlmostEqual(results[2].beta, -0.4431764, places=6)
    self.assertAlmostEqual(results[2].standard_error, 1.045213, places=6)
    self.assertAlmostEqual(results[2].z_stat, -0.4240058, places=6)
    self.assertAlmostEqual(results[2].p_value, 0.6715616, places=6)
    def is_constant(r):
        # "Constant" locus: fit did not converge, or p-value is NaN / ~1.
        return (not r.fit.converged) or np.isnan(r.p_value) or abs(r.p_value - 1) < 1e-4
    self.assertFalse(results[3].fit.converged)
    self.assertTrue(is_constant(results[6]))
    self.assertTrue(is_constant(results[7]))
    self.assertTrue(is_constant(results[8]))
    self.assertTrue(is_constant(results[9]))
    self.assertTrue(is_constant(results[10]))
def test_logistic_regression_wald_two_cov_dosage(self):
    """Logistic regression (Wald) with x from GP dosages on a GEN import;
    expected values equal the PL-dosage Wald test but only to 4 places, owing
    to the limited precision of GEN-stored probabilities."""
    covariates = hl.import_table(resource('regressionLogistic.cov'),
                                 key='Sample',
                                 types={'Cov1': hl.tfloat, 'Cov2': hl.tfloat})
    pheno = hl.import_table(resource('regressionLogisticBoolean.pheno'),
                            key='Sample',
                            missing='0',
                            types={'isCase': hl.tbool})
    mt = hl.import_gen(resource('regressionLogistic.gen'),
                       sample_file=resource('regressionLogistic.sample'))
    mt = hl.logistic_regression('wald',
                                y=pheno[mt.s].isCase,
                                x=hl.gp_dosage(mt.GP),
                                covariates=[covariates[mt.s].Cov1, covariates[mt.s].Cov2])
    results = dict(mt.aggregate_rows(hl.agg.collect((mt.locus.position, mt.logreg))))
    self.assertAlmostEqual(results[1].beta, -0.8286774, places=4)
    self.assertAlmostEqual(results[1].standard_error, 2.151145, places=4)
    self.assertAlmostEqual(results[1].z_stat, -0.3852261, places=4)
    self.assertAlmostEqual(results[1].p_value, 0.7000699, places=4)
    self.assertAlmostEqual(results[2].beta, -0.4431764, places=4)
    self.assertAlmostEqual(results[2].standard_error, 1.045213, places=4)
    self.assertAlmostEqual(results[2].z_stat, -0.4240058, places=4)
    self.assertAlmostEqual(results[2].p_value, 0.6715616, places=4)
    def is_constant(r):
        # "Constant" locus: fit did not converge, or p-value is NaN / ~1.
        return (not r.fit.converged) or np.isnan(r.p_value) or abs(r.p_value - 1) < 1e-4
    self.assertFalse(results[3].fit.converged)
    self.assertTrue(is_constant(results[6]))
    self.assertTrue(is_constant(results[7]))
    self.assertTrue(is_constant(results[8]))
    self.assertTrue(is_constant(results[9]))
    self.assertTrue(is_constant(results[10]))
def test_logistic_regression_lrt_two_cov(self):
    """Logistic regression using the likelihood-ratio test; checks beta,
    chi-squared statistic, and p-value per locus, plus constant/non-converged
    handling."""
    covariates = hl.import_table(resource('regressionLogistic.cov'),
                                 key='Sample',
                                 types={'Cov1': hl.tfloat, 'Cov2': hl.tfloat})
    pheno = hl.import_table(resource('regressionLogisticBoolean.pheno'),
                            key='Sample',
                            missing='0',
                            types={'isCase': hl.tbool})
    mt = hl.import_vcf(resource('regressionLogistic.vcf'))
    mt = hl.logistic_regression('lrt',
                                y=pheno[mt.s].isCase,
                                x=mt.GT.n_alt_alleles(),
                                covariates=[covariates[mt.s].Cov1, covariates[mt.s].Cov2])
    results = dict(mt.aggregate_rows(hl.agg.collect((mt.locus.position, mt.logreg))))
    self.assertAlmostEqual(results[1].beta, -0.81226793796, places=6)
    self.assertAlmostEqual(results[1].chi_sq_stat, 0.1503349167, places=6)
    self.assertAlmostEqual(results[1].p_value, 0.6982155052, places=6)
    self.assertAlmostEqual(results[2].beta, -0.43659460858, places=6)
    self.assertAlmostEqual(results[2].chi_sq_stat, 0.1813968574, places=6)
    self.assertAlmostEqual(results[2].p_value, 0.6701755415, places=6)
    def is_constant(r):
        # "Constant" locus: fit did not converge, or p-value is NaN / ~1.
        return (not r.fit.converged) or np.isnan(r.p_value) or abs(r.p_value - 1) < 1e-4
    self.assertFalse(results[3].fit.converged)
    self.assertTrue(is_constant(results[6]))
    self.assertTrue(is_constant(results[7]))
    self.assertTrue(is_constant(results[8]))
    self.assertTrue(is_constant(results[9]))
    self.assertTrue(is_constant(results[10]))
def test_logistic_regression_score_two_cov(self):
    """Logistic regression using the score test. Unlike wald/lrt, the score
    test yields no fit struct, so 'constant' loci are detected by a missing or
    near-zero chi-squared statistic."""
    covariates = hl.import_table(resource('regressionLogistic.cov'),
                                 key='Sample',
                                 types={'Cov1': hl.tfloat, 'Cov2': hl.tfloat})
    pheno = hl.import_table(resource('regressionLogisticBoolean.pheno'),
                            key='Sample',
                            missing='0',
                            types={'isCase': hl.tbool})
    mt = hl.import_vcf(resource('regressionLogistic.vcf'))
    mt = hl.logistic_regression('score',
                                y=pheno[mt.s].isCase,
                                x=mt.GT.n_alt_alleles(),
                                covariates=[covariates[mt.s].Cov1, covariates[mt.s].Cov2])
    results = dict(mt.aggregate_rows(hl.agg.collect((mt.locus.position, mt.logreg))))
    self.assertAlmostEqual(results[1].chi_sq_stat, 0.1502364955, places=6)
    self.assertAlmostEqual(results[1].p_value, 0.6983094571, places=6)
    self.assertAlmostEqual(results[2].chi_sq_stat, 0.1823600965, places=6)
    self.assertAlmostEqual(results[2].p_value, 0.6693528073, places=6)
    # Locus 3 gets a finite score statistic here (it fails to converge under
    # wald/lrt in the sibling tests).
    self.assertAlmostEqual(results[3].chi_sq_stat, 7.047367694, places=6)
    self.assertAlmostEqual(results[3].p_value, 0.007938182229, places=6)
    def is_constant(r):
        return r.chi_sq_stat is None or r.chi_sq_stat < 1e-6
    self.assertTrue(is_constant(results[6]))
    self.assertTrue(is_constant(results[7]))
    self.assertTrue(is_constant(results[8]))
    self.assertTrue(is_constant(results[9]))
    self.assertTrue(is_constant(results[10]))
def test_logistic_regression_epacts(self):
    """Runs all four logistic regression tests (wald, lrt, score, firth) on the
    EPACTS fixture with sex + two PCs as covariates, and checks five loci
    against fixed expected values (presumably produced by EPACTS — confirm
    against the fixture's provenance)."""
    covariates = hl.import_table(resource('regressionLogisticEpacts.cov'),
                                 key='IND_ID',
                                 types={'PC1': hl.tfloat, 'PC2': hl.tfloat})
    fam = hl.import_fam(resource('regressionLogisticEpacts.fam'))
    mt = hl.import_vcf(resource('regressionLogisticEpacts.vcf'))
    # Merge covariates and fam fields (is_case, is_female, ...) onto columns.
    mt = mt.annotate_cols(**covariates[mt.s], **fam[mt.s])
    mt = hl.logistic_regression('wald',
                                y=mt.is_case,
                                x=mt.GT.n_alt_alleles(),
                                covariates=[mt.is_female, mt.PC1, mt.PC2],
                                root='wald')
    mt = hl.logistic_regression('lrt',
                                y=mt.is_case,
                                x=mt.GT.n_alt_alleles(),
                                covariates=[mt.is_female, mt.PC1, mt.PC2],
                                root='lrt')
    mt = hl.logistic_regression('score',
                                y=mt.is_case,
                                x=mt.GT.n_alt_alleles(),
                                covariates=[mt.is_female, mt.PC1, mt.PC2],
                                root='score')
    mt = hl.logistic_regression('firth',
                                y=mt.is_case,
                                x=mt.GT.n_alt_alleles(),
                                covariates=[mt.is_female, mt.PC1, mt.PC2],
                                root='firth')
    # 2535 samples from 1K Genomes Project
    # Locus("22", 16060511) # MAC 623
    # Locus("22", 16115878) # MAC 370
    # Locus("22", 16115882) # MAC 1207
    # Locus("22", 16117940) # MAC 7
    # Locus("22", 16117953) # MAC 21
    mt = mt.select_rows('wald', 'lrt', 'firth', 'score')
    results = dict(mt.aggregate_rows(hl.agg.collect((mt.locus.position, mt.row))))
    self.assertAlmostEqual(results[16060511].wald.beta, -0.097476, places=4)
    self.assertAlmostEqual(results[16060511].wald.standard_error, 0.087478, places=4)
    self.assertAlmostEqual(results[16060511].wald.z_stat, -1.1143, places=4)
    self.assertAlmostEqual(results[16060511].wald.p_value, 0.26516, places=4)
    self.assertAlmostEqual(results[16060511].lrt.p_value, 0.26475, places=4)
    self.assertAlmostEqual(results[16060511].score.p_value, 0.26499, places=4)
    self.assertAlmostEqual(results[16060511].firth.beta, -0.097079, places=4)
    self.assertAlmostEqual(results[16060511].firth.p_value, 0.26593, places=4)
    self.assertAlmostEqual(results[16115878].wald.beta, -0.052632, places=4)
    self.assertAlmostEqual(results[16115878].wald.standard_error, 0.11272, places=4)
    self.assertAlmostEqual(results[16115878].wald.z_stat, -0.46691, places=4)
    self.assertAlmostEqual(results[16115878].wald.p_value, 0.64056, places=4)
    self.assertAlmostEqual(results[16115878].lrt.p_value, 0.64046, places=4)
    self.assertAlmostEqual(results[16115878].score.p_value, 0.64054, places=4)
    self.assertAlmostEqual(results[16115878].firth.beta, -0.052301, places=4)
    self.assertAlmostEqual(results[16115878].firth.p_value, 0.64197, places=4)
    self.assertAlmostEqual(results[16115882].wald.beta, -0.15598, places=4)
    self.assertAlmostEqual(results[16115882].wald.standard_error, 0.079508, places=4)
    self.assertAlmostEqual(results[16115882].wald.z_stat, -1.9619, places=4)
    self.assertAlmostEqual(results[16115882].wald.p_value, 0.049779, places=4)
    self.assertAlmostEqual(results[16115882].lrt.p_value, 0.049675, places=4)
    self.assertAlmostEqual(results[16115882].score.p_value, 0.049675, places=4)
    self.assertAlmostEqual(results[16115882].firth.beta, -0.15567, places=4)
    self.assertAlmostEqual(results[16115882].firth.p_value, 0.04991, places=4)
    # Low-MAC loci (MAC 7 and 21) are checked with looser precision on the
    # Wald stats (places=2/3).
    self.assertAlmostEqual(results[16117940].wald.beta, -0.88059, places=4)
    self.assertAlmostEqual(results[16117940].wald.standard_error, 0.83769, places=2)
    self.assertAlmostEqual(results[16117940].wald.z_stat, -1.0512, places=2)
    self.assertAlmostEqual(results[16117940].wald.p_value, 0.29316, places=2)
    self.assertAlmostEqual(results[16117940].lrt.p_value, 0.26984, places=4)
    self.assertAlmostEqual(results[16117940].score.p_value, 0.27828, places=4)
    self.assertAlmostEqual(results[16117940].firth.beta, -0.7524, places=4)
    self.assertAlmostEqual(results[16117940].firth.p_value, 0.30731, places=4)
    self.assertAlmostEqual(results[16117953].wald.beta, 0.54921, places=4)
    self.assertAlmostEqual(results[16117953].wald.standard_error, 0.4517, places=3)
    self.assertAlmostEqual(results[16117953].wald.z_stat, 1.2159, places=3)
    self.assertAlmostEqual(results[16117953].wald.p_value, 0.22403, places=3)
    self.assertAlmostEqual(results[16117953].lrt.p_value, 0.21692, places=4)
    self.assertAlmostEqual(results[16117953].score.p_value, 0.21849, places=4)
    self.assertAlmostEqual(results[16117953].firth.beta, 0.5258, places=4)
    self.assertAlmostEqual(results[16117953].firth.p_value, 0.22562, places=4)
def test_trio_matrix(self):
    """
    This test depends on certain properties of the trio matrix VCF and
    pedigree structure. This test is NOT a valid test if the pedigree
    includes quads: the trio_matrix method will duplicate the parents
    appropriately, but the genotypes_table and samples_table orthogonal
    paths would require another duplication/explode that we haven't written.
    """
    ped = hl.Pedigree.read(resource('triomatrix.fam'))
    ht = hl.import_fam(resource('triomatrix.fam'))
    mt = hl.import_vcf(resource('triomatrix.vcf'))
    mt = mt.annotate_cols(fam=ht[mt.s])
    # Build lookup tables keyed by parent sample id so each entry can be
    # tagged as belonging to a dad and/or mom.
    dads = ht.filter(hl.is_defined(ht.pat_id))
    dads = dads.select(dads.pat_id, is_dad=True).key_by('pat_id')
    moms = ht.filter(hl.is_defined(ht.mat_id))
    moms = moms.select(moms.mat_id, is_mom=True).key_by('mat_id')
    # "Orthogonal path": reconstruct per-trio entry data from the raw entries
    # table via joins, to compare against hl.trio_matrix below.
    et = (mt.entries()
          .key_by('s')
          .join(dads, how='left')
          .join(moms, how='left'))
    et = et.annotate(is_dad=hl.is_defined(et.is_dad),
                     is_mom=hl.is_defined(et.is_mom))
    et = (et
          .group_by(et.locus, et.alleles, fam=et.fam.fam_id)
          .aggregate(data=hl.agg.collect(hl.struct(
              # role: 1 = dad, 2 = mom, 0 = proband
              role=hl.case().when(et.is_dad, 1).when(et.is_mom, 2).default(0),
              g=hl.struct(GT=et.GT, AD=et.AD, DP=et.DP, GQ=et.GQ, PL=et.PL)))))
    # Keep only complete trios (exactly 3 members present).
    et = et.filter(hl.len(et.data) == 3)
    et = et.select('data').explode('data')
    # trio_matrix path: flatten entries back into the same (role, g) shape.
    tt = hl.trio_matrix(mt, ped, complete_trios=True).entries().key_by('locus', 'alleles')
    tt = tt.annotate(fam=tt.proband.fam.fam_id,
                     data=[hl.struct(role=0, g=tt.proband_entry.select('GT', 'AD', 'DP', 'GQ', 'PL')),
                           hl.struct(role=1, g=tt.father_entry.select('GT', 'AD', 'DP', 'GQ', 'PL')),
                           hl.struct(role=2, g=tt.mother_entry.select('GT', 'AD', 'DP', 'GQ', 'PL'))])
    tt = tt.select('fam', 'data').explode('data')
    tt = tt.filter(hl.is_defined(tt.data.g)).key_by('locus', 'alleles', 'fam')
    self.assertEqual(et.key.dtype, tt.key.dtype)
    self.assertEqual(et.row.dtype, tt.row.dtype)
    self.assertTrue(et._same(tt))
    # test annotations
    e_cols = (mt.cols()
              .join(dads, how='left')
              .join(moms, how='left'))
    e_cols = e_cols.annotate(is_dad=hl.is_defined(e_cols.is_dad),
                             is_mom=hl.is_defined(e_cols.is_mom))
    e_cols = (e_cols.group_by(fam=e_cols.fam.fam_id)
              .aggregate(data=hl.agg.collect(hl.struct(role=hl.case()
                                                       .when(e_cols.is_dad, 1).when(e_cols.is_mom, 2).default(0),
                                                       sa=hl.struct(**e_cols.row.select(*mt.col))))))
    e_cols = e_cols.filter(hl.len(e_cols.data) == 3).select('data').explode('data')
    t_cols = hl.trio_matrix(mt, ped, complete_trios=True).cols()
    t_cols = t_cols.annotate(fam=t_cols.proband.fam.fam_id,
                             data=[
                                 hl.struct(role=0, sa=t_cols.proband),
                                 hl.struct(role=1, sa=t_cols.father),
                                 hl.struct(role=2, sa=t_cols.mother)]).key_by('fam').select('data').explode('data')
    t_cols = t_cols.filter(hl.is_defined(t_cols.data.sa))
    self.assertEqual(e_cols.key.dtype, t_cols.key.dtype)
    self.assertEqual(e_cols.row.dtype, t_cols.row.dtype)
    self.assertTrue(e_cols._same(t_cols))
def test_sample_qc(self):
    """Smoke test: hl.sample_qc runs without error on the shared test dataset
    (no result values are inspected)."""
    ds = self.get_dataset()
    ds = hl.sample_qc(ds)
def test_variant_qc(self):
    """variant_qc on a hand-built 2-variant, 4-sample matrix table; checks
    allele counts/frequencies, call rates, HWE stats, and DP/GQ summary stats
    against values computable by hand from the literal data below."""
    data = [
        # Variant 1:1:A:T — one hom-ref, one hom-alt, one het, one no-call.
        {'v': '1:1:A:T', 's': '1', 'GT': hl.Call([0, 0]), 'GQ': 10, 'DP': 0},
        {'v': '1:1:A:T', 's': '2', 'GT': hl.Call([1, 1]), 'GQ': 10, 'DP': 5},
        {'v': '1:1:A:T', 's': '3', 'GT': hl.Call([0, 1]), 'GQ': 11, 'DP': 100},
        {'v': '1:1:A:T', 's': '4', 'GT': None, 'GQ': None, 'DP': 100},
        # Variant 1:2:A:T,C — multi-allelic, all samples called.
        {'v': '1:2:A:T,C', 's': '1', 'GT': hl.Call([1, 2]), 'GQ': 10, 'DP': 5},
        {'v': '1:2:A:T,C', 's': '2', 'GT': hl.Call([2, 2]), 'GQ': 10, 'DP': 5},
        {'v': '1:2:A:T,C', 's': '3', 'GT': hl.Call([0, 1]), 'GQ': 10, 'DP': 5},
        {'v': '1:2:A:T,C', 's': '4', 'GT': hl.Call([1, 1]), 'GQ': 10, 'DP': 5},
    ]
    ht = hl.Table.parallelize(data, hl.dtype('struct{v: str, s: str, GT: call, GQ: int, DP: int}'))
    ht = ht.transmute(**hl.parse_variant(ht.v))
    mt = ht.to_matrix_table(['locus', 'alleles'], ['s'], partition_key=['locus'])
    mt = hl.variant_qc(mt, 'vqc')
    r = mt.rows().collect()
    # Variant 1 (bi-allelic): 3 of 4 samples called.
    self.assertEqual(r[0].vqc.AF, [0.5, 0.5])
    self.assertEqual(r[0].vqc.AC, [3, 3])
    self.assertEqual(r[0].vqc.AN, 6)
    self.assertEqual(r[0].vqc.homozygote_count, [1, 1])
    self.assertEqual(r[0].vqc.n_called, 3)
    self.assertEqual(r[0].vqc.n_not_called, 1)
    self.assertEqual(r[0].vqc.call_rate, 0.75)
    self.assertEqual(r[0].vqc.n_het, 1)
    self.assertEqual(r[0].vqc.n_non_ref, 2)
    self.assertEqual(r[0].vqc.r_expected_het_freq, 0.6)
    self.assertEqual(r[0].vqc.p_hwe, 0.7)
    self.assertEqual(r[0].vqc.dp_stats.min, 0)
    self.assertEqual(r[0].vqc.dp_stats.max, 100)
    self.assertEqual(r[0].vqc.dp_stats.mean, 51.25)
    self.assertAlmostEqual(r[0].vqc.dp_stats.stdev, 48.782040752719645)
    self.assertEqual(r[0].vqc.gq_stats.min, 10)
    self.assertEqual(r[0].vqc.gq_stats.max, 11)
    self.assertAlmostEqual(r[0].vqc.gq_stats.mean, 10.333333333333334)
    self.assertAlmostEqual(r[0].vqc.gq_stats.stdev, 0.47140452079103168)
    # Variant 2 (multi-allelic): HWE stats are undefined (None).
    self.assertEqual(r[1].vqc.AF, [0.125, 0.5, 0.375])
    self.assertEqual(r[1].vqc.AC, [1, 4, 3])
    self.assertEqual(r[1].vqc.AN, 8)
    self.assertEqual(r[1].vqc.homozygote_count, [0, 1, 1])
    self.assertEqual(r[1].vqc.n_called, 4)
    self.assertEqual(r[1].vqc.n_not_called, 0)
    self.assertEqual(r[1].vqc.call_rate, 1.0)
    self.assertEqual(r[1].vqc.n_het, 2)
    self.assertEqual(r[1].vqc.n_non_ref, 4)
    self.assertEqual(r[1].vqc.p_hwe, None)
    self.assertEqual(r[1].vqc.r_expected_het_freq, None)
    self.assertEqual(r[1].vqc.dp_stats.min, 5)
    self.assertEqual(r[1].vqc.dp_stats.max, 5)
    self.assertEqual(r[1].vqc.dp_stats.mean, 5)
    self.assertEqual(r[1].vqc.dp_stats.stdev, 0.0)
    self.assertEqual(r[1].vqc.gq_stats.min, 10)
    self.assertEqual(r[1].vqc.gq_stats.max, 10)
    self.assertEqual(r[1].vqc.gq_stats.mean, 10)
    self.assertEqual(r[1].vqc.gq_stats.stdev, 0)
def test_grm(self):
    """Exports the dataset to PLINK, runs `plink` externally to produce
    relatedness matrices in three formats (rel, gcta-grm, gcta-grm-bin), and
    checks Hail's genetic_relatedness_matrix exports match each within a
    tolerance. Requires the `plink` binary on PATH."""
    tolerance = 0.001

    def load_id_file(path):
        # Parse a two-column .id file; returns the second column (sample ids).
        # NOTE(review): uses hl.hadoop_open while the loaders below use
        # utils.hadoop_open — presumably equivalent aliases; confirm.
        ids = []
        with hl.hadoop_open(path) as f:
            for l in f:
                r = l.strip().split('\t')
                self.assertEqual(len(r), 2)
                ids.append(r[1])
        return ids

    def load_rel(ns, path):
        # Parse a lower-triangular .rel text file into a (ns, ns) array
        # (upper triangle left as zeros).
        rel = np.zeros((ns, ns))
        with hl.hadoop_open(path) as f:
            for i, l in enumerate(f):
                for j, n in enumerate(map(float, l.strip().split('\t'))):
                    rel[i, j] = n
                self.assertEqual(j, i)
            self.assertEqual(i, ns - 1)
        return rel

    def load_grm(ns, nv, path):
        # Parse GCTA .grm.gz text format: rows of (i, j, n_variants, value).
        m = np.zeros((ns, ns))
        with utils.hadoop_open(path) as f:
            i = 0
            for l in f:
                row = l.strip().split('\t')
                self.assertEqual(int(row[2]), nv)
                m[int(row[0]) - 1, int(row[1]) - 1] = float(row[3])
                i += 1
            # Lower triangle including diagonal has ns*(ns+1)/2 entries.
            self.assertEqual(i, ns * (ns + 1) / 2)
        return m

    def load_bin(ns, path):
        # Parse GCTA binary format: little-endian float32 lower triangle.
        m = np.zeros((ns, ns))
        with utils.hadoop_open(path, 'rb') as f:
            for i in range(ns):
                for j in range(i + 1):
                    b = f.read(4)
                    self.assertEqual(len(b), 4)
                    m[i, j] = unpack('<f', bytearray(b))[0]
            left = f.read()
            self.assertEqual(len(left), 0)
        return m

    b_file = utils.new_temp_file(prefix="plink")
    rel_file = utils.new_temp_file(prefix="test", suffix="rel")
    rel_id_file = utils.new_temp_file(prefix="test", suffix="rel.id")
    grm_file = utils.new_temp_file(prefix="test", suffix="grm")
    grm_bin_file = utils.new_temp_file(prefix="test", suffix="grm.bin")
    grm_nbin_file = utils.new_temp_file(prefix="test", suffix="grm.N.bin")
    dataset = self.get_dataset()
    n_samples = dataset.count_cols()
    # Keep only polymorphic, fully-called variants so plink and Hail agree.
    dataset = dataset.annotate_rows(AC=agg.sum(dataset.GT.n_alt_alleles()),
                                    n_called=agg.count_where(hl.is_defined(dataset.GT)))
    dataset = dataset.filter_rows((dataset.AC > 0) & (dataset.AC < 2 * dataset.n_called))
    dataset = dataset.filter_rows(dataset.n_called == n_samples).persist()
    hl.export_plink(dataset, b_file, ind_id=dataset.s)
    sample_ids = [row.s for row in dataset.cols().select().collect()]
    n_variants = dataset.count_rows()
    self.assertGreater(n_variants, 0)
    grm = hl.genetic_relatedness_matrix(dataset.GT)
    grm.export_id_file(rel_id_file)
    ############
    ### rel
    p_file = utils.new_temp_file(prefix="plink")
    syscall('''plink --bfile {} --make-rel --out {}'''
            .format(utils.uri_path(b_file), utils.uri_path(p_file)), shell=True, stdout=DEVNULL, stderr=DEVNULL)
    self.assertEqual(load_id_file(p_file + ".rel.id"), sample_ids)
    grm.export_rel(rel_file)
    self.assertEqual(load_id_file(rel_id_file), sample_ids)
    self.assertTrue(np.allclose(load_rel(n_samples, p_file + ".rel"),
                                load_rel(n_samples, rel_file),
                                atol=tolerance))
    ############
    ### gcta-grm
    p_file = utils.new_temp_file(prefix="plink")
    syscall('''plink --bfile {} --make-grm-gz --out {}'''
            .format(utils.uri_path(b_file), utils.uri_path(p_file)), shell=True, stdout=DEVNULL, stderr=DEVNULL)
    self.assertEqual(load_id_file(p_file + ".grm.id"), sample_ids)
    grm.export_gcta_grm(grm_file)
    self.assertTrue(np.allclose(load_grm(n_samples, n_variants, p_file + ".grm.gz"),
                                load_grm(n_samples, n_variants, grm_file),
                                atol=tolerance))
    ############
    ### gcta-grm-bin
    p_file = utils.new_temp_file(prefix="plink")
    syscall('''plink --bfile {} --make-grm-bin --out {}'''
            .format(utils.uri_path(b_file), utils.uri_path(p_file)), shell=True, stdout=DEVNULL, stderr=DEVNULL)
    self.assertEqual(load_id_file(p_file + ".grm.id"), sample_ids)
    grm.export_gcta_grm_bin(grm_bin_file, grm_nbin_file)
    self.assertTrue(np.allclose(load_bin(n_samples, p_file + ".grm.bin"),
                                load_bin(n_samples, grm_bin_file),
                                atol=tolerance))
    self.assertTrue(np.allclose(load_bin(n_samples, p_file + ".grm.N.bin"),
                                load_bin(n_samples, grm_nbin_file),
                                atol=tolerance))
def test_block_matrix_from_numpy(self):
    """A 3x5 float64 matrix round-trips through BlockMatrix.from_numpy /
    to_numpy unchanged, for block sizes smaller than, equal to, and larger
    than the matrix dimensions."""
    source = np.matrix([[0, 1, 2, 3, 4], [5, 6, 7, 8, 9], [10, 11, 12, 13, 14]], dtype=np.float64)
    for block_size in (1, 2, 5, 1024):
        bm = BlockMatrix.from_numpy(source, block_size)
        assert bm.n_rows == 3
        assert bm.n_cols == 5
        assert (bm.to_numpy() == source).all()
def test_rrm(self):
    """Compares hl.realized_relationship_matrix against a direct NumPy
    computation (standardize genotype rows, then mat.T @ mat / n_variants) on
    a seeded Balding-Nichols dataset."""
    seed = 0
    n1 = 100   # samples
    m1 = 200   # variants
    k = 3      # populations
    fst = .9
    dataset = hl.balding_nichols_model(k,
                                       n1,
                                       m1,
                                       fst=(k * [fst]),
                                       seed=seed,
                                       n_partitions=4)
    dataset = dataset.annotate_cols(s = hl.str(dataset.sample_idx)).key_cols_by('s')

    def direct_calculation(ds):
        # Reference implementation entirely in NumPy.
        ds = BlockMatrix.from_entry_expr(ds['GT'].n_alt_alleles()).to_numpy()
        # filter out constant rows
        isconst = lambda r: any([all([(gt < c + .01) and (gt > c - .01) for gt in r]) for c in range(3)])
        ds = np.array([row for row in ds if not isconst(row)])
        nvariants, nsamples = ds.shape
        sumgt = lambda r: sum([i for i in r if i >= 0])
        sumsq = lambda r: sum([i ** 2 for i in r if i >= 0])
        # Standardize each variant row to mean 0, stddev 1.
        mean = [sumgt(row) / nsamples for row in ds]
        stddev = [sqrt(sumsq(row) / nsamples - mean[i] ** 2)
                  for i, row in enumerate(ds)]
        mat = np.array([[(g - mean[i]) / stddev[i] for g in row] for i, row in enumerate(ds)])
        rrm = (mat.T @ mat) / nvariants
        return rrm

    def hail_calculation(ds):
        # Hail implementation, round-tripped through a TSV export.
        rrm = hl.realized_relationship_matrix(ds.GT)
        fn = utils.new_temp_file(suffix='.tsv')
        rrm.export_tsv(fn)
        data = []
        with open(utils.uri_path(fn)) as f:
            f.readline()  # skip header line
            for line in f:
                row = line.strip().split()
                data.append(list(map(float, row)))
        return np.array(data)

    manual = direct_calculation(dataset)
    rrm = hail_calculation(dataset)
    self.assertTrue(np.allclose(manual, rrm))
def test_hwe_normalized_pca(self):
    """hwe_normalized_pca returns k eigenvalues and a per-sample score Table;
    loadings are a Table when compute_loadings=True and None otherwise."""
    bn = hl.balding_nichols_model(3, 100, 50)
    evals, score_tbl, loading_tbl = hl.hwe_normalized_pca(bn.GT, k=2, compute_loadings=True)
    self.assertEqual(len(evals), 2)
    self.assertTrue(isinstance(score_tbl, hl.Table))
    self.assertEqual(score_tbl.count(), 100)
    self.assertTrue(isinstance(loading_tbl, hl.Table))
    _, _, loading_tbl = hl.hwe_normalized_pca(bn.GT, k=2, compute_loadings=False)
    self.assertEqual(loading_tbl, None)
def test_pca_against_numpy(self):
    """Runs hl.pca on a tiny VCF and checks eigenvalues/scores/loadings
    against an SVD computed with NumPy on the same normalized genotype matrix.
    Only absolute values are compared, since eigenvector sign is arbitrary."""
    mt = hl.import_vcf(resource('tiny_m.vcf'))
    mt = mt.filter_rows(hl.len(mt.alleles) == 2)
    # Restrict to polymorphic bi-allelic variants.
    mt = mt.annotate_rows(AC = hl.agg.sum(mt.GT.n_alt_alleles()),
                          n_called = hl.agg.count_where(hl.is_defined(mt.GT)))
    mt = mt.filter_rows((mt.AC > 0) & (mt.AC < 2 * mt.n_called)).persist()
    n_rows = mt.count_rows()

    def make_expr(mean):
        # HWE-style normalization; missing genotypes contribute 0.
        return hl.cond(hl.is_defined(mt.GT),
                       (mt.GT.n_alt_alleles() - mean) / hl.sqrt(mean * (2 - mean) * n_rows / 2),
                       0)

    eigen, scores, loadings= hl.pca(hl.bind(make_expr, mt.AC / mt.n_called), k=3, compute_loadings=True)
    hail_scores = scores.explode('scores').scores.collect()
    hail_loadings = loadings.explode('loadings').loadings.collect()
    self.assertEqual(len(eigen), 3)
    self.assertEqual(scores.count(), mt.count_cols())
    self.assertEqual(loadings.count(), n_rows)
    # compute PCA with numpy
    def normalize(a):
        ms = np.mean(a, axis = 0, keepdims = True)
        return np.divide(np.subtract(a, ms), np.sqrt(2.0*np.multiply(ms/2.0, 1-ms/2.0)*a.shape[1]))

    # g hard-codes the expected genotype matrix of tiny_m.vcf after
    # filtering (verify against the fixture if it changes).
    g = np.pad(np.diag([1.0, 1, 2]), ((0, 1), (0, 0)), mode='constant')
    g[1, 0] = 1.0 / 3
    n = normalize(g)
    U, s, V = np.linalg.svd(n, full_matrices=0)
    np_scores = U.dot(np.diag(s)).flatten()
    np_loadings = V.transpose().flatten()
    np_eigenvalues = np.multiply(s,s).flatten()

    def check(hail_array, np_array):
        # Compare element-wise by absolute value (sign is not determined).
        self.assertEqual(len(hail_array), len(np_array))
        for i, (left, right) in enumerate(zip(hail_array, np_array)):
            self.assertAlmostEqual(abs(left), abs(right),
                                   msg=f'mismatch at index {i}: hl={left}, np={right}',
                                   places=4)

    check(eigen, np_eigenvalues)
    check(hail_scores, np_scores)
    check(hail_loadings, np_loadings)
def _R_pc_relate(self, mt, maf):
    """Run the reference R implementation of PC-Relate on `mt`.

    Exports `mt` to PLINK format, invokes the runPcRelate.R script with the
    given MAF cutoff, and reads the script's output back as a table shaped
    like hl.pc_relate's result: keyed by (i, j) sample-index structs with
    kin/ibd0/ibd1/ibd2 fields.
    """
    plink_file = utils.uri_path(utils.new_temp_file())
    hl.export_plink(mt, plink_file, ind_id=hl.str(mt.col_key[0]))
    utils.run_command(["Rscript",
                       resource("is/hail/methods/runPcRelate.R"),
                       plink_file,
                       str(maf)])
    # Column types of the whitespace-delimited table the R script writes.
    types = {
        'ID1': hl.tstr,
        'ID2': hl.tstr,
        'nsnp': hl.tfloat64,
        'kin': hl.tfloat64,
        'k0': hl.tfloat64,
        'k1': hl.tfloat64,
        'k2': hl.tfloat64
    }
    plink_kin = hl.import_table(plink_file + '.out',
                                delimiter=' +',
                                types=types)
    # Reshape to match hl.pc_relate's key structure and field names.
    return plink_kin.select(i=hl.struct(sample_idx=plink_kin.ID1),
                            j=hl.struct(sample_idx=plink_kin.ID2),
                            kin=plink_kin.kin,
                            ibd0=plink_kin.k0,
                            ibd1=plink_kin.k1,
                            ibd2=plink_kin.k2).key_by('i', 'j')
def test_pc_relate_on_balding_nichols_against_R_pc_relate(self):
    """Cross-check Hail's pc_relate against the reference R implementation."""
    bn = hl.balding_nichols_model(3, 100, 1000)
    bn = bn.key_cols_by(sample_idx=hl.str(bn.sample_idx))
    hail_kin = hl.pc_relate(bn.GT, 0.00, k=2).cache()
    r_kin = self._R_pc_relate(bn, 0.00).cache()
    # ibd1 is allowed a slightly looser tolerance than the other statistics.
    for field, tol in [("kin", 1e-3), ("ibd0", 1e-2), ("ibd1", 2e-2), ("ibd2", 1e-2)]:
        self.assertTrue(r_kin.select(field)._same(hail_kin.select(field),
                                                  tolerance=tol, absolute=True))
def test_pcrelate_paths(self):
    """pc_relate with internal PCA (k=) must match passing precomputed scores."""
    mt = hl.balding_nichols_model(3, 50, 100)
    _, scores2, _ = hl.hwe_normalized_pca(mt.GT, k=2, compute_loadings=False)
    _, scores3, _ = hl.hwe_normalized_pca(mt.GT, k=3, compute_loadings=False)
    # Each kinN/kin_sN pair uses the same k and statistics but a different
    # score-provision path and block size; the results must agree.
    kin1 = hl.pc_relate(mt.GT, 0.10, k=2, statistics='kin', block_size=64)
    kin_s1 = hl.pc_relate(mt.GT, 0.10, scores_expr=scores2[mt.col_key].scores,
                          statistics='kin', block_size=32)
    kin2 = hl.pc_relate(mt.GT, 0.05, k=2, min_kinship=0.01, statistics='kin2', block_size=128).cache()
    kin_s2 = hl.pc_relate(mt.GT, 0.05, scores_expr=scores2[mt.col_key].scores, min_kinship=0.01,
                          statistics='kin2', block_size=16)
    kin3 = hl.pc_relate(mt.GT, 0.02, k=3, min_kinship=0.1, statistics='kin20', block_size=64).cache()
    kin_s3 = hl.pc_relate(mt.GT, 0.02, scores_expr=scores3[mt.col_key].scores, min_kinship=0.1,
                          statistics='kin20', block_size=32)
    kin4 = hl.pc_relate(mt.GT, 0.01, k=3, statistics='all', block_size=128)
    kin_s4 = hl.pc_relate(mt.GT, 0.01, scores_expr=scores3[mt.col_key].scores, statistics='all', block_size=16)
    self.assertTrue(kin1._same(kin_s1, tolerance=1e-4))
    self.assertTrue(kin2._same(kin_s2, tolerance=1e-4))
    self.assertTrue(kin3._same(kin_s3, tolerance=1e-4))
    self.assertTrue(kin4._same(kin_s4, tolerance=1e-4))
    # Without min_kinship, every unordered sample pair appears exactly once.
    self.assertTrue(kin1.count() == 50 * 49 / 2)
    # min_kinship thresholds must actually filter the output.
    self.assertTrue(kin2.count() > 0)
    self.assertTrue(kin2.filter(kin2.kin < 0.01).count() == 0)
    self.assertTrue(kin3.count() > 0)
    self.assertTrue(kin3.filter(kin3.kin < 0.1).count() == 0)
def test_rename_duplicates(self):
    """rename_duplicates must leave every sample ID distinct.

    FIXME - want to rename samples with same id.
    """
    dataset = self.get_dataset()
    renamed_ids = hl.rename_duplicates(dataset).cols().select().collect()
    # Was `assertTrue(len(set(...)), len(...))`: assertTrue's second argument
    # is the failure *message*, so the old assertion passed whenever any ID
    # existed at all. assertEqual performs the intended uniqueness check.
    self.assertEqual(len(set(renamed_ids)), len(renamed_ids))
def test_split_multi_hts(self):
    """split_multi_hts output should match the pre-split golden VCF."""
    split = hl.split_multi_hts(hl.import_vcf(resource('split_test.vcf')))
    expected = hl.import_vcf(resource('split_test_b.vcf'))
    rows = split.rows()
    # Every row is either at the known biallelic position or flagged as split.
    self.assertTrue(rows.all((rows.locus.position == 1180) | rows.was_split))
    # Drop the bookkeeping fields before comparing with the golden file.
    self.assertTrue(split.drop('was_split', 'a_index')._same(expected))
def test_mendel_errors(self):
    """Check hl.mendel_errors schemas and counts on the mendel.vcf trio data."""
    mt = hl.import_vcf(resource('mendel.vcf'))
    ped = hl.Pedigree.read(resource('mendel.fam'))
    men, fam, ind, var = hl.mendel_errors(mt['GT'], ped)
    # Per-error table: keyed by (locus, alleles, sample).
    self.assertEqual(men.key.dtype, hl.tstruct(locus=mt.locus.dtype,
                                               alleles=hl.tarray(hl.tstr),
                                               s=hl.tstr))
    self.assertEqual(men.row.dtype, hl.tstruct(locus=mt.locus.dtype,
                                               alleles=hl.tarray(hl.tstr),
                                               s=hl.tstr,
                                               fam_id=hl.tstr,
                                               mendel_code=hl.tint))
    # Per-family table: keyed by the parental pair.
    self.assertEqual(fam.key.dtype, hl.tstruct(pat_id=hl.tstr,
                                               mat_id=hl.tstr))
    self.assertEqual(fam.row.dtype, hl.tstruct(pat_id=hl.tstr,
                                               mat_id=hl.tstr,
                                               fam_id=hl.tstr,
                                               children=hl.tint,
                                               errors=hl.tint64,
                                               snp_errors=hl.tint64))
    # Per-individual table.
    self.assertEqual(ind.key.dtype, hl.tstruct(s=hl.tstr))
    self.assertEqual(ind.row.dtype, hl.tstruct(s=hl.tstr,
                                               fam_id=hl.tstr,
                                               errors=hl.tint64,
                                               snp_errors=hl.tint64))
    # Per-variant table.
    self.assertEqual(var.key.dtype, hl.tstruct(locus=mt.locus.dtype,
                                               alleles=hl.tarray(hl.tstr)))
    self.assertEqual(var.row.dtype, hl.tstruct(locus=mt.locus.dtype,
                                               alleles=hl.tarray(hl.tstr),
                                               errors=hl.tint64))
    # Golden totals for the mendel.vcf / mendel.fam fixtures.
    self.assertEqual(men.count(), 41)
    self.assertEqual(fam.count(), 2)
    self.assertEqual(ind.count(), 7)
    self.assertEqual(var.count(), mt.count_rows())
    self.assertEqual(set(fam.select('errors', 'snp_errors').collect()),
                     {
                         hl.utils.Struct(pat_id='Dad1', mat_id='Mom1', errors=41, snp_errors=39),
                         hl.utils.Struct(pat_id='Dad2', mat_id='Mom2', errors=0, snp_errors=0)
                     })
    self.assertEqual(set(ind.select('errors', 'snp_errors').collect()),
                     {
                         hl.utils.Struct(s='Son1', errors=23, snp_errors=22),
                         hl.utils.Struct(s='Dtr1', errors=18, snp_errors=17),
                         hl.utils.Struct(s='Dad1', errors=19, snp_errors=18),
                         hl.utils.Struct(s='Mom1', errors=22, snp_errors=21),
                         hl.utils.Struct(s='Dad2', errors=0, snp_errors=0),
                         hl.utils.Struct(s='Mom2', errors=0, snp_errors=0),
                         hl.utils.Struct(s='Son2', errors=0, snp_errors=0)
                     })
    # Spot-check per-variant error counts at selected loci.
    to_keep = hl.set([
        (hl.Locus("1", 1), ['C', 'CT']),
        (hl.Locus("1", 2), ['C', 'T']),
        (hl.Locus("X", 1), ['C', 'T']),
        (hl.Locus("X", 3), ['C', 'T']),
        (hl.Locus("Y", 1), ['C', 'T']),
        (hl.Locus("Y", 3), ['C', 'T'])
    ])
    self.assertEqual(var.filter(to_keep.contains((var.locus, var.alleles)))
                     .order_by('locus')
                     .select('errors').collect(),
                     [
                         hl.utils.Struct(locus=hl.Locus("1", 1), alleles=['C', 'CT'], errors=2),
                         hl.utils.Struct(locus=hl.Locus("1", 2), alleles=['C', 'T'], errors=1),
                         hl.utils.Struct(locus=hl.Locus("X", 1), alleles=['C', 'T'], errors=2),
                         hl.utils.Struct(locus=hl.Locus("X", 3), alleles=['C', 'T'], errors=1),
                         hl.utils.Struct(locus=hl.Locus("Y", 1), alleles=['C', 'T'], errors=1),
                         hl.utils.Struct(locus=hl.Locus("Y", 3), alleles=['C', 'T'], errors=1),
                     ])
    # Dtr1's per-error rows must be identical under a pedigree with missing sex.
    ped2 = hl.Pedigree.read(resource('mendelWithMissingSex.fam'))
    men2, _, _, _ = hl.mendel_errors(mt['GT'], ped2)
    self.assertTrue(men2.filter(men2.s == 'Dtr1')._same(men.filter(men.s == 'Dtr1')))
def test_export_vcf(self):
    """Round-trip a VCF (data and header metadata) through export_vcf/import_vcf."""
    out_path = '/tmp/sample.vcf'
    original = hl.import_vcf(resource('sample.vcf.bgz'))
    metadata = hl.get_vcf_metadata(resource('sample.vcf.bgz'))
    hl.export_vcf(original, out_path, metadata=metadata)
    reimported = hl.import_vcf(out_path)
    self.assertTrue(original._same(reimported))
    # The exported header metadata must also survive the round trip.
    self.assertDictEqual(metadata, hl.get_vcf_metadata(out_path))
def test_concordance(self):
    """A dataset compared with itself is fully concordant, in the right cells."""
    dataset = self.get_dataset()
    glob_conc, cols_conc, rows_conc = hl.concordance(dataset, dataset)
    # The 5x5 global matrix partitions every (row, col) pair exactly once.
    self.assertEqual(sum([sum(glob_conc[i]) for i in range(5)]), dataset.count_rows() * dataset.count_cols())
    counts = dataset.aggregate_entries(hl.Struct(n_het=agg.count(agg.filter(dataset.GT.is_het(), dataset.GT)),
                                                 n_hom_ref=agg.count(agg.filter(dataset.GT.is_hom_ref(), dataset.GT)),
                                                 n_hom_var=agg.count(agg.filter(dataset.GT.is_hom_var(), dataset.GT)),
                                                 nNoCall=agg.count(
                                                     agg.filter(hl.is_missing(dataset.GT), dataset.GT))))
    # Self-comparison puts all mass on the diagonal; the checks below imply
    # the index order 0=missing variant, 1=no call, 2=hom-ref, 3=het, 4=hom-var.
    self.assertEqual(glob_conc[0][0], 0)
    self.assertEqual(glob_conc[1][1], counts.nNoCall)
    self.assertEqual(glob_conc[2][2], counts.n_hom_ref)
    self.assertEqual(glob_conc[3][3], counts.n_het)
    self.assertEqual(glob_conc[4][4], counts.n_hom_var)
    # Every off-diagonal cell must be empty.
    [self.assertEqual(glob_conc[i][j], 0) for i in range(5) for j in range(5) if i != j]
    self.assertTrue(cols_conc.all(hl.sum(hl.flatten(cols_conc.concordance)) == dataset.count_rows()))
    self.assertTrue(rows_conc.all(hl.sum(hl.flatten(rows_conc.concordance)) == dataset.count_cols()))
    # Smoke-test that the per-sample and per-variant tables are writable.
    cols_conc.write('/tmp/foo.kt', overwrite=True)
    rows_conc.write('/tmp/foo.kt', overwrite=True)
def test_import_table_force_bgz(self):
    """A block-gzipped file renamed to .gz should import with force_bgz=True."""
    bgz_path = utils.new_temp_file(suffix=".bgz")
    original = utils.range_table(10, 5)
    original.export(bgz_path)
    # Copy under a .gz name: the extension now lies about the compression.
    gz_path = utils.new_temp_file(suffix=".gz")
    utils.run_command(["cp", utils.uri_path(bgz_path), utils.uri_path(gz_path)])
    reimported = hl.import_table(gz_path, force_bgz=True, impute=True).key_by('idx')
    self.assertTrue(original._same(reimported))
def test_import_locus_intervals(self):
    """import_locus_intervals loads every non-blank line and round-trips."""
    interval_file = resource('annotinterall.interval_list')
    t = hl.import_locus_intervals(interval_file, reference_genome='GRCh37')
    nint = t.count()
    # Count non-blank lines of the source file by hand for comparison.
    i = 0
    with open(interval_file) as f:
        for line in f:
            if len(line.strip()) != 0:
                i += 1
    self.assertEqual(nint, i)
    self.assertEqual(t.interval.dtype.point_type, hl.tlocus('GRCh37'))
    # Round-trip: rebuild each interval from its endpoints (both inclusive),
    # export without a header, and reimport.
    tmp_file = utils.new_temp_file(prefix="test", suffix="interval_list")
    start = t.interval.start
    end = t.interval.end
    (t
     .key_by(interval=hl.locus_interval(start.contig, start.position, end.position, True, True))
     .select()
     .export(tmp_file, header=False))
    t2 = hl.import_locus_intervals(tmp_file)
    self.assertTrue(t.select()._same(t2))
def test_import_locus_intervals_no_reference_specified(self):
    """Without a reference genome, interval points are (contig, position) structs."""
    no_ref = hl.import_locus_intervals(resource('annotinterall.interval_list'),
                                       reference_genome=None)
    self.assertTrue(no_ref.count() == 2)
    expected_point = hl.tstruct(contig=hl.tstr, position=hl.tint32)
    self.assertEqual(no_ref.interval.dtype.point_type, expected_point)
def test_import_locus_intervals_badly_defined_intervals(self):
    """skip_invalid_intervals drops malformed lines; more survive without a reference."""
    path = resource('example3.interval_list')
    with_ref = hl.import_locus_intervals(path, reference_genome='GRCh37',
                                         skip_invalid_intervals=True)
    self.assertTrue(with_ref.count() == 21)
    # Without reference validation, one additional interval parses.
    no_ref = hl.import_locus_intervals(path, reference_genome=None,
                                       skip_invalid_intervals=True)
    self.assertTrue(no_ref.count() == 22)
def test_import_bed(self):
    """import_bed keeps one interval per numeric-contig BED line."""
    bed_file = resource('example1.bed')
    bed = hl.import_bed(bed_file, reference_genome='GRCh37')
    nbed = bed.count()
    expected = 0
    with open(bed_file) as f:
        for line in f:
            if len(line.strip()) != 0:
                # Count only data lines whose first column is numeric;
                # header/track lines fail the int() conversion.
                try:
                    int(line.split()[0])
                    expected += 1
                except ValueError:
                    # Was a bare `except:` — narrowed to the only exception
                    # int() raises here, so unrelated bugs aren't swallowed.
                    pass
    self.assertEqual(nbed, expected)
    self.assertEqual(bed.interval.dtype.point_type, hl.tlocus('GRCh37'))
    # A 4-column BED exposes the extra column as a 'target' row field.
    bed_file = resource('example2.bed')
    t = hl.import_bed(bed_file, reference_genome='GRCh37')
    self.assertEqual(t.interval.dtype.point_type, hl.tlocus('GRCh37'))
    self.assertTrue(list(t.key.dtype) == ['interval'])
    self.assertTrue(list(t.row.dtype) == ['interval','target'])
def test_import_bed_no_reference_specified(self):
    """Without a reference genome, BED interval points are (contig, position) structs."""
    no_ref = hl.import_bed(resource('example1.bed'), reference_genome=None)
    self.assertTrue(no_ref.count() == 3)
    expected_point = hl.tstruct(contig=hl.tstr, position=hl.tint32)
    self.assertEqual(no_ref.interval.dtype.point_type, expected_point)
def test_import_bed_badly_defined_intervals(self):
    """skip_invalid_intervals drops malformed BED lines; more survive without a reference."""
    path = resource('example4.bed')
    with_ref = hl.import_bed(path, reference_genome='GRCh37', skip_invalid_intervals=True)
    self.assertTrue(with_ref.count() == 3)
    # Without reference validation, one additional interval parses.
    no_ref = hl.import_bed(path, reference_genome=None, skip_invalid_intervals=True)
    self.assertTrue(no_ref.count() == 4)
def test_annotate_intervals(self):
    """Row annotation from BED and interval-list sources must agree."""
    ds = self.get_dataset()
    bed1 = hl.import_bed(resource('example1.bed'), reference_genome='GRCh37')
    bed2 = hl.import_bed(resource('example2.bed'), reference_genome='GRCh37')
    bed3 = hl.import_bed(resource('example3.bed'), reference_genome='GRCh37')
    # Sources with a 4th column expose it as a 'target' row field.
    self.assertTrue(list(bed2.key.dtype) == ['interval'])
    self.assertTrue(list(bed2.row.dtype) == ['interval','target'])
    interval_list1 = hl.import_locus_intervals(resource('exampleAnnotation1.interval_list'))
    interval_list2 = hl.import_locus_intervals(resource('exampleAnnotation2.interval_list'))
    self.assertTrue(list(interval_list2.key.dtype) == ['interval'])
    self.assertTrue(list(interval_list2.row.dtype) == ['interval', 'target'])
    # Presence/absence annotation from the target-less BED: only positions
    # outside [14M, 17M] may be missing.
    ann = ds.annotate_rows(in_interval = bed1[ds.locus]).rows()
    self.assertTrue(ann.all((ann.locus.position <= 14000000) |
                            (ann.locus.position >= 17000000) |
                            (hl.is_missing(ann.in_interval))))
    # Target annotation: gene1 below 14M, gene2 above 17M, missing between.
    for bed in [bed2, bed3]:
        ann = ds.annotate_rows(target = bed[ds.locus].target).rows()
        expr = (hl.case()
                .when(ann.locus.position <= 14000000, ann.target == 'gene1')
                .when(ann.locus.position >= 17000000, ann.target == 'gene2')
                .default(ann.target == hl.null(hl.tstr)))
        self.assertTrue(ann.all(expr))
    # Interval-list and BED sources must produce identical annotations.
    self.assertTrue(ds.annotate_rows(in_interval = interval_list1[ds.locus]).rows()
                    ._same(ds.annotate_rows(in_interval = bed1[ds.locus]).rows()))
    self.assertTrue(ds.annotate_rows(target = interval_list2[ds.locus].target).rows()
                    ._same(ds.annotate_rows(target = bed2[ds.locus].target).rows()))
def test_import_fam(self):
    """import_fam yields one row per non-blank line of the .fam file."""
    fam_file = resource('sample.fam')
    nfam = hl.import_fam(fam_file).count()
    with open(fam_file) as f:
        expected = sum(1 for line in f if line.strip())
    self.assertEqual(nfam, expected)
def test_export_plink(self):
    """Hail's PLINK export must match the plink binary's own (merge-mode 6 diff)."""
    vcf_file = resource('sample.vcf')
    mt = hl.split_multi_hts(hl.import_vcf(vcf_file, min_partitions=10))
    split_vcf_file = utils.uri_path(utils.new_temp_file())
    hl_output = utils.uri_path(utils.new_temp_file())
    plink_output = utils.uri_path(utils.new_temp_file())
    merge_output = utils.uri_path(utils.new_temp_file())
    hl.export_vcf(mt, split_vcf_file)
    hl.export_plink(mt, hl_output)
    # Produce a reference .bed/.bim/.fam with the plink binary itself.
    utils.run_command(["plink", "--vcf", split_vcf_file,
                       "--make-bed", "--out", plink_output,
                       "--const-fid", "--keep-allele-order"])
    # Rewrite plink's variant IDs as contig:pos:a2:a1 so they match Hail's
    # exported IDs before merging.
    data = []
    with open(utils.uri_path(plink_output + ".bim")) as file:
        for line in file:
            row = line.strip().split()
            row[1] = ":".join([row[0], row[3], row[5], row[4]])
            data.append("\t".join(row) + "\n")
    with open(plink_output + ".bim", 'w') as f:
        f.writelines(data)
    # Merge-mode 6 writes genotype mismatches to <out>.diff.
    utils.run_command(["plink", "--bfile", plink_output,
                       "--bmerge", hl_output, "--merge-mode",
                       "6", "--out", merge_output])
    same = True
    with open(merge_output + ".diff") as f:
        for line in f:
            row = line.strip().split()
            # Any line beyond the header row means a genotype differed.
            if row != ["SNP", "FID", "IID", "NEW", "OLD"]:
                same = False
                break
    self.assertTrue(same)
def test_export_plink_exprs(self):
    """Exercise export_plink's FAM/BIM expression arguments and error paths."""
    ds = self.get_dataset()
    # Column-name maps for reading back the headerless .fam/.bim files.
    fam_mapping = {'f0': 'fam_id', 'f1': 'ind_id', 'f2': 'pat_id', 'f3': 'mat_id',
                   'f4': 'is_female', 'f5': 'pheno'}
    bim_mapping = {'f0': 'contig', 'f1': 'varid', 'f2': 'cm_position',
                   'f3': 'position', 'f4': 'a1', 'f5': 'a2'}

    # Test default arguments
    out1 = utils.new_temp_file()
    hl.export_plink(ds, out1)
    fam1 = (hl.import_table(out1 + '.fam', no_header=True, impute=False, missing="")
            .rename(fam_mapping))
    bim1 = (hl.import_table(out1 + '.bim', no_header=True, impute=False)
            .rename(bim_mapping))
    # Defaults: zeroed pedigree fields, missing phenotype, contig:pos:a2:a1
    # variant IDs, and a 0.0 centimorgan position.
    self.assertTrue(fam1.all((fam1.fam_id == "0") & (fam1.pat_id == "0") &
                             (fam1.mat_id == "0") & (fam1.is_female == "0") &
                             (fam1.pheno == "NA")))
    self.assertTrue(bim1.all((bim1.varid == bim1.contig + ":" + bim1.position + ":" + bim1.a2 + ":" + bim1.a1) &
                             (bim1.cm_position == "0.0")))

    # Test non-default FAM arguments
    out2 = utils.new_temp_file()
    hl.export_plink(ds, out2, ind_id=ds.s, fam_id=ds.s, pat_id="nope",
                    mat_id="nada", is_female=True, pheno=False)
    fam2 = (hl.import_table(out2 + '.fam', no_header=True, impute=False, missing="")
            .rename(fam_mapping))
    # Booleans are encoded: is_female=True -> "2", pheno=False -> "1".
    self.assertTrue(fam2.all((fam2.fam_id == fam2.ind_id) & (fam2.pat_id == "nope") &
                             (fam2.mat_id == "nada") & (fam2.is_female == "2") &
                             (fam2.pheno == "1")))

    # Test quantitative phenotype
    out3 = utils.new_temp_file()
    hl.export_plink(ds, out3, ind_id=ds.s, pheno=hl.float64(hl.len(ds.s)))
    fam3 = (hl.import_table(out3 + '.fam', no_header=True, impute=False, missing="")
            .rename(fam_mapping))
    self.assertTrue(fam3.all((fam3.fam_id == "0") & (fam3.pat_id == "0") &
                             (fam3.mat_id == "0") & (fam3.is_female == "0") &
                             (fam3.pheno != "0") & (fam3.pheno != "NA")))

    # Test non-default BIM arguments
    out4 = utils.new_temp_file()
    hl.export_plink(ds, out4, varid="hello", cm_position=100)
    bim4 = (hl.import_table(out4 + '.bim', no_header=True, impute=False)
            .rename(bim_mapping))
    self.assertTrue(bim4.all((bim4.varid == "hello") & (bim4.cm_position == "100.0")))

    # Test call expr
    out5 = utils.new_temp_file()
    ds_call = ds.annotate_entries(gt_fake=hl.call(0, 0))
    hl.export_plink(ds_call, out5, call=ds_call.gt_fake)
    ds_all_hom_ref = hl.import_plink(out5 + '.bed', out5 + '.bim', out5 + '.fam')
    # Every exported call was 0/0, so the reimport must be all hom-ref.
    nerrors = ds_all_hom_ref.aggregate_entries(agg.count_where(~ds_all_hom_ref.GT.is_hom_ref()))
    self.assertTrue(nerrors == 0)

    # Test white-space in FAM id expr raises error
    with self.assertRaisesRegex(TypeError, "has spaces in the following values:"):
        hl.export_plink(ds, utils.new_temp_file(), mat_id="hello world")

    # Test white-space in varid expr raises error
    with self.assertRaisesRegex(utils.FatalError, "no white space allowed:"):
        hl.export_plink(ds, utils.new_temp_file(), varid="hello world")
def test_export_gen(self):
    """Round-trip a GEN dataset through export_gen/import_gen."""
    original = hl.import_gen(resource('example.gen'),
                             sample_file=resource('example.sample'),
                             contig_recoding={"01": "1"},
                             reference_genome='GRCh37',
                             min_partitions=3)
    out_prefix = '/tmp/test_export_gen'
    hl.export_gen(original, out_prefix)
    reimported = hl.import_gen(out_prefix + '.gen',
                               sample_file=out_prefix + '.sample',
                               reference_genome='GRCh37',
                               min_partitions=3)
    # GEN stores genotype probabilities at limited precision, hence tolerance.
    self.assertTrue(original._same(reimported, tolerance=3E-4, absolute=True))
def test_export_gen_exprs(self):
    """export_gen expression arguments must be honored on reimport."""
    gen = hl.import_gen(resource('example.gen'),
                        sample_file=resource('example.sample'),
                        contig_recoding={"01": "1"},
                        reference_genome='GRCh37',
                        min_partitions=3).add_col_index().add_row_index()
    out1 = utils.new_temp_file()
    # Export with every id/varid/rsid/gp overridden by an expression.
    hl.export_gen(gen, out1, id1=hl.str(gen.col_idx), id2=hl.str(gen.col_idx), missing=0.5,
                  varid=hl.str(gen.row_idx), rsid=hl.str(gen.row_idx), gp=[0.0, 1.0, 0.0])
    in1 = (hl.import_gen(out1 + '.gen', sample_file=out1 + '.sample', min_partitions=3)
           .add_col_index()
           .add_row_index())
    self.assertTrue(in1.aggregate_entries(agg.fraction(in1.GP == [0.0, 1.0, 0.0])) == 1.0)
    self.assertTrue(in1.aggregate_rows(agg.fraction((in1.varid == hl.str(in1.row_idx)) &
                                                    (in1.rsid == hl.str(in1.row_idx)))) == 1.0)
    # Was missing the `== 1.0`: a bare fraction is truthy for *any* nonzero
    # match rate, so the sample-id check could never fail. Require all match,
    # consistent with the two assertions above.
    self.assertTrue(in1.aggregate_cols(agg.fraction(in1.s == hl.str(in1.col_idx))) == 1.0)
def test_tdt(self):
    """Compare transmission_disequilibrium_test output against golden results."""
    pedigree = hl.Pedigree.read(resource('tdt.fam'))
    tdt_tab = (hl.transmission_disequilibrium_test(
        hl.split_multi_hts(hl.import_vcf(resource('tdt.vcf'), min_partitions=4)),
        pedigree))
    truth = hl.import_table(
        resource('tdt_results.tsv'),
        types={'POSITION': hl.tint32, 'T': hl.tint32, 'U': hl.tint32,
               'Chi2': hl.tfloat64, 'Pval': hl.tfloat64})
    # Re-key the truth table by (locus, alleles) to line up with tdt_tab.
    truth = (truth
             .transmute(locus=hl.locus(truth.CHROM, truth.POSITION),
                        alleles=[truth.REF, truth.ALT])
             .key_by('locus', 'alleles'))
    if tdt_tab.count() != truth.count():
        self.fail('Result has {} rows but should have {} rows'.format(tdt_tab.count(), truth.count()))
    # Join computed and truth rows, dropping NaN p-values on either side.
    bad = (tdt_tab.filter(hl.is_nan(tdt_tab.p_value), keep=False)
           .join(truth.filter(hl.is_nan(truth.Pval), keep=False), how='outer'))
    bad.describe()
    # Keep only rows where any statistic disagrees beyond tolerance.
    bad = bad.filter(~(
        (bad.t == bad.T) &
        (bad.u == bad.U) &
        (hl.abs(bad.chi2 - bad.Chi2) < 0.001) &
        (hl.abs(bad.p_value - bad.Pval) < 0.001)))
    if bad.count() != 0:
        # NOTE(review): `bad.v` looks stale — no `v` field is defined above.
        # This only runs on failure; confirm the field name is still valid.
        bad.order_by(hl.asc(bad.v)).show()
        self.fail('Found rows in violation of the predicate (see show output)')
def test_maximal_independent_set(self):
    """Tie-breaker should keep the lower-indexed node of each (i, i+10) edge."""
    t = hl.utils.range_table(10)
    graph = t.select(i=hl.int64(t.idx), j=hl.int64(t.idx + 10), bad_type=hl.float32(t.idx))
    # prefer to remove nodes with higher index
    mis_table = hl.maximal_independent_set(graph.i, graph.j, True, lambda l, r: l - r)
    kept = sorted(row['node'] for row in mis_table.collect())
    self.assertEqual(kept, list(range(0, 10)))
    expected_type = hl.tstruct(node=hl.tint64)
    self.assertEqual(mis_table.row.dtype, expected_type)
    self.assertEqual(mis_table.key.dtype, expected_type)
    # Mismatched node types and unrelated expressions must be rejected.
    self.assertRaises(ValueError, lambda: hl.maximal_independent_set(graph.i, graph.bad_type, True))
    self.assertRaises(ValueError, lambda: hl.maximal_independent_set(graph.i, hl.utils.range_table(10).idx, True))
    self.assertRaises(ValueError, lambda: hl.maximal_independent_set(hl.literal(1), hl.literal(2), True))
def test_maximal_independent_set2(self):
    """On the 3-cube graph, the result must be one of the known independent sets."""
    cube_edges = [(0, 4), (0, 1), (0, 2), (1, 5), (1, 3), (2, 3), (2, 6),
                  (3, 7), (4, 5), (4, 6), (5, 7), (6, 7)]
    rows = [{"i": l, "j": r} for l, r in cube_edges]
    t = hl.Table.parallelize(rows, hl.tstruct(i=hl.tint64, j=hl.tint64))
    mis_t = hl.maximal_independent_set(t.i, t.j)
    self.assertTrue(mis_t.row.dtype == hl.tstruct(node=hl.tint64) and
                    mis_t.globals.dtype == hl.tstruct())
    mis = {row.node for row in mis_t.collect()}
    maximal_indep_sets = [{0, 6, 5, 3}, {1, 4, 7, 2}]
    non_maximal_indep_sets = [{0, 7}, {6, 1}]
    # The algorithm may return a maximal set or one of the known smaller ones.
    self.assertTrue(mis in non_maximal_indep_sets or mis in maximal_indep_sets)
def test_maximal_independent_set3(self):
    """With a case/control tie-breaker, only case nodes should survive."""
    is_case = {"A", "C", "E", "G", "H"}
    pairs = [("A", "B"), ("C", "D"), ("E", "F"), ("G", "H")]
    edges = [{"i": {"id": l, "is_case": l in is_case},
              "j": {"id": r, "is_case": r in is_case}} for l, r in pairs]
    t = hl.Table.parallelize(edges, hl.tstruct(i=hl.tstruct(id=hl.tstr, is_case=hl.tbool),
                                               j=hl.tstruct(id=hl.tstr, is_case=hl.tbool)))

    # PEP 8 (E731): a named function should be a def, not an assigned lambda.
    def tiebreaker(l, r):
        # Prefer removing controls: a case node always outranks a control.
        return (hl.case()
                .when(l.is_case & (~r.is_case), -1)
                .when(~(l.is_case) & r.is_case, 1)
                .default(0))

    mis = hl.maximal_independent_set(t.i, t.j, tie_breaker=tiebreaker)
    # The G-H edge is case-case, so either endpoint may survive.
    expected_sets = [{"A", "C", "E", "G"}, {"A", "C", "E", "H"}]
    self.assertTrue(mis.all(mis.node.is_case))
    self.assertTrue(set([row.id for row in mis.select(mis.node.id).collect()]) in expected_sets)
def test_filter_alleles(self):
    """Keeping no alleles removes every row; keeping all alleles removes none."""
    # poor man's Gen
    for path in [resource('sample.vcf'),
                 resource('multipleChromosomes.vcf'),
                 resource('sample2.vcf')]:
        ds = hl.import_vcf(path)
        self.assertEqual(
            hl.filter_alleles(ds, lambda a, i: False).count_rows(), 0)
        self.assertEqual(hl.filter_alleles(ds, lambda a, i: True).count_rows(),
                         ds.count_rows())
def test_filter_alleles_hts(self):
    """filter_alleles_hts subset/downcode output must match golden VCFs."""
    # 1 variant: A:T,G
    ds = hl.import_vcf(resource('filter_alleles/input.vcf'))

    def filtered(pred, subset):
        # Drop the bookkeeping fields so only genotype data is compared.
        return (hl.filter_alleles_hts(ds, pred, subset=subset)
                .drop('old_alleles', 'old_locus', 'new_to_old', 'old_to_new'))

    self.assertTrue(filtered(lambda a, i: a == 'T', True)
                    ._same(hl.import_vcf(resource('filter_alleles/keep_allele1_subset.vcf'))))
    self.assertTrue(filtered(lambda a, i: a == 'G', True)
                    ._same(hl.import_vcf(resource('filter_alleles/keep_allele2_subset.vcf'))))
    self.assertTrue(filtered(lambda a, i: a != 'G', False)
                    ._same(hl.import_vcf(resource('filter_alleles/keep_allele1_downcode.vcf'))))
    # (Removed a leftover debugging `.old_to_new.show()` call here: it only
    # printed to stdout and asserted nothing.)
    self.assertTrue(filtered(lambda a, i: a == 'G', False)
                    ._same(hl.import_vcf(resource('filter_alleles/keep_allele2_downcode.vcf'))))
def test_ld_prune(self):
    """After pruning, no same-contig pair of kept variants within the window has r2 >= 0.2."""
    ds = hl.split_multi_hts(hl.import_vcf(resource('sample.vcf')))
    pruned_table = hl.ld_prune(ds.GT, r2=0.2, bp_window_size=1000000)
    # Restrict the dataset to the variants ld_prune kept.
    filtered_ds = (ds.filter_rows(hl.is_defined(pruned_table[(ds.locus, ds.alleles)])))
    filtered_ds = filtered_ds.annotate_rows(stats=agg.stats(filtered_ds.GT.n_alt_alleles()))
    filtered_ds = filtered_ds.annotate_rows(
        mean=filtered_ds.stats.mean, sd_reciprocal=1 / filtered_ds.stats.stdev)
    # Build the normalized genotype matrix: (G @ G.T) ** 2 then gives the
    # squared correlation (r2) between every pair of kept variants.
    n_samples = filtered_ds.count_cols()
    normalized_mean_imputed_genotype_expr = (
        hl.cond(hl.is_defined(filtered_ds['GT']),
                (filtered_ds['GT'].n_alt_alleles() - filtered_ds['mean'])
                * filtered_ds['sd_reciprocal'] * (1 / hl.sqrt(n_samples)), 0))
    block_matrix = BlockMatrix.from_entry_expr(normalized_mean_imputed_genotype_expr)
    entries = ((block_matrix @ block_matrix.T) ** 2).entries()
    # Map matrix row indices back to loci.
    index_table = filtered_ds.add_row_index().rows().key_by('row_idx').select('locus')
    entries = entries.annotate(locus_i=index_table[entries.i].locus, locus_j=index_table[entries.j].locus)
    # Only distinct, same-contig pairs within the pruning window matter.
    contig_filter = entries.locus_i.contig == entries.locus_j.contig
    window_filter = (hl.abs(entries.locus_i.position - entries.locus_j.position)) <= 1000000
    identical_filter = entries.i != entries.j
    self.assertEqual(entries.filter(
        (entries['entry'] >= 0.2) & (contig_filter) & (window_filter) & (identical_filter)).count(), 0)
def test_ld_prune_inputs(self):
    """ld_prune must reject memory_per_core=0."""
    ds = hl.split_multi_hts(hl.import_vcf(resource('sample.vcf')))
    with self.assertRaises(ValueError):
        hl.ld_prune(ds.GT, r2=0.2, bp_window_size=1000000, memory_per_core=0)
def test_ld_prune_no_prune(self):
    """With a zero-bp window, only single-genotype (constant) variants drop out."""
    ds = hl.balding_nichols_model(n_populations=1, n_samples=10, n_variants=100)
    pruned_table = hl.ld_prune(ds.GT, r2=0.1, bp_window_size=0)
    # Variants with more than one distinct genotype must all survive.
    polymorphic = ds.filter_rows(agg.collect_as_set(ds.GT).size() > 1, keep=True)
    self.assertEqual(pruned_table.count(), polymorphic.count_rows())
def test_ld_prune_identical_variants(self):
    """Perfectly correlated duplicate variants collapse to a single survivor."""
    ds = hl.import_vcf(resource('ldprune2.vcf'), min_partitions=2)
    self.assertEqual(hl.ld_prune(ds.GT).count(), 1)
def test_ld_prune_maf(self):
    """With r2=0.0, ld_prune keeps the variant with the highest MAF."""
    ds = hl.balding_nichols_model(n_populations=1, n_samples=50, n_variants=10, n_partitions=10).cache()
    freq = ds.select_rows(p=hl.agg.sum(ds.GT.n_alt_alleles()) / (2 * 50)).rows()
    # Fold allele frequency onto [0, 0.5] to get minor-allele frequency.
    freq = freq.select(maf=hl.cond(freq.p <= 0.5, freq.p, 1.0 - freq.p)).cache()
    pruned_table = hl.ld_prune(ds.GT, 0.0)
    positions = pruned_table.locus.position.collect()
    self.assertEqual(len(positions), 1)
    survivor_pos = hl.literal(positions[0])
    survivor_maf = freq.filter(freq.locus.position == survivor_pos).maf.collect()[0]
    self.assertEqual(survivor_maf, max(freq.maf.collect()))
def test_ld_prune_call_expression(self):
    """ld_prune must accept any call expression, not only the GT field."""
    ds = hl.import_vcf(resource("ldprune2.vcf"), min_partitions=2)
    renamed = ds.select_entries(foo=ds.GT)
    self.assertEqual(hl.ld_prune(renamed.foo).count(), 1)
def test_entries(self):
    """BlockMatrix.entries() must reproduce the (i, j, entry) coordinate table."""
    n_rows, n_cols = 5, 3
    rows = [{'i': i, 'j': j, 'entry': float(i + j)} for i in range(n_rows) for j in range(n_cols)]
    schema = hl.tstruct(i=hl.tint32, j=hl.tint32, entry=hl.tfloat64)
    table = hl.Table.parallelize([hl.struct(i=row['i'], j=row['j'], entry=row['entry']) for row in rows], schema)
    # entries() emits int64 coordinates, so widen before keying.
    table = table.annotate(i=hl.int64(table.i),
                           j=hl.int64(table.j)).key_by('i', 'j')
    ndarray = np.array([row['entry'] for row in rows]).reshape((n_rows, n_cols))
    # The coordinate table must be independent of the chosen block size.
    for block_size in [1, 2, 1024]:
        entries_table = BlockMatrix.from_numpy(ndarray, block_size).entries()
        self.assertEqual(entries_table.count(), n_cols * n_rows)
        self.assertEqual(len(entries_table.row), 3)
        self.assertTrue(table._same(entries_table))
def test_filter_intervals(self):
    """filter_intervals must accept intervals in several equivalent forms."""
    ds = hl.import_vcf(resource('sample.vcf'), min_partitions=20)
    self.assertEqual(
        hl.filter_intervals(ds, [hl.parse_locus_interval('20:10639222-10644705')]).count_rows(), 3)
    # The same region split into two adjacent intervals: count must not change.
    intervals = [hl.parse_locus_interval('20:10639222-10644700'),
                 hl.parse_locus_interval('20:10644700-10644705')]
    self.assertEqual(hl.filter_intervals(ds, intervals).count_rows(), 3)
    # Intervals wrapped in a Hail array expression.
    intervals = hl.array([hl.parse_locus_interval('20:10639222-10644700'),
                          hl.parse_locus_interval('20:10644700-10644705')])
    self.assertEqual(hl.filter_intervals(ds, intervals).count_rows(), 3)
    # A mix of Python interval values (.value) and interval expressions.
    intervals = hl.array([hl.parse_locus_interval('20:10639222-10644700').value,
                          hl.parse_locus_interval('20:10644700-10644705')])
    self.assertEqual(hl.filter_intervals(ds, intervals).count_rows(), 3)
    # Plain Python interval values using the closed-endpoint syntax.
    intervals = [hl.parse_locus_interval('[20:10019093-10026348]').value,
                 hl.parse_locus_interval('[20:17705793-17716416]').value]
    self.assertEqual(hl.filter_intervals(ds, intervals).count_rows(), 4)
def test_filter_intervals_compound_partition_key(self):
    """filter_intervals should work with a compound (locus, alleles) row key."""
    ds = hl.import_vcf(resource('sample.vcf'), min_partitions=20)
    ds = (ds.annotate_rows(variant=hl.struct(locus=ds.locus, alleles=ds.alleles))
          .key_rows_by('locus', 'alleles'))
    lo = hl.Struct(locus=hl.Locus('20', 10639222), alleles=['A', 'T'])
    hi = hl.Struct(locus=hl.Locus('20', 10644700), alleles=['A', 'T'])
    self.assertEqual(hl.filter_intervals(ds, [hl.Interval(lo, hi)]).count_rows(), 3)
def test_balding_nichols_model(self):
    """balding_nichols_model should honor its dimensions and echo its globals."""
    from hail.stats import TruncatedBetaDist
    ds = hl.balding_nichols_model(2, 20, 25, 3,
                                  pop_dist=[1.0, 2.0],
                                  fst=[.02, .06],
                                  af_dist=TruncatedBetaDist(a=0.01, b=2.0, min=0.05, max=0.95),
                                  seed=1)
    self.assertEqual(ds.count_cols(), 20)
    self.assertEqual(ds.count_rows(), 25)
    self.assertEqual(ds.n_partitions(), 3)
    # Every simulation parameter must be recorded in the globals.
    glob = ds.globals
    expected_globals = [
        (glob.n_populations, 2),
        (glob.n_samples, 20),
        (glob.n_variants, 25),
        (glob.pop_dist, [1, 2]),
        (glob.fst, [.02, .06]),
        (glob.seed, 1),
        (glob.ancestral_af_dist,
         hl.Struct(type='TruncatedBetaDist', a=0.01, b=2.0, min=0.05, max=0.95)),
    ]
    for actual, expected in expected_globals:
        self.assertEqual(actual.value, expected)
def test_skat(self):
    """Smoke-test hl.skat in both linear and logistic modes."""
    ds2 = hl.import_vcf(resource('sample2.vcf'))
    covariates = (hl.import_table(resource("skat.cov"), impute=True)
                  .key_by("Sample"))
    phenotypes = (hl.import_table(resource("skat.pheno"),
                                  types={"Pheno": hl.tfloat64},
                                  missing="0")
                  .key_by("Sample"))
    intervals = (hl.import_locus_intervals(resource("skat.interval_list")))
    weights = (hl.import_table(resource("skat.weights"),
                               types={"locus": hl.tlocus(),
                                      "weight": hl.tfloat64})
               .key_by("locus"))
    ds = hl.split_multi_hts(ds2)
    # Group variants by interval ("gene") and attach per-locus weights.
    ds = ds.annotate_rows(gene=intervals[ds.locus],
                          weight=weights[ds.locus].weight)
    ds = ds.annotate_cols(pheno=phenotypes[ds.s].Pheno,
                          cov=covariates[ds.s])
    # Recode the 1.0/2.0 phenotype encoding to False/True (missing otherwise).
    ds = ds.annotate_cols(pheno=hl.cond(ds.pheno == 1.0,
                                        False,
                                        hl.cond(ds.pheno == 2.0,
                                                True,
                                                hl.null(hl.tbool))))
    # Linear SKAT on alt-allele counts; .count() forces evaluation.
    hl.skat(key_expr=ds.gene,
            weight_expr=ds.weight,
            y=ds.pheno,
            x=ds.GT.n_alt_alleles(),
            covariates=[ds.cov.Cov1, ds.cov.Cov2],
            logistic=False).count()
    # Logistic SKAT on PL-derived dosages.
    hl.skat(key_expr=ds.gene,
            weight_expr=ds.weight,
            y=ds.pheno,
            x=hl.pl_dosage(ds.PL),
            covariates=[ds.cov.Cov1, ds.cov.Cov2],
            logistic=True).count()
def test_import_gen(self):
    """import_gen should recode contigs and load all 199 variants on GRCh37."""
    rows = hl.import_gen(resource('example.gen'),
                         sample_file=resource('example.sample'),
                         contig_recoding={"01": "1"},
                         reference_genome='GRCh37').rows()
    # Contig "01" must have been recoded to "1" everywhere.
    self.assertTrue(rows.all(rows.locus.contig == "1"))
    self.assertEqual(rows.count(), 199)
    self.assertEqual(rows.locus.dtype, hl.tlocus('GRCh37'))
def test_import_gen_no_reference_specified(self):
    """Without a reference genome, GEN loci are (contig, position) structs."""
    no_ref = hl.import_gen(resource('example.gen'),
                           sample_file=resource('example.sample'),
                           reference_genome=None)
    expected_locus_type = hl.tstruct(contig=hl.tstr, position=hl.tint32)
    self.assertTrue(no_ref.locus.dtype == expected_locus_type)
    self.assertEqual(no_ref.count_rows(), 199)
def test_import_bgen(self):
    """import_bgen entry-field combinations across v1.1, 8-bit, and 10-bit files."""
    hl.index_bgen(resource('example.v11.bgen'))
    bgen_rows = hl.import_bgen(resource('example.v11.bgen'),
                               entry_fields=['GT', 'GP'],
                               sample_file=resource('example.sample'),
                               contig_recoding={'01': '1'},
                               reference_genome='GRCh37').rows()
    self.assertTrue(bgen_rows.all(bgen_rows.locus.contig == '1'))
    self.assertEqual(bgen_rows.count(), 199)
    hl.index_bgen(resource('example.8bits.bgen'))
    # dosage alone -> a single float64 entry field.
    bgen = hl.import_bgen(resource('example.8bits.bgen'),
                          entry_fields=['dosage'],
                          contig_recoding={'01': '1'},
                          reference_genome='GRCh37')
    self.assertEqual(bgen.entry.dtype, hl.tstruct(dosage=hl.tfloat64))
    # GT + GP -> call plus probability array.
    bgen = hl.import_bgen(resource('example.8bits.bgen'),
                          entry_fields=['GT', 'GP'],
                          sample_file=resource('example.sample'),
                          contig_recoding={'01': '1'},
                          reference_genome='GRCh37')
    self.assertEqual(bgen.entry.dtype, hl.tstruct(GT=hl.tcall, GP=hl.tarray(hl.tfloat64)))
    self.assertEqual(bgen.count_rows(), 199)
    hl.index_bgen(resource('example.10bits.bgen'))
    # All three entry fields together on a 10-bit-probability file.
    bgen = hl.import_bgen(resource('example.10bits.bgen'),
                          entry_fields=['GT', 'GP', 'dosage'],
                          contig_recoding={'01': '1'},
                          reference_genome='GRCh37')
    self.assertEqual(bgen.entry.dtype, hl.tstruct(GT=hl.tcall, GP=hl.tarray(hl.tfloat64), dosage=hl.tfloat64))
    self.assertEqual(bgen.locus.dtype, hl.tlocus('GRCh37'))
def test_import_bgen_no_entry_fields(self):
    """Importing a BGEN with an empty entry-field list still yields a valid dataset."""
    hl.index_bgen(resource('example.v11.bgen'))
    mt = hl.import_bgen(resource('example.v11.bgen'),
                        entry_fields=[],
                        sample_file=resource('example.sample'),
                        contig_recoding={'01': '1'},
                        reference_genome='GRCh37')
    # Smoke check: the underlying JVM dataset must pass its own type check.
    mt._jvds.typecheck()
def test_import_bgen_no_reference_specified(self):
    """Without a reference genome, BGEN loci are (contig, position) structs."""
    no_ref = hl.import_bgen(resource('example.10bits.bgen'),
                            entry_fields=['GT', 'GP', 'dosage'],
                            contig_recoding={'01': '1'},
                            reference_genome=None)
    expected_locus_type = hl.tstruct(contig=hl.tstr, position=hl.tint32)
    self.assertTrue(no_ref.locus.dtype == expected_locus_type)
    self.assertEqual(no_ref.count_rows(), 199)
def test_import_vcf(self):
    """import_vcf should apply contig recoding against the requested reference."""
    vcf = hl.split_multi_hts(
        hl.import_vcf(resource('sample2.vcf'),
                      reference_genome=hl.get_reference('GRCh38'),
                      contig_recoding={"22": "chr22"}))
    vcf_table = vcf.rows()
    self.assertTrue(vcf_table.all(vcf_table.locus.contig == "chr22"))
    # Was `assertTrue(vcf.locus.dtype, hl.tlocus('GRCh37'))`: assertTrue's
    # second argument is the failure *message*, so nothing was checked — and
    # the dataset was in fact imported on GRCh38. Assert the real locus type.
    self.assertEqual(vcf.locus.dtype, hl.tlocus('GRCh38'))
def test_import_vcf_no_reference_specified(self):
    """Without a reference genome, VCF loci are (contig, position) structs."""
    no_ref = hl.import_vcf(resource('sample2.vcf'),
                           reference_genome=None)
    expected_locus_type = hl.tstruct(contig=hl.tstr, position=hl.tint32)
    self.assertTrue(no_ref.locus.dtype == expected_locus_type)
    self.assertEqual(no_ref.count_rows(), 735)
def test_import_vcf_bad_reference_allele(self):
    """A variant with an invalid reference base still imports as one row."""
    self.assertEqual(hl.import_vcf(resource('invalid_base.vcf')).count_rows(), 1)
def test_import_vcf_flags_are_defined(self):
# issue 3277
t = hl.import_vcf(resource('sample.vcf')).rows()
self.assertTrue(t.all(hl.is_defined(t.info.NEGATIVE_TRAIN_SITE) &
hl.is_defined(t.info.POSITIVE_TRAIN_SITE) &
hl.is_defined(t.info.DB) &
hl.is_defined(t.info.DS)))
def test_import_vcf_can_import_float_array_format(self):
mt = hl.import_vcf(resource('floating_point_array.vcf'))
self.assertTrue(mt.aggregate_entries(hl.agg.all(mt.numeric_array == [1.5, 2.5])))
def test_import_vcf_can_import_negative_numbers(self):
mt = hl.import_vcf(resource('negative_format_fields.vcf'))
self.assertTrue(mt.aggregate_entries(hl.agg.all(mt.negative_int == -1) &
hl.agg.all(mt.negative_float == -1.5) &
hl.agg.all(mt.negative_int_array == [-1, -2]) &
hl.agg.all(mt.negative_float_array == [-0.5, -1.5])))
    def test_import_vcf_missing_info_field_elements(self):
        """With array_elements_required=False, '.' elements inside INFO arrays
        import as missing (None) rather than raising."""
        mt = hl.import_vcf(resource('missingInfoArray.vcf'), reference_genome='GRCh37', array_elements_required=False)
        mt = mt.select_rows(FOO=mt.info.FOO, BAR=mt.info.BAR)
        # Expected table: each missing array element becomes None in place.
        expected = hl.Table.parallelize([{'locus': hl.Locus('X', 16050036), 'alleles': ['A', 'C'],
                                          'FOO': [1, None], 'BAR': [2, None, None]},
                                         {'locus': hl.Locus('X', 16061250), 'alleles': ['T', 'A', 'C'],
                                          'FOO': [None, 2, None], 'BAR': [None, 1.0, None]}],
                                        hl.tstruct(locus=hl.tlocus('GRCh37'), alleles=hl.tarray(hl.tstr),
                                                   FOO=hl.tarray(hl.tint), BAR=hl.tarray(hl.tfloat64)),
                                        key=['locus', 'alleles'])
        self.assertTrue(mt.rows()._same(expected))
    def test_import_vcf_missing_format_field_elements(self):
        """With array_elements_required=False, '.' elements inside FORMAT arrays
        (AD, PL) import as missing (None) rather than raising."""
        mt = hl.import_vcf(resource('missingFormatArray.vcf'), reference_genome='GRCh37', array_elements_required=False)
        mt = mt.select_rows().select_entries('AD', 'PL')
        # Expected entries table, keyed by (locus, alleles, sample): missing
        # array elements appear as None in their original positions.
        expected = hl.Table.parallelize([{'locus': hl.Locus('X', 16050036), 'alleles': ['A', 'C'], 's': 'C1046::HG02024',
                                          'AD': [None, None], 'PL': [0, None, 180]},
                                         {'locus': hl.Locus('X', 16050036), 'alleles': ['A', 'C'], 's': 'C1046::HG02025',
                                          'AD': [None, 6], 'PL': [70, None]},
                                         {'locus': hl.Locus('X', 16061250), 'alleles': ['T', 'A', 'C'], 's': 'C1046::HG02024',
                                          'AD': [0, 0, None], 'PL': [396, None, None, 33, None, 0]},
                                         {'locus': hl.Locus('X', 16061250), 'alleles': ['T', 'A', 'C'], 's': 'C1046::HG02025',
                                          'AD': [0, 0, 9], 'PL': [None, None, None]}],
                                        hl.tstruct(locus=hl.tlocus('GRCh37'), alleles=hl.tarray(hl.tstr), s=hl.tstr,
                                                   AD=hl.tarray(hl.tint), PL=hl.tarray(hl.tint)),
                                        key=['locus', 'alleles', 's'])
        self.assertTrue(mt.entries()._same(expected))
    def test_export_import_plink_same(self):
        """Exporting a dataset to PLINK and re-importing it (a2_reference=True)
        should reproduce the original matrix table exactly."""
        mt = self.get_dataset()
        # rsid encodes contig:pos:ref:alt so each .bim row key is unique.
        mt = mt.select_rows(rsid=hl.delimit([mt.locus.contig, hl.str(mt.locus.position), mt.alleles[0], mt.alleles[1]], ':'),
                            cm_position=15.0)
        # Null out all .fam columns so the round trip has deterministic values.
        mt = mt.select_cols(fam_id=hl.null(hl.tstr), pat_id=hl.null(hl.tstr), mat_id=hl.null(hl.tstr),
                            is_female=hl.null(hl.tbool), is_case=hl.null(hl.tbool))
        mt = mt.select_entries('GT')
        bfile = '/tmp/test_import_export_plink'
        hl.export_plink(mt, bfile, ind_id=mt.s, cm_position=mt.cm_position)
        mt_imported = hl.import_plink(bfile + '.bed', bfile + '.bim', bfile + '.fam',
                                      a2_reference=True, reference_genome='GRCh37')
        self.assertTrue(mt._same(mt_imported))
        # cm_position must survive the round trip through the .bim file.
        self.assertTrue(mt.aggregate_rows(hl.agg.all(mt.cm_position == 15.0)))
def test_import_plink_empty_fam(self):
mt = self.get_dataset().drop_cols()
bfile = '/tmp/test_empty_fam'
hl.export_plink(mt, bfile, ind_id=mt.s)
with self.assertRaisesRegex(utils.FatalError, "Empty .fam file"):
hl.import_plink(bfile + '.bed', bfile + '.bim', bfile + '.fam')
def test_import_plink_empty_bim(self):
mt = self.get_dataset().drop_rows()
bfile = '/tmp/test_empty_bim'
hl.export_plink(mt, bfile, ind_id=mt.s)
with self.assertRaisesRegex(utils.FatalError, ".bim file does not contain any variants"):
hl.import_plink(bfile + '.bed', bfile + '.bim', bfile + '.fam')
    def test_import_plink_a1_major(self):
        """Importing with a2_reference=False should swap allele roles relative to
        a2_reference=True, with variant QC counts mirrored accordingly."""
        mt = self.get_dataset()
        bfile = '/tmp/sample_plink'
        hl.export_plink(mt, bfile, ind_id=mt.s)
        def get_data(a2_reference):
            # Re-import the exported fileset under the given allele convention
            # and return per-variant QC keyed by rsid.
            mt_imported = hl.import_plink(bfile + '.bed', bfile + '.bim',
                                          bfile + '.fam', a2_reference=a2_reference)
            return (hl.variant_qc(mt_imported)
                    .rows()
                    .key_by('rsid'))
        a2 = get_data(a2_reference=True)
        a1 = get_data(a2_reference=False)
        # Join the two imports on rsid so each variant's a1/a2 stats line up.
        j = (a2.annotate(a1_alleles=a1[a2.rsid].alleles, a1_vqc=a1[a2.rsid].variant_qc)
             .rename({'variant_qc': 'a2_vqc', 'alleles': 'a2_alleles'}))
        # Alleles should be reversed; het and missing counts are convention-
        # independent; homozygote counts swap places.
        self.assertTrue(j.all((j.a1_alleles[0] == j.a2_alleles[1]) &
                              (j.a1_alleles[1] == j.a2_alleles[0]) &
                              (j.a1_vqc.n_not_called == j.a2_vqc.n_not_called) &
                              (j.a1_vqc.n_het == j.a2_vqc.n_het) &
                              (j.a1_vqc.homozygote_count[0] == j.a2_vqc.homozygote_count[1]) &
                              (j.a1_vqc.homozygote_count[1] == j.a2_vqc.homozygote_count[0])))
def test_import_plink_contig_recoding_w_reference(self):
vcf = hl.split_multi_hts(
hl.import_vcf(resource('sample2.vcf'),
reference_genome=hl.get_reference('GRCh38'),
contig_recoding={"22": "chr22"}))
hl.export_plink(vcf, '/tmp/sample_plink')
bfile = '/tmp/sample_plink'
plink = hl.import_plink(
bfile + '.bed', bfile + '.bim', bfile + '.fam',
a2_reference=True,
contig_recoding={'chr22': '22'},
reference_genome='GRCh37').rows()
self.assertTrue(plink.all(plink.locus.contig == "22"))
self.assertEqual(vcf.count_rows(), plink.count())
self.assertTrue(plink.locus.dtype, hl.tlocus('GRCh37'))
def test_import_plink_no_reference_specified(self):
bfile = resource('fastlmmTest')
plink = hl.import_plink(bfile + '.bed', bfile + '.bim', bfile + '.fam',
reference_genome=None)
self.assertTrue(plink.locus.dtype == hl.tstruct(contig=hl.tstr, position=hl.tint32))
    def test_import_matrix_table(self):
        """Exercise import_matrix_table across header/row-key configurations and
        check that each declared field lands on the expected axis."""
        mt = hl.import_matrix_table(doctest_resource('matrix1.tsv'),
                                    row_fields={'Barcode': hl.tstr, 'Tissue': hl.tstr, 'Days': hl.tfloat32})
        # Declared row fields must be row-indexed; col_id is column-indexed and
        # the implicit row_id is row-indexed.
        self.assertEqual(mt['Barcode']._indices, mt._row_indices)
        self.assertEqual(mt['Tissue']._indices, mt._row_indices)
        self.assertEqual(mt['Days']._indices, mt._row_indices)
        self.assertEqual(mt['col_id']._indices, mt._col_indices)
        self.assertEqual(mt['row_id']._indices, mt._row_indices)
        mt.count()
        row_fields = {'f0': hl.tstr, 'f1': hl.tstr, 'f2': hl.tfloat32}
        # Each of these configurations should import and count successfully:
        # explicit empty row key, no header, and both combined.
        hl.import_matrix_table(doctest_resource('matrix2.tsv'),
                               row_fields=row_fields, row_key=[]).count()
        hl.import_matrix_table(doctest_resource('matrix3.tsv'),
                               row_fields=row_fields,
                               no_header=True).count()
        hl.import_matrix_table(doctest_resource('matrix3.tsv'),
                               row_fields=row_fields,
                               no_header=True,
                               row_key=[]).count()
        # A row key naming a nonexistent field must fail.
        self.assertRaises(hl.utils.FatalError,
                          hl.import_matrix_table,
                          doctest_resource('matrix3.tsv'),
                          row_fields=row_fields,
                          no_header=True,
                          row_key=['foo'])
    def test_de_novo(self):
        """Compare hl.de_novo results against a precomputed truth table on the
        shared key (locus, alleles, kid, dad, mom)."""
        mt = hl.import_vcf(resource('denovo.vcf'))
        mt = mt.filter_rows(mt.locus.in_y_par(), keep=False)  # de_novo_finder doesn't know about y PAR
        ped = hl.Pedigree.read(resource('denovo.fam'))
        r = hl.de_novo(mt, ped, mt.info.ESP)
        # Reshape both sides to the same key so the outer join below aligns
        # each called de novo with its truth-table row.
        r = r.select(
            prior = r.prior,
            kid_id=r.proband.s,
            dad_id=r.father.s,
            mom_id=r.mother.s,
            p_de_novo=r.p_de_novo,
            confidence=r.confidence).key_by('locus', 'alleles', 'kid_id', 'dad_id', 'mom_id')
        truth = hl.import_table(resource('denovo.out'), impute=True, comment='#')
        truth = truth.select(
            locus=hl.locus(truth['Chr'], truth['Pos']),
            alleles=[truth['Ref'], truth['Alt']],
            kid_id=truth['Child_ID'],
            dad_id=truth['Dad_ID'],
            mom_id=truth['Mom_ID'],
            p_de_novo=truth['Prob_dn'],
            confidence=truth['Validation_Likelihood'].split('_')[0]).key_by('locus', 'alleles', 'kid_id', 'dad_id', 'mom_id')
        j = r.join(truth, how='outer')
        # Confidence labels must match exactly; probabilities within 1e-4.
        self.assertTrue(j.all((j.confidence == j.confidence_1) & (hl.abs(j.p_de_novo - j.p_de_novo_1) < 1e-4)))
    def test_window_by_locus(self):
        """window_by_locus(mt, 5) must attach, to each row/entry, the previous 5
        rows/entries in locus order, with indices lining up exactly."""
        mt = hl.utils.range_matrix_table(100, 2, n_partitions=10)
        # Loci 1..100 on contig '1', one per row.
        mt = mt.annotate_rows(locus=hl.locus('1', mt.row_idx + 1))
        mt = mt.key_rows_by('locus')
        mt = mt.annotate_entries(e_row_idx = mt.row_idx, e_col_idx = mt.col_idx)
        mt = hl.window_by_locus(mt, 5).cache()
        self.assertEqual(mt.count_rows(), 100)
        rows = mt.rows()
        # Every row past the first 5 has a full window of 5 previous rows.
        self.assertTrue(rows.all((rows.row_idx < 5) | (rows.prev_rows.length() == 5)))
        # prev_rows[k] is the row at offset k+1 before the current row.
        self.assertTrue(rows.all(hl.all(lambda x: (rows.row_idx - 1 - x[0]) == x[1].row_idx,
                                        hl.zip_with_index(rows.prev_rows))))
        entries = mt.entries()
        # prev_entries stay within the same column, and prev_entries[k] comes
        # from the row at offset k+1 before the current row.
        self.assertTrue(entries.all(hl.all(lambda x: x.e_col_idx == entries.col_idx, entries.prev_entries)))
        self.assertTrue(entries.all(hl.all(lambda x: entries.row_idx - 1 - x[0] == x[1].e_row_idx,
                                           hl.zip_with_index(entries.prev_entries))))
| [
"hail.utils.range_matrix_table",
"hail.float32",
"hail.de_novo",
"subprocess.call",
"hail.utils.uri_path",
"hail.export_gen",
"hail.realized_relationship_matrix",
"hail.locus_interval",
"hail.mendel_errors",
"hail.case",
"hail.is_nan",
"numpy.diag",
"hail.hwe_normalized_pca",
"hail.len",
... | [((3082, 3153), 'hail.identity_by_descent', 'hl.identity_by_descent', (['dataset', "dataset['dummy_maf']"], {'min': '(0.0)', 'max': '(1.0)'}), "(dataset, dataset['dummy_maf'], min=0.0, max=1.0)\n", (3104, 3153), True, 'import hail as hl\n'), ((3364, 3402), 'hail.impute_sex', 'hl.impute_sex', (['ds.GT'], {'include_par': '(True)'}), '(ds.GT, include_par=True)\n', (3377, 3402), True, 'import hail as hl\n'), ((3569, 3596), 'hail.export_vcf', 'hl.export_vcf', (['ds', 'vcf_file'], {}), '(ds, vcf_file)\n', (3582, 3596), True, 'import hail as hl\n'), ((3606, 3718), 'hail.utils.run_command', 'utils.run_command', (["['plink', '--vcf', vcf_file, '--const-fid', '--check-sex', '--silent',\n '--out', out_file]"], {}), "(['plink', '--vcf', vcf_file, '--const-fid', '--check-sex',\n '--silent', '--out', out_file])\n", (3623, 3718), True, 'import hail.utils as utils\n'), ((3763, 3870), 'hail.import_table', 'hl.import_table', (["(out_file + '.sexcheck')"], {'delimiter': '""" +"""', 'types': "{'SNPSEX': hl.tint32, 'F': hl.tfloat64}"}), "(out_file + '.sexcheck', delimiter=' +', types={'SNPSEX': hl\n .tint32, 'F': hl.tfloat64})\n", (3778, 3870), True, 'import hail as hl\n'), ((37053, 37074), 'hail.sample_qc', 'hl.sample_qc', (['dataset'], {}), '(dataset)\n', (37065, 37074), True, 'import hail as hl\n'), ((38049, 38073), 'hail.variant_qc', 'hl.variant_qc', (['mt', '"""vqc"""'], {}), "(mt, 'vqc')\n", (38062, 38073), True, 'import hail as hl\n'), ((41755, 41790), 'hail.utils.new_temp_file', 'utils.new_temp_file', ([], {'prefix': '"""plink"""'}), "(prefix='plink')\n", (41774, 41790), True, 'import hail.utils as utils\n'), ((41810, 41858), 'hail.utils.new_temp_file', 'utils.new_temp_file', ([], {'prefix': '"""test"""', 'suffix': '"""rel"""'}), "(prefix='test', suffix='rel')\n", (41829, 41858), True, 'import hail.utils as utils\n'), ((41881, 41932), 'hail.utils.new_temp_file', 'utils.new_temp_file', ([], {'prefix': '"""test"""', 'suffix': '"""rel.id"""'}), "(prefix='test', 
suffix='rel.id')\n", (41900, 41932), True, 'import hail.utils as utils\n'), ((41952, 42000), 'hail.utils.new_temp_file', 'utils.new_temp_file', ([], {'prefix': '"""test"""', 'suffix': '"""grm"""'}), "(prefix='test', suffix='grm')\n", (41971, 42000), True, 'import hail.utils as utils\n'), ((42024, 42076), 'hail.utils.new_temp_file', 'utils.new_temp_file', ([], {'prefix': '"""test"""', 'suffix': '"""grm.bin"""'}), "(prefix='test', suffix='grm.bin')\n", (42043, 42076), True, 'import hail.utils as utils\n'), ((42101, 42155), 'hail.utils.new_temp_file', 'utils.new_temp_file', ([], {'prefix': '"""test"""', 'suffix': '"""grm.N.bin"""'}), "(prefix='test', suffix='grm.N.bin')\n", (42120, 42155), True, 'import hail.utils as utils\n'), ((42590, 42640), 'hail.export_plink', 'hl.export_plink', (['dataset', 'b_file'], {'ind_id': 'dataset.s'}), '(dataset, b_file, ind_id=dataset.s)\n', (42605, 42640), True, 'import hail as hl\n'), ((42815, 42856), 'hail.genetic_relatedness_matrix', 'hl.genetic_relatedness_matrix', (['dataset.GT'], {}), '(dataset.GT)\n', (42844, 42856), True, 'import hail as hl\n'), ((42953, 42988), 'hail.utils.new_temp_file', 'utils.new_temp_file', ([], {'prefix': '"""plink"""'}), "(prefix='plink')\n", (42972, 42988), True, 'import hail.utils as utils\n'), ((43589, 43624), 'hail.utils.new_temp_file', 'utils.new_temp_file', ([], {'prefix': '"""plink"""'}), "(prefix='plink')\n", (43608, 43624), True, 'import hail.utils as utils\n'), ((44200, 44235), 'hail.utils.new_temp_file', 'utils.new_temp_file', ([], {'prefix': '"""plink"""'}), "(prefix='plink')\n", (44219, 44235), True, 'import hail.utils as utils\n'), ((45021, 45111), 'numpy.matrix', 'np.matrix', (['[[0, 1, 2, 3, 4], [5, 6, 7, 8, 9], [10, 11, 12, 13, 14]]'], {'dtype': 'np.float64'}), '([[0, 1, 2, 3, 4], [5, 6, 7, 8, 9], [10, 11, 12, 13, 14]], dtype=\n np.float64)\n', (45030, 45111), True, 'import numpy as np\n'), ((45502, 45579), 'hail.balding_nichols_model', 'hl.balding_nichols_model', (['k', 'n1', 'm1'], 
{'fst': '(k * [fst])', 'seed': 'seed', 'n_partitions': '(4)'}), '(k, n1, m1, fst=k * [fst], seed=seed, n_partitions=4)\n', (45526, 45579), True, 'import hail as hl\n'), ((47367, 47403), 'hail.balding_nichols_model', 'hl.balding_nichols_model', (['(3)', '(100)', '(50)'], {}), '(3, 100, 50)\n', (47391, 47403), True, 'import hail as hl\n'), ((47444, 47500), 'hail.hwe_normalized_pca', 'hl.hwe_normalized_pca', (['mt.GT'], {'k': '(2)', 'compute_loadings': '(True)'}), '(mt.GT, k=2, compute_loadings=True)\n', (47465, 47500), True, 'import hail as hl\n'), ((47730, 47787), 'hail.hwe_normalized_pca', 'hl.hwe_normalized_pca', (['mt.GT'], {'k': '(2)', 'compute_loadings': '(False)'}), '(mt.GT, k=2, compute_loadings=False)\n', (47751, 47787), True, 'import hail as hl\n'), ((49207, 49240), 'numpy.linalg.svd', 'np.linalg.svd', (['n'], {'full_matrices': '(0)'}), '(n, full_matrices=0)\n', (49220, 49240), True, 'import numpy as np\n'), ((50499, 50564), 'hail.import_table', 'hl.import_table', (["(plink_file + '.out')"], {'delimiter': '""" +"""', 'types': 'types'}), "(plink_file + '.out', delimiter=' +', types=types)\n", (50514, 50564), True, 'import hail as hl\n'), ((51083, 51121), 'hail.balding_nichols_model', 'hl.balding_nichols_model', (['(3)', '(100)', '(1000)'], {}), '(3, 100, 1000)\n', (51107, 51121), True, 'import hail as hl\n'), ((51749, 51785), 'hail.balding_nichols_model', 'hl.balding_nichols_model', (['(3)', '(50)', '(100)'], {}), '(3, 50, 100)\n', (51773, 51785), True, 'import hail as hl\n'), ((51810, 51867), 'hail.hwe_normalized_pca', 'hl.hwe_normalized_pca', (['mt.GT'], {'k': '(2)', 'compute_loadings': '(False)'}), '(mt.GT, k=2, compute_loadings=False)\n', (51831, 51867), True, 'import hail as hl\n'), ((51892, 51949), 'hail.hwe_normalized_pca', 'hl.hwe_normalized_pca', (['mt.GT'], {'k': '(3)', 'compute_loadings': '(False)'}), '(mt.GT, k=3, compute_loadings=False)\n', (51913, 51949), True, 'import hail as hl\n'), ((51966, 52028), 'hail.pc_relate', 'hl.pc_relate', 
(['mt.GT', '(0.1)'], {'k': '(2)', 'statistics': '"""kin"""', 'block_size': '(64)'}), "(mt.GT, 0.1, k=2, statistics='kin', block_size=64)\n", (51978, 52028), True, 'import hail as hl\n'), ((52047, 52149), 'hail.pc_relate', 'hl.pc_relate', (['mt.GT', '(0.1)'], {'scores_expr': 'scores2[mt.col_key].scores', 'statistics': '"""kin"""', 'block_size': '(32)'}), "(mt.GT, 0.1, scores_expr=scores2[mt.col_key].scores, statistics\n ='kin', block_size=32)\n", (52059, 52149), True, 'import hail as hl\n'), ((52301, 52422), 'hail.pc_relate', 'hl.pc_relate', (['mt.GT', '(0.05)'], {'scores_expr': 'scores2[mt.col_key].scores', 'min_kinship': '(0.01)', 'statistics': '"""kin2"""', 'block_size': '(16)'}), "(mt.GT, 0.05, scores_expr=scores2[mt.col_key].scores,\n min_kinship=0.01, statistics='kin2', block_size=16)\n", (52313, 52422), True, 'import hail as hl\n'), ((52573, 52694), 'hail.pc_relate', 'hl.pc_relate', (['mt.GT', '(0.02)'], {'scores_expr': 'scores3[mt.col_key].scores', 'min_kinship': '(0.1)', 'statistics': '"""kin20"""', 'block_size': '(32)'}), "(mt.GT, 0.02, scores_expr=scores3[mt.col_key].scores,\n min_kinship=0.1, statistics='kin20', block_size=32)\n", (52585, 52694), True, 'import hail as hl\n'), ((52737, 52801), 'hail.pc_relate', 'hl.pc_relate', (['mt.GT', '(0.01)'], {'k': '(3)', 'statistics': '"""all"""', 'block_size': '(128)'}), "(mt.GT, 0.01, k=3, statistics='all', block_size=128)\n", (52749, 52801), True, 'import hail as hl\n'), ((52819, 52921), 'hail.pc_relate', 'hl.pc_relate', (['mt.GT', '(0.01)'], {'scores_expr': 'scores3[mt.col_key].scores', 'statistics': '"""all"""', 'block_size': '(16)'}), "(mt.GT, 0.01, scores_expr=scores3[mt.col_key].scores,\n statistics='all', block_size=16)\n", (52831, 52921), True, 'import hail as hl\n'), ((53805, 53828), 'hail.split_multi_hts', 'hl.split_multi_hts', (['ds1'], {}), '(ds1)\n', (53823, 53828), True, 'import hail as hl\n'), ((54245, 54276), 'hail.mendel_errors', 'hl.mendel_errors', (["mt['GT']", 'ped'], {}), "(mt['GT'], ped)\n", 
(54261, 54276), True, 'import hail as hl\n'), ((58549, 58581), 'hail.mendel_errors', 'hl.mendel_errors', (["mt['GT']", 'ped2'], {}), "(mt['GT'], ped2)\n", (58565, 58581), True, 'import hail as hl\n'), ((58844, 58908), 'hail.export_vcf', 'hl.export_vcf', (['dataset', '"""/tmp/sample.vcf"""'], {'metadata': 'vcf_metadata'}), "(dataset, '/tmp/sample.vcf', metadata=vcf_metadata)\n", (58857, 58908), True, 'import hail as hl\n'), ((58936, 58968), 'hail.import_vcf', 'hl.import_vcf', (['"""/tmp/sample.vcf"""'], {}), "('/tmp/sample.vcf')\n", (58949, 58968), True, 'import hail as hl\n'), ((59055, 59093), 'hail.get_vcf_metadata', 'hl.get_vcf_metadata', (['"""/tmp/sample.vcf"""'], {}), "('/tmp/sample.vcf')\n", (59074, 59093), True, 'import hail as hl\n'), ((59268, 59300), 'hail.concordance', 'hl.concordance', (['dataset', 'dataset'], {}), '(dataset, dataset)\n', (59282, 59300), True, 'import hail as hl\n'), ((60713, 60747), 'hail.utils.new_temp_file', 'utils.new_temp_file', ([], {'suffix': '""".bgz"""'}), "(suffix='.bgz')\n", (60732, 60747), True, 'import hail.utils as utils\n'), ((60760, 60784), 'hail.utils.range_table', 'utils.range_table', (['(10)', '(5)'], {}), '(10, 5)\n', (60777, 60784), True, 'import hail.utils as utils\n'), ((60819, 60852), 'hail.utils.new_temp_file', 'utils.new_temp_file', ([], {'suffix': '""".gz"""'}), "(suffix='.gz')\n", (60838, 60852), True, 'import hail.utils as utils\n'), ((61159, 61226), 'hail.import_locus_intervals', 'hl.import_locus_intervals', (['interval_file'], {'reference_genome': '"""GRCh37"""'}), "(interval_file, reference_genome='GRCh37')\n", (61184, 61226), True, 'import hail as hl\n'), ((61532, 61590), 'hail.utils.new_temp_file', 'utils.new_temp_file', ([], {'prefix': '"""test"""', 'suffix': '"""interval_list"""'}), "(prefix='test', suffix='interval_list')\n", (61551, 61590), True, 'import hail.utils as utils\n'), ((61840, 61875), 'hail.import_locus_intervals', 'hl.import_locus_intervals', (['tmp_file'], {}), '(tmp_file)\n', (61865, 
61875), True, 'import hail as hl\n'), ((62066, 62129), 'hail.import_locus_intervals', 'hl.import_locus_intervals', (['interval_file'], {'reference_genome': 'None'}), '(interval_file, reference_genome=None)\n', (62091, 62129), True, 'import hail as hl\n'), ((62411, 62511), 'hail.import_locus_intervals', 'hl.import_locus_intervals', (['interval_file'], {'reference_genome': '"""GRCh37"""', 'skip_invalid_intervals': '(True)'}), "(interval_file, reference_genome='GRCh37',\n skip_invalid_intervals=True)\n", (62436, 62511), True, 'import hail as hl\n'), ((62562, 62658), 'hail.import_locus_intervals', 'hl.import_locus_intervals', (['interval_file'], {'reference_genome': 'None', 'skip_invalid_intervals': '(True)'}), '(interval_file, reference_genome=None,\n skip_invalid_intervals=True)\n', (62587, 62658), True, 'import hail as hl\n'), ((62786, 62836), 'hail.import_bed', 'hl.import_bed', (['bed_file'], {'reference_genome': '"""GRCh37"""'}), "(bed_file, reference_genome='GRCh37')\n", (62799, 62836), True, 'import hail as hl\n'), ((63310, 63360), 'hail.import_bed', 'hl.import_bed', (['bed_file'], {'reference_genome': '"""GRCh37"""'}), "(bed_file, reference_genome='GRCh37')\n", (63323, 63360), True, 'import hail as hl\n'), ((63674, 63720), 'hail.import_bed', 'hl.import_bed', (['bed_file'], {'reference_genome': 'None'}), '(bed_file, reference_genome=None)\n', (63687, 63720), True, 'import hail as hl\n'), ((63975, 64054), 'hail.import_bed', 'hl.import_bed', (['bed_file'], {'reference_genome': '"""GRCh37"""', 'skip_invalid_intervals': '(True)'}), "(bed_file, reference_genome='GRCh37', skip_invalid_intervals=True)\n", (63988, 64054), True, 'import hail as hl\n'), ((64108, 64183), 'hail.import_bed', 'hl.import_bed', (['bed_file'], {'reference_genome': 'None', 'skip_invalid_intervals': '(True)'}), '(bed_file, reference_genome=None, skip_invalid_intervals=True)\n', (64121, 64183), True, 'import hail as hl\n'), ((66760, 66793), 'hail.export_vcf', 'hl.export_vcf', (['mt', 
'split_vcf_file'], {}), '(mt, split_vcf_file)\n', (66773, 66793), True, 'import hail as hl\n'), ((66802, 66832), 'hail.export_plink', 'hl.export_plink', (['mt', 'hl_output'], {}), '(mt, hl_output)\n', (66817, 66832), True, 'import hail as hl\n'), ((66842, 66974), 'hail.utils.run_command', 'utils.run_command', (["['plink', '--vcf', split_vcf_file, '--make-bed', '--out', plink_output,\n '--const-fid', '--keep-allele-order']"], {}), "(['plink', '--vcf', split_vcf_file, '--make-bed', '--out',\n plink_output, '--const-fid', '--keep-allele-order'])\n", (66859, 66974), True, 'import hail.utils as utils\n'), ((67395, 67519), 'hail.utils.run_command', 'utils.run_command', (["['plink', '--bfile', plink_output, '--bmerge', hl_output, '--merge-mode',\n '6', '--out', merge_output]"], {}), "(['plink', '--bfile', plink_output, '--bmerge', hl_output,\n '--merge-mode', '6', '--out', merge_output])\n", (67412, 67519), True, 'import hail.utils as utils\n'), ((68267, 68288), 'hail.utils.new_temp_file', 'utils.new_temp_file', ([], {}), '()\n', (68286, 68288), True, 'import hail.utils as utils\n'), ((68297, 68322), 'hail.export_plink', 'hl.export_plink', (['ds', 'out1'], {}), '(ds, out1)\n', (68312, 68322), True, 'import hail as hl\n'), ((69020, 69041), 'hail.utils.new_temp_file', 'utils.new_temp_file', ([], {}), '()\n', (69039, 69041), True, 'import hail.utils as utils\n'), ((69050, 69165), 'hail.export_plink', 'hl.export_plink', (['ds', 'out2'], {'ind_id': 'ds.s', 'fam_id': 'ds.s', 'pat_id': '"""nope"""', 'mat_id': '"""nada"""', 'is_female': '(True)', 'pheno': '(False)'}), "(ds, out2, ind_id=ds.s, fam_id=ds.s, pat_id='nope', mat_id=\n 'nada', is_female=True, pheno=False)\n", (69065, 69165), True, 'import hail as hl\n'), ((69597, 69618), 'hail.utils.new_temp_file', 'utils.new_temp_file', ([], {}), '()\n', (69616, 69618), True, 'import hail.utils as utils\n'), ((70122, 70143), 'hail.utils.new_temp_file', 'utils.new_temp_file', ([], {}), '()\n', (70141, 70143), True, 'import hail.utils 
as utils\n'), ((70152, 70209), 'hail.export_plink', 'hl.export_plink', (['ds', 'out4'], {'varid': '"""hello"""', 'cm_position': '(100)'}), "(ds, out4, varid='hello', cm_position=100)\n", (70167, 70209), True, 'import hail as hl\n'), ((70458, 70479), 'hail.utils.new_temp_file', 'utils.new_temp_file', ([], {}), '()\n', (70477, 70479), True, 'import hail.utils as utils\n'), ((70549, 70601), 'hail.export_plink', 'hl.export_plink', (['ds_call', 'out5'], {'call': 'ds_call.gt_fake'}), '(ds_call, out5, call=ds_call.gt_fake)\n', (70564, 70601), True, 'import hail as hl\n'), ((70627, 70687), 'hail.import_plink', 'hl.import_plink', (["(out5 + '.bed')", "(out5 + '.bim')", "(out5 + '.fam')"], {}), "(out5 + '.bed', out5 + '.bim', out5 + '.fam')\n", (70642, 70687), True, 'import hail as hl\n'), ((71618, 71642), 'hail.export_gen', 'hl.export_gen', (['gen', 'file'], {}), '(gen, file)\n', (71631, 71642), True, 'import hail as hl\n'), ((71658, 71766), 'hail.import_gen', 'hl.import_gen', (["(file + '.gen')"], {'sample_file': "(file + '.sample')", 'reference_genome': '"""GRCh37"""', 'min_partitions': '(3)'}), "(file + '.gen', sample_file=file + '.sample', reference_genome\n ='GRCh37', min_partitions=3)\n", (71671, 71766), True, 'import hail as hl\n'), ((72287, 72308), 'hail.utils.new_temp_file', 'utils.new_temp_file', ([], {}), '()\n', (72306, 72308), True, 'import hail.utils as utils\n'), ((74492, 74516), 'hail.utils.range_table', 'hl.utils.range_table', (['(10)'], {}), '(10)\n', (74512, 74516), True, 'import hail as hl\n'), ((74634, 74704), 'hail.maximal_independent_set', 'hl.maximal_independent_set', (['graph.i', 'graph.j', '(True)', '(lambda l, r: l - r)'], {}), '(graph.i, graph.j, True, lambda l, r: l - r)\n', (74660, 74704), True, 'import hail as hl\n'), ((75629, 75665), 'hail.maximal_independent_set', 'hl.maximal_independent_set', (['t.i', 't.j'], {}), '(t.i, t.j)\n', (75655, 75665), True, 'import hail as hl\n'), ((76785, 76845), 'hail.maximal_independent_set', 
'hl.maximal_independent_set', (['t.i', 't.j'], {'tie_breaker': 'tiebreaker'}), '(t.i, t.j, tie_breaker=tiebreaker)\n', (76811, 76845), True, 'import hail as hl\n'), ((78985, 79035), 'hail.ld_prune', 'hl.ld_prune', (['ds.GT'], {'r2': '(0.2)', 'bp_window_size': '(1000000)'}), '(ds.GT, r2=0.2, bp_window_size=1000000)\n', (78996, 79035), True, 'import hail as hl\n'), ((79694, 79760), 'hail.linalg.BlockMatrix.from_entry_expr', 'BlockMatrix.from_entry_expr', (['normalized_mean_imputed_genotype_expr'], {}), '(normalized_mean_imputed_genotype_expr)\n', (79721, 79760), False, 'from hail.linalg import BlockMatrix\n'), ((80679, 80750), 'hail.balding_nichols_model', 'hl.balding_nichols_model', ([], {'n_populations': '(1)', 'n_samples': '(10)', 'n_variants': '(100)'}), '(n_populations=1, n_samples=10, n_variants=100)\n', (80703, 80750), True, 'import hail as hl\n'), ((80774, 80818), 'hail.ld_prune', 'hl.ld_prune', (['ds.GT'], {'r2': '(0.1)', 'bp_window_size': '(0)'}), '(ds.GT, r2=0.1, bp_window_size=0)\n', (80785, 80818), True, 'import hail as hl\n'), ((81127, 81145), 'hail.ld_prune', 'hl.ld_prune', (['ds.GT'], {}), '(ds.GT)\n', (81138, 81145), True, 'import hail as hl\n'), ((81522, 81545), 'hail.ld_prune', 'hl.ld_prune', (['ds.GT', '(0.0)'], {}), '(ds.GT, 0.0)\n', (81533, 81545), True, 'import hail as hl\n'), ((81672, 81696), 'hail.literal', 'hl.literal', (['positions[0]'], {}), '(positions[0])\n', (81682, 81696), True, 'import hail as hl\n'), ((82020, 82039), 'hail.ld_prune', 'hl.ld_prune', (['ds.foo'], {}), '(ds.foo)\n', (82031, 82039), True, 'import hail as hl\n'), ((82269, 82324), 'hail.tstruct', 'hl.tstruct', ([], {'i': 'hl.tint32', 'j': 'hl.tint32', 'entry': 'hl.tfloat64'}), '(i=hl.tint32, j=hl.tint32, entry=hl.tfloat64)\n', (82279, 82324), True, 'import hail as hl\n'), ((86468, 86491), 'hail.split_multi_hts', 'hl.split_multi_hts', (['ds2'], {}), '(ds2)\n', (86486, 86491), True, 'import hail as hl\n'), ((95888, 95955), 'hail.export_plink', 'hl.export_plink', (['mt', 
'bfile'], {'ind_id': 'mt.s', 'cm_position': 'mt.cm_position'}), '(mt, bfile, ind_id=mt.s, cm_position=mt.cm_position)\n', (95903, 95955), True, 'import hail as hl\n'), ((95979, 96092), 'hail.import_plink', 'hl.import_plink', (["(bfile + '.bed')", "(bfile + '.bim')", "(bfile + '.fam')"], {'a2_reference': '(True)', 'reference_genome': '"""GRCh37"""'}), "(bfile + '.bed', bfile + '.bim', bfile + '.fam',\n a2_reference=True, reference_genome='GRCh37')\n", (95994, 96092), True, 'import hail as hl\n'), ((96387, 96426), 'hail.export_plink', 'hl.export_plink', (['mt', 'bfile'], {'ind_id': 'mt.s'}), '(mt, bfile, ind_id=mt.s)\n', (96402, 96426), True, 'import hail as hl\n'), ((96711, 96750), 'hail.export_plink', 'hl.export_plink', (['mt', 'bfile'], {'ind_id': 'mt.s'}), '(mt, bfile, ind_id=mt.s)\n', (96726, 96750), True, 'import hail as hl\n'), ((97044, 97083), 'hail.export_plink', 'hl.export_plink', (['mt', 'bfile'], {'ind_id': 'mt.s'}), '(mt, bfile, ind_id=mt.s)\n', (97059, 97083), True, 'import hail as hl\n'), ((98402, 98443), 'hail.export_plink', 'hl.export_plink', (['vcf', '"""/tmp/sample_plink"""'], {}), "(vcf, '/tmp/sample_plink')\n", (98417, 98443), True, 'import hail as hl\n'), ((98994, 99084), 'hail.import_plink', 'hl.import_plink', (["(bfile + '.bed')", "(bfile + '.bim')", "(bfile + '.fam')"], {'reference_genome': 'None'}), "(bfile + '.bed', bfile + '.bim', bfile + '.fam',\n reference_genome=None)\n", (99009, 99084), True, 'import hail as hl\n'), ((100889, 100921), 'hail.de_novo', 'hl.de_novo', (['mt', 'ped', 'mt.info.ESP'], {}), '(mt, ped, mt.info.ESP)\n', (100899, 100921), True, 'import hail as hl\n'), ((101893, 101945), 'hail.utils.range_matrix_table', 'hl.utils.range_matrix_table', (['(100)', '(2)'], {'n_partitions': '(10)'}), '(100, 2, n_partitions=10)\n', (101920, 101945), True, 'import hail as hl\n'), ((772, 821), 'hail.utils.new_temp_file', 'utils.new_temp_file', ([], {'prefix': '"""plink"""', 'suffix': '"""vcf"""'}), "(prefix='plink', suffix='vcf')\n", 
(791, 821), True, 'import hail.utils as utils\n'), ((846, 881), 'hail.utils.new_temp_file', 'utils.new_temp_file', ([], {'prefix': '"""plink"""'}), "(prefix='plink')\n", (865, 881), True, 'import hail.utils as utils\n'), ((894, 916), 'hail.export_vcf', 'hl.export_vcf', (['ds', 'vcf'], {}), '(ds, vcf)\n', (907, 916), True, 'import hail as hl\n'), ((1355, 1392), 'hail.utils.uri_path', 'utils.uri_path', (["(plinkpath + '.genome')"], {}), "(plinkpath + '.genome')\n", (1369, 1392), True, 'import hail.utils as utils\n'), ((1406, 1472), 'subprocess.call', 'syscall', (['plink_command'], {'shell': '(True)', 'stdout': 'DEVNULL', 'stderr': 'DEVNULL'}), '(plink_command, shell=True, stdout=DEVNULL, stderr=DEVNULL)\n', (1413, 1472), True, 'from subprocess import DEVNULL, call as syscall\n'), ((3194, 3226), 'hail.float32', 'hl.float32', (["dataset['dummy_maf']"], {}), "(dataset['dummy_maf'])\n", (3204, 3226), True, 'import hail as hl\n'), ((3438, 3487), 'hail.utils.new_temp_file', 'utils.new_temp_file', ([], {'prefix': '"""plink"""', 'suffix': '"""vcf"""'}), "(prefix='plink', suffix='vcf')\n", (3457, 3487), True, 'import hail.utils as utils\n'), ((3523, 3558), 'hail.utils.new_temp_file', 'utils.new_temp_file', ([], {'prefix': '"""plink"""'}), "(prefix='plink')\n", (3542, 3558), True, 'import hail.utils as utils\n'), ((7910, 7945), 'numpy.isnan', 'np.isnan', (['results[6].standard_error'], {}), '(results[6].standard_error)\n', (7918, 7945), True, 'import numpy as np\n'), ((7971, 7998), 'numpy.isnan', 'np.isnan', (['results[6].t_stat'], {}), '(results[6].t_stat)\n', (7979, 7998), True, 'import numpy as np\n'), ((8024, 8052), 'numpy.isnan', 'np.isnan', (['results[6].p_value'], {}), '(results[6].p_value)\n', (8032, 8052), True, 'import numpy as np\n'), ((8079, 8114), 'numpy.isnan', 'np.isnan', (['results[7].standard_error'], {}), '(results[7].standard_error)\n', (8087, 8114), True, 'import numpy as np\n'), ((8140, 8175), 'numpy.isnan', 'np.isnan', (['results[8].standard_error'], {}), 
'(results[8].standard_error)\n', (8148, 8175), True, 'import numpy as np\n'), ((8201, 8236), 'numpy.isnan', 'np.isnan', (['results[9].standard_error'], {}), '(results[9].standard_error)\n', (8209, 8236), True, 'import numpy as np\n'), ((8262, 8298), 'numpy.isnan', 'np.isnan', (['results[10].standard_error'], {}), '(results[10].standard_error)\n', (8270, 8298), True, 'import numpy as np\n'), ((11790, 11825), 'numpy.isnan', 'np.isnan', (['results[6].standard_error'], {}), '(results[6].standard_error)\n', (11798, 11825), True, 'import numpy as np\n'), ((12974, 13009), 'numpy.isnan', 'np.isnan', (['results[6].standard_error'], {}), '(results[6].standard_error)\n', (12982, 13009), True, 'import numpy as np\n'), ((13035, 13070), 'numpy.isnan', 'np.isnan', (['results[7].standard_error'], {}), '(results[7].standard_error)\n', (13043, 13070), True, 'import numpy as np\n'), ((13096, 13131), 'numpy.isnan', 'np.isnan', (['results[8].standard_error'], {}), '(results[8].standard_error)\n', (13104, 13131), True, 'import numpy as np\n'), ((13157, 13192), 'numpy.isnan', 'np.isnan', (['results[9].standard_error'], {}), '(results[9].standard_error)\n', (13165, 13192), True, 'import numpy as np\n'), ((13218, 13254), 'numpy.isnan', 'np.isnan', (['results[10].standard_error'], {}), '(results[10].standard_error)\n', (13226, 13254), True, 'import numpy as np\n'), ((14546, 14581), 'numpy.isnan', 'np.isnan', (['results[6].standard_error'], {}), '(results[6].standard_error)\n', (14554, 14581), True, 'import numpy as np\n'), ((14607, 14642), 'numpy.isnan', 'np.isnan', (['results[7].standard_error'], {}), '(results[7].standard_error)\n', (14615, 14642), True, 'import numpy as np\n'), ((14668, 14703), 'numpy.isnan', 'np.isnan', (['results[8].standard_error'], {}), '(results[8].standard_error)\n', (14676, 14703), True, 'import numpy as np\n'), ((14729, 14764), 'numpy.isnan', 'np.isnan', (['results[9].standard_error'], {}), '(results[9].standard_error)\n', (14737, 14764), True, 'import numpy as 
np\n'), ((14790, 14826), 'numpy.isnan', 'np.isnan', (['results[10].standard_error'], {}), '(results[10].standard_error)\n', (14798, 14826), True, 'import numpy as np\n'), ((16207, 16242), 'numpy.isnan', 'np.isnan', (['results[6].standard_error'], {}), '(results[6].standard_error)\n', (16215, 16242), True, 'import numpy as np\n'), ((16268, 16303), 'numpy.isnan', 'np.isnan', (['results[7].standard_error'], {}), '(results[7].standard_error)\n', (16276, 16303), True, 'import numpy as np\n'), ((16329, 16364), 'numpy.isnan', 'np.isnan', (['results[8].standard_error'], {}), '(results[8].standard_error)\n', (16337, 16364), True, 'import numpy as np\n'), ((16390, 16425), 'numpy.isnan', 'np.isnan', (['results[9].standard_error'], {}), '(results[9].standard_error)\n', (16398, 16425), True, 'import numpy as np\n'), ((16451, 16487), 'numpy.isnan', 'np.isnan', (['results[10].standard_error'], {}), '(results[10].standard_error)\n', (16459, 16487), True, 'import numpy as np\n'), ((34000, 34024), 'hail.is_defined', 'hl.is_defined', (['ht.pat_id'], {}), '(ht.pat_id)\n', (34013, 34024), True, 'import hail as hl\n'), ((34122, 34146), 'hail.is_defined', 'hl.is_defined', (['ht.mat_id'], {}), '(ht.mat_id)\n', (34135, 34146), True, 'import hail as hl\n'), ((36767, 36796), 'hail.is_defined', 'hl.is_defined', (['t_cols.data.sa'], {}), '(t_cols.data.sa)\n', (36780, 36796), True, 'import hail as hl\n'), ((37834, 37896), 'hail.dtype', 'hl.dtype', (['"""struct{v: str, s: str, GT: call, GQ: int, DP: int}"""'], {}), "('struct{v: str, s: str, GT: call, GQ: int, DP: int}')\n", (37842, 37896), True, 'import hail as hl\n'), ((40505, 40523), 'numpy.zeros', 'np.zeros', (['(ns, ns)'], {}), '((ns, ns))\n', (40513, 40523), True, 'import numpy as np\n'), ((40889, 40907), 'numpy.zeros', 'np.zeros', (['(ns, ns)'], {}), '((ns, ns))\n', (40897, 40907), True, 'import numpy as np\n'), ((41332, 41350), 'numpy.zeros', 'np.zeros', (['(ns, ns)'], {}), '((ns, ns))\n', (41340, 41350), True, 'import numpy as np\n'), 
((45178, 45221), 'hail.linalg.BlockMatrix.from_numpy', 'BlockMatrix.from_numpy', (['ndarray', 'block_size'], {}), '(ndarray, block_size)\n', (45200, 45221), False, 'from hail.linalg import BlockMatrix\n'), ((46787, 46825), 'hail.realized_relationship_matrix', 'hl.realized_relationship_matrix', (['ds.GT'], {}), '(ds.GT)\n', (46818, 46825), True, 'import hail as hl\n'), ((46843, 46877), 'hail.utils.new_temp_file', 'utils.new_temp_file', ([], {'suffix': '""".tsv"""'}), "(suffix='.tsv')\n", (46862, 46877), True, 'import hail.utils as utils\n'), ((47162, 47176), 'numpy.array', 'np.array', (['data'], {}), '(data)\n', (47170, 47176), True, 'import numpy as np\n'), ((47288, 47312), 'numpy.allclose', 'np.allclose', (['manual', 'rrm'], {}), '(manual, rrm)\n', (47299, 47312), True, 'import numpy as np\n'), ((48487, 48526), 'hail.bind', 'hl.bind', (['make_expr', '(mt.AC / mt.n_called)'], {}), '(make_expr, mt.AC / mt.n_called)\n', (48494, 48526), True, 'import hail as hl\n'), ((48919, 48952), 'numpy.mean', 'np.mean', (['a'], {'axis': '(0)', 'keepdims': '(True)'}), '(a, axis=0, keepdims=True)\n', (48926, 48952), True, 'import numpy as np\n'), ((49081, 49101), 'numpy.diag', 'np.diag', (['[1.0, 1, 2]'], {}), '([1.0, 1, 2])\n', (49088, 49101), True, 'import numpy as np\n'), ((49959, 49980), 'hail.utils.new_temp_file', 'utils.new_temp_file', ([], {}), '()\n', (49978, 49980), True, 'import hail.utils as utils\n'), ((54887, 54929), 'hail.tstruct', 'hl.tstruct', ([], {'pat_id': 'hl.tstr', 'mat_id': 'hl.tstr'}), '(pat_id=hl.tstr, mat_id=hl.tstr)\n', (54897, 54929), True, 'import hail as hl\n'), ((55022, 55142), 'hail.tstruct', 'hl.tstruct', ([], {'pat_id': 'hl.tstr', 'mat_id': 'hl.tstr', 'fam_id': 'hl.tstr', 'children': 'hl.tint', 'errors': 'hl.tint64', 'snp_errors': 'hl.tint64'}), '(pat_id=hl.tstr, mat_id=hl.tstr, fam_id=hl.tstr, children=hl.tint,\n errors=hl.tint64, snp_errors=hl.tint64)\n', (55032, 55142), True, 'import hail as hl\n'), ((55435, 55456), 'hail.tstruct', 'hl.tstruct', 
([], {'s': 'hl.tstr'}), '(s=hl.tstr)\n', (55445, 55456), True, 'import hail as hl\n'), ((55498, 55575), 'hail.tstruct', 'hl.tstruct', ([], {'s': 'hl.tstr', 'fam_id': 'hl.tstr', 'errors': 'hl.tint64', 'snp_errors': 'hl.tint64'}), '(s=hl.tstr, fam_id=hl.tstr, errors=hl.tint64, snp_errors=hl.tint64)\n', (55508, 55575), True, 'import hail as hl\n'), ((61491, 61510), 'hail.tlocus', 'hl.tlocus', (['"""GRCh37"""'], {}), "('GRCh37')\n", (61500, 61510), True, 'import hail as hl\n'), ((62224, 62270), 'hail.tstruct', 'hl.tstruct', ([], {'contig': 'hl.tstr', 'position': 'hl.tint32'}), '(contig=hl.tstr, position=hl.tint32)\n', (62234, 62270), True, 'import hail as hl\n'), ((63232, 63251), 'hail.tlocus', 'hl.tlocus', (['"""GRCh37"""'], {}), "('GRCh37')\n", (63241, 63251), True, 'import hail as hl\n'), ((63415, 63434), 'hail.tlocus', 'hl.tlocus', (['"""GRCh37"""'], {}), "('GRCh37')\n", (63424, 63434), True, 'import hail as hl\n'), ((63815, 63861), 'hail.tstruct', 'hl.tstruct', ([], {'contig': 'hl.tstr', 'position': 'hl.tint32'}), '(contig=hl.tstr, position=hl.tint32)\n', (63825, 63861), True, 'import hail as hl\n'), ((66463, 66505), 'hail.import_vcf', 'hl.import_vcf', (['vcf_file'], {'min_partitions': '(10)'}), '(vcf_file, min_partitions=10)\n', (66476, 66505), True, 'import hail as hl\n'), ((66548, 66569), 'hail.utils.new_temp_file', 'utils.new_temp_file', ([], {}), '()\n', (66567, 66569), True, 'import hail.utils as utils\n'), ((66606, 66627), 'hail.utils.new_temp_file', 'utils.new_temp_file', ([], {}), '()\n', (66625, 66627), True, 'import hail.utils as utils\n'), ((66667, 66688), 'hail.utils.new_temp_file', 'utils.new_temp_file', ([], {}), '()\n', (66686, 66688), True, 'import hail.utils as utils\n'), ((66728, 66749), 'hail.utils.new_temp_file', 'utils.new_temp_file', ([], {}), '()\n', (66747, 66749), True, 'import hail.utils as utils\n'), ((74868, 74894), 'hail.tstruct', 'hl.tstruct', ([], {'node': 'hl.tint64'}), '(node=hl.tint64)\n', (74878, 74894), True, 'import hail as 
hl\n'), ((74942, 74968), 'hail.tstruct', 'hl.tstruct', ([], {'node': 'hl.tint64'}), '(node=hl.tint64)\n', (74952, 74968), True, 'import hail as hl\n'), ((75575, 75611), 'hail.tstruct', 'hl.tstruct', ([], {'i': 'hl.tint64', 'j': 'hl.tint64'}), '(i=hl.tint64, j=hl.tint64)\n', (75585, 75611), True, 'import hail as hl\n'), ((77311, 77330), 'hail.import_vcf', 'hl.import_vcf', (['path'], {}), '(path)\n', (77324, 77330), True, 'import hail as hl\n'), ((79075, 79124), 'hail.is_defined', 'hl.is_defined', (['pruned_table[ds.locus, ds.alleles]'], {}), '(pruned_table[ds.locus, ds.alleles])\n', (79088, 79124), True, 'import hail as hl\n'), ((79475, 79507), 'hail.is_defined', 'hl.is_defined', (["filtered_ds['GT']"], {}), "(filtered_ds['GT'])\n", (79488, 79507), True, 'import hail as hl\n'), ((80130, 80189), 'hail.abs', 'hl.abs', (['(entries.locus_i.position - entries.locus_j.position)'], {}), '(entries.locus_i.position - entries.locus_j.position)\n', (80136, 80189), True, 'import hail as hl\n'), ((82723, 82766), 'hail.linalg.BlockMatrix.from_numpy', 'BlockMatrix.from_numpy', (['ndarray', 'block_size'], {}), '(ndarray, block_size)\n', (82745, 82766), False, 'from hail.linalg import BlockMatrix\n'), ((83260, 83307), 'hail.parse_locus_interval', 'hl.parse_locus_interval', (['"""20:10639222-10644700"""'], {}), "('20:10639222-10644700')\n", (83283, 83307), True, 'import hail as hl\n'), ((83330, 83377), 'hail.parse_locus_interval', 'hl.parse_locus_interval', (['"""20:10644700-10644705"""'], {}), "('20:10644700-10644705')\n", (83353, 83377), True, 'import hail as hl\n'), ((85666, 85736), 'hail.Struct', 'hl.Struct', ([], {'type': '"""TruncatedBetaDist"""', 'a': '(0.01)', 'b': '(2.0)', 'min': '(0.05)', 'max': '(0.95)'}), "(type='TruncatedBetaDist', a=0.01, b=2.0, min=0.05, max=0.95)\n", (85675, 85736), True, 'import hail as hl\n'), ((87925, 87944), 'hail.tlocus', 'hl.tlocus', (['"""GRCh37"""'], {}), "('GRCh37')\n", (87934, 87944), True, 'import hail as hl\n'), ((89185, 89215), 
'hail.tstruct', 'hl.tstruct', ([], {'dosage': 'hl.tfloat64'}), '(dosage=hl.tfloat64)\n', (89195, 89215), True, 'import hail as hl\n'), ((90127, 90146), 'hail.tlocus', 'hl.tlocus', (['"""GRCh37"""'], {}), "('GRCh37')\n", (90136, 90146), True, 'import hail as hl\n'), ((91408, 91427), 'hail.tlocus', 'hl.tlocus', (['"""GRCh37"""'], {}), "('GRCh37')\n", (91417, 91427), True, 'import hail as hl\n'), ((96513, 96576), 'hail.import_plink', 'hl.import_plink', (["(bfile + '.bed')", "(bfile + '.bim')", "(bfile + '.fam')"], {}), "(bfile + '.bed', bfile + '.bim', bfile + '.fam')\n", (96528, 96576), True, 'import hail as hl\n'), ((96861, 96924), 'hail.import_plink', 'hl.import_plink', (["(bfile + '.bed')", "(bfile + '.bim')", "(bfile + '.fam')"], {}), "(bfile + '.bed', bfile + '.bim', bfile + '.fam')\n", (96876, 96924), True, 'import hail as hl\n'), ((97147, 97241), 'hail.import_plink', 'hl.import_plink', (["(bfile + '.bed')", "(bfile + '.bim')", "(bfile + '.fam')"], {'a2_reference': 'a2_reference'}), "(bfile + '.bed', bfile + '.bim', bfile + '.fam',\n a2_reference=a2_reference)\n", (97162, 97241), True, 'import hail as hl\n'), ((98860, 98879), 'hail.tlocus', 'hl.tlocus', (['"""GRCh37"""'], {}), "('GRCh37')\n", (98869, 98879), True, 'import hail as hl\n'), ((1215, 1234), 'hail.utils.uri_path', 'utils.uri_path', (['vcf'], {}), '(vcf)\n', (1229, 1234), True, 'import hail.utils as utils\n'), ((1260, 1285), 'hail.utils.uri_path', 'utils.uri_path', (['plinkpath'], {}), '(plinkpath)\n', (1274, 1285), True, 'import hail.utils as utils\n'), ((4664, 4695), 'hail.impute_sex', 'hl.impute_sex', (['ds.GT'], {'aaf': '"""aaf"""'}), "(ds.GT, aaf='aaf')\n", (4677, 4695), True, 'import hail as hl\n'), ((5495, 5574), 'hail.linear_regression', 'hl.linear_regression', ([], {'y': 'mt.pheno', 'x': 'mt.x', 'covariates': '[mt.cov.Cov1, mt.cov.Cov2]'}), '(y=mt.pheno, x=mt.x, covariates=[mt.cov.Cov1, mt.cov.Cov2])\n', (5515, 5574), True, 'import hail as hl\n'), ((5653, 5739), 'hail.linear_regression', 
'hl.linear_regression', ([], {'y': '[mt.pheno]', 'x': 'mt.x', 'covariates': '[mt.cov.Cov1, mt.cov.Cov2]'}), '(y=[mt.pheno], x=mt.x, covariates=[mt.cov.Cov1, mt.cov.\n Cov2])\n', (5673, 5739), True, 'import hail as hl\n'), ((5816, 5912), 'hail.linear_regression', 'hl.linear_regression', ([], {'y': '[mt.pheno, mt.pheno]', 'x': 'mt.x', 'covariates': '[mt.cov.Cov1, mt.cov.Cov2]'}), '(y=[mt.pheno, mt.pheno], x=mt.x, covariates=[mt.cov.\n Cov1, mt.cov.Cov2])\n', (5836, 5912), True, 'import hail as hl\n'), ((6952, 6998), 'hail.agg.collect', 'hl.agg.collect', (['(mt.locus.position, mt.linreg)'], {}), '((mt.locus.position, mt.linreg))\n', (6966, 6998), True, 'import hail as hl\n'), ((8933, 8952), 'hail.pl_dosage', 'hl.pl_dosage', (['mt.PL'], {}), '(mt.PL)\n', (8945, 8952), True, 'import hail as hl\n'), ((9074, 9120), 'hail.agg.collect', 'hl.agg.collect', (['(mt.locus.position, mt.linreg)'], {}), '((mt.locus.position, mt.linreg))\n', (9088, 9120), True, 'import hail as hl\n'), ((10692, 10711), 'hail.gp_dosage', 'hl.gp_dosage', (['mt.GP'], {}), '(mt.GP)\n', (10704, 10711), True, 'import hail as hl\n'), ((10833, 10879), 'hail.agg.collect', 'hl.agg.collect', (['(mt.locus.position, mt.linreg)'], {}), '((mt.locus.position, mt.linreg))\n', (10847, 10879), True, 'import hail as hl\n'), ((12315, 12361), 'hail.agg.collect', 'hl.agg.collect', (['(mt.locus.position, mt.linreg)'], {}), '((mt.locus.position, mt.linreg))\n', (12329, 12361), True, 'import hail as hl\n'), ((13881, 13927), 'hail.agg.collect', 'hl.agg.collect', (['(mt.locus.position, mt.linreg)'], {}), '((mt.locus.position, mt.linreg))\n', (13895, 13927), True, 'import hail as hl\n'), ((15542, 15588), 'hail.agg.collect', 'hl.agg.collect', (['(mt.locus.position, mt.linreg)'], {}), '((mt.locus.position, mt.linreg))\n', (15556, 15588), True, 'import hail as hl\n'), ((19528, 19574), 'hail.agg.collect', 'hl.agg.collect', (['(mt.locus.position, mt.logreg)'], {}), '((mt.locus.position, mt.logreg))\n', (19542, 19574), True, 'import 
hail as hl\n'), ((20264, 20283), 'numpy.isnan', 'np.isnan', (['r.p_value'], {}), '(r.p_value)\n', (20272, 20283), True, 'import numpy as np\n'), ((21309, 21328), 'hail.pl_dosage', 'hl.pl_dosage', (['mt.PL'], {}), '(mt.PL)\n', (21321, 21328), True, 'import hail as hl\n'), ((21467, 21513), 'hail.agg.collect', 'hl.agg.collect', (['(mt.locus.position, mt.logreg)'], {}), '((mt.locus.position, mt.logreg))\n', (21481, 21513), True, 'import hail as hl\n'), ((22175, 22194), 'numpy.isnan', 'np.isnan', (['r.p_value'], {}), '(r.p_value)\n', (22183, 22194), True, 'import numpy as np\n'), ((23299, 23318), 'hail.gp_dosage', 'hl.gp_dosage', (['mt.GP'], {}), '(mt.GP)\n', (23311, 23318), True, 'import hail as hl\n'), ((23457, 23503), 'hail.agg.collect', 'hl.agg.collect', (['(mt.locus.position, mt.logreg)'], {}), '((mt.locus.position, mt.logreg))\n', (23471, 23503), True, 'import hail as hl\n'), ((24165, 24184), 'numpy.isnan', 'np.isnan', (['r.p_value'], {}), '(r.p_value)\n', (24173, 24184), True, 'import numpy as np\n'), ((25362, 25408), 'hail.agg.collect', 'hl.agg.collect', (['(mt.locus.position, mt.logreg)'], {}), '((mt.locus.position, mt.logreg))\n', (25376, 25408), True, 'import hail as hl\n'), ((25942, 25961), 'numpy.isnan', 'np.isnan', (['r.p_value'], {}), '(r.p_value)\n', (25950, 25961), True, 'import numpy as np\n'), ((27143, 27189), 'hail.agg.collect', 'hl.agg.collect', (['(mt.locus.position, mt.logreg)'], {}), '((mt.locus.position, mt.logreg))\n', (27157, 27189), True, 'import hail as hl\n'), ((29967, 30010), 'hail.agg.collect', 'hl.agg.collect', (['(mt.locus.position, mt.row)'], {}), '((mt.locus.position, mt.row))\n', (29981, 30010), True, 'import hail as hl\n'), ((34382, 34406), 'hail.is_defined', 'hl.is_defined', (['et.is_dad'], {}), '(et.is_dad)\n', (34395, 34406), True, 'import hail as hl\n'), ((34440, 34464), 'hail.is_defined', 'hl.is_defined', (['et.is_mom'], {}), '(et.is_mom)\n', (34453, 34464), True, 'import hail as hl\n'), ((34780, 34795), 'hail.len', 'hl.len', 
(['et.data'], {}), '(et.data)\n', (34786, 34795), True, 'import hail as hl\n'), ((35768, 35796), 'hail.is_defined', 'hl.is_defined', (['e_cols.is_dad'], {}), '(e_cols.is_dad)\n', (35781, 35796), True, 'import hail as hl\n'), ((35838, 35866), 'hail.is_defined', 'hl.is_defined', (['e_cols.is_mom'], {}), '(e_cols.is_mom)\n', (35851, 35866), True, 'import hail as hl\n'), ((36311, 36355), 'hail.trio_matrix', 'hl.trio_matrix', (['mt', 'ped'], {'complete_trios': '(True)'}), '(mt, ped, complete_trios=True)\n', (36325, 36355), True, 'import hail as hl\n'), ((37169, 37184), 'hail.Call', 'hl.Call', (['[0, 0]'], {}), '([0, 0])\n', (37176, 37184), True, 'import hail as hl\n'), ((37251, 37266), 'hail.Call', 'hl.Call', (['[1, 1]'], {}), '([1, 1])\n', (37258, 37266), True, 'import hail as hl\n'), ((37333, 37348), 'hail.Call', 'hl.Call', (['[0, 1]'], {}), '([0, 1])\n', (37340, 37348), True, 'import hail as hl\n'), ((37494, 37509), 'hail.Call', 'hl.Call', (['[1, 2]'], {}), '([1, 2])\n', (37501, 37509), True, 'import hail as hl\n'), ((37578, 37593), 'hail.Call', 'hl.Call', (['[2, 2]'], {}), '([2, 2])\n', (37585, 37593), True, 'import hail as hl\n'), ((37662, 37677), 'hail.Call', 'hl.Call', (['[0, 1]'], {}), '([0, 1])\n', (37669, 37677), True, 'import hail as hl\n'), ((37746, 37761), 'hail.Call', 'hl.Call', (['[1, 1]'], {}), '([1, 1])\n', (37753, 37761), True, 'import hail as hl\n'), ((37926, 37948), 'hail.parse_variant', 'hl.parse_variant', (['ht.v'], {}), '(ht.v)\n', (37942, 37948), True, 'import hail as hl\n'), ((40245, 40265), 'hail.hadoop_open', 'hl.hadoop_open', (['path'], {}), '(path)\n', (40259, 40265), True, 'import hail as hl\n'), ((40541, 40561), 'hail.hadoop_open', 'hl.hadoop_open', (['path'], {}), '(path)\n', (40555, 40561), True, 'import hail as hl\n'), ((40925, 40948), 'hail.utils.hadoop_open', 'utils.hadoop_open', (['path'], {}), '(path)\n', (40942, 40948), True, 'import hail.utils as utils\n'), ((41368, 41397), 'hail.utils.hadoop_open', 'utils.hadoop_open', (['path', 
'"""rb"""'], {}), "(path, 'rb')\n", (41385, 41397), True, 'import hail.utils as utils\n'), ((43072, 43094), 'hail.utils.uri_path', 'utils.uri_path', (['b_file'], {}), '(b_file)\n', (43086, 43094), True, 'import hail.utils as utils\n'), ((43096, 43118), 'hail.utils.uri_path', 'utils.uri_path', (['p_file'], {}), '(p_file)\n', (43110, 43118), True, 'import hail.utils as utils\n'), ((43711, 43733), 'hail.utils.uri_path', 'utils.uri_path', (['b_file'], {}), '(b_file)\n', (43725, 43733), True, 'import hail.utils as utils\n'), ((43735, 43757), 'hail.utils.uri_path', 'utils.uri_path', (['p_file'], {}), '(p_file)\n', (43749, 43757), True, 'import hail.utils as utils\n'), ((44323, 44345), 'hail.utils.uri_path', 'utils.uri_path', (['b_file'], {}), '(b_file)\n', (44337, 44345), True, 'import hail.utils as utils\n'), ((44347, 44369), 'hail.utils.uri_path', 'utils.uri_path', (['p_file'], {}), '(p_file)\n', (44361, 44369), True, 'import hail.utils as utils\n'), ((47947, 47965), 'hail.len', 'hl.len', (['mt.alleles'], {}), '(mt.alleles)\n', (47953, 47965), True, 'import hail as hl\n'), ((48294, 48314), 'hail.is_defined', 'hl.is_defined', (['mt.GT'], {}), '(mt.GT)\n', (48307, 48314), True, 'import hail as hl\n'), ((48986, 49004), 'numpy.subtract', 'np.subtract', (['a', 'ms'], {}), '(a, ms)\n', (48997, 49004), True, 'import numpy as np\n'), ((49360, 49377), 'numpy.multiply', 'np.multiply', (['s', 's'], {}), '(s, s)\n', (49371, 49377), True, 'import numpy as np\n'), ((50029, 50050), 'hail.str', 'hl.str', (['mt.col_key[0]'], {}), '(mt.col_key[0])\n', (50035, 50050), True, 'import hail as hl\n'), ((51161, 51182), 'hail.str', 'hl.str', (['mt.sample_idx'], {}), '(mt.sample_idx)\n', (51167, 51182), True, 'import hail as hl\n'), ((51199, 51228), 'hail.pc_relate', 'hl.pc_relate', (['mt.GT', '(0.0)'], {'k': '(2)'}), '(mt.GT, 0.0, k=2)\n', (51211, 51228), True, 'import hail as hl\n'), ((52192, 52279), 'hail.pc_relate', 'hl.pc_relate', (['mt.GT', '(0.05)'], {'k': '(2)', 'min_kinship': '(0.01)', 
'statistics': '"""kin2"""', 'block_size': '(128)'}), "(mt.GT, 0.05, k=2, min_kinship=0.01, statistics='kin2',\n block_size=128)\n", (52204, 52279), True, 'import hail as hl\n'), ((52465, 52551), 'hail.pc_relate', 'hl.pc_relate', (['mt.GT', '(0.02)'], {'k': '(3)', 'min_kinship': '(0.1)', 'statistics': '"""kin20"""', 'block_size': '(64)'}), "(mt.GT, 0.02, k=3, min_kinship=0.1, statistics='kin20',\n block_size=64)\n", (52477, 52551), True, 'import hail as hl\n'), ((56418, 56489), 'hail.utils.Struct', 'hl.utils.Struct', ([], {'pat_id': '"""Dad1"""', 'mat_id': '"""Mom1"""', 'errors': '(41)', 'snp_errors': '(39)'}), "(pat_id='Dad1', mat_id='Mom1', errors=41, snp_errors=39)\n", (56433, 56489), True, 'import hail as hl\n'), ((56520, 56589), 'hail.utils.Struct', 'hl.utils.Struct', ([], {'pat_id': '"""Dad2"""', 'mat_id': '"""Mom2"""', 'errors': '(0)', 'snp_errors': '(0)'}), "(pat_id='Dad2', mat_id='Mom2', errors=0, snp_errors=0)\n", (56535, 56589), True, 'import hail as hl\n'), ((56751, 56802), 'hail.utils.Struct', 'hl.utils.Struct', ([], {'s': '"""Son1"""', 'errors': '(23)', 'snp_errors': '(22)'}), "(s='Son1', errors=23, snp_errors=22)\n", (56766, 56802), True, 'import hail as hl\n'), ((56833, 56884), 'hail.utils.Struct', 'hl.utils.Struct', ([], {'s': '"""Dtr1"""', 'errors': '(18)', 'snp_errors': '(17)'}), "(s='Dtr1', errors=18, snp_errors=17)\n", (56848, 56884), True, 'import hail as hl\n'), ((56915, 56966), 'hail.utils.Struct', 'hl.utils.Struct', ([], {'s': '"""Dad1"""', 'errors': '(19)', 'snp_errors': '(18)'}), "(s='Dad1', errors=19, snp_errors=18)\n", (56930, 56966), True, 'import hail as hl\n'), ((56997, 57048), 'hail.utils.Struct', 'hl.utils.Struct', ([], {'s': '"""Mom1"""', 'errors': '(22)', 'snp_errors': '(21)'}), "(s='Mom1', errors=22, snp_errors=21)\n", (57012, 57048), True, 'import hail as hl\n'), ((57079, 57128), 'hail.utils.Struct', 'hl.utils.Struct', ([], {'s': '"""Dad2"""', 'errors': '(0)', 'snp_errors': '(0)'}), "(s='Dad2', errors=0, snp_errors=0)\n", 
(57094, 57128), True, 'import hail as hl\n'), ((57159, 57208), 'hail.utils.Struct', 'hl.utils.Struct', ([], {'s': '"""Mom2"""', 'errors': '(0)', 'snp_errors': '(0)'}), "(s='Mom2', errors=0, snp_errors=0)\n", (57174, 57208), True, 'import hail as hl\n'), ((57239, 57288), 'hail.utils.Struct', 'hl.utils.Struct', ([], {'s': '"""Son2"""', 'errors': '(0)', 'snp_errors': '(0)'}), "(s='Son2', errors=0, snp_errors=0)\n", (57254, 57288), True, 'import hail as hl\n'), ((60886, 60903), 'hail.utils.uri_path', 'utils.uri_path', (['f'], {}), '(f)\n', (60900, 60903), True, 'import hail.utils as utils\n'), ((60905, 60923), 'hail.utils.uri_path', 'utils.uri_path', (['f2'], {}), '(f2)\n', (60919, 60923), True, 'import hail.utils as utils\n'), ((60939, 60987), 'hail.import_table', 'hl.import_table', (['f2'], {'force_bgz': '(True)', 'impute': '(True)'}), '(f2, force_bgz=True, impute=True)\n', (60954, 60987), True, 'import hail as hl\n'), ((66144, 66167), 'hail.import_fam', 'hl.import_fam', (['fam_file'], {}), '(fam_file)\n', (66157, 66167), True, 'import hail as hl\n'), ((67062, 67099), 'hail.utils.uri_path', 'utils.uri_path', (["(plink_output + '.bim')"], {}), "(plink_output + '.bim')\n", (67076, 67099), True, 'import hail.utils as utils\n'), ((68339, 68411), 'hail.import_table', 'hl.import_table', (["(out1 + '.fam')"], {'no_header': '(True)', 'impute': '(False)', 'missing': '""""""'}), "(out1 + '.fam', no_header=True, impute=False, missing='')\n", (68354, 68411), True, 'import hail as hl\n'), ((68466, 68526), 'hail.import_table', 'hl.import_table', (["(out1 + '.bim')"], {'no_header': '(True)', 'impute': '(False)'}), "(out1 + '.bim', no_header=True, impute=False)\n", (68481, 68526), True, 'import hail as hl\n'), ((69201, 69273), 'hail.import_table', 'hl.import_table', (["(out2 + '.fam')"], {'no_header': '(True)', 'impute': '(False)', 'missing': '""""""'}), "(out2 + '.fam', no_header=True, impute=False, missing='')\n", (69216, 69273), True, 'import hail as hl\n'), ((69714, 69786), 
'hail.import_table', 'hl.import_table', (["(out3 + '.fam')"], {'no_header': '(True)', 'impute': '(False)', 'missing': '""""""'}), "(out3 + '.fam', no_header=True, impute=False, missing='')\n", (69729, 69786), True, 'import hail as hl\n'), ((70226, 70286), 'hail.import_table', 'hl.import_table', (["(out4 + '.bim')"], {'no_header': '(True)', 'impute': '(False)'}), "(out4 + '.bim', no_header=True, impute=False)\n", (70241, 70286), True, 'import hail as hl\n'), ((70526, 70539), 'hail.call', 'hl.call', (['(0)', '(0)'], {}), '(0, 0)\n', (70533, 70539), True, 'import hail as hl\n'), ((71002, 71023), 'hail.utils.new_temp_file', 'utils.new_temp_file', ([], {}), '()\n', (71021, 71023), True, 'import hail.utils as utils\n'), ((71216, 71237), 'hail.utils.new_temp_file', 'utils.new_temp_file', ([], {}), '()\n', (71235, 71237), True, 'import hail.utils as utils\n'), ((72346, 72365), 'hail.str', 'hl.str', (['gen.col_idx'], {}), '(gen.col_idx)\n', (72352, 72365), True, 'import hail as hl\n'), ((72371, 72390), 'hail.str', 'hl.str', (['gen.col_idx'], {}), '(gen.col_idx)\n', (72377, 72390), True, 'import hail as hl\n'), ((72433, 72452), 'hail.str', 'hl.str', (['gen.row_idx'], {}), '(gen.row_idx)\n', (72439, 72452), True, 'import hail as hl\n'), ((72459, 72478), 'hail.str', 'hl.str', (['gen.row_idx'], {}), '(gen.row_idx)\n', (72465, 72478), True, 'import hail as hl\n'), ((73936, 73957), 'hail.is_nan', 'hl.is_nan', (['truth.Pval'], {}), '(truth.Pval)\n', (73945, 73957), True, 'import hail as hl\n'), ((74544, 74559), 'hail.int64', 'hl.int64', (['t.idx'], {}), '(t.idx)\n', (74552, 74559), True, 'import hail as hl\n'), ((74563, 74583), 'hail.int64', 'hl.int64', (['(t.idx + 10)'], {}), '(t.idx + 10)\n', (74571, 74583), True, 'import hail as hl\n'), ((74594, 74611), 'hail.float32', 'hl.float32', (['t.idx'], {}), '(t.idx)\n', (74604, 74611), True, 'import hail as hl\n'), ((75017, 75074), 'hail.maximal_independent_set', 'hl.maximal_independent_set', (['graph.i', 'graph.bad_type', '(True)'], 
{}), '(graph.i, graph.bad_type, True)\n', (75043, 75074), True, 'import hail as hl\n'), ((80556, 80625), 'hail.ld_prune', 'hl.ld_prune', (['ds.GT'], {'r2': '(0.2)', 'bp_window_size': '(1000000)', 'memory_per_core': '(0)'}), '(ds.GT, r2=0.2, bp_window_size=1000000, memory_per_core=0)\n', (80567, 80625), True, 'import hail as hl\n'), ((81243, 81334), 'hail.balding_nichols_model', 'hl.balding_nichols_model', ([], {'n_populations': '(1)', 'n_samples': '(50)', 'n_variants': '(10)', 'n_partitions': '(10)'}), '(n_populations=1, n_samples=50, n_variants=10,\n n_partitions=10)\n', (81267, 81334), True, 'import hail as hl\n'), ((82363, 82416), 'hail.struct', 'hl.struct', ([], {'i': "row['i']", 'j': "row['j']", 'entry': "row['entry']"}), "(i=row['i'], j=row['j'], entry=row['entry'])\n", (82372, 82416), True, 'import hail as hl\n'), ((83487, 83534), 'hail.parse_locus_interval', 'hl.parse_locus_interval', (['"""20:10639222-10644700"""'], {}), "('20:10639222-10644700')\n", (83510, 83534), True, 'import hail as hl\n'), ((83566, 83613), 'hail.parse_locus_interval', 'hl.parse_locus_interval', (['"""20:10644700-10644705"""'], {}), "('20:10644700-10644705')\n", (83589, 83613), True, 'import hail as hl\n'), ((83809, 83856), 'hail.parse_locus_interval', 'hl.parse_locus_interval', (['"""20:10644700-10644705"""'], {}), "('20:10644700-10644705')\n", (83832, 83856), True, 'import hail as hl\n'), ((83958, 84007), 'hail.parse_locus_interval', 'hl.parse_locus_interval', (['"""[20:10019093-10026348]"""'], {}), "('[20:10019093-10026348]')\n", (83981, 84007), True, 'import hail as hl\n'), ((84036, 84085), 'hail.parse_locus_interval', 'hl.parse_locus_interval', (['"""[20:17705793-17716416]"""'], {}), "('[20:17705793-17716416]')\n", (84059, 84085), True, 'import hail as hl\n'), ((85010, 85062), 'hail.stats.TruncatedBetaDist', 'TruncatedBetaDist', ([], {'a': '(0.01)', 'b': '(2.0)', 'min': '(0.05)', 'max': '(0.95)'}), '(a=0.01, b=2.0, min=0.05, max=0.95)\n', (85027, 85062), False, 'from hail.stats 
import TruncatedBetaDist\n'), ((88217, 88263), 'hail.tstruct', 'hl.tstruct', ([], {'contig': 'hl.tstr', 'position': 'hl.tint32'}), '(contig=hl.tstr, position=hl.tint32)\n', (88227, 88263), True, 'import hail as hl\n'), ((90916, 90962), 'hail.tstruct', 'hl.tstruct', ([], {'contig': 'hl.tstr', 'position': 'hl.tint32'}), '(contig=hl.tstr, position=hl.tint32)\n', (90926, 90962), True, 'import hail as hl\n'), ((91631, 91677), 'hail.tstruct', 'hl.tstruct', ([], {'contig': 'hl.tstr', 'position': 'hl.tint32'}), '(contig=hl.tstr, position=hl.tint32)\n', (91641, 91677), True, 'import hail as hl\n'), ((92446, 92488), 'hail.agg.all', 'hl.agg.all', (['(mt.numeric_array == [1.5, 2.5])'], {}), '(mt.numeric_array == [1.5, 2.5])\n', (92456, 92488), True, 'import hail as hl\n'), ((95642, 95658), 'hail.null', 'hl.null', (['hl.tstr'], {}), '(hl.tstr)\n', (95649, 95658), True, 'import hail as hl\n'), ((95667, 95683), 'hail.null', 'hl.null', (['hl.tstr'], {}), '(hl.tstr)\n', (95674, 95683), True, 'import hail as hl\n'), ((95692, 95708), 'hail.null', 'hl.null', (['hl.tstr'], {}), '(hl.tstr)\n', (95699, 95708), True, 'import hail as hl\n'), ((95748, 95765), 'hail.null', 'hl.null', (['hl.tbool'], {}), '(hl.tbool)\n', (95755, 95765), True, 'import hail as hl\n'), ((95775, 95792), 'hail.null', 'hl.null', (['hl.tbool'], {}), '(hl.tbool)\n', (95782, 95792), True, 'import hail as hl\n'), ((96216, 96250), 'hail.agg.all', 'hl.agg.all', (['(mt.cm_position == 15.0)'], {}), '(mt.cm_position == 15.0)\n', (96226, 96250), True, 'import hail as hl\n'), ((98497, 98648), 'hail.import_plink', 'hl.import_plink', (["(bfile + '.bed')", "(bfile + '.bim')", "(bfile + '.fam')"], {'a2_reference': '(True)', 'contig_recoding': "{'chr22': '22'}", 'reference_genome': '"""GRCh37"""'}), "(bfile + '.bed', bfile + '.bim', bfile + '.fam',\n a2_reference=True, contig_recoding={'chr22': '22'}, reference_genome=\n 'GRCh37')\n", (98512, 98648), True, 'import hail as hl\n'), ((99158, 99204), 'hail.tstruct', 'hl.tstruct', ([], 
{'contig': 'hl.tstr', 'position': 'hl.tint32'}), '(contig=hl.tstr, position=hl.tint32)\n', (99168, 99204), True, 'import hail as hl\n'), ((101982, 102011), 'hail.locus', 'hl.locus', (['"""1"""', '(mt.row_idx + 1)'], {}), "('1', mt.row_idx + 1)\n", (101990, 102011), True, 'import hail as hl\n'), ((102144, 102169), 'hail.window_by_locus', 'hl.window_by_locus', (['mt', '(5)'], {}), '(mt, 5)\n', (102162, 102169), True, 'import hail as hl\n'), ((102577, 102647), 'hail.all', 'hl.all', (['(lambda x: x.e_col_idx == entries.col_idx)', 'entries.prev_entries'], {}), '(lambda x: x.e_col_idx == entries.col_idx, entries.prev_entries)\n', (102583, 102647), True, 'import hail as hl\n'), ((2260, 2304), 'hail.identity_by_descent', 'hl.identity_by_descent', (['ds'], {'min': 'min', 'max': 'max'}), '(ds, min=min, max=max)\n', (2282, 2304), True, 'import hail as hl\n'), ((4637, 4657), 'hail.impute_sex', 'hl.impute_sex', (['ds.GT'], {}), '(ds.GT)\n', (4650, 4657), True, 'import hail as hl\n'), ((17578, 17591), 'hail.is_nan', 'hl.is_nan', (['x1'], {}), '(x1)\n', (17587, 17591), True, 'import hail as hl\n'), ((17594, 17607), 'hail.is_nan', 'hl.is_nan', (['x2'], {}), '(x2)\n', (17603, 17607), True, 'import hail as hl\n'), ((17612, 17627), 'hail.abs', 'hl.abs', (['(x1 - x2)'], {}), '(x1 - x2)\n', (17618, 17627), True, 'import hail as hl\n'), ((35394, 35418), 'hail.is_defined', 'hl.is_defined', (['tt.data.g'], {}), '(tt.data.g)\n', (35407, 35418), True, 'import hail as hl\n'), ((42380, 42405), 'hail.is_defined', 'hl.is_defined', (['dataset.GT'], {}), '(dataset.GT)\n', (42393, 42405), True, 'import hail as hl\n'), ((46954, 46972), 'hail.utils.uri_path', 'utils.uri_path', (['fn'], {}), '(fn)\n', (46968, 46972), True, 'import hail.utils as utils\n'), ((48102, 48122), 'hail.is_defined', 'hl.is_defined', (['mt.GT'], {}), '(mt.GT)\n', (48115, 48122), True, 'import hail as hl\n'), ((48376, 48415), 'hail.sqrt', 'hl.sqrt', (['(mean * (2 - mean) * n_rows / 2)'], {}), '(mean * (2 - mean) * n_rows / 
2)\n', (48383, 48415), True, 'import hail as hl\n'), ((49267, 49277), 'numpy.diag', 'np.diag', (['s'], {}), '(s)\n', (49274, 49277), True, 'import numpy as np\n'), ((54410, 54428), 'hail.tarray', 'hl.tarray', (['hl.tstr'], {}), '(hl.tstr)\n', (54419, 54428), True, 'import hail as hl\n'), ((54625, 54643), 'hail.tarray', 'hl.tarray', (['hl.tstr'], {}), '(hl.tstr)\n', (54634, 54643), True, 'import hail as hl\n'), ((55862, 55880), 'hail.tarray', 'hl.tarray', (['hl.tstr'], {}), '(hl.tstr)\n', (55871, 55880), True, 'import hail as hl\n'), ((56015, 56033), 'hail.tarray', 'hl.tarray', (['hl.tstr'], {}), '(hl.tstr)\n', (56024, 56033), True, 'import hail as hl\n'), ((57358, 57374), 'hail.Locus', 'hl.Locus', (['"""1"""', '(1)'], {}), "('1', 1)\n", (57366, 57374), True, 'import hail as hl\n'), ((57403, 57419), 'hail.Locus', 'hl.Locus', (['"""1"""', '(2)'], {}), "('1', 2)\n", (57411, 57419), True, 'import hail as hl\n'), ((57447, 57463), 'hail.Locus', 'hl.Locus', (['"""X"""', '(1)'], {}), "('X', 1)\n", (57455, 57463), True, 'import hail as hl\n'), ((57491, 57507), 'hail.Locus', 'hl.Locus', (['"""X"""', '(3)'], {}), "('X', 3)\n", (57499, 57507), True, 'import hail as hl\n'), ((57535, 57551), 'hail.Locus', 'hl.Locus', (['"""Y"""', '(1)'], {}), "('Y', 1)\n", (57543, 57551), True, 'import hail as hl\n'), ((57579, 57595), 'hail.Locus', 'hl.Locus', (['"""Y"""', '(3)'], {}), "('Y', 3)\n", (57587, 57595), True, 'import hail as hl\n'), ((65261, 65291), 'hail.is_missing', 'hl.is_missing', (['ann.in_interval'], {}), '(ann.in_interval)\n', (65274, 65291), True, 'import hail as hl\n'), ((65637, 65653), 'hail.null', 'hl.null', (['hl.tstr'], {}), '(hl.tstr)\n', (65644, 65653), True, 'import hail as hl\n'), ((69683, 69695), 'hail.len', 'hl.len', (['ds.s'], {}), '(ds.s)\n', (69689, 69695), True, 'import hail as hl\n'), ((72704, 72743), 'hail.expr.aggregators.fraction', 'agg.fraction', (['(in1.GP == [0.0, 1.0, 0.0])'], {}), '(in1.GP == [0.0, 1.0, 0.0])\n', (72716, 72743), True, 'import 
hail.expr.aggregators as agg\n'), ((73865, 73891), 'hail.is_nan', 'hl.is_nan', (['tdt_tab.p_value'], {}), '(tdt_tab.p_value)\n', (73874, 73891), True, 'import hail as hl\n'), ((75268, 75281), 'hail.literal', 'hl.literal', (['(1)'], {}), '(1)\n', (75278, 75281), True, 'import hail as hl\n'), ((75283, 75296), 'hail.literal', 'hl.literal', (['(2)'], {}), '(2)\n', (75293, 75296), True, 'import hail as hl\n'), ((75709, 75735), 'hail.tstruct', 'hl.tstruct', ([], {'node': 'hl.tint64'}), '(node=hl.tint64)\n', (75719, 75735), True, 'import hail as hl\n'), ((75787, 75799), 'hail.tstruct', 'hl.tstruct', ([], {}), '()\n', (75797, 75799), True, 'import hail as hl\n'), ((76397, 76437), 'hail.tstruct', 'hl.tstruct', ([], {'id': 'hl.tstr', 'is_case': 'hl.tbool'}), '(id=hl.tstr, is_case=hl.tbool)\n', (76407, 76437), True, 'import hail as hl\n'), ((76492, 76532), 'hail.tstruct', 'hl.tstruct', ([], {'id': 'hl.tstr', 'is_case': 'hl.tbool'}), '(id=hl.tstr, is_case=hl.tbool)\n', (76502, 76532), True, 'import hail as hl\n'), ((78499, 78561), 'hail.filter_alleles_hts', 'hl.filter_alleles_hts', (['ds', "(lambda a, i: a == 'G')"], {'subset': '(False)'}), "(ds, lambda a, i: a == 'G', subset=False)\n", (78520, 78561), True, 'import hail as hl\n'), ((79645, 79663), 'hail.sqrt', 'hl.sqrt', (['n_samples'], {}), '(n_samples)\n', (79652, 79663), True, 'import hail as hl\n'), ((83404, 83438), 'hail.filter_intervals', 'hl.filter_intervals', (['ds', 'intervals'], {}), '(ds, intervals)\n', (83423, 83438), True, 'import hail as hl\n'), ((83641, 83675), 'hail.filter_intervals', 'hl.filter_intervals', (['ds', 'intervals'], {}), '(ds, intervals)\n', (83660, 83675), True, 'import hail as hl\n'), ((83724, 83771), 'hail.parse_locus_interval', 'hl.parse_locus_interval', (['"""20:10639222-10644700"""'], {}), "('20:10639222-10644700')\n", (83747, 83771), True, 'import hail as hl\n'), ((83884, 83918), 'hail.filter_intervals', 'hl.filter_intervals', (['ds', 'intervals'], {}), '(ds, intervals)\n', (83903, 83918), 
True, 'import hail as hl\n'), ((84118, 84152), 'hail.filter_intervals', 'hl.filter_intervals', (['ds', 'intervals'], {}), '(ds, intervals)\n', (84137, 84152), True, 'import hail as hl\n'), ((84654, 84688), 'hail.filter_intervals', 'hl.filter_intervals', (['ds', 'intervals'], {}), '(ds, intervals)\n', (84673, 84688), True, 'import hail as hl\n'), ((89593, 89615), 'hail.tarray', 'hl.tarray', (['hl.tfloat64'], {}), '(hl.tfloat64)\n', (89602, 89615), True, 'import hail as hl\n'), ((90039, 90061), 'hail.tarray', 'hl.tarray', (['hl.tfloat64'], {}), '(hl.tfloat64)\n', (90048, 90061), True, 'import hail as hl\n'), ((91173, 91199), 'hail.get_reference', 'hl.get_reference', (['"""GRCh38"""'], {}), "('GRCh38')\n", (91189, 91199), True, 'import hail as hl\n'), ((92247, 92271), 'hail.is_defined', 'hl.is_defined', (['t.info.DS'], {}), '(t.info.DS)\n', (92260, 92271), True, 'import hail as hl\n'), ((92922, 92973), 'hail.agg.all', 'hl.agg.all', (['(mt.negative_float_array == [-0.5, -1.5])'], {}), '(mt.negative_float_array == [-0.5, -1.5])\n', (92932, 92973), True, 'import hail as hl\n'), ((93268, 93291), 'hail.Locus', 'hl.Locus', (['"""X"""', '(16050036)'], {}), "('X', 16050036)\n", (93276, 93291), True, 'import hail as hl\n'), ((93452, 93475), 'hail.Locus', 'hl.Locus', (['"""X"""', '(16061250)'], {}), "('X', 16061250)\n", (93460, 93475), True, 'import hail as hl\n'), ((93656, 93675), 'hail.tlocus', 'hl.tlocus', (['"""GRCh37"""'], {}), "('GRCh37')\n", (93665, 93675), True, 'import hail as hl\n'), ((93685, 93703), 'hail.tarray', 'hl.tarray', (['hl.tstr'], {}), '(hl.tstr)\n', (93694, 93703), True, 'import hail as hl\n'), ((93760, 93778), 'hail.tarray', 'hl.tarray', (['hl.tint'], {}), '(hl.tint)\n', (93769, 93778), True, 'import hail as hl\n'), ((93784, 93806), 'hail.tarray', 'hl.tarray', (['hl.tfloat64'], {}), '(hl.tfloat64)\n', (93793, 93806), True, 'import hail as hl\n'), ((94218, 94241), 'hail.Locus', 'hl.Locus', (['"""X"""', '(16050036)'], {}), "('X', 16050036)\n", (94226, 
94241), True, 'import hail as hl\n'), ((94425, 94448), 'hail.Locus', 'hl.Locus', (['"""X"""', '(16050036)'], {}), "('X', 16050036)\n", (94433, 94448), True, 'import hail as hl\n'), ((94625, 94648), 'hail.Locus', 'hl.Locus', (['"""X"""', '(16061250)'], {}), "('X', 16061250)\n", (94633, 94648), True, 'import hail as hl\n'), ((94853, 94876), 'hail.Locus', 'hl.Locus', (['"""X"""', '(16061250)'], {}), "('X', 16061250)\n", (94861, 94876), True, 'import hail as hl\n'), ((95073, 95092), 'hail.tlocus', 'hl.tlocus', (['"""GRCh37"""'], {}), "('GRCh37')\n", (95082, 95092), True, 'import hail as hl\n'), ((95102, 95120), 'hail.tarray', 'hl.tarray', (['hl.tstr'], {}), '(hl.tstr)\n', (95111, 95120), True, 'import hail as hl\n'), ((95187, 95205), 'hail.tarray', 'hl.tarray', (['hl.tint'], {}), '(hl.tint)\n', (95196, 95205), True, 'import hail as hl\n'), ((95210, 95228), 'hail.tarray', 'hl.tarray', (['hl.tint'], {}), '(hl.tint)\n', (95219, 95228), True, 'import hail as hl\n'), ((98305, 98331), 'hail.get_reference', 'hl.get_reference', (['"""GRCh38"""'], {}), "('GRCh38')\n", (98321, 98331), True, 'import hail as hl\n'), ((102472, 102505), 'hail.zip_with_index', 'hl.zip_with_index', (['rows.prev_rows'], {}), '(rows.prev_rows)\n', (102489, 102505), True, 'import hail as hl\n'), ((102792, 102831), 'hail.zip_with_index', 'hl.zip_with_index', (['entries.prev_entries'], {}), '(entries.prev_entries)\n', (102809, 102831), True, 'import hail as hl\n'), ((4570, 4603), 'hail.expr.aggregators.call_stats', 'agg.call_stats', (['ds.GT', 'ds.alleles'], {}), '(ds.GT, ds.alleles)\n', (4584, 4603), True, 'import hail.expr.aggregators as agg\n'), ((34863, 34907), 'hail.trio_matrix', 'hl.trio_matrix', (['mt', 'ped'], {'complete_trios': '(True)'}), '(mt, ped, complete_trios=True)\n', (34877, 34907), True, 'import hail as hl\n'), ((45841, 45867), 'hail.str', 'hl.str', (['dataset.sample_idx'], {}), '(dataset.sample_idx)\n', (45847, 45867), True, 'import hail as hl\n'), ((50671, 50706), 'hail.struct', 
'hl.struct', ([], {'sample_idx': 'plink_kin.ID1'}), '(sample_idx=plink_kin.ID1)\n', (50680, 50706), True, 'import hail as hl\n'), ((50742, 50777), 'hail.struct', 'hl.struct', ([], {'sample_idx': 'plink_kin.ID2'}), '(sample_idx=plink_kin.ID2)\n', (50751, 50777), True, 'import hail as hl\n'), ((57876, 57892), 'hail.Locus', 'hl.Locus', (['"""1"""', '(1)'], {}), "('1', 1)\n", (57884, 57892), True, 'import hail as hl\n'), ((57977, 57993), 'hail.Locus', 'hl.Locus', (['"""1"""', '(2)'], {}), "('1', 2)\n", (57985, 57993), True, 'import hail as hl\n'), ((58077, 58093), 'hail.Locus', 'hl.Locus', (['"""X"""', '(1)'], {}), "('X', 1)\n", (58085, 58093), True, 'import hail as hl\n'), ((58177, 58193), 'hail.Locus', 'hl.Locus', (['"""X"""', '(3)'], {}), "('X', 3)\n", (58185, 58193), True, 'import hail as hl\n'), ((58277, 58293), 'hail.Locus', 'hl.Locus', (['"""Y"""', '(1)'], {}), "('Y', 1)\n", (58285, 58293), True, 'import hail as hl\n'), ((58377, 58393), 'hail.Locus', 'hl.Locus', (['"""Y"""', '(3)'], {}), "('Y', 3)\n", (58385, 58393), True, 'import hail as hl\n'), ((60379, 60412), 'hail.flatten', 'hl.flatten', (['cols_conc.concordance'], {}), '(cols_conc.concordance)\n', (60389, 60412), True, 'import hail as hl\n'), ((60485, 60518), 'hail.flatten', 'hl.flatten', (['rows_conc.concordance'], {}), '(rows_conc.concordance)\n', (60495, 60518), True, 'import hail as hl\n'), ((72516, 72592), 'hail.import_gen', 'hl.import_gen', (["(out1 + '.gen')"], {'sample_file': "(out1 + '.sample')", 'min_partitions': '(3)'}), "(out1 + '.gen', sample_file=out1 + '.sample', min_partitions=3)\n", (72529, 72592), True, 'import hail as hl\n'), ((73012, 73031), 'hail.str', 'hl.str', (['in1.col_idx'], {}), '(in1.col_idx)\n', (73018, 73031), True, 'import hail as hl\n'), ((73546, 73583), 'hail.locus', 'hl.locus', (['truth.CHROM', 'truth.POSITION'], {}), '(truth.CHROM, truth.POSITION)\n', (73554, 73583), True, 'import hail as hl\n'), ((74181, 74211), 'hail.abs', 'hl.abs', (['(bad.p_value - bad.Pval)'], {}), 
'(bad.p_value - bad.Pval)\n', (74187, 74211), True, 'import hail as hl\n'), ((74278, 74291), 'hail.asc', 'hl.asc', (['bad.v'], {}), '(bad.v)\n', (74284, 74291), True, 'import hail as hl\n'), ((75158, 75182), 'hail.utils.range_table', 'hl.utils.range_table', (['(10)'], {}), '(10)\n', (75178, 75182), True, 'import hail as hl\n'), ((77377, 77418), 'hail.filter_alleles', 'hl.filter_alleles', (['ds', '(lambda a, i: False)'], {}), '(ds, lambda a, i: False)\n', (77394, 77418), True, 'import hail as hl\n'), ((77465, 77505), 'hail.filter_alleles', 'hl.filter_alleles', (['ds', '(lambda a, i: True)'], {}), '(ds, lambda a, i: True)\n', (77482, 77505), True, 'import hail as hl\n'), ((81450, 81488), 'hail.cond', 'hl.cond', (['(ht.p <= 0.5)', 'ht.p', '(1.0 - ht.p)'], {}), '(ht.p <= 0.5, ht.p, 1.0 - ht.p)\n', (81457, 81488), True, 'import hail as hl\n'), ((82476, 82493), 'hail.int64', 'hl.int64', (['table.i'], {}), '(table.i)\n', (82484, 82493), True, 'import hail as hl\n'), ((82528, 82545), 'hail.int64', 'hl.int64', (['table.j'], {}), '(table.j)\n', (82536, 82545), True, 'import hail as hl\n'), ((84340, 84385), 'hail.struct', 'hl.struct', ([], {'locus': 'ds.locus', 'alleles': 'ds.alleles'}), '(locus=ds.locus, alleles=ds.alleles)\n', (84349, 84385), True, 'import hail as hl\n'), ((84485, 84509), 'hail.Locus', 'hl.Locus', (['"""20"""', '(10639222)'], {}), "('20', 10639222)\n", (84493, 84509), True, 'import hail as hl\n'), ((84581, 84605), 'hail.Locus', 'hl.Locus', (['"""20"""', '(10644700)'], {}), "('20', 10644700)\n", (84589, 84605), True, 'import hail as hl\n'), ((87014, 87031), 'hail.null', 'hl.null', (['hl.tbool'], {}), '(hl.tbool)\n', (87021, 87031), True, 'import hail as hl\n'), ((87393, 87412), 'hail.pl_dosage', 'hl.pl_dosage', (['ds.PL'], {}), '(ds.PL)\n', (87405, 87412), True, 'import hail as hl\n'), ((92190, 92214), 'hail.is_defined', 'hl.is_defined', (['t.info.DB'], {}), '(t.info.DB)\n', (92203, 92214), True, 'import hail as hl\n'), ((92829, 92874), 'hail.agg.all', 
'hl.agg.all', (['(mt.negative_int_array == [-1, -2])'], {}), '(mt.negative_int_array == [-1, -2])\n', (92839, 92874), True, 'import hail as hl\n'), ((95497, 95522), 'hail.str', 'hl.str', (['mt.locus.position'], {}), '(mt.locus.position)\n', (95503, 95522), True, 'import hail as hl\n'), ((101327, 101363), 'hail.locus', 'hl.locus', (["truth['Chr']", "truth['Pos']"], {}), "(truth['Chr'], truth['Pos'])\n", (101335, 101363), True, 'import hail as hl\n'), ((101797, 101832), 'hail.abs', 'hl.abs', (['(j.p_de_novo - j.p_de_novo_1)'], {}), '(j.p_de_novo - j.p_de_novo_1)\n', (101803, 101832), True, 'import hail as hl\n'), ((34692, 34751), 'hail.struct', 'hl.struct', ([], {'GT': 'et.GT', 'AD': 'et.AD', 'DP': 'et.DP', 'GQ': 'et.GQ', 'PL': 'et.PL'}), '(GT=et.GT, AD=et.AD, DP=et.DP, GQ=et.GQ, PL=et.PL)\n', (34701, 34751), True, 'import hail as hl\n'), ((49018, 49053), 'numpy.multiply', 'np.multiply', (['(ms / 2.0)', '(1 - ms / 2.0)'], {}), '(ms / 2.0, 1 - ms / 2.0)\n', (49029, 49053), True, 'import numpy as np\n'), ((59918, 59943), 'hail.is_missing', 'hl.is_missing', (['dataset.GT'], {}), '(dataset.GT)\n', (59931, 59943), True, 'import hail as hl\n'), ((74125, 74152), 'hail.abs', 'hl.abs', (['(bad.chi2 - bad.Chi2)'], {}), '(bad.chi2 - bad.Chi2)\n', (74131, 74152), True, 'import hail as hl\n'), ((77707, 77768), 'hail.filter_alleles_hts', 'hl.filter_alleles_hts', (['ds', "(lambda a, i: a == 'T')"], {'subset': '(True)'}), "(ds, lambda a, i: a == 'T', subset=True)\n", (77728, 77768), True, 'import hail as hl\n'), ((77976, 78037), 'hail.filter_alleles_hts', 'hl.filter_alleles_hts', (['ds', "(lambda a, i: a == 'G')"], {'subset': '(True)'}), "(ds, lambda a, i: a == 'G', subset=True)\n", (77997, 78037), True, 'import hail as hl\n'), ((78246, 78308), 'hail.filter_alleles_hts', 'hl.filter_alleles_hts', (['ds', "(lambda a, i: a != 'G')"], {'subset': '(False)'}), "(ds, lambda a, i: a != 'G', subset=False)\n", (78267, 78308), True, 'import hail as hl\n'), ((78618, 78680), 
'hail.filter_alleles_hts', 'hl.filter_alleles_hts', (['ds', "(lambda a, i: a == 'G')"], {'subset': '(False)'}), "(ds, lambda a, i: a == 'G', subset=False)\n", (78639, 78680), True, 'import hail as hl\n'), ((83171, 83218), 'hail.parse_locus_interval', 'hl.parse_locus_interval', (['"""20:10639222-10644705"""'], {}), "('20:10639222-10644705')\n", (83194, 83218), True, 'import hail as hl\n'), ((86341, 86352), 'hail.tlocus', 'hl.tlocus', ([], {}), '()\n', (86350, 86352), True, 'import hail as hl\n'), ((92042, 92083), 'hail.is_defined', 'hl.is_defined', (['t.info.NEGATIVE_TRAIN_SITE'], {}), '(t.info.NEGATIVE_TRAIN_SITE)\n', (92055, 92083), True, 'import hail as hl\n'), ((92116, 92157), 'hail.is_defined', 'hl.is_defined', (['t.info.POSITIVE_TRAIN_SITE'], {}), '(t.info.POSITIVE_TRAIN_SITE)\n', (92129, 92157), True, 'import hail as hl\n'), ((92663, 92696), 'hail.agg.all', 'hl.agg.all', (['(mt.negative_int == -1)'], {}), '(mt.negative_int == -1)\n', (92673, 92696), True, 'import hail as hl\n'), ((92744, 92781), 'hail.agg.all', 'hl.agg.all', (['(mt.negative_float == -1.5)'], {}), '(mt.negative_float == -1.5)\n', (92754, 92781), True, 'import hail as hl\n'), ((97300, 97326), 'hail.variant_qc', 'hl.variant_qc', (['mt_imported'], {}), '(mt_imported)\n', (97313, 97326), True, 'import hail as hl\n'), ((4340, 4357), 'hail.null', 'hl.null', (['hl.tbool'], {}), '(hl.tbool)\n', (4347, 4357), True, 'import hail as hl\n'), ((36236, 36255), 'hail.len', 'hl.len', (['e_cols.data'], {}), '(e_cols.data)\n', (36242, 36255), True, 'import hail as hl\n'), ((53577, 53606), 'hail.rename_duplicates', 'hl.rename_duplicates', (['dataset'], {}), '(dataset)\n', (53597, 53606), True, 'import hail as hl\n'), ((61690, 61763), 'hail.locus_interval', 'hl.locus_interval', (['start.contig', 'start.position', 'end.position', '(True)', '(True)'], {}), '(start.contig, start.position, end.position, True, True)\n', (61707, 61763), True, 'import hail as hl\n'), ((72823, 72842), 'hail.str', 'hl.str', 
(['in1.row_idx'], {}), '(in1.row_idx)\n', (72829, 72842), True, 'import hail as hl\n'), ((72915, 72934), 'hail.str', 'hl.str', (['in1.row_idx'], {}), '(in1.row_idx)\n', (72921, 72934), True, 'import hail as hl\n'), ((80859, 80884), 'hail.expr.aggregators.collect_as_set', 'agg.collect_as_set', (['ds.GT'], {}), '(ds.GT)\n', (80877, 80884), True, 'import hail.expr.aggregators as agg\n'), ((65422, 65431), 'hail.case', 'hl.case', ([], {}), '()\n', (65429, 65431), True, 'import hail as hl\n'), ((76571, 76580), 'hail.case', 'hl.case', ([], {}), '()\n', (76578, 76580), True, 'import hail as hl\n'), ((36504, 36540), 'hail.struct', 'hl.struct', ([], {'role': '(0)', 'sa': 't_cols.proband'}), '(role=0, sa=t_cols.proband)\n', (36513, 36540), True, 'import hail as hl\n'), ((36579, 36614), 'hail.struct', 'hl.struct', ([], {'role': '(1)', 'sa': 't_cols.father'}), '(role=1, sa=t_cols.father)\n', (36588, 36614), True, 'import hail as hl\n'), ((36653, 36688), 'hail.struct', 'hl.struct', ([], {'role': '(2)', 'sa': 't_cols.mother'}), '(role=2, sa=t_cols.mother)\n', (36662, 36688), True, 'import hail as hl\n'), ((34618, 34627), 'hail.case', 'hl.case', ([], {}), '()\n', (34625, 34627), True, 'import hail as hl\n'), ((35983, 35992), 'hail.case', 'hl.case', ([], {}), '()\n', (35990, 35992), True, 'import hail as hl\n')] |
import numpy as np
from keras.models import Sequential
from keras.layers import Dense, Activation, Dropout
from keras.layers import LSTM
from keras.callbacks import ModelCheckpoint
from random import randint
from keras.utils import np_utils
import sys
# Command-line arguments: input corpus path and output path for generated text.
inp = sys.argv[1]
outFile = sys.argv[2]

# Read the whole corpus and lower-case it so the vocabulary stays small.
with open(inp) as f:
    content = f.read().lower()

# Character vocabulary plus the two lookup tables (char -> index, index -> char).
vocab = sorted(set(content))
vocab_indices = {c: i for i, c in enumerate(vocab)}
indices_vocab = dict(enumerate(vocab))

# Slide a window of `seq` characters over the corpus: each window is one
# training pattern and the character right after it is the target.
seq = 50
dataX = [[vocab_indices[ch] for ch in content[i:i + seq]]
         for i in range(len(content) - seq)]
dataY = [vocab_indices[content[i + seq]] for i in range(len(content) - seq)]
n_patterns = len(dataX)

# Shape for the LSTM: (samples, timesteps, features), scaled into [0, 1].
X = np.reshape(dataX, (n_patterns, seq, 1))
X = X / float(len(vocab))
# One-hot encode the target characters.
y = np_utils.to_categorical(dataY)
# Character-level RNN: two stacked LSTMs with dropout, softmax over the vocabulary.
model = Sequential([
    LSTM(256, return_sequences=True, input_shape=(X.shape[1], X.shape[2])),
    Dropout(0.2),
    LSTM(256),
    Dropout(0.2),
    Dense(y.shape[1]),
    Activation('softmax'),
])
model.compile(loss='categorical_crossentropy', optimizer='rmsprop')
model.summary()

# Training section — comment this out once a checkpoint file exists.
# Only the lowest-loss weights are kept on disk.
filepath = 'nn.hdf5'
checkpoint = ModelCheckpoint(filepath, monitor='loss', verbose=1, save_best_only=True, mode='min')
callbacks_list = [checkpoint]
model.fit(X, y, batch_size=128, epochs=10, verbose=2, callbacks=callbacks_list)

# Restore the best checkpointed weights before generating text.
filename = 'nn.hdf5'
model.load_weights(filename)
def sample(preds, temperature=1):
    """Draw one character index from the prediction distribution.

    The probabilities are re-weighted by `temperature` in log space:
    values below 1 sharpen the distribution, values above 1 flatten it,
    and 1 leaves the model's distribution unchanged.
    """
    scaled = np.asarray(preds).astype('float64')
    scaled = np.log(scaled) / temperature
    weights = np.exp(scaled)
    probs = weights / np.sum(weights)
    # Single multinomial draw; argmax recovers the index of the drawn class.
    draw = np.random.multinomial(1, probs, 1)
    return np.argmax(draw)
# Seed the generator with a random window taken from the corpus.
start = randint(0, len(content) - 1 - seq)
text = content[start:start + seq]

# Generate 400 characters, always feeding the last `seq` characters back in.
for _ in range(400):
    window = [vocab_indices[ch] for ch in text[-seq:]]
    x = np.reshape(window, (1, seq, 1))
    x = x / float(len(vocab))
    preds = model.predict(x, verbose=2)[0]
    text += indices_vocab[sample(preds, 1)]
# Persist the generated text. The original chained `.close()` onto the int
# returned by `write()`, which raised AttributeError and left the file
# handle open; a context manager writes and closes it correctly.
with open(outFile, 'w') as out:
    out.write(text)
| [
"numpy.reshape",
"keras.callbacks.ModelCheckpoint",
"numpy.log",
"numpy.asarray",
"numpy.argmax",
"keras.models.Sequential",
"numpy.exp",
"keras.layers.LSTM",
"numpy.random.multinomial",
"keras.utils.np_utils.to_categorical",
"numpy.sum",
"keras.layers.Activation",
"keras.layers.Dense",
"k... | [((753, 792), 'numpy.reshape', 'np.reshape', (['dataX', '(n_patterns, seq, 1)'], {}), '(dataX, (n_patterns, seq, 1))\n', (763, 792), True, 'import numpy as np\n'), ((823, 853), 'keras.utils.np_utils.to_categorical', 'np_utils.to_categorical', (['dataY'], {}), '(dataY)\n', (846, 853), False, 'from keras.utils import np_utils\n'), ((875, 887), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (885, 887), False, 'from keras.models import Sequential\n'), ((1243, 1332), 'keras.callbacks.ModelCheckpoint', 'ModelCheckpoint', (['filepath'], {'monitor': '"""loss"""', 'verbose': '(1)', 'save_best_only': '(True)', 'mode': '"""min"""'}), "(filepath, monitor='loss', verbose=1, save_best_only=True,\n mode='min')\n", (1258, 1332), False, 'from keras.callbacks import ModelCheckpoint\n'), ((898, 968), 'keras.layers.LSTM', 'LSTM', (['(256)'], {'return_sequences': '(True)', 'input_shape': '(X.shape[1], X.shape[2])'}), '(256, return_sequences=True, input_shape=(X.shape[1], X.shape[2]))\n', (902, 968), False, 'from keras.layers import LSTM\n'), ((979, 991), 'keras.layers.Dropout', 'Dropout', (['(0.2)'], {}), '(0.2)\n', (986, 991), False, 'from keras.layers import Dense, Activation, Dropout\n'), ((1003, 1012), 'keras.layers.LSTM', 'LSTM', (['(256)'], {}), '(256)\n', (1007, 1012), False, 'from keras.layers import LSTM\n'), ((1024, 1036), 'keras.layers.Dropout', 'Dropout', (['(0.2)'], {}), '(0.2)\n', (1031, 1036), False, 'from keras.layers import Dense, Activation, Dropout\n'), ((1048, 1065), 'keras.layers.Dense', 'Dense', (['y.shape[1]'], {}), '(y.shape[1])\n', (1053, 1065), False, 'from keras.layers import Dense, Activation, Dropout\n'), ((1077, 1098), 'keras.layers.Activation', 'Activation', (['"""softmax"""'], {}), "('softmax')\n", (1087, 1098), False, 'from keras.layers import Dense, Activation, Dropout\n'), ((1616, 1629), 'numpy.exp', 'np.exp', (['preds'], {}), '(preds)\n', (1622, 1629), True, 'import numpy as np\n'), ((1679, 1713), 'numpy.random.multinomial', 
'np.random.multinomial', (['(1)', 'preds', '(1)'], {}), '(1, preds, 1)\n', (1700, 1713), True, 'import numpy as np\n'), ((1722, 1739), 'numpy.argmax', 'np.argmax', (['probas'], {}), '(probas)\n', (1731, 1739), True, 'import numpy as np\n'), ((1901, 1929), 'numpy.reshape', 'np.reshape', (['new', '(1, seq, 1)'], {}), '(new, (1, seq, 1))\n', (1911, 1929), True, 'import numpy as np\n'), ((1575, 1588), 'numpy.log', 'np.log', (['preds'], {}), '(preds)\n', (1581, 1588), True, 'import numpy as np\n'), ((1651, 1668), 'numpy.sum', 'np.sum', (['exp_preds'], {}), '(exp_preds)\n', (1657, 1668), True, 'import numpy as np\n'), ((1530, 1547), 'numpy.asarray', 'np.asarray', (['preds'], {}), '(preds)\n', (1540, 1547), True, 'import numpy as np\n')] |
import logging
from multiprocessing.managers import SyncManager
from typing import List, Tuple, cast, Union
import numpy as np
import tensorflow as tf
import gpbasics.DataHandling.DataInput as di
import gpbasics.KernelBasics.Kernel as k
import gpbasics.global_parameters as global_param
import gpminference.ChangePointDetection.BaseKernelSCPD as bscpd
import gpminference.KernelSearch.ParallelApproach as pks
import gpminference.KernelSearch.Segment as seg
import gpminference.KernelExpansionStrategies.KernelExpansionStrategy as kexp
from gpminference.KernelSearch import KernelSearch as ks
from gpbasics.KernelBasics import Operators as op
from gpbasics.MeanFunctionBasics import MeanFunction as mf
from gpbasics.Metrics import Metrics as met
from gpbasics.Statistics import GaussianProcess as gp
from gpminference.ChangePointDetection import SequentialCPDetection as scpd
from gpminference.KernelSearch.KernelSearch import KernelSearchType
# Presumably initializes the module's global parameter state before it is
# read — placed before the MatrixHandlingTypes import below; TODO confirm
# whether that import actually depends on it.
global_param.ensure_init()
import gpbasics.Metrics.MatrixHandlingTypes as mht
class SequentialKernelSearch(ks.KernelSearch):
    def __init__(self, data_input: Union[di.AbstractDataInput, List[di.AbstractDataInput]],
                 mean_function: mf.MeanFunction, default_window_size: int,
                 strategy_options: dict, kernelExpansion: kexp.KernelExpansionStrategyType,
                 sequential_cpd: scpd.SequentialChangePointDetectionType, optimize_metric: met.MetricType,
                 model_selection_metric: met.MetricType, p_kernel_expression_replacement: bool,
                 local_approx: mht.GlobalApproximationsType, numerical_matrix_handling: mht.NumericalMatrixHandlingType,
                 covariance_function_repository: List[k.Kernel], random_restart: int = 1, subset_size: int = None):
        """Set up a sequential (divisive) kernel search.

        Args:
            data_input: A single data input, or a list of them. A list is
                interpreted as k-fold inputs and merged back into a single
                DataInput (see inline comments); the folds themselves are
                only used for the local model inference.
            mean_function: Mean function for the search. In the list case it
                is overridden by the mean function of the first fold.
            default_window_size: Initial sliding-window size (in training
                points) used by the sequential search.
            strategy_options: Options forwarded to the local kernel searches.
            kernelExpansion: Kernel-expansion strategy type.
            sequential_cpd: Requested sequential change point detection type.
            optimize_metric: Metric optimized during hyperparameter training.
            model_selection_metric: Metric used to rank candidate models.
            p_kernel_expression_replacement: Whether kernel-expression
                replacement is enabled.
            local_approx: Global approximation type for matrix handling.
            numerical_matrix_handling: Numerical matrix handling type.
            covariance_function_repository: Base kernels available to the search.
            random_restart: Number of random restarts for optimization.
            subset_size: Optional subset size for approximate metrics.
        """
        if isinstance(data_input, list):
            logging.warning("Divisive Gaussian process model inference algorithms currently do not divide a dataset in "
                            "k-fold cross validated manner, but infer the respective local models in that k-fold "
                            "cross validated fashion.")
            # Number of folds is remembered; only the first fold is used to
            # reassemble the dataset (train + test concatenated). Presumably
            # one fold's train and test partitions together span the whole
            # dataset — TODO confirm against the data-splitting code.
            self.k_fold = len(data_input)
            x = tf.concat([data_input[0].data_x_train, data_input[0].data_x_test], axis=0)
            y = tf.concat([data_input[0].data_y_train, data_input[0].data_y_test], axis=0)
            # For one-dimensional inputs, restore sorted order of x (and the
            # matching y) after the concatenation above.
            if x.shape[1] == 1:
                sorted_idx = tf.argsort(tf.reshape(x, [-1, ]))
                x = tf.gather(x, sorted_idx)
                y = tf.gather(y, sorted_idx)
            # Adopt the fold's mean function, overriding the argument, and use
            # the merged data as both train and test set of a fresh DataInput.
            mean_function = data_input[0].mean_function
            data_input = di.DataInput(x, y, x, y)
            data_input.set_mean_function(mean_function)
        else:
            self.k_fold = 1
        super(SequentialKernelSearch, self).__init__(
            kernelExpansion, data_input, mean_function, KernelSearchType.SKS, optimize_metric, model_selection_metric,
            p_kernel_expression_replacement, local_approx, numerical_matrix_handling, covariance_function_repository,
            random_restart, subset_size)
        # Window size used when stepping through the data sequentially.
        self.default_window_size: int = default_window_size
        # Requested CPD type and the detector instantiated from it.
        self.sequential_cpd_type: scpd.SequentialChangePointDetectionType = sequential_cpd
        self.sequential_cpd: scpd.SequentialChangePointDetection = self.get_cpd_by_type()
        # Kept verbatim; copied and adjusted per local search later on.
        self.strategy_options: dict = strategy_options
def get_cpd_by_type(self) -> scpd.SequentialChangePointDetection:
if self.sequential_cpd_type is scpd.SequentialChangePointDetectionType.WhiteNoise:
return bscpd.WhiteNoiseCPD(self)
else:
logging.warning(
"No valid SequentialChangePointDetectionType given (given: %s). "
"Thus falling back to default type: WhiteNoise." % str(self.sequential_cpd_type))
return bscpd.WhiteNoiseCPD(self)
def parallel_kernel_search(
self, data_input: Union[di.AbstractDataInput, List[di.AbstractDataInput]],
optimize_metric: met.MetricType = None, model_selection_metric: met.MetricType = None) \
-> List[gp.AbstractGaussianProcess]:
if optimize_metric is None:
optimize_metric = self.optimize_metric
if model_selection_metric is None:
model_selection_metric = self.model_selection_metric
strategy_options = self.strategy_options.copy()
strategy_options['global_max_depth'] = strategy_options['local_max_depth']
parallel_kernel_search: pks.ParallelKernelSearch = pks.ParallelKernelSearch(
self.strategy_type, strategy_options, data_input, self.mean_function,
optimize_metric, model_selection_metric, local_approx=self.local_approx,
numerical_matrix_handling=self.numerical_matrix_handling, subset_size=self.subset_size,
p_kernel_expression_replacement=self.p_kernel_expression_replacement,
covariance_function_repository=self.covariance_function_repository)
parallel_kernel_search.default_logging = logging.debug
return parallel_kernel_search.perform_kernel_search()
def start_kernel_search(self) -> List[gp.AbstractGaussianProcess]:
return [self.sequential_kernel_search(self.default_window_size)]
    def sequential_kernel_search(self, default_window_size: int) -> gp.AbstractGaussianProcess:
        """Segment the training data via sequential change point detection and model each segment.

        A window of at most ``default_window_size`` training points is slid over the data;
        within each window :meth:`usual_window_case` looks for the next change point and
        triggers a kernel search for the segment ending there. The per-segment kernels are
        finally combined via change-point operators into a single Gaussian process.

        Parameters
        ----------
        default_window_size : int
            Initial window length (in training points); also the step size used to
            grow/advance the window.

        Returns
        -------
        gp.AbstractGaussianProcess
            A blockwise GP over all segments, or the single segment's GP when only one
            segment was found.
        """
        start_index: int = 0
        n_train = self.data_input.n_train
        x_train = self.data_input.data_x_train
        dim = self.data_input.get_input_dimensionality()
        stop_index: int = min(n_train - 1, default_window_size)
        saved_cp: int = -1  # index of the most recently accepted change point (-1: none yet)
        change_points: List[tf.Tensor] = []
        is_debug: bool = global_param.p_logging_level == logging.DEBUG
        multi_threading: bool = global_param.p_max_threads > 1 and self.activate_global_multi_threading
        # With multi-threading, worker processes append segments, so the list must be
        # process-shared via a SyncManager.
        if multi_threading:
            manager: SyncManager = SyncManager()
            manager.start()
            segments_list: List[seg.LegacySegment] = manager.list()
        else:
            segments_list: List[seg.LegacySegment] = []
        logging.info("Sequential Kernel Search started.")
        if (default_window_size / 2) < n_train - 1:
            success: bool
            mid_index: int
            while True:
                success, mid_index = self.usual_window_case(start_index, stop_index, segments_list)
                if not is_debug and len(change_points) % 101 == 100:
                    logging.info("100 Changepoints processed.")
                if mid_index == stop_index or not success:
                    # No usable change point in this window: stop if we already reached
                    # the end of the data, otherwise grow the window by half and retry.
                    if stop_index == n_train - 1:
                        break
                    stop_index = int(np.min([stop_index + default_window_size * 0.5, n_train - 1]))
                    continue
                else:
                    # Record the change point location and advance the window behind it.
                    change_points.append(
                        cast(tf.Tensor, tf.Variable(x_train[mid_index], shape=[dim, ], dtype=global_param.p_dtype)))
                    saved_cp = mid_index
                    start_index = mid_index
                    stop_index = int(np.min([start_index + default_window_size, n_train - 1]))
                if (start_index + default_window_size) > n_train + 0.25 * default_window_size:
                    break
        else:
            logging.info("No Segmentation done as default window size is larger than whole dataset.")
        # Model the trailing segment (from the last change point to the end of the data).
        if saved_cp < n_train - 1:
            start_index = max(saved_cp, 0)
            stop_index = n_train
            if multi_threading:
                self.start_find_kernel_for_segment_parallelizable(start_index, stop_index, segments_list)
            else:
                segments_list.append(self.find_kernel_for_segment(start_index, stop_index))
        if self.activate_global_multi_threading and global_param.p_max_threads > 1:
            # Shut down the worker pool; presumably joins outstanding segment jobs
            # so segments_list is complete below (see global_param.shutdown_pool).
            global_param.shutdown_pool()
        omni_gp: gp.AbstractGaussianProcess
        if len(segments_list) > 1:
            # Important step as in multi_threading one segment calculation may overtake another one
            segments_list: List[seg.LegacySegment] = sorted(segments_list, key=lambda segment: segment.start_index)
            child_nodes: List[k.Kernel] = []
            for s in segments_list:
                child_nodes.append(s.latest_gp.covariance_matrix.kernel)
            cp_operator_tree: op.ChangePointOperator = op.ChangePointOperator(
                dim, child_nodes, change_points)
            omni_gp: gp.BlockwiseGaussianProcess = gp.BlockwiseGaussianProcess(cp_operator_tree, self.mean_function)
            logging.debug("SKS built up full kernel expression from %i constituents." % len(omni_gp.constituent_gps))
        else:
            logging.warning("Resulting model has only one constituent segment.")
            omni_gp = segments_list[0].latest_gp
            logging.debug("SKS built up full kernel expression from 1 constituent.")
        if multi_threading:
            manager.shutdown()
        return omni_gp
def usual_window_case(self, start_index: int, stop_index: int, segments_list: List[seg.LegacySegment]) \
-> Tuple[bool, int]:
success: bool
mid_index: int
success, mid_index = self.sequential_cpd.get_next_change_point(start_index, stop_index)
if not success:
return success, mid_index
else:
logging.debug("Found new Change Point: %i" % mid_index)
if global_param.p_max_threads > 1 and self.activate_global_multi_threading:
self.start_find_kernel_for_segment_parallelizable(start_index, mid_index, segments_list)
else:
segments_list.append(
self.find_kernel_for_segment(start_index, mid_index))
return success, mid_index
    def find_kernel_for_segment(self, start_index: int, stop_index: int) -> seg.LegacySegment:
        """Infer the best Gaussian process model for one training-data segment.

        Builds a data input restricted to ``[start_index, stop_index)`` of the training
        data (k-fold split when ``self.k_fold > 1``), runs a kernel search on it, and
        returns the finalized segment holding the selected GP.
        """
        block_data_input: Union[di.AbstractDataInput, List[di.AbstractDataInput]]
        if self.k_fold > 1:
            block_data_input = di.DataInput.get_k_fold_data_inputs(
                self.data_input.data_x_train[start_index:stop_index],
                self.data_input.data_y_train[start_index:stop_index], k=self.k_fold)
            # Propagate the mean function to every fold's data input.
            [block_data_input_instance.set_mean_function(self.data_input.mean_function)
             for block_data_input_instance in block_data_input]
        else:
            sliced_data_x_train = self.data_input.data_x_train[start_index:stop_index]
            sliced_data_y_train = self.data_input.data_y_train[start_index:stop_index]
            # Keep only the test points whose x falls inside [min, max) of this segment's
            # training x-range, so train and test cover the same region.
            test_idx = tf.reshape(tf.where(tf.math.logical_and(tf.reshape(self.data_input.data_x_test, [-1, ]) < tf.reduce_max(sliced_data_x_train), tf.reshape(self.data_input.data_x_test, [-1, ]) >= tf.reduce_min(sliced_data_x_train))), [-1, ])
            sliced_data_x_test = tf.gather(self.data_input.data_x_test, test_idx)
            sliced_data_y_test = tf.gather(self.data_input.data_y_test, test_idx)
            block_data_input = di.DataInput(
                sliced_data_x_train, sliced_data_y_train, sliced_data_x_test, sliced_data_y_test)
            block_data_input.set_mean_function(self.data_input.mean_function)
        # Run the kernel search on this block and keep the top-ranked model.
        gp_left = self.parallel_kernel_search(
            data_input=block_data_input, optimize_metric=self.optimize_metric,
            model_selection_metric=self.model_selection_metric)[0]
        assert gp_left is not None and gp_left.covariance_matrix is not None, "no new kernel found!"
        new_segment: seg.LegacySegment = seg.LegacySegment(start_index, stop_index)
        new_segment.set_latest_gp(gp_left)
        new_segment.set_final(True)
        return new_segment
    def start_find_kernel_for_segment_parallelizable(
            self, start_index: int, stop_index: int, segments_list: List[seg.LegacySegment]):
        """Schedule segment model inference asynchronously on the global worker pool.

        The worker appends the resulting segment to ``segments_list``, which is
        expected to be a manager-backed (process-shared) list in this mode.
        """
        global_param.pool.apply_async(self.find_kernel_for_segment_parallelizable,
                                      args=(start_index, stop_index, segments_list))
def find_kernel_for_segment_parallelizable(
self, start_index: int, stop_index: int, segments_list: List[seg.LegacySegment]):
new_segment: seg.LegacySegment = self.find_kernel_for_segment(start_index, stop_index)
segments_list.append(new_segment)
| [
"logging.debug",
"gpminference.ChangePointDetection.BaseKernelSCPD.WhiteNoiseCPD",
"gpbasics.Statistics.GaussianProcess.BlockwiseGaussianProcess",
"logging.info",
"tensorflow.reduce_min",
"gpbasics.KernelBasics.Operators.ChangePointOperator",
"gpbasics.global_parameters.ensure_init",
"gpbasics.global_... | [((946, 972), 'gpbasics.global_parameters.ensure_init', 'global_param.ensure_init', ([], {}), '()\n', (970, 972), True, 'import gpbasics.global_parameters as global_param\n'), ((4492, 4906), 'gpminference.KernelSearch.ParallelApproach.ParallelKernelSearch', 'pks.ParallelKernelSearch', (['self.strategy_type', 'strategy_options', 'data_input', 'self.mean_function', 'optimize_metric', 'model_selection_metric'], {'local_approx': 'self.local_approx', 'numerical_matrix_handling': 'self.numerical_matrix_handling', 'subset_size': 'self.subset_size', 'p_kernel_expression_replacement': 'self.p_kernel_expression_replacement', 'covariance_function_repository': 'self.covariance_function_repository'}), '(self.strategy_type, strategy_options, data_input,\n self.mean_function, optimize_metric, model_selection_metric,\n local_approx=self.local_approx, numerical_matrix_handling=self.\n numerical_matrix_handling, subset_size=self.subset_size,\n p_kernel_expression_replacement=self.p_kernel_expression_replacement,\n covariance_function_repository=self.covariance_function_repository)\n', (4516, 4906), True, 'import gpminference.KernelSearch.ParallelApproach as pks\n'), ((6060, 6109), 'logging.info', 'logging.info', (['"""Sequential Kernel Search started."""'], {}), "('Sequential Kernel Search started.')\n", (6072, 6109), False, 'import logging\n'), ((11492, 11534), 'gpminference.KernelSearch.Segment.LegacySegment', 'seg.LegacySegment', (['start_index', 'stop_index'], {}), '(start_index, stop_index)\n', (11509, 11534), True, 'import gpminference.KernelSearch.Segment as seg\n'), ((11799, 11924), 'gpbasics.global_parameters.pool.apply_async', 'global_param.pool.apply_async', (['self.find_kernel_for_segment_parallelizable'], {'args': '(start_index, stop_index, segments_list)'}), '(self.find_kernel_for_segment_parallelizable,\n args=(start_index, stop_index, segments_list))\n', (11828, 11924), True, 'import gpbasics.global_parameters as global_param\n'), ((1825, 
2052), 'logging.warning', 'logging.warning', (['"""Divisive Gaussian process model inference algorithms currently do not divide a dataset in k-fold cross validated manner, but infer the respective local models in that k-fold cross validated fashion."""'], {}), "(\n 'Divisive Gaussian process model inference algorithms currently do not divide a dataset in k-fold cross validated manner, but infer the respective local models in that k-fold cross validated fashion.'\n )\n", (1840, 2052), False, 'import logging\n'), ((2163, 2237), 'tensorflow.concat', 'tf.concat', (['[data_input[0].data_x_train, data_input[0].data_x_test]'], {'axis': '(0)'}), '([data_input[0].data_x_train, data_input[0].data_x_test], axis=0)\n', (2172, 2237), True, 'import tensorflow as tf\n'), ((2254, 2328), 'tensorflow.concat', 'tf.concat', (['[data_input[0].data_y_train, data_input[0].data_y_test]'], {'axis': '(0)'}), '([data_input[0].data_y_train, data_input[0].data_y_test], axis=0)\n', (2263, 2328), True, 'import tensorflow as tf\n'), ((2597, 2621), 'gpbasics.DataHandling.DataInput.DataInput', 'di.DataInput', (['x', 'y', 'x', 'y'], {}), '(x, y, x, y)\n', (2609, 2621), True, 'import gpbasics.DataHandling.DataInput as di\n'), ((3531, 3556), 'gpminference.ChangePointDetection.BaseKernelSCPD.WhiteNoiseCPD', 'bscpd.WhiteNoiseCPD', (['self'], {}), '(self)\n', (3550, 3556), True, 'import gpminference.ChangePointDetection.BaseKernelSCPD as bscpd\n'), ((3799, 3824), 'gpminference.ChangePointDetection.BaseKernelSCPD.WhiteNoiseCPD', 'bscpd.WhiteNoiseCPD', (['self'], {}), '(self)\n', (3818, 3824), True, 'import gpminference.ChangePointDetection.BaseKernelSCPD as bscpd\n'), ((5871, 5884), 'multiprocessing.managers.SyncManager', 'SyncManager', ([], {}), '()\n', (5882, 5884), False, 'from multiprocessing.managers import SyncManager\n'), ((7252, 7351), 'logging.info', 'logging.info', (['"""No Segmentation done as default window size is larger than whole dataset."""'], {}), "(\n 'No Segmentation done as default 
window size is larger than whole dataset.'\n )\n", (7264, 7351), False, 'import logging\n'), ((7800, 7828), 'gpbasics.global_parameters.shutdown_pool', 'global_param.shutdown_pool', ([], {}), '()\n', (7826, 7828), True, 'import gpbasics.global_parameters as global_param\n'), ((8336, 8391), 'gpbasics.KernelBasics.Operators.ChangePointOperator', 'op.ChangePointOperator', (['dim', 'child_nodes', 'change_points'], {}), '(dim, child_nodes, change_points)\n', (8358, 8391), True, 'from gpbasics.KernelBasics import Operators as op\n'), ((8461, 8526), 'gpbasics.Statistics.GaussianProcess.BlockwiseGaussianProcess', 'gp.BlockwiseGaussianProcess', (['cp_operator_tree', 'self.mean_function'], {}), '(cp_operator_tree, self.mean_function)\n', (8488, 8526), True, 'from gpbasics.Statistics import GaussianProcess as gp\n'), ((8672, 8740), 'logging.warning', 'logging.warning', (['"""Resulting model has only one constituent segment."""'], {}), "('Resulting model has only one constituent segment.')\n", (8687, 8740), False, 'import logging\n'), ((8803, 8875), 'logging.debug', 'logging.debug', (['"""SKS built up full kernel expression from 1 constituent."""'], {}), "('SKS built up full kernel expression from 1 constituent.')\n", (8816, 8875), False, 'import logging\n'), ((9333, 9388), 'logging.debug', 'logging.debug', (["('Found new Change Point: %i' % mid_index)"], {}), "('Found new Change Point: %i' % mid_index)\n", (9346, 9388), False, 'import logging\n'), ((9989, 10157), 'gpbasics.DataHandling.DataInput.DataInput.get_k_fold_data_inputs', 'di.DataInput.get_k_fold_data_inputs', (['self.data_input.data_x_train[start_index:stop_index]', 'self.data_input.data_y_train[start_index:stop_index]'], {'k': 'self.k_fold'}), '(self.data_input.data_x_train[\n start_index:stop_index], self.data_input.data_y_train[start_index:\n stop_index], k=self.k_fold)\n', (10024, 10157), True, 'import gpbasics.DataHandling.DataInput as di\n'), ((10802, 10850), 'tensorflow.gather', 'tf.gather', 
(['self.data_input.data_x_test', 'test_idx'], {}), '(self.data_input.data_x_test, test_idx)\n', (10811, 10850), True, 'import tensorflow as tf\n'), ((10884, 10932), 'tensorflow.gather', 'tf.gather', (['self.data_input.data_y_test', 'test_idx'], {}), '(self.data_input.data_y_test, test_idx)\n', (10893, 10932), True, 'import tensorflow as tf\n'), ((10964, 11062), 'gpbasics.DataHandling.DataInput.DataInput', 'di.DataInput', (['sliced_data_x_train', 'sliced_data_y_train', 'sliced_data_x_test', 'sliced_data_y_test'], {}), '(sliced_data_x_train, sliced_data_y_train, sliced_data_x_test,\n sliced_data_y_test)\n', (10976, 11062), True, 'import gpbasics.DataHandling.DataInput as di\n'), ((2445, 2469), 'tensorflow.gather', 'tf.gather', (['x', 'sorted_idx'], {}), '(x, sorted_idx)\n', (2454, 2469), True, 'import tensorflow as tf\n'), ((2490, 2514), 'tensorflow.gather', 'tf.gather', (['y', 'sorted_idx'], {}), '(y, sorted_idx)\n', (2499, 2514), True, 'import tensorflow as tf\n'), ((2402, 2421), 'tensorflow.reshape', 'tf.reshape', (['x', '[-1]'], {}), '(x, [-1])\n', (2412, 2421), True, 'import tensorflow as tf\n'), ((6430, 6473), 'logging.info', 'logging.info', (['"""100 Changepoints processed."""'], {}), "('100 Changepoints processed.')\n", (6442, 6473), False, 'import logging\n'), ((6651, 6712), 'numpy.min', 'np.min', (['[stop_index + default_window_size * 0.5, n_train - 1]'], {}), '([stop_index + default_window_size * 0.5, n_train - 1])\n', (6657, 6712), True, 'import numpy as np\n'), ((7046, 7102), 'numpy.min', 'np.min', (['[start_index + default_window_size, n_train - 1]'], {}), '([start_index + default_window_size, n_train - 1])\n', (7052, 7102), True, 'import numpy as np\n'), ((6847, 6919), 'tensorflow.Variable', 'tf.Variable', (['x_train[mid_index]'], {'shape': '[dim]', 'dtype': 'global_param.p_dtype'}), '(x_train[mid_index], shape=[dim], dtype=global_param.p_dtype)\n', (6858, 6919), True, 'import tensorflow as tf\n'), ((10586, 10631), 'tensorflow.reshape', 'tf.reshape', 
(['self.data_input.data_x_test', '[-1]'], {}), '(self.data_input.data_x_test, [-1])\n', (10596, 10631), True, 'import tensorflow as tf\n'), ((10636, 10670), 'tensorflow.reduce_max', 'tf.reduce_max', (['sliced_data_x_train'], {}), '(sliced_data_x_train)\n', (10649, 10670), True, 'import tensorflow as tf\n'), ((10672, 10717), 'tensorflow.reshape', 'tf.reshape', (['self.data_input.data_x_test', '[-1]'], {}), '(self.data_input.data_x_test, [-1])\n', (10682, 10717), True, 'import tensorflow as tf\n'), ((10723, 10757), 'tensorflow.reduce_min', 'tf.reduce_min', (['sliced_data_x_train'], {}), '(sliced_data_x_train)\n', (10736, 10757), True, 'import tensorflow as tf\n')] |
"""
Classification dataset routines.
"""
__all__ = ['img_normalization']
import numpy as np
def img_normalization(img,
                      mean_rgb,
                      std_rgb):
    """
    Normalization as in the ImageNet-1K validation procedure.

    The per-channel means/STDs are rescaled by 255 before being applied, so they
    are presumably given in the [0, 1] range while `img` holds values in [0, 255].

    Parameters
    ----------
    img : np.array
        Input image (channels last), values in the [0, 255] range.
    mean_rgb : tuple of 3 float
        Mean of RGB channels in the dataset.
    std_rgb : tuple of 3 float
        STD of RGB channels in the dataset.

    Returns
    -------
    np.array
        Output image, normalized per channel via broadcasting.
    """
    # Rescale statistics from [0, 1] to the [0, 255] pixel range.
    mean_rgb = np.array(mean_rgb, np.float32) * 255.0
    std_rgb = np.array(std_rgb, np.float32) * 255.0
    # Broadcasts over the trailing channel axis.
    return (img - mean_rgb) / std_rgb
| [
"numpy.array"
] | [((590, 620), 'numpy.array', 'np.array', (['mean_rgb', 'np.float32'], {}), '(mean_rgb, np.float32)\n', (598, 620), True, 'import numpy as np\n'), ((643, 672), 'numpy.array', 'np.array', (['std_rgb', 'np.float32'], {}), '(std_rgb, np.float32)\n', (651, 672), True, 'import numpy as np\n')] |
"""
Generic Data Source Class
DataSource is the root class for all other podpac defined data sources,
including user defined data sources.
"""
from __future__ import division, unicode_literals, print_function, absolute_import
from collections import OrderedDict
from copy import deepcopy
import warnings
import logging
import numpy as np
import xarray as xr
import traitlets as tl
# Internal imports
from podpac.core.settings import settings
from podpac.core.units import UnitsDataArray
from podpac.core.coordinates import Coordinates, Coordinates1d, StackedCoordinates
from podpac.core.coordinates.utils import VALID_DIMENSION_NAMES, make_coord_delta, make_coord_delta_array
from podpac.core.node import Node
from podpac.core.utils import common_doc, cached_property
from podpac.core.node import COMMON_NODE_DOC
log = logging.getLogger(__name__)
DATA_DOC = {
"coordinates": "The coordinates of the data source.",
"get_data": """
This method must be defined by the data source implementing the DataSource class.
When data source nodes are evaluated, this method is called with request coordinates and coordinate indexes.
The implementing method can choose which input provides the most efficient method of getting data
(i.e via coordinates or via the index of the coordinates).
Coordinates and coordinate indexes may be strided or subsets of the
source data, but all coordinates and coordinate indexes will match 1:1 with the subset data.
This method may return a numpy array, an xarray DaraArray, or a podpac UnitsDataArray.
If a numpy array or xarray DataArray is returned, :meth:`podpac.data.DataSource.evaluate` will
cast the data into a `UnitsDataArray` using the requested source coordinates.
If a podpac UnitsDataArray is passed back, the :meth:`podpac.data.DataSource.evaluate`
method will not do any further processing.
The inherited Node method `create_output_array` can be used to generate the template UnitsDataArray
in your DataSource.
See :meth:`podpac.Node.create_output_array` for more details.
Parameters
----------
coordinates : :class:`podpac.Coordinates`
The coordinates that need to be retrieved from the data source using the coordinate system of the data
source
coordinates_index : List
A list of slices or a boolean array that give the indices of the data that needs to be retrieved from
the data source. The values in the coordinate_index will vary depending on the `coordinate_index_type`
defined for the data source.
Returns
--------
np.ndarray, xr.DataArray, :class:`podpac.UnitsDataArray`
A subset of the returned data. If a numpy array or xarray DataArray is returned,
the data will be cast into UnitsDataArray using the returned data to fill values
at the requested source coordinates.
""",
"get_coordinates": """
Returns a Coordinates object that describes the coordinates of the data source.
In most cases, this method is defined by the data source implementing the DataSource class.
If method is not implemented by the data source, it will try to return ``self.coordinates``
if ``self.coordinates`` is not None.
Otherwise, this method will raise a NotImplementedError.
Returns
--------
:class:`podpac.Coordinates`
The coordinates describing the data source array.
Notes
------
Need to pay attention to:
- the order of the dimensions
- the stacking of the dimension
- the type of coordinates
Coordinates should be non-nan and non-repeating for best compatibility
""",
"interpolation": """
Interpolation definition for the data source.
By default, the interpolation method is set to `podpac.settings["DEFAULT_INTERPOLATION"]` which defaults to 'nearest'` for all dimensions.
""",
"interpolation_long": """
{interpolation}
If input is a string, it must match one of the interpolation shortcuts defined in
:attr:`podpac.data.INTERPOLATION_SHORTCUTS`. The interpolation method associated
with this string will be applied to all dimensions at the same time.
If input is a dict or list of dict, the dict or dict elements must adhere to the following format:
The key ``'method'`` defining the interpolation method name.
If the interpolation method is not one of :attr:`podpac.data.INTERPOLATION_SHORTCUTS`, a
second key ``'interpolators'`` must be defined with a list of
:class:`podpac.interpolators.Interpolator` classes to use in order of uages.
The dictionary may contain an option ``'params'`` key which contains a dict of parameters to pass along to
the :class:`podpac.interpolators.Interpolator` classes associated with the interpolation method.
The dict may contain the key ``'dims'`` which specifies dimension names (i.e. ``'time'`` or ``('lat', 'lon')`` ).
If the dictionary does not contain a key for all unstacked dimensions of the source coordinates, the
:attr:`podpac.data.INTERPOLATION_DEFAULT` value will be used.
All dimension keys must be unstacked even if the underlying coordinate dimensions are stacked.
Any extra dimensions included but not found in the source coordinates will be ignored.
The dict may contain a key ``'params'`` that can be used to configure the :class:`podpac.interpolators.Interpolator` classes associated with the interpolation method.
If input is a :class:`podpac.data.Interpolation` class, this Interpolation
class will be used without modification.
""",
}
COMMON_DATA_DOC = COMMON_NODE_DOC.copy()
COMMON_DATA_DOC.update(DATA_DOC) # inherit and overwrite with DATA_DOC
@common_doc(COMMON_DATA_DOC)
class DataSource(Node):
"""Base node for any data obtained directly from a single source.
Parameters
----------
source : Any
The location of the source. Depending on the child node this can be a filepath,
numpy array, or dictionary as a few examples.
coordinates : :class:`podpac.Coordinates`
{coordinates}
nan_vals : List, optional
List of values from source data that should be interpreted as 'no data' or 'nans'
coordinate_index_type : str, optional
Type of index to use for data source. Possible values are ``['slice', 'numpy', 'xarray']``
Default is 'numpy', which allows a tuple of integer indices.
cache_coordinates : bool
Whether to cache coordinates using the podpac ``cache_ctrl``. Default False.
cache_output : bool
Should the node's output be cached? If not provided or None, uses default based on
settings["CACHE_DATASOURCE_OUTPUT_DEFAULT"]. If True, outputs will be cached and retrieved from cache. If False,
outputs will not be cached OR retrieved from cache (even if they exist in cache).
Notes
-----
Custom DataSource Nodes must implement the :meth:`get_data` and :meth:`get_coordinates` methods.
"""
nan_vals = tl.List().tag(attr=True)
nan_val = tl.Any(np.nan).tag(attr=True)
boundary = tl.Dict().tag(attr=True)
coordinate_index_type = tl.Enum(["slice", "numpy", "xarray"], default_value="numpy")
cache_coordinates = tl.Bool(False)
cache_output = tl.Bool()
# privates
_coordinates = tl.Instance(Coordinates, allow_none=True, default_value=None, read_only=True)
# debug attributes
_requested_coordinates = tl.Instance(Coordinates, allow_none=True)
_requested_source_coordinates = tl.Instance(Coordinates, allow_none=True)
_requested_source_coordinates_index = tl.Instance(tuple, allow_none=True)
_requested_source_boundary = tl.Instance(dict, allow_none=True)
_requested_source_data = tl.Instance(UnitsDataArray, allow_none=True)
_evaluated_coordinates = tl.Instance(Coordinates, allow_none=True)
    @tl.validate("boundary")
    def _validate_boundary(self, d):
        """Validate the `boundary` trait: known dims, valid non-negative deltas.

        Scalar (centered, uniform) boundaries are fully supported; 1-d (non-centered)
        and 2-d (non-uniform) boundaries are validated but then rejected with
        NotImplementedError.
        """
        val = d["value"]
        for dim, boundary in val.items():
            if dim not in VALID_DIMENSION_NAMES:
                raise ValueError("Invalid dimension '%s' in boundary" % dim)
            # Scalar boundary: must parse as a coordinate delta and be non-negative.
            if np.array(boundary).ndim == 0:
                try:
                    delta = make_coord_delta(boundary)
                except ValueError:
                    raise ValueError(
                        "Invalid boundary for dimension '%s' ('%s' is not a valid coordinate delta)" % (dim, boundary)
                    )
                if np.array(delta).astype(float) < 0:
                    raise ValueError("Invalid boundary for dimension '%s' (%s < 0)" % (dim, delta))
            # 1-d boundary: parse to surface format errors, then reject (unsupported).
            if np.array(boundary).ndim == 1:
                make_coord_delta_array(boundary)
                raise NotImplementedError("Non-centered boundary not yet supported for dimension '%s'" % dim)
            # 2-d boundary: parse each element, then reject (unsupported).
            if np.array(boundary).ndim == 2:
                for elem in boundary:
                    make_coord_delta_array(elem)
                raise NotImplementedError("Non-uniform boundary not yet supported for dimension '%s'" % dim)
        return val
    @tl.default("cache_output")
    def _cache_output_default(self):
        # Default output caching behavior is controlled by the global settings.
        return settings["CACHE_DATASOURCE_OUTPUT_DEFAULT"]
# ------------------------------------------------------------------------------------------------------------------
# Properties
# ------------------------------------------------------------------------------------------------------------------
    @property
    def coordinates(self):
        """{coordinates}"""
        # Resolution order: memoized trait, then podpac cache (if enabled), then the
        # subclass-provided get_coordinates(); the result is memoized on _coordinates
        # and optionally written to the cache.
        if self._coordinates is not None:
            nc = self._coordinates
        elif self.cache_coordinates and self.has_cache("coordinates"):
            nc = self.get_cache("coordinates")
            self.set_trait("_coordinates", nc)
        else:
            nc = self.get_coordinates()
            self.set_trait("_coordinates", nc)
            if self.cache_coordinates:
                self.put_cache(nc, "coordinates")
        return nc
    @property
    def dims(self):
        """Dimension names of the data source (delegates to ``self.coordinates``)."""
        return self.coordinates.dims
    @property
    def udims(self):
        """Unstacked dimension names of the data source (delegates to ``self.coordinates``)."""
        return self.coordinates.udims
    @property
    def _crs(self):
        """Native coordinate reference system of the data source coordinates."""
        return self.coordinates.crs
# ------------------------------------------------------------------------------------------------------------------
# Private Methods
# ------------------------------------------------------------------------------------------------------------------
    def _get_data(self, rc, rci):
        """Wrapper for `self.get_data` with pre and post processing

        Calls ``self.get_data(rc, rci)``, coerces the result to a UnitsDataArray,
        selects the requested output (if any), and replaces ``nan_vals`` entries
        with ``nan_val``.

        Returns
        -------
        podpac.core.units.UnitsDataArray
            Returns UnitsDataArray with coordinates defined by _requested_source_coordinates

        Raises
        ------
        TypeError
            Raised if unknown data is passed by from self.get_data
        NotImplementedError
            Raised if get_data is not implemented by data source subclass
        """
        # get data from data source at requested source coordinates and requested source coordinates index
        data = self.get_data(rc, rci)
        # convert data into UnitsDataArray depending on format
        # TODO: what other processing needs to happen here?
        if isinstance(data, UnitsDataArray):
            udata_array = data
        elif isinstance(data, xr.DataArray):
            # TODO: check order of coordinates here
            udata_array = self.create_output_array(rc, data=data.data)
        elif isinstance(data, np.ndarray):
            udata_array = self.create_output_array(rc, data=data)
        else:
            raise TypeError(
                "Unknown data type passed back from "
                + "{}.get_data(): {}. ".format(type(self).__name__, type(data))
                + "Must be one of numpy.ndarray, xarray.DataArray, or podpac.UnitsDataArray"
            )
        # extract single output, if necessary
        # subclasses should extract single outputs themselves if possible, but this provides a backup
        if "output" in udata_array.dims and self.output is not None:
            udata_array = udata_array.sel(output=self.output)
        # fill nan_vals in data array
        udata_array.data[np.isin(udata_array.data, self.nan_vals)] = self.nan_val
        return udata_array
# ------------------------------------------------------------------------------------------------------------------
# Methods
# ------------------------------------------------------------------------------------------------------------------
def get_source_data(self, bounds={}):
"""
Get source data, without interpolation.
Arguments
---------
bounds : dict
Dictionary of bounds by dimension, optional.
Keys must be dimension names, and values are (min, max) tuples, e.g. ``{'lat': (10, 20)}``.
Returns
-------
data : UnitsDataArray
Source data
"""
coords, I = self.coordinates.select(bounds, return_index=True)
return self._get_data(coords, I)
    def eval(self, coordinates, **kwargs):
        """
        Wraps the super Node.eval method in order to cache with the correct coordinates.
        The output is independent of the crs or any extra dimensions, so this transforms and removes extra dimensions
        before caching in the super eval method.
        """
        # check for missing dimensions
        for c in self.coordinates.values():
            if isinstance(c, Coordinates1d):
                if c.name not in coordinates.udims:
                    raise ValueError("Cannot evaluate these coordinates, missing dim '%s'" % c.name)
            elif isinstance(c, StackedCoordinates):
                if all(s.name not in coordinates.udims for s in c):
                    raise ValueError("Cannot evaluate these coordinates, missing at least one dim in '%s'" % c.name)
        # store original requested coordinates
        requested_coordinates = coordinates
        # remove extra dimensions
        extra = [
            c.name
            for c in coordinates.values()
            if (isinstance(c, Coordinates1d) and c.name not in self.udims)
            or (isinstance(c, StackedCoordinates) and all(dim not in self.udims for dim in c.dims))
        ]
        coordinates = coordinates.drop(extra)
        # transform coordinates into native crs if different
        if coordinates.crs.lower() != self._crs.lower():
            coordinates = coordinates.transform(self._crs)
        # note: super().eval (not self._eval)
        output = super().eval(coordinates, **kwargs)
        # transform back to requested coordinates, if necessary
        if coordinates.crs.lower() != requested_coordinates.crs.lower():
            coords = Coordinates.from_xarray(output, crs=output.attrs.get("crs", None))
            output = self.create_output_array(coords.transform(requested_coordinates.crs), data=output.data)
        # keep the originally-requested coordinates around for debugging/inspection
        if settings["DEBUG"]:
            self._requested_coordinates = requested_coordinates
        return output
    @common_doc(COMMON_DATA_DOC)
    def _eval(self, coordinates, output=None, _selector=None):
        """Evaluates this node using the supplied coordinates.

        The coordinates are mapped to the requested coordinates, interpolated if necessary, and set to
        `_requested_source_coordinates` with associated index `_requested_source_coordinates_index`. The requested
        source coordinates and index are passed to `get_data()` returning the source data at the
        coordinatesset to `_requested_source_data`. Finally `_requested_source_data` is interpolated
        using the `interpolate` method and set to the `output` attribute of the node.

        Parameters
        ----------
        coordinates : :class:`podpac.Coordinates`
            {requested_coordinates}
            An exception is raised if the requested coordinates are missing dimensions in the DataSource.
            Extra dimensions in the requested coordinates are dropped.
        output : :class:`podpac.UnitsDataArray`, optional
            {eval_output}
        _selector: callable(coordinates, request_coordinates)
            {eval_selector}

        Returns
        -------
        {eval_return}

        Raises
        ------
        ValueError
            Cannot evaluate these coordinates
        """
        log.debug("Evaluating {} data source".format(self.__class__.__name__))
        # Use the selector
        if _selector is not None:
            (rsc, rsci) = _selector(self.coordinates, coordinates, index_type=self.coordinate_index_type)
        else:
            # get source coordinates that are within the requested coordinates bounds
            (rsc, rsci) = self.coordinates.intersect(coordinates, outer=True, return_index=True)
        # if requested coordinates and coordinates do not intersect, shortcut with nan UnitsDataArary
        if rsc.size == 0:
            if output is None:
                output = self.create_output_array(rsc)
                if "output" in output.dims and self.output is not None:
                    output = output.sel(output=self.output)
            else:
                output[:] = np.nan
            # record intermediate state for inspection when DEBUG is enabled
            if settings["DEBUG"]:
                self._evaluated_coordinates = coordinates
                self._requested_source_coordinates = rsc
                self._requested_source_coordinates_index = rsci
                self._requested_source_boundary = None
                self._requested_source_data = None
                self._output = output
            return output
        # get data from data source
        rsd = self._get_data(rsc, rsci)
        if output is None:
            # if requested_coordinates.crs.lower() != coordinates.crs.lower():
            #     if rsc.shape == rsd.shape:
            #         rsd = self.create_output_array(rsc, data=rsd.data)
            #     else:
            #         crds = Coordinates.from_xarray(rsd, crs=data.attrs.get("crs", None))
            #         rsd = self.create_output_array(crds.transform(rsc.crs), data=rsd.data)
            output = rsd
        else:
            output.data[:] = rsd.data
        # get indexed boundary
        rsb = self._get_boundary(rsci)
        output.attrs["boundary_data"] = rsb
        output.attrs["bounds"] = self.coordinates.bounds
        # save output to private for debugging
        if settings["DEBUG"]:
            self._evaluated_coordinates = coordinates
            self._requested_source_coordinates = rsc
            self._requested_source_coordinates_index = rsci
            self._requested_source_boundary = rsb
            self._requested_source_data = rsd
            self._output = output
        return output
    def find_coordinates(self):
        """
        Get the available coordinates for the Node. For a DataSource, this is just the coordinates.

        Returns
        -------
        coords_list : list
            singleton list containing the coordinates (Coordinates object)
        """
        return [self.coordinates]
def get_bounds(self, crs="default"):
    """Get the full available coordinate bounds for the Node.

    Arguments
    ---------
    crs : str
        Desired CRS for the bounds. Use 'source' to use the native source crs.
        If not specified, podpac.settings["DEFAULT_CRS"] is used. Optional.

    Returns
    -------
    bounds : dict
        Bounds for each dimension. Keys are dimension names and values are tuples (min, max).
    crs : str
        The crs for the bounds.
    """
    # Resolve the two special keywords into a concrete CRS string first.
    resolved_crs = crs
    if resolved_crs == "default":
        resolved_crs = settings["DEFAULT_CRS"]
    elif resolved_crs == "source":
        resolved_crs = self.coordinates.crs
    transformed = self.coordinates.transform(resolved_crs)
    return transformed.bounds, resolved_crs
@common_doc(COMMON_DATA_DOC)
def get_data(self, coordinates, coordinates_index):
    """{get_data}
    Raises
    ------
    NotImplementedError
        This needs to be implemented by derived classes
    """
    # Abstract hook: concrete DataSource subclasses must override this to
    # return the source data for the given coordinates/index selection.
    # The "{get_data}" placeholder above is filled in by the common_doc
    # decorator, so the docstring text must not be altered.
    raise NotImplementedError
@common_doc(COMMON_DATA_DOC)
def get_coordinates(self):
    """{get_coordinates}
    Raises
    ------
    NotImplementedError
        This needs to be implemented by derived classes
    """
    # Abstract hook: concrete DataSource subclasses must override this to
    # construct and return the native Coordinates of the data source.
    # The "{get_coordinates}" placeholder above is filled in by common_doc,
    # so the docstring text must not be altered.
    raise NotImplementedError
def set_coordinates(self, coordinates, force=False):
    """Set the coordinates. Used by Compositors as an optimization.

    Arguments
    ---------
    coordinates : :class:`podpac.Coordinates`
        Coordinates to set. Usually these are coordinates that are shared across compositor sources.
    NOTE: This is only currently used by SMAPCompositor. It should potentially be moved to the SMAPSource.
    """
    # Guard clause: unless forced, never overwrite coordinates that are
    # already defined. (force is checked first, preserving short-circuit.)
    if not force and self.trait_is_defined("_coordinates"):
        return
    self.set_trait("_coordinates", coordinates)
def _get_boundary(self, index):
    """
    Select the boundary for the given the coordinates index. Only non-uniform boundary arrays need to be indexed.

    Arguments
    ---------
    index : tuple
        Coordinates index (e.g. coordinates_index)

    Returns
    -------
    boundary : dict
        Indexed boundary. Uniform boundaries are unchanged and non-uniform boundary arrays are indexed.
    """
    if index is None:
        return self.boundary

    indexed = {}
    for coords, idx in zip(self.coordinates.values(), index):
        for dim in coords.dims:
            if dim not in self.boundary:
                continue
            full = self.boundary[dim]
            if np.array(full).ndim == 2:
                # Non-uniform (per-coordinate) boundary array: subset it.
                indexed[dim] = np.array(full[idx])
            else:
                # Uniform boundary: passed through unchanged.
                indexed[dim] = full
    return indexed
| [
"logging.getLogger",
"traitlets.default",
"podpac.core.utils.common_doc",
"traitlets.Dict",
"traitlets.Instance",
"traitlets.List",
"numpy.isin",
"traitlets.Enum",
"numpy.array",
"traitlets.validate",
"podpac.core.coordinates.utils.make_coord_delta_array",
"podpac.core.node.COMMON_NODE_DOC.cop... | [((824, 851), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (841, 851), False, 'import logging\n'), ((5883, 5905), 'podpac.core.node.COMMON_NODE_DOC.copy', 'COMMON_NODE_DOC.copy', ([], {}), '()\n', (5903, 5905), False, 'from podpac.core.node import COMMON_NODE_DOC\n'), ((5981, 6008), 'podpac.core.utils.common_doc', 'common_doc', (['COMMON_DATA_DOC'], {}), '(COMMON_DATA_DOC)\n', (5991, 6008), False, 'from podpac.core.utils import common_doc, cached_property\n'), ((7415, 7475), 'traitlets.Enum', 'tl.Enum', (["['slice', 'numpy', 'xarray']"], {'default_value': '"""numpy"""'}), "(['slice', 'numpy', 'xarray'], default_value='numpy')\n", (7422, 7475), True, 'import traitlets as tl\n'), ((7500, 7514), 'traitlets.Bool', 'tl.Bool', (['(False)'], {}), '(False)\n', (7507, 7514), True, 'import traitlets as tl\n'), ((7534, 7543), 'traitlets.Bool', 'tl.Bool', ([], {}), '()\n', (7541, 7543), True, 'import traitlets as tl\n'), ((7579, 7656), 'traitlets.Instance', 'tl.Instance', (['Coordinates'], {'allow_none': '(True)', 'default_value': 'None', 'read_only': '(True)'}), '(Coordinates, allow_none=True, default_value=None, read_only=True)\n', (7590, 7656), True, 'import traitlets as tl\n'), ((7710, 7751), 'traitlets.Instance', 'tl.Instance', (['Coordinates'], {'allow_none': '(True)'}), '(Coordinates, allow_none=True)\n', (7721, 7751), True, 'import traitlets as tl\n'), ((7788, 7829), 'traitlets.Instance', 'tl.Instance', (['Coordinates'], {'allow_none': '(True)'}), '(Coordinates, allow_none=True)\n', (7799, 7829), True, 'import traitlets as tl\n'), ((7872, 7907), 'traitlets.Instance', 'tl.Instance', (['tuple'], {'allow_none': '(True)'}), '(tuple, allow_none=True)\n', (7883, 7907), True, 'import traitlets as tl\n'), ((7941, 7975), 'traitlets.Instance', 'tl.Instance', (['dict'], {'allow_none': '(True)'}), '(dict, allow_none=True)\n', (7952, 7975), True, 'import traitlets as tl\n'), ((8005, 8049), 
'traitlets.Instance', 'tl.Instance', (['UnitsDataArray'], {'allow_none': '(True)'}), '(UnitsDataArray, allow_none=True)\n', (8016, 8049), True, 'import traitlets as tl\n'), ((8079, 8120), 'traitlets.Instance', 'tl.Instance', (['Coordinates'], {'allow_none': '(True)'}), '(Coordinates, allow_none=True)\n', (8090, 8120), True, 'import traitlets as tl\n'), ((8127, 8150), 'traitlets.validate', 'tl.validate', (['"""boundary"""'], {}), "('boundary')\n", (8138, 8150), True, 'import traitlets as tl\n'), ((9344, 9370), 'traitlets.default', 'tl.default', (['"""cache_output"""'], {}), "('cache_output')\n", (9354, 9370), True, 'import traitlets as tl\n'), ((15475, 15502), 'podpac.core.utils.common_doc', 'common_doc', (['COMMON_DATA_DOC'], {}), '(COMMON_DATA_DOC)\n', (15485, 15502), False, 'from podpac.core.utils import common_doc, cached_property\n'), ((20239, 20266), 'podpac.core.utils.common_doc', 'common_doc', (['COMMON_DATA_DOC'], {}), '(COMMON_DATA_DOC)\n', (20249, 20266), False, 'from podpac.core.utils import common_doc, cached_property\n'), ((20516, 20543), 'podpac.core.utils.common_doc', 'common_doc', (['COMMON_DATA_DOC'], {}), '(COMMON_DATA_DOC)\n', (20526, 20543), False, 'from podpac.core.utils import common_doc, cached_property\n'), ((7277, 7286), 'traitlets.List', 'tl.List', ([], {}), '()\n', (7284, 7286), True, 'import traitlets as tl\n'), ((7316, 7330), 'traitlets.Any', 'tl.Any', (['np.nan'], {}), '(np.nan)\n', (7322, 7330), True, 'import traitlets as tl\n'), ((7361, 7370), 'traitlets.Dict', 'tl.Dict', ([], {}), '()\n', (7368, 7370), True, 'import traitlets as tl\n'), ((12580, 12620), 'numpy.isin', 'np.isin', (['udata_array.data', 'self.nan_vals'], {}), '(udata_array.data, self.nan_vals)\n', (12587, 12620), True, 'import numpy as np\n'), ((8933, 8965), 'podpac.core.coordinates.utils.make_coord_delta_array', 'make_coord_delta_array', (['boundary'], {}), '(boundary)\n', (8955, 8965), False, 'from podpac.core.coordinates.utils import VALID_DIMENSION_NAMES, 
make_coord_delta, make_coord_delta_array\n'), ((8396, 8414), 'numpy.array', 'np.array', (['boundary'], {}), '(boundary)\n', (8404, 8414), True, 'import numpy as np\n'), ((8475, 8501), 'podpac.core.coordinates.utils.make_coord_delta', 'make_coord_delta', (['boundary'], {}), '(boundary)\n', (8491, 8501), False, 'from podpac.core.coordinates.utils import VALID_DIMENSION_NAMES, make_coord_delta, make_coord_delta_array\n'), ((8887, 8905), 'numpy.array', 'np.array', (['boundary'], {}), '(boundary)\n', (8895, 8905), True, 'import numpy as np\n'), ((9092, 9110), 'numpy.array', 'np.array', (['boundary'], {}), '(boundary)\n', (9100, 9110), True, 'import numpy as np\n'), ((9180, 9208), 'podpac.core.coordinates.utils.make_coord_delta_array', 'make_coord_delta_array', (['elem'], {}), '(elem)\n', (9202, 9208), False, 'from podpac.core.coordinates.utils import VALID_DIMENSION_NAMES, make_coord_delta, make_coord_delta_array\n'), ((22133, 22164), 'numpy.array', 'np.array', (['self.boundary[dim][I]'], {}), '(self.boundary[dim][I])\n', (22141, 22164), True, 'import numpy as np\n'), ((8736, 8751), 'numpy.array', 'np.array', (['delta'], {}), '(delta)\n', (8744, 8751), True, 'import numpy as np\n'), ((22057, 22085), 'numpy.array', 'np.array', (['self.boundary[dim]'], {}), '(self.boundary[dim])\n', (22065, 22085), True, 'import numpy as np\n')] |
"""High-dimensional output
This module concerns the following use-case: we make a parameter study over some
input parameters x and the domain code yields an output vector y that contains many
entries. This is typically the case, when y is function-valued, i.e. depends
on an independent variable t, or even "pixel-valued" output is produced,
like on a 2D map (like 1024x1024). Before fitting an input-output relation,
one has to reduce the output to a manageable number of dimensions.
This means extracting the relevant features.
Imagine the following idea: instead of viewing the output as many 1D outputs,
we look at it as a single vector in a high-dimensional space. Linear
combinations between vectors give new vectors. We can choose y from our
training data to span a basis in this vector space. Reducing this basis to an
appropriate set of orthonormal entries is done via the Karhunen-Loeve expansion.
"""
import numpy as np
from numpy.linalg import eigh
class KarhunenLoeve:
    r"""Linear dimension reduction by the Karhunen-Loeve expansion.

    This is efficient if the number of training samples ntrain is
    smaller than the number of support points N in independent variables.
    We want to write the i-th output function $y_i$
    as a linear combination of the other $y_j$ with, e.g. $j=1,2,3$
    $$
    y_i = a_1 y_1 + a_2 y_2 + a_3 y_3
    $$
    This can be done by projection with inner products:
    $$
    \begin{align}
    y_1 \cdot y_i &= a_1 y_1 \cdot y_1+a_2 y_1 \cdot y_2+a_3 y_1 \cdot y_3 \\
    y_2 \cdot y_i &= a_1 y_2 \cdot y_1+a_2 y_2 \cdot y_2+a_3 y_2 \cdot y_3 \\
    y_3 \cdot y_i &= a_1 y_3 \cdot y_1+a_2 y_3 \cdot y_2+a_3 y_3 \cdot y_3
    \end{align}
    $$
    We see that we have to solve a linear system with the
    collocation matrix $M_{ij} = y_i \cdot y_j$. To find the most relevant
    features and reduce dimensionality, we use only the highest eigenvalues.
    We center the data around the mean, i.e. subtract `ymean` before.

    Parameters:
        ytrain: ntrain sample vectors of length N.
        tol: Absolute cutoff tolerance of eigenvalues.
    """

    def __init__(self, ytrain, tol=1e-2):
        self.tol = tol
        self.ymean = np.mean(ytrain, 0)
        self.dy = ytrain - self.ymean
        # Eigendecomposition of the (ntrain x ntrain) Gram matrix of the
        # centered training data; eigh applies since it is symmetric.
        w, Q = eigh(self.dy @ self.dy.T)
        # Keep only the modes whose eigenvalues exceed the cutoff tolerance.
        keep = w > tol
        self.w = w[keep]
        self.Q = Q[:, keep]

    def project(self, y):
        """Project data onto the reduced eigenbasis.

        Parameters:
            y: ntest sample vectors of length N.

        Returns:
            Expansion coefficients of y in eigenbasis, shape (neig, ntest).
        """
        # b[i, j] = dy_i . (y_j - ymean). A single matrix product replaces
        # the original per-row Python loop (identical result, much faster).
        b = self.dy @ (y - self.ymean).T
        # Equivalent to diag(1/w) @ Q.T @ b without allocating a dense
        # diagonal matrix.
        return (self.Q.T @ b) / self.w[:, np.newaxis]

    def lift(self, z):
        """Reconstruct data from expansion coefficients.

        Parameters:
            z: Expansion coefficients of y in eigenbasis.

        Returns:
            Reconstructed ntest sample vectors of length N.
        """
        return self.ymean + (self.dy.T @ (self.Q @ z)).T

    def features(self):
        """
        Returns:
            neig feature vectors of length N.
        """
        return self.dy.T @ self.Q
class PCA:
    """Linear dimension reduction by principal component analysis (PCA).

    This is efficient if the number of training samples ntrain is
    larger than the number of support points in independent variables.

    Parameters:
        ytrain: ntrain sample vectors of length N.
        tol: Absolute cutoff tolerance of eigenvalues.
    """

    def __init__(self, ytrain, tol=1e-2):
        self.tol = tol
        self.ymean = np.mean(ytrain, 0)
        self.dy = ytrain - self.ymean
        # Eigendecomposition of the (N x N) scatter matrix of the
        # centered training data.
        eigenvalues, eigenvectors = eigh(self.dy.T @ self.dy)
        keep = eigenvalues > tol
        self.w = eigenvalues[keep]
        self.Q = eigenvectors[:, keep]

    def project(self, y):
        """Project data onto the principal components.

        Parameters:
            y: ntest sample vectors of length N.

        Returns:
            Expansion coefficients of y in eigenbasis.
        """
        centered = y - self.ymean
        return centered @ self.Q

    def lift(self, z):
        """Reconstruct data from expansion coefficients.

        Parameters:
            z: Expansion coefficients of y in eigenbasis.

        Returns:
            Reconstructed ntest sample vectors of length N.
        """
        reconstruction = (self.Q @ z.T).T
        return self.ymean + reconstruction

    def features(self):
        """
        Returns:
            neig feature vectors of length N.
        """
        return self.Q
| [
"numpy.linalg.eigh",
"numpy.mean",
"numpy.empty",
"numpy.diag"
] | [((2200, 2218), 'numpy.mean', 'np.mean', (['ytrain', '(0)'], {}), '(ytrain, 0)\n', (2207, 2218), True, 'import numpy as np\n'), ((2272, 2297), 'numpy.linalg.eigh', 'eigh', (['(self.dy @ self.dy.T)'], {}), '(self.dy @ self.dy.T)\n', (2276, 2297), False, 'from numpy.linalg import eigh\n'), ((2641, 2666), 'numpy.empty', 'np.empty', (['(ntrain, ntest)'], {}), '((ntrain, ntest))\n', (2649, 2666), True, 'import numpy as np\n'), ((3647, 3665), 'numpy.mean', 'np.mean', (['ytrain', '(0)'], {}), '(ytrain, 0)\n', (3654, 3665), True, 'import numpy as np\n'), ((3719, 3744), 'numpy.linalg.eigh', 'eigh', (['(self.dy.T @ self.dy)'], {}), '(self.dy.T @ self.dy)\n', (3723, 3744), False, 'from numpy.linalg import eigh\n'), ((2765, 2786), 'numpy.diag', 'np.diag', (['(1.0 / self.w)'], {}), '(1.0 / self.w)\n', (2772, 2786), True, 'import numpy as np\n')] |
# Licensed under a 3-clause BSD style license - see LICENSE.rst
from __future__ import print_function, division
import numpy as np
from numpy.testing import assert_allclose, assert_equal
import os
import tempfile
from astropy.io import fits
from astropy.tests.helper import pytest
from astropy.units import Quantity
from astropy.coordinates.angles import Angle
from ...background import GammaImages, IterativeKernelBackgroundEstimator
from ...image import make_empty_image
from ...stats import significance
from ...datasets import FermiGalacticCenter
from ...irf import EnergyDependentTablePSF
try:
import scipy
HAS_SCIPY = True
except ImportError:
HAS_SCIPY = False
@pytest.mark.skipif('not HAS_SCIPY')
def test_GammaImages():
    """Check ``GammaImages.compute_correlated_maps``.

    This is the only method in GammaImages that actually calculates anything.
    """
    # Flat 10x10 counts map with a single hot pixel in the middle.
    image_hdu = make_empty_image(nxpix=10, nypix=10, binsz=1, fill=42)
    image_hdu.data[4][4] = 1000
    counts_map = image_hdu.data
    flat_background = 42 * np.ones_like(counts_map, dtype=float)
    # A 1x1 unit kernel, so correlation must leave the maps unchanged.
    unit_kernel = np.ones((1, 1))
    gamma_images = GammaImages(counts_map, flat_background)
    gamma_images.compute_correlated_maps(unit_kernel)
    # The significance image must match the Li & Ma value computed directly.
    reference = significance(counts_map, flat_background)
    assert_allclose(gamma_images.significance, reference)
@pytest.mark.skipif('not HAS_SCIPY')
class TestIterativeKernelBackgroundEstimator(object):
    """Tests methods in the IterativeKernelBackgroundEstimator.
    """

    def setup_class(self):
        """Prepares appropriate input and defines inputs for test cases.
        """
        from scipy.ndimage import convolve
        # Load/create example model images: flat 10x10 map with one hot pixel.
        counts_hdu = make_empty_image(nxpix=10, nypix=10, binsz=1, fill=42)
        counts_hdu.data[4][4] = 1000
        counts = counts_hdu.data
        # Initial counts required by one of the tests.
        self.counts = counts
        # Smear the point source with the Fermi PSF to get a blob-like source.
        psf = FermiGalacticCenter.psf()
        psf = psf.table_psf_in_energy_band(Quantity([10, 500], 'GeV'))
        kernel_array = psf.kernel(pixel_size=Angle(1, 'deg'),
                                  offset_max=Angle(3, 'deg'), normalize=True)
        counts_blob = convolve(counts, kernel_array, mode='constant')
        self.counts_blob = counts_blob
        # Start with flat background estimate
        # Background must be provided as an ImageHDU
        images = GammaImages(counts=counts, header=counts_hdu.header)
        images_blob = GammaImages(counts=counts_blob, header=counts_hdu.header)
        source_kernel = np.ones((1, 3))
        background_kernel = np.ones((5, 3))
        significance_threshold = 4
        mask_dilation_radius = 1
        # Loads prepared inputs into estimators. `ibe2` is a second, fresh
        # estimator for the full-run test, because run_iteration mutates state.
        self.ibe = IterativeKernelBackgroundEstimator(
            images,
            source_kernel,
            background_kernel,
            significance_threshold,
            mask_dilation_radius
        )
        self.ibe2 = IterativeKernelBackgroundEstimator(
            images,
            source_kernel,
            background_kernel,
            significance_threshold,
            mask_dilation_radius
        )
        self.ibe_blob = IterativeKernelBackgroundEstimator(
            images_blob,
            source_kernel,
            background_kernel,
            significance_threshold,
            mask_dilation_radius
        )

    def test_run_iteration_point(self):
        """Asserts that mask and background are as expected according to input."""
        # Call the run_iteration code as this is what is explicitly being tested
        self.ibe.run_iteration()
        # Should be run twice to update the mask
        self.ibe.run_iteration()
        mask = self.ibe.mask_image_hdu.data
        background = self.ibe.background_image_hdu.data
        # Check mask matches expectations: the hot pixel and its two horizontal
        # neighbours (source kernel is 1x3) must be excluded.
        expected_mask = np.ones_like(self.counts)
        expected_mask[4][3] = 0
        expected_mask[4][4] = 0
        expected_mask[4][5] = 0
        assert_allclose(mask.astype(int), expected_mask)
        # Check background, should be 42 uniformly
        assert_allclose(background.astype(float), 42 * np.ones((10, 10)))

    def test_run_iteration_blob(self):
        """Asserts that mask and background are as expected according to input."""
        # Call the run_iteration code as this is what is explicitly being tested
        self.ibe_blob.run_iteration()
        # Should be run twice to update the mask
        self.ibe_blob.run_iteration()
        mask = self.ibe_blob.mask_image_hdu.data
        background = self.ibe_blob.background_image_hdu.data
        # Check background, should be 42 uniformly within 10%
        assert_allclose(background, 42 * np.ones((10, 10)), rtol=0.15)

    def test_run(self):
        """Tests run script."""
        mask, background = self.ibe2.run()
        assert_allclose(mask.sum(), 97)
        assert_allclose(background, 42 * np.ones((10, 10)))

    def test_save_files(self):
        """Tests that files are saved, and checks values within them."""
        # Create a temporary directory to use as the output filename prefix.
        # (Renamed from `dir` to avoid shadowing the builtin.) Note that
        # `filebase + '00_mask.fits'` has no path separator, so the files are
        # written *next to* the temp directory, matching save_files' naming.
        tmp_dir = tempfile.mkdtemp()
        self.ibe.run_iteration(1)
        self.ibe.save_files(filebase=tmp_dir, index=0)
        mask_filename = tmp_dir + '00_mask.fits'
        significance_filename = tmp_dir + '00_significance.fits'
        background_filename = tmp_dir + '00_background.fits'
        mask_data = fits.open(mask_filename)[1].data
        significance_data = fits.open(significance_filename)[1].data
        background_data = fits.open(background_filename)[1].data
        # Checks values in files against known results for one iteration.
        assert_allclose(mask_data.sum(), 97)
        assert_allclose(significance_data.sum(), 157.316195729298)
        assert_allclose(background_data.sum(), 4200)
        # Clean up: remove the written files (previously leaked) and the
        # now-empty temp directory.
        for fname in (mask_filename, significance_filename, background_filename):
            os.remove(fname)
        os.removedirs(tmp_dir)
| [
"numpy.ones_like",
"numpy.ones",
"astropy.tests.helper.pytest.mark.skipif",
"numpy.testing.assert_allclose",
"scipy.ndimage.convolve",
"astropy.coordinates.angles.Angle",
"os.removedirs",
"tempfile.mkdtemp",
"astropy.io.fits.open",
"astropy.units.Quantity"
] | [((684, 719), 'astropy.tests.helper.pytest.mark.skipif', 'pytest.mark.skipif', (['"""not HAS_SCIPY"""'], {}), "('not HAS_SCIPY')\n", (702, 719), False, 'from astropy.tests.helper import pytest\n'), ((1531, 1566), 'astropy.tests.helper.pytest.mark.skipif', 'pytest.mark.skipif', (['"""not HAS_SCIPY"""'], {}), "('not HAS_SCIPY')\n", (1549, 1566), False, 'from astropy.tests.helper import pytest\n'), ((1208, 1223), 'numpy.ones', 'np.ones', (['(1, 1)'], {}), '((1, 1))\n', (1215, 1223), True, 'import numpy as np\n'), ((1494, 1527), 'numpy.testing.assert_allclose', 'assert_allclose', (['actual', 'expected'], {}), '(actual, expected)\n', (1509, 1527), False, 'from numpy.testing import assert_allclose, assert_equal\n'), ((1085, 1118), 'numpy.ones_like', 'np.ones_like', (['counts'], {'dtype': 'float'}), '(counts, dtype=float)\n', (1097, 1118), True, 'import numpy as np\n'), ((2404, 2451), 'scipy.ndimage.convolve', 'convolve', (['counts', 'kernel_array'], {'mode': '"""constant"""'}), "(counts, kernel_array, mode='constant')\n", (2412, 2451), False, 'from scipy.ndimage import convolve\n'), ((2793, 2808), 'numpy.ones', 'np.ones', (['(1, 3)'], {}), '((1, 3))\n', (2800, 2808), True, 'import numpy as np\n'), ((2838, 2853), 'numpy.ones', 'np.ones', (['(5, 3)'], {}), '((5, 3))\n', (2845, 2853), True, 'import numpy as np\n'), ((4857, 4882), 'numpy.ones_like', 'np.ones_like', (['self.counts'], {}), '(self.counts)\n', (4869, 4882), True, 'import numpy as np\n'), ((6128, 6146), 'tempfile.mkdtemp', 'tempfile.mkdtemp', ([], {}), '()\n', (6144, 6146), False, 'import tempfile\n'), ((6833, 6851), 'os.removedirs', 'os.removedirs', (['dir'], {}), '(dir)\n', (6846, 6851), False, 'import os\n'), ((2213, 2239), 'astropy.units.Quantity', 'Quantity', (['[10, 500]', '"""GeV"""'], {}), "([10, 500], 'GeV')\n", (2221, 2239), False, 'from astropy.units import Quantity\n'), ((2286, 2301), 'astropy.coordinates.angles.Angle', 'Angle', (['(1)', '"""deg"""'], {}), "(1, 'deg')\n", (2291, 2301), False, 
'from astropy.coordinates.angles import Angle\n'), ((2348, 2363), 'astropy.coordinates.angles.Angle', 'Angle', (['(3)', '"""deg"""'], {}), "(3, 'deg')\n", (2353, 2363), False, 'from astropy.coordinates.angles import Angle\n'), ((5151, 5168), 'numpy.ones', 'np.ones', (['(10, 10)'], {}), '((10, 10))\n', (5158, 5168), True, 'import numpy as np\n'), ((5713, 5730), 'numpy.ones', 'np.ones', (['(10, 10)'], {}), '((10, 10))\n', (5720, 5730), True, 'import numpy as np\n'), ((5933, 5950), 'numpy.ones', 'np.ones', (['(10, 10)'], {}), '((10, 10))\n', (5940, 5950), True, 'import numpy as np\n'), ((6417, 6441), 'astropy.io.fits.open', 'fits.open', (['mask_filename'], {}), '(mask_filename)\n', (6426, 6441), False, 'from astropy.io import fits\n'), ((6478, 6510), 'astropy.io.fits.open', 'fits.open', (['significance_filename'], {}), '(significance_filename)\n', (6487, 6510), False, 'from astropy.io import fits\n'), ((6545, 6575), 'astropy.io.fits.open', 'fits.open', (['background_filename'], {}), '(background_filename)\n', (6554, 6575), False, 'from astropy.io import fits\n')] |
import click
import json
from collections import defaultdict
import random
import os
import boto3
import imageio
from PIL import ImageFont, ImageDraw, Image
import cv2
import numpy as np
from retry.api import retry_call
def generate_url(s3, bucket_name, key):
    """Create a presigned GET URL for an S3 object, valid for one hour."""
    request_params = {'Bucket': bucket_name, 'Key': key}
    return s3.generate_presigned_url(
        ClientMethod='get_object',
        Params=request_params,
        ExpiresIn=3600,
        HttpMethod='GET',
    )
# CLI tool: reads an annotation-task JSON file, downloads each referenced
# image (from S3 via a presigned URL, or from a local directory), draws the
# worker name and the labelled bounding boxes onto it, and writes the result
# into a per-worker subdirectory of OUTPUT_DIR.
@click.command()
@click.argument('filename')                     # JSON file with tasks/results/resources
@click.argument('output_dir')                   # directory for annotated images
@click.option('--bucket', '-b')                 # S3 bucket holding the source images
@click.option('--font-size', 'size', type=int, default=8)
@click.option('--keyword')                      # only process resources whose path contains this
@click.option('--num', type=int, default=-1)    # limit number of tasks (-1 = all)
@click.option('--noplot', is_flag=True)         # skip drawing overlays
@click.option('--local')                        # read images from this local dir instead of S3
def main(filename, output_dir, bucket, size, keyword, num, noplot, local):
    os.makedirs(output_dir, exist_ok=True)
    fontpath = 'font/ipaexg.ttf'
    font = ImageFont.truetype(fontpath, size)
    s3 = boto3.client('s3')
    j = json.load(open(filename))
    # One random bright color per label/worker name, created lazily on first use.
    colors = defaultdict(lambda: (random.randint(128, 255),random.randint(128, 255), random.randint(128, 255)))
    if num > 0:
        j = j[:num]
    for task in j:
        # Only the first result and first resource of each task are used.
        result = task['results'][0]
        resource = task['resources'][0]
        if keyword is not None and keyword not in resource['contents']:
            continue
        if local is None:
            # Fetch from S3 via a presigned URL; retry up to 3 times.
            img = retry_call(imageio.imread, fargs=(generate_url(s3, bucket, resource['contents']),), tries=3)
        else:
            img = imageio.imread(os.path.join(local, resource['contents']))
        # Drop any alpha channel and convert RGB -> BGR for OpenCV.
        img = img[:, :, :3][:,:,::-1]
        img = np.ascontiguousarray(img, dtype=np.uint8)
        if not noplot:
            # Draw the worker name in a colored box at the top-left corner.
            img_pil = Image.fromarray(img)
            draw = ImageDraw.Draw(img_pil)
            x, y = (0, 0)
            username = result['worker']
            w, h = font.getsize(username)
            draw.rectangle((x, y, x + w, y + h), fill=colors[username])
            draw.text((x, y), username, font = font , fill = (0, 0, 0) )
            img = np.array(img_pil)
            print(img.shape)
            # Draw one labelled bounding box per annotation.
            # NOTE(review): the nesting of this loop under `if not noplot` was
            # reconstructed (source indentation was lost) -- confirm against
            # the original.
            for info in result['information']:
                classname = ','.join(["{}:{}".format(question['name'], value['name']) for question in info['questions'] for value in question['value']])
                classname = '{} ({})'.format(classname, info['input_text'])
                print(classname)
                xmin, ymin, xmax, ymax = map(int, (info['rectangle']['x1'], info['rectangle']['y1'], info['rectangle']['x2'], info['rectangle']['y2']))
                print(xmin, ymin, xmax, ymax)
                print(img.shape)
                img = cv2.rectangle(img, (xmin, ymin), (xmax, ymax), colors[classname], 2)
                img_pil = Image.fromarray(img)
                draw = ImageDraw.Draw(img_pil)
                x, y = (xmin, ymin + 4)
                w, h = font.getsize(classname)
                draw.rectangle((x, y, x + w, y + h), fill=colors[classname])
                draw.text((x, y), classname, font = font , fill = (0, 0, 0) )
                img = np.array(img_pil)
        os.makedirs(os.path.join(output_dir, result['worker']), exist_ok=True)
        cv2.imwrite(os.path.join(output_dir, result['worker'], os.path.basename(resource['contents'])), img)


if __name__ == '__main__':
    main()
| [
"cv2.rectangle",
"click.argument",
"PIL.Image.fromarray",
"boto3.client",
"os.makedirs",
"click.option",
"os.path.join",
"PIL.ImageFont.truetype",
"numpy.ascontiguousarray",
"numpy.array",
"PIL.ImageDraw.Draw",
"os.path.basename",
"click.command",
"random.randint"
] | [((451, 466), 'click.command', 'click.command', ([], {}), '()\n', (464, 466), False, 'import click\n'), ((468, 494), 'click.argument', 'click.argument', (['"""filename"""'], {}), "('filename')\n", (482, 494), False, 'import click\n'), ((496, 524), 'click.argument', 'click.argument', (['"""output_dir"""'], {}), "('output_dir')\n", (510, 524), False, 'import click\n'), ((526, 556), 'click.option', 'click.option', (['"""--bucket"""', '"""-b"""'], {}), "('--bucket', '-b')\n", (538, 556), False, 'import click\n'), ((558, 614), 'click.option', 'click.option', (['"""--font-size"""', '"""size"""'], {'type': 'int', 'default': '(8)'}), "('--font-size', 'size', type=int, default=8)\n", (570, 614), False, 'import click\n'), ((616, 641), 'click.option', 'click.option', (['"""--keyword"""'], {}), "('--keyword')\n", (628, 641), False, 'import click\n'), ((643, 686), 'click.option', 'click.option', (['"""--num"""'], {'type': 'int', 'default': '(-1)'}), "('--num', type=int, default=-1)\n", (655, 686), False, 'import click\n'), ((688, 726), 'click.option', 'click.option', (['"""--noplot"""'], {'is_flag': '(True)'}), "('--noplot', is_flag=True)\n", (700, 726), False, 'import click\n'), ((728, 751), 'click.option', 'click.option', (['"""--local"""'], {}), "('--local')\n", (740, 751), False, 'import click\n'), ((831, 869), 'os.makedirs', 'os.makedirs', (['output_dir'], {'exist_ok': '(True)'}), '(output_dir, exist_ok=True)\n', (842, 869), False, 'import os\n'), ((914, 948), 'PIL.ImageFont.truetype', 'ImageFont.truetype', (['fontpath', 'size'], {}), '(fontpath, size)\n', (932, 948), False, 'from PIL import ImageFont, ImageDraw, Image\n'), ((958, 976), 'boto3.client', 'boto3.client', (['"""s3"""'], {}), "('s3')\n", (970, 976), False, 'import boto3\n'), ((1629, 1670), 'numpy.ascontiguousarray', 'np.ascontiguousarray', (['img'], {'dtype': 'np.uint8'}), '(img, dtype=np.uint8)\n', (1649, 1670), True, 'import numpy as np\n'), ((1716, 1736), 'PIL.Image.fromarray', 'Image.fromarray', 
(['img'], {}), '(img)\n', (1731, 1736), False, 'from PIL import ImageFont, ImageDraw, Image\n'), ((1756, 1779), 'PIL.ImageDraw.Draw', 'ImageDraw.Draw', (['img_pil'], {}), '(img_pil)\n', (1770, 1779), False, 'from PIL import ImageFont, ImageDraw, Image\n'), ((2051, 2068), 'numpy.array', 'np.array', (['img_pil'], {}), '(img_pil)\n', (2059, 2068), True, 'import numpy as np\n'), ((3127, 3169), 'os.path.join', 'os.path.join', (['output_dir', "result['worker']"], {}), "(output_dir, result['worker'])\n", (3139, 3169), False, 'import os\n'), ((1047, 1071), 'random.randint', 'random.randint', (['(128)', '(255)'], {}), '(128, 255)\n', (1061, 1071), False, 'import random\n'), ((1072, 1096), 'random.randint', 'random.randint', (['(128)', '(255)'], {}), '(128, 255)\n', (1086, 1096), False, 'import random\n'), ((1098, 1122), 'random.randint', 'random.randint', (['(128)', '(255)'], {}), '(128, 255)\n', (1112, 1122), False, 'import random\n'), ((1534, 1575), 'os.path.join', 'os.path.join', (['local', "resource['contents']"], {}), "(local, resource['contents'])\n", (1546, 1575), False, 'import os\n'), ((2662, 2730), 'cv2.rectangle', 'cv2.rectangle', (['img', '(xmin, ymin)', '(xmax, ymax)', 'colors[classname]', '(2)'], {}), '(img, (xmin, ymin), (xmax, ymax), colors[classname], 2)\n', (2675, 2730), False, 'import cv2\n'), ((2757, 2777), 'PIL.Image.fromarray', 'Image.fromarray', (['img'], {}), '(img)\n', (2772, 2777), False, 'from PIL import ImageFont, ImageDraw, Image\n'), ((2801, 2824), 'PIL.ImageDraw.Draw', 'ImageDraw.Draw', (['img_pil'], {}), '(img_pil)\n', (2815, 2824), False, 'from PIL import ImageFont, ImageDraw, Image\n'), ((3089, 3106), 'numpy.array', 'np.array', (['img_pil'], {}), '(img_pil)\n', (3097, 3106), True, 'import numpy as np\n'), ((3249, 3287), 'os.path.basename', 'os.path.basename', (["resource['contents']"], {}), "(resource['contents'])\n", (3265, 3287), False, 'import os\n')] |
from os import listdir
from os.path import join
import numpy as np
from cv2 import resize
from imageio import imread
def txt_to_array(path):
    """Parse a whitespace-separated numeric text file into a list of rows.

    Each non-blank line becomes one list of floats.

    Parameters:
        path: path to the text file.

    Returns:
        Nested list of floats, one inner list per non-blank line.
    """
    with open(path) as handle:
        # split() (no argument) tolerates repeated spaces/tabs and strips the
        # trailing newline; blank lines are skipped instead of raising
        # ValueError on float('').
        return [[float(tok) for tok in line.split()] for line in handle if line.strip()]
def load_pictures(args):
    """Load a NeRF-style dataset (rgb/ + pose/ + intrinsics.txt + bbox.txt).

    Uses ``args.datadir`` as the dataset root and ``args.render_factor`` as
    the image downsampling factor. Returns
    ``imgs, poses, render_poses, [H, W, focal], K, i_split, near, far``.
    """
    basedir = args.datadir
    downsample = args.render_factor
    # loading images
    imgs_dir = join(basedir, 'rgb')
    poses_dir = join(basedir, 'pose')
    # getting all poses and images (skip hidden files starting with '.')
    all_poses_fnames = [join(poses_dir,f) for f in sorted(listdir(poses_dir)) if f[0]!='.']
    all_imgs_fnames = [join(imgs_dir,f) for f in sorted(listdir(imgs_dir)) if f[0]!='.']
    # Split filenames by the train/val/test substring in their names.
    train_poses = sorted([fname for fname in all_poses_fnames if 'train' in fname])
    val_poses = sorted([fname for fname in all_poses_fnames if 'val' in fname])
    test_poses = sorted([fname for fname in all_poses_fnames if 'test' in fname])
    render_poses = test_poses
    # NOTE(review): this converts a list of *filename strings* to an array;
    # presumably downstream code loads them lazily -- confirm against callers.
    render_poses = np.asarray(render_poses)
    train_imgs = sorted([fname for fname in all_imgs_fnames if 'train' in fname])
    val_imgs = sorted([fname for fname in all_imgs_fnames if 'val' in fname])
    test_imgs = sorted([fname for fname in all_imgs_fnames if 'test' in fname])
    # no test images results in a None entry
    if len(test_imgs) < len(test_poses):
        len_diff = len(test_poses) - len(test_imgs)
        test_imgs += [None] * len_diff
    all_imgs = train_imgs + val_imgs + test_imgs
    all_poses = train_poses + val_poses + test_poses
    # Index ranges for the train/val/test splits.
    counts = [0, len(train_poses),len(train_poses)+len(val_poses),len(all_poses)]
    i_split = [np.arange(counts1, counts2) for counts1, counts2 in zip(counts,counts[1:])]
    imgs = []
    for fname in all_imgs:
        if fname is not None:
            # Normalize to [0, 1] and downsample.
            img = imread(fname)/255
            img = resize(img, (img.shape[0]//downsample,img.shape[1]//downsample))
        # else: raise ValueError('No image found for pose {}'.format(fname))
        # NOTE(review): when fname is None the *previously loaded* image is
        # appended again (placeholder for missing test images); a None in the
        # first position would raise NameError -- confirm this is intended.
        imgs.append(img)
    imgs = np.asarray(imgs).astype(np.float32)
    poses = []
    for fname in all_poses:
        pose = np.asarray(txt_to_array(fname))
        # Flip the y and z axes of the camera-to-world rotation columns.
        pose[:, 1:3] *= -1
        poses.append(pose.tolist())
    poses = np.asarray(poses).astype(np.float32)
    H, W = imgs[0].shape[:2]
    int_path = join(basedir, 'intrinsics.txt')
    K = np.asarray(txt_to_array(int_path))
    focal = K[0,0]
    # near and far calculations
    bbox_path = join(basedir, 'bbox.txt')
    bounds = np.asarray(txt_to_array(bbox_path)[0])
    min_corner, max_corner = bounds[:3],bounds[3:-1]
    # Camera positions: translation column of each 3x4/4x4 pose.
    trans = poses[...,:3,-1]
    # Largest distance of any camera from the origin.
    camera_radius = np.max(np.diag(trans @ trans.T)**.5)
    largest_obj_rad = np.max([np.linalg.norm(min_corner),np.linalg.norm(max_corner)])
    # Pad the near/far bounds by one unit on each side.
    near = np.floor(camera_radius - largest_obj_rad) - 1
    far = np.ceil(camera_radius + largest_obj_rad) + 1
    # recommended by piazza
    # near = 1.
    # far = 5.
    # actual 0. , 6.
    return imgs, poses, render_poses, [H, W, focal], K, i_split, near, far
| [
"numpy.ceil",
"os.listdir",
"numpy.asarray",
"os.path.join",
"numpy.floor",
"numpy.diag",
"numpy.linalg.norm",
"imageio.imread",
"cv2.resize",
"numpy.arange"
] | [((384, 404), 'os.path.join', 'join', (['basedir', '"""rgb"""'], {}), "(basedir, 'rgb')\n", (388, 404), False, 'from os.path import join\n'), ((421, 442), 'os.path.join', 'join', (['basedir', '"""pose"""'], {}), "(basedir, 'pose')\n", (425, 442), False, 'from os.path import join\n'), ((964, 988), 'numpy.asarray', 'np.asarray', (['render_poses'], {}), '(render_poses)\n', (974, 988), True, 'import numpy as np\n'), ((2296, 2327), 'os.path.join', 'join', (['basedir', '"""intrinsics.txt"""'], {}), "(basedir, 'intrinsics.txt')\n", (2300, 2327), False, 'from os.path import join\n'), ((2439, 2464), 'os.path.join', 'join', (['basedir', '"""bbox.txt"""'], {}), "(basedir, 'bbox.txt')\n", (2443, 2464), False, 'from os.path import join\n'), ((502, 520), 'os.path.join', 'join', (['poses_dir', 'f'], {}), '(poses_dir, f)\n', (506, 520), False, 'from os.path import join\n'), ((593, 610), 'os.path.join', 'join', (['imgs_dir', 'f'], {}), '(imgs_dir, f)\n', (597, 610), False, 'from os.path import join\n'), ((1611, 1638), 'numpy.arange', 'np.arange', (['counts1', 'counts2'], {}), '(counts1, counts2)\n', (1620, 1638), True, 'import numpy as np\n'), ((2758, 2799), 'numpy.floor', 'np.floor', (['(camera_radius - largest_obj_rad)'], {}), '(camera_radius - largest_obj_rad)\n', (2766, 2799), True, 'import numpy as np\n'), ((2814, 2854), 'numpy.ceil', 'np.ceil', (['(camera_radius + largest_obj_rad)'], {}), '(camera_radius + largest_obj_rad)\n', (2821, 2854), True, 'import numpy as np\n'), ((1819, 1888), 'cv2.resize', 'resize', (['img', '(img.shape[0] // downsample, img.shape[1] // downsample)'], {}), '(img, (img.shape[0] // downsample, img.shape[1] // downsample))\n', (1825, 1888), False, 'from cv2 import resize\n'), ((2006, 2022), 'numpy.asarray', 'np.asarray', (['imgs'], {}), '(imgs)\n', (2016, 2022), True, 'import numpy as np\n'), ((2209, 2226), 'numpy.asarray', 'np.asarray', (['poses'], {}), '(poses)\n', (2219, 2226), True, 'import numpy as np\n'), ((2626, 2650), 'numpy.diag', 
'np.diag', (['(trans @ trans.T)'], {}), '(trans @ trans.T)\n', (2633, 2650), True, 'import numpy as np\n'), ((2686, 2712), 'numpy.linalg.norm', 'np.linalg.norm', (['min_corner'], {}), '(min_corner)\n', (2700, 2712), True, 'import numpy as np\n'), ((2713, 2739), 'numpy.linalg.norm', 'np.linalg.norm', (['max_corner'], {}), '(max_corner)\n', (2727, 2739), True, 'import numpy as np\n'), ((536, 554), 'os.listdir', 'listdir', (['poses_dir'], {}), '(poses_dir)\n', (543, 554), False, 'from os import listdir\n'), ((626, 643), 'os.listdir', 'listdir', (['imgs_dir'], {}), '(imgs_dir)\n', (633, 643), False, 'from os import listdir\n'), ((1783, 1796), 'imageio.imread', 'imread', (['fname'], {}), '(fname)\n', (1789, 1796), False, 'from imageio import imread\n')] |
import matplotlib
#matplotlib.style.use('classic')
matplotlib.use('Agg')
import matplotlib.pyplot as pl
import numpy as np
from brian2.units import *
import sys, pickle
# Load six plasticity-network simulation results, one per combination of
# recurrent (a_rec) and feed-forward (a_ffwd) scaling factors. The factors
# are not stored inside the pickle files (they only appear in the file
# names), so each result dict is tagged with them right after loading.
with open('data/plst_net_red_arec0.05_affwd0.10_N4993_T50000ms_stdphom_selfrm.p', 'rb') as pfile:
    st005_010 = pickle.load(pfile)
st005_010.update({'a_rec': 0.05, 'a_ffwd': 0.10})
with open('data/plst_net_red_arec0.25_affwd0.10_N4993_T50000ms_stdphom_selfrm.p', 'rb') as pfile:
    st025_010 = pickle.load(pfile)
st025_010.update({'a_rec': 0.25, 'a_ffwd': 0.10})
with open('data/plst_net_red_arec0.10_affwd0.20_N4993_T50000ms_stdphom_selfrm.p', 'rb') as pfile:
    st010_020 = pickle.load(pfile)
st010_020.update({'a_rec': 0.10, 'a_ffwd': 0.20})
with open('data/plst_net_red_arec0.50_affwd0.20_N4993_T50000ms_stdphom_selfrm.p', 'rb') as pfile:
    st050_020 = pickle.load(pfile)
st050_020.update({'a_rec': 0.50, 'a_ffwd': 0.20})
with open('data/plst_net_red_arec0.10_affwd0.15_N4993_T50000ms_stdphom_selfrm.p', 'rb') as pfile:
    st010_015 = pickle.load(pfile)
st010_015.update({'a_rec': 0.10, 'a_ffwd': 0.15})
with open('data/plst_net_red_arec0.30_affwd0.15_N4993_T50000ms_stdphom_selfrm.p', 'rb') as pfile:
    st030_015 = pickle.load(pfile)
st030_015.update({'a_rec': 0.30, 'a_ffwd': 0.15})
matplotlib.rc('text', usetex=True)
# LaTeX preamble entries for text rendering. BUGFIX: the original list was
# missing a comma after r'\sansmath', so implicit string concatenation
# silently merged it with the \usepackage{siunitx} entry into one element.
pl.rcParams['text.latex.preamble'] = [
    r'\usepackage{tgheros}',
    r'\usepackage[eulergreek]{sansmath}',
    r'\sansmath',
    r'\usepackage{siunitx}',
    r'\sisetup{detect-all}',
]
def make_figure(sets, fname):
    """Plot mean E->E synaptic weight against pre/post neuron distance.

    Parameters
    ----------
    sets : list of dict
        Simulation result dicts, each providing 'NErcr' (neuron coordinates
        'x', 'y' indexed by neuron id), 'S_ee' (synapse table with presynaptic
        indices 'i', postsynaptic indices 'j' and weights 'w' in volt units),
        plus the scaling factors 'a_rec' and 'a_ffwd' used in the legend.
    fname : str
        Suffix for the output image 'img/weight_dist_<fname>.png'.
    """
    fig, ax = pl.subplots(1, 1)
    fig.set_size_inches(5., 3.5)
    for dat in sets:
        NE = dat['NErcr']
        See = dat['S_ee']
        # Distance between each connected pair; the `% 32` presumably
        # implements wrap-around on a 32-unit grid -- TODO confirm (abs
        # coordinate differences below 32 make it a no-op).
        dists = [np.sqrt((abs(NE['x'][See['i'][k]] - NE['x'][See['j'][k]]) % 32)**2 + (abs(NE['y'][See['i'][k]] - NE['y'][See['j'][k]]) % 32)**2) for k in range(len(See['i']))]
        # Mean weight per distance bin = (sum of weights) / (pair count).
        # (An unused squared-weight histogram was removed here.)
        n, dbins = np.histogram(dists, bins=50)
        sy, _ = np.histogram(dists, bins=dbins, weights=See['w']/mV)
        centers = 0.5*(dbins[1:]+dbins[:-1])
        ax.plot(centers, sy/n, label=r'$\alpha_{\text{rec}} ='+str(dat['a_rec']) + '$, '+ r'$\alpha_{\text{ffwd}} = ' + str(dat['a_ffwd']) +'$', linewidth=3.)
    ax.set_xlabel('distance [a.u.]')
    ax.set_ylabel('mean synaptic weight [mV]')
    ax.legend(loc='upper right', framealpha=1., frameon=False)
    # Open axes style: hide top/right spines, ticks on left/bottom only.
    ax.spines['right'].set_visible(False)
    ax.spines['top'].set_visible(False)
    ax.yaxis.set_ticks_position('left')
    ax.xaxis.set_ticks_position('bottom')
    fig.tight_layout()
    fig.savefig('img/weight_dist_{:s}.png'.format(fname), dpi=300)
# One figure per pair sharing the same a_ffwd value, plus a combined figure
# with all six parameter combinations.
if __name__ == '__main__':
    make_figure([st005_010, st025_010], '005-010_025_10')
    make_figure([st010_020, st050_020], '010-020_050_20')
    make_figure([st010_015, st030_015], '010-015_030_15')
    make_figure([st005_010, st025_010, st010_020, st050_020, st010_015, st030_015], 'all')
| [
"numpy.histogram",
"matplotlib.use",
"pickle.load",
"matplotlib.rc",
"matplotlib.pyplot.subplots"
] | [((52, 73), 'matplotlib.use', 'matplotlib.use', (['"""Agg"""'], {}), "('Agg')\n", (66, 73), False, 'import matplotlib\n'), ((1308, 1342), 'matplotlib.rc', 'matplotlib.rc', (['"""text"""'], {'usetex': '(True)'}), "('text', usetex=True)\n", (1321, 1342), False, 'import matplotlib\n'), ((288, 306), 'pickle.load', 'pickle.load', (['pfile'], {}), '(pfile)\n', (299, 306), False, 'import sys, pickle\n'), ((475, 493), 'pickle.load', 'pickle.load', (['pfile'], {}), '(pfile)\n', (486, 493), False, 'import sys, pickle\n'), ((663, 681), 'pickle.load', 'pickle.load', (['pfile'], {}), '(pfile)\n', (674, 681), False, 'import sys, pickle\n'), ((850, 868), 'pickle.load', 'pickle.load', (['pfile'], {}), '(pfile)\n', (861, 868), False, 'import sys, pickle\n'), ((1042, 1060), 'pickle.load', 'pickle.load', (['pfile'], {}), '(pfile)\n', (1053, 1060), False, 'import sys, pickle\n'), ((1229, 1247), 'pickle.load', 'pickle.load', (['pfile'], {}), '(pfile)\n', (1240, 1247), False, 'import sys, pickle\n'), ((1614, 1631), 'matplotlib.pyplot.subplots', 'pl.subplots', (['(1)', '(1)'], {}), '(1, 1)\n', (1625, 1631), True, 'import matplotlib.pyplot as pl\n'), ((1935, 1963), 'numpy.histogram', 'np.histogram', (['dists'], {'bins': '(50)'}), '(dists, bins=50)\n', (1947, 1963), True, 'import numpy as np\n'), ((1981, 2035), 'numpy.histogram', 'np.histogram', (['dists'], {'bins': 'dbins', 'weights': "(See['w'] / mV)"}), "(dists, bins=dbins, weights=See['w'] / mV)\n", (1993, 2035), True, 'import numpy as np\n'), ((2052, 2122), 'numpy.histogram', 'np.histogram', (['dists'], {'bins': 'dbins', 'weights': "(See['w'] / mV * See['w'] / mV)"}), "(dists, bins=dbins, weights=See['w'] / mV * See['w'] / mV)\n", (2064, 2122), True, 'import numpy as np\n')] |
import torch
from torch import nn, Tensor
from torch.nn.utils.rnn import PackedSequence
from sklearn import metrics
import numpy as np
from tqdm import tqdm
from typing import Optional
from collections import OrderedDict
class LSTM_CNN2(nn.Module):
    """Stacked (bi)LSTM followed by three parallel 1-D CNN branches.

    The recurrent stack encodes a (batch, time, feature) sequence; three
    convolutional branches with kernel sizes 2/3/4 pool over the time axis
    and their flattened outputs are concatenated and mapped to one logit.
    The final Linear expects 6800 features, which corresponds to 48 time
    steps: sum over branches of 100 * floor((48 - k + 1) / 2).
    """

    def __init__(self, input_dim=390, hidden_dim=8, lstm_layers=1):
        """Build the recurrent stack and the three conv branches.

        Parameters
        ----------
        input_dim : int
            Number of input features per time step.
        hidden_dim : int
            Hidden size of the first (bidirectional) LSTM stack.
        lstm_layers : int
            Total number of recurrent layers; with >= 2, all but the last
            form a bidirectional stack topped by a unidirectional LSTM.
        """
        super(LSTM_CNN2, self).__init__()
        self.input_dim = input_dim
        self.hidden_dim = hidden_dim
        self.layers = lstm_layers
        self.bidirectional = True
        self.dropout = 0.3
        self.rec_dropout = 0.3
        self.depth = lstm_layers
        self.drop_conv = 0.5
        self.num_classes = 1
        # Recurrent encoder: bidirectional stack (all but last layer) plus a
        # final unidirectional LSTM; with a single layer only lstm2 exists.
        if self.layers >= 2:
            self.lstm1 = nn.LSTM(input_size=self.input_dim,
                                 hidden_size=self.hidden_dim,
                                 num_layers=self.layers - 1,
                                 dropout=self.rec_dropout,
                                 bidirectional=self.bidirectional,
                                 batch_first=True)
            self.do0 = nn.Dropout(self.dropout)
            self.lstm2 = nn.LSTM(input_size=self.hidden_dim * 2,
                                 hidden_size=self.hidden_dim * 2,
                                 num_layers=1,
                                 dropout=self.rec_dropout,
                                 bidirectional=False,
                                 batch_first=True)
        else:
            self.lstm2 = nn.LSTM(input_size=self.input_dim,
                                 hidden_size=self.hidden_dim * 2,
                                 num_layers=1,
                                 dropout=self.rec_dropout,
                                 bidirectional=False,
                                 batch_first=True)
        self.do1 = nn.Dropout(self.dropout)
        # Three parallel conv branches, one per kernel size (previously the
        # sizes were hard-coded although an nfilters list was defined).
        nb_filters = 100
        kernel_sizes = [2, 3, 4]
        self.cnn1 = self._make_conv_branch(kernel_sizes[0], nb_filters)
        self.cnn2 = self._make_conv_branch(kernel_sizes[1], nb_filters)
        self.cnn3 = self._make_conv_branch(kernel_sizes[2], nb_filters)
        self.do2 = nn.Dropout(self.drop_conv)
        self.final = nn.Linear(6800, self.num_classes)

    def _make_conv_branch(self, kernel_size, nb_filters):
        """One Conv1d -> ReLU -> MaxPool1d -> Flatten branch over time."""
        return nn.Sequential(
            nn.Conv1d(in_channels=self.hidden_dim * 2, out_channels=nb_filters,
                      kernel_size=kernel_size, stride=1, padding=0,
                      dilation=1, groups=1, bias=True, padding_mode='zeros'),
            nn.ReLU(),
            nn.MaxPool1d(kernel_size=2),
            nn.Flatten()
        )

    def forward(self, inputs, labels=None):
        """Return raw logits of shape (batch, 1); `labels` is unused
        (kept for interface compatibility)."""
        out = inputs
        if self.layers >= 2:
            out, h = self.lstm1(out)
            out = self.do0(out)
        out, h = self.lstm2(out)
        out = self.do1(out)
        # Conv1d expects (batch, channels, time); collect each branch.
        pooled = [branch(out.permute((0, 2, 1)))
                  for branch in (self.cnn1, self.cnn2, self.cnn3)]
        representation = torch.cat(pooled, dim=1).contiguous()
        out = self.do2(representation)
        return self.final(out)
class VariationalDropout(nn.Module):
    """
    Dropout whose Bernoulli mask is shared across the temporal dimension.

    Implements the input/output dropout of https://arxiv.org/abs/1512.05287
    (not the recurrent-activation variant of that paper): a single mask per
    sequence is sampled and applied to every time step, with inverted-dropout
    rescaling by 1/(1 - p).
    """

    def __init__(self, dropout: float, batch_first: Optional[bool] = False):
        super().__init__()
        self.dropout = dropout
        self.batch_first = batch_first

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        # Identity in eval mode or when dropout is disabled.
        if self.dropout <= 0. or not self.training:
            return x

        packed = isinstance(x, PackedSequence)
        if packed:
            x, batch_sizes = x
            n_batch = int(batch_sizes[0])
        else:
            batch_sizes = None
            n_batch = x.size(0)

        # Sample one keep-mask per sequence; broadcast along the time axis
        # (dim 1 when batch_first, dim 0 otherwise).
        if self.batch_first:
            mask_shape = (n_batch, 1, x.size(2))
        else:
            mask_shape = (1, n_batch, x.size(2))
        keep = x.new_empty(*mask_shape, requires_grad=False).bernoulli_(1 - self.dropout)
        x = x.masked_fill(keep == 0, 0) / (1 - self.dropout)

        return PackedSequence(x, batch_sizes) if packed else x
class LSTMNew(nn.LSTM):
    """LSTM with variational input/output dropout and weight dropout.

    `dropouti` / `dropouto` apply a time-shared mask to the layer's inputs
    and outputs (see VariationalDropout); `dropoutw` zeroes entries of the
    hidden-to-hidden weight matrices on every training forward pass
    (DropConnect-style). Weights are initialised with orthogonal /
    Xavier-uniform schemes and, optionally, a unit forget-gate bias.
    """

    def __init__(self, *args, dropouti: float = 0.,
                 dropoutw: float = 0., dropouto: float = 0.,
                 batch_first=True, unit_forget_bias=True, **kwargs):
        super().__init__(*args, **kwargs, batch_first=batch_first)
        self.unit_forget_bias = unit_forget_bias
        self.dropoutw = dropoutw
        self.input_drop = VariationalDropout(dropouti,
                                             batch_first=batch_first)
        self.output_drop = VariationalDropout(dropouto,
                                              batch_first=batch_first)
        self._init_weights()

    def _init_weights(self):
        """
        Use orthogonal init for recurrent layers, xavier uniform for input layers
        Bias is 0 except for forget gate
        """
        for name, param in self.named_parameters():
            if "weight_hh" in name:
                nn.init.orthogonal_(param.data)
            elif "weight_ih" in name:
                nn.init.xavier_uniform_(param.data)
            elif "bias" in name and self.unit_forget_bias:
                nn.init.zeros_(param.data)
                # PyTorch packs gate biases as [input, forget, cell, output];
                # the second quarter is the forget gate.
                param.data[self.hidden_size:2 * self.hidden_size] = 1

    def _drop_weights(self):
        # Re-sample a dropout mask over the recurrent weights each call;
        # no-op outside training because F.dropout respects self.training.
        for name, param in self.named_parameters():
            if "weight_hh" in name:
                getattr(self, name).data = \
                    torch.nn.functional.dropout(param.data, p=self.dropoutw,
                                                training=self.training).contiguous()

    def forward(self, input, hx=None):
        # Drop recurrent weights, then re-flatten so cuDNN sees a
        # contiguous parameter buffer after the in-place data swap.
        self._drop_weights()
        self.flatten_parameters()
        input = self.input_drop(input)
        seq, state = super().forward(input, hx=hx)
        return self.output_drop(seq), state
class LayerNorm(nn.LayerNorm):
    """torch LayerNorm variant that tolerates fp16 inputs.

    Normalisation is computed in float32 and the result is cast back to
    the input's original dtype.
    """

    def forward(self, x: torch.Tensor):
        input_dtype = x.dtype
        normalized = super().forward(x.type(torch.float32))
        return normalized.type(input_dtype)
class QuickGELU(nn.Module):
    """Fast GELU approximation: x * sigmoid(1.702 * x)."""

    def forward(self, x: torch.Tensor):
        gate = torch.sigmoid(1.702 * x)
        return x * gate
class EventsDataEncoder(nn.Module):
    """Encode a 48-step events sequence into a fixed-size vector.

    Pipeline: stacked weight-dropped LSTMs (LSTMNew) -> three parallel
    Conv1d/ReLU/MaxPool branches with kernel sizes `filter_kernels` ->
    flatten + concatenate (optionally with an external embedding) ->
    two-layer MLP with LayerNorms producing an `output_dim` encoding.

    Parameters
    ----------
    input_dim : int
        Number of features per time step.
    hidden_dim : int
        Hidden size of the first (bidirectional) LSTM stack.
    lstm_layers : int
        Total recurrent layers; with >= 2, the last layer is a separate
        unidirectional LSTM on top of a bidirectional stack.
    filter_kernels : list of int
        Kernel sizes of the three convolutional branches.
    filters : int
        Number of filters per convolutional branch.
    output_dim : int
        Size of the returned encoding.
    add_embeds : bool
        If True, `forward` expects an extra (batch, embed_dim) tensor that
        is concatenated before the MLP head.
    embed_dim : int
        Size of the optional embedding vector (ignored if not add_embeds).
    dropout, dropout_w, dropout_conv : float
        Dropout rates for LSTM outputs, LSTM weights (DropConnect) and the
        pooled convolutional representation, respectively.
    """

    def __init__(self, input_dim=390, hidden_dim=512, lstm_layers=3,
                 filter_kernels=[2, 3, 4], filters=100, output_dim=1024,
                 add_embeds=True, embed_dim=700,
                 dropout=0.3, dropout_w=0.2, dropout_conv=0.2):
        super(EventsDataEncoder, self).__init__()
        self.input_dim = input_dim
        self.hidden_dim = hidden_dim
        self.layers = lstm_layers
        self.bidirectional = True
        self.dropout = dropout
        self.rec_dropout = dropout_w
        self.depth = lstm_layers
        self.drop_conv = dropout_conv
        self.num_classes = 1
        self.output_dim = output_dim
        self.add_embeds = add_embeds
        self.embed_dim = embed_dim if add_embeds else 0
        # Recurrent encoder: with >= 2 layers, a bidirectional stack handles
        # all but the last layer and a unidirectional LSTM sits on top.
        if self.layers >= 2:
            self.lstm1 = LSTMNew(input_size=self.input_dim,
                                 hidden_size=self.hidden_dim,
                                 num_layers=self.layers - 1,
                                 dropoutw=self.rec_dropout,
                                 dropout=self.rec_dropout,
                                 bidirectional=self.bidirectional,
                                 batch_first=True)
            self.do0 = nn.Dropout(self.dropout)
            self.lstm2 = LSTMNew(input_size=self.hidden_dim * 2,
                                 hidden_size=self.hidden_dim * 2,
                                 num_layers=1,
                                 dropoutw=self.rec_dropout,
                                 dropout=self.rec_dropout,
                                 bidirectional=False,
                                 batch_first=True)
        else:
            self.lstm2 = LSTMNew(input_size=self.input_dim,
                                 hidden_size=self.hidden_dim * 2,
                                 num_layers=1,
                                 dropoutw=self.rec_dropout,
                                 dropout=self.rec_dropout,
                                 bidirectional=False,
                                 batch_first=True)
        # Convolutional branches over the (assumed) 48-step time axis.
        nfilters = filter_kernels
        nb_filters = filters
        # Conv1d output length per branch: 48 steps, stride 1, no padding.
        L_out = [(48 - k) + 1 for k in nfilters]
        maxpool_padding, maxpool_dilation, maxpool_kernel_size, maxpool_stride = (0, 1, 2, 2)
        # Flattened size of all branches after max-pooling. BUGFIX: this
        # previously hard-coded 100 filters instead of using `nb_filters`,
        # which broke any configuration with filters != 100.
        dim_ = int(np.sum([nb_filters * np.floor(
            (l + 2 * maxpool_padding - maxpool_dilation * (maxpool_kernel_size - 1) - 1) / maxpool_stride + 1)
                            for l in L_out]))
        if self.add_embeds:
            dim_ += self.embed_dim
        self.cnn1 = nn.Sequential(OrderedDict([
            ("cnn1_conv1d", nn.Conv1d(in_channels=self.hidden_dim * 2, out_channels=nb_filters, kernel_size=nfilters[0],
                                      stride=1, padding=0, dilation=1, groups=1, bias=True,
                                      padding_mode='zeros')),
            ("cnn1_relu", nn.ReLU()),
            ("cnn1_maxpool1d", nn.MaxPool1d(kernel_size=2)),
            ("cnn1_flatten", nn.Flatten())
        ]))
        self.cnn2 = nn.Sequential(OrderedDict([
            ("cnn2_conv1d", nn.Conv1d(in_channels=self.hidden_dim * 2, out_channels=nb_filters, kernel_size=nfilters[1],
                                      stride=1, padding=0, dilation=1, groups=1, bias=True,
                                      padding_mode='zeros')),
            ("cnn2_relu", nn.ReLU()),
            ("cnn2_maxpool1d", nn.MaxPool1d(kernel_size=2)),
            ("cnn2_flatten", nn.Flatten())
        ]))
        self.cnn3 = nn.Sequential(OrderedDict([
            ("cnn3_conv1d", nn.Conv1d(in_channels=self.hidden_dim * 2, out_channels=nb_filters, kernel_size=nfilters[2],
                                      stride=1, padding=0, dilation=1, groups=1, bias=True,
                                      padding_mode='zeros')),
            ("cnn3_relu", nn.ReLU()),
            ("cnn3_maxpool1d", nn.MaxPool1d(kernel_size=2)),
            ("cnn3_flatten", nn.Flatten())
        ]))
        # Two-layer MLP head mapping the pooled representation (plus the
        # optional embedding) to the final encoding.
        self.encoder = nn.Sequential(OrderedDict([
            ("enc_fc1", nn.Linear(dim_, dim_ * 2)),
            ("enc_relu", nn.ReLU()),
            ("enc_layernorm2", nn.LayerNorm(dim_ * 2)),
            ("enc_fc2", nn.Linear(dim_ * 2, self.output_dim)),
            ("enc_relu2", nn.ReLU()),
            ("enc_layernorm3", nn.LayerNorm(self.output_dim))
        ]))
        self.do2 = nn.Dropout(self.drop_conv)

    def forward(self, inputs, embeds=None):
        """Return the (batch, output_dim) encoding of `inputs`.

        Parameters
        ----------
        inputs : Tensor, (batch, 48, input_dim)
            Event sequence; 48 steps assumed by the conv sizing above.
        embeds : Tensor or None
            Extra (batch, embed_dim) features; required when add_embeds.
        """
        out = inputs
        if self.layers >= 2:
            out, h = self.lstm1(out)
            out = self.do0(out)
        out, h = self.lstm2(out)
        # Conv1d expects (batch, channels, time).
        pooling_reps = []
        pool_vecs = self.cnn1(out.permute((0, 2, 1)))
        pooling_reps.append(pool_vecs)
        pool_vecs = self.cnn2(out.permute((0, 2, 1)))
        pooling_reps.append(pool_vecs)
        pool_vecs = self.cnn3(out.permute((0, 2, 1)))
        pooling_reps.append(pool_vecs)
        representation = torch.cat(pooling_reps, dim=1).contiguous()
        out = self.do2(representation)
        if self.add_embeds:
            out = torch.cat([out, embeds], dim=1)
        encoding = self.encoder(out)
        return encoding
| [
"torch.nn.MaxPool1d",
"torch.nn.ReLU",
"torch.nn.Dropout",
"torch.nn.LSTM",
"torch.nn.Flatten",
"torch.nn.init.xavier_uniform_",
"torch.sigmoid",
"numpy.floor",
"torch.nn.LayerNorm",
"torch.nn.init.zeros_",
"torch.nn.functional.dropout",
"torch.nn.utils.rnn.PackedSequence",
"torch.nn.init.or... | [((2436, 2460), 'torch.nn.Dropout', 'nn.Dropout', (['self.dropout'], {}), '(self.dropout)\n', (2446, 2460), False, 'from torch import nn, Tensor\n'), ((3749, 3775), 'torch.nn.Dropout', 'nn.Dropout', (['self.drop_conv'], {}), '(self.drop_conv)\n', (3759, 3775), False, 'from torch import nn, Tensor\n'), ((3797, 3830), 'torch.nn.Linear', 'nn.Linear', (['(6800)', 'self.num_classes'], {}), '(6800, self.num_classes)\n', (3806, 3830), False, 'from torch import nn, Tensor\n'), ((13789, 13815), 'torch.nn.Dropout', 'nn.Dropout', (['self.drop_conv'], {}), '(self.drop_conv)\n', (13799, 13815), False, 'from torch import nn, Tensor\n'), ((1234, 1413), 'torch.nn.LSTM', 'nn.LSTM', ([], {'input_size': 'self.input_dim', 'hidden_size': 'self.hidden_dim', 'num_layers': '(self.layers - 1)', 'dropout': 'self.rec_dropout', 'bidirectional': 'self.bidirectional', 'batch_first': '(True)'}), '(input_size=self.input_dim, hidden_size=self.hidden_dim, num_layers=\n self.layers - 1, dropout=self.rec_dropout, bidirectional=self.\n bidirectional, batch_first=True)\n', (1241, 1413), False, 'from torch import nn, Tensor\n'), ((1592, 1616), 'torch.nn.Dropout', 'nn.Dropout', (['self.dropout'], {}), '(self.dropout)\n', (1602, 1616), False, 'from torch import nn, Tensor\n'), ((1748, 1907), 'torch.nn.LSTM', 'nn.LSTM', ([], {'input_size': '(self.hidden_dim * 2)', 'hidden_size': '(self.hidden_dim * 2)', 'num_layers': '(1)', 'dropout': 'self.rec_dropout', 'bidirectional': '(False)', 'batch_first': '(True)'}), '(input_size=self.hidden_dim * 2, hidden_size=self.hidden_dim * 2,\n num_layers=1, dropout=self.rec_dropout, bidirectional=False,\n batch_first=True)\n', (1755, 1907), False, 'from torch import nn, Tensor\n'), ((2104, 2258), 'torch.nn.LSTM', 'nn.LSTM', ([], {'input_size': 'self.input_dim', 'hidden_size': '(self.hidden_dim * 2)', 'num_layers': '(1)', 'dropout': 'self.rec_dropout', 'bidirectional': '(False)', 'batch_first': '(True)'}), '(input_size=self.input_dim, 
hidden_size=self.hidden_dim * 2,\n num_layers=1, dropout=self.rec_dropout, bidirectional=False,\n batch_first=True)\n', (2111, 2258), False, 'from torch import nn, Tensor\n'), ((2724, 2890), 'torch.nn.Conv1d', 'nn.Conv1d', ([], {'in_channels': '(self.hidden_dim * 2)', 'out_channels': 'nb_filters', 'kernel_size': '(2)', 'stride': '(1)', 'padding': '(0)', 'dilation': '(1)', 'groups': '(1)', 'bias': '(True)', 'padding_mode': '"""zeros"""'}), "(in_channels=self.hidden_dim * 2, out_channels=nb_filters,\n kernel_size=2, stride=1, padding=0, dilation=1, groups=1, bias=True,\n padding_mode='zeros')\n", (2733, 2890), False, 'from torch import nn, Tensor\n'), ((2940, 2949), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (2947, 2949), False, 'from torch import nn, Tensor\n'), ((2963, 2990), 'torch.nn.MaxPool1d', 'nn.MaxPool1d', ([], {'kernel_size': '(2)'}), '(kernel_size=2)\n', (2975, 2990), False, 'from torch import nn, Tensor\n'), ((3004, 3016), 'torch.nn.Flatten', 'nn.Flatten', ([], {}), '()\n', (3014, 3016), False, 'from torch import nn, Tensor\n'), ((3075, 3241), 'torch.nn.Conv1d', 'nn.Conv1d', ([], {'in_channels': '(self.hidden_dim * 2)', 'out_channels': 'nb_filters', 'kernel_size': '(3)', 'stride': '(1)', 'padding': '(0)', 'dilation': '(1)', 'groups': '(1)', 'bias': '(True)', 'padding_mode': '"""zeros"""'}), "(in_channels=self.hidden_dim * 2, out_channels=nb_filters,\n kernel_size=3, stride=1, padding=0, dilation=1, groups=1, bias=True,\n padding_mode='zeros')\n", (3084, 3241), False, 'from torch import nn, Tensor\n'), ((3291, 3300), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (3298, 3300), False, 'from torch import nn, Tensor\n'), ((3314, 3341), 'torch.nn.MaxPool1d', 'nn.MaxPool1d', ([], {'kernel_size': '(2)'}), '(kernel_size=2)\n', (3326, 3341), False, 'from torch import nn, Tensor\n'), ((3355, 3367), 'torch.nn.Flatten', 'nn.Flatten', ([], {}), '()\n', (3365, 3367), False, 'from torch import nn, Tensor\n'), ((3426, 3592), 'torch.nn.Conv1d', 'nn.Conv1d', ([], 
{'in_channels': '(self.hidden_dim * 2)', 'out_channels': 'nb_filters', 'kernel_size': '(4)', 'stride': '(1)', 'padding': '(0)', 'dilation': '(1)', 'groups': '(1)', 'bias': '(True)', 'padding_mode': '"""zeros"""'}), "(in_channels=self.hidden_dim * 2, out_channels=nb_filters,\n kernel_size=4, stride=1, padding=0, dilation=1, groups=1, bias=True,\n padding_mode='zeros')\n", (3435, 3592), False, 'from torch import nn, Tensor\n'), ((3642, 3651), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (3649, 3651), False, 'from torch import nn, Tensor\n'), ((3665, 3692), 'torch.nn.MaxPool1d', 'nn.MaxPool1d', ([], {'kernel_size': '(2)'}), '(kernel_size=2)\n', (3677, 3692), False, 'from torch import nn, Tensor\n'), ((3706, 3718), 'torch.nn.Flatten', 'nn.Flatten', ([], {}), '()\n', (3716, 3718), False, 'from torch import nn, Tensor\n'), ((5860, 5890), 'torch.nn.utils.rnn.PackedSequence', 'PackedSequence', (['x', 'batch_sizes'], {}), '(x, batch_sizes)\n', (5874, 5890), False, 'from torch.nn.utils.rnn import PackedSequence\n'), ((8033, 8057), 'torch.sigmoid', 'torch.sigmoid', (['(1.702 * x)'], {}), '(1.702 * x)\n', (8046, 8057), False, 'import torch\n'), ((9740, 9764), 'torch.nn.Dropout', 'nn.Dropout', (['self.dropout'], {}), '(self.dropout)\n', (9750, 9764), False, 'from torch import nn, Tensor\n'), ((14645, 14676), 'torch.cat', 'torch.cat', (['[out, embeds]'], {'dim': '(1)'}), '([out, embeds], dim=1)\n', (14654, 14676), False, 'import torch\n'), ((4425, 4455), 'torch.cat', 'torch.cat', (['pooling_reps'], {'dim': '(1)'}), '(pooling_reps, dim=1)\n', (4434, 4455), False, 'import torch\n'), ((6845, 6876), 'torch.nn.init.orthogonal_', 'nn.init.orthogonal_', (['param.data'], {}), '(param.data)\n', (6864, 6876), False, 'from torch import nn, Tensor\n'), ((14439, 14469), 'torch.cat', 'torch.cat', (['pooling_reps'], {'dim': '(1)'}), '(pooling_reps, dim=1)\n', (14448, 14469), False, 'import torch\n'), ((6931, 6966), 'torch.nn.init.xavier_uniform_', 'nn.init.xavier_uniform_', (['param.data'], 
{}), '(param.data)\n', (6954, 6966), False, 'from torch import nn, Tensor\n'), ((7042, 7068), 'torch.nn.init.zeros_', 'nn.init.zeros_', (['param.data'], {}), '(param.data)\n', (7056, 7068), False, 'from torch import nn, Tensor\n'), ((7322, 7407), 'torch.nn.functional.dropout', 'torch.nn.functional.dropout', (['param.data'], {'p': 'self.dropoutw', 'training': 'self.training'}), '(param.data, p=self.dropoutw, training=self.training\n )\n', (7349, 7407), False, 'import torch\n'), ((11000, 11111), 'numpy.floor', 'np.floor', (['((l + 2 * maxpool_padding - maxpool_dilation * (maxpool_kernel_size - 1) - \n 1) / maxpool_stride + 1)'], {}), '((l + 2 * maxpool_padding - maxpool_dilation * (maxpool_kernel_size -\n 1) - 1) / maxpool_stride + 1)\n', (11008, 11111), True, 'import numpy as np\n'), ((11352, 11528), 'torch.nn.Conv1d', 'nn.Conv1d', ([], {'in_channels': '(self.hidden_dim * 2)', 'out_channels': 'nb_filters', 'kernel_size': 'nfilters[0]', 'stride': '(1)', 'padding': '(0)', 'dilation': '(1)', 'groups': '(1)', 'bias': '(True)', 'padding_mode': '"""zeros"""'}), "(in_channels=self.hidden_dim * 2, out_channels=nb_filters,\n kernel_size=nfilters[0], stride=1, padding=0, dilation=1, groups=1,\n bias=True, padding_mode='zeros')\n", (11361, 11528), False, 'from torch import nn, Tensor\n'), ((11625, 11634), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (11632, 11634), False, 'from torch import nn, Tensor\n'), ((11668, 11695), 'torch.nn.MaxPool1d', 'nn.MaxPool1d', ([], {'kernel_size': '(2)'}), '(kernel_size=2)\n', (11680, 11695), False, 'from torch import nn, Tensor\n'), ((11727, 11739), 'torch.nn.Flatten', 'nn.Flatten', ([], {}), '()\n', (11737, 11739), False, 'from torch import nn, Tensor\n'), ((11830, 12006), 'torch.nn.Conv1d', 'nn.Conv1d', ([], {'in_channels': '(self.hidden_dim * 2)', 'out_channels': 'nb_filters', 'kernel_size': 'nfilters[1]', 'stride': '(1)', 'padding': '(0)', 'dilation': '(1)', 'groups': '(1)', 'bias': '(True)', 'padding_mode': '"""zeros"""'}), 
"(in_channels=self.hidden_dim * 2, out_channels=nb_filters,\n kernel_size=nfilters[1], stride=1, padding=0, dilation=1, groups=1,\n bias=True, padding_mode='zeros')\n", (11839, 12006), False, 'from torch import nn, Tensor\n'), ((12103, 12112), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (12110, 12112), False, 'from torch import nn, Tensor\n'), ((12146, 12173), 'torch.nn.MaxPool1d', 'nn.MaxPool1d', ([], {'kernel_size': '(2)'}), '(kernel_size=2)\n', (12158, 12173), False, 'from torch import nn, Tensor\n'), ((12205, 12217), 'torch.nn.Flatten', 'nn.Flatten', ([], {}), '()\n', (12215, 12217), False, 'from torch import nn, Tensor\n'), ((12308, 12484), 'torch.nn.Conv1d', 'nn.Conv1d', ([], {'in_channels': '(self.hidden_dim * 2)', 'out_channels': 'nb_filters', 'kernel_size': 'nfilters[2]', 'stride': '(1)', 'padding': '(0)', 'dilation': '(1)', 'groups': '(1)', 'bias': '(True)', 'padding_mode': '"""zeros"""'}), "(in_channels=self.hidden_dim * 2, out_channels=nb_filters,\n kernel_size=nfilters[2], stride=1, padding=0, dilation=1, groups=1,\n bias=True, padding_mode='zeros')\n", (12317, 12484), False, 'from torch import nn, Tensor\n'), ((12581, 12590), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (12588, 12590), False, 'from torch import nn, Tensor\n'), ((12624, 12651), 'torch.nn.MaxPool1d', 'nn.MaxPool1d', ([], {'kernel_size': '(2)'}), '(kernel_size=2)\n', (12636, 12651), False, 'from torch import nn, Tensor\n'), ((12683, 12695), 'torch.nn.Flatten', 'nn.Flatten', ([], {}), '()\n', (12693, 12695), False, 'from torch import nn, Tensor\n'), ((13473, 13498), 'torch.nn.Linear', 'nn.Linear', (['dim_', '(dim_ * 2)'], {}), '(dim_, dim_ * 2)\n', (13482, 13498), False, 'from torch import nn, Tensor\n'), ((13526, 13535), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (13533, 13535), False, 'from torch import nn, Tensor\n'), ((13569, 13591), 'torch.nn.LayerNorm', 'nn.LayerNorm', (['(dim_ * 2)'], {}), '(dim_ * 2)\n', (13581, 13591), False, 'from torch import nn, Tensor\n'), ((13618, 
13654), 'torch.nn.Linear', 'nn.Linear', (['(dim_ * 2)', 'self.output_dim'], {}), '(dim_ * 2, self.output_dim)\n', (13627, 13654), False, 'from torch import nn, Tensor\n'), ((13683, 13692), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (13690, 13692), False, 'from torch import nn, Tensor\n'), ((13726, 13755), 'torch.nn.LayerNorm', 'nn.LayerNorm', (['self.output_dim'], {}), '(self.output_dim)\n', (13738, 13755), False, 'from torch import nn, Tensor\n')] |
r"""
Piezo-elasticity problem - linear elastic material with piezoelectric
effects.
Find :math:`\ul{u}`, :math:`\phi` such that:
.. math::
- \omega^2 \int_{Y} \rho\ \ul{v} \cdot \ul{u}
+ \int_{Y} D_{ijkl}\ e_{ij}(\ul{v}) e_{kl}(\ul{u})
- \int_{Y_2} g_{kij}\ e_{ij}(\ul{v}) \nabla_k \phi
= 0
\;, \quad \forall \ul{v} \;,
\int_{Y_2} g_{kij}\ e_{ij}(\ul{u}) \nabla_k \psi
+ \int_{Y} K_{ij} \nabla_i \psi \nabla_j \phi
= 0
\;, \quad \forall \psi \;,
where
.. math::
D_{ijkl} = \mu (\delta_{ik} \delta_{jl}+\delta_{il} \delta_{jk}) +
\lambda \ \delta_{ij} \delta_{kl}
\;.
"""
import os
import numpy as nm
from sfepy import data_dir
from sfepy.discrete.fem import MeshIO
# Mesh with a circular inclusion embedded in a square matrix; the commented
# alternatives select other inclusion geometries (2D and 3D variants).
filename_mesh = data_dir + '/meshes/2d/special/circle_in_square.mesh'
## filename_mesh = data_dir + '/meshes/2d/special/circle_in_square_small.mesh'
## filename_mesh = data_dir + '/meshes/3d/special/cube_sphere.mesh'
## filename_mesh = data_dir + '/meshes/2d/special/cube_cylinder.mesh'
omega = 1  # angular frequency of the harmonic problem
omega_squared = omega**2  # enters the inertial term of the first equation
# Read the mesh bounding box up front so that the boundary regions below can
# be defined relative to the actual mesh extents.
conf_dir = os.path.dirname(__file__)
io = MeshIO.any_from_filename(filename_mesh, prefix_dir=conf_dir)
bbox, dim = io.read_bounding_box( ret_dim = True )
geom = {3 : '3_4', 2 : '2_3'}[dim]  # element geometry name per dimension
x_left, x_right = bbox[:,0]  # x-extents used by the Left/Right regions
# Regions: Y is the whole domain, Y1/Y2 the two cell groups (matrix and
# inclusion), Y2_Surface their interface; Left/Right select boundary facets
# within a 1e-3 tolerance of the mesh x-extents.
regions = {
    'Y' : 'all',
    'Y1' : 'cells of group 1',
    'Y2' : 'cells of group 2',
    'Y2_Surface': ('r.Y1 *v r.Y2', 'facet'),
    'Left' : ('vertices in (x < %f)' % (x_left + 1e-3), 'facet'),
    'Right' : ('vertices in (x > %f)' % (x_right - 1e-3), 'facet'),
}
# Material parameters are provided by the function below (registered in the
# `functions` dict) rather than as constants.
material_2 = {
    'name' : 'inclusion',
    # epoxy
    'function' : 'get_inclusion_pars',
}
def get_inclusion_pars(ts, coor, mode=None, **kwargs):
    """Return the material parameters in quadrature points.

    TODO: implement proper 3D -> 2D transformation of constitutive
    matrices.

    Parameters
    ----------
    ts : TimeStepper or None
        The time stepper (unused).
    coor : array, shape (n_qp, dim)
        Coordinates of the quadrature points.
    mode : str, optional
        Evaluation mode; parameters are returned only for 'qp'.

    Returns
    -------
    out : dict or None
        For mode 'qp', each value tiled to shape (n_qp, n_row, n_col);
        None otherwise.
    """
    if mode == 'qp':
        n_nod, dim = coor.shape
        # Number of independent strain components; integer division keeps
        # `sym` an int (true division would break nm.ones() on Python 3).
        sym = (dim + 1) * dim // 2

        dielectric = nm.eye(dim, dtype=nm.float64)
        # !!!
        coupling = nm.ones((dim, sym), dtype=nm.float64)
        # coupling[0,1] = 0.2

        out = {
            # Lame coefficients in 1e+10 Pa.
            'lam' : 0.1798,
            'mu' : 0.148,
            # dielectric tensor
            'dielectric' : dielectric,
            # piezoelectric coupling
            'coupling' : coupling,
            'density' : 0.1142, # in 1e4 kg/m3
        }
        # dict.items(): .iteritems() was Python 2 only.
        for key, val in out.items():
            out[key] = nm.tile(val, (coor.shape[0], 1, 1))
        return out
functions = {
'get_inclusion_pars' : (get_inclusion_pars,),
}
field_0 = {
'name' : 'displacement',
'dtype' : nm.float64,
'shape' : dim,
'region' : 'Y',
'approx_order' : 1,
}
field_2 = {
'name' : 'potential',
'dtype' : nm.float64,
'shape' : (1,),
'region' : 'Y',
'approx_order' : 1,
}
variables = {
'u' : ('unknown field', 'displacement', 0),
'v' : ('test field', 'displacement', 'u'),
'phi' : ('unknown field', 'potential', 1),
'psi' : ('test field', 'potential', 'phi'),
}
ebcs = {
'u1' : ('Left', {'u.all' : 0.0}),
'u2' : ('Right', {'u.0' : 0.1}),
'phi' : ('Y2_Surface', {'phi.all' : 0.0}),
}
integral_1 = {
'name' : 'i',
'order' : 2,
}
equations = {
'1' : """- %f * dw_volume_dot.i.Y( inclusion.density, v, u )
+ dw_lin_elastic_iso.i.Y( inclusion.lam, inclusion.mu, v, u )
- dw_piezo_coupling.i.Y2( inclusion.coupling, v, phi )
= 0""" % omega_squared,
'2' : """dw_piezo_coupling.i.Y2( inclusion.coupling, u, psi )
+ dw_diffusion.i.Y( inclusion.dielectric, psi, phi )
= 0""",
}
##
# Solvers etc.
solver_0 = {
'name' : 'ls',
'kind' : 'ls.scipy_direct',
}
solver_1 = {
'name' : 'newton',
'kind' : 'nls.newton',
'i_max' : 1,
'eps_a' : 1e-10,
'eps_r' : 1.0,
'macheps' : 1e-16,
'lin_red' : 1e-2, # Linear system error < (eps_a * lin_red).
'ls_red' : 0.1,
'ls_red_warp': 0.001,
'ls_on' : 1.1,
'ls_min' : 1e-5,
'check' : 0,
'delta' : 1e-6,
'problem' : 'nonlinear', # 'nonlinear' or 'linear' (ignore i_max)
}
| [
"numpy.tile",
"numpy.eye",
"numpy.ones",
"os.path.dirname",
"sfepy.discrete.fem.MeshIO.any_from_filename"
] | [((1055, 1080), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (1070, 1080), False, 'import os\n'), ((1086, 1146), 'sfepy.discrete.fem.MeshIO.any_from_filename', 'MeshIO.any_from_filename', (['filename_mesh'], {'prefix_dir': 'conf_dir'}), '(filename_mesh, prefix_dir=conf_dir)\n', (1110, 1146), False, 'from sfepy.discrete.fem import MeshIO\n'), ((1884, 1913), 'numpy.eye', 'nm.eye', (['dim'], {'dtype': 'nm.float64'}), '(dim, dtype=nm.float64)\n', (1890, 1913), True, 'import numpy as nm\n'), ((1951, 1988), 'numpy.ones', 'nm.ones', (['(dim, sym)'], {'dtype': 'nm.float64'}), '((dim, sym), dtype=nm.float64)\n', (1958, 1988), True, 'import numpy as nm\n'), ((2407, 2442), 'numpy.tile', 'nm.tile', (['val', '(coor.shape[0], 1, 1)'], {}), '(val, (coor.shape[0], 1, 1))\n', (2414, 2442), True, 'import numpy as nm\n')] |
# -*- coding: utf-8 -*-
"""
Created on Wed Jan 2 16:14:47 2019
@author: <NAME>
"""
import numpy as np
import matplotlib.pyplot as plt
from lbl_ir.data_objects.ir_map import sample_info, ir_map
def lorentzian(x, x0, gamma=10):
    """Unit-height Lorentzian centred at x0; gamma is its half width at
    half maximum (value is 0.5 where |x - x0| == gamma)."""
    u = (x - x0) / gamma
    return 1.0 / (u * u + 1.0)
def gaussian(x, x0=0, sigma=10):
    """Unit-height, unnormalised Gaussian-shaped kernel centred at x0.

    Note: this is exp(-((x - x0)/sigma)**2), i.e. without the usual factor
    of 2 in the denominator.
    """
    z = (x - x0) / sigma
    return np.exp(-(z * z))
class spectra_map_simulator:
"""Generate a simulated spectral map
Parameters:
-----------
NbaseSpectra: int, optional
The number of basis spectrum. Default is 3.
Nclusters: int, optional
The number of data point clusters in the spectral map. Default is 4.
ptsPerCluster: int, optional
The number of data points per cluster. Default is 50.
Nx: int, optional
The pixel numbers of x-axis. Default is 64.
Ny: int, optional
The pixel numbers of y-axis. Default is 64.
cov: int or float matrix, optional
The covariance matrix of 2D gaussian distribution used to generate random data point clusters.
Default is [[20, 10], [10, 25]].
sigma: int or float, optional
The standard deviation of 1D gaussian distribution used to generate spectral weights. Default is 10.
startWavenumber: int or float, optional
The beginning wavenumber of spectral range. Default is 400.
endWavenumber: int or float, optional
The ending wavenumber of spectral range. Default is 4000.
Nwavenumber: int, optional
The number of wavenumber values in the spectrum. Default is 1600.
random_state: int, optional
The seed of the pseudo random number generator to use when generating random numbers. Default to 17.
Attributes:
--------
data : float matrix
The final spectral data matrix. Number of rows equals number of non-zero data points;
Number of columns equals number of wavenumbers.
densityMatCondense : float matrix
The distribution weight matrix of each basis spectrum (only stores non-zero data points, see nonZeroInd).
Number of rows equals number of non-zero data points;
Number of columns equals number of basis spectrum.
spectraMat : float matrix
The basis spectra matrix. Number of rows equals number of basis spectrum;
Number of columns equals number of wavenumbers.
wavenumber : float array
The wavenumber values of the spectrum (x-axis).
nonZeroInd : int array
The linear index of all non-zero data points in the flatterned 2D map.
Examples:
---------
>>> s = spectra_map_simulator(random_state=3)
>>> s.spectra_map_gen()
>>> print(s.data.shape)
(446, 1600)
"""
def __init__(self, NbaseSpectra=3, Nclusters=4, ptsPerCluster=50, Nx=64, cov=[[20, 10], [10, 25]], sigma=10,
startWavenumber=400, endWavenumber=4000, Nwavenumber=1600, random_state=17):
self.NbaseSpectra = NbaseSpectra
self.Nclusters = Nclusters
self.ptsPerCluster = ptsPerCluster
self.Nx = Nx
self.Ny = Nx
self.cov = cov
self.sigma = sigma
self.startWavenumber = startWavenumber
self.endWavenumber = endWavenumber
self.Nwavenumber = Nwavenumber
self.random_state = random_state
    def cluster_placement(self, random_state=17):
        """Generate simulated data point locations and weights in a 2D map.

        Parameters:
        -----------
        random_state : int, optional
            Seed for the pseudo random number generator. Default is 17.

        Returns:
        --------
        densityVec : float array
            The 1D array obtained by flattening the 2D weight matrix
            (densityMat) of shape (Ny, Nx).

        Side effects:
        -------------
        self.points : float matrix
            One row per data point; columns 0-1 are the x-y coordinates and
            column 2 is the point's weight coefficient.
        """
        np.random.seed(seed=random_state)
        # Keep cluster centers within ~80% of the map width so most points land on-grid.
        centroids = np.random.randint(int(self.Nx*0.8), size=(self.Nclusters,2))
        self.points = np.zeros((self.ptsPerCluster*self.Nclusters, 3))
        densityMat = np.zeros((self.Ny, self.Nx))
        for i in range(self.Nclusters):
            # Sample this cluster's point locations from a 2D normal around its centroid...
            self.points[i*self.ptsPerCluster:(i+1)*self.ptsPerCluster, :2] = np.random.multivariate_normal(centroids[i,:], self.cov, self.ptsPerCluster)
            # ...and weight each point by gaussian() of its distance to the centroid.
            self.points[i*self.ptsPerCluster:(i+1)*self.ptsPerCluster, 2] = gaussian(np.sqrt(np.power(self.points[i*self.ptsPerCluster:(i+1)*self.ptsPerCluster, :2]
                                                                                              -centroids[i,:],2).sum(axis = 1)),sigma=self.sigma)
        for i in self.points:
            # i[1::-1] swaps (x, y) -> (row, col); clip keeps indices on the map.
            # NOTE(review): the clip bound uses Nx for both axes - valid only while Ny == Nx.
            idx = tuple(np.clip(i[1::-1].astype(int), 0, self.Nx-1))
            densityMat[idx] += i[2]
        densityVec = densityMat.flatten()
        return densityVec
def spectrum_gen(self, Npeaks=3, firstPeakPosition=1200, peakWidth=[30,60,150], random_state=17):
"""Generate a simulated spectrum
Parameters:
-----------
Npeaks: int, optional
The number of peaks. Default is 3.
firstPeakPosition: int, optional
The position of the first peak. Default is 1200.
peakWidth: int or float list, optional
The list of peak widths. Default is [30,60,150].
Returns:
--------
wavenumber : float array
The wavenumber values of the spectrum (x-axis).
spectrum : float array
The transmission/reflection/absorption coefficients of the spectrum (y-axis)
"""
if len(peakWidth) != Npeaks:
raise Exception("The number of peak width values doesn't match the number of peaks")
np.random.seed(seed=random_state)
self.wavenumber = np.linspace(self.startWavenumber,self.endWavenumber,self.Nwavenumber)
peakPositions = np.linspace(firstPeakPosition,self.endWavenumber,Npeaks,endpoint=False)
spectrum = np.zeros(len(self.wavenumber))
weights = np.random.rand(Npeaks)
for i in range(Npeaks):
singlePeak = lorentzian(self.wavenumber,peakPositions[i],peakWidth[i])
spectrum += singlePeak * weights[i]
spectrum /= weights.sum()
return spectrum
    def spectra_map_gen(self, Npeaks = [3,4,5], positions = [800, 1000, 1200]):
        """Generate a spectral map data cube.

        Parameters:
        -----------
        Npeaks: int list, optional
            The number of peaks of each basis spectrum. Default is [3,4,5].
            NOTE(review): mutable default argument (shared across calls); its
            length is also assumed to be >= self.NbaseSpectra - confirm.
        positions: int or float list, optional
            The list of first peak positions. Default is [800, 1000, 1200].

        Returns
        -------
        self : object
            Populates self.spectraMat, self.nonZeroInd,
            self.densityVecsCondense, self.data and self.mask.
        """
        if len(positions) != len(Npeaks):
            raise Exception("The number of first peak positions doesn't match the number of basis spectrum")
        # Derive one reproducible sub-seed per basis spectrum.
        np.random.seed(seed=self.random_state)
        random_seeds = np.random.randint(50, size=len(Npeaks))
        densityVecs = np.zeros((self.Nx*self.Ny,self.NbaseSpectra)) # component coefficients
        self.spectraMat = np.zeros((self.NbaseSpectra, self.Nwavenumber)) # components spectra matrix
        for i in range(self.NbaseSpectra):
            densityVecs[:,i] = self.cluster_placement(random_state = random_seeds[i])
            np.random.seed(seed=random_seeds[i])
            # Random peak widths: multiples of 10 in [40, 190].
            peakWidth = np.random.randint(4,20, Npeaks[i])*10
            self.spectraMat[i,:] = self.spectrum_gen(Npeaks=Npeaks[i], firstPeakPosition = positions[i],
                                                     peakWidth=peakWidth, random_state = random_seeds[i])
        # Keep only pixels where at least one component has non-zero weight.
        self.nonZeroInd = ~np.all(densityVecs==0, axis=1)
        self.densityVecsCondense = densityVecs[self.nonZeroInd] # remove all zero rows
        self.data = self.densityVecsCondense.dot(self.spectraMat) # matrix multiplication
        mask = np.zeros((self.Ny, self.Nx), dtype='bool').flatten()
        mask[self.nonZeroInd] = True
        self.mask = mask.reshape(self.Ny, self.Nx)
    def save(self, sample_id='simulated_dataset'):
        """Save the simulated dataset as an hdf5 file.

        Generates the dataset via spectra_map_gen, wraps it in an ir_map
        (image cube plus the 'MCR' factorization) and writes '<sample_id>.h5'.

        Arguments:
        ----------
        sample_id : A string that identifies the sample, spaces will be substituted for underscores.
        """
        si = sample_info(sample_id=sample_id, sample_meta_data=f"This dataset has {self.NbaseSpectra} components and map size is {self.Ny} * {self.Nx}")
        si.show()
        self.spectra_map_gen() # generate simulated dataset
        # Pixel coordinates of the non-zero data points, stored as (x, y) pairs.
        y, x = np.where(self.mask)
        self.xy = np.c_[x,y]
        ir_data = ir_map(self.wavenumber, si, with_factorization=True)
        ir_data.add_data(spectrum=self.data, xy=self.xy)
        ir_data.to_image_cube()
        ir_data.add_factorization(component=self.spectraMat, component_coef=self.densityVecsCondense, prefix='MCR')
        ir_data.write_as_hdf5(f'{sample_id}.h5')
def load(self, filename='simulated_dataset.h5'):
"""load the simulated dataset from an hdf5 file.
Arguments:
----------
filename : The hdf5 filename where data will be read from.
"""
ir_data = ir_map(filename=filename)
ir_data.add_image_cube()
ir_data.add_factorization(prefix='MCR')
return ir_data
def plot_spectra_map(self):
"""Plot the spectral distribution map and basis spectra
"""
self.spectra_map_gen() # generate simulated dataset
plt.figure(figsize=(12, 6))
for i in range(self.NbaseSpectra):
n_row = 2
plt.subplot(n_row,3,i+1)
densityMat = np.zeros((self.Ny, self.Nx)).flatten()
densityMat[self.nonZeroInd] = self.densityVecsCondense[:,i]
densityMat = densityMat.reshape(self.Ny, self.Nx)
plt.imshow(np.flipud(densityMat))
plt.title(f'Distribution of Component {i+1}')
plt.colorbar()
plt.clim([0, 2])
if n_row == 3:
plt.subplot(n_row,3,i+7)
plt.imshow(np.flipud(densityMat>0))
plt.subplot(n_row,3,i+4)
plt.subplots_adjust(hspace=0.3)
plt.plot(self.wavenumber, self.spectraMat[i,:])
plt.title(f'Spectrum of Component {i+1}')
plt.xlim([4000,400])
return None
if __name__ == "__main__":
s = spectra_map_simulator(random_state=3)
s.plot_spectra_map() | [
"numpy.random.rand",
"lbl_ir.data_objects.ir_map.ir_map",
"numpy.where",
"matplotlib.pyplot.plot",
"numpy.linspace",
"numpy.random.seed",
"numpy.flipud",
"numpy.random.multivariate_normal",
"matplotlib.pyplot.title",
"matplotlib.pyplot.xlim",
"matplotlib.pyplot.subplots_adjust",
"matplotlib.py... | [((3933, 3966), 'numpy.random.seed', 'np.random.seed', ([], {'seed': 'random_state'}), '(seed=random_state)\n', (3947, 3966), True, 'import numpy as np\n'), ((4070, 4120), 'numpy.zeros', 'np.zeros', (['(self.ptsPerCluster * self.Nclusters, 3)'], {}), '((self.ptsPerCluster * self.Nclusters, 3))\n', (4078, 4120), True, 'import numpy as np\n'), ((4140, 4168), 'numpy.zeros', 'np.zeros', (['(self.Ny, self.Nx)'], {}), '((self.Ny, self.Nx))\n', (4148, 4168), True, 'import numpy as np\n'), ((5809, 5842), 'numpy.random.seed', 'np.random.seed', ([], {'seed': 'random_state'}), '(seed=random_state)\n', (5823, 5842), True, 'import numpy as np\n'), ((5869, 5940), 'numpy.linspace', 'np.linspace', (['self.startWavenumber', 'self.endWavenumber', 'self.Nwavenumber'], {}), '(self.startWavenumber, self.endWavenumber, self.Nwavenumber)\n', (5880, 5940), True, 'import numpy as np\n'), ((5963, 6037), 'numpy.linspace', 'np.linspace', (['firstPeakPosition', 'self.endWavenumber', 'Npeaks'], {'endpoint': '(False)'}), '(firstPeakPosition, self.endWavenumber, Npeaks, endpoint=False)\n', (5974, 6037), True, 'import numpy as np\n'), ((6103, 6125), 'numpy.random.rand', 'np.random.rand', (['Npeaks'], {}), '(Npeaks)\n', (6117, 6125), True, 'import numpy as np\n'), ((7020, 7058), 'numpy.random.seed', 'np.random.seed', ([], {'seed': 'self.random_state'}), '(seed=self.random_state)\n', (7034, 7058), True, 'import numpy as np\n'), ((7153, 7201), 'numpy.zeros', 'np.zeros', (['(self.Nx * self.Ny, self.NbaseSpectra)'], {}), '((self.Nx * self.Ny, self.NbaseSpectra))\n', (7161, 7201), True, 'import numpy as np\n'), ((7250, 7297), 'numpy.zeros', 'np.zeros', (['(self.NbaseSpectra, self.Nwavenumber)'], {}), '((self.NbaseSpectra, self.Nwavenumber))\n', (7258, 7297), True, 'import numpy as np\n'), ((8500, 8649), 'lbl_ir.data_objects.ir_map.sample_info', 'sample_info', ([], {'sample_id': 'sample_id', 'sample_meta_data': 'f"""This dataset has {self.NbaseSpectra} components and map size is 
{self.Ny} * {self.Nx}"""'}), "(sample_id=sample_id, sample_meta_data=\n f'This dataset has {self.NbaseSpectra} components and map size is {self.Ny} * {self.Nx}'\n )\n", (8511, 8649), False, 'from lbl_ir.data_objects.ir_map import sample_info, ir_map\n'), ((8743, 8762), 'numpy.where', 'np.where', (['self.mask'], {}), '(self.mask)\n', (8751, 8762), True, 'import numpy as np\n'), ((8811, 8863), 'lbl_ir.data_objects.ir_map.ir_map', 'ir_map', (['self.wavenumber', 'si'], {'with_factorization': '(True)'}), '(self.wavenumber, si, with_factorization=True)\n', (8817, 8863), False, 'from lbl_ir.data_objects.ir_map import sample_info, ir_map\n'), ((9392, 9417), 'lbl_ir.data_objects.ir_map.ir_map', 'ir_map', ([], {'filename': 'filename'}), '(filename=filename)\n', (9398, 9417), False, 'from lbl_ir.data_objects.ir_map import sample_info, ir_map\n'), ((9744, 9771), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(12, 6)'}), '(figsize=(12, 6))\n', (9754, 9771), True, 'import matplotlib.pyplot as plt\n'), ((246, 275), 'numpy.power', 'np.power', (['((x - x0) / gamma)', '(2)'], {}), '((x - x0) / gamma, 2)\n', (254, 275), True, 'import numpy as np\n'), ((327, 356), 'numpy.power', 'np.power', (['((x - x0) / sigma)', '(2)'], {}), '((x - x0) / sigma, 2)\n', (335, 356), True, 'import numpy as np\n'), ((4287, 4363), 'numpy.random.multivariate_normal', 'np.random.multivariate_normal', (['centroids[i, :]', 'self.cov', 'self.ptsPerCluster'], {}), '(centroids[i, :], self.cov, self.ptsPerCluster)\n', (4316, 4363), True, 'import numpy as np\n'), ((7478, 7514), 'numpy.random.seed', 'np.random.seed', ([], {'seed': 'random_seeds[i]'}), '(seed=random_seeds[i])\n', (7492, 7514), True, 'import numpy as np\n'), ((7825, 7857), 'numpy.all', 'np.all', (['(densityVecs == 0)'], {'axis': '(1)'}), '(densityVecs == 0, axis=1)\n', (7831, 7857), True, 'import numpy as np\n'), ((9875, 9903), 'matplotlib.pyplot.subplot', 'plt.subplot', (['n_row', '(3)', '(i + 1)'], {}), '(n_row, 3, i + 1)\n', (9886, 
9903), True, 'import matplotlib.pyplot as plt\n'), ((10156, 10203), 'matplotlib.pyplot.title', 'plt.title', (['f"""Distribution of Component {i + 1}"""'], {}), "(f'Distribution of Component {i + 1}')\n", (10165, 10203), True, 'import matplotlib.pyplot as plt\n'), ((10214, 10228), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {}), '()\n', (10226, 10228), True, 'import matplotlib.pyplot as plt\n'), ((10241, 10257), 'matplotlib.pyplot.clim', 'plt.clim', (['[0, 2]'], {}), '([0, 2])\n', (10249, 10257), True, 'import matplotlib.pyplot as plt\n'), ((10404, 10432), 'matplotlib.pyplot.subplot', 'plt.subplot', (['n_row', '(3)', '(i + 4)'], {}), '(n_row, 3, i + 4)\n', (10415, 10432), True, 'import matplotlib.pyplot as plt\n'), ((10441, 10472), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'hspace': '(0.3)'}), '(hspace=0.3)\n', (10460, 10472), True, 'import matplotlib.pyplot as plt\n'), ((10485, 10533), 'matplotlib.pyplot.plot', 'plt.plot', (['self.wavenumber', 'self.spectraMat[i, :]'], {}), '(self.wavenumber, self.spectraMat[i, :])\n', (10493, 10533), True, 'import matplotlib.pyplot as plt\n'), ((10545, 10588), 'matplotlib.pyplot.title', 'plt.title', (['f"""Spectrum of Component {i + 1}"""'], {}), "(f'Spectrum of Component {i + 1}')\n", (10554, 10588), True, 'import matplotlib.pyplot as plt\n'), ((10599, 10620), 'matplotlib.pyplot.xlim', 'plt.xlim', (['[4000, 400]'], {}), '([4000, 400])\n', (10607, 10620), True, 'import matplotlib.pyplot as plt\n'), ((7539, 7574), 'numpy.random.randint', 'np.random.randint', (['(4)', '(20)', 'Npeaks[i]'], {}), '(4, 20, Npeaks[i])\n', (7556, 7574), True, 'import numpy as np\n'), ((8056, 8098), 'numpy.zeros', 'np.zeros', (['(self.Ny, self.Nx)'], {'dtype': '"""bool"""'}), "((self.Ny, self.Nx), dtype='bool')\n", (8064, 8098), True, 'import numpy as np\n'), ((10121, 10142), 'numpy.flipud', 'np.flipud', (['densityMat'], {}), '(densityMat)\n', (10130, 10142), True, 'import numpy as np\n'), ((10314, 10342), 
'matplotlib.pyplot.subplot', 'plt.subplot', (['n_row', '(3)', '(i + 7)'], {}), '(n_row, 3, i + 7)\n', (10325, 10342), True, 'import matplotlib.pyplot as plt\n'), ((9925, 9953), 'numpy.zeros', 'np.zeros', (['(self.Ny, self.Nx)'], {}), '((self.Ny, self.Nx))\n', (9933, 9953), True, 'import numpy as np\n'), ((10366, 10391), 'numpy.flipud', 'np.flipud', (['(densityMat > 0)'], {}), '(densityMat > 0)\n', (10375, 10391), True, 'import numpy as np\n'), ((4456, 4560), 'numpy.power', 'np.power', (['(self.points[i * self.ptsPerCluster:(i + 1) * self.ptsPerCluster, :2] -\n centroids[i, :])', '(2)'], {}), '(self.points[i * self.ptsPerCluster:(i + 1) * self.ptsPerCluster, :\n 2] - centroids[i, :], 2)\n', (4464, 4560), True, 'import numpy as np\n')] |
import numpy
from pymodm import MongoModel, fields, EmbeddedMongoModel
from sklearn import metrics
from sklearn.metrics import confusion_matrix
from newsgac.common.fields import ObjectField
from newsgac.common.mixins import CreatedUpdated, DeleteObjectsMixin
from newsgac.learners import LearnerSVC
from newsgac.learners.models.learner import Learner
from newsgac.nlp_tools import TFIDF
from newsgac.nlp_tools.models.nlp_tool import NlpTool
from newsgac.pipelines.get_sk_pipeline import get_sk_pipeline
from newsgac.stop_words.models import StopWords
class Result(EmbeddedMongoModel):
    """Evaluation metrics of one classification run (embedded document).

    Scores are stored as percentages produced by ``make_score`` inside
    :meth:`from_prediction`.
    """
    accuracy = fields.FloatField()
    cohens_kappa = fields.FloatField()
    confusion_matrix = ObjectField()
    fmeasure_macro = fields.FloatField()
    fmeasure_micro = fields.FloatField()
    fmeasure_weighted = fields.FloatField()
    precision_macro = fields.FloatField()
    precision_micro = fields.FloatField()
    precision_weighted = fields.FloatField()
    recall_macro = fields.FloatField()
    recall_micro = fields.FloatField()
    recall_weighted = fields.FloatField()
    std = fields.FloatField()
    # sorted_labels = fields.ListField()

    @classmethod
    def from_prediction(cls, true_labels, predicted_labels):
        """Build a Result from true vs. predicted labels.

        Computes sklearn precision/recall/F1 (weighted, micro, macro),
        Cohen's kappa, the confusion matrix, and a pseudo 10-fold accuracy
        mean/std obtained by splitting the per-sample correctness into 10
        contiguous sections.
        """
        def make_score(number,precision):
            # Truncate `number` to `precision` decimals, scale to a percentage
            # and format with 2 decimals.
            # NOTE(review): returns a formatted *string*; presumably the
            # FloatField coerces it back to a float - confirm.
            return("{0:0.2f}".format(100.0*float(int(number*(10.0**precision)))/(10.0**precision)))
        def estimate_10cv_scores(true_labels,predicted_labels):
            # Per-sample 0/1 correctness, split into 10 contiguous sections;
            # each section's mean stands in for one cross-validation fold score.
            scores = [ 1 if true_labels[i] == predicted_labels[i] else 0 for i in range(0, len(true_labels)) ]
            nbrOfScores = len(scores)
            sections = [scores[int(i*nbrOfScores/10):int((i+1)*nbrOfScores/10)] for i in range(0,10)]
            section_scores = [numpy.array(x).mean() for x in sections]
            return(numpy.array(section_scores))
        scores = estimate_10cv_scores(true_labels,predicted_labels)
        return cls(
            confusion_matrix=confusion_matrix(true_labels, predicted_labels),
            precision_weighted=make_score(metrics.precision_score(true_labels, predicted_labels, average='weighted'),4),
            precision_micro=make_score(metrics.precision_score(true_labels, predicted_labels, average='micro'),4),
            precision_macro=make_score(metrics.precision_score(true_labels, predicted_labels, average='macro'),4),
            recall_weighted=make_score(metrics.recall_score(true_labels, predicted_labels, average='weighted'),4),
            recall_micro=make_score(metrics.recall_score(true_labels, predicted_labels, average='micro'),4),
            recall_macro=make_score(metrics.recall_score(true_labels, predicted_labels, average='macro'),4),
            fmeasure_weighted=make_score(metrics.f1_score(true_labels, predicted_labels, average='weighted'),4),
            fmeasure_micro=make_score(metrics.f1_score(true_labels, predicted_labels, average='micro'),4),
            fmeasure_macro=make_score(metrics.f1_score(true_labels, predicted_labels, average='macro'),4),
            cohens_kappa=make_score(metrics.cohen_kappa_score(true_labels, predicted_labels),4),
            accuracy=make_score(scores.mean(),4),
            std=make_score(scores.std(),4)
        )
class Pipeline(CreatedUpdated, DeleteObjectsMixin, MongoModel):
    """A configured text-classification pipeline owned by a user.

    Combines a data source, preprocessing flags, an NLP feature extractor and
    a learner, together with the fitted sklearn pipeline and its results.
    """
    from newsgac.users.models import User
    from newsgac.data_sources.models import DataSource
    from newsgac.tasks.models import TrackedTask

    user = fields.ReferenceField(User, required=True)
    display_title = fields.CharField(required=True)
    created = fields.DateTimeField()
    updated = fields.DateTimeField()

    data_source = fields.ReferenceField(DataSource, required=True, blank=False)

    # Preprocessing configuration.
    stop_words = fields.ReferenceField(StopWords, required=False, blank=True)
    lowercase = fields.BooleanField(required=True, default=False)
    lemmatization = fields.BooleanField(required=True, default=False)
    quote_removal = fields.BooleanField(required=True, default=True)

    # Pass the factory callables (not pre-built instances) as defaults, so
    # every document gets its own fresh embedded object; previously a single
    # instance was created at class-definition time and shared by all
    # documents.
    nlp_tool = fields.EmbeddedDocumentField(NlpTool, blank=True, required=True, default=TFIDF.create)
    learner = fields.EmbeddedDocumentField(Learner)
    sk_pipeline = ObjectField()
    result = fields.EmbeddedDocumentField(Result, blank=True)
    grid_search_result = ObjectField()
    task = fields.EmbeddedDocumentField(TrackedTask, default=TrackedTask)

    @classmethod
    def create(cls):
        """Build an unsaved Pipeline pre-filled with the field defaults."""
        return cls(
            display_title="",
            data_source=None,
            stop_words=cls.stop_words.default,
            lowercase=cls.lowercase.default,
            lemmatization=cls.lemmatization.default,
            nlp_tool=cls.nlp_tool.default,
            learner=LearnerSVC.create()
        )

    def get_feature_extractor(self):
        """Return the feature extractor for this pipeline; must be overridden."""
        raise NotImplementedError('Subclass should implement get_feature_extractor')

    def get_sk_pipeline(self):
        """Construct the scikit-learn pipeline for this configuration."""
        return get_sk_pipeline(self)

    def delete(self):
        """Delete this pipeline, detaching it from any ACE runs that use it."""
        from newsgac.ace import ACE
        # delete this pipeline from related ace runs
        for ace in ACE.objects.raw({'pipelines': {'$in': [self.pk]}}):
            ace.delete_pipeline(self)
        super(Pipeline, self).delete()
| [
"sklearn.metrics.f1_score",
"sklearn.metrics.confusion_matrix",
"pymodm.fields.DateTimeField",
"newsgac.pipelines.get_sk_pipeline.get_sk_pipeline",
"newsgac.common.fields.ObjectField",
"newsgac.tasks.models.TrackedTask",
"sklearn.metrics.precision_score",
"pymodm.fields.FloatField",
"numpy.array",
... | [((604, 623), 'pymodm.fields.FloatField', 'fields.FloatField', ([], {}), '()\n', (621, 623), False, 'from pymodm import MongoModel, fields, EmbeddedMongoModel\n'), ((643, 662), 'pymodm.fields.FloatField', 'fields.FloatField', ([], {}), '()\n', (660, 662), False, 'from pymodm import MongoModel, fields, EmbeddedMongoModel\n'), ((686, 699), 'newsgac.common.fields.ObjectField', 'ObjectField', ([], {}), '()\n', (697, 699), False, 'from newsgac.common.fields import ObjectField\n'), ((721, 740), 'pymodm.fields.FloatField', 'fields.FloatField', ([], {}), '()\n', (738, 740), False, 'from pymodm import MongoModel, fields, EmbeddedMongoModel\n'), ((762, 781), 'pymodm.fields.FloatField', 'fields.FloatField', ([], {}), '()\n', (779, 781), False, 'from pymodm import MongoModel, fields, EmbeddedMongoModel\n'), ((806, 825), 'pymodm.fields.FloatField', 'fields.FloatField', ([], {}), '()\n', (823, 825), False, 'from pymodm import MongoModel, fields, EmbeddedMongoModel\n'), ((848, 867), 'pymodm.fields.FloatField', 'fields.FloatField', ([], {}), '()\n', (865, 867), False, 'from pymodm import MongoModel, fields, EmbeddedMongoModel\n'), ((890, 909), 'pymodm.fields.FloatField', 'fields.FloatField', ([], {}), '()\n', (907, 909), False, 'from pymodm import MongoModel, fields, EmbeddedMongoModel\n'), ((935, 954), 'pymodm.fields.FloatField', 'fields.FloatField', ([], {}), '()\n', (952, 954), False, 'from pymodm import MongoModel, fields, EmbeddedMongoModel\n'), ((974, 993), 'pymodm.fields.FloatField', 'fields.FloatField', ([], {}), '()\n', (991, 993), False, 'from pymodm import MongoModel, fields, EmbeddedMongoModel\n'), ((1013, 1032), 'pymodm.fields.FloatField', 'fields.FloatField', ([], {}), '()\n', (1030, 1032), False, 'from pymodm import MongoModel, fields, EmbeddedMongoModel\n'), ((1055, 1074), 'pymodm.fields.FloatField', 'fields.FloatField', ([], {}), '()\n', (1072, 1074), False, 'from pymodm import MongoModel, fields, EmbeddedMongoModel\n'), ((1085, 1104), 
'pymodm.fields.FloatField', 'fields.FloatField', ([], {}), '()\n', (1102, 1104), False, 'from pymodm import MongoModel, fields, EmbeddedMongoModel\n'), ((3404, 3446), 'pymodm.fields.ReferenceField', 'fields.ReferenceField', (['User'], {'required': '(True)'}), '(User, required=True)\n', (3425, 3446), False, 'from pymodm import MongoModel, fields, EmbeddedMongoModel\n'), ((3467, 3498), 'pymodm.fields.CharField', 'fields.CharField', ([], {'required': '(True)'}), '(required=True)\n', (3483, 3498), False, 'from pymodm import MongoModel, fields, EmbeddedMongoModel\n'), ((3513, 3535), 'pymodm.fields.DateTimeField', 'fields.DateTimeField', ([], {}), '()\n', (3533, 3535), False, 'from pymodm import MongoModel, fields, EmbeddedMongoModel\n'), ((3550, 3572), 'pymodm.fields.DateTimeField', 'fields.DateTimeField', ([], {}), '()\n', (3570, 3572), False, 'from pymodm import MongoModel, fields, EmbeddedMongoModel\n'), ((3592, 3653), 'pymodm.fields.ReferenceField', 'fields.ReferenceField', (['DataSource'], {'required': '(True)', 'blank': '(False)'}), '(DataSource, required=True, blank=False)\n', (3613, 3653), False, 'from pymodm import MongoModel, fields, EmbeddedMongoModel\n'), ((3671, 3731), 'pymodm.fields.ReferenceField', 'fields.ReferenceField', (['StopWords'], {'required': '(False)', 'blank': '(True)'}), '(StopWords, required=False, blank=True)\n', (3692, 3731), False, 'from pymodm import MongoModel, fields, EmbeddedMongoModel\n'), ((3748, 3797), 'pymodm.fields.BooleanField', 'fields.BooleanField', ([], {'required': '(True)', 'default': '(False)'}), '(required=True, default=False)\n', (3767, 3797), False, 'from pymodm import MongoModel, fields, EmbeddedMongoModel\n'), ((3818, 3867), 'pymodm.fields.BooleanField', 'fields.BooleanField', ([], {'required': '(True)', 'default': '(False)'}), '(required=True, default=False)\n', (3837, 3867), False, 'from pymodm import MongoModel, fields, EmbeddedMongoModel\n'), ((3888, 3936), 'pymodm.fields.BooleanField', 'fields.BooleanField', ([], 
{'required': '(True)', 'default': '(True)'}), '(required=True, default=True)\n', (3907, 3936), False, 'from pymodm import MongoModel, fields, EmbeddedMongoModel\n'), ((4055, 4092), 'pymodm.fields.EmbeddedDocumentField', 'fields.EmbeddedDocumentField', (['Learner'], {}), '(Learner)\n', (4083, 4092), False, 'from pymodm import MongoModel, fields, EmbeddedMongoModel\n'), ((4111, 4124), 'newsgac.common.fields.ObjectField', 'ObjectField', ([], {}), '()\n', (4122, 4124), False, 'from newsgac.common.fields import ObjectField\n'), ((4138, 4186), 'pymodm.fields.EmbeddedDocumentField', 'fields.EmbeddedDocumentField', (['Result'], {'blank': '(True)'}), '(Result, blank=True)\n', (4166, 4186), False, 'from pymodm import MongoModel, fields, EmbeddedMongoModel\n'), ((4212, 4225), 'newsgac.common.fields.ObjectField', 'ObjectField', ([], {}), '()\n', (4223, 4225), False, 'from newsgac.common.fields import ObjectField\n'), ((4830, 4851), 'newsgac.pipelines.get_sk_pipeline.get_sk_pipeline', 'get_sk_pipeline', (['self'], {}), '(self)\n', (4845, 4851), False, 'from newsgac.pipelines.get_sk_pipeline import get_sk_pipeline\n'), ((4983, 5033), 'newsgac.ace.ACE.objects.raw', 'ACE.objects.raw', (["{'pipelines': {'$in': [self.pk]}}"], {}), "({'pipelines': {'$in': [self.pk]}})\n", (4998, 5033), False, 'from newsgac.ace import ACE\n'), ((1773, 1800), 'numpy.array', 'numpy.array', (['section_scores'], {}), '(section_scores)\n', (1784, 1800), False, 'import numpy\n'), ((4025, 4039), 'newsgac.nlp_tools.TFIDF.create', 'TFIDF.create', ([], {}), '()\n', (4037, 4039), False, 'from newsgac.nlp_tools import TFIDF\n'), ((4288, 4301), 'newsgac.tasks.models.TrackedTask', 'TrackedTask', ([], {}), '()\n', (4299, 4301), False, 'from newsgac.tasks.models import TrackedTask\n'), ((1920, 1967), 'sklearn.metrics.confusion_matrix', 'confusion_matrix', (['true_labels', 'predicted_labels'], {}), '(true_labels, predicted_labels)\n', (1936, 1967), False, 'from sklearn.metrics import confusion_matrix\n'), ((4630, 
4649), 'newsgac.learners.LearnerSVC.create', 'LearnerSVC.create', ([], {}), '()\n', (4647, 4649), False, 'from newsgac.learners import LearnerSVC\n'), ((2011, 2085), 'sklearn.metrics.precision_score', 'metrics.precision_score', (['true_labels', 'predicted_labels'], {'average': '"""weighted"""'}), "(true_labels, predicted_labels, average='weighted')\n", (2034, 2085), False, 'from sklearn import metrics\n'), ((2129, 2200), 'sklearn.metrics.precision_score', 'metrics.precision_score', (['true_labels', 'predicted_labels'], {'average': '"""micro"""'}), "(true_labels, predicted_labels, average='micro')\n", (2152, 2200), False, 'from sklearn import metrics\n'), ((2244, 2315), 'sklearn.metrics.precision_score', 'metrics.precision_score', (['true_labels', 'predicted_labels'], {'average': '"""macro"""'}), "(true_labels, predicted_labels, average='macro')\n", (2267, 2315), False, 'from sklearn import metrics\n'), ((2359, 2430), 'sklearn.metrics.recall_score', 'metrics.recall_score', (['true_labels', 'predicted_labels'], {'average': '"""weighted"""'}), "(true_labels, predicted_labels, average='weighted')\n", (2379, 2430), False, 'from sklearn import metrics\n'), ((2471, 2539), 'sklearn.metrics.recall_score', 'metrics.recall_score', (['true_labels', 'predicted_labels'], {'average': '"""micro"""'}), "(true_labels, predicted_labels, average='micro')\n", (2491, 2539), False, 'from sklearn import metrics\n'), ((2580, 2648), 'sklearn.metrics.recall_score', 'metrics.recall_score', (['true_labels', 'predicted_labels'], {'average': '"""macro"""'}), "(true_labels, predicted_labels, average='macro')\n", (2600, 2648), False, 'from sklearn import metrics\n'), ((2694, 2761), 'sklearn.metrics.f1_score', 'metrics.f1_score', (['true_labels', 'predicted_labels'], {'average': '"""weighted"""'}), "(true_labels, predicted_labels, average='weighted')\n", (2710, 2761), False, 'from sklearn import metrics\n'), ((2804, 2868), 'sklearn.metrics.f1_score', 'metrics.f1_score', (['true_labels', 
'predicted_labels'], {'average': '"""micro"""'}), "(true_labels, predicted_labels, average='micro')\n", (2820, 2868), False, 'from sklearn import metrics\n'), ((2911, 2975), 'sklearn.metrics.f1_score', 'metrics.f1_score', (['true_labels', 'predicted_labels'], {'average': '"""macro"""'}), "(true_labels, predicted_labels, average='macro')\n", (2927, 2975), False, 'from sklearn import metrics\n'), ((3016, 3072), 'sklearn.metrics.cohen_kappa_score', 'metrics.cohen_kappa_score', (['true_labels', 'predicted_labels'], {}), '(true_labels, predicted_labels)\n', (3041, 3072), False, 'from sklearn import metrics\n'), ((1713, 1727), 'numpy.array', 'numpy.array', (['x'], {}), '(x)\n', (1724, 1727), False, 'import numpy\n')] |
from functools import partial
import numpy as np
import matplotlib.pyplot as plt
from open_spiel.python.project.part_1.dynamics_lenient_boltzmannq import dynamics_lb

# True for field plot, False for phase plot
PLOT_FLAG = False

# Two-player Stag Hunt payoff tensor; presumably axis 0 selects the player and
# the remaining axes index the joint action (Hunt / not Hunt) - TODO confirm
# against MultiPopulationDynamicsLB's expected payoff layout.
payoff_stag_hunt = np.array([[[1, 0], [2 / 3, 2 / 3]], [[1, 2 / 3], [0, 2 / 3]]])

# Stag Hunt
# Draw a 2x3 grid of directional plots of the lenient Boltzmann-Q dynamics,
# sweeping the leniency parameter K from 1000 down to 0.001.
fig = plt.figure(figsize=(10,10))
for i, K in zip(range(6),reversed([0.001,1,2,5,10,1000])):
    dyn_stag_hunt = dynamics_lb.MultiPopulationDynamicsLB(payoff_stag_hunt, [partial(dynamics_lb.lenient_boltzmann, K= K, temperature=0.05)] * 2)
    # "23{}" encodes a 2-row, 3-column subplot grid, panel i+1.
    ax1 = fig.add_subplot(int("23{}".format(i+1)), projection="2x2")
    ax1.quiver(dyn_stag_hunt) if PLOT_FLAG else ax1.streamplot(dyn_stag_hunt, linewidth="velocity", color="velocity")
    ax1.set_title("Stag Hunt, K={}".format(round(K,2)), fontweight="bold")
    ax1.set(xlabel="Player 1: Pr(Hunt)",ylabel="Player 2: Pr(Hunt)")
plt.show() | [
"numpy.array",
"matplotlib.pyplot.figure",
"functools.partial",
"matplotlib.pyplot.show"
] | [((251, 313), 'numpy.array', 'np.array', (['[[[1, 0], [2 / 3, 2 / 3]], [[1, 2 / 3], [0, 2 / 3]]]'], {}), '([[[1, 0], [2 / 3, 2 / 3]], [[1, 2 / 3], [0, 2 / 3]]])\n', (259, 313), True, 'import numpy as np\n'), ((333, 361), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 10)'}), '(figsize=(10, 10))\n', (343, 361), True, 'import matplotlib.pyplot as plt\n'), ((898, 908), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (906, 908), True, 'import matplotlib.pyplot as plt\n'), ((497, 558), 'functools.partial', 'partial', (['dynamics_lb.lenient_boltzmann'], {'K': 'K', 'temperature': '(0.05)'}), '(dynamics_lb.lenient_boltzmann, K=K, temperature=0.05)\n', (504, 558), False, 'from functools import partial\n')] |
import pandas as pd
import numpy as np
class Maze:
def __init__(self,goal=[3,3],trap1=[0,3],trap2=[3,1],position=0):
pass
'''
def printTable(self,p=None):
p = random_position()
table = pd.DataFrame(np.zeros((4,4),dtype=int),columns=None)
table.iloc[3,3]='X'
table.iloc[3,1]='T'
table.iloc[0,3]='T'
T = pd.DataFrame({
'linhas':[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3],\
'colunas':[0,0,0,0,1,1,1,1,2,2,2,2,3,3,3,3]
})
table = table.replace(0,'_')
table.iloc[T['linhas'][p],T['colunas'][p]] = 'o'
print(table.to_string(index=False,header=False))
print('')
'''
@property
def random_position(self):
return np.random.randint(0,16)
def set_traps(self,trap1,trap2):
self.trap1 = trap1
self.trap2 = trap2
    def set_goal(self, goal):
        """Record the coordinates of the goal cell."""
        self.goal = goal
    def get_goal(self):
        """Return the stored goal coordinates."""
        return self.goal
    def set_position(self, position):
        """Record the agent's current position (flattened cell index)."""
        self.position = position
def get_posisiotn(self):
return self.position
def make_maze(self,goal,trap1,trap2,position):
self.table = pd.DataFrame(np.zeros((4,4),dtype=int),columns=None)
self.table.iloc[goal[0],goal[1]]='X'
self.table.iloc[self.trap1[0],self.trap1[1]]='T'
self.table.iloc[self.trap2[0],self.trap2[1]]='T'
T = pd.DataFrame({
'linhas':[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3],\
'colunas':[0,0,0,0,1,1,1,1,2,2,2,2,3,3,3,3]
})
self.table = self.table.replace(0,'_')
self.table.iloc[T['linhas'][self.position],T['colunas'][self.position]] = 'o'
return self.table
    def print_maze(self, table):
        """Print the maze grid without index or header decoration.

        NOTE(review): the ``table`` argument is ignored; ``self.table`` is
        printed instead - confirm whether the parameter should be used.
        """
        print(self.table.to_string(index=False,header=False))
print('') | [
"pandas.DataFrame",
"numpy.random.randint",
"numpy.zeros"
] | [((783, 807), 'numpy.random.randint', 'np.random.randint', (['(0)', '(16)'], {}), '(0, 16)\n', (800, 807), True, 'import numpy as np\n'), ((1459, 1598), 'pandas.DataFrame', 'pd.DataFrame', (["{'linhas': [0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3], 'colunas': [0,\n 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3]}"], {}), "({'linhas': [0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3],\n 'colunas': [0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3]})\n", (1471, 1598), True, 'import pandas as pd\n'), ((1248, 1275), 'numpy.zeros', 'np.zeros', (['(4, 4)'], {'dtype': 'int'}), '((4, 4), dtype=int)\n', (1256, 1275), True, 'import numpy as np\n')] |
import logging
from functools import partial
import cv2
import os
import json
from collections import defaultdict
import numpy as np
import pandas as pd
import torch
from tensorboardX import SummaryWriter
from torch.utils.data import DataLoader
from evaluation.inception import InceptionScore
from sg2im.data.dataset_params import get_dataset, get_collate_fn
from scripts.args import get_args, print_args, init_args
from scripts.graphs_utils import calc_log_p
from sg2im.data.utils import decode_image, imagenet_deprocess, print_compute_converse_edges, \
print_compute_transitive_edges
from sg2im.meta_models import MetaGeneratorModel, MetaDiscriminatorModel
from sg2im.model import get_conv_converse
from sg2im.pix2pix_model import Pix2PixModel
from sg2im.data import deprocess_batch
from sg2im.metrics import jaccard
from sg2im.utils import batch_to, log_scalar_dict, remove_dummies_and_padding
from spade.models.networks.sync_batchnorm import DataParallelWithCallback
torch.backends.cudnn.benchmark = True
def restore_checkpoint(args, model, gans_model, discriminator, optimizer, device):
    """Restore model/discriminator/optimizer state from ``args.checkpoint_name``.

    Weights are mandatory (failure raises); optimizer states are loaded
    best-effort so a checkpoint from a slightly different run can still be
    fine-tuned.

    Returns:
        (epoch, t): the epoch counter and iteration counter stored in the
        checkpoint, so training resumes where it left off.

    Raises:
        NotImplementedError: if the checkpoint is missing or any of the
        required weight dicts cannot be loaded.
    """
    try:
        if args.checkpoint_name is None:
            raise Exception('You should pre-train the model on your training data first')
        img_discriminator, obj_discriminator = discriminator.img_discriminator, discriminator.obj_discriminator,
        optimizer_d_img, optimizer_d_obj = discriminator.optimizer_d_img, discriminator.optimizer_d_obj
        # Load pre-trained weights for fine-tune
        checkpoint = torch.load(args.checkpoint_name, map_location=device)
        model.load_state_dict(checkpoint['model_state'])
        gans_model.load_state_dict(checkpoint['gans_model_state'])
        img_discriminator.load_state_dict(checkpoint['d_img_state'])
        obj_discriminator.load_state_dict(checkpoint['d_obj_state'])
        # Load Optimizers (best-effort: a mismatch only prints a warning)
        try:
            optimizer_d_img.load_state_dict(checkpoint['d_img_optim_state'])
            optimizer_d_obj.load_state_dict(checkpoint['d_obj_optim_state'])
            optimizer.load_state_dict(checkpoint['optim_state'])
        except Exception as e:
            print("Could not load optimizers state:", e)
        # Load Epoch and Iteration num.
        t = checkpoint['counters']['t']
        epoch = checkpoint['counters']['epoch']
    except Exception as e:
        raise NotImplementedError(
            'Could not restore weights for checkpoint {} because `{}`'.format(args.checkpoint_name, e))
    return epoch, t
def restore_checkpoints(args, model, gans_model, discriminator, optimizer, device):
    """Merge and restore TWO checkpoints: a GAN checkpoint and a graph checkpoint.

    The graph checkpoint's model weights override the GAN checkpoint's where
    keys overlap; a handful of mask-discriminator input layers are dropped
    before loading (their shapes presumably differ between the two training
    setups — TODO confirm). Optimizer states are loaded best-effort.

    Returns:
        (epoch, t): always (0, 0) — counters restart after merging.

    Raises:
        NotImplementedError: if either checkpoint cannot be loaded.
    """
    try:
        if args.checkpoint_name is None:
            raise Exception('You should pre-train the model on your training data first')
        img_discriminator, obj_discriminator = discriminator.img_discriminator, discriminator.obj_discriminator,
        optimizer_d_img, optimizer_d_obj = discriminator.optimizer_d_img, discriminator.optimizer_d_obj
        # Load pre-trained weights for fine-tune
        checkpoint_gan = torch.load(args.checkpoint_gan_name, map_location=device)
        checkpoint_graph = torch.load(args.checkpoint_graph_name, map_location=device)
        # Graph-model weights take precedence over the GAN checkpoint's copies.
        checkpoint_gan['model_state'].update(checkpoint_graph['model_state'])
        model.load_state_dict(checkpoint_gan['model_state'], strict=False)
        # Drop first-layer mask-discriminator weights before the non-strict load.
        checkpoint_gan['gans_model_state'].pop(
            'module.discriminator.mask_discriminator.discriminator_0.model0.0.weight')
        checkpoint_gan['gans_model_state'].pop(
            'module.discriminator.mask_discriminator.discriminator_1.model0.0.weight')
        checkpoint_gan['gans_model_state'].pop('module.netD_mask.discriminator_0.model0.0.weight')
        checkpoint_gan['gans_model_state'].pop('module.netD_mask.discriminator_1.model0.0.weight')
        gans_model.load_state_dict(checkpoint_gan['gans_model_state'], strict=False)
        img_discriminator.load_state_dict(checkpoint_gan['d_img_state'])
        obj_discriminator.load_state_dict(checkpoint_gan['d_obj_state'])
    except Exception as e:
        raise NotImplementedError(
            'Could not restore weights for checkpoint {} because `{}`'.format(args.checkpoint_name, e))
    # Load Optimizers (best-effort: a mismatch only prints a warning)
    try:
        optimizer_d_img.load_state_dict(checkpoint_gan['d_img_optim_state'])
        optimizer_d_obj.load_state_dict(checkpoint_gan['d_obj_optim_state'])
        optimizer.load_state_dict(checkpoint_gan['optim_state'])
    except Exception as e:
        print("Could not load optimizers state:", e)
    # Load Epoch and Iteration num. (counters restart from scratch here)
    t = 0
    epoch = 0
    return epoch, t
def freeze_weights(model, discriminator, module):
    """Disable gradient updates for the image-generation sub-modules.

    Only ``module == 'generation'`` is supported: it freezes the
    layout-to-image generator (when the model has one) together with the
    entire discriminator. Any other value raises ``NotImplementedError``.
    """
    print(" >> Freeze Weights:")
    if module != 'generation':
        raise NotImplementedError('Unrecognized option, you can freeze either graph module or I3D module')
    print(" >> Freeze Layout to image module")
    if hasattr(model, 'layout_to_image_model'):
        for weight in model.layout_to_image_model.parameters():
            weight.requires_grad = False
    for weight in discriminator.parameters():
        weight.requires_grad = False
def add_loss(curr_loss, loss_dict, loss_name, weight=1):
    """Store ``curr_loss`` scaled by ``weight`` into ``loss_dict`` as a python float."""
    weighted = weight * curr_loss
    loss_dict[loss_name] = weighted.item()
def build_test_dsets(args):
    """Build the test-split DataLoader.

    Returns:
        (test_loader, vocab): a non-shuffled DataLoader over the test split
        and the dataset vocabulary.
    """
    dset = get_dataset(args.dataset, 'test', args)
    collate = partial(get_collate_fn(args), dset.vocab)
    loader = DataLoader(
        dset,
        batch_size=args.batch_size,
        num_workers=args.loader_num_workers,
        shuffle=False,
        collate_fn=collate,
    )
    return loader, dset.vocab
def build_train_val_loaders(args):
    """Build train and validation DataLoaders that share one vocabulary.

    The vocab is deep-copied via a JSON round trip so later mutations do not
    leak back into the datasets.

    Returns:
        (vocab, train_loader, val_loader). Train is always shuffled; val
        shuffling follows ``args.shuffle_val``.
    """
    train_dset = get_dataset(args.dataset, 'train', args)
    val_dset = get_dataset(args.dataset, 'val', args)
    assert train_dset.vocab == val_dset.vocab
    vocab = json.loads(json.dumps(train_dset.vocab))
    collate = partial(get_collate_fn(args), vocab)
    common_kwargs = dict(
        batch_size=args.batch_size,
        num_workers=args.loader_num_workers,
        collate_fn=collate,
    )
    train_loader = DataLoader(train_dset, shuffle=True, **common_kwargs)
    val_loader = DataLoader(val_dset, shuffle=args.shuffle_val, **common_kwargs)
    return vocab, train_loader, val_loader
def check_model(args, loader, model, gans_model, inception_score, use_gt=True, full_test=False):
    """Run one evaluation pass over ``loader`` with gradients disabled.

    Accumulates generator losses, box-IoU statistics (mean IoU plus
    fraction above 0.3 / 0.5 thresholds) and the inception score, and
    collects a per-image metrics table.

    Args:
        use_gt: feed ground-truth boxes/masks to the model (as in training)
            instead of letting it predict them.
        full_test: iterate the whole loader instead of stopping after
            ``args.num_val_samples`` samples.

    Returns:
        (mean_losses, samples, image_df): averaged scalar metrics; a dict of
        HWC image batches generated from the *last* evaluated batch; and a
        pandas DataFrame of per-image IoU / box / class details.
    """
    model.eval()
    num_samples = 0
    all_losses = defaultdict(list)
    total_iou = 0.
    total_iou_masks = 0.
    total_iou_05 = 0.
    total_iou_03 = 0.
    total_boxes = 0.
    inception_score.clean()
    image_df = {
        'image_id': [],
        'avg_iou': [],
        'iou03': [],
        'iou05': [],
        "predicted_boxes": [],
        "gt_boxes": [],
        "number_of_objects": [],
        "class": []
    }
    with torch.no_grad():
        for batch in loader:
            try:
                batch = batch_to(batch)
                imgs, objs, boxes, triplets, _, triplet_type, masks, image_ids = batch
                # Run the model as it has been run during training
                if use_gt:
                    model_out = model(objs, triplets, triplet_type, boxes_gt=boxes, masks_gt=masks, test_mode=True)
                else:
                    model_out = model(objs, triplets, triplet_type, test_mode=True)
                imgs_pred, boxes_pred, masks_pred = model_out
                G_losses = gans_model(batch, model_out, mode='compute_generator_loss')
                if boxes_pred is not None:
                    # Predicted boxes are clipped into the unit square before IoU.
                    boxes_pred = torch.clamp(boxes_pred, 0., 1.)
                if imgs_pred is not None:
                    inception_score(imgs_pred)
                if not args.skip_graph_model:
                    image_df['image_id'].extend(image_ids)
                    for i in range(boxes.size(0)):
                        boxes_sample = boxes[i]
                        boxes_pred_sample = boxes_pred[i]
                        # Strip dummy/padding objects before scoring the boxes.
                        boxes_pred_sample, boxes_sample = \
                            remove_dummies_and_padding(boxes_sample, objs[i], args.vocab,
                                                       [boxes_pred_sample, boxes_sample])
                        iou, iou05, iou03 = jaccard(boxes_pred_sample, boxes_sample)
                        total_iou += iou.sum()
                        total_iou_05 += iou05.sum()
                        total_iou_03 += iou03.sum()
                        total_boxes += float(iou.shape[0])
                        image_df['avg_iou'].append(np.mean(iou))
                        image_df['iou03'].append(np.mean(iou03))
                        # BUG FIX: this column previously logged np.mean(iou03)
                        # again, so the per-image iou05 statistic was wrong.
                        image_df['iou05'].append(np.mean(iou05))
                        image_df['predicted_boxes'].append(str(boxes_pred_sample.cpu().numpy().tolist()))
                        image_df['gt_boxes'].append(str(boxes_sample.cpu().numpy().tolist()))
                        image_df["number_of_objects"].append(len(objs[i]))
                        if objs.shape[-1] == 1:
                            # Single-attribute objects: log the class names directly.
                            image_df["class"].append(
                                str([args.vocab["object_idx_to_name"][obj_index] for obj_index in objs[i]]))
                        else:
                            # Multi-attribute objects: log the 'shape' attribute per object.
                            image_df["class"].append(str(
                                [args.vocab["reverse_attributes"]['shape'][str(int(objs[i][obj_index][2]))] for
                                 obj_index in range(objs[i].shape[0])]))
                for loss_name, loss_val in G_losses.items():
                    all_losses[loss_name].append(loss_val)
                num_samples += imgs.size(0)
                if not full_test and args.num_val_samples and num_samples >= args.num_val_samples:
                    break
            except Exception as e:
                print("Error in {}".format(str(e)))
    # Generate qualitative samples from the last batch under each input regime.
    samples = {}
    if not args.skip_generation and not args.skip_graph_model:
        samples['pred_box_pred_mask'] = model(objs, triplets, triplet_type, test_mode=True)[0]
        samples['pred_box_gt_mask'] = model(objs, triplets, triplet_type, masks_gt=masks, test_mode=True)[0]
    if not args.skip_generation:
        samples['gt_img'] = imgs
        samples['gt_box_gt_mask'] = \
            model(objs, triplets, triplet_type, boxes_gt=boxes, masks_gt=masks, test_mode=True)[0]
        samples['gt_box_pred_mask'] = model(objs, triplets, triplet_type, boxes_gt=boxes, test_mode=True)[0]
    for k, v in samples.items():
        # Deprocess and convert NCHW tensors to NHWC numpy arrays.
        samples[k] = np.transpose(deprocess_batch(v, deprocess_func=args.deprocess_func).cpu().numpy(),
                                  [0, 2, 3, 1])
    mean_losses = {k: torch.stack(v).mean() for k, v in all_losses.items() if k != 'bbox_pred_all'}
    if not args.skip_graph_model:
        mean_losses.update({'avg_iou': total_iou / total_boxes,
                            'total_iou_05': total_iou_05 / total_boxes,
                            'total_iou_03': total_iou_03 / total_boxes})
    mean_losses.update({'inception_mean': 0.0})
    mean_losses.update({'inception_std': 0.0})
    if not args.skip_generation:
        inception_mean, inception_std = inception_score.compute_score(splits=5)
        mean_losses.update({'inception_mean': inception_mean})
        mean_losses.update({'inception_std': inception_std})
    model.train()
    return mean_losses, samples, pd.DataFrame.from_dict(image_df)
def update_loader_params(dset, w_conv, w_trans):
    """Push learned relation weights back onto a dataset as numpy arrays.

    ``w_conv`` is copied over verbatim; ``w_trans`` is squashed through a
    sigmoid first. A ``None`` argument leaves the corresponding attribute
    untouched.
    """
    if w_conv is not None:
        conv_weights = w_conv.detach().cpu()
        dset.converse_candidates_weights = conv_weights.numpy()
    if w_trans is not None:
        trans_probs = torch.sigmoid(w_trans).detach().cpu()
        dset.trans_candidates_weights = trans_probs.numpy()
def main(args):
    """Full training entry point: build loaders, models and optimizers, then
    run the GAN training loop with periodic validation, checkpointing and a
    full test pass.

    Side effects: creates ``args.output_dir``, writes ``run_args.json``,
    TensorBoard event files, sample images and ``itr_*.pt`` checkpoints.
    """
    logger = logging.getLogger(__name__)
    args.vocab, train_loader, val_loader = build_train_val_loaders(args)
    init_args(args)
    learning_rate = args.learning_rate
    print_args(args)
    if not os.path.isdir(args.output_dir):
        print('Checkpoints directory "%s" does not exist; creating it' % args.output_dir)
        os.makedirs(args.output_dir)
    # Persist the run configuration for reproducibility.
    json.dump(vars(args), open(os.path.join(args.output_dir, 'run_args.json'), 'w'))
    writer = SummaryWriter(args.output_dir)
    float_dtype = torch.cuda.FloatTensor
    # Define img_deprocess
    if args.img_deprocess == "imagenet":
        args.deprocess_func = imagenet_deprocess
    elif args.img_deprocess == "decode_img":
        args.deprocess_func = decode_image
    else:
        print("Error: No deprocess function was found. decode_image was chosen")
        args.deprocess_func = decode_image
    # setup device - CPU or GPU
    device = torch.device("cuda:{gpu}".format(gpu=args.gpu_ids[0]) if args.use_cuda else "cpu")
    print(" > Active GPU ids: {}".format(args.gpu_ids))
    print(" > Using device: {}".format(device.type))
    model = MetaGeneratorModel(args, device)
    model.type(float_dtype)
    # Seed the loaders with the model's initial converse-relation weights.
    conv_weights_mat = get_conv_converse(model)
    update_loader_params(train_loader.dataset, conv_weights_mat, None)
    update_loader_params(val_loader.dataset, conv_weights_mat, None)
    # Separate the converse/transitivity relation weights from the base
    # parameters so they can use their own learning rates / optimizer.
    converse_list = [
        'sg_to_layout.module.converse_candidates_weights']  # 'sg_to_layout.module.trans_candidates_weights'
    trans_list = ['sg_to_layout.module.trans_candidates_weights']  # 'sg_to_layout.module.trans_candidates_weights'
    learned_converse_params = [kv[1] for kv in model.named_parameters() if kv[0] in converse_list]
    learned_transitivity_params = [kv[1] for kv in model.named_parameters() if kv[0] in trans_list]
    all_special_params = converse_list + trans_list
    base_params = [kv[1] for kv in model.named_parameters() if kv[0] not in all_special_params]
    optimizer = torch.optim.Adam([{'params': base_params, 'lr': learning_rate},
                                  {'params': learned_transitivity_params, 'lr': 1e-2}])
    optimizer_converse = torch.optim.Adam([{'params': learned_converse_params, 'lr': 1e-2}])
    print(model)
    discriminator = MetaDiscriminatorModel(args)
    print(discriminator)
    gans_model = Pix2PixModel(args, discriminator=discriminator)
    gans_model = DataParallelWithCallback(gans_model, device_ids=args.gpu_ids).to(device)
    epoch, t = 0, 0
    # Restore checkpoint
    if args.restore_checkpoint:
        epoch, t = restore_checkpoint(args, model, gans_model, discriminator, optimizer, device)
    # Freeze weights
    if args.freeze:
        freeze_weights(model, discriminator, args.freeze_options)
    # Init Inception Score
    inception_score = InceptionScore(device, batch_size=args.batch_size, resize=True)
    # Run Epoch
    meta_relations = [args.vocab['pred_name_to_idx'][p] for p in train_loader.dataset.meta_relations]
    non_meta_relations = set(args.vocab['pred_name_to_idx'].values()) - set(meta_relations)
    # Smallest positive float32 increment; guards against division by zero below.
    eps = np.finfo(np.float32).eps.item()
    while True:
        if t >= args.num_iterations:
            break
        epoch += 1
        print('Starting epoch %d' % epoch)
        # Run Batch
        for batch in train_loader:
            try:
                t += 1
                batch = batch_to(batch)
                imgs, objs, boxes, triplets, conv_counts, triplet_type, masks, image_ids = batch
                model_out = model(objs, triplets, triplet_type, boxes_gt=boxes, masks_gt=masks, test_mode=False)
                # non gan losses
                G_losses = gans_model(batch, model_out, mode="compute_generator_loss")
                # Per-sample box reward, used below for the REINFORCE-style
                # update of the converse-relation weights.
                r = G_losses["bbox_pred_all"].detach()
                G_losses = {k: v.mean() for k, v in G_losses.items()}
                log_scalar_dict(writer, G_losses, 'train/loss', t)
                optimizer.zero_grad()
                G_losses["total_loss"].backward()
                optimizer.step()
                # Update SRC params
                if args.learned_converse:
                    batch_size = batch[0].shape[0]
                    if batch_size > 1:
                        # Standardize the reward across the batch (eps avoids /0).
                        r = (r - r.mean()) / (r.std() + eps)
                    conv_weights_mat = get_conv_converse(model)
                    log_prob = calc_log_p(conv_weights_mat, non_meta_relations, conv_counts)
                    loss_conv = torch.mean(r * log_prob)
                    optimizer_converse.zero_grad()
                    loss_conv.backward()
                    optimizer_converse.step()
                    # Propagate the updated weights back into both loaders.
                    conv_weights_mat = get_conv_converse(model)
                    update_loader_params(train_loader.dataset, conv_weights_mat, None)
                    update_loader_params(val_loader.dataset, conv_weights_mat, None)
                # Update GAN discriminators losses
                D_losses = {}
                if not args.skip_generation and args.freeze_options != "generation":
                    D_losses = gans_model(batch, model_out, mode="compute_discriminator_loss")
                    D_losses = {k: v.mean() for k, v in D_losses.items()}
                    log_scalar_dict(writer, D_losses, 'train/loss', t)
                    set_d_gans_loss(D_losses, args, discriminator)
                # Logger
                if t % args.print_every == 0:
                    print('t = %d / %d' % (t, args.num_iterations))
                    for name, val in G_losses.items():
                        print(' G [%s]: %.4f' % (name, val))
                    for name, val in D_losses.items():
                        print(' D [%s]: %.4f' % (name, val))
                # Save checkpoint
                if t % args.checkpoint_every == 0:
                    conv_weights_mat = get_conv_converse(model)
                    print_compute_converse_edges({}, conv_weights_mat.detach(), args.vocab, non_meta_relations)
                    print_compute_transitive_edges({}, torch.sigmoid(
                        model.sg_to_layout.module.trans_candidates_weights).detach(), args.vocab)
                    # GT Boxes; GT Masks
                    print('checking: input box/mask as GT')
                    gt_val_losses, gt_val_samples, _ = check_model(args, val_loader, model, gans_model, inception_score,
                                                                   use_gt=True, full_test=False)
                    log_scalar_dict(writer, gt_val_losses, 'gt_val/loss', t)
                    log_results(gt_val_losses, t, prefix='GT VAL')
                    # Pred Boxes; Pred Masks
                    print('checking: input box/mask as PRED')
                    use_gt = True if args.skip_graph_model else False  # if skip graph then use gt
                    val_losses, val_samples, _ = check_model(args, val_loader, model, gans_model,
                                                             inception_score, use_gt=use_gt, full_test=False)
                    log_scalar_dict(writer, val_losses, 'val/loss', t)
                    log_results(val_losses, t, prefix='VAL')
                    save_images(args, t, val_samples, writer)
                    # Save checkpoint
                    checkpoint_path = os.path.join(args.output_dir, 'itr_%s.pt' % t)
                    print('Saving checkpoint to ', checkpoint_path)
                    save_checkpoint(args, checkpoint_path, discriminator, epoch, gans_model, model, optimizer, t)
                # Full test
                if t % args.full_test == 0:
                    print('checking on full eval')
                    test_losses, test_samples, _ = check_model(args, val_loader, model, gans_model, inception_score,
                                                               use_gt=False, full_test=True)
                    log_scalar_dict(writer, test_losses, 'test/loss', t)
                    print('Iter: {},'.format(t) + ' TEST Inception mean: %.4f' % test_losses['inception_mean'])
                    print('Iter: {},'.format(t) + ' TEST Inception STD: %.4f' % test_losses['inception_std'])
            except Exception as e:
                # Keep training through bad batches; log the full traceback.
                logger.exception(e)
    writer.close()
def log_results(semi_val_losses, t, prefix=''):
    """Print the standard evaluation metrics for iteration ``t``.

    Missing keys default to 0.0 so partial metric dicts never raise.
    """
    header = 'Iter: {}, '.format(t) + prefix
    metric_labels = [
        (' avg_iou', 'avg_iou'),
        (' total_iou_03', 'total_iou_03'),
        (' total_iou_05', 'total_iou_05'),
        (' Inception mean', 'inception_mean'),
        (' Inception STD', 'inception_std'),
    ]
    for label, key in metric_labels:
        print(header + '%s: %.4f' % (label, semi_val_losses.get(key, 0.0)))
def save_images(args, t, val_samples, writer, dir_name='val'):
    """Log validation sample images to TensorBoard and dump them as JPEGs.

    ``val_samples`` maps sample names to either a list of figures or an
    NHWC image batch (numpy). Batches are written both to the TensorBoard
    ``writer`` and to ``<output_dir>/<dir_name>/<t>/<name>/<i>.jpg``.
    """
    for k, v in val_samples.items():
        if isinstance(v, list):
            for i in range(len(v)):
                # NOTE(review): this passes the whole list ``v`` rather than
                # ``v[i]`` to add_figure — looks like a bug; confirm intent.
                writer.add_figure('val_%s/%s' % (k, i), v, global_step=t)
        else:
            path = os.path.join(args.output_dir, dir_name, str(t), k)
            os.makedirs(path)
            for i in range(v.shape[0]):
                writer.add_images('val_%s/%s' % (k, i), v[i], global_step=t, dataformats='HWC')
                # cv2.imwrite expects BGR ordering, hence the channel swap.
                RGB_img_i = cv2.cvtColor(v[i], cv2.COLOR_BGR2RGB)
                cv2.imwrite("{}/{}.jpg".format(path, i), RGB_img_i)
def set_d_gans_loss(D_losses, args, discriminator):
    """Back-propagate discriminator losses and step their optimizers.

    Steps the image discriminator on ``'total_img_loss'``, the object
    discriminator on ``'total_obj_loss'``, and — when masks are enabled and
    a mask loss was produced — the mask discriminator on ``'total_mask_loss'``.
    """
    # FIX: the original branched on args.use_img_disc but both branches were
    # byte-identical, so the dead conditional has been removed (behavior is
    # unchanged for either flag value).
    discriminator.optimizer_d_img.zero_grad()
    D_losses["total_img_loss"].backward()
    discriminator.optimizer_d_img.step()
    discriminator.optimizer_d_obj.zero_grad()
    D_losses["total_obj_loss"].backward()
    discriminator.optimizer_d_obj.step()
    if args.mask_size > 0 and "total_mask_loss" in D_losses:
        discriminator.optimizer_d_mask.zero_grad()
        D_losses["total_mask_loss"].backward()
        discriminator.optimizer_d_mask.step()
def save_checkpoint(args, checkpoint_path, discriminator, epoch, gans_model, model, optimizer, t):
    """Serialize training state to ``checkpoint_path`` via ``torch.save``.

    Always stores the generator/GAN/image-discriminator state plus vocab
    and (t, epoch) counters. When ``args.use_img_disc`` is False, the
    object and mask discriminators (and their optimizers) are stored too.
    """
    checkpoint_dict = {
        'model_state': model.state_dict(),
        'gans_model_state': gans_model.state_dict(),
        'd_img_state': discriminator.img_discriminator.state_dict(),
        'd_img_optim_state': discriminator.optimizer_d_img.state_dict(),
        'optim_state': optimizer.state_dict(),
        'vocab': args.vocab,
        'counters': {'t': t, 'epoch': epoch},
    }
    if not args.use_img_disc:
        checkpoint_dict.update({
            'd_obj_state': discriminator.obj_discriminator.state_dict(),
            'd_mask_state': discriminator.mask_discriminator.state_dict(),
            'd_obj_optim_state': discriminator.optimizer_d_obj.state_dict(),
            'd_mask_optim_state': discriminator.optimizer_d_mask.state_dict(),
        })
    torch.save(checkpoint_dict, checkpoint_path)
if __name__ == '__main__':
    # Parse command-line arguments and launch the training loop.
    args = get_args()
    main(args)
| [
"logging.getLogger",
"sg2im.utils.log_scalar_dict",
"evaluation.inception.InceptionScore",
"sg2im.model.get_conv_converse",
"sg2im.meta_models.MetaGeneratorModel",
"scripts.args.get_args",
"numpy.mean",
"tensorboardX.SummaryWriter",
"torch.mean",
"sg2im.data.dataset_params.get_dataset",
"scripts... | [((5334, 5373), 'sg2im.data.dataset_params.get_dataset', 'get_dataset', (['args.dataset', '"""test"""', 'args'], {}), "(args.dataset, 'test', args)\n", (5345, 5373), False, 'from sg2im.data.dataset_params import get_dataset, get_collate_fn\n'), ((5419, 5439), 'sg2im.data.dataset_params.get_collate_fn', 'get_collate_fn', (['args'], {}), '(args)\n', (5433, 5439), False, 'from sg2im.data.dataset_params import get_dataset, get_collate_fn\n'), ((5651, 5689), 'torch.utils.data.DataLoader', 'DataLoader', (['test_dset'], {}), '(test_dset, **loader_kwargs)\n', (5661, 5689), False, 'from torch.utils.data import DataLoader\n'), ((5784, 5824), 'sg2im.data.dataset_params.get_dataset', 'get_dataset', (['args.dataset', '"""train"""', 'args'], {}), "(args.dataset, 'train', args)\n", (5795, 5824), False, 'from sg2im.data.dataset_params import get_dataset, get_collate_fn\n'), ((5840, 5878), 'sg2im.data.dataset_params.get_dataset', 'get_dataset', (['args.dataset', '"""val"""', 'args'], {}), "(args.dataset, 'val', args)\n", (5851, 5878), False, 'from sg2im.data.dataset_params import get_dataset, get_collate_fn\n'), ((5992, 6012), 'sg2im.data.dataset_params.get_collate_fn', 'get_collate_fn', (['args'], {}), '(args)\n', (6006, 6012), False, 'from sg2im.data.dataset_params import get_dataset, get_collate_fn\n'), ((6220, 6259), 'torch.utils.data.DataLoader', 'DataLoader', (['train_dset'], {}), '(train_dset, **loader_kwargs)\n', (6230, 6259), False, 'from torch.utils.data import DataLoader\n'), ((6326, 6363), 'torch.utils.data.DataLoader', 'DataLoader', (['val_dset'], {}), '(val_dset, **loader_kwargs)\n', (6336, 6363), False, 'from torch.utils.data import DataLoader\n'), ((6560, 6577), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (6571, 6577), False, 'from collections import defaultdict\n'), ((12140, 12167), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (12157, 12167), False, 'import logging\n'), ((12245, 12260), 
'scripts.args.init_args', 'init_args', (['args'], {}), '(args)\n', (12254, 12260), False, 'from scripts.args import get_args, print_args, init_args\n'), ((12304, 12320), 'scripts.args.print_args', 'print_args', (['args'], {}), '(args)\n', (12314, 12320), False, 'from scripts.args import get_args, print_args, init_args\n'), ((12590, 12620), 'tensorboardX.SummaryWriter', 'SummaryWriter', (['args.output_dir'], {}), '(args.output_dir)\n', (12603, 12620), False, 'from tensorboardX import SummaryWriter\n'), ((13253, 13285), 'sg2im.meta_models.MetaGeneratorModel', 'MetaGeneratorModel', (['args', 'device'], {}), '(args, device)\n', (13271, 13285), False, 'from sg2im.meta_models import MetaGeneratorModel, MetaDiscriminatorModel\n'), ((13337, 13361), 'sg2im.model.get_conv_converse', 'get_conv_converse', (['model'], {}), '(model)\n', (13354, 13361), False, 'from sg2im.model import get_conv_converse\n'), ((14112, 14233), 'torch.optim.Adam', 'torch.optim.Adam', (["[{'params': base_params, 'lr': learning_rate}, {'params':\n learned_transitivity_params, 'lr': 0.01}]"], {}), "([{'params': base_params, 'lr': learning_rate}, {'params':\n learned_transitivity_params, 'lr': 0.01}])\n", (14128, 14233), False, 'import torch\n'), ((14289, 14356), 'torch.optim.Adam', 'torch.optim.Adam', (["[{'params': learned_converse_params, 'lr': 0.01}]"], {}), "([{'params': learned_converse_params, 'lr': 0.01}])\n", (14305, 14356), False, 'import torch\n'), ((14395, 14423), 'sg2im.meta_models.MetaDiscriminatorModel', 'MetaDiscriminatorModel', (['args'], {}), '(args)\n', (14417, 14423), False, 'from sg2im.meta_models import MetaGeneratorModel, MetaDiscriminatorModel\n'), ((14466, 14513), 'sg2im.pix2pix_model.Pix2PixModel', 'Pix2PixModel', (['args'], {'discriminator': 'discriminator'}), '(args, discriminator=discriminator)\n', (14478, 14513), False, 'from sg2im.pix2pix_model import Pix2PixModel\n'), ((14937, 15000), 'evaluation.inception.InceptionScore', 'InceptionScore', (['device'], {'batch_size': 
'args.batch_size', 'resize': '(True)'}), '(device, batch_size=args.batch_size, resize=True)\n', (14951, 15000), False, 'from evaluation.inception import InceptionScore\n'), ((23712, 23756), 'torch.save', 'torch.save', (['checkpoint_dict', 'checkpoint_path'], {}), '(checkpoint_dict, checkpoint_path)\n', (23722, 23756), False, 'import torch\n'), ((23797, 23807), 'scripts.args.get_args', 'get_args', ([], {}), '()\n', (23805, 23807), False, 'from scripts.args import get_args, print_args, init_args\n'), ((1528, 1581), 'torch.load', 'torch.load', (['args.checkpoint_name'], {'map_location': 'device'}), '(args.checkpoint_name, map_location=device)\n', (1538, 1581), False, 'import torch\n'), ((3027, 3084), 'torch.load', 'torch.load', (['args.checkpoint_gan_name'], {'map_location': 'device'}), '(args.checkpoint_gan_name, map_location=device)\n', (3037, 3084), False, 'import torch\n'), ((3112, 3171), 'torch.load', 'torch.load', (['args.checkpoint_graph_name'], {'map_location': 'device'}), '(args.checkpoint_graph_name, map_location=device)\n', (3122, 3171), False, 'import torch\n'), ((5598, 5624), 'functools.partial', 'partial', (['collate_fn', 'vocab'], {}), '(collate_fn, vocab)\n', (5605, 5624), False, 'from functools import partial\n'), ((5948, 5976), 'json.dumps', 'json.dumps', (['train_dset.vocab'], {}), '(train_dset.vocab)\n', (5958, 5976), False, 'import json\n'), ((6170, 6193), 'functools.partial', 'partial', (['collate', 'vocab'], {}), '(collate, vocab)\n', (6177, 6193), False, 'from functools import partial\n'), ((6944, 6959), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (6957, 6959), False, 'import torch\n'), ((11811, 11843), 'pandas.DataFrame.from_dict', 'pd.DataFrame.from_dict', (['image_df'], {}), '(image_df)\n', (11833, 11843), True, 'import pandas as pd\n'), ((12333, 12363), 'os.path.isdir', 'os.path.isdir', (['args.output_dir'], {}), '(args.output_dir)\n', (12346, 12363), False, 'import os\n'), ((12463, 12491), 'os.makedirs', 'os.makedirs', 
(['args.output_dir'], {}), '(args.output_dir)\n', (12474, 12491), False, 'import os\n'), ((12523, 12569), 'os.path.join', 'os.path.join', (['args.output_dir', '"""run_args.json"""'], {}), "(args.output_dir, 'run_args.json')\n", (12535, 12569), False, 'import os\n'), ((14531, 14592), 'spade.models.networks.sync_batchnorm.DataParallelWithCallback', 'DataParallelWithCallback', (['gans_model'], {'device_ids': 'args.gpu_ids'}), '(gans_model, device_ids=args.gpu_ids)\n', (14555, 14592), False, 'from spade.models.networks.sync_batchnorm import DataParallelWithCallback\n'), ((21299, 21316), 'os.makedirs', 'os.makedirs', (['path'], {}), '(path)\n', (21310, 21316), False, 'import os\n'), ((7031, 7046), 'sg2im.utils.batch_to', 'batch_to', (['batch'], {}), '(batch)\n', (7039, 7046), False, 'from sg2im.utils import batch_to, log_scalar_dict, remove_dummies_and_padding\n'), ((15221, 15241), 'numpy.finfo', 'np.finfo', (['np.float32'], {}), '(np.float32)\n', (15229, 15241), True, 'import numpy as np\n'), ((15506, 15521), 'sg2im.utils.batch_to', 'batch_to', (['batch'], {}), '(batch)\n', (15514, 15521), False, 'from sg2im.utils import batch_to, log_scalar_dict, remove_dummies_and_padding\n'), ((15994, 16044), 'sg2im.utils.log_scalar_dict', 'log_scalar_dict', (['writer', 'G_losses', '"""train/loss"""', 't'], {}), "(writer, G_losses, 'train/loss', t)\n", (16009, 16044), False, 'from sg2im.utils import batch_to, log_scalar_dict, remove_dummies_and_padding\n'), ((21481, 21518), 'cv2.cvtColor', 'cv2.cvtColor', (['v[i]', 'cv2.COLOR_BGR2RGB'], {}), '(v[i], cv2.COLOR_BGR2RGB)\n', (21493, 21518), False, 'import cv2\n'), ((7677, 7710), 'torch.clamp', 'torch.clamp', (['boxes_pred', '(0.0)', '(1.0)'], {}), '(boxes_pred, 0.0, 1.0)\n', (7688, 7710), False, 'import torch\n'), ((11057, 11071), 'torch.stack', 'torch.stack', (['v'], {}), '(v)\n', (11068, 11071), False, 'import torch\n'), ((16436, 16460), 'sg2im.model.get_conv_converse', 'get_conv_converse', (['model'], {}), '(model)\n', (16453, 
16460), False, 'from sg2im.model import get_conv_converse\n'), ((16492, 16553), 'scripts.graphs_utils.calc_log_p', 'calc_log_p', (['conv_weights_mat', 'non_meta_relations', 'conv_counts'], {}), '(conv_weights_mat, non_meta_relations, conv_counts)\n', (16502, 16553), False, 'from scripts.graphs_utils import calc_log_p\n'), ((16586, 16610), 'torch.mean', 'torch.mean', (['(r * log_prob)'], {}), '(r * log_prob)\n', (16596, 16610), False, 'import torch\n'), ((16790, 16814), 'sg2im.model.get_conv_converse', 'get_conv_converse', (['model'], {}), '(model)\n', (16807, 16814), False, 'from sg2im.model import get_conv_converse\n'), ((17343, 17393), 'sg2im.utils.log_scalar_dict', 'log_scalar_dict', (['writer', 'D_losses', '"""train/loss"""', 't'], {}), "(writer, D_losses, 'train/loss', t)\n", (17358, 17393), False, 'from sg2im.utils import batch_to, log_scalar_dict, remove_dummies_and_padding\n'), ((17958, 17982), 'sg2im.model.get_conv_converse', 'get_conv_converse', (['model'], {}), '(model)\n', (17975, 17982), False, 'from sg2im.model import get_conv_converse\n'), ((18603, 18659), 'sg2im.utils.log_scalar_dict', 'log_scalar_dict', (['writer', 'gt_val_losses', '"""gt_val/loss"""', 't'], {}), "(writer, gt_val_losses, 'gt_val/loss', t)\n", (18618, 18659), False, 'from sg2im.utils import batch_to, log_scalar_dict, remove_dummies_and_padding\n'), ((19162, 19212), 'sg2im.utils.log_scalar_dict', 'log_scalar_dict', (['writer', 'val_losses', '"""val/loss"""', 't'], {}), "(writer, val_losses, 'val/loss', t)\n", (19177, 19212), False, 'from sg2im.utils import batch_to, log_scalar_dict, remove_dummies_and_padding\n'), ((19413, 19459), 'os.path.join', 'os.path.join', (['args.output_dir', "('itr_%s.pt' % t)"], {}), "(args.output_dir, 'itr_%s.pt' % t)\n", (19425, 19459), False, 'import os\n'), ((19996, 20048), 'sg2im.utils.log_scalar_dict', 'log_scalar_dict', (['writer', 'test_losses', '"""test/loss"""', 't'], {}), "(writer, test_losses, 'test/loss', t)\n", (20011, 20048), False, 'from 
sg2im.utils import batch_to, log_scalar_dict, remove_dummies_and_padding\n'), ((8260, 8361), 'sg2im.utils.remove_dummies_and_padding', 'remove_dummies_and_padding', (['boxes_sample', 'objs[i]', 'args.vocab', '[boxes_pred_sample, boxes_sample]'], {}), '(boxes_sample, objs[i], args.vocab, [\n boxes_pred_sample, boxes_sample])\n', (8286, 8361), False, 'from sg2im.utils import batch_to, log_scalar_dict, remove_dummies_and_padding\n'), ((8456, 8496), 'sg2im.metrics.jaccard', 'jaccard', (['boxes_pred_sample', 'boxes_sample'], {}), '(boxes_pred_sample, boxes_sample)\n', (8463, 8496), False, 'from sg2im.metrics import jaccard\n'), ((8905, 8917), 'numpy.mean', 'np.mean', (['iou'], {}), '(iou)\n', (8912, 8917), True, 'import numpy as np\n'), ((8968, 8982), 'numpy.mean', 'np.mean', (['iou03'], {}), '(iou03)\n', (8975, 8982), True, 'import numpy as np\n'), ((9033, 9047), 'numpy.mean', 'np.mean', (['iou03'], {}), '(iou03)\n', (9040, 9047), True, 'import numpy as np\n'), ((12063, 12085), 'torch.sigmoid', 'torch.sigmoid', (['w_trans'], {}), '(w_trans)\n', (12076, 12085), False, 'import torch\n'), ((18150, 18215), 'torch.sigmoid', 'torch.sigmoid', (['model.sg_to_layout.module.trans_candidates_weights'], {}), '(model.sg_to_layout.module.trans_candidates_weights)\n', (18163, 18215), False, 'import torch\n'), ((10904, 10958), 'sg2im.data.deprocess_batch', 'deprocess_batch', (['v'], {'deprocess_func': 'args.deprocess_func'}), '(v, deprocess_func=args.deprocess_func)\n', (10919, 10958), False, 'from sg2im.data import deprocess_batch\n')] |
# -*- coding: utf-8 -*-
"""
Created on Sat Apr 02 20:35:11 2016

@author: perrytsao

Plot position, velocity, error and PID control-output traces from a saved
quadcopter flight log (``*_controldata.npy`` / ``*_controlvarnames.npy`` /
``*_flightdata.npy``). The flight-name prefix is taken from the first
command-line argument, or defaults to the most recent log in
``flight_data\\``.
"""
import numpy as np
import matplotlib.pyplot as plt
import sys
import glob
import os

plt.close('all')

if len(sys.argv) > 1:
    fltname = 'flight_data\\' + sys.argv[1]
else:
    search_dir = "flight_data\\"
    # remove anything from the list that is not a file (directories, symlinks)
    # BUG FIX: on Python 3, filter() returns a lazy iterator which has no
    # .sort() method -- materialize it into a list first.
    files = list(filter(os.path.isfile, glob.glob(search_dir + "*.npy")))
    files.sort(key=lambda x: os.path.getmtime(x))
    # Most recent file; strip the trailing "<kind>.npy" token to recover the
    # shared flight-name prefix (e.g. '2016_04_13_21_41_flt1').
    m = files[-1]
    ms = m.split('_')
    ms.pop()
    fltname = '_'.join(ms)

controldata = np.load(fltname + '_controldata.npy')
controlvarnames = np.load(fltname + '_controlvarnames.npy')
flightdata = np.load(fltname + '_flightdata.npy')

# Control parameters (PID gains etc.) keyed by their variable name.
cpdict = dict(zip(controlvarnames, controldata))

flight_data_names = ['t', 'x', 'y', 'z',
                     'dx', 'dy', 'dz',
                     'e_dx', 'e_ix', 'e_d2x',
                     'e_dy', 'e_iy', 'e_d2y',
                     'e_dz', 'e_iz', 'e_d2z',
                     'xspeed', 'yspeed', 'zspeed',
                     'throttle', 'aileron', 'elevator', 'rudder']
# index of flight data: skip the header row, then bind each column to a
# module-level variable named after it (t, x, y, ... via exec).
fd = flightdata[1:, :]
fi = dict(zip(flight_data_names, range(23)))
for xx in fi.keys():
    exec(xx + '=fd[:,fi[xx]]')
t = t - t[0]  # time axis relative to start of flight

# NOTE: plt.hold() was removed in matplotlib >= 3.0 (hold-on is now always
# the default), so the original plt.hold(True) calls have been dropped.
# fig.canvas.set_window_title was removed in matplotlib 3.6; the manager
# equivalent works across versions.
fig1 = plt.figure(1)
plt.clf()
plt.plot(t, x, t, y, t, z)
fig1.canvas.manager.set_window_title('Position (x,y,z)')
plt.show()

fig2 = plt.figure(2)
plt.clf()
plt.subplot(211)
plt.plot(t, x, t, y, t, z)
# Reference altitude / position guide lines.
plt.plot([t[0], t[-1]], [350, 350])
plt.plot([t[0], t[-1]], [250, 250])
plt.plot([t[0], t[-1]], [65, 65])
plt.subplot(212)
plt.plot(t, dx, t, dy, t, dz)
# post-calculated velocity (no filtering)
compdx = -1 * (x[0:-1] - x[1:])
compdy = -1 * (y[0:-1] - y[1:])
compdz = -1 * (z[0:-1] - z[1:])
# plt.plot(t[1:], compdx, t[1:], compdy, t[1:], compdz)
plt.plot(t, xspeed, t, yspeed, t, zspeed)
fig2.canvas.manager.set_window_title('Velocities (dx, dy, dz)')
plt.show()

fig3 = plt.figure(3)
plt.clf()
plt.plot(t, e_dx, t, e_dy, t, e_dz)
fig3.canvas.manager.set_window_title('Position Error (e_dx, e_dy, e_dz)')
plt.show()

fig4 = plt.figure(4)
plt.clf()
plt.plot(t, aileron, t, elevator, t, throttle)
fig4.canvas.manager.set_window_title('Aileron-elevator-throttle')
plt.show()

fig5 = plt.figure(5)
plt.clf()
plt.subplot(211)
plt.plot(t, xspeed, t, yspeed, t, zspeed)
plt.subplot(212)
plt.plot(t, dx, t, dy, t, dz)
fig5.canvas.manager.set_window_title('Target Velocities (xspeed, yspeed, zspeed)')
plt.show()

fig6 = plt.figure(6)
plt.clf()
plt.subplot(411)
# aileron = cp.Kx*(e_dx*cp.Kpx+cp.Kix*e_ix+cp.Kdx*e_d2x)+AILERON_MID
plt.plot(t, x, label='X')
plt.plot(t, y, label='Y')
plt.plot(t, z, label='Z')
plt.legend()
plt.subplot(412)
plt.plot(t, e_dx * cpdict['Kpx'] * cpdict['Kx'], label='P')
plt.plot(t, e_ix * cpdict['Kix'] * cpdict['Kx'], label='I')
plt.plot(t, e_d2x * cpdict['Kdx'] * cpdict['Kx'], label='D')
plt.legend()
plt.subplot(413)
plt.plot(t, e_dy * cpdict['Kpy'] * cpdict['Ky'], label='P')
plt.plot(t, e_iy * cpdict['Kiy'] * cpdict['Ky'], label='I')
plt.plot(t, e_d2y * cpdict['Kdy'] * cpdict['Ky'], label='D')
plt.legend()
plt.subplot(414)
plt.plot(t, e_dz * cpdict['Kpz'] * cpdict['Kz'], label='P')
plt.plot(t, e_iz * cpdict['Kiz'] * cpdict['Kz'], label='I')
plt.plot(t, e_d2z * cpdict['Kdz'] * cpdict['Kz'], label='D')
plt.legend()
fig6.canvas.manager.set_window_title('Control ouputs - PID')
plt.show()
| [
"matplotlib.pyplot.hold",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.clf",
"matplotlib.pyplot.close",
"matplotlib.pyplot.figure",
"glob.glob",
"os.path.getmtime",
"numpy.load",
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.show"
] | [((174, 190), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (183, 190), True, 'import matplotlib.pyplot as plt\n'), ((761, 798), 'numpy.load', 'np.load', (["(fltname + '_controldata.npy')"], {}), "(fltname + '_controldata.npy')\n", (768, 798), True, 'import numpy as np\n'), ((813, 854), 'numpy.load', 'np.load', (["(fltname + '_controlvarnames.npy')"], {}), "(fltname + '_controlvarnames.npy')\n", (820, 854), True, 'import numpy as np\n'), ((864, 900), 'numpy.load', 'np.load', (["(fltname + '_flightdata.npy')"], {}), "(fltname + '_flightdata.npy')\n", (871, 900), True, 'import numpy as np\n'), ((1458, 1471), 'matplotlib.pyplot.figure', 'plt.figure', (['(1)'], {}), '(1)\n', (1468, 1471), True, 'import matplotlib.pyplot as plt\n'), ((1472, 1481), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (1479, 1481), True, 'import matplotlib.pyplot as plt\n'), ((1482, 1508), 'matplotlib.pyplot.plot', 'plt.plot', (['t', 'x', 't', 'y', 't', 'z'], {}), '(t, x, t, y, t, z)\n', (1490, 1508), True, 'import matplotlib.pyplot as plt\n'), ((1557, 1567), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1565, 1567), True, 'import matplotlib.pyplot as plt\n'), ((1574, 1587), 'matplotlib.pyplot.figure', 'plt.figure', (['(2)'], {}), '(2)\n', (1584, 1587), True, 'import matplotlib.pyplot as plt\n'), ((1588, 1597), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (1595, 1597), True, 'import matplotlib.pyplot as plt\n'), ((1598, 1614), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(211)'], {}), '(211)\n', (1609, 1614), True, 'import matplotlib.pyplot as plt\n'), ((1615, 1641), 'matplotlib.pyplot.plot', 'plt.plot', (['t', 'x', 't', 'y', 't', 'z'], {}), '(t, x, t, y, t, z)\n', (1623, 1641), True, 'import matplotlib.pyplot as plt\n'), ((1641, 1655), 'matplotlib.pyplot.hold', 'plt.hold', (['(True)'], {}), '(True)\n', (1649, 1655), True, 'import matplotlib.pyplot as plt\n'), ((1656, 1691), 'matplotlib.pyplot.plot', 'plt.plot', (['[t[0], t[-1]]', 
'[350, 350]'], {}), '([t[0], t[-1]], [350, 350])\n', (1664, 1691), True, 'import matplotlib.pyplot as plt\n'), ((1692, 1727), 'matplotlib.pyplot.plot', 'plt.plot', (['[t[0], t[-1]]', '[250, 250]'], {}), '([t[0], t[-1]], [250, 250])\n', (1700, 1727), True, 'import matplotlib.pyplot as plt\n'), ((1728, 1761), 'matplotlib.pyplot.plot', 'plt.plot', (['[t[0], t[-1]]', '[65, 65]'], {}), '([t[0], t[-1]], [65, 65])\n', (1736, 1761), True, 'import matplotlib.pyplot as plt\n'), ((1762, 1778), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(212)'], {}), '(212)\n', (1773, 1778), True, 'import matplotlib.pyplot as plt\n'), ((1780, 1809), 'matplotlib.pyplot.plot', 'plt.plot', (['t', 'dx', 't', 'dy', 't', 'dz'], {}), '(t, dx, t, dy, t, dz)\n', (1788, 1809), True, 'import matplotlib.pyplot as plt\n'), ((1809, 1823), 'matplotlib.pyplot.hold', 'plt.hold', (['(True)'], {}), '(True)\n', (1817, 1823), True, 'import matplotlib.pyplot as plt\n'), ((2010, 2051), 'matplotlib.pyplot.plot', 'plt.plot', (['t', 'xspeed', 't', 'yspeed', 't', 'zspeed'], {}), '(t, xspeed, t, yspeed, t, zspeed)\n', (2018, 2051), True, 'import matplotlib.pyplot as plt\n'), ((2108, 2118), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2116, 2118), True, 'import matplotlib.pyplot as plt\n'), ((2125, 2138), 'matplotlib.pyplot.figure', 'plt.figure', (['(3)'], {}), '(3)\n', (2135, 2138), True, 'import matplotlib.pyplot as plt\n'), ((2139, 2148), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (2146, 2148), True, 'import matplotlib.pyplot as plt\n'), ((2149, 2184), 'matplotlib.pyplot.plot', 'plt.plot', (['t', 'e_dx', 't', 'e_dy', 't', 'e_dz'], {}), '(t, e_dx, t, e_dy, t, e_dz)\n', (2157, 2184), True, 'import matplotlib.pyplot as plt\n'), ((2248, 2258), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2256, 2258), True, 'import matplotlib.pyplot as plt\n'), ((2269, 2282), 'matplotlib.pyplot.figure', 'plt.figure', (['(4)'], {}), '(4)\n', (2279, 2282), True, 'import matplotlib.pyplot as plt\n'), 
((2283, 2292), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (2290, 2292), True, 'import matplotlib.pyplot as plt\n'), ((2293, 2339), 'matplotlib.pyplot.plot', 'plt.plot', (['t', 'aileron', 't', 'elevator', 't', 'throttle'], {}), '(t, aileron, t, elevator, t, throttle)\n', (2301, 2339), True, 'import matplotlib.pyplot as plt\n'), ((2397, 2407), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2405, 2407), True, 'import matplotlib.pyplot as plt\n'), ((2414, 2427), 'matplotlib.pyplot.figure', 'plt.figure', (['(5)'], {}), '(5)\n', (2424, 2427), True, 'import matplotlib.pyplot as plt\n'), ((2428, 2437), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (2435, 2437), True, 'import matplotlib.pyplot as plt\n'), ((2438, 2454), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(211)'], {}), '(211)\n', (2449, 2454), True, 'import matplotlib.pyplot as plt\n'), ((2455, 2496), 'matplotlib.pyplot.plot', 'plt.plot', (['t', 'xspeed', 't', 'yspeed', 't', 'zspeed'], {}), '(t, xspeed, t, yspeed, t, zspeed)\n', (2463, 2496), True, 'import matplotlib.pyplot as plt\n'), ((2497, 2513), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(212)'], {}), '(212)\n', (2508, 2513), True, 'import matplotlib.pyplot as plt\n'), ((2514, 2543), 'matplotlib.pyplot.plot', 'plt.plot', (['t', 'dx', 't', 'dy', 't', 'dz'], {}), '(t, dx, t, dy, t, dz)\n', (2522, 2543), True, 'import matplotlib.pyplot as plt\n'), ((2618, 2628), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2626, 2628), True, 'import matplotlib.pyplot as plt\n'), ((2635, 2648), 'matplotlib.pyplot.figure', 'plt.figure', (['(6)'], {}), '(6)\n', (2645, 2648), True, 'import matplotlib.pyplot as plt\n'), ((2649, 2658), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (2656, 2658), True, 'import matplotlib.pyplot as plt\n'), ((2659, 2675), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(411)'], {}), '(411)\n', (2670, 2675), True, 'import matplotlib.pyplot as plt\n'), ((2746, 2771), 'matplotlib.pyplot.plot', 'plt.plot', 
(['t', 'x'], {'label': '"""X"""'}), "(t, x, label='X')\n", (2754, 2771), True, 'import matplotlib.pyplot as plt\n'), ((2772, 2797), 'matplotlib.pyplot.plot', 'plt.plot', (['t', 'y'], {'label': '"""Y"""'}), "(t, y, label='Y')\n", (2780, 2797), True, 'import matplotlib.pyplot as plt\n'), ((2798, 2823), 'matplotlib.pyplot.plot', 'plt.plot', (['t', 'z'], {'label': '"""Z"""'}), "(t, z, label='Z')\n", (2806, 2823), True, 'import matplotlib.pyplot as plt\n'), ((2824, 2836), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (2834, 2836), True, 'import matplotlib.pyplot as plt\n'), ((2838, 2854), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(412)'], {}), '(412)\n', (2849, 2854), True, 'import matplotlib.pyplot as plt\n'), ((2855, 2914), 'matplotlib.pyplot.plot', 'plt.plot', (['t', "(e_dx * cpdict['Kpx'] * cpdict['Kx'])"], {'label': '"""P"""'}), "(t, e_dx * cpdict['Kpx'] * cpdict['Kx'], label='P')\n", (2863, 2914), True, 'import matplotlib.pyplot as plt\n'), ((2911, 2970), 'matplotlib.pyplot.plot', 'plt.plot', (['t', "(e_ix * cpdict['Kix'] * cpdict['Kx'])"], {'label': '"""I"""'}), "(t, e_ix * cpdict['Kix'] * cpdict['Kx'], label='I')\n", (2919, 2970), True, 'import matplotlib.pyplot as plt\n'), ((2967, 3027), 'matplotlib.pyplot.plot', 'plt.plot', (['t', "(e_d2x * cpdict['Kdx'] * cpdict['Kx'])"], {'label': '"""D"""'}), "(t, e_d2x * cpdict['Kdx'] * cpdict['Kx'], label='D')\n", (2975, 3027), True, 'import matplotlib.pyplot as plt\n'), ((3023, 3035), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (3033, 3035), True, 'import matplotlib.pyplot as plt\n'), ((3037, 3053), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(413)'], {}), '(413)\n', (3048, 3053), True, 'import matplotlib.pyplot as plt\n'), ((3054, 3113), 'matplotlib.pyplot.plot', 'plt.plot', (['t', "(e_dy * cpdict['Kpy'] * cpdict['Ky'])"], {'label': '"""P"""'}), "(t, e_dy * cpdict['Kpy'] * cpdict['Ky'], label='P')\n", (3062, 3113), True, 'import matplotlib.pyplot as plt\n'), ((3110, 3169), 
'matplotlib.pyplot.plot', 'plt.plot', (['t', "(e_iy * cpdict['Kiy'] * cpdict['Ky'])"], {'label': '"""I"""'}), "(t, e_iy * cpdict['Kiy'] * cpdict['Ky'], label='I')\n", (3118, 3169), True, 'import matplotlib.pyplot as plt\n'), ((3166, 3226), 'matplotlib.pyplot.plot', 'plt.plot', (['t', "(e_d2y * cpdict['Kdy'] * cpdict['Ky'])"], {'label': '"""D"""'}), "(t, e_d2y * cpdict['Kdy'] * cpdict['Ky'], label='D')\n", (3174, 3226), True, 'import matplotlib.pyplot as plt\n'), ((3222, 3234), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (3232, 3234), True, 'import matplotlib.pyplot as plt\n'), ((3236, 3252), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(414)'], {}), '(414)\n', (3247, 3252), True, 'import matplotlib.pyplot as plt\n'), ((3253, 3312), 'matplotlib.pyplot.plot', 'plt.plot', (['t', "(e_dz * cpdict['Kpz'] * cpdict['Kz'])"], {'label': '"""P"""'}), "(t, e_dz * cpdict['Kpz'] * cpdict['Kz'], label='P')\n", (3261, 3312), True, 'import matplotlib.pyplot as plt\n'), ((3309, 3368), 'matplotlib.pyplot.plot', 'plt.plot', (['t', "(e_iz * cpdict['Kiz'] * cpdict['Kz'])"], {'label': '"""I"""'}), "(t, e_iz * cpdict['Kiz'] * cpdict['Kz'], label='I')\n", (3317, 3368), True, 'import matplotlib.pyplot as plt\n'), ((3365, 3425), 'matplotlib.pyplot.plot', 'plt.plot', (['t', "(e_d2z * cpdict['Kdz'] * cpdict['Kz'])"], {'label': '"""D"""'}), "(t, e_d2z * cpdict['Kdz'] * cpdict['Kz'], label='D')\n", (3373, 3425), True, 'import matplotlib.pyplot as plt\n'), ((3421, 3433), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (3431, 3433), True, 'import matplotlib.pyplot as plt\n'), ((3488, 3498), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3496, 3498), True, 'import matplotlib.pyplot as plt\n'), ((538, 569), 'glob.glob', 'glob.glob', (["(search_dir + '*.npy')"], {}), "(search_dir + '*.npy')\n", (547, 569), False, 'import glob\n'), ((599, 618), 'os.path.getmtime', 'os.path.getmtime', (['x'], {}), '(x)\n', (615, 618), False, 'import os\n')] |
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Thu Nov 7 23:24:19 2019
@author: usuario
"""
import cv2
import numpy
cam = cv2.VideoCapture(0)
kernel = numpy.ones((5 ,5), numpy.uint8)
while (True):
ret, frame = cam.read()
rangomax = numpy.array([50, 255, 50]) # B, G, R
rangomin = numpy.array([0, 51, 0])
mask = cv2.inRange(frame, rangomin, rangomax)
# reduce the noise
opening = cv2.morphologyEx(mask, cv2.MORPH_OPEN, kernel)
x, y, w, h = cv2.boundingRect(opening)
#cv2.rectangle(frame, (x, y), (x+w, y + h), (0, 255, 0), 3)
cv2.circle(frame, (x+w/2, y+h/2), 20, (0, 0, 255), 3)
cv2.circle(frame, (x+w/2, y+h/2), 5, (0, 0, 255), -1)
cv2.imshow('camera', frame)
k = cv2.waitKey(1) & 0xFF
if k == 27:
break | [
"numpy.ones",
"cv2.inRange",
"cv2.imshow",
"numpy.array",
"cv2.morphologyEx",
"cv2.circle",
"cv2.VideoCapture",
"cv2.waitKey",
"cv2.boundingRect"
] | [((142, 161), 'cv2.VideoCapture', 'cv2.VideoCapture', (['(0)'], {}), '(0)\n', (158, 161), False, 'import cv2\n'), ((171, 202), 'numpy.ones', 'numpy.ones', (['(5, 5)', 'numpy.uint8'], {}), '((5, 5), numpy.uint8)\n', (181, 202), False, 'import numpy\n'), ((262, 288), 'numpy.array', 'numpy.array', (['[50, 255, 50]'], {}), '([50, 255, 50])\n', (273, 288), False, 'import numpy\n'), ((314, 337), 'numpy.array', 'numpy.array', (['[0, 51, 0]'], {}), '([0, 51, 0])\n', (325, 337), False, 'import numpy\n'), ((349, 387), 'cv2.inRange', 'cv2.inRange', (['frame', 'rangomin', 'rangomax'], {}), '(frame, rangomin, rangomax)\n', (360, 387), False, 'import cv2\n'), ((425, 471), 'cv2.morphologyEx', 'cv2.morphologyEx', (['mask', 'cv2.MORPH_OPEN', 'kernel'], {}), '(mask, cv2.MORPH_OPEN, kernel)\n', (441, 471), False, 'import cv2\n'), ((491, 516), 'cv2.boundingRect', 'cv2.boundingRect', (['opening'], {}), '(opening)\n', (507, 516), False, 'import cv2\n'), ((587, 648), 'cv2.circle', 'cv2.circle', (['frame', '(x + w / 2, y + h / 2)', '(20)', '(0, 0, 255)', '(3)'], {}), '(frame, (x + w / 2, y + h / 2), 20, (0, 0, 255), 3)\n', (597, 648), False, 'import cv2\n'), ((645, 706), 'cv2.circle', 'cv2.circle', (['frame', '(x + w / 2, y + h / 2)', '(5)', '(0, 0, 255)', '(-1)'], {}), '(frame, (x + w / 2, y + h / 2), 5, (0, 0, 255), -1)\n', (655, 706), False, 'import cv2\n'), ((705, 732), 'cv2.imshow', 'cv2.imshow', (['"""camera"""', 'frame'], {}), "('camera', frame)\n", (715, 732), False, 'import cv2\n'), ((743, 757), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (754, 757), False, 'import cv2\n')] |
import typing
class ReadStdin:
    """Incremental whitespace-delimited token reader over ``sys.stdin``.

    Calling the instance yields the next raw token as ``bytes``; ``int()``
    and ``str()`` are typed conveniences on top of it.
    """

    def __call__(self) -> bytes:
        """Return the next raw token from standard input."""
        return next(self.__chunks)

    def __init__(self) -> None:
        # Annotations corrected from typing.NoReturn to None: NoReturn is
        # reserved for functions that never return normally.
        import sys
        self.__buf = sys.stdin.buffer
        self.__chunks = self.__read_chunks()

    def int(self) -> int:
        """Return the next token parsed as an integer."""
        return int(self())

    def __read_chunks(self) -> typing.Iterator[bytes]:
        # Stream stdin line by line so very large inputs never need to be
        # held in memory at once.
        while True:
            line = self.__buf.readline()
            if not line:
                # EOF: end the generator (the original looped forever
                # here, busy-spinning on empty reads).
                return
            for chunk in line.split():
                yield chunk

    def str(self) -> str:
        """Return the next token decoded as text."""
        return self().decode()
import typing
from abc import ABC, abstractmethod
class Solver(ABC):
    """Template for a problem solver: read the input, then solve it.

    Subclasses implement ``_prepare`` (input) and ``_solve`` (answer);
    calling the instance runs both in order.
    """

    def __call__(self) -> None:
        """Run the full pipeline: prepare input, then solve."""
        self._prepare()
        self._solve()

    def __init__(self) -> None:
        # Annotations corrected from typing.NoReturn to None: these
        # methods all return normally.
        ...

    @abstractmethod
    def _prepare(self) -> None:
        """Read and store the problem input."""
        ...

    @abstractmethod
    def _solve(self) -> None:
        """Compute and output the answer."""
        ...
import typing
class CompressArray():
    """Coordinate-compress a sequence of ints to dense 0-based ranks,
    remembering the original value behind each rank."""

    def retrieve(self, i: int) -> int:
        """Return the original value whose compressed rank is ``i``."""
        return self.__v[i]

    def __call__(self, a: typing.Iterable[int]) -> typing.List[int]:
        """Map each element of ``a`` to its rank among the distinct
        values; ``retrieve`` inverts the mapping."""
        a = list(a)
        distinct = sorted(set(a))
        rank = {value: r for r, value in enumerate(distinct)}
        # Pad the value table to len(a) slots so it has one entry per
        # input element (unused tail slots stay None).
        self.__v = distinct + [None] * (len(a) - len(distinct))
        return [rank[value] for value in a]
import typing
class FenwickTree():
    """1-indexed Fenwick (binary indexed) tree supporting point updates
    and prefix sums in O(log n)."""

    def __init__(self, n: int) -> None:
        """Create a tree over indices 1..n with all values zero.

        Annotations corrected from typing.NoReturn to None: these
        methods return normally.
        """
        self.__buf = [0] * (n + 1)  # slot 0 is unused (1-indexed)

    def add(self, i: int, x: int) -> None:
        """Add ``x`` to the value stored at 1-based index ``i``."""
        b = self.__buf
        n = len(b)
        while i < n:
            b[i] += x
            i += i & -i  # jump to the next node whose range covers i

    def sum(self, i: int) -> int:
        """Return the prefix sum over indices 1..i."""
        b = self.__buf
        s = 0
        while i > 0:
            s += b[i]
            i -= i & -i  # strip the lowest set bit
        return s
import typing
class InversionCount():
    """Count inversions -- pairs (i, j), i < j, with a[i] > a[j] -- in
    O(n log n) using coordinate compression plus a Fenwick tree.

    NOTE(review): pairs of *equal* values are also counted (the tree
    query excludes only strictly smaller elements); harmless when the
    input values are distinct -- confirm for other uses.
    """

    def __call__(self, a: typing.List[int]) -> int:
        """Return the inversion count of ``a``."""
        # Annotations corrected from typing.NoReturn to None/int: these
        # methods return normally.
        self.__a = a
        self.__compress()
        self.__calc()
        return self.__cnt

    def __calc(self) -> None:
        # Left-to-right sweep: of the i elements already seen, those not
        # strictly smaller than a[i] each form one inversion with it.
        a = self.__a
        n = len(a)
        ft = FenwickTree(n)
        c = 0
        for i in range(n):
            x = a[i]
            c += i - ft.sum(x)   # ft.sum(x) = prior elements with rank < x
            ft.add(x + 1, 1)     # mark rank x (stored 1-indexed)
        self.__cnt = c

    def __compress(self) -> None:
        # Replace values by dense ranks so the Fenwick tree is sized n.
        fn = CompressArray()
        self.__a = fn(self.__a)
import sys
import typing
import numpy as np
class Problem(
    Solver,
):
    """Read n and two length-n integer arrays a, b; print the number of
    inversions of the permutation matching equal-ranked elements of a
    and b, or -1 when the (transformed) multisets differ.

    NOTE(review): the construction suggests a minimum-adjacent-swaps
    problem -- confirm against the original problem statement.
    """
    def __init__(
        self,
    ) -> None:
        # Token reader used for the leading scalar n.
        self.__read = ReadStdin()
    def _prepare(
        self,
    ) -> None:
        # Read n, then the remaining 2*n whitespace-separated integers
        # as rows a and b of a (2, n) array.
        read = self.__read
        n = read.int()
        a, b = np.array(
            sys.stdin.read().split(),
            dtype=np.int64,
        ).reshape(2, n)
        self.__n = n
        self.__a = a
        self.__b = b
    def _solve(
        self,
    ) -> None:
        # Transform both arrays, check feasibility, print the answer.
        self.__preprocess()
        self.__enumerate_sort()
        if not self.__achievable():
            print(-1); return
        self.__count_inversion()
        print(self.__res)
    def __count_inversion(
        self,
    ) -> None:
        # Build permutation c: for each sorted rank r, send a's original
        # index at rank r to b's original index at rank r; the answer is
        # the inversion count of c.
        a = self.__a
        b = self.__b
        n = a.shape[0]
        c = np.zeros(n, dtype=int)
        c[a[:, 1]] = b[:, 1]
        fn = InversionCount()
        self.__res = fn(c)
    def __enumerate_sort(
        self,
    ) -> None:
        # Pair every value with its original index as rows (value, idx),
        # then sort both arrays by value so equal ranks line up.
        a = self.__a
        b = self.__b
        n = a.size
        i = np.arange(n)
        a = np.vstack([a, i]).T
        b = np.vstack([b, i]).T
        i = np.argsort(a[:, 0])
        self.__a = a[i]
        i = np.argsort(b[:, 0])
        self.__b = b[i]
    def __achievable(
        self,
    ) -> bool:
        # Feasible iff the sorted transformed values coincide elementwise.
        return ((
            self.__a[:, 0]
            == self.__b[:, 0]
        ).all())
    def __preprocess(
        self,
    ) -> None:
        # Add the position i to a[i] and b[i] before sorting
        # (problem-specific transform -- NOTE(review): confirm intent
        # against the problem statement).
        n = self.__n
        i = np.arange(n)
        self.__a += i
        self.__b += i
def main():
    """Instantiate the solver and run it once per test case."""
    solver = Problem()
    n_cases = 1
    # n_cases = ReadStdin().int()
    for _ in range(n_cases):
        solver()


if __name__ == '__main__':
    main()
| [
"numpy.argsort",
"numpy.zeros",
"numpy.vstack",
"sys.stdin.read",
"numpy.arange"
] | [((3104, 3126), 'numpy.zeros', 'np.zeros', (['n'], {'dtype': 'int'}), '(n, dtype=int)\n', (3112, 3126), True, 'import numpy as np\n'), ((3318, 3330), 'numpy.arange', 'np.arange', (['n'], {}), '(n)\n', (3327, 3330), True, 'import numpy as np\n'), ((3395, 3414), 'numpy.argsort', 'np.argsort', (['a[:, 0]'], {}), '(a[:, 0])\n', (3405, 3414), True, 'import numpy as np\n'), ((3443, 3462), 'numpy.argsort', 'np.argsort', (['b[:, 0]'], {}), '(b[:, 0])\n', (3453, 3462), True, 'import numpy as np\n'), ((3683, 3695), 'numpy.arange', 'np.arange', (['n'], {}), '(n)\n', (3692, 3695), True, 'import numpy as np\n'), ((3339, 3356), 'numpy.vstack', 'np.vstack', (['[a, i]'], {}), '([a, i])\n', (3348, 3356), True, 'import numpy as np\n'), ((3367, 3384), 'numpy.vstack', 'np.vstack', (['[b, i]'], {}), '([b, i])\n', (3376, 3384), True, 'import numpy as np\n'), ((2653, 2669), 'sys.stdin.read', 'sys.stdin.read', ([], {}), '()\n', (2667, 2669), False, 'import sys\n')] |
import os, sys
# Make the package root (one level up) importable when the tests are run
# straight from this directory.
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
import unittest
import scikit_posthocs._posthocs as sp
import seaborn as sb
import numpy as np
class TestPosthocs(unittest.TestCase):
    """Regression tests for scikit-posthocs procedures.

    Each test compares the library output against a precomputed reference
    p-value matrix (apparently produced with R -- TODO confirm); diagonal
    entries are -1, matching the library's output convention.
    """

    # Shared fixtures: the seaborn "exercise" dataset for long-format
    # tests and a small block-design matrix for Friedman-style tests.
    df = sb.load_dataset("exercise")

    df_bn = np.array([[4,3,4,4,5,6,3],
                      [1,2,3,5,6,7,7],
                      [1,2,6,4,1,5,1]])

    def test_posthoc_conover(self):

        r_results = np.array([[-1, 1.131263e-02, 9.354690e-11],
                              [1.131263e-02, -1, 5.496288e-06],
                              [9.354690e-11, 5.496288e-06, -1]])

        results = sp.posthoc_conover(self.df, val_col = 'pulse', group_col = 'kind', p_adjust = 'holm')
        self.assertTrue(np.allclose(results, r_results))

    def test_posthoc_dunn(self):

        r_results = np.array([[-1, 4.390066e-02, 9.570998e-09],
                              [4.390066e-02, -1, 1.873208e-04],
                              [9.570998e-09, 1.873208e-04, -1]])

        results = sp.posthoc_dunn(self.df, val_col = 'pulse', group_col = 'kind', p_adjust = 'holm')
        self.assertTrue(np.allclose(results, r_results, atol=1.e-4))

    def test_posthoc_nemenyi(self):

        r_results = np.array([[-1, 1.313107e-01, 2.431833e-08],
                              [1.313107e-01, -1, 4.855675e-04],
                              [2.431833e-08, 4.855675e-04, -1]])

        results = sp.posthoc_nemenyi(self.df, val_col = 'pulse', group_col = 'kind')
        self.assertTrue(np.allclose(results, r_results, atol=1.e-4))

    def test_posthoc_nemenyi_friedman(self):

        p_results = np.array([[-1., 0.9, 0.82163255, 0.9, 0.9, 0.21477876, 0.9],
                              [0.9, -1., 0.87719193, 0.9, 0.9, 0.25967965, 0.9],
                              [0.82163255, 0.87719193, -1., 0.9, 0.9, 0.9, 0.9],
                              [0.9, 0.9, 0.9, -1., 0.9, 0.87719193, 0.9],
                              [0.9, 0.9, 0.9, 0.9, -1., 0.87719193, 0.9],
                              [0.21477876, 0.25967965, 0.9, 0.87719193, 0.87719193, -1., 0.54381888],
                              [0.9, 0.9, 0.9, 0.9, 0.9, 0.54381888, -1.]])
        results = sp.posthoc_nemenyi_friedman(self.df_bn)
        self.assertTrue(np.allclose(results, p_results, atol=1.e-4))

    def test_posthoc_conover_friedman(self):

        results = sp.posthoc_conover_friedman(self.df_bn)
        p_results = np.array([[-1.000000, 0.935333, 0.268619, 0.339721, 0.339721, 0.060540, 0.628079],
                              [0.935333, -1.000000, 0.302605, 0.380025, 0.380025, 0.070050, 0.685981],
                              [0.268619, 0.302605, -1.000000, 0.871144, 0.871144, 0.380025, 0.519961],
                              [0.339721, 0.380025, 0.871144, -1.000000, 1.000000, 0.302605, 0.628079],
                              [0.339721, 0.380025, 0.871144, 1.000000, -1.000000, 0.302605, 0.628079],
                              [0.060540, 0.070050, 0.380025, 0.302605, 0.302605, -1.000000, 0.141412],
                              [0.628079, 0.685981, 0.519961, 0.628079, 0.628079, 0.141412, -1.000000]])
        self.assertTrue(np.allclose(results, p_results))

    def test_posthoc_miller_friedman(self):

        results = sp.posthoc_miller_friedman(self.df_bn)

        p_results = np.array([[-1.0, 1.0, 0.9411963, 0.9724396000000001, 0.9724396000000001, 0.4717981, 0.9993864],
                              [1.0, -1.0, 0.9588993, 0.9823818000000001, 0.9823818000000001, 0.5256257, 0.9997869],
                              [0.9411963, 0.9588993, -1.0, 0.9999991, 0.9999991, 0.9823818000000001, 0.9968575999999999],
                              [0.9724396000000001, 0.9823818000000001, 0.9999991, -1.0, 1.0, 0.9588993, 0.9993864],
                              [0.9724396000000001, 0.9823818000000001, 0.9999991, 1.0, -1.0, 0.9588993, 0.9993864],
                              [0.4717981, 0.5256257, 0.9823818000000001, 0.9588993, 0.9588993, -1.0, 0.7803545999999999],
                              [0.9993864, 0.9997869, 0.9968575999999999, 0.9993864, 0.9993864, 0.7803545999999999, -1.0]])
        self.assertTrue(np.allclose(results, p_results))

    def test_posthoc_siegel_friedman(self):

        results = sp.posthoc_siegel_friedman(self.df_bn)

        p_results = np.array([[-1.000000, 0.92471904, 0.18587673, 0.25683926, 0.25683926, 0.01816302, 0.57075039],
                              [0.92471904, -1.0000000, 0.2193026, 0.2986177, 0.2986177, 0.0233422, 0.6366016],
                              [0.18587673, 0.2193026, -1.0000000, 0.8501067, 0.8501067, 0.2986177, 0.4496918],
                              [0.25683926, 0.2986177, 0.8501067, -1.000000, 1.0000000, 0.2193026, 0.5707504],
                              [0.25683926, 0.2986177, 0.8501067, 1.0000000, -1.0000000, 0.2193026, 0.5707504],
                              [0.01816302, 0.0233422, 0.2986177, 0.2193026, 0.2193026, -1.000000, 0.07260094],
                              [0.57075039, 0.6366016, 0.4496918, 0.5707504, 0.5707504, 0.07260094, -1.000000]])
        self.assertTrue(np.allclose(results, p_results))

    def test_posthoc_durbin(self):

        results = sp.posthoc_durbin(self.df_bn, p_adjust = 'holm')

        p_results = np.array([[-1.000000, 1.000000, 1.0, 1.0, 1.0, 0.381364, 1.0],
                              [1.000000, -1.000000, 1.0, 1.0, 1.0, 0.444549, 1.0],
                              [1.000000, 1.000000, -1.0, 1.0, 1.0, 1.000000, 1.0],
                              [1.000000, 1.000000, 1.0, -1.0, 1.0, 1.000000, 1.0],
                              [1.000000, 1.000000, 1.0, 1.0, -1.0, 1.000000, 1.0],
                              [0.381364, 0.444549, 1.0, 1.0, 1.0, -1.000000, 1.0],
                              [1.000000, 1.000000, 1.0, 1.0, 1.0, 1.000000, -1.0]])
        self.assertTrue(np.allclose(results, p_results))

    def test_posthoc_quade(self):

        results = sp.posthoc_quade(self.df_bn)

        p_results = np.array([[-1.00000000, 0.67651326, 0.15432143, 0.17954686, 0.2081421 , 0.02267043, 0.2081421],
                              [ 0.67651326,-1.00000000, 0.29595042, 0.33809987, 0.38443835, 0.0494024 , 0.38443835],
                              [ 0.15432143, 0.29595042,-1.00000000, 0.92586499, 0.85245022, 0.29595042, 0.85245022],
                              [ 0.17954686, 0.33809987, 0.92586499,-1.00000000, 0.92586499, 0.25789648, 0.92586499],
                              [ 0.2081421 , 0.38443835, 0.85245022, 0.92586499,-1.00000000, 0.22378308, 1.00000000],
                              [ 0.02267043, 0.0494024 , 0.29595042, 0.25789648, 0.22378308,-1.00000000, 0.22378308],
                              [ 0.2081421 , 0.38443835, 0.85245022, 0.92586499, 1.00000000, 0.22378308,-1.00000000]])
        self.assertTrue(np.allclose(results, p_results))

    def test_posthoc_vanwaerden(self):

        r_results = np.array([[-1, 1.054709e-02, 6.476665e-11],
                              [1.054709e-02, -1, 4.433141e-06],
                              [6.476665e-11, 4.433141e-06, -1]])

        results = sp.posthoc_vanwaerden(self.df, val_col = 'pulse', group_col = 'kind', p_adjust='holm')
        self.assertTrue(np.allclose(results, r_results))

    def test_posthoc_ttest(self):

        r_results = np.array([[-1, 9.757069e-03, 4.100954e-07],
                              [9.757069e-03, -1, 1.556010e-05],
                              [4.100954e-07, 1.556010e-05, -1]])

        results = sp.posthoc_ttest(self.df, val_col = 'pulse', group_col = 'kind', equal_var = False, p_adjust='holm')
        self.assertTrue(np.allclose(results, r_results))

    def test_posthoc_tukey_hsd(self):

        x = [[1,2,3,4,5], [35,31,75,40,21], [10,6,9,6,1]]
        g = [['a'] * 5, ['b'] * 5, ['c'] * 5]

        results = sp.posthoc_tukey_hsd(np.concatenate(x), np.concatenate(g))
        # Significance matrix (1 = reject, 0 = fail to reject), not p-values.
        n_results = np.array([[-1, 1, 0],[ 1, -1, 1],[ 0, 1, -1]])
        self.assertTrue(np.allclose(n_results, results))

    def test_posthoc_mannwhitney(self):

        r_results = np.array([[-1, 1.714393e-02, 3.420508e-08],
                              [1.714393e-02, -1, 1.968352e-05],
                              [3.420508e-08, 1.968352e-05, -1]])

        results = sp.posthoc_mannwhitney(self.df, val_col = 'pulse', group_col = 'kind')
        self.assertTrue(np.allclose(results, r_results))

    def test_posthoc_wilcoxon(self):

        r_results = np.array([[-1, 2.337133e-03, 2.857818e-06],
                              [2.337133e-03, -1, 1.230888e-05],
                              [2.857818e-06, 1.230888e-05, -1]])

        results = sp.posthoc_wilcoxon(self.df.sort_index(), val_col = 'pulse', group_col = 'kind')
        self.assertTrue(np.allclose(results, r_results))

    def test_posthoc_scheffe(self):

        r_results = np.array([[-1, 3.378449e-01, 3.047472e-10],
                              [3.378449e-01, -1, 2.173209e-07],
                              [3.047472e-10, 2.173209e-07, -1]])

        results = sp.posthoc_scheffe(self.df.sort_index(), val_col = 'pulse', group_col = 'kind')
        self.assertTrue(np.allclose(results, r_results))

    def test_posthoc_tamhane(self):

        r_results = np.array([[-1, 2.898653e-02, 4.100954e-07],
                              [2.898653e-02, -1, 2.333996e-05],
                              [4.100954e-07, 2.333996e-05, -1]])

        results = sp.posthoc_tamhane(self.df.sort_index(), val_col = 'pulse', group_col = 'kind')
        self.assertTrue(np.allclose(results, r_results))

    def test_posthoc_tukey(self):

        r_results = np.array([[-1, 3.042955e-01, 4.308631e-10],
                              [3.042955e-01, -1, 9.946571e-08],
                              [4.308631e-10, 9.946571e-08, -1]])

        results = sp.posthoc_tukey(self.df.sort_index(), val_col = 'pulse', group_col = 'kind')
        # (removed a stray debug print(results) left in the original)
        self.assertTrue(np.allclose(results, r_results, atol=1.e-3))
# Run the full suite when the module is executed directly.
if __name__ == '__main__':
    unittest.main()
| [
"scikit_posthocs._posthocs.posthoc_vanwaerden",
"numpy.allclose",
"scikit_posthocs._posthocs.posthoc_durbin",
"scikit_posthocs._posthocs.posthoc_conover",
"seaborn.load_dataset",
"scikit_posthocs._posthocs.posthoc_conover_friedman",
"numpy.array",
"os.path.dirname",
"scikit_posthocs._posthocs.postho... | [((241, 268), 'seaborn.load_dataset', 'sb.load_dataset', (['"""exercise"""'], {}), "('exercise')\n", (256, 268), True, 'import seaborn as sb\n'), ((281, 360), 'numpy.array', 'np.array', (['[[4, 3, 4, 4, 5, 6, 3], [1, 2, 3, 5, 6, 7, 7], [1, 2, 6, 4, 1, 5, 1]]'], {}), '([[4, 3, 4, 4, 5, 6, 3], [1, 2, 3, 5, 6, 7, 7], [1, 2, 6, 4, 1, 5, 1]])\n', (289, 360), True, 'import numpy as np\n'), ((9977, 9992), 'unittest.main', 'unittest.main', ([], {}), '()\n', (9990, 9992), False, 'import unittest\n'), ((445, 556), 'numpy.array', 'np.array', (['[[-1, 0.01131263, 9.35469e-11], [0.01131263, -1, 5.496288e-06], [\n 9.35469e-11, 5.496288e-06, -1]]'], {}), '([[-1, 0.01131263, 9.35469e-11], [0.01131263, -1, 5.496288e-06], [\n 9.35469e-11, 5.496288e-06, -1]])\n', (453, 556), True, 'import numpy as np\n'), ((637, 716), 'scikit_posthocs._posthocs.posthoc_conover', 'sp.posthoc_conover', (['self.df'], {'val_col': '"""pulse"""', 'group_col': '"""kind"""', 'p_adjust': '"""holm"""'}), "(self.df, val_col='pulse', group_col='kind', p_adjust='holm')\n", (655, 716), True, 'import scikit_posthocs._posthocs as sp\n'), ((835, 948), 'numpy.array', 'np.array', (['[[-1, 0.04390066, 9.570998e-09], [0.04390066, -1, 0.0001873208], [\n 9.570998e-09, 0.0001873208, -1]]'], {}), '([[-1, 0.04390066, 9.570998e-09], [0.04390066, -1, 0.0001873208], [\n 9.570998e-09, 0.0001873208, -1]])\n', (843, 948), True, 'import numpy as np\n'), ((1027, 1103), 'scikit_posthocs._posthocs.posthoc_dunn', 'sp.posthoc_dunn', (['self.df'], {'val_col': '"""pulse"""', 'group_col': '"""kind"""', 'p_adjust': '"""holm"""'}), "(self.df, val_col='pulse', group_col='kind', p_adjust='holm')\n", (1042, 1103), True, 'import scikit_posthocs._posthocs as sp\n'), ((1237, 1348), 'numpy.array', 'np.array', (['[[-1, 0.1313107, 2.431833e-08], [0.1313107, -1, 0.0004855675], [\n 2.431833e-08, 0.0004855675, -1]]'], {}), '([[-1, 0.1313107, 2.431833e-08], [0.1313107, -1, 0.0004855675], [\n 2.431833e-08, 
0.0004855675, -1]])\n', (1245, 1348), True, 'import numpy as np\n'), ((1429, 1491), 'scikit_posthocs._posthocs.posthoc_nemenyi', 'sp.posthoc_nemenyi', (['self.df'], {'val_col': '"""pulse"""', 'group_col': '"""kind"""'}), "(self.df, val_col='pulse', group_col='kind')\n", (1447, 1491), True, 'import scikit_posthocs._posthocs as sp\n'), ((1632, 2030), 'numpy.array', 'np.array', (['[[-1.0, 0.9, 0.82163255, 0.9, 0.9, 0.21477876, 0.9], [0.9, -1.0, 0.87719193,\n 0.9, 0.9, 0.25967965, 0.9], [0.82163255, 0.87719193, -1.0, 0.9, 0.9, \n 0.9, 0.9], [0.9, 0.9, 0.9, -1.0, 0.9, 0.87719193, 0.9], [0.9, 0.9, 0.9,\n 0.9, -1.0, 0.87719193, 0.9], [0.21477876, 0.25967965, 0.9, 0.87719193, \n 0.87719193, -1.0, 0.54381888], [0.9, 0.9, 0.9, 0.9, 0.9, 0.54381888, -1.0]]'], {}), '([[-1.0, 0.9, 0.82163255, 0.9, 0.9, 0.21477876, 0.9], [0.9, -1.0, \n 0.87719193, 0.9, 0.9, 0.25967965, 0.9], [0.82163255, 0.87719193, -1.0, \n 0.9, 0.9, 0.9, 0.9], [0.9, 0.9, 0.9, -1.0, 0.9, 0.87719193, 0.9], [0.9,\n 0.9, 0.9, 0.9, -1.0, 0.87719193, 0.9], [0.21477876, 0.25967965, 0.9, \n 0.87719193, 0.87719193, -1.0, 0.54381888], [0.9, 0.9, 0.9, 0.9, 0.9, \n 0.54381888, -1.0]])\n', (1640, 2030), True, 'import numpy as np\n'), ((2198, 2237), 'scikit_posthocs._posthocs.posthoc_nemenyi_friedman', 'sp.posthoc_nemenyi_friedman', (['self.df_bn'], {}), '(self.df_bn)\n', (2225, 2237), True, 'import scikit_posthocs._posthocs as sp\n'), ((2372, 2411), 'scikit_posthocs._posthocs.posthoc_conover_friedman', 'sp.posthoc_conover_friedman', (['self.df_bn'], {}), '(self.df_bn)\n', (2399, 2411), True, 'import scikit_posthocs._posthocs as sp\n'), ((2432, 2932), 'numpy.array', 'np.array', (['[[-1.0, 0.935333, 0.268619, 0.339721, 0.339721, 0.06054, 0.628079], [\n 0.935333, -1.0, 0.302605, 0.380025, 0.380025, 0.07005, 0.685981], [\n 0.268619, 0.302605, -1.0, 0.871144, 0.871144, 0.380025, 0.519961], [\n 0.339721, 0.380025, 0.871144, -1.0, 1.0, 0.302605, 0.628079], [0.339721,\n 0.380025, 0.871144, 1.0, -1.0, 0.302605, 0.628079], [0.06054, 
0.07005, \n 0.380025, 0.302605, 0.302605, -1.0, 0.141412], [0.628079, 0.685981, \n 0.519961, 0.628079, 0.628079, 0.141412, -1.0]]'], {}), '([[-1.0, 0.935333, 0.268619, 0.339721, 0.339721, 0.06054, 0.628079],\n [0.935333, -1.0, 0.302605, 0.380025, 0.380025, 0.07005, 0.685981], [\n 0.268619, 0.302605, -1.0, 0.871144, 0.871144, 0.380025, 0.519961], [\n 0.339721, 0.380025, 0.871144, -1.0, 1.0, 0.302605, 0.628079], [0.339721,\n 0.380025, 0.871144, 1.0, -1.0, 0.302605, 0.628079], [0.06054, 0.07005, \n 0.380025, 0.302605, 0.302605, -1.0, 0.141412], [0.628079, 0.685981, \n 0.519961, 0.628079, 0.628079, 0.141412, -1.0]])\n', (2440, 2932), True, 'import numpy as np\n'), ((3255, 3293), 'scikit_posthocs._posthocs.posthoc_miller_friedman', 'sp.posthoc_miller_friedman', (['self.df_bn'], {}), '(self.df_bn)\n', (3281, 3293), True, 'import scikit_posthocs._posthocs as sp\n'), ((3315, 3982), 'numpy.array', 'np.array', (['[[-1.0, 1.0, 0.9411963, 0.9724396000000001, 0.9724396000000001, 0.4717981, \n 0.9993864], [1.0, -1.0, 0.9588993, 0.9823818000000001, \n 0.9823818000000001, 0.5256257, 0.9997869], [0.9411963, 0.9588993, -1.0,\n 0.9999991, 0.9999991, 0.9823818000000001, 0.9968575999999999], [\n 0.9724396000000001, 0.9823818000000001, 0.9999991, -1.0, 1.0, 0.9588993,\n 0.9993864], [0.9724396000000001, 0.9823818000000001, 0.9999991, 1.0, -\n 1.0, 0.9588993, 0.9993864], [0.4717981, 0.5256257, 0.9823818000000001, \n 0.9588993, 0.9588993, -1.0, 0.7803545999999999], [0.9993864, 0.9997869,\n 0.9968575999999999, 0.9993864, 0.9993864, 0.7803545999999999, -1.0]]'], {}), '([[-1.0, 1.0, 0.9411963, 0.9724396000000001, 0.9724396000000001, \n 0.4717981, 0.9993864], [1.0, -1.0, 0.9588993, 0.9823818000000001, \n 0.9823818000000001, 0.5256257, 0.9997869], [0.9411963, 0.9588993, -1.0,\n 0.9999991, 0.9999991, 0.9823818000000001, 0.9968575999999999], [\n 0.9724396000000001, 0.9823818000000001, 0.9999991, -1.0, 1.0, 0.9588993,\n 0.9993864], [0.9724396000000001, 0.9823818000000001, 0.9999991, 1.0, -\n 1.0, 
0.9588993, 0.9993864], [0.4717981, 0.5256257, 0.9823818000000001, \n 0.9588993, 0.9588993, -1.0, 0.7803545999999999], [0.9993864, 0.9997869,\n 0.9968575999999999, 0.9993864, 0.9993864, 0.7803545999999999, -1.0]])\n', (3323, 3982), True, 'import numpy as np\n'), ((4249, 4287), 'scikit_posthocs._posthocs.posthoc_siegel_friedman', 'sp.posthoc_siegel_friedman', (['self.df_bn'], {}), '(self.df_bn)\n', (4275, 4287), True, 'import scikit_posthocs._posthocs as sp\n'), ((4309, 4872), 'numpy.array', 'np.array', (['[[-1.0, 0.92471904, 0.18587673, 0.25683926, 0.25683926, 0.01816302, \n 0.57075039], [0.92471904, -1.0, 0.2193026, 0.2986177, 0.2986177, \n 0.0233422, 0.6366016], [0.18587673, 0.2193026, -1.0, 0.8501067, \n 0.8501067, 0.2986177, 0.4496918], [0.25683926, 0.2986177, 0.8501067, -\n 1.0, 1.0, 0.2193026, 0.5707504], [0.25683926, 0.2986177, 0.8501067, 1.0,\n -1.0, 0.2193026, 0.5707504], [0.01816302, 0.0233422, 0.2986177, \n 0.2193026, 0.2193026, -1.0, 0.07260094], [0.57075039, 0.6366016, \n 0.4496918, 0.5707504, 0.5707504, 0.07260094, -1.0]]'], {}), '([[-1.0, 0.92471904, 0.18587673, 0.25683926, 0.25683926, 0.01816302,\n 0.57075039], [0.92471904, -1.0, 0.2193026, 0.2986177, 0.2986177, \n 0.0233422, 0.6366016], [0.18587673, 0.2193026, -1.0, 0.8501067, \n 0.8501067, 0.2986177, 0.4496918], [0.25683926, 0.2986177, 0.8501067, -\n 1.0, 1.0, 0.2193026, 0.5707504], [0.25683926, 0.2986177, 0.8501067, 1.0,\n -1.0, 0.2193026, 0.5707504], [0.01816302, 0.0233422, 0.2986177, \n 0.2193026, 0.2193026, -1.0, 0.07260094], [0.57075039, 0.6366016, \n 0.4496918, 0.5707504, 0.5707504, 0.07260094, -1.0]])\n', (4317, 4872), True, 'import numpy as np\n'), ((5182, 5228), 'scikit_posthocs._posthocs.posthoc_durbin', 'sp.posthoc_durbin', (['self.df_bn'], {'p_adjust': '"""holm"""'}), "(self.df_bn, p_adjust='holm')\n", (5199, 5228), True, 'import scikit_posthocs._posthocs as sp\n'), ((5252, 5567), 'numpy.array', 'np.array', (['[[-1.0, 1.0, 1.0, 1.0, 1.0, 0.381364, 1.0], [1.0, -1.0, 1.0, 1.0, 1.0, \n 
0.444549, 1.0], [1.0, 1.0, -1.0, 1.0, 1.0, 1.0, 1.0], [1.0, 1.0, 1.0, -\n 1.0, 1.0, 1.0, 1.0], [1.0, 1.0, 1.0, 1.0, -1.0, 1.0, 1.0], [0.381364, \n 0.444549, 1.0, 1.0, 1.0, -1.0, 1.0], [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, -1.0]]'], {}), '([[-1.0, 1.0, 1.0, 1.0, 1.0, 0.381364, 1.0], [1.0, -1.0, 1.0, 1.0, \n 1.0, 0.444549, 1.0], [1.0, 1.0, -1.0, 1.0, 1.0, 1.0, 1.0], [1.0, 1.0, \n 1.0, -1.0, 1.0, 1.0, 1.0], [1.0, 1.0, 1.0, 1.0, -1.0, 1.0, 1.0], [\n 0.381364, 0.444549, 1.0, 1.0, 1.0, -1.0, 1.0], [1.0, 1.0, 1.0, 1.0, 1.0,\n 1.0, -1.0]])\n', (5260, 5567), True, 'import numpy as np\n'), ((5924, 5952), 'scikit_posthocs._posthocs.posthoc_quade', 'sp.posthoc_quade', (['self.df_bn'], {}), '(self.df_bn)\n', (5940, 5952), True, 'import scikit_posthocs._posthocs as sp\n'), ((5974, 6557), 'numpy.array', 'np.array', (['[[-1.0, 0.67651326, 0.15432143, 0.17954686, 0.2081421, 0.02267043, \n 0.2081421], [0.67651326, -1.0, 0.29595042, 0.33809987, 0.38443835, \n 0.0494024, 0.38443835], [0.15432143, 0.29595042, -1.0, 0.92586499, \n 0.85245022, 0.29595042, 0.85245022], [0.17954686, 0.33809987, \n 0.92586499, -1.0, 0.92586499, 0.25789648, 0.92586499], [0.2081421, \n 0.38443835, 0.85245022, 0.92586499, -1.0, 0.22378308, 1.0], [0.02267043,\n 0.0494024, 0.29595042, 0.25789648, 0.22378308, -1.0, 0.22378308], [\n 0.2081421, 0.38443835, 0.85245022, 0.92586499, 1.0, 0.22378308, -1.0]]'], {}), '([[-1.0, 0.67651326, 0.15432143, 0.17954686, 0.2081421, 0.02267043,\n 0.2081421], [0.67651326, -1.0, 0.29595042, 0.33809987, 0.38443835, \n 0.0494024, 0.38443835], [0.15432143, 0.29595042, -1.0, 0.92586499, \n 0.85245022, 0.29595042, 0.85245022], [0.17954686, 0.33809987, \n 0.92586499, -1.0, 0.92586499, 0.25789648, 0.92586499], [0.2081421, \n 0.38443835, 0.85245022, 0.92586499, -1.0, 0.22378308, 1.0], [0.02267043,\n 0.0494024, 0.29595042, 0.25789648, 0.22378308, -1.0, 0.22378308], [\n 0.2081421, 0.38443835, 0.85245022, 0.92586499, 1.0, 0.22378308, -1.0]])\n', (5982, 6557), True, 'import numpy as np\n'), ((6890, 
7003), 'numpy.array', 'np.array', (['[[-1, 0.01054709, 6.476665e-11], [0.01054709, -1, 4.433141e-06], [\n 6.476665e-11, 4.433141e-06, -1]]'], {}), '([[-1, 0.01054709, 6.476665e-11], [0.01054709, -1, 4.433141e-06], [\n 6.476665e-11, 4.433141e-06, -1]])\n', (6898, 7003), True, 'import numpy as np\n'), ((7082, 7169), 'scikit_posthocs._posthocs.posthoc_vanwaerden', 'sp.posthoc_vanwaerden', (['self.df'], {'val_col': '"""pulse"""', 'group_col': '"""kind"""', 'p_adjust': '"""holm"""'}), "(self.df, val_col='pulse', group_col='kind', p_adjust=\n 'holm')\n", (7103, 7169), True, 'import scikit_posthocs._posthocs as sp\n'), ((7282, 7394), 'numpy.array', 'np.array', (['[[-1, 0.009757069, 4.100954e-07], [0.009757069, -1, 1.55601e-05], [\n 4.100954e-07, 1.55601e-05, -1]]'], {}), '([[-1, 0.009757069, 4.100954e-07], [0.009757069, -1, 1.55601e-05],\n [4.100954e-07, 1.55601e-05, -1]])\n', (7290, 7394), True, 'import numpy as np\n'), ((7474, 7573), 'scikit_posthocs._posthocs.posthoc_ttest', 'sp.posthoc_ttest', (['self.df'], {'val_col': '"""pulse"""', 'group_col': '"""kind"""', 'equal_var': '(False)', 'p_adjust': '"""holm"""'}), "(self.df, val_col='pulse', group_col='kind', equal_var=\n False, p_adjust='holm')\n", (7490, 7573), True, 'import scikit_posthocs._posthocs as sp\n'), ((7873, 7919), 'numpy.array', 'np.array', (['[[-1, 1, 0], [1, -1, 1], [0, 1, -1]]'], {}), '([[-1, 1, 0], [1, -1, 1], [0, 1, -1]])\n', (7881, 7919), True, 'import numpy as np\n'), ((8043, 8156), 'numpy.array', 'np.array', (['[[-1, 0.01714393, 3.420508e-08], [0.01714393, -1, 1.968352e-05], [\n 3.420508e-08, 1.968352e-05, -1]]'], {}), '([[-1, 0.01714393, 3.420508e-08], [0.01714393, -1, 1.968352e-05], [\n 3.420508e-08, 1.968352e-05, -1]])\n', (8051, 8156), True, 'import numpy as np\n'), ((8235, 8301), 'scikit_posthocs._posthocs.posthoc_mannwhitney', 'sp.posthoc_mannwhitney', (['self.df'], {'val_col': '"""pulse"""', 'group_col': '"""kind"""'}), "(self.df, val_col='pulse', group_col='kind')\n", (8257, 8301), True, 
'import scikit_posthocs._posthocs as sp\n'), ((8422, 8536), 'numpy.array', 'np.array', (['[[-1, 0.002337133, 2.857818e-06], [0.002337133, -1, 1.230888e-05], [\n 2.857818e-06, 1.230888e-05, -1]]'], {}), '([[-1, 0.002337133, 2.857818e-06], [0.002337133, -1, 1.230888e-05],\n [2.857818e-06, 1.230888e-05, -1]])\n', (8430, 8536), True, 'import numpy as np\n'), ((8810, 8921), 'numpy.array', 'np.array', (['[[-1, 0.3378449, 3.047472e-10], [0.3378449, -1, 2.173209e-07], [\n 3.047472e-10, 2.173209e-07, -1]]'], {}), '([[-1, 0.3378449, 3.047472e-10], [0.3378449, -1, 2.173209e-07], [\n 3.047472e-10, 2.173209e-07, -1]])\n', (8818, 8921), True, 'import numpy as np\n'), ((9197, 9310), 'numpy.array', 'np.array', (['[[-1, 0.02898653, 4.100954e-07], [0.02898653, -1, 2.333996e-05], [\n 4.100954e-07, 2.333996e-05, -1]]'], {}), '([[-1, 0.02898653, 4.100954e-07], [0.02898653, -1, 2.333996e-05], [\n 4.100954e-07, 2.333996e-05, -1]])\n', (9205, 9310), True, 'import numpy as np\n'), ((9581, 9692), 'numpy.array', 'np.array', (['[[-1, 0.3042955, 4.308631e-10], [0.3042955, -1, 9.946571e-08], [\n 4.308631e-10, 9.946571e-08, -1]]'], {}), '([[-1, 0.3042955, 4.308631e-10], [0.3042955, -1, 9.946571e-08], [\n 4.308631e-10, 9.946571e-08, -1]])\n', (9589, 9692), True, 'import numpy as np\n'), ((60, 85), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (75, 85), False, 'import os, sys\n'), ((747, 778), 'numpy.allclose', 'np.allclose', (['results', 'r_results'], {}), '(results, r_results)\n', (758, 778), True, 'import numpy as np\n'), ((1134, 1178), 'numpy.allclose', 'np.allclose', (['results', 'r_results'], {'atol': '(0.0001)'}), '(results, r_results, atol=0.0001)\n', (1145, 1178), True, 'import numpy as np\n'), ((1520, 1564), 'numpy.allclose', 'np.allclose', (['results', 'r_results'], {'atol': '(0.0001)'}), '(results, r_results, atol=0.0001)\n', (1531, 1564), True, 'import numpy as np\n'), ((2262, 2306), 'numpy.allclose', 'np.allclose', (['results', 'p_results'], {'atol': 
'(0.0001)'}), '(results, p_results, atol=0.0001)\n', (2273, 2306), True, 'import numpy as np\n'), ((3158, 3189), 'numpy.allclose', 'np.allclose', (['results', 'p_results'], {}), '(results, p_results)\n', (3169, 3189), True, 'import numpy as np\n'), ((4151, 4182), 'numpy.allclose', 'np.allclose', (['results', 'p_results'], {}), '(results, p_results)\n', (4162, 4182), True, 'import numpy as np\n'), ((5095, 5126), 'numpy.allclose', 'np.allclose', (['results', 'p_results'], {}), '(results, p_results)\n', (5106, 5126), True, 'import numpy as np\n'), ((5838, 5869), 'numpy.allclose', 'np.allclose', (['results', 'p_results'], {}), '(results, p_results)\n', (5849, 5869), True, 'import numpy as np\n'), ((6797, 6828), 'numpy.allclose', 'np.allclose', (['results', 'p_results'], {}), '(results, p_results)\n', (6808, 6828), True, 'import numpy as np\n'), ((7193, 7224), 'numpy.allclose', 'np.allclose', (['results', 'r_results'], {}), '(results, r_results)\n', (7204, 7224), True, 'import numpy as np\n'), ((7599, 7630), 'numpy.allclose', 'np.allclose', (['results', 'r_results'], {}), '(results, r_results)\n', (7610, 7630), True, 'import numpy as np\n'), ((7815, 7832), 'numpy.concatenate', 'np.concatenate', (['x'], {}), '(x)\n', (7829, 7832), True, 'import numpy as np\n'), ((7834, 7851), 'numpy.concatenate', 'np.concatenate', (['g'], {}), '(g)\n', (7848, 7851), True, 'import numpy as np\n'), ((7948, 7979), 'numpy.allclose', 'np.allclose', (['n_results', 'results'], {}), '(n_results, results)\n', (7959, 7979), True, 'import numpy as np\n'), ((8330, 8361), 'numpy.allclose', 'np.allclose', (['results', 'r_results'], {}), '(results, r_results)\n', (8341, 8361), True, 'import numpy as np\n'), ((8719, 8750), 'numpy.allclose', 'np.allclose', (['results', 'r_results'], {}), '(results, r_results)\n', (8730, 8750), True, 'import numpy as np\n'), ((9106, 9137), 'numpy.allclose', 'np.allclose', (['results', 'r_results'], {}), '(results, r_results)\n', (9117, 9137), True, 'import numpy as 
np\n'), ((9493, 9524), 'numpy.allclose', 'np.allclose', (['results', 'r_results'], {}), '(results, r_results)\n', (9504, 9524), True, 'import numpy as np\n'), ((9898, 9941), 'numpy.allclose', 'np.allclose', (['results', 'r_results'], {'atol': '(0.001)'}), '(results, r_results, atol=0.001)\n', (9909, 9941), True, 'import numpy as np\n')] |
from matplotlib import pyplot, ticker
import numpy as np
# import seaborn as sns
from scipy import stats
import yt
from grid_figure import GridFigure
if __name__ == "__main__":
    # Three vertically stacked panels sharing the x axis, no gap between them.
    my_fig = GridFigure(3, 1, figsize=(4.5, 7),
                        left_buffer=0.22, right_buffer=0.02,
                        bottom_buffer=0.09, top_buffer=0.02,
                        vertical_buffer=0)

    # Hard-coded seaborn "colorblind" palette so seaborn is not a dependency.
    # palette = sns.color_palette(palette="colorblind")
    palette = \
        [(0.0, 0.4470588235294118, 0.6980392156862745),
         (0.0, 0.6196078431372549, 0.45098039215686275),
         (0.8352941176470589, 0.3686274509803922, 0.0),
         (0.8, 0.4745098039215686, 0.6549019607843137),
         (0.9411764705882353, 0.8941176470588236, 0.25882352941176473),
         (0.33725490196078434, 0.7058823529411765, 0.9137254901960784)]

    labels = ["rare peak", "normal", "void"]
    ds_list = [
        yt.load("Rarepeak_LWB/black_hole_growth_stats.h5"),
        yt.load("normal_BG1/black_hole_growth_stats.h5"),
        yt.load("void_BG1/black_hole_growth_stats.h5")
    ]

    t_growth = [ds.data["growth_time"].to("Myr") for ds in ds_list]
    # NOTE(review): 2.2e-8 presumably normalizes to an Eddington-limited
    # specific growth rate — confirm units against the stats files.
    r_growth = [ds.data["max_growth_rate"]/2.2e-8 for ds in ds_list]

    for i, my_axes in enumerate(my_fig):
        my_axes.set_xscale("linear")
        my_axes.set_yscale("log")

        # Bug fix: the original assigned `ip = i` and then branched on
        # `if ip == i:`, making the else branch (s=3, alpha=0.2, label="")
        # unreachable dead code; only the live path is kept.
        s = 0.5
        alpha = 0.7
        label = labels[i]

        # Only plot black holes that actually grew.
        my_filter = r_growth[i] > 0
        my_axes.scatter(t_growth[i][my_filter], r_growth[i][my_filter],
                        color=palette[i],
                        s=s, alpha=alpha, label=label,
                        rasterized=True)

        my_axes.legend(loc="lower right", frameon=False,
                       numpoints=3, markerfirst=False,
                       markerscale=5, handletextpad=0)

        my_axes.set_xlim(0, 350)
        my_axes.xaxis.set_ticks(np.linspace(0, 300, 4))
        my_axes.xaxis.set_ticks(np.linspace(0, 350, 15), minor=True)
        my_axes.xaxis.set_minor_formatter(ticker.NullFormatter())

        # Only the bottom panel carries x tick labels and the axis title.
        if i < len(ds_list) - 1:
            my_axes.xaxis.set_ticklabels([])
        else:
            my_axes.xaxis.set_label_text("black hole age [Myr]")

        # Hide the top y tick label on the lower panels so labels of
        # adjacent panels do not collide.
        if i > 0:
            tl = my_axes.yaxis.get_ticklabels()
            tl[-1].set_visible(False)

        my_axes.yaxis.set_label_text("M$_{\\rm f}$ / M$_{\\rm i}$ - 1")

        # Faint dotted gridlines at the major tick positions.
        for my_x in np.linspace(0, 300, 4):
            my_axes.axvline(x=my_x, color="black", linestyle=":",
                            linewidth=0.75, alpha=0.2, zorder=-100)
        for my_y in np.logspace(-15, 0, 4):
            my_axes.axhline(y=my_y, color="black", linestyle=":",
                            linewidth=0.75, alpha=0.2, zorder=-100)

    pyplot.savefig("figures/t_growth-max_eddington.pdf")
| [
"matplotlib.ticker.NullFormatter",
"matplotlib.pyplot.savefig",
"numpy.linspace",
"yt.load",
"grid_figure.GridFigure",
"numpy.logspace"
] | [((192, 323), 'grid_figure.GridFigure', 'GridFigure', (['(3)', '(1)'], {'figsize': '(4.5, 7)', 'left_buffer': '(0.22)', 'right_buffer': '(0.02)', 'bottom_buffer': '(0.09)', 'top_buffer': '(0.02)', 'vertical_buffer': '(0)'}), '(3, 1, figsize=(4.5, 7), left_buffer=0.22, right_buffer=0.02,\n bottom_buffer=0.09, top_buffer=0.02, vertical_buffer=0)\n', (202, 323), False, 'from grid_figure import GridFigure\n'), ((3084, 3136), 'matplotlib.pyplot.savefig', 'pyplot.savefig', (['"""figures/t_growth-max_eddington.pdf"""'], {}), "('figures/t_growth-max_eddington.pdf')\n", (3098, 3136), False, 'from matplotlib import pyplot, ticker\n'), ((892, 942), 'yt.load', 'yt.load', (['"""Rarepeak_LWB/black_hole_growth_stats.h5"""'], {}), "('Rarepeak_LWB/black_hole_growth_stats.h5')\n", (899, 942), False, 'import yt\n'), ((952, 1000), 'yt.load', 'yt.load', (['"""normal_BG1/black_hole_growth_stats.h5"""'], {}), "('normal_BG1/black_hole_growth_stats.h5')\n", (959, 1000), False, 'import yt\n'), ((1010, 1056), 'yt.load', 'yt.load', (['"""void_BG1/black_hole_growth_stats.h5"""'], {}), "('void_BG1/black_hole_growth_stats.h5')\n", (1017, 1056), False, 'import yt\n'), ((2742, 2764), 'numpy.linspace', 'np.linspace', (['(0)', '(300)', '(4)'], {}), '(0, 300, 4)\n', (2753, 2764), True, 'import numpy as np\n'), ((2921, 2943), 'numpy.logspace', 'np.logspace', (['(-15)', '(0)', '(4)'], {}), '(-15, 0, 4)\n', (2932, 2943), True, 'import numpy as np\n'), ((2027, 2049), 'numpy.linspace', 'np.linspace', (['(0)', '(300)', '(4)'], {}), '(0, 300, 4)\n', (2038, 2049), True, 'import numpy as np\n'), ((2083, 2106), 'numpy.linspace', 'np.linspace', (['(0)', '(350)', '(15)'], {}), '(0, 350, 15)\n', (2094, 2106), True, 'import numpy as np\n'), ((2162, 2184), 'matplotlib.ticker.NullFormatter', 'ticker.NullFormatter', ([], {}), '()\n', (2182, 2184), False, 'from matplotlib import pyplot, ticker\n')] |
# * @Author: abhinav.mazumdar
# * @Date: 2020-09-02 23:08:21
# * @Last Modified by:abhinav.mazumdar
# * @Last Modified time: 2020-09-02 23:08:49
# This model classifies movie (IMDB Dataset)reviews as positive
# or negative ( binary classification)
from keras.datasets import imdb
from keras import models
from keras import layers
import numpy as np
# import matplotlib.pyplot as plt
# num words means we will only keep the top 10000 most frequently
# occuring words in the training data. Rare words will be discarded
# This will allow us to work with vector data of manageable size
def get_dataset():
    """Load the IMDB review dataset, keeping only the 10,000 most frequent words.

    Returns
    -------
    tuple
        ((train_data, train_labels), (test_data, test_labels)) as provided
        by ``keras.datasets.imdb``.
    """
    train_split, test_split = imdb.load_data(num_words=10000)
    return train_split, test_split
# One hot encoding
def vectorize_sequences(sequences, dimension=10000):
    """One-hot encode integer sequences into a (len(sequences), dimension) matrix.

    Row i of the result has 1.0 at every column index that appears in
    sequences[i] and 0.0 everywhere else.
    """
    encoded = np.zeros((len(sequences), dimension))
    for row, word_indices in enumerate(sequences):
        # Fancy indexing sets all listed columns of this row at once.
        encoded[row, word_indices] = 1
    return encoded
def get_prepared_data(train_data, train_labels, test_data, test_labels):
    """One-hot encode the review sequences and cast the labels to float32.

    Parameters
    ----------
    train_data, test_data : sequences of word-index lists to one-hot encode
    train_labels, test_labels : binary sentiment labels

    Returns
    -------
    tuple
        (x_train, y_train, x_test, y_test) ready for model consumption.
    """
    x_train, x_test = (vectorize_sequences(split) for split in (train_data, test_data))
    y_train, y_test = (np.asarray(labels).astype("float32")
                       for labels in (train_labels, test_labels))
    return (x_train, y_train, x_test, y_test)
def create_train_val_set(x_train, y_train):
    """Split off the first 10,000 samples as a validation set.

    Returns
    -------
    tuple
        (x_val, partial_x_train, y_val, partial_y_train): the *_val arrays
        hold samples [0, 10000) and the partial_* arrays hold the rest.
    """
    split = 10000
    x_val, partial_x_train = x_train[:split], x_train[split:]
    y_val, partial_y_train = y_train[:split], y_train[split:]
    return (x_val, partial_x_train, y_val, partial_y_train)
def create_model_definition(x_val, partial_x_train, y_val, partial_y_train):
    """Build, compile and train the binary sentiment classifier.

    Architecture: two 16-unit relu layers followed by a single sigmoid
    output, trained with rmsprop on binary cross-entropy for 20 epochs.
    (The leftover debug prints ``print("X" * 1000); print(x_val)`` from the
    original were removed.)

    Parameters
    ----------
    x_val, y_val : validation inputs/labels
    partial_x_train, partial_y_train : training inputs/labels

    Returns
    -------
    keras History object with per-epoch loss/accuracy.
    """
    model = models.Sequential()
    model.add(layers.Dense(16, activation="relu", input_shape=(10000,)))
    model.add(layers.Dense(16, activation="relu"))
    model.add(layers.Dense(1, activation="sigmoid"))
    model.compile(optimizer="rmsprop", loss="binary_crossentropy", metrics=["accuracy"])
    history = model.fit(
        partial_x_train,
        partial_y_train,
        epochs=20,
        batch_size=512,
        validation_data=(x_val, y_val),
        verbose=2,
    )
    return history
def plot_loss(history, epochs, plt):
    """Plot training vs. validation loss curves on the given pyplot module."""
    hist = history.history
    xs = range(1, epochs + 1)
    plt.plot(xs, hist.get("loss"), "bo", label="Training Loss")
    plt.plot(xs, hist.get("val_loss"), "b", label="Validation Loss")
    plt.title("Training and Validation Loss")
    plt.xlabel("Epochs")
    plt.ylabel("Loss")
    plt.legend()
def plot_accuracy(history, epochs, plt):
    """Plot training vs. validation accuracy curves on the given pyplot module."""
    hist = history.history
    xs = range(1, epochs + 1)
    plt.plot(xs, hist.get("accuracy"), "bo", label="Training Accuracy")
    plt.plot(xs, hist.get("val_accuracy"), "b", label="Validation Accuracy")
    plt.title("Training and Validation Accuracy")
    plt.xlabel("Epochs")
    plt.ylabel("Accuracy")
    plt.legend()
def calculate_result_of_model(model, x_train, x_test, y_test=None):
    """Evaluate *model* on the test set.

    The original body silently read ``y_test`` from the enclosing module's
    globals (and never used its ``x_train`` parameter), which breaks when
    this function is imported elsewhere.  ``y_test`` is now an explicit,
    backward-compatible keyword parameter.

    Parameters
    ----------
    model : trained model exposing ``evaluate(x, y)``
    x_train : unused; kept only so existing call sites keep working
    x_test : test inputs
    y_test : test labels; when None, falls back to the module-level
        ``y_test`` created in the ``__main__`` block (original behaviour).

    Returns
    -------
    Whatever ``model.evaluate`` returns (loss and metric values).
    """
    if y_test is None:
        # Preserve the original implicit dependency on the global for callers
        # that pass only three arguments.
        y_test = globals()["y_test"]
    return model.evaluate(x_test, y_test)
if __name__ == "__main__":
    from numba import cuda

    # Release any existing CUDA context on device 0 before the framework
    # initializes the GPU.
    # NOTE(review): cuda.close() destroys the context for this process —
    # confirm TensorFlow/keras re-initializes the device afterwards (or the
    # run is intended to proceed on CPU).
    cuda.select_device(0)
    cuda.close()
    (train_data, train_labels), (test_data, test_labels) = get_dataset()
    # One-hot encode the reviews and cast labels to float32.
    x_train, y_train, x_test, y_test = get_prepared_data(
        train_data, train_labels, test_data, test_labels
    )
    # Hold out the first 10k samples for validation.
    x_val, partial_x_train, y_val, partial_y_train = create_train_val_set(
        x_train, y_train
    )
    history = create_model_definition(x_val, partial_x_train, y_val, partial_y_train)
    # plot_loss(history, 20, plt)
    # plot_accuracy(history, 20, plt)
| [
"keras.datasets.imdb.load_data",
"numba.cuda.select_device",
"numpy.asarray",
"keras.models.Sequential",
"numba.cuda.close",
"keras.layers.Dense"
] | [((712, 743), 'keras.datasets.imdb.load_data', 'imdb.load_data', ([], {'num_words': '(10000)'}), '(num_words=10000)\n', (726, 743), False, 'from keras.datasets import imdb\n'), ((2386, 2405), 'keras.models.Sequential', 'models.Sequential', ([], {}), '()\n', (2403, 2405), False, 'from keras import models\n'), ((4203, 4224), 'numba.cuda.select_device', 'cuda.select_device', (['(0)'], {}), '(0)\n', (4221, 4224), False, 'from numba import cuda\n'), ((4229, 4241), 'numba.cuda.close', 'cuda.close', ([], {}), '()\n', (4239, 4241), False, 'from numba import cuda\n'), ((2420, 2477), 'keras.layers.Dense', 'layers.Dense', (['(16)'], {'activation': '"""relu"""', 'input_shape': '(10000,)'}), "(16, activation='relu', input_shape=(10000,))\n", (2432, 2477), False, 'from keras import layers\n'), ((2493, 2528), 'keras.layers.Dense', 'layers.Dense', (['(16)'], {'activation': '"""relu"""'}), "(16, activation='relu')\n", (2505, 2528), False, 'from keras import layers\n'), ((2544, 2581), 'keras.layers.Dense', 'layers.Dense', (['(1)'], {'activation': '"""sigmoid"""'}), "(1, activation='sigmoid')\n", (2556, 2581), False, 'from keras import layers\n'), ((1712, 1736), 'numpy.asarray', 'np.asarray', (['train_labels'], {}), '(train_labels)\n', (1722, 1736), True, 'import numpy as np\n'), ((1768, 1791), 'numpy.asarray', 'np.asarray', (['test_labels'], {}), '(test_labels)\n', (1778, 1791), True, 'import numpy as np\n')] |
import numpy as np
from .. import Circuit, DcOp, Resistor, Mos
from ..analysis import Contour
def cmos_inv(vgs):
    """Build a CMOS inverter circuit whose input gate is forced to *vgs* volts."""
    class CmosInv(Circuit):
        """CMOS inverter: PMOS pull-up and NMOS pull-down sharing one output node."""
        def define(self):
            self.create_nodes(1)
            supply = self.create_forced_node(name='vdd', v=1.0)
            gate = self.create_forced_node(name='vgs', v=vgs)
            out = self.nodes[0]
            # PMOS (polarity=-1): source and bulk tied to vdd, drain at the output.
            self.create_comp(cls=Mos, polarity=-1,
                             conns={'s': supply, 'b': supply,
                                    'd': out, 'g': gate})
            # NMOS (polarity=1): source and bulk tied to ground, drain at the output.
            self.create_comp(cls=Mos, polarity=1,
                             conns={'s': self.node0, 'b': self.node0,
                                    'd': out, 'g': gate})
    return CmosInv()
def test_cmos_inv():
    """Sweep the inverter input 0 → 1 V and check the DC transfer curve inverts."""
    input_voltages = []
    output_voltages = []
    vo = [1.0]
    for vi in np.linspace(0, 1.0, 101):
        dut = cmos_inv(vi)
        solver = DcOp(ckt=dut)
        # Previous output is used as the initial guess for the next solve.
        solver.solve([vo])
        vo = solver.v[0]
        input_voltages += [vi]
        output_voltages += [vo]
    print(input_voltages)
    print(output_voltages)
    # Output must be high at low input and low at high input.
    assert (output_voltages[0] > 0.9)
    assert (output_voltages[-1] < 0.1)
# def test_nmos_inv_contour():
# xs = []
# dxs = []
# for k in range(11):
# vgs = k / 10.0
# dut = nmos_inv(vgs)
# an = Contour(dut)
# x, dx = an.explore()
# xs.append(x)
# dxs.append(dx)
# print(xs)
# print(dxs)
| [
"numpy.linspace"
] | [((811, 835), 'numpy.linspace', 'np.linspace', (['(0)', '(1.0)', '(101)'], {}), '(0, 1.0, 101)\n', (822, 835), True, 'import numpy as np\n')] |
import os
import time
import pickle
import random
import numpy as np
from PIL import Image
import torchvision.transforms as transforms
from utils import cv_utils
from data.dataset import DatasetBase
class AusDataset(DatasetBase):
    """Dataset of face images annotated with Action Unit (AU) activations.

    Each sample pairs a real image and its AU condition vector with a
    randomly drawn "desired" image/condition, as used for expression-transfer
    (GANimation-style) training.
    """

    def __init__(self, opt, is_for_train):
        super(AusDataset, self).__init__(opt, is_for_train)
        self._name = "AusDataset"

        self._read_dataset()
        # Number of condition channels (AU dimensions), taken from the options.
        self._cond_nc = opt.cond_nc

    def __len__(self):
        # Number of annotated samples read from the pickle file.
        return self._dataset_size

    def __getitem__(self, idx):
        assert (idx < self._dataset_size)

        real_img = None
        real_cond = None
        real_img_path = None
        # Resample a random index until both the image and its AU annotation load.
        while real_img is None or real_cond is None:
            # get sample data
            sample_id = self._get_id(idx)

            real_img, real_img_path = self._get_img_by_id(idx)
            real_cond = self._get_cond_by_id(idx)

            if real_img is None:
                print('error reading image %s, skipping sample' % real_img_path)
                idx = random.randint(0, self._dataset_size - 1)
            if real_cond is None:
                print('error reading aus %s, skipping sample' % sample_id)
                idx = random.randint(0, self._dataset_size - 1)

        # Small uniform jitter on the condition vector (augmentation).
        real_cond += np.random.uniform(-0.02, 0.02, real_cond.shape)
        desired_img, desired_cond, noise = self._generate_random_cond()

        # transform data
        real_img = self._transform(Image.fromarray(real_img))
        desired_img = self._transform(Image.fromarray(desired_img))

        # pack data
        sample = {'real_img': real_img,
                  'real_cond': real_cond,
                  'desired_img': desired_img,
                  'desired_cond': desired_cond,
                  'cond_diff': desired_cond - real_cond,
                  }

        return sample

    def _create_transform(self):
        # Training uses augmentation (flip, pad, random crop); evaluation only
        # resizes.  Both normalize with mean/std 0.5, mapping pixels to [-1, 1].
        if self._is_for_train:
            transform_list = [transforms.RandomHorizontalFlip(),
                              transforms.Resize(self._image_size),
                              transforms.Pad(self._image_size // 16),
                              transforms.RandomCrop(self._image_size),
                              transforms.ToTensor(),
                              transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])]
        else:
            transform_list = [transforms.Resize(self._image_size),
                              transforms.ToTensor(),
                              transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])]
        self._transform = transforms.Compose(transform_list)

    def _read_dataset(self):
        # Load sample metadata (file paths + AU vectors) from the pickle file
        # selected by the train/test flag.
        self._root = self._opt.data_dir
        self._imgs_dir = os.path.join(self._root, self._opt.images_folder)

        annotations_file = self._opt.train_annotations_file if self._is_for_train else self._opt.test_annotations_file
        pkl_path = os.path.join(self._root, annotations_file)
        self._info = self._read_pkl(pkl_path)
        self._image_size = self._opt.image_size

        # dataset size
        self._dataset_size = len(self._info)

    def _read_pkl(self, file_path):
        # NOTE(review): pickle.load on external files is unsafe for untrusted
        # data; acceptable only for trusted local annotation files.
        assert os.path.exists(file_path) and file_path.endswith('.pkl'), 'Read pkl file error. Cannot open %s' % file_path
        with open(file_path, 'rb') as f:
            return pickle.load(f)

    def _get_id(self, idx):
        # Sample id is the annotated file path without its extension.
        id = self._info[idx]['file_path']
        return os.path.splitext(id)[0]

    def _get_cond_by_id(self, idx):
        # AU intensities are divided by 5.0 — presumably a 0-5 scale being
        # normalized to [0, 1]; confirm against the annotation format.
        cond = None
        if idx < self._dataset_size:
            cond = self._info[idx]['aus'] / 5.0
        return cond

    def _get_img_by_id(self, idx):
        # Returns (image, relative file path), or (None, None) for a bad index.
        if idx < self._dataset_size:
            img_path = os.path.join(self._imgs_dir, self._info[idx]['file_path'])
            img = cv_utils.read_cv2_img(img_path)
            return img, self._info[idx]['file_path']
        else:
            print('You input idx: ', idx)
            return None, None

    def _generate_random_cond(self):
        # Pick a random sample and return (img, noised cond, noise).
        cond = None
        rand_sample_id = -1
        while cond is None:
            rand_sample_id = random.randint(0, self._dataset_size - 1)
            cond = self._get_cond_by_id(rand_sample_id)
            img, _ = self._get_img_by_id(rand_sample_id)
            noise = np.random.uniform(-0.1, 0.1, cond.shape)
            if img is None:
                # NOTE(review): the recursive call already applies its own
                # `cond += noise`, and the line below adds the returned noise
                # again — conditions from this fallback path get double
                # jitter.  Verify whether that is intended.
                img, cond, noise = self._generate_random_cond()
        cond += noise
        return img, cond, noise
| [
"os.path.exists",
"PIL.Image.fromarray",
"random.randint",
"utils.cv_utils.read_cv2_img",
"os.path.join",
"pickle.load",
"os.path.splitext",
"torchvision.transforms.RandomHorizontalFlip",
"torchvision.transforms.RandomCrop",
"torchvision.transforms.Normalize",
"numpy.random.uniform",
"torchvis... | [((1264, 1311), 'numpy.random.uniform', 'np.random.uniform', (['(-0.02)', '(0.02)', 'real_cond.shape'], {}), '(-0.02, 0.02, real_cond.shape)\n', (1281, 1311), True, 'import numpy as np\n'), ((2599, 2633), 'torchvision.transforms.Compose', 'transforms.Compose', (['transform_list'], {}), '(transform_list)\n', (2617, 2633), True, 'import torchvision.transforms as transforms\n'), ((2733, 2782), 'os.path.join', 'os.path.join', (['self._root', 'self._opt.images_folder'], {}), '(self._root, self._opt.images_folder)\n', (2745, 2782), False, 'import os\n'), ((2922, 2964), 'os.path.join', 'os.path.join', (['self._root', 'annotations_file'], {}), '(self._root, annotations_file)\n', (2934, 2964), False, 'import os\n'), ((4301, 4341), 'numpy.random.uniform', 'np.random.uniform', (['(-0.1)', '(0.1)', 'cond.shape'], {}), '(-0.1, 0.1, cond.shape)\n', (4318, 4341), True, 'import numpy as np\n'), ((1445, 1470), 'PIL.Image.fromarray', 'Image.fromarray', (['real_img'], {}), '(real_img)\n', (1460, 1470), False, 'from PIL import Image\n'), ((1510, 1538), 'PIL.Image.fromarray', 'Image.fromarray', (['desired_img'], {}), '(desired_img)\n', (1525, 1538), False, 'from PIL import Image\n'), ((3187, 3212), 'os.path.exists', 'os.path.exists', (['file_path'], {}), '(file_path)\n', (3201, 3212), False, 'import os\n'), ((3355, 3369), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (3366, 3369), False, 'import pickle\n'), ((3456, 3476), 'os.path.splitext', 'os.path.splitext', (['id'], {}), '(id)\n', (3472, 3476), False, 'import os\n'), ((3743, 3801), 'os.path.join', 'os.path.join', (['self._imgs_dir', "self._info[idx]['file_path']"], {}), "(self._imgs_dir, self._info[idx]['file_path'])\n", (3755, 3801), False, 'import os\n'), ((3820, 3851), 'utils.cv_utils.read_cv2_img', 'cv_utils.read_cv2_img', (['img_path'], {}), '(img_path)\n', (3841, 3851), False, 'from utils import cv_utils\n'), ((4134, 4175), 'random.randint', 'random.randint', (['(0)', '(self._dataset_size - 1)'], {}), 
'(0, self._dataset_size - 1)\n', (4148, 4175), False, 'import random\n'), ((1028, 1069), 'random.randint', 'random.randint', (['(0)', '(self._dataset_size - 1)'], {}), '(0, self._dataset_size - 1)\n', (1042, 1069), False, 'import random\n'), ((1201, 1242), 'random.randint', 'random.randint', (['(0)', '(self._dataset_size - 1)'], {}), '(0, self._dataset_size - 1)\n', (1215, 1242), False, 'import random\n'), ((1931, 1964), 'torchvision.transforms.RandomHorizontalFlip', 'transforms.RandomHorizontalFlip', ([], {}), '()\n', (1962, 1964), True, 'import torchvision.transforms as transforms\n'), ((1996, 2031), 'torchvision.transforms.Resize', 'transforms.Resize', (['self._image_size'], {}), '(self._image_size)\n', (2013, 2031), True, 'import torchvision.transforms as transforms\n'), ((2063, 2101), 'torchvision.transforms.Pad', 'transforms.Pad', (['(self._image_size // 16)'], {}), '(self._image_size // 16)\n', (2077, 2101), True, 'import torchvision.transforms as transforms\n'), ((2133, 2172), 'torchvision.transforms.RandomCrop', 'transforms.RandomCrop', (['self._image_size'], {}), '(self._image_size)\n', (2154, 2172), True, 'import torchvision.transforms as transforms\n'), ((2204, 2225), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (2223, 2225), True, 'import torchvision.transforms as transforms\n'), ((2257, 2320), 'torchvision.transforms.Normalize', 'transforms.Normalize', ([], {'mean': '[0.5, 0.5, 0.5]', 'std': '[0.5, 0.5, 0.5]'}), '(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])\n', (2277, 2320), True, 'import torchvision.transforms as transforms\n'), ((2379, 2414), 'torchvision.transforms.Resize', 'transforms.Resize', (['self._image_size'], {}), '(self._image_size)\n', (2396, 2414), True, 'import torchvision.transforms as transforms\n'), ((2446, 2467), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (2465, 2467), True, 'import torchvision.transforms as transforms\n'), ((2499, 2562), 
'torchvision.transforms.Normalize', 'transforms.Normalize', ([], {'mean': '[0.5, 0.5, 0.5]', 'std': '[0.5, 0.5, 0.5]'}), '(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])\n', (2519, 2562), True, 'import torchvision.transforms as transforms\n')] |
import matplotlib.pyplot as plt
import numpy as np
import torch
import cv2
import os
def find_card(I):
    """Locate the blue license-plate region in an image and return its crop.

    A pixel counts as "plate blue" when channel 0 >= 119 and channel 2 <= 30
    (OpenCV BGR order assumed — TODO confirm against the callers).  Row and
    column histograms of blue pixels bound the plate; the horizontal bounds
    are then widened by a few pixels.  The original per-pixel Python loops
    are replaced by an equivalent vectorized mask (identical counts, much
    faster); the unused ``temp`` local was removed.

    Parameters
    ----------
    I : numpy.ndarray
        H x W x 3 image array.

    Returns
    -------
    numpy.ndarray
        The cropped plate region (a slice of I).
    """
    y, x = I.shape[:2]
    # Boolean mask of plate-blue pixels.
    blue = (I[:, :, 2] <= 30) & (I[:, :, 0] >= 119)

    # Vertical extent: start from the row with the most blue pixels and
    # expand up/down while rows still contain at least 5 blue pixels.
    Blue_y = blue.sum(axis=1)
    MaxY = int(np.argmax(Blue_y))
    PY1 = MaxY
    while (Blue_y[PY1] >= 5) and (PY1 > 0):
        PY1 -= 1
    PY2 = MaxY
    while (Blue_y[PY2] >= 5) and (PY2 < y - 1):
        PY2 += 1

    # Horizontal extent within the vertical band: scan inwards from both
    # edges until a column holds at least 3 blue pixels.
    Blue_x = blue[PY1:PY2].sum(axis=0)
    PX1 = 0
    while (Blue_x[PX1] < 3) and (PX1 < x - 1):
        PX1 += 1
    PX2 = x - 1
    while (Blue_x[PX2] < 3) and (PX2 > PX1):
        PX2 -= 1

    # Widen the horizontal bounds.  NOTE(review): the original subtracts 2
    # here AND another 2 in the slice below (4 px total on the left) —
    # preserved as-is; verify whether both were intended.
    PX1 -= 2
    PX2 += 2
    return I[PY1:PY2, PX1 - 2: PX2, :]
def divide(I):
    """Count, per column, the pixels whose green channel exceeds 176.

    Returns an (x, 1) float array of per-column "white" pixel counts.
    """
    height, width = I.shape[:2]
    counts = np.zeros((width, 1))
    for col in range(width):
        for row in range(height):
            if I[row, col, 1] > 176:
                counts[col][0] += 1
    return counts
def divide_each_character(I):
    """Segment a plate image into per-character column ranges.

    Builds the per-column histogram of pixels with green channel > 176 and
    emits ``[start, end]`` (with 2 px of margin on each side) for every run
    of sufficiently white columns longer than a width-scaled minimum.
    """
    height, width = I.shape[:2]
    white_counts = np.zeros((width, 1))
    for col in range(width):
        for row in range(height):
            if I[row, col, 1] > 176:
                white_counts[col][0] += 1

    # Minimum run length scales with image width (297 px reference plate).
    scale = I.shape[1] / 297
    segments = []
    run = 0
    for col in range(white_counts.shape[0]):
        if white_counts[col] > 8:
            run += 1
        elif run > 20 * scale:
            segments.append([col - run - 2, col + 2])
            run = 0
        else:
            run = 0
    return segments
if __name__ == '__main__':
    I = cv2.imread('Car.jpg')
    # Crop the blue plate region out of the full photograph.
    Plate = find_card(I)
    # White_x = divide(Plate)
    plt.imshow(Plate)
    plt.show()
    # plt.plot(np.arange(Plate.shape[1]), White_x)
    # Column ranges of the individual characters on the plate.
    res = divide_each_character(Plate)
    plate_save_path = './singledigit/'
    # NOTE(review): plt.subplot(1, 7, t + 1) assumes at most 7 characters,
    # and os.mkdir raises if the directory already exists — rerunning the
    # script without cleaning ./singledigit/ will fail.
    for t in range(len(res)):
        plt.subplot(1, 7, t + 1)
        temp = res[t]
        save_img = cv2.cvtColor(Plate[:, temp[0]:temp[1], :],cv2.COLOR_BGR2GRAY)
        # Pad each character crop into a square canvas (side = longer dim).
        ma = max(save_img.shape[0], save_img.shape[1])
        mi = min(save_img.shape[0], save_img.shape[1])
        ans = np.zeros(shape=(ma, ma, 3),dtype=np.uint8)
        start =int(ma/2-mi/2)
        # Binarize: copy pixels brighter than 125 as white, centered in the canvas.
        # NOTE(review): indexing save_img[j, i] with j over the longer dim
        # assumes each crop is taller than it is wide — confirm, otherwise
        # this raises IndexError for wide crops.
        for i in range(mi):
            for j in range(ma):
                if save_img[j,i] > 125:
                    for k in range(3):
                        ans[j,start+i,k]=255
        ans=cv2.merge([ans[:,:,0],ans[:,:,1],ans[:,:,2]])
        ans=cv2.resize(ans,(25,25))
        dir_name=plate_save_path+str(t)
        os.mkdir(dir_name)
        cv2.imwrite(dir_name+'/'+str(t)+'.jpg',ans)
        plt.imshow(ans)
        plt.show()
| [
"matplotlib.pyplot.imshow",
"cv2.merge",
"numpy.argmax",
"matplotlib.pyplot.subplot",
"numpy.zeros",
"os.mkdir",
"cv2.cvtColor",
"cv2.resize",
"cv2.imread",
"matplotlib.pyplot.show"
] | [((179, 195), 'numpy.zeros', 'np.zeros', (['(y, 1)'], {}), '((y, 1))\n', (187, 195), True, 'import numpy as np\n'), ((402, 419), 'numpy.argmax', 'np.argmax', (['Blue_y'], {}), '(Blue_y)\n', (411, 419), True, 'import numpy as np\n'), ((605, 621), 'numpy.zeros', 'np.zeros', (['(1, x)'], {}), '((1, x))\n', (613, 621), True, 'import numpy as np\n'), ((1067, 1083), 'numpy.zeros', 'np.zeros', (['(x, 1)'], {}), '((x, 1))\n', (1075, 1083), True, 'import numpy as np\n'), ((1291, 1307), 'numpy.zeros', 'np.zeros', (['(x, 1)'], {}), '((x, 1))\n', (1299, 1307), True, 'import numpy as np\n'), ((1800, 1821), 'cv2.imread', 'cv2.imread', (['"""Car.jpg"""'], {}), "('Car.jpg')\n", (1810, 1821), False, 'import cv2\n'), ((1881, 1898), 'matplotlib.pyplot.imshow', 'plt.imshow', (['Plate'], {}), '(Plate)\n', (1891, 1898), True, 'import matplotlib.pyplot as plt\n'), ((1903, 1913), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1911, 1913), True, 'import matplotlib.pyplot as plt\n'), ((2831, 2841), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2839, 2841), True, 'import matplotlib.pyplot as plt\n'), ((2081, 2105), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', '(7)', '(t + 1)'], {}), '(1, 7, t + 1)\n', (2092, 2105), True, 'import matplotlib.pyplot as plt\n'), ((2147, 2209), 'cv2.cvtColor', 'cv2.cvtColor', (['Plate[:, temp[0]:temp[1], :]', 'cv2.COLOR_BGR2GRAY'], {}), '(Plate[:, temp[0]:temp[1], :], cv2.COLOR_BGR2GRAY)\n', (2159, 2209), False, 'import cv2\n'), ((2333, 2376), 'numpy.zeros', 'np.zeros', ([], {'shape': '(ma, ma, 3)', 'dtype': 'np.uint8'}), '(shape=(ma, ma, 3), dtype=np.uint8)\n', (2341, 2376), True, 'import numpy as np\n'), ((2602, 2655), 'cv2.merge', 'cv2.merge', (['[ans[:, :, 0], ans[:, :, 1], ans[:, :, 2]]'], {}), '([ans[:, :, 0], ans[:, :, 1], ans[:, :, 2]])\n', (2611, 2655), False, 'import cv2\n'), ((2660, 2685), 'cv2.resize', 'cv2.resize', (['ans', '(25, 25)'], {}), '(ans, (25, 25))\n', (2670, 2685), False, 'import cv2\n'), ((2732, 2750), 
'os.mkdir', 'os.mkdir', (['dir_name'], {}), '(dir_name)\n', (2740, 2750), False, 'import os\n'), ((2811, 2826), 'matplotlib.pyplot.imshow', 'plt.imshow', (['ans'], {}), '(ans)\n', (2821, 2826), True, 'import matplotlib.pyplot as plt\n')] |
import numpy
from sklearn import preprocessing
def linear(intrinsic_process):
    """Embed the 2-D process into 3-D by appending a constant zero coordinate.

    The first two output rows copy the input; the third row is identically 0.
    """
    assert intrinsic_process.shape[0] == 2
    n_points = intrinsic_process.shape[1]
    observed_process = numpy.empty((3, n_points), dtype=numpy.float64)
    observed_process[0] = intrinsic_process[0]
    observed_process[1] = intrinsic_process[1]
    observed_process[2] = 0.0
    return observed_process
def s_curve(intrinsic_process, k=1):
    """Map the 2-D process onto an S-shaped curve embedded in 3-D.

    The first coordinate is turned into an angle t = 3*pi*k*(x/2); the output is
    (sin(t), y, sign(t)*(cos(t)-1)), the classic "S-curve" benchmark manifold.
    """
    assert intrinsic_process.shape[0] == 2
    halved = numpy.copy(intrinsic_process)
    halved = (halved.T - 0).T / 2
    observed_process = numpy.empty((3, halved.shape[1]), dtype=numpy.float64)
    angle = 3 * numpy.pi * k * halved[0]
    observed_process[0] = numpy.sin(angle)
    observed_process[1] = intrinsic_process[1] * 1
    observed_process[2] = numpy.sign(angle) * (numpy.cos(angle) - 1)
    return observed_process
def severed_sphere(intrinsic_process, k1=5.5, k2=2):
    """Project the 2-D process onto a (partial) sphere via spherical angles.

    Row 0 is scaled to the azimuth (by k1) and row 1 to the elevation (by k2);
    the result is the corresponding point on the unit sphere.
    """
    assert intrinsic_process.shape[0] == 2
    coords = numpy.copy(intrinsic_process)
    observed_process = numpy.empty((3, coords.shape[1]), dtype=numpy.float64)
    azimuth = coords[0] * k1
    elevation = coords[1] * k2
    cos_elevation = numpy.cos(elevation)
    observed_process[0] = numpy.sin(azimuth) * cos_elevation
    observed_process[1] = numpy.cos(azimuth) * cos_elevation
    observed_process[2] = numpy.sin(elevation)
    return observed_process
def twin_peaks(intrinsic_process, k=1):
    """Lift the 2-D process onto the "twin peaks" surface in 3-D.

    The input is halved, kept as the (x, y) plane, and a height
    sin(k*x)*sin(k*y)/3 is added as the third coordinate.
    """
    assert intrinsic_process.shape[0] == 2
    halved = numpy.copy(intrinsic_process)
    halved = (halved.T - 0).T / 2
    observed_process = numpy.empty((3, halved.shape[1]), dtype=numpy.float64)
    observed_process[0] = halved[0]
    observed_process[1] = halved[1]
    observed_process[2] = numpy.sin(k * halved[0]) * numpy.sin(k * halved[1]) / 3
    return observed_process
def parabola2d2d(intrinsic_process, k=2):
    """Bend the (normalized) 2-D process along a parabola, staying in 2-D.

    The point cloud is centered on its mean and scaled by its largest
    coordinate span, then the y coordinate is shifted down by k*x**2.
    """
    assert intrinsic_process.shape[0] == 2
    span_x = numpy.max(intrinsic_process[0]) - numpy.min(intrinsic_process[0])
    span_y = numpy.max(intrinsic_process[1]) - numpy.min(intrinsic_process[1])
    span = max(span_x, span_y)
    center = numpy.mean(intrinsic_process, axis=1)
    normalized = (intrinsic_process.T - center.T).T / span
    observed_process = numpy.empty((2, intrinsic_process.shape[1]), dtype=numpy.float64)
    observed_process[0, :] = normalized[0, :]
    observed_process[1, :] = normalized[1, :] - k * normalized[0, :] ** 2
    return observed_process
def parabola2d3d(intrinsic_process, k=3):
    """Lift the centered 2-D process onto a 3-D paraboloid.

    After shifting by -0.5, the (x, y) plane is kept and z = k*(x**2 + y**2)
    is appended as the third coordinate.
    """
    assert intrinsic_process.shape[0] == 2
    observed_process = numpy.empty((3, intrinsic_process.shape[1]), dtype=numpy.float64)
    centered = intrinsic_process - 0.5
    observed_process[0, :] = centered[0, :]
    observed_process[1, :] = centered[1, :]
    observed_process[2, :] = k * numpy.sum(centered ** 2, axis=0)
    return observed_process
def singers_mushroom(intrinsic_process):
    """Apply Singer's "mushroom" cubic distortion to the 2-D process.

    The cloud is shifted so each coordinate starts at 0, then each axis is
    perturbed by the cube of the other axis.
    """
    assert intrinsic_process.shape[0] == 2
    shifted = numpy.copy(intrinsic_process)
    shifted = (shifted.T - numpy.min(shifted, axis=1).T).T
    observed_process = numpy.empty((2, shifted.shape[1]), dtype=numpy.float64)
    observed_process[0] = shifted[0] + numpy.power(shifted[1], 3)
    observed_process[1] = shifted[1] - numpy.power(shifted[0], 3)
    return observed_process
def singers_sphere(intrinsic_process):
    """Project the 2-D process onto the unit sphere via (x, y, 1)/||(x, y, 1)||."""
    assert intrinsic_process.shape[0] == 2
    x_coord = intrinsic_process[0]
    y_coord = intrinsic_process[1]
    observed_process = numpy.empty((3, intrinsic_process.shape[1]), dtype=numpy.float64)
    norm = numpy.sqrt(x_coord ** 2 + y_coord ** 2 + 1)
    observed_process[0] = x_coord / norm
    observed_process[1] = y_coord / norm
    observed_process[2] = 1 / norm
    return observed_process
def whole_sphere(intrinsic_process, k=0.5):
    """Wrap the 2-D process around a full sphere.

    The mean-centered radius is mapped to the polar angle (scaled by k) and the
    planar angle theta becomes the azimuth, giving points on the unit sphere.
    """
    assert intrinsic_process.shape[0] == 2
    intrinsic_process_temp = numpy.copy(intrinsic_process)
    # Center the copy on its mean; the radius below is computed from this copy.
    intrinsic_process_temp = (intrinsic_process_temp.T-numpy.mean(intrinsic_process_temp, axis=1).T).T
    observed_process = numpy.empty((3, intrinsic_process_temp.shape[1]), dtype=numpy.float64)
    radius = numpy.sqrt(intrinsic_process_temp[0]**2+intrinsic_process_temp[1]**2)
    # NOTE(review): theta is taken from the ORIGINAL (un-centered) coordinates
    # while radius uses the centered copy — confirm this mix is intentional.
    # arctan only covers (-pi/2, pi/2); the next line adds pi for points in the
    # left half-plane so theta spans a full revolution.
    theta = numpy.arctan(intrinsic_process[1, :] / intrinsic_process[0, :])
    theta[numpy.where(intrinsic_process[0, :] < 0)] = theta[numpy.where(intrinsic_process[0, :] < 0)]+numpy.pi
    observed_process[0] = numpy.sin(k*radius)*numpy.sin(theta)
    observed_process[1] = numpy.sin(k*radius)*numpy.cos(theta)
    observed_process[2] = -numpy.cos(k*radius)
    return observed_process
def photo_dist(intrinsic_process, k=1.5):
    """Apply a radial (lens-style) distortion about the point (0.5, 0.5).

    Each point is scaled by (1 + k*r**2) where r is its distance from the
    center, then shifted back into the original frame.
    """
    assert intrinsic_process.shape[0] == 2
    observed_process = numpy.empty((2, intrinsic_process.shape[1]), dtype=numpy.float64)
    centered = intrinsic_process - 0.5
    radius = numpy.sqrt(centered[0, :] ** 2 + centered[1, :] ** 2)
    gain = 1 + k * radius ** 2
    observed_process[0, :] = centered[0, :] * gain
    observed_process[1, :] = centered[1, :] * gain
    observed_process = observed_process + 0.5
    return observed_process
def twirl(intrinsic_process, k=6):
    """Swirl the 2-D process about its mean.

    Each point is rotated by an angle proportional (factor k) to its distance
    from the cloud's mean, producing a spiral/twirl distortion.
    """
    assert intrinsic_process.shape[0] == 2
    observed_process = numpy.empty((2, intrinsic_process.shape[1]), dtype=numpy.float64)
    temp_mean = numpy.mean(intrinsic_process, 1)
    # Work in mean-centered polar coordinates.
    intrinsic_process = (intrinsic_process.T - temp_mean.T).T
    r = numpy.sqrt(intrinsic_process[0, :]**2 + intrinsic_process[1, :]**2)
    # arctan only covers (-pi/2, pi/2); add pi for the left half-plane.
    # (numpy.where indices and the boolean mask below select the same elements.)
    theta = numpy.arctan(intrinsic_process[1, :] / intrinsic_process[0, :])
    theta[numpy.where(intrinsic_process[0, :] < 0)] = theta[intrinsic_process[0, :] < 0]+numpy.pi
    newr = r
    # Rotation grows linearly with the radius; sign flipped for the twirl direction.
    newtheta = theta + newr * k
    newtheta = -newtheta
    observed_process[0, :] = newr * numpy.cos(newtheta)
    observed_process[1, :] = newr * numpy.sin(newtheta)
    # Shift back into the original (un-centered) frame.
    observed_process = (observed_process.T + temp_mean.T).T
    return observed_process
def bend(intrinsic_process, k=45):
    """Fold the plane along the line x = 0.5 by an angle of k degrees.

    Points with x < 0.5 are left in the z = 0 plane; points beyond the fold
    line are rotated out of the plane about that line.
    """
    assert intrinsic_process.shape[0] == 2
    fold_angle = 2 * numpy.pi * (k / 360)
    observed_process = numpy.empty((3, intrinsic_process.shape[1]), dtype=numpy.float64)
    for idx in range(0, intrinsic_process.shape[1]):
        px = intrinsic_process[0, idx]
        py = intrinsic_process[1, idx]
        if px < 0.5:
            # Left of the fold: unchanged, flat in z = 0.
            observed_process[0, idx] = px
            observed_process[1, idx] = py
            observed_process[2, idx] = 0
        else:
            # Right of the fold: rotate the overshoot about the fold line.
            excess = px - 0.5
            observed_process[0, idx] = 0.5 + numpy.cos(fold_angle) * excess
            observed_process[1, idx] = py
            observed_process[2, idx] = numpy.sin(fold_angle) * excess
    return observed_process
def swissroll(intrinsic_process, k_r=8, k_twist=8):
    """Roll the 2-D process into the classic 3-D swiss-roll manifold.

    The first coordinate drives both the roll radius (k_r) and the roll angle
    (k_twist); the second coordinate becomes the roll's axial direction.
    """
    assert intrinsic_process.shape[0] == 2
    shifted = numpy.copy(intrinsic_process)
    shifted = (shifted.T - numpy.min(shifted, axis=1).T).T
    observed_process = numpy.empty((3, shifted.shape[1]), dtype=numpy.float64)
    roll_angle = k_twist * shifted[0]
    roll_radius = k_r * shifted[0]
    observed_process[0] = roll_radius * numpy.cos(roll_angle)
    observed_process[1] = shifted[1] * 2
    observed_process[2] = roll_radius * numpy.sin(roll_angle)
    return observed_process
def tube(intrinsic_process, k=160):
    """Wrap the 2-D process around a section of a cylinder (a "tube").

    The first coordinate is bent through k degrees of arc at a radius chosen so
    arc length is preserved; the second coordinate runs along the tube's axis.
    The result is then rescaled coordinate-wise into [0, 1].
    """
    assert intrinsic_process.shape[0] == 2
    scale = numpy.max(intrinsic_process[0]) - numpy.min(intrinsic_process[0])
    # Radius such that k degrees of arc at this radius has unit arc length.
    radius = (360/k)/(2*numpy.pi)
    observed_process = numpy.empty((3, intrinsic_process.shape[1]), dtype=numpy.float64)
    observed_process[0] = radius * numpy.cos(2*numpy.pi*(k/360) * (intrinsic_process[0]/scale))
    observed_process[1] = intrinsic_process[1]
    observed_process[2] = radius * numpy.sin(2*numpy.pi*(k/360) * (intrinsic_process[0]/scale))
    # Rescale each of the three output coordinates independently into [0, 1].
    min_max_scaler = preprocessing.MinMaxScaler(feature_range=(0, 1))
    observed_process = min_max_scaler.fit_transform(observed_process.T).T
    return observed_process
def helix(intrinsic_process, k=2):
    """Wrap the centered 2-D process around a helical surface in 3-D.

    The first coordinate sets the helix phase (k half-turns over the unit
    interval); the second coordinate is the distance from the helix axis.
    """
    assert intrinsic_process.shape[0] == 2
    observed_process = numpy.empty((3, intrinsic_process.shape[1]), dtype=numpy.float64)
    centered = intrinsic_process - 0.5
    phase = k * numpy.pi * centered[0, :]
    observed_process[0, :] = centered[0, :]
    observed_process[1, :] = centered[1, :] * numpy.cos(phase)
    observed_process[2, :] = centered[1, :] * numpy.sin(phase)
    observed_process = observed_process + 0.5
    return observed_process
def papillon(intrinsic_process, k=8):
    """Apply the butterfly ("papillon") quadratic distortion about (0.5, 0.5).

    The y coordinate is stretched by a factor growing quadratically with the
    centered x coordinate, pinching the cloud into a bow-tie shape.
    """
    assert intrinsic_process.shape[0] == 2
    observed_process = numpy.empty((2, intrinsic_process.shape[1]), dtype=numpy.float64)
    centered = intrinsic_process - 0.5
    observed_process[0, :] = centered[0, :] + 0.5
    observed_process[1, :] = centered[1, :] + k * centered[1, :] * centered[0, :] ** 2 + 0.5
    return observed_process
def twist(intrinsic_process, k=6):
    """Twist the centered 3-D process about its z axis.

    Each point is rotated in the (x, y) plane by an angle proportional
    (factor k) to its z coordinate; z itself is preserved.
    """
    assert intrinsic_process.shape[0] == 3
    intrinsic_process = intrinsic_process - 0.5
    # Polar coordinates of the (x, y) projection.
    r = numpy.sqrt(intrinsic_process[0, :]**2 + intrinsic_process[1, :]**2)
    # arctan only covers (-pi/2, pi/2); add pi for points in the left half-plane.
    theta = numpy.arctan(intrinsic_process[1, :] / intrinsic_process[0, :])
    theta[numpy.where(intrinsic_process[0, :] < 0)] = theta[numpy.where(intrinsic_process[0, :] < 0)]+numpy.pi
    observed_process = numpy.empty([3, intrinsic_process.shape[1]])
    # Rotate each point by z*k radians; radius and z are unchanged.
    observed_process[0, :] = r*numpy.cos(theta + intrinsic_process[2, :]*k)
    observed_process[1, :] = r*numpy.sin(theta + intrinsic_process[2, :]*k)
    observed_process[2, :] = intrinsic_process[2, :]
    # Shift back into the original [0, 1]-style frame.
    observed_process = observed_process + 0.5
    return observed_process
def antenna(intrinsic_process, centers, amplitudes, width, angles, range_factor, reg_fact):
    """Simulate received power at a set of antennas for each process point.

    One output row per antenna; each entry combines a distance-based decay and
    a direction-based gain relative to the antenna's preferred angle.
    All per-antenna parameter arrays are indexed by antenna; their exact units
    are not documented here — presumably tuned by the caller (TODO confirm).
    """
    n_antenas = centers.shape[1]
    observed_process = numpy.zeros([n_antenas, intrinsic_process.shape[1]])
    assert intrinsic_process.shape[0] == centers.shape[0]
    for i_antena in range(0, n_antenas):
        # Displacement of every point from this antenna's center.
        dists = (intrinsic_process.T - centers[:, i_antena].T).T
        # Bearing of each point as the angle of the complex number dx + i*dy.
        angle = numpy.angle([dists[0, :]+1j*dists[1, :]])
        # Euclidean distance from the antenna to each point.
        dists = dists * dists
        dists = numpy.sqrt(numpy.sum(dists, axis=0))
        # Amplitude * distance decay (fifth root of a regularized inverse)
        # * angular gain (largest when the bearing matches angles[i_antena]).
        observed_process[i_antena, :] = amplitudes[i_antena]*((1/(reg_fact[i_antena]+dists*range_factor[i_antena])))**(1/5)*(1/(0.5+width[i_antena]*numpy.abs(numpy.exp(1j*angle)-numpy.exp(1j*(angles[i_antena])))))
        #observed_process[i_antena, :] = dists
    return observed_process
| [
"numpy.copy",
"numpy.mean",
"numpy.sqrt",
"numpy.power",
"numpy.where",
"numpy.max",
"numpy.angle",
"numpy.sum",
"numpy.zeros",
"numpy.exp",
"numpy.empty",
"numpy.sign",
"numpy.cos",
"numpy.min",
"numpy.sin",
"sklearn.preprocessing.MinMaxScaler",
"numpy.arctan"
] | [((146, 211), 'numpy.empty', 'numpy.empty', (['(3, intrinsic_process.shape[1])'], {'dtype': 'numpy.float64'}), '((3, intrinsic_process.shape[1]), dtype=numpy.float64)\n', (157, 211), False, 'import numpy\n'), ((473, 502), 'numpy.copy', 'numpy.copy', (['intrinsic_process'], {}), '(intrinsic_process)\n', (483, 502), False, 'import numpy\n'), ((588, 658), 'numpy.empty', 'numpy.empty', (['(3, intrinsic_process_temp.shape[1])'], {'dtype': 'numpy.float64'}), '((3, intrinsic_process_temp.shape[1]), dtype=numpy.float64)\n', (599, 658), False, 'import numpy\n'), ((738, 750), 'numpy.sin', 'numpy.sin', (['t'], {}), '(t)\n', (747, 750), False, 'import numpy\n'), ((1016, 1045), 'numpy.copy', 'numpy.copy', (['intrinsic_process'], {}), '(intrinsic_process)\n', (1026, 1045), False, 'import numpy\n'), ((1173, 1243), 'numpy.empty', 'numpy.empty', (['(3, intrinsic_process_temp.shape[1])'], {'dtype': 'numpy.float64'}), '((3, intrinsic_process_temp.shape[1]), dtype=numpy.float64)\n', (1184, 1243), False, 'import numpy\n'), ((1482, 1523), 'numpy.sin', 'numpy.sin', (['(intrinsic_process_temp[1] * k2)'], {}), '(intrinsic_process_temp[1] * k2)\n', (1491, 1523), False, 'import numpy\n'), ((1664, 1693), 'numpy.copy', 'numpy.copy', (['intrinsic_process'], {}), '(intrinsic_process)\n', (1674, 1693), False, 'import numpy\n'), ((1779, 1849), 'numpy.empty', 'numpy.empty', (['(3, intrinsic_process_temp.shape[1])'], {'dtype': 'numpy.float64'}), '((3, intrinsic_process_temp.shape[1]), dtype=numpy.float64)\n', (1790, 1849), False, 'import numpy\n'), ((2382, 2419), 'numpy.mean', 'numpy.mean', (['intrinsic_process'], {'axis': '(1)'}), '(intrinsic_process, axis=1)\n', (2392, 2419), False, 'import numpy\n'), ((2511, 2576), 'numpy.empty', 'numpy.empty', (['(2, intrinsic_process.shape[1])'], {'dtype': 'numpy.float64'}), '((2, intrinsic_process.shape[1]), dtype=numpy.float64)\n', (2522, 2576), False, 'import numpy\n'), ((2870, 2935), 'numpy.empty', 'numpy.empty', (['(3, intrinsic_process.shape[1])'], 
{'dtype': 'numpy.float64'}), '((3, intrinsic_process.shape[1]), dtype=numpy.float64)\n', (2881, 2935), False, 'import numpy\n'), ((3307, 3336), 'numpy.copy', 'numpy.copy', (['intrinsic_process'], {}), '(intrinsic_process)\n', (3317, 3336), False, 'import numpy\n'), ((3462, 3532), 'numpy.empty', 'numpy.empty', (['(2, intrinsic_process_temp.shape[1])'], {'dtype': 'numpy.float64'}), '((2, intrinsic_process_temp.shape[1]), dtype=numpy.float64)\n', (3473, 3532), False, 'import numpy\n'), ((3902, 3972), 'numpy.empty', 'numpy.empty', (['(3, intrinsic_process_temp.shape[1])'], {'dtype': 'numpy.float64'}), '((3, intrinsic_process_temp.shape[1]), dtype=numpy.float64)\n', (3913, 3972), False, 'import numpy\n'), ((3986, 4065), 'numpy.sqrt', 'numpy.sqrt', (['(intrinsic_process_temp[0] ** 2 + intrinsic_process_temp[1] ** 2 + 1)'], {}), '(intrinsic_process_temp[0] ** 2 + intrinsic_process_temp[1] ** 2 + 1)\n', (3996, 4065), False, 'import numpy\n'), ((4357, 4386), 'numpy.copy', 'numpy.copy', (['intrinsic_process'], {}), '(intrinsic_process)\n', (4367, 4386), False, 'import numpy\n'), ((4513, 4583), 'numpy.empty', 'numpy.empty', (['(3, intrinsic_process_temp.shape[1])'], {'dtype': 'numpy.float64'}), '((3, intrinsic_process_temp.shape[1]), dtype=numpy.float64)\n', (4524, 4583), False, 'import numpy\n'), ((4597, 4672), 'numpy.sqrt', 'numpy.sqrt', (['(intrinsic_process_temp[0] ** 2 + intrinsic_process_temp[1] ** 2)'], {}), '(intrinsic_process_temp[0] ** 2 + intrinsic_process_temp[1] ** 2)\n', (4607, 4672), False, 'import numpy\n'), ((4679, 4742), 'numpy.arctan', 'numpy.arctan', (['(intrinsic_process[1, :] / intrinsic_process[0, :])'], {}), '(intrinsic_process[1, :] / intrinsic_process[0, :])\n', (4691, 4742), False, 'import numpy\n'), ((5166, 5231), 'numpy.empty', 'numpy.empty', (['(2, intrinsic_process.shape[1])'], {'dtype': 'numpy.float64'}), '((2, intrinsic_process.shape[1]), dtype=numpy.float64)\n', (5177, 5231), False, 'import numpy\n'), ((5288, 5359), 'numpy.sqrt', 
'numpy.sqrt', (['(intrinsic_process[0, :] ** 2 + intrinsic_process[1, :] ** 2)'], {}), '(intrinsic_process[0, :] ** 2 + intrinsic_process[1, :] ** 2)\n', (5298, 5359), False, 'import numpy\n'), ((5677, 5742), 'numpy.empty', 'numpy.empty', (['(2, intrinsic_process.shape[1])'], {'dtype': 'numpy.float64'}), '((2, intrinsic_process.shape[1]), dtype=numpy.float64)\n', (5688, 5742), False, 'import numpy\n'), ((5759, 5791), 'numpy.mean', 'numpy.mean', (['intrinsic_process', '(1)'], {}), '(intrinsic_process, 1)\n', (5769, 5791), False, 'import numpy\n'), ((5862, 5933), 'numpy.sqrt', 'numpy.sqrt', (['(intrinsic_process[0, :] ** 2 + intrinsic_process[1, :] ** 2)'], {}), '(intrinsic_process[0, :] ** 2 + intrinsic_process[1, :] ** 2)\n', (5872, 5933), False, 'import numpy\n'), ((5942, 6005), 'numpy.arctan', 'numpy.arctan', (['(intrinsic_process[1, :] / intrinsic_process[0, :])'], {}), '(intrinsic_process[1, :] / intrinsic_process[0, :])\n', (5954, 6005), False, 'import numpy\n'), ((6506, 6571), 'numpy.empty', 'numpy.empty', (['(3, intrinsic_process.shape[1])'], {'dtype': 'numpy.float64'}), '((3, intrinsic_process.shape[1]), dtype=numpy.float64)\n', (6517, 6571), False, 'import numpy\n'), ((7231, 7260), 'numpy.copy', 'numpy.copy', (['intrinsic_process'], {}), '(intrinsic_process)\n', (7241, 7260), False, 'import numpy\n'), ((7386, 7456), 'numpy.empty', 'numpy.empty', (['(3, intrinsic_process_temp.shape[1])'], {'dtype': 'numpy.float64'}), '((3, intrinsic_process_temp.shape[1]), dtype=numpy.float64)\n', (7397, 7456), False, 'import numpy\n'), ((7965, 8030), 'numpy.empty', 'numpy.empty', (['(3, intrinsic_process.shape[1])'], {'dtype': 'numpy.float64'}), '((3, intrinsic_process.shape[1]), dtype=numpy.float64)\n', (7976, 8030), False, 'import numpy\n'), ((8291, 8339), 'sklearn.preprocessing.MinMaxScaler', 'preprocessing.MinMaxScaler', ([], {'feature_range': '(0, 1)'}), '(feature_range=(0, 1))\n', (8317, 8339), False, 'from sklearn import preprocessing\n'), ((8545, 8610), 
'numpy.empty', 'numpy.empty', (['(3, intrinsic_process.shape[1])'], {'dtype': 'numpy.float64'}), '((3, intrinsic_process.shape[1]), dtype=numpy.float64)\n', (8556, 8610), False, 'import numpy\n'), ((9106, 9171), 'numpy.empty', 'numpy.empty', (['(2, intrinsic_process.shape[1])'], {'dtype': 'numpy.float64'}), '((2, intrinsic_process.shape[1]), dtype=numpy.float64)\n', (9117, 9171), False, 'import numpy\n'), ((9563, 9634), 'numpy.sqrt', 'numpy.sqrt', (['(intrinsic_process[0, :] ** 2 + intrinsic_process[1, :] ** 2)'], {}), '(intrinsic_process[0, :] ** 2 + intrinsic_process[1, :] ** 2)\n', (9573, 9634), False, 'import numpy\n'), ((9643, 9706), 'numpy.arctan', 'numpy.arctan', (['(intrinsic_process[1, :] / intrinsic_process[0, :])'], {}), '(intrinsic_process[1, :] / intrinsic_process[0, :])\n', (9655, 9706), False, 'import numpy\n'), ((9841, 9885), 'numpy.empty', 'numpy.empty', (['[3, intrinsic_process.shape[1]]'], {}), '([3, intrinsic_process.shape[1]])\n', (9852, 9885), False, 'import numpy\n'), ((10315, 10367), 'numpy.zeros', 'numpy.zeros', (['[n_antenas, intrinsic_process.shape[1]]'], {}), '([n_antenas, intrinsic_process.shape[1]])\n', (10326, 10367), False, 'import numpy\n'), ((826, 839), 'numpy.sign', 'numpy.sign', (['t'], {}), '(t)\n', (836, 839), False, 'import numpy\n'), ((1270, 1311), 'numpy.sin', 'numpy.sin', (['(intrinsic_process_temp[0] * k1)'], {}), '(intrinsic_process_temp[0] * k1)\n', (1279, 1311), False, 'import numpy\n'), ((1310, 1351), 'numpy.cos', 'numpy.cos', (['(intrinsic_process_temp[1] * k2)'], {}), '(intrinsic_process_temp[1] * k2)\n', (1319, 1351), False, 'import numpy\n'), ((1376, 1417), 'numpy.cos', 'numpy.cos', (['(intrinsic_process_temp[0] * k1)'], {}), '(intrinsic_process_temp[0] * k1)\n', (1385, 1417), False, 'import numpy\n'), ((1416, 1457), 'numpy.cos', 'numpy.cos', (['(intrinsic_process_temp[1] * k2)'], {}), '(intrinsic_process_temp[1] * k2)\n', (1425, 1457), False, 'import numpy\n'), ((2189, 2220), 'numpy.max', 'numpy.max', 
(['intrinsic_process[0]'], {}), '(intrinsic_process[0])\n', (2198, 2220), False, 'import numpy\n'), ((2223, 2254), 'numpy.min', 'numpy.min', (['intrinsic_process[0]'], {}), '(intrinsic_process[0])\n', (2232, 2254), False, 'import numpy\n'), ((2269, 2300), 'numpy.max', 'numpy.max', (['intrinsic_process[1]'], {}), '(intrinsic_process[1])\n', (2278, 2300), False, 'import numpy\n'), ((2303, 2334), 'numpy.min', 'numpy.min', (['intrinsic_process[1]'], {}), '(intrinsic_process[1])\n', (2312, 2334), False, 'import numpy\n'), ((3123, 3164), 'numpy.sum', 'numpy.sum', (['(intrinsic_process ** 2)'], {'axis': '(0)'}), '(intrinsic_process ** 2, axis=0)\n', (3132, 3164), False, 'import numpy\n'), ((3585, 3626), 'numpy.power', 'numpy.power', (['intrinsic_process_temp[1]', '(3)'], {}), '(intrinsic_process_temp[1], 3)\n', (3596, 3626), False, 'import numpy\n'), ((3679, 3720), 'numpy.power', 'numpy.power', (['intrinsic_process_temp[0]', '(3)'], {}), '(intrinsic_process_temp[0], 3)\n', (3690, 3720), False, 'import numpy\n'), ((4753, 4793), 'numpy.where', 'numpy.where', (['(intrinsic_process[0, :] < 0)'], {}), '(intrinsic_process[0, :] < 0)\n', (4764, 4793), False, 'import numpy\n'), ((4881, 4902), 'numpy.sin', 'numpy.sin', (['(k * radius)'], {}), '(k * radius)\n', (4890, 4902), False, 'import numpy\n'), ((4901, 4917), 'numpy.sin', 'numpy.sin', (['theta'], {}), '(theta)\n', (4910, 4917), False, 'import numpy\n'), ((4944, 4965), 'numpy.sin', 'numpy.sin', (['(k * radius)'], {}), '(k * radius)\n', (4953, 4965), False, 'import numpy\n'), ((4964, 4980), 'numpy.cos', 'numpy.cos', (['theta'], {}), '(theta)\n', (4973, 4980), False, 'import numpy\n'), ((5008, 5029), 'numpy.cos', 'numpy.cos', (['(k * radius)'], {}), '(k * radius)\n', (5017, 5029), False, 'import numpy\n'), ((6016, 6056), 'numpy.where', 'numpy.where', (['(intrinsic_process[0, :] < 0)'], {}), '(intrinsic_process[0, :] < 0)\n', (6027, 6056), False, 'import numpy\n'), ((6210, 6229), 'numpy.cos', 'numpy.cos', (['newtheta'], {}), 
'(newtheta)\n', (6219, 6229), False, 'import numpy\n'), ((6266, 6285), 'numpy.sin', 'numpy.sin', (['newtheta'], {}), '(newtheta)\n', (6275, 6285), False, 'import numpy\n'), ((7515, 7561), 'numpy.cos', 'numpy.cos', (['(k_twist * intrinsic_process_temp[0])'], {}), '(k_twist * intrinsic_process_temp[0])\n', (7524, 7561), False, 'import numpy\n'), ((7674, 7720), 'numpy.sin', 'numpy.sin', (['(k_twist * intrinsic_process_temp[0])'], {}), '(k_twist * intrinsic_process_temp[0])\n', (7683, 7720), False, 'import numpy\n'), ((7842, 7873), 'numpy.max', 'numpy.max', (['intrinsic_process[0]'], {}), '(intrinsic_process[0])\n', (7851, 7873), False, 'import numpy\n'), ((7876, 7907), 'numpy.min', 'numpy.min', (['intrinsic_process[0]'], {}), '(intrinsic_process[0])\n', (7885, 7907), False, 'import numpy\n'), ((8066, 8134), 'numpy.cos', 'numpy.cos', (['(2 * numpy.pi * (k / 360) * (intrinsic_process[0] / scale))'], {}), '(2 * numpy.pi * (k / 360) * (intrinsic_process[0] / scale))\n', (8075, 8134), False, 'import numpy\n'), ((8209, 8277), 'numpy.sin', 'numpy.sin', (['(2 * numpy.pi * (k / 360) * (intrinsic_process[0] / scale))'], {}), '(2 * numpy.pi * (k / 360) * (intrinsic_process[0] / scale))\n', (8218, 8277), False, 'import numpy\n'), ((8767, 8816), 'numpy.cos', 'numpy.cos', (['(k * numpy.pi * intrinsic_process[0, :])'], {}), '(k * numpy.pi * intrinsic_process[0, :])\n', (8776, 8816), False, 'import numpy\n'), ((8874, 8923), 'numpy.sin', 'numpy.sin', (['(k * numpy.pi * intrinsic_process[0, :])'], {}), '(k * numpy.pi * intrinsic_process[0, :])\n', (8883, 8923), False, 'import numpy\n'), ((9717, 9757), 'numpy.where', 'numpy.where', (['(intrinsic_process[0, :] < 0)'], {}), '(intrinsic_process[0, :] < 0)\n', (9728, 9757), False, 'import numpy\n'), ((9917, 9963), 'numpy.cos', 'numpy.cos', (['(theta + intrinsic_process[2, :] * k)'], {}), '(theta + intrinsic_process[2, :] * k)\n', (9926, 9963), False, 'import numpy\n'), ((9993, 10039), 'numpy.sin', 'numpy.sin', (['(theta + 
intrinsic_process[2, :] * k)'], {}), '(theta + intrinsic_process[2, :] * k)\n', (10002, 10039), False, 'import numpy\n'), ((10548, 10595), 'numpy.angle', 'numpy.angle', (['[dists[0, :] + 1.0j * dists[1, :]]'], {}), '([dists[0, :] + 1.0j * dists[1, :]])\n', (10559, 10595), False, 'import numpy\n'), ((843, 855), 'numpy.cos', 'numpy.cos', (['t'], {}), '(t)\n', (852, 855), False, 'import numpy\n'), ((1980, 2020), 'numpy.sin', 'numpy.sin', (['(k * intrinsic_process_temp[0])'], {}), '(k * intrinsic_process_temp[0])\n', (1989, 2020), False, 'import numpy\n'), ((2019, 2059), 'numpy.sin', 'numpy.sin', (['(k * intrinsic_process_temp[1])'], {}), '(k * intrinsic_process_temp[1])\n', (2028, 2059), False, 'import numpy\n'), ((4803, 4843), 'numpy.where', 'numpy.where', (['(intrinsic_process[0, :] < 0)'], {}), '(intrinsic_process[0, :] < 0)\n', (4814, 4843), False, 'import numpy\n'), ((9767, 9807), 'numpy.where', 'numpy.where', (['(intrinsic_process[0, :] < 0)'], {}), '(intrinsic_process[0, :] < 0)\n', (9778, 9807), False, 'import numpy\n'), ((10647, 10671), 'numpy.sum', 'numpy.sum', (['dists'], {'axis': '(0)'}), '(dists, axis=0)\n', (10656, 10671), False, 'import numpy\n'), ((3392, 3433), 'numpy.min', 'numpy.min', (['intrinsic_process_temp'], {'axis': '(1)'}), '(intrinsic_process_temp, axis=1)\n', (3401, 3433), False, 'import numpy\n'), ((4442, 4484), 'numpy.mean', 'numpy.mean', (['intrinsic_process_temp'], {'axis': '(1)'}), '(intrinsic_process_temp, axis=1)\n', (4452, 4484), False, 'import numpy\n'), ((7028, 7042), 'numpy.sin', 'numpy.sin', (['deg'], {}), '(deg)\n', (7037, 7042), False, 'import numpy\n'), ((7316, 7357), 'numpy.min', 'numpy.min', (['intrinsic_process_temp'], {'axis': '(1)'}), '(intrinsic_process_temp, axis=1)\n', (7325, 7357), False, 'import numpy\n'), ((6883, 6897), 'numpy.cos', 'numpy.cos', (['deg'], {}), '(deg)\n', (6892, 6897), False, 'import numpy\n'), ((10831, 10854), 'numpy.exp', 'numpy.exp', (['(1.0j * angle)'], {}), '(1.0j * angle)\n', (10840, 10854), 
False, 'import numpy\n'), ((10851, 10885), 'numpy.exp', 'numpy.exp', (['(1.0j * angles[i_antena])'], {}), '(1.0j * angles[i_antena])\n', (10860, 10885), False, 'import numpy\n')] |
import sys
import numpy as np
from timeit import default_timer as timer
# Gauss-Jordan elimination over an augmented matrix passed as argv[1]
# (numpy matrix literal, e.g. "1 2 5; 3 4 6"). An identity block is appended
# on the right so the inverse of the coefficient part is produced alongside
# the solution. Timing of the elimination itself is reported at the end.
start = None
end = None
A = np.asmatrix(sys.argv[1])
A = A.astype(float)
I = np.identity(A.shape[0], dtype=float)
N = np.concatenate((A, I),axis=1)
start = timer()
# Iterate over the pivot columns.
for c in range(0, A.shape[1] - 1):
    # Partial pivoting: if the pivot is zero, swap in a row with a
    # non-zero entry in this column.
    if (N[c,c] == 0.0):
        for l in range(c + 1, A.shape[0]):
            if (N[l, c] != 0.0):
                temp = np.copy(N[c, :])
                N[c, :] = np.copy(N[l, :])
                N[l, :] = np.copy(temp)
    # Normalize the pivot row so the pivot equals 1.
    if (N[c,c] != 1):
        N[c, :] = N[c,:]/N[c,c]
    # Eliminate the entries below the pivot.
    for l in range(c + 1, A.shape[0]):
        N[l, :] = N[l, :] - N[l,c] * N[c, :]
    # Eliminate the entries above the pivot (Jordan step).
    if (c-1 >= 0):
        for l in range(c - 1,-1,-1):
            N[l, :] = N[l, :] - N[l, c] * N[c, :]
#print("Matriz A:")
#print(N[:,:A.shape[1]-1])
end = timer()
print("Matriz Inversa:")
print(N[:,A.shape[1]:])
print("\nSolucao encontrada:")
print(N[:,A.shape[1]-1])
print("Tempo de execucao total: %e segundos" % (end - start))
| [
"numpy.identity",
"numpy.copy",
"numpy.asmatrix",
"timeit.default_timer",
"numpy.concatenate"
] | [((102, 126), 'numpy.asmatrix', 'np.asmatrix', (['sys.argv[1]'], {}), '(sys.argv[1])\n', (113, 126), True, 'import numpy as np\n'), ((151, 187), 'numpy.identity', 'np.identity', (['A.shape[0]'], {'dtype': 'float'}), '(A.shape[0], dtype=float)\n', (162, 187), True, 'import numpy as np\n'), ((192, 222), 'numpy.concatenate', 'np.concatenate', (['(A, I)'], {'axis': '(1)'}), '((A, I), axis=1)\n', (206, 222), True, 'import numpy as np\n'), ((231, 238), 'timeit.default_timer', 'timer', ([], {}), '()\n', (236, 238), True, 'from timeit import default_timer as timer\n'), ((951, 958), 'timeit.default_timer', 'timer', ([], {}), '()\n', (956, 958), True, 'from timeit import default_timer as timer\n'), ((450, 466), 'numpy.copy', 'np.copy', (['N[c, :]'], {}), '(N[c, :])\n', (457, 466), True, 'import numpy as np\n'), ((493, 509), 'numpy.copy', 'np.copy', (['N[l, :]'], {}), '(N[l, :])\n', (500, 509), True, 'import numpy as np\n'), ((536, 549), 'numpy.copy', 'np.copy', (['temp'], {}), '(temp)\n', (543, 549), True, 'import numpy as np\n')] |
#!/usr/bin/env python
# coding: utf-8
# # NumPy
#
#
# In[32]:
# NumPy is a library for scientific computations in Python.
# Numpy is one of the packages you have to know if you're going to do data science with Python.
# It is a Python library that provides support for large, multidimensional arrays along with masked
# arrays and matrices, and provides extensive functions for performing array manipulation, including
# mathematical, logical, and shape calculations, sorting, selecting, I/O, discrete Fourier transforms,
# linear algebra, basic statistical operations, random simulations, and so on.
# In[3]:
# Importing the Numpy library
import numpy as np
# In[5]:
# Numpy version
np.__version__
# # Function - numpy.array()
# In[6]:
# Function - numpy.array() (Note only most frequently parameters are explain below)
# numpy.array(object, dtype=None, *, copy=True, order='K', subok=False, ndmin=0, like=None)
# Parameters :
# object : array_like
# An array, any object exposing the array interface, an object whose __array__ method returns an array,
# or any (nested) sequence. If object is a scalar, a 0-dimensional array containing object is returned.
# Example - list object [1,2,3] or list of tuple [(1,2,3),(4,5,6)]
# dtype : data-type (optional)
# The desired data-type for the array. If not given, then the type will be determined as the minimum
# type required to hold the objects in the sequence.
# Example - dtype - float or str
# In[7]:
a = np.array([1,2,3])
b = np.array([(1,2,3),(6,7,8)],dtype = float)
c = np.array([[(1,2,3),(4,5,6)],[(7,8,9),(10,11,12)]], dtype= str)
# In[8]:
print(a, " \t" , type(a))
print("\n",b, " \t" , type(b))
print("\n",c, " \t" , type(c))
# # Numpy supports - element-wise operations
# In[9]:
# When a*2 is used, it performs element-wise operations rather than duplicating the content as with lists.
print(a*2)
print(b*2)
# # Function required for inspecting the Numpy Array
# - a.shape
# - a.ndim
# - a.size
# - a.dtype
# - a.dtype.name
# - a.astype(float)
# In[10]:
# a.shape - Tuple of array dimensions.
## The shape property is usually used to get the current shape of an array
## Example - (3,) - What is the magnitude of each dimension.
c.shape
# In[11]:
# a.ndim - Dimension of the array (1d,2d or nth array)
## Example - a.ndim - Array of dimension 1 (1d).
c.ndim
# In[12]:
# a.size - Number of elements in the array.
## Equal to np.prod(a.shape), i.e., the product of the array’s dimensions.
## Example c.size - shape of c is (2,2,3) = 2*2*3 = 12
c.size
# In[13]:
# a.dtype - Data-type of the array’s elements.
## Example a.dtype - dtype('int32') and b.dtype - dtype('float64')
b.dtype
# In[14]:
# a.dtype.name - A bit-width name for this data-type.
## Un-sized flexible data-type objects do not have this attribute.
## Example x = np.dtype(float) x.name is float64
x = np.dtype(float)
y = np.dtype([('a', np.int32, 8), ('b', np.float64, 6)])
print(x.name,y.name)
# In[15]:
# a.astype(float)
## Convert the array's datatype according to the parameter given
## b.astype - When creating the array b, we used float, and then used astype to convert to integer.
b.astype(int)
# # Different methods to Initialize the numpy array
# ## numpy.arange
# In[16]:
# numpy.arange([start, ]stop, [step, ]dtype=None, *, like=None)
## Return evenly spaced values within a given interval.
## Values are generated within the half-open interval [start, stop)
## (In other words, the interval including start but excluding stop).
## For integer arguments the function is equivalent to the Python built-in range function,
## but returns an ndarray rather than a list.
print(np.arange(0,10,2,dtype = int))
print(np.arange(0,10,0.5,dtype=float))
# ## numpy.linspace
# In[17]:
# numpy.linspace(start, stop, num=50, endpoint=True, retstep=False, dtype=None, axis=0)[source]
## Return evenly spaced numbers over a specified interval.
## Returns num evenly spaced samples, calculated over the interval [start, stop].
## The endpoint of the interval can optionally be excluded.
print(np.linspace(2.0, 3.0, num=5))
print(np.linspace(2.0, 3.0, num=5, endpoint=False))
print(np.linspace(2.0, 3.0, num=5, retstep=True))
# ## Difference between the linspace and arange.
# In[18]:
# Difference between the linspace and arange.
## arange allow you to define the size of the step. linspace allow you to define the number of steps.
## Example where arange might fail are -
print(" Using arange ",np.arange(0, 5, 0.5, dtype=int))
print(" Using arange ",np.arange(-3, 3, 0.5, dtype=int))
print(" Using Linspace ", np.linspace(0, 5, num = 5))
print(" Using Linspace ",np.linspace(-3, 3,num = 5))
# ## Difference between numpy Array and Lists
# |Numpy Array|List|
# |-----|-------|
# |Numpy data structures consume less memory|List take more memory than numpy array|
# |Numpy are faster |Lists are slower as compared to numpy array|
# |NumPy have optimized functions such as linear algebra operations built in||
# |Element wise operation is possible||
# |Array are by default Homogeneous, which means data inside an array must be of the same Datatype.|A list can store different data types|
# ||A list can consist of different nested data size|
# |We can create a N-dimensional array in python using numpy.array().||
# ||A list is easier to modify|
# |Array can handle mathematical operations|A list cannot directly handle a mathematical operations|
# ## numpy.zeros
# In[25]:
# numpy.zeros(shape, dtype=float, order='C', *, like=None)
## Return (ndarray) a new array of given shape and type, filled with zeros.
arr_1d_zeros = np.zeros(5)
arr_2d_zeros = np.zeros((2,5),dtype="int64")
arr_3d_zeros = np.zeros((2,3,4),dtype = int)
# In[31]:
print("\n 1D Array\n",arr_1d_zeros)
print("\n 2D Array\n", arr_2d_zeros)
print("\n 3D Array\n",arr_3d_zeros)
# ## numpy.ones
# In[32]:
# numpy.ones(shape, dtype=None, order='C', *, like=None)
## Return a new array of given shape and type, filled with ones.
arr_1d_ones = np.ones(5)
arr_2d_ones = np.ones((2,5),dtype="int64")
arr_3d_ones = np.ones((2,3,4),dtype = int)
# In[33]:
print("\n 1D Array\n",arr_1d_ones)
print("\n 2D Array\n", arr_2d_ones)
print("\n 3D Array\n",arr_3d_ones)
# ## numpy.full
# In[35]:
# numpy.full(shape, fill_value, dtype=None, order='C', *, like=None)
## Return a new array of given shape and type, filled with fill_value.
arr_1d_full = np.full(2, np.inf)
arr_2d_full = np.full((2, 2), 5)
arr_3d_full = np.full((2, 2,2), [1, 2])
# In[36]:
print("\n 1D Array\n",arr_1d_full)
print("\n 2D Array\n", arr_2d_full)
print("\n 3D Array\n",arr_3d_full)
# ## numpy.eye
# In[37]:
# numpy.eye(N, M=None, k=0, dtype=<class 'float'>, order='C', *, like=None)
## Return a 2-D array with ones on the diagonal and zeros elsewhere
arr_2d_diag0 = np.eye(2, dtype=int)
arr_2d_diag1 = np.eye(3, k=1)
# In[39]:
print("\n 2D Array\n", arr_2d_diag0)
print("\n 2D Array\n", arr_2d_diag1)
# ## random.random
# In[40]:
# random.random(size=None)
## Return random floats in the half-open interval [0.0, 1.0).
## Alias for random_sample to ease forward-porting to the new random API.
np.random.random()
# # Array Manipulation
#
# ## numpy.transpose
# In[55]:
# numpy.transpose(a, axes=None)
# Reverse or permute the axes of an array; returns the modified array.
# Parameters | aarray_like | Input array.
# axes | tuple or list of ints, optional
# If specified, it must be a tuple or list which contains a permutation of [0,1,..,N-1] where N is the
# number of axes of a. The i’th axis of the returned array will correspond to the axis numbered
# axes[i] of the input. If not specified, defaults to range(a.ndim)[::-1], which reverses the order
# of the axes.
x = np.arange(4).reshape((2,2))
print(x)
np.transpose(x)
# In[59]:
x = np.ones((1, 2, 3))
print("Original Array\n",x)
print("\n After Transpose \n",np.transpose(x, (1, 0, 2)))
# ## numpy.vstack
# In[44]:
# numpy.vstack(tup)
# Stack arrays in sequence vertically (row wise).
# Parameters | tup | sequence of ndarrays
# The arrays must have the same shape along all but the first axis.
# 1-D arrays must have the same length.
# Returns | stacked | ndarray
# The array formed by stacking the given arrays, will be at least 2-D.
top_stack = np.linspace(0, 3, 4).reshape(2,2)
bottom_stack = np.linspace(5, 8, 4).reshape(2,2)
# Fix: the original referenced an undefined name `bottom_right`
# (a NameError); the intended array is `bottom_stack`.
vstack = np.vstack((top_stack,bottom_stack))
# In[47]:
print("Array \n",top_stack)
print("\nArray \n",bottom_stack)
print("\nMerged Array \n",vstack)
# ## numpy.hstack
# In[48]:
# numpy.hstack(tup)
# Stack arrays in sequence horizontally (column wise).
# Parameters | tup | sequence of ndarrays
# The arrays must have the same shape along all but the second axis,
#except 1-D arrays which can be any length.
# Returns | stacked | ndarray
# The array formed by stacking the given arrays.
left_stack = np.linspace(0, 3, 4).reshape(2,2)
right_stack = np.linspace(5, 8, 4).reshape(2,2)
hstack = np.hstack((left_stack,right_stack))
# In[49]:
print("Array \n",left_stack)
print("\nArray \n",right_stack)
print("\nMerged Array \n",hstack)
# ## numpy.concatenate
# In[51]:
# numpy.concatenate((a1, a2, ...), axis=0, out=None, dtype=None, casting="same_kind")
# Join a sequence of arrays along an existing axis
# Parameters | a1, a2, …sequence of array_like
# The arrays must have the same shape, except in the dimension corresponding to axis
# (the first, by default).
# axis | int, optional
# The axis along which the arrays will be joined. If axis is None, arrays are flattened before use.
# Default is 0.
# Returns | res | ndarray | he concatenated array.
a = np.array([[1, 2], [3, 4]])
b = np.array([[5, 6]])
# NOTE: the bare expressions below display their result in a notebook cell;
# run as a plain script their return values are discarded.
np.concatenate((a, b), axis=0)
# In[52]:
np.concatenate((a, b.T), axis=1)
# In[53]:
np.concatenate((a, b), axis=None)
# In[ ]:
| [
"numpy.eye",
"numpy.ones",
"numpy.hstack",
"numpy.random.random",
"numpy.array",
"numpy.zeros",
"numpy.linspace",
"numpy.vstack",
"numpy.concatenate",
"numpy.full",
"numpy.dtype",
"numpy.transpose",
"numpy.arange"
] | [((1485, 1504), 'numpy.array', 'np.array', (['[1, 2, 3]'], {}), '([1, 2, 3])\n', (1493, 1504), True, 'import numpy as np\n'), ((1507, 1552), 'numpy.array', 'np.array', (['[(1, 2, 3), (6, 7, 8)]'], {'dtype': 'float'}), '([(1, 2, 3), (6, 7, 8)], dtype=float)\n', (1515, 1552), True, 'import numpy as np\n'), ((1553, 1625), 'numpy.array', 'np.array', (['[[(1, 2, 3), (4, 5, 6)], [(7, 8, 9), (10, 11, 12)]]'], {'dtype': 'str'}), '([[(1, 2, 3), (4, 5, 6)], [(7, 8, 9), (10, 11, 12)]], dtype=str)\n', (1561, 1625), True, 'import numpy as np\n'), ((2897, 2912), 'numpy.dtype', 'np.dtype', (['float'], {}), '(float)\n', (2905, 2912), True, 'import numpy as np\n'), ((2917, 2969), 'numpy.dtype', 'np.dtype', (["[('a', np.int32, 8), ('b', np.float64, 6)]"], {}), "([('a', np.int32, 8), ('b', np.float64, 6)])\n", (2925, 2969), True, 'import numpy as np\n'), ((5646, 5657), 'numpy.zeros', 'np.zeros', (['(5)'], {}), '(5)\n', (5654, 5657), True, 'import numpy as np\n'), ((5673, 5704), 'numpy.zeros', 'np.zeros', (['(2, 5)'], {'dtype': '"""int64"""'}), "((2, 5), dtype='int64')\n", (5681, 5704), True, 'import numpy as np\n'), ((5718, 5748), 'numpy.zeros', 'np.zeros', (['(2, 3, 4)'], {'dtype': 'int'}), '((2, 3, 4), dtype=int)\n', (5726, 5748), True, 'import numpy as np\n'), ((6038, 6048), 'numpy.ones', 'np.ones', (['(5)'], {}), '(5)\n', (6045, 6048), True, 'import numpy as np\n'), ((6063, 6093), 'numpy.ones', 'np.ones', (['(2, 5)'], {'dtype': '"""int64"""'}), "((2, 5), dtype='int64')\n", (6070, 6093), True, 'import numpy as np\n'), ((6106, 6135), 'numpy.ones', 'np.ones', (['(2, 3, 4)'], {'dtype': 'int'}), '((2, 3, 4), dtype=int)\n', (6113, 6135), True, 'import numpy as np\n'), ((6440, 6458), 'numpy.full', 'np.full', (['(2)', 'np.inf'], {}), '(2, np.inf)\n', (6447, 6458), True, 'import numpy as np\n'), ((6473, 6491), 'numpy.full', 'np.full', (['(2, 2)', '(5)'], {}), '((2, 2), 5)\n', (6480, 6491), True, 'import numpy as np\n'), ((6506, 6532), 'numpy.full', 'np.full', (['(2, 2, 2)', '[1, 2]'], 
{}), '((2, 2, 2), [1, 2])\n', (6513, 6532), True, 'import numpy as np\n'), ((6841, 6861), 'numpy.eye', 'np.eye', (['(2)'], {'dtype': 'int'}), '(2, dtype=int)\n', (6847, 6861), True, 'import numpy as np\n'), ((6877, 6891), 'numpy.eye', 'np.eye', (['(3)'], {'k': '(1)'}), '(3, k=1)\n', (6883, 6891), True, 'import numpy as np\n'), ((7178, 7196), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (7194, 7196), True, 'import numpy as np\n'), ((7805, 7820), 'numpy.transpose', 'np.transpose', (['x'], {}), '(x)\n', (7817, 7820), True, 'import numpy as np\n'), ((7839, 7857), 'numpy.ones', 'np.ones', (['(1, 2, 3)'], {}), '((1, 2, 3))\n', (7846, 7857), True, 'import numpy as np\n'), ((8405, 8441), 'numpy.vstack', 'np.vstack', (['(top_stack, bottom_right)'], {}), '((top_stack, bottom_right))\n', (8414, 8441), True, 'import numpy as np\n'), ((8998, 9034), 'numpy.hstack', 'np.hstack', (['(left_stack, right_stack)'], {}), '((left_stack, right_stack))\n', (9007, 9034), True, 'import numpy as np\n'), ((9677, 9703), 'numpy.array', 'np.array', (['[[1, 2], [3, 4]]'], {}), '([[1, 2], [3, 4]])\n', (9685, 9703), True, 'import numpy as np\n'), ((9708, 9726), 'numpy.array', 'np.array', (['[[5, 6]]'], {}), '([[5, 6]])\n', (9716, 9726), True, 'import numpy as np\n'), ((9727, 9757), 'numpy.concatenate', 'np.concatenate', (['(a, b)'], {'axis': '(0)'}), '((a, b), axis=0)\n', (9741, 9757), True, 'import numpy as np\n'), ((9772, 9804), 'numpy.concatenate', 'np.concatenate', (['(a, b.T)'], {'axis': '(1)'}), '((a, b.T), axis=1)\n', (9786, 9804), True, 'import numpy as np\n'), ((9819, 9852), 'numpy.concatenate', 'np.concatenate', (['(a, b)'], {'axis': 'None'}), '((a, b), axis=None)\n', (9833, 9852), True, 'import numpy as np\n'), ((3692, 3722), 'numpy.arange', 'np.arange', (['(0)', '(10)', '(2)'], {'dtype': 'int'}), '(0, 10, 2, dtype=int)\n', (3701, 3722), True, 'import numpy as np\n'), ((3729, 3763), 'numpy.arange', 'np.arange', (['(0)', '(10)', '(0.5)'], {'dtype': 'float'}), '(0, 10, 0.5, 
dtype=float)\n', (3738, 3763), True, 'import numpy as np\n'), ((4100, 4128), 'numpy.linspace', 'np.linspace', (['(2.0)', '(3.0)'], {'num': '(5)'}), '(2.0, 3.0, num=5)\n', (4111, 4128), True, 'import numpy as np\n'), ((4136, 4180), 'numpy.linspace', 'np.linspace', (['(2.0)', '(3.0)'], {'num': '(5)', 'endpoint': '(False)'}), '(2.0, 3.0, num=5, endpoint=False)\n', (4147, 4180), True, 'import numpy as np\n'), ((4188, 4230), 'numpy.linspace', 'np.linspace', (['(2.0)', '(3.0)'], {'num': '(5)', 'retstep': '(True)'}), '(2.0, 3.0, num=5, retstep=True)\n', (4199, 4230), True, 'import numpy as np\n'), ((4509, 4540), 'numpy.arange', 'np.arange', (['(0)', '(5)', '(0.5)'], {'dtype': 'int'}), '(0, 5, 0.5, dtype=int)\n', (4518, 4540), True, 'import numpy as np\n'), ((4565, 4597), 'numpy.arange', 'np.arange', (['(-3)', '(3)', '(0.5)'], {'dtype': 'int'}), '(-3, 3, 0.5, dtype=int)\n', (4574, 4597), True, 'import numpy as np\n'), ((4626, 4650), 'numpy.linspace', 'np.linspace', (['(0)', '(5)'], {'num': '(5)'}), '(0, 5, num=5)\n', (4637, 4650), True, 'import numpy as np\n'), ((4679, 4704), 'numpy.linspace', 'np.linspace', (['(-3)', '(3)'], {'num': '(5)'}), '(-3, 3, num=5)\n', (4690, 4704), True, 'import numpy as np\n'), ((7916, 7942), 'numpy.transpose', 'np.transpose', (['x', '(1, 0, 2)'], {}), '(x, (1, 0, 2))\n', (7928, 7942), True, 'import numpy as np\n'), ((7768, 7780), 'numpy.arange', 'np.arange', (['(4)'], {}), '(4)\n', (7777, 7780), True, 'import numpy as np\n'), ((8313, 8333), 'numpy.linspace', 'np.linspace', (['(0)', '(3)', '(4)'], {}), '(0, 3, 4)\n', (8324, 8333), True, 'import numpy as np\n'), ((8362, 8382), 'numpy.linspace', 'np.linspace', (['(5)', '(8)', '(4)'], {}), '(5, 8, 4)\n', (8373, 8382), True, 'import numpy as np\n'), ((8907, 8927), 'numpy.linspace', 'np.linspace', (['(0)', '(3)', '(4)'], {}), '(0, 3, 4)\n', (8918, 8927), True, 'import numpy as np\n'), ((8955, 8975), 'numpy.linspace', 'np.linspace', (['(5)', '(8)', '(4)'], {}), '(5, 8, 4)\n', (8966, 8975), True, 
'import numpy as np\n')] |
"""
Created on 2018-10-29
@author: <NAME>
<EMAIL>
"""
import copy
import networkx as nx
import torch.nn as nn
import numpy as np
from nord.neural_nets import NeuralDescriptor
from nord.neural_nets.layers import Identity, ScaleLayer
from nord.utils import get_random_value
from .chromosome import Chromosome
from .gene import ConnectionGene, LayerGene
INPUT = -2
OUTPUT = -1
class Genome(object):
    """A NEAT-style genome describing a neural architecture as a graph.

    Layer genes (nodes) and connection genes are stored in two Chromosome
    containers keyed by innovation number.  Two sentinel nodes bracket the
    graph: INPUT (-2) and OUTPUT (-1).  Mutation can split a connection
    with a new node, add a connection, or perturb gene values; crossover
    delegates to the Chromosome implementations.
    """

    def __init__(self, layer_bounds_types, layer_bounds,
                 add_node_rate, add_connection_rate,
                 mutation_rate, innovation=None):
        """Build the minimal genome: INPUT and OUTPUT joined by one connection.

        layer_bounds_types/layer_bounds: per-parameter types and [low, high]
        bounds used when new LayerGenes are created during mutation.
        add_node_rate/add_connection_rate/mutation_rate: mutation probabilities.
        innovation: shared innovation-number generator; when None the initial
        connection gets no innovation number assigned.
        """
        self.connections = Chromosome()
        self.nodes = Chromosome()
        self.layer_bounds = layer_bounds
        self.layer_bounds_types = layer_bounds_types
        self.add_node_rate = add_node_rate
        self.add_connection_rate = add_connection_rate
        self.mutation_rate = mutation_rate
        # Add initial structure
        start_node = LayerGene(None, None, io_node=True)
        end_node = LayerGene(None, None, io_node=True)
        start_node.innovation_number = INPUT
        end_node.innovation_number = OUTPUT
        self.nodes.add_gene(start_node)
        self.nodes.add_gene(end_node)
        connection_node = ConnectionGene(
            start_node.innovation_number, end_node.innovation_number)
        if innovation is not None:
            innovation.assign_number(connection_node)
        self.connections.add_gene(connection_node)
        self.innovation = innovation

    def mutate(self):
        """Apply one structural mutation (node split OR new connection) and
        then value-mutate both chromosomes.

        With probability add_node_rate: disable a random connection and
        insert a fresh node between its endpoints (two new connections).
        Otherwise, with probability add_connection_rate: add a connection
        between two random nodes (INPUT only as source, OUTPUT only as sink),
        skipping duplicates.
        """
        r = get_random_value()
        if r < self.add_node_rate:
            # Split a random existing connection with a new layer gene.
            g = np.random.choice(list(self.connections.genes.keys()))
            gene = self.connections.genes[g]
            start_node, end_node = gene.value
            gene.enabled = False
            new_node = LayerGene(self.layer_bounds_types, self.layer_bounds)
            self.innovation.assign_number(new_node)
            new_start = ConnectionGene(
                start_node, new_node.innovation_number)
            self.innovation.assign_number(new_start)
            new_end = ConnectionGene(
                new_node.innovation_number, end_node)
            self.innovation.assign_number(new_end)
            self.connections.add_gene(new_start)
            self.connections.add_gene(new_end)
            self.nodes.add_gene(new_node)
        elif r <= self.add_node_rate + self.add_connection_rate:
            # Add a brand-new connection between two random nodes.
            nodes = list(self.nodes.genes)
            nodes.remove(INPUT)
            nodes.remove(OUTPUT)
            start_node = np.random.choice(nodes+[INPUT])
            end_node = np.random.choice(nodes+[OUTPUT])
            new_node = ConnectionGene(start_node, end_node)
            if new_node not in self.connections.genes.values():
                self.innovation.assign_number(new_node)
                self.connections.add_gene(new_node)
        # Value-level mutation always runs, whatever structural branch fired.
        self.connections.mutate(self.mutation_rate)
        self.nodes.mutate(self.mutation_rate)

    def crossover(self, other):
        """Return a new Genome whose chromosomes are crossovers of self's
        and other's; self is deep-copied so neither parent is mutated."""
        new = copy.deepcopy(self)
        new.connections = new.connections.crossover(other.connections)
        new.nodes = new.nodes.crossover(other.nodes)
        return new

    def __repr__(self):
        return str({'Connections': self.connections, 'Nodes': self.nodes})

    def __hash__(self):
        return hash(self.connections) + hash(self.nodes)

    def __eq__(self, other):
        """Overrides the default implementation"""
        # Genomes are equal when both chromosomes match gene-by-gene.
        # NOTE(review): when `other` is not a Genome this method implicitly
        # returns None (falsy) instead of NotImplemented — confirm intended.
        if isinstance(other, Genome):
            if not (len(self.nodes.genes) == len(other.nodes.genes) and
                    len(self.connections.genes) == len(other.connections.genes)):
                return False
            for i in self.nodes.genes:
                if not self.nodes.genes[i] == other.nodes.genes[i]:
                    return False
            for i in self.connections.genes:
                if not self.connections.genes[i] == other.connections.genes[i]:
                    return False
            return True

    @staticmethod
    def __from_repr__(rpr):
        """Rebuild a Genome from the dict-string produced by __repr__.

        NOTE(review): the Genome(...) call below passes 8 positional
        arguments while __init__ accepts 6 — this looks like it would raise
        a TypeError; confirm against the current __init__ signature.
        """
        import ast
        g = Genome([int, float, float, int, bool],
                   [[32, 0.0, 0, 1, 0],
                    [256, 0.7, 2.0, 3, 1]], [], [],
                   0.1, 0.1, 0.1, None)
        rpr = ast.literal_eval(rpr)
        connections = rpr['Connections']
        for innovation in connections:
            g.connections.genes[innovation] = ConnectionGene.__from_repr__(
                connections[innovation])
            g.connections.index.append(innovation)
        # Drop the placeholder connection created by __init__ (key None,
        # since no innovation generator was supplied).
        g.connections.genes.pop(None)
        nodes = rpr['Nodes']
        for innovation in nodes:
            g.nodes.genes[innovation] = LayerGene.__from_repr__(
                nodes[innovation])
            g.nodes.index.append(innovation)
        return g

    def to_descriptor(self, dimensions=2):
        """Translate the genome into a NeuralDescriptor.

        Only nodes lying on a direct INPUT->OUTPUT path (and enabled) are
        materialized.  Each active node expands to Conv -> Scale -> Dropout
        [-> MaxPool] -> ReLU6; enabled connections between active nodes
        become descriptor edges.  dimensions selects 1D vs 2D layer types.
        Side effects: sets self.active_nodes and self.actives.
        """
        self.active_nodes = 0
        descriptor = NeuralDescriptor()
        actives = set()
        self.actives = set()
        # Get only active nodes
        for p in self.get_direct_paths():
            for n in p:
                actives.add(n)
        # First add the nodes themselves
        for g in self.nodes.genes:
            gene = self.nodes.genes[g]
            # Don't add inactive nodes
            if gene.innovation_number in actives and gene.enabled:
                if not gene.io:
                    self.active_nodes += 1
                    self.actives.add(str(gene.value))
                # Get the node's name (innovation number)
                innv = str(gene.innovation_number)
                # Get the parameters
                parameters = gene.value
                filter_no = int(parameters[0])
                dropout_rate = float(parameters[1])
                weight_scale = float(parameters[2])
                kernel_size = int(parameters[3])
                max_pool = True if int(parameters[4]) == 1 else False
                out_channels = filter_no
                # --Define the layers and parameters--
                # Convolution layer
                conv_layer = nn.Conv2d
                if dimensions == 1:
                    conv_layer = nn.Conv1d
                # NOTE(review): in_channels is hard-coded to 1000 — confirm
                # this matches the framework's channel-inference behavior.
                conv_parameters = {'in_channels': 1000,
                                   'out_channels': out_channels,
                                   'kernel_size': kernel_size}
                descriptor.add_layer(
                    conv_layer, conv_parameters, name=innv+'in')
                # Scale the weights
                descriptor.add_layer_sequential(
                    ScaleLayer, {'scale': weight_scale}, name=innv+'scale')
                # Dropout layer
                if dimensions == 2:
                    dout = nn.Dropout2d
                else:
                    dout = nn.Dropout
                dout_parameters = {'p': dropout_rate}
                descriptor.add_layer_sequential(
                    dout, dout_parameters, name=innv+'dout')
                # Max pool layer
                if max_pool:
                    pool = nn.MaxPool2d
                    if dimensions == 1:
                        pool = nn.MaxPool1d
                    pool_parameters = {'kernel_size': kernel_size,
                                       'stride': kernel_size}
                    descriptor.add_layer_sequential(
                        pool, pool_parameters, name=innv+'pool')
                # Activation layer
                descriptor.add_layer_sequential(
                    nn.ReLU6, {}, name=innv+'out')
        # Add IO layers
        descriptor.add_layer(Identity, {}, name='-2out')
        descriptor.add_layer(Identity, {}, name='-1in')
        descriptor.first_layer = '-2out'
        descriptor.last_layer = '-1in'
        # Connect the layers
        for g in self.connections.genes:
            gene = self.connections.genes[g]
            from_, to_ = gene.value
            # Connect all active
            if gene.enabled:
                # Only connections from/to active nodes should be added
                if from_ in actives and to_ in actives:
                    last_out = str(from_)+'out'
                    descriptor.connect_layers(last_out, str(to_)+'in')
        return descriptor

    def plot(self):
        """Draw the genome's graph with matplotlib; nodes on a direct
        INPUT->OUTPUT path are highlighted in red, the rest in blue."""
        import matplotlib.pyplot as plt

        def my_layout(G, paths):
            # Custom layered layout: longest paths are placed first, each
            # new path gets its own column (alternating sides of x=0);
            # leftover nodes are stacked in the last column.
            nodes = G.nodes
            lengths = [-len(x) for x in paths]
            sorted_ = np.argsort(lengths)
            positions = dict()
            h = 0
            w = 0
            for index in sorted_:
                h = 0
                added = False
                path = paths[index]
                for node in path:
                    if node not in positions:
                        positions[node] = (w, h)
                        added = True
                        h -= 1
                    else:
                        if h > positions[node][1]:
                            h = positions[node][1]
                if added:
                    if w >= 0:
                        w += 1
                    w *= -1
            h = 0
            for node in nodes:
                if node not in positions:
                    positions[node] = (w, h)
                    h -= 1
            # Nudge OUTPUT (-1) down and INPUT (-2) up for readability.
            if -1 in positions:
                positions[-1] = (positions[-1][0], positions[-1][1]-1)
            if -2 in positions:
                positions[-2] = (positions[-2][0], positions[-2][1]+1)
            return positions
        G = self.to_networkx()
        plt.figure()
        in_path = self.get_direct_paths()
        # pos = graphviz_layout(G, root='-2')
        pos = my_layout(G, in_path)
        nx.draw(G, pos=pos, node_color='b', with_labels=True)
        nodes = set()
        for p in in_path:
            for node in p:
                nodes.add(node)
        nx.draw_networkx_nodes(G, pos=pos,
                               node_color='r',
                               nodelist=list(nodes),
                               with_labels=True)
        plt.show()

    def to_networkx(self, active_only=True):
        """Return the genome as a networkx DiGraph; by default only
        enabled connections contribute edges."""
        G = nx.DiGraph()
        for g in self.connections.genes:
            gene = self.connections.genes[g]
            if gene.enabled or not active_only:
                G.add_edge(*gene.value)
        return G

    def get_direct_paths(self):
        """List all simple paths from INPUT to OUTPUT; [[]] when either
        sentinel node is missing from the active graph."""
        G = self.to_networkx()
        try:
            paths = nx.all_simple_paths(G, INPUT, OUTPUT)
        except nx.NodeNotFound:
            paths = [[]]
        return [p for p in paths]

    def get_recursions(self):
        """List all simple cycles in the active graph."""
        G = self.to_networkx()
        cycles = nx.simple_cycles(G)
        return [c for c in cycles]

    def get_incoming_layers(self):
        """Map each node to the list of nodes with an enabled edge into it."""
        G = self.to_networkx()
        incoming = dict()
        edges = G.edges()
        for edge in edges:
            from_ = edge[0]
            to_ = edge[1]
            if to_ in incoming:
                incoming[to_].append(from_)
            else:
                incoming[to_] = [from_]
        return incoming

    def get_connection_ratio(self):
        """Return (number of direct paths)^2 / number of nodes — a rough
        density measure of the active graph."""
        G = self.to_networkx()
        p = len(self.get_direct_paths())
        r = (p**2)/G.number_of_nodes()
        return r

    def remove_recursions(self):
        """Disable connection genes that participate in cycles.

        NOTE(review): for multi-node cycles only consecutive pairs are
        collected — the closing edge (last -> first) is not added to
        `edges`, so that connection stays enabled; confirm intended.
        """
        recs = self.get_recursions()
        recs.sort(key=len)
        edges = set()
        for rec in recs:
            if len(rec) == 1:
                # Self-loop: single node cycles back onto itself.
                start = rec[0]
                end = rec[0]
                edges.add((start, end))
            else:
                for i in range(1, len(rec)):
                    start = rec[i-1]
                    end = rec[i]
                    edges.add((start, end))
        for c in self.connections.genes:
            if self.connections.genes[c].value in edges:
                self.connections.genes[c].enabled = False
| [
"matplotlib.pyplot.show",
"numpy.random.choice",
"networkx.DiGraph",
"networkx.all_simple_paths",
"networkx.simple_cycles",
"ast.literal_eval",
"numpy.argsort",
"nord.utils.get_random_value",
"matplotlib.pyplot.figure",
"copy.deepcopy",
"networkx.draw",
"nord.neural_nets.NeuralDescriptor"
] | [((1577, 1595), 'nord.utils.get_random_value', 'get_random_value', ([], {}), '()\n', (1593, 1595), False, 'from nord.utils import get_random_value\n'), ((3098, 3117), 'copy.deepcopy', 'copy.deepcopy', (['self'], {}), '(self)\n', (3111, 3117), False, 'import copy\n'), ((4370, 4391), 'ast.literal_eval', 'ast.literal_eval', (['rpr'], {}), '(rpr)\n', (4386, 4391), False, 'import ast\n'), ((5021, 5039), 'nord.neural_nets.NeuralDescriptor', 'NeuralDescriptor', ([], {}), '()\n', (5037, 5039), False, 'from nord.neural_nets import NeuralDescriptor\n'), ((9936, 9948), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (9946, 9948), True, 'import matplotlib.pyplot as plt\n'), ((10085, 10138), 'networkx.draw', 'nx.draw', (['G'], {'pos': 'pos', 'node_color': '"""b"""', 'with_labels': '(True)'}), "(G, pos=pos, node_color='b', with_labels=True)\n", (10092, 10138), True, 'import networkx as nx\n'), ((10457, 10467), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (10465, 10467), True, 'import matplotlib.pyplot as plt\n'), ((10531, 10543), 'networkx.DiGraph', 'nx.DiGraph', ([], {}), '()\n', (10541, 10543), True, 'import networkx as nx\n'), ((11059, 11078), 'networkx.simple_cycles', 'nx.simple_cycles', (['G'], {}), '(G)\n', (11075, 11078), True, 'import networkx as nx\n'), ((8806, 8825), 'numpy.argsort', 'np.argsort', (['lengths'], {}), '(lengths)\n', (8816, 8825), True, 'import numpy as np\n'), ((10844, 10881), 'networkx.all_simple_paths', 'nx.all_simple_paths', (['G', 'INPUT', 'OUTPUT'], {}), '(G, INPUT, OUTPUT)\n', (10863, 10881), True, 'import networkx as nx\n'), ((2617, 2650), 'numpy.random.choice', 'np.random.choice', (['(nodes + [INPUT])'], {}), '(nodes + [INPUT])\n', (2633, 2650), True, 'import numpy as np\n'), ((2673, 2707), 'numpy.random.choice', 'np.random.choice', (['(nodes + [OUTPUT])'], {}), '(nodes + [OUTPUT])\n', (2689, 2707), True, 'import numpy as np\n')] |
# --------------
# Code starts here
import numpy as np
# Code starts here
# Adjacency matrix
# Column-stochastic transition matrix of an 8-page web graph:
# entry (i, j) is the probability of moving from page j+1 to page i+1.
adj_mat = np.array([[0,0,0,0,0,0,1/3,0],
            [1/2,0,1/2,1/3,0,0,0,0],
            [1/2,0,0,0,0,0,0,0],
            [0,1,0,0,0,0,0,0],
            [0,0,1/2,1/3,0,0,1/3,0],
            [0,0,0,1/3,1/3,0,0,1/2],
            [0,0,0,0,1/3,0,0,1/2],
            [0,0,0,0,1/3,1,1/3,0]])
# Compute eigenvalues and eigencevectrs
eigenvalues,eigencevectors=np.linalg.eig(adj_mat)
# Eigen vector corresponding to 1
# L1-normalize the eigenvector so its entries read as probabilities.
# NOTE(review): assumes the eigenvalue 1 appears first (column 0) in
# np.linalg.eig's unordered output — verify if adj_mat changes.
eigen_1=abs(eigencevectors[:,0])/(np.linalg.norm(eigencevectors[:,0],1))
print(eigen_1)
# most important page
# Pages are 1-indexed, hence the +1 after locating the max entry.
page=int(np.where(eigen_1 == eigen_1.max())[0])+1
#page=eigen_1.where()
print(page)
# Code ends here
# --------------
# Code starts here
# Initialize stationary vector I
init_I=np.array([1,0,0,0,0,0,0,0]);
print(init_I)
# Power method: repeatedly apply the transition matrix.
# NOTE(review): the denominator is the L1 norm of the PREVIOUS iterate
# (the whole RHS is evaluated before assignment) — confirm this is the
# intended normalization rather than dividing by the new vector's norm.
for i in range(10):
    init_I=abs(np.dot(adj_mat, init_I))/(np.linalg.norm(init_I,1))
    print(init_I)
# Index of the largest stationary entry, +1 for 1-indexed pages.
power_page = np.where(np.max(init_I) == init_I)[0][0] + 1
print(power_page)
# Perform iterations for power method
# Code ends here
# --------------
# Code starts here
# New Adjancency matrix
# New Adjancency matrix
# Same web graph but page 1 now has no incoming links (dangling column 7
# redistributed) — used to illustrate the need for damping.
new_adj_mat = np.array([[0,0,0,0,0,0,0,0],
            [1/2,0,1/2,1/3,0,0,0,0],
            [1/2,0,0,0,0,0,0,0],
            [0,1,0,0,0,0,0,0],
            [0,0,1/2,1/3,0,0,1/2,0],
            [0,0,0,1/3,1/3,0,0,1/2],
            [0,0,0,0,1/3,0,0,1/2],
            [0,0,0,0,1/3,1,1/2,0]])
# Initialize stationary vector I
new_init_I =np.array([1,0,0,0,0,0,0,0]);
# Perform iterations for power method
for i in range(10):
    new_init_I=abs(np.dot(new_adj_mat, new_init_I))/(np.linalg.norm(new_init_I,1))
    print(new_init_I)
# Code ends here
# --------------
# Alpha value
# Damping factor: probability of following a link vs teleporting.
alpha = 0.85
# Code starts here
# Modified adjancency matrix
n=len(new_adj_mat)
l=np.ones(new_adj_mat.shape).astype(float)
# Google matrix: damped link matrix plus uniform teleportation term.
G=np.dot(alpha,new_adj_mat)+np.dot((1-alpha)*(1/n),l)
# Initialize stationary vector I
final_init_I=np.array([1,0,0,0,0,0,0,0])
# Perform iterations for power method
for i in range(1000):
    final_init_I=abs(np.dot(G, final_init_I))/(np.linalg.norm(final_init_I,1))
print(final_init_I)
# Code ends here
| [
"numpy.ones",
"numpy.linalg.eig",
"numpy.max",
"numpy.array",
"numpy.dot",
"numpy.linalg.norm"
] | [((106, 397), 'numpy.array', 'np.array', (['[[0, 0, 0, 0, 0, 0, 1 / 3, 0], [1 / 2, 0, 1 / 2, 1 / 3, 0, 0, 0, 0], [1 / 2,\n 0, 0, 0, 0, 0, 0, 0], [0, 1, 0, 0, 0, 0, 0, 0], [0, 0, 1 / 2, 1 / 3, 0,\n 0, 1 / 3, 0], [0, 0, 0, 1 / 3, 1 / 3, 0, 0, 1 / 2], [0, 0, 0, 0, 1 / 3,\n 0, 0, 1 / 2], [0, 0, 0, 0, 1 / 3, 1, 1 / 3, 0]]'], {}), '([[0, 0, 0, 0, 0, 0, 1 / 3, 0], [1 / 2, 0, 1 / 2, 1 / 3, 0, 0, 0, 0\n ], [1 / 2, 0, 0, 0, 0, 0, 0, 0], [0, 1, 0, 0, 0, 0, 0, 0], [0, 0, 1 / 2,\n 1 / 3, 0, 0, 1 / 3, 0], [0, 0, 0, 1 / 3, 1 / 3, 0, 0, 1 / 2], [0, 0, 0,\n 0, 1 / 3, 0, 0, 1 / 2], [0, 0, 0, 0, 1 / 3, 1, 1 / 3, 0]])\n', (114, 397), True, 'import numpy as np\n'), ((499, 521), 'numpy.linalg.eig', 'np.linalg.eig', (['adj_mat'], {}), '(adj_mat)\n', (512, 521), True, 'import numpy as np\n'), ((848, 882), 'numpy.array', 'np.array', (['[1, 0, 0, 0, 0, 0, 0, 0]'], {}), '([1, 0, 0, 0, 0, 0, 0, 0])\n', (856, 882), True, 'import numpy as np\n'), ((1229, 1517), 'numpy.array', 'np.array', (['[[0, 0, 0, 0, 0, 0, 0, 0], [1 / 2, 0, 1 / 2, 1 / 3, 0, 0, 0, 0], [1 / 2, 0,\n 0, 0, 0, 0, 0, 0], [0, 1, 0, 0, 0, 0, 0, 0], [0, 0, 1 / 2, 1 / 3, 0, 0,\n 1 / 2, 0], [0, 0, 0, 1 / 3, 1 / 3, 0, 0, 1 / 2], [0, 0, 0, 0, 1 / 3, 0,\n 0, 1 / 2], [0, 0, 0, 0, 1 / 3, 1, 1 / 2, 0]]'], {}), '([[0, 0, 0, 0, 0, 0, 0, 0], [1 / 2, 0, 1 / 2, 1 / 3, 0, 0, 0, 0], [\n 1 / 2, 0, 0, 0, 0, 0, 0, 0], [0, 1, 0, 0, 0, 0, 0, 0], [0, 0, 1 / 2, 1 /\n 3, 0, 0, 1 / 2, 0], [0, 0, 0, 1 / 3, 1 / 3, 0, 0, 1 / 2], [0, 0, 0, 0, \n 1 / 3, 0, 0, 1 / 2], [0, 0, 0, 0, 1 / 3, 1, 1 / 2, 0]])\n', (1237, 1517), True, 'import numpy as np\n'), ((1598, 1632), 'numpy.array', 'np.array', (['[1, 0, 0, 0, 0, 0, 0, 0]'], {}), '([1, 0, 0, 0, 0, 0, 0, 0])\n', (1606, 1632), True, 'import numpy as np\n'), ((2065, 2099), 'numpy.array', 'np.array', (['[1, 0, 0, 0, 0, 0, 0, 0]'], {}), '([1, 0, 0, 0, 0, 0, 0, 0])\n', (2073, 2099), True, 'import numpy as np\n'), ((591, 630), 'numpy.linalg.norm', 'np.linalg.norm', (['eigencevectors[:, 0]', '(1)'], {}), 
'(eigencevectors[:, 0], 1)\n', (605, 630), True, 'import numpy as np\n'), ((1967, 1993), 'numpy.dot', 'np.dot', (['alpha', 'new_adj_mat'], {}), '(alpha, new_adj_mat)\n', (1973, 1993), True, 'import numpy as np\n'), ((1993, 2025), 'numpy.dot', 'np.dot', (['((1 - alpha) * (1 / n))', 'l'], {}), '((1 - alpha) * (1 / n), l)\n', (1999, 2025), True, 'import numpy as np\n'), ((952, 977), 'numpy.linalg.norm', 'np.linalg.norm', (['init_I', '(1)'], {}), '(init_I, 1)\n', (966, 977), True, 'import numpy as np\n'), ((1739, 1768), 'numpy.linalg.norm', 'np.linalg.norm', (['new_init_I', '(1)'], {}), '(new_init_I, 1)\n', (1753, 1768), True, 'import numpy as np\n'), ((1924, 1950), 'numpy.ones', 'np.ones', (['new_adj_mat.shape'], {}), '(new_adj_mat.shape)\n', (1931, 1950), True, 'import numpy as np\n'), ((2200, 2231), 'numpy.linalg.norm', 'np.linalg.norm', (['final_init_I', '(1)'], {}), '(final_init_I, 1)\n', (2214, 2231), True, 'import numpy as np\n'), ((926, 949), 'numpy.dot', 'np.dot', (['adj_mat', 'init_I'], {}), '(adj_mat, init_I)\n', (932, 949), True, 'import numpy as np\n'), ((1705, 1736), 'numpy.dot', 'np.dot', (['new_adj_mat', 'new_init_I'], {}), '(new_adj_mat, new_init_I)\n', (1711, 1736), True, 'import numpy as np\n'), ((2174, 2197), 'numpy.dot', 'np.dot', (['G', 'final_init_I'], {}), '(G, final_init_I)\n', (2180, 2197), True, 'import numpy as np\n'), ((1014, 1028), 'numpy.max', 'np.max', (['init_I'], {}), '(init_I)\n', (1020, 1028), True, 'import numpy as np\n')] |
import numpy as np
from ..base import Parameter
from .optimizer import BaseOptimizer
from benderopt.utils import logb
from .random import RandomOptimizer
class ParzenEstimator(BaseOptimizer):
    """ Parzen Estimator

    This estimator is largely inspired from TPE and hyperopt.
    https://papers.nips.cc/paper/4443-algorithms-for-hyper-parameter-optimization.pdf

    gamma: ratio of best observations to build lowest loss function
    number_of_candidates: number of candidates to draw at each iteration
    subsampling: number of observations to consider at max
    subsampling_type: how to draw observations if number_of_observations > subsampling
    prior_weight: weight of prior when building posterior parameters
    minimum_observations: params will be drawn at random until minimum_observations is reached
    """

    def __init__(
        self,
        optimization_problem,
        gamma=0.15,
        number_of_candidates=100,
        subsampling=100,
        subsampling_type="random",
        prior_weight=0.05,
        minimum_observations=20,
    ):
        super(ParzenEstimator, self).__init__(optimization_problem)
        self.gamma = gamma
        self.number_of_candidates = number_of_candidates
        self.subsampling = subsampling
        self.subsampling_type = subsampling_type
        self.prior_weight = prior_weight
        self.minimum_observations = minimum_observations

    def _generate_samples(self, size, debug=False):
        """Draw `size` new samples guided by past observations (TPE-style).

        When debug is True also returns the per-parameter posterior
        distributions built from the good (l) and bad (g) observations.
        """
        # Selected candidates come from the best third of the drawn pool,
        # so the pool must hold strictly more than 3 * size candidates.
        assert size < int(self.number_of_candidates / 3)
        # 0. If not enough observations, draw at random
        if self.optimization_problem.number_of_observations < self.minimum_observations:
            samples = RandomOptimizer(self.optimization_problem)._generate_samples(size)
            if debug:
                return samples, None, None
            return samples
        # 0. Retrieve self.gamma % best observations (lowest loss) observations_l
        # and worst obervations (greatest loss g) observations_g
        observations_l, observations_g = self.optimization_problem.observations_quantile(
            self.gamma,
            subsampling=min(len(self.observations), self.subsampling),
            subsampling_type=self.subsampling_type,
        )
        # 1. Build by drawing a value for each parameter according to parzen estimation
        samples = [{} for _ in range(size)]
        posterior_parameters_l = []
        posterior_parameters_g = []
        for parameter in self.parameters:
            # 1.a Build empirical distribution of good observations and bad obsevations
            posterior_parameter_l = self._build_posterior_parameter(parameter, observations_l)
            posterior_parameters_l.append(posterior_parameter_l)
            posterior_parameter_g = self._build_posterior_parameter(parameter, observations_g)
            posterior_parameters_g.append(posterior_parameter_g)
            # 1.b Draw candidates
            # NOTE(review): candidates are drawn from the prior search space
            # via RandomOptimizer, not from the l-distribution as in the TPE
            # paper — confirm intended.
            candidates = np.array(
                [
                    x[parameter.name]
                    for x in RandomOptimizer(self.optimization_problem).suggest(
                        self.number_of_candidates
                    )
                ]
            )
            # 1.c Evaluate candidates score according to g / l taking care of zero division
            scores = posterior_parameter_g.pdf(candidates) / np.clip(
                posterior_parameter_l.pdf(candidates), a_min=1e-16, a_max=None
            )
            # Sort candidates (ascending g/l == best first) and keep the top third
            sorted_candidates = candidates[np.argsort(scores)][: int(self.number_of_candidates / 3)]
            selected_candidates = np.random.choice(sorted_candidates, size=size, replace=False)
            for i in range(size):
                samples[i][parameter.name] = selected_candidates[i]
        if debug:
            return samples, posterior_parameters_l, posterior_parameters_g
        return samples

    def _build_posterior_parameter(self, parameter, observations):
        """Build the posterior distribution of `parameter` from the given
        observations, dispatching on the parameter category."""
        observed_values, observed_weights = zip(
            *[
                (observation.sample[parameter.name], observation.weight)
                for observation in observations
            ]
        )
        return parzen_estimator_build_posterior_parameter[parameter.category](
            observed_values=observed_values,
            observed_weights=observed_weights,
            parameter=parameter,
            prior_weight=self.prior_weight,
        )
)
def build_posterior_categorical(observed_values, observed_weights, parameter, prior_weight):
    """Posterior for categorical parameters.

    observed_probabilities are the weighted count of each possible value.
    posterior_probabilities are the weighted sum of prior (initial search space).
    TODO Compare mean (current implem) vs hyperopt approach.

    Returns a new categorical Parameter over the same values with the
    re-normalized posterior probabilities.
    """
    prior_probabilities = np.array(parameter.search_space["probabilities"])
    values = parameter.search_space["values"]
    sum_observed_weights = sum(observed_weights)
    if sum_observed_weights != 0:
        # Weighted frequency of each possible value among the observations.
        observed_probabilities = np.array(
            [
                sum(
                    [
                        observed_weight
                        for observed_value, observed_weight in zip(
                            observed_values, observed_weights
                        )
                        if observed_value == value
                    ]
                )
                / sum_observed_weights
                for value in values
            ]
        )
    else:
        # BUGFIX: observed_probabilities was previously left undefined on
        # this branch, raising NameError below.  With zero observed mass,
        # fall back to zeros so the posterior renormalizes to the prior.
        observed_probabilities = np.zeros(len(values))
    posterior_probabilities = prior_probabilities * prior_weight + observed_probabilities * (
        1 - prior_weight
    )
    # Numerical safety to always have sum = 1
    posterior_probabilities /= sum(posterior_probabilities)
    # Build param
    posterior_parameter = Parameter.from_dict(
        {
            "name": parameter.name,
            "category": "categorical",
            "search_space": {"values": values, "probabilities": list(posterior_probabilities),},
        }
    )
    return posterior_parameter
def find_sigmas_mus(observed_mus, prior_mu, prior_sigma, low, high):
    """Return sorted gaussian centers and per-center widths.

    Centers are the observations plus the prior mean, sorted ascending.
    Each width is the larger gap to the center's neighbours (a finite bound
    acts as an extra neighbour), clipped to
    [prior_sigma / min(100, len(centers) + 1), prior_sigma].
    Also returns the argsort index mapping the unsorted
    (observations + prior) ordering to the sorted one.

    NOTE(review): the final assignment indexes the sorted `sigmas` with an
    unsorted position (`index[-1]`), so it only overwrites the prior's slot
    when the prior happens to be the largest value — behavior preserved
    as-is (see the original TODO about prior index handling).
    """
    # Append the prior mean to the observations and sort everything.
    raw = np.array(list(observed_mus) + [prior_mu])
    order = np.argsort(raw)
    centers = raw[order]
    # Pad with the bounds; an infinite bound is flipped so its (infinite
    # negative) gap never wins the max below.
    left_pad = low if low != -np.inf else np.inf
    right_pad = high if high != np.inf else -np.inf
    padded = np.concatenate(([left_pad], centers, [right_pad]))
    widths = np.maximum(padded[1:-1] - padded[0:-2], padded[2:] - padded[1:-1])
    # Clip widths following the hyperopt heuristic.
    widths = np.clip(widths, prior_sigma / min(100.0, 1.0 + len(centers)), prior_sigma)
    widths[order[-1]] = prior_sigma
    return centers[:], widths[:], order
def build_posterior_uniform(observed_values, observed_weights, parameter, prior_weight):
    """Build a gaussian-mixture posterior for a uniform parameter.

    One normal component is centered on every observation plus one on the
    prior (mid-range mean, full-range sigma).  Mixture weights blend the
    normalized observation weights with `prior_weight` for the prior term,
    reordered to match the sorted centers.
    """
    low = parameter.search_space["low"]
    high = parameter.search_space["high"]
    # Express the uniform prior as one wide gaussian over the range.
    prior_mu = 0.5 * (high + low)
    prior_sigma = high - low
    # Per-observation centers/widths, prior included.
    mus, sigmas, index = find_sigmas_mus(
        observed_mus=observed_values,
        prior_mu=prior_mu,
        prior_sigma=prior_sigma,
        low=low,
        high=high,
    )
    total_weight = sum(observed_weights)
    mixture_weights = np.array(
        [w * (1 - prior_weight) / total_weight for w in observed_weights] + [prior_weight]
    )[index].tolist()
    components = [
        {
            "category": "normal",
            "search_space": {
                "mu": mu.tolist(),
                "sigma": sigma.tolist(),
                "low": low,
                "high": high,
                "step": parameter.search_space.get("step", None),
            },
        }
        for mu, sigma in zip(mus, sigmas)
    ]
    return Parameter.from_dict(
        {
            "name": parameter.name,
            "category": "mixture",
            "search_space": {"parameters": components, "weights": mixture_weights},
        }
    )
def build_posterior_loguniform(observed_values, observed_weights, parameter, prior_weight):
    """Build a lognormal-mixture posterior for a loguniform parameter.

    Works in log space: centers and widths come from find_sigmas_mus on the
    log-transformed observations (prior = mid log-range mean, full log-range
    sigma), then are mapped back with base ** x.  Mixture weights blend the
    normalized observation weights with `prior_weight` for the prior term.
    """
    low_log = parameter.search_space["low_log"]
    high_log = parameter.search_space["high_log"]
    base = parameter.search_space["base"]
    # Log-space prior: mid-range mean, full-range sigma.
    prior_mu_log = 0.5 * (high_log + low_log)
    prior_sigma_log = high_log - low_log
    mus_log, sigmas_log, index = find_sigmas_mus(
        observed_mus=logb(observed_values, base),
        prior_mu=prior_mu_log,
        prior_sigma=prior_sigma_log,
        low=low_log,
        high=high_log,
    )
    # Map centers and widths back from log scale.
    mus = base ** mus_log
    sigmas = base ** sigmas_log
    total_weight = sum(observed_weights)
    mixture_weights = np.array(
        [w * (1 - prior_weight) / total_weight for w in observed_weights] + [prior_weight]
    )[index].tolist()
    components = [
        {
            "category": "lognormal",
            "search_space": {
                "mu": mu.tolist(),
                "sigma": sigma.tolist(),
                "low": parameter.search_space["low"],
                "high": parameter.search_space["high"],
                "step": parameter.search_space["step"],
                "base": parameter.search_space["base"],
            },
        }
        for mu, sigma in zip(mus, sigmas)
    ]
    return Parameter.from_dict(
        {
            "name": parameter.name,
            "category": "mixture",
            "search_space": {"parameters": components, "weights": mixture_weights},
        }
    )
def build_posterior_normal(observed_values, observed_weights, parameter, prior_weight):
    """Build a mixture-of-normals posterior for a normal-prior parameter.

    One normal component is centered on every observation, and one extra
    component (weighted ``prior_weight``) carries the original prior, whose
    mu/sigma come straight from the parameter's search space.
    """
    space = parameter.search_space
    low = space["low"]
    high = space["high"]
    mus, sigmas, index = find_sigmas_mus(
        observed_mus=observed_values,
        prior_mu=space["mu"],
        prior_sigma=space["sigma"],
        low=low,
        high=high,
    )
    total_weight = sum(observed_weights)
    rescaled = [w * (1 - prior_weight) / total_weight for w in observed_weights]
    weights = np.array(rescaled + [prior_weight])[index].tolist()
    components = [
        {
            "category": "normal",
            "search_space": {
                "mu": mu.tolist(),
                "sigma": sigma.tolist(),
                "low": low,
                "high": high,
                # step is optional for normal parameters, hence .get
                "step": space.get("step", None),
            },
        }
        for mu, sigma in zip(mus, sigmas)
    ]
    return Parameter.from_dict(
        {
            "name": parameter.name,
            "category": "mixture",
            "search_space": {"parameters": components, "weights": weights},
        }
    )
def build_posterior_lognormal(observed_values, observed_weights, parameter, prior_weight):
    """Build a mixture-of-lognormals posterior for a lognormal-prior parameter.

    Like the loguniform variant, but the prior's log-space mu/sigma are read
    directly from the parameter's search space instead of being derived from
    the interval bounds.
    """
    space = parameter.search_space
    low_log = space["low_log"]
    high_log = space["high_log"]
    base = space["base"]
    mus_log, sigmas_log, index = find_sigmas_mus(
        observed_mus=logb(observed_values, base),
        prior_mu=space["mu_log"],
        prior_sigma=space["sigma_log"],
        low=low_log,
        high=high_log,
    )
    # Map the component parameters back from log space to the original scale.
    mus = base ** mus_log
    sigmas = base ** sigmas_log
    total_weight = sum(observed_weights)
    rescaled = [w * (1 - prior_weight) / total_weight for w in observed_weights]
    weights = np.array(rescaled + [prior_weight])[index].tolist()
    components = [
        {
            "category": "lognormal",
            "search_space": {
                "mu": mu.tolist(),
                "sigma": sigma.tolist(),
                "low": space["low"],
                "high": space["high"],
                "step": space["step"],
                "base": space["base"],
            },
        }
        for mu, sigma in zip(mus, sigmas)
    ]
    return Parameter.from_dict(
        {
            "name": parameter.name,
            "category": "mixture",
            "search_space": {"parameters": components, "weights": weights},
        }
    )
# Dispatch table mapping a parameter's prior category to the function that
# builds the corresponding Parzen-estimator posterior parameter.
parzen_estimator_build_posterior_parameter = {
    "categorical": build_posterior_categorical,
    "uniform": build_posterior_uniform,
    "loguniform": build_posterior_loguniform,
    "normal": build_posterior_normal,
    "lognormal": build_posterior_lognormal,
}
| [
"numpy.clip",
"benderopt.utils.logb",
"numpy.random.choice",
"numpy.argsort",
"numpy.array",
"numpy.concatenate",
"numpy.maximum"
] | [((4961, 5010), 'numpy.array', 'np.array', (["parameter.search_space['probabilities']"], {}), "(parameter.search_space['probabilities'])\n", (4969, 5010), True, 'import numpy as np\n'), ((6366, 6390), 'numpy.argsort', 'np.argsort', (['unsorted_mus'], {}), '(unsorted_mus)\n', (6376, 6390), True, 'import numpy as np\n'), ((6673, 6774), 'numpy.concatenate', 'np.concatenate', (['([low if low != -np.inf else np.inf], mus, [high if high != np.inf else -np\n .inf])'], {}), '(([low if low != -np.inf else np.inf], mus, [high if high !=\n np.inf else -np.inf]))\n', (6687, 6774), True, 'import numpy as np\n'), ((6799, 6853), 'numpy.maximum', 'np.maximum', (['(tmp[1:-1] - tmp[0:-2])', '(tmp[2:] - tmp[1:-1])'], {}), '(tmp[1:-1] - tmp[0:-2], tmp[2:] - tmp[1:-1])\n', (6809, 6853), True, 'import numpy as np\n'), ((7015, 7064), 'numpy.clip', 'np.clip', (['sigmas', 'sigma_min_value', 'sigma_max_value'], {}), '(sigmas, sigma_min_value, sigma_max_value)\n', (7022, 7064), True, 'import numpy as np\n'), ((3668, 3729), 'numpy.random.choice', 'np.random.choice', (['sorted_candidates'], {'size': 'size', 'replace': '(False)'}), '(sorted_candidates, size=size, replace=False)\n', (3684, 3729), True, 'import numpy as np\n'), ((9218, 9245), 'benderopt.utils.logb', 'logb', (['observed_values', 'base'], {}), '(observed_values, base)\n', (9222, 9245), False, 'from benderopt.utils import logb\n'), ((12622, 12649), 'benderopt.utils.logb', 'logb', (['observed_values', 'base'], {}), '(observed_values, base)\n', (12626, 12649), False, 'from benderopt.utils import logb\n'), ((3576, 3594), 'numpy.argsort', 'np.argsort', (['scores'], {}), '(scores)\n', (3586, 3594), True, 'import numpy as np\n'), ((8468, 8574), 'numpy.array', 'np.array', (['([(x * (1 - prior_weight) / sum_observed_weights) for x in observed_weights\n ] + [prior_weight])'], {}), '([(x * (1 - prior_weight) / sum_observed_weights) for x in\n observed_weights] + [prior_weight])\n', (8476, 8574), True, 'import numpy as np\n'), ((10322, 
10428), 'numpy.array', 'np.array', (['([(x * (1 - prior_weight) / sum_observed_weights) for x in observed_weights\n ] + [prior_weight])'], {}), '([(x * (1 - prior_weight) / sum_observed_weights) for x in\n observed_weights] + [prior_weight])\n', (10330, 10428), True, 'import numpy as np\n'), ((11850, 11956), 'numpy.array', 'np.array', (['([(x * (1 - prior_weight) / sum_observed_weights) for x in observed_weights\n ] + [prior_weight])'], {}), '([(x * (1 - prior_weight) / sum_observed_weights) for x in\n observed_weights] + [prior_weight])\n', (11858, 11956), True, 'import numpy as np\n'), ((13726, 13832), 'numpy.array', 'np.array', (['([(x * (1 - prior_weight) / sum_observed_weights) for x in observed_weights\n ] + [prior_weight])'], {}), '([(x * (1 - prior_weight) / sum_observed_weights) for x in\n observed_weights] + [prior_weight])\n', (13734, 13832), True, 'import numpy as np\n')] |
# Equipe Machine big deep data learning vovozinha science
from ple.games.catcher import Catcher
from ple import PLE
import numpy as np
import random
exploration_rate = 0.1  # epsilon for epsilon-greedy action selection
gamma = 0.9  # discount factor for future rewards in the Q-update
alpha = 0.6  # learning rate for the Q-update
class RandomAgent:
    """Epsilon-greedy tabular Q-learning agent over a (player_x, fruit_x) grid."""

    def __init__(self, actions):
        self.actions = actions
        # Q-table indexed by (player_x, fruit_x, action), randomly initialised
        # in [0, 50) to encourage early exploration.
        self.q_table = np.empty((301, 301, 3))
        for px in range(301):
            for fx in range(301):
                for act in range(3):
                    self.q_table[px][fx][act] = random.randrange(0, 50)

    def pickAction(self, state):
        """Choose an action index: explore with probability exploration_rate."""
        if random.uniform(0, 1) > exploration_rate:
            # Exploitation: take the best known action for this state.
            return self.maxQAction(state)
        # Exploration: sample a raw action and translate it to its index.
        return actionIndex(random.choice(self.actions))

    def maxQAction(self, state):
        """Return the index of the highest-valued action for *state*.

        Ties are broken in favour of the lowest index (first occurrence).
        """
        q_values = self.q_table[state.get("player_x")][state.get("fruit_x")]
        return int(np.argmax(q_values))
def actionIndex(action):
    """Map a raw PLE action (97='a', 100='d', None=stay) to a Q-table index.

    Unknown actions yield None, matching the original fall-through behavior.
    """
    return {97: 0, 100: 1, None: 2}.get(action)
def actionValue(action):
    """Inverse of actionIndex: map a Q-table index back to the raw PLE action.

    Any index other than 0 or 1 (including 2) maps to None, i.e. "stay".
    """
    return {0: 97, 1: 100}.get(action)
def Reward(state0, state1, fruit_reward):
    """Shape the reward from the transition state0 -> state1.

    A bonus of +75 is given when the squared player-fruit distance shrinks,
    -75 when it grows; when it is unchanged, +100 only if the player sits
    exactly under the fruit, else -75. The environment's own fruit_reward is
    scaled by 100 and added on top.
    """
    def squared_gap(state):
        # Squared horizontal distance between fruit and player.
        return (state.get("fruit_x") - state.get("player_x")) ** 2

    base = fruit_reward * 100
    progress = squared_gap(state0) - squared_gap(state1)
    if progress > 0:
        return base + 75
    if progress < 0:
        return base - 75
    # Distance unchanged: reward only if the player is directly under the fruit.
    if state1.get("fruit_x") == state1.get("player_x"):
        return base + 100
    return base - 75
'''
State Format:
{
    'player_x': int, 0 - 205
    'player_vel': float, to int 0 - 60 [-30, 30]
    'fruit_x': int, 0 - 300
    'fruit_y': int 0 - 300
}
Actions:
[97, 100, None]
'''
# Set up the Catcher environment and the Q-learning agent, then train.
game = Catcher(width=256, height=256, init_lives=10)
p = PLE(game, fps=30, display_screen=True, force_fps=False)
p.init()
agent = RandomAgent(p.getActionSet())
nb_frames = 50000
reward = 0.0
print(game.getGameState())
print(p.getActionSet())
for f in range(nb_frames):
    if p.game_over():  # start a fresh episode once all lives are lost
        p.reset_game()
    print("-----------------")
    state0 = game.getGameState()
    action_index = agent.pickAction(state0)
    action = actionValue(action_index)
    print(action)
    fruit_reward = p.act(action)
    state1 = game.getGameState()
    reward = Reward(state0, state1, fruit_reward)
    print(reward)
    current_Q = agent.q_table[state0.get("player_x")][state0.get("fruit_x")][action_index]
    # Q-learning (Bellman) update. The target must use the best next-state
    # Q-VALUE, max_a Q(s', a); maxQAction only returns the argmax INDEX (0-2),
    # so it is used here to look the value up in the table (bug fix: the index
    # itself was previously plugged into the target).
    next_max_q = agent.q_table[state1.get("player_x")][state1.get("fruit_x")][agent.maxQAction(state1)]
    agent.q_table[state0.get("player_x")][state0.get("fruit_x")][action_index] = current_Q + alpha * (reward + gamma * next_max_q - current_Q)
    print(action_index)
    print(current_Q)
    print(agent.q_table[state0.get("player_x")][state0.get("fruit_x")][action_index])
    print("-----------------")
| [
"random.uniform",
"random.choice",
"random.randrange",
"ple.PLE",
"numpy.empty",
"ple.games.catcher.Catcher"
] | [((2107, 2152), 'ple.games.catcher.Catcher', 'Catcher', ([], {'width': '(256)', 'height': '(256)', 'init_lives': '(10)'}), '(width=256, height=256, init_lives=10)\n', (2114, 2152), False, 'from ple.games.catcher import Catcher\n'), ((2158, 2213), 'ple.PLE', 'PLE', (['game'], {'fps': '(30)', 'display_screen': '(True)', 'force_fps': '(False)'}), '(game, fps=30, display_screen=True, force_fps=False)\n', (2161, 2213), False, 'from ple import PLE\n'), ((290, 313), 'numpy.empty', 'np.empty', (['(301, 301, 3)'], {}), '((301, 301, 3))\n', (298, 313), True, 'import numpy as np\n'), ((548, 568), 'random.uniform', 'random.uniform', (['(0)', '(1)'], {}), '(0, 1)\n', (562, 568), False, 'import random\n'), ((629, 656), 'random.choice', 'random.choice', (['self.actions'], {}), '(self.actions)\n', (642, 656), False, 'import random\n'), ((488, 511), 'random.randrange', 'random.randrange', (['(0)', '(50)'], {}), '(0, 50)\n', (504, 511), False, 'import random\n')] |
import numpy as np
def rgb2yuv(r, g, b, mode="ycbcr"):
    """Convert an RGB triple to YUV/YCbCr using 8-bit full-scale coefficients.

    mode="ycbcr" returns the raw matrix product; mode="yuv" additionally
    offsets the two chroma channels by -128. Any other mode returns None.
    """
    matrix = np.asarray([
        [0.299, 0.587, 0.114],   # Y  (luma)
        [-0.169, -0.331, 0.5],   # U / Cb
        [0.5, -0.419, -0.081],   # V / Cr
    ])
    rgb_vec = np.asarray([r, g, b])
    if mode == "ycbcr":
        return matrix.dot(rgb_vec)
    if mode == "yuv":
        return matrix.dot(rgb_vec) - np.array([0, 128.0, 128.0])
    return None
def yuv2rgb(y, u, v):
    """Convert a YUV triple back to RGB (inverse of the forward matrix).

    Returns a length-3 float array (r, g, b).
    """
    matrix = np.asarray([
        [1.0, 0.0, 1.402],      # R
        [1.0, -0.344, -0.714],  # G
        [1.0, 1.772, 0.0],      # B
    ])
    # Bug fix: the vector was wrapped as [[y, u, v]] (shape (1, 3)), making
    # the (3, 3) . (1, 3) dot product raise a ValueError. A matrix-vector
    # product needs a 1-D vector.
    return matrix.dot(np.asarray([y, u, v]))
def detectColormode(mode):
    """Normalize a color-mode label to either "rgb" or "yuv".

    Raises:
        ValueError: if *mode* is not one of the recognized labels.
    """
    if mode in ("RGB", "RGBY"):
        return "rgb"
    # "YCbYr" is kept for backward compatibility (it is a typo of the
    # standard "YCbCr", which is now accepted as well).
    if mode in ("YUV", "YCbCr", "YCbYr"):
        return "yuv"
    raise ValueError("Unknow color mode: {}".format(mode))
| [
"numpy.array",
"numpy.asarray"
] | [((188, 209), 'numpy.asarray', 'np.asarray', (['[Y, U, V]'], {}), '([Y, U, V])\n', (198, 209), True, 'import numpy as np\n'), ((527, 548), 'numpy.asarray', 'np.asarray', (['[r, g, b]'], {}), '([r, g, b])\n', (537, 548), True, 'import numpy as np\n'), ((568, 591), 'numpy.asarray', 'np.asarray', (['[[y, u, v]]'], {}), '([[y, u, v]])\n', (578, 591), True, 'import numpy as np\n'), ((258, 279), 'numpy.asarray', 'np.asarray', (['[r, g, b]'], {}), '([r, g, b])\n', (268, 279), True, 'import numpy as np\n'), ((353, 380), 'numpy.array', 'np.array', (['[0, 128.0, 128.0]'], {}), '([0, 128.0, 128.0])\n', (361, 380), True, 'import numpy as np\n'), ((328, 349), 'numpy.asarray', 'np.asarray', (['[r, g, b]'], {}), '([r, g, b])\n', (338, 349), True, 'import numpy as np\n')] |
## writed by <NAME> 2022-05-05
__all__ = ["filter_nan"]
import numpy as np
def filter_nan(sim, obs):
    """Drop paired entries wherever the observation is NaN.

    Args:
        sim: simulated values, sequence or array aligned with *obs*.
        obs: observed values; NaN entries mark pairs to discard.

    Returns:
        (s1, o1): float arrays of the kept simulated / observed values,
        in their original order.
    """
    obs_arr = np.asarray(obs, dtype=float)
    sim_arr = np.asarray(sim, dtype=float)
    # Vectorized replacement for the original element-by-element copy loop.
    keep = ~np.isnan(obs_arr)
    return sim_arr[keep], obs_arr[keep]
| [
"numpy.empty",
"numpy.isnan"
] | [((158, 173), 'numpy.empty', 'np.empty', (['count'], {}), '(count)\n', (166, 173), True, 'import numpy as np\n'), ((183, 198), 'numpy.empty', 'np.empty', (['count'], {}), '(count)\n', (191, 198), True, 'import numpy as np\n'), ((248, 264), 'numpy.isnan', 'np.isnan', (['obs[i]'], {}), '(obs[i])\n', (256, 264), True, 'import numpy as np\n'), ((129, 142), 'numpy.isnan', 'np.isnan', (['obs'], {}), '(obs)\n', (137, 142), True, 'import numpy as np\n')] |
import numpy as np
import nibabel as nib
import copy
from eisen.transforms.imaging import CreateConstantFlags
from eisen.transforms.imaging import RenameFields
from eisen.transforms.imaging import FilterFields
from eisen.transforms.imaging import ResampleNiftiVolumes
from eisen.transforms.imaging import NiftiToNumpy
from eisen.transforms.imaging import NumpyToNifti
from eisen.transforms.imaging import CropCenteredSubVolumes
from eisen.transforms.imaging import MapValues
class TestCreateConstantFlags:
    """CreateConstantFlags should inject the given constant key/value pairs."""

    def setup_class(self):
        self.data = {'image': np.random.rand(32, 32, 3), 'label': 1}
        self.tform_one = CreateConstantFlags(['flag1', 'flag2'], [32.2, 42.0])
        self.tform_two = CreateConstantFlags(['flag3', 'flag4', 'flag5'], ['flag3', 42, False])

    def test_call(self):
        # First transform: two float flags.
        self.data = self.tform_one(self.data)
        for key, expected in [('flag1', 32.2), ('flag2', 42.0)]:
            assert key in self.data.keys()
            assert self.data[key] == expected

        # Second transform: mixed-type flags (str, int, bool).
        self.data = self.tform_two(self.data)
        for key in ('flag3', 'flag4', 'flag5'):
            assert key in self.data.keys()
        assert self.data['flag3'] == 'flag3'
        assert self.data['flag4'] == 42
        assert self.data['flag5'] is False
class TestRenameFields:
    """RenameFields should move values to new keys and remove the old ones."""

    def setup_class(self):
        self.data = {'image': np.ones([32, 32, 3], dtype=np.float32), 'label': 0}
        self.tform_one = RenameFields(['image', 'label'], ['new_image', 'new_label'])
        self.tform_two = RenameFields(['new_image'], ['image'])

    def test_call(self):
        # Rename both fields; the old keys must disappear.
        self.data = self.tform_one(self.data)
        keys = set(self.data.keys())
        assert {'new_image', 'new_label'} <= keys
        assert not {'image', 'label'} & keys
        assert np.all(self.data['new_image'] == 1)
        assert self.data['new_label'] == 0

        # Rename back; values must survive the round trip.
        self.data = self.tform_two(self.data)
        keys = set(self.data.keys())
        assert 'new_image' not in keys
        assert 'image' in keys
        assert np.all(self.data['image'] == 1)
class TestFilterFields:
    """FilterFields should keep only the requested keys."""

    def setup_class(self):
        self.data = {'image': np.ones([32, 32, 3], dtype=np.float32), 'label': 0}
        self.tform_one = FilterFields(['image', 'label'])
        self.tform_two = FilterFields(['image'])

    def test_call(self):
        # Filtering with all keys present is a no-op.
        self.data = self.tform_one(self.data)
        for key in ('image', 'label'):
            assert key in self.data.keys()
        assert np.all(self.data['image'] == 1)
        assert self.data['label'] == 0

        # Filtering down to 'image' must drop 'label'.
        self.data = self.tform_two(self.data)
        assert 'label' not in self.data.keys()
        assert 'image' in self.data.keys()
        assert np.all(self.data['image'] == 1)
class TestResampleNiftiVolumes:
    """Tests ResampleNiftiVolumes at half, identity, and double voxel spacing.

    Note: test_call mutates self.data sequentially, so the three resampling
    checks build on each other's output.
    """

    def setup_class(self):
        # 32^3 volume whose value equals the index along the last axis
        # (0..31), with an identity affine (1 mm isotropic spacing).
        data = np.ones([32, 32, 32]).astype(np.float32)
        data = data * np.asarray(range(32))
        img = nib.Nifti1Image(data, np.eye(4))
        self.data = {
            'image': img,
            'label': 0
        }

        self.tform_one = ResampleNiftiVolumes(['image'], [0.5, 0.5, 0.5], interpolation='linear')
        self.tform_two = ResampleNiftiVolumes(['image'], [1.0, 1.0, 1.0], interpolation='linear')
        self.tform_three = ResampleNiftiVolumes(['image'], [2.0, 2.0, 2.0], interpolation='linear')

    def test_call(self):
        # Upsample to 0.5 mm spacing: 63 voxels per side, values 0..31 in
        # 0.5 steps along the last axis (linear interpolation of the ramp).
        self.data = self.tform_one(self.data)

        assert 'image' in self.data.keys()
        assert 'label' in self.data.keys()

        assert self.data['image'].shape[0] == 63
        assert self.data['image'].shape[1] == 63
        assert self.data['image'].shape[2] == 63

        dta = np.asanyarray(self.data['image'].dataobj)

        assert np.max(dta) == 31
        assert np.min(dta) == 0
        assert np.all(dta[0, 0, :] == np.arange(0, 31.5, step=0.5))

        # Resample back to 1.0 mm spacing: original 32^3 ramp is recovered.
        self.data = self.tform_two(self.data)

        assert 'image' in self.data.keys()
        assert 'label' in self.data.keys()

        assert self.data['image'].shape[0] == 32
        assert self.data['image'].shape[1] == 32
        assert self.data['image'].shape[2] == 32

        dta = np.asanyarray(self.data['image'].dataobj)

        assert np.max(dta) == 31
        assert np.min(dta) == 0
        assert np.all(dta[0, 0, :] == np.arange(0, 32, step=1))

        # Downsample to 2.0 mm spacing: 17 voxels per side, ramp values in
        # steps of 2 (max becomes 30 because 31 falls between samples).
        self.data = self.tform_three(self.data)

        assert 'image' in self.data.keys()
        assert 'label' in self.data.keys()

        assert self.data['image'].shape[0] == 17
        assert self.data['image'].shape[1] == 17
        assert self.data['image'].shape[2] == 17

        dta = np.asanyarray(self.data['image'].dataobj)

        assert np.max(dta) == 30
        assert np.min(dta) == 0
        assert np.all(dta[0, 0, 0:16] == np.arange(0, 32, step=2))
class TestNiftiToNumpy:
    """Tests NiftiToNumpy: Nifti -> float32 ndarray, with channel-first
    transposition when multichannel=True."""

    def setup_class(self):
        # Random volumes: 'image' is single-channel, 'label' has a trailing
        # channel axis of size 3 (to exercise the multichannel path).
        self.np_data = np.random.rand(32, 32, 32).astype(np.float32)
        self.np_label = np.random.rand(32, 32, 32, 3).astype(np.float32)

        img = nib.Nifti1Image(self.np_data, np.eye(4))
        lbl = nib.Nifti1Image(self.np_label, np.eye(4))

        self.data = {
            'image': img,
            'label': lbl
        }

        self.tform_one = NiftiToNumpy(['image'])
        self.tform_two = NiftiToNumpy(['label'], multichannel=True)

    def test_call(self):
        # Single-channel: data comes back unchanged as float32 ndarray.
        self.data = self.tform_one(self.data)

        assert isinstance(self.data['image'], np.ndarray)
        assert self.data['image'].dtype == np.float32
        assert np.all(self.np_data == self.data['image'])

        # Multichannel: the trailing channel axis is moved to the front,
        # so transposing it back must recover the original array.
        self.data = self.tform_two(self.data)

        assert isinstance(self.data['label'], np.ndarray)
        assert self.data['label'].dtype == np.float32
        assert np.all(self.np_label == self.data['label'].transpose([1, 2, 3, 0]))
        assert self.data['label'].shape[0] == 3
class TestNumpyToNifti:
    """Tests NumpyToNifti: ndarray -> Nifti image with an identity affine,
    preserving the voxel data."""

    def setup_class(self):
        self.np_img = np.random.rand(32, 32, 32).astype(np.float32)
        self.np_lbl = np.random.rand(32, 32, 32).astype(np.float32)
        # Reference Nifti images, used only for type comparison in test_call.
        self.img = nib.Nifti1Image(self.np_img, np.eye(4))
        self.lbl = nib.Nifti1Image(self.np_lbl, np.eye(4))
        self.data = {'image': self.np_img, 'label': self.np_lbl}
        self.tform_one = NumpyToNifti(['image', 'label'])

    def test_call(self):
        self.data = self.tform_one(self.data)
        # Both fields must have become Nifti images with identity affines.
        assert isinstance(self.data['image'], type(self.img))
        assert isinstance(self.data['label'], type(self.lbl))
        assert np.array_equal(self.data['image'].affine, np.eye(4))
        assert np.array_equal(self.data['label'].affine, np.eye(4))
        # Voxel data must round-trip exactly (after casting back to float32).
        img = np.asanyarray(self.data['image'].dataobj).astype(np.float32)
        assert np.array_equal(img, self.np_img)
        lbl = np.asanyarray(self.data['label'].dataobj).astype(np.float32)
        assert np.array_equal(lbl, self.np_lbl)
class TestCropCenteredSubVolumes:
    """Tests CropCenteredSubVolumes: centered crop/pad to a target spatial
    size, leaving any leading channel axis untouched.

    Note: test_call mutates self.data sequentially across the three crops.
    """

    def setup_class(self):
        # 'image' is a plain 32^3 volume; 'other' has a leading channel axis
        # of size 3, which the crop must not touch.
        self.data_one = np.random.rand(32, 32, 32).astype(np.float32)

        self.data_two = np.random.rand(3, 32, 32, 32).astype(np.float32)

        self.data = {
            'image': self.data_one,
            'other': self.data_two
        }

        self.tform_one = CropCenteredSubVolumes(['image', 'other'], [30, 30, 30])
        self.tform_two = CropCenteredSubVolumes(['other'], [10, 40, 60])
        self.tform_three = CropCenteredSubVolumes(['image'], [20, 10, 8])

    def test_call(self):
        # Crop 32 -> 30 per side: a centered crop drops one voxel on each
        # edge, i.e. index range [1:31] on every spatial axis.
        self.data = self.tform_one(self.data)

        assert isinstance(self.data['image'], np.ndarray)
        assert isinstance(self.data['other'], np.ndarray)

        assert self.data['image'].shape[0] == 30
        assert self.data['image'].shape[1] == 30
        assert self.data['image'].shape[2] == 30

        assert np.all(self.data['image'] == self.data_one[1:31, 1:31, 1:31])

        assert self.data['other'].shape[0] == 3
        assert self.data['other'].shape[1] == 30
        assert self.data['other'].shape[2] == 30
        assert self.data['other'].shape[3] == 30

        assert np.all(self.data['other'] == self.data_two[:, 1:31, 1:31, 1:31])

        # Mixed crop/pad: sizes both smaller (10) and larger (40, 60) than
        # the current 30^3 volume; only shapes are checked here.
        self.data = self.tform_two(self.data)

        assert isinstance(self.data['other'], np.ndarray)

        assert self.data['other'].shape[0] == 3
        assert self.data['other'].shape[1] == 10
        assert self.data['other'].shape[2] == 40
        assert self.data['other'].shape[3] == 60

        # Asymmetric crop of the single-channel volume.
        self.data = self.tform_three(self.data)

        assert self.data['image'].shape[0] == 20
        assert self.data['image'].shape[1] == 10
        assert self.data['image'].shape[2] == 8
class TestMapValues:
    """MapValues should rescale field values into the requested [min, max]."""

    def setup_class(self):
        self.data = {
            'image': np.random.rand(32, 32, 32).astype(np.float32),
            'other': np.random.rand(32, 32, 32).astype(np.float32)
        }
        self.tform_one = MapValues(['image'], 0, 1)
        self.tform_two = MapValues(['image'], 0, 100)
        self.tform_three = MapValues(['other'], 100, 1000)

    def test_call(self):
        # Map 'image' to [0, 1].
        self.data = self.tform_one(self.data)
        image = self.data['image']
        assert np.isclose(np.max(image), 1, atol=1e-04)
        assert np.min(image) == 0

        # Re-map the same field to [0, 100].
        self.data = self.tform_two(self.data)
        image = self.data['image']
        assert np.isclose(np.max(image), 100, atol=1e-04)
        assert np.min(image) == 0

        # Map 'other' to a range with a non-zero minimum.
        self.data = self.tform_three(self.data)
        other = self.data['other']
        assert np.isclose(np.max(other), 1000, atol=1e-04)
        assert np.isclose(np.min(other), 100, atol=1e-04)
| [
"numpy.eye",
"eisen.transforms.imaging.ResampleNiftiVolumes",
"numpy.random.rand",
"eisen.transforms.imaging.NiftiToNumpy",
"numpy.ones",
"numpy.arange",
"eisen.transforms.imaging.NumpyToNifti",
"numpy.asanyarray",
"numpy.max",
"numpy.array_equal",
"eisen.transforms.imaging.CropCenteredSubVolume... | [((665, 718), 'eisen.transforms.imaging.CreateConstantFlags', 'CreateConstantFlags', (["['flag1', 'flag2']", '[32.2, 42.0]'], {}), "(['flag1', 'flag2'], [32.2, 42.0])\n", (684, 718), False, 'from eisen.transforms.imaging import CreateConstantFlags\n'), ((744, 814), 'eisen.transforms.imaging.CreateConstantFlags', 'CreateConstantFlags', (["['flag3', 'flag4', 'flag5']", "['flag3', 42, False]"], {}), "(['flag3', 'flag4', 'flag5'], ['flag3', 42, False])\n", (763, 814), False, 'from eisen.transforms.imaging import CreateConstantFlags\n'), ((1560, 1620), 'eisen.transforms.imaging.RenameFields', 'RenameFields', (["['image', 'label']", "['new_image', 'new_label']"], {}), "(['image', 'label'], ['new_image', 'new_label'])\n", (1572, 1620), False, 'from eisen.transforms.imaging import RenameFields\n'), ((1646, 1684), 'eisen.transforms.imaging.RenameFields', 'RenameFields', (["['new_image']", "['image']"], {}), "(['new_image'], ['image'])\n", (1658, 1684), False, 'from eisen.transforms.imaging import RenameFields\n'), ((1963, 1998), 'numpy.all', 'np.all', (["(self.data['new_image'] == 1)"], {}), "(self.data['new_image'] == 1)\n", (1969, 1998), True, 'import numpy as np\n'), ((2200, 2231), 'numpy.all', 'np.all', (["(self.data['image'] == 1)"], {}), "(self.data['image'] == 1)\n", (2206, 2231), True, 'import numpy as np\n'), ((2427, 2459), 'eisen.transforms.imaging.FilterFields', 'FilterFields', (["['image', 'label']"], {}), "(['image', 'label'])\n", (2439, 2459), False, 'from eisen.transforms.imaging import FilterFields\n'), ((2485, 2508), 'eisen.transforms.imaging.FilterFields', 'FilterFields', (["['image']"], {}), "(['image'])\n", (2497, 2508), False, 'from eisen.transforms.imaging import FilterFields\n'), ((2684, 2715), 'numpy.all', 'np.all', (["(self.data['image'] == 1)"], {}), "(self.data['image'] == 1)\n", (2690, 2715), True, 'import numpy as np\n'), ((2909, 2940), 'numpy.all', 'np.all', (["(self.data['image'] == 1)"], 
{}), "(self.data['image'] == 1)\n", (2915, 2940), True, 'import numpy as np\n'), ((3259, 3331), 'eisen.transforms.imaging.ResampleNiftiVolumes', 'ResampleNiftiVolumes', (["['image']", '[0.5, 0.5, 0.5]'], {'interpolation': '"""linear"""'}), "(['image'], [0.5, 0.5, 0.5], interpolation='linear')\n", (3279, 3331), False, 'from eisen.transforms.imaging import ResampleNiftiVolumes\n'), ((3357, 3429), 'eisen.transforms.imaging.ResampleNiftiVolumes', 'ResampleNiftiVolumes', (["['image']", '[1.0, 1.0, 1.0]'], {'interpolation': '"""linear"""'}), "(['image'], [1.0, 1.0, 1.0], interpolation='linear')\n", (3377, 3429), False, 'from eisen.transforms.imaging import ResampleNiftiVolumes\n'), ((3457, 3529), 'eisen.transforms.imaging.ResampleNiftiVolumes', 'ResampleNiftiVolumes', (["['image']", '[2.0, 2.0, 2.0]'], {'interpolation': '"""linear"""'}), "(['image'], [2.0, 2.0, 2.0], interpolation='linear')\n", (3477, 3529), False, 'from eisen.transforms.imaging import ResampleNiftiVolumes\n'), ((3852, 3893), 'numpy.asanyarray', 'np.asanyarray', (["self.data['image'].dataobj"], {}), "(self.data['image'].dataobj)\n", (3865, 3893), True, 'import numpy as np\n'), ((4325, 4366), 'numpy.asanyarray', 'np.asanyarray', (["self.data['image'].dataobj"], {}), "(self.data['image'].dataobj)\n", (4338, 4366), True, 'import numpy as np\n'), ((4796, 4837), 'numpy.asanyarray', 'np.asanyarray', (["self.data['image'].dataobj"], {}), "(self.data['image'].dataobj)\n", (4809, 4837), True, 'import numpy as np\n'), ((5390, 5413), 'eisen.transforms.imaging.NiftiToNumpy', 'NiftiToNumpy', (["['image']"], {}), "(['image'])\n", (5402, 5413), False, 'from eisen.transforms.imaging import NiftiToNumpy\n'), ((5439, 5481), 'eisen.transforms.imaging.NiftiToNumpy', 'NiftiToNumpy', (["['label']"], {'multichannel': '(True)'}), "(['label'], multichannel=True)\n", (5451, 5481), False, 'from eisen.transforms.imaging import NiftiToNumpy\n'), ((5683, 5725), 'numpy.all', 'np.all', (["(self.np_data == self.data['image'])"], {}), 
"(self.np_data == self.data['image'])\n", (5689, 5725), True, 'import numpy as np\n'), ((6418, 6450), 'eisen.transforms.imaging.NumpyToNifti', 'NumpyToNifti', (["['image', 'label']"], {}), "(['image', 'label'])\n", (6430, 6450), False, 'from eisen.transforms.imaging import NumpyToNifti\n'), ((6876, 6908), 'numpy.array_equal', 'np.array_equal', (['img', 'self.np_img'], {}), '(img, self.np_img)\n', (6890, 6908), True, 'import numpy as np\n'), ((6999, 7031), 'numpy.array_equal', 'np.array_equal', (['lbl', 'self.np_lbl'], {}), '(lbl, self.np_lbl)\n', (7013, 7031), True, 'import numpy as np\n'), ((7369, 7425), 'eisen.transforms.imaging.CropCenteredSubVolumes', 'CropCenteredSubVolumes', (["['image', 'other']", '[30, 30, 30]'], {}), "(['image', 'other'], [30, 30, 30])\n", (7391, 7425), False, 'from eisen.transforms.imaging import CropCenteredSubVolumes\n'), ((7451, 7498), 'eisen.transforms.imaging.CropCenteredSubVolumes', 'CropCenteredSubVolumes', (["['other']", '[10, 40, 60]'], {}), "(['other'], [10, 40, 60])\n", (7473, 7498), False, 'from eisen.transforms.imaging import CropCenteredSubVolumes\n'), ((7527, 7573), 'eisen.transforms.imaging.CropCenteredSubVolumes', 'CropCenteredSubVolumes', (["['image']", '[20, 10, 8]'], {}), "(['image'], [20, 10, 8])\n", (7549, 7573), False, 'from eisen.transforms.imaging import CropCenteredSubVolumes\n'), ((7927, 7988), 'numpy.all', 'np.all', (["(self.data['image'] == self.data_one[1:31, 1:31, 1:31])"], {}), "(self.data['image'] == self.data_one[1:31, 1:31, 1:31])\n", (7933, 7988), True, 'import numpy as np\n'), ((8201, 8265), 'numpy.all', 'np.all', (["(self.data['other'] == self.data_two[:, 1:31, 1:31, 1:31])"], {}), "(self.data['other'] == self.data_two[:, 1:31, 1:31, 1:31])\n", (8207, 8265), True, 'import numpy as np\n'), ((9007, 9033), 'eisen.transforms.imaging.MapValues', 'MapValues', (["['image']", '(0)', '(1)'], {}), "(['image'], 0, 1)\n", (9016, 9033), False, 'from eisen.transforms.imaging import MapValues\n'), ((9059, 9087), 
'eisen.transforms.imaging.MapValues', 'MapValues', (["['image']", '(0)', '(100)'], {}), "(['image'], 0, 100)\n", (9068, 9087), False, 'from eisen.transforms.imaging import MapValues\n'), ((9116, 9147), 'eisen.transforms.imaging.MapValues', 'MapValues', (["['other']", '(100)', '(1000)'], {}), "(['other'], 100, 1000)\n", (9125, 9147), False, 'from eisen.transforms.imaging import MapValues\n'), ((579, 604), 'numpy.random.rand', 'np.random.rand', (['(32)', '(32)', '(3)'], {}), '(32, 32, 3)\n', (593, 604), True, 'import numpy as np\n'), ((1461, 1499), 'numpy.ones', 'np.ones', (['[32, 32, 3]'], {'dtype': 'np.float32'}), '([32, 32, 3], dtype=np.float32)\n', (1468, 1499), True, 'import numpy as np\n'), ((2328, 2366), 'numpy.ones', 'np.ones', (['[32, 32, 3]'], {'dtype': 'np.float32'}), '([32, 32, 3], dtype=np.float32)\n', (2335, 2366), True, 'import numpy as np\n'), ((3140, 3149), 'numpy.eye', 'np.eye', (['(4)'], {}), '(4)\n', (3146, 3149), True, 'import numpy as np\n'), ((3910, 3921), 'numpy.max', 'np.max', (['dta'], {}), '(dta)\n', (3916, 3921), True, 'import numpy as np\n'), ((3943, 3954), 'numpy.min', 'np.min', (['dta'], {}), '(dta)\n', (3949, 3954), True, 'import numpy as np\n'), ((4383, 4394), 'numpy.max', 'np.max', (['dta'], {}), '(dta)\n', (4389, 4394), True, 'import numpy as np\n'), ((4416, 4427), 'numpy.min', 'np.min', (['dta'], {}), '(dta)\n', (4422, 4427), True, 'import numpy as np\n'), ((4854, 4865), 'numpy.max', 'np.max', (['dta'], {}), '(dta)\n', (4860, 4865), True, 'import numpy as np\n'), ((4887, 4898), 'numpy.min', 'np.min', (['dta'], {}), '(dta)\n', (4893, 4898), True, 'import numpy as np\n'), ((5212, 5221), 'numpy.eye', 'np.eye', (['(4)'], {}), '(4)\n', (5218, 5221), True, 'import numpy as np\n'), ((5269, 5278), 'numpy.eye', 'np.eye', (['(4)'], {}), '(4)\n', (5275, 5278), True, 'import numpy as np\n'), ((6256, 6265), 'numpy.eye', 'np.eye', (['(4)'], {}), '(4)\n', (6262, 6265), True, 'import numpy as np\n'), ((6315, 6324), 'numpy.eye', 'np.eye', (['(4)'], 
{}), '(4)\n', (6321, 6324), True, 'import numpy as np\n'), ((6706, 6715), 'numpy.eye', 'np.eye', (['(4)'], {}), '(4)\n', (6712, 6715), True, 'import numpy as np\n'), ((6774, 6783), 'numpy.eye', 'np.eye', (['(4)'], {}), '(4)\n', (6780, 6783), True, 'import numpy as np\n'), ((9247, 9273), 'numpy.max', 'np.max', (["self.data['image']"], {}), "(self.data['image'])\n", (9253, 9273), True, 'import numpy as np\n'), ((9305, 9331), 'numpy.min', 'np.min', (["self.data['image']"], {}), "(self.data['image'])\n", (9311, 9331), True, 'import numpy as np\n'), ((9411, 9437), 'numpy.max', 'np.max', (["self.data['image']"], {}), "(self.data['image'])\n", (9417, 9437), True, 'import numpy as np\n'), ((9471, 9497), 'numpy.min', 'np.min', (["self.data['image']"], {}), "(self.data['image'])\n", (9477, 9497), True, 'import numpy as np\n'), ((9579, 9605), 'numpy.max', 'np.max', (["self.data['other']"], {}), "(self.data['other'])\n", (9585, 9605), True, 'import numpy as np\n'), ((9651, 9677), 'numpy.min', 'np.min', (["self.data['other']"], {}), "(self.data['other'])\n", (9657, 9677), True, 'import numpy as np\n'), ((3017, 3038), 'numpy.ones', 'np.ones', (['[32, 32, 32]'], {}), '([32, 32, 32])\n', (3024, 3038), True, 'import numpy as np\n'), ((3998, 4026), 'numpy.arange', 'np.arange', (['(0)', '(31.5)'], {'step': '(0.5)'}), '(0, 31.5, step=0.5)\n', (4007, 4026), True, 'import numpy as np\n'), ((4471, 4495), 'numpy.arange', 'np.arange', (['(0)', '(32)'], {'step': '(1)'}), '(0, 32, step=1)\n', (4480, 4495), True, 'import numpy as np\n'), ((4945, 4969), 'numpy.arange', 'np.arange', (['(0)', '(32)'], {'step': '(2)'}), '(0, 32, step=2)\n', (4954, 4969), True, 'import numpy as np\n'), ((5047, 5073), 'numpy.random.rand', 'np.random.rand', (['(32)', '(32)', '(32)'], {}), '(32, 32, 32)\n', (5061, 5073), True, 'import numpy as np\n'), ((5118, 5147), 'numpy.random.rand', 'np.random.rand', (['(32)', '(32)', '(32)', '(3)'], {}), '(32, 32, 32, 3)\n', (5132, 5147), True, 'import numpy as np\n'), ((6094, 
6120), 'numpy.random.rand', 'np.random.rand', (['(32)', '(32)', '(32)'], {}), '(32, 32, 32)\n', (6108, 6120), True, 'import numpy as np\n'), ((6162, 6188), 'numpy.random.rand', 'np.random.rand', (['(32)', '(32)', '(32)'], {}), '(32, 32, 32)\n', (6176, 6188), True, 'import numpy as np\n'), ((6800, 6841), 'numpy.asanyarray', 'np.asanyarray', (["self.data['image'].dataobj"], {}), "(self.data['image'].dataobj)\n", (6813, 6841), True, 'import numpy as np\n'), ((6923, 6964), 'numpy.asanyarray', 'np.asanyarray', (["self.data['label'].dataobj"], {}), "(self.data['label'].dataobj)\n", (6936, 6964), True, 'import numpy as np\n'), ((7119, 7145), 'numpy.random.rand', 'np.random.rand', (['(32)', '(32)', '(32)'], {}), '(32, 32, 32)\n', (7133, 7145), True, 'import numpy as np\n'), ((7190, 7219), 'numpy.random.rand', 'np.random.rand', (['(3)', '(32)', '(32)', '(32)'], {}), '(3, 32, 32, 32)\n', (7204, 7219), True, 'import numpy as np\n'), ((8857, 8883), 'numpy.random.rand', 'np.random.rand', (['(32)', '(32)', '(32)'], {}), '(32, 32, 32)\n', (8871, 8883), True, 'import numpy as np\n'), ((8925, 8951), 'numpy.random.rand', 'np.random.rand', (['(32)', '(32)', '(32)'], {}), '(32, 32, 32)\n', (8939, 8951), True, 'import numpy as np\n')] |
"""
Based on https://github.com/nshepperd/gpt-2/blob/finetuning/train.py
"""
import json
from pathlib import Path
import sys
import shutil
from typing import List, Tuple
import fire
import numpy as np
import matplotlib.pyplot as plt
import sentencepiece as spm
import tensorflow as tf
import tqdm
from . import model, sample
from lm.data import END_OF_TEXT
from lm.fire_utils import only_allow_defined_args
def main():
    """CLI entry point: expose ``train`` through the python-fire CLI."""
    return fire.Fire(train)
@only_allow_defined_args
def train(
        run_path,
        dataset_path,
        sp_model_path,
        *,
        batch_size,
        lr=1e-3,
        epochs=10,
        sample_length=None,
        sample_num=1,
        sample_every=1000,
        restore_from=None,  # latest by default, or "path/model-STEP"
        save_every=1000,
        log_every=20,
        config='default',
        accum_gradients=1,  # accumulate gradients N times
        find_lr=False,  # instead of normal training, run lr range finder
        validate=False,  # instead of training, run validation and exit
        clean=False,  # clean run folder
        # override hparams from config
        n_ctx=None,
        n_embd=None,
        n_head=None,
        n_layer=None,
        ):
    """Train (or validate / lr-range-test) a GPT-2 style language model.

    Expects ``dataset_path`` to contain pre-tokenized ``train.npy`` and
    ``valid.npy`` arrays, and ``sp_model_path`` to be a SentencePiece model.
    Checkpoints, samples and TensorBoard summaries are written under
    ``run_path``.
    """
    sp_model = spm.SentencePieceProcessor()
    sp_model.load(sp_model_path)

    run_path = Path(run_path)
    if clean and run_path.exists():
        # Only wipe the run folder if it contains nothing unexpected,
        # to avoid deleting unrelated files by accident.
        extra_names = {
            p.name for p in run_path.iterdir()
            if not (
                p.name in {'checkpoints', 'samples', 'summaries', 'params.json'}
                or p.name.startswith('find-lr')
            )}
        assert not extra_names, extra_names
        shutil.rmtree(run_path)
    run_path.mkdir(exist_ok=True, parents=True)

    checkpoints_path = run_path / 'checkpoints'
    samples_path = run_path / 'samples'
    summaries_path = run_path / 'summaries'
    dataset_path = Path(dataset_path)

    # Start from a named hparams preset and apply per-run overrides.
    hparams = model.HPARAMS[config]
    hparams.n_vocab = len(sp_model)
    if n_ctx is not None: hparams.n_ctx = n_ctx
    n_ctx = hparams.n_ctx
    if n_embd is not None: hparams.n_embd = n_embd
    if n_head is not None: hparams.n_head = n_head
    if n_layer is not None: hparams.n_layer = n_layer
    del n_layer, n_embd, n_head

    # Record the exact run configuration for reproducibility.
    params_text = json.dumps(dict(
        hparams=hparams.values(),
        dataset_path=str(dataset_path),
        sp_model_path=sp_model_path,
        batch_size=batch_size,
        accum_gradients=accum_gradients,
        lr=lr,
        epochs=epochs,
        restore_from=str(restore_from),
        argv=sys.argv,
    ), indent=4, sort_keys=True)
    print(params_text)
    if not (validate or find_lr):
        (run_path / 'params.json').write_text(params_text)

    if sample_length is None:
        sample_length = n_ctx - 1
    elif sample_length > n_ctx:
        raise ValueError(
            f'Can\'t get samples longer than window size: {n_ctx}')

    # Number of tokens consumed per optimizer step (used to scale summaries).
    step_tokens = n_ctx * batch_size * accum_gradients

    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    with tf.Session(config=config) as sess:
        context = tf.placeholder(tf.int32, [batch_size, None])
        output = model.model(hparams=hparams, X=context)
        # Next-token prediction: logits at position t predict the token at t+1.
        loss = tf.reduce_mean(
            tf.nn.sparse_softmax_cross_entropy_with_logits(
                labels=context[:, 1:], logits=output['logits'][:, :-1]))

        summaries_path.mkdir(exist_ok=True, parents=True)
        summary_writer = tf.summary.FileWriter(
            summaries_path / 'train', sess.graph)

        tf_sample = sample.sample_sequence(
            hparams=hparams,
            length=sample_length,
            context=context,
            batch_size=batch_size,
            temperature=1.0,
            top_k=40)

        train_vars = tf.trainable_variables()
        learning_rate = tf.placeholder(tf.float32, name='lr')
        opt = tf.train.AdamOptimizer(learning_rate)
        accum_gradients = max(accum_gradients, 1)
        if accum_gradients > 1:
            # Accumulate gradients over several mini-batches before applying.
            train_op, zero_ops, accum_ops = \
                _accum_gradients_ops(train_vars, opt, loss)
        else:
            train_op = opt.minimize(loss, var_list=train_vars)

        saver = tf.train.Saver(
            var_list=train_vars,
            max_to_keep=2,
            keep_checkpoint_every_n_hours=4)
        sess.run(tf.global_variables_initializer())

        if restore_from or checkpoints_path.exists():
            if restore_from is None:
                restore_from = tf.train.latest_checkpoint(checkpoints_path)
            print(f'Restoring from {restore_from}')
            saver.restore(sess, restore_from)

        print(f'Loading dataset from {dataset_path}')
        valid_dataset = np.load(dataset_path / 'valid.npy')
        print(f'Validation dataset has {len(valid_dataset):,} tokens')

        step = 1
        step_path = checkpoints_path / 'step'
        if step_path.exists():
            # Load the step number if we're resuming a run
            # Add 1 so we don't immediately try to save again
            step = int(step_path.read_text()) + 1

        def save():
            # Persist model weights and the current step counter.
            if find_lr:
                return
            checkpoints_path.mkdir(exist_ok=True, parents=True)
            saver.save(sess, checkpoints_path / 'model', global_step=step)
            step_path.write_text(str(step) + '\n')

        def write_summaries(**kwargs):
            # Emit scalar summaries keyed by token count, not step count.
            summary = tf.Summary()
            for k, v in kwargs.items():
                summary.value.add(tag=k, simple_value=v)
            summary_writer.add_summary(summary, step * step_tokens)

        def generate_samples():
            # Sample text from the model and write it next to the checkpoints.
            context_tokens = [sp_model.PieceToId(END_OF_TEXT)]
            all_text = []
            index = 0
            while index < sample_num:
                out = sess.run(
                    tf_sample,
                    feed_dict={context: batch_size * [context_tokens]})
                for i in range(min(sample_num - index, batch_size)):
                    text = sp_model.DecodeIds(list(map(int, out[i])))
                    text = f'======== SAMPLE {index + 1} ========\n{text}\n'
                    all_text.append(text)
                    index += 1
            samples_path.mkdir(exist_ok=True, parents=True)
            (samples_path / f'samples-{step}.txt').write_text(
                '\n'.join(all_text))

        def validation():
            # TODO use more context here
            loss_values = [
                sess.run(loss, feed_dict={context: batch})
                for batch in _valid_batch_generator(
                    valid_dataset, batch_size=batch_size, n_ctx=n_ctx)]
            return np.mean(loss_values)

        if validate:
            print('Validating...')
            loss_value = validation()
            print(f'Validation loss: {loss_value:.4f}')
            return

        train_dataset = np.load(dataset_path / 'train.npy')
        print(f'Train dataset has {len(train_dataset):,} tokens')
        epoch_size = len(train_dataset) // step_tokens

        def train_step():
            # One optimizer step, optionally accumulated over mini-batches.
            batch = _gen_batch(
                train_dataset,
                n_ctx=n_ctx,
                batch_size=batch_size * accum_gradients,
            )
            if accum_gradients > 1:
                sess.run(zero_ops)
                loss_value = 0.
                for i in range(accum_gradients):
                    mini_batch = batch[i * batch_size: (i + 1) * batch_size]
                    *_, mb_loss_value = sess.run(
                        accum_ops + [loss], feed_dict={context: mini_batch})
                    loss_value += mb_loss_value / accum_gradients
                sess.run(train_op, feed_dict={learning_rate: lr})
            else:
                _, loss_value = sess.run(
                    [train_op, loss],
                    feed_dict={context: batch, learning_rate: lr})
            if step % log_every == 0 and not find_lr:
                write_summaries(loss=loss_value, learning_rate=lr)
            return loss_value

        if find_lr:
            # LR range finder: start tiny and grow geometrically each step.
            lr = 1e-6
            max_lr = 10
            lr_multiplier = 1.25
            lr_data = []
            find_lr_path = run_path / f'find-lr-{step}.png'

        print('Training...')
        avg_loss = (0.0, 0.0)
        try:
            for epoch in tqdm.trange(1, epochs + 1, desc='epoch'):
                epoch_pbar = tqdm.trange(epoch_size, desc=f'epoch {epoch}')
                for _ in epoch_pbar:
                    if step % save_every == 0:
                        save()
                        valid_loss = validation()
                        write_summaries(valid_loss=valid_loss)
                    if step % sample_every == 0:
                        generate_samples()
                    lv = train_step()
                    step += 1
                    if find_lr:
                        lr *= lr_multiplier
                        # Stop once lr is too large or loss has clearly diverged.
                        if lr > max_lr or lr_data and lv > 2 * lr_data[0][1]:
                            _plot_find_lr_data(lr_data, find_lr_path)
                            return
                        lr_data.append((lr, lv))
                    # Exponential moving average of the loss for the progress bar.
                    avg_loss = (avg_loss[0] * 0.99 + lv,
                                avg_loss[1] * 0.99 + 1.0)
                    avg = avg_loss[0] / avg_loss[1]
                    epoch_pbar.set_postfix({
                        'step': step,
                        'loss': f'{lv:.2f}',
                        'avg': f'{avg:.2f}',
                    })
            save()
        except KeyboardInterrupt:
            print('Interrupted, saving')
            save()
            sys.exit(1)
def _gen_batch(dataset: np.ndarray, n_ctx: int, batch_size: int):
indices = [np.random.randint(0, len(dataset) - n_ctx)
for _ in range(batch_size)]
return [dataset[idx : idx + n_ctx] for idx in indices]
def _accum_gradients_ops(train_vars, opt, loss):
    """Build ops for accumulating gradients over several mini-batches.

    Returns ``(train_op, zero_ops, accum_ops)``: run ``zero_ops`` first,
    then ``accum_ops`` once per mini-batch, then ``train_op`` to apply.
    Based on https://stackoverflow.com/a/46773161/217088
    """
    accum_vars = []
    for v in train_vars:
        accum_vars.append(
            tf.Variable(tf.zeros_like(v.initialized_value()), trainable=False))
    zero_ops = [acc.assign(tf.zeros_like(acc)) for acc in accum_vars]
    grads_and_vars = opt.compute_gradients(loss, train_vars)
    accum_ops = [
        acc.assign_add(grad)
        for acc, (grad, _) in zip(accum_vars, grads_and_vars)]
    train_op = opt.apply_gradients(
        [(acc, var) for acc, (_, var) in zip(accum_vars, grads_and_vars)])
    return train_op, zero_ops, accum_ops
def _plot_find_lr_data(lr_data: List[Tuple[float, float]], path: Path):
    """Plot loss vs. learning rate (log-x) from the lr range test and save it."""
    rates = [rate for rate, _ in lr_data]
    losses = [loss for _, loss in lr_data]
    plt.figure(figsize=(12, 6))
    plt.plot(rates, losses)
    plt.xscale('log')
    plt.xlabel('learning rate')
    plt.ylabel('loss')
    plt.savefig(path)
    print(f'Saved lr range test to {path}')
    # TODO - save results to json as well, to be able to re-plot
def _valid_batch_generator(dataset, *, batch_size: int, n_ctx: int):
    """Yield batches of non-overlapping *n_ctx*-token windows over *dataset*."""
    starts = range(0, len(dataset) - n_ctx, n_ctx)
    windows = (
        dataset[begin: begin + n_ctx] for begin in tqdm.tqdm(
            starts, desc='validation', leave=False))
    return _batch_it(windows, batch_size=batch_size)
def _batch_it(it, batch_size: int):
batch = []
for x in it:
batch.append(x)
if len(batch) == batch_size:
yield batch
batch = []
# last is dropped
| [
"fire.Fire",
"matplotlib.pyplot.ylabel",
"sentencepiece.SentencePieceProcessor",
"tensorflow.nn.sparse_softmax_cross_entropy_with_logits",
"sys.exit",
"numpy.mean",
"pathlib.Path",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"tensorflow.Session",
"tensorflow.placeholder",
"tensorflow... | [((434, 450), 'fire.Fire', 'fire.Fire', (['train'], {}), '(train)\n', (443, 450), False, 'import fire\n'), ((1235, 1263), 'sentencepiece.SentencePieceProcessor', 'spm.SentencePieceProcessor', ([], {}), '()\n', (1261, 1263), True, 'import sentencepiece as spm\n'), ((1313, 1327), 'pathlib.Path', 'Path', (['run_path'], {}), '(run_path)\n', (1317, 1327), False, 'from pathlib import Path\n'), ((1869, 1887), 'pathlib.Path', 'Path', (['dataset_path'], {}), '(dataset_path)\n', (1873, 1887), False, 'from pathlib import Path\n'), ((2951, 2967), 'tensorflow.ConfigProto', 'tf.ConfigProto', ([], {}), '()\n', (2965, 2967), True, 'import tensorflow as tf\n'), ((10460, 10487), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(12, 6)'}), '(figsize=(12, 6))\n', (10470, 10487), True, 'import matplotlib.pyplot as plt\n'), ((10492, 10554), 'matplotlib.pyplot.plot', 'plt.plot', (['[lr for lr, _ in lr_data]', '[lv for _, lv in lr_data]'], {}), '([lr for lr, _ in lr_data], [lv for _, lv in lr_data])\n', (10500, 10554), True, 'import matplotlib.pyplot as plt\n'), ((10559, 10576), 'matplotlib.pyplot.xscale', 'plt.xscale', (['"""log"""'], {}), "('log')\n", (10569, 10576), True, 'import matplotlib.pyplot as plt\n'), ((10581, 10608), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""learning rate"""'], {}), "('learning rate')\n", (10591, 10608), True, 'import matplotlib.pyplot as plt\n'), ((10613, 10631), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""loss"""'], {}), "('loss')\n", (10623, 10631), True, 'import matplotlib.pyplot as plt\n'), ((10636, 10653), 'matplotlib.pyplot.savefig', 'plt.savefig', (['path'], {}), '(path)\n', (10647, 10653), True, 'import matplotlib.pyplot as plt\n'), ((1646, 1669), 'shutil.rmtree', 'shutil.rmtree', (['run_path'], {}), '(run_path)\n', (1659, 1669), False, 'import shutil\n'), ((3020, 3045), 'tensorflow.Session', 'tf.Session', ([], {'config': 'config'}), '(config=config)\n', (3030, 3045), True, 'import tensorflow as tf\n'), ((3073, 
3117), 'tensorflow.placeholder', 'tf.placeholder', (['tf.int32', '[batch_size, None]'], {}), '(tf.int32, [batch_size, None])\n', (3087, 3117), True, 'import tensorflow as tf\n'), ((3423, 3482), 'tensorflow.summary.FileWriter', 'tf.summary.FileWriter', (["(summaries_path / 'train')", 'sess.graph'], {}), "(summaries_path / 'train', sess.graph)\n", (3444, 3482), True, 'import tensorflow as tf\n'), ((3741, 3765), 'tensorflow.trainable_variables', 'tf.trainable_variables', ([], {}), '()\n', (3763, 3765), True, 'import tensorflow as tf\n'), ((3790, 3827), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'name': '"""lr"""'}), "(tf.float32, name='lr')\n", (3804, 3827), True, 'import tensorflow as tf\n'), ((3842, 3879), 'tensorflow.train.AdamOptimizer', 'tf.train.AdamOptimizer', (['learning_rate'], {}), '(learning_rate)\n', (3864, 3879), True, 'import tensorflow as tf\n'), ((4162, 4249), 'tensorflow.train.Saver', 'tf.train.Saver', ([], {'var_list': 'train_vars', 'max_to_keep': '(2)', 'keep_checkpoint_every_n_hours': '(4)'}), '(var_list=train_vars, max_to_keep=2,\n keep_checkpoint_every_n_hours=4)\n', (4176, 4249), True, 'import tensorflow as tf\n'), ((4680, 4715), 'numpy.load', 'np.load', (["(dataset_path / 'valid.npy')"], {}), "(dataset_path / 'valid.npy')\n", (4687, 4715), True, 'import numpy as np\n'), ((6832, 6867), 'numpy.load', 'np.load', (["(dataset_path / 'train.npy')"], {}), "(dataset_path / 'train.npy')\n", (6839, 6867), True, 'import numpy as np\n'), ((3218, 3324), 'tensorflow.nn.sparse_softmax_cross_entropy_with_logits', 'tf.nn.sparse_softmax_cross_entropy_with_logits', ([], {'labels': 'context[:, 1:]', 'logits': "output['logits'][:, :-1]"}), "(labels=context[:, 1:],\n logits=output['logits'][:, :-1])\n", (3264, 3324), True, 'import tensorflow as tf\n'), ((4300, 4333), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (4331, 4333), True, 'import tensorflow as tf\n'), ((5373, 5385), 'tensorflow.Summary', 
'tf.Summary', ([], {}), '()\n', (5383, 5385), True, 'import tensorflow as tf\n'), ((6616, 6636), 'numpy.mean', 'np.mean', (['loss_values'], {}), '(loss_values)\n', (6623, 6636), True, 'import numpy as np\n'), ((8250, 8290), 'tqdm.trange', 'tqdm.trange', (['(1)', '(epochs + 1)'], {'desc': '"""epoch"""'}), "(1, epochs + 1, desc='epoch')\n", (8261, 8290), False, 'import tqdm\n'), ((10076, 10092), 'tensorflow.zeros_like', 'tf.zeros_like', (['v'], {}), '(v)\n', (10089, 10092), True, 'import tensorflow as tf\n'), ((4458, 4502), 'tensorflow.train.latest_checkpoint', 'tf.train.latest_checkpoint', (['checkpoints_path'], {}), '(checkpoints_path)\n', (4484, 4502), True, 'import tensorflow as tf\n'), ((8321, 8367), 'tqdm.trange', 'tqdm.trange', (['epoch_size'], {'desc': 'f"""epoch {epoch}"""'}), "(epoch_size, desc=f'epoch {epoch}')\n", (8332, 8367), False, 'import tqdm\n'), ((9556, 9567), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (9564, 9567), False, 'import sys\n'), ((10978, 11034), 'tqdm.tqdm', 'tqdm.tqdm', (['start_indices'], {'desc': '"""validation"""', 'leave': '(False)'}), "(start_indices, desc='validation', leave=False)\n", (10987, 11034), False, 'import tqdm\n')] |
#!/usr/bin/env python
# coding: utf-8
# # Feature Engineering
import matplotlib.pyplot as plt
import numpy as np
import os
import pandas as pd
import re
import seaborn as sns
import statsmodels.api as sm
import sys
from scipy import stats
from scipy.special import boxcox1p, logit
from scipy.stats import norm, skew
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.preprocessing import PolynomialFeatures
sys.path.append(os.path.join('..', 'src'))
import importlib
import utils
importlib.reload(utils)
import params
importlib.reload(params)
from params import ProjectParameters
# # define paths and capture data
# Relative data folders following the kedro-style data layer convention.
inputs = os.path.join('..', 'data', '02_intermediate')
outputs = os.path.join('..', 'data', '03_processed')
reports = os.path.join('..', 'data', '06_reporting')
# # set project parameters
# Numeric feature names shared across the project (defined in params.py).
numerical_cols = ProjectParameters().numerical_cols
# # plotting
def get_distribution(column, save_file):
    """Plot the distribution of *column* and save two figures under ``reports``.

    Saves ``<save_file>.jpg`` (histogram with a fitted normal overlay) and
    ``<save_file>_regline.jpg`` (normal Q-Q probability plot).

    Args:
        column: 1-D numeric data (Series or array-like).
        save_file: file name stem for the saved figures.
    """
    # Check the new distribution: histogram with a fitted normal curve.
    sns.distplot(column, fit=norm)
    # Get the fitted parameters used by the function.
    (mu, sigma) = norm.fit(column)
    # Raw string: '\m' and '\s' are invalid escape sequences in a plain
    # string (SyntaxWarning on modern Python); r'' keeps the LaTeX intact.
    plt.legend([r'Normal dist. ($\mu=$ {:.2f} and $\sigma=$ {:.2f} )'.format(mu, sigma)],
               loc='best')
    plt.ylabel('Frequency')
    plt.title('estimate distribution')
    plt.savefig(os.path.join(reports, save_file + '.jpg'), bbox_inches="tight")
    plt.close()
    # Q-Q plot against the normal distribution (unused fig/res bindings removed).
    plt.figure()
    stats.probplot(column, plot=plt)
    plt.savefig(os.path.join(reports, save_file + '_regline.jpg'), bbox_inches="tight")
    plt.close()
# # transformations
# build polynomials
def build_polynomials(df, cols, method='k_degree', degrees=2, testing=False):
    """Expand *df* with polynomial features of *cols*.

    Fix: the ``degrees`` argument was previously ignored (2 was hard-coded in
    both branches); behavior is unchanged for the default ``degrees=2``.

    Args:
        df: input DataFrame.
        cols: columns to raise to polynomial features.
        method: 'simple_square' appends element-wise powers as new columns;
            'k_degree' replaces df with the full polynomial expansion.
        degrees: polynomial degree.
        testing: unused; kept for interface compatibility.

    Returns:
        DataFrame with polynomial features added (or, for 'k_degree', the
        expanded feature matrix).
    """
    print('number of columns before building polynomials:', df.shape[1])
    if method == 'simple_square':
        # Element-wise power of the selected columns, appended alongside.
        poly = df[cols] ** degrees
        poly.columns = [c + f'_power{degrees}' for c in df[cols].columns]
        df = pd.concat([df, poly], axis=1)
    elif method == 'k_degree':
        poly = PolynomialFeatures(degrees, include_bias=False)
        transformed = poly.fit_transform(df[cols])
        # NOTE(review): the transform uses df[cols] but names come from
        # df.columns — these disagree whenever cols != df.columns; confirm
        # the intended usage with callers.
        expanded_cols = poly.get_feature_names(df.columns)
        df = pd.DataFrame(transformed, columns=expanded_cols)
    print('number of columns after building polynomials:', df.shape[1])
    return df
# change skewness
def get_skewness(df):
    """Return a single-column DataFrame of skew per numeric feature, descending."""
    feature_cols = ProjectParameters().numerical_cols
    df = df[feature_cols]
    # keep only non-object, non-bool dtypes
    numeric_features = df.dtypes[
        (df.dtypes != "object") & (df.dtypes != "bool")].index.to_list()
    skewed_features = df[numeric_features].apply(
        lambda x: skew(x.dropna())).sort_values(ascending=False)
    return pd.DataFrame({'skew': skewed_features})
def treat_skewness(df, df_name, bc_lambda=0, testing=False):
    """Apply a Box-Cox (boxcox1p) transform to every feature with skew > 0.75.

    Mutates *df* in place and returns it; set *testing* to print before/after
    skew tables.
    """
    skew_table = get_skewness(df)
    skew_table = skew_table[skew_table['skew'] > 0.75]
    if testing:
        print("There are {} skewed features prone to Box Cox transform on {} data".format(skew_table.shape[0], df_name))
        print(skew_table.head(10))
    for feature in skew_table.index:
        df[feature] = boxcox1p(df[feature], bc_lambda)
    if testing:
        # Re-measure to show what (if anything) is still heavily skewed.
        skew_table = get_skewness(df)
        skew_table = skew_table[skew_table['skew'] > 0.75]
        print("Done transformation to deal with skewness".format(skew_table.shape[0]))
        print(skew_table.head(10))
    return df
# label transformation is only applicable for regression problems
def transform_label(df, transformation = None, testing=False):
    """Transform a regression target (label transforms only make sense for regression).

    Args:
        df: the label values (Series-like).
        transformation: None (copy unchanged), 'log' (log1p), or 'logit'
            (rescale into the open unit interval, then apply logit).
        testing: unused; kept for interface compatibility.

    Returns:
        The transformed copy of *df*.
    """
    print('transform_label function activated')
    result = df.copy()
    if transformation == 'log':
        return np.log1p(result)
    if transformation == 'logit':
        # Pad the range by 1 on each side so the scaled values stay strictly
        # inside (0, 1) and logit is finite.
        upper = max(result) + 1
        lower = min(result) - 1
        scaled = (result - lower) / (upper - lower)
        return logit(scaled)
    return result
| [
"scipy.special.boxcox1p",
"sklearn.preprocessing.PolynomialFeatures",
"seaborn.distplot",
"matplotlib.pyplot.ylabel",
"os.path.join",
"scipy.stats.norm.fit",
"matplotlib.pyplot.close",
"matplotlib.pyplot.figure",
"scipy.special.logit",
"importlib.reload",
"pandas.DataFrame",
"matplotlib.pyplot... | [((505, 528), 'importlib.reload', 'importlib.reload', (['utils'], {}), '(utils)\n', (521, 528), False, 'import importlib\n'), ((544, 568), 'importlib.reload', 'importlib.reload', (['params'], {}), '(params)\n', (560, 568), False, 'import importlib\n'), ((651, 696), 'os.path.join', 'os.path.join', (['""".."""', '"""data"""', '"""02_intermediate"""'], {}), "('..', 'data', '02_intermediate')\n", (663, 696), False, 'import os\n'), ((707, 749), 'os.path.join', 'os.path.join', (['""".."""', '"""data"""', '"""03_processed"""'], {}), "('..', 'data', '03_processed')\n", (719, 749), False, 'import os\n'), ((760, 802), 'os.path.join', 'os.path.join', (['""".."""', '"""data"""', '"""06_reporting"""'], {}), "('..', 'data', '06_reporting')\n", (772, 802), False, 'import os\n'), ((446, 471), 'os.path.join', 'os.path.join', (['""".."""', '"""src"""'], {}), "('..', 'src')\n", (458, 471), False, 'import os\n'), ((850, 869), 'params.ProjectParameters', 'ProjectParameters', ([], {}), '()\n', (867, 869), False, 'from params import ProjectParameters\n'), ((978, 1008), 'seaborn.distplot', 'sns.distplot', (['column'], {'fit': 'norm'}), '(column, fit=norm)\n', (990, 1008), True, 'import seaborn as sns\n'), ((1082, 1098), 'scipy.stats.norm.fit', 'norm.fit', (['column'], {}), '(column)\n', (1090, 1098), False, 'from scipy.stats import norm, skew\n'), ((1249, 1272), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Frequency"""'], {}), "('Frequency')\n", (1259, 1272), True, 'import matplotlib.pyplot as plt\n'), ((1277, 1311), 'matplotlib.pyplot.title', 'plt.title', (['"""estimate distribution"""'], {}), "('estimate distribution')\n", (1286, 1311), True, 'import matplotlib.pyplot as plt\n'), ((1395, 1406), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (1404, 1406), True, 'import matplotlib.pyplot as plt\n'), ((1422, 1434), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (1432, 1434), True, 'import matplotlib.pyplot as plt\n'), ((1445, 1477), 
'scipy.stats.probplot', 'stats.probplot', (['column'], {'plot': 'plt'}), '(column, plot=plt)\n', (1459, 1477), False, 'from scipy import stats\n'), ((1569, 1580), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (1578, 1580), True, 'import matplotlib.pyplot as plt\n'), ((2630, 2669), 'pandas.DataFrame', 'pd.DataFrame', (["{'skew': skewed_features}"], {}), "({'skew': skewed_features})\n", (2642, 2669), True, 'import pandas as pd\n'), ((1328, 1369), 'os.path.join', 'os.path.join', (['reports', "(save_file + '.jpg')"], {}), "(reports, save_file + '.jpg')\n", (1340, 1369), False, 'import os\n'), ((1494, 1543), 'os.path.join', 'os.path.join', (['reports', "(save_file + '_regline.jpg')"], {}), "(reports, save_file + '_regline.jpg')\n", (1506, 1543), False, 'import os\n'), ((1921, 1950), 'pandas.concat', 'pd.concat', (['[df, poly]'], {'axis': '(1)'}), '([df, poly], axis=1)\n', (1930, 1950), True, 'import pandas as pd\n'), ((2358, 2377), 'params.ProjectParameters', 'ProjectParameters', ([], {}), '()\n', (2375, 2377), False, 'from params import ProjectParameters\n'), ((3107, 3139), 'scipy.special.boxcox1p', 'boxcox1p', (['df[feature]', 'bc_lambda'], {}), '(df[feature], bc_lambda)\n', (3115, 3139), False, 'from scipy.special import boxcox1p, logit\n'), ((3621, 3633), 'numpy.log1p', 'np.log1p', (['df'], {}), '(df)\n', (3629, 3633), True, 'import numpy as np\n'), ((1997, 2038), 'sklearn.preprocessing.PolynomialFeatures', 'PolynomialFeatures', (['(2)'], {'include_bias': '(False)'}), '(2, include_bias=False)\n', (2015, 2038), False, 'from sklearn.preprocessing import PolynomialFeatures\n'), ((2164, 2212), 'pandas.DataFrame', 'pd.DataFrame', (['transformed'], {'columns': 'expanded_cols'}), '(transformed, columns=expanded_cols)\n', (2176, 2212), True, 'import pandas as pd\n'), ((3779, 3788), 'scipy.special.logit', 'logit', (['df'], {}), '(df)\n', (3784, 3788), False, 'from scipy.special import boxcox1p, logit\n')] |
import numpy as np
import sys
from gaussquad2d import gaussquad1d, gaussquad2d, gaussquad3d
from masternodes import masternodes
from shap import *
sys.path.insert(0, '../util')
sys.path.insert(0, '../mesh')
def mkmaster(mesh, ndim, pgauss=None):
    """Build the master element data structure for a 2D or 3D mesh.

    Args:
        mesh: mesh dict; only mesh['porder'] is read.
        ndim: spatial dimension, 2 or 3.
        pgauss: Gauss quadrature order; defaults to 2 * porder.

    Returns:
        dict with local nodes, quadrature points/weights, shape functions,
        mass matrices, and direction-stacked convection matrices.

    Raises:
        ValueError: if ndim is not 2 or 3 (previously this fell through and
        raised UnboundLocalError on return).
    """
    if ndim not in (2, 3):
        raise ValueError(f'ndim must be 2 or 3, got {ndim}')
    if pgauss is None:  # fixed: was `pgauss == None`
        pgauss = mesh['porder']*2
    # Instantiates master data structure and copies the polynomial order and local DG points over to the master structure
    master = {}
    master['porder'] = mesh['porder']
    if ndim == 2:
        master['plocvol'], master['tlocvol'], master['plocface'], master['tlocface'], master['corner'], master['perm'], _ = masternodes(master['porder'], ndim)
        master['plocvol'] = master['plocvol'][:,:-1]
        master['gptsface'], master['gwface'] = gaussquad1d(pgauss)
        master['gptsvol'], master['gwvol'] = gaussquad2d(pgauss)
        master['shapface'] = shape1d(master['porder'], master['plocface'], master['gptsface'])
        master['shapvol'] = shape2d(master['porder'], master['plocvol'], master['gptsvol'])
        master['shapvol_nodal'] = shape2d(master['porder'], master['plocvol'], master['plocvol'])    # Useful for computing normal vectors and gradients
        # Generate mass matrices - note the order of the transpose differs from the matlab script because of the C vs Fortran ordering
        master['massface'] = master['shapface'][:,:,0].T@np.diag(master['gwface'])@master['shapface'][:,:,0]
        master['massvol'] = master['shapvol'][:,:,0].T@np.diag(master['gwvol'])@master['shapvol'][:,:,0]
        # Convection matrices
        convx = np.squeeze(master['shapvol'][:,:,0]).T@np.diag(master['gwvol'])@np.squeeze(master['shapvol'][:,:,1])
        convy = np.squeeze(master['shapvol'][:,:,0]).T@np.diag(master['gwvol'])@np.squeeze(master['shapvol'][:,:,2])
        master['conv'] = np.concatenate((convx[None, :, :], convy[None, :, :]), axis=0)     # Adding a leading axis so that they may be concatenated safely
    else:
        # Note the masternodes return order differs from the 2D case.
        master['plocvol'], master['tlocvol'], master['plocface'], master['tlocface'], master['corner'], _, master['perm'] = masternodes(master['porder'], ndim)
        master['gptsface'], master['gwface'] = gaussquad2d(pgauss)
        master['gptsvol'], master['gwvol'] = gaussquad3d(pgauss)
        master['shapface'] = shape2d(master['porder'], master['plocface'], master['gptsface'])
        master['shapvol'] = shape3d(master['porder'], master['plocvol'], master['gptsvol'])
        master['shapvol_nodal'] = shape3d(master['porder'], master['plocvol'], master['plocvol'])    # Shape functions evaluated at the nodes for calculating the gradient
        master['phi_inv'] = np.linalg.pinv(master['shapvol'][:, :, 0])
        # Generate mass matrices - note the order of the transpose differs from the matlab script because of the C vs Fortran ordering
        master['massface'] = master['shapface'][:,:,0].T@np.diag(master['gwface'])@master['shapface'][:,:,0]
        master['massvol'] = master['shapvol'][:,:,0].T@np.diag(master['gwvol'])@master['shapvol'][:,:,0]
        # Convection matrices
        convx = np.squeeze(master['shapvol'][:,:,0]).T@np.diag(master['gwvol'])@np.squeeze(master['shapvol'][:,:,1])
        convy = np.squeeze(master['shapvol'][:,:,0]).T@np.diag(master['gwvol'])@np.squeeze(master['shapvol'][:,:,2])
        convz = np.squeeze(master['shapvol'][:,:,0]).T@np.diag(master['gwvol'])@np.squeeze(master['shapvol'][:,:,3])
        master['conv'] = np.concatenate((convx[None, :, :], convy[None, :, :], convz[None, :, :]), axis=0)
    return master
if __name__ == '__main__':
    np.set_printoptions(suppress=True, linewidth=np.inf, precision=4)

    porder = 3
    # NOTE(review): mkmesh_square is not imported anywhere in this module, so
    # this block raises NameError as written — confirm the intended import.
    mesh = mkmesh_square(porder)
    # NOTE(review): the signature is mkmaster(mesh, ndim, pgauss=None); here
    # 2*porder is passed as ndim, which looks wrong — presumably this meant
    # mkmaster(mesh, 2, 2*porder). Verify before relying on this self-test.
    master = mkmaster(mesh, 2*porder)

    sys.path.insert(0, '../util')
    from import_util import load_mat
    ma1d = load_mat('ma1d')
    mass = load_mat('mass')
    conv = load_mat('conv').transpose((1, 0, 2)).ravel(order='F')

    # NOTE(review): mkmaster above never sets 'ma1d' or 'mass' keys (it uses
    # 'massface'/'massvol'), so these comparisons look stale.
    print(np.allclose(ma1d, master['ma1d']))
    print(np.allclose(mass, master['mass']))
    print(np.allclose(conv, np.ravel(master['conv'])))
"gaussquad2d.gaussquad1d",
"sys.path.insert",
"numpy.allclose",
"numpy.linalg.pinv",
"gaussquad2d.gaussquad3d",
"gaussquad2d.gaussquad2d",
"numpy.squeeze",
"import_util.load_mat",
"numpy.diag",
"numpy.concatenate",
"numpy.ravel",
"masternodes.masternodes",
"numpy.set_printoptions"
] | [((148, 177), 'sys.path.insert', 'sys.path.insert', (['(0)', '"""../util"""'], {}), "(0, '../util')\n", (163, 177), False, 'import sys\n'), ((178, 207), 'sys.path.insert', 'sys.path.insert', (['(0)', '"""../mesh"""'], {}), "(0, '../mesh')\n", (193, 207), False, 'import sys\n'), ((3940, 4005), 'numpy.set_printoptions', 'np.set_printoptions', ([], {'suppress': '(True)', 'linewidth': 'np.inf', 'precision': '(4)'}), '(suppress=True, linewidth=np.inf, precision=4)\n', (3959, 4005), True, 'import numpy as np\n'), ((4097, 4126), 'sys.path.insert', 'sys.path.insert', (['(0)', '"""../util"""'], {}), "(0, '../util')\n", (4112, 4126), False, 'import sys\n'), ((4176, 4192), 'import_util.load_mat', 'load_mat', (['"""ma1d"""'], {}), "('ma1d')\n", (4184, 4192), False, 'from import_util import load_mat\n'), ((4204, 4220), 'import_util.load_mat', 'load_mat', (['"""mass"""'], {}), "('mass')\n", (4212, 4220), False, 'from import_util import load_mat\n'), ((645, 680), 'masternodes.masternodes', 'masternodes', (["master['porder']", 'ndim'], {}), "(master['porder'], ndim)\n", (656, 680), False, 'from masternodes import masternodes\n'), ((782, 801), 'gaussquad2d.gaussquad1d', 'gaussquad1d', (['pgauss'], {}), '(pgauss)\n', (793, 801), False, 'from gaussquad2d import gaussquad1d, gaussquad2d, gaussquad3d\n'), ((847, 866), 'gaussquad2d.gaussquad2d', 'gaussquad2d', (['pgauss'], {}), '(pgauss)\n', (858, 866), False, 'from gaussquad2d import gaussquad1d, gaussquad2d, gaussquad3d\n'), ((1855, 1917), 'numpy.concatenate', 'np.concatenate', (['(convx[None, :, :], convy[None, :, :])'], {'axis': '(0)'}), '((convx[None, :, :], convy[None, :, :]), axis=0)\n', (1869, 1917), True, 'import numpy as np\n'), ((2383, 2418), 'masternodes.masternodes', 'masternodes', (["master['porder']", 'ndim'], {}), "(master['porder'], ndim)\n", (2394, 2418), False, 'from masternodes import masternodes\n'), ((2467, 2486), 'gaussquad2d.gaussquad2d', 'gaussquad2d', (['pgauss'], {}), '(pgauss)\n', (2478, 2486), False, 
'from gaussquad2d import gaussquad1d, gaussquad2d, gaussquad3d\n'), ((2532, 2551), 'gaussquad2d.gaussquad3d', 'gaussquad3d', (['pgauss'], {}), '(pgauss)\n', (2543, 2551), False, 'from gaussquad2d import gaussquad1d, gaussquad2d, gaussquad3d\n'), ((2938, 2980), 'numpy.linalg.pinv', 'np.linalg.pinv', (["master['shapvol'][:, :, 0]"], {}), "(master['shapvol'][:, :, 0])\n", (2952, 2980), True, 'import numpy as np\n'), ((3738, 3823), 'numpy.concatenate', 'np.concatenate', (['(convx[None, :, :], convy[None, :, :], convz[None, :, :])'], {'axis': '(0)'}), '((convx[None, :, :], convy[None, :, :], convz[None, :, :]),\n axis=0)\n', (3752, 3823), True, 'import numpy as np\n'), ((4298, 4331), 'numpy.allclose', 'np.allclose', (['ma1d', "master['ma1d']"], {}), "(ma1d, master['ma1d'])\n", (4309, 4331), True, 'import numpy as np\n'), ((4343, 4376), 'numpy.allclose', 'np.allclose', (['mass', "master['mass']"], {}), "(mass, master['mass'])\n", (4354, 4376), True, 'import numpy as np\n'), ((1676, 1714), 'numpy.squeeze', 'np.squeeze', (["master['shapvol'][:, :, 1]"], {}), "(master['shapvol'][:, :, 1])\n", (1686, 1714), True, 'import numpy as np\n'), ((1793, 1831), 'numpy.squeeze', 'np.squeeze', (["master['shapvol'][:, :, 2]"], {}), "(master['shapvol'][:, :, 2])\n", (1803, 1831), True, 'import numpy as np\n'), ((3442, 3480), 'numpy.squeeze', 'np.squeeze', (["master['shapvol'][:, :, 1]"], {}), "(master['shapvol'][:, :, 1])\n", (3452, 3480), True, 'import numpy as np\n'), ((3559, 3597), 'numpy.squeeze', 'np.squeeze', (["master['shapvol'][:, :, 2]"], {}), "(master['shapvol'][:, :, 2])\n", (3569, 3597), True, 'import numpy as np\n'), ((3676, 3714), 'numpy.squeeze', 'np.squeeze', (["master['shapvol'][:, :, 3]"], {}), "(master['shapvol'][:, :, 3])\n", (3686, 3714), True, 'import numpy as np\n'), ((4406, 4430), 'numpy.ravel', 'np.ravel', (["master['conv']"], {}), "(master['conv'])\n", (4414, 4430), True, 'import numpy as np\n'), ((1400, 1425), 'numpy.diag', 'np.diag', (["master['gwface']"], 
{}), "(master['gwface'])\n", (1407, 1425), True, 'import numpy as np\n'), ((1507, 1531), 'numpy.diag', 'np.diag', (["master['gwvol']"], {}), "(master['gwvol'])\n", (1514, 1531), True, 'import numpy as np\n'), ((1651, 1675), 'numpy.diag', 'np.diag', (["master['gwvol']"], {}), "(master['gwvol'])\n", (1658, 1675), True, 'import numpy as np\n'), ((1768, 1792), 'numpy.diag', 'np.diag', (["master['gwvol']"], {}), "(master['gwvol'])\n", (1775, 1792), True, 'import numpy as np\n'), ((3174, 3199), 'numpy.diag', 'np.diag', (["master['gwface']"], {}), "(master['gwface'])\n", (3181, 3199), True, 'import numpy as np\n'), ((3281, 3305), 'numpy.diag', 'np.diag', (["master['gwvol']"], {}), "(master['gwvol'])\n", (3288, 3305), True, 'import numpy as np\n'), ((3417, 3441), 'numpy.diag', 'np.diag', (["master['gwvol']"], {}), "(master['gwvol'])\n", (3424, 3441), True, 'import numpy as np\n'), ((3534, 3558), 'numpy.diag', 'np.diag', (["master['gwvol']"], {}), "(master['gwvol'])\n", (3541, 3558), True, 'import numpy as np\n'), ((3651, 3675), 'numpy.diag', 'np.diag', (["master['gwvol']"], {}), "(master['gwvol'])\n", (3658, 3675), True, 'import numpy as np\n'), ((1612, 1650), 'numpy.squeeze', 'np.squeeze', (["master['shapvol'][:, :, 0]"], {}), "(master['shapvol'][:, :, 0])\n", (1622, 1650), True, 'import numpy as np\n'), ((1729, 1767), 'numpy.squeeze', 'np.squeeze', (["master['shapvol'][:, :, 0]"], {}), "(master['shapvol'][:, :, 0])\n", (1739, 1767), True, 'import numpy as np\n'), ((3378, 3416), 'numpy.squeeze', 'np.squeeze', (["master['shapvol'][:, :, 0]"], {}), "(master['shapvol'][:, :, 0])\n", (3388, 3416), True, 'import numpy as np\n'), ((3495, 3533), 'numpy.squeeze', 'np.squeeze', (["master['shapvol'][:, :, 0]"], {}), "(master['shapvol'][:, :, 0])\n", (3505, 3533), True, 'import numpy as np\n'), ((3612, 3650), 'numpy.squeeze', 'np.squeeze', (["master['shapvol'][:, :, 0]"], {}), "(master['shapvol'][:, :, 0])\n", (3622, 3650), True, 'import numpy as np\n'), ((4232, 4248), 
'import_util.load_mat', 'load_mat', (['"""conv"""'], {}), "('conv')\n", (4240, 4248), False, 'from import_util import load_mat\n')] |
import numpy as np
import pandas as pd
import random
class DataPreHandle:
    """Static helpers for normalising and discretising tabular data.

    The first two methods operate on 2-D numpy arrays (their previous
    ``pd.DataFrame`` annotations were wrong: ``X[:, n]`` indexing is not
    supported by DataFrames); the last two operate on DataFrames in place.
    """

    # min-max normalisation (linear scaling of each column to [0, 1])
    @staticmethod
    def min_max_normalization(X: np.ndarray):
        """Scale each column of *X* to [0, 1] in place and return it."""
        for n in range(X.shape[1]):
            # hoist min/max so each is computed once per column
            col_min = np.min(X[:, n])
            col_max = np.max(X[:, n])
            X[:, n] = (X[:, n] - col_min) / (col_max - col_min)
        return X

    # z-score normalisation (standardise to a normal-like distribution)
    @staticmethod
    def zero_mean_normalization(X: np.ndarray):
        """Standardise each column of *X* to zero mean / unit std in place."""
        for n in range(X.shape[1]):
            X[:, n] = (X[:, n] - X[:, n].mean()) / X[:, n].std()
        return X

    # discretise continuous features into k ordinal bins
    @staticmethod
    def discrete_normalization(data: pd.DataFrame, k: int, *args):
        """Bucket each feature column into *k* ordinal bins, in place.

        The last column is assumed to be the label and is skipped, as are any
        column names passed via *args*. Returns *data*.
        """
        columns = data.columns[0:-1]
        for column in columns:
            if column in args:
                continue
            bin_width = (data[column].max() - data[column].min()) / k
            col_min = data[column].min()
            # bin index = number of interior bin edges the value is >= to
            t = 0
            for i in range(1, k):
                t += (data[column] >= i * bin_width + col_min) * 1
            data[column] = t
        return data

    # map string categories to ordinal codes
    @staticmethod
    def str2num(data: pd.DataFrame, cols=None):
        """Replace the values of *cols* (default: last column) with sorted ordinal codes.

        Mutates *data* and returns it. Codes are assigned by sorted order of
        the distinct values.
        """
        if cols is None:
            cols = [data.columns.values[-1]]
        for col in cols:
            values = sorted(set(data[col]))
            for i, value in enumerate(values):
                # assign back instead of inplace=True on a column selection,
                # which does not reliably propagate under pandas copy-on-write
                data[col] = data[col].replace(value, i)
        return data
class FeaturePreHandle:
    # draw a random sample of indices
    @staticmethod
    def rand_list(ran: int, size: int):
        """Return ``size`` distinct integers sampled from ``range(ran)``.

        Fix: the original drew ``j = random.randint(0, i - 1)``, i.e.
        Sattolo's algorithm, which never leaves an element in place and thus
        yields a biased shuffle; proper Fisher-Yates draws j from [0, i]
        inclusive.
        """
        r_list = list(range(ran))
        for i in range(len(r_list) - 1, 0, -1):
            j = random.randint(0, i)  # inclusive upper bound -> unbiased shuffle
            r_list[i], r_list[j] = r_list[j], r_list[i]
        return r_list[:size]

    # complement of an index list
    @staticmethod
    def pick_complementary_list(total: int, sub: list):
        """Return the indices in ``range(total)`` that are not in ``sub``.

        Robustness: unlike the original ``list.remove`` loop, this does not
        raise if ``sub`` contains duplicates.
        """
        return [x for x in range(total) if x not in sub]
class ModuleData:
    """Thin container that stores arbitrary keyword arguments as a dict."""
    def __init__(self, **data):
        # Keep the raw keyword arguments; callers read them via self.data.
        self.data = data
| [
"numpy.max",
"random.randint",
"numpy.min"
] | [((2139, 2163), 'random.randint', 'random.randint', (['(0)', '(i - 1)'], {}), '(0, i - 1)\n', (2153, 2163), False, 'import random\n'), ((236, 251), 'numpy.min', 'np.min', (['X[:, n]'], {}), '(X[:, n])\n', (242, 251), True, 'import numpy as np\n'), ((256, 271), 'numpy.max', 'np.max', (['X[:, n]'], {}), '(X[:, n])\n', (262, 271), True, 'import numpy as np\n'), ((274, 289), 'numpy.min', 'np.min', (['X[:, n]'], {}), '(X[:, n])\n', (280, 289), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
"""
Created on Wed Dec 14 10:42:00 2016
@author: fbx182
"""
import pandas as pd
import numpy as np
def resample(df, sr=0.1524, fill=None):
"""
Resamples an input DataFrame to a specified sample rate
Empty cells can be filled with the fill argument
"""
#Calculate the index numbers (integers) where the current data relies
s = (df.index // sr).astype(int)
#and use that as a basis for how many samples should be output:
new_index = np.arange(s.min(), s.max() + 1)
#Save the original column order
cols_org = df.columns.values
#Aggregate numeric columns by taking the mean
df_num = df.select_dtypes(include=[np.number]).groupby(s).mean()
#Aggregate non-numeric columns by selecting the most popular one within each index
df_non_num = df.select_dtypes(exclude=[np.number]).groupby(s).agg(lambda x: x.value_counts().index[0])
#Recombine the numeric and non-numeric columns and reorder columns to original order
grp = pd.concat([df_num, df_non_num], axis=1)[cols_org]
#Save the NaN mask in the end of the dataframe to restore it after interpolation/fill
grp_mask = grp.fillna(method='backfill').isnull()
#Run different fill algorithms based on the 'fill' argument
switcher = {
'interpolate': grp.reindex(new_index).interpolate(),
'ffill': grp.reindex(new_index).ffill(),
'pad': grp.reindex(new_index).ffill()
}
grp = switcher.get(fill, grp.reindex(new_index))
#Apply the NaN mask
grp[grp_mask] = np.nan
# Rename the index integers to actual floats
grp.index = grp.index * sr
return grp
def resample_like(df, otherindex, fill=None):
"""
Resamples an input DataFrame to a specified sample rate
Empty cells can be filled with the fill argument
"""
#Calculate the index numbers (integers) where the current data relies
# s = (df.index // sr).astype(int)
s = np.digitize(df.index, otherindex)
if len(set(s)) < len(s)/2:
df = resample(df, sr=0.1, fill='interpolate')
s = np.digitize(df.index, otherindex)
#and use that as a basis for how many samples should be output:
# new_index = np.arange(s.min(), s.max())
new_index=np.arange(0, len(otherindex))
#Save the original column order
cols_org = df.columns.values
#Aggregate numeric columns by taking the mean
df_num = df.select_dtypes(include=[np.number]).groupby(s).mean()
#Aggregate non-numeric columns by selecting the most popular one within each index
df_non_num = df.select_dtypes(exclude=[np.number]).groupby(s).agg(lambda x: x.value_counts().index[0])
#Recombine the numeric and non-numeric columns and reorder columns to original order
grp = pd.concat([df_num, df_non_num], axis=1)[cols_org]
#Save the NaN mask in the end of the dataframe to restore it after interpolation/fill
grp_mask = grp.fillna(method='backfill').isnull()
#Run different fill algorithms based on the 'fill' argument
switcher = {
'interpolate': grp.reindex(new_index).interpolate(),
'ffill': grp.reindex(new_index).ffill(),
'pad': grp.reindex(new_index).ffill()
}
grp = switcher.get(fill, grp.reindex(new_index))
#Apply the NaN mask
grp[grp_mask] = np.nan
# Rename the index integers to actual floats
# grp.index = grp.index * sr
grp.set_index(otherindex[new_index], inplace=True)
return grp.iloc[1:,:]
def drop_duplicate_columns(df, keep='last'):
    """Return ``df`` with duplicated column names removed.

    :param df: input DataFrame
    :param keep: 'last' keeps the last occurrence of each duplicated name,
                 any other value keeps the first occurrence
    :return: DataFrame containing each column name exactly once
    """
    seen_names = []
    drop_positions = []
    positions = list(enumerate(df.columns))
    if keep == 'last':
        # Scan right-to-left so the rightmost occurrence survives.
        positions = reversed(positions)
    for pos, name in positions:
        if name in seen_names:
            drop_positions.append(pos)
        else:
            seen_names.append(name)
    keep_positions = [j for j, _ in enumerate(df.columns) if j not in drop_positions]
    return df.iloc[:, keep_positions]
if __name__ == '__main__':
    # Ad-hoc smoke tests for the helpers above.
    #TEST THE resample_like()
    # df_long = pd.DataFrame(np.array([np.arange(0,5,0.1),np.arange(0, 50, 1)]).T).set_index(0)
    # df_short = pd.DataFrame(np.array([np.arange(0,5,1.0),np.arange(0, 500, 100)]).T).set_index(0)
    # df = pd.DataFrame(np.array([np.arange(1,3,0.2),np.arange(1000, 3000, 200)]).T).set_index(0)
    # rs = resample_like(df, df_short, fill='interpolate')
    ## rs= resample(df, sr=2)
    #TEST drop_duplicate_columns():
    # Two random frames sharing columns C and D; the concat has duplicates.
    df1 = pd.DataFrame(np.random.randint(0,100,size=(100, 4)), columns=list('ABCD'))
    df2 = pd.DataFrame(np.random.randint(0,100,size=(100, 4)), columns=list('CDEF'))
    df3 = drop_duplicate_columns(pd.concat([df1, df2], axis=1), keep='first')
| [
"numpy.digitize",
"numpy.random.randint",
"pandas.concat"
] | [((1938, 1971), 'numpy.digitize', 'np.digitize', (['df.index', 'otherindex'], {}), '(df.index, otherindex)\n', (1949, 1971), True, 'import numpy as np\n'), ((1002, 1041), 'pandas.concat', 'pd.concat', (['[df_num, df_non_num]'], {'axis': '(1)'}), '([df_num, df_non_num], axis=1)\n', (1011, 1041), True, 'import pandas as pd\n'), ((2078, 2111), 'numpy.digitize', 'np.digitize', (['df.index', 'otherindex'], {}), '(df.index, otherindex)\n', (2089, 2111), True, 'import numpy as np\n'), ((2750, 2789), 'pandas.concat', 'pd.concat', (['[df_num, df_non_num]'], {'axis': '(1)'}), '([df_num, df_non_num], axis=1)\n', (2759, 2789), True, 'import pandas as pd\n'), ((4531, 4571), 'numpy.random.randint', 'np.random.randint', (['(0)', '(100)'], {'size': '(100, 4)'}), '(0, 100, size=(100, 4))\n', (4548, 4571), True, 'import numpy as np\n'), ((4616, 4656), 'numpy.random.randint', 'np.random.randint', (['(0)', '(100)'], {'size': '(100, 4)'}), '(0, 100, size=(100, 4))\n', (4633, 4656), True, 'import numpy as np\n'), ((4711, 4740), 'pandas.concat', 'pd.concat', (['[df1, df2]'], {'axis': '(1)'}), '([df1, df2], axis=1)\n', (4720, 4740), True, 'import pandas as pd\n')] |
from collections import namedtuple
import random
import numpy as np
import torch
# very exhaustive but as a result code is very easy to read ;)
# Single-step (state, action, reward, done) record and its batched variants.
Transition = namedtuple('Transition', 's a r done')
Transitions = namedtuple('Transitions', 's a r done') # functionally the same as Transition
NStepTransitions = namedtuple('Batch', 's a n_step_sum_of_r n_step_s done_within_n_step') # each item corresponds to a tensor
class SequentialBuffer:
    """Stores one on-policy rollout and converts it into n-step transitions."""
    def __init__(self, gamma: float, n_step: int):
        self.memory = []      # transitions in time order
        self.gamma = gamma    # discount factor
        self.n_step = n_step  # lookahead horizon for the n-step return
    def push(self, transition: Transition) -> None:
        """Append one transition to the rollout."""
        self.memory.append(transition)
    def instantiate_NStepTransitions_and_empty_buffer(self) -> NStepTransitions:
        """We empty the buffer because this is an on-policy algorithm."""
        # Pad with n_step-1 dummies so indexing t + i is always valid for the
        # last real transitions; dummies are masked via done_within_n_step.
        dummy_transition = Transition(np.zeros((self.memory[0].s.shape)), 0, 0, 0)
        memory_with_dummies = self.memory + [dummy_transition] * (self.n_step - 1)
        transitions = Transitions(*zip(*memory_with_dummies))
        length = len(self.memory) # not computed with memory_with_dummies
        # ':-(n_step-1) or None' drops the dummies; 'or None' handles n_step == 1.
        s = torch.tensor(transitions.s[:-(self.n_step-1) or None], dtype=torch.float).view(length, -1)
        a = torch.tensor(transitions.a[:-(self.n_step-1) or None], dtype=torch.long ).view(length, 1)
        n_step_s = torch.tensor(transitions.s[self.n_step-1:], dtype=torch.float).view(length, -1)
        # last few are dummies and their values will be ignored conveniently through done_within_n_step
        n_step_sum_of_r = np.zeros((len(self.memory),))
        done_within_n_step = np.zeros((len(self.memory),)) # including the zeroth and the (n-1)th step
        for t in range(len(transitions.r[:-(self.n_step-1) or None])): # t = 0, 1, ..., T-1, (T, ..., T+n_step-1), where bracket terms due to dummies
            sum = 0
            for i in range(self.n_step): # from 0 to n_step - 1 inclusive; exactly what we want
                # discounted reward i steps ahead
                sum += (self.gamma ** i) * transitions.r[t+i]
                if transitions.done[t+i]:
                    done_within_n_step[t] = 1 # indicates that we shouldn't care about the value of n_step_s
                    break
            n_step_sum_of_r[t] = sum
        n_step_sum_of_r = torch.tensor(n_step_sum_of_r, dtype=torch.float).view(length, 1)
        done_within_n_step = torch.tensor(done_within_n_step, dtype=torch.long).view(length, 1)
        self.memory = []
        return NStepTransitions(s, a, n_step_sum_of_r, n_step_s, done_within_n_step)
# for SIL (self-imitation learning)
# no done is needed here because we only need to calculate the monte-carlo return for one episode at a time
# R below is the discounted Monte-Carlo return computed at episode end.
TransitionWithoutDone = namedtuple('TransitionWithoutDone', 's a r')
TransitionWithoutDoneWithReturn = namedtuple('TransitionWithoutDoneWithReturn', 's a R')
MonteCarloTransitions = namedtuple('MonteCarloTransitions', 's a R')
class SILBuffer:
    """Self-imitation buffer: stores past episodes annotated with Monte-Carlo returns."""
    def __init__(self, gamma: float):
        self.memory = []           # finished transitions with returns attached
        self.current_episode = []  # transitions of the episode in progress
        self.gamma = gamma         # discount factor
    def push(self, transition: TransitionWithoutDone) -> None:
        """Append one transition to the episode in progress."""
        self.current_episode.append(transition)
    def process_and_empty_current_episode(self) -> None:
        """Compute discounted returns for the finished episode and move it to memory."""
        discounted_return = 0
        # Walk backwards so each return accumulates in O(1); note transitions
        # therefore land in memory in reverse time order (harmless: sampling
        # is uniform).
        for transition in reversed(self.current_episode):
            discounted_return = transition.r + self.gamma * discounted_return
            self.memory.append(TransitionWithoutDoneWithReturn(transition.s, transition.a, discounted_return))
        self.current_episode = []
    def ready_for(self, batch_size: int) -> bool:
        """Return True once at least ``batch_size`` transitions are stored."""
        return len(self.memory) >= batch_size
def sample(self, batch_size: int) -> MonteCarloTransitions:
transitions = random.sample(self.memory, batch_size)
transitions = MonteCarloTransitions(*zip(*transitions))
s = torch.tensor(transitions.s, dtype=torch.float).view(batch_size, -1)
a = torch.tensor(transitions.a, dtype=torch.long ).view(batch_size, 1)
R = torch.tensor(transitions.R, dtype=torch.float).view(batch_size, 1)
return MonteCarloTransitions(s, a, R) | [
"torch.tensor",
"random.sample",
"collections.namedtuple",
"numpy.zeros"
] | [((159, 197), 'collections.namedtuple', 'namedtuple', (['"""Transition"""', '"""s a r done"""'], {}), "('Transition', 's a r done')\n", (169, 197), False, 'from collections import namedtuple\n'), ((212, 251), 'collections.namedtuple', 'namedtuple', (['"""Transitions"""', '"""s a r done"""'], {}), "('Transitions', 's a r done')\n", (222, 251), False, 'from collections import namedtuple\n'), ((310, 380), 'collections.namedtuple', 'namedtuple', (['"""Batch"""', '"""s a n_step_sum_of_r n_step_s done_within_n_step"""'], {}), "('Batch', 's a n_step_sum_of_r n_step_s done_within_n_step')\n", (320, 380), False, 'from collections import namedtuple\n'), ((2707, 2751), 'collections.namedtuple', 'namedtuple', (['"""TransitionWithoutDone"""', '"""s a r"""'], {}), "('TransitionWithoutDone', 's a r')\n", (2717, 2751), False, 'from collections import namedtuple\n'), ((2786, 2840), 'collections.namedtuple', 'namedtuple', (['"""TransitionWithoutDoneWithReturn"""', '"""s a R"""'], {}), "('TransitionWithoutDoneWithReturn', 's a R')\n", (2796, 2840), False, 'from collections import namedtuple\n'), ((2865, 2909), 'collections.namedtuple', 'namedtuple', (['"""MonteCarloTransitions"""', '"""s a R"""'], {}), "('MonteCarloTransitions', 's a R')\n", (2875, 2909), False, 'from collections import namedtuple\n'), ((3718, 3756), 'random.sample', 'random.sample', (['self.memory', 'batch_size'], {}), '(self.memory, batch_size)\n', (3731, 3756), False, 'import random\n'), ((864, 896), 'numpy.zeros', 'np.zeros', (['self.memory[0].s.shape'], {}), '(self.memory[0].s.shape)\n', (872, 896), True, 'import numpy as np\n'), ((1151, 1226), 'torch.tensor', 'torch.tensor', (['transitions.s[:-(self.n_step - 1) or None]'], {'dtype': 'torch.float'}), '(transitions.s[:-(self.n_step - 1) or None], dtype=torch.float)\n', (1163, 1226), False, 'import torch\n'), ((1262, 1336), 'torch.tensor', 'torch.tensor', (['transitions.a[:-(self.n_step - 1) or None]'], {'dtype': 'torch.long'}), '(transitions.a[:-(self.n_step 
- 1) or None], dtype=torch.long)\n', (1274, 1336), False, 'import torch\n'), ((1373, 1437), 'torch.tensor', 'torch.tensor', (['transitions.s[self.n_step - 1:]'], {'dtype': 'torch.float'}), '(transitions.s[self.n_step - 1:], dtype=torch.float)\n', (1385, 1437), False, 'import torch\n'), ((2291, 2339), 'torch.tensor', 'torch.tensor', (['n_step_sum_of_r'], {'dtype': 'torch.float'}), '(n_step_sum_of_r, dtype=torch.float)\n', (2303, 2339), False, 'import torch\n'), ((2385, 2435), 'torch.tensor', 'torch.tensor', (['done_within_n_step'], {'dtype': 'torch.long'}), '(done_within_n_step, dtype=torch.long)\n', (2397, 2435), False, 'import torch\n'), ((3833, 3879), 'torch.tensor', 'torch.tensor', (['transitions.s'], {'dtype': 'torch.float'}), '(transitions.s, dtype=torch.float)\n', (3845, 3879), False, 'import torch\n'), ((3913, 3958), 'torch.tensor', 'torch.tensor', (['transitions.a'], {'dtype': 'torch.long'}), '(transitions.a, dtype=torch.long)\n', (3925, 3958), False, 'import torch\n'), ((3993, 4039), 'torch.tensor', 'torch.tensor', (['transitions.R'], {'dtype': 'torch.float'}), '(transitions.R, dtype=torch.float)\n', (4005, 4039), False, 'import torch\n')] |
# -*- coding: utf-8 -*-
"""w11_LogReg_Hierarchy.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1GQGlQibsvHyTPEczCmzK-G7k8kQBHzAO
## Постановка задачи
Загрузим данные, приведем их к числовым, заполним пропуски, нормализуем данные и оптимизируем память.
Разделим выборку на обучающую/проверочную в соотношении 80/20.
Построим 4 модели логистической регрессии: для 8, 6 и остальных классов, для 2, 5 и остальных, для 1, 7 и остальных, и для 4 и 3 - по убыванию частоты значения. Будем использовать перекрестную проверку при принятии решения об оптимальном наборе столбцов.
Проведем предсказание и проверим качество через каппа-метрику.
Данные:
* https://video.ittensive.com/machine-learning/prudential/train.csv.gz
Соревнование: https://www.kaggle.com/c/prudential-life-insurance-assessment/
© ITtensive, 2020
"""
import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.metrics import cohen_kappa_score, confusion_matrix, make_scorer
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import GridSearchCV, cross_val_score
from sklearn import preprocessing
data = pd.read_csv("https://video.ittensive.com/machine-learning/prudential/train.csv.gz")
# Keep only the first 1000 rows to speed up this demo run.
data = data.iloc[:1000]
print (data.info())
# Split the two-character "Product_Info_2" code (e.g. "A1") into its letter
# and digit parts, then one-hot encode the letter part.
data["Product_Info_2_1"] = data["Product_Info_2"].str.slice(0, 1)
data["Product_Info_2_2"] = pd.to_numeric(data["Product_Info_2"].str.slice(1, 2))
data.drop(labels=["Product_Info_2"], axis=1, inplace=True)
for l in data["Product_Info_2_1"].unique():
    data["Product_Info_2_1" + l] = data["Product_Info_2_1"].isin([l]).astype("int8")
data.drop(labels=["Product_Info_2_1"], axis=1, inplace=True)
# Fill remaining missing values with a sentinel.
data.fillna(value=-1, inplace=True)
def reduce_mem_usage (df):
    """Downcast each column of ``df`` to the smallest numeric dtype that can
    hold its value range; non-numeric columns become 'category'.

    Prints the memory saved (message kept verbatim) and returns the frame.
    """
    start_mem = df.memory_usage().sum() / 1024**2
    for col in df.columns:
        dtype_name = str(df[col].dtypes)
        if dtype_name[:5] == "float":
            lo, hi = df[col].min(), df[col].max()
            # Try the narrowest float type whose range strictly contains [lo, hi].
            for np_type, code in ((np.float16, "f2"), (np.float32, "f4")):
                info = np.finfo(code)
                if lo > info.min and hi < info.max:
                    df[col] = df[col].astype(np_type)
                    break
            else:
                df[col] = df[col].astype(np.float64)
        elif dtype_name[:3] == "int":
            lo, hi = df[col].min(), df[col].max()
            # Try the narrowest int type; leave the column unchanged if none fits.
            for np_type, code in ((np.int8, "i1"), (np.int16, "i2"),
                                  (np.int32, "i4"), (np.int64, "i8")):
                info = np.iinfo(code)
                if lo > info.min and hi < info.max:
                    df[col] = df[col].astype(np_type)
                    break
        else:
            df[col] = df[col].astype("category")
    end_mem = df.memory_usage().sum() / 1024**2
    print('Потребление памяти меньше на', round(start_mem - end_mem, 2), 'Мб (минус', round(100 * (start_mem - end_mem) / start_mem, 1), '%)')
    return df
data = reduce_mem_usage(data)
print (data.info())
# Feature-group prefixes used to pick modelling columns.
# NOTE(review): "InsurеdInfo" appears to contain a non-ASCII character, so it
# may match no columns — verify against the dataset's real column names.
columns_groups = ["Insurance_History", "InsurеdInfo", "Medical_Keyword",
                  "Family_Hist", "Medical_History", "Product_Info"]
columns = ["Wt", "Ht", "Ins_Age", "BMI"]
for cg in columns_groups:
    columns.extend(data.columns[data.columns.str.startswith(cg)])
print (columns)
# Standardize the selected features and re-attach the target, then split 80/20.
scaler = preprocessing.StandardScaler()
data_transformed = pd.DataFrame(scaler.fit_transform(data[columns]))
columns_transofrmed = data_transformed.columns
data_transformed['Response'] = data['Response']
data_train,data_test = train_test_split(data_transformed,test_size=0.2)
def regression_model(columns,df):
    """Fit a logistic regression on ``df`` ('Response' is the target).

    NOTE(review): the ``columns`` argument is accepted but unused — the model
    is fit on every column except 'Response'; confirm whether filtering by
    ``columns`` was intended.
    """
    target = df['Response']
    features = df.drop('Response',axis=1)
    model = LogisticRegression(max_iter=1000)
    model.fit(features,target)
    return model
def logistic_regression(columns,df_train):
    """Cross-validate a logistic regression on ``df_train`` (5 folds) and
    return the mean Cohen-kappa score of the best (only) parameter set.
    """
    model = regression_model(columns,df_train)
    # Empty parameter grid: GridSearchCV is used purely for its CV scoring.
    logr_grid = GridSearchCV(model,{},cv=5,n_jobs=2,
                    scoring=make_scorer(cohen_kappa_score))
    features = df_train.drop('Response',axis=1)
    target = df_train['Response']
    logr_grid.fit(features,target)
    return logr_grid.best_score_
"""### Оптимальный набор столбцов
Для каждого уровня иерархии это будет свой набор столбцов в исходных данных.
### Перекрестная проверка
Разбиваем обучающую выборку еще на k (часто 5) частей, на каждой части данных обучаем модель. Затем проверяем 1-ю, 2-ю, 3-ю, 4-ю части на 5; 1-ю, 2-ю, 3-ю, 5-ю части на 4 и т.д.
В итоге обучение пройдет весь набор данных, и каждая часть набора будет проверена на всех оставшихся (перекрестным образом).
"""
def find_opt_col(data_train):
    """
    Greedy forward selection of feature columns maximising the
    cross-validated Cohen-kappa score.

    Pass 1 finds the best single column; pass 2 repeatedly tries to append
    each remaining column, keeping it only if the CV kappa does not drop.

    :param data_train: training frame containing a 'Response' column
    :return: (selected column list, best kappa score)
    """
    kappa_score_opt = 0
    columns_opt = []
    # Pass 1: best single column.
    for col in columns_transofrmed:
        kappa_score = logistic_regression([col],data_train)
        if kappa_score > kappa_score_opt:
            columns_opt = [col]
            # Fix: the original assigned backwards (kappa_score = kappa_score_opt),
            # so the best score was never tracked and the last column with a
            # positive score won regardless of its quality.
            kappa_score_opt = kappa_score
    # Pass 2: greedily extend the selection.
    for col in columns_transofrmed:
        if col not in columns_opt:
            columns_opt.append(col)
            kappa_score = logistic_regression(columns_opt,data_train)
            if kappa_score < kappa_score_opt:
                columns_opt.pop()
            else:
                kappa_score_opt = kappa_score
                print(col)
    return columns_opt,kappa_score_opt
# Hierarchical one-vs-rest setup: class pairs ordered by frequency. Each level
# learns "these two classes vs everything else"; the last level separates 3 vs 4.
responses = [[6,8],[2,5],[1,7],[3,4]]
logr_models = [{}] * len(responses)
data_train_current = data_train.copy()
i = 0
for response in responses:
    m_train = data_train_current.copy()
    if response != [3,4]:
        # Collapse all classes outside the current pair to 0 ("other").
        m_train['Response'] = m_train['Response'].apply(lambda x:0 if x not in response else x)
    columns_opt,kappa_score_opt = find_opt_col(m_train)
    print(i,kappa_score_opt,columns_opt)
    # Refit on the optimal columns and store model + columns for prediction.
    logr_models[i] = {
        "model":regression_model(columns_opt,m_train),
        "columns":columns_opt
    }
    if response != [3,4]:
        # Remove the classes this level has already handled before training the next.
        data_train_current = data_train_current[~data_train_current['Response'].isin(response)]
    i += 1
def logr_hierarchy(x):
    """Combine the per-level predictions of one row into a final 'target':
    the first level that predicted a non-zero class wins.
    """
    for level in range(len(responses)):
        prediction = x['target' + str(level)]
        if prediction > 0:
            x['target'] = prediction
            break
    return x
# Predict with each level's model on its own optimal columns.
for response in range(0,len(responses)):
    model = logr_models[response]['model']
    columns_opt = logr_models[response]['columns']
    x = pd.DataFrame(data_test,columns=columns_opt)
    data_test['target' + str(response)] = model.predict(x)
# Merge the per-level predictions into a single 'target' per row.
data_test = data_test.apply(logr_hierarchy,axis=1,result_type='expand')
data_test.head()
# Evaluate with the competition metric: quadratic-weighted Cohen kappa.
cohen_kappa_score(data_test['target'],data_test['Response'],weights='quadratic')
| [
"pandas.read_csv",
"sklearn.model_selection.train_test_split",
"sklearn.metrics.make_scorer",
"sklearn.metrics.cohen_kappa_score",
"sklearn.linear_model.LogisticRegression",
"sklearn.preprocessing.StandardScaler",
"numpy.iinfo",
"numpy.finfo",
"pandas.DataFrame"
] | [((1222, 1310), 'pandas.read_csv', 'pd.read_csv', (['"""https://video.ittensive.com/machine-learning/prudential/train.csv.gz"""'], {}), "(\n 'https://video.ittensive.com/machine-learning/prudential/train.csv.gz')\n", (1233, 1310), True, 'import pandas as pd\n'), ((3596, 3626), 'sklearn.preprocessing.StandardScaler', 'preprocessing.StandardScaler', ([], {}), '()\n', (3624, 3626), False, 'from sklearn import preprocessing\n'), ((3817, 3866), 'sklearn.model_selection.train_test_split', 'train_test_split', (['data_transformed'], {'test_size': '(0.2)'}), '(data_transformed, test_size=0.2)\n', (3833, 3866), False, 'from sklearn.model_selection import train_test_split\n'), ((6656, 6743), 'sklearn.metrics.cohen_kappa_score', 'cohen_kappa_score', (["data_test['target']", "data_test['Response']"], {'weights': '"""quadratic"""'}), "(data_test['target'], data_test['Response'], weights=\n 'quadratic')\n", (6673, 6743), False, 'from sklearn.metrics import cohen_kappa_score, confusion_matrix, make_scorer\n'), ((3973, 4006), 'sklearn.linear_model.LogisticRegression', 'LogisticRegression', ([], {'max_iter': '(1000)'}), '(max_iter=1000)\n', (3991, 4006), False, 'from sklearn.linear_model import LogisticRegression\n'), ((6463, 6507), 'pandas.DataFrame', 'pd.DataFrame', (['data_test'], {'columns': 'columns_opt'}), '(data_test, columns=columns_opt)\n', (6475, 6507), True, 'import pandas as pd\n'), ((4223, 4253), 'sklearn.metrics.make_scorer', 'make_scorer', (['cohen_kappa_score'], {}), '(cohen_kappa_score)\n', (4234, 4253), False, 'from sklearn.metrics import cohen_kappa_score, confusion_matrix, make_scorer\n'), ((2058, 2072), 'numpy.finfo', 'np.finfo', (['"""f2"""'], {}), "('f2')\n", (2066, 2072), True, 'import numpy as np\n'), ((2089, 2103), 'numpy.finfo', 'np.finfo', (['"""f2"""'], {}), "('f2')\n", (2097, 2103), True, 'import numpy as np\n'), ((2187, 2201), 'numpy.finfo', 'np.finfo', (['"""f4"""'], {}), "('f4')\n", (2195, 2201), True, 'import numpy as np\n'), ((2218, 2232), 
'numpy.finfo', 'np.finfo', (['"""f4"""'], {}), "('f4')\n", (2226, 2232), True, 'import numpy as np\n'), ((2494, 2508), 'numpy.iinfo', 'np.iinfo', (['"""i1"""'], {}), "('i1')\n", (2502, 2508), True, 'import numpy as np\n'), ((2525, 2539), 'numpy.iinfo', 'np.iinfo', (['"""i1"""'], {}), "('i1')\n", (2533, 2539), True, 'import numpy as np\n'), ((2620, 2634), 'numpy.iinfo', 'np.iinfo', (['"""i2"""'], {}), "('i2')\n", (2628, 2634), True, 'import numpy as np\n'), ((2651, 2665), 'numpy.iinfo', 'np.iinfo', (['"""i2"""'], {}), "('i2')\n", (2659, 2665), True, 'import numpy as np\n'), ((2747, 2761), 'numpy.iinfo', 'np.iinfo', (['"""i4"""'], {}), "('i4')\n", (2755, 2761), True, 'import numpy as np\n'), ((2778, 2792), 'numpy.iinfo', 'np.iinfo', (['"""i4"""'], {}), "('i4')\n", (2786, 2792), True, 'import numpy as np\n'), ((2874, 2888), 'numpy.iinfo', 'np.iinfo', (['"""i8"""'], {}), "('i8')\n", (2882, 2888), True, 'import numpy as np\n'), ((2905, 2919), 'numpy.iinfo', 'np.iinfo', (['"""i8"""'], {}), "('i8')\n", (2913, 2919), True, 'import numpy as np\n')] |
import numpy as np
from scipy import interpolate
from .gps_jax import sample_gp, rbf_kernel
class NHGPS():
    """
    This class is used for data generation from the NH-GPS model.
    """
    def __init__(self, intensity_bound, time_bound, hypers, num_trials=1):
        """
        This method initializes the model object.
        :param intensity_bound: intensity bound of the model
        :param time_bound: the time bound of the model
        :param hypers: [effects_gp_output_variance, effects_gp_length_scale, memory_decay_factor, background_gp_output_variance, background_gp_length_scale]
        :param num_trials: number of trials
        """
        self.num_trials = num_trials
        self.time_bound = time_bound
        self.intensity_bound = intensity_bound # array of length num_types
        self.hypers = hypers
        # Caches for the self-effects GP sample and its interpolated evaluations.
        self.temporal_gp_sample = []
        self.temporal_gp_points = []
        self.temporal_gp_interpolated_points = []
        self.temporal_gp_interpolated_values = []
        # Caches for the background-rate GP.
        self.temporal_gp_interpolated_background_points = []
        self.temporal_gp_interpolated_background_values = []
        self.temporal_gp_background_sample = []
        self.temporal_gp_background_points = []
        # Per-trial traces recorded during thinning.
        self.intensities = []
        self.phis = []
        self.self_effects = []
    def reset_intensities(self):
        """
        This method resets the intensities, phis and self effects
        """
        self.intensities = []
        self.phis = []
        self.self_effects = []
    def generate_temporal_gp(self, grid_points):
        """
        This method generates a 1d gp sample given 1d data vector, and stores it as a class attribute.
        As the training data for the gp, the method takes all the differences between the data points.
        :param grid_points: vector of shape n x 1.
        :return:
        """
        kernel = rbf_kernel(grid_points, grid_points, self.hypers[0], self.hypers[1])
        self.kk = kernel  # NOTE(review): kernel kept on the instance, apparently for debugging — confirm
        gp_sample = sample_gp(kernel)
        self.temporal_gp_sample = gp_sample
        self.temporal_gp_points = grid_points
        # Independent GP (own variance / length scale) for the background rate.
        kernel_background = rbf_kernel(grid_points, grid_points, self.hypers[3], self.hypers[4])
        gp_sample = sample_gp(kernel_background)
        self.temporal_gp_background_sample = gp_sample
        self.temporal_gp_background_points = grid_points
        return
    def calculate_self_effects(self, candidate_point, history):
        """
        This method calculates the self effects given the history at candidate_point
        :param candidate_point: candidate event
        :param history: accepted events until candidate_point
        :return: the value of the self effects at candidate_point
        """
        memory_decay = self.hypers[2]
        self_effects = 0.
        assert self.temporal_gp_points is not None or self.temporal_gp_sample is not None, "Please generate a temporal gp"
        history = np.array(history)
        if len(history) > 0:
            # Only events strictly before the candidate contribute.
            relevant_history = history[np.where(candidate_point > history)]
            for history_point in relevant_history:
                time_difference = candidate_point - history_point
                # GP-modulated influence with exponential memory decay.
                self_effects += self.temporal_gp_function(time_difference) * np.exp(- memory_decay * time_difference)
            return self_effects
        else:
            return 0
    def get_background_intensity(self, candidate_point):
        # Background rate is the background GP evaluated at the candidate time.
        return self.temporal_gp_function(candidate_point, for_background=True)
    def temporal_gp_function(self, value, for_background=False):
        """
        This method evaluates the gp at a certain point (from the gp's training data).
        :param value: point where to evaluate the gp.
        :param for_background: if true the gp function of the background rate is evaluated
        :return: gp's value.
        """
        # use the correct GP - the background GP or the self effects GP
        temporal_gp_interpolated_points = self.temporal_gp_interpolated_background_points if for_background else \
            self.temporal_gp_interpolated_points
        temporal_gp_interpolated_values = self.temporal_gp_interpolated_background_values if for_background else \
            self.temporal_gp_interpolated_values
        temporal_gp_sample = self.temporal_gp_background_sample if for_background else self.temporal_gp_sample
        temporal_gp_points = self.temporal_gp_background_points if for_background else self.temporal_gp_points
        # Every time we evaluate the gp in a point we check if we already evaluated the gp there.
        # If yes, we look for it in the map, if not we evaluate the gp and store
        # the point and the gp value.
        temporal_gp_interpolated_points_arr = np.array(temporal_gp_interpolated_points)
        ind = np.argwhere(temporal_gp_interpolated_points_arr == value)
        if len(ind):
            # Cache hit: reuse the previously interpolated value.
            return temporal_gp_interpolated_values[ind[0][0]]
        else:
            # Cache miss: interpolate and memoize (lists are aliases of the
            # instance attributes, so appending updates the cache in place).
            temporal_gp_interpolated_points.append(value)
            temporal_gp_value = get_gp_value_for_sample(value, temporal_gp_sample,
                                                       temporal_gp_points)
            temporal_gp_interpolated_values.append(temporal_gp_value)
            return temporal_gp_value
    def sort_candidates(self, candidates):
        # Flatten and sort candidate times ascending.
        return np.sort(candidates.flatten())
    def thinning(self, candidates, use_history=True):
        """
        This method implements the thinning algorithm for Hawkes process and generates data from the model-
        :param candidates: Candidate points matrix num_trials x num_types x n
        :return: accepted data points matrix num_trials x num_types x m
        """
        history = []
        for k, candidate_trial in enumerate(candidates):
            print(k)
            history.append([])
            self.phis.append([])
            self.self_effects.append([])
            self.intensities.append([])
            for i, candidate in enumerate(candidate_trial):
                intensity = self.evaluate_intensity(candidate, history[-1], use_history)
                # Accept with probability intensity / intensity_bound.
                r = np.random.uniform()
                if r < intensity / self.intensity_bound:
                    history[-1].append(candidate)
                    self.intensities[-1].append(intensity)
                else:
                    # Rejected: drop the trace entries appended by evaluate_intensity.
                    self.phis[-1] = self.phis[-1][:-1]
                    self.self_effects[-1] = self.self_effects[-1][:-1]
            history[-1] = np.array(history[-1])
        return history
    def evaluate_intensity(self, candidate_point, history, use_history=True):
        """
        This method evaluates the Hawkes process intensity at a certain point give the history.
        :param candiate_point: the data point in which the intensity should be evaluated (x,y,t)
        :param history: the history of the process [(x_i,y_i,t_i)] t_{i-1} < t_i
        :param use_history: Boolean. If False than the intensity is of a Poisson process.
        :return: the intensity
        """
        background_rate = self.get_background_intensity(candidate_point)
        self_effects = 0.
        if use_history:
            self_effects = self.calculate_self_effects(candidate_point, history)
        linear_intensity = background_rate + self_effects
        # Record the trace; thinning() pops these entries again on rejection.
        self.phis[-1].append(linear_intensity)
        self.self_effects[-1].append(self_effects)
        # Sigmoid link scaled by the intensity bound.
        intensity = self.intensity_bound / (1. + np.exp(- linear_intensity))
        return intensity
    def generate_candidates(self, number_of_candidates, num_trials):
        """
        This method generates candidates for the thinning the process from a uniform distribution.
        :param number_of_candidates: number of candidates to be generated.
        :return: a matrix n x d of data points.
        """
        candidates = [np.random.uniform(0, self.time_bound, number_of_candidates[i]) for i in
                      range(num_trials)]
        sorted_candidates = [self.sort_candidates(candidates[i]) for i in range(num_trials)]
        return sorted_candidates
def get_gp_value_for_sample(sample, gp, gp_points):
    """
    This function evaluautes the value of the gp at sample using interpolation.
    :param sample: point where to evaluate the gp
    :param gp: gp values
    :param gp_points: points where the gp is already evaluated.
    :return: value of the gp at sample.
    """
    # Accept either torch tensors (converted via .data.numpy()) or plain arrays.
    try:
        gp_np = gp.data.numpy()
        gp_points_np = gp_points.data.numpy()
    except AttributeError:
        # import jax.numpy as np
        gp_np = gp
        gp_points_np = gp_points
    if np.array(sample).shape:
        # 2-D case: sample is a coordinate pair.
        ind = np.argwhere(np.equal(gp_points_np, sample).sum(axis=1) == gp_points_np.shape[1])
        # if the point where we want to estimate the gp is in the gp input- return the gp value at this point
        # else, perform spline interpolation.
        if len(ind):
            return gp_np[ind[0]]
        else:
            tck = interpolate.bisplrep(gp_points_np[:, 0], gp_points_np[:, 1], gp_np)
            interpolated_value = interpolate.bisplev(sample[0], sample[1], tck)
            return interpolated_value
    else:
        # Scalar case: 1-D spline interpolation.
        ind = np.argwhere(gp_points_np == sample)
        # if the point where we want to estimate the gp is in the gp input- return the gp value at this point
        # else, perform spline interpolation.
        if len(ind):
            return gp_np[ind[0]]
        else:
            s = interpolate.InterpolatedUnivariateSpline(gp_points_np, gp_np)
            interpolated_value = s(sample)
            return interpolated_value
| [
"scipy.interpolate.InterpolatedUnivariateSpline",
"numpy.where",
"scipy.interpolate.bisplev",
"numpy.equal",
"numpy.exp",
"numpy.array",
"numpy.argwhere",
"scipy.interpolate.bisplrep",
"numpy.random.uniform"
] | [((3045, 3062), 'numpy.array', 'np.array', (['history'], {}), '(history)\n', (3053, 3062), True, 'import numpy as np\n'), ((4847, 4888), 'numpy.array', 'np.array', (['temporal_gp_interpolated_points'], {}), '(temporal_gp_interpolated_points)\n', (4855, 4888), True, 'import numpy as np\n'), ((4903, 4960), 'numpy.argwhere', 'np.argwhere', (['(temporal_gp_interpolated_points_arr == value)'], {}), '(temporal_gp_interpolated_points_arr == value)\n', (4914, 4960), True, 'import numpy as np\n'), ((8707, 8723), 'numpy.array', 'np.array', (['sample'], {}), '(sample)\n', (8715, 8723), True, 'import numpy as np\n'), ((9278, 9313), 'numpy.argwhere', 'np.argwhere', (['(gp_points_np == sample)'], {}), '(gp_points_np == sample)\n', (9289, 9313), True, 'import numpy as np\n'), ((6583, 6604), 'numpy.array', 'np.array', (['history[-1]'], {}), '(history[-1])\n', (6591, 6604), True, 'import numpy as np\n'), ((7931, 7993), 'numpy.random.uniform', 'np.random.uniform', (['(0)', 'self.time_bound', 'number_of_candidates[i]'], {}), '(0, self.time_bound, number_of_candidates[i])\n', (7948, 7993), True, 'import numpy as np\n'), ((9068, 9135), 'scipy.interpolate.bisplrep', 'interpolate.bisplrep', (['gp_points_np[:, 0]', 'gp_points_np[:, 1]', 'gp_np'], {}), '(gp_points_np[:, 0], gp_points_np[:, 1], gp_np)\n', (9088, 9135), False, 'from scipy import interpolate\n'), ((9169, 9215), 'scipy.interpolate.bisplev', 'interpolate.bisplev', (['sample[0]', 'sample[1]', 'tck'], {}), '(sample[0], sample[1], tck)\n', (9188, 9215), False, 'from scipy import interpolate\n'), ((9554, 9615), 'scipy.interpolate.InterpolatedUnivariateSpline', 'interpolate.InterpolatedUnivariateSpline', (['gp_points_np', 'gp_np'], {}), '(gp_points_np, gp_np)\n', (9594, 9615), False, 'from scipy import interpolate\n'), ((3131, 3166), 'numpy.where', 'np.where', (['(candidate_point > history)'], {}), '(candidate_point > history)\n', (3139, 3166), True, 'import numpy as np\n'), ((6223, 6242), 'numpy.random.uniform', 
'np.random.uniform', ([], {}), '()\n', (6240, 6242), True, 'import numpy as np\n'), ((7540, 7565), 'numpy.exp', 'np.exp', (['(-linear_intensity)'], {}), '(-linear_intensity)\n', (7546, 7565), True, 'import numpy as np\n'), ((3362, 3401), 'numpy.exp', 'np.exp', (['(-memory_decay * time_difference)'], {}), '(-memory_decay * time_difference)\n', (3368, 3401), True, 'import numpy as np\n'), ((8757, 8787), 'numpy.equal', 'np.equal', (['gp_points_np', 'sample'], {}), '(gp_points_np, sample)\n', (8765, 8787), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
"""Structures data in ML-friendly ways."""
import re
import copy
import datetime as dt
import random
import numpy as np
import pandas as pd
from sklearn.preprocessing import StandardScaler
from avaml import Error, setenvironment as se, _NONE, CSV_VERSION, REGIONS, merge, REGION_ELEV
from avaml.aggregatedata.download import _get_varsom_obs, _get_weather_obs, _get_regobs_obs, REG_ENG, PROBLEMS
from avaml.aggregatedata.time_parameters import to_time_parameters
from varsomdata import getforecastapi as gf
from varsomdata import getmisc as gm
__author__ = 'arwi'
# ---------------------------------------------------------------------------
# Lookup tables mapping the internal (English) label keys used by the ML
# pipeline to the external attribute ids/names of the Varsom forecast API.
# The Norwegian strings are runtime data (sent to / compared against the API)
# and must be kept verbatim.
# ---------------------------------------------------------------------------

# Primary avalanche problem: internal problem key -> external attribute values
# in the order given by "ext_attr".
LABEL_PROBLEM_PRIMARY = {
    "ext_attr": [
        "avalanche_problem_type_id",
        "avalanche_problem_type_name",
        "avalanche_type_id",
        "avalanche_type_name",
        "avalanche_ext_id",
        "avalanche_ext_name"
    ],
    "values": {
        _NONE: [0, "", 0, "", 0, ""],
        "new-loose": [3, "Nysnø (løssnøskred)", 20, "Løssnøskred", 10, "Tørre løssnøskred"],
        "wet-loose": [5, "Våt snø (løssnøskred)", 20, "Løssnøskred", 15, "Våte løssnøskred"],
        "new-slab": [7, "Nysnø (flakskred)", 10, "Flakskred", 20, "Tørre flakskred"],
        "drift-slab": [10, "Fokksnø (flakskred)", 10, "Flakskred", 20, "Tørre flakskred"],
        "pwl-slab": [30, "Vedvarende svakt lag (flakskred)", 10, "Flakskred", 20, "Tørre flakskred"],
        "wet-slab": [45, "Våt snø (flakskred)", 10, "Flakskred", 25, "Våte flakskred"],
        "glide": [50, "Glideskred", 10, "Flakskred", 25, "Våte flakskred"]
    }
}

# Per-problem categorical sub-attributes: weak layer (cause), destructive
# size, probability, trigger, distribution and the elevation fill type.
LABEL_PROBLEM = {
    "cause": {
        "ext_attr": ["aval_cause_id", "aval_cause_name"],
        "values": {
            "0": [0, ""],
            "new-snow": [10, "Nedføyket svakt lag med nysnø"],
            "hoar": [11, "Nedsnødd eller nedføyket overflaterim"],
            "facet": [13, "Nedsnødd eller nedføyket kantkornet snø"],
            "crust": [14, "Dårlig binding mellom glatt skare og overliggende snø"],
            "snowdrift": [15, "Dårlig binding mellom lag i fokksnøen"],
            "ground-facet": [16, "Kantkornet snø ved bakken"],
            "crust-above-facet": [18, "Kantkornet snø over skarelag"],
            "crust-below-facet": [19, "Kantkornet snø under skarelag"],
            "ground-water": [20, "Vann ved bakken/smelting fra bakken"],
            "water-layers": [22, "Opphopning av vann i/over lag i snødekket"],
            "loose": [24, "Ubunden snø"]
        }
    },
    "dsize": {
        "ext_attr": ["destructive_size_ext_id", "destructive_size_ext_name"],
        "values": {
            '0': [0, "Ikke gitt"],
            '1': [1, "1 - Små"],
            '2': [2, "2 - Middels"],
            '3': [3, "3 - Store"],
            '4': [4, "4 - Svært store"],
            '5': [5, "5 - Ekstremt store"]
        }
    },
    "prob": {
        "ext_attr": ["aval_probability_id", "aval_probability_name"],
        "values": {
            '0': [0, "Ikke gitt"],
            '2': [2, "Lite sannsynlig"],
            '3': [3, "Mulig"],
            '5': [5, "Sannsynlig"],
        }
    },
    "trig": {
        "ext_attr": ["aval_trigger_simple_id", "aval_trigger_simple_name"],
        "values": {
            '0': [0, "Ikke gitt"],
            '10': [10, "Stor tilleggsbelastning"],
            '21': [21, "Liten tilleggsbelastning"],
            '22': [22, "Naturlig utløst"]
        }
    },
    "dist": {
        "ext_attr": ["aval_distribution_id", "aval_distribution_name"],
        "values": {
            '0': [0, "Ikke gitt"],
            '1': [1, "Få bratte heng"],
            '2': [2, "Noen bratte heng"],
            '3': [3, "Mange bratte heng"],
            '4': [4, "De fleste bratte heng"]
        }
    },
    "lev_fill": {
        "ext_attr": ["exposed_height_fill"],
        "values": {
            '0': [0],
            '1': [1],
            '2': [2],
            '3': [3],
            '4': [4],
        }
    }
}

# Sub-attributes encoded as multi-digit strings (e.g. one digit per
# compass exposition).
LABEL_PROBLEM_MULTI = {
    "aspect": {
        "ext_attr": "valid_expositions",
    }
}

# Real-valued (regression) sub-attributes: elevation band limits.
LABEL_PROBLEM_REAL = {
    "lev_max": {
        "ext_attr": "exposed_height_1",
    },
    "lev_min": {
        "ext_attr": "exposed_height_2",
    }
}

# Bulletin-wide attributes (overall danger level and emergency warning).
LABEL_GLOBAL = {
    "danger_level": {
        "ext_attr": ["danger_level", "danger_level_name"],
        "values": {
            '1': [1, "1 liten"],
            '2': [2, "2 Moderat"],
            '3': [3, "3 Betydelig"],
            '4': [4, "4 Stor"],
            '5': [5, "5 Meget stor"]
        }
    },
    "emergency_warning": {
        "ext_attr": ["emergency_warning"],
        "values": {
            "Ikke gitt": ["Ikke gitt"],
            "Naturlig utløste skred": ["Naturlig utløste skred"],
        }
    }
}

# RegObs observer competence levels, ascending.
COMPETENCE = [0, 110, 115, 120, 130, 150]
class ForecastDataset:
    """Aggregated raw observations (Varsom bulletins, RegObs observations and
    weather data) for one or more seasons; `label()` turns them into a
    LabeledData ready for ML."""

    def __init__(self, regobs_types, seasons=('2017-18', '2018-19', '2019-20'), max_file_age=23):
        """
        Object contains aggregated data used to generate labeled datasets.

        :param regobs_types: Tuple/list of string names for RegObs observation types to fetch.
        :param seasons: Tuple/list of string representations of avalanche seasons to fetch.
        :param max_file_age: Maximum cache-file age before refetching
            (presumably hours -- confirm against the download module).
        """
        self.seasons = sorted(list(set(seasons)))
        self.date = None
        self.regobs_types = regobs_types
        self.weather = {}
        self.regobs = {}
        self.varsom = {}
        self.labels = {}
        self.use_label = True

        for season in seasons:
            # Fetch each season and merge it into the accumulated per-source dicts.
            varsom, labels = _get_varsom_obs(year=season, max_file_age=max_file_age)
            self.varsom = merge(self.varsom, varsom)
            self.labels = merge(self.labels, labels)
            regobs = _get_regobs_obs(season, regobs_types, max_file_age=max_file_age)
            self.regobs = merge(self.regobs, regobs)
            weather = _get_weather_obs(season, max_file_age=max_file_age)
            self.weather = merge(self.weather, weather)

    @staticmethod
    def date(regobs_types, date: dt.date, days, use_label=True):
        """
        Create a dataset containing just a given day's data.

        NOTE(review): this staticmethod shares its name with the instance
        attribute ``self.date``; it works because the instance attribute
        shadows the method per-instance, but it is easy to misread.

        :param regobs_types: Tuple/list of string names for RegObs observation types to fetch.
        :param date: Date to fetch and create dataset for.
        :param days: How many days to fetch before date. This will be max for .label()'s days parameter.
        """
        self = ForecastDataset(regobs_types, [])
        self.date = date
        self.use_label = use_label

        self.regobs = _get_regobs_obs(None, regobs_types, date=date, days=days)
        self.varsom, labels = _get_varsom_obs(None, date=date, days=days-1 if days > 0 else 1)
        self.weather = _get_weather_obs(None, date=date, days=days-2 if days > 2 else 1)

        self.labels = {}
        # Keep only the labels belonging to the requested date.
        for label_keys, label in labels.items():
            if label_keys not in self.labels:
                self.labels[label_keys] = {}
            for (label_date, label_region), label_data in label.items():
                if label_date == date.isoformat():
                    subkey = (label_date, label_region)
                    self.labels[label_keys][subkey] = label_data
        return self

    def label(self, days, with_varsom=True):
        """Creates a LabeledData containing relevant label and features formatted either in a flat structure or as
        a time series.

        :param days:            How far back in time values should data be included.
                                If 0, only weather data for the forecast day is evaluated.
                                If 1, day 0 is used for weather, 1 for Varsom.
                                If 2, day 0 is used for weather, 1 for Varsom, 2 for RegObs.
                                If 3, days 0-1 is used for weather, 1-2 for Varsom, 2-3 for RegObs.
                                If 5, days 0-3 is used for weather, 1-4 for Varsom, 2-5 for RegObs.
                                The reason for this is to make sure that each kind of data contain
                                the same number of data points, if we want to use some time series
                                frameworks that are picky about such things.
        :param with_varsom:     Whether to include previous avalanche bulletins into the indata.
        :return:                LabeledData
        """
        table = {}
        row_weight = {}
        df = None
        df_weight = None
        df_label = pd.DataFrame(self.labels, dtype="U")

        # Per-source day counts derived from `days` (see docstring).
        days_w = {0: 1, 1: 1, 2: 1}.get(days, days - 1)
        days_v = {0: 1, 1: 2, 2: 2}.get(days, days)
        days_r = days + 1
        varsom_index = pd.DataFrame(self.varsom).index
        weather_index = pd.DataFrame(self.weather).index

        if len(df_label.index) == 0 and self.use_label:
            raise NoBulletinWithinRangeError()

        if self.date and not self.use_label:
            # Unlabeled single-date mode: build one row per forecast region
            # instead of iterating the label index.
            season = gm.get_season_from_date(self.date)
            regions = gm.get_forecast_regions(year=season, get_b_regions=True)
            date_region = [(self.date.isoformat(), region) for region in regions]
        else:
            date_region = df_label.index

        for monotonic_idx, entry_idx in enumerate(date_region):
            date, region_id = dt.date.fromisoformat(entry_idx[0]), entry_idx[1]

            def prev_key(day_dist):
                # Index key for the same region `day_dist` days earlier.
                return (date - dt.timedelta(days=day_dist)).isoformat(), region_id

            # Just check that we can use this entry.
            try:
                if with_varsom:
                    for n in range(1, days_v):
                        if prev_key(n) not in varsom_index:
                            raise KeyError()
                for n in range(0, days_w):
                    if prev_key(n) not in weather_index:
                        raise KeyError()
                add_row = True
                # We don't check for RegObs as it is more of the good to have type of data
            except KeyError:
                add_row = False

            if add_row:
                row = {}
                # One-hot encode the region.
                for region in REGIONS:
                    row[(f"region_id_{region}", "0")] = float(region == region_id)
                if with_varsom:
                    for column in self.varsom.keys():
                        for n in range(1, days_v):
                            # We try/except an extra time since single dates may run without a forecast.
                            row[(column, str(n))] = self.varsom[column][prev_key(n)]
                for column in self.weather.keys():
                    for n in range(0, days_w):
                        try:
                            row[(column, str(n))] = self.weather[column][prev_key(n)]
                        except KeyError:
                            row[(column, str(n))] = 0
                for column in self.regobs.keys():
                    for n in range(2, days_r):
                        try:
                            row[(column, str(n))] = self.regobs[column][prev_key(n)]
                        except KeyError:
                            row[(column, str(n))] = 0
                # Row weight from the observation accuracy on the forecast day:
                # negative -> 1/2, missing/zero -> 1, positive -> 2.
                try:
                    weight_sum = self.regobs['accuracy'][prev_key(0)]
                    if weight_sum < 0:
                        row_weight[entry_idx] = 1 / 2
                    elif weight_sum == 0:
                        row_weight[entry_idx] = 1
                    elif weight_sum > 0:
                        row_weight[entry_idx] = 2
                except KeyError:
                    row_weight[entry_idx] = 1

                # Some restructuring to make DataFrame parse the dict correctly
                for key in row.keys():
                    if key not in table:
                        table[key] = {}
                    table[key][entry_idx] = row[key]

            # Build DataFrame iteratively to preserve system memory (floats in dicts are apparently expensive).
            if (monotonic_idx > 0 and monotonic_idx % 1000 == 0) or monotonic_idx == len(date_region) - 1:
                df_new = pd.DataFrame(table, dtype=np.float32).fillna(0)
                df_weight_new = pd.Series(row_weight)
                df = df_new if df is None else pd.concat([df, df_new])
                # NOTE(review): `df` was just assigned above, so it is never
                # None here and this always takes the concat branch. It still
                # works on the first flush only because pd.concat silently
                # drops a None list entry. The test was probably meant to be
                # `df_weight is None` -- confirm before changing.
                df_weight = df_weight_new if df is None else pd.concat([df_weight, df_weight_new])
                table = {}
                row_weight = {}

        if df is None or len(df.index) == 0:
            raise NoDataFoundError()

        if self.use_label:
            # Restrict labels to rows that survived, and align sort order.
            df_label = df_label.loc[df.index]
            df_label.sort_index(axis=0, inplace=True)
            df_label.sort_index(axis=1, inplace=True)
            df.sort_index(axis=0, inplace=True)
            df_weight.sort_index(axis=0, inplace=True)
        else:
            df_label = None

        return LabeledData(df, df_label, df_weight, days, self.regobs_types, with_varsom, self.seasons)
class LabeledData:
    """Container pairing a feature DataFrame with label/prediction DataFrames
    and per-row weights; offers normalization, elevation-format conversions
    and scoring helpers."""
    # False while raw, or the LabeledData whose scaler normalized `data`.
    is_normalized = False
    # True while the one-hot region_id columns are present in `data`.
    with_regions = True
    # (elevations converted to classes?, label excluded from that conversion?)
    elevation_class = (False, False)
    # NOTE(review): class-level attribute, so this single StandardScaler is
    # shared by every instance; each call to self.scaler.fit(...) refits the
    # scaler seen by all other instances/copies -- confirm this is intended.
    scaler = StandardScaler()
def __init__(self, data, label, row_weight, days, regobs_types, with_varsom, seasons=False):
"""Holds labels and features.
:param data: A DataFrame containing the features of the dataset.
:param label: DataFrame of labels.
:param row_weight: Series containing row weights
:param days: How far back in time values should data be included.
If 0, only weather data for the forecast day is evaluated.
If 1, day 0 is used for weather, 1 for Varsom.
If 2, day 0 is used for weather, 1 for Varsom, 2 for RegObs.
If 3, days 0-1 is used for weather, 1-2 for Varsom, 2-3 for RegObs.
If 5, days 0-3 is used for weather, 1-4 for Varsom, 2-5 for RegObs.
The reason for this is to make sure that each kind of data contain
the same number of data points, if we want to use some time series
frameworks that are picky about such things.
:param regobs_types: A tuple/list of strings of types of observations to fetch from RegObs.,
e.g., `("Faretegn")`.
:param with_varsom: Whether to include previous avalanche bulletins into the indata.
"""
self.data = data
self.row_weight = row_weight
if label is not None:
self.label = label
self.label = self.label.replace(_NONE, 0)
self.label = self.label.replace(np.nan, 0)
try: self.label['CLASS', _NONE] = self.label['CLASS', _NONE].replace(0, _NONE).values
except KeyError: pass
try: self.label['MULTI'] = self.label['MULTI'].replace(0, "0").values
except KeyError: pass
try: self.label['REAL'] = self.label['REAL'].astype(np.float)
except KeyError: pass
self.pred = label.copy()
for col in self.pred.columns:
self.pred[col].values[:] = 0
try: self.pred['CLASS', _NONE] = _NONE
except KeyError: pass
try: self.pred['MULTI'] = "0"
except KeyError: pass
else:
self.label = None
self.pred = None
self.days = days
self.with_varsom = with_varsom
self.regobs_types = regobs_types
if self.data is not None:
self.scaler.fit(self.data.values)
self.single = not seasons
self.seasons = sorted(list(set(seasons if seasons else [])))
def normalize(self, by=None):
"""Normalize the data feature-wise using MinMax.
:return: Normalized copy of LabeledData
"""
by = by if by is not None else self
if not self.is_normalized:
ld = self.copy()
data = by.scaler.transform(self.data.values)
ld.data = pd.DataFrame(data=data, index=self.data.index, columns=self.data.columns)
ld.is_normalized = by
return ld
elif self.is_normalized != by:
return self.denormalize().normalize(by=by)
else:
return self.copy()
def denormalize(self):
"""Denormalize the data feature-wise using MinMax.
:return: Denormalized copy of LabeledData
"""
if self.is_normalized:
ld = self.copy()
data = self.is_normalized.scaler.inverse_transform(self.data.values)
ld.data = pd.DataFrame(data=data, index=self.data.index, columns=self.data.columns)
ld.is_normalized = False
return ld
else:
return self.copy()
def drop_regions(self):
"""Remove regions from input data"""
if self.with_regions:
ld = self.copy()
region_columns = list(filter(lambda x: re.match(r'^region_id', x[0]), ld.data.columns))
ld.data.drop(region_columns, axis=1, inplace=True)
ld.with_regions = False
ld.scaler.fit(ld.data.values)
return ld
else:
return self.copy()
def stretch_temperatures(self):
"""Stretch out temperatures near zero"""
ld = self.copy()
if self.data is not None:
temp_cols = [bool(re.match(r"^temp_(max|min)$", title)) for title in ld.data.columns.get_level_values(0)]
ld.data.loc[:, temp_cols] = np.sign(ld.data.loc[:, temp_cols]) * np.sqrt(np.abs(ld.data.loc[:, temp_cols]))
ld.scaler.fit(ld.data.values)
return ld
    def problem_graph(self):
        """Cross-tabulate labeled vs. predicted primary avalanche problem.

        :return: DataFrame indexed by (label, problem_1) holding the row count
            per combination and the modal predicted problem_2 (NaN when the
            mode is 0, i.e. "no problem").
        """
        label = pd.Series(self.label["CLASS", _NONE, "problem_1"], name="label")
        pred1 = pd.Series(self.pred["CLASS", _NONE, "problem_1"], name="problem_1")
        pred2 = pd.Series(self.pred["CLASS", _NONE, "problem_2"], name="problem_2")

        # dropna=False keeps combinations where either side is missing.
        groups = pd.concat([label, pred1, pred2], axis=1).groupby(["label", "problem_1"], dropna=False)
        count = groups.count()["problem_2"].rename("count")
        # Most frequent problem_2 per group; 0 means "none" and becomes NaN.
        p2 = groups["problem_2"].apply(lambda x: pd.Series.mode(x)[0]).replace(0, np.nan)
        return pd.concat([count, p2], axis=1)
def statham(self):
"""Make a danger level in the same manner as Statham et al., 2018."""
if self.pred is None:
raise NotPredictedError
label = self.label[("CLASS", _NONE, "danger_level")].apply(np.int)
pred = self.pred[("CLASS", _NONE, "danger_level")].apply(np.int)
ones = pd.Series(np.ones(pred.shape), index=pred.index)
cols = ["label", "diff", "n"]
df = pd.DataFrame(pd.concat([label, label - pred, ones], axis=1).values, columns=cols)
bias = df.groupby(cols[:-1]).count().unstack().droplevel(0, axis=1)
n = df.groupby(cols[0]).count()["n"]
share = bias.divide(n, axis=0)
return pd.concat([n, share], axis=1)
    def adam(self):
        """Derive danger level / emergency warning from predicted problem
        attributes via a two-stage lookup matrix.

        NOTE(review): several lines below look like copy-paste remnants and
        are flagged inline -- verify intent before relying on this method.

        :raises NotPredictedError: if no prediction has been made.
        """
        if self.pred is None:
            raise NotPredictedError
        # Stage 1: (probability, trigger) x distribution -> touch class A-E.
        touch = pd.DataFrame({
            1: {(2, 10): "A", (3, 10): "A", (3, 21): "B", (5, 21): "B", (3, 22): "B", (5, 22): "B"},
            2: {(2, 10): "A", (3, 10): "B", (3, 21): "C", (5, 21): "D", (3, 22): "C", (5, 22): "D"},
            3: {(2, 10): "B", (3, 10): "C", (3, 21): "D", (5, 21): "E", (3, 22): "D", (5, 22): "E"},
            4: {(2, 10): "B", (3, 10): "C", (3, 21): "D", (5, 21): "E", (3, 22): "D", (5, 22): "E"}
        })
        # Stage 2: touch class x destructive size -> danger level 1-5.
        danger = pd.DataFrame({
            1: {"A": 1, "B": 1, "C": 1, "D": 2, "E": 3},
            2: {"A": 1, "B": 2, "C": 2, "D": 3, "E": 4},
            3: {"A": 2, "B": 2, "C": 3, "D": 3, "E": 4},
            4: {"A": 2, "B": 3, "C": 4, "D": 4, "E": 5},
            5: {"A": 2, "B": 3, "C": 4, "D": 4, "E": 5}
        })

        def get_danger(series):
            # Applied per prediction row.
            p1 = series["CLASS", _NONE, "problem_1"]
            p2 = series["CLASS", _NONE, "problem_2"]
            # NOTE(review): reads "problem_2" again (probably meant
            # "problem_3") and the value is never used.
            p3 = series["CLASS", _NONE, "problem_2"]
            dl = ("CLASS", _NONE, "danger_level")
            ew = ("CLASS", _NONE, "emergency_warning")
            if p1 == _NONE:
                series[dl] = "1"
                series[ew] = "Ikke gitt"
            else:
                # NOTE(review): np.int was removed in NumPy 1.24; .apply(np.int)
                # raises AttributeError on current NumPy.
                p1 = series["CLASS", p1][["prob", "trig", "dist", "dsize"]].apply(np.int)
                try:
                    dl1 = str(danger.loc[touch.loc[(p1["prob"], p1["trig"]), p1["dist"]], p1["dsize"]])
                except KeyError:
                    dl1 = 0
                if p2 != _NONE:
                    p2 = series["CLASS", p2][["prob", "trig", "dist", "dsize"]].apply(np.int)
                    try:
                        # NOTE(review): recomputes dl1 from p1, not p2 (likely a
                        # copy-paste bug), and dl1 is never written back to
                        # series[dl] on the success path.
                        dl1 = str(danger.loc[touch.loc[(p1["prob"], p1["trig"]), p1["dist"]], p1["dsize"]])
                    except KeyError:
                        series[dl] = "2"
                        series[ew] = "Ikke gitt"
                try:
                    # Large naturally released avalanches trigger the warning.
                    if p1["trig"] == 22 and p1["dsize"] >= 3:
                        series[ew] = "Naturlig utløste skred"
                except KeyError:
                    pass
            return series

        ld = self.copy()
        ld.pred = ld.pred.apply(get_danger, axis=1)
        return ld
    def to_elev_class(self, exclude_label=False):
        """Convert all elevations to classes.

        Continuous elevation band limits are replaced by binary classes
        relative to each region's treeline (from REGION_ELEV): lev_min becomes
        0 (sea level) or 1 (treeline), lev_max becomes 0 (MAX_ELEV) or 1
        (treeline), whichever is closer.

        :param exclude_label: If True, only convert data/pred, not the label.
        :return: Converted copy of LabeledData.
        """
        if self.elevation_class == (True, exclude_label):
            return self.copy()
        elif self.elevation_class == (True, not exclude_label):
            # Converted with the opposite label setting: round-trip.
            return self.from_elev_class().to_elev_class(exclude_label)

        MAX_ELEV = 2500

        def round_min(series):
            # 0 if the elevation is closer to sea level than to the treeline.
            region = int(series.name[1])
            elev = float(series.values[0])
            tl = REGION_ELEV[region][0]
            return 0 if abs(elev - 0) <= abs(elev - tl) else 1

        def round_max(series):
            # 0 if the elevation is closer to MAX_ELEV than to the treeline.
            region = int(series.name[1])
            elev = float(series.values[0])
            tl = REGION_ELEV[region][1]
            return 0 if abs(elev - MAX_ELEV) <= abs(elev - tl) else 1

        def convert_label(df):
            # Replace REAL lev_min/lev_max and CLASS lev_fill with class labels.
            problems = df.columns.get_level_values(1).unique().to_series().replace(_NONE, np.nan).dropna()
            for problem in problems:
                df["CLASS", problem, "lev_min"] = df[[("REAL", problem, "lev_min")]].apply(round_min, axis=1).apply(str)
                df["CLASS", problem, "lev_max"] = df[[("REAL", problem, "lev_max")]].apply(round_max, axis=1).apply(str)
                df.drop([
                    ("CLASS", problem, "lev_fill"),
                    ("REAL", problem, "lev_min"),
                    ("REAL", problem, "lev_max")
                ], axis=1, inplace=True)
            df.sort_index(inplace=True, axis=1)

        def convert_data(df):
            # Same conversion for feature columns; prefixes are derived by
            # stripping "_fill_N" (7 chars) from the lev_fill column names.
            prefixes = set(map(lambda y: (y[0][:-7], y[1]), filter(lambda x: re.search(r"lev_fill", x[0]), df.columns)))
            for prefix in prefixes:
                df[f"{prefix[0]}_min", prefix[1]] = df[[(f"{prefix[0]}_min", prefix[1])]].apply(round_min, axis=1)
                df[f"{prefix[0]}_max", prefix[1]] = df[[(f"{prefix[0]}_max", prefix[1])]].apply(round_max, axis=1)
                df.drop([
                    (f"{prefix[0]}_fill_1", prefix[1]),
                    (f"{prefix[0]}_fill_2", prefix[1]),
                    (f"{prefix[0]}_fill_3", prefix[1]),
                    (f"{prefix[0]}_fill_4", prefix[1]),
                ], axis=1, inplace=True)

        # Work on denormalized, format-4 elevations; restore normalization at the end.
        range_ld = self.copy().denormalize()
        range_ld = range_ld.to_elevation_fmt_4(exclude_label)
        if self.label is not None and not exclude_label:
            convert_label(range_ld.label)
        if self.pred is not None:
            convert_label(range_ld.pred)
        if self.data is not None:
            convert_data(range_ld.data)
        range_ld.scaler.fit(range_ld.data)
        range_ld.elevation_class = (True, exclude_label)
        if self.is_normalized:
            return range_ld.normalize()
        else:
            return range_ld
    def from_elev_class(self):
        """Convert all elevation classes back to elevations.

        Inverse of `to_elev_class`: class 1 maps to the region's treeline
        (REGION_ELEV), class 0 maps to 0 for lev_min and MAX_ELEV for lev_max;
        lev_fill is reset to "4" (a min/max band).

        :return: Converted copy of LabeledData.
        """
        if not self.elevation_class[0]:
            return self.copy()
        exclude_label = self.elevation_class[1]
        MAX_ELEV = 2500

        def find_min(series):
            # Class 1 ("middle") -> treeline, class 0 -> sea level.
            region = int(series.name[1])
            is_middle = bool(float(series.values[0]))
            tl = REGION_ELEV[region][0]
            return tl if is_middle else 0

        def find_max(series):
            # Class 1 ("middle") -> treeline, class 0 -> MAX_ELEV.
            region = int(series.name[1])
            is_middle = bool(float(series.values[0]))
            tl = REGION_ELEV[region][1]
            return tl if is_middle else MAX_ELEV

        def convert_label(df):
            # Rebuild REAL lev_min/lev_max and the CLASS lev_fill column.
            problems = df.columns.get_level_values(1).unique().to_series().replace(_NONE, np.nan).dropna()
            for problem in problems:
                df["REAL", problem, "lev_min"] = df[[("CLASS", problem, "lev_min")]].apply(find_min, axis=1).apply(str)
                df["REAL", problem, "lev_max"] = df[[("CLASS", problem, "lev_max")]].apply(find_max, axis=1).apply(str)
                df["CLASS", problem, "lev_fill"] = "4"
                df.drop([
                    ("CLASS", problem, "lev_min"),
                    ("CLASS", problem, "lev_max"),
                ], axis=1, inplace=True)
            df.sort_index(inplace=True, axis=1)

        def convert_data(df):
            # Rebuild continuous min/max and one-hot fill columns (fill_4 = 1).
            prefixes = set(map(lambda y: (y[0][:-7], y[1]), filter(lambda x: re.search(r"lev_fill", x[0]), df.columns)))
            for prefix in prefixes:
                df[f"{prefix[0]}_min", prefix[1]] = df[[(f"{prefix[0]}_min", prefix[1])]].apply(find_min, axis=1)
                df[f"{prefix[0]}_max", prefix[1]] = df[[(f"{prefix[0]}_max", prefix[1])]].apply(find_max, axis=1)
                df[f"{prefix[0]}_fill_1", prefix[1]] = 0
                df[f"{prefix[0]}_fill_2", prefix[1]] = 0
                df[f"{prefix[0]}_fill_3", prefix[1]] = 0
                df[f"{prefix[0]}_fill_4", prefix[1]] = 1
            df.sort_index(inplace=True, axis=1)

        range_ld = self.copy().denormalize()
        if self.label is not None and not exclude_label:
            convert_label(range_ld.label)
        if self.pred is not None:
            convert_label(range_ld.pred)
        if self.data is not None:
            convert_data(range_ld.data)
        range_ld.scaler.fit(range_ld.data)
        range_ld.elevation_class = (False, False)
        if self.is_normalized:
            return range_ld.normalize()
        else:
            return range_ld
def to_elevation_fmt_1(self, exclude_label=False):
"""Convert all elevations to format 1"""
MAX_ELEV = 2500
def convert_label(df):
problems = df.columns.get_level_values(1).unique().to_series().replace(_NONE, np.nan).dropna()
for problem in problems:
fill = df["CLASS", problem, "lev_fill"].apply(str)
twos = fill == "2"
threes = fill == "3"
fours = fill == "4"
df.loc[np.logical_or(twos, threes), ("REAL", problem, "lev_max")] = 0
df.loc[np.logical_or(twos, threes), ("REAL", problem, "lev_min")] = 0
df.loc[np.logical_or(twos, threes), ("CLASS", problem, "lev_fill")] = "1"
df.loc[fours, ("REAL", problem, "lev_max")] = df.loc[fours, ("REAL", problem, "lev_min")]
df.loc[fours, ("REAL", problem, "lev_min")] = 0
df.loc[fours, ("CLASS", problem, "lev_fill")] = "1"
def convert_data(df):
prefixes = set(map(lambda y: (y[0][:-7], y[1]), filter(lambda x: re.search(r"lev_fill", x[0]), df.columns)))
for prefix in prefixes:
ones = df[(f"{prefix[0]}_fill_1", prefix[1])].apply(np.bool)
twos = df[(f"{prefix[0]}_fill_2", prefix[1])].apply(np.bool)
threes = df[(f"{prefix[0]}_fill_3", prefix[1])].apply(np.bool)
fours = df[(f"{prefix[0]}_fill_4", prefix[1])].apply(np.bool)
df.loc[np.logical_or(twos, threes), (f"{prefix[0]}_min", prefix[1])] = 0
df.loc[np.logical_or(twos, threes), (f"{prefix[0]}_max", prefix[1])] = 0
df.loc[np.logical_or(twos, threes), (f"{prefix[0]}_fill_1", prefix[1])] = 1
df[(f"{prefix[0]}_fill_2", prefix[1])] = np.zeros(twos.shape)
df[(f"{prefix[0]}_fill_3", prefix[1])] = np.zeros(threes.shape)
df.loc[fours, (f"{prefix[0]}_max", prefix[1])] = df.loc[fours, (f"{prefix[0]}_min", prefix[1])]
df.loc[fours, (f"{prefix[0]}_min", prefix[1])] = 0
df.loc[threes == True, (f"{prefix[0]}_fill_4", prefix[1])] = 1
df[(f"{prefix[0]}_fill_3", prefix[1])] = np.zeros(threes.shape)
ld = self.copy().denormalize()
if self.label is not None and not exclude_label:
convert_label(ld.label)
if self.pred is not None:
convert_label(ld.pred)
if self.data is not None:
convert_data(ld.data)
ld.scaler.fit(ld.data)
if self.is_normalized:
return ld.normalize()
else:
return ld
def to_elevation_fmt_4(self, exclude_label=False):
"""Convert all elevations to ranges"""
MAX_ELEV = 2500
def convert_label(df):
problems = df.columns.get_level_values(1).unique().to_series().replace(_NONE, np.nan).dropna()
for problem in problems:
fill = df["CLASS", problem, "lev_fill"].apply(str)
ones = fill == "1"
twos = fill == "2"
threes = fill == "3"
df.loc[ones, ("REAL", problem, "lev_min")] = df.loc[ones, ("REAL", problem, "lev_max")]
df.loc[ones, ("REAL", problem, "lev_max")] = MAX_ELEV
df.loc[ones, ("CLASS", problem, "lev_fill")] = "4"
df.loc[twos, ("REAL", problem, "lev_min")] = 0
df.loc[twos, ("CLASS", problem, "lev_fill")] = "4"
df.loc[threes, ("REAL", problem, "lev_min")] = 0
df.loc[threes, ("REAL", problem, "lev_max")] = MAX_ELEV
df.loc[threes, ("CLASS", problem, "lev_fill")] = "4"
def convert_data(df):
prefixes = set(map(lambda y: (y[0][:-7], y[1]), filter(lambda x: re.search(r"lev_fill", x[0]), df.columns)))
for prefix in prefixes:
ones = df[(f"{prefix[0]}_fill_1", prefix[1])].apply(np.bool)
twos = df[(f"{prefix[0]}_fill_2", prefix[1])].apply(np.bool)
threes = df[(f"{prefix[0]}_fill_3", prefix[1])].apply(np.bool)
fours = df[(f"{prefix[0]}_fill_4", prefix[1])].apply(np.bool)
df.loc[ones, (f"{prefix[0]}_min", prefix[1])] = df.loc[ones, (f"{prefix[0]}_max", prefix[1])]
df.loc[ones, (f"{prefix[0]}_max", prefix[1])] = MAX_ELEV
df.loc[ones == True, (f"{prefix[0]}_fill_4", prefix[1])] = 1
df[(f"{prefix[0]}_fill_1", prefix[1])] = np.zeros(ones.shape)
df.loc[twos, (f"{prefix[0]}_min", prefix[1])] = 0
df.loc[twos == True, (f"{prefix[0]}_fill_4", prefix[1])] = 1
df[(f"{prefix[0]}_fill_2", prefix[1])] = np.zeros(twos.shape)
df.loc[threes, (f"{prefix[0]}_min", prefix[1])] = 0
df.loc[threes, (f"{prefix[0]}_max", prefix[1])] = MAX_ELEV
df.loc[threes == True, (f"{prefix[0]}_fill_4", prefix[1])] = 1
df[(f"{prefix[0]}_fill_3", prefix[1])] = np.zeros(threes.shape)
ld = self.copy().denormalize()
if self.label is not None and not exclude_label:
convert_label(ld.label)
if self.pred is not None:
convert_label(ld.pred)
if self.data is not None:
convert_data(ld.data)
ld.scaler.fit(ld.data)
if self.is_normalized:
return ld.normalize()
else:
return ld
def valid_pred(self):
"""Makes the bulletins internally coherent. E.g., removes problem 3 if problem 2 is blank."""
if self.pred is None:
raise NotPredictedError
ld = self.copy()
if self.elevation_class:
ld = ld.from_elev_class()
# Handle Problem 1-3
prob_cols = []
for n in range(1, 4):
if f"problem_{n}" in list(ld.pred["CLASS", _NONE].columns):
prob_cols.append(("CLASS", _NONE, f"problem_{n}"))
prev_eq = np.zeros((ld.pred.shape[0], len(prob_cols)), dtype=bool)
for n, col in enumerate(prob_cols):
for mcol in prob_cols[0:n]:
# If equal to problem_n-1/2, set to _NONE.
prev_eq[:, n] = np.logical_or(
prev_eq[:, n],
np.equal(ld.pred[mcol], ld.pred[col])
)
# Set to None if problem_n-1/2 was _NONE.
prev_eq[:, n] = np.logical_or(
prev_eq[:, n],
ld.pred[mcol] == _NONE
)
ld.pred.loc[prev_eq[:, n], col] = _NONE
# Delete subproblem solutions that are irrelevant
for subprob in PROBLEMS.values():
rows = np.any(np.char.equal(ld.pred.loc[:, prob_cols].values.astype("U"), subprob), axis=1) == False
columns = [name == subprob for name in ld.pred.columns.get_level_values(1)]
ld.pred.loc[rows, columns] = _NONE
# Set problem_amount to the right number
ld.pred['CLASS', _NONE, 'problem_amount'] = np.sum(ld.pred.loc[:, prob_cols] != _NONE, axis=1).astype(str)
# If lev_fill is "3" or "4", lev_min is always "0"
for subprob in PROBLEMS.values():
if "lev_fill" in ld.pred["CLASS", subprob].columns:
fill = ld.pred.astype(str)["CLASS", subprob, "lev_fill"]
if "lev_min" in ld.pred["REAL", subprob]:
ld.pred.loc[np.logical_or(fill == "1", fill == "2"), ("REAL", subprob, "lev_min")] = "0"
if "lev_min" in ld.pred["REAL", subprob] and "lev_max" in ld.pred["REAL", subprob]:
real = ld.pred["REAL", subprob].replace("", np.nan).astype(np.float)
reversed_idx = real["lev_min"] > real["lev_max"]
average = real.loc[reversed_idx, "lev_min"] + real.loc[reversed_idx, "lev_max"] / 2
ld.pred.loc[reversed_idx, ("REAL", subprob, "lev_min")] = average
ld.pred.loc[reversed_idx, ("REAL", subprob, "lev_max")] = average
ld.pred.loc[:, ["CLASS", "MULTI"]] = ld.pred.loc[:, ["CLASS", "MULTI"]].astype(str)
ld.pred["REAL"] = ld.pred["REAL"].replace("", np.nan).astype(np.float)
return ld
def split(self, rounds=3, seed="<PASSWORD>"):
"""Returns a split of the object into a training set, a test set and a validation set.
Parameters rounds and seed are not used any more.
Use as:
for test, train, eval in ld.split():
model.fit(test)
model.predict(train)
model.predict(eval)
"""
train_regions = [3007, 3012, 3010, 3009, 3013, 3017, 3014, 3032, 3027, 3029, 3022, 3031, 3023, 3037, 3024, 3028]
test_regions = [3011, 3016, 3035]
eval_regions = [3006, 3015, 3034]
split = []
for regions in [train_regions, test_regions, eval_regions]:
ld = self.copy()
ld.data = ld.data.iloc[[region in regions for region in ld.data.index.get_level_values(1)]]
ld.label = ld.label.iloc[[region in regions for region in ld.label.index.get_level_values(1)]]
ld.pred = ld.pred.iloc[[region in regions for region in ld.pred.index.get_level_values(1)]]
ld.row_weight = ld.row_weight.iloc[[region in regions for region in ld.row_weight.index.get_level_values(1)]]
split.append(ld)
return [tuple(split)]
    def f1(self):
        """Get F1, precision, recall and RMSE of all labels.

        :return: Series with scores of all possible labels and values.
        :raises DatasetMissingLabel: if label or prediction is missing.
        """
        if self.label is None or self.pred is None:
            raise DatasetMissingLabel()
        dummies = self.to_dummies()
        # Silence division warnings; 0/0 cases are handled explicitly below.
        old_settings = np.seterr(divide='ignore', invalid='ignore')

        df_idx = pd.MultiIndex.from_arrays([[], [], [], []])
        df = pd.DataFrame(index=df_idx, columns=["f1", "precision", "recall", "rmse"])
        try:
            prob_cols = [
                name.startswith("problem_") for name in self.label.columns.get_level_values(2)
            ]
        except KeyError:
            prob_cols = pd.DataFrame(index=self.label.index)
        for column, pred_series in dummies["pred"].items():
            if column[1]:
                # Subproblem attribute: score only rows where both label and
                # prediction actually contain this subproblem.
                true_idx = self.label.loc[
                    np.any(np.char.equal(self.label.loc[:, prob_cols].values.astype("U"), column[1]), axis=1)
                ].index
                pred_idx = self.pred.loc[
                    np.any(np.char.equal(self.pred.loc[:, prob_cols].values.astype("U"), column[1]), axis=1)
                ].index
                idx = list(set(true_idx.to_list()).intersection(set(pred_idx.to_list())))
            else:
                idx = list(set(self.label.index).intersection(set(self.pred.index)))

            if column[0] in ["CLASS", "MULTI"] and column in dummies["label"].columns:
                truth = dummies["label"][column][idx]
                pred = pred_series[idx]
                true_pos = np.sum(truth * pred)
                # Skip classes without positive truth samples (F1 undefined)
                # and the "0" class of subproblem attributes.
                if not np.sum(truth) or (column[0] == "CLASS" and column[1] and column[3] == "0"):
                    continue
                prec = true_pos / np.sum(pred) if np.sum(pred) else 0
                recall = true_pos / np.sum(truth)
                f1 = 2 * prec * recall / (prec + recall) if prec + recall else 0
                df.loc[column] = pd.Series([f1, prec, recall, np.nan], index=df.columns)
            elif column[0] in ["REAL"] and column in dummies["label"].columns:
                truth = dummies["label"][column][idx]
                pred = pred_series[idx]
                if not len(truth):
                    continue
                # NOTE(review): this computes sqrt(sum(sq)) / n rather than
                # the textbook sqrt(mean(sq)) -- confirm before comparing
                # these "rmse" values with external results.
                rmse = np.sqrt(np.sum(np.square(pred - truth))) / len(truth)
                df.loc[column] = pd.Series([np.nan, np.nan, np.nan, rmse], index=df.columns)
        np.seterr(**old_settings)
        return df
    def to_timeseries(self):
        """Formats the data in a way that is parseable for e.g. `tslearn`. That is, a numpy array with
        shape `(rows, timeseries, features)`.

        :return: (numpy.ndarray, list of feature names)
        """
        columns = self.data.columns.get_level_values(0).unique()
        number_of_features = len(columns)
        number_of_days = self.days - 1 if self.days >= 3 else 1
        shape = self.data.shape
        ts_array = np.zeros((shape[0], number_of_features * number_of_days), np.float64)
        # Multiply the region labels with the size of the time dimension.
        # NOTE(review): `day` runs to number_of_days inclusive while the
        # column stride is number_of_days, so each region block overwrites the
        # first column of the next -- confirm whether this overlap is intended.
        for idx in range(0, len(REGIONS)):
            for day in range(0, number_of_days + 1):
                ts_array[:, idx * number_of_days + day] = self.data.values[:, idx]
        # Copy the remaining (non-region) feature columns after the region block.
        ts_array[:, len(REGIONS) * number_of_days - 1:] = self.data.values[:, len(REGIONS) - 1:]
        # Reshape to (rows, features, days) and transpose to (rows, days, features).
        ts_array = ts_array.reshape((shape[0], number_of_features, number_of_days))
        return ts_array.transpose((0, 2, 1)), columns
def to_time_parameters(self, orig_days=-1):
"""Collapses the time series to fewer dimensions"""
ld = self.copy()
ld.data = pd.concat([
ld.data.loc[:, ld.data.columns.get_level_values(1).values.astype(int) <= orig_days],
to_time_parameters(ld)
], axis=1).sort_index()
ld.scaler.fit(ld.data.values)
return ld
    def to_dummies(self):
        """Convert categorical variable into dummy/indicator variables.

        Builds, for both the label and the prediction, one-hot columns for
        CLASS attributes, per-digit columns for MULTI attributes and pass-through
        columns for REAL attributes, all under a shared MultiIndex.

        :return: pd.DataFrame
        :raises DatasetMissingLabel: if no label is present.
        """
        if self.label is None:
            raise DatasetMissingLabel()
        dummies = {}
        for name, df in [('label', self.label), ('pred', self.pred)]:
            dummies_types = {}
            dummies_class = {}
            for subprob in df.loc[:, ["CLASS"]].columns.get_level_values(1).unique():
                try:
                    # The label's dummy columns define the column set; the
                    # prediction is re-indexed onto the same columns below.
                    sub_df = self.label["CLASS", subprob]
                    try: col = pd.get_dummies(sub_df, prefix_sep=':').columns
                    except ValueError: col = []

                    if name == 'label':
                        dum = pd.DataFrame(pd.get_dummies(sub_df, prefix_sep=':'), columns=col)
                        dummies_class[subprob] = dum.fillna(0)

                        # Split "attribute:value" column names into a 2-level index.
                        columns = dummies_class[subprob].columns.values.astype("U")
                        idx = pd.MultiIndex.from_tuples(
                            [(a[0], a[2]) for a in np.char.partition(columns, sep=":")],
                            names=["attribute", "label"]
                        )
                        dummies_class[subprob].columns = idx
                    else:
                        dum = pd.DataFrame(pd.get_dummies(df["CLASS", subprob], prefix_sep=':'), columns=col)
                        dummies_class[subprob] = dum.fillna(0)

                        columns = dummies_class[subprob].columns.values.astype("U")
                        idx = pd.MultiIndex.from_tuples(
                            [(a[0], a[2]) for a in np.char.partition(columns, sep=":")],
                            names=["attribute", "label"]
                        )
                        dummies_class[subprob].columns = idx
                except KeyError:
                    pass
            dummies_types["CLASS"] = pd.concat(dummies_class.values(), keys=dummies_class.keys(), axis=1)

            dummies_multi = {}
            try:
                for subprob in df.loc[:, ['MULTI']].columns.get_level_values(1).unique():
                    try:
                        # NOTE(review): np.int/np.float were removed in NumPy
                        # 1.24; these astype/apply calls raise there and should
                        # be migrated to the builtin int/float.
                        # Zero-pad each multi-value to equal width, then split
                        # it into one column per digit.
                        multi = df['MULTI'][subprob].replace(_NONE, "0").values.astype(np.int).astype("U")
                        if name == 'label':
                            multimax = np.max(np.char.str_len(multi), axis=0)
                        multi = np.char.zfill(multi, multimax)
                        multi = np.nan_to_num(np.array([[list(elem) for elem in row] for row in multi]))
                        multi = multi.reshape(multi.shape[0], multi.shape[1] * multi.shape[2]).astype(np.float)
                        columns = zip(df["MULTI"][subprob].columns, multimax)
                        columns = [[(c, str(n)) for n in range(max)] for c, max in columns]
                        columns = [item for sublist in columns for item in sublist]
                        columns = pd.MultiIndex.from_tuples(columns, names=["attribute", "label"])
                        dummies_multi[subprob] = pd.DataFrame(multi, index=df.index, columns=columns)
                    except KeyError:
                        pass
                dummies_types["MULTI"] = pd.concat(dummies_multi.values(), keys=dummies_multi.keys(), axis=1)
            except (KeyError, ValueError):
                pass

            dummies_real = {}
            try:
                for subprob in df.loc[:, ["REAL"]].columns.get_level_values(1).unique():
                    try:
                        # REAL attributes are passed through with an empty
                        # second index level to match the dummy column shape.
                        columns = pd.MultiIndex.from_tuples(
                            [(a, "") for a in df["REAL"][subprob].columns],
                            names=["attribute", "label"]
                        )
                        dummies_real[subprob] = pd.DataFrame(
                            df['REAL'][subprob].values,
                            columns=columns,
                            index=df.index
                        )
                    except KeyError:
                        pass
                dummies_types["REAL"] = pd.concat(dummies_real.values(), keys=dummies_real.keys(), axis=1)
            except (KeyError, ValueError):
                pass

            dummies[name] = pd.concat(dummies_types.values(), keys=dummies_types.keys(), axis=1)
        return pd.concat(dummies.values(), keys=dummies.keys(), axis=1).replace("", np.nan).astype(np.float)
def to_csv(self, tag=""):
""" Writes a csv-file in `varsomdata/localstorage` named according to the properties of the dataset.
A `label.csv` is also always written.
"""
regobs = ""
if len(self.regobs_types) and self.days >= 2:
regobs = f"_regobs_{'--'.join([REG_ENG[obs_type] for obs_type in self.regobs_types])}"
varsom = "" if self.with_varsom else "_novarsom"
tag_ = "_" + tag if tag else ""
if self.single:
pathname_data = f"{se.local_storage}single_data_v{CSV_VERSION}{tag_}_days_{self.days}{regobs}{varsom}.csv"
pathname_label = f"{se.local_storage}single_label_v{CSV_VERSION}{tag_}_days_{self.days}{regobs}{varsom}.csv"
pathname_weight = f"{se.local_storage}single_weight_v{CSV_VERSION}{tag_}_days_{self.days}{regobs}{varsom}.csv"
try:
old_ld = LabeledData.from_csv(
self.days,
self.regobs_types,
with_varsom=self.with_varsom,
seasons=self.seasons,
tag=tag,
)
ld = self.denormalize()
ld.data = pd.concat([old_ld.data, ld.data], axis=0)
ld.row_weight = pd.concat([old_ld.row_weight, ld.row_weight], axis=0)
unique = np.unique(ld.data.index)
ld.data = ld.data.loc[unique]
ld.row_weight = ld.row_weight.loc[unique]
if old_ld.label is not None and ld.label is not None:
ld.label = pd.concat([old_ld.label, ld.label], axis=0)
if ld.label is not None:
ld.label = ld.label.loc[unique]
except CsvMissingError:
ld = self.denormalize()
else:
seasons = "--".join(self.seasons)
pathname_data = f"{se.local_storage}data_v{CSV_VERSION}{tag_}_days_{self.days}{regobs}{varsom}_{seasons}.csv"
pathname_label = f"{se.local_storage}label_v{CSV_VERSION}{tag_}_days_{self.days}{regobs}{varsom}_{seasons}.csv"
pathname_weight = f"{se.local_storage}weight_v{CSV_VERSION}{tag_}_days_{self.days}{regobs}{varsom}_{seasons}.csv"
ld = self.denormalize()
ld.data.to_csv(pathname_data, sep=';')
ld.row_weight.to_csv(pathname_weight, sep=';', header=False)
if ld.label is not None:
ld.label.to_csv(pathname_label, sep=';')
def to_aw(self):
"""Convert predictions to AvalancheWarnings.
:return: AvalancheWarning[]
"""
if self.label is None or self.pred is None:
raise DatasetMissingLabel()
aws = []
for name, row in self.pred.iterrows():
aw = gf.AvalancheWarning()
aw.region_id = int(name[1])
aw.valid_from = dt.datetime.combine(dt.date.fromisoformat(name[0]), dt.datetime.min.time())
aw.valid_to = dt.datetime.combine(dt.date.fromisoformat(name[0]), dt.datetime.max.time())
aw.mountain_weather = gf.MountainWeather()
for int_attr, dict in LABEL_GLOBAL.items():
for idx, ext_attr in enumerate(dict['ext_attr']):
try:
ext_val = dict['values'][row['CLASS', '', int_attr]][idx]
setattr(aw, ext_attr, ext_val)
except KeyError:
pass
try:
for p_idx in [1, 2, 3]:
p_prefix = f"problem_{p_idx}"
try:
p_name = row['CLASS', '', p_prefix]
except KeyError:
continue
if p_name == "":
break
problem = gf.AvalancheWarningProblem()
problem.avalanche_problem_id = -p_idx + 4
for idx, ext_attr in enumerate(LABEL_PROBLEM_PRIMARY['ext_attr']):
try:
ext_val = LABEL_PROBLEM_PRIMARY['values'][row['CLASS', '', p_prefix]][idx]
setattr(problem, ext_attr, ext_val)
except KeyError: pass
for int_attr, dict in LABEL_PROBLEM.items():
for idx, ext_attr in enumerate(dict['ext_attr']):
try:
ext_val = dict['values'][row['CLASS', p_name, int_attr]][idx]
setattr(problem, ext_attr, ext_val)
except KeyError: pass
for int_attr, dict in LABEL_PROBLEM_MULTI.items():
try:
ext_attr = dict['ext_attr']
ext_val = row['MULTI', p_name, int_attr]
except KeyError: pass
setattr(problem, ext_attr, ext_val)
for int_attr, dict in LABEL_PROBLEM_REAL.items():
try:
ext_attr = dict['ext_attr']
ext_val = row['REAL', p_name, int_attr]
setattr(problem, ext_attr, ext_val)
except KeyError: pass
aw.avalanche_problems.append(problem)
aws.append(aw)
except KeyError:
pass
return aws
def copy(self):
"""Deep copy LabeledData.
:return: copied LabeledData
"""
ld = LabeledData(
self.data.copy(deep=True) if self.data is not None else None,
self.label.copy(deep=True) if self.label is not None else None,
self.row_weight.copy(deep=True),
self.days,
copy.copy(self.regobs_types),
self.with_varsom,
self.seasons
)
ld.is_normalized = self.is_normalized
ld.with_regions = self.with_regions
ld.elevation_class = self.elevation_class
ld.scaler = self.scaler
ld.pred = self.pred.copy(deep=True) if self.pred is not None else None
return ld
    @staticmethod
    def from_csv(days, regobs_types, seasons=('2017-18', '2018-19', '2019-20'), with_varsom=True, tag=""):
        """Read LabeledData from previously written .csv-file.

        Filenames are reconstructed from the parameters and must mirror exactly
        what `to_csv` writes.

        :param days: How far back in time values should data be included.
        :param regobs_types: A tuple/list of strings of types of observations to fetch from RegObs.,
                             e.g., `("Faretegn")`.
        :param seasons: Seasons the stored dataset covers; empty/falsy selects a
                        single-date dataset.
        :param with_varsom: Whether the stored dataset includes varsom features.
        :param tag: Optional tag embedded in the filename.
        :return: LabeledData reconstructed from local storage.
        :raises CsvMissingError: if the data or weight csv is not found.
        """
        single = not seasons
        seasons = "--".join(sorted(list(set(seasons if seasons else []))))
        tag = "_" + tag if tag else ""
        regobs = ""
        if len(regobs_types) and days >= 2:
            regobs = f"_regobs_{'--'.join([REG_ENG[obs_type] for obs_type in regobs_types])}"
        varsom = "" if with_varsom else "_novarsom"
        if single:
            pathname_data = f"{se.local_storage}single_data_v{CSV_VERSION}{tag}_days_{days}{regobs}{varsom}.csv"
            pathname_label = f"{se.local_storage}single_label_v{CSV_VERSION}{tag}_days_{days}{regobs}{varsom}.csv"
            pathname_weight = f"{se.local_storage}single_weight_v{CSV_VERSION}{tag}_days_{days}{regobs}{varsom}.csv"
        else:
            pathname_data = f"{se.local_storage}data_v{CSV_VERSION}{tag}_days_{days}{regobs}{varsom}_{seasons}.csv"
            pathname_label = f"{se.local_storage}label_v{CSV_VERSION}{tag}_days_{days}{regobs}{varsom}_{seasons}.csv"
            pathname_weight = f"{se.local_storage}weight_v{CSV_VERSION}{tag}_days_{days}{regobs}{varsom}_{seasons}.csv"
        try:
            # Labels have three header rows (type, subprob, attribute). Pandas
            # fills empty header cells with 'Unnamed:*'; restore them to _NONE.
            label = pd.read_csv(pathname_label, sep=";", header=[0, 1, 2], index_col=[0, 1], low_memory=False, dtype="U")
            columns = [(col[0], re.sub(r'Unnamed:.*', _NONE, col[1]), col[2]) for col in label.columns.tolist()]
            label.columns = pd.MultiIndex.from_tuples(columns)
        except FileNotFoundError:
            # A missing label file is tolerated (unlabeled dataset).
            label = None
        try:
            data = pd.read_csv(pathname_data, sep=";", header=[0, 1], index_col=[0, 1])
            # NOTE(review): `squeeze=True` was removed in pandas 2.0
            # (use `.squeeze("columns")` instead) — verify the pinned pandas version.
            row_weight = pd.read_csv(pathname_weight, sep=";", header=None, index_col=[0, 1], low_memory=False, squeeze=True)
            data.columns = pd.MultiIndex.from_tuples(data.columns)
        except FileNotFoundError:
            # Data/weight files are mandatory; signal the caller to rebuild.
            raise CsvMissingError()
        return LabeledData(data, label, row_weight, days, regobs_types, with_varsom, seasons)
class CsvMissingError(Error):
    """Raised by `LabeledData.from_csv` when the stored data/weight csv is not
    found; callers such as `to_csv` catch it to fall back to a fresh dataset."""
    pass
class SingleDateCsvError(Error):
    """Error concerning single-date csv datasets (raised elsewhere in this
    module; presumably when a single-date operation is misused — not raised in
    this section)."""
    pass
class NoBulletinWithinRangeError(Error):
    """Raised when no avalanche bulletin is available within the requested
    date range (raised elsewhere in this module — not raised in this section)."""
    pass
class NoDataFoundError(Error):
    """Raised when no usable observation data is found for a request (raised
    elsewhere in this module — not raised in this section)."""
    pass
class DatasetMissingLabel(Error):
    """Raised by `LabeledData.to_aw` when the dataset lacks labels or
    predictions required for the requested operation."""
    pass
class NotPredictedError(Error):
    """Raised when predictions are required but have not been made yet (raised
    elsewhere in this module — not raised in this section)."""
    pass
| [
"pandas.read_csv",
"avaml.aggregatedata.time_parameters.to_time_parameters",
"numpy.char.zfill",
"varsomdata.getmisc.get_season_from_date",
"numpy.equal",
"copy.copy",
"avaml.aggregatedata.download._get_weather_obs",
"pandas.MultiIndex.from_tuples",
"datetime.timedelta",
"re.search",
"pandas.Ser... | [((12939, 12955), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), '()\n', (12953, 12955), False, 'from sklearn.preprocessing import StandardScaler\n'), ((6420, 6477), 'avaml.aggregatedata.download._get_regobs_obs', '_get_regobs_obs', (['None', 'regobs_types'], {'date': 'date', 'days': 'days'}), '(None, regobs_types, date=date, days=days)\n', (6435, 6477), False, 'from avaml.aggregatedata.download import _get_varsom_obs, _get_weather_obs, _get_regobs_obs, REG_ENG, PROBLEMS\n'), ((6508, 6574), 'avaml.aggregatedata.download._get_varsom_obs', '_get_varsom_obs', (['None'], {'date': 'date', 'days': '(days - 1 if days > 0 else 1)'}), '(None, date=date, days=days - 1 if days > 0 else 1)\n', (6523, 6574), False, 'from avaml.aggregatedata.download import _get_varsom_obs, _get_weather_obs, _get_regobs_obs, REG_ENG, PROBLEMS\n'), ((6596, 6663), 'avaml.aggregatedata.download._get_weather_obs', '_get_weather_obs', (['None'], {'date': 'date', 'days': '(days - 2 if days > 2 else 1)'}), '(None, date=date, days=days - 2 if days > 2 else 1)\n', (6612, 6663), False, 'from avaml.aggregatedata.download import _get_varsom_obs, _get_weather_obs, _get_regobs_obs, REG_ENG, PROBLEMS\n'), ((8363, 8399), 'pandas.DataFrame', 'pd.DataFrame', (['self.labels'], {'dtype': '"""U"""'}), "(self.labels, dtype='U')\n", (8375, 8399), True, 'import pandas as pd\n'), ((17648, 17712), 'pandas.Series', 'pd.Series', (["self.label['CLASS', _NONE, 'problem_1']"], {'name': '"""label"""'}), "(self.label['CLASS', _NONE, 'problem_1'], name='label')\n", (17657, 17712), True, 'import pandas as pd\n'), ((17729, 17796), 'pandas.Series', 'pd.Series', (["self.pred['CLASS', _NONE, 'problem_1']"], {'name': '"""problem_1"""'}), "(self.pred['CLASS', _NONE, 'problem_1'], name='problem_1')\n", (17738, 17796), True, 'import pandas as pd\n'), ((17813, 17880), 'pandas.Series', 'pd.Series', (["self.pred['CLASS', _NONE, 'problem_2']"], {'name': '"""problem_2"""'}), "(self.pred['CLASS', _NONE, 
'problem_2'], name='problem_2')\n", (17822, 17880), True, 'import pandas as pd\n'), ((18151, 18181), 'pandas.concat', 'pd.concat', (['[count, p2]'], {'axis': '(1)'}), '([count, p2], axis=1)\n', (18160, 18181), True, 'import pandas as pd\n'), ((18871, 18900), 'pandas.concat', 'pd.concat', (['[n, share]'], {'axis': '(1)'}), '([n, share], axis=1)\n', (18880, 18900), True, 'import pandas as pd\n'), ((19005, 19403), 'pandas.DataFrame', 'pd.DataFrame', (["{(1): {(2, 10): 'A', (3, 10): 'A', (3, 21): 'B', (5, 21): 'B', (3, 22): 'B',\n (5, 22): 'B'}, (2): {(2, 10): 'A', (3, 10): 'B', (3, 21): 'C', (5, 21):\n 'D', (3, 22): 'C', (5, 22): 'D'}, (3): {(2, 10): 'B', (3, 10): 'C', (3,\n 21): 'D', (5, 21): 'E', (3, 22): 'D', (5, 22): 'E'}, (4): {(2, 10): 'B',\n (3, 10): 'C', (3, 21): 'D', (5, 21): 'E', (3, 22): 'D', (5, 22): 'E'}}"], {}), "({(1): {(2, 10): 'A', (3, 10): 'A', (3, 21): 'B', (5, 21): 'B',\n (3, 22): 'B', (5, 22): 'B'}, (2): {(2, 10): 'A', (3, 10): 'B', (3, 21):\n 'C', (5, 21): 'D', (3, 22): 'C', (5, 22): 'D'}, (3): {(2, 10): 'B', (3,\n 10): 'C', (3, 21): 'D', (5, 21): 'E', (3, 22): 'D', (5, 22): 'E'}, (4):\n {(2, 10): 'B', (3, 10): 'C', (3, 21): 'D', (5, 21): 'E', (3, 22): 'D',\n (5, 22): 'E'}})\n", (19017, 19403), True, 'import pandas as pd\n'), ((19451, 19712), 'pandas.DataFrame', 'pd.DataFrame', (["{(1): {'A': 1, 'B': 1, 'C': 1, 'D': 2, 'E': 3}, (2): {'A': 1, 'B': 2, 'C': \n 2, 'D': 3, 'E': 4}, (3): {'A': 2, 'B': 2, 'C': 3, 'D': 3, 'E': 4}, (4):\n {'A': 2, 'B': 3, 'C': 4, 'D': 4, 'E': 5}, (5): {'A': 2, 'B': 3, 'C': 4,\n 'D': 4, 'E': 5}}"], {}), "({(1): {'A': 1, 'B': 1, 'C': 1, 'D': 2, 'E': 3}, (2): {'A': 1,\n 'B': 2, 'C': 2, 'D': 3, 'E': 4}, (3): {'A': 2, 'B': 2, 'C': 3, 'D': 3,\n 'E': 4}, (4): {'A': 2, 'B': 3, 'C': 4, 'D': 4, 'E': 5}, (5): {'A': 2,\n 'B': 3, 'C': 4, 'D': 4, 'E': 5}})\n", (19463, 19712), True, 'import pandas as pd\n'), ((33153, 33170), 'avaml.aggregatedata.download.PROBLEMS.values', 'PROBLEMS.values', ([], {}), '()\n', (33168, 33170), False, 'from 
avaml.aggregatedata.download import _get_varsom_obs, _get_weather_obs, _get_regobs_obs, REG_ENG, PROBLEMS\n'), ((33668, 33685), 'avaml.aggregatedata.download.PROBLEMS.values', 'PROBLEMS.values', ([], {}), '()\n', (33683, 33685), False, 'from avaml.aggregatedata.download import _get_varsom_obs, _get_weather_obs, _get_regobs_obs, REG_ENG, PROBLEMS\n'), ((36215, 36259), 'numpy.seterr', 'np.seterr', ([], {'divide': '"""ignore"""', 'invalid': '"""ignore"""'}), "(divide='ignore', invalid='ignore')\n", (36224, 36259), True, 'import numpy as np\n'), ((36278, 36321), 'pandas.MultiIndex.from_arrays', 'pd.MultiIndex.from_arrays', (['[[], [], [], []]'], {}), '([[], [], [], []])\n', (36303, 36321), True, 'import pandas as pd\n'), ((36335, 36408), 'pandas.DataFrame', 'pd.DataFrame', ([], {'index': 'df_idx', 'columns': "['f1', 'precision', 'recall', 'rmse']"}), "(index=df_idx, columns=['f1', 'precision', 'recall', 'rmse'])\n", (36347, 36408), True, 'import pandas as pd\n'), ((38345, 38370), 'numpy.seterr', 'np.seterr', ([], {}), '(**old_settings)\n', (38354, 38370), True, 'import numpy as np\n'), ((38859, 38928), 'numpy.zeros', 'np.zeros', (['(shape[0], number_of_features * number_of_days)', 'np.float64'], {}), '((shape[0], number_of_features * number_of_days), np.float64)\n', (38867, 38928), True, 'import numpy as np\n'), ((5429, 5484), 'avaml.aggregatedata.download._get_varsom_obs', '_get_varsom_obs', ([], {'year': 'season', 'max_file_age': 'max_file_age'}), '(year=season, max_file_age=max_file_age)\n', (5444, 5484), False, 'from avaml.aggregatedata.download import _get_varsom_obs, _get_weather_obs, _get_regobs_obs, REG_ENG, PROBLEMS\n'), ((5511, 5537), 'avaml.merge', 'merge', (['self.varsom', 'varsom'], {}), '(self.varsom, varsom)\n', (5516, 5537), False, 'from avaml import Error, setenvironment as se, _NONE, CSV_VERSION, REGIONS, merge, REGION_ELEV\n'), ((5564, 5590), 'avaml.merge', 'merge', (['self.labels', 'labels'], {}), '(self.labels, labels)\n', (5569, 5590), False, 
'from avaml import Error, setenvironment as se, _NONE, CSV_VERSION, REGIONS, merge, REGION_ELEV\n'), ((5612, 5676), 'avaml.aggregatedata.download._get_regobs_obs', '_get_regobs_obs', (['season', 'regobs_types'], {'max_file_age': 'max_file_age'}), '(season, regobs_types, max_file_age=max_file_age)\n', (5627, 5676), False, 'from avaml.aggregatedata.download import _get_varsom_obs, _get_weather_obs, _get_regobs_obs, REG_ENG, PROBLEMS\n'), ((5703, 5729), 'avaml.merge', 'merge', (['self.regobs', 'regobs'], {}), '(self.regobs, regobs)\n', (5708, 5729), False, 'from avaml import Error, setenvironment as se, _NONE, CSV_VERSION, REGIONS, merge, REGION_ELEV\n'), ((5752, 5803), 'avaml.aggregatedata.download._get_weather_obs', '_get_weather_obs', (['season'], {'max_file_age': 'max_file_age'}), '(season, max_file_age=max_file_age)\n', (5768, 5803), False, 'from avaml.aggregatedata.download import _get_varsom_obs, _get_weather_obs, _get_regobs_obs, REG_ENG, PROBLEMS\n'), ((5831, 5859), 'avaml.merge', 'merge', (['self.weather', 'weather'], {}), '(self.weather, weather)\n', (5836, 5859), False, 'from avaml import Error, setenvironment as se, _NONE, CSV_VERSION, REGIONS, merge, REGION_ELEV\n'), ((8557, 8582), 'pandas.DataFrame', 'pd.DataFrame', (['self.varsom'], {}), '(self.varsom)\n', (8569, 8582), True, 'import pandas as pd\n'), ((8613, 8639), 'pandas.DataFrame', 'pd.DataFrame', (['self.weather'], {}), '(self.weather)\n', (8625, 8639), True, 'import pandas as pd\n'), ((8817, 8851), 'varsomdata.getmisc.get_season_from_date', 'gm.get_season_from_date', (['self.date'], {}), '(self.date)\n', (8840, 8851), True, 'from varsomdata import getmisc as gm\n'), ((8874, 8930), 'varsomdata.getmisc.get_forecast_regions', 'gm.get_forecast_regions', ([], {'year': 'season', 'get_b_regions': '(True)'}), '(year=season, get_b_regions=True)\n', (8897, 8930), True, 'from varsomdata import getmisc as gm\n'), ((15957, 16030), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': 'data', 'index': 
'self.data.index', 'columns': 'self.data.columns'}), '(data=data, index=self.data.index, columns=self.data.columns)\n', (15969, 16030), True, 'import pandas as pd\n'), ((16539, 16612), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': 'data', 'index': 'self.data.index', 'columns': 'self.data.columns'}), '(data=data, index=self.data.index, columns=self.data.columns)\n', (16551, 16612), True, 'import pandas as pd\n'), ((18524, 18543), 'numpy.ones', 'np.ones', (['pred.shape'], {}), '(pred.shape)\n', (18531, 18543), True, 'import numpy as np\n'), ((46957, 46978), 'varsomdata.getforecastapi.AvalancheWarning', 'gf.AvalancheWarning', ([], {}), '()\n', (46976, 46978), True, 'from varsomdata import getforecastapi as gf\n'), ((47259, 47279), 'varsomdata.getforecastapi.MountainWeather', 'gf.MountainWeather', ([], {}), '()\n', (47277, 47279), True, 'from varsomdata import getforecastapi as gf\n'), ((49974, 50002), 'copy.copy', 'copy.copy', (['self.regobs_types'], {}), '(self.regobs_types)\n', (49983, 50002), False, 'import copy\n'), ((51901, 52006), 'pandas.read_csv', 'pd.read_csv', (['pathname_label'], {'sep': '""";"""', 'header': '[0, 1, 2]', 'index_col': '[0, 1]', 'low_memory': '(False)', 'dtype': '"""U"""'}), "(pathname_label, sep=';', header=[0, 1, 2], index_col=[0, 1],\n low_memory=False, dtype='U')\n", (51912, 52006), True, 'import pandas as pd\n'), ((52144, 52178), 'pandas.MultiIndex.from_tuples', 'pd.MultiIndex.from_tuples', (['columns'], {}), '(columns)\n', (52169, 52178), True, 'import pandas as pd\n'), ((52270, 52338), 'pandas.read_csv', 'pd.read_csv', (['pathname_data'], {'sep': '""";"""', 'header': '[0, 1]', 'index_col': '[0, 1]'}), "(pathname_data, sep=';', header=[0, 1], index_col=[0, 1])\n", (52281, 52338), True, 'import pandas as pd\n'), ((52364, 52468), 'pandas.read_csv', 'pd.read_csv', (['pathname_weight'], {'sep': '""";"""', 'header': 'None', 'index_col': '[0, 1]', 'low_memory': '(False)', 'squeeze': '(True)'}), "(pathname_weight, sep=';', header=None, 
index_col=[0, 1],\n low_memory=False, squeeze=True)\n", (52375, 52468), True, 'import pandas as pd\n'), ((52492, 52531), 'pandas.MultiIndex.from_tuples', 'pd.MultiIndex.from_tuples', (['data.columns'], {}), '(data.columns)\n', (52517, 52531), True, 'import pandas as pd\n'), ((9163, 9198), 'datetime.date.fromisoformat', 'dt.date.fromisoformat', (['entry_idx[0]'], {}), '(entry_idx[0])\n', (9184, 9198), True, 'import datetime as dt\n'), ((12051, 12072), 'pandas.Series', 'pd.Series', (['row_weight'], {}), '(row_weight)\n', (12060, 12072), True, 'import pandas as pd\n'), ((17462, 17496), 'numpy.sign', 'np.sign', (['ld.data.loc[:, temp_cols]'], {}), '(ld.data.loc[:, temp_cols])\n', (17469, 17496), True, 'import numpy as np\n'), ((17899, 17939), 'pandas.concat', 'pd.concat', (['[label, pred1, pred2]'], {'axis': '(1)'}), '([label, pred1, pred2], axis=1)\n', (17908, 17939), True, 'import pandas as pd\n'), ((18627, 18673), 'pandas.concat', 'pd.concat', (['[label, label - pred, ones]'], {'axis': '(1)'}), '([label, label - pred, ones], axis=1)\n', (18636, 18673), True, 'import pandas as pd\n'), ((28261, 28281), 'numpy.zeros', 'np.zeros', (['twos.shape'], {}), '(twos.shape)\n', (28269, 28281), True, 'import numpy as np\n'), ((28339, 28361), 'numpy.zeros', 'np.zeros', (['threes.shape'], {}), '(threes.shape)\n', (28347, 28361), True, 'import numpy as np\n'), ((28678, 28700), 'numpy.zeros', 'np.zeros', (['threes.shape'], {}), '(threes.shape)\n', (28686, 28700), True, 'import numpy as np\n'), ((30978, 30998), 'numpy.zeros', 'np.zeros', (['ones.shape'], {}), '(ones.shape)\n', (30986, 30998), True, 'import numpy as np\n'), ((31200, 31220), 'numpy.zeros', 'np.zeros', (['twos.shape'], {}), '(twos.shape)\n', (31208, 31220), True, 'import numpy as np\n'), ((31501, 31523), 'numpy.zeros', 'np.zeros', (['threes.shape'], {}), '(threes.shape)\n', (31509, 31523), True, 'import numpy as np\n'), ((32908, 32960), 'numpy.logical_or', 'np.logical_or', (['prev_eq[:, n]', '(ld.pred[mcol] == _NONE)'], 
{}), '(prev_eq[:, n], ld.pred[mcol] == _NONE)\n', (32921, 32960), True, 'import numpy as np\n'), ((33522, 33572), 'numpy.sum', 'np.sum', (['(ld.pred.loc[:, prob_cols] != _NONE)'], {'axis': '(1)'}), '(ld.pred.loc[:, prob_cols] != _NONE, axis=1)\n', (33528, 33572), True, 'import numpy as np\n'), ((36607, 36643), 'pandas.DataFrame', 'pd.DataFrame', ([], {'index': 'self.label.index'}), '(index=self.label.index)\n', (36619, 36643), True, 'import pandas as pd\n'), ((37484, 37504), 'numpy.sum', 'np.sum', (['(truth * pred)'], {}), '(truth * pred)\n', (37490, 37504), True, 'import numpy as np\n'), ((37870, 37925), 'pandas.Series', 'pd.Series', (['[f1, prec, recall, np.nan]'], {'index': 'df.columns'}), '([f1, prec, recall, np.nan], index=df.columns)\n', (37879, 37925), True, 'import pandas as pd\n'), ((45392, 45433), 'pandas.concat', 'pd.concat', (['[old_ld.data, ld.data]'], {'axis': '(0)'}), '([old_ld.data, ld.data], axis=0)\n', (45401, 45433), True, 'import pandas as pd\n'), ((45466, 45519), 'pandas.concat', 'pd.concat', (['[old_ld.row_weight, ld.row_weight]'], {'axis': '(0)'}), '([old_ld.row_weight, ld.row_weight], axis=0)\n', (45475, 45519), True, 'import pandas as pd\n'), ((45545, 45569), 'numpy.unique', 'np.unique', (['ld.data.index'], {}), '(ld.data.index)\n', (45554, 45569), True, 'import numpy as np\n'), ((47067, 47097), 'datetime.date.fromisoformat', 'dt.date.fromisoformat', (['name[0]'], {}), '(name[0])\n', (47088, 47097), True, 'import datetime as dt\n'), ((47099, 47121), 'datetime.datetime.min.time', 'dt.datetime.min.time', ([], {}), '()\n', (47119, 47121), True, 'import datetime as dt\n'), ((47169, 47199), 'datetime.date.fromisoformat', 'dt.date.fromisoformat', (['name[0]'], {}), '(name[0])\n', (47190, 47199), True, 'import datetime as dt\n'), ((47201, 47223), 'datetime.datetime.max.time', 'dt.datetime.max.time', ([], {}), '()\n', (47221, 47223), True, 'import datetime as dt\n'), ((12120, 12143), 'pandas.concat', 'pd.concat', (['[df, df_new]'], {}), '([df, 
df_new])\n', (12129, 12143), True, 'import pandas as pd\n'), ((12205, 12242), 'pandas.concat', 'pd.concat', (['[df_weight, df_weight_new]'], {}), '([df_weight, df_weight_new])\n', (12214, 12242), True, 'import pandas as pd\n'), ((17333, 17368), 're.match', 're.match', (['"""^temp_(max|min)$"""', 'title'], {}), "('^temp_(max|min)$', title)\n", (17341, 17368), False, 'import re\n'), ((17507, 17540), 'numpy.abs', 'np.abs', (['ld.data.loc[:, temp_cols]'], {}), '(ld.data.loc[:, temp_cols])\n', (17513, 17540), True, 'import numpy as np\n'), ((32762, 32799), 'numpy.equal', 'np.equal', (['ld.pred[mcol]', 'ld.pred[col]'], {}), '(ld.pred[mcol], ld.pred[col])\n', (32770, 32799), True, 'import numpy as np\n'), ((37685, 37697), 'numpy.sum', 'np.sum', (['pred'], {}), '(pred)\n', (37691, 37697), True, 'import numpy as np\n'), ((37741, 37754), 'numpy.sum', 'np.sum', (['truth'], {}), '(truth)\n', (37747, 37754), True, 'import numpy as np\n'), ((38276, 38335), 'pandas.Series', 'pd.Series', (['[np.nan, np.nan, np.nan, rmse]'], {'index': 'df.columns'}), '([np.nan, np.nan, np.nan, rmse], index=df.columns)\n', (38285, 38335), True, 'import pandas as pd\n'), ((45775, 45818), 'pandas.concat', 'pd.concat', (['[old_ld.label, ld.label]'], {'axis': '(0)'}), '([old_ld.label, ld.label], axis=0)\n', (45784, 45818), True, 'import pandas as pd\n'), ((47989, 48017), 'varsomdata.getforecastapi.AvalancheWarningProblem', 'gf.AvalancheWarningProblem', ([], {}), '()\n', (48015, 48017), True, 'from varsomdata import getforecastapi as gf\n'), ((52035, 52070), 're.sub', 're.sub', (['"""Unnamed:.*"""', '_NONE', 'col[1]'], {}), "('Unnamed:.*', _NONE, col[1])\n", (52041, 52070), False, 'import re\n'), ((11971, 12008), 'pandas.DataFrame', 'pd.DataFrame', (['table'], {'dtype': 'np.float32'}), '(table, dtype=np.float32)\n', (11983, 12008), True, 'import pandas as pd\n'), ((16901, 16929), 're.match', 're.match', (['"""^region_id"""', 'x[0]'], {}), "('^region_id', x[0])\n", (16909, 16929), False, 'import re\n'), 
((26956, 26983), 'numpy.logical_or', 'np.logical_or', (['twos', 'threes'], {}), '(twos, threes)\n', (26969, 26983), True, 'import numpy as np\n'), ((27042, 27069), 'numpy.logical_or', 'np.logical_or', (['twos', 'threes'], {}), '(twos, threes)\n', (27055, 27069), True, 'import numpy as np\n'), ((27128, 27155), 'numpy.logical_or', 'np.logical_or', (['twos', 'threes'], {}), '(twos, threes)\n', (27141, 27155), True, 'import numpy as np\n'), ((27957, 27984), 'numpy.logical_or', 'np.logical_or', (['twos', 'threes'], {}), '(twos, threes)\n', (27970, 27984), True, 'import numpy as np\n'), ((28046, 28073), 'numpy.logical_or', 'np.logical_or', (['twos', 'threes'], {}), '(twos, threes)\n', (28059, 28073), True, 'import numpy as np\n'), ((28135, 28162), 'numpy.logical_or', 'np.logical_or', (['twos', 'threes'], {}), '(twos, threes)\n', (28148, 28162), True, 'import numpy as np\n'), ((37529, 37542), 'numpy.sum', 'np.sum', (['truth'], {}), '(truth)\n', (37535, 37542), True, 'import numpy as np\n'), ((37669, 37681), 'numpy.sum', 'np.sum', (['pred'], {}), '(pred)\n', (37675, 37681), True, 'import numpy as np\n'), ((39690, 39712), 'avaml.aggregatedata.time_parameters.to_time_parameters', 'to_time_parameters', (['ld'], {}), '(ld)\n', (39708, 39712), False, 'from avaml.aggregatedata.time_parameters import to_time_parameters\n'), ((42197, 42227), 'numpy.char.zfill', 'np.char.zfill', (['multi', 'multimax'], {}), '(multi, multimax)\n', (42210, 42227), True, 'import numpy as np\n'), ((42733, 42797), 'pandas.MultiIndex.from_tuples', 'pd.MultiIndex.from_tuples', (['columns'], {'names': "['attribute', 'label']"}), "(columns, names=['attribute', 'label'])\n", (42758, 42797), True, 'import pandas as pd\n'), ((42847, 42899), 'pandas.DataFrame', 'pd.DataFrame', (['multi'], {'index': 'df.index', 'columns': 'columns'}), '(multi, index=df.index, columns=columns)\n', (42859, 42899), True, 'import pandas as pd\n'), ((43336, 43443), 'pandas.MultiIndex.from_tuples', 'pd.MultiIndex.from_tuples', (["[(a, 
'') for a in df['REAL'][subprob].columns]"], {'names': "['attribute', 'label']"}), "([(a, '') for a in df['REAL'][subprob].columns],\n names=['attribute', 'label'])\n", (43361, 43443), True, 'import pandas as pd\n'), ((43570, 43643), 'pandas.DataFrame', 'pd.DataFrame', (["df['REAL'][subprob].values"], {'columns': 'columns', 'index': 'df.index'}), "(df['REAL'][subprob].values, columns=columns, index=df.index)\n", (43582, 43643), True, 'import pandas as pd\n'), ((18095, 18112), 'pandas.Series.mode', 'pd.Series.mode', (['x'], {}), '(x)\n', (18109, 18112), True, 'import pandas as pd\n'), ((22744, 22771), 're.search', 're.search', (['"""lev_fill"""', 'x[0]'], {}), "('lev_fill', x[0])\n", (22753, 22771), False, 'import re\n'), ((25367, 25394), 're.search', 're.search', (['"""lev_fill"""', 'x[0]'], {}), "('lev_fill', x[0])\n", (25376, 25394), False, 'import re\n'), ((27542, 27569), 're.search', 're.search', (['"""lev_fill"""', 'x[0]'], {}), "('lev_fill', x[0])\n", (27551, 27569), False, 'import re\n'), ((30269, 30296), 're.search', 're.search', (['"""lev_fill"""', 'x[0]'], {}), "('lev_fill', x[0])\n", (30278, 30296), False, 'import re\n'), ((33914, 33953), 'numpy.logical_or', 'np.logical_or', (["(fill == '1')", "(fill == '2')"], {}), "(fill == '1', fill == '2')\n", (33927, 33953), True, 'import numpy as np\n'), ((40364, 40402), 'pandas.get_dummies', 'pd.get_dummies', (['sub_df'], {'prefix_sep': '""":"""'}), "(sub_df, prefix_sep=':')\n", (40378, 40402), True, 'import pandas as pd\n'), ((40543, 40581), 'pandas.get_dummies', 'pd.get_dummies', (['sub_df'], {'prefix_sep': '""":"""'}), "(sub_df, prefix_sep=':')\n", (40557, 40581), True, 'import pandas as pd\n'), ((41103, 41155), 'pandas.get_dummies', 'pd.get_dummies', (["df['CLASS', subprob]"], {'prefix_sep': '""":"""'}), "(df['CLASS', subprob], prefix_sep=':')\n", (41117, 41155), True, 'import pandas as pd\n'), ((9281, 9308), 'datetime.timedelta', 'dt.timedelta', ([], {'days': 'day_dist'}), '(days=day_dist)\n', (9293, 9308), 
True, 'import datetime as dt\n'), ((38203, 38226), 'numpy.square', 'np.square', (['(pred - truth)'], {}), '(pred - truth)\n', (38212, 38226), True, 'import numpy as np\n'), ((42133, 42155), 'numpy.char.str_len', 'np.char.str_len', (['multi'], {}), '(multi)\n', (42148, 42155), True, 'import numpy as np\n'), ((40852, 40887), 'numpy.char.partition', 'np.char.partition', (['columns'], {'sep': '""":"""'}), "(columns, sep=':')\n", (40869, 40887), True, 'import numpy as np\n'), ((41426, 41461), 'numpy.char.partition', 'np.char.partition', (['columns'], {'sep': '""":"""'}), "(columns, sep=':')\n", (41443, 41461), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
"""Digital filter bandpass zero-phase implementation (filtfilt). Apply a digital filter forward and backward to a signal.
"""
import numpy as np
import matplotlib.pyplot as plt
from scipy.signal import butter, filtfilt, sosfiltfilt, freqz
from splearn.fourier import fast_fourier_transform
def butter_bandpass_filter(signal, lowcut, highcut, sampling_rate, order=4, verbose=False):
    r"""
    Digital filter bandpass zero-phase implementation (filtfilt)
    Apply a digital filter forward and backward to a signal

    Filtering forward and backward cancels the phase shift, and second-order
    sections (``sos``) are used for numerical stability.

    Args:
        signal : ndarray, shape (trial,channel,time)
            Input signal by trials in time domain
        lowcut : int
            Lower bound filter
        highcut : int
            Upper bound filter
        sampling_rate : int
            Sampling frequency
        order : int, default: 4
            Order of the filter
        verbose : boolean, default: False
            Print and plot details
    Returns:
        y : ndarray
            Filter signal
    """
    sos = _butter_bandpass(lowcut, highcut, sampling_rate, order=order, output='sos')
    # Time samples live on the last axis (axis=2).
    filtered = sosfiltfilt(sos, signal, axis=2)
    if verbose:
        raw_trace = signal[0, 0]
        filt_trace = filtered[0, 0]
        # Time-domain view of the first trial/channel.
        plt.plot(raw_trace, label='signal')
        plt.show()
        plt.plot(filt_trace, label='Filtered')
        plt.show()
        # Frequency-domain view, zoomed around the passband.
        lower_xlim = max(lowcut - 10, 0)
        fast_fourier_transform(
            raw_trace, sampling_rate, plot=True, plot_xlim=[lower_xlim, highcut + 20], plot_label='Signal')
        fast_fourier_transform(
            filt_trace, sampling_rate, plot=True, plot_xlim=[lower_xlim, highcut + 20], plot_label='Filtered')
        plt.xlim([lower_xlim, highcut + 20])
        plt.ylim([0, 2])
        plt.legend()
        plt.xlabel('Frequency (Hz)')
        plt.show()
        print('Input: Signal shape', signal.shape)
        print('Output: Signal shape', filtered.shape)
    return filtered
def butter_bandpass_filter_signal_1d(signal, lowcut, highcut, sampling_rate, order=4, verbose=False):
    r"""
    Digital filter bandpass zero-phase implementation (filtfilt)
    Apply a digital filter forward and backward to a signal

    Operates on a single 1-D trace; filtering forward and backward cancels
    the phase shift of the Butterworth filter.

    Args:
        signal : ndarray, shape (time,)
            Single input signal in time domain
        lowcut : int
            Lower bound filter
        highcut : int
            Upper bound filter
        sampling_rate : int
            Sampling frequency
        order : int, default: 4
            Order of the filter
        verbose : boolean, default: False
            Print and plot details
    Returns:
        y : ndarray
            Filter signal
    """
    b, a = _butter_bandpass(lowcut, highcut, sampling_rate, order)
    filtered = filtfilt(b, a, signal)
    if verbose:
        # Frequency response of the designed filter.
        w, h = freqz(b, a)
        freqs = (sampling_rate * 0.5 / np.pi) * w
        plt.plot(freqs, abs(h), label="order = %d" % order)
        # -3 dB reference line (gain = sqrt(0.5)).
        plt.plot([0, 0.5 * sampling_rate], [np.sqrt(0.5), np.sqrt(0.5)],
                 '--', label='sqrt(0.5)')
        plt.xlabel('Frequency (Hz)')
        plt.ylabel('Gain')
        plt.grid(True)
        plt.legend(loc='best')
        low = max(0, lowcut - (sampling_rate / 100))
        high = highcut + (sampling_rate / 100)
        plt.xlim([low, high])
        plt.ylim([0, 1.2])
        plt.title('Frequency response of filter - lowcut:' +
                  str(lowcut)+', highcut:'+str(highcut))
        plt.show()
        # Time-domain comparison.
        plt.plot(signal, label='Signal')
        plt.title('Signal')
        plt.show()
        plt.plot(filtered, label='Filtered')
        plt.title('Bandpass filtered')
        plt.show()
        # Frequency-domain comparison around the passband.
        lower_xlim = max(lowcut - 10, 0)
        fast_fourier_transform(
            signal, sampling_rate, plot=True, plot_xlim=[lower_xlim, highcut + 20], plot_label='Signal')
        fast_fourier_transform(
            filtered, sampling_rate, plot=True, plot_xlim=[lower_xlim, highcut + 20], plot_label='Filtered')
        plt.xlim([lower_xlim, highcut + 20])
        plt.ylim([0, 2])
        plt.legend()
        plt.xlabel('Frequency (Hz)')
        plt.show()
        print('Input: Signal shape', signal.shape)
        print('Output: Signal shape', filtered.shape)
    return filtered
def _butter_bandpass(lowcut, highcut, sampling_rate, order=4, output='ba'):
r"""
Create a Butterworth bandpass filter
Design an Nth-order digital or analog Butterworth filter and return the filter coefficients.
Args:
lowcut : int
Lower bound filter
highcut : int
Upper bound filter
sampling_rate : int
Sampling frequency
order : int, default: 4
Order of the filter
output : string, default: ba
Type of output {‘ba’, ‘zpk’, ‘sos’}
Returns:
butter : ndarray
Butterworth filter
Dependencies:
butter : scipy.signal.butter
"""
nyq = sampling_rate * 0.5
low = lowcut / nyq
high = highcut / nyq
return butter(order, [low, high], btype='bandpass', output=output)
#### ver 1
# def butter_bandpass(signal, lowcut, highcut, sampling_rate, type="sos", order=4, plot=False, **kwargs):
# r"""
# Design a `order`th-order bandpass Butterworth filter with a cutoff frequency between `lowcut`-Hz and `highcut`-Hz, which, for data sampled at `sampling_rate`-Hz.
# Reference: https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.butter.html
# https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.filtfilt.html
# https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.sosfiltfilt.html
# Args:
# signal : ndarray, shape (time,) or (channel,time) or (trial,channel,time)
# Input signal (1D/2D/3D), where last axis is time samples.
# lowcut : int
# Lower bound filter
# highcut : int
# Upper bound filter
# sampling_rate : int
# Sampling frequency
# type: string, optional, default: sos
# Type of output: numerator/denominator (‘ba’), or second-order sections (‘sos’).
# Default is ‘ba’ for backwards compatibility, but ‘sos’ should be used for general-purpose filtering.
# order : int, optional, default: 4
# Order of the filter
# plot : boolean, optional, default: False
# Plot signal and filtered signal in frequency domain
# plot_xlim : array of shape [lower, upper], optional, default: [lowcut-10 if (lowcut-10) > 0 else 0, highcut+20]
# If `plot=True`, set a limit on the X-axis between lower and upper bound
# plot_ylim : array of shape [lower, upper], optional, default: None
# If `plot=True`, set a limit on the Y-axis between lower and upper bound
# Returns:
# y : ndarray
# Filtered signal that has same shape in input `signal`
# Usage:
# >>> from splearn.data.generate import generate_signal
# >>>
# >>> signal_1d = generate_signal(
# >>> length_seconds=4,
# >>> sampling_rate=100,
# >>> frequencies=[4,7,11,17,40, 50],
# >>> plot=True
# >>> )
# >>> print('signal_1d.shape', signal_1d.shape)
# >>>
# >>> signal_2d = generate_signal(
# >>> length_seconds=4,
# >>> sampling_rate=100,
# >>> frequencies=[[4,7,11,17,40, 50],[1, 3]],
# >>> plot=True
# >>> )
# >>> print('signal_2d.shape', signal_2d.shape)
# >>>
# >>> signal_3d = np.expand_dims(s1, 0)
# >>> print('signal_3d.shape', signal_3d.shape)
# >>>
# >>> signal_1d_filtered = butter_bandpass(
# >>> signal=signal_1d,
# >>> lowcut=5,
# >>> highcut=20,
# >>> sampling_rate=100,
# >>> plot=True,
# >>> )
# >>> print('signal_1d_filtered.shape', signal_1d_filtered.shape)
# >>>
# >>> signal_2d_filtered = butter_bandpass(
# >>> signal=signal_2d,
# >>> lowcut=5,
# >>> highcut=20,
# >>> sampling_rate=100,
# >>> type='sos',
# >>> order=4,
# >>> plot=True,
# >>> plot_xlim=[3,20]
# >>> )
# >>> print('signal_2d_filtered.shape', signal_2d_filtered.shape)
# >>>
# >>> signal_3d_filtered = butter_bandpass(
# >>> signal=signal_3d,
# >>> lowcut=5,
# >>> highcut=20,
# >>> sampling_rate=100,
# >>> type='ba',
# >>> order=4,
# >>> plot=True,
# >>> plot_xlim=[0,40]
# >>> )
# >>> print('signal_3d_filtered.shape', signal_3d_filtered.shape)
# """
# dim = len(signal.shape)-1
# if type == 'ba':
# b, a = _butter_bandpass(lowcut, highcut, sampling_rate, order)
# y = filtfilt(b, a, signal)
# else:
# sos = _butter_bandpass(lowcut, highcut, sampling_rate,
# order=order, output='sos')
# y = sosfiltfilt(sos, signal, axis=dim)
# if plot:
# tmp_x = signal
# tmp_y = y
# if dim == 1:
# tmp_x = signal[0]
# tmp_y = y[0]
# elif dim == 2:
# tmp_x = signal[0, 0]
# tmp_y = y[0, 0]
# if type == 'ba':
# # plot frequency response of filter
# w, h = freqz(b, a)
# plt.plot((sampling_rate * 0.5 / np.pi) * w,
# abs(h), label="order = %d" % order)
# plt.plot([0, 0.5 * sampling_rate], [np.sqrt(0.5), np.sqrt(0.5)],
# '--', label='sqrt(0.5)')
# plt.xlabel('Frequency (Hz)')
# plt.ylabel('Gain')
# plt.grid(True)
# plt.legend(loc='best')
# low = max(0, lowcut-(sampling_rate/100))
# high = highcut+(sampling_rate/100)
# plt.xlim([low, high])
# plt.ylim([0, 1.2])
# plt.title('Frequency response of filter - lowcut:' +
# str(lowcut)+', highcut:'+str(highcut))
# plt.show()
# plot_xlim = kwargs['plot_xlim'] if 'plot_xlim' in kwargs else [lowcut-10 if (lowcut-10) > 0 else 0, highcut+20]
# plot_ylim = kwargs['plot_ylim'] if 'plot_ylim' in kwargs else None
# # frequency domain
# fast_fourier_transform(
# tmp_x,
# sampling_rate,
# plot=True,
# plot_xlim=plot_xlim,
# plot_ylim=plot_ylim,
# plot_label='Signal'
# )
# fast_fourier_transform(
# tmp_y,
# sampling_rate,
# plot=True,
# plot_xlim=plot_xlim,
# plot_ylim=plot_ylim,
# plot_label='Filtered'
# )
# plt.title('Signal and filtered signal in frequency domain, type:' + type + ',lowcut:' + str(lowcut) + ',highcut:' + str(highcut) + ',order:' + str(order))
# plt.legend()
# plt.show()
# return y
# def _butter_bandpass(lowcut, highcut, sampling_rate, order=4, output='ba'):
# r"""
# Create a Butterworth bandpass filter. Design an Nth-order digital or analog Butterworth filter and return the filter coefficients.
# Reference: https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.butter.html
# Args:
# lowcut : int
# Lower bound filter
# highcut : int
# Upper bound filter
# sampling_rate : int
# Sampling frequency
# order : int, default: 4
# Order of the filter
# output : string, default: ba
# Type of output {‘ba’, ‘zpk’, ‘sos’}. Type of output: numerator/denominator (‘ba’), pole-zero (‘zpk’), or second-order sections (‘sos’).
# Default is ‘ba’ for backwards compatibility, but ‘sos’ should be used for general-purpose filtering.
# Returns:
# butter : ndarray
# Scipy butterworth filter
# Dependencies:
# butter : scipy.signal.butter
# """
# nyq = sampling_rate * 0.5
# low = lowcut / nyq
# high = highcut / nyq
# return butter(order, [low, high], btype='bandpass', output=output)
# if __name__ == "__main__":
# from splearn.data.generate import signal
# signal_1d = generate_signal(
# length_seconds=4,
# sampling_rate=100,
# frequencies=[4,7,11,17,40, 50],
# plot=True
# )
# print('signal_1d.shape', signal_1d.shape)
# signal_2d = generate_signal(
# length_seconds=4,
# sampling_rate=100,
# frequencies=[[4,7,11,17,40, 50],[1, 3]],
# plot=True
# )
# print('signal_2d.shape', signal_2d.shape)
# signal_3d = np.expand_dims(s1, 0)
# print('signal_3d.shape', signal_3d.shape)
# signal_1d_filtered = butter_bandpass(
# signal=signal_1d,
# lowcut=5,
# highcut=20,
# sampling_rate=100,
# plot=True,
# )
# print('signal_1d_filtered.shape', signal_1d_filtered.shape)
# signal_2d_filtered = butter_bandpass(
# signal=signal_2d,
# lowcut=5,
# highcut=20,
# sampling_rate=100,
# type='sos',
# order=4,
# plot=True,
# plot_xlim=[3,20]
# )
# print('signal_2d_filtered.shape', signal_2d_filtered.shape)
# signal_3d_filtered = butter_bandpass(
# signal=signal_3d,
# lowcut=5,
# highcut=20,
# sampling_rate=100,
# type='ba',
# order=4,
# plot=True,
# plot_xlim=[0,40]
# )
# print('signal_3d_filtered.shape', signal_3d_filtered.shape)
| [
"matplotlib.pyplot.grid",
"matplotlib.pyplot.title",
"numpy.sqrt",
"matplotlib.pyplot.ylabel",
"scipy.signal.filtfilt",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"scipy.signal.butter",
"splearn.fourier.fast_fourier_transform",
"scipy.signal.freqz",
"scipy.signal.sosfiltfilt",
"matp... | [((1122, 1154), 'scipy.signal.sosfiltfilt', 'sosfiltfilt', (['sos', 'signal'], {'axis': '(2)'}), '(sos, signal, axis=2)\n', (1133, 1154), False, 'from scipy.signal import butter, filtfilt, sosfiltfilt, freqz\n'), ((2760, 2782), 'scipy.signal.filtfilt', 'filtfilt', (['b', 'a', 'signal'], {}), '(b, a, signal)\n', (2768, 2782), False, 'from scipy.signal import butter, filtfilt, sosfiltfilt, freqz\n'), ((5028, 5087), 'scipy.signal.butter', 'butter', (['order', '[low, high]'], {'btype': '"""bandpass"""', 'output': 'output'}), "(order, [low, high], btype='bandpass', output=output)\n", (5034, 5087), False, 'from scipy.signal import butter, filtfilt, sosfiltfilt, freqz\n'), ((1256, 1287), 'matplotlib.pyplot.plot', 'plt.plot', (['tmp_x'], {'label': '"""signal"""'}), "(tmp_x, label='signal')\n", (1264, 1287), True, 'import matplotlib.pyplot as plt\n'), ((1296, 1306), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1304, 1306), True, 'import matplotlib.pyplot as plt\n'), ((1316, 1349), 'matplotlib.pyplot.plot', 'plt.plot', (['tmp_y'], {'label': '"""Filtered"""'}), "(tmp_y, label='Filtered')\n", (1324, 1349), True, 'import matplotlib.pyplot as plt\n'), ((1358, 1368), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1366, 1368), True, 'import matplotlib.pyplot as plt\n'), ((1457, 1576), 'splearn.fourier.fast_fourier_transform', 'fast_fourier_transform', (['tmp_x', 'sampling_rate'], {'plot': '(True)', 'plot_xlim': '[lower_xlim, highcut + 20]', 'plot_label': '"""Signal"""'}), "(tmp_x, sampling_rate, plot=True, plot_xlim=[\n lower_xlim, highcut + 20], plot_label='Signal')\n", (1479, 1576), False, 'from splearn.fourier import fast_fourier_transform\n'), ((1591, 1712), 'splearn.fourier.fast_fourier_transform', 'fast_fourier_transform', (['tmp_y', 'sampling_rate'], {'plot': '(True)', 'plot_xlim': '[lower_xlim, highcut + 20]', 'plot_label': '"""Filtered"""'}), "(tmp_y, sampling_rate, plot=True, plot_xlim=[\n lower_xlim, highcut + 20], 
plot_label='Filtered')\n", (1613, 1712), False, 'from splearn.fourier import fast_fourier_transform\n'), ((1728, 1764), 'matplotlib.pyplot.xlim', 'plt.xlim', (['[lower_xlim, highcut + 20]'], {}), '([lower_xlim, highcut + 20])\n', (1736, 1764), True, 'import matplotlib.pyplot as plt\n'), ((1771, 1787), 'matplotlib.pyplot.ylim', 'plt.ylim', (['[0, 2]'], {}), '([0, 2])\n', (1779, 1787), True, 'import matplotlib.pyplot as plt\n'), ((1796, 1808), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (1806, 1808), True, 'import matplotlib.pyplot as plt\n'), ((1817, 1845), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Frequency (Hz)"""'], {}), "('Frequency (Hz)')\n", (1827, 1845), True, 'import matplotlib.pyplot as plt\n'), ((1854, 1864), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1862, 1864), True, 'import matplotlib.pyplot as plt\n'), ((2815, 2826), 'scipy.signal.freqz', 'freqz', (['b', 'a'], {}), '(b, a)\n', (2820, 2826), False, 'from scipy.signal import butter, filtfilt, sosfiltfilt, freqz\n'), ((3055, 3083), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Frequency (Hz)"""'], {}), "('Frequency (Hz)')\n", (3065, 3083), True, 'import matplotlib.pyplot as plt\n'), ((3092, 3110), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Gain"""'], {}), "('Gain')\n", (3102, 3110), True, 'import matplotlib.pyplot as plt\n'), ((3119, 3133), 'matplotlib.pyplot.grid', 'plt.grid', (['(True)'], {}), '(True)\n', (3127, 3133), True, 'import matplotlib.pyplot as plt\n'), ((3142, 3164), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""best"""'}), "(loc='best')\n", (3152, 3164), True, 'import matplotlib.pyplot as plt\n'), ((3265, 3286), 'matplotlib.pyplot.xlim', 'plt.xlim', (['[low, high]'], {}), '([low, high])\n', (3273, 3286), True, 'import matplotlib.pyplot as plt\n'), ((3295, 3313), 'matplotlib.pyplot.ylim', 'plt.ylim', (['[0, 1.2]'], {}), '([0, 1.2])\n', (3303, 3313), True, 'import matplotlib.pyplot as plt\n'), ((3440, 3450), 'matplotlib.pyplot.show', 
'plt.show', ([], {}), '()\n', (3448, 3450), True, 'import matplotlib.pyplot as plt\n'), ((3475, 3507), 'matplotlib.pyplot.plot', 'plt.plot', (['signal'], {'label': '"""Signal"""'}), "(signal, label='Signal')\n", (3483, 3507), True, 'import matplotlib.pyplot as plt\n'), ((3516, 3535), 'matplotlib.pyplot.title', 'plt.title', (['"""Signal"""'], {}), "('Signal')\n", (3525, 3535), True, 'import matplotlib.pyplot as plt\n'), ((3544, 3554), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3552, 3554), True, 'import matplotlib.pyplot as plt\n'), ((3564, 3593), 'matplotlib.pyplot.plot', 'plt.plot', (['y'], {'label': '"""Filtered"""'}), "(y, label='Filtered')\n", (3572, 3593), True, 'import matplotlib.pyplot as plt\n'), ((3602, 3632), 'matplotlib.pyplot.title', 'plt.title', (['"""Bandpass filtered"""'], {}), "('Bandpass filtered')\n", (3611, 3632), True, 'import matplotlib.pyplot as plt\n'), ((3641, 3651), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3649, 3651), True, 'import matplotlib.pyplot as plt\n'), ((3733, 3853), 'splearn.fourier.fast_fourier_transform', 'fast_fourier_transform', (['signal', 'sampling_rate'], {'plot': '(True)', 'plot_xlim': '[lower_xlim, highcut + 20]', 'plot_label': '"""Signal"""'}), "(signal, sampling_rate, plot=True, plot_xlim=[\n lower_xlim, highcut + 20], plot_label='Signal')\n", (3755, 3853), False, 'from splearn.fourier import fast_fourier_transform\n'), ((3868, 3985), 'splearn.fourier.fast_fourier_transform', 'fast_fourier_transform', (['y', 'sampling_rate'], {'plot': '(True)', 'plot_xlim': '[lower_xlim, highcut + 20]', 'plot_label': '"""Filtered"""'}), "(y, sampling_rate, plot=True, plot_xlim=[lower_xlim, \n highcut + 20], plot_label='Filtered')\n", (3890, 3985), False, 'from splearn.fourier import fast_fourier_transform\n'), ((4001, 4037), 'matplotlib.pyplot.xlim', 'plt.xlim', (['[lower_xlim, highcut + 20]'], {}), '([lower_xlim, highcut + 20])\n', (4009, 4037), True, 'import matplotlib.pyplot as plt\n'), ((4044, 4060), 
'matplotlib.pyplot.ylim', 'plt.ylim', (['[0, 2]'], {}), '([0, 2])\n', (4052, 4060), True, 'import matplotlib.pyplot as plt\n'), ((4069, 4081), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (4079, 4081), True, 'import matplotlib.pyplot as plt\n'), ((4090, 4118), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Frequency (Hz)"""'], {}), "('Frequency (Hz)')\n", (4100, 4118), True, 'import matplotlib.pyplot as plt\n'), ((4127, 4137), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4135, 4137), True, 'import matplotlib.pyplot as plt\n'), ((2976, 2988), 'numpy.sqrt', 'np.sqrt', (['(0.5)'], {}), '(0.5)\n', (2983, 2988), True, 'import numpy as np\n'), ((2990, 3002), 'numpy.sqrt', 'np.sqrt', (['(0.5)'], {}), '(0.5)\n', (2997, 3002), True, 'import numpy as np\n')] |
from __future__ import print_function, division
import numpy as np
import matplotlib.pyplot as plt
from scipy.integrate import trapz
import multiprocessing as mp
import h5py
def airy_func(wavelength, cos_th, d, F):
    """Airy transmission function of a Fabry-Perot etalon.

    Args:
        wavelength: wavelength(s) of the light (nm, per callers here)
        cos_th: cosine of the ray's incidence angle
        d: etalon spacing in mm (the 2e6 factor converts mm to nm for
            the round-trip phase -- assumption from unit conventions;
            confirm against the instrument docs)
        F: etalon finesse

    Returns:
        Transmission in (0, 1], broadcast over the inputs.
    """
    coeff = (2. * F / np.pi) ** 2
    phase = np.pi * 2.e6 * d * cos_th / wavelength
    return 1.0 / (1.0 + coeff * np.sin(phase) ** 2)
def doppler_calc(w0, mu, temp, v):
    """Doppler-shifted line centre and Gaussian broadening width.

    Args:
        w0: rest wavelength of the line (nm)
        mu: atomic mass of the emitter (a.m.u.)
        temp: species temperature (presumably eV, given the 3.2765e-5
            thermal-broadening constant -- TODO confirm)
        v: flow velocity (3.336e-9 is ~1/c for v in m/s, although
            full_pattern's docstring says km/s -- verify units)

    Returns:
        tuple (sigma, w): Gaussian broadening width and shifted centre
        wavelength, both in the units of ``w0``.
    """
    sigma = w0 * 3.2765e-5 * np.sqrt(temp / mu)
    w = w0 * (1.0 - 3.336e-9 * v)
    # Debug print removed: it fired once per spectral line on every
    # forward_model call and flooded stdout during full-image synthesis.
    return sigma, w
def gaussian(wavelength, w, sigma, amp=1.):
    """Area-normalised Gaussian line shape scaled by ``amp``.

    Args:
        wavelength: evaluation point(s)
        w: line centre
        sigma: standard deviation of the profile
        amp: integrated amplitude (area under the curve)

    Returns:
        Profile value(s), same shape as ``wavelength``.
    """
    z = (wavelength - w) / sigma
    return amp * np.exp(-0.5 * z * z) / (sigma * np.sqrt(2. * np.pi))
def lorentzian(wavelength, w, gamma, amp=1.):
    """Area-normalised Lorentzian (Cauchy) line shape with FWHM ``gamma``.

    Args:
        wavelength: evaluation point(s)
        w: line centre
        gamma: full width at half maximum
        amp: integrated amplitude (area under the curve)

    Returns:
        Profile value(s), same shape as ``wavelength``.
    """
    half_width = 0.5 * gamma
    numerator = amp * half_width / np.pi
    offset = wavelength - w
    return numerator / (offset * offset + half_width * half_width)
def forward_model(r, L, d, F, w0, mu, amp, temp, v, nlambda=1024, sm_ang=False, nprocs=6):
    """Synthetic Fabry-Perot ring intensity at detector radii ``r``.

    For every radius, the emission spectrum (one Doppler-shifted and
    Doppler-broadened Gaussian per line in ``w0``) is multiplied by the
    etalon Airy function and integrated over wavelength with ``trapz``.

    Args:
        r: detector radii (same length units as L)
        L: camera lens focal length
        d: etalon spacing (mm)
        F: etalon finesse
        w0: rest wavelength(s) in nm -- a scalar, or a list/tuple of lines
        mu: atomic mass(es) (a.m.u.), matching the shape of w0
        amp: line amplitude(s), matching the shape of w0
        temp: temperature(s), matching the shape of w0
        v: flow velocity(ies), matching the shape of w0
        nlambda: number of wavelength samples for the integration
        sm_ang: ignored -- forced to False on the first line below
        nprocs: number of worker processes.  NOTE(review): the parallel
            path (nprocs > 1) also scales the integral by 1000 and adds
            Poisson-like noise, while the serial path returns the clean
            integral -- confirm this asymmetry is intended.

    Returns:
        ndarray: model intensity, one value per entry of ``r``.
    """
    sm_ang = False # you are never coming back! (small-angle approximation permanently disabled)
    if type(w0) in [list, tuple]:
        # Multi-line spectrum: every spectral parameter must be a matching list.
        if not all([type(x) in [list,tuple] for x in [mu, amp, temp, v]]):
            raise ValueError('need to have a list for all spec params')
        if not all([len(x) == len(w0) for x in [mu, amp, temp, v]]):
            raise ValueError('spec params are not all the same length')
        sigma = []
        w = []
        for i,ww in enumerate(w0):
            s, l = doppler_calc(ww, mu[i], temp[i], v[i])
            sigma.append(s)
            w.append(l)
        # Wavelength grid spans all lines +/- 10 widths.  The serial path
        # appends a trailing axis so trapz broadcasts over all radii at once.
        if nprocs > 1:
            wavelength = np.linspace(min(w) - 10.*max(sigma), max(w) + 10.*max(sigma), nlambda)
        else:
            wavelength = np.linspace(min(w) - 10.*max(sigma), max(w) + 10.*max(sigma), nlambda)[:,np.newaxis]
        spec = 0.0
        for idx,ww in enumerate(w):
            spec += gaussian(wavelength, ww, sigma[idx], amp[idx])
    else:
        # Single line: every spectral parameter must be a scalar.
        if not all([type(x) not in [list,tuple] for x in [mu, amp, temp, v]]):
            raise ValueError('need to have a list or not for all spec params')
        sigma, w = doppler_calc(w0, mu, temp, v)
        if nprocs > 1:
            wavelength = np.linspace(w - 10.*sigma, w + 10.*sigma, nlambda)
        else:
            wavelength = np.linspace(w - 10.*sigma, w + 10.*sigma, nlambda)[:,np.newaxis]
        spec = gaussian(wavelength, w, sigma, amp)
    # Cosine of the incidence angle on the etalon for each radius.  The
    # small-angle branch is unreachable because sm_ang is forced False above.
    if sm_ang:
        cos_th = 1.0 - 0.5 * (r/L)**2
    else:
        cos_th = L / np.sqrt(L**2 + r**2)
    if nprocs > 1:
        # Worker: integrate spectrum * Airy over wavelength for one chunk of
        # radii, scale, add shot-like noise, and ship the result through `out`.
        def par_func(cos_th, spec, wavelength, d, F, out=None, label=None):
            model = np.zeros_like(cos_th)
            for i, cth in enumerate(cos_th):
                model[i] = trapz(spec*airy_func(wavelength, cth, d, F), wavelength, axis=0)
                model[i] *= 1000
                model[i] += np.random.normal(loc=0.0, scale=np.sqrt(model[i]), size=1)
            if out and label:
                out.put((label, model))
            else:
                return model
        # NOTE(review): handing a locally-defined function to mp.Process works
        # with the fork start method but is not picklable under spawn
        # (Windows/macOS default) -- verify on the target platform.
        cos_ths = np.array_split(cos_th, nprocs)
        procs = []
        sigs = {}
        out = mp.Queue()
        labels = ['{0}'.format(x) for x in range(nprocs)]
        for k in range(nprocs):
            p = mp.Process(target=par_func, args=(cos_ths[k], spec, wavelength, d, F),
                    kwargs={'out':out, 'label': labels[k]})
            procs.append(p)
            p.start()
        for i in range(nprocs):
            tup = out.get()
            sigs[tup[0]] = tup[1]
        for p in procs:
            p.join()
        # Reassemble chunks in label order so radii keep their original order
        # regardless of which worker finished first.
        model = []
        for k in labels:
            model.append(sigs[k])
        model = np.concatenate(model)
    else:
        model = trapz(spec*airy_func(wavelength, cos_th, d, F), wavelength, axis=0)
    return model
def ccd(npx=(4016, 6016), px_size=0.004):
    """Radial distance of every pixel from the sensor centre.

    Args:
        npx: sensor dimensions in pixels (rows, cols); the default is a
            Nikon D5200/D5300-sized chip (per full_pattern's docstring).
        px_size: pixel pitch; 0.004 presumably mm -- confirm.

    Returns:
        ndarray of shape ``npx`` holding each pixel's distance from the
        image centre, in the units of ``px_size``.
    """
    row_c, col_c = ((n - 1) / 2. for n in npx)
    rows, cols = np.indices(npx, dtype=float)
    return px_size * np.sqrt((rows - row_c) ** 2 + (cols - col_c) ** 2)
def ccd_quad(npx=(4016, 6016), px_size=0.004):
    """Radial pixel distances for one quadrant of the sensor.

    Pixel centres sit at half-integer offsets from the chip centre, so
    the quadrant tiles the full image exactly when mirrored by
    ``recomb_quad``.

    Args:
        npx: full sensor dimensions in pixels (rows, cols).
        px_size: pixel pitch (same units as the return value).

    Returns:
        ndarray of shape (npx[0]//2, npx[1]//2) with each quadrant
        pixel's distance from the sensor centre.
    """
    half = (int(npx[0] / 2.), int(npx[1] / 2.))
    rows, cols = np.indices(half, dtype=float)
    return px_size * np.sqrt((rows + 0.5) ** 2 + (cols + 0.5) ** 2)
def recomb_quad(a):
    """Mirror one quadrant into the full 4-fold symmetric image.

    The input is flipped vertically and stacked on top of itself, then
    that half is flipped horizontally and placed to the left, producing
    an array with twice as many rows and columns as ``a``.
    """
    lower_half = np.concatenate((np.flipud(a), a), axis=0)
    return np.concatenate((np.fliplr(lower_half), lower_half), axis=1)
def full_pattern(L,d,F,w0,mu,T,V,A=1.,sm_ang=False,nprocs=6,plotit=False,saveit=None):
    '''
    produces full synthetic ring pattern for Nikon d5200/5300 camera
    Inputs:
        L (float): camera lens focal length (in mm)
        d (float): etalon spacing (in mm)
        F (float): etalon finesse
        w0 (float or list of floats): wavelengths of spectrum (in nm)
        mu (float or list of floats): atomic mass of elements used, same
            order as w0 list (in a.m.u.)
        T (float or list of floats): temperature of each emitting species,
            same order as w0 list
        V (float or list of floats): flow velocity of spectrum (in km/s)
        A (float or list of floats): amplitudes of spectrum, default is 1
        sm_ang (bool, default=F): flag to use the small angle approximation,
            for true synthetic data this should be False
        nprocs (int, default=6): number of processors to use for calc.
        plotit (bool, default=F): flag to plot the resulting rings
        saveit (str, default=None): hdf5 filename for optional saved rings,
            if left to None, the data will not be saved
    Outputs:
        rings (np.ndarray): 2D synthetic ring image from forward_model
    '''
    # Broadcast a single scalar amplitude across every line in w0.
    if type(w0) is list and type(A) is float:
        A = [A]*len(w0)
    # Only one CCD quadrant is computed; the full image is mirrored from it.
    a = ccd_quad()
    rings = forward_model(a.flatten(),L,d,F,w0,mu,A,T,V, nprocs=nprocs)
    print('done with first')
    rings = rings.reshape(a.shape)
    rings = recomb_quad(rings)
    # Radial samples (px) from centre out to the image corner (half diagonal);
    # 0.004 presumably converts px to mm (the CCD pixel pitch) -- TODO confirm.
    r = np.arange(0., (np.sqrt(rings.shape[0]**2 + rings.shape[1]**2)/2.) + 0.0005, 0.001)
    ringsum = forward_model(r*0.004, L, d, F, w0, mu, A, T, V, nprocs=nprocs)
    if saveit is not None:
        # Persist the 2D image, the 1D radial profile, and all input parameters.
        with h5py.File(saveit,'w') as hf:
            hf.create_dataset('2Ddata',data=rings,compression='lzf')
            hf.create_dataset('1Ddata',data=ringsum,compression='lzf')
            hf.create_dataset('1Dr',data=r,compression='lzf')
            hf.create_dataset('L',data=L)
            hf.create_dataset('d',data=d)
            hf.create_dataset('F',data=F)
            hf.create_dataset('amp',data=A)
            hf.create_dataset('w0',data=w0)
            hf.create_dataset('mu',data=mu)
            hf.create_dataset('temp',data=T)
            hf.create_dataset('vel',data=V)
    if plotit:
        # 2D ring image
        f,axs = plt.subplots(figsize=(10,7))
        axs.imshow(rings, cmap='Greys_r', interpolation='nearest', vmin=0, origin='lower')
        axs.set_aspect('equal')
        plt.show(block=False)
        # 1D radial profile
        f,axs = plt.subplots(figsize=(10,7))
        axs.plot(r,ringsum,lw=2)
        axs.set_xlabel('R (px)')
        plt.show()
    return rings
| [
"numpy.sqrt",
"numpy.hstack",
"multiprocessing.Process",
"h5py.File",
"numpy.exp",
"numpy.array_split",
"numpy.linspace",
"numpy.vstack",
"numpy.concatenate",
"numpy.sin",
"multiprocessing.Queue",
"numpy.zeros_like",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.show"
] | [((613, 662), 'numpy.exp', 'np.exp', (['(-0.5 * (wavelength - w) ** 2 / sigma ** 2)'], {}), '(-0.5 * (wavelength - w) ** 2 / sigma ** 2)\n', (619, 662), True, 'import numpy as np\n'), ((4128, 4154), 'numpy.vstack', 'np.vstack', (['(a[::-1, :], a)'], {}), '((a[::-1, :], a))\n', (4137, 4154), True, 'import numpy as np\n'), ((4164, 4190), 'numpy.hstack', 'np.hstack', (['(b[:, ::-1], b)'], {}), '((b[:, ::-1], b))\n', (4173, 4190), True, 'import numpy as np\n'), ((404, 422), 'numpy.sqrt', 'np.sqrt', (['(temp / mu)'], {}), '(temp / mu)\n', (411, 422), True, 'import numpy as np\n'), ((2963, 2993), 'numpy.array_split', 'np.array_split', (['cos_th', 'nprocs'], {}), '(cos_th, nprocs)\n', (2977, 2993), True, 'import numpy as np\n'), ((3045, 3055), 'multiprocessing.Queue', 'mp.Queue', ([], {}), '()\n', (3053, 3055), True, 'import multiprocessing as mp\n'), ((3588, 3609), 'numpy.concatenate', 'np.concatenate', (['model'], {}), '(model)\n', (3602, 3609), True, 'import numpy as np\n'), ((6396, 6425), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(10, 7)'}), '(figsize=(10, 7))\n', (6408, 6425), True, 'import matplotlib.pyplot as plt\n'), ((6556, 6577), 'matplotlib.pyplot.show', 'plt.show', ([], {'block': '(False)'}), '(block=False)\n', (6564, 6577), True, 'import matplotlib.pyplot as plt\n'), ((6594, 6623), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(10, 7)'}), '(figsize=(10, 7))\n', (6606, 6623), True, 'import matplotlib.pyplot as plt\n'), ((6697, 6707), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (6705, 6707), True, 'import matplotlib.pyplot as plt\n'), ((584, 604), 'numpy.sqrt', 'np.sqrt', (['(2.0 * np.pi)'], {}), '(2.0 * np.pi)\n', (591, 604), True, 'import numpy as np\n'), ((2116, 2172), 'numpy.linspace', 'np.linspace', (['(w - 10.0 * sigma)', '(w + 10.0 * sigma)', 'nlambda'], {}), '(w - 10.0 * sigma, w + 10.0 * sigma, nlambda)\n', (2127, 2172), True, 'import numpy as np\n'), ((2407, 2431), 'numpy.sqrt', 'np.sqrt', (['(L 
** 2 + r ** 2)'], {}), '(L ** 2 + r ** 2)\n', (2414, 2431), True, 'import numpy as np\n'), ((2548, 2569), 'numpy.zeros_like', 'np.zeros_like', (['cos_th'], {}), '(cos_th)\n', (2561, 2569), True, 'import numpy as np\n'), ((3162, 3277), 'multiprocessing.Process', 'mp.Process', ([], {'target': 'par_func', 'args': '(cos_ths[k], spec, wavelength, d, F)', 'kwargs': "{'out': out, 'label': labels[k]}"}), "(target=par_func, args=(cos_ths[k], spec, wavelength, d, F),\n kwargs={'out': out, 'label': labels[k]})\n", (3172, 3277), True, 'import multiprocessing as mp\n'), ((5782, 5804), 'h5py.File', 'h5py.File', (['saveit', '"""w"""'], {}), "(saveit, 'w')\n", (5791, 5804), False, 'import h5py\n'), ((2206, 2262), 'numpy.linspace', 'np.linspace', (['(w - 10.0 * sigma)', '(w + 10.0 * sigma)', 'nlambda'], {}), '(w - 10.0 * sigma, w + 10.0 * sigma, nlambda)\n', (2217, 2262), True, 'import numpy as np\n'), ((3849, 3897), 'numpy.sqrt', 'np.sqrt', (['((i - cntr[0]) ** 2 + (j - cntr[1]) ** 2)'], {}), '((i - cntr[0]) ** 2 + (j - cntr[1]) ** 2)\n', (3856, 3897), True, 'import numpy as np\n'), ((4049, 4089), 'numpy.sqrt', 'np.sqrt', (['((i + 0.5) ** 2 + (j + 0.5) ** 2)'], {}), '((i + 0.5) ** 2 + (j + 0.5) ** 2)\n', (4056, 4089), True, 'import numpy as np\n'), ((5590, 5640), 'numpy.sqrt', 'np.sqrt', (['(rings.shape[0] ** 2 + rings.shape[1] ** 2)'], {}), '(rings.shape[0] ** 2 + rings.shape[1] ** 2)\n', (5597, 5640), True, 'import numpy as np\n'), ((272, 323), 'numpy.sin', 'np.sin', (['(np.pi * 2000000.0 * d * cos_th / wavelength)'], {}), '(np.pi * 2000000.0 * d * cos_th / wavelength)\n', (278, 323), True, 'import numpy as np\n'), ((2800, 2817), 'numpy.sqrt', 'np.sqrt', (['model[i]'], {}), '(model[i])\n', (2807, 2817), True, 'import numpy as np\n')] |
import os
os.environ["CUDA_VISIBLE_DEVICES"] = "-1" #remove this line if you've a GPU
import numpy as np
import argparse
import matplotlib.pyplot as plt
import cv2
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Dropout, Flatten
from tensorflow.keras.layers import Conv2D
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.layers import MaxPooling2D
from tensorflow.keras.preprocessing.image import ImageDataGenerator
# model: 2D CNN that classifies 48x48 grayscale face crops into 7 emotions.
model = Sequential()
model.add(Conv2D(32, kernel_size=(3, 3), activation='relu', input_shape=(48,48,1)))
model.add(Conv2D(64, kernel_size=(3, 3), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Conv2D(128, kernel_size=(3, 3), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(128, kernel_size=(3, 3), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(1024, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(7, activation='softmax'))
# Pre-trained weights; the architecture above must match the checkpoint exactly.
model.load_weights('model.h5')
# prevents openCL usage and unnecessary logging messages
cv2.ocl.setUseOpenCL(False)
# dictionary which assigns each label an emotion (alphabetical order)
emotion_dict = {0: "Angry", 1: "Disgusted", 2: "Fearful", 3: "Happy", 4: "Neutral", 5: "Sad", 6: "Surprised"}
# start the webcam feed
cap = cv2.VideoCapture(0)
while True:
    # Find haar cascade to draw bounding box around face
    ret, frame = cap.read()
    if not ret:
        break
    # NOTE(review): the cascade is re-loaded from disk on every frame;
    # hoisting this above the loop would avoid the repeated file I/O.
    facecasc = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    faces = facecasc.detectMultiScale(gray,scaleFactor=1.05, minNeighbors=6)
    for (x, y, w, h) in faces:
        # Box is drawn taller than the detection to leave room for the label text.
        cv2.rectangle(frame, (x, y-50), (x+w, y+h+10), (255, 0, 0), 2)
        roi_gray = gray[y:y + h, x:x + w]
        # Resize face crop to the model's 48x48 input and add channel + batch dims.
        cropped_img = np.expand_dims(np.expand_dims(cv2.resize(roi_gray, (48, 48)), -1), 0)
        prediction = model.predict(cropped_img)
        maxindex = int(np.argmax(prediction))
        cv2.putText(frame, emotion_dict[maxindex], (x+20, y-60), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 2, cv2.LINE_AA)
    cv2.imshow('Video', cv2.resize(frame,(1600,960),interpolation = cv2.INTER_CUBIC))
    # press 'q' to quit the live feed
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
cap.release()
cv2.destroyAllWindows()
"cv2.ocl.setUseOpenCL",
"cv2.rectangle",
"tensorflow.keras.layers.Conv2D",
"tensorflow.keras.layers.MaxPooling2D",
"tensorflow.keras.layers.Dropout",
"numpy.argmax",
"cv2.putText",
"cv2.waitKey",
"tensorflow.keras.layers.Dense",
"cv2.destroyAllWindows",
"cv2.VideoCapture",
"cv2.cvtColor",
"t... | [((497, 509), 'tensorflow.keras.models.Sequential', 'Sequential', ([], {}), '()\n', (507, 509), False, 'from tensorflow.keras.models import Sequential\n'), ((1178, 1205), 'cv2.ocl.setUseOpenCL', 'cv2.ocl.setUseOpenCL', (['(False)'], {}), '(False)\n', (1198, 1205), False, 'import cv2\n'), ((1418, 1437), 'cv2.VideoCapture', 'cv2.VideoCapture', (['(0)'], {}), '(0)\n', (1434, 1437), False, 'import cv2\n'), ((2385, 2408), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (2406, 2408), False, 'import cv2\n'), ((521, 595), 'tensorflow.keras.layers.Conv2D', 'Conv2D', (['(32)'], {'kernel_size': '(3, 3)', 'activation': '"""relu"""', 'input_shape': '(48, 48, 1)'}), "(32, kernel_size=(3, 3), activation='relu', input_shape=(48, 48, 1))\n", (527, 595), False, 'from tensorflow.keras.layers import Conv2D\n'), ((605, 654), 'tensorflow.keras.layers.Conv2D', 'Conv2D', (['(64)'], {'kernel_size': '(3, 3)', 'activation': '"""relu"""'}), "(64, kernel_size=(3, 3), activation='relu')\n", (611, 654), False, 'from tensorflow.keras.layers import Conv2D\n'), ((666, 696), 'tensorflow.keras.layers.MaxPooling2D', 'MaxPooling2D', ([], {'pool_size': '(2, 2)'}), '(pool_size=(2, 2))\n', (678, 696), False, 'from tensorflow.keras.layers import MaxPooling2D\n'), ((708, 721), 'tensorflow.keras.layers.Dropout', 'Dropout', (['(0.25)'], {}), '(0.25)\n', (715, 721), False, 'from tensorflow.keras.layers import Dense, Dropout, Flatten\n'), ((734, 784), 'tensorflow.keras.layers.Conv2D', 'Conv2D', (['(128)'], {'kernel_size': '(3, 3)', 'activation': '"""relu"""'}), "(128, kernel_size=(3, 3), activation='relu')\n", (740, 784), False, 'from tensorflow.keras.layers import Conv2D\n'), ((796, 826), 'tensorflow.keras.layers.MaxPooling2D', 'MaxPooling2D', ([], {'pool_size': '(2, 2)'}), '(pool_size=(2, 2))\n', (808, 826), False, 'from tensorflow.keras.layers import MaxPooling2D\n'), ((838, 888), 'tensorflow.keras.layers.Conv2D', 'Conv2D', (['(128)'], {'kernel_size': '(3, 3)', 'activation': 
'"""relu"""'}), "(128, kernel_size=(3, 3), activation='relu')\n", (844, 888), False, 'from tensorflow.keras.layers import Conv2D\n'), ((900, 930), 'tensorflow.keras.layers.MaxPooling2D', 'MaxPooling2D', ([], {'pool_size': '(2, 2)'}), '(pool_size=(2, 2))\n', (912, 930), False, 'from tensorflow.keras.layers import MaxPooling2D\n'), ((942, 955), 'tensorflow.keras.layers.Dropout', 'Dropout', (['(0.25)'], {}), '(0.25)\n', (949, 955), False, 'from tensorflow.keras.layers import Dense, Dropout, Flatten\n'), ((968, 977), 'tensorflow.keras.layers.Flatten', 'Flatten', ([], {}), '()\n', (975, 977), False, 'from tensorflow.keras.layers import Dense, Dropout, Flatten\n'), ((989, 1019), 'tensorflow.keras.layers.Dense', 'Dense', (['(1024)'], {'activation': '"""relu"""'}), "(1024, activation='relu')\n", (994, 1019), False, 'from tensorflow.keras.layers import Dense, Dropout, Flatten\n'), ((1031, 1043), 'tensorflow.keras.layers.Dropout', 'Dropout', (['(0.5)'], {}), '(0.5)\n', (1038, 1043), False, 'from tensorflow.keras.layers import Dense, Dropout, Flatten\n'), ((1055, 1085), 'tensorflow.keras.layers.Dense', 'Dense', (['(7)'], {'activation': '"""softmax"""'}), "(7, activation='softmax')\n", (1060, 1085), False, 'from tensorflow.keras.layers import Dense, Dropout, Flatten\n'), ((1580, 1640), 'cv2.CascadeClassifier', 'cv2.CascadeClassifier', (['"""haarcascade_frontalface_default.xml"""'], {}), "('haarcascade_frontalface_default.xml')\n", (1601, 1640), False, 'import cv2\n'), ((1652, 1691), 'cv2.cvtColor', 'cv2.cvtColor', (['frame', 'cv2.COLOR_BGR2GRAY'], {}), '(frame, cv2.COLOR_BGR2GRAY)\n', (1664, 1691), False, 'import cv2\n'), ((1809, 1879), 'cv2.rectangle', 'cv2.rectangle', (['frame', '(x, y - 50)', '(x + w, y + h + 10)', '(255, 0, 0)', '(2)'], {}), '(frame, (x, y - 50), (x + w, y + h + 10), (255, 0, 0), 2)\n', (1822, 1879), False, 'import cv2\n'), ((2108, 2235), 'cv2.putText', 'cv2.putText', (['frame', 'emotion_dict[maxindex]', '(x + 20, y - 60)', 'cv2.FONT_HERSHEY_SIMPLEX', 
'(1)', '(255, 255, 255)', '(2)', 'cv2.LINE_AA'], {}), '(frame, emotion_dict[maxindex], (x + 20, y - 60), cv2.\n FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 2, cv2.LINE_AA)\n', (2119, 2235), False, 'import cv2\n'), ((2252, 2313), 'cv2.resize', 'cv2.resize', (['frame', '(1600, 960)'], {'interpolation': 'cv2.INTER_CUBIC'}), '(frame, (1600, 960), interpolation=cv2.INTER_CUBIC)\n', (2262, 2313), False, 'import cv2\n'), ((2077, 2098), 'numpy.argmax', 'np.argmax', (['prediction'], {}), '(prediction)\n', (2086, 2098), True, 'import numpy as np\n'), ((2321, 2335), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (2332, 2335), False, 'import cv2\n'), ((1966, 1996), 'cv2.resize', 'cv2.resize', (['roi_gray', '(48, 48)'], {}), '(roi_gray, (48, 48))\n', (1976, 1996), False, 'import cv2\n')] |
import glob
import cv2
import numpy as np
import math
from demo import rot, get_bbox, retrive_bbox3d
class rgblDataLoader():
    """Loads RGB images (resized to 320x240) paired with pre-computed
    pseudo-depth targets (60x80) for a depth-prediction network.

    Depth targets are produced once by preproc() from the lidar clouds and
    cached next to each image as '*_dps.jpg'.
    Assumes the ROB599 trainval directory layout — TODO confirm paths.
    """

    def __init__(self):
        # Glob patterns covering the six trainval shards (a* .. f*).
        self.dirPre = ['../ROB599Perception/deploy/trainval/a*/*_image.jpg',
                       '../ROB599Perception/deploy/trainval/b*/*_image.jpg',
                       '../ROB599Perception/deploy/trainval/c*/*_image.jpg',
                       '../ROB599Perception/deploy/trainval/d*/*_image.jpg',
                       '../ROB599Perception/deploy/trainval/e*/*_image.jpg',
                       '../ROB599Perception/deploy/trainval/f*/*_image.jpg']

    def loadImageNames(self):
        """Collect all image paths and carve off a fixed 20-image test set."""
        self.ppmFileNames = []
        for fp in self.dirPre:
            self.ppmFileNames = self.ppmFileNames + glob.glob(fp)
        # Fixed test split.  Indices are descending so that each `del` below
        # does not shift the positions of the indices still to be removed.
        test_ids = [2651, 2623, 2621, 2444, 2414, 2112, 2064, 1981, 1711,
                    1678, 1663, 1450, 1396, 950, 661, 625, 402, 289, 157, 61]
        self.testRGBNames = []
        for test_id in test_ids:
            self.testRGBNames.append(self.ppmFileNames[test_id])
            del self.ppmFileNames[test_id]

    def _load_batch(self, names, batch_size):
        """Load a random batch from `names`.

        Returns (img, dps1, mask):
        - img:  (batch, 240, 320, 3) RGB in [0, 1]
        - dps1: (batch, 60, 80, 1)   pseudo-depth target in [0, 1]
        - mask: (batch, 60, 80, 1)   1 where a valid depth value exists
        """
        img = np.zeros((batch_size, 240, 320, 3), dtype=np.float32)
        dps1 = np.zeros((batch_size, 60, 80, 1), dtype=np.float32)
        mask = np.zeros((batch_size, 60, 80, 1), dtype=np.float32)
        ids = np.random.choice(len(names), batch_size)
        for i in range(batch_size):
            tmpim = cv2.imread(names[ids[i]]).astype(np.float32)
            tmpim = tmpim / 255
            img[i, :, :, :] = cv2.resize(tmpim, (320, 240))
            # Depth target cached by preproc() next to the image file.
            td = cv2.imread(names[ids[i]].replace('_image.jpg', '_dps.jpg')).astype(np.float32)
            td = cv2.cvtColor(td, cv2.COLOR_BGR2GRAY)
            td = td / 255
            dps1[i, :, :, 0] = td
            # Build validity mask: value exactly 1 means "no lidar return"
            # (preproc inverts depth, so the empty background saturates at 1).
            td[np.where(td == 1)] = 0
            td[np.where(td > 0)] = 1
            mask[i, :, :, 0] = td
        return img, dps1, mask

    def getNextBatchTraining(self, batch_size):
        """Random training batch; see _load_batch for the returned tuple."""
        return self._load_batch(self.ppmFileNames, batch_size)

    def getNextBatchTesting(self, batch_size):
        """Random test batch; see _load_batch for the returned tuple.

        BUG FIX: the original read the depth target from
        self.ppmFileNames[ids[i]] while `ids` indexed self.testRGBNames,
        pairing every test image with an unrelated depth map.
        """
        return self._load_batch(self.testRGBNames, batch_size)

    def preproc(self):
        """Project each lidar cloud into its image and cache a normalized,
        blurred inverse-range map ('*_dps.jpg', 80x60) as the depth target."""
        for i in range(len(self.ppmFileNames)):
            print(i)
            tmpim = cv2.imread(self.ppmFileNames[i]).astype(np.float32)
            tmpim = tmpim / 255
            xyz = np.fromfile(self.ppmFileNames[i].replace('_image.jpg', '_cloud.bin'), dtype=np.float32)
            xyz.resize([3, xyz.size // 3])
            # get projection matrix
            proj = np.fromfile(self.ppmFileNames[i].replace('_image.jpg', '_proj.bin'), dtype=np.float32)
            proj.resize([3, 4])
            # project cloud points onto the image plane (homogeneous divide)
            uv = proj @ np.vstack([xyz, np.ones_like(xyz[0, :])])
            uv = uv / uv[2, :]
            clr = np.linalg.norm(xyz, axis=0)  # per-point range (distance)
            dps = np.zeros((tmpim.shape[0], tmpim.shape[1]), dtype=np.float32)
            dpsmk = np.zeros((tmpim.shape[0], tmpim.shape[1]), dtype=np.float32)
            # splat each projected point's range into a sparse depth image
            # https://en.wikipedia.org/wiki/Bilateral_filter
            for uvidx in range(uv.shape[1]):
                u = int(uv[0, uvidx])
                v = int(uv[1, uvidx])
                # BUG FIX: bounds checks were `<= shape`, which allowed
                # u == width / v == height and raised IndexError.
                if 0 <= u < dps.shape[1] and 0 <= v < dps.shape[0]:
                    dps[v, u] = clr[uvidx]
                    dpsmk[v, u] = 1
            # Normalized Gaussian blur: blur values and the hit mask, then
            # divide so sparse samples spread without darkening.
            ksize = 11
            dps_blurred = cv2.GaussianBlur(dps, (ksize, ksize), 0)
            weight_count = cv2.GaussianBlur(dpsmk, (ksize, ksize), 0)
            weight_count += 1e-9  # avoid division by zero in empty regions
            res = np.divide(dps_blurred, weight_count)
            res_blurred = cv2.GaussianBlur(res, (ksize, ksize), 0)
            rm = np.amax(res_blurred)
            res_blurred = 1 - res_blurred / rm  # invert: near -> bright
            td = cv2.resize(res_blurred, (80, 60)) * 255
            cv2.imwrite(self.ppmFileNames[i].replace('_image.jpg', '_dps.jpg'), td)
| [
"numpy.ones_like",
"numpy.random.choice",
"numpy.where",
"numpy.zeros",
"glob.glob",
"cv2.cvtColor",
"numpy.linalg.norm",
"cv2.resize",
"cv2.GaussianBlur",
"numpy.amax",
"numpy.divide",
"cv2.imread"
] | [((1236, 1289), 'numpy.zeros', 'np.zeros', (['(batch_size, 240, 320, 3)'], {'dtype': 'np.float32'}), '((batch_size, 240, 320, 3), dtype=np.float32)\n', (1244, 1289), True, 'import numpy as np\n'), ((1305, 1356), 'numpy.zeros', 'np.zeros', (['(batch_size, 60, 80, 1)'], {'dtype': 'np.float32'}), '((batch_size, 60, 80, 1), dtype=np.float32)\n', (1313, 1356), True, 'import numpy as np\n'), ((1372, 1423), 'numpy.zeros', 'np.zeros', (['(batch_size, 60, 80, 1)'], {'dtype': 'np.float32'}), '((batch_size, 60, 80, 1), dtype=np.float32)\n', (1380, 1423), True, 'import numpy as np\n'), ((1482, 1522), 'numpy.random.choice', 'np.random.choice', (['data_count', 'batch_size'], {}), '(data_count, batch_size)\n', (1498, 1522), True, 'import numpy as np\n'), ((2146, 2199), 'numpy.zeros', 'np.zeros', (['(batch_size, 240, 320, 3)'], {'dtype': 'np.float32'}), '((batch_size, 240, 320, 3), dtype=np.float32)\n', (2154, 2199), True, 'import numpy as np\n'), ((2215, 2266), 'numpy.zeros', 'np.zeros', (['(batch_size, 60, 80, 1)'], {'dtype': 'np.float32'}), '((batch_size, 60, 80, 1), dtype=np.float32)\n', (2223, 2266), True, 'import numpy as np\n'), ((2282, 2333), 'numpy.zeros', 'np.zeros', (['(batch_size, 60, 80, 1)'], {'dtype': 'np.float32'}), '((batch_size, 60, 80, 1), dtype=np.float32)\n', (2290, 2333), True, 'import numpy as np\n'), ((2392, 2432), 'numpy.random.choice', 'np.random.choice', (['data_count', 'batch_size'], {}), '(data_count, batch_size)\n', (2408, 2432), True, 'import numpy as np\n'), ((1698, 1727), 'cv2.resize', 'cv2.resize', (['tmpim', '(320, 240)'], {}), '(tmpim, (320, 240))\n', (1708, 1727), False, 'import cv2\n'), ((1853, 1889), 'cv2.cvtColor', 'cv2.cvtColor', (['td', 'cv2.COLOR_BGR2GRAY'], {}), '(td, cv2.COLOR_BGR2GRAY)\n', (1865, 1889), False, 'import cv2\n'), ((2608, 2637), 'cv2.resize', 'cv2.resize', (['tmpim', '(320, 240)'], {}), '(tmpim, (320, 240))\n', (2618, 2637), False, 'import cv2\n'), ((2763, 2799), 'cv2.cvtColor', 'cv2.cvtColor', (['td', 
'cv2.COLOR_BGR2GRAY'], {}), '(td, cv2.COLOR_BGR2GRAY)\n', (2775, 2799), False, 'import cv2\n'), ((3676, 3703), 'numpy.linalg.norm', 'np.linalg.norm', (['xyz'], {'axis': '(0)'}), '(xyz, axis=0)\n', (3690, 3703), True, 'import numpy as np\n'), ((3722, 3782), 'numpy.zeros', 'np.zeros', (['(tmpim.shape[0], tmpim.shape[1])'], {'dtype': 'np.float32'}), '((tmpim.shape[0], tmpim.shape[1]), dtype=np.float32)\n', (3730, 3782), True, 'import numpy as np\n'), ((3803, 3863), 'numpy.zeros', 'np.zeros', (['(tmpim.shape[0], tmpim.shape[1])'], {'dtype': 'np.float32'}), '((tmpim.shape[0], tmpim.shape[1]), dtype=np.float32)\n', (3811, 3863), True, 'import numpy as np\n'), ((4312, 4352), 'cv2.GaussianBlur', 'cv2.GaussianBlur', (['dps', '(ksize, ksize)', '(0)'], {}), '(dps, (ksize, ksize), 0)\n', (4328, 4352), False, 'import cv2\n'), ((4380, 4422), 'cv2.GaussianBlur', 'cv2.GaussianBlur', (['dpsmk', '(ksize, ksize)', '(0)'], {}), '(dpsmk, (ksize, ksize), 0)\n', (4396, 4422), False, 'import cv2\n'), ((4474, 4510), 'numpy.divide', 'np.divide', (['dps_blurred', 'weight_count'], {}), '(dps_blurred, weight_count)\n', (4483, 4510), True, 'import numpy as np\n'), ((4537, 4577), 'cv2.GaussianBlur', 'cv2.GaussianBlur', (['res', '(ksize, ksize)', '(0)'], {}), '(res, (ksize, ksize), 0)\n', (4553, 4577), False, 'import cv2\n'), ((4595, 4615), 'numpy.amax', 'np.amax', (['res_blurred'], {}), '(res_blurred)\n', (4602, 4615), True, 'import numpy as np\n'), ((703, 716), 'glob.glob', 'glob.glob', (['fp'], {}), '(fp)\n', (712, 716), False, 'import glob\n'), ((1963, 1980), 'numpy.where', 'np.where', (['(td == 1)'], {}), '(td == 1)\n', (1971, 1980), True, 'import numpy as np\n'), ((1999, 2015), 'numpy.where', 'np.where', (['(td > 0)'], {}), '(td > 0)\n', (2007, 2015), True, 'import numpy as np\n'), ((2873, 2890), 'numpy.where', 'np.where', (['(td == 1)'], {}), '(td == 1)\n', (2881, 2890), True, 'import numpy as np\n'), ((2909, 2925), 'numpy.where', 'np.where', (['(td > 0)'], {}), '(td > 0)\n', (2917, 2925), 
True, 'import numpy as np\n'), ((4676, 4709), 'cv2.resize', 'cv2.resize', (['res_blurred', '(80, 60)'], {}), '(res_blurred, (80, 60))\n', (4686, 4709), False, 'import cv2\n'), ((1579, 1616), 'cv2.imread', 'cv2.imread', (['self.ppmFileNames[ids[i]]'], {}), '(self.ppmFileNames[ids[i]])\n', (1589, 1616), False, 'import cv2\n'), ((2489, 2526), 'cv2.imread', 'cv2.imread', (['self.testRGBNames[ids[i]]'], {}), '(self.testRGBNames[ids[i]])\n', (2499, 2526), False, 'import cv2\n'), ((3107, 3139), 'cv2.imread', 'cv2.imread', (['self.ppmFileNames[i]'], {}), '(self.ppmFileNames[i])\n', (3117, 3139), False, 'import cv2\n'), ((3601, 3624), 'numpy.ones_like', 'np.ones_like', (['xyz[0, :]'], {}), '(xyz[0, :])\n', (3613, 3624), True, 'import numpy as np\n')] |
#!/usr/bin/python3 -B
"""
A convenience wrapper for Matplotlib.
"""
import sys # built-in module
import time # built-in module
import inspect # built-in module
import warnings # built-in module
import numpy as np # pip install numpy
import matplotlib # pip install matplotlib + apt install python3-tk
import matplotlib.pyplot as pp # pip install matplotlib + apt install python3-tk
import matplotlib.gridspec # pip install matplotlib + apt install python3-tk
import mpl_toolkits.mplot3d # noqa pylint: disable=unused-import
######################################################################################
#
# P U B L I C A P I
#
######################################################################################
def plot(*plot_args, **kwargs):
    """
    Convenience one-liner: build a Figure, draw the given data on its first
    subplot, and hand the Figure back. Keyword arguments are routed first to
    the Figure constructor and then to the plot() call on fig.axes[0].
    Example:
        x = np.linspace(0, 2 * np.pi, 100)
        fig = pyplottr.plot(x, np.sin(x), title="sine", color="red")
        fig.show()
    """
    return _plot(*plot_args, **kwargs)
def plot3d(*plot_args, **kwargs):
    """
    Like plot(), but the created Figure uses a 3D projection. Keyword
    arguments are handled the same way as in plot().
    """
    return _plot(*plot_args, **kwargs, projection="3d")
class DummyFigure:  # pylint: disable=missing-function-docstring, unused-argument
    """
    Create a mock Figure object that behaves superficially the same as a real
    Figure, but doesn't actually do or contain anything. This allows the user
    to switch all plotting functionality on/off by changing just a single line
    of code, rather than having to comment out a bunch of lines.
    """
    def __init__(self, title="DummyFigure", nrows=1, ncols=1, nplots=None, layout=None, **_kwargs):
        num_axes = nrows * ncols
        num_axes = max(num_axes, int(nplots or 0))
        if layout is not None:
            # BUG FIX: Figure.create_axes allocates layout.max() + 1 subplots
            # (indices are 0-based); the original omitted the +1 here, so a
            # layout like [[0, 1], [2, 3]] yielded 3 mock axes instead of 4.
            # The explicit None check also avoids np.max on np.array(None).
            num_axes = max(num_axes, int(np.array(layout).max()) + 1)
        self.ax = self.Axes()
        self.axes = [self.ax] * num_axes
        self.current_subplot = None
        self.exit_request = False
        self.fig = None
        self.mousex = None
        self.mousey = None
        self.clickx = None
        self.clicky = None
        self.key = None
        # NOTE(review): "<KEY>" looks like a redacted placeholder for the real
        # hotkey string used by Figure — confirm the intended key set.
        self.valid_keys = "<KEY>"
        self.key_state = [False] * len(self.valid_keys)
        self.fast_redraw = False
    def savefig(self, *args, **kwargs):
        pass
    def show(self, *args, **kwargs):
        pass
    def events(self, *args, **kwargs):
        pass
    def redraw(self, *args, **kwargs):
        pass
    def move(self, *args, **kwargs):
        pass
    def resize(self, *args, **kwargs):
        pass
    def close(self, *args, **kwargs):
        pass
    class Axes:  # pylint: disable=missing-class-docstring
        # No-op stand-in for a matplotlib Axes: every drawing/configuration
        # call is accepted and ignored.
        def bar(self, *args, **kwargs):
            pass
        def plot(self, *args, **kwargs):
            pass
        def plot3d(self, *args, **kwargs):
            pass
        def imshow(self, *args, **kwargs):
            pass
        def scatter(self, *args, **kwargs):
            pass
        def set_title(self, *args, **kwargs):
            pass
        def set_xlim(self, *args, **kwargs):
            pass
        def set_ylim(self, *args, **kwargs):
            pass
        def set_xlabel(self, *args, **kwargs):
            pass
        def set_ylabel(self, *args, **kwargs):
            pass
        def set_xticks(self, *args, **kwargs):
            pass
        def set_yticks(self, *args, **kwargs):
            pass
        def set_xticklabels(self, *args, **kwargs):
            pass
        def set_yticklabels(self, *args, **kwargs):
            pass
        def set_facecolor(self, *args, **kwargs):
            pass
        def grid(self, *args, **kwargs):
            pass
        def legend(self, *args, **kwargs):
            pass
        def add_patch(self, *args, **kwargs):
            pass
        def autoscale_view(self, *args, **kwargs):
            pass
class Figure: # pylint: disable=missing-class-docstring
    def __init__(self, title="Figure", nrows=1, ncols=1, nplots=None, layout=None, **subplot_kwargs):
        """
        Create a Figure with one or more subplots and basic enablers for keyboard
        and mouse interaction.
        Example:
            fig = pp.Figure("Sample 3D plot", projection="3d")
            zdata = np.linspace(0, 15, 1000)
            xdata = np.sin(zline)
            ydata = np.cos(zline)
            fig.ax.plot3D(xdata, ydata, zdata)
            fig.show()
        """
        _mpl_init()
        # Interaction state, updated by the _fig_event_* callbacks below.
        self.exit_request = False
        self.fig = None
        self.axes = None
        self.ax = None
        self.current_subplot = None
        self.mousex = None
        self.mousey = None
        self.clickx = None
        self.clicky = None
        self.key = None
        # NOTE(review): "<KEY>" looks like a redacted placeholder for the
        # real set of toggleable hotkeys — confirm the intended string.
        self.valid_keys = "<KEY>"
        self.key_state = [False] * len(self.valid_keys)
        self.fast_redraw = False
        try:
            self.fig = pp.figure(num=title)
        except (RuntimeError, ImportError) as e:
            # Typically means there is no usable GUI backend/display.
            print(f"Error: {e}")
            print("Pro tip: Check that there's an X window server running.")
            print("Pro tip: Check that the $DISPLAY environment variable is defined.")
            sys.exit(-1)
        # A zero in any of these means "defer subplot creation to the caller".
        if 0 not in [nrows, ncols, nplots]:
            self.create_axes(nrows, ncols, nplots, layout, **subplot_kwargs)
    def create_axes(self, nrows=1, ncols=1, nplots=None, layout=None, **kwargs):
        """
        Create one or more subplots with basic enablers for keyboard and mouse
        interaction. `layout` is a 2D grid of subplot indices; repeating an
        index stretches that subplot over several grid cells.
        """
        if layout is None: # generate layout if not provided by user
            nplots = nplots or (nrows * ncols) # set default value for nplots if not provided
            if nplots > nrows * ncols: # derive nrows & ncols if only nplots is provided
                nrows = np.clip(1, 2, np.ceil(nplots / 2)).astype(int)
                ncols = np.ceil(nplots / nrows).astype(int)
            layout = np.arange(nrows * ncols).reshape(nrows, ncols)
            layout = np.clip(layout, 0, nplots - 1) # stretch the last subplot if not evenly divided
        layout = np.array(layout)
        grid = matplotlib.gridspec.GridSpec(*layout.shape)
        axes = np.array(layout, dtype=object)
        # Number of distinct subplots = highest index in the layout + 1.
        self.axes = np.array(range(layout.max() + 1), dtype=object)
        for idx in np.unique(layout[layout >= 0]):
            # Each subplot spans the bounding box of its index in the layout.
            yslice, xslice = np.nonzero(layout == idx)
            yslice = slice(yslice[0], yslice[-1] + 1)
            xslice = slice(xslice[0], xslice[-1] + 1)
            axes = self.fig.add_subplot(grid[yslice, xslice], **kwargs)
            self.axes[idx] = axes
        self.current_subplot = self.ax = self.axes[0]
        self.fig.tight_layout()
        # Hook up keyboard/mouse callbacks that maintain the state attributes.
        self.fig.canvas.mpl_connect("close_event", self._fig_event_close)
        self.fig.canvas.mpl_connect("key_press_event", self._fig_event_keypress)
        self.fig.canvas.mpl_connect("motion_notify_event", self._fig_event_mousemove)
        self.fig.canvas.mpl_connect("button_press_event", self._fig_event_mouseclick)
        warnings.filterwarnings("ignore", category=UserWarning, module="pyplottr")
        return self.axes[0] # return the first/only subplot
    def savefig(self, *args, **kwargs):
        """ Save the figure equivalently to pyplot.savefig(). """
        self.redraw() # redraw twice to make sure all pendings events are handled
        self.redraw()
        pp.savefig(*args, **kwargs)
    def show(self, interval=0.0):
        """ Redraw and handle events until exit. """
        while not self.exit_request:
            self.redraw()
            time.sleep(interval)
    def events(self):
        """ Flush any pending events. """
        self.fig.canvas.flush_events()
    def redraw(self):
        """ Redraw and flush any pending events. """
        if self.fast_redraw:
            # Blit-style partial redraw: repaint only each axes' artists.
            for ax in self.axes:
                ax.draw_artist(ax.patch)
                for line in ax.get_children():
                    ax.draw_artist(line)
            self.events()
        else: # one full redraw required in the beginning
            self.fig.canvas.draw_idle()
            self.events()
    def move(self, x, y):
        """ Move the figure window upper left corner to (x, y). """
        # Window positioning is backend-specific.
        backend = matplotlib.get_backend()
        if backend == "TkAgg":
            self.fig.canvas.manager.window.wm_geometry(f"+{x}+{y}")
        elif backend == "WXAgg":
            self.fig.canvas.manager.window.SetPosition((x, y))
        else: # this works for QT and GTK
            self.fig.canvas.manager.window.move(x, y)
    def resize(self, width, height): # pylint: disable=no-self-use
        """ Resize the figure window to (width, height) pixels. """
        mng = pp.get_current_fig_manager()
        mng.resize(width, height)
    def close(self):
        """ Close the figure window. """
        pp.close(self.fig)
    ######################################################################################
    #
    #   I N T E R N A L   F U N C T I O N S
    #
    ######################################################################################
    def _fig_event_close(self, _evt):
        # Window closed by the user => terminate show() loop.
        self.exit_request = True
    def _fig_event_keypress(self, evt):
        # Record the last key; toggle its state if it is one of valid_keys.
        if evt.key is not None:
            self.key = evt.key
            if evt.key in self.valid_keys:
                idx = self.valid_keys.index(evt.key)
                self.key_state[idx] = not self.key_state[idx]
    def _fig_event_mousemove(self, evt):
        # set mouse coords to None if not within the main subplot
        self.current_subplot = evt.inaxes
        if evt.inaxes == self.fig.axes[0]:
            self.mousex = int(round(evt.xdata))
            self.mousey = int(round(evt.ydata))
        else:
            self.mousex = None
            self.mousey = None
    def _fig_event_mouseclick(self, evt):
        # do nothing if this is not a left-click on the main subplot
        if evt.inaxes == self.fig.axes[0]:
            if evt.button == 1:
                self.clickx = int(round(evt.xdata))
                self.clicky = int(round(evt.ydata))
def _mpl_init():
    """Enable interactive mode and release hotkeys reserved by matplotlib."""
    pp.ion()  # interactive mode must be on before any figures are created
    # Unbind default keymaps so the application can use these keys itself:
    # a, s, f, c, v, h, g, G, k, L, l, o, p, W, Q
    overrides = {
        "keymap.save": 'ctrl+s',        # frees s
        "keymap.fullscreen": 'ctrl+f',  # frees f
        "keymap.back": 'backspace',     # frees c
        "keymap.forward": '',           # v
        "keymap.home": '',              # h
        "keymap.grid": '',              # g
        "keymap.grid_minor": '',        # G
        "keymap.xscale": '',            # k, L
        "keymap.yscale": '',            # l
        "keymap.zoom": '',              # o
        "keymap.pan": '',               # p
        "keymap.quit_all": '',          # W, Q
        "keymap.quit": ['q', 'escape'],
        "figure.autolayout": True,
    }
    for name, value in overrides.items():
        matplotlib.rcParams[name] = value
def _plot(*plot_args, projection=None, **kwargs):
    """Shared implementation behind plot() and plot3d(): split the keyword
    arguments between Figure construction and the actual plot call."""
    ctor_kwargs = {
        **_extract_kwargs(Figure.__init__, **kwargs),
        **_extract_kwargs(matplotlib.figure.Figure.add_subplot, **kwargs),
    }
    figure = Figure(**ctor_kwargs, projection=projection)
    draw_kwargs = {
        **_extract_kwargs(matplotlib.lines.Line2D, **kwargs),
        **_extract_kwargs(matplotlib.axes.Axes, **kwargs),
    }
    figure.ax.plot(*plot_args, **draw_kwargs)
    return figure
def _extract_kwargs(func, **kwargs):
func_kwargs = [k for k, v in inspect.signature(func).parameters.items()]
func_dict = {k: kwargs.pop(k) for k in dict(kwargs) if k in func_kwargs}
return func_dict
def _selftest():
    """Interactive smoke test: exercises plot(), plot3d(), and several
    subplot layouts. Opens real windows; intended for manual inspection."""
    # simple one-liners
    x = np.linspace(-2 * np.pi, 2 * np.pi, 100)
    plot(x, np.sin(x), x, np.cos(x), title="sin & cos")
    plot(x, np.sin(np.pi * x) / (np.pi * x), title="sinc")
    # subplot layouts
    zline = np.linspace(0, 15, 1000)
    xline = np.sin(zline)
    yline = np.cos(zline)
    # auto-derived layouts from nplots alone
    for nplots in np.arange(2, 9):
        fig = Figure(f"nplots={nplots}", nplots=nplots)
        fig.axes[0].plot(xline, xline)
        fig.axes[-1].plot(xline, zline)
    # explicit grid layouts
    for nrows, ncols in [[2, 1], [1, 3], [2, 4]]:
        fig = Figure(f"nrows={nrows}, ncols={ncols}", nrows=nrows, ncols=ncols)
        fig.axes[0].plot(xline, xline)
        fig.axes[-1].plot(xline, zline)
    # 3D projection
    fig = plot3d(xline, yline, zline, title="3D plot", color="black")
    # closing the last window closes them all
    fig.show()
# Run the interactive self-test when executed as a script.
if __name__ == "__main__":
    _selftest()
| [
"numpy.clip",
"inspect.signature",
"time.sleep",
"numpy.array",
"sys.exit",
"numpy.sin",
"numpy.arange",
"matplotlib.get_backend",
"matplotlib.pyplot.close",
"numpy.linspace",
"matplotlib.gridspec.GridSpec",
"numpy.ceil",
"matplotlib.pyplot.savefig",
"numpy.cos",
"numpy.nonzero",
"matp... | [((10479, 10487), 'matplotlib.pyplot.ion', 'pp.ion', ([], {}), '()\n', (10485, 10487), True, 'import matplotlib.pyplot as pp\n'), ((12186, 12225), 'numpy.linspace', 'np.linspace', (['(-2 * np.pi)', '(2 * np.pi)', '(100)'], {}), '(-2 * np.pi, 2 * np.pi, 100)\n', (12197, 12225), True, 'import numpy as np\n'), ((12375, 12399), 'numpy.linspace', 'np.linspace', (['(0)', '(15)', '(1000)'], {}), '(0, 15, 1000)\n', (12386, 12399), True, 'import numpy as np\n'), ((12412, 12425), 'numpy.sin', 'np.sin', (['zline'], {}), '(zline)\n', (12418, 12425), True, 'import numpy as np\n'), ((12438, 12451), 'numpy.cos', 'np.cos', (['zline'], {}), '(zline)\n', (12444, 12451), True, 'import numpy as np\n'), ((12470, 12485), 'numpy.arange', 'np.arange', (['(2)', '(9)'], {}), '(2, 9)\n', (12479, 12485), True, 'import numpy as np\n'), ((6473, 6489), 'numpy.array', 'np.array', (['layout'], {}), '(layout)\n', (6481, 6489), True, 'import numpy as np\n'), ((6505, 6548), 'matplotlib.gridspec.GridSpec', 'matplotlib.gridspec.GridSpec', (['*layout.shape'], {}), '(*layout.shape)\n', (6533, 6548), False, 'import matplotlib\n'), ((6564, 6594), 'numpy.array', 'np.array', (['layout'], {'dtype': 'object'}), '(layout, dtype=object)\n', (6572, 6594), True, 'import numpy as np\n'), ((6682, 6712), 'numpy.unique', 'np.unique', (['layout[layout >= 0]'], {}), '(layout[layout >= 0])\n', (6691, 6712), True, 'import numpy as np\n'), ((7404, 7478), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {'category': 'UserWarning', 'module': '"""pyplottr"""'}), "('ignore', category=UserWarning, module='pyplottr')\n", (7427, 7478), False, 'import warnings\n'), ((7760, 7787), 'matplotlib.pyplot.savefig', 'pp.savefig', (['*args'], {}), '(*args, **kwargs)\n', (7770, 7787), True, 'import matplotlib.pyplot as pp\n'), ((8607, 8631), 'matplotlib.get_backend', 'matplotlib.get_backend', ([], {}), '()\n', (8629, 8631), False, 'import matplotlib\n'), ((9075, 9103), 
'matplotlib.pyplot.get_current_fig_manager', 'pp.get_current_fig_manager', ([], {}), '()\n', (9101, 9103), True, 'import matplotlib.pyplot as pp\n'), ((9209, 9227), 'matplotlib.pyplot.close', 'pp.close', (['self.fig'], {}), '(self.fig)\n', (9217, 9227), True, 'import matplotlib.pyplot as pp\n'), ((12238, 12247), 'numpy.sin', 'np.sin', (['x'], {}), '(x)\n', (12244, 12247), True, 'import numpy as np\n'), ((12252, 12261), 'numpy.cos', 'np.cos', (['x'], {}), '(x)\n', (12258, 12261), True, 'import numpy as np\n'), ((5281, 5301), 'matplotlib.pyplot.figure', 'pp.figure', ([], {'num': 'title'}), '(num=title)\n', (5290, 5301), True, 'import matplotlib.pyplot as pp\n'), ((6375, 6405), 'numpy.clip', 'np.clip', (['layout', '(0)', '(nplots - 1)'], {}), '(layout, 0, nplots - 1)\n', (6382, 6405), True, 'import numpy as np\n'), ((6743, 6768), 'numpy.nonzero', 'np.nonzero', (['(layout == idx)'], {}), '(layout == idx)\n', (6753, 6768), True, 'import numpy as np\n'), ((7951, 7971), 'time.sleep', 'time.sleep', (['interval'], {}), '(interval)\n', (7961, 7971), False, 'import time\n'), ((12294, 12311), 'numpy.sin', 'np.sin', (['(np.pi * x)'], {}), '(np.pi * x)\n', (12300, 12311), True, 'import numpy as np\n'), ((5560, 5572), 'sys.exit', 'sys.exit', (['(-1)'], {}), '(-1)\n', (5568, 5572), False, 'import sys\n'), ((6307, 6331), 'numpy.arange', 'np.arange', (['(nrows * ncols)'], {}), '(nrows * ncols)\n', (6316, 6331), True, 'import numpy as np\n'), ((2219, 2235), 'numpy.array', 'np.array', (['layout'], {}), '(layout)\n', (2227, 2235), True, 'import numpy as np\n'), ((6250, 6273), 'numpy.ceil', 'np.ceil', (['(nplots / nrows)'], {}), '(nplots / nrows)\n', (6257, 6273), True, 'import numpy as np\n'), ((11993, 12016), 'inspect.signature', 'inspect.signature', (['func'], {}), '(func)\n', (12010, 12016), False, 'import inspect\n'), ((6193, 6212), 'numpy.ceil', 'np.ceil', (['(nplots / 2)'], {}), '(nplots / 2)\n', (6200, 6212), True, 'import numpy as np\n')] |
import enum
import pickle
import typing
from abc import ABC, abstractmethod
from pathlib import Path
import h5py
import numpy as np
from shapely.geometry import MultiPoint
from src.utils import hash_file
class PoseHashException(Exception):
    """Exception type for pose-file hash related errors."""
class PoseEstimation(ABC):
    """
    abstract base class for PoseEstimation objects. Used as the base class for
    PoseEstimationV2 and PoseEstimationV3
    """

    class KeypointIndex(enum.IntEnum):
        """ enum defining the 12 keypoint indexes """
        NOSE = 0
        LEFT_EAR = 1
        RIGHT_EAR = 2
        BASE_NECK = 3
        LEFT_FRONT_PAW = 4
        RIGHT_FRONT_PAW = 5
        CENTER_SPINE = 6
        LEFT_REAR_PAW = 7
        RIGHT_REAR_PAW = 8
        BASE_TAIL = 9
        MID_TAIL = 10
        TIP_TAIL = 11

    def __init__(self, file_path: Path, cache_dir: typing.Optional[Path] = None,
                 fps: int = 30):
        """
        initialize new object from h5 file
        :param file_path: path to pose_est_v2.h5 file
        :param cache_dir: optional cache directory, used to cache convex hulls
        for faster loading
        :param fps: frames per second, used for scaling time series features
        from "per frame" to "per second"
        """
        super().__init__()
        self._num_frames = 0
        self._identities = []
        # per-identity cache of convex hull lists (see get_identity_convex_hulls)
        self._convex_hull_cache = dict()
        self._path = file_path
        self._cache_dir = cache_dir
        self._cm_per_pixel = None
        self._hash = hash_file(file_path)
        self._fps = fps
        self._static_objects = {}

    @property
    def num_frames(self) -> int:
        """ return the number of frames in the pose_est file """
        return self._num_frames

    @property
    def identities(self):
        """ return list of integer identities generated from file """
        return self._identities

    @property
    def num_identities(self) -> int:
        """ return the number of identities in the file """
        return len(self._identities)

    @property
    def cm_per_pixel(self):
        """ pixel-to-cm scale factor (None if not set by a subclass) """
        return self._cm_per_pixel

    @property
    def fps(self):
        """ frames per second of the source video """
        return self._fps

    @property
    def hash(self):
        """ hash of the pose file, computed at load time """
        return self._hash

    @abstractmethod
    def get_points(self, frame_index: int, identity: int,
                   scale: typing.Optional[float] = None):
        """
        return points and point masks for an individual frame
        :param frame_index: frame index of points and masks to be returned
        :param identity: identity to return points for
        :param scale: optional scale factor, set to cm_per_pixel to convert
        poses from pixel coordinates to cm coordinates
        :return: numpy array of points (12,2), numpy array of point masks (12,)
        """
        pass

    @abstractmethod
    def get_identity_poses(self, identity: int,
                           scale: typing.Optional[float] = None):
        """
        return all points and point masks
        :param identity: identity to return points for
        :param scale: optional scale factor, set to cm_per_pixel to convert
        poses from pixel coordinates to cm coordinates
        :return: numpy array of points (#frames, 12, 2), numpy array of point
        masks (#frames, 12)
        """
        pass

    @abstractmethod
    def get_identity_point_mask(self, identity):
        """
        get the point mask array for a given identity
        :param identity: identity to return point mask for
        :return: array of point masks (#frames, 12)
        """
        pass

    @abstractmethod
    def identity_mask(self, identity):
        """
        get the identity mask (indicates if specified identity is present in
        each frame)
        :param identity: identity to get masks for
        :return: numpy array of size (#frames,)
        """
        pass

    @property
    @abstractmethod
    def identity_to_track(self):
        """ mapping from identity to track (subclass-specific) """
        pass

    @property
    @abstractmethod
    def format_major_version(self):
        """
        an integer giving the major version of the format
        """
        pass

    @property
    def static_objects(self):
        """ dict of static objects loaded from the pose file """
        return self._static_objects

    def get_identity_convex_hulls(self, identity):
        """
        A list of length #frames containing convex hulls for the given identity.
        The convex hulls are calculated using all valid points except for the
        middle of tail and tip of tail points.
        :param identity: identity to return points for
        :return: the convex hulls (array elements will be None if there is no
        valid convex hull for that frame)
        """
        if identity in self._convex_hull_cache:
            return self._convex_hull_cache[identity]
        else:
            convex_hulls = None
            path = None
            if self._cache_dir is not None:
                path = (self._cache_dir /
                        "convex_hulls" /
                        self._path.with_suffix('').name /
                        f"convex_hulls_{identity}.pickle")
                path.parents[0].mkdir(mode=0o775, parents=True, exist_ok=True)
                try:
                    with path.open('rb') as f:
                        convex_hulls = pickle.load(f)
                # BUG FIX: was a bare `except:`, which also swallowed
                # KeyboardInterrupt/SystemExit. A failed cache read is fine
                # (best-effort); we just regenerate the hulls below.
                except Exception:
                    pass

            if convex_hulls is None:
                points, point_masks = self.get_identity_poses(identity)
                # exclude MID_TAIL and TIP_TAIL (last two keypoints)
                body_points = points[:, :-2, :]
                body_point_masks = point_masks[:, :-2]
                convex_hulls = []
                for frame_index in range(self.num_frames):
                    # a convex hull needs at least 3 valid points
                    if sum(body_point_masks[frame_index, :]) >= 3:
                        filtered_points = body_points[frame_index, body_point_masks[frame_index, :] == 1, :]
                        convex_hulls.append(MultiPoint(filtered_points).convex_hull)
                    else:
                        convex_hulls.append(None)
                if path:
                    with path.open('wb') as f:
                        pickle.dump(convex_hulls, f)

            self._convex_hull_cache[identity] = convex_hulls
            return convex_hulls

    def compute_bearing(self, points):
        """
        compute the heading (degrees, in (-180, 180]) of a single pose from
        the BASE_TAIL -> BASE_NECK vector
        :param points: array of 12 keypoints (12, 2)
        :return: bearing angle in degrees
        """
        base_tail_xy = points[self.KeypointIndex.BASE_TAIL.value].astype(np.float32)
        base_neck_xy = points[self.KeypointIndex.BASE_NECK.value].astype(np.float32)
        base_neck_offset_xy = base_neck_xy - base_tail_xy
        angle_rad = np.arctan2(base_neck_offset_xy[1],
                               base_neck_offset_xy[0])
        return angle_rad * (180 / np.pi)

    def compute_all_bearings(self, identity):
        """
        compute the bearing for every frame, one frame at a time
        :param identity: identity to compute bearings for
        :return: numpy array of size (#frames,); frames without a valid pose
        keep the initial value of 0
        """
        bearings = np.zeros(self.num_frames, dtype=np.float32)
        for i in range(self.num_frames):
            points, mask = self.get_points(i, identity)
            if points is not None:
                bearings[i] = self.compute_bearing(points)
        return bearings

    def compute_all_bearings2(self, identity):
        """
        vectorized equivalent of compute_all_bearings
        :param identity: identity to compute bearings for
        :return: numpy array of size (#frames,); frames where the identity is
        absent keep the initial value of 0
        """
        bearings = np.zeros(self.num_frames, dtype=np.float32)
        # get an array of the indexes of valid frames only
        indexes = np.arange(self._num_frames)[self.identity_mask(identity) == 1]
        poses, _ = self.get_identity_poses(identity)
        base_tail = poses[indexes, self.KeypointIndex.BASE_TAIL.value].astype(np.float32)
        base_neck = poses[indexes, self.KeypointIndex.BASE_NECK.value].astype(np.float32)
        offsets = base_neck - base_tail
        angle_rad = np.arctan2(offsets[:, 1], offsets[:, 0])
        bearings[indexes] = angle_rad * (180 / np.pi)
        return bearings

    @staticmethod
    def get_pose_file_attributes(path: Path) -> dict:
        """
        read the HDF5 attributes of a pose file (file-level and 'poseest'
        group-level) without loading the datasets
        :param path: path to the pose file
        :return: dict with the file attrs plus a 'poseest' sub-dict
        """
        with h5py.File(path, 'r') as pose_h5:
            attrs = dict(pose_h5.attrs)
            attrs['poseest'] = dict(pose_h5['poseest'].attrs)
        return attrs
| [
"pickle.dump",
"pickle.load",
"h5py.File",
"numpy.zeros",
"numpy.arctan2",
"src.utils.hash_file",
"numpy.arange",
"shapely.geometry.MultiPoint"
] | [((1502, 1522), 'src.utils.hash_file', 'hash_file', (['file_path'], {}), '(file_path)\n', (1511, 1522), False, 'from src.utils import hash_file\n'), ((6555, 6613), 'numpy.arctan2', 'np.arctan2', (['base_neck_offset_xy[1]', 'base_neck_offset_xy[0]'], {}), '(base_neck_offset_xy[1], base_neck_offset_xy[0])\n', (6565, 6613), True, 'import numpy as np\n'), ((6753, 6796), 'numpy.zeros', 'np.zeros', (['self.num_frames'], {'dtype': 'np.float32'}), '(self.num_frames, dtype=np.float32)\n', (6761, 6796), True, 'import numpy as np\n'), ((7079, 7122), 'numpy.zeros', 'np.zeros', (['self.num_frames'], {'dtype': 'np.float32'}), '(self.num_frames, dtype=np.float32)\n', (7087, 7122), True, 'import numpy as np\n'), ((7557, 7597), 'numpy.arctan2', 'np.arctan2', (['offsets[:, 1]', 'offsets[:, 0]'], {}), '(offsets[:, 1], offsets[:, 0])\n', (7567, 7597), True, 'import numpy as np\n'), ((7200, 7227), 'numpy.arange', 'np.arange', (['self._num_frames'], {}), '(self._num_frames)\n', (7209, 7227), True, 'import numpy as np\n'), ((7762, 7782), 'h5py.File', 'h5py.File', (['path', '"""r"""'], {}), "(path, 'r')\n", (7771, 7782), False, 'import h5py\n'), ((5192, 5206), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (5203, 5206), False, 'import pickle\n'), ((6143, 6171), 'pickle.dump', 'pickle.dump', (['convex_hulls', 'f'], {}), '(convex_hulls, f)\n', (6154, 6171), False, 'import pickle\n'), ((5929, 5956), 'shapely.geometry.MultiPoint', 'MultiPoint', (['filtered_points'], {}), '(filtered_points)\n', (5939, 5956), False, 'from shapely.geometry import MultiPoint\n')] |
import os
import sys
import numpy as np
def ADD_err(gt_pose, est_pose, model):
    """
    Average Distance (ADD) metric: mean per-vertex distance between the model
    transformed by the ground-truth pose and by the estimated pose.
    Args
    - gt_pose:  (np.array) [4 x 4] (or [3 x 4]) pose matrix
    - est_pose: (np.array) [4 x 4] (or [3 x 4]) pose matrix
    - model:    (np.array) [N x 3] model vertices
    """
    def transform_points(points_3d, mat):
        # rotate then translate: p' = R p + t
        rot = np.matmul(mat[:3, :3], points_3d.T)
        return rot.transpose() + mat[:3, 3]
    v_A = transform_points(model, gt_pose)
    v_B = transform_points(model, est_pose)
    # (the original re-copied v_A/v_B element-wise here — redundant, removed)
    return np.mean(np.linalg.norm(v_A - v_B, axis=1))
def ADDS_err(gt_pose, est_pose, model):
    """
    Symmetric average distance (ADD-S): for each of 500 randomly sampled
    ground-truth-transformed vertices, take the distance to the closest
    estimate-transformed vertex, then average.
    Note: stochastic — samples via the global numpy RNG without a fixed seed.
    Args
    - gt_pose:  (np.array) [4 x 4] (or [3 x 4]) pose matrix
    - est_pose: (np.array) [4 x 4] (or [3 x 4]) pose matrix
    - model:    (np.array) [N x 3] model vertices
    """
    def apply_pose(points_3d, mat):
        # rotate then translate: p' = R p + t
        return np.matmul(mat[:3, :3], points_3d.T).transpose() + mat[:3, 3]
    verts_gt = apply_pose(model, gt_pose)
    verts_est = apply_pose(model, est_pose)
    sample = np.random.randint(0, verts_gt.shape[0], 500)
    closest = [np.linalg.norm(verts_gt[j] - verts_est, axis=1).min() for j in sample]
    return np.mean(closest)
def rot_error(gt_pose, est_pose):
    """
    Geodesic rotation error in degrees between two poses, in [0, 180].
    BUG FIX: the previous implementation converted both rotations to
    quaternions and used an (unimported) ``Quaternion`` class, raising
    NameError at runtime. This computes the same quantity — the absolute
    relative rotation angle — directly from the rotation matrices:
    trace(R_gt @ R_est^T) = 1 + 2*cos(theta).
    Args
    - gt_pose:  (np.array) [4 x 4] pose matrix
    - est_pose: (np.array) [4 x 4] pose matrix
    """
    rel = np.matmul(gt_pose[:3, :3], est_pose[:3, :3].T)
    # clip guards against floating-point drift pushing cos outside [-1, 1]
    cos_theta = np.clip((np.trace(rel) - 1.0) / 2.0, -1.0, 1.0)
    return np.degrees(np.arccos(cos_theta))
def trans_error(gt_pose, est_pose):
    """
    Translation error between two poses.
    Returns (L2 norm of the translation difference, per-axis absolute errors).
    """
    delta = gt_pose[:3, 3] - est_pose[:3, 3]
    return np.linalg.norm(delta), np.abs(delta)
def IoU(box1, box2):
    """
    Compute IoU between box1 and box2
    Args
    - box: (np.array) bboxes with size [4, ] => [x1, y1, x2, y2]
    """
    # Intersection rectangle; the +1 terms treat coordinates as inclusive
    # pixel indices.
    ix1, iy1 = max(box1[0], box2[0]), max(box1[1], box2[1])
    ix2, iy2 = min(box1[2], box2[2]), min(box1[3], box2[3])
    inter = max(0, ix2 - ix1 + 1) * max(0, iy2 - iy1 + 1)
    area_a = (box1[2] - box1[0] + 1) * (box1[3] - box1[1] + 1)
    area_b = (box2[2] - box2[0] + 1) * (box2[3] - box2[1] + 1)
    return inter / float(area_a + area_b - inter)
def projection_error_2d(gt_pose, est_pose, model, cam):
    """
    Compute 2d projection error
    Args
    - gt_pose: (np.array) [4 x 4] pose matrix
    - est_pose: (np.array) [4 x 4] pose matrix
    - model: (np.array) [N x 3] model 3d vertices
    - cam: (np.array) [3 x 3] camera matrix
    """
    # Homogeneous model coordinates, shared by both projections.
    homo = np.concatenate((model, np.ones((model.shape[0], 1))), axis=1)

    def project(pose):
        # Project with the top 3 rows of the pose, then dehomogenize.
        pts = np.matmul(np.matmul(cam, pose[:3]), homo.T)
        pts /= pts[2, :]
        return pts[:2, :].T

    return np.mean(np.linalg.norm(project(gt_pose) - project(est_pose), axis=1))
| [
"numpy.mean",
"numpy.abs",
"numpy.sqrt",
"numpy.ones",
"numpy.array",
"numpy.random.randint",
"numpy.matmul",
"numpy.linalg.norm"
] | [((314, 340), 'numpy.array', 'np.array', (['[x for x in v_A]'], {}), '([x for x in v_A])\n', (322, 340), True, 'import numpy as np\n'), ((351, 377), 'numpy.array', 'np.array', (['[x for x in v_B]'], {}), '([x for x in v_B])\n', (359, 377), True, 'import numpy as np\n'), ((707, 733), 'numpy.array', 'np.array', (['[x for x in v_A]'], {}), '([x for x in v_A])\n', (715, 733), True, 'import numpy as np\n'), ((744, 770), 'numpy.array', 'np.array', (['[x for x in v_B]'], {}), '([x for x in v_B])\n', (752, 770), True, 'import numpy as np\n'), ((797, 836), 'numpy.random.randint', 'np.random.randint', (['(0)', 'v_A.shape[0]', '(500)'], {}), '(0, v_A.shape[0], 500)\n', (814, 836), True, 'import numpy as np\n'), ((951, 964), 'numpy.mean', 'np.mean', (['dist'], {}), '(dist)\n', (958, 964), True, 'import numpy as np\n'), ((2194, 2238), 'numpy.abs', 'np.abs', (['(gt_quat * est_quat.inverse).degrees'], {}), '((gt_quat * est_quat.inverse).degrees)\n', (2200, 2238), True, 'import numpy as np\n'), ((2298, 2346), 'numpy.linalg.norm', 'np.linalg.norm', (['(gt_pose[:3, 3] - est_pose[:3, 3])'], {}), '(gt_pose[:3, 3] - est_pose[:3, 3])\n', (2312, 2346), True, 'import numpy as np\n'), ((2370, 2410), 'numpy.abs', 'np.abs', (['(gt_pose[:3, 3] - est_pose[:3, 3])'], {}), '(gt_pose[:3, 3] - est_pose[:3, 3])\n', (2376, 2410), True, 'import numpy as np\n'), ((137, 172), 'numpy.matmul', 'np.matmul', (['mat[:3, :3]', 'points_3d.T'], {}), '(mat[:3, :3], points_3d.T)\n', (146, 172), True, 'import numpy as np\n'), ((397, 430), 'numpy.linalg.norm', 'np.linalg.norm', (['(v_A - v_B)'], {'axis': '(1)'}), '(v_A - v_B, axis=1)\n', (411, 430), True, 'import numpy as np\n'), ((529, 564), 'numpy.matmul', 'np.matmul', (['mat[:3, :3]', 'points_3d.T'], {}), '(mat[:3, :3], points_3d.T)\n', (538, 564), True, 'import numpy as np\n'), ((2030, 2056), 'numpy.array', 'np.array', (['[qw, qx, qy, qz]'], {}), '([qw, qx, qy, qz])\n', (2038, 2056), True, 'import numpy as np\n'), ((3454, 3477), 'numpy.matmul', 
'np.matmul', (['cam', 'gt_pose'], {}), '(cam, gt_pose)\n', (3463, 3477), True, 'import numpy as np\n'), ((3511, 3535), 'numpy.matmul', 'np.matmul', (['cam', 'est_pose'], {}), '(cam, est_pose)\n', (3520, 3535), True, 'import numpy as np\n'), ((3675, 3713), 'numpy.linalg.norm', 'np.linalg.norm', (['(gt_2d - est_2d)'], {'axis': '(1)'}), '(gt_2d - est_2d, axis=1)\n', (3689, 3713), True, 'import numpy as np\n'), ((3392, 3420), 'numpy.ones', 'np.ones', (['(model.shape[0], 1)'], {}), '((model.shape[0], 1))\n', (3399, 3420), True, 'import numpy as np\n'), ((1107, 1124), 'numpy.sqrt', 'np.sqrt', (['(tr + 1.0)'], {}), '(tr + 1.0)\n', (1114, 1124), True, 'import numpy as np\n'), ((900, 932), 'numpy.linalg.norm', 'np.linalg.norm', (['(va - v_B)'], {'axis': '(1)'}), '(va - v_B, axis=1)\n', (914, 932), True, 'import numpy as np\n'), ((1352, 1394), 'numpy.sqrt', 'np.sqrt', (['(1.0 + m[0, 0] - m[1, 1] - m[2, 2])'], {}), '(1.0 + m[0, 0] - m[1, 1] - m[2, 2])\n', (1359, 1394), True, 'import numpy as np\n'), ((1595, 1637), 'numpy.sqrt', 'np.sqrt', (['(1.0 + m[1, 1] - m[0, 0] - m[2, 2])'], {}), '(1.0 + m[1, 1] - m[0, 0] - m[2, 2])\n', (1602, 1637), True, 'import numpy as np\n'), ((1820, 1862), 'numpy.sqrt', 'np.sqrt', (['(1.0 + m[2, 2] - m[0, 0] - m[1, 1])'], {}), '(1.0 + m[2, 2] - m[0, 0] - m[1, 1])\n', (1827, 1862), True, 'import numpy as np\n')] |
from distutils.version import LooseVersion
import numpy as np
from numpy.testing import assert_allclose, assert_array_equal
import pytest
import scipy.spatial.distance
import tensorflow as tf
from .. import losses
def test_dice():
    """Checks losses.dice against scipy's dice dissimilarity."""
    # Identical inputs give zero loss, whether all-zero or all-one.
    for fill in (np.zeros(4), np.ones(4)):
        assert_allclose(losses.dice(fill, fill.copy(), axis=None).numpy(), 0)

    # Partial and full mismatch match the scipy reference.
    x = [0.0, 0.0, 1.0, 1.0]
    for y in ([1.0, 1.0, 1.0, 1.0], [1.0, 1.0, 0.0, 0.0]):
        out = losses.dice(x, y, axis=None).numpy()
        assert_allclose(out, scipy.spatial.distance.dice(x, y))
    # The fully-disjoint case above is exactly 1.
    assert_allclose(out, 1)

    # Batched volumes: per-example values match scipy, and the Dice class
    # reduces to the mean (symmetrically in its arguments).
    x = np.ones((4, 32, 32, 32, 1), dtype=np.float32)
    y = x.copy()
    x[:2, :10, 10:] = 0
    y[:2, :3, 20:] = 0
    y[3:, 10:] = 0
    expected = np.array([
        scipy.spatial.distance.dice(a.flatten(), b.flatten())
        for a, b in zip(x, y)])
    assert_allclose(losses.dice(x, y, axis=(1, 2, 3, 4)), expected, rtol=1e-05)
    assert_allclose(
        losses.Dice(axis=(1, 2, 3, 4))(x, y), expected.mean(), rtol=1e-05)
    assert_allclose(
        losses.Dice(axis=(1, 2, 3, 4))(y, x), expected.mean(), rtol=1e-05)
def test_generalized_dice():
    """Checks generalized dice on all-equal and fully-disjoint volumes."""
    shape = (8, 32, 32, 32, 16)
    # Perfectly matching inputs (all zeros or all ones) give zero loss.
    for fill_value in (0.0, 1.0):
        x = np.full(shape, fill_value)
        assert_array_equal(
            losses.generalized_dice(x, x.copy()), np.zeros(shape[0]))

    x, y = np.ones(shape), np.zeros(shape)
    # Why aren't the losses exactly one? Could it be the propagation of floating
    # point inaccuracies when summing?
    assert_allclose(losses.generalized_dice(x, y), np.ones(shape[0]), atol=1e-03)
    assert_allclose(
        losses.GeneralizedDice(axis=(1, 2, 3))(x, y), losses.generalized_dice(x, y)
    )

    x = np.ones((4, 32, 32, 32, 1), dtype=np.float64)
    y = x.copy()
    x[:2, :10, 10:] = 0
    y[:2, :3, 20:] = 0
    y[3:, 10:] = 0
    # Dice is similar to generalized Dice for one class. The weight factor
    # makes the generalized form slightly different from Dice.
    gd = losses.generalized_dice(x, y, axis=(1, 2, 3)).numpy()
    dd = losses.dice(x, y, axis=(1, 2, 3, 4)).numpy()
    assert_allclose(gd, dd, rtol=1e-02)  # is this close enough?
def test_jaccard():
    """Checks losses.jaccard against scipy's jaccard dissimilarity."""
    # Identical inputs give zero loss, whether all-zero or all-one.
    for fill in (np.zeros(4), np.ones(4)):
        assert_allclose(losses.jaccard(fill, fill.copy(), axis=None).numpy(), 0)

    # Partial and full mismatch match the scipy reference.
    x = [0.0, 0.0, 1.0, 1.0]
    for y in ([1.0, 1.0, 1.0, 1.0], [1.0, 1.0, 0.0, 0.0]):
        out = losses.jaccard(x, y, axis=None).numpy()
        assert_allclose(out, scipy.spatial.distance.jaccard(x, y))
    # The fully-disjoint case above is exactly 1.
    assert_allclose(out, 1)

    # Batched volumes: per-example values match scipy, and the Jaccard class
    # reduces to the mean (symmetrically in its arguments).
    x = np.ones((4, 32, 32, 32, 1), dtype=np.float32)
    y = x.copy()
    x[:2, :10, 10:] = 0
    y[:2, :3, 20:] = 0
    y[3:, 10:] = 0
    expected = np.array([
        scipy.spatial.distance.jaccard(a.flatten(), b.flatten())
        for a, b in zip(x, y)])
    assert_allclose(losses.jaccard(x, y, axis=(1, 2, 3, 4)), expected)
    assert_allclose(losses.Jaccard(axis=(1, 2, 3, 4))(x, y), expected.mean())
    assert_allclose(losses.Jaccard(axis=(1, 2, 3, 4))(y, x), expected.mean())
@pytest.mark.xfail
def test_tversky():
    """Placeholder: marked xfail until a real Tversky-loss test is written."""
    # TODO: write the test
    assert False
@pytest.mark.xfail
def test_elbo():
    """Placeholder: marked xfail until a real ELBO-loss test is written."""
    # TODO: write the test
    assert False
def test_wasserstein():
    """Spot-checks the elementwise wasserstein loss on fixed inputs."""
    cases = [
        (np.zeros(4), np.zeros(4), 0),
        (np.ones(4), np.ones(4), 1),
        (np.array([0.0, -1.0, 1.0, -1.0]),
         np.array([1.0, -1.0, 1.0, 1.0]),
         [0.0, 1.0, 1.0, -1.0]),
        (np.array([0.0, 0.0, 1.0, 1.0]),
         np.array([1.0, 1.0, 0.0, 0.0]),
         0),
    ]
    for x, y, expected in cases:
        assert_allclose(losses.wasserstein(x, y), expected)
def test_gradient_penalty():
    """Spot-checks the elementwise gradient penalty on fixed inputs."""
    cases = [
        (np.zeros(4), np.zeros(4), 10),
        (np.ones(4), np.ones(4), 0.001),
        (np.array([0.0, -1.0, 1.0, -1.0]),
         np.array([1.0, -1.0, 1.0, 1.0]),
         [1.0001e01, 1.0000e-03, 1.0000e-03, 1.0000e-03]),
        (np.array([0.0, 0.0, 1.0, 1.0]),
         np.array([1.0, 1.0, 0.0, 0.0]),
         [10.001, 10.001, 0.0, 0.0]),
    ]
    for x, y, expected in cases:
        assert_allclose(losses.gradient_penalty(x, y), expected)
def test_get():
    """Checks that losses.get resolves names to the right callables/classes."""
    # NOTE(review): distutils.version.LooseVersion is deprecated (PEP 632);
    # consider packaging.version if newer Pythons must be supported.
    if LooseVersion(tf.__version__) < LooseVersion("1.14.1-dev20190408"):
        # Older TF: get() returns the Loss classes themselves.
        assert losses.get("dice") is losses.dice
        assert losses.get("Dice") is losses.Dice
        assert losses.get("jaccard") is losses.jaccard
        assert losses.get("Jaccard") is losses.Jaccard
        assert losses.get("tversky") is losses.tversky
        assert losses.get("Tversky") is losses.Tversky
        assert losses.get("binary_crossentropy")
    else:
        # Newer TF: get() returns instances of the Loss classes.
        assert losses.get("dice") is losses.dice
        assert isinstance(losses.get("Dice"), losses.Dice)
        assert losses.get("jaccard") is losses.jaccard
        assert isinstance(losses.get("Jaccard"), losses.Jaccard)
        assert losses.get("tversky") is losses.tversky
        assert isinstance(losses.get("Tversky"), losses.Tversky)
        assert losses.get("binary_crossentropy")
    assert losses.get("gradient_penalty") is losses.gradient_penalty
    assert losses.get("wasserstein") is losses.wasserstein
    assert isinstance(losses.get("Wasserstein"), losses.Wasserstein)
| [
"numpy.ones",
"numpy.testing.assert_allclose",
"numpy.array",
"numpy.zeros",
"numpy.empty",
"distutils.version.LooseVersion"
] | [((243, 254), 'numpy.zeros', 'np.zeros', (['(4)'], {}), '(4)\n', (251, 254), True, 'import numpy as np\n'), ((263, 274), 'numpy.zeros', 'np.zeros', (['(4)'], {}), '(4)\n', (271, 274), True, 'import numpy as np\n'), ((326, 349), 'numpy.testing.assert_allclose', 'assert_allclose', (['out', '(0)'], {}), '(out, 0)\n', (341, 349), False, 'from numpy.testing import assert_allclose, assert_array_equal\n'), ((359, 369), 'numpy.ones', 'np.ones', (['(4)'], {}), '(4)\n', (366, 369), True, 'import numpy as np\n'), ((378, 388), 'numpy.ones', 'np.ones', (['(4)'], {}), '(4)\n', (385, 388), True, 'import numpy as np\n'), ((440, 463), 'numpy.testing.assert_allclose', 'assert_allclose', (['out', '(0)'], {}), '(out, 0)\n', (455, 463), False, 'from numpy.testing import assert_allclose, assert_array_equal\n'), ((618, 643), 'numpy.testing.assert_allclose', 'assert_allclose', (['out', 'ref'], {}), '(out, ref)\n', (633, 643), False, 'from numpy.testing import assert_allclose, assert_array_equal\n'), ((798, 823), 'numpy.testing.assert_allclose', 'assert_allclose', (['out', 'ref'], {}), '(out, ref)\n', (813, 823), False, 'from numpy.testing import assert_allclose, assert_array_equal\n'), ((828, 851), 'numpy.testing.assert_allclose', 'assert_allclose', (['out', '(1)'], {}), '(out, 1)\n', (843, 851), False, 'from numpy.testing import assert_allclose, assert_array_equal\n'), ((861, 906), 'numpy.ones', 'np.ones', (['(4, 32, 32, 32, 1)'], {'dtype': 'np.float32'}), '((4, 32, 32, 32, 1), dtype=np.float32)\n', (868, 906), True, 'import numpy as np\n'), ((1002, 1022), 'numpy.empty', 'np.empty', (['x.shape[0]'], {}), '(x.shape[0])\n', (1010, 1022), True, 'import numpy as np\n'), ((1450, 1465), 'numpy.zeros', 'np.zeros', (['shape'], {}), '(shape)\n', (1458, 1465), True, 'import numpy as np\n'), ((1474, 1489), 'numpy.zeros', 'np.zeros', (['shape'], {}), '(shape)\n', (1482, 1489), True, 'import numpy as np\n'), ((1605, 1619), 'numpy.ones', 'np.ones', (['shape'], {}), '(shape)\n', (1612, 1619), True, 
'import numpy as np\n'), ((1628, 1642), 'numpy.ones', 'np.ones', (['shape'], {}), '(shape)\n', (1635, 1642), True, 'import numpy as np\n'), ((1758, 1772), 'numpy.ones', 'np.ones', (['shape'], {}), '(shape)\n', (1765, 1772), True, 'import numpy as np\n'), ((1781, 1796), 'numpy.zeros', 'np.zeros', (['shape'], {}), '(shape)\n', (1789, 1796), True, 'import numpy as np\n'), ((2119, 2164), 'numpy.ones', 'np.ones', (['(4, 32, 32, 32, 1)'], {'dtype': 'np.float64'}), '((4, 32, 32, 32, 1), dtype=np.float64)\n', (2126, 2164), True, 'import numpy as np\n'), ((2507, 2541), 'numpy.testing.assert_allclose', 'assert_allclose', (['gd', 'dd'], {'rtol': '(0.01)'}), '(gd, dd, rtol=0.01)\n', (2522, 2541), False, 'from numpy.testing import assert_allclose, assert_array_equal\n'), ((2598, 2609), 'numpy.zeros', 'np.zeros', (['(4)'], {}), '(4)\n', (2606, 2609), True, 'import numpy as np\n'), ((2618, 2629), 'numpy.zeros', 'np.zeros', (['(4)'], {}), '(4)\n', (2626, 2629), True, 'import numpy as np\n'), ((2684, 2707), 'numpy.testing.assert_allclose', 'assert_allclose', (['out', '(0)'], {}), '(out, 0)\n', (2699, 2707), False, 'from numpy.testing import assert_allclose, assert_array_equal\n'), ((2717, 2727), 'numpy.ones', 'np.ones', (['(4)'], {}), '(4)\n', (2724, 2727), True, 'import numpy as np\n'), ((2736, 2746), 'numpy.ones', 'np.ones', (['(4)'], {}), '(4)\n', (2743, 2746), True, 'import numpy as np\n'), ((2801, 2824), 'numpy.testing.assert_allclose', 'assert_allclose', (['out', '(0)'], {}), '(out, 0)\n', (2816, 2824), False, 'from numpy.testing import assert_allclose, assert_array_equal\n'), ((2985, 3010), 'numpy.testing.assert_allclose', 'assert_allclose', (['out', 'ref'], {}), '(out, ref)\n', (3000, 3010), False, 'from numpy.testing import assert_allclose, assert_array_equal\n'), ((3171, 3196), 'numpy.testing.assert_allclose', 'assert_allclose', (['out', 'ref'], {}), '(out, ref)\n', (3186, 3196), False, 'from numpy.testing import assert_allclose, assert_array_equal\n'), ((3201, 3224), 
'numpy.testing.assert_allclose', 'assert_allclose', (['out', '(1)'], {}), '(out, 1)\n', (3216, 3224), False, 'from numpy.testing import assert_allclose, assert_array_equal\n'), ((3234, 3279), 'numpy.ones', 'np.ones', (['(4, 32, 32, 32, 1)'], {'dtype': 'np.float32'}), '((4, 32, 32, 32, 1), dtype=np.float32)\n', (3241, 3279), True, 'import numpy as np\n'), ((3378, 3398), 'numpy.empty', 'np.empty', (['x.shape[0]'], {}), '(x.shape[0])\n', (3386, 3398), True, 'import numpy as np\n'), ((3944, 3955), 'numpy.zeros', 'np.zeros', (['(4)'], {}), '(4)\n', (3952, 3955), True, 'import numpy as np\n'), ((3964, 3975), 'numpy.zeros', 'np.zeros', (['(4)'], {}), '(4)\n', (3972, 3975), True, 'import numpy as np\n'), ((4015, 4038), 'numpy.testing.assert_allclose', 'assert_allclose', (['out', '(0)'], {}), '(out, 0)\n', (4030, 4038), False, 'from numpy.testing import assert_allclose, assert_array_equal\n'), ((4048, 4058), 'numpy.ones', 'np.ones', (['(4)'], {}), '(4)\n', (4055, 4058), True, 'import numpy as np\n'), ((4067, 4077), 'numpy.ones', 'np.ones', (['(4)'], {}), '(4)\n', (4074, 4077), True, 'import numpy as np\n'), ((4117, 4140), 'numpy.testing.assert_allclose', 'assert_allclose', (['out', '(1)'], {}), '(out, 1)\n', (4132, 4140), False, 'from numpy.testing import assert_allclose, assert_array_equal\n'), ((4150, 4182), 'numpy.array', 'np.array', (['[0.0, -1.0, 1.0, -1.0]'], {}), '([0.0, -1.0, 1.0, -1.0])\n', (4158, 4182), True, 'import numpy as np\n'), ((4191, 4222), 'numpy.array', 'np.array', (['[1.0, -1.0, 1.0, 1.0]'], {}), '([1.0, -1.0, 1.0, 1.0])\n', (4199, 4222), True, 'import numpy as np\n'), ((4294, 4319), 'numpy.testing.assert_allclose', 'assert_allclose', (['out', 'ref'], {}), '(out, ref)\n', (4309, 4319), False, 'from numpy.testing import assert_allclose, assert_array_equal\n'), ((4329, 4359), 'numpy.array', 'np.array', (['[0.0, 0.0, 1.0, 1.0]'], {}), '([0.0, 0.0, 1.0, 1.0])\n', (4337, 4359), True, 'import numpy as np\n'), ((4368, 4398), 'numpy.array', 'np.array', (['[1.0, 
1.0, 0.0, 0.0]'], {}), '([1.0, 1.0, 0.0, 0.0])\n', (4376, 4398), True, 'import numpy as np\n'), ((4438, 4461), 'numpy.testing.assert_allclose', 'assert_allclose', (['out', '(0)'], {}), '(out, 0)\n', (4453, 4461), False, 'from numpy.testing import assert_allclose, assert_array_equal\n'), ((4501, 4512), 'numpy.zeros', 'np.zeros', (['(4)'], {}), '(4)\n', (4509, 4512), True, 'import numpy as np\n'), ((4521, 4532), 'numpy.zeros', 'np.zeros', (['(4)'], {}), '(4)\n', (4529, 4532), True, 'import numpy as np\n'), ((4577, 4601), 'numpy.testing.assert_allclose', 'assert_allclose', (['out', '(10)'], {}), '(out, 10)\n', (4592, 4601), False, 'from numpy.testing import assert_allclose, assert_array_equal\n'), ((4611, 4621), 'numpy.ones', 'np.ones', (['(4)'], {}), '(4)\n', (4618, 4621), True, 'import numpy as np\n'), ((4630, 4640), 'numpy.ones', 'np.ones', (['(4)'], {}), '(4)\n', (4637, 4640), True, 'import numpy as np\n'), ((4685, 4712), 'numpy.testing.assert_allclose', 'assert_allclose', (['out', '(0.001)'], {}), '(out, 0.001)\n', (4700, 4712), False, 'from numpy.testing import assert_allclose, assert_array_equal\n'), ((4722, 4754), 'numpy.array', 'np.array', (['[0.0, -1.0, 1.0, -1.0]'], {}), '([0.0, -1.0, 1.0, -1.0])\n', (4730, 4754), True, 'import numpy as np\n'), ((4763, 4794), 'numpy.array', 'np.array', (['[1.0, -1.0, 1.0, 1.0]'], {}), '([1.0, -1.0, 1.0, 1.0])\n', (4771, 4794), True, 'import numpy as np\n'), ((4897, 4922), 'numpy.testing.assert_allclose', 'assert_allclose', (['out', 'ref'], {}), '(out, ref)\n', (4912, 4922), False, 'from numpy.testing import assert_allclose, assert_array_equal\n'), ((4932, 4962), 'numpy.array', 'np.array', (['[0.0, 0.0, 1.0, 1.0]'], {}), '([0.0, 0.0, 1.0, 1.0])\n', (4940, 4962), True, 'import numpy as np\n'), ((4971, 5001), 'numpy.array', 'np.array', (['[1.0, 1.0, 0.0, 0.0]'], {}), '([1.0, 1.0, 0.0, 0.0])\n', (4979, 5001), True, 'import numpy as np\n'), ((5083, 5108), 'numpy.testing.assert_allclose', 'assert_allclose', (['out', 'ref'], {}), 
'(out, ref)\n', (5098, 5108), False, 'from numpy.testing import assert_allclose, assert_array_equal\n'), ((1544, 1562), 'numpy.zeros', 'np.zeros', (['shape[0]'], {}), '(shape[0])\n', (1552, 1562), True, 'import numpy as np\n'), ((1697, 1715), 'numpy.zeros', 'np.zeros', (['shape[0]'], {}), '(shape[0])\n', (1705, 1715), True, 'import numpy as np\n'), ((1968, 1985), 'numpy.ones', 'np.ones', (['shape[0]'], {}), '(shape[0])\n', (1975, 1985), True, 'import numpy as np\n'), ((5134, 5162), 'distutils.version.LooseVersion', 'LooseVersion', (['tf.__version__'], {}), '(tf.__version__)\n', (5146, 5162), False, 'from distutils.version import LooseVersion\n'), ((5165, 5199), 'distutils.version.LooseVersion', 'LooseVersion', (['"""1.14.1-dev20190408"""'], {}), "('1.14.1-dev20190408')\n", (5177, 5199), False, 'from distutils.version import LooseVersion\n')] |
#!/usr/bin/python
#
# Copyright 2020 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""A collection of observation wrappers for construction tasks."""
from dm_construction.unity import constants as unity_constants
from dm_construction.utils import constants
from dm_construction.wrappers import base
import dm_env
from dm_env import specs
import numpy as np
# Used to avoid putting actions at exactly the limits of the scene.
_SMALL_EPSILON = 1e-6
# Added to y-coordinates of actions, to avoid triggering a collision between the
# object being placed and the object below it. We cannot use a value too low,
# because of Unity's collision behavior
_Y_MARGIN = 4e-2
def _discrete_array_spec(shape, base_name):
  """Returns an int32 `specs.Array` named "<base_name>_spec"."""
  spec_name = base_name + "_spec"
  return specs.Array(shape, dtype=np.int32, name=spec_name)
def _continuous_array_spec(shape, base_name):
  """Returns a float32 `specs.Array` named "<base_name>_spec"."""
  spec_name = base_name + "_spec"
  return specs.Array(shape, dtype=np.float32, name=spec_name)
def _slices_and_indices_to_indices(slices_or_indices):
indices = []
for slice_or_index in slices_or_indices:
if isinstance(slice_or_index, slice):
if slice_or_index.step not in [None, 1]:
raise ValueError("slices should only use a step size of 1")
indices.extend(list(range(slice_or_index.start, slice_or_index.stop)))
else:
indices.append(slice_or_index)
return sorted(indices)
def _get_relative_discretization_grid(point_counts_inside):
  """Returns `point_counts_inside + 3` evenly spaced relative offsets.

  The grid spans [-bound, bound] where bound extends slightly past 1, so
  blocks can also be stacked flush with the sides of each other.
  """
  if point_counts_inside > 0:
    side_margin = max(1. / point_counts_inside, _Y_MARGIN)
  else:
    side_margin = _Y_MARGIN
  half_width = 1. + side_margin
  return np.linspace(-half_width, half_width, point_counts_inside + 3)
class DiscreteRelativeGraphWrapper(base.ConstructionBaseWrapper):
"""Creates graph-based observations with discrete relative actions."""
def __init__(self,
env,
allow_reverse_action=False,
max_x_width=2.0,
max_y_width=2.0,
discretization_steps=12,
invalid_edge_penalty=0.,
enable_y_action=False,
enable_glue_action=True,
enable_selection_action=True):
"""Wraps an environment with graph-structured observations and actions.
The environment support the following modes of discrete actions:
- Agent only providing "x_action" (defining the x-coordinate of the placed
block, starting from the leftmost position).
- Agent additionally providing "y_action" (defining the y-coordinate of
the placed block, starting from the bottom most position). To use this
mode, set enable_y_position to True.
Args:
env: An instance of `ConstructionBaseWrapper`.
allow_reverse_action: Whether to allow the action to be attached to an
edge in the reverse direction. If this option is set to True and that
the edge to which the action is attached is going from a valid moved
block to a valid base block (instead of the reverse direction), then the
action corresponding to the reverse edge will be taken. Otherwise, the
episode will end with an `invalid_edge` termination.
max_x_width: The accessible width along the x axis, centered on the chosen
block center.
max_y_width: The accessible width along the y axis, centered on the chosen
block center.
discretization_steps: The number of discrete steps along the x and y axis.
invalid_edge_penalty: The penalty received when selecting an invalid edge
(a positive number; the reward will be minus that).
enable_y_action: Whether the agent also select the y-coordinate. If False,
the y coordinate is set to be a small margin on top of the block, at the
given y coordinate.
enable_glue_action: Whether the agent select whether to glue or not. If
False, glue is always applied.
enable_selection_action: Whether the agent selects the order of the
blocks.
"""
super(DiscreteRelativeGraphWrapper, self).__init__(env=env)
self._allow_reverse_action = allow_reverse_action
self._discretization_steps = discretization_steps
assert invalid_edge_penalty > -1e-6
self._invalid_edge_penalty = invalid_edge_penalty
self._enable_y_action = enable_y_action
self._enable_glue_action = enable_glue_action
self._enable_selection_action = enable_selection_action
self._init_observation_wrapping()
self._init_action_wrapping()
  def _init_observation_wrapping(self):
    """Sets up attributes needed for wrapping observations.

    Decides which object types become graph nodes and which of the raw Unity
    features (plus the appended node-type one-hot) are kept as node features.
    """
    # Which keys from the underlying observation to include as nodes in the
    # graph observation.
    self._node_types = [
        constants.BLOCK, constants.AVAILABLE_BLOCK, constants.OBSTACLE,
        constants.TARGET]
    if constants.BALL in self._env.observation_spec():
      self._node_types.append(constants.BALL)
    # We will first concatenate on one hots, then cherry pick the node features
    # that we want. Before doing the cherry picking, these will be the indices
    # of the one hot node types.
    self._one_hot_feature_slice = slice(
        unity_constants.BLOCK_SIZE,
        unity_constants.BLOCK_SIZE + len(self._node_types))
    # Which features from the underlying observation to include in the node
    # attributes.
    self._node_features = _slices_and_indices_to_indices([
        unity_constants.POSITION_FEATURE_SLICE,
        unity_constants.ORIENTATION_FEATURE_SLICE,
        unity_constants.WIDTH_FEATURE_INDEX,
        unity_constants.HEIGHT_FEATURE_INDEX,
        unity_constants.LINEAR_VELOCITY_FEATURE_SLICE,
        unity_constants.ANGULAR_VELOCITY_FEATURE_INDEX,
        unity_constants.STICKY_FEATURE_INDEX,
        unity_constants.FREE_OBJECT_FEATURE_INDEX,
        unity_constants.SHAPE_FEATURE_SLICE,
        self._one_hot_feature_slice
    ])
  def _init_action_wrapping(self):
    """Sets up attributes needed for wrapping actions.

    Caches one-hot indices for valid base/moved blocks, continuous action
    bounds from the underlying env, and the discrete relative x/y grids.
    """
    valid_base_block_types = [
        constants.BLOCK, constants.OBSTACLE, constants.TARGET]
    # NOTE(review): `_init_observation_wrapping` keys on `constants.BALL` while
    # this checks the literal "Balls" -- confirm the two spellings match the
    # underlying observation spec.
    if "Balls" in self._env.observation_spec():
      valid_base_block_types.append(constants.BALL)
    self._valid_base_block_one_hots = [
        self._get_node_one_hot_index(x)
        for x in valid_base_block_types
    ]
    self._valid_moved_block_one_hots = [
        self._get_node_one_hot_index(x)
        for x in [constants.AVAILABLE_BLOCK]
    ]
    self._non_physical_one_hots = [
        self._get_node_one_hot_index(x)
        for x in [constants.TARGET]
    ]
    self._x_feature_index = self._get_feature_index(
        unity_constants.POSITION_X_FEATURE_INDEX)
    self._y_feature_index = self._get_feature_index(
        unity_constants.POSITION_Y_FEATURE_INDEX)
    self._height_feature_index = (
        self._get_feature_index(
            unity_constants.HEIGHT_FEATURE_INDEX))
    standard_action_spec = self._env.action_spec()
    # Without a "Selector" action in the underlying env, block-selection
    # cannot be exposed regardless of the constructor flag.
    if "Selector" not in standard_action_spec:
      self._enable_selection_action = False
    # Continuous bounds, nudged inwards by a small epsilon so actions never
    # sit exactly on the scene limits.
    self._min_x = (
        float(standard_action_spec["Horizontal"].minimum) + _SMALL_EPSILON)
    self._min_y = (
        float(standard_action_spec["Vertical"].minimum) + _SMALL_EPSILON)
    self._max_x = (
        float(standard_action_spec["Horizontal"].maximum) - _SMALL_EPSILON)
    self._max_y = (
        float(standard_action_spec["Vertical"].maximum) - _SMALL_EPSILON)
    self._num_x_actions = self._discretization_steps + 3
    self._num_y_actions = self._discretization_steps + 3
    self._relative_x_positions = _get_relative_discretization_grid(
        self._discretization_steps)
    self._relative_y_positions = _get_relative_discretization_grid(
        self._discretization_steps)
    # Ignoring attributes with nested structure that are constant to avoid
    # unnecessary deepcopies of those when restoring states. This is not
    # technically necessary (e.g. we do not bother with scalar attributes).
    self._state_ignore_fields.extend([
        "_valid_base_block_one_hots", "_valid_moved_block_one_hots",
        "_non_physical_one_hots", "_relative_x_positions",
        "_relative_y_positions"
    ])
  def _get_feature_index(self, core_index):
    """Maps a raw Unity feature index to its position in the selected features."""
    return self._node_features.index(core_index)
  def _get_node_one_hot_index(self, object_type):
    """Returns the selected-feature index of `object_type`'s one-hot flag."""
    # Get the index just in the one-hots
    base_index = self._node_types.index(object_type)
    # Get the feature index into node_features
    features = _slices_and_indices_to_indices([self._one_hot_feature_slice])
    feature_index = features[base_index]
    # Look up the actual index
    one_hot_index = self._node_features.index(feature_index)
    return one_hot_index
  def action_spec(self):
    """Returns the dict action spec.

    Keys:
      "Index": int32 scalar, the edge index the action attaches to.
      "x_action": int32 in [0, num_x_actions - 1], discrete relative x offset.
      "y_action": only present when `enable_y_action`; int32 in
        [0, num_y_actions - 1].
      "sticky": only present when `enable_glue_action`; binary glue flag.
    """
    edge_spec = {
        "Index": specs.Array([], dtype=np.int32),
        "x_action": specs.BoundedArray(
            [], np.int32, 0, self._num_x_actions - 1)
    }
    if self._enable_y_action:
      edge_spec.update({
          "y_action": specs.BoundedArray(
              [], np.int32, 0, self._num_y_actions - 1)
      })
    if self._enable_glue_action:
      edge_spec.update({"sticky": specs.BoundedArray([], np.int32, 0, 1)})
    return edge_spec
def observation_spec(self):
"""The observation spec as a graph.
Note that while this method returns a dictionary, it is compatible with the
GraphsTuple data structure from the graph_nets library. To convert the spec
from this method to a GraphsTuple:
from graph_nets import graphs
spec = graphs.GraphsTuple(**env.observation_spec())
Returns:
spec: the observation spec as a dictionary
"""
node_size = len(self._node_features)
nodes_spec = _continuous_array_spec([0, node_size], "nodes")
edges_spec = _continuous_array_spec([0, 1], "edges")
senders_spec = _discrete_array_spec([0], "senders")
receivers_spec = _discrete_array_spec([0], "receivers")
globals_spec = _continuous_array_spec([1, 1], "globals")
n_node_spec = _discrete_array_spec([1], "n_node")
n_edge_spec = _discrete_array_spec([1], "n_edge")
observation_spec = dict(
nodes=nodes_spec,
edges=edges_spec,
globals=globals_spec,
n_node=n_node_spec,
n_edge=n_edge_spec,
receivers=receivers_spec,
senders=senders_spec
)
return observation_spec
  def _get_nodes(self, observation):
    """Stacks per-type object observations into one node-feature matrix.

    Each object's raw features are augmented with a one-hot node-type
    indicator; rows from all node types are then concatenated.

    Args:
      observation: dict mapping each node-type key to a
        [num_objects, num_features] array (or [num_objects, 1, num_features];
        the singleton time axis is dropped).

    Returns:
      A [total_num_objects, num_features + num_node_types] array.
    """
    objects = []
    for i, key in enumerate(self._node_types):
      # Remove extra time dimension returned by some environments
      # (like marble run)
      features = observation[key]
      if features.ndim == 3:
        features = features[:, 0]
      # Add a one-hot indicator of the node type.
      one_hot = np.zeros(
          (features.shape[0], len(self._node_types)), dtype=np.float32)
      one_hot[:, i] = 1
      features = np.concatenate([features, one_hot], axis=1)
      objects.append(features)
    return np.concatenate(objects, axis=0)
def _get_edges(self, nodes):
sender_node_inds = np.arange(len(nodes))
receiver_node_inds = np.arange(len(nodes))
senders, receivers = np.meshgrid(sender_node_inds, receiver_node_inds)
senders, receivers = senders.flatten(
).astype(np.int32), receivers.flatten().astype(np.int32)
# This removes self-edges.
same_index = senders == receivers
senders = senders[~same_index]
receivers = receivers[~same_index]
edge_content = np.zeros([senders.shape[0], 1], dtype=np.float32)
return edge_content, senders, receivers
def _get_globals(self):
return np.zeros([1], dtype=np.float32)
def _order_nodes(self, observation):
"""Order nodes based on object id."""
indices = observation["nodes"][
:, unity_constants.ID_FEATURE_INDEX].astype(int)
ordering = np.argsort(indices)
# update nodes
nodes = observation["nodes"][ordering]
# update senders/receivers
ordering = list(ordering)
inverse_ordering = np.array(
[ordering.index(i) for i in range(len(ordering))], dtype=np.int32)
if observation["senders"] is not None:
senders = inverse_ordering[observation["senders"]]
else:
senders = None
if observation["receivers"] is not None:
receivers = inverse_ordering[observation["receivers"]]
else:
receivers = None
new_observation = observation.copy()
new_observation.update(dict(
nodes=nodes,
senders=senders,
receivers=receivers))
return new_observation
  def _select_node_features(self, observation):
    """Cherry-pick desired node features.

    Keeps only the `self._node_features` columns of the node matrix; all
    other graph fields are left untouched.
    """
    nodes = observation["nodes"][:, self._node_features]
    new_observation = observation.copy()
    new_observation["nodes"] = nodes
    return new_observation
def _process_time_step(self, time_step):
    """Rewrite a raw timestep's observation as GraphsTuple-compatible fields."""
    nodes = self._get_nodes(time_step.observation)
    edges, senders, receivers = self._get_edges(nodes)
    globals_ = self._get_globals()
    # Assemble the graph observation, then normalize node order and trim
    # node features before handing it back to the caller.
    graph_observation = {
        "nodes": nodes,
        "edges": edges,
        "globals": globals_[np.newaxis],
        "n_node": np.array([nodes.shape[0]], dtype=int),
        "n_edge": np.array([edges.shape[0]], dtype=int),
        "receivers": receivers,
        "senders": senders,
    }
    graph_observation = self._order_nodes(graph_observation)
    graph_observation = self._select_node_features(graph_observation)
    return time_step._replace(observation=graph_observation)
def _compute_continuous_action(self, base_pos, base_length, moved_length,
offset, min_pos, max_pos):
ratio = (base_length + moved_length) / 2.
return np.clip(base_pos + offset * ratio, min_pos, max_pos)
def reset(self, *args, **kwargs):  # pylint: disable=useless-super-delegation
    """Reset the wrapped environment.

    The observation dictionary inside the returned timestep mirrors the
    fields of graph_nets' GraphsTuple, so callers can convert it with:

      from graph_nets import graphs
      timestep = env.reset()
      timestep = timestep._replace(
          observation=graphs.GraphsTuple(**timestep.observation))

    Args:
      *args: forwarded unchanged to the wrapped environment's reset.
      **kwargs: forwarded unchanged to the wrapped environment's reset.

    Returns:
      timestep: a dm_env.TimeStep
    """
    return super(DiscreteRelativeGraphWrapper, self).reset(*args, **kwargs)
def step(self, action):
    """Step the environment.

    Note that while this method returns observations as a dictionary, they are
    compatible with the GraphsTuple data structure from the graph_nets library.
    To convert the observations returned by this method to a GraphsTuple:

      from graph_nets import graphs
      timestep = env.step(action)
      timestep = timestep._replace(
          observation=graphs.GraphsTuple(**timestep.observation))

    Args:
      action: the action to take in the environment. Keys read here: "Index"
        (edge to act along), "x_action", and - depending on configuration -
        "y_action" and "sticky".

    Returns:
      timestep: a dm_env.TimeStep
    """
    # Resolve the chosen edge into (base, moved) block features; picking an
    # invalid edge terminates the episode immediately with a penalty.
    valid_action, base_block, moved_block = self._validate_edge_index(
        int(action["Index"]))
    if not valid_action:
        self._termination_reason = constants.TERMINATION_INVALID_EDGE
        self._last_time_step = dm_env.TimeStep(
            step_type=dm_env.StepType.LAST,
            observation=self._last_time_step.observation,
            reward=-self._invalid_edge_penalty,
            discount=0)
        return self._last_time_step
    block_x = base_block[self._x_feature_index]
    block_y = base_block[self._y_feature_index]
    # The moved block is identified downstream by its x coordinate.
    selector = moved_block[self._x_feature_index]
    width_index = self._get_feature_index(
        unity_constants.WIDTH_FEATURE_INDEX)
    # abs() because the width/height features can carry sign - TODO confirm.
    base_width = np.abs(base_block[width_index])
    moved_width = np.abs(moved_block[width_index])
    base_height = np.abs(base_block[self._height_feature_index])
    moved_height = np.abs(moved_block[self._height_feature_index])
    # Translate the discrete relative x bin into an absolute x coordinate,
    # clipped to the playable range.
    x_continuous_action = self._compute_continuous_action(
        base_pos=block_x,
        base_length=base_width,
        moved_length=moved_width,
        offset=self._relative_x_positions[action["x_action"]],
        min_pos=self._min_x,
        max_pos=self._max_x)
    if self._enable_y_action:
        y_continuous_action = self._compute_continuous_action(
            base_pos=block_y,
            base_length=base_height,
            moved_length=moved_height,
            offset=self._relative_y_positions[action["y_action"]],
            min_pos=self._min_y,
            max_pos=self._max_y)
    else:
        # No explicit y action: place the moved block at the base block's
        # height plus a small margin.
        y_continuous_action = block_y + _Y_MARGIN
        if all(base_block[self._non_physical_one_hots] < 0.5):
            # All non-physical indicators are off, so stack on top of the
            # base block instead of overlapping it.
            y_continuous_action += (base_height + moved_height) / 2.
    updated_action = {
        "Horizontal": np.array(x_continuous_action, dtype=np.float32),
        "Vertical": np.array(y_continuous_action, dtype=np.float32),
        "Sticky": np.array(1., dtype=np.int32),
    }
    if self._enable_glue_action:
        # Let the agent decide stickiness instead of the default of 1.
        updated_action["Sticky"] = action["sticky"]
    if self._enable_selection_action:
        updated_action["Selector"] = selector
    self._last_time_step = self._process_time_step(
        self._env.step(updated_action))
    return self._last_time_step
def _validate_edge_index(self, edge_index):
    """Checks that an action connecting first_node to second_node is valid.

    An action is valid if it connects a marker or block to an available block.

    Args:
      edge_index: Index of the edge to apply the action relatively with.

    Returns:
      is_valid: A boolean indicating whether the action was valid.
      base_block: The features of the base block, or None.
      moved_block: The features of the moved block, or None.
    """
    previous_observation = self._last_time_step.observation
    # Rebuild the (sender, receiver) edge list of the previous observation
    # and look up both endpoints of the selected edge.
    edges = list(
        zip(previous_observation["senders"], previous_observation["receivers"]))
    edge = edges[edge_index]
    nodes = previous_observation["nodes"]
    first_node_features = nodes[edge[0]]
    second_node_features = nodes[edge[1]]
    if not self._enable_selection_action:
        # Without a selection action, only the first movable block (by node
        # order) may move, so the chosen edge must touch it.
        first_movable_block = next((i for i, x in enumerate(nodes)
                                    if x[self._valid_moved_block_one_hots] > 0.5),
                                   None)
        if edge[0] != first_movable_block and edge[1] != first_movable_block:
            return False, None, None
    # Decide which endpoint is the (static) base and which one is moved.
    # With reverse actions allowed, the first endpoint is preferred as base.
    if self._allow_reverse_action and any(
        first_node_features[self._valid_base_block_one_hots] > 0.5):
        base_block = first_node_features
        moved_block = second_node_features
    elif any(second_node_features[self._valid_base_block_one_hots] > 0.5):
        base_block = second_node_features
        moved_block = first_node_features
    else:
        return False, None, None  # Not a valid base block.
    if not any(moved_block[self._valid_moved_block_one_hots] > 0.5):
        return False, None, None  # Not a valid moved block.
    return True, base_block, moved_block
@property
def termination_reason(self):
    """Why the episode ended: the wrapper-local reason if set, else the wrapped env's."""
    reason = self._termination_reason
    if reason:
        return reason
    return super(DiscreteRelativeGraphWrapper, self).termination_reason
@property
def all_termination_reasons(self):
    """Every reason the wrapped env can emit, plus the wrapper's invalid-edge one."""
    wrapper_reasons = [constants.TERMINATION_INVALID_EDGE]
    return self.core_env.all_termination_reasons + wrapper_reasons
| [
"numpy.clip",
"numpy.abs",
"dm_env.specs.Array",
"dm_env.TimeStep",
"numpy.argsort",
"numpy.array",
"numpy.linspace",
"numpy.zeros",
"dm_env.specs.BoundedArray",
"numpy.concatenate",
"numpy.meshgrid"
] | [((1273, 1333), 'dm_env.specs.Array', 'specs.Array', (['shape'], {'dtype': 'np.int32', 'name': "(base_name + '_spec')"}), "(shape, dtype=np.int32, name=base_name + '_spec')\n", (1284, 1333), False, 'from dm_env import specs\n'), ((1391, 1453), 'dm_env.specs.Array', 'specs.Array', (['shape'], {'dtype': 'np.float32', 'name': "(base_name + '_spec')"}), "(shape, dtype=np.float32, name=base_name + '_spec')\n", (1402, 1453), False, 'from dm_env import specs\n'), ((2203, 2254), 'numpy.linspace', 'np.linspace', (['(-bound)', 'bound', '(point_counts_inside + 3)'], {}), '(-bound, bound, point_counts_inside + 3)\n', (2214, 2254), True, 'import numpy as np\n'), ((11494, 11525), 'numpy.concatenate', 'np.concatenate', (['objects'], {'axis': '(0)'}), '(objects, axis=0)\n', (11508, 11525), True, 'import numpy as np\n'), ((11675, 11724), 'numpy.meshgrid', 'np.meshgrid', (['sender_node_inds', 'receiver_node_inds'], {}), '(sender_node_inds, receiver_node_inds)\n', (11686, 11724), True, 'import numpy as np\n'), ((11996, 12045), 'numpy.zeros', 'np.zeros', (['[senders.shape[0], 1]'], {'dtype': 'np.float32'}), '([senders.shape[0], 1], dtype=np.float32)\n', (12004, 12045), True, 'import numpy as np\n'), ((12128, 12159), 'numpy.zeros', 'np.zeros', (['[1]'], {'dtype': 'np.float32'}), '([1], dtype=np.float32)\n', (12136, 12159), True, 'import numpy as np\n'), ((12350, 12369), 'numpy.argsort', 'np.argsort', (['indices'], {}), '(indices)\n', (12360, 12369), True, 'import numpy as np\n'), ((14141, 14193), 'numpy.clip', 'np.clip', (['(base_pos + offset * ratio)', 'min_pos', 'max_pos'], {}), '(base_pos + offset * ratio, min_pos, max_pos)\n', (14148, 14193), True, 'import numpy as np\n'), ((16200, 16231), 'numpy.abs', 'np.abs', (['base_block[width_index]'], {}), '(base_block[width_index])\n', (16206, 16231), True, 'import numpy as np\n'), ((16250, 16282), 'numpy.abs', 'np.abs', (['moved_block[width_index]'], {}), '(moved_block[width_index])\n', (16256, 16282), True, 'import numpy as np\n'), 
((16302, 16348), 'numpy.abs', 'np.abs', (['base_block[self._height_feature_index]'], {}), '(base_block[self._height_feature_index])\n', (16308, 16348), True, 'import numpy as np\n'), ((16368, 16415), 'numpy.abs', 'np.abs', (['moved_block[self._height_feature_index]'], {}), '(moved_block[self._height_feature_index])\n', (16374, 16415), True, 'import numpy as np\n'), ((9316, 9347), 'dm_env.specs.Array', 'specs.Array', (['[]'], {'dtype': 'np.int32'}), '([], dtype=np.int32)\n', (9327, 9347), False, 'from dm_env import specs\n'), ((9369, 9429), 'dm_env.specs.BoundedArray', 'specs.BoundedArray', (['[]', 'np.int32', '(0)', '(self._num_x_actions - 1)'], {}), '([], np.int32, 0, self._num_x_actions - 1)\n', (9387, 9429), False, 'from dm_env import specs\n'), ((11407, 11450), 'numpy.concatenate', 'np.concatenate', (['[features, one_hot]'], {'axis': '(1)'}), '([features, one_hot], axis=1)\n', (11421, 11450), True, 'import numpy as np\n'), ((15730, 15881), 'dm_env.TimeStep', 'dm_env.TimeStep', ([], {'step_type': 'dm_env.StepType.LAST', 'observation': 'self._last_time_step.observation', 'reward': '(-self._invalid_edge_penalty)', 'discount': '(0)'}), '(step_type=dm_env.StepType.LAST, observation=self.\n _last_time_step.observation, reward=-self._invalid_edge_penalty, discount=0\n )\n', (15745, 15881), False, 'import dm_env\n'), ((17238, 17285), 'numpy.array', 'np.array', (['x_continuous_action'], {'dtype': 'np.float32'}), '(x_continuous_action, dtype=np.float32)\n', (17246, 17285), True, 'import numpy as np\n'), ((17307, 17354), 'numpy.array', 'np.array', (['y_continuous_action'], {'dtype': 'np.float32'}), '(y_continuous_action, dtype=np.float32)\n', (17315, 17354), True, 'import numpy as np\n'), ((17374, 17403), 'numpy.array', 'np.array', (['(1.0)'], {'dtype': 'np.int32'}), '(1.0, dtype=np.int32)\n', (17382, 17403), True, 'import numpy as np\n'), ((13612, 13649), 'numpy.array', 'np.array', (['[nodes.shape[0]]'], {'dtype': 'int'}), '([nodes.shape[0]], dtype=int)\n', (13620, 
13649), True, 'import numpy as np\n'), ((13666, 13703), 'numpy.array', 'np.array', (['[edges.shape[0]]'], {'dtype': 'int'}), '([edges.shape[0]], dtype=int)\n', (13674, 13703), True, 'import numpy as np\n'), ((9526, 9586), 'dm_env.specs.BoundedArray', 'specs.BoundedArray', (['[]', 'np.int32', '(0)', '(self._num_y_actions - 1)'], {}), '([], np.int32, 0, self._num_y_actions - 1)\n', (9544, 9586), False, 'from dm_env import specs\n'), ((9678, 9716), 'dm_env.specs.BoundedArray', 'specs.BoundedArray', (['[]', 'np.int32', '(0)', '(1)'], {}), '([], np.int32, 0, 1)\n', (9696, 9716), False, 'from dm_env import specs\n')] |
from collections import deque
from threading import Thread
from time import sleep
from typing import Dict
import numpy as np
from core.data.command import Command
from core.device.abstract import Connector
from core.device.manager import DeviceManager
from core.task.abstract import BaseTask
from core.task.manager import TaskManager
from core.utils.observable import Observable, Observer
class PBRMeasureAll(BaseTask):
    """Background task that periodically measures all PBR device channels.

    Each cycle posts every command in ``commands_to_execute`` to the device,
    awaits the responses, filters the configured OD channel's reading against
    a running average, and publishes accepted OD values via the ``od``
    Observable. All responses are persisted to the database.
    """

    def __init__(self, config):
        # Every config entry becomes an instance attribute; the required ones
        # are then validated explicitly.
        self.__dict__.update(config)
        required = ['sleep_period', 'lower_tol', 'upper_tol', 'od_channel',
                    'max_outliers', 'device_id', 'pump_id']
        self.validate_attributes(required, type(self).__name__)
        # Holds the most recent (up to two) OD readings for averaging.
        self.latest_values = deque(maxlen=2)
        # Count of consecutive OD readings rejected as outliers.
        self.outliers = 0
        self.device: Connector = DeviceManager().get_device(self.device_id)
        self.average_od = self.measure_initial_od_average()
        # Observers (e.g. pump tasks) get notified of each accepted OD value.
        self.od = Observable()
        super(PBRMeasureAll, self).__init__()
        # Device command table: human-readable name -> {command id, args}.
        self.commands_to_execute: Dict[str, dict] = {
            "pwm_settings": {
                "id": "12"
            },
            "light_0": {
                "id": "9",
                "args": [0]
            },
            "light_1": {
                "id": "9",
                "args": [1]
            },
            "od_0": {
                "id": "5",
                "args": [0, 30]
            },
            "od_1": {
                "id": "5",
                "args": [1, 30]
            },
            "ph": {
                "id": "4",
                "args": [5, 0]
            },
            "temp": {
                "id": "2"
            },
            "pump": {
                "id": "6",
                "args": [self.pump_id]
            },
            "o2": {
                "id": "14"
            },
            "ft_0": {
                "id": "17",
                "args": [0]
            },
            "ft_1": {
                "id": "17",
                "args": [1]
            }
        }

    def get_od_for_init(self):
        """Synchronously request one OD reading; return the response dict or None."""
        cmd = Command(self.device_id, "5",
                      [self.od_channel],
                      self.task_id,
                      is_awaited=True)
        self.device.post_command(cmd)
        cmd.await_cmd()
        if cmd.is_valid:
            return cmd.response

    def measure_initial_od_average(self):
        """Estimate the starting OD as a robust average of five measurements.

        Repeatedly trims the most extreme value until the mean is within 10%%
        below the median, then returns the mean (or the single remaining
        value).
        """
        data = []
        # collect the OD value from 5 measurements
        while len(data) < 5:
            od = self.get_od_for_init()
            if od is not None:
                data.append(od['od'])
        data.sort()
        computed = False
        average = 0
        # calculate the average OD from the measured data
        while not computed:
            # NOTE(review): a zero median would raise ZeroDivisionError below.
            mean = np.mean(data)
            median = np.median(data)
            if len(data) < 2:
                computed = True
                average = data[0]
            if mean / median <= 1:
                if mean / median >= 0.9:
                    computed = True
                    average = mean
                else:
                    # Mean pulled well below the median: drop the smallest
                    # value (data is sorted ascending).
                    data = data[1:]
            else:
                # Mean above the median: drop the largest value.
                data = data[:-1]
        return average

    def handle_outlier(self, measured_od) -> bool:
        """
        Decides whether the measured OD value is an outlier or not.

        Accepting a value - or exceeding ``max_outliers`` consecutive
        rejections - refreshes ``average_od`` from the latest readings.

        :param measured_od: optical density value
        :return: True if it is an outlier, False otherwise
        """
        lower_tol = self.calculate_tolerance(-self.lower_tol)
        upper_tol = self.calculate_tolerance(self.upper_tol)
        if lower_tol <= measured_od <= upper_tol:
            self.outliers = 0
            self.average_od = self.calculate_average()
            return False
        else:
            self.outliers += 1
            if self.outliers > self.max_outliers:
                # Too many consecutive outliers: assume the culture really
                # changed and re-baseline the average instead of rejecting.
                self.outliers = 0
                self.average_od = self.calculate_average()
                return False
            else:
                return True

    def calculate_tolerance(self, value):
        """Return ``average_od`` scaled by ``value`` percent (value may be negative)."""
        return ((100 + value) / 100) * self.average_od

    def calculate_average(self):
        """
        Helper method which calculates the average of a list while removing the elements from the objects deque.

        NOTE(review): raises ZeroDivisionError if the deque is empty - callers
        appear to append before calling; confirm.

        :return: The average of the deque
        """
        my_list = []
        while self.latest_values:
            my_list.append(self.latest_values.pop())
        return sum(my_list) / len(my_list)

    def start(self):
        """Launch the measurement loop in a background thread."""
        t = Thread(target=self._run)
        t.start()

    def _run(self):
        """Measurement loop: post all commands, await responses, filter OD."""
        self.average_od = self.measure_initial_od_average()
        # Name of the command entry matching the configured OD channel.
        od_variant = 'od_1' if self.od_channel == 1 else 'od_0'
        while self.is_active:
            commands = []
            # Post every command asynchronously first...
            for _name, _command in self.commands_to_execute.items():
                command = Command(self.device_id,
                                  _command.get("id"),
                                  _command.get("args", []),
                                  self.task_id,
                                  is_awaited=True)
                commands.append((_name, command))
                self.device.post_command(command, 1)
            # ...then await each one; only the configured OD channel goes
            # through outlier filtering and observer notification.
            for name, command in commands:
                command.await_cmd()
                if command.is_valid and name == od_variant:
                    od = command.response['od']
                    self.latest_values.appendleft(od)
                    od_is_outlier = self.handle_outlier(od)
                    if not od_is_outlier:
                        # Notify observers with accepted values only.
                        self.od.value = od
                    command.response = {'od': od, 'outlier': od_is_outlier, 'channel': self.od_channel}
                command.save_data_to_db()
            sleep(self.sleep_period)

    def end(self):
        """Stop the loop; the worker thread exits after its current cycle."""
        self.is_active = False
class ePBRMeasureAll(PBRMeasureAll):
    """PBRMeasureAll variant for the ePBR device.

    The ePBR exposes only OD (both channels), pH and temperature, and its
    commands take fewer arguments, so the parent's command table is replaced
    after initialization.
    """

    def __init__(self, config):
        super(ePBRMeasureAll, self).__init__(config)
        # Rebuild the command table in the same insertion order the parent's
        # measurement loop will iterate it in.
        commands: Dict[str, dict] = {}
        commands["od_0"] = {"id": "5", "args": [0]}
        commands["od_1"] = {"id": "5", "args": [1]}
        commands["ph"] = {"id": "4"}
        commands["temp"] = {"id": "2"}
        self.commands_to_execute = commands
class PBRGeneralPump(BaseTask, Observer):
    """Bang-bang pump controller keeping OD inside [min_od, max_od].

    Subscribes to a PBRMeasureAll task's ``od`` Observable and toggles the
    configured pump whenever a reported OD value leaves the allowed band.
    """

    def __init__(self, config):
        # Every config entry becomes an instance attribute; required ones are
        # validated explicitly.
        self.__dict__.update(config)
        required = ['min_od', 'max_od', 'pump_id', 'device_id',
                    'measure_all_task_id', 'pump_on_command', 'pump_off_command']
        self.validate_attributes(required, type(self).__name__)
        self.is_pump_on = False
        self.device = DeviceManager().get_device(self.device_id)
        # Subscribe to the measuring task's OD observable; `update` is then
        # called for every accepted OD value.
        self.od_task: PBRMeasureAll = TaskManager().get_task(self.measure_all_task_id)
        self.od_task.od.observe(self)
        super(PBRGeneralPump, self).__init__()

    def get_pump_command(self, state: bool) -> Command:
        """Build the device command that switches the pump on (True) or off (False).

        NOTE(review): ``eval`` executes the configured "arguments" string as
        Python code - safe only for trusted configuration; consider
        ``ast.literal_eval`` instead.
        """
        if state:
            return Command(self.device_id, self.pump_on_command.get("command_id"),
                           eval(self.pump_on_command.get("arguments", "[]")), self.task_id)
        else:
            return Command(self.device_id, self.pump_off_command.get("command_id"),
                           eval(self.pump_off_command.get("arguments", "[]")), self.task_id)

    def update(self, observable: Observable):
        """Observer callback: react to a newly published OD value."""
        self.stabilize(observable.value)

    def start(self):
        # Nothing to start: the task is driven entirely by observer callbacks.
        pass

    def end(self):
        # NOTE(review): the observer subscription is never detached here -
        # confirm the intended task lifecycle.
        pass

    def is_od_value_too_high(self, od):
        """True if the OD is above the allowed maximum."""
        return od > self.max_od

    def is_od_value_too_low(self, od):
        """True if the OD is below the allowed minimum."""
        return od < self.min_od

    def turn_pump_on(self):
        self.change_pump_state(True)

    def turn_pump_off(self):
        self.change_pump_state(False)

    def change_pump_state(self, state: bool):
        """Try up to five times to switch the pump; raise on persistent failure.

        :param state: desired pump state (True = on).
        :raises ConnectionError: if no attempt yields a successful response.
        """
        for try_n in range(5):
            command = self.get_pump_command(state)
            self.device.post_command(command, 1)
            command.await_cmd()
            # Only a strictly boolean True response counts as success.
            if isinstance(command.response['success'], bool) and command.response['success']:
                command.save_command_to_db()
                self.is_pump_on = state
                return
        raise ConnectionError

    def stabilize(self, od):
        """Turn the pump on above max_od, off below min_od; do nothing in between."""
        if self.is_od_value_too_high(od):
            if not self.is_pump_on:
                self.turn_pump_on()
        elif self.is_od_value_too_low(od):
            if self.is_pump_on:
                self.turn_pump_off()
| [
"numpy.mean",
"numpy.median",
"collections.deque",
"core.task.manager.TaskManager",
"core.device.manager.DeviceManager",
"time.sleep",
"core.utils.observable.Observable",
"core.data.command.Command",
"threading.Thread"
] | [((725, 740), 'collections.deque', 'deque', ([], {'maxlen': '(2)'}), '(maxlen=2)\n', (730, 740), False, 'from collections import deque\n'), ((922, 934), 'core.utils.observable.Observable', 'Observable', ([], {}), '()\n', (932, 934), False, 'from core.utils.observable import Observable, Observer\n'), ((2053, 2131), 'core.data.command.Command', 'Command', (['self.device_id', '"""5"""', '[self.od_channel]', 'self.task_id'], {'is_awaited': '(True)'}), "(self.device_id, '5', [self.od_channel], self.task_id, is_awaited=True)\n", (2060, 2131), False, 'from core.data.command import Command\n'), ((4467, 4491), 'threading.Thread', 'Thread', ([], {'target': 'self._run'}), '(target=self._run)\n', (4473, 4491), False, 'from threading import Thread\n'), ((2741, 2754), 'numpy.mean', 'np.mean', (['data'], {}), '(data)\n', (2748, 2754), True, 'import numpy as np\n'), ((2776, 2791), 'numpy.median', 'np.median', (['data'], {}), '(data)\n', (2785, 2791), True, 'import numpy as np\n'), ((5694, 5718), 'time.sleep', 'sleep', (['self.sleep_period'], {}), '(self.sleep_period)\n', (5699, 5718), False, 'from time import sleep\n'), ((801, 816), 'core.device.manager.DeviceManager', 'DeviceManager', ([], {}), '()\n', (814, 816), False, 'from core.device.manager import DeviceManager\n'), ((6647, 6662), 'core.device.manager.DeviceManager', 'DeviceManager', ([], {}), '()\n', (6660, 6662), False, 'from core.device.manager import DeviceManager\n'), ((6728, 6741), 'core.task.manager.TaskManager', 'TaskManager', ([], {}), '()\n', (6739, 6741), False, 'from core.task.manager import TaskManager\n')] |
"""
Code for:
Trade-offs in Large Scale Distributed Tuplewise Estimation and Learning
Author: <NAME>
"""
import os
import argparse
import logging
import json
import matplotlib.pyplot as plt
import numpy as np
import make_exps as me
DEFAULT_BASE_DIR = "exps"
def make_runs(start_run=0, end_run=25,
              base_dir=DEFAULT_BASE_DIR):
    """Starts runs indexed between start_run and end_run on the database."""
    reshuffle_mods = [1, 5, 25, 125, 10000]
    for i, reshuffle_mod in enumerate(reshuffle_mods):
        run_dirs = []
        for j in range(start_run, end_run):
            # Detach every handler from the root logger so each run can log
            # into its own folder.
            for handler in logging.root.handlers[:]:
                logging.root.removeHandler(handler)
            run_dir = "{}/exp_{}/run_{:02d}".format(base_dir, i, j)
            me.make_exps(reshuffle_mod, run_dir)
            # If one wants to plot again already made results:
            # me.load_results_and_plot(run_dir)
            run_dirs.append(run_dir)
        for plot_type in ("average", "quantile"):
            me.load_all_results_and_plot(run_dirs, type_plot=plot_type)
def make_final_graph(base_dir=DEFAULT_BASE_DIR,
                     start_run=0, end_run=100):
    """Makes the figure 4 of the publication.

    Args:
        base_dir: Base directory containing the exp_{i}/run_{j} folders.
        start_run: First run index (inclusive).
        end_run: Last run index (exclusive).
    """
    plt.style.use('default')
    plt.rc('text', usetex=True)
    plt.rc('font', family='serif')  # sans-
    plt.rcParams.update({'font.size': 16,
                         'font.serif': ['Computer Modern Roman']})
    plt.figure(1, figsize=(8, 7))
    # Subplot position for each experiment index (reshuffle period).
    pos = {4: 221, 2: 222, 1: 223, 0: 224}
    for i, _ in [(4, 10000), (2, 25), (1, 5), (0, 1)]:
        out_folder_list = ["{}/exp_{}/run_{:02d}".format(base_dir, i, j)
                           for j in range(start_run, end_run)]
        res_dict = dict()
        for out_folder in out_folder_list:
            # Use a context manager so the file handle is closed
            # deterministically (json.load(open(...)) leaked it until GC).
            with open("{}/dynamics.json".format(out_folder), "rt") as json_file:
                p_learn = json.load(json_file)
            # Convert to array to make everything plottable.
            for k in p_learn:
                if k.endswith("AUC"):
                    p_learn[k] = np.array(p_learn[k])
                if k in res_dict:
                    res_dict[k].append(p_learn[k])
                else:
                    res_dict[k] = [p_learn[k]]
        out_folder_plot = "/".join(out_folder_list[0].split("/")[:-1])
        plt.subplot(pos[i])
        me.plot_quantiles(res_dict, out_folder_plot, "quantile",
                          pos=pos[i] % 10, saveit=False)
    plt.savefig("cumul_shuttle_exp.pdf")
def make_final_legend():
    """Makes the legend of figure 4 of the publication."""
    legend_fig = plt.figure(figsize=(10, 1))
    me.get_final_graph_legend(legend_fig)
    legend_fig.savefig("cumul_shuttle_leg.pdf")
def main():
    """Parse command-line arguments and launch the experiment runs."""
    # Keep MKL single-threaded so runs do not oversubscribe the CPU.
    os.environ['MKL_NUM_THREADS'] = "1"
    parser = argparse.ArgumentParser()
    for arg_name, arg_help, arg_type in (
            ("start_run", "Beggining of runs indexes.", int),
            ("end_run", "End of run indexes.", int),
            ("base_dir", "Base directory for the experiments.", str)):
        parser.add_argument(arg_name, help=arg_help, type=arg_type)
    args = parser.parse_args()
    make_runs(args.start_run, args.end_run, args.base_dir)
if __name__ == "__main__":
    # Interactive mode, change the parameters.
    # main()
    # Results presented in the paper: regenerate the data pickle, run all
    # 100 runs for every reshuffle period, then build figure 4 and its legend.
    me.convert_data_to_pickle()
    make_runs(0, 100, "exps")
    make_final_graph()
    make_final_legend()
| [
"matplotlib.pyplot.savefig",
"argparse.ArgumentParser",
"make_exps.get_final_graph_legend",
"matplotlib.pyplot.style.use",
"matplotlib.pyplot.rcParams.update",
"matplotlib.pyplot.figure",
"make_exps.convert_data_to_pickle",
"make_exps.load_all_results_and_plot",
"make_exps.make_exps",
"numpy.array... | [((1443, 1467), 'matplotlib.pyplot.style.use', 'plt.style.use', (['"""default"""'], {}), "('default')\n", (1456, 1467), True, 'import matplotlib.pyplot as plt\n'), ((1472, 1499), 'matplotlib.pyplot.rc', 'plt.rc', (['"""text"""'], {'usetex': '(True)'}), "('text', usetex=True)\n", (1478, 1499), True, 'import matplotlib.pyplot as plt\n'), ((1504, 1534), 'matplotlib.pyplot.rc', 'plt.rc', (['"""font"""'], {'family': '"""serif"""'}), "('font', family='serif')\n", (1510, 1534), True, 'import matplotlib.pyplot as plt\n'), ((1547, 1626), 'matplotlib.pyplot.rcParams.update', 'plt.rcParams.update', (["{'font.size': 16, 'font.serif': ['Computer Modern Roman']}"], {}), "({'font.size': 16, 'font.serif': ['Computer Modern Roman']})\n", (1566, 1626), True, 'import matplotlib.pyplot as plt\n'), ((1657, 1686), 'matplotlib.pyplot.figure', 'plt.figure', (['(1)'], {'figsize': '(8, 7)'}), '(1, figsize=(8, 7))\n', (1667, 1686), True, 'import matplotlib.pyplot as plt\n'), ((2652, 2688), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""cumul_shuttle_exp.pdf"""'], {}), "('cumul_shuttle_exp.pdf')\n", (2663, 2688), True, 'import matplotlib.pyplot as plt\n'), ((2784, 2811), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 1)'}), '(figsize=(10, 1))\n', (2794, 2811), True, 'import matplotlib.pyplot as plt\n'), ((2816, 2846), 'make_exps.get_final_graph_legend', 'me.get_final_graph_legend', (['fig'], {}), '(fig)\n', (2841, 2846), True, 'import make_exps as me\n'), ((2954, 2979), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (2977, 2979), False, 'import argparse\n'), ((3518, 3545), 'make_exps.convert_data_to_pickle', 'me.convert_data_to_pickle', ([], {}), '()\n', (3543, 3545), True, 'import make_exps as me\n'), ((1150, 1216), 'make_exps.load_all_results_and_plot', 'me.load_all_results_and_plot', (['out_folder_list'], {'type_plot': '"""average"""'}), "(out_folder_list, type_plot='average')\n", (1178, 1216), True, 'import make_exps as 
me\n'), ((1225, 1292), 'make_exps.load_all_results_and_plot', 'me.load_all_results_and_plot', (['out_folder_list'], {'type_plot': '"""quantile"""'}), "(out_folder_list, type_plot='quantile')\n", (1253, 1292), True, 'import make_exps as me\n'), ((2508, 2527), 'matplotlib.pyplot.subplot', 'plt.subplot', (['pos[i]'], {}), '(pos[i])\n', (2519, 2527), True, 'import matplotlib.pyplot as plt\n'), ((2536, 2627), 'make_exps.plot_quantiles', 'me.plot_quantiles', (['res_dict', 'out_folder_plot', '"""quantile"""'], {'pos': '(pos[i] % 10)', 'saveit': '(False)'}), "(res_dict, out_folder_plot, 'quantile', pos=pos[i] % 10,\n saveit=False)\n", (2553, 2627), True, 'import make_exps as me\n'), ((856, 893), 'make_exps.make_exps', 'me.make_exps', (['reshuffle_mod', 'path_exp'], {}), '(reshuffle_mod, path_exp)\n', (868, 893), True, 'import make_exps as me\n'), ((738, 773), 'logging.root.removeHandler', 'logging.root.removeHandler', (['handler'], {}), '(handler)\n', (764, 773), False, 'import logging\n'), ((2253, 2273), 'numpy.array', 'np.array', (['p_learn[k]'], {}), '(p_learn[k])\n', (2261, 2273), True, 'import numpy as np\n')] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.