code stringlengths 31 1.05M | apis list | extract_api stringlengths 97 1.91M |
|---|---|---|
#!/usr/bin/env python
"""Extract a 1D intensity profile from a NIfTI image along one axis and save it as text."""
from builtins import *
import os
import sys
import argparse
import nibabel as nib
import numpy as np

FileType=argparse.FileType
parser = argparse.ArgumentParser(description='Test multilevel bias corrector.')
parser.add_argument('--infile','-i', metavar='INIMAGE',
                    help='input file', required=True)
parser.add_argument('--outfile','-o', metavar='CSVFILE',
                    help='outfile file', required=True)
parser.add_argument('--along','-a', metavar='x,y,z(,t)', type=str,
                    help='x,y and z locations for profile, e.g. '+
                    '64,:,97 takes profile along y at x=64, z=97 '+
                    '(locations in zero-indexed voxels.', required=True)
args = parser.parse_args()

innii = nib.load(args.infile)
indata = innii.get_fdata()

along = args.along.split(",")
# --along must have one entry per image axis: 3 (x,y,z) or 4 (x,y,z,t).
# (The original test `4 > len(along) < 3` only caught fewer than 3 fields.)
if not 3 <= len(along) <= 4:
    print("--along should be three or four whole numbers and colons (:) "+
          "separated by commas.")
    # os.exit/os.syserr do not exist; exit with a nonzero status instead.
    sys.exit(1)

if len(indata.shape) == 3:
    # Promote 3D volumes to 4D so the axis bookkeeping below is uniform.
    indata = np.expand_dims(indata, 3)

selected = indata
skipaxis = 0
for indices in along:
    if indices == ":":
        # Keep this axis (the profile runs along it); advance past it so
        # subsequent np.take calls address the correct remaining axis.
        skipaxis += 1
    else:
        # Fix this axis at the given voxel index, collapsing one dimension.
        selected = np.take(selected, int(indices), skipaxis)
np.savetxt(args.outfile, selected.reshape(-1), fmt='%1.3e')
| [
"numpy.expand_dims",
"os.exit",
"argparse.ArgumentParser",
"nibabel.load"
] | [((152, 222), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Test multilevel bias corrector."""'}), "(description='Test multilevel bias corrector.')\n", (175, 222), False, 'import argparse\n'), ((757, 778), 'nibabel.load', 'nib.load', (['args.infile'], {}), '(args.infile)\n', (765, 778), True, 'import nibabel as nib\n'), ((968, 986), 'os.exit', 'os.exit', (['os.syserr'], {}), '(os.syserr)\n', (975, 986), False, 'import os\n'), ((1028, 1053), 'numpy.expand_dims', 'np.expand_dims', (['indata', '(3)'], {}), '(indata, 3)\n', (1042, 1053), True, 'import numpy as np\n')] |
# %%
"""
Example implementations of HARK.ConsumptionSaving.ConsPortfolioModel
"""
from copy import copy
from time import time
import matplotlib.pyplot as plt
import numpy as np
from HARK.ConsumptionSaving.ConsIndShockModel import init_lifecycle
from HARK.ConsumptionSaving.ConsPortfolioModel import (
PortfolioConsumerType,
init_portfolio,
)
from HARK.utilities import plot_funcs
# %%
# Make and solve an example portfolio choice consumer type
print("Now solving an example portfolio choice problem; this might take a moment...")
MyType = PortfolioConsumerType()
MyType.cycles = 0  # cycles=0 -> infinite-horizon problem
t0 = time()
MyType.solve()
t1 = time()
# Collect the per-period policy functions (consumption and risky share, adjuster case)
MyType.cFunc = [MyType.solution[t].cFuncAdj for t in range(MyType.T_cycle)]
MyType.ShareFunc = [MyType.solution[t].ShareFuncAdj for t in range(MyType.T_cycle)]
print(
    "Solving an infinite horizon portfolio choice problem took "
    + str(t1 - t0)
    + " seconds."
)
# %%
# Plot the consumption and risky-share functions
print("Consumption function over market resources:")
plot_funcs(MyType.cFunc[0], 0.0, 20.0)
print("Risky asset share as a function of market resources:")
print("Optimal (blue) versus Theoretical Limit (orange)")
plt.xlabel("Normalized Market Resources")
plt.ylabel("Portfolio Share")
plt.ylim(0.0, 1.0)
# Since we are using a discretization of the lognormal distribution,
# the limit is numerically computed and slightly different from
# the analytical limit obtained by Merton and Samuelson for infinite wealth
plot_funcs(
    [
        MyType.ShareFunc[0],
        lambda m: MyType.ShareLimit * np.ones_like(m),
    ],
    0.0,
    200.0,
)
# %%
# Now simulate this consumer type
MyType.track_vars = ["cNrm", "Share", "aNrm", "t_age"]
MyType.T_sim = 100  # number of simulated periods
MyType.initialize_sim()
MyType.simulate()
# %%
print("\n\n\n")
print("For derivation of the numerical limiting portfolio share")
print("as market resources approach infinity, see")
print(
    "http://www.econ2.jhu.edu/people/ccarroll/public/lecturenotes/AssetPricing/Portfolio-CRRA/"
)
# %%
""
# Make another example type, but this one optimizes risky portfolio share only
# on the discrete grid of values implicitly chosen by RiskyCount, using explicit
# value maximization.
init_discrete_share = init_portfolio.copy()
init_discrete_share["DiscreteShareBool"] = True
# Have to actually construct value function for this to work
init_discrete_share["vFuncBool"] = True
# %%
# Make and solve a discrete portfolio choice consumer type
print("Now solving a discrete choice portfolio problem; this might take a minute...")
DiscreteType = PortfolioConsumerType(**init_discrete_share)
DiscreteType.cycles = 0  # infinite horizon
t0 = time()
DiscreteType.solve()
t1 = time()
# Per-period policy functions for the discrete-share solution
DiscreteType.cFunc = [
    DiscreteType.solution[t].cFuncAdj for t in range(DiscreteType.T_cycle)
]
DiscreteType.ShareFunc = [
    DiscreteType.solution[t].ShareFuncAdj for t in range(DiscreteType.T_cycle)
]
print(
    "Solving an infinite horizon discrete portfolio choice problem took "
    + str(t1 - t0)
    + " seconds."
)
# %%
# Plot the consumption and risky-share functions
print("Consumption function over market resources:")
plot_funcs(DiscreteType.cFunc[0], 0.0, 50.0)
print("Risky asset share as a function of market resources:")
print("Optimal (blue) versus Theoretical Limit (orange)")
plt.xlabel("Normalized Market Resources")
plt.ylabel("Portfolio Share")
plt.ylim(0.0, 1.0)
# Since we are using a discretization of the lognormal distribution,
# the limit is numerically computed and slightly different from
# the analytical limit obtained by Merton and Samuelson for infinite wealth
plot_funcs(
    [DiscreteType.ShareFunc[0], lambda m: DiscreteType.ShareLimit * np.ones_like(m)],
    0.0,
    200.0,
)
# %%
print("\n\n\n")
# %%
""
# Make another example type, but this one can only update their risky portfolio
# share in any particular period with 15% probability.
init_sticky_share = init_portfolio.copy()
init_sticky_share["AdjustPrb"] = 0.15  # probability of being allowed to adjust the share each period
# %%
# Make and solve a discrete portfolio choice consumer type
print(
    'Now solving a portfolio choice problem with "sticky" portfolio shares; this might take a moment...'
)
StickyType = PortfolioConsumerType(**init_sticky_share)
StickyType.cycles = 0
t0 = time()
StickyType.solve()
t1 = time()
# Consumption policies differ depending on whether the agent may adjust (Adj)
# or is stuck with the current share (Fxd: a function of both m and the share).
StickyType.cFuncAdj = [
    StickyType.solution[t].cFuncAdj for t in range(StickyType.T_cycle)
]
StickyType.cFuncFxd = [
    StickyType.solution[t].cFuncFxd for t in range(StickyType.T_cycle)
]
StickyType.ShareFunc = [
    StickyType.solution[t].ShareFuncAdj for t in range(StickyType.T_cycle)
]
print(
    "Solving an infinite horizon sticky portfolio choice problem took "
    + str(t1 - t0)
    + " seconds."
)
# %%
# Plot the consumption and risky-share functions
print(
    "Consumption function over market resources when the agent can adjust his portfolio:"
)
plot_funcs(StickyType.cFuncAdj[0], 0.0, 50.0)
# %%
print(
    "Consumption function over market resources when the agent CAN'T adjust, by current share:"
)
M = np.linspace(0.0, 50.0, 200)
# One consumption curve per fixed portfolio share s in [0, 1]
for s in np.linspace(0.0, 1.0, 21):
    C = StickyType.cFuncFxd[0](M, s * np.ones_like(M))
    plt.plot(M, C)
plt.xlim(0.0, 50.0)
plt.ylim(0.0, None)
plt.show()
# %%
print("Risky asset share function over market resources (when possible to adjust):")
print("Optimal (blue) versus Theoretical Limit (orange)")
plt.xlabel("Normalized Market Resources")
plt.ylabel("Portfolio Share")
plt.ylim(0.0, 1.0)
plot_funcs(
    [StickyType.ShareFunc[0], lambda m: StickyType.ShareLimit * np.ones_like(m)],
    0.0,
    200.0,
)
# %%
""
# Make another example type, but this one has *age-varying* perceptions of risky asset returns.
# Begin by making a lifecycle dictionary, but adjusted for the portfolio choice model.
init_age_varying_risk_perceptions = copy(init_lifecycle)
# Carry the portfolio-model grid and preference parameters into the lifecycle dictionary
init_age_varying_risk_perceptions["RiskyCount"] = init_portfolio["RiskyCount"]
init_age_varying_risk_perceptions["ShareCount"] = init_portfolio["ShareCount"]
init_age_varying_risk_perceptions["aXtraMax"] = init_portfolio["aXtraMax"]
init_age_varying_risk_perceptions["aXtraCount"] = init_portfolio["aXtraCount"]
init_age_varying_risk_perceptions["aXtraNestFac"] = init_portfolio["aXtraNestFac"]
init_age_varying_risk_perceptions["BoroCnstArt"] = init_portfolio["BoroCnstArt"]
init_age_varying_risk_perceptions["CRRA"] = init_portfolio["CRRA"]
init_age_varying_risk_perceptions["DiscFac"] = init_portfolio["DiscFac"]
# %%
# Perceived returns: constant mean, but perceived risk grows linearly with age
init_age_varying_risk_perceptions["RiskyAvg"] = [1.08] * init_lifecycle["T_cycle"]
init_age_varying_risk_perceptions["RiskyStd"] = list(
    np.linspace(0.20, 0.30, init_lifecycle["T_cycle"])
)
init_age_varying_risk_perceptions["RiskyAvgTrue"] = 1.08
init_age_varying_risk_perceptions["RiskyStdTrue"] = 0.20
AgeVaryingRiskPercType = PortfolioConsumerType(**init_age_varying_risk_perceptions)
AgeVaryingRiskPercType.cycles = 1  # cycles=1 -> finite lifecycle problem
# %%
# Solve the agent type with age-varying risk perceptions
# print('Now solving a portfolio choice problem with age-varying risk perceptions...')
t0 = time()
AgeVaryingRiskPercType.solve()
AgeVaryingRiskPercType.cFunc = [
    AgeVaryingRiskPercType.solution[t].cFuncAdj
    for t in range(AgeVaryingRiskPercType.T_cycle)
]
AgeVaryingRiskPercType.ShareFunc = [
    AgeVaryingRiskPercType.solution[t].ShareFuncAdj
    for t in range(AgeVaryingRiskPercType.T_cycle)
]
t1 = time()
print(
    "Solving a "
    + str(AgeVaryingRiskPercType.T_cycle)
    + " period portfolio choice problem with age-varying risk perceptions took "
    + str(t1 - t0)
    + " seconds."
)
# %%
# Plot the consumption and risky-share functions
print("Consumption function over market resources in each lifecycle period:")
plot_funcs(AgeVaryingRiskPercType.cFunc, 0.0, 20.0)
print("Risky asset share function over market resources in each lifecycle period:")
plot_funcs(AgeVaryingRiskPercType.ShareFunc, 0.0, 200.0)
# %% [markdown]
# The code below tests the mathematical limits of the model.
# %%
# Create a grid of market resources for the plots
mMin = 0  # Minimum ratio of assets to income to plot
mMax = 5 * 1e2  # Maximum ratio of assets to income to plot
mPts = 1000  # Number of points to plot
eevalgrid = np.linspace(0, mMax, mPts)  # range of values of assets for the plot
# Number of points that will be used to approximate the risky distribution
risky_count_grid = [5, 50, 200]
# Plot by ages (time periods) at which to plot. We will use the default life-cycle calibration.
ages = [2, 4, 6, 8]
# Create lifecycle dictionary with portfolio choice parameters
merton_dict = copy(init_lifecycle)
merton_dict["RiskyCount"] = init_portfolio["RiskyCount"]
merton_dict["ShareCount"] = init_portfolio["ShareCount"]
merton_dict["aXtraMax"] = init_portfolio["aXtraMax"]
merton_dict["aXtraCount"] = init_portfolio["aXtraCount"]
merton_dict["aXtraNestFac"] = init_portfolio["aXtraNestFac"]
merton_dict["BoroCnstArt"] = init_portfolio["BoroCnstArt"]
merton_dict["CRRA"] = init_portfolio["CRRA"]
merton_dict["DiscFac"] = init_portfolio["DiscFac"]
merton_dict["RiskyAvgTrue"] = 1.08
merton_dict["RiskyStdTrue"] = 0.20
# Create a function to compute the Merton-Samuelson limiting portfolio share.
def RiskyShareMertSamLogNormal(RiskPrem, CRRA, RiskyVar):
return RiskPrem / (CRRA * RiskyVar)
# %% Calibration and solution
# For each discretization fineness, compare the numerically computed limiting
# share against the analytical Merton-Samuelson value.
for rcount in risky_count_grid:
    # Create a new dictionary and replace the number of points that
    # approximate the risky return distribution
    # Create new dictionary copying the default
    merton_dict["RiskyCount"] = rcount
    # Create and solve agent
    agent = PortfolioConsumerType(**merton_dict)
    agent.solve()
    # Compute the analytical Merton-Samuelson limiting portfolio share
    RiskyVar = agent.RiskyStd ** 2
    RiskPrem = agent.RiskyAvg - agent.Rfree
    MS_limit = RiskyShareMertSamLogNormal(RiskPrem, agent.CRRA, RiskyVar)
    # Now compute the limiting share numerically, using the approximated
    # distribution
    agent.update_ShareLimit()
    NU_limit = agent.ShareLimit
    plt.figure()
    for a in ages:
        plt.plot(
            eevalgrid, agent.solution[a].ShareFuncAdj(eevalgrid), label="t = %i" % (a)
        )
    plt.axhline(
        NU_limit, c="k", ls="-.", label="Exact limit as $m\\rightarrow \\infty$."
    )
    plt.axhline(
        MS_limit, c="k", ls="--", label="M&S Limit without returns discretization."
    )
    plt.ylim(0, 1.05)
    plt.xlim(eevalgrid[0] + 1, eevalgrid[-1])
    plt.xscale("log")
    plt.legend()
    plt.title(
        "Risky Portfolio Share by Age\n Risky distribution with {points} equiprobable points".format(
            points=rcount
        )
    )
    plt.xlabel("Wealth (m)")
    plt.ioff()
    plt.draw()
# %%
# %%
# %%
# %%
| [
"matplotlib.pyplot.figure",
"HARK.utilities.plot_funcs",
"matplotlib.pyplot.draw",
"numpy.linspace",
"matplotlib.pyplot.axhline",
"HARK.ConsumptionSaving.ConsPortfolioModel.init_portfolio.copy",
"matplotlib.pyplot.show",
"numpy.ones_like",
"matplotlib.pyplot.ylim",
"matplotlib.pyplot.legend",
"m... | [((550, 573), 'HARK.ConsumptionSaving.ConsPortfolioModel.PortfolioConsumerType', 'PortfolioConsumerType', ([], {}), '()\n', (571, 573), False, 'from HARK.ConsumptionSaving.ConsPortfolioModel import PortfolioConsumerType, init_portfolio\n'), ((597, 603), 'time.time', 'time', ([], {}), '()\n', (601, 603), False, 'from time import time\n'), ((624, 630), 'time.time', 'time', ([], {}), '()\n', (628, 630), False, 'from time import time\n'), ((1010, 1048), 'HARK.utilities.plot_funcs', 'plot_funcs', (['MyType.cFunc[0]', '(0.0)', '(20.0)'], {}), '(MyType.cFunc[0], 0.0, 20.0)\n', (1020, 1048), False, 'from HARK.utilities import plot_funcs\n'), ((1169, 1210), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Normalized Market Resources"""'], {}), "('Normalized Market Resources')\n", (1179, 1210), True, 'import matplotlib.pyplot as plt\n'), ((1211, 1240), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Portfolio Share"""'], {}), "('Portfolio Share')\n", (1221, 1240), True, 'import matplotlib.pyplot as plt\n'), ((1241, 1259), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(0.0)', '(1.0)'], {}), '(0.0, 1.0)\n', (1249, 1259), True, 'import matplotlib.pyplot as plt\n'), ((2214, 2235), 'HARK.ConsumptionSaving.ConsPortfolioModel.init_portfolio.copy', 'init_portfolio.copy', ([], {}), '()\n', (2233, 2235), False, 'from HARK.ConsumptionSaving.ConsPortfolioModel import PortfolioConsumerType, init_portfolio\n'), ((2551, 2595), 'HARK.ConsumptionSaving.ConsPortfolioModel.PortfolioConsumerType', 'PortfolioConsumerType', ([], {}), '(**init_discrete_share)\n', (2572, 2595), False, 'from HARK.ConsumptionSaving.ConsPortfolioModel import PortfolioConsumerType, init_portfolio\n'), ((2625, 2631), 'time.time', 'time', ([], {}), '()\n', (2629, 2631), False, 'from time import time\n'), ((2658, 2664), 'time.time', 'time', ([], {}), '()\n', (2662, 2664), False, 'from time import time\n'), ((3101, 3145), 'HARK.utilities.plot_funcs', 'plot_funcs', (['DiscreteType.cFunc[0]', '(0.0)', '(50.0)'], {}), 
'(DiscreteType.cFunc[0], 0.0, 50.0)\n', (3111, 3145), False, 'from HARK.utilities import plot_funcs\n'), ((3266, 3307), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Normalized Market Resources"""'], {}), "('Normalized Market Resources')\n", (3276, 3307), True, 'import matplotlib.pyplot as plt\n'), ((3308, 3337), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Portfolio Share"""'], {}), "('Portfolio Share')\n", (3318, 3337), True, 'import matplotlib.pyplot as plt\n'), ((3338, 3356), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(0.0)', '(1.0)'], {}), '(0.0, 1.0)\n', (3346, 3356), True, 'import matplotlib.pyplot as plt\n'), ((3872, 3893), 'HARK.ConsumptionSaving.ConsPortfolioModel.init_portfolio.copy', 'init_portfolio.copy', ([], {}), '()\n', (3891, 3893), False, 'from HARK.ConsumptionSaving.ConsPortfolioModel import PortfolioConsumerType, init_portfolio\n'), ((4124, 4166), 'HARK.ConsumptionSaving.ConsPortfolioModel.PortfolioConsumerType', 'PortfolioConsumerType', ([], {}), '(**init_sticky_share)\n', (4145, 4166), False, 'from HARK.ConsumptionSaving.ConsPortfolioModel import PortfolioConsumerType, init_portfolio\n'), ((4194, 4200), 'time.time', 'time', ([], {}), '()\n', (4198, 4200), False, 'from time import time\n'), ((4225, 4231), 'time.time', 'time', ([], {}), '()\n', (4229, 4231), False, 'from time import time\n'), ((4800, 4845), 'HARK.utilities.plot_funcs', 'plot_funcs', (['StickyType.cFuncAdj[0]', '(0.0)', '(50.0)'], {}), '(StickyType.cFuncAdj[0], 0.0, 50.0)\n', (4810, 4845), False, 'from HARK.utilities import plot_funcs\n'), ((4961, 4988), 'numpy.linspace', 'np.linspace', (['(0.0)', '(50.0)', '(200)'], {}), '(0.0, 50.0, 200)\n', (4972, 4988), True, 'import numpy as np\n'), ((4998, 5023), 'numpy.linspace', 'np.linspace', (['(0.0)', '(1.0)', '(21)'], {}), '(0.0, 1.0, 21)\n', (5009, 5023), True, 'import numpy as np\n'), ((5099, 5118), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(0.0)', '(50.0)'], {}), '(0.0, 50.0)\n', (5107, 5118), True, 'import matplotlib.pyplot as 
plt\n'), ((5119, 5138), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(0.0)', 'None'], {}), '(0.0, None)\n', (5127, 5138), True, 'import matplotlib.pyplot as plt\n'), ((5139, 5149), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5147, 5149), True, 'import matplotlib.pyplot as plt\n'), ((5299, 5340), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Normalized Market Resources"""'], {}), "('Normalized Market Resources')\n", (5309, 5340), True, 'import matplotlib.pyplot as plt\n'), ((5341, 5370), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Portfolio Share"""'], {}), "('Portfolio Share')\n", (5351, 5370), True, 'import matplotlib.pyplot as plt\n'), ((5371, 5389), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(0.0)', '(1.0)'], {}), '(0.0, 1.0)\n', (5379, 5389), True, 'import matplotlib.pyplot as plt\n'), ((5734, 5754), 'copy.copy', 'copy', (['init_lifecycle'], {}), '(init_lifecycle)\n', (5738, 5754), False, 'from copy import copy\n'), ((6710, 6768), 'HARK.ConsumptionSaving.ConsPortfolioModel.PortfolioConsumerType', 'PortfolioConsumerType', ([], {}), '(**init_age_varying_risk_perceptions)\n', (6731, 6768), False, 'from HARK.ConsumptionSaving.ConsPortfolioModel import PortfolioConsumerType, init_portfolio\n'), ((6958, 6964), 'time.time', 'time', ([], {}), '()\n', (6962, 6964), False, 'from time import time\n'), ((7277, 7283), 'time.time', 'time', ([], {}), '()\n', (7281, 7283), False, 'from time import time\n'), ((7603, 7654), 'HARK.utilities.plot_funcs', 'plot_funcs', (['AgeVaryingRiskPercType.cFunc', '(0.0)', '(20.0)'], {}), '(AgeVaryingRiskPercType.cFunc, 0.0, 20.0)\n', (7613, 7654), False, 'from HARK.utilities import plot_funcs\n'), ((7739, 7795), 'HARK.utilities.plot_funcs', 'plot_funcs', (['AgeVaryingRiskPercType.ShareFunc', '(0.0)', '(200.0)'], {}), '(AgeVaryingRiskPercType.ShareFunc, 0.0, 200.0)\n', (7749, 7795), False, 'from HARK.utilities import plot_funcs\n'), ((8097, 8123), 'numpy.linspace', 'np.linspace', (['(0)', 'mMax', 'mPts'], {}), '(0, mMax, mPts)\n', 
(8108, 8123), True, 'import numpy as np\n'), ((8468, 8488), 'copy.copy', 'copy', (['init_lifecycle'], {}), '(init_lifecycle)\n', (8472, 8488), False, 'from copy import copy\n'), ((5084, 5098), 'matplotlib.pyplot.plot', 'plt.plot', (['M', 'C'], {}), '(M, C)\n', (5092, 5098), True, 'import matplotlib.pyplot as plt\n'), ((6518, 6566), 'numpy.linspace', 'np.linspace', (['(0.2)', '(0.3)', "init_lifecycle['T_cycle']"], {}), "(0.2, 0.3, init_lifecycle['T_cycle'])\n", (6529, 6566), True, 'import numpy as np\n'), ((9488, 9524), 'HARK.ConsumptionSaving.ConsPortfolioModel.PortfolioConsumerType', 'PortfolioConsumerType', ([], {}), '(**merton_dict)\n', (9509, 9524), False, 'from HARK.ConsumptionSaving.ConsPortfolioModel import PortfolioConsumerType, init_portfolio\n'), ((9928, 9940), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (9938, 9940), True, 'import matplotlib.pyplot as plt\n'), ((10080, 10171), 'matplotlib.pyplot.axhline', 'plt.axhline', (['NU_limit'], {'c': '"""k"""', 'ls': '"""-."""', 'label': '"""Exact limit as $m\\\\rightarrow \\\\infty$."""'}), "(NU_limit, c='k', ls='-.', label=\n 'Exact limit as $m\\\\rightarrow \\\\infty$.')\n", (10091, 10171), True, 'import matplotlib.pyplot as plt\n'), ((10185, 10278), 'matplotlib.pyplot.axhline', 'plt.axhline', (['MS_limit'], {'c': '"""k"""', 'ls': '"""--"""', 'label': '"""M&S Limit without returns discretization."""'}), "(MS_limit, c='k', ls='--', label=\n 'M&S Limit without returns discretization.')\n", (10196, 10278), True, 'import matplotlib.pyplot as plt\n'), ((10293, 10310), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(0)', '(1.05)'], {}), '(0, 1.05)\n', (10301, 10310), True, 'import matplotlib.pyplot as plt\n'), ((10315, 10356), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(eevalgrid[0] + 1)', 'eevalgrid[-1]'], {}), '(eevalgrid[0] + 1, eevalgrid[-1])\n', (10323, 10356), True, 'import matplotlib.pyplot as plt\n'), ((10361, 10378), 'matplotlib.pyplot.xscale', 'plt.xscale', (['"""log"""'], {}), "('log')\n", (10371, 
10378), True, 'import matplotlib.pyplot as plt\n'), ((10383, 10395), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (10393, 10395), True, 'import matplotlib.pyplot as plt\n'), ((10559, 10583), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Wealth (m)"""'], {}), "('Wealth (m)')\n", (10569, 10583), True, 'import matplotlib.pyplot as plt\n'), ((10589, 10599), 'matplotlib.pyplot.ioff', 'plt.ioff', ([], {}), '()\n', (10597, 10599), True, 'import matplotlib.pyplot as plt\n'), ((10604, 10614), 'matplotlib.pyplot.draw', 'plt.draw', ([], {}), '()\n', (10612, 10614), True, 'import matplotlib.pyplot as plt\n'), ((5063, 5078), 'numpy.ones_like', 'np.ones_like', (['M'], {}), '(M)\n', (5075, 5078), True, 'import numpy as np\n'), ((1554, 1569), 'numpy.ones_like', 'np.ones_like', (['m'], {}), '(m)\n', (1566, 1569), True, 'import numpy as np\n'), ((3646, 3661), 'numpy.ones_like', 'np.ones_like', (['m'], {}), '(m)\n', (3658, 3661), True, 'import numpy as np\n'), ((5466, 5481), 'numpy.ones_like', 'np.ones_like', (['m'], {}), '(m)\n', (5478, 5481), True, 'import numpy as np\n')] |
"""
This module collects several standard analysis routines.
"""
import numpy as np
import pickle
from tqdm import tqdm
from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.gaussian_process.kernels import RBF, ConstantKernel as C
import mst_ida.models.mesxr3 as m3
import mst_ida.models.base.flux_maps as fm
import mst_ida.data.mesxr as mesxr
from mst_ida.analysis.samplers import Quad_Sampler
from mst_ida.models.base.geometry import flux_coords
# Pickled count-ratio lookup tables for the low- and mid-energy ME-SXR
# threshold configurations (PPCD plasmas), used as defaults by the analyzers.
lowE_fname = '/home/pdvanmeter/data/ratio_tables/rt_mesxr_lowE_ppcd.pkl'
midE_fname = '/home/pdvanmeter/data/ratio_tables/rt_mesxr_midE_ppcd.pkl'
class Ratio_Analyzer_midE(object):
    """
    Infers electron temperature (Te) profiles from ME-SXR multi-threshold count
    ratios in the mid-energy configuration.

    For each line of sight the measured count ratios are compared against a
    precomputed ratio table to form a Te likelihood, which is sampled with a
    Quad_Sampler. Chord results are mapped to flux-surface coordinate rho and
    interpolated with a Gaussian-process regression.
    """
    def __init__(self, shot, frame, priors=[200,2400], center=True, smooth=True, fname=midE_fname,
                 delta_a=0.06, delta_h=0.01):
        """
        Args:
            shot: (int) Shot number to analyze.
            frame: (int) Frame index within the shot.
            priors: Bounds (in eV) of the Te prior handed to each Quad_Sampler.
                (Read-only; the mutable default is never modified.)
            center: (bool) Passed through to the ME-SXR data loaders.
            smooth: (bool) Use smoothed (True) or raw (False) profile data.
            fname: (str) Path to the pickled ratio-table file.
            delta_a, delta_h: Shift parameters for the fallback model flux
                geometry, used when no MSTfit reconstruction is available.
        """
        self.shot = shot
        self.frame = frame
        self._center = center
        self._smooth = smooth
        self._priors = priors
        # Load the ratio data
        self.ratios, self.sigmas, self.ps, self.thresholds = mesxr.get_ratio_data(
            self.shot, self.frame, smooth=self._smooth, center=self._center)
        # Load the ratio tables
        with open(fname, 'rb') as f:
            self.r_table = pickle.load(f)
        # Load the flux surface mapping; fall back to the analytic model geometry
        # when no MSTfit reconstruction exists for this shot/frame.
        try:
            self._flux = fm.MSTfit_Flux(shot=self.shot, frame=self.frame)
            # Report success only after the load actually succeeds.
            print('MSTfit reconstruction loaded.')
        except Exception:
            print('MSTfit reconstruction not available. Defaulting to model.')
            self._flux = flux_coords(delta_a=delta_a, delta_h=delta_h)
        # Load the model geometry
        self._det = m3.MESXR_Avg(shot=shot, mode='midE', pfm=True)
        nlos = len(self._det.los)
        # Minimum rho reached along each line of sight (flux-space impact parameter)
        self._rhos_all = np.zeros(nlos)
        for ii,los in enumerate(self._det.los):
            ell_max = los.intercept_with_circle(0.52)
            ells = np.linspace(-ell_max, ell_max, num=100)
            xs, ys = los.get_xy(ells)
            self._rhos_all[ii] = np.amin(self._flux.rho(xs,ys))
        # One sampler per chord; n=index freezes the chord index at definition time.
        self._samplers = [Quad_Sampler(lambda x, n=index: self._ln_likelihood(x,n), self._priors, vect=True) for index in range(nlos)]

    def fit(self, indices):
        """
        Run the Te samplers for the given chord indices and fit a GP to the
        resulting Te(rho) points.

        Args:
            indices: Array-like of chord indices to analyze.
        """
        # Fit the data for all specified indices
        for index in indices:
            self._samplers[index].fit()
        # Get summary statistics
        self._Tes_chord = np.array([self._samplers[index].mean for index in indices])
        self._Te_errs_chord = np.array([self._samplers[index].std for index in indices])
        self._rhos_chord = self._rhos_all[indices]
        # Sort the results by flux surfaces
        sorted_indices = np.argsort(self._rhos_chord)
        self.Tes = self._Tes_chord[sorted_indices]
        self.Te_errs = self._Te_errs_chord[sorted_indices]
        self.rhos = self._rhos_chord[sorted_indices]
        # Set up the profile predictor (GP over rho with per-point noise)
        X = np.atleast_2d(self.rhos).T
        y = np.maximum(self.Tes, 0)  # clamp unphysical negative temperatures
        alpha = self.Te_errs**2      # measurement variance enters as GP noise
        kernel = C(1000, (1, 1e4)) * RBF(0.05, (1e-3, 1))
        self._gp = GaussianProcessRegressor(kernel=kernel, n_restarts_optimizer=9, alpha=alpha)
        self._gp.fit(X,y)

    def predict(self, rhos):
        """
        Evaluate the fitted GP temperature profile.

        Args:
            rhos: Array-like of flux-surface coordinates to evaluate.
        Returns:
            (y_pred, y_sigma): Predicted Te values and 1-sigma uncertainties.
        """
        # Predict, then convert back to normal dimensions
        xs = np.atleast_2d(rhos).T
        y_pred, y_sigma = self._gp.predict(xs, return_std=True)
        xs = np.squeeze(xs)
        y_pred = np.squeeze(y_pred)
        y_sigma = np.squeeze(y_sigma)
        return y_pred, y_sigma

    def get_data(self):
        """
        Returns the ME-SXR data used to generate the ratios, either smoothed or raw. This is just a wrapper meant to simplify access.

        Returns:
            mesxr_data: (dict) ME-SXR 1D profiles, sorted by threshold.
            mesxr_sigma: (dict) Uncertainties associated with each measurement, sorted by threshold.
            mesxr_ps: (numpy.ndarray*) Average chord radius for each set of measurements. *If smooth=False, this will be a dictionary
                indexed in the same was as mesxr_data.
            thresholds: (numpy.ndarray) List of thresholds in the detector configuration.
        """
        if self._smooth:
            return mesxr.get_smooth_data(self.shot, self.frame, center=self._center)
        else:
            return mesxr.get_8c_data(self.shot, self.frame, center=self._center)

    def _ln_likelihood(self, Te, index):
        """
        Defines the log of the likelihood function for each chord.

        Args:
            Te = (np.ndarray) Array of values (in eV) for which to evaluate the likelihood function.
            index = (int) Index specifying a particular chord.
        """
        Te = np.atleast_1d(Te)
        # Interpolate the modeled ratio at each trial Te for every threshold
        r_model = np.zeros([len(Te), len(self.thresholds)])
        for n,Ec in enumerate(self.thresholds):
            r_model[:,n] = np.interp(Te, self.r_table.temp[:,index], self.r_table.ratio[Ec][:,index])
        # Gaussian chi-squared of measured vs. modeled ratios, summed over thresholds
        chi2 = np.zeros([len(Te), len(self.thresholds)])
        for ii in range(len(Te)):
            chi2[ii,:] = (self.ratios[index,:] - r_model[ii,:])**2 / self.sigmas[index,:]**2
        return -0.5*np.sum(chi2, axis=1)
"numpy.maximum",
"numpy.sum",
"sklearn.gaussian_process.kernels.ConstantKernel",
"numpy.argsort",
"pickle.load",
"mst_ida.data.mesxr.get_smooth_data",
"numpy.interp",
"numpy.atleast_2d",
"numpy.linspace",
"sklearn.gaussian_process.GaussianProcessRegressor",
"sklearn.gaussian_process.kernels.RBF"... | [((1086, 1176), 'mst_ida.data.mesxr.get_ratio_data', 'mesxr.get_ratio_data', (['self.shot', 'self.frame'], {'smooth': 'self._smooth', 'center': 'self._center'}), '(self.shot, self.frame, smooth=self._smooth, center=\n self._center)\n', (1106, 1176), True, 'import mst_ida.data.mesxr as mesxr\n'), ((1802, 1848), 'mst_ida.models.mesxr3.MESXR_Avg', 'm3.MESXR_Avg', ([], {'shot': 'shot', 'mode': '"""midE"""', 'pfm': '(True)'}), "(shot=shot, mode='midE', pfm=True)\n", (1814, 1848), True, 'import mst_ida.models.mesxr3 as m3\n'), ((1910, 1924), 'numpy.zeros', 'np.zeros', (['nlos'], {}), '(nlos)\n', (1918, 1924), True, 'import numpy as np\n'), ((2639, 2698), 'numpy.array', 'np.array', (['[self._samplers[index].mean for index in indices]'], {}), '([self._samplers[index].mean for index in indices])\n', (2647, 2698), True, 'import numpy as np\n'), ((2730, 2788), 'numpy.array', 'np.array', (['[self._samplers[index].std for index in indices]'], {}), '([self._samplers[index].std for index in indices])\n', (2738, 2788), True, 'import numpy as np\n'), ((2922, 2950), 'numpy.argsort', 'np.argsort', (['self._rhos_chord'], {}), '(self._rhos_chord)\n', (2932, 2950), True, 'import numpy as np\n'), ((3220, 3243), 'numpy.maximum', 'np.maximum', (['self.Tes', '(0)'], {}), '(self.Tes, 0)\n', (3230, 3243), True, 'import numpy as np\n'), ((3356, 3432), 'sklearn.gaussian_process.GaussianProcessRegressor', 'GaussianProcessRegressor', ([], {'kernel': 'kernel', 'n_restarts_optimizer': '(9)', 'alpha': 'alpha'}), '(kernel=kernel, n_restarts_optimizer=9, alpha=alpha)\n', (3380, 3432), False, 'from sklearn.gaussian_process import GaussianProcessRegressor\n'), ((3696, 3710), 'numpy.squeeze', 'np.squeeze', (['xs'], {}), '(xs)\n', (3706, 3710), True, 'import numpy as np\n'), ((3729, 3747), 'numpy.squeeze', 'np.squeeze', (['y_pred'], {}), '(y_pred)\n', (3739, 3747), True, 'import numpy as np\n'), ((3767, 3786), 'numpy.squeeze', 'np.squeeze', (['y_sigma'], {}), 
'(y_sigma)\n', (3777, 3786), True, 'import numpy as np\n'), ((5038, 5055), 'numpy.atleast_1d', 'np.atleast_1d', (['Te'], {}), '(Te)\n', (5051, 5055), True, 'import numpy as np\n'), ((1364, 1378), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (1375, 1378), False, 'import pickle\n'), ((1526, 1574), 'mst_ida.models.base.flux_maps.MSTfit_Flux', 'fm.MSTfit_Flux', ([], {'shot': 'self.shot', 'frame': 'self.frame'}), '(shot=self.shot, frame=self.frame)\n', (1540, 1574), True, 'import mst_ida.models.base.flux_maps as fm\n'), ((2051, 2090), 'numpy.linspace', 'np.linspace', (['(-ell_max)', 'ell_max'], {'num': '(100)'}), '(-ell_max, ell_max, num=100)\n', (2062, 2090), True, 'import numpy as np\n'), ((3180, 3204), 'numpy.atleast_2d', 'np.atleast_2d', (['self.rhos'], {}), '(self.rhos)\n', (3193, 3204), True, 'import numpy as np\n'), ((3295, 3316), 'sklearn.gaussian_process.kernels.ConstantKernel', 'C', (['(1000)', '(1, 10000.0)'], {}), '(1000, (1, 10000.0))\n', (3296, 3316), True, 'from sklearn.gaussian_process.kernels import RBF, ConstantKernel as C\n'), ((3315, 3336), 'sklearn.gaussian_process.kernels.RBF', 'RBF', (['(0.05)', '(0.001, 1)'], {}), '(0.05, (0.001, 1))\n', (3318, 3336), False, 'from sklearn.gaussian_process.kernels import RBF, ConstantKernel as C\n'), ((3595, 3614), 'numpy.atleast_2d', 'np.atleast_2d', (['rhos'], {}), '(rhos)\n', (3608, 3614), True, 'import numpy as np\n'), ((4534, 4599), 'mst_ida.data.mesxr.get_smooth_data', 'mesxr.get_smooth_data', (['self.shot', 'self.frame'], {'center': 'self._center'}), '(self.shot, self.frame, center=self._center)\n', (4555, 4599), True, 'import mst_ida.data.mesxr as mesxr\n'), ((4635, 4696), 'mst_ida.data.mesxr.get_8c_data', 'mesxr.get_8c_data', (['self.shot', 'self.frame'], {'center': 'self._center'}), '(self.shot, self.frame, center=self._center)\n', (4652, 4696), True, 'import mst_ida.data.mesxr as mesxr\n'), ((5194, 5270), 'numpy.interp', 'np.interp', (['Te', 'self.r_table.temp[:, index]', 
'self.r_table.ratio[Ec][:, index]'], {}), '(Te, self.r_table.temp[:, index], self.r_table.ratio[Ec][:, index])\n', (5203, 5270), True, 'import numpy as np\n'), ((5505, 5525), 'numpy.sum', 'np.sum', (['chi2'], {'axis': '(1)'}), '(chi2, axis=1)\n', (5511, 5525), True, 'import numpy as np\n'), ((1698, 1743), 'mst_ida.models.base.geometry.flux_coords', 'flux_coords', ([], {'delta_a': 'delta_a', 'delta_h': 'delta_h'}), '(delta_a=delta_a, delta_h=delta_h)\n', (1709, 1743), False, 'from mst_ida.models.base.geometry import flux_coords\n')] |
from tensorflow import keras
from tensorflow.keras.preprocessing import image
from tensorflow.keras.preprocessing.image import ImageDataGenerator
import os
import shutil
import random
import numpy as np
def predict_image(img_path, model_name):
    """
    Classify an image with a saved Keras model.

    Args:
        img_path: Path to the image file to classify.
        model_name: File name of the saved model inside the models directory.

    Returns:
        str: Predicted label, one of "Kanye", "Neither", or "Santi".
    """
    models_dir = '/app/models'  # container mount point for trained models
    model_path = os.path.join(models_dir, model_name)
    model = keras.models.load_model(model_path)
    # The model expects a 160x160 RGB image with a leading batch dimension.
    img = image.load_img(img_path, target_size=(160, 160))
    x = image.img_to_array(img)
    x = np.expand_dims(x, axis=0)
    # (The former np.vstack([x]) was a no-op after expand_dims and was removed.)
    pred = model.predict(x)
    pred_class = pred.argmax(axis=-1)
    labels = ["Kanye", "Neither", "Santi"]
    predicted_label = labels[int(pred_class)]
    print(pred, pred_class)
    return predicted_label
def copy_to(DIR, LIST, SOURCE):
    """Copy each named file from SOURCE into DIR, skipping zero-byte files."""
    for fname in LIST:
        src_path = os.path.join(SOURCE, fname)
        # Empty (zero-byte) files are ignored entirely.
        if os.path.getsize(src_path) == 0:
            continue
        shutil.copyfile(src_path, os.path.join(DIR, fname))
def split_data(SOURCE, TRAIN, VAL, TEST, SPLIT_SIZE):
    """
    Randomly partition the files in SOURCE into TRAIN/VAL/TEST directories.

    SPLIT_SIZE is the fraction assigned to TRAIN; whatever remains is split
    evenly between VAL and TEST. Requires at least 10 files in SOURCE.
    """
    filenames = os.listdir(SOURCE)
    count = len(filenames)
    if count < 10:
        print("Must have at least 10 images to use this function")
        return
    # Draw the training subset, then halve the remainder into val/test.
    train_set = random.sample(filenames, int(count * SPLIT_SIZE))
    remaining = [name for name in filenames if name not in train_set]
    val_set = random.sample(remaining, int(0.5 * len(remaining)))
    test_set = [name for name in remaining if name not in val_set]
    copy_to(TRAIN, train_set, SOURCE)
    copy_to(VAL, val_set, SOURCE)
    copy_to(TEST, test_set, SOURCE)
    print('Data successfully split!')
def evaluate_model(model_name):
test_datagen = ImageDataGenerator(rescale=1./255.)
test_generator = test_datagen.flow_from_directory(
'C:/Users/perro/PycharmProjects/cv_proj/data/test',
target_size=(300,300),
batch_size=5,
class_mode='binary'
)
models_dir = 'C:/Users/perro/PycharmProjects/cv_proj/cv_models'
model_path = os.path.join(models_dir, model_name)
model = keras.models.load_model(model_path)
print(model.evaluate(test_generator, steps=5))
| [
"tensorflow.keras.preprocessing.image.ImageDataGenerator",
"tensorflow.keras.models.load_model",
"tensorflow.keras.preprocessing.image.img_to_array",
"numpy.expand_dims",
"tensorflow.keras.preprocessing.image.load_img",
"shutil.copyfile",
"os.path.join",
"os.listdir",
"numpy.vstack"
] | [((361, 397), 'os.path.join', 'os.path.join', (['models_dir', 'model_name'], {}), '(models_dir, model_name)\n', (373, 397), False, 'import os\n'), ((410, 445), 'tensorflow.keras.models.load_model', 'keras.models.load_model', (['model_path'], {}), '(model_path)\n', (433, 445), False, 'from tensorflow import keras\n'), ((457, 505), 'tensorflow.keras.preprocessing.image.load_img', 'image.load_img', (['img_path'], {'target_size': '(160, 160)'}), '(img_path, target_size=(160, 160))\n', (471, 505), False, 'from tensorflow.keras.preprocessing import image\n'), ((514, 537), 'tensorflow.keras.preprocessing.image.img_to_array', 'image.img_to_array', (['img'], {}), '(img)\n', (532, 537), False, 'from tensorflow.keras.preprocessing import image\n'), ((546, 571), 'numpy.expand_dims', 'np.expand_dims', (['x'], {'axis': '(0)'}), '(x, axis=0)\n', (560, 571), True, 'import numpy as np\n'), ((580, 594), 'numpy.vstack', 'np.vstack', (['[x]'], {}), '([x])\n', (589, 594), True, 'import numpy as np\n'), ((1151, 1169), 'os.listdir', 'os.listdir', (['SOURCE'], {}), '(SOURCE)\n', (1161, 1169), False, 'import os\n'), ((1713, 1752), 'tensorflow.keras.preprocessing.image.ImageDataGenerator', 'ImageDataGenerator', ([], {'rescale': '(1.0 / 255.0)'}), '(rescale=1.0 / 255.0)\n', (1731, 1752), False, 'from tensorflow.keras.preprocessing.image import ImageDataGenerator\n'), ((2037, 2073), 'os.path.join', 'os.path.join', (['models_dir', 'model_name'], {}), '(models_dir, model_name)\n', (2049, 2073), False, 'import os\n'), ((2086, 2121), 'tensorflow.keras.models.load_model', 'keras.models.load_model', (['model_path'], {}), '(model_path)\n', (2109, 2121), False, 'from tensorflow import keras\n'), ((967, 993), 'os.path.join', 'os.path.join', (['SOURCE', 'file'], {}), '(SOURCE, file)\n', (979, 993), False, 'import os\n'), ((1014, 1037), 'os.path.join', 'os.path.join', (['DIR', 'file'], {}), '(DIR, file)\n', (1026, 1037), False, 'import os\n'), ((1046, 1085), 'shutil.copyfile', 'shutil.copyfile', 
(['source_path', 'dest_path'], {}), '(source_path, dest_path)\n', (1061, 1085), False, 'import shutil\n'), ((890, 916), 'os.path.join', 'os.path.join', (['SOURCE', 'file'], {}), '(SOURCE, file)\n', (902, 916), False, 'import os\n')] |
from __future__ import print_function, division
import numpy as np
from scipy.sparse import csr_matrix
from scipy.linalg import blas
from timeit import default_timer as timer
from pyscf.nao.m_blas_wrapper import spmv_wrapper
from pyscf.nao.m_tddft_iter_gpu import tddft_iter_gpu_c
try:
import numba
from pyscf.nao.m_iter_div_eigenenergy_numba import div_eigenenergy_numba
use_numba = True
except:
use_numba = False
class tddft_iter_c():
def __init__(self, sv, pb, tddft_iter_tol=1e-2, tddft_iter_broadening=0.00367493,
nfermi_tol=1e-5, telec=None, nelec=None, fermi_energy=None, xc_code='LDA,PZ',
GPU=False, precision="single", **kvargs):
""" Iterative TDDFT a la PK, DF, OC JCTC """
from pyscf.nao.m_fermi_dirac import fermi_dirac_occupations
from pyscf.nao.m_comp_dm import comp_dm
import sys
assert tddft_iter_tol>1e-6
assert type(tddft_iter_broadening)==float
assert sv.wfsx.x.shape[-1]==1 # i.e. real eigenvectors we accept here
if precision == "single":
self.dtype = np.float32
self.dtypeComplex = np.complex64
elif precision == "double":
self.dtype = np.float64
self.dtypeComplex = np.complex128
else:
raise ValueError("precision can be only single or double")
self.rf0_ncalls = 0
self.l0_ncalls = 0
self.matvec_ncalls = 0
self.tddft_iter_tol = tddft_iter_tol
self.eps = tddft_iter_broadening
self.sv, self.pb, self.norbs, self.nspin = sv, pb, sv.norbs, sv.nspin
self.v_dab = pb.get_dp_vertex_coo(dtype=self.dtype).tocsr()
self.cc_da = pb.get_da2cc_coo(dtype=self.dtype).tocsr()
self.moms0,self.moms1 = pb.comp_moments(dtype=self.dtype)
self.nprod = self.moms0.size
self.kernel, self.kernel_dim = pb.comp_coulomb_pack(dtype=self.dtype)
if xc_code.upper()!='RPA' :
dm = comp_dm(sv.wfsx.x, sv.get_occupations())
pb.comp_fxc_pack(dm, xc_code, kernel = self.kernel, dtype=self.dtype, **kvargs)
self.telec = sv.hsx.telec if telec is None else telec
self.nelec = sv.hsx.nelec if nelec is None else nelec
self.fermi_energy = sv.fermi_energy if fermi_energy is None else fermi_energy
self.x = np.require(sv.wfsx.x, dtype=self.dtype, requirements='CW')
self.ksn2e = np.require(sv.wfsx.ksn2e, dtype=self.dtype, requirements='CW')
ksn2fd = fermi_dirac_occupations(self.telec, self.ksn2e, self.fermi_energy)
self.ksn2f = (3-self.nspin)*ksn2fd
self.nfermi = np.argmax(ksn2fd[0,0,:]<nfermi_tol)
self.vstart = np.argmax(1.0-ksn2fd[0,0,:]>nfermi_tol)
self.xocc = self.x[0,0,0:self.nfermi,:,0] # does python creates a copy at this point ?
self.xvrt = self.x[0,0,self.vstart:,:,0] # does python creates a copy at this point ?
self.tddft_iter_gpu = tddft_iter_gpu_c(GPU, self.v_dab, self.ksn2f, self.ksn2e,
self.norbs, self.nfermi, self.vstart)
def apply_rf0(self, v, comega=1j*0.0):
""" This applies the non-interacting response function to a vector (a set of vectors?) """
assert len(v)==len(self.moms0), "%r, %r "%(len(v), len(self.moms0))
self.rf0_ncalls+=1
# np.require may perform a copy of v, is it really necessary??
vdp = self.cc_da * np.require(v, dtype=np.complex64)
no = self.norbs
sab = csr_matrix((np.transpose(vdp)*self.v_dab).reshape([no,no]))
if self.tddft_iter_gpu.GPU:
vdp = self.tddft_iter_gpu.apply_rf0_gpu(self.xocc, sab, comega)
else:
#
# WARNING!!!!
# nb2v is column major, while self.xvrt is row major
# What a mess!!
nb2v = self.xocc*sab
nm2v = blas.cgemm(1.0, nb2v, np.transpose(self.xvrt))
if use_numba:
div_eigenenergy_numba(self.ksn2e, self.ksn2f, self.nfermi,
self.vstart, comega, nm2v, self.ksn2e.shape[2])
else:
for n,[en,fn] in enumerate(zip(self.ksn2e[0,0,:self.nfermi],self.ksn2f[0,0,:self.nfermi])):
for j,[em,fm] in enumerate(zip(self.ksn2e[0,0,n+1:],self.ksn2f[0,0,n+1:])):
m = j+n+1-self.vstart
nm2v[n,m] = nm2v[n,m] * (fn-fm) *\
( 1.0 / (comega - (em - en)) - 1.0 / (comega + (em - en)) )
nb2v = blas.cgemm(1.0, nm2v, self.xvrt)
ab2v = blas.cgemm(1.0, np.transpose(self.xocc), nb2v).reshape(no*no)
vdp = self.v_dab*ab2v
return vdp*self.cc_da
def comp_veff(self, vext, comega=1j*0.0):
from scipy.sparse.linalg import gmres, lgmres as gmres_alias, LinearOperator
""" This computes an effective field (scalar potential) given the external scalar potential """
assert len(vext)==len(self.moms0), "%r, %r "%(len(vext), len(self.moms0))
self.comega_current = comega
veff_op = LinearOperator((self.nprod,self.nprod), matvec=self.vext2veff_matvec, dtype=self.dtypeComplex)
resgm = gmres_alias(veff_op, np.require(vext, dtype=self.dtypeComplex,
requirements='C'), tol=self.tddft_iter_tol)
return resgm
def vext2veff_matvec(self, v):
self.matvec_ncalls+=1
chi0 = self.apply_rf0(v, self.comega_current)
# For some reason it is very difficult to pass only one dimension
# of an array to the fortran routines?? matvec[0, :].ctypes.data_as(POINTER(c_float))
# is not working!!!
# real part
chi0_reim = np.require(chi0.real, dtype=self.dtype, requirements=["A", "O"])
matvec_real = spmv_wrapper(1.0, self.kernel, chi0_reim)
# imaginary part
chi0_reim = np.require(chi0.imag, dtype=self.dtype, requirements=["A", "O"])
matvec_imag = spmv_wrapper(1.0, self.kernel, chi0_reim)
return v - (matvec_real + 1.0j*matvec_imag)
def comp_polariz_xx(self, comegas):
""" Polarizability """
polariz = np.zeros_like(comegas, dtype=np.complex64)
for iw,comega in enumerate(comegas):
veff,info = self.comp_veff(self.moms1[:,0], comega)
chi0 = self.apply_rf0( veff, comega )
polariz[iw] = np.dot(self.moms1[:,0], chi0)
if self.tddft_iter_gpu.GPU:
self.tddft_iter_gpu.clean_gpu()
return polariz
def comp_nonin(self, comegas):
"""
Non interacting polarizability
"""
vext = np.transpose(self.moms1)
pxx = np.zeros(comegas.shape, dtype=np.complex64)
for iomega, omega in enumerate(comegas):
chi0 = self.apply_rf0(vext[0,:], omega)
pxx[iomega] =-np.dot(chi0, vext[0,:])
return pxx
| [
"numpy.zeros_like",
"pyscf.nao.m_fermi_dirac.fermi_dirac_occupations",
"numpy.argmax",
"numpy.transpose",
"numpy.zeros",
"numpy.require",
"scipy.linalg.blas.cgemm",
"pyscf.nao.m_iter_div_eigenenergy_numba.div_eigenenergy_numba",
"scipy.sparse.linalg.LinearOperator",
"pyscf.nao.m_blas_wrapper.spmv_... | [((2210, 2268), 'numpy.require', 'np.require', (['sv.wfsx.x'], {'dtype': 'self.dtype', 'requirements': '"""CW"""'}), "(sv.wfsx.x, dtype=self.dtype, requirements='CW')\n", (2220, 2268), True, 'import numpy as np\n'), ((2286, 2348), 'numpy.require', 'np.require', (['sv.wfsx.ksn2e'], {'dtype': 'self.dtype', 'requirements': '"""CW"""'}), "(sv.wfsx.ksn2e, dtype=self.dtype, requirements='CW')\n", (2296, 2348), True, 'import numpy as np\n'), ((2362, 2428), 'pyscf.nao.m_fermi_dirac.fermi_dirac_occupations', 'fermi_dirac_occupations', (['self.telec', 'self.ksn2e', 'self.fermi_energy'], {}), '(self.telec, self.ksn2e, self.fermi_energy)\n', (2385, 2428), False, 'from pyscf.nao.m_fermi_dirac import fermi_dirac_occupations\n'), ((2486, 2525), 'numpy.argmax', 'np.argmax', (['(ksn2fd[0, 0, :] < nfermi_tol)'], {}), '(ksn2fd[0, 0, :] < nfermi_tol)\n', (2495, 2525), True, 'import numpy as np\n'), ((2540, 2585), 'numpy.argmax', 'np.argmax', (['(1.0 - ksn2fd[0, 0, :] > nfermi_tol)'], {}), '(1.0 - ksn2fd[0, 0, :] > nfermi_tol)\n', (2549, 2585), True, 'import numpy as np\n'), ((2791, 2891), 'pyscf.nao.m_tddft_iter_gpu.tddft_iter_gpu_c', 'tddft_iter_gpu_c', (['GPU', 'self.v_dab', 'self.ksn2f', 'self.ksn2e', 'self.norbs', 'self.nfermi', 'self.vstart'], {}), '(GPU, self.v_dab, self.ksn2f, self.ksn2e, self.norbs, self.\n nfermi, self.vstart)\n', (2807, 2891), False, 'from pyscf.nao.m_tddft_iter_gpu import tddft_iter_gpu_c\n'), ((4762, 4861), 'scipy.sparse.linalg.LinearOperator', 'LinearOperator', (['(self.nprod, self.nprod)'], {'matvec': 'self.vext2veff_matvec', 'dtype': 'self.dtypeComplex'}), '((self.nprod, self.nprod), matvec=self.vext2veff_matvec,\n dtype=self.dtypeComplex)\n', (4776, 4861), False, 'from scipy.sparse.linalg import gmres, lgmres as gmres_alias, LinearOperator\n'), ((5342, 5406), 'numpy.require', 'np.require', (['chi0.real'], {'dtype': 'self.dtype', 'requirements': "['A', 'O']"}), "(chi0.real, dtype=self.dtype, requirements=['A', 
'O'])\n", (5352, 5406), True, 'import numpy as np\n'), ((5425, 5466), 'pyscf.nao.m_blas_wrapper.spmv_wrapper', 'spmv_wrapper', (['(1.0)', 'self.kernel', 'chi0_reim'], {}), '(1.0, self.kernel, chi0_reim)\n', (5437, 5466), False, 'from pyscf.nao.m_blas_wrapper import spmv_wrapper\n'), ((5509, 5573), 'numpy.require', 'np.require', (['chi0.imag'], {'dtype': 'self.dtype', 'requirements': "['A', 'O']"}), "(chi0.imag, dtype=self.dtype, requirements=['A', 'O'])\n", (5519, 5573), True, 'import numpy as np\n'), ((5592, 5633), 'pyscf.nao.m_blas_wrapper.spmv_wrapper', 'spmv_wrapper', (['(1.0)', 'self.kernel', 'chi0_reim'], {}), '(1.0, self.kernel, chi0_reim)\n', (5604, 5633), False, 'from pyscf.nao.m_blas_wrapper import spmv_wrapper\n'), ((5763, 5805), 'numpy.zeros_like', 'np.zeros_like', (['comegas'], {'dtype': 'np.complex64'}), '(comegas, dtype=np.complex64)\n', (5776, 5805), True, 'import numpy as np\n'), ((6198, 6222), 'numpy.transpose', 'np.transpose', (['self.moms1'], {}), '(self.moms1)\n', (6210, 6222), True, 'import numpy as np\n'), ((6233, 6276), 'numpy.zeros', 'np.zeros', (['comegas.shape'], {'dtype': 'np.complex64'}), '(comegas.shape, dtype=np.complex64)\n', (6241, 6276), True, 'import numpy as np\n'), ((3222, 3255), 'numpy.require', 'np.require', (['v'], {'dtype': 'np.complex64'}), '(v, dtype=np.complex64)\n', (3232, 3255), True, 'import numpy as np\n'), ((4236, 4268), 'scipy.linalg.blas.cgemm', 'blas.cgemm', (['(1.0)', 'nm2v', 'self.xvrt'], {}), '(1.0, nm2v, self.xvrt)\n', (4246, 4268), False, 'from scipy.linalg import blas\n'), ((4890, 4949), 'numpy.require', 'np.require', (['vext'], {'dtype': 'self.dtypeComplex', 'requirements': '"""C"""'}), "(vext, dtype=self.dtypeComplex, requirements='C')\n", (4900, 4949), True, 'import numpy as np\n'), ((5975, 6005), 'numpy.dot', 'np.dot', (['self.moms1[:, 0]', 'chi0'], {}), '(self.moms1[:, 0], chi0)\n', (5981, 6005), True, 'import numpy as np\n'), ((3650, 3673), 'numpy.transpose', 'np.transpose', (['self.xvrt'], {}), 
'(self.xvrt)\n', (3662, 3673), True, 'import numpy as np\n'), ((3718, 3828), 'pyscf.nao.m_iter_div_eigenenergy_numba.div_eigenenergy_numba', 'div_eigenenergy_numba', (['self.ksn2e', 'self.ksn2f', 'self.nfermi', 'self.vstart', 'comega', 'nm2v', 'self.ksn2e.shape[2]'], {}), '(self.ksn2e, self.ksn2f, self.nfermi, self.vstart,\n comega, nm2v, self.ksn2e.shape[2])\n', (3739, 3828), False, 'from pyscf.nao.m_iter_div_eigenenergy_numba import div_eigenenergy_numba\n'), ((6389, 6413), 'numpy.dot', 'np.dot', (['chi0', 'vext[0, :]'], {}), '(chi0, vext[0, :])\n', (6395, 6413), True, 'import numpy as np\n'), ((3298, 3315), 'numpy.transpose', 'np.transpose', (['vdp'], {}), '(vdp)\n', (3310, 3315), True, 'import numpy as np\n'), ((4301, 4324), 'numpy.transpose', 'np.transpose', (['self.xocc'], {}), '(self.xocc)\n', (4313, 4324), True, 'import numpy as np\n')] |
from os.path import join
from torchvision.transforms import transforms
from tqdm import tqdm
from src.plunder_standardize_colors import standardize_colors
import numpy as np
import torch
from os import listdir, makedirs
import matplotlib.pyplot as plt
from src.pl_modules.vae import VaeModel
from PIL import Image
if __name__ == "__main__":
images_path = "data/train/"
reconstruction_path = "data/train_reconstruction/"
makedirs(reconstruction_path, exist_ok=True)
directories = listdir(images_path)
directories = [join(images_path, x) for x in directories]
files = []
for i, directory in enumerate(directories):
for file in listdir(directory):
if file.startswith("img"):
files.append(join(directory, file))
break
files = sorted(files)
images = [np.load(file)["observations"] for file in files]
for i, img in enumerate(images):
img = Image.fromarray(img)
img.save(reconstruction_path + "/" + f"{i}".zfill(5) + ".png")
# device = "cuda"
# vae = VaeModel.load_from_checkpoint(
# "/home/michele/projects/dlai-project/checkpoints/vae/best.ckpt",
# map_location=device,
# ).to(device)
#
# transform = transforms.Compose([transforms.ToPILImage(), transforms.ToTensor()])
#
# for i, img in enumerate(tqdm(images)):
# obs = transform(img).cuda().unsqueeze(0)
# with torch.no_grad():
# reconstruction = vae(obs)[0]
# reconstruction = (
# reconstruction.squeeze().permute(1, 2, 0).cpu().detach().numpy()
# )
# f, axarr = plt.subplots(2)
# axarr[0].imshow(img)
# axarr[1].imshow(reconstruction)
# f.savefig(reconstruction_path + "/" + f"{i}".zfill(5) + ".png")
# plt.close(f)
| [
"numpy.load",
"os.makedirs",
"PIL.Image.fromarray",
"os.path.join",
"os.listdir"
] | [((437, 481), 'os.makedirs', 'makedirs', (['reconstruction_path'], {'exist_ok': '(True)'}), '(reconstruction_path, exist_ok=True)\n', (445, 481), False, 'from os import listdir, makedirs\n'), ((500, 520), 'os.listdir', 'listdir', (['images_path'], {}), '(images_path)\n', (507, 520), False, 'from os import listdir, makedirs\n'), ((540, 560), 'os.path.join', 'join', (['images_path', 'x'], {}), '(images_path, x)\n', (544, 560), False, 'from os.path import join\n'), ((667, 685), 'os.listdir', 'listdir', (['directory'], {}), '(directory)\n', (674, 685), False, 'from os import listdir, makedirs\n'), ((943, 963), 'PIL.Image.fromarray', 'Image.fromarray', (['img'], {}), '(img)\n', (958, 963), False, 'from PIL import Image\n'), ((842, 855), 'numpy.load', 'np.load', (['file'], {}), '(file)\n', (849, 855), True, 'import numpy as np\n'), ((755, 776), 'os.path.join', 'join', (['directory', 'file'], {}), '(directory, file)\n', (759, 776), False, 'from os.path import join\n')] |
#!/usr/bin/python3.5
"""
Draw FeedForward Neural Network Predictions over original videos.
"""
import sys
import os
OPENCV_HOME = os.environ['OPENCV_HOME']
sys.path.insert(1, OPENCV_HOME + '/lib/python3.5/dist-packages')
import cv2
import numpy as np
from Util import print_msg
from FFPredict import load_data, param_suits as ff_param_suits
param_suits = {
'sponge_set_1': {
'file_predictions': 'data/pickles/sponge_center_predictions.csv',
'file_video': 'data/generated_videos/sponge_centre_100__filterless_segmented.avi',
'file_results': 'data/generated_videos/sponge_centre_100__result.avi',
'roi_shape': (600, 500, 3),
'scale': 0.5,
'plot_tracking': True
},
'sponge_set_2': {
'file_predictions': 'data/pickles/sponge_longside_predictions.csv',
'file_video': 'data/generated_videos/sponge_longside_100__filterless_segmented.avi',
'file_results': 'data/generated_videos/sponge_longside_100__result.avi',
'roi_shape': (530, 500, 3),
'scale': 0.5,
'plot_tracking': False
},
'sponge_set_3': {
'file_predictions': 'data/pickles/sponge_shortside_predictions.csv',
'file_video': 'data/generated_videos/sponge_shortside_100__filterless_segmented.avi',
'file_results': 'data/generated_videos/sponge_shortside_100__result.avi',
'roi_shape': (475, 525, 3),
'scale': 0.5,
'plot_tracking': False
},
'plasticine_set_1': {
'file_predictions': 'data/pickles/plasticine_center_predictions.csv',
'file_video': 'data/generated_videos/a_plasticine_centre_100__filterless_segmented.avi',
'file_results': 'data/generated_videos/plasticine_centre_100__result.avi',
'roi_shape': (550, 400, 3),
'scale': 0.5,
'plot_tracking': True
},
'plasticine_set_2': {
'file_predictions': 'data/pickles/plasticine_longside_predictions.csv',
'file_video': 'data/generated_videos/a_plasticine_longside_100__filterless_segmented.avi',
'file_results': 'data/generated_videos/plasticine_longside_100__result.avi',
'roi_shape': (530, 400, 3),
'scale': 0.5,
'plot_tracking': False
},
'plasticine_set_3': {
'file_predictions': 'data/pickles/plasticine_shortside_predictions.csv',
'file_video': 'data/generated_videos/a_plasticine_shortside_100__filterless_segmented.avi',
'file_results': 'data/generated_videos/plasticine_shortside_100__result.avi',
'roi_shape': (450, 525, 3),
'scale': 0.5,
'plot_tracking': False
}
}
def plot(param_name, param_suit):
"""
Draw FeedForward Neural Network Predictions over original videos.
"""
contours = np.loadtxt(param_suit['file_predictions'])
print_msg(type(contours), contours.dtype, contours.shape)
contours = contours.astype(np.int32)
print_msg(type(contours), contours.dtype, contours.shape)
contours = contours.reshape((contours.shape[0], int(contours.shape[1]/2), 2))
print_msg(type(contours), contours.dtype, contours.shape)
ff_param_suit = ff_param_suits[param_name]
X, Y = load_data(ff_param_suit['file_train_data'],
ff_param_suit['file_force_data'])
Y = Y.astype(np.int32)
Y = Y.reshape((Y.shape[0], int(Y.shape[1] / 2), 2))
size = tuple(np.rint(np.array(param_suit['roi_shape'])[0:2] * param_suit['scale']).astype(np.int32))
cap = cv2.VideoCapture(param_suit['file_video'])
fourcc = cv2.VideoWriter_fourcc(*'MPEG')
print_msg("Base image size ", size)
out = cv2.VideoWriter(param_suit['file_results'], fourcc, 30, size)
i = 0
plot_tracking = param_suit['plot_tracking']
while cap.isOpened():
# Capture frame-by-frame
ret, img = cap.read()
if not ret: break
cv2.circle(img, tuple(X[i,0:2].astype(int)), 3, 255, 1)
cv2.polylines(img, [contours[i]], True, (0, 225, 225), 2)
if plot_tracking:
cv2.polylines(img, [Y[i]], True, (0, 0, 255), 2)
cv2.imshow('Demo', img)
out.write(img)
i += 1
key = cv2.waitKey(5) & 0xFF
if key == ord('q'):
break
if key == ord('s'):
cv2.imwrite(param_suit['file_results'] + '.jpg', img)
out.release()
cv2.waitKey(-1)
def print_instructions():
""" Prints usage instructions. """
print("Use: " + sys.argv[0] + " <set_of_paramteres>\nOptions:")
for key in param_suits.keys():
print('\t', key)
if __name__ == '__main__':
nargs = len(sys.argv)
if nargs != 2 or sys.argv[1] not in param_suits:
print_instructions()
sys.exit(1)
param_suit = param_suits[sys.argv[1]]
plot(sys.argv[1], param_suit)
| [
"cv2.VideoWriter_fourcc",
"cv2.polylines",
"cv2.waitKey",
"cv2.imwrite",
"sys.path.insert",
"cv2.VideoCapture",
"FFPredict.load_data",
"numpy.array",
"numpy.loadtxt",
"cv2.VideoWriter",
"cv2.imshow",
"sys.exit",
"Util.print_msg"
] | [((158, 222), 'sys.path.insert', 'sys.path.insert', (['(1)', "(OPENCV_HOME + '/lib/python3.5/dist-packages')"], {}), "(1, OPENCV_HOME + '/lib/python3.5/dist-packages')\n", (173, 222), False, 'import sys\n'), ((2754, 2796), 'numpy.loadtxt', 'np.loadtxt', (["param_suit['file_predictions']"], {}), "(param_suit['file_predictions'])\n", (2764, 2796), True, 'import numpy as np\n'), ((3165, 3242), 'FFPredict.load_data', 'load_data', (["ff_param_suit['file_train_data']", "ff_param_suit['file_force_data']"], {}), "(ff_param_suit['file_train_data'], ff_param_suit['file_force_data'])\n", (3174, 3242), False, 'from FFPredict import load_data, param_suits as ff_param_suits\n'), ((3463, 3505), 'cv2.VideoCapture', 'cv2.VideoCapture', (["param_suit['file_video']"], {}), "(param_suit['file_video'])\n", (3479, 3505), False, 'import cv2\n'), ((3519, 3550), 'cv2.VideoWriter_fourcc', 'cv2.VideoWriter_fourcc', (["*'MPEG'"], {}), "(*'MPEG')\n", (3541, 3550), False, 'import cv2\n'), ((3555, 3590), 'Util.print_msg', 'print_msg', (['"""Base image size """', 'size'], {}), "('Base image size ', size)\n", (3564, 3590), False, 'from Util import print_msg\n'), ((3601, 3662), 'cv2.VideoWriter', 'cv2.VideoWriter', (["param_suit['file_results']", 'fourcc', '(30)', 'size'], {}), "(param_suit['file_results'], fourcc, 30, size)\n", (3616, 3662), False, 'import cv2\n'), ((4324, 4339), 'cv2.waitKey', 'cv2.waitKey', (['(-1)'], {}), '(-1)\n', (4335, 4339), False, 'import cv2\n'), ((3910, 3967), 'cv2.polylines', 'cv2.polylines', (['img', '[contours[i]]', '(True)', '(0, 225, 225)', '(2)'], {}), '(img, [contours[i]], True, (0, 225, 225), 2)\n', (3923, 3967), False, 'import cv2\n'), ((4063, 4086), 'cv2.imshow', 'cv2.imshow', (['"""Demo"""', 'img'], {}), "('Demo', img)\n", (4073, 4086), False, 'import cv2\n'), ((4680, 4691), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (4688, 4691), False, 'import sys\n'), ((4006, 4054), 'cv2.polylines', 'cv2.polylines', (['img', '[Y[i]]', '(True)', '(0, 0, 255)', 
'(2)'], {}), '(img, [Y[i]], True, (0, 0, 255), 2)\n', (4019, 4054), False, 'import cv2\n'), ((4140, 4154), 'cv2.waitKey', 'cv2.waitKey', (['(5)'], {}), '(5)\n', (4151, 4154), False, 'import cv2\n'), ((4248, 4301), 'cv2.imwrite', 'cv2.imwrite', (["(param_suit['file_results'] + '.jpg')", 'img'], {}), "(param_suit['file_results'] + '.jpg', img)\n", (4259, 4301), False, 'import cv2\n'), ((3373, 3406), 'numpy.array', 'np.array', (["param_suit['roi_shape']"], {}), "(param_suit['roi_shape'])\n", (3381, 3406), True, 'import numpy as np\n')] |
import numpy as np
"""
used for converting the decoded image to rle mask
"""
def rle_encode(im):
'''
im: numpy array, 1 - mask, 0 - background
Returns run length as string formated
'''
pixels = im.flatten(order = 'F')
pixels = np.concatenate([[0], pixels, [0]])
runs = np.where(pixels[1:] != pixels[:-1])[0] + 1
runs[1::2] -= runs[::2]
return ' '.join(str(x) for x in runs)
def rle_decode(rle_mask):
s = rle_mask.split()
starts, lengths = [np.asarray(x, dtype=int) for x in (s[0:][::2], s[1:][::2])]
starts -= 1
ends = starts + lengths
img = np.zeros(101*101, dtype=np.uint8)
for lo, hi in zip(starts, ends):
img[lo:hi] = 1
return img.reshape(101,101) | [
"numpy.asarray",
"numpy.where",
"numpy.zeros",
"numpy.concatenate"
] | [((252, 286), 'numpy.concatenate', 'np.concatenate', (['[[0], pixels, [0]]'], {}), '([[0], pixels, [0]])\n', (266, 286), True, 'import numpy as np\n'), ((600, 635), 'numpy.zeros', 'np.zeros', (['(101 * 101)'], {'dtype': 'np.uint8'}), '(101 * 101, dtype=np.uint8)\n', (608, 635), True, 'import numpy as np\n'), ((486, 510), 'numpy.asarray', 'np.asarray', (['x'], {'dtype': 'int'}), '(x, dtype=int)\n', (496, 510), True, 'import numpy as np\n'), ((298, 333), 'numpy.where', 'np.where', (['(pixels[1:] != pixels[:-1])'], {}), '(pixels[1:] != pixels[:-1])\n', (306, 333), True, 'import numpy as np\n')] |
import gym
import numpy as np
class QLearningAgent:
def __init__(self, obs_n, act_n, learning_rate=0.01, gamma=0.9, e_greed=0.1):
self.act_n = act_n
self.lr = learning_rate
self.gamma = gamma
self.epsilon = e_greed
self.Q = np.zeros((obs_n, act_n))
# 根据输入观察值,采样输出的动作值,带探索
def sample(self, obs):
if np.random.uniform(0, 1) < (1.0 - self.epsilon): # 根据table的Q值选动作
action = self.predict(obs)
else:
action = np.random.choice(self.act_n) # 有一定概率随机探索选取一个动作
return action
# 根据输入观察值,预测输出的动作值
def predict(self, obs):
Q_list = self.Q[obs, :]
maxQ = np.max(Q_list)
action_list = np.where(Q_list == maxQ)[0] # maxQ可能对应多个action
action = np.random.choice(action_list)
return action
# 学习方法,也就是更新Q-table的方法
def learn(self, obs, action, reward, next_obs, done):
"""
off-policy
obs: 交互前的obs, s_t
action: 本次交互选择的action, a_t
reward: 本次动作获得的奖励r
next_obs: 本次交互后的obs, s_t+1
done: episode是否结束
"""
predict_Q = self.Q[obs, action]
if done:
target_Q = reward
else:
target_Q = reward + self.gamma * np.max(self.Q[next_obs, :]) # Q-learning
self.Q[obs, action] += self.lr * (target_Q - predict_Q) # 修正q
# 把Q表格的数据保存到文件中
def save(self):
npy_file = 'q_table_ql.npy'
np.save(npy_file, self.Q)
print(npy_file + ' saved.')
# 从文件中读取数据到 Q表格
def restore(self, npy_file='./q_table.npy'):
self.Q = np.load(npy_file)
print(npy_file + ' loaded.')
def run_episode(env, agent, render=False):
total_steps = 0 # 记录每个episode走了多少step
total_reward = 0
obs = env.reset() # 重置环境, 重新开一局(即开始新的一个episode)
while True:
action = agent.sample(obs) # 根据算法选择一个动作
next_obs, reward, done, _ = env.step(action) # 与环境进行一次交互
# 训练 Q-learning 算法
agent.learn(obs, action, reward, next_obs, done)
obs = next_obs # 存储上一个观察值
total_reward += reward
total_steps += 1 # 计算step数
if render:
env.render() # 渲染新的一帧图形
if done:
break
return total_reward, total_steps
def test_episode(env, agent):
total_reward = 0
obs = env.reset()
while True:
action = agent.predict(obs) # greedy
next_obs, reward, done, _ = env.step(action)
total_reward += reward
obs = next_obs
# time.sleep(0.5)
env.render()
if done:
break
return total_reward
def main():
# 使用gym创建悬崖环境
env = gym.make("CliffWalking-v0") # 0 up, 1 right, 2 down, 3 left
# 创建一个agent实例,输入超参数
agent = QLearningAgent(obs_n=env.observation_space.n,
act_n=env.action_space.n,
learning_rate=0.1,
gamma=0.9,
e_greed=0.1)
# 训练500个episode,打印每个episode的分数
for episode in range(500):
ep_reward, ep_steps = run_episode(env, agent, False)
print(
"Episode %s: step = %s , reward = %.1f" % (episode, ep_steps, ep_reward))
agent.save()
# 全部训练结束,查看算法效果
test_reward = test_episode(env, agent)
print('test reward = %.1f' % test_reward)
def print_q_table():
arr = np.load("q_table_ql.npy")
print(arr) # arr的类型为 <class 'numpy.ndarray'>
if __name__ == '__main__':
# main()
print_q_table()
| [
"numpy.random.uniform",
"numpy.load",
"numpy.save",
"gym.make",
"numpy.zeros",
"numpy.max",
"numpy.where",
"numpy.random.choice"
] | [((2671, 2698), 'gym.make', 'gym.make', (['"""CliffWalking-v0"""'], {}), "('CliffWalking-v0')\n", (2679, 2698), False, 'import gym\n'), ((3382, 3407), 'numpy.load', 'np.load', (['"""q_table_ql.npy"""'], {}), "('q_table_ql.npy')\n", (3389, 3407), True, 'import numpy as np\n'), ((270, 294), 'numpy.zeros', 'np.zeros', (['(obs_n, act_n)'], {}), '((obs_n, act_n))\n', (278, 294), True, 'import numpy as np\n'), ((669, 683), 'numpy.max', 'np.max', (['Q_list'], {}), '(Q_list)\n', (675, 683), True, 'import numpy as np\n'), ((771, 800), 'numpy.random.choice', 'np.random.choice', (['action_list'], {}), '(action_list)\n', (787, 800), True, 'import numpy as np\n'), ((1465, 1490), 'numpy.save', 'np.save', (['npy_file', 'self.Q'], {}), '(npy_file, self.Q)\n', (1472, 1490), True, 'import numpy as np\n'), ((1614, 1631), 'numpy.load', 'np.load', (['npy_file'], {}), '(npy_file)\n', (1621, 1631), True, 'import numpy as np\n'), ((361, 384), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(1)'], {}), '(0, 1)\n', (378, 384), True, 'import numpy as np\n'), ((500, 528), 'numpy.random.choice', 'np.random.choice', (['self.act_n'], {}), '(self.act_n)\n', (516, 528), True, 'import numpy as np\n'), ((706, 730), 'numpy.where', 'np.where', (['(Q_list == maxQ)'], {}), '(Q_list == maxQ)\n', (714, 730), True, 'import numpy as np\n'), ((1267, 1294), 'numpy.max', 'np.max', (['self.Q[next_obs, :]'], {}), '(self.Q[next_obs, :])\n', (1273, 1294), True, 'import numpy as np\n')] |
import os
import re
import time
from multiprocessing import Manager
from multiprocessing import Process
import PySimpleGUI as sg
import numpy as np
import phonemizer
import pyaudio
import pyloudnorm
import soundfile as sf
import torch
from numpy import trim_zeros
from pynput import keyboard
from torchaudio.transforms import Vad as VoiceActivityDetection
import parameters
class CorpusCreator:
def __init__(self):
"""
Load prompt list and prepare status quo
"""
self.corpus_name = parameters.current_corpus_name
self.index = Manager().list()
self.vad = self.vad = VoiceActivityDetection(sample_rate=parameters.sampling_rate)
self.meter = pyloudnorm.Meter(parameters.sampling_rate)
self.audio_save_dir = "Corpora/{}/".format(self.corpus_name)
os.makedirs("Corpora/{}/unprocessed".format(self.corpus_name), exist_ok=True)
self.record_flag = Manager().list()
self.stop_recorder_process_flag = Manager().list()
recorder_process = Process(target=self.recorder)
recorder_process.start()
self.stop_flag = False
self.datapoint = Manager().list()
self.datapoint.append("")
self.datapoint.append("")
self.done_ones = list()
self.lookup_path = "Corpora/{}/metadata.csv".format(self.corpus_name)
with open("Corpora/{}/prompts.txt".format(self.corpus_name), mode='r', encoding='utf8') as prompts:
self.prompt_list = Manager().list(prompts.read().split("\n"))
if not os.path.exists(self.lookup_path):
with open(self.lookup_path, mode='w', encoding='utf8') as lookup_file:
lookup_file.write("")
else:
for file in os.listdir("Corpora/{}".format(self.corpus_name)):
if file.endswith(".wav"):
self.done_ones.append(self.prompt_list.pop(0))
self.index.append("")
self.update_datapoint(self.prompt_list[0])
def update_datapoint(self, sentence):
"""
Load new datapoint for display and use
"""
self.datapoint[0] = sentence
self.datapoint[1] = phonemize(sentence)
def update_lookup(self):
"""
Call this when the datapoint is recorded and saved. Load new Datapoint AFTER this has completed
"""
with open(self.lookup_path, mode='r', encoding='utf8') as lookup_file:
current_file = lookup_file.read()
new_file = current_file + "\n" + "{}|{}|{}".format("{}.wav".format(len(self.index)), self.datapoint[1], self.datapoint[0])
with open(self.lookup_path, mode='w', encoding='utf8') as lookup_file:
lookup_file.write(new_file.strip("\n"))
if len(self.prompt_list) > 1:
self.done_ones.append(self.prompt_list.pop(0))
else:
self.stop_flag = True
self.index.append("")
def run(self):
"""
TKinter really wants to stay in the main-thread, so this just starts the key-listener and recording process and then keeps updating the window.
"""
listener = keyboard.Listener(on_press=self.handle_key_down, on_release=self.handle_key_up)
listener.start()
sg.theme('DarkGreen2')
layout = [[sg.Text("", font="Any 20", size=(2000, 1), pad=((0, 0), (350, 0)), justification='center', key="sentence1"), ],
[sg.Text("", font="Any 20", size=(2000, 1), pad=(0, 0), justification='center', key="sentence2"), ],
[sg.Text("", font="Any 20", size=(2000, 1), pad=((0, 0), (0, 30)), justification='center', key="sentence3"), ],
[sg.Text("", font="Any 18", size=(2000, 1), pad=(0, 0), justification='center', key="phonemes1"), ],
[sg.Text("", font="Any 18", size=(2000, 1), pad=(0, 0), justification='center', key="phonemes2"), ],
[sg.Text("", font="Any 18", size=(2000, 1), pad=(0, 0), justification='center', key="phonemes3"), ],
[sg.Text("Left CTRL-Key for push-to-talk, ESC-Key to exit, ALT-Key to redo the last prompt", font="Any 10", size=(2000, 1),
pad=((0, 0), (300, 0)),
justification='center', ), ]]
window = sg.Window(self.corpus_name, layout)
window.read(5)
window.bring_to_front()
window.maximize()
while True:
event, values = window.read(200)
if event == sg.WIN_CLOSED or self.stop_flag:
break
window["sentence1"].update("")
window["sentence2"].update("")
window["phonemes2"].update("")
window["phonemes3"].update("")
window["sentence3"].update(self.datapoint[0])
window["phonemes1"].update(self.datapoint[1])
if len(self.datapoint[0]) > 45:
prompt_list = self.datapoint[0].split()
promt1 = " ".join(prompt_list[:-int(len(prompt_list) / 2)])
promt2 = " ".join(prompt_list[-int(len(prompt_list) / 2):])
window["sentence2"].update(promt1)
window["sentence3"].update(promt2)
if len(self.datapoint[1]) > 45:
phoneme_list = self.datapoint[1].split()
phonemes1 = " ".join(phoneme_list[:-int(len(phoneme_list) / 2)])
phonemes2 = " ".join(phoneme_list[-int(len(phoneme_list) / 2):])
window["phonemes1"].update(phonemes1)
window["phonemes2"].update(phonemes2)
if len(self.datapoint[0]) > 90:
prompt_list = self.datapoint[0].split()
promt1 = " ".join(prompt_list[:-int(len(prompt_list) / 3) * 2])
promt2 = " ".join(prompt_list[-int(len(prompt_list) / 3) * 2:-int(len(prompt_list) / 3)])
promt3 = " ".join(prompt_list[-int(len(prompt_list) / 3):])
window["sentence1"].update(promt1)
window["sentence2"].update(promt2)
window["sentence3"].update(promt3)
if len(self.datapoint[1]) > 90:
phoneme_list = self.datapoint[1].split()
phonemes1 = " ".join(phoneme_list[:-int(len(phoneme_list) / 3) * 2])
phonemes2 = " ".join(phoneme_list[-int(len(phoneme_list) / 3) * 2:-int(len(phoneme_list) / 3)])
phonemes3 = " ".join(phoneme_list[-int(len(phoneme_list) / 3):])
window["phonemes1"].update(phonemes1)
window["phonemes2"].update(phonemes2)
window["phonemes3"].update(phonemes3)
self.stop_recorder_process_flag.append("")
listener.stop()
window.close()
time.sleep(1)
def handle_key_down(self, key):
if key == keyboard.Key.ctrl_l and len(self.record_flag) == 0:
self.record_flag.append("")
elif key == keyboard.Key.esc:
self.stop_flag = True
elif key == keyboard.Key.alt or key == keyboard.Key.alt_l:
self.go_back()
def go_back(self):
"""
go back to the previous sentence and re-record it
"""
if len(self.done_ones) > 0:
with open(self.lookup_path, mode='r', encoding='utf8') as lookup_file:
current_file = lookup_file.read()
new_file = "\n".join(current_file.split("\n")[:-1]).strip("\n")
with open(self.lookup_path, mode='w', encoding='utf8') as lookup_file:
lookup_file.write(new_file)
self.prompt_list.insert(0, self.done_ones.pop())
self.index.pop()
self.update_datapoint(self.prompt_list[0])
def handle_key_up(self, key):
if key == keyboard.Key.ctrl_l:
while len(self.record_flag) > 0:
self.record_flag.pop()
self.update_lookup()
if len(self.prompt_list) > 0:
self.update_datapoint(self.prompt_list[0])
def recorder(self):
pa = pyaudio.PyAudio()
while True:
if len(self.stop_recorder_process_flag) != 0:
pa.terminate()
break
if len(self.record_flag) != 0:
stream = pa.open(format=pyaudio.paFloat32,
channels=1,
rate=parameters.sampling_rate,
input=True,
output=False,
frames_per_buffer=1024,
input_device_index=1)
frames = list()
while len(self.record_flag) != 0:
frames.append(np.frombuffer(stream.read(1024), dtype=np.float32))
stream.stop_stream()
stream.close()
audio = np.hstack(frames)
try:
sf.write(file=self.audio_save_dir + "unprocessed/{}.wav".format(len(self.index) - 1), data=audio, samplerate=parameters.sampling_rate)
audio = self.apply_signal_processing(audio)
sf.write(file=self.audio_save_dir + "{}.wav".format(len(self.index) - 1), data=audio, samplerate=parameters.sampling_rate)
except ValueError:
print(
"Recording was too short! Remember that the recording goes for as long as you keep the CTRL button PRESSED and saves when you RELEASE.")
else:
time.sleep(0.01)
def normalize_loudness(self, audio):
loudness = self.meter.integrated_loudness(audio)
loud_normed = pyloudnorm.normalize.loudness(audio, loudness, -30.0)
peak = np.amax(np.abs(loud_normed))
peak_normed = np.divide(loud_normed, peak)
return peak_normed
def cut_silence_from_begin_and_end(self, audio):
silence = torch.zeros([20000])
no_silence_front = self.vad(torch.cat((silence, torch.Tensor(audio), silence), 0))
reversed_audio = torch.flip(no_silence_front, (0,))
no_silence_back = self.vad(torch.Tensor(reversed_audio))
unreversed_audio = torch.flip(no_silence_back, (0,))
return trim_zeros(unreversed_audio.detach().numpy())
def apply_signal_processing(self, audio):
audio = self.normalize_loudness(audio)
return self.cut_silence_from_begin_and_end(audio)
def phonemize(text):
phones = phonemizer.phonemize(text,
language_switch='remove-flags',
backend="espeak",
language=parameters.phonemizer_language,
preserve_punctuation=True,
strip=True,
punctuation_marks=';:,.!?¡¿—…"«»“”~/',
with_stress=True).replace(";", ",").replace("/", " ") \
.replace(":", ",").replace('"', ",").replace("-", ",").replace("-", ",").replace("\n", " ") \
.replace("\t", " ").replace("¡", "").replace("¿", "").replace(",", "~")
phones = re.sub("~+", "~", phones)
return phones
| [
"numpy.abs",
"pyloudnorm.normalize.loudness",
"PySimpleGUI.theme",
"os.path.exists",
"phonemizer.phonemize",
"torch.Tensor",
"PySimpleGUI.Window",
"torch.zeros",
"re.sub",
"numpy.divide",
"pynput.keyboard.Listener",
"pyloudnorm.Meter",
"time.sleep",
"numpy.hstack",
"torchaudio.transforms... | [((11101, 11126), 're.sub', 're.sub', (['"""~+"""', '"""~"""', 'phones'], {}), "('~+', '~', phones)\n", (11107, 11126), False, 'import re\n'), ((622, 682), 'torchaudio.transforms.Vad', 'VoiceActivityDetection', ([], {'sample_rate': 'parameters.sampling_rate'}), '(sample_rate=parameters.sampling_rate)\n', (644, 682), True, 'from torchaudio.transforms import Vad as VoiceActivityDetection\n'), ((704, 746), 'pyloudnorm.Meter', 'pyloudnorm.Meter', (['parameters.sampling_rate'], {}), '(parameters.sampling_rate)\n', (720, 746), False, 'import pyloudnorm\n'), ((1032, 1061), 'multiprocessing.Process', 'Process', ([], {'target': 'self.recorder'}), '(target=self.recorder)\n', (1039, 1061), False, 'from multiprocessing import Process\n'), ((3143, 3222), 'pynput.keyboard.Listener', 'keyboard.Listener', ([], {'on_press': 'self.handle_key_down', 'on_release': 'self.handle_key_up'}), '(on_press=self.handle_key_down, on_release=self.handle_key_up)\n', (3160, 3222), False, 'from pynput import keyboard\n'), ((3257, 3279), 'PySimpleGUI.theme', 'sg.theme', (['"""DarkGreen2"""'], {}), "('DarkGreen2')\n", (3265, 3279), True, 'import PySimpleGUI as sg\n'), ((4284, 4319), 'PySimpleGUI.Window', 'sg.Window', (['self.corpus_name', 'layout'], {}), '(self.corpus_name, layout)\n', (4293, 4319), True, 'import PySimpleGUI as sg\n'), ((6721, 6734), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (6731, 6734), False, 'import time\n'), ((8001, 8018), 'pyaudio.PyAudio', 'pyaudio.PyAudio', ([], {}), '()\n', (8016, 8018), False, 'import pyaudio\n'), ((9621, 9674), 'pyloudnorm.normalize.loudness', 'pyloudnorm.normalize.loudness', (['audio', 'loudness', '(-30.0)'], {}), '(audio, loudness, -30.0)\n', (9650, 9674), False, 'import pyloudnorm\n'), ((9741, 9769), 'numpy.divide', 'np.divide', (['loud_normed', 'peak'], {}), '(loud_normed, peak)\n', (9750, 9769), True, 'import numpy as np\n'), ((9869, 9889), 'torch.zeros', 'torch.zeros', (['[20000]'], {}), '([20000])\n', (9880, 
9889), False, 'import torch\n'), ((10006, 10040), 'torch.flip', 'torch.flip', (['no_silence_front', '(0,)'], {}), '(no_silence_front, (0,))\n', (10016, 10040), False, 'import torch\n'), ((10133, 10166), 'torch.flip', 'torch.flip', (['no_silence_back', '(0,)'], {}), '(no_silence_back, (0,))\n', (10143, 10166), False, 'import torch\n'), ((1543, 1575), 'os.path.exists', 'os.path.exists', (['self.lookup_path'], {}), '(self.lookup_path)\n', (1557, 1575), False, 'import os\n'), ((9698, 9717), 'numpy.abs', 'np.abs', (['loud_normed'], {}), '(loud_normed)\n', (9704, 9717), True, 'import numpy as np\n'), ((10076, 10104), 'torch.Tensor', 'torch.Tensor', (['reversed_audio'], {}), '(reversed_audio)\n', (10088, 10104), False, 'import torch\n'), ((575, 584), 'multiprocessing.Manager', 'Manager', ([], {}), '()\n', (582, 584), False, 'from multiprocessing import Manager\n'), ((929, 938), 'multiprocessing.Manager', 'Manager', ([], {}), '()\n', (936, 938), False, 'from multiprocessing import Manager\n'), ((988, 997), 'multiprocessing.Manager', 'Manager', ([], {}), '()\n', (995, 997), False, 'from multiprocessing import Manager\n'), ((1151, 1160), 'multiprocessing.Manager', 'Manager', ([], {}), '()\n', (1158, 1160), False, 'from multiprocessing import Manager\n'), ((3299, 3410), 'PySimpleGUI.Text', 'sg.Text', (['""""""'], {'font': '"""Any 20"""', 'size': '(2000, 1)', 'pad': '((0, 0), (350, 0))', 'justification': '"""center"""', 'key': '"""sentence1"""'}), "('', font='Any 20', size=(2000, 1), pad=((0, 0), (350, 0)),\n justification='center', key='sentence1')\n", (3306, 3410), True, 'import PySimpleGUI as sg\n'), ((3430, 3530), 'PySimpleGUI.Text', 'sg.Text', (['""""""'], {'font': '"""Any 20"""', 'size': '(2000, 1)', 'pad': '(0, 0)', 'justification': '"""center"""', 'key': '"""sentence2"""'}), "('', font='Any 20', size=(2000, 1), pad=(0, 0), justification=\n 'center', key='sentence2')\n", (3437, 3530), True, 'import PySimpleGUI as sg\n'), ((3549, 3659), 'PySimpleGUI.Text', 'sg.Text', 
(['""""""'], {'font': '"""Any 20"""', 'size': '(2000, 1)', 'pad': '((0, 0), (0, 30))', 'justification': '"""center"""', 'key': '"""sentence3"""'}), "('', font='Any 20', size=(2000, 1), pad=((0, 0), (0, 30)),\n justification='center', key='sentence3')\n", (3556, 3659), True, 'import PySimpleGUI as sg\n'), ((3679, 3779), 'PySimpleGUI.Text', 'sg.Text', (['""""""'], {'font': '"""Any 18"""', 'size': '(2000, 1)', 'pad': '(0, 0)', 'justification': '"""center"""', 'key': '"""phonemes1"""'}), "('', font='Any 18', size=(2000, 1), pad=(0, 0), justification=\n 'center', key='phonemes1')\n", (3686, 3779), True, 'import PySimpleGUI as sg\n'), ((3798, 3898), 'PySimpleGUI.Text', 'sg.Text', (['""""""'], {'font': '"""Any 18"""', 'size': '(2000, 1)', 'pad': '(0, 0)', 'justification': '"""center"""', 'key': '"""phonemes2"""'}), "('', font='Any 18', size=(2000, 1), pad=(0, 0), justification=\n 'center', key='phonemes2')\n", (3805, 3898), True, 'import PySimpleGUI as sg\n'), ((3917, 4017), 'PySimpleGUI.Text', 'sg.Text', (['""""""'], {'font': '"""Any 18"""', 'size': '(2000, 1)', 'pad': '(0, 0)', 'justification': '"""center"""', 'key': '"""phonemes3"""'}), "('', font='Any 18', size=(2000, 1), pad=(0, 0), justification=\n 'center', key='phonemes3')\n", (3924, 4017), True, 'import PySimpleGUI as sg\n'), ((4036, 4221), 'PySimpleGUI.Text', 'sg.Text', (['"""Left CTRL-Key for push-to-talk, ESC-Key to exit, ALT-Key to redo the last prompt"""'], {'font': '"""Any 10"""', 'size': '(2000, 1)', 'pad': '((0, 0), (300, 0))', 'justification': '"""center"""'}), "(\n 'Left CTRL-Key for push-to-talk, ESC-Key to exit, ALT-Key to redo the last prompt'\n , font='Any 10', size=(2000, 1), pad=((0, 0), (300, 0)), justification=\n 'center')\n", (4043, 4221), True, 'import PySimpleGUI as sg\n'), ((8825, 8842), 'numpy.hstack', 'np.hstack', (['frames'], {}), '(frames)\n', (8834, 8842), True, 'import numpy as np\n'), ((9483, 9499), 'time.sleep', 'time.sleep', (['(0.01)'], {}), '(0.01)\n', (9493, 9499), False, 'import 
time\n'), ((1485, 1494), 'multiprocessing.Manager', 'Manager', ([], {}), '()\n', (1492, 1494), False, 'from multiprocessing import Manager\n'), ((9946, 9965), 'torch.Tensor', 'torch.Tensor', (['audio'], {}), '(audio)\n', (9958, 9965), False, 'import torch\n'), ((10416, 10637), 'phonemizer.phonemize', 'phonemizer.phonemize', (['text'], {'language_switch': '"""remove-flags"""', 'backend': '"""espeak"""', 'language': 'parameters.phonemizer_language', 'preserve_punctuation': '(True)', 'strip': '(True)', 'punctuation_marks': '""";:,.!?¡¿—…"«»“”~/"""', 'with_stress': '(True)'}), '(text, language_switch=\'remove-flags\', backend=\'espeak\',\n language=parameters.phonemizer_language, preserve_punctuation=True,\n strip=True, punctuation_marks=\';:,.!?¡¿—…"«»“”~/\', with_stress=True)\n', (10436, 10637), False, 'import phonemizer\n')] |
import numpy as np
from ramp_to_hill.hill_system import *
from ramp_systems.ramp_system import RampSystem
import DSGRN
def test_num_unique_vectors():
vectors = [np.array([1,2,3]),np.array([1,1,1]),np.array([1,2,3])]
assert(num_unique_vectors(vectors) == 2)
vectors = [np.array([1,1,1]),np.array([1,2,3]),np.array([1,2,3])]
assert(num_unique_vectors(vectors) == 2)
def test_find_equilibria():
net = DSGRN.Network("X0:~X1 \n X1:~X0")
L = [[0,1],[1,0]]
Delta = [[0,1],[1,0]]
theta = [[0,1.5],[1.5,0]]
gamma = [1,1]
RS = RampSystem(net,L,Delta,theta,gamma)
n = make_hill_coefficient_array(net,50)
sign = make_sign_from_network(net)
HS = HillSystemParameter(net,sign,L,Delta,theta,n,gamma)
## test finding a single equilibrium
eq = find_equilibrium([1,2],HS,100,tol = 1e-3)
assert(eq.shape == (2,))
#large hill coefficient so hill equilibrium should be close to DSGRN equilibrium
assert(np.allclose(eq,np.array([1,2]),atol = 1e-1))
## test finding equilibria from DSGRN fixed points.
FPs = [(0,1),(1,0)]
eq = find_hill_equilibria_from_FPs(FPs,HS,RS,100,tol = 1e-3)
assert(len(eq) == 2)
assert(np.allclose(eq[0],np.array([1,2]),atol = 1e-1))
assert(np.allclose(eq[1],np.array([2,1]),atol = 1e-1))
| [
"numpy.array",
"DSGRN.Network",
"ramp_systems.ramp_system.RampSystem"
] | [((420, 453), 'DSGRN.Network', 'DSGRN.Network', (['"""X0:~X1 \n X1:~X0"""'], {}), "('X0:~X1 \\n X1:~X0')\n", (433, 453), False, 'import DSGRN\n'), ((559, 598), 'ramp_systems.ramp_system.RampSystem', 'RampSystem', (['net', 'L', 'Delta', 'theta', 'gamma'], {}), '(net, L, Delta, theta, gamma)\n', (569, 598), False, 'from ramp_systems.ramp_system import RampSystem\n'), ((166, 185), 'numpy.array', 'np.array', (['[1, 2, 3]'], {}), '([1, 2, 3])\n', (174, 185), True, 'import numpy as np\n'), ((184, 203), 'numpy.array', 'np.array', (['[1, 1, 1]'], {}), '([1, 1, 1])\n', (192, 203), True, 'import numpy as np\n'), ((202, 221), 'numpy.array', 'np.array', (['[1, 2, 3]'], {}), '([1, 2, 3])\n', (210, 221), True, 'import numpy as np\n'), ((281, 300), 'numpy.array', 'np.array', (['[1, 1, 1]'], {}), '([1, 1, 1])\n', (289, 300), True, 'import numpy as np\n'), ((299, 318), 'numpy.array', 'np.array', (['[1, 2, 3]'], {}), '([1, 2, 3])\n', (307, 318), True, 'import numpy as np\n'), ((317, 336), 'numpy.array', 'np.array', (['[1, 2, 3]'], {}), '([1, 2, 3])\n', (325, 336), True, 'import numpy as np\n'), ((971, 987), 'numpy.array', 'np.array', (['[1, 2]'], {}), '([1, 2])\n', (979, 987), True, 'import numpy as np\n'), ((1201, 1217), 'numpy.array', 'np.array', (['[1, 2]'], {}), '([1, 2])\n', (1209, 1217), True, 'import numpy as np\n'), ((1260, 1276), 'numpy.array', 'np.array', (['[2, 1]'], {}), '([2, 1])\n', (1268, 1276), True, 'import numpy as np\n')] |
# MegFlow is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2019-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#!/usr/bin/env python
# coding=utf-8
from loguru import logger
import onnxruntime
import cv2
import numpy as np
def load_onnx_model(onnx_path):
onnx_session = onnxruntime.InferenceSession(onnx_path)
return onnx_session
def get_output_name(onnx_session):
output_name = []
for node in onnx_session.get_outputs():
output_name.append(node.name)
return output_name
def transform(image, target_shape=(960, 960)):
image_height, image_width, _ = image.shape
ratio_h = target_shape[1] * 1.0 / image_height
ratio_w = target_shape[0] * 1.0 / image_width
image = cv2.resize(image, target_shape)
return image, ratio_h, ratio_w
def is_overlap_v1(rect1, rect2, iou_threshold):
xx1 = max(rect1[0], rect2[0])
yy1 = max(rect1[1], rect2[1])
xx2 = min(rect1[2], rect2[2])
yy2 = min(rect1[3], rect2[3])
dx = max(0, xx2 - xx1 + 1)
dy = max(0, yy2 - yy1 + 1)
i = dx * dy
u = (rect1[2] - rect1[0] + 1) * (rect1[3] - rect1[1] + 1) + (
rect2[2] - rect2[0] + 1) * (rect2[3] - rect2[1] + 1) - i
ov = i / u
return ov >= iou_threshold
def raw_nms(boxes, iou_threshold=0.3):
if 0 == len(boxes):
return []
rects = list(boxes)
for i in range(len(rects)):
rects[i] = list(rects[i])
rects[i].append(i)
rects.sort(key=lambda x: x[4], reverse=True)
rect_valid = [True for i in range(len(rects))]
for i in range(len(rects)):
if rect_valid[i]:
j = i + 1
while j < len(rect_valid):
if is_overlap_v1(rects[i], rects[j], iou_threshold):
rect_valid[j] = False
j = j + 1
return [x[5] for i, x in enumerate(rects) if rect_valid[i]]
def onnx_inference(onnx_session, num_classes, image, topk_candidates=1000):
output_name = get_output_name(onnx_session)
image, ratio_h, ratio_w = transform(image)
image = image.astype(np.float32)
image = np.expand_dims(image.transpose((2, 0, 1)), 0)
scores, boxes = onnx_session.run(output_name, input_feed={"input": image})
keep = scores.max(axis=1) > 0.1
scores = scores[keep]
boxes = boxes[keep]
scores = scores.flatten()
# Keep top k top scoring indices only.
num_topk = min(topk_candidates, len(boxes))
# torch.sort is actually faster than .topk (at least on GPUs)
topk_idxs = np.argsort(scores)
scores = scores[topk_idxs][-num_topk:]
topk_idxs = topk_idxs[-num_topk:]
# filter out the proposals with low confidence score
shift_idxs = topk_idxs // num_classes
classes = topk_idxs % num_classes
boxes = boxes[shift_idxs]
boxes[:, 0] /= ratio_w
boxes[:, 1] /= ratio_h
boxes[:, 2] /= ratio_w
boxes[:, 3] /= ratio_h
return boxes, scores, classes
def run(onnx_session, image, class_names, score_thrs, nms_thr=0.6):
num_classes = len(class_names)
import time
t0 = time.time()
boxes, scores, cls_idxs = onnx_inference(onnx_session, num_classes, image)
cost = time.time() - t0
logger.info(f'memd inference: {cost}s')
assert len(boxes) == len(scores) and len(boxes) == len(cls_idxs)
if isinstance(score_thrs, float):
keep = scores > max(score_thrs, 0.2)
else:
score_thrs = np.asarray(score_thrs)
keep = scores > np.maximum(score_thrs[cls_idxs], 0.2)
pred_boxes = np.concatenate(
[boxes, scores[:, np.newaxis], cls_idxs[:, np.newaxis]], axis=1)
pred_boxes = pred_boxes[keep]
all_boxes = []
for cls_idx in range(len(class_names)):
keep_per_cls = pred_boxes[:, -1] == cls_idx
if keep_per_cls.sum() > 0:
pred_boxes_per_cls = pred_boxes[keep_per_cls].astype(np.float32)
keep_idx = raw_nms(pred_boxes_per_cls[:, :5], nms_thr)
for idx in keep_idx:
all_boxes.append(pred_boxes_per_cls[idx])
return all_boxes
| [
"numpy.maximum",
"numpy.concatenate",
"numpy.asarray",
"time.time",
"onnxruntime.InferenceSession",
"numpy.argsort",
"loguru.logger.info",
"cv2.resize"
] | [((518, 557), 'onnxruntime.InferenceSession', 'onnxruntime.InferenceSession', (['onnx_path'], {}), '(onnx_path)\n', (546, 557), False, 'import onnxruntime\n'), ((954, 985), 'cv2.resize', 'cv2.resize', (['image', 'target_shape'], {}), '(image, target_shape)\n', (964, 985), False, 'import cv2\n'), ((2726, 2744), 'numpy.argsort', 'np.argsort', (['scores'], {}), '(scores)\n', (2736, 2744), True, 'import numpy as np\n'), ((3269, 3280), 'time.time', 'time.time', ([], {}), '()\n', (3278, 3280), False, 'import time\n'), ((3392, 3431), 'loguru.logger.info', 'logger.info', (['f"""memd inference: {cost}s"""'], {}), "(f'memd inference: {cost}s')\n", (3403, 3431), False, 'from loguru import logger\n'), ((3720, 3799), 'numpy.concatenate', 'np.concatenate', (['[boxes, scores[:, np.newaxis], cls_idxs[:, np.newaxis]]'], {'axis': '(1)'}), '([boxes, scores[:, np.newaxis], cls_idxs[:, np.newaxis]], axis=1)\n', (3734, 3799), True, 'import numpy as np\n'), ((3371, 3382), 'time.time', 'time.time', ([], {}), '()\n', (3380, 3382), False, 'import time\n'), ((3617, 3639), 'numpy.asarray', 'np.asarray', (['score_thrs'], {}), '(score_thrs)\n', (3627, 3639), True, 'import numpy as np\n'), ((3664, 3701), 'numpy.maximum', 'np.maximum', (['score_thrs[cls_idxs]', '(0.2)'], {}), '(score_thrs[cls_idxs], 0.2)\n', (3674, 3701), True, 'import numpy as np\n')] |
# importing the libraries
import numpy as np
import string
import re
from nltk.corpus import stopwords
from nltk.stem import PorterStemmer
from nltk.tokenize import TweetTokenizer
def process_sentence(sentence) :
"""
Parameters
----------
sentence : a string of words
Returns
-------
clean_sentence : a string of words without having the unessasry words
"""
stemmer = PorterStemmer()
stopwords_english = stopwords.words('english')
# remove stock market tickers like $GE
sentence = re.sub(r'\$\w*', '', sentence)
# remove old style retweet text "RT"
sentence = re.sub(r'^RT[\s]+', '', sentence)
# remove hyperlinks
sentence = re.sub(r'https?:\/\/.*[\r\n]*', '', sentence)
# remove hashtags
# only removing the hash # sign from the word
sentence = re.sub(r'#', '', sentence)
# tokenize tweets
tokenizer = TweetTokenizer(preserve_case=False, strip_handles=True,reduce_len=True)
sentence_tokens = tokenizer.tokenize(sentence)
clean_sentence = []
for word in sentence_tokens:
if (word not in stopwords_english and # remove stopwords
word not in string.punctuation): # remove punctuation
stem_word = stemmer.stem(word) # stemming word
clean_sentence.append(stem_word)
return clean_sentence
def build_freqs(tweets, ys):
"""Build frequencies.
Input:
tweets: a list of sentences
ys: an m x 1 array with the sentiment label of each sentence
(either 0 or 1)
Output:
freqs: a dictionary mapping each (word, sentiment) pair to its
frequency
"""
# Convert np array to list since zip needs an iterable.
# The squeeze is necessary or the list ends up with one element.
# Also note that this is just a NOP if ys is already a list.
yslist = np.squeeze(ys).tolist()
# Start with an empty dictionary and populate it by looping over all tweets
# and over all processed words in each tweet.
freqs = {}
for y, tweet in zip(yslist, tweets):
for word in process_sentence(tweet):
pair = (word, y)
if pair in freqs:
freqs[pair] += 1
else:
freqs[pair] = 1
return freqs | [
"nltk.stem.PorterStemmer",
"nltk.tokenize.TweetTokenizer",
"nltk.corpus.stopwords.words",
"numpy.squeeze",
"re.sub"
] | [((442, 457), 'nltk.stem.PorterStemmer', 'PorterStemmer', ([], {}), '()\n', (455, 457), False, 'from nltk.stem import PorterStemmer\n'), ((483, 509), 'nltk.corpus.stopwords.words', 'stopwords.words', (['"""english"""'], {}), "('english')\n", (498, 509), False, 'from nltk.corpus import stopwords\n'), ((570, 601), 're.sub', 're.sub', (['"""\\\\$\\\\w*"""', '""""""', 'sentence'], {}), "('\\\\$\\\\w*', '', sentence)\n", (576, 601), False, 'import re\n'), ((659, 692), 're.sub', 're.sub', (['"""^RT[\\\\s]+"""', '""""""', 'sentence'], {}), "('^RT[\\\\s]+', '', sentence)\n", (665, 692), False, 'import re\n'), ((734, 782), 're.sub', 're.sub', (['"""https?:\\\\/\\\\/.*[\\\\r\\\\n]*"""', '""""""', 'sentence'], {}), "('https?:\\\\/\\\\/.*[\\\\r\\\\n]*', '', sentence)\n", (740, 782), False, 'import re\n'), ((870, 895), 're.sub', 're.sub', (['"""#"""', '""""""', 'sentence'], {}), "('#', '', sentence)\n", (876, 895), False, 'import re\n'), ((937, 1009), 'nltk.tokenize.TweetTokenizer', 'TweetTokenizer', ([], {'preserve_case': '(False)', 'strip_handles': '(True)', 'reduce_len': '(True)'}), '(preserve_case=False, strip_handles=True, reduce_len=True)\n', (951, 1009), False, 'from nltk.tokenize import TweetTokenizer\n'), ((1956, 1970), 'numpy.squeeze', 'np.squeeze', (['ys'], {}), '(ys)\n', (1966, 1970), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
"""Carry out Z-Score based single sample DE analysis."""
from typing import List
import numpy as np
import pandas as pd
def do_z_score(
data: pd.DataFrame,
design: pd.DataFrame,
control: str = 'Control',
threshold: float = 2.0,
) -> pd.DataFrame:
"""Carry out Z-Score based single sample DE analysis.
:param data: Dataframe containing the gene expression values
:param design: Dataframe containing the design table for the data
:param control: label used for representing the control in the design table of the data
:param threshold: Threshold for choosing patients that are "extreme" w.r.t. the controls.
:return: Dataframe containing the Single Sample scores using Z_Scores
"""
# Check if the control variable is as per the R Naming standards
assert control[0].isalpha(), "Please pass the control indicator contains atleast 1 alphabet."
# Transpose matrix to get the patients as the rows
data = data.transpose()
# Give each label an integer to represent the labels during classification
label_mapping = {
key: val
for val, key in enumerate(np.unique(design['Target']))
}
# Make sure the number of rows of transposed data and design are equal
assert len(data) == len(design)
# Extract the controls from the dataset
controls = data[list(design.Target == control)]
# Calculate the "Z Score" of each individual patient
mean = controls.mean(axis=0)
std = controls.std(axis=0)
z_scores = (data - mean) / std
out_z_scores = z_scores.copy()
# Values that are greater than the 2 sigma or lesser than negative 2 sigma are considered as extremes
out_z_scores[z_scores > threshold] = 1
out_z_scores[z_scores < -threshold] = -1
# Values between upper and lower limit are assigned 0
out_z_scores[(z_scores < threshold) & (z_scores > -threshold)] = 0
df = pd.DataFrame(data=out_z_scores, index=data.index, columns=data.columns)
label = design['Target'].map(label_mapping)
label.reset_index(drop=True, inplace=True)
output_df = df.apply(_bin).copy()
output_df['label'] = label.values
return output_df
def _bin(row: pd.Series) -> List[int]:
"""Replace values greater than 0 as 1 and lesser than 0 as -1."""
return [
1 if (val > 0) else (-1 if (val < 0) else 0)
for val in row
]
| [
"pandas.DataFrame",
"numpy.unique"
] | [((1947, 2018), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': 'out_z_scores', 'index': 'data.index', 'columns': 'data.columns'}), '(data=out_z_scores, index=data.index, columns=data.columns)\n', (1959, 2018), True, 'import pandas as pd\n'), ((1174, 1201), 'numpy.unique', 'np.unique', (["design['Target']"], {}), "(design['Target'])\n", (1183, 1201), True, 'import numpy as np\n')] |
import glob
import os
import numpy as np
from spydiff import import_difmap_model, clean_difmap
from from_fits import create_clean_image_from_fits_file
from utils import hdi_of_mcmc
import matplotlib
label_size = 16
matplotlib.rcParams['pdf.fonttype'] = 42
matplotlib.rcParams['ps.fonttype'] = 42
matplotlib.rcParams['xtick.labelsize'] = label_size
matplotlib.rcParams['ytick.labelsize'] = label_size
matplotlib.rcParams['axes.titlesize'] = label_size
matplotlib.rcParams['axes.labelsize'] = label_size
matplotlib.rcParams['font.size'] = label_size
matplotlib.rcParams['legend.fontsize'] = label_size
# matplotlib.rcParams['text.usetex'] = True
# matplotlib.rcParams['text.latex.unicode'] = True
# matplotlib.rcParams['text.latex.preview'] = True
# matplotlib.rcParams['font.family'] = 'serif'
# matplotlib.rcParams['font.serif'] = 'cm'
import matplotlib.pyplot as plt
base_dir = "/home/ilya/Dropbox/papers/boot/new_pics/corner/new/parametric/1807+698/"
mcmc_samples = os.path.join(base_dir, "samples_of_mcmc.txt")
# (60000, 50)
mcmc = np.loadtxt(mcmc_samples)
mcmc = mcmc[::10, :]
booted_mdl_paths = glob.glob(os.path.join(base_dir, "mdl_booted_*"))
boot_samples = list()
for booted_mdl in booted_mdl_paths:
comps = import_difmap_model(booted_mdl)
comps = sorted(comps, key=lambda x: np.hypot(x.p[1], x.p[2]))
params = list()
for comp in comps:
params.extend(list(comp.p))
boot_samples.append(params)
boot = np.atleast_2d(boot_samples)
cred_mass = 0.68
count = 0
param_n = 2
ratios = list()
distances = list()
fluxes = list()
boot_stds = list()
mcmc_stds = list()
comps = import_difmap_model(os.path.join(base_dir, "new2.mdl"))
comps = sorted(comps, key=lambda x: np.hypot(x.p[1], x.p[2]))
length = sum([comp.size for comp in comps])
for j, comp in enumerate(comps):
hdi_min, hdi_max = hdi_of_mcmc(boot[:, count + param_n], cred_mass=cred_mass)
boot_std = hdi_max - hdi_min
hdi_min, hdi_max = hdi_of_mcmc(mcmc[:, count + param_n], cred_mass=cred_mass)
mcmc_std = hdi_max - hdi_min
# boot_std = np.std(boot[:, count + param_n])
# mcmc_std = np.std(mcmc[:, count + param_n])
count += len(comp)
ratios.append(boot_std/mcmc_std)
boot_stds.append(boot_std)
mcmc_stds.append(mcmc_std)
distances.append(np.hypot(comp.p[1], comp.p[2]))
fluxes.append(comp.p[0])
# boot_stds = np.hypot(boot_std_1, boot_std_2)
# mcmc_stds = np.hypot(mcmc_std_1, mcmc_std_2)
# position_ratios = np.array(boot_stds)/np.array(mcmc_stds)
# np.savetxt(os.path.join(base_dir, "position_ratios.txt"), position_ratios)
# np.savetxt(os.path.join(base_dir, "flux_ratios.txt"), ratios)
# np.savetxt(os.path.join(base_dir, "size_ratios.txt"), ratios)
# np.savetxt(os.path.join(base_dir, "distances.txt"), distances)
# np.savetxt(os.path.join(base_dir, "fluxes.txt"), fluxes)
position_ratios = np.loadtxt(os.path.join(base_dir, "position_ratios.txt"))
flux_ratios = np.loadtxt(os.path.join(base_dir, "flux_ratios.txt"))
size_ratios = np.loadtxt(os.path.join(base_dir, "size_ratios.txt"))
distances = np.loadtxt(os.path.join(base_dir, "distances.txt"))
fluxes = np.loadtxt(os.path.join(base_dir, "fluxes.txt"))
# Get beam
uv_fits = "1807+698.u.2007_07_03.uvf"
path_to_script = '/home/ilya/github/vlbi_errors/difmap/final_clean_nw'
# clean_difmap(uv_fits, "cc.fits", "I", (1024, 0.1), path=base_dir,
# path_to_script=path_to_script, outpath=base_dir)
ccimage = create_clean_image_from_fits_file(os.path.join(base_dir, "cc.fits"))
beam = ccimage.beam
beam = np.sqrt(beam[0]*beam[1])
# First row [:, 0] - for distance dependence
# Second row [:, 1] - for flux dependence
# fig, axes = plt.subplots(3, 2, sharex=True)
fig = plt.figure()
axes00 = fig.add_subplot(3, 2, 1)
axes01 = fig.add_subplot(3, 2, 2, sharey=axes00)
axes10 = fig.add_subplot(3, 2, 3, sharex=axes00)
axes11 = fig.add_subplot(3, 2, 4, sharey=axes10, sharex=axes01)
axes20 = fig.add_subplot(3, 2, 5, sharex=axes00)
axes21 = fig.add_subplot(3, 2, 6, sharex=axes01, sharey=axes20)
axes00.plot(np.array(distances)/beam, flux_ratios, 'o')
# axes.legend(loc='upper right')
# axes.set_xlabel("Distance from phase center, [beam widths]")
axes00.set_ylabel(r"$\sigma_{boot}^{flux}$ / $\sigma_{mcmc}^{flux}$", size=20)
# fig.savefig(os.path.join(base_dir, "flux_std_ratio_vs_distance.pdf"), format="pdf",
# bbox_inches='tight', dpi=600)
# fig, axes = plt.subplots()
axes01.plot(np.array(fluxes), flux_ratios, 'o')
# axes.legend(loc='upper right')
# axes.set_xlabel("Flux of component, [Jy]")
# axes.set_ylabel(r"$\sigma_{boot}^{flux}$ / $\sigma_{mcmc}^{flux}$", size=20)
# fig.savefig(os.path.join(base_dir, "flux_std_ratio_vs_flux.pdf"), format="pdf",
# bbox_inches='tight', dpi=600)
axes10.plot(np.array(distances)/beam, position_ratios, 'o')
axes10.set_ylabel(r"$\sigma_{boot}^{position}$ / $\sigma_{mcmc}^{position}$", size=20)
axes11.plot(np.array(fluxes), position_ratios, 'o')
axes20.plot(np.array(distances)/beam, size_ratios, 'o')
axes20.set_ylabel(r"$\sigma_{boot}^{size}$ / $\sigma_{mcmc}^{size}$", size=20)
axes20.set_xlabel("Distance from phase center, [beam widths]")
axes21.plot(np.array(fluxes), size_ratios, 'o')
axes21.set_xlabel("Flux of component, [Jy]")
fig.subplots_adjust(hspace=0)
# plt.setp([a.get_xticklabels() for a in fig.axes[:-1]], visible=False)
# fig.subplots_adjust(wspace=0)
fig.savefig(os.path.join(base_dir, "boot_to_mcmc.pdf"), format="pdf",
bbox_inches='tight', dpi=600) | [
"numpy.atleast_2d",
"utils.hdi_of_mcmc",
"spydiff.import_difmap_model",
"numpy.hypot",
"matplotlib.pyplot.figure",
"numpy.array",
"numpy.loadtxt",
"os.path.join",
"numpy.sqrt"
] | [((971, 1016), 'os.path.join', 'os.path.join', (['base_dir', '"""samples_of_mcmc.txt"""'], {}), "(base_dir, 'samples_of_mcmc.txt')\n", (983, 1016), False, 'import os\n'), ((1038, 1062), 'numpy.loadtxt', 'np.loadtxt', (['mcmc_samples'], {}), '(mcmc_samples)\n', (1048, 1062), True, 'import numpy as np\n'), ((1441, 1468), 'numpy.atleast_2d', 'np.atleast_2d', (['boot_samples'], {}), '(boot_samples)\n', (1454, 1468), True, 'import numpy as np\n'), ((3514, 3540), 'numpy.sqrt', 'np.sqrt', (['(beam[0] * beam[1])'], {}), '(beam[0] * beam[1])\n', (3521, 3540), True, 'import numpy as np\n'), ((3679, 3691), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (3689, 3691), True, 'import matplotlib.pyplot as plt\n'), ((1114, 1152), 'os.path.join', 'os.path.join', (['base_dir', '"""mdl_booted_*"""'], {}), "(base_dir, 'mdl_booted_*')\n", (1126, 1152), False, 'import os\n'), ((1224, 1255), 'spydiff.import_difmap_model', 'import_difmap_model', (['booted_mdl'], {}), '(booted_mdl)\n', (1243, 1255), False, 'from spydiff import import_difmap_model, clean_difmap\n'), ((1626, 1660), 'os.path.join', 'os.path.join', (['base_dir', '"""new2.mdl"""'], {}), "(base_dir, 'new2.mdl')\n", (1638, 1660), False, 'import os\n'), ((1824, 1882), 'utils.hdi_of_mcmc', 'hdi_of_mcmc', (['boot[:, count + param_n]'], {'cred_mass': 'cred_mass'}), '(boot[:, count + param_n], cred_mass=cred_mass)\n', (1835, 1882), False, 'from utils import hdi_of_mcmc\n'), ((1939, 1997), 'utils.hdi_of_mcmc', 'hdi_of_mcmc', (['mcmc[:, count + param_n]'], {'cred_mass': 'cred_mass'}), '(mcmc[:, count + param_n], cred_mass=cred_mass)\n', (1950, 1997), False, 'from utils import hdi_of_mcmc\n'), ((2851, 2896), 'os.path.join', 'os.path.join', (['base_dir', '"""position_ratios.txt"""'], {}), "(base_dir, 'position_ratios.txt')\n", (2863, 2896), False, 'import os\n'), ((2923, 2964), 'os.path.join', 'os.path.join', (['base_dir', '"""flux_ratios.txt"""'], {}), "(base_dir, 'flux_ratios.txt')\n", (2935, 2964), False, 'import 
os\n'), ((2991, 3032), 'os.path.join', 'os.path.join', (['base_dir', '"""size_ratios.txt"""'], {}), "(base_dir, 'size_ratios.txt')\n", (3003, 3032), False, 'import os\n'), ((3057, 3096), 'os.path.join', 'os.path.join', (['base_dir', '"""distances.txt"""'], {}), "(base_dir, 'distances.txt')\n", (3069, 3096), False, 'import os\n'), ((3118, 3154), 'os.path.join', 'os.path.join', (['base_dir', '"""fluxes.txt"""'], {}), "(base_dir, 'fluxes.txt')\n", (3130, 3154), False, 'import os\n'), ((3452, 3485), 'os.path.join', 'os.path.join', (['base_dir', '"""cc.fits"""'], {}), "(base_dir, 'cc.fits')\n", (3464, 3485), False, 'import os\n'), ((4406, 4422), 'numpy.array', 'np.array', (['fluxes'], {}), '(fluxes)\n', (4414, 4422), True, 'import numpy as np\n'), ((4886, 4902), 'numpy.array', 'np.array', (['fluxes'], {}), '(fluxes)\n', (4894, 4902), True, 'import numpy as np\n'), ((5137, 5153), 'numpy.array', 'np.array', (['fluxes'], {}), '(fluxes)\n', (5145, 5153), True, 'import numpy as np\n'), ((5364, 5406), 'os.path.join', 'os.path.join', (['base_dir', '"""boot_to_mcmc.pdf"""'], {}), "(base_dir, 'boot_to_mcmc.pdf')\n", (5376, 5406), False, 'import os\n'), ((2274, 2304), 'numpy.hypot', 'np.hypot', (['comp.p[1]', 'comp.p[2]'], {}), '(comp.p[1], comp.p[2])\n', (2282, 2304), True, 'import numpy as np\n'), ((4015, 4034), 'numpy.array', 'np.array', (['distances'], {}), '(distances)\n', (4023, 4034), True, 'import numpy as np\n'), ((4739, 4758), 'numpy.array', 'np.array', (['distances'], {}), '(distances)\n', (4747, 4758), True, 'import numpy as np\n'), ((4939, 4958), 'numpy.array', 'np.array', (['distances'], {}), '(distances)\n', (4947, 4958), True, 'import numpy as np\n'), ((1698, 1722), 'numpy.hypot', 'np.hypot', (['x.p[1]', 'x.p[2]'], {}), '(x.p[1], x.p[2])\n', (1706, 1722), True, 'import numpy as np\n'), ((1296, 1320), 'numpy.hypot', 'np.hypot', (['x.p[1]', 'x.p[2]'], {}), '(x.p[1], x.p[2])\n', (1304, 1320), True, 'import numpy as np\n')] |
import random
import numpy as np
def update_target(model, model_target):
model_target.load_state_dict(model.state_dict())
class ReplayBuffer(object):
def __init__(self, capacity):
self.capacity = capacity
self.buffer = []
self.next_idx = 0
def push(self, state, action, reward, next_state, done):
data = (state, action, reward, next_state, done)
if self.next_idx >= len(self.buffer):
self.buffer.append(data)
else:
self.buffer[self.next_idx] = data
self.next_idx = (self.next_idx + 1) % self.capacity
def __len__(self):
return len(self.buffer)
def encode_sample(self, idxes):
obss, acts, rews, nobss, dones = [], [], [], [], []
for i in idxes:
data = self.buffer[i]
obs, act, rew, nobs, done = data
obss.append(np.array(obs, copy=False))
acts.append(np.array(act, copy=False))
rews.append(rew)
nobss.append(np.array(nobs, copy=False))
dones.append(done)
return np.array(obss), np.array(acts), np.array(rews), np.array(nobss), np.array(dones)
def sample(self, batch_size):
idxes = [random.randint(0, len(self.buffer) - 1) for _ in range(batch_size)]
return self.encode_sample(idxes) | [
"numpy.array"
] | [((922, 936), 'numpy.array', 'np.array', (['obss'], {}), '(obss)\n', (930, 936), True, 'import numpy as np\n'), ((938, 952), 'numpy.array', 'np.array', (['acts'], {}), '(acts)\n', (946, 952), True, 'import numpy as np\n'), ((954, 968), 'numpy.array', 'np.array', (['rews'], {}), '(rews)\n', (962, 968), True, 'import numpy as np\n'), ((970, 985), 'numpy.array', 'np.array', (['nobss'], {}), '(nobss)\n', (978, 985), True, 'import numpy as np\n'), ((987, 1002), 'numpy.array', 'np.array', (['dones'], {}), '(dones)\n', (995, 1002), True, 'import numpy as np\n'), ((758, 783), 'numpy.array', 'np.array', (['obs'], {'copy': '(False)'}), '(obs, copy=False)\n', (766, 783), True, 'import numpy as np\n'), ((800, 825), 'numpy.array', 'np.array', (['act'], {'copy': '(False)'}), '(act, copy=False)\n', (808, 825), True, 'import numpy as np\n'), ((863, 889), 'numpy.array', 'np.array', (['nobs'], {'copy': '(False)'}), '(nobs, copy=False)\n', (871, 889), True, 'import numpy as np\n')] |
import numpy as np
c16 = np.complex128()
f8 = np.float64()
i8 = np.int64()
u8 = np.uint64()
c8 = np.complex64()
f4 = np.float32()
i4 = np.int32()
u4 = np.uint32()
dt = np.datetime64(0, "D")
td = np.timedelta64(0, "D")
b_ = np.bool_()
b = bool()
c = complex()
f = float()
i = int()
AR = np.array([0], dtype=np.float64)
AR.setflags(write=False)
# unary ops
reveal_type(-c16) # E: {complex128}
reveal_type(-c8) # E: {complex64}
reveal_type(-f8) # E: {float64}
reveal_type(-f4) # E: {float32}
reveal_type(-i8) # E: {int64}
reveal_type(-i4) # E: {int32}
reveal_type(-u8) # E: {uint64}
reveal_type(-u4) # E: {uint32}
reveal_type(-td) # E: numpy.timedelta64
reveal_type(-AR) # E: Any
reveal_type(+c16) # E: {complex128}
reveal_type(+c8) # E: {complex64}
reveal_type(+f8) # E: {float64}
reveal_type(+f4) # E: {float32}
reveal_type(+i8) # E: {int64}
reveal_type(+i4) # E: {int32}
reveal_type(+u8) # E: {uint64}
reveal_type(+u4) # E: {uint32}
reveal_type(+td) # E: numpy.timedelta64
reveal_type(+AR) # E: Any
reveal_type(abs(c16)) # E: {float64}
reveal_type(abs(c8)) # E: {float32}
reveal_type(abs(f8)) # E: {float64}
reveal_type(abs(f4)) # E: {float32}
reveal_type(abs(i8)) # E: {int64}
reveal_type(abs(i4)) # E: {int32}
reveal_type(abs(u8)) # E: {uint64}
reveal_type(abs(u4)) # E: {uint32}
reveal_type(abs(td)) # E: numpy.timedelta64
reveal_type(abs(b_)) # E: numpy.bool_
reveal_type(abs(AR)) # E: Any
# Time structures
reveal_type(dt + td) # E: numpy.datetime64
reveal_type(dt + i) # E: numpy.datetime64
reveal_type(dt + i4) # E: numpy.datetime64
reveal_type(dt + i8) # E: numpy.datetime64
reveal_type(dt - dt) # E: numpy.timedelta64
reveal_type(dt - i) # E: numpy.datetime64
reveal_type(dt - i4) # E: numpy.datetime64
reveal_type(dt - i8) # E: numpy.datetime64
reveal_type(td + td) # E: numpy.timedelta64
reveal_type(td + i) # E: numpy.timedelta64
reveal_type(td + i4) # E: numpy.timedelta64
reveal_type(td + i8) # E: numpy.timedelta64
reveal_type(td - td) # E: numpy.timedelta64
reveal_type(td - i) # E: numpy.timedelta64
reveal_type(td - i4) # E: numpy.timedelta64
reveal_type(td - i8) # E: numpy.timedelta64
reveal_type(td / f) # E: numpy.timedelta64
reveal_type(td / f4) # E: numpy.timedelta64
reveal_type(td / f8) # E: numpy.timedelta64
reveal_type(td / td) # E: {float64}
reveal_type(td // td) # E: {int64}
# boolean
reveal_type(b_ / b) # E: {float64}
reveal_type(b_ / b_) # E: {float64}
reveal_type(b_ / i) # E: {float64}
reveal_type(b_ / i8) # E: {float64}
reveal_type(b_ / i4) # E: {float64}
reveal_type(b_ / u8) # E: {float64}
reveal_type(b_ / u4) # E: {float64}
reveal_type(b_ / f) # E: {float64}
reveal_type(b_ / f8) # E: {float64}
reveal_type(b_ / f4) # E: {float32}
reveal_type(b_ / c) # E: {complex128}
reveal_type(b_ / c16) # E: {complex128}
reveal_type(b_ / c8) # E: {complex64}
reveal_type(b / b_) # E: {float64}
reveal_type(b_ / b_) # E: {float64}
reveal_type(i / b_) # E: {float64}
reveal_type(i8 / b_) # E: {float64}
reveal_type(i4 / b_) # E: {float64}
reveal_type(u8 / b_) # E: {float64}
reveal_type(u4 / b_) # E: {float64}
reveal_type(f / b_) # E: {float64}
reveal_type(f8 / b_) # E: {float64}
reveal_type(f4 / b_) # E: {float32}
reveal_type(c / b_) # E: {complex128}
reveal_type(c16 / b_) # E: {complex128}
reveal_type(c8 / b_) # E: {complex64}
# Complex
reveal_type(c16 + c16) # E: {complex128}
reveal_type(c16 + f8) # E: {complex128}
reveal_type(c16 + i8) # E: {complex128}
reveal_type(c16 + c8) # E: {complex128}
reveal_type(c16 + f4) # E: {complex128}
reveal_type(c16 + i4) # E: {complex128}
reveal_type(c16 + b_) # E: {complex128}
reveal_type(c16 + b) # E: {complex128}
reveal_type(c16 + c) # E: {complex128}
reveal_type(c16 + f) # E: {complex128}
reveal_type(c16 + i) # E: {complex128}
reveal_type(c16 + AR) # E: Any
reveal_type(c16 + c16) # E: {complex128}
reveal_type(f8 + c16) # E: {complex128}
reveal_type(i8 + c16) # E: {complex128}
reveal_type(c8 + c16) # E: {complex128}
reveal_type(f4 + c16) # E: {complex128}
reveal_type(i4 + c16) # E: {complex128}
reveal_type(b_ + c16) # E: {complex128}
reveal_type(b + c16) # E: {complex128}
reveal_type(c + c16) # E: {complex128}
reveal_type(f + c16) # E: {complex128}
reveal_type(i + c16) # E: {complex128}
reveal_type(AR + c16) # E: Any
reveal_type(c8 + c16) # E: {complex128}
reveal_type(c8 + f8) # E: {complex128}
reveal_type(c8 + i8) # E: {complex128}
reveal_type(c8 + c8) # E: {complex64}
reveal_type(c8 + f4) # E: {complex64}
reveal_type(c8 + i4) # E: {complex64}
reveal_type(c8 + b_) # E: {complex64}
reveal_type(c8 + b) # E: {complex64}
reveal_type(c8 + c) # E: {complex128}
reveal_type(c8 + f) # E: {complex128}
reveal_type(c8 + i) # E: numpy.complexfloating[{_NBitInt}, {_NBitInt}]
reveal_type(c8 + AR) # E: Any
reveal_type(c16 + c8) # E: {complex128}
reveal_type(f8 + c8) # E: {complex128}
reveal_type(i8 + c8) # E: {complex128}
reveal_type(c8 + c8) # E: {complex64}
reveal_type(f4 + c8) # E: {complex64}
reveal_type(i4 + c8) # E: {complex64}
reveal_type(b_ + c8) # E: {complex64}
reveal_type(b + c8) # E: {complex64}
reveal_type(c + c8) # E: {complex128}
reveal_type(f + c8) # E: {complex128}
reveal_type(i + c8) # E: numpy.complexfloating[{_NBitInt}, {_NBitInt}]
reveal_type(AR + c8) # E: Any
# Float
reveal_type(f8 + f8) # E: {float64}
reveal_type(f8 + i8) # E: {float64}
reveal_type(f8 + f4) # E: {float64}
reveal_type(f8 + i4) # E: {float64}
reveal_type(f8 + b_) # E: {float64}
reveal_type(f8 + b) # E: {float64}
reveal_type(f8 + c) # E: {complex128}
reveal_type(f8 + f) # E: {float64}
reveal_type(f8 + i) # E: {float64}
reveal_type(f8 + AR) # E: Any
reveal_type(f8 + f8) # E: {float64}
reveal_type(i8 + f8) # E: {float64}
reveal_type(f4 + f8) # E: {float64}
reveal_type(i4 + f8) # E: {float64}
reveal_type(b_ + f8) # E: {float64}
reveal_type(b + f8) # E: {float64}
reveal_type(c + f8) # E: {complex128}
reveal_type(f + f8) # E: {float64}
reveal_type(i + f8) # E: {float64}
reveal_type(AR + f8) # E: Any
reveal_type(f4 + f8) # E: {float64}
reveal_type(f4 + i8) # E: {float64}
reveal_type(f4 + f4) # E: {float32}
reveal_type(f4 + i4) # E: {float32}
reveal_type(f4 + b_) # E: {float32}
reveal_type(f4 + b) # E: {float32}
reveal_type(f4 + c) # E: {complex128}
reveal_type(f4 + f) # E: {float64}
reveal_type(f4 + i) # E: numpy.floating[{_NBitInt}]
reveal_type(f4 + AR) # E: Any
reveal_type(f8 + f4) # E: {float64}
reveal_type(i8 + f4) # E: {float64}
reveal_type(f4 + f4) # E: {float32}
reveal_type(i4 + f4) # E: {float32}
reveal_type(b_ + f4) # E: {float32}
reveal_type(b + f4) # E: {float32}
reveal_type(c + f4) # E: {complex128}
reveal_type(f + f4) # E: {float64}
reveal_type(i + f4) # E: numpy.floating[{_NBitInt}]
reveal_type(AR + f4) # E: Any
# Int
reveal_type(i8 + i8) # E: {int64}
reveal_type(i8 + u8) # E: Union[numpy.signedinteger[Any], {float64}]
reveal_type(i8 + i4) # E: {int64}
reveal_type(i8 + u4) # E: Union[numpy.signedinteger[Any], {float64}]
reveal_type(i8 + b_) # E: {int64}
reveal_type(i8 + b) # E: {int64}
reveal_type(i8 + c) # E: {complex128}
reveal_type(i8 + f) # E: {float64}
reveal_type(i8 + i) # E: {int64}
reveal_type(i8 + AR) # E: Any
reveal_type(u8 + u8) # E: {uint64}
reveal_type(u8 + i4) # E: Union[numpy.signedinteger[Any], {float64}]
reveal_type(u8 + u4) # E: {uint64}
reveal_type(u8 + b_) # E: {uint64}
reveal_type(u8 + b) # E: {uint64}
reveal_type(u8 + c) # E: {complex128}
reveal_type(u8 + f) # E: {float64}
reveal_type(u8 + i) # E: Union[numpy.signedinteger[Any], {float64}]
reveal_type(u8 + AR) # E: Any
reveal_type(i8 + i8) # E: {int64}
reveal_type(u8 + i8) # E: Union[numpy.signedinteger[Any], {float64}]
reveal_type(i4 + i8) # E: {int64}
reveal_type(u4 + i8) # E: Union[numpy.signedinteger[Any], {float64}]
reveal_type(b_ + i8) # E: {int64}
reveal_type(b + i8) # E: {int64}
reveal_type(c + i8) # E: {complex128}
reveal_type(f + i8) # E: {float64}
reveal_type(i + i8) # E: {int64}
reveal_type(AR + i8) # E: Any
reveal_type(u8 + u8) # E: {uint64}
reveal_type(i4 + u8) # E: Union[numpy.signedinteger[Any], {float64}]
reveal_type(u4 + u8) # E: {uint64}
reveal_type(b_ + u8) # E: {uint64}
reveal_type(b + u8) # E: {uint64}
reveal_type(c + u8) # E: {complex128}
reveal_type(f + u8) # E: {float64}
reveal_type(i + u8) # E: Union[numpy.signedinteger[Any], {float64}]
reveal_type(AR + u8) # E: Any
reveal_type(i4 + i8) # E: {int64}
reveal_type(i4 + i4) # E: {int32}
reveal_type(i4 + i) # E: {int_}
reveal_type(i4 + b_) # E: {int32}
reveal_type(i4 + b) # E: {int32}
reveal_type(i4 + AR) # E: Any
reveal_type(u4 + i8) # E: Union[numpy.signedinteger[Any], {float64}]
reveal_type(u4 + i4) # E: Union[numpy.signedinteger[Any], {float64}]
reveal_type(u4 + u8) # E: {uint64}
reveal_type(u4 + u4) # E: {uint32}
reveal_type(u4 + i) # E: Union[numpy.signedinteger[Any], {float64}]
reveal_type(u4 + b_) # E: {uint32}
reveal_type(u4 + b) # E: {uint32}
reveal_type(u4 + AR) # E: Any
reveal_type(i8 + i4) # E: {int64}
reveal_type(i4 + i4) # E: {int32}
reveal_type(i + i4) # E: {int_}
reveal_type(b_ + i4) # E: {int32}
reveal_type(b + i4) # E: {int32}
reveal_type(AR + i4) # E: Any
reveal_type(i8 + u4) # E: Union[numpy.signedinteger[Any], {float64}]
reveal_type(i4 + u4) # E: Union[numpy.signedinteger[Any], {float64}]
reveal_type(u8 + u4) # E: {uint64}
reveal_type(u4 + u4) # E: {uint32}
reveal_type(b_ + u4) # E: {uint32}
reveal_type(b + u4) # E: {uint32}
reveal_type(i + u4) # E: Union[numpy.signedinteger[Any], {float64}]
reveal_type(AR + u4) # E: Any
| [
"numpy.uint32",
"numpy.bool_",
"numpy.uint64",
"numpy.complex128",
"numpy.datetime64",
"numpy.float32",
"numpy.timedelta64",
"numpy.array",
"numpy.complex64",
"numpy.int64",
"numpy.int32",
"numpy.float64"
] | [((26, 41), 'numpy.complex128', 'np.complex128', ([], {}), '()\n', (39, 41), True, 'import numpy as np\n'), ((47, 59), 'numpy.float64', 'np.float64', ([], {}), '()\n', (57, 59), True, 'import numpy as np\n'), ((65, 75), 'numpy.int64', 'np.int64', ([], {}), '()\n', (73, 75), True, 'import numpy as np\n'), ((81, 92), 'numpy.uint64', 'np.uint64', ([], {}), '()\n', (90, 92), True, 'import numpy as np\n'), ((99, 113), 'numpy.complex64', 'np.complex64', ([], {}), '()\n', (111, 113), True, 'import numpy as np\n'), ((119, 131), 'numpy.float32', 'np.float32', ([], {}), '()\n', (129, 131), True, 'import numpy as np\n'), ((137, 147), 'numpy.int32', 'np.int32', ([], {}), '()\n', (145, 147), True, 'import numpy as np\n'), ((153, 164), 'numpy.uint32', 'np.uint32', ([], {}), '()\n', (162, 164), True, 'import numpy as np\n'), ((171, 192), 'numpy.datetime64', 'np.datetime64', (['(0)', '"""D"""'], {}), "(0, 'D')\n", (184, 192), True, 'import numpy as np\n'), ((198, 220), 'numpy.timedelta64', 'np.timedelta64', (['(0)', '"""D"""'], {}), "(0, 'D')\n", (212, 220), True, 'import numpy as np\n'), ((227, 237), 'numpy.bool_', 'np.bool_', ([], {}), '()\n', (235, 237), True, 'import numpy as np\n'), ((292, 323), 'numpy.array', 'np.array', (['[0]'], {'dtype': 'np.float64'}), '([0], dtype=np.float64)\n', (300, 323), True, 'import numpy as np\n')] |
import numpy as np
from respy.python.evaluate.evaluate_auxiliary import create_draws_and_prob_wages
from respy.python.evaluate.evaluate_auxiliary import (
simulate_probability_of_agents_observed_choice,
)
from respy.python.shared.shared_auxiliary import get_conditional_probabilities
def pyth_contributions(state_space, data, periods_draws_prob, tau, optim_paras):
"""Calculate the likelihood contribution of each individual in the sample.
The function calculates all likelihood contributions for all observations in the
data which means all individual-period-type combinations. Then, likelihoods are
accumulated within each individual and type over all periods. After that, the result
is multiplied with the type-specific shares which yields the contribution to the
likelihood for each individual.
Parameters
----------
state_space : class
Class of state space.
data : pd.DataFrame
DataFrame with the empirical dataset.
periods_draws_prob : np.ndarray
Array with shape (num_periods, num_draws_prob, num_choices) containing i.i.d.
draws from standard normal distributions.
tau : float
Smoothing parameter for choice probabilities.
optim_paras : dict
Dictionary with quantities that were extracted from the parameter vector.
Returns
-------
contribs : np.ndarray
Array with shape (num_agents,) containing contributions of estimated agents.
"""
if np.count_nonzero(optim_paras["shocks_cholesky"]) == 0:
return np.ones(data.Identifier.unique().shape[0])
# Convert data to np.ndarray. Separate wages from other characteristics as they need
# to be integers.
agents = data[
[
"Period",
"Experience_A",
"Experience_B",
"Years_Schooling",
"Lagged_Choice",
"Choice",
]
].values.astype(int)
wages_observed = data["Wage"].values
# Get the number of observations for each individual and an array with indices of
# each individual's first observation. After that, extract initial education levels
# per agent which are important for type-specific probabilities.
num_obs_per_agent = np.bincount(data.Identifier.values)
idx_agents_first_observation = np.hstack((0, np.cumsum(num_obs_per_agent)[:-1]))
agents_initial_education_levels = agents[idx_agents_first_observation, 3]
# Update type-specific probabilities conditional on whether the initial level of
# education is greater than nine.
type_shares = get_conditional_probabilities(
optim_paras["type_shares"], agents_initial_education_levels
)
# Extract observable components of the state space and agent's decision.
periods, exp_as, exp_bs, edus, choices_lagged, choices = (
agents[:, i] for i in range(6)
)
# Get indices of states in the state space corresponding to all observations for all
# types. The indexer has the shape (num_obs, num_types).
ks = state_space.indexer[periods, exp_as, exp_bs, edus, choices_lagged - 1, :]
# Reshape periods, choices and wages_observed so that they match the shape (num_obs,
# num_types) of the indexer.
periods = state_space.states[ks, 0]
choices = choices.repeat(state_space.num_types).reshape(-1, state_space.num_types)
wages_observed = wages_observed.repeat(state_space.num_types).reshape(
-1, state_space.num_types
)
wages_systematic = state_space.rewards[ks, -2:]
# Adjust the draws to simulate the expected maximum utility and calculate the
# probability of observing the wage.
draws, prob_wages = create_draws_and_prob_wages(
wages_observed,
wages_systematic,
periods,
periods_draws_prob,
choices,
optim_paras["shocks_cholesky"],
)
# Simulate the probability of observing the choice of the individual.
prob_choices = simulate_probability_of_agents_observed_choice(
state_space.rewards[ks, -2:],
state_space.rewards[ks, :4],
state_space.emaxs[ks, :4],
draws,
optim_paras["delta"],
state_space.states[ks, 3] >= state_space.edu_max,
choices - 1,
tau,
)
# Multiply the probability of the agent's choice with the probability of wage and
# average over all draws to get the probability of the observation.
prob_obs = (prob_choices * prob_wages).mean(axis=2)
# Accumulate the likelihood of observations for each individual-type combination
# over all periods.
prob_type = np.multiply.reduceat(prob_obs, idx_agents_first_observation)
# Multiply each individual-type contribution with its type-specific shares and sum
# over types to get the likelihood contribution for each individual.
contribs = (prob_type * type_shares).sum(axis=1)
return contribs
| [
"numpy.count_nonzero",
"respy.python.evaluate.evaluate_auxiliary.simulate_probability_of_agents_observed_choice",
"respy.python.evaluate.evaluate_auxiliary.create_draws_and_prob_wages",
"numpy.multiply.reduceat",
"numpy.cumsum",
"numpy.bincount",
"respy.python.shared.shared_auxiliary.get_conditional_pro... | [((2243, 2278), 'numpy.bincount', 'np.bincount', (['data.Identifier.values'], {}), '(data.Identifier.values)\n', (2254, 2278), True, 'import numpy as np\n'), ((2584, 2678), 'respy.python.shared.shared_auxiliary.get_conditional_probabilities', 'get_conditional_probabilities', (["optim_paras['type_shares']", 'agents_initial_education_levels'], {}), "(optim_paras['type_shares'],\n agents_initial_education_levels)\n", (2613, 2678), False, 'from respy.python.shared.shared_auxiliary import get_conditional_probabilities\n'), ((3674, 3809), 'respy.python.evaluate.evaluate_auxiliary.create_draws_and_prob_wages', 'create_draws_and_prob_wages', (['wages_observed', 'wages_systematic', 'periods', 'periods_draws_prob', 'choices', "optim_paras['shocks_cholesky']"], {}), "(wages_observed, wages_systematic, periods,\n periods_draws_prob, choices, optim_paras['shocks_cholesky'])\n", (3701, 3809), False, 'from respy.python.evaluate.evaluate_auxiliary import create_draws_and_prob_wages\n'), ((3955, 4196), 'respy.python.evaluate.evaluate_auxiliary.simulate_probability_of_agents_observed_choice', 'simulate_probability_of_agents_observed_choice', (['state_space.rewards[ks, -2:]', 'state_space.rewards[ks, :4]', 'state_space.emaxs[ks, :4]', 'draws', "optim_paras['delta']", '(state_space.states[ks, 3] >= state_space.edu_max)', '(choices - 1)', 'tau'], {}), "(state_space.rewards[ks, -2:],\n state_space.rewards[ks, :4], state_space.emaxs[ks, :4], draws,\n optim_paras['delta'], state_space.states[ks, 3] >= state_space.edu_max,\n choices - 1, tau)\n", (4001, 4196), False, 'from respy.python.evaluate.evaluate_auxiliary import simulate_probability_of_agents_observed_choice\n'), ((4597, 4657), 'numpy.multiply.reduceat', 'np.multiply.reduceat', (['prob_obs', 'idx_agents_first_observation'], {}), '(prob_obs, idx_agents_first_observation)\n', (4617, 4657), True, 'import numpy as np\n'), ((1485, 1533), 'numpy.count_nonzero', 
'np.count_nonzero', (["optim_paras['shocks_cholesky']"], {}), "(optim_paras['shocks_cholesky'])\n", (1501, 1533), True, 'import numpy as np\n'), ((2328, 2356), 'numpy.cumsum', 'np.cumsum', (['num_obs_per_agent'], {}), '(num_obs_per_agent)\n', (2337, 2356), True, 'import numpy as np\n')] |
__author__ = "<NAME>"
# This file contains functions for generating figures used in exploratory analysis of
# iCGM Sensitivity Analysis results.
#
# This includes a number of different visualization for checking different aspects of the results
# and all of the figures that were previously made for the non-pairwise version of this analysis.
#
# The relevant code for final report figures has
# been moved to icgm_sensitivity_analysis_report_figures_and_tables.py. The code
# in this longer exploratory analysis file is no longer being maintained but
# has been left as is, in case a similar exploratory
# analysis is conducted at some point or these visualization functions are useful for
# adding to a data science viz-tools.
#
# %% REQUIRED LIBRARIES
import os
import pandas as pd
import numpy as np
import plotly.graph_objects as go
import plotly.express as px
import datetime as dt
import itertools
from src.visualization.save_view_fig import save_view_fig
import json
from scipy import stats
import tidepool_data_science_metrics as metrics
from plotly.subplots import make_subplots
import plotly.figure_factory as ff
utc_string = dt.datetime.utcnow().strftime("%Y-%m-%d-%H-%m-%S")
# Calculate MBE and MARD
# (https://github.com/tidepool-org/icgm-sensitivity-analysis/blob/jameno/analysis-tables/src/simulator_functions.py)
def add_error_fields(df):
"""
Parameters
----------
df: dataframe
dataframe to add error fields to (for use in MARD and MBE calculations)
Returns
-------
df: dataframe
dataframe with new error field columns
"""
# default icgm and ysi ranges [40, 400] and [0, 900]
sensor_bg_range = (40, 400)
bg_range = (0, 900)
sensor_min, sensor_max = sensor_bg_range
bg_min, bg_max = bg_range
# calculate the icgm error (difference and percentage)
sensor_bg_values = df["bg_sensor"].values
bg_values = df["bg"].values
icgm_error = sensor_bg_values - bg_values
df["icgmError"] = icgm_error
abs_difference_error = np.abs(icgm_error)
df["absError"] = abs_difference_error
df["absRelDiff"] = 100 * abs_difference_error / bg_values
df["withinMeasRange"] = (sensor_bg_values >= sensor_min) & (
sensor_bg_values <= sensor_max
)
return df
def calc_mbe(df):
"""
Calculate mean bias
Parameters
----------
df: dataframe
dataframe to calculate mean bias error (MBE) from
Returns
-------
mean bias error calculation
"""
# Default icgm and ysi ranges [40, 400] and [0, 900]
df = add_error_fields(df)
return np.mean(df.loc[df["withinMeasRange"], "icgmError"])
def calc_mard(df):
"""
Calculate Mean Absolute Relative Deviation (MARD)
https://www.ncbi.nlm.nih.gov/pmc/articles/PMC5375072/
Parameters
----------
df: dataframe
dataframe to calculate mean absolute relative deviation (MARD) from
Returns
-------
mard calculation
"""
df = add_error_fields(df)
abs_relative_difference_in_measurement_range = df.loc[
df["withinMeasRange"], "absRelDiff"
]
return np.mean(abs_relative_difference_in_measurement_range)
# Parse out simulation id
def get_sim_id(patient_characteristics_df, filename):
"""
Parse out simulation ID from the filename and patient characteristics data frame
Parameters
----------
patient_characteristics_df: dataframe
dataframe of patient characteristics
filename: str
filename corresponding to the simulation
Returns
-------
"""
sensor_num = (
filename.split("/")[-1]
.split(".")[2]
.replace("s", "")
.replace("Senor", "Sensor")
)
vp_id = (
patient_characteristics_df["patient_scenario_filename"]
.iloc[0]
.split("/")[-1]
.split(".")[0]
.replace("train_", "")
)
bg_test_condition = filename.split(".")[1]
analysis_type = filename.split(".")[3]
sim_id = (
"vp"
+ str(vp_id)
+ ".bg"
+ ".s"
+ str(sensor_num)
+ "."
+ str(bg_test_condition)
+ "."
+ analysis_type
)
return sim_id
def get_data_old_format(
filename, simulation_df, patient_characteristics_df, sensor_characteristics_df=""
):
"""
Returns a list of data for simulation dataframes that are in the old data format.
Parameters
----------
filename: str
name of file corresponding
simulation_df: dataframe
dataframe of the particular simulation want to return data for
patient_characteristics_df: dataframe
dataframe of patient characteristics
sensor_characteristics_df: dataframe
dataframe of sensor characteristics
Returns
-------
list of data items that will be a row in aggregated summary dataframe
"""
sim_id = get_sim_id(patient_characteristics_df, filename)
virtual_patient_num = "vp" + str(
patient_characteristics_df["patient_scenario_filename"]
.iloc[0]
.split("/")[-1]
.split(".")[0]
.replace("train_", "")
)
sensor_num = (
filename.split("/")[-1]
.split(".")[2]
.replace("s", "")
.replace("Senor", "Sensor")
)
patient_scenario_filename = (
patient_characteristics_df["patient_scenario_filename"].iloc[0].split("/")[-1]
)
age = patient_characteristics_df["age"].iloc[0]
ylw = patient_characteristics_df["ylw"].iloc[0]
cir = simulation_df["cir"].iloc[0]
isf = simulation_df["isf"].iloc[0]
sbr = simulation_df["sbr"].iloc[0]
starting_bg = simulation_df["bg"].iloc[0]
starting_bg_sensor = simulation_df["bg_sensor"].iloc[0]
true_bolus = simulation_df["true_bolus"].iloc[1]
if "IdealSensor" in filename:
initial_bias = np.nan
bias_norm_factor = np.nan
bias_drift_oscillations = np.nan
bias_drift_range_start = np.nan
bias_drift_range_end = np.nan
noise_coefficient = np.nan
mard = np.nan
mbe = np.nan
else:
initial_bias = sensor_characteristics_df["initial_bias"].iloc[0]
bias_norm_factor = sensor_characteristics_df["bias_norm_factor"].iloc[0]
bias_drift_oscillations = sensor_characteristics_df[
"bias_drift_oscillations"
].iloc[0]
bias_drift_range_start = sensor_characteristics_df[
"bias_drift_range_start"
].iloc[0]
bias_drift_range_end = sensor_characteristics_df["bias_drift_range_end"].iloc[0]
noise_coefficient = sensor_characteristics_df["noise_coefficient"].iloc[0]
mard = calc_mard(simulation_df)
mbe = calc_mbe(simulation_df)
delay = np.nan
bias_drift_type = np.nan
bias_type = np.nan
noise_per_sensor = np.nan
noise = np.nan
bias_factor = np.nan
phi_drift = np.nan
drift_multiplier = np.nan
drift_multiplier_start = np.nan
drift_multiplier_end = np.nan
noise_max = np.nan
bg_test_condition = filename.split(".")[1].replace("bg", "")
analysis_type = filename.split(".")[3]
LBGI = metrics.glucose.blood_glucose_risk_index(bg_array=simulation_df["bg"])[0]
LBGI_RS = metrics.glucose.lbgi_risk_score(LBGI)
DKAI = metrics.insulin.dka_index(simulation_df["iob"], simulation_df["sbr"].iloc[0])
DKAI_RS = metrics.insulin.dka_risk_score(DKAI)
HBGI = metrics.glucose.blood_glucose_risk_index(bg_array=simulation_df["bg"])[1]
BGRI = metrics.glucose.blood_glucose_risk_index(bg_array=simulation_df["bg"])[2]
percent_lt_54 = metrics.glucose.percent_values_lt_54(bg_array=simulation_df["bg"])
return [
filename,
sim_id,
virtual_patient_num,
sensor_num,
patient_scenario_filename,
age,
ylw,
cir,
isf,
sbr,
starting_bg,
starting_bg_sensor,
true_bolus,
initial_bias,
bias_norm_factor,
bias_drift_oscillations,
bias_drift_range_start,
bias_drift_range_end,
noise_coefficient,
delay,
bias_drift_type,
bias_type,
noise_per_sensor,
noise,
bias_factor,
phi_drift,
drift_multiplier,
drift_multiplier_start,
drift_multiplier_end,
noise_max,
mard,
mbe,
bg_test_condition,
analysis_type,
LBGI,
LBGI_RS,
DKAI,
DKAI_RS,
HBGI,
BGRI,
percent_lt_54,
]
def get_data(
    filename, simulation_df, simulation_characteristics_json_data, baseline=False
):
    """
    Return one summary row (as a list) for a simulation in the new file format.

    Parameters
    ----------
    filename: str
        name of the simulation file (encodes bg test condition, sensor num, analysis type)
    simulation_df: dataframe
        time-series dataframe of the particular simulation
    simulation_characteristics_json_data: dict
        json simulation characteristics corresponding to that simulation
    baseline: bool
        whether this particular file is a baseline (ideal sensor) file

    Returns
    -------
    list of data items that will be a row in the aggregated summary dataframe
    """
    sim_id = simulation_characteristics_json_data["sim_id"]
    virtual_patient_num = sim_id.split(".")[0]
    sensor_num = filename.split(".")[2]
    patient_scenario_filename = filename.split(".")[0]
    controller_config = simulation_characteristics_json_data["controller"]["config"]
    age = controller_config["age"]
    ylw = controller_config["ylw"]
    patient_config = simulation_characteristics_json_data["patient"]["config"]
    cir = patient_config["carb_ratio_schedule"]["schedule"][0]["setting"]
    isf = patient_config["insulin_sensitivity_schedule"]["schedule"][0]["setting"]
    sbr = patient_config["basal_schedule"]["schedule"][0]["setting"]
    starting_bg = simulation_df["bg"].iloc[0]
    starting_bg_sensor = simulation_df["bg_sensor"].iloc[0]
    true_bolus = simulation_df["true_bolus"].iloc[1]
    # Sensor characteristic keys, in the exact column order of the returned row.
    sensor_keys = [
        "initial_bias",
        "bias_norm_factor",
        "bias_drift_oscillations",
        "bias_drift_range_start",
        "bias_drift_range_end",
        "noise_coefficient",
        "delay",
        "bias_drift_type",
        "bias_type",
        "noise_per_sensor",
        "noise",
        "bias_factor",
        "phi_drift",
        "drift_multiplier",
        "drift_multiplier_start",
        "drift_multiplier_end",
        "noise_max",
    ]
    if baseline:
        # Baseline (ideal sensor) runs have no sensor characteristics and no
        # sensor-error metrics.
        sensor_values = [np.nan] * len(sensor_keys)
        mard = np.nan
        mbe = np.nan
    else:
        sensor_config = simulation_characteristics_json_data["patient"]["sensor"]
        sensor_values = [sensor_config[key] for key in sensor_keys]
        mard = calc_mard(simulation_df)
        mbe = calc_mbe(simulation_df)
    bg_test_condition = filename.split(".")[1].replace("bg", "")
    analysis_type = filename.split(".")[3]
    # blood_glucose_risk_index yields (LBGI, HBGI, BGRI) at indices 0/1/2;
    # compute it once instead of three times.
    risk_indices = metrics.glucose.blood_glucose_risk_index(
        bg_array=simulation_df["bg"]
    )
    LBGI, HBGI, BGRI = risk_indices[0], risk_indices[1], risk_indices[2]
    LBGI_RS = metrics.glucose.lbgi_risk_score(LBGI)
    DKAI = metrics.insulin.dka_index(simulation_df["iob"], simulation_df["sbr"].iloc[0])
    DKAI_RS = metrics.insulin.dka_risk_score(DKAI)
    percent_lt_54 = metrics.glucose.percent_values_lt_54(bg_array=simulation_df["bg"])
    return (
        [
            filename,
            sim_id,
            virtual_patient_num,
            sensor_num,
            patient_scenario_filename,
            age,
            ylw,
            cir,
            isf,
            sbr,
            starting_bg,
            starting_bg_sensor,
            true_bolus,
        ]
        + sensor_values
        + [
            mard,
            mbe,
            bg_test_condition,
            analysis_type,
            LBGI,
            LBGI_RS,
            DKAI,
            DKAI_RS,
            HBGI,
            BGRI,
            percent_lt_54,
        ]
    )
# %% Visualization Functions
# %% FUNCTIONS
# TODO: us mypy and specify the types
# Timestamp used to tag generated figure/table filenames.
# BUGFIX: the original format string was "%Y-%m-%d-%H-%m-%S", repeating %m
# (month) where the minute directive %M was clearly intended.
utc_string = dt.datetime.utcnow().strftime("%Y-%m-%d-%H-%M-%S")
# TODO: automatically grab the code version to add to the figures generated
code_version = "v0-1-0"
# Adding in some generic methods for tables based on bins
def bin_data(bin_breakpoints):
    """
    Build a left-closed IntervalIndex from an ordered set of breakpoints.

    Parameters
    ----------
    bin_breakpoints: array-like
        Ordered breakpoints; each resulting interval is [left, right).

    Returns
    -------
    pd.IntervalIndex
        Left-closed intervals spanning consecutive breakpoint pairs.
    """
    # closed="left" means each bin contains values >= its lower breakpoint
    # and < its upper breakpoint.
    intervals = pd.IntervalIndex.from_breaks(bin_breakpoints, closed="left")
    return intervals
def _make_demographic_count_table(
    demographic_reduced_df,
    column,
    bin_breakpoints,
    index_name,
    table_name,
    cell_width,
    fig_path,
):
    """Count *column* values per bin (plus Missing/Total rows) and save the table.

    Parameters
    ----------
    demographic_reduced_df: dataframe
        one row per virtual patient with the demographic columns
    column: str
        column of demographic_reduced_df to bin and count
    bin_breakpoints: array-like
        ordered breakpoints passed to bin_data (left-closed bins)
    index_name: str
        display name for the bin column in the rendered table
    table_name: str
        name used when saving the table
    cell_width: sized
        column widths forwarded to make_table
    fig_path: str
        directory the table is saved into
    """
    bins = bin_data(bin_breakpoints)
    count_table = pd.DataFrame(index=bins.astype("str"))
    count_table.index.name = index_name
    # Cut the data by bin; grouping by the cut Series yields one count per bin,
    # in the same categorical order as the table index built above.
    binned_values = pd.cut(demographic_reduced_df[column], bins)
    count_table["Count"] = (
        demographic_reduced_df.groupby(binned_values)[column].count().values
    )
    # NaN values are reported in a separate "Missing" row.
    count_table.loc["Missing", "Count"] = demographic_reduced_df[column].isnull().sum()
    # Sanity check: every subject is counted exactly once.
    # TODO: make a test that checks that the total subjects equal the total counts in the table
    total_virtual_subjects_from_table = count_table["Count"].sum()
    assert total_virtual_subjects_from_table == len(demographic_reduced_df)
    count_table.loc["Total", "Count"] = total_virtual_subjects_from_table
    count_table.reset_index(inplace=True)
    make_table(
        count_table,
        table_name=table_name,
        analysis_name="icgm-sensitivity-analysis",
        cell_height=[30],
        cell_width=cell_width,
        image_type="png",
        view_fig=True,
        save_fig=True,
        save_fig_path=fig_path,
    )


def get_metadata_tables(demographic_df, fig_path):
    """
    Create and save demographic breakdown tables (age, ylw, CIR, ISF, SBR).

    Parameters
    ----------
    demographic_df: dataframe
        dataframe of demographic characteristics (age, ylw) for patient corresponding to simulation
    fig_path: str
        filepath of where to save the tables

    Returns
    -------
    """
    # Reduce to one row per virtual patient: median across that patient's rows.
    virtual_patient_group = demographic_df.groupby("virtual_patient_num")
    demographic_reduced_df = virtual_patient_group[
        ["age", "ylw", "CIR", "ISF", "SBR"]
    ].median()
    # Negative age / years-living-with values are sentinels; treat as missing.
    demographic_reduced_df[demographic_reduced_df < 0] = np.nan

    # Age breakdown
    _make_demographic_count_table(
        demographic_reduced_df,
        "age",
        np.array([0, 7, 14, 25, 50, 100]),
        index_name="Age (years old)",
        table_name="age-table",
        cell_width=[150],
        fig_path=fig_path,
    )
    # Years living with T1D (ylw) breakdown
    _make_demographic_count_table(
        demographic_reduced_df,
        "ylw",
        np.array([0, 1, 5, 100]),
        index_name="T1D Duration (years)",
        table_name="ylw-table",
        cell_width=[200, 150],
        fig_path=fig_path,
    )
    # Carb-to-insulin ratio breakdown (outer bin edges come from the data)
    cir_bin_breakpoints = np.array(
        [
            demographic_reduced_df["CIR"].min(),
            5,
            10,
            15,
            20,
            25,
            demographic_reduced_df["CIR"].max() + 1,
        ]
    ).astype(int)
    _make_demographic_count_table(
        demographic_reduced_df,
        "CIR",
        cir_bin_breakpoints,
        index_name="Carb-to-Insulin-Ratio",
        table_name="cir-table",
        cell_width=[200, 150],
        fig_path=fig_path,
    )
    # Insulin sensitivity factor breakdown (outer bin edges come from the data)
    isf_bin_breakpoints = np.array(
        [
            np.min([demographic_reduced_df["ISF"].min(), 5]),
            10,
            25,
            50,
            75,
            100,
            200,
            np.max([400, demographic_reduced_df["ISF"].max() + 1]),
        ]
    ).astype(int)
    _make_demographic_count_table(
        demographic_reduced_df,
        "ISF",
        isf_bin_breakpoints,
        index_name="Insulin Sensitivity Factor",
        table_name="isf-table",
        cell_width=[250, 150],
        fig_path=fig_path,
    )
    # Scheduled basal rate breakdown: 0.25-wide bins up to 1.5, then 0.5-wide.
    br_bin_breakpoints = np.append(
        np.arange(0, 1.5, 0.25),
        np.arange(1.5, demographic_reduced_df["SBR"].max() + 0.5, 0.5),
    )
    _make_demographic_count_table(
        demographic_reduced_df,
        "SBR",
        br_bin_breakpoints,
        index_name="Basal Rate",
        table_name="br-table",
        cell_width=[200, 150],
        fig_path=fig_path,
    )
def make_table(
    table_df,
    image_type="png",
    table_name="table-<number-or-name>",
    analysis_name="analysis-<name>",
    cell_height=(30,),
    cell_width=(150,),
    cell_header_height=(30,),
    view_fig=True,
    save_fig=True,
    save_csv=True,
    save_fig_path=os.path.join("..", "..", "reports", "figures"),
):
    """
    Render *table_df* as a plotly table figure and optionally save it (and a csv).

    Parameters
    ----------
    table_df: dataframe
        dataframe for making the table from
    image_type: str
        file type ("png","jpg","pdf", etc.) to save image as
    table_name: str
        name to use for the table when saving the figure
    analysis_name: str
        name of the analysis this table is associated with
    cell_height: sized
        height of the cells in the table (only the first entry is used).
        NOTE: defaults changed from lists to tuples to avoid the
        mutable-default-argument pitfall; callers may still pass lists.
    cell_width: sized
        width of the cells in the table (per column, or a single uniform width)
    cell_header_height: sized
        height of the header cells in the table (only the first entry is used)
    view_fig: bool
        whether or not to view the table (opens in browser)
    save_fig: bool
        whether or not to save the table
    save_csv: bool
        whether to save the table contents as a csv
    save_fig_path: str
        file path for where to save the figure

    Returns
    -------
    """
    # TODO: reduce the number of inputs to: df, style_dict, and save_dict
    table_cols = table_df.columns
    n_rows, n_cols = table_df.shape
    _table = go.Table(
        columnwidth=cell_width,
        header=dict(
            line_color="black",
            values=list(table_cols),
            fill_color="rgb(243, 243, 243)",
            align="center",
            font_size=14,
            height=cell_header_height[0],
        ),
        cells=dict(
            line_color="black",
            values=table_df[table_cols].T,
            fill_color="white",
            align="center",
            font_size=13,
            height=cell_height[0],
        ),
    )
    # Per-column widths: sum them; single width: assume uniform columns.
    if len(cell_width) > 1:
        table_width = np.sum(np.asarray(cell_width))
    else:
        table_width = n_cols * cell_width[0]
    table_height = (n_rows + 1.5) * cell_height[0] + cell_header_height[0]
    table_layout = go.Layout(
        margin=dict(l=10, r=10, t=10, b=0), width=table_width, height=table_height
    )
    fig = go.Figure(data=_table, layout=table_layout)
    save_view_fig(
        fig,
        image_type=image_type,
        figure_name=table_name,
        analysis_name=analysis_name,
        view_fig=view_fig,
        save_fig=save_fig,
        save_fig_path=save_fig_path,
        width=table_width,
        height=table_height,
    )
    # utc_string / code_version are module-level tags added to saved filenames.
    file_name = "{}-{}_{}_{}".format(
        analysis_name, table_name, utc_string, code_version
    )
    if save_csv:
        table_df.to_csv(os.path.join(save_fig_path, file_name + ".csv"))
    return
def make_boxplot(
    table_df,
    image_type="png",
    figure_name="<number-or-name>-boxplot",
    analysis_name="analysis-<name>",
    metric="LBGI",
    level_of_analysis="analysis_type",
    notched_boxplot=True,
    y_scale_type="linear",
    view_fig=True,
    save_fig=True,
    save_fig_path=os.path.join("..", "..", "reports", "figures"),
):
    """
    Draw a boxplot of *metric*, either pooled ("all") or faceted by
    *level_of_analysis*.

    Metric values are shifted by +1 before plotting (log_y is enabled and a
    log axis cannot show 0); the custom tick labels below undo that shift.

    Parameters
    ----------
    table_df: dataframe
        dataframe of data to pull from
    image_type: str
        file type ("png","jpg","pdf", etc.) to save image as
    figure_name: str
        name to use for figure
    analysis_name: str
        name of the analysis this figure is associated with
    metric: str
        metric column to use
    level_of_analysis: str
        which level of analysis to use for breakdown of boxplots
    notched_boxplot: bool
        whether to make a notched boxplot
    y_scale_type: str
        type ("log","linear") for y-axis
    view_fig: bool
        whether or not to view the figure (opens in browser)
    save_fig: bool
        whether or not to save the figure
    save_fig_path: str
        file path for where to save the figure

    Returns
    -------
    """
    if level_of_analysis == "all":
        # Single pooled box across every analysis.
        shifted_values = table_df[metric].apply(lambda value: value + 1)
        summary_fig = px.box(
            x=None,
            y=shifted_values,
            points=False,
            color_discrete_sequence=px.colors.qualitative.T10,
            notched=notched_boxplot,
            log_y=True,
        )
    else:
        # One box per category, faceted horizontally; sort first so the shifted
        # values, colors, and facets stay row-aligned.
        ordered_df = table_df.sort_values([level_of_analysis])
        shifted_values = ordered_df[metric].apply(lambda value: value + 1)
        category_labels = ordered_df[level_of_analysis + "_label"]
        summary_fig = px.box(
            y=shifted_values,
            points=False,
            color=category_labels,
            color_discrete_sequence=px.colors.qualitative.T10,
            notched=notched_boxplot,
            facet_col=category_labels,
            boxmode="overlay",
            log_y=True,
        )
    summary_fig.update_layout(
        title="Distribution of "
        + metric
        + " By "
        + level_of_analysis_dict[level_of_analysis],
        showlegend=True,
        yaxis=dict(title=metric),
        plot_bgcolor="#D3D3D3",
        legend_title=level_of_analysis_dict[level_of_analysis],
    )
    # Tick labels subtract the +1 shift applied above (tick at 1 reads "0", ...).
    summary_fig.update_yaxes(
        type=y_scale_type,
        tickvals=[1, 2, 3, 6, 11, 26, 51, 101, 251, 501],
        ticktext=["0", "1", "2", "5", "10", "25", "50", "100", "250", "500"],
    )
    summary_fig.update_traces(marker=dict(size=2, opacity=0.3))
    # Strip the "label=" prefix and " Analysis" suffix from facet titles.
    summary_fig.for_each_annotation(
        lambda a: a.update(text=a.text.split("=")[1].replace(" Analysis", ""))
    )
    save_view_fig(
        summary_fig,
        image_type,
        figure_name,
        analysis_name,
        view_fig,
        save_fig,
        save_fig_path,
    )
    return
def make_bubble_plot(
    table_df,
    image_type="png",
    figure_name="<number-or-name>-bubbleplot",
    analysis_name="analysis-<name>",
    metric="LBGI",
    level_of_analysis="analysis_type",
    view_fig=True,
    save_fig=True,
    save_fig_path=os.path.join("..", "..", "reports", "figures"),
):
    """
    Creates a "stoplight" like plot showing the distribution of the simulations
    across the different risk score bins (for example, for DKAI risk score).

    Bubble size encodes how many simulations fell in each (category, score)
    cell, and a text annotation above each bubble shows that cell's percentage.

    Parameters
    ----------
    table_df: dataframe
        dataframe for making the figure from
    image_type: str
        file type ("png","jpg","pdf", etc.) to save image as
    figure_name: str
        name to use for the figure when saving
    analysis_name: str
        name of the analysis this figure is associated with
    metric
        metric column to use (its "<metric> String" companion column holds the
        score-category label)
    level_of_analysis: str
        which level of analysis to use for breakdown of boxplots
    view_fig: bool
        whether or not to view the figure (opens in browser)
    save_fig: bool
        whether or not to save the figure
    save_fig_path: str
        file path for where to save the figure

    Returns
    -------
    """
    if level_of_analysis == "all":
        # Pooled view: one column of bubbles (x fixed at 1), one bubble per
        # score value, sized by simulation count.
        df = table_df[[metric, metric + " String"]]
        grouped_df = (
            df.groupby([metric, metric + " String"])
            .size()
            .reset_index(name="count")
            .sort_values(by=metric, ascending=True)
        )
        # Percentage of all simulations in each score bin, as "12.3%" strings.
        grouped_df["percentage"] = (
            grouped_df["count"] / grouped_df["count"].sum()
        ).apply(lambda x: "{:.1%}".format(x))
        # For adding in rows that don't exist
        # (count=0.001 keeps the category visible in the legend/color mapping
        # without rendering a noticeable bubble; the >= 1 check below skips its
        # annotation)
        metric_values = [0, 1, 2, 3, 4]
        for metric_value in metric_values:
            if not ((grouped_df[metric] == metric_value)).any():
                # score_dict maps score value -> display string (module global)
                data = [[metric_value, score_dict[metric_value], 0.001, ""]]
                df2 = pd.DataFrame(
                    data, columns=[metric, metric + " String", "count", "percentage"]
                )
                grouped_df = pd.concat([grouped_df, df2], axis=0, ignore_index=True)
        grouped_df = grouped_df.sort_values(by=[metric], ascending=True)
        summary_fig = px.scatter(
            x=[1] * len(grouped_df[metric]),
            y=grouped_df[metric],
            size=grouped_df["count"],
            color=grouped_df[metric + " String"],
            # text=grouped_df["percentage"],
            color_discrete_map=color_dict,
            size_max=25,
        )
        # Annotate each real (count >= 1) bubble with its percentage; the
        # small percentage-scaled term nudges the label above larger bubbles.
        for index, row in grouped_df.iterrows():
            if row["count"] >= 1:
                summary_fig.add_annotation(
                    x=1,
                    y=row[metric]
                    + 0.15
                    + float(row["percentage"].replace("%", "")) * 0.0015,
                    text=row["percentage"],
                    font=dict(size=12),
                    showarrow=False,
                )
        layout = go.Layout(
            showlegend=True,
            title="Distribution of "
            + metric
            + " Across "
            + level_of_analysis_dict[level_of_analysis],
            yaxis=dict(title=metric, tickvals=[0, 1, 2, 3, 4], range=[-0.25, 4.4]),
            xaxis=dict(
                title="", tickvals=[0, 1, 2], range=[0, 2], showticklabels=False
            ),
            plot_bgcolor="#D3D3D3",
            legend_title="Tidepool " + metric + "<br>",
            legend={"traceorder": "reversed"},
        )
    else:
        # Broken-down view: one bubble column per breakdown category.
        df = table_df[
            [
                level_of_analysis,
                level_of_analysis + "_label",
                metric,
                metric + " String",
            ]
        ]
        grouped_df = (
            df.groupby(
                [
                    level_of_analysis,
                    level_of_analysis + "_label",
                    metric,
                    metric + " String",
                ]
            )
            .size()
            .reset_index(name="count")
            .sort_values(
                by=[level_of_analysis, level_of_analysis + "_label", metric],
                ascending=True,
            )
        )
        # Percentages are normalized within each breakdown category, not
        # across the whole table.
        sum_df = grouped_df.groupby(level_of_analysis)["count"].transform("sum")
        grouped_df["percentage"] = (
            grouped_df["count"].div(sum_df).apply(lambda x: "{:.1%}".format(x))
        )
        # Trim trailing ".0" (e.g. "25.0%" -> "25%") to shorten annotations.
        grouped_df["percentage"] = grouped_df["percentage"].apply(
            lambda x: x[: len(x) - 3] + "%" if x[len(x) - 3 :] == ".0%" else x
        )
        # For adding in rows that don't exist
        # (every (score value, category) pair must be present so colors/axes
        # stay consistent across categories)
        metric_values, analysis_levels, analysis_labels = (
            [0, 1, 2, 3, 4],
            grouped_df[level_of_analysis].unique(),
            grouped_df[level_of_analysis + "_label"].unique(),
        )
        for metric_value, level in itertools.product(metric_values, analysis_levels):
            if not (
                (grouped_df[metric] == metric_value)
                & (grouped_df[level_of_analysis] == level)
            ).any():
                data = [[level, metric_value, score_dict[metric_value], 0.001, ""]]
                df2 = pd.DataFrame(
                    data,
                    columns=[
                        level_of_analysis,
                        metric,
                        metric + " String",
                        "count",
                        "percentage",
                    ],
                )
                # analysis_type_labels is a module-level mapping from raw
                # analysis keys to display labels.
                df2[level_of_analysis + "_label"] = df2[level_of_analysis].replace(
                    analysis_type_labels
                )
                grouped_df = pd.concat([grouped_df, df2], axis=0, ignore_index=True)
        grouped_df = grouped_df.sort_values(
            by=[level_of_analysis, level_of_analysis + "_label", metric], ascending=True
        )
        summary_fig = px.scatter(
            x=grouped_df[level_of_analysis + "_label"],
            y=grouped_df[metric],
            # text=grouped_df["percentage"],
            size=grouped_df["count"],
            color=grouped_df[metric + " String"],
            color_discrete_map=color_dict,
            # color=grouped_df["count"],
            # colorscale="RdYlGn",
            size_max=25,
        )
        # bg_test_condition has more categories, so use smaller/closer labels.
        if level_of_analysis == "bg_test_condition":
            annotation_font_size = 9
            height_parameter = 0.1
        else:
            annotation_font_size = 12
            height_parameter = 0.15
        for index, row in grouped_df.iterrows():
            if row["count"] >= 1:
                summary_fig.add_annotation(
                    x=row[level_of_analysis + "_label"],
                    y=row[metric]
                    + height_parameter
                    + float(row["percentage"].replace("%", "")) * 0.0015,
                    text=row["percentage"],
                    font=dict(size=annotation_font_size),
                    showarrow=False,
                )
        # Slant the (longer) analysis-type tick labels so they do not overlap.
        if level_of_analysis == "analysis_type":
            tickangle = 45
        else:
            tickangle = 0
        layout = go.Layout(
            showlegend=True,
            title="Distribution of "
            + metric
            + " Across "
            + level_of_analysis_dict[level_of_analysis],
            yaxis=dict(title=metric, tickvals=[0, 1, 2, 3, 4], range=[-0.25, 4.4]),
            xaxis=dict(
                title=level_of_analysis_dict[level_of_analysis],
                type="category",
                tickangle=tickangle,
            ),
            plot_bgcolor="#D3D3D3",
            legend_title="Tidepool " + metric + "<br>",
            legend={"traceorder": "reversed"},
        )
    summary_fig.update_layout(layout)
    save_view_fig(
        summary_fig,
        image_type,
        figure_name,
        analysis_name,
        view_fig,
        save_fig,
        save_fig_path,
    )
    return
def make_histogram(
    table_df,
    image_type="png",
    figure_name="<number-or-name>-histogram",
    analysis_name="analysis-<name>",
    metric="LBGI",
    level_of_analysis="analysis_type",
    view_fig=True,
    save_fig=True,
    save_fig_path=os.path.join("..", "..", "reports", "figures"),
):
    """
    Draw a histogram of *metric*, pooled or faceted by *level_of_analysis*.

    Parameters
    ----------
    table_df: dataframe
        dataframe for making the figure from
    image_type: str
        file type ("png","jpg","pdf", etc.) to save image as
    figure_name: str
        name to use for the figure when saving
    analysis_name: str
        name of the analysis this figure is associated with
    metric: str
        metric column to use
    level_of_analysis: str
        which level of analysis to use for breakdown of histograms
    view_fig: bool
        whether or not to view the figure (opens in browser)
    save_fig: bool
        whether or not to save the figure
    save_fig_path: str
        file path for where to save the figure

    Returns
    -------
    """
    if level_of_analysis == "all":
        # Pooled histogram across every analysis.
        grouped = table_df[[metric]].groupby([metric]).size().reset_index(name="count")
        summary_fig = px.histogram(
            x=grouped[metric],
            nbins=500,
            color_discrete_sequence=px.colors.qualitative.T10,
        )
        layout = go.Layout(
            showlegend=True,
            title="Distribution of "
            + metric
            + " By "
            + level_of_analysis_dict[level_of_analysis],
            plot_bgcolor="#D3D3D3",
            xaxis=dict(title=metric),
            legend_title=level_of_analysis_dict[level_of_analysis],
        )
    else:
        grouped = (
            table_df[[level_of_analysis, metric]]
            .groupby([level_of_analysis, metric])
            .size()
            .reset_index(name="count")
        )
        # analysis_type facets stack vertically; other breakdowns wrap columns.
        histogram_kwargs = dict(
            x=grouped[metric],
            nbins=500,
            color_discrete_sequence=px.colors.qualitative.T10,
            color=grouped[level_of_analysis],
        )
        if level_of_analysis == "analysis_type":
            histogram_kwargs["facet_row"] = grouped[level_of_analysis]
        else:
            histogram_kwargs["facet_col"] = grouped[level_of_analysis]
            histogram_kwargs["facet_col_wrap"] = 3
        summary_fig = px.histogram(**histogram_kwargs)
        layout = go.Layout(
            showlegend=True,
            title="Distribution of "
            + metric
            + " Across "
            + level_of_analysis_dict[level_of_analysis],
            plot_bgcolor="#D3D3D3",
            legend_title=level_of_analysis_dict[level_of_analysis],
        )
    summary_fig.update_layout(layout)
    # Strip the "label=" prefix and " Analysis" suffix from facet titles.
    summary_fig.for_each_annotation(
        lambda a: a.update(text=a.text.split("=")[1].replace(" Analysis", ""))
    )
    save_view_fig(
        summary_fig,
        image_type,
        figure_name,
        analysis_name,
        view_fig,
        save_fig,
        save_fig_path,
    )
    return
def make_distribution_table(
    table_df,
    image_type="png",
    table_name="<number-or-name>-table",
    analysis_name="analysis-<name>",
    metric="LBGI",
    level_of_analysis="analysis_type",
    view_fig=True,
    save_fig=True,
    save_csv=True,
    save_fig_path=os.path.join("..", "..", "reports", "figures"),
):
    """
    Build and save a descriptive-statistics table for *metric*, pooled or
    broken down by *level_of_analysis*.

    Parameters
    ----------
    table_df: dataframe
        dataframe for making the table from
    image_type: str
        file type ("png","jpg","pdf", etc.) to save image as
    table_name: str
        name to use for the table when saving
    analysis_name: str
        name of the analysis this table is associated with
    metric: str
        metric column to use
    level_of_analysis: str
        which level of analysis to use for breakdown of tables
    view_fig: bool
        whether or not to view the table (opens in browser)
    save_fig: bool
        whether or not to save the table
    save_csv: bool
        whether or not to save a csv of the table contents
    save_fig_path: str
        file path for where to save the table

    Returns
    -------
    """
    if level_of_analysis == "all":
        # One summary row covering every simulation.
        distribution_df = table_df[metric].describe().to_frame().transpose()
        distribution_df.insert(0, "", ["All Analyses Combined"], True)
    else:
        # One summary row per breakdown category.
        subset = table_df[[level_of_analysis, metric]]
        distribution_df = (
            subset.groupby(level_of_analysis)[[metric]].describe().reset_index()
        )
        distribution_df.columns = distribution_df.columns.droplevel(0)
        if level_of_analysis == "bg_test_condition":
            distribution_df.iloc[:, 0] = distribution_df.iloc[:, 0].apply(
                "BG Test Condition {}".format
            )
    column_labels = {
        "mean": "Mean",
        "50%": "Median",
        "std": "Standard Deviation",
        "min": "Minimum",
        "max": "Maximum",
        "count": "Number of Simulations",
    }
    value_labels = {
        "correction_bolus": "Correction Bolus Analyses",
        "meal_bolus": "Meal Bolus Analyses",
        "temp_basal_only": "Temp Basal Analyses",
    }
    distribution_df = (
        distribution_df.round(2).rename(columns=column_labels).replace(value_labels)
    )
    make_table(
        distribution_df,
        image_type=image_type,
        table_name=table_name,
        analysis_name=analysis_name,
        cell_height=[30],
        cell_width=[240, 130, 100, 100, 100, 100, 100, 100],
        cell_header_height=[60],
        view_fig=view_fig,
        save_fig=save_fig,
        save_csv=save_csv,
        save_fig_path=save_fig_path,
    )
    return
# %% Summary Table
def prepare_results_for_summary_table(results_df):
    """
    Assemble summary-statistics rows for the overall results, for each
    analysis type, and for each bg test condition.

    Parameters
    ----------
    results_df: dataframe
        dataframe of results to use

    Returns
    -------
    dataframe of stacked summary rows (one per level of analysis)
    """
    # %% first remove any/all iCGM sensor batches that did not meet iCGM special controls
    # summary_df_reduced = results_df[results_df["ICGM_PASS%"] == 100]
    summary_df_reduced = results_df.copy()
    # Overall row first.
    all_analyses_summary_df = get_summary_stats(
        summary_df_reduced, "All Analyses Combined"
    )
    # Rename the raw analysis-type keys to their display labels.
    display_labels = {
        "temp_basal_only": "Temp Basal Analysis",
        "correction_bolus": "Correction Bolus Analysis",
        "meal_bolus": "Meal Bolus Analysis",
    }
    summary_df_reduced.replace(display_labels, inplace=True)
    # One row per analysis type.
    for analysis_type in summary_df_reduced["analysis_type"].unique():
        subset = summary_df_reduced[
            summary_df_reduced["analysis_type"] == analysis_type
        ]
        all_analyses_summary_df = pd.concat(
            [all_analyses_summary_df, get_summary_stats(subset, analysis_type)]
        )
    # One row per bg test condition, in sorted order.
    summary_df_reduced = summary_df_reduced.sort_values(by=["bg_test_condition"])
    for bg_test_condition in summary_df_reduced["bg_test_condition"].unique():
        subset = summary_df_reduced[
            summary_df_reduced["bg_test_condition"] == bg_test_condition
        ]
        all_analyses_summary_df = pd.concat(
            [
                all_analyses_summary_df,
                get_summary_stats(
                    subset, "BG Test Condition {}".format(bg_test_condition)
                ),
            ]
        )
    return all_analyses_summary_df
def get_summary_stats(df, level_of_analysis_name):
    """
    Build a one-row summary table: simulation count plus median (IQR) strings
    for LBGI and DKAI.

    Parameters
    ----------
    df: dataframe
        dataframe to get summary stats from
    level_of_analysis_name: str
        what level of analysis breakdown to use (becomes the row index)

    Returns
    -------
    dataframe with a single row indexed by *level_of_analysis_name*
    """
    # Commented out risk score columns pending whether want to show
    # median values for the categorical risk score measures.
    # NOTE: there is a known bug with plotly tables https://github.com/plotly/plotly.js/issues/3251
    # (the stray spaces in the column headers below work around it)
    count_name = " Number of<br>Simulations"
    outcome_specs = [
        ("LBGI", "Median LBGI<br> (IQR)"),
        ("DKAI", "Median DKAI<br> (IQR)"),
        # ("LBGI Risk Score", "Median LBGI Risk Score<br> (IQR)"),
        # ("DKAI Risk Score", "Median DKAI Risk Score<br> (IQR)"),
    ]
    summary_table = pd.DataFrame(
        columns=[count_name] + [table_col for _, table_col in outcome_specs]
    )
    summary_table.index.name = "Level of Analysis"
    for outcome, table_col in outcome_specs:
        stats = df[outcome].describe()
        summary_table.loc[level_of_analysis_name, count_name] = stats["count"]
        summary_table.loc[
            level_of_analysis_name, table_col
        ] = "{} (IQR={}-{})".format(
            stats["50%"].round(1),
            stats["25%"].round(1),
            stats["75%"].round(1),
        )
    return summary_table
def _append_percentages(count_df):
    """Format a counts table as "count (pct)" strings, normalizing each row.

    Each cell becomes e.g. "12 (25.0%)", where the percentage is relative to
    that row's total count.
    """
    percentage_df = count_df.apply(lambda row: row / row.sum(), axis=1)
    percentage_text = percentage_df.apply(
        lambda column: column.map(lambda value: " ({:.1%})".format(value))
    )
    return count_df.astype(str) + percentage_text


def make_frequency_table(
    results_df,
    image_type="png",
    table_name="<number-or-name>-frequency-table",
    analysis_name="analysis-<name>",
    cell_header_height=[60],
    cell_height=[30],
    cell_width=[200, 100, 150, 150],
    metric="LBGI",
    level_of_analysis="analysis_type",
    view_fig=True,
    save_fig=True,
    save_csv=True,
    save_fig_path=os.path.join("..", "..", "reports", "figures"),
):
    """
    Build and save a frequency table of "<metric> String" score categories,
    with each cell formatted as "count (percent)".

    Parameters
    ----------
    results_df: dataframe
        simulation results to tabulate
    image_type: str
        file type ("png","jpg","pdf", etc.) to save image as
    table_name: str
        name to use for the table when saving
    analysis_name: str
        name of the analysis this table is associated with
    cell_header_height: sized
        height of the header cells in the table
    cell_height: sized
        height of the cells in the table
    cell_width: sized
        width of the cells in the table
    metric: str
        metric whose "<metric> String" score category column is tabulated
    level_of_analysis: str
        breakdown level ("all", "analysis_type", or "bg_test_condition")
    view_fig: bool
        whether or not to view the table (opens in browser)
    save_fig: bool
        whether or not to save the table
    save_csv: bool
        whether to save the table contents as a csv
    save_fig_path: str
        file path for where to save the table

    Returns
    -------
    """
    level_of_analysis_dict = {
        "all": "All Analyses Combined",
        "analysis_type": "Analysis Type",
        "bg_test_condition": "BG Test Condition",
    }
    if level_of_analysis == "all":
        # Single row: category counts across every simulation.
        frequency_df = results_df[metric + " String"].value_counts().to_frame().T
        frequency_df = _append_percentages(frequency_df)
        column_names = [""] + list(color_dict.keys())
        frequency_df.insert(0, "", ["All Analyses Combined"], True)
    else:
        # One row per category of the breakdown level.
        frequency_df = pd.crosstab(
            results_df[level_of_analysis], results_df[metric + " String"]
        ).reset_index()
        count_columns = [
            col for col in frequency_df.columns if col != level_of_analysis
        ]
        frequency_df = pd.concat(
            [
                frequency_df[[level_of_analysis]],
                _append_percentages(frequency_df[count_columns]),
            ],
            axis=1,
        )
        frequency_df = frequency_df.rename(
            columns={level_of_analysis: level_of_analysis_dict[level_of_analysis]}
        )
        column_names = [level_of_analysis_dict[level_of_analysis]] + list(
            color_dict.keys()
        )
        if level_of_analysis == "bg_test_condition":
            frequency_df.iloc[:, 0] = frequency_df.iloc[:, 0].apply(
                lambda x: "BG Test Condition {}".format(x)
            )
    # Replace raw analysis-type keys with display labels.
    frequency_df = frequency_df.replace("correction_bolus", "Correction Bolus Analyses")
    frequency_df = frequency_df.replace("meal_bolus", "Meal Bolus Analyses")
    frequency_df = frequency_df.replace("temp_basal_only", "Temp Basal Analyses")
    # Guarantee a column for every score category, even if it never occurred.
    for metric_value in score_dict.keys():
        if score_dict[metric_value] not in frequency_df.columns:
            frequency_df[score_dict[metric_value]] = "0 (0.0%)"
    frequency_df = frequency_df.reindex(columns=column_names)
    frequency_df = frequency_df.rename(
        columns={"Analysis Type": "", "BG Test Condition": ""}
    )
    make_table(
        frequency_df,
        image_type=image_type,
        table_name=table_name,
        analysis_name=analysis_name,
        cell_height=cell_height,
        cell_width=cell_width,
        cell_header_height=cell_header_height,
        view_fig=view_fig,
        save_fig=save_fig,
        save_csv=save_csv,
        save_fig_path=save_fig_path,
    )
    return
# Functions of cdfs
def ecdf(x):
    """
    Build an empirical cumulative distribution function from a sample.

    Parameters
    ----------
    x: array
        sample of observations the ECDF is estimated from

    Returns
    -------
    callable
        function mapping a value (or array of values) v to the fraction
        of observations in x that are <= v
    """
    sorted_sample = np.sort(x)
    sample_size = sorted_sample.size

    def result(v):
        # rank of v among the sorted observations, normalized to [0, 1]
        return np.searchsorted(sorted_sample, v, side="right") / sample_size

    return result
def create_cdf(
    data,
    title="CDF",
    image_type="png",
    figure_name="<number-or-name>-boxplot",
    analysis_name="analysis-<name>",
    view_fig=True,
    save_fig=True,
    save_fig_path=os.path.join("..", "..", "reports", "figures"),
):
    """
    Plot the empirical CDF of ``data`` as a scatter trace and save/show it.

    Parameters
    ----------
    data: array
        sample whose cumulative distribution is plotted
    title: str
        figure title
    image_type: str
        file type ("png", "jpg", "pdf", etc.) to save image as
    figure_name: str
        name to use for the figure when saving
    analysis_name: str
        name of the analysis this figure is associated with
    view_fig: bool
        whether or not to view the figure (opens in browser)
    save_fig: bool
        whether or not to save the figure
    save_fig_path: str
        file path for where to save the figure

    Returns
    -------
    """
    # Evaluate the ECDF once at each distinct sample value.
    unique_values = np.unique(data)
    cumulative_fractions = ecdf(data)(unique_values)
    fig = go.Figure()
    fig.add_scatter(x=unique_values, y=cumulative_fractions)
    fig.update_layout(title=title)
    save_view_fig(
        fig,
        image_type,
        figure_name,
        analysis_name,
        view_fig,
        save_fig,
        save_fig_path,
    )
    return
########## Spearman Correlation Coefficient Table #################
def spearman_correlation_table(
    results_df,
    image_type="png",
    table_name="spearman-correlation-table",
    analysis_name="icgm-sensitivity-analysis",
    cell_header_height=[60],
    cell_height=[30],
    cell_width=[250, 150, 150, 150, 150],
    view_fig=True,
    save_fig=True,
    save_csv=True,
    save_fig_path=os.path.join("..", "..", "reports", "figures"),
):
    """
    Build and save a table of Spearman rank correlation coefficients.

    Each cell holds "(rho, p-value)" for one sensor characteristic (row)
    against one outcome metric (column).

    Parameters
    ----------
    results_df: dataframe
        dataframe pulling from for data to calculate spearman correlation coefficients from
    image_type: str
        file type ("png","jpg","pdf", etc.) to save image as
    table_name: str
        name to use for the table when saving the figure
    analysis_name: str
        name of the analysis this table is associated with
    cell_header_height: sized
        height of the header cells in the table
    cell_height: sized
        height of the cells in the table
    cell_width: sized
        width of the cells in the table
    view_fig: bool
        whether or not to view the table (opens in browser)
    save_fig: bool
        whether or not to save the table
    save_csv: bool
        whether to save the table contents as a csv
    save_fig_path: str
        file path for where to save the figure

    Returns
    -------
    """
    characteristic_columns = [
        "bias_factor",
        "bias_drift_oscillations",
        "bias_drift_range_start",
        "bias_drift_range_end",
        "noise_coefficient",
        "mard",
        "mbe",
    ]
    outcome_columns = ["LBGI", "LBGI Risk Score", "DKAI", "DKAI Risk Score"]

    def _correlation_cell(characteristic, outcome):
        # Spearman rho and p-value for one characteristic/outcome pair,
        # rendered as "(rho, pval)" rounded to 3 decimals.
        rho, pval = stats.spearmanr(results_df[characteristic], results_df[outcome])
        return "(" + str(round(rho, 3)) + ", " + str(round(pval, 3)) + ")"

    data = {
        outcome: [
            _correlation_cell(characteristic, outcome)
            for characteristic in characteristic_columns
        ]
        for outcome in outcome_columns
    }
    spearman_correlation_df = pd.DataFrame(data)
    # Human-readable row labels in the first (unnamed) column.
    spearman_correlation_df.insert(
        0,
        "",
        [
            "Bias Factor",
            "Bias Drift Oscillations",
            "Bias Drift Range Start",
            "Bias Drift Range End",
            "Noise Coefficient",
            "Mean Absolute Relative Difference",
            "Mean Bias Error",
        ],
    )
    make_table(
        spearman_correlation_df,
        image_type=image_type,
        table_name=table_name,
        analysis_name=analysis_name,
        cell_height=cell_height,
        cell_width=cell_width,
        cell_header_height=cell_header_height,
        view_fig=view_fig,
        save_fig=save_fig,
        save_csv=save_csv,
        save_fig_path=save_fig_path,
    )
    return
# Function for checking distributions
def create_scatter(
    df,
    x_value="cir",
    y_value="LBGI",
    color_value="",
    image_type="png",
    analysis_name="icgm_sensitivity_analysis",
    view_fig=False,
    save_fig=True,
    title="",
    fig_name="",
    save_fig_path=os.path.join("..", "..", "reports", "figures"),
):
    """
    Create and save a plotly express scatterplot, optionally color-coded
    by a third column.

    Parameters
    ----------
    df: dataframe
        dataframe want to create scatter plot from
    x_value: str
        column from that dataframe want to use for x value
    y_value: str
        column from that dataframe want to use for y value
    color_value: str
        column from that dataframe want to use for color value
        (empty string means no color dimension)
    image_type: str
        file type ("png","jpg","pdf", etc.) to save image as
    analysis_name: str
        name of the analysis this scatterplot is associated with
    view_fig: bool
        whether or not to view the figure
    save_fig: bool
        whether or not to save the figure
    title: str
        title of the figure
    fig_name: str
        name of the figure
    save_fig_path: str
        path to save the figure at

    Returns
    -------
    """
    if color_value != "":
        # Sort by the color column so the continuous color scale renders
        # in a stable order.
        plot_df = df.sort_values(by=color_value, ascending=True)
        fig = px.scatter(
            data_frame=plot_df,
            x=x_value,
            y=y_value,
            opacity=0.3,
            color=color_value,
            title=title,
            color_continuous_scale=px.colors.sequential.Viridis,
        )  # , color_continuous_scale=px.colors.diverging.RdYlGn)
    else:
        fig = px.scatter(data_frame=df, x=x_value, y=y_value, opacity=0.3, title=title)
    fig.update_traces(marker=dict(size=3))
    save_view_fig(
        fig,
        image_type,
        fig_name,
        analysis_name,
        view_fig,
        save_fig,
        save_fig_path,
    )
    return
def generate_all_check_distribution_scatterplots(
    df, fig_path=os.path.join("..", "..", "reports", "figures")
):
    """
    Generate a set of scatter plots to check the distributions of the
    outcome metrics against sensor characteristics, patient settings,
    and analysis levels.

    Parameters
    ----------
    df: dataframe
        dataframe to use for figures (expected to contain the settings,
        outcome metric, and sensor characteristic columns listed below)
    fig_path: str
        file path to use for saving figures

    Returns
    -------
    """
    # Axis groups used for the plot combinations below.
    settings = ["CIR", "ISF", "SBR"]
    outcome_metrics = ["LBGI", "DKAI", "HBGI"]
    sensor_characteristics = [
        "bias_drift_oscillations",
        "bias_drift_range_start",
        "bias_drift_range_end",
        "noise_coefficient",
    ]
    analysis_levels = ["bg_test_condition_label", "analysis_type_label"]
    # Each outcome metric vs. each sensor characteristic.
    for x, y in itertools.product(sensor_characteristics, outcome_metrics):
        create_scatter(
            df=df,
            x_value=x,
            y_value=y,
            title="Distribution of " + y + " by " + x,
            fig_name="distribution_" + y + "_" + x,
            save_fig_path=fig_path,
        )
    # Investigate high LBGI risk scores
    create_scatter(
        df=df,
        x_value="starting_bg",
        y_value="LBGI",
        color_value="LBGI Risk Score String",
        title="Distribution of LBGI by Simulation Starting BG",
        fig_name="distribution_LBGI_by_starting_bg",
        save_fig_path=fig_path,
    )
    # Check distributions
    # Split on CIR < 4 to compare unusual vs. typical carb ratios.
    unusual_settings_results_df = df[df["CIR"] < 4]
    rest_results_df = df[df["CIR"] >= 4]
    for x, y in itertools.product(sensor_characteristics, ["HBGI", "LBGI"]):
        create_scatter(
            df=unusual_settings_results_df,
            x_value=x,
            y_value=y,
            title="Distribution of " + y + " by " + x + "<br>Where CIR < 4",
            fig_name="distribution_" + y + "_" + x + "_cir<4",
            save_fig_path=fig_path,
        )
        create_scatter(
            df=rest_results_df,
            x_value=x,
            y_value=y,
            title="Distribution of " + y + " by " + x + "<br>Where CIR >= 4",
            fig_name="distribution_" + y + "_" + x + "_cir>=4",
            save_fig_path=fig_path,
        )
    # Split on SBR < 0.5 to compare unusual vs. typical basal rates.
    unusual_settings_results_df = df[df["SBR"] < 0.5]
    rest_results_df = df[df["SBR"] >= 0.5]
    for x, y in itertools.product(sensor_characteristics, ["HBGI", "LBGI"]):
        create_scatter(
            df=unusual_settings_results_df,
            x_value=x,
            y_value=y,
            title="Distribution of " + y + " by " + x + "<br>Where SBR < 0.5",
            fig_name="distribution_" + y + "_" + x + "_sbr<0.5",
            save_fig_path=fig_path,
        )
        create_scatter(
            df=rest_results_df,
            x_value=x,
            y_value=y,
            title="Distribution of " + y + " by " + x + "<br>Where SBR >= 0.5",
            fig_name="distribution_" + y + "_" + x + "_sbr>=0.5",
            save_fig_path=fig_path,
        )
    # Outcome metric vs. sensor characteristic, color-coded by each
    # setting and each analysis level.
    for x, y in itertools.product(sensor_characteristics, outcome_metrics):
        for setting in settings:
            create_scatter(
                df=df,
                x_value=x,
                y_value=y,
                color_value=setting,
                title="Distribution of "
                + y
                + " by "
                + x
                + "<br>(Color-coded by "
                + setting
                + ")",
                fig_name="distribution_" + y + "_" + x + "_color_" + setting,
                save_fig_path=fig_path,
            )
        for analysis_level in analysis_levels:
            create_scatter(
                df=df,
                x_value=x,
                y_value=y,
                color_value=analysis_level,
                title="Distribution of "
                + y
                + " by "
                + x
                + "<br>(Color-coded by "
                + analysis_level
                + ")",
                fig_name="distribution_" + y + "_" + x + "_color_" + analysis_level,
                save_fig_path=fig_path,
            )
    # Outcome metric vs. setting, color-coded by each analysis level.
    for x, y in itertools.product(settings, outcome_metrics):
        for analysis_level in analysis_levels:
            create_scatter(
                df=df,
                x_value=x,
                y_value=y,
                color_value=analysis_level,
                title="Distribution of "
                + y
                + " by "
                + x
                + "<br>(Color-coded by "
                + analysis_level
                + ")",
                fig_name="distribution_" + y + "_" + x + "_color_" + analysis_level,
                save_fig_path=fig_path,
            )
    # Check distributions
    for x, y in itertools.product(settings, outcome_metrics):
        create_scatter(df=df, x_value=x, y_value=y, save_fig_path=fig_path)
    # Setting vs. setting, color-coded by each outcome metric.
    for x, y in itertools.product(settings, settings):
        for color in outcome_metrics:
            create_scatter(
                df=df, x_value=x, y_value=y, color_value=color, save_fig_path=fig_path
            )
    return
def run_pairwise_comparison(results_df, baseline_df, save_fig_folder_name):
    """
    Pair each iCGM simulation with its ideal-sensor baseline and compute
    per-simulation comparison metrics (ratio, percent change, difference).

    The merged dataframe is written to csv, then the standard set of
    pairwise comparison figures is generated from it.

    Parameters
    ----------
    results_df: dataframe
        dataframe for where the results (icgm) data is stored
    baseline_df: dataframe
        dataframe for where the baseline (ideal sensor) data is stored
    save_fig_folder_name: str
        name of the folder to save figures in

    Returns
    -------
    """
    fig_path = os.path.join(
        "..",
        "..",
        "reports",
        "figures",
        "icgm-sensitivity-paired-comparison-figures",
        save_fig_folder_name,
    )
    if not os.path.exists(fig_path):
        print("making directory " + fig_path + "...")
        os.makedirs(fig_path)
    # Pair every iCGM simulation row with the baseline simulation of the
    # same virtual patient / analysis type / bg test condition.
    combined_df = results_df.merge(
        baseline_df,
        how="left",
        left_on=["virtual_patient_num", "analysis_type", "bg_test_condition"],
        right_on=["virtual_patient_num", "analysis_type", "bg_test_condition"],
        suffixes=("_icgm", "_baseline"),
    )
    # (display label, underlying column stem) for each outcome metric.
    metric_columns = [
        ("LBGI", "LBGI"),
        ("HBGI", "HBGI"),
        ("DKAI", "DKAI"),
        ("BGRI", "BGRI"),
        ("Percent <54", "percent_lt_54"),
    ]
    # Columns are added comparison-type by comparison-type (all ratios,
    # then all percent changes, then all differences) to preserve the
    # historical column order of the output csv.
    for label, stem in metric_columns:
        combined_df[label + " Ratio"] = (
            combined_df[stem + "_icgm"] / combined_df[stem + "_baseline"]
        )
    for label, stem in metric_columns:
        combined_df[label + " Percent Change"] = (
            (combined_df[stem + "_icgm"] - combined_df[stem + "_baseline"]) * 100
        ) / combined_df[stem + "_baseline"]
    for label, stem in metric_columns:
        combined_df[label + " Difference"] = (
            combined_df[stem + "_icgm"] - combined_df[stem + "_baseline"]
        )
    combined_df.to_csv(
        path_or_buf=os.path.join(
            fig_path, "pairwise_comparison_combined_df_" + save_fig_folder_name + ".csv"
        ),
        index=False,
    )
    run_pairwise_comparison_figures(save_fig_folder_name)
    return
def run_pairwise_comparison_figures(save_fig_folder_name):
    """
    Create a set of figures for viewing and analyzing the results of the
    pairwise (baseline vs. iCGM) comparison.

    Several additional exploratory figures are left commented out below
    and can be re-enabled as needed for further exploratory analysis and
    the final analysis report.

    Parameters
    ----------
    save_fig_folder_name: str
        name of folder to save figures to and also to pull data from

    Returns
    -------
    """
    fig_path = os.path.join(
        "..",
        "..",
        "reports",
        "figures",
        "icgm-sensitivity-paired-comparison-figures",
        save_fig_folder_name,
    )
    combined_df = pd.read_csv(
        os.path.join(
            fig_path,
            "pairwise_comparison_combined_df_" + save_fig_folder_name + ".csv",
        )
    )
    # Scatter plots: sensor characteristic vs. paired outcome metric.
    create_paired_comparison_scatter_plots(
        combined_df,
        fig_path=os.path.join(fig_path, "distributions-sensor-characteristic-outcome"),
    )
    # Crosstabs of baseline vs. iCGM risk score bins.
    create_table_paired_risk_score_bins(
        combined_df, fig_path=os.path.join(fig_path, "risk-score-crosstabs")
    )
    # Bivariate distributions of the sensor characteristics themselves.
    create_sensor_characteristic_scatters(
        combined_df,
        fig_path=os.path.join(fig_path, "sensor_characteristic_distributions"),
    )
    # ----- Additional figures that can be re-enabled as needed -----
    # create_paired_comparison_histogram_kde(
    #     combined_df, fig_path=os.path.join(fig_path, "histogram_kde_plots")
    # )
    # combined_df["initial_bias_cutpoint_5"] = np.where(combined_df['initial_bias_icgm']>5, "Initial Bias > 5", "Initial Bias <= 5")
    # create_paired_comparison_scatter_plots(combined_df, fig_path=os.path.join(fig_path, "distributions-sensor-characteristic-outcome_by_inital_bias_cutpoint"), color_value="initial_bias_cutpoint_5")
    # Visualizations for simulations whose risk scores do not match:
    # create_visualization_simulations_changed_rs(combined_df)
    # Counts of simulations meeting various criteria:
    # print_counts_simulations_different_criteria(combined_df)
    # Table of each sensor and all of its characteristics:
    # create_sensor_characteristics_table(combined_df, fig_path=fig_path)
    # Outcome metric distributions by sensor:
    # create_paired_comparison_by_sensor_scatter_plots(combined_df, fig_path)
    # Outcome metric distributions by analysis level:
    # create_paired_comparison_by_analysis_level_scatter_plots(combined_df, fig_path=os.path.join(fig_path, "distributions-sensor-characteristic-analysis-level"), analysis_level="analysis_type_label")
    # create_paired_comparison_by_analysis_level_scatter_plots(combined_df, fig_path=os.path.join(fig_path, "distributions-sensor-characteristic-analysis-level"), analysis_level="bg_test_condition_label")
    # Outcome metrics across the bivariate sensor characteristic space:
    # create_paired_comparison_bivariate_sensor_characteristic_scatter(combined_df, fig_path=os.path.join(fig_path, "distributions-sensor-characteristc-bivariate-space"))
    return
def create_paired_comparison_histogram_kde(df, fig_path, color_value=""):
    """
    Explore the distribution of a paired-comparison outcome metric around
    hand-picked thresholds using KDE/histogram plots and a cumulative
    histogram. All figures are shown in the browser; none are saved yet.

    Parameters
    ----------
    df: dataframe
        dataframe using to create histogram
    fig_path: str
        path to save figure at
    color_value: str
        what field to use for color value
        NOTE(review): currently unused within this function - confirm intent

    Returns
    -------
    """
    if not os.path.exists(fig_path):
        print("making directory " + fig_path + "...")
        os.makedirs(fig_path)
    comparison_types = [" Difference"]  # " Ratio", " Percent Change", " Difference"]
    outcome_metrics = ["LBGI"]  # "DKAI", "HBGI", "Percent <54"]
    # Specify the cutoffs want to check
    threshold_dict = {"LBGI Difference": [2]}
    for comparison_type, outcome_metric in itertools.product(
        comparison_types, outcome_metrics
    ):
        distribution_metric = outcome_metric + comparison_type
        for threshold in threshold_dict[distribution_metric]:
            # Add column so can look at distributions above and below threshold
            df["threshold_column"] = np.where(
                df[distribution_metric] > threshold, True, False
            )
            # Histogram with distribution above and below threshold
            # Another method
            # fig = px.histogram(df, x=distribution_metric, color="threshold_column", marginal="box")
            hist_data = [
                df[df["threshold_column"] == True][distribution_metric],
                df[df["threshold_column"] == False][distribution_metric],
            ]
            group_labels = [
                distribution_metric + " > " + str(threshold),
                distribution_metric + " <= " + str(threshold),
            ]
            fig = ff.create_distplot(
                hist_data, group_labels, bin_size=0.05, histnorm="probability"
            )
            fig.show()
            # save fig - in the title, want to have distribution_metric, threshold in title
            # Histogram with distribution above threshold
            hist_data = [df[df["threshold_column"] == True][distribution_metric]]
            group_labels = [distribution_metric + " > " + str(threshold)]
            fig = ff.create_distplot(
                hist_data, group_labels, bin_size=0.05, histnorm="probability"
            )
            fig.show()
            # Histogram with distribution below threshold
            hist_data = [df[df["threshold_column"] == False][distribution_metric]]
            group_labels = [distribution_metric + " <= " + str(threshold)]
            fig = ff.create_distplot(
                hist_data, group_labels, bin_size=0.05, histnorm="probability"
            )
            fig.show()
            # KDEF with overall distribution but with a line for threshold
            hist_data = [df[distribution_metric]]
            group_labels = ["All Data"]
            fig = ff.create_distplot(
                hist_data, group_labels, bin_size=0.05, histnorm="probability"
            )
            # Add in the line at the threshold
            fig.show()
            # CDF with distribution above and below threshold
            fig = go.Figure(
                data=[
                    go.Histogram(
                        x=df[distribution_metric],
                        cumulative_enabled=True,
                        histnorm="probability",
                    )
                ]
            )
            # NOTE(review): this overlay line is hard-coded at x=2 rather than
            # using the loop's `threshold` value - confirm before re-enabling
            # for other thresholds.
            fig.add_trace(go.Scatter(x=[2, 2], y=[0, 1]))
            fig.show()
            # Add in the line at the threshold
    return
def print_counts_simulations_different_criteria(df):
    """
    Debugging helper: print example rows and counts of simulations that
    meet a few hand-picked paired-comparison criteria.

    Parameters
    ----------
    df: dataframe
        what dataframe to check data in

    Returns
    -------
    """
    # Meal bolus analyses where the iCGM arm bolused but the baseline
    # arm delivered no bolus at all.
    bolus_mismatch = (
        (df["true_bolus_icgm"] != df["true_bolus_baseline"])
        & (
            df["true_bolus_baseline"] == 0
        )  # ((combined_df["true_bolus_icgm"] == 0) | (combined_df["true_bolus_baseline"]==0))
        & (df["analysis_type"] == "meal_bolus")
    )
    mismatched_rows = df.loc[bolus_mismatch]
    print(mismatched_rows[0:10])
    print(len(mismatched_rows))
    # Meal bolus analyses with a large HBGI difference even though both
    # arms delivered the same bolus - expected to return zero rows.
    large_hbgi_same_bolus = (
        ((df["HBGI Difference"] > 20) | (df["HBGI Difference"] < -20))
        & (df["true_bolus_icgm"] == df["true_bolus_baseline"])
        & (df["analysis_type"] == "meal_bolus")
    )
    same_bolus_rows = df.loc[large_hbgi_same_bolus]
    print(same_bolus_rows[0:10])
    print(len(same_bolus_rows))
    return
def create_table_paired_risk_score_bins(df, fig_path):
    """
    Creates a set of crosstab tables showing the count and percentage of
    scenarios that started in a particular risk score bin (for a given
    metric) in the baseline run and ended in a particular bin in the iCGM
    run. Tables are produced overall, per bg test condition, and per
    analysis type, for each of the DKAI and LBGI risk scores.

    Parameters
    ----------
    df: dataframe
        dataframe using to create the risk score bin tables; must contain
        the "<metric> Risk Score String_baseline"/"_icgm" columns plus
        "bg_test_condition" and "analysis_type_label_icgm"
    fig_path: str
        file path to save the tables to

    Returns
    -------
    """
    if not os.path.exists(fig_path):
        print("making directory " + fig_path + "...")
        os.makedirs(fig_path)
    for metric in ["DKAI", "LBGI"]:
        # Overall table (all simulations).
        _make_paired_risk_score_crosstab(
            df,
            metric,
            header=metric + " Risk Score<br>Rows: Baseline; Columns: iCGM",
            table_name=metric + "_paired_risk_score_cross_tab",
            view_fig=False,
            fig_path=fig_path,
        )
        # One table per bg test condition.
        for bg_test_condition in df["bg_test_condition"].unique():
            reduced_df = df[df["bg_test_condition"] == bg_test_condition]
            _make_paired_risk_score_crosstab(
                reduced_df,
                metric,
                header=metric
                + " Risk Score: BG Test Condition "
                + str(bg_test_condition)
                + "<br>Rows: Baseline; Columns: iCGM",
                table_name=metric
                + "_paired_risk_score_cross_tab_bg_test_condition"
                + str(bg_test_condition),
                view_fig=False,
                fig_path=fig_path,
            )
        # One table per analysis type.
        for analysis_type in df["analysis_type_label_icgm"].unique():
            reduced_df = df[df["analysis_type_label_icgm"] == analysis_type]
            _make_paired_risk_score_crosstab(
                reduced_df,
                metric,
                header=metric
                + " Risk Score: "
                + str(analysis_type)
                + "<br>Rows: Baseline; Columns: iCGM",
                table_name=metric
                + "_paired_risk_score_cross_tab_"
                + str(analysis_type),
                # NOTE: per-analysis-type tables were historically opened in
                # the browser (view_fig=True) unlike the other tables.
                view_fig=True,
                fig_path=fig_path,
            )
    return


def _make_paired_risk_score_crosstab(df, metric, header, table_name, view_fig, fig_path):
    """
    Build and save one baseline-vs-iCGM risk score crosstab table.

    Each cell shows the simulation count (thousands-separated) and the row
    percentage, i.e. the share of simulations that started in the baseline
    risk score bin (row) and ended in the iCGM risk score bin (column).

    Parameters
    ----------
    df: dataframe
        (possibly filtered) paired-results dataframe
    metric: str
        outcome metric name ("DKAI" or "LBGI")
    header: str
        label used for the first (baseline-bin) column of the table
    table_name: str
        name to use for the table when saving
    view_fig: bool
        whether to open the table in the browser
    fig_path: str
        directory to save the table to
    """
    frequency_df = pd.crosstab(
        df[metric + " Risk Score String_baseline"],
        df[metric + " Risk Score String_icgm"],
    )
    # Row-normalize the counts. The baseline bin is the crosstab index,
    # so every column participates in the row sum.
    percentage_df = frequency_df.apply(lambda x: x / x.sum(), axis=1)
    # Render each cell as "count (row %)".
    for row in range(len(frequency_df)):
        for col in range(len(frequency_df.columns)):
            frequency_df.iloc[row, col] = (
                str("{:,}".format(frequency_df.iloc[row, col]))
                + " ("
                + str("{:.1%}".format(percentage_df.iloc[row, col]))
                + ")"
            )
    frequency_df = frequency_df.reset_index()
    frequency_df = frequency_df.rename(
        columns={metric + " Risk Score String_baseline": header}
    )
    make_table(
        frequency_df,
        table_name=table_name,
        analysis_name="icgm-sensitivity-analysis",
        cell_header_height=[60],
        cell_height=[30],
        cell_width=[250, 125, 125, 125, 125, 125, 125],
        image_type="png",
        view_fig=view_fig,
        save_fig=True,
        save_fig_path=fig_path,
    )
def create_sensor_characteristic_scatters(df, fig_path):
    """
    Create a set of scatterplots to see the bivariate distribution of the
    sensor characteristics (i.e. how noise varies across the sensor biases
    from the fitting). One figure is saved per (x, y) characteristic pair.

    Parameters
    ----------
    df: dataframe
        dataframe that contains the sensor characteristics (i.e. combined
        results dataframe with the "<characteristic>_icgm" columns)
    fig_path: str
        file path to save the figures to

    Returns
    -------
    """
    if not os.path.exists(fig_path):
        print("making directory " + fig_path + "...")
        os.makedirs(fig_path)
    sensor_characteristics = [
        "noise_per_sensor",
        "initial_bias",
        "bias_factor",
        "phi_drift",
        "drift_multiplier_start",
        "drift_multiplier_end",
        "noise_max",
    ]
    # Plot every pairwise combination (including x == y) of the
    # characteristics above.
    for sensor_characteristic_y in sensor_characteristics:
        for sensor_characteristic_x in sensor_characteristics:
            fig = px.scatter(
                df,
                x=sensor_characteristic_x + "_icgm",
                y=sensor_characteristic_y + "_icgm",
            )
            # NOTE(review): fig.show() opens every figure in the browser even
            # though save_view_fig is called with view_fig=False; confirm this
            # is intentional before running on the full characteristic grid.
            fig.show()
            save_view_fig(
                fig,
                image_type="png",
                figure_name=sensor_characteristic_x
                + "_"
                + sensor_characteristic_y
                + "_sensor_characteristic_distributions",
                analysis_name="icgm-sensitivity-analysis",
                view_fig=False,
                save_fig=True,
                save_fig_path=fig_path,
            )
    return
def create_paired_comparison_bivariate_sensor_characteristic_scatter(df, fig_path):
    """
    Scatter the paired-comparison outcome metrics across the bivariate
    sensor-characteristic space (each characteristic vs. initial bias,
    colored by the comparison metric).

    Parameters
    ----------
    df: dataframe
        combined (baseline + iCGM) results dataframe; must contain the
        "<characteristic>_icgm" columns and the "<metric> Difference" /
        "<metric> Ratio" comparison columns
    fig_path: str
        directory the figures are saved to

    Returns
    -------
    """
    if not os.path.exists(fig_path):
        print("making directory " + fig_path + "...")
        os.makedirs(fig_path)
    sensor_characteristics = [
        "initial_bias",
        "bias_drift_oscillations",
        "bias_drift_range_start",
        "bias_drift_range_end",
        "noise_coefficient",
        "delay",
        "bias_drift_type",
        "bias_type",
        "noise_per_sensor",
        "noise",
        "bias_factor",
        "phi_drift",
        "drift_multiplier",
        "drift_multiplier_start",
        "drift_multiplier_end",
        "noise_max",
    ]
    outcome_metrics = ["LBGI", "HBGI", "DKAI"]
    comparison_types = [" Difference", " Ratio"]
    # One scatter per (comparison, metric, characteristic) with initial
    # bias fixed as the y-axis characteristic.
    for (
        comparison_type,
        outcome_metric,
        sensor_characteristic_x,
        sensor_characteristic_y,
    ) in itertools.product(
        comparison_types, outcome_metrics, sensor_characteristics, ["initial_bias"]
    ):
        if sensor_characteristic_x != sensor_characteristic_y:
            # Drop rows where the comparison metric is inf/NaN (e.g. a zero
            # baseline value makes the ratio undefined).
            df_reduced = df.replace([np.inf, -np.inf], np.nan).dropna(
                subset=[outcome_metric + comparison_type], how="all"
            )
            print(df_reduced[outcome_metric + comparison_type].unique())
            create_scatter(
                df=df_reduced,
                x_value=sensor_characteristic_x + "_icgm",
                y_value=sensor_characteristic_y + "_icgm",
                color_value=outcome_metric + comparison_type,
                title=outcome_metric
                + comparison_type
                + " Baseline vs. iCGM Sensors<br>"
                + sensor_characteristic_x
                + " by "
                + sensor_characteristic_y,
                fig_name="distribution_"
                + outcome_metric
                + comparison_type
                + "_"
                + sensor_characteristic_x
                + "_by_"
                + sensor_characteristic_y,
                save_fig_path=fig_path,
            )
    # Create a plot for each of the sensor characteristics specified
    # NOTE(review): the two loops below iterate over empty lists, so the
    # full characteristic-grid subplot figure is currently disabled;
    # restore `comparison_types` / `outcome_metrics` to re-enable it.
    for comparison_type in []:  # comparison_types:
        for outcome_metric in []:  # outcome_metrics:
            n_cols = len(sensor_characteristics)
            n_rows = len(sensor_characteristics)
            subplot_titles = []
            sensor_characteristics_dict = {
                "sensor_num": "iCGM Sensor Number",
                "initial_bias": "Initial Bias",
                "bias_factor": "Bias Factor",
                "bias_drift_oscillations": "Bias Factor Oscillations",
                "bias_drift_range_start": "Bias Drift Range Start",
                "bias_drift_range_end": "Bias Drift Range End",
                "noise_coefficient": "Noise Coefficient",
            }
            for sensor_characteristics_y in sensor_characteristics:
                for sensor_characteristics_x in sensor_characteristics:
                    subplot_titles.append(
                        sensor_characteristics_dict[sensor_characteristics_y]
                        + " By "
                        + sensor_characteristics_dict[sensor_characteristics_x]
                    )
            fig = make_subplots(
                rows=n_rows,
                cols=n_cols,
                subplot_titles=subplot_titles,
                horizontal_spacing=0.1,
            )
            # One subplot per (y characteristic, x characteristic) pair,
            # colored by the comparison metric.
            for i, sensor_characteristic_y in enumerate(sensor_characteristics):
                for j, sensor_characteristic_x in enumerate(sensor_characteristics):
                    fig.add_trace(
                        go.Scatter(
                            x=df[sensor_characteristic_x + "_icgm"],
                            y=df[sensor_characteristic_y + "_icgm"],
                            # color=df[sensor_characteristic_y+"_icgm"],
                            mode="markers",
                            marker=dict(
                                size=3,
                                opacity=0.4,
                                color=df[outcome_metric + comparison_type],
                            ),
                            showlegend=False,
                        ),
                        row=i + 1,
                        col=j + 1,
                    )
                    fig.update_xaxes(
                        title_text=sensor_characteristics_dict[sensor_characteristic_x],
                        row=i + 1,
                        col=j + 1,
                    )
                    fig.update_yaxes(
                        title_text=sensor_characteristics_dict[sensor_characteristic_y],
                        row=i + 1,
                        col=j + 1,
                    )
            fig.update_layout(
                title=outcome_metric
                + comparison_type
                + "<br>Baseline vs. iCGM Sensors Across Sensor Characteristic Space",
                legend_title=outcome_metric + comparison_type,
                showlegend=True,
                font_size=5,
            )
            # Shrink the auto-generated subplot title annotations.
            for i in fig["layout"]["annotations"]:
                i["font"] = dict(size=7)
            save_view_fig(
                fig,
                image_type="png",
                figure_name="distribution_"
                + outcome_metric
                + comparison_type
                + "_across_sensor_characteristic_space",
                analysis_name="icgm-sensitivity-analysis",
                view_fig=True,
                save_fig=True,
                save_fig_path=fig_path,
                width=200 * n_cols,
                height=200 * n_rows,
            )
    return
def create_paired_comparison_scatter_plots(combined_df, fig_path, color_value=""):
    """
    Save one scatter plot per (comparison type, outcome metric, sensor
    characteristic) combination, showing how each paired comparison metric
    is distributed across each iCGM sensor characteristic.

    Parameters
    ----------
    combined_df: dataframe
        dataframe to make scatterplots from
    fig_path: str
        file path to save figures at
    color_value: str
        what field to use for color for plots (i.e. for a third dimension)

    Returns
    -------
    """
    if not os.path.exists(fig_path):
        print("making directory " + fig_path + "...")
        os.makedirs(fig_path)
    comparison_types = [" Ratio", " Percent Change", " Difference"]
    outcome_metrics = ["LBGI", "DKAI", "HBGI"]  # "Percent <54"]
    sensor_characteristics = [
        "mard_icgm",
        "mbe_icgm",
        "initial_bias_icgm",
        "bias_norm_factor_icgm",
        "bias_drift_oscillations_icgm",
        "bias_drift_range_start_icgm",
        "bias_drift_range_end_icgm",
        "noise_coefficient_icgm",
        "delay_icgm",
        "bias_drift_type_icgm",
        "bias_type_icgm",
        "noise_per_sensor_icgm",
        "noise_icgm",
        "bias_factor_icgm",
        "phi_drift_icgm",
        "drift_multiplier_icgm",
        "drift_multiplier_start_icgm",
        "drift_multiplier_end_icgm",
        "noise_max_icgm",
    ]
    for comparison_type, outcome_metric, sensor_characteristic in itertools.product(
        comparison_types, outcome_metrics, sensor_characteristics
    ):
        comparison_column = outcome_metric + comparison_type
        create_scatter(
            df=combined_df,
            x_value=sensor_characteristic,
            y_value=comparison_column,
            color_value=color_value,
            title="Distribution of " + comparison_column + "<br>By " + sensor_characteristic,
            # NOTE: fig_name historically keeps the leading space from
            # comparison_type, e.g. "distribution_LBGI_ Ratio_by_...".
            fig_name="distribution_"
            + outcome_metric
            + "_"
            + comparison_type
            + "_by_"
            + sensor_characteristic,
            save_fig_path=fig_path,
        )
    return
def create_paired_comparison_box_plots(combined_df, fig_path):
    """
    Create boxplots for viewing paired comparison results: one figure per
    comparison metric, with one box per iCGM sensor.

    Parameters
    ----------
    combined_df: dataframe
        dataframe to use for displaying data; must contain the
        "sensor_num_icgm_string" column and the comparison metric columns
    fig_path: str
        filepath to use for saving figures

    Returns
    -------
    """
    if not os.path.exists(fig_path):
        print("making directory " + fig_path + "...")
        os.makedirs(fig_path)
    graph_metrics = [
        "LBGI Ratio",
        "HBGI Ratio",
        "DKAI Ratio",
        "BGRI Ratio",
        "Percent <54 Ratio",
        "LBGI Percent Change",
        "HBGI Percent Change",
        "DKAI Percent Change",
        "BGRI Percent Change",
        "Percent <54 Percent Change",
        "LBGI Difference",
        "HBGI Difference",
        "DKAI Difference",
        "BGRI Difference",
        "Percent <54 Difference",
    ]
    # BUGFIX: iterate over the metrics themselves; the previous
    # `for metric in [graph_metrics]` bound the entire list to `metric`,
    # so the `metric + "..."` string concatenations below raised TypeError.
    for metric in graph_metrics:
        fig = px.box(
            combined_df,
            x="sensor_num_icgm_string",
            y=metric,
            color="sensor_num_icgm_string",
            labels={"sensor_num_icgm_string": "Sensor Number"},
            title=metric + " (Between Baseline and iCGM Simulations)",
            points=False,
        )
        fig.update_xaxes(tick0=0, dtick=1)
        fig.update_layout(showlegend=False)
        save_view_fig(
            fig,
            image_type="png",
            figure_name=metric + "_pairwise_comparison_boxplot",
            analysis_name="icgm-sensitivity-analysis",
            view_fig=True,
            save_fig=True,
            save_fig_path=fig_path,
        )
    return
def create_paired_comparison_by_analysis_level_scatter_plots(
    combined_df, fig_path, analysis_level="analysis_type_label"
):
    """
    Create a set of scatter plots showing the distribution of comparison risk metrics by sensor
    characteristic and how this changes across the categories in an analysis level (bg test condition, analysis type)

    Parameters
    ----------
    combined_df: dataframe
        dataframe of combined results
    fig_path: str
        file path for saving figures
    analysis_level: str
        analysis level want to see the scatter plot comparison for
        ("analysis_type_label" or "bg_test_condition_label")

    Returns
    -------
    """
    if not os.path.exists(fig_path):
        print("making directory " + fig_path + "...")
        os.makedirs(fig_path)
    sensor_characteristics = [
        "initial_bias"
    ]  # "bias_factor", "bias_drift_oscillations", "bias_drift_range_start", "bias_drift_range_end","noise_coefficient"]
    outcome_metrics = ["LBGI", "HBGI", "DKAI"]
    comparison_types = [" Ratio"]  # [" Difference",
    combined_df = combined_df.sort_values(by=["bg_test_condition_label_icgm"])
    analysis_level_unique_values = combined_df[analysis_level + "_icgm"].unique()
    # Create a plot for each of the sensor characteristics specified
    for comparison_type in comparison_types:
        for sensor_characteristic in sensor_characteristics:
            # Grid: one row per analysis-level category, one column per
            # outcome metric.
            n_cols = len(outcome_metrics)
            n_rows = len(analysis_level_unique_values)
            subplot_titles = []
            for analysis_level_value in analysis_level_unique_values:
                for metric in outcome_metrics:
                    if analysis_level == "bg_test_condition":
                        subplot_titles.append(
                            "BG Test Condition "
                            + str(analysis_level_value)
                            + ", "
                            + metric
                            + comparison_type
                        )
                    else:
                        subplot_titles.append(
                            str(analysis_level_value) + ", " + metric + comparison_type
                        )
            fig = make_subplots(
                rows=n_rows,
                cols=n_cols,
                subplot_titles=subplot_titles,
                horizontal_spacing=0.1,
            )
            for i, analysis_level_value in enumerate(analysis_level_unique_values):
                for j, metric in enumerate(outcome_metrics):
                    df = combined_df[
                        combined_df[analysis_level + "_icgm"] == analysis_level_value
                    ]
                    print(
                        df[[sensor_characteristic + "_icgm", metric + comparison_type]]
                    )
                    fig.add_trace(
                        go.Scatter(
                            x=df[sensor_characteristic + "_icgm"],
                            y=df[metric + comparison_type],
                            customdata=df["filename_icgm"],
                            mode="markers",
                            marker=dict(size=4, opacity=0.6),
                            showlegend=False,
                        ),
                        row=i + 1,
                        col=j + 1,
                    )
            y_columns = [metric + comparison_type for metric in outcome_metrics]
            # NOTE(review): `max(combined_df[y_columns])` maxes over the
            # column labels (strings), not values, and y_max_value is never
            # used below (y-range is hard-coded to [0, 200]).
            y_max_value = max(combined_df[y_columns])
            x_max_value = max(combined_df[sensor_characteristic + "_icgm"])
            x_min_value = min(combined_df[sensor_characteristic + "_icgm"])
            analysis_level_dict = {
                "bg_test_condition_label": "BG Test Condition",
                "analysis_type_label": "Analysis Type",
            }
            fig.update_layout(
                title="Outcome Metric "
                + comparison_type
                + ": Baseline vs. iCGM Sensors<br>By "
                + analysis_level_dict[analysis_level],
                legend_title="Risk Scores",
                showlegend=True,
                font_size=6,
            )
            sensor_characteristics_dict = {
                "sensor_num": "iCGM Sensor Number",
                "initial_bias": "Initial Bias",
                "bias_factor": "Bias Factor",
                "bias_drift_oscillations": "Bias Factor Oscillations",
                "bias_drift_range_start": "Bias Drift Range Start",
                "bias_drift_range_end": "Bias Drift Range End",
                "noise_coefficient": "Noise Coefficient",
            }
            fig.update_yaxes(range=[0, 200])
            fig.update_xaxes(
                title=sensor_characteristics_dict[sensor_characteristic],
                range=[x_min_value, x_max_value],
            )
            # Shrink the auto-generated subplot title annotations.
            for i in fig["layout"]["annotations"]:
                i["font"] = dict(size=7)
            save_view_fig(
                fig,
                image_type="png",
                figure_name="distribution_"
                + comparison_type
                + "_"
                + analysis_level
                + "_sensor_characteristic"
                + "_pairwise_comparison_scatter",
                analysis_name="icgm-sensitivity-analysis",
                view_fig=False,
                save_fig=True,
                save_fig_path=fig_path,
                width=200 * n_cols,
                height=200 * n_rows,
            )
    return
def create_paired_comparison_by_sensor_scatter_plots(combined_df, fig_path):
    """
    Create a grid of scatter plots (one subplot per iCGM sensor, 5x6 grid)
    comparing baseline vs. iCGM outcome metrics for each paired simulation,
    and save one figure per metric.

    Fixes vs. previous version: the per-sensor slice of ``combined_df`` is
    copied before adding the ``hovertext`` column (writing into a boolean-mask
    slice raises SettingWithCopyWarning and can silently fail under pandas
    copy-on-write), and the duplicated marker-style branches were collapsed
    (both built the same dict).

    Parameters
    ----------
    combined_df: dataframe
        dataframe of combined (baseline + iCGM) paired results
    fig_path: str
        file path for saving figures

    Returns
    -------
    None
    """
    if not os.path.exists(fig_path):
        print("making directory " + fig_path + "...")
        os.makedirs(fig_path)

    graph_metrics = ["LBGI", "DKAI", "HBGI", "BGRI"]
    n_rows = 5
    n_cols = 6

    # One subplot per sensor, labeled "Sensor 0" .. "Sensor <n_rows*n_cols - 1>".
    subplot_titles = [
        "Sensor " + str(i * n_cols + j) for i in range(n_rows) for j in range(n_cols)
    ]

    # Shaded background bands by risk level (used on LBGI/DKAI subplots only).
    fill_color_dict = {
        "0 - None": "rgba(15, 115, 198, 0.2)",
        "1 - Negligible": "rgba(6, 180, 6, 0.2)",
        "2 - Minor": "rgba(208, 192, 127, 0.2)",
        "3 - Serious": "rgba(225, 131, 37, 0.2)",
        "4 - Critical": "rgba(154, 58, 57, 0.2)",
    }
    risk_levels = [
        "0 - None",
        "1 - Negligible",
        "2 - Minor",
        "3 - Serious",
        "4 - Critical",
    ]

    # Sensor characteristics shown in the hover label: (display label, column).
    sensor_fields = [
        ("Initial Bias", "initial_bias_icgm"),
        ("Bias Factor", "bias_factor_icgm"),
        ("Bias Drift Oscillations", "bias_drift_oscillations_icgm"),
        ("Bias Drift Range Start", "bias_drift_range_start_icgm"),
        ("Bias Drift Range End", "bias_drift_range_end_icgm"),
        ("Noise Coefficient", "noise_coefficient_icgm"),
    ]

    for metric in graph_metrics:
        fig = make_subplots(
            rows=n_rows,
            cols=n_cols,
            subplot_titles=subplot_titles,
            horizontal_spacing=0.05,
        )

        # Shared axis limit so every subplot (and the y=x line) is comparable.
        max_value = (
            max(
                max(combined_df[metric + "_baseline"]),
                max(combined_df[metric + "_icgm"]),
            )
            + 2
        )

        for i in range(n_rows):
            for j in range(n_cols):
                sensor_num = i * n_cols + j

                # Stacked risk-band backgrounds for LBGI and DKAI.
                if metric in ["LBGI", "DKAI"]:
                    # Only the first subplot contributes legend entries.
                    show_legend = (i == 0) and (j == 0)
                    if metric == "LBGI":
                        thresholds = [0, 2.5, 2.5, 5, max_value]
                    else:  # DKAI
                        # NOTE(review): band heights are cumulative via
                        # stackgroup="one" (2, 8, 16, 21, ...); confirm these
                        # match the intended DKAI risk-score boundaries.
                        thresholds = [2, 6, 8, 5, max_value]
                    for risk_level, threshold in zip(risk_levels, thresholds):
                        fig.add_trace(
                            go.Scatter(
                                x=[0, max_value],
                                y=[threshold, threshold],
                                hoverinfo="skip",
                                name=risk_level,
                                mode="lines",
                                fillcolor=fill_color_dict[risk_level],
                                line=dict(width=0.5, color=fill_color_dict[risk_level]),
                                showlegend=show_legend,
                                stackgroup="one",
                            ),
                            row=i + 1,
                            col=j + 1,
                        )

                # Filter the dataset for this particular sensor. .copy() so the
                # hovertext column below is written to an independent frame.
                df = combined_df[combined_df["sensor_num_icgm"] == sensor_num].copy()

                # Hover label: filename, metric values, and all iCGM sensor
                # characteristics.
                hovertext = (
                    df["filename_icgm"]
                    + "<br>Baseline: "
                    + df[metric + "_baseline"].astype(str)
                    + "<br>iCGM: "
                    + df[metric + "_icgm"].astype(str)
                    + "<br>iCGM Sensor Characteristics: "
                )
                for label, column in sensor_fields:
                    hovertext = hovertext + "<br>" + label + ": " + df[column].astype(str)
                df["hovertext"] = hovertext

                # One marker per paired simulation.
                fig.add_trace(
                    go.Scatter(
                        name="Sensor " + str(sensor_num),
                        x=df[metric + "_baseline"],
                        y=df[metric + "_icgm"],
                        customdata=df["filename_icgm"],
                        hovertext=df["hovertext"],
                        mode="markers",
                        showlegend=False,
                        marker=dict(size=5, opacity=0.6, color="gray"),
                    ),
                    row=i + 1,
                    col=j + 1,
                )

                # y = x identity line: points above it mean the iCGM run
                # scored worse than baseline.
                fig.add_trace(
                    go.Scatter(
                        x=[0, max_value],
                        y=[0, max_value],
                        mode="lines",
                        showlegend=False,
                        opacity=0.4,
                        line=dict(width=1, color="black"),
                    ),
                    row=i + 1,
                    col=j + 1,
                )

        fig.update_layout(
            title=metric + ": Baseline vs. iCGM Sensors",
            legend_title="Risk Scores",
            showlegend=True,
            font_size=8,
        )
        fig.update_yaxes(title="iCGM", range=[0, max_value])
        fig.update_xaxes(title="Baseline", range=[0, max_value])
        for annotation in fig["layout"]["annotations"]:
            annotation["font"] = dict(size=10)

        save_view_fig(
            fig,
            image_type="png",
            figure_name=metric + "_pairwise_comparison_scatter",
            analysis_name="icgm-sensitivity-analysis",
            view_fig=False,
            save_fig=True,
            save_fig_path=fig_path,
            width=200 * n_cols,
            height=200 * n_rows,
        )
    return
def create_sensor_characteristics_table(df, fig_path):
    """
    Print a deduplicated table of the iCGM sensor characteristics, one row
    per sensor, sorted by sensor number.

    Parameters
    ----------
    df: dataframe
        dataframe of combined results (must contain the *_icgm
        sensor-characteristic columns selected below)
    fig_path: str
        file path for saving figures (currently unused; kept for interface
        consistency with the other figure helpers)

    Returns
    -------
    None
    """
    characteristic_cols = [
        "sensor_num_icgm",
        "initial_bias_icgm",
        "bias_factor_icgm",
        "bias_drift_oscillations_icgm",
        "bias_drift_range_start_icgm",
        "bias_drift_range_end_icgm",
        "noise_coefficient_icgm",
    ]
    # Human-readable column headers for display.
    display_names = {
        "sensor_num_icgm": "iCGM Sensor Number",
        "initial_bias_icgm": "Initial Bias",
        "bias_factor_icgm": "Bias Factor",
        "bias_drift_oscillations_icgm": "Bias Factor Oscillations",
        "bias_drift_range_start_icgm": "Bias Drift Range Start",
        "bias_drift_range_end_icgm": "Bias Drift Range End",
        "noise_coefficient_icgm": "Noise Coefficient",
    }
    sensor_characteristics_df = (
        df[characteristic_cols]
        .drop_duplicates()
        .sort_values(by=["sensor_num_icgm"])
        .rename(columns=display_names)
    )
    print(sensor_characteristics_df)
    return
def generate_all_results_figures(
    df, fig_path=os.path.join("..", "..", "reports", "figures")
):
    """
    Generate and save the full set of results figures: Spearman correlation
    table, boxplots, distribution tables, bubble plots, the summary risk
    table, demographics tables, CDF plots, and frequency tables.

    Parameters
    ----------
    df: dataframe
        dataframe of combined results
    fig_path: str
        file path for saving figures (defaults to ../../reports/figures)

    Returns
    -------
    None
    """
    # Create Spearman Correlation Coefficient Table
    spearman_correlation_table(df, save_fig_path=fig_path)
    # Iterate through each metric and analysis_level category shown below and create boxplot
    # figure with both log scale and linear scale.
    metrics = ["LBGI", "DKAI"]
    analysis_levels = ["bg_test_condition", "analysis_type", "all"]
    # Linear scale is currently disabled; only log-scale boxplots are produced.
    y_axis_scales = ["log"]  # , "linear"]
    for analysis_level, metric, axis_scale in itertools.product(
        analysis_levels, metrics, y_axis_scales
    ):
        make_boxplot(
            df,
            figure_name="boxplot-" + analysis_level + "-" + metric,
            analysis_name="icgm-sensitivity-analysis",
            metric=metric,
            level_of_analysis=analysis_level,
            notched_boxplot=False,
            y_scale_type=axis_scale,
            image_type="png",
            view_fig=False,
            save_fig=True,
            save_fig_path=fig_path,
        )
        # NOTE: histogram generation is currently disabled; kept below for reference.
        """
        make_histogram(
            df,
            figure_name="histogram-" + analysis_level + "-" + metric,
            analysis_name="icgm-sensitivity-analysis",
            metric=metric,
            level_of_analysis=analysis_level,
            image_type="png",
            view_fig=False,
            save_fig=True,
            save_fig_path=fig_path
        )
        """
        make_distribution_table(
            df,
            table_name="distribution-table-" + analysis_level + "-" + metric,
            analysis_name="icgm-sensitivity-analysis",
            metric=metric,
            level_of_analysis=analysis_level,
            image_type="png",
            view_fig=False,
            save_fig=True,
            save_fig_path=fig_path,
        )
    # Bubble plots of the categorical risk scores, per analysis level.
    metrics = ["LBGI Risk Score", "DKAI Risk Score"]
    analysis_levels = ["bg_test_condition", "analysis_type", "all"]
    for analysis_level, metric in itertools.product(analysis_levels, metrics):
        make_bubble_plot(
            df,
            image_type="png",
            figure_name="bubbleplot-" + analysis_level + "-" + metric,
            analysis_name="icgm-sensitivity-analysis",
            metric=metric,
            level_of_analysis=analysis_level,
            view_fig=False,
            save_fig=True,
            save_fig_path=fig_path,
        )
    ########### SUMMARY TABLE #################
    all_analyses_summary_df = prepare_results_for_summary_table(df)
    # make table
    make_table(
        all_analyses_summary_df.reset_index(),
        table_name="summary-risk-table",
        analysis_name="icgm-sensitivity-analysis",
        cell_header_height=[60],
        cell_height=[30],
        cell_width=[200, 150, 150, 150],
        image_type="png",
        view_fig=False,
        save_fig=True,
        save_fig_path=fig_path,
    )
    ########### DEMOGRAPHICS TABLE #################
    get_metadata_tables(df, fig_path=fig_path)
    ########## CDF Plots #################
    metrics = ["LBGI", "DKAI", "LBGI Risk Score", "DKAI Risk Score"]
    for metric in metrics:
        create_cdf(
            data=df[metric],
            title="CDF for " + metric,
            image_type="png",
            figure_name="cdf-" + metric,
            analysis_name="icgm-sensitivity-analysis",
            view_fig=False,
            save_fig=True,
            save_fig_path=fig_path,
        )
    ########## Proportion/Frequency Tables #################
    metrics = ["LBGI Risk Score", "DKAI Risk Score"]
    analysis_levels = ["bg_test_condition", "analysis_type", "all"]
    for analysis_level, metric in itertools.product(analysis_levels, metrics):
        make_frequency_table(
            df,
            image_type="png",
            table_name="frequency-table-" + metric + "-" + analysis_level,
            analysis_name="icgm-sensitivity-analysis",
            cell_header_height=[30],
            cell_height=[30],
            cell_width=[250, 130, 135, 120, 120, 120],
            metric=metric,
            level_of_analysis=analysis_level,
            view_fig=False,
            save_fig=True,
            save_csv=True,
            save_fig_path=fig_path,
        )
    return
def settings_outside_clinical_bounds(cir, isf, sbr):
    """
    Identifies whether any of the settings are outside clinical bounds
    (based on medical advisory).

    Clinically acceptable ranges (inclusive): ISF 10-500, CIR 2-150,
    SBR 0.05-30.

    Parameters
    ----------
    cir: float
        carb to insulin ratio for the particular scenario
    isf: float
        insulin sensitivity factor for the particular scenario
    sbr: float
        scheduled basal rate for the particular scenario

    Returns
    -------
    bool
        True if any setting falls outside its clinically acceptable range.
    """
    isf_in_bounds = 10 <= float(isf) <= 500
    cir_in_bounds = 2 <= float(cir) <= 150
    sbr_in_bounds = 0.05 <= float(sbr) <= 30
    return not (isf_in_bounds and cir_in_bounds and sbr_in_bounds)
def create_data_frame_for_figures(
    results_path,
    save_path,
    results_folder_name,
    old_format=False,
    patient_characteristics_path=os.path.join(
        "..",
        "..",
        "data",
        "raw",
        "icgm-sensitivity-analysis-results-2020-09-19-nogit",
    ),
    is_baseline=False,
):
    """
    Create an aggregate dataframe of the simulation results (for either
    baseline or iCGM) for use in creating figures and analysing the results,
    and save the aggregated dataframe (and, for the new format, the list of
    removed scenarios) as csvs under save_path.

    Fix vs. previous version: all json files are now opened with ``with`` so
    the handles are closed (the previous code leaked an open file object per
    result file).

    Parameters
    ----------
    results_path: str
        path to the folder containing the raw simulation result files
    save_path: str
        file path of outer folder where the aggregated csvs are written
    results_folder_name: str
        name of the folder the results are in (used in the saved csv names)
    old_format: bool
        whether the data is in the old format (.csv results with separate
        patient/sensor characteristic jsons)
    patient_characteristics_path: str
        filepath of the patient characteristics jsons (old format only)
    is_baseline: bool
        whether this results set is the baseline (vs. icgm) set; passed to
        get_data, which loads baseline results slightly differently

    Returns
    -------
    pandas.DataFrame of the aggregated results
    """
    data = []
    if old_format:
        for i, filename in enumerate(sorted(os.listdir(results_path))):
            if not filename.endswith(".csv"):
                continue
            print(i, filename)
            simulation_df = pd.read_csv(os.path.join(results_path, filename))
            filename_components = filename.split(".")

            # Patient characteristics live in a json named after the scenario.
            with open(
                os.path.join(
                    patient_characteristics_path, filename_components[0] + ".json"
                ),
                "r",
            ) as f:
                patient_characteristics_df = pd.DataFrame(
                    json.load(f), index=["i"]
                )

            # Sensor characteristics: all ideal sensors share one json; real
            # iCGM sensors have one json per result file.
            if filename_components[2] == "sIdealSensor":
                sensor_json_path = os.path.join(results_path, "sIdealSensor.json")
            else:
                sensor_json_path = os.path.join(
                    results_path,
                    filename_components[0]
                    + "."
                    + filename_components[1]
                    + "."
                    + filename_components[2]
                    + ".json",
                )
            with open(sensor_json_path, "r") as f:
                sensor_characteristics_df = pd.DataFrame(
                    json.load(f), index=["i"]
                )

            # Ideal-sensor rows carry no sensor characteristics.
            if filename_components[2] == "sIdealSensor":
                data.append(
                    get_data_old_format(
                        filename, simulation_df, patient_characteristics_df
                    )
                )
            else:
                data.append(
                    get_data_old_format(
                        filename,
                        simulation_df,
                        patient_characteristics_df,
                        sensor_characteristics_df,
                    )
                )
    else:
        removed_scenarios = []
        for i, filename in enumerate(sorted(os.listdir(results_path))):
            if not filename.endswith(".tsv"):
                continue
            print(i, filename)
            simulation_df = pd.read_csv(
                os.path.join(results_path, filename), sep="\t"
            )
            # Sanity check that the simulation warm-up is present.
            assert (
                simulation_df.loc[0]["bg"] == simulation_df.loc[1]["bg"]
            ), "First two BG values of simulation are not equal"

            with open(
                os.path.join(results_path, filename.replace(".tsv", ".json")), "r"
            ) as f:
                simulation_characteristics_json_data = json.load(f)

            # Extract therapy settings, stripping the unit suffixes
            # (" g", " m", " U") from the json strings.
            patient_config = simulation_characteristics_json_data["patient"][
                "config"
            ]
            cir = patient_config["carb_ratio_schedule"]["schedule"][0][
                "setting"
            ].replace(" g", "")
            isf = patient_config["insulin_sensitivity_schedule"]["schedule"][0][
                "setting"
            ].replace(" m", "")
            sbr = patient_config["basal_schedule"]["schedule"][0][
                "setting"
            ].replace(" U", "")

            # Exclude scenarios whose settings fall outside clinical bounds.
            if settings_outside_clinical_bounds(cir, isf, sbr):
                print(filename + " has settings outside clinical bounds.")
                removed_scenarios.append([filename, cir, isf, sbr])
            else:
                data.append(
                    get_data(
                        filename,
                        simulation_df,
                        simulation_characteristics_json_data,
                        baseline=is_baseline,
                    )
                )

        # Record which scenarios were excluded and why.
        removed_scenarios_df = pd.DataFrame(
            removed_scenarios, columns=["filename", "cir", "isf", "sbr"]
        )
        removed_scenarios_df.to_csv(
            path_or_buf=os.path.join(
                save_path, results_folder_name + "_removed_scenarios_df.csv"
            ),
            index=False,
        )

    columns = [
        "filename",
        "sim_id",
        "virtual_patient_num",
        "sensor_num",
        "patient_scenario_filename",
        "age",
        "ylw",
        "CIR",
        "ISF",
        "SBR",
        "starting_bg",
        "starting_bg_sensor",
        "true_bolus",
        "initial_bias",
        "bias_norm_factor",
        "bias_drift_oscillations",
        "bias_drift_range_start",
        "bias_drift_range_end",
        "noise_coefficient",
        "delay",
        "bias_drift_type",
        "bias_type",
        "noise_per_sensor",
        "noise",
        "bias_factor",
        "phi_drift",
        "drift_multiplier",
        "drift_multiplier_start",
        "drift_multiplier_end",
        "noise_max",
        "mard",
        "mbe",
        "bg_test_condition",
        "analysis_type",
        "LBGI",
        "LBGI Risk Score",
        "DKAI",
        "DKAI Risk Score",
        "HBGI",
        "BGRI",
        "percent_lt_54",
    ]
    results_df = pd.DataFrame(data, columns=columns)
    results_df = clean_up_results_df(results_df)
    results_df.to_csv(
        path_or_buf=os.path.join(save_path, results_folder_name + "_results_df.csv"),
        index=False,
    )
    return results_df
def clean_up_results_df(results_df):
    """
    Normalize an aggregated results dataframe in place: coerce age/ylw to
    numeric, rename legacy analysis-type values, and add display-label
    columns for analysis type, BG test condition, and risk scores.

    Parameters
    ----------
    results_df: dataframe
        aggregated results dataframe to clean up

    Returns
    -------
    the same dataframe, modified in place
    """
    numeric_cols = ["age", "ylw"]
    results_df[numeric_cols] = results_df[numeric_cols].apply(pd.to_numeric)

    # Rename the legacy analysis-type values anywhere they appear.
    for old_value, new_value in (
        ("tempBasal", "Temp Basal Analysis"),
        ("correctionBolus", "Correction Bolus Analysis"),
    ):
        results_df.replace({old_value: new_value}, inplace=True)

    # Derived display-label columns: (new column, source column, mapping).
    # NOTE(review): bg_test_condition_label is mapped through
    # analysis_type_labels, which looks like a copy-paste slip (the bg test
    # conditions are not analysis types) — confirm the intended mapping.
    label_specs = [
        ("analysis_type_label", "analysis_type", analysis_type_labels),
        ("bg_test_condition_label", "bg_test_condition", analysis_type_labels),
        ("DKAI Risk Score String", "DKAI Risk Score", score_dict),
        ("LBGI Risk Score String", "LBGI Risk Score", score_dict),
    ]
    for new_col, source_col, mapping in label_specs:
        results_df[new_col] = results_df[source_col].replace(mapping)

    return results_df
########## DICTIONARIES ###################

# Ordered risk-level names and their plot colors; list index doubles as the
# numeric risk score (0 = lowest risk, 4 = highest).
_RISK_LEVEL_NAMES = ("None", "Negligible", "Minor", "Serious", "Critical")
_RISK_LEVEL_COLORS = ("#0F73C6", "#06B406", "#D0C07F", "#E18325", "#9A3A39")

# Numeric risk score -> display string (e.g. 0 -> "0 - None").
score_dict = {
    score: str(score) + " - " + name
    for score, name in enumerate(_RISK_LEVEL_NAMES)
}

# Risk-score display string -> plot color.
color_dict = {
    str(score) + " - " + name: color
    for score, (name, color) in enumerate(
        zip(_RISK_LEVEL_NAMES, _RISK_LEVEL_COLORS)
    )
}

# Raw analysis-type keys -> display labels.
analysis_type_labels = dict(
    correction_bolus="Correction Bolus",
    meal_bolus="Meal Bolus",
    temp_basal_only="Temp Basal Only",
)

# Level-of-analysis keys -> display labels.
level_of_analysis_dict = dict(
    all="All Analyses",
    analysis_type="Analysis Type",
    bg_test_condition="BG Test Condition",
)
if __name__ == "__main__":
#### LOAD IN DATA #####
# Specify this parameter based on whether want to load the data and run the figures
# or just run the figures
data_already_loaded = True
# Specify the iCGM data filepath
icgm_folder_name = "icgm-sensitivity-analysis-results-2020-11-02-nogit"
results_files_path = os.path.join("..", "..", "data", "raw", icgm_folder_name)
# Specify the Baseline data fildepath
ideal_sensor_folder_name = "icgm-sensitivity-analysis-results-2020-11-05-nogit"
baseline_files_path = os.path.join(
"..", "..", "data", "raw", ideal_sensor_folder_name
)
# Set where to save figures
save_fig_folder_name = icgm_folder_name
results_save_fig_path = os.path.join(
"..",
"..",
"reports",
"figures",
"icgm-sensitivity-paired-comparison-figures",
save_fig_folder_name,
)
if not os.path.exists(results_save_fig_path):
print("making directory " + results_save_fig_path + "...")
os.makedirs(results_save_fig_path)
# Load in the data (uncomment this section if data not previously loaded for desired files)
if not data_already_loaded:
icgm_results_df = create_data_frame_for_figures(
results_path=results_files_path,
save_path=results_save_fig_path,
results_folder_name=icgm_folder_name,
old_format=False,
)
baseline_sensor_df = create_data_frame_for_figures(
is_baseline=True,
results_path=baseline_files_path,
save_path=results_save_fig_path,
results_folder_name=ideal_sensor_folder_name,
old_format=False,
)
run_pairwise_comparison(
results_df=icgm_results_df,
baseline_df=baseline_sensor_df,
save_fig_folder_name=save_fig_folder_name,
)
else:
# Just create the figures (loads in the already existing combined_df)
run_pairwise_comparison_figures(save_fig_folder_name=save_fig_folder_name)
| [
"numpy.abs",
"datetime.datetime.utcnow",
"numpy.mean",
"numpy.arange",
"tidepool_data_science_metrics.glucose.blood_glucose_risk_index",
"os.path.join",
"plotly.express.scatter",
"numpy.unique",
"pandas.DataFrame",
"plotly.express.box",
"os.path.exists",
"tidepool_data_science_metrics.insulin.... | [((2034, 2052), 'numpy.abs', 'np.abs', (['icgm_error'], {}), '(icgm_error)\n', (2040, 2052), True, 'import numpy as np\n'), ((2609, 2660), 'numpy.mean', 'np.mean', (["df.loc[df['withinMeasRange'], 'icgmError']"], {}), "(df.loc[df['withinMeasRange'], 'icgmError'])\n", (2616, 2660), True, 'import numpy as np\n'), ((3134, 3187), 'numpy.mean', 'np.mean', (['abs_relative_difference_in_measurement_range'], {}), '(abs_relative_difference_in_measurement_range)\n', (3141, 3187), True, 'import numpy as np\n'), ((7242, 7279), 'tidepool_data_science_metrics.glucose.lbgi_risk_score', 'metrics.glucose.lbgi_risk_score', (['LBGI'], {}), '(LBGI)\n', (7273, 7279), True, 'import tidepool_data_science_metrics as metrics\n'), ((7291, 7368), 'tidepool_data_science_metrics.insulin.dka_index', 'metrics.insulin.dka_index', (["simulation_df['iob']", "simulation_df['sbr'].iloc[0]"], {}), "(simulation_df['iob'], simulation_df['sbr'].iloc[0])\n", (7316, 7368), True, 'import tidepool_data_science_metrics as metrics\n'), ((7383, 7419), 'tidepool_data_science_metrics.insulin.dka_risk_score', 'metrics.insulin.dka_risk_score', (['DKAI'], {}), '(DKAI)\n', (7413, 7419), True, 'import tidepool_data_science_metrics as metrics\n'), ((7610, 7676), 'tidepool_data_science_metrics.glucose.percent_values_lt_54', 'metrics.glucose.percent_values_lt_54', ([], {'bg_array': "simulation_df['bg']"}), "(bg_array=simulation_df['bg'])\n", (7646, 7676), True, 'import tidepool_data_science_metrics as metrics\n'), ((13176, 13213), 'tidepool_data_science_metrics.glucose.lbgi_risk_score', 'metrics.glucose.lbgi_risk_score', (['LBGI'], {}), '(LBGI)\n', (13207, 13213), True, 'import tidepool_data_science_metrics as metrics\n'), ((13225, 13302), 'tidepool_data_science_metrics.insulin.dka_index', 'metrics.insulin.dka_index', (["simulation_df['iob']", "simulation_df['sbr'].iloc[0]"], {}), "(simulation_df['iob'], simulation_df['sbr'].iloc[0])\n", (13250, 13302), True, 'import 
tidepool_data_science_metrics as metrics\n'), ((13317, 13353), 'tidepool_data_science_metrics.insulin.dka_risk_score', 'metrics.insulin.dka_risk_score', (['DKAI'], {}), '(DKAI)\n', (13347, 13353), True, 'import tidepool_data_science_metrics as metrics\n'), ((13544, 13610), 'tidepool_data_science_metrics.glucose.percent_values_lt_54', 'metrics.glucose.percent_values_lt_54', ([], {'bg_array': "simulation_df['bg']"}), "(bg_array=simulation_df['bg'])\n", (13580, 13610), True, 'import tidepool_data_science_metrics as metrics\n'), ((15126, 15186), 'pandas.IntervalIndex.from_breaks', 'pd.IntervalIndex.from_breaks', (['bin_breakpoints'], {'closed': '"""left"""'}), "(bin_breakpoints, closed='left')\n", (15154, 15186), True, 'import pandas as pd\n'), ((16003, 16036), 'numpy.array', 'np.array', (['[0, 7, 14, 25, 50, 100]'], {}), '([0, 7, 14, 25, 50, 100])\n', (16011, 16036), True, 'import numpy as np\n'), ((16278, 16325), 'pandas.cut', 'pd.cut', (["demographic_reduced_df['age']", 'age_bins'], {}), "(demographic_reduced_df['age'], age_bins)\n", (16284, 16325), True, 'import pandas as pd\n'), ((17312, 17336), 'numpy.array', 'np.array', (['[0, 1, 5, 100]'], {}), '([0, 1, 5, 100])\n', (17320, 17336), True, 'import numpy as np\n'), ((17583, 17630), 'pandas.cut', 'pd.cut', (["demographic_reduced_df['ylw']", 'ylw_bins'], {}), "(demographic_reduced_df['ylw'], ylw_bins)\n", (17589, 17630), True, 'import pandas as pd\n'), ((19132, 19179), 'pandas.cut', 'pd.cut', (["demographic_reduced_df['CIR']", 'cir_bins'], {}), "(demographic_reduced_df['CIR'], cir_bins)\n", (19138, 19179), True, 'import pandas as pd\n'), ((20716, 20763), 'pandas.cut', 'pd.cut', (["demographic_reduced_df['ISF']", 'isf_bins'], {}), "(demographic_reduced_df['ISF'], isf_bins)\n", (20722, 20763), True, 'import pandas as pd\n'), ((22133, 22179), 'pandas.cut', 'pd.cut', (["demographic_reduced_df['SBR']", 'br_bins'], {}), "(demographic_reduced_df['SBR'], br_bins)\n", (22139, 22179), True, 'import pandas as pd\n'), ((23367, 
23413), 'os.path.join', 'os.path.join', (['""".."""', '""".."""', '"""reports"""', '"""figures"""'], {}), "('..', '..', 'reports', 'figures')\n", (23379, 23413), False, 'import os\n'), ((25299, 25342), 'plotly.graph_objects.Figure', 'go.Figure', ([], {'data': '_table', 'layout': 'table_layout'}), '(data=_table, layout=table_layout)\n', (25308, 25342), True, 'import plotly.graph_objects as go\n'), ((25388, 25597), 'src.visualization.save_view_fig.save_view_fig', 'save_view_fig', (['fig'], {'image_type': 'image_type', 'figure_name': 'table_name', 'analysis_name': 'analysis_name', 'view_fig': 'view_fig', 'save_fig': 'save_fig', 'save_fig_path': 'save_fig_path', 'width': 'table_width', 'height': 'table_height'}), '(fig, image_type=image_type, figure_name=table_name,\n analysis_name=analysis_name, view_fig=view_fig, save_fig=save_fig,\n save_fig_path=save_fig_path, width=table_width, height=table_height)\n', (25401, 25597), False, 'from src.visualization.save_view_fig import save_view_fig\n'), ((26181, 26227), 'os.path.join', 'os.path.join', (['""".."""', '""".."""', '"""reports"""', '"""figures"""'], {}), "('..', '..', 'reports', 'figures')\n", (26193, 26227), False, 'import os\n'), ((29035, 29140), 'src.visualization.save_view_fig.save_view_fig', 'save_view_fig', (['summary_fig', 'image_type', 'figure_name', 'analysis_name', 'view_fig', 'save_fig', 'save_fig_path'], {}), '(summary_fig, image_type, figure_name, analysis_name, view_fig,\n save_fig, save_fig_path)\n', (29048, 29140), False, 'from src.visualization.save_view_fig import save_view_fig\n'), ((29469, 29515), 'os.path.join', 'os.path.join', (['""".."""', '""".."""', '"""reports"""', '"""figures"""'], {}), "('..', '..', 'reports', 'figures')\n", (29481, 29515), False, 'import os\n'), ((36913, 37018), 'src.visualization.save_view_fig.save_view_fig', 'save_view_fig', (['summary_fig', 'image_type', 'figure_name', 'analysis_name', 'view_fig', 'save_fig', 'save_fig_path'], {}), '(summary_fig, image_type, 
figure_name, analysis_name, view_fig,\n save_fig, save_fig_path)\n', (36926, 37018), False, 'from src.visualization.save_view_fig import save_view_fig\n'), ((37345, 37391), 'os.path.join', 'os.path.join', (['""".."""', '""".."""', '"""reports"""', '"""figures"""'], {}), "('..', '..', 'reports', 'figures')\n", (37357, 37391), False, 'import os\n'), ((40321, 40426), 'src.visualization.save_view_fig.save_view_fig', 'save_view_fig', (['summary_fig', 'image_type', 'figure_name', 'analysis_name', 'view_fig', 'save_fig', 'save_fig_path'], {}), '(summary_fig, image_type, figure_name, analysis_name, view_fig,\n save_fig, save_fig_path)\n', (40334, 40426), False, 'from src.visualization.save_view_fig import save_view_fig\n'), ((40776, 40822), 'os.path.join', 'os.path.join', (['""".."""', '""".."""', '"""reports"""', '"""figures"""'], {}), "('..', '..', 'reports', 'figures')\n", (40788, 40822), False, 'import os\n'), ((46432, 46472), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': 'summary_table_cols'}), '(columns=summary_table_cols)\n', (46444, 46472), True, 'import pandas as pd\n'), ((47405, 47451), 'os.path.join', 'os.path.join', (['""".."""', '""".."""', '"""reports"""', '"""figures"""'], {}), "('..', '..', 'reports', 'figures')\n", (47417, 47451), False, 'import os\n'), ((51115, 51125), 'numpy.sort', 'np.sort', (['x'], {}), '(x)\n', (51122, 51125), True, 'import numpy as np\n'), ((51429, 51475), 'os.path.join', 'os.path.join', (['""".."""', '""".."""', '"""reports"""', '"""figures"""'], {}), "('..', '..', 'reports', 'figures')\n", (51441, 51475), False, 'import os\n'), ((51718, 51729), 'plotly.graph_objects.Figure', 'go.Figure', ([], {}), '()\n', (51727, 51729), True, 'import plotly.graph_objects as go\n'), ((51840, 51937), 'src.visualization.save_view_fig.save_view_fig', 'save_view_fig', (['fig', 'image_type', 'figure_name', 'analysis_name', 'view_fig', 'save_fig', 'save_fig_path'], {}), '(fig, image_type, figure_name, analysis_name, view_fig,\n save_fig, 
save_fig_path)\n', (51853, 51937), False, 'from src.visualization.save_view_fig import save_view_fig\n'), ((52408, 52454), 'os.path.join', 'os.path.join', (['""".."""', '""".."""', '"""reports"""', '"""figures"""'], {}), "('..', '..', 'reports', 'figures')\n", (52420, 52454), False, 'import os\n'), ((54017, 54035), 'pandas.DataFrame', 'pd.DataFrame', (['data'], {}), '(data)\n', (54029, 54035), True, 'import pandas as pd\n'), ((55054, 55100), 'os.path.join', 'os.path.join', (['""".."""', '""".."""', '"""reports"""', '"""figures"""'], {}), "('..', '..', 'reports', 'figures')\n", (55066, 55100), False, 'import os\n'), ((56626, 56720), 'src.visualization.save_view_fig.save_view_fig', 'save_view_fig', (['fig', 'image_type', 'fig_name', 'analysis_name', 'view_fig', 'save_fig', 'save_fig_path'], {}), '(fig, image_type, fig_name, analysis_name, view_fig, save_fig,\n save_fig_path)\n', (56639, 56720), False, 'from src.visualization.save_view_fig import save_view_fig\n'), ((56860, 56906), 'os.path.join', 'os.path.join', (['""".."""', '""".."""', '"""reports"""', '"""figures"""'], {}), "('..', '..', 'reports', 'figures')\n", (56872, 56906), False, 'import os\n'), ((57505, 57563), 'itertools.product', 'itertools.product', (['sensor_characteristics', 'outcome_metrics'], {}), '(sensor_characteristics, outcome_metrics)\n', (57522, 57563), False, 'import itertools\n'), ((58275, 58334), 'itertools.product', 'itertools.product', (['sensor_characteristics', "['HBGI', 'LBGI']"], {}), "(sensor_characteristics, ['HBGI', 'LBGI'])\n", (58292, 58334), False, 'import itertools\n'), ((59040, 59099), 'itertools.product', 'itertools.product', (['sensor_characteristics', "['HBGI', 'LBGI']"], {}), "(sensor_characteristics, ['HBGI', 'LBGI'])\n", (59057, 59099), False, 'import itertools\n'), ((59716, 59774), 'itertools.product', 'itertools.product', (['sensor_characteristics', 'outcome_metrics'], {}), '(sensor_characteristics, outcome_metrics)\n', (59733, 59774), False, 'import itertools\n'), 
((60834, 60878), 'itertools.product', 'itertools.product', (['settings', 'outcome_metrics'], {}), '(settings, outcome_metrics)\n', (60851, 60878), False, 'import itertools\n'), ((61461, 61505), 'itertools.product', 'itertools.product', (['settings', 'outcome_metrics'], {}), '(settings, outcome_metrics)\n', (61478, 61505), False, 'import itertools\n'), ((61600, 61637), 'itertools.product', 'itertools.product', (['settings', 'settings'], {}), '(settings, settings)\n', (61617, 61637), False, 'import itertools\n'), ((62357, 62475), 'os.path.join', 'os.path.join', (['""".."""', '""".."""', '"""reports"""', '"""figures"""', '"""icgm-sensitivity-paired-comparison-figures"""', 'save_fig_folder_name'], {}), "('..', '..', 'reports', 'figures',\n 'icgm-sensitivity-paired-comparison-figures', save_fig_folder_name)\n", (62369, 62475), False, 'import os\n'), ((65601, 65719), 'os.path.join', 'os.path.join', (['""".."""', '""".."""', '"""reports"""', '"""figures"""', '"""icgm-sensitivity-paired-comparison-figures"""', 'save_fig_folder_name'], {}), "('..', '..', 'reports', 'figures',\n 'icgm-sensitivity-paired-comparison-figures', save_fig_folder_name)\n", (65613, 65719), False, 'import os\n'), ((69702, 69754), 'itertools.product', 'itertools.product', (['comparison_types', 'outcome_metrics'], {}), '(comparison_types, outcome_metrics)\n', (69719, 69754), False, 'import itertools\n'), ((82504, 82602), 'itertools.product', 'itertools.product', (['comparison_types', 'outcome_metrics', 'sensor_characteristics', "['initial_bias']"], {}), "(comparison_types, outcome_metrics, sensor_characteristics,\n ['initial_bias'])\n", (82521, 82602), False, 'import itertools\n'), ((88612, 88688), 'itertools.product', 'itertools.product', (['comparison_types', 'outcome_metrics', 'sensor_characteristics'], {}), '(comparison_types, outcome_metrics, sensor_characteristics)\n', (88629, 88688), False, 'import itertools\n'), ((104168, 104214), 'os.path.join', 'os.path.join', (['""".."""', '""".."""', 
'"""reports"""', '"""figures"""'], {}), "('..', '..', 'reports', 'figures')\n", (104180, 104214), False, 'import os\n'), ((104888, 104946), 'itertools.product', 'itertools.product', (['analysis_levels', 'metrics', 'y_axis_scales'], {}), '(analysis_levels, metrics, y_axis_scales)\n', (104905, 104946), False, 'import itertools\n'), ((106336, 106379), 'itertools.product', 'itertools.product', (['analysis_levels', 'metrics'], {}), '(analysis_levels, metrics)\n', (106353, 106379), False, 'import itertools\n'), ((108033, 108076), 'itertools.product', 'itertools.product', (['analysis_levels', 'metrics'], {}), '(analysis_levels, metrics)\n', (108050, 108076), False, 'import itertools\n'), ((109529, 109626), 'os.path.join', 'os.path.join', (['""".."""', '""".."""', '"""data"""', '"""raw"""', '"""icgm-sensitivity-analysis-results-2020-09-19-nogit"""'], {}), "('..', '..', 'data', 'raw',\n 'icgm-sensitivity-analysis-results-2020-09-19-nogit')\n", (109541, 109626), False, 'import os\n'), ((116925, 116960), 'pandas.DataFrame', 'pd.DataFrame', (['data'], {'columns': 'columns'}), '(data, columns=columns)\n', (116937, 116960), True, 'import pandas as pd\n'), ((119057, 119114), 'os.path.join', 'os.path.join', (['""".."""', '""".."""', '"""data"""', '"""raw"""', 'icgm_folder_name'], {}), "('..', '..', 'data', 'raw', icgm_folder_name)\n", (119069, 119114), False, 'import os\n'), ((119268, 119333), 'os.path.join', 'os.path.join', (['""".."""', '""".."""', '"""data"""', '"""raw"""', 'ideal_sensor_folder_name'], {}), "('..', '..', 'data', 'raw', ideal_sensor_folder_name)\n", (119280, 119333), False, 'import os\n'), ((119454, 119572), 'os.path.join', 'os.path.join', (['""".."""', '""".."""', '"""reports"""', '"""figures"""', '"""icgm-sensitivity-paired-comparison-figures"""', 'save_fig_folder_name'], {}), "('..', '..', 'reports', 'figures',\n 'icgm-sensitivity-paired-comparison-figures', save_fig_folder_name)\n", (119466, 119572), False, 'import os\n'), ((1142, 1162), 
'datetime.datetime.utcnow', 'dt.datetime.utcnow', ([], {}), '()\n', (1160, 1162), True, 'import datetime as dt\n'), ((7154, 7224), 'tidepool_data_science_metrics.glucose.blood_glucose_risk_index', 'metrics.glucose.blood_glucose_risk_index', ([], {'bg_array': "simulation_df['bg']"}), "(bg_array=simulation_df['bg'])\n", (7194, 7224), True, 'import tidepool_data_science_metrics as metrics\n'), ((7431, 7501), 'tidepool_data_science_metrics.glucose.blood_glucose_risk_index', 'metrics.glucose.blood_glucose_risk_index', ([], {'bg_array': "simulation_df['bg']"}), "(bg_array=simulation_df['bg'])\n", (7471, 7501), True, 'import tidepool_data_science_metrics as metrics\n'), ((7516, 7586), 'tidepool_data_science_metrics.glucose.blood_glucose_risk_index', 'metrics.glucose.blood_glucose_risk_index', ([], {'bg_array': "simulation_df['bg']"}), "(bg_array=simulation_df['bg'])\n", (7556, 7586), True, 'import tidepool_data_science_metrics as metrics\n'), ((13088, 13158), 'tidepool_data_science_metrics.glucose.blood_glucose_risk_index', 'metrics.glucose.blood_glucose_risk_index', ([], {'bg_array': "simulation_df['bg']"}), "(bg_array=simulation_df['bg'])\n", (13128, 13158), True, 'import tidepool_data_science_metrics as metrics\n'), ((13365, 13435), 'tidepool_data_science_metrics.glucose.blood_glucose_risk_index', 'metrics.glucose.blood_glucose_risk_index', ([], {'bg_array': "simulation_df['bg']"}), "(bg_array=simulation_df['bg'])\n", (13405, 13435), True, 'import tidepool_data_science_metrics as metrics\n'), ((13450, 13520), 'tidepool_data_science_metrics.glucose.blood_glucose_risk_index', 'metrics.glucose.blood_glucose_risk_index', ([], {'bg_array': "simulation_df['bg']"}), "(bg_array=simulation_df['bg'])\n", (13490, 13520), True, 'import tidepool_data_science_metrics as metrics\n'), ((14588, 14608), 'datetime.datetime.utcnow', 'dt.datetime.utcnow', ([], {}), '()\n', (14606, 14608), True, 'import datetime as dt\n'), ((21755, 21778), 'numpy.arange', 'np.arange', (['(0)', '(1.5)', 
'(0.25)'], {}), '(0, 1.5, 0.25)\n', (21764, 21778), True, 'import numpy as np\n'), ((34047, 34096), 'itertools.product', 'itertools.product', (['metric_values', 'analysis_levels'], {}), '(metric_values, analysis_levels)\n', (34064, 34096), False, 'import itertools\n'), ((35055, 35247), 'plotly.express.scatter', 'px.scatter', ([], {'x': "grouped_df[level_of_analysis + '_label']", 'y': 'grouped_df[metric]', 'size': "grouped_df['count']", 'color': "grouped_df[metric + ' String']", 'color_discrete_map': 'color_dict', 'size_max': '(25)'}), "(x=grouped_df[level_of_analysis + '_label'], y=grouped_df[metric],\n size=grouped_df['count'], color=grouped_df[metric + ' String'],\n color_discrete_map=color_dict, size_max=25)\n", (35065, 35247), True, 'import plotly.express as px\n'), ((38355, 38456), 'plotly.express.histogram', 'px.histogram', ([], {'x': 'grouped_df[metric]', 'nbins': '(500)', 'color_discrete_sequence': 'px.colors.qualitative.T10'}), '(x=grouped_df[metric], nbins=500, color_discrete_sequence=px.\n colors.qualitative.T10)\n', (38367, 38456), True, 'import plotly.express as px\n'), ((39820, 40026), 'plotly.graph_objects.Layout', 'go.Layout', ([], {'showlegend': '(True)', 'title': "('Distribution of ' + metric + ' Across ' + level_of_analysis_dict[\n level_of_analysis])", 'plot_bgcolor': '"""#D3D3D3"""', 'legend_title': 'level_of_analysis_dict[level_of_analysis]'}), "(showlegend=True, title='Distribution of ' + metric + ' Across ' +\n level_of_analysis_dict[level_of_analysis], plot_bgcolor='#D3D3D3',\n legend_title=level_of_analysis_dict[level_of_analysis])\n", (39829, 40026), True, 'import plotly.graph_objects as go\n'), ((44627, 44677), 'pandas.concat', 'pd.concat', (['[all_analyses_summary_df, temp_summary]'], {}), '([all_analyses_summary_df, temp_summary])\n', (44636, 44677), True, 'import pandas as pd\n'), ((45154, 45204), 'pandas.concat', 'pd.concat', (['[all_analyses_summary_df, temp_summary]'], {}), '([all_analyses_summary_df, temp_summary])\n', (45163, 
45204), True, 'import pandas as pd\n'), ((56131, 56286), 'plotly.express.scatter', 'px.scatter', ([], {'data_frame': 'df', 'x': 'x_value', 'y': 'y_value', 'opacity': '(0.3)', 'color': 'color_value', 'title': 'title', 'color_continuous_scale': 'px.colors.sequential.Viridis'}), '(data_frame=df, x=x_value, y=y_value, opacity=0.3, color=\n color_value, title=title, color_continuous_scale=px.colors.sequential.\n Viridis)\n', (56141, 56286), True, 'import plotly.express as px\n'), ((56499, 56572), 'plotly.express.scatter', 'px.scatter', ([], {'data_frame': 'df', 'x': 'x_value', 'y': 'y_value', 'opacity': '(0.3)', 'title': 'title'}), '(data_frame=df, x=x_value, y=y_value, opacity=0.3, title=title)\n', (56509, 56572), True, 'import plotly.express as px\n'), ((62539, 62563), 'os.path.exists', 'os.path.exists', (['fig_path'], {}), '(fig_path)\n', (62553, 62563), False, 'import os\n'), ((62627, 62648), 'os.makedirs', 'os.makedirs', (['fig_path'], {}), '(fig_path)\n', (62638, 62648), False, 'import os\n'), ((65810, 66001), 'os.path.join', 'os.path.join', (['""".."""', '""".."""', '"""reports"""', '"""figures"""', '"""icgm-sensitivity-paired-comparison-figures"""', 'save_fig_folder_name', "('pairwise_comparison_combined_df_' + save_fig_folder_name + '.csv')"], {}), "('..', '..', 'reports', 'figures',\n 'icgm-sensitivity-paired-comparison-figures', save_fig_folder_name, \n 'pairwise_comparison_combined_df_' + save_fig_folder_name + '.csv')\n", (65822, 66001), False, 'import os\n'), ((69308, 69332), 'os.path.exists', 'os.path.exists', (['fig_path'], {}), '(fig_path)\n', (69322, 69332), False, 'import os\n'), ((69396, 69417), 'os.makedirs', 'os.makedirs', (['fig_path'], {}), '(fig_path)\n', (69407, 69417), False, 'import os\n'), ((74035, 74059), 'os.path.exists', 'os.path.exists', (['fig_path'], {}), '(fig_path)\n', (74049, 74059), False, 'import os\n'), ((74123, 74144), 'os.makedirs', 'os.makedirs', (['fig_path'], {}), '(fig_path)\n', (74134, 74144), False, 'import os\n'), 
((74205, 74304), 'pandas.crosstab', 'pd.crosstab', (["df[metric + ' Risk Score String_baseline']", "df[metric + ' Risk Score String_icgm']"], {}), "(df[metric + ' Risk Score String_baseline'], df[metric +\n ' Risk Score String_icgm'])\n", (74216, 74304), True, 'import pandas as pd\n'), ((79967, 79991), 'os.path.exists', 'os.path.exists', (['fig_path'], {}), '(fig_path)\n', (79981, 79991), False, 'import os\n'), ((80055, 80076), 'os.makedirs', 'os.makedirs', (['fig_path'], {}), '(fig_path)\n', (80066, 80076), False, 'import os\n'), ((81700, 81724), 'os.path.exists', 'os.path.exists', (['fig_path'], {}), '(fig_path)\n', (81714, 81724), False, 'import os\n'), ((81788, 81809), 'os.makedirs', 'os.makedirs', (['fig_path'], {}), '(fig_path)\n', (81799, 81809), False, 'import os\n'), ((87685, 87709), 'os.path.exists', 'os.path.exists', (['fig_path'], {}), '(fig_path)\n', (87699, 87709), False, 'import os\n'), ((87773, 87794), 'os.makedirs', 'os.makedirs', (['fig_path'], {}), '(fig_path)\n', (87784, 87794), False, 'import os\n'), ((89619, 89643), 'os.path.exists', 'os.path.exists', (['fig_path'], {}), '(fig_path)\n', (89633, 89643), False, 'import os\n'), ((89707, 89728), 'os.makedirs', 'os.makedirs', (['fig_path'], {}), '(fig_path)\n', (89718, 89728), False, 'import os\n'), ((90230, 90457), 'plotly.express.box', 'px.box', (['combined_df'], {'x': '"""sensor_num_icgm_string"""', 'y': 'metric', 'color': '"""sensor_num_icgm_string"""', 'labels': "{'sensor_num_icgm_string': 'Sensor Number'}", 'title': "(metric + ' (Between Baseline and iCGM Simulations)')", 'points': '(False)'}), "(combined_df, x='sensor_num_icgm_string', y=metric, color=\n 'sensor_num_icgm_string', labels={'sensor_num_icgm_string':\n 'Sensor Number'}, title=metric +\n ' (Between Baseline and iCGM Simulations)', points=False)\n", (90236, 90457), True, 'import plotly.express as px\n'), ((90637, 90836), 'src.visualization.save_view_fig.save_view_fig', 'save_view_fig', (['fig'], {'image_type': '"""png"""', 
'figure_name': "(metric + '_pairwise_comparison_boxplot')", 'analysis_name': '"""icgm-sensitivity-analysis"""', 'view_fig': '(True)', 'save_fig': '(True)', 'save_fig_path': 'fig_path'}), "(fig, image_type='png', figure_name=metric +\n '_pairwise_comparison_boxplot', analysis_name=\n 'icgm-sensitivity-analysis', view_fig=True, save_fig=True,\n save_fig_path=fig_path)\n", (90650, 90836), False, 'from src.visualization.save_view_fig import save_view_fig\n'), ((91572, 91596), 'os.path.exists', 'os.path.exists', (['fig_path'], {}), '(fig_path)\n', (91586, 91596), False, 'import os\n'), ((91660, 91681), 'os.makedirs', 'os.makedirs', (['fig_path'], {}), '(fig_path)\n', (91671, 91681), False, 'import os\n'), ((96830, 96854), 'os.path.exists', 'os.path.exists', (['fig_path'], {}), '(fig_path)\n', (96844, 96854), False, 'import os\n'), ((96918, 96939), 'os.makedirs', 'os.makedirs', (['fig_path'], {}), '(fig_path)\n', (96929, 96939), False, 'import os\n'), ((97260, 97359), 'plotly.subplots.make_subplots', 'make_subplots', ([], {'rows': 'n_rows', 'cols': 'n_cols', 'subplot_titles': 'subplot_titles', 'horizontal_spacing': '(0.05)'}), '(rows=n_rows, cols=n_cols, subplot_titles=subplot_titles,\n horizontal_spacing=0.05)\n', (97273, 97359), False, 'from plotly.subplots import make_subplots\n'), ((102485, 102726), 'src.visualization.save_view_fig.save_view_fig', 'save_view_fig', (['fig'], {'image_type': '"""png"""', 'figure_name': "(metric + '_pairwise_comparison_scatter')", 'analysis_name': '"""icgm-sensitivity-analysis"""', 'view_fig': '(False)', 'save_fig': '(True)', 'save_fig_path': 'fig_path', 'width': '(200 * n_cols)', 'height': '(200 * n_rows)'}), "(fig, image_type='png', figure_name=metric +\n '_pairwise_comparison_scatter', analysis_name=\n 'icgm-sensitivity-analysis', view_fig=False, save_fig=True,\n save_fig_path=fig_path, width=200 * n_cols, height=200 * n_rows)\n", (102498, 102726), False, 'from src.visualization.save_view_fig import save_view_fig\n'), ((115628, 
115702), 'pandas.DataFrame', 'pd.DataFrame', (['removed_scenarios'], {'columns': "['filename', 'cir', 'isf', 'sbr']"}), "(removed_scenarios, columns=['filename', 'cir', 'isf', 'sbr'])\n", (115640, 115702), True, 'import pandas as pd\n'), ((119636, 119673), 'os.path.exists', 'os.path.exists', (['results_save_fig_path'], {}), '(results_save_fig_path)\n', (119650, 119673), False, 'import os\n'), ((119750, 119784), 'os.makedirs', 'os.makedirs', (['results_save_fig_path'], {}), '(results_save_fig_path)\n', (119761, 119784), False, 'import os\n'), ((25016, 25038), 'numpy.asarray', 'np.asarray', (['cell_width'], {}), '(cell_width)\n', (25026, 25038), True, 'import numpy as np\n'), ((25816, 25863), 'os.path.join', 'os.path.join', (['save_fig_path', "(file_name + '.csv')"], {}), "(save_fig_path, file_name + '.csv')\n", (25828, 25863), False, 'import os\n'), ((39128, 39311), 'plotly.express.histogram', 'px.histogram', ([], {'x': 'grouped_df[metric]', 'facet_row': 'grouped_df[level_of_analysis]', 'nbins': '(500)', 'color_discrete_sequence': 'px.colors.qualitative.T10', 'color': 'grouped_df[level_of_analysis]'}), '(x=grouped_df[metric], facet_row=grouped_df[level_of_analysis],\n nbins=500, color_discrete_sequence=px.colors.qualitative.T10, color=\n grouped_df[level_of_analysis])\n', (39140, 39311), True, 'import plotly.express as px\n'), ((39468, 39669), 'plotly.express.histogram', 'px.histogram', ([], {'x': 'grouped_df[metric]', 'facet_col': 'grouped_df[level_of_analysis]', 'facet_col_wrap': '(3)', 'nbins': '(500)', 'color_discrete_sequence': 'px.colors.qualitative.T10', 'color': 'grouped_df[level_of_analysis]'}), '(x=grouped_df[metric], facet_col=grouped_df[level_of_analysis],\n facet_col_wrap=3, nbins=500, color_discrete_sequence=px.colors.\n qualitative.T10, color=grouped_df[level_of_analysis])\n', (39480, 39669), True, 'import plotly.express as px\n'), ((51161, 51196), 'numpy.searchsorted', 'np.searchsorted', (['x', 'v'], {'side': '"""right"""'}), "(x, v, 
side='right')\n", (51176, 51196), True, 'import numpy as np\n'), ((51752, 51767), 'numpy.unique', 'np.unique', (['data'], {}), '(data)\n', (51761, 51767), True, 'import numpy as np\n'), ((53818, 53867), 'scipy.stats.spearmanr', 'stats.spearmanr', (['results_df[row]', 'results_df[col]'], {}), '(results_df[row], results_df[col])\n', (53833, 53867), False, 'from scipy import stats\n'), ((64834, 64928), 'os.path.join', 'os.path.join', (['fig_path', "('pairwise_comparison_combined_df_' + save_fig_folder_name + '.csv')"], {}), "(fig_path, 'pairwise_comparison_combined_df_' +\n save_fig_folder_name + '.csv')\n", (64846, 64928), False, 'import os\n'), ((66606, 66675), 'os.path.join', 'os.path.join', (['fig_path', '"""distributions-sensor-characteristic-outcome"""'], {}), "(fig_path, 'distributions-sensor-characteristic-outcome')\n", (66618, 66675), False, 'import os\n'), ((66794, 66840), 'os.path.join', 'os.path.join', (['fig_path', '"""risk-score-crosstabs"""'], {}), "(fig_path, 'risk-score-crosstabs')\n", (66806, 66840), False, 'import os\n'), ((66929, 66990), 'os.path.join', 'os.path.join', (['fig_path', '"""sensor_characteristic_distributions"""'], {}), "(fig_path, 'sensor_characteristic_distributions')\n", (66941, 66990), False, 'import os\n'), ((70012, 70070), 'numpy.where', 'np.where', (['(df[distribution_metric] > threshold)', '(True)', '(False)'], {}), '(df[distribution_metric] > threshold, True, False)\n', (70020, 70070), True, 'import numpy as np\n'), ((70676, 70763), 'plotly.figure_factory.create_distplot', 'ff.create_distplot', (['hist_data', 'group_labels'], {'bin_size': '(0.05)', 'histnorm': '"""probability"""'}), "(hist_data, group_labels, bin_size=0.05, histnorm=\n 'probability')\n", (70694, 70763), True, 'import plotly.figure_factory as ff\n'), ((71137, 71224), 'plotly.figure_factory.create_distplot', 'ff.create_distplot', (['hist_data', 'group_labels'], {'bin_size': '(0.05)', 'histnorm': '"""probability"""'}), "(hist_data, group_labels, bin_size=0.05, 
histnorm=\n 'probability')\n", (71155, 71224), True, 'import plotly.figure_factory as ff\n'), ((71508, 71595), 'plotly.figure_factory.create_distplot', 'ff.create_distplot', (['hist_data', 'group_labels'], {'bin_size': '(0.05)', 'histnorm': '"""probability"""'}), "(hist_data, group_labels, bin_size=0.05, histnorm=\n 'probability')\n", (71526, 71595), True, 'import plotly.figure_factory as ff\n'), ((71828, 71915), 'plotly.figure_factory.create_distplot', 'ff.create_distplot', (['hist_data', 'group_labels'], {'bin_size': '(0.05)', 'histnorm': '"""probability"""'}), "(hist_data, group_labels, bin_size=0.05, histnorm=\n 'probability')\n", (71846, 71915), True, 'import plotly.figure_factory as ff\n'), ((75815, 75931), 'pandas.crosstab', 'pd.crosstab', (["reduced_df[metric + ' Risk Score String_baseline']", "reduced_df[metric + ' Risk Score String_icgm']"], {}), "(reduced_df[metric + ' Risk Score String_baseline'], reduced_df[\n metric + ' Risk Score String_icgm'])\n", (75826, 75931), True, 'import pandas as pd\n'), ((77752, 77868), 'pandas.crosstab', 'pd.crosstab', (["reduced_df[metric + ' Risk Score String_baseline']", "reduced_df[metric + ' Risk Score String_icgm']"], {}), "(reduced_df[metric + ' Risk Score String_baseline'], reduced_df[\n metric + ' Risk Score String_icgm'])\n", (77763, 77868), True, 'import pandas as pd\n'), ((80890, 80983), 'plotly.express.scatter', 'px.scatter', (['df'], {'x': "(sensor_characteristic_x + '_icgm')", 'y': "(sensor_characteristic_y + '_icgm')"}), "(df, x=sensor_characteristic_x + '_icgm', y=\n sensor_characteristic_y + '_icgm')\n", (80900, 80983), True, 'import plotly.express as px\n'), ((81078, 81335), 'src.visualization.save_view_fig.save_view_fig', 'save_view_fig', (['fig'], {'image_type': '"""png"""', 'figure_name': "(sensor_characteristic_x + '_' + sensor_characteristic_y +\n '_sensor_characteristic_distributions')", 'analysis_name': '"""icgm-sensitivity-analysis"""', 'view_fig': '(False)', 'save_fig': '(True)', 'save_fig_path': 
'fig_path'}), "(fig, image_type='png', figure_name=sensor_characteristic_x +\n '_' + sensor_characteristic_y + '_sensor_characteristic_distributions',\n analysis_name='icgm-sensitivity-analysis', view_fig=False, save_fig=\n True, save_fig_path=fig_path)\n", (81091, 81335), False, 'from src.visualization.save_view_fig import save_view_fig\n'), ((84857, 84955), 'plotly.subplots.make_subplots', 'make_subplots', ([], {'rows': 'n_rows', 'cols': 'n_cols', 'subplot_titles': 'subplot_titles', 'horizontal_spacing': '(0.1)'}), '(rows=n_rows, cols=n_cols, subplot_titles=subplot_titles,\n horizontal_spacing=0.1)\n', (84870, 84955), False, 'from plotly.subplots import make_subplots\n'), ((86796, 87091), 'src.visualization.save_view_fig.save_view_fig', 'save_view_fig', (['fig'], {'image_type': '"""png"""', 'figure_name': "('distribution_' + outcome_metric + comparison_type +\n '_across_sensor_characteristic_space')", 'analysis_name': '"""icgm-sensitivity-analysis"""', 'view_fig': '(True)', 'save_fig': '(True)', 'save_fig_path': 'fig_path', 'width': '(200 * n_cols)', 'height': '(200 * n_rows)'}), "(fig, image_type='png', figure_name='distribution_' +\n outcome_metric + comparison_type +\n '_across_sensor_characteristic_space', analysis_name=\n 'icgm-sensitivity-analysis', view_fig=True, save_fig=True,\n save_fig_path=fig_path, width=200 * n_cols, height=200 * n_rows)\n", (86809, 87091), False, 'from src.visualization.save_view_fig import save_view_fig\n'), ((93110, 93208), 'plotly.subplots.make_subplots', 'make_subplots', ([], {'rows': 'n_rows', 'cols': 'n_cols', 'subplot_titles': 'subplot_titles', 'horizontal_spacing': '(0.1)'}), '(rows=n_rows, cols=n_cols, subplot_titles=subplot_titles,\n horizontal_spacing=0.1)\n', (93123, 93208), False, 'from plotly.subplots import make_subplots\n'), ((95826, 96148), 'src.visualization.save_view_fig.save_view_fig', 'save_view_fig', (['fig'], {'image_type': '"""png"""', 'figure_name': "('distribution_' + comparison_type + '_' + analysis_level 
+\n '_sensor_characteristic' + '_pairwise_comparison_scatter')", 'analysis_name': '"""icgm-sensitivity-analysis"""', 'view_fig': '(False)', 'save_fig': '(True)', 'save_fig_path': 'fig_path', 'width': '(200 * n_cols)', 'height': '(200 * n_rows)'}), "(fig, image_type='png', figure_name='distribution_' +\n comparison_type + '_' + analysis_level + '_sensor_characteristic' +\n '_pairwise_comparison_scatter', analysis_name=\n 'icgm-sensitivity-analysis', view_fig=False, save_fig=True,\n save_fig_path=fig_path, width=200 * n_cols, height=200 * n_rows)\n", (95839, 96148), False, 'from src.visualization.save_view_fig import save_view_fig\n'), ((117055, 117119), 'os.path.join', 'os.path.join', (['save_path', "(results_folder_name + '_results_df.csv')"], {}), "(save_path, results_folder_name + '_results_df.csv')\n", (117067, 117119), False, 'import os\n'), ((31117, 31196), 'pandas.DataFrame', 'pd.DataFrame', (['data'], {'columns': "[metric, metric + ' String', 'count', 'percentage']"}), "(data, columns=[metric, metric + ' String', 'count', 'percentage'])\n", (31129, 31196), True, 'import pandas as pd\n'), ((31264, 31319), 'pandas.concat', 'pd.concat', (['[grouped_df, df2]'], {'axis': '(0)', 'ignore_index': '(True)'}), '([grouped_df, df2], axis=0, ignore_index=True)\n', (31273, 31319), True, 'import pandas as pd\n'), ((34358, 34460), 'pandas.DataFrame', 'pd.DataFrame', (['data'], {'columns': "[level_of_analysis, metric, metric + ' String', 'count', 'percentage']"}), "(data, columns=[level_of_analysis, metric, metric + ' String',\n 'count', 'percentage'])\n", (34370, 34460), True, 'import pandas as pd\n'), ((34831, 34886), 'pandas.concat', 'pd.concat', (['[grouped_df, df2]'], {'axis': '(0)', 'ignore_index': '(True)'}), '([grouped_df, df2], axis=0, ignore_index=True)\n', (34840, 34886), True, 'import pandas as pd\n'), ((48757, 48831), 'pandas.crosstab', 'pd.crosstab', (['results_df[level_of_analysis]', "results_df[metric + ' String']"], {}), "(results_df[level_of_analysis], 
results_df[metric + ' String'])\n", (48768, 48831), True, 'import pandas as pd\n'), ((51782, 51797), 'numpy.unique', 'np.unique', (['data'], {}), '(data)\n', (51791, 51797), True, 'import numpy as np\n'), ((72389, 72419), 'plotly.graph_objects.Scatter', 'go.Scatter', ([], {'x': '[2, 2]', 'y': '[0, 1]'}), '(x=[2, 2], y=[0, 1])\n', (72399, 72419), True, 'import plotly.graph_objects as go\n'), ((110708, 110732), 'os.listdir', 'os.listdir', (['results_path'], {}), '(results_path)\n', (110718, 110732), False, 'import os\n'), ((111277, 111313), 'pandas.DataFrame', 'pd.DataFrame', (['json_data'], {'index': "['i']"}), "(json_data, index=['i'])\n", (111289, 111313), True, 'import pandas as pd\n'), ((112629, 112665), 'pandas.DataFrame', 'pd.DataFrame', (['json_data'], {'index': "['i']"}), "(json_data, index=['i'])\n", (112641, 112665), True, 'import pandas as pd\n'), ((113626, 113650), 'os.listdir', 'os.listdir', (['results_path'], {}), '(results_path)\n', (113636, 113650), False, 'import os\n'), ((115786, 115860), 'os.path.join', 'os.path.join', (['save_path', "(results_folder_name + '_removed_scenarios_df.csv')"], {}), "(save_path, results_folder_name + '_removed_scenarios_df.csv')\n", (115798, 115860), False, 'import os\n'), ((110871, 110907), 'os.path.join', 'os.path.join', (['results_path', 'filename'], {}), '(results_path, filename)\n', (110883, 110907), False, 'import os\n'), ((111014, 111090), 'os.path.join', 'os.path.join', (['patient_characteristics_path', "(filename_components[0] + '.json')"], {}), "(patient_characteristics_path, filename_components[0] + '.json')\n", (111026, 111090), False, 'import os\n'), ((113811, 113847), 'os.path.join', 'os.path.join', (['results_path', 'filename'], {}), '(results_path, filename)\n', (113823, 113847), False, 'import os\n'), ((72146, 72239), 'plotly.graph_objects.Histogram', 'go.Histogram', ([], {'x': 'df[distribution_metric]', 'cumulative_enabled': '(True)', 'histnorm': '"""probability"""'}), "(x=df[distribution_metric], 
cumulative_enabled=True, histnorm=\n 'probability')\n", (72158, 72239), True, 'import plotly.graph_objects as go\n'), ((111800, 111847), 'os.path.join', 'os.path.join', (['results_path', '"""sIdealSensor.json"""'], {}), "(results_path, 'sIdealSensor.json')\n", (111812, 111847), False, 'import os\n'), ((112049, 112175), 'os.path.join', 'os.path.join', (['results_path', "(filename_components[0] + '.' + filename_components[1] + '.' +\n filename_components[2] + '.json')"], {}), "(results_path, filename_components[0] + '.' +\n filename_components[1] + '.' + filename_components[2] + '.json')\n", (112061, 112175), False, 'import os\n')] |
# Fix every RNG seed up front (NumPy, TensorFlow, stdlib random) so that
# inference results are reproducible run-to-run.
seed=123
from keras import backend as K
import numpy as np
np.random.seed(seed)  # seed NumPy before any other module draws random numbers
import tensorflow as tf
tf.set_random_seed(seed)  # TF 1.x graph-level seed
import random
random.seed(seed)
import skimage.io
from skimage import img_as_ubyte
import model as modellib  # project-local Mask R-CNN implementation
import pandas as pd
import os
import my_functions as f
import imgaug.augmenters as iaa
from mrcnn.model import log
from visualize import display_instances  # project-local result renderer
import matplotlib.pyplot as plt
import cv2 as cv
import math
import argparse
#######################################################################################
## SET UP CONFIGURATION
# Command-line interface: all three options are mandatory string arguments.
parser = argparse.ArgumentParser("my_inference.py")
_cli_options = (
    ("--src", "path to the src, Exp: dataset/Normalized_Images/"),
    ("--dest", "path to the dest, Exp: dataset/Normalized_Images/"),
    ("--model", "name of the model, Exp:final.h5"),
)
for _flag, _help_text in _cli_options:
    parser.add_argument(_flag, help=_help_text, type=str, required=True)
args = parser.parse_args()
#######################################################################################
## SET UP CONFIGURATION
from config import Config
class BowlConfig(Config):
    """Mask R-CNN configuration used for inference.

    Derives from the project's base Config class and overrides the
    values needed to run single-image detection on 768x1024 inputs.
    """
    # Give the configuration a recognizable name
    NAME = "Inference"
    BACKBONE = "resnet50"
    IMAGE_RESIZE_MODE = "square"  ## tried to modify, but using the other git clone's resize implementation
    ## No augmentation-style rescaling at inference time.
    ZOOM = False
    ASPECT_RATIO = 1
    MIN_ENLARGE = 1
    IMAGE_MIN_SCALE = False  ## Not using this
    # IMAGE_MIN_DIM = 512 # We scale small images up so that smallest side is 512
    # IMAGE_MAX_DIM = False
    IMAGE_MIN_DIM = 768
    IMAGE_MAX_DIM = 1024
    # One image per GPU on a single GPU -> effective batch size of 1.
    GPU_COUNT = 1
    IMAGES_PER_GPU = 1
    # Detection-head limits and confidence/NMS thresholds.
    DETECTION_MAX_INSTANCES = 512
    DETECTION_NMS_THRESHOLD = 0.4
    DETECTION_MIN_CONFIDENCE = 0.7
    LEARNING_RATE = 0.0001
    # Number of classes (including background)
    NUM_CLASSES = 1 + 1 + 1  # background + crypt + gland (see class list passed to display_instances)
    # Use smaller anchors because our image and objects are small
    RPN_ANCHOR_SCALES = (32, 64, 128, 256, 512)  # anchor side in pixels
    # RPN_ANCHOR_SCALES = (128, 256, 512)
    # Reduce training ROIs per image because the images are small and have
    # few objects. Aim to allow ROI sampling to pick 33% positive ROIs.
    TRAIN_ROIS_PER_IMAGE = 200
    USE_MINI_MASK = True
inference_config = BowlConfig()
inference_config.display()
#######################################################################################
ROOT_DIR = os.getcwd()
MODEL_DIR = os.path.join(ROOT_DIR, "logs")

## Path to the trained weights (e.g. the last epoch of training).
model_path = args.model
## Input/output directories supplied on the command line.
src_path = args.src
dest_path = args.dest
test_path = os.path.join(ROOT_DIR, src_path)

# Build the worklist from every .png found in the source directory.
image_ids = [image_id for image_id in os.listdir(src_path) if image_id.endswith(".png")]
sample_submission = pd.DataFrame({"ImageId": image_ids})

print("Loading weights from ", model_path)

# Create the output directories up front.  makedirs(exist_ok=True)
# replaces the previous `try: os.mkdir(...) except: pass`, which
# silently swallowed *every* failure (missing parent directory,
# permission errors, ...), not just "directory already exists".
os.makedirs(dest_path, exist_ok=True)
os.makedirs(os.path.join(dest_path, "predicted_mask/"), exist_ok=True)
os.makedirs(os.path.join(dest_path, "predicted_images"), exist_ok=True)

import time
start_time = time.time()

# Recreate the model in inference mode
model = modellib.MaskRCNN(mode="inference",
                          config=inference_config,
                          model_dir=MODEL_DIR)
model.load_weights(model_path, by_name=True)

ImageId_d = []
EncodedPixels_d = []
n_images = len(sample_submission.ImageId)
def fit_hough_rotate(image_path):
    """Deskew the image at *image_path* using the dominant Hough-line angle.

    The image is cropped to its non-black bounding box, edges are
    extracted with an auto-thresholded Canny detector, and probabilistic
    Hough segments are collected.  The mean segment angle (after
    outlier rejection) rotates the *original* (uncropped) image so the
    dominant structure lies horizontal.  Displays and returns the
    rotated image.
    """
    img = cv.imread(image_path, cv.IMREAD_UNCHANGED)
    # Crop to the bounding box of pixels that are non-zero in all channels
    # (drops the black border around the scan).
    mask = img > 0
    mask = mask.all(2)
    coords = np.argwhere(mask)
    x0, y0 = coords.min(axis=0)
    x1, y1 = coords.max(axis=0) + 1
    img = img[x0:x1, y0:y1]
    imgray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)
    imgray = cv.bilateralFilter(imgray, 10, 50, 50)
    # Automatic Canny thresholds around the median intensity (sigma rule).
    v = np.median(imgray)
    sigma = 0.33
    lower_thresh = int(max(0, (1.0 - sigma) * v))
    upper_thresh = int(min(255, (1.0 + sigma) * v))
    edges = cv.Canny(imgray, lower_thresh, upper_thresh)
    # Progressively relax the Hough parameters until enough segments appear.
    lines = cv.HoughLinesP(edges, 1, math.pi / 128, 40, None, 60, 10)
    if lines is None or len(lines) < 4:
        lines = cv.HoughLinesP(edges, 1, math.pi / 128, 40, None, 80, 20)
    if lines is None or len(lines) < 4:
        lines = cv.HoughLinesP(edges, 1, math.pi / 128, 20, None, 80, 20)

    def segment_angle(segment):
        # atan2 handles perfectly vertical segments (dx == 0), which the
        # previous arctan(dy / dx) divided by zero on.  Fold the result
        # back into (-pi/2, pi/2] so it matches arctan's range for every
        # other segment orientation.
        seg_x1, seg_y1, seg_x2, seg_y2 = segment
        theta = math.atan2(seg_y2 - seg_y1, seg_x2 - seg_x1)
        if theta > math.pi / 2:
            theta -= math.pi
        elif theta < -math.pi / 2:
            theta += math.pi
        return theta

    lines_theta = np.array([segment_angle(line[0]) for line in lines[:100]])

    def reject_outliers(data, m=2):
        # Keep only angles within m standard deviations of the mean.
        return data[abs(data - np.mean(data)) < m * np.std(data)]

    lines_theta = reject_outliers(lines_theta, 1)
    # Rotate the uncropped source image by the negated mean angle
    # (degrees), letting imgaug grow the canvas to fit the rotation.
    augmentation = iaa.Sequential([
        iaa.Rotate((-np.mean(lines_theta) * 180 / math.pi), fit_output=True)
        # iaa.KeepSizeByResize(iaa.Rotate((-np.mean(lines_theta) * 180 / math.pi), fit_output=True))
    ], random_order=False)
    img = augmentation.augment_image(skimage.io.imread(image_path))
    plt.imshow(img)
    plt.show()
    return img
def get_ax(rows=1, cols=1, size=16):
    """Return a Matplotlib Axes array for notebook visualizations.

    Centralizes figure creation so every plot in this script is sized
    consistently; tweak *size* to render images larger or smaller.
    """
    figure_size = (size * cols, size * rows)
    figure, axes = plt.subplots(rows, cols, figsize=figure_size)
    return axes
# Run detection on every image in the worklist, save an annotated
# overlay plus one PNG per detected class-1 instance mask.
for i in np.arange(n_images):
    image_id = sample_submission.ImageId[i]
    print('Start detect',i, ' ' ,image_id)
    ## Re-seed every RNG per image, just in case, so results do not
    ## depend on processing order.
    random.seed(seed)
    np.random.seed(seed)
    tf.set_random_seed(seed)
    ## Load the image
    image_path = os.path.join(test_path, image_id)
    original_image = skimage.io.imread(image_path)
    # original_image = cv.imread(image_path, cv.IMREAD_UNCHANGED)
    # original_image = fit_hough_rotate(image_path)
    ####################################################################
    ## Needed for stage-2 images that come as a single channel:
    ## convert to uint8 and replicate the channel into a 3-channel stack.
    if len(original_image.shape)<3:
        original_image = img_as_ubyte(original_image)
        original_image = np.expand_dims(original_image,2)
        original_image = original_image[:,:,[0,0,0]]  # replicate channel 0 three times
    ####################################################################
    # Drop any alpha channel; the model expects 3-channel input.
    original_image = original_image[:,:,:3]
    # Pad to a 1.5 aspect ratio, then resize to the 768x1024 network input.
    augmentation = iaa.Sequential([
        iaa.PadToAspectRatio(1.5, position="center"),
        iaa.Resize({"height": 768, "width": 1024}),
        # iaa.CropAndPad(percent=(-0.3))
        # iaa.Rotate((-30))
        # iaa.Fliplr(1),
        # iaa.Flipud(1),
    ], random_order=False)
    det = augmentation.to_deterministic()
    original_image = augmentation.augment_image(original_image)
    ## Make prediction for that image
    results = model.detect([original_image], verbose=1)
    # Display results and write the annotated overlay to predicted_images/.
    ax = get_ax(1)
    r = results[0]
    display_instances(original_image, r['rois'], r['masks'], r['class_ids'],
                      ['bg', 'crypt', 'gland'], r['scores'],
                      title=image_id, dest=os.path.join(dest_path, "predicted_images/"))
    ## Collapse the per-instance masks into one semantic label image.
    ## NOTE(review): the two inner loops below reuse `i`, shadowing the
    ## image index — harmless because the outer loop reassigns it each
    ## iteration, but confusing to read.
    pred_masks = results[0]['masks']
    scores_masks = results[0]['scores']
    class_ids = results[0]['class_ids']
    pred = np.zeros(original_image.shape[:2])
    for i in range(1, inference_config.NUM_CLASSES):
        if sum(class_ids == (inference_config.NUM_CLASSES-i)): # using inference_config.NUM_CLASSES-i instead of i to fix the mismatch in semantic dataset
            mask = np.any(pred_masks[:,:,class_ids==(inference_config.NUM_CLASSES-i)], 2)
            pred[mask] = i
    ## Write each class-1 ("crypt" per the class list above) instance
    ## mask out as its own binary PNG.
    for i, class_id in enumerate(class_ids):
        if class_id == 1: # using inference_config.NUM_CLASSES-i instead of i to fix the mismatch in semantic dataset
            cv.imwrite(os.path.join(dest_path, "predicted_mask/", image_id[:-len(".png")]+ "_" + str(i) + ".png"), pred_masks[:, :, i]*255)
# cv2.imwrite(os.path.join("prediction", image_id), pred)
# plt.imsave(os.path.join("prediction", image_id), pred)
# np.savetxt(os.path.join("benchmark_correct_size/MaskRCNN_square_rotated/test", os.path.basename(image_id))+".txt", pred, fmt='%d')
# f.write2csv('submission.csv', ImageId_d, EncodedPixels_d)
end_time = time.time()
ellapsed_time = (end_time-start_time)/3600
print('Time required to train ', ellapsed_time, 'hours')
| [
"os.mkdir",
"numpy.random.seed",
"argparse.ArgumentParser",
"cv2.bilateralFilter",
"numpy.mean",
"numpy.arange",
"cv2.HoughLinesP",
"os.path.join",
"pandas.DataFrame",
"cv2.cvtColor",
"matplotlib.pyplot.imshow",
"numpy.std",
"tensorflow.set_random_seed",
"random.seed",
"matplotlib.pyplot... | [((59, 79), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (73, 79), True, 'import numpy as np\n'), ((104, 128), 'tensorflow.set_random_seed', 'tf.set_random_seed', (['seed'], {}), '(seed)\n', (122, 128), True, 'import tensorflow as tf\n'), ((143, 160), 'random.seed', 'random.seed', (['seed'], {}), '(seed)\n', (154, 160), False, 'import random\n'), ((591, 633), 'argparse.ArgumentParser', 'argparse.ArgumentParser', (['"""my_inference.py"""'], {}), "('my_inference.py')\n", (614, 633), False, 'import argparse\n'), ((2612, 2623), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (2621, 2623), False, 'import os\n'), ((2636, 2666), 'os.path.join', 'os.path.join', (['ROOT_DIR', '"""logs"""'], {}), "(ROOT_DIR, 'logs')\n", (2648, 2666), False, 'import os\n'), ((2877, 2909), 'os.path.join', 'os.path.join', (['ROOT_DIR', 'src_path'], {}), '(ROOT_DIR, src_path)\n', (2889, 2909), False, 'import os\n'), ((3174, 3210), 'pandas.DataFrame', 'pd.DataFrame', (["{'ImageId': image_ids}"], {}), "({'ImageId': image_ids})\n", (3186, 3210), True, 'import pandas as pd\n'), ((3464, 3475), 'time.time', 'time.time', ([], {}), '()\n', (3473, 3475), False, 'import time\n'), ((3524, 3610), 'model.MaskRCNN', 'modellib.MaskRCNN', ([], {'mode': '"""inference"""', 'config': 'inference_config', 'model_dir': 'MODEL_DIR'}), "(mode='inference', config=inference_config, model_dir=\n MODEL_DIR)\n", (3541, 3610), True, 'import model as modellib\n'), ((6283, 6302), 'numpy.arange', 'np.arange', (['n_images'], {}), '(n_images)\n', (6292, 6302), True, 'import numpy as np\n'), ((9270, 9281), 'time.time', 'time.time', ([], {}), '()\n', (9279, 9281), False, 'import time\n'), ((3265, 3284), 'os.mkdir', 'os.mkdir', (['dest_path'], {}), '(dest_path)\n', (3273, 3284), False, 'import os\n'), ((3829, 3871), 'cv2.imread', 'cv.imread', (['image_path', 'cv.IMREAD_UNCHANGED'], {}), '(image_path, cv.IMREAD_UNCHANGED)\n', (3838, 3871), True, 'import cv2 as cv\n'), ((3948, 3965), 
'numpy.argwhere', 'np.argwhere', (['mask'], {}), '(mask)\n', (3959, 3965), True, 'import numpy as np\n'), ((4075, 4110), 'cv2.cvtColor', 'cv.cvtColor', (['img', 'cv.COLOR_BGR2GRAY'], {}), '(img, cv.COLOR_BGR2GRAY)\n', (4086, 4110), True, 'import cv2 as cv\n'), ((4124, 4162), 'cv2.bilateralFilter', 'cv.bilateralFilter', (['imgray', '(10)', '(50)', '(50)'], {}), '(imgray, 10, 50, 50)\n', (4142, 4162), True, 'import cv2 as cv\n'), ((4171, 4188), 'numpy.median', 'np.median', (['imgray'], {}), '(imgray)\n', (4180, 4188), True, 'import numpy as np\n'), ((4321, 4365), 'cv2.Canny', 'cv.Canny', (['imgray', 'lower_thresh', 'upper_thresh'], {}), '(imgray, lower_thresh, upper_thresh)\n', (4329, 4365), True, 'import cv2 as cv\n'), ((4378, 4435), 'cv2.HoughLinesP', 'cv.HoughLinesP', (['edges', '(1)', '(math.pi / 128)', '(40)', 'None', '(60)', '(10)'], {}), '(edges, 1, math.pi / 128, 40, None, 60, 10)\n', (4392, 4435), True, 'import cv2 as cv\n'), ((5883, 5898), 'matplotlib.pyplot.imshow', 'plt.imshow', (['img'], {}), '(img)\n', (5893, 5898), True, 'import matplotlib.pyplot as plt\n'), ((5903, 5913), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5911, 5913), True, 'import matplotlib.pyplot as plt\n'), ((6198, 6258), 'matplotlib.pyplot.subplots', 'plt.subplots', (['rows', 'cols'], {'figsize': '(size * cols, size * rows)'}), '(rows, cols, figsize=(size * cols, size * rows))\n', (6210, 6258), True, 'import matplotlib.pyplot as plt\n'), ((6443, 6460), 'random.seed', 'random.seed', (['seed'], {}), '(seed)\n', (6454, 6460), False, 'import random\n'), ((6465, 6485), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (6479, 6485), True, 'import numpy as np\n'), ((6490, 6514), 'tensorflow.set_random_seed', 'tf.set_random_seed', (['seed'], {}), '(seed)\n', (6508, 6514), True, 'import tensorflow as tf\n'), ((6555, 6588), 'os.path.join', 'os.path.join', (['test_path', 'image_id'], {}), '(test_path, image_id)\n', (6567, 6588), False, 'import os\n'), ((8271, 8305), 
'numpy.zeros', 'np.zeros', (['original_image.shape[:2]'], {}), '(original_image.shape[:2])\n', (8279, 8305), True, 'import numpy as np\n'), ((3103, 3123), 'os.listdir', 'os.listdir', (['src_path'], {}), '(src_path)\n', (3113, 3123), False, 'import os\n'), ((3320, 3362), 'os.path.join', 'os.path.join', (['dest_path', '"""predicted_mask/"""'], {}), "(dest_path, 'predicted_mask/')\n", (3332, 3362), False, 'import os\n'), ((3377, 3420), 'os.path.join', 'os.path.join', (['dest_path', '"""predicted_images"""'], {}), "(dest_path, 'predicted_images')\n", (3389, 3420), False, 'import os\n'), ((4492, 4549), 'cv2.HoughLinesP', 'cv.HoughLinesP', (['edges', '(1)', '(math.pi / 128)', '(40)', 'None', '(80)', '(20)'], {}), '(edges, 1, math.pi / 128, 40, None, 80, 20)\n', (4506, 4549), True, 'import cv2 as cv\n'), ((7033, 7061), 'skimage.img_as_ubyte', 'img_as_ubyte', (['original_image'], {}), '(original_image)\n', (7045, 7061), False, 'from skimage import img_as_ubyte\n'), ((7087, 7120), 'numpy.expand_dims', 'np.expand_dims', (['original_image', '(2)'], {}), '(original_image, 2)\n', (7101, 7120), True, 'import numpy as np\n'), ((4614, 4671), 'cv2.HoughLinesP', 'cv.HoughLinesP', (['edges', '(1)', '(math.pi / 128)', '(20)', 'None', '(80)', '(20)'], {}), '(edges, 1, math.pi / 128, 20, None, 80, 20)\n', (4628, 4671), True, 'import cv2 as cv\n'), ((4829, 4893), 'numpy.arctan', 'np.arctan', (['((line[0][3] - line[0][1]) / (line[0][2] - line[0][0]))'], {}), '((line[0][3] - line[0][1]) / (line[0][2] - line[0][0]))\n', (4838, 4893), True, 'import numpy as np\n'), ((7349, 7393), 'imgaug.augmenters.PadToAspectRatio', 'iaa.PadToAspectRatio', (['(1.5)'], {'position': '"""center"""'}), "(1.5, position='center')\n", (7369, 7393), True, 'import imgaug.augmenters as iaa\n'), ((7403, 7445), 'imgaug.augmenters.Resize', 'iaa.Resize', (["{'height': 768, 'width': 1024}"], {}), "({'height': 768, 'width': 1024})\n", (7413, 7445), True, 'import imgaug.augmenters as iaa\n'), ((8059, 8103), 'os.path.join', 
'os.path.join', (['dest_path', '"""predicted_images/"""'], {}), "(dest_path, 'predicted_images/')\n", (8071, 8103), False, 'import os\n'), ((8533, 8607), 'numpy.any', 'np.any', (['pred_masks[:, :, class_ids == inference_config.NUM_CLASSES - i]', '(2)'], {}), '(pred_masks[:, :, class_ids == inference_config.NUM_CLASSES - i], 2)\n', (8539, 8607), True, 'import numpy as np\n'), ((5009, 5021), 'numpy.std', 'np.std', (['data'], {}), '(data)\n', (5015, 5021), True, 'import numpy as np\n'), ((4988, 5001), 'numpy.mean', 'np.mean', (['data'], {}), '(data)\n', (4995, 5001), True, 'import numpy as np\n'), ((5627, 5647), 'numpy.mean', 'np.mean', (['lines_theta'], {}), '(lines_theta)\n', (5634, 5647), True, 'import numpy as np\n')] |
import pandas as pd
import numpy as np
import time
import datetime
from os import listdir
from os.path import isfile, join
import random
def preprocess(path, timetrack=False):
# the path can be both a file or a folder containing multiple files
# Should it be a folder, be sure that the folder contains ONLY the dataset files unzipped
try:
onlyfiles = [f for f in listdir(path) if isfile(join(path, f))]
onlyfiles.sort()
files=[path+"/"+str(file) for file in onlyfiles]
except:
files=[path]
if timetrack:
count = 0
t0 = time.time()
lt = time.time()
print(count)
data = []
for file_path in files:
file = open(file_path, "r")
if timetrack:
print("--New File--")
print(str(file_path))
for line in file:
# Removing 'not data' parts of the line string
line = line.replace('id-', '')
line = line.replace('\n', '')
line = line.replace('user', '')
line = line.replace('|', '')
line = line.replace(' ', ' ')
aux_str = ''
info = 0 # 0 = time; 1 = clicked_article; 2 = click; 3 = user_features; 4 = articles_list
features = np.zeros(136, dtype=np.bool)
articles_list = []
for i in line:
if i == ' ':
if info == 0:
timestamp = int(aux_str)
aux_str = ''
info+=1
elif info == 1:
clicked_article = int(aux_str)
aux_str = ''
info+=1
elif info == 2:
click = int(aux_str)
aux_str = ''
info+=1
elif info == 3:
try:
features[int(aux_str)-1] = 1
aux_str = ''
except:
articles_list.append(int(aux_str))
aux_str = ''
info+=1
elif info == 4:
articles_list.append(int(aux_str))
aux_str = ''
else:
aux_str+=i
articles_list.append(int(aux_str))
aux_str = ''
if timetrack:
count += 1
if count%100000 == 0:
t1 = time.time()
dt = t1-t0
print(str(count)+" - "+str(datetime.timedelta(seconds=t1-lt))+" - "+str(datetime.timedelta(seconds=dt)))
lt = t1
data.append({'Timestamp': timestamp, 'Clicked_Article': clicked_article, 'Click': click, 'User_Features': features, 'Article_List': np.asarray(articles_list)})
file.close()
df = pd.DataFrame(data)
return df
def read_sample(filename, p=0.01):
# keep the header, then take only 1% of lines
# if random from [0,1] interval is greater than 0.01 the row will be skipped
df = pd.read_csv(
filename,
skiprows=lambda i: i > 0 and random.random() > p
)
return df
| [
"pandas.DataFrame",
"numpy.asarray",
"numpy.zeros",
"time.time",
"random.random",
"datetime.timedelta",
"os.path.join",
"os.listdir"
] | [((2956, 2974), 'pandas.DataFrame', 'pd.DataFrame', (['data'], {}), '(data)\n', (2968, 2974), True, 'import pandas as pd\n'), ((592, 603), 'time.time', 'time.time', ([], {}), '()\n', (601, 603), False, 'import time\n'), ((617, 628), 'time.time', 'time.time', ([], {}), '()\n', (626, 628), False, 'import time\n'), ((1269, 1297), 'numpy.zeros', 'np.zeros', (['(136)'], {'dtype': 'np.bool'}), '(136, dtype=np.bool)\n', (1277, 1297), True, 'import numpy as np\n'), ((387, 400), 'os.listdir', 'listdir', (['path'], {}), '(path)\n', (394, 400), False, 'from os import listdir\n'), ((411, 424), 'os.path.join', 'join', (['path', 'f'], {}), '(path, f)\n', (415, 424), False, 'from os.path import isfile, join\n'), ((2556, 2567), 'time.time', 'time.time', ([], {}), '()\n', (2565, 2567), False, 'import time\n'), ((2896, 2921), 'numpy.asarray', 'np.asarray', (['articles_list'], {}), '(articles_list)\n', (2906, 2921), True, 'import numpy as np\n'), ((3234, 3249), 'random.random', 'random.random', ([], {}), '()\n', (3247, 3249), False, 'import random\n'), ((2691, 2721), 'datetime.timedelta', 'datetime.timedelta', ([], {'seconds': 'dt'}), '(seconds=dt)\n', (2709, 2721), False, 'import datetime\n'), ((2646, 2681), 'datetime.timedelta', 'datetime.timedelta', ([], {'seconds': '(t1 - lt)'}), '(seconds=t1 - lt)\n', (2664, 2681), False, 'import datetime\n')] |
import copy
from gurobipy import Model, LinExpr, GRB, quicksum
import math
import numpy as np
import pandas as pd
import sys
class RobustOCT:
def __init__(self, X, y, tree, X_col_labels, labels, costs, budget, time_limit, threads, verbose):
'''
:param data: The training data
:param label: Name of the column representing the class label
:param tree: Tree object
:param time_limit: The given time limit for solving the MIP
:param costs: The costs of uncertainty
:param budget: The budget of uncertainty
'''
self.cat_features = X_col_labels
self.X = X
self.y = y
self.labels = labels
# datapoints contains the indicies of our training data
self.datapoints = np.arange(0, self.X.shape[0])
self.tree = tree
# Regularization term: encourage less branching without sacrificing accuracy
self.reg = 1 / (len(tree.Nodes) + 1)
'''
Get range of data, and store indices of branching variables based on range
'''
min_values = X.min(axis=0)
max_values = X.max(axis=0)
f_theta_indices = []
b_indices = []
for f in self.cat_features:
min_value = min_values[f]
max_value = max_values[f]
# cutoffs are from min_value to max_value - 1
for theta in range(min_value, max_value):
f_theta_indices += [(f,theta)]
b_indices += [(n, f, theta) for n in self.tree.Nodes]
self.min_values = X.min(axis=0)
self.max_values = X.max(axis=0)
self.f_theta_indices = f_theta_indices
self.b_indices = b_indices
'''
Create uncertainty set
'''
self.epsilon = budget # Budget of uncertainty
self.gammas = costs # Cost of feature uncertainty
self.eta = budget + 1 # Cost of label uncertainty - future work
# Decision Variables
self.t = 0
self.b = 0
self.w = 0
# Gurobi model
self.model = Model('RobustOCT')
if not verbose:
# supress all logging
self.model.params.OutputFlag = 0
# The cuts we add in the callback function would be treated as lazy constraints
self.model.params.LazyConstraints = 1
'''
To compare all approaches in a fair setting we limit the solver to use only one thread to merely evaluate
the strength of the formulation.
'''
if threads is not None:
self.model.params.Threads = threads
self.model.params.TimeLimit = time_limit
'''
The following variables are used for the Benders problem to keep track of the times we call the callback.
- counter_integer tracks number of times we call the callback from an integer node in the branch-&-bound tree
- time_integer tracks the associated time spent in the callback for these calls
- counter_general tracks number of times we call the callback from a non-integer node in the branch-&-bound tree
- time_general tracks the associated time spent in the callback for these calls
the ones ending with success are related to success calls. By success we mean ending up adding a lazy constraint
to the model
'''
self.model._total_callback_time_integer = 0
self.model._total_callback_time_integer_success = 0
self.model._total_callback_time_general = 0
self.model._total_callback_time_general_success = 0
self.model._callback_counter_integer = 0
self.model._callback_counter_integer_success = 0
self.model._callback_counter_general = 0
self.model._callback_counter_general_success = 0
self.model._total_cuts = 0
# We also pass the following information to the model as we need them in the callback
self.model._master = self
###########################################################
# Create the master problem
###########################################################
def create_master_problem(self):
# define variables
# t is the objective value of the problem
self.t = self.model.addVars(self.datapoints, vtype=GRB.CONTINUOUS, ub=1, name='t')
# w[n,k] == 1 iff at node n we do not branch and we make the prediction k
self.w = self.model.addVars(self.tree.Nodes + self.tree.Leaves, self.labels, vtype=GRB.BINARY, name='w')
# b[n,f,theta] ==1 iff at node n we branch on feature f with cutoff theta
self.b = self.model.addVars(self.b_indices, vtype=GRB.BINARY, name='b')
# we need these in the callback to have access to the value of the decision variables
self.model._vars_t = self.t
self.model._vars_b = self.b
self.model._vars_w = self.w
# define constraints
# sum(b[n,f,theta], f, theta) + sum(w[n,k], k) = 1 for all n in nodes
self.model.addConstrs(
(quicksum(self.b[n, f, theta] for (f,theta) in self.f_theta_indices) +
quicksum(self.w[n, k] for k in self.labels) == 1) for n in self.tree.Nodes)
# sum(w[n,k], k) = 1 for all n in leaves
self.model.addConstrs(
(quicksum(self.w[n, k] for k in self.labels) == 1) for n in self.tree.Leaves)
# define objective function
obj = LinExpr(0)
for i in self.datapoints:
obj.add(self.t[i])
# Add regularization term so that in case of tie in objective function,
# encourage less branching
obj.add(-1 * self.reg * quicksum(self.b[n, f, theta] for (n,f,theta) in self.b_indices))
self.model.setObjective(obj, GRB.MAXIMIZE)
| [
"gurobipy.quicksum",
"gurobipy.LinExpr",
"numpy.arange",
"gurobipy.Model"
] | [((791, 820), 'numpy.arange', 'np.arange', (['(0)', 'self.X.shape[0]'], {}), '(0, self.X.shape[0])\n', (800, 820), True, 'import numpy as np\n'), ((2085, 2103), 'gurobipy.Model', 'Model', (['"""RobustOCT"""'], {}), "('RobustOCT')\n", (2090, 2103), False, 'from gurobipy import Model, LinExpr, GRB, quicksum\n'), ((5490, 5500), 'gurobipy.LinExpr', 'LinExpr', (['(0)'], {}), '(0)\n', (5497, 5500), False, 'from gurobipy import Model, LinExpr, GRB, quicksum\n'), ((5714, 5777), 'gurobipy.quicksum', 'quicksum', (['(self.b[n, f, theta] for n, f, theta in self.b_indices)'], {}), '(self.b[n, f, theta] for n, f, theta in self.b_indices)\n', (5722, 5777), False, 'from gurobipy import Model, LinExpr, GRB, quicksum\n'), ((5362, 5405), 'gurobipy.quicksum', 'quicksum', (['(self.w[n, k] for k in self.labels)'], {}), '(self.w[n, k] for k in self.labels)\n', (5370, 5405), False, 'from gurobipy import Model, LinExpr, GRB, quicksum\n'), ((5106, 5172), 'gurobipy.quicksum', 'quicksum', (['(self.b[n, f, theta] for f, theta in self.f_theta_indices)'], {}), '(self.b[n, f, theta] for f, theta in self.f_theta_indices)\n', (5114, 5172), False, 'from gurobipy import Model, LinExpr, GRB, quicksum\n'), ((5192, 5235), 'gurobipy.quicksum', 'quicksum', (['(self.w[n, k] for k in self.labels)'], {}), '(self.w[n, k] for k in self.labels)\n', (5200, 5235), False, 'from gurobipy import Model, LinExpr, GRB, quicksum\n')] |
import cv2
import os
from argparse import ArgumentParser
import numpy as np
def lapse(folder: str = None,
fps: float = None,
name: str = None,
gamma: float = None,
scale: float = None,
extension: str = '.png'
):
'''convert sequential png files to mp4'''
invgamma = 1 / gamma
table = np.array([((i / 255.0) ** invgamma) * 255 for i in np.arange(0, 256)]).astype('uint8')
fourcc = 0x7634706d
images = sorted([img for img in os.listdir(folder) if img.endswith(extension)])
frame = cv2.imread(os.path.join(folder, images[0]))
# if height is None or width is None:
if name is None:
name = folder + 'movie.mp4'
# name = os.path.split(folder)[1]+'.mp4'
height, width, layers = frame.shape
if scale is not None:
width = int(width * scale)
height = int(height * scale)
video = cv2.VideoWriter(name, fourcc, fps, (width, height))
for image in images:
im = cv2.imread(os.path.join(folder, image))
res = cv2.resize(cv2.LUT(im, table), (width, height), interpolation=cv2.INTER_AREA)
video.write(res)
cv2.destroyAllWindows()
video.release()
if __name__ == '__main__':
p = ArgumentParser()
p.add_argument('folder', type=str, help='folder containing images')
p.add_argument('-s', '--scale', type=float, help='rescale the image - relative factor', default=None)
p.add_argument('-f', '--fps', type=float, help='frames per second', default=5)
p.add_argument('-n', '--name', type=str, help='output name with .mp4 ext', default=None)
p.add_argument('-g', '--gamma', type=float, default=1.0)
p.add_argument('-e', '--sufix', type=str, default='.png', help="File extenion. Default: .png")
P = p.parse_args()
lapse(folder=P.folder, scale=P.scale, fps=P.fps, name=P.name, gamma=P.gamma, extension=P.sufix)
| [
"argparse.ArgumentParser",
"cv2.LUT",
"numpy.arange",
"cv2.VideoWriter",
"cv2.destroyAllWindows",
"os.path.join",
"os.listdir"
] | [((912, 963), 'cv2.VideoWriter', 'cv2.VideoWriter', (['name', 'fourcc', 'fps', '(width, height)'], {}), '(name, fourcc, fps, (width, height))\n', (927, 963), False, 'import cv2\n'), ((1165, 1188), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (1186, 1188), False, 'import cv2\n'), ((1246, 1262), 'argparse.ArgumentParser', 'ArgumentParser', ([], {}), '()\n', (1260, 1262), False, 'from argparse import ArgumentParser\n'), ((577, 608), 'os.path.join', 'os.path.join', (['folder', 'images[0]'], {}), '(folder, images[0])\n', (589, 608), False, 'import os\n'), ((1014, 1041), 'os.path.join', 'os.path.join', (['folder', 'image'], {}), '(folder, image)\n', (1026, 1041), False, 'import os\n'), ((1068, 1086), 'cv2.LUT', 'cv2.LUT', (['im', 'table'], {}), '(im, table)\n', (1075, 1086), False, 'import cv2\n'), ((506, 524), 'os.listdir', 'os.listdir', (['folder'], {}), '(folder)\n', (516, 524), False, 'import os\n'), ((409, 426), 'numpy.arange', 'np.arange', (['(0)', '(256)'], {}), '(0, 256)\n', (418, 426), True, 'import numpy as np\n')] |
import numpy as np
from argparse import ArgumentParser
import json
import timeit
from basis_generator import basis_gs
# Example:
# python save_basis.py \
# --dim 2 \
# --num_obs 2 \
# --len_seq 2 \
# --num_out 2 \
# --stop 10000 \
# --save_metadata True \
# --save_data True
def main(args):
start = timeit.default_timer()
X_basis = basis_gs(args.dim,
args.num_obs,
args.len_seq,
args.num_out,
args.level,
args.remove_last_out,
args.stop)
stop = timeit.default_timer()
if args.save_metadata == True:
meta_data = {}
meta_data["dimension"] = args.dim
meta_data["maximum length of sequences"] = args.len_seq
meta_data["num of observables"] = args.num_obs
meta_data["num of outcomes"] = args.num_out
meta_data["time"] = stop - start
meta_data["number of LI moment matrices"] = len(X_basis)
meta_data["moment matrix size"] = X_basis[0].shape
meta_data["level"] = args.level
dir_name = "data/data_basis/"
NAME = '{}-dim-{}-num_obs-{}-len_seq-{}-num_out-{}-level'.format(args.dim, args.num_obs, args.len_seq, args.num_out, args.level)
with open(dir_name + NAME + '-meta_data.json', 'w') as fp:
json.dump(meta_data, fp)
if args.save_data == True:
np.save(dir_name + NAME, [X for X in X_basis])
print("The running time is {}".format(stop - start))
print("Done!")
def str2bool(v):
if isinstance(v, bool):
return v
if v.lower() in ('yes', 'true', 't', 'y', '1'):
return True
elif v.lower() in ('no', 'false', 'f', 'n', '0'):
return False
else:
raise argparse.ArgumentTypeError('Boolean value expected.')
if __name__ == "__main__":
parser = ArgumentParser()
parser.add_argument("--dim", type=int, default=2)
parser.add_argument("--num_obs", type=int, default=3)
parser.add_argument("--len_seq", type=int, default=2)
parser.add_argument("--num_out", type=int, default=2)
parser.add_argument("--level", type=int, default=1)
parser.add_argument("--remove_last_out", type=str2bool, nargs='?',
const=True, default=True)
#parser.add_argument("--norm_prec", type=float, default=0.0000001)
parser.add_argument("--stop", type=int, default=10000)
parser.add_argument("--save_metadata", type=str2bool, nargs='?',
const=True, default=True)
parser.add_argument("--save_data", type=str2bool, nargs='?',
const=True, default=True)
args = parser.parse_args()
main(args)
| [
"json.dump",
"numpy.save",
"argparse.ArgumentParser",
"timeit.default_timer",
"basis_generator.basis_gs"
] | [((336, 358), 'timeit.default_timer', 'timeit.default_timer', ([], {}), '()\n', (356, 358), False, 'import timeit\n'), ((374, 483), 'basis_generator.basis_gs', 'basis_gs', (['args.dim', 'args.num_obs', 'args.len_seq', 'args.num_out', 'args.level', 'args.remove_last_out', 'args.stop'], {}), '(args.dim, args.num_obs, args.len_seq, args.num_out, args.level,\n args.remove_last_out, args.stop)\n', (382, 483), False, 'from basis_generator import basis_gs\n'), ((636, 658), 'timeit.default_timer', 'timeit.default_timer', ([], {}), '()\n', (656, 658), False, 'import timeit\n'), ((1909, 1925), 'argparse.ArgumentParser', 'ArgumentParser', ([], {}), '()\n', (1923, 1925), False, 'from argparse import ArgumentParser\n'), ((1457, 1503), 'numpy.save', 'np.save', (['(dir_name + NAME)', '[X for X in X_basis]'], {}), '(dir_name + NAME, [X for X in X_basis])\n', (1464, 1503), True, 'import numpy as np\n'), ((1392, 1416), 'json.dump', 'json.dump', (['meta_data', 'fp'], {}), '(meta_data, fp)\n', (1401, 1416), False, 'import json\n')] |
import os, shutil
import json
import os.path as osp
import re
import logging
import time
import random
from functools import reduce
import resource
# Raise the soft limit on open file descriptors to 2048 (hard limit unchanged);
# presumably to avoid "Too many open files" with many DataLoader workers -- confirm.
rlimit = resource.getrlimit(resource.RLIMIT_NOFILE)
resource.setrlimit(resource.RLIMIT_NOFILE, (2048, rlimit[1]))
import numpy as np
import scipy as sp
from scipy.spatial.distance import pdist, squareform
from tensorboardX import SummaryWriter
from tqdm import tqdm
import torch
from torch import nn
# torch.multiprocessing.set_sharing_strategy('file_system')
from torch.utils.data import DataLoader
from torch.nn import functional as F
from torchvision.models import resnet18
from model.misc.utils import init, RunningAverage, ShouldSaveModel, myself, prepare_train, save_checkpoint, \
longtensor_to_one_hot, EarlyStop, wait_gpu, worker_init_fn_seed, elapsed_timer, load_checkpoint, plot_grad_flow
from model.datasets.glove import load_glove_as_dict
from model.datasets.CompositionDataset import CompositionDataset
from model.SepMask import SepMix
from model.pygcn import normalize
# Shared keyword arguments applied to every tqdm progress bar in this module.
tqdm_commons = {'ncols': 100, 'ascii': True, 'leave': True}
# Unless NO_GPU_WAIT is set, block at import time until a GPU with at least
# REQ_MEM (default '4000', presumably MB) free memory is available -- see wait_gpu.
if 'NO_GPU_WAIT' not in os.environ.keys():
    wait_gpu(req_mem=int(os.environ.get('REQ_MEM', '4000')))
def params(p):
    """Register every training / evaluation CLI option on parser *p*.

    Args:
        p: an ``argparse.ArgumentParser`` (or compatible) instance.

    Returns:
        The same parser, to allow call chaining.
    """
    option_table = [
        (('--dataset',), dict(choices=['mitstates', 'ut-zap50k'],
                              default='ut-zap50k', help='Dataset for training and testing.')),
        (('--data_path',), dict(default='.', help='Path where you place your dataset.')),
        (('--split',), dict(choices=['compositional-split', 'natural-split'],
                            default='compositional-split')),
        (('--lr',), dict(type=float, default=1e-4)),
        (('--lr_decay', '--lr-decay'), dict(type=float, default=0.1)),
        (('--batch-size', '--batch_size'), dict(type=int, default=512)),
        (('--test-batch-size', '--test_batch_size'), dict(type=int, default=32)),
        (('--momentum',), dict(type=float, default=0.9)),
        (('--weight_decay',), dict(type=float, default=1e-5)),
        (('--sch_milestones',), dict(type=int, nargs='+', default=[500])),
        (('--dropout',), dict(type=float, default=0.0)),
        (('--loss_weights',), dict(type=str, default='{}')),
        (('--rank_margin',), dict(type=float, default=1.0)),
        (('--latent_dims',), dict(type=int, default=512)),
        (('--kneg',), dict(type=int, default=5)),
        (('--num_workers',), dict(type=int, default=5)),
        (('--meta_samples',), dict(type=float, default=0.9)),
        # NOTE(review): store_true with default=True makes this flag a no-op
        # (it is always True) -- confirm whether a --no_meta_inclusive was intended.
        (('--meta_inclusive',), dict(action='store_true', default=True)),
        (('--pre_feat',), dict(action='store_true', default=False)),
        (('--debug_val', '--debug-val'), dict(action='store_true', default=False)),
        (('--model_dir',), dict(type=str, default=".")),
    ]
    for flags, kwargs in option_table:
        p.add_argument(*flags, **kwargs)
    return p
def log_t_loss(neg, pos, anchor, sim=None, margin=None):
    """Soft (logistic) triplet ranking loss, averaged over the batch.

    Computes mean_i log(1 + exp(margin + sim(neg_i, anchor_i) - sim(pos_i, anchor_i))),
    a smooth hinge that pushes each positive to be at least ``margin`` more
    similar to its anchor than the corresponding negative.

    Args:
        neg, pos, anchor: batched embeddings; the first dimension is the batch.
        sim: callable ``(x, y) -> per-row similarity`` (higher = more similar).
        margin: scalar ranking margin.

    Returns:
        Scalar tensor: batch-mean soft-margin triplet loss.
    """
    # F.softplus(x) == log(1 + exp(x)) but is numerically stable: the naive
    # torch.log(1 + torch.exp(x)) overflows exp() to inf for large positive x.
    violation = margin + sim(neg, anchor) - sim(pos, anchor)
    return F.softplus(violation).sum() / pos.shape[0]
def t_loss(neg, pos, anchor, sim=None, margin=None):
    """Hard-margin (hinge) triplet ranking loss, averaged over the batch.

    Penalizes each row only when the negative's similarity to the anchor is
    within ``margin`` of the positive's similarity.
    """
    gap = sim(neg, anchor) - sim(pos, anchor)
    # clamp(min=0) is the same hinge as F.relu.
    return (margin + gap).clamp(min=0).sum() / pos.shape[0]
def log_m_loss(x, anchor, sim=None, margin=None):
    """Soft (logistic) margin loss, averaged over the batch.

    Computes mean_i log(1 + exp(margin + sim(x_i, anchor_i))), a smooth
    penalty that grows as ``x`` becomes more similar to ``anchor`` than
    ``-margin`` allows.

    Args:
        x, anchor: batched embeddings; the first dimension is the batch.
        sim: callable ``(a, b) -> per-row similarity`` (higher = more similar).
        margin: scalar margin.

    Returns:
        Scalar tensor: batch-mean soft margin loss.
    """
    # F.softplus(v) == log(1 + exp(v)) but does not overflow for large v,
    # unlike the naive torch.log(1 + torch.exp(v)).  Mirrors log_t_loss.
    return F.softplus(margin + sim(x, anchor)).sum() / x.shape[0]
def m_loss(x, anchor, sim=None, margin=None):
    """Hard margin (hinge) loss, averaged over the batch.

    Each row is penalized by max(0, margin + sim(x_i, anchor_i)).
    """
    penalty = (margin + sim(x, anchor)).clamp(min=0)
    return penalty.sum() / x.shape[0]
def sep_loss(a, b):
    """Separation penalty: Frobenius norm of the elementwise product a*b,
    divided by the batch size (first dimension of ``a``).

    A small value means the two embeddings have little elementwise overlap.
    """
    overlap = a * b
    # NOTE: .norm() already reduces to a scalar, so .sum() is a no-op kept
    # only to preserve the original expression's behavior exactly.
    return overlap.norm().sum() / a.shape[0]
def h_mean(a, b):
    """Elementwise harmonic mean of ``a`` and ``b``.

    The 1e-8 term in the denominator guards against division by zero when
    both inputs are zero.
    """
    numerator = 2 * a * b
    denominator = a + b + 1e-8
    return numerator / denominator
def sim(x, y):
    """Similarity as negative per-row Euclidean (L2) distance.

    Returns a tensor of shape (batch, 1); larger (closer to 0) means more
    similar.
    """
    distance = (x - y).norm(p=2, dim=1)
    return -distance.unsqueeze(1)
def loss_meta(*args, **kwargs):
    """Loss over the attention-MASKED branch outputs (meta step).

    Positional args: ``(output_pp, negs)`` where ``output_pp`` is the model
    output dict for the positive (att, obj) pair and ``negs`` is a list of
    ``(output_pn, output_np)`` tuples -- (pos-att/neg-obj, neg-att/pos-obj).
    Required kwargs: 'loss_weights', 'rank_margin', 'rand_a', 'rand_o',
    'should_print'.

    Returns a dict of *weighted* loss terms; terms whose configured weight is
    0.0, or which were never accumulated (still the float placeholder), are
    dropped from the result.
    """
    output_pp, negs = args
    loss_weights = kwargs['loss_weights']
    rank_margin = kwargs['rank_margin']
    rand_a = kwargs['rand_a']
    rand_o = kwargs['rand_o']
    should_print = kwargs['should_print']  # NOTE(review): unused in this function
    # Positive-pair terms: 'ra'/'ro' align unmasked vs masked linguistic
    # att/obj values (MSE); 'l*'/'i*' are linguistic/image att-obj
    # classification (cross-entropy via log_softmax + NLL) on the masked branch.
    lp = dict.fromkeys(['ra', 'ro', 'ica', 'ico', 'lca', 'lco'], 0.)
    lp['ra'] = F.mse_loss(output_pp['lin_att_values'], output_pp['masked']['lin_att_values'])
    lp['ro'] = F.mse_loss(output_pp['lin_obj_values'], output_pp['masked']['lin_obj_values'])
    lp['lca'] = F.nll_loss(F.log_softmax(output_pp['masked']['lin_att_logits'], -1), output_pp['masked']['att_idx'])
    lp['lco'] = F.nll_loss(F.log_softmax(output_pp['masked']['lin_obj_logits'], -1), output_pp['masked']['obj_idx'])
    lp['ica'] = F.nll_loss(F.log_softmax(output_pp['masked']['im_att_logits'], -1), output_pp['masked']['att_idx'])
    lp['ico'] = F.nll_loss(F.log_softmax(output_pp['masked']['im_obj_logits'], -1), output_pp['masked']['obj_idx'])
    # Negative-pair triplet terms, averaged over all negatives.  Each family
    # is applied stochastically: only when the caller's random draw exceeds
    # the 'step_a' / 'step_o' threshold in loss_weights.
    ln = dict.fromkeys(['ta', 'to', 'ita', 'ito'], 0.)
    for k in range(len(negs)):
        output_pn, output_np = negs[k]
        if rand_a > loss_weights['step_a']:
            # Attribute triplets in both directions (linguistic anchored on
            # image features, and vice versa).
            ln['ta'] += log_t_loss(output_np['masked']['lin_att_values'], output_pp['masked']['lin_att_values'], output_pp['masked']['im_att_feat'],
                              sim=sim, margin=rank_margin)
            ln['ita'] += log_t_loss(output_np['masked']['im_att_feat'], output_pp['masked']['im_att_feat'], output_pp['masked']['lin_att_values'],
                               sim=sim, margin=rank_margin)
        if rand_o >loss_weights['step_o']:
            # Object triplets, symmetric to the attribute ones above.
            ln['to'] += log_t_loss(output_pn['masked']['lin_obj_values'], output_pp['masked']['lin_obj_values'], output_pp['masked']['im_obj_feat'],
                              sim=sim, margin=rank_margin)
            ln['ito'] += log_t_loss(output_pn['masked']['im_obj_feat'], output_pp['masked']['im_obj_feat'], output_pp['masked']['lin_obj_values'],
                               sim=sim, margin=rank_margin)
    # NOTE(review): raises ZeroDivisionError if negs is empty -- callers
    # always pass at least one negative.
    for k in ln.keys():
        ln[k] /= len(negs)
    losses = {**lp, **ln}
    pop_keys = []
    for k in losses.keys():
        lw = loss_weights.get(k, 0.0)
        # Drop zero-weight terms and terms never accumulated (still a plain
        # Python float rather than a tensor, e.g. when the stochastic gate
        # above skipped them).
        if lw == 0.0 or type(losses[k]) is float:
            pop_keys.append(k)
            continue
        losses[k] *= lw
    for ki in pop_keys:
        losses.pop(ki)
    return losses
def loss_separate(*args, **kwargs):
    """Loss over the UNMASKED branch outputs (main training step).

    Positional args: ``(output_pp, negs)`` where ``output_pp`` is the model
    output dict for the positive (att, obj) pair and ``negs`` is a list of
    ``(output_pn, output_np)`` tuples -- (pos-att/neg-obj, neg-att/pos-obj).
    Required kwargs: 'loss_weights', 'rank_margin', 'rand_a', 'rand_o',
    'should_print'.

    Returns a dict of *weighted* loss terms; terms whose configured weight is
    0.0, or which were never accumulated (still the float placeholder), are
    dropped from the result.
    """
    output_pp, negs = args
    loss_weights = kwargs['loss_weights']
    rank_margin = kwargs['rank_margin']
    should_print = kwargs['should_print']  # NOTE(review): unused in this function
    rand_a = kwargs['rand_a']
    rand_o = kwargs['rand_o']
    # Classification terms: linguistic ('l*') and image ('i*') att/obj
    # cross-entropy.  'ra'/'ro' keys exist for parity with loss_meta but are
    # never assigned here, so the float-placeholder check below removes them.
    lp = dict.fromkeys(['ra', 'ro', 'ica', 'ico', 'lca', 'lco'], 0.)
    lp['lca'] = F.nll_loss(F.log_softmax(output_pp['lin_att_logits'], -1), output_pp['att_idx'])
    lp['lco'] = F.nll_loss(F.log_softmax(output_pp['lin_obj_logits'], -1), output_pp['obj_idx'])
    lp['ica'] = F.nll_loss(F.log_softmax(output_pp['im_att_logits'], -1), output_pp['att_idx'])
    lp['ico'] = F.nll_loss(F.log_softmax(output_pp['im_obj_logits'], -1), output_pp['obj_idx'])
    # Triplet terms over negatives, stochastically gated by the 'step_a' /
    # 'step_o' thresholds in loss_weights.
    ln = dict.fromkeys(['ta', 'to', 'ita', 'ito'], 0.)
    for k in range(len(negs)):
        output_pn, output_np = negs[k]
        if rand_a > loss_weights['step_a']:
            ln['ta'] += log_t_loss(output_np['lin_att_values'], output_pp['lin_att_values'], output_pp['im_att_feat'],
                              sim=sim, margin=rank_margin)
            ln['ita'] += log_t_loss(output_np['im_att_feat'], output_pp['im_att_feat'], output_pp['lin_att_values'],
                               sim=sim, margin=rank_margin)
        if rand_o >loss_weights['step_o']:
            ln['to'] += log_t_loss(output_pn['lin_obj_values'], output_pp['lin_obj_values'], output_pp['im_obj_feat'],
                              sim=sim, margin=rank_margin)
            ln['ito'] += log_t_loss(output_pn['im_obj_feat'], output_pp['im_obj_feat'], output_pp['lin_obj_values'],
                               sim=sim, margin=rank_margin)
    # NOTE(review): divides by zero if negs is empty -- callers always pass
    # at least one negative.
    for k in ln.keys():
        ln[k] /= len(negs)
    losses = {**lp, **ln}
    pop_keys = []
    for k in losses.keys():
        lw = loss_weights.get(k, 0.0)
        # Drop zero-weight terms and never-accumulated float placeholders.
        if lw == 0.0 or type(losses[k]) is float:
            pop_keys.append(k)
            continue
        losses[k] *= lw
    for ki in pop_keys:
        losses.pop(ki)
    return losses
def val_separate(model, dataloader, phase='val', topk=1, **kwargs):
    """Evaluate the model on seen and unseen (attribute, object) pairs.

    Pipeline: (1) embed every test image into attribute and object feature
    spaces; (2) embed every candidate pair's linguistic att/obj values
    (image input is ignored via ignore_img=True); (3) score each image
    against each pair with the negative-L2 `sim`, summing the att and obj
    scores; (4) sweep a calibration bias (STEPS values) added to the
    unseen-pair scores to trade seen vs unseen accuracy, then report AUC of
    that curve, the best seen/unseen accuracy, overall accuracy and best
    harmonic mean.

    Args:
        model: trained network (possibly wrapped in DataParallel -- see
            args.parallel branches).
        dataloader: test/val loader; its dataset must expose train_pairs,
            val_pairs/test_pairs, attr2idx, obj2idx and raw `data`.
        phase: label stored in the returned dict.
        topk: number of top-scoring pairs counted as a hit.
        kwargs: must contain 'args', 'complex_dim' and 'vis_backbone'.

    Returns:
        dict with keys 'phase', 'auc', 'seen_acc', 'unseen_acc',
        'overall_acc', 'h_mean'.
    """
    args = kwargs['args']
    model.eval()
    # NOTE(review): these two int initializations are dead -- both names are
    # rebound to tensors below.
    correct_unseen = 0
    correct_seen = 0
    train_pairs = dataloader.dataset.train_pairs
    test_pairs = dataloader.dataset.val_pairs if dataloader.dataset.phase == 'val' else dataloader.dataset.test_pairs
    with torch.no_grad():
        test_sample_num = len(dataloader.dataset)
        # "Seen" pairs appear in training; "unseen" pairs are test-only.
        seen_pairs = sorted(list(set(train_pairs).intersection(test_pairs)))
        unseen_pairs = sorted(list(set(test_pairs) - set(train_pairs)))
        # pair_data_seen = np.zeros([len(seen_pairs), kwargs['complex_dim']])
        pair_data_seen_att = np.zeros([len(seen_pairs), kwargs['complex_dim']])
        pair_data_seen_obj = np.zeros([len(seen_pairs), kwargs['complex_dim']])
        # pair_data_unseen = np.zeros([len(unseen_pairs), kwargs['complex_dim']])
        pair_data_unseen_att = np.zeros([len(unseen_pairs), kwargs['complex_dim']])
        pair_data_unseen_obj = np.zeros([len(unseen_pairs), kwargs['complex_dim']])
        # test_data = np.zeros([test_sample_num, kwargs['complex_dim']])
        test_data_att = np.zeros([test_sample_num, kwargs['complex_dim']])
        test_data_obj = np.zeros([test_sample_num, kwargs['complex_dim']])
        # Pass 1: extract image-side att/obj features for every test sample.
        i = 0
        for _, data in tqdm(enumerate(dataloader), desc='GT Feature', total=len(dataloader), disable=args.no_pbar, **tqdm_commons):
            if args.parallel:
                output = model.module.forward(data[0].float(), None, None, status='eval', vis_backbone=kwargs['vis_backbone'])
            else:
                output = model.forward(data[0].float(), None, None, status='eval', vis_backbone=kwargs['vis_backbone'])
            feat_tmp = output['im_feat']
            # test_data[i:i + feat_tmp.shape[0], :] = output['im_feat'].detach().cpu().numpy()
            test_data_att[i:i + feat_tmp.shape[0], :] = output['im_att_feat'].detach().cpu().numpy()
            test_data_obj[i:i + feat_tmp.shape[0], :] = output['im_obj_feat'].detach().cpu().numpy()
            # test_data_residue[i:i + dataloader.batch_size, :] = model.get_residue(data[0].cuda()).detach().cpu().numpy()
            # NOTE(review): the slice above uses the actual batch length but
            # the write pointer advances by the nominal batch size -- the two
            # diverge on a final partial batch; confirm drop_last/row alignment.
            i += dataloader.batch_size
            if args.debug_mode:
                break
        # Pass 2: embed every UNSEEN pair from the label side only.  data[0][0]
        # (last batch's first image) is a dummy input since ignore_img=True.
        for i in range(0, len(unseen_pairs)):
            att_idx = torch.Tensor([dataloader.dataset.attr2idx[unseen_pairs[i][0]]]).long()
            obj_idx = torch.Tensor([dataloader.dataset.obj2idx[unseen_pairs[i][1]]]).long()
            if args.parallel:
                output = model.module.forward(data[0][0].unsqueeze(0).float(), att_idx, obj_idx, mask_target=True, status='eval', ignore_img=True)
            else:
                output = model.forward(data[0][0].unsqueeze(0).float(), att_idx, obj_idx, mask_target=True, status='eval', ignore_img=True)
            # tmp = output['lin_feat_recs']
            # pair_data_unseen[i, :] = output['lin_feat_recs'].detach().cpu().numpy()
            pair_data_unseen_att[i, :] = output['lin_att_values'].detach().cpu().numpy()
            pair_data_unseen_obj[i, :] = output['lin_obj_values'].detach().cpu().numpy()
            if args.debug_mode:
                break
        # Pass 3: embed every SEEN pair the same way.
        for i in range(0, len(seen_pairs)):
            att_idx = torch.Tensor([dataloader.dataset.attr2idx[seen_pairs[i][0]]]).long()
            obj_idx = torch.Tensor([dataloader.dataset.obj2idx[seen_pairs[i][1]]]).long()
            if args.parallel:
                output = model.module.forward(data[0][0].unsqueeze(0).float(), att_idx, obj_idx, status='eval', mask_target=True, ignore_img=True)
            else:
                output = model.forward(data[0][0].unsqueeze(0).float(), att_idx, obj_idx, status='eval', mask_target=True, ignore_img=True)
            # tmp = output['lin_feat_recs']
            # pair_data_seen[i, :] = output['lin_feat_recs'].detach().cpu().numpy()
            pair_data_seen_att[i, :] = output['lin_att_values'].detach().cpu().numpy()
            pair_data_seen_obj[i, :] = output['lin_obj_values'].detach().cpu().numpy()
            if args.debug_mode:
                break
        # Stack pair embeddings: unseen pairs first, then seen pairs.  This
        # ordering is relied on by the bias sweep below (dist[:len(unseen_pairs)]).
        # pair_data_seen[0:len(unseen_pairs), :] = pair_data_unseen
        pair_t_unseen_att = torch.FloatTensor(pair_data_unseen_att).cuda()
        pair_t_seen_att = torch.FloatTensor(pair_data_seen_att).cuda()
        pair_t_att = torch.cat((pair_t_unseen_att, pair_t_seen_att))
        pair_t_unseen_obj = torch.FloatTensor(pair_data_unseen_obj).cuda()
        pair_t_seen_obj = torch.FloatTensor(pair_data_seen_obj).cuda()
        pair_t_obj = torch.cat((pair_t_unseen_obj, pair_t_seen_obj))
        # dist[i, j]: summed att+obj similarity of test image i to pair j.
        dist = torch.zeros(test_sample_num, len(unseen_pairs) + len(seen_pairs))
        STEPS = 50
        correct_unseen = torch.zeros(STEPS, )
        total_unseen = 0
        correct_seen = torch.zeros(STEPS, )
        total_seen = 0
        for i in tqdm(range(0, test_sample_num), disable=args.no_pbar, **tqdm_commons):
            dist[i] = sim(pair_t_att, torch.Tensor(test_data_att[i, :]).cuda().repeat(pair_t_att.shape[0], 1)).squeeze() + \
                      sim(pair_t_obj, torch.Tensor(test_data_obj[i, :]).cuda().repeat(pair_t_obj.shape[0], 1)).squeeze()
        # Calibration sweep: add each bias to the unseen-pair scores and count
        # top-k hits separately for seen and unseen ground-truth samples.
        dist_diff = dist.max() - dist.min()
        biases = torch.linspace(-dist_diff-0.1, dist_diff+0.1, STEPS)
        for i in tqdm(range(0, test_sample_num), disable=args.no_pbar, **tqdm_commons):
            _, att_gt, obj_gt = dataloader.dataset.data[i]
            is_seen = (att_gt, obj_gt) in seen_pairs
            if is_seen:
                total_seen += 1
            else:
                total_unseen += 1
            for ii, bias in enumerate(biases):
                dist_bias = dist[i].clone()
                dist_bias[:len(unseen_pairs)] += bias
                preds = dist_bias.argsort(dim=0)[-topk:]
                for pred in preds:
                    pred_pairs = (unseen_pairs + seen_pairs)[pred]
                    correct = int(pred_pairs[0] == att_gt and pred_pairs[1] == obj_gt)
                    if is_seen:
                        correct_seen[ii] += correct
                    else:
                        correct_unseen[ii] += correct
                    # NOTE(review): this `continue` is the last statement of
                    # the loop body and therefore a no-op -- `break` may have
                    # been intended; confirm.
                    if correct == 1:
                        continue
            if args.debug_mode:
                break
        # Convert hit counts to per-bias accuracies and integrate the
        # seen-vs-unseen accuracy curve (trapezoidal rule) into an AUC.
        correct_unseen /= total_unseen
        correct_seen /= total_seen
        auc = torch.trapz(correct_seen, correct_unseen)
        seen_acc = float(correct_seen.max())
        unseen_acc = float(correct_unseen.max())
    test_info = {
        'phase': phase,
        'auc': float(auc),
        'seen_acc': seen_acc,
        'unseen_acc': unseen_acc,
        'overall_acc': (total_seen * seen_acc + total_unseen * unseen_acc) / (total_seen + total_unseen),
        'h_mean': float(h_mean(correct_unseen, correct_seen).max())
    }
    return test_info
def split_meta(*args, meta_samples=0.9, meta_inclusive=False):
    """Split the batch indices into a train subset and a meta subset.

    Positional args are the attribute and object label sequences of the
    batch; only their (common) length is used.

    Args:
        meta_samples: fraction of indices sampled (without replacement) into
            the meta subset.
        meta_inclusive: when True the train subset is the whole batch (so the
            meta indices are also trained on); otherwise it is the complement
            of the meta subset.

    Returns:
        (train_idx, meta_idx) index lists.
    """
    attr_labels, _obj_labels = args
    indices = list(range(len(attr_labels)))
    meta_idx = random.sample(indices, int(len(indices) * meta_samples))
    if meta_inclusive:
        return indices, meta_idx
    return list(set(indices) - set(meta_idx)), meta_idx
def train_step(model, optimizer, data_loader, loss_func=loss_separate, meta_loss_func=None, meta_optimizer=None, device=torch.device('cuda'), args=None, **kwargs):
    """
    Run one training epoch over ``data_loader`` and return summary metrics.

    Each batch is a flat sequence of three 7-field sample groups (offsets
    0-6, 7-13, 14-20); within each group the code reads fields 0/3/4 as
    image, attribute label and object label.  Per the inline comments the
    second group pairs a positive attribute with a negative object and the
    third the reverse -- confirm against the dataset implementation.

    :param model: network exposing ``forward(img, att_idx, obj_idx,
        mask_target=..., vis_backbone=..., [ignore_img=...])``.
    :param optimizer: optimizer stepped on the main loss.
    :param data_loader: iterable of batches as described above.
    :param loss_func: main loss; maps (pos_output, neg_outputs, ...) to a
        dict of named loss terms that are summed here.
    :param meta_loss_func: loss for the extra "meta" update; only used when
        ``meta_optimizer`` is not None.
    :param meta_optimizer: optimizer for the meta update, or None to skip it.
    :param device: accepted for API symmetry; not referenced in this body.
    :param args: namespace read for ``no_pbar``, ``meta_samples``,
        ``rank_margin`` and ``debug_mode``.
    :param kwargs: must supply ``'loss_weights'`` and ``'vis_backbone'``.
    :return: dict with keys phase/loss/loss_meta/acc/acc_att/acc_obj.
    :raises ValueError: if either loss becomes NaN.
    """
    model.train()
    train_info = {}
    # Running averages over the epoch for the two loss streams.
    train_loss_avg = RunningAverage(len(data_loader))
    train_loss_meta_avg = RunningAverage(len(data_loader))
    loss_weights = kwargs['loss_weights']
    t = tqdm(data_loader, disable=args.no_pbar, **tqdm_commons)
    # Loss functions only print diagnostics on the first batch.
    should_print = True
    correct = att_correct = obj_correct = acc_att = acc_obj = total_count = 0
    # Autograd anomaly detection is costly; only enabled in debug mode.
    with torch.autograd.set_detect_anomaly(args.debug_mode):
        for i, data in enumerate(t):
            # Positive sample plus the two kinds of negatives.
            img_p, a_label_p, o_label_p = data[0], data[3], data[4]
            img_pn, att_idx_pn, obj_idx_pn = data[0+7], data[3+7], data[4+7]
            img_np, att_idx_np, obj_idx_np = data[0+14], data[3+14], data[4+14]
            if meta_optimizer is not None:
                # meta_inclusive=True: the main pass still sees every sample.
                train_idx, meta_idx = split_meta(a_label_p, o_label_p, meta_samples=args.meta_samples, meta_inclusive=True)
            else:
                train_idx = list(range(len(a_label_p)))
            loss = torch.Tensor([0])
            # Shared random draws so the main and meta losses mix consistently.
            rand_a = np.random.rand()
            rand_o = np.random.rand()
            if len(train_idx) > 0:
                output_pp = model.forward(img_p[train_idx], a_label_p[train_idx], o_label_p[train_idx], mask_target=False, vis_backbone=kwargs['vis_backbone'])
                # Batch accuracy from the image-branch logits.
                att_preds = torch.argmax(torch.softmax(output_pp['im_att_logits'], -1), -1)
                obj_preds = torch.argmax(torch.softmax(output_pp['im_obj_logits'], -1), -1)
                att_correct = (att_preds == a_label_p.to(att_preds.device))
                obj_correct = (obj_preds == o_label_p.to(att_preds.device))
                correct += int((att_correct & obj_correct).sum())
                acc_att += int(att_correct.sum())
                acc_obj += int(obj_correct.sum())
                total_count += data[0].shape[0]
                negs = []
                output_pn = model.forward(img_pn[train_idx], att_idx_pn[train_idx], obj_idx_pn[train_idx], mask_target=False, vis_backbone=kwargs['vis_backbone']) # pos att, neg obj
                output_np = model.forward(img_np[train_idx], att_idx_np[train_idx], obj_idx_np[train_idx], mask_target=False, vis_backbone=kwargs['vis_backbone']) # neg att, pos obj
                negs.append((output_pn, output_np))
                losses = loss_func(output_pp, negs, rand_a=rand_a, rand_o=rand_o, loss_weights=loss_weights, should_print=should_print, rank_margin=args.rank_margin, args=args)
                # Total loss is the sum of all named loss terms.
                loss = 0
                for _, v in losses.items():
                    loss += v
                t.set_description(''.join([f'{k}={round(v.item(),3)} ' for k, v in losses.items()]))
                # NaN check: NaN is the only value that compares unequal to itself.
                if loss != loss:
                    logging.getLogger(myself()).critical('Training aborted because loss becomes NaN.')
                    raise ValueError
                if loss != 0:
                    model.zero_grad()
                    # retain_graph=True: the meta step below may reuse the graph.
                    loss.backward(retain_graph=True)
                    optimizer.step()
            # extra generalization training ("meta" update on masked outputs)
            loss_meta = torch.Tensor([0])
            if meta_optimizer is not None:
                output_pp = model.forward(img_p[meta_idx], a_label_p[meta_idx], o_label_p[meta_idx], mask_target=False, vis_backbone=kwargs['vis_backbone'], ignore_img=True)
                output_pp.update({'masked': model.forward(img_p[meta_idx], a_label_p[meta_idx], o_label_p[meta_idx], mask_target=True, vis_backbone=kwargs['vis_backbone'])})
                output_pn = {}
                output_pn.update({'masked': model.forward(img_pn[meta_idx], att_idx_pn[meta_idx], obj_idx_pn[meta_idx], mask_target=True, vis_backbone=kwargs['vis_backbone'])})
                output_np = {}
                output_np.update({'masked': model.forward(img_np[meta_idx], att_idx_np[meta_idx], obj_idx_np[meta_idx], mask_target=True, vis_backbone=kwargs['vis_backbone'])})
                losses_meta = meta_loss_func(output_pp, [(output_pn, output_np)], rand_a=rand_a, rand_o=rand_o, loss_weights=loss_weights, should_print=should_print, rank_margin=args.rank_margin, args=args)
                loss_meta = 0
                for _, v in losses_meta.items():
                    loss_meta += v
                if loss_meta != loss_meta:
                    logging.getLogger(myself()).critical('Training aborted because loss_meta becomes NaN.')
                    raise ValueError
                model.zero_grad()
                loss_meta.backward(retain_graph=True)
                meta_optimizer.step()
            if loss != 0:
                train_info = {
                    'phase': 'train',
                    'loss': train_loss_avg.add(loss.item()),
                    'loss_meta': train_loss_meta_avg.add(loss_meta.item()),
                    'acc': correct / total_count,
                    'acc_att': acc_att / total_count,
                    'acc_obj': acc_obj / total_count
                }
            else:
                # No main loss this batch; report loss=0 but keep the meta average.
                train_info = {
                    'phase': 'train',
                    'loss': 0,
                    'loss_meta': train_loss_meta_avg.add(loss_meta.item()),
                    'acc': correct / total_count,
                    'acc_att': acc_att / total_count,
                    'acc_obj': acc_obj / total_count
                }
            if args.debug_mode:
                break
            should_print = False
    return train_info
if __name__ == '__main__':
    # Parse CLI/config options into an args namespace.
    args = init(user_param=params)
    # Per-dataset class counts and word-embedding dimensionalities.
    obj_class_num = {'ut-zap50k': 12, 'mitstates': 245}
    att_class_num = {'ut-zap50k': 16, 'mitstates': 115}
    obj_encode_dims = {'ut-zap50k': 300, 'mitstates': 300}
    att_encode_dims = {'ut-zap50k': 300, 'mitstates': 300}
    # Loss-term weights arrive as a JSON string on the command line.
    loss_weights = json.loads(args.loss_weights)
    glove_embedding = load_glove_as_dict(f'{args.data_path}/glove', dimension=300, identifier='42B')
    train_dataset = CompositionDataset(f'{args.data_path}/{args.dataset}', 'train', split=args.split, embedding_dict=glove_embedding, kneg=args.kneg, precompute_feat=args.pre_feat)
    test_dataset = CompositionDataset(f'{args.data_path}/{args.dataset}', 'test', split=args.split, embedding_dict=glove_embedding, precompute_feat=args.pre_feat)
    train_dataloader = DataLoader(train_dataset, batch_size=args.batch_size, shuffle=True,
                                  num_workers=args.num_workers, pin_memory=True, worker_init_fn=worker_init_fn_seed(args), drop_last=True)
    test_dataloader = DataLoader(test_dataset, batch_size=args.test_batch_size, shuffle=False,
                                 num_workers=args.num_workers, pin_memory=True, worker_init_fn=worker_init_fn_seed(args), drop_last=False)
    # compositional-split has no separate val set: validate on the test split.
    if args.split == 'compositional-split':
        val_dataset = test_dataset
        val_dataloader = test_dataloader
    elif args.split == 'natural-split':
        val_dataset = CompositionDataset(f'{args.data_path}/{args.dataset}', 'val', split=args.split, embedding_dict=glove_embedding, precompute_feat=args.pre_feat)
        val_dataloader = DataLoader(val_dataset, batch_size=args.test_batch_size, shuffle=False,
                                    num_workers=args.num_workers, pin_memory=True, worker_init_fn=worker_init_fn_seed(args), drop_last=False)
def _emb(s):
ss = re.split('\.|-', s)
emb = np.zeros(glove_embedding['the'].shape)
if len(ss) == 1:
try:
emb = glove_embedding[ss[0]]
except KeyError:
logging.warning(
f'Cannot embed word \"{ss[0]}\", fallback to <unk>')
emb = glove_embedding['<unk>']
else:
for w in ss:
try:
emb += glove_embedding[w]
except KeyError:
logging.warning(
f'Cannot embed word \"{w}\", fallback to <unk>')
emb += glove_embedding['<unk>']
return emb
    # GloVe vectors for every attribute and object name (lower-cased).
    att_emb_dict = {k: v for (k, v) in [(
        kk, _emb(kk.lower())) for kk in train_dataset.attrs]}
    obj_emb_dict = {k: v for (k, v) in [(
        kk, _emb(kk.lower())) for kk in train_dataset.objs]}
    # Share the embedding tables across all three dataset splits.
    train_dataset.att_emb_dict = att_emb_dict
    train_dataset.obj_emb_dict = obj_emb_dict
    test_dataset.att_emb_dict = att_emb_dict
    test_dataset.obj_emb_dict = obj_emb_dict
    val_dataset.att_emb_dict = att_emb_dict
    val_dataset.obj_emb_dict = obj_emb_dict
    # Cosine-similarity adjacency matrices between label embeddings
    # (1 - cosine distance), later normalized and fed to the model.
    att_emb = np.array([v for (_, v) in att_emb_dict.items()])
    att_adj = squareform(1-pdist(att_emb, 'cosine'))
    obj_emb = np.array([v for (_, v) in obj_emb_dict.items()])
    obj_adj = squareform(1-pdist(obj_emb, 'cosine'))
    device = torch.device('cuda')
    # Symmetric (att+obj) x (att+obj) mask: 1 marks attribute-object pairs
    # seen in training; the att-att and obj-obj blocks are fully enabled.
    seen_mask = torch.zeros((att_class_num[args.dataset]+obj_class_num[args.dataset], att_class_num[args.dataset]+obj_class_num[args.dataset]))
    for seen_pair in train_dataset.train_pairs:
        att_idx, obj_idx = train_dataset.attr2idx[seen_pair[0]], train_dataset.obj2idx[seen_pair[1]]
        seen_mask[att_idx, att_class_num[args.dataset]+obj_idx] = 1
        seen_mask[att_class_num[args.dataset]+obj_idx, att_idx] = 1
    seen_mask[:att_class_num[args.dataset], :att_class_num[args.dataset]] = 1
    seen_mask[att_class_num[args.dataset]:, att_class_num[args.dataset]:] = 1
    model_config = {
        'complex_dim': args.latent_dims,
        'primitive_dim': 512,
        'seen_mask': seen_mask == 1,
        'obj_encodes': torch.Tensor(obj_emb).cuda(),
        'att_encodes': torch.Tensor(att_emb).cuda(),
        'obj_encode_dim': obj_encode_dims[args.dataset],
        'att_encode_dim': att_encode_dims[args.dataset],
        'obj_class_num': obj_class_num[args.dataset],
        'att_class_num': att_class_num[args.dataset],
        'obj_adj': torch.Tensor(normalize(obj_adj)).cuda(),
        'att_adj': torch.Tensor(normalize(att_adj)).cuda(),
        'dropout': args.dropout,
        'args': args
    }
    model = SepMix(**model_config).cuda()
    optimizer = torch.optim.Adam(model.parameters(), lr=args.lr, weight_decay=args.weight_decay)
    lr_schdlr = torch.optim.lr_scheduler.MultiStepLR(optimizer, milestones=args.sch_milestones, gamma=0.1, last_epoch=args.start_epoch-1)
    # Frozen ResNet-18 (classifier head dropped) unless features are precomputed.
    vis_backbone = None if args.pre_feat else torch.nn.Sequential(*list(resnet18(pretrained=True).children())[:-1]).cuda().eval()
    val = val_separate # val_separate val_distance
    loss_func = loss_separate # loss_separate loss_dist
    meta_loss_func = loss_meta # loss_meta loss_meta_dist
    # The meta update reuses the main optimizer when enabled.
    meta_optimizer = optimizer if args.meta_samples > 0 else None
    model, optimizer, lr_schdlr = prepare_train(model, optimizer, lr_schdlr, args)
    writer = SummaryWriter(log_dir=args.summary_to)
    if not args.test_only:
        # Snapshot the model and training code next to the checkpoints.
        shutil.copy(osp.join('model', 'SepMask.py'), osp.join(args.save_model_to, args.model_id, 'SepMask.py'))
        shutil.copy(osp.join('.', 'train.py'), osp.join(args.save_model_to, args.model_id, 'train.py'))
        # ss tracks the best validation AUC; es implements early stopping.
        ss = ShouldSaveModel(init_step=args.start_epoch-1)
        es = EarlyStop(patience=args.patience)
        for epoch in range(args.start_epoch, args.max_epoch):
            logging.getLogger(myself()).info("-"*10 + f" Epoch {epoch} starts. " + "-"*10) # for timing
            with elapsed_timer() as elapsed:
                train_info = train_step(model, optimizer, train_dataloader, loss_func=loss_func, meta_loss_func=meta_loss_func, meta_optimizer=meta_optimizer, loss_weights=loss_weights, device=device, vis_backbone=vis_backbone, args=args)
            logging.getLogger(myself()).info(f"Epoch {epoch} finished. Elapsed={elapsed():.2f}s.") # for timing
            logging.getLogger(myself()).info(
                f"Epoch {epoch}, "
                f"{train_info}"
            )
            val_info = val(model, val_dataloader, phase='val', device=device, complex_dim=args.latent_dims, vis_backbone=vis_backbone, args=args)
            logging.getLogger(myself()).info(
                f"Epoch {epoch}, "
                f"{val_info}"
            )
            lr_schdlr.step()
            states_dict = {
                'model': model.state_dict(),
                'optimizer': optimizer.state_dict(),
                'lr_scheduler': lr_schdlr.state_dict(),
                'checkpoint_epoch': epoch,
                'initial_lr': args.lr
            }
            # criterion picks the acc argument, so both trackers key on AUC.
            if ss.step(loss=1e3, acc=val_info['auc'], criterion=lambda x1, x2: x2):
                save_checkpoint(
                    f'{args.save_model_to}/{args.model_id}/best.state', **states_dict)
            # The latest checkpoint is written every epoch regardless.
            save_checkpoint(
                f'{args.save_model_to}/{args.model_id}/latest.state', **states_dict)
            if es.step(loss=1e3, acc=val_info['auc'], criterion=lambda x1, x2: x2):
                break
            if args.debug_mode:
                break
        logging.getLogger(myself()).info('Training ended.')
        # Reload the best-AUC checkpoint and evaluate it on the test split.
        states = load_checkpoint(f'{args.save_model_to}/{args.model_id}/best.state', state_dict_to_load=['model', 'checkpoint_epoch'])
        best_epoch = states['checkpoint_epoch']
        model.load_state_dict(states['model'])
        test_info = val(model, test_dataloader, phase='test', device=device, complex_dim=args.latent_dims, vis_backbone=vis_backbone, args=args)
        logging.getLogger(myself()).info(
            f"Best model at epoch {best_epoch}, "
            f"{test_info}"
        )
    elif args.test_only:
        best_epoch = 0
        pretrained = torch.load(os.path.join(args.model_dir, 'best.state'))
        model.load_state_dict(pretrained['model'])
        # Evaluate top-1..top-3 on both the test and validation loaders.
        for i in range(3):
            print("step: ", i+1)
            test_info = val(model, test_dataloader, topk=i+1, phase='test', device=device, complex_dim=args.latent_dims, vis_backbone=vis_backbone, args=args)
            print("test: {}".format(i+1))
            print(test_info)
            print()
            test_info = val(model, val_dataloader, topk=i+1, phase='test', device=device, complex_dim=args.latent_dims, vis_backbone=vis_backbone, args=args)
            print("val: {}".format(i+1))
            print(test_info)
            print('\n\n')
    writer.close()
| [
"model.misc.utils.myself",
"model.misc.utils.init",
"model.datasets.CompositionDataset.CompositionDataset",
"model.misc.utils.prepare_train",
"torch.cat",
"torch.autograd.set_detect_anomaly",
"scipy.spatial.distance.pdist",
"torch.device",
"torch.no_grad",
"os.path.join",
"json.loads",
"model.... | [((157, 199), 'resource.getrlimit', 'resource.getrlimit', (['resource.RLIMIT_NOFILE'], {}), '(resource.RLIMIT_NOFILE)\n', (175, 199), False, 'import resource\n'), ((200, 261), 'resource.setrlimit', 'resource.setrlimit', (['resource.RLIMIT_NOFILE', '(2048, rlimit[1])'], {}), '(resource.RLIMIT_NOFILE, (2048, rlimit[1]))\n', (218, 261), False, 'import resource\n'), ((1123, 1140), 'os.environ.keys', 'os.environ.keys', ([], {}), '()\n', (1138, 1140), False, 'import os, shutil\n'), ((3816, 3894), 'torch.nn.functional.mse_loss', 'F.mse_loss', (["output_pp['lin_att_values']", "output_pp['masked']['lin_att_values']"], {}), "(output_pp['lin_att_values'], output_pp['masked']['lin_att_values'])\n", (3826, 3894), True, 'from torch.nn import functional as F\n'), ((3910, 3988), 'torch.nn.functional.mse_loss', 'F.mse_loss', (["output_pp['lin_obj_values']", "output_pp['masked']['lin_obj_values']"], {}), "(output_pp['lin_obj_values'], output_pp['masked']['lin_obj_values'])\n", (3920, 3988), True, 'from torch.nn import functional as F\n'), ((15191, 15211), 'torch.device', 'torch.device', (['"""cuda"""'], {}), "('cuda')\n", (15203, 15211), False, 'import torch\n'), ((15438, 15493), 'tqdm.tqdm', 'tqdm', (['data_loader'], {'disable': 'args.no_pbar'}), '(data_loader, disable=args.no_pbar, **tqdm_commons)\n', (15442, 15493), False, 'from tqdm import tqdm\n'), ((20742, 20765), 'model.misc.utils.init', 'init', ([], {'user_param': 'params'}), '(user_param=params)\n', (20746, 20765), False, 'from model.misc.utils import init, RunningAverage, ShouldSaveModel, myself, prepare_train, save_checkpoint, longtensor_to_one_hot, EarlyStop, wait_gpu, worker_init_fn_seed, elapsed_timer, load_checkpoint, plot_grad_flow\n'), ((21027, 21056), 'json.loads', 'json.loads', (['args.loss_weights'], {}), '(args.loss_weights)\n', (21037, 21056), False, 'import json\n'), ((21081, 21159), 'model.datasets.glove.load_glove_as_dict', 'load_glove_as_dict', (['f"""{args.data_path}/glove"""'], {'dimension': 
'(300)', 'identifier': '"""42B"""'}), "(f'{args.data_path}/glove', dimension=300, identifier='42B')\n", (21099, 21159), False, 'from model.datasets.glove import load_glove_as_dict\n'), ((21183, 21353), 'model.datasets.CompositionDataset.CompositionDataset', 'CompositionDataset', (['f"""{args.data_path}/{args.dataset}"""', '"""train"""'], {'split': 'args.split', 'embedding_dict': 'glove_embedding', 'kneg': 'args.kneg', 'precompute_feat': 'args.pre_feat'}), "(f'{args.data_path}/{args.dataset}', 'train', split=args.\n split, embedding_dict=glove_embedding, kneg=args.kneg, precompute_feat=\n args.pre_feat)\n", (21201, 21353), False, 'from model.datasets.CompositionDataset import CompositionDataset\n'), ((21367, 21515), 'model.datasets.CompositionDataset.CompositionDataset', 'CompositionDataset', (['f"""{args.data_path}/{args.dataset}"""', '"""test"""'], {'split': 'args.split', 'embedding_dict': 'glove_embedding', 'precompute_feat': 'args.pre_feat'}), "(f'{args.data_path}/{args.dataset}', 'test', split=args.\n split, embedding_dict=glove_embedding, precompute_feat=args.pre_feat)\n", (21385, 21515), False, 'from model.datasets.CompositionDataset import CompositionDataset\n'), ((23900, 23920), 'torch.device', 'torch.device', (['"""cuda"""'], {}), "('cuda')\n", (23912, 23920), False, 'import torch\n'), ((23937, 24073), 'torch.zeros', 'torch.zeros', (['(att_class_num[args.dataset] + obj_class_num[args.dataset], att_class_num[\n args.dataset] + obj_class_num[args.dataset])'], {}), '((att_class_num[args.dataset] + obj_class_num[args.dataset], \n att_class_num[args.dataset] + obj_class_num[args.dataset]))\n', (23948, 24073), False, 'import torch\n'), ((25365, 25493), 'torch.optim.lr_scheduler.MultiStepLR', 'torch.optim.lr_scheduler.MultiStepLR', (['optimizer'], {'milestones': 'args.sch_milestones', 'gamma': '(0.1)', 'last_epoch': '(args.start_epoch - 1)'}), '(optimizer, milestones=args.\n sch_milestones, gamma=0.1, last_epoch=args.start_epoch - 1)\n', (25401, 25493), False, 
'import torch\n'), ((25891, 25939), 'model.misc.utils.prepare_train', 'prepare_train', (['model', 'optimizer', 'lr_schdlr', 'args'], {}), '(model, optimizer, lr_schdlr, args)\n', (25904, 25939), False, 'from model.misc.utils import init, RunningAverage, ShouldSaveModel, myself, prepare_train, save_checkpoint, longtensor_to_one_hot, EarlyStop, wait_gpu, worker_init_fn_seed, elapsed_timer, load_checkpoint, plot_grad_flow\n'), ((25954, 25992), 'tensorboardX.SummaryWriter', 'SummaryWriter', ([], {'log_dir': 'args.summary_to'}), '(log_dir=args.summary_to)\n', (25967, 25992), False, 'from tensorboardX import SummaryWriter\n'), ((4016, 4072), 'torch.nn.functional.log_softmax', 'F.log_softmax', (["output_pp['masked']['lin_att_logits']", '(-1)'], {}), "(output_pp['masked']['lin_att_logits'], -1)\n", (4029, 4072), True, 'from torch.nn import functional as F\n'), ((4133, 4189), 'torch.nn.functional.log_softmax', 'F.log_softmax', (["output_pp['masked']['lin_obj_logits']", '(-1)'], {}), "(output_pp['masked']['lin_obj_logits'], -1)\n", (4146, 4189), True, 'from torch.nn import functional as F\n'), ((4250, 4305), 'torch.nn.functional.log_softmax', 'F.log_softmax', (["output_pp['masked']['im_att_logits']", '(-1)'], {}), "(output_pp['masked']['im_att_logits'], -1)\n", (4263, 4305), True, 'from torch.nn import functional as F\n'), ((4366, 4421), 'torch.nn.functional.log_softmax', 'F.log_softmax', (["output_pp['masked']['im_obj_logits']", '(-1)'], {}), "(output_pp['masked']['im_obj_logits'], -1)\n", (4379, 4421), True, 'from torch.nn import functional as F\n'), ((6150, 6196), 'torch.nn.functional.log_softmax', 'F.log_softmax', (["output_pp['lin_att_logits']", '(-1)'], {}), "(output_pp['lin_att_logits'], -1)\n", (6163, 6196), True, 'from torch.nn import functional as F\n'), ((6247, 6293), 'torch.nn.functional.log_softmax', 'F.log_softmax', (["output_pp['lin_obj_logits']", '(-1)'], {}), "(output_pp['lin_obj_logits'], -1)\n", (6260, 6293), True, 'from torch.nn import functional as 
F\n'), ((6344, 6389), 'torch.nn.functional.log_softmax', 'F.log_softmax', (["output_pp['im_att_logits']", '(-1)'], {}), "(output_pp['im_att_logits'], -1)\n", (6357, 6389), True, 'from torch.nn import functional as F\n'), ((6440, 6485), 'torch.nn.functional.log_softmax', 'F.log_softmax', (["output_pp['im_obj_logits']", '(-1)'], {}), "(output_pp['im_obj_logits'], -1)\n", (6453, 6485), True, 'from torch.nn import functional as F\n'), ((8079, 8094), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (8092, 8094), False, 'import torch\n'), ((8907, 8957), 'numpy.zeros', 'np.zeros', (["[test_sample_num, kwargs['complex_dim']]"], {}), "([test_sample_num, kwargs['complex_dim']])\n", (8915, 8957), True, 'import numpy as np\n'), ((8982, 9032), 'numpy.zeros', 'np.zeros', (["[test_sample_num, kwargs['complex_dim']]"], {}), "([test_sample_num, kwargs['complex_dim']])\n", (8990, 9032), True, 'import numpy as np\n'), ((12138, 12185), 'torch.cat', 'torch.cat', (['(pair_t_unseen_att, pair_t_seen_att)'], {}), '((pair_t_unseen_att, pair_t_seen_att))\n', (12147, 12185), False, 'import torch\n'), ((12353, 12400), 'torch.cat', 'torch.cat', (['(pair_t_unseen_obj, pair_t_seen_obj)'], {}), '((pair_t_unseen_obj, pair_t_seen_obj))\n', (12362, 12400), False, 'import torch\n'), ((12528, 12546), 'torch.zeros', 'torch.zeros', (['STEPS'], {}), '(STEPS)\n', (12539, 12546), False, 'import torch\n'), ((12597, 12615), 'torch.zeros', 'torch.zeros', (['STEPS'], {}), '(STEPS)\n', (12608, 12615), False, 'import torch\n'), ((13044, 13100), 'torch.linspace', 'torch.linspace', (['(-dist_diff - 0.1)', '(dist_diff + 0.1)', 'STEPS'], {}), '(-dist_diff - 0.1, dist_diff + 0.1, STEPS)\n', (13058, 13100), False, 'import torch\n'), ((14201, 14242), 'torch.trapz', 'torch.trapz', (['correct_seen', 'correct_unseen'], {}), '(correct_seen, correct_unseen)\n', (14212, 14242), False, 'import torch\n'), ((15605, 15655), 'torch.autograd.set_detect_anomaly', 'torch.autograd.set_detect_anomaly', (['args.debug_mode'], {}), 
'(args.debug_mode)\n', (15638, 15655), False, 'import torch\n'), ((22504, 22524), 're.split', 're.split', (['"""\\\\.|-"""', 's'], {}), "('\\\\.|-', s)\n", (22512, 22524), False, 'import re\n'), ((22538, 22576), 'numpy.zeros', 'np.zeros', (["glove_embedding['the'].shape"], {}), "(glove_embedding['the'].shape)\n", (22546, 22576), True, 'import numpy as np\n'), ((26250, 26297), 'model.misc.utils.ShouldSaveModel', 'ShouldSaveModel', ([], {'init_step': '(args.start_epoch - 1)'}), '(init_step=args.start_epoch - 1)\n', (26265, 26297), False, 'from model.misc.utils import init, RunningAverage, ShouldSaveModel, myself, prepare_train, save_checkpoint, longtensor_to_one_hot, EarlyStop, wait_gpu, worker_init_fn_seed, elapsed_timer, load_checkpoint, plot_grad_flow\n'), ((26309, 26342), 'model.misc.utils.EarlyStop', 'EarlyStop', ([], {'patience': 'args.patience'}), '(patience=args.patience)\n', (26318, 26342), False, 'from model.misc.utils import init, RunningAverage, ShouldSaveModel, myself, prepare_train, save_checkpoint, longtensor_to_one_hot, EarlyStop, wait_gpu, worker_init_fn_seed, elapsed_timer, load_checkpoint, plot_grad_flow\n'), ((28197, 28318), 'model.misc.utils.load_checkpoint', 'load_checkpoint', (['f"""{args.save_model_to}/{args.model_id}/best.state"""'], {'state_dict_to_load': "['model', 'checkpoint_epoch']"}), "(f'{args.save_model_to}/{args.model_id}/best.state',\n state_dict_to_load=['model', 'checkpoint_epoch'])\n", (28212, 28318), False, 'from model.misc.utils import init, RunningAverage, ShouldSaveModel, myself, prepare_train, save_checkpoint, longtensor_to_one_hot, EarlyStop, wait_gpu, worker_init_fn_seed, elapsed_timer, load_checkpoint, plot_grad_flow\n'), ((16182, 16199), 'torch.Tensor', 'torch.Tensor', (['[0]'], {}), '([0])\n', (16194, 16199), False, 'import torch\n'), ((16221, 16237), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (16235, 16237), True, 'import numpy as np\n'), ((16259, 16275), 'numpy.random.rand', 'np.random.rand', ([], {}), 
'()\n', (16273, 16275), True, 'import numpy as np\n'), ((18295, 18312), 'torch.Tensor', 'torch.Tensor', (['[0]'], {}), '([0])\n', (18307, 18312), False, 'import torch\n'), ((21673, 21698), 'model.misc.utils.worker_init_fn_seed', 'worker_init_fn_seed', (['args'], {}), '(args)\n', (21692, 21698), False, 'from model.misc.utils import init, RunningAverage, ShouldSaveModel, myself, prepare_train, save_checkpoint, longtensor_to_one_hot, EarlyStop, wait_gpu, worker_init_fn_seed, elapsed_timer, load_checkpoint, plot_grad_flow\n'), ((21882, 21907), 'model.misc.utils.worker_init_fn_seed', 'worker_init_fn_seed', (['args'], {}), '(args)\n', (21901, 21907), False, 'from model.misc.utils import init, RunningAverage, ShouldSaveModel, myself, prepare_train, save_checkpoint, longtensor_to_one_hot, EarlyStop, wait_gpu, worker_init_fn_seed, elapsed_timer, load_checkpoint, plot_grad_flow\n'), ((22114, 22261), 'model.datasets.CompositionDataset.CompositionDataset', 'CompositionDataset', (['f"""{args.data_path}/{args.dataset}"""', '"""val"""'], {'split': 'args.split', 'embedding_dict': 'glove_embedding', 'precompute_feat': 'args.pre_feat'}), "(f'{args.data_path}/{args.dataset}', 'val', split=args.\n split, embedding_dict=glove_embedding, precompute_feat=args.pre_feat)\n", (22132, 22261), False, 'from model.datasets.CompositionDataset import CompositionDataset\n'), ((23743, 23767), 'scipy.spatial.distance.pdist', 'pdist', (['att_emb', '"""cosine"""'], {}), "(att_emb, 'cosine')\n", (23748, 23767), False, 'from scipy.spatial.distance import pdist, squareform\n'), ((23860, 23884), 'scipy.spatial.distance.pdist', 'pdist', (['obj_emb', '"""cosine"""'], {}), "(obj_emb, 'cosine')\n", (23865, 23884), False, 'from scipy.spatial.distance import pdist, squareform\n'), ((25222, 25244), 'model.SepMask.SepMix', 'SepMix', ([], {}), '(**model_config)\n', (25228, 25244), False, 'from model.SepMask import SepMix\n'), ((26040, 26071), 'os.path.join', 'osp.join', (['"""model"""', '"""SepMask.py"""'], {}), 
"('model', 'SepMask.py')\n", (26048, 26071), True, 'import os.path as osp\n'), ((26073, 26130), 'os.path.join', 'osp.join', (['args.save_model_to', 'args.model_id', '"""SepMask.py"""'], {}), "(args.save_model_to, args.model_id, 'SepMask.py')\n", (26081, 26130), True, 'import os.path as osp\n'), ((26152, 26177), 'os.path.join', 'osp.join', (['"""."""', '"""train.py"""'], {}), "('.', 'train.py')\n", (26160, 26177), True, 'import os.path as osp\n'), ((26179, 26234), 'os.path.join', 'osp.join', (['args.save_model_to', 'args.model_id', '"""train.py"""'], {}), "(args.save_model_to, args.model_id, 'train.py')\n", (26187, 26234), True, 'import os.path as osp\n'), ((27831, 27920), 'model.misc.utils.save_checkpoint', 'save_checkpoint', (['f"""{args.save_model_to}/{args.model_id}/latest.state"""'], {}), "(f'{args.save_model_to}/{args.model_id}/latest.state', **\n states_dict)\n", (27846, 27920), False, 'from model.misc.utils import init, RunningAverage, ShouldSaveModel, myself, prepare_train, save_checkpoint, longtensor_to_one_hot, EarlyStop, wait_gpu, worker_init_fn_seed, elapsed_timer, load_checkpoint, plot_grad_flow\n'), ((1167, 1200), 'os.environ.get', 'os.environ.get', (['"""REQ_MEM"""', '"""4000"""'], {}), "('REQ_MEM', '4000')\n", (1181, 1200), False, 'import os, shutil\n'), ((11999, 12038), 'torch.FloatTensor', 'torch.FloatTensor', (['pair_data_unseen_att'], {}), '(pair_data_unseen_att)\n', (12016, 12038), False, 'import torch\n'), ((12072, 12109), 'torch.FloatTensor', 'torch.FloatTensor', (['pair_data_seen_att'], {}), '(pair_data_seen_att)\n', (12089, 12109), False, 'import torch\n'), ((12214, 12253), 'torch.FloatTensor', 'torch.FloatTensor', (['pair_data_unseen_obj'], {}), '(pair_data_unseen_obj)\n', (12231, 12253), False, 'import torch\n'), ((12287, 12324), 'torch.FloatTensor', 'torch.FloatTensor', (['pair_data_seen_obj'], {}), '(pair_data_seen_obj)\n', (12304, 12324), False, 'import torch\n'), ((24675, 24696), 'torch.Tensor', 'torch.Tensor', (['obj_emb'], {}), 
'(obj_emb)\n', (24687, 24696), False, 'import torch\n'), ((24732, 24753), 'torch.Tensor', 'torch.Tensor', (['att_emb'], {}), '(att_emb)\n', (24744, 24753), False, 'import torch\n'), ((26528, 26543), 'model.misc.utils.elapsed_timer', 'elapsed_timer', ([], {}), '()\n', (26541, 26543), False, 'from model.misc.utils import init, RunningAverage, ShouldSaveModel, myself, prepare_train, save_checkpoint, longtensor_to_one_hot, EarlyStop, wait_gpu, worker_init_fn_seed, elapsed_timer, load_checkpoint, plot_grad_flow\n'), ((27715, 27802), 'model.misc.utils.save_checkpoint', 'save_checkpoint', (['f"""{args.save_model_to}/{args.model_id}/best.state"""'], {}), "(f'{args.save_model_to}/{args.model_id}/best.state', **\n states_dict)\n", (27730, 27802), False, 'from model.misc.utils import init, RunningAverage, ShouldSaveModel, myself, prepare_train, save_checkpoint, longtensor_to_one_hot, EarlyStop, wait_gpu, worker_init_fn_seed, elapsed_timer, load_checkpoint, plot_grad_flow\n'), ((28781, 28823), 'os.path.join', 'os.path.join', (['args.model_dir', '"""best.state"""'], {}), "(args.model_dir, 'best.state')\n", (28793, 28823), False, 'import os, shutil\n'), ((10109, 10172), 'torch.Tensor', 'torch.Tensor', (['[dataloader.dataset.attr2idx[unseen_pairs[i][0]]]'], {}), '([dataloader.dataset.attr2idx[unseen_pairs[i][0]]])\n', (10121, 10172), False, 'import torch\n'), ((10202, 10264), 'torch.Tensor', 'torch.Tensor', (['[dataloader.dataset.obj2idx[unseen_pairs[i][1]]]'], {}), '([dataloader.dataset.obj2idx[unseen_pairs[i][1]]])\n', (10214, 10264), False, 'import torch\n'), ((11048, 11109), 'torch.Tensor', 'torch.Tensor', (['[dataloader.dataset.attr2idx[seen_pairs[i][0]]]'], {}), '([dataloader.dataset.attr2idx[seen_pairs[i][0]]])\n', (11060, 11109), False, 'import torch\n'), ((11139, 11199), 'torch.Tensor', 'torch.Tensor', (['[dataloader.dataset.obj2idx[seen_pairs[i][1]]]'], {}), '([dataloader.dataset.obj2idx[seen_pairs[i][1]]])\n', (11151, 11199), False, 'import torch\n'), ((16512, 16557), 
'torch.softmax', 'torch.softmax', (["output_pp['im_att_logits']", '(-1)'], {}), "(output_pp['im_att_logits'], -1)\n", (16525, 16557), False, 'import torch\n'), ((16604, 16649), 'torch.softmax', 'torch.softmax', (["output_pp['im_obj_logits']", '(-1)'], {}), "(output_pp['im_obj_logits'], -1)\n", (16617, 16649), False, 'import torch\n'), ((22429, 22454), 'model.misc.utils.worker_init_fn_seed', 'worker_init_fn_seed', (['args'], {}), '(args)\n', (22448, 22454), False, 'from model.misc.utils import init, RunningAverage, ShouldSaveModel, myself, prepare_train, save_checkpoint, longtensor_to_one_hot, EarlyStop, wait_gpu, worker_init_fn_seed, elapsed_timer, load_checkpoint, plot_grad_flow\n'), ((22709, 22775), 'logging.warning', 'logging.warning', (['f"""Cannot embed word "{ss[0]}", fallback to <unk>"""'], {}), '(f\'Cannot embed word "{ss[0]}", fallback to <unk>\')\n', (22724, 22775), False, 'import logging\n'), ((25030, 25048), 'model.pygcn.normalize', 'normalize', (['obj_adj'], {}), '(obj_adj)\n', (25039, 25048), False, 'from model.pygcn import normalize\n'), ((25098, 25116), 'model.pygcn.normalize', 'normalize', (['att_adj'], {}), '(att_adj)\n', (25107, 25116), False, 'from model.pygcn import normalize\n'), ((28146, 28154), 'model.misc.utils.myself', 'myself', ([], {}), '()\n', (28152, 28154), False, 'from model.misc.utils import init, RunningAverage, ShouldSaveModel, myself, prepare_train, save_checkpoint, longtensor_to_one_hot, EarlyStop, wait_gpu, worker_init_fn_seed, elapsed_timer, load_checkpoint, plot_grad_flow\n'), ((28590, 28598), 'model.misc.utils.myself', 'myself', ([], {}), '()\n', (28596, 28598), False, 'from model.misc.utils import init, RunningAverage, ShouldSaveModel, myself, prepare_train, save_checkpoint, longtensor_to_one_hot, EarlyStop, wait_gpu, worker_init_fn_seed, elapsed_timer, load_checkpoint, plot_grad_flow\n'), ((23005, 23067), 'logging.warning', 'logging.warning', (['f"""Cannot embed word "{w}", fallback to <unk>"""'], {}), '(f\'Cannot embed 
word "{w}", fallback to <unk>\')\n', (23020, 23067), False, 'import logging\n'), ((26436, 26444), 'model.misc.utils.myself', 'myself', ([], {}), '()\n', (26442, 26444), False, 'from model.misc.utils import init, RunningAverage, ShouldSaveModel, myself, prepare_train, save_checkpoint, longtensor_to_one_hot, EarlyStop, wait_gpu, worker_init_fn_seed, elapsed_timer, load_checkpoint, plot_grad_flow\n'), ((26825, 26833), 'model.misc.utils.myself', 'myself', ([], {}), '()\n', (26831, 26833), False, 'from model.misc.utils import init, RunningAverage, ShouldSaveModel, myself, prepare_train, save_checkpoint, longtensor_to_one_hot, EarlyStop, wait_gpu, worker_init_fn_seed, elapsed_timer, load_checkpoint, plot_grad_flow\n'), ((26938, 26946), 'model.misc.utils.myself', 'myself', ([], {}), '()\n', (26944, 26946), False, 'from model.misc.utils import init, RunningAverage, ShouldSaveModel, myself, prepare_train, save_checkpoint, longtensor_to_one_hot, EarlyStop, wait_gpu, worker_init_fn_seed, elapsed_timer, load_checkpoint, plot_grad_flow\n'), ((27212, 27220), 'model.misc.utils.myself', 'myself', ([], {}), '()\n', (27218, 27220), False, 'from model.misc.utils import init, RunningAverage, ShouldSaveModel, myself, prepare_train, save_checkpoint, longtensor_to_one_hot, EarlyStop, wait_gpu, worker_init_fn_seed, elapsed_timer, load_checkpoint, plot_grad_flow\n'), ((17949, 17957), 'model.misc.utils.myself', 'myself', ([], {}), '()\n', (17955, 17957), False, 'from model.misc.utils import init, RunningAverage, ShouldSaveModel, myself, prepare_train, save_checkpoint, longtensor_to_one_hot, EarlyStop, wait_gpu, worker_init_fn_seed, elapsed_timer, load_checkpoint, plot_grad_flow\n'), ((19573, 19581), 'model.misc.utils.myself', 'myself', ([], {}), '()\n', (19579, 19581), False, 'from model.misc.utils import init, RunningAverage, ShouldSaveModel, myself, prepare_train, save_checkpoint, longtensor_to_one_hot, EarlyStop, wait_gpu, worker_init_fn_seed, elapsed_timer, load_checkpoint, 
plot_grad_flow\n'), ((12768, 12801), 'torch.Tensor', 'torch.Tensor', (['test_data_att[i, :]'], {}), '(test_data_att[i, :])\n', (12780, 12801), False, 'import torch\n'), ((12891, 12924), 'torch.Tensor', 'torch.Tensor', (['test_data_obj[i, :]'], {}), '(test_data_obj[i, :])\n', (12903, 12924), False, 'import torch\n'), ((25559, 25584), 'torchvision.models.resnet18', 'resnet18', ([], {'pretrained': '(True)'}), '(pretrained=True)\n', (25567, 25584), False, 'from torchvision.models import resnet18\n')] |
#!/usr/bin/env python
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import pandas
from collections import defaultdict
from matplotlib.colors import ListedColormap
from argparse import ArgumentParser
def transform_dict(edge_dict):
    """Invert a two-level mapping so inner keys become outer keys.

    Given ``edge_dict[x][y] = v``, the result satisfies ``result[y][x] = v``.

    :param edge_dict: mapping of mappings (e.g. adjacency weights).
    :return: the transposed mapping of mappings.
    """
    flipped = {}
    for outer, inner in edge_dict.items():
        for key, value in inner.items():
            # setdefault creates the inner dict the first time `key` is seen,
            # replacing the original membership test + factory-less defaultdict.
            flipped.setdefault(key, {})[outer] = value
    return flipped
def strip_dim(local_df: pandas.DataFrame, dim: str) -> pandas.DataFrame:
    """
    Remove a trailing ``_<dim>`` suffix (e.g. ``_2D``) from matching column
    names; return a renamed copy of the frame.

    Fixes a bug in the original implementation: ``column.strip("_" + dim)``
    strips any of those *characters* from both ends of the name (so a column
    named ``"D_2D"`` collapsed to ``""``) instead of removing the suffix.

    :param local_df: frame whose columns may carry dimensionality suffixes.
    :param dim: dimensionality tag such as ``"1D"``, ``"2D"`` or ``"3D"``.
    :return: a new DataFrame with the suffix removed from matching columns.
    """
    suffix = "_" + dim
    new_names = {
        column: column[:-len(suffix)]
        for column in local_df.columns
        if column.split("_")[-1] == dim
    }
    return local_df.rename(new_names, axis=1)
def plot_2d_decision_regions(X, y, classifier, resolution = 0.02):
    """Plot the 2-D decision regions of a fitted classifier plus the samples.

    :param X: (n_samples, 2) feature array.
    :param y: class labels, one per sample.
    :param classifier: fitted estimator exposing a ``predict`` method.
    :param resolution: grid step used when sampling the decision surface.
    """
    markers = ('s', 'x', 'o', '^', 'v')
    colors = ('red', 'blue', 'lightgreen', 'gray', 'cyan')
    cmap = ListedColormap(colors[:len(np.unique(y))])
    # Evaluate the classifier on a dense grid covering the data plus a margin
    # of 1 unit on each side, then colour each cell by the predicted class.
    x1_min, x1_max = X[:, 0].min() - 1, X[:, 0].max() + 1
    x2_min, x2_max = X[:, 1].min() - 1, X[:, 1].max() + 1
    xx1, xx2 = np.meshgrid(np.arange(x1_min, x1_max, resolution),
                           np.arange(x2_min, x2_max, resolution))
    predict_line = np.array([xx1.ravel(), xx2.ravel()]).T
    # Leftover debug print statements removed.
    Z = classifier.predict(predict_line)
    Z = Z.reshape(xx1.shape)
    plt.contourf(xx1, xx2, Z, alpha=.4, cmap = cmap)
    plt.xlim(xx1.min(), xx1.max())
    plt.ylim(xx2.min(), xx2.max())
    # Overlay the training samples, one marker/colour per class.
    for idx, c1 in enumerate(np.unique(y)):
        plt.scatter(x=X[y == c1, 0], y=X[y == c1, 1],
                    alpha=.8, c = cmap(idx),
                    marker = markers[idx], label = c1)
def plot_decision_regions(X, y, classifier, test_idx = None, resolution = 0.02):
    """Plot 2-D decision regions, samples, and (optionally) highlight a test set.

    :param X: (n_samples, 2) feature array.
    :param y: class labels, one per sample.
    :param classifier: fitted estimator exposing a ``predict`` method.
    :param test_idx: optional index/slice of rows in ``X`` to circle as the test set.
    :param resolution: grid step used when sampling the decision surface.
    """
    markers = ('s', 'x', 'o', '^', 'v')
    colors = ('red', 'blue', 'lightgreen', 'gray', 'cyan')
    cmap = ListedColormap(colors[:len(np.unique(y))])
    # Plot the decision surface. This is kinda BS. There must be an equation to get the actual slope from the Perceptron
    # Grid covers the data with a 1-unit margin on each side.
    x1_min, x1_max = X[:, 0].min() - 1, X[:, 0].max() + 1
    x2_min, x2_max = X[:, 1].min() - 1, X[:, 1].max() + 1
    xx1, xx2 = np.meshgrid(np.arange(x1_min, x1_max, resolution),
                           np.arange(x2_min, x2_max, resolution))
    predict_line = np.array([xx1.ravel(), xx2.ravel()]).T
    Z = classifier.predict(predict_line)
    Z = Z.reshape(xx1.shape)
    plt.contourf(xx1, xx2, Z, alpha=.4, cmap = cmap)
    plt.xlim(xx1.min(), xx1.max())
    plt.ylim(xx2.min(), xx2.max())
    # One marker/colour per class for the raw samples.
    for idx, c1 in enumerate(np.unique(y)):
        plt.scatter(x=X[y == c1, 0], y=X[y == c1, 1],
                    alpha=.8, c = cmap(idx),
                    marker = markers[idx], label = c1)
    if test_idx:
        # Circle the held-out test points on top of the class scatter.
        X_test, y_test = X[test_idx, :],y[test_idx]
        plt.scatter(X_test[:, 0], X_test[: ,1], c='',
                    alpha=1.0, linewidth=1, marker='o',
                    s=55, label='test set') | [
"matplotlib.pyplot.scatter",
"collections.defaultdict",
"matplotlib.pyplot.contourf",
"numpy.arange",
"numpy.unique"
] | [((388, 401), 'collections.defaultdict', 'defaultdict', ([], {}), '()\n', (399, 401), False, 'from collections import defaultdict\n'), ((847, 860), 'collections.defaultdict', 'defaultdict', ([], {}), '()\n', (858, 860), False, 'from collections import defaultdict\n'), ((1845, 1892), 'matplotlib.pyplot.contourf', 'plt.contourf', (['xx1', 'xx2', 'Z'], {'alpha': '(0.4)', 'cmap': 'cmap'}), '(xx1, xx2, Z, alpha=0.4, cmap=cmap)\n', (1857, 1892), True, 'import matplotlib.pyplot as plt\n'), ((2906, 2953), 'matplotlib.pyplot.contourf', 'plt.contourf', (['xx1', 'xx2', 'Z'], {'alpha': '(0.4)', 'cmap': 'cmap'}), '(xx1, xx2, Z, alpha=0.4, cmap=cmap)\n', (2918, 2953), True, 'import matplotlib.pyplot as plt\n'), ((1552, 1589), 'numpy.arange', 'np.arange', (['x1_min', 'x1_max', 'resolution'], {}), '(x1_min, x1_max, resolution)\n', (1561, 1589), True, 'import numpy as np\n'), ((1618, 1655), 'numpy.arange', 'np.arange', (['x2_min', 'x2_max', 'resolution'], {}), '(x2_min, x2_max, resolution)\n', (1627, 1655), True, 'import numpy as np\n'), ((1994, 2006), 'numpy.unique', 'np.unique', (['y'], {}), '(y)\n', (2003, 2006), True, 'import numpy as np\n'), ((2667, 2704), 'numpy.arange', 'np.arange', (['x1_min', 'x1_max', 'resolution'], {}), '(x1_min, x1_max, resolution)\n', (2676, 2704), True, 'import numpy as np\n'), ((2733, 2770), 'numpy.arange', 'np.arange', (['x2_min', 'x2_max', 'resolution'], {}), '(x2_min, x2_max, resolution)\n', (2742, 2770), True, 'import numpy as np\n'), ((3055, 3067), 'numpy.unique', 'np.unique', (['y'], {}), '(y)\n', (3064, 3067), True, 'import numpy as np\n'), ((3302, 3411), 'matplotlib.pyplot.scatter', 'plt.scatter', (['X_test[:, 0]', 'X_test[:, 1]'], {'c': '""""""', 'alpha': '(1.0)', 'linewidth': '(1)', 'marker': '"""o"""', 's': '(55)', 'label': '"""test set"""'}), "(X_test[:, 0], X_test[:, 1], c='', alpha=1.0, linewidth=1,\n marker='o', s=55, label='test set')\n", (3313, 3411), True, 'import matplotlib.pyplot as plt\n'), ((521, 534), 
'collections.defaultdict', 'defaultdict', ([], {}), '()\n', (532, 534), False, 'from collections import defaultdict\n'), ((1268, 1280), 'numpy.unique', 'np.unique', (['y'], {}), '(y)\n', (1277, 1280), True, 'import numpy as np\n'), ((2383, 2395), 'numpy.unique', 'np.unique', (['y'], {}), '(y)\n', (2392, 2395), True, 'import numpy as np\n')] |
import numpy as np
import matplotlib.pyplot as plt
import os

# Final-state profiles from the two numerical schemes and the analytic reference.
datosLF = np.loadtxt('LaxFriedrichs_finalstate.dat')
datosUG = np.loadtxt('UpwindGodunov_finalstate.dat')
datosAN = np.loadtxt('Analytic.dat')

# Create the output directory portably; os.system('mkdir ...') depended on a
# shell and printed an error when the directory already existed.
os.makedirs('Plots', exist_ok=True)

results = ['Density', 'Velocity', 'Energy', 'Pressure']
# Column 0 holds x; the physical quantities start at column 1.
for i, label in enumerate(results, start=1):
    fig = plt.figure()
    ax = plt.subplot(111)
    ax.plot(datosLF[:, 0], datosLF[:, i], label='Lax Fridrich method')
    ax.plot(datosUG[:, 0], datosUG[:, i], label='Upwind Godunov method')
    ax.plot(datosAN[:, 0], datosAN[:, i], label='Analytic solution')
    # Leave 15% headroom above the analytic curve.
    ax.set_ylim(0, np.amax(datosAN[:, i]) * 1.15)
    lgd = ax.legend(loc=3, bbox_to_anchor=(1.3, 0.5))
    ax.set_title(label + ' profile after one unit of time')
    ax.set_xlabel('x')
    ax.set_ylabel(label)
    plt.savefig('Plots/' + label + '.pdf', bbox_extra_artists=(lgd,), bbox_inches='tight')
    plt.close(fig)  # release the figure; the original leaked one per quantity
| [
"matplotlib.pyplot.subplot",
"os.system",
"numpy.amax",
"matplotlib.pyplot.figure",
"numpy.loadtxt",
"matplotlib.pyplot.savefig"
] | [((72, 114), 'numpy.loadtxt', 'np.loadtxt', (['"""LaxFriedrichs_finalstate.dat"""'], {}), "('LaxFriedrichs_finalstate.dat')\n", (82, 114), True, 'import numpy as np\n'), ((125, 167), 'numpy.loadtxt', 'np.loadtxt', (['"""UpwindGodunov_finalstate.dat"""'], {}), "('UpwindGodunov_finalstate.dat')\n", (135, 167), True, 'import numpy as np\n'), ((178, 204), 'numpy.loadtxt', 'np.loadtxt', (['"""Analytic.dat"""'], {}), "('Analytic.dat')\n", (188, 204), True, 'import numpy as np\n'), ((206, 231), 'os.system', 'os.system', (['"""mkdir Plots/"""'], {}), "('mkdir Plots/')\n", (215, 231), False, 'import os\n'), ((319, 331), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (329, 331), True, 'import matplotlib.pyplot as plt\n'), ((338, 354), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(111)'], {}), '(111)\n', (349, 354), True, 'import matplotlib.pyplot as plt\n'), ((778, 868), 'matplotlib.pyplot.savefig', 'plt.savefig', (["('Plots/' + label + '.pdf')"], {'bbox_extra_artists': '(lgd,)', 'bbox_inches': '"""tight"""'}), "('Plots/' + label + '.pdf', bbox_extra_artists=(lgd,),\n bbox_inches='tight')\n", (789, 868), True, 'import matplotlib.pyplot as plt\n'), ((563, 585), 'numpy.amax', 'np.amax', (['datosAN[:, i]'], {}), '(datosAN[:, i])\n', (570, 585), True, 'import numpy as np\n')] |
import argparse
import logging
import os
import torch
from torchvision.models.detection import maskrcnn_resnet50_fpn
import transforms as T
from dataloader import TinyVocDataset
from engine import train_one_epoch, evaluate, my_eval
import utils
logger = logging.getLogger(__name__)
def get_args():
    """Parse command-line hyperparameters for training/evaluation.

    Returns the argparse namespace with an extra ``device`` attribute set to
    CUDA when available, CPU otherwise.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--weight', type=str, default='trained_model/current.pkl',
                        help='model.pth path(s)')
    parser.add_argument('--dataset', type=str, default='dataset',
                        help='model.pth path(s)')
    parser.add_argument('--optim', default='SGD', choices=['SGD', 'Adam'],
                        help='optimizer')
    parser.add_argument('--lr', '--learning_rate', dest='lr', default=1e-2,
                        type=float, help='optimizer')
    parser.add_argument('-b', '--batch_size', dest='bs', default=4, type=int,
                        help='batch size')
    parser.add_argument('--nr_epoch', default=100, type=int,
                        help='number of epoch')
    parser.add_argument('--start_epoch', default=0, type=int,
                        help='start epoch')
    parser.add_argument('--nr_worker', default=5, type=int,
                        help='number of dataloader worker')
    parser.add_argument('--nr_class', default=21, type=int,
                        help='number of categories')
    parser.add_argument('--eval', action='store_true',
                        help='if set, it will only forwarding the whole training dataset and show the evaluating result')
    parser.add_argument('--test', action='store_true',
                        help='if set, it will only forwarding the whole testing data and show the evaluating result')
    parser.add_argument('--save_json', action='store_true',
                        help='make submission file')
    parser.add_argument('--pretrained', action='store_true',
                        help='use pretrained model on coco dataset')
    opt = parser.parse_args()
    # Pick the compute device once, up front.
    if torch.cuda.is_available():
        opt.device = torch.device('cuda')
    else:
        opt.device = torch.device('cpu')
    print(opt)
    return opt
def get_model(pretrained, num_classes):
    """ To get the maskRcnn model
    Note: TBD, change the backbone model pfn and other modules
    :param: pretrained(bool), if set, the model we load the model weights
                              pretrained on pascal dataset
    :param: num_classes(int), it should be set to N(catagerories) +1(background)
    """
    # anchor_generator = AnchorGenerator(sizes=((32, 64, 128, 256, 512),),
    #                                    aspect_ratios=((0.5, 1.0, 2.0),))
    # roi_pooler = torchvision.ops.MultiScaleRoIAlign(featmap_names=[0],
    #                                                 output_size=7,
    #                                                 sampling_ratio=2)
    if pretrained:
        print(">>>>>Use pretrained model<<<<<")
    # BUG FIX: the original hard-coded num_classes=21 in both branches,
    # silently ignoring the `num_classes` argument.
    return maskrcnn_resnet50_fpn(pretrained=pretrained, num_classes=num_classes)
def get_transform(train):
    """Build the preprocessing pipeline.

    When ``train`` is set, colour jitter and a random horizontal flip wrap the
    mandatory to-tensor + normalize steps; otherwise only the latter run.
    """
    if train:
        pipeline = [
            T.ColorJitter(0.2, 0.2, 0.2, 0.05),
            T.ToTensor(),
            T.Normalize(),
            T.RandomHorizontalFlip(0.5),
        ]
    else:
        pipeline = [T.ToTensor(), T.Normalize()]
    return T.Compose(pipeline)
def train(opt):
    """Train Mask R-CNN on the tiny-VOC data with a random 80/20 train/val split.

    :param opt: parsed argparse namespace (see get_args); reads bs, nr_worker,
        weight, device, optim, lr, start_epoch, nr_epoch, nr_class, pretrained.
    """
    # prepare dataloader
    train_dataset = TinyVocDataset(
        'dataset', transforms=get_transform(train=True), train=True)
    val_dataset = TinyVocDataset(
        'dataset', transforms=get_transform(train=False), train=True)
    # Same underlying data twice so the validation subset uses eval transforms.
    nr_train = round(0.8*len(train_dataset))
    indices = torch.randperm(len(train_dataset)).tolist()
    train_dataset = torch.utils.data.Subset(train_dataset, indices[:nr_train])
    val_dataset = torch.utils.data.Subset(val_dataset, indices[nr_train:])
    train_loader = torch.utils.data.DataLoader(
        train_dataset, batch_size=opt.bs, shuffle=True, num_workers=opt.nr_worker, collate_fn=utils.collate_fn)
    val_loader = torch.utils.data.DataLoader(
        val_dataset, batch_size=opt.bs, shuffle=False, num_workers=opt.nr_worker, collate_fn=utils.collate_fn)
    # prepare model
    model = get_model(pretrained=opt.pretrained, num_classes=opt.nr_class)
    if os.path.isfile(opt.weight):
        # Resume from an existing checkpoint when one is present.
        print("load model weight, {}".format(opt.weight))
        model.load_state_dict(torch.load(opt.weight))
        print("model loaded!")
    model.to(opt.device)
    # construct an optimizer and a learning rate scheduler
    params = [p for p in model.parameters() if p.requires_grad]
    # NOTE(review): argparse restricts --optim to {SGD, Adam}; any other value
    # would leave `optimizer` unbound here.
    if opt.optim.lower() == 'adam':
        optimizer = torch.optim.Adam(params, lr=opt.lr, weight_decay=0.0005)
    elif opt.optim.lower() == 'sgd':
        optimizer = torch.optim.SGD(
            params, lr=opt.lr, momentum=0.5, weight_decay=0.0005)
    # Decay the learning rate by 0.8 every 5 epochs.
    lr_scheduler = torch.optim.lr_scheduler.StepLR(
        optimizer, step_size=5, gamma=0.8)
    best = float('inf')  # lowest validation loss seen so far
    for epoch in range(opt.start_epoch, opt.nr_epoch):
        # train for one epoch, printing every 10 iterations
        train_one_epoch(model, optimizer, train_loader,
                        opt.device, epoch, print_freq=10)
        # update the learning rate
        lr_scheduler.step()
        # evaluate on the test dataset
        if epoch % 10 == 0:
            evaluate(model, val_loader, device=opt.device)
        eval_res = my_eval(model, val_loader, device=opt.device)
        print(">>>>>>evalutated result, totoal loss = {}<<<<<<".format(eval_res))
        if (eval_res < best):
            # Checkpoint both an epoch-tagged copy and the rolling "current" file.
            torch.save(model.state_dict(),
                       'trained_model/epoch_{}_{}.pkl'.format(epoch, eval_res))
            torch.save(model.state_dict(), 'trained_model/current.pkl')
            best = eval_res
def eval_(opt):
    """Evaluate on the training data.

    Forwards the whole training split through the model and reports the
    coco-eval metrics.
    """
    dataset = TinyVocDataset(
        'dataset', transforms=get_transform(False), train=True)
    loader = torch.utils.data.DataLoader(
        dataset,
        batch_size=opt.bs,
        shuffle=False,
        num_workers=opt.nr_worker,
        collate_fn=utils.collate_fn)
    model = get_model(pretrained=False, num_classes=opt.nr_class)
    if os.path.isfile(opt.weight):
        # Restore the checkpoint before evaluating.
        print("load model weight, {}".format(opt.weight))
        model.load_state_dict(torch.load(opt.weight))
        print("model loaded!")
    model.to(opt.device)
    evaluate(model, loader, device=opt.device)
def test(opt):
    """Forward the test split and optionally dump COCO-format detections.

    :param opt: parsed argparse namespace; reads weight, device, dataset,
        nr_class and save_json.
    """
    # NOTE(review): plt is imported but unused in this function.
    import matplotlib.pyplot as plt
    from pycocotools.coco import COCO
    import numpy as np
    from utils import binary_mask_to_rle
    from PIL import Image
    from torchvision.transforms import functional as F
    import json
    # prepare model
    model = get_model(pretrained=False, num_classes=opt.nr_class)
    if os.path.isfile(opt.weight):
        print("load model weight, {}".format(opt.weight))
        model.load_state_dict(torch.load(opt.weight))
        print(">>>>>>> model loaded! <<<<<<<<")
    model.to(opt.device).eval()
    # prepare test data
    coco = COCO(os.path.join(opt.dataset, "test.json"))
    # coco = COCO(os.path.join(opt.dataset, "pascal_train.json"))
    img_dir = 'dataset/images/'
    if opt.save_json:
        coco_dt = []  # accumulated per-instance predictions for the whole split
        for imgid in coco.imgs:
            img_path = img_dir + coco.loadImgs(ids=imgid)[0]['file_name']
            print(img_path)
            img = F.to_tensor(Image.open(img_path).convert('RGB'))
            with torch.no_grad():
                output = model([img.to(opt.device)])[0]
            masks = output['masks'].cpu().numpy()
            categories = output['labels'].cpu().numpy()
            scores = output['scores'].cpu().numpy()
            # If any objects are detected in this image
            n_instances = len(scores)
            if (len(categories) > 0):
                for i in range(n_instances): # Loop all instances
                    # if scores[i] < 0.3:
                    # continue
                    pred = {}
                    pred['image_id'] = imgid
                    pred['category_id'] = int(categories[i])
                    # save binary mask to RLE, e.g. 512x512 -> rle
                    # Threshold the soft mask at 0.5 to get a binary mask.
                    b_mask = np.where(masks[i][0] > 0.5, 1, 0)  # .astype(int)
                    pred['segmentation'] = binary_mask_to_rle(b_mask)
                    pred['score'] = float(scores[i])
                    coco_dt.append(pred)
        # Write the submission file (path/name are competition-specific).
        with open("submittions/0856126.json", "w") as f:
            json.dump(coco_dt, f)
if __name__ == '__main__':
    # Dispatch on the requested mode: --test, --eval, or (default) training.
    options = get_args()
    if options.test:
        action = test
    elif options.eval:
        action = eval_
    else:
        action = train
    action(options)
| [
"torch.optim.lr_scheduler.StepLR",
"argparse.ArgumentParser",
"engine.my_eval",
"logging.getLogger",
"os.path.isfile",
"torch.device",
"torch.no_grad",
"os.path.join",
"transforms.ColorJitter",
"torch.utils.data.DataLoader",
"torch.load",
"torchvision.models.detection.maskrcnn_resnet50_fpn",
... | [((256, 283), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (273, 283), False, 'import logging\n'), ((368, 393), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (391, 393), False, 'import argparse\n'), ((3618, 3639), 'transforms.Compose', 'T.Compose', (['transforms'], {}), '(transforms)\n', (3627, 3639), True, 'import transforms as T\n'), ((4015, 4073), 'torch.utils.data.Subset', 'torch.utils.data.Subset', (['train_dataset', 'indices[:nr_train]'], {}), '(train_dataset, indices[:nr_train])\n', (4038, 4073), False, 'import torch\n'), ((4092, 4148), 'torch.utils.data.Subset', 'torch.utils.data.Subset', (['val_dataset', 'indices[nr_train:]'], {}), '(val_dataset, indices[nr_train:])\n', (4115, 4148), False, 'import torch\n'), ((4169, 4304), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['train_dataset'], {'batch_size': 'opt.bs', 'shuffle': '(True)', 'num_workers': 'opt.nr_worker', 'collate_fn': 'utils.collate_fn'}), '(train_dataset, batch_size=opt.bs, shuffle=True,\n num_workers=opt.nr_worker, collate_fn=utils.collate_fn)\n', (4196, 4304), False, 'import torch\n'), ((4327, 4461), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['val_dataset'], {'batch_size': 'opt.bs', 'shuffle': '(False)', 'num_workers': 'opt.nr_worker', 'collate_fn': 'utils.collate_fn'}), '(val_dataset, batch_size=opt.bs, shuffle=False,\n num_workers=opt.nr_worker, collate_fn=utils.collate_fn)\n', (4354, 4461), False, 'import torch\n'), ((4570, 4596), 'os.path.isfile', 'os.path.isfile', (['opt.weight'], {}), '(opt.weight)\n', (4584, 4596), False, 'import os\n'), ((5162, 5228), 'torch.optim.lr_scheduler.StepLR', 'torch.optim.lr_scheduler.StepLR', (['optimizer'], {'step_size': '(5)', 'gamma': '(0.8)'}), '(optimizer, step_size=5, gamma=0.8)\n', (5193, 5228), False, 'import torch\n'), ((6356, 6486), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['dataset'], {'batch_size': 'opt.bs', 'shuffle': 
'(False)', 'num_workers': 'opt.nr_worker', 'collate_fn': 'utils.collate_fn'}), '(dataset, batch_size=opt.bs, shuffle=False,\n num_workers=opt.nr_worker, collate_fn=utils.collate_fn)\n', (6383, 6486), False, 'import torch\n'), ((6566, 6592), 'os.path.isfile', 'os.path.isfile', (['opt.weight'], {}), '(opt.weight)\n', (6580, 6592), False, 'import os\n'), ((6766, 6812), 'engine.evaluate', 'evaluate', (['model', 'dataloader'], {'device': 'opt.device'}), '(model, dataloader, device=opt.device)\n', (6774, 6812), False, 'from engine import train_one_epoch, evaluate, my_eval\n'), ((7195, 7221), 'os.path.isfile', 'os.path.isfile', (['opt.weight'], {}), '(opt.weight)\n', (7209, 7221), False, 'import os\n'), ((2124, 2149), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (2147, 2149), False, 'import torch\n'), ((2100, 2120), 'torch.device', 'torch.device', (['"""cuda"""'], {}), "('cuda')\n", (2112, 2120), False, 'import torch\n'), ((2164, 2183), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (2176, 2183), False, 'import torch\n'), ((3006, 3060), 'torchvision.models.detection.maskrcnn_resnet50_fpn', 'maskrcnn_resnet50_fpn', ([], {'pretrained': '(True)', 'num_classes': '(21)'}), '(pretrained=True, num_classes=21)\n', (3027, 3060), False, 'from torchvision.models.detection import maskrcnn_resnet50_fpn\n'), ((3087, 3142), 'torchvision.models.detection.maskrcnn_resnet50_fpn', 'maskrcnn_resnet50_fpn', ([], {'pretrained': '(False)', 'num_classes': '(21)'}), '(pretrained=False, num_classes=21)\n', (3108, 3142), False, 'from torchvision.models.detection import maskrcnn_resnet50_fpn\n'), ((3487, 3499), 'transforms.ToTensor', 'T.ToTensor', ([], {}), '()\n', (3497, 3499), True, 'import transforms as T\n'), ((3523, 3536), 'transforms.Normalize', 'T.Normalize', ([], {}), '()\n', (3534, 3536), True, 'import transforms as T\n'), ((4946, 5002), 'torch.optim.Adam', 'torch.optim.Adam', (['params'], {'lr': 'opt.lr', 'weight_decay': '(0.0005)'}), '(params, 
lr=opt.lr, weight_decay=0.0005)\n', (4962, 5002), False, 'import torch\n'), ((5386, 5471), 'engine.train_one_epoch', 'train_one_epoch', (['model', 'optimizer', 'train_loader', 'opt.device', 'epoch'], {'print_freq': '(10)'}), '(model, optimizer, train_loader, opt.device, epoch,\n print_freq=10)\n', (5401, 5471), False, 'from engine import train_one_epoch, evaluate, my_eval\n'), ((5701, 5746), 'engine.my_eval', 'my_eval', (['model', 'val_loader'], {'device': 'opt.device'}), '(model, val_loader, device=opt.device)\n', (5708, 5746), False, 'from engine import train_one_epoch, evaluate, my_eval\n'), ((7456, 7494), 'os.path.join', 'os.path.join', (['opt.dataset', '"""test.json"""'], {}), "(opt.dataset, 'test.json')\n", (7468, 7494), False, 'import os\n'), ((3429, 3463), 'transforms.ColorJitter', 'T.ColorJitter', (['(0.2)', '(0.2)', '(0.2)', '(0.05)'], {}), '(0.2, 0.2, 0.2, 0.05)\n', (3442, 3463), True, 'import transforms as T\n'), ((3578, 3605), 'transforms.RandomHorizontalFlip', 'T.RandomHorizontalFlip', (['(0.5)'], {}), '(0.5)\n', (3600, 3605), True, 'import transforms as T\n'), ((4686, 4708), 'torch.load', 'torch.load', (['opt.weight'], {}), '(opt.weight)\n', (4696, 4708), False, 'import torch\n'), ((5060, 5129), 'torch.optim.SGD', 'torch.optim.SGD', (['params'], {'lr': 'opt.lr', 'momentum': '(0.5)', 'weight_decay': '(0.0005)'}), '(params, lr=opt.lr, momentum=0.5, weight_decay=0.0005)\n', (5075, 5129), False, 'import torch\n'), ((5634, 5680), 'engine.evaluate', 'evaluate', (['model', 'val_loader'], {'device': 'opt.device'}), '(model, val_loader, device=opt.device)\n', (5642, 5680), False, 'from engine import train_one_epoch, evaluate, my_eval\n'), ((6682, 6704), 'torch.load', 'torch.load', (['opt.weight'], {}), '(opt.weight)\n', (6692, 6704), False, 'import torch\n'), ((7311, 7333), 'torch.load', 'torch.load', (['opt.weight'], {}), '(opt.weight)\n', (7321, 7333), False, 'import torch\n'), ((8878, 8899), 'json.dump', 'json.dump', (['coco_dt', 'f'], {}), '(coco_dt, 
f)\n', (8887, 8899), False, 'import json\n'), ((7856, 7871), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (7869, 7871), False, 'import torch\n'), ((8595, 8628), 'numpy.where', 'np.where', (['(masks[i][0] > 0.5)', '(1)', '(0)'], {}), '(masks[i][0] > 0.5, 1, 0)\n', (8603, 8628), True, 'import numpy as np\n'), ((8688, 8714), 'utils.binary_mask_to_rle', 'binary_mask_to_rle', (['b_mask'], {}), '(b_mask)\n', (8706, 8714), False, 'from utils import binary_mask_to_rle\n'), ((7802, 7822), 'PIL.Image.open', 'Image.open', (['img_path'], {}), '(img_path)\n', (7812, 7822), False, 'from PIL import Image\n')] |
import numpy as np
import pandas as pd
import pandas.util.testing as tm
import pytest
import pandas_datareader.data as web
pytestmark = pytest.mark.stable
class TestEcondb(object):
    """Network-backed smoke tests for the pandas-datareader econdb source."""

    def test_get_cdh_e_fos(self):
        # EUROSTAT
        # Employed doctorate holders in non managerial and non professional
        # occupations by fields of science (%)
        df = web.DataReader(
            "dataset=CDH_E_FOS&GEO=NO,PL,PT,RU&FOS07=FOS1&Y_GRAD=TOTAL",
            "econdb",
            start=pd.Timestamp("2005-01-01"),
            end=pd.Timestamp("2010-01-01"),
        )
        assert isinstance(df, pd.DataFrame)
        assert df.shape == (2, 4)

        # the levels are not returned consistently for econdb, so rebuild the
        # expected MultiIndex from whatever ordering came back
        names = list(df.columns.names)
        levels = [lvl.values.tolist() for lvl in list(df.columns.levels)]
        exp_col = pd.MultiIndex.from_product(levels, names=names)
        exp_idx = pd.DatetimeIndex(["2006-01-01", "2009-01-01"], name="TIME_PERIOD")

        values = np.array([[25.49, np.nan, 39.05, np.nan], [20.38, 25.1, 27.77, 38.1]])
        expected = pd.DataFrame(values, index=exp_idx, columns=exp_col)
        tm.assert_frame_equal(df, expected)

    def test_get_tourism(self):
        # OECD TOURISM_INBOUND: total international arrivals for Japan and US
        df = web.DataReader(
            "dataset=OE_TOURISM_INBOUND&COUNTRY=JPN,USA&" "VARIABLE=INB_ARRIVALS_TOTAL",
            "econdb",
            start=pd.Timestamp("2008-01-01"),
            end=pd.Timestamp("2012-01-01"),
        )
        # FIX: np.float was deprecated in NumPy 1.20 and removed in 1.24;
        # the builtin float is the documented replacement.
        df = df.astype(float)
        jp = np.array([8351000, 6790000, 8611000, 6219000, 8368000], dtype=float)
        us = np.array(
            [175702304, 160507424, 164079728, 167600272, 171320416], dtype=float
        )
        index = pd.date_range("2008-01-01", "2012-01-01", freq="AS", name="TIME_PERIOD")
        # sometimes the country and variable columns are swapped
        lvl1 = df.columns.levels[0][0]
        if lvl1 == "Total international arrivals":
            df = df.swaplevel(0, 1, axis=1)
        for label, values in [("Japan", jp), ("United States", us)]:
            expected = pd.Series(
                values, index=index, name="Total international arrivals"
            )
            tm.assert_series_equal(df[label]["Total international arrivals"], expected)

    def test_bls(self):
        # BLS CPI series CUSR0000SA0 (monthly, US)
        df = web.DataReader(
            "ticker=BLS_CU.CUSR0000SA0.M.US",
            "econdb",
            start=pd.Timestamp("2010-01-01"),
            end=pd.Timestamp("2013-01-27"),
        )
        assert df.loc["2010-05-01"][0] == 217.3
| [
"pandas.DataFrame",
"pandas.Timestamp",
"pandas.util.testing.assert_frame_equal",
"pandas.date_range",
"pandas.DatetimeIndex",
"pandas.MultiIndex.from_product",
"numpy.array",
"pandas.Series",
"pandas.util.testing.assert_series_equal"
] | [((858, 905), 'pandas.MultiIndex.from_product', 'pd.MultiIndex.from_product', (['levels'], {'names': 'names'}), '(levels, names=names)\n', (884, 905), True, 'import pandas as pd\n'), ((924, 990), 'pandas.DatetimeIndex', 'pd.DatetimeIndex', (["['2006-01-01', '2009-01-01']"], {'name': '"""TIME_PERIOD"""'}), "(['2006-01-01', '2009-01-01'], name='TIME_PERIOD')\n", (940, 990), True, 'import pandas as pd\n'), ((1009, 1079), 'numpy.array', 'np.array', (['[[25.49, np.nan, 39.05, np.nan], [20.38, 25.1, 27.77, 38.1]]'], {}), '([[25.49, np.nan, 39.05, np.nan], [20.38, 25.1, 27.77, 38.1]])\n', (1017, 1079), True, 'import numpy as np\n'), ((1099, 1151), 'pandas.DataFrame', 'pd.DataFrame', (['values'], {'index': 'exp_idx', 'columns': 'exp_col'}), '(values, index=exp_idx, columns=exp_col)\n', (1111, 1151), True, 'import pandas as pd\n'), ((1160, 1195), 'pandas.util.testing.assert_frame_equal', 'tm.assert_frame_equal', (['df', 'expected'], {}), '(df, expected)\n', (1181, 1195), True, 'import pandas.util.testing as tm\n'), ((1557, 1625), 'numpy.array', 'np.array', (['[8351000, 6790000, 8611000, 6219000, 8368000]'], {'dtype': 'float'}), '([8351000, 6790000, 8611000, 6219000, 8368000], dtype=float)\n', (1565, 1625), True, 'import numpy as np\n'), ((1639, 1717), 'numpy.array', 'np.array', (['[175702304, 160507424, 164079728, 167600272, 171320416]'], {'dtype': 'float'}), '([175702304, 160507424, 164079728, 167600272, 171320416], dtype=float)\n', (1647, 1717), True, 'import numpy as np\n'), ((1756, 1828), 'pandas.date_range', 'pd.date_range', (['"""2008-01-01"""', '"""2012-01-01"""'], {'freq': '"""AS"""', 'name': '"""TIME_PERIOD"""'}), "('2008-01-01', '2012-01-01', freq='AS', name='TIME_PERIOD')\n", (1769, 1828), True, 'import pandas as pd\n'), ((2121, 2188), 'pandas.Series', 'pd.Series', (['values'], {'index': 'index', 'name': '"""Total international arrivals"""'}), "(values, index=index, name='Total international arrivals')\n", (2130, 2188), True, 'import pandas as pd\n'), ((2231, 
2306), 'pandas.util.testing.assert_series_equal', 'tm.assert_series_equal', (["df[label]['Total international arrivals']", 'expected'], {}), "(df[label]['Total international arrivals'], expected)\n", (2253, 2306), True, 'import pandas.util.testing as tm\n'), ((503, 529), 'pandas.Timestamp', 'pd.Timestamp', (['"""2005-01-01"""'], {}), "('2005-01-01')\n", (515, 529), True, 'import pandas as pd\n'), ((547, 573), 'pandas.Timestamp', 'pd.Timestamp', (['"""2010-01-01"""'], {}), "('2010-01-01')\n", (559, 573), True, 'import pandas as pd\n'), ((1429, 1455), 'pandas.Timestamp', 'pd.Timestamp', (['"""2008-01-01"""'], {}), "('2008-01-01')\n", (1441, 1455), True, 'import pandas as pd\n'), ((1473, 1499), 'pandas.Timestamp', 'pd.Timestamp', (['"""2012-01-01"""'], {}), "('2012-01-01')\n", (1485, 1499), True, 'import pandas as pd\n'), ((2475, 2501), 'pandas.Timestamp', 'pd.Timestamp', (['"""2010-01-01"""'], {}), "('2010-01-01')\n", (2487, 2501), True, 'import pandas as pd\n'), ((2519, 2545), 'pandas.Timestamp', 'pd.Timestamp', (['"""2013-01-27"""'], {}), "('2013-01-27')\n", (2531, 2545), True, 'import pandas as pd\n')] |
import cv2
import os
import numpy as np
image_folder = 'viss'
mt_image_folder = '/home/fothar/MaskTrackRCNN/None/'
def imFunc(e):
    """Numeric sort key for frame filenames: drop the 4-char '.png' suffix."""
    stem = e[:-4]
    return int(stem)
def sceneFunc(e):
    """Numeric sort key for scene directory names."""
    value = int(e)
    return value
# For each scene directory (sorted numerically), stitch per-frame images into
# a side-by-side comparison video: original | MaskTrack result | cluster result.
scenes = [scene for scene in os.listdir(image_folder) if os.path.isdir(os.path.join(image_folder, scene))]
scenes.sort(key=sceneFunc)
for scene in scenes:
    print(scene)
    # 24 fps; frame is three 549-px-wide panels at 360 px tall.
    video = cv2.VideoWriter(os.path.join(image_folder, 'combined' + scene + '.avi'), cv2.VideoWriter_fourcc(*'DIVX'), 24, (3*549, 360))
    scene_dir = os.path.join(image_folder, scene)
    images = [img for img in os.listdir(scene_dir) if img.endswith(".png")]
    images.sort(key=imFunc)
    for image in images:
        # assumes each cluster image stacks result (top 360 rows) over the
        # original (next rows), both 549 px wide — TODO confirm layout
        clus_img = cv2.imread(os.path.join(scene_dir, image))
        orig_img = clus_img[360:, :549]
        clus_res = clus_img[:360, :549]
        mt_img = cv2.imread(os.path.join(mt_image_folder, scene, image))
        mt_res = mt_img[:360]
        # Concatenate the three panels horizontally into one frame.
        img = np.concatenate((orig_img, mt_res, clus_res), axis=1)
        video.write(img)
    '''
    for i in range(10):
        video.write(np.zeros((720, 1098, 3), dtype=np.uint8))
    '''
    cv2.destroyAllWindows()
    video.release()
| [
"cv2.VideoWriter_fourcc",
"cv2.destroyAllWindows",
"os.path.join",
"os.listdir",
"numpy.concatenate"
] | [((517, 550), 'os.path.join', 'os.path.join', (['image_folder', 'scene'], {}), '(image_folder, scene)\n', (529, 550), False, 'import os\n'), ((1141, 1164), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (1162, 1164), False, 'import cv2\n'), ((222, 246), 'os.listdir', 'os.listdir', (['image_folder'], {}), '(image_folder)\n', (232, 246), False, 'import os\n'), ((393, 448), 'os.path.join', 'os.path.join', (['image_folder', "('combined' + scene + '.avi')"], {}), "(image_folder, 'combined' + scene + '.avi')\n", (405, 448), False, 'import os\n'), ((450, 481), 'cv2.VideoWriter_fourcc', 'cv2.VideoWriter_fourcc', (["*'DIVX'"], {}), "(*'DIVX')\n", (472, 481), False, 'import cv2\n'), ((955, 1007), 'numpy.concatenate', 'np.concatenate', (['(orig_img, mt_res, clus_res)'], {'axis': '(1)'}), '((orig_img, mt_res, clus_res), axis=1)\n', (969, 1007), True, 'import numpy as np\n'), ((264, 297), 'os.path.join', 'os.path.join', (['image_folder', 'scene'], {}), '(image_folder, scene)\n', (276, 297), False, 'import os\n'), ((581, 602), 'os.listdir', 'os.listdir', (['scene_dir'], {}), '(scene_dir)\n', (591, 602), False, 'import os\n'), ((723, 753), 'os.path.join', 'os.path.join', (['scene_dir', 'image'], {}), '(scene_dir, image)\n', (735, 753), False, 'import os\n'), ((864, 907), 'os.path.join', 'os.path.join', (['mt_image_folder', 'scene', 'image'], {}), '(mt_image_folder, scene, image)\n', (876, 907), False, 'import os\n')] |
# Copyright 2020 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Data generator for the linear model."""
from matched_markets.methodology import semantics
from matched_markets.methodology import utils
import numpy as np
import pandas as pd
from scipy import stats
class DataSimulator(object):
  r"""Simulates geoexperiment datasets via a basic linear model.

  For the model:
  $y_{i,t} = \alpha_{sales} m_i + \beta \delta_{i,t} + m_i^\nu \epsilon_{i,t}$
  $x_{i,t} = \alpha_{cost} m_i + \delta_{i,t} + m_i^\nu w_{i,t}$
  $\delta_{i,t} = m_i I(i \in treat) I(t \in test)$

  Where:
  * $y$ represents response values
  * $x$ represents input (cost) values.
  * $N_{ctrl}, N_{treat}$ represent the number of geos in the control and
    treatment groups respectively.
  * Geo means $m = [1, \ldots, N_{ctrl}, 1, \ldots, N_{treat}]$.
  * Heteroskedasticity parameter $\nu$, for example
    $\nu=0.5$ for $var(y_i) \propto m_i$
  * Causal cost in geo i at time t: $\delta_{i,t}$
  * $\epsilon_{i,t} \sim N(0, \sigma^2_{resp})$ normal error
    term for response.
  * $w_{i,t} \sim N(0, \sigma^2_{cost})$ normal error term for cost.

  Example:
  ```
  # Experimental design.
  n_control = 50
  n_treat = 50
  time_pre = 150
  time_test = 75
  # Linear params.
  hetresp = 1.0
  hetcost = 0.0
  beta = 0.0
  # Noise params.
  hetsked = 0.0
  sig_resp = 1.0
  sig_cost = 1.0
  # Column names.
  df_keys = {
      'key_response': 'sales',
      'key_cost': 'cost',
      'key_group': 'geo.group',
      'key_period': 'period',
      'key_geo': 'geo',
      'key_date': 'date'
  }
  # Make simulator.
  simulator = DataSimulator(n_control, n_treat,
                            time_pre, time_test,
                            hetresp, hetcost, beta,
                            hetsked, sig_resp, sig_cost,
                            **df_keys)
  # Simulate data.
  fake_data = simulator.sample()
  ```
  """
  def __init__(self,
               n_control, n_treat,
               time_pre, time_test, # no cooldown as yet
               hetresp, hetcost, beta,
               hetsked, sig_resp, sig_cost,
               noise_treat_only=False,
               seed=None, **kwargs):
    """Creates a data simulator.

    Args:
      n_control: int. The number of control geos.
      n_treat: int. The number of treatment geos.
      time_pre: int. The number of pre-test period ticks.
      time_test: int. The number of test period ticks.
      hetresp: float. The degree of mean response variable heterogeneity.
      hetcost: float. The degree of mean cost variable heterogeneity.
      beta: float. The iROAS coefficient to be used.
      hetsked: float. The degree of heteroskedasticity in cost and response.
      sig_resp: float. The noise level in the response variable.
      sig_cost: float. The noise level in the cost variable.
      noise_treat_only: bool. Whether to add noise only in the treatment period.
      seed: int. Sets the seed of the random number generator.
      **kwargs: optional semantics for the produced data frame.
    """
    # Constants.
    self.n_control = n_control
    self.n_treat = n_treat
    self.time_pre = time_pre
    self.time_test = time_test
    self.time_total = time_pre + time_test
    # Model parameters.
    self.hetresp = hetresp
    self.hetcost = hetcost
    self.beta = beta
    self.hetsked = hetsked
    self.sig_resp = sig_resp
    self.sig_cost = sig_cost
    # Derived facts: total geo count and the length of each output column
    # (one row per geo per time tick).
    self.n_total = self.n_treat + self.n_control
    self.col_len = self.n_total * self.time_total
    # Extract any column / index name information supplied by the user.
    user_df_names = utils.kwarg_subdict('key_', **kwargs)
    self._df_names = semantics.DataFrameNameMapping(**user_df_names)
    # Options
    self.noise_treat_only = noise_treat_only
    # Extract any semantics for control / treatment supplied by user.
    user_group_semantics = utils.kwarg_subdict('group_', **kwargs)
    self._groups = semantics.GroupSemantics(**user_group_semantics)
    # Extract any semantics for experimental period supplied by user.
    user_period_semantics = utils.kwarg_subdict('period_', **kwargs)
    self._periods = semantics.PeriodSemantics(**user_period_semantics)
    # Use a dedicated RandomState so repeated sample() calls are reproducible
    # given the same seed.
    if seed is None:
      seed = np.random.randint(0, 2**32)
    self._rng = np.random.RandomState(seed=seed)
  def make_period_base(self):
    """Returns a vector indicating test period entries for one geo."""
    zeros_pre = np.zeros(self.time_pre)
    ones_test = np.ones(self.time_test)
    return np.hstack((zeros_pre, ones_test))
  def make_geo_sizes(self):
    """Returns a column of geo 'sizes' for constructing heterogeneity."""
    # Sizes are 1..N within each group, repeated across all time ticks.
    sizes_control = np.arange(1, self.n_control + 1)
    sizes_treat = np.arange(1, self.n_treat + 1)
    sizes = np.hstack((sizes_control, sizes_treat))
    return np.kron(sizes, np.ones(self.time_total))
  def make_geos(self):
    """Returns a column of geo labels."""
    geo_names = np.arange(1, self.n_total + 1)
    reps = np.ones(self.time_total)
    return np.kron(geo_names, reps)
  def make_periods(self):
    """Returns a column indicating experimental period of each entry."""
    period_base = self.make_period_base()
    return np.kron(np.ones(self.n_total), period_base)
  def make_groups(self):
    """Returns a vector of ones at treatment group entries, zero in control."""
    # Note: despite the docstring, control entries are 1 and treatment 2,
    # matching the group codes used elsewhere in the package.
    control = np.ones(self.n_control * self.time_total, dtype=int)
    treatment = 2*np.ones(self.n_treat * self.time_total, dtype=int)
    return np.hstack((control, treatment))
  def make_cost_causal(self):
    """Returns a column representing the cost caused by the experiment."""
    # Causal cost is proportional to geo size, treatment group only,
    # test period only.
    zeros_control = np.zeros(self.n_control)
    range_treat = np.arange(1, self.n_treat + 1)
    cost_base = np.hstack((zeros_control, range_treat))
    period_base = self.make_period_base()
    cost_causal = np.kron(cost_base, period_base)
    return cost_causal
  def make_test_mask(self):
    """Returns a column of ones in test period entries and zeros elsewhere."""
    return np.kron(np.ones(self.n_total), self.make_period_base())
  def make_noise(self, sig):
    """Returns a vector of additive noise with standard deviation sig."""
    # Heteroskedastic scale: sigma * m_i^hetsked, per the class model.
    sig_multiplier = sig * np.power(self.make_geo_sizes(), self.hetsked)
    white_noise = stats.norm.rvs(size=self.col_len, random_state=self._rng)
    noise = sig_multiplier * white_noise
    if self.noise_treat_only:
      noise *= self.make_test_mask()
    return noise
  def make_cost(self):
    """Returns a cost column for the dataset (baseline + causal + noise)."""
    sizes = self.make_geo_sizes()
    cost_default = self.hetcost * sizes
    cost_causal = self.make_cost_causal()
    return cost_default + cost_causal + self.make_noise(self.sig_cost)
  def make_sales(self):
    """Returns a sales column for the dataset."""
    sizes = self.make_geo_sizes()
    means = self.hetresp * sizes
    incr_cost = self.make_cost_causal()
    # Response = beta * incremental cost + baseline mean + noise.
    return self.beta * incr_cost + means + self.make_noise(self.sig_resp)
  def make_dates(self):
    """Returns an integer column representing dates for the dataset."""
    return np.kron(np.ones(self.n_total), np.arange(self.time_total))
  def sample(self):
    """Draw a sample dataset from the model.

    Returns:
      A `pd.DataFrame` indexed by geo, with date, group, period, response,
      cost and size columns (names per the user-supplied semantics).
    """
    dates = self.make_dates()
    groups = self.make_groups()
    periods = self.make_periods()
    geos = self.make_geos()
    cost = self.make_cost()
    sales = self.make_sales()
    sizes = self.make_geo_sizes()
    data = {
        self._df_names.date: dates,
        self._df_names.group: groups,
        self._df_names.period: periods,
        self._df_names.geo: geos,
        self._df_names.response: sales,
        self._df_names.cost: cost,
        'size': sizes,
    }
    frame = pd.DataFrame(data, index=np.arange(self.col_len))
    frame = frame.set_index(self._df_names.geo, append=False)
    return frame
| [
"scipy.stats.norm.rvs",
"numpy.zeros",
"numpy.ones",
"numpy.random.RandomState",
"matched_markets.methodology.utils.kwarg_subdict",
"matched_markets.methodology.semantics.GroupSemantics",
"numpy.hstack",
"matched_markets.methodology.semantics.DataFrameNameMapping",
"numpy.random.randint",
"numpy.a... | [((4305, 4342), 'matched_markets.methodology.utils.kwarg_subdict', 'utils.kwarg_subdict', (['"""key_"""'], {}), "('key_', **kwargs)\n", (4324, 4342), False, 'from matched_markets.methodology import utils\n'), ((4364, 4411), 'matched_markets.methodology.semantics.DataFrameNameMapping', 'semantics.DataFrameNameMapping', ([], {}), '(**user_df_names)\n', (4394, 4411), False, 'from matched_markets.methodology import semantics\n'), ((4570, 4609), 'matched_markets.methodology.utils.kwarg_subdict', 'utils.kwarg_subdict', (['"""group_"""'], {}), "('group_', **kwargs)\n", (4589, 4609), False, 'from matched_markets.methodology import utils\n'), ((4629, 4677), 'matched_markets.methodology.semantics.GroupSemantics', 'semantics.GroupSemantics', ([], {}), '(**user_group_semantics)\n', (4653, 4677), False, 'from matched_markets.methodology import semantics\n'), ((4777, 4817), 'matched_markets.methodology.utils.kwarg_subdict', 'utils.kwarg_subdict', (['"""period_"""'], {}), "('period_', **kwargs)\n", (4796, 4817), False, 'from matched_markets.methodology import utils\n'), ((4838, 4888), 'matched_markets.methodology.semantics.PeriodSemantics', 'semantics.PeriodSemantics', ([], {}), '(**user_period_semantics)\n', (4863, 4888), False, 'from matched_markets.methodology import semantics\n'), ((4968, 5000), 'numpy.random.RandomState', 'np.random.RandomState', ([], {'seed': 'seed'}), '(seed=seed)\n', (4989, 5000), True, 'import numpy as np\n'), ((5119, 5142), 'numpy.zeros', 'np.zeros', (['self.time_pre'], {}), '(self.time_pre)\n', (5127, 5142), True, 'import numpy as np\n'), ((5159, 5182), 'numpy.ones', 'np.ones', (['self.time_test'], {}), '(self.time_test)\n', (5166, 5182), True, 'import numpy as np\n'), ((5194, 5227), 'numpy.hstack', 'np.hstack', (['(zeros_pre, ones_test)'], {}), '((zeros_pre, ones_test))\n', (5203, 5227), True, 'import numpy as np\n'), ((5351, 5383), 'numpy.arange', 'np.arange', (['(1)', '(self.n_control + 1)'], {}), '(1, self.n_control + 1)\n', (5360, 
5383), True, 'import numpy as np\n'), ((5402, 5432), 'numpy.arange', 'np.arange', (['(1)', '(self.n_treat + 1)'], {}), '(1, self.n_treat + 1)\n', (5411, 5432), True, 'import numpy as np\n'), ((5445, 5484), 'numpy.hstack', 'np.hstack', (['(sizes_control, sizes_treat)'], {}), '((sizes_control, sizes_treat))\n', (5454, 5484), True, 'import numpy as np\n'), ((5619, 5649), 'numpy.arange', 'np.arange', (['(1)', '(self.n_total + 1)'], {}), '(1, self.n_total + 1)\n', (5628, 5649), True, 'import numpy as np\n'), ((5661, 5685), 'numpy.ones', 'np.ones', (['self.time_total'], {}), '(self.time_total)\n', (5668, 5685), True, 'import numpy as np\n'), ((5697, 5721), 'numpy.kron', 'np.kron', (['geo_names', 'reps'], {}), '(geo_names, reps)\n', (5704, 5721), True, 'import numpy as np\n'), ((6039, 6091), 'numpy.ones', 'np.ones', (['(self.n_control * self.time_total)'], {'dtype': 'int'}), '(self.n_control * self.time_total, dtype=int)\n', (6046, 6091), True, 'import numpy as np\n'), ((6172, 6203), 'numpy.hstack', 'np.hstack', (['(control, treatment)'], {}), '((control, treatment))\n', (6181, 6203), True, 'import numpy as np\n'), ((6330, 6354), 'numpy.zeros', 'np.zeros', (['self.n_control'], {}), '(self.n_control)\n', (6338, 6354), True, 'import numpy as np\n'), ((6373, 6403), 'numpy.arange', 'np.arange', (['(1)', '(self.n_treat + 1)'], {}), '(1, self.n_treat + 1)\n', (6382, 6403), True, 'import numpy as np\n'), ((6420, 6459), 'numpy.hstack', 'np.hstack', (['(zeros_control, range_treat)'], {}), '((zeros_control, range_treat))\n', (6429, 6459), True, 'import numpy as np\n'), ((6520, 6551), 'numpy.kron', 'np.kron', (['cost_base', 'period_base'], {}), '(cost_base, period_base)\n', (6527, 6551), True, 'import numpy as np\n'), ((6945, 7002), 'scipy.stats.norm.rvs', 'stats.norm.rvs', ([], {'size': 'self.col_len', 'random_state': 'self._rng'}), '(size=self.col_len, random_state=self._rng)\n', (6959, 7002), False, 'from scipy import stats\n'), ((4924, 4953), 'numpy.random.randint', 
'np.random.randint', (['(0)', '(2 ** 32)'], {}), '(0, 2 ** 32)\n', (4941, 4953), True, 'import numpy as np\n'), ((5511, 5535), 'numpy.ones', 'np.ones', (['self.time_total'], {}), '(self.time_total)\n', (5518, 5535), True, 'import numpy as np\n'), ((5883, 5904), 'numpy.ones', 'np.ones', (['self.n_total'], {}), '(self.n_total)\n', (5890, 5904), True, 'import numpy as np\n'), ((6110, 6160), 'numpy.ones', 'np.ones', (['(self.n_treat * self.time_total)'], {'dtype': 'int'}), '(self.n_treat * self.time_total, dtype=int)\n', (6117, 6160), True, 'import numpy as np\n'), ((6702, 6723), 'numpy.ones', 'np.ones', (['self.n_total'], {}), '(self.n_total)\n', (6709, 6723), True, 'import numpy as np\n'), ((7761, 7782), 'numpy.ones', 'np.ones', (['self.n_total'], {}), '(self.n_total)\n', (7768, 7782), True, 'import numpy as np\n'), ((7784, 7810), 'numpy.arange', 'np.arange', (['self.time_total'], {}), '(self.time_total)\n', (7793, 7810), True, 'import numpy as np\n'), ((8445, 8468), 'numpy.arange', 'np.arange', (['self.col_len'], {}), '(self.col_len)\n', (8454, 8468), True, 'import numpy as np\n')] |
# This module is to plot the basic map and stations and epicenter
def p(lat0=0, lon0=0, lat=1, lon=1, stlas=None, stlos=None,
      name='stations.pdf', marker='^', color='g'):
    """Plot station locations on a Cassini-projection basemap and save a figure.

    Args:
        lat0, lon0: lower-left corner of the map (degrees).
        lat, lon: map extent in latitude / longitude (degrees).
        stlas, stlos: station latitudes / longitudes; default to a demo set.
        name: output figure filename.
        marker, color: matplotlib marker style and colour for the stations.
    """
    import numpy as np
    import matplotlib.pyplot as plt
    from mpl_toolkits.basemap import Basemap
    # Avoid mutable default arguments: fall back to the demo station set.
    if stlas is None:
        stlas = [47.761659, 48.7405, 49.755100, 45.737167]
    if stlos is None:
        stlos = [12.864466, 11.8671, 10.849660, 14.795714]
    plt.clf()
    # Cassini projection centred on the middle of the requested window.
    m = Basemap(llcrnrlon=lon0, llcrnrlat=lat0, urcrnrlon=lon0+lon,
                urcrnrlat=lat0+lat, projection='cass',
                lat_0=(lat0*2+lat)/2, lon_0=(lon0*2+lon)/2)
    # Graticule: parallel labels on the left, meridian labels at the bottom.
    m.drawparallels(np.arange(lat0, lat0+lat, lat/3), labels=[1, 0, 0, 0])
    m.drawmeridians(np.arange(lon0, lon0+lon, lon/3), labels=[0, 0, 0, 1])
    m.drawmapboundary()
    x, y = m(stlos, stlas)
    m.scatter(x, y, 40, color=color, marker=marker)
    plt.savefig(name)
| [
"numpy.arange",
"mpl_toolkits.basemap.Basemap",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.clf"
] | [((355, 364), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (362, 364), True, 'import matplotlib.pyplot as plt\n'), ((389, 556), 'mpl_toolkits.basemap.Basemap', 'Basemap', ([], {'llcrnrlon': 'lon0', 'llcrnrlat': 'lat0', 'urcrnrlon': '(lon0 + lon)', 'urcrnrlat': '(lat0 + lat)', 'projection': '"""cass"""', 'lat_0': '((lat0 * 2 + lat) / 2)', 'lon_0': '((lon0 * 2 + lon) / 2)'}), "(llcrnrlon=lon0, llcrnrlat=lat0, urcrnrlon=lon0 + lon, urcrnrlat=\n lat0 + lat, projection='cass', lat_0=(lat0 * 2 + lat) / 2, lon_0=(lon0 *\n 2 + lon) / 2)\n", (396, 556), False, 'from mpl_toolkits.basemap import Basemap\n'), ((786, 803), 'matplotlib.pyplot.savefig', 'plt.savefig', (['name'], {}), '(name)\n', (797, 803), True, 'import matplotlib.pyplot as plt\n'), ((547, 583), 'numpy.arange', 'np.arange', (['lat0', '(lat0 + lat)', '(lat / 3)'], {}), '(lat0, lat0 + lat, lat / 3)\n', (556, 583), True, 'import numpy as np\n'), ((622, 658), 'numpy.arange', 'np.arange', (['lon0', '(lon0 + lon)', '(lon / 3)'], {}), '(lon0, lon0 + lon, lon / 3)\n', (631, 658), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
import os
import sys
sys.path += [os.path.join(os.path.dirname(os.path.dirname(os.path.abspath(__file__))), 'src')]
sys.path += [os.path.join(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))]
import fire
import json
import os
import numpy as np
import tensorflow as tf
import tflex
import random
import model, sample, encoder
import logging
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3' # FATAL
logging.getLogger('tensorflow').setLevel(logging.FATAL)
def interact_model(
    model_name='117M',
    asker=None,
    responder=None,
    restore_from=None,
    seed=None,
    length=None,
    temperature=1,
    top_k=0,
    top_p=0.0,
    penalize=0,
    prompt=None
):
    """
    Interactively chat with the model.

    :model_name=117M : String, which model to use
    :asker : Name (present in the training data) you chat as. Required.
    :responder : Name (present in the training data) the model chats as. Required.
    :restore_from=None : Checkpoint directory; defaults to models/<model_name>
    :seed=None : Integer seed for random number generators, fix seed to reproduce
     results
    :length=None : Number of tokens in generated text, if None (default), is
     determined by model hyperparameters
    :temperature=1 : Float value controlling randomness in boltzmann
     distribution. Lower temperature results in less random completions.
    :top_k=0 : Integer value controlling diversity. 0 means no restriction;
     40 generally is a good value.
    :top_p=0.0 : Float value controlling diversity (nucleus sampling),
     overriding top_k if set to a value > 0. A good setting is 0.9.
    :penalize=0.0 : Float value controlling "used" (repetition) penalty.
    :prompt=None : Currently unused; kept for CLI compatibility.
    """
    if asker is None:
        raise Exception("Add a name present in the training dataset that you will be chatting as")
    if responder is None:
        raise Exception("Add a name present in the training dataset that gpt will be chatting as")

    enc = encoder.get_encoder(model_name)
    hparams = model.default_hparams()
    with open(os.path.join('models', model_name, 'hparams.json')) as f:
        hparams.override_from_dict(json.load(f))
    if length is None:
        length = hparams.n_ctx // 2
    elif length > hparams.n_ctx:
        raise ValueError("Can't get samples longer than window size: %s" % hparams.n_ctx)

    with tflex.Session(graph=tf.Graph()) as sess:
        context = tf.placeholder(tf.int32, [1, None])
        np.random.seed(seed)
        tf.set_random_seed(seed)
        output = sample.sample_sequence(
            hparams=hparams, length=length,
            context=context,
            batch_size=1,
            temperature=temperature, top_k=top_k, top_p=top_p, penalize=penalize
        )

        saver = tflex.Saver()
        if restore_from is None:
            restore_from = os.path.join('models', model_name)
        ckpt = tflex.latest_checkpoint(restore_from)
        saver.restore(sess, ckpt)

        input_ = ''
        # Fake timestamp embedded in the prompt; the training data apparently
        # contained "(timestamp) name:" message prefixes.
        time = 1924862493344
        while True:
            time = increase_time(time)
            input_ = input_ + f'({time}) {asker}: ' + input(f"{asker}: ")
            time = increase_time(time)
            input_ = input_ + f'\n ({time}) {responder}: '
            if len(input_) > 1 and input_.endswith('\n'):
                input_ = input_[:-1]
            context_tokens = enc.encode(input_)
            out = sess.run(output, feed_dict={
                context: [context_tokens]
            })[:, len(context_tokens):]
            # Decode once (the original decoded the same tokens twice, the
            # first result being discarded), then cut the continuation where
            # the model starts speaking for the asker again.
            decoded = enc.decode(out[0])
            text = decoded.split(f') {asker}', 1)[0]
            print(f'\n ({time}) {responder}: ' + text.rsplit('(', 1)[0])
            input_ = input_ + text
            sys.stdout.flush()
def increase_time(time):
    """Advance the fake message timestamp by a random step of 100-1000 ticks."""
    step = random.randint(100, 1000)
    return time + step
if __name__ == '__main__':
    # Expose interact_model's keyword arguments as a command-line interface.
    fire.Fire(interact_model)
| [
"encoder.get_encoder",
"os.path.abspath",
"json.load",
"numpy.random.seed",
"fire.Fire",
"tflex.Saver",
"random.randint",
"tflex.latest_checkpoint",
"tensorflow.set_random_seed",
"tensorflow.placeholder",
"model.default_hparams",
"sys.stdout.flush",
"sample.sample_sequence",
"tensorflow.Gr... | [((2235, 2266), 'encoder.get_encoder', 'encoder.get_encoder', (['model_name'], {}), '(model_name)\n', (2254, 2266), False, 'import model, sample, encoder\n'), ((2281, 2304), 'model.default_hparams', 'model.default_hparams', ([], {}), '()\n', (2302, 2304), False, 'import model, sample, encoder\n'), ((4164, 4189), 'fire.Fire', 'fire.Fire', (['interact_model'], {}), '(interact_model)\n', (4173, 4189), False, 'import fire\n'), ((430, 461), 'logging.getLogger', 'logging.getLogger', (['"""tensorflow"""'], {}), "('tensorflow')\n", (447, 461), False, 'import logging\n'), ((2678, 2713), 'tensorflow.placeholder', 'tf.placeholder', (['tf.int32', '[1, None]'], {}), '(tf.int32, [1, None])\n', (2692, 2713), True, 'import tensorflow as tf\n'), ((2722, 2742), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (2736, 2742), True, 'import numpy as np\n'), ((2751, 2775), 'tensorflow.set_random_seed', 'tf.set_random_seed', (['seed'], {}), '(seed)\n', (2769, 2775), True, 'import tensorflow as tf\n'), ((2793, 2956), 'sample.sample_sequence', 'sample.sample_sequence', ([], {'hparams': 'hparams', 'length': 'length', 'context': 'context', 'batch_size': '(1)', 'temperature': 'temperature', 'top_k': 'top_k', 'top_p': 'top_p', 'penalize': 'penalize'}), '(hparams=hparams, length=length, context=context,\n batch_size=1, temperature=temperature, top_k=top_k, top_p=top_p,\n penalize=penalize)\n', (2815, 2956), False, 'import model, sample, encoder\n'), ((3024, 3037), 'tflex.Saver', 'tflex.Saver', ([], {}), '()\n', (3035, 3037), False, 'import tflex\n'), ((3146, 3183), 'tflex.latest_checkpoint', 'tflex.latest_checkpoint', (['restore_from'], {}), '(restore_from)\n', (3169, 3183), False, 'import tflex\n'), ((4105, 4130), 'random.randint', 'random.randint', (['(100)', '(1000)'], {}), '(100, 1000)\n', (4119, 4130), False, 'import random\n'), ((2319, 2369), 'os.path.join', 'os.path.join', (['"""models"""', 'model_name', '"""hparams.json"""'], {}), "('models', 
model_name, 'hparams.json')\n", (2331, 2369), False, 'import os\n'), ((2412, 2424), 'json.load', 'json.load', (['f'], {}), '(f)\n', (2421, 2424), False, 'import json\n'), ((3096, 3130), 'os.path.join', 'os.path.join', (['"""models"""', 'model_name'], {}), "('models', model_name)\n", (3108, 3130), False, 'import os\n'), ((3987, 4005), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (4003, 4005), False, 'import sys\n'), ((103, 128), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (118, 128), False, 'import os\n'), ((198, 223), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (213, 223), False, 'import os\n'), ((2639, 2649), 'tensorflow.Graph', 'tf.Graph', ([], {}), '()\n', (2647, 2649), True, 'import tensorflow as tf\n')] |
import torch
from torch.nn import ModuleList, Parameter
from dpm.distributions import (
Bernoulli, Categorical, Normal, Data, Distribution
)
from dpm.train import train
from dpm.criterion import cross_entropy
import numpy as np
class LinearDiscriminantAnalysis(Distribution):
    """LDA: Gaussian class conditionals with per-class means and a single
    shared covariance (parameterised by its Cholesky factor ``scale``)."""

    def __init__(self, n_classes=2, n_features=10):
        super().__init__()
        # Uniform label prior over the classes.
        self.y_dist = Categorical(probs=[1.0/n_classes for _ in range(n_classes)])
        # One mean vector per class; covariance factor shared by all classes.
        self.x_means = Parameter(torch.randn(n_classes, n_features).float())
        self.scale = Parameter(torch.eye(n_features).float().cholesky())
        self.n_dims = n_features
        self.n_classes = n_classes
        self.n_features = n_features

    def create_dist(self, class_num):
        # Class-conditional Gaussian for the given class index.
        # NOTE(review): passes the full covariance matrix as dpm.Normal's
        # second argument — confirm dpm expects covariance here, not scale.
        return Normal(self.x_means[class_num], self.covariance, learnable=False)

    def log_prob(self, x, y):
        """Joint log p(x, y): label prior plus the class-conditional density
        selected per-row by the integer label ``y``."""
        ids = y.long()
        log_probs = torch.cat([self.create_dist(i).log_prob(x).view(-1, 1)
                               for i in range(self.n_classes)], dim=1)
        y_probs = self.y_dist.log_prob(y).view(-1, 1)
        return (y_probs + log_probs.gather(1, ids.view(-1, 1))).sum(-1)

    def sample(self, batch_size, return_y=False):
        """Ancestral sampling: draw labels from the prior, then pick the
        matching class-conditional draw for each row."""
        indices = self.y_dist.sample(batch_size).view(-1).long()
        samples = torch.stack([self.create_dist(i).sample(batch_size)
                               for i in range(self.n_classes)])
        # if you want class, return indicies as well
        if return_y:
            return samples[indices, np.arange(batch_size)], indices.view(-1, 1)
        return samples[indices, np.arange(batch_size)]

    def fit(self, x, y, **kwargs):
        """Fit by minimising cross-entropy on the (x, y) data."""
        data = Data(x, y)
        stats = train(data, self, cross_entropy, **kwargs)
        return stats

    def predict(self, x):
        """MAP class prediction: argmax over prior + conditional log-probs."""
        log_probs = torch.cat([self.create_dist(i).log_prob(x).view(-1, 1)
                               for i in range(self.n_classes)], dim=1)
        y_probs = self.y_dist.logits.expand_as(log_probs)
        probs = y_probs + log_probs
        return probs.max(dim=1)[1].view(-1, 1)

    @property
    def covariance(self):
        # Reconstruct the PSD covariance from its Cholesky-style factor.
        return torch.mm(self.scale, self.scale.t())
# BASE
class GenerativeClassifier(Distribution):
    """Generic generative classifier: a label prior ``y_dist`` plus one
    class-conditional distribution per class in ``x_dist``."""

    def __init__(self, y_dist, x_dist):
        super().__init__()
        self.y_dist = y_dist
        self.x_dist = ModuleList(x_dist)
        self.n_dims = x_dist[0].n_dims

    def log_prob(self, x, y):
        """Joint log p(x, y), selecting each row's conditional by its label."""
        labels = y.long()
        per_class = [dist.log_prob(x).view(-1, 1) for dist in self.x_dist]
        cond = torch.cat(per_class, dim=1)
        prior = self.y_dist.log_prob(y).view(-1, 1)
        chosen = cond.gather(1, labels.view(-1, 1))
        return (prior + chosen).sum(-1)

    def sample(self, batch_size, return_y=False):
        """Ancestral sampling: labels from the prior, then the matching
        class-conditional draw for each row (optionally with the labels)."""
        labels = self.y_dist.sample(batch_size).view(-1).long()
        stacked = torch.stack([dist.sample(batch_size) for dist in self.x_dist])
        picked = stacked[labels, np.arange(batch_size)]
        if return_y:
            return picked, labels.view(-1, 1)
        return picked

    def fit(self, x, y, **kwargs):
        """Fit by minimising cross-entropy on the (x, y) data."""
        return train(Data(x, y), self, cross_entropy, **kwargs)

    def predict(self, x):
        """MAP class prediction: argmax over prior + conditional log-probs."""
        per_class = [dist.log_prob(x).view(-1, 1) for dist in self.x_dist]
        cond = torch.cat(per_class, dim=1)
        prior = self.y_dist.logits.expand_as(cond)
        return (prior + cond).max(dim=1)[1].view(-1, 1)
# Specific Models
class QuadraticDiscriminantAnalysis(GenerativeClassifier):
    """QDA: uniform label prior with one Gaussian (identity scale) per class."""

    def __init__(self, n_classes=2, n_features=10):
        prior = Categorical(probs=[1.0 / n_classes] * n_classes)
        conditionals = [Normal(loc=torch.randn(n_features), scale=torch.eye(n_features))
                        for _ in range(n_classes)]
        super().__init__(prior, conditionals)
        self.n_classes = n_classes
        self.n_features = n_features
# Naive Independece Assumptions
class GaussianNaiveBayes(GenerativeClassifier):
    """Naive Bayes with independent (unit-scale) Gaussian features per class."""

    def __init__(self, n_classes=2, n_features=10):
        prior = Categorical(probs=[1.0 / n_classes] * n_classes)
        conditionals = [Normal(loc=torch.randn(n_features), scale=torch.ones(n_features))
                        for _ in range(n_classes)]
        super().__init__(prior, conditionals)
        self.n_classes = n_classes
        self.n_features = n_features
class BernoulliNaiveBayes(GenerativeClassifier):
    """Two-class naive Bayes with independent Bernoulli features."""

    def __init__(self, n_features=10):
        prior = Bernoulli(probs=torch.rand(1))
        conditionals = [Bernoulli(probs=torch.rand(n_features)) for _ in range(2)]
        super().__init__(prior, conditionals)
        self.n_classes = 2
        self.n_features = n_features
class MultinomialNaiveBayes(GenerativeClassifier):
    """Naive Bayes with per-feature categorical distributions over
    ``n_states`` discrete values, initialised uniform (0.5 per state)."""

    def __init__(self, n_classes=2, n_features=10, n_states=4):
        prior = Categorical(probs=[1.0 / n_classes] * n_classes)
        conditionals = [
            Categorical(probs=[[0.5] * n_states for _ in range(n_features)])
            for _ in range(n_classes)
        ]
        super().__init__(prior, conditionals)
        self.n_classes = n_classes
        self.n_features = n_features
        self.n_states = n_states
| [
"torch.ones",
"dpm.train.train",
"torch.eye",
"torch.nn.ModuleList",
"torch.randn",
"dpm.distributions.Data",
"numpy.arange",
"torch.rand",
"dpm.distributions.Normal"
] | [((754, 819), 'dpm.distributions.Normal', 'Normal', (['self.x_means[class_num]', 'self.covariance'], {'learnable': '(False)'}), '(self.x_means[class_num], self.covariance, learnable=False)\n', (760, 819), False, 'from dpm.distributions import Bernoulli, Categorical, Normal, Data, Distribution\n'), ((1658, 1668), 'dpm.distributions.Data', 'Data', (['x', 'y'], {}), '(x, y)\n', (1662, 1668), False, 'from dpm.distributions import Bernoulli, Categorical, Normal, Data, Distribution\n'), ((1685, 1727), 'dpm.train.train', 'train', (['data', 'self', 'cross_entropy'], {}), '(data, self, cross_entropy, **kwargs)\n', (1690, 1727), False, 'from dpm.train import train\n'), ((2329, 2347), 'torch.nn.ModuleList', 'ModuleList', (['x_dist'], {}), '(x_dist)\n', (2339, 2347), False, 'from torch.nn import ModuleList, Parameter\n'), ((3201, 3211), 'dpm.distributions.Data', 'Data', (['x', 'y'], {}), '(x, y)\n', (3205, 3211), False, 'from dpm.distributions import Bernoulli, Categorical, Normal, Data, Distribution\n'), ((3228, 3270), 'dpm.train.train', 'train', (['data', 'self', 'cross_entropy'], {}), '(data, self, cross_entropy, **kwargs)\n', (3233, 3270), False, 'from dpm.train import train\n'), ((1584, 1605), 'numpy.arange', 'np.arange', (['batch_size'], {}), '(batch_size)\n', (1593, 1605), True, 'import numpy as np\n'), ((3127, 3148), 'numpy.arange', 'np.arange', (['batch_size'], {}), '(batch_size)\n', (3136, 3148), True, 'import numpy as np\n'), ((478, 512), 'torch.randn', 'torch.randn', (['n_classes', 'n_features'], {}), '(n_classes, n_features)\n', (489, 512), False, 'import torch\n'), ((4553, 4566), 'torch.rand', 'torch.rand', (['(1)'], {}), '(1)\n', (4563, 4566), False, 'import torch\n'), ((1508, 1529), 'numpy.arange', 'np.arange', (['batch_size'], {}), '(batch_size)\n', (1517, 1529), True, 'import numpy as np\n'), ((3051, 3072), 'numpy.arange', 'np.arange', (['batch_size'], {}), '(batch_size)\n', (3060, 3072), True, 'import numpy as np\n'), ((3853, 3876), 'torch.randn', 
'torch.randn', (['n_features'], {}), '(n_features)\n', (3864, 3876), False, 'import torch\n'), ((3884, 3905), 'torch.eye', 'torch.eye', (['n_features'], {}), '(n_features)\n', (3893, 3905), False, 'import torch\n'), ((4267, 4290), 'torch.randn', 'torch.randn', (['n_features'], {}), '(n_features)\n', (4278, 4290), False, 'import torch\n'), ((4298, 4320), 'torch.ones', 'torch.ones', (['n_features'], {}), '(n_features)\n', (4308, 4320), False, 'import torch\n'), ((4611, 4633), 'torch.rand', 'torch.rand', (['n_features'], {}), '(n_features)\n', (4621, 4633), False, 'import torch\n'), ((553, 574), 'torch.eye', 'torch.eye', (['n_features'], {}), '(n_features)\n', (562, 574), False, 'import torch\n')] |
import json
import hashlib
from typing import List, Set
from collections import defaultdict
import feather
import numpy as np
import pandas as pd
from scipy import sparse as sp
import config as cfg
class ProductEncoder:
    """Bidirectional mapping between product ids and dense integer indices,
    built from the ``product_id`` column of a products CSV."""

    def __init__(self, product_csv_path):
        pids = pd.read_csv(product_csv_path).product_id.values
        self.product_idx = {pid: idx for idx, pid in enumerate(pids)}
        self.product_pid = {idx: pid for idx, pid in enumerate(pids)}

    def toIdx(self, x):
        """Map a product id (or an iterable of ids) to dense indices."""
        if type(x) == str:
            return self.product_idx[x]
        return [self.product_idx[pid] for pid in x]

    def toPid(self, x):
        """Map a dense index (or an iterable of indices) back to product ids."""
        if type(x) == int:
            return self.product_pid[x]
        return [self.product_pid[idx] for idx in x]

    @property
    def num_products(self):
        """Number of distinct products seen in the CSV."""
        return len(self.product_idx)
class ProductFeatEncoder:
    """Encodes product metadata: every object-dtype (categorical) column gets
    dense integer codes, and a per-product feature dict is built for lookup."""

    def __init__(self, product_csv_path, init=True, exclude=None):
        """Load the products table and (optionally) build the encoders.

        Args:
            product_csv_path: CSV with a ``product_id`` column plus features.
            init: if True, build the code maps and feature dicts immediately.
            exclude: optional list of column names to drop before encoding.
        """
        self.products = pd.read_csv(product_csv_path)
        self.products_map = defaultdict(dict)
        self.features = defaultdict(dict)
        if exclude:
            self.products = self.products.drop(exclude, axis=1)
        if init:
            self.create_encoder()
            self.create_features()

    def create_encoder(self):
        """Assign a dense integer code to every unique value of each
        object-dtype column (including product_id itself)."""
        cols = self.products.dtypes[self.products.dtypes == "object"].index.tolist()
        for col in cols:
            self.products_map[col] = defaultdict(dict)
            for idx, pid in enumerate(self.products[col].unique()):
                self.products_map[col]["pid"][pid] = idx
                self.products_map[col]["idx"][idx] = pid
                self.products_map[col]["gidx"][idx] = pid

    def create_features(self):
        """Build a {product_id: {feature_name: value}} lookup, replacing
        categorical values with their integer codes."""
        for tup in self.products.set_index("product_id").itertuples():
            for name, val in tup._asdict().items():
                if name == "Index":
                    continue  # the index field is the product_id itself
                val = (
                    self.products_map[name]["pid"][val]
                    if name in self.products_map
                    else val
                )
                self.features[tup.Index][name] = val

    def product_features(self, ids):
        """Return the feature dict for one product id, or a list of dicts."""
        if type(ids) == str:
            return self.features[ids]
        return [self.features[product_id] for product_id in ids]

    def product_features_idx(self, ind):
        """Return feature dict(s) addressed by dense index instead of id."""
        if type(ind) == str:
            return self.features[ind]
        return [
            self.features[self.products_map["product_id"]["idx"][idx]] for idx in ind
        ]
class TrainingSample:
    """One training example: a client's sparse history row, the set of target
    item indices, and an optional client identifier."""

    def __init__(
        self, row: sp.coo_matrix, target_items: Set[int], client_id: str = None
    ):
        # Plain attribute container; no validation or copying is performed.
        self.client_id = client_id
        self.target_items = target_items
        self.row = row
def make_coo_row(
    transaction_history,
    product_encoder: ProductEncoder,
    last_transaction=False,
    normalize=True,
    entity=False,
):
    """Build a 1 x num_products sparse row of item counts from a history.

    Args:
        transaction_history: list of transaction dicts with a "products" list.
        product_encoder: maps product ids to dense column indices.
        last_transaction: if True, use only the most recent transaction.
        normalize: if True, divide each count by the total number of items.
        entity: if True, emit binary indicators (overrides normalized values).

    Returns:
        A float32 scipy coo_matrix of shape (1, product_encoder.num_products).
    """
    history = transaction_history[-1:] if last_transaction else transaction_history
    counts = defaultdict(int)
    for transaction in history:
        for item in transaction["products"]:
            counts[product_encoder.toIdx(item["product_id"])] += 1.0
    total = sum(counts.values())
    cols, vals = [], []
    for col, count in counts.items():
        cols.append(col)
        if normalize:
            count = count / total
        if entity:
            count = 1
        vals.append(count)
    return sp.coo_matrix(
        (np.array(vals).astype(np.float32), ([0] * len(cols), cols)),
        shape=(1, product_encoder.num_products),
    )
def cache_to_feather(cache_fm, num_products=43038):
    """Flatten a {model_name: {product_idx: score}} cache into one dense
    per-product score table and persist it as a feather file."""
    frames = [pd.Series(scores).apply(pd.Series) for scores in cache_fm.values()]
    merged = frames[0]
    for frame in frames[1:]:
        merged = merged.join(frame)
    # Dense row per product index; missing products get zero scores.
    merged = merged.sort_index().reindex(range(num_products)).fillna(0)
    feather.write_dataframe(merged, cfg.ASSETS_DIR / "implicit_scores.feather")
def create_products_in_transaction(
    transaction_history, product_encoder: ProductEncoder, outfile
):
    """Write one line per transaction of space-separated dense product indices
    (item2vec training corpus format); flush once at the end."""
    for transaction in transaction_history:
        indices = [
            str(product_encoder.toIdx(item["product_id"]))
            for item in transaction["products"]
        ]
        outfile.write(" ".join(indices) + "\n")
    outfile.flush()
def update_item_cost(transaction_history, product_encoder, storage):
    """Maintain a running per-item unit-cost estimate in ``storage``.

    The first observation (a zero slot) sets the cost directly; later
    observations are blended in with equal weight to the old estimate.
    """
    for transaction in transaction_history:
        for item in transaction["products"]:
            idx = product_encoder.toIdx(item["product_id"])
            unit_cost = item["s"] / max(item["quantity"], 1)
            if storage[idx] == 0:
                storage[idx] = unit_cost
            else:
                storage[idx] = (storage[idx] + unit_cost) / 2.0
def average_precision(actual, recommended, k=30):
    """Average precision at k: sum over the first k ranks of
    (hits so far / rank) for recommended items found in ``actual``,
    divided by k."""
    hits = 0
    total = 0.0
    for rank in range(k):
        candidate = recommended[rank] if rank < len(recommended) else None
        if candidate is not None and candidate in actual:
            hits += 1
            total += hits / (rank + 1)
    return total / k
def normalized_average_precision(actual, recommended, k=30):
    """AP@k divided by the best achievable AP@k for this ``actual`` set
    (0.0 when there are no relevant items)."""
    actual = set(actual)
    if not actual:
        return 0.0
    score = average_precision(actual, recommended, k=k)
    ideal = average_precision(actual, list(actual)[:k], k=k)
    return score / ideal
def recall_k(actual, recommended, k=30):
    """Fraction of ``actual`` items that appear in the top-k recommendations."""
    relevant = set(actual)
    retrieved = set(recommended[:k])
    return len(relevant & retrieved) / max(len(relevant), 1)
def get_shard_path(n_shard, jsons_dir=cfg.JSONS_DIR):
    """Path of shard ``n_shard`` (zero-padded to two digits) in ``jsons_dir``."""
    return f"{jsons_dir}/{n_shard:02d}.jsons.splitted"
def md5_hash(x):
    """Deterministic non-negative integer hash of string ``x`` via MD5."""
    digest = hashlib.md5(x.encode()).hexdigest()
    return int(digest, 16)
def get_check_users():
    """Load the client ids listed in the check-query file.

    Each line holds two tab-separated fields; only the ``client_id`` key of
    the JSON first field is kept.
    """
    client_ids = []
    with open(cfg.CHECK_QUERY_PATH) as f:
        for line in f:
            query_data, _answer = line.strip().split("\t")
            client_ids.append(json.loads(query_data)["client_id"])
    return client_ids
| [
"json.loads",
"feather.write_dataframe",
"pandas.read_csv",
"collections.defaultdict",
"numpy.array",
"pandas.Series"
] | [((3096, 3112), 'collections.defaultdict', 'defaultdict', (['int'], {}), '(int)\n', (3107, 3112), False, 'from collections import defaultdict\n'), ((4081, 4159), 'feather.write_dataframe', 'feather.write_dataframe', (['df_scores', "(cfg.ASSETS_DIR / 'implicit_scores.feather')"], {}), "(df_scores, cfg.ASSETS_DIR / 'implicit_scores.feather')\n", (4104, 4159), False, 'import feather\n'), ((1085, 1114), 'pandas.read_csv', 'pd.read_csv', (['product_csv_path'], {}), '(product_csv_path)\n', (1096, 1114), True, 'import pandas as pd\n'), ((1143, 1160), 'collections.defaultdict', 'defaultdict', (['dict'], {}), '(dict)\n', (1154, 1160), False, 'from collections import defaultdict\n'), ((1185, 1202), 'collections.defaultdict', 'defaultdict', (['dict'], {}), '(dict)\n', (1196, 1202), False, 'from collections import defaultdict\n'), ((1575, 1592), 'collections.defaultdict', 'defaultdict', (['dict'], {}), '(dict)\n', (1586, 1592), False, 'from collections import defaultdict\n'), ((3850, 3865), 'pandas.Series', 'pd.Series', (['item'], {}), '(item)\n', (3859, 3865), True, 'import pandas as pd\n'), ((6070, 6092), 'json.loads', 'json.loads', (['query_data'], {}), '(query_data)\n', (6080, 6092), False, 'import json\n'), ((360, 389), 'pandas.read_csv', 'pd.read_csv', (['product_csv_path'], {}), '(product_csv_path)\n', (371, 389), True, 'import pandas as pd\n'), ((3609, 3625), 'numpy.array', 'np.array', (['values'], {}), '(values)\n', (3617, 3625), True, 'import numpy as np\n')] |
import os
os.environ["BLIS_NUM_THREADS"] = '1'
os.environ["OPENBLAS_NUM_THREADS"] = '1'
os.environ["MKL_NUM_THREADS"] = '1'
os.environ["OMP_NUM_THREADS"] = '1'
os.environ["NUMEXPR_NUM_THREADS"] = '1'
os.environ["VECLIB_MAXIMUM_THREADS"] = "1"
os.environ["MKL_DEBUG_CPU_TYPE"] = "5"
import numpy as np
from numpy import sqrt, log
from iwal.utils import *
from sklearn.linear_model import LogisticRegression
import sys
import time
import warnings
import pickle
from sklearn.exceptions import ConvergenceWarning
warnings.filterwarnings("ignore", category=DeprecationWarning)
warnings.filterwarnings("ignore", category=FutureWarning)
warnings.filterwarnings("ignore", category=ConvergenceWarning)
import multiprocessing as mp
import argparse
import mkl
mkl.set_num_threads(1)
def get_passive(X_tr, Y_tr, X_te, Y_te, batch=100, trials=10):
    """Train passive (random-order) logistic-regression baselines.

    At every checkpoint a model is refit on a growing random prefix of the
    training data; the training accuracy is averaged over *trials* random
    permutations.  NOTE(review): acc_te is returned but never filled here.
    """
    acc_tr, acc_te, checkpoints = [], [], []
    n_round = int(X_tr.shape[0] / batch) + 1
    orders = [np.random.permutation(X_tr.shape[0]) for _ in range(trials)]
    for rnd in range(1, n_round):
        print(rnd, n_round)
        checkpoints.append([])
        score_sum = 0
        for t in range(trials):
            model = LogisticRegression()
            cutoff = min(batch * rnd, X_tr.shape[0] - 1)
            model.fit(X_tr[orders[t]][:cutoff], Y_tr[orders[t]][:cutoff])
            checkpoints[-1].append(cutoff)
            score_sum += model.score(X_tr, Y_tr)
        acc_tr.append(score_sum / trials)
    return acc_tr, acc_te, checkpoints
def get_model(X, Y, w, iters=1000, b=True, tol=1e-3):
    """Fit a (practically unregularized, C=1e8) weighted logistic regression."""
    model = LogisticRegression(C=1e8, max_iter=iters, tol=tol, solver='lbfgs',
                               fit_intercept=b)
    model.fit(X, Y, sample_weight=w)
    return model
def get_alternative(X, Y, x, y, w):
    """Fit the alternative model that disagrees with label *y* at point *x*.

    The data are recentered at *x* (so *x* maps to the origin) and the
    intercept gets an infinitesimal nudge so that the prediction at the
    origin breaks ties toward label 1 - y (checked by the assert in
    IWAL.get_query).
    """
    clfa = get_model(X - x, Y, w, b=False)
    # tiny bias toward the opposite label 1 - y at the query point
    clfa.intercept_ += 1e-20 * (1 - y)
    return clfa
class IWAL():
    """Importance-Weighted Active Learning with logistic-regression learners."""

    def __init__(self, X_tr, Y_tr, X_te, Y_te, c0=.01):
        print('starting training', c0)
        self.X_tr = X_tr
        self.X_te = X_te
        self.Y_tr = Y_tr
        self.Y_te = Y_te
        self.n = X_tr.shape[0]
        self.c0 = c0  # base constant of the query threshold c0*log(k)/k
        self.c1 = 5 + 2 * np.sqrt(2)
        self.c2 = 5
        self.errors = 0
        self.P = []        # query probabilities over time
        self.G = []        # disagreement gaps over time (diagnostic mode)
        self.entropy = []  # predictive entropies (diagnostic mode)

    def get_query(self, clf, idx, k, diagnostic=False, thresholdtype='iwal0'):
        """Return the probability of querying the label of sample *idx* at round *k*."""
        x = (self.X_tr[idx])[np.newaxis]
        y = clf.predict(x)[0]
        clfa = get_alternative(self.X_tr[self.sk_idxs], self.Y_f, x, y, self.sk_w)
        # G: weighted-accuracy gap between current model and best disagreeing model
        G = (clf.score(self.X_tr[self.sk_idxs], self.Y_f, sample_weight=self.sk_w)
             - clfa.score(self.X_tr[self.sk_idxs] - x, self.Y_f, sample_weight=self.sk_w))
        assert clfa.predict(x - x)[0] == 1 - y, "pred: {}, y: {}".format(clfa.predict(x)[0], 1 - y)
        # print(G)
        if diagnostic:
            print('index', idx, 'actual', self.Y_tr[idx], 'predicted', y,
                  'G', G, -sum(clf.predict_log_proba(x)[0] * clf.predict_proba(x)[0]))
            self.entropy.append(-sum(clf.predict_log_proba(x)[0] * clf.predict_proba(x)[0]))
            self.G.append(G)
        c = self.c0 * log(k) / (k)
        if thresholdtype == 'iwal0':
            threshold = np.sqrt(c) + c
        else:
            # 'iwal1' scales the threshold by the current training score
            threshold = np.sqrt(c * self.score) + c
        if G < threshold:
            P = 1
        else:
            if thresholdtype == 'iwal0':
                a, b = self.c1, -self.c1 + 1
            else:
                a, b = self.c1 * np.sqrt(self.score), (-self.c1 + 1) * np.sqrt(self.score)
            d, e = self.c2, -self.c2 + 1
            # closed-form solution of the IWAL query-probability equation
            P = ((a * np.sqrt(c) + np.sqrt(a ** 2 * c + 4 * d * c * (G - b * np.sqrt(c) - e * c))) / (
                2 * (G - b * np.sqrt(c) - e * c))) ** 2
        self.P.append(P)
        return P

    def run(self, nqueries, init_queries=100, batch=100, diagnostic=False, querystyle='reg', thresholdtype='iwal0'):
        """Run the active-learning loop for *nqueries* rounds."""
        # retraining
        self.acc_tr = []
        self.acc_te = []
        # weighted
        self.acc_model_tr = []
        self.acc_model_te = []
        self.checkpoints = []
        self.idxs = np.zeros(self.X_tr.shape[0], dtype=bool)
        idxs_seed = np.random.choice(np.arange(self.n), init_queries, replace=False)
        self.Y_f = [self.Y_tr[i] for i in idxs_seed]  # len num queries
        self.idxs[idxs_seed] = True
        self.sk_idxs = idxs_seed.tolist()  # len num queries
        self.sk_w = [1 for i in range(init_queries)]  # len num queries
        self.clf = get_model(self.X_tr[self.sk_idxs], self.Y_f, self.sk_w)
        self.score = self.clf.score(self.X_tr[self.sk_idxs], self.Y_f, sample_weight=self.sk_w)
        init_time = time.time()
        total_queries = init_queries  # how many we have paid for
        k = init_queries  # round
        while k < nqueries:
            k = k + 1
            idx = k % self.X_tr.shape[0]
            P = self.get_query(self.clf, idx, k, diagnostic=diagnostic, thresholdtype=thresholdtype)
            train_flag = False
            if (querystyle == 'reg' and np.random.rand() < P) or P == 1:  # condition to take a query
                train_flag = True
                self.sk_idxs.append(idx)
                self.sk_w.append(1 / P)  # importance weight
                self.Y_f.append(self.Y_tr[idx])
                if self.idxs[idx] == 0:
                    total_queries += 1
                self.idxs[idx] = 1
                if total_queries % batch == 0:
                    # periodic evaluation checkpoint
                    self.checkpoints.append(total_queries)
                    self.acc_model_tr.append(self.clf.score(self.X_tr, self.Y_tr))
                    self.acc_model_te.append(self.clf.score(self.X_te, self.Y_te))
                    # retrain from scratch (unit weights) on the queried subset
                    clfr = get_model(self.X_tr[self.idxs], self.Y_tr[self.idxs],
                                     np.ones(np.sum(self.idxs.astype(int))))
                    self.acc_tr.append(clfr.score(self.X_tr, self.Y_tr))
                    self.acc_te.append(clfr.score(self.X_te, self.Y_te))
                    print('rounds', k, 'queries', total_queries)
                    print('train_accuracy', clfr.score(self.X_tr, self.Y_tr))
                    print('errors', self.errors, 'time', time.time() - init_time)
            elif querystyle == 'ora':
                # oracle style: trust the current model's own prediction as label
                train_flag = True
                self.sk_idxs.append(idx)
                self.sk_w.append(1)
                self.Y_f.append(self.clf.predict(self.X_tr[idx][np.newaxis])[0])
            if train_flag:
                self.clf = get_model(self.X_tr[self.sk_idxs], self.Y_f, self.sk_w)
                self.score = self.clf.score(self.X_tr[self.sk_idxs], self.Y_f, sample_weight=self.sk_w)
if __name__ == "__main__":
    print(np.__config__.show())
    parser = argparse.ArgumentParser()
    parser.add_argument('--data', help='dataset', type=str)
    parser.add_argument('--initqueries', help='init number queries', type=int, default=250)
    parser.add_argument('--batch', help='number queries every checkpoint', type=int, default=250)
    parser.add_argument('--passes', help='number of passes', type=int, )
    parser.add_argument('--thresholdtype', help='threshold type', type=str, choices=['iwal0', 'iwal1'], )
    parser.add_argument('--querystyle', help='sampling style', type=str, choices=['reg', 'ora'], )
    parser.add_argument('--diagnostic', help='diagnostic', type=bool, default=False)
    parser.add_argument('--clist', nargs='+', type=float, default=[1e-5, 1e-4, 1e-3, 1e-2, 1e-1, 1])
    opts = parser.parse_args()
    # select dataset loader (helpers come from iwal.utils)
    if opts.data == 'cifar64':
        X_tr, Y_tr, X_te, Y_te = get_cifar64_data()
    elif opts.data == 'mnist_half':
        X_tr, Y_tr, X_te, Y_te = get_MNISThalf()
    elif opts.data == 'fashion':
        X_tr, Y_tr, X_te, Y_te = get_fashion_data()
    elif opts.data == 'svhn':
        X_tr, Y_tr, X_te, Y_te = get_svhn_data()
    else:
        raise Exception("Dataset not found.")
    C0 = opts.clist
    def run_single(opts, X_tr, Y_tr, X_te, Y_te, c0, i):
        # train one IWAL instance for a single c0 and pickle the result
        np.random.seed(123)
        s = IWAL(X_tr, Y_tr, X_te, Y_te, c0=c0)
        s.run(nqueries=opts.passes * X_tr.shape[0], init_queries=opts.initqueries, diagnostic=opts.diagnostic,
              querystyle=opts.querystyle, thresholdtype=opts.thresholdtype, batch=opts.batch)
        name = 'log/{}_{}_{}_{}'.format(opts.data, int(-np.log10(c0)), opts.querystyle, opts.thresholdtype)
        with open(name + '.pkl', 'wb') as f:
            pickle.dump((s, c0), f)
        return s
    print(opts)
    # one worker process per c0 value
    pool = mp.Pool(len(C0))
    final = pool.starmap(run_single, [(opts, X_tr, Y_tr, X_te, Y_te, c0, i) for i, c0 in zip(range(len(C0)), C0)])
| [
"pickle.dump",
"numpy.random.seed",
"argparse.ArgumentParser",
"numpy.log",
"warnings.filterwarnings",
"numpy.random.rand",
"numpy.__config__.show",
"mkl.set_num_threads",
"numpy.zeros",
"time.time",
"sklearn.linear_model.LogisticRegression",
"numpy.arange",
"numpy.random.permutation",
"nu... | [((512, 574), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {'category': 'DeprecationWarning'}), "('ignore', category=DeprecationWarning)\n", (535, 574), False, 'import warnings\n'), ((575, 632), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {'category': 'FutureWarning'}), "('ignore', category=FutureWarning)\n", (598, 632), False, 'import warnings\n'), ((633, 695), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {'category': 'ConvergenceWarning'}), "('ignore', category=ConvergenceWarning)\n", (656, 695), False, 'import warnings\n'), ((754, 776), 'mkl.set_num_threads', 'mkl.set_num_threads', (['(1)'], {}), '(1)\n', (773, 776), False, 'import mkl\n'), ((1560, 1655), 'sklearn.linear_model.LogisticRegression', 'LogisticRegression', ([], {'C': '(100000000.0)', 'max_iter': 'iters', 'tol': 'tol', 'solver': '"""lbfgs"""', 'fit_intercept': 'b'}), "(C=100000000.0, max_iter=iters, tol=tol, solver='lbfgs',\n fit_intercept=b)\n", (1578, 1655), False, 'from sklearn.linear_model import LogisticRegression\n'), ((6713, 6738), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (6736, 6738), False, 'import argparse\n'), ((953, 989), 'numpy.random.permutation', 'np.random.permutation', (['X_tr.shape[0]'], {}), '(X_tr.shape[0])\n', (974, 989), True, 'import numpy as np\n'), ((4069, 4109), 'numpy.zeros', 'np.zeros', (['self.X_tr.shape[0]'], {'dtype': 'bool'}), '(self.X_tr.shape[0], dtype=bool)\n', (4077, 4109), True, 'import numpy as np\n'), ((4629, 4640), 'time.time', 'time.time', ([], {}), '()\n', (4638, 4640), False, 'import time\n'), ((6678, 6698), 'numpy.__config__.show', 'np.__config__.show', ([], {}), '()\n', (6696, 6698), True, 'import numpy as np\n'), ((7961, 7980), 'numpy.random.seed', 'np.random.seed', (['(123)'], {}), '(123)\n', (7975, 7980), True, 'import numpy as np\n'), ((1177, 1197), 'sklearn.linear_model.LogisticRegression', 'LogisticRegression', ([], {}), 
'()\n', (1195, 1197), False, 'from sklearn.linear_model import LogisticRegression\n'), ((4147, 4164), 'numpy.arange', 'np.arange', (['self.n'], {}), '(self.n)\n', (4156, 4164), True, 'import numpy as np\n'), ((8399, 8422), 'pickle.dump', 'pickle.dump', (['(s, c0)', 'f'], {}), '((s, c0), f)\n', (8410, 8422), False, 'import pickle\n'), ((2119, 2129), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (2126, 2129), True, 'import numpy as np\n'), ((3094, 3100), 'numpy.log', 'log', (['k'], {}), '(k)\n', (3097, 3100), False, 'from numpy import sqrt, log\n'), ((3168, 3178), 'numpy.sqrt', 'np.sqrt', (['c'], {}), '(c)\n', (3175, 3178), True, 'import numpy as np\n'), ((3221, 3244), 'numpy.sqrt', 'np.sqrt', (['(c * self.score)'], {}), '(c * self.score)\n', (3228, 3244), True, 'import numpy as np\n'), ((8290, 8302), 'numpy.log10', 'np.log10', (['c0'], {}), '(c0)\n', (8298, 8302), True, 'import numpy as np\n'), ((3445, 3464), 'numpy.sqrt', 'np.sqrt', (['self.score'], {}), '(self.score)\n', (3452, 3464), True, 'import numpy as np\n'), ((3483, 3502), 'numpy.sqrt', 'np.sqrt', (['self.score'], {}), '(self.score)\n', (3490, 3502), True, 'import numpy as np\n'), ((5006, 5022), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (5020, 5022), True, 'import numpy as np\n'), ((3566, 3576), 'numpy.sqrt', 'np.sqrt', (['c'], {}), '(c)\n', (3573, 3576), True, 'import numpy as np\n'), ((6170, 6181), 'time.time', 'time.time', ([], {}), '()\n', (6179, 6181), False, 'import time\n'), ((3680, 3690), 'numpy.sqrt', 'np.sqrt', (['c'], {}), '(c)\n', (3687, 3690), True, 'import numpy as np\n'), ((3621, 3631), 'numpy.sqrt', 'np.sqrt', (['c'], {}), '(c)\n', (3628, 3631), True, 'import numpy as np\n')] |
import numpy as np
import requests
import re
# generate sentences base on letters. Model is trained with Alice's Adventures in Wonderland
# example output "'Oh, I BEL yourt!' Saic 'Alice thing seemst,'
# Alice reminused all cranged at the end of everying and bring rause
def sigmoid(x):
    """Elementwise logistic function 1 / (1 + e^-x)."""
    z = np.exp(-x)
    return 1 / (1 + z)
def tanh(x):
    """Elementwise hyperbolic tangent (thin wrapper around numpy)."""
    return np.tanh(x)
def dsigmoid(grad_a, act):
    """Backprop through a sigmoid given its activation *act*: g * (a - a^2)."""
    return grad_a * (act - act ** 2)
def dtanh(grad_a, act):
    """Backprop through a tanh given its activation *act*: g * (1 - a^2)."""
    return grad_a * (1 - act ** 2)
def softmax(x):
    """Row-wise softmax, shifted by the row max for numerical stability."""
    eps = 1e-20
    exps = np.exp(x - np.max(x, axis=1, keepdims=True))
    return exps / (np.sum(exps, axis=1, keepdims=True) + eps)
def cross_entropy(pred, y):
    """Total cross-entropy between predictions and one-hot targets."""
    return -(y * np.log(pred + 1e-20)).sum()
class LSTM(object):
    """Single-layer LSTM language model trained with hand-rolled BPTT.

    Inputs are one-hot vectors of shape (n_t, n_data, n_input); all time
    steps of a batch are stacked into one 2-D array and addressed with
    per-step index ranges.
    """

    def __init__(self, n_input, n_hidden, n_label, n_t):
        self.loss = cross_entropy
        self.n_hidden, self.n_label = n_hidden, n_label
        self.lr, self.batch_size, self.epochs = 1, 32, 200
        self.eps = 1e-20
        self.n_t = n_t
        # input weights for the forget/input/cell/output gates, scaled by fan-in
        self.w_f, self.w_i, self.w_c, self.w_o = [np.random.randn(
            n_input, self.n_hidden) / n_input for _ in range(4)]
        # recurrent weights
        self.u_f, self.u_i, self.u_c, self.u_o = [np.random.randn(
            self.n_hidden, self.n_hidden) / self.n_hidden for _ in range(4)]
        # gate biases
        self.b_f, self.b_i, self.b_c, self.b_o = [
            np.random.randn(1, self.n_hidden) for _ in range(4)]
        # output projection weights and bias
        self.u_v, self.b_v = np.random.randn(
            self.n_hidden, self.n_label) / self.n_hidden, np.random.randn(1, self.n_label)
        self.param_list = [
            self.w_f, self.w_i, self.w_c, self.w_o,
            self.u_f, self.u_i, self.u_c, self.u_o, self.u_v,
            self.b_f, self.b_i, self.b_c, self.b_o, self.b_v
        ]
        # Adam first/second moment buffers, one per parameter
        self.mom_list = [np.zeros_like(param) for param in self.param_list]
        self.cache_list = [np.zeros_like(param) for param in self.param_list]

    def fit(self, x, label):
        """Train with BPTT over shuffled mini-batches; x is (n_t, n_data, n_input)."""
        b_size = self.batch_size
        n_t, n_data, n_input = x.shape
        # one-hot encode the integer targets
        y = np.zeros((n_t * n_data, self.n_label))
        y[np.arange(n_t * n_data), label.flatten()] = 1
        y = y.reshape((n_t, n_data, self.n_label))
        constant = np.ones((1, self.batch_size * n_t))
        for epoch in range(self.epochs):
            permut = np.random.permutation(
                n_data // b_size * b_size).reshape(-1, b_size)
            for b_idx in range(permut.shape[0]):
                x_batch = x[:, permut[b_idx, :]].reshape(n_t * b_size, n_input)
                y_batch = y[:, permut[b_idx, :]].reshape(
                    n_t * b_size, self.n_label)
                h, f, i, c, o, c_bar, grad_f, grad_i, grad_o, grad_c, grad_c_bar = [
                    np.zeros((n_t * b_size, self.n_hidden)) for _ in range(11)
                ]
                # forward pass
                for t in range(n_t):
                    t_idx = np.arange(t * b_size, (t + 1) * b_size)
                    t_idx_prev = t_idx - b_size if t > 0 else t_idx
                    xt_batch, ht_prev = x_batch[t_idx], h[t_idx_prev]
                    f[t_idx] = sigmoid(xt_batch @ self.w_f + ht_prev @ self.u_f + self.b_f)
                    i[t_idx] = sigmoid(xt_batch @ self.w_i + ht_prev @ self.u_i + self.b_i)
                    o[t_idx] = sigmoid(xt_batch @ self.w_o + ht_prev @ self.u_o + self.b_o)
                    c_bar[t_idx] = tanh(xt_batch @ self.w_c + ht_prev @ self.u_c + self.b_c)
                    c[t_idx] = f[t_idx] * c[t_idx_prev] + \
                        i[t_idx] * c_bar[t_idx]
                    h[t_idx] = o[t_idx] * tanh(c[t_idx])
                # states shifted one step back in time (zeros for t=0)
                c_prev = np.zeros(c.shape)
                c_prev[b_size:, :] = c[:-b_size, :]
                h_prev = np.zeros(h.shape)
                h_prev[b_size:, :] = h[:-b_size, :]
                # back propagation through time
                grad_v = softmax(h @ self.u_v + self.b_v) - y_batch
                grad_h = grad_v @ self.u_v.T
                for t in reversed(range(0, n_t)):
                    t_idx = np.arange(t * b_size, (t + 1) * b_size)
                    t_idx_next = t_idx + b_size if t < n_t - 1 else t_idx
                    grad_h[t_idx] += (
                        dsigmoid(grad_f[t_idx_next], f[t_idx_next]) @ self.u_f.T +
                        dsigmoid(grad_i[t_idx_next], i[t_idx_next]) @ self.u_i.T +
                        dsigmoid(grad_o[t_idx_next], o[t_idx_next]) @ self.u_o.T +
                        dtanh(grad_c_bar[t_idx_next], c_bar[t_idx_next]) @ self.u_c.T
                    )
                    grad_c[t_idx] = o[t_idx] * grad_h[t_idx] * \
                        (1 - np.square(np.tanh(c[t_idx]))) + \
                        f[t_idx_next] * grad_c[t_idx_next]
                    grad_f[t_idx] = grad_c[t_idx] * c_prev[t_idx]
                    grad_i[t_idx] = grad_c[t_idx] * c_bar[t_idx]
                    grad_o[t_idx] = grad_h[t_idx] * tanh(c[t_idx])
                    grad_c_bar[t_idx] = grad_c[t_idx] * i[t_idx]
                # parameter gradients: inputs / previous hidden / bias rows
                self.adam(
                    grad_list=[
                        x_batch.T @ dsigmoid(grad_f, f), x_batch.T @ dsigmoid(grad_i, i), x_batch.T @ dtanh(grad_c_bar, c_bar), x_batch.T @ dsigmoid(grad_o, o),
                        h_prev.T @ dsigmoid(grad_f, f), h_prev.T @ dsigmoid(grad_i, i), h_prev.T @ dtanh(grad_c_bar, c_bar), h_prev.T @ dsigmoid(grad_o, o), h.T @ grad_v,
                        constant @ dsigmoid(grad_f, f), constant @ dsigmoid(grad_i, i), constant @ dtanh(grad_c_bar, c_bar), constant @ dsigmoid(grad_o, o), constant @ grad_v
                    ]
                )
                self.regularization()
            # end of epoch: print a sampled sequence and the current loss
            print(self.sample(np.random.randint(n_input), np.random.randn(
                1, self.n_hidden), np.random.randn(1, self.n_hidden), n_t * 4))
            print(self.loss(self.predict(x).reshape(n_t * n_data,
                                                    self.n_label), y.reshape(n_t * n_data, self.n_label)))

    def sgd(self, grad_list):
        """Plain SGD update (alternative to adam; not called by fit)."""
        alpha = self.lr / self.batch_size / self.n_t
        for params, grads in zip(self.param_list, grad_list):
            params -= alpha * grads

    def adam(self, grad_list):
        """Adam-style in-place update of every parameter."""
        beta1 = 0.9
        beta2 = 0.999
        alpha = self.lr / self.batch_size / self.n_t
        for params, grads, mom, cache in zip(
            self.param_list, grad_list, self.mom_list, self.cache_list
        ):
            mom += (beta1 - 1) * mom + (1 - beta1) * grads
            cache += (beta2 - 1) * cache + (1 - beta2) * np.square(grads)
            params -= alpha * mom / (np.sqrt(cache) + self.eps)

    def regularization(self):
        """L2-style weight decay applied directly to the parameters."""
        lbd = 1e-5
        for params in self.param_list:
            params -= lbd * params

    def predict(self, x):
        """Forward pass only; return per-step label probabilities."""
        n_t, n_data, n_input = x.shape
        h, f, i, c, o = [np.zeros((n_t * n_data, self.n_hidden))
                         for _ in range(5)]
        # forward pass
        for t in range(n_t):
            t_idx = np.arange(t * n_data, (t + 1) * n_data)
            t_idx_prev = t_idx - n_data if t > 0 else t_idx
            f[t_idx] = sigmoid(x[t] @ self.w_f + h[t_idx_prev] @ self.u_f + self.b_f)
            i[t_idx] = sigmoid(x[t] @ self.w_i + h[t_idx_prev] @ self.u_i + self.b_i)
            o[t_idx] = sigmoid(x[t] @ self.w_o + h[t_idx_prev] @ self.u_o + self.b_o)
            c[t_idx] = f[t_idx] * c[t_idx_prev] + i[t_idx] * tanh(x[t] @ self.w_c + h[t_idx_prev] @ self.u_c + self.b_c)
            h[t_idx] = o[t_idx] * tanh(c[t_idx])
        return softmax(h @ self.u_v + self.b_v).reshape(n_t, n_data, self.n_label)

    def sample(self, x_idx, h, c, seq_length):
        """Generate a sequence of length *seq_length* starting from token index *x_idx*."""
        n_input = self.w_f.shape[0]
        seq = [x_idx]
        for t in range(seq_length):
            x = np.zeros((1, n_input))
            x[0, seq[-1]] = 1
            f = sigmoid(x @ self.w_f + h @ self.u_f + self.b_f)
            i = sigmoid(x @ self.w_i + h @ self.u_i + self.b_i)
            o = sigmoid(x @ self.w_o + h @ self.u_o + self.b_o)
            c = f * c + i * tanh(x @ self.w_c + h @ self.u_c + self.b_c)
            h = o * tanh(c)
            y = softmax(h @ self.u_v + self.b_v)
            # sample the next token from the output distribution
            seq.append(np.random.choice(range(n_input), p=y.flatten()))
        return ''.join(np.vectorize(self.ix_to_word.get)(np.array(seq)).tolist())
def text_generation(use_word=True):
    """Download Alice in Wonderland and train the LSTM language model on it.

    When use_word is True the vocabulary is whitespace-separated words
    (letters only, lowercased); otherwise single characters.
    """
    text = requests.get('http://www.gutenberg.org/cache/epub/11/pg11.txt').text
    if use_word:
        text = [
            word + ' ' for word in re.sub("[^a-zA-Z]", " ", text).lower().split()]
    words = sorted(list(set(text)))
    text_size, vocab_size = len(text), len(words)
    print(f'text has {text_size} characters, {vocab_size} unique.')
    word_to_ix = {word: i for i, word in enumerate(words)}
    ix_to_word = {i: word for i, word in enumerate(words)}
    seq_length = 50
    indices = np.vectorize(word_to_ix.get)(np.array(list(text)))
    # one-hot encoding of the whole corpus
    data = np.zeros((text_size, vocab_size))
    data[np.arange(text_size), indices] = 1
    n_text = (text_size - 1) // seq_length
    # inputs: fixed-length sequences; targets: same sequences shifted by one token
    x = data[
        :n_text * seq_length].reshape(n_text, seq_length, vocab_size).transpose(1, 0, 2)
    y = indices[1: n_text * seq_length + 1].reshape(n_text, seq_length).T
    test_ratio = 0.2
    test_split = np.random.uniform(0, 1, x.shape[1])
    train_x, test_x = x[:, test_split >= test_ratio, :], x[
        :, test_split < test_ratio, :]
    train_y, test_y = y[:, test_split >= test_ratio], y[
        :, test_split < test_ratio]
    lstm = LSTM(vocab_size, 500, vocab_size, seq_length)
    lstm.ix_to_word = ix_to_word
    lstm.fit(train_x, train_y)
    # NOTE(review): these print per-token accuracy, despite the 'loss' labels
    print('train loss', (np.argmax(lstm.predict(train_x), axis=2)
                         == train_y).sum() / (train_y.shape[0] * train_y.shape[1]))
    print('test loss', (np.argmax(lstm.predict(test_x), axis=2)
                        == test_y).sum() / (test_y.shape[0] * test_y.shape[1]))
def main():
    """Entry point: train the LSTM as a character-level model."""
    text_generation(use_word=False)
if __name__ == "__main__":
    main()
| [
"numpy.sum",
"numpy.ones",
"numpy.random.randint",
"numpy.arange",
"numpy.exp",
"numpy.zeros_like",
"numpy.random.randn",
"numpy.max",
"requests.get",
"re.sub",
"numpy.vectorize",
"numpy.tanh",
"numpy.square",
"numpy.random.permutation",
"numpy.random.uniform",
"numpy.log",
"numpy.ze... | [((347, 357), 'numpy.tanh', 'np.tanh', (['x'], {}), '(x)\n', (354, 357), True, 'import numpy as np\n'), ((8899, 8932), 'numpy.zeros', 'np.zeros', (['(text_size, vocab_size)'], {}), '((text_size, vocab_size))\n', (8907, 8932), True, 'import numpy as np\n'), ((9236, 9271), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(1)', 'x.shape[1]'], {}), '(0, 1, x.shape[1])\n', (9253, 9271), True, 'import numpy as np\n'), ((2022, 2060), 'numpy.zeros', 'np.zeros', (['(n_t * n_data, self.n_label)'], {}), '((n_t * n_data, self.n_label))\n', (2030, 2060), True, 'import numpy as np\n'), ((2187, 2222), 'numpy.ones', 'np.ones', (['(1, self.batch_size * n_t)'], {}), '((1, self.batch_size * n_t))\n', (2194, 2222), True, 'import numpy as np\n'), ((8342, 8405), 'requests.get', 'requests.get', (['"""http://www.gutenberg.org/cache/epub/11/pg11.txt"""'], {}), "('http://www.gutenberg.org/cache/epub/11/pg11.txt')\n", (8354, 8405), False, 'import requests\n'), ((8837, 8865), 'numpy.vectorize', 'np.vectorize', (['word_to_ix.get'], {}), '(word_to_ix.get)\n', (8849, 8865), True, 'import numpy as np\n'), ((309, 319), 'numpy.exp', 'np.exp', (['(-x)'], {}), '(-x)\n', (315, 319), True, 'import numpy as np\n'), ((414, 428), 'numpy.square', 'np.square', (['act'], {}), '(act)\n', (423, 428), True, 'import numpy as np\n'), ((481, 495), 'numpy.square', 'np.square', (['act'], {}), '(act)\n', (490, 495), True, 'import numpy as np\n'), ((1350, 1383), 'numpy.random.randn', 'np.random.randn', (['(1)', 'self.n_hidden'], {}), '(1, self.n_hidden)\n', (1365, 1383), True, 'import numpy as np\n'), ((1507, 1539), 'numpy.random.randn', 'np.random.randn', (['(1)', 'self.n_label'], {}), '(1, self.n_label)\n', (1522, 1539), True, 'import numpy as np\n'), ((1779, 1799), 'numpy.zeros_like', 'np.zeros_like', (['param'], {}), '(param)\n', (1792, 1799), True, 'import numpy as np\n'), ((1857, 1877), 'numpy.zeros_like', 'np.zeros_like', (['param'], {}), '(param)\n', (1870, 1877), True, 'import numpy as 
np\n'), ((6818, 6857), 'numpy.zeros', 'np.zeros', (['(n_t * n_data, self.n_hidden)'], {}), '((n_t * n_data, self.n_hidden))\n', (6826, 6857), True, 'import numpy as np\n'), ((6974, 7013), 'numpy.arange', 'np.arange', (['(t * n_data)', '((t + 1) * n_data)'], {}), '(t * n_data, (t + 1) * n_data)\n', (6983, 7013), True, 'import numpy as np\n'), ((7743, 7765), 'numpy.zeros', 'np.zeros', (['(1, n_input)'], {}), '((1, n_input))\n', (7751, 7765), True, 'import numpy as np\n'), ((8942, 8962), 'numpy.arange', 'np.arange', (['text_size'], {}), '(text_size)\n', (8951, 8962), True, 'import numpy as np\n'), ((1061, 1100), 'numpy.random.randn', 'np.random.randn', (['n_input', 'self.n_hidden'], {}), '(n_input, self.n_hidden)\n', (1076, 1100), True, 'import numpy as np\n'), ((1193, 1238), 'numpy.random.randn', 'np.random.randn', (['self.n_hidden', 'self.n_hidden'], {}), '(self.n_hidden, self.n_hidden)\n', (1208, 1238), True, 'import numpy as np\n'), ((1432, 1476), 'numpy.random.randn', 'np.random.randn', (['self.n_hidden', 'self.n_label'], {}), '(self.n_hidden, self.n_label)\n', (1447, 1476), True, 'import numpy as np\n'), ((2071, 2094), 'numpy.arange', 'np.arange', (['(n_t * n_data)'], {}), '(n_t * n_data)\n', (2080, 2094), True, 'import numpy as np\n'), ((3626, 3643), 'numpy.zeros', 'np.zeros', (['c.shape'], {}), '(c.shape)\n', (3634, 3643), True, 'import numpy as np\n'), ((3721, 3738), 'numpy.zeros', 'np.zeros', (['h.shape'], {}), '(h.shape)\n', (3729, 3738), True, 'import numpy as np\n'), ((552, 569), 'numpy.max', 'np.max', (['x'], {'axis': '(1)'}), '(x, axis=1)\n', (558, 569), True, 'import numpy as np\n'), ((604, 623), 'numpy.sum', 'np.sum', (['out'], {'axis': '(1)'}), '(out, axis=1)\n', (610, 623), True, 'import numpy as np\n'), ((704, 724), 'numpy.log', 'np.log', (['(pred + 1e-20)'], {}), '(pred + 1e-20)\n', (710, 724), True, 'import numpy as np\n'), ((2286, 2334), 'numpy.random.permutation', 'np.random.permutation', (['(n_data // b_size * b_size)'], {}), '(n_data // 
b_size * b_size)\n', (2307, 2334), True, 'import numpy as np\n'), ((2712, 2751), 'numpy.zeros', 'np.zeros', (['(n_t * b_size, self.n_hidden)'], {}), '((n_t * b_size, self.n_hidden))\n', (2720, 2751), True, 'import numpy as np\n'), ((2886, 2925), 'numpy.arange', 'np.arange', (['(t * b_size)', '((t + 1) * b_size)'], {}), '(t * b_size, (t + 1) * b_size)\n', (2895, 2925), True, 'import numpy as np\n'), ((4032, 4071), 'numpy.arange', 'np.arange', (['(t * b_size)', '((t + 1) * b_size)'], {}), '(t * b_size, (t + 1) * b_size)\n', (4041, 4071), True, 'import numpy as np\n'), ((5667, 5693), 'numpy.random.randint', 'np.random.randint', (['n_input'], {}), '(n_input)\n', (5684, 5693), True, 'import numpy as np\n'), ((5695, 5728), 'numpy.random.randn', 'np.random.randn', (['(1)', 'self.n_hidden'], {}), '(1, self.n_hidden)\n', (5710, 5728), True, 'import numpy as np\n'), ((5747, 5780), 'numpy.random.randn', 'np.random.randn', (['(1)', 'self.n_hidden'], {}), '(1, self.n_hidden)\n', (5762, 5780), True, 'import numpy as np\n'), ((6522, 6538), 'numpy.square', 'np.square', (['grads'], {}), '(grads)\n', (6531, 6538), True, 'import numpy as np\n'), ((6576, 6590), 'numpy.sqrt', 'np.sqrt', (['cache'], {}), '(cache)\n', (6583, 6590), True, 'import numpy as np\n'), ((8234, 8267), 'numpy.vectorize', 'np.vectorize', (['self.ix_to_word.get'], {}), '(self.ix_to_word.get)\n', (8246, 8267), True, 'import numpy as np\n'), ((8268, 8281), 'numpy.array', 'np.array', (['seq'], {}), '(seq)\n', (8276, 8281), True, 'import numpy as np\n'), ((8480, 8510), 're.sub', 're.sub', (['"""[^a-zA-Z]"""', '""" """', 'text'], {}), "('[^a-zA-Z]', ' ', text)\n", (8486, 8510), False, 'import re\n'), ((4646, 4663), 'numpy.tanh', 'np.tanh', (['c[t_idx]'], {}), '(c[t_idx])\n', (4653, 4663), True, 'import numpy as np\n')] |
import numpy as np
import matplotlib.pyplot as plt
import h5py
# wavelengths corresponding to a uniform frequency sweep
freq = np.linspace(1/30, 3/30, 200)
wav = 1/freq
# forces and torque computed by qbox, normalized by the incident flux
with h5py.File("../../build/scat.h5", 'r') as f:
    inc = f["sources/tfsf/flux"][...]
    F = f["monitors/box_monitor/monitor_0/force"][...]
    T = f["monitors/box_monitor/monitor_0/torque"][...]
plt.plot(wav, F[0,:]/inc, label = 'Fx, qbox')
plt.plot(wav, F[1,:]/inc, label = 'Fy, qbox')
plt.plot(wav,T/inc/40, label = 'Tz, qbox')
# reference forces computed by meep for comparison
with h5py.File('../../bench/force.h5', 'r') as f:
    inc = f['inc'][...]
    Fx = f["Fx"][...]
    Fy = f["Fy"][...]
    plt.plot(wav, Fx/inc, label = 'Fx, meep')
    plt.plot(wav, Fy/inc*40, label = 'Fy, meep')
plt.legend()
plt.show()
| [
"h5py.File",
"matplotlib.pyplot.show",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.legend",
"numpy.linspace"
] | [((71, 103), 'numpy.linspace', 'np.linspace', (['(1 / 30)', '(3 / 30)', '(200)'], {}), '(1 / 30, 3 / 30, 200)\n', (82, 103), True, 'import numpy as np\n'), ((676, 688), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (686, 688), True, 'import matplotlib.pyplot as plt\n'), ((689, 699), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (697, 699), True, 'import matplotlib.pyplot as plt\n'), ((119, 156), 'h5py.File', 'h5py.File', (['"""../../build/scat.h5"""', '"""r"""'], {}), "('../../build/scat.h5', 'r')\n", (128, 156), False, 'import h5py\n'), ((317, 363), 'matplotlib.pyplot.plot', 'plt.plot', (['wav', '(F[0, :] / inc)'], {'label': '"""Fx, qbox"""'}), "(wav, F[0, :] / inc, label='Fx, qbox')\n", (325, 363), True, 'import matplotlib.pyplot as plt\n'), ((367, 413), 'matplotlib.pyplot.plot', 'plt.plot', (['wav', '(F[1, :] / inc)'], {'label': '"""Fy, qbox"""'}), "(wav, F[1, :] / inc, label='Fy, qbox')\n", (375, 413), True, 'import matplotlib.pyplot as plt\n'), ((417, 462), 'matplotlib.pyplot.plot', 'plt.plot', (['wav', '(T / inc / 40)'], {'label': '"""Tz, qbox"""'}), "(wav, T / inc / 40, label='Tz, qbox')\n", (425, 462), True, 'import matplotlib.pyplot as plt\n'), ((466, 504), 'h5py.File', 'h5py.File', (['"""../../bench/force.h5"""', '"""r"""'], {}), "('../../bench/force.h5', 'r')\n", (475, 504), False, 'import h5py\n'), ((584, 625), 'matplotlib.pyplot.plot', 'plt.plot', (['wav', '(Fx / inc)'], {'label': '"""Fx, meep"""'}), "(wav, Fx / inc, label='Fx, meep')\n", (592, 625), True, 'import matplotlib.pyplot as plt\n'), ((630, 676), 'matplotlib.pyplot.plot', 'plt.plot', (['wav', '(Fy / inc * 40)'], {'label': '"""Fy, meep"""'}), "(wav, Fy / inc * 40, label='Fy, meep')\n", (638, 676), True, 'import matplotlib.pyplot as plt\n')] |
# Author: <NAME> (<EMAIL>), <NAME> (<EMAIL>)
# GitHub: https://github.com/chhwang/cmcl
# ==============================================================================
from __future__ import print_function
from datetime import datetime
import tensorflow as tf
import numpy as np
import cifar
import svhn
import time, sys, os
# Command-line configuration (fixes typo in the data_dir help string).
tf.app.flags.DEFINE_string('data_dir', './dataset', 'Directory to store input dataset')
tf.app.flags.DEFINE_string('dataset', 'cifar', 'Supported: cifar, svhn')
tf.app.flags.DEFINE_integer('batch_size', 128, 'Number of images to process in a batch.')
tf.app.flags.DEFINE_string('model_type', 'resnet', 'Supported: vggnet, googlenet, resnet')
tf.app.flags.DEFINE_integer('num_model', 5, 'How many models to ensemble.')
tf.app.flags.DEFINE_string('loss_type', 'cmcl_v1', 'Supported: independent, mcl, cmcl_v0, cmcl_v1')
tf.app.flags.DEFINE_integer('k', 4, 'Overlap parameter')
tf.app.flags.DEFINE_integer('gpu', 0, 'GPU to use')
tf.app.flags.DEFINE_float('beta', 0.75, '')
tf.app.flags.DEFINE_boolean('feature_sharing', True, 'Use feature sharing if True.')
tf.app.flags.DEFINE_boolean('test', True, 'Run test if True else run train')
FLAGS = tf.app.flags.FLAGS

import model  # NOTE: imported after the flag definitions so model sees them

# Set GPU to use. Only one GPU supported.
os.environ['CUDA_VISIBLE_DEVICES'] = str(FLAGS.gpu)

# Set dataset module based on the --dataset flag
if FLAGS.dataset == 'cifar':
    dataset = cifar
elif FLAGS.dataset == 'svhn':
    dataset = svhn
else:
    raise ValueError('Not supported dataset: %s' % FLAGS.dataset)
def run_train(sess):
    """Train the model.

    Runs `model.MAX_STEPS` SGD steps, evaluating on the full test set once
    per epoch and printing progress.  Fixes: `xrange` (Python-2-only) is
    replaced with `range`, and the removed `np.float` alias with
    `np.float64` (the same dtype), so the script runs under Python 3 and
    modern NumPy.

    Args:
      sess: TensorFlow session to run the model.
    """
    # get data
    train_images, train_labels = dataset.inputs(FLAGS.data_dir, test=False)
    test_images, test_labels = dataset.inputs(FLAGS.data_dir, test=True)

    # shuffle train data
    train_images, train_labels = dataset.shuffle(train_images, train_labels)

    # period of evaluation: one epoch worth of batches
    eval_period = dataset.TRAIN_SIZE // FLAGS.batch_size

    # get placeholders
    is_train = tf.get_collection('is_train')[0]
    batch_images = tf.get_collection('batch_images')[0]
    batch_labels = tf.get_collection('batch_labels')[0]

    # record the time when training starts
    start_time = time.time()
    curr_time = start_time
    epoch = 0
    max_test_step = dataset.TEST_SIZE // FLAGS.batch_size

    # loop through training steps
    train_idx = np.array(range(FLAGS.batch_size))
    for step in range(model.MAX_STEPS):  # was xrange (Python 2 only)
        # wrap indices that ran past the end of the training set
        train_idx[train_idx >= dataset.TRAIN_SIZE] -= dataset.TRAIN_SIZE

        # run training
        _, gstep, lr, loss = sess.run(tf.get_collection('train_ops'),
                                      feed_dict={is_train: True,
                                                 batch_images: train_images[train_idx],
                                                 batch_labels: train_labels[train_idx]})
        train_idx += FLAGS.batch_size

        # periodic evaluation
        if step % eval_period == 0:
            elapsed_time = time.time() - curr_time

            # run evaluation with test dataset
            top1_err_sum = 0
            oracle_err_sum = 0
            err_list_sum = np.zeros((FLAGS.num_model,), dtype=np.float64)
            test_idx = np.array(range(FLAGS.batch_size))
            for test_step in range(max_test_step):
                test_idx[test_idx >= dataset.TEST_SIZE] -= dataset.TEST_SIZE
                result = sess.run(tf.get_collection('test_ops'),
                                  feed_dict={is_train: False,
                                             batch_images: test_images[test_idx],
                                             batch_labels: test_labels[test_idx]})
                top1_err_sum += result[0]
                oracle_err_sum += result[1]
                err_list_sum += np.asarray(result[2:], dtype=np.float64)
                test_idx += FLAGS.batch_size

            # take average over all test batches
            top1_err = top1_err_sum / float(max_test_step)
            oracle_err = oracle_err_sum / float(max_test_step)
            err_list = err_list_sum / float(max_test_step)

            # print progress
            sys.stdout.write('[%s(+%.1f min)] '
                             'Epoch %d; LR %f; Loss %.6f; Top-1 %.2f%%, Oracle %.2f%%, '
                             'Model Avg %.2f%% (%.1f ms/step)\n' %
                             (datetime.now().strftime('%Y-%m-%d %H:%M:%S'),
                              float(time.time()-start_time)/60.,
                              epoch, lr, loss, top1_err, oracle_err,
                              np.average(err_list), 1000*elapsed_time/eval_period))
            sys.stdout.flush()

            # shuffle train data for the next epoch
            train_images, train_labels = dataset.shuffle(train_images, train_labels)
            epoch += 1
            curr_time = time.time()
def run_test(sess):
    """Evaluate the ensemble on the full test set and print error statistics.

    Iterates over the test set in batches of FLAGS.batch_size, accumulates
    the top-1 / oracle errors and the per-model error list, then prints the
    batch-averaged results.

    Args:
      sess: TensorFlow session with the model graph already built and the
        variables restored/initialized.
    """
    # get data
    test_images, test_labels = dataset.inputs(FLAGS.data_dir, test=True)
    # get placeholders
    is_train = tf.get_collection('is_train')[0]
    batch_images = tf.get_collection('batch_images')[0]
    batch_labels = tf.get_collection('batch_labels')[0]
    top1_err_sum = 0
    oracle_err_sum = 0
    # NOTE: the deprecated `np.float` alias was removed in NumPy 1.24;
    # use the explicit float64 dtype instead.
    err_list_sum = np.zeros((FLAGS.num_model,), dtype=np.float64)
    test_idx = np.array(range(FLAGS.batch_size))
    max_test_step = dataset.TEST_SIZE // FLAGS.batch_size
    print('Running Test ... ')
    start_time = time.time()
    for test_step in range(max_test_step):
        # wrap indices that ran past the end of the test set
        test_idx[test_idx >= dataset.TEST_SIZE] -= dataset.TEST_SIZE
        result = sess.run(tf.get_collection('test_ops'),
                          feed_dict={is_train: False,
                                     batch_images: test_images[test_idx],
                                     batch_labels: test_labels[test_idx]})
        top1_err_sum += result[0]
        oracle_err_sum += result[1]
        err_list_sum += np.asarray(result[2:], dtype=np.float64)
        test_idx += FLAGS.batch_size
    elapsed_time = float(time.time() - start_time)
    print(' Elapsed Time %.2f sec' % elapsed_time)
    # take average over all test batches
    top1_err = top1_err_sum / float(max_test_step)
    oracle_err = oracle_err_sum / float(max_test_step)
    err_list = err_list_sum / float(max_test_step)
    print(' Top-1 Error: %.2f%%' % top1_err)
    print(' Oracle Error: %.2f%%' % oracle_err)
    print(' Best Model Error: %.2f%%' % min(err_list))
    print(' Worst Model Error: %.2f%%' % max(err_list))
    print(' Average Model Error: %.2f%%' % np.average(err_list))
def main(argv=None):
    """Entry point: build the model, then either evaluate from a saved
    checkpoint (when FLAGS.test is set) or train from scratch and save one.

    Args:
      argv: unused; present for tf.app.run() compatibility.
    """
    # log directory
    log_dir = './log'
    # prepare for checkpoint
    ckpt_dir = './ckpt'
    ckpt_path = ckpt_dir + '/train_result.ckpt'
    if not os.path.exists(ckpt_dir):
        os.makedirs(ckpt_dir)
    # build model
    model.build()
    # create a local session to run training; allow TF to fall back on CPU
    # placement and to grow GPU memory on demand.
    session_config = tf.ConfigProto(allow_soft_placement=True)
    session_config.gpu_options.allow_growth = True
    with tf.Session(config=session_config) as sess:
        # log the graph data
        graph_writer = tf.summary.FileWriter(log_dir, sess.graph)
        # ckpt saver
        saver = tf.train.Saver()
        if not FLAGS.test:
            tf.global_variables_initializer().run()
            tf.local_variables_initializer().run()
            print('Initialized!')
            run_train(sess)
            # last checkpoint
            saver.save(sess, ckpt_path)
            print(' * Variables are saved: %s *' % ckpt_path)
        else:
            saver.restore(sess, ckpt_path)
            print('Restored variables from %s.' % ckpt_path)
            run_test(sess)
if __name__ == '__main__':
  # Let tf.app.run() parse the command-line FLAGS and invoke main().
  tf.app.run()
| [
"tensorflow.app.flags.DEFINE_float",
"tensorflow.get_collection",
"tensorflow.local_variables_initializer",
"tensorflow.ConfigProto",
"tensorflow.app.flags.DEFINE_boolean",
"sys.stdout.flush",
"tensorflow.app.flags.DEFINE_integer",
"os.path.exists",
"model.build",
"tensorflow.summary.FileWriter",
... | [((326, 417), 'tensorflow.app.flags.DEFINE_string', 'tf.app.flags.DEFINE_string', (['"""data_dir"""', '"""./dataset"""', '"""Directoty to store input dataset"""'], {}), "('data_dir', './dataset',\n 'Directoty to store input dataset')\n", (352, 417), True, 'import tensorflow as tf\n'), ((414, 486), 'tensorflow.app.flags.DEFINE_string', 'tf.app.flags.DEFINE_string', (['"""dataset"""', '"""cifar"""', '"""Supported: cifar, svhn"""'], {}), "('dataset', 'cifar', 'Supported: cifar, svhn')\n", (440, 486), True, 'import tensorflow as tf\n'), ((487, 580), 'tensorflow.app.flags.DEFINE_integer', 'tf.app.flags.DEFINE_integer', (['"""batch_size"""', '(128)', '"""Number of images to process in a batch."""'], {}), "('batch_size', 128,\n 'Number of images to process in a batch.')\n", (514, 580), True, 'import tensorflow as tf\n'), ((577, 671), 'tensorflow.app.flags.DEFINE_string', 'tf.app.flags.DEFINE_string', (['"""model_type"""', '"""resnet"""', '"""Supported: vggnet, googlenet, resnet"""'], {}), "('model_type', 'resnet',\n 'Supported: vggnet, googlenet, resnet')\n", (603, 671), True, 'import tensorflow as tf\n'), ((668, 743), 'tensorflow.app.flags.DEFINE_integer', 'tf.app.flags.DEFINE_integer', (['"""num_model"""', '(5)', '"""How many models to ensemble."""'], {}), "('num_model', 5, 'How many models to ensemble.')\n", (695, 743), True, 'import tensorflow as tf\n'), ((744, 847), 'tensorflow.app.flags.DEFINE_string', 'tf.app.flags.DEFINE_string', (['"""loss_type"""', '"""cmcl_v1"""', '"""Supported: independent, mcl, cmcl_v0, cmcl_v1"""'], {}), "('loss_type', 'cmcl_v1',\n 'Supported: independent, mcl, cmcl_v0, cmcl_v1')\n", (770, 847), True, 'import tensorflow as tf\n'), ((844, 900), 'tensorflow.app.flags.DEFINE_integer', 'tf.app.flags.DEFINE_integer', (['"""k"""', '(4)', '"""Overlap parameter"""'], {}), "('k', 4, 'Overlap parameter')\n", (871, 900), True, 'import tensorflow as tf\n'), ((901, 952), 'tensorflow.app.flags.DEFINE_integer', 'tf.app.flags.DEFINE_integer', 
(['"""gpu"""', '(0)', '"""GPU to use"""'], {}), "('gpu', 0, 'GPU to use')\n", (928, 952), True, 'import tensorflow as tf\n'), ((953, 996), 'tensorflow.app.flags.DEFINE_float', 'tf.app.flags.DEFINE_float', (['"""beta"""', '(0.75)', '""""""'], {}), "('beta', 0.75, '')\n", (978, 996), True, 'import tensorflow as tf\n'), ((997, 1085), 'tensorflow.app.flags.DEFINE_boolean', 'tf.app.flags.DEFINE_boolean', (['"""feature_sharing"""', '(True)', '"""Use feature sharing if True."""'], {}), "('feature_sharing', True,\n 'Use feature sharing if True.')\n", (1024, 1085), True, 'import tensorflow as tf\n'), ((1082, 1158), 'tensorflow.app.flags.DEFINE_boolean', 'tf.app.flags.DEFINE_boolean', (['"""test"""', '(True)', '"""Run test if True else run train"""'], {}), "('test', True, 'Run test if True else run train')\n", (1109, 1158), True, 'import tensorflow as tf\n'), ((2192, 2203), 'time.time', 'time.time', ([], {}), '()\n', (2201, 2203), False, 'import time, sys, os\n'), ((5307, 5351), 'numpy.zeros', 'np.zeros', (['(FLAGS.num_model,)'], {'dtype': 'np.float'}), '((FLAGS.num_model,), dtype=np.float)\n', (5315, 5351), True, 'import numpy as np\n'), ((5508, 5519), 'time.time', 'time.time', ([], {}), '()\n', (5517, 5519), False, 'import time, sys, os\n'), ((6904, 6917), 'model.build', 'model.build', ([], {}), '()\n', (6915, 6917), False, 'import model\n'), ((6977, 7018), 'tensorflow.ConfigProto', 'tf.ConfigProto', ([], {'allow_soft_placement': '(True)'}), '(allow_soft_placement=True)\n', (6991, 7018), True, 'import tensorflow as tf\n'), ((7877, 7889), 'tensorflow.app.run', 'tf.app.run', ([], {}), '()\n', (7887, 7889), True, 'import tensorflow as tf\n'), ((1986, 2015), 'tensorflow.get_collection', 'tf.get_collection', (['"""is_train"""'], {}), "('is_train')\n", (2003, 2015), True, 'import tensorflow as tf\n'), ((2038, 2071), 'tensorflow.get_collection', 'tf.get_collection', (['"""batch_images"""'], {}), "('batch_images')\n", (2055, 2071), True, 'import tensorflow as tf\n'), ((2094, 
2127), 'tensorflow.get_collection', 'tf.get_collection', (['"""batch_labels"""'], {}), "('batch_labels')\n", (2111, 2127), True, 'import tensorflow as tf\n'), ((5098, 5127), 'tensorflow.get_collection', 'tf.get_collection', (['"""is_train"""'], {}), "('is_train')\n", (5115, 5127), True, 'import tensorflow as tf\n'), ((5150, 5183), 'tensorflow.get_collection', 'tf.get_collection', (['"""batch_images"""'], {}), "('batch_images')\n", (5167, 5183), True, 'import tensorflow as tf\n'), ((5206, 5239), 'tensorflow.get_collection', 'tf.get_collection', (['"""batch_labels"""'], {}), "('batch_labels')\n", (5223, 5239), True, 'import tensorflow as tf\n'), ((5986, 6024), 'numpy.asarray', 'np.asarray', (['result[2:]'], {'dtype': 'np.float'}), '(result[2:], dtype=np.float)\n', (5996, 6024), True, 'import numpy as np\n'), ((6825, 6849), 'os.path.exists', 'os.path.exists', (['ckpt_dir'], {}), '(ckpt_dir)\n', (6839, 6849), False, 'import time, sys, os\n'), ((6859, 6880), 'os.makedirs', 'os.makedirs', (['ckpt_dir'], {}), '(ckpt_dir)\n', (6870, 6880), False, 'import time, sys, os\n'), ((7072, 7097), 'tensorflow.Session', 'tf.Session', ([], {'config': 'config'}), '(config=config)\n', (7082, 7097), True, 'import tensorflow as tf\n'), ((7153, 7195), 'tensorflow.summary.FileWriter', 'tf.summary.FileWriter', (['log_dir', 'sess.graph'], {}), '(log_dir, sess.graph)\n', (7174, 7195), True, 'import tensorflow as tf\n'), ((7234, 7250), 'tensorflow.train.Saver', 'tf.train.Saver', ([], {}), '()\n', (7248, 7250), True, 'import tensorflow as tf\n'), ((2603, 2633), 'tensorflow.get_collection', 'tf.get_collection', (['"""train_ops"""'], {}), "('train_ops')\n", (2620, 2633), True, 'import tensorflow as tf\n'), ((3168, 3212), 'numpy.zeros', 'np.zeros', (['(FLAGS.num_model,)'], {'dtype': 'np.float'}), '((FLAGS.num_model,), dtype=np.float)\n', (3176, 3212), True, 'import numpy as np\n'), ((4641, 4659), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (4657, 4659), False, 'import time, sys, 
os\n'), ((4826, 4837), 'time.time', 'time.time', ([], {}), '()\n', (4835, 4837), False, 'import time, sys, os\n'), ((5658, 5687), 'tensorflow.get_collection', 'tf.get_collection', (['"""test_ops"""'], {}), "('test_ops')\n", (5675, 5687), True, 'import tensorflow as tf\n'), ((6087, 6098), 'time.time', 'time.time', ([], {}), '()\n', (6096, 6098), False, 'import time, sys, os\n'), ((6595, 6615), 'numpy.average', 'np.average', (['err_list'], {}), '(err_list)\n', (6605, 6615), True, 'import numpy as np\n'), ((3009, 3020), 'time.time', 'time.time', ([], {}), '()\n', (3018, 3020), False, 'import time, sys, os\n'), ((3808, 3846), 'numpy.asarray', 'np.asarray', (['result[2:]'], {'dtype': 'np.float'}), '(result[2:], dtype=np.float)\n', (3818, 3846), True, 'import numpy as np\n'), ((3432, 3461), 'tensorflow.get_collection', 'tf.get_collection', (['"""test_ops"""'], {}), "('test_ops')\n", (3449, 3461), True, 'import tensorflow as tf\n'), ((7558, 7591), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (7589, 7591), True, 'import tensorflow as tf\n'), ((7610, 7642), 'tensorflow.local_variables_initializer', 'tf.local_variables_initializer', ([], {}), '()\n', (7640, 7642), True, 'import tensorflow as tf\n'), ((4575, 4595), 'numpy.average', 'np.average', (['err_list'], {}), '(err_list)\n', (4585, 4595), True, 'import numpy as np\n'), ((4365, 4379), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (4377, 4379), False, 'from datetime import datetime\n'), ((4447, 4458), 'time.time', 'time.time', ([], {}), '()\n', (4456, 4458), False, 'import time, sys, os\n')] |
# -*- coding: utf-8 -*-
# Copyright © 2017-2018 <NAME>
# https://github.com/jnsebgosselin/pygld
#
# This file is part of PyGLD.
# Licensed under the terms of the MIT License.
# ---- Standard imports
import copy
# ---- Third party imports
import numpy as np
from scipy import interpolate
import os.path as osp
# ---- Local imports
from pygld.api import __datadir__
from pygld.utils.strformating import array_to_str
# Heat carrier fluid types for which thermophysical property tables are bundled.
FLUIDS = ['prop_glycol', 'ethyl_glycol', 'water']
class HeatCarrierFluid(object):
    """
    The :attr:`~pygld.HeatCarrierFluid` class holds all the thermophysical
    properties related to the fluid circulating in the ground-loop of the
    heat exchanger system. The :attr:`~pygld.HeatCarrierFluid` is initialized
    by default as a pure water :attr:`~pygld.HeatCarrierFluid.fluid` with an
    antifreeze volumetric fraction (:attr:`~pygld.HeatCarrierFluid.fr`) of 0
    and at a reference temperature (:attr:`~pygld.HeatCarrierFluid.Tref`)
    of 20°C.

    The fluid type of :attr:`~pygld.HeatCarrierFluid` can be changed with the
    :meth:`~pygld.HeatCarrierFluid.set_fluid` method and a list of all
    available heat carrier fluid types can be obtained from the
    :meth:`~pygld.HeatCarrierFluid.get_avail_fluid_types` method. The
    reference temperature and antifreeze volumetric fraction can be changed
    by setting directly the value of the :attr:`~pygld.HeatCarrierFluid.Tref`
    and :attr:`~pygld.HeatCarrierFluid.fr` attributes. Printing an instance of
    :attr:`~pygld.HeatCarrierFluid` will print a summary of the fluid's
    independent and dependent properties.

    The freezing and boiling points of the
    :attr:`~pygld.HeatCarrierFluid.fluid` are evaluated for a given value of
    :attr:`~pygld.HeatCarrierFluid.fr` from a piecewise one-dimensional linear
    interpolation of the fluid's thermophysical properties table.

    The thermophysical properties of the
    :attr:`~pygld.HeatCarrierFluid.fluid` are determined for a given set of
    :attr:`~pygld.HeatCarrierFluid.Tref` and :attr:`~pygld.HeatCarrierFluid.fr`
    values from a two dimensional piecewise cubic interpolation of the
    fluid's thermophysical properties table.

    The derived thermophysical properties of the
    :attr:`~pygld.HeatCarrierFluid.fluid` are calculated from the interpolated
    values of the primary properties.

    .. note::
        Extrapolation is not allowed when evaluating the
        thermophysical properties of the fluid.
        A `nan` value is returned for sets of
        :attr:`~pygld.HeatCarrierFluid.Tref` and
        :attr:`~pygld.HeatCarrierFluid.fr` whose values fall outside of the
        thermophysical properties table of the fluid.
    """

    def __init__(self, fluid='water', Tref=20, fr=0):
        self.set_fluid(fluid)
        self.Tref = Tref
        self.fr = fr

    def __str__(self):
        """Return a text summary of the independent and dependent
        properties of the fluid at the current Tref and fr values."""
        str_ = "Type of fluid: %s" % self.fluid
        str_ += '\nAntifreeze volumetric fraction: %0.2f' % self.fr
        str_ += '\nFreezing point temperature (°C): %0.1f' % self.Tfp
        str_ += '\nBoiling point temperature (°C): %0.1f' % self.Tbp
        str_ += '\nTemperature of reference (°C): '
        str_ += array_to_str(self.Tref, "{:.1f}")
        str_ += '\nFluid density in (kg/m³): '
        str_ += array_to_str(self.rho, "{:.2f}")
        str_ += '\nCinematic viscosity (Pa·s): '
        str_ += array_to_str(self.mu, "{:.2e}")
        str_ += '\nThermal conductivity (W/m·k): '
        str_ += array_to_str(self.kth, "{:.3f}")
        str_ += '\nSpecific heat capacity (J/kg·K): '
        str_ += array_to_str(self.cp, "{:.1f}")
        str_ += '\nKynematic viscosity (m²/s): '
        str_ += array_to_str(self.nu, "{:.2e}")
        str_ += '\nPrantl number: '
        str_ += array_to_str(self.Pr, "{:.1f}")
        str_ += '\nThermal diffusivity (m²/s): '
        str_ += array_to_str(self.al, "{:.2e}")
        str_ += '\nVolumetric Heat Capacity (J/m³·K): '
        str_ += array_to_str(self.Cp, "{:.2e}")
        return str_

    # ---- Fluid type and data

    @property
    def hcfdata(self):
        """
        Return a shallow copy of the tables of thermophysical properties
        of the heat carrier :attr:`~pygld.HeatCarrierFluid.fluid`, as
        loaded from the bundled data file.
        """
        return copy.copy(self.__TTP)

    @property
    def fluid(self):
        """Return the type of fluid of the heat carrier.

        The type of fluid of the heat carrier is set to pure water by default.
        The heat carrier fluid is assumed to be pure water when
        :attr:`~pygld.HeatCarrierFluid.fr` is set to 0.
        """
        return self._fluid

    def set_fluid(self, x):
        """Set the type of fluid of the heat carrier fluid and
        load the tables of thermophysical properties of the fluid.

        If the specified fluid type is not available, a :class:`ValueError`
        is raised. The list of available heat carrier fluid types can be
        obtained with the :meth:`~pygld.HeatCarrierFluid.get_avail_fluid_types`
        method.
        """
        if x == 'prop_glycol':
            self._fluid = x
            filename = 'proptables_propglycol.npy'
        elif x == 'ethyl_glycol':
            self._fluid = x
            filename = 'proptables_ethylglycol.npy'
        elif x == 'water':
            self._fluid = 'water'
            filename = 'proptables_purewater.npy'
        else:
            raise ValueError('Supported fluid value are', FLUIDS)
        pathname = osp.join(__datadir__, filename)
        # TTP: Table of Thermophysical Properties, indexed by property name
        # (e.g. 'freez_point', 'density') — loaded from the bundled .npy file.
        self.__TTP = np.load(pathname)

    # ---- Independent properties

    @property
    def Tref(self):
        """Temperature of the fluid in °C.

        Get or set the temperature of the heat carrier fluid as a single value
        or a series of values stored in a dict, a tuple or a numpy array.
        A numpy array will always be returned when getting
        :attr:`~pygld.HeatCarrierFluid.Tref`, independently of the
        format used to set the attribute.
        """
        return np.copy(self._Tref)

    @Tref.setter
    def Tref(self, x):
        # Wrap scalars in a 1-element array so that downstream code can
        # always assume an iterable.
        self._Tref = np.array([x]) if not hasattr(x, '__iter__') else np.array(x)

    @property
    def fr(self):
        """Antifreeze volumetric fraction of the heat carrier fluid in m³/m³
        (0 ≤ fr ≤ 1).

        Get or set the antifreeze volumetric fraction of the heat carrier
        fluid. The value of `fr` must be between 0 and 1 and is assumed to be
        0 when :attr:`~pygld.HeatCarrierFluid.fluid` is set to 'water'.
        """
        return 0 if self.fluid == 'water' else self._fr

    @fr.setter
    def fr(self, x):
        if x > 1 or x < 0:
            # Error message fixed to match the check: fr == 1 is allowed.
            raise ValueError('fr must be between 0 and 1 (0 <= fr <= 1).')
        self._fr = x

    # ---- Primary dependent properties

    @property
    def Tfp(self):
        """Freezing point temperature in °C.

        Return the freezing point temperature of the fluid calculated
        from a piecewise one-dimensional linear interpolation of the
        fluid's thermophysical properties table.
        """
        if self.fluid == 'water':
            return self.__TTP['freez_point']
        else:
            x = self.fr
            xp = self.__TTP['freez_point'][0]
            yp = self.__TTP['freez_point'][1]
            return np.interp(x, xp, yp)

    @property
    def Tbp(self):
        """Boiling point temperature in °C.

        Return the boiling point temperature of the fluid calculated
        from a piecewise one-dimensional linear interpolation of the
        fluid's thermophysical properties table.
        """
        if self.fluid == 'water':
            return self.__TTP['boil_point']
        else:
            x = self.fr
            xp = self.__TTP['boil_point'][0]
            yp = self.__TTP['boil_point'][1]
            return np.interp(x, xp, yp)

    @property
    def rho(self):
        """Fluid density in kg/m³.

        Return a numpy array of a length that match that of
        :attr:`~pygld.HeatCarrierFluid.Tref` containing the values
        for the density of the fluid calculated
        from a piecewise two-dimensional cubic interpolation of the
        fluid's thermophysical properties table.
        """
        x = self.__TTP['density'][0]
        y = self.__TTP['density'][1]
        z = self.__TTP['density'][2]
        return self._interp(x, z, y)

    @property
    def mu(self):
        """Cinematic viscosity in Pa·s.

        Return a numpy array of a length that match that of
        :attr:`~pygld.HeatCarrierFluid.Tref` containing the values
        for the cinematic viscosity of the fluid calculated
        from a piecewise two-dimensional cubic interpolation of the
        fluid's thermophysical properties table.
        """
        x = self.__TTP['viscosity'][0]
        y = self.__TTP['viscosity'][1]
        # Table values are in mPa·s; convert to Pa·s.
        z = self.__TTP['viscosity'][2]/1000
        return self._interp(x, z, y)

    @property
    def kth(self):
        """Thermal conductivity in W/(m·k).

        Return a numpy array of a length that match that of
        :attr:`~pygld.HeatCarrierFluid.Tref` containing the values
        for the thermal conductivity of the fluid calculated
        from a piecewise two-dimensional cubic interpolation of the
        fluid's thermophysical properties table.
        """
        x = self.__TTP['ther_cond'][0]
        y = self.__TTP['ther_cond'][1]
        z = self.__TTP['ther_cond'][2]
        return self._interp(x, z, y)

    @property
    def cp(self):
        """Specific heat capacity in J/(kg·K)

        Return a numpy array of a length that match that of
        :attr:`~pygld.HeatCarrierFluid.Tref` containing the values
        for the specific heat capacity of the fluid calculated
        from a two dimensional piecewise cubic interpolation of the
        fluid's thermophysical properties table.
        """
        x = self.__TTP['spec_heat'][0]
        y = self.__TTP['spec_heat'][1]
        # Table values are in kJ/(kg·K); convert to J/(kg·K).
        z = self.__TTP['spec_heat'][2]*1000
        return self._interp(x, z, y)

    # ---- Derived dependent properties

    @property
    def nu(self):
        """Kynematic viscosity in m²/s.

        Return a numpy array of a length that match that of
        :attr:`~pygld.HeatCarrierFluid.Tref` containing the values
        for the kynematic viscosity of the fluid calculated as:

        .. math::
            nu[i] = mu[i] \\, / \\, rho[i]

        where :math:`i` is the index at which :math:`nu` is computed,
        :math:`mu` is the cinematic viscosity in Pa·s,
        and :math:`rho` is the density of the fluid in kg/m³.
        """
        return self.mu/self.rho

    @property
    def Pr(self):
        """Prantl number.

        Return a numpy array of a length that match that of
        :attr:`~pygld.HeatCarrierFluid.Tref` containing the values
        for the Prantl number of the fluid calculated as:

        .. math::
            Pr[i] = \\frac{mu[i]}{cp[i] \\cdot kth[i]}

        where :math:`i` is the index at which :math:`Pr` is computed,
        :math:`mu` is the cinematic viscosity in Pa·s,
        :math:`cp` is the specific heat capacity in J/(kg·K),
        and :math:`kth` is the thermal conductivity in W/(m·k).
        """
        return self.mu * self.cp / self.kth

    @property
    def al(self):
        """Thermal diffusivity in m²/s.

        Return a numpy array of a length that match that of
        :attr:`~pygld.HeatCarrierFluid.Tref` containing the values
        for the thermal diffusivity of the fluid calculated as:

        .. math::
            al[i] = \\frac{kth[i]}{cp[i] \\cdot rho[i]}

        where :math:`i` is the index at which :math:`al` is computed,
        :math:`kth` is the thermal conductivity in W/(m·k),
        :math:`cp` is the specific heat capacity in J/(kg·K),
        and :math:`rho` is the density of the fluid in kg/m³.
        """
        return self.kth / (self.cp * self.rho)

    @property
    def Cp(self):
        """Volumetric Heat Capacity in J/(m³·K).

        Return a numpy array of a length that match that of
        :attr:`~pygld.HeatCarrierFluid.Tref` containing the values
        for the volumetric heat capacity of the fluid calculated as:

        .. math::
            Cp[i] = cp[i] \\cdot rho[i]

        where :math:`i` is the index at which :math:`Cp` is computed,
        :math:`cp` is the specific heat capacity in J/(kg·K),
        and :math:`rho` is the density of the fluid in kg/m³.
        """
        return self.cp * self.rho

    # ---- Calculs

    def _interp(self, x, z, y=None):
        """
        Interpolate a value from table for aqueous solutions and pure water.

        x is temperature
        y is the antifreeze volumetric fraction
        z is the thermodynamic variable
        """
        x1 = self.Tref
        if self.fluid == 'water':
            z1 = interpolate.griddata(x, z, x1, method='cubic')
        else:
            # Broadcast fr to the length of Tref when Tref holds a series.
            if np.size(x1) == 1:
                y1 = self.fr
            else:
                y1 = np.ones(np.size(x1)) * self.fr
            z1 = interpolate.griddata((x, y), z, (x1, y1), method='cubic')
        return z1

    def get_avail_fluid_types(self):
        """Return a list of all available heat carrier fluid types."""
        return copy.copy(FLUIDS)
if __name__ == '__main__':
    # Quick demo: summary of the default pure-water fluid, the list of
    # available fluid types, then a 30% propylene-glycol mixture evaluated
    # at three reference temperatures.
    demo = HeatCarrierFluid()
    print(demo)
    print('')
    print(demo.get_avail_fluid_types())
    print('')
    demo.set_fluid('prop_glycol')
    demo.fr = 0.3
    demo.Tref = [28, 14, 0]
    print(demo)
| [
"numpy.load",
"numpy.size",
"numpy.copy",
"pygld.utils.strformating.array_to_str",
"scipy.interpolate.griddata",
"copy.copy",
"numpy.array",
"numpy.interp",
"os.path.join"
] | [((3249, 3282), 'pygld.utils.strformating.array_to_str', 'array_to_str', (['self.Tref', '"""{:.1f}"""'], {}), "(self.Tref, '{:.1f}')\n", (3261, 3282), False, 'from pygld.utils.strformating import array_to_str\n'), ((3346, 3378), 'pygld.utils.strformating.array_to_str', 'array_to_str', (['self.rho', '"""{:.2f}"""'], {}), "(self.rho, '{:.2f}')\n", (3358, 3378), False, 'from pygld.utils.strformating import array_to_str\n'), ((3444, 3475), 'pygld.utils.strformating.array_to_str', 'array_to_str', (['self.mu', '"""{:.2e}"""'], {}), "(self.mu, '{:.2e}')\n", (3456, 3475), False, 'from pygld.utils.strformating import array_to_str\n'), ((3543, 3575), 'pygld.utils.strformating.array_to_str', 'array_to_str', (['self.kth', '"""{:.3f}"""'], {}), "(self.kth, '{:.3f}')\n", (3555, 3575), False, 'from pygld.utils.strformating import array_to_str\n'), ((3646, 3677), 'pygld.utils.strformating.array_to_str', 'array_to_str', (['self.cp', '"""{:.1f}"""'], {}), "(self.cp, '{:.1f}')\n", (3658, 3677), False, 'from pygld.utils.strformating import array_to_str\n'), ((3744, 3775), 'pygld.utils.strformating.array_to_str', 'array_to_str', (['self.nu', '"""{:.2e}"""'], {}), "(self.nu, '{:.2e}')\n", (3756, 3775), False, 'from pygld.utils.strformating import array_to_str\n'), ((3828, 3859), 'pygld.utils.strformating.array_to_str', 'array_to_str', (['self.Pr', '"""{:.1f}"""'], {}), "(self.Pr, '{:.1f}')\n", (3840, 3859), False, 'from pygld.utils.strformating import array_to_str\n'), ((3925, 3956), 'pygld.utils.strformating.array_to_str', 'array_to_str', (['self.al', '"""{:.2e}"""'], {}), "(self.al, '{:.2e}')\n", (3937, 3956), False, 'from pygld.utils.strformating import array_to_str\n'), ((4029, 4060), 'pygld.utils.strformating.array_to_str', 'array_to_str', (['self.Cp', '"""{:.2e}"""'], {}), "(self.Cp, '{:.2e}')\n", (4041, 4060), False, 'from pygld.utils.strformating import array_to_str\n'), ((4332, 4353), 'copy.copy', 'copy.copy', (['self.__TTP'], {}), '(self.__TTP)\n', (4341, 4353), False, 
'import copy\n'), ((5517, 5548), 'os.path.join', 'osp.join', (['__datadir__', 'filename'], {}), '(__datadir__, filename)\n', (5525, 5548), True, 'import os.path as osp\n'), ((5621, 5638), 'numpy.load', 'np.load', (['pathname'], {}), '(pathname)\n', (5628, 5638), True, 'import numpy as np\n'), ((6100, 6119), 'numpy.copy', 'np.copy', (['self._Tref'], {}), '(self._Tref)\n', (6107, 6119), True, 'import numpy as np\n'), ((13378, 13395), 'copy.copy', 'copy.copy', (['FLUIDS'], {}), '(FLUIDS)\n', (13387, 13395), False, 'import copy\n'), ((6173, 6186), 'numpy.array', 'np.array', (['[x]'], {}), '([x])\n', (6181, 6186), True, 'import numpy as np\n'), ((6222, 6233), 'numpy.array', 'np.array', (['x'], {}), '(x)\n', (6230, 6233), True, 'import numpy as np\n'), ((7408, 7428), 'numpy.interp', 'np.interp', (['x', 'xp', 'yp'], {}), '(x, xp, yp)\n', (7417, 7428), True, 'import numpy as np\n'), ((7933, 7953), 'numpy.interp', 'np.interp', (['x', 'xp', 'yp'], {}), '(x, xp, yp)\n', (7942, 7953), True, 'import numpy as np\n'), ((12940, 12986), 'scipy.interpolate.griddata', 'interpolate.griddata', (['x', 'z', 'x1'], {'method': '"""cubic"""'}), "(x, z, x1, method='cubic')\n", (12960, 12986), False, 'from scipy import interpolate\n'), ((13177, 13234), 'scipy.interpolate.griddata', 'interpolate.griddata', (['(x, y)', 'z', '(x1, y1)'], {'method': '"""cubic"""'}), "((x, y), z, (x1, y1), method='cubic')\n", (13197, 13234), False, 'from scipy import interpolate\n'), ((13043, 13054), 'numpy.size', 'np.size', (['x1'], {}), '(x1)\n', (13050, 13054), True, 'import numpy as np\n'), ((13137, 13148), 'numpy.size', 'np.size', (['x1'], {}), '(x1)\n', (13144, 13148), True, 'import numpy as np\n')] |
#!/bin/env python
import argparse
import numpy as np
import matplotlib.pyplot as plt
import yt
def doit(outfile="hse.png"):
    """Overplot |velocity| along an x-axis ray for three flame_wave
    resolutions (128/256/512 zones) and save the figure to *outfile*."""
    runs = [("128", yt.load("flame_wave_128_plt01600")),
            ("256", yt.load("flame_wave_256_plt03200")),
            ("512", yt.load("flame_wave_512_plt06400"))]

    fig, axis = plt.subplots(1, 1)
    for label, ds in runs:
        # Ray from the origin along x; the sample points come back unordered,
        # so sort them by position before plotting.
        ray = ds.ray((0, 0, 0), (2.56e3, 0, 0))
        order = np.argsort(ray["x"])
        axis.plot(ray["x"][order], ray["magvel"][order], label=label)

    axis.legend(frameon=False)
    axis.set_yscale("log")
    fig.tight_layout()
    fig.savefig(outfile)
if __name__ == "__main__":
    # Optional positional argument selects the output image file name.
    parser = argparse.ArgumentParser()
    parser.add_argument("outfile", type=str, nargs="?", default="hse.png")
    cli = parser.parse_args()
    doit(outfile=cli.outfile)
| [
"numpy.argsort",
"yt.load",
"matplotlib.pyplot.subplots",
"argparse.ArgumentParser"
] | [((139, 173), 'yt.load', 'yt.load', (['"""flame_wave_128_plt01600"""'], {}), "('flame_wave_128_plt01600')\n", (146, 173), False, 'import yt\n'), ((184, 218), 'yt.load', 'yt.load', (['"""flame_wave_256_plt03200"""'], {}), "('flame_wave_256_plt03200')\n", (191, 218), False, 'import yt\n'), ((230, 264), 'yt.load', 'yt.load', (['"""flame_wave_512_plt06400"""'], {}), "('flame_wave_512_plt06400')\n", (237, 264), False, 'import yt\n'), ((280, 298), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(1)'], {}), '(1, 1)\n', (292, 298), True, 'import matplotlib.pyplot as plt\n'), ((651, 676), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (674, 676), False, 'import argparse\n'), ((426, 446), 'numpy.argsort', 'np.argsort', (["ray['x']"], {}), "(ray['x'])\n", (436, 446), True, 'import numpy as np\n')] |
"""*****************************************************************************************
MIT License
Copyright (c) 2019 <NAME>, <NAME>, <NAME>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
*****************************************************************************************"""
################################### NOTES ###########################################
# - Please cite our paper when using the code:
# "Accurate Coresets"
# <NAME> and <NAME> and <NAME>
#
# - Code for other coresets, both accurate and eps-coresets, will be published soon.
#####################################################################################
import matplotlib.pyplot as plt
import numpy as np
import scipy.linalg
import time
from helper_functions import Fast_Caratheodory, train_model, get_new_clf, test_model
from sklearn import linear_model
from sklearn.decomposition import PCA
class WeightedSet:
    """A weighted point set.

    Attributes:
        P (ndarray): points, shape (n, d); 1-D input is reshaped to (n, 1).
        W (ndarray): non-negative weights, kept as a (1, n) row vector.
        Y (ndarray or None): optional labels, shape (n, k).
        n, d (int): number of points and their dimension.
        sum_W (float): total weight.
        weighted_sum (ndarray): P^T W^T, shape (d, 1).
    """

    def __init__(self, P, W, Y=None):
        self.P = np.array(P)
        if self.P.ndim == 1:
            self.P = self.P.reshape(-1, 1)
        self.n = self.P.shape[0]
        self.d = self.P.shape[1]
        self.W = np.array(W).reshape(1, -1)
        # Use the converted array's dtype so that list inputs work too
        # (raw lists have no .dtype attribute).
        self.dtype = self.P.dtype
        self.Y = Y
        if self.Y is not None:
            self.Y = np.asarray(Y)
            if self.Y.shape[0] != self.n:
                self.Y = self.Y.reshape(self.n, -1)
        self.sum_W = np.sum(self.W)
        self.weighted_sum = self.P.T.dot(self.W.T)  # shape (d, 1)

    def add_point(self, p, w, y):
        """Append a single point *p* (shape (d,)) with weight *w* and
        optional label *y*, updating the cached sums incrementally."""
        p = np.asarray(p).reshape(1, -1)
        self.P = np.append(self.P, p, axis=0)
        # Keep W as a (1, n) row vector (a bare np.append would flatten it).
        self.W = np.append(self.W, w).reshape(1, -1)
        if self.Y is not None:
            self.Y = np.append(self.Y, np.asarray(y).reshape(1, -1), axis=0)
        self.n = self.n + 1
        self.sum_W = self.sum_W + w
        # Use p.T (shape (d, 1)) so the update broadcasts as (d,1)+(d,1);
        # adding a flat (d,) vector would wrongly produce a (d, d) matrix.
        self.weighted_sum = self.weighted_sum + w * p.T
# Accurate 1 center coreset
# input: unweighted data P (nXd) on a line in R^d (ignores the weights in Pset if they exist)
# Output: row indices of the coreset points C \subseteq P
def one_center(Pset):
    """Accurate 1-center coreset for points of Pset lying on a line in R^d.

    Returns a WeightedSet holding the two extreme points of P, each with unit
    weight. When ``check_weights_if_ones(Pset)`` returns 0 the input set is
    returned as-is (presumably the unit-weight precondition fails — confirm
    against the helper's definition).
    """
    if check_weights_if_ones(Pset) == 0:
        return Pset
    # Farthest point from an arbitrary start, then farthest from that one:
    # on a line these two are the endpoints of the point set.
    endpoint_a = Pset.P[findFarthestPointFromCenter(Pset, Pset.P[0])]
    endpoint_b = Pset.P[findFarthestPointFromCenter(Pset, endpoint_a)]
    extremes = np.array([endpoint_a, endpoint_b])
    return WeightedSet(extremes, np.ones(extremes.shape[0]))
def get_normalized_weighted_set(Pset):
    """Return a new WeightedSet with the same points whose weights are
    rescaled to sum to one."""
    return WeightedSet(Pset.P, Pset.W / Pset.sum_W)
# vectors sum coreset
# Input: a weighted set in R^d
def vectors_sum_1(Pset):
    """Vectors-sum coreset of a single point: the weighted sum of the
    normalized set, carrying the original total weight."""
    normalized = get_normalized_weighted_set(Pset)
    return WeightedSet(normalized.weighted_sum, Pset.sum_W)
# Vectors sum coreset 3
# Input: a weighted set in R^d. The sum of weights is not necesarrily 1. Therefore, we first divide the input weights by
# their sum, compute the subset coreset of bounded weights using Caratheodory's theorem, then multiply the obtained
# weights by the original sum of weights.
def vectors_sum_3(Pset):
    """Accurate vectors-sum coreset via Caratheodory's theorem.

    The weights are first normalized to sum to one; each point is then lifted
    with a trailing coordinate of 1 so that a convex combination reproducing
    the lifted weighted sum also preserves the total weight. A sparse such
    combination (at most d+2 points) is found with Caratheodory's theorem and
    the returned weights are rescaled by the original weight sum.
    """
    total_weight = Pset.sum_W
    target_sum = Pset.weighted_sum
    Pset = get_normalized_weighted_set(Pset)
    # Lift: append a constant coordinate of 1 to every point.
    lifted = np.concatenate((Pset.P, np.ones((Pset.P.shape[0], 1))), axis=1)
    Qset = WeightedSet(lifted, Pset.W)
    # Sparse convex combination reproducing the lifted weighted sum.
    W_cara = Fast_Caratheodory(Qset.P, Qset.W, Qset.d + 1)
    support = np.nonzero(W_cara)
    W_cara = W_cara[support]
    # Coreset: the original (normalized) points matching the chosen
    # lifted points, with weights scaled back to the original total.
    Cset = WeightedSet(Pset.P[support], W_cara * total_weight)
    # Sanity checks: weighted sum and total weight must be preserved.
    if np.linalg.norm(Cset.weighted_sum - target_sum) > small_number:
        print("Bad result - wrong weighted sum!!")
    if abs(Cset.sum_W - total_weight) > small_number:
        print("Bad result - wrong sum of weights")
    return Cset
# 1 mean coreset 3
# Input: a weighted set in R^d. The sum of weights is not necessarily 1. Therefore, we first divide the input
# weights by their sum, compute the subset coreset of bounded weights using Caratheodory's theorem, then multiply
# the obtained weights by the original sum of weights.
def one_mean_3(Pset):
    # Remember the original totals so the output can be rescaled and verified.
    pre_norm_sum_W = Pset.sum_W
    pre_norm_weighted_sum = Pset.weighted_sum
    Pset = get_normalized_weighted_set(Pset)
    # Add 2 more dimensions to each point: p -> (p, ||p||^2, 1)
    # (the squared norm lets Caratheodory preserve the 1-mean cost, the
    # ones column preserves the sum of weights).
    Q = Pset.P
    Q_norms = np.linalg.norm(Q, axis=1)
    Q = np.concatenate((Q, np.power(Q_norms.reshape(Q_norms.shape[0], 1),2), np.ones((Q.shape[0], 1))), axis=1)
    # Create another weighted set with Q and the same weights of Pset
    Qset = WeightedSet(Q, Pset.W)
    # Compute the weighted sum of Qset as a convex combination of at most d+3 points from Qset. d is the dimension of P
    # (Qset.d is d+2 after the two appended columns, hence the budget Qset.d + 1).
    W_cara = Fast_Caratheodory(Qset.P, Qset.W, Qset.d + 1)
    C_idx = np.nonzero(W_cara)
    P_cara = Qset.P[C_idx]
    W_cara = W_cara[C_idx]
    # testing: the chosen subset must reproduce the weighted sum of Q exactly.
    Tset = WeightedSet(P_cara, W_cara)
    if (np.linalg.norm(Tset.weighted_sum - Qset.weighted_sum) > small_number):
        print("Bad coreset!!")
        return Pset
    # Output coreset: the points of P that correspond to the points of Q that were chosen in the function updated_cara
    Cset = WeightedSet(Pset.P[C_idx], W_cara * pre_norm_sum_W)
    # Check that the weighted coreset points = weighted input points
    if (np.linalg.norm(Cset.weighted_sum - pre_norm_weighted_sum) > small_number):
        print("Bad result - wrong weighted sum!!")
        return Pset
    # check that the sum of weights of the coreset = sum of weights of the input points
    if (abs(Cset.sum_W - pre_norm_sum_W) > small_number):
        print("Bad result - wrong sum of weights")
        return Pset
    return Cset
# 1 segment coreset
# Input: a set P of R^{d+1}, where P = {(t_i | p_i)}_{i=1}^n and non-negative weights W
# Output: a small weighted set (rows of B with uniform weight c) whose 1-segment
# fitting cost matches the input's, plus the common weight c.
def one_segment(Pset):
    P = Pset.P
    W = Pset.W
    sqrt_W = np.sqrt(W)
    d = Pset.d - 1 # Dimension of the points p_i
    # Prepend a ones column (intercept term) to form the design matrix.
    X_unweighted = np.concatenate((np.ones((P.shape[0], 1)), P), axis=1)
    X = (X_unweighted.transpose()*sqrt_W).transpose() # Multiply the i'th row of X_unweighted by the i'th entry of sqrt_W
    # Thin SVD: X = U D V^T; DV^T has the same Gram matrix as X.
    U, D, Vt = scipy.linalg.svd(X, full_matrices=False)
    D = np.diag(D)
    # y is the leftmost column of DV^T
    u = np.matmul(D, Vt)[:, 0]
    c = (np.linalg.norm(u) ** 2) / (d + 2)
    w_vec = np.sqrt(c) * np.ones(d + 2)
    # compute a matrix Y such that Yu = w_vec (rotation, preserves the Gram matrix)
    Y = align_vectors(u, w_vec)
    # B is the (d+1) rightmost columns of YDV^T/sqrt(c)
    B = (np.matmul(Y, np.matmul(D, Vt)) / np.sqrt(c))[:, 1:]
    # the coreset is the rows of B, each with the uniform weight c
    Cset = WeightedSet(B, c*np.ones(B.shape[0]))
    # Sanity check against the input on a random segment (prints on mismatch).
    one_segment_coreset_checker(Pset, Cset)
    return Cset, c
def one_segment_coreset_checker(Pset, Cset):
    """Sanity-check a 1-segment coreset against its input.

    Draws a random affine segment (a, b) and compares the weighted sum of
    squared distances computed on the input set with the one computed on
    the coreset; prints a warning when they differ by more than
    ``small_number``.
    """
    dim = Pset.d - 1
    a = np.random.rand(1, dim)
    b = np.random.rand(1, dim)

    def _segment_cost(S):
        # Weighted sum of squared residuals of each point to the segment.
        return sum(
            S.W[0, i] * np.linalg.norm(a + b * S.P[i, 0] - S.P[i, 1:]) ** 2
            for i in range(S.n)
        )

    cost_coreset = _segment_cost(Cset)
    cost_input = _segment_cost(Pset)
    if np.abs(cost_coreset - cost_input) > small_number:
        print("Bad Coreset, {} - {}".format(cost_coreset, cost_input))
def align_vectors(u, v):
    """Return a rotation matrix R with R @ (u/||u||) == v/||v||.

    Both inputs are vectors in R^d; only their directions matter.
    """
    dim = u.size
    u = np.divide(u, np.linalg.norm(u))
    v = np.divide(v, np.linalg.norm(v))
    # Orthonormal bases whose first row is the unit vector itself.
    basis_u = np.concatenate((u.reshape(1, dim), orthogonal_complement(u).transpose()), axis=0)
    basis_v = np.concatenate((v.reshape(1, dim), orthogonal_complement(v).transpose()), axis=0)
    # First rotate u onto the first axis, then the first axis onto v.
    return np.matmul(basis_v.transpose(), basis_u)
def orthogonal_complement(x, threshold=1e-15):
    """Return an orthonormal basis of the orthogonal complement of x.

    Works along axis zero (rank == column rank, or rows > column rank);
    otherwise the complement is empty. The columns of the returned matrix
    span the subspace orthogonal to (the columns of) x.
    """
    norm_x = np.linalg.norm(x)
    if abs(norm_x - 1) > small_number:
        # Normalize non-unit input first.
        x = np.divide(x, np.linalg.norm(x))
    if x.shape[0] == x.size:
        # Promote a flat vector to a single-column matrix.
        x = x.reshape(x.size, 1)
    rows, cols = x.shape
    if rows < cols:
        import warnings
        warnings.warn('fewer rows than columns', UserWarning)
    # SVD singular values arrive sorted in decreasing order, so the
    # trailing left-singular vectors span the complement.
    left, singular, _right = scipy.linalg.svd(x)
    rank = (singular > threshold).sum()
    return left[:, rank:]
# Check if the weights of a weighted set are all ones
def check_weights_if_ones(Pset):
    """Return 1 when every weight of *Pset* is 1 (within ``small_number``); otherwise warn and return 0.

    Bug fix: the original compared the *signed* sum of deviations
    ``np.sum(Pset.W - 1)``, so deviations of opposite sign (e.g.
    [0.5, 1.5]) cancelled out and uniformly-small weights (e.g.
    [0.5, 0.5]) produced a negative sum — both incorrectly passed the
    check. Comparing the sum of absolute deviations fixes this.
    """
    if np.sum(np.abs(Pset.W - np.ones(Pset.n))) > small_number:
        print("Weights should all be ones!")
        return 0
    return 1
def check_sum_weights_if_ones(Pset):
    """Return 1 when the weights of *Pset* sum to 1 (within ``small_number``); otherwise warn and return 0."""
    deviation = abs(Pset.sum_W - 1)
    if deviation <= small_number:
        return 1
    print("Weights do not sum to one! exiting..")
    return 0
def matrix_norm(Pset):
    """Matrix 2-norm coreset of an *unweighted* set: the R factor of a QR decomposition.

    Requires every weight to be 1; when that precondition fails the input
    set is returned unchanged.
    """
    if check_weights_if_ones(Pset) == 0:
        # Precondition violated: hand back the input untouched.
        return Pset
    _q, r_factor = np.linalg.qr(Pset.P)
    # R has the same Gram matrix as P, hence ||Rx|| == ||Px|| for all x.
    return WeightedSet(r_factor, np.ones(r_factor.shape[0]))
# Matrix 2 norm coreset which is a subset of the data
# Input: A set P with positive weights W (and optional labels Y, which are
# appended as extra columns so the coreset also preserves P^T Y terms).
def matrix_norm2(Pset, coreset_size=None):
    P = Pset.P
    Y = Pset.Y
    if Pset.Y is not None:
        P_tag = np.append(P, Y, axis=1)
    else:
        P_tag = P
    W = Pset.W
    # reshape so we can use the einsum
    n_tag = P_tag.shape[0];
    d_tag = P_tag.shape[1]
    P_tag = P_tag.reshape(n_tag, d_tag, 1);
    # build the tensor off all covariance matrices of the row vector of P_tag
    # (each row becomes the flattened outer product p p^T, so Caratheodory on
    # these vectors preserves the weighted covariance sum).
    P_tag = np.einsum("ikj,ijk->ijk", P_tag, P_tag)
    P_tag = P_tag.reshape(n_tag, -1)
    coreset_weigts = Fast_Caratheodory(P_tag, W, coreset_size, dtype=Pset.dtype)
    # Points with zero Caratheodory weight are dropped from the coreset.
    new_idx_array = np.nonzero(coreset_weigts)
    coreset_weigts = coreset_weigts[new_idx_array]
    coreset_weigts = coreset_weigts.reshape(-1,1)
    if Pset.Y is not None:
        Cset = WeightedSet(P[new_idx_array], coreset_weigts.reshape(1, -1), Y[new_idx_array])
    else:
        Cset = WeightedSet(P[new_idx_array], coreset_weigts.reshape(1, -1), None)
    #matrix_norm_checker(Pset, Cset)
    return Cset
def matrix_norm_checker(Pset, Cset):
    """Spot-check ||Px|| against ||C'x|| for a random x, where C' is the sqrt-weight-scaled coreset.

    Prints a warning when the two norms differ by more than ``small_number``.
    """
    x = np.random.rand(3, 1)
    scaled_coreset = np.diag(np.sqrt(Cset.W).reshape(-1)).dot(Cset.P)
    norm_full = np.abs(np.linalg.norm(np.matmul(Pset.P, x)))
    norm_coreset = np.abs(np.linalg.norm(np.matmul(scaled_coreset, x)))
    gap = np.abs(norm_full - norm_coreset)
    if gap > small_number:
        print("Not good!", gap)
def LMS_solvers(Pset, solver, coreset_size=None):
    """Build an LMS coreset of *Pset* for the named solver.

    linear/ridge use the matrix-norm coreset as-is; lasso/elastic get
    their weights rescaled by |coreset| / |input| so the regularization
    penalty stays comparable. An unknown solver name prints a warning and
    yields None.
    """
    Cset = matrix_norm2(Pset, coreset_size)
    if solver in ("linear", "ridge"):
        return Cset
    if solver in ("lasso", "elastic"):
        return WeightedSet(Cset.P, (Cset.n / Pset.n) * Cset.W, Cset.Y)
    print("wrong solver name")
def coreset_train_model(Pset, clf, solver):
    """Fit *clf* on the sqrt-weighted LMS coreset of *Pset*.

    Returns (elapsed_seconds, clf); the timing covers coreset
    construction plus the fit.
    """
    start = time.time()
    Cset = LMS_solvers(Pset, solver, coreset_size=None)
    sqrt_weights = np.sqrt(Cset.W.reshape(-1, 1))
    # Folding sqrt(w) into both features and labels reproduces the weighted fit.
    clf.fit(Cset.P * sqrt_weights, Cset.Y * sqrt_weights)
    elapsed = time.time() - start
    return elapsed, clf
def regressions_checker():
    """Self-test: for each regularized solver, compare a model trained on the
    LMS coreset against one trained on the full random data and warn when
    their scores differ beyond ``small_number``."""
    n = 240000
    d = 3
    data_range = 100
    # Random integer-valued features and labels in [0, data_range).
    data = np.floor(np.random.rand(n, d) * data_range)
    labels = np.floor(np.random.rand(n, 1) * data_range)
    weights = np.ones(n)
    Pset = WeightedSet(data, weights, labels)
    for solver in ["lasso", "ridge", "elastic"]:
        #########RIDGE REGRESSION#############
        clf = get_new_clf(solver)
        time_coreset, clf_coreset = coreset_train_model(Pset, clf, solver=solver)
        score_coreset = test_model(Pset, clf)
        clf = get_new_clf(solver)
        time_real, clf_real = train_model(Pset, clf)
        score_real = test_model(Pset, clf)
        """
        print (" solver: {}\nscore_diff = {}\n---->coef diff = {}\n---->coreset_time = {}\n---->data time = {}".format(
            solver,
            np.abs(score_coreset - score_real),
            np.sum(np.abs(clf_real.coef_ - clf_coreset.coef_)),
            time_coreset,
            time_real))
        """
        # Coreset-trained and full-data-trained models must score the same.
        if np.abs(score_coreset - score_real) > small_number :
            print ("Not good. Error in LMS CORESET")
def findclosestcenter(p_w, centers):
    """Return the smallest summed weighted squared distance from p_w to any center."""
    costs = [dist2point(p_w, center) for center in centers]
    return np.min(np.array(costs))
def dist2point(p_w,center,cost="sum"):
distance = p_w.P - center
norm_distance = np.linalg.norm(distance, axis=1)
squared_norm = np.power(norm_distance, 2)
if cost == "sum":
total = np.sum(squared_norm * p_w.W)
else:
total = np.max(squared_norm * p_w.W)
return total
def findFarthestPointFromCenter(p_w,centers):
    """Return the index of the center minimizing the weighted max squared distance.

    NOTE(review): despite the name, this returns ``np.argmin`` over the
    per-center "max" costs — i.e. the center whose farthest point is
    *closest* — not a farthest point. Confirm against callers whether
    ``argmax`` was intended.
    """
    dists = []
    for t, center in enumerate(centers):
        # Weighted maximum squared distance of any point of p_w to this center.
        total = dist2point(p_w, center,cost="max")
        dists.append(total)
    return np.argmin(np.array(dists))
def main():
    """Smoke-test driver: exercises each coreset construction on small inputs."""
    global small_number
    # Numerical tolerance shared by all checker helpers in this module.
    small_number = 0.000001
    regressions_checker()
    P = np.array([[1.7, 0, 2], [3, 1.5, 0], [-7, 5, 0], [6, 2, 1], [1, 1, 2.2], [2, 5, 1], [6, 5, 2]])
    W = np.array([1, 2, 3, 1, 2, 1.5, 0.5])
    W = np.divide(W, np.sum(W))
    Pset = WeightedSet(P, W)
    Qset = WeightedSet(np.array([1, 2, 3]), [5, 5, 5])
    # ########## test one_center
    # Random rotation (Q factor of a random matrix) applied to collinear points.
    R, _ = np.linalg.qr(np.random.randn(3, 3))
    P_one_center = np.array([[1, 0, 0], [3, 0, 0], [-7, 0, 0], [6, 0, 0], [-1, 0, 0], [20, 0, 0]])
    P_one_center = np.matmul(P_one_center, R)
    Pset_one_center = WeightedSet(P_one_center, np.ones(P_one_center.shape[0]))
    Cset = one_center(Pset_one_center)
    # ########## test vectors_sum_1
    Cset = vectors_sum_1(Qset)
    ########## test vectors_sum_3
    vectors_sum_3(Pset)
    ########## test one_mean_3
    one_mean_3(Pset)
    ########## test one_segment
    P_one_segment = np.array([[1, 0, 2], [2, 1.5, 0], [3, 5, 0], [4, 2, 1], [5, 1, 2.2], [6, 5, 1], [7, 5, 2]])
    W_one_segment = np.random.rand(P.shape[0])
    Pset_one_segment = WeightedSet(P_one_segment, W_one_segment)
    Cset, c = one_segment(Pset_one_segment)
    ########## test matrix 2 norm
    P = np.floor(np.random.rand(10, 3) * 10)
    W_matix_norm = np.ones(P.shape[0])
    P_matrix_norm = WeightedSet(P, W_matix_norm)
    Cset = matrix_norm2(P_matrix_norm)
    matrix_norm_checker(P_matrix_norm, Cset)
    print("All good!")
if __name__ == '__main__':
    main()
| [
"numpy.sum",
"numpy.abs",
"numpy.linalg.qr",
"numpy.einsum",
"numpy.ones",
"helper_functions.test_model",
"numpy.linalg.norm",
"numpy.diag",
"helper_functions.get_new_clf",
"numpy.random.randn",
"numpy.power",
"numpy.append",
"numpy.max",
"helper_functions.Fast_Caratheodory",
"helper_fun... | [((3415, 3432), 'numpy.array', 'np.array', (['[p, p2]'], {}), '([p, p2])\n', (3423, 3432), True, 'import numpy as np\n'), ((4763, 4808), 'helper_functions.Fast_Caratheodory', 'Fast_Caratheodory', (['Qset.P', 'Qset.W', '(Qset.d + 1)'], {}), '(Qset.P, Qset.W, Qset.d + 1)\n', (4780, 4808), False, 'from helper_functions import Fast_Caratheodory, train_model, get_new_clf, test_model\n'), ((4822, 4840), 'numpy.nonzero', 'np.nonzero', (['W_cara'], {}), '(W_cara)\n', (4832, 4840), True, 'import numpy as np\n'), ((6034, 6059), 'numpy.linalg.norm', 'np.linalg.norm', (['Q'], {'axis': '(1)'}), '(Q, axis=1)\n', (6048, 6059), True, 'import numpy as np\n'), ((6419, 6464), 'helper_functions.Fast_Caratheodory', 'Fast_Caratheodory', (['Qset.P', 'Qset.W', '(Qset.d + 1)'], {}), '(Qset.P, Qset.W, Qset.d + 1)\n', (6436, 6464), False, 'from helper_functions import Fast_Caratheodory, train_model, get_new_clf, test_model\n'), ((6478, 6496), 'numpy.nonzero', 'np.nonzero', (['W_cara'], {}), '(W_cara)\n', (6488, 6496), True, 'import numpy as np\n'), ((7586, 7596), 'numpy.sqrt', 'np.sqrt', (['W'], {}), '(W)\n', (7593, 7596), True, 'import numpy as np\n'), ((7911, 7921), 'numpy.diag', 'np.diag', (['D'], {}), '(D)\n', (7918, 7921), True, 'import numpy as np\n'), ((8520, 8540), 'numpy.random.rand', 'np.random.rand', (['(1)', 'd'], {}), '(1, d)\n', (8534, 8540), True, 'import numpy as np\n'), ((8550, 8570), 'numpy.random.rand', 'np.random.rand', (['(1)', 'd'], {}), '(1, d)\n', (8564, 8570), True, 'import numpy as np\n'), ((11124, 11139), 'numpy.linalg.qr', 'np.linalg.qr', (['P'], {}), '(P)\n', (11136, 11139), True, 'import numpy as np\n'), ((11744, 11783), 'numpy.einsum', 'np.einsum', (['"""ikj,ijk->ijk"""', 'P_tag', 'P_tag'], {}), "('ikj,ijk->ijk', P_tag, P_tag)\n", (11753, 11783), True, 'import numpy as np\n'), ((11846, 11905), 'helper_functions.Fast_Caratheodory', 'Fast_Caratheodory', (['P_tag', 'W', 'coreset_size'], {'dtype': 'Pset.dtype'}), '(P_tag, W, coreset_size, 
dtype=Pset.dtype)\n', (11863, 11905), False, 'from helper_functions import Fast_Caratheodory, train_model, get_new_clf, test_model\n'), ((11927, 11953), 'numpy.nonzero', 'np.nonzero', (['coreset_weigts'], {}), '(coreset_weigts)\n', (11937, 11953), True, 'import numpy as np\n'), ((12386, 12406), 'numpy.random.rand', 'np.random.rand', (['(3)', '(1)'], {}), '(3, 1)\n', (12400, 12406), True, 'import numpy as np\n'), ((13125, 13136), 'time.time', 'time.time', ([], {}), '()\n', (13134, 13136), False, 'import time\n'), ((13318, 13329), 'time.time', 'time.time', ([], {}), '()\n', (13327, 13329), False, 'import time\n'), ((13567, 13577), 'numpy.ones', 'np.ones', (['n'], {}), '(n)\n', (13574, 13577), True, 'import numpy as np\n'), ((14780, 14812), 'numpy.linalg.norm', 'np.linalg.norm', (['distance'], {'axis': '(1)'}), '(distance, axis=1)\n', (14794, 14812), True, 'import numpy as np\n'), ((14833, 14859), 'numpy.power', 'np.power', (['norm_distance', '(2)'], {}), '(norm_distance, 2)\n', (14841, 14859), True, 'import numpy as np\n'), ((15340, 15438), 'numpy.array', 'np.array', (['[[1.7, 0, 2], [3, 1.5, 0], [-7, 5, 0], [6, 2, 1], [1, 1, 2.2], [2, 5, 1], [\n 6, 5, 2]]'], {}), '([[1.7, 0, 2], [3, 1.5, 0], [-7, 5, 0], [6, 2, 1], [1, 1, 2.2], [2,\n 5, 1], [6, 5, 2]])\n', (15348, 15438), True, 'import numpy as np\n'), ((15444, 15479), 'numpy.array', 'np.array', (['[1, 2, 3, 1, 2, 1.5, 0.5]'], {}), '([1, 2, 3, 1, 2, 1.5, 0.5])\n', (15452, 15479), True, 'import numpy as np\n'), ((15703, 15782), 'numpy.array', 'np.array', (['[[1, 0, 0], [3, 0, 0], [-7, 0, 0], [6, 0, 0], [-1, 0, 0], [20, 0, 0]]'], {}), '([[1, 0, 0], [3, 0, 0], [-7, 0, 0], [6, 0, 0], [-1, 0, 0], [20, 0, 0]])\n', (15711, 15782), True, 'import numpy as np\n'), ((15803, 15829), 'numpy.matmul', 'np.matmul', (['P_one_center', 'R'], {}), '(P_one_center, R)\n', (15812, 15829), True, 'import numpy as np\n'), ((16196, 16291), 'numpy.array', 'np.array', (['[[1, 0, 2], [2, 1.5, 0], [3, 5, 0], [4, 2, 1], [5, 1, 2.2], [6, 5, 1], [7, 
\n 5, 2]]'], {}), '([[1, 0, 2], [2, 1.5, 0], [3, 5, 0], [4, 2, 1], [5, 1, 2.2], [6, 5,\n 1], [7, 5, 2]])\n', (16204, 16291), True, 'import numpy as np\n'), ((16309, 16335), 'numpy.random.rand', 'np.random.rand', (['P.shape[0]'], {}), '(P.shape[0])\n', (16323, 16335), True, 'import numpy as np\n'), ((16550, 16569), 'numpy.ones', 'np.ones', (['P.shape[0]'], {}), '(P.shape[0])\n', (16557, 16569), True, 'import numpy as np\n'), ((2112, 2123), 'numpy.array', 'np.array', (['P'], {}), '(P)\n', (2120, 2123), True, 'import numpy as np\n'), ((2498, 2512), 'numpy.sum', 'np.sum', (['self.W'], {}), '(self.W)\n', (2504, 2512), True, 'import numpy as np\n'), ((2725, 2755), 'numpy.append', 'np.append', (['self.P', '[p]'], {'axis': '(0)'}), '(self.P, [p], axis=0)\n', (2734, 2755), True, 'import numpy as np\n'), ((2774, 2794), 'numpy.append', 'np.append', (['self.W', 'w'], {}), '(self.W, w)\n', (2783, 2794), True, 'import numpy as np\n'), ((3459, 3478), 'numpy.ones', 'np.ones', (['C.shape[0]'], {}), '(C.shape[0])\n', (3466, 3478), True, 'import numpy as np\n'), ((5136, 5193), 'numpy.linalg.norm', 'np.linalg.norm', (['(Cset.weighted_sum - pre_norm_weighted_sum)'], {}), '(Cset.weighted_sum - pre_norm_weighted_sum)\n', (5150, 5193), True, 'import numpy as np\n'), ((6619, 6672), 'numpy.linalg.norm', 'np.linalg.norm', (['(Tset.weighted_sum - Qset.weighted_sum)'], {}), '(Tset.weighted_sum - Qset.weighted_sum)\n', (6633, 6672), True, 'import numpy as np\n'), ((7010, 7067), 'numpy.linalg.norm', 'np.linalg.norm', (['(Cset.weighted_sum - pre_norm_weighted_sum)'], {}), '(Cset.weighted_sum - pre_norm_weighted_sum)\n', (7024, 7067), True, 'import numpy as np\n'), ((7973, 7989), 'numpy.matmul', 'np.matmul', (['D', 'Vt'], {}), '(D, Vt)\n', (7982, 7989), True, 'import numpy as np\n'), ((8057, 8067), 'numpy.sqrt', 'np.sqrt', (['c'], {}), '(c)\n', (8064, 8067), True, 'import numpy as np\n'), ((8070, 8084), 'numpy.ones', 'np.ones', (['(d + 2)'], {}), '(d + 2)\n', (8077, 8084), True, 'import numpy as 
np\n'), ((8869, 8902), 'numpy.abs', 'np.abs', (['(sum_all_coreset - sum_all)'], {}), '(sum_all_coreset - sum_all)\n', (8875, 8902), True, 'import numpy as np\n'), ((9138, 9155), 'numpy.linalg.norm', 'np.linalg.norm', (['u'], {}), '(u)\n', (9152, 9155), True, 'import numpy as np\n'), ((9179, 9196), 'numpy.linalg.norm', 'np.linalg.norm', (['v'], {}), '(v)\n', (9193, 9196), True, 'import numpy as np\n'), ((10267, 10320), 'warnings.warn', 'warnings.warn', (['"""fewer rows than columns"""', 'UserWarning'], {}), "('fewer rows than columns', UserWarning)\n", (10280, 10320), False, 'import warnings\n'), ((11169, 11188), 'numpy.ones', 'np.ones', (['C.shape[0]'], {}), '(C.shape[0])\n', (11176, 11188), True, 'import numpy as np\n'), ((11438, 11461), 'numpy.append', 'np.append', (['P', 'Y'], {'axis': '(1)'}), '(P, Y, axis=1)\n', (11447, 11461), True, 'import numpy as np\n'), ((12616, 12648), 'numpy.abs', 'np.abs', (['(real_data - coreset_data)'], {}), '(real_data - coreset_data)\n', (12622, 12648), True, 'import numpy as np\n'), ((13740, 13759), 'helper_functions.get_new_clf', 'get_new_clf', (['solver'], {}), '(solver)\n', (13751, 13759), False, 'from helper_functions import Fast_Caratheodory, train_model, get_new_clf, test_model\n'), ((13868, 13889), 'helper_functions.test_model', 'test_model', (['Pset', 'clf'], {}), '(Pset, clf)\n', (13878, 13889), False, 'from helper_functions import Fast_Caratheodory, train_model, get_new_clf, test_model\n'), ((13907, 13926), 'helper_functions.get_new_clf', 'get_new_clf', (['solver'], {}), '(solver)\n', (13918, 13926), False, 'from helper_functions import Fast_Caratheodory, train_model, get_new_clf, test_model\n'), ((13958, 13980), 'helper_functions.train_model', 'train_model', (['Pset', 'clf'], {}), '(Pset, clf)\n', (13969, 13980), False, 'from helper_functions import Fast_Caratheodory, train_model, get_new_clf, test_model\n'), ((14003, 14024), 'helper_functions.test_model', 'test_model', (['Pset', 'clf'], {}), '(Pset, clf)\n', (14013, 
14024), False, 'from helper_functions import Fast_Caratheodory, train_model, get_new_clf, test_model\n'), ((14667, 14682), 'numpy.array', 'np.array', (['dists'], {}), '(dists)\n', (14675, 14682), True, 'import numpy as np\n'), ((14900, 14928), 'numpy.sum', 'np.sum', (['(squared_norm * p_w.W)'], {}), '(squared_norm * p_w.W)\n', (14906, 14928), True, 'import numpy as np\n'), ((14957, 14985), 'numpy.max', 'np.max', (['(squared_norm * p_w.W)'], {}), '(squared_norm * p_w.W)\n', (14963, 14985), True, 'import numpy as np\n'), ((15212, 15227), 'numpy.array', 'np.array', (['dists'], {}), '(dists)\n', (15220, 15227), True, 'import numpy as np\n'), ((15502, 15511), 'numpy.sum', 'np.sum', (['W'], {}), '(W)\n', (15508, 15511), True, 'import numpy as np\n'), ((15567, 15586), 'numpy.array', 'np.array', (['[1, 2, 3]'], {}), '([1, 2, 3])\n', (15575, 15586), True, 'import numpy as np\n'), ((15660, 15681), 'numpy.random.randn', 'np.random.randn', (['(3)', '(3)'], {}), '(3, 3)\n', (15675, 15681), True, 'import numpy as np\n'), ((15879, 15909), 'numpy.ones', 'np.ones', (['P_one_center.shape[0]'], {}), '(P_one_center.shape[0])\n', (15886, 15909), True, 'import numpy as np\n'), ((2836, 2856), 'numpy.append', 'np.append', (['self.Y', 'y'], {}), '(self.Y, y)\n', (2845, 2856), True, 'import numpy as np\n'), ((4415, 4439), 'numpy.ones', 'np.ones', (['(Q.shape[0], 1)'], {}), '((Q.shape[0], 1))\n', (4422, 4439), True, 'import numpy as np\n'), ((6138, 6162), 'numpy.ones', 'np.ones', (['(Q.shape[0], 1)'], {}), '((Q.shape[0], 1))\n', (6145, 6162), True, 'import numpy as np\n'), ((7684, 7708), 'numpy.ones', 'np.ones', (['(P.shape[0], 1)'], {}), '((P.shape[0], 1))\n', (7691, 7708), True, 'import numpy as np\n'), ((8008, 8025), 'numpy.linalg.norm', 'np.linalg.norm', (['u'], {}), '(u)\n', (8022, 8025), True, 'import numpy as np\n'), ((8269, 8279), 'numpy.sqrt', 'np.sqrt', (['c'], {}), '(c)\n', (8276, 8279), True, 'import numpy as np\n'), ((8355, 8374), 'numpy.ones', 'np.ones', (['B.shape[0]'], {}), 
'(B.shape[0])\n', (8362, 8374), True, 'import numpy as np\n'), ((10084, 10101), 'numpy.linalg.norm', 'np.linalg.norm', (['x'], {}), '(x)\n', (10098, 10101), True, 'import numpy as np\n'), ((12513, 12533), 'numpy.matmul', 'np.matmul', (['Pset.P', 'x'], {}), '(Pset.P, x)\n', (12522, 12533), True, 'import numpy as np\n'), ((12578, 12602), 'numpy.matmul', 'np.matmul', (['weighted_C', 'x'], {}), '(weighted_C, x)\n', (12587, 12602), True, 'import numpy as np\n'), ((12694, 12726), 'numpy.abs', 'np.abs', (['(real_data - coreset_data)'], {}), '(real_data - coreset_data)\n', (12700, 12726), True, 'import numpy as np\n'), ((13459, 13479), 'numpy.random.rand', 'np.random.rand', (['n', 'd'], {}), '(n, d)\n', (13473, 13479), True, 'import numpy as np\n'), ((13517, 13537), 'numpy.random.rand', 'np.random.rand', (['n', '(1)'], {}), '(n, 1)\n', (13531, 13537), True, 'import numpy as np\n'), ((14374, 14408), 'numpy.abs', 'np.abs', (['(score_coreset - score_real)'], {}), '(score_coreset - score_real)\n', (14380, 14408), True, 'import numpy as np\n'), ((16502, 16523), 'numpy.random.rand', 'np.random.rand', (['(10)', '(3)'], {}), '(10, 3)\n', (16516, 16523), True, 'import numpy as np\n'), ((2301, 2312), 'numpy.array', 'np.array', (['W'], {}), '(W)\n', (2309, 2312), True, 'import numpy as np\n'), ((8249, 8265), 'numpy.matmul', 'np.matmul', (['D', 'Vt'], {}), '(D, Vt)\n', (8258, 8265), True, 'import numpy as np\n'), ((8676, 8728), 'numpy.linalg.norm', 'np.linalg.norm', (['(a + b * Pset.P[i, 0] - Pset.P[i, 1:])'], {}), '(a + b * Pset.P[i, 0] - Pset.P[i, 1:])\n', (8690, 8728), True, 'import numpy as np\n'), ((8803, 8855), 'numpy.linalg.norm', 'np.linalg.norm', (['(a + b * Cset.P[i, 0] - Cset.P[i, 1:])'], {}), '(a + b * Cset.P[i, 0] - Cset.P[i, 1:])\n', (8817, 8855), True, 'import numpy as np\n'), ((10018, 10035), 'numpy.linalg.norm', 'np.linalg.norm', (['x'], {}), '(x)\n', (10032, 10035), True, 'import numpy as np\n'), ((10628, 10643), 'numpy.ones', 'np.ones', (['Pset.n'], {}), 
'(Pset.n)\n', (10635, 10643), True, 'import numpy as np\n'), ((12433, 12448), 'numpy.sqrt', 'np.sqrt', (['Cset.W'], {}), '(Cset.W)\n', (12440, 12448), True, 'import numpy as np\n')] |
"""
Here lie methods for ouputing as images--aka Graphics. R.I.P
"""
import matplotlib.pyplot as plt
import numpy as np
from .generate import error
def draw_canvas(canvas, max_height):
    """Render the deposition canvas as a pseudocolor plot and save it to canvas.jpg."""
    _fig, axis = plt.subplots(1, 1)
    axis.pcolor(canvas, cmap="RdBu")
    # Leave some headroom above the tallest column.
    axis.set_ylim(0, np.max(max_height) + 10)
    axis.set_title("Ballistic Deposition")
    plt.savefig("canvas.jpg", dpi=500, bbox_inches='tight')
def plot_dist(dist, x_axis):
    """Plot the per-column means of *dist* with error bars and save to dist_plot.jpg."""
    columns = [dist[:, col] for col in range(dist.shape[1])]
    yerr = [error(col) for col in columns]
    means = [np.mean(col) for col in columns]
    _fig, axis = plt.subplots(1, 1)
    # Error bars in the default (blue) color; red star markers for the data.
    axis.errorbar(x_axis, means, yerr=yerr, ls='', marker='*',
                  markersize=5, markerfacecolor='red', markeredgecolor='black',
                  markeredgewidth=0.2, label='scatter data')
    # Axis labels
    axis.set_xlabel("Time t")
    axis.set_ylabel("Distance of the right most from the left most")
    plt.legend()
    plt.savefig("dist_plot.jpg", dpi=500, bbox_inches='tight')
| [
"matplotlib.pyplot.legend",
"numpy.max",
"numpy.mean",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.savefig"
] | [((243, 261), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(1)'], {}), '(1, 1)\n', (255, 261), True, 'import matplotlib.pyplot as plt\n'), ((404, 459), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""canvas.jpg"""'], {'dpi': '(500)', 'bbox_inches': '"""tight"""'}), "('canvas.jpg', dpi=500, bbox_inches='tight')\n", (415, 459), True, 'import matplotlib.pyplot as plt\n'), ((701, 719), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(1)'], {}), '(1, 1)\n', (713, 719), True, 'import matplotlib.pyplot as plt\n'), ((1113, 1125), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (1123, 1125), True, 'import matplotlib.pyplot as plt\n'), ((1148, 1206), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""dist_plot.jpg"""'], {'dpi': '(500)', 'bbox_inches': '"""tight"""'}), "('dist_plot.jpg', dpi=500, bbox_inches='tight')\n", (1159, 1206), True, 'import matplotlib.pyplot as plt\n'), ((316, 334), 'numpy.max', 'np.max', (['max_height'], {}), '(max_height)\n', (322, 334), True, 'import numpy as np\n'), ((647, 666), 'numpy.mean', 'np.mean', (['dist[:, _]'], {}), '(dist[:, _])\n', (654, 666), True, 'import numpy as np\n')] |
###### IMPORTS ######
# General
import os
import sys
import cv2
import glob
import json
import shutil
import subprocess
import argparse
import numpy as np
# Import other python files
import utils.general as general_utils
import utils.image as image_utils
import utils.ffmpeg as ffmpeg_utils
import modules.user_interaction as user_interaction
import modules.hyper_param_search as hyper_param_search
# Drawing colors per detection group (channel triples). The drawn frame is
# written out as draw_img[..., ::-1] in process_images, so these look like
# RGB flipped to BGR at save time — TODO confirm against image_utils.draw_box.
COLORS = {
    "individual" : (0,255,0),
    "cluster" : (138,43,226),
    "cluster_individual" : (0,255,0),
    "junk" : (255,0,0)
}
def create_helper_dirs(data_root, dirname):
    """Create (and clear any previous copy of) the output directory for drawn frames; return its path."""
    return general_utils.make_directory(dirname, root=data_root, remove_old=True)
def read_in_frames(denoised_frames_dir):
    """Collect every denoised-frame PNG under *denoised_frames_dir*, sorted by path."""
    pattern = f'{denoised_frames_dir}/**/denoised*/*.png'
    matches = glob.glob(pattern, recursive=True)
    return np.sort(matches)
def filter_by_area(img, stats, min_area, max_area):
    """Boolean mask over connected-component *stats* rows selecting areas in (min_area, max_area).

    Parameters
    ----------
    img : unused; kept for interface compatibility with existing callers.
    stats : array whose last column is the component area (cv2 stats layout).
    min_area, max_area : exclusive bounds; ``None`` means unbounded on that side.

    Bug fix: the original substituted ``areas.min()`` / ``areas.max()`` for a
    missing bound and then compared strictly, so the smallest/largest
    component was always dropped even when no bound was requested. A
    ``None`` bound now disables that side of the filter entirely.
    """
    areas = stats[:, -1]
    lower = -np.inf if min_area is None else min_area
    upper = np.inf if max_area is None else max_area
    return np.logical_and(areas > lower, areas < upper)
def run_group_filtering(denoised_img, filtered_stats, min_area, max_area, max_permitable_detection_area):
    """Re-segment each cluster bounding box into individual components.

    For every cluster stat (x, y, w, h, a) no larger than
    ``max_permitable_detection_area``, re-threshold its crop, run connected
    components locally, map the local stats back to global coordinates,
    and finally area-filter the accumulated stats.

    NOTE(review): the inner loop reuses ``stat_i``, shadowing the outer
    loop variable; ``masked_group_img`` is computed but never used; and if
    every bbox exceeds the cap, ``total_global_stats`` is never bound and
    the final filter raises NameError — confirm whether callers can hit that.
    """
    # Mask image
    img_mask = image_utils.adaptive_thresholding(denoised_img)
    first_found = False
    for stat_i, (x, y, w, h, a) in enumerate(filtered_stats):
        bbox_area = w*h
        if bbox_area > max_permitable_detection_area:
            continue
        # GLOBAL: Compute block mask (1's where group bbox is; 0's everywhere else)
        group_mask = np.zeros_like(img_mask)
        group_mask[y:y+h,x:x+w] = 1
        # GLOBAL: Apply group mask to full image mask (make all pixels 0 where group is not, 1s where group is)
        masked_group_img = img_mask*group_mask
        # LOCAL: Crop out group mask
        cropped_group_img = img_mask[y:y+h,x:x+w]
        # LOCAL: Create mask by adaptive thresholding + morphological opening
        cropped_group_mask = image_utils.adaptive_filter_plus_opening(cropped_group_img, kernel_dim=(11,11))
        # LOCAL: Run connected components on group mask
        num_regions, regions, stats, centroids = cv2.connectedComponentsWithStats(cropped_group_mask)
        # Readjust local coords -> global coords (offset by the crop origin)
        global_stats_i = np.zeros_like(stats)
        for stat_i, stat in enumerate(stats):
            new_x, new_y, new_w, new_h, new_a = stat
            global_stats_i[stat_i] = np.array([x+new_x, y+new_y, new_w, new_h, new_a])
        if not first_found:
            total_global_stats = global_stats_i
            first_found = True
        else:
            total_global_stats = np.concatenate([total_global_stats, global_stats_i], axis=0)
    # Filter out by area
    area_filter_idxs = filter_by_area(denoised_img, total_global_stats, min_area, max_area)
    filtered_global_stats = total_global_stats[area_filter_idxs]
    return filtered_global_stats
def draw_group_on_frame(data_logger, denoised_img, draw_img, stats, group, min_area=None, max_area=None, box_width=1, draw_centroid=True):
    """Draw and log detections of one group ('individual'/'cluster'/'junk') on *draw_img*.

    Groups partition the area range: individual = (min_area, max_area),
    cluster = (max_area, inf), junk = (-inf, min_area). Detections larger
    than 1/16 of the frame or skewed more than 4:1 are skipped. For the
    'cluster' group, each cluster box is additionally re-segmented into
    individuals via run_group_filtering.

    NOTE(review): np.product is a deprecated alias of np.prod in modern NumPy.
    """
    color = COLORS[group]
    total_img_area = int(np.product(denoised_img.shape[:2]))
    max_permitable_detection_area = total_img_area * 1/16
    # Get filter idxs: translate the group name into an (exclusive) area range.
    if group == 'individual':
        min_area_i = min_area
        max_area_i = max_area
    elif group == 'cluster':
        min_area_i = max_area
        max_area_i = None
    elif group == 'junk':
        min_area_i = None
        max_area_i = min_area
    area_filter_idxs = filter_by_area(denoised_img, stats, min_area_i, max_area_i)
    # Filter the stats on area conditions
    filtered_stats = stats[area_filter_idxs]
    # Iterate over stats
    for stat_i, (x, y, w, h, a) in enumerate(filtered_stats):
        # Filter out detections with too large an area
        # -------------------------------------------------
        bbox_area = w*h
        if bbox_area > max_permitable_detection_area:
            continue
        # -------------------------------------------------
        # Filter out oblong detections (aspect ratio worse than 4:1)
        # -------------------------------------------------
        skew_factor = 4
        bad_condition_1 = w > skew_factor*h
        bad_condition_2 = h > skew_factor*w
        if bad_condition_1 or bad_condition_2:
            continue
        # -------------------------------------------------
        data_logger = general_utils.log_data(data_logger, x,y,w,h,group)
        image_utils.draw_box(draw_img, x,y,w,h, box_width, color=color, draw_centroid=draw_centroid)
    # Check if cluster - try to separate out with thresholding + morphological opening
    if group == 'cluster':
        group_stats = run_group_filtering(denoised_img, filtered_stats, min_area, max_area, max_permitable_detection_area)
        for stat_i, (x, y, w, h, a) in enumerate(group_stats):
            bbox_area = w*h
            if bbox_area > max_permitable_detection_area:
                continue
            # Separated components are logged/drawn as individuals.
            data_logger = general_utils.log_data(data_logger, x,y,w,h,'individual')
            color = COLORS['cluster_individual']
            image_utils.draw_box(draw_img, x,y,w,h, box_width, color=color, draw_centroid=draw_centroid)
def process_images(denoised_filepaths, drawn_frames_dir, min_area, max_area):
    """Detect, draw and log detections for each denoised frame.

    Writes one annotated PNG per frame into *drawn_frames_dir* and returns
    a dict mapping frame id -> list of logged detections.

    NOTE(review): this reads the module-global ``args`` (limit,
    draw_clusters, draw_trash) set in the __main__ block rather than
    taking them as parameters.
    """
    data_logger = {}
    for denoised_img_i, denoised_filepath in enumerate(denoised_filepaths):
        # Optional cap on the number of frames processed.
        if args.limit and denoised_img_i > args.limit:
            break
        sys.stdout.write(f'\rProcessing frame {denoised_img_i+1}/{len(denoised_filepaths)}')
        sys.stdout.flush()
        denoised_img = cv2.cvtColor(cv2.imread(denoised_filepath), cv2.COLOR_BGR2GRAY)
        # Frame id: filename without extension.
        frame_id = os.path.splitext(os.path.basename(denoised_filepath))[0]
        # Initialize frame_id logger
        data_logger[frame_id] = []
        # Make single channel
        if len(denoised_img.shape) == 3:
            denoised_img = cv2.cvtColor(denoised_img, cv2.COLOR_BGR2GRAY)
        # Setup image for drawing on
        draw_img = general_utils.setup_draw_img(denoised_img)
        # Mask image
        img_mask = image_utils.adaptive_filter_plus_opening(denoised_img, invert=True)
        # Run Connected Components
        num_regions, regions, stats, centroids = cv2.connectedComponentsWithStats(img_mask)
        # Draw Individual detections
        draw_group_on_frame(data_logger[frame_id], denoised_img, draw_img, stats, group='individual',
                            min_area=min_area, max_area=max_area, box_width=5)
        # Draw Cluster detections
        if args.draw_clusters:
            draw_group_on_frame(data_logger[frame_id], denoised_img, draw_img, stats, group='cluster',
                                min_area=min_area, max_area=max_area, box_width=5)
        # Draw Trash detections (not logged: data_logger is None here)
        if args.draw_trash:
            draw_group_on_frame(None, denoised_img, draw_img, stats, group='junk',
                                min_area=min_area, max_area=max_area, box_width=5)
        # Write draw image (channel flip back to cv2's BGR ordering)
        path = os.path.join(drawn_frames_dir, f"frame_{denoised_img_i+1:05d}.png")
        cv2.imwrite(path, draw_img[...,::-1])
    return data_logger
def main(UI, args):
    """End-to-end detection pipeline: pick a video folder, calibrate area
    thresholds via the UI, annotate every frame, dump the detection log,
    and assemble the annotated frames into a video."""
    # Select the video
    print("-- Select video/data folder from list...")
    src_processed_root = general_utils.select_file(args.data_root)
    # Create helper dirs
    print("Setting up helper directories...")
    drawn_frames_dir = create_helper_dirs(src_processed_root, dirname='detection_frames')
    # Read in frames
    print("\n-- Reading in frames...")
    denoised_filepaths = read_in_frames(src_processed_root)
    # User interface for selecting proper stuff...
    # Pick an image somewhere in middle of video - x seconds in
    img_i = args.img_i
    denoised_img_i = cv2.cvtColor(cv2.imread(denoised_filepaths[img_i]), cv2.COLOR_BGR2GRAY)
    # Setup UI: lets the user calibrate the min/max area thresholds.
    UI.run(denoised_img_i, src_processed_root, load_previous=args.load_prev_UI_results)
    min_area, max_area = UI.min_area, UI.max_area
    # Process the images
    data_logger = process_images(denoised_filepaths, drawn_frames_dir, min_area, max_area)
    print("\n")
    # Log data (per-frame detection records as JSON)
    logger_path = os.path.join(src_processed_root, 'data_log.json')
    with open(logger_path, 'w') as outfile:
        json.dump(data_logger, outfile)
    ffmpeg_utils.frame2vid(drawn_frames_dir, src_processed_root, args)
def setup_args():
parser = argparse.ArgumentParser(description='Process bee videos!')
parser.add_argument('-p', '--data_root', dest='data_root', type=str, default='data/processed',
help='Set path to processed video frames')
parser.add_argument('-r', '--fps', dest='FPS', type=float, default=25,
help='Frames per second (FPS)')
parser.add_argument('-l', '--limit', dest='limit', type=int, default=0,
help='Processing limit')
parser.add_argument('-v', '--verbose', dest='verbose', type=bool, default=False,
help='FFMPEG Verbosity')
parser.add_argument('-f', '--force', dest='force', type=bool, default=True,
help='Force overwrite: True/False')
parser.add_argument('-c', '--draw_clusters', dest='draw_clusters', type=bool, default=True,
help='Draw cluster detections')
parser.add_argument('-t', '--draw_trash', dest='draw_trash', type=bool, default=False,
help='Draw trash detections')
parser.add_argument('-u', '--prevUI', dest='load_prev_UI_results', type=bool, default=False,
help='Use previous UI results?')
parser.add_argument('--img_idx', dest='img_i', type=int, default=1, help='Image index to label for this video')
args = parser.parse_args()
return args
if __name__ == '__main__':
    # Script entry point: parse CLI options, build the threshold-tuning UI,
    # then run the full detection pipeline.
    print("\n---------- Detecting bees ----------")
    args = setup_args()
    UI = hyper_param_search.UserInterface()
    main(UI, args)
    print("\n")
| [
"utils.general.log_data",
"argparse.ArgumentParser",
"utils.general.select_file",
"numpy.product",
"utils.general.setup_draw_img",
"sys.stdout.flush",
"glob.glob",
"os.path.join",
"utils.image.adaptive_thresholding",
"numpy.zeros_like",
"cv2.cvtColor",
"cv2.imwrite",
"numpy.max",
"utils.ff... | [((662, 732), 'utils.general.make_directory', 'general_utils.make_directory', (['dirname'], {'root': 'data_root', 'remove_old': '(True)'}), '(dirname, root=data_root, remove_old=True)\n', (690, 732), True, 'import utils.general as general_utils\n'), ((1223, 1263), 'numpy.logical_and', 'np.logical_and', (['condition_1', 'condition_2'], {}), '(condition_1, condition_2)\n', (1237, 1263), True, 'import numpy as np\n'), ((1426, 1473), 'utils.image.adaptive_thresholding', 'image_utils.adaptive_thresholding', (['denoised_img'], {}), '(denoised_img)\n', (1459, 1473), True, 'import utils.image as image_utils\n'), ((7527, 7568), 'utils.general.select_file', 'general_utils.select_file', (['args.data_root'], {}), '(args.data_root)\n', (7552, 7568), True, 'import utils.general as general_utils\n'), ((8405, 8454), 'os.path.join', 'os.path.join', (['src_processed_root', '"""data_log.json"""'], {}), "(src_processed_root, 'data_log.json')\n", (8417, 8454), False, 'import os\n'), ((8544, 8610), 'utils.ffmpeg.frame2vid', 'ffmpeg_utils.frame2vid', (['drawn_frames_dir', 'src_processed_root', 'args'], {}), '(drawn_frames_dir, src_processed_root, args)\n', (8566, 8610), True, 'import utils.ffmpeg as ffmpeg_utils\n'), ((8643, 8701), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Process bee videos!"""'}), "(description='Process bee videos!')\n", (8666, 8701), False, 'import argparse\n'), ((10129, 10163), 'modules.hyper_param_search.UserInterface', 'hyper_param_search.UserInterface', ([], {}), '()\n', (10161, 10163), True, 'import modules.hyper_param_search as hyper_param_search\n'), ((837, 907), 'glob.glob', 'glob.glob', (['f"""{denoised_frames_dir}/**/denoised*/*.png"""'], {'recursive': '(True)'}), "(f'{denoised_frames_dir}/**/denoised*/*.png', recursive=True)\n", (846, 907), False, 'import glob\n'), ((1061, 1074), 'numpy.min', 'np.min', (['areas'], {}), '(areas)\n', (1067, 1074), True, 'import numpy as np\n'), ((1119, 1132), 'numpy.max', 
'np.max', (['areas'], {}), '(areas)\n', (1125, 1132), True, 'import numpy as np\n'), ((1767, 1790), 'numpy.zeros_like', 'np.zeros_like', (['img_mask'], {}), '(img_mask)\n', (1780, 1790), True, 'import numpy as np\n'), ((2183, 2268), 'utils.image.adaptive_filter_plus_opening', 'image_utils.adaptive_filter_plus_opening', (['cropped_group_img'], {'kernel_dim': '(11, 11)'}), '(cropped_group_img, kernel_dim=(11, 11)\n )\n', (2223, 2268), True, 'import utils.image as image_utils\n'), ((2369, 2421), 'cv2.connectedComponentsWithStats', 'cv2.connectedComponentsWithStats', (['cropped_group_mask'], {}), '(cropped_group_mask)\n', (2401, 2421), False, 'import cv2\n'), ((2497, 2517), 'numpy.zeros_like', 'np.zeros_like', (['stats'], {}), '(stats)\n', (2510, 2517), True, 'import numpy as np\n'), ((3342, 3376), 'numpy.product', 'np.product', (['denoised_img.shape[:2]'], {}), '(denoised_img.shape[:2])\n', (3352, 3376), True, 'import numpy as np\n'), ((4613, 4667), 'utils.general.log_data', 'general_utils.log_data', (['data_logger', 'x', 'y', 'w', 'h', 'group'], {}), '(data_logger, x, y, w, h, group)\n', (4635, 4667), True, 'import utils.general as general_utils\n'), ((4672, 4771), 'utils.image.draw_box', 'image_utils.draw_box', (['draw_img', 'x', 'y', 'w', 'h', 'box_width'], {'color': 'color', 'draw_centroid': 'draw_centroid'}), '(draw_img, x, y, w, h, box_width, color=color,\n draw_centroid=draw_centroid)\n', (4692, 4771), True, 'import utils.image as image_utils\n'), ((5767, 5785), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (5783, 5785), False, 'import sys\n'), ((6246, 6288), 'utils.general.setup_draw_img', 'general_utils.setup_draw_img', (['denoised_img'], {}), '(denoised_img)\n', (6274, 6288), True, 'import utils.general as general_utils\n'), ((6330, 6397), 'utils.image.adaptive_filter_plus_opening', 'image_utils.adaptive_filter_plus_opening', (['denoised_img'], {'invert': '(True)'}), '(denoised_img, invert=True)\n', (6370, 6397), True, 'import utils.image as 
image_utils\n'), ((6483, 6525), 'cv2.connectedComponentsWithStats', 'cv2.connectedComponentsWithStats', (['img_mask'], {}), '(img_mask)\n', (6515, 6525), False, 'import cv2\n'), ((7267, 7336), 'os.path.join', 'os.path.join', (['drawn_frames_dir', 'f"""frame_{denoised_img_i + 1:05d}.png"""'], {}), "(drawn_frames_dir, f'frame_{denoised_img_i + 1:05d}.png')\n", (7279, 7336), False, 'import os\n'), ((7343, 7381), 'cv2.imwrite', 'cv2.imwrite', (['path', 'draw_img[..., ::-1]'], {}), '(path, draw_img[..., ::-1])\n', (7354, 7381), False, 'import cv2\n'), ((8025, 8062), 'cv2.imread', 'cv2.imread', (['denoised_filepaths[img_i]'], {}), '(denoised_filepaths[img_i])\n', (8035, 8062), False, 'import cv2\n'), ((8507, 8538), 'json.dump', 'json.dump', (['data_logger', 'outfile'], {}), '(data_logger, outfile)\n', (8516, 8538), False, 'import json\n'), ((2654, 2707), 'numpy.array', 'np.array', (['[x + new_x, y + new_y, new_w, new_h, new_a]'], {}), '([x + new_x, y + new_y, new_w, new_h, new_a])\n', (2662, 2707), True, 'import numpy as np\n'), ((2859, 2919), 'numpy.concatenate', 'np.concatenate', (['[total_global_stats, global_stats_i]'], {'axis': '(0)'}), '([total_global_stats, global_stats_i], axis=0)\n', (2873, 2919), True, 'import numpy as np\n'), ((5204, 5265), 'utils.general.log_data', 'general_utils.log_data', (['data_logger', 'x', 'y', 'w', 'h', '"""individual"""'], {}), "(data_logger, x, y, w, h, 'individual')\n", (5226, 5265), True, 'import utils.general as general_utils\n'), ((5323, 5422), 'utils.image.draw_box', 'image_utils.draw_box', (['draw_img', 'x', 'y', 'w', 'h', 'box_width'], {'color': 'color', 'draw_centroid': 'draw_centroid'}), '(draw_img, x, y, w, h, box_width, color=color,\n draw_centroid=draw_centroid)\n', (5343, 5422), True, 'import utils.image as image_utils\n'), ((5823, 5852), 'cv2.imread', 'cv2.imread', (['denoised_filepath'], {}), '(denoised_filepath)\n', (5833, 5852), False, 'import cv2\n'), ((6142, 6188), 'cv2.cvtColor', 'cv2.cvtColor', (['denoised_img', 
'cv2.COLOR_BGR2GRAY'], {}), '(denoised_img, cv2.COLOR_BGR2GRAY)\n', (6154, 6188), False, 'import cv2\n'), ((5930, 5965), 'os.path.basename', 'os.path.basename', (['denoised_filepath'], {}), '(denoised_filepath)\n', (5946, 5965), False, 'import os\n')] |
from io import StringIO
from numpy import zeros, array
def bytes_to_octal(bytestr):
    """Render *bytestr* as a fixed-width octal byte string.

    The width is the number of octal digits fully "saturated" by the
    input (3 bits per octal digit), so leading partial digits are cut off.
    """
    # largest count of complete 3-bit groups in the input
    width = len(bytestr) * 8 // 3
    # whole input interpreted as one big-endian integer
    value = int.from_bytes(bytestr, byteorder='big')
    # zero-padded octal rendering, then keep only the saturated tail
    padded = (b"%%0%do" % width) % value
    return padded[-width:]
def movements(octal):
    """Lazily map each octal digit (as a byte value) to a 2-D step vector."""
    step_for = {
        ord('4'): (-1, -1), ord('5'): (-1, 0), ord('6'): (-1, 1),
        ord('3'): (0, -1),                     ord('7'): (0, 1),
        ord('2'): (1, -1),  ord('1'): (1, 0),  ord('0'): (1, 1),
    }
    # iterating bytes yields int byte values, which index the table directly
    return map(step_for.__getitem__, octal)
# compute randomart matrix from hash digest
def drunkenwalk(digest, size=(9, 18)):
    """Perform a "drunken walk" over a board and count cell visits.

    Each octal digit of the digest moves a cursor one step (wrapping at the
    board edges); the returned integer matrix records how many times each
    cell was visited.

    Args:
        digest: bytes — typically a hash digest.
        size: (rows, cols) of the board.

    Returns:
        numpy integer array of shape *size* with visit counts.
    """
    matrix = zeros(size).astype(int)
    # start in the middle of the board
    position = (array(size) / 2).astype(int)
    for move in movements(bytes_to_octal(digest)):
        p = tuple(position)
        # ndarray.itemset()/item() were removed in NumPy 2.0;
        # plain tuple indexing is the supported equivalent.
        matrix[p] += 1
        # wrap around the board edges
        position = (position + move) % size
    return matrix
# character palette for drawing
PALETTE = " .*=%!~R_EWS0123456789abcdefghijklmnop"

# translation hash for ascii output
TRANSLATION = {
    ord("╭"): "/", ord("╮"): "\\", ord("╰"): "\\", ord("╯"): "/",
    ord("│"): "|", ord("─"): "-", ord("╴"): "[", ord("╶"): "]",
}


def draw_card(matrix, palette=PALETTE):
    """Render *matrix* as ASCII art, one palette character per cell.

    Args:
        matrix: iterable of rows of non-negative ints (visit counts).
        palette: characters counts are mapped onto, wrapping modulo its
            length.  Bug fix: the previous version accepted this parameter
            but ignored it, always using the module-level PALETTE.

    Returns:
        str: one line per matrix row, each terminated by a newline.
    """
    def symbol(count):
        return palette[count % len(palette)]

    art = StringIO()
    for row in matrix:
        art.write("%s\n" % "".join(symbol(cell) for cell in row))
    return art.getvalue()
# deprecated but here for reference:
# def draw_cards(matrices, palette=PALETTE):
# symbol = lambda n: PALETTE[n % len(PALETTE)]
# art = StringIO()
# art.write("╭──────────────────╮"*3 + "\n")
# for idx in range(len(matrices[0])):
# line = ''
# for matrix in matrices:
# line += "│%s│" % "".join((symbol(el) for el in list(matrix[idx])))
# art.write(line + '\n')
# art.write("╰───────~ %s ~──────╯"*3 %(1,2,3) + '\n')
# return art.getvalue()
| [
"numpy.array",
"io.StringIO",
"numpy.zeros"
] | [((1594, 1604), 'io.StringIO', 'StringIO', ([], {}), '()\n', (1602, 1604), False, 'from io import StringIO\n'), ((926, 937), 'numpy.zeros', 'zeros', (['size'], {}), '(size)\n', (931, 937), False, 'from numpy import zeros, array\n'), ((966, 977), 'numpy.array', 'array', (['size'], {}), '(size)\n', (971, 977), False, 'from numpy import zeros, array\n')] |
"""
File Name: test_set_random_seed.py
Project: dl-project-template
File Description:
"""
import random
import unittest
from typing import Tuple
import torch
import numpy as np
from src.utilities import set_random_seed
_RANDOM_SEED: int = random.randint(0, 100)
_TEST_TUPLE_SIZE: Tuple[int, int] = (32, 1024)
def _set_random_seed():
    """Seed every supported RNG with the module-level seed.

    Deterministic cuDNN mode is requested so GPU runs are reproducible too.
    """
    set_random_seed(random_seed=_RANDOM_SEED, deterministic_cudnn_flag=True)
class TestSetRandomSeed(unittest.TestCase):
    """Unit tests for the 'set_random_seed' utility.

    Each test seeds the relevant RNG, draws a sample, re-seeds, draws
    again, and checks the two draws are identical.
    """

    def test_torch_randomness(self):
        """Re-seeding must make torch.rand reproducible."""
        _set_random_seed()
        first = torch.rand(size=_TEST_TUPLE_SIZE)
        _set_random_seed()
        second = torch.rand(size=_TEST_TUPLE_SIZE)
        assert (first == second).all()

    def test_randomness(self):
        """Re-seeding must make random.random reproducible."""
        _set_random_seed()
        first = random.random()
        _set_random_seed()
        second = random.random()
        assert first == second

    def test_numpy_randomness(self):
        """Re-seeding must make numpy.random reproducible."""
        _set_random_seed()
        first = np.random.random(size=_TEST_TUPLE_SIZE)
        _set_random_seed()
        second = np.random.random(size=_TEST_TUPLE_SIZE)
        assert (first == second).all()
if __name__ == '__main__':
    # Allow running this test module directly with the unittest runner.
    unittest.main()
| [
"unittest.main",
"random.randint",
"src.utilities.set_random_seed",
"random.random",
"numpy.random.random",
"torch.rand"
] | [((265, 287), 'random.randint', 'random.randint', (['(0)', '(100)'], {}), '(0, 100)\n', (279, 287), False, 'import random\n'), ((365, 437), 'src.utilities.set_random_seed', 'set_random_seed', ([], {'random_seed': '_RANDOM_SEED', 'deterministic_cudnn_flag': '(True)'}), '(random_seed=_RANDOM_SEED, deterministic_cudnn_flag=True)\n', (380, 437), False, 'from src.utilities import set_random_seed\n'), ((1396, 1411), 'unittest.main', 'unittest.main', ([], {}), '()\n', (1409, 1411), False, 'import unittest\n'), ((716, 749), 'torch.rand', 'torch.rand', ([], {'size': '_TEST_TUPLE_SIZE'}), '(size=_TEST_TUPLE_SIZE)\n', (726, 749), False, 'import torch\n'), ((989, 1004), 'random.random', 'random.random', ([], {}), '()\n', (1002, 1004), False, 'import random\n'), ((1222, 1261), 'numpy.random.random', 'np.random.random', ([], {'size': '_TEST_TUPLE_SIZE'}), '(size=_TEST_TUPLE_SIZE)\n', (1238, 1261), True, 'import numpy as np\n'), ((1059, 1074), 'random.random', 'random.random', ([], {}), '()\n', (1072, 1074), False, 'import random\n'), ((805, 838), 'torch.rand', 'torch.rand', ([], {'size': '_TEST_TUPLE_SIZE'}), '(size=_TEST_TUPLE_SIZE)\n', (815, 838), False, 'import torch\n'), ((1316, 1355), 'numpy.random.random', 'np.random.random', ([], {'size': '_TEST_TUPLE_SIZE'}), '(size=_TEST_TUPLE_SIZE)\n', (1332, 1355), True, 'import numpy as np\n')] |
import gym
import numpy as np
import pytest
import torch as T
from pearll.buffers import ReplayBuffer
from pearll.buffers.rollout_buffer import RolloutBuffer
from pearll.common.type_aliases import Trajectories
# Module-level single CartPole environment shared by the tests below.
env = gym.make("CartPole-v0")
@pytest.mark.parametrize("buffer_class", [ReplayBuffer, RolloutBuffer])
def test_buffer_init(buffer_class):
    """A freshly constructed buffer allocates correctly shaped storage."""
    buf = buffer_class(env, buffer_size=5)
    expected_shapes = {
        "observations": (5, 4),
        "actions": (5, 1),
        "rewards": (5, 1),
        "dones": (5, 1),
    }
    for attr, shape in expected_shapes.items():
        assert getattr(buf, attr).shape == shape
@pytest.mark.parametrize("buffer_class", [ReplayBuffer, RolloutBuffer])
def test_buffer_add_trajectory_and_sample(buffer_class):
    """A single stored transition can be sampled back intact in both dtypes."""
    buf = buffer_class(env, buffer_size=2)
    obs = env.reset()
    action = env.action_space.sample()
    next_obs, reward, done, _ = env.step(action)
    buf.add_trajectory(
        observation=obs,
        action=action,
        reward=reward,
        next_observation=next_obs,
        done=done,
    )
    batch_np = buf.sample(batch_size=1, dtype="numpy")
    batch_t = buf.sample(batch_size=1, dtype="torch")
    # Every sampled field keeps a 2-D (batch, feature) layout.
    for field in batch_np.__dataclass_fields__:
        assert len(getattr(batch_np, field).shape) == 2
    # The stored transition round-trips exactly.
    np.testing.assert_array_almost_equal(obs, batch_np.observations.reshape(4))
    np.testing.assert_array_almost_equal(next_obs, batch_np.next_observations.reshape(4))
    assert batch_np.rewards.item() == reward
    assert batch_np.dones.item() == done
    assert batch_np.actions.item() == action
    # dtype selection controls the container type.
    assert isinstance(batch_np.observations, np.ndarray)
    assert isinstance(batch_t.observations, T.Tensor)
@pytest.mark.parametrize("buffer_class", [ReplayBuffer, RolloutBuffer])
def test_add_batch_trajectories_and_sample(buffer_class):
    """Batch-insert five transitions, then sample back in both dtypes."""
    n_steps = 5
    buf = buffer_class(env, buffer_size=n_steps)
    obs_batch = np.zeros(shape=(n_steps, 4))
    next_obs_batch = np.zeros(shape=(n_steps, 4))
    action_batch = np.zeros(n_steps)
    reward_batch = np.zeros(n_steps)
    done_batch = np.zeros(n_steps)
    obs = env.reset()
    # Roll out n_steps transitions and record them column-wise.
    for step in range(n_steps):
        action = env.action_space.sample()
        obs_batch[step] = obs
        action_batch[step] = action
        obs, reward, done, _ = env.step(action)
        next_obs_batch[step] = obs
        reward_batch[step] = reward
        done_batch[step] = done
    buf.add_batch_trajectories(
        obs_batch, action_batch, reward_batch, next_obs_batch, done_batch
    )
    batch_np = buf.sample(batch_size=2, dtype="numpy")
    batch_t = buf.sample(batch_size=2, dtype="torch")
    if buffer_class == ReplayBuffer:
        # The replay buffer stores observations rolled one slot relative
        # to the inserted next_observations.
        rolled = np.concatenate([[next_obs_batch[-1]], next_obs_batch[:-1]])
        np.testing.assert_array_almost_equal(buf.observations.reshape(n_steps, 4), rolled)
    assert isinstance(batch_np.observations, np.ndarray)
    assert isinstance(batch_t.observations, T.Tensor)
@pytest.mark.parametrize("buffer_class", [ReplayBuffer, RolloutBuffer])
def test_last(buffer_class):
    """buffer.last(batch_size=k) must return the k most recent transitions.

    Phase 1: buffer of size 2 — add one transition at a time and check that
    last(batch_size=1) echoes it back exactly.
    Phase 2: buffer of size 5 — add transitions two at a time while manually
    building the expected Trajectories by concatenation, then compare
    against last(batch_size=2).
    """
    num_steps = 10
    action_space = env.action_space
    observation_space = env.observation_space
    buffer = buffer_class(env, buffer_size=2)
    for _ in range(num_steps):
        # Observations are sampled from the space directly; only
        # next_obs/reward/done come from stepping the environment.
        obs = observation_space.sample()
        action = action_space.sample()
        next_obs, reward, done, _ = env.step(action)
        # Expected single-transition batch (leading batch axis added).
        trajectory = Trajectories(
            observations=obs[np.newaxis, :],
            actions=np.array([[action]], dtype=np.float32),
            rewards=np.array([[reward]]),
            next_observations=next_obs[np.newaxis, :],
            dones=np.array([[done]], dtype=np.float32),
        )
        buffer.add_trajectory(
            observation=obs,
            action=action,
            reward=reward,
            next_observation=next_obs,
            done=done,
        )
        most_recent = buffer.last(batch_size=1)
        np.testing.assert_array_almost_equal(
            most_recent.observations, trajectory.observations
        )
        np.testing.assert_array_almost_equal(most_recent.actions, trajectory.actions)
        np.testing.assert_array_almost_equal(most_recent.rewards, trajectory.rewards)
        np.testing.assert_array_almost_equal(
            most_recent.next_observations, trajectory.next_observations
        )
        np.testing.assert_array_almost_equal(most_recent.dones, trajectory.dones)
    # Phase 2: larger buffer, compare the last 2 transitions per iteration.
    buffer = buffer_class(env, buffer_size=5)
    num_most_recent = 2
    obs = env.reset()
    for _ in range(num_steps):
        trajectories = [None] * num_most_recent
        for i in range(num_most_recent):
            action = action_space.sample()
            next_obs, reward, done, _ = env.step(action)
            if i == 0:
                # First inner step seeds the expected batch.
                trajectories = Trajectories(
                    observations=obs[np.newaxis, :],
                    actions=np.array([[action]], dtype=np.float32),
                    rewards=np.array([[reward]]),
                    next_observations=next_obs[np.newaxis, :],
                    dones=np.array([[done]], dtype=np.float32),
                )
            else:
                # Subsequent inner steps append along the batch axis.
                trajectories.observations = np.concatenate(
                    (trajectories.observations, obs[np.newaxis, :])
                )
                trajectories.actions = np.concatenate(
                    (trajectories.actions, np.array([[action]], dtype=np.float32))
                )
                trajectories.rewards = np.concatenate(
                    (trajectories.rewards, np.array([[reward]]))
                )
                trajectories.next_observations = np.concatenate(
                    (
                        trajectories.next_observations,
                        next_obs[np.newaxis, :],
                    )
                )
                trajectories.dones = np.concatenate(
                    (trajectories.dones, np.array([[done]], dtype=np.float32))
                )
            buffer.add_trajectory(
                observation=obs,
                action=action,
                reward=reward,
                next_observation=next_obs,
                done=done,
            )
            obs = next_obs
        most_recent = buffer.last(batch_size=num_most_recent)
        np.testing.assert_array_almost_equal(
            most_recent.observations, trajectories.observations
        )
        np.testing.assert_array_almost_equal(most_recent.actions, trajectories.actions)
        np.testing.assert_array_almost_equal(most_recent.rewards, trajectories.rewards)
        np.testing.assert_array_almost_equal(
            most_recent.next_observations, trajectories.next_observations
        )
        np.testing.assert_array_almost_equal(most_recent.dones, trajectories.dones)
def test_flatten_env_axis():
    """_flatten_env_axis keeps single-env arrays and flattens multi-env ones."""
    sample = np.random.rand(7, 3, 2)
    # Single environment: the array should pass through unchanged.
    single_env = gym.make("CartPole-v0")
    buf = ReplayBuffer(single_env, buffer_size=100)
    assert buf._flatten_env_axis(sample).shape == (7, 3, 2)
    # Vectorised environments: the env axis is folded into the batch axis.
    vec_env = gym.vector.make("CartPole-v0", 3)
    buf = ReplayBuffer(vec_env, buffer_size=100)
    assert buf._flatten_env_axis(sample).shape == (6, 2)
@pytest.mark.parametrize("buffer_class", [ReplayBuffer, RolloutBuffer])
def test_buffer_multiple_envs(buffer_class):
    """With a vectorised env, sampled batches keep a leading env axis."""
    vec_env = gym.vector.make("CartPole-v0", 2)
    buf = buffer_class(vec_env, buffer_size=10)
    obs = vec_env.reset()
    for _ in range(10):
        action = vec_env.action_space.sample()
        next_obs, reward, done, _ = vec_env.step(action)
        buf.add_trajectory(
            observation=obs,
            action=action,
            reward=reward,
            next_observation=next_obs,
            done=done,
        )
        obs = next_obs
    batch = buf.last(batch_size=5)
    # Shapes are (n_envs, batch, feature).
    expected_shapes = {
        "observations": (2, 5, 4),
        "actions": (2, 5, 1),
        "rewards": (2, 5, 1),
        "next_observations": (2, 5, 4),
        "dones": (2, 5, 1),
    }
    for field, shape in expected_shapes.items():
        assert getattr(batch, field).shape == shape
| [
"gym.vector.make",
"gym.make",
"numpy.testing.assert_array_almost_equal",
"numpy.zeros",
"numpy.array",
"numpy.random.rand",
"pytest.mark.parametrize",
"pearll.buffers.ReplayBuffer",
"numpy.concatenate"
] | [((218, 241), 'gym.make', 'gym.make', (['"""CartPole-v0"""'], {}), "('CartPole-v0')\n", (226, 241), False, 'import gym\n'), ((245, 315), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""buffer_class"""', '[ReplayBuffer, RolloutBuffer]'], {}), "('buffer_class', [ReplayBuffer, RolloutBuffer])\n", (268, 315), False, 'import pytest\n'), ((573, 643), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""buffer_class"""', '[ReplayBuffer, RolloutBuffer]'], {}), "('buffer_class', [ReplayBuffer, RolloutBuffer])\n", (596, 643), False, 'import pytest\n'), ((1889, 1959), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""buffer_class"""', '[ReplayBuffer, RolloutBuffer]'], {}), "('buffer_class', [ReplayBuffer, RolloutBuffer])\n", (1912, 1959), False, 'import pytest\n'), ((3144, 3214), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""buffer_class"""', '[ReplayBuffer, RolloutBuffer]'], {}), "('buffer_class', [ReplayBuffer, RolloutBuffer])\n", (3167, 3214), False, 'import pytest\n'), ((7541, 7611), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""buffer_class"""', '[ReplayBuffer, RolloutBuffer]'], {}), "('buffer_class', [ReplayBuffer, RolloutBuffer])\n", (7564, 7611), False, 'import pytest\n'), ((2120, 2142), 'numpy.zeros', 'np.zeros', ([], {'shape': '(5, 4)'}), '(shape=(5, 4))\n', (2128, 2142), True, 'import numpy as np\n'), ((2167, 2189), 'numpy.zeros', 'np.zeros', ([], {'shape': '(5, 4)'}), '(shape=(5, 4))\n', (2175, 2189), True, 'import numpy as np\n'), ((2204, 2215), 'numpy.zeros', 'np.zeros', (['(5)'], {}), '(5)\n', (2212, 2215), True, 'import numpy as np\n'), ((2230, 2241), 'numpy.zeros', 'np.zeros', (['(5)'], {}), '(5)\n', (2238, 2241), True, 'import numpy as np\n'), ((2254, 2265), 'numpy.zeros', 'np.zeros', (['(5)'], {}), '(5)\n', (2262, 2265), True, 'import numpy as np\n'), ((6993, 7016), 'numpy.random.rand', 'np.random.rand', (['(7)', '(3)', '(2)'], {}), '(7, 3, 2)\n', (7007, 7016), True, 'import numpy as np\n'), 
((7070, 7093), 'gym.make', 'gym.make', (['"""CartPole-v0"""'], {}), "('CartPole-v0')\n", (7078, 7093), False, 'import gym\n'), ((7107, 7141), 'pearll.buffers.ReplayBuffer', 'ReplayBuffer', (['env'], {'buffer_size': '(100)'}), '(env, buffer_size=100)\n', (7119, 7141), False, 'from pearll.buffers import ReplayBuffer\n'), ((7325, 7358), 'gym.vector.make', 'gym.vector.make', (['"""CartPole-v0"""', '(3)'], {}), "('CartPole-v0', 3)\n", (7340, 7358), False, 'import gym\n'), ((7372, 7406), 'pearll.buffers.ReplayBuffer', 'ReplayBuffer', (['env'], {'buffer_size': '(100)'}), '(env, buffer_size=100)\n', (7384, 7406), False, 'from pearll.buffers import ReplayBuffer\n'), ((7667, 7700), 'gym.vector.make', 'gym.vector.make', (['"""CartPole-v0"""', '(2)'], {}), "('CartPole-v0', 2)\n", (7682, 7700), False, 'import gym\n'), ((4105, 4197), 'numpy.testing.assert_array_almost_equal', 'np.testing.assert_array_almost_equal', (['most_recent.observations', 'trajectory.observations'], {}), '(most_recent.observations, trajectory.\n observations)\n', (4141, 4197), True, 'import numpy as np\n'), ((4223, 4300), 'numpy.testing.assert_array_almost_equal', 'np.testing.assert_array_almost_equal', (['most_recent.actions', 'trajectory.actions'], {}), '(most_recent.actions, trajectory.actions)\n', (4259, 4300), True, 'import numpy as np\n'), ((4309, 4386), 'numpy.testing.assert_array_almost_equal', 'np.testing.assert_array_almost_equal', (['most_recent.rewards', 'trajectory.rewards'], {}), '(most_recent.rewards, trajectory.rewards)\n', (4345, 4386), True, 'import numpy as np\n'), ((4395, 4496), 'numpy.testing.assert_array_almost_equal', 'np.testing.assert_array_almost_equal', (['most_recent.next_observations', 'trajectory.next_observations'], {}), '(most_recent.next_observations,\n trajectory.next_observations)\n', (4431, 4496), True, 'import numpy as np\n'), ((4523, 4596), 'numpy.testing.assert_array_almost_equal', 'np.testing.assert_array_almost_equal', (['most_recent.dones', 'trajectory.dones'], 
{}), '(most_recent.dones, trajectory.dones)\n', (4559, 4596), True, 'import numpy as np\n'), ((6450, 6544), 'numpy.testing.assert_array_almost_equal', 'np.testing.assert_array_almost_equal', (['most_recent.observations', 'trajectories.observations'], {}), '(most_recent.observations, trajectories\n .observations)\n', (6486, 6544), True, 'import numpy as np\n'), ((6570, 6649), 'numpy.testing.assert_array_almost_equal', 'np.testing.assert_array_almost_equal', (['most_recent.actions', 'trajectories.actions'], {}), '(most_recent.actions, trajectories.actions)\n', (6606, 6649), True, 'import numpy as np\n'), ((6658, 6737), 'numpy.testing.assert_array_almost_equal', 'np.testing.assert_array_almost_equal', (['most_recent.rewards', 'trajectories.rewards'], {}), '(most_recent.rewards, trajectories.rewards)\n', (6694, 6737), True, 'import numpy as np\n'), ((6746, 6849), 'numpy.testing.assert_array_almost_equal', 'np.testing.assert_array_almost_equal', (['most_recent.next_observations', 'trajectories.next_observations'], {}), '(most_recent.next_observations,\n trajectories.next_observations)\n', (6782, 6849), True, 'import numpy as np\n'), ((6876, 6951), 'numpy.testing.assert_array_almost_equal', 'np.testing.assert_array_almost_equal', (['most_recent.dones', 'trajectories.dones'], {}), '(most_recent.dones, trajectories.dones)\n', (6912, 6951), True, 'import numpy as np\n'), ((2931, 2996), 'numpy.concatenate', 'np.concatenate', (['[[next_observations[-1]], next_observations[:-1]]'], {}), '([[next_observations[-1]], next_observations[:-1]])\n', (2945, 2996), True, 'import numpy as np\n'), ((3657, 3695), 'numpy.array', 'np.array', (['[[action]]'], {'dtype': 'np.float32'}), '([[action]], dtype=np.float32)\n', (3665, 3695), True, 'import numpy as np\n'), ((3717, 3737), 'numpy.array', 'np.array', (['[[reward]]'], {}), '([[reward]])\n', (3725, 3737), True, 'import numpy as np\n'), ((3812, 3848), 'numpy.array', 'np.array', (['[[done]]'], {'dtype': 'np.float32'}), '([[done]], 
dtype=np.float32)\n', (3820, 3848), True, 'import numpy as np\n'), ((5358, 5421), 'numpy.concatenate', 'np.concatenate', (['(trajectories.observations, obs[np.newaxis, :])'], {}), '((trajectories.observations, obs[np.newaxis, :]))\n', (5372, 5421), True, 'import numpy as np\n'), ((5803, 5876), 'numpy.concatenate', 'np.concatenate', (['(trajectories.next_observations, next_obs[np.newaxis, :])'], {}), '((trajectories.next_observations, next_obs[np.newaxis, :]))\n', (5817, 5876), True, 'import numpy as np\n'), ((5061, 5099), 'numpy.array', 'np.array', (['[[action]]'], {'dtype': 'np.float32'}), '([[action]], dtype=np.float32)\n', (5069, 5099), True, 'import numpy as np\n'), ((5129, 5149), 'numpy.array', 'np.array', (['[[reward]]'], {}), '([[reward]])\n', (5137, 5149), True, 'import numpy as np\n'), ((5240, 5276), 'numpy.array', 'np.array', (['[[done]]'], {'dtype': 'np.float32'}), '([[done]], dtype=np.float32)\n', (5248, 5276), True, 'import numpy as np\n'), ((5558, 5596), 'numpy.array', 'np.array', (['[[action]]'], {'dtype': 'np.float32'}), '([[action]], dtype=np.float32)\n', (5566, 5596), True, 'import numpy as np\n'), ((5714, 5734), 'numpy.array', 'np.array', (['[[reward]]'], {}), '([[reward]])\n', (5722, 5734), True, 'import numpy as np\n'), ((6080, 6116), 'numpy.array', 'np.array', (['[[done]]'], {'dtype': 'np.float32'}), '([[done]], dtype=np.float32)\n', (6088, 6116), True, 'import numpy as np\n')] |
#!/usr/bin/env python
# coding: utf-8

"""Notebook export: TensorFlow 1.x basics run through the tf.compat.v1 shims.

Covers tensor creation (zeros/ones/fill/constant, sequences, random ops),
JPEG decoding/cropping, and tf.Variable creation/initialization, ending with
a TensorBoard summary writer.  The ``# In[N]:`` markers are Jupyter cell
boundaries; the ``get_ipython()`` calls only work inside IPython/Jupyter.
"""

# In[1]:
import tensorflow as tf
try:
    from tensorflow.python.framework import ops
except:  # NOTE(review): bare except also hides non-import errors; prefer `except ImportError`
    print("No module named 'tensorflow.python.framework'"+tf.__version__)
ops.reset_default_graph()
# In[2]:
print(tf.__version__)
# In[3]:
# Run the rest of the script in TF1-style graph mode.
tf.compat.v1.disable_eager_execution()
# In[4]:
sess = tf.compat.v1.Session()
# In[5]:
my_tensor = tf.zeros([1,20])
# In[6]:
sess.run(my_tensor)
# In[7]:
# Fixed-value tensors: zeros, ones and a constant fill.
row_dim, col_dim = 3, 5
zero_tsr = tf.zeros([row_dim, col_dim])
sess.run(zero_tsr)
# In[8]:
row_dim, col_dim = 6, 7
ones_tsr = tf.ones([row_dim, col_dim])
sess.run(ones_tsr)
# In[9]:
row_dim, col_dim = 6, 7
filled_tsr = tf.fill([row_dim, col_dim],42)
sess.run(filled_tsr)
# In[10]:
constant1_tsr = tf.constant([1,2,3])
print(sess.run(constant1_tsr))
constant2_tsr = tf.constant([[1,2,3],[4,5,6],[7,8,9]])
print(sess.run(constant2_tsr))
# In[11]:
# Shape-copying constructors.
zeros_similar=tf.zeros_like(constant1_tsr)
sess.run(zeros_similar)
# In[12]:
ones_similar = tf.ones_like(constant2_tsr)
sess.run(ones_similar)
# In[13]:
# Sequence tensors: linspace includes the endpoint, range does not.
linear_tsr = tf.linspace(start=0.0,stop=100,num=11)
sess.run(linear_tsr)
# In[14]:
integer_seq_tsr = tf.range(start=6,limit=15,delta=3)
sess.run(integer_seq_tsr)
# In[15]:
# Random tensors: uniform, normal, and truncated normal.
row_dim, col_dim = 8, 8
randuif_tsr = tf.compat.v1.random_uniform([row_dim, col_dim], minval=0, maxval=1)
sess.run(randuif_tsr)
# In[16]:
row_dim, col_dim = 8, 8
randnorm_tsr = tf.compat.v1.random_normal([row_dim,col_dim], mean=0.0, stddev=1.0)
sess.run(randnorm_tsr)
# In[17]:
row_dim, col_dim = 8, 8
runcnomr_tsr = tf.compat.v1.truncated_normal([row_dim,col_dim],mean=0.0, stddev=1.0)
sess.run(runcnomr_tsr)
# In[18]:
shuffled_output = tf.compat.v1.random_shuffle(runcnomr_tsr)
sess.run(shuffled_output)
# In[19]:
cropped_output = tf.compat.v1.random_crop(runcnomr_tsr,[4,4])
sess.run(cropped_output)
# In[20]:
sess.run(runcnomr_tsr)
# In[21]:
import matplotlib.pyplot as plt
import tensorflow as tf
get_ipython().run_line_magic('matplotlib', 'inline')
image_raw_data_jpg=tf.compat.v1.gfile.GFile("Huawei logo.jpg","rb").read()
# NOTE(review): `with sess as session:` closes `sess` on exit, which is why a
# fresh Session has to be created in cell 23 below.
with sess as session:
    img_data=tf.image.decode_jpeg(image_raw_data_jpg)
    plt.figure(1)
    print(session.run(img_data))
    plt.imshow(img_data.eval())
# In[22]:
cropped_image = tf.compat.v1.random_crop(img_data, [3, 1, 3])
# In[23]:
sess = tf.compat.v1.Session()
sess.run(cropped_image)
# *Variable*
# In[24]:
# Variables must be explicitly initialized before they can be read.
my_var = tf.Variable(tf.zeros([1,20]))
sess.run(my_var.initializer)
sess.run(my_var)
# In[25]:
my_var1 = tf.Variable(tf.zeros([2,3]))
sess = tf.compat.v1.Session()
initialize_op = tf.compat.v1.global_variables_initializer()
sess.run(initialize_op)
# In[26]:
# Placeholders are fed concrete values at run time via feed_dict.
import numpy as np
sess = tf.compat.v1.Session()
x = tf.compat.v1.placeholder(tf.float32,shape=[2,2])
y = tf.compat.v1.identity(x)
x_vals = np.random.rand(2,2)
sess.run(y, feed_dict={x: x_vals})
# In[27]:
sess.run(x,feed_dict={x: x_vals})
# In[28]:
sess = tf.compat.v1.Session()
first_var = tf.Variable(tf.zeros([2,3]))
sess.run(first_var.initializer)
# depends on the first variable
second_var = tf.Variable(tf.zeros_like(first_var))
sess.run(second_var.initializer)
# In[29]:
row_dim, col_dim = 2, 3
zero_var = tf.Variable(tf.zeros([row_dim, col_dim]))
ones_var = tf.Variable(tf.ones([row_dim,col_dim]))
sess.run(zero_var.initializer)
sess.run(ones_var.initializer)
print(sess.run(zero_var))
print(sess.run(ones_var))
# In[30]:
zero_similar = tf.Variable(tf.zeros_like(zero_var))
ones_similar = tf.Variable(tf.ones_like(ones_var))
sess.run(ones_similar.initializer)
sess.run(zero_similar.initializer)
print(sess.run(ones_similar))
print(sess.run(zero_similar))
# In[31]:
fill_var = tf.Variable(tf.fill([row_dim, col_dim], -1))
sess.run(fill_var.initializer)
print(sess.run(fill_var))
# In[32]:
# create a variable tensor from a list of constants
const_var = tf.Variable(tf.constant([8, 6, 7, 5, 3, 0, 9]))
# create a variable tensor from a constant-filled array
const_fill_var = tf.Variable(tf.constant(-1, shape=[row_dim, col_dim]))
sess.run(const_var.initializer)
sess.run(const_fill_var.initializer)
print(sess.run(const_var))
print(sess.run(const_fill_var))
# In[33]:
# TensorFlow's linspace
linear_var = tf.Variable(tf.linspace(start=0.0, stop=1.0, num=3))
# Generates [0.0, 0.5, 1.0] includes the end
# TensorFlow's range
sequence_var = tf.Variable(tf.range(start=6, limit=15, delta=3))
# Generates [6, 9, 12] doesn't include the end
sess.run(linear_var.initializer)
sess.run(sequence_var.initializer)
print(sess.run(linear_var))
print(sess.run(sequence_var))
# In[34]:
rnorm_var = tf.compat.v1.random_normal([row_dim, col_dim], mean=0.0, stddev=1.0)
runif_var = tf.compat.v1.random_uniform([row_dim, col_dim], minval=0, maxval=4)
print(sess.run(rnorm_var))
print(sess.run(runif_var))
# In[ ]:
# reset the default computation graph
ops.reset_default_graph()
# start a graph session
sess = tf.compat.v1.Session()
# create a variable tensor
my_var = tf.Variable(tf.zeros([1,20]))
# merge all summaries for TensorBoard
merged = tf.compat.v1.summary.merge_all()
# initialize the graph writer
writer = tf.compat.v1.summary.FileWriter("/tmp/variable_logs", graph=sess.graph)
# global variables initializer
initialize_op = tf.compat.v1.global_variables_initializer()
# run the variable initialization
sess.run(initialize_op)
# In[ ]:
# NOTE(review): `/tmpc` looks like a typo for `/tmp` (the logs above went to /tmp/variable_logs).
get_ipython().system('tensorboard --logdir=/tmpc')
# In[ ]:
| [
"tensorflow.compat.v1.identity",
"tensorflow.zeros_like",
"tensorflow.compat.v1.disable_eager_execution",
"matplotlib.pyplot.figure",
"tensorflow.compat.v1.truncated_normal",
"tensorflow.python.framework.ops.reset_default_graph",
"tensorflow.compat.v1.random_shuffle",
"tensorflow.compat.v1.global_vari... | [((209, 234), 'tensorflow.python.framework.ops.reset_default_graph', 'ops.reset_default_graph', ([], {}), '()\n', (232, 234), False, 'from tensorflow.python.framework import ops\n'), ((283, 321), 'tensorflow.compat.v1.disable_eager_execution', 'tf.compat.v1.disable_eager_execution', ([], {}), '()\n', (319, 321), True, 'import tensorflow as tf\n'), ((342, 364), 'tensorflow.compat.v1.Session', 'tf.compat.v1.Session', ([], {}), '()\n', (362, 364), True, 'import tensorflow as tf\n'), ((390, 407), 'tensorflow.zeros', 'tf.zeros', (['[1, 20]'], {}), '([1, 20])\n', (398, 407), True, 'import tensorflow as tf\n'), ((488, 516), 'tensorflow.zeros', 'tf.zeros', (['[row_dim, col_dim]'], {}), '([row_dim, col_dim])\n', (496, 516), True, 'import tensorflow as tf\n'), ((584, 611), 'tensorflow.ones', 'tf.ones', (['[row_dim, col_dim]'], {}), '([row_dim, col_dim])\n', (591, 611), True, 'import tensorflow as tf\n'), ((681, 712), 'tensorflow.fill', 'tf.fill', (['[row_dim, col_dim]', '(42)'], {}), '([row_dim, col_dim], 42)\n', (688, 712), True, 'import tensorflow as tf\n'), ((763, 785), 'tensorflow.constant', 'tf.constant', (['[1, 2, 3]'], {}), '([1, 2, 3])\n', (774, 785), True, 'import tensorflow as tf\n'), ((831, 877), 'tensorflow.constant', 'tf.constant', (['[[1, 2, 3], [4, 5, 6], [7, 8, 9]]'], {}), '([[1, 2, 3], [4, 5, 6], [7, 8, 9]])\n', (842, 877), True, 'import tensorflow as tf\n'), ((929, 957), 'tensorflow.zeros_like', 'tf.zeros_like', (['constant1_tsr'], {}), '(constant1_tsr)\n', (942, 957), True, 'import tensorflow as tf\n'), ((1011, 1038), 'tensorflow.ones_like', 'tf.ones_like', (['constant2_tsr'], {}), '(constant2_tsr)\n', (1023, 1038), True, 'import tensorflow as tf\n'), ((1089, 1129), 'tensorflow.linspace', 'tf.linspace', ([], {'start': '(0.0)', 'stop': '(100)', 'num': '(11)'}), '(start=0.0, stop=100, num=11)\n', (1100, 1129), True, 'import tensorflow as tf\n'), ((1181, 1217), 'tensorflow.range', 'tf.range', ([], {'start': '(6)', 
'limit': '(15)', 'delta': '(3)'}), '(start=6, limit=15, delta=3)\n', (1189, 1217), True, 'import tensorflow as tf\n'), ((1294, 1361), 'tensorflow.compat.v1.random_uniform', 'tf.compat.v1.random_uniform', (['[row_dim, col_dim]'], {'minval': '(0)', 'maxval': '(1)'}), '([row_dim, col_dim], minval=0, maxval=1)\n', (1321, 1361), True, 'import tensorflow as tf\n'), ((1437, 1505), 'tensorflow.compat.v1.random_normal', 'tf.compat.v1.random_normal', (['[row_dim, col_dim]'], {'mean': '(0.0)', 'stddev': '(1.0)'}), '([row_dim, col_dim], mean=0.0, stddev=1.0)\n', (1463, 1505), True, 'import tensorflow as tf\n'), ((1581, 1652), 'tensorflow.compat.v1.truncated_normal', 'tf.compat.v1.truncated_normal', (['[row_dim, col_dim]'], {'mean': '(0.0)', 'stddev': '(1.0)'}), '([row_dim, col_dim], mean=0.0, stddev=1.0)\n', (1610, 1652), True, 'import tensorflow as tf\n'), ((1706, 1747), 'tensorflow.compat.v1.random_shuffle', 'tf.compat.v1.random_shuffle', (['runcnomr_tsr'], {}), '(runcnomr_tsr)\n', (1733, 1747), True, 'import tensorflow as tf\n'), ((1805, 1851), 'tensorflow.compat.v1.random_crop', 'tf.compat.v1.random_crop', (['runcnomr_tsr', '[4, 4]'], {}), '(runcnomr_tsr, [4, 4])\n', (1829, 1851), True, 'import tensorflow as tf\n'), ((2299, 2344), 'tensorflow.compat.v1.random_crop', 'tf.compat.v1.random_crop', (['img_data', '[3, 1, 3]'], {}), '(img_data, [3, 1, 3])\n', (2323, 2344), True, 'import tensorflow as tf\n'), ((2366, 2388), 'tensorflow.compat.v1.Session', 'tf.compat.v1.Session', ([], {}), '()\n', (2386, 2388), True, 'import tensorflow as tf\n'), ((2586, 2608), 'tensorflow.compat.v1.Session', 'tf.compat.v1.Session', ([], {}), '()\n', (2606, 2608), True, 'import tensorflow as tf\n'), ((2625, 2668), 'tensorflow.compat.v1.global_variables_initializer', 'tf.compat.v1.global_variables_initializer', ([], {}), '()\n', (2666, 2668), True, 'import tensorflow as tf\n'), ((2733, 2755), 'tensorflow.compat.v1.Session', 'tf.compat.v1.Session', ([], {}), '()\n', (2753, 2755), True, 'import 
tensorflow as tf\n'), ((2760, 2810), 'tensorflow.compat.v1.placeholder', 'tf.compat.v1.placeholder', (['tf.float32'], {'shape': '[2, 2]'}), '(tf.float32, shape=[2, 2])\n', (2784, 2810), True, 'import tensorflow as tf\n'), ((2813, 2837), 'tensorflow.compat.v1.identity', 'tf.compat.v1.identity', (['x'], {}), '(x)\n', (2834, 2837), True, 'import tensorflow as tf\n'), ((2847, 2867), 'numpy.random.rand', 'np.random.rand', (['(2)', '(2)'], {}), '(2, 2)\n', (2861, 2867), True, 'import numpy as np\n'), ((2971, 2993), 'tensorflow.compat.v1.Session', 'tf.compat.v1.Session', ([], {}), '()\n', (2991, 2993), True, 'import tensorflow as tf\n'), ((4538, 4606), 'tensorflow.compat.v1.random_normal', 'tf.compat.v1.random_normal', (['[row_dim, col_dim]'], {'mean': '(0.0)', 'stddev': '(1.0)'}), '([row_dim, col_dim], mean=0.0, stddev=1.0)\n', (4564, 4606), True, 'import tensorflow as tf\n'), ((4619, 4686), 'tensorflow.compat.v1.random_uniform', 'tf.compat.v1.random_uniform', (['[row_dim, col_dim]'], {'minval': '(0)', 'maxval': '(4)'}), '([row_dim, col_dim], minval=0, maxval=4)\n', (4646, 4686), True, 'import tensorflow as tf\n'), ((4762, 4787), 'tensorflow.python.framework.ops.reset_default_graph', 'ops.reset_default_graph', ([], {}), '()\n', (4785, 4787), False, 'from tensorflow.python.framework import ops\n'), ((4816, 4838), 'tensorflow.compat.v1.Session', 'tf.compat.v1.Session', ([], {}), '()\n', (4836, 4838), True, 'import tensorflow as tf\n'), ((4923, 4955), 'tensorflow.compat.v1.summary.merge_all', 'tf.compat.v1.summary.merge_all', ([], {}), '()\n', (4953, 4955), True, 'import tensorflow as tf\n'), ((4976, 5047), 'tensorflow.compat.v1.summary.FileWriter', 'tf.compat.v1.summary.FileWriter', (['"""/tmp/variable_logs"""'], {'graph': 'sess.graph'}), "('/tmp/variable_logs', graph=sess.graph)\n", (5007, 5047), True, 'import tensorflow as tf\n'), ((5075, 5118), 'tensorflow.compat.v1.global_variables_initializer', 'tf.compat.v1.global_variables_initializer', ([], {}), '()\n', (5116, 
5118), True, 'import tensorflow as tf\n'), ((2145, 2185), 'tensorflow.image.decode_jpeg', 'tf.image.decode_jpeg', (['image_raw_data_jpg'], {}), '(image_raw_data_jpg)\n', (2165, 2185), True, 'import tensorflow as tf\n'), ((2190, 2203), 'matplotlib.pyplot.figure', 'plt.figure', (['(1)'], {}), '(1)\n', (2200, 2203), True, 'import matplotlib.pyplot as plt\n'), ((2462, 2479), 'tensorflow.zeros', 'tf.zeros', (['[1, 20]'], {}), '([1, 20])\n', (2470, 2479), True, 'import tensorflow as tf\n'), ((2562, 2578), 'tensorflow.zeros', 'tf.zeros', (['[2, 3]'], {}), '([2, 3])\n', (2570, 2578), True, 'import tensorflow as tf\n'), ((3018, 3034), 'tensorflow.zeros', 'tf.zeros', (['[2, 3]'], {}), '([2, 3])\n', (3026, 3034), True, 'import tensorflow as tf\n'), ((3103, 3127), 'tensorflow.zeros_like', 'tf.zeros_like', (['first_var'], {}), '(first_var)\n', (3116, 3127), True, 'import tensorflow as tf\n'), ((3223, 3251), 'tensorflow.zeros', 'tf.zeros', (['[row_dim, col_dim]'], {}), '([row_dim, col_dim])\n', (3231, 3251), True, 'import tensorflow as tf\n'), ((3276, 3303), 'tensorflow.ones', 'tf.ones', (['[row_dim, col_dim]'], {}), '([row_dim, col_dim])\n', (3283, 3303), True, 'import tensorflow as tf\n'), ((3459, 3482), 'tensorflow.zeros_like', 'tf.zeros_like', (['zero_var'], {}), '(zero_var)\n', (3472, 3482), True, 'import tensorflow as tf\n'), ((3511, 3533), 'tensorflow.ones_like', 'tf.ones_like', (['ones_var'], {}), '(ones_var)\n', (3523, 3533), True, 'import tensorflow as tf\n'), ((3702, 3733), 'tensorflow.fill', 'tf.fill', (['[row_dim, col_dim]', '(-1)'], {}), '([row_dim, col_dim], -1)\n', (3709, 3733), True, 'import tensorflow as tf\n'), ((3844, 3878), 'tensorflow.constant', 'tf.constant', (['[8, 6, 7, 5, 3, 0, 9]'], {}), '([8, 6, 7, 5, 3, 0, 9])\n', (3855, 3878), True, 'import tensorflow as tf\n'), ((3925, 3966), 'tensorflow.constant', 'tf.constant', (['(-1)'], {'shape': '[row_dim, col_dim]'}), '(-1, shape=[row_dim, col_dim])\n', (3936, 3966), True, 'import tensorflow as tf\n'), 
((4164, 4203), 'tensorflow.linspace', 'tf.linspace', ([], {'start': '(0.0)', 'stop': '(1.0)', 'num': '(3)'}), '(start=0.0, stop=1.0, num=3)\n', (4175, 4203), True, 'import tensorflow as tf\n'), ((4298, 4334), 'tensorflow.range', 'tf.range', ([], {'start': '(6)', 'limit': '(15)', 'delta': '(3)'}), '(start=6, limit=15, delta=3)\n', (4306, 4334), True, 'import tensorflow as tf\n'), ((4870, 4887), 'tensorflow.zeros', 'tf.zeros', (['[1, 20]'], {}), '([1, 20])\n', (4878, 4887), True, 'import tensorflow as tf\n'), ((2054, 2103), 'tensorflow.compat.v1.gfile.GFile', 'tf.compat.v1.gfile.GFile', (['"""Huawei logo.jpg"""', '"""rb"""'], {}), "('Huawei logo.jpg', 'rb')\n", (2078, 2103), True, 'import tensorflow as tf\n')] |
import numpy as np
import pytest
import torch
from ase import Atoms
from schnetpack import environment as env
__all__ = [
"single_atom",
"two_atoms",
"single_site_crystal",
"two_site_crystal",
"crystal",
"h2o",
"o2",
"simple_env",
"ase_env",
"torch_env",
]
@pytest.fixture
def single_atom():
    """A lone carbon atom sitting at the origin."""
    origin = [[0.0, 0.0, 0.0]]
    return Atoms([6], positions=origin)


@pytest.fixture
def two_atoms():
    """Two carbon atoms 0.1 apart along the x axis."""
    sites = [[0.0, 0.0, 0.0], [0.1, 0.0, 0.0]]
    return Atoms([6, 6], positions=sites)
@pytest.fixture
def single_site_crystal():
    """Periodic unit-cube cell holding one carbon atom."""
    unit_cell = np.eye(3)
    return Atoms([6], positions=[[0.0, 0.0, 0.0]], cell=unit_cell, pbc=True)


@pytest.fixture
def two_site_crystal():
    """Periodic unit-cube cell holding two carbon atoms 0.1 apart along x."""
    unit_cell = np.eye(3)
    return Atoms(
        [6, 6], positions=[[0.0, 0.0, 0.0], [0.1, 0.0, 0.0]], cell=unit_cell, pbc=True
    )
@pytest.fixture(params=[0, 1])
def crystal(request, single_site_crystal, two_site_crystal):
    """Parametrized fixture iterating over the one- and two-site crystals."""
    yield (single_site_crystal, two_site_crystal)[request.param]
@pytest.fixture
def h2o():
    """Water-like system (two H, one O) at random positions."""
    coords = np.random.rand(3, 3)
    return Atoms(positions=coords, numbers=[1, 1, 8])


@pytest.fixture
def o2():
    """Oxygen-like system (two O) at random positions."""
    coords = np.random.rand(2, 3)
    return Atoms(positions=coords, numbers=[8, 8])
# environment providers
@pytest.fixture
def simple_env():
    """Plain (non-periodic) environment provider."""
    return env.SimpleEnvironmentProvider()


@pytest.fixture
def ase_env():
    """ASE-backed environment provider with a 10.0 cutoff."""
    return env.AseEnvironmentProvider(10.0)


@pytest.fixture
def torch_env():
    """Torch-backed environment provider with a 10.0 cutoff, on CPU."""
    # Select torch.device('cuda') to test on GPU
    return env.TorchEnvironmentProvider(10.0, device=torch.device("cpu"))
| [
"schnetpack.environment.SimpleEnvironmentProvider",
"pytest.fixture",
"torch.device",
"schnetpack.environment.AseEnvironmentProvider",
"numpy.random.rand",
"numpy.eye",
"ase.Atoms"
] | [((771, 800), 'pytest.fixture', 'pytest.fixture', ([], {'params': '[0, 1]'}), '(params=[0, 1])\n', (785, 800), False, 'import pytest\n'), ((347, 386), 'ase.Atoms', 'Atoms', (['[6]'], {'positions': '[[0.0, 0.0, 0.0]]'}), '([6], positions=[[0.0, 0.0, 0.0]])\n', (352, 386), False, 'from ase import Atoms\n'), ((433, 492), 'ase.Atoms', 'Atoms', (['[6, 6]'], {'positions': '[[0.0, 0.0, 0.0], [0.1, 0.0, 0.0]]'}), '([6, 6], positions=[[0.0, 0.0, 0.0], [0.1, 0.0, 0.0]])\n', (438, 492), False, 'from ase import Atoms\n'), ((1212, 1243), 'schnetpack.environment.SimpleEnvironmentProvider', 'env.SimpleEnvironmentProvider', ([], {}), '()\n', (1241, 1243), True, 'from schnetpack import environment as env\n'), ((1288, 1320), 'schnetpack.environment.AseEnvironmentProvider', 'env.AseEnvironmentProvider', (['(10.0)'], {}), '(10.0)\n', (1314, 1320), True, 'from schnetpack import environment as env\n'), ((594, 603), 'numpy.eye', 'np.eye', (['(3)'], {}), '(3)\n', (600, 603), True, 'import numpy as np\n'), ((742, 751), 'numpy.eye', 'np.eye', (['(3)'], {}), '(3)\n', (748, 751), True, 'import numpy as np\n'), ((1007, 1027), 'numpy.random.rand', 'np.random.rand', (['(3)', '(3)'], {}), '(3, 3)\n', (1021, 1027), True, 'import numpy as np\n'), ((1103, 1123), 'numpy.random.rand', 'np.random.rand', (['(2)', '(3)'], {}), '(2, 3)\n', (1117, 1123), True, 'import numpy as np\n'), ((1458, 1477), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (1470, 1477), False, 'import torch\n')] |
"""Performs vector and matrix operations.
2020, <NAME> <<EMAIL>>
"""
# external imports
import numpy as np
def shift_vector(*, vec: np.ndarray, shift: int) -> np.ndarray:
    """Shift the entries of a 1d array by ``shift`` positions.

    Vacated positions are filled with 0 and the length is preserved, so
    with shift=k the result satisfies w(i) = v(i+k) (out-of-range terms
    are 0).  Negative ``shift`` moves values toward higher indices.

    Args:
        vec: Input vector to shift, 1d array.
        shift: Shift positions up (positive) or down (negative).

    Returns:
        Shifted vector of the same length.
    """
    if shift == 0:
        return vec
    # Zero padding built with the vector's own dtype so dtype is preserved.
    pad = np.zeros(abs(shift), dtype=vec.dtype)
    if shift > 0:
        return np.concatenate((vec[shift:], pad))
    return np.concatenate((pad, vec[:shift]))
def shift_matrix_rows(*, mat: np.ndarray, shift: int) -> np.ndarray:
    """Shift the rows of a 2d array by ``shift`` positions.

    With shift=k the result satisfies B(i, j) = A(i+k, j); rows shifted in
    from outside the matrix are 0.  Negative ``shift`` moves rows down.
    Output has the same shape as the input.

    Args:
        mat: Input matrix to shift, 2d array.
        shift: Shift rows up (positive) or down (negative).

    Returns:
        Shifted matrix.
    """
    if shift == 0:
        return mat
    out = np.zeros_like(mat)
    if shift > 0:
        # Rows move up: row i takes the value of row i + shift.
        out[:-shift, :] = mat[shift:, :]
    else:
        # Rows move down by |shift|.
        out[-shift:, :] = mat[:shift, :]
    return out
def reverse_cumsum(vec: np.ndarray, shift: int = 0) -> np.ndarray:
    """Suffix sums of a vector, optionally shifted.

    With shift=k the result satisfies w(i) = sum_{j=i+k}^{end} v(j)
    (out-of-range positions are 0).  Output has the same length as input.

    Args:
        vec: Input vector to sum, 1d array.
        shift: Shift positions up (positive) or down (negative).

    Returns:
        Shifted anti-cumulative sum, 1d array.
    """
    # Reverse, running-sum, reverse again: tail_sums(i) = sum_{j>=i} v(j).
    tail_sums = np.cumsum(vec[::-1])[::-1]
    return shift_vector(vec=tail_sums, shift=shift)
def triangular_dot(*, mat: np.ndarray, vec: np.ndarray, shift: int = 0) -> np.ndarray:
    """Row sums over the upper-triangular part of ``mat`` weighted by ``vec``.

    With shift=k the result satisfies
    w(i) = sum_{j=i+k}^{end} A(i+k, j) * v(j).
    Output length equals the number of matrix rows.

    Args:
        mat: Arbitrary matrix, 2d array.
        vec: Weight vector, 1d array (broadcast across the rows of ``mat``).
        shift: Shift rows up (positive) or down (negative).

    Returns:
        Shifted weighted row sums, 1d array.
    """
    # Scale each column j by v(j), then reuse the triangular row-sum helper.
    return triangular_sum_rows(mat=mat * vec, shift=shift)
def triangular_sum_rows(*, mat: np.ndarray, shift: int = 0) -> np.ndarray:
    """Row sums of the upper-triangular part of a matrix, shifted.

    With shift=k the result satisfies w(i) = sum_{j=i+k}^{end} A(i+k, j).
    Null positions created by the shift are 0.

    Args:
        mat: Arbitrary matrix, 2d array.
        shift: Shift positions up (positive) or down (negative).

    Returns:
        Summed vector, 1d array.
    """
    row_sums = np.triu(mat).sum(axis=1)
    return shift_vector(vec=row_sums, shift=shift)
def triangular_sum_columns(*, mat: np.ndarray, row_shift: int = 0) -> np.ndarray:
    """Column sums over the (band-shifted) upper-triangular part of a matrix.

    With row_shift=k the result satisfies w(i) = sum_{j=1}^{i+k} M(j, i):
    positive k includes bands below the diagonal, negative k excludes
    bands above it.

    Args:
        mat: Arbitrary matrix, 2d array.
        row_shift: Include below-diagonal terms (positive) or exclude
            above-diagonal terms (negative).

    Returns:
        Summed vector, 1d array.
    """
    kept = np.triu(mat, k=-row_shift)
    return kept.sum(axis=0)
def triangular_sum_chunks(
    *, mat: np.ndarray, row_shift: int = 0, col_shift: int = 0
) -> np.ndarray:
    """Sum upper-right sub-blocks of a matrix.

    Each entry sums the submatrix above-and-right of a (shifted) diagonal
    element.  With row_shift=k_row and col_shift=k_col the result satisfies
    w(i) = sum_{m=1}^{i+k_row} sum_{j=i+k_col}^{end} A(m, j).
    Null positions created by the shift are 0.

    Args:
        mat: Arbitrary matrix, 2d array.
        row_shift: Include below-diagonal terms (positive) or exclude
            above-diagonal terms (negative).
        col_shift: Include left-of-diagonal terms (positive) or exclude
            right-of-diagonal terms (negative).

    Returns:
        Summed vector, 1d array (one entry per column of ``mat``).
    """
    # Row-wise suffix sums: tail_sums(m, j) = sum_{l=j}^{end} A(m, l).
    tail_sums = np.cumsum(mat[:, ::-1], axis=1)[:, ::-1]
    kept = np.triu(tail_sums, k=col_shift - row_shift)
    summed = kept.sum(axis=0)
    return shift_vector(vec=summed, shift=col_shift)
| [
"numpy.zeros_like",
"numpy.triu",
"numpy.sum",
"numpy.flip",
"numpy.roll",
"numpy.apply_along_axis"
] | [((3256, 3268), 'numpy.triu', 'np.triu', (['mat'], {}), '(mat)\n', (3263, 3268), True, 'import numpy as np\n'), ((3283, 3306), 'numpy.sum', 'np.sum', (['aux_mat'], {'axis': '(1)'}), '(aux_mat, axis=1)\n', (3289, 3306), True, 'import numpy as np\n'), ((3915, 3941), 'numpy.triu', 'np.triu', (['mat'], {'k': '(-row_shift)'}), '(mat, k=-row_shift)\n', (3922, 3941), True, 'import numpy as np\n'), ((3953, 3972), 'numpy.sum', 'np.sum', (['aux'], {'axis': '(0)'}), '(aux, axis=0)\n', (3959, 3972), True, 'import numpy as np\n'), ((4832, 4875), 'numpy.apply_along_axis', 'np.apply_along_axis', (['reverse_cumsum', '(1)', 'mat'], {}), '(reverse_cumsum, 1, mat)\n', (4851, 4875), True, 'import numpy as np\n'), ((4928, 4957), 'numpy.triu', 'np.triu', (['aux_mat'], {'k': 'aux_shift'}), '(aux_mat, k=aux_shift)\n', (4935, 4957), True, 'import numpy as np\n'), ((4972, 4995), 'numpy.sum', 'np.sum', (['aux_mat'], {'axis': '(0)'}), '(aux_mat, axis=0)\n', (4978, 4995), True, 'import numpy as np\n'), ((1232, 1260), 'numpy.roll', 'np.roll', (['mat', '(-shift)'], {'axis': '(0)'}), '(mat, -shift, axis=0)\n', (1239, 1260), True, 'import numpy as np\n'), ((601, 627), 'numpy.zeros_like', 'np.zeros_like', (['vec[shift:]'], {}), '(vec[shift:])\n', (614, 627), True, 'import numpy as np\n'), ((1344, 1372), 'numpy.roll', 'np.roll', (['mat', '(-shift)'], {'axis': '(0)'}), '(mat, -shift, axis=0)\n', (1351, 1372), True, 'import numpy as np\n'), ((2010, 2022), 'numpy.flip', 'np.flip', (['vec'], {}), '(vec)\n', (2017, 2022), True, 'import numpy as np\n'), ((700, 726), 'numpy.zeros_like', 'np.zeros_like', (['vec[:shift]'], {}), '(vec[:shift])\n', (713, 726), True, 'import numpy as np\n')] |
from __future__ import absolute_import, division, print_function, unicode_literals
import numpy as np
import tensorflow as tf
from itertools import cycle
DISTRIBUTED_LAYERS = False
def gelu(x):
    """Gaussian Error Linear Unit (tanh approximation).

    A smoother alternative to ReLU.
    Original paper: https://arxiv.org/abs/1606.08415

    Args:
        x: float Tensor to perform activation.

    Returns:
        `x` with the GELU activation applied elementwise.
    """
    inner = np.sqrt(2 / np.pi) * (x + 0.044715 * tf.pow(x, 3))
    cdf = 0.5 * (1.0 + tf.tanh(inner))
    return x * cdf
def scaled_dot_product_attention(q, k, v, mask):
    """Compute scaled dot-product attention.

    q, k, v must have matching leading dimensions and k, v must have the
    same penultimate dimension (seq_len_k == seq_len_v).  The mask must be
    broadcastable to (..., seq_len_q, seq_len_k).

    Args:
        q: query, shape (..., seq_len_q, depth)
        k: key, shape (..., seq_len_k, depth)
        v: value, shape (..., seq_len_v, depth_v)
        mask: float tensor broadcastable to (..., seq_len_q, seq_len_k),
            or None for no masking.

    Returns:
        (output, attention_weights)
    """
    scores = tf.matmul(q, k, transpose_b=True)  # (..., seq_len_q, seq_len_k)
    # Scale by sqrt of the key depth to stabilize the softmax.
    depth = tf.cast(tf.shape(k)[-1], tf.float32)
    logits = scores / tf.math.sqrt(depth)
    if mask is not None:
        # Push masked positions toward -inf so softmax gives them ~0 weight.
        logits += (mask * -1e9)
    # Normalized over the last axis (seq_len_k) so each row sums to 1.
    attention_weights = tf.nn.softmax(logits, axis=-1)  # (..., seq_len_q, seq_len_k)
    output = tf.matmul(attention_weights, v)  # (..., seq_len_q, depth_v)
    return output, attention_weights
def print_out(q, k, v):
    """Run unmasked scaled dot-product attention and print the results."""
    out, attn = scaled_dot_product_attention(q, k, v, None)
    print('Attention weights are:')
    print(attn)
    print('Output is:')
    print(out)
class MultiHeadAttention(tf.keras.layers.Layer):
    """Multi-head attention layer, optionally initialized from pretrained
    (BERT-style) projection weights.

    Each of ``wq_weights``/``wk_weights``/``wv_weights``/``w_dense`` is
    either None (Keras defaults: glorot_uniform kernel, zero bias) or a
    ``(kernel, bias)`` pair of arrays used as constant initializers.
    """

    @staticmethod
    def _dense_initializers(weights):
        """Return (kernel_initializer, bias_initializer) for one Dense layer.

        ``weights`` is None (Keras defaults) or a (kernel, bias) array pair.
        Replaces the four identical if/else branches of the original code.
        """
        if weights is None:
            return 'glorot_uniform', 'zeros'
        return tf.initializers.Constant(weights[0]), tf.initializers.Constant(weights[1])

    def __init__(self, d_model, num_heads, wq_weights=None, wk_weights=None, wv_weights=None, w_dense=None):
        super(MultiHeadAttention, self).__init__()
        self.num_heads = num_heads
        self.d_model = d_model
        # d_model must split evenly across heads.
        assert d_model % self.num_heads == 0
        self.depth = d_model // self.num_heads
        wq_k, wq_b = self._dense_initializers(wq_weights)
        wk_k, wk_b = self._dense_initializers(wk_weights)
        wv_k, wv_b = self._dense_initializers(wv_weights)
        wd_k, wd_b = self._dense_initializers(w_dense)
        self.wq = tf.keras.layers.Dense(d_model, kernel_initializer=wq_k, bias_initializer=wq_b)
        self.wk = tf.keras.layers.Dense(d_model, kernel_initializer=wk_k, bias_initializer=wk_b)
        self.wv = tf.keras.layers.Dense(d_model, kernel_initializer=wv_k, bias_initializer=wv_b)
        self.dense = tf.keras.layers.Dense(d_model, kernel_initializer=wd_k, bias_initializer=wd_b)

    def split_heads(self, x, batch_size):
        """Split the last dimension into (num_heads, depth).
        Transpose the result such that the shape is (batch_size, num_heads, seq_len, depth)
        """
        x = tf.reshape(x, (batch_size, -1, self.num_heads, self.depth))
        return tf.transpose(x, perm=[0, 2, 1, 3])

    def call(self, v, k, q, mask):
        """Project q/k/v, run per-head scaled dot-product attention, and
        recombine the heads through the output projection.

        Args:
            v, k, q: tensors of shape (batch_size, seq_len, d_model).
            mask: broadcastable to (..., seq_len_q, seq_len_k), or None.

        Returns:
            (output, attention_weights): output has shape
            (batch_size, seq_len_q, d_model); attention_weights has shape
            (batch_size, num_heads, seq_len_q, seq_len_k).
        """
        batch_size = tf.shape(q)[0]

        q = self.wq(q)  # (batch_size, seq_len, d_model)
        k = self.wk(k)  # (batch_size, seq_len, d_model)
        v = self.wv(v)  # (batch_size, seq_len, d_model)

        q = self.split_heads(q, batch_size)  # (batch_size, num_heads, seq_len_q, depth)
        k = self.split_heads(k, batch_size)  # (batch_size, num_heads, seq_len_k, depth)
        v = self.split_heads(v, batch_size)  # (batch_size, num_heads, seq_len_v, depth)

        # scaled_attention.shape == (batch_size, num_heads, seq_len_q, depth)
        # attention_weights.shape == (batch_size, num_heads, seq_len_q, seq_len_k)
        scaled_attention, attention_weights = scaled_dot_product_attention(
            q, k, v, mask)

        scaled_attention = tf.transpose(scaled_attention,
                                        perm=[0, 2, 1, 3])  # (batch_size, seq_len_q, num_heads, depth)

        concat_attention = tf.reshape(scaled_attention,
                                      (batch_size, -1, self.d_model))  # (batch_size, seq_len_q, d_model)

        output = self.dense(concat_attention)  # (batch_size, seq_len_q, d_model)

        return output, attention_weights
def point_wise_feed_forward_network(d_model, dff, l1_weights=None, l2_weights=None, inner_activation_type="relu"):
    """Build the position-wise feed-forward block: Dense(dff) -> Dense(d_model).

    Args:
        d_model: output width of the second layer.
        dff: width of the inner (first) layer.
        l1_weights, l2_weights: optional (kernel, bias) array pairs used as
            constant initializers; None means Keras defaults.
        inner_activation_type: "relu" or "gelu" for the inner layer.

    Returns:
        tf.keras.Sequential mapping (batch_size, seq_len, *) to
        (batch_size, seq_len, d_model).

    Raises:
        ValueError: if inner_activation_type is not "relu" or "gelu".
            (Previously an unknown value fell through both branches and
            produced a confusing NameError on activation_inner.)
    """
    def _init_pair(weights):
        # None -> Keras defaults; otherwise wrap the (kernel, bias) pair.
        if weights is None:
            return "glorot_uniform", "zeros"
        return tf.initializers.Constant(weights[0]), tf.initializers.Constant(weights[1])

    l1_k, l1_b = _init_pair(l1_weights)
    l2_k, l2_b = _init_pair(l2_weights)
    if inner_activation_type == "relu":
        activation_inner = "relu"
    elif inner_activation_type == "gelu":
        activation_inner = gelu
    else:
        raise ValueError(
            "inner_activation_type must be 'relu' or 'gelu', got {0!r}".format(inner_activation_type))
    return tf.keras.Sequential([
        tf.keras.layers.Dense(dff, activation=activation_inner, kernel_initializer=l1_k, bias_initializer=l1_b),
        # (batch_size, seq_len, dff)
        tf.keras.layers.Dense(d_model, kernel_initializer=l2_k, bias_initializer=l2_b)  # (batch_size, seq_len, d_model)
    ])
class EncoderLayer(tf.keras.layers.Layer):
    """Single transformer encoder block: multi-head self-attention plus a
    position-wise feed-forward network, each followed by dropout and a
    residual + layer-norm step.

    When ``BERT_layer_ind`` is given, the attention projections,
    feed-forward weights and layer-norm parameters are loaded from
    exported BERT arrays in ``BERT_weights_dir`` (via the load_BERT_*
    helpers defined elsewhere in this module) and the inner activation
    switches to gelu, matching BERT; otherwise all weights are randomly
    initialized and the inner activation is relu.
    """
    def __init__(self, d_model, num_heads, dff, rate=0.1, BERT_layer_ind=None, BERT_weights_dir=None):
        super(EncoderLayer, self).__init__()
        if BERT_layer_ind is not None:
            # Pretrained path: pull this layer's weights from disk.
            # NOTE(review): the helper names below keep their original
            # spellings ("worward", "weigths") -- they must match the
            # definitions elsewhere in this module.
            wq_weights, wk_weights, wv_weights, w_dense = load_BERT_qkv_weights(BERT_layer_ind, BERT_weights_dir)
            l1_weights, l2_weights = load_BERT_feed_worward_weights(BERT_layer_ind, BERT_weights_dir)
            norm_weights = load_BERT_normalization_weigths(BERT_layer_ind, BERT_weights_dir)
            inner_activation_type = "gelu"
        else:
            # Fresh path: no pretrained weights anywhere.
            wq_weights = None
            wk_weights = None
            wv_weights = None
            w_dense = None
            l1_weights = None
            l2_weights = None
            norm_weights = None
            inner_activation_type = "relu"
        self.mha = MultiHeadAttention(d_model, num_heads,
                                      wq_weights=wq_weights, wk_weights=wk_weights, wv_weights=wv_weights,
                                      w_dense=w_dense)
        self.ffn = point_wise_feed_forward_network(d_model, dff,
                                                   l1_weights=l1_weights, l2_weights=l2_weights,
                                                   inner_activation_type=inner_activation_type)
        if norm_weights is not None:
            # norm_weights is a pair of (beta, gamma) pairs, one per sub-layer norm.
            w1 = norm_weights[0]
            wn_1_b = tf.initializers.Constant(w1[0])
            wn_1_g = tf.initializers.Constant(w1[1])
            w2 = norm_weights[1]
            wn_2_b = tf.initializers.Constant(w2[0])
            wn_2_g = tf.initializers.Constant(w2[1])
        else:
            # Keras defaults for LayerNormalization.
            wn_1_b = "zeros"
            wn_1_g = "ones"
            wn_2_b = "zeros"
            wn_2_g = "ones"
        self.layernorm1 = tf.keras.layers.LayerNormalization(epsilon=1e-6, beta_initializer=wn_1_b,
                                                           gamma_initializer=wn_1_g)
        self.layernorm2 = tf.keras.layers.LayerNormalization(epsilon=1e-6, beta_initializer=wn_2_b,
                                                           gamma_initializer=wn_2_g)
        self.dropout1 = tf.keras.layers.Dropout(rate)
        self.dropout2 = tf.keras.layers.Dropout(rate)

    def call(self, x, training, mask):
        """Apply self-attention then the feed-forward block, each with
        dropout and a post-norm residual connection."""
        attn_output, _ = self.mha(x, x, x, mask)  # (batch_size, input_seq_len, d_model)
        attn_output = self.dropout1(attn_output, training=training)
        out1 = self.layernorm1(x + attn_output)  # (batch_size, input_seq_len, d_model)

        ffn_output = self.ffn(out1)  # (batch_size, input_seq_len, d_model)
        ffn_output = self.dropout2(ffn_output, training=training)
        out2 = self.layernorm2(out1 + ffn_output)  # (batch_size, input_seq_len, d_model)

        return out2
class DecoderLayer(tf.keras.layers.Layer):
    """Single transformer decoder block: masked self-attention, then
    encoder-decoder attention, then a position-wise feed-forward network,
    each followed by dropout and a residual + layer-norm step."""

    def __init__(self, d_model, num_heads, dff, rate=0.1):
        super(DecoderLayer, self).__init__()
        self.mha1 = MultiHeadAttention(d_model, num_heads)  # masked self-attention
        self.mha2 = MultiHeadAttention(d_model, num_heads)  # attends over encoder output
        self.ffn = point_wise_feed_forward_network(d_model, dff)
        self.layernorm1 = tf.keras.layers.LayerNormalization(epsilon=1e-6)
        self.layernorm2 = tf.keras.layers.LayerNormalization(epsilon=1e-6)
        self.layernorm3 = tf.keras.layers.LayerNormalization(epsilon=1e-6)
        self.dropout1 = tf.keras.layers.Dropout(rate)
        self.dropout2 = tf.keras.layers.Dropout(rate)
        self.dropout3 = tf.keras.layers.Dropout(rate)

    def call(self, x, enc_output, training,
             look_ahead_mask, padding_mask):
        # enc_output.shape == (batch_size, input_seq_len, d_model)
        self_attn, self_attn_weights = self.mha1(x, x, x, look_ahead_mask)  # (batch_size, target_seq_len, d_model)
        self_attn = self.dropout1(self_attn, training=training)
        norm1 = self.layernorm1(self_attn + x)

        cross_attn, cross_attn_weights = self.mha2(
            enc_output, enc_output, norm1, padding_mask)  # (batch_size, target_seq_len, d_model)
        cross_attn = self.dropout2(cross_attn, training=training)
        norm2 = self.layernorm2(cross_attn + norm1)  # (batch_size, target_seq_len, d_model)

        ffn_out = self.ffn(norm2)  # (batch_size, target_seq_len, d_model)
        ffn_out = self.dropout3(ffn_out, training=training)
        norm3 = self.layernorm3(ffn_out + norm2)  # (batch_size, target_seq_len, d_model)

        return norm3, self_attn_weights, cross_attn_weights
class Encoder(tf.keras.layers.Layer):
    def __init__(self, encoderConfig, num_layers, d_model, num_heads, dff, input_vocab_size,
                 maximum_position_encoding, rate=0.1, BERT_weights_dir=None, trainable=True, vocab_inds=None):
        """Transformer encoder stack.

        Args:
            encoderConfig: options dict; keys read here are
                "pretrained_emb_path", "pretrained_emb_size" and "dense_emb"
                (only consulted on the non-BERT path).
            num_layers: number of EncoderLayer blocks.
            d_model: model (embedding) width.
            num_heads: attention heads per layer.
            dff: inner width of each feed-forward sub-layer.
            input_vocab_size: size of the token embedding table.
            maximum_position_encoding: length of the sinusoidal position
                table (non-BERT path only).
            rate: dropout rate.
            BERT_weights_dir: directory of exported BERT .npy arrays; when
                set, embeddings and every layer are initialized from it.
            trainable: whether created/loaded weights remain trainable.
            vocab_inds: optional row indices used to subset the BERT
                word-embedding table to a reduced vocabulary.
        """
        super(Encoder, self).__init__(trainable=trainable)
        self.BERT_weights_dir = BERT_weights_dir
        self.d_model = d_model
        self.num_layers = num_layers
        if self.BERT_weights_dir is not None:
            # BERT path: constant initializers built from exported .npy arrays.
            if vocab_inds is None:
                we_tokens = tf.initializers.Constant(
                    np.load("{0}/module_bert_embeddings_word_embeddings:0.npy".format(self.BERT_weights_dir)))
            else:
                # Keep only the requested vocabulary rows.
                we_tokens = tf.initializers.Constant(
                    np.load("{0}/module_bert_embeddings_word_embeddings:0.npy".format(self.BERT_weights_dir))[vocab_inds, :])
            we_tokens_type = tf.initializers.Constant(
                np.load("{0}/module_bert_embeddings_token_type_embeddings:0.npy".format(self.BERT_weights_dir)))
            we_pos = tf.initializers.Constant(
                np.load("{0}/module_bert_embeddings_position_embeddings:0.npy".format(self.BERT_weights_dir)))
            # [beta, gamma] initializers of the embedding LayerNorm.
            we_norm = [tf.initializers.Constant(
                np.load("{0}/module_bert_embeddings_LayerNorm_beta:0.npy".format(self.BERT_weights_dir))),
                tf.initializers.Constant(
                    np.load("{0}/module_bert_embeddings_LayerNorm_gamma:0.npy".format(self.BERT_weights_dir)))]
            self.tokens_embedding = tf.keras.layers.Embedding(input_vocab_size, d_model,
                                                          embeddings_initializer=we_tokens, trainable=trainable)
            # 2 segment types and 512 positions -- presumably sized to match
            # the exported BERT tables; TODO confirm against the checkpoint.
            self.tokens_type_embedding = tf.keras.layers.Embedding(2, d_model, embeddings_initializer=we_tokens_type,
                                                               trainable=trainable)
            self.pos_embedding = tf.keras.layers.Embedding(512, d_model, embeddings_initializer=we_pos,
                                                      trainable=trainable)
            self.tokens_norm = tf.keras.layers.LayerNormalization(epsilon=1e-6, beta_initializer=we_norm[0],
                                                               gamma_initializer=we_norm[1], trainable=trainable)
            self.enc_layers = []
            for layer_ind in range(0, num_layers):
                enc_layer = EncoderLayer(d_model, num_heads, dff, rate,
                                         BERT_layer_ind=layer_ind, BERT_weights_dir=self.BERT_weights_dir)
                enc_layer.trainable = trainable
                self.enc_layers.append(enc_layer)
        else:
            # Fresh path: optional pretrained token embeddings, sinusoidal
            # position encodings and randomly initialized layers.
            if encoderConfig.get("pretrained_emb_path", None) is not None:
                self.embedding = tf.keras.layers.Embedding(input_vocab_size, encoderConfig["pretrained_emb_size"], embeddings_initializer=tf.initializers.Constant(np.load(encoderConfig["pretrained_emb_path"])))
            else:
                self.embedding = tf.keras.layers.Embedding(input_vocab_size, d_model)
            # Data.positional_encoding is defined elsewhere in this project.
            self.pos_encoding = Data.positional_encoding(maximum_position_encoding, self.d_model)
            self.enc_layers = [EncoderLayer(d_model, num_heads, dff, rate)
                               for _ in range(num_layers)]
            if encoderConfig.get("dense_emb", False):
                # Projects pretrained embeddings of a different width to d_model.
                self.dense_emb = tf.keras.layers.Dense(d_model)
            else:
                self.dense_emb = None
        self.dropout = tf.keras.layers.Dropout(rate)
    def call(self, x, training, mask, tokens_type_inds=None):
        """Encode a batch of token-id sequences.

        Args:
            x: int tensor of token ids, shape (batch_size, input_seq_len).
            training: bool, enables dropout.
            mask: padding mask forwarded to every encoder layer.
            tokens_type_inds: optional segment ids (BERT path only);
                defaults to all zeros, i.e. a single segment.

        Returns:
            float tensor of shape (batch_size, input_seq_len, d_model).
        """
        seq_len = tf.shape(x)[1]
        batch_size = tf.shape(x)[0]
        if self.BERT_weights_dir is not None:
            # BERT-style input embedding: token + segment + learned position,
            # followed by layer norm.
            x = self.tokens_embedding(x)
            if tokens_type_inds is None:
                tokens_type_inds = tf.zeros(shape=(batch_size, seq_len), dtype=tf.int32)
            tokens_type = self.tokens_type_embedding(tokens_type_inds)
            x += tokens_type
            seq_inds = tf.range(0, seq_len, dtype=tf.int32)
            seq_inds = tf.broadcast_to(seq_inds, (batch_size, seq_len))
            pos = self.pos_embedding(seq_inds)
            x += pos
            x = self.tokens_norm(x)
        else:
            # adding embedding and position encoding.
            x = self.embedding(x)  # (batch_size, input_seq_len, d_model)
            if self.dense_emb is not None:
                x = self.dense_emb(x)
            # Scale embeddings before adding the sinusoidal encoding.
            x *= tf.math.sqrt(tf.cast(self.d_model, tf.float32))
            x += self.pos_encoding[:, :seq_len, :]
        x = self.dropout(x, training=training)
        gpus = cycle(tf.config.experimental.list_physical_devices('GPU'))
        for i in range(self.num_layers):
            if DISTRIBUTED_LAYERS:
                # Round-robin encoder layers across physical GPUs.
                # NOTE(review): next(gpus) raises StopIteration when no GPU is
                # present -- only safe while DISTRIBUTED_LAYERS stays False or
                # at least one GPU exists.
                gpuName = next(gpus).name.replace("/physical_device:", "")
                with tf.device(gpuName):
                    x = self.enc_layers[i](x, training, mask)
            else:
                x = self.enc_layers[i](x, training, mask)
        return x  # (batch_size, input_seq_len, d_model)
def save_weight_to_dir(self, dir):
    """Dump every encoder weight into `dir` as .npy files named after the
    original TF-Hub BERT variable names, so that the encoder constructor
    (called with ``BERT_weights_dir=dir``) can reload them.

    Bug fixed: Keras ``get_weights()`` returns a *list* of arrays, and
    ``np.save`` of that list stores an array with a spurious leading axis
    of size 1.  The loader in ``__init__`` feeds ``np.load(...)`` straight
    into ``tf.initializers.Constant`` and therefore expects the bare
    (vocab, d_model) matrix, so the round trip was broken for the three
    embedding tables.  We now save element ``[0]`` of the list.

    Parameters
    ----------
    dir : str
        Existing directory to write the .npy files into.
    """
    np.save("{0}/module_bert_embeddings_word_embeddings:0.npy".format(dir),
            self.tokens_embedding.get_weights()[0])
    np.save("{0}/module_bert_embeddings_token_type_embeddings:0.npy".format(dir),
            self.tokens_type_embedding.get_weights()[0])
    np.save("{0}/module_bert_embeddings_position_embeddings:0.npy".format(dir),
            self.pos_embedding.get_weights()[0])
    np.save("{0}/module_bert_embeddings_LayerNorm_beta:0.npy".format(dir),
            self.tokens_norm.beta.numpy())
    np.save("{0}/module_bert_embeddings_LayerNorm_gamma:0.npy".format(dir),
            self.tokens_norm.gamma.numpy())
    for layer_ind, layer in enumerate(self.enc_layers):
        # (file-name suffix, tensor) pairs for one encoder layer; the
        # suffixes reproduce the original BERT variable names exactly.
        layer_weights = [
            ("attention_self_query_kernel", layer.mha.wq.kernel),
            ("attention_self_query_bias", layer.mha.wq.bias),
            ("attention_self_key_kernel", layer.mha.wk.kernel),
            ("attention_self_key_bias", layer.mha.wk.bias),
            ("attention_self_value_kernel", layer.mha.wv.kernel),
            ("attention_self_value_bias", layer.mha.wv.bias),
            ("attention_output_dense_kernel", layer.mha.dense.kernel),
            ("attention_output_dense_bias", layer.mha.dense.bias),
            ("intermediate_dense_kernel", layer.ffn.layers[0].kernel),
            ("intermediate_dense_bias", layer.ffn.layers[0].bias),
            ("output_dense_kernel", layer.ffn.layers[1].kernel),
            ("output_dense_bias", layer.ffn.layers[1].bias),
            ("attention_output_LayerNorm_beta", layer.layernorm1.beta),
            ("attention_output_LayerNorm_gamma", layer.layernorm1.gamma),
            ("output_LayerNorm_beta", layer.layernorm2.beta),
            ("output_LayerNorm_gamma", layer.layernorm2.gamma),
        ]
        for suffix, tensor in layer_weights:
            np.save("{0}/module_bert_encoder_layer_{1}_{2}:0.npy".format(dir, layer_ind, suffix),
                    tensor.numpy())
class EncoderLayerAW(EncoderLayer):
    """EncoderLayer variant whose ``call`` also returns the self-attention
    weights (AW = "attention weights") alongside the layer output."""

    def call(self, x, training, mask):
        # Self-attention sub-layer with residual connection + LayerNorm.
        attended, weights = self.mha(x, x, x, mask)  # (batch, seq, d_model)
        attended = self.dropout1(attended, training=training)
        normed = self.layernorm1(x + attended)
        # Position-wise feed-forward sub-layer, residual + LayerNorm.
        projected = self.dropout2(self.ffn(normed), training=training)
        return self.layernorm2(normed + projected), weights
class EncoderAW(Encoder):
    """Encoder variant built from EncoderLayerAW blocks, so that ``call``
    returns per-layer self-attention weights in addition to the encoder
    output (AW = "attention weights").  Construction mirrors Encoder."""

    def __init__(self, encoderConfig, num_layers, d_model, num_heads, dff, input_vocab_size,
                 maximum_position_encoding, rate=0.1, BERT_weights_dir=None, trainable=True, vocab_inds=None):
        # NOTE(review): super(Encoder, self) deliberately skips
        # Encoder.__init__ in the MRO (everything is rebuilt below with
        # EncoderLayerAW blocks) and goes straight to the Keras Layer
        # base — presumably intentional; confirm before refactoring.
        super(Encoder, self).__init__(trainable=trainable)
        self.BERT_weights_dir = BERT_weights_dir
        self.d_model = d_model
        self.num_layers = num_layers
        if self.BERT_weights_dir is not None:
            # Load pre-trained BERT embedding tables from .npy dumps.
            if vocab_inds is None:
                we_tokens = tf.initializers.Constant(
                    np.load("{0}/module_bert_embeddings_word_embeddings:0.npy".format(self.BERT_weights_dir)))
            else:
                # Reduced vocabulary: keep only the requested rows.
                we_tokens = tf.initializers.Constant(
                    np.load("{0}/module_bert_embeddings_word_embeddings:0.npy".format(self.BERT_weights_dir))[vocab_inds, :])
            we_tokens_type = tf.initializers.Constant(
                np.load("{0}/module_bert_embeddings_token_type_embeddings:0.npy".format(self.BERT_weights_dir)))
            we_pos = tf.initializers.Constant(
                np.load("{0}/module_bert_embeddings_position_embeddings:0.npy".format(self.BERT_weights_dir)))
            we_norm = [tf.initializers.Constant(
                np.load("{0}/module_bert_embeddings_LayerNorm_beta:0.npy".format(self.BERT_weights_dir))),
                tf.initializers.Constant(
                    np.load("{0}/module_bert_embeddings_LayerNorm_gamma:0.npy".format(self.BERT_weights_dir)))]
            self.tokens_embedding = tf.keras.layers.Embedding(input_vocab_size, d_model,
                                                      embeddings_initializer=we_tokens, trainable=trainable)
            # 2 segment types, as in the BERT checkpoint.
            self.tokens_type_embedding = tf.keras.layers.Embedding(2, d_model, embeddings_initializer=we_tokens_type,
                                                           trainable=trainable)
            # 512 positions, as in the BERT checkpoint.
            self.pos_embedding = tf.keras.layers.Embedding(512, d_model, embeddings_initializer=we_pos,
                                                     trainable=trainable)
            self.tokens_norm = tf.keras.layers.LayerNormalization(epsilon=1e-6, beta_initializer=we_norm[0],
                                                               gamma_initializer=we_norm[1], trainable=trainable)
            self.enc_layers = []
            for layer_ind in range(0, num_layers):
                # Each layer loads its own pre-trained weights by index.
                enc_layer = EncoderLayerAW(d_model, num_heads, dff, rate,
                                            BERT_layer_ind=layer_ind, BERT_weights_dir=self.BERT_weights_dir)
                enc_layer.trainable = trainable
                self.enc_layers.append(enc_layer)
        else:
            # No BERT weights: plain Transformer encoder, optionally with
            # externally pre-trained word embeddings.
            if encoderConfig.get("pretrained_emb_path", None) is not None:
                self.embedding = tf.keras.layers.Embedding(input_vocab_size, encoderConfig["pretrained_emb_size"], embeddings_initializer=tf.initializers.Constant(np.load(encoderConfig["pretrained_emb_path"])))
            else:
                self.embedding = tf.keras.layers.Embedding(input_vocab_size, d_model)
            self.pos_encoding = Data.positional_encoding(maximum_position_encoding, self.d_model)
            self.enc_layers = [EncoderLayerAW(d_model, num_heads, dff, rate)
                               for _ in range(num_layers)]
            if encoderConfig.get("dense_emb", False):
                # Projects pretrained embeddings to d_model inside call().
                self.dense_emb = tf.keras.layers.Dense(d_model)
            else:
                self.dense_emb = None
        self.dropout = tf.keras.layers.Dropout(rate)

    def call(self, x, training, mask, tokens_type_inds=None):
        """Same as Encoder.call, but returns ``(x, attention_weights)``
        where attention_weights maps ``"layer_{i}"`` to that layer's
        self-attention map."""
        seq_len = tf.shape(x)[1]
        batch_size = tf.shape(x)[0]
        if self.BERT_weights_dir is not None:
            # BERT input pipeline: token + segment + position, then LayerNorm.
            x = self.tokens_embedding(x)
            if tokens_type_inds is None:
                tokens_type_inds = tf.zeros(shape=(batch_size, seq_len), dtype=tf.int32)
            tokens_type = self.tokens_type_embedding(tokens_type_inds)
            x += tokens_type
            seq_inds = tf.range(0, seq_len, dtype=tf.int32)
            seq_inds = tf.broadcast_to(seq_inds, (batch_size, seq_len))
            pos = self.pos_embedding(seq_inds)
            x += pos
            x = self.tokens_norm(x)
        else:
            # Plain embedding + sinusoidal positional encoding.
            x = self.embedding(x)  # (batch_size, input_seq_len, d_model)
            if self.dense_emb is not None:
                x = self.dense_emb(x)
            x *= tf.math.sqrt(tf.cast(self.d_model, tf.float32))
            x += self.pos_encoding[:, :seq_len, :]
        x = self.dropout(x, training=training)
        attention_weights = {}
        # Round-robin GPU placement when DISTRIBUTED_LAYERS is enabled.
        gpus = cycle(tf.config.experimental.list_physical_devices('GPU'))
        for i in range(self.num_layers):
            if DISTRIBUTED_LAYERS:
                gpuName = next(gpus).name.replace("/physical_device:", "")
                with tf.device(gpuName):
                    x, x_attn_weights = self.enc_layers[i](x, training, mask)
            else:
                x, x_attn_weights = self.enc_layers[i](x, training, mask)
            attention_weights["layer_{0}".format(i)] = x_attn_weights
        return x, attention_weights  # (batch_size, input_seq_len, d_model)
class EncoderBERTSumLayersOut(tf.keras.layers.Layer):
    def __init__(self, num_layers, d_model, num_heads, dff, input_vocab_size,
                 maximum_position_encoding, rate=0.1, BERT_weights_dir=None, trainable=False, vocab_inds=None):
        """
        Encoder for Transformer (i.e. the BERT model) whose output is a
        learned weighted average of all layer activations.
        Currently it only works with pre-trained BERT weights.

        Almost identical to Encoder, with two differences:
        1. The output is a weighted average of the activations of all
           layers; the per-layer coefficients are trainable variables.
           The network output is sum(x_i*W_i)/sum(W_i) over all i,
           i from 0 to num_layers + 2 (word and position embeddings),
           where x_i is the i-th layer's output and W_i in (0, 1) with
           W_i = sigmoid(W_linear_i), W_linear_i being trainable weights.
        2. A `trainable` flag was added: if True the BERT weights are
           trained too; if False they are frozen and only the
           weighted-average coefficients are learned.

        Parameters
        ----------
        num_layers: int
            Number of layers, excluding the word and position embeddings.
            May be smaller than in the BERT checkpoint; then only the
            first n layers are loaded.
        d_model: int
            Output dimensionality of the blocks.
        num_heads: int
            Number of heads in MultiHeadAttention.
        dff: int
            Hidden-layer dimensionality inside the blocks.
        input_vocab_size: int
            Vocabulary size.
        maximum_position_encoding: int
            *Maximum sequence length / size of the positional-encoding
            layer.  *Currently unused.
        rate: float
            Dropout rate, 0 to 1; 0 = no dropout, 1 = 100% of units dropped.
        BERT_weights_dir: str
            Path to the BERT weights.
        trainable: bool
            Whether the BERT weights are trainable.
        vocab_inds: str
            Path to the file mapping indices to the new reduced vocabulary.
        """
        super(EncoderBERTSumLayersOut, self).__init__()
        self.BERT_weights_dir = BERT_weights_dir
        self.d_model = d_model
        self.num_layers = num_layers
        # Pre-trained BERT weights are mandatory here (unlike Encoder).
        if vocab_inds is None:
            we_tokens = tf.initializers.Constant(
                np.load("{0}/module_bert_embeddings_word_embeddings:0.npy".format(self.BERT_weights_dir)))
        else:
            # Reduced vocabulary: keep only the requested embedding rows.
            we_tokens = tf.initializers.Constant(
                np.load("{0}/module_bert_embeddings_word_embeddings:0.npy".format(self.BERT_weights_dir))[vocab_inds,
                                                                                                           :])
        we_tokens_type = tf.initializers.Constant(
            np.load("{0}/module_bert_embeddings_token_type_embeddings:0.npy".format(self.BERT_weights_dir)))
        we_pos = tf.initializers.Constant(
            np.load("{0}/module_bert_embeddings_position_embeddings:0.npy".format(self.BERT_weights_dir)))
        we_norm = [tf.initializers.Constant(
            np.load("{0}/module_bert_embeddings_LayerNorm_beta:0.npy".format(self.BERT_weights_dir))),
            tf.initializers.Constant(
                np.load("{0}/module_bert_embeddings_LayerNorm_gamma:0.npy".format(self.BERT_weights_dir)))]
        self.tokens_embedding = tf.keras.layers.Embedding(input_vocab_size, d_model,
                                                  embeddings_initializer=we_tokens, trainable=trainable)
        self.tokens_type_embedding = tf.keras.layers.Embedding(2, d_model, embeddings_initializer=we_tokens_type,
                                                       trainable=trainable)
        self.pos_embedding = tf.keras.layers.Embedding(512, d_model, embeddings_initializer=we_pos,
                                                  trainable=trainable)
        self.tokens_norm = tf.keras.layers.LayerNormalization(epsilon=1e-6, beta_initializer=we_norm[0],
                                                           gamma_initializer=we_norm[1], trainable=trainable)
        self.enc_layers = []
        for layer_ind in range(0, num_layers):
            enc_layer = EncoderLayer(d_model, num_heads, dff, rate,
                                     BERT_layer_ind=layer_ind, BERT_weights_dir=self.BERT_weights_dir)
            enc_layer.trainable = trainable
            self.enc_layers.append(enc_layer)
        self.dropout = tf.keras.layers.Dropout(rate)

    def build(self, input_shape):
        # One linear coefficient per averaged layer: num_layers encoder
        # blocks + 2 (token-embedding output and post-LayerNorm output).
        self.W_layers_sum_coef = self.add_weight("W_layers_sum_coef", shape=[self.num_layers + 2, ])

    def call(self, x, training, mask):
        """Encode token ids `x`; return the sigmoid-weighted average of all
        layer outputs, shape (batch_size, input_seq_len, d_model)."""
        seq_len = tf.shape(x)[1]
        batch_size = tf.shape(x)[0]
        l_x = []  # list of every layer's output (for the weighted average)
        x = self.tokens_embedding(x)
        l_x.append(x)
        # Single-segment input: all segment ids are zero.
        tokens_type_inds = tf.zeros(shape=(batch_size, seq_len), dtype=tf.int32)
        tokens_type = self.tokens_type_embedding(tokens_type_inds)
        x += tokens_type
        seq_inds = tf.range(0, seq_len, dtype=tf.int32)
        seq_inds = tf.broadcast_to(seq_inds, (batch_size, seq_len))
        pos = self.pos_embedding(seq_inds)
        x += pos
        x = self.tokens_norm(x)
        l_x.append(x)
        x = self.dropout(x, training=training)
        for i in range(self.num_layers):
            x = self.enc_layers[i](x, training, mask)
            l_x.append(x)
        l_out = []
        for w_ind, layer_out in enumerate(l_x):
            # W_i = sigmoid(coefficient) keeps every layer weight in (0, 1).
            l_out.append(layer_out * tf.sigmoid(self.W_layers_sum_coef[w_ind]))
        x = tf.reduce_sum(l_out, axis=0)
        # Normalize by the sum of the weights -> weighted average.
        x = x / tf.reduce_sum(tf.sigmoid(self.W_layers_sum_coef))
        return x  # (batch_size, input_seq_len, d_model)
class Decoder(tf.keras.layers.Layer):
    """Stack of DecoderLayer blocks with a learned token embedding and
    sinusoidal positional encoding."""

    def __init__(self, num_layers, d_model, num_heads, dff, target_vocab_size,
                 maximum_position_encoding, rate=0.1):
        super(Decoder, self).__init__()
        self.d_model = d_model
        self.num_layers = num_layers
        self.embedding = tf.keras.layers.Embedding(target_vocab_size, d_model)
        self.pos_encoding = Data.positional_encoding(maximum_position_encoding, d_model)
        self.dec_layers = [DecoderLayer(d_model, num_heads, dff, rate)
                           for _ in range(num_layers)]
        self.dropout = tf.keras.layers.Dropout(rate)

    def call(self, x, enc_output, training,
             look_ahead_mask, padding_mask):
        """Decode target ids `x` against encoder memory `enc_output`.

        Returns
        -------
        (x, attention_weights): x has shape
        (batch_size, target_seq_len, d_model); attention_weights maps
        'decoder_layer{i}_block1'/'decoder_layer{i}_block2' to the
        per-layer attention maps.
        """
        seq_len = tf.shape(x)[1]
        attention_weights = {}
        x = self.embedding(x)  # (batch_size, target_seq_len, d_model)
        x *= tf.math.sqrt(tf.cast(self.d_model, tf.float32))
        x += self.pos_encoding[:, :seq_len, :]
        x = self.dropout(x, training=training)
        # Optionally spread layers across physical GPUs (round robin).
        gpus = cycle(tf.config.experimental.list_physical_devices('GPU'))
        for i in range(self.num_layers):
            if DISTRIBUTED_LAYERS:
                device = next(gpus).name.replace("/physical_device:", "")
                with tf.device(device):
                    x, block1, block2 = self.dec_layers[i](x, enc_output, training,
                                                           look_ahead_mask, padding_mask)
            else:
                x, block1, block2 = self.dec_layers[i](x, enc_output, training,
                                                       look_ahead_mask, padding_mask)
            attention_weights['decoder_layer{}_block1'.format(i + 1)] = block1
            attention_weights['decoder_layer{}_block2'.format(i + 1)] = block2
        return x, attention_weights
class Transformer(tf.keras.Model):
    """Full encoder-decoder Transformer; encoder flavour is chosen from
    modelConfig["Encoder"] (weighted-layer-sum BERT encoder or plain
    Encoder, optionally initialized from dumped BERT weights)."""

    def __init__(self, modelConfig, input_vocab_size,
                 target_vocab_size, pe_input, pe_target, rate=0.1):
        super(Transformer, self).__init__()
        enc_cfg = modelConfig["Encoder"]
        dec_cfg = modelConfig["Decoder"]
        bert_dir = enc_cfg.get("BERT_weights", None)
        if enc_cfg.get("SumEmbedding", False):
            self.encoder = EncoderBERTSumLayersOut(
                enc_cfg["num_layers"], enc_cfg["d_model"], enc_cfg["num_heads"],
                enc_cfg["dff"], input_vocab_size, pe_input,
                rate=enc_cfg["dropout_rate"], BERT_weights_dir=bert_dir,
                trainable=not enc_cfg.get("freeze_weights", False))
        else:
            self.encoder = Encoder(
                enc_cfg, enc_cfg["num_layers"], enc_cfg["d_model"],
                enc_cfg["num_heads"], enc_cfg["dff"], input_vocab_size,
                pe_input, enc_cfg["dropout_rate"], BERT_weights_dir=bert_dir)
        self.decoder = Decoder(
            dec_cfg["num_layers"], dec_cfg["d_model"], dec_cfg["num_heads"],
            dec_cfg["dff"], target_vocab_size, pe_target, dec_cfg["dropout_rate"])
        self.final_layer = tf.keras.layers.Dense(target_vocab_size)

    def call(self, inp, tar, training, enc_padding_mask,
             look_ahead_mask, dec_padding_mask):
        """Return (logits, attention_weights); logits have shape
        (batch_size, tar_seq_len, target_vocab_size)."""
        memory = self.encoder(inp, training, enc_padding_mask)  # (batch, inp_len, d_model)
        decoded, attention_weights = self.decoder(
            tar, memory, training, look_ahead_mask, dec_padding_mask)
        return self.final_layer(decoded), attention_weights
class CustomSchedule(tf.keras.optimizers.schedules.LearningRateSchedule):
    """Warmup-then-decay learning-rate schedule:

        lr(step) = d_model**-0.5 * min(step**-0.5, step * warmup_steps**-1.5)

    i.e. a linear ramp for the first `warmup_steps` steps, then a
    1/sqrt(step) decay."""

    def __init__(self, d_model, warmup_steps=4000):
        super(CustomSchedule, self).__init__()
        # Cast once so rsqrt below operates on a float32 tensor.
        self.d_model = tf.cast(d_model, tf.float32)
        self.warmup_steps = warmup_steps

    def __call__(self, step):
        decay_arg = tf.math.rsqrt(step)
        warmup_arg = step * (self.warmup_steps ** -1.5)
        return tf.math.rsqrt(self.d_model) * tf.math.minimum(decay_arg, warmup_arg)
def evaluate(inp_sentence, tokenizer, transformer, max_length, usingKeras=False):
    """Greedily decode a single input sequence with `transformer`.

    Parameters
    ----------
    inp_sentence : bytes or sequence of int
        Raw bytes are tokenized here; anything else is assumed to be an
        already-tokenized id sequence (NOTE(review): confirm with callers).
    tokenizer
        BERT tokenizer wrapper, SentencePieceProcessor, or a
        subword encoder exposing `vocab_size`/`encode`.
    transformer
        Model returning (predictions, attention_weights).
    max_length : int
        Input truncation length and maximum number of decode steps.
    usingKeras : bool
        If True, call the model with a single packed input tuple
        (Transformer_keras-style signature).

    Returns
    -------
    (output_ids, attention_weights) — decoded ids without batch dim.
    """
    # Resolve the special start/end token ids for the tokenizer flavour.
    if "BERT_tokenizer" in tokenizer.__dict__:
        start_token = [tokenizer.BERT_tokenizer.vocab["[CLS]"]]
        end_token = [tokenizer.BERT_tokenizer.vocab["[SEP]"]]
    else:
        if type(tokenizer).__name__=="SentencePieceProcessor":
            start_token = [tokenizer.bos_id()]
            end_token = [tokenizer.eos_id()]
        else:
            # Subword-encoder convention: start/end ids sit right after
            # the base vocabulary.
            start_token = [tokenizer.vocab_size]
            end_token = [tokenizer.vocab_size + 1]
    if type(inp_sentence)==bytes:
        if type(tokenizer).__name__=="SentencePieceProcessor":
            inp_sentence = start_token + tokenizer.EncodeAsIds(inp_sentence) + end_token
        else:
            inp_sentence = start_token + tokenizer.encode(inp_sentence) + end_token
    encoder_input = tf.expand_dims(inp_sentence, 0)  # add batch dimension
    encoder_input = encoder_input[:,:max_length]  # truncate to model limit
    #encoder_input = tf.slice(encoder_input, [0, 0], [tf.shape(encoder_input)[0], max_length])
    #encoder_input = tf.slice(encoder_input, [0, 0], [encoder_input.shape[0], max_length])
    # as the target is english, the first word to the transformer should be the
    # english start token.
    decoder_input = start_token
    output = tf.expand_dims(decoder_input, 0)
    for i in range(max_length):
        enc_padding_mask, combined_mask, dec_padding_mask = Data.create_masks(
            encoder_input, output)
        # predictions.shape == (batch_size, seq_len, vocab_size)
        if usingKeras:
            # Keras-style model: one packed positional-input tuple.
            predictions, attention_weights = transformer((encoder_input,
                                                          output,
                                                          enc_padding_mask,
                                                          combined_mask,
                                                          dec_padding_mask),
                                                         False)
        else:
            predictions, attention_weights = transformer(encoder_input,
                                                         output,
                                                         False,
                                                         enc_padding_mask,
                                                         combined_mask,
                                                         dec_padding_mask)
        # select the last word from the seq_len dimension
        predictions = predictions[: ,-1:, :]  # (batch_size, 1, vocab_size)
        predicted_id = tf.cast(tf.argmax(predictions, axis=-1), tf.int32)
        # return the result if the predicted_id is equal to the end token
        if predicted_id == end_token[0]:
            return tf.squeeze(output, axis=0), attention_weights
        # concatentate the predicted_id to the output which is given to the decoder
        # as its input.
        output = tf.concat([output, predicted_id], axis=-1)
    return tf.squeeze(output, axis=0), attention_weights
class Transformer_keras(tf.keras.Model):
    """Keras-friendly Transformer: `call` takes one packed input tuple
    (inp, tar, masks...) and returns only the logits; attention maps are
    discarded (see Transformer_keras_predict to get them back)."""

    def __init__(self, modelConfig, input_vocab_size,
                 target_vocab_size, pe_input, pe_target, rate=0.1):
        super(Transformer_keras, self).__init__()
        enc_cfg = modelConfig["Encoder"]
        dec_cfg = modelConfig["Decoder"]
        bert_dir = enc_cfg.get("BERT_weights", None)
        train_bert = not enc_cfg.get("freeze_weights", False)
        if enc_cfg.get("SumEmbedding", False):
            self.encoder = EncoderBERTSumLayersOut(
                enc_cfg["num_layers"], enc_cfg["d_model"], enc_cfg["num_heads"],
                enc_cfg["dff"], input_vocab_size, pe_input,
                rate=enc_cfg["dropout_rate"], BERT_weights_dir=bert_dir,
                trainable=train_bert)
        else:
            self.encoder = Encoder(
                enc_cfg, enc_cfg["num_layers"], enc_cfg["d_model"],
                enc_cfg["num_heads"], enc_cfg["dff"], input_vocab_size,
                pe_input, enc_cfg["dropout_rate"], BERT_weights_dir=bert_dir,
                trainable=train_bert)
        self.decoder = Decoder(
            dec_cfg["num_layers"], dec_cfg["d_model"], dec_cfg["num_heads"],
            dec_cfg["dff"], target_vocab_size, pe_target, dec_cfg["dropout_rate"])
        self.final_layer = tf.keras.layers.Dense(target_vocab_size)

    def call(self, inputs, training):
        inp, tar, enc_padding_mask, look_ahead_mask, dec_padding_mask = inputs
        memory = self.encoder(inp, training, enc_padding_mask)  # (batch, inp_len, d_model)
        decoded, _ = self.decoder(
            tar, memory, training, look_ahead_mask, dec_padding_mask)
        return self.final_layer(decoded)  # (batch, tar_len, target_vocab_size)
class Transformer_keras_predict(Transformer_keras):
    """Prediction-time variant of Transformer_keras whose `call` also
    returns the decoder attention maps alongside the logits."""

    def call(self, inputs, training):
        inp, tar, enc_padding_mask, look_ahead_mask, dec_padding_mask = inputs
        memory = self.encoder(inp, training, enc_padding_mask)  # (batch, inp_len, d_model)
        decoded, attention_weights = self.decoder(
            tar, memory, training, look_ahead_mask, dec_padding_mask)
        logits = self.final_layer(decoded)  # (batch, tar_len, target_vocab_size)
        return logits, attention_weights
def evaluate_batch(encoder_input, tokenizer, transformer, max_length, usingKeras=False):
    """Greedily decode a whole batch of already-tokenized inputs at once.

    Bug fixed: in the non-Keras branch the (predictions, attention_weights)
    tuple returned by the model was never unpacked (compare `evaluate`),
    so `predictions[:, -1:, :]` sliced a tuple and `attention_weights`
    was unbound at both return sites.  The tuple is now unpacked, and
    `attention_weights` is pre-initialized so `max_length == 0` no longer
    raises NameError.

    Parameters
    ----------
    encoder_input : int array/tensor, shape (batch_size, inp_seq_len)
        Tokenized source sequences.
    tokenizer
        Used only to resolve the start/end token ids.
    transformer
        Model returning (predictions, attention_weights).
    max_length : int
        Maximum number of decode steps.
    usingKeras : bool
        If True, call the model with one packed input tuple.

    Returns
    -------
    (output, attention_weights) — output has shape
    (batch_size, decoded_len); attention_weights is the last step's maps
    (None if max_length == 0).
    """
    # Resolve start/end token ids for the active tokenizer flavour.
    if "BERT_tokenizer" in tokenizer.__dict__:
        start_token = [tokenizer.BERT_tokenizer.vocab["[CLS]"]]
        end_token = [tokenizer.BERT_tokenizer.vocab["[SEP]"]]
    else:
        if type(tokenizer).__name__ == "SentencePieceProcessor":
            start_token = [tokenizer.bos_id()]
            end_token = [tokenizer.eos_id()]
        else:
            start_token = [tokenizer.vocab_size]
            end_token = [tokenizer.vocab_size + 1]
    # One start token per batch row as the initial decoder input.
    decoder_input = np.tile(np.array(start_token, np.int32), encoder_input.shape[0])
    output = tf.expand_dims(decoder_input, 1)
    attention_weights = None  # defined even when max_length == 0
    for i in range(max_length):
        enc_padding_mask, combined_mask, dec_padding_mask = Data.create_masks(
            encoder_input, output)
        # predictions.shape == (batch_size, seq_len, vocab_size)
        if usingKeras:
            predictions, attention_weights = transformer((encoder_input,
                                                          output,
                                                          enc_padding_mask,
                                                          combined_mask,
                                                          dec_padding_mask),
                                                         False)
        else:
            # Fixed: unpack the tuple, exactly as in `evaluate`.
            predictions, attention_weights = transformer(encoder_input,
                                                         output,
                                                         False,
                                                         enc_padding_mask,
                                                         combined_mask,
                                                         dec_padding_mask)
        # Keep only the last time step.
        predictions = predictions[:, -1:, :]  # (batch_size, 1, vocab_size)
        predicted_id = tf.cast(tf.argmax(predictions, axis=-1), tf.int32)
        # Stop once every sequence in the batch emitted the end token.
        if (predicted_id == end_token[0]).numpy().all():
            return output, attention_weights
        # Append the new ids and feed them back as decoder input.
        output = tf.concat([output, predicted_id], axis=-1)
    return output, attention_weights
| [
"numpy.load",
"tensorflow.reduce_sum",
"tensorflow.keras.layers.Dense",
"tensorflow.reshape",
"tensorflow.keras.layers.LayerNormalization",
"tensorflow.matmul",
"numpy.sqrt",
"tensorflow.nn.softmax",
"tensorflow.concat",
"tensorflow.cast",
"tensorflow.squeeze",
"tensorflow.broadcast_to",
"te... | [((1279, 1312), 'tensorflow.matmul', 'tf.matmul', (['q', 'k'], {'transpose_b': '(True)'}), '(q, k, transpose_b=True)\n', (1288, 1312), True, 'import tensorflow as tf\n'), ((1714, 1761), 'tensorflow.nn.softmax', 'tf.nn.softmax', (['scaled_attention_logits'], {'axis': '(-1)'}), '(scaled_attention_logits, axis=-1)\n', (1727, 1761), True, 'import tensorflow as tf\n'), ((1807, 1838), 'tensorflow.matmul', 'tf.matmul', (['attention_weights', 'v'], {}), '(attention_weights, v)\n', (1816, 1838), True, 'import tensorflow as tf\n'), ((36887, 36918), 'tensorflow.expand_dims', 'tf.expand_dims', (['inp_sentence', '(0)'], {}), '(inp_sentence, 0)\n', (36901, 36918), True, 'import tensorflow as tf\n'), ((37311, 37343), 'tensorflow.expand_dims', 'tf.expand_dims', (['decoder_input', '(0)'], {}), '(decoder_input, 0)\n', (37325, 37343), True, 'import tensorflow as tf\n'), ((42593, 42625), 'tensorflow.expand_dims', 'tf.expand_dims', (['decoder_input', '(1)'], {}), '(decoder_input, 1)\n', (42607, 42625), True, 'import tensorflow as tf\n'), ((1459, 1475), 'tensorflow.math.sqrt', 'tf.math.sqrt', (['dk'], {}), '(dk)\n', (1471, 1475), True, 'import tensorflow as tf\n'), ((3566, 3644), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['d_model'], {'kernel_initializer': 'wq_k', 'bias_initializer': 'wq_b'}), '(d_model, kernel_initializer=wq_k, bias_initializer=wq_b)\n', (3587, 3644), True, 'import tensorflow as tf\n'), ((3663, 3741), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['d_model'], {'kernel_initializer': 'wk_k', 'bias_initializer': 'wk_b'}), '(d_model, kernel_initializer=wk_k, bias_initializer=wk_b)\n', (3684, 3741), True, 'import tensorflow as tf\n'), ((3760, 3838), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['d_model'], {'kernel_initializer': 'wv_k', 'bias_initializer': 'wv_b'}), '(d_model, kernel_initializer=wv_k, bias_initializer=wv_b)\n', (3781, 3838), True, 'import tensorflow as tf\n'), ((3861, 3939), 
'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['d_model'], {'kernel_initializer': 'wd_k', 'bias_initializer': 'wd_b'}), '(d_model, kernel_initializer=wd_k, bias_initializer=wd_b)\n', (3882, 3939), True, 'import tensorflow as tf\n'), ((4160, 4219), 'tensorflow.reshape', 'tf.reshape', (['x', '(batch_size, -1, self.num_heads, self.depth)'], {}), '(x, (batch_size, -1, self.num_heads, self.depth))\n', (4170, 4219), True, 'import tensorflow as tf\n'), ((4235, 4269), 'tensorflow.transpose', 'tf.transpose', (['x'], {'perm': '[0, 2, 1, 3]'}), '(x, perm=[0, 2, 1, 3])\n', (4247, 4269), True, 'import tensorflow as tf\n'), ((5075, 5124), 'tensorflow.transpose', 'tf.transpose', (['scaled_attention'], {'perm': '[0, 2, 1, 3]'}), '(scaled_attention, perm=[0, 2, 1, 3])\n', (5087, 5124), True, 'import tensorflow as tf\n'), ((5238, 5298), 'tensorflow.reshape', 'tf.reshape', (['scaled_attention', '(batch_size, -1, self.d_model)'], {}), '(scaled_attention, (batch_size, -1, self.d_model))\n', (5248, 5298), True, 'import tensorflow as tf\n'), ((5661, 5700), 'tensorflow.initializers.Constant', 'tf.initializers.Constant', (['l1_weights[0]'], {}), '(l1_weights[0])\n', (5685, 5700), True, 'import tensorflow as tf\n'), ((5716, 5755), 'tensorflow.initializers.Constant', 'tf.initializers.Constant', (['l1_weights[1]'], {}), '(l1_weights[1])\n', (5740, 5755), True, 'import tensorflow as tf\n'), ((5868, 5907), 'tensorflow.initializers.Constant', 'tf.initializers.Constant', (['l2_weights[0]'], {}), '(l2_weights[0])\n', (5892, 5907), True, 'import tensorflow as tf\n'), ((5923, 5962), 'tensorflow.initializers.Constant', 'tf.initializers.Constant', (['l2_weights[1]'], {}), '(l2_weights[1])\n', (5947, 5962), True, 'import tensorflow as tf\n'), ((8290, 8394), 'tensorflow.keras.layers.LayerNormalization', 'tf.keras.layers.LayerNormalization', ([], {'epsilon': '(1e-06)', 'beta_initializer': 'wn_1_b', 'gamma_initializer': 'wn_1_g'}), '(epsilon=1e-06, beta_initializer=wn_1_b,\n 
gamma_initializer=wn_1_g)\n', (8324, 8394), True, 'import tensorflow as tf\n'), ((8477, 8581), 'tensorflow.keras.layers.LayerNormalization', 'tf.keras.layers.LayerNormalization', ([], {'epsilon': '(1e-06)', 'beta_initializer': 'wn_2_b', 'gamma_initializer': 'wn_2_g'}), '(epsilon=1e-06, beta_initializer=wn_2_b,\n gamma_initializer=wn_2_g)\n', (8511, 8581), True, 'import tensorflow as tf\n'), ((8663, 8692), 'tensorflow.keras.layers.Dropout', 'tf.keras.layers.Dropout', (['rate'], {}), '(rate)\n', (8686, 8692), True, 'import tensorflow as tf\n'), ((8717, 8746), 'tensorflow.keras.layers.Dropout', 'tf.keras.layers.Dropout', (['rate'], {}), '(rate)\n', (8740, 8746), True, 'import tensorflow as tf\n'), ((9649, 9698), 'tensorflow.keras.layers.LayerNormalization', 'tf.keras.layers.LayerNormalization', ([], {'epsilon': '(1e-06)'}), '(epsilon=1e-06)\n', (9683, 9698), True, 'import tensorflow as tf\n'), ((9724, 9773), 'tensorflow.keras.layers.LayerNormalization', 'tf.keras.layers.LayerNormalization', ([], {'epsilon': '(1e-06)'}), '(epsilon=1e-06)\n', (9758, 9773), True, 'import tensorflow as tf\n'), ((9799, 9848), 'tensorflow.keras.layers.LayerNormalization', 'tf.keras.layers.LayerNormalization', ([], {'epsilon': '(1e-06)'}), '(epsilon=1e-06)\n', (9833, 9848), True, 'import tensorflow as tf\n'), ((9881, 9910), 'tensorflow.keras.layers.Dropout', 'tf.keras.layers.Dropout', (['rate'], {}), '(rate)\n', (9904, 9910), True, 'import tensorflow as tf\n'), ((9935, 9964), 'tensorflow.keras.layers.Dropout', 'tf.keras.layers.Dropout', (['rate'], {}), '(rate)\n', (9958, 9964), True, 'import tensorflow as tf\n'), ((9989, 10018), 'tensorflow.keras.layers.Dropout', 'tf.keras.layers.Dropout', (['rate'], {}), '(rate)\n', (10012, 10018), True, 'import tensorflow as tf\n'), ((14568, 14597), 'tensorflow.keras.layers.Dropout', 'tf.keras.layers.Dropout', (['rate'], {}), '(rate)\n', (14591, 14597), True, 'import tensorflow as tf\n'), ((23846, 23875), 'tensorflow.keras.layers.Dropout', 
'tf.keras.layers.Dropout', (['rate'], {}), '(rate)\n', (23869, 23875), True, 'import tensorflow as tf\n'), ((28842, 28954), 'tensorflow.keras.layers.Embedding', 'tf.keras.layers.Embedding', (['input_vocab_size', 'd_model'], {'embeddings_initializer': 'we_tokens', 'trainable': 'trainable'}), '(input_vocab_size, d_model, embeddings_initializer\n =we_tokens, trainable=trainable)\n', (28867, 28954), True, 'import tensorflow as tf\n'), ((29045, 29146), 'tensorflow.keras.layers.Embedding', 'tf.keras.layers.Embedding', (['(2)', 'd_model'], {'embeddings_initializer': 'we_tokens_type', 'trainable': 'trainable'}), '(2, d_model, embeddings_initializer=we_tokens_type,\n trainable=trainable)\n', (29070, 29146), True, 'import tensorflow as tf\n'), ((29235, 29330), 'tensorflow.keras.layers.Embedding', 'tf.keras.layers.Embedding', (['(512)', 'd_model'], {'embeddings_initializer': 'we_pos', 'trainable': 'trainable'}), '(512, d_model, embeddings_initializer=we_pos,\n trainable=trainable)\n', (29260, 29330), True, 'import tensorflow as tf\n'), ((29409, 29543), 'tensorflow.keras.layers.LayerNormalization', 'tf.keras.layers.LayerNormalization', ([], {'epsilon': '(1e-06)', 'beta_initializer': 'we_norm[0]', 'gamma_initializer': 'we_norm[1]', 'trainable': 'trainable'}), '(epsilon=1e-06, beta_initializer=we_norm[\n 0], gamma_initializer=we_norm[1], trainable=trainable)\n', (29443, 29543), True, 'import tensorflow as tf\n'), ((29961, 29990), 'tensorflow.keras.layers.Dropout', 'tf.keras.layers.Dropout', (['rate'], {}), '(rate)\n', (29984, 29990), True, 'import tensorflow as tf\n'), ((30370, 30423), 'tensorflow.zeros', 'tf.zeros', ([], {'shape': '(batch_size, seq_len)', 'dtype': 'tf.int32'}), '(shape=(batch_size, seq_len), dtype=tf.int32)\n', (30378, 30423), True, 'import tensorflow as tf\n'), ((30536, 30572), 'tensorflow.range', 'tf.range', (['(0)', 'seq_len'], {'dtype': 'tf.int32'}), '(0, seq_len, dtype=tf.int32)\n', (30544, 30572), True, 'import tensorflow as tf\n'), ((30592, 30640), 
'tensorflow.broadcast_to', 'tf.broadcast_to', (['seq_inds', '(batch_size, seq_len)'], {}), '(seq_inds, (batch_size, seq_len))\n', (30607, 30640), True, 'import tensorflow as tf\n'), ((31086, 31114), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['l_out'], {'axis': '(0)'}), '(l_out, axis=0)\n', (31099, 31114), True, 'import tensorflow as tf\n'), ((31568, 31621), 'tensorflow.keras.layers.Embedding', 'tf.keras.layers.Embedding', (['target_vocab_size', 'd_model'], {}), '(target_vocab_size, d_model)\n', (31593, 31621), True, 'import tensorflow as tf\n'), ((31888, 31917), 'tensorflow.keras.layers.Dropout', 'tf.keras.layers.Dropout', (['rate'], {}), '(rate)\n', (31911, 31917), True, 'import tensorflow as tf\n'), ((34870, 34910), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['target_vocab_size'], {}), '(target_vocab_size)\n', (34891, 34910), True, 'import tensorflow as tf\n'), ((35750, 35783), 'tensorflow.cast', 'tf.cast', (['self.d_model', 'tf.float32'], {}), '(self.d_model, tf.float32)\n', (35757, 35783), True, 'import tensorflow as tf\n'), ((35880, 35899), 'tensorflow.math.rsqrt', 'tf.math.rsqrt', (['step'], {}), '(step)\n', (35893, 35899), True, 'import tensorflow as tf\n'), ((38974, 39016), 'tensorflow.concat', 'tf.concat', (['[output, predicted_id]'], {'axis': '(-1)'}), '([output, predicted_id], axis=-1)\n', (38983, 39016), True, 'import tensorflow as tf\n'), ((39032, 39058), 'tensorflow.squeeze', 'tf.squeeze', (['output'], {'axis': '(0)'}), '(output, axis=0)\n', (39042, 39058), True, 'import tensorflow as tf\n'), ((40715, 40755), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['target_vocab_size'], {}), '(target_vocab_size)\n', (40736, 40755), True, 'import tensorflow as tf\n'), ((42522, 42553), 'numpy.array', 'np.array', (['start_token', 'np.int32'], {}), '(start_token, np.int32)\n', (42530, 42553), True, 'import numpy as np\n'), ((44238, 44280), 'tensorflow.concat', 'tf.concat', (['[output, predicted_id]'], {'axis': '(-1)'}), '([output, 
predicted_id], axis=-1)\n', (44247, 44280), True, 'import tensorflow as tf\n'), ((1388, 1399), 'tensorflow.shape', 'tf.shape', (['k'], {}), '(k)\n', (1396, 1399), True, 'import tensorflow as tf\n'), ((2593, 2632), 'tensorflow.initializers.Constant', 'tf.initializers.Constant', (['wq_weights[0]'], {}), '(wq_weights[0])\n', (2617, 2632), True, 'import tensorflow as tf\n'), ((2652, 2691), 'tensorflow.initializers.Constant', 'tf.initializers.Constant', (['wq_weights[1]'], {}), '(wq_weights[1])\n', (2676, 2691), True, 'import tensorflow as tf\n'), ((2824, 2863), 'tensorflow.initializers.Constant', 'tf.initializers.Constant', (['wk_weights[0]'], {}), '(wk_weights[0])\n', (2848, 2863), True, 'import tensorflow as tf\n'), ((2883, 2922), 'tensorflow.initializers.Constant', 'tf.initializers.Constant', (['wk_weights[1]'], {}), '(wk_weights[1])\n', (2907, 2922), True, 'import tensorflow as tf\n'), ((3055, 3094), 'tensorflow.initializers.Constant', 'tf.initializers.Constant', (['wv_weights[0]'], {}), '(wv_weights[0])\n', (3079, 3094), True, 'import tensorflow as tf\n'), ((3114, 3153), 'tensorflow.initializers.Constant', 'tf.initializers.Constant', (['wv_weights[1]'], {}), '(wv_weights[1])\n', (3138, 3153), True, 'import tensorflow as tf\n'), ((3283, 3319), 'tensorflow.initializers.Constant', 'tf.initializers.Constant', (['w_dense[0]'], {}), '(w_dense[0])\n', (3307, 3319), True, 'import tensorflow as tf\n'), ((3339, 3375), 'tensorflow.initializers.Constant', 'tf.initializers.Constant', (['w_dense[1]'], {}), '(w_dense[1])\n', (3363, 3375), True, 'import tensorflow as tf\n'), ((4327, 4338), 'tensorflow.shape', 'tf.shape', (['q'], {}), '(q)\n', (4335, 4338), True, 'import tensorflow as tf\n'), ((6219, 6327), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['dff'], {'activation': 'activation_inner', 'kernel_initializer': 'l1_k', 'bias_initializer': 'l1_b'}), '(dff, activation=activation_inner, kernel_initializer=\n l1_k, bias_initializer=l1_b)\n', (6240, 6327), True, 
'import tensorflow as tf\n'), ((6369, 6447), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['d_model'], {'kernel_initializer': 'l2_k', 'bias_initializer': 'l2_b'}), '(d_model, kernel_initializer=l2_k, bias_initializer=l2_b)\n', (6390, 6447), True, 'import tensorflow as tf\n'), ((7911, 7942), 'tensorflow.initializers.Constant', 'tf.initializers.Constant', (['w1[0]'], {}), '(w1[0])\n', (7935, 7942), True, 'import tensorflow as tf\n'), ((7964, 7995), 'tensorflow.initializers.Constant', 'tf.initializers.Constant', (['w1[1]'], {}), '(w1[1])\n', (7988, 7995), True, 'import tensorflow as tf\n'), ((8050, 8081), 'tensorflow.initializers.Constant', 'tf.initializers.Constant', (['w2[0]'], {}), '(w2[0])\n', (8074, 8081), True, 'import tensorflow as tf\n'), ((8103, 8134), 'tensorflow.initializers.Constant', 'tf.initializers.Constant', (['w2[1]'], {}), '(w2[1])\n', (8127, 8134), True, 'import tensorflow as tf\n'), ((12588, 12700), 'tensorflow.keras.layers.Embedding', 'tf.keras.layers.Embedding', (['input_vocab_size', 'd_model'], {'embeddings_initializer': 'we_tokens', 'trainable': 'trainable'}), '(input_vocab_size, d_model, embeddings_initializer\n =we_tokens, trainable=trainable)\n', (12613, 12700), True, 'import tensorflow as tf\n'), ((12799, 12900), 'tensorflow.keras.layers.Embedding', 'tf.keras.layers.Embedding', (['(2)', 'd_model'], {'embeddings_initializer': 'we_tokens_type', 'trainable': 'trainable'}), '(2, d_model, embeddings_initializer=we_tokens_type,\n trainable=trainable)\n', (12824, 12900), True, 'import tensorflow as tf\n'), ((12997, 13092), 'tensorflow.keras.layers.Embedding', 'tf.keras.layers.Embedding', (['(512)', 'd_model'], {'embeddings_initializer': 'we_pos', 'trainable': 'trainable'}), '(512, d_model, embeddings_initializer=we_pos,\n trainable=trainable)\n', (13022, 13092), True, 'import tensorflow as tf\n'), ((13179, 13313), 'tensorflow.keras.layers.LayerNormalization', 'tf.keras.layers.LayerNormalization', ([], {'epsilon': '(1e-06)', 
'beta_initializer': 'we_norm[0]', 'gamma_initializer': 'we_norm[1]', 'trainable': 'trainable'}), '(epsilon=1e-06, beta_initializer=we_norm[\n 0], gamma_initializer=we_norm[1], trainable=trainable)\n', (13213, 13313), True, 'import tensorflow as tf\n'), ((14465, 14495), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['d_model'], {}), '(d_model)\n', (14486, 14495), True, 'import tensorflow as tf\n'), ((14680, 14691), 'tensorflow.shape', 'tf.shape', (['x'], {}), '(x)\n', (14688, 14691), True, 'import tensorflow as tf\n'), ((14716, 14727), 'tensorflow.shape', 'tf.shape', (['x'], {}), '(x)\n', (14724, 14727), True, 'import tensorflow as tf\n'), ((15074, 15110), 'tensorflow.range', 'tf.range', (['(0)', 'seq_len'], {'dtype': 'tf.int32'}), '(0, seq_len, dtype=tf.int32)\n', (15082, 15110), True, 'import tensorflow as tf\n'), ((15134, 15182), 'tensorflow.broadcast_to', 'tf.broadcast_to', (['seq_inds', '(batch_size, seq_len)'], {}), '(seq_inds, (batch_size, seq_len))\n', (15149, 15182), True, 'import tensorflow as tf\n'), ((15707, 15758), 'tensorflow.config.experimental.list_physical_devices', 'tf.config.experimental.list_physical_devices', (['"""GPU"""'], {}), "('GPU')\n", (15751, 15758), True, 'import tensorflow as tf\n'), ((21862, 21974), 'tensorflow.keras.layers.Embedding', 'tf.keras.layers.Embedding', (['input_vocab_size', 'd_model'], {'embeddings_initializer': 'we_tokens', 'trainable': 'trainable'}), '(input_vocab_size, d_model, embeddings_initializer\n =we_tokens, trainable=trainable)\n', (21887, 21974), True, 'import tensorflow as tf\n'), ((22073, 22174), 'tensorflow.keras.layers.Embedding', 'tf.keras.layers.Embedding', (['(2)', 'd_model'], {'embeddings_initializer': 'we_tokens_type', 'trainable': 'trainable'}), '(2, d_model, embeddings_initializer=we_tokens_type,\n trainable=trainable)\n', (22098, 22174), True, 'import tensorflow as tf\n'), ((22271, 22366), 'tensorflow.keras.layers.Embedding', 'tf.keras.layers.Embedding', (['(512)', 'd_model'], 
{'embeddings_initializer': 'we_pos', 'trainable': 'trainable'}), '(512, d_model, embeddings_initializer=we_pos,\n trainable=trainable)\n', (22296, 22366), True, 'import tensorflow as tf\n'), ((22453, 22587), 'tensorflow.keras.layers.LayerNormalization', 'tf.keras.layers.LayerNormalization', ([], {'epsilon': '(1e-06)', 'beta_initializer': 'we_norm[0]', 'gamma_initializer': 'we_norm[1]', 'trainable': 'trainable'}), '(epsilon=1e-06, beta_initializer=we_norm[\n 0], gamma_initializer=we_norm[1], trainable=trainable)\n', (22487, 22587), True, 'import tensorflow as tf\n'), ((23743, 23773), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['d_model'], {}), '(d_model)\n', (23764, 23773), True, 'import tensorflow as tf\n'), ((23958, 23969), 'tensorflow.shape', 'tf.shape', (['x'], {}), '(x)\n', (23966, 23969), True, 'import tensorflow as tf\n'), ((23994, 24005), 'tensorflow.shape', 'tf.shape', (['x'], {}), '(x)\n', (24002, 24005), True, 'import tensorflow as tf\n'), ((24352, 24388), 'tensorflow.range', 'tf.range', (['(0)', 'seq_len'], {'dtype': 'tf.int32'}), '(0, seq_len, dtype=tf.int32)\n', (24360, 24388), True, 'import tensorflow as tf\n'), ((24412, 24460), 'tensorflow.broadcast_to', 'tf.broadcast_to', (['seq_inds', '(batch_size, seq_len)'], {}), '(seq_inds, (batch_size, seq_len))\n', (24427, 24460), True, 'import tensorflow as tf\n'), ((25008, 25059), 'tensorflow.config.experimental.list_physical_devices', 'tf.config.experimental.list_physical_devices', (['"""GPU"""'], {}), "('GPU')\n", (25052, 25059), True, 'import tensorflow as tf\n'), ((30185, 30196), 'tensorflow.shape', 'tf.shape', (['x'], {}), '(x)\n', (30193, 30196), True, 'import tensorflow as tf\n'), ((30221, 30232), 'tensorflow.shape', 'tf.shape', (['x'], {}), '(x)\n', (30229, 30232), True, 'import tensorflow as tf\n'), ((32044, 32055), 'tensorflow.shape', 'tf.shape', (['x'], {}), '(x)\n', (32052, 32055), True, 'import tensorflow as tf\n'), ((32196, 32229), 'tensorflow.cast', 'tf.cast', (['self.d_model', 
'tf.float32'], {}), '(self.d_model, tf.float32)\n', (32203, 32229), True, 'import tensorflow as tf\n'), ((32364, 32415), 'tensorflow.config.experimental.list_physical_devices', 'tf.config.experimental.list_physical_devices', (['"""GPU"""'], {}), "('GPU')\n", (32408, 32415), True, 'import tensorflow as tf\n'), ((35970, 35997), 'tensorflow.math.rsqrt', 'tf.math.rsqrt', (['self.d_model'], {}), '(self.d_model)\n', (35983, 35997), True, 'import tensorflow as tf\n'), ((36000, 36027), 'tensorflow.math.minimum', 'tf.math.minimum', (['arg1', 'arg2'], {}), '(arg1, arg2)\n', (36015, 36027), True, 'import tensorflow as tf\n'), ((38610, 38641), 'tensorflow.argmax', 'tf.argmax', (['predictions'], {'axis': '(-1)'}), '(predictions, axis=-1)\n', (38619, 38641), True, 'import tensorflow as tf\n'), ((43876, 43907), 'tensorflow.argmax', 'tf.argmax', (['predictions'], {'axis': '(-1)'}), '(predictions, axis=-1)\n', (43885, 43907), True, 'import tensorflow as tf\n'), ((14098, 14150), 'tensorflow.keras.layers.Embedding', 'tf.keras.layers.Embedding', (['input_vocab_size', 'd_model'], {}), '(input_vocab_size, d_model)\n', (14123, 14150), True, 'import tensorflow as tf\n'), ((14896, 14949), 'tensorflow.zeros', 'tf.zeros', ([], {'shape': '(batch_size, seq_len)', 'dtype': 'tf.int32'}), '(shape=(batch_size, seq_len), dtype=tf.int32)\n', (14904, 14949), True, 'import tensorflow as tf\n'), ((15543, 15576), 'tensorflow.cast', 'tf.cast', (['self.d_model', 'tf.float32'], {}), '(self.d_model, tf.float32)\n', (15550, 15576), True, 'import tensorflow as tf\n'), ((23374, 23426), 'tensorflow.keras.layers.Embedding', 'tf.keras.layers.Embedding', (['input_vocab_size', 'd_model'], {}), '(input_vocab_size, d_model)\n', (23399, 23426), True, 'import tensorflow as tf\n'), ((24174, 24227), 'tensorflow.zeros', 'tf.zeros', ([], {'shape': '(batch_size, seq_len)', 'dtype': 'tf.int32'}), '(shape=(batch_size, seq_len), dtype=tf.int32)\n', (24182, 24227), True, 'import tensorflow as tf\n'), ((24821, 24854), 
'tensorflow.cast', 'tf.cast', (['self.d_model', 'tf.float32'], {}), '(self.d_model, tf.float32)\n', (24828, 24854), True, 'import tensorflow as tf\n'), ((31145, 31179), 'tensorflow.sigmoid', 'tf.sigmoid', (['self.W_layers_sum_coef'], {}), '(self.W_layers_sum_coef)\n', (31155, 31179), True, 'import tensorflow as tf\n'), ((38794, 38820), 'tensorflow.squeeze', 'tf.squeeze', (['output'], {'axis': '(0)'}), '(output, axis=0)\n', (38804, 38820), True, 'import tensorflow as tf\n'), ((488, 506), 'numpy.sqrt', 'np.sqrt', (['(2 / np.pi)'], {}), '(2 / np.pi)\n', (495, 506), True, 'import numpy as np\n'), ((15932, 15950), 'tensorflow.device', 'tf.device', (['gpuName'], {}), '(gpuName)\n', (15941, 15950), True, 'import tensorflow as tf\n'), ((25233, 25251), 'tensorflow.device', 'tf.device', (['gpuName'], {}), '(gpuName)\n', (25242, 25251), True, 'import tensorflow as tf\n'), ((31031, 31072), 'tensorflow.sigmoid', 'tf.sigmoid', (['self.W_layers_sum_coef[w_ind]'], {}), '(self.W_layers_sum_coef[w_ind])\n', (31041, 31072), True, 'import tensorflow as tf\n'), ((32589, 32607), 'tensorflow.device', 'tf.device', (['gpuName'], {}), '(gpuName)\n', (32598, 32607), True, 'import tensorflow as tf\n'), ((525, 537), 'tensorflow.pow', 'tf.pow', (['x', '(3)'], {}), '(x, 3)\n', (531, 537), True, 'import tensorflow as tf\n'), ((13999, 14044), 'numpy.load', 'np.load', (["encoderConfig['pretrained_emb_path']"], {}), "(encoderConfig['pretrained_emb_path'])\n", (14006, 14044), True, 'import numpy as np\n'), ((23275, 23320), 'numpy.load', 'np.load', (["encoderConfig['pretrained_emb_path']"], {}), "(encoderConfig['pretrained_emb_path'])\n", (23282, 23320), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
"""Demonstrate Python and NumPy numeric types by printing each object's type."""
import numpy as np  # numerical array library

# --- integer examples ---
i = 10  # a plain Python int
print(type(i))  # <class 'int'>
a_i = np.zeros(i, dtype=int)  # length-10 array of integer zeros
print(type(a_i))  # <class 'numpy.ndarray'>
print(type(a_i[0]))  # platform integer scalar, e.g. numpy.int64

# --- floating-point examples ---
x = 119.0  # float literal
print(type(x))  # <class 'float'>
y = 1.19e2  # the same value, 119.0, written in scientific notation
print(type(y))  # <class 'float'>
z = np.zeros(i, dtype=float)  # length-10 array of float zeros
print(type(z))  # <class 'numpy.ndarray'>
print(type(z[0])) | [
"numpy.zeros"
] | [((155, 177), 'numpy.zeros', 'np.zeros', (['i'], {'dtype': 'int'}), '(i, dtype=int)\n', (163, 177), True, 'import numpy as np\n'), ((491, 515), 'numpy.zeros', 'np.zeros', (['i'], {'dtype': 'float'}), '(i, dtype=float)\n', (499, 515), True, 'import numpy as np\n')] |
# coding: utf-8
from loss import *
import pandas as pd
import numpy as np
import os
import time
import math
import argparse
import torch
from glob import glob
from tqdm import tqdm
from torch.utils.data import DataLoader
from torch import nn, optim
from torch.utils.data.sampler import BatchSampler, RandomSampler, SequentialSampler
from torch.utils.data.dataset import random_split
import albumentations as A
from albumentations.pytorch import ToTensorV2
import torch_optimizer
from metric import GAP, AverageMeter, accuracy
from model import EfficientNetEncoderHead
from data import TrainDataset, TestDataset
# arguments
# train_csv_exist / test_csv_exist are cache files: glob.glob turned out to be
# surprisingly slow, so these files are generated once to cut iteration time.
# If the cache file already exists, the Dataset is built from train_exist.csv.
parser = argparse.ArgumentParser()
# --- data / output paths ---
parser.add_argument('--train_dir', dest='train_dir', default="./public/train/")
parser.add_argument('--train_csv_dir', dest='train_csv_dir', default="./public/train.csv")
parser.add_argument('--train_csv_exist_dir', dest='train_csv_exist_dir', default="./public/train_exist.csv")
parser.add_argument('--test_dir', dest='test_dir', default="./public/test/")
parser.add_argument('--test_csv_dir', dest='test_csv_dir', default="./public/sample_submission.csv")
parser.add_argument('--test_csv_exist_dir', dest='test_csv_exist_dir', default="./public/sample_submission_exist.csv")
parser.add_argument('--test_csv_submission_dir', dest='test_csv_submission_dir', default="./public/my_submission.csv")
parser.add_argument('--model_dir', dest='model_dir', default="./ckpt/")
parser.add_argument('--resume', dest='resume', default=None)
# --- dataset / image dimensions ---
parser.add_argument('--n_classes', dest='n_classes', type=int, default=1049)
parser.add_argument('--max_size', dest='max_size', type=int, default=256)
parser.add_argument('--image_size', dest='image_size', type=int, default=224)
# --- optimization hyper-parameters ---
parser.add_argument('--epochs', dest='epochs', type=int, default=100)
parser.add_argument('--learning_rate', dest='learning_rate', type=float, default=0.001)
parser.add_argument('--wd', dest='wd', type=float, default=1e-5)
parser.add_argument('--batch_size', dest='batch_size', type=int, default=64)
# --- run mode / environment ---
parser.add_argument('--test', dest='test', action='store_true')
parser.add_argument('--load_epoch', dest='load_epoch', type=int, default=None)
parser.add_argument('--gpu', type=str, default='0')
parser.add_argument('--num_workers', dest='num_workers', type=int, default=16)
parser.add_argument('--log_freq', dest='log_freq', type=int, default=10)
# --- architecture / ArcFace loss settings ---
parser.add_argument('--depth', dest='depth', type=int, default=3)
parser.add_argument('--feat_dim', dest='feat_dim', type=int, default=256)
parser.add_argument('--arcface_s', dest='arcface_s', type=float, default=35)
parser.add_argument('--arcface_m', dest='arcface_m', type=float, default=0.4)
parser.add_argument('--crit', dest='crit', type=str, default='bce')
args = parser.parse_args()
# Restrict CUDA device visibility (must run before any CUDA context is created).
os.environ["CUDA_VISIBLE_DEVICES"] = f'{args.gpu}'
# Create the checkpoint directory if it does not exist yet.
if not os.path.isdir(args.model_dir):
    os.makedirs(args.model_dir)
# Augmentation pipelines (albumentations).
# Train: resize so the smallest side equals max_size, random cutout, one of
# HSV jitter / shift-scale-rotate, then normalize and convert to a tensor.
train_transform = A.Compose([
    A.SmallestMaxSize(args.max_size),
    #A.RandomCrop(args.image_size, args.image_size, p=1.),
    #A.HorizontalFlip(p=0.5),
    A.Cutout(p=0.5),
    A.OneOf([
        A.HueSaturationValue(),
        A.ShiftScaleRotate()
    ], p=1),
    # NOTE(review): mean/std look dataset-specific (not the ImageNet values) —
    # presumably computed on this training set; confirm.
    A.Normalize(mean=[0.4452, 0.4457, 0.4464],
                 std=[0.2592, 0.2596, 0.2600]),
    ToTensorV2(),
])
# Test/validation: deterministic resize plus the same normalization only.
test_transform = A.Compose([
    A.SmallestMaxSize(args.max_size),
    #A.CenterCrop(args.image_size, args.image_size, p=1.),
    A.Normalize(mean=[0.4452, 0.4457, 0.4464],
                 std=[0.2592, 0.2596, 0.2600]),
    ToTensorV2(),
])
# Dataset, Dataloader 정의
dataset = TrainDataset(args, transform=train_transform)
# Add remained last one data
train_size = int(len(dataset) * 0.8) + 1
val_size = len(dataset) - train_size
train_dataset, val_dataset = random_split(dataset, [train_size, val_size])
val_dataset.transform = test_transform
test_dataset = TestDataset(args, transform=test_transform)
train_sampler = RandomSampler(train_dataset)
val_sampler = SequentialSampler(val_dataset)
test_sampler = SequentialSampler(test_dataset)
train_loader = DataLoader(train_dataset,
sampler=train_sampler,
batch_size=args.batch_size,
num_workers=args.num_workers,
pin_memory=False,
drop_last=True)
val_loader = DataLoader(val_dataset,
sampler=val_sampler,
batch_size=args.batch_size//2,
shuffle=False,
num_workers=args.num_workers,
pin_memory=False,
drop_last=False)
test_loader = DataLoader(test_dataset,
sampler=test_sampler,
batch_size=args.batch_size,
shuffle=False,
num_workers=args.num_workers,
pin_memory=False,
drop_last=False)
model = EfficientNetEncoderHead(depth=args.depth, num_classes=args.n_classes, feat_dim=args.feat_dim)
model.cuda()
def radam(parameters, lr=1e-3, betas=(0.9, 0.999), eps=1e-3, weight_decay=0):
    """Build a RAdam optimizer over `parameters`.

    Args:
        parameters: iterable of model parameters (e.g. model.parameters()).
        lr: learning rate.
        betas: Adam beta coefficients; may be given as a string such as
            "(0.9, 0.999)" (e.g. from a CLI flag) and is parsed if so.
        eps: numerical-stability epsilon.
        weight_decay: L2 weight-decay factor.

    Returns:
        A torch_optimizer.RAdam instance.
    """
    if isinstance(betas, str):
        # Parse a literal like "(0.9, 0.999)" safely. ast.literal_eval only
        # accepts Python literals, unlike eval() which executes arbitrary code.
        import ast
        betas = ast.literal_eval(betas)
    return torch_optimizer.RAdam(parameters,
                        lr=lr,
                        betas=betas,
                        eps=eps,
                        weight_decay=weight_decay)
# ArcFace margin loss (arcface_s = scale, arcface_m = angular margin);
# a plain cross-entropy alternative is kept commented out for reference.
#criterion = nn.CrossEntropyLoss()
criterion = ArcFaceLoss(args.arcface_s, args.arcface_m, crit=args.crit)
# RAdam optimizer; the SGD/Adam variants are kept commented out for reference.
#optimizer = optim.SGD(model.parameters(), lr=args.learning_rate, momentum=0.9, weight_decay=args.wd, nesterov=True)
#optimizer = optim.Adam(model.parameters(), lr=args.learning_rate, weight_decay=args.wd)
optimizer = radam(model.parameters(), lr=args.learning_rate, weight_decay=args.wd)
# Cosine decay across the whole run; the training loop steps it once per batch.
scheduler = optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=len(train_loader)*args.epochs, eta_min=1e-6)
# Training
# A checkpoint is saved under args.model_dir (./ckpt) every epoch.
# NOTE(review): an old note said training used all data without a validation
# set; the code below now carves out a validation split, so that note is stale.
if not args.test:
    batch_time = AverageMeter()  # per-batch wall-clock time
    losses = AverageMeter()      # running training loss
    acc_score = AverageMeter()   # running top-1 accuracy
    gap_score = AverageMeter()   # running Global Average Precision
    train_loss, train_acc = [], []
    best_acc, best_gap, best_epoch, best_gap_epoch = 0, 0, 0, 0
    end = time.time()
    start_epoch = 0
    # Resume from a checkpoint whose filename encodes the epoch ("...epoch_XXX.pth").
    if args.resume is not None:
        model.load_state_dict(torch.load(args.resume))
        start_epoch = int(args.resume[-7:-4])  # parse "XXX" from the filename
        print(f'Loaded {start_epoch} epoch..')
        start_epoch += 1
    for epoch in range(start_epoch, args.epochs):
        # NOTE(review): `iter` shadows the builtin of the same name.
        for iter, (image, label) in enumerate(train_loader):
            image = image.cuda()
            label = label.cuda()
            pred = model(image, label)  # label is passed for the ArcFace margin
            loss = loss_fn(criterion, label, pred, args.n_classes)
            acc = accuracy(pred, label)
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            scheduler.step()  # cosine schedule advances once per batch
            losses.update(loss.data.item(), image.size(0))
            batch_time.update(time.time() - end)
            acc_score.update(acc)
            gap_score.update(GAP(pred, label))
            end = time.time()
            if iter % args.log_freq == 0:
                print(f'epoch : {epoch} step : [{iter}/{len(train_loader)}]\t'
                    f'time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                    f'loss {losses.val:.4f} ({losses.avg:.4f})\t'
                    f'acc {acc_score.val:.4f} ({acc_score.avg:.4f})\t'
                    f'gap {gap_score.val:.4f} ({gap_score.avg:.4f})')
        # validation pass (NOTE(review): no torch.no_grad() guard here — consider adding one)
        model.eval()
        val_start = time.time()
        val_time = 0
        num_correct, num_cnt = 0, 0
        sum_gap = 0
        for i, (image, label) in enumerate(tqdm(val_loader)):
            image = image.cuda()
            label = label.cuda()
            pred = model(image)  # no label argument -> plain logits, no margin
            num_correct += torch.sum(pred.max(1)[1] == label.data)
            num_cnt += len(label)
            sum_gap += GAP(pred, label)
        val_acc = (num_correct.double()/num_cnt).cpu() * 100
        val_gap = sum_gap /len(val_loader)
        # Track the best checkpoints by accuracy and by GAP separately.
        if val_acc > best_acc:
            best_acc = val_acc
            best_epoch = epoch
            torch.save(model.state_dict(), os.path.join(args.model_dir, 'best_model.pth'))
        if val_gap > best_gap:
            best_gap = val_gap
            best_gap_epoch = epoch
            torch.save(model.state_dict(), os.path.join(args.model_dir, 'best_gap_model.pth'))
        print(f'epoch : {epoch} [{len(val_loader)}]\t'
            f'time {time.time()-val_start:.3f}\t'
            f'val acc {val_acc:.4f}\t'
            f'val gap {val_gap:.4f}\t'
            f'best acc {best_acc:.4f} ({best_epoch})\t'
            f'best gap {best_gap:.4f} ({best_gap_epoch})\t')
        torch.save(model.state_dict(), os.path.join(args.model_dir, "epoch_{0:03}.pth".format(epoch)))
        model.train()
    # After all epochs finish, run inference on the test set with the best checkpoint.
    model.eval()
    model.load_state_dict(torch.load(os.path.join(args.model_dir, 'best_model.pth')))
    submission = pd.read_csv(args.test_csv_dir)
    # NOTE(review): message mentions args.load_epoch but best_model.pth was loaded.
    print(f'Loaded {args.load_epoch} epoch ckpt..')
    for iter, (image, label) in enumerate(tqdm(test_loader)):
        image = image.cuda()
        pred = model(image)
        pred = nn.Softmax(dim=1)(pred)  # logits -> per-class confidence scores
        pred = pred.detach().cpu().numpy()
        landmark_ids = np.argmax(pred, axis=1)
        for offset, landmark_id in enumerate(landmark_ids):
            confidence = pred[offset, landmark_id]
            cur_idx = (iter*args.batch_size) + offset  # global row index in the csv
            submission.loc[cur_idx, 'landmark_id'] = landmark_id
            submission.loc[cur_idx, 'conf'] = confidence
    submission.to_csv(args.test_csv_submission_dir, index=False)
    print(f'Save submission: {len(submission)}')
# Test-only mode
# When --test is given, no training happens: a checkpoint is loaded, the
# confidence score is computed with Softmax, the class with argmax, and
# everything is written to a csv file.
# NOTE(review): an old note said batch=1 was used; the loader now uses
# args.batch_size.
else :
    # Pick the checkpoint: a specific epoch if requested, else the best one.
    if args.load_epoch is not None:
        ckpt = f'epoch_{args.load_epoch:03}.pth'
        print(f'Loaded {args.load_epoch} epoch ckpt..')
    else:
        ckpt = 'best_model.pth'
        print(f'Loaded best epoch ..')
    model.load_state_dict(torch.load(os.path.join(args.model_dir, ckpt)))
    model.eval()
    submission = pd.read_csv(args.test_csv_dir)
    for iter, (image, label) in enumerate(tqdm(test_loader)):
        image = image.cuda()
        pred = model(image)
        pred = nn.Softmax(dim=1)(pred)
        pred = pred.detach().cpu().numpy()
        landmark_ids = np.argmax(pred, axis=1)
        for offset, landmark_id in enumerate(landmark_ids):
            confidence = pred[offset, landmark_id]
            cur_idx = (iter*args.batch_size) + offset
            submission.loc[cur_idx, 'landmark_id'] = landmark_id
            submission.loc[cur_idx, 'conf'] = confidence
    submission.to_csv(args.test_csv_submission_dir, index=False)
    print(f'Save submission: {len(submission)}')
| [
"argparse.ArgumentParser",
"numpy.argmax",
"pandas.read_csv",
"torch.nn.Softmax",
"albumentations.Normalize",
"os.path.join",
"torch.utils.data.dataset.random_split",
"torch.utils.data.DataLoader",
"albumentations.Cutout",
"torch.load",
"albumentations.SmallestMaxSize",
"data.TestDataset",
"... | [((790, 815), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (813, 815), False, 'import argparse\n'), ((3736, 3781), 'data.TrainDataset', 'TrainDataset', (['args'], {'transform': 'train_transform'}), '(args, transform=train_transform)\n', (3748, 3781), False, 'from data import TrainDataset, TestDataset\n'), ((3919, 3964), 'torch.utils.data.dataset.random_split', 'random_split', (['dataset', '[train_size, val_size]'], {}), '(dataset, [train_size, val_size])\n', (3931, 3964), False, 'from torch.utils.data.dataset import random_split\n'), ((4019, 4062), 'data.TestDataset', 'TestDataset', (['args'], {'transform': 'test_transform'}), '(args, transform=test_transform)\n', (4030, 4062), False, 'from data import TrainDataset, TestDataset\n'), ((4080, 4108), 'torch.utils.data.sampler.RandomSampler', 'RandomSampler', (['train_dataset'], {}), '(train_dataset)\n', (4093, 4108), False, 'from torch.utils.data.sampler import BatchSampler, RandomSampler, SequentialSampler\n'), ((4123, 4153), 'torch.utils.data.sampler.SequentialSampler', 'SequentialSampler', (['val_dataset'], {}), '(val_dataset)\n', (4140, 4153), False, 'from torch.utils.data.sampler import BatchSampler, RandomSampler, SequentialSampler\n'), ((4169, 4200), 'torch.utils.data.sampler.SequentialSampler', 'SequentialSampler', (['test_dataset'], {}), '(test_dataset)\n', (4186, 4200), False, 'from torch.utils.data.sampler import BatchSampler, RandomSampler, SequentialSampler\n'), ((4217, 4361), 'torch.utils.data.DataLoader', 'DataLoader', (['train_dataset'], {'sampler': 'train_sampler', 'batch_size': 'args.batch_size', 'num_workers': 'args.num_workers', 'pin_memory': '(False)', 'drop_last': '(True)'}), '(train_dataset, sampler=train_sampler, batch_size=args.batch_size,\n num_workers=args.num_workers, pin_memory=False, drop_last=True)\n', (4227, 4361), False, 'from torch.utils.data import DataLoader\n'), ((4501, 4667), 'torch.utils.data.DataLoader', 'DataLoader', (['val_dataset'], 
{'sampler': 'val_sampler', 'batch_size': '(args.batch_size // 2)', 'shuffle': '(False)', 'num_workers': 'args.num_workers', 'pin_memory': '(False)', 'drop_last': '(False)'}), '(val_dataset, sampler=val_sampler, batch_size=args.batch_size // \n 2, shuffle=False, num_workers=args.num_workers, pin_memory=False,\n drop_last=False)\n', (4511, 4667), False, 'from torch.utils.data import DataLoader\n'), ((4815, 4977), 'torch.utils.data.DataLoader', 'DataLoader', (['test_dataset'], {'sampler': 'test_sampler', 'batch_size': 'args.batch_size', 'shuffle': '(False)', 'num_workers': 'args.num_workers', 'pin_memory': '(False)', 'drop_last': '(False)'}), '(test_dataset, sampler=test_sampler, batch_size=args.batch_size,\n shuffle=False, num_workers=args.num_workers, pin_memory=False,\n drop_last=False)\n', (4825, 4977), False, 'from torch.utils.data import DataLoader\n'), ((5129, 5226), 'model.EfficientNetEncoderHead', 'EfficientNetEncoderHead', ([], {'depth': 'args.depth', 'num_classes': 'args.n_classes', 'feat_dim': 'args.feat_dim'}), '(depth=args.depth, num_classes=args.n_classes,\n feat_dim=args.feat_dim)\n', (5152, 5226), False, 'from model import EfficientNetEncoderHead\n'), ((2996, 3025), 'os.path.isdir', 'os.path.isdir', (['args.model_dir'], {}), '(args.model_dir)\n', (3009, 3025), False, 'import os\n'), ((3031, 3058), 'os.makedirs', 'os.makedirs', (['args.model_dir'], {}), '(args.model_dir)\n', (3042, 3058), False, 'import os\n'), ((5385, 5479), 'torch_optimizer.RAdam', 'torch_optimizer.RAdam', (['parameters'], {'lr': 'lr', 'betas': 'betas', 'eps': 'eps', 'weight_decay': 'weight_decay'}), '(parameters, lr=lr, betas=betas, eps=eps, weight_decay\n =weight_decay)\n', (5406, 5479), False, 'import torch_optimizer\n'), ((6250, 6264), 'metric.AverageMeter', 'AverageMeter', ([], {}), '()\n', (6262, 6264), False, 'from metric import GAP, AverageMeter, accuracy\n'), ((6278, 6292), 'metric.AverageMeter', 'AverageMeter', ([], {}), '()\n', (6290, 6292), False, 'from metric import GAP, 
AverageMeter, accuracy\n'), ((6309, 6323), 'metric.AverageMeter', 'AverageMeter', ([], {}), '()\n', (6321, 6323), False, 'from metric import GAP, AverageMeter, accuracy\n'), ((6340, 6354), 'metric.AverageMeter', 'AverageMeter', ([], {}), '()\n', (6352, 6354), False, 'from metric import GAP, AverageMeter, accuracy\n'), ((6470, 6481), 'time.time', 'time.time', ([], {}), '()\n', (6479, 6481), False, 'import time\n'), ((9311, 9341), 'pandas.read_csv', 'pd.read_csv', (['args.test_csv_dir'], {}), '(args.test_csv_dir)\n', (9322, 9341), True, 'import pandas as pd\n'), ((10535, 10565), 'pandas.read_csv', 'pd.read_csv', (['args.test_csv_dir'], {}), '(args.test_csv_dir)\n', (10546, 10565), True, 'import pandas as pd\n'), ((3109, 3141), 'albumentations.SmallestMaxSize', 'A.SmallestMaxSize', (['args.max_size'], {}), '(args.max_size)\n', (3126, 3141), True, 'import albumentations as A\n'), ((3236, 3251), 'albumentations.Cutout', 'A.Cutout', ([], {'p': '(0.5)'}), '(p=0.5)\n', (3244, 3251), True, 'import albumentations as A\n'), ((3345, 3415), 'albumentations.Normalize', 'A.Normalize', ([], {'mean': '[0.4452, 0.4457, 0.4464]', 'std': '[0.2592, 0.2596, 0.26]'}), '(mean=[0.4452, 0.4457, 0.4464], std=[0.2592, 0.2596, 0.26])\n', (3356, 3415), True, 'import albumentations as A\n'), ((3440, 3452), 'albumentations.pytorch.ToTensorV2', 'ToTensorV2', ([], {}), '()\n', (3450, 3452), False, 'from albumentations.pytorch import ToTensorV2\n'), ((3491, 3523), 'albumentations.SmallestMaxSize', 'A.SmallestMaxSize', (['args.max_size'], {}), '(args.max_size)\n', (3508, 3523), True, 'import albumentations as A\n'), ((3588, 3658), 'albumentations.Normalize', 'A.Normalize', ([], {'mean': '[0.4452, 0.4457, 0.4464]', 'std': '[0.2592, 0.2596, 0.26]'}), '(mean=[0.4452, 0.4457, 0.4464], std=[0.2592, 0.2596, 0.26])\n', (3599, 3658), True, 'import albumentations as A\n'), ((3683, 3695), 'albumentations.pytorch.ToTensorV2', 'ToTensorV2', ([], {}), '()\n', (3693, 3695), False, 'from albumentations.pytorch 
import ToTensorV2\n'), ((7857, 7868), 'time.time', 'time.time', ([], {}), '()\n', (7866, 7868), False, 'import time\n'), ((9436, 9453), 'tqdm.tqdm', 'tqdm', (['test_loader'], {}), '(test_loader)\n', (9440, 9453), False, 'from tqdm import tqdm\n'), ((9618, 9641), 'numpy.argmax', 'np.argmax', (['pred'], {'axis': '(1)'}), '(pred, axis=1)\n', (9627, 9641), True, 'import numpy as np\n'), ((10608, 10625), 'tqdm.tqdm', 'tqdm', (['test_loader'], {}), '(test_loader)\n', (10612, 10625), False, 'from tqdm import tqdm\n'), ((10790, 10813), 'numpy.argmax', 'np.argmax', (['pred'], {'axis': '(1)'}), '(pred, axis=1)\n', (10799, 10813), True, 'import numpy as np\n'), ((6564, 6587), 'torch.load', 'torch.load', (['args.resume'], {}), '(args.resume)\n', (6574, 6587), False, 'import torch\n'), ((7009, 7030), 'metric.accuracy', 'accuracy', (['pred', 'label'], {}), '(pred, label)\n', (7017, 7030), False, 'from metric import GAP, AverageMeter, accuracy\n'), ((7373, 7384), 'time.time', 'time.time', ([], {}), '()\n', (7382, 7384), False, 'import time\n'), ((7989, 8005), 'tqdm.tqdm', 'tqdm', (['val_loader'], {}), '(val_loader)\n', (7993, 8005), False, 'from tqdm import tqdm\n'), ((8230, 8246), 'metric.GAP', 'GAP', (['pred', 'label'], {}), '(pred, label)\n', (8233, 8246), False, 'from metric import GAP, AverageMeter, accuracy\n'), ((9245, 9291), 'os.path.join', 'os.path.join', (['args.model_dir', '"""best_model.pth"""'], {}), "(args.model_dir, 'best_model.pth')\n", (9257, 9291), False, 'import os\n'), ((9528, 9545), 'torch.nn.Softmax', 'nn.Softmax', ([], {'dim': '(1)'}), '(dim=1)\n', (9538, 9545), False, 'from torch import nn, optim\n'), ((10464, 10498), 'os.path.join', 'os.path.join', (['args.model_dir', 'ckpt'], {}), '(args.model_dir, ckpt)\n', (10476, 10498), False, 'import os\n'), ((10700, 10717), 'torch.nn.Softmax', 'nn.Softmax', ([], {'dim': '(1)'}), '(dim=1)\n', (10710, 10717), False, 'from torch import nn, optim\n'), ((3275, 3297), 'albumentations.HueSaturationValue', 
'A.HueSaturationValue', ([], {}), '()\n', (3295, 3297), True, 'import albumentations as A\n'), ((3307, 3327), 'albumentations.ShiftScaleRotate', 'A.ShiftScaleRotate', ([], {}), '()\n', (3325, 3327), True, 'import albumentations as A\n'), ((7324, 7340), 'metric.GAP', 'GAP', (['pred', 'label'], {}), '(pred, label)\n', (7327, 7340), False, 'from metric import GAP, AverageMeter, accuracy\n'), ((8487, 8533), 'os.path.join', 'os.path.join', (['args.model_dir', '"""best_model.pth"""'], {}), "(args.model_dir, 'best_model.pth')\n", (8499, 8533), False, 'import os\n'), ((8675, 8725), 'os.path.join', 'os.path.join', (['args.model_dir', '"""best_gap_model.pth"""'], {}), "(args.model_dir, 'best_gap_model.pth')\n", (8687, 8725), False, 'import os\n'), ((7242, 7253), 'time.time', 'time.time', ([], {}), '()\n', (7251, 7253), False, 'import time\n'), ((8804, 8815), 'time.time', 'time.time', ([], {}), '()\n', (8813, 8815), False, 'import time\n')] |
"""
CNN Model: Handles anything related to the Convolutional Neural Network
"""
import torch
import numpy as np
# import PyTorch Functionalities
import torch.nn.functional as F
import torch.nn as nn
# import own modules
import loghub
'''
////////////////////////////////////////////////////////////////////////////////////
/// Transpose / Normalization Functions ////
////////////////////////////////////////////////////////////////////////////////////
'''
# Converts a NumPy (data, label) pair into torch Tensors for the GPU pipeline.
class ToTensor(object):
    """Callable transform: NumPy arrays -> torch Tensors."""

    def __call__(self, sample):
        array, target = sample
        # Identity transpose kept as a placeholder for a future axis swap;
        # (0, 1, 2) leaves the memory layout unchanged.
        array = array.transpose((0, 1, 2))
        return torch.from_numpy(array), torch.from_numpy(target)
# Standardizes the data half of a (data, label) sample.
class Normalize(object):
    """Callable transform applying (data - mean) / std; labels pass through."""

    def __init__(self, mean, std):
        self.mean = mean
        self.std = std

    def __call__(self, sample):
        data, label = sample
        standardized = (data - self.mean) / self.std
        return standardized, label
'''
////////////////////////////////////////////////////////////////////////////////////
/// Convolution Neural Network Model Class ////
////////////////////////////////////////////////////////////////////////////////////
'''
class BaselineASC(nn.Module):
    """Baseline CNN for acoustic scene classification.

    Two conv blocks (conv -> batch-norm -> ReLU -> max-pool -> dropout)
    followed by a two-layer dense head over 10 classes.  The pooling sizes
    assume (batch, in_channel, 40, 500) log-mel inputs — TODO confirm with
    the data pipeline.  Outputs log-probabilities for use with NLL loss.
    """

    def __init__(self, in_channel):
        # Layer definitions only; the actual computation happens in forward().
        super(BaselineASC, self).__init__()
        # Block 1: 32 feature maps from the input channels, 11x11 kernels,
        # padding 5 keeps the spatial size ("same" convolution).
        self.conv1 = nn.Conv2d(in_channels=in_channel, out_channels=32, kernel_size=11, stride=1, padding=5)
        self.conv1_bn = nn.BatchNorm2d(32)
        self.mp1 = nn.MaxPool2d((5,5))
        self.drop1 = nn.Dropout(0.3)  # regularization
        # Block 2: 64 feature maps, 5x5 kernels, padding 2 ("same").
        self.conv2 = nn.Conv2d(in_channels=32, out_channels=64, kernel_size=5, stride=1, padding=2)
        self.conv2_bn = nn.BatchNorm2d(64)
        self.mp2 = nn.MaxPool2d((4,100))
        self.drop2 = nn.Dropout(0.3)
        # Dense head: 64*2*1 = 128 features -> 100 -> 10 classes.
        self.fc1 = nn.Linear(1*2*64, 100)
        self.drop3 = nn.Dropout(0.3)
        self.fc2 = nn.Linear(100, 10)

    def forward(self, x):
        """Forward pass; x is (batch, channels, mel_bins, time_frames)."""
        # Conv block 1; for a 40x500 input: (B, 32, 8, 100) after pooling.
        x = self.drop1(self.mp1(F.relu(self.conv1_bn(self.conv1(x)))))
        # Conv block 2; pools down to (B, 64, 2, 1).
        x = self.drop2(self.mp2(F.relu(self.conv2_bn(self.conv2(x)))))
        # Flatten to a 128-dimensional feature vector per example.
        x = x.view(-1, 1*2*64)
        # Dense head with dropout on the hidden representation.
        x = self.drop3(F.relu(self.fc1(x)))
        x = self.fc2(x)
        # Log-probabilities over the 10 classes.
        return F.log_softmax(x, dim=1)
def train(args, model, device, train_loader, optimizer, epoch):
    """Run one training epoch, logging the loss every ``args.log_interval`` batches."""
    model.train()
    for step, (data, label) in enumerate(train_loader):
        # Move the batch onto the training device with the dtypes the model expects.
        data = data.to(device, dtype=torch.float)
        label = label.to(device, dtype=torch.long)
        # Standard optimization step: clear gradients, forward, loss, backward, update.
        optimizer.zero_grad()
        output = model(data)
        loss = F.nll_loss(output, label)
        loss.backward()
        optimizer.step()
        # Periodic progress logging.
        if step % args.log_interval == 0:
            loghub.logMsg(msg="{}: Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}".format(
                __name__, epoch, step * len(data), len(train_loader.dataset),
                100. * step / len(train_loader), loss.item()), otherlogs=["test_acc"])
def test(args, model, device, test_loader, data_type):
    """Evaluate ``model`` on ``test_loader``; return the concatenated predictions."""
    model.eval()
    test_loss = 0
    correct = 0
    pred_results = np.asarray([])
    loghub.logMsg(msg="{}: Testing...".format(__name__), otherlogs=["test_acc"])
    # Inference only: disable autograd bookkeeping.
    with torch.no_grad():
        for data, label in test_loader:
            data = data.to(device, dtype=torch.float)
            label = label.to(device, dtype=torch.long)
            output = model(data)
            # Sum (not average) the per-batch losses; normalized after the loop.
            test_loss += F.nll_loss(output, label, reduction='sum').item()
            pred = output.argmax(dim=1, keepdim=True)
            correct += pred.eq(label.view_as(pred)).sum().item()
            # Collate this batch's predictions into the running array.
            pred_results = np.concatenate((pred_results, np.squeeze(pred.cpu().numpy())))
    # Normalize the accumulated loss by the number of test samples.
    test_loss /= len(test_loader.dataset)
    loghub.logMsg(msg="{}: Model prediction on {}: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n".format(
        __name__, data_type, test_loss, correct, len(test_loader.dataset),
        100. * correct / len(test_loader.dataset)), otherlogs=["test_acc"])
    return pred_results
def predict(model, device, test_loader):
    """Return the model's argmax class predictions for every batch in ``test_loader``."""
    model.eval()
    pred_results = np.asarray([])
    loghub.logMsg(msg="{}: Predicting...".format(__name__), otherlogs=["test_acc"])
    # Inference only: no gradients needed.
    with torch.no_grad():
        # Labels in the loader are ignored here; only the data is consumed.
        for data, _ in test_loader:
            data = data.to(device, dtype=torch.float)
            output = model(data)
            pred = output.argmax(dim=1, keepdim=True)
            # Collate this batch's predictions into the running array.
            pred_results = np.concatenate((pred_results, np.squeeze(pred.cpu().numpy())))
    return pred_results
| [
"torch.nn.Dropout",
"numpy.asarray",
"torch.nn.Conv2d",
"torch.nn.BatchNorm2d",
"torch.nn.functional.nll_loss",
"torch.nn.functional.log_softmax",
"torch.nn.Linear",
"torch.nn.MaxPool2d",
"torch.nn.functional.relu",
"torch.no_grad",
"numpy.concatenate",
"torch.from_numpy"
] | [((4963, 4977), 'numpy.asarray', 'np.asarray', (['[]'], {}), '([])\n', (4973, 4977), True, 'import numpy as np\n'), ((6643, 6657), 'numpy.asarray', 'np.asarray', (['[]'], {}), '([])\n', (6653, 6657), True, 'import numpy as np\n'), ((1639, 1730), 'torch.nn.Conv2d', 'nn.Conv2d', ([], {'in_channels': 'in_channel', 'out_channels': '(32)', 'kernel_size': '(11)', 'stride': '(1)', 'padding': '(5)'}), '(in_channels=in_channel, out_channels=32, kernel_size=11, stride=1,\n padding=5)\n', (1648, 1730), True, 'import torch.nn as nn\n'), ((1775, 1793), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(32)'], {}), '(32)\n', (1789, 1793), True, 'import torch.nn as nn\n'), ((1830, 1850), 'torch.nn.MaxPool2d', 'nn.MaxPool2d', (['(5, 5)'], {}), '((5, 5))\n', (1842, 1850), True, 'import torch.nn as nn\n'), ((1937, 1952), 'torch.nn.Dropout', 'nn.Dropout', (['(0.3)'], {}), '(0.3)\n', (1947, 1952), True, 'import torch.nn as nn\n'), ((1968, 2046), 'torch.nn.Conv2d', 'nn.Conv2d', ([], {'in_channels': '(32)', 'out_channels': '(64)', 'kernel_size': '(5)', 'stride': '(1)', 'padding': '(2)'}), '(in_channels=32, out_channels=64, kernel_size=5, stride=1, padding=2)\n', (1977, 2046), True, 'import torch.nn as nn\n'), ((2065, 2083), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(64)'], {}), '(64)\n', (2079, 2083), True, 'import torch.nn as nn\n'), ((2097, 2119), 'torch.nn.MaxPool2d', 'nn.MaxPool2d', (['(4, 100)'], {}), '((4, 100))\n', (2109, 2119), True, 'import torch.nn as nn\n'), ((2134, 2149), 'torch.nn.Dropout', 'nn.Dropout', (['(0.3)'], {}), '(0.3)\n', (2144, 2149), True, 'import torch.nn as nn\n'), ((2181, 2207), 'torch.nn.Linear', 'nn.Linear', (['(1 * 2 * 64)', '(100)'], {}), '(1 * 2 * 64, 100)\n', (2190, 2207), True, 'import torch.nn as nn\n'), ((2219, 2234), 'torch.nn.Dropout', 'nn.Dropout', (['(0.3)'], {}), '(0.3)\n', (2229, 2234), True, 'import torch.nn as nn\n'), ((2248, 2266), 'torch.nn.Linear', 'nn.Linear', (['(100)', '(10)'], {}), '(100, 10)\n', (2257, 2266), True, 'import torch.nn 
as nn\n'), ((2626, 2635), 'torch.nn.functional.relu', 'F.relu', (['x'], {}), '(x)\n', (2632, 2635), True, 'import torch.nn.functional as F\n'), ((2905, 2914), 'torch.nn.functional.relu', 'F.relu', (['x'], {}), '(x)\n', (2911, 2914), True, 'import torch.nn.functional as F\n'), ((3229, 3238), 'torch.nn.functional.relu', 'F.relu', (['x'], {}), '(x)\n', (3235, 3238), True, 'import torch.nn.functional as F\n'), ((3393, 3416), 'torch.nn.functional.log_softmax', 'F.log_softmax', (['x'], {'dim': '(1)'}), '(x, dim=1)\n', (3406, 3416), True, 'import torch.nn.functional as F\n'), ((4106, 4131), 'torch.nn.functional.nll_loss', 'F.nll_loss', (['output', 'label'], {}), '(output, label)\n', (4116, 4131), True, 'import torch.nn.functional as F\n'), ((5145, 5160), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (5158, 5160), False, 'import torch\n'), ((6861, 6876), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (6874, 6876), False, 'import torch\n'), ((755, 777), 'torch.from_numpy', 'torch.from_numpy', (['data'], {}), '(data)\n', (771, 777), False, 'import torch\n'), ((779, 802), 'torch.from_numpy', 'torch.from_numpy', (['label'], {}), '(label)\n', (795, 802), False, 'import torch\n'), ((5915, 5951), 'numpy.concatenate', 'np.concatenate', (['(pred_results, pred)'], {}), '((pred_results, pred))\n', (5929, 5951), True, 'import numpy as np\n'), ((7393, 7429), 'numpy.concatenate', 'np.concatenate', (['(pred_results, pred)'], {}), '((pred_results, pred))\n', (7407, 7429), True, 'import numpy as np\n'), ((5602, 5644), 'torch.nn.functional.nll_loss', 'F.nll_loss', (['output', 'label'], {'reduction': '"""sum"""'}), "(output, label, reduction='sum')\n", (5612, 5644), True, 'import torch.nn.functional as F\n')] |
from __future__ import print_function
from django.core.cache import cache
from rest_framework.authentication import BasicAuthentication
from drf_disable_csrf import CsrfExemptSessionAuthentication
import hashlib
import json
import logging
import numpy as np
from os import path
from django.http import HttpResponse, JsonResponse
from rest_framework.decorators import api_view, authentication_classes
from tilesets.models import Tileset
from fragments.models import ChromInfo, ChromSizes
from operator import itemgetter
from fragments.utils import (
calc_measure_dtd,
calc_measure_size,
calc_measure_noise,
calc_measure_sharpness,
get_frag_by_loc,
get_intra_chr_loops_from_looplist,
rel_loci_2_obj
)
# Module-level logger for this views module.
logger = logging.getLogger(__name__)
# Fragment measures this API knows how to compute per matrix.
SUPPORTED_MEASURES = ['distance-to-diagonal', 'noise', 'size', 'sharpness']
@api_view(['POST'])
@authentication_classes((CsrfExemptSessionAuthentication, BasicAuthentication))
def fragments_by_loci(request):
    '''
    Retrieve a list of locations and return the corresponding matrix fragments.

    Args:
        request (django.http.HTTPRequest): POST request whose body carries
            ``loci``, a list of entries of the form
            ``[chrom1, start1, end1, chrom2, start2, end2, dataset, zoomOutLevel]``.

    Return:
        django.http.JsonResponse: ``{'fragments': [...]}`` with one raw
        matrix (as a nested list) per input locus, in request order.
    '''
    loci = request.data.get('loci', [])
    # Optional query params: decimal rounding, cache bypass, fragment size,
    # and whether to skip matrix balancing.
    try:
        precision = int(request.GET.get('precision', False))
    except ValueError:
        precision = False
    try:
        no_cache = bool(request.GET.get('no-cache', False))
    except ValueError:
        no_cache = False
    try:
        dims = int(request.GET.get('dims', 22))
    except ValueError:
        dims = 22
    try:
        no_balance = bool(request.GET.get('no-balance', False))
    except ValueError:
        no_balance = False
    '''
    Loci list must be of type:
    0: chrom1
    1: start1
    2: end1
    3: chrom2
    4: start2
    5: end2
    6: dataset
    7: zoomOutLevel [0]
    '''
    # Group loci by cooler file and zoom-out level; the appended counter `i`
    # records the original request position so order can be restored later.
    i = 0
    loci_lists = {}
    try:
        for locus in loci:
            cooler_file = ''
            if locus[6]:
                if locus[6].endswith('.cool'):
                    # A cooler filename was given directly.
                    cooler_file = path.join('data', locus[6])
                else:
                    # Otherwise resolve a tileset uuid to its data file.
                    try:
                        cooler_file = Tileset.objects.get(
                            uuid=locus[6]
                        ).datafile
                    except AttributeError:
                        return JsonResponse({
                            'error': 'Dataset (cooler file) not in database',
                        }, status=500)
            else:
                return JsonResponse({
                    'error': 'Dataset (cooler file) not specified',
                }, status=500)
            if cooler_file not in loci_lists:
                loci_lists[cooler_file] = {}
            if locus[7] not in loci_lists[cooler_file]:
                loci_lists[cooler_file][locus[7]] = []
            loci_lists[cooler_file][locus[7]].append(locus[0:6] + [i])
            i += 1
    except Exception as e:
        return JsonResponse({
            'error': 'Could not convert loci.',
            'error_message': str(e)
        }, status=500)
    # Get a unique string for caching (loci + rounding + fragment size).
    uuid = hashlib.md5(
        json.dumps(loci, sort_keys=True) + str(precision) + str(dims)
    ).hexdigest()
    # Check if something is cached
    if not no_cache:
        results = cache.get('frag_by_loci_%s' % uuid, False)
        if results:
            return JsonResponse(results)
    matrices = []
    try:
        for dataset in loci_lists:
            for zoomout_level in loci_lists[dataset]:
                # Fetch all fragments for this cooler file / zoom level batch.
                raw_matrices = get_frag_by_loc(
                    dataset,
                    loci_lists[dataset][zoomout_level],
                    zoomout_level=zoomout_level,
                    dim=dims,
                    balanced=not no_balance
                )
                if precision > 0:
                    # Round to the requested number of decimals.
                    raw_matrices = np.around(raw_matrices, decimals=precision)
                i = 0
                for raw_matrix in raw_matrices:
                    # 'id' is the original request index stored above.
                    matrices.append({
                        'id': loci_lists[dataset][zoomout_level][i][6],
                        'raw': raw_matrix
                    })
                    i += 1
    except Exception as e:
        return JsonResponse({
            'error': 'Could not retrieve fragments.',
            'error_message': str(e)
        }, status=500)
    # Sort matrices back into original request order.
    matrices_sorted = sorted(matrices, key=itemgetter('id'))
    fragments = []
    i = 0
    for matrix in matrices_sorted:
        fragments.append(matrix['raw'].tolist())
        i += 1
    # Create results
    results = {
        'fragments': fragments
    }
    # Cache results for 8 hour
    cache.set('frag_by_loci_%s' % uuid, results, 60 * 60 * 8)
    return JsonResponse(results)
@api_view(['GET'])
@authentication_classes((CsrfExemptSessionAuthentication, BasicAuthentication))
def fragments_by_chr(request):
    '''
    Return matrix fragments for all intra-chromosomal loops of one
    chromosome, optionally annotated with per-fragment measures.

    Query params: ``chrom``, ``cooler`` (file or tileset uuid),
    ``loop-list``, ``measures``, ``zoomout-level``, ``limit``,
    ``precision``, ``no-cache``, ``for-config``.
    '''
    chrom = request.GET.get('chrom', False)
    cooler_file = request.GET.get('cooler', False)
    loop_list = request.GET.get('loop-list', False)
    # Resolve the cooler dataset: direct filename or tileset uuid.
    if cooler_file:
        if cooler_file.endswith('.cool'):
            cooler_file = path.join('data', cooler_file)
        else:
            try:
                cooler_file = Tileset.objects.get(uuid=cooler_file).datafile
            except AttributeError:
                return JsonResponse({
                    'error': 'Cooler file not in database',
                }, status=500)
    else:
        return JsonResponse({
            'error': 'Cooler file not specified',
        }, status=500)
    # Parse optional query parameters, falling back to permissive defaults.
    try:
        measures = request.GET.getlist('measures', [])
    except ValueError:
        measures = []
    try:
        zoomout_level = int(request.GET.get('zoomout-level', -1))
    except ValueError:
        zoomout_level = -1
    try:
        limit = int(request.GET.get('limit', -1))
    except ValueError:
        limit = -1
    try:
        precision = int(request.GET.get('precision', False))
    except ValueError:
        precision = False
    try:
        no_cache = bool(request.GET.get('no-cache', False))
    except ValueError:
        no_cache = False
    try:
        for_config = bool(request.GET.get('for-config', False))
    except ValueError:
        for_config = False
    # Get a unique string for the URL query string
    uuid = hashlib.md5(
        '-'.join([
            cooler_file,
            chrom,
            loop_list,
            str(limit),
            str(precision),
            str(zoomout_level)
        ])
    ).hexdigest()
    # Check if something is cached
    if not no_cache:
        results = cache.get('frag_by_chrom_%s' % uuid, False)
        if results:
            return JsonResponse(results)
    # Get relative loci
    try:
        (loci_rel, chroms) = get_intra_chr_loops_from_looplist(
            path.join('data', loop_list), chrom
        )
    except Exception as e:
        return JsonResponse({
            'error': 'Could not retrieve loci.',
            'error_message': str(e)
        }, status=500)
    # Convert to chromosome-relative loci list
    loci_rel_chroms = np.column_stack(
        (chroms[:, 0], loci_rel[:, 0:2], chroms[:, 1], loci_rel[:, 2:4])
    )
    if limit > 0:
        loci_rel_chroms = loci_rel_chroms[:limit]
    # Get fragments
    try:
        matrices = get_frag_by_loc(
            cooler_file,
            loci_rel_chroms,
            zoomout_level=zoomout_level
        )
    except Exception as e:
        return JsonResponse({
            'error': 'Could not retrieve fragments.',
            'error_message': str(e)
        }, status=500)
    if precision > 0:
        matrices = np.around(matrices, decimals=precision)
    fragments = []
    loci_struct = rel_loci_2_obj(loci_rel_chroms)
    # Check supported measures
    measures_applied = []
    for measure in measures:
        if measure in SUPPORTED_MEASURES:
            measures_applied.append(measure)
    # Compute the requested measures for each fragment matrix.
    i = 0
    for matrix in matrices:
        measures_values = []
        for measure in measures:
            if measure == 'distance-to-diagonal':
                measures_values.append(
                    calc_measure_dtd(matrix, loci_struct[i])
                )
            if measure == 'size':
                measures_values.append(
                    calc_measure_size(matrix, loci_struct[i])
                )
            if measure == 'noise':
                measures_values.append(calc_measure_noise(matrix))
            if measure == 'sharpness':
                measures_values.append(calc_measure_sharpness(matrix))
        frag_obj = {
            # 'matrix': matrix.tolist()
        }
        frag_obj.update(loci_struct[i])
        frag_obj.update({
            "measures": measures_values
        })
        fragments.append(frag_obj)
        i += 1
    # Create results
    results = {
        'count': matrices.shape[0],
        'dims': matrices.shape[1],
        'fragments': fragments,
        'measures': measures_applied,
        'relativeLoci': True,
        'zoomoutLevel': zoomout_level
    }
    # For config output, flatten each fragment into a row of values whose
    # columns are described by 'fragmentsHeader'.
    if for_config:
        results['fragmentsHeader'] = [
            'chrom1',
            'start1',
            'end1',
            'strand1',
            'chrom2',
            'start2',
            'end2',
            'strand2'
        ] + measures_applied
        fragments_arr = []
        for fragment in fragments:
            tmp = [
                fragment['chrom1'],
                fragment['start1'],
                fragment['end1'],
                fragment['strand1'],
                fragment['chrom2'],
                fragment['start2'],
                fragment['end2'],
                fragment['strand2'],
            ] + fragment['measures']
            fragments_arr.append(tmp)
        results['fragments'] = fragments_arr
    # Cache results for an hour
    cache.set('frag_by_chrom_%s' % uuid, results, 60 * 60)
    return JsonResponse(results)
@api_view(['GET'])
@authentication_classes((CsrfExemptSessionAuthentication, BasicAuthentication))
def loci(request):
    """List intra-chromosomal loop loci of ``chrom`` from the given loop list."""
    chrom = request.GET.get('chrom', False)
    loop_list = request.GET.get('loop-list', False)
    # Extract chromosome-relative loci and pair them with their chromosomes.
    rel, chroms = get_intra_chr_loops_from_looplist(
        path.join('data', loop_list), chrom
    )
    stacked = np.column_stack(
        (chroms[:, 0], rel[:, 0:2], chroms[:, 1], rel[:, 2:4])
    )
    return JsonResponse({'loci': rel_loci_2_obj(stacked)})
@api_view(['GET'])
@authentication_classes((CsrfExemptSessionAuthentication, BasicAuthentication))
def chromInfo(request):
    """Serve the chromosome-info JSON referenced by the ``coords`` uuid.

    Best effort: any lookup, read, or parse failure yields an empty JSON object.
    """
    coords = request.GET.get('coords', False)
    try:
        record = ChromInfo.objects.get(uuid=coords)
        with open(record.datafile) as handle:
            payload = json.load(handle)
    except Exception:
        # Deliberately best-effort: unknown uuid or unreadable file -> {}.
        return JsonResponse({})
    return JsonResponse(payload)
@api_view(['GET'])
@authentication_classes((CsrfExemptSessionAuthentication, BasicAuthentication))
def chromSizes(request):
    """Serve the raw chrom-sizes lines referenced by the ``coords`` uuid.

    Returns an HttpResponse containing the file's lines, or an error payload
    when the record is missing or the file cannot be read.
    """
    coords = request.GET.get('coords', False)
    try:
        chrom_sizes = ChromSizes.objects.get(uuid=coords)
    except Exception:
        # BUG FIX: the dict keys below were bare names (`error`), which raised
        # a NameError whenever an error branch executed; they must be string
        # literals.
        return HttpResponse({
            'error': 'ChromSizes for %s not found' % coords
        })
    try:
        with open(chrom_sizes.datafile) as f:
            data = f.readlines()
    except Exception:
        return HttpResponse({
            'error': 'Could not load file %s' % chrom_sizes.datafile
        })
    return HttpResponse(data)
| [
"fragments.utils.rel_loci_2_obj",
"django.http.JsonResponse",
"json.dumps",
"rest_framework.decorators.authentication_classes",
"numpy.around",
"fragments.models.ChromInfo.objects.get",
"os.path.join",
"fragments.models.ChromSizes.objects.get",
"django.http.HttpResponse",
"fragments.utils.calc_mea... | [((744, 771), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (761, 771), False, 'import logging\n'), ((852, 870), 'rest_framework.decorators.api_view', 'api_view', (["['POST']"], {}), "(['POST'])\n", (860, 870), False, 'from rest_framework.decorators import api_view, authentication_classes\n'), ((872, 950), 'rest_framework.decorators.authentication_classes', 'authentication_classes', (['(CsrfExemptSessionAuthentication, BasicAuthentication)'], {}), '((CsrfExemptSessionAuthentication, BasicAuthentication))\n', (894, 950), False, 'from rest_framework.decorators import api_view, authentication_classes\n'), ((4801, 4818), 'rest_framework.decorators.api_view', 'api_view', (["['GET']"], {}), "(['GET'])\n", (4809, 4818), False, 'from rest_framework.decorators import api_view, authentication_classes\n'), ((4820, 4898), 'rest_framework.decorators.authentication_classes', 'authentication_classes', (['(CsrfExemptSessionAuthentication, BasicAuthentication)'], {}), '((CsrfExemptSessionAuthentication, BasicAuthentication))\n', (4842, 4898), False, 'from rest_framework.decorators import api_view, authentication_classes\n'), ((9974, 9991), 'rest_framework.decorators.api_view', 'api_view', (["['GET']"], {}), "(['GET'])\n", (9982, 9991), False, 'from rest_framework.decorators import api_view, authentication_classes\n'), ((9993, 10071), 'rest_framework.decorators.authentication_classes', 'authentication_classes', (['(CsrfExemptSessionAuthentication, BasicAuthentication)'], {}), '((CsrfExemptSessionAuthentication, BasicAuthentication))\n', (10015, 10071), False, 'from rest_framework.decorators import api_view, authentication_classes\n'), ((10570, 10587), 'rest_framework.decorators.api_view', 'api_view', (["['GET']"], {}), "(['GET'])\n", (10578, 10587), False, 'from rest_framework.decorators import api_view, authentication_classes\n'), ((10589, 10667), 'rest_framework.decorators.authentication_classes', 
'authentication_classes', (['(CsrfExemptSessionAuthentication, BasicAuthentication)'], {}), '((CsrfExemptSessionAuthentication, BasicAuthentication))\n', (10611, 10667), False, 'from rest_framework.decorators import api_view, authentication_classes\n'), ((11043, 11060), 'rest_framework.decorators.api_view', 'api_view', (["['GET']"], {}), "(['GET'])\n", (11051, 11060), False, 'from rest_framework.decorators import api_view, authentication_classes\n'), ((11062, 11140), 'rest_framework.decorators.authentication_classes', 'authentication_classes', (['(CsrfExemptSessionAuthentication, BasicAuthentication)'], {}), '((CsrfExemptSessionAuthentication, BasicAuthentication))\n', (11084, 11140), False, 'from rest_framework.decorators import api_view, authentication_classes\n'), ((4706, 4763), 'django.core.cache.cache.set', 'cache.set', (["('frag_by_loci_%s' % uuid)", 'results', '(60 * 60 * 8)'], {}), "('frag_by_loci_%s' % uuid, results, 60 * 60 * 8)\n", (4715, 4763), False, 'from django.core.cache import cache\n'), ((4776, 4797), 'django.http.JsonResponse', 'JsonResponse', (['results'], {}), '(results)\n', (4788, 4797), False, 'from django.http import HttpResponse, JsonResponse\n'), ((7128, 7214), 'numpy.column_stack', 'np.column_stack', (['(chroms[:, 0], loci_rel[:, 0:2], chroms[:, 1], loci_rel[:, 2:4])'], {}), '((chroms[:, 0], loci_rel[:, 0:2], chroms[:, 1], loci_rel[:, \n 2:4]))\n', (7143, 7214), True, 'import numpy as np\n'), ((7754, 7785), 'fragments.utils.rel_loci_2_obj', 'rel_loci_2_obj', (['loci_rel_chroms'], {}), '(loci_rel_chroms)\n', (7768, 7785), False, 'from fragments.utils import calc_measure_dtd, calc_measure_size, calc_measure_noise, calc_measure_sharpness, get_frag_by_loc, get_intra_chr_loops_from_looplist, rel_loci_2_obj\n'), ((9882, 9936), 'django.core.cache.cache.set', 'cache.set', (["('frag_by_chrom_%s' % uuid)", 'results', '(60 * 60)'], {}), "('frag_by_chrom_%s' % uuid, results, 60 * 60)\n", (9891, 9936), False, 'from django.core.cache import cache\n'), 
((9949, 9970), 'django.http.JsonResponse', 'JsonResponse', (['results'], {}), '(results)\n', (9961, 9970), False, 'from django.http import HttpResponse, JsonResponse\n'), ((10345, 10431), 'numpy.column_stack', 'np.column_stack', (['(chroms[:, 0], loci_rel[:, 0:2], chroms[:, 1], loci_rel[:, 2:4])'], {}), '((chroms[:, 0], loci_rel[:, 0:2], chroms[:, 1], loci_rel[:, \n 2:4]))\n', (10360, 10431), True, 'import numpy as np\n'), ((10545, 10566), 'django.http.JsonResponse', 'JsonResponse', (['results'], {}), '(results)\n', (10557, 10566), False, 'from django.http import HttpResponse, JsonResponse\n'), ((11021, 11039), 'django.http.JsonResponse', 'JsonResponse', (['data'], {}), '(data)\n', (11033, 11039), False, 'from django.http import HttpResponse, JsonResponse\n'), ((11642, 11660), 'django.http.HttpResponse', 'HttpResponse', (['data'], {}), '(data)\n', (11654, 11660), False, 'from django.http import HttpResponse, JsonResponse\n'), ((3330, 3372), 'django.core.cache.cache.get', 'cache.get', (["('frag_by_loci_%s' % uuid)", '(False)'], {}), "('frag_by_loci_%s' % uuid, False)\n", (3339, 3372), False, 'from django.core.cache import cache\n'), ((5494, 5558), 'django.http.JsonResponse', 'JsonResponse', (["{'error': 'Cooler file not specified'}"], {'status': '(500)'}), "({'error': 'Cooler file not specified'}, status=500)\n", (5506, 5558), False, 'from django.http import HttpResponse, JsonResponse\n'), ((6631, 6674), 'django.core.cache.cache.get', 'cache.get', (["('frag_by_chrom_%s' % uuid)", '(False)'], {}), "('frag_by_chrom_%s' % uuid, False)\n", (6640, 6674), False, 'from django.core.cache import cache\n'), ((7342, 7416), 'fragments.utils.get_frag_by_loc', 'get_frag_by_loc', (['cooler_file', 'loci_rel_chroms'], {'zoomout_level': 'zoomout_level'}), '(cooler_file, loci_rel_chroms, zoomout_level=zoomout_level)\n', (7357, 7416), False, 'from fragments.utils import calc_measure_dtd, calc_measure_size, calc_measure_noise, calc_measure_sharpness, get_frag_by_loc, 
get_intra_chr_loops_from_looplist, rel_loci_2_obj\n'), ((7675, 7714), 'numpy.around', 'np.around', (['matrices'], {'decimals': 'precision'}), '(matrices, decimals=precision)\n', (7684, 7714), True, 'import numpy as np\n'), ((10280, 10308), 'os.path.join', 'path.join', (['"""data"""', 'loop_list'], {}), "('data', loop_list)\n", (10289, 10308), False, 'from os import path\n'), ((10495, 10526), 'fragments.utils.rel_loci_2_obj', 'rel_loci_2_obj', (['loci_rel_chroms'], {}), '(loci_rel_chroms)\n', (10509, 10526), False, 'from fragments.utils import calc_measure_dtd, calc_measure_size, calc_measure_noise, calc_measure_sharpness, get_frag_by_loc, get_intra_chr_loops_from_looplist, rel_loci_2_obj\n'), ((10769, 10803), 'fragments.models.ChromInfo.objects.get', 'ChromInfo.objects.get', ([], {'uuid': 'coords'}), '(uuid=coords)\n', (10790, 10803), False, 'from fragments.models import ChromInfo, ChromSizes\n'), ((11244, 11279), 'fragments.models.ChromSizes.objects.get', 'ChromSizes.objects.get', ([], {'uuid': 'coords'}), '(uuid=coords)\n', (11266, 11279), False, 'from fragments.models import ChromInfo, ChromSizes\n'), ((3413, 3434), 'django.http.JsonResponse', 'JsonResponse', (['results'], {}), '(results)\n', (3425, 3434), False, 'from django.http import HttpResponse, JsonResponse\n'), ((4447, 4463), 'operator.itemgetter', 'itemgetter', (['"""id"""'], {}), "('id')\n", (4457, 4463), False, 'from operator import itemgetter\n'), ((5166, 5196), 'os.path.join', 'path.join', (['"""data"""', 'cooler_file'], {}), "('data', cooler_file)\n", (5175, 5196), False, 'from os import path\n'), ((6715, 6736), 'django.http.JsonResponse', 'JsonResponse', (['results'], {}), '(results)\n', (6727, 6736), False, 'from django.http import HttpResponse, JsonResponse\n'), ((6847, 6875), 'os.path.join', 'path.join', (['"""data"""', 'loop_list'], {}), "('data', loop_list)\n", (6856, 6875), False, 'from os import path\n'), ((10846, 10862), 'django.http.JsonResponse', 'JsonResponse', (['{}'], {}), '({})\n', 
(10858, 10862), False, 'from django.http import HttpResponse, JsonResponse\n'), ((10937, 10949), 'json.load', 'json.load', (['f'], {}), '(f)\n', (10946, 10949), False, 'import json\n'), ((10992, 11008), 'django.http.JsonResponse', 'JsonResponse', (['{}'], {}), '({})\n', (11004, 11008), False, 'from django.http import HttpResponse, JsonResponse\n'), ((11322, 11383), 'django.http.HttpResponse', 'HttpResponse', (["{error: 'ChromSizes for %s not found' % coords}"], {}), "({error: 'ChromSizes for %s not found' % coords})\n", (11334, 11383), False, 'from django.http import HttpResponse, JsonResponse\n'), ((11537, 11607), 'django.http.HttpResponse', 'HttpResponse', (["{error: 'Could not load file %s' % chrom_sizes.datafile}"], {}), "({error: 'Could not load file %s' % chrom_sizes.datafile})\n", (11549, 11607), False, 'from django.http import HttpResponse, JsonResponse\n'), ((2529, 2603), 'django.http.JsonResponse', 'JsonResponse', (["{'error': 'Dataset (cooler file) not specified'}"], {'status': '(500)'}), "({'error': 'Dataset (cooler file) not specified'}, status=500)\n", (2541, 2603), False, 'from django.http import HttpResponse, JsonResponse\n'), ((3583, 3712), 'fragments.utils.get_frag_by_loc', 'get_frag_by_loc', (['dataset', 'loci_lists[dataset][zoomout_level]'], {'zoomout_level': 'zoomout_level', 'dim': 'dims', 'balanced': '(not no_balance)'}), '(dataset, loci_lists[dataset][zoomout_level], zoomout_level=\n zoomout_level, dim=dims, balanced=not no_balance)\n', (3598, 3712), False, 'from fragments.utils import calc_measure_dtd, calc_measure_size, calc_measure_noise, calc_measure_sharpness, get_frag_by_loc, get_intra_chr_loops_from_looplist, rel_loci_2_obj\n'), ((2071, 2098), 'os.path.join', 'path.join', (['"""data"""', 'locus[6]'], {}), "('data', locus[6])\n", (2080, 2098), False, 'from os import path\n'), ((3896, 3939), 'numpy.around', 'np.around', (['raw_matrices'], {'decimals': 'precision'}), '(raw_matrices, decimals=precision)\n', (3905, 3939), True, 'import 
numpy as np\n'), ((5258, 5295), 'tilesets.models.Tileset.objects.get', 'Tileset.objects.get', ([], {'uuid': 'cooler_file'}), '(uuid=cooler_file)\n', (5277, 5295), False, 'from tilesets.models import Tileset\n'), ((5363, 5429), 'django.http.JsonResponse', 'JsonResponse', (["{'error': 'Cooler file not in database'}"], {'status': '(500)'}), "({'error': 'Cooler file not in database'}, status=500)\n", (5375, 5429), False, 'from django.http import HttpResponse, JsonResponse\n'), ((8172, 8212), 'fragments.utils.calc_measure_dtd', 'calc_measure_dtd', (['matrix', 'loci_struct[i]'], {}), '(matrix, loci_struct[i])\n', (8188, 8212), False, 'from fragments.utils import calc_measure_dtd, calc_measure_size, calc_measure_noise, calc_measure_sharpness, get_frag_by_loc, get_intra_chr_loops_from_looplist, rel_loci_2_obj\n'), ((8326, 8367), 'fragments.utils.calc_measure_size', 'calc_measure_size', (['matrix', 'loci_struct[i]'], {}), '(matrix, loci_struct[i])\n', (8343, 8367), False, 'from fragments.utils import calc_measure_dtd, calc_measure_size, calc_measure_noise, calc_measure_sharpness, get_frag_by_loc, get_intra_chr_loops_from_looplist, rel_loci_2_obj\n'), ((8461, 8487), 'fragments.utils.calc_measure_noise', 'calc_measure_noise', (['matrix'], {}), '(matrix)\n', (8479, 8487), False, 'from fragments.utils import calc_measure_dtd, calc_measure_size, calc_measure_noise, calc_measure_sharpness, get_frag_by_loc, get_intra_chr_loops_from_looplist, rel_loci_2_obj\n'), ((8568, 8598), 'fragments.utils.calc_measure_sharpness', 'calc_measure_sharpness', (['matrix'], {}), '(matrix)\n', (8590, 8598), False, 'from fragments.utils import calc_measure_dtd, calc_measure_size, calc_measure_noise, calc_measure_sharpness, get_frag_by_loc, get_intra_chr_loops_from_looplist, rel_loci_2_obj\n'), ((3175, 3207), 'json.dumps', 'json.dumps', (['loci'], {'sort_keys': '(True)'}), '(loci, sort_keys=True)\n', (3185, 3207), False, 'import json\n'), ((2184, 2218), 'tilesets.models.Tileset.objects.get', 
'Tileset.objects.get', ([], {'uuid': 'locus[6]'}), '(uuid=locus[6])\n', (2203, 2218), False, 'from tilesets.models import Tileset\n'), ((2356, 2432), 'django.http.JsonResponse', 'JsonResponse', (["{'error': 'Dataset (cooler file) not in database'}"], {'status': '(500)'}), "({'error': 'Dataset (cooler file) not in database'}, status=500)\n", (2368, 2432), False, 'from django.http import HttpResponse, JsonResponse\n')] |
import numpy as np
import cirq
import matplotlib.pyplot as plt
class QftInverse(cirq.Gate):
    """Quantum gate for the inverse Quantum Fourier Transformation.

    Works on an arbitrary number of qubits; the decomposition applies a
    Hadamard to each qubit followed by negative-power controlled-Z
    rotations against every later qubit.
    """

    def __init__(self, num_qubits):
        # BUG FIX: the original evaluated ``super(QftInverse, self)`` without
        # calling ``__init__`` on it (a no-op); the base initializer now runs.
        super().__init__()
        self._num_qubits = num_qubits

    def num_qubits(self):
        return self._num_qubits

    def _decompose_(self, qubits):
        """A quantum circuit (QFT_inv) with the following structure.

        ---H--@-------@--------@----------------------------------------------
              |       |        |
        ------@^-0.5--+--------+---------H--@-------@-------------------------
                      |        |            |       |
        --------------@^-0.25--+------------@^-0.5--+---------H--@------------
                               |                    |            |
        -----------------------@^-0.125-------------@^-0.25------@^-0.5---H---

        The number of qubits can be arbitrary.
        """
        qubits = list(qubits)
        while len(qubits) > 0:
            q_head = qubits.pop(0)
            yield cirq.H(q_head)
            # Controlled-Z with exponent -1/2^(i+1) against each later qubit.
            for i, qubit in enumerate(qubits):
                yield (cirq.CZ**(-1/2.0**(i+1)))(qubit, q_head)

    def __str__(self):
        return '-QFT_inv'
"""
class unitary_gate(cirq.Gate):
def __init__(self, num_qubits, total_phase,power):
super(unitary_gate, self)
self._num_qubits = num_qubits
self.total_phase = total_phase
self.power = power
def num_qubits(self):
return self._num_qubits
def _unitary_(self):
#print([excited_phase_converter(i)/(2*np.pi) for i in self.excited_phase])
#raise Exception
U = np.array(np.diag([ np.exp(-1j * 2 * np.pi * phase_converter(i)) for i in self.total_phase]))
#U = np.array(np.diag([np.exp(-1j*2*np.pi*k/4) for k in range(4)]))
return U**(2**self.power)
def matrix(self):
gate = cirq.Gate(matrix = self._unitary_)
return gate
def __str__(self):
return '-U^(2^{})'.format(self.power)
"""
def two_qubit(power, total_phase):
    """Build the two-qubit diagonal unitary U^(2^power) for the given phase strings."""
    # Each bit string in ``total_phase`` becomes a binary fraction in [0, 1).
    diag_entries = []
    for bits in total_phase:
        diag_entries.append(np.exp(2 * np.pi * 1.0j * phase_converter(bits)))
    # Element-wise power equals the matrix power for a diagonal matrix.
    U = np.array(np.diag(diag_entries)) ** (2 ** power)
    return cirq.TwoQubitMatrixGate(
        matrix=U)
def one_qubit(power, total_phase):
    """Build the single-qubit diagonal unitary U^(2^power) for the given phase strings."""
    entries = [np.exp(2 * np.pi * 1.0j * phase_converter(bits)) for bits in total_phase]
    # Diagonal matrix, so the element-wise power is the matrix power.
    U = np.array(np.diag(entries)) ** (2 ** power)
    return cirq.SingleQubitMatrixGate(
        matrix=U)
class PEA(cirq.Gate):
    """Phase-estimation gate: Hadamards, controlled powers of U, inverse QFT.

    NOTE(review): ``_decompose_`` still reads the module-level ``N`` (the
    number of state qubits); confirm against the driver code before reusing
    this class elsewhere.
    """

    def __init__(self, num_qubits, total_phase):
        # BUG FIX: call the base initializer (the original evaluated
        # ``super(PEA, self)`` as a no-op).
        super().__init__()
        self._num_qubits = num_qubits
        self.total_phase = total_phase

    def num_qubits(self):
        return self._num_qubits

    def _decompose_(self, qubits):
        # Superpose the register qubits; scramble the state qubits.
        yield cirq.H.on_each(*qubits[:-N])
        yield cirq.H.on_each(*qubits[-N:])  # Initializing/scrambling Unitary
        # BUG FIX: use the phases stored on the instance instead of silently
        # relying on a module-level ``total_phase`` variable.
        if N == 1:
            for i, q in enumerate(qubits[:-N][::-1]):
                yield one_qubit(i, self.total_phase)(*qubits[-N:]).controlled_by(q)
        elif N == 2:
            for i, q in enumerate(qubits[:-N][::-1]):
                yield two_qubit(i, self.total_phase)(*qubits[-N:]).controlled_by(q)
        yield QftInverse(self._num_qubits - N)(*qubits[:-N])

    def __str__(self):
        return 'PEA'
class Or_gate(cirq.Gate):
    """Accumulates a logical OR of register qubits onto ancilla qubits.

    Expects ``precision`` input qubits followed by the ancillae; X gates
    invert the inputs, then a Toffoli cascade builds the OR (De Morgan
    construction). The last ancilla carries the OR result.
    """

    def __init__(self, num_qubits, precision):
        # BUG FIX: call the base initializer (the original evaluated
        # ``super(Or_gate, self)`` as a no-op).
        super().__init__()
        self._num_qubits = num_qubits
        self.precision = precision

    def num_qubits(self):
        return self._num_qubits

    def _decompose_(self, qubits):
        yield cirq.X.on_each(*qubits)
        for i in range(self.precision - 1):
            # Chain: AND each pair of (inverted) qubits into the next ancilla.
            yield cirq.TOFFOLI.on(*qubits[2 * i:2 * (i + 1)], qubits[self.precision + i])
            if i != self.precision - 2:
                # Re-invert intermediate ancillae; the final one keeps OR polarity.
                yield cirq.X.on(qubits[self.precision + i])

    def __str__(self):
        return 'OR'
def phase_converter(phase):
    """Interpret a string of binary digits as a binary fraction.

    Examples: '1' -> 0.5, '01' -> 0.25, '11' -> 0.75, '' -> 0.
    """
    return sum(int(bit) * 2.0 ** -(pos + 1) for pos, bit in enumerate(phase))
def display_energy_levels(total_phase):
    """Draw one horizontal line per decoded phase on a fresh figure."""
    plt.clf()
    for level in (phase_converter(bits) for bits in total_phase):
        plt.axhline(level)
    plt.show()
def experiment(t,p,N,total_phase,print_circuit):
    """Simulate a p-round, t-ancilla PEA circuit and return a failure probability.

    Args:
        t: Number of register (ancilla) qubits per round.
        p: Number of PEA rounds.
        N: Number of state qubits (1 or 2 are supported by PEA._decompose_).
        total_phase: Binary-fraction strings encoding the eigenphases.
        print_circuit: If True, print the circuit and two decomposed moments.

    Returns:
        The summed probability mass outside the first
        2**(p*t + (p-1)*(t-1)) amplitudes, rounded to 4 decimals —
        interpreted by the caller as 1 - groundstate fidelity.
    """
    PEA_gate = PEA(t+N,total_phase) # create GateOperation with correct n_qubits
    n_register_qubits = p*t
    n_state_qubits = N
    n_or_qubits = (p-1) * (t-1)
    register_qubits = [cirq.GridQubit(0, i) for i in range(n_register_qubits)]
    or_qubits = [cirq.GridQubit(1,i) for i in range(n_or_qubits)]
    state_qubits = [cirq.GridQubit(2, i) for i in range(n_state_qubits)] # use grid to separate register and state qubits
    ## Build Circuit
    circuit = cirq.Circuit()
    circuit.append(PEA_gate(*register_qubits[:t], *state_qubits)) # first step of the algorithm. Use first ancillae first
    for i in range(1, p):
        if t == 1:
            circuit.append(PEA_gate(*register_qubits[i*t:(i+1)*t], # Use next batch of ancillae
                           *state_qubits).controlled_by(register_qubits[i-1]))
        else:
            # Combine the previous round's register via OR onto an ancilla,
            # then condition the next PEA round on that ancilla.
            circuit.append(Or_gate(2*t -1,t)(*register_qubits[(i-1)*t:i*t], *or_qubits[(i - 1) * (t - 1): i * (t-1)]))
            circuit.append(PEA_gate(*register_qubits[i*t:(i+1)*t],
                           *state_qubits).controlled_by(or_qubits[i * (t-1) -1]))
    if print_circuit:
        print('Full Circuit:\n',circuit)
        first_moment = cirq.Circuit.from_ops(cirq.decompose_once(cirq.decompose_once(cirq.Circuit((circuit[0],)))[0]))
        print('First application of PEA:\n', first_moment)
        if p > 1:
            second_moment = cirq.Circuit.from_ops(cirq.decompose_once(cirq.decompose_once(cirq.Circuit((circuit[2],)))[0]))
            print('Second application of PEA:\n', second_moment)
    total_qubits = register_qubits + or_qubits + state_qubits
    simulator = cirq.Simulator()
    result = simulator.simulate(circuit,qubit_order = total_qubits[::-1])
    state_vector = result.final_state
    # Elementwise |amplitude|^2 (the .T is a no-op on a 1-D vector);
    # sum the tail beyond the register/OR-qubit subspace.
    prob_of_failure = np.real(np.sum((state_vector * state_vector.conj().T)[2**( (p * t) + (p-1)*(t-1) ):]))
    return np.round(prob_of_failure,4)
# --- Driver: groundstate fidelity vs. total ancilla count ------------------
p = 7 # number of rounds
t = 2 # number of ancillae
# Eigenphases of a 1-qubit Hamiltonian, encoded as binary fractions.
total_phase = ['0','01']
N = int( np.log2(len(total_phase))) # number of state qubits
Success = []
for i in range(1,p+1):
    # experiment() returns a failure probability; plot the complement.
    Success.append(1-experiment(t,i,N,total_phase,print_circuit= False))
    print('{} / {}'.format(i,p))
# x-axis positions: effective ancilla count per run (matches xlim below).
xint = [int(2*i*t - (t+i-1)) for i in range(1,p+1)]
plt.plot(xint,Success,label = '1 qubit Hamiltonian')
print(Success)
# Repeat for a 2-qubit Hamiltonian (four eigenphases).
total_phase = ['0','01','10','11']
N = int( np.log2(len(total_phase))) # number of state qubits
Success = []
for i in range(1,p+1):
    Success.append(1-experiment(t,i,N,total_phase,print_circuit= False))
    print('{} / {}'.format(i,p))
print(Success)
plt.plot(xint,Success,label = '2 qubit Hamiltonian')
plt.xticks(xint)
plt.axhline(1,linestyle = 'dashed',color = 'black')
plt.xlim([t, 2*p*t - (t+p-1)])
plt.xlabel('$Number \ of \ Ancillae$')
plt.ylabel('$Groundstate \ Fidelity, \ \ F(|\\psi_G\\rangle,|\\psi_N\\rangle)$')
plt.legend()
plt.savefig('Proposal_PEA.pdf')
plt.show()
| [
"matplotlib.pyplot.clf",
"numpy.exp",
"numpy.round",
"cirq.TwoQubitMatrixGate",
"cirq.Simulator",
"cirq.X.on_each",
"matplotlib.pyplot.xticks",
"matplotlib.pyplot.axhline",
"matplotlib.pyplot.show",
"matplotlib.pyplot.legend",
"cirq.GridQubit",
"cirq.H.on_each",
"cirq.H",
"cirq.SingleQubit... | [((6752, 6804), 'matplotlib.pyplot.plot', 'plt.plot', (['xint', 'Success'], {'label': '"""1 qubit Hamiltonian"""'}), "(xint, Success, label='1 qubit Hamiltonian')\n", (6760, 6804), True, 'import matplotlib.pyplot as plt\n'), ((7076, 7128), 'matplotlib.pyplot.plot', 'plt.plot', (['xint', 'Success'], {'label': '"""2 qubit Hamiltonian"""'}), "(xint, Success, label='2 qubit Hamiltonian')\n", (7084, 7128), True, 'import matplotlib.pyplot as plt\n'), ((7129, 7145), 'matplotlib.pyplot.xticks', 'plt.xticks', (['xint'], {}), '(xint)\n', (7139, 7145), True, 'import matplotlib.pyplot as plt\n'), ((7146, 7195), 'matplotlib.pyplot.axhline', 'plt.axhline', (['(1)'], {'linestyle': '"""dashed"""', 'color': '"""black"""'}), "(1, linestyle='dashed', color='black')\n", (7157, 7195), True, 'import matplotlib.pyplot as plt\n'), ((7198, 7236), 'matplotlib.pyplot.xlim', 'plt.xlim', (['[t, 2 * p * t - (t + p - 1)]'], {}), '([t, 2 * p * t - (t + p - 1)])\n', (7206, 7236), True, 'import matplotlib.pyplot as plt\n'), ((7229, 7269), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""$Number \\\\ of \\\\ Ancillae$"""'], {}), "('$Number \\\\ of \\\\ Ancillae$')\n", (7239, 7269), True, 'import matplotlib.pyplot as plt\n'), ((7268, 7356), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""$Groundstate \\\\ Fidelity, \\\\ \\\\ F(|\\\\psi_G\\\\rangle,|\\\\psi_N\\\\rangle)$"""'], {}), "(\n '$Groundstate \\\\ Fidelity, \\\\ \\\\ F(|\\\\psi_G\\\\rangle,|\\\\psi_N\\\\rangle)$')\n", (7278, 7356), True, 'import matplotlib.pyplot as plt\n'), ((7349, 7361), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (7359, 7361), True, 'import matplotlib.pyplot as plt\n'), ((7362, 7393), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""Proposal_PEA.pdf"""'], {}), "('Proposal_PEA.pdf')\n", (7373, 7393), True, 'import matplotlib.pyplot as plt\n'), ((7394, 7404), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (7402, 7404), True, 'import matplotlib.pyplot as plt\n'), ((2259, 2292), 
'cirq.TwoQubitMatrixGate', 'cirq.TwoQubitMatrixGate', ([], {'matrix': 'U'}), '(matrix=U)\n', (2282, 2292), False, 'import cirq\n'), ((2558, 2594), 'cirq.SingleQubitMatrixGate', 'cirq.SingleQubitMatrixGate', ([], {'matrix': 'U'}), '(matrix=U)\n', (2584, 2594), False, 'import cirq\n'), ((4302, 4311), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (4309, 4311), True, 'import matplotlib.pyplot as plt\n'), ((4429, 4439), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4437, 4439), True, 'import matplotlib.pyplot as plt\n'), ((4962, 4976), 'cirq.Circuit', 'cirq.Circuit', ([], {}), '()\n', (4974, 4976), False, 'import cirq\n'), ((6138, 6154), 'cirq.Simulator', 'cirq.Simulator', ([], {}), '()\n', (6152, 6154), False, 'import cirq\n'), ((6388, 6416), 'numpy.round', 'np.round', (['prob_of_failure', '(4)'], {}), '(prob_of_failure, 4)\n', (6396, 6416), True, 'import numpy as np\n'), ((4410, 4424), 'matplotlib.pyplot.axhline', 'plt.axhline', (['i'], {}), '(i)\n', (4421, 4424), True, 'import matplotlib.pyplot as plt\n'), ((4680, 4700), 'cirq.GridQubit', 'cirq.GridQubit', (['(0)', 'i'], {}), '(0, i)\n', (4694, 4700), False, 'import cirq\n'), ((4753, 4773), 'cirq.GridQubit', 'cirq.GridQubit', (['(1)', 'i'], {}), '(1, i)\n', (4767, 4773), False, 'import cirq\n'), ((4822, 4842), 'cirq.GridQubit', 'cirq.GridQubit', (['(2)', 'i'], {}), '(2, i)\n', (4836, 4842), False, 'import cirq\n'), ((2914, 2942), 'cirq.H.on_each', 'cirq.H.on_each', (['*qubits[:-N]'], {}), '(*qubits[:-N])\n', (2928, 2942), False, 'import cirq\n'), ((2958, 2986), 'cirq.H.on_each', 'cirq.H.on_each', (['*qubits[-N:]'], {}), '(*qubits[-N:])\n', (2972, 2986), False, 'import cirq\n'), ((3711, 3734), 'cirq.X.on_each', 'cirq.X.on_each', (['*qubits'], {}), '(*qubits)\n', (3725, 3734), False, 'import cirq\n'), ((1085, 1099), 'cirq.H', 'cirq.H', (['q_head'], {}), '(q_head)\n', (1091, 1099), False, 'import cirq\n'), ((2190, 2218), 'numpy.exp', 'np.exp', (['(2 * np.pi * 1.0j * i)'], {}), '(2 * np.pi * 1.0j * 
i)\n', (2196, 2218), True, 'import numpy as np\n'), ((2452, 2480), 'numpy.exp', 'np.exp', (['(2 * np.pi * 1.0j * i)'], {}), '(2 * np.pi * 1.0j * i)\n', (2458, 2480), True, 'import numpy as np\n'), ((3840, 3911), 'cirq.TOFFOLI.on', 'cirq.TOFFOLI.on', (['*qubits[2 * i:2 * (i + 1)]', 'qubits[self.precision + i]'], {}), '(*qubits[2 * i:2 * (i + 1)], qubits[self.precision + i])\n', (3855, 3911), False, 'import cirq\n'), ((3926, 3963), 'cirq.X.on', 'cirq.X.on', (['qubits[self.precision + i]'], {}), '(qubits[self.precision + i])\n', (3935, 3963), False, 'import cirq\n'), ((4002, 4073), 'cirq.TOFFOLI.on', 'cirq.TOFFOLI.on', (['*qubits[2 * i:2 * (i + 1)]', 'qubits[self.precision + i]'], {}), '(*qubits[2 * i:2 * (i + 1)], qubits[self.precision + i])\n', (4017, 4073), False, 'import cirq\n'), ((5757, 5784), 'cirq.Circuit', 'cirq.Circuit', (['(circuit[0],)'], {}), '((circuit[0],))\n', (5769, 5784), False, 'import cirq\n'), ((5958, 5985), 'cirq.Circuit', 'cirq.Circuit', (['(circuit[2],)'], {}), '((circuit[2],))\n', (5970, 5985), False, 'import cirq\n')] |
# Random-forest classification of the iris dataset, with a
# feature-importance bar chart.
print(__doc__)  # prints the module docstring (None if the file has none)
# ref: http://www.agcross.com/blog/2015/02/05/random-forests-in-python-with-scikit-learn/
from sklearn.ensemble import RandomForestClassifier as RFC
from sklearn.datasets import load_iris
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
# Load iris into a DataFrame; add a random ~75/25 train/test split flag
# and the human-readable species label.
iris = load_iris()
df = pd.DataFrame(iris.data, columns=iris.feature_names)
df['is_train'] = np.random.uniform(0, 1, len(df)) <= .75
df['species'] = pd.Categorical.from_codes(iris.target, iris.target_names)
train, test = df[df['is_train']==True], df[df['is_train']==False]
features = df.columns[0:4]
# Fit a 50-tree forest on the training split.
forest = RFC(n_jobs=2, n_estimators=50)
y, _ = pd.factorize(train['species'])
forest.fit(train[features], y)
preds = iris.target_names[forest.predict(test[features])]
# Confusion matrix: actual vs. predicted species.
print(pd.crosstab(index=test['species'], columns=preds, rownames=['actual'], colnames=['preds']))
importances = forest.feature_importances_
indices = np.argsort(importances)
# Features: sepal length, sepal width, petal length, petal width
plt.figure(1)
plt.title('Feature Importances Test')
plt.barh(range(len(indices)), importances[indices], color='b', align='center')
plt.yticks(range(len(indices)), features[indices])
plt.xlabel('Relative Importance')
plt.show()
| [
"pandas.DataFrame",
"sklearn.datasets.load_iris",
"sklearn.ensemble.RandomForestClassifier",
"matplotlib.pyplot.title",
"matplotlib.pyplot.show",
"pandas.crosstab",
"pandas.Categorical.from_codes",
"numpy.argsort",
"matplotlib.pyplot.figure",
"pandas.factorize",
"matplotlib.pyplot.xlabel"
] | [((283, 294), 'sklearn.datasets.load_iris', 'load_iris', ([], {}), '()\n', (292, 294), False, 'from sklearn.datasets import load_iris\n'), ((300, 351), 'pandas.DataFrame', 'pd.DataFrame', (['iris.data'], {'columns': 'iris.feature_names'}), '(iris.data, columns=iris.feature_names)\n', (312, 351), True, 'import pandas as pd\n'), ((425, 482), 'pandas.Categorical.from_codes', 'pd.Categorical.from_codes', (['iris.target', 'iris.target_names'], {}), '(iris.target, iris.target_names)\n', (450, 482), True, 'import pandas as pd\n'), ((587, 617), 'sklearn.ensemble.RandomForestClassifier', 'RFC', ([], {'n_jobs': '(2)', 'n_estimators': '(50)'}), '(n_jobs=2, n_estimators=50)\n', (590, 617), True, 'from sklearn.ensemble import RandomForestClassifier as RFC\n'), ((625, 655), 'pandas.factorize', 'pd.factorize', (["train['species']"], {}), "(train['species'])\n", (637, 655), True, 'import pandas as pd\n'), ((897, 920), 'numpy.argsort', 'np.argsort', (['importances'], {}), '(importances)\n', (907, 920), True, 'import numpy as np\n'), ((943, 956), 'matplotlib.pyplot.figure', 'plt.figure', (['(1)'], {}), '(1)\n', (953, 956), True, 'import matplotlib.pyplot as plt\n'), ((957, 994), 'matplotlib.pyplot.title', 'plt.title', (['"""Feature Importances Test"""'], {}), "('Feature Importances Test')\n", (966, 994), True, 'import matplotlib.pyplot as plt\n'), ((1125, 1158), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Relative Importance"""'], {}), "('Relative Importance')\n", (1135, 1158), True, 'import matplotlib.pyplot as plt\n'), ((1159, 1169), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1167, 1169), True, 'import matplotlib.pyplot as plt\n'), ((752, 846), 'pandas.crosstab', 'pd.crosstab', ([], {'index': "test['species']", 'columns': 'preds', 'rownames': "['actual']", 'colnames': "['preds']"}), "(index=test['species'], columns=preds, rownames=['actual'],\n colnames=['preds'])\n", (763, 846), True, 'import pandas as pd\n')] |
from typing import List, Iterable, Callable
import numpy as np
from plotly import graph_objects as go
import plotly.express as px
import matplotlib.pyplot as plt
from matplotlib import offsetbox
from sklearn.manifold import TSNE
from sklearn.preprocessing import MinMaxScaler
def visualize_3d_data(x_data: np.ndarray,
                      y_data: np.ndarray,
                      categories: List[str],
                      scatter_alpha: float = 0.5) -> go.Figure:
    """Render labelled 3D points as an interactive scatter plot.

    Args:
        x_data: Array of shape (num_examples, 3).
        y_data: Integer labels of shape (num_examples,) matching `x_data`.
        categories: Category names, indexed by label value.
        scatter_alpha: Marker alpha; 0 is solid, 1 is fully transparent.

    Returns:
        A plotly Figure with one trace per category.
    """
    scene_layout = {
        'xaxis_title': 'x1',
        'yaxis_title': 'x2',
        'zaxis_title': 'x3',
        'aspectmode': 'data'  # fixed aspect ratio
    }
    figure = _add_scatter_dataset3d(go.Figure(),
                                    x_data,
                                    y_data,
                                    scatter_alpha=scatter_alpha,
                                    scatter_size=4,
                                    categories=categories)
    figure.update_layout(scene=scene_layout)
    return figure
def visualize_reduced_dataset3d(x_data: np.ndarray, y_data: np.ndarray,
                                categories: List[str]) -> go.Figure:
    """Render dimensionality-reduced data (<= 3 dims) in a 3D scatter.

    Missing columns are zero-padded so 1D/2D reductions still render
    in the 3D scene.

    Args:
        x_data: Array of shape (num_examples, n_dims) with n_dims <= 3.
        y_data: Integer labels of shape (num_examples,) matching `x_data`.
        categories: Category names, indexed by label value.

    Returns:
        A plotly Figure with one trace per category.
    """
    fig = go.Figure()
    missing_dims = 3 - x_data.shape[1]
    if missing_dims > 0:
        pad_columns = np.zeros((len(x_data), missing_dims))
        x_data = np.concatenate([x_data, pad_columns], axis=1)
    fig = _add_scatter_dataset3d(fig,
                                 x_data,
                                 y_data,
                                 scatter_alpha=0.5,
                                 scatter_size=4,
                                 categories=categories)
    fig.update_layout(
        scene={
            'xaxis_title': 'x1',
            'yaxis_title': 'x2',
            'zaxis_title': 'x3',
            'aspectmode': 'data'  # fixed aspect ratio
        })
    return fig
def _add_scatter_dataset3d(fig: go.Figure, x_data, y_data, scatter_alpha: float,
                           scatter_size: int, categories: List[str]):
    """Adds a Dataset3D scatter trace per category to `fig`.

    Args:
        fig: A Figure object
        x_data: Dataset3D features of shape (num_examples, 3)
        y_data: Dataset3D integer labels
        scatter_alpha: Transparency (0 solid, 1 invisible)
        scatter_size: Marker size
        categories: Category names of the data, ordered by indices

    Returns:
        The same Figure object with the scatter traces added.

    Raises:
        ValueError: If labels fall outside `range(len(categories))`
            or `x_data` is not 3-dimensional.
    """
    possible_labels = sorted(set(y_data))
    if not ((0 <= min(possible_labels)) and
            (max(possible_labels) < len(categories))):
        raise ValueError(
            f'Possible labels are not consistent with the labels {categories}')
    if x_data.shape[1] != 3:
        raise ValueError('Invalid number of dimensions')
    for label in possible_labels:
        category_name = categories[label]
        # Boolean mask selecting this category's rows.
        selected_indices = (y_data == label)
        x_selected = x_data[selected_indices]
        fig.add_trace(
            go.Scatter3d(x=x_selected[:, 0],
                         y=x_selected[:, 1],
                         z=x_selected[:, 2],
                         mode='markers',
                         name=category_name,
                         marker={
                             'size': scatter_size,
                             'opacity': 1 - scatter_alpha
                         }))
    return fig
def simple_line_plot(x, y, x_title: str, y_title: str, error=None) -> go.Figure:
    """Generates a simple line plot, optionally with a shaded error band.

    Args:
        x: x-axis data
        y: y-axis data
        x_title: x-axis title
        y_title: y-axis title
        error: Optional per-point error vector. If given, a continuous
            error band of +/- error around `y` is added.

    Returns:
        A go.Figure object with corresponding x and y data.
    """
    fig = go.Figure()
    fig.add_trace(go.Scatter(x=x, y=y, line={'color': 'rgb(0,100,80)'}))
    if error is not None:
        error_np = np.array(error)
        x_np = np.array(x)
        y_np = np.array(y)
        # Build the band as one closed polygon: walk the upper edge
        # left-to-right, then the lower edge right-to-left.
        # x, then x reversed
        xx = np.concatenate([x_np, x_np[::-1]], axis=0)
        # upper then lower
        yy = np.concatenate([y_np + error_np, y_np[::-1] - error_np[::-1]],
                            axis=0)
        fig.add_trace(
            go.Scatter(x=xx,
                       y=yy,
                       fill='toself',
                       fillcolor='rgba(0,100,80,0.2)',
                       line=dict(color='rgba(255,255,255,0)'),
                       hoverinfo="skip",
                       showlegend=False))
    fig.update_layout({'xaxis_title': x_title, 'yaxis_title': y_title})
    return fig
class MatplotlibAdapter:
    """Wrap a Matplotlib figure behind plotly's `write_image()` interface.

    Lets code written against plotly's go.Figure (i.e. code that only
    calls `write_image()`) also accept figures produced with
    Matplotlib's lower-level API.
    """

    def __init__(self, fig, ax):
        self.fig = fig
        self.ax = ax
        # Close the interactive window; the figure object stays usable.
        plt.close()

    def write_image(self, path_to_image):
        """Save the wrapped figure to `path_to_image`."""
        self.fig.savefig(path_to_image)
def visualize_fashion_mnist(x_data: np.ndarray, y_data: np.ndarray,
                            categories: List[str]):
    """Visualize the Fashion-MNIST dataset via a 2D t-SNE embedding.

    The 784-dimensional vectors are mapped to the plane with t-SNE and
    plotted with a scatter of thumbnail annotations.

    Args:
        x_data: Fashion-MNIST feature array of shape (N, 784).
        y_data: Labels for `x_data`, shape (N,).
        categories: Category names, ordered by label index.

    Returns:
        A MatplotlibAdapter object, which exposes `write_image()`
        just like go.Figure.

    Raises:
        ValueError: On mismatched lengths or a non-784-dim feature array.
    """
    if len(x_data) != len(y_data):
        raise ValueError('x_data and y_data must have the same length.')
    if x_data.shape[1] != 784:
        raise ValueError('x_data of Fashion-MNIST must be 784-dimensional')
    return _tsne_fashion_mnist_visualization(x_data, y_data, x_data, categories)
def get_visualize_reduced_dataset3d_fn(x_original: np.ndarray):
    """Get a function to visualize a reduced Dataset3D dataset.

    Args:
        x_original: The original features of shape (N, 3).

    Returns:
        A function taking `x_data, y_data, categories` that returns a
        pair of figures: the original 3D data and the reduced data.

    Raises:
        ValueError: If `x_original` is not 3-dimensional.
    """
    if x_original.shape[1] != 3:
        # Bug fix: the message previously said '784-dimensional',
        # copied from the Fashion-MNIST variant of this factory.
        raise ValueError('x_data of Dataset3D must be 3-dimensional')
    len_data = len(x_original)

    def visualize_reduced_dataset3d_fn(x_data: np.ndarray, y_data: np.ndarray,
                                       categories: List[str]):
        if len(x_data) != len_data:
            # Bug fix: added the missing space after 'got'.
            raise ValueError(f'Expecting data of length {len_data}, but got '
                             f'{len(x_data)}')
        # With original space
        fig1 = visualize_3d_data(x_original, y_data, categories)
        # With reduced space
        fig2 = visualize_reduced_dataset3d(x_data, y_data, categories)
        return fig1, fig2

    return visualize_reduced_dataset3d_fn
def get_visualize_reduced_fashion_mnist_fn(x_original: np.ndarray):
    """Build a visualizer for dimensionality-reduced Fashion-MNIST.

    The returned function maps both the original 784-dimensional data
    and the reduced data through t-SNE, rendering each with thumbnails
    of the original images.

    Args:
        x_original: The original features of shape (N, 784).

    Returns:
        A function taking `x_data, y_data, categories` and returning a
        pair of figure objects (original space, reduced space).

    Raises:
        ValueError: If `x_original` is not 784-dimensional.
    """
    if x_original.shape[1] != 784:
        raise ValueError('x_data of Fashion-MNIST must be 784-dimensional')
    len_data = len(x_original)

    def visualize_reduced_fashion_mnist(x_data: np.ndarray, y_data: np.ndarray,
                                        categories: List[str]):
        if len(x_data) != len_data:
            raise ValueError(f'Expecting data of length {len_data}, but got'
                             f'{len(x_data)}')
        # Original space first, then the reduced space.
        original_fig = _tsne_fashion_mnist_visualization(
            x_original, y_data, x_original, categories)
        reduced_fig = _tsne_fashion_mnist_visualization(
            x_data, y_data, x_original, categories)
        return original_fig, reduced_fig

    return visualize_reduced_fashion_mnist
def _tsne_fashion_mnist_visualization(x_data: np.ndarray, y_data: np.ndarray,
                                      x_original: np.ndarray,
                                      categories: List[str]):
    """Implementation of the Fashion-MNIST t-SNE visualization.

    Embeds `x_data` in 2D with t-SNE, scatters the points per category,
    and annotates well-separated points with 28x28 thumbnails taken
    from `x_original`. Note: the interface is not compatible with the
    visualization function interface used elsewhere in this module.
    """
    tsne = TSNE()
    transformed = tsne.fit_transform(x_data)
    assert len(transformed.shape) == 2
    fig, ax = plt.subplots()
    fig.set_size_inches(10, 10)
    # Normalize the embedding to [0, 1] so thumbnail spacing is scale-free.
    transformed = MinMaxScaler().fit_transform(transformed)
    possible_labels = sorted(set(y_data))
    if not ((0 <= min(possible_labels)) and
            (max(possible_labels) < len(categories))):
        raise ValueError(
            f'Possible labels are not consistent with the labels {categories}')
    original_indices = np.arange(len(x_data))
    shown_images = np.array([[1.0, 1.0]])  # initially, just something big
    for label in possible_labels:
        category_name = categories[label]
        selected_indices = original_indices[y_data == label]
        x_selected = transformed[selected_indices]
        plt.scatter(x_selected[:, 0], x_selected[:, 1], label=category_name)
        for i in selected_indices:
            # show an annotation box for a group of digits
            dist = np.sum((transformed[i] - shown_images)**2, 1)
            if np.min(dist) < 6e-3:
                # don't show points that are too close
                continue
            shown_images = np.concatenate([shown_images, [transformed[i]]],
                                           axis=0)
            # Thumbnails come from the ORIGINAL 784-dim data, reshaped 28x28.
            x_disp = x_original[i]
            thumbnail = np.reshape(x_disp, (28, 28))
            imagebox = offsetbox.AnnotationBbox(
                offsetbox.OffsetImage(thumbnail, cmap=plt.cm.gray_r),
                transformed[i])
            ax.add_artist(imagebox)
    ax.legend()
    adapter_figure = MatplotlibAdapter(fig, ax)
    return adapter_figure
def feature_importance_chart(feature_importances: np.ndarray):
    """Bar chart of feature-importance scores, highest first.

    Args:
        feature_importances: 1D array of importance scores.

    Returns:
        A go.Figure() object containing the sorted bar chart.
    """
    # Negate, sort ascending, negate back: a descending sort.
    descending = -np.sort(-feature_importances)
    positions = np.arange(0, len(feature_importances))
    figure = go.Figure()
    figure.add_trace(go.Bar(x=positions, y=descending))
    return figure
def visualize_dataset3d_vectors(vectors: np.ndarray, x_data: np.ndarray,
                                y_data: np.ndarray):
    """Plot Dataset3D points together with a set of direction vectors.

    Each vector is normalized, scaled to the data's extent, and drawn
    as a line anchored at the data mean.

    NOTE(review): the per-label category names are built from
    sorted(set(y_data)); this assumes labels are already consistent
    with `_add_scatter_dataset3d`'s 0..k-1 indexing — confirm for
    non-contiguous labels.

    Args:
        vectors: Array of shape (n_vectors, 3).
        x_data: Features of shape (num_examples, 3).
        y_data: Labels of shape (num_examples,).

    Returns:
        A plotly figure with the scatter plus one line per vector.
    """
    _validate_vector_visualization_inputs(vectors, x_data, y_data)
    if x_data.shape[1] != 3:
        raise ValueError('Expecting 3-dimensional x_data')
    # Calculate the length of vectors in figure
    scale = np.max(np.max(x_data, axis=0) - np.min(x_data, axis=0))
    categories = sorted(set(y_data))
    fig = visualize_3d_data(x_data,
                            y_data, [f'{c}' for c in categories],
                            scatter_alpha=0.9)
    x_mean = x_data.mean(axis=0)
    for vector in vectors:
        vector = vector / np.linalg.norm(vector) * scale
        # Line segment from the data mean to mean + scaled vector.
        x_arrow = np.array([0, vector[0]]) + x_mean[0]
        y_arrow = np.array([0, vector[1]]) + x_mean[1]
        z_arrow = np.array([0, vector[2]]) + x_mean[2]
        fig.add_trace(
            go.Scatter3d(x=x_arrow,
                         y=y_arrow,
                         z=z_arrow,
                         mode='lines',
                         line={'width': 5}))
    fig.update_layout(
        scene={
            'xaxis_title': 'x1',
            'yaxis_title': 'x2',
            'zaxis_title': 'x3',
            'aspectmode': 'data'  # Fixed aspect ratio
        })
    return fig
def visualize_fashionmnist_vectors(vectors: np.ndarray, x_data: np.ndarray,
                                   y_data: np.ndarray):
    """Render up to 64 component vectors as 28x28 grayscale thumbnails.

    Each vector is min-max rescaled to 0-255 and tiled on a yellow
    canvas; the canvas is returned as a plotly image figure.

    Args:
        vectors: Array of shape (n_vectors, 784).
        x_data: Features of shape (num_examples, 784), used only for
            input validation.
        y_data: Labels of shape (num_examples,), used only for
            input validation.

    Returns:
        A plotly figure showing the tiled thumbnails.
    """
    _validate_vector_visualization_inputs(vectors, x_data, y_data)
    n_dims = x_data.shape[1]
    thumbnail_length = 28
    expected_n_dims = thumbnail_length**2
    if n_dims != expected_n_dims:
        raise ValueError('Wrong number of features. Expecting '
                         f'{expected_n_dims}, but got {n_dims}')
    # Just pick at most 64 vectors
    vectors = vectors[:64]
    # Rescale all vectors to 0-255 (per-vector min-max normalization)
    min_vectors = np.min(vectors, axis=1)
    max_vectors = np.max(vectors, axis=1)
    numerator = vectors - min_vectors.reshape((-1, 1))
    denominator = max_vectors - min_vectors
    rescaled_vectors = (numerator / denominator.reshape(
        (-1, 1)) * 255).astype(np.uint8)
    # Use m x m subplot to include the vectors as thumbnails
    m = np.ceil(np.sqrt(len(vectors)))
    # Gap between images
    gap = 10
    canvas_size = int((thumbnail_length + gap) * m + gap)
    canvas = np.zeros((canvas_size, canvas_size, 3))
    canvas[:, :] = [255, 255, 0]  # Yellow background
    for i, vector in enumerate(rescaled_vectors):
        # Top-left corner of thumbnail i on the m x m grid.
        row = int((i // m) * (gap + thumbnail_length) + gap)
        col = int(i % m * (gap + thumbnail_length) + gap)
        reshaped = np.reshape(vector, (thumbnail_length, thumbnail_length))
        # Repeat the gray channel 3x to form an RGB tile.
        stacked = np.stack([reshaped] * 3, axis=-1)
        canvas[row:row + thumbnail_length, col:col + thumbnail_length] = stacked
    fig = px.imshow(canvas)
    fig.update_layout(xaxis_visible=False,
                      xaxis_showticklabels=False,
                      yaxis_visible=False,
                      yaxis_showticklabels=False)
    # fig.show()
    return fig
def _validate_vector_visualization_inputs(vectors: np.ndarray,
x_data: np.ndarray,
y_data: np.ndarray):
if len(x_data) != len(y_data):
raise ValueError('x_data and y_data must have the same length.')
if len(x_data.shape) != 2:
raise ValueError('x_data must be a 2-dimensional array')
if len(vectors.shape) != 2:
raise ValueError('vectors must be a 2-dimensional array')
if vectors.shape[1] != x_data.shape[1]:
raise ValueError('vectors and x_data must have the same number of '
'features')
| [
"numpy.sum",
"plotly.graph_objects.Scatter3d",
"sklearn.preprocessing.MinMaxScaler",
"numpy.linalg.norm",
"matplotlib.offsetbox.OffsetImage",
"matplotlib.pyplot.close",
"numpy.max",
"numpy.reshape",
"matplotlib.pyplot.subplots",
"numpy.stack",
"plotly.graph_objects.Scatter",
"plotly.express.im... | [((923, 934), 'plotly.graph_objects.Figure', 'go.Figure', ([], {}), '()\n', (932, 934), True, 'from plotly import graph_objects as go\n'), ((1983, 1994), 'plotly.graph_objects.Figure', 'go.Figure', ([], {}), '()\n', (1992, 1994), True, 'from plotly import graph_objects as go\n'), ((4533, 4544), 'plotly.graph_objects.Figure', 'go.Figure', ([], {}), '()\n', (4542, 4544), True, 'from plotly import graph_objects as go\n'), ((9609, 9615), 'sklearn.manifold.TSNE', 'TSNE', ([], {}), '()\n', (9613, 9615), False, 'from sklearn.manifold import TSNE\n'), ((9715, 9729), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (9727, 9729), True, 'import matplotlib.pyplot as plt\n'), ((10137, 10159), 'numpy.array', 'np.array', (['[[1.0, 1.0]]'], {}), '([[1.0, 1.0]])\n', (10145, 10159), True, 'import numpy as np\n'), ((11660, 11671), 'plotly.graph_objects.Figure', 'go.Figure', ([], {}), '()\n', (11669, 11671), True, 'from plotly import graph_objects as go\n'), ((13629, 13652), 'numpy.min', 'np.min', (['vectors'], {'axis': '(1)'}), '(vectors, axis=1)\n', (13635, 13652), True, 'import numpy as np\n'), ((13671, 13694), 'numpy.max', 'np.max', (['vectors'], {'axis': '(1)'}), '(vectors, axis=1)\n', (13677, 13694), True, 'import numpy as np\n'), ((14104, 14143), 'numpy.zeros', 'np.zeros', (['(canvas_size, canvas_size, 3)'], {}), '((canvas_size, canvas_size, 3))\n', (14112, 14143), True, 'import numpy as np\n'), ((14590, 14607), 'plotly.express.imshow', 'px.imshow', (['canvas'], {}), '(canvas)\n', (14599, 14607), True, 'import plotly.express as px\n'), ((2139, 2178), 'numpy.concatenate', 'np.concatenate', (['[x_data, zeros]'], {'axis': '(1)'}), '([x_data, zeros], axis=1)\n', (2153, 2178), True, 'import numpy as np\n'), ((4563, 4616), 'plotly.graph_objects.Scatter', 'go.Scatter', ([], {'x': 'x', 'y': 'y', 'line': "{'color': 'rgb(0,100,80)'}"}), "(x=x, y=y, line={'color': 'rgb(0,100,80)'})\n", (4573, 4616), True, 'from plotly import graph_objects as go\n'), 
((4664, 4679), 'numpy.array', 'np.array', (['error'], {}), '(error)\n', (4672, 4679), True, 'import numpy as np\n'), ((4695, 4706), 'numpy.array', 'np.array', (['x'], {}), '(x)\n', (4703, 4706), True, 'import numpy as np\n'), ((4722, 4733), 'numpy.array', 'np.array', (['y'], {}), '(y)\n', (4730, 4733), True, 'import numpy as np\n'), ((4777, 4819), 'numpy.concatenate', 'np.concatenate', (['[x_np, x_np[::-1]]'], {'axis': '(0)'}), '([x_np, x_np[::-1]], axis=0)\n', (4791, 4819), True, 'import numpy as np\n'), ((4861, 4931), 'numpy.concatenate', 'np.concatenate', (['[y_np + error_np, y_np[::-1] - error_np[::-1]]'], {'axis': '(0)'}), '([y_np + error_np, y_np[::-1] - error_np[::-1]], axis=0)\n', (4875, 4931), True, 'import numpy as np\n'), ((5776, 5787), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (5785, 5787), True, 'import matplotlib.pyplot as plt\n'), ((10392, 10460), 'matplotlib.pyplot.scatter', 'plt.scatter', (['x_selected[:, 0]', 'x_selected[:, 1]'], {'label': 'category_name'}), '(x_selected[:, 0], x_selected[:, 1], label=category_name)\n', (10403, 10460), True, 'import matplotlib.pyplot as plt\n'), ((11572, 11601), 'numpy.sort', 'np.sort', (['(-feature_importances)'], {}), '(-feature_importances)\n', (11579, 11601), True, 'import numpy as np\n'), ((11690, 11723), 'plotly.graph_objects.Bar', 'go.Bar', ([], {'x': 'x', 'y': 'sorted_importances'}), '(x=x, y=sorted_importances)\n', (11696, 11723), True, 'from plotly import graph_objects as go\n'), ((14388, 14444), 'numpy.reshape', 'np.reshape', (['vector', '(thumbnail_length, thumbnail_length)'], {}), '(vector, (thumbnail_length, thumbnail_length))\n', (14398, 14444), True, 'import numpy as np\n'), ((14463, 14496), 'numpy.stack', 'np.stack', (['([reshaped] * 3)'], {'axis': '(-1)'}), '([reshaped] * 3, axis=-1)\n', (14471, 14496), True, 'import numpy as np\n'), ((3712, 3889), 'plotly.graph_objects.Scatter3d', 'go.Scatter3d', ([], {'x': 'x_selected[:, 0]', 'y': 'x_selected[:, 1]', 'z': 'x_selected[:, 2]', 
'mode': '"""markers"""', 'name': 'category_name', 'marker': "{'size': scatter_size, 'opacity': 1 - scatter_alpha}"}), "(x=x_selected[:, 0], y=x_selected[:, 1], z=x_selected[:, 2],\n mode='markers', name=category_name, marker={'size': scatter_size,\n 'opacity': 1 - scatter_alpha})\n", (3724, 3889), True, 'from plotly import graph_objects as go\n'), ((9780, 9794), 'sklearn.preprocessing.MinMaxScaler', 'MinMaxScaler', ([], {}), '()\n', (9792, 9794), False, 'from sklearn.preprocessing import MinMaxScaler\n'), ((10575, 10622), 'numpy.sum', 'np.sum', (['((transformed[i] - shown_images) ** 2)', '(1)'], {}), '((transformed[i] - shown_images) ** 2, 1)\n', (10581, 10622), True, 'import numpy as np\n'), ((10764, 10820), 'numpy.concatenate', 'np.concatenate', (['[shown_images, [transformed[i]]]'], {'axis': '(0)'}), '([shown_images, [transformed[i]]], axis=0)\n', (10778, 10820), True, 'import numpy as np\n'), ((10923, 10951), 'numpy.reshape', 'np.reshape', (['x_disp', '(28, 28)'], {}), '(x_disp, (28, 28))\n', (10933, 10951), True, 'import numpy as np\n'), ((12093, 12115), 'numpy.max', 'np.max', (['x_data'], {'axis': '(0)'}), '(x_data, axis=0)\n', (12099, 12115), True, 'import numpy as np\n'), ((12118, 12140), 'numpy.min', 'np.min', (['x_data'], {'axis': '(0)'}), '(x_data, axis=0)\n', (12124, 12140), True, 'import numpy as np\n'), ((12466, 12490), 'numpy.array', 'np.array', (['[0, vector[0]]'], {}), '([0, vector[0]])\n', (12474, 12490), True, 'import numpy as np\n'), ((12521, 12545), 'numpy.array', 'np.array', (['[0, vector[1]]'], {}), '([0, vector[1]])\n', (12529, 12545), True, 'import numpy as np\n'), ((12576, 12600), 'numpy.array', 'np.array', (['[0, vector[2]]'], {}), '([0, vector[2]])\n', (12584, 12600), True, 'import numpy as np\n'), ((12648, 12726), 'plotly.graph_objects.Scatter3d', 'go.Scatter3d', ([], {'x': 'x_arrow', 'y': 'y_arrow', 'z': 'z_arrow', 'mode': '"""lines"""', 'line': "{'width': 5}"}), "(x=x_arrow, y=y_arrow, z=z_arrow, mode='lines', line={'width': 5})\n", 
(12660, 12726), True, 'from plotly import graph_objects as go\n'), ((10636, 10648), 'numpy.min', 'np.min', (['dist'], {}), '(dist)\n', (10642, 10648), True, 'import numpy as np\n'), ((11018, 11070), 'matplotlib.offsetbox.OffsetImage', 'offsetbox.OffsetImage', (['thumbnail'], {'cmap': 'plt.cm.gray_r'}), '(thumbnail, cmap=plt.cm.gray_r)\n', (11039, 11070), False, 'from matplotlib import offsetbox\n'), ((12417, 12439), 'numpy.linalg.norm', 'np.linalg.norm', (['vector'], {}), '(vector)\n', (12431, 12439), True, 'import numpy as np\n')] |
# Distutils build script for the ComparisonStatistics (compall) package.
import os
import sys
import sys,os  # NOTE(review): 'sys' and 'os' are imported twice — redundant
target_prefix = sys.prefix
# Honor both '--prefix <path>' and '--prefix=<path>' on the command line.
for i in range(len(sys.argv)):
    a = sys.argv[i]
    if a=='--prefix':
        target_prefix=sys.argv[i+1]
    sp = a.split("--prefix=")
    if len(sp)==2:
        target_prefix=sp[1]
# Make the target site-packages importable during the build.
sys.path.insert(0,os.path.join(target_prefix,'lib','python%i.%i' % sys.version_info[:2],'site-packages'))
from getopt import gnu_getopt  # NOTE(review): gnu_getopt appears unused here
# Gather up all the files we need.
files = ['Src/compall.pyf','Src/compall_lib.F',]
## scypy_distutils Script
from numpy.distutils.core import setup, Extension
# Some useful directories.
## from distutils.sysconfig import get_python_inc, get_python_lib
## python_incdir = os.path.join( get_python_inc(plat_specific=1) )
## python_libdir = os.path.join( get_python_lib(plat_specific=1) )
extra_link_args=[]
if sys.platform=='darwin':
    # NOTE(review): computed but not passed to Extension below
    # (extra_link_args is commented out in the Extension call).
    extra_link_args = ['-bundle','-bundle_loader '+sys.prefix+'/bin/python']
## setup the python module
setup(name="ComparisonStatistics",
      version='1.1',
      description="Karl Taylor's compall code",
      author="Fortran: K.E.Taylor, Python: C. Doutriaux",
      author_email="<EMAIL> , <EMAIL>",
      maintainer="<NAME>",
      maintainer_email="<EMAIL>",
      ## Build fortran wrappers, uses f2py
      ## directories to search for libraries defined in setup.cfg
      ext_modules = [Extension('ComparisonStatistics.compall',
                               files,
                               # libraries=["ioapi", "netcdf"],
                               # library_dirs=libDirs,
                               include_dirs=['Src'],
                               # extra_link_args=extra_link_args,
                               ),
                     ],
      license="GNU GPL",
      ## Install these to their own directory
      package_dir={'ComparisonStatistics':'Lib'},
      packages = ["ComparisonStatistics"],
     )
| [
"os.path.join",
"numpy.distutils.core.Extension"
] | [((266, 359), 'os.path.join', 'os.path.join', (['target_prefix', '"""lib"""', "('python%i.%i' % sys.version_info[:2])", '"""site-packages"""'], {}), "(target_prefix, 'lib', 'python%i.%i' % sys.version_info[:2],\n 'site-packages')\n", (278, 359), False, 'import sys, os\n'), ((1326, 1396), 'numpy.distutils.core.Extension', 'Extension', (['"""ComparisonStatistics.compall"""', 'files'], {'include_dirs': "['Src']"}), "('ComparisonStatistics.compall', files, include_dirs=['Src'])\n", (1335, 1396), False, 'from numpy.distutils.core import setup, Extension\n')] |
"""
This script computes the persistence landscape of the Cantor triangle.
authors: <NAME>, <NAME>
"""
import numpy as np
from ripser import ripser
from persim.landscapes import PersLandscapeApprox
from persim.landscapes import plot_landscape
import matplotlib.pyplot as plt
from persim import plot_diagrams
#%% Initialize the Cantor triangle example
def phi0(x):
    """IFS contraction toward the fixed point (0, 0): scale by one third."""
    scaled = x / 3
    return scaled
def phi20(x):
    """IFS contraction toward the fixed point (1, 0): shift by (2, 0), then scale by 1/3."""
    shifted = x + np.array([2, 0])
    return shifted / 3
def phi02(x):
    """IFS contraction toward the fixed point (0, 1): shift by (0, 2), then scale by 1/3."""
    shifted = x + np.array([0, 2])
    return shifted / 3
# Vertices of the initial triangle; iterating the three contractions on
# these points approximates the Cantor-triangle attractor.
SArray = np.array([[0, 0], [1, 0], [0, 1]])
#%% Choose a scale to compute the IFS up to
# Keep the scale less than 30
scale = 4
for _ in range(scale):
    out0 = phi0(SArray)
    out20 = phi20(SArray)
    out02 = phi02(SArray)
    SArray = np.concatenate((out0, out20, out02), axis=0)
#%% Plot at scale
plt.figure(figsize=(5, 5))
# Fix: matplotlib's kwarg is "color" (lowercase); "Color" raises on recent
# matplotlib versions. Unused locals x/y removed.
plt.scatter(SArray[:, 0], SArray[:, 1], linewidth=0.1, color="blue", zorder=2)
plt.title("$S_3$", size=20)
#%% Compute the persistence
Dgmprox = ripser(SArray, maxdim=1)["dgms"]
plot_diagrams(Dgmprox, show=True, lifetime=False)
print(Dgmprox)
#%% Compute the landscape
Landprox = PersLandscapeApprox(dgms=Dgmprox, hom_deg=0)
ttl = f"PL of Cantor triangle at scale {scale}"
plot_landscape(landscape=Landprox, labels=["", "", ""], title=ttl)
| [
"matplotlib.pyplot.title",
"ripser.ripser",
"persim.landscapes.plot_landscape",
"persim.landscapes.PersLandscapeApprox",
"matplotlib.pyplot.scatter",
"matplotlib.pyplot.figure",
"numpy.array",
"persim.plot_diagrams",
"numpy.concatenate"
] | [((505, 539), 'numpy.array', 'np.array', (['[[0, 0], [1, 0], [0, 1]]'], {}), '([[0, 0], [1, 0], [0, 1]])\n', (513, 539), True, 'import numpy as np\n'), ((801, 827), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(5, 5)'}), '(figsize=(5, 5))\n', (811, 827), True, 'import matplotlib.pyplot as plt\n'), ((880, 958), 'matplotlib.pyplot.scatter', 'plt.scatter', (['SArray[:, 0]', 'SArray[:, 1]'], {'linewidth': '(0.1)', 'Color': '"""blue"""', 'zorder': '(2)'}), "(SArray[:, 0], SArray[:, 1], linewidth=0.1, Color='blue', zorder=2)\n", (891, 958), True, 'import matplotlib.pyplot as plt\n'), ((959, 986), 'matplotlib.pyplot.title', 'plt.title', (['"""$S_3$"""'], {'size': '(20)'}), "('$S_3$', size=20)\n", (968, 986), True, 'import matplotlib.pyplot as plt\n'), ((1058, 1107), 'persim.plot_diagrams', 'plot_diagrams', (['Dgmprox'], {'show': '(True)', 'lifetime': '(False)'}), '(Dgmprox, show=True, lifetime=False)\n', (1071, 1107), False, 'from persim import plot_diagrams\n'), ((1160, 1204), 'persim.landscapes.PersLandscapeApprox', 'PersLandscapeApprox', ([], {'dgms': 'Dgmprox', 'hom_deg': '(0)'}), '(dgms=Dgmprox, hom_deg=0)\n', (1179, 1204), False, 'from persim.landscapes import PersLandscapeApprox\n'), ((1253, 1319), 'persim.landscapes.plot_landscape', 'plot_landscape', ([], {'landscape': 'Landprox', 'labels': "['', '', '']", 'title': 'ttl'}), "(landscape=Landprox, labels=['', '', ''], title=ttl)\n", (1267, 1319), False, 'from persim.landscapes import plot_landscape\n'), ((737, 781), 'numpy.concatenate', 'np.concatenate', (['(out0, out20, out02)'], {'axis': '(0)'}), '((out0, out20, out02), axis=0)\n', (751, 781), True, 'import numpy as np\n'), ((1025, 1049), 'ripser.ripser', 'ripser', (['SArray'], {'maxdim': '(1)'}), '(SArray, maxdim=1)\n', (1031, 1049), False, 'from ripser import ripser\n'), ((418, 434), 'numpy.array', 'np.array', (['[2, 0]'], {}), '([2, 0])\n', (426, 434), True, 'import numpy as np\n'), ((472, 488), 'numpy.array', 'np.array', (['[0, 2]'], {}), 
'([0, 2])\n', (480, 488), True, 'import numpy as np\n')] |
#!/usr/local/bin/python3
"""
This module has the utilities to normalize the reads by a threshold T (the smallest number of total
reads among all the samples). To normalize, we get the threshold and then we randomly pick T reads
from each sample.
NOTE:
- column -> sample
- row -> family
- cell -> number of reads of a certain family in a sample
"""
import argparse
import os
import numpy
import pandas
from tqdm import tqdm
import dataloader
def get_reads_threshold(dataframe: pandas.DataFrame) -> (int, str):
    """
    This function gets the reads-threshold from the dataframe, which means taking the minimum number
    of the total reads per sample (columns).
    It returns a tuple with the threshold (int) and the sample name (str)
    """
    # Total reads per sample; the first column is the family ID, not a sample.
    total_reads = {sample: dataframe[sample].sum() for sample in dataframe.columns[1:]}
    # The sample with the fewest total reads defines the threshold.
    # (min with key= replaces the previous loop-variable-leak pattern and the
    # redundant list comprehension; ties resolve to the first sample, as before.)
    sample = min(total_reads, key=total_reads.get)
    return total_reads[sample], sample
def get_occurrences(non_zero_rows_values, non_zero_indexes, threshold):
    """Randomly sample `threshold` reads across the given families.

    non_zero_rows_values: read counts aligned with `non_zero_indexes`.
    non_zero_indexes: row (family) indexes that have at least one read.
    threshold: total number of reads to keep for this sample.

    Returns a dict mapping each row index to how many of its reads survived
    the sampling (possibly zero).
    """
    # Expand each index by its read count, shuffle, and keep the first
    # `threshold` entries: this draws `threshold` reads without replacement.
    # (A dead `numpy.array(...)` assignment that was immediately overwritten,
    # an unused total counter, and commented-out debug prints were removed.)
    index_list = numpy.repeat(non_zero_indexes, non_zero_rows_values)
    numpy.random.shuffle(index_list)
    sampled_index_list = index_list[:threshold]
    occurrences = {}
    for index in non_zero_indexes:
        occurrences[index] = numpy.count_nonzero(sampled_index_list == index)
    return occurrences
def normalize_data(dataframe: pandas.DataFrame, threshold: int) -> pandas.DataFrame:
    """
    To normalize, we get the threshold T and then we randomly pick T reads from each sample.
    """
    # Start from an all-zero frame with the same shape/labels, keeping the IDs.
    normalized = pandas.DataFrame().reindex_like(dataframe).fillna(0).astype(int)
    normalized['ID'] = dataframe['ID']
    # Sample T reads from every sample column (skipping the ID column).
    for sample_name in tqdm(dataframe.columns[1:], unit=' samples'):
        column = dataframe[sample_name]
        nonzero = column != 0
        reads = column.loc[nonzero].tolist()
        indexes = dataframe.index[nonzero].tolist()
        sampled = get_occurrences(reads, indexes, threshold)
        # Write the sampled counts back into the zero-initialised frame.
        for family_index, num_reads in sampled.items():
            normalized.loc[family_index, sample_name] = num_reads
    return normalized
def normalize():
    """
    Entrypoint function to normalize the reads by a threshold T (the smallest number of total
    reads among all the samples). To normalize, we get the threshold and then we randomly pick T reads
    from each sample.
    """
    # NOTE: this docstring doubles as the --help description below, so the
    # former "fuctionto" typo was user-visible and is fixed here.
    parser = argparse.ArgumentParser(description=normalize.__doc__)
    parser.add_argument('filepath', metavar='CSV_FILE', type=str,
                        help='path to a csv file representing a gene reads table to normalize')
    parser.add_argument('--seed', type=int, default=0,
                        help='the seed used to initialize the random generator for the sampling '
                             'operation. Defaults to 0')
    args = parser.parse_args()
    # Load CSV file, appending the '.csv' extension when it is missing.
    csv_ext = os.extsep + 'csv'
    filepath = args.filepath if args.filepath.lower().endswith(csv_ext) else args.filepath + csv_ext
    dataframe = dataloader.load_csv(filepath)
    # Select threshold for normalization (smallest per-sample total).
    threshold, min_sample = get_reads_threshold(dataframe)
    print(f'Threshold selected {threshold} which is {min_sample}')
    # Seed NumPy's random number generator so runs are reproducible.
    numpy.random.seed(args.seed)
    # Normalize
    normalized_dataframe = normalize_data(dataframe, threshold)
    # Save result in the same folder, but with 'normalized_' prefix.
    result_path = os.path.join(os.path.dirname(filepath), 'normalized_' + os.path.basename(filepath))
    dataloader.to_csv(normalized_dataframe, result_path)
if __name__ == u'__main__':
normalize()
| [
"pandas.DataFrame",
"tqdm.tqdm",
"numpy.random.seed",
"argparse.ArgumentParser",
"dataloader.to_csv",
"numpy.count_nonzero",
"os.path.basename",
"os.path.dirname",
"dataloader.load_csv",
"numpy.array",
"numpy.random.shuffle",
"numpy.repeat"
] | [((1378, 1407), 'numpy.array', 'numpy.array', (['non_zero_indexes'], {}), '(non_zero_indexes)\n', (1389, 1407), False, 'import numpy\n'), ((1425, 1477), 'numpy.repeat', 'numpy.repeat', (['non_zero_indexes', 'non_zero_rows_values'], {}), '(non_zero_indexes, non_zero_rows_values)\n', (1437, 1477), False, 'import numpy\n'), ((1482, 1514), 'numpy.random.shuffle', 'numpy.random.shuffle', (['index_list'], {}), '(index_list)\n', (1502, 1514), False, 'import numpy\n'), ((2286, 2330), 'tqdm.tqdm', 'tqdm', (['dataframe.columns[1:]'], {'unit': '""" samples"""'}), "(dataframe.columns[1:], unit=' samples')\n", (2290, 2330), False, 'from tqdm import tqdm\n'), ((3144, 3198), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': 'normalize.__doc__'}), '(description=normalize.__doc__)\n', (3167, 3198), False, 'import argparse\n'), ((3773, 3802), 'dataloader.load_csv', 'dataloader.load_csv', (['filepath'], {}), '(filepath)\n', (3792, 3802), False, 'import dataloader\n'), ((4043, 4071), 'numpy.random.seed', 'numpy.random.seed', (['args.seed'], {}), '(args.seed)\n', (4060, 4071), False, 'import numpy\n'), ((4332, 4384), 'dataloader.to_csv', 'dataloader.to_csv', (['normalized_dataframe', 'result_path'], {}), '(normalized_dataframe, result_path)\n', (4349, 4384), False, 'import dataloader\n'), ((1661, 1709), 'numpy.count_nonzero', 'numpy.count_nonzero', (['(sampled_index_list == index)'], {}), '(sampled_index_list == index)\n', (1680, 1709), False, 'import numpy\n'), ((4257, 4282), 'os.path.dirname', 'os.path.dirname', (['filepath'], {}), '(filepath)\n', (4272, 4282), False, 'import os\n'), ((4300, 4326), 'os.path.basename', 'os.path.basename', (['filepath'], {}), '(filepath)\n', (4316, 4326), False, 'import os\n'), ((2109, 2127), 'pandas.DataFrame', 'pandas.DataFrame', ([], {}), '()\n', (2125, 2127), False, 'import pandas\n')] |
import numpy as np
from matplotlib import pyplot as plt
# Load the stored time series (column 0: x, column 1: y) and plot it.
trajectory = np.load("data/33cd9a03-acf3-4b36-8ebc-6937f066196d/output.npy")
plt.plot(trajectory[:, 0], trajectory[:, 1])
plt.show()
| [
"numpy.load",
"matplotlib.pyplot.show",
"matplotlib.pyplot.plot"
] | [((62, 125), 'numpy.load', 'np.load', (['"""data/33cd9a03-acf3-4b36-8ebc-6937f066196d/output.npy"""'], {}), "('data/33cd9a03-acf3-4b36-8ebc-6937f066196d/output.npy')\n", (69, 125), True, 'import numpy as np\n'), ((152, 166), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y'], {}), '(x, y)\n', (160, 166), True, 'from matplotlib import pyplot as plt\n'), ((166, 176), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (174, 176), True, 'from matplotlib import pyplot as plt\n')] |
# python align.py --camera=nikon --focal_length=21
import argparse
import glob
import tqdm
import os
import os.path as op
import json
import utils
from dataset import Dataset, SUPPORTED_ANNO_EXT
import rawpy
import numpy as np
import cv2
from skimage.exposure import match_histograms, equalize_hist
import skimage.filters
import datetime
import draw_anno
MIN_MATCH_COUNT = 10
# XXX scaling
# processed_raw_img = skimage.filters.median(raw_img, np.ones([15,15]))
# processed_raw_img = equalize_hist(processed_raw_img, nbins=2**16)
# raw_img = _match_cumulative_cdf(processed_raw_img, jpeg_img, raw_img)
# raw_img = processed_raw_img
def log_msg(msg):
    """Echo *msg* to stdout and append it to 'align.log'; always returns True."""
    print(msg)
    with open('align.log', 'a') as log_file:
        log_file.write(msg + "\n")
    return True
class Transforms:
    """Accumulates the transforms applied to the jpeg and raw images and
    rebuilds a single combined 3x3 pipeline after each addition.
    """
    def __init__(self):
        # Combined pipeline (rebuilt by _update() on every log() call).
        self.transforms = []
        self.descriptions = []
        # Per-image histories, in the order the transforms were applied.
        self.jpeg_transforms = []
        self.raw_transforms = []
        self.jpeg_descriptions = []
        self.raw_descriptions = []
    def log(self, mtx, img, desc=None):
        # type: (..., str, str | None) -> None
        """Record transform `mtx` (2x2 linear or 3x3 homogeneous) applied to
        image `img` ("jpeg" or "raw"), with an optional description suffix."""
        trnsfm = mtx.copy()
        assert trnsfm.shape == (2,2) or trnsfm.shape == (3,3)
        if trnsfm.shape == (2,2):
            # Embed a 2x2 linear map into a 3x3 homogeneous matrix.
            trnsfm = np.eye(3)
            trnsfm[0:2,0:2] = mtx
        if not desc:
            desc = ""
        else:
            desc = "_" + desc
        desc = img + desc
        if img == "jpeg":
            self.jpeg_transforms.append(trnsfm)
            self.jpeg_descriptions.append(desc)
        elif img == "raw":
            self.raw_transforms.append(trnsfm)
            self.raw_descriptions.append(desc)
        else:
            raise NotImplementedError
        self._update()
    def _update(self):
        # Rebuild the combined pipeline: inverted raw-side entries first,
        # then jpeg-side entries replayed in reverse order.
        # NOTE(review): the raw-side list is inverted but NOT reversed,
        # unlike the jpeg side -- confirm this ordering is intended.
        self.transforms = self._raw_operator(self.raw_transforms) + \
            self._jpeg_operator(self.jpeg_transforms)
        self.descriptions = self._raw_operator(self.raw_descriptions) + \
            self._jpeg_operator(self.jpeg_descriptions)
    def compose(self):
        """Compose the pipeline into one 3x3 matrix; transforms[0] is
        applied first (each step left-multiplies the accumulator)."""
        t = np.eye(3)
        for m in self.transforms:
            t = m @ t
        return t
    def _is_l_of_a(self, l):
        # True when every element of `l` is a numpy array; distinguishes the
        # matrix lists from the description (string) lists.
        for e in l:
            if not isinstance(e, np.ndarray): return False
        return True
    def _jpeg_operator(self, l):
        # Jpeg-side entries are replayed in reverse order, unmodified.
        return l.copy()[::-1]
    def _raw_operator(self, l):
        # Raw-side matrices are inverted; description lists pass through as-is.
        if self._is_l_of_a(l):
            return [np.linalg.inv(a) for a in l]
        else:
            return l.copy()
    def print(self):
        """Pretty-print the combined pipeline, one step per line."""
        for idx, (trnsfm, desc) in enumerate(zip(self.transforms, self.descriptions)):
            print(f"{idx}. {desc}: ")
            print(f"{trnsfm}")
class Transform_Fnc:
    """Callable-style wrapper around a 3x3 homogeneous transform matrix."""

    def __init__(self, mtx, floor=True) -> None:
        """
        mtx: 3x3 homogeneous transform matrix.
        floor: when True, truncate the transformed coordinates to ints.
        """
        assert mtx.shape == (3, 3)
        self.mtx = mtx
        self.floor = floor

    def apply(self, x, y) -> tuple:
        """Map the point (x, y) through the matrix and return the new (x, y).

        Fix: the return annotation previously said ``list`` although a tuple
        has always been returned; it is corrected to ``tuple``.
        """
        _result = self.mtx @ np.array([x, y, 1])
        if self.floor:
            # astype(int) truncates toward zero (not a true floor for negatives).
            _result = _result.astype(int)
        return (_result[0], _result[1])
SUPPORTED_RAW_PATTERNS = [b'RGBG']
def image_diff(img1, img2, postprocessing_fncs=None, postprocessing_args=None):
    """Return img1 - img2, optionally piping the difference through a chain
    of post-processing functions, each invoked as fnc(diff, **kwargs)."""
    result = img1 - img2
    if postprocessing_fncs:
        for fnc, kwargs in zip(postprocessing_fncs, postprocessing_args):
            result = fnc(result, **kwargs)
    return result
def read_avg_green_raw(filepath):
    """Read a RAW file and return the average of its two green Bayer planes.

    Values are black-level subtracted and divided by 2**16. Only sensors
    whose rawpy color_desc is in SUPPORTED_RAW_PATTERNS (RGBG) are accepted.
    The returned plane is half the sensor resolution in each dimension.
    """
    _img = rawpy.imread(filepath)
    assert _img.color_desc in SUPPORTED_RAW_PATTERNS
    img = _img.raw_image.copy()
    img = np.expand_dims(img,axis=2)
    black_level = _img.black_level_per_channel[0] # assume all black level is the same
    img = (img - black_level)/2**16
    H = img.shape[0]
    W = img.shape[1]
    # De-interleave the 2x2 Bayer mosaic into four half-resolution planes.
    packed_img = np.concatenate((img[0:H:2, 0:W:2, :], # R
                    img[0:H:2, 1:W:2, :], # GR
                    img[1:H:2, 0:W:2, :], # GB
                    img[1:H:2, 1:W:2, :]), axis=2) # B
    greens = (packed_img[:, :, 1]+packed_img[:, :, 2])/2 # RGGB
    return greens
def read_rawpy_rgb(filepath):
    """Demosaic the raw file at *filepath* with rawpy and return the image
    with its channel order reversed (the local name suggests BGR -> RGB)."""
    raw = rawpy.imread(filepath)
    demosaiced = raw.postprocess()
    return demosaiced[:, :, ::-1]
def read_rawpy_grayscale(filepath):
    """Demosaic the raw file at *filepath* and collapse it to a single
    grayscale channel via OpenCV."""
    demosaiced = rawpy.imread(filepath).postprocess()
    return cv2.cvtColor(demosaiced[:, :, ::-1], cv2.COLOR_BGR2GRAY)
def read_avg_colors_raw(filepath, gamma=True, rgb_weights=(.2126, .7152, .0722)):
    # https://stackoverflow.com/questions/687261/converting-rgb-to-grayscale-intensity
    """Read a RAW file and return a luma-weighted grayscale image.

    filepath: path to the RAW file (RGBG Bayer pattern required).
    gamma: when truthy, apply a 1/2.2 gamma curve per channel before
        weighting. Fix: this parameter was previously shadowed by a local
        ``gamma = 1/2.2`` and silently ignored; the default behaviour
        (gamma applied) is unchanged, but gamma=False now works.
    rgb_weights: Rec.709 luma weights for R, G, B (the mutable-list default
        was replaced with an equivalent tuple).
    """
    _img = rawpy.imread(filepath)
    assert _img.color_desc in SUPPORTED_RAW_PATTERNS
    img = _img.raw_image.copy()
    img = np.expand_dims(img, axis=2)
    black_level = _img.black_level_per_channel[0]  # assume all black level is the same
    img = (img - black_level)/2**16
    H = img.shape[0]
    W = img.shape[1]
    Rw, Gw, Bw = rgb_weights
    Gw /= 2  # green weight is split across the two green Bayer sites
    # De-interleave the 2x2 Bayer mosaic into four half-resolution planes.
    packed_img = np.concatenate((img[0:H:2, 0:W:2, :],   # R
                                 img[0:H:2, 1:W:2, :],   # GR
                                 img[1:H:2, 0:W:2, :],   # GB
                                 img[1:H:2, 1:W:2, :]), axis=2)  # B
    r, gr, gb, b = packed_img[:, :, 0], packed_img[:, :, 1], packed_img[:, :, 2], packed_img[:, :, 3]
    exponent = 1/2.2 if gamma else 1.0
    grayscale = Rw * r**exponent + Gw * gr**exponent + Gw * gb**exponent + Bw * b**exponent
    return grayscale
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='')
parser.add_argument('--camera', type=str, required=True)
parser.add_argument('--focal_length', type=int, required=True)
args = parser.parse_args()
log_msg(datetime.datetime.now().strftime("%m/%d/%Y, %H:%M:%S"))
print(f"{args.camera}")
camera_config = utils.read_json(op.join(utils.CONFIGS_DIR, args.camera+".json"))
dataset = Dataset(camera_config)
print("Original stats:")
dataset.show_stats()
dataset.filter_by_focal_length(args.focal_length)
print(f"Stats for focal length {args.focal_length}:")
dataset.show_stats()
# XXX
transforms = Transforms()
detector = cv2.SIFT_create()
FLANN_INDEX_KDTREE = 1
index_params = dict(algorithm = FLANN_INDEX_KDTREE, trees = 5)
search_params = dict(checks = 50)
flann = cv2.FlannBasedMatcher(index_params, search_params)
for sample in tqdm.tqdm(dataset.samples[6::7][-1:]):
''' 1. Prepare images. '''
# raw_img = np.array(read_avg_green_raw(sample.raw_filepath)*2**16, dtype=np.uint16)
''' 1.1. Rawpy+grayscale. '''
raw_img = read_rawpy_grayscale(sample.raw_filepath)
jpeg_img = cv2.imread(sample.jpeg_filepath, 0)
cv2.imwrite("steps/1_1_jpeg.jpg", jpeg_img)
cv2.imwrite("steps/1_1_raw.jpg", raw_img)
''' 1.2. Equalize and match histograms. '''
jpeg_img = equalize_hist(jpeg_img)
raw_img = match_histograms(raw_img, jpeg_img)
cv2.imwrite("steps/1_2_jpeg.jpg", jpeg_img*255)
cv2.imwrite("steps/1_2_raw.jpg", raw_img*255)
''' 1.3. Downsample each by 4'''
jpeg_scale = 1/2
raw_scale = 1/2
jpeg_img = cv2.resize(jpeg_img, None, fx=jpeg_scale, fy=jpeg_scale)
raw_img = cv2.resize(raw_img, None, fx=raw_scale, fy=raw_scale)
transforms.log(jpeg_scale*np.eye(2), img="jpeg")
transforms.log(raw_scale*np.eye(2), img="raw")
''' 1.-1. np.UINT8'''
if jpeg_img.max() < 2:
jpeg_img *= 255
if raw_img.max() < 2:
raw_img *= 255
jpeg_img = jpeg_img.astype(np.uint8)
raw_img = raw_img.astype(np.uint8)
''' 2. Estimate homography'''
img1 = jpeg_img
img2 = raw_img
kp1, des1 = detector.detectAndCompute(img1,None)
kp2, des2 = detector.detectAndCompute(img2,None)
matches = flann.knnMatch(des1,des2,k=2)
good = []
for m,n in matches:
if m.distance < 0.7*n.distance:
good.append(m)
if len(good)>MIN_MATCH_COUNT:
src_pts = np.float32([ kp1[m.queryIdx].pt for m in good ]).reshape(-1,1,2)
dst_pts = np.float32([ kp2[m.trainIdx].pt for m in good ]).reshape(-1,1,2)
M, mask = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC,5.0)
else:
log_msg("[WARNING] {} ignored, (not enough good matches)")
M, mask = None, None
transforms.log(M, img="jpeg")
transforms.print()
mapping = Transform_Fnc(transforms.compose())
rgb_raw = read_rawpy_rgb(sample.raw_filepath)
result = draw_anno.annotate(rgb_raw, dataset.get_anno()[sample.file_name], transform_fnc=mapping.apply, draw_org=True)
cv2.imwrite("result.jpg", result)
# print(raw_img.shape)
# print(jpeg_img.shape)
cv2.imwrite("raw.jpg", raw_img)
cv2.imwrite("jpeg.jpg", jpeg_img)
| [
"argparse.ArgumentParser",
"cv2.SIFT_create",
"os.path.join",
"dataset.Dataset",
"cv2.cvtColor",
"cv2.imwrite",
"rawpy.imread",
"datetime.datetime.now",
"cv2.resize",
"tqdm.tqdm",
"skimage.exposure.match_histograms",
"skimage.exposure.equalize_hist",
"cv2.FlannBasedMatcher",
"numpy.linalg.... | [((3312, 3334), 'rawpy.imread', 'rawpy.imread', (['filepath'], {}), '(filepath)\n', (3324, 3334), False, 'import rawpy\n'), ((3430, 3457), 'numpy.expand_dims', 'np.expand_dims', (['img'], {'axis': '(2)'}), '(img, axis=2)\n', (3444, 3457), True, 'import numpy as np\n'), ((3639, 3756), 'numpy.concatenate', 'np.concatenate', (['(img[0:H:2, 0:W:2, :], img[0:H:2, 1:W:2, :], img[1:H:2, 0:W:2, :], img[1:H:\n 2, 1:W:2, :])'], {'axis': '(2)'}), '((img[0:H:2, 0:W:2, :], img[0:H:2, 1:W:2, :], img[1:H:2, 0:W:\n 2, :], img[1:H:2, 1:W:2, :]), axis=2)\n', (3653, 3756), True, 'import numpy as np\n'), ((4012, 4034), 'rawpy.imread', 'rawpy.imread', (['filepath'], {}), '(filepath)\n', (4024, 4034), False, 'import rawpy\n'), ((4139, 4161), 'rawpy.imread', 'rawpy.imread', (['filepath'], {}), '(filepath)\n', (4151, 4161), False, 'import rawpy\n'), ((4202, 4251), 'cv2.cvtColor', 'cv2.cvtColor', (['rgb[:, :, ::-1]', 'cv2.COLOR_BGR2GRAY'], {}), '(rgb[:, :, ::-1], cv2.COLOR_BGR2GRAY)\n', (4214, 4251), False, 'import cv2\n'), ((4433, 4455), 'rawpy.imread', 'rawpy.imread', (['filepath'], {}), '(filepath)\n', (4445, 4455), False, 'import rawpy\n'), ((4551, 4578), 'numpy.expand_dims', 'np.expand_dims', (['img'], {'axis': '(2)'}), '(img, axis=2)\n', (4565, 4578), True, 'import numpy as np\n'), ((4799, 4916), 'numpy.concatenate', 'np.concatenate', (['(img[0:H:2, 0:W:2, :], img[0:H:2, 1:W:2, :], img[1:H:2, 0:W:2, :], img[1:H:\n 2, 1:W:2, :])'], {'axis': '(2)'}), '((img[0:H:2, 0:W:2, :], img[0:H:2, 1:W:2, :], img[1:H:2, 0:W:\n 2, :], img[1:H:2, 1:W:2, :]), axis=2)\n', (4813, 4916), True, 'import numpy as np\n'), ((5313, 5352), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '""""""'}), "(description='')\n", (5336, 5352), False, 'import argparse\n'), ((5707, 5729), 'dataset.Dataset', 'Dataset', (['camera_config'], {}), '(camera_config)\n', (5714, 5729), False, 'from dataset import Dataset, SUPPORTED_ANNO_EXT\n'), ((5976, 5993), 'cv2.SIFT_create', 
'cv2.SIFT_create', ([], {}), '()\n', (5991, 5993), False, 'import cv2\n'), ((6138, 6188), 'cv2.FlannBasedMatcher', 'cv2.FlannBasedMatcher', (['index_params', 'search_params'], {}), '(index_params, search_params)\n', (6159, 6188), False, 'import cv2\n'), ((6207, 6244), 'tqdm.tqdm', 'tqdm.tqdm', (['dataset.samples[6::7][-1:]'], {}), '(dataset.samples[6::7][-1:])\n', (6216, 6244), False, 'import tqdm\n'), ((8533, 8566), 'cv2.imwrite', 'cv2.imwrite', (['"""result.jpg"""', 'result'], {}), "('result.jpg', result)\n", (8544, 8566), False, 'import cv2\n'), ((8626, 8657), 'cv2.imwrite', 'cv2.imwrite', (['"""raw.jpg"""', 'raw_img'], {}), "('raw.jpg', raw_img)\n", (8637, 8657), False, 'import cv2\n'), ((8662, 8695), 'cv2.imwrite', 'cv2.imwrite', (['"""jpeg.jpg"""', 'jpeg_img'], {}), "('jpeg.jpg', jpeg_img)\n", (8673, 8695), False, 'import cv2\n'), ((1988, 1997), 'numpy.eye', 'np.eye', (['(3)'], {}), '(3)\n', (1994, 1997), True, 'import numpy as np\n'), ((5644, 5693), 'os.path.join', 'op.join', (['utils.CONFIGS_DIR', "(args.camera + '.json')"], {}), "(utils.CONFIGS_DIR, args.camera + '.json')\n", (5651, 5693), True, 'import os.path as op\n'), ((6491, 6526), 'cv2.imread', 'cv2.imread', (['sample.jpeg_filepath', '(0)'], {}), '(sample.jpeg_filepath, 0)\n', (6501, 6526), False, 'import cv2\n'), ((6535, 6578), 'cv2.imwrite', 'cv2.imwrite', (['"""steps/1_1_jpeg.jpg"""', 'jpeg_img'], {}), "('steps/1_1_jpeg.jpg', jpeg_img)\n", (6546, 6578), False, 'import cv2\n'), ((6587, 6628), 'cv2.imwrite', 'cv2.imwrite', (['"""steps/1_1_raw.jpg"""', 'raw_img'], {}), "('steps/1_1_raw.jpg', raw_img)\n", (6598, 6628), False, 'import cv2\n'), ((6700, 6723), 'skimage.exposure.equalize_hist', 'equalize_hist', (['jpeg_img'], {}), '(jpeg_img)\n', (6713, 6723), False, 'from skimage.exposure import match_histograms, equalize_hist\n'), ((6742, 6777), 'skimage.exposure.match_histograms', 'match_histograms', (['raw_img', 'jpeg_img'], {}), '(raw_img, jpeg_img)\n', (6758, 6777), False, 'from skimage.exposure 
import match_histograms, equalize_hist\n'), ((6786, 6835), 'cv2.imwrite', 'cv2.imwrite', (['"""steps/1_2_jpeg.jpg"""', '(jpeg_img * 255)'], {}), "('steps/1_2_jpeg.jpg', jpeg_img * 255)\n", (6797, 6835), False, 'import cv2\n'), ((6842, 6889), 'cv2.imwrite', 'cv2.imwrite', (['"""steps/1_2_raw.jpg"""', '(raw_img * 255)'], {}), "('steps/1_2_raw.jpg', raw_img * 255)\n", (6853, 6889), False, 'import cv2\n'), ((6997, 7053), 'cv2.resize', 'cv2.resize', (['jpeg_img', 'None'], {'fx': 'jpeg_scale', 'fy': 'jpeg_scale'}), '(jpeg_img, None, fx=jpeg_scale, fy=jpeg_scale)\n', (7007, 7053), False, 'import cv2\n'), ((7072, 7125), 'cv2.resize', 'cv2.resize', (['raw_img', 'None'], {'fx': 'raw_scale', 'fy': 'raw_scale'}), '(raw_img, None, fx=raw_scale, fy=raw_scale)\n', (7082, 7125), False, 'import cv2\n'), ((1187, 1196), 'numpy.eye', 'np.eye', (['(3)'], {}), '(3)\n', (1193, 1196), True, 'import numpy as np\n'), ((2830, 2849), 'numpy.array', 'np.array', (['[x, y, 1]'], {}), '([x, y, 1])\n', (2838, 2849), True, 'import numpy as np\n'), ((8074, 8127), 'cv2.findHomography', 'cv2.findHomography', (['src_pts', 'dst_pts', 'cv2.RANSAC', '(5.0)'], {}), '(src_pts, dst_pts, cv2.RANSAC, 5.0)\n', (8092, 8127), False, 'import cv2\n'), ((2356, 2372), 'numpy.linalg.inv', 'np.linalg.inv', (['a'], {}), '(a)\n', (2369, 2372), True, 'import numpy as np\n'), ((5524, 5547), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (5545, 5547), False, 'import datetime\n'), ((7160, 7169), 'numpy.eye', 'np.eye', (['(2)'], {}), '(2)\n', (7166, 7169), True, 'import numpy as np\n'), ((7216, 7225), 'numpy.eye', 'np.eye', (['(2)'], {}), '(2)\n', (7222, 7225), True, 'import numpy as np\n'), ((7900, 7946), 'numpy.float32', 'np.float32', (['[kp1[m.queryIdx].pt for m in good]'], {}), '([kp1[m.queryIdx].pt for m in good])\n', (7910, 7946), True, 'import numpy as np\n'), ((7987, 8033), 'numpy.float32', 'np.float32', (['[kp2[m.trainIdx].pt for m in good]'], {}), '([kp2[m.trainIdx].pt for m in good])\n', (7997, 
8033), True, 'import numpy as np\n')] |
import tensorflow as tf
import numpy as np
from network import network
def test_network_build():
    """
    Check whether the network is built successfully or not.

    Feeds a random float32 batch of shape (3, 128, 128, 3) through the
    extractor returned by ``network`` and checks the shapes of the two
    returned feature maps.
    """
    x = np.float32(np.random.random((3, 128, 128, 3)))
    blazeface_extractor = network((128, 128, 3))
    feature = blazeface_extractor(x)
    print(feature)
    # NOTE(review): `or` makes this pass when only ONE of the two feature
    # maps has the expected shape -- confirm whether `and` was intended.
    assert feature[0].shape == (
        3, 16, 16, 96) or feature[1].shape == (3, 8, 8, 96)
if __name__ == "__main__":
    test_network_build()
| [
"numpy.random.random",
"network.network"
] | [((257, 279), 'network.network', 'network', (['(128, 128, 3)'], {}), '((128, 128, 3))\n', (264, 279), False, 'from network import network\n'), ((194, 228), 'numpy.random.random', 'np.random.random', (['(3, 128, 128, 3)'], {}), '((3, 128, 128, 3))\n', (210, 228), True, 'import numpy as np\n')] |
"""Collection of functions to run Viterbi algorithms on haploid genotype data, where the data is structured as samples x variants."""
import numba as nb
import numpy as np
@nb.jit
def viterbi_naive_init(n, m, H, s, e, r):
    """Initialise naive implementation of LS viterbi.

    Returns the (n, m) probability matrix V, the int64 traceback pointer
    matrix P, and the scaled recombination rates r/n.
    """
    V = np.zeros((n, m))
    P = np.zeros((n, m)).astype(np.int64)
    # Uniform prior over haplotypes times the emission at the first site.
    match = np.equal(H[:, 0], s[0, 0]).astype(np.int64)
    V[:, 0] = 1 / n * e[match, 0]
    P[:, 0] = 0
    r_n = r / n
    return V, P, r_n
@nb.jit
def viterbi_init(n, m, H, s, e, r):
    """Initialise naive, but more space memory efficient implementation of LS viterbi.

    Only a single column of probabilities (V_previous) is carried between
    sites; the full pointer matrix P is still allocated for traceback.
    """
    match = np.equal(H[:, 0], s[0, 0]).astype(np.int64)
    V_previous = 1 / n * e[match, 0]
    V = np.zeros(n)
    P = np.zeros((n, m)).astype(np.int64)
    P[:, 0] = 0
    r_n = r / n
    return V, V_previous, P, r_n
@nb.jit
def forwards_viterbi_hap_naive(n, m, H, s, e, r):
    """Naive implementation of LS haploid Viterbi algorithm.

    n: number of reference haplotypes; m: number of sites.
    H: reference panel indexed as H[haplotype, site]; s: query, s[0, site].
    e: emission probabilities, row picked by whether H matches s at a site.
    r: per-site recombination probabilities.
    Returns the full (n, m) Viterbi matrix V, pointers P, and the log10
    likelihood of the best path.
    """
    # Initialise
    V, P, r_n = viterbi_naive_init(n, m, H, s, e, r)
    for j in range(1, m):
        for i in range(n):
            # Get the vector to maximise over
            v = np.zeros(n)
            for k in range(n):
                # Transition: stay on k == i (1 - r + r/n), else recombine (r/n).
                v[k] = e[np.int64(np.equal(H[i, j], s[0, j])), j] * V[k, j - 1]
                if k == i:
                    v[k] *= 1 - r[j] + r_n[j]
                else:
                    v[k] *= r_n[j]
            P[i, j] = np.argmax(v)
            V[i, j] = v[P[i, j]]
    ll = np.log10(np.amax(V[:, m - 1]))
    return V, P, ll
@nb.jit
def forwards_viterbi_hap_naive_vec(n, m, H, s, e, r):
    """Naive matrix based implementation of LS haploid forward Viterbi algorithm using numpy.

    Same recurrence as the fully naive version, but the innermost loop over
    source haplotypes k is replaced by vector operations.
    Returns the full (n, m) Viterbi matrix V, pointers P, and the log10
    likelihood of the best path.
    """
    # Initialise
    V, P, r_n = viterbi_naive_init(n, m, H, s, e, r)
    for j in range(1, m):
        # Recombination contribution, shared by every target haplotype.
        v_tmp = V[:, j - 1] * r_n[j]
        for i in range(n):
            v = np.copy(v_tmp)
            # Add the no-recombination bonus for staying on haplotype i.
            v[i] += V[i, j - 1] * (1 - r[j])
            v *= e[np.int64(np.equal(H[i, j], s[0, j])), j]
            P[i, j] = np.argmax(v)
            V[i, j] = v[P[i, j]]
    ll = np.log10(np.amax(V[:, m - 1]))
    return V, P, ll
def forwards_viterbi_hap_naive_full_vec(n, m, H, s, e, r):
    """Fully vectorised naive implementation of LS haploid forward Viterbi algorithm using numpy."""
    # Initialise V, P and the scaled recombination rates r/n.
    V, P, r_n = viterbi_naive_init(n, m, H, s, e, r)
    rows = np.arange(n)
    for j in range(1, m):
        # Score matrix: recombination term everywhere, plus the
        # no-recombination term on the diagonal.
        scores = np.tile(V[:, j - 1] * r_n[j], (n, 1)) + np.diag(V[:, j - 1] * (1 - r[j]))
        P[:, j] = np.argmax(scores, 1)
        match = np.equal(H[:, j], s[0, j]).astype(np.int64)
        V[:, j] = scores[rows, P[:, j]] * e[match, j]
    ll = np.log10(np.amax(V[:, m - 1]))
    return V, P, ll
@nb.jit
def forwards_viterbi_hap_naive_low_mem(n, m, H, s, e, r):
    """Naive implementation of LS haploid Viterbi algorithm, with reduced memory.

    Only the previous column of probabilities (V_previous) is retained
    between sites; the full pointer matrix P is still kept for traceback.
    Returns the final column V, pointers P, and the log10 likelihood.
    """
    # Initialise
    V, V_previous, P, r_n = viterbi_init(n, m, H, s, e, r)
    for j in range(1, m):
        for i in range(n):
            # Get the vector to maximise over
            v = np.zeros(n)
            for k in range(n):
                # Transition: stay on k == i (1 - r + r/n), else recombine (r/n).
                v[k] = e[np.int64(np.equal(H[i, j], s[0, j])), j] * V_previous[k]
                if k == i:
                    v[k] *= (1 - r[j]) + r_n[j]
                else:
                    v[k] *= r_n[j]
            P[i, j] = np.argmax(v)
            V[i] = v[P[i, j]]
        V_previous = np.copy(V)
    ll = np.log10(np.amax(V))
    return V, P, ll
@nb.jit
def forwards_viterbi_hap_naive_low_mem_rescaling(n, m, H, s, e, r):
    """Naive implementation of LS haploid Viterbi algorithm, with reduced memory and rescaling.

    Each column is divided by its maximum (stored in c) to avoid underflow;
    the log likelihood is recovered as sum(log10(c)) + log10(max(V)).
    Returns the final column V, pointers P, and the log10 likelihood.
    """
    # Initialise
    V, V_previous, P, r_n = viterbi_init(n, m, H, s, e, r)
    c = np.ones(m)
    for j in range(1, m):
        # Rescale the previous column by its maximum to keep values ~O(1).
        c[j] = np.amax(V_previous)
        V_previous *= 1 / c[j]
        for i in range(n):
            # Get the vector to maximise over
            v = np.zeros(n)
            for k in range(n):
                v[k] = e[np.int64(np.equal(H[i, j], s[0, j])), j] * V_previous[k]
                if k == i:
                    v[k] *= (1 - r[j]) + r_n[j]
                else:
                    v[k] *= r_n[j]
            P[i, j] = np.argmax(v)
            V[i] = v[P[i, j]]
        V_previous = np.copy(V)
    ll = np.sum(np.log10(c)) + np.log10(np.amax(V))
    return V, P, ll
@nb.jit
def forwards_viterbi_hap_low_mem_rescaling(n, m, H, s, e, r):
    """LS haploid Viterbi algorithm, with reduced memory and exploits the Markov process structure.

    After rescaling by the column maximum, the best predecessor is either
    the same haplotype (score 1 - r + r/n) or, when that falls below r/n,
    a recombination from the previous argmax -- so the inner loop over
    source haplotypes is eliminated.
    Returns the final column V, pointers P, and the log10 likelihood.
    """
    # Initialise
    V, V_previous, P, r_n = viterbi_init(n, m, H, s, e, r)
    c = np.ones(m)
    for j in range(1, m):
        argmax = np.argmax(V_previous)
        c[j] = V_previous[argmax]
        V_previous *= 1 / c[j]
        V = np.zeros(n)
        for i in range(n):
            # "Stay" score; the rescaled maximum is 1, so recombining from
            # the best state scores exactly r_n[j].
            V[i] = V_previous[i] * (1 - r[j] + r_n[j])
            P[i, j] = i
            if V[i] < r_n[j]:
                V[i] = r_n[j]
                P[i, j] = argmax
            V[i] *= e[np.int64(np.equal(H[i, j], s[0, j])), j]
        V_previous = np.copy(V)
    ll = np.sum(np.log10(c)) + np.log10(np.max(V))
    return V, P, ll
@nb.jit
def forwards_viterbi_hap_lower_mem_rescaling(n, m, H, s, e, r):
    """LS haploid Viterbi algorithm with even smaller memory footprint and exploits the Markov process structure.

    Like forwards_viterbi_hap_low_mem_rescaling, but updates the single
    column V in place instead of keeping a separate previous-column copy.
    Returns the final column V, pointers P, and the log10 likelihood.
    """
    # Initialise: uniform prior times the emission at the first site.
    V = 1 / n * e[np.equal(H[:, 0], s[0, 0]).astype(np.int64), 0]
    P = np.zeros((n, m)).astype(np.int64)
    P[:, 0] = 0
    r_n = r / n
    c = np.ones(m)
    for j in range(1, m):
        # Rescale by the column maximum (stored in c) to avoid underflow.
        argmax = np.argmax(V)
        c[j] = V[argmax]
        V *= 1 / c[j]
        for i in range(n):
            # Best predecessor is "stay" unless recombining from the
            # previous argmax (score exactly r_n[j]) beats it.
            V[i] = V[i] * (1 - r[j] + r_n[j])
            P[i, j] = i
            if V[i] < r_n[j]:
                V[i] = r_n[j]
                P[i, j] = argmax
            V[i] *= e[np.int64(np.equal(H[i, j], s[0, j])), j]
    ll = np.sum(np.log10(c)) + np.log10(np.max(V))
    return V, P, ll
@nb.jit
def backwards_viterbi_hap(m, V_last, P):
    """Run a backwards pass to determine the most likely path."""
    assert len(V_last.shape) == 1
    # Start from the most likely final state, then follow the pointers back.
    path = np.zeros(m).astype(np.int64)
    path[m - 1] = np.argmax(V_last)
    for site in range(m - 2, -1, -1):
        path[site] = P[path[site + 1], site + 1]
    return path
@nb.jit
def path_ll_hap(n, m, H, path, s, e, r):
    """Evaluate log-likelihood path through a reference panel which results in sequence s."""
    # Emission at the first site under a uniform prior over haplotypes.
    match = np.int64(np.equal(H[path[0], 0], s[0, 0]))
    log_prob_path = np.log10((1 / n) * e[match, 0])
    old = path[0]
    r_n = r / n
    for site in range(1, m):
        match = np.int64(np.equal(H[path[site], site], s[0, site]))
        current = path[site]
        # Staying on the same haplotype vs. recombining to a different one.
        if old == current:
            log_prob_path += np.log10((1 - r[site]) + r_n[site])
        else:
            log_prob_path += np.log10(r_n[site])
        log_prob_path += np.log10(e[match, site])
        old = current
    return log_prob_path
| [
"numpy.copy",
"numpy.argmax",
"numpy.zeros",
"numpy.ones",
"numpy.equal",
"numpy.amax",
"numpy.max",
"numpy.tile",
"numpy.diag",
"numpy.log10"
] | [((289, 305), 'numpy.zeros', 'np.zeros', (['(n, m)'], {}), '((n, m))\n', (297, 305), True, 'import numpy as np\n'), ((705, 716), 'numpy.zeros', 'np.zeros', (['n'], {}), '(n)\n', (713, 716), True, 'import numpy as np\n'), ((3686, 3696), 'numpy.ones', 'np.ones', (['m'], {}), '(m)\n', (3693, 3696), True, 'import numpy as np\n'), ((4567, 4577), 'numpy.ones', 'np.ones', (['m'], {}), '(m)\n', (4574, 4577), True, 'import numpy as np\n'), ((5456, 5466), 'numpy.ones', 'np.ones', (['m'], {}), '(m)\n', (5463, 5466), True, 'import numpy as np\n'), ((6123, 6140), 'numpy.argmax', 'np.argmax', (['V_last'], {}), '(V_last)\n', (6132, 6140), True, 'import numpy as np\n'), ((6454, 6483), 'numpy.log10', 'np.log10', (['(1 / n * e[index, 0])'], {}), '(1 / n * e[index, 0])\n', (6462, 6483), True, 'import numpy as np\n'), ((1487, 1507), 'numpy.amax', 'np.amax', (['V[:, m - 1]'], {}), '(V[:, m - 1])\n', (1494, 1507), True, 'import numpy as np\n'), ((2075, 2095), 'numpy.amax', 'np.amax', (['V[:, m - 1]'], {}), '(V[:, m - 1])\n', (2082, 2095), True, 'import numpy as np\n'), ((2481, 2496), 'numpy.argmax', 'np.argmax', (['v', '(1)'], {}), '(v, 1)\n', (2490, 2496), True, 'import numpy as np\n'), ((2631, 2651), 'numpy.amax', 'np.amax', (['V[:, m - 1]'], {}), '(V[:, m - 1])\n', (2638, 2651), True, 'import numpy as np\n'), ((3362, 3372), 'numpy.copy', 'np.copy', (['V'], {}), '(V)\n', (3369, 3372), True, 'import numpy as np\n'), ((3392, 3402), 'numpy.amax', 'np.amax', (['V'], {}), '(V)\n', (3399, 3402), True, 'import numpy as np\n'), ((3739, 3758), 'numpy.amax', 'np.amax', (['V_previous'], {}), '(V_previous)\n', (3746, 3758), True, 'import numpy as np\n'), ((4223, 4233), 'numpy.copy', 'np.copy', (['V'], {}), '(V)\n', (4230, 4233), True, 'import numpy as np\n'), ((4622, 4643), 'numpy.argmax', 'np.argmax', (['V_previous'], {}), '(V_previous)\n', (4631, 4643), True, 'import numpy as np\n'), ((4721, 4732), 'numpy.zeros', 'np.zeros', (['n'], {}), '(n)\n', (4729, 4732), True, 'import numpy as np\n'), 
((5016, 5026), 'numpy.copy', 'np.copy', (['V'], {}), '(V)\n', (5023, 5026), True, 'import numpy as np\n'), ((5511, 5523), 'numpy.argmax', 'np.argmax', (['V'], {}), '(V)\n', (5520, 5523), True, 'import numpy as np\n'), ((6400, 6432), 'numpy.equal', 'np.equal', (['H[path[0], 0]', 's[0, 0]'], {}), '(H[path[0], 0], s[0, 0])\n', (6408, 6432), True, 'import numpy as np\n'), ((6825, 6846), 'numpy.log10', 'np.log10', (['e[index, l]'], {}), '(e[index, l])\n', (6833, 6846), True, 'import numpy as np\n'), ((314, 330), 'numpy.zeros', 'np.zeros', (['(n, m)'], {}), '((n, m))\n', (322, 330), True, 'import numpy as np\n'), ((725, 741), 'numpy.zeros', 'np.zeros', (['(n, m)'], {}), '((n, m))\n', (733, 741), True, 'import numpy as np\n'), ((1147, 1158), 'numpy.zeros', 'np.zeros', (['n'], {}), '(n)\n', (1155, 1158), True, 'import numpy as np\n'), ((1422, 1434), 'numpy.argmax', 'np.argmax', (['v'], {}), '(v)\n', (1431, 1434), True, 'import numpy as np\n'), ((1868, 1882), 'numpy.copy', 'np.copy', (['v_tmp'], {}), '(v_tmp)\n', (1875, 1882), True, 'import numpy as np\n'), ((2010, 2022), 'numpy.argmax', 'np.argmax', (['v'], {}), '(v)\n', (2019, 2022), True, 'import numpy as np\n'), ((2389, 2426), 'numpy.tile', 'np.tile', (['(V[:, j - 1] * r_n[j])', '(n, 1)'], {}), '(V[:, j - 1] * r_n[j], (n, 1))\n', (2396, 2426), True, 'import numpy as np\n'), ((2429, 2462), 'numpy.diag', 'np.diag', (['(V[:, j - 1] * (1 - r[j]))'], {}), '(V[:, j - 1] * (1 - r[j]))\n', (2436, 2462), True, 'import numpy as np\n'), ((3019, 3030), 'numpy.zeros', 'np.zeros', (['n'], {}), '(n)\n', (3027, 3030), True, 'import numpy as np\n'), ((3298, 3310), 'numpy.argmax', 'np.argmax', (['v'], {}), '(v)\n', (3307, 3310), True, 'import numpy as np\n'), ((3879, 3890), 'numpy.zeros', 'np.zeros', (['n'], {}), '(n)\n', (3887, 3890), True, 'import numpy as np\n'), ((4158, 4170), 'numpy.argmax', 'np.argmax', (['v'], {}), '(v)\n', (4167, 4170), True, 'import numpy as np\n'), ((4251, 4262), 'numpy.log10', 'np.log10', (['c'], {}), '(c)\n', 
(4259, 4262), True, 'import numpy as np\n'), ((4275, 4285), 'numpy.amax', 'np.amax', (['V'], {}), '(V)\n', (4282, 4285), True, 'import numpy as np\n'), ((5044, 5055), 'numpy.log10', 'np.log10', (['c'], {}), '(c)\n', (5052, 5055), True, 'import numpy as np\n'), ((5068, 5077), 'numpy.max', 'np.max', (['V'], {}), '(V)\n', (5074, 5077), True, 'import numpy as np\n'), ((5382, 5398), 'numpy.zeros', 'np.zeros', (['(n, m)'], {}), '((n, m))\n', (5390, 5398), True, 'import numpy as np\n'), ((5841, 5852), 'numpy.log10', 'np.log10', (['c'], {}), '(c)\n', (5849, 5852), True, 'import numpy as np\n'), ((5865, 5874), 'numpy.max', 'np.max', (['V'], {}), '(V)\n', (5871, 5874), True, 'import numpy as np\n'), ((6076, 6087), 'numpy.zeros', 'np.zeros', (['m'], {}), '(m)\n', (6084, 6087), True, 'import numpy as np\n'), ((6572, 6604), 'numpy.equal', 'np.equal', (['H[path[l], l]', 's[0, l]'], {}), '(H[path[l], l], s[0, l])\n', (6580, 6604), True, 'import numpy as np\n'), ((6709, 6736), 'numpy.log10', 'np.log10', (['(1 - r[l] + r_n[l])'], {}), '(1 - r[l] + r_n[l])\n', (6717, 6736), True, 'import numpy as np\n'), ((6782, 6798), 'numpy.log10', 'np.log10', (['r_n[l]'], {}), '(r_n[l])\n', (6790, 6798), True, 'import numpy as np\n'), ((372, 398), 'numpy.equal', 'np.equal', (['H[:, 0]', 's[0, 0]'], {}), '(H[:, 0], s[0, 0])\n', (380, 398), True, 'import numpy as np\n'), ((649, 675), 'numpy.equal', 'np.equal', (['H[:, 0]', 's[0, 0]'], {}), '(H[:, 0], s[0, 0])\n', (657, 675), True, 'import numpy as np\n'), ((1956, 1982), 'numpy.equal', 'np.equal', (['H[i, j]', 's[0, j]'], {}), '(H[i, j], s[0, j])\n', (1964, 1982), True, 'import numpy as np\n'), ((4963, 4989), 'numpy.equal', 'np.equal', (['H[i, j]', 's[0, j]'], {}), '(H[i, j], s[0, j])\n', (4971, 4989), True, 'import numpy as np\n'), ((5326, 5352), 'numpy.equal', 'np.equal', (['H[:, 0]', 's[0, 0]'], {}), '(H[:, 0], s[0, 0])\n', (5334, 5352), True, 'import numpy as np\n'), ((5792, 5818), 'numpy.equal', 'np.equal', (['H[i, j]', 's[0, j]'], {}), '(H[i, 
j], s[0, j])\n', (5800, 5818), True, 'import numpy as np\n'), ((2554, 2580), 'numpy.equal', 'np.equal', (['H[:, j]', 's[0, j]'], {}), '(H[:, j], s[0, j])\n', (2562, 2580), True, 'import numpy as np\n'), ((1224, 1250), 'numpy.equal', 'np.equal', (['H[i, j]', 's[0, j]'], {}), '(H[i, j], s[0, j])\n', (1232, 1250), True, 'import numpy as np\n'), ((3096, 3122), 'numpy.equal', 'np.equal', (['H[i, j]', 's[0, j]'], {}), '(H[i, j], s[0, j])\n', (3104, 3122), True, 'import numpy as np\n'), ((3956, 3982), 'numpy.equal', 'np.equal', (['H[i, j]', 's[0, j]'], {}), '(H[i, j], s[0, j])\n', (3964, 3982), True, 'import numpy as np\n')] |
from multiprocessing import Pool
import matplotlib.pyplot as plt
import numpy as np
from progress.bar import Bar
from network_simulator.components import simulator
from network_simulator.helpers import writeSimCache, readSimCache
def main():
    # Runs one simulation using the module-level globals (g_*) that mab()
    # publishes first; keeping them global makes this function picklable
    # for multiprocessing.Pool workers.
    return simulator(g_init_vars, g_aplist, g_usrlist)
def mab(init_vars, aplist, usrlist):
    """Sweep the epsilon parameter of the epsilon-greedy MAB sharing policy
    and plot the average number of serviced users per epsilon.

    Returns the matplotlib.pyplot module so the caller can further
    customise or save the figure.
    """
    # Publish the inputs as module globals so main() stays picklable for
    # multiprocessing.Pool workers (used by the commented-out UCB sweep).
    global g_init_vars, g_aplist, g_usrlist
    g_init_vars = init_vars
    g_aplist = aplist
    g_usrlist = usrlist
    plot_from_saved = 0
    # Number of repeated simulation runs averaged per epsilon value.
    total_runs = range(1)
    init_vars["SHARE_ENERGY"] = 6
    init_vars["SMART_PARAM"] = [0.5, 2]
    ucbscale = np.arange(0.5, 3, 0.5)
    # Epsilon values to sweep for the epsilon-greedy policy.
    epsilons = np.arange(0.01, 0.5, 0.01)
    avg_serviced_user_mab = []
    bar = Bar("MAB epsilon ", max=len(epsilons))
    # SHARE_ENERGY is reset to 5 before the sweep (presumably the id of the
    # MAB-based sharing strategy — confirm against the simulator).
    init_vars["SHARE_ENERGY"] = 5
    for epsilon in epsilons:
        init_vars["SMART_PARAM"] = [epsilon, 12]
        serviced_user_mab = []
        for run in total_runs:
            serviced_user_mab.append(simulator(init_vars, aplist, usrlist))
        avg_serviced_user_mab.append(sum(serviced_user_mab)/ len(total_runs))
        bar.next()
    bar.finish()
    plt.figure(1)
    plt.plot(epsilons, avg_serviced_user_mab)
    plt.show()
    # if plot_from_saved == 0:
    #     bar = Bar("UCB1", max=len(ucbscale))
    #     # init_vars["SMART_PARAM"] = [0, 2]
    #     init_vars["ENERGY_BUDGET"] = 0.03
    #     _avg_serviced_users = []
    #     # serviced_user_mab = []
    #     for scale in ucbscale:
    #         init_vars["SMART_PARAM"] = [scale, 2]
    #         pool = Pool(10)
    #         _serviced_users = [pool.apply_async(main, ()) for run in total_runs]
    #         _avg_serviced_users.append(sum([result.get() for result in _serviced_users]) / len(total_runs))
    #         bar.next()
    #     bar.finish()
    #     # writeSimCache("UCB MAB - Scale(0.5,3,0.5)", _avg_serviced_users)
    # else:
    #     _avg_serviced_users = readSimCache("UCB MAB - Scale(0.5,3,0.5)")
    # print(_avg_serviced_users)
    # print(ucbscale)
    # # print(main())
    # plt.figure(1)
    # plt.plot(ucbscale, _avg_serviced_users)
    # plt.show()
    # print(simulator(init_vars, aplist, usrlist))
    return plt
| [
"matplotlib.pyplot.show",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.figure",
"network_simulator.components.simulator",
"numpy.arange"
] | [((254, 297), 'network_simulator.components.simulator', 'simulator', (['g_init_vars', 'g_aplist', 'g_usrlist'], {}), '(g_init_vars, g_aplist, g_usrlist)\n', (263, 297), False, 'from network_simulator.components import simulator\n'), ((601, 623), 'numpy.arange', 'np.arange', (['(0.5)', '(3)', '(0.5)'], {}), '(0.5, 3, 0.5)\n', (610, 623), True, 'import numpy as np\n'), ((639, 665), 'numpy.arange', 'np.arange', (['(0.01)', '(0.5)', '(0.01)'], {}), '(0.01, 0.5, 0.01)\n', (648, 665), True, 'import numpy as np\n'), ((1120, 1133), 'matplotlib.pyplot.figure', 'plt.figure', (['(1)'], {}), '(1)\n', (1130, 1133), True, 'import matplotlib.pyplot as plt\n'), ((1138, 1179), 'matplotlib.pyplot.plot', 'plt.plot', (['epsilons', 'avg_serviced_user_mab'], {}), '(epsilons, avg_serviced_user_mab)\n', (1146, 1179), True, 'import matplotlib.pyplot as plt\n'), ((1184, 1194), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1192, 1194), True, 'import matplotlib.pyplot as plt\n'), ((960, 997), 'network_simulator.components.simulator', 'simulator', (['init_vars', 'aplist', 'usrlist'], {}), '(init_vars, aplist, usrlist)\n', (969, 997), False, 'from network_simulator.components import simulator\n')] |
import shutil
import os
import fire
import numpy as np
import cv2
import geopandas as gpd
import skimage.io
from tqdm import tqdm
from scipy import ndimage as ndi
from skimage import measure
from skimage.morphology import dilation, square, watershed
from scipy.ndimage import binary_erosion
from multiprocessing.pool import Pool
from multiprocessing import cpu_count
from rasterio import features
from scipy.ndimage import binary_dilation
def create_separation(labels):
    """Return a boolean mask marking separation lines between touching labels.

    A watershed with watershed_line=True is run over the dilated foreground;
    the pixels the line removes are thickened, and only those whose 3x3
    neighbourhood contains more than one distinct label are kept.
    """
    grown = dilation(labels > 0, square(3))
    ridge = watershed(grown, labels, mask=grown, watershed_line=True) > 0
    # Pixels removed by the watershed line, thickened for visibility.
    border = dilation(grown ^ ridge, square(3))

    msk1 = np.zeros_like(labels, dtype='bool')
    height = labels.shape[0]
    width = labels.shape[1]
    for y0 in range(height):
        for x0 in range(width):
            if not border[y0, x0]:
                continue
            sz = 1
            window = labels[max(0, y0 - sz):min(height, y0 + sz + 1),
                            max(0, x0 - sz):min(width, x0 + sz + 1)]
            uniq = np.unique(window)
            # Keep only line pixels that actually sit between two labels.
            if len(uniq[uniq > 0]) > 1:
                msk1[y0, x0] = True
    return msk1
def mask_fro_id(param):
    """Rasterize one tile's building footprints into a 3-channel mask TIFF.

    param is (tile_id, labels_path, rasters_path, result_path).  The output
    channels are: eroded footprints (R), inner contours (G) and watershed
    separation lines between touching buildings (B), each scaled to 0/255.
    """
    tile_id, labels_path, rasters_path, result_path = param
    label_path = os.path.join(labels_path, tile_id + '_Buildings.geojson')
    raster_path = os.path.join(rasters_path, tile_id + '.tif')
    geoms = gpd.read_file(label_path)['geometry'].tolist()
    image = cv2.imread(raster_path)
    h, w, _ = image.shape

    footprints = np.zeros((h, w), dtype=np.int64)
    outer_edges = np.zeros((h, w), dtype=np.int64)
    inner_edges = np.zeros((h, w), dtype=np.int64)
    contour_size = 1
    for geom in geoms:
        mask = features.rasterize([(geom, 1)], out_shape=(h, w))
        footprints += mask
        # One-pixel ring just outside / just inside the footprint.
        outer_edges += binary_dilation(mask, iterations=contour_size) ^ mask
        inner_edges += binary_erosion(mask, iterations=contour_size) ^ mask

    outer_edges = (outer_edges > 0).astype(np.uint8)
    inner_edges = (inner_edges > 0).astype(np.uint8)
    footprints = (footprints > 0).astype(np.uint8)
    # Carve away the outer ring so adjacent buildings stay separable.
    footprints[outer_edges == 1] = 0
    labels = ndi.label(footprints, output=np.uint32)[0]
    separation = create_separation(labels).astype(np.uint8)

    result = np.zeros((h, w, 3), dtype=np.uint8)
    result[:, :, 0] = footprints * 255
    result[:, :, 1] = inner_edges * 255
    result[:, :, 2] = separation * 255
    out_path = os.path.join(result_path, tile_id + '.tif')
    skimage.io.imsave(out_path, result, plugin='tifffile')
def create_masks(data_root_path='/data/SN7_buildings/train/',
                 result_path='/wdata/train_masks/'):
    """Regenerate the training masks for every tile under data_root_path.

    Collects one (stem, labels_path, rasters_path, out_path) job per non-UDM
    GeoJSON label file, then fans the jobs out over all CPU cores with
    mask_fro_id.  Any existing result_path is removed first.
    """
    # Start from a clean output directory.
    if os.path.exists(result_path):
        shutil.rmtree(result_path)
    os.mkdir(result_path)

    all_params = []
    for tile_id in tqdm(os.listdir(data_root_path)):
        id_path = os.path.join(data_root_path, tile_id)
        if not os.path.isdir(id_path):
            continue
        sub_res_path = os.path.join(result_path, tile_id)
        os.mkdir(sub_res_path)
        labels_path = os.path.join(id_path, 'labels_match_pix')
        rasters_path = os.path.join(id_path, 'images')
        for name in sorted(os.listdir(labels_path)):
            if 'UDM' in name:
                continue
            # '<stem>_Buildings.geojson' -> '<stem>'
            stem = '_'.join(name.split('.')[0].split('_')[:-1])
            all_params.append((stem, labels_path, rasters_path, sub_res_path))

    pool = Pool(cpu_count())
    for _ in tqdm(pool.imap_unordered(mask_fro_id, all_params), total=len(all_params)):
        pass
if __name__ == '__main__':
    # Expose create_masks on the command line via python-fire
    # (keyword arguments become CLI flags).
    fire.Fire(create_masks)
| [
"os.mkdir",
"shutil.rmtree",
"os.path.join",
"multiprocessing.cpu_count",
"numpy.zeros_like",
"os.path.exists",
"multiprocessing.pool.Pool",
"rasterio.features.rasterize",
"geopandas.read_file",
"tqdm.tqdm",
"scipy.ndimage.binary_dilation",
"skimage.morphology.watershed",
"skimage.morphology... | [((651, 686), 'numpy.zeros_like', 'np.zeros_like', (['labels'], {'dtype': '"""bool"""'}), "(labels, dtype='bool')\n", (664, 686), True, 'import numpy as np\n'), ((1203, 1256), 'os.path.join', 'os.path.join', (['labels_path', "(_id + '_Buildings.geojson')"], {}), "(labels_path, _id + '_Buildings.geojson')\n", (1215, 1256), False, 'import os\n'), ((1275, 1315), 'os.path.join', 'os.path.join', (['rasters_path', "(_id + '.tif')"], {}), "(rasters_path, _id + '.tif')\n", (1287, 1315), False, 'import os\n'), ((1387, 1410), 'cv2.imread', 'cv2.imread', (['raster_path'], {}), '(raster_path)\n', (1397, 1410), False, 'import cv2\n'), ((1454, 1486), 'numpy.zeros', 'np.zeros', (['(h, w)'], {'dtype': 'np.int64'}), '((h, w), dtype=np.int64)\n', (1462, 1486), True, 'import numpy as np\n'), ((1506, 1538), 'numpy.zeros', 'np.zeros', (['(h, w)'], {'dtype': 'np.int64'}), '((h, w), dtype=np.int64)\n', (1514, 1538), True, 'import numpy as np\n'), ((1558, 1590), 'numpy.zeros', 'np.zeros', (['(h, w)'], {'dtype': 'np.int64'}), '((h, w), dtype=np.int64)\n', (1566, 1590), True, 'import numpy as np\n'), ((2409, 2444), 'numpy.zeros', 'np.zeros', (['(h, w, 3)'], {'dtype': 'np.uint8'}), '((h, w, 3), dtype=np.uint8)\n', (2417, 2444), True, 'import numpy as np\n'), ((2578, 2617), 'os.path.join', 'os.path.join', (['result_path', "(_id + '.tif')"], {}), "(result_path, _id + '.tif')\n", (2590, 2617), False, 'import os\n'), ((2802, 2829), 'os.path.exists', 'os.path.exists', (['result_path'], {}), '(result_path)\n', (2816, 2829), False, 'import os\n'), ((2870, 2891), 'os.mkdir', 'os.mkdir', (['result_path'], {}), '(result_path)\n', (2878, 2891), False, 'import os\n'), ((2902, 2928), 'os.listdir', 'os.listdir', (['data_root_path'], {}), '(data_root_path)\n', (2912, 2928), False, 'import os\n'), ((2964, 2976), 'tqdm.tqdm', 'tqdm', (['ids[:]'], {}), '(ids[:])\n', (2968, 2976), False, 'from tqdm import tqdm\n'), ((3602, 3613), 'multiprocessing.cpu_count', 'cpu_count', ([], {}), 
'()\n', (3611, 3613), False, 'from multiprocessing import cpu_count\n'), ((3625, 3637), 'multiprocessing.pool.Pool', 'Pool', (['n_cpus'], {}), '(n_cpus)\n', (3629, 3637), False, 'from multiprocessing.pool import Pool\n'), ((3770, 3793), 'fire.Fire', 'fire.Fire', (['create_masks'], {}), '(create_masks)\n', (3779, 3793), False, 'import fire\n'), ((503, 512), 'skimage.morphology.square', 'square', (['(3)'], {}), '(3)\n', (509, 512), False, 'from skimage.morphology import dilation, square, watershed\n'), ((525, 578), 'skimage.morphology.watershed', 'watershed', (['tmp', 'labels'], {'mask': 'tmp', 'watershed_line': '(True)'}), '(tmp, labels, mask=tmp, watershed_line=True)\n', (534, 578), False, 'from skimage.morphology import dilation, square, watershed\n'), ((628, 637), 'skimage.morphology.square', 'square', (['(3)'], {}), '(3)\n', (634, 637), False, 'from skimage.morphology import dilation, square, watershed\n'), ((1660, 1713), 'rasterio.features.rasterize', 'features.rasterize', (['[(geoms[i], 1)]'], {'out_shape': '(h, w)'}), '([(geoms[i], 1)], out_shape=(h, w))\n', (1678, 1713), False, 'from rasterio import features\n'), ((1758, 1804), 'scipy.ndimage.binary_dilation', 'binary_dilation', (['mask'], {'iterations': 'contour_size'}), '(mask, iterations=contour_size)\n', (1773, 1804), False, 'from scipy.ndimage import binary_dilation\n'), ((1900, 1945), 'scipy.ndimage.binary_erosion', 'binary_erosion', (['mask'], {'iterations': 'contour_size'}), '(mask, iterations=contour_size)\n', (1914, 1945), False, 'from scipy.ndimage import binary_erosion\n'), ((2233, 2271), 'scipy.ndimage.label', 'ndi.label', (['buildings'], {'output': 'np.uint32'}), '(buildings, output=np.uint32)\n', (2242, 2271), True, 'from scipy import ndimage as ndi\n'), ((2839, 2865), 'shutil.rmtree', 'shutil.rmtree', (['result_path'], {}), '(result_path)\n', (2852, 2865), False, 'import shutil\n'), ((2996, 3029), 'os.path.join', 'os.path.join', (['data_root_path', '_id'], {}), '(data_root_path, _id)\n', 
(3008, 3029), False, 'import os\n'), ((3113, 3143), 'os.path.join', 'os.path.join', (['result_path', '_id'], {}), '(result_path, _id)\n', (3125, 3143), False, 'import os\n'), ((3152, 3174), 'os.mkdir', 'os.mkdir', (['sub_res_path'], {}), '(sub_res_path)\n', (3160, 3174), False, 'import os\n'), ((3197, 3238), 'os.path.join', 'os.path.join', (['id_path', '"""labels_match_pix"""'], {}), "(id_path, 'labels_match_pix')\n", (3209, 3238), False, 'import os\n'), ((3262, 3293), 'os.path.join', 'os.path.join', (['id_path', '"""images"""'], {}), "(id_path, 'images')\n", (3274, 3293), False, 'import os\n'), ((3045, 3067), 'os.path.isdir', 'os.path.isdir', (['id_path'], {}), '(id_path)\n', (3058, 3067), False, 'import os\n'), ((3318, 3341), 'os.listdir', 'os.listdir', (['labels_path'], {}), '(labels_path)\n', (3328, 3341), False, 'import os\n'), ((1328, 1353), 'geopandas.read_file', 'gpd.read_file', (['label_path'], {}), '(label_path)\n', (1341, 1353), True, 'import geopandas as gpd\n')] |
import torch
from torch.utils.data import DataLoader
import numpy as np
import json
from os.path import join
from models import utils, caption
from datasets import coco
from configuration import Config
from engine_cap import train_one_epoch, evaluate, get_model_cider_score
from vocab import Vocabulary
from train_utils import save_ckp, load_ckp
CUDA_LAUNCH_BLOCKING = 1
def main(config):
    """Train the image-captioning model end-to-end as specified by config.

    Builds vocabulary, model, optimizer/scheduler and three data loaders
    (train, validation-loss, and a unique-image validation set for CIDEr),
    then runs the epoch loop: train, step the LR scheduler, checkpoint,
    evaluate validation loss and CIDEr, optionally dump generated captions.
    """
    device = torch.device(config.device)
    print(f'Initializing Device: {device}')
    # Offset the seed by the distributed rank so workers differ.
    seed = config.seed + utils.get_rank()
    torch.manual_seed(seed)
    np.random.seed(seed)
    vocab = Vocabulary()
    vocab.read_json(config.coco_vocab_path)
    model, criterion = caption.build_model(config, len(vocab))
    model.to(device)
    n_parameters = sum(p.numel()
                       for p in model.parameters() if p.requires_grad)
    print(f"Number of params: {n_parameters}")
    # Backbone parameters get their own (typically smaller) learning rate.
    param_dicts = [
        {"params": [p for n, p in model.named_parameters(
        ) if "backbone" not in n and p.requires_grad]},
        {
            "params": [p for n, p in model.named_parameters() if "backbone" in n and p.requires_grad],
            "lr": config.lr_backbone,
        },
    ]
    optimizer = torch.optim.AdamW(
        param_dicts, lr=config.lr, weight_decay=config.weight_decay)
    lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer, config.lr_drop)
    dataset_train = coco.build_dataset(config, vocab, mode='training')
    dataset_val = coco.build_dataset(config, vocab, mode='validation')
    print(f"Train: {len(dataset_train)}")
    print(f"Valid: {len(dataset_val)}")
    sampler_train = torch.utils.data.RandomSampler(dataset_train)
    sampler_val = torch.utils.data.SequentialSampler(dataset_val)
    batch_sampler_train = torch.utils.data.BatchSampler(
        sampler_train, config.batch_size, drop_last=True
    )
    data_loader_train = DataLoader(
        dataset_train, batch_sampler=batch_sampler_train, num_workers=config.num_workers)
    data_loader_val = DataLoader(dataset_val, config.batch_size,
                                 sampler=sampler_val, drop_last=False, num_workers=config.num_workers)
    # CHANGED: build another data loader for cider eval (val set w/ unique images)
    dataset_cider = coco.build_dataset(
        config, vocab, mode='validation', return_unique=True)
    sampler_cider = torch.utils.data.SequentialSampler(dataset_cider)
    data_loader_cider = DataLoader(
        dataset_cider, config.batch_size, sampler=sampler_cider,
        drop_last=False, num_workers=config.num_workers
    )
    # TODO: finish
    # Resume-from-checkpoint path; currently disabled (empty checkpoint path).
    return_training = False
    if return_training:
        print("Loading Checkpoint...")
        path = ''
        epoch, model, optimizer, lr_scheduler, _, _, _ = load_ckp(
            model, optimizer, lr_scheduler, path
        )
        config.start_epoch = epoch + 1
    print("Start Training..")
    for epoch in range(config.start_epoch, config.epochs):
        print(f"Epoch: {epoch}")
        epoch_loss = train_one_epoch(
            model, criterion, data_loader_train, optimizer, device, epoch, config.clip_max_norm)
        lr_scheduler.step()
        print(f"Training Loss: {epoch_loss}")
        # Rolling checkpoint (overwritten every epoch).
        torch.save({
            'model': model.state_dict(),
            'optimizer': optimizer.state_dict(),
            'lr_scheduler': lr_scheduler.state_dict(),
            'epoch': epoch,
        }, config.checkpoint)
        validation_loss = evaluate(model, criterion, data_loader_val, device)
        print(f"Validation Loss: {validation_loss}")
        cider, hypotheses, references, ids = get_model_cider_score(
            model, vocab, data_loader_cider,
            device, max_len=config.max_position_embeddings
        )
        print(f"CIDEr score: {cider}")
        if config.save_intermediate_caps:
            # Dump this epoch's generated captions alongside references.
            out_caps_path = join(
                config.model_dir, f'{config.coco_model_name}_generated_e{epoch}.json')
            ids = [int(i) for i in ids]
            out_dict = {
                i: {'hypotheses': h, 'references': r}
                for i, h, r in zip(ids, hypotheses, references)
            }
            print(f"Save Generated Captions for Epoch to File: {out_caps_path}")
            with open(out_caps_path, 'w') as f:
                json.dump(out_dict, f)
        # Per-epoch named checkpoint with losses and CIDEr recorded.
        fname = f'{config.coco_model_name}_e{epoch}.pth'
        path = join(config.model_dir, fname)
        save_ckp(epoch, model, optimizer, lr_scheduler, epoch_loss, validation_loss, cider, path)
        # TODO: early stopping w/ cider condition
        print()
if __name__ == "__main__":
    # Build the default configuration and launch training.
    config = Config()
    main(config)
| [
"numpy.random.seed",
"torch.optim.lr_scheduler.StepLR",
"torch.utils.data.RandomSampler",
"datasets.coco.build_dataset",
"torch.optim.AdamW",
"models.utils.get_rank",
"torch.device",
"os.path.join",
"train_utils.save_ckp",
"vocab.Vocabulary",
"torch.utils.data.DataLoader",
"engine_cap.train_on... | [((407, 434), 'torch.device', 'torch.device', (['config.device'], {}), '(config.device)\n', (419, 434), False, 'import torch\n'), ((526, 549), 'torch.manual_seed', 'torch.manual_seed', (['seed'], {}), '(seed)\n', (543, 549), False, 'import torch\n'), ((554, 574), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (568, 574), True, 'import numpy as np\n'), ((588, 600), 'vocab.Vocabulary', 'Vocabulary', ([], {}), '()\n', (598, 600), False, 'from vocab import Vocabulary\n'), ((1201, 1279), 'torch.optim.AdamW', 'torch.optim.AdamW', (['param_dicts'], {'lr': 'config.lr', 'weight_decay': 'config.weight_decay'}), '(param_dicts, lr=config.lr, weight_decay=config.weight_decay)\n', (1218, 1279), False, 'import torch\n'), ((1308, 1366), 'torch.optim.lr_scheduler.StepLR', 'torch.optim.lr_scheduler.StepLR', (['optimizer', 'config.lr_drop'], {}), '(optimizer, config.lr_drop)\n', (1339, 1366), False, 'import torch\n'), ((1388, 1438), 'datasets.coco.build_dataset', 'coco.build_dataset', (['config', 'vocab'], {'mode': '"""training"""'}), "(config, vocab, mode='training')\n", (1406, 1438), False, 'from datasets import coco\n'), ((1457, 1509), 'datasets.coco.build_dataset', 'coco.build_dataset', (['config', 'vocab'], {'mode': '"""validation"""'}), "(config, vocab, mode='validation')\n", (1475, 1509), False, 'from datasets import coco\n'), ((1613, 1658), 'torch.utils.data.RandomSampler', 'torch.utils.data.RandomSampler', (['dataset_train'], {}), '(dataset_train)\n', (1643, 1658), False, 'import torch\n'), ((1677, 1724), 'torch.utils.data.SequentialSampler', 'torch.utils.data.SequentialSampler', (['dataset_val'], {}), '(dataset_val)\n', (1711, 1724), False, 'import torch\n'), ((1752, 1831), 'torch.utils.data.BatchSampler', 'torch.utils.data.BatchSampler', (['sampler_train', 'config.batch_size'], {'drop_last': '(True)'}), '(sampler_train, config.batch_size, drop_last=True)\n', (1781, 1831), False, 'import torch\n'), ((1871, 1968), 
'torch.utils.data.DataLoader', 'DataLoader', (['dataset_train'], {'batch_sampler': 'batch_sampler_train', 'num_workers': 'config.num_workers'}), '(dataset_train, batch_sampler=batch_sampler_train, num_workers=\n config.num_workers)\n', (1881, 1968), False, 'from torch.utils.data import DataLoader\n'), ((1995, 2112), 'torch.utils.data.DataLoader', 'DataLoader', (['dataset_val', 'config.batch_size'], {'sampler': 'sampler_val', 'drop_last': '(False)', 'num_workers': 'config.num_workers'}), '(dataset_val, config.batch_size, sampler=sampler_val, drop_last=\n False, num_workers=config.num_workers)\n', (2005, 2112), False, 'from torch.utils.data import DataLoader\n'), ((2245, 2317), 'datasets.coco.build_dataset', 'coco.build_dataset', (['config', 'vocab'], {'mode': '"""validation"""', 'return_unique': '(True)'}), "(config, vocab, mode='validation', return_unique=True)\n", (2263, 2317), False, 'from datasets import coco\n'), ((2347, 2396), 'torch.utils.data.SequentialSampler', 'torch.utils.data.SequentialSampler', (['dataset_cider'], {}), '(dataset_cider)\n', (2381, 2396), False, 'import torch\n'), ((2421, 2541), 'torch.utils.data.DataLoader', 'DataLoader', (['dataset_cider', 'config.batch_size'], {'sampler': 'sampler_cider', 'drop_last': '(False)', 'num_workers': 'config.num_workers'}), '(dataset_cider, config.batch_size, sampler=sampler_cider,\n drop_last=False, num_workers=config.num_workers)\n', (2431, 2541), False, 'from torch.utils.data import DataLoader\n'), ((4605, 4613), 'configuration.Config', 'Config', ([], {}), '()\n', (4611, 4613), False, 'from configuration import Config\n'), ((505, 521), 'models.utils.get_rank', 'utils.get_rank', ([], {}), '()\n', (519, 521), False, 'from models import utils, caption\n'), ((2746, 2792), 'train_utils.load_ckp', 'load_ckp', (['model', 'optimizer', 'lr_scheduler', 'path'], {}), '(model, optimizer, lr_scheduler, path)\n', (2754, 2792), False, 'from train_utils import save_ckp, load_ckp\n'), ((2998, 3102), 
'engine_cap.train_one_epoch', 'train_one_epoch', (['model', 'criterion', 'data_loader_train', 'optimizer', 'device', 'epoch', 'config.clip_max_norm'], {}), '(model, criterion, data_loader_train, optimizer, device,\n epoch, config.clip_max_norm)\n', (3013, 3102), False, 'from engine_cap import train_one_epoch, evaluate, get_model_cider_score\n'), ((3438, 3489), 'engine_cap.evaluate', 'evaluate', (['model', 'criterion', 'data_loader_val', 'device'], {}), '(model, criterion, data_loader_val, device)\n', (3446, 3489), False, 'from engine_cap import train_one_epoch, evaluate, get_model_cider_score\n'), ((3589, 3696), 'engine_cap.get_model_cider_score', 'get_model_cider_score', (['model', 'vocab', 'data_loader_cider', 'device'], {'max_len': 'config.max_position_embeddings'}), '(model, vocab, data_loader_cider, device, max_len=\n config.max_position_embeddings)\n', (3610, 3696), False, 'from engine_cap import train_one_epoch, evaluate, get_model_cider_score\n'), ((4367, 4396), 'os.path.join', 'join', (['config.model_dir', 'fname'], {}), '(config.model_dir, fname)\n', (4371, 4396), False, 'from os.path import join\n'), ((4405, 4498), 'train_utils.save_ckp', 'save_ckp', (['epoch', 'model', 'optimizer', 'lr_scheduler', 'epoch_loss', 'validation_loss', 'cider', 'path'], {}), '(epoch, model, optimizer, lr_scheduler, epoch_loss, validation_loss,\n cider, path)\n', (4413, 4498), False, 'from train_utils import save_ckp, load_ckp\n'), ((3836, 3911), 'os.path.join', 'join', (['config.model_dir', 'f"""{config.coco_model_name}_generated_e{epoch}.json"""'], {}), "(config.model_dir, f'{config.coco_model_name}_generated_e{epoch}.json')\n", (3840, 3911), False, 'from os.path import join\n'), ((4271, 4293), 'json.dump', 'json.dump', (['out_dict', 'f'], {}), '(out_dict, f)\n', (4280, 4293), False, 'import json\n')] |
"""
===========================
Partial Dependence Plots 2D
===========================
Hvass-Labs Dec 2017
<NAME> 2020
.. currentmodule:: skopt
Simple example to show the new 2D plots.
"""
print(__doc__)
import numpy as np
from math import exp
from skopt import gp_minimize
from skopt.space import Real, Categorical, Integer
from skopt.plots import plot_histogram, plot_objective_2D, plot_objective
from skopt.utils import point_asdict
# Fixed seed so the demo's noisy objective is reproducible.
np.random.seed(123)
import matplotlib.pyplot as plt
#############################################################################
# Search space: log-uniform learning rate, small integer architecture
# choices, and a categorical activation.
dim_learning_rate = Real(name='learning_rate', low=1e-6, high=1e-2, prior='log-uniform')
dim_num_dense_layers = Integer(name='num_dense_layers', low=1, high=5)
dim_num_dense_nodes = Integer(name='num_dense_nodes', low=5, high=512)
dim_activation = Categorical(name='activation', categories=['relu', 'sigmoid'])
dimensions = [dim_learning_rate,
              dim_num_dense_layers,
              dim_num_dense_nodes,
              dim_activation]
# Starting point for the optimisation (lr, layers, nodes, activation).
default_parameters = [1e-4, 1, 64, 'relu']
def model_fitness(x):
    """Synthetic objective for the hyper-parameter search demo.

    x is [learning_rate, num_dense_layers, num_dense_nodes, activation];
    lower is better.
    """
    learning_rate, num_dense_layers, num_dense_nodes, activation = x
    # Quadratic bowl over the three numeric hyper-parameters.
    lr_term = ((exp(learning_rate) - 1.0) * 1000) ** 2
    layer_term = num_dense_layers ** 2
    node_term = (num_dense_nodes / 100) ** 2
    fitness = lr_term + layer_term + node_term
    # Up to 10% multiplicative noise makes the surface stochastic.
    fitness *= 1.0 + 0.1 * np.random.rand()
    # Penalise sigmoid so the optimum prefers relu.
    if activation == 'sigmoid':
        fitness += 10
    return fitness
print(model_fitness(x=default_parameters))
#############################################################################
# Bayesian optimisation (Gaussian-process surrogate) over the search space,
# warm-started from the default parameters.
search_result = gp_minimize(func=model_fitness,
                            dimensions=dimensions,
                            n_calls=30,
                            x0=default_parameters,
                            random_state=123
                            )
print(search_result.x)
print(search_result.fun)
#############################################################################
# All evaluated points, sorted by fitness (best first).
for fitness, x in sorted(zip(search_result.func_vals, search_result.x_iters)):
    print(fitness, x)
#############################################################################
space = search_result.space
print(search_result.x_iters)
search_space = {name: space[name][1] for name in space.dimension_names}
print(point_asdict(search_space, default_parameters))
#############################################################################
print("Plotting now ...")
# 1-D marginal histogram of the sampled learning rates.
_ = plot_histogram(result=search_result, dimension_identifier='learning_rate',
                   bins=20)
plt.show()
#############################################################################
# 2-D partial-dependence plots for selected pairs of dimensions.
_ = plot_objective_2D(result=search_result,
                      dimension_identifier1='learning_rate',
                      dimension_identifier2='num_dense_nodes')
plt.show()
#############################################################################
_ = plot_objective_2D(result=search_result,
                      dimension_identifier1='num_dense_layers',
                      dimension_identifier2='num_dense_nodes')
plt.show()
#############################################################################
_ = plot_objective(result=search_result,
                   plot_dims=['num_dense_layers',
                              'num_dense_nodes'])
plt.show()
| [
"skopt.gp_minimize",
"math.exp",
"numpy.random.seed",
"skopt.space.Categorical",
"matplotlib.pyplot.show",
"skopt.plots.plot_objective",
"skopt.plots.plot_objective_2D",
"skopt.space.Integer",
"skopt.plots.plot_histogram",
"skopt.space.Real",
"numpy.random.rand",
"skopt.utils.point_asdict"
] | [((441, 460), 'numpy.random.seed', 'np.random.seed', (['(123)'], {}), '(123)\n', (455, 460), True, 'import numpy as np\n'), ((592, 661), 'skopt.space.Real', 'Real', ([], {'name': '"""learning_rate"""', 'low': '(1e-06)', 'high': '(0.01)', 'prior': '"""log-uniform"""'}), "(name='learning_rate', low=1e-06, high=0.01, prior='log-uniform')\n", (596, 661), False, 'from skopt.space import Real, Categorical, Integer\n'), ((684, 731), 'skopt.space.Integer', 'Integer', ([], {'name': '"""num_dense_layers"""', 'low': '(1)', 'high': '(5)'}), "(name='num_dense_layers', low=1, high=5)\n", (691, 731), False, 'from skopt.space import Real, Categorical, Integer\n'), ((754, 802), 'skopt.space.Integer', 'Integer', ([], {'name': '"""num_dense_nodes"""', 'low': '(5)', 'high': '(512)'}), "(name='num_dense_nodes', low=5, high=512)\n", (761, 802), False, 'from skopt.space import Real, Categorical, Integer\n'), ((820, 882), 'skopt.space.Categorical', 'Categorical', ([], {'name': '"""activation"""', 'categories': "['relu', 'sigmoid']"}), "(name='activation', categories=['relu', 'sigmoid'])\n", (831, 882), False, 'from skopt.space import Real, Categorical, Integer\n'), ((1559, 1671), 'skopt.gp_minimize', 'gp_minimize', ([], {'func': 'model_fitness', 'dimensions': 'dimensions', 'n_calls': '(30)', 'x0': 'default_parameters', 'random_state': '(123)'}), '(func=model_fitness, dimensions=dimensions, n_calls=30, x0=\n default_parameters, random_state=123)\n', (1570, 1671), False, 'from skopt import gp_minimize\n'), ((2414, 2501), 'skopt.plots.plot_histogram', 'plot_histogram', ([], {'result': 'search_result', 'dimension_identifier': '"""learning_rate"""', 'bins': '(20)'}), "(result=search_result, dimension_identifier='learning_rate',\n bins=20)\n", (2428, 2501), False, 'from skopt.plots import plot_histogram, plot_objective_2D, plot_objective\n'), ((2517, 2527), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2525, 2527), True, 'import matplotlib.pyplot as plt\n'), ((2611, 2735), 
'skopt.plots.plot_objective_2D', 'plot_objective_2D', ([], {'result': 'search_result', 'dimension_identifier1': '"""learning_rate"""', 'dimension_identifier2': '"""num_dense_nodes"""'}), "(result=search_result, dimension_identifier1=\n 'learning_rate', dimension_identifier2='num_dense_nodes')\n", (2628, 2735), False, 'from skopt.plots import plot_histogram, plot_objective_2D, plot_objective\n'), ((2775, 2785), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2783, 2785), True, 'import matplotlib.pyplot as plt\n'), ((2870, 2997), 'skopt.plots.plot_objective_2D', 'plot_objective_2D', ([], {'result': 'search_result', 'dimension_identifier1': '"""num_dense_layers"""', 'dimension_identifier2': '"""num_dense_nodes"""'}), "(result=search_result, dimension_identifier1=\n 'num_dense_layers', dimension_identifier2='num_dense_nodes')\n", (2887, 2997), False, 'from skopt.plots import plot_histogram, plot_objective_2D, plot_objective\n'), ((3037, 3047), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3045, 3047), True, 'import matplotlib.pyplot as plt\n'), ((3132, 3223), 'skopt.plots.plot_objective', 'plot_objective', ([], {'result': 'search_result', 'plot_dims': "['num_dense_layers', 'num_dense_nodes']"}), "(result=search_result, plot_dims=['num_dense_layers',\n 'num_dense_nodes'])\n", (3146, 3223), False, 'from skopt.plots import plot_histogram, plot_objective_2D, plot_objective\n'), ((3269, 3279), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3277, 3279), True, 'import matplotlib.pyplot as plt\n'), ((2256, 2302), 'skopt.utils.point_asdict', 'point_asdict', (['search_space', 'default_parameters'], {}), '(search_space, default_parameters)\n', (2268, 2302), False, 'from skopt.utils import point_asdict\n'), ((1327, 1343), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (1341, 1343), True, 'import numpy as np\n'), ((1171, 1189), 'math.exp', 'exp', (['learning_rate'], {}), '(learning_rate)\n', (1174, 1189), False, 'from math import exp\n')] |
import numpy as np
def eos_trim(v):
    """Join a sequence of string tokens into one string, truncated at the
    first ``'<eos>'`` token (if present), with surrounding whitespace stripped.

    Fixes over the original: the bare ``except:`` (which swallowed *all*
    errors, including KeyboardInterrupt) is gone, and plain Python lists are
    now trimmed too — previously the scalar ``list == '<eos>'`` comparison
    made the lookup fail, so list inputs were joined untrimmed.

    Parameters
    ----------
    v : sequence of str (list or numpy array)

    Returns
    -------
    str : joined tokens up to (not including) the first '<eos>'.
    """
    tokens = np.asarray(v)
    eos_positions = np.where(tokens == '<eos>')[0]
    if eos_positions.size > 0:
        # Keep only the tokens before the first end-of-sequence marker.
        tokens = tokens[:eos_positions[0]]
    return ''.join(tokens).strip()
| [
"numpy.where"
] | [((74, 96), 'numpy.where', 'np.where', (["(v == '<eos>')"], {}), "(v == '<eos>')\n", (82, 96), True, 'import numpy as np\n')] |
# isolated cassie env
from cassie_m.cassiemujoco import pd_in_t, state_out_t, CassieSim, CassieVis
from loadstep import CassieTrajectory
from quaternion_function import *
from phase_function import *
from math import floor
import gym
from gym import spaces
import numpy as np
import os
import random
import copy
import pickle
import torch
class CassieEnv(gym.Env):
    """Gym environment wrapping the MuJoCo Cassie biped simulation.

    Observations are raw qpos/qvel slices (20 position + 20 velocity
    values); actions are 10 PD position targets added to a fixed standing
    offset. Optional dynamics randomization perturbs joint damping, body
    mass, center-of-mass and ground friction on every reset.
    """

    def __init__(self, simrate=60, dynamics_randomization=True,
                 visual=True, config="./model/cassie.xml", **kwargs):
        """Create the simulator, spaces, PD gains and randomization bounds.

        Parameters
        ----------
        simrate : int
            Number of 2 kHz MuJoCo sub-steps per policy step.
        dynamics_randomization : bool
            Randomize damping/mass/CoM/friction on reset when True.
        visual : bool
            Create a CassieVis window and render every step when True.
        config : str
            Path to the MuJoCo XML model.
        """
        super(CassieEnv, self).__init__()
        self.config = config
        self.visual = visual
        self.sim = CassieSim(self.config)
        if self.visual:
            self.vis = CassieVis(self.sim)
        self.dynamics_randomization = dynamics_randomization
        # Observation space and State space
        self.observation_space, self.clock_inds, self.mirrored_obs = self.set_up_state_space()
        self.action_space = spaces.Box(low=np.array([-3.14]*10), high=np.array([3.14]*10))
        # PD gains (hip roll, hip yaw, hip pitch, knee, foot), shared by both legs.
        self.P = np.array([100, 100, 88, 96, 50])
        self.D = np.array([10.0, 10.0, 8.0, 9.6, 5.0])
        self.u = pd_in_t()
        self.mirrored_acts = [-5, -6, 7, 8, 9, -0.1, -1, 2, 3, 4]
        # TODO: should probably initialize this to current state
        self.cassie_state = state_out_t()
        self.simrate = simrate  # simulate X mujoco steps with same pd target. 50 brings simulation from 2000Hz to exactly 40Hz
        self.time = 0  # number of time steps in current episode
        self.phase = 0  # portion of the phase the robot is in
        self.counter = 0  # number of phase cycles completed in episode
        self.strict_relaxer = 0.1
        self.early_reward = False
        # see include/cassiemujoco.h for meaning of these indices
        self.pos_idx = [7, 8, 9, 14, 20, 21, 22, 23, 28, 34]
        self.vel_idx = [6, 7, 8, 12, 18, 19, 20, 21, 25, 31]
        self.pos_index = np.array([1,2,3,4,5,6,7,8,9,14,15,16,20,21,22,23,28,29,30,34])
        self.vel_index = np.array([0,1,2,3,4,5,6,7,8,12,13,14,18,19,20,21,25,26,27,31])
        # CONFIGURE OFFSET for No Delta Policies
        self.offset = np.array([0.0045, 0.0, 0.4973, -1.1997, -1.5968, 0.0045, 0.0, 0.4973, -1.1997, -1.5968])
        self.max_speed = 4.0
        self.min_speed = -0.3
        self.max_side_speed = 0.3
        self.min_side_speed = -0.3
        # global flat foot orientation, can be useful part of reward function:
        self.neutral_foot_orient = np.array([-0.24790886454547323, -0.24679713195445646, -0.6609396704367185, 0.663921021343526])
        # TODO: should this be mujoco tracking var or use state estimator. real command interface will use state est
        # Track pelvis position as baseline for pelvis tracking command inputs
        self.last_pelvis_pos = self.sim.qpos()[0:3]
        #### Dynamics Randomization ####
        self.max_pitch_incline = 0.03
        self.max_roll_incline = 0.03
        self.encoder_noise = 0.01
        self.damping_low = 0.3
        self.damping_high = 5.0
        self.mass_low = 0.5
        self.mass_high = 1.5
        self.fric_low = 0.4
        self.fric_high = 1.1
        self.speed = 4.0
        self.side_speed = 0.0
        self.orient_add = 0
        # Record default dynamics parameters
        self.default_damping = self.sim.get_dof_damping()
        self.default_mass = self.sim.get_body_mass()
        self.default_ipos = self.sim.get_body_ipos()
        self.default_fric = self.sim.get_geom_friction()
        self.default_rgba = self.sim.get_geom_rgba()
        self.default_quat = self.sim.get_geom_quat()
        self.motor_encoder_noise = np.zeros(10)
        self.joint_encoder_noise = np.zeros(6)

    def set_up_state_space(self):
        """Build the observation Box plus the clock indices and mirror-index
        list used for symmetry-augmented training.

        Returns
        -------
        (observation_space, clock_inds, mirrored_obs)
        """
        full_state_est_size = 40
        speed_size = 2  # x speed, y speed
        clock_size = 2  # sin, cos
        base_mir_obs = np.array([0.1, 1, -2, 3, -4, -10, -11, 12, 13, 14, -5, -6, 7, 8, 9, 15, -16, 17, -18, 19, -20, -26, -27, 28, 29, 30, -21, -22, 23, 24, 25, 31, -32, 33, 37, 38, 39, 34, 35, 36, 43, 44, 45, 40, 41, 42])
        obs_size = full_state_est_size
        append_obs = np.array([len(base_mir_obs) + i for i in range(clock_size+speed_size)])
        mirrored_obs = np.concatenate([base_mir_obs, append_obs])
        clock_inds = append_obs[0:clock_size].tolist()
        obs_size += clock_size + speed_size
        # NOTE(review): the Box uses full_state_est_size (40), matching what
        # get_state() actually returns; obs_size (44) is computed but unused.
        observation_space = spaces.Box(low=-np.inf,high=np.inf,shape=(full_state_est_size,))
        mirrored_obs = mirrored_obs.tolist()
        return observation_space, clock_inds, mirrored_obs

    def rotate_to_orient(self, vec):
        """Rotate a 3-vector (or reorient a 4-quaternion) by the inverse of the
        commanded heading offset ``orient_add``.

        A quaternion result is sign-normalized so its scalar part is >= 0.
        """
        quaternion = euler2quat(z=self.orient_add, y=0, x=0)
        iquaternion = inverse_quaternion(quaternion)
        if len(vec) == 3:
            return rotate_by_quaternion(vec, iquaternion)
        elif len(vec) == 4:
            new_orient = quaternion_product(iquaternion, vec)
            if new_orient[0] < 0:
                new_orient = -new_orient
            return new_orient

    def step_simulation(self,action):
        """Run one 2 kHz MuJoCo PD sub-step with ``action`` as position offsets
        added to the standing-pose offset (minus simulated encoder noise)."""
        target = action + self.offset
        target -= self.motor_encoder_noise
        self.u = pd_in_t()
        # Fill identical PD gains/targets for both legs (5 motors per leg).
        for i in range(5):
            self.u.leftLeg.motorPd.pGain[i] = self.P[i]
            self.u.rightLeg.motorPd.pGain[i] = self.P[i]
            self.u.leftLeg.motorPd.dGain[i] = self.D[i]
            self.u.rightLeg.motorPd.dGain[i] = self.D[i]
            self.u.leftLeg.motorPd.torque[i] = 0  # Feedforward torque
            self.u.rightLeg.motorPd.torque[i] = 0
            self.u.leftLeg.motorPd.pTarget[i] = target[i]
            self.u.rightLeg.motorPd.pTarget[i] = target[i + 5]
            self.u.leftLeg.motorPd.dTarget[i] = 0
            self.u.rightLeg.motorPd.dTarget[i] = 0
        self.cassie_state = self.sim.step_pd(self.u)

    def step(self, action):
        """Advance ``simrate`` simulator sub-steps with the same PD target.

        Returns the standard gym tuple ``(obs, reward, done, info)``; the
        episode terminates when the pelvis height leaves [0.4, 3.0].
        """
        for _ in range(self.simrate):
            self.step_simulation(action)
        obs = self.get_state()
        height = self.qpos[2]
        self.curr_action = action
        self.time += 1
        # Terminate when the pelvis leaves a plausible height band (fallen / flung).
        if height < 0.4 or height > 3.0:
            done = True
        else:
            done = False
        if self.visual:
            self.render()
        reward = self.compute_reward(action)
        return obs, reward, done, {}

    def reset(self):
        """Reset the episode; optionally re-randomize the model dynamics
        (damping, mass, CoM offsets, friction) within the configured bounds."""
        self.speed = 4.0  # np.random.uniform(self.min_speed, self.max_speed)
        self.side_speed = 0.0  # np.random.uniform(self.min_side_speed, self.max_side_speed)
        self.time = 0
        self.counter = 0
        # Randomize dynamics:
        if self.dynamics_randomization:
            damp = self.default_damping
            # Pelvis DOFs are left un-randomized (fixed ranges).
            pelvis_damp_range = [[damp[0], damp[0]],
                                 [damp[1], damp[1]],
                                 [damp[2], damp[2]],
                                 [damp[3], damp[3]],
                                 [damp[4], damp[4]],
                                 [damp[5], damp[5]]]  # 0->5
            hip_damp_range = [[damp[6]*self.damping_low, damp[6]*self.damping_high],
                              [damp[7]*self.damping_low, damp[7]*self.damping_high],
                              [damp[8]*self.damping_low, damp[8]*self.damping_high]]  # 6->8 and 19->21
            achilles_damp_range = [[damp[9]*self.damping_low, damp[9]*self.damping_high],
                                   [damp[10]*self.damping_low, damp[10]*self.damping_high],
                                   [damp[11]*self.damping_low, damp[11]*self.damping_high]]  # 9->11 and 22->24
            knee_damp_range = [[damp[12]*self.damping_low, damp[12]*self.damping_high]]  # 12 and 25
            shin_damp_range = [[damp[13]*self.damping_low, damp[13]*self.damping_high]]  # 13 and 26
            tarsus_damp_range = [[damp[14]*self.damping_low, damp[14]*self.damping_high]]  # 14 and 27
            heel_damp_range = [[damp[15], damp[15]]]  # 15 and 28
            fcrank_damp_range = [[damp[16]*self.damping_low, damp[16]*self.damping_high]]  # 16 and 29
            prod_damp_range = [[damp[17], damp[17]]]  # 17 and 30
            foot_damp_range = [[damp[18]*self.damping_low, damp[18]*self.damping_high]]  # 18 and 31
            side_damp = hip_damp_range + achilles_damp_range + knee_damp_range + shin_damp_range + tarsus_damp_range + heel_damp_range + fcrank_damp_range + prod_damp_range + foot_damp_range
            damp_range = pelvis_damp_range + side_damp + side_damp
            damp_noise = [np.random.uniform(a, b) for a, b in damp_range]
            m = self.default_mass
            pelvis_mass_range = [[self.mass_low*m[1], self.mass_high*m[1]]]  # 1
            hip_mass_range = [[self.mass_low*m[2], self.mass_high*m[2]],  # 2->4 and 14->16
                              [self.mass_low*m[3], self.mass_high*m[3]],
                              [self.mass_low*m[4], self.mass_high*m[4]]]
            achilles_mass_range = [[self.mass_low*m[5], self.mass_high*m[5]]]  # 5 and 17
            knee_mass_range = [[self.mass_low*m[6], self.mass_high*m[6]]]  # 6 and 18
            knee_spring_mass_range = [[self.mass_low*m[7], self.mass_high*m[7]]]  # 7 and 19
            shin_mass_range = [[self.mass_low*m[8], self.mass_high*m[8]]]  # 8 and 20
            tarsus_mass_range = [[self.mass_low*m[9], self.mass_high*m[9]]]  # 9 and 21
            heel_spring_mass_range = [[self.mass_low*m[10], self.mass_high*m[10]]]  # 10 and 22
            fcrank_mass_range = [[self.mass_low*m[11], self.mass_high*m[11]]]  # 11 and 23
            prod_mass_range = [[self.mass_low*m[12], self.mass_high*m[12]]]  # 12 and 24
            foot_mass_range = [[self.mass_low*m[13], self.mass_high*m[13]]]  # 13 and 25
            side_mass = hip_mass_range + achilles_mass_range \
                        + knee_mass_range + knee_spring_mass_range \
                        + shin_mass_range + tarsus_mass_range \
                        + heel_spring_mass_range + fcrank_mass_range \
                        + prod_mass_range + foot_mass_range
            mass_range = [[0, 0]] + pelvis_mass_range + side_mass + side_mass
            mass_noise = [np.random.uniform(a, b) for a, b in mass_range]
            # delta == 0.0 means CoM randomization is currently disabled.
            delta = 0.0
            com_noise = [0, 0, 0] + [np.random.uniform(val - delta, val + delta) for val in self.default_ipos[3:]]
            fric_noise = []
            translational = np.random.uniform(self.fric_low, self.fric_high)
            torsional = np.random.uniform(1e-4, 5e-4)
            rolling = np.random.uniform(1e-4, 2e-4)
            # Same (translational, torsional, rolling) triple for every geom.
            for _ in range(int(len(self.default_fric)/3)):
                fric_noise += [translational, torsional, rolling]
            self.sim.set_dof_damping(np.clip(damp_noise, 0, None))
            self.sim.set_body_mass(np.clip(mass_noise, 0, None))
            self.sim.set_body_ipos(com_noise)
            self.sim.set_geom_friction(np.clip(fric_noise, 0, None))
        else:
            self.sim.set_body_mass(self.default_mass)
            self.sim.set_body_ipos(self.default_ipos)
            self.sim.set_dof_damping(self.default_damping)
            self.sim.set_geom_friction(self.default_fric)
        # Terrain incline randomization is disabled; restore the default geometry.
        self.sim.set_geom_quat(self.default_quat)
        self.sim.set_const()
        return self.get_state()

    def get_state(self):
        """Return the 40-D observation: 20 selected qpos + 20 selected qvel."""
        self.qpos = np.copy(self.sim.qpos())  # dim=35 see cassiemujoco.h for details
        self.qvel = np.copy(self.sim.qvel())  # dim=32
        '''
        Position [1], [2]             -> Pelvis y, z
                 [3], [4], [5], [6]   -> Pelvis Orientation qw, qx, qy, qz
                 [7], [8], [9]        -> Left Hip Roll (Motor[0]), Yaw (Motor[1]), Pitch (Motor[2])
                 [14]                 -> Left Knee    (Motor[3])
                 [15]                 -> Left Shin    (Joint[0])
                 [16]                 -> Left Tarsus  (Joint[1])
                 [20]                 -> Left Foot    (Motor[4], Joint[2])
                 [21], [22], [23]     -> Right Hip Roll (Motor[5]), Yaw (Motor[6]), Pitch (Motor[7])
                 [28]                 -> Rigt Knee    (Motor[8])
                 [29]                 -> Rigt Shin    (Joint[3])
                 [30]                 -> Rigt Tarsus  (Joint[4])
                 [34]                 -> Rigt Foot    (Motor[9], Joint[5])
        '''
        pos_index = np.array([1,2,3,4,5,6,7,8,9,14,15,16,20,21,22,23,28,29,30,34])
        '''
        Velocity [0], [1], [2]        -> Pelvis x, y, z
                 [3], [4], [5]        -> Pelvis Orientation wx, wy, wz
                 [6], [7], [8]        -> Left Hip Roll (Motor[0]), Yaw (Motor[1]), Pitch (Motor[2])
                 [12]                 -> Left Knee    (Motor[3])
                 [13]                 -> Left Shin    (Joint[0])
                 [14]                 -> Left Tarsus  (Joint[1])
                 [18]                 -> Left Foot    (Motor[4], Joint[2])
                 [19], [20], [21]     -> Right Hip Roll (Motor[5]), Yaw (Motor[6]), Pitch (Motor[7])
                 [25]                 -> Rigt Knee    (Motor[8])
                 [26]                 -> Rigt Shin    (Joint[3])
                 [27]                 -> Rigt Tarsus  (Joint[4])
                 [31]                 -> Rigt Foot    (Motor[9], Joint[5])
        '''
        vel_index = np.array([0,1,2,3,4,5,6,7,8,12,13,14,18,19,20,21,25,26,27,31])
        return np.concatenate([self.qpos[pos_index], self.qvel[vel_index]])

    def compute_reward(self, action):
        """Reward = 0.7 * pelvis height + 0.1 * exp(-sum(action^2)).

        NOTE(review): orientation/velocity/spring penalties are computed
        below but not included in the returned reward.
        """
        height = self.qpos[2]
        joint_penalty = np.sum(action * action)
        orientation_penalty = (self.qpos[4])**2+(self.qpos[5])**2+(self.qpos[6])**2
        vel_penalty = (self.speed - self.qvel[0])**2 + (self.side_speed - self.qvel[1])**2 + (self.qvel[2])**2
        spring_penalty = (self.sim.qpos()[15])**2+(self.sim.qpos()[29])**2
        spring_penalty *= 1000
        self.rew_height = 0.7 * height
        self.rew_joint = 0.1*np.exp(-joint_penalty)
        reward = self.rew_height + self.rew_joint
        return reward

    def render(self):
        """Draw the current simulator state in the visualizer window."""
        return self.vis.draw(self.sim)
class CassieRefEnv(gym.Env):
    """Cassie environment that rewards tracking of a reference walking gait.

    The observation (80 values) is the current qpos/qvel slices followed by
    the matching slices of the *next* reference-trajectory frame. A 28-step
    phase counter indexes into the pre-recorded trajectory.
    """

    def __init__(self, simrate=60, dynamics_randomization=True,
                 visual=True, config="./model/cassie.xml", **kwargs):
        """Create the simulator, load the reference trajectory and set up
        spaces, PD gains and dynamics-randomization bounds."""
        self.config = config
        self.visual = visual
        self.sim = CassieSim(self.config)
        if self.visual:
            self.vis = CassieVis(self.sim)
        self.dynamics_randomization = dynamics_randomization
        self.termination = False
        # Observation space and State space
        self.observation_space = spaces.Box(low=-np.inf,high=np.inf,shape=(80,))
        self.action_space = spaces.Box(low=np.array([-1]*10), high=np.array([1]*10))
        self.trajectory = CassieTrajectory("../ref/stepdata.bin")
        # PD gains (hip roll, hip yaw, hip pitch, knee, foot), shared by both legs.
        self.P = np.array([100, 100, 88, 96, 50])
        self.D = np.array([10.0, 10.0, 8.0, 9.6, 5.0])
        self.u = pd_in_t()
        self.cassie_state = state_out_t()
        self.simrate = simrate  # simulate X mujoco steps with same pd target. 50 brings simulation from 2000Hz to exactly 40Hz
        self.time = 0  # number of time steps in current episode
        self.phase = 0  # portion of the phase the robot is in
        self.counter = 0  # number of phase cycles completed in episode
        self.time_limit = 400
        self.offset = np.array([0.0045, 0.0, 0.4973, -1.1997, -1.5968, 0.0045, 0.0, 0.4973, -1.1997, -1.5968])
        self.max_speed = 4.0
        self.min_speed = -0.3
        self.max_side_speed = 0.3
        self.min_side_speed = -0.3
        #### Dynamics Randomization ####
        self.max_pitch_incline = 0.03
        self.max_roll_incline = 0.03
        self.encoder_noise = 0.01
        self.damping_low = 0.3
        self.damping_high = 5.0
        self.mass_low = 0.5
        self.mass_high = 1.5
        self.fric_low = 0.4
        self.fric_high = 1.1
        self.speed = 4.0
        self.side_speed = 0.0
        self.orient_add = 0
        # Default dynamics parameters
        self.default_damping = self.sim.get_dof_damping()
        self.default_mass = self.sim.get_body_mass()
        self.default_ipos = self.sim.get_body_ipos()
        self.default_fric = self.sim.get_geom_friction()
        self.default_rgba = self.sim.get_geom_rgba()
        self.default_quat = self.sim.get_geom_quat()
        self.motor_encoder_noise = np.zeros(10)
        self.joint_encoder_noise = np.zeros(6)

    def step_simulation(self,action):
        """Run one 2 kHz MuJoCo PD sub-step; ``action`` offsets the standing pose."""
        target = action + self.offset
        # target -= self.motor_encoder_noise
        # pos_index = [7, 8, 9, 14, 20, 21, 22, 23, 28, 34]
        # ref_pos, ref_vel = self.get_kin_next_state()
        # target = action + ref_pos[pos_index]
        self.u = pd_in_t()
        # Fill identical PD gains/targets for both legs (5 motors per leg).
        for i in range(5):
            self.u.leftLeg.motorPd.pGain[i] = self.P[i]
            self.u.rightLeg.motorPd.pGain[i] = self.P[i]
            self.u.leftLeg.motorPd.dGain[i] = self.D[i]
            self.u.rightLeg.motorPd.dGain[i] = self.D[i]
            self.u.leftLeg.motorPd.torque[i] = 0  # Feedforward torque
            self.u.rightLeg.motorPd.torque[i] = 0
            self.u.leftLeg.motorPd.pTarget[i] = target[i]
            self.u.rightLeg.motorPd.pTarget[i] = target[i + 5]
            self.u.leftLeg.motorPd.dTarget[i] = 0
            self.u.rightLeg.motorPd.dTarget[i] = 0
        self.cassie_state = self.sim.step_pd(self.u)  # cassie_state is different from qpos state???

    def step(self, action):
        """Advance one policy step (``simrate`` sub-steps) and advance the phase.

        Episode ends on pelvis height outside [0.6, 1.2], time limit, or a
        reward below 0.3 (early termination to speed up training).
        """
        for _ in range(self.simrate):
            self.step_simulation(action)
        obs = self.get_state()
        height = self.qpos[2]
        self.time += 1
        self.phase += 1
        # Wrap the 28-step gait phase and count completed cycles.
        if self.phase >= 28:
            self.phase = 0
            self.counter +=1
        self.termination = height < 0.6 or height > 1.2
        done = self.termination or self.time >= self.time_limit
        if self.visual:
            self.render()
        reward = self.compute_reward(action)
        if reward < 0.3:
            done = True
        return obs, reward, done, {}

    def reset(self):
        """Reset to a random phase of the reference trajectory, optionally
        re-randomizing the model dynamics first."""
        self.phase = 0
        # self.phase = random.randint(0,27)
        self.speed = 0.7  # np.random.uniform(self.min_speed, self.max_speed)
        self.side_speed = 0.0  # np.random.uniform(self.min_side_speed, self.max_side_speed)
        self.time = 0
        self.counter = 0
        self.termination = False
        # Randomize dynamics:
        if self.dynamics_randomization:
            damp = self.default_damping
            # Pelvis DOFs are left un-randomized (fixed ranges).
            pelvis_damp_range = [[damp[0], damp[0]],
                                 [damp[1], damp[1]],
                                 [damp[2], damp[2]],
                                 [damp[3], damp[3]],
                                 [damp[4], damp[4]],
                                 [damp[5], damp[5]]]  # 0->5
            hip_damp_range = [[damp[6]*self.damping_low, damp[6]*self.damping_high],
                              [damp[7]*self.damping_low, damp[7]*self.damping_high],
                              [damp[8]*self.damping_low, damp[8]*self.damping_high]]  # 6->8 and 19->21
            achilles_damp_range = [[damp[9]*self.damping_low, damp[9]*self.damping_high],
                                   [damp[10]*self.damping_low, damp[10]*self.damping_high],
                                   [damp[11]*self.damping_low, damp[11]*self.damping_high]]  # 9->11 and 22->24
            knee_damp_range = [[damp[12]*self.damping_low, damp[12]*self.damping_high]]  # 12 and 25
            shin_damp_range = [[damp[13]*self.damping_low, damp[13]*self.damping_high]]  # 13 and 26
            tarsus_damp_range = [[damp[14]*self.damping_low, damp[14]*self.damping_high]]  # 14 and 27
            heel_damp_range = [[damp[15], damp[15]]]  # 15 and 28
            fcrank_damp_range = [[damp[16]*self.damping_low, damp[16]*self.damping_high]]  # 16 and 29
            prod_damp_range = [[damp[17], damp[17]]]  # 17 and 30
            foot_damp_range = [[damp[18]*self.damping_low, damp[18]*self.damping_high]]  # 18 and 31
            side_damp = hip_damp_range + achilles_damp_range + knee_damp_range + shin_damp_range + tarsus_damp_range + heel_damp_range + fcrank_damp_range + prod_damp_range + foot_damp_range
            damp_range = pelvis_damp_range + side_damp + side_damp
            damp_noise = [np.random.uniform(a, b) for a, b in damp_range]
            m = self.default_mass
            pelvis_mass_range = [[self.mass_low*m[1], self.mass_high*m[1]]]  # 1
            hip_mass_range = [[self.mass_low*m[2], self.mass_high*m[2]],  # 2->4 and 14->16
                              [self.mass_low*m[3], self.mass_high*m[3]],
                              [self.mass_low*m[4], self.mass_high*m[4]]]
            achilles_mass_range = [[self.mass_low*m[5], self.mass_high*m[5]]]  # 5 and 17
            knee_mass_range = [[self.mass_low*m[6], self.mass_high*m[6]]]  # 6 and 18
            knee_spring_mass_range = [[self.mass_low*m[7], self.mass_high*m[7]]]  # 7 and 19
            shin_mass_range = [[self.mass_low*m[8], self.mass_high*m[8]]]  # 8 and 20
            tarsus_mass_range = [[self.mass_low*m[9], self.mass_high*m[9]]]  # 9 and 21
            heel_spring_mass_range = [[self.mass_low*m[10], self.mass_high*m[10]]]  # 10 and 22
            fcrank_mass_range = [[self.mass_low*m[11], self.mass_high*m[11]]]  # 11 and 23
            prod_mass_range = [[self.mass_low*m[12], self.mass_high*m[12]]]  # 12 and 24
            foot_mass_range = [[self.mass_low*m[13], self.mass_high*m[13]]]  # 13 and 25
            side_mass = hip_mass_range + achilles_mass_range \
                        + knee_mass_range + knee_spring_mass_range \
                        + shin_mass_range + tarsus_mass_range \
                        + heel_spring_mass_range + fcrank_mass_range \
                        + prod_mass_range + foot_mass_range
            mass_range = [[0, 0]] + pelvis_mass_range + side_mass + side_mass
            mass_noise = [np.random.uniform(a, b) for a, b in mass_range]
            # delta == 0.0 means CoM randomization is currently disabled.
            delta = 0.0
            com_noise = [0, 0, 0] + [np.random.uniform(val - delta, val + delta) for val in self.default_ipos[3:]]
            fric_noise = []
            translational = np.random.uniform(self.fric_low, self.fric_high)
            torsional = np.random.uniform(1e-4, 5e-4)
            rolling = np.random.uniform(1e-4, 2e-4)
            # Same (translational, torsional, rolling) triple for every geom.
            for _ in range(int(len(self.default_fric)/3)):
                fric_noise += [translational, torsional, rolling]
            self.sim.set_dof_damping(np.clip(damp_noise, 0, None))
            self.sim.set_body_mass(np.clip(mass_noise, 0, None))
            self.sim.set_body_ipos(com_noise)
            self.sim.set_geom_friction(np.clip(fric_noise, 0, None))
        else:
            self.sim.set_body_mass(self.default_mass)
            self.sim.set_body_ipos(self.default_ipos)
            self.sim.set_dof_damping(self.default_damping)
            self.sim.set_geom_friction(self.default_fric)
        # Terrain incline randomization is disabled; restore the default geometry.
        self.sim.set_geom_quat(self.default_quat)
        self.sim.set_const()
        # return self.get_state()
        # # xie's code: start from a random phase of the reference gait.
        self.phase = random.randint(0, 27)
        self.time = 0
        self.counter = 0
        qpos, qvel = self.get_kin_state()
        self.sim.set_qpos(qpos)
        self.sim.set_qvel(qvel)
        return self.get_state()

    def get_state(self):
        """Return the 80-D observation: current qpos/qvel slices followed by
        the next reference frame's matching slices."""
        self.qpos = np.copy(self.sim.qpos())  # dim=35 see cassiemujoco.h for details
        self.qvel = np.copy(self.sim.qvel())  # dim=32
        self.ref_pos, self.ref_vel = self.get_kin_next_state()
        '''
        Position [1], [2]             -> Pelvis y, z
                 [3], [4], [5], [6]   -> Pelvis Orientation qw, qx, qy, qz
                 [7], [8], [9]        -> Left Hip Roll (Motor[0]), Yaw (Motor[1]), Pitch (Motor[2])
                 [14]                 -> Left Knee    (Motor[3])
                 [15]                 -> Left Shin    (Joint[0])
                 [16]                 -> Left Tarsus  (Joint[1])
                 [20]                 -> Left Foot    (Motor[4], Joint[2])
                 [21], [22], [23]     -> Right Hip Roll (Motor[5]), Yaw (Motor[6]), Pitch (Motor[7])
                 [28]                 -> Rigt Knee    (Motor[8])
                 [29]                 -> Rigt Shin    (Joint[3])
                 [30]                 -> Rigt Tarsus  (Joint[4])
                 [34]                 -> Rigt Foot    (Motor[9], Joint[5])
        '''
        pos_index = np.array([1,2,3,4,5,6,7,8,9,14,15,16,20,21,22,23,28,29,30,34])
        '''
        Velocity [0], [1], [2]        -> Pelvis x, y, z
                 [3], [4], [5]        -> Pelvis Orientation wx, wy, wz
                 [6], [7], [8]        -> Left Hip Roll (Motor[0]), Yaw (Motor[1]), Pitch (Motor[2])
                 [12]                 -> Left Knee    (Motor[3])
                 [13]                 -> Left Shin    (Joint[0])
                 [14]                 -> Left Tarsus  (Joint[1])
                 [18]                 -> Left Foot    (Motor[4], Joint[2])
                 [19], [20], [21]     -> Right Hip Roll (Motor[5]), Yaw (Motor[6]), Pitch (Motor[7])
                 [25]                 -> Rigt Knee    (Motor[8])
                 [26]                 -> Rigt Shin    (Joint[3])
                 [27]                 -> Rigt Tarsus  (Joint[4])
                 [31]                 -> Rigt Foot    (Motor[9], Joint[5])
        '''
        vel_index = np.array([0,1,2,3,4,5,6,7,8,12,13,14,18,19,20,21,25,26,27,31])
        return np.concatenate([self.qpos[pos_index], self.qvel[vel_index], self.ref_pos[pos_index], self.ref_vel[vel_index]])

    def compute_reward(self, action):
        """Weighted sum of reference-tracking, spring, orientation and
        velocity terms, minus a flat penalty (-10) on termination.

        NOTE(review): joint_penalty and com_penalty are computed but not
        included in the returned reward.
        """
        ref_pos, ref_vel = self.get_kin_state()
        height = self.qpos[2]
        joint_penalty = np.sum(action * action)
        ref_penalty = 0
        joint_index = [7, 8, 9, 14, 20, 21, 22, 23, 28, 34]
        weight = [0.15, 0.15, 0.1, 0.05, 0.05, 0.15, 0.15, 0.1, 0.05, 0.05]
        for i in range(10):
            error = weight[i] * (ref_pos[joint_index[i]]-self.sim.qpos()[joint_index[i]])**2
            ref_penalty += error*30
        orientation_penalty = (self.qpos[4])**2+(self.qpos[5])**2+(self.qpos[6])**2
        com_penalty = (ref_pos[0] - self.sim.qpos()[0])**2 + (self.sim.qpos()[1])**2 + (self.sim.qpos()[2]-ref_pos[2])**2
        vel_penalty = (self.speed - self.qvel[0])**2 + (self.side_speed - self.qvel[1])**2 + (self.qvel[2])**2
        spring_penalty = (self.sim.qpos()[15])**2+(self.sim.qpos()[29])**2
        spring_penalty *= 1000
        self.rew_ref = 0.5*np.exp(-ref_penalty)
        self.rew_spring = 0.1*np.exp(-spring_penalty)
        self.rew_ori = 0.1*np.exp(-orientation_penalty)
        self.rew_vel = 0.3*np.exp(-vel_penalty)
        self.rew_termin = -10 * self.termination
        reward = self.rew_ref + self.rew_spring + self.rew_ori + self.rew_vel + self.rew_termin
        return reward

    def render(self):
        """Draw the current simulator state in the visualizer window."""
        return self.vis.draw(self.sim)

    def get_kin_state(self):
        """Reference (qpos, qvel) for the current phase, with the x position
        shifted forward by the distance walked in completed cycles."""
        pose = np.copy(self.trajectory.qpos[self.phase*2*30])
        pose[0] += (self.trajectory.qpos[1681, 0]- self.trajectory.qpos[0, 0])* self.counter
        pose[1] = 0
        vel = np.copy(self.trajectory.qvel[self.phase*2*30])
        return pose, vel

    def get_kin_next_state(self):
        """Reference (qpos, qvel) one phase step ahead (wrapping at 28)."""
        phase = self.phase + 1
        if phase >= 28:
            phase = 0
        pose = np.copy(self.trajectory.qpos[phase*2*30])
        vel = np.copy(self.trajectory.qvel[phase*2*30])
        pose[0] += (self.trajectory.qpos[1681, 0]- self.trajectory.qpos[0, 0])* self.counter
        pose[1] = 0
        return pose, vel
class CassieRefBuf(CassieRefEnv):
    """CassieRefEnv variant whose observation stacks a short history of the
    last ``buffer_size`` (qpos, qvel) states in front of the reference frame.
    """

    def __init__(self, simrate=60, dynamics_randomization=True,
                 visual=True, config="./model/cassie.xml", delay=True, **kwargs):
        """Initialize the base environment plus the state-history buffer.

        BUG FIX: the original called ``super.__init__(...)`` (missing call
        parentheses), which raises a TypeError at construction, and it
        hard-coded the base-class arguments instead of forwarding the ones
        given by the caller. Defaults are unchanged, so behavior for
        default-argument callers is identical.
        """
        super().__init__(simrate=simrate,
                         dynamics_randomization=dynamics_randomization,
                         visual=visual, config=config, **kwargs)
        self.delay = delay
        self.state_buffer = []  # FIFO of (qpos, qvel) tuples, newest last
        self.buffer_size = 3
        # 40 values (20 pos + 20 vel) per buffered state, plus 40 reference values.
        self.observation_space = spaces.Box(low=-np.inf, high=np.inf,
                                            shape=(self.buffer_size*40+40,))

    def get_state(self):
        """Return the stacked buffered states concatenated with the next
        reference frame's qpos/qvel slices.

        On the first call(s) the buffer is padded by repeating the current
        state until it holds ``buffer_size`` entries.
        """
        self.qpos = np.copy(self.sim.qpos())  # dim=35 see cassiemujoco.h for details
        self.qvel = np.copy(self.sim.qvel())  # dim=32
        self.state_buffer.append((self.qpos, self.qvel))
        if len(self.state_buffer) > self.buffer_size:
            self.state_buffer.pop(0)  # drop the oldest entry
        else:
            while len(self.state_buffer) < self.buffer_size:
                self.state_buffer.append((self.qpos, self.qvel))
        pos = np.array([x[0] for x in self.state_buffer])
        vel = np.array([x[1] for x in self.state_buffer])
        self.ref_pos, self.ref_vel = self.get_kin_next_state()
        # qpos indices: pelvis y,z; pelvis quat (w,x,y,z); then per-leg
        # hip roll/yaw/pitch, knee, shin, tarsus, foot (left then right).
        pos_index = np.array([1,2,3,4,5,6,7,8,9,14,15,16,20,21,22,23,28,29,30,34])
        # qvel indices: pelvis lin. vel x,y,z; pelvis ang. vel; same joint order.
        vel_index = np.array([0,1,2,3,4,5,6,7,8,12,13,14,18,19,20,21,25,26,27,31])
        return np.concatenate([pos[:, pos_index].reshape(-1), vel[:, vel_index].reshape(-1),
                               self.ref_pos[pos_index], self.ref_vel[vel_index]])
| [
"numpy.random.uniform",
"cassie_m.cassiemujoco.CassieVis",
"numpy.sum",
"random.randint",
"numpy.copy",
"numpy.zeros",
"numpy.clip",
"numpy.array",
"gym.spaces.Box",
"loadstep.CassieTrajectory",
"cassie_m.cassiemujoco.pd_in_t",
"numpy.exp",
"cassie_m.cassiemujoco.CassieSim",
"cassie_m.cass... | [((622, 644), 'cassie_m.cassiemujoco.CassieSim', 'CassieSim', (['self.config'], {}), '(self.config)\n', (631, 644), False, 'from cassie_m.cassiemujoco import pd_in_t, state_out_t, CassieSim, CassieVis\n'), ((1031, 1063), 'numpy.array', 'np.array', (['[100, 100, 88, 96, 50]'], {}), '([100, 100, 88, 96, 50])\n', (1039, 1063), True, 'import numpy as np\n'), ((1086, 1123), 'numpy.array', 'np.array', (['[10.0, 10.0, 8.0, 9.6, 5.0]'], {}), '([10.0, 10.0, 8.0, 9.6, 5.0])\n', (1094, 1123), True, 'import numpy as np\n'), ((1141, 1150), 'cassie_m.cassiemujoco.pd_in_t', 'pd_in_t', ([], {}), '()\n', (1148, 1150), False, 'from cassie_m.cassiemujoco import pd_in_t, state_out_t, CassieSim, CassieVis\n'), ((1313, 1326), 'cassie_m.cassiemujoco.state_out_t', 'state_out_t', ([], {}), '()\n', (1324, 1326), False, 'from cassie_m.cassiemujoco import pd_in_t, state_out_t, CassieSim, CassieVis\n'), ((1962, 2047), 'numpy.array', 'np.array', (['[1, 2, 3, 4, 5, 6, 7, 8, 9, 14, 15, 16, 20, 21, 22, 23, 28, 29, 30, 34]'], {}), '([1, 2, 3, 4, 5, 6, 7, 8, 9, 14, 15, 16, 20, 21, 22, 23, 28, 29, 30,\n 34])\n', (1970, 2047), True, 'import numpy as np\n'), ((2050, 2135), 'numpy.array', 'np.array', (['[0, 1, 2, 3, 4, 5, 6, 7, 8, 12, 13, 14, 18, 19, 20, 21, 25, 26, 27, 31]'], {}), '([0, 1, 2, 3, 4, 5, 6, 7, 8, 12, 13, 14, 18, 19, 20, 21, 25, 26, 27,\n 31])\n', (2058, 2135), True, 'import numpy as np\n'), ((2185, 2278), 'numpy.array', 'np.array', (['[0.0045, 0.0, 0.4973, -1.1997, -1.5968, 0.0045, 0.0, 0.4973, -1.1997, -1.5968]'], {}), '([0.0045, 0.0, 0.4973, -1.1997, -1.5968, 0.0045, 0.0, 0.4973, -\n 1.1997, -1.5968])\n', (2193, 2278), True, 'import numpy as np\n'), ((2529, 2628), 'numpy.array', 'np.array', (['[-0.24790886454547323, -0.24679713195445646, -0.6609396704367185, \n 0.663921021343526]'], {}), '([-0.24790886454547323, -0.24679713195445646, -0.6609396704367185, \n 0.663921021343526])\n', (2537, 2628), True, 'import numpy as np\n'), ((3739, 3751), 'numpy.zeros', 'np.zeros', 
(['(10)'], {}), '(10)\n', (3747, 3751), True, 'import numpy as np\n'), ((3787, 3798), 'numpy.zeros', 'np.zeros', (['(6)'], {}), '(6)\n', (3795, 3798), True, 'import numpy as np\n'), ((4003, 4212), 'numpy.array', 'np.array', (['[0.1, 1, -2, 3, -4, -10, -11, 12, 13, 14, -5, -6, 7, 8, 9, 15, -16, 17, -18,\n 19, -20, -26, -27, 28, 29, 30, -21, -22, 23, 24, 25, 31, -32, 33, 37, \n 38, 39, 34, 35, 36, 43, 44, 45, 40, 41, 42]'], {}), '([0.1, 1, -2, 3, -4, -10, -11, 12, 13, 14, -5, -6, 7, 8, 9, 15, -16,\n 17, -18, 19, -20, -26, -27, 28, 29, 30, -21, -22, 23, 24, 25, 31, -32, \n 33, 37, 38, 39, 34, 35, 36, 43, 44, 45, 40, 41, 42])\n', (4011, 4212), True, 'import numpy as np\n'), ((4376, 4418), 'numpy.concatenate', 'np.concatenate', (['[base_mir_obs, append_obs]'], {}), '([base_mir_obs, append_obs])\n', (4390, 4418), True, 'import numpy as np\n'), ((4555, 4621), 'gym.spaces.Box', 'spaces.Box', ([], {'low': '(-np.inf)', 'high': 'np.inf', 'shape': '(full_state_est_size,)'}), '(low=-np.inf, high=np.inf, shape=(full_state_est_size,))\n', (4565, 4621), False, 'from gym import spaces\n'), ((5312, 5321), 'cassie_m.cassiemujoco.pd_in_t', 'pd_in_t', ([], {}), '()\n', (5319, 5321), False, 'from cassie_m.cassiemujoco import pd_in_t, state_out_t, CassieSim, CassieVis\n'), ((15622, 15707), 'numpy.array', 'np.array', (['[1, 2, 3, 4, 5, 6, 7, 8, 9, 14, 15, 16, 20, 21, 22, 23, 28, 29, 30, 34]'], {}), '([1, 2, 3, 4, 5, 6, 7, 8, 9, 14, 15, 16, 20, 21, 22, 23, 28, 29, 30,\n 34])\n', (15630, 15707), True, 'import numpy as np\n'), ((16341, 16426), 'numpy.array', 'np.array', (['[0, 1, 2, 3, 4, 5, 6, 7, 8, 12, 13, 14, 18, 19, 20, 21, 25, 26, 27, 31]'], {}), '([0, 1, 2, 3, 4, 5, 6, 7, 8, 12, 13, 14, 18, 19, 20, 21, 25, 26, 27,\n 31])\n', (16349, 16426), True, 'import numpy as np\n'), ((16419, 16479), 'numpy.concatenate', 'np.concatenate', (['[self.qpos[pos_index], self.qvel[vel_index]]'], {}), '([self.qpos[pos_index], self.qvel[vel_index]])\n', (16433, 16479), True, 'import numpy as np\n'), 
((19259, 19282), 'numpy.sum', 'np.sum', (['(action * action)'], {}), '(action * action)\n', (19265, 19282), True, 'import numpy as np\n'), ((20191, 20213), 'cassie_m.cassiemujoco.CassieSim', 'CassieSim', (['self.config'], {}), '(self.config)\n', (20200, 20213), False, 'from cassie_m.cassiemujoco import pd_in_t, state_out_t, CassieSim, CassieVis\n'), ((20462, 20511), 'gym.spaces.Box', 'spaces.Box', ([], {'low': '(-np.inf)', 'high': 'np.inf', 'shape': '(80,)'}), '(low=-np.inf, high=np.inf, shape=(80,))\n', (20472, 20511), False, 'from gym import spaces\n'), ((20621, 20660), 'loadstep.CassieTrajectory', 'CassieTrajectory', (['"""../ref/stepdata.bin"""'], {}), "('../ref/stepdata.bin')\n", (20637, 20660), False, 'from loadstep import CassieTrajectory\n'), ((20678, 20710), 'numpy.array', 'np.array', (['[100, 100, 88, 96, 50]'], {}), '([100, 100, 88, 96, 50])\n', (20686, 20710), True, 'import numpy as np\n'), ((20733, 20770), 'numpy.array', 'np.array', (['[10.0, 10.0, 8.0, 9.6, 5.0]'], {}), '([10.0, 10.0, 8.0, 9.6, 5.0])\n', (20741, 20770), True, 'import numpy as np\n'), ((20788, 20797), 'cassie_m.cassiemujoco.pd_in_t', 'pd_in_t', ([], {}), '()\n', (20795, 20797), False, 'from cassie_m.cassiemujoco import pd_in_t, state_out_t, CassieSim, CassieVis\n'), ((20836, 20849), 'cassie_m.cassiemujoco.state_out_t', 'state_out_t', ([], {}), '()\n', (20847, 20849), False, 'from cassie_m.cassiemujoco import pd_in_t, state_out_t, CassieSim, CassieVis\n'), ((21253, 21346), 'numpy.array', 'np.array', (['[0.0045, 0.0, 0.4973, -1.1997, -1.5968, 0.0045, 0.0, 0.4973, -1.1997, -1.5968]'], {}), '([0.0045, 0.0, 0.4973, -1.1997, -1.5968, 0.0045, 0.0, 0.4973, -\n 1.1997, -1.5968])\n', (21261, 21346), True, 'import numpy as np\n'), ((22327, 22339), 'numpy.zeros', 'np.zeros', (['(10)'], {}), '(10)\n', (22335, 22339), True, 'import numpy as np\n'), ((22375, 22386), 'numpy.zeros', 'np.zeros', (['(6)'], {}), '(6)\n', (22383, 22386), True, 'import numpy as np\n'), ((22689, 22698), 
'cassie_m.cassiemujoco.pd_in_t', 'pd_in_t', ([], {}), '()\n', (22696, 22698), False, 'from cassie_m.cassiemujoco import pd_in_t, state_out_t, CassieSim, CassieVis\n'), ((29791, 29812), 'random.randint', 'random.randint', (['(0)', '(27)'], {}), '(0, 27)\n', (29805, 29812), False, 'import random\n'), ((30887, 30972), 'numpy.array', 'np.array', (['[1, 2, 3, 4, 5, 6, 7, 8, 9, 14, 15, 16, 20, 21, 22, 23, 28, 29, 30, 34]'], {}), '([1, 2, 3, 4, 5, 6, 7, 8, 9, 14, 15, 16, 20, 21, 22, 23, 28, 29, 30,\n 34])\n', (30895, 30972), True, 'import numpy as np\n'), ((31606, 31691), 'numpy.array', 'np.array', (['[0, 1, 2, 3, 4, 5, 6, 7, 8, 12, 13, 14, 18, 19, 20, 21, 25, 26, 27, 31]'], {}), '([0, 1, 2, 3, 4, 5, 6, 7, 8, 12, 13, 14, 18, 19, 20, 21, 25, 26, 27,\n 31])\n', (31614, 31691), True, 'import numpy as np\n'), ((31693, 31808), 'numpy.concatenate', 'np.concatenate', (['[self.qpos[pos_index], self.qvel[vel_index], self.ref_pos[pos_index], self.\n ref_vel[vel_index]]'], {}), '([self.qpos[pos_index], self.qvel[vel_index], self.ref_pos[\n pos_index], self.ref_vel[vel_index]])\n', (31707, 31808), True, 'import numpy as np\n'), ((31956, 31979), 'numpy.sum', 'np.sum', (['(action * action)'], {}), '(action * action)\n', (31962, 31979), True, 'import numpy as np\n'), ((33368, 33418), 'numpy.copy', 'np.copy', (['self.trajectory.qpos[self.phase * 2 * 30]'], {}), '(self.trajectory.qpos[self.phase * 2 * 30])\n', (33375, 33418), True, 'import numpy as np\n'), ((33542, 33592), 'numpy.copy', 'np.copy', (['self.trajectory.qvel[self.phase * 2 * 30]'], {}), '(self.trajectory.qvel[self.phase * 2 * 30])\n', (33549, 33592), True, 'import numpy as np\n'), ((33744, 33789), 'numpy.copy', 'np.copy', (['self.trajectory.qpos[phase * 2 * 30]'], {}), '(self.trajectory.qpos[phase * 2 * 30])\n', (33751, 33789), True, 'import numpy as np\n'), ((33800, 33845), 'numpy.copy', 'np.copy', (['self.trajectory.qvel[phase * 2 * 30]'], {}), '(self.trajectory.qvel[phase * 2 * 30])\n', (33807, 33845), True, 'import numpy 
as np\n'), ((34413, 34486), 'gym.spaces.Box', 'spaces.Box', ([], {'low': '(-np.inf)', 'high': 'np.inf', 'shape': '(self.buffer_size * 40 + 40,)'}), '(low=-np.inf, high=np.inf, shape=(self.buffer_size * 40 + 40,))\n', (34423, 34486), False, 'from gym import spaces\n'), ((34973, 35016), 'numpy.array', 'np.array', (['[x[0] for x in self.state_buffer]'], {}), '([x[0] for x in self.state_buffer])\n', (34981, 35016), True, 'import numpy as np\n'), ((35031, 35074), 'numpy.array', 'np.array', (['[x[1] for x in self.state_buffer]'], {}), '([x[1] for x in self.state_buffer])\n', (35039, 35074), True, 'import numpy as np\n'), ((35796, 35881), 'numpy.array', 'np.array', (['[1, 2, 3, 4, 5, 6, 7, 8, 9, 14, 15, 16, 20, 21, 22, 23, 28, 29, 30, 34]'], {}), '([1, 2, 3, 4, 5, 6, 7, 8, 9, 14, 15, 16, 20, 21, 22, 23, 28, 29, 30,\n 34])\n', (35804, 35881), True, 'import numpy as np\n'), ((36515, 36600), 'numpy.array', 'np.array', (['[0, 1, 2, 3, 4, 5, 6, 7, 8, 12, 13, 14, 18, 19, 20, 21, 25, 26, 27, 31]'], {}), '([0, 1, 2, 3, 4, 5, 6, 7, 8, 12, 13, 14, 18, 19, 20, 21, 25, 26, 27,\n 31])\n', (36523, 36600), True, 'import numpy as np\n'), ((692, 711), 'cassie_m.cassiemujoco.CassieVis', 'CassieVis', (['self.sim'], {}), '(self.sim)\n', (701, 711), False, 'from cassie_m.cassiemujoco import pd_in_t, state_out_t, CassieSim, CassieVis\n'), ((13165, 13213), 'numpy.random.uniform', 'np.random.uniform', (['self.fric_low', 'self.fric_high'], {}), '(self.fric_low, self.fric_high)\n', (13182, 13213), True, 'import numpy as np\n'), ((13238, 13271), 'numpy.random.uniform', 'np.random.uniform', (['(0.0001)', '(0.0005)'], {}), '(0.0001, 0.0005)\n', (13255, 13271), True, 'import numpy as np\n'), ((13290, 13323), 'numpy.random.uniform', 'np.random.uniform', (['(0.0001)', '(0.0002)'], {}), '(0.0001, 0.0002)\n', (13307, 13323), True, 'import numpy as np\n'), ((19784, 19806), 'numpy.exp', 'np.exp', (['(-joint_penalty)'], {}), '(-joint_penalty)\n', (19790, 19806), True, 'import numpy as np\n'), ((20261, 
20280), 'cassie_m.cassiemujoco.CassieVis', 'CassieVis', (['self.sim'], {}), '(self.sim)\n', (20270, 20280), False, 'from cassie_m.cassiemujoco import pd_in_t, state_out_t, CassieSim, CassieVis\n'), ((28423, 28471), 'numpy.random.uniform', 'np.random.uniform', (['self.fric_low', 'self.fric_high'], {}), '(self.fric_low, self.fric_high)\n', (28440, 28471), True, 'import numpy as np\n'), ((28496, 28529), 'numpy.random.uniform', 'np.random.uniform', (['(0.0001)', '(0.0005)'], {}), '(0.0001, 0.0005)\n', (28513, 28529), True, 'import numpy as np\n'), ((28548, 28581), 'numpy.random.uniform', 'np.random.uniform', (['(0.0001)', '(0.0002)'], {}), '(0.0001, 0.0002)\n', (28565, 28581), True, 'import numpy as np\n'), ((32907, 32927), 'numpy.exp', 'np.exp', (['(-ref_penalty)'], {}), '(-ref_penalty)\n', (32913, 32927), True, 'import numpy as np\n'), ((32958, 32981), 'numpy.exp', 'np.exp', (['(-spring_penalty)'], {}), '(-spring_penalty)\n', (32964, 32981), True, 'import numpy as np\n'), ((33009, 33037), 'numpy.exp', 'np.exp', (['(-orientation_penalty)'], {}), '(-orientation_penalty)\n', (33015, 33037), True, 'import numpy as np\n'), ((33065, 33085), 'numpy.exp', 'np.exp', (['(-vel_penalty)'], {}), '(-vel_penalty)\n', (33071, 33085), True, 'import numpy as np\n'), ((956, 978), 'numpy.array', 'np.array', (['([-3.14] * 10)'], {}), '([-3.14] * 10)\n', (964, 978), True, 'import numpy as np\n'), ((983, 1004), 'numpy.array', 'np.array', (['([3.14] * 10)'], {}), '([3.14] * 10)\n', (991, 1004), True, 'import numpy as np\n'), ((11198, 11221), 'numpy.random.uniform', 'np.random.uniform', (['a', 'b'], {}), '(a, b)\n', (11215, 11221), True, 'import numpy as np\n'), ((12920, 12943), 'numpy.random.uniform', 'np.random.uniform', (['a', 'b'], {}), '(a, b)\n', (12937, 12943), True, 'import numpy as np\n'), ((13483, 13511), 'numpy.clip', 'np.clip', (['damp_noise', '(0)', 'None'], {}), '(damp_noise, 0, None)\n', (13490, 13511), True, 'import numpy as np\n'), ((13548, 13576), 'numpy.clip', 'np.clip', 
(['mass_noise', '(0)', 'None'], {}), '(mass_noise, 0, None)\n', (13555, 13576), True, 'import numpy as np\n'), ((13663, 13691), 'numpy.clip', 'np.clip', (['fric_noise', '(0)', 'None'], {}), '(fric_noise, 0, None)\n', (13670, 13691), True, 'import numpy as np\n'), ((20553, 20572), 'numpy.array', 'np.array', (['([-1] * 10)'], {}), '([-1] * 10)\n', (20561, 20572), True, 'import numpy as np\n'), ((20577, 20595), 'numpy.array', 'np.array', (['([1] * 10)'], {}), '([1] * 10)\n', (20585, 20595), True, 'import numpy as np\n'), ((26456, 26479), 'numpy.random.uniform', 'np.random.uniform', (['a', 'b'], {}), '(a, b)\n', (26473, 26479), True, 'import numpy as np\n'), ((28178, 28201), 'numpy.random.uniform', 'np.random.uniform', (['a', 'b'], {}), '(a, b)\n', (28195, 28201), True, 'import numpy as np\n'), ((28741, 28769), 'numpy.clip', 'np.clip', (['damp_noise', '(0)', 'None'], {}), '(damp_noise, 0, None)\n', (28748, 28769), True, 'import numpy as np\n'), ((28806, 28834), 'numpy.clip', 'np.clip', (['mass_noise', '(0)', 'None'], {}), '(mass_noise, 0, None)\n', (28813, 28834), True, 'import numpy as np\n'), ((28921, 28949), 'numpy.clip', 'np.clip', (['fric_noise', '(0)', 'None'], {}), '(fric_noise, 0, None)\n', (28928, 28949), True, 'import numpy as np\n'), ((13030, 13073), 'numpy.random.uniform', 'np.random.uniform', (['(val - delta)', '(val + delta)'], {}), '(val - delta, val + delta)\n', (13047, 13073), True, 'import numpy as np\n'), ((28288, 28331), 'numpy.random.uniform', 'np.random.uniform', (['(val - delta)', '(val + delta)'], {}), '(val - delta, val + delta)\n', (28305, 28331), True, 'import numpy as np\n')] |
import os
import pickle
import numpy as np
def parse_sigma_output(f):
    """
    Extract the GW results of a calculation with BerkeleyGW.

    Parameters
    ----------
    f : str or file object
        Path to, or an open handle on, a sigma.out file.

    Returns
    -------
    dict :
        nspin : int
            Number of spins
        nkpt : int
            Number of k-points
        nband : int
            Number of bands
        kpt : array[nkpt, 3]
            K-points wavevectors (reduced coord)
        n : array[nband]
            band indices
        plus one array[nspin, nkpt, nband] per scalar column of the
        sigma output table (elda, ecor, x, sx-x, ch, sig, vxc,
        eqp0, eqp1, Znk).
    """
    if isinstance(f, str):
        # Accept a file name and recurse with an open handle.
        with open(f, 'r') as fi:
            return parse_sigma_output(fi)
    # Break the output into large k-point blocks.
    large_kpt_blocks = break_output_in_kpt_blocks(f)
    # Count k-points from the number of k-point blocks.
    nkpt = len(large_kpt_blocks)
    # Break the blocks into smaller (k-point, spin) blocks.
    kpt_spin_blocks = list()
    for block in large_kpt_blocks:
        subblock = break_kpt_spin_blocks(block)
        kpt_spin_blocks.extend(subblock)
    # Count spins from the number of sub-blocks of the last k-point.
    nspin = len(subblock)
    # Parse each block.
    block_results = list()
    for block in kpt_spin_blocks:
        block_result = parse_sigma_output_block(block)
        block_results.append(block_result)
    # Count the number of bands from the lines of the last block.
    nband = len(block_result['n'])
    # Initialize the results dict.
    results = dict(nspin=nspin, nkpt=nkpt, nband=nband)
    # The scalar columns are whatever the block parser produced, minus the
    # per-block bookkeeping entries.  Deriving them here keeps this function
    # in sync with parse_sigma_output_block instead of hard-coding a list.
    keys = [key for key in block_results[0] if key not in ('k', 'ik', 'spin', 'n')]
    for key in keys:
        # np.float / np.int were removed in NumPy 1.24; use the builtins.
        results[key] = np.zeros((nspin, nkpt, nband), dtype=float)
    results['kpt'] = np.zeros((nkpt, 3), dtype=float)
    results['n'] = np.zeros(nband, dtype=int)
    # Merge the per-(k-point, spin) blocks into the full arrays.
    for block_result in block_results:
        ispin = block_result['spin'] - 1
        ikpt = block_result['ik'] - 1
        for key in keys:
            for iband, val in enumerate(block_result[key]):
                results[key][ispin, ikpt, iband] = val
        results['kpt'][ikpt] = np.array(block_result['k'])
    results['n'] = np.array(block_result['n'], dtype=int)
    return results
def break_output_in_kpt_blocks(f):
    """Split the sigma output into one text chunk per k-point.

    Everything before the first 'Dealing with k' marker line is discarded;
    each returned chunk begins at a marker line and runs up to the next one
    (or the end of the file).
    """
    chunks = []
    current = None
    for line in f.readlines():
        if 'Dealing with k' in line:
            # A new k-point starts: flush the chunk collected so far.
            if current:
                chunks.append(current)
            current = line
        elif current is not None:
            current += line
    if current:
        chunks.append(current)
    return chunks
def break_kpt_spin_blocks(S):
    """Break one k-point chunk into smaller per-(k-point, spin) blocks.

    Only the 'Symmetrized values from band-averaging' section is scanned.
    Each returned block holds the ' k =' header line, the blank line, the
    column-label line and the data lines, joined by newlines.

    Raises
    ------
    Exception
        If the symmetrized-values section is missing from ``S``.
    """
    if 'Symmetrized values from band-averaging' not in S:
        raise Exception('Could not find symmetrized values in block:\n' + S)
    S = S.split('Symmetrized values from band-averaging')[-1]
    S = S.split('======================')[0]
    blocks = list()
    iterlines = iter(S.splitlines())
    # iterlines.next() was Python-2 only; use next(iterlines, '') so the
    # parser also terminates cleanly when the section has no trailing
    # blank line instead of leaking StopIteration.
    for line in iterlines:
        if line.startswith(' k ='):
            lines = [line]                      # k-point header line
            lines.append(next(iterlines, ''))   # empty line
            line = next(iterlines, '')          # column-label line
            while line.strip():
                lines.append(line)
                line = next(iterlines, '')
            blocks.append('\n'.join(lines))
    return blocks
def parse_sigma_output_block(S):
    """
    Extract the GW results of a calculation with BerkeleyGW
    from a block containing a single k-point and spin results.

    The block should be of the form

     k =  0.000000  0.000000  0.000000 ik =   1 spin = 1

       n   elda   ecor      x   sx-x     ch    sig    vxc   eqp0   eqp1    Znk
       8 -13.367 -13.367 -39.235 20.589 -12.126 -30.771 -32.926 -11.213 -11.682 0.782

    with no leading or trailing empty line.

    Returns
    -------
    dict:
        k: array[3]
            K-point wavevector (reduced coord)
        ik: int
            index of K-point
        spin: int
            index of spin
        n: list[int]
            band indices
        elda, ecor, x, sx-x, ch, sig, vxc, eqp0, eqp1, Znk: list[float]
            one list per scalar column of the table (LDA eigenvalue,
            corrected eigenvalue, bare exchange, screened exchange,
            Coulomb hole, self-energy, xc potential, quasiparticle
            energies, renormalization factor).
    """
    results = dict()
    results_lines = S.splitlines()
    kpt_line = results_lines.pop(0)
    # Drop the blank line and the column-label line after the header.
    del results_lines[:2]
    header_tokens = kpt_line.split()
    results['k'] = np.array([float(tok) for tok in header_tokens[2:5]])
    results['ik'] = int(header_tokens[7])
    results['spin'] = int(header_tokens[10])
    # Column labels of the sigma table, in file order; 'n' is the integer
    # band index, all other columns are floats.
    keys = ['n', 'elda', 'ecor', 'x', 'sx-x', 'ch', 'sig', 'vxc',
            'eqp0', 'eqp1', 'Znk']
    types = [int] + 10 * [float]
    for key in keys:
        results[key] = list()
    for line in results_lines:
        tokens = line.split()
        for key, typ, token in zip(keys, types, tokens):
            results[key].append(typ(token))
    return results
def extract_multiple_GW_results(dirnames):
    """Extract GW results from a list of calculation directories.

    Each directory must contain a pickled ``variables.pkl`` file and a
    BerkeleyGW ``sigma.out`` file.

    Parameters
    ----------
    dirnames : list of str
        Directories to read.

    Returns
    -------
    dict with keys:
        'variables' : list of unpickled variable dicts, one per directory
        'results' : list of parsed sigma outputs, one per directory
        'ndata' : number of directories processed
    """
    data = dict()
    data['variables'] = list()
    data['results'] = list()
    data['ndata'] = len(dirnames)
    for dname in dirnames:
        # Pickle files are binary: mode 'rb' is required under Python 3
        # (text mode 'r' makes pickle.load fail on the byte stream).
        with open(os.path.join(dname, 'variables.pkl'), 'rb') as f:
            variables = pickle.load(f)
        with open(os.path.join(dname, 'sigma.out'), 'r') as f:
            out = parse_sigma_output(f)
        data['variables'].append(variables)
        data['results'].append(out)
    return data
| [
"numpy.array",
"pickle.load",
"numpy.zeros",
"os.path.join"
] | [((2375, 2410), 'numpy.zeros', 'np.zeros', (['(nkpt, 3)'], {'dtype': 'np.float'}), '((nkpt, 3), dtype=np.float)\n', (2383, 2410), True, 'import numpy as np\n'), ((2429, 2458), 'numpy.zeros', 'np.zeros', (['nband'], {'dtype': 'np.int'}), '(nband, dtype=np.int)\n', (2437, 2458), True, 'import numpy as np\n'), ((2306, 2352), 'numpy.zeros', 'np.zeros', (['(nspin, nkpt, nband)'], {'dtype': 'np.float'}), '((nspin, nkpt, nband), dtype=np.float)\n', (2314, 2352), True, 'import numpy as np\n'), ((2772, 2799), 'numpy.array', 'np.array', (["block_result['k']"], {}), "(block_result['k'])\n", (2780, 2799), True, 'import numpy as np\n'), ((2823, 2864), 'numpy.array', 'np.array', (["block_result['n']"], {'dtype': 'np.int'}), "(block_result['n'], dtype=np.int)\n", (2831, 2864), True, 'import numpy as np\n'), ((7105, 7119), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (7116, 7119), False, 'import pickle\n'), ((7032, 7068), 'os.path.join', 'os.path.join', (['dname', '"""variables.pkl"""'], {}), "(dname, 'variables.pkl')\n", (7044, 7068), False, 'import os\n'), ((7139, 7171), 'os.path.join', 'os.path.join', (['dname', '"""sigma.out"""'], {}), "(dname, 'sigma.out')\n", (7151, 7171), False, 'import os\n')] |
import bilby
import numpy as np
# This tutorial demonstrates how we can sample a prior in the shape of a ball
# Note that this will not end up sampling uniformly in that space, constraint priors are more suitable for that.
# This implementation will draw a value for the x-coordinate from p(x), and given that draw a value for the
# y-coordinate from p(y|x), and given that draw a value for the z-coordinate from p(z|x,y).
# Only the x-coordinate will end up being uniform for this
class ZeroLikelihood(bilby.core.likelihood.Likelihood):
    """Likelihood that is identically zero (flat).

    With a flat likelihood the posterior sampled by bilby equals the prior
    distribution exactly, which lets us inspect the prior itself.
    """

    def log_likelihood(self):
        """Return the constant log-likelihood, 0."""
        return 0
def condition_func_y(reference_params, x):
    """Conditional bounds for p(y|x): y must keep (x, y) inside the circle.

    The ball radius is half the span between 'minimum' and 'maximum' in
    ``reference_params``.
    """
    half_span = (reference_params['maximum'] - reference_params['minimum']) / 2.0
    bound = np.sqrt(half_span ** 2 - x ** 2)
    return {'minimum': -bound, 'maximum': bound}
def condition_func_z(reference_params, x, y):
    """Conditional bounds for p(z|x, y): z must keep (x, y, z) inside the ball.

    The ball radius is half the span between 'minimum' and 'maximum' in
    ``reference_params``.
    """
    half_span = (reference_params['maximum'] - reference_params['minimum']) / 2.0
    bound = np.sqrt(half_span ** 2 - x ** 2 - y ** 2)
    return {'minimum': -bound, 'maximum': bound}
# Set up the conditional priors and the flat likelihood
priors = bilby.core.prior.ConditionalPriorDict()
# x is drawn unconditionally on [-1, 1]; y and z are then constrained by
# the condition functions so that (x, y, z) stays inside the unit ball.
priors['x'] = bilby.core.prior.Uniform(minimum=-1, maximum=1, latex_label="$x$")
priors['y'] = bilby.core.prior.ConditionalUniform(condition_func=condition_func_y, minimum=-1,
                                                    maximum=1, latex_label="$y$")
priors['z'] = bilby.core.prior.ConditionalUniform(condition_func=condition_func_z, minimum=-1,
                                                    maximum=1, latex_label="$z$")
# The flat likelihood makes the sampled posterior identical to the prior.
likelihood = ZeroLikelihood(parameters=dict(x=0, y=0, z=0))

# Sample the prior distribution
res = bilby.run_sampler(likelihood=likelihood, priors=priors, sampler='dynesty', npoints=5000, walks=100,
                        label='conditional_prior', outdir='outdir', resume=False, clean=True)
res.plot_corner()
| [
"bilby.core.prior.ConditionalUniform",
"bilby.core.prior.ConditionalPriorDict",
"bilby.core.prior.Uniform",
"bilby.run_sampler",
"numpy.sqrt"
] | [((1316, 1355), 'bilby.core.prior.ConditionalPriorDict', 'bilby.core.prior.ConditionalPriorDict', ([], {}), '()\n', (1353, 1355), False, 'import bilby\n'), ((1370, 1436), 'bilby.core.prior.Uniform', 'bilby.core.prior.Uniform', ([], {'minimum': '(-1)', 'maximum': '(1)', 'latex_label': '"""$x$"""'}), "(minimum=-1, maximum=1, latex_label='$x$')\n", (1394, 1436), False, 'import bilby\n'), ((1451, 1565), 'bilby.core.prior.ConditionalUniform', 'bilby.core.prior.ConditionalUniform', ([], {'condition_func': 'condition_func_y', 'minimum': '(-1)', 'maximum': '(1)', 'latex_label': '"""$y$"""'}), "(condition_func=condition_func_y,\n minimum=-1, maximum=1, latex_label='$y$')\n", (1486, 1565), False, 'import bilby\n'), ((1626, 1740), 'bilby.core.prior.ConditionalUniform', 'bilby.core.prior.ConditionalUniform', ([], {'condition_func': 'condition_func_z', 'minimum': '(-1)', 'maximum': '(1)', 'latex_label': '"""$z$"""'}), "(condition_func=condition_func_z,\n minimum=-1, maximum=1, latex_label='$z$')\n", (1661, 1740), False, 'import bilby\n'), ((1886, 2063), 'bilby.run_sampler', 'bilby.run_sampler', ([], {'likelihood': 'likelihood', 'priors': 'priors', 'sampler': '"""dynesty"""', 'npoints': '(5000)', 'walks': '(100)', 'label': '"""conditional_prior"""', 'outdir': '"""outdir"""', 'resume': '(False)', 'clean': '(True)'}), "(likelihood=likelihood, priors=priors, sampler='dynesty',\n npoints=5000, walks=100, label='conditional_prior', outdir='outdir',\n resume=False, clean=True)\n", (1903, 2063), False, 'import bilby\n'), ((902, 931), 'numpy.sqrt', 'np.sqrt', (['(radius ** 2 - x ** 2)'], {}), '(radius ** 2 - x ** 2)\n', (909, 931), True, 'import numpy as np\n'), ((1169, 1207), 'numpy.sqrt', 'np.sqrt', (['(radius ** 2 - x ** 2 - y ** 2)'], {}), '(radius ** 2 - x ** 2 - y ** 2)\n', (1176, 1207), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Oct 30 20:52:19 2017
@author: rahul
"""
import random
import math
import numpy
import config
import data_handler
import algorithm
def init():
    """Create one quantum individual: NUM_FEATURES rotation angles in [0, 2*pi)."""
    return [random.uniform(0, 1) * math.pi * 2 for _ in range(config.NUM_FEATURES)]
def pop_init():
    """Create the initial population of POPULATION_SIZE quantum individuals."""
    return [init() for _ in range(config.POPULATION_SIZE)]
def qubit_observe(qubits):
    """Collapse each qubit angle to a classical bit.

    cos(angle)**2 is treated as the probability of observing 0; a uniform
    draw decides the outcome for every qubit independently.
    """
    bits = []
    for angle in qubits:
        prob_zero = math.cos(angle) ** 2
        bits.append(0 if prob_zero > random.uniform(0, 1) else 1)
    return bits
def pop_observe(pop_qubits):
    """Observe every individual, collapsing the population to classical bit lists."""
    return [qubit_observe(qubits) for qubits in pop_qubits]
#def pop_accuracy(pop_obs_qubits):
# accuracy = []
# for clabits in pop_obs_qubits:
# dataX, dataY = data_handler.dataset_with_feature_subset(clabits)
# trainX, trainY, testX, testY = data_handler.train_test_set(dataX, dataY)
# LR_model = algorithm.train_LR(trainX, trainY)
# test_acc = algorithm.test_LR(LR_model, testX, testY)
# accuracy.append(test_acc)
# return accuracy
def pop_accuracy(pop_obs_qubits):
    """Score every observed individual with 10-fold cross-validated LR.

    Each individual's bit list selects a feature subset; a logistic
    regression is trained on that subset and cross-validated.

    Returns
    -------
    (list, list)
        Per-individual mean accuracy, and the raw per-fold score arrays.
    """
    mean_accuracy = []
    cross_validation_score = []
    for feature_mask in pop_obs_qubits:
        dataX, dataY = data_handler.dataset_with_feature_subset(feature_mask)
        model = algorithm.train_LR(dataX, dataY)
        fold_scores = algorithm.cross_validation_score(model, dataX, dataY, 10)
        cross_validation_score.append(fold_scores)
        mean_accuracy.append(numpy.mean(fold_scores))
    return mean_accuracy, cross_validation_score
#def pop_accuracy(pop_obs_qubits):
# accuracy = []
# for clabits in pop_obs_qubits:
# trainX, trainY = data_handler.read_train_data()
# testX, testY = data_handler.read_test_data()
# LR_model = algorithm.train_LR(trainX, trainY)
# test_acc = algorithm.test_LR(LR_model, testX, testY)
# accuracy.append(test_acc)
# return accuracy
def mutation(qubits, pop_qubits, ind_num):
    """DE/rand/1 mutation: v = r1 + F * (r2 - r3).

    Three distinct individuals are drawn at random from the population,
    excluding the target individual ``ind_num``.
    """
    candidates = [i for i in range(0, config.POPULATION_SIZE) if i != ind_num]
    r1, r2, r3 = (pop_qubits[i] for i in random.sample(candidates, 3))
    return [r1[i] + config.F * (r2[i] - r3[i]) for i in range(0, config.NUM_FEATURES)]
def pop_mutation(pop_qubits):
    """Build a mutant vector for every individual in the population."""
    return [mutation(qubits, pop_qubits, ind_num)
            for ind_num, qubits in enumerate(pop_qubits)]
def crossover(qubits, mut_qubits, Irand):
    """Binomial (uniform) DE crossover of a target vector and its mutant.

    Gene i is taken from the mutant when a uniform draw falls at or below
    CR, or unconditionally when i == Irand, which guarantees the trial
    vector differs from the target in at least one position.
    """
    trial = []
    for i in range(0, config.NUM_FEATURES):
        # Standard DE crossover uses OR here; the original 'and' meant at
        # most the single gene Irand could ever be taken from the mutant.
        if random.uniform(0, 1) <= config.CR or i == Irand:
            trial.append(mut_qubits[i])
        else:
            trial.append(qubits[i])
    return trial
def get_Irand():
    """Pick the index of the gene that is always taken from the mutant.

    Returns a valid gene index in [0, NUM_FEATURES - 1].  The previous
    formula, int(1 + U(0,1) * NUM_FEATURES), produced values in
    [1, NUM_FEATURES]: gene 0 could never be forced and the value
    NUM_FEATURES matched no gene at all.
    """
    return random.randrange(config.NUM_FEATURES)
def pop_crossover(pop_qubits, pop_mut_qubits, accuracy):
    """Cross every target vector with its mutant vector.

    ``accuracy`` is not used here; the parameter is kept for API
    compatibility with existing callers.
    """
    return [crossover(qubits, mut_qubits, get_Irand())
            for qubits, mut_qubits in zip(pop_qubits, pop_mut_qubits)]
def selection(solution, cross_solution, solution_accuracy, cross_solution_accuracy):
    """Greedy survivor selection: keep the trial only if strictly better."""
    trial_wins = cross_solution_accuracy > solution_accuracy
    return cross_solution if trial_wins else solution
def pop_selection(pop_qubits, pop_obs_qubits, qubits_accuracy, pop_cross_qubits, pop_obs_cross_qubits, cross_qubits_accuracy):
    """Greedy DE survivor selection with an elitism threshold.

    Individuals whose accuracy already meets config.ELITISM are carried
    over untouched; for the rest, the trial (crossover) solution replaces
    the target only when it is strictly more accurate.
    """
    next_qubits = []
    next_obs_qubits = []
    rows = zip(pop_qubits, pop_obs_qubits, qubits_accuracy,
               pop_cross_qubits, pop_obs_cross_qubits, cross_qubits_accuracy)
    for q, obs, acc, trial_q, trial_obs, trial_acc in rows:
        if acc < config.ELITISM and trial_acc > acc:
            next_qubits.append(trial_q)
            next_obs_qubits.append(trial_obs)
        else:
            next_qubits.append(q)
            next_obs_qubits.append(obs)
    return next_qubits, next_obs_qubits
def print_output(final_pop_qubits, final_pop_obs_qubits, final_qubits_cross_val_score):
    """Write the final population and its per-fold CV accuracies to OUTPUT_FILEPATH.

    The report starts with the run configuration, then, for each of the
    10 cross-validation folds, lists every solution's accuracy, its qubit
    angles and its observed (classical) bits.
    """
    # 'with' guarantees the file is flushed and closed; the original
    # implementation opened the handle and never closed it.
    with open(config.OUTPUT_FILEPATH, 'w') as f:
        f.write('VERSION : ' + config.VERSION + '\n')
        f.write('NUM_FEATURES : ' + str(config.NUM_FEATURES) + '\n')
        f.write('POPULATION_SIZE : ' + str(config.POPULATION_SIZE) + '\n')
        f.write('EQDE_MAXITER : ' + str(config.EQDE_MAXITER) + '\n')
        f.write('ELITISM : ' + str(config.ELITISM) + '\n')
        f.write('F : ' + str(config.F) + '\n')
        f.write('CR : ' + str(config.CR) + '\n')
        f.write('TEST_SIZE : ' + str(config.TEST_SIZE) + '\n' + '\n')
        for fold in range(0, 10):
            f.write("FOLD_" + str(fold) + " --------------------\n\n")
            for solution in range(0, config.POPULATION_SIZE):
                f.write("Solution_" + str(solution) + ":" + '\n')
                f.write("ACCURACY: " + str(final_qubits_cross_val_score[solution][fold]) + '\n')
                f.write('QUBITS: ' + ' '.join(str(el) for el in final_pop_qubits[solution]) + '\n')
                f.write('OBSERVED_QUBITS: ' + ' '.join(str(el) for el in final_pop_obs_qubits[solution]) + '\n\n')
| [
"algorithm.cross_validation_score",
"random.uniform",
"random.sample",
"data_handler.dataset_with_feature_subset",
"numpy.mean",
"algorithm.train_LR",
"math.cos"
] | [((2285, 2308), 'random.sample', 'random.sample', (['index', '(3)'], {}), '(index, 3)\n', (2298, 2308), False, 'import random\n'), ((1438, 1487), 'data_handler.dataset_with_feature_subset', 'data_handler.dataset_with_feature_subset', (['clabits'], {}), '(clabits)\n', (1478, 1487), False, 'import data_handler\n'), ((1507, 1539), 'algorithm.train_LR', 'algorithm.train_LR', (['dataX', 'dataY'], {}), '(dataX, dataY)\n', (1525, 1539), False, 'import algorithm\n'), ((1559, 1619), 'algorithm.cross_validation_score', 'algorithm.cross_validation_score', (['LR_model', 'dataX', 'dataY', '(10)'], {}), '(LR_model, dataX, dataY, 10)\n', (1591, 1619), False, 'import algorithm\n'), ((589, 609), 'random.uniform', 'random.uniform', (['(0)', '(1)'], {}), '(0, 1)\n', (603, 609), False, 'import random\n'), ((1697, 1717), 'numpy.mean', 'numpy.mean', (['cv_score'], {}), '(cv_score)\n', (1707, 1717), False, 'import numpy\n'), ((570, 582), 'math.cos', 'math.cos', (['el'], {}), '(el)\n', (578, 582), False, 'import math\n'), ((2938, 2958), 'random.uniform', 'random.uniform', (['(0)', '(1)'], {}), '(0, 1)\n', (2952, 2958), False, 'import random\n'), ((3155, 3175), 'random.uniform', 'random.uniform', (['(0)', '(1)'], {}), '(0, 1)\n', (3169, 3175), False, 'import random\n'), ((292, 312), 'random.uniform', 'random.uniform', (['(0)', '(1)'], {}), '(0, 1)\n', (306, 312), False, 'import random\n')] |
import numpy as np
from ml_tooling.utils import DataType, MetricError
def lift_score(y_target: DataType, y_predicted: DataType) -> float:
    """
    Compute the lift score of an estimator: how much better its positive
    predictions are than a random baseline.

    The formula is:

        lift = (TP/(TP+FN)) / ((TP+FP)/(TP+TN+FP+FN))

    Source: https://rasbt.github.io/mlxtend/user_guide/evaluate/lift_score

    :param y_target:
        Target labels

    :param y_predicted:
        Predicted labels

    :return:
        Lift score
    """
    targets = np.array(y_target)
    preds = np.array(y_predicted)

    if targets.ndim > 1 or preds.ndim > 1:
        raise MetricError("Input must be 1-dimensional")

    n = len(targets)
    frac_pos_true = np.sum(targets == 1) / n
    frac_pos_pred = np.sum(preds == 1) / n

    # A row equal to (1, 1) is a true positive.
    stacked = np.column_stack([targets, preds])
    frac_true_pos = (stacked == 1).all(axis=1).sum() / n

    return frac_true_pos / (frac_pos_true * frac_pos_pred)
| [
"ml_tooling.utils.MetricError",
"numpy.array",
"numpy.sum",
"numpy.column_stack"
] | [((609, 627), 'numpy.array', 'np.array', (['y_target'], {}), '(y_target)\n', (617, 627), True, 'import numpy as np\n'), ((646, 667), 'numpy.array', 'np.array', (['y_predicted'], {}), '(y_predicted)\n', (654, 667), True, 'import numpy as np\n'), ((935, 975), 'numpy.column_stack', 'np.column_stack', (['[y_target, y_predicted]'], {}), '([y_target, y_predicted])\n', (950, 975), True, 'import numpy as np\n'), ((733, 775), 'ml_tooling.utils.MetricError', 'MetricError', (['"""Input must be 1-dimensional"""'], {}), "('Input must be 1-dimensional')\n", (744, 775), False, 'from ml_tooling.utils import DataType, MetricError\n'), ((830, 851), 'numpy.sum', 'np.sum', (['(y_target == 1)'], {}), '(y_target == 1)\n', (836, 851), True, 'import numpy as np\n'), ((890, 914), 'numpy.sum', 'np.sum', (['(y_predicted == 1)'], {}), '(y_predicted == 1)\n', (896, 914), True, 'import numpy as np\n')] |
"""Tests for nonlinear equations lecture."""
import numpy as np
from nonlinear_algorithms import bisect
from nonlinear_algorithms import fixpoint
from nonlinear_algorithms import newton_method
from scipy.optimize import bisect as sp_bisect
def test_1():
    """Bisection method agrees with scipy on the root of x**3 - 2."""

    def cube_minus_two(x):
        return x ** 3 - 2

    root = bisect(cube_minus_two, 1, 2)[0]
    np.testing.assert_almost_equal(root, 1.259921)
    np.testing.assert_almost_equal(sp_bisect(cube_minus_two, 1, 2), root)
def test_2():
    """Fixpoint iteration of sqrt starting from 2 converges to 1."""

    def sqrt_map(x):
        return np.sqrt(x)

    fixed_point = fixpoint(sqrt_map, 2)[0]
    np.testing.assert_almost_equal(fixed_point, 1.0, decimal=3)
def test_3():
    """Newton's method drives f(x) = x**3 - 2 to zero."""

    def value_and_jacobian(x):
        # Returns (f(x), f'(x)) as required by newton_method.
        return x ** 3 - 2, 3 * x ** 2

    root = newton_method(value_and_jacobian, 0.4)
    np.testing.assert_almost_equal(value_and_jacobian(root)[0], 0)
| [
"nonlinear_algorithms.bisect",
"numpy.testing.assert_almost_equal",
"nonlinear_algorithms.newton_method",
"nonlinear_algorithms.fixpoint",
"scipy.optimize.bisect",
"numpy.sqrt"
] | [((381, 424), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['y', '(1.259921)'], {}), '(y, 1.259921)\n', (411, 424), True, 'import numpy as np\n'), ((627, 676), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['y', '(1.0)'], {'decimal': '(3)'}), '(y, 1.0, decimal=3)\n', (657, 676), True, 'import numpy as np\n'), ((887, 908), 'nonlinear_algorithms.newton_method', 'newton_method', (['f', '(0.4)'], {}), '(f, 0.4)\n', (900, 908), False, 'from nonlinear_algorithms import newton_method\n'), ((351, 372), 'nonlinear_algorithms.bisect', 'bisect', (['example', '(1)', '(2)'], {}), '(example, 1, 2)\n', (357, 372), False, 'from nonlinear_algorithms import bisect\n'), ((460, 484), 'scipy.optimize.bisect', 'sp_bisect', (['example', '(1)', '(2)'], {}), '(example, 1, 2)\n', (469, 484), True, 'from scipy.optimize import bisect as sp_bisect\n'), ((579, 589), 'numpy.sqrt', 'np.sqrt', (['x'], {}), '(x)\n', (586, 589), True, 'import numpy as np\n'), ((599, 619), 'nonlinear_algorithms.fixpoint', 'fixpoint', (['example', '(2)'], {}), '(example, 2)\n', (607, 619), False, 'from nonlinear_algorithms import fixpoint\n')] |
#!/usr/bin/env python
# -*- coding: utf-8; py-indent-offset:4 -*-
"""
Created on Sat Jul 22 21:44:00 2017
@author: <NAME>
"""
from sklearn.model_selection import TimeSeriesSplit
from sklearn.utils import indexable
from sklearn.utils.validation import _num_samples
import numpy as np
class TimeSeriesSplitImproved(TimeSeriesSplit):
    """Time Series cross-validator
    Provides train/test indices to split time series data samples
    that are observed at fixed time intervals, in train/test sets.
    In each split, test indices must be higher than before, and thus shuffling
    in cross validator is inappropriate.
    This cross-validation object is a variation of :class:`KFold`.
    In the kth split, it returns first k folds as train set and the
    (k+1)th fold as test set.
    Note that unlike standard cross-validation methods, successive
    training sets are supersets of those that come before them.
    Read more in the :ref:`User Guide `.
    Parameters
    ----------
    n_splits : int, default=3
        Number of splits. Must be at least 1.
    Examples
    --------
    >>> from sklearn.model_selection import TimeSeriesSplit
    >>> X = np.array([[1, 2], [3, 4], [1, 2], [3, 4]])
    >>> y = np.array([1, 2, 3, 4])
    >>> tscv = TimeSeriesSplit(n_splits=3)
    >>> print(tscv)  # doctest: +NORMALIZE_WHITESPACE
    TimeSeriesSplit(n_splits=3)
    >>> for train_index, test_index in tscv.split(X):
    ...    print("TRAIN:", train_index, "TEST:", test_index)
    ...    X_train, X_test = X[train_index], X[test_index]
    ...    y_train, y_test = y[train_index], y[test_index]
    TRAIN: [0] TEST: [1]
    TRAIN: [0 1] TEST: [2]
    TRAIN: [0 1 2] TEST: [3]
    >>> for train_index, test_index in tscv.split(X, fixed_length=True):
    ...     print("TRAIN:", train_index, "TEST:", test_index)
    ...     X_train, X_test = X[train_index], X[test_index]
    ...     y_train, y_test = y[train_index], y[test_index]
    TRAIN: [0] TEST: [1]
    TRAIN: [1] TEST: [2]
    TRAIN: [2] TEST: [3]
    >>> for train_index, test_index in tscv.split(X, fixed_length=True,
    ...     train_splits=2):
    ...     print("TRAIN:", train_index, "TEST:", test_index)
    ...     X_train, X_test = X[train_index], X[test_index]
    ...     y_train, y_test = y[train_index], y[test_index]
    TRAIN: [0 1] TEST: [2]
    TRAIN: [1 2] TEST: [3]
    Notes
    -----
    When ``fixed_length`` is ``False``, the training set has size
    ``i * train_splits * n_samples // (n_splits + 1) + n_samples %
    (n_splits + 1)`` in the ``i``th split, with a test set of size
    ``n_samples//(n_splits + 1) * test_splits``, where ``n_samples``
    is the number of samples. If fixed_length is True, replace ``i``
    in the above formulation with 1, and ignore ``n_samples %
    (n_splits + 1)`` except for the first training set. The number
    of test sets is ``n_splits + 2 - train_splits - test_splits``.
    """

    def split(self, X, y=None, groups=None, fixed_length=False,
              train_splits=1, test_splits=1):
        """Generate indices to split data into training and test set.
        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
            Training data, where n_samples is the number of samples
            and n_features is the number of features.
        y : array-like, shape (n_samples,)
            Always ignored, exists for compatibility.
        groups : array-like, with shape (n_samples,), optional
            Always ignored, exists for compatibility.
        fixed_length : bool, whether training sets should always have
            common length
        train_splits : positive int, for the minimum number of
            splits to include in training sets
        test_splits : positive int, for the number of splits to
            include in the test set
        Returns
        -------
        train : ndarray
            The training set indices for that split.
        test : ndarray
            The testing set indices for that split.
        """
        X, y, groups = indexable(X, y, groups)
        n_samples = _num_samples(X)
        n_splits = self.n_splits
        # The data is divided into n_splits + 1 equal-size folds; any
        # remainder (n_samples % n_folds) is absorbed by the first train set.
        n_folds = n_splits + 1
        train_splits, test_splits = int(train_splits), int(test_splits)
        if n_folds > n_samples:
            raise ValueError(
                ("Cannot have number of folds ={0} greater"
                 " than the number of samples: {1}.").format(n_folds,
                                                            n_samples))
        if ((n_folds - train_splits - test_splits) < 0 and test_splits > 0):
            raise ValueError(
                ("Both train_splits and test_splits must be positive"
                 " integers."))
        indices = np.arange(n_samples)
        split_size = (n_samples // n_folds)  # size of one fold
        test_size = split_size * test_splits  # each test window spans test_splits folds
        train_size = split_size * train_splits
        # First test window starts right after the initial training block;
        # successive windows advance by one fold.
        test_starts = range(train_size + n_samples % n_folds,
                            n_samples - (test_size - split_size),
                            split_size)
        if fixed_length:
            # Rolling window: each training set has constant length
            # train_size (plus the remainder, for the very first split only).
            for i, test_start in zip(range(len(test_starts)),
                                     test_starts):
                rem = 0
                if i == 0:
                    rem = n_samples % n_folds
                yield (indices[(test_start - train_size - rem):test_start], indices[test_start:test_start + test_size])
        else:
            # Expanding window: training sets grow to include all earlier data.
            for test_start in test_starts:
                yield (indices[:test_start], indices[test_start:test_start + test_size])
| [
"sklearn.utils.validation._num_samples",
"numpy.arange",
"sklearn.utils.indexable"
] | [((4050, 4073), 'sklearn.utils.indexable', 'indexable', (['X', 'y', 'groups'], {}), '(X, y, groups)\n', (4059, 4073), False, 'from sklearn.utils import indexable\n'), ((4094, 4109), 'sklearn.utils.validation._num_samples', '_num_samples', (['X'], {}), '(X)\n', (4106, 4109), False, 'from sklearn.utils.validation import _num_samples\n'), ((4738, 4758), 'numpy.arange', 'np.arange', (['n_samples'], {}), '(n_samples)\n', (4747, 4758), True, 'import numpy as np\n')] |
import matplotlib.pyplot as plt
import numpy as np
from numpy import ptp, var, array, sort, abs, mean, log, exp
from numpy import sum as npsum, max as npmax
from scipy.optimize import fmin
def FitGenParetoMLFP(x, p):
    # This function finds the parameters csi and sigma determining the
    # Generalized Pareto Distribution (GPD) that best fits the dataset x with
    # Flexible Probabilities p, via weighted maximum likelihood.
    #  INPUTS
    #   x          :[vector] dataset of invariants below the threshold (all entries > 0)
    #   p          :[vector] Flexible Probabilities associated to x
    #  OPS
    #   csi_MLFP   :[scalar] first (shape) parameter of the Generalized Pareto Distribution
    #   sigma_MLFP :[scalar] second (scale) parameter of the Generalized Pareto Distribution

    ## Code
    p = p.flatten()
    if not isinstance(x, np.ndarray):
        raise ValueError('stats:gpfit:VectorRequired. X must be a vector')
    elif any(x <= 0):
        raise ValueError('stats:gpfit:BadData. The data in X must be positive')

    x = sort(x.flatten())
    xmax = x[-1]

    # Initial guess: method of moments.
    xbar = mean(x)
    s2 = var(x)
    k0 = -.5 * (xbar ** 2 / s2 - 1)
    sigma0 = .5 * xbar * (xbar ** 2 / s2 + 1)
    if k0 < 0 and (xmax >= -sigma0 / k0):
        # Method of moments failed (the sample maximum lies outside the
        # implied support for k<0); start from an exponential fit instead.
        k0 = 0
        sigma0 = xbar
    parmhat = array([k0, log(sigma0)])

    # Minimize the negative log-likelihood with respect to (k, log(sigma))
    # using the Nelder-Mead simplex, capped at 200 iterations.
    parmhat = fmin(negloglike, parmhat, args=(x, p), maxiter=200)
    csi_MLFP = parmhat[0]
    sigma_MLFP = exp(parmhat[1])
    return csi_MLFP, sigma_MLFP


def negloglike(parms, data, FP):
    # Negative log-likelihood for the GP (log(sigma) parameterization).
    #  parms :[vector] (k, log(sigma))
    #  data  :[vector] observations, all > 0
    #  FP    :[vector] Flexible Probabilities (weights), one per observation
    k = parms[0]
    lnsigma = parms[1]
    sigma = exp(lnsigma)
    z = data / sigma
    if abs(k) > np.finfo(float).eps:
        if k > 0 or npmax(z) < -1 / k:
            lnu = log(1 + k * z)
            nll = npsum(FP * (lnsigma + (1 + 1 / k) * lnu))
        else:
            # The support of the GP when k<0 is 0 < x < abs(sigma/k):
            # observations outside the support have zero likelihood.
            nll = np.inf
    else:  # limiting exponential distribution as k -> 0
        nll = npsum(FP * (lnsigma + z))
    return nll
| [
"scipy.optimize.fmin",
"numpy.abs",
"numpy.sum",
"numpy.log",
"numpy.ptp",
"numpy.finfo",
"numpy.max",
"numpy.mean",
"numpy.exp",
"numpy.var"
] | [((1274, 1280), 'numpy.ptp', 'ptp', (['x'], {}), '(x)\n', (1277, 1280), False, 'from numpy import ptp, var, array, sort, abs, mean, log, exp\n'), ((1907, 1914), 'numpy.mean', 'mean', (['x'], {}), '(x)\n', (1911, 1914), False, 'from numpy import ptp, var, array, sort, abs, mean, log, exp\n'), ((1924, 1930), 'numpy.var', 'var', (['x'], {}), '(x)\n', (1927, 1930), False, 'from numpy import ptp, var, array, sort, abs, mean, log, exp\n'), ((2303, 2354), 'scipy.optimize.fmin', 'fmin', (['negloglike', 'parmhat'], {'args': '(x, p)', 'maxiter': '(200)'}), '(negloglike, parmhat, args=(x, p), maxiter=200)\n', (2307, 2354), False, 'from scipy.optimize import fmin\n'), ((2417, 2432), 'numpy.exp', 'exp', (['parmhat[1]'], {}), '(parmhat[1])\n', (2420, 2432), False, 'from numpy import ptp, var, array, sort, abs, mean, log, exp\n'), ((2450, 2465), 'numpy.exp', 'exp', (['parmhat[1]'], {}), '(parmhat[1])\n', (2453, 2465), False, 'from numpy import ptp, var, array, sort, abs, mean, log, exp\n'), ((3964, 3976), 'numpy.exp', 'exp', (['lnsigma'], {}), '(lnsigma)\n', (3967, 3976), False, 'from numpy import ptp, var, array, sort, abs, mean, log, exp\n'), ((4025, 4031), 'numpy.abs', 'abs', (['k'], {}), '(k)\n', (4028, 4031), False, 'from numpy import ptp, var, array, sort, abs, mean, log, exp\n'), ((4380, 4405), 'numpy.sum', 'npsum', (['(FP * (lnsigma + z))'], {}), '(FP * (lnsigma + z))\n', (4385, 4405), True, 'from numpy import sum as npsum, max as npmax\n'), ((2184, 2195), 'numpy.log', 'log', (['sigma0'], {}), '(sigma0)\n', (2187, 2195), False, 'from numpy import ptp, var, array, sort, abs, mean, log, exp\n'), ((4034, 4049), 'numpy.finfo', 'np.finfo', (['float'], {}), '(float)\n', (4042, 4049), True, 'import numpy as np\n'), ((4138, 4144), 'numpy.log', 'log', (['u'], {}), '(u)\n', (4141, 4144), False, 'from numpy import ptp, var, array, sort, abs, mean, log, exp\n'), ((4163, 4204), 'numpy.sum', 'npsum', (['(FP * (lnsigma + (1 + 1 / k) * lnu))'], {}), '(FP * (lnsigma + (1 + 1 / k) * 
lnu))\n', (4168, 4204), True, 'from numpy import sum as npsum, max as npmax\n'), ((4075, 4083), 'numpy.max', 'npmax', (['z'], {}), '(z)\n', (4080, 4083), True, 'from numpy import sum as npsum, max as npmax\n')] |
from scipy.io import readsav
import matplotlib.dates as mdates
import matplotlib as mpl
from .lofarJ2000xySun import j2000xy
import datetime
import glob
import os
from astropy import units as u
from astropy.io import fits
from astropy.time import Time
import numpy as np
from skimage import measure
from scipy.interpolate import griddata
from scipy.ndimage import gaussian_filter
from scipy.interpolate import interp2d
from scipy.optimize import curve_fit
import matplotlib.pyplot as plt
import cv2
import sunpy
import sunpy.map
import scipy
import scipy.ndimage
from matplotlib.patches import Ellipse
# try to use the precise epoch
mpl.rcParams['date.epoch']='1970-01-01T00:00:00'
try:
mdates.set_epoch('1970-01-01T00:00:00')
except:
pass
class BFdata:
    """Container for LOFAR beam-formed (BF) solar observations.

    Holds a data cube ``data_cube`` indexed as [frequency, time, beam],
    the frequency axis (MHz), the time axis (matplotlib datenums), and the
    per-beam (x, y) positions in arcsec, plus helpers to interpolate beam
    fluxes onto an image grid, characterize/fit the source, plot, and
    export to FITS.
    """

    def __init__(self):
        # No data loaded yet; call one of the load_* methods first.
        self.fname = ''
        self.havedata = False
        self.title = ''
        self.data_cube = 0
        self.freqs_ds = 0
        self.time_ds = 0
        self.xb = 0
        self.yb = 0

    def load_sav(self, fname):
        """Load an IDL .sav file whose structure key is 'ds'."""
        self.fname = fname
        self.havedata = True
        data = readsav(fname, python_dict=True)
        self.title = str(data['ds'][0]['TITLE'], 'utf-8')
        self.data_cube = data['ds'][0]['CUBE']
        self.freqs_ds = data['ds'][0]['FREQS']
        # Stored times are seconds since 1979-01-01; convert to datenums.
        self.time_ds = (data['ds'][0]['TIME']) / 3600 / 24 + mdates.date2num(datetime.datetime(1979, 1, 1))
        self.xb = data['ds'][0]['XB']
        self.yb = data['ds'][0]['YB']

    def load_sav_cube(self, fname):
        """Load an IDL .sav cube (key 'cube_ds') carrying RA/Dec beam positions."""
        self.fname = fname
        self.havedata = True
        data = readsav(fname, python_dict=True)
        header_name = 'cube_ds'
        self.title = 'LOFAR BFcube'
        self.data_cube = data[header_name][0]['CUBE']
        self.freqs_ds = data[header_name][0]['FREQS']
        self.time_ds = (data[header_name][0]['TIME']) / 3600 / 24 + mdates.date2num(datetime.datetime(1979, 1, 1))
        ra_beam = data[header_name][0]['RA']
        dec_beam = data[header_name][0]['DEC']
        # Convert J2000 (RA, Dec) to solar-frame (x, y) at the first timestamp.
        [self.xb, self.yb] = j2000xy(ra_beam, dec_beam, mdates.num2date(self.time_ds[0]))

    def load_fits(self, fname):
        """Load a FITS file previously written by :meth:`write_fits`."""
        self.fname = fname
        self.havedata = True
        hdu = fits.open(fname)
        self.title = 'LOFAR BFcube'
        self.data_cube = hdu[0].data
        self.freqs_ds = hdu[1].data['FREQ'][:]
        self.time_ds = hdu[2].data['TIME'][:]
        self.xb = hdu[3].data['X']
        self.yb = hdu[3].data['Y']

    def bf_image_by_idx(self, f_idx, t_idx, fov=3000, asecpix=20, extrap=True, interpm='cubic'):
        """Interpolate the beam fluxes at (f_idx, t_idx) onto a regular grid.

        Returns (X, Y, data_bf, x, y, Ibeam): the mesh grids, the
        interpolated image, the 1-D axes, and the raw per-beam fluxes.
        """
        data_beam = self.data_cube[f_idx, t_idx, :]
        x = np.arange(-fov, fov, asecpix)
        y = np.arange(-fov, fov, asecpix)
        X, Y = np.meshgrid(x, y)
        method = interpm
        if extrap:
            # Surround the beams with a ring of median-valued pseudo-beams
            # at 1.5x the beam extent so griddata can cover the whole FOV.
            r = 1.5 * np.max(np.sqrt(self.xb ** 2 + self.yb ** 2))
            theta = np.linspace(0, 2 * np.pi, 36)
            bf_xb = np.hstack((self.xb, r * np.cos(theta)))
            bf_yb = np.hstack((self.yb, r * np.sin(theta)))
            data_beam_bf = np.hstack((data_beam, np.ones(np.size(theta)) * np.median(data_beam))) - np.min(data_beam)
        else:
            bf_xb = self.xb
            bf_yb = self.yb
            data_beam_bf = data_beam - np.min(data_beam)
        data_bf = griddata((bf_xb, bf_yb), data_beam_bf,
                           (X, Y), method=method, fill_value=np.median(data_beam) - np.min(data_beam))
        Ibeam = data_beam
        return X, Y, data_bf, x, y, Ibeam

    def bf_image_by_freq_time(self, freq, time, fov=3000, asecpix=20, extrap=True, interpm='cubic', verbout=True):
        """Interpolated image at the nearest frequency (MHz) and time (datenum)."""
        t_idx_select = (np.abs(self.time_ds - time)).argmin()
        f_idx_select = (np.abs(self.freqs_ds - freq)).argmin()
        if verbout:
            print([t_idx_select, f_idx_select])
        X, Y, data_bf, x, y, Ibeam = self.bf_image_by_idx(f_idx_select, t_idx_select, fov=fov, asecpix=asecpix, extrap=extrap, interpm=interpm)
        return [X, Y, data_bf, x, y, Ibeam]

    def bf_time_to_idx(self, time):
        """Index of the time sample closest to `time` (datenum)."""
        return (np.abs(self.time_ds - time)).argmin()

    def bf_freq_to_idx(self, freq):
        """Index of the frequency channel closest to `freq` (MHz)."""
        return (np.abs(self.freqs_ds - freq)).argmin()

    def bf_peak_size(self, X, Y, data_bf, asecpix):
        """Peak position and FWHM area (in pixels) of the brightest source.

        NOTE: `asecpix` is accepted for interface compatibility but unused.
        """
        FWHM_thresh = np.max(data_bf) / 2.0
        img_bi = data_bf > FWHM_thresh
        bw_lb = measure.label(img_bi)
        rg_lb = measure.regionprops(bw_lb)
        x_peak = X[np.where(data_bf == np.max(data_bf))]
        y_peak = Y[np.where(data_bf == np.max(data_bf))]
        rg_id = bw_lb[np.where(data_bf == np.max(data_bf))]
        area_peak = rg_lb[int(rg_id) - 1].area
        return x_peak, y_peak, area_peak

    def bf_fit_gauss_source_by_idx(self, f_idx, t_idx, drawfig=True, verb=True):
        """Fit a tilted 2-D Gaussian to the beams inside the 70%-peak region.

        Returns (bf_res, bf_err) dicts with the fitted parameters and the
        corresponding diagonal entries of the covariance matrix.
        """
        X, Y, data_bf, x, y, Ibeam = self.bf_image_by_idx(f_idx, t_idx)
        # Select the connected region above 70% of the peak, dilate it, and
        # keep only the beams falling inside it for the fit.
        FWHM_thresh = np.max(data_bf) * 0.7
        img_bi = data_bf > FWHM_thresh
        bw_lb = measure.label(img_bi)
        rg_lb = measure.regionprops(bw_lb)
        x_peak = X[np.where(data_bf == np.max(data_bf))]
        y_peak = Y[np.where(data_bf == np.max(data_bf))]
        rg_id = bw_lb[np.where(data_bf == np.max(data_bf))]
        area_peak = rg_lb[int(rg_id) - 1].area
        bw_peak_area = (np.array(abs(bw_lb - rg_id) < 0.1) * 255).astype(np.uint8)
        dilate_size = int(x.size / 30)
        kernel = np.ones((dilate_size, dilate_size), np.uint8)
        imdilate = cv2.dilate(bw_peak_area, kernel, iterations=1)
        fbeams = interp2d(x, y, imdilate, kind='linear')
        peaks_in = np.array([fbeams(self.xb[tmp_id], self.yb[tmp_id]) for tmp_id in range(len(self.xb))]).reshape(-1)
        fit_xb = self.xb[peaks_in > 0.5]
        fit_yb = self.yb[peaks_in > 0.5]
        fit_Ib = Ibeam[peaks_in > 0.5]

        def func_gaussian(xdata, s0, x_cent, y_cent, tile, x_sig, y_sig):
            # Tilted elliptical Gaussian centered at (x_cent, y_cent).
            x, y = xdata
            xp = (x - x_cent) * np.cos(tile) - (y - y_cent) * np.sin(tile)
            yp = (x - x_cent) * np.sin(tile) + (y - y_cent) * np.cos(tile)
            flux = s0 * (np.exp(-(xp ** 2) / (2 * x_sig ** 2) - (yp ** 2) / (2 * y_sig ** 2)))
            return flux

        p0 = [np.max(fit_Ib), np.mean(fit_xb), np.mean(fit_yb), np.pi, np.std(fit_xb), np.std(fit_yb)]
        popt, pcov = curve_fit(func_gaussian, (fit_xb, fit_yb), fit_Ib, p0=p0)
        bf_res = {}
        bf_res["s0"] = popt[0]
        bf_res["x_cent"] = popt[1]
        bf_res["y_cent"] = popt[2]
        bf_res["tile"] = popt[3]
        bf_res["x_sig"] = popt[4]
        bf_res["y_sig"] = popt[5]
        # NOTE(review): these are the diagonal entries of the covariance
        # matrix (variances); take sqrt for 1-sigma standard errors.
        bf_err = {}
        bf_err["s0_err"] = pcov[0][0]
        bf_err["x_cent_err"] = pcov[1][1]
        bf_err["y_cent_err"] = pcov[2][2]
        bf_err["tile_err"] = pcov[3][3]
        bf_err["x_sig_err"] = pcov[4][4]
        bf_err["y_sig_err"] = pcov[5][5]
        # Sample the fitted FWHM ellipse for plotting.
        tmp_theta = np.linspace(0, np.pi * 2, 100)
        tmp_xp = bf_res["x_sig"] * np.cos(tmp_theta)
        tmp_yp = bf_res["y_sig"] * np.sin(tmp_theta)
        tmp_x = bf_res["x_cent"] + tmp_xp * np.cos(-bf_res['tile']) - tmp_yp * np.sin(-bf_res['tile'])
        tmp_y = bf_res["y_cent"] + tmp_xp * np.sin(-bf_res['tile']) + tmp_yp * np.cos(-bf_res['tile'])
        if drawfig:
            ax = plt.gca()
            im = ax.imshow(data_bf, cmap='gist_heat',
                           origin='lower', extent=[np.min(X), np.max(X), np.min(Y), np.max(Y)])
            ax.plot(self.xb, self.yb, 'g.')
            ax.plot(fit_xb, fit_yb, 'b+')
            ax.plot(tmp_x, tmp_y, 'k-')
            ax.plot(bf_res["x_cent"], bf_res["y_cent"], "k+")
            plt.savefig('test.pdf')
        if verb:
            print(mdates.num2date(self.time_ds[t_idx]))
            print(self.freqs_ds[f_idx])
        return bf_res, bf_err

    def plot_bf_image_by_idx(self, f_idx, t_idx):
        """Plot the interpolated beam image at the given freq/time indices."""
        # Fix: bf_image_by_idx returns six values; the original unpacked only
        # three, which raised a ValueError at runtime.
        X, Y, data_bf, x, y, Ibeam = self.bf_image_by_idx(f_idx, t_idx, fov=3000, asecpix=20)
        ax = plt.gca()
        im = ax.imshow(data_bf, cmap='gist_heat',
                       origin='lower', extent=[np.min(X), np.max(X), np.min(Y), np.max(Y)])
        ax.set_xlabel('X (Arcsec)')
        ax.set_ylabel('Y (Arcsec)')
        ax.set_aspect('equal', 'box')
        plt.colorbar(im)
        FWHM_thresh = np.max(data_bf) / 2.0
        img_bi = data_bf > FWHM_thresh
        ax.contour(X, Y, data_bf, levels=[FWHM_thresh, FWHM_thresh * 2 * 0.9], colors=['deepskyblue', 'forestgreen'])
        x_peak = X[np.where(data_bf == np.max(data_bf))]
        y_peak = Y[np.where(data_bf == np.max(data_bf))]
        # White circle: the optical solar disk (960 arcsec radius).
        ax.plot(960 * np.sin(np.arange(0, 2 * np.pi, 0.001)),
                960 * np.cos(np.arange(0, 2 * np.pi, 0.001)), 'w')
        ax.plot(x_peak, y_peak, 'k+')
        plt.show()

    def plot_bf_dyspec(self, beam_idx=0, ax_cur=None):
        """Plot the background-subtracted dynamic spectrum of one beam."""
        if ax_cur is None:
            plt.figure()
            ax_cur = plt.gca()
        dyspec = np.log10(np.array(self.data_cube[:, :, beam_idx]))
        # Subtract the per-channel time average to flatten the bandpass.
        data_ds = dyspec - np.tile(np.mean(dyspec, 1).T, (dyspec.shape[1], 1)).T
        ax_cur.imshow(data_ds, aspect='auto', origin='lower',
                      vmin=(np.mean(data_ds) - 2 * np.std(data_ds)),
                      vmax=(np.mean(data_ds) + 3 * np.std(data_ds)),
                      extent=[self.time_ds[0], self.time_ds[-1],
                              self.freqs_ds[0], self.freqs_ds[-1]], cmap='inferno')
        ax_cur.xaxis_date()
        ax_cur.set_xlabel('Time (UT)')
        ax_cur.set_ylabel('Frequency (MHz)')
        ax_cur.set_title('LOFAR Beamform Observation ' + mdates.num2date(self.time_ds[0]).strftime('%Y/%m/%d'))
        for tick in ax_cur.get_xticklabels():
            tick.set_rotation(25)

    def write_fits(self, fdir, fprefix, f_idx, t_idx):
        """
        Write the selected sub-cube into a FITS file.

        NOTE: `fprefix` is used as the full output path; `fdir` is accepted
        for interface compatibility but currently unused.
        `f_idx`/`t_idx` must contain at least two indices each (the CDELT
        headers are derived from the first two entries).
        """
        if self.havedata:
            cube_ds = self.data_cube[f_idx, :, :][:, t_idx, :]
            hdu_lofar = fits.PrimaryHDU()
            hdu_lofar.data = cube_ds.astype('float32')
            print("Data shape:")
            print(self.data_cube.shape)
            hdu_lofar.header['SIMPLE'] = True
            hdu_lofar.header['BITPIX'] = 8
            hdu_lofar.header['NAXIS '] = 3
            hdu_lofar.header['NAXIS1'] = cube_ds.shape[0]
            hdu_lofar.header['NAXIS2'] = cube_ds.shape[1]
            hdu_lofar.header['NAXIS3'] = cube_ds.shape[2]
            hdu_lofar.header['EXTEND'] = True
            hdu_lofar.header['DATE'] = mdates.num2date(self.time_ds[t_idx[0]]).strftime("%Y-%m-%d")
            hdu_lofar.header['CONTENT'] = mdates.num2date(self.time_ds[t_idx[0]]).strftime("%Y/%m/%d") + ' LOFAR Beamform observation '
            hdu_lofar.header['ORIGIN'] = 'ASTRON Netherlands'
            hdu_lofar.header['TELESCOP'] = "LOFAR"
            hdu_lofar.header['INSTRUME'] = "LBA"
            hdu_lofar.header['OBJECT'] = "Sun"
            hdu_lofar.header['DATE-OBS'] = mdates.num2date(self.time_ds[t_idx[0]]).strftime("%Y/%m/%d")
            hdu_lofar.header['TIME-OBS'] = mdates.num2date(self.time_ds[t_idx[0]]).strftime("%H:%M:%S.%f")
            hdu_lofar.header['DATE-END'] = mdates.num2date(self.time_ds[t_idx[-1]]).strftime("%Y/%m/%d")
            hdu_lofar.header['TIME-END'] = mdates.num2date(self.time_ds[t_idx[-1]]).strftime("%H:%M:%S.%f")
            hdu_lofar.header['BZERO'] = 0.
            hdu_lofar.header['BSCALE'] = 1.
            hdu_lofar.header['BUNIT'] = 'digits '
            hdu_lofar.header['DATAMIN'] = np.min(cube_ds)
            hdu_lofar.header['DATAMAX'] = np.max(cube_ds)
            hdu_lofar.header['CRVAL1'] = self.freqs_ds[f_idx[0]]
            hdu_lofar.header['CRPIX1'] = 0
            hdu_lofar.header['CTYPE1'] = 'FREQ'
            hdu_lofar.header['CDELT1'] = self.freqs_ds[f_idx[1]] - self.freqs_ds[f_idx[0]]
            hdu_lofar.header['CRVAL2'] = self.time_ds[t_idx[0]]
            hdu_lofar.header['CRPIX2'] = 0
            hdu_lofar.header['CTYPE2'] = 'TIME'
            hdu_lofar.header['CDELT2'] = self.time_ds[t_idx[1]] - self.time_ds[t_idx[0]]
            hdu_lofar.header['CRVAL3'] = 0
            hdu_lofar.header['CRPIX3'] = 0
            hdu_lofar.header['CTYPE3'] = 'BEAM'
            hdu_lofar.header['CDELT3'] = 1
            hdu_lofar.header['TITLE'] = self.title
            hdu_lofar.header['HISTORY'] = '        '
            # Binary table extensions: axes and per-beam positions.
            col_f = fits.Column(name='FREQ', array=self.freqs_ds[f_idx], format="D")
            col_t = fits.Column(name='TIME', array=self.time_ds[t_idx], format="D")
            col_x = fits.Column(name='X', array=self.xb, format="D")
            col_y = fits.Column(name='Y', array=self.yb, format="D")
            hdu_f = fits.BinTableHDU.from_columns([col_f], name="FREQ")
            hdu_t = fits.BinTableHDU.from_columns([col_t], name="TIME")
            hdu_xy = fits.BinTableHDU.from_columns([col_x, col_y], name="BeamXY")
            hdul = fits.HDUList([hdu_lofar, hdu_f, hdu_t, hdu_xy])
            hdul.writeto(fprefix, overwrite=True)

    def write_fits_full(self, fdir, fprefix):
        """Write the entire cube (all frequencies and times) to FITS."""
        if self.havedata:
            self.write_fits(fdir, fprefix, np.arange(len(self.freqs_ds)), np.arange(len(self.time_ds)))
| [
"numpy.abs",
"astropy.io.fits.PrimaryHDU",
"numpy.ones",
"skimage.measure.label",
"numpy.mean",
"numpy.arange",
"numpy.sin",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.gca",
"numpy.exp",
"astropy.io.fits.HDUList",
"skimage.measure.regionprops",
"numpy.meshgrid",
"cv2.dilate",
"numpy.... | [((695, 734), 'matplotlib.dates.set_epoch', 'mdates.set_epoch', (['"""1970-01-01T00:00:00"""'], {}), "('1970-01-01T00:00:00')\n", (711, 734), True, 'import matplotlib.dates as mdates\n'), ((1092, 1124), 'scipy.io.readsav', 'readsav', (['fname'], {'python_dict': '(True)'}), '(fname, python_dict=True)\n', (1099, 1124), False, 'from scipy.io import readsav\n'), ((1570, 1602), 'scipy.io.readsav', 'readsav', (['fname'], {'python_dict': '(True)'}), '(fname, python_dict=True)\n', (1577, 1602), False, 'from scipy.io import readsav\n'), ((2248, 2264), 'astropy.io.fits.open', 'fits.open', (['fname'], {}), '(fname)\n', (2257, 2264), False, 'from astropy.io import fits\n'), ((2653, 2682), 'numpy.arange', 'np.arange', (['(-fov)', 'fov', 'asecpix'], {}), '(-fov, fov, asecpix)\n', (2662, 2682), True, 'import numpy as np\n'), ((2695, 2724), 'numpy.arange', 'np.arange', (['(-fov)', 'fov', 'asecpix'], {}), '(-fov, fov, asecpix)\n', (2704, 2724), True, 'import numpy as np\n'), ((2740, 2757), 'numpy.meshgrid', 'np.meshgrid', (['x', 'y'], {}), '(x, y)\n', (2751, 2757), True, 'import numpy as np\n'), ((4329, 4350), 'skimage.measure.label', 'measure.label', (['img_bi'], {}), '(img_bi)\n', (4342, 4350), False, 'from skimage import measure\n'), ((4367, 4393), 'skimage.measure.regionprops', 'measure.regionprops', (['bw_lb'], {}), '(bw_lb)\n', (4386, 4393), False, 'from skimage import measure\n'), ((4949, 4970), 'skimage.measure.label', 'measure.label', (['img_bi'], {}), '(img_bi)\n', (4962, 4970), False, 'from skimage import measure\n'), ((4987, 5013), 'skimage.measure.regionprops', 'measure.regionprops', (['bw_lb'], {}), '(bw_lb)\n', (5006, 5013), False, 'from skimage import measure\n'), ((5366, 5411), 'numpy.ones', 'np.ones', (['(dilate_size, dilate_size)', 'np.uint8'], {}), '((dilate_size, dilate_size), np.uint8)\n', (5373, 5411), True, 'import numpy as np\n'), ((5429, 5475), 'cv2.dilate', 'cv2.dilate', (['bw_peak_area', 'kernel'], {'iterations': '(1)'}), '(bw_peak_area, 
kernel, iterations=1)\n', (5439, 5475), False, 'import cv2\n'), ((5494, 5533), 'scipy.interpolate.interp2d', 'interp2d', (['x', 'y', 'imdilate'], {'kind': '"""linear"""'}), "(x, y, imdilate, kind='linear')\n", (5502, 5533), False, 'from scipy.interpolate import interp2d\n'), ((6268, 6325), 'scipy.optimize.curve_fit', 'curve_fit', (['func_gaussian', '(fit_xb, fit_yb)', 'fit_Ib'], {'p0': 'p0'}), '(func_gaussian, (fit_xb, fit_yb), fit_Ib, p0=p0)\n', (6277, 6325), False, 'from scipy.optimize import curve_fit\n'), ((6813, 6843), 'numpy.linspace', 'np.linspace', (['(0)', '(np.pi * 2)', '(100)'], {}), '(0, np.pi * 2, 100)\n', (6824, 6843), True, 'import numpy as np\n'), ((2109, 2141), 'matplotlib.dates.num2date', 'mdates.num2date', (['self.time_ds[0]'], {}), '(self.time_ds[0])\n', (2124, 2141), True, 'import matplotlib.dates as mdates\n'), ((2881, 2910), 'numpy.linspace', 'np.linspace', (['(0)', '(2 * np.pi)', '(36)'], {}), '(0, 2 * np.pi, 36)\n', (2892, 2910), True, 'import numpy as np\n'), ((4199, 4214), 'numpy.max', 'np.max', (['data_bf'], {}), '(data_bf)\n', (4205, 4214), True, 'import numpy as np\n'), ((4819, 4834), 'numpy.max', 'np.max', (['data_bf'], {}), '(data_bf)\n', (4825, 4834), True, 'import numpy as np\n'), ((6159, 6173), 'numpy.max', 'np.max', (['fit_Ib'], {}), '(fit_Ib)\n', (6165, 6173), True, 'import numpy as np\n'), ((6175, 6190), 'numpy.mean', 'np.mean', (['fit_xb'], {}), '(fit_xb)\n', (6182, 6190), True, 'import numpy as np\n'), ((6192, 6207), 'numpy.mean', 'np.mean', (['fit_yb'], {}), '(fit_yb)\n', (6199, 6207), True, 'import numpy as np\n'), ((6215, 6229), 'numpy.std', 'np.std', (['fit_xb'], {}), '(fit_xb)\n', (6221, 6229), True, 'import numpy as np\n'), ((6231, 6245), 'numpy.std', 'np.std', (['fit_yb'], {}), '(fit_yb)\n', (6237, 6245), True, 'import numpy as np\n'), ((6873, 6890), 'numpy.cos', 'np.cos', (['tmp_theta'], {}), '(tmp_theta)\n', (6879, 6890), True, 'import numpy as np\n'), ((6924, 6941), 'numpy.sin', 'np.sin', (['tmp_theta'], {}), 
'(tmp_theta)\n', (6930, 6941), True, 'import numpy as np\n'), ((7174, 7183), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (7181, 7183), True, 'import matplotlib.pyplot as plt\n'), ((7515, 7538), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""test.pdf"""'], {}), "('test.pdf')\n", (7526, 7538), True, 'import matplotlib.pyplot as plt\n'), ((7845, 7854), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (7852, 7854), True, 'import matplotlib.pyplot as plt\n'), ((8130, 8146), 'matplotlib.pyplot.colorbar', 'plt.colorbar', (['im'], {}), '(im)\n', (8142, 8146), True, 'import matplotlib.pyplot as plt\n'), ((8705, 8715), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (8713, 8715), True, 'import matplotlib.pyplot as plt\n'), ((8809, 8821), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (8819, 8821), True, 'import matplotlib.pyplot as plt\n'), ((8847, 8856), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (8854, 8856), True, 'import matplotlib.pyplot as plt\n'), ((8883, 8923), 'numpy.array', 'np.array', (['self.data_cube[:, :, beam_idx]'], {}), '(self.data_cube[:, :, beam_idx])\n', (8891, 8923), True, 'import numpy as np\n'), ((9887, 9904), 'astropy.io.fits.PrimaryHDU', 'fits.PrimaryHDU', ([], {}), '()\n', (9902, 9904), False, 'from astropy.io import fits\n'), ((11800, 11815), 'numpy.min', 'np.min', (['cube_ds'], {}), '(cube_ds)\n', (11806, 11815), True, 'import numpy as np\n'), ((11880, 11895), 'numpy.max', 'np.max', (['cube_ds'], {}), '(cube_ds)\n', (11886, 11895), True, 'import numpy as np\n'), ((12929, 12993), 'astropy.io.fits.Column', 'fits.Column', ([], {'name': '"""FREQ"""', 'array': 'self.freqs_ds[f_idx]', 'format': '"""D"""'}), "(name='FREQ', array=self.freqs_ds[f_idx], format='D')\n", (12940, 12993), False, 'from astropy.io import fits\n'), ((13012, 13075), 'astropy.io.fits.Column', 'fits.Column', ([], {'name': '"""TIME"""', 'array': 'self.time_ds[t_idx]', 'format': '"""D"""'}), "(name='TIME', array=self.time_ds[t_idx], 
format='D')\n", (13023, 13075), False, 'from astropy.io import fits\n'), ((13094, 13142), 'astropy.io.fits.Column', 'fits.Column', ([], {'name': '"""X"""', 'array': 'self.xb', 'format': '"""D"""'}), "(name='X', array=self.xb, format='D')\n", (13105, 13142), False, 'from astropy.io import fits\n'), ((13161, 13209), 'astropy.io.fits.Column', 'fits.Column', ([], {'name': '"""Y"""', 'array': 'self.yb', 'format': '"""D"""'}), "(name='Y', array=self.yb, format='D')\n", (13172, 13209), False, 'from astropy.io import fits\n'), ((13228, 13279), 'astropy.io.fits.BinTableHDU.from_columns', 'fits.BinTableHDU.from_columns', (['[col_f]'], {'name': '"""FREQ"""'}), "([col_f], name='FREQ')\n", (13257, 13279), False, 'from astropy.io import fits\n'), ((13299, 13350), 'astropy.io.fits.BinTableHDU.from_columns', 'fits.BinTableHDU.from_columns', (['[col_t]'], {'name': '"""TIME"""'}), "([col_t], name='TIME')\n", (13328, 13350), False, 'from astropy.io import fits\n'), ((13371, 13431), 'astropy.io.fits.BinTableHDU.from_columns', 'fits.BinTableHDU.from_columns', (['[col_x, col_y]'], {'name': '"""BeamXY"""'}), "([col_x, col_y], name='BeamXY')\n", (13400, 13431), False, 'from astropy.io import fits\n'), ((13450, 13497), 'astropy.io.fits.HDUList', 'fits.HDUList', (['[hdu_lofar, hdu_f, hdu_t, hdu_xy]'], {}), '([hdu_lofar, hdu_f, hdu_t, hdu_xy])\n', (13462, 13497), False, 'from astropy.io import fits\n'), ((1354, 1383), 'datetime.datetime', 'datetime.datetime', (['(1979)', '(1)', '(1)'], {}), '(1979, 1, 1)\n', (1371, 1383), False, 'import datetime\n'), ((1930, 1959), 'datetime.datetime', 'datetime.datetime', (['(1979)', '(1)', '(1)'], {}), '(1979, 1, 1)\n', (1947, 1959), False, 'import datetime\n'), ((3116, 3133), 'numpy.min', 'np.min', (['data_beam'], {}), '(data_beam)\n', (3122, 3133), True, 'import numpy as np\n'), ((3241, 3258), 'numpy.min', 'np.min', (['data_beam'], {}), '(data_beam)\n', (3247, 3258), True, 'import numpy as np\n'), ((3606, 3633), 'numpy.abs', 'np.abs', (['(self.time_ds - 
time)'], {}), '(self.time_ds - time)\n', (3612, 3633), True, 'import numpy as np\n'), ((3668, 3696), 'numpy.abs', 'np.abs', (['(self.freqs_ds - freq)'], {}), '(self.freqs_ds - freq)\n', (3674, 3696), True, 'import numpy as np\n'), ((3999, 4026), 'numpy.abs', 'np.abs', (['(self.time_ds - time)'], {}), '(self.time_ds - time)\n', (4005, 4026), True, 'import numpy as np\n'), ((4089, 4117), 'numpy.abs', 'np.abs', (['(self.freqs_ds - freq)'], {}), '(self.freqs_ds - freq)\n', (4095, 4117), True, 'import numpy as np\n'), ((6063, 6127), 'numpy.exp', 'np.exp', (['(-xp ** 2 / (2 * x_sig ** 2) - yp ** 2 / (2 * y_sig ** 2))'], {}), '(-xp ** 2 / (2 * x_sig ** 2) - yp ** 2 / (2 * y_sig ** 2))\n', (6069, 6127), True, 'import numpy as np\n'), ((7015, 7038), 'numpy.sin', 'np.sin', (["(-bf_res['tile'])"], {}), "(-bf_res['tile'])\n", (7021, 7038), True, 'import numpy as np\n'), ((7112, 7135), 'numpy.cos', 'np.cos', (["(-bf_res['tile'])"], {}), "(-bf_res['tile'])\n", (7118, 7135), True, 'import numpy as np\n'), ((7574, 7610), 'matplotlib.dates.num2date', 'mdates.num2date', (['self.time_ds[t_idx]'], {}), '(self.time_ds[t_idx])\n', (7589, 7610), True, 'import matplotlib.dates as mdates\n'), ((8173, 8188), 'numpy.max', 'np.max', (['data_bf'], {}), '(data_bf)\n', (8179, 8188), True, 'import numpy as np\n'), ((2829, 2865), 'numpy.sqrt', 'np.sqrt', (['(self.xb ** 2 + self.yb ** 2)'], {}), '(self.xb ** 2 + self.yb ** 2)\n', (2836, 2865), True, 'import numpy as np\n'), ((3370, 3390), 'numpy.median', 'np.median', (['data_beam'], {}), '(data_beam)\n', (3379, 3390), True, 'import numpy as np\n'), ((3391, 3408), 'numpy.min', 'np.min', (['data_beam'], {}), '(data_beam)\n', (3397, 3408), True, 'import numpy as np\n'), ((4433, 4448), 'numpy.max', 'np.max', (['data_bf'], {}), '(data_bf)\n', (4439, 4448), True, 'import numpy as np\n'), ((4490, 4505), 'numpy.max', 'np.max', (['data_bf'], {}), '(data_bf)\n', (4496, 4505), True, 'import numpy as np\n'), ((4550, 4565), 'numpy.max', 'np.max', (['data_bf'], 
{}), '(data_bf)\n', (4556, 4565), True, 'import numpy as np\n'), ((5053, 5068), 'numpy.max', 'np.max', (['data_bf'], {}), '(data_bf)\n', (5059, 5068), True, 'import numpy as np\n'), ((5110, 5125), 'numpy.max', 'np.max', (['data_bf'], {}), '(data_bf)\n', (5116, 5125), True, 'import numpy as np\n'), ((5170, 5185), 'numpy.max', 'np.max', (['data_bf'], {}), '(data_bf)\n', (5176, 5185), True, 'import numpy as np\n'), ((5919, 5931), 'numpy.cos', 'np.cos', (['tile'], {}), '(tile)\n', (5925, 5931), True, 'import numpy as np\n'), ((5947, 5959), 'numpy.sin', 'np.sin', (['tile'], {}), '(tile)\n', (5953, 5959), True, 'import numpy as np\n'), ((5990, 6002), 'numpy.sin', 'np.sin', (['tile'], {}), '(tile)\n', (5996, 6002), True, 'import numpy as np\n'), ((6018, 6030), 'numpy.cos', 'np.cos', (['tile'], {}), '(tile)\n', (6024, 6030), True, 'import numpy as np\n'), ((6982, 7005), 'numpy.cos', 'np.cos', (["(-bf_res['tile'])"], {}), "(-bf_res['tile'])\n", (6988, 7005), True, 'import numpy as np\n'), ((7079, 7102), 'numpy.sin', 'np.sin', (["(-bf_res['tile'])"], {}), "(-bf_res['tile'])\n", (7085, 7102), True, 'import numpy as np\n'), ((9086, 9102), 'numpy.mean', 'np.mean', (['data_ds'], {}), '(data_ds)\n', (9093, 9102), True, 'import numpy as np\n'), ((9151, 9167), 'numpy.mean', 'np.mean', (['data_ds'], {}), '(data_ds)\n', (9158, 9167), True, 'import numpy as np\n'), ((10624, 10663), 'matplotlib.dates.num2date', 'mdates.num2date', (['self.time_ds[t_idx[0]]'], {}), '(self.time_ds[t_idx[0]])\n', (10639, 10663), True, 'import matplotlib.dates as mdates\n'), ((11117, 11156), 'matplotlib.dates.num2date', 'mdates.num2date', (['self.time_ds[t_idx[0]]'], {}), '(self.time_ds[t_idx[0]])\n', (11132, 11156), True, 'import matplotlib.dates as mdates\n'), ((11232, 11271), 'matplotlib.dates.num2date', 'mdates.num2date', (['self.time_ds[t_idx[0]]'], {}), '(self.time_ds[t_idx[0]])\n', (11247, 11271), True, 'import matplotlib.dates as mdates\n'), ((11348, 11388), 'matplotlib.dates.num2date', 
'mdates.num2date', (['self.time_ds[t_idx[-1]]'], {}), '(self.time_ds[t_idx[-1]])\n', (11363, 11388), True, 'import matplotlib.dates as mdates\n'), ((11464, 11504), 'matplotlib.dates.num2date', 'mdates.num2date', (['self.time_ds[t_idx[-1]]'], {}), '(self.time_ds[t_idx[-1]])\n', (11479, 11504), True, 'import matplotlib.dates as mdates\n'), ((2948, 2961), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (2954, 2961), True, 'import numpy as np\n'), ((3005, 3018), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (3011, 3018), True, 'import numpy as np\n'), ((7281, 7290), 'numpy.min', 'np.min', (['X'], {}), '(X)\n', (7287, 7290), True, 'import numpy as np\n'), ((7291, 7300), 'numpy.max', 'np.max', (['X'], {}), '(X)\n', (7297, 7300), True, 'import numpy as np\n'), ((7301, 7310), 'numpy.min', 'np.min', (['Y'], {}), '(Y)\n', (7307, 7310), True, 'import numpy as np\n'), ((7311, 7320), 'numpy.max', 'np.max', (['Y'], {}), '(Y)\n', (7317, 7320), True, 'import numpy as np\n'), ((7954, 7963), 'numpy.min', 'np.min', (['X'], {}), '(X)\n', (7960, 7963), True, 'import numpy as np\n'), ((7964, 7973), 'numpy.max', 'np.max', (['X'], {}), '(X)\n', (7970, 7973), True, 'import numpy as np\n'), ((7974, 7983), 'numpy.min', 'np.min', (['Y'], {}), '(Y)\n', (7980, 7983), True, 'import numpy as np\n'), ((7984, 7993), 'numpy.max', 'np.max', (['Y'], {}), '(Y)\n', (7990, 7993), True, 'import numpy as np\n'), ((8446, 8461), 'numpy.max', 'np.max', (['data_bf'], {}), '(data_bf)\n', (8452, 8461), True, 'import numpy as np\n'), ((8507, 8522), 'numpy.max', 'np.max', (['data_bf'], {}), '(data_bf)\n', (8513, 8522), True, 'import numpy as np\n'), ((8556, 8586), 'numpy.arange', 'np.arange', (['(0)', '(2 * np.pi)', '(0.001)'], {}), '(0, 2 * np.pi, 0.001)\n', (8565, 8586), True, 'import numpy as np\n'), ((8620, 8650), 'numpy.arange', 'np.arange', (['(0)', '(2 * np.pi)', '(0.001)'], {}), '(0, 2 * np.pi, 0.001)\n', (8629, 8650), True, 'import numpy as np\n'), ((8956, 8974), 'numpy.mean', 'np.mean', 
(['dyspec', '(1)'], {}), '(dyspec, 1)\n', (8963, 8974), True, 'import numpy as np\n'), ((9109, 9124), 'numpy.std', 'np.std', (['data_ds'], {}), '(data_ds)\n', (9115, 9124), True, 'import numpy as np\n'), ((9174, 9189), 'numpy.std', 'np.std', (['data_ds'], {}), '(data_ds)\n', (9180, 9189), True, 'import numpy as np\n'), ((9502, 9534), 'matplotlib.dates.num2date', 'mdates.num2date', (['self.time_ds[0]'], {}), '(self.time_ds[0])\n', (9517, 9534), True, 'import matplotlib.dates as mdates\n'), ((10739, 10778), 'matplotlib.dates.num2date', 'mdates.num2date', (['self.time_ds[t_idx[0]]'], {}), '(self.time_ds[t_idx[0]])\n', (10754, 10778), True, 'import matplotlib.dates as mdates\n'), ((3093, 3113), 'numpy.median', 'np.median', (['data_beam'], {}), '(data_beam)\n', (3102, 3113), True, 'import numpy as np\n'), ((3077, 3091), 'numpy.size', 'np.size', (['theta'], {}), '(theta)\n', (3084, 3091), True, 'import numpy as np\n')] |
import tensorflow as tf
from tensorflow.python.tools import freeze_graph
from tensorflow.python.tools import optimize_for_inference_lib
from keras import backend as K
from keras.models import Sequential, load_model
from keras.layers.convolutional import Conv2D
from keras.layers.convolutional import MaxPooling2D
from keras.layers.normalization import BatchNormalization
from keras.layers import Flatten, Dense, Activation, Dropout
from keras.callbacks import TensorBoard, ModelCheckpoint
from keras.preprocessing.image import ImageDataGenerator
import os
import sys
from PIL import Image, ImageDraw, ImageFont
from random import shuffle
from cnn_utils import set_directories, get_directory, get_image_pixels, get_image_description, get_image_names
import numpy as np
import json
# Base name for the exported TensorFlow graph/checkpoint files under 'out/'.
MODEL_NAME = "model"
# Dataset split sizes (number of images per split).
TRAIN_SIZE = 2172
VALIDATION_SIZE = 140
TEST_SIZE = 80
BATCH_SIZE = 25
# Steps per epoch: floor division, so a partial trailing batch is dropped.
TRAIN_STEPS = TRAIN_SIZE // BATCH_SIZE
VALIDATION_STEPS = VALIDATION_SIZE // BATCH_SIZE
EPOCHS = 50
TEST_STEPS = TEST_SIZE // BATCH_SIZE
# All images are fed to the network as IMAGE_SIZE x IMAGE_SIZE RGB.
IMAGE_SIZE = 224
#Generator that yields the input and true output data
def data_generator(is_training = False, is_validation = False, is_evaluate = False):
    """Yield batches of image pixels (and labels when available) forever.

    :param is_training: draw from the training split (labels available).
    :param is_validation: draw from the validation split (labels available).
    :param is_evaluate: draw from the test split with labels for evaluation.
    When all flags are False (prediction mode), only inputs X are yielded,
    in the deterministic order returned by get_image_names().
    """
    has_labels = is_training or is_validation or is_evaluate
    # Pre-allocated batch buffers, overwritten in place on every pass.
    X = np.random.rand(BATCH_SIZE, IMAGE_SIZE, IMAGE_SIZE, 3)
    Y = np.random.rand(BATCH_SIZE, 1)
    image_names = get_image_names(is_training, is_validation)
    index = 0
    if has_labels:
        shuffle(image_names)
    #Generates the data indefinitely
    while True:
        #For each image in the batch, fill in its pixels and description
        for i in range(BATCH_SIZE):
            image_number = image_names[index]
            index += 1
            X[i] = get_image_pixels(image_number, is_training = is_training, is_validation = is_validation)
            if has_labels:
                Y[i] = get_image_description(image_number, is_training = is_training, is_validation = is_validation)
            # Bug fix: the original wrapped at len(image_names) - 1, so the
            # final image of every pass was never used. Wrap only once the
            # last name has actually been consumed.
            if index >= len(image_names):
                index = 0
                # Reshuffle only when order does not matter; in prediction
                # mode callers align outputs with get_image_names() order.
                if has_labels:
                    shuffle(image_names)
        #Once the whole batch is ready, yield the inputs and true outputs
        if has_labels:
            yield X, Y
        else:
            yield X
#Build and compile the binary-classification CNN
def create_model():
    """Assemble the CNN: five Conv/ReLU/MaxPool stages with shrinking
    filter counts (16, 8, 4, 2, 1), two dropout-regularised dense layers,
    and a single sigmoid output. Compiled with Adam / binary crossentropy.
    """
    model = Sequential()
    # First conv stage carries the input shape; the rest follow a pattern.
    model.add(Conv2D(16, (3, 3), input_shape = (IMAGE_SIZE, IMAGE_SIZE, 3), padding = "same"))
    model.add(Activation("relu"))
    model.add(MaxPooling2D(strides = (2, 2), padding = "same"))
    # Conv stages 2-5: halve the filter count each time.
    for n_filters in (8, 4, 2, 1):
        model.add(Conv2D(n_filters, (3, 3), padding = "same"))
        model.add(Activation("relu"))
        model.add(MaxPooling2D(strides = (2, 2), padding = "same"))
    model.add(Flatten())
    # Two fully connected stages with dropout for regularisation.
    for n_units in (32, 16):
        model.add(Dense(n_units, activation = "relu"))
        model.add(Dropout(0.5))
    # Sigmoid head for the binary (benign/malignant) decision.
    model.add(Dense(1, activation = "sigmoid"))
    model.compile(optimizer = "adam", loss = "binary_crossentropy", metrics = ["accuracy"])
    print(model.summary())
    return model
#Train the model on the training/validation images, optionally with augmentation
def train(data_augmentation = True):
    """Train the CNN.

    :param data_augmentation: when True, stream images from the labelled
        directories with random perturbations applied; otherwise feed raw
        images through the custom data_generator.
    """
    model = create_model()
    # Callbacks: TensorBoard logging plus best-model snapshots tracked on
    # training loss and on validation loss respectively.
    tb_callback = TensorBoard(log_dir = './Graph', histogram_freq = 0, write_graph = True, write_images = True)
    best_train_ckpt = ModelCheckpoint("model_train.h5", monitor = "loss", save_best_only = True)
    best_val_ckpt = ModelCheckpoint("model_validation.h5", monitor = "val_loss", save_best_only = True)
    callbacks = [tb_callback, best_train_ckpt, best_val_ckpt]
    if data_augmentation:
        # Training images get random geometric/colour jitter; validation
        # images are only rescaled to [0, 1].
        augmenter = ImageDataGenerator(rotation_range=10,
                                      width_shift_range=0.05,
                                      height_shift_range=0.05,
                                      shear_range=0.05,
                                      zoom_range=0.05,
                                      rescale=1/255.,
                                      fill_mode='nearest',
                                      channel_shift_range=0.2*255)
        rescaler = ImageDataGenerator(rescale=1/255.)
        train_flow = augmenter.flow_from_directory(
            get_directory(is_training = True),
            target_size=(IMAGE_SIZE, IMAGE_SIZE),
            batch_size=BATCH_SIZE,
            shuffle = True,
            class_mode='binary')
        val_flow = rescaler.flow_from_directory(
            get_directory(is_validation = True),
            target_size=(IMAGE_SIZE, IMAGE_SIZE),
            batch_size=BATCH_SIZE,
            shuffle = True,
            class_mode='binary')
        model.fit_generator(train_flow, steps_per_epoch = TRAIN_STEPS, validation_data = val_flow,
            validation_steps = VALIDATION_STEPS, epochs = EPOCHS, callbacks = callbacks)
    else:
        # No augmentation: raw pixels come from the custom generator.
        model.fit_generator(data_generator(True), steps_per_epoch = TRAIN_STEPS, validation_data = data_generator(is_validation = True),
            validation_steps = VALIDATION_STEPS, epochs = EPOCHS, callbacks = callbacks)
#Run the saved model over the test set, either predicting or evaluating
def test(predict_or_evaluate = "predict"):
    """Exercise the best validation-loss model on the test images.

    :param predict_or_evaluate: "predict" writes annotated copies of the
        test images to Data/output/; any other value prints loss/accuracy.
    """
    # NOTE(review): load_model restores the compiled model as-is, so
    # Dropout remains active in the graph — results can vary between runs.
    model = load_model("model_validation.h5") #Up to 87% correct on test
    if predict_or_evaluate != "predict":
        # Score the model against the labelled test generator.
        evaluations = model.evaluate_generator(data_generator(is_evaluate = True), steps = TEST_STEPS)
        print(evaluations)
        return
    predictions = model.predict_generator(data_generator(), steps = TEST_STEPS, verbose = 1)
    image_names = get_image_names()
    for idx, score in enumerate(predictions):
        name = image_names[idx]
        base = Image.open("Data/images_test/" + name + ".jpg").convert("RGBA")
        # Transparent layer that will carry the predicted class label.
        overlay = Image.new('RGBA', base.size, (255, 255, 255, 0))
        font = ImageFont.truetype('arial.ttf', 40)
        draw = ImageDraw.Draw(overlay)
        # Scores below 0.5 mean benign; 0.5 and above mean malignant.
        verdict = "benign" if score < 0.5 else "malignant"
        draw.text((10, 10), verdict, font = font, fill = (255, 255, 255, 255))
        # Merge the label layer onto the original image and save it.
        stamped = Image.alpha_composite(base, overlay)
        stamped.save("Data/output/" + name + ".png", "PNG")
#Convert the model into a protobuf (.pb) file for deployment
def export_model(saver, model, input_node_names, output_node_name):
    """Freeze the current Keras session graph and write an
    inference-optimized protobuf to 'out/opt_<MODEL_NAME>.pb'.

    :param saver: tf.train.Saver used to checkpoint the session variables.
    :param model: trained Keras model (its graph lives in the Keras session).
    :param input_node_names: list of input node names in the frozen graph.
    :param output_node_name: name of the output node in the frozen graph.
    """
    # Step 1: dump the graph definition and checkpoint the variables.
    tf.train.write_graph(K.get_session().graph_def, 'out', \
        MODEL_NAME + '_graph.pbtxt')
    saver.save(K.get_session(), 'out/' + MODEL_NAME + '.chkp')
    # Step 2: fold checkpointed variables into constants in a single .pb.
    freeze_graph.freeze_graph('out/' + MODEL_NAME + '_graph.pbtxt', None, \
        False, 'out/' + MODEL_NAME + '.chkp', output_node_name, \
        "save/restore_all", "save/Const:0", \
        'out/frozen_' + MODEL_NAME + '.pb', True, "")
    # Step 3: strip training-only ops so the graph is inference-ready.
    input_graph_def = tf.GraphDef()
    with tf.gfile.Open('out/frozen_' + MODEL_NAME + '.pb', "rb") as f:
        input_graph_def.ParseFromString(f.read())
    output_graph_def = optimize_for_inference_lib.optimize_for_inference(
            input_graph_def, input_node_names, [output_node_name],
            tf.float32.as_datatype_enum)
    with tf.gfile.FastGFile('out/opt_' + MODEL_NAME + '.pb', "wb") as f:
        f.write(output_graph_def.SerializeToString())
    print("graph saved!")
#Code for running the program from the terminal.
# NOTE: branch order matters — the 3-argument --train form must be checked
# after the 2-argument form because both start with the same flag.
terminal_length = len(sys.argv)
if(terminal_length >= 2):
    #Help command: list every supported flag
    if((sys.argv[1] == "-h" or sys.argv[1] == "--help") and terminal_length == 2):
        print("-h or --help for the list of all possible commands")
        print("--train for training the model with data augmentation by default")
        print("	--no_data_augmentation subcommand for training if you don't want to use data augmentation")
        print("--predict for predicting an output for a set of input images")
        print("--evaluate for evaluating the model on a set of input images and their corresponding outputs")
    #Train command (with data augmentation)
    elif(sys.argv[1] == "--train" and terminal_length == 2):
        print("Training with data_augmentation...")
        train()
    #Train without data augmentation (also rebuilds the split directories)
    elif(sys.argv[1] == "--train" and terminal_length == 3 and sys.argv[2] == "--no_data_augmentation"):
        print("Training with no data augmentation...")
        set_directories()
        train(False)
    #Predict command: write annotated test images
    elif(sys.argv[1] == "--predict" and terminal_length == 2):
        print("Predicting images...")
        test()
    #Evaluate command: print loss/accuracy on the test set
    elif(sys.argv[1] == "--evaluate" and terminal_length == 2):
        print("Evaluating images...")
        test("evaluate")
    #Export command: freeze the saved model into an inference .pb graph.
    # NOTE(review): node names "conv2d_1_input"/"dense_3/Sigmoid" are the
    # Keras auto-generated names — confirm they match the built model.
    elif(sys.argv[1] == "--export" and terminal_length == 2):
        print("Exporting the model into .pb file")
        model = load_model("model_validation.h5")
        export_model(tf.train.Saver(), model, ["conv2d_1_input"], "dense_3/Sigmoid")
    #Invalid command
    else:
        print("Invalid command.")
        print("Use -h or --help for the list of all possible commands")
else:
    print("No arguments given. Please be sure to include one.")
    print("Use -h or --help for the list of all possible commands")
| [
"keras.models.load_model",
"keras.preprocessing.image.ImageDataGenerator",
"PIL.Image.new",
"cnn_utils.set_directories",
"random.shuffle",
"cnn_utils.get_image_pixels",
"keras.layers.Flatten",
"PIL.ImageDraw.Draw",
"tensorflow.GraphDef",
"tensorflow.gfile.FastGFile",
"tensorflow.train.Saver",
... | [((1272, 1325), 'numpy.random.rand', 'np.random.rand', (['BATCH_SIZE', 'IMAGE_SIZE', 'IMAGE_SIZE', '(3)'], {}), '(BATCH_SIZE, IMAGE_SIZE, IMAGE_SIZE, 3)\n', (1286, 1325), True, 'import numpy as np\n'), ((1333, 1362), 'numpy.random.rand', 'np.random.rand', (['BATCH_SIZE', '(1)'], {}), '(BATCH_SIZE, 1)\n', (1347, 1362), True, 'import numpy as np\n'), ((1381, 1424), 'cnn_utils.get_image_names', 'get_image_names', (['is_training', 'is_validation'], {}), '(is_training, is_validation)\n', (1396, 1424), False, 'from cnn_utils import set_directories, get_directory, get_image_pixels, get_image_description, get_image_names\n'), ((2326, 2338), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (2336, 2338), False, 'from keras.models import Sequential, load_model\n'), ((3826, 3915), 'keras.callbacks.TensorBoard', 'TensorBoard', ([], {'log_dir': '"""./Graph"""', 'histogram_freq': '(0)', 'write_graph': '(True)', 'write_images': '(True)'}), "(log_dir='./Graph', histogram_freq=0, write_graph=True,\n write_images=True)\n", (3837, 3915), False, 'from keras.callbacks import TensorBoard, ModelCheckpoint\n'), ((3941, 4011), 'keras.callbacks.ModelCheckpoint', 'ModelCheckpoint', (['"""model_train.h5"""'], {'monitor': '"""loss"""', 'save_best_only': '(True)'}), "('model_train.h5', monitor='loss', save_best_only=True)\n", (3956, 4011), False, 'from keras.callbacks import TensorBoard, ModelCheckpoint\n'), ((4042, 4121), 'keras.callbacks.ModelCheckpoint', 'ModelCheckpoint', (['"""model_validation.h5"""'], {'monitor': '"""val_loss"""', 'save_best_only': '(True)'}), "('model_validation.h5', monitor='val_loss', save_best_only=True)\n", (4057, 4121), False, 'from keras.callbacks import TensorBoard, ModelCheckpoint\n'), ((6107, 6140), 'keras.models.load_model', 'load_model', (['"""model_validation.h5"""'], {}), "('model_validation.h5')\n", (6117, 6140), False, 'from keras.models import Sequential, load_model\n'), ((7668, 7883), 'tensorflow.python.tools.freeze_graph.freeze_graph', 
'freeze_graph.freeze_graph', (["('out/' + MODEL_NAME + '_graph.pbtxt')", 'None', '(False)', "('out/' + MODEL_NAME + '.chkp')", 'output_node_name', '"""save/restore_all"""', '"""save/Const:0"""', "('out/frozen_' + MODEL_NAME + '.pb')", '(True)', '""""""'], {}), "('out/' + MODEL_NAME + '_graph.pbtxt', None, False,\n 'out/' + MODEL_NAME + '.chkp', output_node_name, 'save/restore_all',\n 'save/Const:0', 'out/frozen_' + MODEL_NAME + '.pb', True, '')\n", (7693, 7883), False, 'from tensorflow.python.tools import freeze_graph\n'), ((7934, 7947), 'tensorflow.GraphDef', 'tf.GraphDef', ([], {}), '()\n', (7945, 7947), True, 'import tensorflow as tf\n'), ((8097, 8234), 'tensorflow.python.tools.optimize_for_inference_lib.optimize_for_inference', 'optimize_for_inference_lib.optimize_for_inference', (['input_graph_def', 'input_node_names', '[output_node_name]', 'tf.float32.as_datatype_enum'], {}), '(input_graph_def,\n input_node_names, [output_node_name], tf.float32.as_datatype_enum)\n', (8146, 8234), False, 'from tensorflow.python.tools import optimize_for_inference_lib\n'), ((1493, 1513), 'random.shuffle', 'shuffle', (['image_names'], {}), '(image_names)\n', (1500, 1513), False, 'from random import shuffle\n'), ((2369, 2444), 'keras.layers.convolutional.Conv2D', 'Conv2D', (['(16)', '(3, 3)'], {'input_shape': '(IMAGE_SIZE, IMAGE_SIZE, 3)', 'padding': '"""same"""'}), "(16, (3, 3), input_shape=(IMAGE_SIZE, IMAGE_SIZE, 3), padding='same')\n", (2375, 2444), False, 'from keras.layers.convolutional import Conv2D\n'), ((2462, 2480), 'keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (2472, 2480), False, 'from keras.layers import Flatten, Dense, Activation, Dropout\n'), ((2494, 2538), 'keras.layers.convolutional.MaxPooling2D', 'MaxPooling2D', ([], {'strides': '(2, 2)', 'padding': '"""same"""'}), "(strides=(2, 2), padding='same')\n", (2506, 2538), False, 'from keras.layers.convolutional import MaxPooling2D\n'), ((2574, 2607), 'keras.layers.convolutional.Conv2D', 
'Conv2D', (['(8)', '(3, 3)'], {'padding': '"""same"""'}), "(8, (3, 3), padding='same')\n", (2580, 2607), False, 'from keras.layers.convolutional import Conv2D\n'), ((2623, 2641), 'keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (2633, 2641), False, 'from keras.layers import Flatten, Dense, Activation, Dropout\n'), ((2655, 2699), 'keras.layers.convolutional.MaxPooling2D', 'MaxPooling2D', ([], {'strides': '(2, 2)', 'padding': '"""same"""'}), "(strides=(2, 2), padding='same')\n", (2667, 2699), False, 'from keras.layers.convolutional import MaxPooling2D\n'), ((2735, 2768), 'keras.layers.convolutional.Conv2D', 'Conv2D', (['(4)', '(3, 3)'], {'padding': '"""same"""'}), "(4, (3, 3), padding='same')\n", (2741, 2768), False, 'from keras.layers.convolutional import Conv2D\n'), ((2784, 2802), 'keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (2794, 2802), False, 'from keras.layers import Flatten, Dense, Activation, Dropout\n'), ((2816, 2860), 'keras.layers.convolutional.MaxPooling2D', 'MaxPooling2D', ([], {'strides': '(2, 2)', 'padding': '"""same"""'}), "(strides=(2, 2), padding='same')\n", (2828, 2860), False, 'from keras.layers.convolutional import MaxPooling2D\n'), ((2896, 2929), 'keras.layers.convolutional.Conv2D', 'Conv2D', (['(2)', '(3, 3)'], {'padding': '"""same"""'}), "(2, (3, 3), padding='same')\n", (2902, 2929), False, 'from keras.layers.convolutional import Conv2D\n'), ((2945, 2963), 'keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (2955, 2963), False, 'from keras.layers import Flatten, Dense, Activation, Dropout\n'), ((2977, 3021), 'keras.layers.convolutional.MaxPooling2D', 'MaxPooling2D', ([], {'strides': '(2, 2)', 'padding': '"""same"""'}), "(strides=(2, 2), padding='same')\n", (2989, 3021), False, 'from keras.layers.convolutional import MaxPooling2D\n'), ((3057, 3090), 'keras.layers.convolutional.Conv2D', 'Conv2D', (['(1)', '(3, 3)'], {'padding': '"""same"""'}), "(1, (3, 3), 
padding='same')\n", (3063, 3090), False, 'from keras.layers.convolutional import Conv2D\n'), ((3106, 3124), 'keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (3116, 3124), False, 'from keras.layers import Flatten, Dense, Activation, Dropout\n'), ((3138, 3182), 'keras.layers.convolutional.MaxPooling2D', 'MaxPooling2D', ([], {'strides': '(2, 2)', 'padding': '"""same"""'}), "(strides=(2, 2), padding='same')\n", (3150, 3182), False, 'from keras.layers.convolutional import MaxPooling2D\n'), ((3202, 3211), 'keras.layers.Flatten', 'Flatten', ([], {}), '()\n', (3209, 3211), False, 'from keras.layers import Flatten, Dense, Activation, Dropout\n'), ((3254, 3282), 'keras.layers.Dense', 'Dense', (['(32)'], {'activation': '"""relu"""'}), "(32, activation='relu')\n", (3259, 3282), False, 'from keras.layers import Flatten, Dense, Activation, Dropout\n'), ((3298, 3310), 'keras.layers.Dropout', 'Dropout', (['(0.5)'], {}), '(0.5)\n', (3305, 3310), False, 'from keras.layers import Flatten, Dense, Activation, Dropout\n'), ((3353, 3381), 'keras.layers.Dense', 'Dense', (['(16)'], {'activation': '"""relu"""'}), "(16, activation='relu')\n", (3358, 3381), False, 'from keras.layers import Flatten, Dense, Activation, Dropout\n'), ((3397, 3409), 'keras.layers.Dropout', 'Dropout', (['(0.5)'], {}), '(0.5)\n', (3404, 3409), False, 'from keras.layers import Flatten, Dense, Activation, Dropout\n'), ((3451, 3481), 'keras.layers.Dense', 'Dense', (['(1)'], {'activation': '"""sigmoid"""'}), "(1, activation='sigmoid')\n", (3456, 3481), False, 'from keras.layers import Flatten, Dense, Activation, Dropout\n'), ((4259, 4459), 'keras.preprocessing.image.ImageDataGenerator', 'ImageDataGenerator', ([], {'rotation_range': '(10)', 'width_shift_range': '(0.05)', 'height_shift_range': '(0.05)', 'shear_range': '(0.05)', 'zoom_range': '(0.05)', 'rescale': '(1 / 255.0)', 'fill_mode': '"""nearest"""', 'channel_shift_range': '(0.2 * 255)'}), "(rotation_range=10, width_shift_range=0.05,\n 
height_shift_range=0.05, shear_range=0.05, zoom_range=0.05, rescale=1 /\n 255.0, fill_mode='nearest', channel_shift_range=0.2 * 255)\n", (4277, 4459), False, 'from keras.preprocessing.image import ImageDataGenerator\n'), ((4628, 4665), 'keras.preprocessing.image.ImageDataGenerator', 'ImageDataGenerator', ([], {'rescale': '(1 / 255.0)'}), '(rescale=1 / 255.0)\n', (4646, 4665), False, 'from keras.preprocessing.image import ImageDataGenerator\n'), ((6363, 6380), 'cnn_utils.get_image_names', 'get_image_names', ([], {}), '()\n', (6378, 6380), False, 'from cnn_utils import set_directories, get_directory, get_image_pixels, get_image_description, get_image_names\n'), ((7613, 7628), 'keras.backend.get_session', 'K.get_session', ([], {}), '()\n', (7626, 7628), True, 'from keras import backend as K\n'), ((7958, 8013), 'tensorflow.gfile.Open', 'tf.gfile.Open', (["('out/frozen_' + MODEL_NAME + '.pb')", '"""rb"""'], {}), "('out/frozen_' + MODEL_NAME + '.pb', 'rb')\n", (7971, 8013), True, 'import tensorflow as tf\n'), ((8270, 8327), 'tensorflow.gfile.FastGFile', 'tf.gfile.FastGFile', (["('out/opt_' + MODEL_NAME + '.pb')", '"""wb"""'], {}), "('out/opt_' + MODEL_NAME + '.pb', 'wb')\n", (8288, 8327), True, 'import tensorflow as tf\n'), ((1734, 1823), 'cnn_utils.get_image_pixels', 'get_image_pixels', (['image_number'], {'is_training': 'is_training', 'is_validation': 'is_validation'}), '(image_number, is_training=is_training, is_validation=\n is_validation)\n', (1750, 1823), False, 'from cnn_utils import set_directories, get_directory, get_image_pixels, get_image_description, get_image_names\n'), ((4748, 4779), 'cnn_utils.get_directory', 'get_directory', ([], {'is_training': '(True)'}), '(is_training=True)\n', (4761, 4779), False, 'from cnn_utils import set_directories, get_directory, get_image_pixels, get_image_description, get_image_names\n'), ((5051, 5084), 'cnn_utils.get_directory', 'get_directory', ([], {'is_validation': '(True)'}), '(is_validation=True)\n', (5064, 5084), False, 
'from cnn_utils import set_directories, get_directory, get_image_pixels, get_image_description, get_image_names\n'), ((6620, 6669), 'PIL.Image.new', 'Image.new', (['"""RGBA"""', 'image.size', '(255, 255, 255, 0)'], {}), "('RGBA', image.size, (255, 255, 255, 0))\n", (6629, 6669), False, 'from PIL import Image, ImageDraw, ImageFont\n'), ((6683, 6718), 'PIL.ImageFont.truetype', 'ImageFont.truetype', (['"""arial.ttf"""', '(40)'], {}), "('arial.ttf', 40)\n", (6701, 6718), False, 'from PIL import Image, ImageDraw, ImageFont\n'), ((6753, 6773), 'PIL.ImageDraw.Draw', 'ImageDraw.Draw', (['text'], {}), '(text)\n', (6767, 6773), False, 'from PIL import Image, ImageDraw, ImageFont\n'), ((7104, 7138), 'PIL.Image.alpha_composite', 'Image.alpha_composite', (['image', 'text'], {}), '(image, text)\n', (7125, 7138), False, 'from PIL import Image, ImageDraw, ImageFont\n'), ((7521, 7536), 'keras.backend.get_session', 'K.get_session', ([], {}), '()\n', (7534, 7536), True, 'from keras import backend as K\n'), ((1890, 1984), 'cnn_utils.get_image_description', 'get_image_description', (['image_number'], {'is_training': 'is_training', 'is_validation': 'is_validation'}), '(image_number, is_training=is_training, is_validation=\n is_validation)\n', (1911, 1984), False, 'from cnn_utils import set_directories, get_directory, get_image_pixels, get_image_description, get_image_names\n'), ((2048, 2068), 'random.shuffle', 'shuffle', (['image_names'], {}), '(image_names)\n', (2055, 2068), False, 'from random import shuffle\n'), ((9514, 9531), 'cnn_utils.set_directories', 'set_directories', ([], {}), '()\n', (9529, 9531), False, 'from cnn_utils import set_directories, get_directory, get_image_pixels, get_image_description, get_image_names\n'), ((6504, 6557), 'PIL.Image.open', 'Image.open', (["('Data/images_test/' + image_name + '.jpg')"], {}), "('Data/images_test/' + image_name + '.jpg')\n", (6514, 6557), False, 'from PIL import Image, ImageDraw, ImageFont\n'), ((10022, 10055), 
'keras.models.load_model', 'load_model', (['"""model_validation.h5"""'], {}), "('model_validation.h5')\n", (10032, 10055), False, 'from keras.models import Sequential, load_model\n'), ((10075, 10091), 'tensorflow.train.Saver', 'tf.train.Saver', ([], {}), '()\n', (10089, 10091), True, 'import tensorflow as tf\n')] |
from sklearn.preprocessing import LabelBinarizer
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report
from tensorflow.keras.preprocessing.image import img_to_array
from tensorflow.keras.optimizers import SGD
from imutils import paths
import matplotlib.pyplot as plt
import numpy as np
import argparse
import cv2
import os
from Chapter15.minivgg import MiniVGGNet
from captcha_breaker.helper.utils.captchahelper import preprocess
# CLI: -d/--dataset is the labelled image directory, -m/--model the output path.
ap = argparse.ArgumentParser()
ap.add_argument("-d", "--dataset", required=True, help="path to input dataset")
ap.add_argument("-m", "--model", required=True, help="path to output model")
args = vars(ap.parse_args())
# Hide all CUDA devices so TensorFlow runs on CPU only.
os.environ["CUDA_VISIBLE_DEVICES"] = ''
data = []
labels = []
print("[INFO] start reading images...")
# Load every image as grayscale and use the name of its parent directory
# as the class label. preprocess() presumably normalizes each image to
# 28x28 — confirm against captchahelper.
for imagePath in paths.list_images(args["dataset"]):
    image = cv2.imread(imagePath)
    image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    image = preprocess(image, 28, 28)
    image = img_to_array(image)
    data.append(image)
    label = imagePath.split(os.path.sep)[-2]
    labels.append(label)
# Scale pixel intensities to [0, 1] and hold out 25% of the data for testing.
data = np.array(data, dtype="float") / 255.0
labels = np.array(labels)
(trainX, testX, trainY, testY) = train_test_split(data, labels, test_size=0.25, random_state=42)
# One-hot encode the string labels (binarizer fit on the training split only).
lb = LabelBinarizer().fit(trainY)
trainY = lb.transform(trainY)
testY = lb.transform(testY)
print("[INFO] compiling model...")
# MiniVGGNet over 28x28 grayscale inputs; SGD with linear decay over 30 epochs.
# NOTE(review): classes=24 must match the number of classes the binarizer
# finds in the dataset — verify.
model = MiniVGGNet.build(width=28, height=28, depth=1, classes=24)
opt = SGD(lr=0.01, decay=0.01 / 30, nesterov=True, momentum=0.9)
model.compile(loss="categorical_crossentropy", optimizer=opt, metrics=["accuracy"])
print("[INFO] training network...")
H = model.fit(trainX, trainY, validation_data=(testX, testY), batch_size=32, epochs=30, verbose=1)
print("[INFO] evaluating network...")
predictions = model.predict(testX, batch_size=32)
print(classification_report(testY.argmax(axis=1), predictions.argmax(axis=1), target_names=lb.classes_))
print("[INFO] serializing network...")
model.save(args["model"])
# Plot training/validation loss and accuracy curves over the 30 epochs.
plt.style.use("ggplot")
plt.figure()
plt.plot(np.arange(0, 30), H.history["loss"], label="train_loss")
plt.plot(np.arange(0, 30), H.history["val_loss"], label="val_loss")
plt.plot(np.arange(0, 30), H.history["accuracy"], label="acc")
plt.plot(np.arange(0, 30), H.history["val_accuracy"], label="val_acc")
plt.title("Training Loss and Accuracy")
plt.xlabel("Epoch #")
plt.ylabel("Loss/Accuracy")
plt.legend()
plt.show() | [
"matplotlib.pyplot.title",
"sklearn.preprocessing.LabelBinarizer",
"argparse.ArgumentParser",
"sklearn.model_selection.train_test_split",
"tensorflow.keras.optimizers.SGD",
"matplotlib.pyplot.style.use",
"matplotlib.pyplot.figure",
"Chapter15.minivgg.MiniVGGNet.build",
"numpy.arange",
"imutils.pat... | [((486, 511), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (509, 511), False, 'import argparse\n'), ((819, 853), 'imutils.paths.list_images', 'paths.list_images', (["args['dataset']"], {}), "(args['dataset'])\n", (836, 853), False, 'from imutils import paths\n'), ((1161, 1177), 'numpy.array', 'np.array', (['labels'], {}), '(labels)\n', (1169, 1177), True, 'import numpy as np\n'), ((1212, 1275), 'sklearn.model_selection.train_test_split', 'train_test_split', (['data', 'labels'], {'test_size': '(0.25)', 'random_state': '(42)'}), '(data, labels, test_size=0.25, random_state=42)\n', (1228, 1275), False, 'from sklearn.model_selection import train_test_split\n'), ((1413, 1471), 'Chapter15.minivgg.MiniVGGNet.build', 'MiniVGGNet.build', ([], {'width': '(28)', 'height': '(28)', 'depth': '(1)', 'classes': '(24)'}), '(width=28, height=28, depth=1, classes=24)\n', (1429, 1471), False, 'from Chapter15.minivgg import MiniVGGNet\n'), ((1478, 1536), 'tensorflow.keras.optimizers.SGD', 'SGD', ([], {'lr': '(0.01)', 'decay': '(0.01 / 30)', 'nesterov': '(True)', 'momentum': '(0.9)'}), '(lr=0.01, decay=0.01 / 30, nesterov=True, momentum=0.9)\n', (1481, 1536), False, 'from tensorflow.keras.optimizers import SGD\n'), ((2018, 2041), 'matplotlib.pyplot.style.use', 'plt.style.use', (['"""ggplot"""'], {}), "('ggplot')\n", (2031, 2041), True, 'import matplotlib.pyplot as plt\n'), ((2042, 2054), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (2052, 2054), True, 'import matplotlib.pyplot as plt\n'), ((2323, 2362), 'matplotlib.pyplot.title', 'plt.title', (['"""Training Loss and Accuracy"""'], {}), "('Training Loss and Accuracy')\n", (2332, 2362), True, 'import matplotlib.pyplot as plt\n'), ((2363, 2384), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Epoch #"""'], {}), "('Epoch #')\n", (2373, 2384), True, 'import matplotlib.pyplot as plt\n'), ((2385, 2412), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Loss/Accuracy"""'], {}), 
"('Loss/Accuracy')\n", (2395, 2412), True, 'import matplotlib.pyplot as plt\n'), ((2413, 2425), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (2423, 2425), True, 'import matplotlib.pyplot as plt\n'), ((2426, 2436), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2434, 2436), True, 'import matplotlib.pyplot as plt\n'), ((868, 889), 'cv2.imread', 'cv2.imread', (['imagePath'], {}), '(imagePath)\n', (878, 889), False, 'import cv2\n'), ((902, 941), 'cv2.cvtColor', 'cv2.cvtColor', (['image', 'cv2.COLOR_BGR2GRAY'], {}), '(image, cv2.COLOR_BGR2GRAY)\n', (914, 941), False, 'import cv2\n'), ((954, 979), 'captcha_breaker.helper.utils.captchahelper.preprocess', 'preprocess', (['image', '(28)', '(28)'], {}), '(image, 28, 28)\n', (964, 979), False, 'from captcha_breaker.helper.utils.captchahelper import preprocess\n'), ((992, 1011), 'tensorflow.keras.preprocessing.image.img_to_array', 'img_to_array', (['image'], {}), '(image)\n', (1004, 1011), False, 'from tensorflow.keras.preprocessing.image import img_to_array\n'), ((1114, 1143), 'numpy.array', 'np.array', (['data'], {'dtype': '"""float"""'}), "(data, dtype='float')\n", (1122, 1143), True, 'import numpy as np\n'), ((2064, 2080), 'numpy.arange', 'np.arange', (['(0)', '(30)'], {}), '(0, 30)\n', (2073, 2080), True, 'import numpy as np\n'), ((2130, 2146), 'numpy.arange', 'np.arange', (['(0)', '(30)'], {}), '(0, 30)\n', (2139, 2146), True, 'import numpy as np\n'), ((2198, 2214), 'numpy.arange', 'np.arange', (['(0)', '(30)'], {}), '(0, 30)\n', (2207, 2214), True, 'import numpy as np\n'), ((2261, 2277), 'numpy.arange', 'np.arange', (['(0)', '(30)'], {}), '(0, 30)\n', (2270, 2277), True, 'import numpy as np\n'), ((1282, 1298), 'sklearn.preprocessing.LabelBinarizer', 'LabelBinarizer', ([], {}), '()\n', (1296, 1298), False, 'from sklearn.preprocessing import LabelBinarizer\n')] |
import math
import numpy as np
import tensorflow as tf
# Fallback loader configuration; keys are read by DataLoader methods.
default_settings = {
    'batch_size': 16,       # samples per batch
    'permute_data': False,  # shuffle sample order at start and per epoch
    'multi_input': False,   # rearrange each sample's inputs for multi-input models
    'multi_output': False,  # rearrange each sample's labels for multi-output models
    'inputs_num': 1,        # number of inputs when multi_input is True
    'outputs_num': 1        # number of outputs when multi_output is True
}
class DataLoader(tf.keras.utils.Sequence):
    """
    Keras data sequence that serves a wrapped dataset in batches.

    Responsibilities:
        - slice the dataset into batches of ``settings['batch_size']``;
        - optionally permute sample order each epoch (``settings['permute_data']``);
        - rearrange samples into tf.keras multi-input/multi-output layout.

    The dataset instance must implement ``__getitem__`` (returning a
    ``(data, label, metadata)`` triple), ``__len__`` and ``on_epoch_end``.
    """
def __init__(self, settings, dataset):
"""
This method initializes parameters.
:param settings: dictionary that contains dataset settings.
:param dataset: instance of dataset class that implements __get_item__() and __len__() methods.
:return: None.
"""
super(DataLoader, self).__init__()
self.settings = settings
# Fields to be filled during execution
self.dataset = dataset
self.indices = np.arange(len(self.dataset))
# Permute indices of dataset
self.permute()
def __len__(self):
"""
Return the number of batches in the data sequence.
:return: number of batches.
"""
return int(math.ceil(len(self.dataset) / self.settings['batch_size']))
def __getitem__(self, idx):
"""
Construct the current batch of inputs and labels for model training.
:param idx: The index of the current batch.
:return: Batch of inputs and labels.
"""
inputs = []
labels = []
metadata = []
for data_idx in self.indices[idx * self.settings['batch_size']:(idx + 1) * self.settings['batch_size']]:
data, label, metadata_ = self.dataset[data_idx]
inputs.append(data)
labels.append(label)
metadata.append(metadata_)
# Rearrange multi-input data
if self.settings['multi_input']:
inputs = self.rearrange_multi_io(inputs, self.settings['inputs_num'])
else:
inputs = np.array(inputs)
# Rearrange multi-output data
if self.settings['multi_output']:
labels = self.rearrange_multi_io(labels, self.settings['outputs_num'])
else:
labels = np.array(labels)
# Permute the data on the end of the epoch
if idx + 1 == len(self):
self.permute()
self.dataset.on_epoch_end()
return inputs, labels, metadata
def __call__(self):
"""
This method defines 'call' method of the sequence to get batch of model inputs and corresponding labels
:return: Batch of inputs and labels
"""
for item in (self[i] for i in range(len(self))):
yield item
def permute(self):
"""
Permute indices of the dataset.
:return: None.
"""
if self.settings['permute_data']:
self.indices = np.random.permutation(len(self.dataset))
@staticmethod
def rearrange_multi_io(data, io_num):
"""
This method rearrange data to match the tf.keras backend multi-input/multi-output format
The format is changed from [ [io_11, io_21, ...], [io_12, io_22, ...], ... ] to [ [io_11, io_12, ...], [io_21, io_22, ...], ... ]
:param data: multi-input/multi-output data
:param io_num: number of inputs/outputs
:return: rearranged multi-input/multi-output data
"""
# Create placeholder for multi input/output
rearranged_data = list()
for _ in range(io_num):
rearranged_data.append(list())
for batch_item in data: # iterate on data items in batch
for io_idx in range(io_num): # iterate on inputs/outputs in batch item
rearranged_data[io_idx].append(batch_item[io_idx])
rearranged_data = [np.array(data_list) for data_list in rearranged_data]
return rearranged_data
| [
"numpy.array"
] | [((2049, 2065), 'numpy.array', 'np.array', (['inputs'], {}), '(inputs)\n', (2057, 2065), True, 'import numpy as np\n'), ((2271, 2287), 'numpy.array', 'np.array', (['labels'], {}), '(labels)\n', (2279, 2287), True, 'import numpy as np\n'), ((3927, 3946), 'numpy.array', 'np.array', (['data_list'], {}), '(data_list)\n', (3935, 3946), True, 'import numpy as np\n')] |
from dataclasses import dataclass, field, fields
from typing import Mapping, Optional, Sequence, Type, Union
import numpy as np
import xarray as xr
from bioimageio.core.statistical_measures import Mean, Measure, Percentile, Std
from bioimageio.spec.model.raw_nodes import PostprocessingName, PreprocessingName
from ._utils import ComputedMeasures, DatasetMode, FIXED, Mode, PER_DATASET, PER_SAMPLE, RequiredMeasures, SampleMode
try:
from typing import Literal, get_args, TypedDict
except ImportError:
from typing_extensions import Literal, get_args, TypedDict # type: ignore
def _get_fixed(
    fixed: Union[float, Sequence[float]], tensor: xr.DataArray, axes: Optional[Sequence[str]]
) -> Union[float, xr.DataArray]:
    """Align a fixed scalar/sequence with the non-reduced dims of `tensor`.

    With `axes` None the value is returned untouched; otherwise it is reshaped
    to span every dimension of `tensor` that is not listed in `axes`, so it
    broadcasts correctly against the tensor.
    """
    if axes is None:
        return fixed
    kept_dims = tuple(dim for dim in tensor.dims if dim not in axes)
    kept_shape = tuple(size for dim, size in tensor.sizes.items() if dim not in axes)
    aligned = np.array(fixed).reshape(kept_shape)
    return xr.DataArray(aligned, dims=kept_dims)
# Type alias: tensors are identified by plain string names throughout this module.
TensorName = str
# Sentinel default for dataclass fields that are logically required;
# Processing.__post_init__ raises TypeError when a field is still MISSING after
# construction (workaround for dataclass inheritance on python < 3.10).
MISSING = "MISSING"
@dataclass
class Processing:
    """Base class for all Pre- and Postprocessing transformations.

    Subclasses implement apply(); required dataset statistics are declared via
    get_required_measures() and injected via set_computed_measures() before use.
    """
    # name of the tensor this processing step operates on
    tensor_name: str
    # todo: in python>=3.10 we should use dataclasses.KW_ONLY instead of MISSING (see child classes) to make inheritance work properly
    # measures computed externally, keyed mode -> tensor name -> measure
    computed_measures: ComputedMeasures = field(default_factory=dict)
    mode: Mode = FIXED
    def get_required_measures(self) -> RequiredMeasures:
        """Return the measures this step needs; the base class needs none."""
        return {}
    def set_computed_measures(self, computed: ComputedMeasures):
        """Store externally computed measures after validating completeness.

        Raises ValueError if any measure required by get_required_measures()
        is missing from `computed`.
        """
        # check if computed contains all required measures
        for mode, req_per_mode in self.get_required_measures().items():
            for tn, req_per_tn in req_per_mode.items():
                comp_measures = computed.get(mode, {}).get(tn, {})
                for req_measure in req_per_tn:
                    if req_measure not in comp_measures:
                        raise ValueError(f"Missing required {req_measure} for {tn} {mode}.")
        self.computed_measures = computed
    def get_computed_measure(self, tensor_name: TensorName, measure: Measure, *, mode: Optional[Mode] = None):
        """Helper to unpack self.computed_measures; raises if the value is absent."""
        ret = self.computed_measures.get(mode or self.mode, {}).get(tensor_name, {}).get(measure)
        if ret is None:
            raise RuntimeError(f"Missing computed {measure} for {tensor_name} {mode}.")
        return ret
    def __call__(self, tensor: xr.DataArray) -> xr.DataArray:
        """Make the instance callable; delegates to apply()."""
        return self.apply(tensor)
    def apply(self, tensor: xr.DataArray) -> xr.DataArray:
        """Apply the processing; must be overridden by subclasses."""
        raise NotImplementedError
    def __post_init__(self):
        # validate common kwargs by their annotations
        for f in fields(self):
            # check MISSING: a field left at the MISSING sentinel was required
            if getattr(self, f.name) is MISSING:
                raise TypeError(f"missing required argument {f.name}")
            if f.name == "mode":
                # mode is always annotated as literals (or literals of literals),
                # so collect the allowed values one nesting level deep
                valid_modes = get_args(f.type)
                for inner in get_args(f.type):
                    valid_modes += get_args(inner)
                if self.mode not in valid_modes:
                    raise NotImplementedError(f"Unsupported mode {self.mode} for {self.__class__.__name__}")
#
# helpers
#
def ensure_dtype(tensor: xr.DataArray, *, dtype) -> xr.DataArray:
    """Return `tensor` cast to the requested `dtype`."""
    converted = tensor.astype(dtype)
    return converted
#
# Pre- and Postprocessing implementations
#
@dataclass
class Binarize(Processing):
    """Threshold the tensor into a binary (0.0 / 1.0) float32 mask."""
    # explicit MISSING default keeps dataclass inheritance working on py<3.10
    threshold: float = MISSING
    def apply(self, tensor: xr.DataArray) -> xr.DataArray:
        binary_mask = tensor > self.threshold
        return ensure_dtype(binary_mask, dtype="float32")
@dataclass
class Clip(Processing):
    """Clamp tensor values into [min, max] and cast to float32."""
    min: float = MISSING
    max: float = MISSING
    def apply(self, tensor: xr.DataArray) -> xr.DataArray:
        clamped = tensor.clip(min=self.min, max=self.max)
        return ensure_dtype(clamped, dtype="float32")
@dataclass
class EnsureDtype(Processing):
    """Cast the tensor to a configured dtype without changing its values."""
    dtype: str = MISSING
    def apply(self, tensor: xr.DataArray) -> xr.DataArray:
        target_dtype = self.dtype
        return ensure_dtype(tensor, dtype=target_dtype)
@dataclass
class ScaleLinear(Processing):
    """Scale the tensor with a fixed multiplicative and additive factor.

    `gain`/`offset` are either plain scalars (when `axes` is None, enforced in
    __post_init__) or per-channel sequences broadcast over the dimensions of
    the tensor that are NOT listed in `axes` (and are not the batch dim "b").
    """
    gain: Union[float, Sequence[float]] = MISSING
    offset: Union[float, Sequence[float]] = MISSING
    axes: Optional[Sequence[str]] = None
    def apply(self, tensor: xr.DataArray) -> xr.DataArray:
        """Return gain * tensor + offset as float32."""
        if self.axes is None:
            # bugfix: the previous code evaluated `ax not in self.axes` even when
            # self.axes was None, raising TypeError ("argument of type 'NoneType'
            # is not iterable") although axes=None is explicitly supported
            # (see __post_init__). Scalars broadcast without any reshaping.
            gain = self.gain
            offset = self.offset
        else:
            # dims outside `axes` (excluding batch "b") carry the per-channel factors
            scale_axes = tuple(ax for ax in tensor.dims if (ax not in self.axes and ax != "b"))
            if scale_axes:
                gain = xr.DataArray(np.atleast_1d(self.gain), dims=scale_axes)
                offset = xr.DataArray(np.atleast_1d(self.offset), dims=scale_axes)
            else:
                gain = self.gain
                offset = self.offset
        return ensure_dtype(tensor * gain + offset, dtype="float32")
    def __post_init__(self):
        super().__post_init__()
        if self.axes is None:
            # without axes the factors must be plain scalars
            assert isinstance(self.gain, (int, float))
            assert isinstance(self.offset, (int, float))
@dataclass
class ScaleMeanVariance(Processing):
    # Placeholder: registered in KNOWN_PROCESSING below but not implemented yet;
    # calling it falls through to Processing.apply, which raises NotImplementedError.
    ...
@dataclass
class ScaleRange(Processing):
    """Normalize the tensor to the range spanned by two percentiles.

    The percentiles are taken from the computed measures of
    `reference_tensor` (falling back to this step's own tensor).
    """
    mode: Literal[SampleMode, DatasetMode] = PER_SAMPLE
    axes: Optional[Sequence[str]] = None
    min_percentile: float = 0.0
    max_percentile: float = 100.0
    # numerical stabilizer added to the percentile span before dividing
    eps: float = 1e-6
    reference_tensor: Optional[TensorName] = None
    def _percentile_measures(self):
        """Return the (lower, upper) Percentile measures used as keys."""
        axes = None if self.axes is None else tuple(self.axes)
        lower = Percentile(self.min_percentile, axes=axes)
        upper = Percentile(self.max_percentile, axes=axes)
        return lower, upper
    def get_required_measures(self) -> RequiredMeasures:
        """Both percentiles of the reference tensor are required in `mode`."""
        lower, upper = self._percentile_measures()
        ref_name = self.reference_tensor or self.tensor_name
        return {self.mode: {ref_name: {lower, upper}}}
    def apply(self, tensor: xr.DataArray) -> xr.DataArray:
        """Return (tensor - p_min) / (p_max - p_min + eps) as float32."""
        ref_name = self.reference_tensor or self.tensor_name
        lower, upper = self._percentile_measures()
        p_min = self.get_computed_measure(ref_name, lower)
        p_max = self.get_computed_measure(ref_name, upper)
        scaled = (tensor - p_min) / (p_max - p_min + self.eps)
        return ensure_dtype(scaled, dtype="float32")
    def __post_init__(self):
        super().__post_init__()
        # normalize axes to a hashable tuple so Percentile keys compare equal
        self.axes = None if self.axes is None else tuple(self.axes)
@dataclass
class Sigmoid(Processing):
    """Apply the logistic sigmoid 1 / (1 + e^-x) elementwise."""
    def apply(self, tensor: xr.DataArray) -> xr.DataArray:
        # fix: the xr.ufuncs module was deprecated and removed from xarray;
        # plain numpy ufuncs dispatch on DataArray (preserving dims/coords),
        # so np.exp is the supported equivalent of xr.ufuncs.exp.
        return 1.0 / (1.0 + np.exp(-tensor))
@dataclass
class ZeroMeanUnitVariance(Processing):
    """Normalize the tensor to zero mean and unit variance.

    In FIXED mode the statistics are supplied explicitly via `mean`/`std`;
    in PER_SAMPLE / PER_DATASET mode they are looked up from the computed
    Mean/Std measures for this tensor.
    """
    mode: Mode = PER_SAMPLE
    # explicit statistics; required in FIXED mode, forbidden otherwise (asserted in apply)
    mean: Optional[Union[float, Sequence[float]]] = None
    std: Optional[Union[float, Sequence[float]]] = None
    # axes the statistics are reduced over / broadcast against
    axes: Optional[Sequence[str]] = None
    # numerical stabilizer added to std before dividing
    eps: float = 1.0e-6
    def get_required_measures(self) -> RequiredMeasures:
        """FIXED mode needs no measures; otherwise Mean and Std of this tensor."""
        if self.mode == FIXED:
            return {}
        else:
            axes = None if self.axes is None else tuple(self.axes)
            return {self.mode: {self.tensor_name: {Mean(axes=axes), Std(axes=axes)}}}
    def apply(self, tensor: xr.DataArray) -> xr.DataArray:
        """Return (tensor - mean) / (std + eps) as float32."""
        axes = None if self.axes is None else tuple(self.axes)
        if self.mode == FIXED:
            assert self.mean is not None and self.std is not None
            # reshape the fixed scalars/sequences to broadcast against the tensor
            mean = _get_fixed(self.mean, tensor, axes)
            std = _get_fixed(self.std, tensor, axes)
        elif self.mode in (PER_SAMPLE, PER_DATASET):
            assert self.mean is None and self.std is None
            mean = self.get_computed_measure(self.tensor_name, Mean(axes), mode=self.mode)
            std = self.get_computed_measure(self.tensor_name, Std(axes), mode=self.mode)
        else:
            raise ValueError(self.mode)
        tensor = (tensor - mean) / (std + self.eps)
        return ensure_dtype(tensor, dtype="float32")
# Shape of the registry below: maps preprocessing and postprocessing spec
# names to the Processing implementations in this module.
_KnownProcessing = TypedDict(
    "_KnownProcessing",
    dict(pre=Mapping[PreprocessingName, Type[Processing]], post=Mapping[PostprocessingName, Type[Processing]]),
)
# Registry of all known pre-/postprocessing implementations, keyed by spec name.
KNOWN_PROCESSING: _KnownProcessing = dict(
    pre={
        "binarize": Binarize,
        "clip": Clip,
        "scale_linear": ScaleLinear,
        "scale_range": ScaleRange,
        "sigmoid": Sigmoid,
        "zero_mean_unit_variance": ZeroMeanUnitVariance,
    },
    post={
        "binarize": Binarize,
        "clip": Clip,
        "scale_linear": ScaleLinear,
        # postprocessing-only entry; ScaleMeanVariance is still a stub above
        "scale_mean_variance": ScaleMeanVariance,
        "scale_range": ScaleRange,
        "sigmoid": Sigmoid,
        "zero_mean_unit_variance": ZeroMeanUnitVariance,
    },
)
| [
"numpy.atleast_1d",
"bioimageio.core.statistical_measures.Std",
"typing_extensions.get_args",
"bioimageio.core.statistical_measures.Mean",
"dataclasses.field",
"numpy.array",
"xarray.DataArray",
"dataclasses.fields",
"bioimageio.core.statistical_measures.Percentile",
"xarray.ufuncs.exp"
] | [((976, 1012), 'xarray.DataArray', 'xr.DataArray', (['fixed'], {'dims': 'fixed_dims'}), '(fixed, dims=fixed_dims)\n', (988, 1012), True, 'import xarray as xr\n'), ((1352, 1379), 'dataclasses.field', 'field', ([], {'default_factory': 'dict'}), '(default_factory=dict)\n', (1357, 1379), False, 'from dataclasses import dataclass, field, fields\n'), ((2758, 2770), 'dataclasses.fields', 'fields', (['self'], {}), '(self)\n', (2764, 2770), False, 'from dataclasses import dataclass, field, fields\n'), ((928, 943), 'numpy.array', 'np.array', (['fixed'], {}), '(fixed)\n', (936, 943), True, 'import numpy as np\n'), ((5675, 5717), 'bioimageio.core.statistical_measures.Percentile', 'Percentile', (['self.min_percentile'], {'axes': 'axes'}), '(self.min_percentile, axes=axes)\n', (5685, 5717), False, 'from bioimageio.core.statistical_measures import Mean, Measure, Percentile, Std\n'), ((5719, 5761), 'bioimageio.core.statistical_measures.Percentile', 'Percentile', (['self.max_percentile'], {'axes': 'axes'}), '(self.max_percentile, axes=axes)\n', (5729, 5761), False, 'from bioimageio.core.statistical_measures import Mean, Measure, Percentile, Std\n'), ((6083, 6125), 'bioimageio.core.statistical_measures.Percentile', 'Percentile', (['self.min_percentile'], {'axes': 'axes'}), '(self.min_percentile, axes=axes)\n', (6093, 6125), False, 'from bioimageio.core.statistical_measures import Mean, Measure, Percentile, Std\n'), ((6181, 6223), 'bioimageio.core.statistical_measures.Percentile', 'Percentile', (['self.max_percentile'], {'axes': 'axes'}), '(self.max_percentile, axes=axes)\n', (6191, 6223), False, 'from bioimageio.core.statistical_measures import Mean, Measure, Percentile, Std\n'), ((3065, 3081), 'typing_extensions.get_args', 'get_args', (['f.type'], {}), '(f.type)\n', (3073, 3081), False, 'from typing_extensions import Literal, get_args, TypedDict\n'), ((3111, 3127), 'typing_extensions.get_args', 'get_args', (['f.type'], {}), '(f.type)\n', (3119, 3127), False, 'from 
typing_extensions import Literal, get_args, TypedDict\n'), ((4726, 4750), 'numpy.atleast_1d', 'np.atleast_1d', (['self.gain'], {}), '(self.gain)\n', (4739, 4750), True, 'import numpy as np\n'), ((4803, 4829), 'numpy.atleast_1d', 'np.atleast_1d', (['self.offset'], {}), '(self.offset)\n', (4816, 4829), True, 'import numpy as np\n'), ((6621, 6643), 'xarray.ufuncs.exp', 'xr.ufuncs.exp', (['(-tensor)'], {}), '(-tensor)\n', (6634, 6643), True, 'import xarray as xr\n'), ((3164, 3179), 'typing_extensions.get_args', 'get_args', (['inner'], {}), '(inner)\n', (3172, 3179), False, 'from typing_extensions import Literal, get_args, TypedDict\n'), ((7684, 7694), 'bioimageio.core.statistical_measures.Mean', 'Mean', (['axes'], {}), '(axes)\n', (7688, 7694), False, 'from bioimageio.core.statistical_measures import Mean, Measure, Percentile, Std\n'), ((7774, 7783), 'bioimageio.core.statistical_measures.Std', 'Std', (['axes'], {}), '(axes)\n', (7777, 7783), False, 'from bioimageio.core.statistical_measures import Mean, Measure, Percentile, Std\n'), ((7147, 7162), 'bioimageio.core.statistical_measures.Mean', 'Mean', ([], {'axes': 'axes'}), '(axes=axes)\n', (7151, 7162), False, 'from bioimageio.core.statistical_measures import Mean, Measure, Percentile, Std\n'), ((7164, 7178), 'bioimageio.core.statistical_measures.Std', 'Std', ([], {'axes': 'axes'}), '(axes=axes)\n', (7167, 7178), False, 'from bioimageio.core.statistical_measures import Mean, Measure, Percentile, Std\n')] |
import os, subprocess, argparse, shutil, yaml
import SimpleITK as sitk
import numpy as np
from skimage.io import imread, imsave
from glob import glob
from skimage.transform import resize
from h5py import File
def parse_args():
    """Parse command-line arguments and return them as a plain dict."""
    cli = argparse.ArgumentParser(description='Setup benchmark dataset directories for NN training and testing')
    cli.add_argument('save_dir', type=str, metavar='save_dir', help='Directory in which to save the benchmark datasets')
    return vars(cli.parse_args())
if __name__ == '__main__':
    # Post-download cleanup pipeline: downloads the benchmarks, normalizes each
    # one (filenames, dtypes, label values), slices 3d volumes into 2d cross
    # sections, assembles the combined "All Mitochondria" dataset, and finally
    # points the per-benchmark yaml configs at the directories created here.
    # NOTE(review): paths are split/joined with '/' throughout, so this script
    # is POSIX-only — confirm that's intended before running on Windows.
    #parse the save directory
    args = parse_args()
    save_dir = args['save_dir']
    script_dir = os.path.dirname(os.path.realpath(__file__))
    #run the download_benchmarks.sh script
    download_script = os.path.join(script_dir, 'download_benchmarks.sh')
    create_slices_script = os.path.join(script_dir, 'create_slices.py')
    create_patches_script = os.path.join(script_dir, 'create_patches.py')
    command = f'bash {download_script} {save_dir}'
    subprocess.call(command.split(' '))
    #first, we need to fix up the lucchi_pp dataset:
    #1. the image and mask filenames are not the same
    #2. mask pixels are 255, should be 1 instead
    #process the train and test datasets separately
    for setname in ['train', 'test']:
        mskpaths = glob(os.path.join(save_dir, f'lucchi_pp/{setname}/masks/*.png'))
        for mp in mskpaths:
            #load the image
            mask = imread(mp)
            #convert labelmap values from 255 to 1
            if mask.ndim == 3:
                mask = mask[..., 0]
            #background padding values are non-zero
            #dividing by 255 rounds them down to zero
            mask = (mask / 255).astype(np.uint8)
            #fix the name to match image names: mask9999.png
            slice_num = mp.split('/')[-1].split('.png')[0]
            #save under the new name
            imsave(mp.replace(f'{slice_num}.png', f'mask{slice_num.zfill(4)}.png'), mask, check_contrast=False)
            #remove the old file
            os.remove(mp)
    #next, fix up the kasthuri_pp dataset:
    #1. mask pixels are 255, should be 1 instead
    #2. masks have 3 channels, should only have 1
    #process the train and test datasets separately
    for setname in ['train', 'test']:
        mskpaths = glob(os.path.join(save_dir, f'kasthuri_pp/{setname}/masks/*.png'))
        for mp in mskpaths:
            #load the image
            mask = imread(mp)
            #convert labelmap values from 255 to 1
            #take the first channel only (all 3 are the same anyway)
            if mask.ndim == 3:
                mask = mask[..., 0]
            #background padding values are non-zero
            #dividing by 255 rounds them down to zero
            mask = (mask / 255).astype(np.uint8)
            #overwrite the mask file
            imsave(mp, mask, check_contrast=False)
    #next, fix up the perez datasets:
    #1. the image and mask filenames are not the same, all have different prefixes
    #so we'll just remove the prefix (*_orig.999.png --> 999.png)
    #process all the images in one big group
    perez_fpaths = glob(os.path.join(save_dir, f'perez/*/*/*/*.png')) #e.g. perez/mito/train/images/*.png
    for fp in perez_fpaths:
        orig_name = fp.split('/')[-1]
        prefix = orig_name.split('.')[0]
        new_name = orig_name.replace(f'{prefix}.', '') #remove the prefix and the trailing dot
        os.rename(fp, fp.replace(orig_name, new_name))
    #next, the guay dataset:
    #1. image volumes are 16-bit signed pixels, convert them to 8-bit unsigned
    #2. mask volumes are 16-bit unsigned pixels, convert them to 8-bit unsigned
    #3. mask volumes have a different name than image volumes; change '-labels' to '-images'
    #4. slice cross sections to make 2d versions of the train, validation, and test sets
    guay_impaths = glob(os.path.join(save_dir, f'guay/3d/*/images/*.tif')) #e.g. guay/3d/train/images/*.tif
    for ip in guay_impaths:
        #load the volume
        vol = sitk.ReadImage(ip)
        #convert to numpy
        vol = sitk.GetArrayFromImage(vol)
        #convert the volume to float
        vol = vol.astype('float')
        #min-max normalize to [0, 255]:
        #subtract min, divide by max, multiply by 255
        vol -= vol.min()
        vol /= vol.max()
        vol *= 255
        #convert to uint8
        vol = vol.astype(np.uint8)
        #create a new volume
        sitk.WriteImage(sitk.GetImageFromArray(vol), ip.replace('.tif', '.nrrd'))
        #remove the original volume
        os.remove(ip)
    guay_mskpaths = glob(os.path.join(save_dir, f'guay/3d/*/masks/*.tif')) #e.g. guay/3d/test/masks/*.tif
    for mp in guay_mskpaths:
        #load the volume
        vol = sitk.ReadImage(mp)
        #convert to uint8
        vol = sitk.Cast(vol, sitk.sitkUInt8)
        #replace '-labels' with '-images' in the filename
        sitk.WriteImage(vol, mp.replace('-labels.tif', '-images.nrrd'))
        #remove the original volume
        os.remove(mp)
    #now we can slice the volumes into cross sections
    #sort the impaths and mskpaths just to ensure that they line up
    guay_impaths = np.sort(glob(os.path.join(save_dir, f'guay/3d/*/images/*.nrrd')))
    guay_mskpaths = np.sort(glob(os.path.join(save_dir, f'guay/3d/*/masks/*.nrrd')))
    for ip, mp in zip(guay_impaths, guay_mskpaths):
        assert(ip.replace('/images/', '/blank/') == mp.replace('/masks/', '/blank/')), "Image and mask volumes are not aligned!"
        #call the create_slices.py script to save results in the guay/2d folder
        setname = ip.split('/')[-3] #.../guay/3d/train/images/train-images.tif --> train
        slice_dir = os.path.join(save_dir, f'guay/2d/{setname}/')
        command = f'python {create_slices_script} {ip} {mp} {slice_dir} -a 2 -s 1'
        subprocess.call(command.split(' '))
    #now onto UroCell:
    #1. mito and lyso labelmaps, are separate, we need to combine them into a single mask volume
    #2. slice cross sections to make 2d versions of the train and test set
    #get paths of the mito and lyso labelmap volumes
    lysopaths = np.sort(glob(os.path.join(save_dir, f'urocell/3d/*/lyso/*.nii.gz'))) #e.g. urocell/3d/train/lyso/*.nii.gz
    mitopaths = np.sort(glob(os.path.join(save_dir, f'urocell/3d/*/mito/*.nii.gz'))) #e.g. urocell/3d/train/mito/*.nii.gz
    for lp, mp in zip(lysopaths, mitopaths):
        assert(lp.replace('/lyso/', '/mito/') == mp), "Lyso and mito label volumes are not aligned!"
        #load the volumes
        lyso = sitk.ReadImage(lp)
        mito = sitk.ReadImage(mp)
        #add them together into a single label volume
        #such that 1 == lyso and 2 == mito
        labelmap = lyso + 2 * mito
        #make sure the datatype is uint8
        labelmap = sitk.Cast(labelmap, sitk.sitkUInt8)
        #for two of the volumes we need to crop out some
        #regions with missing data
        if lp.split('/')[-1] == 'fib1-0-0-0.nii.gz':
            labelmap = labelmap[:, 12:]
        elif lp.split('/')[-1] == 'fib1-1-0-3.nii.gz':
            labelmap = labelmap[:, 54:]
        #save the result
        sitk.WriteImage(labelmap, lp.replace('/lyso/', '/masks/'))
    #now we're ready to slice into cross sections
    #get impaths and mskpaths
    urocell_impaths = np.sort(glob(os.path.join(save_dir, f'urocell/3d/*/images/*.nii.gz'))) #e.g. urocell/3d/test/images/*.nii.gz
    urocell_mskpaths = np.sort(glob(os.path.join(save_dir, f'urocell/3d/*/masks/*.nii.gz'))) #e.g. urocell/3d/train/masks/*.nii.gz
    for ip, mp in zip(urocell_impaths, urocell_mskpaths):
        assert(ip.replace('/images/', '/blank/') == mp.replace('/masks/', '/blank/')), "Image and mask volumes are not aligned!"
        #convert image from float to uint8
        image = sitk.Cast(sitk.ReadImage(ip), sitk.sitkUInt8)
        #for two of the volumes we need to crop out some
        #regions with missing data (same crops as the masks above)
        if ip.split('/')[-1] == 'fib1-0-0-0.nii.gz':
            image = image[:, 12:]
        elif ip.split('/')[-1] == 'fib1-1-0-3.nii.gz':
            image = image[:, 54:]
        sitk.WriteImage(image, ip)
        #call the create_slices.py script to save results in the urocell/2d folder
        setname = ip.split('/')[-3] #.../urocell/3d/train/images/fib1-4-3-0.nii.gz --> train
        slice_dir = os.path.join(save_dir, f'urocell/2d/{setname}/')
        command = f'python {create_slices_script} {ip} {mp} {slice_dir} -a 0 1 2 -s 1'
        subprocess.call(command.split(' '))
    #next we're going to handle CREMI
    #1. extract image volumes and labelmap volumes from .hdf files
    #2. binarize the synaptic cleft labelmaps and convert to uint8
    #3. Slice into cross sections
    cremi_hdfpaths = glob(os.path.join(save_dir, f'cremi/3d/*/*.hdf')) #e.g. cremi/3d/test/*.hdf
    for hdfp in cremi_hdfpaths:
        #extract the setname (train or test)
        setname = hdfp.split('/')[-2] #e.g. cremi/3d/test/*.hdf --> test
        #load the hdf file
        dataset = File(hdfp, mode='r')['volumes']
        #get the image vol, which is already uint8
        imvol = dataset['raw'].__array__()
        #get the mask volume which needs to be binarized
        #and inverted and saved as uint8
        #(0xffffffffffffffff marks background in the CREMI cleft labels)
        mskvol = dataset['labels']['clefts'].__array__()
        mskvol = np.invert(mskvol == 0xffffffffffffffff).astype(np.uint8)
        #save both image and mask as .nrrd files
        imvol = sitk.GetImageFromArray(imvol)
        new_path = hdfp.replace('.hdf', '.nrrd').replace(f'/{setname}/', f'/{setname}/images/')
        sitk.WriteImage(imvol, new_path)
        mskvol = sitk.GetImageFromArray(mskvol)
        new_path = hdfp.replace('.hdf', '.nrrd').replace(f'/{setname}/', f'/{setname}/masks/')
        sitk.WriteImage(mskvol, new_path)
        #remove the hdf file
        os.remove(hdfp)
    #now we can slice the volumes into cross sections
    #sort the impaths and mskpaths just to ensure that they line up
    cremi_impaths = np.sort(glob(os.path.join(save_dir, f'cremi/3d/*/images/*.nrrd')))
    cremi_mskpaths = np.sort(glob(os.path.join(save_dir, f'cremi/3d/*/masks/*.nrrd')))
    for ip, mp in zip(cremi_impaths, cremi_mskpaths):
        assert(ip.replace('/images/', '/blank/') == mp.replace('/masks/', '/blank/')), "Image and mask volumes are not aligned!"
        #call the create_slices.py script to save results in the cremi/2d folder
        setname = ip.split('/')[-3] #.../cremi/3d/train/images/sampleA.nrrd --> train
        slice_dir = os.path.join(save_dir, f'cremi/2d/{setname}/')
        command = f'python {create_slices_script} {ip} {mp} {slice_dir} -a 2 -s 1'
        subprocess.call(command.split(' '))
    #finally, let's make the All Mitochondria dataset
    #from <NAME>, lucchi, kasthuri, urocell, and guay
    #1. create directories
    #2. crop images from each benchmark into 256x256 patches
    #this is needed so that datasets like Kasthuri with 85 large images
    #have a similar number of patches relative to a dataset like
    #UroCell with 3200 small images
    #3. A portion of patches from the Kasthuri dataset contain
    #nothing but background padding; we want to remove them
    #make the directories
    if not os.path.isdir(os.path.join(save_dir, 'all_mito')):
        os.mkdir(os.path.join(save_dir, 'all_mito'))
        os.makedirs(os.path.join(save_dir, 'all_mito/train/images'))
        os.mkdir(os.path.join(save_dir, 'all_mito/train/masks'))
        os.makedirs(os.path.join(save_dir, 'all_mito/test/2d/'))
        os.mkdir(os.path.join(save_dir, 'all_mito/test/3d/'))
    #crop images in 256x256 patches from their sources directories
    #(-l selects which label value counts as mitochondria in each benchmark)
    benchmarks = ['perez/mito', 'lucchi_pp', 'kasthuri_pp', 'urocell/2d', 'guay/2d']
    benchmark_mito_labels = [1, 1, 1, 2, 2]
    for l, bmk in zip(benchmark_mito_labels, benchmarks):
        command = f'python {create_patches_script} {save_dir}/{bmk}/train/images/ {save_dir}/all_mito/train/images/'
        subprocess.call(command.split(' '))
        command = f'python {create_patches_script} {save_dir}/{bmk}/train/masks/ {save_dir}/all_mito/train/masks/ -l {l}'
        subprocess.call(command.split(' '))
    #remove any blank images and their corresponding masks
    impaths = np.sort(glob(os.path.join(save_dir, f'all_mito/train/images/*')))
    mskpaths = np.sort(glob(os.path.join(save_dir, f'all_mito/train/masks/*')))
    for ip, mp in zip(impaths, mskpaths):
        assert(ip.replace('/images/', '/blank/') == mp.replace('/masks/', '/blank/')), "Image and mask file paths are not aligned!"
        #load the image file
        image = imread(ip)
        #if more than 95% of the image is black padding
        #remove the image and its corresponding mask
        #NOTE(review): threshold assumes 256x256 patches (matches the crops above)
        thr = (256 ** 2) * 0.05
        if (image > 0).sum() < thr:
            os.remove(ip)
            os.remove(mp)
    #copy the test images from 2d datasets into the 2d test directory
    benchmarks = ['perez/mito', 'lucchi_pp', 'kasthuri_pp']
    benchmark_mito_labels = [1, 1, 1]
    for l, bmk in zip(benchmark_mito_labels, benchmarks):
        #glob all the images in the given test directory
        impaths = glob(os.path.join(save_dir, f'{bmk}/test/images/*'))
        bmk = 'perez_mito' if bmk == 'perez/mito' else bmk
        im_dst_dir = os.path.join(save_dir, f'all_mito/test/2d/{bmk}/images/')
        msk_dst_dir = os.path.join(save_dir, f'all_mito/test/2d/{bmk}/masks/')
        os.makedirs(im_dst_dir)
        os.makedirs(msk_dst_dir)
        for imp in impaths:
            fname = imp.split('/')[-1]
            shutil.copy(imp, im_dst_dir + fname)
            #do the same for the corresponding mask
            imp = imp.replace('/images/', '/masks/')
            shutil.copy(imp, msk_dst_dir + fname)
    #now copy only the mito label from the 3d benchmarks
    benchmarks = ['urocell/3d', 'guay/3d']
    benchmark_mito_labels = [2, 2]
    for l, bmk in zip(benchmark_mito_labels, benchmarks):
        #glob all the images in the given test directory
        impaths = glob(os.path.join(save_dir, f'{bmk}/test/images/*'))
        bn_name = bmk.split('/')[0]
        im_dst_dir = os.path.join(save_dir, f'all_mito/test/3d/{bn_name}/images/')
        msk_dst_dir = os.path.join(save_dir, f'all_mito/test/3d/{bn_name}/masks/')
        os.makedirs(im_dst_dir)
        os.makedirs(msk_dst_dir)
        for imp in impaths:
            #copy the image volume directly
            fname = imp.split('/')[-1]
            shutil.copy(imp, im_dst_dir + fname)
            #open the mask volume with simpleitk
            #and remove everything but the mito label
            imp = imp.replace('/images/', '/masks/')
            if bn_name == 'urocell':
                vol = sitk.ReadImage(imp)
                labelmap = vol == 2
                mito_vol = sitk.Cast(labelmap, sitk.sitkUInt8)
            else:
                #keep labels 0, 1, 2 for guay (label 1 is the mask in which
                #the ground truth is defined)
                #load the volumes
                vol = sitk.ReadImage(imp)
                cell = vol == 1
                other = vol > 2
                mito = vol == 2
                #make the mito label 1 and the cell label 2
                labelmap = mito + 2 * (cell + other)
                mito_vol = sitk.Cast(labelmap, sitk.sitkUInt8)
            #save the result
            sitk.WriteImage(mito_vol, msk_dst_dir + fname)
    #overwrite the data and test directories in each benchmarks' yaml file
    benchmarks = ['all_mito', 'cremi', 'guay', 'kasthuri_pp', 'lucchi_pp',
                  'perez_lyso', 'perez_mito', 'perez_nuclei', 'perez_nucleoli', 'urocell']
    data_dirs = ['all_mito/', 'cremi/2d/', 'guay/2d/', 'kasthuri_pp/', 'lucchi_pp/',
                 'perez/lyso/', 'perez/mito/', 'perez/nuclei/', 'perez/nucleoli/', 'urocell/2d/']
    test_dirs = ['all_mito/test/', 'cremi/3d/test/', 'guay/3d/test/', 'kasthuri_pp/test/', 'lucchi_pp/test/', 'perez/lyso/test/',
                 'perez/mito/test/', 'perez/nuclei/test/', 'perez/nucleoli/test/', 'urocell/3d/test/']
    config_dir = os.path.join(script_dir, '../benchmark_configs/')
    #overwrite the data_dir and test_dir lines to match
    #the directories created in this setup script
    for bmk, dd, td in zip(benchmarks, data_dirs, test_dirs):
        with open(os.path.join(config_dir, f'{bmk}.yaml'), mode='r') as f:
            lines = f.read().splitlines()
        #locate the data_dir line and whichever test_dir* key the config uses
        data_dir_ln = -1
        test_dir_ln = -1
        test_dir_name = 'test_dir'
        for ix, l in enumerate(lines):
            if l.startswith('data_dir:'):
                data_dir_ln = ix
            elif l.startswith('test_dir2d:'):
                test_dir_ln = ix
                test_dir_name = 'test_dir2d'
            elif l.startswith('test_dir3d:'):
                test_dir_ln = ix
                test_dir_name = 'test_dir3d'
            elif l.startswith('test_dir:'):
                test_dir_ln = ix
                test_dir_name = 'test_dir'
        #rewrite both lines with quoted absolute paths under save_dir
        lines[data_dir_ln] = 'data_dir: ' + '"' + os.path.join(save_dir, dd) + '"'
        lines[test_dir_ln] = f'{test_dir_name}: ' + '"' + os.path.join(save_dir, td) + '"'
        lines = '\n'.join(lines)
        with open(os.path.join(config_dir, f'{bmk}.yaml'), mode='w') as f:
            f.write(lines) | [
"os.remove",
"h5py.File",
"argparse.ArgumentParser",
"os.makedirs",
"skimage.io.imsave",
"numpy.invert",
"SimpleITK.ReadImage",
"os.path.realpath",
"shutil.copy",
"SimpleITK.GetArrayFromImage",
"SimpleITK.WriteImage",
"SimpleITK.GetImageFromArray",
"os.path.join",
"SimpleITK.Cast",
"skim... | [((272, 379), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Setup benchmark dataset directories for NN training and testing"""'}), "(description=\n 'Setup benchmark dataset directories for NN training and testing')\n", (295, 379), False, 'import os, subprocess, argparse, shutil, yaml\n'), ((802, 852), 'os.path.join', 'os.path.join', (['script_dir', '"""download_benchmarks.sh"""'], {}), "(script_dir, 'download_benchmarks.sh')\n", (814, 852), False, 'import os, subprocess, argparse, shutil, yaml\n'), ((880, 924), 'os.path.join', 'os.path.join', (['script_dir', '"""create_slices.py"""'], {}), "(script_dir, 'create_slices.py')\n", (892, 924), False, 'import os, subprocess, argparse, shutil, yaml\n'), ((953, 998), 'os.path.join', 'os.path.join', (['script_dir', '"""create_patches.py"""'], {}), "(script_dir, 'create_patches.py')\n", (965, 998), False, 'import os, subprocess, argparse, shutil, yaml\n'), ((16630, 16679), 'os.path.join', 'os.path.join', (['script_dir', '"""../benchmark_configs/"""'], {}), "(script_dir, '../benchmark_configs/')\n", (16642, 16679), False, 'import os, subprocess, argparse, shutil, yaml\n'), ((704, 730), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (720, 730), False, 'import os, subprocess, argparse, shutil, yaml\n'), ((3350, 3394), 'os.path.join', 'os.path.join', (['save_dir', 'f"""perez/*/*/*/*.png"""'], {}), "(save_dir, f'perez/*/*/*/*.png')\n", (3362, 3394), False, 'import os, subprocess, argparse, shutil, yaml\n'), ((4087, 4136), 'os.path.join', 'os.path.join', (['save_dir', 'f"""guay/3d/*/images/*.tif"""'], {}), "(save_dir, f'guay/3d/*/images/*.tif')\n", (4099, 4136), False, 'import os, subprocess, argparse, shutil, yaml\n'), ((4238, 4256), 'SimpleITK.ReadImage', 'sitk.ReadImage', (['ip'], {}), '(ip)\n', (4252, 4256), True, 'import SimpleITK as sitk\n'), ((4306, 4333), 'SimpleITK.GetArrayFromImage', 'sitk.GetArrayFromImage', (['vol'], {}), '(vol)\n', (4328, 4333), 
True, 'import SimpleITK as sitk\n'), ((4789, 4802), 'os.remove', 'os.remove', (['ip'], {}), '(ip)\n', (4798, 4802), False, 'import os, subprocess, argparse, shutil, yaml\n'), ((4837, 4885), 'os.path.join', 'os.path.join', (['save_dir', 'f"""guay/3d/*/masks/*.tif"""'], {}), "(save_dir, f'guay/3d/*/masks/*.tif')\n", (4849, 4885), False, 'import os, subprocess, argparse, shutil, yaml\n'), ((4986, 5004), 'SimpleITK.ReadImage', 'sitk.ReadImage', (['mp'], {}), '(mp)\n', (5000, 5004), True, 'import SimpleITK as sitk\n'), ((5054, 5084), 'SimpleITK.Cast', 'sitk.Cast', (['vol', 'sitk.sitkUInt8'], {}), '(vol, sitk.sitkUInt8)\n', (5063, 5084), True, 'import SimpleITK as sitk\n'), ((5277, 5290), 'os.remove', 'os.remove', (['mp'], {}), '(mp)\n', (5286, 5290), False, 'import os, subprocess, argparse, shutil, yaml\n'), ((5962, 6007), 'os.path.join', 'os.path.join', (['save_dir', 'f"""guay/2d/{setname}/"""'], {}), "(save_dir, f'guay/2d/{setname}/')\n", (5974, 6007), False, 'import os, subprocess, argparse, shutil, yaml\n'), ((6829, 6847), 'SimpleITK.ReadImage', 'sitk.ReadImage', (['lp'], {}), '(lp)\n', (6843, 6847), True, 'import SimpleITK as sitk\n'), ((6863, 6881), 'SimpleITK.ReadImage', 'sitk.ReadImage', (['mp'], {}), '(mp)\n', (6877, 6881), True, 'import SimpleITK as sitk\n'), ((7092, 7127), 'SimpleITK.Cast', 'sitk.Cast', (['labelmap', 'sitk.sitkUInt8'], {}), '(labelmap, sitk.sitkUInt8)\n', (7101, 7127), True, 'import SimpleITK as sitk\n'), ((8464, 8490), 'SimpleITK.WriteImage', 'sitk.WriteImage', (['image', 'ip'], {}), '(image, ip)\n', (8479, 8490), True, 'import SimpleITK as sitk\n'), ((8696, 8744), 'os.path.join', 'os.path.join', (['save_dir', 'f"""urocell/2d/{setname}/"""'], {}), "(save_dir, f'urocell/2d/{setname}/')\n", (8708, 8744), False, 'import os, subprocess, argparse, shutil, yaml\n'), ((9117, 9160), 'os.path.join', 'os.path.join', (['save_dir', 'f"""cremi/3d/*/*.hdf"""'], {}), "(save_dir, f'cremi/3d/*/*.hdf')\n", (9129, 9160), False, 'import os, subprocess, 
argparse, shutil, yaml\n'), ((9839, 9868), 'SimpleITK.GetImageFromArray', 'sitk.GetImageFromArray', (['imvol'], {}), '(imvol)\n', (9861, 9868), True, 'import SimpleITK as sitk\n'), ((9973, 10005), 'SimpleITK.WriteImage', 'sitk.WriteImage', (['imvol', 'new_path'], {}), '(imvol, new_path)\n', (9988, 10005), True, 'import SimpleITK as sitk\n'), ((10032, 10062), 'SimpleITK.GetImageFromArray', 'sitk.GetImageFromArray', (['mskvol'], {}), '(mskvol)\n', (10054, 10062), True, 'import SimpleITK as sitk\n'), ((10166, 10199), 'SimpleITK.WriteImage', 'sitk.WriteImage', (['mskvol', 'new_path'], {}), '(mskvol, new_path)\n', (10181, 10199), True, 'import SimpleITK as sitk\n'), ((10246, 10261), 'os.remove', 'os.remove', (['hdfp'], {}), '(hdfp)\n', (10255, 10261), False, 'import os, subprocess, argparse, shutil, yaml\n'), ((10929, 10975), 'os.path.join', 'os.path.join', (['save_dir', 'f"""cremi/2d/{setname}/"""'], {}), "(save_dir, f'cremi/2d/{setname}/')\n", (10941, 10975), False, 'import os, subprocess, argparse, shutil, yaml\n'), ((13041, 13051), 'skimage.io.imread', 'imread', (['ip'], {}), '(ip)\n', (13047, 13051), False, 'from skimage.io import imread, imsave\n'), ((13734, 13791), 'os.path.join', 'os.path.join', (['save_dir', 'f"""all_mito/test/2d/{bmk}/images/"""'], {}), "(save_dir, f'all_mito/test/2d/{bmk}/images/')\n", (13746, 13791), False, 'import os, subprocess, argparse, shutil, yaml\n'), ((13814, 13870), 'os.path.join', 'os.path.join', (['save_dir', 'f"""all_mito/test/2d/{bmk}/masks/"""'], {}), "(save_dir, f'all_mito/test/2d/{bmk}/masks/')\n", (13826, 13870), False, 'import os, subprocess, argparse, shutil, yaml\n'), ((13879, 13902), 'os.makedirs', 'os.makedirs', (['im_dst_dir'], {}), '(im_dst_dir)\n', (13890, 13902), False, 'import os, subprocess, argparse, shutil, yaml\n'), ((13911, 13935), 'os.makedirs', 'os.makedirs', (['msk_dst_dir'], {}), '(msk_dst_dir)\n', (13922, 13935), False, 'import os, subprocess, argparse, shutil, yaml\n'), ((14622, 14683), 'os.path.join', 
'os.path.join', (['save_dir', 'f"""all_mito/test/3d/{bn_name}/images/"""'], {}), "(save_dir, f'all_mito/test/3d/{bn_name}/images/')\n", (14634, 14683), False, 'import os, subprocess, argparse, shutil, yaml\n'), ((14706, 14766), 'os.path.join', 'os.path.join', (['save_dir', 'f"""all_mito/test/3d/{bn_name}/masks/"""'], {}), "(save_dir, f'all_mito/test/3d/{bn_name}/masks/')\n", (14718, 14766), False, 'import os, subprocess, argparse, shutil, yaml\n'), ((14775, 14798), 'os.makedirs', 'os.makedirs', (['im_dst_dir'], {}), '(im_dst_dir)\n', (14786, 14798), False, 'import os, subprocess, argparse, shutil, yaml\n'), ((14807, 14831), 'os.makedirs', 'os.makedirs', (['msk_dst_dir'], {}), '(msk_dst_dir)\n', (14818, 14831), False, 'import os, subprocess, argparse, shutil, yaml\n'), ((1366, 1424), 'os.path.join', 'os.path.join', (['save_dir', 'f"""lucchi_pp/{setname}/masks/*.png"""'], {}), "(save_dir, f'lucchi_pp/{setname}/masks/*.png')\n", (1378, 1424), False, 'import os, subprocess, argparse, shutil, yaml\n'), ((1501, 1511), 'skimage.io.imread', 'imread', (['mp'], {}), '(mp)\n', (1507, 1511), False, 'from skimage.io import imread, imsave\n'), ((2164, 2177), 'os.remove', 'os.remove', (['mp'], {}), '(mp)\n', (2173, 2177), False, 'import os, subprocess, argparse, shutil, yaml\n'), ((2439, 2499), 'os.path.join', 'os.path.join', (['save_dir', 'f"""kasthuri_pp/{setname}/masks/*.png"""'], {}), "(save_dir, f'kasthuri_pp/{setname}/masks/*.png')\n", (2451, 2499), False, 'import os, subprocess, argparse, shutil, yaml\n'), ((2576, 2586), 'skimage.io.imread', 'imread', (['mp'], {}), '(mp)\n', (2582, 2586), False, 'from skimage.io import imread, imsave\n'), ((3037, 3075), 'skimage.io.imsave', 'imsave', (['mp', 'mask'], {'check_contrast': '(False)'}), '(mp, mask, check_contrast=False)\n', (3043, 3075), False, 'from skimage.io import imread, imsave\n'), ((4678, 4705), 'SimpleITK.GetImageFromArray', 'sitk.GetImageFromArray', (['vol'], {}), '(vol)\n', (4700, 4705), True, 'import SimpleITK as 
sitk\n'), ((5454, 5504), 'os.path.join', 'os.path.join', (['save_dir', 'f"""guay/3d/*/images/*.nrrd"""'], {}), "(save_dir, f'guay/3d/*/images/*.nrrd')\n", (5466, 5504), False, 'import os, subprocess, argparse, shutil, yaml\n'), ((5540, 5589), 'os.path.join', 'os.path.join', (['save_dir', 'f"""guay/3d/*/masks/*.nrrd"""'], {}), "(save_dir, f'guay/3d/*/masks/*.nrrd')\n", (5552, 5589), False, 'import os, subprocess, argparse, shutil, yaml\n'), ((6418, 6471), 'os.path.join', 'os.path.join', (['save_dir', 'f"""urocell/3d/*/lyso/*.nii.gz"""'], {}), "(save_dir, f'urocell/3d/*/lyso/*.nii.gz')\n", (6430, 6471), False, 'import os, subprocess, argparse, shutil, yaml\n'), ((6540, 6593), 'os.path.join', 'os.path.join', (['save_dir', 'f"""urocell/3d/*/mito/*.nii.gz"""'], {}), "(save_dir, f'urocell/3d/*/mito/*.nii.gz')\n", (6552, 6593), False, 'import os, subprocess, argparse, shutil, yaml\n'), ((7642, 7697), 'os.path.join', 'os.path.join', (['save_dir', 'f"""urocell/3d/*/images/*.nii.gz"""'], {}), "(save_dir, f'urocell/3d/*/images/*.nii.gz')\n", (7654, 7697), False, 'import os, subprocess, argparse, shutil, yaml\n'), ((7774, 7828), 'os.path.join', 'os.path.join', (['save_dir', 'f"""urocell/3d/*/masks/*.nii.gz"""'], {}), "(save_dir, f'urocell/3d/*/masks/*.nii.gz')\n", (7786, 7828), False, 'import os, subprocess, argparse, shutil, yaml\n'), ((8134, 8152), 'SimpleITK.ReadImage', 'sitk.ReadImage', (['ip'], {}), '(ip)\n', (8148, 8152), True, 'import SimpleITK as sitk\n'), ((9392, 9412), 'h5py.File', 'File', (['hdfp'], {'mode': '"""r"""'}), "(hdfp, mode='r')\n", (9396, 9412), False, 'from h5py import File\n'), ((10418, 10469), 'os.path.join', 'os.path.join', (['save_dir', 'f"""cremi/3d/*/images/*.nrrd"""'], {}), "(save_dir, f'cremi/3d/*/images/*.nrrd')\n", (10430, 10469), False, 'import os, subprocess, argparse, shutil, yaml\n'), ((10506, 10556), 'os.path.join', 'os.path.join', (['save_dir', 'f"""cremi/3d/*/masks/*.nrrd"""'], {}), "(save_dir, f'cremi/3d/*/masks/*.nrrd')\n", (10518, 
10556), False, 'import os, subprocess, argparse, shutil, yaml\n'), ((11650, 11684), 'os.path.join', 'os.path.join', (['save_dir', '"""all_mito"""'], {}), "(save_dir, 'all_mito')\n", (11662, 11684), False, 'import os, subprocess, argparse, shutil, yaml\n'), ((11704, 11738), 'os.path.join', 'os.path.join', (['save_dir', '"""all_mito"""'], {}), "(save_dir, 'all_mito')\n", (11716, 11738), False, 'import os, subprocess, argparse, shutil, yaml\n'), ((11760, 11807), 'os.path.join', 'os.path.join', (['save_dir', '"""all_mito/train/images"""'], {}), "(save_dir, 'all_mito/train/images')\n", (11772, 11807), False, 'import os, subprocess, argparse, shutil, yaml\n'), ((11826, 11872), 'os.path.join', 'os.path.join', (['save_dir', '"""all_mito/train/masks"""'], {}), "(save_dir, 'all_mito/train/masks')\n", (11838, 11872), False, 'import os, subprocess, argparse, shutil, yaml\n'), ((11894, 11937), 'os.path.join', 'os.path.join', (['save_dir', '"""all_mito/test/2d/"""'], {}), "(save_dir, 'all_mito/test/2d/')\n", (11906, 11937), False, 'import os, subprocess, argparse, shutil, yaml\n'), ((11956, 11999), 'os.path.join', 'os.path.join', (['save_dir', '"""all_mito/test/3d/"""'], {}), "(save_dir, 'all_mito/test/3d/')\n", (11968, 11999), False, 'import os, subprocess, argparse, shutil, yaml\n'), ((12679, 12729), 'os.path.join', 'os.path.join', (['save_dir', 'f"""all_mito/train/images/*"""'], {}), "(save_dir, f'all_mito/train/images/*')\n", (12691, 12729), False, 'import os, subprocess, argparse, shutil, yaml\n'), ((12760, 12809), 'os.path.join', 'os.path.join', (['save_dir', 'f"""all_mito/train/masks/*"""'], {}), "(save_dir, f'all_mito/train/masks/*')\n", (12772, 12809), False, 'import os, subprocess, argparse, shutil, yaml\n'), ((13251, 13264), 'os.remove', 'os.remove', (['ip'], {}), '(ip)\n', (13260, 13264), False, 'import os, subprocess, argparse, shutil, yaml\n'), ((13277, 13290), 'os.remove', 'os.remove', (['mp'], {}), '(mp)\n', (13286, 13290), False, 'import os, subprocess, 
argparse, shutil, yaml\n'), ((13606, 13652), 'os.path.join', 'os.path.join', (['save_dir', 'f"""{bmk}/test/images/*"""'], {}), "(save_dir, f'{bmk}/test/images/*')\n", (13618, 13652), False, 'import os, subprocess, argparse, shutil, yaml\n'), ((14024, 14060), 'shutil.copy', 'shutil.copy', (['imp', '(im_dst_dir + fname)'], {}), '(imp, im_dst_dir + fname)\n', (14035, 14060), False, 'import os, subprocess, argparse, shutil, yaml\n'), ((14191, 14228), 'shutil.copy', 'shutil.copy', (['imp', '(msk_dst_dir + fname)'], {}), '(imp, msk_dst_dir + fname)\n', (14202, 14228), False, 'import os, subprocess, argparse, shutil, yaml\n'), ((14508, 14554), 'os.path.join', 'os.path.join', (['save_dir', 'f"""{bmk}/test/images/*"""'], {}), "(save_dir, f'{bmk}/test/images/*')\n", (14520, 14554), False, 'import os, subprocess, argparse, shutil, yaml\n'), ((14964, 15000), 'shutil.copy', 'shutil.copy', (['imp', '(im_dst_dir + fname)'], {}), '(imp, im_dst_dir + fname)\n', (14975, 15000), False, 'import os, subprocess, argparse, shutil, yaml\n'), ((15891, 15937), 'SimpleITK.WriteImage', 'sitk.WriteImage', (['mito_vol', '(msk_dst_dir + fname)'], {}), '(mito_vol, msk_dst_dir + fname)\n', (15906, 15937), True, 'import SimpleITK as sitk\n'), ((9708, 9749), 'numpy.invert', 'np.invert', (['(mskvol == 18446744073709551615)'], {}), '(mskvol == 18446744073709551615)\n', (9717, 9749), True, 'import numpy as np\n'), ((15229, 15248), 'SimpleITK.ReadImage', 'sitk.ReadImage', (['imp'], {}), '(imp)\n', (15243, 15248), True, 'import SimpleITK as sitk\n'), ((15312, 15347), 'SimpleITK.Cast', 'sitk.Cast', (['labelmap', 'sitk.sitkUInt8'], {}), '(labelmap, sitk.sitkUInt8)\n', (15321, 15347), True, 'import SimpleITK as sitk\n'), ((15545, 15564), 'SimpleITK.ReadImage', 'sitk.ReadImage', (['imp'], {}), '(imp)\n', (15559, 15564), True, 'import SimpleITK as sitk\n'), ((15801, 15836), 'SimpleITK.Cast', 'sitk.Cast', (['labelmap', 'sitk.sitkUInt8'], {}), '(labelmap, sitk.sitkUInt8)\n', (15810, 15836), True, 'import 
SimpleITK as sitk\n'), ((16871, 16910), 'os.path.join', 'os.path.join', (['config_dir', 'f"""{bmk}.yaml"""'], {}), "(config_dir, f'{bmk}.yaml')\n", (16883, 16910), False, 'import os, subprocess, argparse, shutil, yaml\n'), ((17836, 17875), 'os.path.join', 'os.path.join', (['config_dir', 'f"""{bmk}.yaml"""'], {}), "(config_dir, f'{bmk}.yaml')\n", (17848, 17875), False, 'import os, subprocess, argparse, shutil, yaml\n'), ((17652, 17678), 'os.path.join', 'os.path.join', (['save_dir', 'dd'], {}), '(save_dir, dd)\n', (17664, 17678), False, 'import os, subprocess, argparse, shutil, yaml\n'), ((17747, 17773), 'os.path.join', 'os.path.join', (['save_dir', 'td'], {}), '(save_dir, td)\n', (17759, 17773), False, 'import os, subprocess, argparse, shutil, yaml\n')] |
import numpy as np
# ALPHABET_ID = {"A": 1, "B": 2, "C": 3, "D": 4, "E": 5, "F": 6, "G": 7, "H": 8, "I": 9, "J": 10, "K": 11, "L": 12,
# "M": 13, "N": 14, "O": 15, "P": 16, "Q": 17, "R": 18, "S": 19, "T": 20, "U": 21, "V": 22,
# "W": 23, "X": 24, "Y": 25, "Z": 0}
#
# CIPHER_ID = "ZABCDEFGHIJKLMNOPQRSTUVWXY"
def str_to_vec(text, set_size, let_map_num):
ret = np.array([], dtype='int64')
for i in range(0, len(text), set_size):
p = np.zeros((set_size,), dtype='int64')
for j in range(i, i + set_size):
if j < len(text):
p[j % set_size] = (let_map_num.get(text[j]))
ret = np.append(ret, p, axis=0)
dummy_count = set_size - (len(text) % set_size)
if dummy_count < set_size:
dummy_id = let_map_num.get(text[len(text) -1])
for i in range(dummy_count):
ret[ret.shape[0] -1 - i] = dummy_id
return ret.reshape(-1, set_size).transpose()
def vec_to_str(matrix, num_map_let):
text = ""
for col in range(matrix.shape[1]):
for row in range(matrix.shape[0]):
text += num_map_let[int(matrix[row][col])]
return text
def vec_to_vec(from_matrix, key, alpha_size):
to_matrix = np.dot(key, from_matrix)
for row in range(to_matrix.shape[0]):
for col in range(to_matrix.shape[1]):
if to_matrix[row][col] >= alpha_size:
to_matrix[row][col] %= alpha_size
return to_matrix
| [
"numpy.zeros",
"numpy.dot",
"numpy.append",
"numpy.array"
] | [((398, 425), 'numpy.array', 'np.array', (['[]'], {'dtype': '"""int64"""'}), "([], dtype='int64')\n", (406, 425), True, 'import numpy as np\n'), ((1240, 1264), 'numpy.dot', 'np.dot', (['key', 'from_matrix'], {}), '(key, from_matrix)\n', (1246, 1264), True, 'import numpy as np\n'), ((482, 518), 'numpy.zeros', 'np.zeros', (['(set_size,)'], {'dtype': '"""int64"""'}), "((set_size,), dtype='int64')\n", (490, 518), True, 'import numpy as np\n'), ((665, 690), 'numpy.append', 'np.append', (['ret', 'p'], {'axis': '(0)'}), '(ret, p, axis=0)\n', (674, 690), True, 'import numpy as np\n')] |
import numpy as np
from numpy.linalg import norm
from ..coordinates import _convertSphericalToCartesian
__all__ = ["calcNae",
"calcDelta",
"calcXae",
"calcXa",
"calcNhat",
"calcR1",
"calcR2"]
x_axis = np.array([1, 0, 0])
y_axis = np.array([0, 1, 0])
z_axis = np.array([0, 0, 1])
def calcNae(coords_ec_ang):
"""
Convert angular ecliptic coordinates to
to a cartesian unit vector.
Input coordinate array should have shape (N, 2):
np.array([[lon1, lat1],
[lon2, lat2],
[lon3, lat3],
.....
[lonN, latN]])
Parameters
----------
coords_ec_ang : `~numpy.ndarray` (N, 2)
Ecliptic longitude and latitude in degrees.
Returns
-------
N_ae : `~numpy.ndarray` (N, 3)
Cartesian unit vector in direction of provided
angular coordinates.
"""
rho = np.ones(len(coords_ec_ang))
lon = np.radians(coords_ec_ang[:, 0])
lat = np.radians(coords_ec_ang[:, 1])
velocities = np.zeros(len(rho))
x, y, z, vx, vy, vz = _convertSphericalToCartesian(rho, lon, lat, velocities, velocities, velocities)
return np.array([x, y, z]).T
def calcDelta(r, x_e, n_ae):
"""
Calculate topocentric distance to the asteroid.
Parameters
----------
r : float
Heliocentric/barycentric distance in arbitrary units.
x_e : `~numpy.ndarray` (3)
Topocentric position vector in same units as r.
n_ae : `~numpy.ndarray` (3)
Unit vector in direction of asteroid from the topocentric position
in same units as r.
Returns
-------
delta : float
Distance from topocenter to asteroid in units of r.
"""
return - np.dot(n_ae, x_e) + np.sqrt(np.dot(n_ae, x_e)**2 + r**2 - norm(x_e)**2)
def calcXae(delta, n_ae):
"""
Calculate the topocenter to asteroid position vector.
Parameters
----------
delta : float
Distance from the topocenter to asteroid in arbitrary units.
n_ae : `~numpy.ndarray` (3)
Unit vector in direction of asteroid from the topocentric position
in same units as delta.
Returns
-------
x_ae : `~numpy.ndarray` (3)
Topocenter to asteroid position vector in units of delta.
"""
return np.dot(delta, n_ae)
def calcXa(x_ae, x_e):
"""
Calculate the asteroid position vector.
Parameters
----------
x_ae : `~numpy.ndarray` (3)
Topocenter to asteroid position vector in arbitrary units.
x_e : `~numpy.ndarray` (3)
Topocentric position vector in same units as x_ae.
Returns
-------
x_a : `~numpy.ndarray` (3)
Asteroid position vector in units of x_ae.
"""
return x_ae + x_e
def calcNhat(x_a):
"""
Calulate the unit vector normal to the plane of the orbit.
Parameters
----------
x_a : `~numpy.ndarray` (3)
Asteroid position vector in arbitrary units.
Returns
-------
n_hat : `~numpy.ndarray` (3)
Unit vector normal to plane of orbit.
"""
# Make n_a unit vector
n_a = x_a / norm(x_a)
# Find the normal to the plane of the orbit n
n = np.cross(n_a, np.cross(z_axis, n_a))
# Make n a unit vector
n_hat = n / norm(n)
return n_hat
def calcR1(x_a, n_hat):
"""
Calculate the rotation matrix that would rotate the
position vector x_ae to the x-y plane.
Parameters
----------
x_a : `~numpy.ndarray` (3)
Asteroid position vector in arbitrary units.
n_hat : `~numpy.ndarray` (3)
Unit vector normal to plane of orbit.
Returns
-------
R1 : `~numpy.matrix` (3, 3)
Rotation matrix.
"""
# Find the rotation axis v
v = np.cross(n_hat, z_axis)
# Calculate the cosine of the rotation angle, equivalent to the cosine of the
# inclination
c = np.dot(n_hat, z_axis)
# Compute the skew-symmetric cross-product of the rotation axis vector v
vp = np.matrix([[0, -v[2], v[1]],
[v[2], 0, -v[0]],
[-v[1], v[0], 0]])
# Calculate R1 and return
return np.identity(3) + vp + vp**2 * (1 / (1 + c))
def calcR2(x_a_xy):
"""
Calculate the rotation matrix that would rotate a vector in
the x-y plane to the x-axis.
Parameters
----------
x_a_xy : `~numpy.ndarray` (3)
Barycentric asteroid position vector rotated to the x-y plane.
Returns
-------
R2 : `~numpy.matrix` (3, 3)
Rotation matrix.
"""
x_a_xy = x_a_xy / norm(x_a_xy)
# Assuming the vector x_a_xy has been normalized, and is in the xy plane.
ca = x_a_xy[0]
sa = x_a_xy[1]
return np.array([[ca, sa, 0],
[-sa, ca, 0],
[0, 0, 1]])
| [
"numpy.radians",
"numpy.matrix",
"numpy.cross",
"numpy.identity",
"numpy.array",
"numpy.linalg.norm",
"numpy.dot"
] | [((269, 288), 'numpy.array', 'np.array', (['[1, 0, 0]'], {}), '([1, 0, 0])\n', (277, 288), True, 'import numpy as np\n'), ((298, 317), 'numpy.array', 'np.array', (['[0, 1, 0]'], {}), '([0, 1, 0])\n', (306, 317), True, 'import numpy as np\n'), ((327, 346), 'numpy.array', 'np.array', (['[0, 0, 1]'], {}), '([0, 0, 1])\n', (335, 346), True, 'import numpy as np\n'), ((984, 1015), 'numpy.radians', 'np.radians', (['coords_ec_ang[:, 0]'], {}), '(coords_ec_ang[:, 0])\n', (994, 1015), True, 'import numpy as np\n'), ((1026, 1057), 'numpy.radians', 'np.radians', (['coords_ec_ang[:, 1]'], {}), '(coords_ec_ang[:, 1])\n', (1036, 1057), True, 'import numpy as np\n'), ((2342, 2361), 'numpy.dot', 'np.dot', (['delta', 'n_ae'], {}), '(delta, n_ae)\n', (2348, 2361), True, 'import numpy as np\n'), ((3804, 3827), 'numpy.cross', 'np.cross', (['n_hat', 'z_axis'], {}), '(n_hat, z_axis)\n', (3812, 3827), True, 'import numpy as np\n'), ((3936, 3957), 'numpy.dot', 'np.dot', (['n_hat', 'z_axis'], {}), '(n_hat, z_axis)\n', (3942, 3957), True, 'import numpy as np\n'), ((4044, 4109), 'numpy.matrix', 'np.matrix', (['[[0, -v[2], v[1]], [v[2], 0, -v[0]], [-v[1], v[0], 0]]'], {}), '([[0, -v[2], v[1]], [v[2], 0, -v[0]], [-v[1], v[0], 0]])\n', (4053, 4109), True, 'import numpy as np\n'), ((4750, 4798), 'numpy.array', 'np.array', (['[[ca, sa, 0], [-sa, ca, 0], [0, 0, 1]]'], {}), '([[ca, sa, 0], [-sa, ca, 0], [0, 0, 1]])\n', (4758, 4798), True, 'import numpy as np\n'), ((1211, 1230), 'numpy.array', 'np.array', (['[x, y, z]'], {}), '([x, y, z])\n', (1219, 1230), True, 'import numpy as np\n'), ((3174, 3183), 'numpy.linalg.norm', 'norm', (['x_a'], {}), '(x_a)\n', (3178, 3183), False, 'from numpy.linalg import norm\n'), ((3256, 3277), 'numpy.cross', 'np.cross', (['z_axis', 'n_a'], {}), '(z_axis, n_a)\n', (3264, 3277), True, 'import numpy as np\n'), ((3322, 3329), 'numpy.linalg.norm', 'norm', (['n'], {}), '(n)\n', (3326, 3329), False, 'from numpy.linalg import norm\n'), ((4610, 4622), 'numpy.linalg.norm', 
'norm', (['x_a_xy'], {}), '(x_a_xy)\n', (4614, 4622), False, 'from numpy.linalg import norm\n'), ((1777, 1794), 'numpy.dot', 'np.dot', (['n_ae', 'x_e'], {}), '(n_ae, x_e)\n', (1783, 1794), True, 'import numpy as np\n'), ((4191, 4205), 'numpy.identity', 'np.identity', (['(3)'], {}), '(3)\n', (4202, 4205), True, 'import numpy as np\n'), ((1835, 1844), 'numpy.linalg.norm', 'norm', (['x_e'], {}), '(x_e)\n', (1839, 1844), False, 'from numpy.linalg import norm\n'), ((1805, 1822), 'numpy.dot', 'np.dot', (['n_ae', 'x_e'], {}), '(n_ae, x_e)\n', (1811, 1822), True, 'import numpy as np\n')] |
import numpy as np
import networkx as nx
import w20data
import itin
servername = 'memex.locale'
serverport = 18000
G = nx.Graph()
wstore = w20data.Wstorage(ediff=itin.ediff, fpdiff=itin.dist, ntyp=itin.ntyp)
saddlehistory = []
localhistory = []
types = np.array([0])
wdis = []
wmodes = []
wsads = []
wlocs = []
wvs = []
pmodes = []
gmodes = []
bestdist = 1.0
bestmdist = 100.
gbests = []
leader = None
pbests = []
gbest = None
fitpbest = []
x0s = []
ifpso = []
xslist = []
yslist = []
xllist = []
yllist = []
gbestx = None
gbesty = None
pbestx = []
pbesty = []
pdistx = []
pdisty = []
ifpsox = []
ifpsoy = []
xlocs = []
ylocs = []
vx = []
vy = []
gdir = ''
ddir = ''
xmdb = [] # minima database from x side
xsdb = [] # saddle points database from x side
ymdb = [] # minima database from y side
ysdb = [] # saddle points database from y side
nidp = 0
reace = 0.0
evodata = []
product = None
reactant = None
xdirs = []
ydirs = []
evox = []
evoy = []
stepx = []
stepy = []
bestbe = 10000.
bestpath = []
xen = []
yen = []
| [
"numpy.array",
"w20data.Wstorage",
"networkx.Graph"
] | [((121, 131), 'networkx.Graph', 'nx.Graph', ([], {}), '()\n', (129, 131), True, 'import networkx as nx\n'), ((141, 209), 'w20data.Wstorage', 'w20data.Wstorage', ([], {'ediff': 'itin.ediff', 'fpdiff': 'itin.dist', 'ntyp': 'itin.ntyp'}), '(ediff=itin.ediff, fpdiff=itin.dist, ntyp=itin.ntyp)\n', (157, 209), False, 'import w20data\n'), ((257, 270), 'numpy.array', 'np.array', (['[0]'], {}), '([0])\n', (265, 270), True, 'import numpy as np\n')] |
#!/usr/bin/env python
import numpy as np
import rospy
from std_msgs.msg import Int32
from geometry_msgs.msg import PoseStamped
from styx_msgs.msg import Lane, Waypoint
from scipy.spatial import KDTree
import math
'''
This node will publish waypoints from the car's current position to some `x` distance ahead.
As mentioned in the doc, you should ideally first implement a version which does not care
about traffic lights or obstacles.
Once you have created dbw_node, you will update this node to use the status of traffic lights too.
Please note that our simulator also provides the exact location of traffic lights and their
current status in `/vehicle/traffic_lights` message. You can use this message to build this node
as well as to verify your TL classifier.
TODO (for Yousuf and Aaron): Stopline location for each traffic light.
'''
LOOKAHEAD_WPS = 100 # Number of waypoints we will publish. You can change this number
#decelMax = 0.5
class WaypointUpdater(object):
def __init__(self):
rospy.init_node('waypoint_updater')
rospy.Subscriber('/current_pose', PoseStamped, self.pose_cb, queue_size=1)
rospy.Subscriber('/base_waypoints', Lane, self.waypoints_cb, queue_size=1)
# TODO: Add a subscriber for /traffic_waypoint and /obstacle_waypoint below
rospy.Subscriber('/traffic_waypoint', Int32, self.traffic_cb)
# rospy.Subscriber('/obstacle_waypoint', Int32, self.obstacle_cb)
self.final_waypoints_pub = rospy.Publisher('final_waypoints', Lane, queue_size=1)
"""
Initialize variables before callback is called
"""
# TODO: Add other member variables you need below
self.pose = None
self.base_waypoints = None
self.waypoints_2d = None
self.waypoint_tree = None
self.traffic = -1
self.decelMax = 0.5
"""
Arrenge publishing frequency less than 50 since Autoware
works on 50
"""
rate = rospy.Rate(30)
while not rospy.is_shutdown():
if self.pose and self.waypoint_tree:
final_lane = self.closestWp2Lane()
self.final_waypoints_pub.publish(final_lane)
rate.sleep()
def closestWp2Lane(self):
"""
Obtain x,y pose points from custom message type Lane
Use query to return position and index
query:
1st arg ... an array of points to query.
2nd arg ... the number of nearest neighbors to return.
returns:
d: the distances to the nearest neighbors.
i: the locations of the neighbors in self.data.
"""
x = self.pose.pose.position.x
y = self.pose.pose.position.y
indexClosest = self.waypoint_tree.query([x,y], 1)[1]
"""
Obtain the index of closest waypoint and the previos point
Ensure the point is in front of the current point by checing with
dot product calculation. If dot product > 0 it is behind and
take the closest which is next one.
"""
ptClosest = self.waypoints_2d[indexClosest]
ptPrev = self.waypoints_2d[indexClosest - 1]
if np.dot(np.array(ptClosest) - np.array(ptPrev), np.array([x,y]) - np.array(ptClosest)) > 0:
indexClosest = (indexClosest + 1) % len(self.waypoints_2d)
"""
Create lane object
Check if traffic light waypoint is further than created waypoints,
no need for slowing down. Use created waypoints and dont care about
traffic lights. Otherwise car needs to slow down.
"""
lane = Lane()
lane.header = self.base_waypoints.header
createdWaypoints = self.base_waypoints.waypoints[indexClosest:(indexClosest + LOOKAHEAD_WPS)]
if self.traffic == -1 or (self.traffic >= (indexClosest + LOOKAHEAD_WPS)):
lane.waypoints = createdWaypoints
else:
lane.waypoints = self.decelWaypoints(createdWaypoints, indexClosest)
return lane
def decelWaypoints(self, waypoints, indexClosest):
"""
Create a new list of waypoints
To arrenge safe stoppic distance take the 3 previous index to stop
on time behind the line.
Calculate the stoppind distance by using "distance" function
Obtain the velocity. Decreasing level is related with the distance.
If distance is smaller, velocity should be smaller too to be prepared
to stop on time. Multiply but constant to have a linear deceleration
If velocity is small enough make the car stop.
Append it to waypoint object linear twist
"""
temp = []
indexStop = max(self.traffic - indexClosest - 3, 0)
for i, wp in enumerate(waypoints):
p = Waypoint()
p.pose = wp.pose
distStop = self.distance(waypoints, i, indexStop)
vel = math.sqrt(2 * self.decelMax * distStop)
if vel < 1.:
vel = 0.
p.twist.twist.linear.x = min(vel, wp.twist.twist.linear.x)
temp.append(p)
"""
Log info to better check on cmd window
"""
d = self.distance(waypoints, 0, indexStop)
rospy.loginfo('target vel={}, distance to the stopline={}'.format(temp[0].twist.twist.linear.x, d))
return temp
def pose_cb(self, msg):
"""
# Callback function of subscriber to Car's pose
"""
self.pose = msg
def waypoints_cb(self, waypoints):
"""
# Callback function of subscriber to waypoints
Store waypoints in in object
Latched subscriber -> callback is called once
In order to find closest waypoint to the car use KDTree from scipy
Search through waypoints
Obtain waypoints x and y and use on KDTree
"""
self.base_waypoints = waypoints
ref_v = waypoints.waypoints[0].twist.twist.linear.x
self.decelMax = ref_v * ref_v / 20. # Begin applying the brake about 20m before
rospy.logwarn('MAX_DECEL={}'.format(self.decelMax))
if not self.waypoints_2d:
self.waypoints_2d = [[waypoint.pose.pose.position.x, waypoint.pose.pose.position.y] for waypoint in waypoints.waypoints]
self.waypoint_tree = KDTree(self.waypoints_2d)
def traffic_cb(self, msg):
# TODO: Callback for /traffic_waypoint message. Implement
"""
# Callback function of traffic light waypoint
"""
self.traffic = msg.data
def obstacle_cb(self, msg):
# TODO: Callback for /obstacle_waypoint message. We will implement it later
pass
"""
def get_waypoint_velocity(self, waypoint):
return waypoint.twist.twist.linear.x
def set_waypoint_velocity(self, waypoints, waypoint, velocity):
waypoints[waypoint].twist.twist.linear.x = velocity
"""
def distance(self, waypoints, wp1, wp2):
dist = 0
dl = lambda a, b: math.sqrt((a.x-b.x)**2 + (a.y-b.y)**2 + (a.z-b.z)**2)
for i in range(wp1, wp2+1):
dist += dl(waypoints[wp1].pose.pose.position, waypoints[i].pose.pose.position)
wp1 = i
return dist
if __name__ == '__main__':
try:
WaypointUpdater()
except rospy.ROSInterruptException:
rospy.logerr('Could not start waypoint updater node.') | [
"rospy.logerr",
"rospy.Subscriber",
"math.sqrt",
"styx_msgs.msg.Lane",
"rospy.Publisher",
"rospy.Rate",
"rospy.is_shutdown",
"numpy.array",
"rospy.init_node",
"scipy.spatial.KDTree",
"styx_msgs.msg.Waypoint"
] | [((1010, 1045), 'rospy.init_node', 'rospy.init_node', (['"""waypoint_updater"""'], {}), "('waypoint_updater')\n", (1025, 1045), False, 'import rospy\n'), ((1063, 1137), 'rospy.Subscriber', 'rospy.Subscriber', (['"""/current_pose"""', 'PoseStamped', 'self.pose_cb'], {'queue_size': '(1)'}), "('/current_pose', PoseStamped, self.pose_cb, queue_size=1)\n", (1079, 1137), False, 'import rospy\n'), ((1146, 1220), 'rospy.Subscriber', 'rospy.Subscriber', (['"""/base_waypoints"""', 'Lane', 'self.waypoints_cb'], {'queue_size': '(1)'}), "('/base_waypoints', Lane, self.waypoints_cb, queue_size=1)\n", (1162, 1220), False, 'import rospy\n'), ((1314, 1375), 'rospy.Subscriber', 'rospy.Subscriber', (['"""/traffic_waypoint"""', 'Int32', 'self.traffic_cb'], {}), "('/traffic_waypoint', Int32, self.traffic_cb)\n", (1330, 1375), False, 'import rospy\n'), ((1486, 1540), 'rospy.Publisher', 'rospy.Publisher', (['"""final_waypoints"""', 'Lane'], {'queue_size': '(1)'}), "('final_waypoints', Lane, queue_size=1)\n", (1501, 1540), False, 'import rospy\n'), ((2011, 2025), 'rospy.Rate', 'rospy.Rate', (['(30)'], {}), '(30)\n', (2021, 2025), False, 'import rospy\n'), ((3702, 3708), 'styx_msgs.msg.Lane', 'Lane', ([], {}), '()\n', (3706, 3708), False, 'from styx_msgs.msg import Lane, Waypoint\n'), ((2044, 2063), 'rospy.is_shutdown', 'rospy.is_shutdown', ([], {}), '()\n', (2061, 2063), False, 'import rospy\n'), ((4895, 4905), 'styx_msgs.msg.Waypoint', 'Waypoint', ([], {}), '()\n', (4903, 4905), False, 'from styx_msgs.msg import Lane, Waypoint\n'), ((5016, 5055), 'math.sqrt', 'math.sqrt', (['(2 * self.decelMax * distStop)'], {}), '(2 * self.decelMax * distStop)\n', (5025, 5055), False, 'import math\n'), ((6421, 6446), 'scipy.spatial.KDTree', 'KDTree', (['self.waypoints_2d'], {}), '(self.waypoints_2d)\n', (6427, 6446), False, 'from scipy.spatial import KDTree\n'), ((7122, 7187), 'math.sqrt', 'math.sqrt', (['((a.x - b.x) ** 2 + (a.y - b.y) ** 2 + (a.z - b.z) ** 2)'], {}), '((a.x - b.x) ** 2 + (a.y - 
b.y) ** 2 + (a.z - b.z) ** 2)\n', (7131, 7187), False, 'import math\n'), ((7455, 7509), 'rospy.logerr', 'rospy.logerr', (['"""Could not start waypoint updater node."""'], {}), "('Could not start waypoint updater node.')\n", (7467, 7509), False, 'import rospy\n'), ((3254, 3273), 'numpy.array', 'np.array', (['ptClosest'], {}), '(ptClosest)\n', (3262, 3273), True, 'import numpy as np\n'), ((3276, 3292), 'numpy.array', 'np.array', (['ptPrev'], {}), '(ptPrev)\n', (3284, 3292), True, 'import numpy as np\n'), ((3294, 3310), 'numpy.array', 'np.array', (['[x, y]'], {}), '([x, y])\n', (3302, 3310), True, 'import numpy as np\n'), ((3312, 3331), 'numpy.array', 'np.array', (['ptClosest'], {}), '(ptClosest)\n', (3320, 3331), True, 'import numpy as np\n')] |
import numpy as np
import math
import pandas as pd
from precise.skaters.covarianceutil.pdutil import square_to_square_dataframe, square_to_column_series, square_to_index_series
# Functions acting on cov, corrcoef and other square matrices.
# If a square pd.DataFrame is supplied instead of an ndarray, its index and columns are preserved.
def cov_to_corrcoef(a):
    """Convert a covariance matrix into the corresponding correlation matrix.

    :param a: square np.ndarray covariance matrix, or a square pd.DataFrame
              (index/columns preserved in that case)
    :return: matrix of the same kind with unit diagonal
    """
    if isinstance(a, pd.DataFrame):
        return square_to_square_dataframe(a, cov_to_corrcoef)
    v = np.diagonal(a)
    # denominator[i, j] = sqrt(var_i * var_j)
    return a / np.sqrt(np.outer(v, v))
def normalize(x):
    """Scale a collection so its entries sum to one.

    :param x: dict (values normalized, keys kept), np.ndarray, or list/tuple
    :return: same kind of container with entries divided by their sum
    """
    if isinstance(x, dict):
        return normalize_dict_values(x)
    try:
        # ndarray-like containers support vectorized division
        return x / sum(x)
    except TypeError:
        # Plain lists don't support '/'; fall back to element-wise division.
        # FIX: hoist sum(x) out of the comprehension — it was recomputed for
        # every element, making this branch O(n^2).
        total = sum(x)
        return [xi / total for xi in x]
def normalize_dict_values(d):
    """Return a dict with the same keys whose values are scaled to sum to one."""
    scaled_values = normalize(list(d.values()))
    return dict(zip(d.keys(), scaled_values))
def multiply_diag(a, phi, copy=True):
    """Scale the diagonal entries of a square matrix by phi.

    :param a: square np.ndarray or pd.DataFrame
    :param phi: multiplier applied to each diagonal entry
    :param copy: if True operate on a copy; if False mutate `a` in place
    :return: matrix with diagonal scaled (the input itself when copy=False)
    """
    if isinstance(a, pd.DataFrame):
        return square_to_square_dataframe(df=a, cov_func=multiply_diag, phi=phi, copy=copy)
    if copy:
        a = np.copy(a)
    # Vectorized equivalent of looping over a[i, i] = a[i, i] * phi
    idx = np.arange(np.shape(a)[0])
    a[idx, idx] = a[idx, idx] * phi
    return a
def multiply_off_diag(a, phi, copy=True):
    """Scale the off-diagonal entries of a square matrix by phi.

    :param a: square np.ndarray or pd.DataFrame
    :param phi: multiplier applied to every off-diagonal entry
    :param copy: kept for interface symmetry with multiply_diag. NOTE(review):
                 the ndarray path never mutated `a` in place even with
                 copy=False (the original rebound a local via `a = phi*a`),
                 so `copy` has no observable effect here; a fresh array is
                 always returned.
    :return: new matrix with the off-diagonal scaled and the diagonal intact
    """
    if isinstance(a, pd.DataFrame):
        return square_to_square_dataframe(df=a, cov_func=multiply_off_diag, phi=phi, copy=copy)
    # FIX: the copy=True branch used to np.copy(a) before `phi * a`, which
    # already allocates a fresh array — the extra copy was pure waste.
    d = np.diagonal(a)               # snapshot of the original diagonal (view of `a`)
    scaled = phi * a                 # fresh array; input is left untouched
    idx = np.arange(np.shape(a)[0])
    scaled[idx, idx] = d             # restore the diagonal
    return scaled
def grand_mean(a):
    """Mean of the diagonal entries of a square matrix.

    :param a: square np.ndarray or pd.DataFrame
    :return: scalar mean of the diagonal
    """
    values = a.values if isinstance(a, pd.DataFrame) else a
    return np.mean(np.diagonal(values))
def grand_shrink(a, lmbd, copy=True):
    """Shrink a square matrix towards mu*I, where mu is the mean diagonal entry.

    :param a: square np.ndarray or pd.DataFrame
    :param lmbd: shrinkage weight; 0 returns a unchanged, 1 returns mu*I
    :param copy: if True, operate on a copy of a
    :return: (1-lmbd)*a + lmbd*mu*I
    """
    if isinstance(a, pd.DataFrame):
        # FIX: previously called square_to_square_dataframe(a, grand_shrink, a=a, lmbd=lmbd),
        # passing the matrix both positionally and as a keyword, which duplicates
        # the `a` argument when the wrapper invokes grand_shrink (compare the
        # call in affine_inversion). Also forward `copy` for consistency.
        return square_to_square_dataframe(a, grand_shrink, lmbd=lmbd, copy=copy)
    if copy:
        return grand_shrink(np.copy(a), lmbd=lmbd, copy=False)
    n = np.shape(a)[0]
    mu = grand_mean(a)
    return (1 - lmbd) * a + lmbd * mu * np.eye(n)
def affine_inversion(a, phi=1.01, lmbd=0.01, copy=True):
    """Invert ``a`` after regularising it.

    Combination of "ridge" (diagonal multiplied by ``phi``) and
    "shrinkage" (towards ``mu * I`` with weight ``lmbd``), falling back
    to the pseudo-inverse when the shrunk matrix is still singular.

    :param a: square np.ndarray or pd.DataFrame
    :param phi: ridge multiplier for the diagonal
    :param lmbd: shrinkage intensity
    :return: (pseudo-)inverse of the regularised matrix
    """
    if isinstance(a, pd.DataFrame):
        return square_to_square_dataframe(a, affine_inversion, phi=phi, lmbd=lmbd)
    regularised = affine_shrink(a=a, phi=phi, lmbd=lmbd)
    try:
        return np.linalg.inv(regularised)
    except np.linalg.LinAlgError:
        return np.linalg.pinv(regularised)
def affine_shrink(a, phi=1.01, lmbd=0.01, copy=True):
    """Regularise a square matrix: ridge the diagonal, then shrink to mu*I."""
    if isinstance(a, pd.DataFrame):
        return square_to_square_dataframe(a, affine_shrink, phi=phi, lmbd=lmbd)
    ridged = multiply_diag(a, phi=phi, copy=copy)
    return grand_shrink(ridged, lmbd=lmbd, copy=copy)
def is_symmetric(a, rtol=1e-05, atol=1e-08):
    """True when ``a`` equals its transpose within the given tolerances."""
    values = a.values if isinstance(a, pd.DataFrame) else a
    return np.allclose(values, values.T, rtol=rtol, atol=atol)
def to_symmetric(a):
    """Symmetrize a square matrix by averaging it with its transpose."""
    if isinstance(a, pd.DataFrame):
        return square_to_square_dataframe(a, to_symmetric)
    return 0.5 * (a + a.T)
def dense_weights_from_dict(d: dict, shape=None, n_dim: int = None):
    """Convert {1:3.141,0:123} -> [ 123, 3.141 ]

    :param d: Dictionary whose keys should be 0..n
    :param shape: Optional output shape; defaults to (n_dim,)
    :param n_dim: Optional number of entries; defaults to max key + 1
    :return: np.ndarray with d[i] written at position i
    """
    # BUG FIX: previously, when both shape and n_dim were None, shape was
    # never assigned (np.ndarray(shape=None) fails) and n_dim was derived
    # from max(d.values()) although the docstring says the KEYS are 0..n;
    # also, passing shape without n_dim made range(None) fail.
    if n_dim is None:
        n_dim = shape[0] if shape is not None else max(d.keys()) + 1
    if shape is None:
        shape = (n_dim,)
    w = np.empty(shape=shape)
    for i in range(n_dim):
        w[i] = d[i]
    return w
def nearest_pos_def(a):
    """Find the nearest positive-definite matrix to input ``a``.

    A Python/Numpy port of the `nearestSPD` MATLAB code [1], which
    credits [2]: symmetrize ``a``, project onto the PSD cone via the SVD
    polar factor, then nudge the diagonal upward until Cholesky succeeds.

    [1] https://www.mathworks.com/matlabcentral/fileexchange/42885-nearestspd
    [2] Higham, "Computing a nearest symmetric positive semidefinite
    matrix" (1988): https://doi.org/10.1016/0024-3795(88)90223-6
    """
    if isinstance(a, pd.DataFrame):
        return square_to_square_dataframe(a, nearest_pos_def)
    else:
        b = to_symmetric(a)
        _, s, V = np.linalg.svd(b)
        # H = V' diag(s) V is the symmetric polar factor of b
        H = np.dot(V.T, np.dot(np.diag(s), V))
        a2 = (b + H) / 2
        # re-symmetrize to remove floating-point asymmetry
        a3 = (a2 + a2.T) / 2
        if is_positive_def(a3):
            return a3
        spacing = np.spacing(np.linalg.norm(a))
        # The above is different from [1]. It appears that MATLAB's `chol` Cholesky
        # decomposition will accept matrixes with exactly 0-eigenvalue, whereas
        # Numpy's will not. So where [1] uses `eps(mineig)` (where `eps` is Matlab
        # for `np.spacing`), we use the above definition. CAVEAT: our `spacing`
        # will be much larger than [1]'s `eps(mineig)`, since `mineig` is usually on
        # the order of 1e-16, and `eps(1e-16)` is on the order of 1e-34, whereas
        # `spacing` will, for Gaussian random matrixes of small dimension, be on
        # othe order of 1e-16. In practice, both ways converge, as the unit test
        # below suggests.
        I = np.eye(a.shape[0])
        k = 1
        while not is_positive_def(a3):
            # shift the spectrum up by slightly more than the most negative
            # eigenvalue; the k**2 factor accelerates convergence
            mineig = np.min(np.real(np.linalg.eigvals(a3)))
            a3 += I * (-mineig * k**2 + spacing)
            k += 1
        return a3
def is_positive_def(a):
    """Return True when ``a`` is positive-definite (Cholesky succeeds)."""
    if isinstance(a, pd.DataFrame):
        return is_positive_def(a.values)
    try:
        np.linalg.cholesky(a)
    except np.linalg.LinAlgError:
        return False
    return True
def make_diagonal(a):
    """Zero the off-diagonal entries, keeping only the diagonal."""
    if isinstance(a, pd.DataFrame):
        return square_to_square_dataframe(a, make_diagonal)
    return np.diag(np.diagonal(a))
def mean_off_diag(a):
    """Average the off-diagonal entries of a square matrix.

    NOTE(review): entries are truncated to int via ``np.vectorize(int)``
    before averaging, and subtracting ``np.eye(n)`` assumes a unit
    diagonal (i.e. a correlation matrix). For generic float matrices this
    returns the mean of the *truncated* off-diagonal entries — confirm
    the truncation is intentional.
    """
    if isinstance(a, pd.DataFrame):
        return square_to_square_dataframe(a, mean_off_diag)
    else:
        n = np.shape(a)[0]
        b = np.vectorize(int)(a)  # element-wise truncation towards zero
        b = b - np.eye(n)  # remove the (assumed unit) diagonal
        the_sum = np.sum(b,axis=None)
        return the_sum/(n*(n-1))  # there are n*(n-1) off-diagonal entries
def corr_distance(corr, expon=0.5):
    """Map correlations into distances: ((1 - corr) / 2) ** expon."""
    if isinstance(corr, pd.DataFrame):
        return square_to_square_dataframe(corr, corr_distance, expon=expon)
    dissimilarity = (1 - np.array(corr)) / 2.0
    return dissimilarity ** expon
def cov_distance(cov, expon=0.5):
    """Map a covariance matrix into distances between variables."""
    if isinstance(cov, pd.DataFrame):
        return square_to_square_dataframe(cov, cov_distance, expon=expon)
    return corr_distance(corr=cov_to_corrcoef(cov), expon=expon)
def try_invert(a, **affine_inversion_kwargs):
    """Invert a matrix by whatever means.

    Tries the exact inverse, then the pseudo-inverse, and finally falls
    back to ridge + shrinkage regularised inversion.
    """
    if isinstance(a, pd.DataFrame):
        return square_to_square_dataframe(a, try_invert, **affine_inversion_kwargs)
    for inverter in (np.linalg.inv, np.linalg.pinv):
        try:
            return inverter(a)
        except np.linalg.LinAlgError:
            continue
    return affine_inversion(a, **affine_inversion_kwargs)
def weaken_cov(cov, diag_multipliers:[float], off_diag_additional_factor=0.9):
    """Augment a covariance matrix.

    Diagonal entry i is multiplied by diag_multipliers[i]; off-diagonal
    entry (i, j) is multiplied by
    off_diag_additional_factor * sqrt(d_i * d_j).

    :param cov: square covariance matrix (np.ndarray or pd.DataFrame)
    :param diag_multipliers: vector of per-variable diagonal multipliers
    :param off_diag_additional_factor: additional multiplicative factor
    :return: new matrix; the input is not modified
    """
    if isinstance(cov, pd.DataFrame):
        return square_to_square_dataframe(cov, weaken_cov, diag_multipliers=diag_multipliers, off_diag_additional_factor=off_diag_additional_factor)
    weakened = np.copy(cov)
    for row, d_row in enumerate(diag_multipliers):
        weakened[row, row] = weakened[row, row] * d_row
        for col, d_col in enumerate(diag_multipliers):
            if col != row:
                weakened[row, col] = off_diag_additional_factor * math.sqrt(d_row * d_col) * weakened[row, col]
    return weakened
def approx_diag_of_inv(a):
    """Approximate the diagonal entries of the inverse of a matrix.

    Not implemented yet.
    """
    # TODO: https://cholmod-extra.readthedocs.io/en/latest/functions.html#sparse-inverse
    raise NotImplementedError
def bottom_schur_complement(A, B, C, D, gamma=1.0):
    """Return D - gamma * C A^{-1} B by swapping the block roles."""
    return schur_complement(A=D, B=C, C=B, D=A, gamma=gamma)
def schur_complement(A,B,C,D, gamma=1.0):
    """Return A - gamma * B D^{-1} C."""
    return _schur_complement_solve(A=A, B=B, C=C, D=D, gamma=gamma)
def _schur_complement_solve(A, B, C, D, gamma, warn=False, throw=False):
    """Return A - gamma * B D^{-1} C via a linear solve for D^{-1} C.

    With ``warn``/``throw`` set, the result is cross-checked against the
    pseudo-inverse variant whenever D has full rank.
    """
    d_inv_c = inverse_multiply(a=D, b=C, warn=warn, throw=throw)
    M = A - gamma * np.dot(B, d_inv_c)
    if warn or throw:
        if np.linalg.matrix_rank(D) == max(np.shape(D)):
            reference = _schur_complement_pseudo(A=A, B=B, C=C, D=D, gamma=gamma)
            if not np.allclose(M, reference):
                print('schurly not')
        elif warn:
            print(' D is rank deficient, so schur complement is method dependent ')
    return M
def _schur_complement_pseudo(A, B, C, D, gamma):
return A - gamma*np.dot( np.dot(B, np.linalg.pinv(D)),C)
def _schur_complement_direct(A, B, C, D, gamma):
return A - gamma*np.dot( np.dot(B, np.linalg.pinv(D)),C)
def inverse_multiply(a, b, warn=False, throw=False):
    """Return x = a^{-1} b, computed as np.linalg.solve(a, b).

    With ``warn``/``throw`` set, the solve is cross-checked against an
    explicit inverse when the result has full rank.
    """
    x = np.linalg.solve(a, b)
    if not (warn or throw):
        return x
    if np.linalg.matrix_rank(x) < max(np.shape(a)):
        print('a is rank deficient so result is method dependent ')
        return x
    explicit = np.dot(np.linalg.inv(a), b)
    if not np.allclose(explicit, x):
        if warn:
            print('schurly not inverse multiply')
        if throw:
            raise ValueError
    return x
def multiply_by_inverse(a, b, throw=True):
    """Return x = a b^{-1} via a transposed linear solve.

    Using x^T = solve(b^T, a^T) avoids forming b^{-1} explicitly; the
    result is still cross-checked against np.dot(a, inv(b)).

    :raises ValueError: when the shapes are incompatible
    :raises Exception: when the cross-check fails and ``throw`` is True
    """
    # BUG FIX: the shape check previously used np.allclose(x, y, z), whose
    # third positional argument is ``rtol`` — so only two dimensions were
    # compared, with an enormous tolerance. Compare all three exactly.
    if not (np.shape(b)[0] == np.shape(b)[1] == np.shape(a)[1]):
        raise ValueError('dims wrong')
    # x^T = solve(b^T, a^T)  because  b^T x^T = a^T
    xt = np.linalg.solve(np.transpose(b), np.transpose(a))
    x = np.transpose(xt)
    x_check = np.dot(a, np.linalg.inv(b))
    if throw and not np.allclose(x, x_check):
        raise Exception('schurly not')
    return x
def parity(cov, w):
    """Return the covariance matrix applied to the weight vector, cov @ w.

    BUG FIX: the product was previously computed and discarded, so the
    function always returned None.
    """
    return np.dot(cov, w)
| [
"numpy.linalg.eigvals",
"numpy.sum",
"numpy.allclose",
"numpy.shape",
"numpy.linalg.svd",
"numpy.linalg.norm",
"numpy.diag",
"numpy.ndarray",
"numpy.linalg.solve",
"numpy.linalg.pinv",
"numpy.copy",
"numpy.transpose",
"numpy.linalg.matrix_rank",
"numpy.linalg.cholesky",
"numpy.diagonal",... | [((4022, 4045), 'numpy.ndarray', 'np.ndarray', ([], {'shape': 'shape'}), '(shape=shape)\n', (4032, 4045), True, 'import numpy as np\n'), ((10048, 10069), 'numpy.linalg.solve', 'np.linalg.solve', (['a', 'b'], {}), '(a, b)\n', (10063, 10069), True, 'import numpy as np\n'), ((10778, 10793), 'numpy.transpose', 'np.transpose', (['b'], {}), '(b)\n', (10790, 10793), True, 'import numpy as np\n'), ((10803, 10818), 'numpy.transpose', 'np.transpose', (['a'], {}), '(a)\n', (10815, 10818), True, 'import numpy as np\n'), ((10828, 10851), 'numpy.linalg.solve', 'np.linalg.solve', (['bt', 'at'], {}), '(bt, at)\n', (10843, 10851), True, 'import numpy as np\n'), ((10860, 10876), 'numpy.transpose', 'np.transpose', (['xt'], {}), '(xt)\n', (10872, 10876), True, 'import numpy as np\n'), ((11045, 11059), 'numpy.dot', 'np.dot', (['cov', 'w'], {}), '(cov, w)\n', (11051, 11059), True, 'import numpy as np\n'), ((405, 451), 'precise.skaters.covarianceutil.pdutil.square_to_square_dataframe', 'square_to_square_dataframe', (['a', 'cov_to_corrcoef'], {}), '(a, cov_to_corrcoef)\n', (431, 451), False, 'from precise.skaters.covarianceutil.pdutil import square_to_square_dataframe, square_to_column_series, square_to_index_series\n'), ((482, 496), 'numpy.diagonal', 'np.diagonal', (['a'], {}), '(a)\n', (493, 496), True, 'import numpy as np\n'), ((519, 579), 'numpy.sqrt', 'np.sqrt', (['(variances[np.newaxis, :] * variances[:, np.newaxis])'], {}), '(variances[np.newaxis, :] * variances[:, np.newaxis])\n', (526, 579), True, 'import numpy as np\n'), ((1003, 1079), 'precise.skaters.covarianceutil.pdutil.square_to_square_dataframe', 'square_to_square_dataframe', ([], {'df': 'a', 'cov_func': 'multiply_diag', 'phi': 'phi', 'copy': 'copy'}), '(df=a, cov_func=multiply_diag, phi=phi, copy=copy)\n', (1029, 1079), False, 'from precise.skaters.covarianceutil.pdutil import square_to_square_dataframe, square_to_column_series, square_to_index_series\n'), ((1423, 1508), 
'precise.skaters.covarianceutil.pdutil.square_to_square_dataframe', 'square_to_square_dataframe', ([], {'df': 'a', 'cov_func': 'multiply_off_diag', 'phi': 'phi', 'copy': 'copy'}), '(df=a, cov_func=multiply_off_diag, phi=phi, copy=copy\n )\n', (1449, 1508), False, 'from precise.skaters.covarianceutil.pdutil import square_to_square_dataframe, square_to_column_series, square_to_index_series\n'), ((2094, 2153), 'precise.skaters.covarianceutil.pdutil.square_to_square_dataframe', 'square_to_square_dataframe', (['a', 'grand_shrink'], {'a': 'a', 'lmbd': 'lmbd'}), '(a, grand_shrink, a=a, lmbd=lmbd)\n', (2120, 2153), False, 'from precise.skaters.covarianceutil.pdutil import square_to_square_dataframe, square_to_column_series, square_to_index_series\n'), ((2658, 2725), 'precise.skaters.covarianceutil.pdutil.square_to_square_dataframe', 'square_to_square_dataframe', (['a', 'affine_inversion'], {'phi': 'phi', 'lmbd': 'lmbd'}), '(a, affine_inversion, phi=phi, lmbd=lmbd)\n', (2684, 2725), False, 'from precise.skaters.covarianceutil.pdutil import square_to_square_dataframe, square_to_column_series, square_to_index_series\n'), ((3062, 3126), 'precise.skaters.covarianceutil.pdutil.square_to_square_dataframe', 'square_to_square_dataframe', (['a', 'affine_shrink'], {'phi': 'phi', 'lmbd': 'lmbd'}), '(a, affine_shrink, phi=phi, lmbd=lmbd)\n', (3088, 3126), False, 'from precise.skaters.covarianceutil.pdutil import square_to_square_dataframe, square_to_column_series, square_to_index_series\n'), ((3455, 3496), 'numpy.allclose', 'np.allclose', (['a', 'a.T'], {'rtol': 'rtol', 'atol': 'atol'}), '(a, a.T, rtol=rtol, atol=atol)\n', (3466, 3496), True, 'import numpy as np\n'), ((3571, 3614), 'precise.skaters.covarianceutil.pdutil.square_to_square_dataframe', 'square_to_square_dataframe', (['a', 'to_symmetric'], {}), '(a, to_symmetric)\n', (3597, 3614), False, 'from precise.skaters.covarianceutil.pdutil import square_to_square_dataframe, square_to_column_series, square_to_index_series\n'), 
((4575, 4621), 'precise.skaters.covarianceutil.pdutil.square_to_square_dataframe', 'square_to_square_dataframe', (['a', 'nearest_pos_def'], {}), '(a, nearest_pos_def)\n', (4601, 4621), False, 'from precise.skaters.covarianceutil.pdutil import square_to_square_dataframe, square_to_column_series, square_to_index_series\n'), ((4678, 4694), 'numpy.linalg.svd', 'np.linalg.svd', (['b'], {}), '(b)\n', (4691, 4694), True, 'import numpy as np\n'), ((5593, 5611), 'numpy.eye', 'np.eye', (['a.shape[0]'], {}), '(a.shape[0])\n', (5599, 5611), True, 'import numpy as np\n'), ((6206, 6250), 'precise.skaters.covarianceutil.pdutil.square_to_square_dataframe', 'square_to_square_dataframe', (['a', 'make_diagonal'], {}), '(a, make_diagonal)\n', (6232, 6250), False, 'from precise.skaters.covarianceutil.pdutil import square_to_square_dataframe, square_to_column_series, square_to_index_series\n'), ((6371, 6415), 'precise.skaters.covarianceutil.pdutil.square_to_square_dataframe', 'square_to_square_dataframe', (['a', 'mean_off_diag'], {}), '(a, mean_off_diag)\n', (6397, 6415), False, 'from precise.skaters.covarianceutil.pdutil import square_to_square_dataframe, square_to_column_series, square_to_index_series\n'), ((6530, 6550), 'numpy.sum', 'np.sum', (['b'], {'axis': 'None'}), '(b, axis=None)\n', (6536, 6550), True, 'import numpy as np\n'), ((6754, 6814), 'precise.skaters.covarianceutil.pdutil.square_to_square_dataframe', 'square_to_square_dataframe', (['corr', 'corr_distance'], {'expon': 'expon'}), '(corr, corr_distance, expon=expon)\n', (6780, 6814), False, 'from precise.skaters.covarianceutil.pdutil import square_to_square_dataframe, square_to_column_series, square_to_index_series\n'), ((7046, 7104), 'precise.skaters.covarianceutil.pdutil.square_to_square_dataframe', 'square_to_square_dataframe', (['cov', 'cov_distance'], {'expon': 'expon'}), '(cov, cov_distance, expon=expon)\n', (7072, 7104), False, 'from precise.skaters.covarianceutil.pdutil import square_to_square_dataframe, 
square_to_column_series, square_to_index_series\n'), ((7418, 7486), 'precise.skaters.covarianceutil.pdutil.square_to_square_dataframe', 'square_to_square_dataframe', (['a', 'try_invert'], {}), '(a, try_invert, **affine_inversion_kwargs)\n', (7444, 7486), False, 'from precise.skaters.covarianceutil.pdutil import square_to_square_dataframe, square_to_column_series, square_to_index_series\n'), ((8109, 8247), 'precise.skaters.covarianceutil.pdutil.square_to_square_dataframe', 'square_to_square_dataframe', (['cov', 'weaken_cov'], {'diag_multipliers': 'diag_multipliers', 'off_diag_additional_factor': 'off_diag_additional_factor'}), '(cov, weaken_cov, diag_multipliers=\n diag_multipliers, off_diag_additional_factor=off_diag_additional_factor)\n', (8135, 8247), False, 'from precise.skaters.covarianceutil.pdutil import square_to_square_dataframe, square_to_column_series, square_to_index_series\n'), ((8268, 8280), 'numpy.copy', 'np.copy', (['cov'], {}), '(cov)\n', (8275, 8280), True, 'import numpy as np\n'), ((9333, 9357), 'numpy.linalg.matrix_rank', 'np.linalg.matrix_rank', (['D'], {}), '(D)\n', (9354, 9357), True, 'import numpy as np\n'), ((10901, 10917), 'numpy.linalg.inv', 'np.linalg.inv', (['b'], {}), '(b)\n', (10914, 10917), True, 'import numpy as np\n'), ((1123, 1133), 'numpy.copy', 'np.copy', (['a'], {}), '(a)\n', (1130, 1133), True, 'import numpy as np\n'), ((1547, 1557), 'numpy.copy', 'np.copy', (['a'], {}), '(a)\n', (1554, 1557), True, 'import numpy as np\n'), ((1680, 1690), 'numpy.diag', 'np.diag', (['a'], {}), '(a)\n', (1687, 1690), True, 'import numpy as np\n'), ((2197, 2207), 'numpy.copy', 'np.copy', (['a'], {}), '(a)\n', (2204, 2207), True, 'import numpy as np\n'), ((2827, 2852), 'numpy.linalg.inv', 'np.linalg.inv', (['shrunk_cov'], {}), '(shrunk_cov)\n', (2840, 2852), True, 'import numpy as np\n'), ((4881, 4898), 'numpy.linalg.norm', 'np.linalg.norm', (['a'], {}), '(a)\n', (4895, 4898), True, 'import numpy as np\n'), ((6022, 6043), 'numpy.linalg.cholesky', 
'np.linalg.cholesky', (['a'], {}), '(a)\n', (6040, 6043), True, 'import numpy as np\n'), ((6284, 6294), 'numpy.diag', 'np.diag', (['a'], {}), '(a)\n', (6291, 6294), True, 'import numpy as np\n'), ((6438, 6449), 'numpy.shape', 'np.shape', (['a'], {}), '(a)\n', (6446, 6449), True, 'import numpy as np\n'), ((6465, 6482), 'numpy.vectorize', 'np.vectorize', (['int'], {}), '(int)\n', (6477, 6482), True, 'import numpy as np\n'), ((6502, 6511), 'numpy.eye', 'np.eye', (['n'], {}), '(n)\n', (6508, 6511), True, 'import numpy as np\n'), ((7529, 7545), 'numpy.linalg.inv', 'np.linalg.inv', (['a'], {}), '(a)\n', (7542, 7545), True, 'import numpy as np\n'), ((9253, 9269), 'numpy.dot', 'np.dot', (['B', 'DinvC'], {}), '(B, DinvC)\n', (9259, 9269), True, 'import numpy as np\n'), ((9377, 9388), 'numpy.shape', 'np.shape', (['D'], {}), '(D)\n', (9385, 9388), True, 'import numpy as np\n'), ((10105, 10129), 'numpy.linalg.matrix_rank', 'np.linalg.matrix_rank', (['x'], {}), '(x)\n', (10126, 10129), True, 'import numpy as np\n'), ((10930, 10953), 'numpy.allclose', 'np.allclose', (['x', 'x_check'], {}), '(x, x_check)\n', (10941, 10953), True, 'import numpy as np\n'), ((1221, 1232), 'numpy.shape', 'np.shape', (['a'], {}), '(a)\n', (1229, 1232), True, 'import numpy as np\n'), ((1649, 1660), 'numpy.shape', 'np.shape', (['a'], {}), '(a)\n', (1657, 1660), True, 'import numpy as np\n'), ((2296, 2307), 'numpy.shape', 'np.shape', (['a'], {}), '(a)\n', (2304, 2307), True, 'import numpy as np\n'), ((2909, 2935), 'numpy.linalg.pinv', 'np.linalg.pinv', (['shrunk_cov'], {}), '(shrunk_cov)\n', (2923, 2935), True, 'import numpy as np\n'), ((4726, 4736), 'numpy.diag', 'np.diag', (['s'], {}), '(s)\n', (4733, 4736), True, 'import numpy as np\n'), ((9509, 9531), 'numpy.allclose', 'np.allclose', (['M', 'checkM'], {}), '(M, checkM)\n', (9520, 9531), True, 'import numpy as np\n'), ((10134, 10145), 'numpy.shape', 'np.shape', (['a'], {}), '(a)\n', (10142, 10145), True, 'import numpy as np\n'), ((10258, 10274), 
'numpy.linalg.inv', 'np.linalg.inv', (['a'], {}), '(a)\n', (10271, 10274), True, 'import numpy as np\n'), ((10298, 10316), 'numpy.allclose', 'np.allclose', (['x_', 'x'], {}), '(x_, x)\n', (10309, 10316), True, 'import numpy as np\n'), ((10681, 10692), 'numpy.shape', 'np.shape', (['b'], {}), '(b)\n', (10689, 10692), True, 'import numpy as np\n'), ((10697, 10708), 'numpy.shape', 'np.shape', (['b'], {}), '(b)\n', (10705, 10708), True, 'import numpy as np\n'), ((10713, 10724), 'numpy.shape', 'np.shape', (['a'], {}), '(a)\n', (10721, 10724), True, 'import numpy as np\n'), ((2388, 2397), 'numpy.eye', 'np.eye', (['n'], {}), '(n)\n', (2394, 2397), True, 'import numpy as np\n'), ((5701, 5722), 'numpy.linalg.eigvals', 'np.linalg.eigvals', (['a3'], {}), '(a3)\n', (5718, 5722), True, 'import numpy as np\n'), ((6846, 6860), 'numpy.array', 'np.array', (['corr'], {}), '(corr)\n', (6854, 6860), True, 'import numpy as np\n'), ((7624, 7641), 'numpy.linalg.pinv', 'np.linalg.pinv', (['a'], {}), '(a)\n', (7638, 7641), True, 'import numpy as np\n'), ((9779, 9796), 'numpy.linalg.pinv', 'np.linalg.pinv', (['D'], {}), '(D)\n', (9793, 9796), True, 'import numpy as np\n'), ((9891, 9908), 'numpy.linalg.pinv', 'np.linalg.pinv', (['D'], {}), '(D)\n', (9905, 9908), True, 'import numpy as np\n'), ((8515, 8533), 'math.sqrt', 'math.sqrt', (['(di * dj)'], {}), '(di * dj)\n', (8524, 8533), False, 'import math\n')] |
#!/usr/bin/env python3
import os
import sys
import argparse
import re
import itertools
import numpy as np
#PREFIX = os.environ['STATIC_ANALYSIS_KERNEL_DIR']
PREFIX = ""
# CLI: positional path to the SVF memory-SSA dump; --aset switches the
# output from deduplicated mempairs to aliased sets.
parser = argparse.ArgumentParser(description='Get aliased pair from the SVF result.')
parser.add_argument('mssa')
parser.add_argument('--aset', dest='aset', action='store_true')
args = parser.parse_args()
mssa = args.mssa  # input file path
aset = args.aset  # True -> aliased-set output mode
def remove_column(text):
    """Zero out the column part of a "file:line:col" source location.

    Left untouched when aliased-set output (--aset) was requested; a
    location with no column gets ":0" appended instead.
    """
    if aset:
        return text
    parts = text.split(':')
    if len(parts) < 3:
        return text + ":0"
    parts[2] = '0'
    return ':'.join(parts)
def strip_start(text, prefix):
    """Remove ``prefix`` from the start of ``text``, plus a leading "./".

    Returns ``text`` unchanged when it does not start with ``prefix``.

    BUG FIX: the previous negative-index slice
    ``text[-(len(text) - len(prefix)):]`` returned the whole string when
    ``text == prefix`` (slice ``[-0:]`` is ``[0:]``); slicing from
    ``len(prefix)`` handles that case correctly.
    """
    if not text.startswith(prefix):
        return text
    ret = text[len(prefix):]
    if ret.startswith("./"):
        return ret[2:]
    return ret
class Instruction:
    """One load/store instruction parsed from an SVF memory-SSA dump.

    Points-to sets are accumulated via feed_line() until the trailing
    "[[ ... ]]" source-location line closes the record.
    """
    def __init__(self):
        # ids of abstract memory objects this instruction reads / writes
        self.load_from = set()
        self.store_to = set()
        self.source_loc = None
        # LLVM-style type string; attribute name keeps the original typo
        self.poitner_type = None
    def is_integer(self):
        """True when the parsed type is a plain LLVM integer type."""
        return self.poitner_type in ['i8', 'i16', 'i32', 'i64']
    def is_general_pointer(self):
        """True when the parsed type is a pointer to a plain integer type."""
        return self.poitner_type in ['i8*', 'i16*', 'i32*', 'i64*']
    def extract_type(self, line):
        """Extract the type between "({" and "})" and strip the numeric
        suffixes LLVM appends to duplicated struct names (struct.Foo.2)."""
        typ = line[line.find("({") + 2 : line.find("})")]
        for regex in re.findall('struct\.[^\ ]*\.[0-9]+[^\ \*]*', typ):
            newregex = re.sub(r'\.[0-9]+$', '', regex)
            typ = typ.replace(regex, newregex)
        for regex in re.findall('\.[0-9]+:', typ):
            newregex = re.sub(r'\.[0-9]+:', ':', regex)
            typ = typ.replace(regex, newregex)
        self.poitner_type = typ
    def extract_source_location(self, line):
        """Take the source location following "[[", stopping at "@[" (for
        inlined instructions) or at the closing "]]"."""
        loc = line.strip().split("[[")[1]
        if loc.find("@[") != -1:
            # It is inlined at somewhere, but I don't care where it is inlined at
            delim = "@["
        else:
            # No inlined
            delim = "]]"
        self.source_loc = loc.split(delim)[0].strip()
    def __parse_pts(line):
        # helper without self: invoked as Instruction.__parse_pts(line);
        # parses a points-to set like "{ 3 5 7 }" into {3, 5, 7}
        line = line.strip()
        line = line[line.index("{") + 1 : len(line) - 1]
        return set(map(int, line.split()))
    def feed_line(self, line, is_write):
        """Merge the points-to set found on an LDMU/STCHI line."""
        pts = Instruction.__parse_pts(line)
        if is_write:
            self.store_to |= pts
        else:
            self.load_from |= pts
    def get_accessed_memory_location(self):
        """Return [(memloc_id, is_write), ...] for every touched object."""
        return list(zip(self.store_to, [True]*len(self.store_to))) + \
               list(zip(self.load_from, [False]*len(self.load_from)))
    def get_source_location(self):
        """Return the normalized location ("file:line:0", prefix stripped)."""
        return remove_column(strip_start(self.source_loc, PREFIX))
    def get_pointer_type(self):
        return self.poitner_type
class MemoryLocation:
    """Collects the source locations of the loads and stores that touch one
    abstract memory object and turns them into race-candidate output."""
    def __init__(self, id):
        self.id = id
        self.load_insn = set()
        self.store_insn = set()
    def add_instruction(self, insn, is_write):
        """Record the instruction's source location (column suffix dropped)."""
        source_loc = insn.get_source_location()[:-2]
        target = self.store_insn if is_write else self.load_insn
        target.add(source_loc)
    def generate_result(self):
        """Dispatch on the global --aset flag."""
        if aset:
            return self.__generate_aliased_set()
        return self.__generate_mempair()
    def __generate_aliased_set(self):
        reads = list(self.load_insn)
        writes = list(self.store_insn)
        return self.id, reads + writes, ['R'] * len(reads) + ['W'] * len(writes)
    def __generate_mempair(self):
        # only pairs with at least one write can race
        write_write = list(itertools.product(self.store_insn, self.store_insn))
        write_read = list(itertools.product(self.store_insn, self.load_insn))
        types = [('W', 'W')] * len(write_write) + [('W', 'R')] * len(write_read)
        return self.id, write_write + write_read, types
class MempairResult:
    """Deduplicated store of candidate racy source-location pairs."""
    def __init__(self):
        self.deduped_mempair = {}
    def __sort(mempair, typ):
        # canonicalize: lexicographically smaller location first, with the
        # access types swapped in lockstep
        if mempair[0] > mempair[1]:
            return (mempair[1], mempair[0]), (typ[1], typ[0])
        return mempair, typ
    def add(self, locid, mempairs, types):
        for pair, access in zip(mempairs, types):
            canonical_pair, canonical_access = MempairResult.__sort(pair, access)
            # append the memory-location id twice (wcventure)
            self.deduped_mempair[canonical_pair] = canonical_access + (locid, locid)
    def print_all(self):
        for pair, access in sorted(self.deduped_mempair.items()):
            print(pair[0], pair[1], access[0], access[1], access[2], access[3])
class AliasedSetResult:
    """Stores, per memory location, the set of (source_loc, access_type)
    pairs, discarding any set that is a subset of another location's set."""
    def __init__(self):
        self.aliased_set_per_memloc = {}
    class AliasedSet:
        """A hashable collection of (source_location, access_type) pairs."""
        def __init__(self, aliased_set, types):
            # BUG FIX: the parameter was previously named ``typ`` while the
            # loop below read the undefined name ``types``, raising a
            # NameError as soon as an AliasedSet was constructed.
            self.set = set()
            for source_loc, typ in zip(aliased_set, types):
                self.set.add((source_loc, typ))
        def __iter__(self):
            return self.set.__iter__()
    def __is_subset(self, sub_memlocid, super_memlocid):
        # True when every insn of ``sub`` also appears in ``super``
        for insn in self.aliased_set_per_memloc[sub_memlocid]:
            if not insn in self.aliased_set_per_memloc[super_memlocid]:
                return False
        return True
    def __remove_duplicate(self, memlocid):
        # drop the new set if an existing one covers it ...
        for other_memlocid in list(self.aliased_set_per_memloc):
            if other_memlocid != memlocid and self.__is_subset(memlocid, other_memlocid):
                del self.aliased_set_per_memloc[memlocid]
                return
        # ... otherwise drop any existing set the new one covers
        for other_memlocid in list(self.aliased_set_per_memloc):
            if other_memlocid != memlocid and self.__is_subset(other_memlocid, memlocid):
                del self.aliased_set_per_memloc[other_memlocid]
    def add(self, memlocid, aliased_set, types):
        """Register an aliased set; singleton sets are ignored."""
        if len(aliased_set) == 1:
            return
        self.aliased_set_per_memloc[memlocid] = AliasedSetResult.AliasedSet(aliased_set, types)
        self.__remove_duplicate(memlocid)
    def print_all(self):
        for memlocid, aliased_set in sorted(self.aliased_set_per_memloc.items()):
            print("[Memory location ID: %d]" % memlocid[0])
            for insn in sorted(aliased_set, key = lambda x: (x[1], x[0])):
                print("\tType: ", insn[1], "\t", insn[0])
if __name__ == '__main__':
    with open(mssa, 'r') as mssa_file:
        memory_locations = {}
        memory_locationsFullKey = {}  # NOTE(review): never populated below
        reset = True
        for line in mssa_file:
            # NOTE(review): '"@[" and "]]]" in line' parses as
            # bool("@[") and ("]]]" in line), i.e. it only tests for "]]]".
            # '"@[" in line and "]]]" in line' was probably intended — confirm.
            if "@[" and "]]]" in line:
                continue
            if "cpp:0]]" in line:
                continue
            if len(line.strip()) == 0:
                continue
            if reset:
                # start accumulating a fresh instruction record
                reset = False
                insn = Instruction()
            if "LDMU" in line:
                # load memory-SSA annotation: merge its points-to set
                insn.feed_line(line, is_write= False)
            elif "STCHI" in line:
                # store memory-SSA annotation: merge its points-to set
                insn.feed_line(line, is_write= True)
            elif "[[" in line:
                # the "[[...]]" source-location line closes the record
                reset = True
                # set a source location
                insn.extract_source_location(line)
                # set a type of the pointer
                insn.extract_type(line)
                # if insn.is_integer() or insn.is_general_pointer():
                #    continue
                typ = insn.get_pointer_type()
                # bucket the instruction under every memory object it touches
                for memloc, is_write in insn.get_accessed_memory_location():
                    if memloc == 1:
                        continue
                    #key = (memloc, typ)
                    key = (memloc)
                    if not key in memory_locations:
                        memory_locations[key] = MemoryLocation(key)
                    memory_locations[key].add_instruction(insn, is_write)
        # flatten to [key, store-locations, load-locations] rows
        SaveList = []
        for key in memory_locations:
            SaveList.append([key,memory_locations[key].store_insn,memory_locations[key].load_insn])
        # save list
        savenpylist = np.array(SaveList)
        np.save('memGroup.npy',savenpylist)
        # load list
        #loadedList=np.load('memGroup.npy', allow_pickle=True)
        #memGroupList=loadedList.tolist()
        #print(memGroupList)
        # print every distinct source location, skipping system headers
        # and locations with no line number
        locSet = set()
        for each in SaveList:
            for eachstore in each[1]:
                locSet.add(eachstore)
            for eachload in each[2]:
                locSet.add(eachload)
        for each in sorted(locSet):
            if "/usr/lib/gcc/x86_64-linux-gnu" not in each and not each.endswith(":0"):
                print(each)
| [
"numpy.save",
"argparse.ArgumentParser",
"re.findall",
"numpy.array",
"itertools.product",
"re.sub"
] | [((181, 257), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Get aliased pair from the SVF result."""'}), "(description='Get aliased pair from the SVF result.')\n", (204, 257), False, 'import argparse\n'), ((7692, 7710), 'numpy.array', 'np.array', (['SaveList'], {}), '(SaveList)\n', (7700, 7710), True, 'import numpy as np\n'), ((7715, 7751), 'numpy.save', 'np.save', (['"""memGroup.npy"""', 'savenpylist'], {}), "('memGroup.npy', savenpylist)\n", (7722, 7751), True, 'import numpy as np\n'), ((1268, 1322), 're.findall', 're.findall', (['"""struct\\\\.[^\\\\ ]*\\\\.[0-9]+[^\\\\ \\\\*]*"""', 'typ'], {}), "('struct\\\\.[^\\\\ ]*\\\\.[0-9]+[^\\\\ \\\\*]*', typ)\n", (1278, 1322), False, 'import re\n'), ((1442, 1471), 're.findall', 're.findall', (['"""\\\\.[0-9]+:"""', 'typ'], {}), "('\\\\.[0-9]+:', typ)\n", (1452, 1471), False, 'import re\n'), ((1342, 1373), 're.sub', 're.sub', (['"""\\\\.[0-9]+$"""', '""""""', 'regex'], {}), "('\\\\.[0-9]+$', '', regex)\n", (1348, 1373), False, 'import re\n'), ((1495, 1527), 're.sub', 're.sub', (['"""\\\\.[0-9]+:"""', '""":"""', 'regex'], {}), "('\\\\.[0-9]+:', ':', regex)\n", (1501, 1527), False, 'import re\n'), ((3424, 3475), 'itertools.product', 'itertools.product', (['self.store_insn', 'self.store_insn'], {}), '(self.store_insn, self.store_insn)\n', (3441, 3475), False, 'import itertools\n'), ((3498, 3548), 'itertools.product', 'itertools.product', (['self.store_insn', 'self.load_insn'], {}), '(self.store_insn, self.load_insn)\n', (3515, 3548), False, 'import itertools\n')] |
import numpy as np
from glm import mat4
class Robot:
    """A serial chain of Joint objects (between 2 and 10 real joints)."""
    def __init__(self):
        # joints are created lazily by initialise()
        self.joints = None
    def initialise(self):
        """
        Initialise joints
        self.joints is structured such that object at index i-1 is the parent of the object at i
        :return:
        """
        # Leading None sentinel; joint numbers used by callers are offset
        # from list indices (see the +1/+2 arithmetic below).
        # NOTE(review): both initial joints get parent None — confirm the
        # second should not be chained to the first.
        self.joints = [None, Joint(None), Joint(None)]
    def return_joint(self, i):
        """
        Return specified joint
        :param i: Number of the joint to be returned
        :return: Specified joint
        """
        # NOTE(review): uses i + 2 while insert_joint/remove_joint use
        # i + 1 — confirm the intended joint-number <-> index mapping.
        return self.joints[i + 2]
    def append_joint(self):
        """
        Add an extra joint at the end up to 10
        :return: Success value (1 on success, 0 when the limit is reached)
        """
        if self.len > 10:
            return 0
        # new joint's parent is the current last joint
        self.joints.append(Joint(self.joints[-1]))
        return 1
    def insert_joint(self, i):
        """
        Inserts a new joint before the one specified up to 10
        :param i: Number the new joint will have
        :return: Success value
        """
        if self.len > 10:
            return 0
        # parent is the element currently preceding the insertion point
        self.joints.insert(i + 1, Joint(self.joints[i]))
        return 1
    def pop_joint(self):
        """
        Remove a joint from the end, down to a minimum of 2
        :return: Success value
        """
        if self.len == 2:
            return 0
        self.joints.pop(-1)
        return 1
    def remove_joint(self, i):
        """
        Remove a specified joint
        :param i: Number of the joint to be removed
        :return: Success value
        """
        if self.len == 2:
            return 0
        self.joints.pop(i + 1)
        return 1
    @property
    def len(self):
        # number of real joints (excludes the leading None sentinel)
        return len(self.joints) - 1
    def update_joint(self, joint_num):
        """
        Update position of a joint, and all his children
        :param joint_num: Number of the joint to be updated
        :return:
        """
        # re-chain every joint after joint_num to its (possibly moved) parent
        for i in range(joint_num+2, len(self.joints)):
            self.joints[i].update(self.joints[i-1])
    def draw(self, proj, view):
        """
        Draw all the joints
        :param proj: projection matrix
        :param view: view matrix
        :return:
        """
        # drawn back-to-front; the None sentinel at index 0 is skipped
        for joint in reversed(self.joints[1:]):
            joint.draw(proj, view)
class Joint:
    """A single robot joint positioned via Denavit-Hartenberg parameters
    (theta, alpha in degrees; a, d as lengths)."""
    def __init__(self, parent):
        from Objects3D import coord_3d
        self.coord_3d = coord_3d.copy()
        self.alpha = 0.0
        self.a = 0.0
        self.d = 0.0
        self.theta = 0.0
        self.d_var = False      # Not used yet
        self.theta_var = False  # Not used yet
        self.mat = np.eye(4)
        # place it in the world on initialisation
        self.update(parent)
    def update(self, parent):
        """Recompute this joint's global transform from its parent.

        :param parent: parent Joint, or None for the root joint
        """
        if parent is None:
            # root joint sits at the world origin
            self.mat = np.eye(4)
        else:
            # chain this joint's DH transform onto the parent's global one
            self.mat = parent.mat @ self.generate_matrix
        self.coord_3d.apply_mat(mat4(self.mat.T.tolist()))
    @property
    def generate_matrix(self) -> np.ndarray:
        """Homogeneous DH transform built from (theta, alpha, a, d)."""
        st, ct = self.sin(self.theta), self.cos(self.theta)
        sa, ca = self.sin(self.alpha), self.cos(self.alpha)
        return np.array([
            [ct, -st * ca, st * sa, self.a * ct],
            [st, ct * ca, -ct * sa, self.a * st],
            [0, sa, ca, self.d],
            [0, 0, 0, 1],
        ])
    # sin/cos that return exact whole numbers for the cardinal angles
    @staticmethod
    def sin(x):
        exact = {0: 0, 90: 1, 180: 0, 270: -1}
        if x in exact:
            return exact[x]
        return np.sin(np.deg2rad(x))
    @staticmethod
    def cos(x):
        exact = {0: 1, 90: 0, 180: -1, 270: 0}
        if x in exact:
            return exact[x]
        return np.cos(np.deg2rad(x))
    def draw(self, proj, view):
        self.coord_3d.draw(proj, view)
| [
"numpy.eye",
"numpy.array",
"Objects3D.coord_3d.copy",
"numpy.deg2rad"
] | [((2353, 2368), 'Objects3D.coord_3d.copy', 'coord_3d.copy', ([], {}), '()\n', (2366, 2368), False, 'from Objects3D import coord_3d\n'), ((2524, 2533), 'numpy.eye', 'np.eye', (['(4)'], {}), '(4)\n', (2530, 2533), True, 'import numpy as np\n'), ((3464, 3603), 'numpy.array', 'np.array', (['[[c_t, -s_t * c_a, s_t * s_a, self.a * c_t], [s_t, c_t * c_a, -c_t * s_a, \n self.a * s_t], [0, s_a, c_a, self.d], [0, 0, 0, 1]]'], {}), '([[c_t, -s_t * c_a, s_t * s_a, self.a * c_t], [s_t, c_t * c_a, -c_t *\n s_a, self.a * s_t], [0, s_a, c_a, self.d], [0, 0, 0, 1]])\n', (3472, 3603), True, 'import numpy as np\n'), ((2929, 2938), 'numpy.eye', 'np.eye', (['(4)'], {}), '(4)\n', (2935, 2938), True, 'import numpy as np\n'), ((3924, 3937), 'numpy.deg2rad', 'np.deg2rad', (['x'], {}), '(x)\n', (3934, 3937), True, 'import numpy as np\n'), ((4127, 4140), 'numpy.deg2rad', 'np.deg2rad', (['x'], {}), '(x)\n', (4137, 4140), True, 'import numpy as np\n')] |
import numpy as np
import matplotlib.pyplot as pl
import platform
import os
from astropy.io import fits
from ipdb import set_trace as stop
from astropy.io import fits
import scipy.io
import time
import argparse
import h5py
from matplotlib.widgets import Slider
import matplotlib.animation as manimation
from tqdm import tqdm
import scipy.misc as mi
import congrid
if (platform.node() == 'viga'):
os.environ["THEANO_FLAGS"] = "mode=FAST_RUN,device=cpu,floatX=float32"
os.environ["KERAS_BACKEND"] = "tensorflow"
if (platform.node() != 'viga'):
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
import tensorflow as tf
import keras.backend.tensorflow_backend as ktf
import sys
sys.path.append('../training')
import models as nn_model
def contrast(x):
    """RMS contrast in percent: 100 * std(x) / mean(x)."""
    return 100.0 * np.std(x) / np.mean(x)
class deep_network(object):
    """Wrapper around a trained Keras super-resolution network (HMI -> SST).

    Builds/loads the model, and offers a validation plot, an interactive
    slider-driven cube viewer and movie export of predictions.
    """
    def __init__(self, root, depth, model, activation, n_filters):
        # Only allocate needed memory
        config = tf.ConfigProto()
        config.gpu_options.allow_growth=True
        session = tf.Session(config=config)
        ktf.set_session(session)
        # Hyper-parameters consumed later by define_network().
        self.root = root
        self.depth = depth
        self.network_type = model
        self.activation = activation
        self.n_filters = n_filters
        # Site-specific HDF5 data locations.
        self.input_file_images = "/net/viga/scratch1/cdiazbas/DATABASE/database_validation_x2.h5"
        self.input_file_images_HMI = "/net/viga/scratch1/cdiazbas/DATABASE/database_prediction.h5"
    def define_network(self, nx=50, ny=50):
        """Instantiate the selected architecture and load trained weights from
        '{root}_{depth}_weights.hdf5'."""
        print("Setting up network...")
        self.nx = nx
        self.ny = ny
        if (self.network_type == 'encdec'):
            self.model = nn_model.encdec(self.nx, self.ny, 0.0, self.depth, activation=self.activation, n_filters=self.n_filters)
        if (self.network_type == 'keepsize'):
            self.model = nn_model.keepsize(self.nx, self.ny, 0.0, self.depth, activation=self.activation, n_filters=self.n_filters)
        print("Loading weights...")
        self.model.load_weights("{0}_{1}_weights.hdf5".format(self.root, self.depth))
    def predict(self):
        """Predict one validation sample and plot input/output contrasts.

        NOTE(review): this method references self.input_file_images_training and
        self.median, neither of which is set in __init__ (which defines
        input_file_images and median_HMI/median_SST), and indexes the h5py file
        as f[0].data — it looks stale/broken as written; confirm before use.
        """
        print("Predicting validation data...")
        tmp = np.loadtxt('/net/vena/scratch/Dropbox/GIT/DeepLearning/hmi_super/training/normalization.txt')
        self.median_HMI, self.median_SST = tmp[0], tmp[1]
        f = h5py.File(self.input_file_images_training, 'r')
        input_validation = np.zeros((1,self.nx,self.ny,12), dtype='float32')
        input_validation[0,:,:,:] = f[0].data[200,:,:,1:].astype('float32') / self.median
        start = time.time()
        out = self.model.predict(input_validation)
        end = time.time()
        print("Prediction took {0} seconds...".format(end-start))
        print("Plotting validation data...")
        ff, ax = pl.subplots(nrows=2, ncols=2, figsize=(10,8))
        dat = f[0].data[200,:,:,1] / self.median
        res = ax[0,0].imshow(dat, cmap=pl.cm.gray)
        pl.colorbar(res, ax=ax[0,0])
        # NOTE(review): local name shadows the module-level contrast() helper.
        contrast = np.std(dat) / np.mean(dat)
        ax[0,0].set_title('contrast: {0:4.1f}%'.format(contrast*100))
        dat = f[0].data[200,:,:,2] / self.median
        res = ax[0,1].imshow(dat, cmap=pl.cm.gray)
        pl.colorbar(res, ax=ax[0,1])
        contrast = np.std(dat) / np.mean(dat)
        ax[0,1].set_title('contrast: {0:4.1f}%'.format(contrast*100))
        dat = f[0].data[200,10:-10,10:-10,0] / self.median
        res = ax[1,0].imshow(dat, cmap=pl.cm.gray)
        pl.colorbar(res, ax=ax[1,0])
        contrast = np.std(dat) / np.mean(dat)
        ax[1,0].set_title('contrast: {0:4.1f}%'.format(contrast*100))
        # Network output, cropped 10 px on each side like the target above.
        dat = out[0,10:-10,10:-10,0]
        res = ax[1,1].imshow(dat, cmap=pl.cm.gray)
        pl.colorbar(res, ax=ax[1,1])
        contrast = np.std(dat) / np.mean(dat)
        ax[1,1].set_title('contrast: {0:4.1f}%'.format(contrast*100))
        pl.tight_layout()
        pl.show()
        # Drop into the ipdb debugger after plotting.
        stop()
    def cube_view(self):
        """Predict 100 validation frames and browse them with a slider widget,
        comparing target / HMI input / bilinear upsampling / network output."""
        axis = 0
        print("Predicting validation data...")
        tmp = np.loadtxt('/net/vena/scratch/Dropbox/GIT/DeepLearning/hmi_super/training/normalization.txt')
        self.median_HMI, self.median_SST = tmp[0], tmp[1]
        f = h5py.File(self.input_file_images, 'r')
        input_validation = np.zeros((100,self.nx,self.ny,1), dtype='float32')
        input_validation[:,:,:,:] = f['imHMI'][0:100,:,:,0:1].astype('float32') / self.median_HMI
        output_validation = f['imSST'][0:100,:,:,0:1].astype('float32') / self.median_SST
        start = time.time()
        out = self.model.predict(input_validation)
        end = time.time()
        print("Prediction took {0} seconds...".format(end-start))
        fig, ax = pl.subplots(nrows=1, ncols=4, figsize=(14,8))
        fig.subplots_adjust(left=0.25, bottom=0.25)
        # select first image
        s = [slice(0, 1) if i == axis else slice(None) for i in range(3)]
        im_validation = output_validation[s].squeeze()
        im_network = out[s].squeeze()
        im_original = input_validation[s].squeeze()
        # display image
        l_validation = ax[0].imshow(im_validation)
        l_bilinear = ax[2].imshow(mi.imresize(im_original, 200))
        l_network = ax[3].imshow(im_network)
        l_original = ax[1].imshow(im_original)
        ax[0].set_title('Target')
        ax[1].set_title('HMI')
        ax[2].set_title('HMI bilinear')
        ax[3].set_title('Network')
        # define slider
        # NOTE(review): 'axisbg' is a long-deprecated matplotlib kwarg — confirm
        # against the matplotlib version in use ('facecolor' replaced it).
        axcolor = 'lightgoldenrodyellow'
        ax_bar = fig.add_axes([0.25, 0.1, 0.65, 0.03], axisbg=axcolor)
        slider = Slider(ax_bar, 'Axis %i index' % axis, 0, output_validation.shape[axis] - 1,
                        valinit=0, valfmt='%i')
        def update(val):
            # Slider callback: re-slice all four panels at the chosen frame.
            ind = int(slider.val)
            s = [slice(ind, ind + 1) if i == axis else slice(None)
                     for i in range(3)]
            im_validation = output_validation[s].squeeze()
            im_network = out[s].squeeze()
            im_original = input_validation[s].squeeze()
            l_validation.set_data(im_validation)
            l_network.set_data(im_network)
            l_original.set_data(im_original)
            l_bilinear.set_data(mi.imresize(im_original, 200))
            fig.canvas.draw()
            ax[0].set_title('Target')
            ax[1].set_title('HMI')
            ax[2].set_title('HMI bilinear')
            ax[3].set_title('Network')
        slider.on_changed(update)
        pl.show()
    def gen_movie(self):
        """Predict 100 frames of real HMI data and export a comparison movie
        (HMI / bilinear upsampling / network) via ffmpeg."""
        # print("Predicting validation data...")
        # tmp = np.loadtxt('/net/vena/scratch/Dropbox/GIT/DeepLearning/hmi_super/training/normalization.txt')
        # self.median_HMI, self.median_SST = tmp[0], tmp[1]
        # f = h5py.File(self.input_file_images, 'r')
        # NOTE(review): this first prediction runs on an all-zeros array; its only
        # live effect is computing the upsampled frame shape (nx_up, ny_up) below.
        input_validation = np.zeros((100,self.nx,self.ny,1), dtype='float32')
        # input_validation[:,:,:,:] = f['imHMI'][0:100,:,:,0:1].astype('float32') / self.median_HMI
        # output_validation = f['imSST'][0:100,:,:,0:1].astype('float32') / self.median_SST
        start = time.time()
        out = self.model.predict(input_validation)
        end = time.time()
        print("Prediction took {0} seconds...".format(end-start))
        # fig, ax = pl.subplots(nrows=1, ncols=4, figsize=(13,8))
        # n_frames = 100
        nx_up, ny_up = out[0,:,:,0].shape
        # FFMpegWriter = manimation.writers['ffmpeg']
        # metadata = dict(title='Movie Test', artist='Matplotlib', comment='Movie support!')
        # writer = FFMpegWriter(codec='libx264', fps=1, bitrate=20000, metadata=metadata, extra_args=['-pix_fmt', 'yuv420p'])
        # with writer.saving(fig, "movie.mp4", n_frames):
            # for i in tqdm(range(n_frames)):
                # ax[0].imshow(input_validation[i,:,:,0])
                # ax[1].imshow(congrid.resample(input_validation[i,:,:,0], (nx_up, ny_up), minusone=True))
                # ax[2].imshow(out[i,:,:,0])
                # ax[3].imshow(output_validation[i,:,:,0])
                # ax[0].set_title('HMI c={0:4.1f}%'.format(contrast(input_validation[i,:,:,0])))
                # ax[1].set_title('HMI bilinear c={0:4.1f}%'.format(contrast(congrid.resample(input_validation[i,:,:,0], (nx_up,ny_up), minusone=True))))
                # ax[2].set_title('Network c={0:4.1f}%'.format(contrast(out[i,:,:,0])))
                # writer.grab_frame()
                # ax[0].cla()
                # ax[1].cla()
                # ax[2].cla()
                # ax[3].cla()
        print("Predicting HMI data...")
        f = h5py.File(self.input_file_images_HMI, 'r')
        input_validation = np.zeros((100,self.nx,self.ny,1), dtype='float32')
        input_validation[:,:,:,:] = f['imHMI'][0:100,:,:,0:1].astype('float32')
        # Normalize by the data's own median (no external normalization file here).
        input_validation /= np.median(input_validation)
        start = time.time()
        out = self.model.predict(input_validation)
        end = time.time()
        print("Prediction took {0} seconds...".format(end-start))
        fig, ax = pl.subplots(nrows=1, ncols=3, figsize=(12,8))
        n_frames = 100
        FFMpegWriter = manimation.writers['ffmpeg']
        metadata = dict(title='Movie Test', artist='Matplotlib', comment='Movie support!')
        writer = FFMpegWriter(codec='libx264', fps=1, bitrate=20000, metadata=metadata, extra_args=['-pix_fmt', 'yuv420p'])
        with writer.saving(fig, "movie_HMI.mp4", n_frames):
            for i in tqdm(range(n_frames)):
                # Use the network output's range so all three panels share a scale.
                v_min = np.min(out[i,:,:,0])
                v_max = np.max(out[i,:,:,0])
                ax[0].imshow(input_validation[i,:,:,0], vmin=v_min, vmax=v_max)
                ax[1].imshow(congrid.resample(input_validation[i,:,:,0], (nx_up, ny_up), minusone=True), vmin=v_min, vmax=v_max)
                ax[2].imshow(out[i,:,:,0], vmin=v_min, vmax=v_max)
                ax[0].set_title('HMI c={0:4.1f}%'.format(contrast(input_validation[i,:,:,0])))
                ax[1].set_title('HMI bilinear c={0:4.1f}%'.format(contrast(congrid.resample(input_validation[i,:,:,0], (nx_up,ny_up), minusone=True))))
                ax[2].set_title('Network c={0:4.1f}%'.format(contrast(out[i,:,:,0])))
                writer.grab_frame()
                ax[0].cla()
                ax[1].cla()
                ax[2].cla()
if (__name__ == '__main__'):
    # CLI entry point: choose a trained network and an action to run with it.
    parser = argparse.ArgumentParser(description='Predict for MFBD')
    parser.add_argument('-i','--input', help='Input files')
    parser.add_argument('-d','--depth', help='Depth', default=5)
    parser.add_argument('-k','--kernels', help='N. kernels', default=64)
    parser.add_argument('-m','--model', help='Model', choices=['encdec', 'keepsize'], required=True, default='keepsize')
    parser.add_argument('-c','--activation', help='Activation', choices=['relu', 'elu'], required=True, default='relu')
    parser.add_argument('-a','--action', help='action', choices=['cube', 'movie', 'large_frame'], default='cube')
    parsed = vars(parser.parse_args())
    print('Model : {0}'.format(parsed['model']))
    print('Depth : {0}'.format(parsed['depth']))
    print('Activation : {0}'.format(parsed['activation']))
    # Weights are looked up relative to the training directory.
    out = deep_network('../training/networks/{0}'.format(parsed['input']), depth=int(parsed['depth']), model=parsed['model'], activation=parsed['activation'],
        n_filters=int(parsed['kernels']))
    pl.close('all')
    if (parsed['action'] == 'cube'):
        out.define_network()
        out.cube_view()
    if (parsed['action'] == 'movie'):
        out.define_network()
        out.gen_movie()
    if (parsed['action'] == 'large_frame'):
        # Larger input patch; the corresponding prediction path is not wired up yet.
        out.define_network(nx=100, ny=100)
        print("Not yet implemented")
        # out.gen_movie()
# out.predict() | [
"argparse.ArgumentParser",
"ipdb.set_trace",
"matplotlib.widgets.Slider",
"tensorflow.ConfigProto",
"numpy.mean",
"keras.backend.tensorflow_backend.set_session",
"matplotlib.pyplot.tight_layout",
"sys.path.append",
"platform.node",
"numpy.std",
"matplotlib.pyplot.close",
"matplotlib.pyplot.col... | [((678, 708), 'sys.path.append', 'sys.path.append', (['"""../training"""'], {}), "('../training')\n", (693, 708), False, 'import sys\n'), ((369, 384), 'platform.node', 'platform.node', ([], {}), '()\n', (382, 384), False, 'import platform\n'), ((521, 536), 'platform.node', 'platform.node', ([], {}), '()\n', (534, 536), False, 'import platform\n'), ((10684, 10739), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Predict for MFBD"""'}), "(description='Predict for MFBD')\n", (10707, 10739), False, 'import argparse\n'), ((11696, 11711), 'matplotlib.pyplot.close', 'pl.close', (['"""all"""'], {}), "('all')\n", (11704, 11711), True, 'import matplotlib.pyplot as pl\n'), ((783, 793), 'numpy.mean', 'np.mean', (['x'], {}), '(x)\n', (790, 793), True, 'import numpy as np\n'), ((939, 955), 'tensorflow.ConfigProto', 'tf.ConfigProto', ([], {}), '()\n', (953, 955), True, 'import tensorflow as tf\n'), ((1019, 1044), 'tensorflow.Session', 'tf.Session', ([], {'config': 'config'}), '(config=config)\n', (1029, 1044), True, 'import tensorflow as tf\n'), ((1053, 1077), 'keras.backend.tensorflow_backend.set_session', 'ktf.set_session', (['session'], {}), '(session)\n', (1068, 1077), True, 'import keras.backend.tensorflow_backend as ktf\n'), ((2141, 2244), 'numpy.loadtxt', 'np.loadtxt', (['"""/net/vena/scratch/Dropbox/GIT/DeepLearning/hmi_super/training/normalization.txt"""'], {}), "(\n '/net/vena/scratch/Dropbox/GIT/DeepLearning/hmi_super/training/normalization.txt'\n )\n", (2151, 2244), True, 'import numpy as np\n'), ((2306, 2353), 'h5py.File', 'h5py.File', (['self.input_file_images_training', '"""r"""'], {}), "(self.input_file_images_training, 'r')\n", (2315, 2353), False, 'import h5py\n'), ((2382, 2434), 'numpy.zeros', 'np.zeros', (['(1, self.nx, self.ny, 12)'], {'dtype': '"""float32"""'}), "((1, self.nx, self.ny, 12), dtype='float32')\n", (2390, 2434), True, 'import numpy as np\n'), ((2548, 2559), 'time.time', 'time.time', ([], 
{}), '()\n', (2557, 2559), False, 'import time\n'), ((2625, 2636), 'time.time', 'time.time', ([], {}), '()\n', (2634, 2636), False, 'import time\n'), ((2791, 2837), 'matplotlib.pyplot.subplots', 'pl.subplots', ([], {'nrows': '(2)', 'ncols': '(2)', 'figsize': '(10, 8)'}), '(nrows=2, ncols=2, figsize=(10, 8))\n', (2802, 2837), True, 'import matplotlib.pyplot as pl\n'), ((2946, 2975), 'matplotlib.pyplot.colorbar', 'pl.colorbar', (['res'], {'ax': 'ax[0, 0]'}), '(res, ax=ax[0, 0])\n', (2957, 2975), True, 'import matplotlib.pyplot as pl\n'), ((3200, 3229), 'matplotlib.pyplot.colorbar', 'pl.colorbar', (['res'], {'ax': 'ax[0, 1]'}), '(res, ax=ax[0, 1])\n', (3211, 3229), True, 'import matplotlib.pyplot as pl\n'), ((3472, 3501), 'matplotlib.pyplot.colorbar', 'pl.colorbar', (['res'], {'ax': 'ax[1, 0]'}), '(res, ax=ax[1, 0])\n', (3483, 3501), True, 'import matplotlib.pyplot as pl\n'), ((3714, 3743), 'matplotlib.pyplot.colorbar', 'pl.colorbar', (['res'], {'ax': 'ax[1, 1]'}), '(res, ax=ax[1, 1])\n', (3725, 3743), True, 'import matplotlib.pyplot as pl\n'), ((3876, 3893), 'matplotlib.pyplot.tight_layout', 'pl.tight_layout', ([], {}), '()\n', (3891, 3893), True, 'import matplotlib.pyplot as pl\n'), ((3903, 3912), 'matplotlib.pyplot.show', 'pl.show', ([], {}), '()\n', (3910, 3912), True, 'import matplotlib.pyplot as pl\n'), ((3930, 3936), 'ipdb.set_trace', 'stop', ([], {}), '()\n', (3934, 3936), True, 'from ipdb import set_trace as stop\n'), ((4043, 4146), 'numpy.loadtxt', 'np.loadtxt', (['"""/net/vena/scratch/Dropbox/GIT/DeepLearning/hmi_super/training/normalization.txt"""'], {}), "(\n '/net/vena/scratch/Dropbox/GIT/DeepLearning/hmi_super/training/normalization.txt'\n )\n", (4053, 4146), True, 'import numpy as np\n'), ((4208, 4246), 'h5py.File', 'h5py.File', (['self.input_file_images', '"""r"""'], {}), "(self.input_file_images, 'r')\n", (4217, 4246), False, 'import h5py\n'), ((4275, 4328), 'numpy.zeros', 'np.zeros', (['(100, self.nx, self.ny, 1)'], {'dtype': '"""float32"""'}), 
"((100, self.nx, self.ny, 1), dtype='float32')\n", (4283, 4328), True, 'import numpy as np\n'), ((4540, 4551), 'time.time', 'time.time', ([], {}), '()\n', (4549, 4551), False, 'import time\n'), ((4617, 4628), 'time.time', 'time.time', ([], {}), '()\n', (4626, 4628), False, 'import time\n'), ((4727, 4773), 'matplotlib.pyplot.subplots', 'pl.subplots', ([], {'nrows': '(1)', 'ncols': '(4)', 'figsize': '(14, 8)'}), '(nrows=1, ncols=4, figsize=(14, 8))\n', (4738, 4773), True, 'import matplotlib.pyplot as pl\n'), ((5635, 5739), 'matplotlib.widgets.Slider', 'Slider', (['ax_bar', "('Axis %i index' % axis)", '(0)', '(output_validation.shape[axis] - 1)'], {'valinit': '(0)', 'valfmt': '"""%i"""'}), "(ax_bar, 'Axis %i index' % axis, 0, output_validation.shape[axis] - 1,\n valinit=0, valfmt='%i')\n", (5641, 5739), False, 'from matplotlib.widgets import Slider\n'), ((6540, 6549), 'matplotlib.pyplot.show', 'pl.show', ([], {}), '()\n', (6547, 6549), True, 'import matplotlib.pyplot as pl\n'), ((6878, 6931), 'numpy.zeros', 'np.zeros', (['(100, self.nx, self.ny, 1)'], {'dtype': '"""float32"""'}), "((100, self.nx, self.ny, 1), dtype='float32')\n", (6886, 6931), True, 'import numpy as np\n'), ((7147, 7158), 'time.time', 'time.time', ([], {}), '()\n', (7156, 7158), False, 'import time\n'), ((7224, 7235), 'time.time', 'time.time', ([], {}), '()\n', (7233, 7235), False, 'import time\n'), ((8860, 8902), 'h5py.File', 'h5py.File', (['self.input_file_images_HMI', '"""r"""'], {}), "(self.input_file_images_HMI, 'r')\n", (8869, 8902), False, 'import h5py\n'), ((8931, 8984), 'numpy.zeros', 'np.zeros', (['(100, self.nx, self.ny, 1)'], {'dtype': '"""float32"""'}), "((100, self.nx, self.ny, 1), dtype='float32')\n", (8939, 8984), True, 'import numpy as np\n'), ((9099, 9126), 'numpy.median', 'np.median', (['input_validation'], {}), '(input_validation)\n', (9108, 9126), True, 'import numpy as np\n'), ((9144, 9155), 'time.time', 'time.time', ([], {}), '()\n', (9153, 9155), False, 'import time\n'), 
((9221, 9232), 'time.time', 'time.time', ([], {}), '()\n', (9230, 9232), False, 'import time\n'), ((9330, 9376), 'matplotlib.pyplot.subplots', 'pl.subplots', ([], {'nrows': '(1)', 'ncols': '(3)', 'figsize': '(12, 8)'}), '(nrows=1, ncols=3, figsize=(12, 8))\n', (9341, 9376), True, 'import matplotlib.pyplot as pl\n'), ((771, 780), 'numpy.std', 'np.std', (['x'], {}), '(x)\n', (777, 780), True, 'import numpy as np\n'), ((1640, 1749), 'models.encdec', 'nn_model.encdec', (['self.nx', 'self.ny', '(0.0)', 'self.depth'], {'activation': 'self.activation', 'n_filters': 'self.n_filters'}), '(self.nx, self.ny, 0.0, self.depth, activation=self.\n activation, n_filters=self.n_filters)\n', (1655, 1749), True, 'import models as nn_model\n'), ((1817, 1928), 'models.keepsize', 'nn_model.keepsize', (['self.nx', 'self.ny', '(0.0)', 'self.depth'], {'activation': 'self.activation', 'n_filters': 'self.n_filters'}), '(self.nx, self.ny, 0.0, self.depth, activation=self.\n activation, n_filters=self.n_filters)\n', (1834, 1928), True, 'import models as nn_model\n'), ((2994, 3005), 'numpy.std', 'np.std', (['dat'], {}), '(dat)\n', (3000, 3005), True, 'import numpy as np\n'), ((3008, 3020), 'numpy.mean', 'np.mean', (['dat'], {}), '(dat)\n', (3015, 3020), True, 'import numpy as np\n'), ((3248, 3259), 'numpy.std', 'np.std', (['dat'], {}), '(dat)\n', (3254, 3259), True, 'import numpy as np\n'), ((3262, 3274), 'numpy.mean', 'np.mean', (['dat'], {}), '(dat)\n', (3269, 3274), True, 'import numpy as np\n'), ((3520, 3531), 'numpy.std', 'np.std', (['dat'], {}), '(dat)\n', (3526, 3531), True, 'import numpy as np\n'), ((3534, 3546), 'numpy.mean', 'np.mean', (['dat'], {}), '(dat)\n', (3541, 3546), True, 'import numpy as np\n'), ((3770, 3781), 'numpy.std', 'np.std', (['dat'], {}), '(dat)\n', (3776, 3781), True, 'import numpy as np\n'), ((3784, 3796), 'numpy.mean', 'np.mean', (['dat'], {}), '(dat)\n', (3791, 3796), True, 'import numpy as np\n'), ((5200, 5229), 'scipy.misc.imresize', 'mi.imresize', 
(['im_original', '(200)'], {}), '(im_original, 200)\n', (5211, 5229), True, 'import scipy.misc as mi\n'), ((6278, 6307), 'scipy.misc.imresize', 'mi.imresize', (['im_original', '(200)'], {}), '(im_original, 200)\n', (6289, 6307), True, 'import scipy.misc as mi\n'), ((9797, 9820), 'numpy.min', 'np.min', (['out[i, :, :, 0]'], {}), '(out[i, :, :, 0])\n', (9803, 9820), True, 'import numpy as np\n'), ((9842, 9865), 'numpy.max', 'np.max', (['out[i, :, :, 0]'], {}), '(out[i, :, :, 0])\n', (9848, 9865), True, 'import numpy as np\n'), ((10006, 10083), 'congrid.resample', 'congrid.resample', (['input_validation[i, :, :, 0]', '(nx_up, ny_up)'], {'minusone': '(True)'}), '(input_validation[i, :, :, 0], (nx_up, ny_up), minusone=True)\n', (10022, 10083), False, 'import congrid\n'), ((10344, 10421), 'congrid.resample', 'congrid.resample', (['input_validation[i, :, :, 0]', '(nx_up, ny_up)'], {'minusone': '(True)'}), '(input_validation[i, :, :, 0], (nx_up, ny_up), minusone=True)\n', (10360, 10421), False, 'import congrid\n')] |
import numpy as np
import matplotlib.pyplot as plt
class Neuron:
    """Single perceptron with a step activation, trained by the perceptron rule."""
    # Constructor
    def __init__(self, n_inputs, lRate):
        # Learning rate for the perceptron update rule.
        self.lRate = lRate
        # Weights (and trailing bias) initialised uniformly in [-0.1, 0.1).
        self.weights = np.random.rand(n_inputs + 1) * 0.2 - 0.1 # Num. features + 1 (bias)
    # Prediction
    def predict(self, inputs):
        # Append 1.0 so the last weight acts as the bias term.
        activation = np.dot(np.append(inputs, 1.0), self.weights)
        # Heaviside step activation.
        return 1.0 if activation >= 0.0 else 0.0
    # Learn
    def learn(self, trainingSet_inputs, trainingSet_outputs, nEpoch):
        """Train for nEpoch epochs with the perceptron rule; plots error and
        accuracy curves (saved to error.png / accuracy.png)."""
        #weights = np.random.rand(61) * 0.2 - 0.1 # Num. features + 1 (bias)
        error_list = []
        accuracy_list = []
        for epoch in range(nEpoch):
            sumError = 0.0
            for inputs, output in zip(trainingSet_inputs, trainingSet_outputs) :
                prediction = self.predict(inputs)
                error = output - prediction
                # error is in {-1, 0, 1}, so error**2 counts misclassifications.
                sumError += error ** 2
                self.weights = self.weights + self.lRate * error * np.append(inputs, 1.0)
            error_list.append(sumError)
            accuracy = 100*(len(trainingSet_outputs)-sumError)/len(trainingSet_outputs)
            accuracy_list.append(accuracy)
            if epoch % 1000 == 0:
                print ('Epoch %d Error: %.3f Accuracy: %.3f (%%)' % (epoch, sumError, accuracy))
        plt.plot(error_list)
        plt.title('Quadratic Error x Epoch')
        plt.ylabel('Quadratic Error')
        plt.xlabel('Epoch')
        fig = plt.gcf() # stores figure to save it
        fig.savefig('error.png')
        plt.show()
        plt.plot(accuracy_list)
        plt.title('Accuracy (%) x Epoch')
        plt.ylabel('Accuracy')
        plt.xlabel('Epoch')
        fig = plt.gcf() # stores figure to save it
        fig.savefig('accuracy.png')
        plt.show()
return self.weights | [
"matplotlib.pyplot.title",
"matplotlib.pyplot.show",
"matplotlib.pyplot.plot",
"numpy.append",
"numpy.random.rand",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.gcf",
"matplotlib.pyplot.xlabel"
] | [((1324, 1344), 'matplotlib.pyplot.plot', 'plt.plot', (['error_list'], {}), '(error_list)\n', (1332, 1344), True, 'import matplotlib.pyplot as plt\n'), ((1353, 1389), 'matplotlib.pyplot.title', 'plt.title', (['"""Quadratic Error x Epoch"""'], {}), "('Quadratic Error x Epoch')\n", (1362, 1389), True, 'import matplotlib.pyplot as plt\n'), ((1398, 1427), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Quadratic Error"""'], {}), "('Quadratic Error')\n", (1408, 1427), True, 'import matplotlib.pyplot as plt\n'), ((1436, 1455), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Epoch"""'], {}), "('Epoch')\n", (1446, 1455), True, 'import matplotlib.pyplot as plt\n'), ((1470, 1479), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (1477, 1479), True, 'import matplotlib.pyplot as plt\n'), ((1548, 1558), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1556, 1558), True, 'import matplotlib.pyplot as plt\n'), ((1576, 1599), 'matplotlib.pyplot.plot', 'plt.plot', (['accuracy_list'], {}), '(accuracy_list)\n', (1584, 1599), True, 'import matplotlib.pyplot as plt\n'), ((1608, 1641), 'matplotlib.pyplot.title', 'plt.title', (['"""Accuracy (%) x Epoch"""'], {}), "('Accuracy (%) x Epoch')\n", (1617, 1641), True, 'import matplotlib.pyplot as plt\n'), ((1650, 1672), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Accuracy"""'], {}), "('Accuracy')\n", (1660, 1672), True, 'import matplotlib.pyplot as plt\n'), ((1681, 1700), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Epoch"""'], {}), "('Epoch')\n", (1691, 1700), True, 'import matplotlib.pyplot as plt\n'), ((1715, 1724), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (1722, 1724), True, 'import matplotlib.pyplot as plt\n'), ((1796, 1806), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1804, 1806), True, 'import matplotlib.pyplot as plt\n'), ((335, 357), 'numpy.append', 'np.append', (['inputs', '(1.0)'], {}), '(inputs, 1.0)\n', (344, 357), True, 'import numpy as np\n'), ((185, 213), 'numpy.random.rand', 
'np.random.rand', (['(n_inputs + 1)'], {}), '(n_inputs + 1)\n', (199, 213), True, 'import numpy as np\n'), ((978, 1000), 'numpy.append', 'np.append', (['inputs', '(1.0)'], {}), '(inputs, 1.0)\n', (987, 1000), True, 'import numpy as np\n')] |
import time
import base64
from io import BytesIO
import numpy as np
from PIL import Image
import cv2
def log_error(e):
    """Print an error to stdout, then pause for a second.

    The pause rate-limits log spam when the caller retries in a tight loop.
    """
    print(e)
    time.sleep(1)
def decode_img(image):
    """Decode a base64-encoded image (as received in a request) into a numpy array."""
    raw_bytes = base64.b64decode(image)
    buffer = BytesIO(raw_bytes)
    pil_image = Image.open(buffer)
    return np.asarray(pil_image)
def imgPath_to_txt(filename):
    """Read the file at *filename* and return its content base64-encoded as a str."""
    with open(filename, "rb") as fh:
        payload = fh.read()
    return base64.b64encode(payload).decode()
def npImage_to_txt(image):
    """JPEG-encode a numpy image and return the compressed data base64-encoded (bytes)."""
    _, jpeg_buffer = cv2.imencode('.jpg', image)  # one-dim numpy array of JPEG bytes
    return base64.b64encode(jpeg_buffer.tobytes())
def txt_to_npImage(im_64):
    """Decode a base64 JPEG payload back into a BGR numpy image."""
    raw = base64.b64decode(im_64)
    byte_array = np.frombuffer(raw, dtype=np.uint8)
    return cv2.imdecode(byte_array, flags=cv2.IMREAD_COLOR)
| [
"io.BytesIO",
"numpy.frombuffer",
"cv2.imdecode",
"base64.b64decode",
"time.sleep",
"PIL.Image.open",
"base64.b64encode",
"cv2.imencode"
] | [((200, 213), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (210, 213), False, 'import time\n'), ((316, 339), 'base64.b64decode', 'base64.b64decode', (['image'], {}), '(image)\n', (332, 339), False, 'import base64\n'), ((352, 366), 'io.BytesIO', 'BytesIO', (['image'], {}), '(image)\n', (359, 366), False, 'from io import BytesIO\n'), ((709, 736), 'cv2.imencode', 'cv2.imencode', (['""".jpg"""', 'image'], {}), "('.jpg', image)\n", (721, 736), False, 'import cv2\n'), ((830, 856), 'base64.b64encode', 'base64.b64encode', (['im_bytes'], {}), '(im_bytes)\n', (846, 856), False, 'import base64\n'), ((968, 991), 'base64.b64decode', 'base64.b64decode', (['im_64'], {}), '(im_64)\n', (984, 991), False, 'import base64\n'), ((1005, 1044), 'numpy.frombuffer', 'np.frombuffer', (['im_bytes'], {'dtype': 'np.uint8'}), '(im_bytes, dtype=np.uint8)\n', (1018, 1044), True, 'import numpy as np\n'), ((1088, 1132), 'cv2.imdecode', 'cv2.imdecode', (['im_arr'], {'flags': 'cv2.IMREAD_COLOR'}), '(im_arr, flags=cv2.IMREAD_COLOR)\n', (1100, 1132), False, 'import cv2\n'), ((390, 407), 'PIL.Image.open', 'Image.open', (['image'], {}), '(image)\n', (400, 407), False, 'from PIL import Image\n')] |
# -*- coding: utf-8 -*-
"""
@author: mojtabasah
"""
from ntk_equivalence import fc_kernel, nn_fc
import numpy as np
import torch
import matplotlib.pyplot as plt
from scipy.linalg import sqrtm
import matplotlib.pyplot as plt
import pickle
from time import time
if __name__ == '__main__':
    # Experiment: compare GD training of a wide NN against its NTK-equivalent
    # scaled linear model on data generated by a smaller "true" network.
    plt.style.use('ggplot')
    n = 3000    #number of training samples
    runs = 1
    n_ts = 200  #Number of test samples
    p = 1000    #Dimension of input x
    output_dim = 1  #Dimesnion of output of neural network
    hidden_dim_true = [100, 100]    #Hidden dimesnions of data generating network
    var_noise = 0.1 #noise variance
    S = np.eye(p)   #Signal Covariance
    Ssqrt = sqrtm(S)    #Square root of covariance matrix
    # Neural network parameters
    lam = 5e-3  #1e-8 r#regularization parameter
    lr = 1e-3   #learning rate
    momentum = 0.4
    hidden_dim = [10000]    #True network list of hidden dimensions
    n_epoch = 100   #number of epochs
    bs = None   #batch size
    optimizer='SGD' #optimization algorithm
    scheduler_step = n_epoch + 0    #Scheduler step
    remove_f0 = True    #Remove initial network values when regressing
    around_init = True  #Regularize parametrs around initial values
    # Draw Gaussian inputs with covariance S.
    X = np.random.normal(size=(n,p)) @ Ssqrt
    X_ts = np.random.normal(size=(n_ts,p)) @ Ssqrt
    true_nn = nn_fc(input_dim=p, output_dim=output_dim, hidden_dim=hidden_dim_true,
                nonlin='relu', bias=False)
    f = lambda x: true_nn(torch.tensor(x, dtype=torch.float)).numpy()
    # Labels = true network output + Gaussian noise.
    with torch.no_grad():
        y = f(X) + np.random.normal(scale=np.sqrt(var_noise), size=(n,1))
        y_ts = f(X_ts) + np.random.normal(scale=np.sqrt(var_noise), size=(n_ts,1))
    # NOTE(review): 'device' is computed but never used in this script.
    device = 'cuda' if torch.cuda.is_available() else 'cpu'
    for r in range(runs):
        # NOTE(review): this overrides the lr = 1e-3 configured above — confirm intended.
        lr = 4e-4
        #second derivative of the kernel
        ntk = fc_kernel(input_dim=p, hidden_dim=hidden_dim, output_dim=output_dim)
        # NOTE(review): lam3 is unpacked but never used below.
        lam1, lam2, lam3 = ntk.reg_from_lam(S, lam, eq_kernel=False)
        t0 = time()
        #%% scaled linear fit
        ntk.scaled_linear_fit(X, y, lam1=lam1, lam2=lam2, bs=bs, lr=lr, n_epoch=n_epoch,
                       optimizer=optimizer, test_samples=(X_ts, y_ts),
                       scheduler_step=scheduler_step, momentum=momentum)
        #%% NN fit
        ntk.nn_fit(X, y, lam=lam, bs=bs, lr=lr, n_epoch=n_epoch, optimizer=optimizer,
                   remove_f0=remove_f0, around_init=around_init, test_samples=(X_ts, y_ts),
                   scheduler_step=scheduler_step, momentum=momentum)
        t1 = time()
        # Plot test-error trajectories of the two fits.
        plt.figure()
        plt.plot(ntk.test_loss_list_lin, label='linear model')
        plt.plot(ntk.test_loss_list_nn, label='neural network')
        plt.xlabel('epochs')
        plt.ylabel('average test error')
        plt.legend()
        plt.savefig('GD_equivalence.png', dpi=600)
| [
"ntk_equivalence.nn_fc",
"matplotlib.pyplot.plot",
"ntk_equivalence.fc_kernel",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.ylabel",
"time.time",
"matplotlib.pyplot.style.use",
"scipy.linalg.sqrtm",
"torch.cuda.is_available",
"matplotlib.pyplot.figure",
"numpy.random.normal",
"numpy.eye",
... | [((308, 331), 'matplotlib.pyplot.style.use', 'plt.style.use', (['"""ggplot"""'], {}), "('ggplot')\n", (321, 331), True, 'import matplotlib.pyplot as plt\n'), ((672, 681), 'numpy.eye', 'np.eye', (['p'], {}), '(p)\n', (678, 681), True, 'import numpy as np\n'), ((716, 724), 'scipy.linalg.sqrtm', 'sqrtm', (['S'], {}), '(S)\n', (721, 724), False, 'from scipy.linalg import sqrtm\n'), ((1432, 1532), 'ntk_equivalence.nn_fc', 'nn_fc', ([], {'input_dim': 'p', 'output_dim': 'output_dim', 'hidden_dim': 'hidden_dim_true', 'nonlin': '"""relu"""', 'bias': '(False)'}), "(input_dim=p, output_dim=output_dim, hidden_dim=hidden_dim_true,\n nonlin='relu', bias=False)\n", (1437, 1532), False, 'from ntk_equivalence import fc_kernel, nn_fc\n'), ((1322, 1351), 'numpy.random.normal', 'np.random.normal', ([], {'size': '(n, p)'}), '(size=(n, p))\n', (1338, 1351), True, 'import numpy as np\n'), ((1371, 1403), 'numpy.random.normal', 'np.random.normal', ([], {'size': '(n_ts, p)'}), '(size=(n_ts, p))\n', (1387, 1403), True, 'import numpy as np\n'), ((1626, 1641), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (1639, 1641), False, 'import torch\n'), ((1839, 1864), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (1862, 1864), False, 'import torch\n'), ((1987, 2055), 'ntk_equivalence.fc_kernel', 'fc_kernel', ([], {'input_dim': 'p', 'hidden_dim': 'hidden_dim', 'output_dim': 'output_dim'}), '(input_dim=p, hidden_dim=hidden_dim, output_dim=output_dim)\n', (1996, 2055), False, 'from ntk_equivalence import fc_kernel, nn_fc\n'), ((2150, 2156), 'time.time', 'time', ([], {}), '()\n', (2154, 2156), False, 'from time import time\n'), ((2719, 2725), 'time.time', 'time', ([], {}), '()\n', (2723, 2725), False, 'from time import time\n'), ((2745, 2757), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (2755, 2757), True, 'import matplotlib.pyplot as plt\n'), ((2767, 2821), 'matplotlib.pyplot.plot', 'plt.plot', (['ntk.test_loss_list_lin'], {'label': '"""linear 
model"""'}), "(ntk.test_loss_list_lin, label='linear model')\n", (2775, 2821), True, 'import matplotlib.pyplot as plt\n'), ((2831, 2886), 'matplotlib.pyplot.plot', 'plt.plot', (['ntk.test_loss_list_nn'], {'label': '"""neural network"""'}), "(ntk.test_loss_list_nn, label='neural network')\n", (2839, 2886), True, 'import matplotlib.pyplot as plt\n'), ((2896, 2916), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""epochs"""'], {}), "('epochs')\n", (2906, 2916), True, 'import matplotlib.pyplot as plt\n'), ((2926, 2958), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""average test error"""'], {}), "('average test error')\n", (2936, 2958), True, 'import matplotlib.pyplot as plt\n'), ((2968, 2980), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (2978, 2980), True, 'import matplotlib.pyplot as plt\n'), ((2990, 3032), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""GD_equivalence.png"""'], {'dpi': '(600)'}), "('GD_equivalence.png', dpi=600)\n", (3001, 3032), True, 'import matplotlib.pyplot as plt\n'), ((1572, 1606), 'torch.tensor', 'torch.tensor', (['x'], {'dtype': 'torch.float'}), '(x, dtype=torch.float)\n', (1584, 1606), False, 'import torch\n'), ((1686, 1704), 'numpy.sqrt', 'np.sqrt', (['var_noise'], {}), '(var_noise)\n', (1693, 1704), True, 'import numpy as np\n'), ((1767, 1785), 'numpy.sqrt', 'np.sqrt', (['var_noise'], {}), '(var_noise)\n', (1774, 1785), True, 'import numpy as np\n')] |
import numpy as np
from scipy import optimize
def lognorm_reff(mu, sigma2):
    """Effective radius of a lognormal size distribution with parameters (mu, sigma2)."""
    exponent = mu + 2.5 * sigma2
    return np.exp(exponent)
def lognorm_sigma2eff(mu, sigma2):
    """Effective variance of a lognormal size distribution with parameters (mu, sigma2)."""
    numerator = np.exp(4.*mu + 8.*sigma2) - np.exp(4.*mu + 7.*sigma2)
    denominator = np.exp(2.*mu + 2.*sigma2)
    return numerator / denominator
def lognorm_find_mu_sigma2(reff, sigma2eff):
    """Invert (reff, sigma2eff) -> (mu, sigma2) for a lognormal distribution.

    Solves the two nonlinear equations numerically; the unknowns are
    parameterised as squares to keep mu and sigma2 non-negative.
    """
    def residuals(params):
        mu_trial = params[0]**2.
        sigma2_trial = params[1]**2.
        return [
            lognorm_reff(mu_trial, sigma2_trial) - reff,
            lognorm_sigma2eff(mu_trial, sigma2_trial) - sigma2eff
        ]
    initial_guess = [np.sqrt(np.log(reff)), np.sqrt(np.log(sigma2eff))]
    res = optimize.root(residuals, initial_guess)
    return res.x[0]**2., res.x[1]**2.
def lognorm_pdf(r, mu, sigma):
    """Lognormal density shape at r (note: the 1/(sigma*sqrt(2*pi)) constant is omitted)."""
    squared_dev = (np.log(r) - mu)**2.
    return 1./r * np.exp(-squared_dev / (2.*sigma**2.))
def gamma_pdf(r, reff, sigmaeff):
    """Gamma size-distribution shape at r, parameterised by effective radius/width
    (normalisation constant omitted)."""
    nu = (sigmaeff / reff)**2.
    power = (1. - 3.*nu) / nu
    return r**power * np.exp(-r / (reff*nu))
def reff_num(r, n):
    """Effective radius of a binned distribution n(r): sum(r^3 n) / sum(r^2 n)."""
    third_moment = np.sum(r**3. * n)
    second_moment = np.sum(r**2. * n)
    return third_moment / second_moment
def sigma2eff_num(r, n, reff):
    """Effective variance of a binned distribution n(r) about the given reff."""
    weighted_spread = np.sum((r - reff)**2. * r**2. * n)
    second_moment = np.sum(r**2. * n)
    return weighted_spread / second_moment
def calc_sd(r, n):
    """Weighted (population) standard deviation of r with weights n."""
    mu = calc_mean(r, n)
    variance = np.sum((r - mu)**2. * n) / np.sum(n)
    return np.sqrt(variance)
def calc_mean(r, n):
    """Weighted mean of r with weights n."""
    weighted_total = np.sum(r * n)
    weight_sum = np.sum(n)
    return weighted_total / weight_sum
| [
"numpy.log",
"numpy.sum",
"numpy.exp"
] | [((85, 116), 'numpy.exp', 'np.exp', (['(mu + 5.0 / 2.0 * sigma2)'], {}), '(mu + 5.0 / 2.0 * sigma2)\n', (91, 116), True, 'import numpy as np\n'), ((211, 242), 'numpy.exp', 'np.exp', (['(2.0 * mu + 2.0 * sigma2)'], {}), '(2.0 * mu + 2.0 * sigma2)\n', (217, 242), True, 'import numpy as np\n'), ((755, 782), 'numpy.exp', 'np.exp', (['(-r / (reff * nueff))'], {}), '(-r / (reff * nueff))\n', (761, 782), True, 'import numpy as np\n'), ((808, 828), 'numpy.sum', 'np.sum', (['(r ** 3.0 * n)'], {}), '(r ** 3.0 * n)\n', (814, 828), True, 'import numpy as np\n'), ((824, 844), 'numpy.sum', 'np.sum', (['(r ** 2.0 * n)'], {}), '(r ** 2.0 * n)\n', (830, 844), True, 'import numpy as np\n'), ((880, 920), 'numpy.sum', 'np.sum', (['((r - reff) ** 2.0 * r ** 2.0 * n)'], {}), '((r - reff) ** 2.0 * r ** 2.0 * n)\n', (886, 920), True, 'import numpy as np\n'), ((911, 931), 'numpy.sum', 'np.sum', (['(r ** 2.0 * n)'], {}), '(r ** 2.0 * n)\n', (917, 931), True, 'import numpy as np\n'), ((1053, 1066), 'numpy.sum', 'np.sum', (['(r * n)'], {}), '(r * n)\n', (1059, 1066), True, 'import numpy as np\n'), ((1065, 1074), 'numpy.sum', 'np.sum', (['n'], {}), '(n)\n', (1071, 1074), True, 'import numpy as np\n'), ((156, 187), 'numpy.exp', 'np.exp', (['(4.0 * mu + 8.0 * sigma2)'], {}), '(4.0 * mu + 8.0 * sigma2)\n', (162, 187), True, 'import numpy as np\n'), ((184, 215), 'numpy.exp', 'np.exp', (['(4.0 * mu + 7.0 * sigma2)'], {}), '(4.0 * mu + 7.0 * sigma2)\n', (190, 215), True, 'import numpy as np\n'), ((987, 1016), 'numpy.sum', 'np.sum', (['((r - mean) ** 2.0 * n)'], {}), '((r - mean) ** 2.0 * n)\n', (993, 1016), True, 'import numpy as np\n'), ((1012, 1021), 'numpy.sum', 'np.sum', (['n'], {}), '(n)\n', (1018, 1021), True, 'import numpy as np\n'), ((459, 471), 'numpy.log', 'np.log', (['reff'], {}), '(reff)\n', (465, 471), True, 'import numpy as np\n'), ((482, 499), 'numpy.log', 'np.log', (['sigma2eff'], {}), '(sigma2eff)\n', (488, 499), True, 'import numpy as np\n'), ((619, 628), 'numpy.log', 'np.log', 
(['r'], {}), '(r)\n', (625, 628), True, 'import numpy as np\n')] |
"""
http://www.swharden.com/blog/2013-05-09-realtime-fft-audio-visualization-with-python/
http://julip.co/2012/05/arduino-python-soundlight-spectrum/
"""
import ui_plot
import sys
import numpy as np
from PyQt5 import QtCore, QtWidgets
import qwt as Qwt
from recorder import *
# PyQt4 exposed QString.fromUtf8; in PyQt5 strings are native unicode and
# QString no longer exists, so fall back to an identity function.
try:
    _fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
    _fromUtf8 = lambda s: s
def plot_audio_and_detect_beats():
    """Refresh the BPM labels and the correlation curve from freshly
    captured audio; does nothing while no new samples are available."""
    if not input_recorder.has_new_audio:
        return
    # Two independent tempo estimates: DWT-based and librosa-based.
    autocorr, bpm_dwt = input_recorder.bpm()
    ac, bpm_librosa = input_recorder.bpm_librosa()
    bpm_librosa = np.mean(bpm_librosa)
    # Mark the audio buffer consumed before updating the UI.
    input_recorder.has_new_audio = False
    y_max = np.amax(autocorr)
    uiplot.qwtPlot.setAxisScale(uiplot.qwtPlot.yLeft, 0, y_max)
    # uiplot.qwtPlot.setAxisScale(uiplot.qwtPlot.yLeft, 0, np.amax(ac))
    # Show both estimates on the buttons used as read-outs.
    uiplot.btnC.setText(_fromUtf8("BPM Librosa: {:3.2f}".format(bpm_librosa)))
    uiplot.btnD.setText(_fromUtf8("BPM DWT: {:3.2f}".format(bpm_dwt)))
    x_axis = np.arange(len(autocorr))
    c.setData(x_axis, autocorr)
    # c.setData(np.arange(len(ac)),ac)
    uiplot.qwtPlot.replot()
if __name__ == "__main__":
    app = QtWidgets.QApplication(sys.argv)
    win_plot = ui_plot.QtWidgets.QMainWindow()
    uiplot = ui_plot.Ui_win_plot()
    uiplot.setupUi(win_plot)
    # Curve object that plot_audio_and_detect_beats() fills with the
    # correlation data on every timer tick.
    c = Qwt.QwtPlotCurve()
    c.attach(uiplot.qwtPlot)
    uiplot.qwtPlot.setAxisScale(uiplot.qwtPlot.yLeft, 0, 100000)
    # Poll for new audio on a fast repeating timer (QTimer interval is in ms).
    uiplot.timer = QtCore.QTimer()
    uiplot.timer.start(1.0)
    uiplot.timer.timeout.connect(plot_audio_and_detect_beats)
    # Start capturing audio in the background (InputRecorder from recorder).
    input_recorder = InputRecorder()
    input_recorder.start()
    ### DISPLAY WINDOWS
    win_plot.show()
    code = app.exec_()
    # clean up
    input_recorder.close()
    sys.exit(code)
"ui_plot.Ui_win_plot",
"PyQt5.QtCore.QTimer",
"qwt.QwtPlotCurve",
"numpy.amax",
"ui_plot.QtWidgets.QMainWindow",
"numpy.mean",
"PyQt5.QtWidgets.QApplication",
"sys.exit"
] | [((571, 585), 'numpy.mean', 'np.mean', (['tempo'], {}), '(tempo)\n', (578, 585), True, 'import numpy as np\n'), ((1105, 1137), 'PyQt5.QtWidgets.QApplication', 'QtWidgets.QApplication', (['sys.argv'], {}), '(sys.argv)\n', (1127, 1137), False, 'from PyQt5 import QtCore, QtWidgets\n'), ((1158, 1189), 'ui_plot.QtWidgets.QMainWindow', 'ui_plot.QtWidgets.QMainWindow', ([], {}), '()\n', (1187, 1189), False, 'import ui_plot\n'), ((1203, 1224), 'ui_plot.Ui_win_plot', 'ui_plot.Ui_win_plot', ([], {}), '()\n', (1222, 1224), False, 'import ui_plot\n'), ((1267, 1285), 'qwt.QwtPlotCurve', 'Qwt.QwtPlotCurve', ([], {}), '()\n', (1283, 1285), True, 'import qwt as Qwt\n'), ((1411, 1426), 'PyQt5.QtCore.QTimer', 'QtCore.QTimer', ([], {}), '()\n', (1424, 1426), False, 'from PyQt5 import QtCore, QtWidgets\n'), ((1706, 1720), 'sys.exit', 'sys.exit', (['code'], {}), '(code)\n', (1714, 1720), False, 'import sys\n'), ((686, 706), 'numpy.amax', 'np.amax', (['correlation'], {}), '(correlation)\n', (693, 706), True, 'import numpy as np\n')] |
import time
import numpy as np
import tensorflow as tf
import data_saving as dts
class CMESolver(object):
    """The fully connected neural network model."""
    # Trains the deep solver for a reaction network: simulates (or reloads)
    # sampled trajectories, fits the NonsharedModel by SGD on a clipped
    # quadratic/linear loss, and exposes parameter-sensitivity estimation.
    def __init__(self, network, config_data):
        """Set up optimizer, training/validation trajectory data and model.

        network     -- reaction-network object providing trajectory sampling,
                       the output function and propensity sensitivities
        config_data -- nested dict with 'net_config' and
                       'reaction_network_config' sections
        """
        self.network = network
        self.config_data = config_data
        # Piecewise-constant learning-rate schedule fed to Adam.
        lr_schedule = tf.keras.optimizers.schedules.PiecewiseConstantDecay(
            config_data['net_config']['lr_boundaries'], config_data['net_config']['lr_values'])
        self.optimizer = tf.keras.optimizers.Adam(learning_rate=lr_schedule, epsilon=1e-8)
        self.total_num_simulated_trajectories = self.config_data['net_config']['valid_size'] \
            + self.config_data['net_config']['batch_size']
        # create data for training and validation..unless it is already available
        if config_data['net_config']['training_samples_needed'] == "True":
            start_time = time.time()
            self.training_data = self.network.generate_sampled_rtc_trajectories(
                self.config_data['reaction_network_config']['final_time'],
                self.config_data['reaction_network_config']['num_time_interval'],
                self.config_data['net_config']['batch_size'])
            self.training_data_cpu_time = (time.time() - start_time)
            print("Time needed to generate training trajectories: %3u" % self.training_data_cpu_time)
            # Persist the freshly simulated data so later runs can reload it.
            dts.save_sampled_trajectories(config_data['reaction_network_config']['output_folder'] + "/",
                                          self.training_data, sample_type="training")
            dts.save_cpu_time(config_data['reaction_network_config']['output_folder'] + "/", self.training_data_cpu_time
                              , training=True)
        else:
            # Reuse previously simulated training trajectories from disk.
            self.training_data = dts.load_save_sampled_trajectories(
                config_data['reaction_network_config']['output_folder']
                + "/", sample_type="training")
            self.training_data_cpu_time = dts.load_cpu_time(config_data['reaction_network_config']['output_folder']
                                                            + "/", training=True)
        if config_data['net_config']['validation_samples_needed'] == "True":
            start_time = time.time()
            self.valid_data = self.network.generate_sampled_rtc_trajectories(
                self.config_data['reaction_network_config']['final_time'],
                self.config_data['reaction_network_config']['num_time_interval'],
                self.config_data['net_config']['valid_size'])
            self.validation_data_cpu_time = (time.time() - start_time)
            self.total_num_simulated_trajectories = self.config_data['net_config']['valid_size'] + \
                                                    self.config_data['net_config']['batch_size']
            print("Time needed to generate validation trajectories: %3u" % (time.time() - start_time))
            # NOTE(review): this saves self.training_data under the
            # "validation" tag rather than self.valid_data -- looks like a
            # possible bug upstream; confirm before relying on saved files.
            dts.save_sampled_trajectories(config_data['reaction_network_config']['output_folder'] + "/",
                                          self.training_data,
                                          sample_type="validation")
            dts.save_cpu_time(config_data['reaction_network_config']['output_folder'] + "/",
                              self.validation_data_cpu_time, training=False)
        else:
            self.valid_data = dts.load_save_sampled_trajectories(config_data['reaction_network_config']['output_folder']
                                                               + "/", sample_type="validation")
            self.validation_data_cpu_time = dts.load_cpu_time(config_data['reaction_network_config']['output_folder']
                                                              + "/", training=False)
        # set initial values for functions
        times, states_trajectories, martingale_trajectories = self.training_data
        yvals = self.network.output_function(states_trajectories[:, -1, :])
        y0 = tf.reduce_mean(yvals, axis=0)
        # set func_clipping_thresholds
        self.delta_clip = np.ones(shape=[self.network.output_function_size], dtype="float64") + \
                          tf.math.reduce_mean(yvals, axis=0) + 2 * tf.math.reduce_std(yvals, axis=0)
        self.model = NonsharedModel(network, config_data, y0, self.delta_clip)
        if config_data['net_config']['use_previous_training_weights'] == "True":
            # Warm-start from weights saved by a previous run.
            filename = config_data['reaction_network_config']['output_folder'] + "/" + "trained_weights"
            self.model.load_weights(filename)
        self.y_init = self.model.y_init
    def train(self):
        """Run SGD for the configured number of iterations.

        Every `logging_frequency` steps the validation loss and the current
        y_init estimate are recorded and printed.

        Returns (training_history, function_value_data,
        total_num_simulated_trajectories).
        """
        start_time = time.time()
        training_history = []
        function_value_data = []
        num_iterations = self.config_data['net_config']['num_iterations']
        # num_batch_size = self.config_data['net_config']['batch_size']
        logging_frequency = self.config_data['net_config']['logging_frequency']
        # training_data_reset_frequency = self.config_data['net_config']['training_data_reset_frequency']
        # begin sgd iteration
        for step in range(1, num_iterations + 1):
            self.train_step(self.training_data)
            if step % logging_frequency == 0:
                loss = self.loss_fn(self.valid_data, training=False).numpy()
                y_init = self.y_init.numpy()
                # Reported time also accounts for data generation/loading.
                elapsed_time = time.time() - start_time + self.validation_data_cpu_time + self.training_data_cpu_time
                training_history.append([step, loss, elapsed_time])
                function_value_data.append(y_init)
                print("step: %5u, loss: %.4e, elapsed time: %3u" % (
                    step, loss, elapsed_time))
                print_array_nicely(y_init, "Estimated Value")
            # if self.config_data['net_config']['allow_batch_reset'] == "True" and \
            #         step % training_data_reset_frequency == 0:
            #     self.training_data = self.network.generate_sampled_rtc_trajectories(
            #         self.config_data['reaction_network_config']['final_time'],
            #         self.config_data['reaction_network_config']['num_time_interval'],
            #         num_batch_size)
            #     print('New training data generated!')
            #     self.total_num_simulated_trajectories += num_batch_size
        return np.array(training_history), np.array(function_value_data), self.total_num_simulated_trajectories
    def loss_fn(self, inputs, training):
        """Huber-like loss on the terminal value: quadratic where the
        delta_clip-scaled error is below 1, linear beyond; summed over the
        output components."""
        times, states_trajectories, martingale_trajectories = inputs
        y_terminal = self.model(inputs, training)
        y_comp = self.network.output_function(states_trajectories[:, -1, :])
        delta = (y_terminal - y_comp) / self.delta_clip
        loss = tf.reduce_mean(tf.where(tf.abs(delta) < 1, tf.square(delta), 2 * tf.abs(delta) - 1), axis=0)
        return tf.reduce_sum(loss)
    def grad(self, inputs, training):
        """Return d(loss_fn)/d(model.trainable_variables) for `inputs`."""
        with tf.GradientTape(persistent=True) as tape:
            loss = self.loss_fn(inputs, training)
        grad = tape.gradient(loss, self.model.trainable_variables)
        del tape  # persistent tapes must be released explicitly
        return grad
    @tf.function
    def train_step(self, train_data):
        """One optimizer step on a batch of trajectories (graph-compiled)."""
        grad = self.grad(train_data, training=True)
        self.optimizer.apply_gradients(zip(grad, self.model.trainable_variables))
    def estimate_parameter_sensitivities(self):
        """Estimate the Jacobian of the output with respect to the model
        parameters, using the trained model on the training trajectories."""
        times, states_trajectories, martingale_trajectories = self.training_data
        return self.model.compute_parameter_jacobian(states_trajectories, times, len(self.network.parameter_dict),
                                                     training=False)
class NonsharedModel(tf.keras.Model):
    """Deep solver network.

    Starting from the trainable initial estimate `y_init`, `call` propagates
    y along each trajectory by integrating sub-network outputs against the
    martingale increments; one FeedForwardSubNet per temporal segment
    (hence "non-shared").
    """
    def __init__(self, network, config_data, y0, delta_clip):
        super(NonsharedModel, self).__init__()
        self.network = network
        self.delta_clip = delta_clip
        self.stop_time = config_data['reaction_network_config']['final_time']
        self.num_exponential_features = config_data['net_config']['num_exponential_features']
        self.num_temporal_dnns = config_data['net_config']['num_temporal_dnns']
        self.num_time_samples = config_data['reaction_network_config']['num_time_interval']
        # Trainable initial value of the target functional.
        self.y_init = tf.Variable(y0)
        # Trainable parameters of the exponential/oscillatory time features
        # fed to the sub-networks (real parts start negative, i.e. decaying).
        self.eigval_real = tf.Variable(-np.random.uniform(0, 1, size=[1, self.num_exponential_features]),
                                       dtype="float64")
        self.eigval_imag = tf.Variable(np.zeros([1, self.num_exponential_features], dtype="float64"))
        self.eigval_phase = tf.Variable(np.zeros([1, self.num_exponential_features], dtype="float64"))
        # One feed-forward sub-network per temporal segment.
        self.subnet = [FeedForwardSubNet(self.network.num_reactions, self.network.output_function_size, config_data)
                       for _ in range(self.num_temporal_dnns)]
    def call(self, inputs, training):
        """Return the terminal value of y for each trajectory in the batch."""
        times, states_trajectories, martingale_trajectories = inputs
        batch_size = tf.shape(martingale_trajectories)[0]
        all_one_vec = tf.ones(shape=tf.stack([batch_size, 1]), dtype="float64")
        # Broadcast the shared initial value to every batch element.
        y = tf.matmul(all_one_vec, self.y_init[None, :])
        for t in range(0, self.num_time_samples - 1):
            time_left = self.stop_time - times[t]
            # Sub-network responsible for this portion of the time grid.
            temporal_dnn = int(t * self.num_temporal_dnns / self.num_time_samples)
            features_real = tf.reshape(tf.tile(tf.exp(self.eigval_real * time_left), [batch_size, 1]),
                                       [batch_size, self.num_exponential_features])
            features_imag = tf.reshape(
                tf.tile(tf.sin(self.eigval_imag * time_left + self.eigval_phase), [batch_size, 1]),
                [batch_size, self.num_exponential_features])
            # Network input: current state augmented with the time features.
            inputs = tf.stack(tf.unstack(states_trajectories[:, t, :], axis=-1)
                              + tf.unstack(features_real, axis=-1) + tf.unstack(features_imag, axis=-1), axis=1)
            z = self.subnet[temporal_dnn](inputs, training)
            z = tf.reshape(z, shape=[batch_size, self.network.output_function_size,
                                 self.network.num_reactions])
            martingale_increment = tf.expand_dims(martingale_trajectories[:, t + 1, :]
                                                  - martingale_trajectories[:, t, :], axis=1)
            # Accumulate z integrated against the martingale increment.
            y = y + tf.reduce_sum(z * martingale_increment, axis=2)
        return y
    def compute_parameter_jacobian(self, states_trajectories, times, num_params, training):
        """Batch-averaged Jacobian of the output w.r.t. the `num_params`
        network parameters, accumulated over the time grid."""
        batch_size = tf.shape(states_trajectories)[0]
        jacobian = tf.zeros(shape=tf.stack([batch_size, num_params, self.network.output_function_size]),
                            dtype="float64")
        for t in range(0, self.num_time_samples - 1):
            time_left = self.stop_time - times[t]
            temporal_dnn = int(t * self.num_temporal_dnns / self.num_time_samples)
            # Same feature construction as in call().
            features_real = tf.reshape(tf.tile(tf.exp(self.eigval_real * time_left), [batch_size, 1]),
                                       [batch_size, self.num_exponential_features])
            features_imag = tf.reshape(
                tf.tile(tf.sin(self.eigval_imag * time_left + self.eigval_phase), [batch_size, 1]),
                [batch_size, self.num_exponential_features])
            inputs = tf.stack(tf.unstack(states_trajectories[:, t, :], axis=-1)
                              + tf.unstack(features_real, axis=-1) + tf.unstack(features_imag, axis=-1), axis=1)
            z = self.subnet[temporal_dnn](inputs, training)
            z = tf.reshape(z, shape=[batch_size, self.network.output_function_size,
                                 self.network.num_reactions])
            propensity_jacobian = tf.stack([self.network.propensity_sensitivity_matrix(states_trajectories[i, t, :])
                                           for i in range(states_trajectories[:, t, :].shape[0])], axis=0)
            # Riemann-sum accumulation over the (possibly non-uniform) grid.
            jacobian = jacobian + tf.matmul(propensity_jacobian, z, transpose_b=True) * (times[t + 1] - times[t])
        return tf.reduce_mean(jacobian, axis=0)
class FeedForwardSubNet(tf.keras.Model):
    """Plain fully connected network mapping the augmented state to a
    vector of size num_reactions * output_function_size.

    All weights are zero-initialised, so the network initially outputs
    zeros (i.e. the model starts at its y_init estimate).
    """
    def get_config(self):
        # Serialisation config is not needed here; returns None.
        pass
    def __init__(self, num_reactions, output_function_size, config_data):
        super(FeedForwardSubNet, self).__init__()
        num_hiddens = config_data['net_config']['num_nodes_per_layer']
        num_layers = config_data['net_config']['num_hidden_layers']
        # Hidden layers with the configured activation.
        self.dense_layers = [tf.keras.layers.Dense(num_hiddens,
                                                   use_bias=True,
                                                   activation=config_data['net_config']['activation_function'],
                                                   kernel_initializer='zeros',
                                                   bias_initializer='zeros')
                             for _ in range(num_layers)]
        # final output should be a value of dimension num_reactions*output_function_size
        self.dense_layers.append(tf.keras.layers.Dense(num_reactions * output_function_size,
                                                    use_bias=True, activation=None,
                                                    kernel_initializer='zeros',
                                                    bias_initializer='zeros'))
    def call(self, x, training):
        # Apply all hidden layers, then the linear output layer.
        for i in range(len(self.dense_layers) - 1):
            x = self.dense_layers[i](x)
        x = self.dense_layers[-1](x)
        return x
def print_array_nicely(y, name):
    """Print array ``y`` on one line as ``name : (v1, v2, ...)`` with three
    decimal places per entry.

    Fix over the original: an empty array no longer raises IndexError
    (the old code unconditionally indexed ``y[y.size - 1]``); it now
    prints ``name : ()``.
    """
    flat = y.reshape(-1)  # flatten regardless of the input's shape
    body = ', '.join('%.3f' % v for v in flat)
    print(name, ":", end=' (')
    print(body, end=')\n')
| [
"tensorflow.reduce_sum",
"tensorflow.keras.layers.Dense",
"tensorflow.reshape",
"tensorflow.math.reduce_std",
"numpy.ones",
"data_saving.load_save_sampled_trajectories",
"tensorflow.matmul",
"tensorflow.Variable",
"data_saving.save_sampled_trajectories",
"tensorflow.abs",
"tensorflow.sin",
"te... | [((299, 440), 'tensorflow.keras.optimizers.schedules.PiecewiseConstantDecay', 'tf.keras.optimizers.schedules.PiecewiseConstantDecay', (["config_data['net_config']['lr_boundaries']", "config_data['net_config']['lr_values']"], {}), "(config_data[\n 'net_config']['lr_boundaries'], config_data['net_config']['lr_values'])\n", (351, 440), True, 'import tensorflow as tf\n'), ((474, 540), 'tensorflow.keras.optimizers.Adam', 'tf.keras.optimizers.Adam', ([], {'learning_rate': 'lr_schedule', 'epsilon': '(1e-08)'}), '(learning_rate=lr_schedule, epsilon=1e-08)\n', (498, 540), True, 'import tensorflow as tf\n'), ((3990, 4019), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['yvals'], {'axis': '(0)'}), '(yvals, axis=0)\n', (4004, 4019), True, 'import tensorflow as tf\n'), ((4652, 4663), 'time.time', 'time.time', ([], {}), '()\n', (4661, 4663), False, 'import time\n'), ((6874, 6893), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['loss'], {}), '(loss)\n', (6887, 6893), True, 'import tensorflow as tf\n'), ((8230, 8245), 'tensorflow.Variable', 'tf.Variable', (['y0'], {}), '(y0)\n', (8241, 8245), True, 'import tensorflow as tf\n'), ((9051, 9095), 'tensorflow.matmul', 'tf.matmul', (['all_one_vec', 'self.y_init[None, :]'], {}), '(all_one_vec, self.y_init[None, :])\n', (9060, 9095), True, 'import tensorflow as tf\n'), ((11970, 12002), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['jacobian'], {'axis': '(0)'}), '(jacobian, axis=0)\n', (11984, 12002), True, 'import tensorflow as tf\n'), ((912, 923), 'time.time', 'time.time', ([], {}), '()\n', (921, 923), False, 'import time\n'), ((1407, 1548), 'data_saving.save_sampled_trajectories', 'dts.save_sampled_trajectories', (["(config_data['reaction_network_config']['output_folder'] + '/')", 'self.training_data'], {'sample_type': '"""training"""'}), "(config_data['reaction_network_config'][\n 'output_folder'] + '/', self.training_data, sample_type='training')\n", (1436, 1548), True, 'import data_saving as dts\n'), ((1598, 1726), 
'data_saving.save_cpu_time', 'dts.save_cpu_time', (["(config_data['reaction_network_config']['output_folder'] + '/')", 'self.training_data_cpu_time'], {'training': '(True)'}), "(config_data['reaction_network_config']['output_folder'] +\n '/', self.training_data_cpu_time, training=True)\n", (1615, 1726), True, 'import data_saving as dts\n'), ((1801, 1927), 'data_saving.load_save_sampled_trajectories', 'dts.load_save_sampled_trajectories', (["(config_data['reaction_network_config']['output_folder'] + '/')"], {'sample_type': '"""training"""'}), "(config_data['reaction_network_config'][\n 'output_folder'] + '/', sample_type='training')\n", (1835, 1927), True, 'import data_saving as dts\n'), ((1998, 2097), 'data_saving.load_cpu_time', 'dts.load_cpu_time', (["(config_data['reaction_network_config']['output_folder'] + '/')"], {'training': '(True)'}), "(config_data['reaction_network_config']['output_folder'] +\n '/', training=True)\n", (2015, 2097), True, 'import data_saving as dts\n'), ((2256, 2267), 'time.time', 'time.time', ([], {}), '()\n', (2265, 2267), False, 'import time\n'), ((2949, 3092), 'data_saving.save_sampled_trajectories', 'dts.save_sampled_trajectories', (["(config_data['reaction_network_config']['output_folder'] + '/')", 'self.training_data'], {'sample_type': '"""validation"""'}), "(config_data['reaction_network_config'][\n 'output_folder'] + '/', self.training_data, sample_type='validation')\n", (2978, 3092), True, 'import data_saving as dts\n'), ((3184, 3315), 'data_saving.save_cpu_time', 'dts.save_cpu_time', (["(config_data['reaction_network_config']['output_folder'] + '/')", 'self.validation_data_cpu_time'], {'training': '(False)'}), "(config_data['reaction_network_config']['output_folder'] +\n '/', self.validation_data_cpu_time, training=False)\n", (3201, 3315), True, 'import data_saving as dts\n'), ((3386, 3514), 'data_saving.load_save_sampled_trajectories', 'dts.load_save_sampled_trajectories', 
(["(config_data['reaction_network_config']['output_folder'] + '/')"], {'sample_type': '"""validation"""'}), "(config_data['reaction_network_config'][\n 'output_folder'] + '/', sample_type='validation')\n", (3420, 3514), True, 'import data_saving as dts\n'), ((3619, 3719), 'data_saving.load_cpu_time', 'dts.load_cpu_time', (["(config_data['reaction_network_config']['output_folder'] + '/')"], {'training': '(False)'}), "(config_data['reaction_network_config']['output_folder'] +\n '/', training=False)\n", (3636, 3719), True, 'import data_saving as dts\n'), ((6360, 6386), 'numpy.array', 'np.array', (['training_history'], {}), '(training_history)\n', (6368, 6386), True, 'import numpy as np\n'), ((6388, 6417), 'numpy.array', 'np.array', (['function_value_data'], {}), '(function_value_data)\n', (6396, 6417), True, 'import numpy as np\n'), ((6946, 6978), 'tensorflow.GradientTape', 'tf.GradientTape', ([], {'persistent': '(True)'}), '(persistent=True)\n', (6961, 6978), True, 'import tensorflow as tf\n'), ((8447, 8508), 'numpy.zeros', 'np.zeros', (['[1, self.num_exponential_features]'], {'dtype': '"""float64"""'}), "([1, self.num_exponential_features], dtype='float64')\n", (8455, 8508), True, 'import numpy as np\n'), ((8550, 8611), 'numpy.zeros', 'np.zeros', (['[1, self.num_exponential_features]'], {'dtype': '"""float64"""'}), "([1, self.num_exponential_features], dtype='float64')\n", (8558, 8611), True, 'import numpy as np\n'), ((8922, 8955), 'tensorflow.shape', 'tf.shape', (['martingale_trajectories'], {}), '(martingale_trajectories)\n', (8930, 8955), True, 'import tensorflow as tf\n'), ((9940, 10041), 'tensorflow.reshape', 'tf.reshape', (['z'], {'shape': '[batch_size, self.network.output_function_size, self.network.num_reactions]'}), '(z, shape=[batch_size, self.network.output_function_size, self.\n network.num_reactions])\n', (9950, 10041), True, 'import tensorflow as tf\n'), ((10109, 10208), 'tensorflow.expand_dims', 'tf.expand_dims', (['(martingale_trajectories[:, t + 1, 
:] - martingale_trajectories[:, t, :])'], {'axis': '(1)'}), '(martingale_trajectories[:, t + 1, :] -\n martingale_trajectories[:, t, :], axis=1)\n', (10123, 10208), True, 'import tensorflow as tf\n'), ((10454, 10483), 'tensorflow.shape', 'tf.shape', (['states_trajectories'], {}), '(states_trajectories)\n', (10462, 10483), True, 'import tensorflow as tf\n'), ((11482, 11583), 'tensorflow.reshape', 'tf.reshape', (['z'], {'shape': '[batch_size, self.network.output_function_size, self.network.num_reactions]'}), '(z, shape=[batch_size, self.network.output_function_size, self.\n network.num_reactions])\n', (11492, 11583), True, 'import tensorflow as tf\n'), ((12378, 12551), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['num_hiddens'], {'use_bias': '(True)', 'activation': "config_data['net_config']['activation_function']", 'kernel_initializer': '"""zeros"""', 'bias_initializer': '"""zeros"""'}), "(num_hiddens, use_bias=True, activation=config_data[\n 'net_config']['activation_function'], kernel_initializer='zeros',\n bias_initializer='zeros')\n", (12399, 12551), True, 'import tensorflow as tf\n'), ((12926, 13075), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['(num_reactions * output_function_size)'], {'use_bias': '(True)', 'activation': 'None', 'kernel_initializer': '"""zeros"""', 'bias_initializer': '"""zeros"""'}), "(num_reactions * output_function_size, use_bias=True,\n activation=None, kernel_initializer='zeros', bias_initializer='zeros')\n", (12947, 13075), True, 'import tensorflow as tf\n'), ((1267, 1278), 'time.time', 'time.time', ([], {}), '()\n', (1276, 1278), False, 'import time\n'), ((2610, 2621), 'time.time', 'time.time', ([], {}), '()\n', (2619, 2621), False, 'import time\n'), ((4085, 4152), 'numpy.ones', 'np.ones', ([], {'shape': '[self.network.output_function_size]', 'dtype': '"""float64"""'}), "(shape=[self.network.output_function_size], dtype='float64')\n", (4092, 4152), True, 'import numpy as np\n'), ((4183, 4217), 
'tensorflow.math.reduce_mean', 'tf.math.reduce_mean', (['yvals'], {'axis': '(0)'}), '(yvals, axis=0)\n', (4202, 4217), True, 'import tensorflow as tf\n'), ((4224, 4257), 'tensorflow.math.reduce_std', 'tf.math.reduce_std', (['yvals'], {'axis': '(0)'}), '(yvals, axis=0)\n', (4242, 4257), True, 'import tensorflow as tf\n'), ((6809, 6825), 'tensorflow.square', 'tf.square', (['delta'], {}), '(delta)\n', (6818, 6825), True, 'import tensorflow as tf\n'), ((8286, 8350), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(1)'], {'size': '[1, self.num_exponential_features]'}), '(0, 1, size=[1, self.num_exponential_features])\n', (8303, 8350), True, 'import numpy as np\n'), ((8995, 9020), 'tensorflow.stack', 'tf.stack', (['[batch_size, 1]'], {}), '([batch_size, 1])\n', (9003, 9020), True, 'import tensorflow as tf\n'), ((10275, 10322), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['(z * martingale_increment)'], {'axis': '(2)'}), '(z * martingale_increment, axis=2)\n', (10288, 10322), True, 'import tensorflow as tf\n'), ((10521, 10590), 'tensorflow.stack', 'tf.stack', (['[batch_size, num_params, self.network.output_function_size]'], {}), '([batch_size, num_params, self.network.output_function_size])\n', (10529, 10590), True, 'import tensorflow as tf\n'), ((6790, 6803), 'tensorflow.abs', 'tf.abs', (['delta'], {}), '(delta)\n', (6796, 6803), True, 'import tensorflow as tf\n'), ((9330, 9366), 'tensorflow.exp', 'tf.exp', (['(self.eigval_real * time_left)'], {}), '(self.eigval_real * time_left)\n', (9336, 9366), True, 'import tensorflow as tf\n'), ((9534, 9590), 'tensorflow.sin', 'tf.sin', (['(self.eigval_imag * time_left + self.eigval_phase)'], {}), '(self.eigval_imag * time_left + self.eigval_phase)\n', (9540, 9590), True, 'import tensorflow as tf\n'), ((9820, 9854), 'tensorflow.unstack', 'tf.unstack', (['features_imag'], {'axis': '(-1)'}), '(features_imag, axis=-1)\n', (9830, 9854), True, 'import tensorflow as tf\n'), ((10871, 10907), 'tensorflow.exp', 'tf.exp', 
(['(self.eigval_real * time_left)'], {}), '(self.eigval_real * time_left)\n', (10877, 10907), True, 'import tensorflow as tf\n'), ((11075, 11131), 'tensorflow.sin', 'tf.sin', (['(self.eigval_imag * time_left + self.eigval_phase)'], {}), '(self.eigval_imag * time_left + self.eigval_phase)\n', (11081, 11131), True, 'import tensorflow as tf\n'), ((11362, 11396), 'tensorflow.unstack', 'tf.unstack', (['features_imag'], {'axis': '(-1)'}), '(features_imag, axis=-1)\n', (11372, 11396), True, 'import tensorflow as tf\n'), ((11875, 11926), 'tensorflow.matmul', 'tf.matmul', (['propensity_jacobian', 'z'], {'transpose_b': '(True)'}), '(propensity_jacobian, z, transpose_b=True)\n', (11884, 11926), True, 'import tensorflow as tf\n'), ((2910, 2921), 'time.time', 'time.time', ([], {}), '()\n', (2919, 2921), False, 'import time\n'), ((6831, 6844), 'tensorflow.abs', 'tf.abs', (['delta'], {}), '(delta)\n', (6837, 6844), True, 'import tensorflow as tf\n'), ((9701, 9750), 'tensorflow.unstack', 'tf.unstack', (['states_trajectories[:, t, :]'], {'axis': '(-1)'}), '(states_trajectories[:, t, :], axis=-1)\n', (9711, 9750), True, 'import tensorflow as tf\n'), ((9783, 9817), 'tensorflow.unstack', 'tf.unstack', (['features_real'], {'axis': '(-1)'}), '(features_real, axis=-1)\n', (9793, 9817), True, 'import tensorflow as tf\n'), ((11243, 11292), 'tensorflow.unstack', 'tf.unstack', (['states_trajectories[:, t, :]'], {'axis': '(-1)'}), '(states_trajectories[:, t, :], axis=-1)\n', (11253, 11292), True, 'import tensorflow as tf\n'), ((11325, 11359), 'tensorflow.unstack', 'tf.unstack', (['features_real'], {'axis': '(-1)'}), '(features_real, axis=-1)\n', (11335, 11359), True, 'import tensorflow as tf\n'), ((5387, 5398), 'time.time', 'time.time', ([], {}), '()\n', (5396, 5398), False, 'import time\n')] |
import visdom
import numpy as np
class Visualizer():
    """Thin wrapper around a visdom connection that tracks a per-window x
    index so repeated calls append to the same line plot.

    Fixes over the original:
      * ``plot_many`` used ``dict.iteritems()``, which does not exist in
        Python 3 (the rest of this file is Python 3 code) -- now ``items()``.
      * ``is not None`` instead of ``!= None`` comparisons.
    """
    def __init__(self, env='main', **kwargs):
        self.vis = visdom.Visdom(env=env)
        self.index = {}  # window name -> next x coordinate
        self.log_text = ''
        self.env = env
    def plot_train_val(self, loss_train=None, loss_val=None):
        """Plot training ('1') and validation ('2') loss into one window."""
        x = self.index.get('train_val', 0)
        if x == 0:
            # First call: create the window with two stacked series.
            loss = loss_train if loss_train else loss_val
            win_y = np.column_stack((loss, loss))
            win_x = np.column_stack((x, x))
            self.win = self.vis.line(Y=win_y, X=win_x,
                                     env=self.env)
            self.index['train_val'] = x + 1
            return
        if loss_train is not None:
            self.vis.line(Y=np.array([loss_train]), X=np.array([x]),
                          win=self.win,
                          name='1',
                          update='append',
                          env=self.env)
            # Only training points advance the shared x axis.
            self.index['train_val'] = x + 5
        else:
            self.vis.line(Y=np.array([loss_val]), X=np.array([x]),
                          win=self.win,
                          name='2',
                          update='append',
                          env=self.env)
    def plot_many(self, d):
        """Plot every (name, value) pair of dict *d* into its own window."""
        for k, v in d.items():  # bugfix: iteritems() is Python 2 only
            self.plot(k, v)
    def plot(self, name, y, **kwargs):
        """Append point *y* to the line plot in window *name*."""
        x = self.index.get(name, 0)  # if none, return 0
        self.vis.line(Y=np.array([y]), X=np.array([x]),
                      win=name,
                      opts=dict(title=name),
                      update=None if x == 0 else 'append',
                      **kwargs)
        self.index[name] = x + 1
    def log(self, info, win='log_text'):
        """Placeholder for text logging (not implemented)."""
        pass
| [
"numpy.array",
"visdom.Visdom",
"numpy.column_stack"
] | [((120, 142), 'visdom.Visdom', 'visdom.Visdom', ([], {'env': 'env'}), '(env=env)\n', (133, 142), False, 'import visdom\n'), ((432, 461), 'numpy.column_stack', 'np.column_stack', (['(loss, loss)'], {}), '((loss, loss))\n', (447, 461), True, 'import numpy as np\n'), ((482, 505), 'numpy.column_stack', 'np.column_stack', (['(x, x)'], {}), '((x, x))\n', (497, 505), True, 'import numpy as np\n'), ((1432, 1445), 'numpy.array', 'np.array', (['[y]'], {}), '([y])\n', (1440, 1445), True, 'import numpy as np\n'), ((1449, 1462), 'numpy.array', 'np.array', (['[x]'], {}), '([x])\n', (1457, 1462), True, 'import numpy as np\n'), ((735, 757), 'numpy.array', 'np.array', (['[loss_train]'], {}), '([loss_train])\n', (743, 757), True, 'import numpy as np\n'), ((761, 774), 'numpy.array', 'np.array', (['[x]'], {}), '([x])\n', (769, 774), True, 'import numpy as np\n'), ((1021, 1041), 'numpy.array', 'np.array', (['[loss_val]'], {}), '([loss_val])\n', (1029, 1041), True, 'import numpy as np\n'), ((1045, 1058), 'numpy.array', 'np.array', (['[x]'], {}), '([x])\n', (1053, 1058), True, 'import numpy as np\n')] |
"""
Compares LRP maps optimized models
Reference : Barnes et al. [2020, JAMES]
Author : <NAME>
Date : 29 March 2021
"""
### Import packages
import numpy as np
from netCDF4 import Dataset
import matplotlib.pyplot as plt
from mpl_toolkits.basemap import Basemap, addcyclic, shiftgrid
import cmocean
import palettable.cubehelix as cm
import scipy.stats as sts
import calc_Utilities as UT
### Set parameters
variables = [r'T2M']
datasets = [r'AER+',r'GHG+',r'ALL',r'AER+',r'GHG+',r'ALL']
seasons = [r'annual']
letters = ["a","b","c","d","e","f","g","h","i","j","k","l","m"]
SAMPLEQ = 100
### Set directories
directorydata = '/Users/zlabe/Documents/Research/InternalSignal/Data/FINAL/R1/Prediction/Trials/'
directoryfigure = '/Users/zlabe/Desktop/PAPER/R1/'
### Read in LRP maps for X(LENS) for L2=0.01 at 500 epochs
data = Dataset(directorydata + 'LRP_YearlyMaps_%s_20ens_%s_%s_R1-Trials_L2-%s_epochs-%s.nc' % (SAMPLEQ,variables[0],seasons[0],0.01,500))
lat1 = data.variables['lat'][:]
lon1 = data.variables['lon'][:]
# LRP is 4-D with axis 0 indexing the forcing experiment.
# NOTE(review): axis 1 is averaged away below — presumably the
# trial/sample dimension; confirm against the file that wrote these maps.
lrp = data.variables['LRP'][:,:,:,:]
data.close()
# Mean over axis 1 for each experiment, leaving (year, lat, lon) maps.
lrpghg = np.nanmean(lrp[0,:,:,:],axis=1)
lrpaer = np.nanmean(lrp[1,:,:,:],axis=1)
lrplens = np.nanmean(lrp[2,:,:,:],axis=1)
### Read in LRP maps for X(LENS) for L2=0.001 at 1500 epochs
dataother = Dataset(directorydata + 'LRP_YearlyMaps_%s_20ens_%s_%s_R1-Trials_L2-%s_epochs-%s.nc' % (SAMPLEQ,variables[0],seasons[0],0.001,1500))
lat1other = dataother.variables['lat'][:]
lon1other = dataother.variables['lon'][:]
lrpother = dataother.variables['LRP'][:,:,:,:]
dataother.close()
lrpghgother = np.nanmean(lrpother[0,:,:,:],axis=1)
lrpaerother = np.nanmean(lrpother[1,:,:,:],axis=1)
lrplensother = np.nanmean(lrpother[2,:,:,:],axis=1)
##############################################################################
##############################################################################
##############################################################################
## Assess data
# NOTE: `data` is reused here (earlier it held the now-closed Dataset handle).
# NOTE(review): the subplot labels (`datasets`) list AER+ first, but
# `lrpghg` (lrp[0]) is plotted first — confirm the experiment ordering in
# the netCDF file matches the panel labels.
data = [lrpghg,lrpaer,lrplens,
        lrpghgother,lrpaerother,lrplensother]
#######################################################################
#######################################################################
#######################################################################
### Plot subplot of LRP means
plt.rc('text',usetex=True)
plt.rc('font',**{'family':'sans-serif','sans-serif':['Avant Garde']})
fig = plt.figure(figsize=(8,4))
for i in range(len(datasets)):
    ax1 = plt.subplot(2,3,i+1)
    ### Mollweide global projection; low-res coastlines suffice at this size
    m = Basemap(projection='moll',lon_0=0,resolution='l',area_thresh=10000)
    circle = m.drawmapboundary(fill_color='dimgrey')
    circle.set_clip_on(False)
    m.drawcoastlines(color='darkgrey',linewidth=0.35)
    ### Colorbar limits (loop-invariant; also used for the shared colorbar
    ### after the loop, which relies on `barlim` surviving the loop scope)
    barlim = np.round(np.arange(0,0.6,0.1),2)
    ### Take lrp mean over all years
    lrpmean = np.nanmean(data[i],axis=0)
    ### Wrap the longitude seam and recenter on 0 degrees for the map
    var, lons_cyclic = addcyclic(lrpmean, lon1)
    var, lons_cyclic = shiftgrid(180., var, lons_cyclic, start=False)
    lon2d, lat2d = np.meshgrid(lons_cyclic, lat1)
    x, y = m(lon2d, lat2d)
    ### Make the plot continuous
    cs = m.contourf(x,y,var,np.arange(0,0.5001,0.005),
                    extend='max')
    cmap = cm.classic_16.mpl_colormap
    cs.set_cmap(cmap)
    ### Panel labels: experiment name (upper right) and letter (upper left)
    ax1.annotate(r'\textbf{%s}' % (datasets[i]),xy=(0,0),xytext=(0.88,0.92),
                  textcoords='axes fraction',color='k',fontsize=14,
                  rotation=334,ha='center',va='center')
    ax1.annotate(r'\textbf{[%s]}' % letters[i],xy=(0,0),xytext=(0.085,0.93),
                  textcoords='axes fraction',color='dimgrey',fontsize=8,
                  rotation=0,ha='center',va='center')
    ### Row captions with the hyperparameters for each row of maps
    if i == 0:
        ax1.annotate(r'\textbf{L$_{2}$=0.01}',xy=(0,0),xytext=(-0.4,0.55),
                      textcoords='axes fraction',color='k',fontsize=8,
                      rotation=0,ha='left',va='center')
        ax1.annotate(r'\textbf{Epochs=500}',xy=(0,0),xytext=(-0.4,0.42),
                      textcoords='axes fraction',color='k',fontsize=8,
                      rotation=0,ha='left',va='center')
    if i == 3:
        ax1.annotate(r'\textbf{L$_{2}$=0.001}',xy=(0,0),xytext=(-0.4,0.55),
                      textcoords='axes fraction',color='k',fontsize=8,
                      rotation=0,ha='left',va='center')
        ax1.annotate(r'\textbf{Epochs=1500}',xy=(0,0),xytext=(-0.4,0.42),
                      textcoords='axes fraction',color='k',fontsize=8,
                      rotation=0,ha='left',va='center')
### Shared horizontal colorbar (uses `cs`/`barlim` from the last loop pass)
cbar_ax = fig.add_axes([0.3,0.13,0.4,0.03])
cbar = fig.colorbar(cs,cax=cbar_ax,orientation='horizontal',
                    extend='max',extendfrac=0.07,drawedges=False)
cbar.set_label(r'\textbf{RELEVANCE}',fontsize=11,color='dimgrey',labelpad=1.4)
cbar.set_ticks(barlim)
cbar.set_ticklabels(list(map(str,barlim)))
cbar.ax.tick_params(axis='x', size=.01,labelsize=6)
cbar.outline.set_edgecolor('dimgrey')
plt.subplots_adjust(hspace=-0.2)
plt.savefig(directoryfigure + 'L2_LRPmean_3XLENS_PAPER.png',dpi=600)
"netCDF4.Dataset",
"matplotlib.pyplot.subplot",
"numpy.meshgrid",
"mpl_toolkits.basemap.shiftgrid",
"mpl_toolkits.basemap.addcyclic",
"matplotlib.pyplot.figure",
"numpy.arange",
"matplotlib.pyplot.rc",
"matplotlib.pyplot.subplots_adjust",
"mpl_toolkits.basemap.Basemap",
"matplotlib.pyplot.savefi... | [((833, 976), 'netCDF4.Dataset', 'Dataset', (["(directorydata + \n 'LRP_YearlyMaps_%s_20ens_%s_%s_R1-Trials_L2-%s_epochs-%s.nc' % (SAMPLEQ,\n variables[0], seasons[0], 0.01, 500))"], {}), "(directorydata + \n 'LRP_YearlyMaps_%s_20ens_%s_%s_R1-Trials_L2-%s_epochs-%s.nc' % (SAMPLEQ,\n variables[0], seasons[0], 0.01, 500))\n", (840, 976), False, 'from netCDF4 import Dataset\n'), ((1087, 1122), 'numpy.nanmean', 'np.nanmean', (['lrp[0, :, :, :]'], {'axis': '(1)'}), '(lrp[0, :, :, :], axis=1)\n', (1097, 1122), True, 'import numpy as np\n'), ((1128, 1163), 'numpy.nanmean', 'np.nanmean', (['lrp[1, :, :, :]'], {'axis': '(1)'}), '(lrp[1, :, :, :], axis=1)\n', (1138, 1163), True, 'import numpy as np\n'), ((1170, 1205), 'numpy.nanmean', 'np.nanmean', (['lrp[2, :, :, :]'], {'axis': '(1)'}), '(lrp[2, :, :, :], axis=1)\n', (1180, 1205), True, 'import numpy as np\n'), ((1276, 1421), 'netCDF4.Dataset', 'Dataset', (["(directorydata + \n 'LRP_YearlyMaps_%s_20ens_%s_%s_R1-Trials_L2-%s_epochs-%s.nc' % (SAMPLEQ,\n variables[0], seasons[0], 0.001, 1500))"], {}), "(directorydata + \n 'LRP_YearlyMaps_%s_20ens_%s_%s_R1-Trials_L2-%s_epochs-%s.nc' % (SAMPLEQ,\n variables[0], seasons[0], 0.001, 1500))\n", (1283, 1421), False, 'from netCDF4 import Dataset\n'), ((1573, 1613), 'numpy.nanmean', 'np.nanmean', (['lrpother[0, :, :, :]'], {'axis': '(1)'}), '(lrpother[0, :, :, :], axis=1)\n', (1583, 1613), True, 'import numpy as np\n'), ((1624, 1664), 'numpy.nanmean', 'np.nanmean', (['lrpother[1, :, :, :]'], {'axis': '(1)'}), '(lrpother[1, :, :, :], axis=1)\n', (1634, 1664), True, 'import numpy as np\n'), ((1676, 1716), 'numpy.nanmean', 'np.nanmean', (['lrpother[2, :, :, :]'], {'axis': '(1)'}), '(lrpother[2, :, :, :], axis=1)\n', (1686, 1716), True, 'import numpy as np\n'), ((2290, 2317), 'matplotlib.pyplot.rc', 'plt.rc', (['"""text"""'], {'usetex': '(True)'}), "('text', usetex=True)\n", (2296, 2317), True, 'import matplotlib.pyplot as plt\n'), ((2317, 2390), 
'matplotlib.pyplot.rc', 'plt.rc', (['"""font"""'], {}), "('font', **{'family': 'sans-serif', 'sans-serif': ['Avant Garde']})\n", (2323, 2390), True, 'import matplotlib.pyplot as plt\n'), ((2395, 2421), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(8, 4)'}), '(figsize=(8, 4))\n', (2405, 2421), True, 'import matplotlib.pyplot as plt\n'), ((5016, 5048), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'hspace': '(-0.2)'}), '(hspace=-0.2)\n', (5035, 5048), True, 'import matplotlib.pyplot as plt\n'), ((5049, 5118), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(directoryfigure + 'L2_LRPmean_3XLENS_PAPER.png')"], {'dpi': '(600)'}), "(directoryfigure + 'L2_LRPmean_3XLENS_PAPER.png', dpi=600)\n", (5060, 5118), True, 'import matplotlib.pyplot as plt\n'), ((2462, 2486), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(3)', '(i + 1)'], {}), '(2, 3, i + 1)\n', (2473, 2486), True, 'import matplotlib.pyplot as plt\n'), ((2504, 2574), 'mpl_toolkits.basemap.Basemap', 'Basemap', ([], {'projection': '"""moll"""', 'lon_0': '(0)', 'resolution': '"""l"""', 'area_thresh': '(10000)'}), "(projection='moll', lon_0=0, resolution='l', area_thresh=10000)\n", (2511, 2574), False, 'from mpl_toolkits.basemap import Basemap, addcyclic, shiftgrid\n'), ((2841, 2868), 'numpy.nanmean', 'np.nanmean', (['data[i]'], {'axis': '(0)'}), '(data[i], axis=0)\n', (2851, 2868), True, 'import numpy as np\n'), ((2896, 2920), 'mpl_toolkits.basemap.addcyclic', 'addcyclic', (['lrpmean', 'lon1'], {}), '(lrpmean, lon1)\n', (2905, 2920), False, 'from mpl_toolkits.basemap import Basemap, addcyclic, shiftgrid\n'), ((2944, 2991), 'mpl_toolkits.basemap.shiftgrid', 'shiftgrid', (['(180.0)', 'var', 'lons_cyclic'], {'start': '(False)'}), '(180.0, var, lons_cyclic, start=False)\n', (2953, 2991), False, 'from mpl_toolkits.basemap import Basemap, addcyclic, shiftgrid\n'), ((3010, 3040), 'numpy.meshgrid', 'np.meshgrid', (['lons_cyclic', 'lat1'], {}), '(lons_cyclic, lat1)\n', (3021, 3040), 
True, 'import numpy as np\n'), ((2761, 2783), 'numpy.arange', 'np.arange', (['(0)', '(0.6)', '(0.1)'], {}), '(0, 0.6, 0.1)\n', (2770, 2783), True, 'import numpy as np\n'), ((3134, 3161), 'numpy.arange', 'np.arange', (['(0)', '(0.5001)', '(0.005)'], {}), '(0, 0.5001, 0.005)\n', (3143, 3161), True, 'import numpy as np\n')] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.