code stringlengths 31 1.05M | apis list | extract_api stringlengths 97 1.91M |
|---|---|---|
import numpy as np
# Simple Vector
a = np.array([1, 2, 3])
print("a", a)
print("type", type(a))
print("a[0]", a[0])
# Simple Matrix and indexing
a=np.array([[1, 2, 3], [4, 5, 6]])
print("a", a)
print("a[0,1]", a[0,1])
print("a[0][1]", a[0][1])
# Matrix properties
a = np.arange(15).reshape(3, 5)
print("a.shape", a.shape)
print("a.ndim", a.ndim)
print("a.dtype", a.dtype)
print("a.dtype.name", a.dtype.name)
print("a.itemsize", a.itemsize)
print("a.size", a.size)
print("a", a)
# Special matrices
a=np.empty((3, 4))
print("a", a)
b=np.zeros((3, 4))
print("b", b)
c=np.ones((3, 4))
print("c", c)
d=np.eye(3)
print("d", d)
e=np.random.randn(12).reshape((3,4))
print("e", e)
# Simple Statistics
print("Max", np.max(a))
print("Min", np.min(b))
print("Mean", np.mean(c))
print("Std Dev", np.std(d))
| [
"numpy.random.randn",
"numpy.std",
"numpy.empty",
"numpy.zeros",
"numpy.ones",
"numpy.max",
"numpy.min",
"numpy.array",
"numpy.mean",
"numpy.arange",
"numpy.eye"
] | [((40, 59), 'numpy.array', 'np.array', (['[1, 2, 3]'], {}), '([1, 2, 3])\n', (48, 59), True, 'import numpy as np\n'), ((149, 181), 'numpy.array', 'np.array', (['[[1, 2, 3], [4, 5, 6]]'], {}), '([[1, 2, 3], [4, 5, 6]])\n', (157, 181), True, 'import numpy as np\n'), ((503, 519), 'numpy.empty', 'np.empty', (['(3, 4)'], {}), '((3, 4))\n', (511, 519), True, 'import numpy as np\n'), ((536, 552), 'numpy.zeros', 'np.zeros', (['(3, 4)'], {}), '((3, 4))\n', (544, 552), True, 'import numpy as np\n'), ((569, 584), 'numpy.ones', 'np.ones', (['(3, 4)'], {}), '((3, 4))\n', (576, 584), True, 'import numpy as np\n'), ((601, 610), 'numpy.eye', 'np.eye', (['(3)'], {}), '(3)\n', (607, 610), True, 'import numpy as np\n'), ((710, 719), 'numpy.max', 'np.max', (['a'], {}), '(a)\n', (716, 719), True, 'import numpy as np\n'), ((734, 743), 'numpy.min', 'np.min', (['b'], {}), '(b)\n', (740, 743), True, 'import numpy as np\n'), ((759, 769), 'numpy.mean', 'np.mean', (['c'], {}), '(c)\n', (766, 769), True, 'import numpy as np\n'), ((788, 797), 'numpy.std', 'np.std', (['d'], {}), '(d)\n', (794, 797), True, 'import numpy as np\n'), ((271, 284), 'numpy.arange', 'np.arange', (['(15)'], {}), '(15)\n', (280, 284), True, 'import numpy as np\n'), ((627, 646), 'numpy.random.randn', 'np.random.randn', (['(12)'], {}), '(12)\n', (642, 646), True, 'import numpy as np\n')] |
import pandas as pd
import datetime
from mimesis import Generic
import random
import numpy as np
products_head = ['ID', 'Name', 'Price', 'Unit Cost', 'Manufacturer']
customers_head = ['id', 'Name', 'Address', 'City', 'Country', 'Website', 'Email', 'Phone', 'Registration Date']
staff_head = ['id', 'Name', 'Title', 'Address', 'Contract Date', 'Telephone', 'Email', 'Termination Date', 'Office', 'Salary']
sales_head = ['Tx Id', 'Customer id', 'Product ID', 'Sales Date', 'Sales Manager', 'Point of Sales', 'Quantity', 'Total Price']
def main():
generate_products(340)
generate_staff(400)
generate_customers(4000)
generate_sales('./products.csv', './customers.csv', './employees.csv', 100000)
def generate_customers(n):
### Initialize timer, mimesis-class and dataframe
begin_timer = datetime.datetime.now()
gen = Generic('en')
df = pd.DataFrame(columns=customers_head)
### Generate information for n customers and add them into dataframe
for i in range(n):
id = 21000 + i
name = gen.business.company()
address = gen.address.address()
city = gen.address.city()
country = gen.address.country()
web = gen.internet.home_page()
email = gen.person.email()
phone = gen.person.telephone()
registered = gen.datetime.datetime()
df.loc[i] = [id, name, address, city, country, web, email, phone, registered]
print(f'Generated customer-table in {datetime.datetime.now() - begin_timer}\n')
df.to_csv('./customers.csv', index=False)
def generate_products(n):
### Initialize timer, mimesis-class and dataframe
begin_timer = datetime.datetime.now()
gen = Generic('en')
df = pd.DataFrame(columns=products_head)
### Generate information for n products and add them into dataframe
for i in range(n):
if i % 4 == 0:
name = gen.hardware.graphics()
if i % 5 == 0:
name = gen.hardware.cpu()
else:
name = gen.hardware.phone_model()
id = name[:3].upper() + str(gen.numbers.integer_number(300,900))
price = gen.random.uniform(200,1040, 2)
unit_cost = round(gen.random.uniform(0.2,0.95,2) * price, 2)
manufacturer = name.split(' ')[0]
df.loc[i] = [id, name, price, unit_cost, manufacturer]
print(f'Generated product-table in {datetime.datetime.now() - begin_timer}\n')
df.to_csv('./products.csv', index=False)
def generate_staff(n):
### Initialize timer, mimesis-class and dataframe
begin_timer = datetime.datetime.now()
gen = Generic('en')
df = pd.DataFrame(columns=staff_head)
### Generate information for n employees and add them into dataframe
for i in range(n):
name = gen.person.full_name()
title = gen.person.occupation()
address = gen.address.address()
phone = gen.person.telephone()
office = gen.address.continent()
contract_date = gen.datetime.date(2012, 2021)
email = gen.person.email()
salary = int(np.random.normal(loc=3200, scale=1000))
if i % 4 == 0: # Lazy way to insert expired contracts into the data
contract_len = datetime.timedelta(random.randint(120,900))
termination_date = contract_date + contract_len
else:
termination_date = np.nan
df.loc[i] = [i, name, title, address, contract_date, phone, email, termination_date, office, salary]
print(f'Generated staff-table in {datetime.datetime.now() - begin_timer}\n')
df.to_csv('./employees.csv', index=False)
def generate_sales(products, customers, staff, n):
begin_timer = datetime.datetime.now()
### Reduce input tables to numpy arrays to make sampling faster
df = pd.DataFrame(columns=sales_head)
cust = pd.read_csv(customers).id.values
prod = pd.read_csv(products).values
staff = pd.read_csv(staff).id.values
gen = Generic('en')
### Select random customers, products and employees and generate sales events for them
for i in range(n):
cust_id = cust[np.random.choice(cust.shape[0])]
product = prod[np.random.choice(prod.shape[0])]
sales_person_id = staff[np.random.choice(staff.shape[0])]
sales_date = gen.datetime.datetime(2012, 2021)
pos = random.choice(['Webstore', 'Wholesale', 'Flagship', 'Reseller'])
qty = np.random.randint(6, 400)
total_price = qty * product[2]
df.loc[i] = [i, cust_id, product[0], sales_date, sales_person_id, pos, qty, total_price]
print(f'Generated sales-table in {datetime.datetime.now() - begin_timer}\n')
df.to_csv('./sales.csv', index=False)
main() | [
"pandas.DataFrame",
"random.randint",
"pandas.read_csv",
"random.choice",
"numpy.random.randint",
"mimesis.Generic",
"numpy.random.normal",
"numpy.random.choice",
"datetime.datetime.now"
] | [((812, 835), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (833, 835), False, 'import datetime\n'), ((846, 859), 'mimesis.Generic', 'Generic', (['"""en"""'], {}), "('en')\n", (853, 859), False, 'from mimesis import Generic\n'), ((869, 905), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': 'customers_head'}), '(columns=customers_head)\n', (881, 905), True, 'import pandas as pd\n'), ((1653, 1676), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (1674, 1676), False, 'import datetime\n'), ((1687, 1700), 'mimesis.Generic', 'Generic', (['"""en"""'], {}), "('en')\n", (1694, 1700), False, 'from mimesis import Generic\n'), ((1710, 1745), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': 'products_head'}), '(columns=products_head)\n', (1722, 1745), True, 'import pandas as pd\n'), ((2554, 2577), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (2575, 2577), False, 'import datetime\n'), ((2588, 2601), 'mimesis.Generic', 'Generic', (['"""en"""'], {}), "('en')\n", (2595, 2601), False, 'from mimesis import Generic\n'), ((2611, 2643), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': 'staff_head'}), '(columns=staff_head)\n', (2623, 2643), True, 'import pandas as pd\n'), ((3669, 3692), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (3690, 3692), False, 'import datetime\n'), ((3776, 3808), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': 'sales_head'}), '(columns=sales_head)\n', (3788, 3808), True, 'import pandas as pd\n'), ((3944, 3957), 'mimesis.Generic', 'Generic', (['"""en"""'], {}), "('en')\n", (3951, 3957), False, 'from mimesis import Generic\n'), ((3864, 3885), 'pandas.read_csv', 'pd.read_csv', (['products'], {}), '(products)\n', (3875, 3885), True, 'import pandas as pd\n'), ((4325, 4389), 'random.choice', 'random.choice', (["['Webstore', 'Wholesale', 'Flagship', 'Reseller']"], {}), "(['Webstore', 'Wholesale', 'Flagship', 'Reseller'])\n", (4338, 4389), False, 'import 
random\n'), ((4404, 4429), 'numpy.random.randint', 'np.random.randint', (['(6)', '(400)'], {}), '(6, 400)\n', (4421, 4429), True, 'import numpy as np\n'), ((3049, 3087), 'numpy.random.normal', 'np.random.normal', ([], {'loc': '(3200)', 'scale': '(1000)'}), '(loc=3200, scale=1000)\n', (3065, 3087), True, 'import numpy as np\n'), ((3820, 3842), 'pandas.read_csv', 'pd.read_csv', (['customers'], {}), '(customers)\n', (3831, 3842), True, 'import pandas as pd\n'), ((3905, 3923), 'pandas.read_csv', 'pd.read_csv', (['staff'], {}), '(staff)\n', (3916, 3923), True, 'import pandas as pd\n'), ((4101, 4132), 'numpy.random.choice', 'np.random.choice', (['cust.shape[0]'], {}), '(cust.shape[0])\n', (4117, 4132), True, 'import numpy as np\n'), ((4157, 4188), 'numpy.random.choice', 'np.random.choice', (['prod.shape[0]'], {}), '(prod.shape[0])\n', (4173, 4188), True, 'import numpy as np\n'), ((4222, 4254), 'numpy.random.choice', 'np.random.choice', (['staff.shape[0]'], {}), '(staff.shape[0])\n', (4238, 4254), True, 'import numpy as np\n'), ((3212, 3236), 'random.randint', 'random.randint', (['(120)', '(900)'], {}), '(120, 900)\n', (3226, 3236), False, 'import random\n'), ((1464, 1487), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (1485, 1487), False, 'import datetime\n'), ((2365, 2388), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (2386, 2388), False, 'import datetime\n'), ((3510, 3533), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (3531, 3533), False, 'import datetime\n'), ((4609, 4632), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (4630, 4632), False, 'import datetime\n')] |
"""
==========
SPLIT MESH
==========
Split mesh.
by <NAME> <<EMAIL>>
Feb 13, 2021
"""
import numpy as np
import openmesh as om
__all__ = ['split_mesh']
def split_mesh(V, F, edges, ratios):
"""
Split mesh.
Parameters
----------
V : numpy.array
F : numpy.array
edges : numpy.array
ratios : numpy.array
p0 * (1-r) + p1 * r
Returns
-------
mesh : openmesh.TriMesh
index : list (int)
"""
mesh = om.TriMesh(V, F)
assert mesh.n_vertices() == V.shape[0], "Invalid input mesh"
points = mesh.points()
pts0 = points[edges[:, 0], :]
pts1 = points[edges[:, 1], :]
curve_pts = np.multiply(pts0, 1.-ratios[:, np.newaxis]) + \
np.multiply(pts1, ratios[:, np.newaxis])
new_index = []
for i in range(edges.shape[0]):
if edges[i, 0] == edges[i, 1]:
new_index.append(edges[i, 0])
else:
p = curve_pts[i, :]
heh = mesh.find_halfedge(mesh.vertex_handle(edges[i, 0]),
mesh.vertex_handle(edges[i, 1]))
eh = mesh.edge_handle(heh)
vh = mesh.add_vertex(p)
mesh.split_edge(eh, vh)
new_index.append(vh.idx())
return mesh, new_index
if __name__ == '__main__':
pass
| [
"openmesh.TriMesh",
"numpy.multiply"
] | [((462, 478), 'openmesh.TriMesh', 'om.TriMesh', (['V', 'F'], {}), '(V, F)\n', (472, 478), True, 'import openmesh as om\n'), ((656, 702), 'numpy.multiply', 'np.multiply', (['pts0', '(1.0 - ratios[:, np.newaxis])'], {}), '(pts0, 1.0 - ratios[:, np.newaxis])\n', (667, 702), True, 'import numpy as np\n'), ((712, 752), 'numpy.multiply', 'np.multiply', (['pts1', 'ratios[:, np.newaxis]'], {}), '(pts1, ratios[:, np.newaxis])\n', (723, 752), True, 'import numpy as np\n')] |
""" *** DEPRECIATED ***
Contains functions to view and interrogate chi-squared minimisation
Attributes:
MAIN_FONT (dict): style properties for the main font to use in plot labels
"""
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
from matplotlib.colorbar import make_axes_gridspec
from matplotlib.colors import BoundaryNorm
from matplotlib.ticker import FixedLocator, ScalarFormatter
import numpy
import echidna.calc.decay as decay
MAIN_FONT = {"size": 22}
BOLD_FONT = {"size": 22, "weight": "bold"}
def chi_squared_vs_signal(signal_config, converter=None, fig_num=1,
n_atoms=None, penalty=None, effective_mass=False,
half_life=False, save_as=None, show=False, **kwargs):
""" Plot the chi squared as a function of signal counts
Args:
signal_config (:class:`echidna.limit.limit_config.LimitConfig`): Signal
config class, where chi squareds have been stored.
converter (:class:`echidna.calc.decay.DBIsotope`, optional): Converter
used to convert between counts and half-life/effective mass.
fig_num (int): Fig number. When creating multiple plots in the
same script, ensures matplotlib doesn't overwrite them.
n_atoms (float): Number of atoms for converter to use in
calculations of half life or effective mass.
penalty (:class:`echidna.limit.limit_config.LimitConfig`, optional):
config for signal with penalty term.
effective_mass (bool, optional): if True, plot the x-axis as the
signal contribution effective mass.
half_life (bool, optional): if True, plot the x-axis as the signal
contribution half life.
save_as (string, optional): Name of plot to save. All plots are
saved in .png format.
show (bool, optional): Display the plot to screen. Default is False.
\**kwargs: Keyword arguments to pass to converter methods.
Raises:
TypeError: If 'half_life' or 'effective_mass' keyword arguments
are used without :class:`echidna.calc.decay.DBIsotope` object
to use as converter.
Returns:
matplotlib.pyplot.figure: Plotted figure.
"""
if (converter is None and half_life or effective_mass):
raise TypeError("converter is None. Cannot use 'half_life' or "
"'effective_mass' keywords without converter")
# Fig. 1 (axes generated automatically)
fig = plt.figure(fig_num, figsize=(10, 10))
# X axis values
if effective_mass:
x = numpy.zeros(shape=(signal_config.get_chi_squareds()[2].shape))
for i_bin, count in enumerate(signal_config.get_chi_squareds()[2]):
effective_mass = converter.counts_to_eff_mass(count, **kwargs)
x[i_bin] = effective_mass
plt.xlabel(r"$m_{\beta\beta}$", **BOLD_FONT)
elif half_life:
x = numpy.zeros(shape=(signal_config.get_chi_squareds()[2].shape))
for i_bin, count in enumerate(signal_config.get_chi_squareds()[2]):
x.append(1./converter.counts_to_half_life(count, **kwargs))
plt.xlabel(r"$1/T_{1/2}^{0\nu}$", **BOLD_FONT)
else:
x = signal_config.get_chi_squareds()[2]
plt.xlabel("Signal counts", **BOLD_FONT)
# Y axis values
y_1 = signal_config.get_chi_squareds()[0]
plt.ylabel(r"$\chi^{2}$", **BOLD_FONT)
if penalty:
y_2 = penalty.get_chi_squareds()[0]
plt.plot(x, y_1, "bo-", label="no systematic uncertainties")
# lines and dots
plt.plot(x, y_2, "ro-", label="systematic uncertainties")
plt.legend(loc="upper left")
else:
plt.plot(x, y_1, "o-") # lines and dots
# Set the tick labels, via Axes instance
ax = fig.gca() # Get current Axes instance
for label in (ax.get_xticklabels() + ax.get_yticklabels()):
# Set other properties here e.g. colour, rotation
label.set_fontsize(MAIN_FONT.get("size"))
if save_as:
plt.savefig(save_as + ".png", dpi=400)
if show:
plt.show()
return fig
def chi_squared_map(syst_analyser, fig_num=1, preferred_values=True,
minima=True, contours=False, save_as=None):
""" Plot chi squared surface for systematic vs. signal counts
Args:
syst_analyser (:class:`echidna.limit.limit_setting.SystAnalyser`): A
systematic analyser object, created during limit setting. Can be used
during limit setting setting or can load an instance from hdf5
fig_num (int): Fig number. When creating multiple plots in the
same script, ensures matplotlib doesn't overwrite them.
preferred_values (bool, optional): if False "preferred values" curve
is not overlayed on colour map. Default is True.
minima (bool, optional): if False "minima" are not overlayed on
colour map. Default is True.
contours (bool, optional): if True produces a contour plot of chi
squared surface. Default is False.
save_as (string, optional): Name of plot to save. All plots are
saved with in .png format.
Default is to produce a colour map, with "preferred values" curve
and "minima" overlayed.
Returns:
matplotlib.pyplot.figure: Plotted figure.
"""
# Set x and y axes
x = syst_analyser.get_actual_counts()
y = syst_analyser.get_syst_values()
# Set chi squared map values
data = numpy.average(syst_analyser.get_chi_squareds(), axis=1)
data = numpy.transpose(data) # transpose it so that axes are correct
# Set preferred value values
y_2 = numpy.average(syst_analyser.get_preferred_values(), axis=1)
# Set minima values
x_3 = syst_analyser.get_minima()[0]
y_3 = syst_analyser.get_minima()[1]
# Create meshgrid
X, Y = numpy.meshgrid(x, y)
# Set sensible levels, pick the desired colormap and define normalization
color_map = plt.get_cmap('hot_r')
linear = numpy.linspace(numpy.sqrt(data.min()), numpy.sqrt(data.max()),
num=100)
locator = FixedLocator(linear**2)
levels = locator.tick_values(data.min(), data.max())
norm = BoundaryNorm(levels, ncolors=color_map.N)
if contours:
fig = plt.figure(fig_num, figsize=(16, 10)) # Fig. 2
fig.text(0.1, 0.9, syst_analyser._name, **BOLD_FONT)
ax = Axes3D(fig)
ax.view_init(elev=17.0, azim=-136.0) # set intial viewing position
# Plot surface
surf = ax.plot_surface(X, Y, data, rstride=1, cstride=1,
cmap=color_map, norm=norm, linewidth=0,
antialiased=False)
ax.zaxis.set_minor_locator(locator)
ax.ticklabel_format(style="scientific", scilimits=(3, 4))
# Set axis labels
ax.set_xlabel("\nSignal counts", **BOLD_FONT)
ax.set_ylabel("\nValue of systematic", **BOLD_FONT)
for label in (ax.get_xticklabels() +
ax.get_yticklabels() +
ax.get_zticklabels()):
label.set_fontsize(MAIN_FONT.get("size")) # tick label size
ax.dist = 11 # Ensures tick labels are not cut off
ax.margins(0.05, 0.05, 0.05) # Adjusts tick margins
# Draw colorbar
color_bar = fig.colorbar(surf, ax=ax, orientation="vertical",
fraction=0.2, shrink=0.5, aspect=10)
# kwargs here control axes that the colorbar is drawn in
color_bar.set_label(r"$\chi^2$", size=MAIN_FONT.get("size"))
color_bar.ax.tick_params(labelsize=MAIN_FONT.get("size"))
plt.show()
if save_as:
fig.savefig(save_as + "_contour.png", dpi=300)
else:
fig = plt.figure(fig_num, figsize=(12, 10)) # Fig. 2
fig.text(0.1, 0.95, syst_analyser._name, **BOLD_FONT)
ax = fig.add_subplot(1, 1, 1)
# Set labels
ax.set_xlabel("Signal counts", **BOLD_FONT)
ax.set_ylabel("Value of systematic", **BOLD_FONT)
# Plot color map
color_map = ax.pcolormesh(X, Y, data, cmap=color_map, norm=norm)
color_bar = fig.colorbar(color_map)
color_bar.set_label("$\chi^2$", size=MAIN_FONT.get("size"))
# tick label size
color_bar.ax.tick_params(labelsize=MAIN_FONT.get("size"))
# Set axes limits
ax.set_xlim([X.min(), X.max()])
ax.set_ylim([Y.min(), Y.max()])
if preferred_values:
ax.plot(x, y_2, "bo-", label="Preferred values")
if minima:
ax.plot(x_3, y_3, "ko", label="Minima")
# Set axes tick label size
for label in (ax.get_xticklabels() + ax.get_yticklabels()):
label.set_fontsize(MAIN_FONT.get("size"))
ax.legend(loc="upper left")
if save_as:
fig.savefig(save_as + "_color_map.png", dpi=300)
return fig
def penalty_vs_systematic(syst_analyser, fig_num=1, save_as=None):
""" Plot penalty_value vs. systematic
Args:
syst_analyser (:class:`echidna.limit.limit_setting.SystAnalyser`): A
systematic analyser object, created during limit setting. Can be used
during limit setting setting or can load an instance from hdf5
fig_num (int, optional): Fig number. When creating multiple plots in the
same script, ensures matplotlib doesn't overwrite them.
save_as (string, optional): Name of plot to save. All plots are
saved with in .png format.
Returns:
matplotlib.pyplot.figure: Plotted figure.
"""
fig = plt.figure(fig_num, figsize=(9, 7)) # Fig. 3
fig.text(0.1, 0.95, syst_analyser._name, **BOLD_FONT)
ax = fig.add_subplot(1, 1, 1)
x = syst_analyser._penalty_values[0]
y = syst_analyser._penalty_values[1]
plt.xlabel("Value of systematic", **BOLD_FONT)
plt.ylabel("Value of penalty term", **BOLD_FONT)
plt.plot(x, y, "bo")
for label in (ax.get_xticklabels() + ax.get_yticklabels()):
# Set other properties here e.g. colour, rotation
label.set_fontsize(MAIN_FONT.get("size"))
if save_as:
plt.savefig(kwagrs.get("save_as") + ".png")
return fig
def turn_on(syst_analyser, signal_config, fig=1, save_as=None):
""" Plot deviation from chi-squared with no floated systematics.
When does the effect of floating the systematic "turn on"?
Args:
syst_analyser (:class:`echidna.limit.limit_setting.SystAnalyser`): A
systematic analyser object, created during limit setting. Can be used
during limit setting setting or can load an instance from hdf5.
signal_config (:class:`echidna.limit.limit_config.LimitConfig`): Signal
config class, where chi squareds have been stored.
fig_num (int): Fig number. When creating multiple plots in the
same script, ensures matplotlib doesn't overwrite them.
save_as (string, optional): Name of plot to save. All plots are
saved with in .png format.
Returns:
matplotlib.pyplot.figure: Plotted figure.
"""
# Set x and y axes
x = syst_analyser.get_actual_counts()
y = syst_analyser.get_syst_values()
# Set chi squared map values
data = numpy.average(syst_analyser.get_chi_squareds(), axis=1)
data = numpy.transpose(data) # transpose it so that axes are correct
# Create meshgrid
X, Y = numpy.meshgrid(x, y)
# Define an array of \chi_0 values - chi squared without
# floating systematics
chi_squareds = signal_config.get_chi_squareds()[0]
data_np = numpy.zeros(data.shape) # zeroed array the same size as data
for y in range(len(data_np)):
for x, chi_squared in enumerate(chi_squareds):
data_np[y][x] = chi_squared
#if numpy.any((numpy.average(data_np, axis=0) != chi_squareds)):
# raise AssertionError("Incorrect chi squareds (no floating) array.")
# Make an array of the offsets
offsets = data - data_np
# Set sensible levels, pick the desired colormap and define normalization
color_map = plt.get_cmap('coolwarm')
positives = numpy.linspace(numpy.log10(offsets.max())*-1.,
numpy.log10(offsets.max()), num=50)
# linear array in log space
if offsets.min() < 0.:
negatives = numpy.linspace(offsets.min(), 0.0, num=51)
else:
negatives = numpy.zeros((51))
# Add the positive part to the negative part
full_scale = numpy.append(negatives, numpy.power(10, positives))
locator = FixedLocator(full_scale)
levels = locator.tick_values(offsets.min(), offsets.max())
norm = BoundaryNorm(levels, ncolors=color_map.N)
fig = plt.figure(fig, figsize=(12, 10)) # Fig. 4
fig.text(0.1, 0.95, syst_analyser._name, **BOLD_FONT)
ax = fig.add_subplot(1, 1, 1)
# Set labels
ax.set_xlabel("Signal counts", **BOLD_FONT)
ax.set_ylabel("Value of systematic", **BOLD_FONT)
# Plot color map
color_map = ax.pcolormesh(X, Y, offsets, cmap=color_map, norm=norm)
color_bar = fig.colorbar(color_map)
color_bar.set_label("$\chi^2 - \chi_0^2$", size=MAIN_FONT.get("size"))
# tick label size
color_bar.ax.tick_params(labelsize=MAIN_FONT.get("size"))
# Set axes limits
ax.set_xlim([X.min(), X.max()])
ax.set_ylim([Y.min(), Y.max()])
# Set axes tick label size
for label in (ax.get_xticklabels() + ax.get_yticklabels()):
label.set_fontsize(MAIN_FONT.get("size"))
ax.legend(loc="upper left")
if save_as:
fig.savefig(save_as + "_turn_on.png", dpi=300)
return fig
def push_pull(syst_analyser, fig=1, save_as=None):
""" Plot penalty value - poisson likelihood chi squared.
When does minimising chi squared, which wants to "pull" the away
from the data/prior value dominate and when does the penalty term,
which wants to "pull" towards the data/prior, constraining the fit
dominate?
Args:
syst_analyser (:class:`echidna.limit.limit_setting.SystAnalyser`): A
systematic analyser object, created during limit setting. Can be used
during limit setting setting or can load an instance from hdf5
fig_num (int): Fig number. When creating multiple plots in the
same script, ensures matplotlib doesn't overwrite them.
save_as (string, optional): Name of plot to save. All plots are
saved with in .png format.
Returns:
matplotlib.pyplot.figure: Plotted figure.
"""
# Set x and y axes
x = syst_analyser.get_actual_counts()
y = syst_analyser.get_syst_values()
# Set chi squared map values
data = numpy.average(syst_analyser.get_chi_squareds(), axis=1)
data = numpy.transpose(data) # transpose it so that axes are correct
# Create meshgrid
X, Y = numpy.meshgrid(x, y)
# Define an array penalty values
penalty_values = syst_analyser._penalty_values[1, 0:len(y)]
# zeroed array the same size as data
penalty_array = numpy.zeros(data.shape)
for y, penalty_value in enumerate(penalty_values):
for x in range(len(penalty_array[y])):
penalty_array[y][x] = penalty_value
# Define the push pull array penalty term - chi_squared
# --> push_pull > 0 when penalty_value > chi_squared
# --> push_pull < 1 when penalty_value < chi_squared
push_pull = (2.*penalty_array) - data
# Set sensible levels, pick the desired colormap and define normalization
color_map = plt.get_cmap('coolwarm')
if push_pull.min() < 0.:
negatives = numpy.linspace(push_pull.min(), 0.,
num=50, endpoint=False)
else:
negatives = numpy.zeros((50))
if push_pull.max() > 0.:
positives = numpy.linspace(0., push_pull.max(), num=51)
else:
positives = numpy.zeros((51))
# Add the pull part to the push part
full_scale = numpy.append(negatives, positives)
locator = FixedLocator(full_scale)
levels = locator.tick_values(push_pull.min(), push_pull.max())
norm = BoundaryNorm(levels, ncolors=color_map.N)
fig = plt.figure(fig, figsize=(12, 10)) # Fig. 4
fig.text(0.1, 0.95, syst_analyser._name, **BOLD_FONT)
ax = fig.add_subplot(1, 1, 1)
# Set labels
ax.set_xlabel("Signal counts", **BOLD_FONT)
ax.set_ylabel("Value of systematic", **BOLD_FONT)
# Plot color map
color_map = ax.pcolormesh(X, Y, push_pull, cmap=color_map, norm=norm)
color_bar = fig.colorbar(color_map)
color_bar.set_label("$s-\chi^{2}_{\lambda,p}$", size=MAIN_FONT.get("size"))
# tick label size
color_bar.ax.tick_params(labelsize=MAIN_FONT.get("size"))
# Set axes limits
ax.set_xlim([X.min(), X.max()])
ax.set_ylim([Y.min(), Y.max()])
# Set axes tick label size
for label in (ax.get_xticklabels() + ax.get_yticklabels()):
label.set_fontsize(MAIN_FONT.get("size"))
ax.legend(loc="upper left")
if save_as:
fig.savefig(save_as + "_push_pull.png", dpi=300)
return fig
def main(args):
""" Script to produce chi squared plots for a given systematic.
.. note:: Produces
* Plot of chi squared vs. signal counts
* Plot of systematic vs. signal chi squared surface, either
contour plot or color map
* Plot of systematic value vs. penalty term value
Args:
args (dict): command line arguments from argparse.
"""
# Load signal config from hdf5
signal_config = LimitConfig(0, [0])
signal_config = store.load_ndarray(args.signal_config, signal_config)
if args.penalty_config is not None:
penalty_config = LimitConfig(0, [0])
penalty_config = store.load_ndarray(args.penalty_config,
penalty_config)
else:
penalty_config = None
# Loaf systematic analyser from hdf5
syst_analyser = SystAnalyser("", numpy.zeros((1)), numpy.zeros((1)))
syst_analyser = store.load_ndarray(args.syst_analyser, syst_analyser)
# Produce plots
# Currently not possible to produce chi squared vs signal plot with half
# life or effective mass on x-axis, from outside of limit setting code.
# Just produce with signal counts on x-axis here.
fig_1 = chi_squared_vs_signal(signal_config, fig_num=1,
penalty=penalty_config,
save_as=args.image_name)
fig_2 = penalty_vs_systematic(syst_analyser, 2)
fig_3 = turn_on(syst_analyser, signal_config, 3, save_as=args.image_name)
fig_4 = push_pull(syst_analyser, 4, save_as=args.image_name)
fig_5 = chi_squared_map(syst_analyser, 5,
contours=args.contours,
save_as=args.image_name)
plt.show()
raw_input("RETURN to exit")
if __name__ == "__main__":
import echidna.output.store as store
from echidna.limit.limit_config import LimitConfig
from echidna.limit.limit_setting import SystAnalyser
from echidna.scripts.zero_nu_limit import ReadableDir
import argparse
parser = argparse.ArgumentParser(description="Produce chi squared plots "
"for a systematic")
parser.add_argument("-s", "--signal_config", action=ReadableDir,
help="Supply location of signal config hdf5 file")
parser.add_argument("-p", "--penalty_config", action=ReadableDir,
help="Supply location of signal config with "
"penalty term")
parser.add_argument("-a", "--syst_analyser", action=ReadableDir,
help="Supply location of syst analyser hdf5 file")
parser.add_argument("-i", "--image_name", type=str, default="output",
help="Supply an image name")
parser.add_argument("-c", "--contours", action="store_true",
help="If true produces a contour plot, "
"defualt is colour map")
args = parser.parse_args()
main(args)
| [
"echidna.limit.limit_config.LimitConfig",
"argparse.ArgumentParser",
"matplotlib.pyplot.figure",
"numpy.meshgrid",
"numpy.power",
"numpy.transpose",
"matplotlib.ticker.FixedLocator",
"numpy.append",
"matplotlib.pyplot.show",
"matplotlib.pyplot.get_cmap",
"mpl_toolkits.mplot3d.Axes3D",
"matplot... | [((2450, 2487), 'matplotlib.pyplot.figure', 'plt.figure', (['fig_num'], {'figsize': '(10, 10)'}), '(fig_num, figsize=(10, 10))\n', (2460, 2487), True, 'import matplotlib.pyplot as plt\n'), ((3324, 3362), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""$\\\\chi^{2}$"""'], {}), "('$\\\\chi^{2}$', **BOLD_FONT)\n", (3334, 3362), True, 'import matplotlib.pyplot as plt\n'), ((5474, 5495), 'numpy.transpose', 'numpy.transpose', (['data'], {}), '(data)\n', (5489, 5495), False, 'import numpy\n'), ((5780, 5800), 'numpy.meshgrid', 'numpy.meshgrid', (['x', 'y'], {}), '(x, y)\n', (5794, 5800), False, 'import numpy\n'), ((5896, 5917), 'matplotlib.pyplot.get_cmap', 'plt.get_cmap', (['"""hot_r"""'], {}), "('hot_r')\n", (5908, 5917), True, 'import matplotlib.pyplot as plt\n'), ((6046, 6071), 'matplotlib.ticker.FixedLocator', 'FixedLocator', (['(linear ** 2)'], {}), '(linear ** 2)\n', (6058, 6071), False, 'from matplotlib.ticker import FixedLocator, ScalarFormatter\n'), ((6138, 6179), 'matplotlib.colors.BoundaryNorm', 'BoundaryNorm', (['levels'], {'ncolors': 'color_map.N'}), '(levels, ncolors=color_map.N)\n', (6150, 6179), False, 'from matplotlib.colors import BoundaryNorm\n'), ((9518, 9553), 'matplotlib.pyplot.figure', 'plt.figure', (['fig_num'], {'figsize': '(9, 7)'}), '(fig_num, figsize=(9, 7))\n', (9528, 9553), True, 'import matplotlib.pyplot as plt\n'), ((9743, 9789), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Value of systematic"""'], {}), "('Value of systematic', **BOLD_FONT)\n", (9753, 9789), True, 'import matplotlib.pyplot as plt\n'), ((9794, 9842), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Value of penalty term"""'], {}), "('Value of penalty term', **BOLD_FONT)\n", (9804, 9842), True, 'import matplotlib.pyplot as plt\n'), ((9847, 9867), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y', '"""bo"""'], {}), "(x, y, 'bo')\n", (9855, 9867), True, 'import matplotlib.pyplot as plt\n'), ((11222, 11243), 'numpy.transpose', 'numpy.transpose', (['data'], {}), 
'(data)\n', (11237, 11243), False, 'import numpy\n'), ((11319, 11339), 'numpy.meshgrid', 'numpy.meshgrid', (['x', 'y'], {}), '(x, y)\n', (11333, 11339), False, 'import numpy\n'), ((11499, 11522), 'numpy.zeros', 'numpy.zeros', (['data.shape'], {}), '(data.shape)\n', (11510, 11522), False, 'import numpy\n'), ((11996, 12020), 'matplotlib.pyplot.get_cmap', 'plt.get_cmap', (['"""coolwarm"""'], {}), "('coolwarm')\n", (12008, 12020), True, 'import matplotlib.pyplot as plt\n'), ((12455, 12479), 'matplotlib.ticker.FixedLocator', 'FixedLocator', (['full_scale'], {}), '(full_scale)\n', (12467, 12479), False, 'from matplotlib.ticker import FixedLocator, ScalarFormatter\n'), ((12554, 12595), 'matplotlib.colors.BoundaryNorm', 'BoundaryNorm', (['levels'], {'ncolors': 'color_map.N'}), '(levels, ncolors=color_map.N)\n', (12566, 12595), False, 'from matplotlib.colors import BoundaryNorm\n'), ((12606, 12639), 'matplotlib.pyplot.figure', 'plt.figure', (['fig'], {'figsize': '(12, 10)'}), '(fig, figsize=(12, 10))\n', (12616, 12639), True, 'import matplotlib.pyplot as plt\n'), ((14615, 14636), 'numpy.transpose', 'numpy.transpose', (['data'], {}), '(data)\n', (14630, 14636), False, 'import numpy\n'), ((14712, 14732), 'numpy.meshgrid', 'numpy.meshgrid', (['x', 'y'], {}), '(x, y)\n', (14726, 14732), False, 'import numpy\n'), ((14896, 14919), 'numpy.zeros', 'numpy.zeros', (['data.shape'], {}), '(data.shape)\n', (14907, 14919), False, 'import numpy\n'), ((15382, 15406), 'matplotlib.pyplot.get_cmap', 'plt.get_cmap', (['"""coolwarm"""'], {}), "('coolwarm')\n", (15394, 15406), True, 'import matplotlib.pyplot as plt\n'), ((15800, 15834), 'numpy.append', 'numpy.append', (['negatives', 'positives'], {}), '(negatives, positives)\n', (15812, 15834), False, 'import numpy\n'), ((15849, 15873), 'matplotlib.ticker.FixedLocator', 'FixedLocator', (['full_scale'], {}), '(full_scale)\n', (15861, 15873), False, 'from matplotlib.ticker import FixedLocator, ScalarFormatter\n'), ((15952, 15993), 
'matplotlib.colors.BoundaryNorm', 'BoundaryNorm', (['levels'], {'ncolors': 'color_map.N'}), '(levels, ncolors=color_map.N)\n', (15964, 15993), False, 'from matplotlib.colors import BoundaryNorm\n'), ((16004, 16037), 'matplotlib.pyplot.figure', 'plt.figure', (['fig'], {'figsize': '(12, 10)'}), '(fig, figsize=(12, 10))\n', (16014, 16037), True, 'import matplotlib.pyplot as plt\n'), ((17367, 17386), 'echidna.limit.limit_config.LimitConfig', 'LimitConfig', (['(0)', '[0]'], {}), '(0, [0])\n', (17378, 17386), False, 'from echidna.limit.limit_config import LimitConfig\n'), ((17407, 17460), 'echidna.output.store.load_ndarray', 'store.load_ndarray', (['args.signal_config', 'signal_config'], {}), '(args.signal_config, signal_config)\n', (17425, 17460), True, 'import echidna.output.store as store\n'), ((17846, 17899), 'echidna.output.store.load_ndarray', 'store.load_ndarray', (['args.syst_analyser', 'syst_analyser'], {}), '(args.syst_analyser, syst_analyser)\n', (17864, 17899), True, 'import echidna.output.store as store\n'), ((18656, 18666), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (18664, 18666), True, 'import matplotlib.pyplot as plt\n'), ((18974, 19060), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Produce chi squared plots for a systematic"""'}), "(description=\n 'Produce chi squared plots for a systematic')\n", (18997, 19060), False, 'import argparse\n'), ((2804, 2849), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""$m_{\\\\beta\\\\beta}$"""'], {}), "('$m_{\\\\beta\\\\beta}$', **BOLD_FONT)\n", (2814, 2849), True, 'import matplotlib.pyplot as plt\n'), ((3431, 3491), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y_1', '"""bo-"""'], {'label': '"""no systematic uncertainties"""'}), "(x, y_1, 'bo-', label='no systematic uncertainties')\n", (3439, 3491), True, 'import matplotlib.pyplot as plt\n'), ((3525, 3582), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y_2', '"""ro-"""'], {'label': '"""systematic uncertainties"""'}), "(x, 
y_2, 'ro-', label='systematic uncertainties')\n", (3533, 3582), True, 'import matplotlib.pyplot as plt\n'), ((3591, 3619), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""upper left"""'}), "(loc='upper left')\n", (3601, 3619), True, 'import matplotlib.pyplot as plt\n'), ((3638, 3660), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y_1', '"""o-"""'], {}), "(x, y_1, 'o-')\n", (3646, 3660), True, 'import matplotlib.pyplot as plt\n'), ((3970, 4008), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(save_as + '.png')"], {'dpi': '(400)'}), "(save_as + '.png', dpi=400)\n", (3981, 4008), True, 'import matplotlib.pyplot as plt\n'), ((4030, 4040), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4038, 4040), True, 'import matplotlib.pyplot as plt\n'), ((6212, 6249), 'matplotlib.pyplot.figure', 'plt.figure', (['fig_num'], {'figsize': '(16, 10)'}), '(fig_num, figsize=(16, 10))\n', (6222, 6249), True, 'import matplotlib.pyplot as plt\n'), ((6334, 6345), 'mpl_toolkits.mplot3d.Axes3D', 'Axes3D', (['fig'], {}), '(fig)\n', (6340, 6345), False, 'from mpl_toolkits.mplot3d import Axes3D\n'), ((7587, 7597), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (7595, 7597), True, 'import matplotlib.pyplot as plt\n'), ((7701, 7738), 'matplotlib.pyplot.figure', 'plt.figure', (['fig_num'], {'figsize': '(12, 10)'}), '(fig_num, figsize=(12, 10))\n', (7711, 7738), True, 'import matplotlib.pyplot as plt\n'), ((12304, 12319), 'numpy.zeros', 'numpy.zeros', (['(51)'], {}), '(51)\n', (12315, 12319), False, 'import numpy\n'), ((12413, 12439), 'numpy.power', 'numpy.power', (['(10)', 'positives'], {}), '(10, positives)\n', (12424, 12439), False, 'import numpy\n'), ((15582, 15597), 'numpy.zeros', 'numpy.zeros', (['(50)'], {}), '(50)\n', (15593, 15597), False, 'import numpy\n'), ((15723, 15738), 'numpy.zeros', 'numpy.zeros', (['(51)'], {}), '(51)\n', (15734, 15738), False, 'import numpy\n'), ((17526, 17545), 'echidna.limit.limit_config.LimitConfig', 'LimitConfig', (['(0)', '[0]'], 
{}), '(0, [0])\n', (17537, 17545), False, 'from echidna.limit.limit_config import LimitConfig\n'), ((17571, 17626), 'echidna.output.store.load_ndarray', 'store.load_ndarray', (['args.penalty_config', 'penalty_config'], {}), '(args.penalty_config, penalty_config)\n', (17589, 17626), True, 'import echidna.output.store as store\n'), ((17790, 17804), 'numpy.zeros', 'numpy.zeros', (['(1)'], {}), '(1)\n', (17801, 17804), False, 'import numpy\n'), ((17808, 17822), 'numpy.zeros', 'numpy.zeros', (['(1)'], {}), '(1)\n', (17819, 17822), False, 'import numpy\n'), ((3100, 3146), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""$1/T_{1/2}^{0\\\\nu}$"""'], {}), "('$1/T_{1/2}^{0\\\\nu}$', **BOLD_FONT)\n", (3110, 3146), True, 'import matplotlib.pyplot as plt\n'), ((3213, 3253), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Signal counts"""'], {}), "('Signal counts', **BOLD_FONT)\n", (3223, 3253), True, 'import matplotlib.pyplot as plt\n')] |
# -*- coding: utf-8 -*-
import argparse, os, sys, json
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import numpy as np
from math import ceil
from datetime import datetime
from keras.preprocessing.image import ImageDataGenerator
import keras.backend as K
from keras.utils import to_categorical
from model.triplet import TripletLoss
from model.siamese import Siamese
from model.triplet_pose_model import TripletLossPoseInv
from model.classification_model import Classification
from utils.batch_generators import BatchGenerator, PairsImageDataGenerator
from utils.preprocessing import (
read_dataset,
analyse_dataset,
split_classes,
split_classification,
)
from utils.utils import print_nested, save_res_csv
from evaluation.evaluate_accuracy import evaluate_1_vs_all
# Command-line interface for the training script.
argparser = argparse.ArgumentParser(
    description='Train and validate a model on any dataset'
)
# Path to the JSON configuration file driving the experiment.
argparser.add_argument(
    '-c', '--conf', help='path to the configuration file', default='config.json'
)
# K-fold split selector: 0..4 selects one split, -1 disables K-fold.
argparser.add_argument(
    '-s',
    '--split_num',
    help='index of split for K-fold: number from [0,4] or -1 if no K-fold',
    type=int,
    default=-1,
)
def _main_(args):
    """Train and validate a model on any dataset, driven by a JSON config.

    Loads the configuration, prepares train/validation data (either a
    dedicated test set or a split of the train set), constructs the requested
    model type (TripletLoss, TripletPose, Siamese or Classification),
    optionally restores saved/pre-trained weights, then trains in blocks of
    ``config['train']['log_step']`` epochs, evaluating and logging after each
    block. Weights are checkpointed every 50 iterations and at the end.

    Args:
        args: argparse namespace with ``conf`` (path to the JSON configuration
            file) and ``split_num`` (K-fold split index in [0, 4], or -1 when
            no K-fold is used).
    """
    # Record start time:
    startTime = datetime.now()

    ###############################
    # Read config with parameters and command line params
    ###############################
    config_path = args.conf
    with open(config_path) as config_buffer:
        config = json.loads(config_buffer.read())
    split_num = args.split_num

    ###############################
    # Create folders for logs
    ###############################
    exp_folder = os.path.join(config['train']['exp_dir'], config['train']['exp_id'])
    if split_num >= 0:
        exp_folder = exp_folder + '-split-' + str(split_num)
    if not os.path.exists(exp_folder):
        os.makedirs(exp_folder)

    ###############################
    # Redirect print output to logs
    ###############################
    FULL_PRINT_LOG = os.path.join(exp_folder, 'full_print_output.log')
    if config['general']['stdout-file']:
        # All subsequent prints go to the experiment log file.
        sys.stdout = open(FULL_PRINT_LOG, 'a+')
    print('=' * 40)
    print(
        'Date: {} / Experiment id: {}'.format(datetime.now(), config['train']['exp_id'])
    )
    print('Config parameters:')
    print_nested(config, nesting=-2)
    print('=' * 40)

    ###############################
    # Get dataset and labels
    ###############################
    # Get test set if exists, otherwise split train set
    if os.path.exists(config['evaluate']['test_set']):
        print('Loading test set from {}'.format(config['evaluate']['test_set']))
        valid_imgs, valid_names, _ = read_dataset(
            config['evaluate']['test_set'], original_labels=True
        )
        train_imgs, train_names, _ = read_dataset(
            config['data']['train_image_folder'], original_labels=True
        )
        overlap = all(np.isin(train_names, valid_names))
        print('Overlap between train and valid set in individual names: ', overlap)
        # Build one shared name -> integer-label mapping over both sets so the
        # same individual gets the same label in train and valid.
        unique_names = np.unique(np.concatenate((valid_names, train_names)))
        name2lab = dict(enumerate(unique_names))
        name2lab.update({v: k for k, v in name2lab.items()})
        train_labels = np.array([name2lab[name] for name in train_names])
        valid_labels = np.array([name2lab[name] for name in valid_names])
        # if Classification => convert to one-hot encoding
        if config['model']['type'] == 'Classification':
            train_labels = to_categorical(train_labels)
            valid_labels = to_categorical(valid_labels)
    else:
        print('No test set. Splitting train set...')
        imgs, labels, label_dict = read_dataset(config['data']['train_image_folder'])
        print('Label encoding: ', label_dict)
        if config['model']['type'] in ('TripletLoss', 'TripletPose', 'Siamese'):
            train_imgs, train_labels, valid_imgs, valid_labels = split_classes(
                imgs, labels, seed=config['data']['split_seed'], split_num=split_num
            )
        elif config['model']['type'] == 'Classification':
            train_imgs, train_labels, valid_imgs, valid_labels = split_classification(
                imgs,
                labels,
                min_imgs=config['evaluate']['move_to_dataset'],
                return_mask=False,
            )
            # Convert labels to one-hot encoding:
            train_labels = to_categorical(train_labels)
            valid_labels = to_categorical(valid_labels)
        else:
            raise Exception('Define Data Split for the model type')
        # Delete futher unused variables to clear space
        del imgs
        del labels
    analyse_dataset(train_imgs, train_labels, 'train')
    analyse_dataset(valid_imgs, valid_labels, 'valid')

    ##############################
    # Construct the model
    ##############################
    INPUT_SHAPE = (config['model']['input_height'], config['model']['input_width'], 3)
    model_args = dict(
        backend=config['model']['backend'],
        frontend=config['model']['frontend'],
        input_shape=INPUT_SHAPE,
        embedding_size=config['model']['embedding_size'],
        connect_layer=config['model']['connect_layer'],
        train_from_layer=config['model']['train_from_layer'],
        loss_func=config['model']['loss'],
        weights='imagenet',
    )
    if config['model']['type'] == 'TripletLoss':
        mymodel = TripletLoss(**model_args)
    elif config['model']['type'] == 'Siamese':
        mymodel = Siamese(**model_args)
    elif config['model']['type'] == 'TripletPose':
        model_args['n_poses'] = config['model']['n_poses']
        # One batch holds cl_per_batch classes x sampl_per_class samples.
        model_args['bs'] = (
            config['train']['cl_per_batch'] * config['train']['sampl_per_class']
        )
        mymodel = TripletLossPoseInv(**model_args)
    elif config['model']['type'] == 'Classification':
        # For classification the output size is the number of classes
        # (one-hot labels), not the configured embedding size.
        model_args['embedding_size'] = train_labels.shape[1]
        mymodel = Classification(**model_args)
    else:
        raise Exception('Model type {} is not supported'.format(config['model']['type']))

    ##############################
    # Load initial weights
    ##############################
    SAVED_WEIGHTS = os.path.join(exp_folder, 'best_weights.h5')
    PRETRAINED_WEIGHTS = config['train']['pretrained_weights']
    if os.path.exists(SAVED_WEIGHTS):
        print('Loading saved weights in ', SAVED_WEIGHTS)
        mymodel.load_weights(SAVED_WEIGHTS)
        warm_up_flag = False
    elif os.path.exists(PRETRAINED_WEIGHTS):
        # Bug fix: this branch previously read `warm_up_flag == False` — a
        # no-op comparison that left the flag undefined (NameError later) and
        # never loaded the pre-trained weights it had just found.
        print('Loading pre-trained weights in ', PRETRAINED_WEIGHTS)
        mymodel.load_weights(PRETRAINED_WEIGHTS)
        warm_up_flag = False
    else:
        print('No pre-trained weights are found')
        warm_up_flag = True

    ############################################
    # Make train and validation generators
    ############################################
    if config['train']['aug_rate'] == 'manta':
        gen_args = dict(
            rotation_range=360,
            width_shift_range=0.1,
            height_shift_range=0.1,
            zoom_range=0.2,
            data_format=K.image_data_format(),
            fill_mode='nearest',
            preprocessing_function=mymodel.backend_class.normalize,
        )
    elif config['train']['aug_rate'] == 'whale':
        gen_args = dict(
            rotation_range=15,
            width_shift_range=0.1,
            height_shift_range=0.1,
            zoom_range=0.2,
            data_format=K.image_data_format(),
            fill_mode='nearest',
            preprocessing_function=mymodel.backend_class.normalize,
        )
    else:
        raise Exception('Define augmentation rate in config!')
    if config['model']['type'] == 'TripletLoss':
        gen = ImageDataGenerator(**gen_args)
        train_generator = BatchGenerator(
            train_imgs,
            train_labels,
            aug_gen=gen,
            p=config['train']['cl_per_batch'],
            k=config['train']['sampl_per_class'],
            equal_k=config['train']['equal_k'],
        )
        valid_generator = BatchGenerator(
            valid_imgs,
            valid_labels,
            aug_gen=gen,
            p=config['train']['cl_per_batch'],
            k=config['train']['sampl_per_class'],
            equal_k=config['train']['equal_k'],
        )
    elif config['model']['type'] == 'TripletPose':
        gen = ImageDataGenerator(**gen_args)
        gen_params = dict(
            aug_gen=gen,
            p=config['train']['cl_per_batch'],
            k=config['train']['sampl_per_class'],
            equal_k=config['train']['equal_k'],
            n_poses=config['model']['n_poses'],
            rotate_poses=config['model']['rotate_poses'],
            flatten_batch=True,
            perspective=config['model']['perspective'],
        )
        train_generator = BatchGenerator(train_imgs, train_labels, **gen_params)
        valid_generator = BatchGenerator(valid_imgs, valid_labels, **gen_params)
    elif config['model']['type'] == 'Siamese':
        gen = PairsImageDataGenerator(**gen_args)
        train_generator = gen.flow(
            train_imgs, train_labels, batch_size=config['train']['batch_size'], seed=0
        )
        valid_generator = gen.flow(
            valid_imgs, valid_labels, batch_size=config['train']['batch_size'], seed=1
        )
    elif config['model']['type'] == 'Classification':
        gen = ImageDataGenerator(**gen_args)
        train_generator = gen.flow(
            train_imgs, train_labels, batch_size=config['train']['batch_size']
        )
        valid_generator = gen.flow(
            valid_imgs, valid_labels, batch_size=config['train']['batch_size']
        )
    else:
        raise Exception('Define Data Generator for the model type')
    # Compute preprocessing time:
    preprocTime = datetime.now() - startTime
    print('Preprocessing time is {}'.format(preprocTime))

    ###############################
    # Training
    ###############################
    LOGS_FILE = os.path.join(exp_folder, 'history.csv')
    PLOT_FILE = os.path.join(exp_folder, 'plot.png')
    ALL_EXP_LOG = os.path.join(config['train']['exp_dir'], 'experiments_all.csv')
    # Training proceeds in n_iter blocks of log_step epochs each.
    n_iter = ceil(config['train']['nb_epochs'] / config['train']['log_step'])
    if config['model']['type'] in ('TripletLoss', 'TripletPose'):
        batch_size = config['train']['cl_per_batch'] * config['train']['sampl_per_class']
    elif config['model']['type'] in ('Siamese', 'Classification'):
        batch_size = config['train']['batch_size']
    else:
        raise Exception('Define batch size for a model type!')
    steps_per_epoch = train_imgs.shape[0] // batch_size + 1
    print('Steps per epoch: {}'.format(steps_per_epoch))
    if warm_up_flag:
        # Fresh (random) head weights: train briefly at 10x learning rate.
        print(
            '-----First training. Warm up epochs to train random weights with higher learning rate--------'
        )
        mymodel.warm_up_train(
            train_gen=train_generator,
            valid_gen=valid_generator,
            nb_epochs=1,
            batch_size=config['train']['batch_size'],
            learning_rate=config['train']['learning_rate'] * 10,
            steps_per_epoch=steps_per_epoch,
            distance=config['train']['distance'],
            saved_weights_name=SAVED_WEIGHTS,
            logs_file=LOGS_FILE,
            debug=config['train']['debug'],
        )
    for iteration in range(n_iter):
        print(
            '-------------Starting iteration {} -------------------'.format(iteration + 1)
        )
        startTrainingTime = datetime.now()
        # Add weights to balance losses if required
        weights = [1.0, 1.0]
        mymodel.train(
            train_gen=train_generator,
            valid_gen=valid_generator,
            nb_epochs=config['train']['log_step'],
            batch_size=config['train']['batch_size'],
            learning_rate=config['train']['learning_rate'],
            steps_per_epoch=steps_per_epoch,
            distance=config['train']['distance'],
            saved_weights_name=SAVED_WEIGHTS,
            logs_file=LOGS_FILE,
            debug=config['train']['debug'],
            weights=weights,
        )
        ############################################
        # Plot training history
        ############################################
        mymodel.plot_history(
            LOGS_FILE, from_epoch=0, showFig=False, saveFig=True, figName=PLOT_FILE
        )
        if config['model']['type'] in ('TripletLoss', 'TripletPose', 'Siamese'):
            print('Evaluating...')
            train_preds = mymodel.preproc_predict(
                train_imgs, config['train']['batch_size']
            )
            valid_preds = mymodel.preproc_predict(
                valid_imgs, config['train']['batch_size']
            )
            print('Shape of computed predictions', train_preds.shape, valid_preds.shape)
            acc, stdev = evaluate_1_vs_all(
                train_preds,
                train_labels,
                valid_preds,
                valid_labels,
                n_eval_runs=config['evaluate']['n_eval_epochs'],
                move_to_db=config['evaluate']['move_to_dataset'],
                k_list=config['evaluate']['accuracy_at_k'],
            )
        # Calc execution time for each iteration
        iterationTime = datetime.now() - startTrainingTime
        print('Iteration {} finished, time {}'.format(iteration + 1, iterationTime))
        # Collect data for logs
        result = dict()
        result['date_time'] = datetime.now()
        result['config'] = config_path
        result['experiment_id'] = exp_folder
        result['iteration_time'] = iterationTime
        result['images'] = config['data']['train_image_folder']
        result['input_height'] = config['model']['input_height']
        result['input_width'] = config['model']['input_width']
        result['backend'] = config['model']['backend']
        result['connect_layer'] = config['model']['connect_layer']
        result['frontend'] = config['model']['frontend']
        result['train_from_layer'] = config['model']['train_from_layer']
        result['embedding_size'] = config['model']['embedding_size']
        result['learning_rate'] = config['train']['learning_rate']
        result['nb_epochs'] = config['train']['log_step']
        if config['model']['type'] in ('TripletLoss', 'TripletPose', 'Siamese'):
            # Bug fix: `acc` only exists for the metric-learning branch above;
            # previously these lines ran unconditionally and raised a
            # NameError for Classification models.
            result['acc1'] = round(acc[1], 2)
            result['acc5'] = round(acc[5], 2)
            result['acc10'] = round(acc[10], 2)
        result['move_to_dataset'] = config['evaluate']['move_to_dataset']
        save_res_csv(result, ALL_EXP_LOG)
        # Bug fix: checkpoint every 50th iteration. The original condition
        # `iteration % 50` was truthy on every iteration NOT divisible by 50.
        if iteration % 50 == 0 and iteration > 0:
            time_finish = (
                datetime.now().strftime('%Y%m%d-%H%M%S') + '_iter_' + str(iteration)
            )
            TEMP_WEIGHTS = os.path.join(exp_folder, 'weights_at_' + time_finish + '.h5')
            mymodel.model.save_weights(TEMP_WEIGHTS)
    # ------End For each Iteration--------------#
    # Save weights at the end of experiment
    time_finish = datetime.now().strftime('%Y%m%d-%H%M%S') + '_last'
    TEMP_WEIGHTS = os.path.join(exp_folder, 'weights_at_' + time_finish + '.h5')
    mymodel.model.save_weights(TEMP_WEIGHTS)
# Script entry point: parse CLI arguments and run training.
if __name__ == '__main__':
    args = argparser.parse_args()
    _main_(args)
| [
"keras.preprocessing.image.ImageDataGenerator",
"numpy.isin",
"argparse.ArgumentParser",
"utils.utils.save_res_csv",
"utils.preprocessing.split_classes",
"model.classification_model.Classification",
"evaluation.evaluate_accuracy.evaluate_1_vs_all",
"os.path.join",
"utils.utils.print_nested",
"util... | [((74, 95), 'matplotlib.use', 'matplotlib.use', (['"""Agg"""'], {}), "('Agg')\n", (88, 95), False, 'import matplotlib\n'), ((823, 908), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Train and validate a model on any dataset"""'}), "(description='Train and validate a model on any dataset'\n )\n", (846, 908), False, 'import argparse, os, sys, json\n'), ((1241, 1255), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (1253, 1255), False, 'from datetime import datetime\n'), ((1663, 1730), 'os.path.join', 'os.path.join', (["config['train']['exp_dir']", "config['train']['exp_id']"], {}), "(config['train']['exp_dir'], config['train']['exp_id'])\n", (1675, 1730), False, 'import argparse, os, sys, json\n'), ((2017, 2066), 'os.path.join', 'os.path.join', (['exp_folder', '"""full_print_output.log"""'], {}), "(exp_folder, 'full_print_output.log')\n", (2029, 2066), False, 'import argparse, os, sys, json\n'), ((2319, 2351), 'utils.utils.print_nested', 'print_nested', (['config'], {'nesting': '(-2)'}), '(config, nesting=-2)\n', (2331, 2351), False, 'from utils.utils import print_nested, save_res_csv\n'), ((2540, 2586), 'os.path.exists', 'os.path.exists', (["config['evaluate']['test_set']"], {}), "(config['evaluate']['test_set'])\n", (2554, 2586), False, 'import argparse, os, sys, json\n'), ((4731, 4781), 'utils.preprocessing.analyse_dataset', 'analyse_dataset', (['train_imgs', 'train_labels', '"""train"""'], {}), "(train_imgs, train_labels, 'train')\n", (4746, 4781), False, 'from utils.preprocessing import read_dataset, analyse_dataset, split_classes, split_classification\n'), ((4786, 4836), 'utils.preprocessing.analyse_dataset', 'analyse_dataset', (['valid_imgs', 'valid_labels', '"""valid"""'], {}), "(valid_imgs, valid_labels, 'valid')\n", (4801, 4836), False, 'from utils.preprocessing import read_dataset, analyse_dataset, split_classes, split_classification\n'), ((6267, 6310), 'os.path.join', 'os.path.join', (['exp_folder', 
'"""best_weights.h5"""'], {}), "(exp_folder, 'best_weights.h5')\n", (6279, 6310), False, 'import argparse, os, sys, json\n'), ((6382, 6411), 'os.path.exists', 'os.path.exists', (['SAVED_WEIGHTS'], {}), '(SAVED_WEIGHTS)\n', (6396, 6411), False, 'import argparse, os, sys, json\n'), ((9983, 10022), 'os.path.join', 'os.path.join', (['exp_folder', '"""history.csv"""'], {}), "(exp_folder, 'history.csv')\n", (9995, 10022), False, 'import argparse, os, sys, json\n'), ((10039, 10075), 'os.path.join', 'os.path.join', (['exp_folder', '"""plot.png"""'], {}), "(exp_folder, 'plot.png')\n", (10051, 10075), False, 'import argparse, os, sys, json\n'), ((10094, 10157), 'os.path.join', 'os.path.join', (["config['train']['exp_dir']", '"""experiments_all.csv"""'], {}), "(config['train']['exp_dir'], 'experiments_all.csv')\n", (10106, 10157), False, 'import argparse, os, sys, json\n'), ((10172, 10236), 'math.ceil', 'ceil', (["(config['train']['nb_epochs'] / config['train']['log_step'])"], {}), "(config['train']['nb_epochs'] / config['train']['log_step'])\n", (10176, 10236), False, 'from math import ceil\n'), ((15130, 15191), 'os.path.join', 'os.path.join', (['exp_folder', "('weights_at_' + time_finish + '.h5')"], {}), "(exp_folder, 'weights_at_' + time_finish + '.h5')\n", (15142, 15191), False, 'import argparse, os, sys, json\n'), ((1826, 1852), 'os.path.exists', 'os.path.exists', (['exp_folder'], {}), '(exp_folder)\n', (1840, 1852), False, 'import argparse, os, sys, json\n'), ((1862, 1885), 'os.makedirs', 'os.makedirs', (['exp_folder'], {}), '(exp_folder)\n', (1873, 1885), False, 'import argparse, os, sys, json\n'), ((2706, 2772), 'utils.preprocessing.read_dataset', 'read_dataset', (["config['evaluate']['test_set']"], {'original_labels': '(True)'}), "(config['evaluate']['test_set'], original_labels=True)\n", (2718, 2772), False, 'from utils.preprocessing import read_dataset, analyse_dataset, split_classes, split_classification\n'), ((2832, 2904), 'utils.preprocessing.read_dataset', 
'read_dataset', (["config['data']['train_image_folder']"], {'original_labels': '(True)'}), "(config['data']['train_image_folder'], original_labels=True)\n", (2844, 2904), False, 'from utils.preprocessing import read_dataset, analyse_dataset, split_classes, split_classification\n'), ((3279, 3329), 'numpy.array', 'np.array', (['[name2lab[name] for name in train_names]'], {}), '([name2lab[name] for name in train_names])\n', (3287, 3329), True, 'import numpy as np\n'), ((3353, 3403), 'numpy.array', 'np.array', (['[name2lab[name] for name in valid_names]'], {}), '([name2lab[name] for name in valid_names])\n', (3361, 3403), True, 'import numpy as np\n'), ((3729, 3779), 'utils.preprocessing.read_dataset', 'read_dataset', (["config['data']['train_image_folder']"], {}), "(config['data']['train_image_folder'])\n", (3741, 3779), False, 'from utils.preprocessing import read_dataset, analyse_dataset, split_classes, split_classification\n'), ((5491, 5516), 'model.triplet.TripletLoss', 'TripletLoss', ([], {}), '(**model_args)\n', (5502, 5516), False, 'from model.triplet import TripletLoss\n'), ((6553, 6587), 'os.path.exists', 'os.path.exists', (['PRETRAINED_WEIGHTS'], {}), '(PRETRAINED_WEIGHTS)\n', (6567, 6587), False, 'import argparse, os, sys, json\n'), ((7710, 7740), 'keras.preprocessing.image.ImageDataGenerator', 'ImageDataGenerator', ([], {}), '(**gen_args)\n', (7728, 7740), False, 'from keras.preprocessing.image import ImageDataGenerator\n'), ((7767, 7939), 'utils.batch_generators.BatchGenerator', 'BatchGenerator', (['train_imgs', 'train_labels'], {'aug_gen': 'gen', 'p': "config['train']['cl_per_batch']", 'k': "config['train']['sampl_per_class']", 'equal_k': "config['train']['equal_k']"}), "(train_imgs, train_labels, aug_gen=gen, p=config['train'][\n 'cl_per_batch'], k=config['train']['sampl_per_class'], equal_k=config[\n 'train']['equal_k'])\n", (7781, 7939), False, 'from utils.batch_generators import BatchGenerator, PairsImageDataGenerator\n'), ((8039, 8211), 
'utils.batch_generators.BatchGenerator', 'BatchGenerator', (['valid_imgs', 'valid_labels'], {'aug_gen': 'gen', 'p': "config['train']['cl_per_batch']", 'k': "config['train']['sampl_per_class']", 'equal_k': "config['train']['equal_k']"}), "(valid_imgs, valid_labels, aug_gen=gen, p=config['train'][\n 'cl_per_batch'], k=config['train']['sampl_per_class'], equal_k=config[\n 'train']['equal_k'])\n", (8053, 8211), False, 'from utils.batch_generators import BatchGenerator, PairsImageDataGenerator\n'), ((9792, 9806), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (9804, 9806), False, 'from datetime import datetime\n'), ((11520, 11534), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (11532, 11534), False, 'from datetime import datetime\n'), ((2234, 2248), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (2246, 2248), False, 'from datetime import datetime\n'), ((2949, 2982), 'numpy.isin', 'np.isin', (['train_names', 'valid_names'], {}), '(train_names, valid_names)\n', (2956, 2982), True, 'import numpy as np\n'), ((3102, 3144), 'numpy.concatenate', 'np.concatenate', (['(valid_names, train_names)'], {}), '((valid_names, train_names))\n', (3116, 3144), True, 'import numpy as np\n'), ((3546, 3574), 'keras.utils.to_categorical', 'to_categorical', (['train_labels'], {}), '(train_labels)\n', (3560, 3574), False, 'from keras.utils import to_categorical\n'), ((3602, 3630), 'keras.utils.to_categorical', 'to_categorical', (['valid_labels'], {}), '(valid_labels)\n', (3616, 3630), False, 'from keras.utils import to_categorical\n'), ((3972, 4060), 'utils.preprocessing.split_classes', 'split_classes', (['imgs', 'labels'], {'seed': "config['data']['split_seed']", 'split_num': 'split_num'}), "(imgs, labels, seed=config['data']['split_seed'], split_num=\n split_num)\n", (3985, 4060), False, 'from utils.preprocessing import read_dataset, analyse_dataset, split_classes, split_classification\n'), ((5582, 5603), 'model.siamese.Siamese', 'Siamese', ([], {}), 
'(**model_args)\n', (5589, 5603), False, 'from model.siamese import Siamese\n'), ((8351, 8381), 'keras.preprocessing.image.ImageDataGenerator', 'ImageDataGenerator', ([], {}), '(**gen_args)\n', (8369, 8381), False, 'from keras.preprocessing.image import ImageDataGenerator\n'), ((8811, 8865), 'utils.batch_generators.BatchGenerator', 'BatchGenerator', (['train_imgs', 'train_labels'], {}), '(train_imgs, train_labels, **gen_params)\n', (8825, 8865), False, 'from utils.batch_generators import BatchGenerator, PairsImageDataGenerator\n'), ((8892, 8946), 'utils.batch_generators.BatchGenerator', 'BatchGenerator', (['valid_imgs', 'valid_labels'], {}), '(valid_imgs, valid_labels, **gen_params)\n', (8906, 8946), False, 'from utils.batch_generators import BatchGenerator, PairsImageDataGenerator\n'), ((12882, 13105), 'evaluation.evaluate_accuracy.evaluate_1_vs_all', 'evaluate_1_vs_all', (['train_preds', 'train_labels', 'valid_preds', 'valid_labels'], {'n_eval_runs': "config['evaluate']['n_eval_epochs']", 'move_to_db': "config['evaluate']['move_to_dataset']", 'k_list': "config['evaluate']['accuracy_at_k']"}), "(train_preds, train_labels, valid_preds, valid_labels,\n n_eval_runs=config['evaluate']['n_eval_epochs'], move_to_db=config[\n 'evaluate']['move_to_dataset'], k_list=config['evaluate']['accuracy_at_k'])\n", (12899, 13105), False, 'from evaluation.evaluate_accuracy import evaluate_1_vs_all\n'), ((13529, 13543), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (13541, 13543), False, 'from datetime import datetime\n'), ((14598, 14631), 'utils.utils.save_res_csv', 'save_res_csv', (['result', 'ALL_EXP_LOG'], {}), '(result, ALL_EXP_LOG)\n', (14610, 14631), False, 'from utils.utils import print_nested, save_res_csv\n'), ((14832, 14893), 'os.path.join', 'os.path.join', (['exp_folder', "('weights_at_' + time_finish + '.h5')"], {}), "(exp_folder, 'weights_at_' + time_finish + '.h5')\n", (14844, 14893), False, 'import argparse, os, sys, json\n'), ((4209, 4315), 
'utils.preprocessing.split_classification', 'split_classification', (['imgs', 'labels'], {'min_imgs': "config['evaluate']['move_to_dataset']", 'return_mask': '(False)'}), "(imgs, labels, min_imgs=config['evaluate'][\n 'move_to_dataset'], return_mask=False)\n", (4229, 4315), False, 'from utils.preprocessing import read_dataset, analyse_dataset, split_classes, split_classification\n'), ((4467, 4495), 'keras.utils.to_categorical', 'to_categorical', (['train_labels'], {}), '(train_labels)\n', (4481, 4495), False, 'from keras.utils import to_categorical\n'), ((4523, 4551), 'keras.utils.to_categorical', 'to_categorical', (['valid_labels'], {}), '(valid_labels)\n', (4537, 4551), False, 'from keras.utils import to_categorical\n'), ((5852, 5884), 'model.triplet_pose_model.TripletLossPoseInv', 'TripletLossPoseInv', ([], {}), '(**model_args)\n', (5870, 5884), False, 'from model.triplet_pose_model import TripletLossPoseInv\n'), ((7077, 7098), 'keras.backend.image_data_format', 'K.image_data_format', ([], {}), '()\n', (7096, 7098), True, 'import keras.backend as K\n'), ((9009, 9044), 'utils.batch_generators.PairsImageDataGenerator', 'PairsImageDataGenerator', ([], {}), '(**gen_args)\n', (9032, 9044), False, 'from utils.batch_generators import BatchGenerator, PairsImageDataGenerator\n'), ((13306, 13320), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (13318, 13320), False, 'from datetime import datetime\n'), ((15060, 15074), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (15072, 15074), False, 'from datetime import datetime\n'), ((6018, 6046), 'model.classification_model.Classification', 'Classification', ([], {}), '(**model_args)\n', (6032, 6046), False, 'from model.classification_model import Classification\n'), ((7439, 7460), 'keras.backend.image_data_format', 'K.image_data_format', ([], {}), '()\n', (7458, 7460), True, 'import keras.backend as K\n'), ((9380, 9410), 'keras.preprocessing.image.ImageDataGenerator', 'ImageDataGenerator', ([], {}), 
'(**gen_args)\n', (9398, 9410), False, 'from keras.preprocessing.image import ImageDataGenerator\n'), ((14722, 14736), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (14734, 14736), False, 'from datetime import datetime\n')] |
# Copyright (c) 2021 Massachusetts Institute of Technology
# SPDX-License-Identifier: MIT
import inspect
import hypothesis.strategies as st
import numpy as np
import pytest
from hypothesis import assume, given
from omegaconf import OmegaConf
from hydra_zen import builds, instantiate, just, to_yaml
from hydra_zen.structured_configs._utils import safe_name
def test_builds_roundtrip_with_ufunc():
    """A partial build of a NumPy ufunc should instantiate to a working callable."""
    partial_add = instantiate(builds(np.add, hydra_partial=True))
    assert partial_add(1.0, 2.0) == np.array(3.0)
# A representative spread of numpy callables and classes (ufuncs, classes,
# nested-module functions) used to exercise hydra-zen against real objects.
numpy_objects = [
    np.array,
    np.dtype,
    np.add,
    np.ufunc,  # ufuncs work!
    np.linalg.norm,
    np.linalg.linalg.eigvalsh,
    np.reshape,
    np.random.rand,
    np.random.Generator,
    np.testing.assert_allclose,
    np.polynomial.Polynomial,
    np.polynomial.polynomial.polyadd,
]
@pytest.mark.parametrize("obj", numpy_objects)
def test_just_roundtrip(obj):
    """`just` followed by instantiation must hand back the very same object."""
    roundtripped = instantiate(just(obj))
    assert roundtripped is obj
@pytest.mark.parametrize(
    "obj, expected_name",
    [
        (np.add, "add"),
        (np.shape, "shape"),
        (np.array, "array"),
        (np.linalg.norm, "norm"),
    ],
)
def test_safename_known(obj, expected_name):
    """`safe_name` should recover the bare attribute name of numpy callables."""
    recovered = safe_name(obj)
    assert recovered == expected_name
@pytest.mark.parametrize("target", numpy_objects)
@given(partial=st.booleans(), full_sig=st.booleans())
def test_fuzz_build_validation_against_a_bunch_of_common_objects(
    target, partial: bool, full_sig: bool
):
    """`builds` should validate and serialize for a spread of numpy objects."""
    try:
        inspect.signature(target)
        has_signature = True
    except ValueError:
        has_signature = False
    # A full signature cannot be populated for objects that expose none.
    if full_sig and not has_signature:
        assume(False)
    conf = builds(target, hydra_partial=partial, populate_full_signature=full_sig)
    OmegaConf.create(to_yaml(conf))  # ensure serializable
    if partial:
        instantiate(conf)  # ensure instantiable
def f(reduction_fn=np.add):
    """Return its argument unchanged; the default is a ufunc (np.add).

    Exists so tests can check that a ufunc default survives signature
    introspection.
    """
    return reduction_fn
def test_ufunc_as_default_value():
    """A ufunc default should survive the builds -> yaml -> instantiate cycle."""
    conf = builds(f, populate_full_signature=True)
    to_yaml(conf)  # check serializability
    result = instantiate(conf)
    assert result is np.add
def test_ufunc_positional_args():
    # Positional args given to builds should be forwarded to the ufunc call.
    assert instantiate(builds(np.add, 1.0, 2.0)) == 3.0
| [
"hydra_zen.structured_configs._utils.safe_name",
"hydra_zen.builds",
"hydra_zen.to_yaml",
"hypothesis.strategies.booleans",
"hydra_zen.just",
"numpy.array",
"inspect.signature",
"pytest.mark.parametrize",
"hydra_zen.instantiate",
"hypothesis.assume"
] | [((795, 840), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""obj"""', 'numpy_objects'], {}), "('obj', numpy_objects)\n", (818, 840), False, 'import pytest\n'), ((915, 1051), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""obj, expected_name"""', "[(np.add, 'add'), (np.shape, 'shape'), (np.array, 'array'), (np.linalg.norm,\n 'norm')]"], {}), "('obj, expected_name', [(np.add, 'add'), (np.shape,\n 'shape'), (np.array, 'array'), (np.linalg.norm, 'norm')])\n", (938, 1051), False, 'import pytest\n'), ((1189, 1237), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""target"""', 'numpy_objects'], {}), "('target', numpy_objects)\n", (1212, 1237), False, 'import pytest\n'), ((1599, 1670), 'hydra_zen.builds', 'builds', (['target'], {'hydra_partial': 'partial', 'populate_full_signature': 'full_sig'}), '(target, hydra_partial=partial, populate_full_signature=full_sig)\n', (1605, 1670), False, 'from hydra_zen import builds, instantiate, just, to_yaml\n'), ((1899, 1938), 'hydra_zen.builds', 'builds', (['f'], {'populate_full_signature': '(True)'}), '(f, populate_full_signature=True)\n', (1905, 1938), False, 'from hydra_zen import builds, instantiate, just, to_yaml\n'), ((1943, 1956), 'hydra_zen.to_yaml', 'to_yaml', (['conf'], {}), '(conf)\n', (1950, 1956), False, 'from hydra_zen import builds, instantiate, just, to_yaml\n'), ((474, 487), 'numpy.array', 'np.array', (['(3.0)'], {}), '(3.0)\n', (482, 487), True, 'import numpy as np\n'), ((1154, 1168), 'hydra_zen.structured_configs._utils.safe_name', 'safe_name', (['obj'], {}), '(obj)\n', (1163, 1168), False, 'from hydra_zen.structured_configs._utils import safe_name\n'), ((1448, 1473), 'inspect.signature', 'inspect.signature', (['target'], {}), '(target)\n', (1465, 1473), False, 'import inspect\n'), ((1574, 1587), 'hypothesis.assume', 'assume', (['(False)'], {}), '(False)\n', (1580, 1587), False, 'from hypothesis import assume, given\n'), ((1693, 1706), 'hydra_zen.to_yaml', 'to_yaml', (['conf'], 
{}), '(conf)\n', (1700, 1706), False, 'from hydra_zen import builds, instantiate, just, to_yaml\n'), ((1756, 1773), 'hydra_zen.instantiate', 'instantiate', (['conf'], {}), '(conf)\n', (1767, 1773), False, 'from hydra_zen import builds, instantiate, just, to_yaml\n'), ((1253, 1266), 'hypothesis.strategies.booleans', 'st.booleans', ([], {}), '()\n', (1264, 1266), True, 'import hypothesis.strategies as st\n'), ((1277, 1290), 'hypothesis.strategies.booleans', 'st.booleans', ([], {}), '()\n', (1288, 1290), True, 'import hypothesis.strategies as st\n'), ((1993, 2010), 'hydra_zen.instantiate', 'instantiate', (['conf'], {}), '(conf)\n', (2004, 2010), False, 'from hydra_zen import builds, instantiate, just, to_yaml\n'), ((894, 903), 'hydra_zen.just', 'just', (['obj'], {}), '(obj)\n', (898, 903), False, 'from hydra_zen import builds, instantiate, just, to_yaml\n'), ((2080, 2104), 'hydra_zen.builds', 'builds', (['np.add', '(1.0)', '(2.0)'], {}), '(np.add, 1.0, 2.0)\n', (2086, 2104), False, 'from hydra_zen import builds, instantiate, just, to_yaml\n'), ((425, 459), 'hydra_zen.builds', 'builds', (['np.add'], {'hydra_partial': '(True)'}), '(np.add, hydra_partial=True)\n', (431, 459), False, 'from hydra_zen import builds, instantiate, just, to_yaml\n')] |
#!/usr/bin/python
"""
Compute log power feature from an audio file
"""
import pickle, numpy
from btk20.common import *
from btk20.stream import *
from btk20.feature import *
D = 160 # 10 msec for 16 kHz audio
fft_len = 256
pow_num = fft_len//2 + 1  # one-sided spectrum size: fft_len/2 + 1 bins
input_filename = "../tools/filterbank/Headset1.wav"
output_filename = "log_power.pickle"
# Feature chain: samples -> Hamming window -> FFT -> power -> log power.
# Each *FeaturePtr wraps the previous stage and pulls frames lazily.
# Audio file reader
samplefe = SampleFeaturePtr(block_len=D, shift_len=D, pad_zeros=False)
# Hamming window calculator
hammingfe = HammingFeaturePtr(samplefe)
# FFT feature extractor
fftfe = FFTFeaturePtr(hammingfe, fft_len=fft_len)
# Power (complex square) feature extractor
powerfe = SpectralPowerFeaturePtr(fftfe, pow_num=pow_num)
# Log feature extractor
logfe = LogFeaturePtr(powerfe)
# Reading the audio file
samplefe.read(input_filename)
# Stream log-power vectors frame by frame and pickle each one to disk.
# BUGFIX: pickle emits bytes, so the output file must be opened in binary
# mode ('wb'); a text-mode handle makes pickle.dump raise TypeError on Py3.
with open(output_filename, 'wb') as ofp:
    frame_no = 0
    # compute the log power feature at each frame
    for log_vector in logfe:
        # print the first 10-dimension vector
        print('fr. {}: {}..'.format(frame_no, numpy.array2string(log_vector[0:10], formatter={'float_kind':lambda x: "%.2f" % x})))
        pickle.dump(log_vector, ofp, True)
        frame_no += 1
| [
"pickle.dump",
"numpy.array2string"
] | [((1118, 1152), 'pickle.dump', 'pickle.dump', (['log_vector', 'ofp', '(True)'], {}), '(log_vector, ofp, True)\n', (1129, 1152), False, 'import pickle, numpy\n'), ((1024, 1113), 'numpy.array2string', 'numpy.array2string', (['log_vector[0:10]'], {'formatter': "{'float_kind': lambda x: '%.2f' % x}"}), "(log_vector[0:10], formatter={'float_kind': lambda x: \n '%.2f' % x})\n", (1042, 1113), False, 'import pickle, numpy\n')] |
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import sys
import json
import paddle
import numpy as np
from .category import get_categories
from .map_utils import prune_zero_padding, DetectionMAP
from .coco_utils import get_infer_results, cocoapi_eval
from .widerface_utils import face_eval_run
from ppdet.utils.logger import setup_logger
# Module-level logger for evaluation progress and result messages.
logger = setup_logger(__name__)
# Public API of this module.
__all__ = [
    'Metric', 'COCOMetric', 'VOCMetric', 'WiderFaceMetric', 'get_infer_results'
]
class Metric(paddle.metric.Metric):
    """Abstract base class for ppdet evaluation metrics.

    ``paddle.metric.Metric`` already declares :meth:`update`,
    :meth:`accumulate` and :meth:`reset`; ppdet additionally expects the
    :meth:`log` and :meth:`get_results` hooks defined below.
    """
    def name(self):
        # Human-readable metric name; defaults to the concrete class name.
        return self.__class__.__name__
    def reset(self):
        pass
    def accumulate(self):
        pass
    # paddle.metric.Metric defines :meth:`update`, :meth:`accumulate` and
    # :meth:`reset`; in ppdet, we also need the following 2 methods:
    # abstract method for logging metric results
    def log(self):
        pass
    # abstract method for getting metric results
    def get_results(self):
        pass
class COCOMetric(Metric):
    """COCO-style evaluation: accumulates per-image bbox/mask/segm
    predictions and scores them with the COCO API against ``anno_file``.
    """
    def __init__(self, anno_file, **kwargs):
        assert os.path.isfile(anno_file), \
            "anno_file {} not a file".format(anno_file)
        self.anno_file = anno_file
        self.clsid2catid, self.catid2name = get_categories('COCO', anno_file)
        # TODO: bias should be unified
        self.bias = kwargs.get('bias', 0)
        self.reset()
    def reset(self):
        # only bbox and mask evaluation support currently
        self.results = {'bbox': [], 'mask': [], 'segm': []}
        self.eval_results = {}
    def update(self, inputs, outputs):
        outs = {}
        # outputs Tensor -> numpy.ndarray
        for k, v in outputs.items():
            outs[k] = v.numpy() if isinstance(v, paddle.Tensor) else v
        im_id = inputs['im_id']
        outs['im_id'] = im_id.numpy() if isinstance(im_id,
                                                    paddle.Tensor) else im_id
        # convert raw network outputs into COCO-format result dicts and
        # append them to the per-type accumulation lists
        infer_results = get_infer_results(
            outs, self.clsid2catid, bias=self.bias)
        self.results['bbox'] += infer_results[
            'bbox'] if 'bbox' in infer_results else []
        self.results['mask'] += infer_results[
            'mask'] if 'mask' in infer_results else []
        self.results['segm'] += infer_results[
            'segm'] if 'segm' in infer_results else []
    def accumulate(self):
        # Each branch dumps the accumulated results to a JSON file, then
        # runs COCO evaluation on that file.
        if len(self.results['bbox']) > 0:
            with open("bbox.json", 'w') as f:
                json.dump(self.results['bbox'], f)
                logger.info('The bbox result is saved to bbox.json.')
            bbox_stats = cocoapi_eval(
                'bbox.json', 'bbox', anno_file=self.anno_file)
            self.eval_results['bbox'] = bbox_stats
            sys.stdout.flush()
        if len(self.results['mask']) > 0:
            with open("mask.json", 'w') as f:
                json.dump(self.results['mask'], f)
                logger.info('The mask result is saved to mask.json.')
            seg_stats = cocoapi_eval(
                'mask.json', 'segm', anno_file=self.anno_file)
            self.eval_results['mask'] = seg_stats
            sys.stdout.flush()
        if len(self.results['segm']) > 0:
            with open("segm.json", 'w') as f:
                json.dump(self.results['segm'], f)
                logger.info('The segm result is saved to segm.json.')
            seg_stats = cocoapi_eval(
                'segm.json', 'segm', anno_file=self.anno_file)
            # NOTE(review): segm stats are stored under the 'mask' key,
            # overwriting any mask stats computed above -- confirm intended.
            self.eval_results['mask'] = seg_stats
            sys.stdout.flush()
    def log(self):
        pass
    def get_results(self):
        return self.eval_results
class VOCMetric(Metric):
    """PASCAL VOC mAP evaluation backed by a DetectionMAP accumulator."""
    def __init__(self,
                 anno_file,
                 class_num=20,
                 overlap_thresh=0.5,
                 map_type='11point',
                 is_bbox_normalized=False,
                 evaluate_difficult=False):
        assert os.path.isfile(anno_file), \
            "anno_file {} not a file".format(anno_file)
        self.anno_file = anno_file
        self.clsid2catid, self.catid2name = get_categories('VOC', anno_file)
        self.overlap_thresh = overlap_thresh
        self.map_type = map_type
        self.evaluate_difficult = evaluate_difficult
        self.detection_map = DetectionMAP(
            class_num=class_num,
            overlap_thresh=overlap_thresh,
            map_type=map_type,
            is_bbox_normalized=is_bbox_normalized,
            evaluate_difficult=evaluate_difficult)
        self.reset()
    def reset(self):
        self.detection_map.reset()
    def update(self, inputs, outputs):
        # outputs['bbox'] rows are [class, score, box...]; columns split below
        bboxes = outputs['bbox'][:, 2:].numpy()
        scores = outputs['bbox'][:, 1].numpy()
        labels = outputs['bbox'][:, 0].numpy()
        bbox_lengths = outputs['bbox_num'].numpy()
        if bboxes.shape == (1, 1) or bboxes is None:
            return
        gt_boxes = inputs['gt_bbox'].numpy()
        gt_labels = inputs['gt_class'].numpy()
        difficults = inputs['difficult'].numpy() if not self.evaluate_difficult \
            else None
        # fall back to unit scale when the batch carries no scale_factor
        scale_factor = inputs['scale_factor'].numpy(
        ) if 'scale_factor' in inputs else np.ones(
            (gt_boxes.shape[0], 2)).astype('float32')
        bbox_idx = 0
        for i in range(gt_boxes.shape[0]):
            gt_box = gt_boxes[i]
            h, w = scale_factor[i]
            # undo the resize applied during preprocessing
            gt_box = gt_box / np.array([w, h, w, h])
            gt_label = gt_labels[i]
            difficult = None if difficults is None \
                else difficults[i]
            # slice this image's predictions out of the flattened batch
            bbox_num = bbox_lengths[i]
            bbox = bboxes[bbox_idx:bbox_idx + bbox_num]
            score = scores[bbox_idx:bbox_idx + bbox_num]
            label = labels[bbox_idx:bbox_idx + bbox_num]
            gt_box, gt_label, difficult = prune_zero_padding(gt_box, gt_label,
                                                               difficult)
            self.detection_map.update(bbox, score, label, gt_box, gt_label,
                                      difficult)
            bbox_idx += bbox_num
    def accumulate(self):
        logger.info("Accumulating evaluatation results...")
        self.detection_map.accumulate()
    def log(self):
        map_stat = 100. * self.detection_map.get_map()
        logger.info("mAP({:.2f}, {}) = {:.2f}%".format(self.overlap_thresh,
                                                       self.map_type, map_stat))
    def get_results(self):
        # NOTE(review): the computed value is not returned here (unlike
        # COCOMetric.get_results) -- confirm whether that is intended.
        self.detection_map.get_map()
class WiderFaceMetric(Metric):
    """WIDER FACE evaluation: delegates the whole run to ``face_eval_run``."""
    def __init__(self, image_dir, anno_file, multi_scale=True):
        self.image_dir = image_dir
        self.anno_file = anno_file
        self.multi_scale = multi_scale
        self.clsid2catid, self.catid2name = get_categories('widerface')
    def update(self, model):
        # Unlike the other metrics, update() takes the model itself and runs
        # the full evaluation pipeline, writing predictions to output/pred.
        face_eval_run(
            model,
            self.image_dir,
            self.anno_file,
            pred_dir='output/pred',
            eval_mode='widerface',
            multi_scale=self.multi_scale)
| [
"json.dump",
"ppdet.utils.logger.setup_logger",
"numpy.ones",
"os.path.isfile",
"numpy.array",
"sys.stdout.flush"
] | [((1065, 1087), 'ppdet.utils.logger.setup_logger', 'setup_logger', (['__name__'], {}), '(__name__)\n', (1077, 1087), False, 'from ppdet.utils.logger import setup_logger\n'), ((1753, 1778), 'os.path.isfile', 'os.path.isfile', (['anno_file'], {}), '(anno_file)\n', (1767, 1778), False, 'import os\n'), ((4594, 4619), 'os.path.isfile', 'os.path.isfile', (['anno_file'], {}), '(anno_file)\n', (4608, 4619), False, 'import os\n'), ((3410, 3428), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (3426, 3428), False, 'import sys\n'), ((3803, 3821), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (3819, 3821), False, 'import sys\n'), ((4196, 4214), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (4212, 4214), False, 'import sys\n'), ((3139, 3173), 'json.dump', 'json.dump', (["self.results['bbox']", 'f'], {}), "(self.results['bbox'], f)\n", (3148, 3173), False, 'import json\n'), ((3534, 3568), 'json.dump', 'json.dump', (["self.results['mask']", 'f'], {}), "(self.results['mask'], f)\n", (3543, 3568), False, 'import json\n'), ((3927, 3961), 'json.dump', 'json.dump', (["self.results['segm']", 'f'], {}), "(self.results['segm'], f)\n", (3936, 3961), False, 'import json\n'), ((6099, 6121), 'numpy.array', 'np.array', (['[w, h, w, h]'], {}), '([w, h, w, h])\n', (6107, 6121), True, 'import numpy as np\n'), ((5873, 5904), 'numpy.ones', 'np.ones', (['(gt_boxes.shape[0], 2)'], {}), '((gt_boxes.shape[0], 2))\n', (5880, 5904), True, 'import numpy as np\n')] |
import numpy as np
import math
class FuzzyMembership():
    """ESRI python raster function that rescales input pixels to a [0, 1]
    fuzzy-membership value using one of several membership functions
    (Linear, Gaussian, Small, Large, Near, MSSmall, MSLarge), optionally
    modified by a 'Somewhat' or 'Very' hedge.
    """
    def __init__(self):
        self.name = "Fuzzy Membership Function"
        self.description = ("Reclassifies or transforms the input data to a 0 to 1 "
                            "scale based on the possibility of being a member of a "
                            "specified set")
        # Parameter A meaning per mode: Linear -> minimum, Gaussian/Near/
        # Small/Large -> mid point, MSSmall/MSLarge -> mean multiplier.
        self.parA = {'minimum': 1., 'mid': None, 'meanMultipler': 1.}
        # Parameter B meaning per mode: Linear -> maximum, Gaussian/Near ->
        # spreadA, Small/Large -> spreadB, MSSmall/MSLarge -> std multiplier.
        self.parB = {'maximum': 1., 'stdMultipler': 1., 'spreadA': 0.1, 'spreadB': 5.}

    def getParameterInfo(self):
        """Describe the tool's input parameters for the raster-function UI."""
        return [
            {
                'name': 'raster',
                'dataType': 'raster',
                'value': None,
                'required': True,
                'displayName': "Input Raster",
                'description': ("Fuzzy Membership tool - 0 is assigned to those locations that "
                                "are definitely not a member of the specified set. "
                                "1 is assigned to those values that are definitely a member "
                                "of the specified set, and the entire range of possibilities "
                                "between 0 and 1 are assigned to some level of possible membership.")
            },
            {
                'name': 'mode',
                'dataType': 'string',
                'value': 'Linear',
                'required': True,
                'domain': ('Linear', 'Gaussian', 'Small', 'Large', 'Near', 'MSSmall', 'MSLarge'),
                'displayName': "Fuzzy Membership Type",
                'description': "Fuzzy Membership type."
            },
            {
                'name': 'par1',
                'dataType': 'numeric',
                'value': None,
                'required': False,
                'displayName': "Input Parameter A",
                'description': ("Linear : {minimum value}, Gaussian/Near/Small/Large : {mid point}, "
                                "MSSmall/MSLarge : {mean multiplier}.")
            },
            {
                'name': 'par2',
                'dataType': 'numeric',
                'value': False,
                'required': True,
                'displayName': "Input Parameter B",
                'description': ("Linear : {maximum value}, Gaussian/Near/Small/Large : {spread}, "
                                "MSSmall/MSLarge : {std deviation multiplier}. ")
            },
            {
                'name': 'hedge',
                'dataType': 'string',
                'value': 'None',
                'required': False,
                'domain': ('None', 'Somewhat', 'Very'),
                'displayName': "Hedge",
                'description': ("A hedge increases or decreases the fuzzy membership values which modify the meaning of a fuzzy set. "
                                "None - No hedge applied. "
                                "Somewhat - The square root of the fuzzy membership function. Increases fuzzy membership functions. "
                                "Very- The square of the fuzzy membership function. Decreases fuzzy membership functions.")
            },
        ]

    def getConfiguration(self, **scalars):
        return {
            'inheritProperties': 2 | 4 | 8,      # inherit everything but the pixel type (1)
            'invalidateProperties': 2 | 4 | 8,   # invalidate these aspects because we are modifying pixel values and updating key properties.
            'inputMask': False                   # we don't need the input mask in .updatePixels()
        }

    def updateRasterInfo(self, **kwargs):
        """Configure output raster info and validate the mode parameters."""
        # output raster information: single-band float32 scaled to [0, 1]
        kwargs['output_info']['bandCount'] = 1
        kwargs['output_info']['pixelType'] = 'f4'
        kwargs['output_info']['statistics'] = ({'minimum': 0.0, 'maximum': 1.0},)

        self.mode = kwargs['mode'].lower()       # input fuzzy membership mode
        # BUGFIX: normalize the hedge to upper case so the domain values
        # 'Somewhat'/'Very' actually match the "SOMEWHAT"/"VERY" comparisons
        # in updatePixels (previously the hedge never applied).
        self.hedge = kwargs['hedge'].upper() if kwargs['hedge'] else 'NONE'

        # statistics of input raster (used by the MS* modes)
        stats = kwargs['raster_info']['statistics'][0]
        self.mean, self.std = stats['mean'], stats['standardDeviation']

        # assignment of fuzzy membership parameters; fall back to raster
        # statistics when a parameter is left at 0.0
        if kwargs['par1'] != 0.0:
            self.parA = self.parA.fromkeys(self.parA, kwargs['par1'])
        else:
            self.parA['minimum'] = stats['minimum']
            self.parA['mid'] = (stats['minimum']+stats['maximum'])/2

        if kwargs['par2'] != 0.0:
            self.parB = self.parB.fromkeys(self.parB, kwargs['par2'])
        else:
            self.parB['maximum'] = stats['maximum']

        # linear fuzzy membership requires a non-degenerate min/max range
        if ((self.parA['minimum'] == self.parB['maximum']) and (self.mode == "linear")):
            raise Exception("Linear minimum and maximum must be different.")

        # BUGFIX: the mode string is the lower-cased domain value, i.e.
        # 'gaussian' -- the old check compared against 'gauss' and so the
        # spread validation never fired for the Gaussian mode.
        if ((self.parB['spreadA'] < 0.01 or self.parB['spreadA'] > 1) and (self.mode == 'gaussian' or self.mode == 'near')) or \
           ((self.parB['spreadB'] < 1 or self.parB['spreadB'] > 10) and (self.mode == 'large' or self.mode == 'small')):
            raise Exception("Spread value out of range.")

        return kwargs

    def updatePixels(self, tlc, shape, props, **pixelBlocks):
        """Apply the configured membership function to one pixel block."""
        r = np.array(pixelBlocks['raster_pixels'], dtype='f8', copy=False)

        if self.mode == "linear":
            # fuzzy linear membership: rescale [min, max] -> [0, 1]
            r = (r - self.parA['minimum']) / (self.parB['maximum'] - self.parA['minimum'])
        elif self.mode == 'gaussian':
            # fuzzy gaussian membership centered on the mid point
            r = (np.e)**((-self.parB['spreadA']) * ((r - self.parA['mid'])**2))
        elif self.mode == 'large':
            # fuzzy large membership: large values -> high membership
            r = (1 / (1 + ((r / self.parA['mid'])**(-self.parB['spreadB']))))
        elif self.mode == 'small':
            # fuzzy small membership: small values -> high membership
            r = (1 / (1 + ((r / self.parA['mid'])**(self.parB['spreadB']))))
        elif self.mode == 'near':
            # fuzzy near membership: peak at the mid point
            r = (1 / (1 + (self.parB['spreadA'] * (r - self.parA['mid'])**2)))
        elif self.mode == 'mssmall':
            # fuzzy mssmall membership, based on raster mean/std
            rTemp = (self.parB['stdMultipler'] * self.std) / (r - (self.parA['meanMultipler'] * self.mean) + (self.parB['stdMultipler'] * self.std))
            np.putmask(r, r <= (self.mean * self.parA['meanMultipler']), 1.0)
            np.putmask(r, r > (self.mean * self.parA['meanMultipler']), rTemp)
        else:
            # fuzzy mslarge membership, based on raster mean/std
            rTemp = 1 - (self.parB['stdMultipler'] * self.std) / (r - (self.parA['meanMultipler'] * self.mean) + (self.parB['stdMultipler'] * self.std))
            np.putmask(r, r <= (self.mean * self.parA['meanMultipler']), 0.0)
            np.putmask(r, r > (self.mean * self.parA['meanMultipler']), rTemp)

        # clip output values between [0.0, 1.0]
        r = np.clip(r, 0.0, 1.0)

        # hedge calculations (hedge is normalized to upper case)
        if (self.hedge == "SOMEWHAT"): r = r ** 0.5
        elif (self.hedge == "VERY"):   r = r ** 2

        if len(r.shape) > 2:
            pixelBlocks['output_pixels'] = r[0].astype(props['pixelType'], copy=False)  # multi band raster
        else:
            pixelBlocks['output_pixels'] = r.astype(props['pixelType'], copy=False)     # single band raster
        return pixelBlocks

    def updateKeyMetadata(self, names, bandIndex, **keyMetadata):
        """Tag the output as a scientific 'FuzzyMembership' variable."""
        if bandIndex == -1:
            keyMetadata['datatype'] = 'Scientific'
            keyMetadata['variable'] = 'FuzzyMembership'
        elif bandIndex == 0:
            keyMetadata['wavelengthmin'] = None  # reset inapplicable band-specific key metadata
            keyMetadata['wavelengthmax'] = None
            keyMetadata['bandname'] = 'FuzzyMembership'
# ----- ## ----- ## ----- ## ----- ## ----- ## ----- ## ----- ## ----- ##
"""
References:
[1]. Esri (2013): ArcGIS Resources. How Fuzzy Membership Works.
http://resources.arcgis.com/en/help/main/10.1/index.html#//009z000000rz000000
[2]. Esri (2013): ArcGIS Resources. An overview of fuzzy classes.
http://resources.arcgis.com/en/help/main/10.1/index.html#/An_overview_of_fuzzy_classes/005m00000019000000/
"""
| [
"numpy.putmask",
"numpy.array",
"numpy.clip"
] | [((5439, 5501), 'numpy.array', 'np.array', (["pixelBlocks['raster_pixels']"], {'dtype': '"""f8"""', 'copy': '(False)'}), "(pixelBlocks['raster_pixels'], dtype='f8', copy=False)\n", (5447, 5501), True, 'import numpy as np\n'), ((7097, 7117), 'numpy.clip', 'np.clip', (['r', '(0.0)', '(1.0)'], {}), '(r, 0.0, 1.0)\n', (7104, 7117), True, 'import numpy as np\n'), ((6520, 6583), 'numpy.putmask', 'np.putmask', (['r', "(r <= self.mean * self.parA['meanMultipler'])", '(1.0)'], {}), "(r, r <= self.mean * self.parA['meanMultipler'], 1.0)\n", (6530, 6583), True, 'import numpy as np\n'), ((6599, 6663), 'numpy.putmask', 'np.putmask', (['r', "(r > self.mean * self.parA['meanMultipler'])", 'rTemp'], {}), "(r, r > self.mean * self.parA['meanMultipler'], rTemp)\n", (6609, 6663), True, 'import numpy as np\n'), ((6887, 6950), 'numpy.putmask', 'np.putmask', (['r', "(r <= self.mean * self.parA['meanMultipler'])", '(0.0)'], {}), "(r, r <= self.mean * self.parA['meanMultipler'], 0.0)\n", (6897, 6950), True, 'import numpy as np\n'), ((6966, 7030), 'numpy.putmask', 'np.putmask', (['r', "(r > self.mean * self.parA['meanMultipler'])", 'rTemp'], {}), "(r, r > self.mean * self.parA['meanMultipler'], rTemp)\n", (6976, 7030), True, 'import numpy as np\n')] |
"""
Understanding Image Retrieval Re-Ranking: A Graph Neural Network Perspective
<NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>
Project Page : https://github.com/Xuanmeng-Zhang/gnn-re-ranking
Paper: https://arxiv.org/abs/2012.07620v2
======================================================================
On the Market-1501 dataset, we accelerate the re-ranking processing from 89.2s to 9.4ms
with one K40m GPU, facilitating the real-time post-processing. Similarly, we observe
that our method achieves comparable or even better retrieval results on the other four
image retrieval benchmarks, i.e., VeRi-776, Oxford-5k, Paris-6k and University-1652,
with limited time cost.
"""
import pickle
import numpy as np
import torch
def load_pickle(pickle_path):
    """Deserialize and return the object stored at ``pickle_path``."""
    with open(pickle_path, 'rb') as handle:
        return pickle.load(handle)
def save_pickle(pickle_path, data):
    """Serialize ``data`` to ``pickle_path`` with the highest pickle protocol."""
    with open(pickle_path, 'wb') as handle:
        pickle.dump(data, handle, protocol=pickle.HIGHEST_PROTOCOL)
def pairwise_squared_distance(x):
    '''
    x : (n_samples, n_points, dims)
    return : (n_samples, n_points, n_points)
    '''
    # ||a - b||^2 = ||a||^2 + ||b||^2 - 2 a.b, batched over the first axis
    sq_norms = x.pow(2).sum(-1, keepdim=True)
    cross = x @ x.transpose(-1, -2)
    return sq_norms + sq_norms.transpose(-1, -2) - 2 * cross
def pairwise_distance(x, y):
    """Pairwise *squared* Euclidean distances between rows of ``x`` and ``y``.

    Both inputs are flattened to 2-D: x -> (m, d), y -> (n, d).

    Returns
    -------
    torch.Tensor of shape (m, n) where
        dist[i, j] = ||x_i||^2 + ||y_j||^2 - 2 * x_i . y_j
    """
    m, n = x.size(0), y.size(0)
    x = x.view(m, -1)
    y = y.view(n, -1)
    dist = torch.pow(x, 2).sum(dim=1, keepdim=True).expand(m, n) + \
        torch.pow(y, 2).sum(dim=1, keepdim=True).expand(n, m).t()
    # BUGFIX: the legacy positional overload `addmm_(beta, alpha, mat1, mat2)`
    # was removed from modern PyTorch; pass beta/alpha as keyword arguments.
    # This computes: dist = 1 * dist + (-2) * (x @ y.t())
    dist.addmm_(x, y.t(), beta=1, alpha=-2)
    return dist
def cosine_similarity(x, y):
    """Inner-product similarity matrix between rows of ``x`` and ``y``.

    NOTE(review): despite the name, no normalization is performed here --
    this is a plain dot product, so inputs are presumably L2-normalized
    already; verify against the callers.
    """
    m, n = x.size(0), y.size(0)
    flat_x = x.view(m, -1)
    flat_y = y.view(n, -1)
    return torch.mm(flat_x, flat_y.t())
def evaluate_ranking_list(indices, query_label, query_cam, gallery_label, gallery_cam):
    """Score a full ranking matrix: average CMC over all queries and print
    Rank@1/5/10 together with mAP.

    indices : per-query ranked gallery indices, one row per query.
    """
    # BUGFIX: `np.int` was removed in NumPy 1.24; the builtin `int` is the
    # documented replacement (the old alias resolved to it anyway).
    CMC = np.zeros(len(gallery_label), dtype=int)
    ap = 0.0
    for i in range(len(query_label)):
        ap_tmp, CMC_tmp = evaluate(indices[i],query_label[i], query_cam[i], gallery_label, gallery_cam)
        if CMC_tmp[0]==-1:
            # no valid ground-truth match exists for this query; skip it
            continue
        CMC = CMC + CMC_tmp
        ap += ap_tmp
    CMC = CMC.astype(np.float32)
    CMC = CMC/len(query_label) #average CMC
    print('Rank@1:%f Rank@5:%f Rank@10:%f mAP:%f'%(CMC[0],CMC[4],CMC[9],ap/len(query_label)))
def evaluate(index, ql, qc, gl, gc):
    """Single-query evaluation: split the gallery into good / junk indices
    for query (label ql, camera qc) and delegate scoring to compute_mAP."""
    same_label = np.argwhere(gl == ql)
    same_camera = np.argwhere(gc == qc)
    # true matches: same identity captured by a *different* camera
    good_index = np.setdiff1d(same_label, same_camera, assume_unique=True)
    # junk: same-identity/same-camera shots plus unlabeled detections (-1)
    junk_index = np.append(np.intersect1d(same_label, same_camera),
                           np.argwhere(gl == -1))
    return compute_mAP(index, good_index, junk_index)
def compute_mAP(index, good_index, junk_index):
    """Average precision and CMC curve for one ranked query result.

    index      : ranked gallery indices for the query (best match first).
    good_index : gallery indices that are true matches.
    junk_index : gallery indices ignored by the protocol.

    Returns (ap, cmc) where cmc[k] == 1 iff a true match appears within the
    top k+1 (junk-pruned) results; cmc[0] == -1 flags an unmatchable query.
    """
    ap = 0
    # BUGFIX: `np.int` was removed in NumPy 1.24; use the builtin `int`.
    cmc = np.zeros(len(index), dtype=int)
    if good_index.size == 0:   # if empty: query has no true match
        cmc[0] = -1
        return ap, cmc
    # remove junk entries from the ranking (they neither help nor hurt)
    mask = np.in1d(index, junk_index, invert=True)
    index = index[mask]
    # positions of the true matches within the pruned ranking
    ngood = len(good_index)
    rows_good = np.flatnonzero(np.in1d(index, good_index))
    cmc[rows_good[0]:] = 1
    # trapezoidal approximation of the precision-recall integral
    for i in range(ngood):
        d_recall = 1.0/ngood
        precision = (i+1)*1.0/(rows_good[i]+1)
        if rows_good[i] != 0:
            old_precision = i*1.0/rows_good[i]
        else:
            old_precision = 1.0
        ap = ap + d_recall*(old_precision + precision)/2
    return ap, cmc
| [
"pickle.dump",
"numpy.setdiff1d",
"torch.mm",
"numpy.append",
"pickle.load",
"torch.pow",
"numpy.argwhere",
"numpy.intersect1d",
"numpy.in1d"
] | [((1698, 1712), 'torch.mm', 'torch.mm', (['x', 'y'], {}), '(x, y)\n', (1706, 1712), False, 'import torch\n'), ((2362, 2383), 'numpy.argwhere', 'np.argwhere', (['(gl == ql)'], {}), '(gl == ql)\n', (2373, 2383), True, 'import numpy as np\n'), ((2401, 2422), 'numpy.argwhere', 'np.argwhere', (['(gc == qc)'], {}), '(gc == qc)\n', (2412, 2422), True, 'import numpy as np\n'), ((2439, 2498), 'numpy.setdiff1d', 'np.setdiff1d', (['query_index', 'camera_index'], {'assume_unique': '(True)'}), '(query_index, camera_index, assume_unique=True)\n', (2451, 2498), True, 'import numpy as np\n'), ((2517, 2538), 'numpy.argwhere', 'np.argwhere', (['(gl == -1)'], {}), '(gl == -1)\n', (2528, 2538), True, 'import numpy as np\n'), ((2555, 2596), 'numpy.intersect1d', 'np.intersect1d', (['query_index', 'camera_index'], {}), '(query_index, camera_index)\n', (2569, 2596), True, 'import numpy as np\n'), ((2614, 2649), 'numpy.append', 'np.append', (['junk_index2', 'junk_index1'], {}), '(junk_index2, junk_index1)\n', (2623, 2649), True, 'import numpy as np\n'), ((2970, 3009), 'numpy.in1d', 'np.in1d', (['index', 'junk_index'], {'invert': '(True)'}), '(index, junk_index, invert=True)\n', (2977, 3009), True, 'import numpy as np\n'), ((3102, 3128), 'numpy.in1d', 'np.in1d', (['index', 'good_index'], {}), '(index, good_index)\n', (3109, 3128), True, 'import numpy as np\n'), ((3145, 3170), 'numpy.argwhere', 'np.argwhere', (['(mask == True)'], {}), '(mask == True)\n', (3156, 3170), True, 'import numpy as np\n'), ((860, 874), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (871, 874), False, 'import pickle\n'), ((978, 1032), 'pickle.dump', 'pickle.dump', (['data', 'f'], {'protocol': 'pickle.HIGHEST_PROTOCOL'}), '(data, f, protocol=pickle.HIGHEST_PROTOCOL)\n', (989, 1032), False, 'import pickle\n'), ((1401, 1416), 'torch.pow', 'torch.pow', (['x', '(2)'], {}), '(x, 2)\n', (1410, 1416), False, 'import torch\n'), ((1457, 1472), 'torch.pow', 'torch.pow', (['y', '(2)'], {}), '(y, 2)\n', (1466, 1472), 
False, 'import torch\n')] |
"""
Creative Applications of Deep Learning w/ Tensorflow.
Kadenze, Inc.
Copyright <NAME>, June 2016.
"""
import numpy as np
import tensorflow as tf
from tensorflow.python.platform import gfile
from .utils import download
from skimage.transform import resize as imresize
def celeb_vaegan_download():
    """Download a pretrained celeb vae/gan network.

    Returns the local paths of the fetched model graph and the celeb
    attribute-label text file.
    """
    model_path = download('https://s3.amazonaws.com/cadl/models/celeb.vaegan.tfmodel')
    labels_path = download('https://s3.amazonaws.com/cadl/celeb-align/list_attr_celeba.txt')
    return model_path, labels_path
def get_celeb_vaegan_model():
    """Get a pretrained model.

    Returns
    -------
    net : dict
        {
            'graph_def': tf.GraphDef
                The graph definition
            'labels': list
                List of different possible attributes from celeb
            'attributes': np.ndarray
                One hot encoding of the attributes per image
                [n_els x n_labels]
            'preprocess': function
                Preprocess function
        }
    """
    # Download the trained net
    model, labels = celeb_vaegan_download()
    # Parse the attribute file: line 0 holds the element count, line 1 the
    # attribute names, and the remaining lines one row of +/-1 flags each.
    # (Use a context manager so the file handle is not leaked.)
    with open(labels) as f:
        txt = f.readlines()
    n_els = int(txt[0].strip())
    labels = txt[1].strip().split()
    n_labels = len(labels)
    attributes = np.zeros((n_els, n_labels), dtype=bool)
    for i, txt_i in enumerate(txt[2:]):
        attributes[i] = (np.array(txt_i.strip().split()[1:]).astype(int) > 0)
    # Load the saved graph
    with gfile.GFile(model, 'rb') as f:
        graph_def = tf.GraphDef()
        try:
            graph_def.ParseFromString(f.read())
        # BUGFIX: a bare `except:` also swallows SystemExit/KeyboardInterrupt;
        # catch Exception instead (the original hint message is preserved).
        except Exception:
            print('try adding PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION=python' +
                  'to environment.  e.g.:\n' +
                  'PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION=python ipython\n' +
                  'See here for info: ' +
                  'https://github.com/tensorflow/tensorflow/issues/582')
    net = {
        'graph_def': graph_def,
        'labels': labels,
        'attributes': attributes,
        'preprocess': preprocess,
    }
    return net
def preprocess(img, crop_factor=0.8):
    """Replicate the preprocessing we did on the VAE/GAN.

    This model used a crop_factor of 0.8 and crop size of [100, 100, 3].

    Parameters
    ----------
    img : np.ndarray
        Input image, at least 2-D (H x W [x C]).
    crop_factor : float
        Fraction of the central square crop to keep, in (0, 1].

    Returns
    -------
    np.ndarray
        The 100x100 resized central crop.
    """
    # Take the largest central square crop.
    crop = np.min(img.shape[:2])
    r = (img.shape[0] - crop) // 2
    c = (img.shape[1] - crop) // 2
    cropped = img[r: r + crop, c: c + crop]
    r, c, *d = cropped.shape
    if crop_factor < 1.0:
        amt = (1 - crop_factor) / 2
        h, w = int(c * amt), int(r * amt)
        # BUGFIX: for small inputs the border can round down to 0 pixels and
        # a [0:-0] slice would yield an empty array; only trim when positive.
        if h > 0 and w > 0:
            cropped = cropped[h:-h, w:-w]
    rsz = imresize(cropped, (100, 100), preserve_range=False)
    return rsz
| [
"tensorflow.python.platform.gfile.GFile",
"numpy.zeros",
"numpy.min",
"skimage.transform.resize",
"tensorflow.GraphDef"
] | [((1340, 1379), 'numpy.zeros', 'np.zeros', (['(n_els, n_labels)'], {'dtype': 'bool'}), '((n_els, n_labels), dtype=bool)\n', (1348, 1379), True, 'import numpy as np\n'), ((2346, 2367), 'numpy.min', 'np.min', (['img.shape[:2]'], {}), '(img.shape[:2])\n', (2352, 2367), True, 'import numpy as np\n'), ((2663, 2714), 'skimage.transform.resize', 'imresize', (['cropped', '(100, 100)'], {'preserve_range': '(False)'}), '(cropped, (100, 100), preserve_range=False)\n', (2671, 2714), True, 'from skimage.transform import resize as imresize\n'), ((1535, 1559), 'tensorflow.python.platform.gfile.GFile', 'gfile.GFile', (['model', '"""rb"""'], {}), "(model, 'rb')\n", (1546, 1559), False, 'from tensorflow.python.platform import gfile\n'), ((1586, 1599), 'tensorflow.GraphDef', 'tf.GraphDef', ([], {}), '()\n', (1597, 1599), True, 'import tensorflow as tf\n')] |
import tensorflow as tf
import numpy as np
# create data: 100 random x values and the ground-truth line y = 0.1*x + 0.3
x_data = np.random.rand(100).astype(np.float32)
y_data = x_data*0.1+0.3
##create tensorflow structure start ###
# trainable parameters: weight initialized uniformly in [-1, 1], bias at 0
Weights = tf.Variable(tf.random_uniform([1], -1.0, 1.0))
biases = tf.Variable(tf.zeros([1]))
# linear model prediction and mean-squared-error loss
y = Weights*x_data + biases
loss = tf.reduce_mean(tf.square(y-y_data))
# plain SGD with learning rate 0.5
optimizer = tf.train.GradientDescentOptimizer(0.5)
train = optimizer.minimize(loss)
# NOTE(review): TF1-style graph/session API; `tf.initialize_all_variables`
# was deprecated in favor of `tf.global_variables_initializer`.
init = tf.initialize_all_variables()
sess = tf.Session()
sess.run(init)
# run 2001 SGD steps, logging the fitted parameters every 200 steps;
# Weights/biases should converge toward 0.1 and 0.3
for step in range(2001):
    sess.run(train)
    if step % 200 == 0:
        print(step, sess.run(Weights), sess.run(biases))
##create tensorflow structure end ###
| [
"tensorflow.random_uniform",
"tensorflow.Session",
"tensorflow.zeros",
"tensorflow.initialize_all_variables",
"tensorflow.square",
"numpy.random.rand",
"tensorflow.train.GradientDescentOptimizer"
] | [((350, 388), 'tensorflow.train.GradientDescentOptimizer', 'tf.train.GradientDescentOptimizer', (['(0.5)'], {}), '(0.5)\n', (383, 388), True, 'import tensorflow as tf\n'), ((430, 459), 'tensorflow.initialize_all_variables', 'tf.initialize_all_variables', ([], {}), '()\n', (457, 459), True, 'import tensorflow as tf\n'), ((468, 480), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (478, 480), True, 'import tensorflow as tf\n'), ((195, 228), 'tensorflow.random_uniform', 'tf.random_uniform', (['[1]', '(-1.0)', '(1.0)'], {}), '([1], -1.0, 1.0)\n', (212, 228), True, 'import tensorflow as tf\n'), ((251, 264), 'tensorflow.zeros', 'tf.zeros', (['[1]'], {}), '([1])\n', (259, 264), True, 'import tensorflow as tf\n'), ((317, 338), 'tensorflow.square', 'tf.square', (['(y - y_data)'], {}), '(y - y_data)\n', (326, 338), True, 'import tensorflow as tf\n'), ((69, 88), 'numpy.random.rand', 'np.random.rand', (['(100)'], {}), '(100)\n', (83, 88), True, 'import numpy as np\n')] |
"""!
@brief A dataset creation which is compatible with pytorch framework
and much faster in loading time depending on the new version of
loading only the appropriate files that might be needed. Moreover
this dataset has minimal input argument requirements in order to be
more user friendly.
@author <NAME> {<EMAIL>}
@copyright University of illinois at Urbana Champaign
"""
import os
import glob2
import numpy as np
from sklearn.externals import joblib
from torch.utils.data import Dataset, DataLoader
class PytorchMixtureDataset(Dataset):
    """
    This is a general compatible class for pytorch datasets.
    @note Each instance of the dataset should be stored using
    joblib.dump() and this is the way that it would be returned.
    After some transformations.
    The path of all datasets should be defined inside config.
    All datasets should be formatted with appropriate subfolders of
    train / test and val and under them there should be all the
    available files.
    """
    def __init__(self,
                 dataset_dir,
                 partition='train',
                 get_top=None,
                 labels_mask='duet',
                 only_mask_evaluation=False,
                 **kwargs):
        """!
        Input dataset dir should have the following structure:
        ./dataset_dir
            ./train
            ./test
            ./val

        :param dataset_dir: root folder holding one subfolder per partition
        :param partition: which subfolder to load ('train', 'test' or 'val')
        :param get_top: if not None, keep only the first get_top mixtures
        :param labels_mask: 'duet', 'ground_truth' or 'raw_phase_diff';
            selects which pre-computed mask file is served as the label
        :param only_mask_evaluation: for 'val'/'test', additionally return
            the selected mask next to the spectra (see __getitem__)
        """
        self.dataset_dirpath = os.path.join(dataset_dir,
                                            partition)
        # Cached (mean, std) of this partition live in a sibling file.
        self.dataset_stats_path = self.dataset_dirpath + '_stats'
        self.partition = partition
        if (labels_mask == 'duet'
                or labels_mask == 'ground_truth'
                or labels_mask == 'raw_phase_diff'):
            self.selected_mask = labels_mask
        elif labels_mask is None:
            # NOTE(review): when labels_mask is None, self.selected_mask is
            # never assigned, so any later access raises AttributeError.
            # Confirm callers never combine labels_mask=None with mask use.
            pass
        else:
            raise NotImplementedError("There is no available mask "
                                      "called: {}".format(labels_mask))
        if not os.path.isdir(self.dataset_dirpath):
            raise IOError("Dataset folder {} not found!".format(
                self.dataset_dirpath))
        else:
            print("Loading files from {} ...".format(
                self.dataset_dirpath))
        # One folder per pre-generated mixture instance.
        self.mixture_folders = glob2.glob(os.path.join(
            self.dataset_dirpath, '*'))
        if get_top is not None:
            self.mixture_folders = self.mixture_folders[:get_top]
        self.n_samples = len(self.mixture_folders)
        self.only_mask_evaluation = only_mask_evaluation
        # Number of sources is encoded as the 5th "_"-separated token of the
        # dataset folder name -- TODO confirm against the dataset generator.
        self.n_sources = int(os.path.basename(
            dataset_dir).split("_")[4])

        # preprocess -- store all absolute spectra values for faster
        # loading during run time
        self.store_directly_abs_spectra()

    def __len__(self):
        # Number of mixture folders discovered for this partition.
        return self.n_samples

    def __getitem__(self, idx):
        """!
        Depending on the selected partition it returns accordingly
        the following objects:

        if self.partition == 'train':
            (abs_tfs, selected_mask)
        else if partition == 'test' or 'val':
            (abs_tfs, wavs_list, real_tfs, imag_tfs) when
            only_mask_evaluation is False, otherwise
            (abs_tfs, selected_mask, wavs_list, real_tfs, imag_tfs)"""
        mix_folder = self.mixture_folders[idx]
        try:
            # Magnitude spectrogram cached by store_directly_abs_spectra().
            abs_tfs = joblib.load(os.path.join(mix_folder, 'abs_tfs'))
        except:
            raise IOError("Failed to load data from path: {} "
                          "for absolute spectra.".format(mix_folder))

        if self.partition == 'val' or self.partition == 'test':
            try:
                real_p = os.path.join(mix_folder, 'real_tfs')
                imag_p = os.path.join(mix_folder, 'imag_tfs')
                wavs_p = os.path.join(mix_folder, 'wavs')
                real_tfs = joblib.load(real_p)
                imag_tfs = joblib.load(imag_p)
                wavs_list = joblib.load(wavs_p)
                wavs_list = np.array(wavs_list)
            except:
                raise IOError("Failed to load data from path: {} "
                              "for real, imag tf of the mixture and "
                              "wavs".format(mix_folder))
            if not self.only_mask_evaluation:
                return abs_tfs, wavs_list, real_tfs, imag_tfs
            try:
                if self.selected_mask == 'duet':
                    mask = joblib.load(os.path.join(mix_folder,
                                                    'soft_labeled_mask'))
                elif self.selected_mask == 'ground_truth':
                    mask = joblib.load(os.path.join(mix_folder,
                                                    'ground_truth_mask'))
            except Exception as e:
                print(e)
                raise IOError("Failed to load data from path: {} "
                              "for tf label masks".format(mix_folder))
            # NOTE(review): when selected_mask == 'raw_phase_diff', neither
            # branch above binds `mask`, so this return raises NameError.
            return abs_tfs, mask, wavs_list, real_tfs, imag_tfs

        if self.partition == 'train':
            try:
                if self.selected_mask == 'duet':
                    mask = joblib.load(os.path.join(mix_folder,
                                                    'soft_labeled_mask'))
                elif self.selected_mask == 'ground_truth':
                    mask = joblib.load(os.path.join(mix_folder,
                                                    'ground_truth_mask'))
                else:
                    mask = joblib.load(os.path.join(mix_folder,
                                                    'raw_phase_diff'))
            except Exception as e:
                print(e)
                raise IOError("Failed to load data from path: {} "
                              "for tf label masks".format(mix_folder))
            return abs_tfs, mask

        # Unknown partition name: nothing sensible to return.
        return None

    def store_directly_abs_spectra(self):
        """Precompute |real + 1j*imag| for every mixture and cache it on
        disk as 'abs_tfs'; skipped when the cache file already exists."""
        for mix_folder in self.mixture_folders:
            abs_p = os.path.join(mix_folder, 'abs_tfs')
            if os.path.lexists(abs_p):
                # Already cached by a previous run.
                continue
            try:
                real_p = os.path.join(mix_folder, 'real_tfs')
                imag_p = os.path.join(mix_folder, 'imag_tfs')
                real_tfs = joblib.load(real_p)
                imag_tfs = joblib.load(imag_p)
            except:
                raise IOError("Failed to load data from path: {} "
                              "using joblib.".format(mix_folder))
            # Magnitude spectrogram of the complex mixture.
            abs_tfs = np.abs(real_tfs + 1j * imag_tfs)
            try:
                joblib.dump(abs_tfs, abs_p, compress=0)
            except:
                raise IOError("Failed to save absolute value of "
                              "spectra in path: {}".format(abs_p))

    def extract_stats(self):
        """Return (mean, std) over this partition's absolute spectra,
        computing and caching them under dataset_stats_path on first call."""
        if not os.path.lexists(self.dataset_stats_path):
            mean = 0.
            std = 0.
            for mix_folder in self.mixture_folders:
                try:
                    abs_p = os.path.join(mix_folder, 'abs_tfs')
                    abs_tfs = joblib.load(abs_p)
                except:
                    raise IOError("Failed to load absolute tf "
                                  "representation from path: {} "
                                  "using joblib.".format(abs_p))
                # NOTE(review): np.mean(np.mean(x)) == np.mean(x); the
                # nested call is redundant but harmless.
                mean += np.mean(np.mean(abs_tfs))
                std += np.std(abs_tfs)
            # These are averages of per-mixture statistics, not the exact
            # global mean/std (no weighting by element counts).
            mean /= self.__len__()
            std /= self.__len__()
            # store them for later usage
            joblib.dump((mean, std), self.dataset_stats_path)
            print("Saving dataset mean and variance in: {}".format(
                self.dataset_stats_path))
        else:
            mean, std = joblib.load(self.dataset_stats_path)
        return mean, std
def get_data_generator(dataset_dir,
                       partition='train',
                       num_workers=1,
                       return_stats=False,
                       get_top=None,
                       batch_size=1,
                       return_n_batches=True,
                       labels_mask='duet',
                       return_n_sources=False,
                       only_mask_evaluation=False):
    """Build a shuffled DataLoader over a PytorchMixtureDataset.

    Returns a list whose first element is always the DataLoader; depending
    on the return_* flags it is followed by (mean, std), the number of
    whole batches, and the dataset's source count, in that order.
    """
    dataset = PytorchMixtureDataset(dataset_dir,
                                    partition=partition,
                                    get_top=get_top,
                                    labels_mask=labels_mask,
                                    only_mask_evaluation=only_mask_evaluation)
    loader = DataLoader(dataset,
                        batch_size=batch_size,
                        shuffle=True,
                        num_workers=num_workers,
                        drop_last=True,
                        pin_memory=False)
    results = [loader]
    if return_stats:
        # extract_stats() yields (mean, std) of the partition's spectra.
        results.extend(dataset.extract_stats())
    if return_n_batches:
        # drop_last=True above, so only whole batches count.
        results.append(len(dataset) // batch_size)
    if return_n_sources:
        results.append(dataset.n_sources)
    return results
| [
"os.path.lexists",
"sklearn.externals.joblib.dump",
"numpy.abs",
"torch.utils.data.DataLoader",
"os.path.basename",
"os.path.isdir",
"numpy.std",
"numpy.mean",
"numpy.array",
"sklearn.externals.joblib.load",
"os.path.join"
] | [((8536, 8590), 'torch.utils.data.DataLoader', 'DataLoader', (['data'], {'pin_memory': '(False)'}), '(data, **generator_params, pin_memory=False)\n', (8546, 8590), False, 'from torch.utils.data import Dataset, DataLoader\n'), ((1427, 1463), 'os.path.join', 'os.path.join', (['dataset_dir', 'partition'], {}), '(dataset_dir, partition)\n', (1439, 1463), False, 'import os\n'), ((1984, 2019), 'os.path.isdir', 'os.path.isdir', (['self.dataset_dirpath'], {}), '(self.dataset_dirpath)\n', (1997, 2019), False, 'import os\n'), ((2275, 2314), 'os.path.join', 'os.path.join', (['self.dataset_dirpath', '"""*"""'], {}), "(self.dataset_dirpath, '*')\n", (2287, 2314), False, 'import os\n'), ((5839, 5874), 'os.path.join', 'os.path.join', (['mix_folder', '"""abs_tfs"""'], {}), "(mix_folder, 'abs_tfs')\n", (5851, 5874), False, 'import os\n'), ((5890, 5912), 'os.path.lexists', 'os.path.lexists', (['abs_p'], {}), '(abs_p)\n', (5905, 5912), False, 'import os\n'), ((6350, 6384), 'numpy.abs', 'np.abs', (['(real_tfs + 1.0j * imag_tfs)'], {}), '(real_tfs + 1.0j * imag_tfs)\n', (6356, 6384), True, 'import numpy as np\n'), ((6654, 6694), 'os.path.lexists', 'os.path.lexists', (['self.dataset_stats_path'], {}), '(self.dataset_stats_path)\n', (6669, 6694), False, 'import os\n'), ((7361, 7410), 'sklearn.externals.joblib.dump', 'joblib.dump', (['(mean, std)', 'self.dataset_stats_path'], {}), '((mean, std), self.dataset_stats_path)\n', (7372, 7410), False, 'from sklearn.externals import joblib\n'), ((7559, 7595), 'sklearn.externals.joblib.load', 'joblib.load', (['self.dataset_stats_path'], {}), '(self.dataset_stats_path)\n', (7570, 7595), False, 'from sklearn.externals import joblib\n'), ((3290, 3325), 'os.path.join', 'os.path.join', (['mix_folder', '"""abs_tfs"""'], {}), "(mix_folder, 'abs_tfs')\n", (3302, 3325), False, 'import os\n'), ((3583, 3619), 'os.path.join', 'os.path.join', (['mix_folder', '"""real_tfs"""'], {}), "(mix_folder, 'real_tfs')\n", (3595, 3619), False, 'import os\n'), ((3645, 
3681), 'os.path.join', 'os.path.join', (['mix_folder', '"""imag_tfs"""'], {}), "(mix_folder, 'imag_tfs')\n", (3657, 3681), False, 'import os\n'), ((3707, 3739), 'os.path.join', 'os.path.join', (['mix_folder', '"""wavs"""'], {}), "(mix_folder, 'wavs')\n", (3719, 3739), False, 'import os\n'), ((3767, 3786), 'sklearn.externals.joblib.load', 'joblib.load', (['real_p'], {}), '(real_p)\n', (3778, 3786), False, 'from sklearn.externals import joblib\n'), ((3814, 3833), 'sklearn.externals.joblib.load', 'joblib.load', (['imag_p'], {}), '(imag_p)\n', (3825, 3833), False, 'from sklearn.externals import joblib\n'), ((3862, 3881), 'sklearn.externals.joblib.load', 'joblib.load', (['wavs_p'], {}), '(wavs_p)\n', (3873, 3881), False, 'from sklearn.externals import joblib\n'), ((3910, 3929), 'numpy.array', 'np.array', (['wavs_list'], {}), '(wavs_list)\n', (3918, 3929), True, 'import numpy as np\n'), ((5982, 6018), 'os.path.join', 'os.path.join', (['mix_folder', '"""real_tfs"""'], {}), "(mix_folder, 'real_tfs')\n", (5994, 6018), False, 'import os\n'), ((6044, 6080), 'os.path.join', 'os.path.join', (['mix_folder', '"""imag_tfs"""'], {}), "(mix_folder, 'imag_tfs')\n", (6056, 6080), False, 'import os\n'), ((6108, 6127), 'sklearn.externals.joblib.load', 'joblib.load', (['real_p'], {}), '(real_p)\n', (6119, 6127), False, 'from sklearn.externals import joblib\n'), ((6155, 6174), 'sklearn.externals.joblib.load', 'joblib.load', (['imag_p'], {}), '(imag_p)\n', (6166, 6174), False, 'from sklearn.externals import joblib\n'), ((6416, 6455), 'sklearn.externals.joblib.dump', 'joblib.dump', (['abs_tfs', 'abs_p'], {'compress': '(0)'}), '(abs_tfs, abs_p, compress=0)\n', (6427, 6455), False, 'from sklearn.externals import joblib\n'), ((7218, 7233), 'numpy.std', 'np.std', (['abs_tfs'], {}), '(abs_tfs)\n', (7224, 7233), True, 'import numpy as np\n'), ((6840, 6875), 'os.path.join', 'os.path.join', (['mix_folder', '"""abs_tfs"""'], {}), "(mix_folder, 'abs_tfs')\n", (6852, 6875), False, 'import os\n'), 
((6906, 6924), 'sklearn.externals.joblib.load', 'joblib.load', (['abs_p'], {}), '(abs_p)\n', (6917, 6924), False, 'from sklearn.externals import joblib\n'), ((7177, 7193), 'numpy.mean', 'np.mean', (['abs_tfs'], {}), '(abs_tfs)\n', (7184, 7193), True, 'import numpy as np\n'), ((2585, 2614), 'os.path.basename', 'os.path.basename', (['dataset_dir'], {}), '(dataset_dir)\n', (2601, 2614), False, 'import os\n'), ((4359, 4404), 'os.path.join', 'os.path.join', (['mix_folder', '"""soft_labeled_mask"""'], {}), "(mix_folder, 'soft_labeled_mask')\n", (4371, 4404), False, 'import os\n'), ((5062, 5107), 'os.path.join', 'os.path.join', (['mix_folder', '"""soft_labeled_mask"""'], {}), "(mix_folder, 'soft_labeled_mask')\n", (5074, 5107), False, 'import os\n'), ((4556, 4601), 'os.path.join', 'os.path.join', (['mix_folder', '"""ground_truth_mask"""'], {}), "(mix_folder, 'ground_truth_mask')\n", (4568, 4601), False, 'import os\n'), ((5246, 5291), 'os.path.join', 'os.path.join', (['mix_folder', '"""ground_truth_mask"""'], {}), "(mix_folder, 'ground_truth_mask')\n", (5258, 5291), False, 'import os\n'), ((5393, 5435), 'os.path.join', 'os.path.join', (['mix_folder', '"""raw_phase_diff"""'], {}), "(mix_folder, 'raw_phase_diff')\n", (5405, 5435), False, 'import os\n')] |
# 2019-11-25 16:19:29(JST)
import sys
import numpy as np
def main():
H, W, K = map(int, sys.stdin.readline().split())
N = int(sys.stdin.readline().rstrip())
hw = map(int, sys.stdin.read().split())
hw = list(zip(hw, hw))
vert, hori = [0] * (H + 1), [0] * (W + 1)
candy = [[0] * (W + 1) for _ in range(H + 1)]
for h, w in hw:
candy[h][w] = 1
vert[h] += 1
hori[w] += 1
a = np.array([1, 3, 4, 5, 6])
b = np.array([1, 3, 4, 5, 7])
res = np.array(
[
vert[i] + hori[j] - candy[i][j]
for i in range(1, H + 1)
for j in range(1, W + 1)
]
)
res.sort()
ans = np.searchsorted(res, K, side="right") - np.searchsorted(
res, K, side="left"
)
print(ans)
if __name__ == "__main__":
main()
| [
"sys.stdin.readline",
"sys.stdin.read",
"numpy.array",
"numpy.searchsorted"
] | [((450, 475), 'numpy.array', 'np.array', (['[1, 3, 4, 5, 6]'], {}), '([1, 3, 4, 5, 6])\n', (458, 475), True, 'import numpy as np\n'), ((485, 510), 'numpy.array', 'np.array', (['[1, 3, 4, 5, 7]'], {}), '([1, 3, 4, 5, 7])\n', (493, 510), True, 'import numpy as np\n'), ((713, 750), 'numpy.searchsorted', 'np.searchsorted', (['res', 'K'], {'side': '"""right"""'}), "(res, K, side='right')\n", (728, 750), True, 'import numpy as np\n'), ((753, 789), 'numpy.searchsorted', 'np.searchsorted', (['res', 'K'], {'side': '"""left"""'}), "(res, K, side='left')\n", (768, 789), True, 'import numpy as np\n'), ((102, 122), 'sys.stdin.readline', 'sys.stdin.readline', ([], {}), '()\n', (120, 122), False, 'import sys\n'), ((145, 165), 'sys.stdin.readline', 'sys.stdin.readline', ([], {}), '()\n', (163, 165), False, 'import sys\n'), ((195, 211), 'sys.stdin.read', 'sys.stdin.read', ([], {}), '()\n', (209, 211), False, 'import sys\n')] |
# -- coding: utf-8 --
# -- coding: utf-8 --
import tensorflow as tf
import numpy as np
from gcn_model.data_read import *
import argparse
from gcn_model.hyparameter import parameter
class HA():
    """Historical-average (HA) baseline over the combined ozone dataset.

    model() collects, per (zone, hour, 15-min slot) group, an observed value
    and the corresponding historical predictions; accuracy() then reports
    MAE / RMSE / correlation / R^2 between the flattened pairs.
    """
    def __init__(self,
                 site_id=0,
                 is_training=True,
                 time_size=3,
                 prediction_size=1,
                 data_divide=0.9,
                 window_step=1,
                 normalize=False,
                 hp=None):
        '''
        :param site_id: zone/site identifier (unused when all zones are kept)
        :param is_training: while is_training is True,the model is training state
        :param time_size: time series length of input
        :param prediction_size: the length of prediction
        :param data_divide: train/test split ratio
        :param window_step: sliding-window step
        :param normalize: whether to normalize (stored, not applied here)
        :param hp: hyper-parameter namespace; must expose .site_num
        '''
        self.site_id=site_id                                   # ozone ID
        self.time_size=time_size                               # time series length of input
        self.prediction_size=prediction_size                   # the length of prediction
        self.is_training=is_training                           # true or false
        self.data_divide=data_divide                           # the divide between in training set and test set ratio
        self.window_step=window_step                           # windows step
        self.para=hp
        # combine_path comes from the star-import of gcn_model.data_read --
        # TODO confirm it points at the combined CSV.
        self.source_data=self.get_source_data(combine_path)
        # self.data=self.source_data.loc[self.source_data['ZoneID']==self.site_id]
        self.data=self.source_data
        self.length=self.data.values.shape[0]  #data length
        self.normalize=normalize
        # Rows at the first 15-minute slot only (debug peek below).
        self.d=self.data.loc[self.data['min-15']==15]
        print(self.d.values.shape)

    def get_source_data(self,file_path):
        '''
        Read the source CSV into a DataFrame.
        :param file_path: path of the csv file to load
        :return: pandas DataFrame (pd is provided by the star-import above)
        '''
        data = pd.read_csv(file_path, encoding='utf-8')
        return data

    def accuracy(self,label,predict):
        '''
        Print and return MAE, RMSE, correlation coefficient and R^2.
        :param label: represents the observed value (array-like)
        :param predict: represents the predicted value (array-like,
            same shape as label)
        :return: (mae, rmse, correlation, r_squared)
        '''
        error = label - predict
        average_error = np.mean(np.fabs(error.astype(float)))
        print("mae is : %.6f" % (average_error))

        rmse_error = np.sqrt(np.mean(np.square(label - predict)))
        print("rmse is : %.6f" % (rmse_error))

        # Pearson correlation computed by hand from centered moments.
        cor = np.mean(np.multiply((label - np.mean(label)),
                                    (predict - np.mean(predict)))) / (np.std(predict) * np.std(label))
        print('correlation coefficient is: %.6f' % (cor))

        # Coefficient of determination: 1 - SSE/SST.
        sse = np.sum((label - predict) ** 2)
        sst = np.sum((label - np.mean(label)) ** 2)
        R2 = 1 - sse / sst
        print('r^2 is: %.6f' % (R2))
        return average_error,rmse_error,cor,R2

    def model(self):
        """Populate dictionary_label / dictionary_predict per
        (zone, hour, 15-min slot) group.

        For each group: row 6 is treated as the observation and rows 25+
        as its historical predictions -- the hard-coded offsets presumably
        reflect the dataset layout; TODO confirm against the data file.
        """
        self.dictionary_label = []
        self.dictionary_predict = []
        for site in range(self.para.site_num):
            data1=self.data.loc[self.data['ZoneID']==site]
            for h in range(24):
                data2=data1.loc[data1['hour']==h]
                # NOTE(review): loop variable `min` shadows the builtin.
                for min in range(4):
                    data3 = data2.loc[data2['min-15'] == 15*(min+1)]
                    # print(data3)
                    label=np.mean(data3.values[6 : 7,-1])
                    # predict = np.mean(data3.values[25:26, -1])
                    predict=np.reshape(data3.values[25: ,-1],newshape=[-1])
                    # print(predict,predict.shape[-1])
                    # Repeat the single observation so label and prediction
                    # lists stay aligned element-for-element.
                    self.dictionary_label.append([label]*predict.shape[-1])
                    # self.dictionary_label.append(label)
                    # self.dictionary_predict.append(predict)
                    self.dictionary_predict.append(list(predict))
#
if __name__ == '__main__':
    # Parse hyper-parameters, run the historical-average baseline and
    # report the accuracy metrics on the flattened label/prediction pairs.
    hp = parameter(argparse.ArgumentParser()).get_para()
    baseline = HA(site_id=0, normalize=False, hp=hp)
    print(baseline.data.keys())
    print(baseline.data)
    baseline.model()
    labels = np.array(baseline.dictionary_label).reshape(-1)
    predictions = np.array(baseline.dictionary_predict).reshape(-1)
    baseline.accuracy(labels, predictions)
    # print(iter.data.loc[iter.data['ZoneID']==0])
# print(iter.data.loc[iter.data['ZoneID']==0]) | [
"numpy.sum",
"argparse.ArgumentParser",
"numpy.std",
"numpy.square",
"numpy.mean",
"numpy.array",
"numpy.reshape"
] | [((2396, 2426), 'numpy.sum', 'np.sum', (['((label - predict) ** 2)'], {}), '((label - predict) ** 2)\n', (2402, 2426), True, 'import numpy as np\n'), ((3580, 3605), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (3603, 3605), False, 'import argparse\n'), ((3767, 3796), 'numpy.array', 'np.array', (['ha.dictionary_label'], {}), '(ha.dictionary_label)\n', (3775, 3796), True, 'import numpy as np\n'), ((3823, 3854), 'numpy.array', 'np.array', (['ha.dictionary_predict'], {}), '(ha.dictionary_predict)\n', (3831, 3854), True, 'import numpy as np\n'), ((2085, 2111), 'numpy.square', 'np.square', (['(label - predict)'], {}), '(label - predict)\n', (2094, 2111), True, 'import numpy as np\n'), ((2290, 2305), 'numpy.std', 'np.std', (['predict'], {}), '(predict)\n', (2296, 2305), True, 'import numpy as np\n'), ((2308, 2321), 'numpy.std', 'np.std', (['label'], {}), '(label)\n', (2314, 2321), True, 'import numpy as np\n'), ((2457, 2471), 'numpy.mean', 'np.mean', (['label'], {}), '(label)\n', (2464, 2471), True, 'import numpy as np\n'), ((3041, 3071), 'numpy.mean', 'np.mean', (['data3.values[6:7, -1]'], {}), '(data3.values[6:7, -1])\n', (3048, 3071), True, 'import numpy as np\n'), ((3166, 3214), 'numpy.reshape', 'np.reshape', (['data3.values[25:, -1]'], {'newshape': '[-1]'}), '(data3.values[25:, -1], newshape=[-1])\n', (3176, 3214), True, 'import numpy as np\n'), ((2205, 2219), 'numpy.mean', 'np.mean', (['label'], {}), '(label)\n', (2212, 2219), True, 'import numpy as np\n'), ((2267, 2283), 'numpy.mean', 'np.mean', (['predict'], {}), '(predict)\n', (2274, 2283), True, 'import numpy as np\n')] |
import numpy as np
import matplotlib.pyplot as plt
N = 17
men_means = (2.82, 2.8, 2.82, 2.94, 2.66, 2.6, 2.74, 2.8, 2.9, 2.7, 2.92, 2.92, 10.66, 4.12, 3.72, 3.26, 3.44)
men_std = (0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)
ind = np.arange(N) # the x locations for the groups
width = 0.1 # the width of the bars
fig, ax = plt.subplots()
rects1 = ax.bar(ind, men_means, width, color='r', yerr=men_std)
women_means = (4.24, 4.34, 3.42, 4.18, 3.44, 4.26, 4.62, 4.26, 4.92, 4.16, 4.62, 5.06, 97.3, 87.78, 65.18, 7.54, 5.36)
women_std = (0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)
rects2 = ax.bar(ind + width, women_means, width, color='y', yerr=women_std)
rects3 = ax.bar(ind + width * 2, women_means, width, color='b', yerr=women_std)
# add some text for labels, title and axes ticks
ax.set_ylabel('Scores')
ax.set_title('Scores by group and gender')
ax.set_xticks(ind + width / 2)
# ax.set_xticklabels(('G1', 'G2', 'G3', 'G4', 'G5'))
ax.set_xticklabels(('Curve and Line', 'Curve and Line (AA)', 'Cube', 'Cube (AA)', 'Cube (Camera)', 'Monkey head (Texture)', 'Monkey head (Light)', 'Two models (Light)', 'Two models (Complex light)', 'Two models (Texture)' , 'Two models (Transparency)', 'Two models (Tex&Light)', 'Thousands of rings', 'Clipping plane', 'Bubble', 'Compressed Texture', 'Shadow'))
ax.legend((rects1[0], rects2[0], rects3[0]), ('Men', 'Women', 'test'))
def autolabel(rects):
    """Write each bar's height (truncated to an integer) just above it."""
    for bar in rects:
        h = bar.get_height()
        x_center = bar.get_x() + bar.get_width()/2.
        # Place the text slightly (5%) above the top of the bar.
        ax.text(x_center, 1.05*h,
                '%d' % int(h),
                ha='center', va='bottom')
# Annotate every bar of all three series with its height.
autolabel(rects1)
autolabel(rects2)
autolabel(rects3)
plt.show() | [
"numpy.arange",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.show"
] | [((246, 258), 'numpy.arange', 'np.arange', (['N'], {}), '(N)\n', (255, 258), True, 'import numpy as np\n'), ((346, 360), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (358, 360), True, 'import matplotlib.pyplot as plt\n'), ((1760, 1770), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1768, 1770), True, 'import matplotlib.pyplot as plt\n')] |
"""
pipeline_example.py
------------
Example pipeline for netrd
author: <NAME>
email: <EMAIL>othylarock at gmail dot com
Submitted as part of the 2019 NetSI Collabathon
"""
# NOTE: !IMPORTANT! If you want to play and make changes,
# please make your own copy of this file (with a different name!)
# first and edit that!!! Leave this file alone except to fix a bug!
import netrd
from collections import defaultdict
import numpy as np
import matplotlib.pyplot as plt
## Load datasets
# Name -> node time-series matrix read from the repo's ../data folder.
datasets = {'4-clique':netrd.utilities.read_time_series('../data/synth_4clique_N64_simple.csv'),
            'BA':netrd.utilities.read_time_series('../data/synth_BAnetwork_N64_simple.csv'),
            'ER':netrd.utilities.read_time_series('../data/synth_ERnetwork_N64_simple.csv')}

## Load reconstruction methods
# Name -> reconstructor instance; each exposes .fit(time_series) below.
reconstructors = {
    'correlation_matrix':netrd.reconstruction.CorrelationMatrixReconstructor(),
    'convergent_crossmappings':netrd.reconstruction.ConvergentCrossMappingReconstructor(),
    'exact_mean_field':netrd.reconstruction.ExactMeanFieldReconstructor(),
    'free_energy_minimization':netrd.reconstruction.FreeEnergyMinimizationReconstructor(),
    'graphical_lasso':netrd.reconstruction.GraphicalLassoReconstructor(),
    'maximum_likelihood':netrd.reconstruction.MaximumLikelihoodEstimationReconstructor(),
    'mutual_information':netrd.reconstruction.MutualInformationMatrixReconstructor(),
    'ou_inference':netrd.reconstruction.OUInferenceReconstructor(),
    'partial_correlation':netrd.reconstruction.PartialCorrelationMatrixReconstructor(),
    'regularized_correlation':netrd.reconstruction.RegularizedCorrelationMatrixReconstructor(),
    'thouless_anderson_palmer':netrd.reconstruction.ThoulessAndersonPalmerReconstructor(),
    'time_granger_causality':netrd.reconstruction.TimeGrangerCausalityReconstructor(),
    'marchenko_pastur':netrd.reconstruction.MarchenkoPastur(),
    #'naive_transfer_entropy':netrd.reconstruction.NaiveTransferEntropyReconstructor()
}

## Load distance methods
# Name -> graph-distance instance; each exposes .dist(G1, G2) below.
distance_methods = {'jaccard':netrd.distance.JaccardDistance(),
                    'hamming':netrd.distance.Hamming(),
                    'hamming_ipsen_mikhailov':netrd.distance.HammingIpsenMikhailov(),
                    #'portrait_divergence':netrd.distance.PortraitDivergence(),
                    #'resistance_perturbation':netrd.distance.ResistancePerturbation(),
                    'frobenius':netrd.distance.Frobenius(),
                    #'netsimilie':netrd.distance.NetSimile()
                    }

## get the names of the methods
reconstruction_methods = [method for method in reconstructors.keys()]
distance_methods_list = [method for method in distance_methods.keys()]
## Dictionary of dictionaries containing the reconstructed networks
## <dataset_name, <recon_method_name, reconstructed_graph> >
networks = defaultdict(dict)

print('Computing network reconstructions')
## First get all of the reconstructions for every dataset
for data_name, time_series in datasets.items():
    print('dataset: ' + str(data_name))
    for reconstruction_method, reconstructor in reconstructors.items():
        print(reconstruction_method + '...', end='')
        # fit() infers a network from the node time series.
        networks[data_name][reconstruction_method] = reconstructor.fit(time_series)
        print('done')
## 4-deep dict structure: <dataset_name, <rmethod1, <rmethod2, <dmethod, distance> > > >
distances = dict()

## In order to standardize, I am going to collect all of the
## outputs for each distance
per_distance_values = dict()

print('Computing network distances')
## Then, compute the distance between every reconstruction of every network
for data_name, networks_dict in networks.items():
    per_distance_values[data_name] = defaultdict(list)
    print('dataset: ' + str(data_name))
    distances[data_name] = dict()
    for distance_method, distance_function in distance_methods.items():
        print(distance_method + '...', end='')
        # All ordered pairs of reconstructions, including self-pairs.
        for reconstruction_method1, network1 in networks_dict.items():
            distances[data_name].setdefault(reconstruction_method1, dict())
            for reconstruction_method2, network2 in networks_dict.items():
                distances[data_name][reconstruction_method1].setdefault(reconstruction_method2, dict())
                distance = distance_function.dist(network1, network2)
                # NOTE(review): the default passed here is the `dict` class,
                # not dict(); harmless since the key is overwritten on the
                # next line, but dict() was probably intended.
                distances[data_name][reconstruction_method1][reconstruction_method2].setdefault(distance_method, dict)
                distances[data_name][reconstruction_method1][reconstruction_method2][distance_method] = distance
                # Collect every raw value for later min/max normalization.
                per_distance_values[data_name][distance_method].append(distance)
        print('done')

## For each dataset and distance, store (max,min) tuple to use in standardization
max_min_distance_values = defaultdict(dict)
for data_name in networks.keys():
    for distance_method in distance_methods_list:
        max_min_distance_values[data_name][distance_method]=(np.max(per_distance_values[data_name][distance_method]), np.min(per_distance_values[data_name][distance_method]))
## Compute the similarity matrix by taking the average of the
## distance between every reconstruction matrix
number_of_reconstructors = len(reconstruction_methods)
name_map = {reconstruction_methods[i]: i for i in range(number_of_reconstructors)}
similarity_matrix = np.zeros((number_of_reconstructors, number_of_reconstructors))
for dataset, dataset_dict in distances.items():
    for method1, method1_dict in dataset_dict.items():
        for method2, method2_dict in dataset_dict.items():
            for distance_method in method1_dict[method2].keys():
                # BUG FIX: this previously read max_min_distance_values
                # [data_name], using the stale loop variable left over from
                # the preceding standardization loop -- every dataset was
                # normalized by the LAST dataset's (max, min). Index by the
                # current `dataset` instead.
                max_dist_val, min_dist_val = max_min_distance_values[dataset][distance_method]
                # Min/max-normalize each distance before accumulating so the
                # different distance measures are comparable.
                similarity_matrix[name_map[method1], name_map[method2]] += (
                    (method1_dict[method2][distance_method] - min_dist_val)
                    / (max_dist_val - min_dist_val))

# Average over all accumulated, normalized distances. NOTE(review): the
# divisor mirrors the original code; confirm it should not also include
# the number of distance measures.
avg_similarity = similarity_matrix / (number_of_reconstructors * len(datasets))
print('Generating collabathon_output.png')
reconstruction_names = list(name_map.keys())
N_methods = len(reconstruction_names)
mat = avg_similarity

#### plotting parameters ####
netrd_cmap = 'bone_r'
method_id = 'test'
width = 1.2
heigh = 1.2
mult = 8.0

###### plot the mat ###########
fig, ax0 = plt.subplots(1, 1, figsize=(width*mult,heigh*mult))
ax0.imshow(mat, aspect='auto', cmap=netrd_cmap)

###### be obsessive about it ###########
# One major tick per reconstruction method, labeled with the method name.
ax0.set_xticks(np.arange(0, N_methods, 1))
ax0.set_yticks(np.arange(0, N_methods, 1))
# ax0.set_xticklabels(np.arange(0, N_methods, 1), fontsize=2.0*mult)
# ax0.set_yticklabels(np.arange(0, N_methods, 1), fontsize=2.0*mult)
ax0.set_xticklabels(reconstruction_names, fontsize=1.5*mult, rotation=270)
ax0.set_yticklabels(reconstruction_names, fontsize=1.5*mult)
# Minor ticks sit on cell boundaries so the grid outlines each cell.
ax0.set_xticks(np.arange(-.5, N_methods-0.5, 1), minor=True)
ax0.set_yticks(np.arange(-.5, N_methods-0.5, 1), minor=True)
ax0.grid(which='minor', color='#333333', linestyle='-', linewidth=1.5)
ax0.set_title("Collabathon Fun Times Test Plot: \n Averaged Distance Between Reconstructed Networks",
              fontsize=2.5*mult)
plt.savefig('collabathon_output.png', bbox_inches='tight', dpi=200)
| [
"netrd.reconstruction.PartialCorrelationMatrixReconstructor",
"collections.defaultdict",
"netrd.utilities.read_time_series",
"netrd.reconstruction.MaximumLikelihoodEstimationReconstructor",
"netrd.distance.HammingIpsenMikhailov",
"numpy.arange",
"netrd.reconstruction.FreeEnergyMinimizationReconstructor"... | [((3063, 3080), 'collections.defaultdict', 'defaultdict', (['dict'], {}), '(dict)\n', (3074, 3080), False, 'from collections import defaultdict\n'), ((4988, 5005), 'collections.defaultdict', 'defaultdict', (['dict'], {}), '(dict)\n', (4999, 5005), False, 'from collections import defaultdict\n'), ((5534, 5596), 'numpy.zeros', 'np.zeros', (['(number_of_reconstructors, number_of_reconstructors)'], {}), '((number_of_reconstructors, number_of_reconstructors))\n', (5542, 5596), True, 'import numpy as np\n'), ((6461, 6517), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(1)'], {'figsize': '(width * mult, heigh * mult)'}), '(1, 1, figsize=(width * mult, heigh * mult))\n', (6473, 6517), True, 'import matplotlib.pyplot as plt\n'), ((7295, 7362), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""collabathon_output.png"""'], {'bbox_inches': '"""tight"""', 'dpi': '(200)'}), "('collabathon_output.png', bbox_inches='tight', dpi=200)\n", (7306, 7362), True, 'import matplotlib.pyplot as plt\n'), ((511, 583), 'netrd.utilities.read_time_series', 'netrd.utilities.read_time_series', (['"""../data/synth_4clique_N64_simple.csv"""'], {}), "('../data/synth_4clique_N64_simple.csv')\n", (543, 583), False, 'import netrd\n'), ((602, 676), 'netrd.utilities.read_time_series', 'netrd.utilities.read_time_series', (['"""../data/synth_BAnetwork_N64_simple.csv"""'], {}), "('../data/synth_BAnetwork_N64_simple.csv')\n", (634, 676), False, 'import netrd\n'), ((695, 769), 'netrd.utilities.read_time_series', 'netrd.utilities.read_time_series', (['"""../data/synth_ERnetwork_N64_simple.csv"""'], {}), "('../data/synth_ERnetwork_N64_simple.csv')\n", (727, 769), False, 'import netrd\n'), ((862, 915), 'netrd.reconstruction.CorrelationMatrixReconstructor', 'netrd.reconstruction.CorrelationMatrixReconstructor', ([], {}), '()\n', (913, 915), False, 'import netrd\n'), ((962, 1020), 'netrd.reconstruction.ConvergentCrossMappingReconstructor', 
'netrd.reconstruction.ConvergentCrossMappingReconstructor', ([], {}), '()\n', (1018, 1020), False, 'import netrd\n'), ((1059, 1109), 'netrd.reconstruction.ExactMeanFieldReconstructor', 'netrd.reconstruction.ExactMeanFieldReconstructor', ([], {}), '()\n', (1107, 1109), False, 'import netrd\n'), ((1156, 1214), 'netrd.reconstruction.FreeEnergyMinimizationReconstructor', 'netrd.reconstruction.FreeEnergyMinimizationReconstructor', ([], {}), '()\n', (1212, 1214), False, 'import netrd\n'), ((1252, 1302), 'netrd.reconstruction.GraphicalLassoReconstructor', 'netrd.reconstruction.GraphicalLassoReconstructor', ([], {}), '()\n', (1300, 1302), False, 'import netrd\n'), ((1343, 1406), 'netrd.reconstruction.MaximumLikelihoodEstimationReconstructor', 'netrd.reconstruction.MaximumLikelihoodEstimationReconstructor', ([], {}), '()\n', (1404, 1406), False, 'import netrd\n'), ((1447, 1506), 'netrd.reconstruction.MutualInformationMatrixReconstructor', 'netrd.reconstruction.MutualInformationMatrixReconstructor', ([], {}), '()\n', (1504, 1506), False, 'import netrd\n'), ((1541, 1588), 'netrd.reconstruction.OUInferenceReconstructor', 'netrd.reconstruction.OUInferenceReconstructor', ([], {}), '()\n', (1586, 1588), False, 'import netrd\n'), ((1630, 1690), 'netrd.reconstruction.PartialCorrelationMatrixReconstructor', 'netrd.reconstruction.PartialCorrelationMatrixReconstructor', ([], {}), '()\n', (1688, 1690), False, 'import netrd\n'), ((1736, 1800), 'netrd.reconstruction.RegularizedCorrelationMatrixReconstructor', 'netrd.reconstruction.RegularizedCorrelationMatrixReconstructor', ([], {}), '()\n', (1798, 1800), False, 'import netrd\n'), ((1847, 1905), 'netrd.reconstruction.ThoulessAndersonPalmerReconstructor', 'netrd.reconstruction.ThoulessAndersonPalmerReconstructor', ([], {}), '()\n', (1903, 1905), False, 'import netrd\n'), ((1950, 2006), 'netrd.reconstruction.TimeGrangerCausalityReconstructor', 'netrd.reconstruction.TimeGrangerCausalityReconstructor', ([], {}), '()\n', (2004, 2006), False, 
'import netrd\n'), ((2045, 2083), 'netrd.reconstruction.MarchenkoPastur', 'netrd.reconstruction.MarchenkoPastur', ([], {}), '()\n', (2081, 2083), False, 'import netrd\n'), ((2263, 2295), 'netrd.distance.JaccardDistance', 'netrd.distance.JaccardDistance', ([], {}), '()\n', (2293, 2295), False, 'import netrd\n'), ((2327, 2351), 'netrd.distance.Hamming', 'netrd.distance.Hamming', ([], {}), '()\n', (2349, 2351), False, 'import netrd\n'), ((2399, 2437), 'netrd.distance.HammingIpsenMikhailov', 'netrd.distance.HammingIpsenMikhailov', ([], {}), '()\n', (2435, 2437), False, 'import netrd\n'), ((2639, 2665), 'netrd.distance.Frobenius', 'netrd.distance.Frobenius', ([], {}), '()\n', (2663, 2665), False, 'import netrd\n'), ((3935, 3952), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (3946, 3952), False, 'from collections import defaultdict\n'), ((6619, 6645), 'numpy.arange', 'np.arange', (['(0)', 'N_methods', '(1)'], {}), '(0, N_methods, 1)\n', (6628, 6645), True, 'import numpy as np\n'), ((6662, 6688), 'numpy.arange', 'np.arange', (['(0)', 'N_methods', '(1)'], {}), '(0, N_methods, 1)\n', (6671, 6688), True, 'import numpy as np\n'), ((6979, 7014), 'numpy.arange', 'np.arange', (['(-0.5)', '(N_methods - 0.5)', '(1)'], {}), '(-0.5, N_methods - 0.5, 1)\n', (6988, 7014), True, 'import numpy as np\n'), ((7040, 7075), 'numpy.arange', 'np.arange', (['(-0.5)', '(N_methods - 0.5)', '(1)'], {}), '(-0.5, N_methods - 0.5, 1)\n', (7049, 7075), True, 'import numpy as np\n'), ((5151, 5206), 'numpy.max', 'np.max', (['per_distance_values[data_name][distance_method]'], {}), '(per_distance_values[data_name][distance_method])\n', (5157, 5206), True, 'import numpy as np\n'), ((5208, 5263), 'numpy.min', 'np.min', (['per_distance_values[data_name][distance_method]'], {}), '(per_distance_values[data_name][distance_method])\n', (5214, 5263), True, 'import numpy as np\n')] |
"""
Neural Network Diagram
----------------------
"""
# Author: <NAME> <<EMAIL>>
# License: BSD
# The figure produced by this code is published in the textbook
# "Statistics, Data Mining, and Machine Learning in Astronomy" (2013)
# For more information, see http://astroML.github.com
import numpy as np
from matplotlib import pyplot as plt
import sparse_investigation as si
connections = si.run()  # per-layer connectivity masks from the sparse model (indexed [layer][j][i]; 1 = edge present)
fig = plt.figure(facecolor='w')
ax = fig.add_axes([0, 0, 1, 1],
                  xticks=[], yticks=[])
plt.box(False)
circ = plt.Circle((1, 1), 2)  # NOTE(review): never added to the axes -- looks unused; candidate for removal
radius = 0.1  # radius of each neuron circle (axes units)
arrow_kwargs = dict(head_width=0.05, fc='black')  # shared style for all connection arrows
# function to draw arrows
def draw_connecting_arrow(ax, circ1, rad1, circ2, rad2):
    """Draw an arrow between the rims of two circles.

    The arrow starts on the boundary of the first circle and stops short of
    the second one (1.4 * rad2 before its center) so the head does not
    overlap the target circle.  Styling comes from the module-level
    ``arrow_kwargs``.
    """
    dx = circ2[0] - circ1[0]
    dy = circ2[1] - circ1[1]
    theta = np.arctan2(dy, dx)
    cos_t = np.cos(theta)
    sin_t = np.sin(theta)
    start_x = circ1[0] + rad1 * cos_t
    start_y = circ1[1] + rad1 * sin_t
    length_x = dx - (rad1 + 1.4 * rad2) * cos_t
    length_y = dy - (rad1 + 1.4 * rad2) * sin_t
    ax.arrow(start_x, start_y, length_x, length_y, **arrow_kwargs)
# function to draw circles
def draw_circle(ax, center, radius):
    """Add an unfilled circle patch (line width 2) at *center* to *ax*."""
    patch = plt.Circle(center, radius, fc='none', lw=2)
    ax.add_patch(patch)
x1 = -2  # x position of the first (input) layer of circles
x2 = 0   # x position of the middle layer
x3 = 2   # x position of the last layer
#y3 = 0
#------------------------------------------------------------
# draw circles
# NOTE(review): the first layer's circles are laid out top-down
# (linspace(4, -4)) while the arrow loops below enumerate y1 bottom-up
# (linspace(-4, 4)); the circle positions coincide visually, but the
# enumeration index i maps to different circles -- confirm this is intended.
for i, y1 in enumerate(np.linspace(4, -4, 30)):
    draw_circle(ax, (x1, y1), radius)
    #ax.text(x1 - 0.9, y1, 'Input #%i' % (i + 1),
    #        ha='right', va='center', fontsize=16)
    #draw_connecting_arrow(ax, (x1 - 0.9, y1), 0.1, (x1, y1), radius)
for y2 in np.linspace(-4, 4, 30):
    draw_circle(ax, (x2, y2), radius)
for y3 in np.linspace(-4, 4, 30):
    draw_circle(ax, (x3, y3), radius)
#draw_circle(ax, (x3, y3), radius)
#ax.text(x3 + 0.8, y3, 'Output', ha='left', va='center', fontsize=16)
#draw_connecting_arrow(ax, (x3, y3), radius, (x3 + 0.8, y3), 0.1)
#------------------------------------------------------------
# draw connecting arrows
# an arrow is drawn only where the corresponding connectivity mask is 1
for i, y1 in enumerate(np.linspace(-4, 4, 30)):
    for j, y2 in enumerate(np.linspace(-4, 4, 30)):
        if (connections[1][j][i] == 1):
            draw_connecting_arrow(ax, (x1, y1), radius, (x2, y2), radius)
for i, y2 in enumerate(np.linspace(-4, 4, 30)):
    for j, y3 in enumerate(np.linspace(-4, 4, 30)):
        if (connections[2][j][i] == 1):
            draw_connecting_arrow(ax, (x2, y2), radius, (x3, y3), radius)
#for y2 in np.linspace(-4, 4, 30):
#    draw_connecting_arrow(ax, (x2, y2), radius, (x3, y3), radius)
#------------------------------------------------------------
# Add text labels
#plt.text(x1, 5, "Input\nLayer", ha='center', va='top', fontsize=16)
plt.text(x2, 5, "Hidden Layers, 30 neurons, sparsity = 0.1", ha='center', va='top', fontsize=16)
#plt.text(x3, 2.7, "Output\nLayer", ha='center', va='top', fontsize=16)
ax.set_aspect('equal')
plt.xlim(-4, 4)
plt.ylim(-6, 6)
plt.show()
| [
"matplotlib.pyplot.xlim",
"matplotlib.pyplot.show",
"numpy.arctan2",
"sparse_investigation.run",
"matplotlib.pyplot.ylim",
"matplotlib.pyplot.box",
"matplotlib.pyplot.text",
"matplotlib.pyplot.figure",
"numpy.sin",
"matplotlib.pyplot.Circle",
"numpy.linspace",
"numpy.cos"
] | [((396, 404), 'sparse_investigation.run', 'si.run', ([], {}), '()\n', (402, 404), True, 'import sparse_investigation as si\n'), ((412, 437), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'facecolor': '"""w"""'}), "(facecolor='w')\n", (422, 437), True, 'from matplotlib import pyplot as plt\n'), ((510, 524), 'matplotlib.pyplot.box', 'plt.box', (['(False)'], {}), '(False)\n', (517, 524), True, 'from matplotlib import pyplot as plt\n'), ((532, 553), 'matplotlib.pyplot.Circle', 'plt.Circle', (['(1, 1)', '(2)'], {}), '((1, 1), 2)\n', (542, 553), True, 'from matplotlib import pyplot as plt\n'), ((1674, 1696), 'numpy.linspace', 'np.linspace', (['(-4)', '(4)', '(30)'], {}), '(-4, 4, 30)\n', (1685, 1696), True, 'import numpy as np\n'), ((1747, 1769), 'numpy.linspace', 'np.linspace', (['(-4)', '(4)', '(30)'], {}), '(-4, 4, 30)\n', (1758, 1769), True, 'import numpy as np\n'), ((2751, 2851), 'matplotlib.pyplot.text', 'plt.text', (['x2', '(5)', '"""Hidden Layers, 30 neurons, sparsity = 0.1"""'], {'ha': '"""center"""', 'va': '"""top"""', 'fontsize': '(16)'}), "(x2, 5, 'Hidden Layers, 30 neurons, sparsity = 0.1', ha='center',\n va='top', fontsize=16)\n", (2759, 2851), True, 'from matplotlib import pyplot as plt\n'), ((2944, 2959), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(-4)', '(4)'], {}), '(-4, 4)\n', (2952, 2959), True, 'from matplotlib import pyplot as plt\n'), ((2960, 2975), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(-6)', '(6)'], {}), '(-6, 6)\n', (2968, 2975), True, 'from matplotlib import pyplot as plt\n'), ((2976, 2986), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2984, 2986), True, 'from matplotlib import pyplot as plt\n'), ((715, 767), 'numpy.arctan2', 'np.arctan2', (['(circ2[1] - circ1[1])', '(circ2[0] - circ1[0])'], {}), '(circ2[1] - circ1[1], circ2[0] - circ1[0])\n', (725, 767), True, 'import numpy as np\n'), ((1229, 1272), 'matplotlib.pyplot.Circle', 'plt.Circle', (['center', 'radius'], {'fc': '"""none"""', 'lw': '(2)'}), "(center, radius, 
fc='none', lw=2)\n", (1239, 1272), True, 'from matplotlib import pyplot as plt\n'), ((1429, 1451), 'numpy.linspace', 'np.linspace', (['(4)', '(-4)', '(30)'], {}), '(4, -4, 30)\n', (1440, 1451), True, 'import numpy as np\n'), ((2092, 2114), 'numpy.linspace', 'np.linspace', (['(-4)', '(4)', '(30)'], {}), '(-4, 4, 30)\n', (2103, 2114), True, 'import numpy as np\n'), ((2307, 2329), 'numpy.linspace', 'np.linspace', (['(-4)', '(4)', '(30)'], {}), '(-4, 4, 30)\n', (2318, 2329), True, 'import numpy as np\n'), ((2144, 2166), 'numpy.linspace', 'np.linspace', (['(-4)', '(4)', '(30)'], {}), '(-4, 4, 30)\n', (2155, 2166), True, 'import numpy as np\n'), ((2359, 2381), 'numpy.linspace', 'np.linspace', (['(-4)', '(4)', '(30)'], {}), '(-4, 4, 30)\n', (2370, 2381), True, 'import numpy as np\n'), ((832, 845), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (838, 845), True, 'import numpy as np\n'), ((887, 900), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (893, 900), True, 'import numpy as np\n'), ((961, 974), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (967, 974), True, 'import numpy as np\n'), ((1034, 1047), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (1040, 1047), True, 'import numpy as np\n')] |
"""
Class to read mManager format images saved separately and their metadata (JSON) .
"""
import json, os
import numpy as np
import pandas as pd
import cv2
import warnings
from ..utils.imgIO import get_sub_dirs, get_sorted_names
class mManagerReader(object):
    """General mManager metadata and image reader for data saved as separate 2D tiff files

    Parameters
    ----------
    img_sample_path : str
        full path of the acquisition folder (parent folder of pos folder)
    img_output_path : str
        full path of the output folder
    input_chans : list
        list of input channel names; defaults to all channels in the metadata
    output_chans : list
        list of output channel names
    binning : int
        binning (or pooling) size for the images

    Attributes
    ----------
    input_meta_file : dict
        input mManager meta file of the acquistion
    _meta_pos_list : list
        position list in the meta file
    _pos_list : list
        position list to process
    name : str
        acquisition folder name
    output_meta_file : dict
        output meta file
    img_sm_path : str
        path of the acquisition folder
    img_in_pos_path : str
        path of the current position folder
    img_output_path : str
        full path of the output folder
    width : int
        width of the input image
    height : int
        height of the input image
    channels : list
        channels in the meta file
    input_chans : list
        channels to read
    n_input_chans : int
        number of channels to read
    output_chans : list
        output channels
    n_output_chans : int
        number of output channels
    n_pos : int
        number of positions in the meta file
    n_time : int
        number of time points in the meta file
    n_z :
        number of z slices in the meta file
    size_x_um : float
        pixel size in x
    size_y_um : float
        pixel size in y
    size_z_um : float
        z step
    time_stamp : list
        time points in the meta file
    pos_idx : int
        current postion index to process
    t_idx : int
        current time index to process
    z_idx : int
        current z index to process
    bg : str
        background folder name
    bg_method : str
        "Global" or "Local". Type of background correction. "Global" will correct each image
        using the same background. "Local" will do correction with locally estimated
        background in addition to global background
    bg_correct : bool
        Perform background correct (True) or not (False)
    binning : int
        binning (or pooling) size for the images
    """

    def __init__(self, img_sample_path, img_output_path=None, input_chans=None, output_chans=None, binning=1):
        # Avoid mutable default arguments; None behaves exactly like the old [].
        input_chans = [] if input_chans is None else input_chans
        output_chans = [] if output_chans is None else output_chans
        pos_path = img_sample_path  # mManager 2.0 single position format
        sub_dirs = get_sub_dirs(img_sample_path)
        if sub_dirs:
            sub_dir = sub_dirs[0]  # assume all the folders in the sample folder are position folders
            pos_path = os.path.join(img_sample_path, sub_dir)
        ##TODO: check the behavior of 2.0 gamma
        metadata_path = os.path.join(pos_path, 'metadata.txt')
        with open(metadata_path, 'r') as f:
            input_meta_file = json.load(f)
        self.input_meta_file = input_meta_file
        self.mm_version = input_meta_file['Summary']['MicroManagerVersion']
        # pick the metadata parser matching the writer's Micro-Manager version
        if self.mm_version == '1.4.22':
            self.meta_parser = self._mm1_meta_parser
        elif '2.0' in self.mm_version:
            self.meta_parser = self._mm2_meta_parser
        else:
            raise ValueError(
                'Current MicroManager reader only supports version 1.4.22 and 2.0 but {} was detected'.
                format(self.mm_version))
        self.img_sm_path = img_sample_path
        self.img_in_pos_path = pos_path
        self.img_names = get_sorted_names(pos_path)
        self.img_name_format = None
        self._detect_img_name_format()
        self.img_output_path = img_output_path
        self.input_chans = self.channels = input_meta_file['Summary']['ChNames']
        if input_chans:
            self.input_chans = input_chans
        # BUG FIX: count the channels that will actually be read. Previously this
        # was len(input_chans), which is 0 when the caller relies on the default
        # and the channels come from the metadata.
        self.n_input_chans = len(self.input_chans)
        self.output_chans = output_chans  # output channel names
        self.n_output_chans = len(output_chans)
        self.output_meta_file = []
        self.binning = binning
        self.name = input_meta_file["Summary"]["Prefix"]
        self.n_pos = input_meta_file['Summary']['Positions']
        self.n_time = input_meta_file['Summary']['Frames']
        self.n_z = input_meta_file['Summary']['Slices']
        self._t_list = self._meta_t_list = list(range(0, self.n_time))
        self._z_list = self._meta_z_list = list(range(0, self.n_z))
        self.size_z_um = input_meta_file['Summary']['z-step_um']
        self.pos_idx = 0  # assuming only single image for background
        self.t_idx = 0
        self.z_idx = 0
        self.chan_idx = 0
        self.bg = 'No Background'
        self.bg_method = 'Global'
        self.bg_correct = True
        self.meta_parser()

    def _mm1_meta_parser(self):
        """Parse position list, image size and time stamp from MM 1.4.22 metadata."""
        input_meta_file = self.input_meta_file
        self._meta_pos_list = ['Pos0']
        pos_dict_list = self.input_meta_file['Summary']['InitialPositionList']
        if pos_dict_list:
            self._meta_pos_list = [pos_dict['Label'] for pos_dict in pos_dict_list]
        self._pos_list = self._meta_pos_list
        self.width = input_meta_file['Summary']['Width']
        self.height = input_meta_file['Summary']['Height']
        self.time_stamp = input_meta_file['Summary']['Time']

    def _mm2_meta_parser(self):
        """Parse position list, image size and time stamp from MM 2.0 metadata."""
        input_meta_file = self.input_meta_file
        self._meta_pos_list = ['']
        if 'StagePositions' in self.input_meta_file['Summary']:
            pos_dict_list = self.input_meta_file['Summary']['StagePositions']
            self._meta_pos_list = [pos_dict['Label'] for pos_dict in pos_dict_list]
        self._pos_list = self._meta_pos_list
        # MM 2.0 stores width/height as strings inside UserData
        self.width = int(input_meta_file['Summary']['UserData']['Width']['PropVal'])
        self.height = int(input_meta_file['Summary']['UserData']['Height']['PropVal'])
        self.time_stamp = input_meta_file['Summary']['StartTime']

    @property
    def pos_list(self):
        return self._pos_list

    @pos_list.setter
    def pos_list(self, value):
        """position list to process

        Parameters
        ----------
        value: list
            position list to process
        """
        assert set(value).issubset(self._meta_pos_list), \
            'some positions cannot be found in metadata'
        self._pos_list = value

    @property
    def t_list(self):
        return self._t_list

    @t_list.setter
    def t_list(self, value):
        """time list to process

        Parameters
        ----------
        value: list
            time list to process
        """
        assert set(value).issubset(self._meta_t_list), \
            'some time points cannot be found in metadata'
        self._t_list = value

    @property
    def z_list(self):
        return self._z_list

    @z_list.setter
    def z_list(self, value):
        """z list to process

        Parameters
        ----------
        value: list
            z list to process
        """
        assert set(value).issubset(self._meta_z_list), \
            'some z slices cannot be found in metadata'
        self._z_list = value

    def _detect_img_name_format(self):
        """Infer the tiff naming convention from the first image file name."""
        img_name = self.img_names[0]
        if 'img_000000' in img_name:
            self.img_name_format = 'mm_1_4_22'
        elif 'position' in img_name:
            self.img_name_format = 'mm_2_0'
        elif 'img_' in img_name:
            self.img_name_format = 'recon_order'
        else:
            raise ValueError('Unknown image name format')

    def get_chan_name(self):
        """Return the name of the channel currently selected by ``chan_idx``."""
        return self.input_chans[self.chan_idx]

    def get_img_name(self):
        """Build the tiff file name for the current (channel, t, pos, z) indices."""
        if self.img_name_format == 'mm_1_4_22':
            img_name = 'img_000000{:03d}_{}_{:03d}.tif'.\
                format(self.t_idx, self.get_chan_name(), self.z_idx)
        elif self.img_name_format == 'mm_2_0':
            chan_meta_idx = self.channels.index(self.get_chan_name())
            img_name = 'img_channel{:03d}_position{:03d}_time{:09d}_z{:03d}.tif'.\
                format(chan_meta_idx, self.t_idx, self.pos_idx, self.z_idx)
        elif self.img_name_format == 'recon_order':
            # BUG FIX: the channel name fills the first placeholder and t/p/z fill
            # the integer fields (matches the name produced by write_img). The old
            # argument order put the channel name into a {:03d} field, raising
            # ValueError.
            img_name = 'img_{}_t{:03d}_p{:03d}_z{:03d}.tif'.\
                format(self.get_chan_name(), self.t_idx, self.pos_idx, self.z_idx)
        else:
            raise ValueError('Undefined image name format')
        return img_name

    def read_img(self):
        """read a single image at (c,t,p,z)"""
        img_name = self.get_img_name()
        img_file = os.path.join(self.img_in_pos_path, img_name)
        img = cv2.imread(img_file, -1)  # flag -1 to preserve the bit depth of the raw image
        if img is None:
            warnings.warn('image "{}" cannot be found. Return None instead.'.format(img_name))
        else:
            img = img.astype(np.float32, copy=False)  # convert to float32 without making a copy to save memory
        return img

    def read_multi_chan_img_stack(self, z_range=None):
        """read multi-channel image stack at a given (t,p)

        Parameters
        ----------
        z_range : list, optional
            [start, stop) z-slice range; defaults to the full stack

        Returns
        -------
        list
            one zyx-ordered numpy stack per input channel
        """
        if not os.path.exists(self.img_sm_path):
            raise FileNotFoundError(
                "image file doesn't exist at:", self.img_sm_path
            )
        if not z_range:
            # BUG FIX: the attribute is n_z; self.nZ never existed and raised
            # AttributeError whenever z_range was omitted.
            z_range = [0, self.n_z]
        img_chann = []  # list of 2D or 3D images from different channels
        for chan_idx in range(self.n_input_chans):
            img_stack = []
            self.chan_idx = chan_idx
            for z_idx in range(z_range[0], z_range[1]):
                self.z_idx = z_idx
                img = self.read_img()
                img_stack += [img]
            img_stack = np.stack(img_stack)  # follow zyx order
            img_chann += [img_stack]
        return img_chann

    def write_img(self, img):
        """Write *img* for the current (channel, t, pos, z) indices.

        Only supports the recon_order image name format currently.
        """
        if not os.path.exists(self.img_output_path):  # create folder for processed images
            os.makedirs(self.img_output_path)
        img_name = 'img_'+self.output_chans[self.chan_idx]+'_t%03d_p%03d_z%03d.tif'%(self.t_idx, self.pos_idx, self.z_idx)
        if len(img.shape) < 3:
            cv2.imwrite(os.path.join(self.img_output_path, img_name), img)
        else:
            # color images are written in BGR order as expected by OpenCV
            cv2.imwrite(os.path.join(self.img_output_path, img_name), cv2.cvtColor(img, cv2.COLOR_RGB2BGR))

    def writeMetaData(self):
        """Write updated metadata and the position table to the output folder."""
        if not os.path.exists(self.img_output_path):  # create folder for processed images
            os.makedirs(self.img_output_path)
        self.input_meta_file['Summary']['ChNames'] = self.input_chans
        self.input_meta_file['Summary']['Channels'] = self.n_input_chans
        metaFileName = os.path.join(self.img_output_path, 'metadata.txt')
        with open(metaFileName, 'w') as f:
            json.dump(self.input_meta_file, f)
        df_pos_path = os.path.join(self.img_output_path, 'pos_table.csv')
        df_pos = pd.DataFrame(list(enumerate(self.pos_list)),
                              columns=['pos idx', 'pos dir'])
        df_pos.to_csv(df_pos_path, sep=',')
class PolAcquReader(mManagerReader):
    """mManager metadata and image reader for PolAcquisition datasets.

    Extends ``mManagerReader`` with the polarization-specific acquisition
    settings stored under the metadata summary.

    Attributes
    ----------
    acquScheme : str
        Pol image acquisition scheme, '4-Frame' or '5-Frame'
    bg : str
        background folder name recorded in the metadata
    blackLevel : int
        black level of the camera
    mirror : str
        'Yes' or 'No'; changing this flag flips the slow axis horizontally
    swing : float
        swing of the elliptical polarization states (fraction of wavelength)
    wavelength : int
        wavelength of the illumination light (nm)
    """

    def __init__(self, img_sample_path, img_output_path=None, input_chans=[], output_chans=[], binning=1):
        super(PolAcquReader, self).__init__(img_sample_path, img_output_path,
                                            input_chans, output_chans, binning)
        summary = self.input_meta_file['Summary']
        self.acquScheme = summary['~ Acquired Using']
        self.bg = summary['~ Background']
        self.blackLevel = summary['~ BlackLevel']
        self.mirror = summary['~ Mirror']
        self.swing = summary['~ Swing (fraction)']
        self.wavelength = summary['~ Wavelength (nm)']
| [
"numpy.stack",
"json.dump",
"json.load",
"os.makedirs",
"cv2.cvtColor",
"os.path.exists",
"cv2.imread",
"os.path.join"
] | [((3038, 3076), 'os.path.join', 'os.path.join', (['pos_path', '"""metadata.txt"""'], {}), "(pos_path, 'metadata.txt')\n", (3050, 3076), False, 'import json, os\n'), ((8722, 8766), 'os.path.join', 'os.path.join', (['self.img_in_pos_path', 'img_name'], {}), '(self.img_in_pos_path, img_name)\n', (8734, 8766), False, 'import json, os\n'), ((8781, 8805), 'cv2.imread', 'cv2.imread', (['img_file', '(-1)'], {}), '(img_file, -1)\n', (8791, 8805), False, 'import cv2\n'), ((10876, 10926), 'os.path.join', 'os.path.join', (['self.img_output_path', '"""metadata.txt"""'], {}), "(self.img_output_path, 'metadata.txt')\n", (10888, 10926), False, 'import json, os\n'), ((11041, 11092), 'os.path.join', 'os.path.join', (['self.img_output_path', '"""pos_table.csv"""'], {}), "(self.img_output_path, 'pos_table.csv')\n", (11053, 11092), False, 'import json, os\n'), ((2923, 2961), 'os.path.join', 'os.path.join', (['img_sample_path', 'sub_dir'], {}), '(img_sample_path, sub_dir)\n', (2935, 2961), False, 'import json, os\n'), ((3151, 3163), 'json.load', 'json.load', (['f'], {}), '(f)\n', (3160, 3163), False, 'import json, os\n'), ((9256, 9288), 'os.path.exists', 'os.path.exists', (['self.img_sm_path'], {}), '(self.img_sm_path)\n', (9270, 9288), False, 'import json, os\n'), ((9842, 9861), 'numpy.stack', 'np.stack', (['img_stack'], {}), '(img_stack)\n', (9850, 9861), True, 'import numpy as np\n'), ((10062, 10098), 'os.path.exists', 'os.path.exists', (['self.img_output_path'], {}), '(self.img_output_path)\n', (10076, 10098), False, 'import json, os\n'), ((10149, 10182), 'os.makedirs', 'os.makedirs', (['self.img_output_path'], {}), '(self.img_output_path)\n', (10160, 10182), False, 'import json, os\n'), ((10589, 10625), 'os.path.exists', 'os.path.exists', (['self.img_output_path'], {}), '(self.img_output_path)\n', (10603, 10625), False, 'import json, os\n'), ((10676, 10709), 'os.makedirs', 'os.makedirs', (['self.img_output_path'], {}), '(self.img_output_path)\n', (10687, 10709), False, 'import 
json, os\n'), ((10984, 11018), 'json.dump', 'json.dump', (['self.input_meta_file', 'f'], {}), '(self.input_meta_file, f)\n', (10993, 11018), False, 'import json, os\n'), ((10359, 10403), 'os.path.join', 'os.path.join', (['self.img_output_path', 'img_name'], {}), '(self.img_output_path, img_name)\n', (10371, 10403), False, 'import json, os\n'), ((10448, 10492), 'os.path.join', 'os.path.join', (['self.img_output_path', 'img_name'], {}), '(self.img_output_path, img_name)\n', (10460, 10492), False, 'import json, os\n'), ((10494, 10530), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_RGB2BGR'], {}), '(img, cv2.COLOR_RGB2BGR)\n', (10506, 10530), False, 'import cv2\n')] |
import numpy as np
from graphical_model_learning.utils.core_utils import random_max
from tqdm import trange
def resit(
        samples: np.ndarray,
        regression_function,  # todo: hyperparameters should be CV'd
        dependence_function,
        progress: bool = False
):
    """Estimate a causal ordering with RESIT (regression with subsequent
    independence test).

    At each step, every remaining variable is regressed on all the other
    remaining variables; the variable whose residuals are *least* dependent
    on the rest (a candidate sink) is removed and prepended to the ordering.
    Candidate sinks are therefore removed first and end up at the tail of
    the returned ordering.

    :param samples: data matrix of shape (nsamples, nnodes)
    :param regression_function: callable(target, conditioning) -> residuals
    :param dependence_function: callable(residuals, conditioning) -> score,
        larger meaning more dependent
    :param progress: show a tqdm progress bar over the outer loop
    :return: list of node indices, a candidate causal order
    """
    nnodes = samples.shape[1]
    nodes = set(range(nnodes))
    perm = []
    r = trange if progress else range
    for _ in r(nnodes):
        node_dependences = dict()
        for node in nodes:
            other_nodes = list(nodes - {node})
            residuals = regression_function(samples[:, node], samples[:, other_nodes])
            dependence = dependence_function(residuals, samples[:, other_nodes])
            node_dependences[node] = dependence
        # least-dependent residuals -> candidate sink; ties broken at random
        weakest_node = random_max(node_dependences, minimize=True)
        nodes -= {weakest_node}
        perm = [weakest_node, *perm]
    return perm
if __name__ == '__main__':
    # Demo: recover a causal ordering from data simulated out of a random
    # additive-noise DAG, using GAM regression + HSIC independence testing.
    from numpy.linalg import inv
    from causaldag.utils.ci_tests import hsic_test_vector
    from causaldag.rand.graphs import rand_additive_basis, directed_erdos
    from scipy.special import expit
    import pygam

    def identity(x: np.ndarray):
        return x

    def square(x: np.ndarray):
        return x**2 + x - 1

    def cubic(x: np.ndarray):
        return x**3

    def logistic(x: np.ndarray):
        return expit(x)

    def linear_reg(target_samples: np.ndarray, cond_samples: np.ndarray):
        """OLS residuals of target_samples (n,) regressed on cond_samples (n, p)."""
        cond_samples = cond_samples - cond_samples.mean(axis=0)
        target_samples = target_samples - target_samples.mean()
        cond_cov = cond_samples.T @ cond_samples
        coefs = inv(cond_cov) @ cond_samples.T @ target_samples
        residuals = target_samples - cond_samples @ coefs
        # todo: sample splitting for coefficients and residuals
        return residuals

    def gam_reg(target_samples, cond_samples):
        """Deviance residuals from a generalized additive model fit."""
        g = pygam.GAM()
        g.fit(cond_samples, target_samples)
        residuals = g.deviance_residuals(cond_samples, target_samples)
        return residuals

    def hsic_dependence(samples1, samples2):
        res = hsic_test_vector(samples1, samples2)  # TODO: SHOULD CROSS-VALIDATE HYPERPARAMS
        # negated so that resit's minimize-dependence selection works
        return -res['statistic']

    d = directed_erdos(10, exp_nbrs=9, random_order=False)
    nsamples = 1000
    basis = [cubic]
    cam_dag = rand_additive_basis(d, basis, snr_dict=.9)
    s = cam_dag.sample(nsamples)
    perm = resit(s, gam_reg, hsic_dependence, progress=True)
| [
"causaldag.rand.graphs.directed_erdos",
"causaldag.rand.graphs.rand_additive_basis",
"causaldag.utils.ci_tests.hsic_test_vector",
"scipy.special.expit",
"graphical_model_learning.utils.core_utils.random_max",
"numpy.linalg.inv",
"pygam.GAM"
] | [((2375, 2425), 'causaldag.rand.graphs.directed_erdos', 'directed_erdos', (['(10)'], {'exp_nbrs': '(9)', 'random_order': '(False)'}), '(10, exp_nbrs=9, random_order=False)\n', (2389, 2425), False, 'from causaldag.rand.graphs import rand_additive_basis, directed_erdos\n'), ((2480, 2523), 'causaldag.rand.graphs.rand_additive_basis', 'rand_additive_basis', (['d', 'basis'], {'snr_dict': '(0.9)'}), '(d, basis, snr_dict=0.9)\n', (2499, 2523), False, 'from causaldag.rand.graphs import rand_additive_basis, directed_erdos\n'), ((806, 849), 'graphical_model_learning.utils.core_utils.random_max', 'random_max', (['node_dependences'], {'minimize': '(True)'}), '(node_dependences, minimize=True)\n', (816, 849), False, 'from graphical_model_learning.utils.core_utils import random_max\n'), ((1418, 1426), 'scipy.special.expit', 'expit', (['x'], {}), '(x)\n', (1423, 1426), False, 'from scipy.special import expit\n'), ((2010, 2021), 'pygam.GAM', 'pygam.GAM', ([], {}), '()\n', (2019, 2021), False, 'import pygam\n'), ((2252, 2288), 'causaldag.utils.ci_tests.hsic_test_vector', 'hsic_test_vector', (['samples1', 'samples2'], {}), '(samples1, samples2)\n', (2268, 2288), False, 'from causaldag.utils.ci_tests import hsic_test_vector\n'), ((1754, 1767), 'numpy.linalg.inv', 'inv', (['cond_cov'], {}), '(cond_cov)\n', (1757, 1767), False, 'from numpy.linalg import inv\n')] |
# coding=utf-8
""""
Matrix factorization model for item prediction (ranking) optimized using BPR (BPRMF)
[Item Recommendation (Ranking)]
Literature:
<NAME>, <NAME>, <NAME>, <NAME>:
BPR: Bayesian Personalized Ranking from Implicit Feedback.
UAI 2009.
http://www.ismll.uni-hildesheim.de/pub/pdfs/Rendle_et_al2009-Bayesian_Personalized_Ranking.pdf
"""
# © 2018. Case Recommender (MIT License)
import random
import numpy as np
from caserec.recommenders.item_recommendation.base_item_recommendation import BaseItemRecommendation
from caserec.utils.extra_functions import timed
__author__ = 'removed for double blind review'
class BprMF(BaseItemRecommendation):
    def __init__(self, train_file=None, test_file=None, output_file=None, factors=10, learn_rate=0.05, epochs=30,
                 batch_size=0, rank_length=10, init_mean=0, init_stdev=0.1, reg_u=0.0025, reg_i=0.0025,
                 reg_j=0.00025, reg_bias=0, sep='\t', output_sep='\t', random_seed=None, items_test=False):
        """
        BPRMF for Item Recommendation

        BPR reduces ranking to pairwise classification. The different variants (settings) of this recommender
        roughly optimize the area under the ROC curve (AUC).

        Usage::

            >> BprMF(train, test).compute()

            >> BprMF(train, test, batch_size=30).compute()

        :param train_file: File which contains the train set. This file needs to have at least 3 columns
        (user item feedback_value).
        :type train_file: str

        :param test_file: File which contains the test set. This file needs to have at least 3 columns
        (user item feedback_value).
        :type test_file: str, default None

        :param output_file: File with dir to write the final predictions
        :type output_file: str, default None

        :param factors: Number of latent factors per user/item
        :type factors: int, default 10

        :param learn_rate: Learning rate (alpha)
        :type learn_rate: float, default 0.05

        :param epochs: Number of epochs over the training data
        :type epochs: int, default 30

        :param batch_size: Reduce number of interactions in each epoch, if 0 usage the number of positive interactions
        in the train set
        :type batch_size: int, default 0

        :param rank_length: Size of the rank that must be generated by the predictions of the recommender algorithm
        :type rank_length: int, default 10

        :param init_mean: Mean of the normal distribution used to initialize the latent factors
        :type init_mean: float, default 0

        :param init_stdev: Standard deviation of the normal distribution used to initialize the latent factors
        :type init_stdev: float, default 0.1

        :param reg_u: Regularization parameter for user factors
        :type reg_u: float, default 0.0025

        :param reg_i: Regularization parameter for positive item factors
        :type reg_i: float, default 0.0025

        :param reg_j: Regularization parameter for negative item factors
        :type reg_j: float, default 0.00025

        :param reg_bias: Regularization parameter for the bias term
        :type reg_bias: float, default 0

        :param sep: Delimiter for input files
        :type sep: str, default '\t'

        :param output_sep: Delimiter for output file
        :type output_sep: str, default '\t'

        :param random_seed: Number of seed. Lock random numbers for reproducibility of experiments.
        :type random_seed: int, default None

        :param items_test: If True, update unobserved set of each user with samples in the test set
        :type items_test: bool, default False

        """
        super(BprMF, self).__init__(train_file=train_file, test_file=test_file, output_file=output_file,
                                    rank_length=rank_length, sep=sep, output_sep=output_sep)

        self.recommender_name = 'BPRMF'
        self.factors = factors
        self.learn_rate = learn_rate
        self.epochs = epochs
        self.batch_size = batch_size
        self.init_mean = init_mean
        self.init_stdev = init_stdev
        self.reg_bias = reg_bias
        self.reg_u = reg_u
        self.reg_i = reg_i
        self.reg_j = reg_j
        self.items_test = items_test

        if random_seed is not None:
            np.random.seed(random_seed)
            random.seed(random_seed)

        # internal vars
        self.p = None     # user latent factor matrix (filled by create_factors)
        self.q = None     # item latent factor matrix (filled by create_factors)
        self.bias = None  # item bias vector (filled by create_factors)
        self.num_interactions = None  # number of SGD updates per epoch (set in init_model)

    def init_model(self):
        """
        Method to treat and initialize the model

        """
        # Upgrade unobserved items with test set samples
        if self.items_test:
            for u, user in enumerate(self.users):
                self.train_set['items_unobserved'][user] = list(set(self.items) -
                                                                set(self.train_set['items_seen_by_user'][user]))
        # Initialize factors
        self.create_factors()

        # Define number of interactions in each epoch
        if self.batch_size <= 0:
            self.num_interactions = self.train_set['number_interactions']
        else:
            self.num_interactions = int(self.train_set['number_interactions'] / self.batch_size) + 1

    def fit(self):
        """
        This method performs iterations of stochastic gradient ascent over the training data. One iteration is samples
        number of positive entries in the training matrix times, if batch size is 0, else we divide the number of
        positive entries per batch size (see in the init_model).

        """
        for n in range(self.epochs):
            # users are drawn with replacement; each drawn user yields one SGD step
            random_users = random.choices(self.train_set['users'], k=self.num_interactions)
            for user in random_users:
                i, j = self.sample_pair(user)
                self.update_factors(self.user_to_user_id[user], self.item_to_item_id[i], self.item_to_item_id[j])

    def create_factors(self):
        """
        This method create factors for users, items and bias

        """
        self.p = np.random.normal(self.init_mean, self.init_stdev, (len(self.users), self.factors))
        self.q = np.random.normal(self.init_mean, self.init_stdev, (len(self.items), self.factors))
        self.bias = np.zeros(len(self.items), np.double)

    def sample_pair(self, user):
        """
        Randomly selects a known and unknown item to a particular user.

        :param user: User to generate pairs
        :type user: int

        :return: known item, unknown item

        """
        return random.choice(list(self.train_set['items_seen_by_user'][user])), random.choice(
            self.train_set['items_unobserved'][user])

    def predict_score(self, user, item):
        """
        Method to predict a single score for a pair (user, item)

        :param user: User ID
        :type user: int

        :param item: Item ID
        :type item: int

        :return: Score generate for pair (user, item)
        :rtype: float

        """
        # note: the item bias is added separately by the callers
        return np.dot(self.p[user], self.q[item])

    def update_factors(self, u, i, j):
        """
        Update latent factors according to the stochastic gradient descent update rule

        :param u: User ID for update
        :type u: int

        :param i: Known Item ID
        :type i: int

        :param j: Unknown Item ID
        :type j: int

        """
        # Compute Difference
        x_uij = self.bias[i] - self.bias[j] + (self.predict_score(u, i) - self.predict_score(u, j))

        # eps = 1/(1+exp(x_uij)) = sigmoid(-x_uij); NOTE: may overflow
        # (RuntimeWarning) for very large positive x_uij, where eps -> 0
        eps = 1 / (1 + np.exp(x_uij))

        self.bias[i] += self.learn_rate * (eps - self.reg_bias * self.bias[i])
        self.bias[j] += self.learn_rate * (-eps - self.reg_bias * self.bias[j])

        # Adjust the factors
        u_f = self.p[u]
        i_f = self.q[i]
        j_f = self.q[j]

        # Compute and apply factor updates
        self.p[u] += self.learn_rate * ((i_f - j_f) * eps - self.reg_u * u_f)
        self.q[i] += self.learn_rate * (u_f * eps - self.reg_i * i_f)
        self.q[j] += self.learn_rate * (-u_f * eps - self.reg_j * j_f)

    def predict(self):
        """
        This method predict final result, building an rank of each user of the train set.

        """
        # w[u, i] = bias_i + p_u . q_i -- predicted score for every (user, item) pair
        w = self.bias.T + np.dot(self.p, self.q.T)

        for u, user in enumerate(self.users):
            partial_ranking = list()
            candidate_items = sorted(range(len(w[u])), key=lambda k: w[u][k], reverse=True)
            for i in candidate_items:
                item = self.item_id_to_item[i]
                # skip items the user has already interacted with
                if item not in self.train_set['items_seen_by_user'].get(user, self.items):
                    partial_ranking.append((user, item, w[u][i]))

                if len(partial_ranking) == self.rank_length:
                    break

            self.ranking += partial_ranking

    def compute(self, verbose=True, metrics=None, verbose_evaluation=True, as_table=False, table_sep='\t'):
        """
        Extends compute method from BaseItemRecommendation. Method to run recommender algorithm

        :param verbose: Print recommender and database information
        :type verbose: bool, default True

        :param metrics: List of evaluation measures
        :type metrics: list, default None

        :param verbose_evaluation: Print the evaluation results
        :type verbose_evaluation: bool, default True

        :param as_table: Print the evaluation results as table
        :type as_table: bool, default False

        :param table_sep: Delimiter for print results (only work with verbose=True and as_table=True)
        :type table_sep: str, default '\t'

        """
        super(BprMF, self).compute(verbose=verbose)

        if verbose:
            self.init_model()
            print("training_time:: %4f sec" % timed(self.fit))
            if self.extra_info_header is not None:
                print(self.extra_info_header)
            print("prediction_time:: %4f sec" % timed(self.predict))
            print('\n')
        else:
            # Execute all in silence without prints
            self.init_model()
            self.fit()
            self.predict()

        self.write_ranking()

        if self.test_file is not None:
            self.evaluate(metrics, verbose_evaluation, as_table=as_table, table_sep=table_sep)
| [
"numpy.random.seed",
"caserec.utils.extra_functions.timed",
"random.choices",
"random.choice",
"random.seed",
"numpy.exp",
"numpy.dot"
] | [((7113, 7147), 'numpy.dot', 'np.dot', (['self.p[user]', 'self.q[item]'], {}), '(self.p[user], self.q[item])\n', (7119, 7147), True, 'import numpy as np\n'), ((4377, 4404), 'numpy.random.seed', 'np.random.seed', (['random_seed'], {}), '(random_seed)\n', (4391, 4404), True, 'import numpy as np\n'), ((4417, 4441), 'random.seed', 'random.seed', (['random_seed'], {}), '(random_seed)\n', (4428, 4441), False, 'import random\n'), ((5750, 5814), 'random.choices', 'random.choices', (["self.train_set['users']"], {'k': 'self.num_interactions'}), "(self.train_set['users'], k=self.num_interactions)\n", (5764, 5814), False, 'import random\n'), ((6711, 6766), 'random.choice', 'random.choice', (["self.train_set['items_unobserved'][user]"], {}), "(self.train_set['items_unobserved'][user])\n", (6724, 6766), False, 'import random\n'), ((8327, 8351), 'numpy.dot', 'np.dot', (['self.p', 'self.q.T'], {}), '(self.p, self.q.T)\n', (8333, 8351), True, 'import numpy as np\n'), ((7621, 7634), 'numpy.exp', 'np.exp', (['x_uij'], {}), '(x_uij)\n', (7627, 7634), True, 'import numpy as np\n'), ((9862, 9877), 'caserec.utils.extra_functions.timed', 'timed', (['self.fit'], {}), '(self.fit)\n', (9867, 9877), False, 'from caserec.utils.extra_functions import timed\n'), ((10025, 10044), 'caserec.utils.extra_functions.timed', 'timed', (['self.predict'], {}), '(self.predict)\n', (10030, 10044), False, 'from caserec.utils.extra_functions import timed\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Mar 2 11:35:07 2021
@authors: Dr. <NAME> and Dr. <NAME>
"""
import numpy as np
import math
import cmath
from QuantumInformation import RecurNum
from QuantumInformation import QuantumMechanics as QM
from QuantumInformation import LinearAlgebra as LA
import re
class GatesTools:
    """N-qubit gates, canonical entangled states, and entropy/distance
    measures for quantum information work.

    Pure-matrix routines use only numpy/math/cmath/re; the Bell-state
    constructors rely on RecurNum for the index recursions, and the
    entropy/fidelity routines on the QuantumMechanics (QM) and
    LinearAlgebra (LA) helpers imported at module level.
    """

    def __init__(self):
        """The class keeps no state; it only groups related routines."""

    def sx(self, N=1):
        """
        Construct the N-fold tensor power of the Pauli matrix sigma_x.
        Inputs:
            N: number of spins
        Output:
            sigmax: (2**N x 2**N) matrix with ones on the anti-diagonal
        """
        sigmax = np.zeros([2**N, 2**N])
        j = (2**N) - 1
        for i in range(0, 2**N):
            sigmax[i, j] = 1
            j = j - 1
        return sigmax

    def sy(self, N=1):
        """
        Construct the N-fold tensor power of the Pauli matrix sigma_y.
        Inputs:
            N: number of spins
        Output:
            sigmay: (2**N x 2**N) complex matrix
        """
        sigmay2 = np.array([[0, complex(0, -1)], [complex(0, 1), 0]])
        if N > 1:
            for i in range(2, N + 1):
                if i == 2:
                    sigmay = np.kron(sigmay2, sigmay2)
                elif i > 2:
                    sigmay = np.kron(sigmay, sigmay2)
        else:
            sigmay = sigmay2
        return sigmay

    def sz(self, N=1):
        """
        Construct the N-fold tensor power of the Pauli matrix sigma_z.
        Inputs:
            N: number of spins
        Output:
            sigmaz: (2**N x 2**N) diagonal matrix
        """
        sigmaz2 = np.array([[1, 0], [0, -1]])
        if N > 1:
            for i in range(2, N + 1):
                if i == 2:
                    sigmaz = np.kron(sigmaz2, sigmaz2)
                elif i > 2:
                    sigmaz = np.kron(sigmaz, sigmaz2)
        else:
            sigmaz = sigmaz2
        return sigmaz

    def hadamard_mat(self, N=1):
        """
        Construct the N-fold tensor power of the Hadamard matrix.
        Inputs:
            N: number of spins
        Output:
            hadamard: (2**N x 2**N) Hadamard matrix
        """
        hadamard2 = np.array([[1/np.sqrt(2), 1/np.sqrt(2)],
                              [1/np.sqrt(2), -1/np.sqrt(2)]])
        if N > 1:
            for i in range(2, N + 1):
                if i == 2:
                    hadamard = np.kron(hadamard2, hadamard2)
                elif i > 2:
                    hadamard = np.kron(hadamard, hadamard2)
        else:
            hadamard = hadamard2
        return hadamard

    def phase_gate(self, N=1):
        """
        Construct the N-fold tensor power of the phase gate S = diag(1, i).
        Inputs:
            N: number of spins
        Output:
            phaseg: (2**N x 2**N) phase-gate matrix
        """
        phaseg2 = np.array([[1, 0],
                            [0, complex(0, 1)]])
        if N > 1:
            for i in range(2, N + 1):
                if i == 2:
                    phaseg = np.kron(phaseg2, phaseg2)
                elif i > 2:
                    phaseg = np.kron(phaseg, phaseg2)
        else:
            phaseg = phaseg2
        return phaseg

    def rotation_gate(self, k, N=1):
        """
        Construct the N-fold tensor power of R_k = diag(1, exp(2*pi*i/2**k)).
        Input:
            k: a positive number fixing the rotation angle 2*pi/2**k
            N: number of spins
        Returns:
            rotg: (2**N x 2**N) rotation-gate matrix
        """
        assert k > 0, "k is not positive number"
        z = complex(0, (2*math.pi)/(2**k))
        rotg2 = np.array([[1, 0], [0, cmath.exp(z)]])
        if N > 1:
            for i in range(2, N + 1):
                if i == 2:
                    rotg = np.kron(rotg2, rotg2)
                elif i > 2:
                    rotg = np.kron(rotg, rotg2)
        else:
            rotg = rotg2
        return rotg

    def cx_gate(self):
        """Return the two-qubit controlled-NOT (CNOT) gate."""
        return np.array([[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 0, 1], [0, 0, 1, 0]])

    def cz_gate(self):
        """Return the two-qubit controlled-Z gate."""
        return np.array([[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0], [0, 0, 0, -1]])

    def swap_gate(self):
        """Return the two-qubit SWAP gate."""
        return np.array([[1, 0, 0, 0], [0, 0, 1, 0], [0, 1, 0, 0], [0, 0, 0, 1]])

    def toffoli_gate(self):
        """Return the three-qubit Toffoli (CCNOT) gate."""
        return np.array([[1, 0, 0, 0, 0, 0, 0, 0], [0, 1, 0, 0, 0, 0, 0, 0],
                         [0, 0, 1, 0, 0, 0, 0, 0], [0, 0, 0, 1, 0, 0, 0, 0],
                         [0, 0, 0, 0, 1, 0, 0, 0], [0, 0, 0, 0, 0, 1, 0, 0],
                         [0, 0, 0, 0, 0, 0, 0, 1], [0, 0, 0, 0, 0, 0, 1, 0]])

    def fredkin_gate(self):
        """Return the three-qubit Fredkin (controlled-SWAP) gate."""
        return np.array([[1, 0, 0, 0, 0, 0, 0, 0], [0, 1, 0, 0, 0, 0, 0, 0],
                         [0, 0, 1, 0, 0, 0, 0, 0], [0, 0, 0, 1, 0, 0, 0, 0],
                         [0, 0, 0, 0, 1, 0, 0, 0], [0, 0, 0, 0, 0, 0, 1, 0],
                         [0, 0, 0, 0, 0, 1, 0, 0], [0, 0, 0, 0, 0, 0, 0, 1]])

    # RN and LN state having plus sign
    def bell1(self, tot_spins=2, shift=0):
        """
        Construct N tensor products of the |bell1> or T|bell1> Bell state.
        Input:
            tot_spins: the total number of spins (must be even)
            shift: 0 gives |bell1>, 1 gives T|bell1>
        Output:
            state: the |bell1> or T|bell1> state vector of size 2**tot_spins
        """
        assert tot_spins % 2 == 0, "the total number of spins is not an even number"
        assert shift == 0 or shift == 1, "Invalid entry of the shift value"
        terms = int(tot_spins/2)
        row = np.zeros([terms, 1])
        mylist = []
        icount = -1
        # RecurNum fills mylist with the basis-state indices of the product state
        RecurNum.RecurChainRL1(row, tot_spins, icount, mylist, shift)
        mylist = np.array(mylist)
        state = np.zeros([2**tot_spins])
        factor = 1/math.sqrt(2)
        len_mylist = len(mylist)
        for x in range(0, len_mylist):
            state[mylist.item(x)] = factor**terms
        return (state)

    # RN and LN state constructed from the singlet state
    def bell2(self, tot_spins=2, shift=0):
        """
        Construct N tensor products of the |bell2> or T|bell2> Bell state.
        Input:
            tot_spins: the total number of spins (must be even)
            shift: 0 gives |bell2>, 1 gives T|bell2>
        Output:
            state: the |bell2> or T|bell2> state vector of size 2**tot_spins
        """
        assert tot_spins % 2 == 0, "the total number of spins is not an even number"
        assert shift == 0 or shift == 1, "Invalid entry of the shift value"
        terms = int(tot_spins/2)
        row = np.zeros([terms, 1])
        mylist = []
        icount = -1
        RecurNum.RecurChainRL2(row, tot_spins, icount, mylist, shift)
        mylist = np.array(mylist)
        state = np.zeros([2**tot_spins])
        factor = 1/math.sqrt(2)
        len_mylist = len(mylist)
        # a negative index from RecurNum encodes a minus-sign amplitude
        for x in range(0, len_mylist):
            if mylist.item(x) < 0:
                state[-mylist.item(x)] = -factor**terms
            elif mylist.item(x) >= 0:
                state[mylist.item(x)] = factor**terms
        return (state)

    # 00 and 11 bell state having plus sign
    def bell3(self, tot_spins=2, shift=0):
        """
        Construct N tensor products of the |bell3> or T|bell3> Bell state.
        Input:
            tot_spins: the total number of spins (must be even)
            shift: 0 gives |bell3>, 1 gives T|bell3>
        Output:
            state: the |bell3> or T|bell3> state vector of size 2**tot_spins
        """
        assert tot_spins % 2 == 0, "the total number of spins is not an even number"
        assert shift == 0 or shift == 1, "Invalid entry of the shift value"
        terms = int(tot_spins/2)
        row = np.zeros([terms])
        mylist = []
        icount = -1
        RecurNum.RecurChainRL3(row, tot_spins, icount, mylist, shift)
        mylist = np.array(mylist)
        state = np.zeros([2**tot_spins])
        factor = 1/math.sqrt(2)
        len_mylist = len(mylist)
        for x in range(0, len_mylist):
            state[mylist.item(x)] = factor**terms
        return (state)

    # 00 and 11 bell state having negative sign
    def bell4(self, tot_spins=2, shift=0):
        """
        Construct N tensor products of the |bell4> or T|bell4> Bell state.
        Input:
            tot_spins: the total number of spins (must be even)
            shift: 0 gives |bell4>, 1 gives T|bell4>
        Output:
            state: the |bell4> or T|bell4> state vector of size 2**tot_spins
        """
        assert tot_spins % 2 == 0, "the total number of spins is not an even number"
        assert shift == 0 or shift == 1, "Invalid entry of the shift value"
        terms = int(tot_spins/2)
        row = np.zeros([terms, 1])
        mylist = []
        icount = -1
        RecurNum.RecurChainRL4(row, tot_spins, icount, mylist, shift)
        mylist = np.array(mylist)
        state = np.zeros([2**tot_spins])
        factor = 1/math.sqrt(2)
        len_mylist = len(mylist)
        # a negative index from RecurNum encodes a minus-sign amplitude
        for x in range(0, len_mylist):
            if mylist.item(x) < 0:
                state[-mylist.item(x)] = -factor**terms
            elif mylist.item(x) >= 0:
                state[mylist.item(x)] = factor**terms
        return (state)

    def nGHZ(self, tot_spins=3):
        """
        Construct the N-qubit GHZ state (|0...0> + |1...1>)/sqrt(2).
        Input:
            tot_spins: total number of spins, must be >= 3
        Output:
            state: N-qubit GHZ state vector of size 2**tot_spins
        """
        assert tot_spins >= 3, "Total number of spins are less than 3"
        state = np.zeros([2**tot_spins])
        state[0] = 1/np.sqrt(2)
        state[(2**tot_spins) - 1] = 1/np.sqrt(2)
        return state

    def nW(self, tot_spins=3):
        """
        Construct the N-qubit W state (equal superposition of single excitations).
        Input:
            tot_spins: total number of spins, must be >= 3
        Output:
            state: N-qubit W state vector of size 2**tot_spins
        """
        assert tot_spins >= 3, "Total number of spins are less than 3"
        state = np.zeros([2**tot_spins])
        # basis index 2**i has exactly one spin flipped
        for i in range(0, tot_spins):
            state[2**i] = 1/np.sqrt(tot_spins)
        return state

    def nWerner(self, p, tot_spins=2):
        """
        Construct the N-qubit Werner state p*|psi><psi| + (1-p)*I/2**N,
        where |psi> is |bell3> for two spins and the GHZ state otherwise.
        Input:
            p: the mixing probability
            tot_spins: total number of spins, must be >= 2
        Output:
            rho: N-qubit Werner density matrix
        """
        assert tot_spins >= 2, "Total number of spins are less than 2"
        qobj = QM()
        if tot_spins == 2:
            state = self.bell3()
        else:
            state = self.nGHZ(tot_spins=tot_spins)
        den = qobj.outer_product_rvec(state, state)
        identity = np.identity(2**tot_spins, dtype='float64')
        identity = identity*(1/(2**tot_spins))
        rho = (p*den) + ((1 - p)*identity)
        return rho

    def shannon_entropy(self, pvec):
        """
        Calculate the Shannon entropy -sum_i p_i log2(p_i).
        Input:
            pvec: column vector of probabilities, each in [0, 1]
        Output:
            se: the Shannon entropy value
        """
        size = pvec.shape[0]
        se = 0.0
        for i in range(0, size):
            assert pvec[i] <= 1 and pvec[i] >= 0, "probability values are incorrect"
            # Fix: zero probabilities are valid; use the convention
            # 0*log2(0) = 0 instead of letting math.log2(0) raise.
            if pvec[i] > 0:
                se = se - (pvec[i]*math.log2(pvec[i]))
        return se

    def linear_entropy(self, rho):
        """
        Calculate the linear entropy 1 - Tr(rho**2).
        Input:
            rho: the density matrix (must have unit trace)
        Output:
            le: the linear entropy value
        """
        tr = np.trace(rho)
        assert np.allclose(abs(tr), 1), "density matrix is not correct"
        tr2 = np.trace(np.matmul(rho, rho))
        le = 1.0 - abs(tr2)
        return le

    def relative_entropy(self, rho, sigma):
        """
        Calculate the relative entropy Tr(rho log rho) - Tr(rho log sigma).
        Input:
            rho: input density matrix
            sigma: input density matrix
        Output:
            rtent: the value of the relative entropy
        """
        laobj = LA()
        typerho = str(rho.dtype)
        typesig = str(sigma.dtype)
        # choose the symmetric- or Hermitian-matrix logarithm by dtype
        if re.findall('^float|^int', typerho):
            logrho = laobj.function_smatrix(rho, mode="log",
                                            log_base=math.exp(1))
        elif re.findall("^complex", typerho):
            logrho = laobj.function_hmatrix(rho, mode="log",
                                            log_base=math.exp(1))
        if re.findall('^float|^int', typesig):
            logsig = laobj.function_smatrix(sigma, mode="log",
                                            log_base=math.exp(1))
        elif re.findall("^complex", typesig):
            logsig = laobj.function_hmatrix(sigma, mode="log",
                                            log_base=math.exp(1))
        rtent = np.trace(np.matmul(rho, logrho)) - np.trace(np.matmul(rho, logsig))
        rtent = abs(rtent)
        return rtent

    def trace_distance(self, rho, sigma):
        """
        Calculate the trace distance ||rho - sigma||_1 / 2.
        Input:
            rho: input density matrix
            sigma: input density matrix
        Output:
            trd: the trace distance
        """
        res = rho - sigma
        laobj = LA()
        typeres = str(res.dtype)
        if re.findall('^float|^int', typeres):
            trd = laobj.trace_norm_rmatrix(res)
            trd = trd/2
        elif re.findall("^complex", typeres):
            trd = laobj.trace_norm_cmatrix(res)
            trd = trd/2
        return trd

    def fidelity_den2(self, rho, sigma):
        """
        Calculate the fidelity (||sqrt(rho) sqrt(sigma)||_1)**2 between
        two density matrices.
        Input:
            rho: input density matrix
            sigma: input density matrix
        Output:
            fidelity: the value of the fidelity
        """
        laobj = LA()
        typerho = str(rho.dtype)
        typesig = str(sigma.dtype)
        flag = 0
        if re.findall('^float|^int', typerho):
            rhosq = laobj.power_smatrix(rho, 0.5)
        elif re.findall("^complex", typerho):
            rhosq = laobj.power_hmatrix(rho, 0.5)
            flag = 1
        if re.findall('^float|^int', typesig):
            sigsq = laobj.power_smatrix(sigma, 0.5)
        elif re.findall("^complex", typesig):
            sigsq = laobj.power_hmatrix(sigma, 0.5)
            flag = 1
        # flag selects the real or complex trace-norm routine
        if flag == 0:
            fidelity = laobj.trace_norm_rmatrix(np.matmul(rhosq, sigsq))
            fidelity = fidelity**2
        else:
            fidelity = laobj.trace_norm_cmatrix(np.matmul(rhosq, sigsq))
            fidelity = fidelity**2
        return fidelity

    def fidelity_vec2(self, vecrho, vecsigma):
        """
        Calculate the fidelity |<vecrho|vecsigma>|**2 between two pure states.
        Input:
            vecrho: input pure state vector
            vecsigma: input pure state vector
        Output:
            fidelity: the value of the fidelity
        """
        typerho = str(vecrho.dtype)
        if re.findall('^complex', typerho):
            fidelity = np.matmul(np.matrix.conjugate(np.matrix.transpose(vecrho)),
                                 vecsigma)
        else:
            fidelity = np.matmul(np.matrix.transpose(vecrho), vecsigma)
        fidelity = abs(fidelity)**2
        return fidelity

    def fidelity_vecden(self, vec, sigma):
        """
        Calculate the fidelity <vec|sigma|vec> between a pure state and a
        density matrix.
        Input:
            vec: input pure state vector
            sigma: input density matrix
        Output:
            fidelity: the value of the fidelity
        """
        typevec = str(vec.dtype)
        if re.findall('^complex', typevec):
            fidelity = np.matmul(np.matrix.conjugate(np.matrix.transpose(vec)),
                                 np.matmul(sigma, vec))
        else:
            fidelity = np.matmul(np.matrix.transpose(vec),
                                 np.matmul(sigma, vec))
        fidelity = abs(fidelity)
        return fidelity

    def super_fidelity(self, rho, sigma):
        """
        Calculate the super fidelity
        Tr(rho sigma) + sqrt((1 - Tr(rho**2))(1 - Tr(sigma**2))).
        Input:
            rho: input density matrix
            sigma: input density matrix
        Output:
            sf: the value of the super fidelity
        """
        tr_rho2 = np.trace(np.matmul(rho, rho))
        tr_sigma2 = np.trace(np.matmul(sigma, sigma))
        tr_rhosigma = np.trace(np.matmul(rho, sigma))
        sf = tr_rhosigma + np.sqrt((1 - tr_rho2)*(1 - tr_sigma2))
        sf = abs(sf)
        return sf

    def bures_distance_vec(self, rho, sigma):
        """
        Calculate the Bures distance sqrt(2(1 - sqrt(F))) between two
        pure states.
        Input:
            rho: input state vector
            sigma: input state vector
        Output:
            bd: the value of the Bures distance
        """
        fid = self.fidelity_vec2(rho, sigma)
        bd = np.sqrt(2*(1 - np.sqrt(fid)))
        return bd

    def bures_distance_den(self, rho, sigma):
        """
        Calculate the Bures distance sqrt(2(1 - sqrt(F))) between two
        density matrices.
        Input:
            rho: input density matrix
            sigma: input density matrix
        Output:
            bd: the value of the Bures distance
        """
        fid = self.fidelity_den2(rho, sigma)
        bd = np.sqrt(2*(1 - np.sqrt(fid)))
        return bd

    def expectation_vec(self, vec, obs):
        """
        Calculate the expectation value <vec|obs|vec> of an observable in
        a pure state.
        Input:
            vec: input state vector
            obs: observable operator
        Output:
            expc: the expectation value of the observable
        """
        typevec = str(vec.dtype)
        if re.findall('^complex', typevec):
            expc = np.matmul(np.matmul(np.matrix.conjugate(np.matrix.transpose(vec)),
                                        obs), vec)
        else:
            expc = np.matmul(np.matmul(np.matrix.transpose(vec), obs), vec)
        return expc

    def expectation_den(self, rho, obs):
        """
        Calculate the expectation value Tr(rho obs) of an observable in a
        mixed state.
        Input:
            rho: input density matrix
            obs: observable operator
        Output:
            the expectation value of the observable
        """
        return np.trace(np.matmul(rho, obs))
| [
"numpy.trace",
"QuantumInformation.RecurNum.RecurChainRL4",
"QuantumInformation.LinearAlgebra",
"numpy.identity",
"re.findall",
"numpy.kron",
"QuantumInformation.RecurNum.RecurChainRL3",
"QuantumInformation.RecurNum.RecurChainRL2",
"math.sqrt",
"numpy.matrix.transpose",
"cmath.exp",
"math.log2... | [((754, 780), 'numpy.zeros', 'np.zeros', (['[2 ** N, 2 ** N]'], {}), '([2 ** N, 2 ** N])\n', (762, 780), True, 'import numpy as np\n'), ((1698, 1725), 'numpy.array', 'np.array', (['[[1, 0], [0, -1]]'], {}), '([[1, 0], [0, -1]])\n', (1706, 1725), True, 'import numpy as np\n'), ((3947, 4013), 'numpy.array', 'np.array', (['[[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 0, 1], [0, 0, 1, 0]]'], {}), '([[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 0, 1], [0, 0, 1, 0]])\n', (3955, 4013), True, 'import numpy as np\n'), ((4112, 4179), 'numpy.array', 'np.array', (['[[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0], [0, 0, 0, -1]]'], {}), '([[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0], [0, 0, 0, -1]])\n', (4120, 4179), True, 'import numpy as np\n'), ((4274, 4340), 'numpy.array', 'np.array', (['[[1, 0, 0, 0], [0, 0, 1, 0], [0, 1, 0, 0], [0, 0, 0, 1]]'], {}), '([[1, 0, 0, 0], [0, 0, 1, 0], [0, 1, 0, 0], [0, 0, 0, 1]])\n', (4282, 4340), True, 'import numpy as np\n'), ((4441, 4668), 'numpy.array', 'np.array', (['[[1, 0, 0, 0, 0, 0, 0, 0], [0, 1, 0, 0, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0, 0, \n 0], [0, 0, 0, 1, 0, 0, 0, 0], [0, 0, 0, 0, 1, 0, 0, 0], [0, 0, 0, 0, 0,\n 1, 0, 0], [0, 0, 0, 0, 0, 0, 0, 1], [0, 0, 0, 0, 0, 0, 1, 0]]'], {}), '([[1, 0, 0, 0, 0, 0, 0, 0], [0, 1, 0, 0, 0, 0, 0, 0], [0, 0, 1, 0, \n 0, 0, 0, 0], [0, 0, 0, 1, 0, 0, 0, 0], [0, 0, 0, 0, 1, 0, 0, 0], [0, 0,\n 0, 0, 0, 1, 0, 0], [0, 0, 0, 0, 0, 0, 0, 1], [0, 0, 0, 0, 0, 0, 1, 0]])\n', (4449, 4668), True, 'import numpy as np\n'), ((4793, 5020), 'numpy.array', 'np.array', (['[[1, 0, 0, 0, 0, 0, 0, 0], [0, 1, 0, 0, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0, 0, \n 0], [0, 0, 0, 1, 0, 0, 0, 0], [0, 0, 0, 0, 1, 0, 0, 0], [0, 0, 0, 0, 0,\n 0, 1, 0], [0, 0, 0, 0, 0, 1, 0, 0], [0, 0, 0, 0, 0, 0, 0, 1]]'], {}), '([[1, 0, 0, 0, 0, 0, 0, 0], [0, 1, 0, 0, 0, 0, 0, 0], [0, 0, 1, 0, \n 0, 0, 0, 0], [0, 0, 0, 1, 0, 0, 0, 0], [0, 0, 0, 0, 1, 0, 0, 0], [0, 0,\n 0, 0, 0, 0, 1, 0], [0, 0, 0, 0, 0, 1, 0, 0], [0, 0, 0, 0, 0, 0, 0, 1]])\n', (4801, 5020), True, 
'import numpy as np\n'), ((5627, 5647), 'numpy.zeros', 'np.zeros', (['[terms, 1]'], {}), '([terms, 1])\n', (5635, 5647), True, 'import numpy as np\n'), ((5691, 5752), 'QuantumInformation.RecurNum.RecurChainRL1', 'RecurNum.RecurChainRL1', (['row', 'tot_spins', 'icount', 'mylist', 'shift'], {}), '(row, tot_spins, icount, mylist, shift)\n', (5713, 5752), False, 'from QuantumInformation import RecurNum\n'), ((5764, 5780), 'numpy.array', 'np.array', (['mylist'], {}), '(mylist)\n', (5772, 5780), True, 'import numpy as np\n'), ((5795, 5821), 'numpy.zeros', 'np.zeros', (['[2 ** tot_spins]'], {}), '([2 ** tot_spins])\n', (5803, 5821), True, 'import numpy as np\n'), ((6604, 6624), 'numpy.zeros', 'np.zeros', (['[terms, 1]'], {}), '([terms, 1])\n', (6612, 6624), True, 'import numpy as np\n'), ((6668, 6729), 'QuantumInformation.RecurNum.RecurChainRL2', 'RecurNum.RecurChainRL2', (['row', 'tot_spins', 'icount', 'mylist', 'shift'], {}), '(row, tot_spins, icount, mylist, shift)\n', (6690, 6729), False, 'from QuantumInformation import RecurNum\n'), ((6741, 6757), 'numpy.array', 'np.array', (['mylist'], {}), '(mylist)\n', (6749, 6757), True, 'import numpy as np\n'), ((6772, 6798), 'numpy.zeros', 'np.zeros', (['[2 ** tot_spins]'], {}), '([2 ** tot_spins])\n', (6780, 6798), True, 'import numpy as np\n'), ((7698, 7715), 'numpy.zeros', 'np.zeros', (['[terms]'], {}), '([terms])\n', (7706, 7715), True, 'import numpy as np\n'), ((7760, 7821), 'QuantumInformation.RecurNum.RecurChainRL3', 'RecurNum.RecurChainRL3', (['row', 'tot_spins', 'icount', 'mylist', 'shift'], {}), '(row, tot_spins, icount, mylist, shift)\n', (7782, 7821), False, 'from QuantumInformation import RecurNum\n'), ((7833, 7849), 'numpy.array', 'np.array', (['mylist'], {}), '(mylist)\n', (7841, 7849), True, 'import numpy as np\n'), ((7864, 7890), 'numpy.zeros', 'np.zeros', (['[2 ** tot_spins]'], {}), '([2 ** tot_spins])\n', (7872, 7890), True, 'import numpy as np\n'), ((8663, 8683), 'numpy.zeros', 'np.zeros', (['[terms, 1]'], 
{}), '([terms, 1])\n', (8671, 8683), True, 'import numpy as np\n'), ((8727, 8788), 'QuantumInformation.RecurNum.RecurChainRL4', 'RecurNum.RecurChainRL4', (['row', 'tot_spins', 'icount', 'mylist', 'shift'], {}), '(row, tot_spins, icount, mylist, shift)\n', (8749, 8788), False, 'from QuantumInformation import RecurNum\n'), ((8800, 8816), 'numpy.array', 'np.array', (['mylist'], {}), '(mylist)\n', (8808, 8816), True, 'import numpy as np\n'), ((8831, 8857), 'numpy.zeros', 'np.zeros', (['[2 ** tot_spins]'], {}), '([2 ** tot_spins])\n', (8839, 8857), True, 'import numpy as np\n'), ((9524, 9550), 'numpy.zeros', 'np.zeros', (['[2 ** tot_spins]'], {}), '([2 ** tot_spins])\n', (9532, 9550), True, 'import numpy as np\n'), ((10019, 10045), 'numpy.zeros', 'np.zeros', (['[2 ** tot_spins]'], {}), '([2 ** tot_spins])\n', (10027, 10045), True, 'import numpy as np\n'), ((10575, 10579), 'QuantumInformation.QuantumMechanics', 'QM', ([], {}), '()\n', (10577, 10579), True, 'from QuantumInformation import QuantumMechanics as QM\n'), ((10767, 10811), 'numpy.identity', 'np.identity', (['(2 ** tot_spins)'], {'dtype': '"""float64"""'}), "(2 ** tot_spins, dtype='float64')\n", (10778, 10811), True, 'import numpy as np\n'), ((11606, 11619), 'numpy.trace', 'np.trace', (['rho'], {}), '(rho)\n', (11614, 11619), True, 'import numpy as np\n'), ((12053, 12057), 'QuantumInformation.LinearAlgebra', 'LA', ([], {}), '()\n', (12055, 12057), True, 'from QuantumInformation import LinearAlgebra as LA\n'), ((12133, 12167), 're.findall', 're.findall', (['"""^float|^int"""', 'typerho'], {}), "('^float|^int', typerho)\n", (12143, 12167), False, 'import re\n'), ((12472, 12506), 're.findall', 're.findall', (['"""^float|^int"""', 'typesig'], {}), "('^float|^int', typesig)\n", (12482, 12506), False, 'import re\n'), ((13251, 13255), 'QuantumInformation.LinearAlgebra', 'LA', ([], {}), '()\n', (13253, 13255), True, 'from QuantumInformation import LinearAlgebra as LA\n'), ((13298, 13332), 're.findall', 're.findall', 
(['"""^float|^int"""', 'typeres'], {}), "('^float|^int', typeres)\n", (13308, 13332), False, 'import re\n'), ((13835, 13839), 'QuantumInformation.LinearAlgebra', 'LA', ([], {}), '()\n', (13837, 13839), True, 'from QuantumInformation import LinearAlgebra as LA\n'), ((13930, 13964), 're.findall', 're.findall', (['"""^float|^int"""', 'typerho'], {}), "('^float|^int', typerho)\n", (13940, 13964), False, 'import re\n'), ((14134, 14168), 're.findall', 're.findall', (['"""^float|^int"""', 'typesig'], {}), "('^float|^int', typesig)\n", (14144, 14168), False, 'import re\n'), ((14950, 14981), 're.findall', 're.findall', (['"""^complex"""', 'typerho'], {}), "('^complex', typerho)\n", (14960, 14981), False, 'import re\n'), ((15632, 15663), 're.findall', 're.findall', (['"""^complex"""', 'typevec'], {}), "('^complex', typevec)\n", (15642, 15663), False, 'import re\n'), ((17694, 17725), 're.findall', 're.findall', (['"""^complex"""', 'typevec'], {}), "('^complex', typevec)\n", (17704, 17725), False, 'import re\n'), ((5837, 5849), 'math.sqrt', 'math.sqrt', (['(2)'], {}), '(2)\n', (5846, 5849), False, 'import math\n'), ((6814, 6826), 'math.sqrt', 'math.sqrt', (['(2)'], {}), '(2)\n', (6823, 6826), False, 'import math\n'), ((7906, 7918), 'math.sqrt', 'math.sqrt', (['(2)'], {}), '(2)\n', (7915, 7918), False, 'import math\n'), ((8873, 8885), 'math.sqrt', 'math.sqrt', (['(2)'], {}), '(2)\n', (8882, 8885), False, 'import math\n'), ((9568, 9578), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (9575, 9578), True, 'import numpy as np\n'), ((9613, 9623), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (9620, 9623), True, 'import numpy as np\n'), ((11712, 11731), 'numpy.matmul', 'np.matmul', (['rho', 'rho'], {}), '(rho, rho)\n', (11721, 11731), True, 'import numpy as np\n'), ((12305, 12336), 're.findall', 're.findall', (['"""^complex"""', 'typerho'], {}), "('^complex', typerho)\n", (12315, 12336), False, 'import re\n'), ((12646, 12677), 're.findall', 're.findall', (['"""^complex"""', 
'typesig'], {}), "('^complex', typesig)\n", (12656, 12677), False, 'import re\n'), ((13414, 13445), 're.findall', 're.findall', (['"""^complex"""', 'typeres'], {}), "('^complex', typeres)\n", (13424, 13445), False, 'import re\n'), ((14025, 14056), 're.findall', 're.findall', (['"""^complex"""', 'typerho'], {}), "('^complex', typerho)\n", (14035, 14056), False, 'import re\n'), ((14231, 14262), 're.findall', 're.findall', (['"""^complex"""', 'typesig'], {}), "('^complex', typesig)\n", (14241, 14262), False, 'import re\n'), ((16329, 16348), 'numpy.matmul', 'np.matmul', (['rho', 'rho'], {}), '(rho, rho)\n', (16338, 16348), True, 'import numpy as np\n'), ((16376, 16399), 'numpy.matmul', 'np.matmul', (['sigma', 'sigma'], {}), '(sigma, sigma)\n', (16385, 16399), True, 'import numpy as np\n'), ((16429, 16450), 'numpy.matmul', 'np.matmul', (['rho', 'sigma'], {}), '(rho, sigma)\n', (16438, 16450), True, 'import numpy as np\n'), ((16474, 16514), 'numpy.sqrt', 'np.sqrt', (['((1 - tr_rho2) * (1 - tr_sigma2))'], {}), '((1 - tr_rho2) * (1 - tr_sigma2))\n', (16481, 16514), True, 'import numpy as np\n'), ((18322, 18341), 'numpy.matmul', 'np.matmul', (['rho', 'obs'], {}), '(rho, obs)\n', (18331, 18341), True, 'import numpy as np\n'), ((10107, 10125), 'numpy.sqrt', 'np.sqrt', (['tot_spins'], {}), '(tot_spins)\n', (10114, 10125), True, 'import numpy as np\n'), ((12831, 12853), 'numpy.matmul', 'np.matmul', (['rho', 'logrho'], {}), '(rho, logrho)\n', (12840, 12853), True, 'import numpy as np\n'), ((12863, 12885), 'numpy.matmul', 'np.matmul', (['rho', 'logsig'], {}), '(rho, logsig)\n', (12872, 12885), True, 'import numpy as np\n'), ((14397, 14420), 'numpy.matmul', 'np.matmul', (['rhosq', 'sigsq'], {}), '(rhosq, sigsq)\n', (14406, 14420), True, 'import numpy as np\n'), ((14514, 14537), 'numpy.matmul', 'np.matmul', (['rhosq', 'sigsq'], {}), '(rhosq, sigsq)\n', (14523, 14537), True, 'import numpy as np\n'), ((15183, 15210), 'numpy.matrix.transpose', 'np.matrix.transpose', (['vecrho'], {}), 
'(vecrho)\n', (15202, 15210), True, 'import numpy as np\n'), ((15807, 15828), 'numpy.matmul', 'np.matmul', (['sigma', 'vec'], {}), '(sigma, vec)\n', (15816, 15828), True, 'import numpy as np\n'), ((15874, 15898), 'numpy.matrix.transpose', 'np.matrix.transpose', (['vec'], {}), '(vec)\n', (15893, 15898), True, 'import numpy as np\n'), ((15932, 15953), 'numpy.matmul', 'np.matmul', (['sigma', 'vec'], {}), '(sigma, vec)\n', (15941, 15953), True, 'import numpy as np\n'), ((1282, 1307), 'numpy.kron', 'np.kron', (['sigmay2', 'sigmay2'], {}), '(sigmay2, sigmay2)\n', (1289, 1307), True, 'import numpy as np\n'), ((1827, 1852), 'numpy.kron', 'np.kron', (['sigmaz2', 'sigmaz2'], {}), '(sigmaz2, sigmaz2)\n', (1834, 1852), True, 'import numpy as np\n'), ((2446, 2475), 'numpy.kron', 'np.kron', (['hadamard2', 'hadamard2'], {}), '(hadamard2, hadamard2)\n', (2453, 2475), True, 'import numpy as np\n'), ((3043, 3068), 'numpy.kron', 'np.kron', (['phaseg2', 'phaseg2'], {}), '(phaseg2, phaseg2)\n', (3050, 3068), True, 'import numpy as np\n'), ((3556, 3568), 'cmath.exp', 'cmath.exp', (['z'], {}), '(z)\n', (3565, 3568), False, 'import cmath\n'), ((3674, 3695), 'numpy.kron', 'np.kron', (['rotg2', 'rotg2'], {}), '(rotg2, rotg2)\n', (3681, 3695), True, 'import numpy as np\n'), ((11345, 11363), 'math.log2', 'math.log2', (['pvec[i]'], {}), '(pvec[i])\n', (11354, 11363), False, 'import math\n'), ((12279, 12290), 'math.exp', 'math.exp', (['(1)'], {}), '(1)\n', (12287, 12290), False, 'import math\n'), ((12620, 12631), 'math.exp', 'math.exp', (['(1)'], {}), '(1)\n', (12628, 12631), False, 'import math\n'), ((15066, 15093), 'numpy.matrix.transpose', 'np.matrix.transpose', (['vecrho'], {}), '(vecrho)\n', (15085, 15093), True, 'import numpy as np\n'), ((15748, 15772), 'numpy.matrix.transpose', 'np.matrix.transpose', (['vec'], {}), '(vec)\n', (15767, 15772), True, 'import numpy as np\n'), ((16910, 16922), 'numpy.sqrt', 'np.sqrt', (['fid'], {}), '(fid)\n', (16917, 16922), True, 'import numpy as np\n'), 
((17309, 17321), 'numpy.sqrt', 'np.sqrt', (['fid'], {}), '(fid)\n', (17316, 17321), True, 'import numpy as np\n'), ((17936, 17960), 'numpy.matrix.transpose', 'np.matrix.transpose', (['vec'], {}), '(vec)\n', (17955, 17960), True, 'import numpy as np\n'), ((1363, 1387), 'numpy.kron', 'np.kron', (['sigmay', 'sigmay2'], {}), '(sigmay, sigmay2)\n', (1370, 1387), True, 'import numpy as np\n'), ((1908, 1932), 'numpy.kron', 'np.kron', (['sigmaz', 'sigmaz2'], {}), '(sigmaz, sigmaz2)\n', (1915, 1932), True, 'import numpy as np\n'), ((2255, 2265), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (2262, 2265), True, 'import numpy as np\n'), ((2268, 2278), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (2275, 2278), True, 'import numpy as np\n'), ((2312, 2322), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (2319, 2322), True, 'import numpy as np\n'), ((2326, 2336), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (2333, 2336), True, 'import numpy as np\n'), ((2533, 2561), 'numpy.kron', 'np.kron', (['hadamard', 'hadamard2'], {}), '(hadamard, hadamard2)\n', (2540, 2561), True, 'import numpy as np\n'), ((3124, 3148), 'numpy.kron', 'np.kron', (['phaseg', 'phaseg2'], {}), '(phaseg, phaseg2)\n', (3131, 3148), True, 'import numpy as np\n'), ((3749, 3769), 'numpy.kron', 'np.kron', (['rotg', 'rotg2'], {}), '(rotg, rotg2)\n', (3756, 3769), True, 'import numpy as np\n'), ((12448, 12459), 'math.exp', 'math.exp', (['(1)'], {}), '(1)\n', (12456, 12459), False, 'import math\n'), ((12791, 12802), 'math.exp', 'math.exp', (['(1)'], {}), '(1)\n', (12799, 12802), False, 'import math\n'), ((17816, 17840), 'numpy.matrix.transpose', 'np.matrix.transpose', (['vec'], {}), '(vec)\n', (17835, 17840), True, 'import numpy as np\n')] |
import logging
from ROAR.agent_module.agent import Agent
from ROAR.utilities_module.data_structures_models import SensorsData, Transform, Location
from ROAR.utilities_module.vehicle_models import Vehicle, VehicleControl
from ROAR.configurations.configuration import Configuration as AgentConfig
import cv2
import numpy as np
from enum import Enum
from typing import Optional, List, Tuple
from collections import deque
from ROAR.control_module.real_world_image_based_pid_controller import RealWorldImageBasedPIDController as ImageBasedPIDController
import plotly.graph_objects as go
from plotly.subplots import make_subplots
from pathlib import Path
from datetime import datetime
import requests
import open3d as o3d
from ROAR.perception_module.depth_to_pointcloud_detector import DepthToPointCloudDetector
class PointcloudRecordingAgent(Agent):
    def __init__(self, vehicle: Vehicle, agent_settings: AgentConfig, **kwargs):
        """Agent that saves a point cloud every frame and folds the ground
        points into a 2-D occupancy map.

        Args:
            vehicle: vehicle model handed in by the runner.
            agent_settings: agent configuration; its pid_config_file_path is
                redirected to the iOS-specific config in the same folder.
            **kwargs: forwarded unchanged to the Agent base class.
        """
        super().__init__(vehicle, agent_settings, **kwargs)
        self.prev_steerings: deque = deque(maxlen=10)  # last 10 steering commands
        # replace the configured PID file with the iOS-specific one next to it
        self.agent_settings.pid_config_file_path = (Path(self.agent_settings.pid_config_file_path).parent /
                                                    "iOS_pid_config.json").as_posix()
        self.controller = ImageBasedPIDController(agent=self)
        # START LOC -- lap-start bookkeeping (not read by the methods visible in this file)
        self.start_loc: Optional[Transform] = None
        self.start_loc_bound: float = 0.2
        self.has_exited_start_loc: bool = False
        # STOP Mid step -- presumably the remote device endpoint; TODO confirm
        self.ip_addr = "10.0.0.2"
        # Waypoint Following state
        self.waypoints: List[Transform] = []
        self.curr_waypoint_index = 0
        self.closeness_threshold = 0.4
        # occupancy grid map
        # point cloud visualization (disabled; kept for reference)
        # self.vis = o3d.visualization.Visualizer()
        # self.vis.create_window(width=500, height=500)
        # self.pcd = o3d.geometry.PointCloud()
        # self.coordinate_frame = o3d.geometry.TriangleMesh.create_coordinate_frame()
        # self.points_added = False
        # pointcloud and ground plane detection parameters
        self.depth2pointcloud = DepthToPointCloudDetector(agent=self)
        self.max_dist = 1.5
        self.height_threshold = 1
        self.ransac_dist_threshold = 0.01
        self.ransac_n = 3
        self.ransac_itr = 100
        # NOTE(review): Map is not among this file's visible imports -- confirm where it comes from
        self.waypoint_map: Optional[Map] = None
        buffer = 10  # NOTE(review): unused local; the Map() call below hardcodes buffer=10
        x_scale = 20
        y_scale = 20
        x_offset = 100
        y_offset = 100
        self.occu_map = Map(
            x_offset=x_offset, y_offset=y_offset, x_scale=x_scale, y_scale=y_scale,
            x_width=2500, y_height=2500, buffer=10, name="occupancy map"
        )
        # BGR render buffer with the same height/width as the occupancy map
        self.m = np.zeros(shape=(self.occu_map.map.shape[0], self.occu_map.map.shape[1], 3))
    def run_step(self, sensors_data: SensorsData, vehicle: Vehicle) -> VehicleControl:
        """Per-frame step: dump the current point cloud to disk, fold its
        ground points into the occupancy map, and render the map.

        Args:
            sensors_data: latest sensor bundle (consumed by the base class).
            vehicle: latest vehicle state (consumed by the base class).

        Returns:
            A default VehicleControl -- this agent only records; it does not drive.
        """
        super(PointcloudRecordingAgent, self).run_step(sensors_data, vehicle)
        if self.front_rgb_camera.data is not None and self.front_depth_camera.data is not None:
            self.prev_steerings.append(self.vehicle.control.steering)
            try:
                # depth + rgb -> colored point cloud
                pcd: o3d.geometry.PointCloud = self.depth2pointcloud.run_in_series(self.front_depth_camera.data,
                                                                                   self.front_rgb_camera.data)
                # persist every frame as a timestamped .pcd file
                folder_name = Path("./data/pointcloud")
                folder_name.mkdir(parents=True, exist_ok=True)
                o3d.io.write_point_cloud((folder_name / f"{datetime.now().strftime('%m_%d_%Y_%H_%M_%S_%f')}.pcd").as_posix(),
                                         pcd, print_progress=True)
                # keep only ground points, then project onto the world x/z plane
                pcd = self.filter_ground(pcd)
                points = np.asarray(pcd.points)
                new_points = np.copy(points)
                points = np.vstack([new_points[:, 0], new_points[:, 2]]).T  # shape (N, 2)
                self.occu_map.update(points, val=1)
                # draw occupied cells white and the vehicle as a 4x4 blue square
                coord = self.occu_map.world_loc_to_occu_map_coord(loc=self.vehicle.transform.location)
                self.m[np.where(self.occu_map.map == 1)] = [255, 255, 255]
                self.m[coord[1] - 2:coord[1] + 2, coord[0] - 2:coord[0] + 2] = [0, 0, 255]
                # NOTE(review): no cv2.waitKey here -- display refresh presumably happens elsewhere; confirm
                cv2.imshow("m", self.m)
            except Exception as e:
                # best-effort per frame: one bad frame must not kill the run loop
                print(e)
        return VehicleControl()
@staticmethod
def load_data(file_path: str) -> List[Transform]:
waypoints = []
f = Path(file_path).open('r')
for line in f.readlines():
x, y, z = line.split(",")
x, y, z = float(x), float(y), float(z)
l = Location(x=x, y=y, z=z)
waypoints.append(Transform(location=l))
return waypoints
    def filter_ground(self, pcd: o3d.geometry.PointCloud, max_dist: float = -1, height_threshold=0.1,
                      ransac_dist_threshold=0.01, ransac_n=3, ransac_itr=100) -> o3d.geometry.PointCloud:
        """
        Extract the ground plane from a point cloud.

        The cloud is first cut down to points whose y coordinate is below a
        hardcoded 0.3, then the dominant plane is detected with RANSAC and
        its inliers are voxel-down-sampled at 0.01.

        NOTE(review): max_dist and height_threshold are accepted but never
        used -- the height cut below is the literal 0.3. Either wire them in
        or drop them; the previous docstring wrongly claimed both were applied.

        Args:
            pcd: point cloud to be parsed (modified in place before RANSAC)
            max_dist: (unused) maximum distance
            height_threshold: (unused) additional height padding
            ransac_dist_threshold: RANSAC distance threshold
            ransac_n: RANSAC starting number of points
            ransac_itr: RANSAC number of iterations

        Returns:
            point cloud containing only the detected ground plane.
        """
        points = np.asarray(pcd.points)
        colors = np.asarray(pcd.colors)
        # height filter
        # axis 0 -> left/right | 1 -> up/down | 2 -> close/far
        points_of_interest = np.where((points[:, 1] < 0.3))
        points = points[points_of_interest]
        colors = colors[points_of_interest]
        pcd.points = o3d.utility.Vector3dVector(points)
        pcd.colors = o3d.utility.Vector3dVector(colors)
        plane_model, inliers = pcd.segment_plane(distance_threshold=ransac_dist_threshold,
                                                 ransac_n=ransac_n,
                                                 num_iterations=ransac_itr)
        pcd: o3d.geometry.PointCloud = pcd.select_by_index(inliers)
        pcd = pcd.voxel_down_sample(0.01)
        return pcd
def waypoint_visualize(self,
map_data: np.ndarray,
name: str = "waypoint_visualization",
car_location: Optional[Location] = None,
next_waypoint_location: Optional[Location] = None):
m = np.zeros(shape=(map_data.shape[0], map_data.shape[1], 3))
m[np.where(map_data > 0.9)] = [255, 255, 255]
point_size = 2
if car_location is not None:
coord = self.waypoint_map.world_loc_to_occu_map_coord(car_location)
m[coord[1] - point_size:coord[1] + point_size, coord[0] - point_size:coord[0] + point_size] = [0, 0, 255]
if next_waypoint_location is not None:
coord = self.waypoint_map.world_loc_to_occu_map_coord(next_waypoint_location)
m[coord[1] - point_size:coord[1] + point_size, coord[0] - point_size:coord[0] + point_size] = [0, 255, 0]
cv2.imshow(name, m)
cv2.waitKey(1)
"""
Lane Following
"""
def find_error(self):
# make rgb and depth into the same shape
data: np.ndarray = cv2.resize(self.front_rgb_camera.data.copy(),
dsize=(192, 256))
# cv2.imshow("rgb_mask", cv2.inRange(data, self.rgb_lower_range, self.rgb_upper_range))
data = self.rgb2ycbcr(data)
# cv2.imshow("ycbcr_mask", cv2.inRange(data, self.ycbcr_lower_range, self.ycbcr_upper_range))
# find the lane
error_at_10 = self.find_error_at(data=data,
y_offset=10,
error_scaling=[
(20, 0.1),
(40, 0.75),
(60, 0.8),
(80, 0.9),
(100, 0.95),
(200, 1)
])
error_at_50 = self.find_error_at(data=data,
y_offset=50,
error_scaling=[
(20, 0.2),
(40, 0.4),
(60, 0.7),
(70, 0.7),
(80, 0.7),
(100, 0.8),
(200, 0.8)
]
)
if error_at_10 is None and error_at_50 is None:
return None
# we only want to follow the furthest thing we see.
error = 0
if error_at_10 is not None:
error = error_at_10
if error_at_50 is not None:
error = error_at_50
return error
def find_error_at(self, data, y_offset, error_scaling) -> Optional[float]:
y = data.shape[0] - y_offset
lane_x = []
cv2.imshow("data", data)
# mask_red = cv2.inRange(src=data, lowerb=(0, 150, 60), upperb=(250, 240, 140)) # TERRACE RED
# mask_yellow = cv2.inRange(src=data, lowerb=(0, 130, 0), upperb=(250, 200, 110)) # TERRACE YELLOW
# mask_red = cv2.inRange(src=data, lowerb=(0, 180, 60), upperb=(250, 240, 140)) # CORY 337 RED
# mask_yellow = cv2.inRange(src=data, lowerb=(0, 140, 0), upperb=(250, 200, 80)) # CORY 337 YELLOW
mask_blue = cv2.inRange(src=data, lowerb=(60, 70, 120), upperb=(170, 130, 255)) # SHUWEI BLUE
mask = mask_blue
# mask = mask_red | mask_yellow
# cv2.imshow("Lane Mask (Red)", mask_red)
# cv2.imshow("Lane Mask (Yellow)", mask_yellow)
kernel = np.ones((5, 5), np.uint8)
mask = cv2.erode(mask, kernel, iterations=1)
mask = cv2.dilate(mask, kernel, iterations=1)
cv2.imshow("Lane Mask (mask_blue)", mask)
for x in range(0, data.shape[1], 5):
if mask[y][x] > 0:
lane_x.append(x)
if len(lane_x) == 0:
return None
# if lane is found
avg_x = int(np.average(lane_x))
# find error
center_x = data.shape[1] // 2
error = avg_x - center_x
# we want small error to be almost ignored, only big errors matter.
for e, scale in error_scaling:
if abs(error) <= e:
# print(f"Error at {y_offset} -> {error, scale} -> {error * scale}")
error = error * scale
break
return error
def execute_prev_command(self):
# no lane found, execute the previous control with a decaying factor
if np.average(self.prev_steerings) < 0:
self.vehicle.control.steering = -1
else:
self.vehicle.control.steering = 1
# self.logger.info("Cannot see line, executing prev cmd")
self.prev_steerings.append(self.vehicle.control.steering)
self.vehicle.control.throttle = self.controller.long_pid_control()
# self.logger.info(f"No Lane found, executing discounted prev command: {self.vehicle.control}")
return self.vehicle.control
def rgb2ycbcr(self, im):
xform = np.array([[.299, .587, .114],
[-.1687, -.3313, .5],
[.5, -.4187, -.0813]])
ycbcr = im.dot(xform.T)
ycbcr[:, :, [1, 2]] += 128
return np.uint8(ycbcr)
def no_line_seen(self):
# did not see the line
neutral = -90
incline = self.vehicle.transform.rotation.pitch - neutral
if incline < -10:
# is down slope, execute previous command as-is
# get the PID for downhill
long_control = self.controller.long_pid_control()
self.vehicle.control.throttle = long_control
return self.vehicle.control
else:
# is flat or up slope, execute adjusted previous command
return self.execute_prev_command()
    def non_blocking_pcd_visualization(self, pcd: o3d.geometry.PointCloud,
                                       should_center=False,
                                       should_show_axis=False,
                                       axis_size: float = 1):
        """
        Real time point cloud visualization.
        Args:
            pcd: point cloud to be visualized
            should_center: true to always center the point cloud
            should_show_axis: true to show axis
            axis_size: adjust axis size
        Returns:
            None
        """
        points = np.asarray(pcd.points)
        colors = np.asarray(pcd.colors)
        if should_center:
            # Recenter around the centroid so the cloud stays in view.
            points = points - np.mean(points, axis=0)
        if self.points_added is False:
            # First call: create the geometry and register it with the visualizer.
            self.pcd = o3d.geometry.PointCloud()
            self.pcd.points = o3d.utility.Vector3dVector(points)
            self.pcd.colors = o3d.utility.Vector3dVector(colors)
            if should_show_axis:
                # Axis frame placed at the cloud centroid.
                self.coordinate_frame = o3d.geometry.TriangleMesh.create_coordinate_frame(size=axis_size,
                                                                                          origin=np.mean(points,
                                                                                                          axis=0))
                self.vis.add_geometry(self.coordinate_frame)
            self.vis.add_geometry(self.pcd)
            self.points_added = True
        else:
            # Subsequent calls: mutate the existing geometry in place and
            # tell the visualizer to refresh it.
            # print(np.shape(np.vstack((np.asarray(self.pcd.points), points))))
            self.pcd.points = o3d.utility.Vector3dVector(points)
            self.pcd.colors = o3d.utility.Vector3dVector(colors)
            if should_show_axis:
                self.coordinate_frame = o3d.geometry.TriangleMesh.create_coordinate_frame(size=axis_size,
                                                                                          origin=np.mean(points,
                                                                                                          axis=0))
                self.vis.update_geometry(self.coordinate_frame)
            self.vis.update_geometry(self.pcd)
        # Pump the event loop and redraw without blocking the caller.
        self.vis.poll_events()
        self.vis.update_renderer()
class Map:
    """2-D occupancy-style grid mapping world (x, z) coordinates to pixels.

    Each axis is transformed with ``int((world + offset) * scale) + buffer``;
    the world z axis maps to the grid's y (row) axis.
    """

    def __init__(self,
                 x_offset: float, y_offset: float, x_scale: float, y_scale: float,
                 x_width: int = 5000, y_height: int = 5000, buffer: int = 100,
                 name: str = "map"
                 ):
        self.name = name
        self.x_offset = x_offset
        self.y_offset = y_offset
        self.x_scale = x_scale
        self.y_scale = y_scale
        self.x_width = x_width
        self.y_height = y_height
        self.buffer = buffer
        # Grid indexed as map[y, x]; cells default to 0 (free).
        self.map = np.zeros(shape=(self.y_height, self.x_width))

    def world_loc_to_occu_map_coord(self, loc: "Location") -> Tuple[int, int]:
        """
        Takes in a coordinate in the world reference frame and transform it into the occupancy map coordinate by
        applying the equation
        `int( (WORLD + OFFSET ) * SCALE) + BUFFER`
        Args:
            loc: world-frame location; only ``loc.x`` and ``loc.z`` are read.
        Returns:
            (x, y) pixel coordinate on the occupancy map.
        """
        x = int((loc.x + self.x_offset) * self.x_scale) + self.buffer
        y = int((loc.z + self.y_offset) * self.y_scale) + self.buffer
        return x, y

    def world_arr_to_occu_map(self, arr: np.ndarray) -> np.ndarray:
        """Vectorized coordinate transform.

        Args:
            arr: (N, 2) array of world (x, z) pairs.
        Returns:
            (N, 2) int array of (x, y) map pixel coordinates.
        """
        xs = ((arr[:, 0] + self.x_offset) * self.x_scale + self.buffer).astype(int)
        ys = ((arr[:, 1] + self.y_offset) * self.y_scale + self.buffer).astype(int)
        return np.array([xs, ys]).T

    def update(self, points: np.ndarray, val=1) -> int:
        """Clear the grid and mark the cells covered by ``points`` with ``val``.

        Args:
            val: value to update those points to
            points: 2D numpy array consisting of world X and Z coordinates
        Returns:
            number of points updated
        """
        pixels = self.world_arr_to_occu_map(points)
        self.map = np.zeros(shape=self.map.shape)
        self.map[pixels[:, 1], pixels[:, 0]] = val
        return len(pixels)

    def visualize(self, dsize: Optional[Tuple] = None):
        """Show the map in an OpenCV window, optionally resized to ``dsize``."""
        img = self.map.copy()
        if dsize:
            img = cv2.resize(img, dsize=dsize)
        cv2.imshow(self.name, img)

    @staticmethod
    def filter_outlier(track,
                       min_distance_btw_points: float = 0,
                       max_distance_btw_points: float = 0.2):
        """Drop track points whose step from the last kept point is outside
        (min_distance_btw_points, max_distance_btw_points) on either axis.

        Args:
            track: sequence of (x, z) pairs.
        Returns:
            ndarray of kept points; an empty array for an empty track.
        """
        if len(track) == 0:
            # Guard: the original indexed track[0] and crashed on empty input.
            return np.array([])
        filtered = [track[0]]
        for i in range(1, len(track)):
            x2, z2 = track[i]
            x1, z1 = filtered[-1]
            diff_x, diff_z = abs(x2 - x1), abs(z2 - z1)
            if (min_distance_btw_points < diff_x < max_distance_btw_points
                    and min_distance_btw_points < diff_z < max_distance_btw_points):
                filtered.append([x2, z2])
        return np.array(filtered)
| [
"open3d.geometry.PointCloud",
"numpy.ones",
"pathlib.Path",
"numpy.mean",
"cv2.erode",
"cv2.imshow",
"cv2.inRange",
"collections.deque",
"numpy.copy",
"cv2.dilate",
"ROAR.perception_module.depth_to_pointcloud_detector.DepthToPointCloudDetector",
"datetime.datetime.now",
"cv2.resize",
"ROAR... | [((1026, 1042), 'collections.deque', 'deque', ([], {'maxlen': '(10)'}), '(maxlen=10)\n', (1031, 1042), False, 'from collections import deque\n'), ((1263, 1298), 'ROAR.control_module.real_world_image_based_pid_controller.RealWorldImageBasedPIDController', 'ImageBasedPIDController', ([], {'agent': 'self'}), '(agent=self)\n', (1286, 1298), True, 'from ROAR.control_module.real_world_image_based_pid_controller import RealWorldImageBasedPIDController as ImageBasedPIDController\n'), ((2095, 2132), 'ROAR.perception_module.depth_to_pointcloud_detector.DepthToPointCloudDetector', 'DepthToPointCloudDetector', ([], {'agent': 'self'}), '(agent=self)\n', (2120, 2132), False, 'from ROAR.perception_module.depth_to_pointcloud_detector import DepthToPointCloudDetector\n'), ((2663, 2738), 'numpy.zeros', 'np.zeros', ([], {'shape': '(self.occu_map.map.shape[0], self.occu_map.map.shape[1], 3)'}), '(shape=(self.occu_map.map.shape[0], self.occu_map.map.shape[1], 3))\n', (2671, 2738), True, 'import numpy as np\n'), ((4279, 4295), 'ROAR.utilities_module.vehicle_models.VehicleControl', 'VehicleControl', ([], {}), '()\n', (4293, 4295), False, 'from ROAR.utilities_module.vehicle_models import Vehicle, VehicleControl\n'), ((5716, 5738), 'numpy.asarray', 'np.asarray', (['pcd.points'], {}), '(pcd.points)\n', (5726, 5738), True, 'import numpy as np\n'), ((5756, 5778), 'numpy.asarray', 'np.asarray', (['pcd.colors'], {}), '(pcd.colors)\n', (5766, 5778), True, 'import numpy as np\n'), ((5914, 5942), 'numpy.where', 'np.where', (['(points[:, 1] < 0.3)'], {}), '(points[:, 1] < 0.3)\n', (5922, 5942), True, 'import numpy as np\n'), ((6054, 6088), 'open3d.utility.Vector3dVector', 'o3d.utility.Vector3dVector', (['points'], {}), '(points)\n', (6080, 6088), True, 'import open3d as o3d\n'), ((6110, 6144), 'open3d.utility.Vector3dVector', 'o3d.utility.Vector3dVector', (['colors'], {}), '(colors)\n', (6136, 6144), True, 'import open3d as o3d\n'), ((6817, 6874), 'numpy.zeros', 'np.zeros', ([], 
{'shape': '(map_data.shape[0], map_data.shape[1], 3)'}), '(shape=(map_data.shape[0], map_data.shape[1], 3))\n', (6825, 6874), True, 'import numpy as np\n'), ((7451, 7470), 'cv2.imshow', 'cv2.imshow', (['name', 'm'], {}), '(name, m)\n', (7461, 7470), False, 'import cv2\n'), ((7479, 7493), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (7490, 7493), False, 'import cv2\n'), ((9642, 9666), 'cv2.imshow', 'cv2.imshow', (['"""data"""', 'data'], {}), "('data', data)\n", (9652, 9666), False, 'import cv2\n'), ((10109, 10176), 'cv2.inRange', 'cv2.inRange', ([], {'src': 'data', 'lowerb': '(60, 70, 120)', 'upperb': '(170, 130, 255)'}), '(src=data, lowerb=(60, 70, 120), upperb=(170, 130, 255))\n', (10120, 10176), False, 'import cv2\n'), ((10381, 10406), 'numpy.ones', 'np.ones', (['(5, 5)', 'np.uint8'], {}), '((5, 5), np.uint8)\n', (10388, 10406), True, 'import numpy as np\n'), ((10422, 10459), 'cv2.erode', 'cv2.erode', (['mask', 'kernel'], {'iterations': '(1)'}), '(mask, kernel, iterations=1)\n', (10431, 10459), False, 'import cv2\n'), ((10475, 10513), 'cv2.dilate', 'cv2.dilate', (['mask', 'kernel'], {'iterations': '(1)'}), '(mask, kernel, iterations=1)\n', (10485, 10513), False, 'import cv2\n'), ((10522, 10563), 'cv2.imshow', 'cv2.imshow', (['"""Lane Mask (mask_blue)"""', 'mask'], {}), "('Lane Mask (mask_blue)', mask)\n", (10532, 10563), False, 'import cv2\n'), ((11866, 11954), 'numpy.array', 'np.array', (['[[0.299, 0.587, 0.114], [-0.1687, -0.3313, 0.5], [0.5, -0.4187, -0.0813]]'], {}), '([[0.299, 0.587, 0.114], [-0.1687, -0.3313, 0.5], [0.5, -0.4187, -\n 0.0813]])\n', (11874, 11954), True, 'import numpy as np\n'), ((12075, 12090), 'numpy.uint8', 'np.uint8', (['ycbcr'], {}), '(ycbcr)\n', (12083, 12090), True, 'import numpy as np\n'), ((13250, 13272), 'numpy.asarray', 'np.asarray', (['pcd.points'], {}), '(pcd.points)\n', (13260, 13272), True, 'import numpy as np\n'), ((13290, 13312), 'numpy.asarray', 'np.asarray', (['pcd.colors'], {}), '(pcd.colors)\n', (13300, 13312), 
True, 'import numpy as np\n'), ((15407, 15452), 'numpy.zeros', 'np.zeros', ([], {'shape': '(self.y_height, self.x_width)'}), '(shape=(self.y_height, self.x_width))\n', (15415, 15452), True, 'import numpy as np\n'), ((16638, 16668), 'numpy.zeros', 'np.zeros', ([], {'shape': 'self.map.shape'}), '(shape=self.map.shape)\n', (16646, 16668), True, 'import numpy as np\n'), ((16907, 16933), 'cv2.imshow', 'cv2.imshow', (['self.name', 'img'], {}), '(self.name, img)\n', (16917, 16933), False, 'import cv2\n'), ((17768, 17786), 'numpy.array', 'np.array', (['filtered'], {}), '(filtered)\n', (17776, 17786), True, 'import numpy as np\n'), ((4570, 4593), 'ROAR.utilities_module.data_structures_models.Location', 'Location', ([], {'x': 'x', 'y': 'y', 'z': 'z'}), '(x=x, y=y, z=z)\n', (4578, 4593), False, 'from ROAR.utilities_module.data_structures_models import SensorsData, Transform, Location\n'), ((6885, 6909), 'numpy.where', 'np.where', (['(map_data > 0.9)'], {}), '(map_data > 0.9)\n', (6893, 6909), True, 'import numpy as np\n'), ((10776, 10794), 'numpy.average', 'np.average', (['lane_x'], {}), '(lane_x)\n', (10786, 10794), True, 'import numpy as np\n'), ((11329, 11360), 'numpy.average', 'np.average', (['self.prev_steerings'], {}), '(self.prev_steerings)\n', (11339, 11360), True, 'import numpy as np\n'), ((13456, 13481), 'open3d.geometry.PointCloud', 'o3d.geometry.PointCloud', ([], {}), '()\n', (13479, 13481), True, 'import open3d as o3d\n'), ((13512, 13546), 'open3d.utility.Vector3dVector', 'o3d.utility.Vector3dVector', (['points'], {}), '(points)\n', (13538, 13546), True, 'import open3d as o3d\n'), ((13577, 13611), 'open3d.utility.Vector3dVector', 'o3d.utility.Vector3dVector', (['colors'], {}), '(colors)\n', (13603, 13611), True, 'import open3d as o3d\n'), ((14245, 14279), 'open3d.utility.Vector3dVector', 'o3d.utility.Vector3dVector', (['points'], {}), '(points)\n', (14271, 14279), True, 'import open3d as o3d\n'), ((14310, 14344), 'open3d.utility.Vector3dVector', 
'o3d.utility.Vector3dVector', (['colors'], {}), '(colors)\n', (14336, 14344), True, 'import open3d as o3d\n'), ((16203, 16221), 'numpy.array', 'np.array', (['[xs, ys]'], {}), '([xs, ys])\n', (16211, 16221), True, 'import numpy as np\n'), ((16870, 16898), 'cv2.resize', 'cv2.resize', (['img'], {'dsize': 'dsize'}), '(img, dsize=dsize)\n', (16880, 16898), False, 'import cv2\n'), ((3342, 3367), 'pathlib.Path', 'Path', (['"""./data/pointcloud"""'], {}), "('./data/pointcloud')\n", (3346, 3367), False, 'from pathlib import Path\n'), ((3697, 3719), 'numpy.asarray', 'np.asarray', (['pcd.points'], {}), '(pcd.points)\n', (3707, 3719), True, 'import numpy as np\n'), ((3749, 3764), 'numpy.copy', 'np.copy', (['points'], {}), '(points)\n', (3756, 3764), True, 'import numpy as np\n'), ((4179, 4202), 'cv2.imshow', 'cv2.imshow', (['"""m"""', 'self.m'], {}), "('m', self.m)\n", (4189, 4202), False, 'import cv2\n'), ((4404, 4419), 'pathlib.Path', 'Path', (['file_path'], {}), '(file_path)\n', (4408, 4419), False, 'from pathlib import Path\n'), ((4623, 4644), 'ROAR.utilities_module.data_structures_models.Transform', 'Transform', ([], {'location': 'l'}), '(location=l)\n', (4632, 4644), False, 'from ROAR.utilities_module.data_structures_models import SensorsData, Transform, Location\n'), ((13369, 13392), 'numpy.mean', 'np.mean', (['points'], {'axis': '(0)'}), '(points, axis=0)\n', (13376, 13392), True, 'import numpy as np\n'), ((3791, 3838), 'numpy.vstack', 'np.vstack', (['[new_points[:, 0], new_points[:, 2]]'], {}), '([new_points[:, 0], new_points[:, 2]])\n', (3800, 3838), True, 'import numpy as np\n'), ((4020, 4052), 'numpy.where', 'np.where', (['(self.occu_map.map == 1)'], {}), '(self.occu_map.map == 1)\n', (4028, 4052), True, 'import numpy as np\n'), ((1095, 1141), 'pathlib.Path', 'Path', (['self.agent_settings.pid_config_file_path'], {}), '(self.agent_settings.pid_config_file_path)\n', (1099, 1141), False, 'from pathlib import Path\n'), ((13849, 13872), 'numpy.mean', 'np.mean', 
(['points'], {'axis': '(0)'}), '(points, axis=0)\n', (13856, 13872), True, 'import numpy as np\n'), ((14581, 14604), 'numpy.mean', 'np.mean', (['points'], {'axis': '(0)'}), '(points, axis=0)\n', (14588, 14604), True, 'import numpy as np\n'), ((3490, 3504), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (3502, 3504), False, 'from datetime import datetime\n')] |
import os
import os.path
import tensorflow as tf
import numpy as np
from glob import glob as get_all_paths
from src.utils.utils import get_logger
from src.video_preprocessor import preprocess_videos
from src.utils.utils import contains_videos
# Names of the two training-domain subdirectories expected under each task.
dataset_names = ['trainA', 'trainB']
# Image extensions: jpg is searched first, png is the fallback.
preferred_image_format_file_ending = 'jpg'
supported_image_format_file_ending = 'png'
video_format_file_ending = 'mp4'
# Characters between a video stem and its extension in a frame filename:
# NOTE(review): presumably separator + 6-digit frame index + dot — confirm
# against the naming scheme used by preprocess_videos.
video_index_padding = 1 + 6 + 1
logger = get_logger("data_loader")
def get_training_datasets(task_name, image_size, batch_size, dataset_dir="datasets", frame_sequence_length=3,
                          force_video=False) -> [tf.data.Dataset]:
    """Build the training datasets for both domains of a task.

    Verifies the on-disk layout, collects frame-sequence paths and wraps them
    in shuffled, batched tf.data pipelines. Path handling is pinned to the CPU.
    """
    with tf.device('/cpu:0'):
        verify_directory_structure(task_name, dataset_dir)
        path_tensors = get_image_paths(task_name, dataset_dir, frame_sequence_length, force_video)
        return build_datasets(path_tensors, image_size, batch_size)
def build_datasets(image_path_tensors, image_size, batch_size):
    """Create one dataset per tensor of image paths."""
    return [build_dataset(paths, image_size, batch_size)
            for paths in image_path_tensors]
def build_dataset(image_path, image_size, batch_size):
    """Turn a tensor of frame-path sequences into a batched image dataset.

    Every frame is decoded, scaled to [-1, 1], centre-cropped to a square and
    resized to (image_size, image_size).
    """
    def _load_frame(filename):
        raw = tf.read_file(filename)
        decoded = tf.image.decode_image(raw, channels=3)
        normalized = tf.image.convert_image_dtype(decoded, dtype=tf.float32)
        normalized = (normalized * 2) - 1  # [0, 1] -> [-1, 1]
        shape = tf.shape(normalized)
        side = tf.minimum(shape[0], shape[1])
        cropped = tf.image.resize_image_with_crop_or_pad(
            normalized,
            side,
            side
        )
        return tf.image.resize_images(cropped, [image_size, image_size])

    def _load_sequence(filenames):
        return tf.map_fn(_load_frame, filenames, dtype=tf.float32)

    dataset = tf.data.Dataset.from_tensor_slices(image_path)
    dataset = dataset.repeat()
    dataset = dataset.shuffle(buffer_size=10000)
    dataset = dataset.prefetch(16)
    dataset = dataset.map(_load_sequence)
    return dataset.batch(batch_size)
def get_image_paths(task_name, dataset_dir, frame_sequence_length, force_video):
    """Collect frame-sequence paths for both domains as string tensors."""
    path_lists = get_path_lists(task_name, dataset_dir, frame_sequence_length, force_video)
    return get_path_tensors(path_lists)
def get_path_tensors(image_path_lists):
    """Convert every list of path strings into a tf string tensor."""
    return [tf.convert_to_tensor(paths, dtype=tf.string)
            for paths in image_path_lists]
def get_path_lists(task_name, dataset_dir, frame_sequence_length, force_video):
    """Gather the frame-sequence path list for each dataset subdirectory.

    Video directories yield overlapping runs of ``frame_sequence_length``
    consecutive frames; image directories yield single-frame sequences.
    """
    path_lists = []
    for dir_name in dataset_names:
        data_dir = os.path.join(dataset_dir, task_name, dir_name)
        is_video_data = force_video or contains_videos(data_dir)
        logger.info(f"Training with {'video' if is_video_data else 'image'} data from {data_dir}")
        if is_video_data:
            paths = get_video_frame_sequences(data_dir, frame_sequence_length)
        else:
            paths = get_image_frame_sequences(data_dir)
        path_lists.append(paths)
    return path_lists
def get_image_frame_sequences(data_dir):
    """Return an (N, 1) array: each image is its own one-frame sequence."""
    return np.array(get_path_list(data_dir)).reshape(-1, 1)
def get_path_list(data_dir):
    """List all image files in ``data_dir``, sorted case-insensitively.

    Prefers jpg files; falls back to png when no jpg files are present.

    Raises:
        Exception: if the directory contains neither jpg nor png files.
    """
    image_path_pattern = os.path.join(data_dir, f"*{preferred_image_format_file_ending}")
    task_image_paths = get_all_paths(image_path_pattern)
    if len(task_image_paths) == 0:
        # Use the module logger instead of print, for consistency with the
        # logging done elsewhere in this module (e.g. get_path_lists).
        logger.info(
            f"No {preferred_image_format_file_ending}s found, looking for {supported_image_format_file_ending}s.")
        image_path_pattern = os.path.join(data_dir, f"*{supported_image_format_file_ending}")
        task_image_paths = get_all_paths(image_path_pattern)
        if len(task_image_paths) == 0:
            raise Exception(f"Training data not found in {data_dir}!")
    task_image_paths.sort(key=str.lower)
    return task_image_paths
def verify_directory_structure(task_name, dataset_dir):
    """Ensure the expected dataset layout exists and pre-extract video frames.

    Raises:
        Exception: when any required directory is missing.
    """
    def _require(path, message):
        # Fail fast with the caller-supplied message when a directory is absent.
        if not os.path.exists(path):
            raise Exception(message)

    _require(dataset_dir, "Dataset Directory does not exist!")
    base_dir = os.path.join(dataset_dir, task_name)
    _require(base_dir, "Task Dataset Directory does not exist!")
    for dataset_name in dataset_names:
        dataset_directory = os.path.join(base_dir, dataset_name)
        _require(dataset_directory, f"{dataset_directory} does not exist!")
        if contains_videos(dataset_directory):
            preprocess_videos(dataset_directory)
def get_video_names(task_name):
    """Return the distinct video stems found in the task's frames directory.

    A stem is a frame path with the frame-index suffix and extension removed.
    """
    suffix_len = video_index_padding + len(preferred_image_format_file_ending)
    stems = {path[:-suffix_len]
             for path in get_path_list(os.path.join(task_name, 'frames'))}
    return list(stems)
def get_video_frames(video_name):
    """Return all frame paths of one video, sorted case-insensitively."""
    pattern = video_name + "_*." + preferred_image_format_file_ending
    return sorted(get_all_paths(pattern), key=str.lower)
def get_video_frame_sequences(task_name, sequencial_frames):
    """Build overlapping runs of consecutive frame paths for every video.

    NOTE(review): despite the name, callers (get_path_lists) pass a data
    directory path as ``task_name``.

    Returns:
        (N, sequencial_frames) array of frame paths; row i holds
        ``sequencial_frames`` consecutive frames of one video.
    """
    video_names = get_video_names(task_name)
    # TODO: make free of transpose
    # Accumulate column-wise: one row per position within a sequence.
    frame_sequences = np.array([[] for _ in range(sequencial_frames)])
    for video_name in video_names:
        frames = get_video_frames(video_name)
        consecutive_frames = get_consecutive_frames(frames, sequencial_frames)
        frame_sequences = np.concatenate((frame_sequences, consecutive_frames), axis=1)
    if frame_sequences.size == 0:
        # Fallback when no per-video stems were found: treat the whole
        # frames directory as a single video.
        frames = get_path_list(os.path.join(task_name, "frames"))
        consecutive_frames = get_consecutive_frames(frames, sequencial_frames)
        frame_sequences = np.concatenate((frame_sequences, consecutive_frames), axis=1)
    return frame_sequences.transpose()
def get_consecutive_frames(frames, num_frames):
    """Return ``num_frames`` shifted views over ``frames``.

    Element i holds all frames offset by i, trimmed so that zipping the
    result column-wise yields every run of ``num_frames`` consecutive frames.
    """
    usable = len(frames) - num_frames + 1
    return [frames[offset:offset + usable] for offset in range(num_frames)]
| [
"src.video_preprocessor.preprocess_videos",
"tensorflow.image.resize_images",
"src.utils.utils.get_logger",
"tensorflow.convert_to_tensor",
"tensorflow.device",
"os.path.exists",
"tensorflow.map_fn",
"tensorflow.data.Dataset.from_tensor_slices",
"tensorflow.minimum",
"tensorflow.shape",
"tensorf... | [((443, 468), 'src.utils.utils.get_logger', 'get_logger', (['"""data_loader"""'], {}), "('data_loader')\n", (453, 468), False, 'from src.utils.utils import get_logger\n'), ((1262, 1308), 'tensorflow.data.Dataset.from_tensor_slices', 'tf.data.Dataset.from_tensor_slices', (['image_path'], {}), '(image_path)\n', (1296, 1308), True, 'import tensorflow as tf\n'), ((3707, 3771), 'os.path.join', 'os.path.join', (['data_dir', 'f"""*{preferred_image_format_file_ending}"""'], {}), "(data_dir, f'*{preferred_image_format_file_ending}')\n", (3719, 3771), False, 'import os\n'), ((3795, 3828), 'glob.glob', 'get_all_paths', (['image_path_pattern'], {}), '(image_path_pattern)\n', (3808, 3828), True, 'from glob import glob as get_all_paths\n'), ((4482, 4518), 'os.path.join', 'os.path.join', (['dataset_dir', 'task_name'], {}), '(dataset_dir, task_name)\n', (4494, 4518), False, 'import os\n'), ((5260, 5330), 'glob.glob', 'get_all_paths', (["(video_name + '_*.' + preferred_image_format_file_ending)"], {}), "(video_name + '_*.' 
+ preferred_image_format_file_ending)\n", (5273, 5330), True, 'from glob import glob as get_all_paths\n'), ((657, 676), 'tensorflow.device', 'tf.device', (['"""/cpu:0"""'], {}), "('/cpu:0')\n", (666, 676), True, 'import tensorflow as tf\n'), ((1478, 1500), 'tensorflow.read_file', 'tf.read_file', (['filename'], {}), '(filename)\n', (1490, 1500), True, 'import tensorflow as tf\n'), ((1525, 1572), 'tensorflow.image.decode_image', 'tf.image.decode_image', (['image_string'], {'channels': '(3)'}), '(image_string, channels=3)\n', (1546, 1572), True, 'import tensorflow as tf\n'), ((1600, 1661), 'tensorflow.image.convert_image_dtype', 'tf.image.convert_image_dtype', (['image_decoded'], {'dtype': 'tf.float32'}), '(image_decoded, dtype=tf.float32)\n', (1628, 1661), True, 'import tensorflow as tf\n'), ((1733, 1759), 'tensorflow.shape', 'tf.shape', (['image_normalized'], {}), '(image_normalized)\n', (1741, 1759), True, 'import tensorflow as tf\n'), ((1785, 1815), 'tensorflow.minimum', 'tf.minimum', (['shape[0]', 'shape[1]'], {}), '(shape[0], shape[1])\n', (1795, 1815), True, 'import tensorflow as tf\n'), ((1840, 1932), 'tensorflow.image.resize_image_with_crop_or_pad', 'tf.image.resize_image_with_crop_or_pad', (['image_normalized', 'min_resolution', 'min_resolution'], {}), '(image_normalized, min_resolution,\n min_resolution)\n', (1878, 1932), True, 'import tensorflow as tf\n'), ((1999, 2062), 'tensorflow.image.resize_images', 'tf.image.resize_images', (['image_cropped', '[image_size, image_size]'], {}), '(image_cropped, [image_size, image_size])\n', (2021, 2062), True, 'import tensorflow as tf\n'), ((2140, 2190), 'tensorflow.map_fn', 'tf.map_fn', (['load_image', 'filenames'], {'dtype': 'tf.float32'}), '(load_image, filenames, dtype=tf.float32)\n', (2149, 2190), True, 'import tensorflow as tf\n'), ((2693, 2741), 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['path_list'], {'dtype': 'tf.string'}), '(path_list, dtype=tf.string)\n', (2713, 2741), True, 'import tensorflow 
as tf\n'), ((2981, 3017), 'os.path.join', 'os.path.join', (['dataset_dir', 'task_name'], {}), '(dataset_dir, task_name)\n', (2993, 3017), False, 'import os\n'), ((3037, 3069), 'os.path.join', 'os.path.join', (['base_dir', 'dir_name'], {}), '(base_dir, dir_name)\n', (3049, 3069), False, 'import os\n'), ((4010, 4074), 'os.path.join', 'os.path.join', (['data_dir', 'f"""*{supported_image_format_file_ending}"""'], {}), "(data_dir, f'*{supported_image_format_file_ending}')\n", (4022, 4074), False, 'import os\n'), ((4102, 4135), 'glob.glob', 'get_all_paths', (['image_path_pattern'], {}), '(image_path_pattern)\n', (4115, 4135), True, 'from glob import glob as get_all_paths\n'), ((4376, 4403), 'os.path.exists', 'os.path.exists', (['dataset_dir'], {}), '(dataset_dir)\n', (4390, 4403), False, 'import os\n'), ((4530, 4554), 'os.path.exists', 'os.path.exists', (['base_dir'], {}), '(base_dir)\n', (4544, 4554), False, 'import os\n'), ((4689, 4725), 'os.path.join', 'os.path.join', (['base_dir', 'dataset_name'], {}), '(base_dir, dataset_name)\n', (4701, 4725), False, 'import os\n'), ((4855, 4889), 'src.utils.utils.contains_videos', 'contains_videos', (['dataset_directory'], {}), '(dataset_directory)\n', (4870, 4889), False, 'from src.utils.utils import contains_videos\n'), ((5006, 5039), 'os.path.join', 'os.path.join', (['task_name', '"""frames"""'], {}), "(task_name, 'frames')\n", (5018, 5039), False, 'import os\n'), ((5788, 5849), 'numpy.concatenate', 'np.concatenate', (['(frame_sequences, consecutive_frames)'], {'axis': '(1)'}), '((frame_sequences, consecutive_frames), axis=1)\n', (5802, 5849), True, 'import numpy as np\n'), ((6056, 6117), 'numpy.concatenate', 'np.concatenate', (['(frame_sequences, consecutive_frames)'], {'axis': '(1)'}), '((frame_sequences, consecutive_frames), axis=1)\n', (6070, 6117), True, 'import numpy as np\n'), ((3109, 3134), 'src.utils.utils.contains_videos', 'contains_videos', (['data_dir'], {}), '(data_dir)\n', (3124, 3134), False, 'from 
src.utils.utils import contains_videos\n'), ((4741, 4774), 'os.path.exists', 'os.path.exists', (['dataset_directory'], {}), '(dataset_directory)\n', (4755, 4774), False, 'import os\n'), ((4903, 4939), 'src.video_preprocessor.preprocess_videos', 'preprocess_videos', (['dataset_directory'], {}), '(dataset_directory)\n', (4920, 4939), False, 'from src.video_preprocessor import preprocess_videos\n'), ((5916, 5949), 'os.path.join', 'os.path.join', (['task_name', '"""frames"""'], {}), "(task_name, 'frames')\n", (5928, 5949), False, 'import os\n')] |
from functools import partial
from pyqtgraph.Qt import QtGui, QtCore
import numpy as np
import pyqtgraph as pg
class QtPlotter(object):
    """Small pyqtgraph-based live plotter for curves and images.

    Each added plot/image polls its ``data_source`` callable on a Qt timer
    (every 50 ms) and refreshes the widget.
    """

    def __init__(self):
        self._app = QtGui.QApplication([])  # kept alive for the app's lifetime
        self._win = pg.GraphicsWindow(title="JackPlay Plotter")
        self._win.setWindowTitle("JackPlay Plotter")
        self._plots = dict()
        self._images = dict()
        pg.setConfigOptions(antialias=True)

    def add_plot(self, plot_id, plot_title, data_source):
        """Register a curve plot fed by ``data_source``, a no-argument
        callable returning a 1-D array."""
        plt = self._win.addPlot(title=plot_title)
        curve = plt.plot(pen='y')
        first_data = data_source()
        curve.setData(first_data)
        # Track the running y-range ourselves instead of auto-ranging.
        plt.enableAutoRange('y', False)
        minval, maxval = first_data.min(), first_data.max()
        plt.setYRange(minval, maxval)
        timer = QtCore.QTimer()
        timer.timeout.connect(partial(self._update, plot_id))
        timer.start(50)
        # The timer is stored in the tuple so it stays referenced.
        self._plots[plot_id] = (plt, curve, data_source, timer, minval, maxval)

    def add_image(self, image_id, image_title, data_source):
        """Register an image view fed by ``data_source``, a no-argument
        callable returning a 2-D array."""
        view = pg.ViewBox()
        self._win.addItem(view)
        img = pg.ImageItem(np.random.rand(10, 10))
        view.addItem(img)
        timer = QtCore.QTimer()
        timer.timeout.connect(partial(self._update_image, image_id))
        timer.start(50)
        self._images[image_id] = (img, data_source, timer,)

    def _update(self, plot_id):
        """Timer callback: fetch one sample, widen the y-range, redraw."""
        plt, curve, data_source, timer, minval, maxval = self._plots[plot_id]
        data = data_source()
        minval = min(data.min(), minval)
        maxval = max(data.max(), maxval)
        # BUG FIX: plot the sample we just fetched. The original called
        # data_source() a second time here, so the displayed data did not
        # match the data used to compute the y-range.
        curve.setData(data)
        plt.setYRange(minval, maxval)
        self._plots[plot_id] = plt, curve, data_source, timer, minval, maxval

    def _update_image(self, image_id):
        """Timer callback: fetch and display a fresh image."""
        img, data_source, timer = self._images[image_id]
        data = data_source()
        img.setImage(data)

    def run(self):
        """Enter the Qt event loop (blocks until the window closes)."""
        QtGui.QApplication.instance().exec_()
def update():
    """Debug callback: announce that the fallback update fired."""
    print("other update called")
def test_data_source():
    """Produce a fresh batch of 1000 standard-normal samples."""
    return np.random.normal(0.0, 1.0, 1000)
def main():
    """Demo entry point: show one auto-updating random-data plot."""
    demo = QtPlotter()
    demo.add_plot("default", "Test Plot", test_data_source)
    QtGui.QApplication.instance().exec_()
# Run the demo only when executed as a script, not on import.
if __name__ == '__main__':
    main()
| [
"functools.partial",
"pyqtgraph.ViewBox",
"pyqtgraph.GraphicsWindow",
"pyqtgraph.Qt.QtGui.QApplication.instance",
"pyqtgraph.Qt.QtCore.QTimer",
"numpy.random.normal",
"pyqtgraph.Qt.QtGui.QApplication",
"numpy.random.rand",
"pyqtgraph.setConfigOptions"
] | [((2530, 2557), 'numpy.random.normal', 'np.random.normal', ([], {'size': '(1000)'}), '(size=1000)\n', (2546, 2557), True, 'import numpy as np\n'), ((183, 205), 'pyqtgraph.Qt.QtGui.QApplication', 'QtGui.QApplication', (['[]'], {}), '([])\n', (201, 205), False, 'from pyqtgraph.Qt import QtGui, QtCore\n'), ((226, 269), 'pyqtgraph.GraphicsWindow', 'pg.GraphicsWindow', ([], {'title': '"""JackPlay Plotter"""'}), "(title='JackPlay Plotter')\n", (243, 269), True, 'import pyqtgraph as pg\n'), ((391, 426), 'pyqtgraph.setConfigOptions', 'pg.setConfigOptions', ([], {'antialias': '(True)'}), '(antialias=True)\n', (410, 426), True, 'import pyqtgraph as pg\n'), ((795, 810), 'pyqtgraph.Qt.QtCore.QTimer', 'QtCore.QTimer', ([], {}), '()\n', (808, 810), False, 'from pyqtgraph.Qt import QtGui, QtCore\n'), ((1055, 1067), 'pyqtgraph.ViewBox', 'pg.ViewBox', ([], {}), '()\n', (1065, 1067), True, 'import pyqtgraph as pg\n'), ((1682, 1697), 'pyqtgraph.Qt.QtCore.QTimer', 'QtCore.QTimer', ([], {}), '()\n', (1695, 1697), False, 'from pyqtgraph.Qt import QtGui, QtCore\n'), ((841, 871), 'functools.partial', 'partial', (['self._update', 'plot_id'], {}), '(self._update, plot_id)\n', (848, 871), False, 'from functools import partial\n'), ((1127, 1149), 'numpy.random.rand', 'np.random.rand', (['(10)', '(10)'], {}), '(10, 10)\n', (1141, 1149), True, 'import numpy as np\n'), ((1728, 1765), 'functools.partial', 'partial', (['self._update_image', 'image_id'], {}), '(self._update_image, image_id)\n', (1735, 1765), False, 'from functools import partial\n'), ((2665, 2694), 'pyqtgraph.Qt.QtGui.QApplication.instance', 'QtGui.QApplication.instance', ([], {}), '()\n', (2692, 2694), False, 'from pyqtgraph.Qt import QtGui, QtCore\n'), ((2407, 2436), 'pyqtgraph.Qt.QtGui.QApplication.instance', 'QtGui.QApplication.instance', ([], {}), '()\n', (2434, 2436), False, 'from pyqtgraph.Qt import QtGui, QtCore\n')] |
# nbapr/nbapr/nbapr.py
# -*- coding: utf-8 -*-
# Copyright (C) 2021 <NAME>
# Licensed under the MIT License
import functools
import logging
import time
import warnings
from typing import Iterable, Union

import numpy as np
import pandas as pd
logging.getLogger(__name__).addHandler(logging.NullHandler())
def _timeit(method):
def timed(*args, **kw):
ts = time.time()
result = method(*args, **kw)
te = time.time()
if 'log_time' in kw:
name = kw.get('log_name', method.__name__.upper())
kw['log_time'][name] = int((te - ts) * 1000)
else:
logging.info('%r %2.2f ms' % \
(method.__name__, (te - ts) * 1000))
return result
return timed
@_timeit
def _multidimensional_shifting(elements: Iterable,
                               num_samples: int,
                               sample_size: int,
                               probs: Iterable) -> np.ndarray:
    """Vectorized weighted sampling without replacement over many draws.

    Based on https://medium.com/ibm-watson/incredibly-fast-random-sampling-in-python-baf154bd836a

    Args:
        elements (iterable): iterable to sample from, typically a dataframe index
        num_samples (int): the number of rows (e.g. initial population size)
        sample_size (int): the number of columns (e.g. team size)
        probs (iterable): is same size as elements

    Returns:
        ndarray: of shape (num_samples, sample_size)
    """
    # one row of probabilities per requested sample
    tiled_probs = np.tile(probs, (num_samples, 1))
    # normalized random noise; subtracting the probabilities shifts
    # high-probability elements toward the smallest sort keys
    noise = np.random.random(tiled_probs.shape)
    noise /= noise.sum(axis=1)[:, np.newaxis]
    keys = noise - tiled_probs
    # indices of the sample_size smallest keys in each row
    picks = np.argpartition(keys, sample_size, axis=1)[:, :sample_size]
    return elements[picks]
def rankdata(a: np.ndarray, method: str = 'average', *, axis: Union[None, int] = None) -> np.ndarray:
    """Assign ranks to data, dealing with ties appropriately.

    Vendored port of ``scipy.stats.rankdata`` (scipy 1.6.0) to avoid pulling
    in the full scipy dependency.

    Args:
        a (np.ndarray): the array of values to be ranked.
        method (str): one of {'average', 'min', 'max', 'dense', 'ordinal'}.
        axis: Union[None, int], optional; rank along this axis instead of the
            flattened array.

    Returns:
        ndarray: rank scores, same size as `a`.
    """
    if method not in ('average', 'min', 'max', 'dense', 'ordinal'):
        raise ValueError('unknown method "{0}"'.format(method))
    if axis is not None:
        a = np.asarray(a)
        if a.size == 0:
            # result is empty, but still validate `axis`
            np.core.multiarray.normalize_axis_index(axis, a.ndim)
            out_dtype = np.float64 if method == 'average' else np.int_
            return np.empty(a.shape, dtype=out_dtype)
        # recurse over 1D slices along the requested axis
        return np.apply_along_axis(rankdata, axis, a, method)

    flat = np.ravel(np.asarray(a))
    # mergesort is stable, which 'ordinal' relies on to break ties by position
    kind = 'mergesort' if method == 'ordinal' else 'quicksort'
    order = np.argsort(flat, kind=kind)
    inverse = np.empty(order.size, dtype=np.intp)
    inverse[order] = np.arange(order.size, dtype=np.intp)
    if method == 'ordinal':
        return inverse + 1

    flat = flat[order]
    # True where a run of equal values starts
    is_new = np.r_[True, flat[1:] != flat[:-1]]
    dense = is_new.cumsum()[inverse]
    if method == 'dense':
        return dense
    # cumulative counts of each unique value
    bounds = np.r_[np.nonzero(is_new)[0], len(is_new)]
    if method == 'max':
        return bounds[dense]
    if method == 'min':
        return bounds[dense - 1] + 1
    # 'average': midpoint of the min and max ranks of the tie group
    return .5 * (bounds[dense] + bounds[dense - 1] + 1)
@_timeit
def _create_player_points(
    pool: pd.DataFrame,
    teams: np.ndarray,
    n_iterations: int,
    n_teams: int,
    n_players: int,
    team_points: np.ndarray
) -> np.ndarray:
    """Allocates each team's season points back to its individual players.

    Args:
        pool (pd.DataFrame): the player pool; its index holds the player ids
        teams (np.ndarray): teams, shape (n_iterations, n_teams, n_players)
        n_iterations (int): number of simulated leagues
        n_teams (int): number of teams per league
        n_players (int): number of players per team
        team_points (np.ndarray): shape (n_iterations, n_teams)

    Returns:
        np.ndarray: shape (n_iterations * n_teams, len(pool)); entry [i, j]
        is team i's points when player j is on team i, else 0.
    """
    # now need to link back to players
    players = pool.index.values
    # once we've calculated stats, can remove league dimension from teams
    # is just a 2D array of teams
    # if you flatten teampoints, get 1D array lines up with 2D teams
    teams2d = teams.reshape(n_iterations * n_teams, n_players)
    team_points1d = team_points.ravel()
    # creates array of shape (len(teams2d), len(players))
    # is effectively one hot encoder for player indexes
    # if player index 3 is on team 0, then on_team[0, 3] == 1
    on_team = (players[...,None]==teams2d[:,None,:]).any(-1).astype(int)
    # now we can calculate player points by multiplying
    # matrix of zeroes and ones with team points
    return on_team * team_points1d[:, np.newaxis]
@_timeit
def _create_teams(
    pool: pd.DataFrame,
    n_iterations: int = 500,
    n_teams: int = 10,
    n_players: int = 10,
    probcol: str = 'probs'
) -> np.ndarray:
    """Creates initial set of teams

    Args:
        pool (pd.DataFrame): the player pool; its index holds the player ids
        n_iterations (int): number of leagues to simulate, default 500
        n_teams (int): number of teams per league, default 10
        n_players (int): number of players per team, default 10
        probcol (str): pool column with per-player sampling probabilities

    Returns:
        np.ndarray of shape
          axis 0 - number of iterations
          axis 1 - number of teams in league
          axis 2 - number of players on team
    """
    # get the teams, which are represented as 3D array
    # axis 0 = number of iterations (leagues)
    # axis 1 = number of teams in league
    # axis 2 = number of players on team
    # each league is drawn as one flat sample of n_teams * n_players players
    arr = _multidimensional_shifting(
        elements=pool.index.values,
        num_samples=n_iterations,
        sample_size=n_teams * n_players,
        probs=pool[probcol]
    )
    return arr.reshape(n_iterations, n_teams, n_players)
@_timeit
def _create_teamstats(
    pool: pd.DataFrame,
    statscols: Iterable[str],
    teams: np.ndarray
) -> np.ndarray:
    """Calculates per-team category totals.

    Args:
        pool (pd.DataFrame): the player pool
        statscols (Iterable[str]): the statistics columns
        teams (np.ndarray): teams, shape (n_iterations, n_teams, n_players)

    Returns:
        np.ndarray: shape (n_iterations, n_teams, len(statscols))
    """
    # player stats as a 2D array; row index == position in the pool
    player_stats = pool.loc[:, statscols].values
    # fancy-index up to (n_iterations, n_teams, n_players, len(statscols)),
    # then total each roster along the player axis
    per_player = player_stats[teams]
    return per_player.sum(axis=2)
def _zscore(a, axis=0, ddof=0, nan_policy='propagate'):
"""Added from scipy to remove dependency
Args:
a (np.ndarray): array like object containing the sample data.
axis (int): default 0
Returns:
np.ndarray
"""
mn = a.mean(axis=axis, keepdims=True)
std = a.std(axis=axis, ddof=ddof, keepdims=True)
first = np.take_along_axis(a, np.array(0, ndmin=a.ndim), axis)
isconst = (first == a).all(axis=axis, keepdims=True)
# Set std deviations that are 0 to 1 to avoid division by 0.
std[isconst] = 1.0
z = (a - mn) / std
# Set the outputs associated with a constant input to nan.
z[np.broadcast_to(isconst, z.shape)] = np.nan
return z
def pr_traditional(pool: pd.DataFrame,
                   statscols: Iterable[str] = ('WFGP', 'FTM', 'FG3M', 'REB', 'AST', 'STL', 'BLK', 'TOV', 'PTS'),
                   ) -> pd.DataFrame:
    """Traditional player rater: sums each player's per-category z-scores.

    Args:
        pool (pd.DataFrame): the player pool dataframe
        statscols (Iterable[str]): the stats columns

    Returns:
        pd.DataFrame with columns
          player[str], pos[str], team[str], pr_zscore[float]
    """
    # rows with any missing stat cannot be standardized fairly
    pool = pool.dropna()
    zscores = _zscore(pool.loc[:, statscols].values)
    return pd.DataFrame({
        'player': pool.PLAYER_NAME,
        'pos': pool.POS,
        'team': pool.TEAM,
        'pr_zscore': np.sum(zscores, axis=1)
    })
@_timeit
def sim(pool: pd.DataFrame,
        n_iterations: int = 500,
        n_teams: int = 10,
        n_players: int = 10,
        statscols: Iterable[str] = ('WFGP', 'FTM', 'FG3M', 'REB', 'AST', 'STL', 'BLK', 'TOV', 'PTS'),
        probcol: str = 'probs'
        ) -> pd.DataFrame:
    """Simulates NBA fantasy season

    Args:
        pool (pd.DataFrame): the player pool dataframe
        n_iterations (int): number of leagues to simulate, default 500
        n_teams (int): number of teams per league, default 10
        n_players (int): number of player per team, default 10
        statscols (Iterable[str]): the stats columns
        probcol (str): the column name with probabilities for sampling

    Returns:
        pd.DataFrame with columns
          player[str], pos[str], team[str], pts[float]
    """
    # get the teams, which are represented as 3D array
    # axis 0 = number of iterations (leagues)
    # axis 1 = number of teams in league
    # axis 2 = number of players in team
    # BUGFIX: probcol is now forwarded so a caller-supplied probability
    # column is honored (previously _create_teams always used its default)
    teams = _create_teams(pool, n_iterations, n_teams, n_players, probcol)
    # stats_mda is shape(len(players), len(statcols)
    # so each row is a player's stats in those categories
    # row_index == index in the players dataframe
    team_stats_totals = _create_teamstats(pool, statscols, teams)
    # calculate ranks and sum them
    # team_ranks has same shape as team_totals (n_iterations, n_teams, len(statcols))
    team_ranks = rankdata(team_stats_totals, method='average', axis=1)
    # team_points is sum of team ranks along axis 2
    # has shape (n_iterations, n_teams)
    team_points = np.sum(team_ranks, axis=2)
    # now need to link back to players
    player_points = _create_player_points(pool, teams, n_iterations, n_teams, n_players, team_points)
    # have convert 0 to nan so can calculate true average
    player_points[player_points == 0] = np.nan
    player_mean = np.nanmean(player_points, axis=0)
    # return results
    return pd.DataFrame({
        'player': pool.PLAYER_NAME,
        'pos': pool.POS,
        'team': pool.TEAM,
        'pts': player_mean
    })
if __name__ == '__main__':
    # library module: no CLI entry point
    pass
| [
"numpy.sum",
"numpy.empty",
"numpy.argsort",
"numpy.argpartition",
"numpy.arange",
"numpy.tile",
"logging.NullHandler",
"numpy.nanmean",
"pandas.DataFrame",
"numpy.apply_along_axis",
"numpy.asarray",
"numpy.broadcast_to",
"numpy.core.multiarray.normalize_axis_index",
"time.time",
"numpy.... | [((268, 289), 'logging.NullHandler', 'logging.NullHandler', ([], {}), '()\n', (287, 289), False, 'import logging\n'), ((1463, 1495), 'numpy.tile', 'np.tile', (['probs', '(num_samples, 1)'], {}), '(probs, (num_samples, 1))\n', (1470, 1495), True, 'import numpy as np\n'), ((1516, 1564), 'numpy.random.random', 'np.random.random', (['replicated_probabilities.shape'], {}), '(replicated_probabilities.shape)\n', (1532, 1564), True, 'import numpy as np\n'), ((3190, 3216), 'numpy.argsort', 'np.argsort', (['arr'], {'kind': 'algo'}), '(arr, kind=algo)\n', (3200, 3216), True, 'import numpy as np\n'), ((3228, 3264), 'numpy.empty', 'np.empty', (['sorter.size'], {'dtype': 'np.intp'}), '(sorter.size, dtype=np.intp)\n', (3236, 3264), True, 'import numpy as np\n'), ((3283, 3320), 'numpy.arange', 'np.arange', (['sorter.size'], {'dtype': 'np.intp'}), '(sorter.size, dtype=np.intp)\n', (3292, 3320), True, 'import numpy as np\n'), ((6595, 6621), 'numpy.sum', 'np.sum', (['team_stats'], {'axis': '(2)'}), '(team_stats, axis=2)\n', (6601, 6621), True, 'import numpy as np\n'), ((7895, 7996), 'pandas.DataFrame', 'pd.DataFrame', (["{'player': pool.PLAYER_NAME, 'pos': pool.POS, 'team': pool.TEAM,\n 'pr_zscore': pts}"], {}), "({'player': pool.PLAYER_NAME, 'pos': pool.POS, 'team': pool.\n TEAM, 'pr_zscore': pts})\n", (7907, 7996), True, 'import pandas as pd\n'), ((9625, 9651), 'numpy.sum', 'np.sum', (['team_ranks'], {'axis': '(2)'}), '(team_ranks, axis=2)\n', (9631, 9651), True, 'import numpy as np\n'), ((9922, 9955), 'numpy.nanmean', 'np.nanmean', (['player_points'], {'axis': '(0)'}), '(player_points, axis=0)\n', (9932, 9955), True, 'import numpy as np\n'), ((9989, 10092), 'pandas.DataFrame', 'pd.DataFrame', (["{'player': pool.PLAYER_NAME, 'pos': pool.POS, 'team': pool.TEAM, 'pts':\n player_mean}"], {}), "({'player': pool.PLAYER_NAME, 'pos': pool.POS, 'team': pool.\n TEAM, 'pts': player_mean})\n", (10001, 10092), True, 'import pandas as pd\n'), ((229, 256), 'logging.getLogger', 
'logging.getLogger', (['__name__'], {}), '(__name__)\n', (246, 256), False, 'import logging\n'), ((355, 366), 'time.time', 'time.time', ([], {}), '()\n', (364, 366), False, 'import time\n'), ((417, 428), 'time.time', 'time.time', ([], {}), '()\n', (426, 428), False, 'import time\n'), ((1710, 1769), 'numpy.argpartition', 'np.argpartition', (['shifted_probabilities', 'sample_size'], {'axis': '(1)'}), '(shifted_probabilities, sample_size, axis=1)\n', (1725, 1769), True, 'import numpy as np\n'), ((2588, 2601), 'numpy.asarray', 'np.asarray', (['a'], {}), '(a)\n', (2598, 2601), True, 'import numpy as np\n'), ((3032, 3078), 'numpy.apply_along_axis', 'np.apply_along_axis', (['rankdata', 'axis', 'a', 'method'], {}), '(rankdata, axis, a, method)\n', (3051, 3078), True, 'import numpy as np\n'), ((3099, 3112), 'numpy.asarray', 'np.asarray', (['a'], {}), '(a)\n', (3109, 3112), True, 'import numpy as np\n'), ((7007, 7032), 'numpy.array', 'np.array', (['(0)'], {'ndmin': 'a.ndim'}), '(0, ndmin=a.ndim)\n', (7015, 7032), True, 'import numpy as np\n'), ((7279, 7312), 'numpy.broadcast_to', 'np.broadcast_to', (['isconst', 'z.shape'], {}), '(isconst, z.shape)\n', (7294, 7312), True, 'import numpy as np\n'), ((604, 670), 'logging.info', 'logging.info', (["('%r %2.2f ms' % (method.__name__, (te - ts) * 1000))"], {}), "('%r %2.2f ms' % (method.__name__, (te - ts) * 1000))\n", (616, 670), False, 'import logging\n'), ((2852, 2905), 'numpy.core.multiarray.normalize_axis_index', 'np.core.multiarray.normalize_axis_index', (['axis', 'a.ndim'], {}), '(axis, a.ndim)\n', (2891, 2905), True, 'import numpy as np\n'), ((2989, 3016), 'numpy.empty', 'np.empty', (['a.shape'], {'dtype': 'dt'}), '(a.shape, dtype=dt)\n', (2997, 3016), True, 'import numpy as np\n'), ((3581, 3596), 'numpy.nonzero', 'np.nonzero', (['obs'], {}), '(obs)\n', (3591, 3596), True, 'import numpy as np\n')] |
"""Dask_cudf reader."""
import logging
from typing import Any
from typing import Dict
from typing import Optional
from typing import Sequence
from typing import TypeVar
from typing import Union
import numpy as np
import cupy as cp
import pandas as pd
import cudf
import dask_cudf
import dask.dataframe as dd
from dask_cudf.core import DataFrame
from dask_cudf.core import Series
from lightautoml.dataset.gpu.gpu_dataset import CudfDataset
from lightautoml.dataset.gpu.gpu_dataset import DaskCudfDataset
from lightautoml.dataset.roles import ColumnRole
from lightautoml.dataset.roles import DropRole
from lightautoml.tasks import Task
from lightautoml.reader.utils import set_sklearn_folds
from .cudf_reader import CudfReader
from time import perf_counter
# module-level logger for this reader
logger = logging.getLogger(__name__)
# roles, how it's passed to automl
RoleType = TypeVar('RoleType', bound=ColumnRole)
# mapping from feature name to its parsed role
RolesDict = Dict[str, RoleType]
# how user can define roles
UserDefinedRole = Optional[Union[str, RoleType]]
# {role: [feature names]} form
UserDefinedRolesDict = Dict[UserDefinedRole, Sequence[str]]
# per-feature sequence-of-roles form
UserDefinedRolesSequence = Sequence[UserDefinedRole]
# any accepted user-facing roles specification
UserRolesDefinition = Optional[Union[UserDefinedRole, UserDefinedRolesDict,
                                     UserDefinedRolesSequence]]
class DaskCudfReader(CudfReader):
    """
    Reader to convert :class:`~dask_cudf.core.DataFrame` to
    AutoML's :class:`~lightautoml.dataset.daskcudf_dataset.DaskCudfDataset`.
    Stages:
        - Drop obviously useless features.
        - Convert roles dict from user format to automl format.
        - Simple role guess for features without input role.
        - Create cv folds.
        - Create initial PandasDataset.
        - Optional: advanced guessing of role and handling types.
    """
    def __init__(self, task: Task, compute: bool = True,
                 index_ok: bool = False, npartitions: int = 1,
                 *args: Any, **kwargs: Any):
        """
        Args:
            task: the AutoML task.
            compute: if True reader transfers sample to a single GPU before guessing roles.
            index_ok: if data is already indexed
            npartitions: number of partitions
        """
        self.compute = compute
        self.npartitions = npartitions
        self.index_ok = index_ok
        super().__init__(task, *args, **kwargs)
    def _prepare_data_and_target(self, train_data, **kwargs):
        """Coerce any supported input into a persisted dask_cudf frame and
        validate/transform the target column into ``kwargs['target']``."""
        if isinstance(train_data, (pd.DataFrame, pd.Series)):
            # CPU pandas -> single-GPU cudf -> partitioned dask_cudf
            train_data = cudf.from_pandas(train_data, nan_as_null=False)
            train_data = dask_cudf.from_cudf(train_data, npartitions=self.npartitions)
            kwargs['target'] = train_data[self.target]
        elif isinstance(train_data, (cudf.DataFrame, cudf.Series)):
            train_data = dask_cudf.from_cudf(train_data, npartitions=self.npartitions)
            kwargs['target'] = train_data[self.target]
        elif isinstance(train_data, (dask_cudf.DataFrame, dask_cudf.Series)):
            # already in the desired format
            # NOTE(review): this branch relies on 'target' already being in
            # kwargs — confirm against callers
            pass
        elif isinstance(train_data, (dd.DataFrame, dd.Series)):
            # CPU dask -> GPU dask_cudf, partition by partition
            train_data = train_data.map_partitions(
                cudf.DataFrame.from_pandas,
                nan_as_null=False,
                meta=cudf.DataFrame(columns=train_data.columns)
            ).persist()
            kwargs['target'] = train_data[self.target]
        else:
            raise NotImplementedError("Input data type is not supported")
        kwargs['target'] = self._create_target(kwargs['target'])
        return train_data.persist(), kwargs
    def fit_read(self, train_data: DataFrame, features_names: Any = None,
                 roles: UserDefinedRolesDict = None, roles_parsed: bool = False,
                 **kwargs: Any) -> DaskCudfDataset:
        """Get dataset with initial feature selection.
        Args:
            train_data: Input data.
            features_names: Ignored. Just to keep signature.
            roles: Dict of features roles in format
                ``{RoleX: ['feat0', 'feat1', ...], RoleY: 'TARGET', ....}``.
            roles_parsed: if True, `roles` is already in the internal format
                and role inference is skipped.
            **kwargs: Can be used for target/group/weights.
        Returns:
            Dataset with selected features.
        """
        st = perf_counter()
        logger.info('Train data shape: {}'.format(train_data.shape))
        parsed_roles, kwargs = self._prepare_roles_and_kwargs(roles, train_data,
                                                              roles_parsed = roles_parsed, **kwargs)
        train_data, kwargs = self._prepare_data_and_target(train_data, **kwargs)
        # get subsample if it needed
        subsample = train_data
        zero_partn = None
        train_len = len(subsample)
        if self.samples is not None and self.samples < train_len:
            frac = self.samples/train_len
            subsample = subsample.sample(frac = frac, random_state=42)
        if self.compute:
            # materialize the whole sample on a single GPU (cudf)
            subsample = subsample.compute()
            zero_partn = subsample
        else:
            # keep the sample distributed; use partition 0 for role guessing
            subsample = subsample.persist()
            zero_partn = subsample.get_partition(0).compute()
        # infer roles
        for feat in subsample.columns:
            if not roles_parsed:
                assert isinstance(feat, str), 'Feature names must be string,' \
                    ' find feature name: {}, with type: {}'.format(feat, type(feat))
                if feat in parsed_roles:
                    r = parsed_roles[feat]
                    # handle datetimes
                    if r.name == 'Datetime':
                        # try if it's ok to infer date with given params
                        if self.compute:
                            self._try_datetime(subsample[feat], r)
                        else:
                            subsample[feat].map_partitions(self._try_datetime, r,
                                                           meta=(None, None)).compute()
                    # replace default category dtype for numeric roles dtype
                    # if cat col dtype is numeric
                    if r.name == 'Category':
                        # default category role
                        cat_role = self._get_default_role_from_str('category')
                        # check if role with dtypes was exactly defined
                        try:
                            flg_default_params = feat in roles['category']
                        except KeyError:
                            flg_default_params = False
                        if flg_default_params and\
                                not np.issubdtype(cat_role.dtype, np.number) and\
                                np.issubdtype(subsample.dtypes[feat], np.number):
                            r.dtype=self._get_default_role_from_str('numeric').dtype
                else:
                    # if no - infer
                    is_ok_feature = False
                    if self.compute:
                        is_ok_feature = self._is_ok_feature(subsample[feat])
                    else:
                        is_ok_feature = subsample[feat]\
                            .map_partitions(self._is_ok_feature,
                                            meta=(None, '?')).compute().all()
                    if is_ok_feature:
                        r = self._guess_role(zero_partn[feat])
                    else:
                        r = DropRole()
                # set back
            else:
                # roles were already parsed upstream: plain lookup
                try:
                    r = parsed_roles[feat]
                except KeyError:
                    r = DropRole()
            if r.name != 'Drop':
                self._roles[feat] = r
                self._used_features.append(feat)
            else:
                self._dropped_features.append(feat)
        assert len(self.used_features) > 0,\
            'All features are excluded for some reasons'
        folds = set_sklearn_folds(self.task, kwargs['target'],
                                  cv=self.cv, random_state=self.random_state,
                                  group=None if 'group' not in kwargs else kwargs['group'])
        if folds is not None:
            kwargs['folds'] = folds
        dataset = None
        if self.advanced_roles:
            # advanced guessing runs on partition 0 only, as a CudfDataset
            computed_kwargs = {}
            for item in kwargs:
                computed_kwargs[item] = kwargs[item].get_partition(0).compute()
            data = train_data[self.used_features].get_partition(0).compute()
            dataset = CudfDataset(data=data, roles=self.roles,
                                  task=self.task, **computed_kwargs)
            new_roles = self.advanced_roles_guess(dataset,
                                                  manual_roles=parsed_roles)
            # drop newly rejected features unless the user forced them in
            droplist = [x for x in new_roles if new_roles[x].name == 'Drop' and\
                        not self._roles[x].force_input]
            self.upd_used_features(remove=droplist)
            self._roles = {x: new_roles[x] for x in new_roles if x not in droplist}
            dataset = DaskCudfDataset(train_data[self.used_features],
                                      self.roles, index_ok = self.index_ok, task=self.task, **kwargs)
        else:
            dataset = DaskCudfDataset(data=train_data[self.used_features],
                                      roles=self.roles, index_ok = self.index_ok, task=self.task, **kwargs)
        print("daskcudf reader:", perf_counter() - st)
        return dataset
    def _create_target(self, target: Series):
        """Validate target column and create class mapping if needed
        Args:
            target: Column with target values.
        Returns:
            Transformed target.
        """
        self.class_mapping = None
        if self.task.name != 'reg':
            # expect binary or multiclass here
            cnts = target.value_counts(dropna=False).compute()
            assert not cnts.index.isna().any(), 'Nan in target detected'
            unqiues = cnts.index.values
            srtd = cp.sort(unqiues)
            self._n_classes = len(unqiues)
            # case - target correctly defined and no mapping
            if (cp.arange(srtd.shape[0]) == srtd).all():
                assert srtd.shape[0] > 1, 'Less than 2 unique values in target'
                if self.task.name == 'binary':
                    assert srtd.shape[0] == 2,\
                        'Binary task and more than 2 values in target'
                return target.persist()
            # case - create mapping
            # maps original label -> contiguous class index 0..n-1
            self.class_mapping = {n: x for (x, n) in enumerate(cp.asnumpy(unqiues))}
            # NOTE(review): only the dtype is cast here; labels appear to be
            # remapped later via _apply_class_mapping — confirm against the
            # base-class usage
            return target.astype(np.int32).persist()
        # regression: only check for missing values
        assert not target.isna().any().compute().any(), 'Nan in target detected'
        return target.persist()
    def read(self, data: DataFrame, features_names: Any = None,
             add_array_attrs: bool = False) -> DaskCudfDataset:
        """Read dataset with fitted metadata.
        Args:
            data: Data.
            features_names: Not used.
            add_array_attrs: Additional attributes, like
                target/group/weights/folds.
        Returns:
            Dataset with new columns.
        """
        if isinstance(data, (pd.DataFrame, pd.Series)):
            data = cudf.from_pandas(data, nan_as_null=False)
            data = dask_cudf.from_cudf(data, npartitions=self.npartitions)
        elif isinstance(data, (cudf.DataFrame, cudf.Series)):
            data = dask_cudf.from_cudf(data, npartitions=self.npartitions)
        elif isinstance(data, (dask_cudf.DataFrame, dask_cudf.Series)):
            pass
        elif isinstance(data, (dd.DataFrame, dd.Series)):
            data = data.map_partitions(cudf.DataFrame.from_pandas, nan_as_null=False,
                                       meta=cudf.DataFrame(columns=data.columns)).persist()
        else:
            raise NotImplementedError("Input data type is not supported")
        kwargs = {}
        if add_array_attrs:
            for array_attr in self.used_array_attrs:
                col_name = self.used_array_attrs[array_attr]
                try:
                    val = data[col_name]
                except KeyError:
                    continue
                if array_attr == 'target' and self.class_mapping is not None:
                    kwargs[array_attr] = val.map_partitions(self._apply_class_mapping,
                                                            col_name, meta=val).persist()
                # NOTE(review): attributes other than a mapped target are
                # fetched but never stored in kwargs — looks like a missing
                # else branch; confirm against the CudfReader base class
        dataset = DaskCudfDataset(data[self.used_features], roles=self.roles,
                                  task=self.task, **kwargs)
        return dataset
| [
"lightautoml.dataset.gpu.gpu_dataset.DaskCudfDataset",
"cudf.DataFrame",
"dask_cudf.from_cudf",
"cupy.sort",
"cudf.from_pandas",
"lightautoml.dataset.roles.DropRole",
"time.perf_counter",
"lightautoml.dataset.gpu.gpu_dataset.CudfDataset",
"numpy.issubdtype",
"cupy.asnumpy",
"cupy.arange",
"lig... | [((771, 798), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (788, 798), False, 'import logging\n'), ((846, 883), 'typing.TypeVar', 'TypeVar', (['"""RoleType"""'], {'bound': 'ColumnRole'}), "('RoleType', bound=ColumnRole)\n", (853, 883), False, 'from typing import TypeVar\n'), ((4099, 4113), 'time.perf_counter', 'perf_counter', ([], {}), '()\n', (4111, 4113), False, 'from time import perf_counter\n'), ((7746, 7904), 'lightautoml.reader.utils.set_sklearn_folds', 'set_sklearn_folds', (['self.task', "kwargs['target']"], {'cv': 'self.cv', 'random_state': 'self.random_state', 'group': "(None if 'group' not in kwargs else kwargs['group'])"}), "(self.task, kwargs['target'], cv=self.cv, random_state=\n self.random_state, group=None if 'group' not in kwargs else kwargs['group']\n )\n", (7763, 7904), False, 'from lightautoml.reader.utils import set_sklearn_folds\n'), ((12287, 12376), 'lightautoml.dataset.gpu.gpu_dataset.DaskCudfDataset', 'DaskCudfDataset', (['data[self.used_features]'], {'roles': 'self.roles', 'task': 'self.task'}), '(data[self.used_features], roles=self.roles, task=self.task,\n **kwargs)\n', (12302, 12376), False, 'from lightautoml.dataset.gpu.gpu_dataset import DaskCudfDataset\n'), ((2412, 2459), 'cudf.from_pandas', 'cudf.from_pandas', (['train_data'], {'nan_as_null': '(False)'}), '(train_data, nan_as_null=False)\n', (2428, 2459), False, 'import cudf\n'), ((2485, 2546), 'dask_cudf.from_cudf', 'dask_cudf.from_cudf', (['train_data'], {'npartitions': 'self.npartitions'}), '(train_data, npartitions=self.npartitions)\n', (2504, 2546), False, 'import dask_cudf\n'), ((8308, 8383), 'lightautoml.dataset.gpu.gpu_dataset.CudfDataset', 'CudfDataset', ([], {'data': 'data', 'roles': 'self.roles', 'task': 'self.task'}), '(data=data, roles=self.roles, task=self.task, **computed_kwargs)\n', (8319, 8383), False, 'from lightautoml.dataset.gpu.gpu_dataset import CudfDataset\n'), ((8867, 8981), 
'lightautoml.dataset.gpu.gpu_dataset.DaskCudfDataset', 'DaskCudfDataset', (['train_data[self.used_features]', 'self.roles'], {'index_ok': 'self.index_ok', 'task': 'self.task'}), '(train_data[self.used_features], self.roles, index_ok=self.\n index_ok, task=self.task, **kwargs)\n', (8882, 8981), False, 'from lightautoml.dataset.gpu.gpu_dataset import DaskCudfDataset\n'), ((9041, 9165), 'lightautoml.dataset.gpu.gpu_dataset.DaskCudfDataset', 'DaskCudfDataset', ([], {'data': 'train_data[self.used_features]', 'roles': 'self.roles', 'index_ok': 'self.index_ok', 'task': 'self.task'}), '(data=train_data[self.used_features], roles=self.roles,\n index_ok=self.index_ok, task=self.task, **kwargs)\n', (9056, 9165), False, 'from lightautoml.dataset.gpu.gpu_dataset import DaskCudfDataset\n'), ((9826, 9842), 'cupy.sort', 'cp.sort', (['unqiues'], {}), '(unqiues)\n', (9833, 9842), True, 'import cupy as cp\n'), ((11080, 11121), 'cudf.from_pandas', 'cudf.from_pandas', (['data'], {'nan_as_null': '(False)'}), '(data, nan_as_null=False)\n', (11096, 11121), False, 'import cudf\n'), ((11141, 11196), 'dask_cudf.from_cudf', 'dask_cudf.from_cudf', (['data'], {'npartitions': 'self.npartitions'}), '(data, npartitions=self.npartitions)\n', (11160, 11196), False, 'import dask_cudf\n'), ((2696, 2757), 'dask_cudf.from_cudf', 'dask_cudf.from_cudf', (['train_data'], {'npartitions': 'self.npartitions'}), '(train_data, npartitions=self.npartitions)\n', (2715, 2757), False, 'import dask_cudf\n'), ((9228, 9242), 'time.perf_counter', 'perf_counter', ([], {}), '()\n', (9240, 9242), False, 'from time import perf_counter\n'), ((11280, 11335), 'dask_cudf.from_cudf', 'dask_cudf.from_cudf', (['data'], {'npartitions': 'self.npartitions'}), '(data, npartitions=self.npartitions)\n', (11299, 11335), False, 'import dask_cudf\n'), ((7250, 7260), 'lightautoml.dataset.roles.DropRole', 'DropRole', ([], {}), '()\n', (7258, 7260), False, 'from lightautoml.dataset.roles import DropRole\n'), ((7423, 7433), 
'lightautoml.dataset.roles.DropRole', 'DropRole', ([], {}), '()\n', (7431, 7433), False, 'from lightautoml.dataset.roles import DropRole\n'), ((9963, 9987), 'cupy.arange', 'cp.arange', (['srtd.shape[0]'], {}), '(srtd.shape[0])\n', (9972, 9987), True, 'import cupy as cp\n'), ((10394, 10413), 'cupy.asnumpy', 'cp.asnumpy', (['unqiues'], {}), '(unqiues)\n', (10404, 10413), True, 'import cupy as cp\n'), ((6490, 6538), 'numpy.issubdtype', 'np.issubdtype', (['subsample.dtypes[feat]', 'np.number'], {}), '(subsample.dtypes[feat], np.number)\n', (6503, 6538), True, 'import numpy as np\n'), ((6420, 6460), 'numpy.issubdtype', 'np.issubdtype', (['cat_role.dtype', 'np.number'], {}), '(cat_role.dtype, np.number)\n', (6433, 6460), True, 'import numpy as np\n'), ((3126, 3168), 'cudf.DataFrame', 'cudf.DataFrame', ([], {'columns': 'train_data.columns'}), '(columns=train_data.columns)\n', (3140, 3168), False, 'import cudf\n'), ((11603, 11639), 'cudf.DataFrame', 'cudf.DataFrame', ([], {'columns': 'data.columns'}), '(columns=data.columns)\n', (11617, 11639), False, 'import cudf\n')] |
import copy
import glob
import math
import pickle
import random
from typing import Any, Optional, Dict, List, Union, Tuple, Collection, Sequence
import ai2thor.server
import numpy as np
from ai2thor.controller import Controller
from ai2thor.util import metrics
from utils.cache_utils import _str_to_pos, _pos_to_str
from utils.experiment_utils import recursive_update
from utils.system import get_logger
class RoboThorEnvironment:
"""Wrapper for the robo2thor controller providing additional functionality
and bookkeeping.
See [here](https://ai2thor.allenai.org/robothor/documentation) for comprehensive
documentation on RoboTHOR.
# Attributes
controller : The AI2THOR controller.
config : The AI2THOR controller configuration
"""
    def __init__(self, **kwargs):
        """Create the wrapped AI2-THOR controller.

        The default config (rotation step, grid size, stochastic "bot"
        agent, 640x480 frames) can be overridden through **kwargs, except
        agentMode which is always forced back to "bot".
        """
        self.config = dict(
            rotateStepDegrees=30.0,
            visibilityDistance=1.0,
            gridSize=0.25,
            agentType="stochastic",
            continuousMode=True,
            snapToGrid=False,
            agentMode="bot",
            width=640,
            height=480,
        )
        # merge user overrides, but pin the agent mode
        recursive_update(self.config, {**kwargs, "agentMode": "bot"})
        self.controller = Controller(**self.config)
        # cache the reachable points of the initial scene for grid building
        self.known_good_locations: Dict[str, Any] = {
            self.scene_name: copy.deepcopy(self.currently_reachable_points)
        }
        assert len(self.known_good_locations[self.scene_name]) > 10
        # onames = [o['objectId'] for o in self.last_event.metadata['objects']]
        # removed = []
        # for oname in onames:
        #     if 'Painting' in oname:
        #         self.controller.step("RemoveFromScene", objectId=oname)
        #         removed.append(oname)
        # get_logger().info("Removed {} Paintings from {}".format(len(removed), self.scene_name))
        # get_logger().warning("init to scene {} in pos {}".format(self.scene_name, self.agent_state()))
        # npoints = len(self.currently_reachable_points)
        # assert npoints > 100, "only {} reachable points after init".format(npoints)
        # per-scene cache: ({target: distance grid}, xmin, xmax, zmin, zmax)
        self.grids: Dict[str, Tuple[Dict[str, np.array], int, int, int, int]] = {}
        self.initialize_grid()
def initialize_grid_dimensions(
self, reachable_points: Collection[Dict[str, float]]
) -> Tuple[int, int, int, int]:
"""Computes bounding box for reachable points quantized with the
current gridSize."""
points = {
(
round(p["x"] / self.config["gridSize"]),
round(p["z"] / self.config["gridSize"]),
): p
for p in reachable_points
}
assert len(reachable_points) == len(points)
xmin, xmax = min([p[0] for p in points]), max([p[0] for p in points])
zmin, zmax = min([p[1] for p in points]), max([p[1] for p in points])
return xmin, xmax, zmin, zmax
    def access_grid(self, target: str) -> float:
        """Returns the geodesic distance from the quantized location of the
        agent in the current scene's grid to the target object of given
        type.

        Distances are memoized per (scene, target, grid cell).  Cell values:
        < -1.5 (initialized to -2.0) means "not computed yet"; -1.0 means
        "unreachable"; anything >= 0.0 is the geodesic distance.
        """
        if target not in self.grids[self.scene_name][0]:
            # first query for this target in this scene: allocate the cache,
            # filled with the -2.0 "not computed" sentinel
            xmin, xmax, zmin, zmax = self.grids[self.scene_name][1:5]
            nx = xmax - xmin + 1
            nz = zmax - zmin + 1
            self.grids[self.scene_name][0][target] = -2 * np.ones(
                (nx, nz), dtype=np.float64
            )
        # NOTE(review): assumes quantized_agent_state() yields indices that
        # fit the (nx, nz) cache — confirm the offset convention
        p = self.quantized_agent_state()
        if self.grids[self.scene_name][0][target][p[0], p[1]] < -1.5:
            # cache miss: compute the shortest path from the current cell
            corners = self.path_corners(target)
            dist = self.path_corners_to_dist(corners)
            if dist == float("inf"):
                dist = -1.0  # -1.0 for unreachable
            self.grids[self.scene_name][0][target][p[0], p[1]] = dist
            return dist
        return self.grids[self.scene_name][0][target][p[0], p[1]]
def initialize_grid(self) -> None:
"""Initializes grid for current scene if not already initialized."""
if self.scene_name in self.grids:
return
self.grids[self.scene_name] = ({},) + self.initialize_grid_dimensions(self.known_good_locations[self.scene_name]) # type: ignore
def object_reachable(self, object_type: str) -> bool:
"""Determines whether a path can be computed from the discretized
current agent location to the target object of given type."""
return (
self.access_grid(object_type) > -0.5
) # -1.0 for unreachable, 0.0 for end point
def point_reachable(self, xyz: Dict[str, float]) -> bool:
"""Determines whether a path can be computed from the current agent
location to the target point."""
return self.dist_to_point(xyz) > -0.5 # -1.0 for unreachable, 0.0 for end point
def path_corners(
    self, target: Union[str, Dict[str, float]]
) -> List[Dict[str, float]]:
    """Returns an array with a sequence of xyz dictionaries objects
    representing the corners of the shortest path to the object of given
    type or end point location.

    Returns an empty list when the planner cannot find a path.
    (Cleanup: removed a large block of dead commented-out debugging code.)
    """
    pose = self.agent_state()
    position = {k: pose[k] for k in ["x", "y", "z"]}
    try:
        if isinstance(target, str):
            path = metrics.get_shortest_path_to_object_type(
                self.controller,
                target,
                position,
                {**pose["rotation"]} if "rotation" in pose else None,
            )
        else:
            path = metrics.get_shortest_path_to_point(
                self.controller, position, target
            )
    except ValueError:
        get_logger().debug(
            "No path to object {} from {} in {}".format(
                target, position, self.scene_name
            )
        )
        path = []
    finally:
        if isinstance(target, str):
            # Planning a path to an object type moves the agent; restore the
            # original pose before returning.
            self.controller.step("TeleportFull", **pose)
            new_pose = self.agent_state()
            try:
                # Best-effort sanity check that the teleport round-tripped;
                # mismatches (e.g. horizon drift) are deliberately ignored.
                assert abs(new_pose["x"] - pose["x"]) < 1e-5, "wrong x"
                assert abs(new_pose["y"] - pose["y"]) < 1e-5, "wrong y"
                assert abs(new_pose["z"] - pose["z"]) < 1e-5, "wrong z"
                assert (
                    abs(new_pose["rotation"]["x"] - pose["rotation"]["x"]) < 1e-5
                ), "wrong rotation x"
                assert (
                    abs(new_pose["rotation"]["y"] - pose["rotation"]["y"]) < 1e-5
                ), "wrong rotation y"
                assert (
                    abs(new_pose["rotation"]["z"] - pose["rotation"]["z"]) < 1e-5
                ), "wrong rotation z"
                assert (
                    abs((new_pose["horizon"] % 360) - (pose["horizon"] % 360)) < 1e-5
                ), "wrong horizon {} vs {}".format(
                    (new_pose["horizon"] % 360), (pose["horizon"] % 360)
                )
            except Exception:
                pass
    return path
def path_corners_to_dist(self, corners: Sequence[Dict[str, float]]) -> float:
    """Computes the distance covered by the given path described by its
    corners.

    Parameters
    ----------
    corners : sequence of {"x": ..., "z": ...} way-point dicts.

    Returns
    ----------
    float
        Sum of the xz-plane Euclidean segment lengths, or ``inf`` for an
        empty path (the planner found no path).
    """
    if not corners:
        return float("inf")
    # Fixes: the original shadowed the builtin ``sum``; ``math.hypot`` is
    # the idiomatic (and numerically safer) form of sqrt(dx**2 + dz**2).
    total = 0.0
    for prev, curr in zip(corners, corners[1:]):
        total += math.hypot(curr["x"] - prev["x"], curr["z"] - prev["z"])
    return total
def quantized_agent_state(
    self, xz_subsampling: int = 1, rot_subsampling: int = 1
) -> Tuple[int, int, int]:
    """Quantizes agent location (x, z) to a (subsampled) position in a
    fixed size grid derived from the initial set of reachable points; and
    rotation (around y axis) as a (subsampled) discretized angle given the
    current `rotateStepDegrees`.

    Returns a (col, row, rotation_bin) triple of non-negative ints.
    """
    pose = self.agent_state()
    p = {k: float(pose[k]) for k in ["x", "y", "z"]}
    # Grid bounds (in gridSize units) stored when the scene was initialized.
    xmin, xmax, zmin, zmax = self.grids[self.scene_name][1:5]
    # Clamp the agent's cell into the known bounds.
    x = int(np.clip(round(p["x"] / self.config["gridSize"]), xmin, xmax))
    z = int(np.clip(round(p["z"] / self.config["gridSize"]), zmin, zmax))
    # Discretize yaw; the rs/2 shift rounds to the *nearest* rotation step.
    rs = self.config["rotateStepDegrees"] * rot_subsampling
    shifted = pose["rotation"]["y"] + rs / 2
    normalized = shifted % 360.0
    r = int(round(normalized / rs))
    return (x - xmin) // xz_subsampling, (z - zmin) // xz_subsampling, r
def dist_to_object(self, object_type: str) -> float:
    """Minimal geodesic distance to object of given type from agent's
    current location.

    It might return -1.0 for unreachable targets.
    """
    # Delegates to the memoized per-scene grid lookup.
    return self.access_grid(object_type)
def dist_to_point(self, xyz: Dict[str, float]) -> float:
    """Minimal geodesic distance to end point from agent's current
    location.

    It might return -1.0 for unreachable targets.
    """
    length = self.path_corners_to_dist(self.path_corners(xyz))
    # The planner signals "no path" with inf; callers expect -1.0 instead.
    return -1.0 if length == float("inf") else length
def agent_state(self) -> Dict:
    """Return agent position, rotation and horizon as plain floats
    (horizon rounded to one decimal)."""
    meta = self.last_event.metadata["agent"]
    state: Dict = {key: float(val) for key, val in meta["position"].items()}
    state["rotation"] = {key: float(val) for key, val in meta["rotation"].items()}
    state["horizon"] = round(float(meta["cameraHorizon"]), 1)
    return state
def teleport(
    self, pose: Dict[str, float], rotation: Dict[str, float], horizon: float = 0.0
):
    """Teleport the agent to *pose* with the given rotation and horizon;
    return whether the controller reported success."""
    event = self.controller.step(
        "TeleportFull",
        x=pose["x"],
        y=pose["y"],
        z=pose["z"],
        rotation=rotation,
        horizon=horizon,
    )
    return event.metadata["lastActionSuccess"]
def reset(self, scene_name: str = None) -> None:
    """Resets scene to a known initial state.

    When a different scene is requested, the controller is reset to it and
    that scene's reachable points are cached on first visit; finally the
    per-scene distance grid is (lazily) initialized.
    """
    if scene_name is not None and scene_name != self.scene_name:
        self.controller.reset(scene_name)
        assert self.last_action_success, "Could not reset to new scene"
        if scene_name not in self.known_good_locations:
            # Cache the reachable points the first time this scene is seen.
            self.known_good_locations[scene_name] = copy.deepcopy(
                self.currently_reachable_points
            )
            assert len(self.known_good_locations[scene_name]) > 10
        # onames = [o['objectId'] for o in self.last_event.metadata['objects']]
        # removed = []
        # for oname in onames:
        #     if 'Painting' in oname:
        #         self.controller.step("RemoveFromScene", objectId=oname)
        #         removed.append(oname)
        # get_logger().info("Removed {} Paintings from {}".format(len(removed), scene_name))
    # else:
    #     assert (
    #         self.scene_name in self.known_good_locations
    #     ), "Resetting scene without known good location"
    #     get_logger().warning("Resetting {} to {}".format(self.scene_name, self.known_good_locations[self.scene_name]))
    #     self.controller.step("TeleportFull", **self.known_good_locations[self.scene_name])
    #     assert self.last_action_success, "Could not reset to known good location"
    #     npoints = len(self.currently_reachable_points)
    #     assert npoints > 100, "only {} reachable points after reset".format(npoints)
    self.initialize_grid()
def randomize_agent_location(
    self, seed: int = None, partial_position: Optional[Dict[str, float]] = None
) -> Dict[str, Union[Dict[str, float], float]]:
    """Teleports the agent to a random reachable location in the scene.

    Retries up to 10 times; if every attempt fails the teleport is forced.
    *partial_position* entries override the randomly sampled state.
    Returns the resulting agent state.
    """
    if partial_position is None:
        partial_position = {}
    k = 0
    state: Optional[Dict] = None
    # Keep sampling until the controller accepts the teleport (max 10 tries).
    while k == 0 or (not self.last_action_success and k < 10):
        # self.reset()
        state = {**self.random_reachable_state(seed=seed), **partial_position}
        # get_logger().debug("picked target location {}".format(state))
        self.controller.step("TeleportFull", **state)
        k += 1
    if not self.last_action_success:
        get_logger().warning(
            (
                "Randomize agent location in scene {} and current random state {}"
                " with seed {} and partial position {} failed in "
                "10 attempts. Forcing the action."
            ).format(self.scene_name, state, seed, partial_position)
        )
        # Last resort: force the teleport even if the pose is invalid.
        self.controller.step("TeleportFull", **state, force_action=True)  # type: ignore
        assert self.last_action_success, "Force action failed with {}".format(state)
    # get_logger().debug("location after teleport full {}".format(self.agent_state()))
    return self.agent_state()
def random_reachable_state(
    self, seed: Optional[int] = None
) -> Dict[str, Union[Dict[str, float], float]]:
    """Returns a random reachable location in the scene.

    Samples a cached known-good xyz position and a yaw that is a multiple
    of ``rotateStepDegrees``; horizon is fixed at 0.0.
    """
    if seed is not None:
        random.seed(seed)
    # xyz = random.choice(self.currently_reachable_points)
    assert len(self.known_good_locations[self.scene_name]) > 10
    # Deep copy so callers can mutate the dict without corrupting the cache.
    xyz = copy.deepcopy(random.choice(self.known_good_locations[self.scene_name]))
    rotation = random.choice(
        np.arange(0.0, 360.0, self.config["rotateStepDegrees"])
    )
    horizon = 0.0  # random.choice([0.0, 30.0, 330.0])
    return {
        **{k: float(v) for k, v in xyz.items()},
        "rotation": {"x": 0.0, "y": float(rotation), "z": 0.0},
        "horizon": float(horizon),
    }
def known_good_locations_list(self):
    """Return the cached list of known-good (reachable) locations for the
    current scene."""
    return self.known_good_locations[self.scene_name]
@property
def currently_reachable_points(self) -> List[Dict[str, float]]:
    """List of {"x": x, "y": y, "z": z} locations in the scene that are
    currently reachable.

    Note: every access issues a "GetReachablePositions" controller action.
    """
    self.controller.step(action="GetReachablePositions")
    return self.last_action_return
@property
def scene_name(self) -> str:
    """Current ai2thor scene (any "_physics" suffix stripped)."""
    return self.controller.last_event.metadata["sceneName"].replace("_physics", "")
@property
def current_frame(self) -> np.ndarray:
    """Returns rgb image corresponding to the agent's egocentric view."""
    return self.controller.last_event.frame
@property
def current_depth(self) -> np.ndarray:
    """Returns depth image corresponding to the agent's egocentric view."""
    return self.controller.last_event.depth_frame
@property
def last_event(self) -> ai2thor.server.Event:
    """Last event returned by the controller."""
    return self.controller.last_event
@property
def last_action(self) -> str:
    """Last action, as a string, taken by the agent."""
    return self.controller.last_event.metadata["lastAction"]
@property
def last_action_success(self) -> bool:
    """Was the last action taken by the agent a success?"""
    return self.controller.last_event.metadata["lastActionSuccess"]
@property
def last_action_return(self) -> Any:
    """Get the value returned by the last action (if applicable).

    For an example of an action that returns a value, see
    `"GetReachablePositions"`.
    """
    return self.controller.last_event.metadata["actionReturn"]
def step(self, action_dict: Dict) -> ai2thor.server.Event:
    """Take a step in the ai2thor environment.

    *action_dict* is forwarded verbatim as keyword arguments (it must at
    least contain an "action" key)."""
    return self.controller.step(**action_dict)
def stop(self):
    """Stops the ai2thor controller.

    Shutdown errors are logged and swallowed (best effort)."""
    try:
        self.controller.stop()
    except Exception as e:
        get_logger().warning(str(e))
def all_objects(self) -> List[Dict[str, Any]]:
    """Return all object metadata from the last controller event."""
    return self.controller.last_event.metadata["objects"]
def all_objects_with_properties(
    self, properties: Dict[str, Any]
) -> List[Dict[str, Any]]:
    """Find all objects whose metadata matches every given key/value pair."""

    def _matches(obj: Dict[str, Any]) -> bool:
        # An object qualifies only if all requested properties agree.
        return all(obj[key] == val for key, val in properties.items())

    return [obj for obj in self.all_objects() if _matches(obj)]
def visible_objects(self) -> List[Dict[str, Any]]:
    """Return all objects currently visible to the agent."""
    return self.all_objects_with_properties({"visible": True})
# def get_shortest_path_to_point(
# controller,
# initial_position,
# target_position
# ):
# """
# Computes the shortest path to an end point from an initial position using a controller
# :param controller: agent controller
# :param initial_position: dict(x=float, y=float, z=float) with the desired initial position
# :param target_position: dict(x=float, y=float, z=float) with the desired target position
# """
# args = dict(
# action='GetShortestPathToPoint',
# position=initial_position,
# x=target_position['x'],
# y=target_position['y'],
# z=target_position['z']
# )
# event = controller.step(args)
# if event.metadata['lastActionSuccess']:
# return event.metadata['actionReturn']['corners']
# else:
# raise ValueError(
# "Unable to find shortest path for target point '{}'".format(
# target_position
# )
# )
class RoboThorCachedEnvironment:
    """Wrapper for the robo2thor controller providing additional functionality
    and bookkeeping.

    See [here](https://ai2thor.allenai.org/robothor/documentation) for comprehensive
    documentation on RoboTHOR.

    # Attributes

    controller : The AI2THOR controller.
    config : The AI2THOR controller configuration

    Fixes: pickle file handles are now closed via ``with``; the bare
    ``except:`` in ``reset`` was narrowed to ``except Exception``.
    """

    def __init__(self, **kwargs):
        self.config = dict(
            rotateStepDegrees=30.0,
            visibilityDistance=1.0,
            gridSize=0.25,
            # agentType="stochastic",
            continuousMode=True,
            snapToGrid=False,
            agentMode="bot",
            width=640,
            height=480,
        )
        self.env_root_dir = kwargs["env_root_dir"]
        # Pick an arbitrary cached scene to start from.
        random_scene = random.choice(list(glob.glob(self.env_root_dir + "/*.pkl")))
        with open(random_scene, "rb") as handle:
            self.view_cache = pickle.load(handle)
        # view_cache maps position string -> rotation -> cached event.
        self.agent_position = list(self.view_cache.keys())[0]
        self.agent_rotation = list(self.view_cache[self.agent_position].keys())[0]
        self.known_good_locations: Dict[str, Any] = {
            self.scene_name: copy.deepcopy(self.currently_reachable_points)
        }
        self._last_action = "None"
        assert len(self.known_good_locations[self.scene_name]) > 10

    def agent_state(self) -> Dict[str, Union[Dict[str, float], float]]:
        """Return agent position, rotation and horizon."""
        return {
            **_str_to_pos(self.agent_position),
            "rotation": {"x": 0.0, "y": self.agent_rotation, "z": 0.0},
            "horizon": 1.0,
        }

    def teleport(
        self, pose: Dict[str, float], rotation: Dict[str, float], horizon: float = 0.0
    ):
        """Move the agent to *pose*; yaw is snapped down to a multiple of 90°
        because the cache only stores the four cardinal headings."""
        self.agent_position = _pos_to_str(pose)
        self.agent_rotation = (
            math.floor(rotation["y"] / 90.0) * 90
        )  # round to nearest 90 degree angle
        return True

    def reset(self, scene_name: str = None) -> None:
        """Resets scene to a known initial state."""
        try:
            with open(self.env_root_dir + "/" + scene_name + ".pkl", "rb") as handle:
                self.view_cache = pickle.load(handle)
            self.agent_position = list(self.view_cache.keys())[0]
            self.agent_rotation = list(self.view_cache[self.agent_position].keys())[0]
            self.known_good_locations[self.scene_name] = copy.deepcopy(
                self.currently_reachable_points
            )
            self._last_action = "None"
            assert len(self.known_good_locations[self.scene_name]) > 10
        except Exception:  # was a bare except; keep best-effort semantics
            print("Could not load scene:", scene_name)

    def known_good_locations_list(self):
        """Return the cached reachable locations for the current scene."""
        return self.known_good_locations[self.scene_name]

    @property
    def currently_reachable_points(self) -> List[Dict[str, float]]:
        """List of {"x": x, "y": y, "z": z} locations in the scene that are
        currently reachable (i.e. present in the view cache)."""
        return [_str_to_pos(pos) for pos in self.view_cache]

    @property
    def scene_name(self) -> str:
        """Current ai2thor scene."""
        return self.view_cache[self.agent_position][self.agent_rotation].metadata[
            "sceneName"
        ]

    @property
    def current_frame(self) -> np.ndarray:
        """Returns rgb image corresponding to the agent's egocentric view."""
        return self.view_cache[self.agent_position][self.agent_rotation].frame

    @property
    def current_depth(self) -> np.ndarray:
        """Returns depth image corresponding to the agent's egocentric view."""
        return self.view_cache[self.agent_position][self.agent_rotation].depth_frame

    @property
    def last_event(self) -> ai2thor.server.Event:
        """Last (cached) event for the agent's current position/rotation."""
        return self.view_cache[self.agent_position][self.agent_rotation]

    @property
    def last_action(self) -> str:
        """Last action, as a string, taken by the agent."""
        return self._last_action

    @property
    def last_action_success(self) -> bool:
        """In the cached environment, all actions succeed."""
        return True

    @property
    def last_action_return(self) -> Any:
        """Get the value returned by the last action (if applicable).

        For an example of an action that returns a value, see
        `"GetReachablePositions"`.
        """
        return self.view_cache[self.agent_position][self.agent_rotation].metadata[
            "actionReturn"
        ]

    def step(
        self, action_dict: Dict[str, Union[str, int, float]]
    ) -> ai2thor.server.Event:
        """Take a step in the cached ai2thor environment.

        Only RotateLeft/RotateRight/MoveAhead mutate state; MoveAhead is
        ignored when the target cell is not in the cache.
        """
        self._last_action = action_dict["action"]
        if action_dict["action"] == "RotateLeft":
            self.agent_rotation = (self.agent_rotation - 90.0) % 360.0
        elif action_dict["action"] == "RotateRight":
            self.agent_rotation = (self.agent_rotation + 90.0) % 360.0
        elif action_dict["action"] == "MoveAhead":
            pos = _str_to_pos(self.agent_position)
            if self.agent_rotation == 0.0:
                pos["x"] += 0.25
            elif self.agent_rotation == 90.0:
                pos["z"] += 0.25
            elif self.agent_rotation == 180.0:
                pos["x"] -= 0.25
            elif self.agent_rotation == 270.0:
                pos["z"] -= 0.25
            pos_string = _pos_to_str(pos)
            if pos_string in self.view_cache:
                self.agent_position = _pos_to_str(pos)
        # NOTE(review): annotated -> Event but returns a bool — TODO reconcile.
        return True

    def stop(self):
        """Stops the ai2thor controller."""
        print("No need to stop cached environment")

    def all_objects(self) -> List[Dict[str, Any]]:
        """Return all object metadata."""
        return self.view_cache[self.agent_position][self.agent_rotation].metadata[
            "objects"
        ]

    def all_objects_with_properties(
        self, properties: Dict[str, Any]
    ) -> List[Dict[str, Any]]:
        """Find all objects with the given properties."""
        objects = []
        for o in self.all_objects():
            satisfies_all = True
            for k, v in properties.items():
                if o[k] != v:
                    satisfies_all = False
                    break
            if satisfies_all:
                objects.append(o)
        return objects

    def visible_objects(self) -> List[Dict[str, Any]]:
        """Return all visible objects."""
        return self.all_objects_with_properties({"visible": True})
| [
"utils.cache_utils._str_to_pos",
"copy.deepcopy",
"utils.cache_utils._pos_to_str",
"math.sqrt",
"ai2thor.controller.Controller",
"math.floor",
"random.choice",
"numpy.ones",
"utils.system.get_logger",
"pickle.load",
"random.seed",
"numpy.arange",
"glob.glob",
"utils.experiment_utils.recurs... | [((1126, 1187), 'utils.experiment_utils.recursive_update', 'recursive_update', (['self.config', "{**kwargs, 'agentMode': 'bot'}"], {}), "(self.config, {**kwargs, 'agentMode': 'bot'})\n", (1142, 1187), False, 'from utils.experiment_utils import recursive_update\n'), ((1214, 1239), 'ai2thor.controller.Controller', 'Controller', ([], {}), '(**self.config)\n', (1224, 1239), False, 'from ai2thor.controller import Controller\n'), ((22657, 22676), 'pickle.load', 'pickle.load', (['handle'], {}), '(handle)\n', (22668, 22676), False, 'import pickle\n'), ((23538, 23555), 'utils.cache_utils._pos_to_str', '_pos_to_str', (['pose'], {}), '(pose)\n', (23549, 23555), False, 'from utils.cache_utils import _str_to_pos, _pos_to_str\n'), ((1323, 1369), 'copy.deepcopy', 'copy.deepcopy', (['self.currently_reachable_points'], {}), '(self.currently_reachable_points)\n', (1336, 1369), False, 'import copy\n'), ((11185, 11296), 'math.sqrt', 'math.sqrt', (["((corners[it]['x'] - corners[it - 1]['x']) ** 2 + (corners[it]['z'] -\n corners[it - 1]['z']) ** 2)"], {}), "((corners[it]['x'] - corners[it - 1]['x']) ** 2 + (corners[it]['z'\n ] - corners[it - 1]['z']) ** 2)\n", (11194, 11296), False, 'import math\n'), ((17216, 17233), 'random.seed', 'random.seed', (['seed'], {}), '(seed)\n', (17227, 17233), False, 'import random\n'), ((17393, 17450), 'random.choice', 'random.choice', (['self.known_good_locations[self.scene_name]'], {}), '(self.known_good_locations[self.scene_name])\n', (17406, 17450), False, 'import random\n'), ((17498, 17553), 'numpy.arange', 'np.arange', (['(0.0)', '(360.0)', "self.config['rotateStepDegrees']"], {}), "(0.0, 360.0, self.config['rotateStepDegrees'])\n", (17507, 17553), True, 'import numpy as np\n'), ((22928, 22974), 'copy.deepcopy', 'copy.deepcopy', (['self.currently_reachable_points'], {}), '(self.currently_reachable_points)\n', (22941, 22974), False, 'import copy\n'), ((23251, 23283), 'utils.cache_utils._str_to_pos', '_str_to_pos', 
(['self.agent_position'], {}), '(self.agent_position)\n', (23262, 23283), False, 'from utils.cache_utils import _str_to_pos, _pos_to_str\n'), ((23600, 23632), 'math.floor', 'math.floor', (["(rotation['y'] / 90.0)"], {}), "(rotation['y'] / 90.0)\n", (23610, 23632), False, 'import math\n'), ((23933, 23952), 'pickle.load', 'pickle.load', (['handle'], {}), '(handle)\n', (23944, 23952), False, 'import pickle\n'), ((24190, 24236), 'copy.deepcopy', 'copy.deepcopy', (['self.currently_reachable_points'], {}), '(self.currently_reachable_points)\n', (24203, 24236), False, 'import copy\n'), ((24756, 24772), 'utils.cache_utils._str_to_pos', '_str_to_pos', (['pos'], {}), '(pos)\n', (24767, 24772), False, 'from utils.cache_utils import _str_to_pos, _pos_to_str\n'), ((3359, 3394), 'numpy.ones', 'np.ones', (['(nx, nz)'], {'dtype': 'np.float64'}), '((nx, nz), dtype=np.float64)\n', (3366, 3394), True, 'import numpy as np\n'), ((5354, 5487), 'ai2thor.util.metrics.get_shortest_path_to_object_type', 'metrics.get_shortest_path_to_object_type', (['self.controller', 'target', 'position', "({**pose['rotation']} if 'rotation' in pose else None)"], {}), "(self.controller, target, position,\n {**pose['rotation']} if 'rotation' in pose else None)\n", (5394, 5487), False, 'from ai2thor.util import metrics\n'), ((5624, 5693), 'ai2thor.util.metrics.get_shortest_path_to_point', 'metrics.get_shortest_path_to_point', (['self.controller', 'position', 'target'], {}), '(self.controller, position, target)\n', (5658, 5693), False, 'from ai2thor.util import metrics\n'), ((14200, 14246), 'copy.deepcopy', 'copy.deepcopy', (['self.currently_reachable_points'], {}), '(self.currently_reachable_points)\n', (14213, 14246), False, 'import copy\n'), ((22547, 22586), 'glob.glob', 'glob.glob', (["(self.env_root_dir + '/*.pkl')"], {}), "(self.env_root_dir + '/*.pkl')\n", (22556, 22586), False, 'import glob\n'), ((16150, 16162), 'utils.system.get_logger', 'get_logger', ([], {}), '()\n', (16160, 16162), False, 'from 
utils.system import get_logger\n'), ((26795, 26827), 'utils.cache_utils._str_to_pos', '_str_to_pos', (['self.agent_position'], {}), '(self.agent_position)\n', (26806, 26827), False, 'from utils.cache_utils import _str_to_pos, _pos_to_str\n'), ((27168, 27184), 'utils.cache_utils._pos_to_str', '_pos_to_str', (['pos'], {}), '(pos)\n', (27179, 27184), False, 'from utils.cache_utils import _str_to_pos, _pos_to_str\n'), ((5771, 5783), 'utils.system.get_logger', 'get_logger', ([], {}), '()\n', (5781, 5783), False, 'from utils.system import get_logger\n'), ((19906, 19918), 'utils.system.get_logger', 'get_logger', ([], {}), '()\n', (19916, 19918), False, 'from utils.system import get_logger\n'), ((27269, 27285), 'utils.cache_utils._pos_to_str', '_pos_to_str', (['pos'], {}), '(pos)\n', (27280, 27285), False, 'from utils.cache_utils import _str_to_pos, _pos_to_str\n')] |
import numpy as np
from random import choice
from composer.instruments import Instruments, DrumInstruments
INDEX_TO_NOTENUMBER = 20  # Adding this to a key index (1..88) gives the MIDI note number
# e.g. the common left-hand F = 20 + 21

# Diatonic chord list for the key of F major; list index is the chord id
# used throughout this module (see CHORDS_DICT for the labels).
F_DIATONIC = [
    ["F2", "A2", "C3"],   # I      0
    ["G2", "A#2", "D3"],  # IIm    1
    ["A2", "C3", "E3"],   # IIIm   2
    ["A#2", "D3", "F3"],  # IV     3
    ["C3", "E3", "G3"],   # V      4
    ["D3", "F3", "A3"],   # VIm    5
    ["E3", "G3", "A#3"],  # VIIdim 6
    # 7th chords from here on
    ["F2", "A2", "C3", "E3"],    # I7      7
    ["G2", "A#2", "D3", "F3"],   # IIm7    8
    ["A2", "C3", "E3", "G3"],    # IIIm7   9
    ["A#2", "D3", "F3", "A3"],   # IV7     10
    ["C3", "E3", "G3", "A#3"],   # V7      11
    ["D3", "F3", "A3", "C4"],    # VIm7    12
    ["E3", "G3", "A#3", "D4"],   # VIIdim7 13
    ["F2", "A#2", "C3"],  # Isus4  14
]

# Human-readable Roman-numeral labels, same indexing as F_DIATONIC.
CHORDS_DICT = [
    "Ⅰ",
    "Ⅱm",
    "Ⅲm",
    "Ⅳ",
    "Ⅴ",
    "Ⅵm",
    "Ⅶdim",
    # 7th chords from here on
    "Ⅰ7",
    "Ⅱm7",
    "Ⅲm7",
    "Ⅳ7",
    "Ⅴ7",
    "Ⅵm7",
    "Ⅶdim7",
    "Ⅰsus4",
]
def create_backing(key_note_list, rhythm_denominator, emotion_value, emotion_dict, debug=False):
    """Create the chord backing and the bass line from the given parameters.

    Parameters
    ----------
    key_note_list : [int/float]
        Start times of the 21 notes of great_ocean.
    rhythm_denominator : int
        Time signature; 3 or 4 expected.
    emotion_value : float
        Overall sentiment score from the emotion analysis.
    emotion_dict : dict
        IBM emotion-analysis result; keys "joy", "sadness", "fear",
        "disgust", "anger" are read.
    debug : bool
        Enables debug prints/validation. Normally False.

    Returns
    ----------
    [(int, str, float, float)]
        Backing notes: (velocity, pitch name, start, end).
    [(int, str, float, float)]
        Bass notes: (velocity, pitch name, start, end).
    """
    if len(key_note_list) != 21:
        raise ValueError(f"length of key_note_list must be 21, but input was {len(key_note_list)}")
    b = 5  # baseline weight (chords to avoid overusing, e.g. VIIdim, get weight 1)
    chords_candidate_list = [
        {
            # candidate: usable chord ids (indices into F_DIATONIC)
            # -1: keep the previous chord going
            # -2: N.C. (no chord)
            "candidate": [-2],  # C
            # probability: weight; larger values are picked more often
            "probability": [1],
        },
        {
            "candidate": [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14],  # D
            "probability": [b, b, b, b, b, b, 1, b, b, b, b, b, b, 1, b],
        },
        {
            "candidate": [-1],  # C
            "probability": [1],
        },
        {
            "candidate": [0, 1, 3, 5, 6, 7, 9, 11, 12],  # F
            "probability": [b, b, b, b, 1, b, b, b, b],
        },
        {
            "candidate": [0, 2, 4, 6, 7, 9, 11, 13],  # E
            "probability": [b, b, b, 1, b, b, b, 1],
        },
        {
            "candidate": [-2],  # C
            "probability": [1],
        },
        {
            "candidate": [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14],  # D
            "probability": [b, b, b, b, b, b, 1, b, b, b, b, b, b, 1, b],
        },
        {
            "candidate": [-2],  # C
            "probability": [1],
        },
        {
            "candidate": [0, 1, 3, 4, 7, 8, 9, 11],  # G
            "probability": [b, b, 1, b, b, b, b, b],
        },
        {
            "candidate": [0, 1, 3, 5, 8, 10, 12],  # F
            "probability": [b, b, b, b, b, b, b],
        },
        {
            "candidate": [-2],  # C
            "probability": [1],
        },
        {
            "candidate": [0, 2, 3, 4, 5, 9, 10, 11],  # C(hi)
            "probability": [b, b, 1, b, b, b, b, b],
        },
        {
            "candidate": [-1, 0, 1, 2, 5, 8, 9, 10, 12],  # A
            "probability": [8 * b, b, b, b, b, b, b, b, b],
        },
        {
            "candidate": [-1, 0, 3, 5, 8, 10, 12],  # F
            "probability": [8 * b, b, b, b, b, b, b],
        },
        {
            "candidate": [-1, 0],  # E
            "probability": [b, b],
        },
        {
            "candidate": [1, 3, 8, 10],  # D
            "probability": [b, b, b, b],
        },
        {
            "candidate": [-2, 1, 3, 8],  # B♭
            "probability": [3 * b, b, b, b],
        },
        {
            "candidate": [1, 2, 3, 4, 8, 10, 11, 12],  # A
            "probability": [b, b, b, b, b, b, b, b],
        },
        {
            "candidate": [-1, 1, 3, 8, 10],  # F
            "probability": [4 * b, b, b, b, b],
        },
        {
            "candidate": [4, 11],  # G
            "probability": [3 * b, b],
        },
        {
            "candidate": [0, 7, 14],  # F
            "probability": [3 * b, 2 * b, 1 * b],
        },
    ]  # chord candidates and their weights, one entry per key note
    # Debug check: every candidate list must align with its weight list.
    if debug:
        for i in range(len(chords_candidate_list)):
            if len(chords_candidate_list[i]["candidate"]) != len(chords_candidate_list[i]["probability"]):
                raise Exception("chords_candidate_list おかしい")
    # Shape the emotion-analysis result into integer weight adjustments.
    emotion_weight = 4
    negaposi_weight = 2
    # Integer because the value is used as a repetition count below.
    posi = int(emotion_value * negaposi_weight + emotion_dict["joy"] * (emotion_weight - negaposi_weight) - (emotion_dict["sadness"] + emotion_dict["fear"] + emotion_dict["disgust"] + emotion_dict["anger"]) / 4)
    nega = negaposi_weight - posi  # likewise
    posi_chords_idx = [0, 3, 4, 5, 7, 10, 11, 13]
    nega_chords_idx = [2, 6, 8, 13, 14]
    # Bias the chord weights by the emotion result.
    # BUG FIX: the original compared the *probability* value against the chord
    # id lists; the chord identity lives in "candidate" at the same index.
    for i in range(len(chords_candidate_list)):
        for idx in range(len(chords_candidate_list[i]["probability"])):
            chord = chords_candidate_list[i]["candidate"][idx]
            if chord in posi_chords_idx:
                chords_candidate_list[i]["probability"][idx] += posi
            elif chord in nega_chords_idx:
                chords_candidate_list[i]["probability"][idx] += nega
            else:
                chords_candidate_list[i]["probability"][idx] += emotion_weight // 2
    chords_progression = []
    # Build the chord progression by weighted sampling.
    for i in range(len(key_note_list)):
        candidate = []
        for idx in range(len(chords_candidate_list[i]["probability"])):
            for _ in range(chords_candidate_list[i]["probability"][idx]):
                candidate.append(chords_candidate_list[i]["candidate"][idx])
            # Raise the chance of a dominant motion after the previous chord.
            if len(chords_progression) != 0 and chords_progression[-1] != -1 and chords_progression[-1] != -2:
                if chords_candidate_list[i]["candidate"][idx] in [chords_progression[-1] - 4, chords_progression[-1] + 3, chords_progression[-1] - 11]:
                    # For now simply bump the weight by +2.
                    candidate.append(chords_candidate_list[i]["candidate"][idx])
                    candidate.append(chords_candidate_list[i]["candidate"][idx])
        chords_progression.append(np.random.choice(candidate))
    # ----debug output-----
    if debug:
        for i in range(21):
            if chords_progression[i] != -1 and chords_progression[i] != -2:
                print(CHORDS_DICT[chords_progression[i]], end=" ")
            else:
                print(chords_progression[i], end=" ")
        print("")
    # ----end debug output-----
    # Build the bass line.
    notes_list_base = create_baseline(key_note_list, rhythm_denominator, chords_progression)
    notes_list_chords = []
    # Build the backing from chords_progression.
    threshold = 0.5  # 0..1: probability of strummed chords vs arpeggio (smaller -> more arpeggio)
    style = "s" if np.random.rand() < threshold else "a"
    vel = 60  # velocity
    if style == "s":
        # Strummed chords: drop boundaries where the previous chord continues.
        # NOTE(review): this mutates the caller's key_note_list in place and
        # leaves chords_progression misaligned with it — TODO confirm intended.
        for i in reversed(range(len(key_note_list))):
            if chords_progression[i] == -1:
                key_note_list.pop(i)
        # All but the last chord.
        for i in range(len(key_note_list) - 1):
            # N.C.
            if chords_progression[i] == -2:
                continue
            duration = key_note_list[i + 1] - key_note_list[i]
            rhythm = create_chord_rhythm(duration)
            base_time = key_note_list[i]
            for r in rhythm:
                for n in F_DIATONIC[chords_progression[i]]:
                    # NOTE(review): note end uses the full chord duration, not
                    # the stroke length r, so strokes overlap — TODO confirm.
                    notes_list_chords.append((vel, n, base_time, base_time + duration))
                base_time += r
        # Final chord.
        for n in F_DIATONIC[chords_progression[-1]]:
            notes_list_chords.append((vel, n, key_note_list[-1], key_note_list[-1] + 1))
    elif style == "a":
        # Arpeggio: all but the last chord.
        for i in range(len(key_note_list) - 1):
            # N.C.
            if chords_progression[i] == -2:
                continue
            # Continue the previous chord.
            elif chords_progression[i] == -1:
                i -= 1
            duration = key_note_list[i + 1] - key_note_list[i]
            arpeggio = create_chord_arpeggio(duration, F_DIATONIC[chords_progression[i]], density=0)  # TODO: vary density by keyword
            base_time = key_note_list[i]
            for n in arpeggio:
                notes_list_chords.append((vel, n[0], base_time, base_time + n[1]))
                base_time += n[1]
        # Final chord.
        for n in F_DIATONIC[chords_progression[-1]]:
            notes_list_chords.append((vel, n, key_note_list[-1], key_note_list[-1] + 1))
    return notes_list_chords, notes_list_base
def create_chord_rhythm(chord_duration):
"""
コードをじゃかじゃか弾く場合のリズムを決定
Parameters
chord_duration: int/float
そのコードを継続する時間
0.5→1拍
Rerturns
list(int/float)
コードのストロークの継続時間の配列
"""
chords_durations = []
# 長さと, それに対応するリズムの刻み方
duration_candidate = {
0: [[]],
0.25: [[0.25]],
0.5: [[0.125, 0.25, 0.125], [0.375, 0.125]],
0.75: [[0.125, 0.25, 0.25], [0.25, 0.25, 0.125]],
1: [[0.25, 0.5, 0.25], [0.375, 0.375, 0.25], [0.5, 0.25, 0.25]],
}
# 整数部分を先に埋める
for _ in range(int(chord_duration)):
chords_durations += choice(duration_candidate[1])
# 残った部分を埋める
chords_durations += choice(duration_candidate[chord_duration - int(chord_duration)])
return chords_durations
# ↓ボツにした実装
duration_fixed = chord_duration * 2
chords_durations = np.array([])
while True:
if (len(chords_durations) == 0):
max_duration = np.min([(duration_fixed) * 4 , 4])
chords_durations = np.append(chords_durations, 0.25 * np.random.randint(1, max_duration + 1))
else:
if (np.sum(chords_durations) == duration_fixed):
break
max_duration = np.min([(duration_fixed - np.sum(chords_durations)) * 4 , 4])
chords_durations = np.append(chords_durations, 0.25 * np.random.randint(1, max_duration + 1))
return (chords_durations / 2).tolist()
def create_chord_arpeggio(chords_duration, notes_list, density):
"""
コードをアルペジオで弾く場合のリズムと音を決定
Parameters
chord_duration: int/float
そのコードを継続する時間
0.5→1拍
notes_lsit: list[int]
コードの構成音
density: 0 or 1
0: 密度低め
1: 密度高め
Rerturns
list(tuple(int, int/float))
音高と継続時間のタプルのリスト
"""
if (density != 0 and density != 1):
raise ValueError("argument [density] must be 0 or 1")
# ひとつひとつの音の長さの候補
note_duration = [0.125, 0.25]
arpeggio_ary = [(notes_list[0], note_duration[density]),] # (音高, 長さ)のタプルのリスト
for i in range(int(chords_duration / note_duration[density]) - 1):
# 同じ音が連続しないための処理
t = np.random.choice(notes_list)
while t == arpeggio_ary[-1][0]:
t = np.random.choice(notes_list)
arpeggio_ary.append((t, note_duration[density]))
return arpeggio_ary
def create_baseline(key_note_list, rhythm_denominator, chords_progression):
"""
入力されたパラメータを基に曲を作成する
Parameters
----------
key_note_list : [int/float]
great_oceanの21個の音の開始地点を入れたリスト
rhythm_denominator : int
何拍子か? 3or4を想定
chords_progression : [int]
コードの数字が入ったリスト
Returns
----------
[(int, str, float, float)]
ベースについて, 順にvelocity, 音高("4"みたいな), start, end が入る
"""
vel = 60
notes_list_base = []
Fdur_NOTES = ["D2", "E2", "F2", "G2", "A2", "A#2", "C3"]
# 長さと, それに対応するリズムの刻み方
duration_candidate = {
0: [[]],
0.25: [[0.25]],
0.5: [[0.125, 0.25, 0.125], [0.375, 0.125]],
0.75: [[0.125, 0.25, 0.25], [0.25, 0.25, 0.125]],
1: [[0.25, 0.5, 0.25], [0.375, 0.375, 0.25], [0.5, 0.25, 0.25]],
}
# コンセプト
if (False):
pass
else:
# 最後以外を生成
i = 0
# ベースを入れる最初の位置を決める chords_progressionでコードがしていされたタイミング
while chords_progression[i] != -1 and chords_progression[i] != -2:
i += 1
base_time = key_note_list[i]
for i in range(len(chords_progression) - 1):
if (chords_progression[i] == -2):
continue
# 前のコードを続ける場合
elif (chords_progression[i] == -1):
i -= 1
duration_list = []
chord_duration = key_note_list[i + 1] - key_note_list[i]
# 整数部分を先に埋める
for _ in range(int(chord_duration)):
duration_list += choice(duration_candidate[1])
# 残った部分を埋める
duration_list += choice(duration_candidate[chord_duration - int(chord_duration)])
# 音を当てはめる
for j in range(len(duration_list)):
n = choice(F_DIATONIC[chords_progression[i]] + [F_DIATONIC[chords_progression[i]][0]] + [F_DIATONIC[chords_progression[i]][2]] + Fdur_NOTES)
notes_list_base.append((vel, n, base_time, base_time + duration_list[j]))
base_time += duration_list[j]
# 最後の音を入れる
notes_list_base.append((vel, F_DIATONIC[chords_progression[-1]][0], base_time, base_time + 1))
return notes_list_base
# 動作テスト
if __name__ == "__main__":
for _ in range(5):
back = create_backing(
key_note_list=[
2,3,4,5,6, #Happy Birthday to you
8,9,10,11,12, #Happy Birthday to you
14,15,16,17,18,19, #Happy Birthday dear ??
21,22,23,24,25 #Happy Birthday to you
],
rhythm_denominator=3,
debug=True
) | [
"numpy.sum",
"random.choice",
"numpy.min",
"numpy.random.randint",
"numpy.array",
"numpy.random.choice",
"numpy.random.rand"
] | [((10577, 10589), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (10585, 10589), True, 'import numpy as np\n'), ((10333, 10362), 'random.choice', 'choice', (['duration_candidate[1]'], {}), '(duration_candidate[1])\n', (10339, 10362), False, 'from random import choice\n'), ((11861, 11889), 'numpy.random.choice', 'np.random.choice', (['notes_list'], {}), '(notes_list)\n', (11877, 11889), True, 'import numpy as np\n'), ((7064, 7091), 'numpy.random.choice', 'np.random.choice', (['candidate'], {}), '(candidate)\n', (7080, 7091), True, 'import numpy as np\n'), ((7892, 7908), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (7906, 7908), True, 'import numpy as np\n'), ((10674, 10705), 'numpy.min', 'np.min', (['[duration_fixed * 4, 4]'], {}), '([duration_fixed * 4, 4])\n', (10680, 10705), True, 'import numpy as np\n'), ((11946, 11974), 'numpy.random.choice', 'np.random.choice', (['notes_list'], {}), '(notes_list)\n', (11962, 11974), True, 'import numpy as np\n'), ((10845, 10869), 'numpy.sum', 'np.sum', (['chords_durations'], {}), '(chords_durations)\n', (10851, 10869), True, 'import numpy as np\n'), ((13597, 13626), 'random.choice', 'choice', (['duration_candidate[1]'], {}), '(duration_candidate[1])\n', (13603, 13626), False, 'from random import choice\n'), ((13836, 13977), 'random.choice', 'choice', (['(F_DIATONIC[chords_progression[i]] + [F_DIATONIC[chords_progression[i]][0]] +\n [F_DIATONIC[chords_progression[i]][2]] + Fdur_NOTES)'], {}), '(F_DIATONIC[chords_progression[i]] + [F_DIATONIC[chords_progression[i\n ]][0]] + [F_DIATONIC[chords_progression[i]][2]] + Fdur_NOTES)\n', (13842, 13977), False, 'from random import choice\n'), ((10775, 10813), 'numpy.random.randint', 'np.random.randint', (['(1)', '(max_duration + 1)'], {}), '(1, max_duration + 1)\n', (10792, 10813), True, 'import numpy as np\n'), ((11067, 11105), 'numpy.random.randint', 'np.random.randint', (['(1)', '(max_duration + 1)'], {}), '(1, max_duration + 1)\n', (11084, 11105), True, 'import 
numpy as np\n'), ((10965, 10989), 'numpy.sum', 'np.sum', (['chords_durations'], {}), '(chords_durations)\n', (10971, 10989), True, 'import numpy as np\n')] |
import numpy as np
import pandas as pd
from datetime import datetime
import pytest
import empyrical
import vectorbt as vbt
from vectorbt import settings
from tests.utils import isclose
day_dt = np.timedelta64(86400000000000)
ts = pd.DataFrame({
'a': [1, 2, 3, 4, 5],
'b': [5, 4, 3, 2, 1],
'c': [1, 2, 3, 2, 1]
}, index=pd.DatetimeIndex([
datetime(2018, 1, 1),
datetime(2018, 1, 2),
datetime(2018, 1, 3),
datetime(2018, 1, 4),
datetime(2018, 1, 5)
]))
ret = ts.pct_change()
settings.returns['year_freq'] = '252 days' # same as empyrical
seed = 42
np.random.seed(seed)
benchmark_rets = pd.DataFrame({
'a': ret['a'] * np.random.uniform(0.8, 1.2, ret.shape[0]),
'b': ret['b'] * np.random.uniform(0.8, 1.2, ret.shape[0]) * 2,
'c': ret['c'] * np.random.uniform(0.8, 1.2, ret.shape[0]) * 3
})
# ############# accessors.py ############# #
class TestAccessors:
def test_freq(self):
assert ret.vbt.returns.wrapper.freq == day_dt
assert ret['a'].vbt.returns.wrapper.freq == day_dt
assert ret.vbt.returns(freq='2D').wrapper.freq == day_dt * 2
assert ret['a'].vbt.returns(freq='2D').wrapper.freq == day_dt * 2
assert pd.Series([1, 2, 3]).vbt.returns.wrapper.freq is None
assert pd.Series([1, 2, 3]).vbt.returns(freq='3D').wrapper.freq == day_dt * 3
assert pd.Series([1, 2, 3]).vbt.returns(freq=np.timedelta64(4, 'D')).wrapper.freq == day_dt * 4
def test_ann_factor(self):
assert ret['a'].vbt.returns(year_freq='365 days').ann_factor == 365
assert ret.vbt.returns(year_freq='365 days').ann_factor == 365
with pytest.raises(Exception) as e_info:
assert pd.Series([1, 2, 3]).vbt.returns(freq=None).ann_factor
def test_from_price(self):
pd.testing.assert_series_equal(pd.Series.vbt.returns.from_price(ts['a']).obj, ts['a'].pct_change())
pd.testing.assert_frame_equal(pd.DataFrame.vbt.returns.from_price(ts).obj, ts.pct_change())
assert pd.Series.vbt.returns.from_price(ts['a'], year_freq='365 days').year_freq == pd.to_timedelta('365 days')
assert pd.DataFrame.vbt.returns.from_price(ts, year_freq='365 days').year_freq == pd.to_timedelta('365 days')
def test_daily(self):
ret_12h = pd.DataFrame({
'a': [0.1, 0.1, 0.1, 0.1, 0.1],
'b': [-0.1, -0.1, -0.1, -0.1, -0.1],
'c': [0.1, -0.1, 0.1, -0.1, 0.1]
}, index=pd.DatetimeIndex([
datetime(2018, 1, 1, 0),
datetime(2018, 1, 1, 12),
datetime(2018, 1, 2, 0),
datetime(2018, 1, 2, 12),
datetime(2018, 1, 3, 0)
]))
pd.testing.assert_series_equal(
ret_12h['a'].vbt.returns.daily(),
pd.Series(
np.array([0.21, 0.21, 0.1]),
index=pd.DatetimeIndex([
'2018-01-01',
'2018-01-02',
'2018-01-03'
], dtype='datetime64[ns]', freq='D'),
name=ret_12h['a'].name
)
)
pd.testing.assert_frame_equal(
ret_12h.vbt.returns.daily(),
pd.DataFrame(
np.array([
[0.21, -0.19, -0.01],
[0.21, -0.19, -0.01],
[0.1, -0.1, 0.1]
]),
index=pd.DatetimeIndex([
'2018-01-01',
'2018-01-02',
'2018-01-03'
], dtype='datetime64[ns]', freq='D'),
columns=ret_12h.columns
)
)
def test_annual(self):
pd.testing.assert_series_equal(
ret['a'].vbt.returns.annual(),
pd.Series(
np.array([4.]),
index=pd.DatetimeIndex(['2018-01-01'], dtype='datetime64[ns]', freq='252D'),
name=ret['a'].name
)
)
pd.testing.assert_frame_equal(
ret.vbt.returns.annual(),
pd.DataFrame(
np.array([[4., -0.8, 0.]]),
index=pd.DatetimeIndex(['2018-01-01'], dtype='datetime64[ns]', freq='252D'),
columns=ret.columns
)
)
def test_cumulative(self):
res_a = empyrical.cum_returns(ret['a']).rename('a')
res_b = empyrical.cum_returns(ret['b']).rename('b')
res_c = empyrical.cum_returns(ret['c']).rename('c')
pd.testing.assert_series_equal(
ret['a'].vbt.returns.cumulative(),
res_a
)
pd.testing.assert_frame_equal(
ret.vbt.returns.cumulative(),
pd.concat([res_a, res_b, res_c], axis=1)
)
def test_total_return(self):
res_a = empyrical.cum_returns_final(ret['a'])
res_b = empyrical.cum_returns_final(ret['b'])
res_c = empyrical.cum_returns_final(ret['c'])
assert isclose(ret['a'].vbt.returns.total(), res_a)
pd.testing.assert_series_equal(
ret.vbt.returns.total(),
pd.Series([res_a, res_b, res_c], index=ret.columns).rename('total_return')
)
pd.testing.assert_series_equal(
ret.vbt.returns.rolling_total(ret.shape[0], minp=1).iloc[-1],
pd.Series([res_a, res_b, res_c], index=ret.columns).rename(ret.index[-1])
)
def test_annualized_return(self):
res_a = empyrical.annual_return(ret['a'])
res_b = empyrical.annual_return(ret['b'])
res_c = empyrical.annual_return(ret['c'])
assert isclose(ret['a'].vbt.returns.annualized(), res_a)
pd.testing.assert_series_equal(
ret.vbt.returns.annualized(),
pd.Series([res_a, res_b, res_c], index=ret.columns).rename('annualized_return')
)
pd.testing.assert_series_equal(
ret.vbt.returns.rolling_annualized(ret.shape[0], minp=1).iloc[-1],
pd.Series([res_a, res_b, res_c], index=ret.columns).rename(ret.index[-1])
)
@pytest.mark.parametrize(
"test_alpha",
[1., 2., 3.],
)
def test_annualized_volatility(self, test_alpha):
res_a = empyrical.annual_volatility(ret['a'], alpha=test_alpha)
res_b = empyrical.annual_volatility(ret['b'], alpha=test_alpha)
res_c = empyrical.annual_volatility(ret['c'], alpha=test_alpha)
assert isclose(ret['a'].vbt.returns.annualized_volatility(levy_alpha=test_alpha), res_a)
pd.testing.assert_series_equal(
ret.vbt.returns.annualized_volatility(levy_alpha=test_alpha),
pd.Series([res_a, res_b, res_c], index=ret.columns).rename('annualized_volatility')
)
pd.testing.assert_series_equal(
ret.vbt.returns.rolling_annualized_volatility(ret.shape[0], minp=1, levy_alpha=test_alpha).iloc[-1],
pd.Series([res_a, res_b, res_c], index=ret.columns).rename(ret.index[-1])
)
def test_calmar_ratio(self):
res_a = empyrical.calmar_ratio(ret['a'])
res_b = empyrical.calmar_ratio(ret['b'])
res_c = empyrical.calmar_ratio(ret['c'])
assert isclose(ret['a'].vbt.returns.calmar_ratio(), res_a)
pd.testing.assert_series_equal(
ret.vbt.returns.calmar_ratio(),
pd.Series([res_a, res_b, res_c], index=ret.columns).rename('calmar_ratio')
)
pd.testing.assert_series_equal(
ret.vbt.returns.rolling_calmar_ratio(ret.shape[0], minp=1).iloc[-1],
pd.Series([res_a, res_b, res_c], index=ret.columns).rename(ret.index[-1])
)
@pytest.mark.parametrize(
"test_risk_free,test_required_return",
[(0.01, 0.1), (0.02, 0.2), (0.03, 0.3)],
)
def test_omega_ratio(self, test_risk_free, test_required_return):
res_a = empyrical.omega_ratio(ret['a'], risk_free=test_risk_free, required_return=test_required_return)
if np.isnan(res_a):
res_a = np.inf
res_b = empyrical.omega_ratio(ret['b'], risk_free=test_risk_free, required_return=test_required_return)
if np.isnan(res_b):
res_b = np.inf
res_c = empyrical.omega_ratio(ret['c'], risk_free=test_risk_free, required_return=test_required_return)
if np.isnan(res_c):
res_c = np.inf
assert isclose(ret['a'].vbt.returns.omega_ratio(
risk_free=test_risk_free, required_return=test_required_return), res_a)
pd.testing.assert_series_equal(
ret.vbt.returns.omega_ratio(risk_free=test_risk_free, required_return=test_required_return),
pd.Series([res_a, res_b, res_c], index=ret.columns).rename('omega_ratio')
)
pd.testing.assert_series_equal(
ret.vbt.returns.rolling_omega_ratio(
ret.shape[0], minp=1, risk_free=test_risk_free, required_return=test_required_return).iloc[-1],
pd.Series([res_a, res_b, res_c], index=ret.columns).rename(ret.index[-1])
)
@pytest.mark.parametrize(
"test_risk_free",
[0.01, 0.02, 0.03],
)
def test_sharpe_ratio(self, test_risk_free):
res_a = empyrical.sharpe_ratio(ret['a'], risk_free=test_risk_free)
res_b = empyrical.sharpe_ratio(ret['b'], risk_free=test_risk_free)
res_c = empyrical.sharpe_ratio(ret['c'], risk_free=test_risk_free)
assert isclose(ret['a'].vbt.returns.sharpe_ratio(risk_free=test_risk_free), res_a)
pd.testing.assert_series_equal(
ret.vbt.returns.sharpe_ratio(risk_free=test_risk_free),
pd.Series([res_a, res_b, res_c], index=ret.columns).rename('sharpe_ratio')
)
pd.testing.assert_series_equal(
ret.vbt.returns.rolling_sharpe_ratio(ret.shape[0], minp=1, risk_free=test_risk_free).iloc[-1],
pd.Series([res_a, res_b, res_c], index=ret.columns).rename(ret.index[-1])
)
def test_deflated_sharpe_ratio(self):
pd.testing.assert_series_equal(
ret.vbt.returns.deflated_sharpe_ratio(risk_free=0.01),
pd.Series([np.nan, np.nan, 0.0005355605507117676], index=ret.columns).rename('deflated_sharpe_ratio')
)
pd.testing.assert_series_equal(
ret.vbt.returns.deflated_sharpe_ratio(risk_free=0.03),
pd.Series([np.nan, np.nan, 0.0003423112350834066], index=ret.columns).rename('deflated_sharpe_ratio')
)
@pytest.mark.parametrize(
"test_required_return",
[0.01, 0.02, 0.03],
)
def test_downside_risk(self, test_required_return):
res_a = empyrical.downside_risk(ret['a'], required_return=test_required_return)
res_b = empyrical.downside_risk(ret['b'], required_return=test_required_return)
res_c = empyrical.downside_risk(ret['c'], required_return=test_required_return)
assert isclose(ret['a'].vbt.returns.downside_risk(required_return=test_required_return), res_a)
pd.testing.assert_series_equal(
ret.vbt.returns.downside_risk(required_return=test_required_return),
pd.Series([res_a, res_b, res_c], index=ret.columns).rename('downside_risk')
)
pd.testing.assert_series_equal(
ret.vbt.returns.rolling_downside_risk(
ret.shape[0], minp=1, required_return=test_required_return).iloc[-1],
pd.Series([res_a, res_b, res_c], index=ret.columns).rename(ret.index[-1])
)
@pytest.mark.parametrize(
"test_required_return",
[0.01, 0.02, 0.03],
)
def test_sortino_ratio(self, test_required_return):
res_a = empyrical.sortino_ratio(ret['a'], required_return=test_required_return)
res_b = empyrical.sortino_ratio(ret['b'], required_return=test_required_return)
res_c = empyrical.sortino_ratio(ret['c'], required_return=test_required_return)
assert isclose(ret['a'].vbt.returns.sortino_ratio(required_return=test_required_return), res_a)
pd.testing.assert_series_equal(
ret.vbt.returns.sortino_ratio(required_return=test_required_return),
pd.Series([res_a, res_b, res_c], index=ret.columns).rename('sortino_ratio')
)
pd.testing.assert_series_equal(
ret.vbt.returns.rolling_sortino_ratio(
ret.shape[0], minp=1, required_return=test_required_return).iloc[-1],
pd.Series([res_a, res_b, res_c], index=ret.columns).rename(ret.index[-1])
)
def test_information_ratio(self):
res_a = empyrical.excess_sharpe(ret['a'], benchmark_rets['a'])
res_b = empyrical.excess_sharpe(ret['b'], benchmark_rets['b'])
res_c = empyrical.excess_sharpe(ret['c'], benchmark_rets['c'])
assert isclose(ret['a'].vbt.returns.information_ratio(benchmark_rets['a']), res_a)
pd.testing.assert_series_equal(
ret.vbt.returns.information_ratio(benchmark_rets),
pd.Series([res_a, res_b, res_c], index=ret.columns).rename('information_ratio')
)
pd.testing.assert_series_equal(
ret.vbt.returns.rolling_information_ratio(
benchmark_rets, ret.shape[0], minp=1).iloc[-1],
pd.Series([res_a, res_b, res_c], index=ret.columns).rename(ret.index[-1])
)
def test_beta(self):
res_a = empyrical.beta(ret['a'], benchmark_rets['a'])
res_b = empyrical.beta(ret['b'], benchmark_rets['b'])
res_c = empyrical.beta(ret['c'], benchmark_rets['c'])
assert isclose(ret['a'].vbt.returns.beta(benchmark_rets['a']), res_a)
pd.testing.assert_series_equal(
ret.vbt.returns.beta(benchmark_rets),
pd.Series([res_a, res_b, res_c], index=ret.columns).rename('beta')
)
pd.testing.assert_series_equal(
ret.vbt.returns.rolling_beta(
benchmark_rets, ret.shape[0], minp=1).iloc[-1],
pd.Series([res_a, res_b, res_c], index=ret.columns).rename(ret.index[-1])
)
@pytest.mark.parametrize(
"test_risk_free",
[0.01, 0.02, 0.03],
)
def test_alpha(self, test_risk_free):
res_a = empyrical.alpha(ret['a'], benchmark_rets['a'], risk_free=test_risk_free)
res_b = empyrical.alpha(ret['b'], benchmark_rets['b'], risk_free=test_risk_free)
res_c = empyrical.alpha(ret['c'], benchmark_rets['c'], risk_free=test_risk_free)
assert isclose(ret['a'].vbt.returns.alpha(benchmark_rets['a'], risk_free=test_risk_free), res_a)
pd.testing.assert_series_equal(
ret.vbt.returns.alpha(benchmark_rets, risk_free=test_risk_free),
pd.Series([res_a, res_b, res_c], index=ret.columns).rename('alpha')
)
pd.testing.assert_series_equal(
ret.vbt.returns.rolling_alpha(
benchmark_rets, ret.shape[0], minp=1, risk_free=test_risk_free).iloc[-1],
pd.Series([res_a, res_b, res_c], index=ret.columns).rename(ret.index[-1])
)
def test_tail_ratio(self):
res_a = empyrical.tail_ratio(ret['a'])
res_b = empyrical.tail_ratio(ret['b'])
res_c = empyrical.tail_ratio(ret['c'])
assert isclose(ret['a'].vbt.returns.tail_ratio(), res_a)
pd.testing.assert_series_equal(
ret.vbt.returns.tail_ratio(),
pd.Series([res_a, res_b, res_c], index=ret.columns).rename('tail_ratio')
)
pd.testing.assert_series_equal(
ret.vbt.returns.rolling_tail_ratio(
ret.shape[0], minp=1).iloc[-1],
pd.Series([res_a, res_b, res_c], index=ret.columns).rename(ret.index[-1])
)
@pytest.mark.parametrize(
"test_cutoff",
[0.05, 0.06, 0.07],
)
def test_value_at_risk(self, test_cutoff):
# empyrical can't tolerate NaN here
res_a = empyrical.value_at_risk(ret['a'].iloc[1:], cutoff=test_cutoff)
res_b = empyrical.value_at_risk(ret['b'].iloc[1:], cutoff=test_cutoff)
res_c = empyrical.value_at_risk(ret['c'].iloc[1:], cutoff=test_cutoff)
assert isclose(ret['a'].vbt.returns.value_at_risk(cutoff=test_cutoff), res_a)
pd.testing.assert_series_equal(
ret.vbt.returns.value_at_risk(cutoff=test_cutoff),
pd.Series([res_a, res_b, res_c], index=ret.columns).rename('value_at_risk')
)
pd.testing.assert_series_equal(
ret.vbt.returns.rolling_value_at_risk(
ret.shape[0], minp=1, cutoff=test_cutoff).iloc[-1],
pd.Series([res_a, res_b, res_c], index=ret.columns).rename(ret.index[-1])
)
@pytest.mark.parametrize(
"test_cutoff",
[0.05, 0.06, 0.07],
)
def test_cond_value_at_risk(self, test_cutoff):
# empyrical can't tolerate NaN here
res_a = empyrical.conditional_value_at_risk(ret['a'].iloc[1:], cutoff=test_cutoff)
res_b = empyrical.conditional_value_at_risk(ret['b'].iloc[1:], cutoff=test_cutoff)
res_c = empyrical.conditional_value_at_risk(ret['c'].iloc[1:], cutoff=test_cutoff)
assert isclose(ret['a'].vbt.returns.cond_value_at_risk(cutoff=test_cutoff), res_a)
pd.testing.assert_series_equal(
ret.vbt.returns.cond_value_at_risk(cutoff=test_cutoff),
pd.Series([res_a, res_b, res_c], index=ret.columns).rename('cond_value_at_risk')
)
pd.testing.assert_series_equal(
ret.vbt.returns.rolling_cond_value_at_risk(
ret.shape[0], minp=1, cutoff=test_cutoff).iloc[-1],
pd.Series([res_a, res_b, res_c], index=ret.columns).rename(ret.index[-1])
)
def test_capture(self):
res_a = empyrical.capture(ret['a'], benchmark_rets['a'])
res_b = empyrical.capture(ret['b'], benchmark_rets['b'])
res_c = empyrical.capture(ret['c'], benchmark_rets['c'])
assert isclose(ret['a'].vbt.returns.capture(benchmark_rets['a']), res_a)
pd.testing.assert_series_equal(
ret.vbt.returns.capture(benchmark_rets),
pd.Series([res_a, res_b, res_c], index=ret.columns).rename('capture')
)
pd.testing.assert_series_equal(
ret.vbt.returns.rolling_capture(
benchmark_rets, ret.shape[0], minp=1).iloc[-1],
pd.Series([res_a, res_b, res_c], index=ret.columns).rename(ret.index[-1])
)
def test_up_capture(self):
res_a = empyrical.up_capture(ret['a'], benchmark_rets['a'])
res_b = empyrical.up_capture(ret['b'], benchmark_rets['b'])
res_c = empyrical.up_capture(ret['c'], benchmark_rets['c'])
assert isclose(ret['a'].vbt.returns.up_capture(benchmark_rets['a']), res_a)
pd.testing.assert_series_equal(
ret.vbt.returns.up_capture(benchmark_rets),
pd.Series([res_a, res_b, res_c], index=ret.columns).rename('up_capture')
)
pd.testing.assert_series_equal(
ret.vbt.returns.rolling_up_capture(
benchmark_rets, ret.shape[0], minp=1).iloc[-1],
pd.Series([res_a, res_b, res_c], index=ret.columns).rename(ret.index[-1])
)
def test_down_capture(self):
res_a = empyrical.down_capture(ret['a'], benchmark_rets['a'])
res_b = empyrical.down_capture(ret['b'], benchmark_rets['b'])
res_c = empyrical.down_capture(ret['c'], benchmark_rets['c'])
assert isclose(ret['a'].vbt.returns.down_capture(benchmark_rets['a']), res_a)
pd.testing.assert_series_equal(
ret.vbt.returns.down_capture(benchmark_rets),
pd.Series([res_a, res_b, res_c], index=ret.columns).rename('down_capture')
)
pd.testing.assert_series_equal(
ret.vbt.returns.rolling_down_capture(
benchmark_rets, ret.shape[0], minp=1).iloc[-1],
pd.Series([res_a, res_b, res_c], index=ret.columns).rename(ret.index[-1])
)
def test_drawdown(self):
pd.testing.assert_series_equal(
ret['a'].vbt.returns.drawdown(),
pd.Series(
np.array([0., 0., 0., 0., 0.]),
index=ret['a'].index,
name=ret['a'].name
)
)
pd.testing.assert_frame_equal(
ret.vbt.returns.drawdown(),
pd.DataFrame(
np.array([
[0., 0., 0.],
[0., -0.2, 0.],
[0., -0.4, 0.],
[0., -0.6, -0.33333333],
[0., -0.8, -0.66666667]
]),
index=pd.DatetimeIndex([
'2018-01-01',
'2018-01-02',
'2018-01-03',
'2018-01-04',
'2018-01-05'
], dtype='datetime64[ns]', freq=None),
columns=ret.columns
)
)
def test_max_drawdown(self):
res_a = empyrical.max_drawdown(ret['a'])
res_b = empyrical.max_drawdown(ret['b'])
res_c = empyrical.max_drawdown(ret['c'])
assert isclose(ret['a'].vbt.returns.max_drawdown(), res_a)
pd.testing.assert_series_equal(
ret.vbt.returns.max_drawdown(),
pd.Series([res_a, res_b, res_c], index=ret.columns).rename('max_drawdown')
)
pd.testing.assert_series_equal(
ret.vbt.returns.rolling_max_drawdown(
ret.shape[0], minp=1).iloc[-1],
pd.Series([res_a, res_b, res_c], index=ret.columns).rename(ret.index[-1])
)
def test_drawdowns(self):
assert type(ret['a'].vbt.returns.drawdowns) is vbt.Drawdowns
assert ret['a'].vbt.returns.drawdowns.wrapper.freq == ret['a'].vbt.wrapper.freq
assert ret['a'].vbt.returns.drawdowns.wrapper.ndim == ret['a'].ndim
assert ret.vbt.returns.drawdowns.wrapper.ndim == ret.ndim
assert isclose(ret['a'].vbt.returns.drawdowns.max_drawdown(), ret['a'].vbt.returns.max_drawdown())
pd.testing.assert_series_equal(
ret.vbt.returns.drawdowns.max_drawdown(),
ret.vbt.returns.max_drawdown()
)
def test_stats(self):
pd.testing.assert_series_equal(
ret['b'].vbt.returns.stats(
benchmark_rets['b'],
levy_alpha=2.,
risk_free=0.01,
required_return=0.1
),
pd.Series([
pd.Timestamp('2018-01-01 00:00:00'),
pd.Timestamp('2018-01-05 00:00:00'),
pd.Timedelta('5 days 00:00:00'),
-80.0,
-100.72986288899584,
-100.0,
208.74625745148103,
-39.93844058228336,
-1.25,
-80.0,
0.0,
-15.323368643952458,
-1.0653693625282994,
0.6452153997516223,
0.43684210526315786,
0.0,
-0.47500000000000003,
-0.9999978857530595,
0.4123019930790345
], index=[
'Start',
'End',
'Duration',
'Total Return [%]',
'Benchmark Return [%]',
'Annual Return [%]',
'Annual Volatility [%]',
'Sharpe Ratio',
'Calmar Ratio',
'Max Drawdown [%]',
'Omega Ratio',
'Sortino Ratio',
'Skew',
'Kurtosis',
'Tail Ratio',
'Common Sense Ratio',
'Value at Risk',
'Alpha',
'Beta'
], name='b')
)
pd.testing.assert_frame_equal(
ret.vbt.returns.stats(
benchmark_rets,
levy_alpha=2.,
risk_free=0.01,
required_return=0.1
),
pd.DataFrame([[
pd.Timestamp('2018-01-01 00:00:00'),
pd.Timestamp('2018-01-05 00:00:00'),
pd.Timedelta('5 days 00:00:00'),
400.0,
451.8597134178033,
1.690784346944584e+37,
533.2682251925386,
24.139821935485003,
np.nan,
0.0,
np.inf,
np.inf,
1.4693345482106241,
2.030769230769236,
3.5238095238095237,
5.958001984471391e+35,
0.26249999999999996,
21533588.23721922,
0.7853755858374825
], [
pd.Timestamp('2018-01-01 00:00:00'),
pd.Timestamp('2018-01-05 00:00:00'),
pd.Timedelta('5 days 00:00:00'),
-80.0,
-100.72986288899584,
-100.0,
208.74625745148103,
-39.93844058228336,
-1.25,
-80.0,
0.0,
-15.323368643952458,
-1.0653693625282994,
0.6452153997516223,
0.43684210526315786,
0.0,
-0.47500000000000003,
-0.9999978857530595,
0.4123019930790345
], [
pd.Timestamp('2018-01-01 00:00:00'),
pd.Timestamp('2018-01-05 00:00:00'),
pd.Timedelta('5 days 00:00:00'),
0.0,
-143.81732886778948,
0.0,
1122.4972160321827,
3.517157943567505,
0.0,
-66.66666666666667,
1.7974602203427394,
2.8598075085224215,
0.3666479606152471,
-3.438271604938274,
1.947368421052631,
1.947368421052631,
-0.47500000000000003,
-0.9999999982512272,
0.30840682076341036
]], columns=[
'Start',
'End',
'Duration',
'Total Return [%]',
'Benchmark Return [%]',
'Annual Return [%]',
'Annual Volatility [%]',
'Sharpe Ratio',
'Calmar Ratio',
'Max Drawdown [%]',
'Omega Ratio',
'Sortino Ratio',
'Skew',
'Kurtosis',
'Tail Ratio',
'Common Sense Ratio',
'Value at Risk',
'Alpha',
'Beta'
], index=ret.columns)
)
| [
"empyrical.tail_ratio",
"empyrical.excess_sharpe",
"numpy.random.seed",
"empyrical.conditional_value_at_risk",
"numpy.isnan",
"pandas.DatetimeIndex",
"pytest.mark.parametrize",
"empyrical.value_at_risk",
"empyrical.beta",
"empyrical.downside_risk",
"empyrical.omega_ratio",
"empyrical.max_drawd... | [((197, 227), 'numpy.timedelta64', 'np.timedelta64', (['(86400000000000)'], {}), '(86400000000000)\n', (211, 227), True, 'import numpy as np\n'), ((586, 606), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (600, 606), True, 'import numpy as np\n'), ((5995, 6049), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""test_alpha"""', '[1.0, 2.0, 3.0]'], {}), "('test_alpha', [1.0, 2.0, 3.0])\n", (6018, 6049), False, 'import pytest\n'), ((7558, 7665), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""test_risk_free,test_required_return"""', '[(0.01, 0.1), (0.02, 0.2), (0.03, 0.3)]'], {}), "('test_risk_free,test_required_return', [(0.01, 0.1),\n (0.02, 0.2), (0.03, 0.3)])\n", (7581, 7665), False, 'import pytest\n'), ((8941, 9002), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""test_risk_free"""', '[0.01, 0.02, 0.03]'], {}), "('test_risk_free', [0.01, 0.02, 0.03])\n", (8964, 9002), False, 'import pytest\n'), ((10350, 10417), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""test_required_return"""', '[0.01, 0.02, 0.03]'], {}), "('test_required_return', [0.01, 0.02, 0.03])\n", (10373, 10417), False, 'import pytest\n'), ((11363, 11430), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""test_required_return"""', '[0.01, 0.02, 0.03]'], {}), "('test_required_return', [0.01, 0.02, 0.03])\n", (11386, 11430), False, 'import pytest\n'), ((13890, 13951), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""test_risk_free"""', '[0.01, 0.02, 0.03]'], {}), "('test_risk_free', [0.01, 0.02, 0.03])\n", (13913, 13951), False, 'import pytest\n'), ((15518, 15576), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""test_cutoff"""', '[0.05, 0.06, 0.07]'], {}), "('test_cutoff', [0.05, 0.06, 0.07])\n", (15541, 15576), False, 'import pytest\n'), ((16476, 16534), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""test_cutoff"""', '[0.05, 0.06, 0.07]'], {}), "('test_cutoff', 
[0.05, 0.06, 0.07])\n", (16499, 16534), False, 'import pytest\n'), ((4746, 4783), 'empyrical.cum_returns_final', 'empyrical.cum_returns_final', (["ret['a']"], {}), "(ret['a'])\n", (4773, 4783), False, 'import empyrical\n'), ((4800, 4837), 'empyrical.cum_returns_final', 'empyrical.cum_returns_final', (["ret['b']"], {}), "(ret['b'])\n", (4827, 4837), False, 'import empyrical\n'), ((4854, 4891), 'empyrical.cum_returns_final', 'empyrical.cum_returns_final', (["ret['c']"], {}), "(ret['c'])\n", (4881, 4891), False, 'import empyrical\n'), ((5391, 5424), 'empyrical.annual_return', 'empyrical.annual_return', (["ret['a']"], {}), "(ret['a'])\n", (5414, 5424), False, 'import empyrical\n'), ((5441, 5474), 'empyrical.annual_return', 'empyrical.annual_return', (["ret['b']"], {}), "(ret['b'])\n", (5464, 5474), False, 'import empyrical\n'), ((5491, 5524), 'empyrical.annual_return', 'empyrical.annual_return', (["ret['c']"], {}), "(ret['c'])\n", (5514, 5524), False, 'import empyrical\n'), ((6140, 6195), 'empyrical.annual_volatility', 'empyrical.annual_volatility', (["ret['a']"], {'alpha': 'test_alpha'}), "(ret['a'], alpha=test_alpha)\n", (6167, 6195), False, 'import empyrical\n'), ((6212, 6267), 'empyrical.annual_volatility', 'empyrical.annual_volatility', (["ret['b']"], {'alpha': 'test_alpha'}), "(ret['b'], alpha=test_alpha)\n", (6239, 6267), False, 'import empyrical\n'), ((6284, 6339), 'empyrical.annual_volatility', 'empyrical.annual_volatility', (["ret['c']"], {'alpha': 'test_alpha'}), "(ret['c'], alpha=test_alpha)\n", (6311, 6339), False, 'import empyrical\n'), ((6956, 6988), 'empyrical.calmar_ratio', 'empyrical.calmar_ratio', (["ret['a']"], {}), "(ret['a'])\n", (6978, 6988), False, 'import empyrical\n'), ((7005, 7037), 'empyrical.calmar_ratio', 'empyrical.calmar_ratio', (["ret['b']"], {}), "(ret['b'])\n", (7027, 7037), False, 'import empyrical\n'), ((7054, 7086), 'empyrical.calmar_ratio', 'empyrical.calmar_ratio', (["ret['c']"], {}), "(ret['c'])\n", (7076, 7086), False, 'import 
empyrical\n'), ((7771, 7871), 'empyrical.omega_ratio', 'empyrical.omega_ratio', (["ret['a']"], {'risk_free': 'test_risk_free', 'required_return': 'test_required_return'}), "(ret['a'], risk_free=test_risk_free, required_return=\n test_required_return)\n", (7792, 7871), False, 'import empyrical\n'), ((7878, 7893), 'numpy.isnan', 'np.isnan', (['res_a'], {}), '(res_a)\n', (7886, 7893), True, 'import numpy as np\n'), ((7938, 8038), 'empyrical.omega_ratio', 'empyrical.omega_ratio', (["ret['b']"], {'risk_free': 'test_risk_free', 'required_return': 'test_required_return'}), "(ret['b'], risk_free=test_risk_free, required_return=\n test_required_return)\n", (7959, 8038), False, 'import empyrical\n'), ((8045, 8060), 'numpy.isnan', 'np.isnan', (['res_b'], {}), '(res_b)\n', (8053, 8060), True, 'import numpy as np\n'), ((8105, 8205), 'empyrical.omega_ratio', 'empyrical.omega_ratio', (["ret['c']"], {'risk_free': 'test_risk_free', 'required_return': 'test_required_return'}), "(ret['c'], risk_free=test_risk_free, required_return=\n test_required_return)\n", (8126, 8205), False, 'import empyrical\n'), ((8212, 8227), 'numpy.isnan', 'np.isnan', (['res_c'], {}), '(res_c)\n', (8220, 8227), True, 'import numpy as np\n'), ((9091, 9149), 'empyrical.sharpe_ratio', 'empyrical.sharpe_ratio', (["ret['a']"], {'risk_free': 'test_risk_free'}), "(ret['a'], risk_free=test_risk_free)\n", (9113, 9149), False, 'import empyrical\n'), ((9166, 9224), 'empyrical.sharpe_ratio', 'empyrical.sharpe_ratio', (["ret['b']"], {'risk_free': 'test_risk_free'}), "(ret['b'], risk_free=test_risk_free)\n", (9188, 9224), False, 'import empyrical\n'), ((9241, 9299), 'empyrical.sharpe_ratio', 'empyrical.sharpe_ratio', (["ret['c']"], {'risk_free': 'test_risk_free'}), "(ret['c'], risk_free=test_risk_free)\n", (9263, 9299), False, 'import empyrical\n'), ((10513, 10584), 'empyrical.downside_risk', 'empyrical.downside_risk', (["ret['a']"], {'required_return': 'test_required_return'}), "(ret['a'], 
required_return=test_required_return)\n", (10536, 10584), False, 'import empyrical\n'), ((10601, 10672), 'empyrical.downside_risk', 'empyrical.downside_risk', (["ret['b']"], {'required_return': 'test_required_return'}), "(ret['b'], required_return=test_required_return)\n", (10624, 10672), False, 'import empyrical\n'), ((10689, 10760), 'empyrical.downside_risk', 'empyrical.downside_risk', (["ret['c']"], {'required_return': 'test_required_return'}), "(ret['c'], required_return=test_required_return)\n", (10712, 10760), False, 'import empyrical\n'), ((11526, 11597), 'empyrical.sortino_ratio', 'empyrical.sortino_ratio', (["ret['a']"], {'required_return': 'test_required_return'}), "(ret['a'], required_return=test_required_return)\n", (11549, 11597), False, 'import empyrical\n'), ((11614, 11685), 'empyrical.sortino_ratio', 'empyrical.sortino_ratio', (["ret['b']"], {'required_return': 'test_required_return'}), "(ret['b'], required_return=test_required_return)\n", (11637, 11685), False, 'import empyrical\n'), ((11702, 11773), 'empyrical.sortino_ratio', 'empyrical.sortino_ratio', (["ret['c']"], {'required_return': 'test_required_return'}), "(ret['c'], required_return=test_required_return)\n", (11725, 11773), False, 'import empyrical\n'), ((12425, 12479), 'empyrical.excess_sharpe', 'empyrical.excess_sharpe', (["ret['a']", "benchmark_rets['a']"], {}), "(ret['a'], benchmark_rets['a'])\n", (12448, 12479), False, 'import empyrical\n'), ((12496, 12550), 'empyrical.excess_sharpe', 'empyrical.excess_sharpe', (["ret['b']", "benchmark_rets['b']"], {}), "(ret['b'], benchmark_rets['b'])\n", (12519, 12550), False, 'import empyrical\n'), ((12567, 12621), 'empyrical.excess_sharpe', 'empyrical.excess_sharpe', (["ret['c']", "benchmark_rets['c']"], {}), "(ret['c'], benchmark_rets['c'])\n", (12590, 12621), False, 'import empyrical\n'), ((13215, 13260), 'empyrical.beta', 'empyrical.beta', (["ret['a']", "benchmark_rets['a']"], {}), "(ret['a'], benchmark_rets['a'])\n", (13229, 13260), False, 
'import empyrical\n'), ((13277, 13322), 'empyrical.beta', 'empyrical.beta', (["ret['b']", "benchmark_rets['b']"], {}), "(ret['b'], benchmark_rets['b'])\n", (13291, 13322), False, 'import empyrical\n'), ((13339, 13384), 'empyrical.beta', 'empyrical.beta', (["ret['c']", "benchmark_rets['c']"], {}), "(ret['c'], benchmark_rets['c'])\n", (13353, 13384), False, 'import empyrical\n'), ((14033, 14105), 'empyrical.alpha', 'empyrical.alpha', (["ret['a']", "benchmark_rets['a']"], {'risk_free': 'test_risk_free'}), "(ret['a'], benchmark_rets['a'], risk_free=test_risk_free)\n", (14048, 14105), False, 'import empyrical\n'), ((14122, 14194), 'empyrical.alpha', 'empyrical.alpha', (["ret['b']", "benchmark_rets['b']"], {'risk_free': 'test_risk_free'}), "(ret['b'], benchmark_rets['b'], risk_free=test_risk_free)\n", (14137, 14194), False, 'import empyrical\n'), ((14211, 14283), 'empyrical.alpha', 'empyrical.alpha', (["ret['c']", "benchmark_rets['c']"], {'risk_free': 'test_risk_free'}), "(ret['c'], benchmark_rets['c'], risk_free=test_risk_free)\n", (14226, 14283), False, 'import empyrical\n'), ((14913, 14943), 'empyrical.tail_ratio', 'empyrical.tail_ratio', (["ret['a']"], {}), "(ret['a'])\n", (14933, 14943), False, 'import empyrical\n'), ((14960, 14990), 'empyrical.tail_ratio', 'empyrical.tail_ratio', (["ret['b']"], {}), "(ret['b'])\n", (14980, 14990), False, 'import empyrical\n'), ((15007, 15037), 'empyrical.tail_ratio', 'empyrical.tail_ratio', (["ret['c']"], {}), "(ret['c'])\n", (15027, 15037), False, 'import empyrical\n'), ((15707, 15769), 'empyrical.value_at_risk', 'empyrical.value_at_risk', (["ret['a'].iloc[1:]"], {'cutoff': 'test_cutoff'}), "(ret['a'].iloc[1:], cutoff=test_cutoff)\n", (15730, 15769), False, 'import empyrical\n'), ((15786, 15848), 'empyrical.value_at_risk', 'empyrical.value_at_risk', (["ret['b'].iloc[1:]"], {'cutoff': 'test_cutoff'}), "(ret['b'].iloc[1:], cutoff=test_cutoff)\n", (15809, 15848), False, 'import empyrical\n'), ((15865, 15927), 
'empyrical.value_at_risk', 'empyrical.value_at_risk', (["ret['c'].iloc[1:]"], {'cutoff': 'test_cutoff'}), "(ret['c'].iloc[1:], cutoff=test_cutoff)\n", (15888, 15927), False, 'import empyrical\n'), ((16670, 16744), 'empyrical.conditional_value_at_risk', 'empyrical.conditional_value_at_risk', (["ret['a'].iloc[1:]"], {'cutoff': 'test_cutoff'}), "(ret['a'].iloc[1:], cutoff=test_cutoff)\n", (16705, 16744), False, 'import empyrical\n'), ((16761, 16835), 'empyrical.conditional_value_at_risk', 'empyrical.conditional_value_at_risk', (["ret['b'].iloc[1:]"], {'cutoff': 'test_cutoff'}), "(ret['b'].iloc[1:], cutoff=test_cutoff)\n", (16796, 16835), False, 'import empyrical\n'), ((16852, 16926), 'empyrical.conditional_value_at_risk', 'empyrical.conditional_value_at_risk', (["ret['c'].iloc[1:]"], {'cutoff': 'test_cutoff'}), "(ret['c'].iloc[1:], cutoff=test_cutoff)\n", (16887, 16926), False, 'import empyrical\n'), ((17534, 17582), 'empyrical.capture', 'empyrical.capture', (["ret['a']", "benchmark_rets['a']"], {}), "(ret['a'], benchmark_rets['a'])\n", (17551, 17582), False, 'import empyrical\n'), ((17599, 17647), 'empyrical.capture', 'empyrical.capture', (["ret['b']", "benchmark_rets['b']"], {}), "(ret['b'], benchmark_rets['b'])\n", (17616, 17647), False, 'import empyrical\n'), ((17664, 17712), 'empyrical.capture', 'empyrical.capture', (["ret['c']", "benchmark_rets['c']"], {}), "(ret['c'], benchmark_rets['c'])\n", (17681, 17712), False, 'import empyrical\n'), ((18272, 18323), 'empyrical.up_capture', 'empyrical.up_capture', (["ret['a']", "benchmark_rets['a']"], {}), "(ret['a'], benchmark_rets['a'])\n", (18292, 18323), False, 'import empyrical\n'), ((18340, 18391), 'empyrical.up_capture', 'empyrical.up_capture', (["ret['b']", "benchmark_rets['b']"], {}), "(ret['b'], benchmark_rets['b'])\n", (18360, 18391), False, 'import empyrical\n'), ((18408, 18459), 'empyrical.up_capture', 'empyrical.up_capture', (["ret['c']", "benchmark_rets['c']"], {}), "(ret['c'], benchmark_rets['c'])\n", 
(18428, 18459), False, 'import empyrical\n'), ((19033, 19086), 'empyrical.down_capture', 'empyrical.down_capture', (["ret['a']", "benchmark_rets['a']"], {}), "(ret['a'], benchmark_rets['a'])\n", (19055, 19086), False, 'import empyrical\n'), ((19103, 19156), 'empyrical.down_capture', 'empyrical.down_capture', (["ret['b']", "benchmark_rets['b']"], {}), "(ret['b'], benchmark_rets['b'])\n", (19125, 19156), False, 'import empyrical\n'), ((19173, 19226), 'empyrical.down_capture', 'empyrical.down_capture', (["ret['c']", "benchmark_rets['c']"], {}), "(ret['c'], benchmark_rets['c'])\n", (19195, 19226), False, 'import empyrical\n'), ((20763, 20795), 'empyrical.max_drawdown', 'empyrical.max_drawdown', (["ret['a']"], {}), "(ret['a'])\n", (20785, 20795), False, 'import empyrical\n'), ((20812, 20844), 'empyrical.max_drawdown', 'empyrical.max_drawdown', (["ret['b']"], {}), "(ret['b'])\n", (20834, 20844), False, 'import empyrical\n'), ((20861, 20893), 'empyrical.max_drawdown', 'empyrical.max_drawdown', (["ret['c']"], {}), "(ret['c'])\n", (20883, 20893), False, 'import empyrical\n'), ((659, 700), 'numpy.random.uniform', 'np.random.uniform', (['(0.8)', '(1.2)', 'ret.shape[0]'], {}), '(0.8, 1.2, ret.shape[0])\n', (676, 700), True, 'import numpy as np\n'), ((1640, 1664), 'pytest.raises', 'pytest.raises', (['Exception'], {}), '(Exception)\n', (1653, 1664), False, 'import pytest\n'), ((2082, 2109), 'pandas.to_timedelta', 'pd.to_timedelta', (['"""365 days"""'], {}), "('365 days')\n", (2097, 2109), True, 'import pandas as pd\n'), ((2200, 2227), 'pandas.to_timedelta', 'pd.to_timedelta', (['"""365 days"""'], {}), "('365 days')\n", (2215, 2227), True, 'import pandas as pd\n'), ((4645, 4685), 'pandas.concat', 'pd.concat', (['[res_a, res_b, res_c]'], {'axis': '(1)'}), '([res_a, res_b, res_c], axis=1)\n', (4654, 4685), True, 'import pandas as pd\n'), ((358, 378), 'datetime.datetime', 'datetime', (['(2018)', '(1)', '(1)'], {}), '(2018, 1, 1)\n', (366, 378), False, 'from datetime import 
datetime\n'), ((384, 404), 'datetime.datetime', 'datetime', (['(2018)', '(1)', '(2)'], {}), '(2018, 1, 2)\n', (392, 404), False, 'from datetime import datetime\n'), ((410, 430), 'datetime.datetime', 'datetime', (['(2018)', '(1)', '(3)'], {}), '(2018, 1, 3)\n', (418, 430), False, 'from datetime import datetime\n'), ((436, 456), 'datetime.datetime', 'datetime', (['(2018)', '(1)', '(4)'], {}), '(2018, 1, 4)\n', (444, 456), False, 'from datetime import datetime\n'), ((462, 482), 'datetime.datetime', 'datetime', (['(2018)', '(1)', '(5)'], {}), '(2018, 1, 5)\n', (470, 482), False, 'from datetime import datetime\n'), ((722, 763), 'numpy.random.uniform', 'np.random.uniform', (['(0.8)', '(1.2)', 'ret.shape[0]'], {}), '(0.8, 1.2, ret.shape[0])\n', (739, 763), True, 'import numpy as np\n'), ((789, 830), 'numpy.random.uniform', 'np.random.uniform', (['(0.8)', '(1.2)', 'ret.shape[0]'], {}), '(0.8, 1.2, ret.shape[0])\n', (806, 830), True, 'import numpy as np\n'), ((1821, 1862), 'pandas.Series.vbt.returns.from_price', 'pd.Series.vbt.returns.from_price', (["ts['a']"], {}), "(ts['a'])\n", (1853, 1862), True, 'import pandas as pd\n'), ((1928, 1967), 'pandas.DataFrame.vbt.returns.from_price', 'pd.DataFrame.vbt.returns.from_price', (['ts'], {}), '(ts)\n', (1963, 1967), True, 'import pandas as pd\n'), ((2005, 2068), 'pandas.Series.vbt.returns.from_price', 'pd.Series.vbt.returns.from_price', (["ts['a']"], {'year_freq': '"""365 days"""'}), "(ts['a'], year_freq='365 days')\n", (2037, 2068), True, 'import pandas as pd\n'), ((2125, 2186), 'pandas.DataFrame.vbt.returns.from_price', 'pd.DataFrame.vbt.returns.from_price', (['ts'], {'year_freq': '"""365 days"""'}), "(ts, year_freq='365 days')\n", (2160, 2186), True, 'import pandas as pd\n'), ((2785, 2812), 'numpy.array', 'np.array', (['[0.21, 0.21, 0.1]'], {}), '([0.21, 0.21, 0.1])\n', (2793, 2812), True, 'import numpy as np\n'), ((3195, 3267), 'numpy.array', 'np.array', (['[[0.21, -0.19, -0.01], [0.21, -0.19, -0.01], [0.1, -0.1, 0.1]]'], {}), 
'([[0.21, -0.19, -0.01], [0.21, -0.19, -0.01], [0.1, -0.1, 0.1]])\n', (3203, 3267), True, 'import numpy as np\n'), ((3757, 3772), 'numpy.array', 'np.array', (['[4.0]'], {}), '([4.0])\n', (3765, 3772), True, 'import numpy as np\n'), ((4044, 4072), 'numpy.array', 'np.array', (['[[4.0, -0.8, 0.0]]'], {}), '([[4.0, -0.8, 0.0]])\n', (4052, 4072), True, 'import numpy as np\n'), ((4273, 4304), 'empyrical.cum_returns', 'empyrical.cum_returns', (["ret['a']"], {}), "(ret['a'])\n", (4294, 4304), False, 'import empyrical\n'), ((4333, 4364), 'empyrical.cum_returns', 'empyrical.cum_returns', (["ret['b']"], {}), "(ret['b'])\n", (4354, 4364), False, 'import empyrical\n'), ((4393, 4424), 'empyrical.cum_returns', 'empyrical.cum_returns', (["ret['c']"], {}), "(ret['c'])\n", (4414, 4424), False, 'import empyrical\n'), ((19912, 19947), 'numpy.array', 'np.array', (['[0.0, 0.0, 0.0, 0.0, 0.0]'], {}), '([0.0, 0.0, 0.0, 0.0, 0.0])\n', (19920, 19947), True, 'import numpy as np\n'), ((20162, 20282), 'numpy.array', 'np.array', (['[[0.0, 0.0, 0.0], [0.0, -0.2, 0.0], [0.0, -0.4, 0.0], [0.0, -0.6, -\n 0.33333333], [0.0, -0.8, -0.66666667]]'], {}), '([[0.0, 0.0, 0.0], [0.0, -0.2, 0.0], [0.0, -0.4, 0.0], [0.0, -0.6, \n -0.33333333], [0.0, -0.8, -0.66666667]])\n', (20170, 20282), True, 'import numpy as np\n'), ((2836, 2935), 'pandas.DatetimeIndex', 'pd.DatetimeIndex', (["['2018-01-01', '2018-01-02', '2018-01-03']"], {'dtype': '"""datetime64[ns]"""', 'freq': '"""D"""'}), "(['2018-01-01', '2018-01-02', '2018-01-03'], dtype=\n 'datetime64[ns]', freq='D')\n", (2852, 2935), True, 'import pandas as pd\n'), ((3369, 3468), 'pandas.DatetimeIndex', 'pd.DatetimeIndex', (["['2018-01-01', '2018-01-02', '2018-01-03']"], {'dtype': '"""datetime64[ns]"""', 'freq': '"""D"""'}), "(['2018-01-01', '2018-01-02', '2018-01-03'], dtype=\n 'datetime64[ns]', freq='D')\n", (3385, 3468), True, 'import pandas as pd\n'), ((3795, 3864), 'pandas.DatetimeIndex', 'pd.DatetimeIndex', (["['2018-01-01']"], {'dtype': 
'"""datetime64[ns]"""', 'freq': '"""252D"""'}), "(['2018-01-01'], dtype='datetime64[ns]', freq='252D')\n", (3811, 3864), True, 'import pandas as pd\n'), ((4094, 4163), 'pandas.DatetimeIndex', 'pd.DatetimeIndex', (["['2018-01-01']"], {'dtype': '"""datetime64[ns]"""', 'freq': '"""252D"""'}), "(['2018-01-01'], dtype='datetime64[ns]', freq='252D')\n", (4110, 4163), True, 'import pandas as pd\n'), ((5041, 5092), 'pandas.Series', 'pd.Series', (['[res_a, res_b, res_c]'], {'index': 'ret.columns'}), '([res_a, res_b, res_c], index=ret.columns)\n', (5050, 5092), True, 'import pandas as pd\n'), ((5252, 5303), 'pandas.Series', 'pd.Series', (['[res_a, res_b, res_c]'], {'index': 'ret.columns'}), '([res_a, res_b, res_c], index=ret.columns)\n', (5261, 5303), True, 'import pandas as pd\n'), ((5684, 5735), 'pandas.Series', 'pd.Series', (['[res_a, res_b, res_c]'], {'index': 'ret.columns'}), '([res_a, res_b, res_c], index=ret.columns)\n', (5693, 5735), True, 'import pandas as pd\n'), ((5905, 5956), 'pandas.Series', 'pd.Series', (['[res_a, res_b, res_c]'], {'index': 'ret.columns'}), '([res_a, res_b, res_c], index=ret.columns)\n', (5914, 5956), True, 'import pandas as pd\n'), ((6563, 6614), 'pandas.Series', 'pd.Series', (['[res_a, res_b, res_c]'], {'index': 'ret.columns'}), '([res_a, res_b, res_c], index=ret.columns)\n', (6572, 6614), True, 'import pandas as pd\n'), ((6822, 6873), 'pandas.Series', 'pd.Series', (['[res_a, res_b, res_c]'], {'index': 'ret.columns'}), '([res_a, res_b, res_c], index=ret.columns)\n', (6831, 6873), True, 'import pandas as pd\n'), ((7250, 7301), 'pandas.Series', 'pd.Series', (['[res_a, res_b, res_c]'], {'index': 'ret.columns'}), '([res_a, res_b, res_c], index=ret.columns)\n', (7259, 7301), True, 'import pandas as pd\n'), ((7468, 7519), 'pandas.Series', 'pd.Series', (['[res_a, res_b, res_c]'], {'index': 'ret.columns'}), '([res_a, res_b, res_c], index=ret.columns)\n', (7477, 7519), True, 'import pandas as pd\n'), ((8554, 8605), 'pandas.Series', 'pd.Series', 
(['[res_a, res_b, res_c]'], {'index': 'ret.columns'}), '([res_a, res_b, res_c], index=ret.columns)\n', (8563, 8605), True, 'import pandas as pd\n'), ((8851, 8902), 'pandas.Series', 'pd.Series', (['[res_a, res_b, res_c]'], {'index': 'ret.columns'}), '([res_a, res_b, res_c], index=ret.columns)\n', (8860, 8902), True, 'import pandas as pd\n'), ((9511, 9562), 'pandas.Series', 'pd.Series', (['[res_a, res_b, res_c]'], {'index': 'ret.columns'}), '([res_a, res_b, res_c], index=ret.columns)\n', (9520, 9562), True, 'import pandas as pd\n'), ((9755, 9806), 'pandas.Series', 'pd.Series', (['[res_a, res_b, res_c]'], {'index': 'ret.columns'}), '([res_a, res_b, res_c], index=ret.columns)\n', (9764, 9806), True, 'import pandas as pd\n'), ((10001, 10070), 'pandas.Series', 'pd.Series', (['[np.nan, np.nan, 0.0005355605507117676]'], {'index': 'ret.columns'}), '([np.nan, np.nan, 0.0005355605507117676], index=ret.columns)\n', (10010, 10070), True, 'import pandas as pd\n'), ((10232, 10301), 'pandas.Series', 'pd.Series', (['[np.nan, np.nan, 0.0003423112350834066]'], {'index': 'ret.columns'}), '([np.nan, np.nan, 0.0003423112350834066], index=ret.columns)\n', (10241, 10301), True, 'import pandas as pd\n'), ((10998, 11049), 'pandas.Series', 'pd.Series', (['[res_a, res_b, res_c]'], {'index': 'ret.columns'}), '([res_a, res_b, res_c], index=ret.columns)\n', (11007, 11049), True, 'import pandas as pd\n'), ((11273, 11324), 'pandas.Series', 'pd.Series', (['[res_a, res_b, res_c]'], {'index': 'ret.columns'}), '([res_a, res_b, res_c], index=ret.columns)\n', (11282, 11324), True, 'import pandas as pd\n'), ((12011, 12062), 'pandas.Series', 'pd.Series', (['[res_a, res_b, res_c]'], {'index': 'ret.columns'}), '([res_a, res_b, res_c], index=ret.columns)\n', (12020, 12062), True, 'import pandas as pd\n'), ((12286, 12337), 'pandas.Series', 'pd.Series', (['[res_a, res_b, res_c]'], {'index': 'ret.columns'}), '([res_a, res_b, res_c], index=ret.columns)\n', (12295, 12337), True, 'import pandas as pd\n'), ((12828, 
12879), 'pandas.Series', 'pd.Series', (['[res_a, res_b, res_c]'], {'index': 'ret.columns'}), '([res_a, res_b, res_c], index=ret.columns)\n', (12837, 12879), True, 'import pandas as pd\n'), ((13089, 13140), 'pandas.Series', 'pd.Series', (['[res_a, res_b, res_c]'], {'index': 'ret.columns'}), '([res_a, res_b, res_c], index=ret.columns)\n', (13098, 13140), True, 'import pandas as pd\n'), ((13565, 13616), 'pandas.Series', 'pd.Series', (['[res_a, res_b, res_c]'], {'index': 'ret.columns'}), '([res_a, res_b, res_c], index=ret.columns)\n', (13574, 13616), True, 'import pandas as pd\n'), ((13800, 13851), 'pandas.Series', 'pd.Series', (['[res_a, res_b, res_c]'], {'index': 'ret.columns'}), '([res_a, res_b, res_c], index=ret.columns)\n', (13809, 13851), True, 'import pandas as pd\n'), ((14518, 14569), 'pandas.Series', 'pd.Series', (['[res_a, res_b, res_c]'], {'index': 'ret.columns'}), '([res_a, res_b, res_c], index=ret.columns)\n', (14527, 14569), True, 'import pandas as pd\n'), ((14781, 14832), 'pandas.Series', 'pd.Series', (['[res_a, res_b, res_c]'], {'index': 'ret.columns'}), '([res_a, res_b, res_c], index=ret.columns)\n', (14790, 14832), True, 'import pandas as pd\n'), ((15197, 15248), 'pandas.Series', 'pd.Series', (['[res_a, res_b, res_c]'], {'index': 'ret.columns'}), '([res_a, res_b, res_c], index=ret.columns)\n', (15206, 15248), True, 'import pandas as pd\n'), ((15428, 15479), 'pandas.Series', 'pd.Series', (['[res_a, res_b, res_c]'], {'index': 'ret.columns'}), '([res_a, res_b, res_c], index=ret.columns)\n', (15437, 15479), True, 'import pandas as pd\n'), ((16129, 16180), 'pandas.Series', 'pd.Series', (['[res_a, res_b, res_c]'], {'index': 'ret.columns'}), '([res_a, res_b, res_c], index=ret.columns)\n', (16138, 16180), True, 'import pandas as pd\n'), ((16386, 16437), 'pandas.Series', 'pd.Series', (['[res_a, res_b, res_c]'], {'index': 'ret.columns'}), '([res_a, res_b, res_c], index=ret.columns)\n', (16395, 16437), True, 'import pandas as pd\n'), ((17138, 17189), 
'pandas.Series', 'pd.Series', (['[res_a, res_b, res_c]'], {'index': 'ret.columns'}), '([res_a, res_b, res_c], index=ret.columns)\n', (17147, 17189), True, 'import pandas as pd\n'), ((17405, 17456), 'pandas.Series', 'pd.Series', (['[res_a, res_b, res_c]'], {'index': 'ret.columns'}), '([res_a, res_b, res_c], index=ret.columns)\n', (17414, 17456), True, 'import pandas as pd\n'), ((17899, 17950), 'pandas.Series', 'pd.Series', (['[res_a, res_b, res_c]'], {'index': 'ret.columns'}), '([res_a, res_b, res_c], index=ret.columns)\n', (17908, 17950), True, 'import pandas as pd\n'), ((18140, 18191), 'pandas.Series', 'pd.Series', (['[res_a, res_b, res_c]'], {'index': 'ret.columns'}), '([res_a, res_b, res_c], index=ret.columns)\n', (18149, 18191), True, 'import pandas as pd\n'), ((18652, 18703), 'pandas.Series', 'pd.Series', (['[res_a, res_b, res_c]'], {'index': 'ret.columns'}), '([res_a, res_b, res_c], index=ret.columns)\n', (18661, 18703), True, 'import pandas as pd\n'), ((18899, 18950), 'pandas.Series', 'pd.Series', (['[res_a, res_b, res_c]'], {'index': 'ret.columns'}), '([res_a, res_b, res_c], index=ret.columns)\n', (18908, 18950), True, 'import pandas as pd\n'), ((19423, 19474), 'pandas.Series', 'pd.Series', (['[res_a, res_b, res_c]'], {'index': 'ret.columns'}), '([res_a, res_b, res_c], index=ret.columns)\n', (19432, 19474), True, 'import pandas as pd\n'), ((19674, 19725), 'pandas.Series', 'pd.Series', (['[res_a, res_b, res_c]'], {'index': 'ret.columns'}), '([res_a, res_b, res_c], index=ret.columns)\n', (19683, 19725), True, 'import pandas as pd\n'), ((20410, 20537), 'pandas.DatetimeIndex', 'pd.DatetimeIndex', (["['2018-01-01', '2018-01-02', '2018-01-03', '2018-01-04', '2018-01-05']"], {'dtype': '"""datetime64[ns]"""', 'freq': 'None'}), "(['2018-01-01', '2018-01-02', '2018-01-03', '2018-01-04',\n '2018-01-05'], dtype='datetime64[ns]', freq=None)\n", (20426, 20537), True, 'import pandas as pd\n'), ((21057, 21108), 'pandas.Series', 'pd.Series', (['[res_a, res_b, res_c]'], 
{'index': 'ret.columns'}), '([res_a, res_b, res_c], index=ret.columns)\n', (21066, 21108), True, 'import pandas as pd\n'), ((21292, 21343), 'pandas.Series', 'pd.Series', (['[res_a, res_b, res_c]'], {'index': 'ret.columns'}), '([res_a, res_b, res_c], index=ret.columns)\n', (21301, 21343), True, 'import pandas as pd\n'), ((22258, 22293), 'pandas.Timestamp', 'pd.Timestamp', (['"""2018-01-01 00:00:00"""'], {}), "('2018-01-01 00:00:00')\n", (22270, 22293), True, 'import pandas as pd\n'), ((22311, 22346), 'pandas.Timestamp', 'pd.Timestamp', (['"""2018-01-05 00:00:00"""'], {}), "('2018-01-05 00:00:00')\n", (22323, 22346), True, 'import pandas as pd\n'), ((22364, 22395), 'pandas.Timedelta', 'pd.Timedelta', (['"""5 days 00:00:00"""'], {}), "('5 days 00:00:00')\n", (22376, 22395), True, 'import pandas as pd\n'), ((2474, 2497), 'datetime.datetime', 'datetime', (['(2018)', '(1)', '(1)', '(0)'], {}), '(2018, 1, 1, 0)\n', (2482, 2497), False, 'from datetime import datetime\n'), ((2511, 2535), 'datetime.datetime', 'datetime', (['(2018)', '(1)', '(1)', '(12)'], {}), '(2018, 1, 1, 12)\n', (2519, 2535), False, 'from datetime import datetime\n'), ((2549, 2572), 'datetime.datetime', 'datetime', (['(2018)', '(1)', '(2)', '(0)'], {}), '(2018, 1, 2, 0)\n', (2557, 2572), False, 'from datetime import datetime\n'), ((2586, 2610), 'datetime.datetime', 'datetime', (['(2018)', '(1)', '(2)', '(12)'], {}), '(2018, 1, 2, 12)\n', (2594, 2610), False, 'from datetime import datetime\n'), ((2624, 2647), 'datetime.datetime', 'datetime', (['(2018)', '(1)', '(3)', '(0)'], {}), '(2018, 1, 3, 0)\n', (2632, 2647), False, 'from datetime import datetime\n'), ((23815, 23850), 'pandas.Timestamp', 'pd.Timestamp', (['"""2018-01-01 00:00:00"""'], {}), "('2018-01-01 00:00:00')\n", (23827, 23850), True, 'import pandas as pd\n'), ((23868, 23903), 'pandas.Timestamp', 'pd.Timestamp', (['"""2018-01-05 00:00:00"""'], {}), "('2018-01-05 00:00:00')\n", (23880, 23903), True, 'import pandas as pd\n'), ((23921, 23952), 
'pandas.Timedelta', 'pd.Timedelta', (['"""5 days 00:00:00"""'], {}), "('5 days 00:00:00')\n", (23933, 23952), True, 'import pandas as pd\n'), ((24501, 24536), 'pandas.Timestamp', 'pd.Timestamp', (['"""2018-01-01 00:00:00"""'], {}), "('2018-01-01 00:00:00')\n", (24513, 24536), True, 'import pandas as pd\n'), ((24554, 24589), 'pandas.Timestamp', 'pd.Timestamp', (['"""2018-01-05 00:00:00"""'], {}), "('2018-01-05 00:00:00')\n", (24566, 24589), True, 'import pandas as pd\n'), ((24607, 24638), 'pandas.Timedelta', 'pd.Timedelta', (['"""5 days 00:00:00"""'], {}), "('5 days 00:00:00')\n", (24619, 24638), True, 'import pandas as pd\n'), ((25174, 25209), 'pandas.Timestamp', 'pd.Timestamp', (['"""2018-01-01 00:00:00"""'], {}), "('2018-01-01 00:00:00')\n", (25186, 25209), True, 'import pandas as pd\n'), ((25227, 25262), 'pandas.Timestamp', 'pd.Timestamp', (['"""2018-01-05 00:00:00"""'], {}), "('2018-01-05 00:00:00')\n", (25239, 25262), True, 'import pandas as pd\n'), ((25280, 25311), 'pandas.Timedelta', 'pd.Timedelta', (['"""5 days 00:00:00"""'], {}), "('5 days 00:00:00')\n", (25292, 25311), True, 'import pandas as pd\n'), ((1204, 1224), 'pandas.Series', 'pd.Series', (['[1, 2, 3]'], {}), '([1, 2, 3])\n', (1213, 1224), True, 'import pandas as pd\n'), ((1397, 1419), 'numpy.timedelta64', 'np.timedelta64', (['(4)', '"""D"""'], {}), "(4, 'D')\n", (1411, 1419), True, 'import numpy as np\n'), ((1695, 1715), 'pandas.Series', 'pd.Series', (['[1, 2, 3]'], {}), '([1, 2, 3])\n', (1704, 1715), True, 'import pandas as pd\n'), ((1273, 1293), 'pandas.Series', 'pd.Series', (['[1, 2, 3]'], {}), '([1, 2, 3])\n', (1282, 1293), True, 'import pandas as pd\n'), ((1359, 1379), 'pandas.Series', 'pd.Series', (['[1, 2, 3]'], {}), '([1, 2, 3])\n', (1368, 1379), True, 'import pandas as pd\n')] |
import numpy as np
import pytest
from steppy.base import Step, IdentityOperation, StepsError, make_transformer
from steppy.adapter import Adapter, E
from .steppy_test_utils import EXP_DIR
@pytest.fixture
def data():
    """Provide a fixed bundle of three named inputs shared by the step tests.

    'input_1' and 'input_3' both expose a 'labels' key on purpose, so tests
    can exercise both the conflicting and the non-conflicting input cases.
    """
    features = np.array([
        [1, 6],
        [2, 5],
        [3, 4]
    ])
    feature_labels = np.array([2, 5, 3])
    extra_features = np.array([
        [5, 7, 3],
        [67, 4, 5],
        [6, 13, 14]
    ])
    images = np.array([
        [[0, 255], [255, 0]],
        [[255, 0], [0, 255]],
        [[255, 255], [0, 0]],
    ])
    image_labels = np.array([1, 1, 0])
    return {
        'input_1': {'features': features, 'labels': feature_labels},
        'input_2': {'extra_features': extra_features},
        'input_3': {'images': images, 'labels': image_labels},
    }
@pytest.mark.parametrize("mode", [0, 1])
def test_make_transformer(mode):
    """make_transformer wraps a plain function and forwards keyword arguments."""
    def fun(x, y, mode=0):
        if mode == 0:
            return x + y
        return x - y

    transformer = make_transformer(fun)
    transformer.fit()
    result = transformer.transform(7, 3, mode=mode)
    expected = 10 if mode == 0 else 4
    assert result == expected
def test_inputs_without_conflicting_names_do_not_require_adapter(data):
    """Inputs with disjoint key names flow through an identity step untouched."""
    single_input_step = Step(
        name='test_inputs_without_conflicting_names_do_not_require_adapter_1',
        transformer=IdentityOperation(),
        input_data=['input_1'],
        experiment_directory=EXP_DIR
    )
    assert single_input_step.fit_transform(data) == data['input_1']

    two_input_step = Step(
        name='test_inputs_without_conflicting_names_do_not_require_adapter_2',
        transformer=IdentityOperation(),
        input_data=['input_1', 'input_2'],
        experiment_directory=EXP_DIR
    )
    # Two non-conflicting inputs should simply be merged into one dict.
    merged = dict(data['input_1'])
    merged.update(data['input_2'])
    assert two_input_step.fit_transform(data) == merged
def test_inputs_with_conflicting_names_require_adapter(data):
    """Overlapping key names ('labels' in both inputs) must raise StepsError
    when no adapter disambiguates them."""
    conflicting_step = Step(
        name='test_inputs_with_conflicting_names_require_adapter',
        transformer=IdentityOperation(),
        input_data=['input_1', 'input_3'],
        experiment_directory=EXP_DIR
    )
    with pytest.raises(StepsError):
        conflicting_step.fit_transform(data)
def test_step_with_adapted_inputs(data):
    """An Adapter remaps conflicting input keys onto distinct output names."""
    wiring = {
        'img': E('input_3', 'images'),
        'fea': E('input_1', 'features'),
        'l1': E('input_3', 'labels'),
        'l2': E('input_1', 'labels'),
    }
    adapted_step = Step(
        name='test_step_wit_adapted_inputs',
        transformer=IdentityOperation(),
        input_data=['input_1', 'input_3'],
        experiment_directory=EXP_DIR,
        adapter=Adapter(wiring)
    )
    output = adapted_step.fit_transform(data)
    # The identity transformer must hand back the exact same array objects,
    # just re-keyed according to the adapter wiring.
    expected = {
        'img': data['input_3']['images'],
        'fea': data['input_1']['features'],
        'l1': data['input_3']['labels'],
        'l2': data['input_1']['labels'],
    }
    assert output == expected
| [
"steppy.base.make_transformer",
"steppy.base.IdentityOperation",
"pytest.raises",
"numpy.array",
"steppy.adapter.E",
"pytest.mark.parametrize"
] | [((845, 884), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""mode"""', '[0, 1]'], {}), "('mode', [0, 1])\n", (868, 884), False, 'import pytest\n'), ((999, 1020), 'steppy.base.make_transformer', 'make_transformer', (['fun'], {}), '(fun)\n', (1015, 1020), False, 'from steppy.base import Step, IdentityOperation, StepsError, make_transformer\n'), ((2085, 2110), 'pytest.raises', 'pytest.raises', (['StepsError'], {}), '(StepsError)\n', (2098, 2110), False, 'import pytest\n'), ((278, 312), 'numpy.array', 'np.array', (['[[1, 6], [2, 5], [3, 4]]'], {}), '([[1, 6], [2, 5], [3, 4]])\n', (286, 312), True, 'import numpy as np\n'), ((398, 417), 'numpy.array', 'np.array', (['[2, 5, 3]'], {}), '([2, 5, 3])\n', (406, 417), True, 'import numpy as np\n'), ((480, 526), 'numpy.array', 'np.array', (['[[5, 7, 3], [67, 4, 5], [6, 13, 14]]'], {}), '([[5, 7, 3], [67, 4, 5], [6, 13, 14]])\n', (488, 526), True, 'import numpy as np\n'), ((643, 719), 'numpy.array', 'np.array', (['[[[0, 255], [255, 0]], [[255, 0], [0, 255]], [[255, 255], [0, 0]]]'], {}), '([[[0, 255], [255, 0]], [[255, 0], [0, 255]], [[255, 255], [0, 0]]])\n', (651, 719), True, 'import numpy as np\n'), ((806, 825), 'numpy.array', 'np.array', (['[1, 1, 0]'], {}), '([1, 1, 0])\n', (814, 825), True, 'import numpy as np\n'), ((1308, 1327), 'steppy.base.IdentityOperation', 'IdentityOperation', ([], {}), '()\n', (1325, 1327), False, 'from steppy.base import Step, IdentityOperation, StepsError, make_transformer\n'), ((1596, 1615), 'steppy.base.IdentityOperation', 'IdentityOperation', ([], {}), '()\n', (1613, 1615), False, 'from steppy.base import Step, IdentityOperation, StepsError, make_transformer\n'), ((1969, 1988), 'steppy.base.IdentityOperation', 'IdentityOperation', ([], {}), '()\n', (1986, 1988), False, 'from steppy.base import Step, IdentityOperation, StepsError, make_transformer\n'), ((2269, 2288), 'steppy.base.IdentityOperation', 'IdentityOperation', ([], {}), '()\n', (2286, 2288), False, 'from steppy.base 
import Step, IdentityOperation, StepsError, make_transformer\n'), ((2416, 2438), 'steppy.adapter.E', 'E', (['"""input_3"""', '"""images"""'], {}), "('input_3', 'images')\n", (2417, 2438), False, 'from steppy.adapter import Adapter, E\n'), ((2459, 2483), 'steppy.adapter.E', 'E', (['"""input_1"""', '"""features"""'], {}), "('input_1', 'features')\n", (2460, 2483), False, 'from steppy.adapter import Adapter, E\n'), ((2503, 2525), 'steppy.adapter.E', 'E', (['"""input_3"""', '"""labels"""'], {}), "('input_3', 'labels')\n", (2504, 2525), False, 'from steppy.adapter import Adapter, E\n'), ((2545, 2567), 'steppy.adapter.E', 'E', (['"""input_1"""', '"""labels"""'], {}), "('input_1', 'labels')\n", (2546, 2567), False, 'from steppy.adapter import Adapter, E\n')] |
"""
Copyright (c) 2019, National Institute of Informatics
All rights reserved.
Author: <NAME>
-----------------------------------------------------
Script for fine-tuning ClassNSeg (the proposed method)
"""
import os
import random
import torch
import torch.backends.cudnn as cudnn
import numpy as np
from torch.optim import Adam
import torch.utils.data
import torchvision.datasets as dset
import torchvision.transforms as transforms
import torchvision.models as models
from tqdm import tqdm
from sklearn import metrics
from scipy.optimize import brentq
from scipy.interpolate import interp1d
from sklearn.metrics import roc_curve
import argparse
from PIL import Image
from model.ae import Encoder
from model.ae import Decoder
from model.ae import ActivationLoss
from model.ae import ReconstructionLoss
from model.ae import SegmentationLoss
# Command-line configuration for the ClassNSeg fine-tuning run.
parser = argparse.ArgumentParser()
parser.add_argument('--dataset', default='datasets/finetune', help='path to dataset')
parser.add_argument('--train_set', default='train_ft', help='path to train dataset')
parser.add_argument('--val_set', default='validation_ft', help='path to validation dataset')
parser.add_argument('--workers', type=int, default=0, help='number of data loading workers (default: %(default)s)')
parser.add_argument('--batchSize', type=int, default=64, help='input batch size (default: %(default)s)')
parser.add_argument('--imageSize', type=int, default=256, help='the height / width of the input image to network (default: %(default)s)')
parser.add_argument('--niter', type=int, default=50, help='number of epochs to train for (default: %(default)s)')
# BUGFIX: the old help texts advertised stale defaults (lr "0.01" vs actual
# 0.001, beta1 "0.5" vs 0.9, weight_decay "0.005" vs 0.0005); using argparse's
# %(default)s keeps the shown default in sync with the real value automatically.
parser.add_argument('--lr', type=float, default=0.001, help='learning rate (default: %(default)s)')
parser.add_argument('--beta1', type=float, default=0.9, help='beta1 for adam (default: %(default)s)')
parser.add_argument('--weight_decay', type=float, default=0.0005, help='weight decay (default: %(default)s)')
# NOTE(review): --gamma's help previously read "weight decay", which is wrong;
# its exact role should be confirmed against the training loop.
parser.add_argument('--gamma', type=float, default=1, help='gamma coefficient (default: %(default)s)')
parser.add_argument('--eps', type=float, default=1e-07, help='epsilon for adam (default: %(default)s)')
parser.add_argument('--gpu_id', type=int, default=0, help='GPU ID (a negative value disables CUDA)')
parser.add_argument('--outf', default='checkpoints/finetune', help='folder to output images and model checkpoints')
parser.add_argument('--manualSeed', type=int, help='manual seed (picked at random if omitted)')
opt = parser.parse_args()
print(opt)
if __name__ == "__main__":
if opt.manualSeed is None:
opt.manualSeed = random.randint(1, 10000)
print("Random Seed: ", opt.manualSeed)
random.seed(opt.manualSeed)
torch.manual_seed(opt.manualSeed)
if opt.gpu_id >= 0:
torch.cuda.manual_seed_all(opt.manualSeed)
cudnn.benchmark = True
text_writer = open(os.path.join(opt.outf, 'finetune.csv'), 'w')
encoder = Encoder(3)
decoder = Decoder(3)
act_loss_fn = ActivationLoss()
rect_loss_fn = ReconstructionLoss()
seg_loss_fn = SegmentationLoss()
optimizer_encoder = Adam(encoder.parameters(), lr=opt.lr, betas=(opt.beta1, 0.999), weight_decay=opt.weight_decay, eps=opt.eps)
optimizer_decoder = Adam(decoder.parameters(), lr=opt.lr, betas=(opt.beta1, 0.999), weight_decay=opt.weight_decay, eps=opt.eps)
encoder.load_state_dict(torch.load(os.path.join(opt.outf,'encoder_0.pt')))
encoder.train(mode=True)
decoder.load_state_dict(torch.load(os.path.join(opt.outf,'decoder_0.pt')))
decoder.train(mode=True)
optimizer_encoder.load_state_dict(torch.load(os.path.join(opt.outf,'optim_encoder_0.pt')))
optimizer_decoder.load_state_dict(torch.load(os.path.join(opt.outf,'optim_decoder_0.pt')))
if opt.gpu_id >= 0:
for state in optimizer_encoder.state.values():
for k, v in state.items():
if isinstance(v, torch.Tensor):
state[k] = v.cuda(opt.gpu_id)
for state in optimizer_decoder.state.values():
for k, v in state.items():
if isinstance(v, torch.Tensor):
state[k] = v.cuda(opt.gpu_id)
if opt.gpu_id >= 0:
encoder.cuda(opt.gpu_id)
decoder.cuda(opt.gpu_id)
act_loss_fn.cuda(opt.gpu_id)
seg_loss_fn.cuda(opt.gpu_id)
rect_loss_fn.cuda(opt.gpu_id)
    class Normalize_3D(object):
        """In-place channel-wise normalization: t = (t - mean) / std per channel."""
        def __init__(self, mean, std):
            # Per-channel means and standard deviations; zipped against the
            # tensor's leading dimension in __call__.
            self.mean = mean
            self.std = std
        def __call__(self, tensor):
            """
            Normalize a tensor image in place, channel by channel.
            Args:
                tensor (Tensor): Tensor image of size (C, H, W) to be normalized.
            Returns:
                Tensor: the same tensor object, mutated in place.
            NOTE(review): this script applies the transform to a batched
            (N, C, H, W) tensor from the DataLoader; zip then pairs batch
            samples (not channels) with the three per-channel stats — confirm
            this is intended.
            """
            for t, m, s in zip(tensor, self.mean, self.std):
                # sub_/div_ mutate each slice of the input tensor in place.
                t.sub_(m).div_(s)
            return tensor
    class UnNormalize_3D(object):
        """Inverse of Normalize_3D: in-place t = t * std + mean per channel."""
        def __init__(self, mean, std):
            # Per-channel means and standard deviations; zipped against the
            # tensor's leading dimension in __call__.
            self.mean = mean
            self.std = std
        def __call__(self, tensor):
            """
            Undo normalization on a tensor image in place, channel by channel.
            Args:
                tensor (Tensor): Tensor image of size (C, H, W) to be de-normalized.
            Returns:
                Tensor: the same tensor object, mutated in place.
            NOTE(review): as with Normalize_3D, a batched (N, C, H, W) input
            would zip batch samples against the per-channel stats — confirm
            the intended input rank.
            """
            for t, m, s in zip(tensor, self.mean, self.std):
                # mul_/add_ mutate each slice of the input tensor in place.
                t.mul_(s).add_(m)
            return tensor
transform_tns = transforms.Compose([
transforms.ToTensor(),
])
transform_pil = transforms.Compose([
transforms.ToPILImage(),
])
transform_norm = Normalize_3D((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))
transform_unnorm = UnNormalize_3D((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))
dataset_train = dset.ImageFolder(root=os.path.join(opt.dataset, opt.train_set), transform=transform_tns)
assert dataset_train
dataloader_train = torch.utils.data.DataLoader(dataset_train, batch_size=opt.batchSize, shuffle=True, num_workers=int(opt.workers))
dataset_val = dset.ImageFolder(root=os.path.join(opt.dataset, opt.val_set), transform=transform_tns)
assert dataset_val
dataloader_val = torch.utils.data.DataLoader(dataset_val, batch_size=opt.batchSize, shuffle=False, num_workers=int(opt.workers))
for epoch in range(1, opt.niter+1):
count = 0
loss_act_train = 0.0
loss_seg_train = 0.0
loss_rect_train = 0.0
loss_act_test = 0.0
loss_seg_test = 0.0
loss_rect_test = 0.0
tol_label = np.array([], dtype=np.float)
tol_pred = np.array([], dtype=np.float)
for fft_data, labels_data in tqdm(dataloader_train):
optimizer_encoder.zero_grad()
optimizer_decoder.zero_grad()
fft_label = labels_data.numpy().astype(np.float)
labels_data = labels_data.float()
rgb = transform_norm(fft_data[:,:,:,0:256])
mask = fft_data[:,0,:,256:512]
mask[mask >= 0.5] = 1.0
mask[mask < 0.5] = 0.0
mask = mask.long()
if opt.gpu_id >= 0:
rgb = rgb.cuda(opt.gpu_id)
mask = mask.cuda(opt.gpu_id)
labels_data = labels_data.cuda(opt.gpu_id)
latent = encoder(rgb).reshape(-1, 2, 64, 16, 16)
zero_abs = torch.abs(latent[:,0]).view(latent.shape[0], -1)
zero = zero_abs.mean(dim=1)
one_abs = torch.abs(latent[:,1]).view(latent.shape[0], -1)
one = one_abs.mean(dim=1)
loss_act = act_loss_fn(zero, one, labels_data)
loss_act_data = loss_act.item()
y = torch.eye(2)
if opt.gpu_id >= 0:
y = y.cuda(opt.gpu_id)
y = y.index_select(dim=0, index=labels_data.data.long())
latent = (latent * y[:,:,None, None, None]).reshape(-1, 128, 16, 16)
seg, rect = decoder(latent)
loss_seg = seg_loss_fn(seg, mask)
loss_seg = loss_seg * opt.gamma
loss_seg_data = loss_seg.item()
loss_rect = rect_loss_fn(rect, rgb)
loss_rect = loss_rect * opt.gamma
loss_rect_data = loss_rect.item()
loss_total = loss_act + loss_seg + loss_rect
loss_total.backward()
optimizer_decoder.step()
optimizer_encoder.step()
output_pred = np.zeros((fft_data.shape[0]), dtype=np.float)
for i in range(fft_data.shape[0]):
if one[i] >= zero[i]:
output_pred[i] = 1.0
else:
output_pred[i] = 0.0
tol_label = np.concatenate((tol_label, fft_label))
tol_pred = np.concatenate((tol_pred, output_pred))
loss_act_train += loss_act_data
loss_seg_train += loss_seg_data
loss_rect_train += loss_rect_data
count += 1
acc_train = metrics.accuracy_score(tol_label, tol_pred)
loss_act_train /= count
loss_seg_train /= count
loss_rect_train /= count
########################################################################
# do checkpointing & validation
torch.save(encoder.state_dict(), os.path.join(opt.outf, 'encoder_%d.pt' % epoch))
torch.save(optimizer_encoder.state_dict(), os.path.join(opt.outf, 'optim_encoder_%d.pt' % epoch))
torch.save(decoder.state_dict(), os.path.join(opt.outf, 'decoder_%d.pt' % epoch))
torch.save(optimizer_decoder.state_dict(), os.path.join(opt.outf, 'optim_decoder_%d.pt' % epoch))
encoder.eval()
decoder.eval()
tol_label = np.array([], dtype=np.float)
tol_pred = np.array([], dtype=np.float)
tol_pred_prob = np.array([], dtype=np.float)
count = 0
for fft_data, labels_data in tqdm(dataloader_val):
fft_label = labels_data.numpy().astype(np.float)
labels_data = labels_data.float()
rgb = transform_norm(fft_data[:,:,:,0:256])
mask = fft_data[:,0,:,256:512]
mask[mask >= 0.5] = 1.0
mask[mask < 0.5] = 0.0
mask = mask.long()
if opt.gpu_id >= 0:
rgb = rgb.cuda(opt.gpu_id)
mask = mask.cuda(opt.gpu_id)
labels_data = labels_data.cuda(opt.gpu_id)
latent = encoder(rgb).reshape(-1, 2, 64, 16, 16)
zero_abs = torch.abs(latent[:,0]).view(latent.shape[0], -1)
zero = zero_abs.mean(dim=1)
one_abs = torch.abs(latent[:,1]).view(latent.shape[0], -1)
one = one_abs.mean(dim=1)
loss_act = act_loss_fn(zero, one, labels_data)
loss_act_data = loss_act.item()
y = torch.eye(2)
if opt.gpu_id >= 0:
y = y.cuda(opt.gpu_id)
y = y.index_select(dim=0, index=labels_data.data.long())
latent = (latent * y[:,:,None, None, None]).reshape(-1, 128, 16, 16)
seg, rect = decoder(latent)
loss_seg = seg_loss_fn(seg, mask)
loss_seg = loss_seg * opt.gamma
loss_seg_data = loss_seg.item()
loss_rect = rect_loss_fn(rect, rgb)
loss_rect = loss_rect * opt.gamma
loss_rect_data = loss_rect.item()
output_pred = np.zeros((fft_data.shape[0]), dtype=np.float)
for i in range(fft_data.shape[0]):
if one[i] >= zero[i]:
output_pred[i] = 1.0
else:
output_pred[i] = 0.0
tol_label = np.concatenate((tol_label, fft_label))
tol_pred = np.concatenate((tol_pred, output_pred))
pred_prob = torch.softmax(torch.cat((zero.reshape(zero.shape[0],1), one.reshape(one.shape[0],1)), dim=1), dim=1)
tol_pred_prob = np.concatenate((tol_pred_prob, pred_prob[:,1].data.cpu().numpy()))
loss_act_test += loss_act_data
loss_seg_test += loss_seg_data
loss_rect_test += loss_rect_data
count += 1
acc_test = metrics.accuracy_score(tol_label, tol_pred)
loss_act_test /= count
loss_seg_test /= count
loss_rect_test /= count
fpr, tpr, thresholds = roc_curve(tol_label, tol_pred_prob, pos_label=1)
eer = brentq(lambda x : 1. - x - interp1d(fpr, tpr)(x), 0., 1.)
print('[Epoch %d] Train: act_loss: %.4f seg_loss: %.4f rect_loss: %.4f acc: %.2f | Test: act_loss: %.4f seg_loss: %.4f rect_loss: %.4f acc: %.2f eer: %.2f'
% (epoch, loss_act_train, loss_seg_train, loss_rect_train, acc_train*100, loss_act_test, loss_seg_test, loss_rect_test, acc_test*100, eer*100))
text_writer.write('%d,%.4f,%.4f,%.4f,%.2f,%.4f,%.4f,%.4f,%.2f,%.2f\n'
% (epoch, loss_act_train, loss_seg_train, loss_rect_train, acc_train*100, loss_act_test, loss_seg_test, loss_rect_test, acc_test*100, eer*100))
text_writer.flush()
########################################################################
real_img = transform_tns(Image.open(os.path.join('input', 'real.jpg'))).unsqueeze(0)[:,:,:,0:256]
real_mask = transform_tns(Image.open(os.path.join('input', 'real.jpg'))).unsqueeze(0)[:,:,:,256:512]
fake_img = transform_tns(Image.open(os.path.join('input', 'fake.jpg'))).unsqueeze(0)[:,:,:,0:256]
fake_mask = transform_tns(Image.open(os.path.join('input', 'fake.jpg'))).unsqueeze(0)[:,:,:,256:512]
rgb = torch.cat((real_img, fake_img), dim=0)
rgb = transform_norm(rgb)
real_mask[real_mask >= 0.5] = 1.0
real_mask[real_mask < 0.5] = 0.0
real_mask = real_mask.long()
fake_mask[fake_mask >= 0.5] = 1.0
fake_mask[fake_mask < 0.5] = 0.0
fake_mask = fake_mask.long()
# real = 1, fake = 0
labels_data = torch.FloatTensor([1,0])
if opt.gpu_id >= 0:
rgb = rgb.cuda(opt.gpu_id)
labels_data = labels_data.cuda(opt.gpu_id)
latent = encoder(rgb).reshape(-1, 2, 64, 16, 16)
zero_abs = torch.abs(latent[:,0]).view(latent.shape[0], -1)
zero = zero_abs.mean(dim=1)
one_abs = torch.abs(latent[:,1]).view(latent.shape[0], -1)
one = one_abs.mean(dim=1)
y = torch.eye(2)
if opt.gpu_id >= 0:
y = y.cuda(opt.gpu_id)
y = y.index_select(dim=0, index=labels_data.data.long())
latent = (latent * y[:,:,None, None, None]).reshape(-1, 128, 16, 16)
seg, rect = decoder(latent)
seg = seg[:,1,:,:].detach().cpu()
seg[seg >= 0.5] = 1.0
seg[seg < 0.5] = 0.0
rect = transform_unnorm(rect).detach().cpu()
real_seg = transform_pil(seg[0])
fake_seg = transform_pil(seg[1])
real_img = transform_pil(rect[0])
fake_img = transform_pil(rect[1])
real_seg.save(os.path.join(opt.outf, 'image', 'seg_real_' + str(epoch).zfill(3) + '.jpg'))
fake_seg.save(os.path.join(opt.outf, 'image', 'seg_fake_' + str(epoch).zfill(3) + '.jpg'))
real_img.save(os.path.join(opt.outf, 'image', 'real_' + str(epoch).zfill(3) + '.jpg'))
fake_img.save(os.path.join(opt.outf, 'image', 'fake_' + str(epoch).zfill(3) + '.jpg'))
encoder.train(mode=True)
decoder.train(mode=True)
text_writer.close() | [
"torch.eye",
"argparse.ArgumentParser",
"sklearn.metrics.accuracy_score",
"torch.cat",
"model.ae.Encoder",
"scipy.interpolate.interp1d",
"os.path.join",
"random.randint",
"torch.FloatTensor",
"torchvision.transforms.ToPILImage",
"model.ae.ActivationLoss",
"random.seed",
"tqdm.tqdm",
"torch... | [((884, 909), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (907, 909), False, 'import argparse\n'), ((2494, 2521), 'random.seed', 'random.seed', (['opt.manualSeed'], {}), '(opt.manualSeed)\n', (2505, 2521), False, 'import random\n'), ((2527, 2560), 'torch.manual_seed', 'torch.manual_seed', (['opt.manualSeed'], {}), '(opt.manualSeed)\n', (2544, 2560), False, 'import torch\n'), ((2760, 2770), 'model.ae.Encoder', 'Encoder', (['(3)'], {}), '(3)\n', (2767, 2770), False, 'from model.ae import Encoder\n'), ((2786, 2796), 'model.ae.Decoder', 'Decoder', (['(3)'], {}), '(3)\n', (2793, 2796), False, 'from model.ae import Decoder\n'), ((2816, 2832), 'model.ae.ActivationLoss', 'ActivationLoss', ([], {}), '()\n', (2830, 2832), False, 'from model.ae import ActivationLoss\n'), ((2853, 2873), 'model.ae.ReconstructionLoss', 'ReconstructionLoss', ([], {}), '()\n', (2871, 2873), False, 'from model.ae import ReconstructionLoss\n'), ((2893, 2911), 'model.ae.SegmentationLoss', 'SegmentationLoss', ([], {}), '()\n', (2909, 2911), False, 'from model.ae import SegmentationLoss\n'), ((2414, 2438), 'random.randint', 'random.randint', (['(1)', '(10000)'], {}), '(1, 10000)\n', (2428, 2438), False, 'import random\n'), ((2595, 2637), 'torch.cuda.manual_seed_all', 'torch.cuda.manual_seed_all', (['opt.manualSeed'], {}), '(opt.manualSeed)\n', (2621, 2637), False, 'import torch\n'), ((2696, 2734), 'os.path.join', 'os.path.join', (['opt.outf', '"""finetune.csv"""'], {}), "(opt.outf, 'finetune.csv')\n", (2708, 2734), False, 'import os\n'), ((6370, 6398), 'numpy.array', 'np.array', (['[]'], {'dtype': 'np.float'}), '([], dtype=np.float)\n', (6378, 6398), True, 'import numpy as np\n'), ((6419, 6447), 'numpy.array', 'np.array', (['[]'], {'dtype': 'np.float'}), '([], dtype=np.float)\n', (6427, 6447), True, 'import numpy as np\n'), ((6488, 6510), 'tqdm.tqdm', 'tqdm', (['dataloader_train'], {}), '(dataloader_train)\n', (6492, 6510), False, 'from tqdm import tqdm\n'), 
((8852, 8895), 'sklearn.metrics.accuracy_score', 'metrics.accuracy_score', (['tol_label', 'tol_pred'], {}), '(tol_label, tol_pred)\n', (8874, 8895), False, 'from sklearn import metrics\n'), ((9596, 9624), 'numpy.array', 'np.array', (['[]'], {'dtype': 'np.float'}), '([], dtype=np.float)\n', (9604, 9624), True, 'import numpy as np\n'), ((9645, 9673), 'numpy.array', 'np.array', (['[]'], {'dtype': 'np.float'}), '([], dtype=np.float)\n', (9653, 9673), True, 'import numpy as np\n'), ((9699, 9727), 'numpy.array', 'np.array', (['[]'], {'dtype': 'np.float'}), '([], dtype=np.float)\n', (9707, 9727), True, 'import numpy as np\n'), ((9789, 9809), 'tqdm.tqdm', 'tqdm', (['dataloader_val'], {}), '(dataloader_val)\n', (9793, 9809), False, 'from tqdm import tqdm\n'), ((12122, 12165), 'sklearn.metrics.accuracy_score', 'metrics.accuracy_score', (['tol_label', 'tol_pred'], {}), '(tol_label, tol_pred)\n', (12144, 12165), False, 'from sklearn import metrics\n'), ((12297, 12345), 'sklearn.metrics.roc_curve', 'roc_curve', (['tol_label', 'tol_pred_prob'], {'pos_label': '(1)'}), '(tol_label, tol_pred_prob, pos_label=1)\n', (12306, 12345), False, 'from sklearn.metrics import roc_curve\n'), ((13548, 13586), 'torch.cat', 'torch.cat', (['(real_img, fake_img)'], {'dim': '(0)'}), '((real_img, fake_img), dim=0)\n', (13557, 13586), False, 'import torch\n'), ((13927, 13952), 'torch.FloatTensor', 'torch.FloatTensor', (['[1, 0]'], {}), '([1, 0])\n', (13944, 13952), False, 'import torch\n'), ((14367, 14379), 'torch.eye', 'torch.eye', (['(2)'], {}), '(2)\n', (14376, 14379), False, 'import torch\n'), ((3228, 3266), 'os.path.join', 'os.path.join', (['opt.outf', '"""encoder_0.pt"""'], {}), "(opt.outf, 'encoder_0.pt')\n", (3240, 3266), False, 'import os\n'), ((3340, 3378), 'os.path.join', 'os.path.join', (['opt.outf', '"""decoder_0.pt"""'], {}), "(opt.outf, 'decoder_0.pt')\n", (3352, 3378), False, 'import os\n'), ((3462, 3506), 'os.path.join', 'os.path.join', (['opt.outf', '"""optim_encoder_0.pt"""'], {}), 
"(opt.outf, 'optim_encoder_0.pt')\n", (3474, 3506), False, 'import os\n'), ((3558, 3602), 'os.path.join', 'os.path.join', (['opt.outf', '"""optim_decoder_0.pt"""'], {}), "(opt.outf, 'optim_decoder_0.pt')\n", (3570, 3602), False, 'import os\n'), ((5272, 5293), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (5291, 5293), True, 'import torchvision.transforms as transforms\n'), ((5356, 5379), 'torchvision.transforms.ToPILImage', 'transforms.ToPILImage', ([], {}), '()\n', (5377, 5379), True, 'import torchvision.transforms as transforms\n'), ((5608, 5648), 'os.path.join', 'os.path.join', (['opt.dataset', 'opt.train_set'], {}), '(opt.dataset, opt.train_set)\n', (5620, 5648), False, 'import os\n'), ((5881, 5919), 'os.path.join', 'os.path.join', (['opt.dataset', 'opt.val_set'], {}), '(opt.dataset, opt.val_set)\n', (5893, 5919), False, 'import os\n'), ((7523, 7535), 'torch.eye', 'torch.eye', (['(2)'], {}), '(2)\n', (7532, 7535), False, 'import torch\n'), ((8294, 8337), 'numpy.zeros', 'np.zeros', (['fft_data.shape[0]'], {'dtype': 'np.float'}), '(fft_data.shape[0], dtype=np.float)\n', (8302, 8337), True, 'import numpy as np\n'), ((8563, 8601), 'numpy.concatenate', 'np.concatenate', (['(tol_label, fft_label)'], {}), '((tol_label, fft_label))\n', (8577, 8601), True, 'import numpy as np\n'), ((8626, 8665), 'numpy.concatenate', 'np.concatenate', (['(tol_pred, output_pred)'], {}), '((tol_pred, output_pred))\n', (8640, 8665), True, 'import numpy as np\n'), ((9167, 9214), 'os.path.join', 'os.path.join', (['opt.outf', "('encoder_%d.pt' % epoch)"], {}), "(opt.outf, 'encoder_%d.pt' % epoch)\n", (9179, 9214), False, 'import os\n'), ((9268, 9321), 'os.path.join', 'os.path.join', (['opt.outf', "('optim_encoder_%d.pt' % epoch)"], {}), "(opt.outf, 'optim_encoder_%d.pt' % epoch)\n", (9280, 9321), False, 'import os\n'), ((9367, 9414), 'os.path.join', 'os.path.join', (['opt.outf', "('decoder_%d.pt' % epoch)"], {}), "(opt.outf, 'decoder_%d.pt' % epoch)\n", (9379, 
9414), False, 'import os\n'), ((9468, 9521), 'os.path.join', 'os.path.join', (['opt.outf', "('optim_decoder_%d.pt' % epoch)"], {}), "(opt.outf, 'optim_decoder_%d.pt' % epoch)\n", (9480, 9521), False, 'import os\n'), ((10734, 10746), 'torch.eye', 'torch.eye', (['(2)'], {}), '(2)\n', (10743, 10746), False, 'import torch\n'), ((11332, 11375), 'numpy.zeros', 'np.zeros', (['fft_data.shape[0]'], {'dtype': 'np.float'}), '(fft_data.shape[0], dtype=np.float)\n', (11340, 11375), True, 'import numpy as np\n'), ((11601, 11639), 'numpy.concatenate', 'np.concatenate', (['(tol_label, fft_label)'], {}), '((tol_label, fft_label))\n', (11615, 11639), True, 'import numpy as np\n'), ((11664, 11703), 'numpy.concatenate', 'np.concatenate', (['(tol_pred, output_pred)'], {}), '((tol_pred, output_pred))\n', (11678, 11703), True, 'import numpy as np\n'), ((14161, 14184), 'torch.abs', 'torch.abs', (['latent[:, 0]'], {}), '(latent[:, 0])\n', (14170, 14184), False, 'import torch\n'), ((14268, 14291), 'torch.abs', 'torch.abs', (['latent[:, 1]'], {}), '(latent[:, 1])\n', (14277, 14291), False, 'import torch\n'), ((7194, 7217), 'torch.abs', 'torch.abs', (['latent[:, 0]'], {}), '(latent[:, 0])\n', (7203, 7217), False, 'import torch\n'), ((7309, 7332), 'torch.abs', 'torch.abs', (['latent[:, 1]'], {}), '(latent[:, 1])\n', (7318, 7332), False, 'import torch\n'), ((10405, 10428), 'torch.abs', 'torch.abs', (['latent[:, 0]'], {}), '(latent[:, 0])\n', (10414, 10428), False, 'import torch\n'), ((10520, 10543), 'torch.abs', 'torch.abs', (['latent[:, 1]'], {}), '(latent[:, 1])\n', (10529, 10543), False, 'import torch\n'), ((12388, 12406), 'scipy.interpolate.interp1d', 'interp1d', (['fpr', 'tpr'], {}), '(fpr, tpr)\n', (12396, 12406), False, 'from scipy.interpolate import interp1d\n'), ((13142, 13175), 'os.path.join', 'os.path.join', (['"""input"""', '"""real.jpg"""'], {}), "('input', 'real.jpg')\n", (13154, 13175), False, 'import os\n'), ((13250, 13283), 'os.path.join', 'os.path.join', (['"""input"""', 
'"""real.jpg"""'], {}), "('input', 'real.jpg')\n", (13262, 13283), False, 'import os\n'), ((13359, 13392), 'os.path.join', 'os.path.join', (['"""input"""', '"""fake.jpg"""'], {}), "('input', 'fake.jpg')\n", (13371, 13392), False, 'import os\n'), ((13467, 13500), 'os.path.join', 'os.path.join', (['"""input"""', '"""fake.jpg"""'], {}), "('input', 'fake.jpg')\n", (13479, 13500), False, 'import os\n')] |
"""Two-head self-attention binary classifier for DNA sequences (FASTA input).

Reads H_train.fasta / H_test.fasta, labels each record (0 if 'CDS' is in the
record id, else 1), integer-encodes the sequence letters X/A/C/G/T, pads the
encodings, builds a small Keras model with an embedding layer and two scaled
dot-product attention heads, trains it, plots accuracy/loss curves, and saves
the model.

NOTE(review): the original indentation of this script was lost when it was
captured; block structure below is implied by the logic of the with/for
bodies, not by the column positions shown.
"""
import numpy as np
from Bio import SeqIO
import os
# Silence TensorFlow INFO-level log output.
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '1'
import tensorflow as tf
from tensorflow.keras import models, layers
from tensorflow.keras.utils import plot_model
import keras
from matplotlib import pyplot as plt
from keras.models import load_model
from matplotlib import pyplot as plt
# import sklearn
# from sklearn.metrics import confusion_matrix, precision_score, recall_score, f1_score
import dot_product_attention
import positional_embedding
# Input files: only the FASTA paths are used below; the CSV paths are unused.
fasta_train = 'H_train.fasta'
csv_train = 'H_train.csv'
fasta_test = 'H_test.fasta'
csv_test = 'H_test.csv'
# NOTE(review): missing parentheses -- this line references the function but
# never calls tf.keras.backend.clear_session(), so it is a no-op.
tf.keras.backend.clear_session
# tf.debugging.set_log_device_placement(True)
## Add label to the train dataset and generate X_train=record.seq and Y_train=label
size_train = 0
train_lst = []
train_samples = []
train_labels = []
# Encoding alphabet; 'X' sits at index 0, which coincides with the value
# pad_sequences uses for padding.
letters = 'XACGT'
emb_dict = {letter: number for number, letter in
enumerate(letters)} # letter -> integer index (X=0, A=1, C=2, G=3, T=4)
# train: label 0 for coding sequences ('CDS' in the record id), else 1.
with open(fasta_train)as fn:
for record in SeqIO.parse(fn, 'fasta'):
label_train = 0 if 'CDS' in record.id else 1
# print(label_train)
train_sample = []
size_train = size_train + 1
lst = [record.id, str(record.seq), len(record), label_train]
# print(lst)
for index, letter, in enumerate(record.seq):
train_sample.append(emb_dict[letter])
train_lst.append(lst)
train_labels.append(label_train)
train_samples.append(train_sample)
# padding train: post-pad every training sequence to the longest one.
# NOTE(review): no maxlen is given here, while the test set below is padded
# to maxlen=3000 -- confirm training sequences are also at most 3000 long,
# otherwise train/test input widths diverge from the model's Input(3000).
padded_inputs = tf.keras.preprocessing.sequence.pad_sequences(
train_samples, padding="post"
)
padded_inputs, train_labels = np.array(padded_inputs), np.array(train_labels)
# test: same labelling/encoding as the training loop above.
size_test = 0
test_lst = []
test_samples = []
test_labels = []
with open(fasta_test)as fn:
for record in SeqIO.parse(fn, 'fasta'):
label_test = 0 if 'CDS' in record.id else 1
# print(label_test)
test_sample = []
size_test = size_test + 1
lst = [record.id, str(record.seq), label_test]
for index, letter, in enumerate(record.seq):
test_sample.append(emb_dict[letter])
test_labels.append(label_test)
test_samples.append(test_sample)
# padding: fixed length 3000 (pre-padding, the pad_sequences default).
padded_tests = tf.keras.preprocessing.sequence.pad_sequences(list(test_samples), maxlen=3000)
padded_tests, test_labels = np.array(padded_tests), np.array(test_labels)
### what are the max lengths of the train dataset?
# max_train = df_train['length'].max()
max_train = 3000
d_model = 2
output_dim = 2
# ---- Model: embedding + positional encoding + two scaled dot-product
# attention heads + sigmoid output. ----
input_layer1 = layers.Input(shape=(max_train,))
embedding_layer = tf.keras.layers.Embedding(input_dim=5, output_dim=2, input_length=3000, mask_zero=True)(input_layer1)
# NOTE(review): this assignment shadows the imported `positional_embedding`
# module, making the module unusable after this line.
positional_embedding = layers.Lambda(positional_embedding.positional_encoding)([3000, 2])
# NOTE(review): `add_embeddings` is computed but never used -- the attention
# heads below read `embedding_layer` directly, so the positional encoding
# never reaches the classifier.
add_embeddings = layers.Add()([embedding_layer, positional_embedding])
#flatt_output = layers.Flatten()(embedding_layer)
# Head 0: query/key/value projections, then softmax(QK^T / sqrt(d_model)) V.
q0 = layers.Dense(d_model, use_bias=False, name='query_layer0')(embedding_layer)
k0 = layers.Dense(d_model, use_bias=False, name='key_layer0')(embedding_layer)
v0 = layers.Dense(d_model, use_bias=False, name='values_layer0')(embedding_layer)
attention_filter0 = tf.matmul(q0, k0, transpose_b=True)
scale = np.math.sqrt(d_model)
Scaling = attention_filter0 / scale
attention_weights0 = tf.nn.softmax(Scaling, axis=-1)
print('Attention weights are:')
print(attention_weights0.shape)
output0 = tf.matmul(attention_weights0, v0)
print('Output is:')
print(output0.shape)
# Head 1: identical structure with its own projection weights.
q1 = layers.Dense(d_model, use_bias=False, name='query_layer1')(embedding_layer)
k1 = layers.Dense(d_model, use_bias=False, name='key_layer1')(embedding_layer)
v1 = layers.Dense(d_model, use_bias=False, name='values_layer1')(embedding_layer)
attention_filter1 = tf.matmul(q1, k1, transpose_b=True)
scale = np.math.sqrt(d_model)
Scaling = attention_filter1 / scale
attention_weights1 = tf.nn.softmax(Scaling, axis=-1)
print('Attention weights are:')
print(attention_weights1.shape)
output1 = tf.matmul(attention_weights1, v1)
# Concatenate the two heads and project back to output_dim (standard
# multi-head attention output projection).
concated_heads = layers.Concatenate()([output0, output1])
multi_head_output = layers.Dense(output_dim)(concated_heads)
#temp_out0, temp_attn0 = dot_product_attention.scaled_dot_product_attention(q0, k0, v0, None)
#q1 = layers.Dense(output_dim, use_bias=False, name='query_layer1')(embedding_layer)
#k1 = layers.Dense(output_dim, use_bias=False, name='key_layer1')(embedding_layer)
#v1 = layers.Dense(output_dim, use_bias=False, name='values_layer1')(embedding_layer)
#temp_out1, temp_attn1 = dot_product_attention.scaled_dot_product_attention(q1, k1, v1, None)
#q2 = layers.Dense(output_dim, use_bias=False, name='query_layer2')(embedding_layer)
#k2 = layers.Dense(output_dim, use_bias=False, name='key_layer2')(embedding_layer)
#v2 = layers.Dense(output_dim, use_bias=False, name='values_layer2')(embedding_layer)
#temp_out2, temp_attn2 = dot_product_attention.scaled_dot_product_attention(q2, k2, v2, None)
#concated_heads = layers.Concatenate()([temp_out0, temp_out1])
#attention_output = layers.Dense(output_dim)(temp_out0)
# feed_forward
#hidden = layers.Dense(3, activation='relu')(attention_output)
#hidden = layers.Dense(2, activation='relu')(hidden)
# cf = layers.Dense(1, activation='sigmoid')(temp_out)
# Binary classification head.
cf = layers.Dense(1, activation='sigmoid')(multi_head_output)
adam = tf.keras.optimizers.Adam(
learning_rate=0.009,
beta_1=0.6,
beta_2=0.6,
epsilon=1e-07,
amsgrad=False)
#sgd = tf.keras.optimizers.SGD(learning_rate=0.1, momentum=0.8, nesterov=True)
classifier = models.Model(input_layer1, cf)
classifier.compile(optimizer=adam,
loss='binary_crossentropy',
metrics=['accuracy'])
classifier.summary()
#plot_model(classifier, to_file="model.png")
#callback = tf.keras.callbacks.EarlyStopping(monitor='loss', patience=5)
#tf.keras.callbacks.TensorBoard(
#log_dir='logs', histogram_freq=0, write_graph=True,
#write_images=False, write_steps_per_second=False, update_freq='epoch',
#profile_batch=2, embeddings_freq=0, embeddings_metadata=None)
#tensorboard_callback = tf.keras.callbacks.TensorBoard(log_dir="./logs")
# Train for 5 epochs, validating on the padded test split each epoch.
history = classifier.fit(padded_inputs, train_labels, batch_size=32, epochs=5, validation_data=(padded_tests, test_labels))
'''
pred = classifier.predict([padded_tests, test_labels])
pred = np.argmax(pred, axis=1)
from sklearn.metrics import mean_squared_error, accuracy_score
print(f'error: {mean_squared_error(test_labels, pred)}')
print(f'accuracy: {accuracy_score(test_labels, pred)}')
#history.history
# Evaluate the model on the test data using `evaluate`
print("Evaluate on test data")
results = classifier.evaluate(padded_tests, test_labels)
print("test loss, test acc:", results)
'''
# Accuracy curves (train vs. validation) per epoch.
plt.plot(history.history['accuracy'])
plt.plot(history.history['val_accuracy'])
plt.title('model accuracy')
plt.ylabel('accuracy')
plt.xlabel('epoch')
plt.legend(['train', 'val'], loc='upper left')
plt.show()
# Loss curves (train vs. validation) per epoch.
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train', 'val'], loc='upper left')
plt.show()
# Persist the trained model (HDF5 format).
classifier.save('./epoch_10_attention.h5')
# classifier.save_weights('./epoch_10_attention.h5')
# my_model = load_model('./epoch_10_attention.h5')
# my_model.get_weights()
| [
"matplotlib.pyplot.title",
"tensorflow.keras.layers.Dense",
"tensorflow.matmul",
"tensorflow.nn.softmax",
"tensorflow.keras.layers.Concatenate",
"numpy.math.sqrt",
"tensorflow.keras.preprocessing.sequence.pad_sequences",
"tensorflow.keras.optimizers.Adam",
"tensorflow.keras.layers.Input",
"tensorf... | [((1520, 1596), 'tensorflow.keras.preprocessing.sequence.pad_sequences', 'tf.keras.preprocessing.sequence.pad_sequences', (['train_samples'], {'padding': '"""post"""'}), "(train_samples, padding='post')\n", (1565, 1596), True, 'import tensorflow as tf\n'), ((2532, 2564), 'tensorflow.keras.layers.Input', 'layers.Input', ([], {'shape': '(max_train,)'}), '(shape=(max_train,))\n', (2544, 2564), False, 'from tensorflow.keras import models, layers\n'), ((3160, 3195), 'tensorflow.matmul', 'tf.matmul', (['q0', 'k0'], {'transpose_b': '(True)'}), '(q0, k0, transpose_b=True)\n', (3169, 3195), True, 'import tensorflow as tf\n'), ((3204, 3225), 'numpy.math.sqrt', 'np.math.sqrt', (['d_model'], {}), '(d_model)\n', (3216, 3225), True, 'import numpy as np\n'), ((3283, 3314), 'tensorflow.nn.softmax', 'tf.nn.softmax', (['Scaling'], {'axis': '(-1)'}), '(Scaling, axis=-1)\n', (3296, 3314), True, 'import tensorflow as tf\n'), ((3389, 3422), 'tensorflow.matmul', 'tf.matmul', (['attention_weights0', 'v0'], {}), '(attention_weights0, v0)\n', (3398, 3422), True, 'import tensorflow as tf\n'), ((3729, 3764), 'tensorflow.matmul', 'tf.matmul', (['q1', 'k1'], {'transpose_b': '(True)'}), '(q1, k1, transpose_b=True)\n', (3738, 3764), True, 'import tensorflow as tf\n'), ((3773, 3794), 'numpy.math.sqrt', 'np.math.sqrt', (['d_model'], {}), '(d_model)\n', (3785, 3794), True, 'import numpy as np\n'), ((3852, 3883), 'tensorflow.nn.softmax', 'tf.nn.softmax', (['Scaling'], {'axis': '(-1)'}), '(Scaling, axis=-1)\n', (3865, 3883), True, 'import tensorflow as tf\n'), ((3958, 3991), 'tensorflow.matmul', 'tf.matmul', (['attention_weights1', 'v1'], {}), '(attention_weights1, v1)\n', (3967, 3991), True, 'import tensorflow as tf\n'), ((5285, 5388), 'tensorflow.keras.optimizers.Adam', 'tf.keras.optimizers.Adam', ([], {'learning_rate': '(0.009)', 'beta_1': '(0.6)', 'beta_2': '(0.6)', 'epsilon': '(1e-07)', 'amsgrad': '(False)'}), '(learning_rate=0.009, beta_1=0.6, beta_2=0.6,\n epsilon=1e-07, 
amsgrad=False)\n', (5309, 5388), True, 'import tensorflow as tf\n'), ((5499, 5529), 'tensorflow.keras.models.Model', 'models.Model', (['input_layer1', 'cf'], {}), '(input_layer1, cf)\n', (5511, 5529), False, 'from tensorflow.keras import models, layers\n'), ((6696, 6733), 'matplotlib.pyplot.plot', 'plt.plot', (["history.history['accuracy']"], {}), "(history.history['accuracy'])\n", (6704, 6733), True, 'from matplotlib import pyplot as plt\n'), ((6734, 6775), 'matplotlib.pyplot.plot', 'plt.plot', (["history.history['val_accuracy']"], {}), "(history.history['val_accuracy'])\n", (6742, 6775), True, 'from matplotlib import pyplot as plt\n'), ((6776, 6803), 'matplotlib.pyplot.title', 'plt.title', (['"""model accuracy"""'], {}), "('model accuracy')\n", (6785, 6803), True, 'from matplotlib import pyplot as plt\n'), ((6804, 6826), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""accuracy"""'], {}), "('accuracy')\n", (6814, 6826), True, 'from matplotlib import pyplot as plt\n'), ((6827, 6846), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""epoch"""'], {}), "('epoch')\n", (6837, 6846), True, 'from matplotlib import pyplot as plt\n'), ((6847, 6893), 'matplotlib.pyplot.legend', 'plt.legend', (["['train', 'val']"], {'loc': '"""upper left"""'}), "(['train', 'val'], loc='upper left')\n", (6857, 6893), True, 'from matplotlib import pyplot as plt\n'), ((6894, 6904), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (6902, 6904), True, 'from matplotlib import pyplot as plt\n'), ((6906, 6939), 'matplotlib.pyplot.plot', 'plt.plot', (["history.history['loss']"], {}), "(history.history['loss'])\n", (6914, 6939), True, 'from matplotlib import pyplot as plt\n'), ((6940, 6977), 'matplotlib.pyplot.plot', 'plt.plot', (["history.history['val_loss']"], {}), "(history.history['val_loss'])\n", (6948, 6977), True, 'from matplotlib import pyplot as plt\n'), ((6978, 7001), 'matplotlib.pyplot.title', 'plt.title', (['"""model loss"""'], {}), "('model loss')\n", (6987, 7001), True, 'from 
matplotlib import pyplot as plt\n'), ((7002, 7020), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""loss"""'], {}), "('loss')\n", (7012, 7020), True, 'from matplotlib import pyplot as plt\n'), ((7021, 7040), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""epoch"""'], {}), "('epoch')\n", (7031, 7040), True, 'from matplotlib import pyplot as plt\n'), ((7041, 7087), 'matplotlib.pyplot.legend', 'plt.legend', (["['train', 'val']"], {'loc': '"""upper left"""'}), "(['train', 'val'], loc='upper left')\n", (7051, 7087), True, 'from matplotlib import pyplot as plt\n'), ((7088, 7098), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (7096, 7098), True, 'from matplotlib import pyplot as plt\n'), ((1009, 1033), 'Bio.SeqIO.parse', 'SeqIO.parse', (['fn', '"""fasta"""'], {}), "(fn, 'fasta')\n", (1020, 1033), False, 'from Bio import SeqIO\n'), ((1633, 1656), 'numpy.array', 'np.array', (['padded_inputs'], {}), '(padded_inputs)\n', (1641, 1656), True, 'import numpy as np\n'), ((1658, 1680), 'numpy.array', 'np.array', (['train_labels'], {}), '(train_labels)\n', (1666, 1680), True, 'import numpy as np\n'), ((1798, 1822), 'Bio.SeqIO.parse', 'SeqIO.parse', (['fn', '"""fasta"""'], {}), "(fn, 'fasta')\n", (1809, 1822), False, 'from Bio import SeqIO\n'), ((2335, 2357), 'numpy.array', 'np.array', (['padded_tests'], {}), '(padded_tests)\n', (2343, 2357), True, 'import numpy as np\n'), ((2359, 2380), 'numpy.array', 'np.array', (['test_labels'], {}), '(test_labels)\n', (2367, 2380), True, 'import numpy as np\n'), ((2583, 2674), 'tensorflow.keras.layers.Embedding', 'tf.keras.layers.Embedding', ([], {'input_dim': '(5)', 'output_dim': '(2)', 'input_length': '(3000)', 'mask_zero': '(True)'}), '(input_dim=5, output_dim=2, input_length=3000,\n mask_zero=True)\n', (2608, 2674), True, 'import tensorflow as tf\n'), ((2708, 2763), 'tensorflow.keras.layers.Lambda', 'layers.Lambda', (['positional_embedding.positional_encoding'], {}), '(positional_embedding.positional_encoding)\n', (2721, 2763), 
False, 'from tensorflow.keras import models, layers\n'), ((2792, 2804), 'tensorflow.keras.layers.Add', 'layers.Add', ([], {}), '()\n', (2802, 2804), False, 'from tensorflow.keras import models, layers\n'), ((2902, 2960), 'tensorflow.keras.layers.Dense', 'layers.Dense', (['d_model'], {'use_bias': '(False)', 'name': '"""query_layer0"""'}), "(d_model, use_bias=False, name='query_layer0')\n", (2914, 2960), False, 'from tensorflow.keras import models, layers\n'), ((2983, 3039), 'tensorflow.keras.layers.Dense', 'layers.Dense', (['d_model'], {'use_bias': '(False)', 'name': '"""key_layer0"""'}), "(d_model, use_bias=False, name='key_layer0')\n", (2995, 3039), False, 'from tensorflow.keras import models, layers\n'), ((3062, 3121), 'tensorflow.keras.layers.Dense', 'layers.Dense', (['d_model'], {'use_bias': '(False)', 'name': '"""values_layer0"""'}), "(d_model, use_bias=False, name='values_layer0')\n", (3074, 3121), False, 'from tensorflow.keras import models, layers\n'), ((3471, 3529), 'tensorflow.keras.layers.Dense', 'layers.Dense', (['d_model'], {'use_bias': '(False)', 'name': '"""query_layer1"""'}), "(d_model, use_bias=False, name='query_layer1')\n", (3483, 3529), False, 'from tensorflow.keras import models, layers\n'), ((3552, 3608), 'tensorflow.keras.layers.Dense', 'layers.Dense', (['d_model'], {'use_bias': '(False)', 'name': '"""key_layer1"""'}), "(d_model, use_bias=False, name='key_layer1')\n", (3564, 3608), False, 'from tensorflow.keras import models, layers\n'), ((3631, 3690), 'tensorflow.keras.layers.Dense', 'layers.Dense', (['d_model'], {'use_bias': '(False)', 'name': '"""values_layer1"""'}), "(d_model, use_bias=False, name='values_layer1')\n", (3643, 3690), False, 'from tensorflow.keras import models, layers\n'), ((4010, 4030), 'tensorflow.keras.layers.Concatenate', 'layers.Concatenate', ([], {}), '()\n', (4028, 4030), False, 'from tensorflow.keras import models, layers\n'), ((4071, 4095), 'tensorflow.keras.layers.Dense', 'layers.Dense', (['output_dim'], {}), 
'(output_dim)\n', (4083, 4095), False, 'from tensorflow.keras import models, layers\n'), ((5220, 5257), 'tensorflow.keras.layers.Dense', 'layers.Dense', (['(1)'], {'activation': '"""sigmoid"""'}), "(1, activation='sigmoid')\n", (5232, 5257), False, 'from tensorflow.keras import models, layers\n')] |
import random
from collections import defaultdict
import os
import glob
import cv2
import numpy as np
from keras.datasets import mnist
from keras import backend as K
from keras.models import Model
import scikitplot as skplt
import matplotlib.pyplot as plt
from PIL import Image
import keras
from sklearn.metrics import precision_score, recall_score, f1_score
import math
# util function to convert a tensor into a valid image
def deprocess_image(x):
    """Rescale a [0, 1] batch tensor to uint8 pixels and drop the batch axis."""
    x *= 255  # note: mutates the caller's array in place for float inputs
    pixels = np.clip(x, 0, 255).astype('uint8')
    rows, cols = pixels.shape[1], pixels.shape[2]
    return pixels.reshape(rows, cols)  # original shape (img_rows, img_cols, 1)
def normalize(x):
    # utility function to normalize a tensor by its L2 norm
    # (a small epsilon keeps the division safe for all-zero tensors)
    norm = K.sqrt(K.mean(K.square(x)))
    return x / (norm + 1e-5)
def constraint_occl(gradients, start_point, rect_shape):
    """Keep gradients only inside the given occlusion rectangle; zero elsewhere."""
    row0, col0 = start_point
    row1 = row0 + rect_shape[0]
    col1 = col0 + rect_shape[1]
    masked = np.zeros_like(gradients)
    masked[:, row0:row1, col0:col1] = gradients[:, row0:row1, col0:col1]
    return masked
def constraint_light(gradients):
    """Replace every gradient entry with the overall mean (uniform light change)."""
    return np.mean(gradients) * np.ones_like(gradients)
def constraint_black(gradients, rect_shape=(6, 6)):
    """Zero the gradients, except: if a randomly placed patch has negative mean,
    fill that patch with -1 (simulating a black occlusion)."""
    row0 = random.randint(0, gradients.shape[1] - rect_shape[0])
    col0 = random.randint(0, gradients.shape[2] - rect_shape[1])
    new_grads = np.zeros_like(gradients)
    window = gradients[:, row0:row0 + rect_shape[0], col0:col0 + rect_shape[1]]
    if np.mean(window) < 0:
        new_grads[:, row0:row0 + rect_shape[0],
                  col0:col0 + rect_shape[1]] = -np.ones_like(window)
    return new_grads
def init_coverage_tables(model1, model2, model3):
    """Build one (layer_name, neuron_index) -> covered? table per model."""
    tables = (defaultdict(bool), defaultdict(bool), defaultdict(bool))
    for model, table in zip((model1, model2, model3), tables):
        init_dict(model, table)
    return tables
def init_dict(model, model_layer_dict):
    """Register every neuron of every non-flatten/non-input layer as uncovered."""
    for layer in model.layers:
        name = layer.name
        if 'flatten' in name or 'input' in name:
            continue
        for neuron_idx in range(layer.output_shape[-1]):
            model_layer_dict[(name, neuron_idx)] = False
def neuron_to_cover(model_layer_dict):
    """Pick a neuron to target: a random uncovered one if any remain.

    Falls back to a random neuron from the whole table when everything is
    already covered.

    Args:
        model_layer_dict: mapping (layer_name, neuron_index) -> covered flag.

    Returns:
        (layer_name, index) of the chosen neuron.
    """
    not_covered = [key for key, covered in model_layer_dict.items() if not covered]
    if not_covered:
        layer_name, index = random.choice(not_covered)
    else:
        # Bug fix: random.choice needs an indexable sequence; in Python 3
        # dict.keys() is a view and the original call raised TypeError.
        layer_name, index = random.choice(list(model_layer_dict.keys()))
    return layer_name, index
def neuron_covered(model_layer_dict):
    """Return (covered_count, total_count, coverage_ratio)."""
    total_neurons = len(model_layer_dict)
    covered_neurons = sum(1 for covered in model_layer_dict.values() if covered)
    return covered_neurons, total_neurons, covered_neurons / float(total_neurons)
def update_coverage(input_data, model, model_layer_dict, threshold=0):
    """Run input_data through every interesting layer and mark neurons whose
    scaled mean activation exceeds the threshold as covered."""
    layer_names = [layer.name for layer in model.layers
                   if 'flatten' not in layer.name and 'input' not in layer.name]
    probe = Model(inputs=model.input,
                  outputs=[model.get_layer(name).output for name in layer_names])
    activations = probe.predict(input_data)
    for layer_idx, layer_output in enumerate(activations):
        scaled = scale(layer_output[0])
        for neuron in range(scaled.shape[-1]):
            key = (layer_names[layer_idx], neuron)
            if not model_layer_dict[key] and np.mean(scaled[..., neuron]) > threshold:
                model_layer_dict[key] = True
def full_coverage(model_layer_dict):
    """Return True iff every neuron in the coverage table is marked covered.

    The table values are booleans (set by init_dict/update_coverage), so the
    original ``False in values()`` membership test is equivalent to ``all()``,
    which is the idiomatic (and short-circuiting) form.
    """
    return all(model_layer_dict.values())
def scale(intermediate_layer_output, rmax=1, rmin=0):
    """Min-max rescale an activation map into the interval [rmin, rmax]."""
    lo = intermediate_layer_output.min()
    hi = intermediate_layer_output.max()
    unit = (intermediate_layer_output - lo) / (hi - lo)
    return unit * (rmax - rmin) + rmin
def fired(model, layer_name, index, input_data, threshold=0):
    """True when the given neuron's scaled mean activation exceeds threshold."""
    probe = Model(inputs=model.input, outputs=model.get_layer(layer_name).output)
    activation = probe.predict(input_data)[0]
    scaled = scale(activation)
    if np.mean(scaled[..., index]) > threshold:
        return True
    return False
def diverged(predictions1, predictions2, predictions3, target):
    """True when the three model predictions do not all agree.

    ``target`` is accepted for interface compatibility but is not used.
    """
    if predictions1 == predictions2 == predictions3:
        return False
    return True
def cumulative_neuron_coverage(model_layer_dict1, model_layer_dict2, model_layer_dict3):
    """Store, per neuron, the logical OR of tables 1 and 2 into table 3."""
    for key, covered in model_layer_dict1.items():
        model_layer_dict3[key] = covered or model_layer_dict2[key]
def neurons_covered_uncommon(model_layer_dict1, model_layer_dict2):
    """Neurons covered by table 2 (invalid tests) but not by table 1 (valid tests)."""
    return [key for key, covered in model_layer_dict1.items()
            if not covered and model_layer_dict2[key]]
def neuron_not_covered(model_layer_dict1):
    """List the (layer_name, index) keys whose neuron is still uncovered."""
    return [key for key, covered in model_layer_dict1.items() if not covered]
def delete_files_from_dir(dirPath, ext):
    """Remove every '*.<ext>' file matched under dirPath.

    e.g. dirPath='/tmp/', ext='txt' removes /tmp/*.txt; failures are reported
    but do not abort the loop.
    """
    pattern = dirPath + '*.' + ext
    for path in glob.glob(pattern):
        try:
            os.remove(path)
        except OSError as e:
            print("Error: %s : %s" % (path, e.strerror))
# This api is for sampling from latent space of VAE
def sampling(args):
    """Reparameterization trick by sampling from an isotropic unit Gaussian.
    # Arguments
        args (tensor): mean and log of variance of Q(z|X)
    # Returns
        z (tensor): sampled latent vector
    """
    z_mean, z_log_var = args
    batch_size = K.shape(z_mean)[0]
    latent_dim = K.int_shape(z_mean)[1]
    # by default, random_normal has mean = 0 and std = 1.0
    noise = K.random_normal(shape=(batch_size, latent_dim))
    return z_mean + K.exp(0.5 * z_log_var) * noise
# Logic for calculating reconstruction probability
def reconstruction_probability(decoder, z_mean, z_log_var, X):
    """
    :param decoder: decoder model
    :param z_mean: encoder predicted mean value
    :param z_log_var: encoder predicted sigma square value
    :param X: input data
    :return: reconstruction probability of input
    calculated over L samples from z_mean and z_log_var distribution
    """
    # Accumulator: one log-probability estimate per input row.
    reconstructed_prob = np.zeros((X.shape[0],), dtype='float32')
    L = 1000  # number of Monte Carlo samples drawn from the latent distribution
    for l in range(L):
        # Draw one latent sample via the reparameterization trick.
        sampled_zs = sampling([z_mean, z_log_var])
        # NOTE(review): the decoder is assumed to output a (mu, log_sigma) pair,
        # as established by this unpacking — confirm against the decoder model.
        mu_hat, log_sigma_hat = decoder.predict(sampled_zs, steps=1)
        log_sigma_hat = np.float64(log_sigma_hat)
        # Small floor keeps the divisions and log below numerically safe.
        sigma_hat = np.exp(log_sigma_hat) + 0.00001
        # Gaussian negative log-likelihood terms: log-variance and Mahalanobis.
        loss_a = np.log(2 * np.pi * sigma_hat)
        loss_m = np.square(mu_hat - X) / sigma_hat
        reconstructed_prob += -0.5 * np.sum(loss_a + loss_m, axis=1)
    # Average the L per-sample estimates.
    reconstructed_prob /= L
    return reconstructed_prob
# Calculates and returns probability density of test input
def calculate_density(x_target_orig, vae):
    """Reconstruction probability of the input under the VAE (28x28 images)."""
    flat = np.clip(x_target_orig, 0, 1)
    flat = np.reshape(flat, (-1, 28 * 28))
    images = np.reshape(flat, (-1, 28, 28, 1))
    z_mean, z_log_var, _ = vae.get_layer('encoder').predict(images,
                                                            batch_size=128)
    return reconstruction_probability(vae.get_layer('decoder'),
                                      z_mean, z_log_var, flat)
# checks whether a test input is valid or invalid
# Returns true if invalid
def isInvalid(gen_img, vae, vae_threshold):
    """True when the image's density is below the threshold (or is NaN)."""
    gen_img_density = calculate_density(gen_img, vae)
    invalid = gen_img_density < vae_threshold or math.isnan(gen_img_density)
    return True if invalid else False
| [
"os.remove",
"numpy.sum",
"numpy.clip",
"collections.defaultdict",
"numpy.mean",
"keras.backend.shape",
"numpy.exp",
"glob.glob",
"numpy.float64",
"numpy.zeros_like",
"random.randint",
"numpy.reshape",
"math.isnan",
"numpy.ones_like",
"keras.backend.exp",
"numpy.square",
"keras.backe... | [((801, 825), 'numpy.zeros_like', 'np.zeros_like', (['gradients'], {}), '(gradients)\n', (814, 825), True, 'import numpy as np\n'), ((1175, 1198), 'numpy.ones_like', 'np.ones_like', (['gradients'], {}), '(gradients)\n', (1187, 1198), True, 'import numpy as np\n'), ((1215, 1233), 'numpy.mean', 'np.mean', (['gradients'], {}), '(gradients)\n', (1222, 1233), True, 'import numpy as np\n'), ((1475, 1499), 'numpy.zeros_like', 'np.zeros_like', (['gradients'], {}), '(gradients)\n', (1488, 1499), True, 'import numpy as np\n'), ((1889, 1906), 'collections.defaultdict', 'defaultdict', (['bool'], {}), '(bool)\n', (1900, 1906), False, 'from collections import defaultdict\n'), ((1931, 1948), 'collections.defaultdict', 'defaultdict', (['bool'], {}), '(bool)\n', (1942, 1948), False, 'from collections import defaultdict\n'), ((1973, 1990), 'collections.defaultdict', 'defaultdict', (['bool'], {}), '(bool)\n', (1984, 1990), False, 'from collections import defaultdict\n'), ((5741, 5762), 'glob.glob', 'glob.glob', (['fileFormat'], {}), '(fileFormat)\n', (5750, 5762), False, 'import glob\n'), ((6360, 6395), 'keras.backend.random_normal', 'K.random_normal', ([], {'shape': '(batch, dim)'}), '(shape=(batch, dim))\n', (6375, 6395), True, 'from keras import backend as K\n'), ((6898, 6938), 'numpy.zeros', 'np.zeros', (['(X.shape[0],)'], {'dtype': '"""float32"""'}), "((X.shape[0],), dtype='float32')\n", (6906, 6938), True, 'import numpy as np\n'), ((7547, 7575), 'numpy.clip', 'np.clip', (['x_target_orig', '(0)', '(1)'], {}), '(x_target_orig, 0, 1)\n', (7554, 7575), True, 'import numpy as np\n'), ((7596, 7636), 'numpy.reshape', 'np.reshape', (['x_target_orig', '(-1, 28 * 28)'], {}), '(x_target_orig, (-1, 28 * 28))\n', (7606, 7636), True, 'import numpy as np\n'), ((7650, 7692), 'numpy.reshape', 'np.reshape', (['x_target_orig', '(-1, 28, 28, 1)'], {}), '(x_target_orig, (-1, 28, 28, 1))\n', (7660, 7692), True, 'import numpy as np\n'), ((1349, 1402), 'random.randint', 
'random.randint', (['(0)', '(gradients.shape[1] - rect_shape[0])'], {}), '(0, gradients.shape[1] - rect_shape[0])\n', (1363, 1402), False, 'import random\n'), ((1404, 1457), 'random.randint', 'random.randint', (['(0)', '(gradients.shape[2] - rect_shape[1])'], {}), '(0, gradients.shape[2] - rect_shape[1])\n', (1418, 1457), False, 'import random\n'), ((1626, 1640), 'numpy.mean', 'np.mean', (['patch'], {}), '(patch)\n', (1633, 1640), True, 'import numpy as np\n'), ((2639, 2665), 'random.choice', 'random.choice', (['not_covered'], {}), '(not_covered)\n', (2652, 2665), False, 'import random\n'), ((4526, 4553), 'numpy.mean', 'np.mean', (['scaled[..., index]'], {}), '(scaled[..., index])\n', (4533, 4553), True, 'import numpy as np\n'), ((6235, 6250), 'keras.backend.shape', 'K.shape', (['z_mean'], {}), '(z_mean)\n', (6242, 6250), True, 'from keras import backend as K\n'), ((6264, 6283), 'keras.backend.int_shape', 'K.int_shape', (['z_mean'], {}), '(z_mean)\n', (6275, 6283), True, 'from keras import backend as K\n'), ((7119, 7144), 'numpy.float64', 'np.float64', (['log_sigma_hat'], {}), '(log_sigma_hat)\n', (7129, 7144), True, 'import numpy as np\n'), ((7215, 7244), 'numpy.log', 'np.log', (['(2 * np.pi * sigma_hat)'], {}), '(2 * np.pi * sigma_hat)\n', (7221, 7244), True, 'import numpy as np\n'), ((8220, 8247), 'math.isnan', 'math.isnan', (['gen_img_density'], {}), '(gen_img_density)\n', (8230, 8247), False, 'import math\n'), ((473, 491), 'numpy.clip', 'np.clip', (['x', '(0)', '(255)'], {}), '(x, 0, 255)\n', (480, 491), True, 'import numpy as np\n'), ((1772, 1791), 'numpy.ones_like', 'np.ones_like', (['patch'], {}), '(patch)\n', (1784, 1791), True, 'import numpy as np\n'), ((5808, 5820), 'os.remove', 'os.remove', (['f'], {}), '(f)\n', (5817, 5820), False, 'import os\n'), ((6416, 6438), 'keras.backend.exp', 'K.exp', (['(0.5 * z_log_var)'], {}), '(0.5 * z_log_var)\n', (6421, 6438), True, 'from keras import backend as K\n'), ((7165, 7186), 'numpy.exp', 'np.exp', 
(['log_sigma_hat'], {}), '(log_sigma_hat)\n', (7171, 7186), True, 'import numpy as np\n'), ((7262, 7283), 'numpy.square', 'np.square', (['(mu_hat - X)'], {}), '(mu_hat - X)\n', (7271, 7283), True, 'import numpy as np\n'), ((7333, 7364), 'numpy.sum', 'np.sum', (['(loss_a + loss_m)'], {'axis': '(1)'}), '(loss_a + loss_m, axis=1)\n', (7339, 7364), True, 'import numpy as np\n'), ((704, 715), 'keras.backend.square', 'K.square', (['x'], {}), '(x)\n', (712, 715), True, 'from keras import backend as K\n'), ((3665, 3697), 'numpy.mean', 'np.mean', (['scaled[..., num_neuron]'], {}), '(scaled[..., num_neuron])\n', (3672, 3697), True, 'import numpy as np\n')] |
# Copyright 2021 <NAME>
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any, Callable, List, Tuple, Union
import numpy as np
import pennylane as qml
# import pennylane.numpy as pnp
from numpy.typing import ArrayLike
from pennylearn.templates import Ansatz, Embedding
from pennylearn.utils.scores import accuracy
class VQC:
    r"""Variational Quantum Classifier.
    For the details of the algorithm check
    https://pennylane.ai/qml/demos/tutorial_variational_classifier.html.
    Args:
        embedding (Embedding): Embedding wrapping a 'non-trainable' template from
            PennyLane's template library.
        ansatz (Ansatz): Ansatz wrapping a 'trainable' template from PennyLane's
            template library.
        loss (Callable): The loss function that is to be minimized.
        optimizer (qml.optimize.GradientDescentOptimizer): Any optimizer included with
            PennyLane, or a custom optimizer that subclasses `GradientDescentOptimizer`.
        device (str): Name of device on which VQC will run.
    """
    def __init__(
        self,
        embedding: Embedding,
        ansatz: Ansatz,
        loss: Callable,
        optimizer: qml.optimize.GradientDescentOptimizer,
        device: str = "default.qubit",
    ):
        # Embedding and ansatz act on the same quantum register, so their
        # wire counts must agree before a device can be allocated.
        if embedding.num_wires != ansatz.num_wires:
            raise ValueError(
                "Embedding and ansatz must be applied to the same number of wires."
            )
        self._embedding = embedding
        self._ansatz = ansatz
        self._num_wires = self._embedding.num_wires
        self._loss = loss
        self._optimizer = optimizer
        self._device = qml.device(device, wires=self._num_wires)
        # Populated by fit(); predict() refuses to run while it is None.
        self._fit_result = None
    @property
    def embedding(self) -> Embedding:
        """Returns the embedding."""
        return self._embedding
    @property
    def ansatz(self) -> Ansatz:
        """Returns the ansatz."""
        return self._ansatz
    @property
    def num_wires(self) -> int:
        """Returns number of wires in the circuit."""
        return self._num_wires
    @property
    def loss(self) -> Callable:
        """Returns the loss function."""
        return self._loss
    @loss.setter
    def loss(self, loss):
        """Sets the loss."""
        self._loss = loss
    @property
    def optimizer(self) -> qml.optimize.GradientDescentOptimizer:
        """Returns the optimizer."""
        return self._optimizer
    @optimizer.setter
    def optimizer(self, optimizer: qml.optimize.GradientDescentOptimizer):
        """Sets the optimizer."""
        self._optimizer = optimizer
    @property
    def device(self) -> qml.Device:
        """Returns the device."""
        return self._device
    @device.setter
    def device(self, device: str):
        """Sets the device (re-created with the classifier's wire count)."""
        self._device = qml.device(device, wires=self._num_wires)
    def _circuit(self, weights: ArrayLike, x: ArrayLike) -> Callable:
        """The VQC circuit.
        Args:
            weights (ArrayLike): Array of weights for the ansatz.
            x (ArrayLike): n-dimensional datapoint.
        Returns:
            The expectation value of PauliZ on wire 0 after embedding ``x``
            and applying the ansatz with ``weights``.
        """
        # NOTE(review): a fresh QNode is constructed on every call; caching it
        # would avoid repeated construction — confirm before changing.
        @qml.qnode(self._device)
        def circuit_helper():
            self._embedding.circuit(x)
            self._ansatz.circuit(weights)
            return qml.expval(qml.PauliZ(0))
        return circuit_helper()
    def _forward(self, var: Tuple[ArrayLike, float], x: ArrayLike) -> float:
        """Forward propagation of the VQC circuit.
        Args:
            var (Tuple[ArrayLike, float]): A tuple of an array, containing the weights
                for the circuit, and a float, which is the bias.
            x (ArrayLike): n-dimensional datapoint.
        Returns:
            The value after forward propagation of the datapoint.
        """
        weights = var[0]
        bias = var[1]
        # Quantum expectation value plus a trainable classical bias.
        return self._circuit(weights, x) + bias
    def fit(
        self,
        x: ArrayLike,
        y: ArrayLike,
        epochs: int = 40,
        batch_size: int = 5,
        seed: Union[int, ArrayLike, Any] = None,
        callback: Callable = None,
    ):
        """Fits the VQC to the training data, by optimizing the ansatz parameters.
        Args:
            x (ArrayLike): The input data, must have shape (n_samples, n_features).
            y (ArrayLike): The targets, must have shape (n_samples,).
            epochs (int, optional): Number of training iterations, default value is 40.
            batch_size (int, optional): Size of the training batch, default value is 5.
            seed (int, ArrayLike[int], SeedSequence, BitGenerator, Generator): Seed for
                numpy's random number generator.
            callback (Callable, optional): Callback function for status updates. Should
                have the signature (int, ArrayLike, float).
        """
        def objective(var, x_batch, y_batch):
            # Mean loss of the forward pass over one mini-batch.
            predictions = [self._forward(var, sample) for sample in x_batch]
            return self._loss(predictions, y_batch)
        rng = np.random.default_rng(seed=seed)
        # Small random initial weights plus a zero bias.
        var = (0.01 * rng.standard_normal(size=self._ansatz.shape), 0.0)
        for epoch in range(epochs):
            # Mini-batch indices are drawn with replacement.
            batch_index = rng.integers(0, len(x), (batch_size,))
            x_batch = x[batch_index]
            y_batch = y[batch_index]
            var = self._optimizer.step(lambda v: objective(v, x_batch, y_batch), var)
            if callback is not None:
                # Full-dataset predictions/cost are evaluated only for reporting.
                predictions = [np.sign(self._forward(var, sample)) for sample in x]
                cost = objective(var, x, y)
                callback(epoch, predictions, cost)
        self._fit_result = var
    def predict(self, x: ArrayLike) -> List:
        """Predict using the trained VQC model.
        Args:
            x (ArrayLike): The input data, must have shape (n_samples, n_features).
        Returns:
            The predicted classes.
        """
        if self._fit_result is None:
            raise ValueError("Model needs to be fitted to some training data")
        # The class label is the sign of the biased expectation value.
        predictions = [np.sign(self._forward(self._fit_result, sample)) for sample in x]
        return np.asarray(predictions)
    def score(self, x: ArrayLike, y: ArrayLike) -> float:
        """Returns the accuracy score of the trained model with respect to the input.
        Args:
            x (ArrayLike): The input data, must have shape (n_samples, n_features).
            y (ArrayLike): The target classes, must have shape (n_samples,).
        """
        predictions = self.predict(x)
        return accuracy(predictions, y)
| [
"pennylearn.utils.scores.accuracy",
"numpy.asarray",
"numpy.random.default_rng",
"pennylane.device",
"pennylane.qnode",
"pennylane.PauliZ"
] | [((2143, 2184), 'pennylane.device', 'qml.device', (['device'], {'wires': 'self._num_wires'}), '(device, wires=self._num_wires)\n', (2153, 2184), True, 'import pennylane as qml\n'), ((3331, 3372), 'pennylane.device', 'qml.device', (['device'], {'wires': 'self._num_wires'}), '(device, wires=self._num_wires)\n', (3341, 3372), True, 'import pennylane as qml\n'), ((3700, 3723), 'pennylane.qnode', 'qml.qnode', (['self._device'], {}), '(self._device)\n', (3709, 3723), True, 'import pennylane as qml\n'), ((5574, 5606), 'numpy.random.default_rng', 'np.random.default_rng', ([], {'seed': 'seed'}), '(seed=seed)\n', (5595, 5606), True, 'import numpy as np\n'), ((6668, 6691), 'numpy.asarray', 'np.asarray', (['predictions'], {}), '(predictions)\n', (6678, 6691), True, 'import numpy as np\n'), ((7078, 7102), 'pennylearn.utils.scores.accuracy', 'accuracy', (['predictions', 'y'], {}), '(predictions, y)\n', (7086, 7102), False, 'from pennylearn.utils.scores import accuracy\n'), ((3865, 3878), 'pennylane.PauliZ', 'qml.PauliZ', (['(0)'], {}), '(0)\n', (3875, 3878), True, 'import pennylane as qml\n')] |
import pandas as pd
from numpy.random import default_rng
from sklearn import metrics
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import train_test_split
from sklearn.feature_selection import SelectKBest
from sklearn import linear_model
from a_utils import *
from sklearn import feature_selection
import statsmodels.api as sm
from sklearn.feature_selection import chi2
from mpmath import *
import matplotlib.pyplot as plt
import matplotlib as mpl
from cycler import cycler
import seaborn as sns
import networkx as nx
from textwrap import wrap
from sklearn.feature_selection import SelectFromModel
def printGraph(columns, data):
    """Show a correlation heatmap over every column except the last (the label)."""
    feature_cols = columns.delete(len(columns) - 1)
    correlations = data[feature_cols].corr()
    fig, ax = plt.subplots(figsize=(16, 14))
    sns.heatmap(correlations, annot=False, cmap=plt.cm.Reds, ax=ax)
    plt.show()
class TeamFile:
    """Bundle of one training file, its test files, and the label column name."""

    def __init__(self, train, listFileTest, resultColName):
        # Plain data holder; attributes are accessed directly by callers.
        self.train = train
        self.listFileTest = listFileTest
        self.resultColName = resultColName
def getOldDataset():
    """Original cohorts: train on feng, evaluate on yu / zeller / vogtmann."""
    test_files = ['yu_x.csv', 'zeller_x.csv', 'vogtmann_x.csv']
    return TeamFile("feng_x.csv", test_files, "RS")
def getNewDataset():
    """IBD cohorts: train on UCr, evaluate on iCDr / UCf / iCDf / CDr / CDf."""
    test_files = [
        'ibdfullHS_iCDr_x.csv',
        'ibdfullHS_UCf_x.csv',
        'ibdfullHS_iCDf_x.csv',
        'ibdfullHS_CDr_x.csv',
        'ibdfullHS_CDf_x.csv',
    ]
    return TeamFile("ibdfullHS_UCr_x.csv", test_files, "RS")
class color:
    # ANSI terminal escape codes used to colour console output.
    HEADER = '\033[95m'
    OKBLUE = '\033[94m'
    OKGREEN = '\033[92m'
    WARNING = '\033[93m'
    FAIL = '\033[91m'
    ENDC = '\033[0m'  # resets all attributes back to the terminal default
    BOLD = '\033[1m'
    UNDERLINE = '\033[4m'
def textcolor_display(text, values):
    """Wrap *text* in the ANSI code *values*, resetting the colour afterwards."""
    prefix = f"{values}"
    suffix = f"{color.ENDC}"
    return prefix + text + suffix
class IF_Method:
    # Enumeration of the supported importance-feature selection strategies
    # (consumed by findImportancesFeatures via its ``flag`` argument).
    PearsonCorrelationMatrix = 1
    UnivariateSelection = 2
    FeatureImportance = 3
def findImportancesFeatures(resultColName, filenameTrain, coef_percent, flag, nlargestFeatures):
    """Select important features from a training CSV.

    Args:
        resultColName: name of the label column (e.g. "RS").
        filenameTrain: path of the training CSV file.
        coef_percent: correlation threshold (Pearson method only).
        flag: one of the IF_Method constants selecting the strategy.
        nlargestFeatures: number of features to keep (non-Pearson methods).

    Returns:
        (importanceFeature, X_Train_ImportFeature, y_Train_ImportFeature):
        the selected columns, the training matrix restricted to them, and
        the label vector.
    """
    ways_to_if = "NONE"
    if (flag == IF_Method.PearsonCorrelationMatrix):
        ways_to_if = "Pearson Correlation Matrix"
    if (flag == IF_Method.UnivariateSelection):
        ways_to_if = "Univariate Selection"
    if (flag == IF_Method.FeatureImportance):
        ways_to_if = "Feature Importance"
    print("Cách để chọn features : " + ways_to_if)
    print(str("Train bằng file ") + str(filenameTrain))
    data = pd.read_csv(filenameTrain)
    colName = data.columns
    df = pd.DataFrame(data, columns=colName)
    df.head()
    X = df[colName]
    y = df[resultColName]
    if (flag == IF_Method.PearsonCorrelationMatrix):
        print("Hệ số tương quan > " + str(coef_percent))
        cor_target = abs(X.corr()[resultColName])
        if float(coef_percent) != 0.0:
            relevant_features = cor_target[cor_target > float(coef_percent)]
        else:
            # Bug fix: the original referenced the undefined name X_train_new
            # here (NameError). With a zero threshold keep every column.
            relevant_features = cor_target
        importanceFeature = relevant_features.index
        print("Number feature selected : " + str(len(importanceFeature)))
    if (flag == IF_Method.UnivariateSelection):
        print("Số lượng Importance Feature: " + str(nlargestFeatures))
        # Positional axis arguments to drop() were removed in pandas 2.0;
        # axis=1 is the backward-compatible keyword form.
        X_No_V = X.drop(data.columns[0], axis=1)  # independent columns
        # apply SelectKBest class to extract the top features by chi2 score
        bestfeatures = SelectKBest(score_func=chi2, k=10)
        fit = bestfeatures.fit(X_No_V, y)
        dfscores = pd.DataFrame(fit.scores_)
        # NOTE(review): dfcolumns uses X.columns while the scores come from
        # X_No_V (first column dropped) — the Specs/Score rows look shifted
        # by one; confirm the intended alignment before relying on it.
        dfcolumns = pd.DataFrame(X.columns)
        # concat two dataframes for better visualization
        relevant_features = pd.concat([dfcolumns, dfscores], axis=1)
        relevant_features.columns = ['Specs', 'Score']
        importanceFeature = relevant_features.nlargest(nlargestFeatures, 'Score')
        importanceFeature = importanceFeature.drop('Score', axis=1)
        importanceFeature = importanceFeature.iloc[:, -1]
    if (flag == IF_Method.FeatureImportance):
        print("Số lượng Importance Feature: " + str(nlargestFeatures))
        X_No_V = X.drop(data.columns[0], axis=1)  # independent columns
        from sklearn.ensemble import ExtraTreesClassifier
        import matplotlib.pyplot as plt
        model = ExtraTreesClassifier()
        model.fit(X_No_V, y)
        # Rank features by the tree-based importances.
        importanceFeature = pd.Series(model.feature_importances_, index=X_No_V.columns)
        importanceFeature = importanceFeature.nlargest(nlargestFeatures)
        importanceFeature = importanceFeature.index
    # NOTE(review): an unrecognised flag leaves importanceFeature undefined
    # and raises NameError below — same contract as the original.
    X_Train_ImportFeature = df[importanceFeature]
    y_Train_ImportFeature = y
    printGraph(importanceFeature, data)
    return importanceFeature, X_Train_ImportFeature, y_Train_ImportFeature
def findRandomeFeaturesSets(resultColName, filenameTrain, sizeIF):
    """Draw a random subset of sizeIF feature columns from the training CSV.

    Returns (column_index, X_subset, y) mirroring findImportancesFeatures.
    """
    data = pd.read_csv(filenameTrain)
    colName = data.columns
    df = pd.DataFrame(data, columns=colName)
    df.head()
    y = df[resultColName]
    rng = default_rng()
    # colName has n columns; position 0 is the unnamed id column and the
    # label sits at n - 1, so sample from the middle n - 2 positions only.
    picks = rng.choice(len(colName) - 2, size=sizeIF, replace=False)
    randomeFeatureSameSize = colName.delete(0).take(picks)
    return randomeFeatureSameSize, df[randomeFeatureSameSize], y
def printResult(acc_random, mcc_random, auc_random, acc_if, mcc_if, auc_if, nTimes):
    """Print ACC/MCC/AUC averaged over nTimes runs for both selection modes."""
    def _dump(acc, mcc, auc):
        # One metric block; values are running sums divided by the run count.
        print("ACC = " + str(acc / nTimes))
        print("MCC = " + str(mcc / nTimes))
        print("AUC = " + str(auc / nTimes))
    print("Khi Random ")
    _dump(acc_random, mcc_random, auc_random)
    print("+++++ ")
    print("Khi xét Importance Features")
    _dump(acc_if, mcc_if, auc_if)
    print("--------------------------------- ")
def sumThenAveragePercisely(accuracy_model_acc):
    """Average a list of accuracies with mpmath's precise sum/divide (prec=5)."""
    total = fsum(accuracy_model_acc)
    return fdiv(total, len(accuracy_model_acc), prec=5)
| [
"pandas.DataFrame",
"seaborn.heatmap",
"matplotlib.pyplot.show",
"pandas.read_csv",
"numpy.random.default_rng",
"sklearn.ensemble.ExtraTreesClassifier",
"pandas.Series",
"pandas.concat",
"matplotlib.pyplot.subplots",
"sklearn.feature_selection.SelectKBest"
] | [((1176, 1206), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(16, 14)'}), '(figsize=(16, 14))\n', (1188, 1206), True, 'import matplotlib.pyplot as plt\n'), ((1211, 1269), 'seaborn.heatmap', 'sns.heatmap', (['corData'], {'annot': '(False)', 'cmap': 'plt.cm.Reds', 'ax': 'ax'}), '(corData, annot=False, cmap=plt.cm.Reds, ax=ax)\n', (1222, 1269), True, 'import seaborn as sns\n'), ((1273, 1283), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1281, 1283), True, 'import matplotlib.pyplot as plt\n'), ((3043, 3069), 'pandas.read_csv', 'pd.read_csv', (['filenameTrain'], {}), '(filenameTrain)\n', (3054, 3069), True, 'import pandas as pd\n'), ((3106, 3141), 'pandas.DataFrame', 'pd.DataFrame', (['data'], {'columns': 'colName'}), '(data, columns=colName)\n', (3118, 3141), True, 'import pandas as pd\n'), ((5518, 5544), 'pandas.read_csv', 'pd.read_csv', (['filenameTrain'], {}), '(filenameTrain)\n', (5529, 5544), True, 'import pandas as pd\n'), ((5581, 5616), 'pandas.DataFrame', 'pd.DataFrame', (['data'], {'columns': 'colName'}), '(data, columns=colName)\n', (5593, 5616), True, 'import pandas as pd\n'), ((5667, 5680), 'numpy.random.default_rng', 'default_rng', ([], {}), '()\n', (5678, 5680), False, 'from numpy.random import default_rng\n'), ((4061, 4095), 'sklearn.feature_selection.SelectKBest', 'SelectKBest', ([], {'score_func': 'chi2', 'k': '(10)'}), '(score_func=chi2, k=10)\n', (4072, 4095), False, 'from sklearn.feature_selection import SelectKBest\n'), ((4157, 4182), 'pandas.DataFrame', 'pd.DataFrame', (['fit.scores_'], {}), '(fit.scores_)\n', (4169, 4182), True, 'import pandas as pd\n'), ((4203, 4226), 'pandas.DataFrame', 'pd.DataFrame', (['X.columns'], {}), '(X.columns)\n', (4215, 4226), True, 'import pandas as pd\n'), ((4312, 4352), 'pandas.concat', 'pd.concat', (['[dfcolumns, dfscores]'], {'axis': '(1)'}), '([dfcolumns, dfscores], axis=1)\n', (4321, 4352), True, 'import pandas as pd\n'), ((4910, 4932), 'sklearn.ensemble.ExtraTreesClassifier', 
'ExtraTreesClassifier', ([], {}), '()\n', (4930, 4932), False, 'from sklearn.ensemble import ExtraTreesClassifier\n'), ((5059, 5118), 'pandas.Series', 'pd.Series', (['model.feature_importances_'], {'index': 'X_No_V.columns'}), '(model.feature_importances_, index=X_No_V.columns)\n', (5068, 5118), True, 'import pandas as pd\n')] |
#%%
import os
import itertools
import cloudpickle
import re
import glob
import git
# Our numerical workhorses
import numpy as np
import pandas as pd
import scipy as sp
# Import library to perform maximum entropy fits
from maxentropy.skmaxent import FeatureTransformer, MinDivergenceModel
# Import libraries to parallelize processes
from joblib import Parallel, delayed
# Import the project utils
import ccutils
# Find home directory for repo
repo = git.Repo("./", search_parent_directories=True)
homedir = repo.working_dir
tmpdir = f'{homedir}/tmp/'
datadir = f'{homedir}/data/csv_maxEnt_dist/'
#%%
# Load moments for multi-promoter level
df_constraints = pd.read_csv(
    f'{datadir}MaxEnt_constraints_mult_protein_ext_R.csv'
)
# Remove the zeroth moment column
df_constraints = df_constraints.drop(labels="m0p0", axis=1)
#%%
# Extract protein moments in constraints
# (columns named like "m0p2": mRNA power 0, protein power 2)
prot_mom = [x for x in df_constraints.columns if "m0" in x]
# Define index of moments to be used in the computation
# Parse the two integer exponents out of each "m<i>p<j>" column name.
moments = [tuple(map(int, re.findall(r"\d+", s))) for s in prot_mom]
# Define sample space
mRNA_space = np.array([0])  # Dummy space
# Protein copy-number support: integers 0 .. 10e4 (= 1e5).
protein_space = np.arange(0, 10e4)
# Generate sample space as a list of pairs using itertools.
samplespace = list(itertools.product(mRNA_space, protein_space))
# Initialize matrix to save all the features that are fed to the
# maxentropy function
features = np.zeros([len(moments), len(samplespace)])
# Loop through constraints and compute features
# Row i holds the moment feature m^i * p^j evaluated on every sample point.
for i, mom in enumerate(moments):
    features[i, :] = [ccutils.maxent.feature_fn(x, mom) for x in samplespace]
#%%
# Initialize data frame to save the lagrange multipliers.
names = ["operator", "binding_energy", "repressor", "inducer_uM"]
# Add names of the constraints
names = names + ["lambda_m" + str(m[0]) + "p" + str(m[1]) for m in moments]
# Initialize empty dataframe
df_maxEnt = pd.DataFrame([], columns=names)
# Define column names containing the constraints used to fit the distribution
constraints_names = ["m" + str(m[0]) + "p" + str(m[1]) for m in moments]
# Define function for parallel computation
# NOTE: relies on the module-level globals `constraints_names`, `features`
# and `names` defined above; each joblib worker gets its own copy.
def maxEnt_parallel(idx, df):
    # Report on progress
    print("iteration: ", idx)
    # Extract constraints
    constraints = df.loc[constraints_names]
    # Perform MaxEnt computation
    # We use the Powell method because despite being slower it is more
    # robust than the other implementations.
    Lagrange = ccutils.maxent.MaxEnt_bretthorst(
        constraints,
        features,
        algorithm="Powell",
        tol=1e-5,
        paramtol=1e-5,
        maxiter=10000,
    )
    # Save Lagrange multipliers into dataframe
    # names[4::] skips the four metadata columns (operator, binding_energy,
    # repressor, inducer_uM) and keeps only the lambda_* names.
    series = pd.Series(Lagrange, index=names[4::])
    # Add other features to series before appending to dataframe
    series = pd.concat([df.drop(constraints_names), series])
    return series
# Run the function in parallel
# One MaxEnt fit per constraints row, fanned out over 6 worker processes.
maxEnt_series = Parallel(n_jobs=6)(
    delayed(maxEnt_parallel)(idx, df)
    for idx, df in df_constraints.iterrows()
)
# Initialize data frame to save list of parameters
df_maxEnt = pd.DataFrame([], columns=names)
# NOTE(review): DataFrame.append was deprecated in pandas 1.4 and removed
# in 2.0; this loop needs pd.concat on newer pandas — confirm the pinned
# pandas version before upgrading.
for s in maxEnt_series:
    df_maxEnt = df_maxEnt.append(s, ignore_index=True)
df_maxEnt.to_csv(f'{datadir}MaxEnt_Lagrange_mult_protein_ext_R.csv',
                 index=False)
"pandas.DataFrame",
"pandas.read_csv",
"ccutils.maxent.MaxEnt_bretthorst",
"git.Repo",
"joblib.Parallel",
"ccutils.maxent.feature_fn",
"re.findall",
"numpy.arange",
"numpy.array",
"pandas.Series",
"itertools.product",
"joblib.delayed"
] | [((454, 500), 'git.Repo', 'git.Repo', (['"""./"""'], {'search_parent_directories': '(True)'}), "('./', search_parent_directories=True)\n", (462, 500), False, 'import git\n'), ((663, 729), 'pandas.read_csv', 'pd.read_csv', (['f"""{datadir}MaxEnt_constraints_mult_protein_ext_R.csv"""'], {}), "(f'{datadir}MaxEnt_constraints_mult_protein_ext_R.csv')\n", (674, 729), True, 'import pandas as pd\n'), ((1099, 1112), 'numpy.array', 'np.array', (['[0]'], {}), '([0])\n', (1107, 1112), True, 'import numpy as np\n'), ((1144, 1166), 'numpy.arange', 'np.arange', (['(0)', '(100000.0)'], {}), '(0, 100000.0)\n', (1153, 1166), True, 'import numpy as np\n'), ((1871, 1902), 'pandas.DataFrame', 'pd.DataFrame', (['[]'], {'columns': 'names'}), '([], columns=names)\n', (1883, 1902), True, 'import pandas as pd\n'), ((3052, 3083), 'pandas.DataFrame', 'pd.DataFrame', (['[]'], {'columns': 'names'}), '([], columns=names)\n', (3064, 3083), True, 'import pandas as pd\n'), ((1243, 1287), 'itertools.product', 'itertools.product', (['mRNA_space', 'protein_space'], {}), '(mRNA_space, protein_space)\n', (1260, 1287), False, 'import itertools\n'), ((2420, 2541), 'ccutils.maxent.MaxEnt_bretthorst', 'ccutils.maxent.MaxEnt_bretthorst', (['constraints', 'features'], {'algorithm': '"""Powell"""', 'tol': '(1e-05)', 'paramtol': '(1e-05)', 'maxiter': '(10000)'}), "(constraints, features, algorithm='Powell',\n tol=1e-05, paramtol=1e-05, maxiter=10000)\n", (2452, 2541), False, 'import ccutils\n'), ((2651, 2687), 'pandas.Series', 'pd.Series', (['Lagrange'], {'index': 'names[4:]'}), '(Lagrange, index=names[4:])\n', (2660, 2687), True, 'import pandas as pd\n'), ((2883, 2901), 'joblib.Parallel', 'Parallel', ([], {'n_jobs': '(6)'}), '(n_jobs=6)\n', (2891, 2901), False, 'from joblib import Parallel, delayed\n'), ((1536, 1569), 'ccutils.maxent.feature_fn', 'ccutils.maxent.feature_fn', (['x', 'mom'], {}), '(x, mom)\n', (1561, 1569), False, 'import ccutils\n'), ((1020, 1041), 're.findall', 're.findall', 
(['"""\\\\d+"""', 's'], {}), "('\\\\d+', s)\n", (1030, 1041), False, 'import re\n'), ((2907, 2931), 'joblib.delayed', 'delayed', (['maxEnt_parallel'], {}), '(maxEnt_parallel)\n', (2914, 2931), False, 'from joblib import Parallel, delayed\n')] |
import torch
import numpy as np
import torch.utils.data
from lib.add_window import Add_Window_Horizon
from lib.load_dataset import load_st_dataset
from lib.normalization import NScaler, MinMax01Scaler, MinMax11Scaler, StandardScaler, ColumnMinMaxScaler
import controldiffeq
def normalize_dataset(data, normalizer, column_wise=False):
if normalizer == 'max01':
if column_wise:
minimum = data.min(axis=0, keepdims=True)
maximum = data.max(axis=0, keepdims=True)
else:
minimum = data.min()
maximum = data.max()
scaler = MinMax01Scaler(minimum, maximum)
data = scaler.transform(data)
print('Normalize the dataset by MinMax01 Normalization')
elif normalizer == 'max11':
if column_wise:
minimum = data.min(axis=0, keepdims=True)
maximum = data.max(axis=0, keepdims=True)
else:
minimum = data.min()
maximum = data.max()
scaler = MinMax11Scaler(minimum, maximum)
data = scaler.transform(data)
print('Normalize the dataset by MinMax11 Normalization')
elif normalizer == 'std':
if column_wise:
mean = data.mean(axis=0, keepdims=True)
std = data.std(axis=0, keepdims=True)
else:
mean = data.mean()
std = data.std()
scaler = StandardScaler(mean, std)
data = scaler.transform(data)
print('Normalize the dataset by Standard Normalization')
elif normalizer == 'None':
scaler = NScaler()
data = scaler.transform(data)
print('Does not normalize the dataset')
elif normalizer == 'cmax':
#column min max, to be depressed
#note: axis must be the spatial dimension, please check !
scaler = ColumnMinMaxScaler(data.min(axis=0), data.max(axis=0))
data = scaler.transform(data)
print('Normalize the dataset by Column Min-Max Normalization')
else:
raise ValueError
return data, scaler
def split_data_by_days(data, val_days, test_days, interval=60):
'''
:param data: [B, *]
:param val_days:
:param test_days:
:param interval: interval (15, 30, 60) minutes
:return:
'''
T = int((24*60)/interval)
test_data = data[-T*test_days:]
val_data = data[-T*(test_days + val_days): -T*test_days]
train_data = data[:-T*(test_days + val_days)]
return train_data, val_data, test_data
def split_data_by_ratio(data, val_ratio, test_ratio):
data_len = data.shape[0]
test_data = data[-int(data_len*test_ratio):]
val_data = data[-int(data_len*(test_ratio+val_ratio)):-int(data_len*test_ratio)]
train_data = data[:-int(data_len*(test_ratio+val_ratio))]
return train_data, val_data, test_data
def data_loader(X, Y, batch_size, shuffle=True, drop_last=True):
cuda = True if torch.cuda.is_available() else False
TensorFloat = torch.cuda.FloatTensor if cuda else torch.FloatTensor
X, Y = TensorFloat(X), TensorFloat(Y)
data = torch.utils.data.TensorDataset(X, Y)
dataloader = torch.utils.data.DataLoader(data, batch_size=batch_size,
shuffle=shuffle, drop_last=drop_last)
return dataloader
def data_loader_cde(X, Y, batch_size, shuffle=True, drop_last=True):
cuda = True if torch.cuda.is_available() else False
TensorFloat = torch.cuda.FloatTensor if cuda else torch.FloatTensor
# X, Y = TensorFloat(X), TensorFloat(Y)
# X = tuple(TensorFloat(x) for x in X)
# Y = TensorFloat(Y)
data = torch.utils.data.TensorDataset(*X, torch.tensor(Y))
dataloader = torch.utils.data.DataLoader(data, batch_size=batch_size,
shuffle=shuffle, drop_last=drop_last)
return dataloader
def get_dataloader(args, normalizer = 'std', tod=False, dow=False, weather=False, single=True):
#load raw st dataset
data = load_st_dataset(args.dataset) # B, N, D
#normalize st data
data, scaler = normalize_dataset(data, normalizer, args.column_wise)
#spilit dataset by days or by ratio
if args.test_ratio > 1:
data_train, data_val, data_test = split_data_by_days(data, args.val_ratio, args.test_ratio)
else:
data_train, data_val, data_test = split_data_by_ratio(data, args.val_ratio, args.test_ratio)
#add time window
x_tra, y_tra = Add_Window_Horizon(data_train, args.lag, args.horizon, single)
x_val, y_val = Add_Window_Horizon(data_val, args.lag, args.horizon, single)
x_test, y_test = Add_Window_Horizon(data_test, args.lag, args.horizon, single)
print('Train: ', x_tra.shape, y_tra.shape)
print('Val: ', x_val.shape, y_val.shape)
print('Test: ', x_test.shape, y_test.shape)
##############get dataloader######################
train_dataloader = data_loader(x_tra, y_tra, args.batch_size, shuffle=True, drop_last=True)
if len(x_val) == 0:
val_dataloader = None
else:
val_dataloader = data_loader(x_val, y_val, args.batch_size, shuffle=False, drop_last=True)
test_dataloader = data_loader(x_test, y_test, args.batch_size, shuffle=False, drop_last=False)
return train_dataloader, val_dataloader, test_dataloader, scaler
def get_dataloader_cde(args, normalizer = 'std', tod=False, dow=False, weather=False, single=True):
#load raw st dataset
data = load_st_dataset(args.dataset) # B, N, D
#normalize st data
data, scaler = normalize_dataset(data, normalizer, args.column_wise)
#spilit dataset by days or by ratio
if args.test_ratio > 1:
data_train, data_val, data_test = split_data_by_days(data, args.val_ratio, args.test_ratio)
else:
data_train, data_val, data_test = split_data_by_ratio(data, args.val_ratio, args.test_ratio)
#add time window
x_tra, y_tra = Add_Window_Horizon(data_train, args.lag, args.horizon, single)
x_val, y_val = Add_Window_Horizon(data_val, args.lag, args.horizon, single)
x_test, y_test = Add_Window_Horizon(data_test, args.lag, args.horizon, single)
print('Train: ', x_tra.shape, y_tra.shape)
print('Val: ', x_val.shape, y_val.shape)
print('Test: ', x_test.shape, y_test.shape)
# TODO: make argument for missing data
if args.missing_test == True:
generator = torch.Generator().manual_seed(56789)
xs = np.concatenate([x_tra, x_val, x_test])
for xi in xs:
removed_points_seq = torch.randperm(xs.shape[1], generator=generator)[:int(xs.shape[1] * args.missing_rate)].sort().values
removed_points_node = torch.randperm(xs.shape[2], generator=generator)[:int(xs.shape[2] * args.missing_rate)].sort().values
for seq in removed_points_seq:
for node in removed_points_node:
xi[seq,node] = float('nan')
x_tra = xs[:x_tra.shape[0],...]
x_val = xs[x_tra.shape[0]:x_tra.shape[0]+x_val.shape[0],...]
x_test = xs[-x_test.shape[0]:,...]
####
# TODO: make argument for data category
data_category = 'traffic'
if data_category == 'traffic':
times = torch.linspace(0, 11, 12)
elif data_category == 'token':
times = torch.linspace(0, 6, 7)
else:
raise ValueError
augmented_X_tra = []
augmented_X_tra.append(times.unsqueeze(0).unsqueeze(0).repeat(x_tra.shape[0],x_tra.shape[2],1).unsqueeze(-1).transpose(1,2))
augmented_X_tra.append(torch.Tensor(x_tra[..., :]))
x_tra = torch.cat(augmented_X_tra, dim=3)
augmented_X_val = []
augmented_X_val.append(times.unsqueeze(0).unsqueeze(0).repeat(x_val.shape[0],x_val.shape[2],1).unsqueeze(-1).transpose(1,2))
augmented_X_val.append(torch.Tensor(x_val[..., :]))
x_val = torch.cat(augmented_X_val, dim=3)
augmented_X_test = []
augmented_X_test.append(times.unsqueeze(0).unsqueeze(0).repeat(x_test.shape[0],x_test.shape[2],1).unsqueeze(-1).transpose(1,2))
augmented_X_test.append(torch.Tensor(x_test[..., :]))
x_test = torch.cat(augmented_X_test, dim=3)
####
# train_coeffs = controldiffeq.natural_cubic_spline_coeffs(times, torch.Tensor(x_tra).transpose(1,2))
# valid_coeffs = controldiffeq.natural_cubic_spline_coeffs(times, torch.Tensor(x_val).transpose(1,2))
# test_coeffs = controldiffeq.natural_cubic_spline_coeffs(times, torch.Tensor(x_test).transpose(1,2))
train_coeffs = controldiffeq.natural_cubic_spline_coeffs(times, x_tra.transpose(1,2))
valid_coeffs = controldiffeq.natural_cubic_spline_coeffs(times, x_val.transpose(1,2))
test_coeffs = controldiffeq.natural_cubic_spline_coeffs(times, x_test.transpose(1,2))
# train_coeffs = tuple(coeff.transpose(1,2) for coeff in train_coeffs)
# valid_coeffs = tuple(coeff.transpose(1,2) for coeff in valid_coeffs)
# test_coeffs = tuple(coeff.transpose(1,2) for coeff in test_coeffs)
##############get dataloader######################
train_dataloader = data_loader_cde(train_coeffs, y_tra, args.batch_size, shuffle=True, drop_last=True)
if len(x_val) == 0:
val_dataloader = None
else:
val_dataloader = data_loader_cde(valid_coeffs, y_val, args.batch_size, shuffle=False, drop_last=True)
# val_dataloader = data_loader(x_val, y_val, args.batch_size, shuffle=False, drop_last=True)
# test_dataloader = data_loader(x_test, y_test, args.batch_size, shuffle=False, drop_last=False)
test_dataloader = data_loader_cde(test_coeffs, y_test, args.batch_size, shuffle=False, drop_last=False)
return train_dataloader, val_dataloader, test_dataloader, scaler, times
if __name__ == '__main__':
import argparse
#MetrLA 207; BikeNYC 128; SIGIR_solar 137; SIGIR_electric 321
DATASET = 'SIGIR_electric'
if DATASET == 'MetrLA':
NODE_NUM = 207
elif DATASET == 'BikeNYC':
NODE_NUM = 128
elif DATASET == 'SIGIR_solar':
NODE_NUM = 137
elif DATASET == 'SIGIR_electric':
NODE_NUM = 321
parser = argparse.ArgumentParser(description='PyTorch dataloader')
parser.add_argument('--dataset', default=DATASET, type=str)
parser.add_argument('--num_nodes', default=NODE_NUM, type=int)
parser.add_argument('--val_ratio', default=0.1, type=float)
parser.add_argument('--test_ratio', default=0.2, type=float)
parser.add_argument('--lag', default=12, type=int)
parser.add_argument('--horizon', default=12, type=int)
parser.add_argument('--batch_size', default=64, type=int)
args = parser.parse_args()
train_dataloader, val_dataloader, test_dataloader, scaler = get_dataloader(args, normalizer = 'std', tod=False, dow=False, weather=False, single=True) | [
"lib.normalization.MinMax11Scaler",
"lib.load_dataset.load_st_dataset",
"argparse.ArgumentParser",
"torch.utils.data.DataLoader",
"numpy.concatenate",
"lib.normalization.StandardScaler",
"torch.cat",
"lib.add_window.Add_Window_Horizon",
"torch.cuda.is_available",
"torch.utils.data.TensorDataset",
... | [((3105, 3141), 'torch.utils.data.TensorDataset', 'torch.utils.data.TensorDataset', (['X', 'Y'], {}), '(X, Y)\n', (3135, 3141), False, 'import torch\n'), ((3160, 3258), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['data'], {'batch_size': 'batch_size', 'shuffle': 'shuffle', 'drop_last': 'drop_last'}), '(data, batch_size=batch_size, shuffle=shuffle,\n drop_last=drop_last)\n', (3187, 3258), False, 'import torch\n'), ((3723, 3821), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['data'], {'batch_size': 'batch_size', 'shuffle': 'shuffle', 'drop_last': 'drop_last'}), '(data, batch_size=batch_size, shuffle=shuffle,\n drop_last=drop_last)\n', (3750, 3821), False, 'import torch\n'), ((4026, 4055), 'lib.load_dataset.load_st_dataset', 'load_st_dataset', (['args.dataset'], {}), '(args.dataset)\n', (4041, 4055), False, 'from lib.load_dataset import load_st_dataset\n'), ((4497, 4559), 'lib.add_window.Add_Window_Horizon', 'Add_Window_Horizon', (['data_train', 'args.lag', 'args.horizon', 'single'], {}), '(data_train, args.lag, args.horizon, single)\n', (4515, 4559), False, 'from lib.add_window import Add_Window_Horizon\n'), ((4580, 4640), 'lib.add_window.Add_Window_Horizon', 'Add_Window_Horizon', (['data_val', 'args.lag', 'args.horizon', 'single'], {}), '(data_val, args.lag, args.horizon, single)\n', (4598, 4640), False, 'from lib.add_window import Add_Window_Horizon\n'), ((4663, 4724), 'lib.add_window.Add_Window_Horizon', 'Add_Window_Horizon', (['data_test', 'args.lag', 'args.horizon', 'single'], {}), '(data_test, args.lag, args.horizon, single)\n', (4681, 4724), False, 'from lib.add_window import Add_Window_Horizon\n'), ((5499, 5528), 'lib.load_dataset.load_st_dataset', 'load_st_dataset', (['args.dataset'], {}), '(args.dataset)\n', (5514, 5528), False, 'from lib.load_dataset import load_st_dataset\n'), ((5970, 6032), 'lib.add_window.Add_Window_Horizon', 'Add_Window_Horizon', (['data_train', 'args.lag', 'args.horizon', 'single'], {}), 
'(data_train, args.lag, args.horizon, single)\n', (5988, 6032), False, 'from lib.add_window import Add_Window_Horizon\n'), ((6053, 6113), 'lib.add_window.Add_Window_Horizon', 'Add_Window_Horizon', (['data_val', 'args.lag', 'args.horizon', 'single'], {}), '(data_val, args.lag, args.horizon, single)\n', (6071, 6113), False, 'from lib.add_window import Add_Window_Horizon\n'), ((6136, 6197), 'lib.add_window.Add_Window_Horizon', 'Add_Window_Horizon', (['data_test', 'args.lag', 'args.horizon', 'single'], {}), '(data_test, args.lag, args.horizon, single)\n', (6154, 6197), False, 'from lib.add_window import Add_Window_Horizon\n'), ((7636, 7669), 'torch.cat', 'torch.cat', (['augmented_X_tra'], {'dim': '(3)'}), '(augmented_X_tra, dim=3)\n', (7645, 7669), False, 'import torch\n'), ((7896, 7929), 'torch.cat', 'torch.cat', (['augmented_X_val'], {'dim': '(3)'}), '(augmented_X_val, dim=3)\n', (7905, 7929), False, 'import torch\n'), ((8163, 8197), 'torch.cat', 'torch.cat', (['augmented_X_test'], {'dim': '(3)'}), '(augmented_X_test, dim=3)\n', (8172, 8197), False, 'import torch\n'), ((10160, 10217), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""PyTorch dataloader"""'}), "(description='PyTorch dataloader')\n", (10183, 10217), False, 'import argparse\n'), ((610, 642), 'lib.normalization.MinMax01Scaler', 'MinMax01Scaler', (['minimum', 'maximum'], {}), '(minimum, maximum)\n', (624, 642), False, 'from lib.normalization import NScaler, MinMax01Scaler, MinMax11Scaler, StandardScaler, ColumnMinMaxScaler\n'), ((2940, 2965), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (2963, 2965), False, 'import torch\n'), ((3416, 3441), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (3439, 3441), False, 'import torch\n'), ((3688, 3703), 'torch.tensor', 'torch.tensor', (['Y'], {}), '(Y)\n', (3700, 3703), False, 'import torch\n'), ((6494, 6532), 'numpy.concatenate', 'np.concatenate', (['[x_tra, x_val, x_test]'], {}), 
'([x_tra, x_val, x_test])\n', (6508, 6532), True, 'import numpy as np\n'), ((7270, 7295), 'torch.linspace', 'torch.linspace', (['(0)', '(11)', '(12)'], {}), '(0, 11, 12)\n', (7284, 7295), False, 'import torch\n'), ((7594, 7621), 'torch.Tensor', 'torch.Tensor', (['x_tra[..., :]'], {}), '(x_tra[..., :])\n', (7606, 7621), False, 'import torch\n'), ((7854, 7881), 'torch.Tensor', 'torch.Tensor', (['x_val[..., :]'], {}), '(x_val[..., :])\n', (7866, 7881), False, 'import torch\n'), ((8119, 8147), 'torch.Tensor', 'torch.Tensor', (['x_test[..., :]'], {}), '(x_test[..., :])\n', (8131, 8147), False, 'import torch\n'), ((1017, 1049), 'lib.normalization.MinMax11Scaler', 'MinMax11Scaler', (['minimum', 'maximum'], {}), '(minimum, maximum)\n', (1031, 1049), False, 'from lib.normalization import NScaler, MinMax01Scaler, MinMax11Scaler, StandardScaler, ColumnMinMaxScaler\n'), ((7349, 7372), 'torch.linspace', 'torch.linspace', (['(0)', '(6)', '(7)'], {}), '(0, 6, 7)\n', (7363, 7372), False, 'import torch\n'), ((1410, 1435), 'lib.normalization.StandardScaler', 'StandardScaler', (['mean', 'std'], {}), '(mean, std)\n', (1424, 1435), False, 'from lib.normalization import NScaler, MinMax01Scaler, MinMax11Scaler, StandardScaler, ColumnMinMaxScaler\n'), ((6443, 6460), 'torch.Generator', 'torch.Generator', ([], {}), '()\n', (6458, 6460), False, 'import torch\n'), ((1591, 1600), 'lib.normalization.NScaler', 'NScaler', ([], {}), '()\n', (1598, 1600), False, 'from lib.normalization import NScaler, MinMax01Scaler, MinMax11Scaler, StandardScaler, ColumnMinMaxScaler\n'), ((6590, 6638), 'torch.randperm', 'torch.randperm', (['xs.shape[1]'], {'generator': 'generator'}), '(xs.shape[1], generator=generator)\n', (6604, 6638), False, 'import torch\n'), ((6727, 6775), 'torch.randperm', 'torch.randperm', (['xs.shape[2]'], {'generator': 'generator'}), '(xs.shape[2], generator=generator)\n', (6741, 6775), False, 'import torch\n')] |
import os
import sys
main_path = os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))
sys.path.insert(0, main_path)
import numpy as np
import time
from src.preprocesing import gen_dataset_from_h5, rearrange_splits, get_mmbopf_plasticc_path
from src.cross_validation import cv_mmm_bopf, load_bopf_from_quantity_search
from src.mmmbopf.method import MMMBOPF
import argparse
from multiprocessing import cpu_count
import pandas as pd
'''
Script used to find best Multi-resolution Multi-quantity Multi-variate Bag-of-Patterns Features
using a grid search algorithm
The script is intended to work only with PLaSTiCC dataset
The parameters window width (win) and word length (wl) are fixed for this script
'''
_BANDS = ["lsstg", "lssti", "lsstr", "lsstu", "lssty", "lsstz"] # bands on PLaSTiCC dataset
def get_fixed_arguments():
doc_kwargs = {
"irr_handler": "#",
"mean_bp_dist": "normal",
"verbose": True,
}
lsa_kwargs = { # scheme: ltc
"class_based": False, # options: True, False
"normalize": "l2", # options: None, l2
"use_idf": True, # options: True, False
"sublinear_tf": True # options: True, False
}
pre_load_bopf = True
return doc_kwargs, lsa_kwargs, pre_load_bopf
def get_dataset_variables(dataset_name, select_survey=None):
# read dataset
dataset, labels_, metadata, split_folds = gen_dataset_from_h5(dataset_name, bands=_BANDS, num_folds=5, select_survey=select_survey)
split_folds = rearrange_splits(split_folds)
classes = np.unique(labels_)
print(len(labels_))
N = int(np.mean([len(ts[0]) * 2 for ts in dataset]))
return dataset, labels_, metadata, split_folds, classes, N
def check_or_create_folder(path):
if not os.path.exists(path):
os.mkdir(path)
def get_files_and_folders(c, select_survey=None):
data_path = get_mmbopf_plasticc_path()
resolution_search_main_directory = os.path.join(data_path, "resolution_search")
check_or_create_folder(resolution_search_main_directory)
resolution_search_base_result = os.path.join(data_path, "res_search_base_res")
check_or_create_folder(resolution_search_base_result)
resolution_search_directory = os.path.join(resolution_search_main_directory, c.lower())
check_or_create_folder(resolution_search_directory)
name = "%s_resolution_search" % c.lower()
if select_survey is not None:
name += "_%s" % select_survey
method_sub_directory = os.path.join(resolution_search_directory, name)
check_or_create_folder(method_sub_directory)
name = "optimal_config_%s" % c.lower()
if select_survey is not None:
name += "_%s" % select_survey
name += ".json"
config_file = os.path.join(data_path, name)
return resolution_search_directory, method_sub_directory, config_file, resolution_search_base_result
def run_script(dataset_name, q_code, q_search_path, q_search_cv_results,
top_k, resolution_max, alpha, C, timestamp, n_jobs, select_survey=None):
dataset, labels_, metadata, split_folds, classes, N = get_dataset_variables(dataset_name, select_survey=select_survey)
doc_kwargs, lsa_kwargs, pre_load_bopf = get_fixed_arguments()
drop_zero_variance = C.lower() == "lsa" # drop zero variance doesnt work for MANOVA
resolution_search_directory, method_sub_directory, config_file, out_base_bopf_path = get_files_and_folders(C, select_survey=select_survey)
# get pipeline
method = MMMBOPF(alpha=alpha, Q_code=q_code, C=C, lsa_kw=lsa_kwargs,
doc_kw=doc_kwargs, N=N, n_jobs=n_jobs,
drop_zero_variance=drop_zero_variance)
# pre-load saved base bopf
print("LOADING PRECOMPUTED BASE BOPF...")
if args.cv_smm_again is None:
cv_smm_bopf_results = load_bopf_from_quantity_search(q_search_path,
q_search_cv_results,
method.quantities_code())
wins = None
else:
cv_smm_bopf_results = None
wins = np.append(np.array([4, 6, 8, 10, 14, 18, 25]), np.logspace(np.log10(30), np.log10(1000), 20))
wls = [1, 2, 3]
R, _, optimal_acc = cv_mmm_bopf(
dataset, labels_, method, cv=split_folds, resolution_max=resolution_max,
top_k=top_k, out_path=method_sub_directory, n_jobs=n_jobs,
cv_smm_bopf_results=cv_smm_bopf_results, drop_zero_variance=drop_zero_variance,
timestamp=timestamp, out_base_bopf_path=out_base_bopf_path, wls=wls, wins=wins)
return R, optimal_acc, method, config_file
if __name__ == '__main__':
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument(
'dataset',
help='Name of the dataset to find best combination of quantities on.'
)
parser.add_argument(
"multi_quantity_resume_file",
help="The resume with the search of the optimal multi-quantity combination"
)
parser.add_argument(
"--top_k",
type=int,
default=4,
help="the top K single-resolution representations to try on this multi-resolution search"
)
parser.add_argument(
"--resolution_max",
type=int,
default=4,
help="The maximum number of resolutions to include in the optimal multi-resolution combination"
)
parser.add_argument(
"--alpha",
type=int,
default=4,
help="alphabet size to use during the search"
)
parser.add_argument(
"--compact_method",
type=str,
default="LSA",
help="The compact method to use, options are: LSA or MANOVA"
)
parser.add_argument(
"--timestamp",
type=str,
default=time.strftime("%Y%m%d-%H%M%S"),
help="timestamp for creating unique files"
)
parser.add_argument(
"--n_jobs",
type=int,
default=-1,
help="The number of process to run in parallel"
)
parser.add_argument("--cv_smm_again", type=str, default=None)
parser.add_argument("--split_surveys", type=str, default=None)
args = parser.parse_args()
if args.n_jobs == -1:
n_jobs = cpu_count()
else:
n_jobs = args.n_jobs
ini = time.time()
# we try the top 2 quantities combinations and save the configuration for the best
quantity_search_resume = pd.read_csv(args.multi_quantity_resume_file, index_col=None)
quantity_search_resume = quantity_search_resume.sort_values("cv_mean", ascending=False)
top1 = quantity_search_resume[quantity_search_resume["quantity"] == "(TrMm-MmMn-MmMx-TrMn)"].iloc[0]
if args.split_surveys:
R1, optimal_acc1, method1, config_file1 = run_script(
args.dataset, top1.quantity, top1.q_search_path, top1.cv_results_file, args.top_k,
args.resolution_max, args.alpha, args.compact_method, args.timestamp, n_jobs, select_survey="ddf")
R2, optimal_acc2, method2, config_file2 = run_script(
args.dataset, top1.quantity, top1.q_search_path, top1.cv_results_file, args.top_k,
args.resolution_max, args.alpha, args.compact_method, args.timestamp, n_jobs, select_survey="wdf")
else:
R1, optimal_acc1, method1, config_file1 = run_script(
args.dataset, top1.quantity, top1.q_search_path, top1.cv_results_file, args.top_k,
args.resolution_max, args.alpha, args.compact_method, args.timestamp, n_jobs)
# top2 = quantity_search_resume.iloc[1]
# R2, optimal_acc2, method2, config_file2 = run_script(
# args.dataset, top2.quantity, top2.q_search_path, top2.cv_results_file, args.top_k,
# args.resolution_max, args.alpha, args.compact_method, args.timestamp, n_jobs)
# if optimal_acc2 > optimal_acc1:
# R1 = R2
# optimal_acc1 = optimal_acc2
# method1 = method2
# config_file1 = config_file2
end = time.time()
print("ELAPSE TIME: %.3f secs (%.4f Hours)" % ((end - ini), (end - ini) / 3600))
try:
log_file_finder = os.path.join(
"..", "data", "configs_results_new", "%s_multi_ress_search.txt" % args.compact_method.lower())
f = open(log_file_finder, "a+")
f.write("++++++++++++++++++++++++++++++++\n")
f.write("compact method: %s\n" % args.compact_method)
f.write("alphabet size: %s\n" % str(args.alpha))
f.write("quantities: %s\n" % args.q_code)
f.write("resolutions: %s\n" % repr(R1))
f.write("optimal acc_cv: %s\n" % str(optimal_acc1))
f.write("timestamp: %s\n" % str(args.timestamp))
f.write("split_surveys: %s\n" % "True" if args.split_surveys is not None else "False")
f.write("elapse time: %.3f secs (%.4f Hours)\n" % ((end - ini), (end - ini) / 3600))
f.write("comment: run with only forward slider and fixed index\n") # this change
f.write("++++++++++++++++++++++++++++++++\n")
f.close()
except Exception as e:
print("failed to write log file, error: ", e)
# R = [(115.691, 1), (393.505, 2), (642.112, 1), (642.112, 3)]
# R for new implementation, LSA and alpha=4
# R = [(win:wl)-(122.649:1)-(367.244:2)-(429.533:1)-(122.649:2)] (acc: 0.480)
# R for new implementation, LSA, alpha=4, and forward/backward segmentator
# R = [(win:wl)-(89.656:1)-(587.597:2)-(687.260:2)-(429.533:1)] (acc: 0.459)
# R for new imp, LSA, alpha=2, and only forward slicer/segmentator
# R = ?
# best config comb triple-q
# FINAL BEST CONFIG: [(406.48199999999997, 1), (63.172, 1)] , acc: 0.5017615943950111
# best config comb double-q
# [(110.428, 1), (589.8530000000001, 1)] , acc: 0.5040139362331313
method1.config_to_json(config_file1)
# TODO: CORRER DENUEVO PERO PARA ALPHA=6 Y WL= {1, 2}.
# call script with python -m sklearnex 2.find_best_config.py
| [
"os.mkdir",
"argparse.ArgumentParser",
"pandas.read_csv",
"src.preprocesing.gen_dataset_from_h5",
"numpy.unique",
"os.path.dirname",
"sys.path.insert",
"src.preprocesing.rearrange_splits",
"time.time",
"os.path.exists",
"time.strftime",
"src.mmmbopf.method.MMMBOPF",
"src.cross_validation.cv_... | [((96, 125), 'sys.path.insert', 'sys.path.insert', (['(0)', 'main_path'], {}), '(0, main_path)\n', (111, 125), False, 'import sys\n'), ((1402, 1496), 'src.preprocesing.gen_dataset_from_h5', 'gen_dataset_from_h5', (['dataset_name'], {'bands': '_BANDS', 'num_folds': '(5)', 'select_survey': 'select_survey'}), '(dataset_name, bands=_BANDS, num_folds=5, select_survey=\n select_survey)\n', (1421, 1496), False, 'from src.preprocesing import gen_dataset_from_h5, rearrange_splits, get_mmbopf_plasticc_path\n'), ((1510, 1539), 'src.preprocesing.rearrange_splits', 'rearrange_splits', (['split_folds'], {}), '(split_folds)\n', (1526, 1539), False, 'from src.preprocesing import gen_dataset_from_h5, rearrange_splits, get_mmbopf_plasticc_path\n'), ((1554, 1572), 'numpy.unique', 'np.unique', (['labels_'], {}), '(labels_)\n', (1563, 1572), True, 'import numpy as np\n'), ((1878, 1904), 'src.preprocesing.get_mmbopf_plasticc_path', 'get_mmbopf_plasticc_path', ([], {}), '()\n', (1902, 1904), False, 'from src.preprocesing import gen_dataset_from_h5, rearrange_splits, get_mmbopf_plasticc_path\n'), ((1945, 1989), 'os.path.join', 'os.path.join', (['data_path', '"""resolution_search"""'], {}), "(data_path, 'resolution_search')\n", (1957, 1989), False, 'import os\n'), ((2087, 2133), 'os.path.join', 'os.path.join', (['data_path', '"""res_search_base_res"""'], {}), "(data_path, 'res_search_base_res')\n", (2099, 2133), False, 'import os\n'), ((2485, 2532), 'os.path.join', 'os.path.join', (['resolution_search_directory', 'name'], {}), '(resolution_search_directory, name)\n', (2497, 2532), False, 'import os\n'), ((2736, 2765), 'os.path.join', 'os.path.join', (['data_path', 'name'], {}), '(data_path, name)\n', (2748, 2765), False, 'import os\n'), ((3492, 3634), 'src.mmmbopf.method.MMMBOPF', 'MMMBOPF', ([], {'alpha': 'alpha', 'Q_code': 'q_code', 'C': 'C', 'lsa_kw': 'lsa_kwargs', 'doc_kw': 'doc_kwargs', 'N': 'N', 'n_jobs': 'n_jobs', 'drop_zero_variance': 
'drop_zero_variance'}), '(alpha=alpha, Q_code=q_code, C=C, lsa_kw=lsa_kwargs, doc_kw=\n doc_kwargs, N=N, n_jobs=n_jobs, drop_zero_variance=drop_zero_variance)\n', (3499, 3634), False, 'from src.mmmbopf.method import MMMBOPF\n'), ((4242, 4565), 'src.cross_validation.cv_mmm_bopf', 'cv_mmm_bopf', (['dataset', 'labels_', 'method'], {'cv': 'split_folds', 'resolution_max': 'resolution_max', 'top_k': 'top_k', 'out_path': 'method_sub_directory', 'n_jobs': 'n_jobs', 'cv_smm_bopf_results': 'cv_smm_bopf_results', 'drop_zero_variance': 'drop_zero_variance', 'timestamp': 'timestamp', 'out_base_bopf_path': 'out_base_bopf_path', 'wls': 'wls', 'wins': 'wins'}), '(dataset, labels_, method, cv=split_folds, resolution_max=\n resolution_max, top_k=top_k, out_path=method_sub_directory, n_jobs=\n n_jobs, cv_smm_bopf_results=cv_smm_bopf_results, drop_zero_variance=\n drop_zero_variance, timestamp=timestamp, out_base_bopf_path=\n out_base_bopf_path, wls=wls, wins=wins)\n', (4253, 4565), False, 'from src.cross_validation import cv_mmm_bopf, load_bopf_from_quantity_search\n'), ((4669, 4713), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '__doc__'}), '(description=__doc__)\n', (4692, 4713), False, 'import argparse\n'), ((6280, 6291), 'time.time', 'time.time', ([], {}), '()\n', (6289, 6291), False, 'import time\n'), ((6408, 6468), 'pandas.read_csv', 'pd.read_csv', (['args.multi_quantity_resume_file'], {'index_col': 'None'}), '(args.multi_quantity_resume_file, index_col=None)\n', (6419, 6468), True, 'import pandas as pd\n'), ((7947, 7958), 'time.time', 'time.time', ([], {}), '()\n', (7956, 7958), False, 'import time\n'), ((62, 87), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (77, 87), False, 'import os\n'), ((1765, 1785), 'os.path.exists', 'os.path.exists', (['path'], {}), '(path)\n', (1779, 1785), False, 'import os\n'), ((1795, 1809), 'os.mkdir', 'os.mkdir', (['path'], {}), '(path)\n', (1803, 1809), False, 'import os\n'), ((6218, 
6229), 'multiprocessing.cpu_count', 'cpu_count', ([], {}), '()\n', (6227, 6229), False, 'from multiprocessing import cpu_count\n'), ((4112, 4147), 'numpy.array', 'np.array', (['[4, 6, 8, 10, 14, 18, 25]'], {}), '([4, 6, 8, 10, 14, 18, 25])\n', (4120, 4147), True, 'import numpy as np\n'), ((5774, 5804), 'time.strftime', 'time.strftime', (['"""%Y%m%d-%H%M%S"""'], {}), "('%Y%m%d-%H%M%S')\n", (5787, 5804), False, 'import time\n'), ((4161, 4173), 'numpy.log10', 'np.log10', (['(30)'], {}), '(30)\n', (4169, 4173), True, 'import numpy as np\n'), ((4175, 4189), 'numpy.log10', 'np.log10', (['(1000)'], {}), '(1000)\n', (4183, 4189), True, 'import numpy as np\n')] |
import numpy as np
# for dummy executor
from concurrent.futures import Future, Executor
from threading import Lock
from datetime import date
from hashlib import blake2b
import yaml
import json
from copy import deepcopy
import importlib
import inspect
import datetime
import os
import logging
# ASCII-art banner. Raw string: the art is full of backslashes
# ("\|", "\_", "\ ") that are invalid escape sequences in a normal
# string literal and trigger SyntaxWarning on modern Python. None of
# them is a recognized escape, so the raw prefix leaves the value
# byte-identical.
xopt_logo = r""" _
 | |
 __ _____ _ __ | |_
 \ \/ / _ \| '_ \| __|
 > < (_) | |_) | |_
 /_/\_\___/| .__/ \__|
 | |
 |_|
"""
"""UTC to ISO 8601 with Local TimeZone information without microsecond"""
def isotime():
return datetime.datetime.utcnow().replace(
tzinfo=datetime.timezone.utc).astimezone().replace(
microsecond=0).isoformat()
logger = logging.getLogger(__name__)
# --------------------------------
# Config utilities
def load_config(source):
    """
    Returns a dict loaded from a JSON or YAML file, or string.
    If source is already a dict, just returns the same dict.

    Parameters
    ----------
    source : dict or str
        An already-parsed dict, a path to an existing .json/.yaml file,
        or raw YAML text.

    Returns
    -------
    dict or None
        The parsed configuration. Returns None (after logging an error)
        when source is an existing file with an unrecognized extension.

    Raises
    ------
    Exception
        If source is neither a dict nor a str.
    """
    if isinstance(source, dict):
        logger.info('Loading config from dict.')
        return source
    if isinstance(source, str):
        if os.path.exists(source):
            if source.endswith('.json'):
                logger.info(f'Loading from JSON file: {source}')
                # Context manager closes the handle; the original
                # leaked it via json.load(open(source)).
                with open(source) as f:
                    return json.load(f)
            elif source.endswith('.yaml'):
                logger.info(f'Loading from YAML file: {source}')
                with open(source) as f:
                    return yaml.safe_load(f)
            else:
                # Unknown extension: log and fall through (returns None).
                logger.error(f'Cannot load file {source}')
        else:
            # Not a path: treat the string itself as YAML text.
            logger.info('Loading config from text')
            return yaml.safe_load(source)
    else:
        raise Exception(f'Do not know how to load {source}')
def save_config(data, filename, verbose=True):
    """
    Saves data to a JSON or YAML file, chosen by the filename extension.

    Parameters
    ----------
    data : dict
        Configuration to write.
    filename : str
        Destination path; must end with 'json' or 'yaml'.
    verbose : bool, optional
        If True, log a message after writing.

    Raises
    ------
    ValueError
        If the filename extension is neither json nor yaml.
    """
    if filename.endswith('json'):
        with open(filename, 'w') as f:
            json.dump(data, f, ensure_ascii=True, indent=' ',
                      cls=NpEncoder)
        if verbose:
            logger.info(f'Config written as JSON to {filename}')
    elif filename.endswith('yaml'):
        with open(filename, 'w') as f:
            yaml.dump(data, f, default_flow_style=None, sort_keys=False)
        if verbose:
            logger.info(f'Config written as YAML to {filename}')
    else:
        # The original ended with a bare `raise`, which outside an except
        # block raises RuntimeError('No active exception to re-raise').
        raise ValueError(f'Unknown config file extension: {filename}')
# --------------------------------
# VOCS utilities
def save_vocs(vocs_dict, filePath=None):
    """
    Write VOCS dictionary to a JSON file.
    If no filePath is given, the name is chosen from the 'name' key + '.json'

    Parameters
    ----------
    vocs_dict : dict
        VOCS dictionary; must contain a 'name' key when filePath is None.
    filePath : str, optional
        Destination path. Defaults to vocs_dict['name'] + '.json'.
    """
    if filePath:
        name = filePath
    else:
        name = vocs_dict['name'] + '.json'
    with open(name, 'w') as outfile:
        json.dump(vocs_dict, outfile, ensure_ascii=True, indent=' ')
    # The original called logger.info(name, 'written'), which uses the
    # file name as a %-format string with one spare argument and fails
    # to format whenever the name contains no '%s'.
    logger.info('%s written', name)
def load_vocs(filePath):
    """
    Load VOCS from a JSON file

    Returns a dict
    """
    with open(filePath, 'r') as vocs_file:
        return json.load(vocs_file)
def random_settings(vocs, include_constants=True, include_linked_variables=True):
    """
    Uniform sampling of the variables described in vocs['variables'] = min, max.
    Returns a dict of settings.
    If include_constants, the vocs['constants'] are added to the dict.

    Bug fix: the constants step previously raised KeyError when 'constants'
    was absent from vocs; it now guards membership, consistent with
    random_settings_arrays.
    """
    settings = {}
    for key, val in vocs['variables'].items():
        a, b = val
        x = np.random.random()
        settings[key] = x * a + (1 - x) * b
    # Constants
    if include_constants and 'constants' in vocs and vocs['constants']:
        settings.update(vocs['constants'])
    # Handle linked variables (each one mirrors the value of its source key)
    if include_linked_variables and 'linked_variables' in vocs and vocs[
            'linked_variables']:
        for k, v in vocs['linked_variables'].items():
            settings[k] = settings[v]
    return settings
def random_settings_arrays(vocs, n, include_constants=True,
                           include_linked_variables=True):
    """
    Similar to random_settings, but with arrays of size n.
    Uniform sampling of the variables described in vocs['variables'] = min, max.
    Returns a dict of settings, with each settings as an array.
    If include_constants, the vocs['constants'] are added to the dict as full arrays.
    """
    settings = {}
    for vname, (lo, hi) in vocs['variables'].items():
        frac = np.random.random(n)
        settings[vname] = frac * lo + (1 - frac) * hi
    # Constants become constant-valued arrays of length n.
    if include_constants and 'constants' in vocs and vocs['constants']:
        for cname, cval in vocs['constants'].items():
            settings[cname] = np.full(n, cval)
    # Linked variables copy their source variable's sampled array.
    if include_linked_variables and 'linked_variables' in vocs and vocs[
            'linked_variables']:
        for lname, src in vocs['linked_variables'].items():
            settings[lname] = np.full(n, settings[src])
    return settings
# --------------------------------
# Vector encoding and decoding
# Decode vector to dict
def decode1(vec, labels):
    """Decode a vector into a {label: value} dict (inverse of encode1)."""
    return {label: value for label, value in zip(labels, vec.tolist())}
# encode dict to vector
def encode1(d, labels):
    """Encode dict d as a list of values, ordered by labels."""
    return list(map(d.__getitem__, labels))
# --------------------------------
# Paths
def full_path(path, ensure_exists=True):
    """
    Makes path absolute, expanding environment variables and '~'.
    Can ensure exists.

    Raises
    ------
    FileNotFoundError
        If ensure_exists is True and the resolved path does not exist.
        (The original used `assert`, which is stripped under `python -O`.)
    """
    # expanduser added so '~/...' paths resolve correctly too.
    p = os.path.expanduser(os.path.expandvars(path))
    p = os.path.abspath(p)
    if ensure_exists and not os.path.exists(p):
        raise FileNotFoundError('path does not exist: ' + p)
    return p
def add_to_path(path, prepend=True):
    """
    Add path to $PATH
    """
    p = full_path(path)
    if prepend:
        os.environ['PATH'] = os.pathsep.join([p, os.environ['PATH']])
    else:
        # just append
        os.environ['PATH'] = os.environ['PATH'] + os.pathsep + p
    return p
def expand_paths(nested_dict, suffixes=('_file', '_path', '_bin'), verbose=True,
                 sep=' : ', ensure_exists=False):
    """
    Crawls through a nested dict and expands the path of any key that ends
    with characters in the suffixes list.
    Internally flattens, and unflattens a dict to this using a separator string sep

    Parameters
    ----------
    nested_dict : dict
        Possibly nested configuration dict.
    suffixes : sequence of str
        Key suffixes that mark values as filesystem paths. (Now a tuple: a
        mutable list default argument was an anti-pattern.)
    verbose : bool
        If True, warn about unset or missing paths.
    sep : str
        Separator used internally when flattening nested keys.
    ensure_exists : bool
        Passed through to full_path.
    """
    d = flatten_dict(nested_dict, sep=sep)
    for k, v in d.items():
        # Only the last component of the flattened key is suffix-checked.
        k2 = k.split(sep)
        if len(k2) == 1:
            k2 = k2[0]
        else:
            k2 = k2[-1]
        if any([k2.endswith(x) for x in suffixes]):
            if not v:
                if verbose:
                    logger.warning(f'Warning: No path set for key {k}')
                continue
            if not isinstance(v, str):
                # Not a path
                continue
            file = full_path(v, ensure_exists=ensure_exists)
            if os.path.exists(file):
                d[k] = file
            else:
                if verbose:
                    logger.warning(f'Warning: Path {v} does not exist for key {k}')
    return unflatten_dict(d, sep=sep)
# --------------------------------
# filenames
def new_date_filename(prefix='', suffix='.json', path=''):
    """
    Gets a filename that doesn't exist based on the date
    Example:
        new_date_filename('sample-', '.json', '.')
    Returns:
        './sample-2020-02-09-1.json'
    """
    counter = 1
    # Keep bumping the counter until an unused filename is found.
    candidate = os.path.join(path, f'{prefix}{date.today()}-{counter}{suffix}')
    while os.path.exists(candidate):
        counter += 1
        candidate = os.path.join(path, f'{prefix}{date.today()}-{counter}{suffix}')
    return candidate
# --------------------------------
# h5 utils
def write_attrs(h5, group_name, data):
    """
    Create a group named group_name under h5 and write each dict entry
    as an attribute on it. Returns the new group.
    """
    group = h5.create_group(group_name)
    for key, value in data.items():
        group.attrs[key] = value
    return group
def write_attrs_nested(h5, name, data):
    """
    Recursive routine to write nested dicts to attributes in a group with name 'name'
    """
    # Strict `type(...) == dict` is preserved: dict subclasses (e.g.
    # OrderedDict) are written as plain attribute values, not recursed.
    if type(data) == dict:
        sub = h5.create_group(name)
        for key, value in data.items():
            write_attrs_nested(sub, key, value)
    else:
        h5.attrs[name] = data
# --------------------------------
# data fingerprinting
class NpEncoder(json.JSONEncoder):
    """
    JSON encoder that maps NumPy scalar and array types onto their native
    Python equivalents.

    See: https://stackoverflow.com/questions/50916422/python-typeerror-object-of-type-int64-is-not-json-serializable/50916741
    """
    def default(self, obj):
        # Convert NumPy types; defer everything else to the base class,
        # which raises TypeError for unserializable objects.
        if isinstance(obj, np.integer):
            return int(obj)
        if isinstance(obj, np.floating):
            return float(obj)
        if isinstance(obj, np.ndarray):
            return obj.tolist()
        return super().default(obj)
def fingerprint(keyed_data, digest_size=16):
    """
    Creates a cryptographic fingerprint from keyed data.
    Uses JSON dumps to form strings, and the blake2b algorithm to hash.

    Parameters
    ----------
    keyed_data : dict
        Data to hash; values must be JSON-serializable (NpEncoder handles
        NumPy types).
    digest_size : int
        Digest length in bytes; the returned hex string is twice this long.

    Returns
    -------
    str
        Hex digest.
    """
    # Bug fix: digest_size was hard-coded to 16, silently ignoring the
    # parameter.
    h = blake2b(digest_size=digest_size)
    for key in keyed_data:
        val = keyed_data[key]
        s = json.dumps(val, sort_keys=True, cls=NpEncoder).encode()
        h.update(s)
    return h.hexdigest()
# --------------------------------
# nested dict flattening, unflattening
def flatten_dict(dd, sep=':', prefix=''):
    """
    Flattens a nested dict into a single dict, with keys concatenated with sep.
    Similar to pandas.io.json.json_normalize
    Example:
        A dict of dicts:
            dd = {'a':{'x':1}, 'b':{'d':{'y':3}}}
            flatten_dict(dd, prefix='Z')
        Returns: {'Z:a:x': 1, 'Z:b:d:y': 3}
    """
    # Base case: empty dicts and non-dict leaves map to a single entry.
    if not (dd and isinstance(dd, dict)):
        return {prefix: dd}
    flat = {}
    for kk, vv in dd.items():
        for k, v in flatten_dict(vv, sep, kk).items():
            flat[prefix + sep + k if prefix else k] = v
    return flat
def unflatten_dict(d, sep=':', prefix=''):
    """
    Inverse of flatten_dict. Forms a nested dict.
    """
    nested = {}
    for flat_key, value in d.items():
        # Strip the leading "prefix<sep>" if present.
        if flat_key.startswith(prefix + sep):
            flat_key = flat_key[len(prefix + sep):]
        parts = flat_key.split(sep)
        node = nested
        for part in parts[:-1]:
            node = node.setdefault(part, {})
        node[parts[-1]] = value
    return nested
def update_nested_dict(d, settings, verbose=False):
    """
    Updates a nested dict with flattened settings
    """
    flat = flatten_dict(d)
    for key, value in settings.items():
        if verbose:
            if key in flat:
                logger.info(f'Replacing param {key} with value {value}')
            else:
                logger.info(f'New param {key} with value {value}')
        flat[key] = value
    return unflatten_dict(flat)
# --------------------------------
# Function manipulation
def get_function(name):
    """
    Returns a function from a fully qualified name or global name.
    """
    # Already a function/callable: nothing to resolve.
    if callable(name):
        return name
    if not isinstance(name, str):
        raise ValueError(f'{name} must be callable or a string.')
    module_globals = globals()
    if name in module_globals:
        if not callable(module_globals[name]):
            raise ValueError(f'global {name} is not callable')
        return module_globals[name]
    if '.' not in name:
        raise Exception(f'function {name} does not exist')
    # Fully qualified name: import the module and fetch the attribute.
    m_name, f_name = name.rsplit('.', 1)
    module = importlib.import_module(m_name)
    return getattr(module, f_name)
def get_function_defaults(f):
    """
    Return a dict of the POSITIONAL_OR_KEYWORD parameters of f that have
    a default value. See the `inspect` documentation for details.
    """
    params = inspect.signature(f).parameters
    return {
        pname: p.default
        for pname, p in params.items()
        if p.kind == inspect.Parameter.POSITIONAL_OR_KEYWORD
        and p.default != inspect.Parameter.empty
    }
def get_n_required_fuction_arguments(f):
    """
    Counts the number of required function arguments using the `inspect` module.
    """
    required = [
        p for p in inspect.signature(f).parameters.values()
        if p.kind == inspect.Parameter.POSITIONAL_OR_KEYWORD
        and p.default == inspect.Parameter.empty
    ]
    return len(required)
# Dummy executor
class DummyExecutor(Executor):
    """
    Dummy executor: runs submitted callables synchronously in the caller's
    thread, wrapping the outcome in an already-resolved Future.
    From: https://stackoverflow.com/questions/10434593/dummyexecutor-for-pythons-futures
    """
    def __init__(self):
        self._shutdown = False
        self._shutdownLock = Lock()

    def submit(self, fn, *args, **kwargs):
        with self._shutdownLock:
            if self._shutdown:
                raise RuntimeError('cannot schedule new futures after shutdown')
            future = Future()
            try:
                result = fn(*args, **kwargs)
            except BaseException as exc:
                future.set_exception(exc)
            else:
                future.set_result(result)
            return future

    def shutdown(self, wait=True):
        with self._shutdownLock:
            self._shutdown = True
| [
"yaml.dump",
"json.dumps",
"datetime.datetime.utcnow",
"yaml.safe_load",
"hashlib.blake2b",
"os.path.join",
"numpy.full",
"concurrent.futures.Future",
"os.path.abspath",
"os.path.exists",
"threading.Lock",
"inspect.signature",
"json.dump",
"importlib.import_module",
"datetime.date.today"... | [((720, 747), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (737, 747), False, 'import logging\n'), ((5269, 5293), 'os.path.expandvars', 'os.path.expandvars', (['path'], {}), '(path)\n', (5287, 5293), False, 'import os\n'), ((5302, 5320), 'os.path.abspath', 'os.path.abspath', (['p'], {}), '(p)\n', (5317, 5320), False, 'import os\n'), ((8728, 8751), 'hashlib.blake2b', 'blake2b', ([], {'digest_size': '(16)'}), '(digest_size=16)\n', (8735, 8751), False, 'from hashlib import blake2b\n'), ((1129, 1151), 'os.path.exists', 'os.path.exists', (['source'], {}), '(source)\n', (1143, 1151), False, 'import os\n'), ((2753, 2814), 'json.dump', 'json.dump', (['vocs_dict', 'outfile'], {'ensure_ascii': '(True)', 'indent': '""" """'}), "(vocs_dict, outfile, ensure_ascii=True, indent=' ')\n", (2762, 2814), False, 'import json\n'), ((2990, 3002), 'json.load', 'json.load', (['f'], {}), '(f)\n', (2999, 3002), False, 'import json\n'), ((3405, 3423), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (3421, 3423), True, 'import numpy as np\n'), ((4362, 4381), 'numpy.random.random', 'np.random.random', (['n'], {}), '(n)\n', (4378, 4381), True, 'import numpy as np\n'), ((5358, 5375), 'os.path.exists', 'os.path.exists', (['p'], {}), '(p)\n', (5372, 5375), False, 'import os\n'), ((7238, 7262), 'os.path.join', 'os.path.join', (['path', 'name'], {}), '(path, name)\n', (7250, 7262), False, 'import os\n'), ((7274, 7294), 'os.path.exists', 'os.path.exists', (['file'], {}), '(file)\n', (7288, 7294), False, 'import os\n'), ((12448, 12454), 'threading.Lock', 'Lock', ([], {}), '()\n', (12452, 12454), False, 'from threading import Lock\n'), ((1628, 1650), 'yaml.safe_load', 'yaml.safe_load', (['source'], {}), '(source)\n', (1642, 1650), False, 'import yaml\n'), ((1952, 2017), 'json.dump', 'json.dump', (['data', 'f'], {'ensure_ascii': '(True)', 'indent': '""" """', 'cls': 'NpEncoder'}), "(data, f, ensure_ascii=True, indent=' ', 
cls=NpEncoder)\n", (1961, 2017), False, 'import json\n'), ((4592, 4605), 'numpy.full', 'np.full', (['n', 'v'], {}), '(n, v)\n', (4599, 4605), True, 'import numpy as np\n'), ((4819, 4842), 'numpy.full', 'np.full', (['n', 'settings[v]'], {}), '(n, settings[v])\n', (4826, 4842), True, 'import numpy as np\n'), ((6602, 6622), 'os.path.exists', 'os.path.exists', (['file'], {}), '(file)\n', (6616, 6622), False, 'import os\n'), ((11192, 11223), 'importlib.import_module', 'importlib.import_module', (['m_name'], {}), '(m_name)\n', (11215, 11223), False, 'import importlib\n'), ((12661, 12669), 'concurrent.futures.Future', 'Future', ([], {}), '()\n', (12667, 12669), False, 'from concurrent.futures import Future, Executor\n'), ((2211, 2271), 'yaml.dump', 'yaml.dump', (['data', 'f'], {'default_flow_style': 'None', 'sort_keys': '(False)'}), '(data, f, default_flow_style=None, sort_keys=False)\n', (2220, 2271), False, 'import yaml\n'), ((7190, 7202), 'datetime.date.today', 'date.today', ([], {}), '()\n', (7200, 7202), False, 'from datetime import date\n'), ((8821, 8867), 'json.dumps', 'json.dumps', (['val'], {'sort_keys': '(True)', 'cls': 'NpEncoder'}), '(val, sort_keys=True, cls=NpEncoder)\n', (8831, 8867), False, 'import json\n'), ((11560, 11580), 'inspect.signature', 'inspect.signature', (['f'], {}), '(f)\n', (11577, 11580), False, 'import inspect\n'), ((11984, 12004), 'inspect.signature', 'inspect.signature', (['f'], {}), '(f)\n', (12001, 12004), False, 'import inspect\n'), ((578, 604), 'datetime.datetime.utcnow', 'datetime.datetime.utcnow', ([], {}), '()\n', (602, 604), False, 'import datetime\n')] |
'''
Pack64 is a vector encoding using a kind-of-floating-point, kind-of-base64
representation requiring only 3 bytes per vector entry. This Python module
provides functions for encoding and decoding pack64 vectors.
'''
__all__ = ['pack64', 'unpack64']
import math
import numpy as np
# CHARS is a bytestring of the 64 characters in the encoding, in order.
CHARS = b'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789-_'
# Make lookup tables for those characters.
# DIGIT_TO_CHAR maps the numbers 0-63 to those characters.
DIGIT_TO_CHAR = np.frombuffer(CHARS, dtype=np.uint8)
# CHAR_TO_DIGIT maps byte values of those characters to the numbers 0-63.
# Other byte values are mapped to -1, and do not appear in VALID_CHARS.
# Bug fix: dtype was `np.int`, a deprecated alias removed in NumPy 1.24;
# the builtin `int` yields the same platform-default integer dtype.
CHAR_TO_DIGIT = np.full((128,), -1, dtype=int)
CHAR_TO_DIGIT[DIGIT_TO_CHAR] = np.arange(64)
VALID_CHARS = set(CHARS)
# Compute a small value that we use in determining what the exponent should
# be (more on this in a moment):
EPSILON = (2.0 ** 17 - 0.5) * 2.0 ** -40
# EPSILON is used to determine the biased exponent emitted by pack64(). The
# largest integer part that can be emitted is (2 ** 17 - 1), and hence the
# largest number that can be emitted with a biased exponent of zero is
# (2 ** 17 - 1) * (2 ** -40). However, numbers slightly larger than this still
# round down to this when encoded. The smallest positive number that (rounded)
# requires a biased exponent of 1 is EPSILON. By extension, the smallest
# number that requires a biased exponent of 2 is 2 * EPSILON, and so on.
# Thus, the biased exponent that should be used can be found by applying the
# following rule to the magnitude L of the largest vector entry:
# If L is less than... Set the biased exponent to ...
# EPSILON 0
# 2 * EPSILON 1
# 4 * EPSILON 2
# ... ...
# Or, put another way:
# If L / EPSILON is less than... Set the biased exponent to...
# 1 (i.e. 2 ** 0) 0
# 2 (i.e. 2 ** 1) 1
# 4 (i.e. 2 ** 2) 2
# ... ...
# So the biased exponent should be the smallest nonnegative integer e such that
# L / EPSILON == m * (2 ** e)
# with m < 1. This is exactly the computation provided by math.frexp(),
# except that we have to handle "nonnegative" ourselves.
# Because we do not check directly that the packed values fit in 18 bits, a
# rounding error in this calculation could cause a large error in the result.
# Fortunately, the values of M where we need to transition between exponents
# are all power-of-two multiples of EPSILON, which are represented exactly and
# produce exact results when divided by EPSILON and passed to frexp().
# Finally, note that this calculation ignores the availablity of 'gAA' for
# -(2 ** 17). Using this string would allow us to encode a small number of
# vectors with greater precision, but it doesn't seem worth the effort.
def pack64(vector):
    '''
    Encode the given vector, returning a string. Accepts any object that can
    be converted to a NumPy float array.

    Raises ValueError for NaN/inf entries and OverflowError when the largest
    magnitude cannot be represented with a 6-bit biased exponent.
    '''
    if not len(vector):
        return 'A'
    vector = np.asarray(vector)
    largest_entry = np.max(np.abs(vector))
    if not np.isfinite(largest_entry):
        raise ValueError('Vector contains an invalid value.')
    # See module comments: smallest e >= 0 with L/EPSILON = m * 2**e, m < 1.
    biased_exponent = max(math.frexp(float(largest_entry) / EPSILON)[1], 0)
    if biased_exponent > 63:
        raise OverflowError('Vector has an entry too large to encode.')
    # Bug fix: `np.int` was removed in NumPy 1.24; np.int64 holds the
    # (at most 18-bit) packed values exactly.
    values = np.round(vector * 0.5 ** (biased_exponent - 40)).astype(np.int64)
    digits = np.empty((3 * len(values) + 1,), dtype=np.int64)
    digits[0] = biased_exponent
    digits[1::3] = values >> 12
    digits[2::3] = values >> 6
    digits[3::3] = values
    digits &= 63
    return DIGIT_TO_CHAR[digits].tobytes().decode('ascii')
def unpack64(string, check=True):
    '''
    Decode the given string, returning a NumPy array of dtype float32.
    Optionally pass check=False to disable input validation, for circumstances
    where you are sure the input is a properly packed vector.
    '''
    bstring = string if isinstance(string, bytes) else string.encode('ascii')
    if check:
        bad_length = len(bstring) % 3 != 1
        if bad_length or not VALID_CHARS.issuperset(bstring):
            raise ValueError('Cannot decode string %r' % string)
    digits = CHAR_TO_DIGIT[np.frombuffer(bstring, dtype=np.uint8)]
    values = (digits[1::3] << 12) + (digits[2::3] << 6) + digits[3::3]
    # Sign-extend the 18-bit two's-complement values.
    values -= (values >> 17) << 18
    return values.astype(np.float32) * 2.0 ** (digits[0] - 40)
| [
"numpy.full",
"numpy.abs",
"numpy.frombuffer",
"numpy.asarray",
"numpy.isfinite",
"numpy.arange",
"numpy.round"
] | [((555, 591), 'numpy.frombuffer', 'np.frombuffer', (['CHARS'], {'dtype': 'np.uint8'}), '(CHARS, dtype=np.uint8)\n', (568, 591), True, 'import numpy as np\n'), ((755, 788), 'numpy.full', 'np.full', (['(128,)', '(-1)'], {'dtype': 'np.int'}), '((128,), -1, dtype=np.int)\n', (762, 788), True, 'import numpy as np\n'), ((820, 833), 'numpy.arange', 'np.arange', (['(64)'], {}), '(64)\n', (829, 833), True, 'import numpy as np\n'), ((3227, 3245), 'numpy.asarray', 'np.asarray', (['vector'], {}), '(vector)\n', (3237, 3245), True, 'import numpy as np\n'), ((3274, 3288), 'numpy.abs', 'np.abs', (['vector'], {}), '(vector)\n', (3280, 3288), True, 'import numpy as np\n'), ((3301, 3327), 'numpy.isfinite', 'np.isfinite', (['largest_entry'], {}), '(largest_entry)\n', (3312, 3327), True, 'import numpy as np\n'), ((4447, 4485), 'numpy.frombuffer', 'np.frombuffer', (['bstring'], {'dtype': 'np.uint8'}), '(bstring, dtype=np.uint8)\n', (4460, 4485), True, 'import numpy as np\n'), ((3582, 3630), 'numpy.round', 'np.round', (['(vector * 0.5 ** (biased_exponent - 40))'], {}), '(vector * 0.5 ** (biased_exponent - 40))\n', (3590, 3630), True, 'import numpy as np\n')] |
# Debug/sanity-check script: deterministically re-initializes selected
# StyledGenerator parameter groups (z_to_w, progression.*, to_rgb.*) and
# runs one forward pass so the output can be compared across code changes.
# NOTE(review): depends on the project-local `model` package via the '../'
# sys.path hack — presumably run from a subdirectory next to model/; confirm.
import sys
sys.path.append('../')
from model import StyledGenerator, Discriminator
import torch
import numpy as np
generator = StyledGenerator(flame_dim=159,
                            all_stage_discrim=False,
                            embedding_vocab_size=70_000,
                            rendered_flame_ascondition=False,
                            inst_norm=True,
                            normal_maps_as_cond=True,
                            core_tensor_res=4,
                            use_styled_conv_stylegan2=True,
                            n_mlp=8)
# set all weights to 1s
mdl_state = generator.state_dict()
# Fixed seed so the torch.randn() re-initializations below are reproducible.
torch.manual_seed(2)
# tot_params = 0
# for name in mdl_state:
#     if name.find('z_to_w') >= 0 or name.find('generator') >= 0 and name.find('embd') < 0 and \
#             name.find('to_rgb.8') < 0 and name.find('to_rgb.7') < 0 and name.find('progression.8') < 0 \
#             and name.find('progression.7') < 0:
#         print(name)
#         mdl_state[name] = mdl_state[name] * 0 + torch.randn(mdl_state[name].shape)
#         tot_params += np.prod(mdl_state[name].shape)
#     else:
#         mdl_state[name] = mdl_state[name] * 0 + 6e-3
#
# print(f'Total set params are: {tot_params}')
# Pass 1: randomize the z_to_w mapping network; everything else is set to
# the constant 6e-3.
tot_params = 0
for name in mdl_state:
    if name.find('z_to_w') >= 0:
        print(name)
        mdl_state[name] = mdl_state[name] * 0 + torch.randn(mdl_state[name].shape)
        tot_params += np.prod(mdl_state[name].shape)
    else:
        mdl_state[name] = mdl_state[name] * 0 + 6e-3
print(f'Total set params are: {tot_params} \n\n\n\n\n')
# Pass 2: randomize progression stages 0-6 and count their parameters.
tot_params = 0
for i in range(7):
    for name in mdl_state:
        if name.find(f'progression.{i}.') >= 0:
            mdl_state[name] = mdl_state[name] * 0 + torch.randn(mdl_state[name].shape)
            tot_params += np.prod(mdl_state[name].shape)
            print(f'{name} : {mdl_state[name].shape}; params this layer: {np.prod(mdl_state[name].shape)}')
        # else:
        #     mdl_state[name] = mdl_state[name] * 0 + 6e-3
print(f'Total set params are: {tot_params} \n\n\n\n\n')
# Pass 3: randomize to_rgb heads 0-6 and count their parameters.
tot_params = 0
for i in range(7):
    for name in mdl_state:
        if name.find(f'to_rgb.{i}') >= 0:
            mdl_state[name] = mdl_state[name] * 0 + torch.randn(mdl_state[name].shape)
            tot_params += np.prod(mdl_state[name].shape)
            print(f'{name} : {mdl_state[name].shape}; params this layer: {np.prod(mdl_state[name].shape)}')
        # else:
        #     mdl_state[name] = mdl_state[name] * 0 + 6e-3
print(f'Total set params are: {tot_params} \n\n\n\n\n')
# Load the doctored weights back and run a single deterministic forward pass.
generator.load_state_dict(mdl_state)
input_indices = torch.zeros((1, ), dtype=torch.long)
flm_rndr = torch.zeros((1, 3, 4, 4))
torch.manual_seed(2)
forward_pass_gen = generator(flm_rndr, pose=None, step=6, alpha=1, input_indices=input_indices)
print(forward_pass_gen)
print(forward_pass_gen[0].shape)
# for param in generator.parameters():
#     print(param)
"sys.path.append",
"torch.manual_seed",
"torch.randn",
"torch.zeros",
"model.StyledGenerator",
"numpy.prod"
] | [((11, 33), 'sys.path.append', 'sys.path.append', (['"""../"""'], {}), "('../')\n", (26, 33), False, 'import sys\n'), ((128, 360), 'model.StyledGenerator', 'StyledGenerator', ([], {'flame_dim': '(159)', 'all_stage_discrim': '(False)', 'embedding_vocab_size': '(70000)', 'rendered_flame_ascondition': '(False)', 'inst_norm': '(True)', 'normal_maps_as_cond': '(True)', 'core_tensor_res': '(4)', 'use_styled_conv_stylegan2': '(True)', 'n_mlp': '(8)'}), '(flame_dim=159, all_stage_discrim=False,\n embedding_vocab_size=70000, rendered_flame_ascondition=False, inst_norm\n =True, normal_maps_as_cond=True, core_tensor_res=4,\n use_styled_conv_stylegan2=True, n_mlp=8)\n', (143, 360), False, 'from model import StyledGenerator, Discriminator\n'), ((634, 654), 'torch.manual_seed', 'torch.manual_seed', (['(2)'], {}), '(2)\n', (651, 654), False, 'import torch\n'), ((2602, 2637), 'torch.zeros', 'torch.zeros', (['(1,)'], {'dtype': 'torch.long'}), '((1,), dtype=torch.long)\n', (2613, 2637), False, 'import torch\n'), ((2650, 2675), 'torch.zeros', 'torch.zeros', (['(1, 3, 4, 4)'], {}), '((1, 3, 4, 4))\n', (2661, 2675), False, 'import torch\n'), ((2677, 2697), 'torch.manual_seed', 'torch.manual_seed', (['(2)'], {}), '(2)\n', (2694, 2697), False, 'import torch\n'), ((1422, 1452), 'numpy.prod', 'np.prod', (['mdl_state[name].shape'], {}), '(mdl_state[name].shape)\n', (1429, 1452), True, 'import numpy as np\n'), ((1365, 1399), 'torch.randn', 'torch.randn', (['mdl_state[name].shape'], {}), '(mdl_state[name].shape)\n', (1376, 1399), False, 'import torch\n'), ((1796, 1826), 'numpy.prod', 'np.prod', (['mdl_state[name].shape'], {}), '(mdl_state[name].shape)\n', (1803, 1826), True, 'import numpy as np\n'), ((2276, 2306), 'numpy.prod', 'np.prod', (['mdl_state[name].shape'], {}), '(mdl_state[name].shape)\n', (2283, 2306), True, 'import numpy as np\n'), ((1735, 1769), 'torch.randn', 'torch.randn', (['mdl_state[name].shape'], {}), '(mdl_state[name].shape)\n', (1746, 1769), False, 'import torch\n'), 
((2215, 2249), 'torch.randn', 'torch.randn', (['mdl_state[name].shape'], {}), '(mdl_state[name].shape)\n', (2226, 2249), False, 'import torch\n'), ((1901, 1931), 'numpy.prod', 'np.prod', (['mdl_state[name].shape'], {}), '(mdl_state[name].shape)\n', (1908, 1931), True, 'import numpy as np\n'), ((2381, 2411), 'numpy.prod', 'np.prod', (['mdl_state[name].shape'], {}), '(mdl_state[name].shape)\n', (2388, 2411), True, 'import numpy as np\n')] |
import numpy as np
import torch
import time
import tqdm
import datetime
# from torchvision.utils import make_grid
from pkg_resources import parse_version
from base import BasePCTrainer
from torch.nn.modules.batchnorm import _BatchNorm
from pointcloud_utils.pointcloud_vis import *
from pointcloud_utils.iou_metric import *
from pointcloud_utils.iou_metric import PointCloudIOU
import os
from pointcloud_utils import pointcloud_vis
def my_timer(prev_tic, name, timings):
    """Store the seconds elapsed since prev_tic under timings[name];
    return the (mutated) timings dict and a fresh tic."""
    timings[name] = time.time() - prev_tic
    fresh_tic = time.time()
    return timings, fresh_tic
class AverageMeter(object):
    """Computes and stores the average and current value"""

    def __init__(self):
        self.reset()

    def reset(self):
        """Clear all accumulated statistics."""
        self.val = 0
        self.avg = 0
        self.sum = 0
        self.count = 0

    def update(self, val, n=1):
        """Record `val` observed `n` times and refresh the running average."""
        self.val = val
        self.count = self.count + n
        self.sum = self.sum + val * n
        self.avg = self.sum / self.count
class PCTrainer(BasePCTrainer):
    """
    Trainer class for point-cloud correspondence learning.

    Note:
        Inherited from BasePCTrainer, which supplies the device, logger,
        writer, verbosity and checkpointing behaviour.
    """
    def __init__(
            self,
            model,
            loss,
            metrics,
            optimizer,
            resume,
            config,
            data_loader,
            valid_data_loader=None,
            lr_scheduler=None,
            mini_train=False,
            check_bn_working=False,
            **kwargs,
    ):
        super().__init__(model, loss, metrics, optimizer, resume, config)
        self.config = config
        self.data_loader = data_loader
        self.valid_data_loader = valid_data_loader
        self.do_validation = self.valid_data_loader is not None
        self.lr_scheduler = lr_scheduler
        # Log roughly five times per epoch, but at least every batch.
        self.log_step = max(1, int(len(self.data_loader) / 5.))
        self.mini_train = mini_train
        self.check_bn_working = check_bn_working
        self.loss_args = config.get('loss_args', {})
        print('Loss args', self.loss_args)
        assert self.lr_scheduler.optimizer is self.optimizer
        assert self.start_epoch >= 1
        # Inherited from the codebase: when resuming, fast-forward the LR
        # scheduler to the epoch preceding the restart epoch.
        if self.start_epoch != 1:
            self.lr_scheduler.step(self.start_epoch - 2)
        if parse_version(torch.__version__) <= parse_version("1.0.0"):
            assert self.lr_scheduler.last_epoch == self.start_epoch - 2
        # Handle segmentation metrics separately, since the accumulation cannot be
        # done directly via an AverageMeter
        self.log_miou = self.config["trainer"].get("log_miou", False)

    def _eval_metrics(self, output, target):
        """Evaluate every configured metric, log each to the writer, and
        return a numpy array with one value per metric."""
        acc_metrics = np.zeros(len(self.metrics))
        for i, metric in enumerate(self.metrics):
            acc_metrics[i] += metric(
                output,
                target,
                self.data_loader.dataset,
                self.config
            )
            self.writer.add_scalar(f'{metric.__name__}', acc_metrics[i])
        return acc_metrics

    def _visualize_examples(self):
        """Pick dataset indices to visualize this epoch.

        Returns an index array (always including 0, 1, 2) when visualization
        is enabled in the config, otherwise None.
        """
        batch_vis_idx = None
        if self.config.get('visualize', False):
            num_vis = self.config['visualize']['num_examples']
            # Sample from [0, len-3) then shift by +3 so appending [0, 1, 2]
            # cannot produce duplicates.
            batch_vis_idx = np.random.choice(len(self.data_loader.dataset)-3, num_vis, replace=False)
            batch_vis_idx = np.concatenate([batch_vis_idx+3, [0, 1, 2]])  # avoid replacement
            print("batch_vis_idx: ", batch_vis_idx)
        return batch_vis_idx

    def _train_epoch(self, epoch):
        """
        Training logic for an epoch
        :param epoch: Current training epoch.
        :return: A log that contains all information you want to save.
        Note:
            If you have additional information to record, for example:
                > additional_log = {"x": x, "y": y}
            merge it with log before return. i.e.
                > log = {**log, **additional_log}
                > return log
            The metrics in log must have the key 'metrics'.
        """
        self.model.train()
        train_tic = time.time()
        avg_loss = AverageMeter()
        avg_total_loss = AverageMeter()
        avg_perm_loss = AverageMeter()
        avg_matchsize = AverageMeter()
        total_metrics = [AverageMeter() for a in range(len(self.metrics))]
        seen_tic = time.time()
        seen = 0
        profile = self.config["profile"]
        total_batches = len(self.data_loader)
        # visualization state
        pc_visualize = False
        fname_vis = None
        batch_vis_idx = self._visualize_examples()
        if profile:
            # Bug fix: `timings` was read in the profiling branch below but
            # never initialised, raising NameError when profiling was on.
            timings = {}
            batch_tic = time.time()
        ## loop
        for batch_idx, batch in enumerate(self.data_loader):
            data, meta = batch["data"], batch["meta"]
            data_indices = meta["index"]
            print(f"index: {data_indices}")
            data = data.to(self.device)
            seen_batch = data.shape[0]
            if batch_vis_idx is not None and np.sum(batch_vis_idx == batch_idx) == 1:
                pc_visualize = True
            ## check this?
            seen += seen_batch//2
            self.optimizer.zero_grad()
            with torch.autograd.set_detect_anomaly(True):
                output = self.model(data)
            ## visualize the correspondence matrix as image
            if pc_visualize is True:
                parent_dir = self.config.result_dir
                fname_vis = "vis_example_"+str(epoch)+"_"+str(batch_idx)+"_"
                fname_vis = os.path.join(parent_dir, fname_vis)
            ## compute loss
            output_loss, output_info = self.loss(output, meta, epoch, fname_vis=fname_vis)
            for name, iter_loss in output_loss.items():
                print(name, iter_loss)
            for name, iter_info in output_info.items():
                print(name, iter_info)
            total_loss = output_loss['total_loss']
            loss = output_loss['cycle_loss']
            pc_visualize = False
            fname_vis = None
            ## backward pass
            total_loss.backward()
            self.optimizer.step()
            avg_loss.update(loss.item(), data.size(0))
            avg_total_loss.update(total_loss.item(), data.size(0))
            avg_perm_loss.update(output_loss['perm_loss'].item(), data.size(0))
            avg_matchsize.update(output_info['correct_match'], data.size(0))
            for i, m in enumerate(self._eval_metrics(output, meta)):
                total_metrics[i].update(m, data.size(0))
            if self.verbosity >= 2 and batch_idx % self.log_step == 0:
                toc = time.time() - seen_tic
                rate = max(seen / toc, 1E-5)
                tic = time.time()
                msg = "Train Epoch: {} [{}/{} ({:.0f}%)] Loss: {:.6f} "
                msg += "Hz: {:.2f}, ETA: {}"
                batches_left = total_batches - batch_idx
                remaining = batches_left * self.data_loader.batch_size / rate
                eta_str = str(datetime.timedelta(seconds=remaining))
                self.logger.info(
                    msg.format(
                        epoch,
                        batch_idx * self.data_loader.batch_size,
                        len(self.data_loader.dataset),
                        100.0 * batch_idx / len(self.data_loader),
                        loss.item(),
                        rate,
                        eta_str
                    )
                )
                self.logger.info(f"index: {data_indices}")
            # Free per-batch tensors before the next iteration.
            del data
            del loss
            del output
            del total_loss
            output_loss.clear()
            output_info.clear()
            torch.cuda.empty_cache()
            if profile:
                timings["minibatch"] = time.time() - batch_tic
                batch_tic = time.time()
                print("==============")
                for key in timings:
                    ratio = 100 * timings[key] / timings["minibatch"]
                    msg = "{:.3f} ({:.2f}%) >>> {}"
                    print(msg.format(timings[key], ratio, key))
                print("==============")
            if self.mini_train and batch_idx > 3:
                self.logger.info("Mini training: exiting epoch early...")
                break
        log = {'loss': avg_loss.avg, 'metrics': [a.avg for a in total_metrics]}
        log = {**log,
               'total_loss': avg_total_loss.avg,
               'perm_loss': avg_perm_loss.avg,
               'match_size': avg_matchsize.avg,
               }
        duration = time.strftime('%Hh%Mm%Ss', time.gmtime(time.time() - train_tic))
        print(f"training epoch took {duration}")
        if self.lr_scheduler is not None:
            self.lr_scheduler.step(epoch - 1)
        return log

    def _segmentation_valid_epoch(self, epoch):
        """Validate part-segmentation quality via label transfer.

        A few auxiliary reference shapes are labelled; every validation shape
        is labelled by feature-space correspondence to them and scored with
        shape / category mean IoU.
        """
        self.logger.info(f"Running segmentation validation for epoch {epoch}")
        self.model.eval()
        mean_shape_IOU_meter = AverageMeter()
        intersection_meter = AverageMeter()
        union_meter = AverageMeter()
        n_cls = self.config["dataset"]["num_lbl_classes"]
        n_pts = self.config["dataset"]["args"]["num_points"]
        print("n_cls: ", n_cls)
        iou_eval = PointCloudIOU(n_cls)
        seed = 0
        torch.manual_seed(seed)
        with torch.no_grad():
            # Select K reference models as a batch, compute their features,
            # and reshape them for pointwise correspondence.
            tic = time.time()
            K = self.config["evaluation"].get("num_refs", 4)
            temperature = self.config["evaluation"].get("temperature")
            early_stop_num = 15
            rand_indices = torch.randperm(self.valid_data_loader.dataset.size())[0:K]
            self.logger.info(f"auxiliary shape indices: {rand_indices}")
            aux_models = self.valid_data_loader.dataset.get_data_with_indices(rand_indices)
            aux_data, aux_meta = aux_models["data"], aux_models["meta"]
            aux_lbls = aux_meta["label"]
            # compute features
            aux_data = aux_data.to(self.device)
            aux_feat = self.model(aux_data)  # model outputs descriptors(desc)
            Ba, N, C = aux_feat.shape
            aux_data = aux_data.view(Ba*N, -1)
            aux_feat = aux_feat.contiguous().view(Ba*N, -1)
            aux_lbls = aux_lbls.view(Ba*N, -1)

            def _compute_vis_offset(num_instance, layout="cyclic", distance=1.0, extra_offset=[0.0, 0.0, 0.0]):
                # Spread the Ba auxiliary shapes on a circle for visualization.
                distance = float(distance)
                # Bug fix: `layout is "cyclic"` compared identity of string
                # literals (interning-dependent); use equality.
                if layout == "cyclic":
                    rads = np.linspace(0.0, 2.0*np.pi, Ba, endpoint=False)
                    multiplier = np.zeros((Ba, 3))
                    cos_rads, sin_rads = np.cos(rads), np.sin(rads)
                    multiplier[:, 1] = sin_rads
                    multiplier[:, 2] = cos_rads
                else:
                    # Bug fix: raising a plain string is a TypeError in Py3.
                    raise ValueError("no other layout type is supported for now")
                aux_offsets = torch.from_numpy(multiplier)*distance
                aux_offsets = aux_offsets.type(torch.FloatTensor)
                aux_offsets -= torch.tensor(extra_offset)
                return aux_offsets

            aux_offsets = _compute_vis_offset(num_instance=Ba, layout="cyclic", distance=2.5, extra_offset=[0.5, 0.0, 0.0])
            xyzl_as = aux_data.cpu()
            xyzl_as = xyzl_as.view(Ba, N, -1)
            xyzl_as[:, :, :3] = xyzl_as[:, :, :3] + aux_offsets[:, None, :]
            xyzl_as = xyzl_as.view(Ba*N, -1)
            vis_data_aux = {"xyz": xyzl_as, "gt_lbls": aux_lbls}
            # evaluation
            mean_shape_IoU_list = []
            print("load validation data: ", time.time() - tic)
            tic = time.time()
            for i, batch in enumerate(self.valid_data_loader):
                if (self.config["evaluation"].get("mini_eval", False) and i == early_stop_num):
                    print("Early stop as mini_eval is used")
                    break
                data, meta = batch["data"], batch["meta"]
                gt_lbls = meta["label"]
                data = data.to(self.device)
                feat = self.model(data)  # model outputs descriptors(desc)
                assert feat.shape[0] == 1
                feat = feat.squeeze(0)
                gt_lbls = gt_lbls.squeeze(0)
                xyz = data.squeeze(1).squeeze(0).cpu()
                ## pack to correspondence
                pred_lbls, perm_1a = compute_correspondence_from_a_to_1_no_batch(
                    feat, aux_feat, aux_lbls, n_cls, temperature=temperature)
                mean_shape_IoU, area_intersection, area_union = iou_eval.get_mean_IoU(pred_lbls, gt_lbls)
                mean_shape_IoU_list.append(mean_shape_IoU)
                mean_shape_IOU_meter.update(mean_shape_IoU.item(), data.size(0))
                intersection_meter.update(area_intersection, data.size(0))
                union_meter.update(area_union, data.size(0))
                if mean_shape_IOU_meter.count % 50 == 0:
                    print(f"after {mean_shape_IOU_meter.count} shapes: shape mean_IoU = {mean_shape_IOU_meter.avg}")
                    print(f"after {intersection_meter.count} shapes: category mean_IoU = {(intersection_meter.avg/union_meter.avg).mean()}")
                if mean_shape_IOU_meter.count % 10 == 0:
                    vis_data_query = {"xyz": xyz, "gt_lbls": gt_lbls, "pred_lbls": pred_lbls}
                    parent_dir = os.path.join(self.config.result_dir, "segmentation")
                    if not os.path.exists(parent_dir):
                        os.mkdir(parent_dir)
                    fname_vis = f"rnd_seed{seed}-shape{i}.obj"
                    fname_vis = os.path.join(parent_dir, fname_vis)
                    idx = torch.arange(0, perm_1a.shape[0])
                    pair = torch.stack((perm_1a.cpu(), idx), dim=0)
                    pair = pair.permute(1, 0)
                    write_paired_pointclouds_obj(fname_vis, vis_data_aux, pair, vis_data_query)
            print(f"check validation data {early_stop_num}: {time.time() - tic}")
            print(f"Segmentation Validation:\n --shape mean_IoU = {mean_shape_IOU_meter.avg} ")
            print(f"--category mean_IoU = {(intersection_meter.avg/union_meter.avg).mean()} with {intersection_meter.count} shapes")
        # Bug fix: val_log was assigned twice, so the shape-IoU entry (whose
        # key also had a typo, "mean_IoUe") was discarded; report both.
        val_log = {
            "shape mean_IoU": mean_shape_IOU_meter.avg,
            "category mean_IoU": (intersection_meter.avg / union_meter.avg).mean(),
        }
        return val_log

    def _correspondence_valid_epoch(self, epoch):
        """Run the training loss on the validation set (no gradients) and
        return the loss/info scalars of the last batch as the validation log."""
        self.logger.info(f"Running correspondence validation for epoch {epoch}")
        self.model.eval()
        match_rate_meter = AverageMeter()
        n_pts = self.config["dataset"]["args"]["num_points"]
        seed = 0
        torch.manual_seed(seed)
        early_stop_num = 100
        # Bug fixes: `fname_vis` was read below but never defined in this
        # method, and `val_log` was unbound when the loader yielded nothing.
        fname_vis = None
        val_log = {}
        with torch.no_grad():
            tic = time.time()
            for i, batch in enumerate(self.valid_data_loader):
                if (self.config["evaluation"].get("mini_eval", False) and i == early_stop_num):
                    print("Early stop as mini_eval is used")
                    break
                ## load
                data, meta = batch["data"], batch["meta"]
                gt_lbls = meta["label"]
                data = data.to(self.device)
                feat = self.model(data)  # model outputs descriptors(desc)
                ## compute loss
                output_loss, output_info = self.loss(feat, meta, epoch, fname_vis=fname_vis)
                for name, iter_loss in output_loss.items():
                    print(name, iter_loss)
                for name, iter_info in output_info.items():
                    print(name, iter_info)
                total_loss = output_loss['total_loss']
                loss = output_loss['cycle_loss']
                val_log = {**output_loss, **output_info}
        return val_log
| [
"os.mkdir",
"numpy.sum",
"numpy.sin",
"torch.autograd.set_detect_anomaly",
"torch.arange",
"torch.no_grad",
"os.path.join",
"os.path.exists",
"datetime.timedelta",
"numpy.linspace",
"pointcloud_utils.iou_metric.PointCloudIOU",
"torch.manual_seed",
"numpy.cos",
"numpy.concatenate",
"torch... | [((525, 536), 'time.time', 'time.time', ([], {}), '()\n', (534, 536), False, 'import time\n'), ((492, 503), 'time.time', 'time.time', ([], {}), '()\n', (501, 503), False, 'import time\n'), ((4117, 4128), 'time.time', 'time.time', ([], {}), '()\n', (4126, 4128), False, 'import time\n'), ((4377, 4388), 'time.time', 'time.time', ([], {}), '()\n', (4386, 4388), False, 'import time\n'), ((9564, 9584), 'pointcloud_utils.iou_metric.PointCloudIOU', 'PointCloudIOU', (['n_cls'], {}), '(n_cls)\n', (9577, 9584), False, 'from pointcloud_utils.iou_metric import PointCloudIOU\n'), ((9611, 9634), 'torch.manual_seed', 'torch.manual_seed', (['seed'], {}), '(seed)\n', (9628, 9634), False, 'import torch\n'), ((15659, 15682), 'torch.manual_seed', 'torch.manual_seed', (['seed'], {}), '(seed)\n', (15676, 15682), False, 'import torch\n'), ((2223, 2255), 'pkg_resources.parse_version', 'parse_version', (['torch.__version__'], {}), '(torch.__version__)\n', (2236, 2255), False, 'from pkg_resources import parse_version\n'), ((2259, 2281), 'pkg_resources.parse_version', 'parse_version', (['"""1.0.0"""'], {}), "('1.0.0')\n", (2272, 2281), False, 'from pkg_resources import parse_version\n'), ((3372, 3418), 'numpy.concatenate', 'np.concatenate', (['[batch_vis_idx + 3, [0, 1, 2]]'], {}), '([batch_vis_idx + 3, [0, 1, 2]])\n', (3386, 3418), True, 'import numpy as np\n'), ((4674, 4685), 'time.time', 'time.time', ([], {}), '()\n', (4683, 4685), False, 'import time\n'), ((7970, 7994), 'torch.cuda.empty_cache', 'torch.cuda.empty_cache', ([], {}), '()\n', (7992, 7994), False, 'import torch\n'), ((9648, 9663), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (9661, 9663), False, 'import torch\n'), ((9866, 9877), 'time.time', 'time.time', ([], {}), '()\n', (9875, 9877), False, 'import time\n'), ((12433, 12444), 'time.time', 'time.time', ([], {}), '()\n', (12442, 12444), False, 'import time\n'), ((15725, 15740), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (15738, 15740), False, 'import 
torch\n'), ((15760, 15771), 'time.time', 'time.time', ([], {}), '()\n', (15769, 15771), False, 'import time\n'), ((5278, 5317), 'torch.autograd.set_detect_anomaly', 'torch.autograd.set_detect_anomaly', (['(True)'], {}), '(True)\n', (5311, 5317), False, 'import torch\n'), ((6986, 6997), 'time.time', 'time.time', ([], {}), '()\n', (6995, 6997), False, 'import time\n'), ((8123, 8134), 'time.time', 'time.time', ([], {}), '()\n', (8132, 8134), False, 'import time\n'), ((11606, 11632), 'torch.tensor', 'torch.tensor', (['extra_offset'], {}), '(extra_offset)\n', (11618, 11632), False, 'import torch\n'), ((5066, 5100), 'numpy.sum', 'np.sum', (['(batch_vis_idx == batch_idx)'], {}), '(batch_vis_idx == batch_idx)\n', (5072, 5100), True, 'import numpy as np\n'), ((5668, 5703), 'os.path.join', 'os.path.join', (['parent_dir', 'fname_vis'], {}), '(parent_dir, fname_vis)\n', (5680, 5703), False, 'import os\n'), ((6896, 6907), 'time.time', 'time.time', ([], {}), '()\n', (6905, 6907), False, 'import time\n'), ((7280, 7317), 'datetime.timedelta', 'datetime.timedelta', ([], {'seconds': 'remaining'}), '(seconds=remaining)\n', (7298, 7317), False, 'import datetime\n'), ((8071, 8082), 'time.time', 'time.time', ([], {}), '()\n', (8080, 8082), False, 'import time\n'), ((8904, 8915), 'time.time', 'time.time', ([], {}), '()\n', (8913, 8915), False, 'import time\n'), ((11034, 11083), 'numpy.linspace', 'np.linspace', (['(0.0)', '(2.0 * np.pi)', 'Ba'], {'endpoint': '(False)'}), '(0.0, 2.0 * np.pi, Ba, endpoint=False)\n', (11045, 11083), True, 'import numpy as np\n'), ((11171, 11188), 'numpy.zeros', 'np.zeros', (['(Ba, 3)'], {}), '((Ba, 3))\n', (11179, 11188), True, 'import numpy as np\n'), ((11471, 11499), 'torch.from_numpy', 'torch.from_numpy', (['multiplier'], {}), '(multiplier)\n', (11487, 11499), False, 'import torch\n'), ((12396, 12407), 'time.time', 'time.time', ([], {}), '()\n', (12405, 12407), False, 'import time\n'), ((14324, 14376), 'os.path.join', 'os.path.join', 
(['self.config.result_dir', '"""segmentation"""'], {}), "(self.config.result_dir, 'segmentation')\n", (14336, 14376), False, 'import os\n'), ((14572, 14607), 'os.path.join', 'os.path.join', (['parent_dir', 'fname_vis'], {}), '(parent_dir, fname_vis)\n', (14584, 14607), False, 'import os\n'), ((14635, 14668), 'torch.arange', 'torch.arange', (['(0)', 'perm_1a.shape[0]'], {}), '(0, perm_1a.shape[0])\n', (14647, 14668), False, 'import torch\n'), ((11230, 11242), 'numpy.cos', 'np.cos', (['rads'], {}), '(rads)\n', (11236, 11242), True, 'import numpy as np\n'), ((11244, 11256), 'numpy.sin', 'np.sin', (['rads'], {}), '(rads)\n', (11250, 11256), True, 'import numpy as np\n'), ((14404, 14430), 'os.path.exists', 'os.path.exists', (['parent_dir'], {}), '(parent_dir)\n', (14418, 14430), False, 'import os\n'), ((14456, 14476), 'os.mkdir', 'os.mkdir', (['parent_dir'], {}), '(parent_dir)\n', (14464, 14476), False, 'import os\n'), ((14939, 14950), 'time.time', 'time.time', ([], {}), '()\n', (14948, 14950), False, 'import time\n')] |
"""
Bar plots
==========
An example of bar plots with matplotlib.
"""
import numpy as np
import matplotlib.pyplot as plt
n = 12
X = np.arange(n)
Y1 = (1 - X / float(n)) * np.random.uniform(0.5, 1.0, n)
Y2 = (1 - X / float(n)) * np.random.uniform(0.5, 1.0, n)
plt.axes([0.025, 0.025, 0.95, 0.95])
plt.bar(X, +Y1, facecolor='#9999ff', edgecolor='white')
plt.bar(X, -Y2, facecolor='#ff9999', edgecolor='white')
for x, y in zip(X, Y1):
plt.text(x + 0.4, y + 0.05, '%.2f' % y, ha='center', va= 'bottom')
for x, y in zip(X, Y2):
plt.text(x + 0.4, -y - 0.05, '%.2f' % y, ha='center', va= 'top')
plt.xlim(-.5, n)
plt.xticks(())
plt.ylim(-1.25, 1.25)
plt.yticks(())
plt.show()
| [
"matplotlib.pyplot.xlim",
"numpy.random.uniform",
"matplotlib.pyplot.show",
"matplotlib.pyplot.ylim",
"matplotlib.pyplot.axes",
"matplotlib.pyplot.bar",
"matplotlib.pyplot.yticks",
"matplotlib.pyplot.text",
"numpy.arange",
"matplotlib.pyplot.xticks"
] | [((135, 147), 'numpy.arange', 'np.arange', (['n'], {}), '(n)\n', (144, 147), True, 'import numpy as np\n'), ((263, 299), 'matplotlib.pyplot.axes', 'plt.axes', (['[0.025, 0.025, 0.95, 0.95]'], {}), '([0.025, 0.025, 0.95, 0.95])\n', (271, 299), True, 'import matplotlib.pyplot as plt\n'), ((300, 355), 'matplotlib.pyplot.bar', 'plt.bar', (['X', '(+Y1)'], {'facecolor': '"""#9999ff"""', 'edgecolor': '"""white"""'}), "(X, +Y1, facecolor='#9999ff', edgecolor='white')\n", (307, 355), True, 'import matplotlib.pyplot as plt\n'), ((356, 411), 'matplotlib.pyplot.bar', 'plt.bar', (['X', '(-Y2)'], {'facecolor': '"""#ff9999"""', 'edgecolor': '"""white"""'}), "(X, -Y2, facecolor='#ff9999', edgecolor='white')\n", (363, 411), True, 'import matplotlib.pyplot as plt\n'), ((603, 620), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(-0.5)', 'n'], {}), '(-0.5, n)\n', (611, 620), True, 'import matplotlib.pyplot as plt\n'), ((620, 634), 'matplotlib.pyplot.xticks', 'plt.xticks', (['()'], {}), '(())\n', (630, 634), True, 'import matplotlib.pyplot as plt\n'), ((635, 656), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(-1.25)', '(1.25)'], {}), '(-1.25, 1.25)\n', (643, 656), True, 'import matplotlib.pyplot as plt\n'), ((657, 671), 'matplotlib.pyplot.yticks', 'plt.yticks', (['()'], {}), '(())\n', (667, 671), True, 'import matplotlib.pyplot as plt\n'), ((673, 683), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (681, 683), True, 'import matplotlib.pyplot as plt\n'), ((174, 204), 'numpy.random.uniform', 'np.random.uniform', (['(0.5)', '(1.0)', 'n'], {}), '(0.5, 1.0, n)\n', (191, 204), True, 'import numpy as np\n'), ((231, 261), 'numpy.random.uniform', 'np.random.uniform', (['(0.5)', '(1.0)', 'n'], {}), '(0.5, 1.0, n)\n', (248, 261), True, 'import numpy as np\n'), ((441, 506), 'matplotlib.pyplot.text', 'plt.text', (['(x + 0.4)', '(y + 0.05)', "('%.2f' % y)"], {'ha': '"""center"""', 'va': '"""bottom"""'}), "(x + 0.4, y + 0.05, '%.2f' % y, ha='center', va='bottom')\n", (449, 506), True, 'import 
matplotlib.pyplot as plt\n'), ((537, 600), 'matplotlib.pyplot.text', 'plt.text', (['(x + 0.4)', '(-y - 0.05)', "('%.2f' % y)"], {'ha': '"""center"""', 'va': '"""top"""'}), "(x + 0.4, -y - 0.05, '%.2f' % y, ha='center', va='top')\n", (545, 600), True, 'import matplotlib.pyplot as plt\n')] |
import numpy as np
import scipy as sp
import scipy.io
import scipy.signal

np.random.seed(4)
# BUGFIX: scipy.rand was a deprecated re-export of numpy.random.rand and was
# removed in SciPy 1.9 -- draw from NumPy directly (identical RNG stream).
abs_val, phase_val = [np.random.rand(13, 20) for _ in range(2)]
phase_val *= 2 * np.pi
shift = (2, 3)


def _upsample(img, factor=1.5):
    """Fourier-resample *img* by *factor* along every axis and return it.

    BUGFIX: the original wrote ``img[:] = sp.signal.resample(...)`` inside the
    loop, which cannot work -- resampling changes the length along the axis,
    so the in-place slice assignment raises ValueError. Rebind and return.
    """
    for ax in range(img.ndim):
        img = sp.signal.resample(img, int(img.shape[ax] * factor), axis=ax)
    return img


abs_val = _upsample(abs_val)
phase_val = _upsample(phase_val)

# BUGFIX: the original referenced undefined names ``dest``/``dest2``; the
# intended operands are the amplitude and phase arrays built above. phase_val
# is already scaled to [0, 2*pi), so it goes into the exponent as-is.
cplx = abs_val * np.exp(1j * phase_val)
first = cplx[shift[0]:, shift[1]:]
second = cplx[:-shift[0], :-shift[1]]
sp.io.savemat("first.mat", dict(rca=first))
sp.io.savemat("first2.mat", dict(rca=second))
| [
"scipy.rand",
"numpy.random.seed",
"numpy.exp"
] | [((77, 94), 'numpy.random.seed', 'np.random.seed', (['(4)'], {}), '(4)\n', (91, 94), True, 'import numpy as np\n'), ((117, 132), 'scipy.rand', 'sp.rand', (['(13)', '(20)'], {}), '(13, 20)\n', (124, 132), True, 'import scipy as sp\n'), ((339, 371), 'numpy.exp', 'np.exp', (['(1.0j * np.pi * 2 * dest2)'], {}), '(1.0j * np.pi * 2 * dest2)\n', (345, 371), True, 'import numpy as np\n')] |
from tkinter import *
import numpy as np
import json
from sklearn.cluster import KMeans
from itertools import count
from tqdm import tqdm
from PIL import Image, ImageDraw, ImageFilter, ImageEnhance
import os
from multiprocessing import Pool
import traceback
import math
import json
import csv
from config import *
import random
import webp
def create_tile(depth, x, y):
    """Render the map tile at (depth, x, y), plus its @2x variant when
    CREATE_HQ_TILES is set.

    A tile is composed from two sources:
      1. the four child tiles at depth + 1, each downscaled by 2x, and
      2. the individual images whose embedded 2D position falls inside this
         tile (drawn rotated, with a soft drop shadow).
    Tiles that end up completely empty are not written to disk.
    """
    tile_file_name = TILE_FILE_FORMAT.format('', depth + DEPTH_OFFSET, x, y)
    tile_file_name_hq = TILE_FILE_FORMAT.format('@2x', depth + DEPTH_OFFSET, x, y)
    # BUGFIX: this used to test os.path.exists(CREATE_HQ_TILES) -- a boolean
    # flag, never a file name -- so existing tiles were always re-rendered.
    # Skip only when every requested output file already exists.
    if os.path.exists(tile_file_name) and (not CREATE_HQ_TILES or os.path.exists(tile_file_name_hq)):
        return
    tile = Image.new("RGB", (TILE_SIZE, TILE_SIZE), (255, 255, 255))
    tile_hq = Image.new("RGB", (TILE_SIZE * 2, TILE_SIZE * 2), (255, 255, 255))
    is_empty = True
    # Source 1: mosaic the four children of the next-deeper level.
    if depth < TILE_DEPTH:
        for a, b in ((0, 0), (0, 1), (1, 0), (1, 1)):
            old_tile_file_name = TILE_FILE_FORMAT.format('', depth + 1 + DEPTH_OFFSET, x * 2 + a, y * 2 + b)
            if os.path.exists(old_tile_file_name):
                image = Image.open(old_tile_file_name)
                image = image.resize((TILE_SIZE // 2, TILE_SIZE // 2), resample=Image.BICUBIC)
                tile.paste(image, (a * TILE_SIZE // 2, b * TILE_SIZE // 2))
                is_empty = False
            old_tile_file_name_hq = TILE_FILE_FORMAT.format('@2x', depth + 1 + DEPTH_OFFSET, x * 2 + a, y * 2 + b)
            if CREATE_HQ_TILES and os.path.exists(old_tile_file_name_hq):
                # BUGFIX: the @2x mosaic was previously built from the
                # low-resolution child (old_tile_file_name) even though the
                # @2x child was just checked for -- read the @2x child.
                image = Image.open(old_tile_file_name_hq)
                image = image.resize((TILE_SIZE, TILE_SIZE), resample=Image.BICUBIC)
                tile_hq.paste(image, (a * TILE_SIZE, b * TILE_SIZE))
                is_empty = False
    # Source 2: paste the representative images placed on this tile.
    if depth > 1:
        # Include a margin so images centred just outside the tile still
        # draw their overlapping part (and shadow) into it.
        margin = (IMAGE_SIZE / 2 + SHADOW_RADIUS) / TILE_SIZE
        x_range = ((x - margin) / 2**depth, (x + 1 + margin) / 2**depth)
        y_range = ((y - margin) / 2**depth, (y + 1 + margin) / 2**depth)
        codes_current = codes_by_depth[depth]
        hashes = hashes_by_depth[depth]
        mask = (codes_current[:, 0] > x_range[0]) \
            & (codes_current[:, 0] < x_range[1]) \
            & (codes_current[:, 1] > y_range[0]) \
            & (codes_current[:, 1] < y_range[1])
        indices = mask.nonzero()[0]
        if indices.shape[0] > 0:
            is_empty = False
            positions = codes_current[indices, :]
            # Embedded coordinates -> pixel coordinates local to this tile.
            positions *= 2**depth * TILE_SIZE
            positions -= np.array((x * TILE_SIZE, y * TILE_SIZE))[np.newaxis, :]
            for i in range(indices.shape[0]):
                index = indices[i]
                image_id = hashes[index]
                angle = rotations[image_id]
                image_file_name = 'data/images_alpha/{:s}.webp'.format(image_id)
                image_original = Image.fromarray(webp.imread(image_file_name))
                image_rotated = image_original.rotate(angle, resample=Image.BICUBIC, expand=True)
                # rotate(expand=True) grows the canvas; scale the target size
                # by the same ratio so the image keeps its footprint.
                size = int(IMAGE_SIZE * image_rotated.size[0] / image_original.size[0])
                image = image_rotated.resize((size, size), resample=Image.BICUBIC)
                image_hq = image_rotated.resize((size * 2, size * 2), resample=Image.BICUBIC)
                # Drop shadow: blur the alpha channel, then scale its
                # brightness by SHADOW_VALUE.
                shadow_mask_hq = Image.new("L", (size * 2 + 2 * SHADOW_RADIUS * 2, size * 2 + 2 * SHADOW_RADIUS * 2), 0)
                shadow_mask_hq.paste(image_hq.split()[-1], (SHADOW_RADIUS * 2, SHADOW_RADIUS * 2))
                shadow_mask_hq = shadow_mask_hq.filter(ImageFilter.GaussianBlur(radius=SHADOW_RADIUS))
                enhancer = ImageEnhance.Brightness(shadow_mask_hq)
                shadow_mask_hq = enhancer.enhance(SHADOW_VALUE)
                shadow_mask = shadow_mask_hq.resize((shadow_mask_hq.size[0] // 2, shadow_mask_hq.size[1] // 2))
                tile.paste((0, 0, 0), (int(positions[i, 0] - size / 2 - SHADOW_RADIUS), int(positions[i, 1] - size / 2 - SHADOW_RADIUS)), mask=shadow_mask)
                tile.paste(image, (int(positions[i, 0] - size / 2), int(positions[i, 1] - size / 2)), mask=image)
                if CREATE_HQ_TILES:
                    tile_hq.paste((0, 0, 0), (int(positions[i, 0] * 2 - size - SHADOW_RADIUS * 2), int(positions[i, 1] * 2 - size - SHADOW_RADIUS * 2)), mask=shadow_mask_hq)
                    tile_hq.paste(image_hq, (int(positions[i, 0] * 2 - size), int(positions[i, 1] * 2 - size)), mask=image_hq)
    if not is_empty:
        tile.save(tile_file_name, quality=TILE_IMAGE_QUALITY)
        if CREATE_HQ_TILES:
            tile_hq.save(tile_file_name_hq, quality=TILE_IMAGE_QUALITY)
def try_create_tile(*args):
    """Pool-worker wrapper around create_tile(): log the traceback of any
    failure instead of letting it silently kill the worker task.
    """
    try:
        create_tile(*args)
    except Exception:
        # BUGFIX: a bare ``except:`` also swallowed KeyboardInterrupt and
        # SystemExit, preventing clean shutdown of the worker pool.
        traceback.print_exc()
def kmeans(points, points_latent_codes, n):
    """Cluster *points* into ``n`` groups with k-means.

    Returns ``(indices, centers)`` where ``indices`` selects, per cluster,
    the member whose latent code is closest to the cluster's mean latent
    code, and ``centers`` are the 2D k-means cluster centers.
    """
    if n == 0:
        return [], np.zeros((0, 2))
    if points.shape[0] <= n:
        # Fewer points than clusters requested: every point represents itself.
        return range(points.shape[0]), points
    model = KMeans(n_clusters=n)
    cluster_ids = model.fit_predict(points)
    representatives = []
    for cluster in range(n):
        members = np.nonzero(cluster_ids == cluster)[0]
        if members.shape[0] == 0:
            # k-Means creates empty clusters when the dataset contains less
            # than n *distinct* points, but more than n *total* points
            # (due to duplicates)
            return range(n), points[:n, :]
        latent = points_latent_codes[members]
        dist = np.linalg.norm(latent - np.mean(latent, axis=0), axis=1)
        representatives.append(members[np.argmin(dist)])
    return representatives, model.cluster_centers_
def get_kmeans(depth, x, y):
    """Compute the k-means representatives for one spatial cell at a depth.

    For shallow depths (subdivisions == 1) the whole embedding is clustered
    at once; deeper levels cluster only the points falling into the
    (x, y) cell of a subdivisions x subdivisions grid over [-1, 1]^2.
    Relies on the module-level ``codes`` / ``latent_codes`` arrays.

    Returns:
        (depth, indices, centers) on success, or None if an exception
        occurred (the traceback is printed; the pool callback will then
        fail when unpacking None).
    """
    try:
        number_of_items = 2**(2*depth) * 2
        subdivisions = max(1, 2**(depth - 2))
        if subdivisions == 1:
            kmeans_indices, kmeans_points = kmeans(codes, latent_codes, number_of_items)
            return depth, kmeans_indices, kmeans_points
        x_range = (-1 + 2 * x / subdivisions, -1 + 2 * (x + 1) / subdivisions)
        y_range = (-1 + 2 * y / subdivisions, -1 + 2 * (y + 1) / subdivisions)
        mask = (codes[:, 0] > x_range[0]) \
            & (codes[:, 0] <= x_range[1]) \
            & (codes[:, 1] > y_range[0]) \
            & (codes[:, 1] <= y_range[1])
        indices = np.nonzero(mask)[0]
        codes_mask = codes[mask, :]
        # Budget the cell's cluster count proportionally to its share of points.
        kmeans_indices, kmeans_points = kmeans(codes_mask, latent_codes[mask, :], int(number_of_items * indices.shape[0] / codes.shape[0]))
        return depth, indices[kmeans_indices], kmeans_points
    except Exception:
        # BUGFIX: was a bare ``except:`` which also swallowed
        # KeyboardInterrupt/SystemExit inside pool workers.
        traceback.print_exc()
if __name__ == '__main__':
    # --- Load per-image latent codes and their 2D embedding. ---
    latent_codes = np.load('data/latent_codes.npy')
    codes = np.load(LATENT_CODES_EMBEDDED_MOVED_FILE_NAME)
    from image_loader import ImageDataset
    dataset = ImageDataset(quality=(1, 2))
    # Rotation angle per image hash (passed to PIL Image.rotate in create_tile).
    rotation_file = open(ROTATIONS_CALCULATED_FILENAME, 'r')
    reader = csv.reader(rotation_file)
    rotations = {row[0]: float(row[1]) for row in reader}
    rotation_file.close()
    # --- Stage 1: pick representative images per depth level via k-means,
    # splitting deeper levels into spatial cells so workers stay balanced. ---
    kmeans_tasks = []
    for depth in range(TILE_DEPTH):
        # Depths 0..2 are a single cell; deeper levels use a
        # 2**(depth-2) x 2**(depth-2) grid.
        subdivisions = max(1, 2**(depth - 2))
        for x in range(subdivisions):
            for y in range(subdivisions):
                kmeans_tasks.append((depth, x, y))
    worker_count = os.cpu_count()
    print("Using {:d} processes.".format(worker_count))
    pool = Pool(worker_count)
    progress = tqdm(total=len(kmeans_tasks), desc='Running k-means')
    codes_by_depth = [[] for _ in range(TILE_DEPTH)]
    hashes_by_depth = [[] for _ in range(TILE_DEPTH)]
    def on_complete(args):
        # Runs in the parent process: collect one worker's representatives.
        # NOTE(review): if get_kmeans returned None (worker exception), the
        # unpacking below raises here.
        depth, kmeans_indices, kmeans_points = args
        codes_by_depth[depth].append(kmeans_points)
        for i in kmeans_indices:
            hashes_by_depth[depth].append(dataset.hashes[i])
        progress.update()
    for depth, x, y in kmeans_tasks:
        pool.apply_async(get_kmeans, args=(depth, x, y), callback=on_complete)
    pool.close()
    pool.join()
    for depth in range(TILE_DEPTH):
        codes_by_depth[depth] = np.concatenate(codes_by_depth[depth])
    # Persist the cluster layout (image hash + 2D position per depth) as JSON.
    json_dict = {depth + DEPTH_OFFSET: [{'image': hash, 'x': codes_by_depth[depth][i, 0], 'y': codes_by_depth[depth][i, 1]} for i, hash in enumerate(hashes)] for depth, hashes in enumerate(hashes_by_depth)}
    json_string = json.dumps(json_dict)
    with open('data/clusters.json', 'w') as file:
        file.write(json_string)
    # The deepest level (index TILE_DEPTH) shows every image, not just
    # cluster representatives.
    codes_by_depth.append(codes)
    hashes_by_depth.append(dataset.hashes)
    print("Using {:d} processes.".format(worker_count))
    # --- Stage 2: render tiles from the deepest level upwards -- each tile
    # mosaics its four children from depth + 1 (see create_tile). ---
    for depth in range(TILE_DEPTH, -4, -1):
        pool = Pool(worker_count)
        progress = tqdm(total=(2**(2 * depth + 2)), desc='Depth {:d}'.format(depth + DEPTH_OFFSET))
        def on_complete(*_):
            progress.update()
        tile_addresses = []
        for x in range(math.floor(-2**depth), math.ceil(2**depth)):
            # Ensure per-column output directories exist before workers write.
            tile_directory = os.path.dirname(TILE_FILE_FORMAT.format('', depth + DEPTH_OFFSET, x, 0))
            if not os.path.exists(tile_directory):
                os.makedirs(tile_directory)
            tile_directory_hq = os.path.dirname(TILE_FILE_FORMAT.format('@2x', depth + DEPTH_OFFSET, x, 0))
            if not os.path.exists(tile_directory_hq):
                os.makedirs(tile_directory_hq)
            for y in range(math.floor(-2**depth), math.ceil(2**depth)):
                tile_addresses.append((x, y))
        # Shuffled, presumably so progress is roughly uniform across the map.
        random.shuffle(tile_addresses)
        for x, y in tile_addresses:
            pool.apply_async(try_create_tile, args=(depth, x, y), callback=on_complete)
        pool.close()
        pool.join()
"PIL.Image.new",
"numpy.load",
"csv.reader",
"PIL.ImageEnhance.Brightness",
"random.shuffle",
"json.dumps",
"numpy.argmin",
"numpy.mean",
"traceback.print_exc",
"sklearn.cluster.KMeans",
"os.path.exists",
"webp.imread",
"PIL.ImageFilter.GaussianBlur",
"image_loader.ImageDataset",
"math.c... | [((662, 719), 'PIL.Image.new', 'Image.new', (['"""RGB"""', '(TILE_SIZE, TILE_SIZE)', '(255, 255, 255)'], {}), "('RGB', (TILE_SIZE, TILE_SIZE), (255, 255, 255))\n", (671, 719), False, 'from PIL import Image, ImageDraw, ImageFilter, ImageEnhance\n'), ((734, 799), 'PIL.Image.new', 'Image.new', (['"""RGB"""', '(TILE_SIZE * 2, TILE_SIZE * 2)', '(255, 255, 255)'], {}), "('RGB', (TILE_SIZE * 2, TILE_SIZE * 2), (255, 255, 255))\n", (743, 799), False, 'from PIL import Image, ImageDraw, ImageFilter, ImageEnhance\n'), ((4930, 4950), 'sklearn.cluster.KMeans', 'KMeans', ([], {'n_clusters': 'n'}), '(n_clusters=n)\n', (4936, 4950), False, 'from sklearn.cluster import KMeans\n'), ((6597, 6629), 'numpy.load', 'np.load', (['"""data/latent_codes.npy"""'], {}), "('data/latent_codes.npy')\n", (6604, 6629), True, 'import numpy as np\n'), ((6642, 6688), 'numpy.load', 'np.load', (['LATENT_CODES_EMBEDDED_MOVED_FILE_NAME'], {}), '(LATENT_CODES_EMBEDDED_MOVED_FILE_NAME)\n', (6649, 6688), True, 'import numpy as np\n'), ((6746, 6774), 'image_loader.ImageDataset', 'ImageDataset', ([], {'quality': '(1, 2)'}), '(quality=(1, 2))\n', (6758, 6774), False, 'from image_loader import ImageDataset\n'), ((6850, 6875), 'csv.reader', 'csv.reader', (['rotation_file'], {}), '(rotation_file)\n', (6860, 6875), False, 'import csv\n'), ((7216, 7230), 'os.cpu_count', 'os.cpu_count', ([], {}), '()\n', (7228, 7230), False, 'import os\n'), ((7298, 7316), 'multiprocessing.Pool', 'Pool', (['worker_count'], {}), '(worker_count)\n', (7302, 7316), False, 'from multiprocessing import Pool\n'), ((8238, 8259), 'json.dumps', 'json.dumps', (['json_dict'], {}), '(json_dict)\n', (8248, 8259), False, 'import json\n'), ((538, 568), 'os.path.exists', 'os.path.exists', (['tile_file_name'], {}), '(tile_file_name)\n', (552, 568), False, 'import os\n'), ((7970, 8007), 'numpy.concatenate', 'np.concatenate', (['codes_by_depth[depth]'], {}), '(codes_by_depth[depth])\n', (7984, 8007), True, 'import numpy as np\n'), ((8540, 
8558), 'multiprocessing.Pool', 'Pool', (['worker_count'], {}), '(worker_count)\n', (8544, 8558), False, 'from multiprocessing import Pool\n'), ((9350, 9380), 'random.shuffle', 'random.shuffle', (['tile_addresses'], {}), '(tile_addresses)\n', (9364, 9380), False, 'import random\n'), ((597, 628), 'os.path.exists', 'os.path.exists', (['CREATE_HQ_TILES'], {}), '(CREATE_HQ_TILES)\n', (611, 628), False, 'import os\n'), ((1026, 1060), 'os.path.exists', 'os.path.exists', (['old_tile_file_name'], {}), '(old_tile_file_name)\n', (1040, 1060), False, 'import os\n'), ((4724, 4745), 'traceback.print_exc', 'traceback.print_exc', ([], {}), '()\n', (4743, 4745), False, 'import traceback\n'), ((4825, 4841), 'numpy.zeros', 'np.zeros', (['(0, 2)'], {}), '((0, 2))\n', (4833, 4841), True, 'import numpy as np\n'), ((5073, 5105), 'numpy.nonzero', 'np.nonzero', (['(kmeans_clusters == i)'], {}), '(kmeans_clusters == i)\n', (5083, 5105), True, 'import numpy as np\n'), ((6250, 6266), 'numpy.nonzero', 'np.nonzero', (['mask'], {}), '(mask)\n', (6260, 6266), True, 'import numpy as np\n'), ((6528, 6549), 'traceback.print_exc', 'traceback.print_exc', ([], {}), '()\n', (6547, 6549), False, 'import traceback\n'), ((8772, 8795), 'math.floor', 'math.floor', (['(-2 ** depth)'], {}), '(-2 ** depth)\n', (8782, 8795), False, 'import math\n'), ((8795, 8816), 'math.ceil', 'math.ceil', (['(2 ** depth)'], {}), '(2 ** depth)\n', (8804, 8816), False, 'import math\n'), ((1086, 1116), 'PIL.Image.open', 'Image.open', (['old_tile_file_name'], {}), '(old_tile_file_name)\n', (1096, 1116), False, 'from PIL import Image, ImageDraw, ImageFilter, ImageEnhance\n'), ((1472, 1509), 'os.path.exists', 'os.path.exists', (['old_tile_file_name_hq'], {}), '(old_tile_file_name_hq)\n', (1486, 1509), False, 'import os\n'), ((1535, 1565), 'PIL.Image.open', 'Image.open', (['old_tile_file_name'], {}), '(old_tile_file_name)\n', (1545, 1565), False, 'from PIL import Image, ImageDraw, ImageFilter, ImageEnhance\n'), ((2507, 2547), 
'numpy.array', 'np.array', (['(x * TILE_SIZE, y * TILE_SIZE)'], {}), '((x * TILE_SIZE, y * TILE_SIZE))\n', (2515, 2547), True, 'import numpy as np\n'), ((3303, 3394), 'PIL.Image.new', 'Image.new', (['"""L"""', '(size * 2 + 2 * SHADOW_RADIUS * 2, size * 2 + 2 * SHADOW_RADIUS * 2)', '(0)'], {}), "('L', (size * 2 + 2 * SHADOW_RADIUS * 2, size * 2 + 2 *\n SHADOW_RADIUS * 2), 0)\n", (3312, 3394), False, 'from PIL import Image, ImageDraw, ImageFilter, ImageEnhance\n'), ((3620, 3659), 'PIL.ImageEnhance.Brightness', 'ImageEnhance.Brightness', (['shadow_mask_hq'], {}), '(shadow_mask_hq)\n', (3643, 3659), False, 'from PIL import Image, ImageDraw, ImageFilter, ImageEnhance\n'), ((5416, 5469), 'numpy.mean', 'np.mean', (['points_latent_codes[cluster_indices]'], {'axis': '(0)'}), '(points_latent_codes[cluster_indices], axis=0)\n', (5423, 5469), True, 'import numpy as np\n'), ((5525, 5540), 'numpy.argmin', 'np.argmin', (['dist'], {}), '(dist)\n', (5534, 5540), True, 'import numpy as np\n'), ((8938, 8968), 'os.path.exists', 'os.path.exists', (['tile_directory'], {}), '(tile_directory)\n', (8952, 8968), False, 'import os\n'), ((8986, 9013), 'os.makedirs', 'os.makedirs', (['tile_directory'], {}), '(tile_directory)\n', (8997, 9013), False, 'import os\n'), ((9141, 9174), 'os.path.exists', 'os.path.exists', (['tile_directory_hq'], {}), '(tile_directory_hq)\n', (9155, 9174), False, 'import os\n'), ((9192, 9222), 'os.makedirs', 'os.makedirs', (['tile_directory_hq'], {}), '(tile_directory_hq)\n', (9203, 9222), False, 'import os\n'), ((9250, 9273), 'math.floor', 'math.floor', (['(-2 ** depth)'], {}), '(-2 ** depth)\n', (9260, 9273), False, 'import math\n'), ((9273, 9294), 'math.ceil', 'math.ceil', (['(2 ** depth)'], {}), '(2 ** depth)\n', (9282, 9294), False, 'import math\n'), ((2860, 2888), 'webp.imread', 'webp.imread', (['image_file_name'], {}), '(image_file_name)\n', (2871, 2888), False, 'import webp\n'), ((3545, 3591), 'PIL.ImageFilter.GaussianBlur', 'ImageFilter.GaussianBlur', ([], 
{'radius': 'SHADOW_RADIUS'}), '(radius=SHADOW_RADIUS)\n', (3569, 3591), False, 'from PIL import Image, ImageDraw, ImageFilter, ImageEnhance\n')] |
import time
import numpy as np
import tensorflow as tf
from models import GAT
from inits import test_positive_sample, test_negative_sample
from inits import load_data
from inits import generate_mask
from inits import sparse_to_tuple
from metrics import masked_accuracy
from metrics import ROC
def train(train_arr, test_arr, cv, args, labels):
    """Build, train and evaluate the GAT-based interaction model for one
    cross-validation fold (TensorFlow 1.x graph mode).

    Args:
        train_arr: training interaction entries for this fold (forwarded to
            load_data / generate_mask).
        test_arr: held-out test interaction entries.
        cv: cross-validation fold identifier (forwarded to load_data).
        args: hyperparameter namespace -- uses n_epoch, learning_rate,
            weight_decay, n_head, dropout, n_node.
        labels: full interaction labels (forwarded to load_data, mask
            generation and ROC evaluation).

    Returns:
        (test_labels, test_scores, test_samples): ground-truth labels,
        predicted scores, and the stacked positive/negative test samples.
    """
    batch_size = 1
    l2_coef = 0.0005
    hid_units = [8]  # hidden units per attention layer
    #nb_epochs = 600
    #lr = 0.005
    #weight_decay = 1e-4
    #n_heads = 2
    nb_epochs = args.n_epoch
    lr = args.learning_rate
    weight_decay = args.weight_decay
    n_heads = args.n_head
    residual = False
    nonlinearity = tf.nn.elu
    model = GAT
    print('----- Opt. hyperparams -----')
    print('lr: ' + str(lr))
    print('l2_coef: ' + str(l2_coef))
    print('----- Archi. hyperparams -----')
    print('nb. layers: ' + str(len(hid_units)))
    print('nb. units per layer: ' + str(hid_units))
    print('nb. attention heads: ' + str(n_heads))
    print('residual: ' + str(residual))
    print('nonlinearity: ' + str(nonlinearity))
    print('model: ' + str(model))
    interaction_local_list, features_list, y_train, y_test, train_mask, test_mask, interaction_global_list = load_data(train_arr, test_arr, cv, args, labels)
    nb_nodes = features_list[0].shape[0]
    ft_size = features_list[0].shape[1]
    # Prepend a batch axis of size 1 to every feature matrix.
    features_list = [feature[np.newaxis] for feature in features_list]
    # Sparse adjacency ("bias") matrices in (indices, values, shape) form.
    biases_local_list = [sparse_to_tuple(interaction) for interaction in interaction_local_list]
    biases_global_list = [sparse_to_tuple(interaction) for interaction in interaction_global_list]
    #n = 6375
    n = args.n_node
    entry_size = n * n  # a score is predicted for every (node, node) pair
    # Build the static computation graph.
    with tf.Graph().as_default():
        with tf.name_scope('input'):
            # One dense feature placeholder per input view.
            feature_in_list = [tf.placeholder(dtype=tf.float32,
                                              shape=(batch_size, nb_nodes, ft_size),
                                              name='ftr_in_{}'.format(i))
                               for i in range(len(features_list))]
            bias_in_local_list = [tf.compat.v1.sparse_placeholder(tf.float32, name='ftr_in_{}'.format(i)) for i in range(len(biases_local_list))]
            bias_in_global_list = [tf.compat.v1.sparse_placeholder(tf.float32, name='ftr_in_{}'.format(i)) for i in range(len(biases_global_list))]
            lbl_in = tf.compat.v1.placeholder(dtype=tf.int32, shape=(entry_size, batch_size))
            msk_in = tf.compat.v1.placeholder(dtype=tf.int32, shape=(entry_size, batch_size))
            neg_msk = tf.compat.v1.placeholder(dtype=tf.int32, shape=(entry_size,batch_size))
            attn_drop = tf.compat.v1.placeholder(dtype=tf.float32, shape=())
            ffd_drop = tf.compat.v1.placeholder(dtype=tf.float32, shape=())
            is_train = tf.compat.v1.placeholder(dtype=tf.bool, shape=())
        # GAT encoder over local + global adjacency views -> node embeddings,
        # then a decoder producing the pairwise score matrix.
        final_embedding = model.encoder(feature_in_list, nb_nodes, is_train,
                                attn_drop, ffd_drop,
                                bias_mat_local_list = bias_in_local_list,
                                bias_mat_global_list = bias_in_global_list,
                                hid_units=hid_units, n_heads = n_heads,
                                residual=residual, activation=nonlinearity)
        pro_matrix = model.decoder_revised(final_embedding)
        loss = model.loss_overall(pro_matrix, lbl_in, msk_in, neg_msk, weight_decay, final_embedding)
        accuracy = masked_accuracy(pro_matrix, lbl_in, msk_in, neg_msk)
        train_op = model.training(loss, lr, l2_coef)
        init_op = tf.group(tf.compat.v1.global_variables_initializer(), tf.compat.v1.local_variables_initializer())
        # Mask of sampled negative (non-interacting) pairs, shared by
        # training and evaluation.
        neg_mask = generate_mask(labels, len(train_arr), args.n_node)
        #start to train
        with tf.compat.v1.Session() as sess:
            sess.run(init_op)
            # NOTE(review): these running sums are accumulated but never
            # reset or reported.
            train_loss_avg = 0
            train_acc_avg = 0
            for epoch in range(nb_epochs):
                t = time.time()
                ########## train ##############
                tr_step = 0
                tr_size = features_list[0].shape[0]
                # With batch_size == 1 this loop runs exactly once per epoch.
                while tr_step * batch_size < tr_size:
                    # Assemble the feed dict: features, sparse adjacencies,
                    # labels/masks and dropout settings.
                    fd1 = {i: d[tr_step * batch_size:(tr_step + 1) * batch_size]
                        for i, d in zip(feature_in_list, features_list)}
                    fd2 = {bias_in_local_list[i]: biases_local_list[i] for i in range(len(biases_local_list))}
                    fd3 = {bias_in_global_list[i]: biases_global_list[i] for i in range(len(biases_global_list))}
                    fd4 = {lbl_in: y_train,
                        msk_in: train_mask,
                        neg_msk: neg_mask,
                        is_train: True,
                        attn_drop: args.dropout,
                        ffd_drop: args.dropout}
                    fd = fd1
                    fd.update(fd2)
                    fd.update(fd3)
                    fd.update(fd4)
                    _, loss_value_tr, acc_tr = sess.run([train_op, loss, accuracy], feed_dict=fd)
                    train_loss_avg += loss_value_tr
                    train_acc_avg += acc_tr
                    tr_step += 1
                print('Epoch: %04d | Training: loss = %.5f, acc = %.5f, time = %.5f' % ((epoch+1), loss_value_tr,acc_tr, time.time()-t))
            print("Finish traing.")
            ########### test ############
            ts_size = features_list[0].shape[0]
            ts_step = 0
            ts_loss = 0.0
            ts_acc = 0.0
            print("Start to test")
            # Same feed construction as training, but with test labels/mask
            # and dropout disabled.
            while ts_step * batch_size < ts_size:
                fd1 = {i: d[ts_step * batch_size:(ts_step + 1) * batch_size]
                    for i, d in zip(feature_in_list, features_list)}
                fd2 = {bias_in_local_list[i]: biases_local_list[i] for i in range(len(biases_local_list))}
                fd3 = {bias_in_global_list[i]: biases_global_list[i] for i in range(len(biases_global_list))}
                fd4 = {lbl_in: y_test,
                    msk_in: test_mask,
                    neg_msk: neg_mask,
                    is_train: False,
                    attn_drop: 0.0,
                    ffd_drop: 0.0}
                fd = fd1
                fd.update(fd2)
                fd.update(fd3)
                fd.update(fd4)
                score_matrix, loss_value_ts, acc_ts = sess.run([pro_matrix, loss, accuracy], feed_dict=fd)
                ts_loss += loss_value_ts
                ts_acc += acc_ts
                ts_step += 1
            print('Test loss:', ts_loss/ts_step, '; Test accuracy:', ts_acc/ts_step)
            # Reshape flat pair scores into an n x n score matrix and
            # evaluate ROC on the held-out positives and sampled negatives.
            score_matrix = score_matrix.reshape((n,n))
            test_positive_samples = test_positive_sample(labels, test_arr)
            test_negative_samples = test_negative_sample(labels,len(test_arr),neg_mask.reshape((n,n)))
            test_labels, test_scores = ROC(score_matrix,labels, test_arr,test_negative_samples)
            test_samples = np.vstack((test_positive_samples, test_negative_samples))
            return test_labels, test_scores, test_samples
        # NOTE(review): unreachable -- the return above exits the function,
        # and the `with` block already closes the session.
        sess.close()
| [
"tensorflow.compat.v1.placeholder",
"metrics.masked_accuracy",
"metrics.ROC",
"inits.load_data",
"tensorflow.compat.v1.local_variables_initializer",
"tensorflow.compat.v1.Session",
"inits.test_positive_sample",
"time.time",
"tensorflow.Graph",
"inits.sparse_to_tuple",
"tensorflow.name_scope",
... | [((1222, 1270), 'inits.load_data', 'load_data', (['train_arr', 'test_arr', 'cv', 'args', 'labels'], {}), '(train_arr, test_arr, cv, args, labels)\n', (1231, 1270), False, 'from inits import load_data\n'), ((1459, 1487), 'inits.sparse_to_tuple', 'sparse_to_tuple', (['interaction'], {}), '(interaction)\n', (1474, 1487), False, 'from inits import sparse_to_tuple\n'), ((1559, 1587), 'inits.sparse_to_tuple', 'sparse_to_tuple', (['interaction'], {}), '(interaction)\n', (1574, 1587), False, 'from inits import sparse_to_tuple\n'), ((3489, 3541), 'metrics.masked_accuracy', 'masked_accuracy', (['pro_matrix', 'lbl_in', 'msk_in', 'neg_msk'], {}), '(pro_matrix, lbl_in, msk_in, neg_msk)\n', (3504, 3541), False, 'from metrics import masked_accuracy\n'), ((1736, 1758), 'tensorflow.name_scope', 'tf.name_scope', (['"""input"""'], {}), "('input')\n", (1749, 1758), True, 'import tensorflow as tf\n'), ((2349, 2421), 'tensorflow.compat.v1.placeholder', 'tf.compat.v1.placeholder', ([], {'dtype': 'tf.int32', 'shape': '(entry_size, batch_size)'}), '(dtype=tf.int32, shape=(entry_size, batch_size))\n', (2373, 2421), True, 'import tensorflow as tf\n'), ((2445, 2517), 'tensorflow.compat.v1.placeholder', 'tf.compat.v1.placeholder', ([], {'dtype': 'tf.int32', 'shape': '(entry_size, batch_size)'}), '(dtype=tf.int32, shape=(entry_size, batch_size))\n', (2469, 2517), True, 'import tensorflow as tf\n'), ((2542, 2614), 'tensorflow.compat.v1.placeholder', 'tf.compat.v1.placeholder', ([], {'dtype': 'tf.int32', 'shape': '(entry_size, batch_size)'}), '(dtype=tf.int32, shape=(entry_size, batch_size))\n', (2566, 2614), True, 'import tensorflow as tf\n'), ((2640, 2692), 'tensorflow.compat.v1.placeholder', 'tf.compat.v1.placeholder', ([], {'dtype': 'tf.float32', 'shape': '()'}), '(dtype=tf.float32, shape=())\n', (2664, 2692), True, 'import tensorflow as tf\n'), ((2718, 2770), 'tensorflow.compat.v1.placeholder', 'tf.compat.v1.placeholder', ([], {'dtype': 'tf.float32', 'shape': '()'}), 
'(dtype=tf.float32, shape=())\n', (2742, 2770), True, 'import tensorflow as tf\n'), ((2796, 2845), 'tensorflow.compat.v1.placeholder', 'tf.compat.v1.placeholder', ([], {'dtype': 'tf.bool', 'shape': '()'}), '(dtype=tf.bool, shape=())\n', (2820, 2845), True, 'import tensorflow as tf\n'), ((3632, 3675), 'tensorflow.compat.v1.global_variables_initializer', 'tf.compat.v1.global_variables_initializer', ([], {}), '()\n', (3673, 3675), True, 'import tensorflow as tf\n'), ((3677, 3719), 'tensorflow.compat.v1.local_variables_initializer', 'tf.compat.v1.local_variables_initializer', ([], {}), '()\n', (3717, 3719), True, 'import tensorflow as tf\n'), ((3837, 3859), 'tensorflow.compat.v1.Session', 'tf.compat.v1.Session', ([], {}), '()\n', (3857, 3859), True, 'import tensorflow as tf\n'), ((6913, 6951), 'inits.test_positive_sample', 'test_positive_sample', (['labels', 'test_arr'], {}), '(labels, test_arr)\n', (6933, 6951), False, 'from inits import test_positive_sample, test_negative_sample\n'), ((7090, 7148), 'metrics.ROC', 'ROC', (['score_matrix', 'labels', 'test_arr', 'test_negative_samples'], {}), '(score_matrix, labels, test_arr, test_negative_samples)\n', (7093, 7148), False, 'from metrics import ROC\n'), ((7173, 7230), 'numpy.vstack', 'np.vstack', (['(test_positive_samples, test_negative_samples)'], {}), '((test_positive_samples, test_negative_samples))\n', (7182, 7230), True, 'import numpy as np\n'), ((1698, 1708), 'tensorflow.Graph', 'tf.Graph', ([], {}), '()\n', (1706, 1708), True, 'import tensorflow as tf\n'), ((4014, 4025), 'time.time', 'time.time', ([], {}), '()\n', (4023, 4025), False, 'import time\n'), ((5493, 5504), 'time.time', 'time.time', ([], {}), '()\n', (5502, 5504), False, 'import time\n')] |
#!/usr/bin/env python
"""
Fit for shapelet coefficients across beta, xc, phi, and n_max
"""
import sys
import numpy as np
from scipy import optimize
import shapelets
if __name__ == '__main__':
    from optparse import OptionParser
    # Command-line interface: every fit control (region, mode, initial/fixed
    # parameters, minimizer settings) is exposed as an option.
    o = OptionParser()
    o.set_usage('%prog [options] FITS_IMAGE')
    o.set_description(__doc__)
    o.add_option('-r', '--region', dest='region', default=None,
        help='Region of image to decompose into shapelets, (xmin,xmax,ymin,ymax), default: None')
    # Help text fixed: this option IS used below to build the noise map.
    o.add_option('-N', '--noise_region', dest='nregion', default=None,
        help='Region of image to use to create a noise map, if set to None the entire image is used, (xmin,xmax,ymin,ymax), default: None')
    o.add_option('-m', '--mode', dest='mode', default='cart',
        help='Set the shapelet mode, cartesian or polar, default: cartesian')
    o.add_option('-o', '--outfile', dest='ofn', default='shapeletCoeffs.pkl',
        help='Coefficients output filename, default: shapeletCoeffs.pkl')
    # NOTE(review): max_pos is parsed but not referenced later in this script.
    o.add_option('--max', dest='max_pos', action="store_true", default=False,
        help='Override centroid position to be the position of max intensity')
    o.add_option('-s', '--savefig', dest='savefig', default=None,
        help='Save the figure, requires filename')
    o.add_option('-x','--init_xc', dest='init_xc', default=None,
        help='Initial parameter: set a x,y pixel position for initial center, if using a region it is based on the relative position, default: centroid of image/region')
    o.add_option('--set_xc', dest='set_xc', action='store_true',
        help='Set parameter: set init_xc x,y pixel position for center, these parameters will not be fit for if set')
    o.add_option('-b', '--init_beta', dest='init_beta', default=None,
        help='Initial parameter: initial beta value, can be two values i.e. \'25.0,30.5\', default: None, guess is made based on Gaussian fit')
    o.add_option('--set_beta', dest='set_beta', action='store_true',
        help='Set parameter: set init_beta beta value, these parameters will not be fit for if set')
    o.add_option('-p','--init_phi', dest='init_phi', default=None,
        help='Initial parameter: initial rotation angle (radians), only used when beta is manually input, default: 0')
    o.add_option('--set_phi', dest='set_phi', action='store_true',
        help='Set parameter: set init_phi rotation angle, this parameter will not be fit for if set')
    o.add_option('-n', '--nmax', dest='nmax', default='5',
        help='Size of coefficient dimensions for minimization fit, can be two values i.e. \'4,5\', default: 5')
    o.add_option('--fitter',dest='fitterMethod', default='Nelder-Mead',
        help='Fitting method: Nelder-Mead, Powell, CG, BFGS, see http://docs.scipy.org/doc/scipy-0.14.0/reference/generated/scipy.optimize.minimize.html, default: Nelder-Mead')
    o.add_option('--xtol', dest='xtol', default=0.001, type='float',
        help='Relative error in parameters acceptable for convergence, default: 0.001')
    o.add_option('--ftol', dest='ftol', default=0.001, type='float',
        help='Relative error in chi^2 function acceptable for convergence, default: 0.001')
    o.add_option('--maxiter', dest='maxiter', default=250, type='int',
        help='Maximum number of iterations to perform, default: 250')
    o.add_option('-B', '--brute', dest='brute', default=15, type='int',
        help='Maximum basis function order to use when running brute force method, default: 15')
    o.add_option('--noplot', dest='noplot', action='store_true',
        help='Do not show plots')
    opts, args = o.parse_args(sys.argv[1:])
#import matplotlib if needed
show_plots = not opts.noplot
if show_plots:
from matplotlib import pyplot as plt
import matplotlib.patches
ifn=args[0]
im0,hdr=shapelets.fileio.readFITS(ifn,hdr=True)
extent=[0,im0.shape[0],0,im0.shape[1]]
if not (opts.region is None):
extent=map(int, opts.region.split(','))
im=shapelets.img.selPxRange(im0,[extent[2],extent[3],extent[0],extent[1]])
else:
im=im0
#noise map
if opts.nregion is None:
#use the image region for noise estimation
mean,std=shapelets.img.estimateNoise(im0,mode='sample')
nm=shapelets.img.makeNoiseMap(im.shape,mean,std)
else:
#use a specific region for noise estimation
nextent=map(int, opts.nregion.split(','))
mean,std=shapelets.img.estimateNoise(shapelets.img.selPxRange(im0,[nextent[2],nextent[3],nextent[0],nextent[1]]),mode='basic')
nm=shapelets.img.makeNoiseMap(im.shape,mean,std)
#determine set parameters
set_xc=opts.set_xc
set_beta=opts.set_beta
set_phi=opts.set_phi
#select initial beta, phi, and xc
if opts.init_beta==None:
beta0,phi0,nmax0=shapelets.decomp.initParams(im,mode='fit',hdr=hdr)
else:
beta0=map(float,opts.init_beta.split(','))
if len(beta0)==1:
beta0=[beta0[0],beta0[0]]
else:
beta0=[beta0[1],beta0[0]] #input to numpy flip
if opts.init_phi==None:
betaTemp,phi0,nmax0=shapelets.decomp.initParams(im,mode='fit',hdr=hdr)
else:
phi0=float(opts.init_phi)
if opts.init_xc==None:
xc=shapelets.img.centroid(im)
else:
xc=map(float,opts.init_xc.split(','))
xc=[xc[1],xc[0]] #input to numpy flip
nmax=opts.nmax.split(',')
if len(nmax)==1:
nmax=[int(nmax[0])+1,int(nmax[0])+1]
else:
nmax=[int(nmax[1])+1,int(nmax[0])+1] #input to numpy flip
print('Using beta: (%f,%f) :: \tphi: %f radians :: \tcentre: x,y=(%f,%f) :: \tnmax: (%i,%i)'%(beta0[1],beta0[0],phi0,xc[1],xc[0],nmax[1]-1,nmax[0]-1))
print('Fitting xc : %r\nFitting beta : %r\nFitting phi : %r'%(not(set_xc),not(set_beta),not(set_phi)))
    if opts.mode.startswith('pol'):
        # --- Polar (Laguerre) shapelet fit ---
        # Pre-compute polar coordinate grids about the initial centre.
        r0,th0=shapelets.shapelet.polarArray(xc,im.shape)
        #scipy-based minimizer
        # The nested tree below selects which of (xc, beta, phi) are free
        # parameters; fixed values are passed through to chi2PolarFunc.
        if set_xc:
            if set_beta:
                if set_phi:
                    #same as solveShapelets, no minimization
                    print('No parameters to minimize, solving for coefficients with input values')
                    beta1=beta0
                    phi1=phi0
                    xc1=xc
                else:
                    print('Running minimization for phi only...')
                    res=optimize.minimize(shapelets.decomp.chi2PolarFunc,[phi0],args=(nmax,im,nm,['phi'],beta0,None,xc,r0,th0),method=opts.fitterMethod,options={'xtol':opts.xtol,'ftol':opts.ftol,'maxiter':opts.maxiter})
                    print(res)
                    beta1=beta0
                    phi1=res['x'][0]
                    xc1=xc
            else:
                if set_phi:
                    print('Running minimization for beta only...')
                    res=optimize.minimize(shapelets.decomp.chi2PolarFunc,[beta0[0], beta0[1]],args=(nmax,im,nm,['beta0','beta1'],[None,None],phi0,xc,r0,th0),method=opts.fitterMethod,options={'xtol':opts.xtol,'ftol':opts.ftol,'maxiter':opts.maxiter})
                    print(res)
                    beta1=[res['x'][0],res['x'][1]]
                    phi1=phi0
                    xc1=xc
                else:
                    print('Running minimization for beta and phi...')
                    res=optimize.minimize(shapelets.decomp.chi2PolarFunc,[beta0[0], beta0[1], phi0],args=(nmax,im,nm,['beta0','beta1','phi'],[None,None],None,xc,r0,th0),method=opts.fitterMethod,options={'xtol':opts.xtol,'ftol':opts.ftol,'maxiter':opts.maxiter})
                    print(res)
                    beta1=[res['x'][0],res['x'][1]]
                    phi1=res['x'][2]
                    xc1=xc
        else:
            if set_beta:
                if set_phi:
                    print('Running minimization for centroid only...')
                    res=optimize.minimize(shapelets.decomp.chi2PolarFunc,[xc[0],xc[1]],args=(nmax,im,nm,['yc','xc'],beta0,phi0,[None,None],None,None),method=opts.fitterMethod,options={'xtol':opts.xtol,'ftol':opts.ftol,'maxiter':opts.maxiter})
                    print(res)
                    beta1=beta0
                    phi1=phi0
                    xc1=[res['x'][0],res['x'][1]]
                else:
                    print('Running minimization for phi and centroid...')
                    res=optimize.minimize(shapelets.decomp.chi2PolarFunc,[phi0,xc[0],xc[1]],args=(nmax,im,nm,['phi','yc','xc'],beta0,None,[None,None],None,None),method=opts.fitterMethod,options={'xtol':opts.xtol,'ftol':opts.ftol,'maxiter':opts.maxiter})
                    print(res)
                    beta1=beta0
                    phi1=res['x'][0]
                    xc1=[res['x'][1],res['x'][2]]
            else:
                if set_phi:
                    print('Running minimization for beta and centroid...')
                    res=optimize.minimize(shapelets.decomp.chi2PolarFunc,[beta0[0],beta0[1],xc[0],xc[1]],args=(nmax,im,nm,['beta0','beta1','yc','xc'],[None,None],phi0,[None,None],None,None),method=opts.fitterMethod,options={'xtol':opts.xtol,'ftol':opts.ftol,'maxiter':opts.maxiter})
                    print(res)
                    beta1=[res['x'][0],res['x'][1]]
                    phi1=phi0
                    xc1=[res['x'][2],res['x'][3]]
                else:
                    # All of beta, phi, xc free (default chi2PolarFunc mode).
                    print('Running minimization for beta, phi and centroid...')
                    res=optimize.minimize(shapelets.decomp.chi2PolarFunc,[beta0[0], beta0[1], phi0, xc[0], xc[1]],args=(nmax,im,nm),method=opts.fitterMethod,options={'xtol':opts.xtol,'ftol':opts.ftol,'maxiter':opts.maxiter})
                    print(res)
                    beta1=[res['x'][0],res['x'][1]]
                    phi1=res['x'][2]
                    xc1=[res['x'][3],res['x'][4]]
        print('\tDone')
        # Correct if beta is negative
        if beta1[0] < 0.: beta1[0] = np.abs(beta1[0])
        if beta1[1] < 0.: beta1[1] = np.abs(beta1[1])
        #scipy optimize brute force over a range of N values
        n0=1
        n1=opts.brute+1
        print('Running brute force for size of N on range [%i:%i]...'%(n0,n1-1))
        nmax1=optimize.brute(shapelets.decomp.chi2nmaxPolarFunc,[np.s_[n0:n1:1]],args=(im,nm,beta1[0],beta1[1],phi1,xc1),finish=None)
        nmax1=[int(nmax1),int(nmax1)]
        print('Using %i x %i coefficients'%(nmax1[1],nmax1[0]))
        print('\tDone')
        print('Solution:')
        # NOTE(review): the absolute centroid printed here adds extent[2] to
        # xc1[1] and extent[0] to xc1[0], while the WCS conversion below uses
        # extent[0] for xc1[1] and extent[2] for xc1[0] -- confirm the
        # intended x/y offset ordering.
        print('\tbeta: (%f,%f) \tphi: %f rad \tcentroid: (%f, %f) (sub image: %f,%f) pixels \t ncoeffs: %i x %i'%(beta1[1], beta1[0], phi1, xc1[1]+extent[2], xc1[0]+extent[0], xc1[1], xc1[0], nmax1[1],nmax1[0]))
        #plot: data, model, residual: model-data, coeffs
        if show_plots:
            fig = plt.figure()
            ax = fig.add_subplot(221)
            plt.title('Image')
            plt.imshow(im)
            # Overlay an ellipse showing the *initial* beta/phi guess.
            e=matplotlib.patches.Ellipse(xy=[xc[1],xc[0]],width=2.*beta0[1],height=2.*beta0[0],angle=(180.*phi0/np.pi))
            e.set_clip_box(ax.bbox)
            e.set_alpha(0.3)
            e.set_facecolor('black')
            ax.add_artist(e)
            plt.text(xc[1],xc[0],'+',horizontalalignment='center',verticalalignment='center')
            plt.colorbar()
            plt.subplot(222)
            plt.title('Model')
            # Rebuild the polar basis about the *fitted* centre and solve for
            # the coefficients; np.abs because the polar model is complex.
            r1,th1=shapelets.shapelet.polarArray(xc1,im.shape)
            bvals=shapelets.decomp.genPolarBasisMatrix(beta1,nmax1,phi1,r1,th1)
            coeffs=shapelets.decomp.solveCoeffs(bvals,im)
            mdl=np.abs(shapelets.img.constructModel(bvals,coeffs,im.shape))
            plt.imshow(mdl)
            plt.colorbar()
            plt.subplot(223)
            plt.title('Residual')
            res=im-mdl
            plt.imshow(res)
            plt.colorbar()
            plt.subplot(224)
            plt.title('Coefficients')
            # Real and (left-right flipped) imaginary coefficient images
            # shown side by side.
            cimR=shapelets.img.polarCoeffImg(coeffs.real,nmax1)
            cimI=shapelets.img.polarCoeffImg(coeffs.imag,nmax1)
            cimI=np.fliplr(cimI)
            cim=np.concatenate((cimR,cimI),axis=1)
            #plt.pcolor(cim)
            plt.imshow(cim,interpolation='nearest',origin='lower')
            plt.colorbar()
        else:
            # No plotting: still need the final basis/coefficient solve.
            r1,th1=shapelets.shapelet.polarArray(xc1,im.shape)
            bvals=shapelets.decomp.genPolarBasisMatrix(beta1,nmax1,phi1,r1,th1)
            coeffs=shapelets.decomp.solveCoeffs(bvals,im)
        #determine (RA,dec) coordinates for centroid position
        # (extent is always set above, so only the else branch is reachable)
        if extent is None:
            #radec=hdr['wcs'].wcs_pix2sky(np.array([ [xc1[1]+1,xc1[0]+1] ]),1)[0] #unit: degrees, FITS conventions: first pixel is (1,1)
            radec=hdr['wcs'].all_pix2world(np.array([ [xc1[1]+1,xc1[0]+1] ]),1)[0] #unit: degrees, FITS conventions: first pixel is (1,1)
        else:
            #radec=hdr['wcs'].wcs_pix2sky(np.array([ [xc1[1]+extent[0]+1,im0.shape[0]-(extent[2]+xc1[0])] ]),1)[0] #unit: degrees, FITS conventions: first pixel is (1,1)
            radec=hdr['wcs'].all_pix2world(np.array([ [xc1[1]+extent[0]+1,im0.shape[0]-(extent[2]+xc1[0])] ]),1)[0] #unit: degrees, FITS conventions: first pixel is (1,1)
        print('Centroid RA: %f (deg) Dec: %f (deg)'%(radec[0],radec[1]))
        ofn=opts.ofn
        print('Writing to file:',ofn)
        shapelets.fileio.writeLageurreCoeffs(ofn,coeffs,xc1,im.shape,beta1,phi1,nmax1,info=ifn,pos=[radec[0],radec[1],hdr['dra'],hdr['ddec']])
    elif opts.mode.startswith('cart'):
        # --- Cartesian (Hermite) shapelet fit ---
        # Pixel coordinate grids relative to the initial centre.
        ry=np.array(range(0,im.shape[0]),dtype=float)-xc[0]
        rx=np.array(range(0,im.shape[1]),dtype=float)-xc[1]
        xx0,yy0=shapelets.shapelet.xy2Grid(ry,rx)
        #scipy-based minimizer
        # The nested tree below selects which of (xc, beta, phi) are free
        # parameters; fixed values are passed through to chi2Func.
        if set_xc:
            if set_beta:
                if set_phi:
                    #same as solveShapelets, no minimization
                    print('No parameters to minimize, solving for coefficients with input values')
                    beta1=beta0
                    phi1=phi0
                    xc1=xc
                else:
                    print('Running minimization for phi only...')
                    res=optimize.minimize(shapelets.decomp.chi2Func,[phi0],args=(nmax,im,nm,['phi'],beta0,None,xc,xx0,yy0),method=opts.fitterMethod,options={'xtol':opts.xtol,'ftol':opts.ftol,'maxiter':opts.maxiter})
                    print(res)
                    beta1=beta0
                    phi1=res['x'][0]
                    xc1=xc
            else:
                if set_phi:
                    print('Running minimization for beta only...')
                    res=optimize.minimize(shapelets.decomp.chi2Func,[beta0[0], beta0[1]],args=(nmax,im,nm,['beta0','beta1'],[None,None],phi0,xc,xx0,yy0),method=opts.fitterMethod,options={'xtol':opts.xtol,'ftol':opts.ftol,'maxiter':opts.maxiter})
                    print(res)
                    beta1=[res['x'][0],res['x'][1]]
                    phi1=phi0
                    xc1=xc
                else:
                    print('Running minimization for beta and phi...')
                    res=optimize.minimize(shapelets.decomp.chi2Func,[beta0[0], beta0[1], phi0],args=(nmax,im,nm,['beta0','beta1','phi'],[None,None],None,xc,xx0,yy0),method=opts.fitterMethod,options={'xtol':opts.xtol,'ftol':opts.ftol,'maxiter':opts.maxiter})
                    print(res)
                    beta1=[res['x'][0],res['x'][1]]
                    phi1=res['x'][2]
                    xc1=xc
        else:
            if set_beta:
                if set_phi:
                    print('Running minimization for centroid only...')
                    res=optimize.minimize(shapelets.decomp.chi2Func,[xc[0],xc[1]],args=(nmax,im,nm,['yc','xc'],beta0,phi0,[None,None],None,None),method=opts.fitterMethod,options={'xtol':opts.xtol,'ftol':opts.ftol,'maxiter':opts.maxiter})
                    print(res)
                    beta1=beta0
                    phi1=phi0
                    xc1=[res['x'][0],res['x'][1]]
                else:
                    print('Running minimization for phi and centroid...')
                    res=optimize.minimize(shapelets.decomp.chi2Func,[phi0,xc[0],xc[1]],args=(nmax,im,nm,['phi','yc','xc'],beta0,None,[None,None],None,None),method=opts.fitterMethod,options={'xtol':opts.xtol,'ftol':opts.ftol,'maxiter':opts.maxiter})
                    print(res)
                    beta1=beta0
                    phi1=res['x'][0]
                    xc1=[res['x'][1],res['x'][2]]
            else:
                if set_phi:
                    print('Running minimization for beta and centroid...')
                    res=optimize.minimize(shapelets.decomp.chi2Func,[beta0[0],beta0[1],xc[0],xc[1]],args=(nmax,im,nm,['beta0','beta1','yc','xc'],[None,None],phi0,[None,None],None,None),method=opts.fitterMethod,options={'xtol':opts.xtol,'ftol':opts.ftol,'maxiter':opts.maxiter})
                    print(res)
                    beta1=[res['x'][0],res['x'][1]]
                    phi1=phi0
                    xc1=[res['x'][2],res['x'][3]]
                else:
                    # All of beta, phi, xc free (default chi2Func mode).
                    print('Running minimization for beta, phi and centroid...')
                    res=optimize.minimize(shapelets.decomp.chi2Func,[beta0[0], beta0[1], phi0, xc[0], xc[1]],args=(nmax,im,nm),method=opts.fitterMethod,options={'xtol':opts.xtol,'ftol':opts.ftol,'maxiter':opts.maxiter})
                    print(res)
                    beta1=[res['x'][0],res['x'][1]]
                    phi1=res['x'][2]
                    xc1=[res['x'][3],res['x'][4]]
        print('\tDone')
        # Correct if beta is negative
        if beta1[0] < 0.: beta1[0] = np.abs(beta1[0])
        if beta1[1] < 0.: beta1[1] = np.abs(beta1[1])
        #scipy optimize brute force over a range of N values
        n0=1
        n1=opts.brute+1
        print('Running brute force for size of N on range [%i:%i]...'%(n0,n1-1))
        nmax1=optimize.brute(shapelets.decomp.chi2nmaxFunc,[np.s_[n0:n1:1]],args=(im,nm,beta1[0],beta1[1],phi1,xc1),finish=None)
        nmax1=[int(nmax1),int(nmax1)]
        print('Using %i x %i coefficients'%(nmax1[1],nmax1[0]))
        print('\tDone')
        print('Solution:')
        # NOTE(review): the absolute centroid printed here adds extent[2] to
        # xc1[1] and extent[0] to xc1[0], while the WCS conversion below uses
        # extent[0] for xc1[1] and extent[2] for xc1[0] -- confirm the
        # intended x/y offset ordering.
        print('\tbeta: (%f,%f) \tphi: %f rad \tcentroid: (%f, %f) (sub image: %f,%f) pixels \t ncoeffs: %i x %i'%(beta1[1], beta1[0], phi1, xc1[1]+extent[2], xc1[0]+extent[0], xc1[1], xc1[0], nmax1[1],nmax1[0]))
        #plot: data, model, residual: model-data, coeffs
        if show_plots:
            fig = plt.figure()
            ax = fig.add_subplot(221)
            plt.title('Image')
            plt.imshow(im)
            # Overlay an ellipse showing the *initial* beta/phi guess.
            e=matplotlib.patches.Ellipse(xy=[xc[1],xc[0]],width=2.*beta0[1],height=2.*beta0[0],angle=(180.*phi0/np.pi))
            e.set_clip_box(ax.bbox)
            e.set_alpha(0.3)
            e.set_facecolor('black')
            ax.add_artist(e)
            plt.text(xc[1],xc[0],'+',horizontalalignment='center',verticalalignment='center')
            plt.colorbar()
            plt.subplot(222)
            plt.title('Model')
            # Rebuild the Cartesian basis about the *fitted* centre and solve
            # for the coefficients.
            ry=np.array(range(0,im.shape[0]),dtype=float)-xc1[0]
            rx=np.array(range(0,im.shape[1]),dtype=float)-xc1[1]
            yy,xx=shapelets.shapelet.xy2Grid(ry,rx)
            bvals=shapelets.decomp.genBasisMatrix(beta1,nmax1,phi1,yy,xx)
            coeffs=shapelets.decomp.solveCoeffs(bvals,im)
            mdl=shapelets.img.constructModel(bvals,coeffs,im.shape)
            plt.imshow(mdl)
            plt.text(xc1[1],xc1[0],'+',horizontalalignment='center',verticalalignment='center')
            plt.colorbar()
            plt.subplot(223)
            plt.title('Residual')
            res=im-mdl
            plt.imshow(res)
            plt.colorbar()
            plt.subplot(224)
            plt.title('Coefficients')
            sqCoeffs=np.reshape(coeffs,nmax1)
            #plt.pcolor(sqCoeffs)
            plt.imshow(sqCoeffs,interpolation='nearest',origin='lower')
            plt.colorbar()
        else:
            # No plotting: still need the final basis/coefficient solve.
            ry=np.array(range(0,im.shape[0]),dtype=float)-xc1[0]
            rx=np.array(range(0,im.shape[1]),dtype=float)-xc1[1]
            yy,xx=shapelets.shapelet.xy2Grid(ry,rx)
            bvals=shapelets.decomp.genBasisMatrix(beta1,nmax1,phi1,yy,xx)
            coeffs=shapelets.decomp.solveCoeffs(bvals,im)
        #determine (RA,dec) coordinates for centroid position
        # NOTE(review): this branch calls wcs_pix2world (no SIP/distortion)
        # while the polar branch uses all_pix2world -- confirm which is
        # intended; also only the else branch is reachable (extent is set).
        if extent is None:
            #radec=hdr['wcs'].wcs_pix2sky(np.array([ [xc1[1]+1,xc1[0]+1] ]),1)[0] #unit: degrees, FITS conventions: first pixel is (1,1)
            radec=hdr['wcs'].wcs_pix2world(np.array([ [xc1[1]+1,xc1[0]+1] ]),1)[0] #unit: degrees, FITS conventions: first pixel is (1,1)
        else:
            #radec=hdr['wcs'].wcs_pix2sky(np.array([ [xc1[1]+extent[0]+1,im0.shape[0]-(extent[2]+xc1[0])] ]),1)[0] #unit: degrees, FITS conventions: first pixel is (1,1)
            radec=hdr['wcs'].wcs_pix2world(np.array([ [xc1[1]+extent[0]+1,im0.shape[0]-(extent[2]+xc1[0])] ]),1)[0] #unit: degrees, FITS conventions: first pixel is (1,1)
        print('Centroid RA: %f (deg) Dec: %f (deg)'%(radec[0],radec[1]))
        ofn=opts.ofn
        print('Writing to file:',ofn)
        shapelets.fileio.writeHermiteCoeffs(ofn,coeffs,xc1,im.shape,beta1,phi1,nmax1,info=ifn,pos=[radec[0],radec[1],hdr['dra'],hdr['ddec']])
if show_plots:
if not (opts.savefig is None):
plt.savefig(opts.savefig)
else: plt.show()
| [
"matplotlib.pyplot.title",
"shapelets.img.makeNoiseMap",
"numpy.abs",
"optparse.OptionParser",
"shapelets.img.centroid",
"shapelets.decomp.genBasisMatrix",
"shapelets.img.estimateNoise",
"matplotlib.pyplot.figure",
"scipy.optimize.minimize",
"matplotlib.pyplot.imshow",
"shapelets.decomp.genPolar... | [((241, 255), 'optparse.OptionParser', 'OptionParser', ([], {}), '()\n', (253, 255), False, 'from optparse import OptionParser\n'), ((3847, 3887), 'shapelets.fileio.readFITS', 'shapelets.fileio.readFITS', (['ifn'], {'hdr': '(True)'}), '(ifn, hdr=True)\n', (3872, 3887), False, 'import shapelets\n'), ((4023, 4098), 'shapelets.img.selPxRange', 'shapelets.img.selPxRange', (['im0', '[extent[2], extent[3], extent[0], extent[1]]'], {}), '(im0, [extent[2], extent[3], extent[0], extent[1]])\n', (4047, 4098), False, 'import shapelets\n'), ((4233, 4280), 'shapelets.img.estimateNoise', 'shapelets.img.estimateNoise', (['im0'], {'mode': '"""sample"""'}), "(im0, mode='sample')\n", (4260, 4280), False, 'import shapelets\n'), ((4291, 4338), 'shapelets.img.makeNoiseMap', 'shapelets.img.makeNoiseMap', (['im.shape', 'mean', 'std'], {}), '(im.shape, mean, std)\n', (4317, 4338), False, 'import shapelets\n'), ((4595, 4642), 'shapelets.img.makeNoiseMap', 'shapelets.img.makeNoiseMap', (['im.shape', 'mean', 'std'], {}), '(im.shape, mean, std)\n', (4621, 4642), False, 'import shapelets\n'), ((4840, 4892), 'shapelets.decomp.initParams', 'shapelets.decomp.initParams', (['im'], {'mode': '"""fit"""', 'hdr': 'hdr'}), "(im, mode='fit', hdr=hdr)\n", (4867, 4892), False, 'import shapelets\n'), ((5146, 5198), 'shapelets.decomp.initParams', 'shapelets.decomp.initParams', (['im'], {'mode': '"""fit"""', 'hdr': 'hdr'}), "(im, mode='fit', hdr=hdr)\n", (5173, 5198), False, 'import shapelets\n'), ((5284, 5310), 'shapelets.img.centroid', 'shapelets.img.centroid', (['im'], {}), '(im)\n', (5306, 5310), False, 'import shapelets\n'), ((5901, 5944), 'shapelets.shapelet.polarArray', 'shapelets.shapelet.polarArray', (['xc', 'im.shape'], {}), '(xc, im.shape)\n', (5930, 5944), False, 'import shapelets\n'), ((10189, 10321), 'scipy.optimize.brute', 'optimize.brute', (['shapelets.decomp.chi2nmaxPolarFunc', '[np.s_[n0:n1:1]]'], {'args': '(im, nm, beta1[0], beta1[1], phi1, xc1)', 'finish': 
'None'}), '(shapelets.decomp.chi2nmaxPolarFunc, [np.s_[n0:n1:1]], args=(\n im, nm, beta1[0], beta1[1], phi1, xc1), finish=None)\n', (10203, 10321), False, 'from scipy import optimize\n'), ((13306, 13455), 'shapelets.fileio.writeLageurreCoeffs', 'shapelets.fileio.writeLageurreCoeffs', (['ofn', 'coeffs', 'xc1', 'im.shape', 'beta1', 'phi1', 'nmax1'], {'info': 'ifn', 'pos': "[radec[0], radec[1], hdr['dra'], hdr['ddec']]"}), "(ofn, coeffs, xc1, im.shape, beta1,\n phi1, nmax1, info=ifn, pos=[radec[0], radec[1], hdr['dra'], hdr['ddec']])\n", (13342, 13455), False, 'import shapelets\n'), ((4494, 4573), 'shapelets.img.selPxRange', 'shapelets.img.selPxRange', (['im0', '[nextent[2], nextent[3], nextent[0], nextent[1]]'], {}), '(im0, [nextent[2], nextent[3], nextent[0], nextent[1]])\n', (4518, 4573), False, 'import shapelets\n'), ((9924, 9940), 'numpy.abs', 'np.abs', (['beta1[0]'], {}), '(beta1[0])\n', (9930, 9940), True, 'import numpy as np\n'), ((9978, 9994), 'numpy.abs', 'np.abs', (['beta1[1]'], {}), '(beta1[1])\n', (9984, 9994), True, 'import numpy as np\n'), ((10774, 10786), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (10784, 10786), True, 'from matplotlib import pyplot as plt\n'), ((10837, 10855), 'matplotlib.pyplot.title', 'plt.title', (['"""Image"""'], {}), "('Image')\n", (10846, 10855), True, 'from matplotlib import pyplot as plt\n'), ((10868, 10882), 'matplotlib.pyplot.imshow', 'plt.imshow', (['im'], {}), '(im)\n', (10878, 10882), True, 'from matplotlib import pyplot as plt\n'), ((11146, 11236), 'matplotlib.pyplot.text', 'plt.text', (['xc[1]', 'xc[0]', '"""+"""'], {'horizontalalignment': '"""center"""', 'verticalalignment': '"""center"""'}), "(xc[1], xc[0], '+', horizontalalignment='center', verticalalignment\n ='center')\n", (11154, 11236), True, 'from matplotlib import pyplot as plt\n'), ((11240, 11254), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {}), '()\n', (11252, 11254), True, 'from matplotlib import pyplot as plt\n'), ((11280, 11296), 
'matplotlib.pyplot.subplot', 'plt.subplot', (['(222)'], {}), '(222)\n', (11291, 11296), True, 'from matplotlib import pyplot as plt\n'), ((11309, 11327), 'matplotlib.pyplot.title', 'plt.title', (['"""Model"""'], {}), "('Model')\n", (11318, 11327), True, 'from matplotlib import pyplot as plt\n'), ((11347, 11391), 'shapelets.shapelet.polarArray', 'shapelets.shapelet.polarArray', (['xc1', 'im.shape'], {}), '(xc1, im.shape)\n', (11376, 11391), False, 'import shapelets\n'), ((11409, 11474), 'shapelets.decomp.genPolarBasisMatrix', 'shapelets.decomp.genPolarBasisMatrix', (['beta1', 'nmax1', 'phi1', 'r1', 'th1'], {}), '(beta1, nmax1, phi1, r1, th1)\n', (11445, 11474), False, 'import shapelets\n'), ((11490, 11529), 'shapelets.decomp.solveCoeffs', 'shapelets.decomp.solveCoeffs', (['bvals', 'im'], {}), '(bvals, im)\n', (11518, 11529), False, 'import shapelets\n'), ((11617, 11632), 'matplotlib.pyplot.imshow', 'plt.imshow', (['mdl'], {}), '(mdl)\n', (11627, 11632), True, 'from matplotlib import pyplot as plt\n'), ((11645, 11659), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {}), '()\n', (11657, 11659), True, 'from matplotlib import pyplot as plt\n'), ((11685, 11701), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(223)'], {}), '(223)\n', (11696, 11701), True, 'from matplotlib import pyplot as plt\n'), ((11714, 11735), 'matplotlib.pyplot.title', 'plt.title', (['"""Residual"""'], {}), "('Residual')\n", (11723, 11735), True, 'from matplotlib import pyplot as plt\n'), ((11771, 11786), 'matplotlib.pyplot.imshow', 'plt.imshow', (['res'], {}), '(res)\n', (11781, 11786), True, 'from matplotlib import pyplot as plt\n'), ((11799, 11813), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {}), '()\n', (11811, 11813), True, 'from matplotlib import pyplot as plt\n'), ((11839, 11855), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(224)'], {}), '(224)\n', (11850, 11855), True, 'from matplotlib import pyplot as plt\n'), ((11868, 11893), 'matplotlib.pyplot.title', 'plt.title', 
(['"""Coefficients"""'], {}), "('Coefficients')\n", (11877, 11893), True, 'from matplotlib import pyplot as plt\n'), ((11911, 11958), 'shapelets.img.polarCoeffImg', 'shapelets.img.polarCoeffImg', (['coeffs.real', 'nmax1'], {}), '(coeffs.real, nmax1)\n', (11938, 11958), False, 'import shapelets\n'), ((11975, 12022), 'shapelets.img.polarCoeffImg', 'shapelets.img.polarCoeffImg', (['coeffs.imag', 'nmax1'], {}), '(coeffs.imag, nmax1)\n', (12002, 12022), False, 'import shapelets\n'), ((12039, 12054), 'numpy.fliplr', 'np.fliplr', (['cimI'], {}), '(cimI)\n', (12048, 12054), True, 'import numpy as np\n'), ((12071, 12107), 'numpy.concatenate', 'np.concatenate', (['(cimR, cimI)'], {'axis': '(1)'}), '((cimR, cimI), axis=1)\n', (12085, 12107), True, 'import numpy as np\n'), ((12147, 12203), 'matplotlib.pyplot.imshow', 'plt.imshow', (['cim'], {'interpolation': '"""nearest"""', 'origin': '"""lower"""'}), "(cim, interpolation='nearest', origin='lower')\n", (12157, 12203), True, 'from matplotlib import pyplot as plt\n'), ((12214, 12228), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {}), '()\n', (12226, 12228), True, 'from matplotlib import pyplot as plt\n'), ((12262, 12306), 'shapelets.shapelet.polarArray', 'shapelets.shapelet.polarArray', (['xc1', 'im.shape'], {}), '(xc1, im.shape)\n', (12291, 12306), False, 'import shapelets\n'), ((12324, 12389), 'shapelets.decomp.genPolarBasisMatrix', 'shapelets.decomp.genPolarBasisMatrix', (['beta1', 'nmax1', 'phi1', 'r1', 'th1'], {}), '(beta1, nmax1, phi1, r1, th1)\n', (12360, 12389), False, 'import shapelets\n'), ((12405, 12444), 'shapelets.decomp.solveCoeffs', 'shapelets.decomp.solveCoeffs', (['bvals', 'im'], {}), '(bvals, im)\n', (12433, 12444), False, 'import shapelets\n'), ((13625, 13659), 'shapelets.shapelet.xy2Grid', 'shapelets.shapelet.xy2Grid', (['ry', 'rx'], {}), '(ry, rx)\n', (13651, 13659), False, 'import shapelets\n'), ((17872, 17998), 'scipy.optimize.brute', 'optimize.brute', (['shapelets.decomp.chi2nmaxFunc', 
'[np.s_[n0:n1:1]]'], {'args': '(im, nm, beta1[0], beta1[1], phi1, xc1)', 'finish': 'None'}), '(shapelets.decomp.chi2nmaxFunc, [np.s_[n0:n1:1]], args=(im,\n nm, beta1[0], beta1[1], phi1, xc1), finish=None)\n', (17886, 17998), False, 'from scipy import optimize\n'), ((21138, 21286), 'shapelets.fileio.writeHermiteCoeffs', 'shapelets.fileio.writeHermiteCoeffs', (['ofn', 'coeffs', 'xc1', 'im.shape', 'beta1', 'phi1', 'nmax1'], {'info': 'ifn', 'pos': "[radec[0], radec[1], hdr['dra'], hdr['ddec']]"}), "(ofn, coeffs, xc1, im.shape, beta1, phi1,\n nmax1, info=ifn, pos=[radec[0], radec[1], hdr['dra'], hdr['ddec']])\n", (21173, 21286), False, 'import shapelets\n'), ((21351, 21376), 'matplotlib.pyplot.savefig', 'plt.savefig', (['opts.savefig'], {}), '(opts.savefig)\n', (21362, 21376), True, 'from matplotlib import pyplot as plt\n'), ((21391, 21401), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (21399, 21401), True, 'from matplotlib import pyplot as plt\n'), ((11552, 11605), 'shapelets.img.constructModel', 'shapelets.img.constructModel', (['bvals', 'coeffs', 'im.shape'], {}), '(bvals, coeffs, im.shape)\n', (11580, 11605), False, 'import shapelets\n'), ((17607, 17623), 'numpy.abs', 'np.abs', (['beta1[0]'], {}), '(beta1[0])\n', (17613, 17623), True, 'import numpy as np\n'), ((17661, 17677), 'numpy.abs', 'np.abs', (['beta1[1]'], {}), '(beta1[1])\n', (17667, 17677), True, 'import numpy as np\n'), ((18452, 18464), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (18462, 18464), True, 'from matplotlib import pyplot as plt\n'), ((18515, 18533), 'matplotlib.pyplot.title', 'plt.title', (['"""Image"""'], {}), "('Image')\n", (18524, 18533), True, 'from matplotlib import pyplot as plt\n'), ((18546, 18560), 'matplotlib.pyplot.imshow', 'plt.imshow', (['im'], {}), '(im)\n', (18556, 18560), True, 'from matplotlib import pyplot as plt\n'), ((18824, 18914), 'matplotlib.pyplot.text', 'plt.text', (['xc[1]', 'xc[0]', '"""+"""'], {'horizontalalignment': '"""center"""', 
'verticalalignment': '"""center"""'}), "(xc[1], xc[0], '+', horizontalalignment='center', verticalalignment\n ='center')\n", (18832, 18914), True, 'from matplotlib import pyplot as plt\n'), ((18918, 18932), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {}), '()\n', (18930, 18932), True, 'from matplotlib import pyplot as plt\n'), ((18958, 18974), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(222)'], {}), '(222)\n', (18969, 18974), True, 'from matplotlib import pyplot as plt\n'), ((18987, 19005), 'matplotlib.pyplot.title', 'plt.title', (['"""Model"""'], {}), "('Model')\n", (18996, 19005), True, 'from matplotlib import pyplot as plt\n'), ((19154, 19188), 'shapelets.shapelet.xy2Grid', 'shapelets.shapelet.xy2Grid', (['ry', 'rx'], {}), '(ry, rx)\n', (19180, 19188), False, 'import shapelets\n'), ((19206, 19265), 'shapelets.decomp.genBasisMatrix', 'shapelets.decomp.genBasisMatrix', (['beta1', 'nmax1', 'phi1', 'yy', 'xx'], {}), '(beta1, nmax1, phi1, yy, xx)\n', (19237, 19265), False, 'import shapelets\n'), ((19281, 19320), 'shapelets.decomp.solveCoeffs', 'shapelets.decomp.solveCoeffs', (['bvals', 'im'], {}), '(bvals, im)\n', (19309, 19320), False, 'import shapelets\n'), ((19336, 19389), 'shapelets.img.constructModel', 'shapelets.img.constructModel', (['bvals', 'coeffs', 'im.shape'], {}), '(bvals, coeffs, im.shape)\n', (19364, 19389), False, 'import shapelets\n'), ((19400, 19415), 'matplotlib.pyplot.imshow', 'plt.imshow', (['mdl'], {}), '(mdl)\n', (19410, 19415), True, 'from matplotlib import pyplot as plt\n'), ((19428, 19519), 'matplotlib.pyplot.text', 'plt.text', (['xc1[1]', 'xc1[0]', '"""+"""'], {'horizontalalignment': '"""center"""', 'verticalalignment': '"""center"""'}), "(xc1[1], xc1[0], '+', horizontalalignment='center',\n verticalalignment='center')\n", (19436, 19519), True, 'from matplotlib import pyplot as plt\n'), ((19524, 19538), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {}), '()\n', (19536, 19538), True, 'from matplotlib import pyplot as plt\n'), 
((19564, 19580), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(223)'], {}), '(223)\n', (19575, 19580), True, 'from matplotlib import pyplot as plt\n'), ((19593, 19614), 'matplotlib.pyplot.title', 'plt.title', (['"""Residual"""'], {}), "('Residual')\n", (19602, 19614), True, 'from matplotlib import pyplot as plt\n'), ((19650, 19665), 'matplotlib.pyplot.imshow', 'plt.imshow', (['res'], {}), '(res)\n', (19660, 19665), True, 'from matplotlib import pyplot as plt\n'), ((19678, 19692), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {}), '()\n', (19690, 19692), True, 'from matplotlib import pyplot as plt\n'), ((19706, 19722), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(224)'], {}), '(224)\n', (19717, 19722), True, 'from matplotlib import pyplot as plt\n'), ((19735, 19760), 'matplotlib.pyplot.title', 'plt.title', (['"""Coefficients"""'], {}), "('Coefficients')\n", (19744, 19760), True, 'from matplotlib import pyplot as plt\n'), ((19782, 19807), 'numpy.reshape', 'np.reshape', (['coeffs', 'nmax1'], {}), '(coeffs, nmax1)\n', (19792, 19807), True, 'import numpy as np\n'), ((19853, 19914), 'matplotlib.pyplot.imshow', 'plt.imshow', (['sqCoeffs'], {'interpolation': '"""nearest"""', 'origin': '"""lower"""'}), "(sqCoeffs, interpolation='nearest', origin='lower')\n", (19863, 19914), True, 'from matplotlib import pyplot as plt\n'), ((19925, 19939), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {}), '()\n', (19937, 19939), True, 'from matplotlib import pyplot as plt\n'), ((20102, 20136), 'shapelets.shapelet.xy2Grid', 'shapelets.shapelet.xy2Grid', (['ry', 'rx'], {}), '(ry, rx)\n', (20128, 20136), False, 'import shapelets\n'), ((20154, 20213), 'shapelets.decomp.genBasisMatrix', 'shapelets.decomp.genBasisMatrix', (['beta1', 'nmax1', 'phi1', 'yy', 'xx'], {}), '(beta1, nmax1, phi1, yy, xx)\n', (20185, 20213), False, 'import shapelets\n'), ((20229, 20268), 'shapelets.decomp.solveCoeffs', 'shapelets.decomp.solveCoeffs', (['bvals', 'im'], {}), '(bvals, im)\n', (20257, 20268), 
False, 'import shapelets\n'), ((6409, 6629), 'scipy.optimize.minimize', 'optimize.minimize', (['shapelets.decomp.chi2PolarFunc', '[phi0]'], {'args': "(nmax, im, nm, ['phi'], beta0, None, xc, r0, th0)", 'method': 'opts.fitterMethod', 'options': "{'xtol': opts.xtol, 'ftol': opts.ftol, 'maxiter': opts.maxiter}"}), "(shapelets.decomp.chi2PolarFunc, [phi0], args=(nmax, im,\n nm, ['phi'], beta0, None, xc, r0, th0), method=opts.fitterMethod,\n options={'xtol': opts.xtol, 'ftol': opts.ftol, 'maxiter': opts.maxiter})\n", (6426, 6629), False, 'from scipy import optimize\n'), ((6869, 7127), 'scipy.optimize.minimize', 'optimize.minimize', (['shapelets.decomp.chi2PolarFunc', '[beta0[0], beta0[1]]'], {'args': "(nmax, im, nm, ['beta0', 'beta1'], [None, None], phi0, xc, r0, th0)", 'method': 'opts.fitterMethod', 'options': "{'xtol': opts.xtol, 'ftol': opts.ftol, 'maxiter': opts.maxiter}"}), "(shapelets.decomp.chi2PolarFunc, [beta0[0], beta0[1]],\n args=(nmax, im, nm, ['beta0', 'beta1'], [None, None], phi0, xc, r0, th0\n ), method=opts.fitterMethod, options={'xtol': opts.xtol, 'ftol': opts.\n ftol, 'maxiter': opts.maxiter})\n", (6886, 7127), False, 'from scipy import optimize\n'), ((7351, 7621), 'scipy.optimize.minimize', 'optimize.minimize', (['shapelets.decomp.chi2PolarFunc', '[beta0[0], beta0[1], phi0]'], {'args': "(nmax, im, nm, ['beta0', 'beta1', 'phi'], [None, None], None, xc, r0, th0)", 'method': 'opts.fitterMethod', 'options': "{'xtol': opts.xtol, 'ftol': opts.ftol, 'maxiter': opts.maxiter}"}), "(shapelets.decomp.chi2PolarFunc, [beta0[0], beta0[1], phi0\n ], args=(nmax, im, nm, ['beta0', 'beta1', 'phi'], [None, None], None,\n xc, r0, th0), method=opts.fitterMethod, options={'xtol': opts.xtol,\n 'ftol': opts.ftol, 'maxiter': opts.maxiter})\n", (7368, 7621), False, 'from scipy import optimize\n'), ((7898, 8149), 'scipy.optimize.minimize', 'optimize.minimize', (['shapelets.decomp.chi2PolarFunc', '[xc[0], xc[1]]'], {'args': "(nmax, im, nm, ['yc', 'xc'], beta0, phi0, [None, 
None], None, None)", 'method': 'opts.fitterMethod', 'options': "{'xtol': opts.xtol, 'ftol': opts.ftol, 'maxiter': opts.maxiter}"}), "(shapelets.decomp.chi2PolarFunc, [xc[0], xc[1]], args=(\n nmax, im, nm, ['yc', 'xc'], beta0, phi0, [None, None], None, None),\n method=opts.fitterMethod, options={'xtol': opts.xtol, 'ftol': opts.ftol,\n 'maxiter': opts.maxiter})\n", (7915, 8149), False, 'from scipy import optimize\n'), ((8380, 8643), 'scipy.optimize.minimize', 'optimize.minimize', (['shapelets.decomp.chi2PolarFunc', '[phi0, xc[0], xc[1]]'], {'args': "(nmax, im, nm, ['phi', 'yc', 'xc'], beta0, None, [None, None], None, None)", 'method': 'opts.fitterMethod', 'options': "{'xtol': opts.xtol, 'ftol': opts.ftol, 'maxiter': opts.maxiter}"}), "(shapelets.decomp.chi2PolarFunc, [phi0, xc[0], xc[1]],\n args=(nmax, im, nm, ['phi', 'yc', 'xc'], beta0, None, [None, None],\n None, None), method=opts.fitterMethod, options={'xtol': opts.xtol,\n 'ftol': opts.ftol, 'maxiter': opts.maxiter})\n", (8397, 8643), False, 'from scipy import optimize\n'), ((8905, 9201), 'scipy.optimize.minimize', 'optimize.minimize', (['shapelets.decomp.chi2PolarFunc', '[beta0[0], beta0[1], xc[0], xc[1]]'], {'args': "(nmax, im, nm, ['beta0', 'beta1', 'yc', 'xc'], [None, None], phi0, [None,\n None], None, None)", 'method': 'opts.fitterMethod', 'options': "{'xtol': opts.xtol, 'ftol': opts.ftol, 'maxiter': opts.maxiter}"}), "(shapelets.decomp.chi2PolarFunc, [beta0[0], beta0[1], xc[0\n ], xc[1]], args=(nmax, im, nm, ['beta0', 'beta1', 'yc', 'xc'], [None,\n None], phi0, [None, None], None, None), method=opts.fitterMethod,\n options={'xtol': opts.xtol, 'ftol': opts.ftol, 'maxiter': opts.maxiter})\n", (8922, 9201), False, 'from scipy import optimize\n'), ((9453, 9673), 'scipy.optimize.minimize', 'optimize.minimize', (['shapelets.decomp.chi2PolarFunc', '[beta0[0], beta0[1], phi0, xc[0], xc[1]]'], {'args': '(nmax, im, nm)', 'method': 'opts.fitterMethod', 'options': "{'xtol': opts.xtol, 'ftol': opts.ftol, 'maxiter': 
opts.maxiter}"}), "(shapelets.decomp.chi2PolarFunc, [beta0[0], beta0[1], phi0,\n xc[0], xc[1]], args=(nmax, im, nm), method=opts.fitterMethod, options={\n 'xtol': opts.xtol, 'ftol': opts.ftol, 'maxiter': opts.maxiter})\n", (9470, 9673), False, 'from scipy import optimize\n'), ((12714, 12750), 'numpy.array', 'np.array', (['[[xc1[1] + 1, xc1[0] + 1]]'], {}), '([[xc1[1] + 1, xc1[0] + 1]])\n', (12722, 12750), True, 'import numpy as np\n'), ((13036, 13109), 'numpy.array', 'np.array', (['[[xc1[1] + extent[0] + 1, im0.shape[0] - (extent[2] + xc1[0])]]'], {}), '([[xc1[1] + extent[0] + 1, im0.shape[0] - (extent[2] + xc1[0])]])\n', (13044, 13109), True, 'import numpy as np\n'), ((14124, 14342), 'scipy.optimize.minimize', 'optimize.minimize', (['shapelets.decomp.chi2Func', '[phi0]'], {'args': "(nmax, im, nm, ['phi'], beta0, None, xc, xx0, yy0)", 'method': 'opts.fitterMethod', 'options': "{'xtol': opts.xtol, 'ftol': opts.ftol, 'maxiter': opts.maxiter}"}), "(shapelets.decomp.chi2Func, [phi0], args=(nmax, im, nm, [\n 'phi'], beta0, None, xc, xx0, yy0), method=opts.fitterMethod, options={\n 'xtol': opts.xtol, 'ftol': opts.ftol, 'maxiter': opts.maxiter})\n", (14141, 14342), False, 'from scipy import optimize\n'), ((14580, 14833), 'scipy.optimize.minimize', 'optimize.minimize', (['shapelets.decomp.chi2Func', '[beta0[0], beta0[1]]'], {'args': "(nmax, im, nm, ['beta0', 'beta1'], [None, None], phi0, xc, xx0, yy0)", 'method': 'opts.fitterMethod', 'options': "{'xtol': opts.xtol, 'ftol': opts.ftol, 'maxiter': opts.maxiter}"}), "(shapelets.decomp.chi2Func, [beta0[0], beta0[1]], args=(\n nmax, im, nm, ['beta0', 'beta1'], [None, None], phi0, xc, xx0, yy0),\n method=opts.fitterMethod, options={'xtol': opts.xtol, 'ftol': opts.ftol,\n 'maxiter': opts.maxiter})\n", (14597, 14833), False, 'from scipy import optimize\n'), ((15058, 15323), 'scipy.optimize.minimize', 'optimize.minimize', (['shapelets.decomp.chi2Func', '[beta0[0], beta0[1], phi0]'], {'args': "(nmax, im, nm, ['beta0', 'beta1', 
'phi'], [None, None], None, xc, xx0, yy0)", 'method': 'opts.fitterMethod', 'options': "{'xtol': opts.xtol, 'ftol': opts.ftol, 'maxiter': opts.maxiter}"}), "(shapelets.decomp.chi2Func, [beta0[0], beta0[1], phi0],\n args=(nmax, im, nm, ['beta0', 'beta1', 'phi'], [None, None], None, xc,\n xx0, yy0), method=opts.fitterMethod, options={'xtol': opts.xtol, 'ftol':\n opts.ftol, 'maxiter': opts.maxiter})\n", (15075, 15323), False, 'from scipy import optimize\n'), ((15601, 15847), 'scipy.optimize.minimize', 'optimize.minimize', (['shapelets.decomp.chi2Func', '[xc[0], xc[1]]'], {'args': "(nmax, im, nm, ['yc', 'xc'], beta0, phi0, [None, None], None, None)", 'method': 'opts.fitterMethod', 'options': "{'xtol': opts.xtol, 'ftol': opts.ftol, 'maxiter': opts.maxiter}"}), "(shapelets.decomp.chi2Func, [xc[0], xc[1]], args=(nmax, im,\n nm, ['yc', 'xc'], beta0, phi0, [None, None], None, None), method=opts.\n fitterMethod, options={'xtol': opts.xtol, 'ftol': opts.ftol, 'maxiter':\n opts.maxiter})\n", (15618, 15847), False, 'from scipy import optimize\n'), ((16078, 16337), 'scipy.optimize.minimize', 'optimize.minimize', (['shapelets.decomp.chi2Func', '[phi0, xc[0], xc[1]]'], {'args': "(nmax, im, nm, ['phi', 'yc', 'xc'], beta0, None, [None, None], None, None)", 'method': 'opts.fitterMethod', 'options': "{'xtol': opts.xtol, 'ftol': opts.ftol, 'maxiter': opts.maxiter}"}), "(shapelets.decomp.chi2Func, [phi0, xc[0], xc[1]], args=(\n nmax, im, nm, ['phi', 'yc', 'xc'], beta0, None, [None, None], None,\n None), method=opts.fitterMethod, options={'xtol': opts.xtol, 'ftol':\n opts.ftol, 'maxiter': opts.maxiter})\n", (16095, 16337), False, 'from scipy import optimize\n'), ((16598, 16890), 'scipy.optimize.minimize', 'optimize.minimize', (['shapelets.decomp.chi2Func', '[beta0[0], beta0[1], xc[0], xc[1]]'], {'args': "(nmax, im, nm, ['beta0', 'beta1', 'yc', 'xc'], [None, None], phi0, [None,\n None], None, None)", 'method': 'opts.fitterMethod', 'options': "{'xtol': opts.xtol, 'ftol': opts.ftol, 
'maxiter': opts.maxiter}"}), "(shapelets.decomp.chi2Func, [beta0[0], beta0[1], xc[0], xc\n [1]], args=(nmax, im, nm, ['beta0', 'beta1', 'yc', 'xc'], [None, None],\n phi0, [None, None], None, None), method=opts.fitterMethod, options={\n 'xtol': opts.xtol, 'ftol': opts.ftol, 'maxiter': opts.maxiter})\n", (16615, 16890), False, 'from scipy import optimize\n'), ((17141, 17357), 'scipy.optimize.minimize', 'optimize.minimize', (['shapelets.decomp.chi2Func', '[beta0[0], beta0[1], phi0, xc[0], xc[1]]'], {'args': '(nmax, im, nm)', 'method': 'opts.fitterMethod', 'options': "{'xtol': opts.xtol, 'ftol': opts.ftol, 'maxiter': opts.maxiter}"}), "(shapelets.decomp.chi2Func, [beta0[0], beta0[1], phi0, xc[\n 0], xc[1]], args=(nmax, im, nm), method=opts.fitterMethod, options={\n 'xtol': opts.xtol, 'ftol': opts.ftol, 'maxiter': opts.maxiter})\n", (17158, 17357), False, 'from scipy import optimize\n'), ((20546, 20582), 'numpy.array', 'np.array', (['[[xc1[1] + 1, xc1[0] + 1]]'], {}), '([[xc1[1] + 1, xc1[0] + 1]])\n', (20554, 20582), True, 'import numpy as np\n'), ((20868, 20941), 'numpy.array', 'np.array', (['[[xc1[1] + extent[0] + 1, im0.shape[0] - (extent[2] + xc1[0])]]'], {}), '([[xc1[1] + extent[0] + 1, im0.shape[0] - (extent[2] + xc1[0])]])\n', (20876, 20941), True, 'import numpy as np\n')] |
"""This module handles indels and frameshift mutations.
Indels and frameshifts are detected from the allele columns of the mutation
input.
"""
import prob2020.python.utils as utils
import numpy as np
import pandas as pd
def simulate_indel_counts(indel_df, bed_dict,
                          num_permutations=1,
                          seed=None):
    """Randomly re-assign the observed indels to genes, with probability
    proportional to each gene's CDS length.

    Parameters
    ----------
    indel_df : pd.DataFrame
        mutation dataframe containing only indels; must have an
        'indel len' column
    bed_dict : dict
        chromosome -> list of BED gene objects (each exposing
        init_genome_coordinates(), cds_len and gene_name)
    num_permutations : int
        number of random permutations to simulate
    seed : int or None
        seed for the pseudo random number generator

    Returns
    -------
    (fs_cts, inframe_cts, gene_index) where fs_cts and inframe_cts are
    (num_permutations x num genes) arrays of frameshift and in-frame indel
    counts, and gene_index is the pd.Index of gene names for the columns.
    """
    # count indels
    bed_genes = [mybed
                 for chrom in bed_dict
                 for mybed in bed_dict[chrom]]
    tmp = []
    for b in bed_genes:
        # populate genomic coordinate lookup before reading cds_len below
        b.init_genome_coordinates()
        tmp.append(b)
    bed_genes = tmp
    gene_lengths = pd.Series([b.cds_len for b in bed_genes],
                             index=[b.gene_name for b in bed_genes])
    # generate random indel assignments
    # each gene receives indels with probability proportional to CDS length
    gene_prob = gene_lengths.astype(float) / gene_lengths.sum()
    indel_lens = indel_df['indel len'].copy().values
    # lengths that are not a multiple of 3 shift the reading frame
    is_fs = (indel_lens % 3) > 0
    indel_ixs = np.arange(len(indel_lens))
    prng = np.random.RandomState(seed=seed)
    # randomly reassign indels
    mygene_cts = prng.multinomial(len(indel_lens), gene_prob, size=num_permutations)
    inframe_cts = mygene_cts.copy()
    for row in range(mygene_cts.shape[0]):
        nonzero_ix = np.nonzero(mygene_cts[row,:])[0]
        # randomly shuffle indel lengths
        prng.shuffle(indel_ixs)
        is_fs = is_fs[indel_ixs]
        # iterate over each gene
        indel_ix = 0
        for j in range(len(nonzero_ix)):
            prev_indel_ix = indel_ix
            num_gene_indels = mygene_cts[row, nonzero_ix[j]]
            indel_ix += num_gene_indels
            # in-frame count = total assigned minus frameshifts in this slice
            inframe_cts[row, nonzero_ix[j]] = num_gene_indels - np.sum(is_fs[prev_indel_ix:indel_ix])
    # subtract the in-frame counts so mygene_cts holds frameshift counts only
    mygene_cts -= inframe_cts
    return mygene_cts, inframe_cts, gene_lengths.index
def simulate_indel_maf(indel_df, bed_dict,
                       num_permutations=1,
                       seed=None):
    """Yield simulated MAF records with indels randomly re-assigned to genes.

    For each permutation, indels are dropped onto genes with probability
    proportional to CDS length; counts2maf then places each gene's indels
    at random coding positions.  One list of MAF lines is yielded per gene
    that received at least one indel.

    Parameters
    ----------
    indel_df : pd.DataFrame
        indel-only mutation dataframe with 'indel len' and 'indel type' columns
    bed_dict : dict
        chromosome -> list of BED gene objects
    num_permutations : int
        number of random permutations to simulate
    seed : int or None
        seed for the pseudo random number generator

    Yields
    ------
    list of MAF-style record lists for a single gene (see counts2maf)
    """
    # count indels
    bed_genes = [mybed
                 for chrom in bed_dict
                 for mybed in bed_dict[chrom]]
    tmp = []
    for b in bed_genes:
        b.init_genome_coordinates()
        tmp.append(b)
    bed_genes = tmp
    gene_lengths = pd.Series([b.cds_len for b in bed_genes],
                             index=[b.gene_name for b in bed_genes])
    # generate random indel assignments
    gene_prob = gene_lengths.astype(float) / gene_lengths.sum()
    indel_lens = indel_df['indel len'].copy().values
    indel_types = indel_df['indel type'].copy().values
    indel_ixs = np.arange(len(indel_lens))
    prng = np.random.RandomState(seed=seed)
    for i in range(num_permutations):
        # randomly reassign indels
        mygene_cts = prng.multinomial(len(indel_lens), gene_prob)
        nonzero_ix = np.nonzero(mygene_cts)[0]
        # randomly shuffle indel lengths
        prng.shuffle(indel_ixs)
        indel_lens = indel_lens[indel_ixs]
        indel_types = indel_types[indel_ixs]
        # iterate over each gene
        indel_ix = 0
        for j in range(len(nonzero_ix)):
            prev_indel_ix = indel_ix
            num_gene_indels = mygene_cts[nonzero_ix[j]]
            indel_ix += num_gene_indels
            # slice out this gene's indels and convert them to MAF lines
            maf_lines = counts2maf(num_gene_indels,
                                   indel_lens[prev_indel_ix:indel_ix],
                                   indel_types[prev_indel_ix:indel_ix],
                                   bed_genes[nonzero_ix[j]])
            yield maf_lines
def counts2maf(num_indels, myindel_lens, myindel_types, gene_bed, seed=None):
maf_list = []
prng = np.random.RandomState(seed=seed)
pos = prng.randint(low=0, high=gene_bed.cds_len, size=num_indels)
genome_pos = [gene_bed.seqpos2genome[p] for p in pos]
is_frame_shift = myindel_lens%3
for i, gpos in enumerate(genome_pos):
if myindel_types[i] == 'INS':
var_class = 'Frame_Shift_Ins' if is_frame_shift[i] else 'In_Frame_Ins'
dna_change = 'c.{0}_{1}ins'.format(pos[i], pos[i])
prot_change = 'p.?'
tmp = [gene_bed.gene_name, gene_bed.strand, gene_bed.chrom,
gpos, gpos, '-', 'N'*int(myindel_lens[i]), '-', dna_change,
prot_change, var_class]
maf_list.append(tmp)
else:
var_class = 'Frame_Shift_Del' if is_frame_shift[i] else 'In_Frame_Del'
dna_change = 'c.{0}_{1}del'.format(pos[i], pos[i]+myindel_lens[i])
prot_change = 'p.?'
tmp = [gene_bed.gene_name, gene_bed.strand, gene_bed.chrom,
gpos+1, gpos+myindel_lens[i], 'N'*int(myindel_lens[i]), '-', '-', dna_change,
prot_change, var_class]
maf_list.append(tmp)
return maf_list
def compute_indel_length(fs_df):
"""Computes the indel length accounting for wether it is an insertion or
deletion.
Parameters
----------
fs_df : pd.DataFrame
mutation input as dataframe only containing indel mutations
Returns
-------
indel_len : pd.Series
length of indels
"""
indel_len = pd.Series(index=fs_df.index)
indel_len[fs_df['Reference_Allele']=='-'] = fs_df['Tumor_Allele'][fs_df['Reference_Allele']=='-'].str.len()
indel_len[fs_df['Tumor_Allele']=='-'] = fs_df['Reference_Allele'][fs_df['Tumor_Allele']=='-'].str.len()
indel_len = indel_len.fillna(0).astype(int)
return indel_len
def keep_indels(mut_df,
                indel_len_col=True,
                indel_type_col=True):
    """Filters out all mutations that are not indels.

    Indels are identified from the Variant_Classification column (see
    is_indel_annotation).

    Parameters
    ----------
    mut_df : pd.DataFrame
        mutation input file as a dataframe in standard format
    indel_len_col : bool
        whether or not to add a column indicating the length of the indel
    indel_type_col : bool
        whether or not to add an 'indel type' column ('INS' or 'DEL')

    Returns
    -------
    mut_df : pd.DataFrame
        mutations with only indel mutations kept
    """
    # keep only indels; copy so the assignments below never write into a
    # view of the caller's dataframe (avoids SettingWithCopyWarning and
    # silently dropped assignments)
    mut_df = mut_df[is_indel_annotation(mut_df)].copy()
    if indel_len_col:
        # calculate length
        mut_df.loc[:, 'indel len'] = compute_indel_length(mut_df)
    if indel_type_col:
        is_ins = mut_df['Reference_Allele'] == '-'
        is_del = mut_df['Tumor_Allele'] == '-'
        # use .loc consistently for all column assignments
        mut_df.loc[:, 'indel type'] = ''
        mut_df.loc[is_ins, 'indel type'] = 'INS'
        mut_df.loc[is_del, 'indel type'] = 'DEL'
    return mut_df
def keep_frameshifts(mut_df,
                     indel_len_col=True):
    """Filters out all mutations that are not frameshift indels.

    Frameshifts are identified from the Variant_Classification column (see
    is_frameshift_annotation).

    Parameters
    ----------
    mut_df : pd.DataFrame
        mutation input file as a dataframe in standard format
    indel_len_col : bool
        whether or not to add a column indicating the length of the frameshift

    Returns
    -------
    mut_df : pd.DataFrame
        mutations with only frameshift mutations kept
    """
    # keep only frameshifts; copy so the .loc assignment below never
    # writes into a view of the caller's dataframe
    mut_df = mut_df[is_frameshift_annotation(mut_df)].copy()
    if indel_len_col:
        # calculate length
        mut_df.loc[:, 'indel len'] = compute_indel_length(mut_df)
    return mut_df
def is_frameshift_len(mut_df):
    """Flag frameshift mutations based on indel length.

    A row is a frameshift when it is an indel (one allele is '-') and its
    length is not a multiple of three.  Frameshifts spanning exon-intron
    boundaries or other odd cases may be misclassified by this heuristic.

    Parameters
    ----------
    mut_df : pd.DataFrame
        mutation input file as a dataframe in standard format

    Returns
    -------
    pd.Series of bool flagging frameshift rows
    """
    # reuse a precomputed length column when available
    if 'indel len' in mut_df.columns:
        length = mut_df['indel len']
    else:
        length = compute_indel_length(mut_df)
    # lengths that are not multiples of three shift the reading frame
    shifts_frame = (length % 3) > 0
    # single base substitutions are never frameshifts
    not_indel = ~((mut_df['Reference_Allele'] == '-') |
                  (mut_df['Tumor_Allele'] == '-'))
    shifts_frame[not_indel] = False
    return shifts_frame
def is_frameshift_annotation(mut_df):
    """Flag frameshifts according to the Variant_Classification column."""
    frameshift_classes = utils.variant_frameshift
    return mut_df['Variant_Classification'].isin(frameshift_classes)
def is_indel_len(mut_df):
    """Flag indel rows based on allele content.

    A row counts as an indel when either allele is the gap symbol '-'
    (insertion: gap in the reference allele; deletion: gap in the tumor
    allele).

    Parameters
    ----------
    mut_df : pd.DataFrame
        mutation input file as a dataframe in standard format

    Returns
    -------
    pd.Series of bool flagging indel rows
    """
    gap = '-'
    ref_is_gap = mut_df['Reference_Allele'] == gap
    tumor_is_gap = mut_df['Tumor_Allele'] == gap
    return ref_is_gap | tumor_is_gap
def is_indel_annotation(mut_df):
    """Flag indels according to the Variant_Classification column."""
    indel_classes = utils.variant_indel
    return mut_df['Variant_Classification'].isin(indel_classes)
def is_in_frame_indel_annotation(mut_df):
    """Flag in-frame indels according to the Variant_Classification column."""
    in_frame_classes = utils.variant_in_frame_indel
    return mut_df['Variant_Classification'].isin(in_frame_classes)
def get_frameshift_lengths(num_bins):
    """Return the first `num_bins` indel lengths that cause a frameshift.

    Frameshift lengths are the positive integers that are not multiples of
    three (1, 2, 4, 5, 7, ...).
    """
    lengths = []
    candidate = 1
    while len(lengths) < num_bins:
        if candidate % 3 != 0:
            lengths.append(candidate)
        candidate += 1
    return lengths
| [
"numpy.nonzero",
"numpy.sum",
"pandas.Series",
"numpy.random.RandomState"
] | [((614, 699), 'pandas.Series', 'pd.Series', (['[b.cds_len for b in bed_genes]'], {'index': '[b.gene_name for b in bed_genes]'}), '([b.cds_len for b in bed_genes], index=[b.gene_name for b in\n bed_genes])\n', (623, 699), True, 'import pandas as pd\n'), ((971, 1003), 'numpy.random.RandomState', 'np.random.RandomState', ([], {'seed': 'seed'}), '(seed=seed)\n', (992, 1003), True, 'import numpy as np\n'), ((2165, 2250), 'pandas.Series', 'pd.Series', (['[b.cds_len for b in bed_genes]'], {'index': '[b.gene_name for b in bed_genes]'}), '([b.cds_len for b in bed_genes], index=[b.gene_name for b in\n bed_genes])\n', (2174, 2250), True, 'import pandas as pd\n'), ((2546, 2578), 'numpy.random.RandomState', 'np.random.RandomState', ([], {'seed': 'seed'}), '(seed=seed)\n', (2567, 2578), True, 'import numpy as np\n'), ((3550, 3582), 'numpy.random.RandomState', 'np.random.RandomState', ([], {'seed': 'seed'}), '(seed=seed)\n', (3571, 3582), True, 'import numpy as np\n'), ((5056, 5084), 'pandas.Series', 'pd.Series', ([], {'index': 'fs_df.index'}), '(index=fs_df.index)\n', (5065, 5084), True, 'import pandas as pd\n'), ((1221, 1251), 'numpy.nonzero', 'np.nonzero', (['mygene_cts[row, :]'], {}), '(mygene_cts[row, :])\n', (1231, 1251), True, 'import numpy as np\n'), ((2740, 2762), 'numpy.nonzero', 'np.nonzero', (['mygene_cts'], {}), '(mygene_cts)\n', (2750, 2762), True, 'import numpy as np\n'), ((1659, 1696), 'numpy.sum', 'np.sum', (['is_fs[prev_indel_ix:indel_ix]'], {}), '(is_fs[prev_indel_ix:indel_ix])\n', (1665, 1696), True, 'import numpy as np\n')] |
import numpy as np
import csv
import os
engel = 45
def generate_poses(file_path, num_poses=10000, max_angle=None):
    """Append random 6-DoF poses to a CSV file.

    Each row is [x, y, z, rx, ry, rz]: translations drawn uniformly from
    [-1, 1] and rotations (radians) drawn uniformly from the equivalent of
    [-max_angle, max_angle] degrees, all rounded to 4 decimals.

    Parameters
    ----------
    file_path : str
        CSV file to append to (created if missing).
    num_poses : int
        number of rows to generate (default keeps the original 10000).
    max_angle : float or None
        rotation limit in degrees; defaults to the module-level ``engel``.
    """
    if max_angle is None:
        max_angle = engel
    max_angle_rad = max_angle * np.pi / 180.0
    # newline='' is the documented way to open files for csv.writer
    with open(file_path, 'a', newline='') as csvfile:
        csvwriter = csv.writer(csvfile)
        for _ in range(num_poses):
            # np.random.uniform(low, high) replaces the manual
            # scale-and-shift of a [0, 1) sample
            rot = np.round(np.random.uniform(-max_angle_rad, max_angle_rad, size=3), 4)
            trans = np.round(np.random.uniform(-1.0, 1.0, size=3), 4)
            csvwriter.writerow(list(trans) + list(rot))
# Script entry point: append evaluation poses for the configured angle limit
if __name__== '__main__':
    generate_poses(file_path='data/car_data/itr_net_eval_data'+str(engel)+'.csv')
"numpy.random.uniform",
"csv.writer"
] | [((139, 158), 'csv.writer', 'csv.writer', (['csvfile'], {}), '(csvfile)\n', (149, 158), False, 'import csv\n'), ((483, 502), 'numpy.random.uniform', 'np.random.uniform', ([], {}), '()\n', (500, 502), True, 'import numpy as np\n'), ((526, 545), 'numpy.random.uniform', 'np.random.uniform', ([], {}), '()\n', (543, 545), True, 'import numpy as np\n'), ((569, 588), 'numpy.random.uniform', 'np.random.uniform', ([], {}), '()\n', (586, 588), True, 'import numpy as np\n'), ((214, 233), 'numpy.random.uniform', 'np.random.uniform', ([], {}), '()\n', (231, 233), True, 'import numpy as np\n'), ((307, 326), 'numpy.random.uniform', 'np.random.uniform', ([], {}), '()\n', (324, 326), True, 'import numpy as np\n'), ((400, 419), 'numpy.random.uniform', 'np.random.uniform', ([], {}), '()\n', (417, 419), True, 'import numpy as np\n')] |
import numpy as np
DEFAULT_MAZE = '''
+-----+
| |
| |
| |
| |
| |
+-----+
'''
HARD_MAZE = '''
+--------+-----+
| |
| |
+-----+ +-----+
| | |
| | |
| +--+- --+--+
| |
| |
| + + +-----+
| | | | |
| | | | |
| +--+ +--- |
| | |
| | |
+-----+--------+
'''
class MazeFactoryBase:
    """Parses an ASCII maze string into a 2D single-character numpy array."""

    def __init__(self, maze_str=DEFAULT_MAZE):
        self._maze = self._parse_maze(maze_str)

    def _parse_maze(self, maze_source):
        """Strip each line and pad all rows with ' ' to a common width."""
        rows = [line.strip() for line in maze_source.strip().split('\n')]
        height = len(rows)
        width = max(len(r) for r in rows)
        grid = np.zeros([height, width], dtype=str)
        grid[:] = ' '
        for i, row in enumerate(rows):
            for j, ch in enumerate(row):
                grid[i, j] = ch
        return grid

    def get_maze(self):
        """Return the parsed character grid."""
        return self._maze
class SquareRoomFactory(MazeFactoryBase):
    """Generate a single square room with the given interior size."""

    def __init__(self, size):
        grid = np.zeros([size + 2, size + 2], dtype=str)
        grid[:] = ' '
        # outer border: '-' for top/bottom, '|' for sides, '+' at corners
        grid[0, :] = '-'
        grid[-1, :] = '-'
        grid[:, 0] = '|'
        grid[:, -1] = '|'
        for corner in ((0, 0), (0, -1), (-1, 0), (-1, -1)):
            grid[corner] = '+'
        self._maze = grid
class FourRoomsFactory(MazeFactoryBase):
    """Generate four rooms, each with the given size, joined by doorways."""

    def __init__(self, size):
        maze_array = np.zeros([size*2+3, size*2+3], dtype=str)
        maze_array[:] = ' '
        # outer border plus one central wall in each direction
        wall_idx = [0, size+1, size*2+2]
        maze_array[wall_idx] = '-'
        maze_array[:, wall_idx] = '|'
        # BUG FIX: `maze_array[wall_idx][:, wall_idx] = '+'` chained fancy
        # indexing assigns into a temporary copy, so the '+' intersections
        # were never written; np.ix_ assigns them in place.
        maze_array[np.ix_(wall_idx, wall_idx)] = '+'
        # carve a two-cell doorway in the middle of each wall segment
        half = int((size+1)/2)
        door_idx = [half, half+1, half+size+1, half+size+2]
        maze_array[size+1, door_idx] = ' '
        maze_array[door_idx, size+1] = ' '
        self._maze = maze_array
class TwoRoomsFactory(MazeFactoryBase):
    """Generate two stacked rooms split by a horizontal wall with a doorway."""

    def __init__(self, size):
        maze_array = np.zeros([size+2, size+2], dtype=str)
        maze_array[:] = ' '
        hwall_idx = [0, int((size+1)/2), size+1]
        vwall_idx = [0, size+1]
        maze_array[hwall_idx] = '-'
        maze_array[:, vwall_idx] = '|'
        # BUG FIX: `maze_array[hwall_idx][:, vwall_idx] = '+'` chained fancy
        # indexing assigns into a temporary copy, so the '+' intersections
        # were never written; np.ix_ assigns them in place.
        maze_array[np.ix_(hwall_idx, vwall_idx)] = '+'
        # carve a two-cell doorway through the middle wall
        door_idx = [int((size+1)/2), int((size+1)/2)+1]
        maze_array[hwall_idx[1], door_idx] = ' '
        self._maze = maze_array
class Maze:
    """Wraps a maze factory's character grid and exposes grid/state queries.

    Cells holding ' ' are walkable; every other character is a wall.
    """

    def __init__(self, maze_factory):
        self._maze_factory = maze_factory
        # parse maze ...
        self._maze = None
        self._height = None
        self._width = None
        self._build_maze()
        self._all_empty_grids = np.argwhere(self._maze == ' ')
        self._n_states = self._all_empty_grids.shape[0]
        # map (row, col) -> dense state index
        self._pos_indices = {
            tuple(pos): i for i, pos in enumerate(self._all_empty_grids)
        }

    def _build_maze(self):
        grid = self._maze_factory.get_maze()
        self._maze = grid
        self._height, self._width = grid.shape[0], grid.shape[1]

    def rebuild(self):
        """Re-fetch the grid from the factory."""
        self._build_maze()

    def __getitem__(self, key):
        return self._maze[key]

    def __setitem__(self, key, val):
        self._maze[key] = val

    def is_empty(self, pos):
        """True when pos is inside the grid and holds a floor cell (' ')."""
        row, col = pos[0], pos[1]
        inside = 0 <= row < self._height and 0 <= col < self._width
        if not inside:
            return False
        return self._maze[tuple(pos)] == ' '

    @property
    def maze_array(self):
        return self._maze

    @property
    def height(self):
        return self._height

    @property
    def width(self):
        return self._width

    @property
    def n_states(self):
        return self._n_states

    def pos_index(self, pos):
        """Dense state index of a position recorded at construction time."""
        return self._pos_indices[tuple(pos)]

    def all_empty_grids(self):
        """Current (row, col) coordinates of every floor cell."""
        return np.argwhere(self._maze == ' ')

    def random_empty_grids(self, k):
        """Return k distinct random empty positions."""
        empties = np.argwhere(self._maze == ' ')
        chosen = np.random.choice(
            np.arange(empties.shape[0]),
            size=k,
            replace=False
        )
        return empties[chosen]

    def first_empty_grid(self):
        """First floor cell in row-major order; asserts one exists."""
        empties = np.argwhere(self._maze == ' ')
        assert empties.shape[0] > 0
        return empties[0]

    def render(self):
        # 0 for ground, 1 for wall
        return (self._maze != ' ').astype(np.float32)
| [
"numpy.argwhere",
"numpy.zeros",
"numpy.arange"
] | [((842, 878), 'numpy.zeros', 'np.zeros', (['[height, width]'], {'dtype': 'str'}), '([height, width], dtype=str)\n', (850, 878), True, 'import numpy as np\n'), ((1255, 1296), 'numpy.zeros', 'np.zeros', (['[size + 2, size + 2]'], {'dtype': 'str'}), '([size + 2, size + 2], dtype=str)\n', (1263, 1296), True, 'import numpy as np\n'), ((1751, 1800), 'numpy.zeros', 'np.zeros', (['[size * 2 + 3, size * 2 + 3]'], {'dtype': 'str'}), '([size * 2 + 3, size * 2 + 3], dtype=str)\n', (1759, 1800), True, 'import numpy as np\n'), ((2315, 2356), 'numpy.zeros', 'np.zeros', (['[size + 2, size + 2]'], {'dtype': 'str'}), '([size + 2, size + 2], dtype=str)\n', (2323, 2356), True, 'import numpy as np\n'), ((2983, 3013), 'numpy.argwhere', 'np.argwhere', (["(self._maze == ' ')"], {}), "(self._maze == ' ')\n", (2994, 3013), True, 'import numpy as np\n'), ((4167, 4197), 'numpy.argwhere', 'np.argwhere', (["(self._maze == ' ')"], {}), "(self._maze == ' ')\n", (4178, 4197), True, 'import numpy as np\n'), ((4303, 4333), 'numpy.argwhere', 'np.argwhere', (["(self._maze == ' ')"], {}), "(self._maze == ' ')\n", (4314, 4333), True, 'import numpy as np\n'), ((4582, 4612), 'numpy.argwhere', 'np.argwhere', (["(self._maze == ' ')"], {}), "(self._maze == ' ')\n", (4593, 4612), True, 'import numpy as np\n'), ((4385, 4416), 'numpy.arange', 'np.arange', (['empty_grids.shape[0]'], {}), '(empty_grids.shape[0])\n', (4394, 4416), True, 'import numpy as np\n')] |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import copy
import numpy as np
from six.moves import xrange
import tensorflow as tf
import warnings
from . import utils_tf
from . import utils
from tensorflow.python.platform import flags
FLAGS = flags.FLAGS
def fgsm(x, predictions, eps=0.3, clip_min=None, clip_max=None):
    """Fast Gradient Sign Method: fgm specialized to the L-infinity norm."""
    return fgm(x, predictions, y=None, eps=eps, ord=np.inf,
               clip_min=clip_min, clip_max=clip_max)
def fgm(x, preds, y=None, eps=0.3, ord=np.inf, clip_min=None, clip_max=None):
    """
    TensorFlow implementation of the Fast Gradient Method.

    NOTE: `ord` shadows the builtin but is part of the public API (mimics
    the numpy norm-order argument) and cannot be renamed.

    :param x: the input placeholder
    :param preds: the model's output tensor
    :param y: (optional) A placeholder for the model labels. Only provide
              this parameter if you'd like to use true labels when crafting
              adversarial samples. Otherwise, model predictions are used as
              labels to avoid the "label leaking" effect (explained in this
              paper: https://arxiv.org/abs/1611.01236). Default is None.
              Labels should be one-hot-encoded.
    :param eps: the epsilon (input variation parameter)
    :param ord: (optional) Order of the norm (mimics Numpy).
                Possible values: np.inf, 1 or 2.
    :param clip_min: Minimum float value for adversarial example components
    :param clip_max: Maximum float value for adversarial example components
    :return: a tensor for the adversarial example
    """
    if y is None:
        # Using model predictions as ground truth to avoid label leaking:
        # take the argmax class(es) of the model output as one-hot labels
        preds_max = tf.reduce_max(preds, 1, keep_dims=True)
        y = tf.to_float(tf.equal(preds, preds_max))
    # normalize labels so each row sums to 1 (handles argmax ties above)
    y = y / tf.reduce_sum(y, 1, keep_dims=True)
    # Compute loss
    loss = utils_tf.model_loss(y, preds, mean=False)
    # Define gradient of loss wrt input
    grad, = tf.gradients(loss, x)
    if ord == np.inf:
        # Take sign of gradient
        signed_grad = tf.sign(grad)
    elif ord == 1:
        # normalize the gradient by its L1 norm over all non-batch axes
        reduc_ind = list(xrange(1, len(x.get_shape())))
        signed_grad = grad / tf.reduce_sum(tf.abs(grad),
                                           reduction_indices=reduc_ind,
                                           keep_dims=True)
    elif ord == 2:
        # normalize the gradient by its L2 norm over all non-batch axes
        reduc_ind = list(xrange(1, len(x.get_shape())))
        signed_grad = grad / tf.sqrt(tf.reduce_sum(tf.square(grad),
                                                   reduction_indices=reduc_ind,
                                                   keep_dims=True))
    else:
        raise NotImplementedError("Only L-inf, L1 and L2 norms are "
                                  "currently implemented.")
    # Multiply by constant epsilon
    scaled_signed_grad = eps * signed_grad
    # Add perturbation to original example to obtain adversarial example;
    # stop_gradient keeps later ops from backpropagating through the attack
    adv_x = tf.stop_gradient(x + scaled_signed_grad)
    # If clipping is needed, reset all values outside of [clip_min, clip_max]
    if (clip_min is not None) and (clip_max is not None):
        adv_x = tf.clip_by_value(adv_x, clip_min, clip_max)
    return adv_x
def vatm(model, x, logits, eps, num_iterations=1, xi=1e-6,
         clip_min=None, clip_max=None, scope=None):
    """
    Tensorflow implementation of the perturbation method used for virtual
    adversarial training: https://arxiv.org/abs/1507.00677
    :param model: the model which returns the network unnormalized logits
    :param x: the input placeholder
    :param logits: the model's unnormalized output tensor
    :param eps: the epsilon (input variation parameter)
    :param num_iterations: the number of iterations
    :param xi: the finite difference parameter
    :param clip_min: optional parameter that can be used to set a minimum
                    value for components of the example returned
    :param clip_max: optional parameter that can be used to set a maximum
                    value for components of the example returned
    :param scope: optional name scope for the constructed TF ops
    :return: a tensor for the adversarial example
    """
    with tf.name_scope(scope, "virtual_adversarial_perturbation"):
        # start from a random direction
        d = tf.random_normal(tf.shape(x))
        for i in range(num_iterations):
            # probe with a small (xi-scaled) step along the unit direction
            d = xi * utils_tf.l2_batch_normalize(d)
            logits_d = model.get_logits(x + d)
            # KL divergence between clean and perturbed predictions
            kl = utils_tf.kl_with_logits(logits, logits_d)
            # gradient of the KL wrt the direction becomes the new direction
            Hd = tf.gradients(kl, d)[0]
            d = tf.stop_gradient(Hd)
        # final perturbation: eps-scaled unit vector of the found direction
        d = eps * utils_tf.l2_batch_normalize(d)
        adv_x = tf.stop_gradient(x + d)
        if (clip_min is not None) and (clip_max is not None):
            adv_x = tf.clip_by_value(adv_x, clip_min, clip_max)
        return adv_x
def apply_perturbations(i, j, X, increase, theta, clip_min, clip_max):
    """Perturb features i and j of sample X by +/- theta, in place.

    When increasing, values are capped above at clip_max; when decreasing,
    they are floored below at clip_min.  X is modified in place and also
    returned.

    :param i: index of first selected feature
    :param j: index of second selected feature
    :param X: a matrix containing our input features for our sample
    :param increase: boolean; true if we are increasing pixels
    :param theta: delta for each feature adjustment
    :param clip_min: minimum value for a feature in our sample
    :param clip_max: maximum value for a feature in our sample
    :return: the perturbed input feature matrix
    """
    if increase:
        clip_fn, bound, step = np.minimum, clip_max, theta
    else:
        clip_fn, bound, step = np.maximum, clip_min, -theta
    for feature in (i, j):
        X[0, feature] = clip_fn(bound, X[0, feature] + step)
    return X
def saliency_map(grads_target, grads_other, search_domain, increase):
    """Select the best feature pair from a Jacobian-based saliency map.

    :param grads_target: forward derivatives for the target class
    :param grads_other: summed forward derivatives over non-target classes
    :param search_domain: set of feature indices still eligible for change
    :param increase: boolean; true if we are increasing pixels
    :return: (p1, p2, search_domain) the two selected feature indices and
             the search domain with them removed.  Note grads_target,
             grads_other and search_domain are modified in place.
    """
    nf = len(grads_target)
    # Features no longer in the search domain must never win the argmax:
    # push their gradients to the worst possible saliency value.
    used = list(set(range(nf)) - search_domain)
    sign = 2 * int(increase) - 1
    grads_target[used] = -sign * np.max(np.abs(grads_target))
    grads_other[used] = sign * np.max(np.abs(grads_other))
    # Pairwise gradient sums for every candidate feature pair
    target_sum = grads_target.reshape((1, nf)) + grads_target.reshape((nf, 1))
    other_sum = grads_other.reshape((1, nf)) + grads_other.reshape((nf, 1))
    # Saliency condition: push the target class up (down) while pushing the
    # other classes down (up)
    if increase:
        mask = (target_sum > 0) & (other_sum < 0)
    else:
        mask = (target_sum < 0) & (other_sum > 0)
    scores = mask * (-target_sum * other_sum)
    # A pixel can only be selected (and changed) once
    np.fill_diagonal(scores, 0)
    # Flattened argmax decoded back into a 2D pair of feature indices
    best = np.argmax(scores)
    p1, p2 = best % nf, best // nf
    search_domain.discard(p1)
    search_domain.discard(p2)
    return p1, p2, search_domain
def jacobian(sess, x, grads, target, X, nb_features, nb_classes):
    """Evaluate the forward derivative (Jacobian) for one sample.

    :param sess: TF session
    :param x: the input placeholder
    :param grads: the list of TF gradients returned by jacobian_graph()
    :param target: the target misclassification class
    :param X: numpy array with sample input
    :param nb_features: the number of features in the input
    :param nb_classes: the number of output classes
    :return: (target row, summed non-target rows) of the Jacobian, each
             flattened into a vector of length nb_features
    """
    # one feed dictionary shared by every gradient evaluation
    feed_dict = {x: X}
    jacobian_val = np.zeros((nb_classes, nb_features), dtype=np.float32)
    for class_ind, grad in enumerate(grads):
        class_grad = sess.run(grad, feed_dict)
        jacobian_val[class_ind] = np.reshape(class_grad, (1, nb_features))
    # Sum over all non-target classes to prepare the saliency-map input
    other_classes = utils.other_classes(nb_classes, target)
    grad_others = np.sum(jacobian_val[other_classes, :], axis=0)
    return jacobian_val[target], grad_others
def jacobian_graph(predictions, x, nb_classes):
    """
    Create the Jacobian graph to be run later in a TF session
    :param predictions: the model's symbolic output (linear output,
                        pre-softmax)
    :param x: the input placeholder
    :param nb_classes: the number of classes the model has
    :return: list of TF gradient tensors, one per output class
    """
    # One symbolic derivative of each class score with respect to the input.
    return [tf.gradients(predictions[:, class_ind], x)[0]
            for class_ind in xrange(nb_classes)]
def jsma(sess, x, predictions, grads, sample, target, theta, gamma, clip_min,
         clip_max):
    """
    TensorFlow implementation of the JSMA (see https://arxiv.org/abs/1511.07528
    for details about the algorithm design choices).
    :param sess: TF session
    :param x: the input placeholder
    :param predictions: the model's symbolic output (linear output,
                        pre-softmax)
    :param grads: symbolic gradients (one per class, see jacobian_graph())
    :param sample: numpy array with sample input
    :param target: target class for sample input
    :param theta: delta for each feature adjustment; its sign decides whether
        features are increased or decreased
    :param gamma: a float between 0 - 1 indicating the maximum distortion
        percentage
    :param clip_min: minimum value for components of the example returned
    :param clip_max: maximum value for components of the example returned
    :return: tuple (adversarial sample reshaped to the original shape,
        1 if the target class was reached else 0,
        fraction of input features that were perturbed)
    """
    # Copy the source sample and define the maximum number of features
    # (i.e. the maximum number of iterations) that we may perturb
    adv_x = copy.copy(sample)
    # count the number of features. For MNIST, 1x28x28 = 784; for
    # CIFAR, 3x32x32 = 3072; etc.
    # NOTE(review): np.product is a deprecated alias of np.prod in recent
    # NumPy releases.
    nb_features = np.product(adv_x.shape[1:])
    # reshape sample for sake of standardization
    original_shape = adv_x.shape
    adv_x = np.reshape(adv_x, (1, nb_features))
    # compute maximum number of iterations; each iteration perturbs a pair
    # of features, hence the division by 2
    max_iters = np.floor(nb_features * gamma / 2)
    # Find number of classes based on grads
    nb_classes = len(grads)
    increase = bool(theta > 0)
    # Compute our initial search domain. We optimize the initial search domain
    # by removing all features that are already at their maximum values (if
    # increasing input features---otherwise, at their minimum value).
    if increase:
        search_domain = set([i for i in xrange(nb_features)
                             if adv_x[0, i] < clip_max])
    else:
        search_domain = set([i for i in xrange(nb_features)
                             if adv_x[0, i] > clip_min])
    # Initialize the loop variables
    iteration = 0
    adv_x_original_shape = np.reshape(adv_x, original_shape)
    current = utils_tf.model_argmax(sess, x, predictions, adv_x_original_shape)
    # Repeat this main loop until we have achieved misclassification
    while (current != target and iteration < max_iters and
           len(search_domain) > 1):
        # Reshape the adversarial example
        adv_x_original_shape = np.reshape(adv_x, original_shape)
        # Compute the Jacobian components
        grads_target, grads_others = jacobian(sess, x, grads, target,
                                              adv_x_original_shape,
                                              nb_features, nb_classes)
        # Compute the saliency map for each of our target classes
        # and return the two best candidate features for perturbation
        i, j, search_domain = saliency_map(
            grads_target, grads_others, search_domain, increase)
        # Apply the perturbation to the two input features selected previously
        adv_x = apply_perturbations(
            i, j, adv_x, increase, theta, clip_min, clip_max)
        # Update our current prediction by querying the model
        # NOTE(review): adv_x_original_shape was taken from adv_x BEFORE the
        # perturbation above; seeing the new values here relies on np.reshape
        # returning a view and on apply_perturbations mutating adv_x in
        # place -- confirm against apply_perturbations.
        current = utils_tf.model_argmax(sess, x, predictions,
                                        adv_x_original_shape)
        # Update loop variables
        iteration = iteration + 1
    # Compute the ratio of pixels perturbed by the algorithm
    percent_perturbed = float(iteration * 2) / nb_features
    # Report success when the adversarial example is misclassified in the
    # target class
    if current == target:
        return np.reshape(adv_x, original_shape), 1, percent_perturbed
    else:
        return np.reshape(adv_x, original_shape), 0, percent_perturbed
def jsma_batch(sess, x, pred, grads, X, theta, gamma, clip_min, clip_max,
               nb_classes, targets=None):
    """
    Applies the JSMA to a batch of inputs
    :param sess: TF session
    :param x: the input placeholder
    :param pred: the model's symbolic output
    :param grads: symbolic gradients
    :param X: numpy array with sample inputs
    :param theta: delta for each feature adjustment
    :param gamma: a float between 0 - 1 indicating the maximum distortion
        percentage
    :param clip_min: minimum value for components of the example returned
    :param clip_max: maximum value for components of the example returned
    :param nb_classes: number of model output classes
    :param targets: target class for sample input (one per sample); when
        None, a random incorrect class is drawn for each sample
    :return: numpy float32 array of adversarial examples, same shape as X
    """
    if targets is None:
        # Import once up front; the previous version re-executed these
        # relative imports on every loop iteration.
        from .utils_tf import model_argmax
        from .utils import random_targets
    X_adv = np.zeros(X.shape)
    for ind, val in enumerate(X):
        # Add a leading batch dimension for the single-sample attack.
        val = np.expand_dims(val, axis=0)
        if targets is None:
            # No target provided: query the model for its current prediction
            # and randomly choose one of the incorrect classes.
            gt = model_argmax(sess, x, pred, val)
            target = random_targets(gt, nb_classes)[0]
        else:
            target = targets[ind]
        X_adv[ind], _, _ = jsma(sess, x, pred, grads, val, np.argmax(target),
                                theta, gamma, clip_min, clip_max)
    return np.asarray(X_adv, dtype=np.float32)
def jacobian_augmentation(sess, x, X_sub_prev, Y_sub, grads, lmbda,
                          keras_phase=None):
    """
    Augment an adversary's substitute training set using the Jacobian
    of a substitute model to generate new synthetic inputs.
    See https://arxiv.org/abs/1602.02697 for more details.
    See tutorials/mnist_blackbox.py for example use case
    :param sess: TF session in which the substitute model is defined
    :param x: input TF placeholder for the substitute model
    :param X_sub_prev: substitute training data available to the adversary
        at the previous iteration
    :param Y_sub: substitute training labels available to the adversary
        at the previous iteration
    :param grads: Jacobian symbolic graph for the substitute
        (should be generated using attacks_tf.jacobian_graph)
    :param lmbda: step size along the sign of the Jacobian
    :param keras_phase: (deprecated) if not None, holds keras learning_phase
    :return: augmented substitute data (will need to be labeled by oracle);
        the first half is X_sub_prev unchanged, the second half holds the
        new synthetic points
    """
    assert len(x.get_shape()) == len(np.shape(X_sub_prev))
    assert len(grads) >= np.max(Y_sub) + 1
    assert len(X_sub_prev) == len(Y_sub)
    if keras_phase is not None:
        warnings.warn("keras_phase argument is deprecated and will be removed"
                      " on 2017-09-28. Instead, use K.set_learning_phase(0) at"
                      " the start of your script and serve with tensorflow.")
    # Prepare input_shape (outside loop) for feeding dictionary below
    input_shape = list(x.get_shape())
    input_shape[0] = 1
    # Create new numpy array for adversary training data
    # with twice as many components on the first dimension.
    offset = len(X_sub_prev)
    X_sub = np.vstack([X_sub_prev, X_sub_prev])
    # For each input in the previous' substitute training iteration
    for ind, prev_input in enumerate(X_sub_prev):
        # Select gradient corresponding to the label predicted by the oracle
        grad = grads[Y_sub[ind]]
        # Prepare feeding dictionary
        feed_dict = {x: np.reshape(prev_input, input_shape)}
        # Compute sign matrix
        grad_val = sess.run([tf.sign(grad)], feed_dict=feed_dict)[0]
        # Create the new synthetic point in the SECOND half of the array so
        # the first half keeps the previous training set intact.
        # BUGFIX: the former `X_sub[2*ind]` overwrote previous training
        # points (losing the even-indexed originals) and fed already
        # perturbed inputs back into later iterations.
        X_sub[offset + ind] = X_sub[ind] + lmbda * grad_val
    # Return augmented training data (needs to be labeled afterwards)
    return X_sub
| [
"tensorflow.reduce_sum",
"numpy.sum",
"numpy.maximum",
"tensorflow.clip_by_value",
"numpy.argmax",
"numpy.abs",
"numpy.floor",
"numpy.shape",
"numpy.product",
"tensorflow.reduce_max",
"tensorflow.abs",
"tensorflow.sign",
"numpy.max",
"numpy.reshape",
"tensorflow.gradients",
"tensorflow... | [((1944, 1965), 'tensorflow.gradients', 'tf.gradients', (['loss', 'x'], {}), '(loss, x)\n', (1956, 1965), True, 'import tensorflow as tf\n'), ((2915, 2955), 'tensorflow.stop_gradient', 'tf.stop_gradient', (['(x + scaled_signed_grad)'], {}), '(x + scaled_signed_grad)\n', (2931, 2955), True, 'import tensorflow as tf\n'), ((7436, 7463), 'numpy.fill_diagonal', 'np.fill_diagonal', (['scores', '(0)'], {}), '(scores, 0)\n', (7452, 7463), True, 'import numpy as np\n'), ((7510, 7527), 'numpy.argmax', 'np.argmax', (['scores'], {}), '(scores)\n', (7519, 7527), True, 'import numpy as np\n'), ((8363, 8416), 'numpy.zeros', 'np.zeros', (['(nb_classes, nb_features)'], {'dtype': 'np.float32'}), '((nb_classes, nb_features), dtype=np.float32)\n', (8371, 8416), True, 'import numpy as np\n'), ((8840, 8886), 'numpy.sum', 'np.sum', (['jacobian_val[other_classes, :]'], {'axis': '(0)'}), '(jacobian_val[other_classes, :], axis=0)\n', (8846, 8886), True, 'import numpy as np\n'), ((9438, 9456), 'six.moves.xrange', 'xrange', (['nb_classes'], {}), '(nb_classes)\n', (9444, 9456), False, 'from six.moves import xrange\n'), ((10613, 10630), 'copy.copy', 'copy.copy', (['sample'], {}), '(sample)\n', (10622, 10630), False, 'import copy\n'), ((10749, 10776), 'numpy.product', 'np.product', (['adv_x.shape[1:]'], {}), '(adv_x.shape[1:])\n', (10759, 10776), True, 'import numpy as np\n'), ((10871, 10906), 'numpy.reshape', 'np.reshape', (['adv_x', '(1, nb_features)'], {}), '(adv_x, (1, nb_features))\n', (10881, 10906), True, 'import numpy as np\n'), ((10966, 10999), 'numpy.floor', 'np.floor', (['(nb_features * gamma / 2)'], {}), '(nb_features * gamma / 2)\n', (10974, 10999), True, 'import numpy as np\n'), ((11674, 11707), 'numpy.reshape', 'np.reshape', (['adv_x', 'original_shape'], {}), '(adv_x, original_shape)\n', (11684, 11707), True, 'import numpy as np\n'), ((14194, 14211), 'numpy.zeros', 'np.zeros', (['X.shape'], {}), '(X.shape)\n', (14202, 14211), True, 'import numpy as np\n'), 
((14864, 14899), 'numpy.asarray', 'np.asarray', (['X_adv'], {'dtype': 'np.float32'}), '(X_adv, dtype=np.float32)\n', (14874, 14899), True, 'import numpy as np\n'), ((16606, 16641), 'numpy.vstack', 'np.vstack', (['[X_sub_prev, X_sub_prev]'], {}), '([X_sub_prev, X_sub_prev])\n', (16615, 16641), True, 'import numpy as np\n'), ((1678, 1717), 'tensorflow.reduce_max', 'tf.reduce_max', (['preds', '(1)'], {'keep_dims': '(True)'}), '(preds, 1, keep_dims=True)\n', (1691, 1717), True, 'import tensorflow as tf\n'), ((1782, 1817), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['y', '(1)'], {'keep_dims': '(True)'}), '(y, 1, keep_dims=True)\n', (1795, 1817), True, 'import tensorflow as tf\n'), ((2043, 2056), 'tensorflow.sign', 'tf.sign', (['grad'], {}), '(grad)\n', (2050, 2056), True, 'import tensorflow as tf\n'), ((3109, 3152), 'tensorflow.clip_by_value', 'tf.clip_by_value', (['adv_x', 'clip_min', 'clip_max'], {}), '(adv_x, clip_min, clip_max)\n', (3125, 3152), True, 'import tensorflow as tf\n'), ((4140, 4196), 'tensorflow.name_scope', 'tf.name_scope', (['scope', '"""virtual_adversarial_perturbation"""'], {}), "(scope, 'virtual_adversarial_perturbation')\n", (4153, 4196), True, 'import tensorflow as tf\n'), ((4580, 4603), 'tensorflow.stop_gradient', 'tf.stop_gradient', (['(x + d)'], {}), '(x + d)\n', (4596, 4603), True, 'import tensorflow as tf\n'), ((5490, 5527), 'numpy.minimum', 'np.minimum', (['clip_max', '(X[0, i] + theta)'], {}), '(clip_max, X[0, i] + theta)\n', (5500, 5527), True, 'import numpy as np\n'), ((5546, 5583), 'numpy.minimum', 'np.minimum', (['clip_max', '(X[0, j] + theta)'], {}), '(clip_max, X[0, j] + theta)\n', (5556, 5583), True, 'import numpy as np\n'), ((5612, 5649), 'numpy.maximum', 'np.maximum', (['clip_min', '(X[0, i] - theta)'], {}), '(clip_min, X[0, i] - theta)\n', (5622, 5649), True, 'import numpy as np\n'), ((5668, 5705), 'numpy.maximum', 'np.maximum', (['clip_min', '(X[0, j] - theta)'], {}), '(clip_min, X[0, j] - theta)\n', (5678, 5705), True, 'import 
numpy as np\n'), ((8586, 8624), 'numpy.reshape', 'np.reshape', (['run_grad', '(1, nb_features)'], {}), '(run_grad, (1, nb_features))\n', (8596, 8624), True, 'import numpy as np\n'), ((9481, 9523), 'tensorflow.gradients', 'tf.gradients', (['predictions[:, class_ind]', 'x'], {}), '(predictions[:, class_ind], x)\n', (9493, 9523), True, 'import tensorflow as tf\n'), ((12026, 12059), 'numpy.reshape', 'np.reshape', (['adv_x', 'original_shape'], {}), '(adv_x, original_shape)\n', (12036, 12059), True, 'import numpy as np\n'), ((14261, 14288), 'numpy.expand_dims', 'np.expand_dims', (['val'], {'axis': '(0)'}), '(val, axis=0)\n', (14275, 14288), True, 'import numpy as np\n'), ((16115, 16303), 'warnings.warn', 'warnings.warn', (['"""keras_phase argument is deprecated and will be removed on 2017-09-28. Instead, use K.set_learning_phase(0) at the start of your script and serve with tensorflow."""'], {}), "(\n 'keras_phase argument is deprecated and will be removed on 2017-09-28. Instead, use K.set_learning_phase(0) at the start of your script and serve with tensorflow.'\n )\n", (16128, 16303), False, 'import warnings\n'), ((1742, 1768), 'tensorflow.equal', 'tf.equal', (['preds', 'preds_max'], {}), '(preds, preds_max)\n', (1750, 1768), True, 'import tensorflow as tf\n'), ((4227, 4238), 'tensorflow.shape', 'tf.shape', (['x'], {}), '(x)\n', (4235, 4238), True, 'import tensorflow as tf\n'), ((4494, 4514), 'tensorflow.stop_gradient', 'tf.stop_gradient', (['Hd'], {}), '(Hd)\n', (4510, 4514), True, 'import tensorflow as tf\n'), ((4686, 4729), 'tensorflow.clip_by_value', 'tf.clip_by_value', (['adv_x', 'clip_min', 'clip_max'], {}), '(adv_x, clip_min, clip_max)\n', (4702, 4729), True, 'import tensorflow as tf\n'), ((6698, 6718), 'numpy.abs', 'np.abs', (['grads_target'], {}), '(grads_target)\n', (6704, 6718), True, 'import numpy as np\n'), ((6770, 6789), 'numpy.abs', 'np.abs', (['grads_other'], {}), '(grads_other)\n', (6776, 6789), True, 'import numpy as np\n'), ((13247, 13280), 
'numpy.reshape', 'np.reshape', (['adv_x', 'original_shape'], {}), '(adv_x, original_shape)\n', (13257, 13280), True, 'import numpy as np\n'), ((13328, 13361), 'numpy.reshape', 'np.reshape', (['adv_x', 'original_shape'], {}), '(adv_x, original_shape)\n', (13338, 13361), True, 'import numpy as np\n'), ((14767, 14784), 'numpy.argmax', 'np.argmax', (['target'], {}), '(target)\n', (14776, 14784), True, 'import numpy as np\n'), ((15968, 15988), 'numpy.shape', 'np.shape', (['X_sub_prev'], {}), '(X_sub_prev)\n', (15976, 15988), True, 'import numpy as np\n'), ((16015, 16028), 'numpy.max', 'np.max', (['Y_sub'], {}), '(Y_sub)\n', (16021, 16028), True, 'import numpy as np\n'), ((16928, 16958), 'numpy.reshape', 'np.reshape', (['input', 'input_shape'], {}), '(input, input_shape)\n', (16938, 16958), True, 'import numpy as np\n'), ((4455, 4474), 'tensorflow.gradients', 'tf.gradients', (['kl', 'd'], {}), '(kl, d)\n', (4467, 4474), True, 'import tensorflow as tf\n'), ((2175, 2187), 'tensorflow.abs', 'tf.abs', (['grad'], {}), '(grad)\n', (2181, 2187), True, 'import tensorflow as tf\n'), ((11388, 11407), 'six.moves.xrange', 'xrange', (['nb_features'], {}), '(nb_features)\n', (11394, 11407), False, 'from six.moves import xrange\n'), ((11515, 11534), 'six.moves.xrange', 'xrange', (['nb_features'], {}), '(nb_features)\n', (11521, 11534), False, 'from six.moves import xrange\n'), ((17020, 17033), 'tensorflow.sign', 'tf.sign', (['grad'], {}), '(grad)\n', (17027, 17033), True, 'import tensorflow as tf\n'), ((2446, 2461), 'tensorflow.square', 'tf.square', (['grad'], {}), '(grad)\n', (2455, 2461), True, 'import tensorflow as tf\n')] |
import numpy as np
from scipy import signal
import math
import matplotlib.pyplot as plt
import matplotlib.lines as mlines
from functions.pareq import pareq
def plotPredictions(filtergainsPrediction,G_db,fs,fc2,fc1,bw,G2opt_db,numsopt,densopt):
    """Plot the predicted equalizer response against the optimized one.

    Args:
        filtergainsPrediction: predicted filter gains in dB (31 bands).
        G_db: target gains in dB (31 bands).
        fs: sampling rate in Hz.
        fc2: frequencies of the 61 interleaved control points in Hz.
        fc1: center frequencies of the 31 bands in Hz.
        bw: filter bandwidths (one per band).
        G2opt_db: optimized filter gains in dB (31 bands).
        numsopt: optimized biquad numerator coefficients, shape (3, 31).
        densopt: optimized biquad denominator coefficients, shape (3, 31).

    Returns:
        The matplotlib figure containing the comparison plot.
    """
    # Interleave the 31 target gains with midpoints of neighbours -> 61
    # control points for plotting.
    G_db2 = np.zeros([61,1])
    G_db2[::2] = G_db
    G_db2[1::2] = (G_db[:len(G_db)-1:1]+G_db[1::1])/2
    # Normalized angular center frequencies.
    wg = 2*math.pi*fc1/fs
    c = 0.38
    numsoptPred = np.zeros((3,31))
    densoptPred = np.zeros((3,31))
    # Linear gains from the predicted dB gains; Gw is the gain at the band
    # edges (factor c in dB).
    G = 10 **(filtergainsPrediction/20)
    Gw_db = c * filtergainsPrediction
    Gw = 10 **(Gw_db/20)
    # Design one parametric EQ section per band from the predicted gains.
    for k in range(31):
        [num,den] = pareq(G[k],Gw[k],wg[k],bw[k])
        numsoptPred[:,k] = num
        densoptPred[:,k] = den
    # Evaluate responses on a log-spaced frequency grid.
    N_freq = 2 **12
    w = np.logspace(np.log10(9),np.log10(22050), N_freq)
    # Cascade response of the predicted filter sections.
    H_optPred = np.ones((N_freq,31), dtype=complex)
    H_opt_totPred = np.ones((N_freq,1), dtype=complex)
    for k in range(31):
        w, h = signal.freqz(numsoptPred[:,k], densoptPred[:,k],worN=w,fs=fs)
        H_optPred[:,k]= h
        H_opt_totPred = H_optPred[:,[k]] * H_opt_totPred
    # Cascade response of the optimized filter sections.
    H_opt = np.ones((N_freq,31), dtype=complex)
    H_opt_tot = np.ones((N_freq,1), dtype=complex)
    for k in range(31):
        w, h = signal.freqz(numsopt[:,k], densopt[:,k],worN=w,fs=fs)
        H_opt[:,k]= h
        H_opt_tot= H_opt[:,[k]] * H_opt_tot
    fig = plt.figure(4)
    plt.semilogx(w,20*np.log10(np.abs(H_opt_tot)), linewidth=3.0)
    plt.semilogx(w,20*np.log10(np.abs(H_opt_totPred)), color="orange")  # predicted
    plt.plot(fc2,G_db2, "ro", markersize=4, markerfacecolor="none")
    plt.plot(fc1,filtergainsPrediction, "ro", markersize=6, markerfacecolor="none",marker="x", markeredgecolor="r")
    plt.plot(fc1,G2opt_db, "ro", markersize=6, markerfacecolor="none", marker="s",markeredgecolor="r")
    plt.ylabel("Pegel in dB")
    plt.xlabel("Frequenz in Hz")
    plt.title("Predicted frequency response")
    plt.xticks([10, 30, 100, 1000, 3000, 10000])
    plt.yticks(np.arange(-15,20,5))
    plt.grid(which="both", linestyle="--", color="grey")
    # Proxy artists for the legend (matching the marker styles used above).
    filtergain = mlines.Line2D([], [], linestyle='None',
                               markersize=8, markerfacecolor="none", marker="o", markeredgecolor="r", label="Target Gains")
    targetgain = mlines.Line2D([], [], linestyle='None',
                               markersize=8, markerfacecolor="none", marker="s", markeredgecolor="r", label="Optimized filter gains")
    filterpredicted = mlines.Line2D([], [], linestyle='None',
                                    markersize=8, markerfacecolor="none", marker="x", markeredgecolor="r", label="Predicted filter gains")
    frequencyCalculated = mlines.Line2D([], [], linestyle='None',
                                        markersize=8, markerfacecolor="none", marker="_", markeredgecolor="b", label="OGEQ")
    frequencyPredicted = mlines.Line2D([], [], linestyle='None',
                                       markersize=8, markerfacecolor="none", marker="_", markeredgecolor="orange", label="NGEQ")
    plt.legend(handles=[filtergain,targetgain,filterpredicted,frequencyCalculated,frequencyPredicted])
    return fig
"matplotlib.pyplot.title",
"functions.pareq.pareq",
"numpy.abs",
"matplotlib.pyplot.plot",
"matplotlib.lines.Line2D",
"scipy.signal.freqz",
"matplotlib.pyplot.legend",
"numpy.zeros",
"numpy.ones",
"matplotlib.pyplot.figure",
"numpy.arange",
"matplotlib.pyplot.xticks",
"matplotlib.pyplot.ylab... | [((263, 280), 'numpy.zeros', 'np.zeros', (['[61, 1]'], {}), '([61, 1])\n', (271, 280), True, 'import numpy as np\n'), ((418, 435), 'numpy.zeros', 'np.zeros', (['(3, 31)'], {}), '((3, 31))\n', (426, 435), True, 'import numpy as np\n'), ((453, 470), 'numpy.zeros', 'np.zeros', (['(3, 31)'], {}), '((3, 31))\n', (461, 470), True, 'import numpy as np\n'), ((808, 844), 'numpy.ones', 'np.ones', (['(N_freq, 31)'], {'dtype': 'complex'}), '((N_freq, 31), dtype=complex)\n', (815, 844), True, 'import numpy as np\n'), ((864, 899), 'numpy.ones', 'np.ones', (['(N_freq, 1)'], {'dtype': 'complex'}), '((N_freq, 1), dtype=complex)\n', (871, 899), True, 'import numpy as np\n'), ((1110, 1146), 'numpy.ones', 'np.ones', (['(N_freq, 31)'], {'dtype': 'complex'}), '((N_freq, 31), dtype=complex)\n', (1117, 1146), True, 'import numpy as np\n'), ((1162, 1197), 'numpy.ones', 'np.ones', (['(N_freq, 1)'], {'dtype': 'complex'}), '((N_freq, 1), dtype=complex)\n', (1169, 1197), True, 'import numpy as np\n'), ((1426, 1439), 'matplotlib.pyplot.figure', 'plt.figure', (['(4)'], {}), '(4)\n', (1436, 1439), True, 'import matplotlib.pyplot as plt\n'), ((1652, 1716), 'matplotlib.pyplot.plot', 'plt.plot', (['fc2', 'G_db2', '"""ro"""'], {'markersize': '(4)', 'markerfacecolor': '"""none"""'}), "(fc2, G_db2, 'ro', markersize=4, markerfacecolor='none')\n", (1660, 1716), True, 'import matplotlib.pyplot as plt\n'), ((1720, 1838), 'matplotlib.pyplot.plot', 'plt.plot', (['fc1', 'filtergainsPrediction', '"""ro"""'], {'markersize': '(6)', 'markerfacecolor': '"""none"""', 'marker': '"""x"""', 'markeredgecolor': '"""r"""'}), "(fc1, filtergainsPrediction, 'ro', markersize=6, markerfacecolor=\n 'none', marker='x', markeredgecolor='r')\n", (1728, 1838), True, 'import matplotlib.pyplot as plt\n'), ((1836, 1941), 'matplotlib.pyplot.plot', 'plt.plot', (['fc1', 'G2opt_db', '"""ro"""'], {'markersize': '(6)', 'markerfacecolor': '"""none"""', 'marker': '"""s"""', 'markeredgecolor': '"""r"""'}), "(fc1, 
G2opt_db, 'ro', markersize=6, markerfacecolor='none', marker=\n 's', markeredgecolor='r')\n", (1844, 1941), True, 'import matplotlib.pyplot as plt\n'), ((1939, 1964), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Pegel in dB"""'], {}), "('Pegel in dB')\n", (1949, 1964), True, 'import matplotlib.pyplot as plt\n'), ((1969, 1997), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Frequenz in Hz"""'], {}), "('Frequenz in Hz')\n", (1979, 1997), True, 'import matplotlib.pyplot as plt\n'), ((2002, 2043), 'matplotlib.pyplot.title', 'plt.title', (['"""Predicted frequency response"""'], {}), "('Predicted frequency response')\n", (2011, 2043), True, 'import matplotlib.pyplot as plt\n'), ((2048, 2092), 'matplotlib.pyplot.xticks', 'plt.xticks', (['[10, 30, 100, 1000, 3000, 10000]'], {}), '([10, 30, 100, 1000, 3000, 10000])\n', (2058, 2092), True, 'import matplotlib.pyplot as plt\n'), ((2133, 2185), 'matplotlib.pyplot.grid', 'plt.grid', ([], {'which': '"""both"""', 'linestyle': '"""--"""', 'color': '"""grey"""'}), "(which='both', linestyle='--', color='grey')\n", (2141, 2185), True, 'import matplotlib.pyplot as plt\n'), ((2203, 2340), 'matplotlib.lines.Line2D', 'mlines.Line2D', (['[]', '[]'], {'linestyle': '"""None"""', 'markersize': '(8)', 'markerfacecolor': '"""none"""', 'marker': '"""o"""', 'markeredgecolor': '"""r"""', 'label': '"""Target Gains"""'}), "([], [], linestyle='None', markersize=8, markerfacecolor=\n 'none', marker='o', markeredgecolor='r', label='Target Gains')\n", (2216, 2340), True, 'import matplotlib.lines as mlines\n'), ((2379, 2526), 'matplotlib.lines.Line2D', 'mlines.Line2D', (['[]', '[]'], {'linestyle': '"""None"""', 'markersize': '(8)', 'markerfacecolor': '"""none"""', 'marker': '"""s"""', 'markeredgecolor': '"""r"""', 'label': '"""Optimized filter gains"""'}), "([], [], linestyle='None', markersize=8, markerfacecolor=\n 'none', marker='s', markeredgecolor='r', label='Optimized filter gains')\n", (2392, 2526), True, 'import matplotlib.lines as mlines\n'), 
((2570, 2717), 'matplotlib.lines.Line2D', 'mlines.Line2D', (['[]', '[]'], {'linestyle': '"""None"""', 'markersize': '(8)', 'markerfacecolor': '"""none"""', 'marker': '"""x"""', 'markeredgecolor': '"""r"""', 'label': '"""Predicted filter gains"""'}), "([], [], linestyle='None', markersize=8, markerfacecolor=\n 'none', marker='x', markeredgecolor='r', label='Predicted filter gains')\n", (2583, 2717), True, 'import matplotlib.lines as mlines\n'), ((2765, 2894), 'matplotlib.lines.Line2D', 'mlines.Line2D', (['[]', '[]'], {'linestyle': '"""None"""', 'markersize': '(8)', 'markerfacecolor': '"""none"""', 'marker': '"""_"""', 'markeredgecolor': '"""b"""', 'label': '"""OGEQ"""'}), "([], [], linestyle='None', markersize=8, markerfacecolor=\n 'none', marker='_', markeredgecolor='b', label='OGEQ')\n", (2778, 2894), True, 'import matplotlib.lines as mlines\n'), ((2941, 3075), 'matplotlib.lines.Line2D', 'mlines.Line2D', (['[]', '[]'], {'linestyle': '"""None"""', 'markersize': '(8)', 'markerfacecolor': '"""none"""', 'marker': '"""_"""', 'markeredgecolor': '"""orange"""', 'label': '"""NGEQ"""'}), "([], [], linestyle='None', markersize=8, markerfacecolor=\n 'none', marker='_', markeredgecolor='orange', label='NGEQ')\n", (2954, 3075), True, 'import matplotlib.lines as mlines\n'), ((3123, 3229), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'handles': '[filtergain, targetgain, filterpredicted, frequencyCalculated,\n frequencyPredicted]'}), '(handles=[filtergain, targetgain, filterpredicted,\n frequencyCalculated, frequencyPredicted])\n', (3133, 3229), True, 'import matplotlib.pyplot as plt\n'), ((622, 654), 'functions.pareq.pareq', 'pareq', (['G[k]', 'Gw[k]', 'wg[k]', 'bw[k]'], {}), '(G[k], Gw[k], wg[k], bw[k])\n', (627, 654), False, 'from functions.pareq import pareq\n'), ((755, 766), 'numpy.log10', 'np.log10', (['(9)'], {}), '(9)\n', (763, 766), True, 'import numpy as np\n'), ((767, 782), 'numpy.log10', 'np.log10', (['(22050)'], {}), '(22050)\n', (775, 782), True, 'import numpy as 
np\n'), ((943, 1008), 'scipy.signal.freqz', 'signal.freqz', (['numsoptPred[:, k]', 'densoptPred[:, k]'], {'worN': 'w', 'fs': 'fs'}), '(numsoptPred[:, k], densoptPred[:, k], worN=w, fs=fs)\n', (955, 1008), False, 'from scipy import signal\n'), ((1290, 1347), 'scipy.signal.freqz', 'signal.freqz', (['numsopt[:, k]', 'densopt[:, k]'], {'worN': 'w', 'fs': 'fs'}), '(numsopt[:, k], densopt[:, k], worN=w, fs=fs)\n', (1302, 1347), False, 'from scipy import signal\n'), ((2108, 2129), 'numpy.arange', 'np.arange', (['(-15)', '(20)', '(5)'], {}), '(-15, 20, 5)\n', (2117, 2129), True, 'import numpy as np\n'), ((1476, 1493), 'numpy.abs', 'np.abs', (['H_opt_tot'], {}), '(H_opt_tot)\n', (1482, 1493), True, 'import numpy as np\n'), ((1543, 1564), 'numpy.abs', 'np.abs', (['H_opt_totPred'], {}), '(H_opt_totPred)\n', (1549, 1564), True, 'import numpy as np\n')] |
"""Parametrizing a single pv-panel.
Fit PV-Parameters from Datasheet
single_ifromv (singlediode)
substri_v (substring/module)
module_v (module/panel)
"""
#
# def single2ifromv(arg0 = np.array(
# [6.48000237e+00, 6.03959251e-10, 5.55129794e-03,
# 1.52143849e+04, 3.13453068e-02])):
import numpy as np
from scipy.interpolate import griddata
from scipy.interpolate import RegularGridInterpolator
from scipy.interpolate import NearestNDInterpolator
from scipy.optimize import brute
from typing import List, Dict, Callable
import functools
import pvlib
from pvlib import pvsystem
from concurrent import futures
class Pv_panel():
    """Class to parametrize a single pv-panel.
    Attributes:
        cells_x (int): Number of cells along x-direction.
        cells_y (int): Number of cells along y-direction.
        cell_dim_x (float): cell size in x-direction in meters.
        cell_dim_y (float): cell size in y-direction in meters.
        cell_param (dict): (approximated) physical cell parameters.
        v_from_i (fun): Panel voltage-current-function.
    """
    def __init__(self, i_sc: float = 6.48, v_oc: float = 69.5,
                 i_mp: float = 6.09, v_mp: float = 59.1, n_cells: int = 96,
                 bypass_config: list = None, dim_x: float = 1.046,
                 dim_y: float = 1.558, v_rb: float = -0.5, I_min: float = -4,
                 I_max: float = 7, L_max: float = 1.2):
        """Initialize.
        Args:
            i_sc (float): panel short circuit current in A.
                Defaults to 6.48.
            v_oc (float): panel open circuit voltage in V.
                Defaults to 69.5.
            i_mp (float): panel current at maximum power point stp.
                Defaults to 6.09.
            v_mp (float): panel voltage at maximum power point stp.
                Defaults to 59.1.
            n_cells (int): Number of cells in panel. Defaults to 96.
            bypass_config (list): configuration of the bypass diodes. In the
                default case the panel consists of 8 columns and 12 rows of
                96 cells; the column groups 1-2, 3-6, 7-8 each have their own
                diode. Defaults to [[2, 4, 2], 12] when None is given.
            dim_x (float): Width of the panel in meters. Defaults to 1.046.
            dim_y (float): Height of the panel in meters. Defaults to 1.558.
            v_rb (float): Reverse bias diode breakthrough voltage in V.
                Defaults to -0.5.
            I_min (float): Minimum current in the calculation.
                Defaults to -4 A.
            I_max (float): Maximum current in the calculation. Defaults to 7 A.
            L_max (float): Maximum Irradiance. Defaults to 1.2 kw/m*m.
        """
        # BUGFIX: the default used to be the mutable literal [[2, 4, 2], 12],
        # a single list object shared across all instances; use a None
        # sentinel instead (same effective default).
        if bypass_config is None:
            bypass_config = [[2, 4, 2], 12]
        self.i_sc = i_sc
        self.v_oc = v_oc
        self.i_mp = i_mp
        self.v_mp = v_mp
        self.n_cells = n_cells
        self.bypass_config = bypass_config
        self.dim_x = dim_x
        self.dim_y = dim_y
        # Cell counts derived from the bypass layout.
        self.cells_x = np.sum(bypass_config[0])
        self.cells_y = np.sum(bypass_config[1])
        self.cell_dim_x = self.dim_x/self.cells_x
        self.cell_dim_y = self.dim_y/self.cells_y
        self.v_rb = v_rb
        self.cell_param = None
        self.v_from_i = None
        self.I_min = I_min
        self.I_max = I_max
        self.L_max = L_max
        # Fit the physical single-diode parameters and build the panel's
        # voltage-from-current function right away.
        self.fit_cell_parameters()
        self.set_v_from_i()
    def fit_cell_parameters(self, cell_config: dict = None):
        """Fit and set physical cell parameters.
        Args:
            cell_config (dict): Dictionary of panel parameters. If nothing is
                provided, it takes the values from the instance creation.
                Defaults to None.
        """
        if cell_config is None:
            self.cell_param = fit_pv(**self.cell_config())
        else:
            self.cell_param = fit_pv(**cell_config)
    def set_v_from_i(self):
        """Set voltage-current function."""
        self.v_from_i = get_v_panel_from_i(cell_param=self.cell_param,
                                             I_max=self.I_max,
                                             I_min=self.I_min,
                                             L_max=self.L_max,
                                             )
    def cell_config(self) -> Dict:
        """Return the per-cell datasheet values as a dictionary.

        Voltages are divided by the number of series cells; currents are
        unchanged (cells are in series).
        """
        return {
            'i_sc': self.i_sc,
            'v_oc': self.v_oc/self.n_cells,
            'i_mp': self.i_mp,
            'v_mp': self.v_mp/self.n_cells
        }
def fit_pv(i_sc: float = 6.48, v_oc: float = 69.5/96, i_mp: float = 6.09,
           v_mp: float = 59.1/96, Ns: int = 4, x0=None, delta_x: float = .8):
    """Brute-force approximation of physical single-diode cell parameters.

    Searches a grid around ``x0`` for the parameter vector
    (I_ph, I_0, R_s, R_sh, v_th) that best reproduces the datasheet values:
    - I_ph photo current
    - I_0 reverse saturation current
    - R_s series resistance
    - R_sh shunt resistance
    - v_th thermal voltage kT/e

    Args:
        i_sc (float): cell short circuit current in A. Defaults to 6.48.
        v_oc (float): cell open circuit voltage in V. Defaults to 69.5/96.
        i_mp (float): cell current at maximum power point stp.
            Defaults to 6.09.
        v_mp (float): cell voltage at maximum power point stp.
            Defaults to 59.1/96.
        Ns (int): Number of grid steps per dimension for the brute-force
            search. Defaults to 4.
        x0 (tuplelike): Center of the brute-force parameter grid.
            Defaults to None (a built-in starting guess is used).
        delta_x (float): Relative half-width of the grid around x0.
            Defaults to .8.

    Returns:
        pv_param: best-fitting parameter vector found by scipy's brute().
    """
    if x0 is None:
        x0 = (6.09, 4.268694026502061e-10, .0045, 10000, 0.025679644404907293)
    center = np.asarray(x0)
    # Grid bounds: center +/- delta_x * center per dimension.
    lower = center - center * delta_x
    upper = center + center * delta_x
    loss = _gen_pv_fit_loss_func(i_sc=i_sc, v_oc=v_oc, i_mp=i_mp, v_mp=v_mp)
    return brute(loss, ranges=tuple(zip(lower, upper)), Ns=Ns)
def _gen_pv_fit_loss_func(i_sc: float = 6.48, v_oc: float = 69.5/96,
                          i_mp: float = 6.09,
                          v_mp: float = 59.1/96) -> Callable:
    """Return a loss function scoring single-diode parameters against the
    given datasheet values (i_sc, v_oc, i_mp, v_mp)."""
    def pv_fit_loss_func(params):
        """Sum of absolute deviations of the modeled i_sc, v_oc, i_mp, v_mp
        from the datasheet targets."""
        I_ph, I_0, R_s, R_sh, v_th = params
        result = pvsystem.singlediode(I_ph, I_0, R_s, R_sh, v_th)
        errors = np.asarray([result['i_sc'] - i_sc,
                               result['v_oc'] - v_oc,
                               result['i_mp'] - i_mp,
                               result['v_mp'] - v_mp])
        return np.sum(np.abs(errors), axis=0)
    return pv_fit_loss_func
def get_v_panel_from_i(cell_param=np.array([6.48000332e+00, 6.37762333e-10,
                                           8.45318984e-04, 1.65194938e+03,
                                           3.14194723e-02]),
                       I_min: float = -4, I_max: float = 7,
                       L_max: float = 1.2) -> Callable:
    """Returns a function to calculate voltages of a pv-panel at given current.

    Includes the reverse-bias (bypass) diode.
    Requires physical cell parameters:
    - I_ph photo current
    - I_0 reverse saturation current
    - R_s series resistance
    - R_sh shunt resistance
    - v_th thermal voltage kT/e

    Args:
        cell_param (tuplelike): Physical cell parameters
            (I_ph, I_0, R_s, R_sh, v_th).
            Defaults to np.array([6.48000332e+00, 6.37762333e-10,
            8.45318984e-04, 1.65194938e+03, 3.14194723e-02]).
            NOTE(review): the mutable ndarray default is only read, never
            modified, so sharing it across calls is safe here.
        I_min (float): Minimum current to be considered. Defaults to -4.
        I_max (float): Maximum current to be considered. Defaults to 7.
        L_max (float): Maximum photocurrent to be considered in A.
            Defaults to 1.2.

    Returns:
        Callable: ``v_from_i_panel(args)`` mapping panel current and
        per-substring photocurrents plus ambient conditions to voltages.
    """
    def single2v_from_i_with_nan(arg0=np.array([6.48000332e+00, 6.37762333e-10,
                                               8.45318984e-04, 1.65194938e+03,
                                               3.14194723e-02])) -> Callable:
        """Return function to calculate voltage from current of a single diode.

        Might include NaN (no interpolation fallback in this variant).

        Args:
            arg0 (np.ndarray): Physical cell parameters
                (I_ph, I_0, R_s, R_sh, v_th). Defaults to np.array(
                [6.48000332e+00, 6.37762333e-10,
                8.45318984e-04, 1.65194938e+03, 3.14194723e-02]).

        Returns:
            Callable: Calculate voltage from current of a single diode.
        """
        (I_ph, I_0, R_s, R_sh, v_th) = arg0
        def v_from_i(I: np.ndarray, L: np.ndarray, t_cell: float):
            """Return diode voltage for a single pv-cell (diode).

            Given the physical cell parameters (I_ph, I_0, R_s, R_sh, v_th)
            and the arguments:

            Args:
                I (np.ndarray): Current through the cell in A.
                L (np.ndarray): Photocurrent in A
                    (is considered proportional to the irradiance).
                t_cell (float): Cell temperature in Celsius (the +273
                    below converts it to Kelvin).

            Returns:
                np.ndarray: Voltage at given current without NaN catch.
            """
            # v_th is rescaled from its ~25 C reference (298.5 K) to the
            # actual cell temperature; I_ph scales linearly with L.
            v_pn = pvlib.pvsystem.v_from_i(
                R_sh, R_s, v_th*(t_cell+273)/298.5, np.array(I, ndmin=2).T,
                I_0, I_ph*np.asarray(L))
            return v_pn
        return v_from_i
    # Generate interpolation function to guarantee non-NaN values:
    # sample the (current, photocurrent, temperature) space on a grid and
    # build a nearest-neighbour lookup over the valid (non-NaN) samples.
    v_i_with_nan = single2v_from_i_with_nan(arg0=cell_param)
    I_arr = np.linspace(I_min, I_max, 110)
    L_arr = np.linspace(0, L_max, 100)
    T_arr = np.linspace(-20, 80, 100)
    data = v_i_with_nan(*np.meshgrid(I_arr, L_arr, T_arr))
    # NOTE(review): np.meshgrid is recomputed three times below with the
    # same inputs; the results are identical each time.
    I_ = np.meshgrid(I_arr, L_arr, T_arr)[0].flatten()
    L_ = np.meshgrid(I_arr, L_arr, T_arr)[1].flatten()
    T_ = np.meshgrid(I_arr, L_arr, T_arr)[2].flatten()
    not_nans = np.argwhere(np.logical_not(
        np.isnan(data.flatten()))).reshape(-1)
    v_i_interpolate = NearestNDInterpolator(
        (I_.flatten()[not_nans], L_.flatten()[not_nans],
         T_.flatten()[not_nans]), data.flatten()[not_nans]
    )
    def single2v_from_i(arg0=np.array([6.48000332e+00, 6.37762333e-10,
                                      8.45318984e-04, 1.65194938e+03,
                                      3.14194723e-02])) -> Callable:
        """Return function to calculate voltage from current of a single diode.

        Includes NaN catch (falls back to the grid interpolator above).

        Args:
            arg0 (np.ndarray): Physical cell parameters
                (I_ph, I_0, R_s, R_sh, v_th).
                Defaults to np.array([6.48000332e+00, 6.37762333e-10,
                8.45318984e-04, 1.65194938e+03, 3.14194723e-02]).

        Returns:
            Callable: Voltage at given current with NaN catch.
        """
        (I_ph, I_0, R_s, R_sh, v_th) = arg0
        pvlib_v_from_i = pvlib.pvsystem.v_from_i
        # Memoized: arguments must therefore be hashable (tuples/floats).
        @functools.lru_cache(maxsize=2048*16)
        def v_from_i(I_cells, Iph, t_cell):
            """Return diode voltage for a single pv-cell (diode).

            Given the physical cell parameters (I_ph, I_0, R_s, R_sh, v_th)
            and the arguments. Includes NaN catch.

            Args:
                I_cells (tuple): Current through the cell in A.
                Iph (tuple): Photocurrent in A
                    (is considered proportional to the irradiance).
                t_cell (float): Cell temperature.

            Returns:
                np.ndarray: Voltage at given current with NaN catch.
            """
            v_pn = pvlib_v_from_i(
                R_sh, R_s, v_th*(t_cell+273)/298.5,
                np.array(I_cells, ndmin=2).T,
                I_0, I_ph*np.asarray(Iph))
            if np.isnan(v_pn).any():
                # pvlib produced NaN somewhere: fall back to the nearest
                # pre-computed grid point instead of propagating NaN.
                return v_i_interpolate((np.array(I_cells, ndmin=2).T,
                                        np.asarray(Iph), t_cell))
            else:
                return v_pn
        return v_from_i
    v_from_i = single2v_from_i(cell_param)
    def calc_t_cell(L: np.ndarray, T_am: float, W_10: float,
                    model: str = 'roof_mount_cell_glassback'):
        """Wrapper function for cell temperature calculation.

        Args:
            L (np.ndarray): Irradiance in kW.
            T_am (float): Ambient temperature.
            W_10 (float): Windspeed @10 meter.
            model (str): Defaults to 'roof_mount_cell_glassback'.

        Returns:
            float: Cell temperature.
        """
        # Mean of all photocurrent entries, scaled by 1e3 (kW -> W per the
        # docstring above) before being passed to pvlib's SAPM model.
        return pvsystem.sapm_celltemp(
            np.sum(np.hstack(L))/np.size(np.hstack(L))*1e3,
            W_10, T_am,)['temp_cell'][0]
    @functools.lru_cache(maxsize=2048*16)
    def substr_v_P(I_substr: tuple, Iph_substr: tuple, t_cell: float = 0,
                   v_rb: float = -.5):
        """Returns voltages of a substring in a panel at given currents.

        Sums the series-connected cell voltages and limits them from below
        by the bypass-diode reverse-bias term.

        Args:
            I_substr (tuple): Current through the cell in A.
            Iph_substr (tuple): Photocurrent in A.
            t_cell (float, optional): Cell temperature. Defaults to 0.
            v_rb (float, optional): Bypass diode breakthrough voltage in V
                (reverse bias). Defaults to -.5.

        Returns:
            np.ndarray: Voltages at given currents through the substring.
        """
        return np.maximum(np.sum(v_from_i(I_substr, Iph_substr,
                                         t_cell), axis=1),
                          v_rb*np.exp(np.asarray(I_substr)/20))
    def v_from_i_panel(args):
        """Returns voltages of a pv-panel at given currents.

        Args:
            args (tuple): (
                I_pan : current through the cell in A
                Iph_panel : list of photocurrents in A, one per substring
                    (is considered proportional to the irradiance)
                T_am : ambient temperature
                W_10 : windspeed in 10 m
                _ : unused placeholder
                )
        """
        (I_pan, Iph_panel, T_am, W_10, _) = args
        t_cell = calc_t_cell(Iph_panel, T_am, W_10)
        # Panel voltage is the sum over all substrings of the substring
        # voltages at the common panel current.
        return np.asarray(
            sum(substr_v_P(tuple(I_pan),
                           Iph_substr=tuple(Iph_substring),
                           t_cell=t_cell) for Iph_substring in Iph_panel)
        )
    return v_from_i_panel
| [
"numpy.meshgrid",
"numpy.sum",
"numpy.asarray",
"numpy.isnan",
"numpy.hstack",
"numpy.array",
"numpy.linspace",
"functools.lru_cache",
"pvlib.pvsystem.singlediode"
] | [((5885, 5899), 'numpy.asarray', 'np.asarray', (['x0'], {}), '(x0)\n', (5895, 5899), True, 'import numpy as np\n'), ((5913, 5967), 'numpy.asarray', 'np.asarray', (['[x_0 - x_0 * delta_x, x_0 + x_0 * delta_x]'], {}), '([x_0 - x_0 * delta_x, x_0 + x_0 * delta_x])\n', (5923, 5967), True, 'import numpy as np\n'), ((6859, 6944), 'numpy.array', 'np.array', (['[6.48000332, 6.37762333e-10, 0.000845318984, 1651.94938, 0.0314194723]'], {}), '([6.48000332, 6.37762333e-10, 0.000845318984, 1651.94938, 0.0314194723]\n )\n', (6867, 6944), True, 'import numpy as np\n'), ((9534, 9564), 'numpy.linspace', 'np.linspace', (['I_min', 'I_max', '(110)'], {}), '(I_min, I_max, 110)\n', (9545, 9564), True, 'import numpy as np\n'), ((9577, 9603), 'numpy.linspace', 'np.linspace', (['(0)', 'L_max', '(100)'], {}), '(0, L_max, 100)\n', (9588, 9603), True, 'import numpy as np\n'), ((9616, 9641), 'numpy.linspace', 'np.linspace', (['(-20)', '(80)', '(100)'], {}), '(-20, 80, 100)\n', (9627, 9641), True, 'import numpy as np\n'), ((12623, 12661), 'functools.lru_cache', 'functools.lru_cache', ([], {'maxsize': '(2048 * 16)'}), '(maxsize=2048 * 16)\n', (12642, 12661), False, 'import functools\n'), ((2974, 2998), 'numpy.sum', 'np.sum', (['bypass_config[0]'], {}), '(bypass_config[0])\n', (2980, 2998), True, 'import numpy as np\n'), ((3022, 3046), 'numpy.sum', 'np.sum', (['bypass_config[1]'], {}), '(bypass_config[1])\n', (3028, 3046), True, 'import numpy as np\n'), ((6563, 6611), 'pvlib.pvsystem.singlediode', 'pvsystem.singlediode', (['I_ph', 'I_0', 'R_s', 'R_sh', 'v_th'], {}), '(I_ph, I_0, R_s, R_sh, v_th)\n', (6583, 6611), False, 'from pvlib import pvsystem\n'), ((7946, 8031), 'numpy.array', 'np.array', (['[6.48000332, 6.37762333e-10, 0.000845318984, 1651.94938, 0.0314194723]'], {}), '([6.48000332, 6.37762333e-10, 0.000845318984, 1651.94938, 0.0314194723]\n )\n', (7954, 8031), True, 'import numpy as np\n'), ((10157, 10242), 'numpy.array', 'np.array', (['[6.48000332, 6.37762333e-10, 0.000845318984, 
1651.94938, 0.0314194723]'], {}), '([6.48000332, 6.37762333e-10, 0.000845318984, 1651.94938, 0.0314194723]\n )\n', (10165, 10242), True, 'import numpy as np\n'), ((10887, 10925), 'functools.lru_cache', 'functools.lru_cache', ([], {'maxsize': '(2048 * 16)'}), '(maxsize=2048 * 16)\n', (10906, 10925), False, 'import functools\n'), ((9667, 9699), 'numpy.meshgrid', 'np.meshgrid', (['I_arr', 'L_arr', 'T_arr'], {}), '(I_arr, L_arr, T_arr)\n', (9678, 9699), True, 'import numpy as np\n'), ((6641, 6734), 'numpy.asarray', 'np.asarray', (["[pv['i_sc'] - i_sc, pv['v_oc'] - v_oc, pv['i_mp'] - i_mp, pv['v_mp'] - v_mp]"], {}), "([pv['i_sc'] - i_sc, pv['v_oc'] - v_oc, pv['i_mp'] - i_mp, pv[\n 'v_mp'] - v_mp])\n", (6651, 6734), True, 'import numpy as np\n'), ((9710, 9742), 'numpy.meshgrid', 'np.meshgrid', (['I_arr', 'L_arr', 'T_arr'], {}), '(I_arr, L_arr, T_arr)\n', (9721, 9742), True, 'import numpy as np\n'), ((9765, 9797), 'numpy.meshgrid', 'np.meshgrid', (['I_arr', 'L_arr', 'T_arr'], {}), '(I_arr, L_arr, T_arr)\n', (9776, 9797), True, 'import numpy as np\n'), ((9820, 9852), 'numpy.meshgrid', 'np.meshgrid', (['I_arr', 'L_arr', 'T_arr'], {}), '(I_arr, L_arr, T_arr)\n', (9831, 9852), True, 'import numpy as np\n'), ((9280, 9300), 'numpy.array', 'np.array', (['I'], {'ndmin': '(2)'}), '(I, ndmin=2)\n', (9288, 9300), True, 'import numpy as np\n'), ((9330, 9343), 'numpy.asarray', 'np.asarray', (['L'], {}), '(L)\n', (9340, 9343), True, 'import numpy as np\n'), ((11634, 11660), 'numpy.array', 'np.array', (['I_cells'], {'ndmin': '(2)'}), '(I_cells, ndmin=2)\n', (11642, 11660), True, 'import numpy as np\n'), ((11690, 11705), 'numpy.asarray', 'np.asarray', (['Iph'], {}), '(Iph)\n', (11700, 11705), True, 'import numpy as np\n'), ((11722, 11736), 'numpy.isnan', 'np.isnan', (['v_pn'], {}), '(v_pn)\n', (11730, 11736), True, 'import numpy as np\n'), ((11854, 11869), 'numpy.asarray', 'np.asarray', (['Iph'], {}), '(Iph)\n', (11864, 11869), True, 'import numpy as np\n'), ((13611, 13631), 
'numpy.asarray', 'np.asarray', (['I_substr'], {}), '(I_substr)\n', (13621, 13631), True, 'import numpy as np\n'), ((11784, 11810), 'numpy.array', 'np.array', (['I_cells'], {'ndmin': '(2)'}), '(I_cells, ndmin=2)\n', (11792, 11810), True, 'import numpy as np\n'), ((12535, 12547), 'numpy.hstack', 'np.hstack', (['L'], {}), '(L)\n', (12544, 12547), True, 'import numpy as np\n'), ((12557, 12569), 'numpy.hstack', 'np.hstack', (['L'], {}), '(L)\n', (12566, 12569), True, 'import numpy as np\n')] |
# -*- coding: UTF-8 -*-
__author__ = '<NAME>'
import numpy as np
import VoigtFit
def print_T_model_pars(dataset, filename=None):
    """Print the turbulence and T parameters for the physical model.

    Parameters
    ----------
    dataset : VoigtFit.DataSet
        Fitted dataset; must expose ``components`` (dict of component
        lists per ion) and ``best_fit`` (mapping of parameter names to
        objects with ``value`` and ``stderr``).
    filename : str, optional
        If given, the same table is also written to this file.
    """
    # BUG FIX: dict.values() is a view on Python 3 and cannot be indexed
    # with [0]; next(iter(...)) works on both Python 2 and 3.
    N_comp = len(next(iter(dataset.components.values())))
    out_file = open(filename, 'w') if filename else None
    try:
        print("")
        print(u"  No:    Temperature [K]     Turbulence [km/s]")
        if out_file:
            out_file.write(u"# No:    Temperature [K]     Turbulence [km/s] \n")
        for comp_num in range(N_comp):
            T_fit = dataset.best_fit['T_%i' % comp_num]
            turb_fit = dataset.best_fit['turb_%i' % comp_num]
            par_tuple = (comp_num, T_fit.value, T_fit.stderr,
                         turb_fit.value, turb_fit.stderr)
            print(u"  %-3i   %.2e ± %.2e   %.2e ± %.2e" % par_tuple)
            if out_file:
                out_file.write(u"  %-3i   %.2e ± %.2e   %.2e ± %.2e \n" % par_tuple)
        print("")
    finally:
        # Close the file even if a parameter lookup fails above
        # (the original leaked the handle on error).
        if out_file:
            out_file.close()
# -- Fit DLA towards quasar Q1313+1441
#    Observed in X-shooter P089.A-0068
z_DLA = 0.00345
# If log(NHI) is not known use:
logNHI = None
# -- Load X-SHOOTER UVB and VIS data in ASCII format
#    (three columns: wavelength, flux, error):
fname = 'thermal_model_2comp.dat'
# Spectral resolution handed to dataset.add_data below.
res = 6.6
wl, spec, err = np.loadtxt(fname, unpack=True)
# -- Here you can load your data in any way you wish.
#    Only requirement is that wl, spec, and err have the same dimensions.
# -- A dataset which has already been defined can be loaded like this:
# dataset = VoigtFit.LoadDataSet('test_data.hdf5')
dataset = VoigtFit.DataSet(z_DLA)
dataset.set_name('test_2comp')
dataset.verbose = True
dataset.velspan = 150.
dataset.cheb_order = -1
# -- Add the data loaded from the ASCII file (already normalized):
dataset.add_data(wl, spec, res, err=err, normalized=True)
# -- Define absorption lines:
dataset.add_many_lines(['FeII_2344', 'FeII_2374', 'FeII_2382'])
dataset.add_many_lines(['FeII_1608', 'FeII_1611'])
dataset.add_line('FeII_2260')
dataset.add_line('FeII_2249')
dataset.add_line('CrII_2056')
dataset.add_line('CrII_2066')
dataset.add_line('CrII_2026')
dataset.add_line('ZnII_2026')
dataset.add_line('CrII_2062')
dataset.add_line('ZnII_2062')
# -- dataset.add_many_lines is equivalent to dataset.add_lines:
dataset.add_many_lines(['CII_1036', 'CII_1334'])
dataset.add_many_lines(['OI_1302', 'OI_1039', 'OI_1355'])
dataset.add_many_lines(['SiII_1526', 'SiII_1808', 'SiII_1304'])
dataset.add_many_lines(['SiII_1260', 'FeII_1260', 'SII_1259'])
dataset.add_many_lines(['SII_1250', 'SII_1253'])
# -- If a line has been defined, and you don't want to fit it,
#    it can either be removed from the dataset completely:
# dataset.remove_line('CrII_2056')
# -- or deactivated:
# dataset.deactivate_line('FeII_2374')
# -- Deactivated lines will not be included in the fit, but their line
#    definitions and components remain in the dataset for future reference.
# -- To use the physical model, make sure that all components are cleared:
dataset.reset_components()
# -- Add components for each ion:
#                      ion     z        b    logN
dataset.add_component('FeII', 0.003290, 5., 15.0, var_z=1, var_b=1)
dataset.add_component('FeII', 0.003620, 5., 14.5, var_z=1, var_b=1)
# -- The physical model requires that all ions have the same velocity
#    structure.  The default argument order is 'to', 'from':
# dataset.copy_components('CrII', 'FeII')
# -- But the ions can be specified using keywords to ease the call:
dataset.copy_components(from_ion='FeII', to_ion='CrII', tie_b=False)
dataset.copy_components(from_ion='FeII', to_ion='ZnII', tie_b=False)
dataset.copy_components(from_ion='FeII', to_ion='SiII', tie_b=False)
dataset.copy_components(from_ion='FeII', to_ion='SII', tie_b=False)
dataset.copy_components(from_ion='FeII', to_ion='CII', tie_b=False)
dataset.copy_components(from_ion='FeII', to_ion='OI', tie_b=False)
# -- This copies the two components defined for FeII to the other ions and
#    keeps the same pattern of initial guesses for column density scaled
#    to the Solar abundance ratio.
# -- Individual components which are not observed for weaker lines can be
#    removed:
# dataset.delete_component('ZnII', 1)
# dataset.delete_component('ZnII', 0)
#
# NOTE - components should be deleted from last component to first component,
#        not the other way around, as that messes up the component numbering.
#        Components are zero-indexed!
# -- Prepare the dataset: This will prompt the user for interactive
#    masking and normalization, as well as initiating the Parameters:
dataset.prepare_dataset(norm=False, mask=False)
# -- Define masks for individual lines:
# dataset.mask_line('ZnII_2026')
# --- This is where the magic happens ----------------------------------------
# Set up the thermal and turbulence parameters for each component:
dataset.pars.add('turb_0', value=5., vary=True, min=0.)
dataset.pars.add('turb_1', value=5., vary=True, min=0.)
dataset.pars.add('T_0', value=5000., vary=1, min=0.)
dataset.pars.add('T_1', value=5000., vary=1, min=0.)
# -- This can be defined in a for loop assuming the same initial guess for T:
# T_init = 1.e4
# for comp_num in range(len(dataset.components.values()[0])):
#     dataset.pars.add('T_%i'%comp_num, value=T_init, vary=True, min=0.)
# -- Now set up the links for the 'b'-parameter of each component of each ion:
# 2k_B/m_u in (km/s)^2 units:
K = 0.0166287
for ion, comp in dataset.components.items():
    N_comp = len(comp)
    for comp_num in range(N_comp):
        par_name = 'b%i_%s' % (comp_num, ion)
        lines_for_ion = dataset.get_lines_for_ion(ion)
        m_ion = lines_for_ion[0].mass
        const = K/m_ion
        T_num = dataset.pars['T_%i' % comp_num].value
        # Initial guess for the effective b-parameter, assuming the
        # initial turbulence of 5 km/s set above.
        b_eff = np.sqrt(5.**2 + K*T_num/m_ion)
        # Tie b to this component's turbulence and temperature:
        # b^2 = b_turb^2 + 2kT/m
        model_constraint = 'sqrt((turb_%i)**2 + %.6f*T_%i)' % (comp_num,
                                                               const,
                                                               comp_num)
        dataset.pars[par_name].set(expr=model_constraint, value=b_eff)
# ---------------------------------------------------------------------------
# -- Fit the dataset:
popt, chi2 = dataset.fit(verbose=True, plot=False, factor=10.)
dataset.plot_fit(filename=dataset.name)
# -- Print total column densities
dataset.print_total()
# NOTE(review): logNHI is unpacked with '*', so when set it should be an
# iterable such as (logNHI, err) -- confirm against VoigtFit's API.
if logNHI:
    dataset.print_metallicity(*logNHI)
print_T_model_pars(dataset)
# -- Save the dataset to file: filename taken from dataset.name
dataset.save()
| [
"VoigtFit.DataSet",
"numpy.loadtxt",
"numpy.sqrt"
] | [((1261, 1291), 'numpy.loadtxt', 'np.loadtxt', (['fname'], {'unpack': '(True)'}), '(fname, unpack=True)\n', (1271, 1291), True, 'import numpy as np\n'), ((1554, 1577), 'VoigtFit.DataSet', 'VoigtFit.DataSet', (['z_DLA'], {}), '(z_DLA)\n', (1570, 1577), False, 'import VoigtFit\n'), ((5693, 5730), 'numpy.sqrt', 'np.sqrt', (['(5.0 ** 2 + K * T_num / m_ion)'], {}), '(5.0 ** 2 + K * T_num / m_ion)\n', (5700, 5730), True, 'import numpy as np\n')] |
import matplotlib.pyplot as plt
import seaborn as sns
import scipy.stats
import numpy as np
import pandas as pd
import os
from biothings_client import get_client
from bioservices import KEGG
import porch
import porch.qvalue as qv
import porch.cache as cache
import porch.sunburst.sunburst as sb
# Directories and file endings for result and temporary files.
# NOTE(review): "preprcoc_prefix" contains a typo ("preproc"); the name is
# kept unchanged since other modules may import it.
protein_expression_name = "tcell_protein"
metabolite_expression_name = "tcell_metabolite"
preprcoc_prefix = "proc_"
significance_name = "significance"
activity_name = "activity"
# URLs to the supplements of
# Geiger, Roger, et al. "L-arginine modulates T cell metabolism and enhances survival and anti-tumor activity." Cell 167.3 (2016): 829-842.
# (mmc1 = proteomics supplement, mmc2 = metabolomics supplement)
proteomics_data_url = "https://ars.els-cdn.com/content/image/1-s2.0-S0092867416313137-mmc1.xlsx"
metabolomics_data_url = "https://ars.els-cdn.com/content/image/1-s2.0-S0092867416313137-mmc2.xlsx"
def one_row_per_proteoform(group):
    """Expand a single-row group into one row per proteoform.

    The group's index label is a ';'-separated list of protein IDs; each
    ID gets its own row, duplicating every other column of the (first)
    row of the group.
    (Pattern from https://stackoverflow.com/questions/13050003/apply-function-to-pandas-dataframe-that-can-return-multiple-rows)
    """
    first = group.iloc[0]
    ids = str(first.name).split(';')
    n_rows = len(ids)
    columns = {'ProteinID': ids}
    columns.update((col, [val] * n_rows) for col, val in first.to_dict().items())
    return pd.DataFrame(columns)
def one_row_per_compound_convert(group, map_kegg_chebi):
    """Expand a single-row group into one row per ChEBI compound.

    The group's index label is a ','-separated list of KEGG compound IDs.
    Each ID is translated via ``map_kegg_chebi`` (whose entries look like
    'cpd:C00001' -> 'chebi:5292'); IDs without a mapping are dropped.
    Returns ``None`` when no ID could be translated.
    (Pattern from https://stackoverflow.com/questions/13050003/apply-function-to-pandas-dataframe-that-can-return-multiple-rows)
    """
    first = group.iloc[0]
    chebis = [map_kegg_chebi['cpd:' + kegg_id].split(':')[1]
              for kegg_id in str(first.name).split(',')
              if 'cpd:' + kegg_id in map_kegg_chebi]
    if not chebis:
        return None
    n_rows = len(chebis)
    columns = {'MetaboliteID': chebis}
    columns.update((col, [val] * n_rows) for col, val in first.to_dict().items())
    return pd.DataFrame(columns)
def tcell_read_metabolomics_data():
    """Download and parse the metabolomics supplement into a DataFrame.

    Downloads the Excel supplement (cached on disk), averages technical
    replicates, undoes the log2 transform, translates KEGG compound IDs
    to ChEBI, and returns a DataFrame indexed by ChEBI MetaboliteID.
    """
    tcell_metabol_xls = cache.UrlFileCache(os.path.join(cache.get_cache_path(), metabolite_expression_name + ".xlsx"), metabolomics_data_url)
    metabolomics_df = pd.read_excel(tcell_metabol_xls.get_file_name(), sheet_name = "normalized by sample mean", index_col=0, usecols="A,C:HN", skiprows = [0])
    #metabolomics_df = pd.read_excel(tcell_metabol_xls.get_file_name(), sheet_name = "normalized by sample mean", index_col=0, usecols="A,C:HN", skiprows = [0])
    for col in metabolomics_df.columns:
        # Average all technical replicates (named by trailing ".1") into
        # the base column via the geometric mean, then drop the replicate.
        if len(col.split('.'))>1 and col.split('.')[1] == "1":
            remcol = col.split('.')[0]
            metabolomics_df[remcol] = scipy.stats.gmean(metabolomics_df[[remcol,col]],axis=1)
            metabolomics_df.drop(col, axis=1, inplace=True)
    metabolomics_df.index.name = "KEGG_ID"
    metabolomics_df = metabolomics_df.apply(np.exp2) # The excel data is in log2 space, return it to normal
    k = KEGG(verbose=False)
    # Translate KEGG compound IDs to ChEBI IDs; rows without any ChEBI
    # mapping are dropped by one_row_per_compound_convert (returns None).
    map_kegg_chebi = k.conv("chebi", "compound")
    metabolomics_df = metabolomics_df.groupby("KEGG_ID", group_keys=False).apply(lambda x: one_row_per_compound_convert(x, map_kegg_chebi)).reset_index(drop=True)
    metabolomics_df.set_index("MetaboliteID", inplace=True)
    return metabolomics_df
def tcell_read_metabolomics_frames():
    """Read the metabolomics data and derive a matching phenotype frame.

    Returns
    -------
    (phenotype_df, metabolomics_df) : tuple of pd.DataFrame
        ``phenotype_df`` holds a single row "Time" (activation time in
        hours, -1.0 for non-activated samples); both frames share the
        renamed sample columns ``<dish>_<time>_<replicate>``.

    Raises
    ------
    ValueError
        If a sample column name matches none of the known time labels.
    """
    proc_tcell_t = cache.TsvFileTracker(os.path.join(cache.get_cache_path(), metabolite_expression_name + ".tsv.gz"), tcell_read_metabolomics_data)
    metabolomics_df = proc_tcell_t.read_file()
    # Ordered substring -> activation time (hours) mapping.  Order matters:
    # it reproduces the original elif chain (e.g. "non act" before "ON").
    time_labels = (("non act", -1.), ("ON", 0.), ("act 3h", 3.),
                   ("act 12h", 12.), ("act 14h", 14.), ("act 24h", 24.),
                   ("act 2d", 48.), ("act 3d", 72.), ("act 4d", 96.))
    values, cols = [], []
    for coln in metabolomics_df.columns:
        for label, time in time_labels:
            if label in coln:
                break
        else:
            # BUG FIX: the original only printed the column name here and
            # then silently reused the previous iteration's time value.
            raise ValueError("Unrecognized sample column: %r" % coln)
        # Column format: "<dish>-<condition>-<replicate>".
        dish = coln.split('-')[0]
        rep = coln.split('-')[2].replace(" ", "")
        values.append(time)
        cols.append('_'.join([dish, str(int(time)), rep]))
    phenotype_df = pd.DataFrame(columns=cols, data=[values], index=["Time"])
    metabolomics_df.columns = cols
    return phenotype_df, metabolomics_df
def tcell_read_proteomics_data():
    """Download and parse the proteomics supplement into a DataFrame.

    Downloads the Excel supplement (cached on disk), normalizes by the
    column mean, undoes the log2 transform, and expands ';'-separated
    protein groups into one row per proteoform, indexed by ProteinID.
    """
    tcell_prot_xls = cache.UrlFileCache(os.path.join(cache.get_cache_path(), protein_expression_name + ".xlsx"),proteomics_data_url)
    proteomics_df = pd.read_excel(tcell_prot_xls.get_file_name(), sheet_name = "Data", index_col=0, usecols="A,D:U")
    # proteomics_df = pd.read_excel(tcell_prot_xls.get_file_name(), sheet_name = "Data", index_col=0, usecols="A,V:AM")
    proteomics_df = proteomics_df - proteomics_df.mean() # Normalize by subtracting column mean
    proteomics_df = proteomics_df.apply(np.exp2) # The excel data is in log2 space, return it to normal
    proteomics_df = proteomics_df.groupby("Protein IDs", group_keys=False).apply(one_row_per_proteoform).reset_index(drop=True)
    proteomics_df.set_index("ProteinID", inplace=True)
    return proteomics_df
def tcell_read_proteomics_frames():
    """Read the proteomics data and derive a matching phenotype frame.

    Returns
    -------
    (phenotype_df, proteomics_df) : tuple of pd.DataFrame
        ``phenotype_df`` holds a single row "Time" (activation time in
        hours); both frames share the renamed sample columns
        ``<dish>_<time>_<replicate>``.

    Raises
    ------
    ValueError
        If a sample column name matches none of the known time labels.
    """
    proc_tcell_t = cache.TsvFileTracker(os.path.join(cache.get_cache_path(), protein_expression_name + ".tsv.gz"),tcell_read_proteomics_data)
    proteomics_df = proc_tcell_t.read_file()
    # Substring -> activation time (hours); "notact" = not activated.
    # Order reproduces the original elif chain.
    time_labels = (("notact", 0.), ("act12h", 12.), ("act24h", 24.),
                   ("act48h", 48.), ("act72h", 72.), ("act96h", 96.))
    values, cols = [], []
    for coln in proteomics_df.columns:
        for label, time in time_labels:
            if label in coln:
                break
        else:
            # BUG FIX: the original only printed the column name here and
            # then silently reused the previous iteration's time value.
            raise ValueError("Unrecognized sample column: %r" % coln)
        # (The original also assigned an unused local "not_sure" from the
        # first underscore-separated field; removed.)
        rep = int(coln.split('_')[3].replace(" ", ""))
        # Map the running replicate number onto a (dish, replicate) pair;
        # the thresholds reproduce the original sample layout verbatim.
        if rep < 18:
            dish = int(rep/5)+2
            rep = rep%5+1
        elif rep < 26:
            dish = rep%3+2
            rep = 3
        elif rep < 31:
            dish = (rep-1)%3+2
            rep = 3
        else:
            dish = (rep-2)%3+2
            rep = 3
        values.append(time)
        cols.append('_'.join([str(dish), str(int(time)), str(rep)]))
    proteomics_df.columns = cols
    phenotype_df = pd.DataFrame(columns=cols, data=[values], index=["Time"])
    return phenotype_df, proteomics_df
def tcell_example():
"""
The examples loads the proteomics and metabolomics data from
> Geiger, Roger, et al. L-arginine modulates T cell metabolism and enhances survival and anti-tumor activity. Cell 167.3 (2016): 829-842.
The example decompose the individual datasets into pathway activities, and subsequently decompose the joinder of the metabolomics and transcriptomics data
"""
print("* Downloading data ...")
# These operatios are cached, to reduce re-execution time
p_phenotype_df, proteomics_df = tcell_read_proteomics_frames()
m_phenotype_df, metabolomics_df = tcell_read_metabolomics_frames()
print("* Factorize data ...")
p_activity_df, p_es, untested = porch.porch_reactome(proteomics_df, organism = "HSA", gene_anot = "UniProt")
m_activity_df, m_es, untested = porch.porch_reactome(metabolomics_df, organism = "HSA", gene_anot = "ChEBI")
print("* Significance Testing ...")
p_significance = porch.linear_model("Pathway ~ C(Time)", p_activity_df, p_phenotype_df)
m_significance = porch.linear_model("Pathway ~ C(Time)", m_activity_df, m_phenotype_df)
print("* Multiple Hypothesis correction ...")
qv.qvalues(p_significance,"C(Time)", "q_value_Time")
qv.qvalues(m_significance,"C(Time)", "q_value_Time")
print("* Plotting the significance of the pathway activity derived from the proteomics data ...")
fig = plt.figure(figsize=(10,6))
p_significance["-log10(q)"] = -np.log10(p_significance["q_value_Time"])
p_significance["z-value"] = scipy.stats.norm.ppf(p_significance["C(Time)"])
g = sns.distplot(p_significance["z-value"], bins=100, rug=True, kde=False)
plt.savefig("p_tcell-qtime-z.png")
plt.show()
print("* Plotting the significance of the pathway activity derived from the metabolomics data ...")
fig = plt.figure(figsize=(10,6))
m_significance["-log10(q)"] = -np.log10(m_significance["q_value_Time"])
g = sns.distplot(m_significance["-log10(q)"], rug=True, kde=False)
plt.savefig("m_tcell-qtime.png")
plt.show()
p_significance.sort_values(by=['q_value_Time'], inplace=True, ascending=True)
m_significance.sort_values(by=['q_value_Time'], inplace=True, ascending=True)
print("The most significant proteomics pathways are:")
print(p_significance.head(n=10))
print("The most significant metabolomics pathways are:")
print(m_significance.head(n=20))
# most = p_significance.iloc[0:5:1].index
# p_joint_df = p_phenotype_df.append(p_activity_df.loc[most]).T.reset_index()
# out_df = pd.melt(p_joint_df, id_vars=["Time","index"], value_vars=most, var_name='Pathway', value_name='Activity')
# sns.lineplot(data=out_df, x="Time", y="Activity", hue="Pathway")
# plt.savefig("p_tcell-qtime-top.png")
# plt.show()
# most = m_significance.iloc[0:10:1].index
# m_phenotype_df = m_phenotype_df.append(m_activity_df.loc[most]).T.reset_index()
# out_df = pd.melt(m_phenotype_df,id_vars=["Time","index"],value_vars=most, var_name='Pathway', value_name='Activity')
# sns.lineplot(data=out_df, x="Time", y="Activity", hue="Pathway")
# plt.savefig("m_tcell-qtime-top.png")
# plt.show()
print("* Multi-omics analysis ...")
multiomics_df = pd.concat([proteomics_df,metabolomics_df],axis=0,join="inner")
multi_phenotype_df = p_phenotype_df[multiomics_df.columns]
multi_activity_df, multi_es, untested = porch.porch_multi_reactome(multiomics_df,[["HSA","UniProt"], ["HSA","ChEBI"]])
multi_significance = porch.linear_model("Pathway ~ C(Time)", multi_activity_df, multi_phenotype_df)
qv.qvalues(multi_significance,"C(Time)", "q_value_Time")
multi_significance["-log10(q)"] = -np.log10(multi_significance["q_value_Time"])
print("The most significant multiomics pathways are:")
print(multi_significance.head(n=10))
# for s in range(0,10,5):
# most = multi_significance.iloc[s:s+5:1].index
# multi_joint_df = p_phenotype_df.append(p_activity_df.loc[most]).T.reset_index()
# out_df = pd.melt(multi_joint_df,id_vars=["Time","index"],value_vars=most, var_name='Pathway', value_name='Activity')
# sns.lineplot(data=out_df, x="Time", y="Activity", hue="Pathway")
# plt.savefig("multi_tcell-qtime-{}{}.png".format(s,s+5))
# plt.show()
# sorted_top = {k: v for k, v in sorted(multi_es["R-HSA-202403"].items(), key=lambda item: item[1])}
# print(sorted_top)
conf = sb.get_conf_human()
conf.update({
'value': "-log10(q)",
'ngenes': "set_size",
'description': "annotation" })
sb.generate_reactome_sunburst(multi_significance, conf)
def main() -> None:
    """Entry point: run the complete T-cell multi-omics example."""
    tcell_example()
if __name__ == "__main__":
    main()
| [
"pandas.DataFrame",
"porch.porch_multi_reactome",
"porch.sunburst.sunburst.generate_reactome_sunburst",
"matplotlib.pyplot.show",
"porch.porch_reactome",
"porch.linear_model",
"porch.qvalue.qvalues",
"bioservices.KEGG",
"porch.sunburst.sunburst.get_conf_human",
"matplotlib.pyplot.figure",
"seabo... | [((1352, 1373), 'pandas.DataFrame', 'pd.DataFrame', (['content'], {}), '(content)\n', (1364, 1373), True, 'import pandas as pd\n'), ((2020, 2041), 'pandas.DataFrame', 'pd.DataFrame', (['content'], {}), '(content)\n', (2032, 2041), True, 'import pandas as pd\n'), ((3249, 3268), 'bioservices.KEGG', 'KEGG', ([], {'verbose': '(False)'}), '(verbose=False)\n', (3253, 3268), False, 'from bioservices import KEGG\n'), ((4568, 4625), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': 'cols', 'data': '[values]', 'index': "['Time']"}), "(columns=cols, data=[values], index=['Time'])\n", (4580, 4625), True, 'import pandas as pd\n'), ((6862, 6919), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': 'cols', 'data': '[values]', 'index': "['Time']"}), "(columns=cols, data=[values], index=['Time'])\n", (6874, 6919), True, 'import pandas as pd\n'), ((7661, 7733), 'porch.porch_reactome', 'porch.porch_reactome', (['proteomics_df'], {'organism': '"""HSA"""', 'gene_anot': '"""UniProt"""'}), "(proteomics_df, organism='HSA', gene_anot='UniProt')\n", (7681, 7733), False, 'import porch\n'), ((7774, 7846), 'porch.porch_reactome', 'porch.porch_reactome', (['metabolomics_df'], {'organism': '"""HSA"""', 'gene_anot': '"""ChEBI"""'}), "(metabolomics_df, organism='HSA', gene_anot='ChEBI')\n", (7794, 7846), False, 'import porch\n'), ((7913, 7983), 'porch.linear_model', 'porch.linear_model', (['"""Pathway ~ C(Time)"""', 'p_activity_df', 'p_phenotype_df'], {}), "('Pathway ~ C(Time)', p_activity_df, p_phenotype_df)\n", (7931, 7983), False, 'import porch\n'), ((8005, 8075), 'porch.linear_model', 'porch.linear_model', (['"""Pathway ~ C(Time)"""', 'm_activity_df', 'm_phenotype_df'], {}), "('Pathway ~ C(Time)', m_activity_df, m_phenotype_df)\n", (8023, 8075), False, 'import porch\n'), ((8131, 8184), 'porch.qvalue.qvalues', 'qv.qvalues', (['p_significance', '"""C(Time)"""', '"""q_value_Time"""'], {}), "(p_significance, 'C(Time)', 'q_value_Time')\n", (8141, 8184), True, 'import porch.qvalue as 
qv\n'), ((8188, 8241), 'porch.qvalue.qvalues', 'qv.qvalues', (['m_significance', '"""C(Time)"""', '"""q_value_Time"""'], {}), "(m_significance, 'C(Time)', 'q_value_Time')\n", (8198, 8241), True, 'import porch.qvalue as qv\n'), ((8354, 8381), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 6)'}), '(figsize=(10, 6))\n', (8364, 8381), True, 'import matplotlib.pyplot as plt\n'), ((8545, 8615), 'seaborn.distplot', 'sns.distplot', (["p_significance['z-value']"], {'bins': '(100)', 'rug': '(True)', 'kde': '(False)'}), "(p_significance['z-value'], bins=100, rug=True, kde=False)\n", (8557, 8615), True, 'import seaborn as sns\n'), ((8620, 8654), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""p_tcell-qtime-z.png"""'], {}), "('p_tcell-qtime-z.png')\n", (8631, 8654), True, 'import matplotlib.pyplot as plt\n'), ((8659, 8669), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (8667, 8669), True, 'import matplotlib.pyplot as plt\n'), ((8785, 8812), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 6)'}), '(figsize=(10, 6))\n', (8795, 8812), True, 'import matplotlib.pyplot as plt\n'), ((8896, 8958), 'seaborn.distplot', 'sns.distplot', (["m_significance['-log10(q)']"], {'rug': '(True)', 'kde': '(False)'}), "(m_significance['-log10(q)'], rug=True, kde=False)\n", (8908, 8958), True, 'import seaborn as sns\n'), ((8963, 8995), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""m_tcell-qtime.png"""'], {}), "('m_tcell-qtime.png')\n", (8974, 8995), True, 'import matplotlib.pyplot as plt\n'), ((9000, 9010), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (9008, 9010), True, 'import matplotlib.pyplot as plt\n'), ((10188, 10253), 'pandas.concat', 'pd.concat', (['[proteomics_df, metabolomics_df]'], {'axis': '(0)', 'join': '"""inner"""'}), "([proteomics_df, metabolomics_df], axis=0, join='inner')\n", (10197, 10253), True, 'import pandas as pd\n'), ((10358, 10443), 'porch.porch_multi_reactome', 'porch.porch_multi_reactome', (['multiomics_df', 
"[['HSA', 'UniProt'], ['HSA', 'ChEBI']]"], {}), "(multiomics_df, [['HSA', 'UniProt'], ['HSA',\n 'ChEBI']])\n", (10384, 10443), False, 'import porch\n'), ((10462, 10540), 'porch.linear_model', 'porch.linear_model', (['"""Pathway ~ C(Time)"""', 'multi_activity_df', 'multi_phenotype_df'], {}), "('Pathway ~ C(Time)', multi_activity_df, multi_phenotype_df)\n", (10480, 10540), False, 'import porch\n'), ((10545, 10602), 'porch.qvalue.qvalues', 'qv.qvalues', (['multi_significance', '"""C(Time)"""', '"""q_value_Time"""'], {}), "(multi_significance, 'C(Time)', 'q_value_Time')\n", (10555, 10602), True, 'import porch.qvalue as qv\n'), ((11392, 11411), 'porch.sunburst.sunburst.get_conf_human', 'sb.get_conf_human', ([], {}), '()\n', (11409, 11411), True, 'import porch.sunburst.sunburst as sb\n'), ((11534, 11589), 'porch.sunburst.sunburst.generate_reactome_sunburst', 'sb.generate_reactome_sunburst', (['multi_significance', 'conf'], {}), '(multi_significance, conf)\n', (11563, 11589), True, 'import porch.sunburst.sunburst as sb\n'), ((8416, 8456), 'numpy.log10', 'np.log10', (["p_significance['q_value_Time']"], {}), "(p_significance['q_value_Time'])\n", (8424, 8456), True, 'import numpy as np\n'), ((8847, 8887), 'numpy.log10', 'np.log10', (["m_significance['q_value_Time']"], {}), "(m_significance['q_value_Time'])\n", (8855, 8887), True, 'import numpy as np\n'), ((10641, 10685), 'numpy.log10', 'np.log10', (["multi_significance['q_value_Time']"], {}), "(multi_significance['q_value_Time'])\n", (10649, 10685), True, 'import numpy as np\n'), ((2315, 2337), 'porch.cache.get_cache_path', 'cache.get_cache_path', ([], {}), '()\n', (2335, 2337), True, 'import porch.cache as cache\n'), ((3660, 3682), 'porch.cache.get_cache_path', 'cache.get_cache_path', ([], {}), '()\n', (3680, 3682), True, 'import porch.cache as cache\n'), ((4954, 4976), 'porch.cache.get_cache_path', 'cache.get_cache_path', ([], {}), '()\n', (4974, 4976), True, 'import porch.cache as cache\n'), ((5775, 5797), 
'porch.cache.get_cache_path', 'cache.get_cache_path', ([], {}), '()\n', (5795, 5797), True, 'import porch.cache as cache\n')] |
# Scratch script used to test a few functions during development.
# (Original comment was in Chinese: "用于编程测试一些函数".)
import numpy as np
from keras.models import load_model
# Load a previously trained model from disk.
model = load_model('model_200_10.h5')
# One input sample with 361 features; presumably a flattened 19x19 board
# (361 points) -- TODO confirm against the model's training code.
x_input = np.zeros((1,361),dtype=int)
# Mark two positions with the values 1 and 2.
x_input[0,180] = 1
x_input[0,181] = 2
print(x_input)
print(x_input.shape)
# Rescale the model output from [0, 1] to [-10000, 10000].
score = model.predict(x_input) * 20000 - 10000
print(score)
# Extract the single scalar at position [0, 0].
score = score[0,0]
print(score)
print("------------")
| [
"keras.models.load_model",
"numpy.zeros"
] | [((76, 105), 'keras.models.load_model', 'load_model', (['"""model_200_10.h5"""'], {}), "('model_200_10.h5')\n", (86, 105), False, 'from keras.models import load_model\n'), ((116, 145), 'numpy.zeros', 'np.zeros', (['(1, 361)'], {'dtype': 'int'}), '((1, 361), dtype=int)\n', (124, 145), True, 'import numpy as np\n')] |
"""Chunker functions"""
from itertools import islice, chain
from functools import partial
from typing import Iterable
inf = float('inf')
DFLT_CHK_SIZE = 2048
def mk_chunker(chk_size=DFLT_CHK_SIZE, chk_step=None, *, use_numpy_reshape=None):
    """
    Make a generator function that yields fixed-size / fixed-step chunks of an iterable.

    The fastest available implementation is chosen for you: when numpy is
    importable and ``chk_size == chk_step``, chunking is performed with a single
    ``numpy.reshape``. As a trade-off, the yielded chunks may be numpy arrays,
    tuples, or lists, depending on which implementation was picked.

    :param chk_size: number of items per chunk (default 2048)
    :param chk_step: step of the sliding window; defaults to chk_size
    :param use_numpy_reshape: None (default) -> use numpy iff it is importable;
        True -> require numpy.reshape (re-raises ImportError when numpy is absent);
        False -> never use numpy.
    :return: a generator function producing chunks (numpy arrays, tuples, or lists)

    >>> a = range(6)
    >>> chunker = mk_chunker(3)
    >>> list(chunker(a))
    [array([0, 1, 2]), array([3, 4, 5])]
    >>> list(mk_chunker(2)(a))
    [array([0, 1]), array([2, 3]), array([4, 5])]
    >>> list(tuple(x) for x in mk_chunker(1)(a))
    [(0,), (1,), (2,), (3,), (4,), (5,)]
    >>>
    >>> chunker = mk_chunker(4, 2)
    >>> list(chunker(a))
    [[0, 1, 2, 3], [2, 3, 4, 5]]
    """
    if not chk_step:
        chk_step = chk_size  # default: non-overlapping, gap-less chunks
    if chk_step != chk_size:
        # Overlap or gap between chunks: delegate to the generic chunker.
        def chunker(a):
            yield from fixed_step_chunker(a, chk_size, chk_step)
    else:
        numpy_wanted = use_numpy_reshape
        if numpy_wanted is None or numpy_wanted is True:
            try:
                from numpy import reshape
            except ImportError:
                if numpy_wanted is True:
                    raise  # the caller explicitly required numpy, so surface the error
                numpy_wanted = False
            else:
                numpy_wanted = True
        if numpy_wanted:
            def chunker(a):
                # Drop the incomplete tail, then reshape into rows of chk_size.
                full_len = len(a) - len(a) % chk_size
                yield from reshape(a[:full_len], (-1, chk_size))
        else:
            def chunker(a):
                # zip over chk_size copies of one iterator -> tuples of chk_size.
                yield from zip(*[iter(a)] * chk_size)
    # Expose the chosen parameters on the returned function for introspection.
    chunker.chk_size = chk_size
    chunker.chk_step = chk_step
    return chunker
# Module default: gap-less, non-overlapping chunks of DFLT_CHK_SIZE items.
DFLT_CHUNKER = mk_chunker(chk_size=DFLT_CHK_SIZE)
def simple_fixed_step_chunker(it, chk_size, chk_step=None):
    """
    Generate fixed-size chunks of ``it`` as lists, advancing ``chk_step`` items
    between chunk starts (overlapping when step < size, gapped when step > size).

    :param it: any iterable
    :param chk_size: number of items per chunk
    :param chk_step: items to advance between chunks (default: chk_size)

    Only full chunks are yielded; a short tail is dropped.

    >>> list(simple_fixed_step_chunker([1, 2, 3, 4, 5, 6], 3, 1))
    [[1, 2, 3], [2, 3, 4], [3, 4, 5], [4, 5, 6]]
    """
    if chk_step is None:
        chk_step = chk_size
    # BUGFIX: work on an iterator so the repeated islice() calls below consume
    # the stream. With a sequence input (e.g. a list), each islice() used to
    # restart from the beginning, yielding wrong chunks or looping forever.
    it = iter(it)
    if chk_step == chk_size:
        # Non-overlapping chunks: zip over chk_step copies of one iterator.
        yield from map(list, zip(*([iter(it)] * chk_step)))
    elif chk_step < chk_size:
        # Overlapping windows: reuse the tail of the previous chunk.
        chk = list(islice(it, chk_size))
        while len(chk) == chk_size:
            yield chk
            chk = chk[chk_step:] + list(islice(it, chk_step))
    else:
        # Gapped windows: skip `gap` items between consecutive chunks.
        chk = list(islice(it, chk_size))
        gap = chk_step - chk_size
        while len(chk) == chk_size:
            yield chk
            chk = list(islice(it, gap, gap + chk_size))
def _validate_chk_size(chk_size):
assert (
isinstance(chk_size, int) and chk_size > 0
), 'chk_size should be a positive interger'
def _validate_chk_size_and_step(chk_size, chk_step):
    """Validate chk_size and chk_step, defaulting chk_step to chk_size.

    Returns the validated ``(chk_size, chk_step)`` pair.
    """
    _validate_chk_size(chk_size)
    chk_step = chk_size if chk_step is None else chk_step
    assert isinstance(chk_step, int) and chk_step > 0, 'chk_step should be a positive integer'
    return chk_size, chk_step
def _validate_fixed_step_chunker_args(chk_size, chk_step, start_at, stop_at):
    """Validate fixed_step_chunker arguments and apply defaults.

    Returns ``(chk_step, start_at)`` with ``chk_step`` defaulted to chk_size
    and ``start_at`` defaulted to 0. ``stop_at`` may be None, an int greater
    than start_at, or ``inf``.
    """
    chk_size, chk_step = _validate_chk_size_and_step(chk_size, chk_step)
    start_at = 0 if start_at is None else start_at
    if stop_at is not None:
        assert stop_at > start_at, 'stop_at should be larger than start_at'
        if stop_at is not inf:
            assert isinstance(stop_at, int), 'stop_at should be an integer'
    # start_at sanity checks
    assert isinstance(start_at, int), 'start_at should be an integer'
    assert start_at >= 0, 'start_at should be a non negative integer'
    return chk_step, start_at
def rechunker(
    chks, chk_size, chk_step=None, start_at=None, stop_at=None, return_tail=False
):
    """Flatten an iterable of chunks and re-chunk it with fixed size and step.

    The incoming chunks are assumed to be contiguous (no gaps, no overlap) but
    may be of arbitrary, varying sizes; the output chunks are fixed-size and
    fixed-step (see `fixed_step_chunker` for the parameter semantics).
    """
    flat_stream = chain.from_iterable(chks)
    yield from fixed_step_chunker(
        flat_stream, chk_size, chk_step, start_at, stop_at, return_tail
    )
def fixed_step_chunker(
    it, chk_size, chk_step=None, start_at=None, stop_at=None, return_tail=False
):
    """
    a function to get (an iterator of) segments (bt, tt) of chunks from an iterator (or list)
    of the for [it_1, it_2...], given a chk_size, chk_step, and a start_at and a stop_at.
    The start_at, stop_at act like slices indices for a list: start_at is included and stop_at is excluded

    :param it: iterator of elements of any type
    :param chk_size: length of the chunks
    :param chk_step: step between chunks
    :param start_at: index of the first term of the iterator at which we begin building the chunks (inclusive)
    :param stop_at: index of the last term from the iterator included in the chunks
    :param return_tail: if set to false, only the chunks with max element with index less than stop_at are yielded
    if set to true, any chunks with minimum index value no more than stop_at are returned but they contain
    term with index no more than stop_at
    :return: an iterator of the chunks

    1) If stop_at is not None and return_tail is False:
       will return all full chunks with maximum element index less than stop_at
       or until the iterator is exhausted. Only full chunks are returned here.

    2) If stop_at is not None and return_tail is True:
       will return all full chunks as above along with possibly cut off chunks
       containing one term whose index is stop_at-1 or one (last) term which is the
       last element of it

    3) If stop_at is None and return_tail is False:
       will return all full chunks with maximum element index less or equal to the last
       element of it

    4) If stop_at is None and return_tail is True:
       will return all full chunks with maximum element index less or equal to the last
       element of it plus cut off chunks whose maximum term index is the last term of it

    # testing chk_step < chk_size with return_tail=TRUE, stop and start_at PRESENT
    # and stop_at SMALLER than the largest index of it
    >>> f = lambda it: fixed_step_chunker(it, chk_size=3, chk_step=1, start_at=2, stop_at=5, return_tail=True)
    >>> it = range(1, 17, 1)
    >>> A = list(f(it)); B = list(f(iter(it)));  # trying the function on it (a list) and iter(it) (and iterator)
    >>> assert A == B  # it and iter(it) should give the same thing!
    >>> A  # and that thing is:
    [[3, 4, 5], [4, 5], [5]]

    # testing chk_step < chk_size with return_tail=FALSE, stop and start_at PRESENT
    # and stop_at SMALLER than the largest index of it
    >>> f = lambda it: fixed_step_chunker(it, chk_size=3, chk_step=1, start_at=2, stop_at=5, return_tail=False)
    >>> it = range(1, 17, 1)
    >>> A = list(f(it)); B = list(f(iter(it)));  # trying the function on it (a list) and iter(it) (and iterator)
    >>> assert A == B  # it and iter(it) should give the same thing!
    >>> A  # and that thing is:
    [[3, 4, 5]]

    # testing chk_step < chk_size with return_tail=TRUE, stop and start_at PRESENT
    # and stop_at LARGER than the largest index of it
    >>> f = lambda it: fixed_step_chunker(it, chk_size=3, chk_step=1, start_at=1, stop_at=20, return_tail=True)
    >>> it = range(1, 17, 1)
    >>> A = list(f(it)); B = list(f(iter(it)));  # trying the function on it (a list) and iter(it) (and iterator)
    >>> assert A == B  # it and iter(it) should give the same thing!
    >>> A  # and that thing is:
    [[2, 3, 4], [3, 4, 5], [4, 5, 6], [5, 6, 7], [6, 7, 8], [7, 8, 9], [8, 9, 10], [9, 10, 11], [10, 11, 12], [11, 12, 13], [12, 13, 14], [13, 14, 15], [14, 15, 16], [15, 16], [16]]

    # testing chk_step < chk_size with return_tail=FALSE, stop and start_at PRESENT
    # and stop_at LARGER than the largest index of it
    >>> f = lambda it: fixed_step_chunker(it, chk_size=3, chk_step=1, start_at=1, stop_at=20, return_tail=False)
    >>> it = range(1, 17, 1)
    >>> A = list(f(it)); B = list(f(iter(it)));  # trying the function on it (a list) and iter(it) (and iterator)
    >>> assert A == B  # it and iter(it) should give the same thing!
    >>> A  # and that thing is:
    [[2, 3, 4], [3, 4, 5], [4, 5, 6], [5, 6, 7], [6, 7, 8], [7, 8, 9], [8, 9, 10], [9, 10, 11], [10, 11, 12], [11, 12, 13], [12, 13, 14], [13, 14, 15], [14, 15, 16]]

    # testing chk_step = chk_size with return_tail=TRUE, stop and start_at PRESENT
    # and stop_at SMALLER than the largest index of it
    >>> f = lambda it: fixed_step_chunker(it, chk_size=3, chk_step=3, start_at=1, stop_at=7, return_tail=True)
    >>> it = range(1, 17, 1)
    >>> A = list(f(it)); B = list(f(iter(it)));  # trying the function on it (a list) and iter(it) (and iterator)
    >>> assert A == B  # it and iter(it) should give the same thing!
    >>> A  # and that thing is:
    [[2, 3, 4], [5, 6, 7]]

    # testing chk_size > len(it) with return_tail=False, no stop_at or start_at
    >>> f = lambda it: fixed_step_chunker(it, chk_size=30, chk_step=3, start_at=None, stop_at=None, return_tail=False)
    >>> it = range(1, 17, 1)
    >>> A = list(f(it)); B = list(f(iter(it)));  # trying the function on it (a list) and iter(it) (and iterator)
    >>> assert A == B  # it and iter(it) should give the same thing!
    >>> A  # and that thing is:
    []

    # testing chk_size > len(it) with return_tail=True, no stop_at or start_at
    >>> f = lambda it: fixed_step_chunker(it, chk_size=30, chk_step=3, start_at=None, stop_at=None, return_tail=True)
    >>> it = range(1, 17, 1)
    >>> A = list(f(it)); B = list(f(iter(it)));  # trying the function on it (a list) and iter(it) (and iterator)
    >>> assert A == B  # it and iter(it) should give the same thing!
    >>> A  # and that thing is:
    [[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16], [4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16], [7, 8, 9, 10, 11, 12, 13, 14, 15, 16], [10, 11, 12, 13, 14, 15, 16], [13, 14, 15, 16], [16]]

    # testing chk_step > chk_size with return_tail=TRUE, stop and start_at PRESENT
    # and stop_at SMALLER than the largest index of it
    >>> f = lambda it: fixed_step_chunker(it, chk_size=3, chk_step=4, start_at=1, stop_at=7, return_tail=True)
    >>> it = range(1, 17, 1)
    >>> A = list(f(it)); B = list(f(iter(it)));  # trying the function on it (a list) and iter(it) (and iterator)
    >>> assert A == B  # it and iter(it) should give the same thing!
    >>> A  # and that thing is:
    [[2, 3, 4], [6, 7]]

    # testing chk_step > chk_size with return_tail=FALSE, stop and start_at PRESENT
    # and stop_at SMALLER than the largest index of it
    >>> f = lambda it: fixed_step_chunker(it, chk_size=3, chk_step=4, start_at=1, stop_at=7, return_tail=False)
    >>> it = range(1, 17, 1)
    >>> A = list(f(it)); B = list(f(iter(it)));  # trying the function on it (a list) and iter(it) (and iterator)
    >>> assert A == B  # it and iter(it) should give the same thing!
    >>> A  # and that thing is:
    [[2, 3, 4]]

    # testing chk_step > chk_size with return_tail=FALSE, stop and start_at NOT PRESENT
    >>> f = lambda it: fixed_step_chunker(it, chk_size=3, chk_step=4, start_at=None, stop_at=None, return_tail=False)
    >>> it = range(1, 17, 1)
    >>> A = list(f(it)); B = list(f(iter(it)));  # trying the function on it (a list) and iter(it) (and iterator)
    >>> assert A == B  # it and iter(it) should give the same thing!
    >>> A  # and that thing is:
    [[1, 2, 3], [5, 6, 7], [9, 10, 11], [13, 14, 15]]

    # testing chk_step > chk_size with return_tail=TRUE, stop and start_at NOT PRESENT
    >>> f = lambda it: fixed_step_chunker(it, chk_size=3, chk_step=4, start_at=None, stop_at=None, return_tail=True)
    >>> it = range(1, 19, 1)
    >>> A = list(f(it)); B = list(f(iter(it)));  # trying the function on it (a list) and iter(it) (and iterator)
    >>> assert A == B  # it and iter(it) should give the same thing!
    >>> A  # and that thing is:
    [[1, 2, 3], [5, 6, 7], [9, 10, 11], [13, 14, 15], [17, 18]]

    # testing chk_step > chk_size with return_tail=TRUE, stop and start_at NOT PRESENT
    # with negative values in the iterator
    >>> f = lambda it: fixed_step_chunker(it, chk_size=3, chk_step=4, start_at=None, stop_at=None, return_tail=True)
    >>> it = range(-10, 19, 1)
    >>> A = list(f(it)); B = list(f(iter(it)));  # trying the function on it (a list) and iter(it) (and iterator)
    >>> assert A == B  # it and iter(it) should give the same thing!
    >>> A  # and that thing is:
    [[-10, -9, -8], [-6, -5, -4], [-2, -1, 0], [2, 3, 4], [6, 7, 8], [10, 11, 12], [14, 15, 16], [18]]

    # testing chk_step > chk_size with return_tail=TRUE, stop and start_at NOT PRESENT
    # with items of various types in the iterator
    >>> f = lambda it: fixed_step_chunker(it, chk_size=3, chk_step=2, start_at=None, stop_at=None, return_tail=True)
    >>> it = ['a', 3, -10, 9.2, str, [1,2,3], set([10,20])]
    >>> A = list(f(it)); B = list(f(iter(it)));  # trying the function on it (a list) and iter(it) (and iterator)
    >>> assert A == B  # it and iter(it) should give the same thing!
    >>> A  # and that thing is:
    [['a', 3, -10], [-10, 9.2, <class 'str'>], [<class 'str'>, [1, 2, 3], {10, 20}], [{10, 20}]]

    # testing that start_at/stop_at are honored on the chk_step == chk_size fast path
    >>> list(fixed_step_chunker(range(10), 3, 3, start_at=1))
    [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
    """

    chk_step, start_at = _validate_fixed_step_chunker_args(
        chk_size, chk_step, start_at, stop_at
    )
    # islice() rejects float('inf') as a stop value; the validator accepts it
    # to mean "no upper bound", so translate it to None here.
    if stop_at is inf:
        stop_at = None
    # BUGFIX: apply start_at/stop_at up front so *every* branch honors them.
    # (Previously the chk_step == chk_size fast path ignored both bounds.)
    it = islice(it, start_at, stop_at)

    if chk_step == chk_size and not return_tail:
        # Fast path: non-overlapping full chunks only.
        yield from map(list, zip(*([iter(it)] * chk_step)))
    elif chk_step < chk_size:
        # Overlapping windows: reuse the tail of the previous chunk.
        chk = list(islice(it, chk_size))
        while len(chk) == chk_size:
            yield chk
            chk = chk[chk_step:] + list(islice(it, chk_step))
    else:
        # Gapped windows: skip `gap` items between consecutive chunks.
        chk = list(islice(it, chk_size))
        gap = chk_step - chk_size
        while len(chk) == chk_size:
            yield chk
            chk = list(islice(it, gap, gap + chk_size))

    if return_tail:
        # Emit the progressively shrinking leftover chunks.
        while len(chk) > 0:
            yield chk
            chk = chk[chk_step:]
#
# from collections import deque
# from itertools import islice, chain
#
#
# class nothing: ...
#
#
# strip_nothing_out = lambda item: tuple(filter(lambda x: x is not nothing, item))
#
#
# def new_fixed_step_chunker(it, chk_size, chk_step=None, start_at=None, stop_at=None, return_tail=False):
# chk_step, start_at = _validate_fixed_step_chunker_args(chk_size, chk_step, start_at, stop_at)
# if start_at is not None or stop_at is not None:
# it = islice(it, start_at, stop_at)
#
# iit = iter(it)
# window = deque([], chk_size)
# push_to_queue = window.extend
#
# # and there after... push new step data to queue and yield queue contents
# if not return_tail:
# if chk_step == chk_size:
# yield from zip(*([iit] * chk_step))
# else:
# chk = tuple(islice(iit, chk_size))
# if len(chk) >= chk_size:
# yield chk
# push_to_queue(chk)
# for step_data in zip(*([iit] * chk_step)):
# push_to_queue(step_data)
# yield tuple(window)
# else:
# iiit = chain(iit, [nothing] * (chk_step - 1))
# yield from map(strip_nothing_out, new_fixed_step_chunker(iiit, chk_size, chk_step, return_tail=False))
# # step_nibs = zip(*([iiit] * chk_step))
# # last_nib = next(step_nibs)
# # for step_nib in step_nibs:
# # push_to_queue(last_nib)
# # yield tuple(window)
# # last_nib = step_nib
# # push_to_queue(last_nib)
# # last_part = list(window)
# # yield strip_nothing_out(last_part)
| [
"itertools.chain.from_iterable",
"numpy.reshape",
"itertools.islice"
] | [((4643, 4668), 'itertools.chain.from_iterable', 'chain.from_iterable', (['chks'], {}), '(chks)\n', (4662, 4668), False, 'from itertools import islice, chain\n'), ((14606, 14635), 'itertools.islice', 'islice', (['it', 'start_at', 'stop_at'], {}), '(it, start_at, stop_at)\n', (14612, 14635), False, 'from itertools import islice\n'), ((14822, 14851), 'itertools.islice', 'islice', (['it', 'start_at', 'stop_at'], {}), '(it, start_at, stop_at)\n', (14828, 14851), False, 'from itertools import islice\n'), ((2855, 2875), 'itertools.islice', 'islice', (['it', 'chk_size'], {}), '(it, chk_size)\n', (2861, 2875), False, 'from itertools import islice\n'), ((3028, 3048), 'itertools.islice', 'islice', (['it', 'chk_size'], {}), '(it, chk_size)\n', (3034, 3048), False, 'from itertools import islice\n'), ((14655, 14675), 'itertools.islice', 'islice', (['it', 'chk_size'], {}), '(it, chk_size)\n', (14661, 14675), False, 'from itertools import islice\n'), ((14871, 14891), 'itertools.islice', 'islice', (['it', 'chk_size'], {}), '(it, chk_size)\n', (14877, 14891), False, 'from itertools import islice\n'), ((2088, 2133), 'numpy.reshape', 'reshape', (['a[:n - n % chk_size]', '(-1, chk_size)'], {}), '(a[:n - n % chk_size], (-1, chk_size))\n', (2095, 2133), False, 'from numpy import reshape\n'), ((3166, 3197), 'itertools.islice', 'islice', (['it', 'gap', '(gap + chk_size)'], {}), '(it, gap, gap + chk_size)\n', (3172, 3197), False, 'from itertools import islice\n'), ((15009, 15040), 'itertools.islice', 'islice', (['it', 'gap', '(gap + chk_size)'], {}), '(it, gap, gap + chk_size)\n', (15015, 15040), False, 'from itertools import islice\n'), ((2976, 2996), 'itertools.islice', 'islice', (['it', 'chk_step'], {}), '(it, chk_step)\n', (2982, 2996), False, 'from itertools import islice\n'), ((14776, 14796), 'itertools.islice', 'islice', (['it', 'chk_step'], {}), '(it, chk_step)\n', (14782, 14796), False, 'from itertools import islice\n')] |
import os
import time
import torch
import numpy as np
p = os.path.abspath('../..')
print(p)
import networks.networks as networks
def simple_resnet():
    """Build a single ResNet18 (10 classes) and print its structure."""
    net = networks.build("ResNet18", 10)
    print(net)
def all_thirdparty():
    """Smoke-test: instantiate every third-party (kuangliu) architecture."""
    for name in networks.KUANGLIU_NETS:
        print(name)
        networks.build(name, 10)  # construction itself is the test
def all_probenets():
    """Smoke-test: instantiate every probe-net architecture."""
    for name in networks.PROBE_NETS:
        print(name)
        networks.build(name, 10)  # construction itself is the test
def all_main_iotnets_manual():
    """Manually exercise each random-architecture space: sample one config from
    the "s1" law, build a 12-class instance, and print the resulting network.

    Equivalent to the loop in `all_iotnets`, but with explicit imports that
    document where each architecture space lives.
    """
    import networks.iotnets.random_net_densenet as random_net_densenet
    import networks.iotnets.random_net_googlenet as random_net_googlenet
    import networks.iotnets.random_net_mobilenetv2 as random_net_mobilenetv2
    import networks.iotnets.random_net_pnasnet as random_net_pnasnet
    import networks.iotnets.random_net_resnet as random_net_resnet
    import networks.iotnets.random_net_resnext as random_net_resnext

    def _sample_and_build(space, sample_id):
        # Sample `sample_id` from the space's "s1" law and build a 12-class net.
        config = space.get_config("s1")
        net_args = space.sample(config, sample_id)
        net = space.get_instance(net_args, 12)
        print(net)

    _sample_and_build(random_net_densenet, 2)  # original code used sample id 2 here
    _sample_and_build(random_net_googlenet, 0)
    _sample_and_build(random_net_mobilenetv2, 0)
    _sample_and_build(random_net_pnasnet, 0)
    _sample_and_build(random_net_resnet, 0)
    _sample_and_build(random_net_resnext, 0)
# nicer way to achive the same.
def all_iotnets():
    """Smoke-test: sample one config from each architecture space and build it."""
    for space in networks.ARCH_SPACES:
        print(space)
        law_cfg = networks.get_law_config(space, "s1")
        sampled_args = networks.sample_from_law(space, law_cfg, 0)
        networks.build(space, 12, sampled_args)  # construction itself is the test
        print("OK")
def all_synthnets():
    """Smoke-test: build one instance of each synthetic network family."""
    networks.build("SynthWide", 12, dict(f=1))
    networks.build("SynthWideAndLong", 12, dict(f=1, l=2))
    networks.build("SynthWide256", 12, dict(f=1))
def latency_run(net, device="cpu", bs=1, rep=10, verbose=True):
    """Time `rep` forward passes of `net` on random (bs, 3, 32, 32) inputs.

    :param net: a torch module (moved to `device` and put in eval mode)
    :param device: device the *model* is moved to (inputs stay on CPU)
    :param bs: batch size of the random input
    :param rep: number of timed forward passes
    :param verbose: if True, print per-run times and summary statistics
    :return: numpy array of per-run durations in seconds
    """
    model = net.to(device)
    model.eval()
    durations = []
    with torch.no_grad():
        for _ in range(rep):
            batch = torch.tensor(np.random.randn(bs, 3, 32, 32).astype(np.float32))
            t0 = time.time()
            model(batch)  # timed: forward pass only (input creation excluded)
            durations.append(time.time() - t0)
    ts = np.array(durations)
    if verbose:
        ms = 1000 * ts
        print("Times: {}".format(ts))
        print("Avg: {:.1f} ms".format(np.mean(ms)))
        print("Std: {:.1f} ms".format(np.std(ms)))
        # Second half of the runs: excludes warm-up effects.
        mid = int(rep / 2)
        print("Last Half Avg: {:.1f} ms".format(np.mean(ms[mid:])))
        print("Last Half Std: {:.1f} ms".format(np.std(ms[mid:])))
        print("")
    return ts
def demo():
    """Compare latency of reference ResNets against sampled mobilenetv2 nets."""
    for arch in ['ResNet18', 'ResNet101', 'ResNet152']:
        print("REFERENCE model: {}".format(arch))
        latency_run(networks.build(arch, 10, net_args=None))

    spacename = "random_net_mobilenetv2"
    for law in ["s1", "s3", "s6"]:
        print("OUR Constraint ARCH searchspace, based on {} with sampling law {}".format(spacename, law))
        law_config = networks.get_law_config(spacename, law)
        net_args = networks.sample_from_law(spacename, law_config, 0)
        latency_run(networks.build(spacename, 10, net_args))
if __name__ == '__main__':
    print("Create networks in PyTorch")
    # Smoke-test construction of each network family, then run the latency demo.
    # simple_resnet()
    # all_thirdparty()
    # all_probenets()
    all_main_iotnets_manual()
    all_iotnets()
    all_synthnets()
    demo()
    print("ALL DONE")
| [
"networks.iotnets.random_net_densenet.get_instance",
"networks.iotnets.random_net_resnet.get_config",
"networks.iotnets.random_net_googlenet.sample",
"numpy.mean",
"networks.networks.build",
"torch.no_grad",
"networks.networks.sample_from_law",
"os.path.abspath",
"numpy.random.randn",
"numpy.std",... | [((59, 83), 'os.path.abspath', 'os.path.abspath', (['"""../.."""'], {}), "('../..')\n", (74, 83), False, 'import os\n'), ((189, 216), 'networks.networks.build', 'networks.build', (['netname', '(10)'], {}), '(netname, 10)\n', (203, 216), True, 'import networks.networks as networks\n'), ((710, 746), 'networks.iotnets.random_net_densenet.get_config', 'random_net_densenet.get_config', (['"""s1"""'], {}), "('s1')\n", (740, 746), True, 'import networks.iotnets.random_net_densenet as random_net_densenet\n'), ((762, 799), 'networks.iotnets.random_net_densenet.sample', 'random_net_densenet.sample', (['config', '(2)'], {}), '(config, 2)\n', (788, 799), True, 'import networks.iotnets.random_net_densenet as random_net_densenet\n'), ((810, 856), 'networks.iotnets.random_net_densenet.get_instance', 'random_net_densenet.get_instance', (['net_args', '(12)'], {}), '(net_args, 12)\n', (842, 856), True, 'import networks.iotnets.random_net_densenet as random_net_densenet\n'), ((960, 997), 'networks.iotnets.random_net_googlenet.get_config', 'random_net_googlenet.get_config', (['"""s1"""'], {}), "('s1')\n", (991, 997), True, 'import networks.iotnets.random_net_googlenet as random_net_googlenet\n'), ((1013, 1051), 'networks.iotnets.random_net_googlenet.sample', 'random_net_googlenet.sample', (['config', '(0)'], {}), '(config, 0)\n', (1040, 1051), True, 'import networks.iotnets.random_net_googlenet as random_net_googlenet\n'), ((1062, 1109), 'networks.iotnets.random_net_googlenet.get_instance', 'random_net_googlenet.get_instance', (['net_args', '(12)'], {}), '(net_args, 12)\n', (1095, 1109), True, 'import networks.iotnets.random_net_googlenet as random_net_googlenet\n'), ((1218, 1257), 'networks.iotnets.random_net_mobilenetv2.get_config', 'random_net_mobilenetv2.get_config', (['"""s1"""'], {}), "('s1')\n", (1251, 1257), True, 'import networks.iotnets.random_net_mobilenetv2 as random_net_mobilenetv2\n'), ((1273, 1313), 'networks.iotnets.random_net_mobilenetv2.sample', 
'random_net_mobilenetv2.sample', (['config', '(0)'], {}), '(config, 0)\n', (1302, 1313), True, 'import networks.iotnets.random_net_mobilenetv2 as random_net_mobilenetv2\n'), ((1324, 1373), 'networks.iotnets.random_net_mobilenetv2.get_instance', 'random_net_mobilenetv2.get_instance', (['net_args', '(12)'], {}), '(net_args, 12)\n', (1359, 1373), True, 'import networks.iotnets.random_net_mobilenetv2 as random_net_mobilenetv2\n'), ((1473, 1508), 'networks.iotnets.random_net_pnasnet.get_config', 'random_net_pnasnet.get_config', (['"""s1"""'], {}), "('s1')\n", (1502, 1508), True, 'import networks.iotnets.random_net_pnasnet as random_net_pnasnet\n'), ((1524, 1560), 'networks.iotnets.random_net_pnasnet.sample', 'random_net_pnasnet.sample', (['config', '(0)'], {}), '(config, 0)\n', (1549, 1560), True, 'import networks.iotnets.random_net_pnasnet as random_net_pnasnet\n'), ((1571, 1616), 'networks.iotnets.random_net_pnasnet.get_instance', 'random_net_pnasnet.get_instance', (['net_args', '(12)'], {}), '(net_args, 12)\n', (1602, 1616), True, 'import networks.iotnets.random_net_pnasnet as random_net_pnasnet\n'), ((1714, 1748), 'networks.iotnets.random_net_resnet.get_config', 'random_net_resnet.get_config', (['"""s1"""'], {}), "('s1')\n", (1742, 1748), True, 'import networks.iotnets.random_net_resnet as random_net_resnet\n'), ((1764, 1799), 'networks.iotnets.random_net_resnet.sample', 'random_net_resnet.sample', (['config', '(0)'], {}), '(config, 0)\n', (1788, 1799), True, 'import networks.iotnets.random_net_resnet as random_net_resnet\n'), ((1810, 1854), 'networks.iotnets.random_net_resnet.get_instance', 'random_net_resnet.get_instance', (['net_args', '(12)'], {}), '(net_args, 12)\n', (1840, 1854), True, 'import networks.iotnets.random_net_resnet as random_net_resnet\n'), ((1954, 1989), 'networks.iotnets.random_net_resnext.get_config', 'random_net_resnext.get_config', (['"""s1"""'], {}), "('s1')\n", (1983, 1989), True, 'import networks.iotnets.random_net_resnext as 
random_net_resnext\n'), ((2005, 2041), 'networks.iotnets.random_net_resnext.sample', 'random_net_resnext.sample', (['config', '(0)'], {}), '(config, 0)\n', (2030, 2041), True, 'import networks.iotnets.random_net_resnext as random_net_resnext\n'), ((2052, 2097), 'networks.iotnets.random_net_resnext.get_instance', 'random_net_resnext.get_instance', (['net_args', '(12)'], {}), '(net_args, 12)\n', (2083, 2097), True, 'import networks.iotnets.random_net_resnext as random_net_resnext\n'), ((2544, 2585), 'networks.networks.build', 'networks.build', (['"""SynthWide"""', '(12)', 'net_args'], {}), "('SynthWide', 12, net_args)\n", (2558, 2585), True, 'import networks.networks as networks\n'), ((2627, 2675), 'networks.networks.build', 'networks.build', (['"""SynthWideAndLong"""', '(12)', 'net_args'], {}), "('SynthWideAndLong', 12, net_args)\n", (2641, 2675), True, 'import networks.networks as networks\n'), ((2712, 2756), 'networks.networks.build', 'networks.build', (['"""SynthWide256"""', '(12)', 'net_args'], {}), "('SynthWide256', 12, net_args)\n", (2726, 2756), True, 'import networks.networks as networks\n'), ((3174, 3186), 'numpy.array', 'np.array', (['ts'], {}), '(ts)\n', (3182, 3186), True, 'import numpy as np\n'), ((386, 413), 'networks.networks.build', 'networks.build', (['netname', '(10)'], {}), '(netname, 10)\n', (400, 413), True, 'import networks.networks as networks\n'), ((564, 591), 'networks.networks.build', 'networks.build', (['netname', '(10)'], {}), '(netname, 10)\n', (578, 591), True, 'import networks.networks as networks\n'), ((2305, 2345), 'networks.networks.get_law_config', 'networks.get_law_config', (['spacename', '"""s1"""'], {}), "(spacename, 's1')\n", (2328, 2345), True, 'import networks.networks as networks\n'), ((2365, 2415), 'networks.networks.sample_from_law', 'networks.sample_from_law', (['spacename', 'law_config', '(0)'], {}), '(spacename, law_config, 0)\n', (2389, 2415), True, 'import networks.networks as networks\n'), ((2430, 2469), 
'networks.networks.build', 'networks.build', (['spacename', '(12)', 'net_args'], {}), '(spacename, 12, net_args)\n', (2444, 2469), True, 'import networks.networks as networks\n'), ((2888, 2903), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (2901, 2903), False, 'import torch\n'), ((3692, 3731), 'networks.networks.build', 'networks.build', (['arch', '(10)'], {'net_args': 'None'}), '(arch, 10, net_args=None)\n', (3706, 3731), True, 'import networks.networks as networks\n'), ((3967, 4006), 'networks.networks.get_law_config', 'networks.get_law_config', (['spacename', 'law'], {}), '(spacename, law)\n', (3990, 4006), True, 'import networks.networks as networks\n'), ((4026, 4076), 'networks.networks.sample_from_law', 'networks.sample_from_law', (['spacename', 'law_config', '(0)'], {}), '(spacename, law_config, 0)\n', (4050, 4076), True, 'import networks.networks as networks\n'), ((4091, 4130), 'networks.networks.build', 'networks.build', (['spacename', '(10)', 'net_args'], {}), '(spacename, 10, net_args)\n', (4105, 4130), True, 'import networks.networks as networks\n'), ((3039, 3050), 'time.time', 'time.time', ([], {}), '()\n', (3048, 3050), False, 'import time\n'), ((3102, 3113), 'time.time', 'time.time', ([], {}), '()\n', (3111, 3113), False, 'import time\n'), ((3280, 3298), 'numpy.mean', 'np.mean', (['(1000 * ts)'], {}), '(1000 * ts)\n', (3287, 3298), True, 'import numpy as np\n'), ((3337, 3354), 'numpy.std', 'np.std', (['(1000 * ts)'], {}), '(1000 * ts)\n', (3343, 3354), True, 'import numpy as np\n'), ((3428, 3452), 'numpy.mean', 'np.mean', (['(1000 * ts[mid:])'], {}), '(1000 * ts[mid:])\n', (3435, 3452), True, 'import numpy as np\n'), ((3501, 3524), 'numpy.std', 'np.std', (['(1000 * ts[mid:])'], {}), '(1000 * ts[mid:])\n', (3507, 3524), True, 'import numpy as np\n'), ((2963, 2993), 'numpy.random.randn', 'np.random.randn', (['bs', '(3)', '(32)', '(32)'], {}), '(bs, 3, 32, 32)\n', (2978, 2993), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
''' Develop the vehicle simulation model, including the decision making module and motion planning module. '''
import torch
import numpy as np
import xlrd
from utils.Veh_dyn import vehicle_dyn
from utils.Det_crash import det_crash
from utils.Con_est import Collision_cond
__author__ = "<NAME>"
class Vehicle_S12:
''' Develop the vehicle simulation model under the IRM algorithm of EB, S1, or s2. '''
def __init__(self, Num_file, Num_ext, Veh_id, mass_ratio, age, belt, airbag, female, r_seed):
''' Define the initial parameters and vehicle dynamics. '''
# Obtain the parameters.
self.Num_file = Num_file
self.Num_ext = Num_ext
self.mass_ratio = mass_ratio
self.age = age
self.belt = belt
self.airbag = airbag
self.female = female
self.r_seed = r_seed
# Initialize the vehicle safety decision.
self.deci = 'None'
# Get the reconstructed real-world accident dynamics.
data_record = xlrd.open_workbook('para/HumanDriver_dynamics_example.xls').sheet_by_name('dynamics')
if Veh_id == 1:
self.x = np.array(data_record.row_values(72 * self.Num_file + 18 * self.Num_ext + 0)[1:201])
self.y = np.array(data_record.row_values(72 * self.Num_file + 18 * self.Num_ext + 1)[1:201])
self.theta = np.array(data_record.row_values(72 * self.Num_file + 18 * self.Num_ext + 2)[1:201])
self.v_long = np.array(data_record.row_values(72 * self.Num_file + 18 * self.Num_ext + 3)[1:201])
self.v_lat = np.array(data_record.row_values(72 * self.Num_file + 18 * self.Num_ext + 4)[1:201])
self.v_long_dot = np.array(data_record.row_values(72 * self.Num_file + 18 * self.Num_ext + 5)[1:201])
self.omega_r = np.array(data_record.row_values(72 * self.Num_file + 18 * self.Num_ext + 6)[1:201])
self.wheel_anlge = np.array(data_record.row_values(72 * self.Num_file + 18 * self.Num_ext + 7)[1:201])
elif Veh_id == 2:
self.x = np.array(data_record.row_values(72 * self.Num_file + 18 * self.Num_ext + 9 + 0)[1:201])
self.y = np.array(data_record.row_values(72 * self.Num_file + 18 * self.Num_ext + 9 + 1)[1:201])
self.theta = np.array(data_record.row_values(72 * self.Num_file + 18 * self.Num_ext + 9 + 2)[1:201])
self.v_long = np.array(data_record.row_values(72 * self.Num_file + 18 * self.Num_ext + 9 + 3)[1:201])
self.v_lat = np.array(data_record.row_values(72 * self.Num_file + 18 * self.Num_ext + 9 + 4)[1:201])
self.v_long_dot = np.array(data_record.row_values(72 * self.Num_file + 18 * self.Num_ext + 9 + 5)[1:201])
self.omega_r = np.array(data_record.row_values(72 * self.Num_file + 18 * self.Num_ext + 9 + 6)[1:201])
self.wheel_anlge = np.array(data_record.row_values(72 * self.Num_file + 18 * self.Num_ext + 9 + 7)[1:201])
    def decision(self, Veh_id, time_i, ego_t, Veh_e_state, veh_param, Deci_set, model):
        ''' Develop the vehicle decision making module.

        Enumerates every candidate decision in `Deci_set`, rolls the ego
        vehicle forward in the virtual-time domain, checks for a crash with
        the surrounding vehicle at each step, scores each candidate with the
        data-driven injury model (OISS) and stores the injury-minimising
        decision in `self.deci`.

        Args:
            Veh_id: 1 or 2; selects which side of the two-vehicle pair this
                instance plays when building the crash-check states.
            time_i: current real-time step; the rollout covers the remaining
                200 - time_i steps.
            ego_t: index of the current step in this vehicle's trajectory arrays.
            Veh_e_state: surrounding vehicle's perceived state tuple
                (x, y, angle, speed, previous x/y/angle, Time_Perc_temp).
            veh_param: vehicle parameter tuple, unpacked below as
                (veh_l, veh_w, veh_cgf, veh_cgs, veh_k, veh_m).
            Deci_set: iterable of candidate decision labels.
            model: torch injury-prediction model (called on a 1x9 float tensor).
        '''
        # Get the vehicle parameters.
        (veh_l, veh_w, veh_cgf, veh_cgs, veh_k, veh_m) = veh_param
        # Obtain the ego vehicle's dynamics.
        veh_state = (self.x[ego_t], self.y[ego_t], self.theta[ego_t], self.v_long[ego_t], self.v_lat[ego_t],
                     self.v_long_dot[ego_t], self.omega_r[ego_t], self.wheel_anlge[ego_t])
        # One score per candidate: positive OISS on crash, otherwise a negative
        # miss-distance proxy (0 - distance_min / 10000).
        self.injury = []
        # Previous step index, clamped at 0 for the first step.
        ego_t_0 = np.max([ego_t - 1, 0])
        # Use the enumeration method to get the optimal vehicle decision with minimal injuries.
        for deci in Deci_set:
            if deci != 'Record_trajectory':
                x_abs, y_abs, veh_angle, v_long, _, _, _, _ = vehicle_dyn(veh_state, deci, Deci_set)
            elif self.deci in ['None', 'Record_trajectory']:
                # 'Record_trajectory' candidate: keep replaying the recorded
                # trajectory; only evaluated while no manoeuvre is committed yet.
                x_abs, y_abs, veh_angle, v_long = self.x[ego_t:], self.y[ego_t:], self.theta[ego_t:], self.v_long[
                                                                                                      ego_t:]
            else:
                # A manoeuvre is already committed; stop enumerating.
                break
            Veh_x_old, Veh_y_old, Veh_angle_old = self.x[ego_t_0], self.y[ego_t_0], self.theta[ego_t_0]
            # Perceive the surrounding vehicle's dynamics.
            (Veh_e_x, Veh_e_y, Veh_e_angle, Veh_e_V, Veh_e_x_old, Veh_e_y_old, Veh_e_angle_old,
             Time_Perc_temp) = Veh_e_state
            # Prepend the Time_Perc_temp delayed-perception steps so both
            # trajectories are aligned on the same virtual time base.
            x_abs = np.append(self.x[(ego_t - Time_Perc_temp):ego_t], x_abs)
            y_abs = np.append(self.y[(ego_t - Time_Perc_temp):ego_t], y_abs)
            veh_angle = np.append(self.theta[(ego_t - Time_Perc_temp):ego_t], veh_angle)
            v_long = np.append(self.v_long[(ego_t - Time_Perc_temp):ego_t], v_long)
            # Update the time steps in virtual-time domain.
            for i in range(200 - time_i):
                V1_state_ = (x_abs[i], y_abs[i], veh_angle[i], Veh_x_old, Veh_y_old, Veh_angle_old)
                V2_state_ = (Veh_e_x[i], Veh_e_y[i], Veh_e_angle[i], Veh_e_x_old, Veh_e_y_old, Veh_e_angle_old)
                V1_state = V1_state_ if Veh_id == 1 else V2_state_
                V2_state = V1_state_ if Veh_id == 2 else V2_state_
                # Check whether there is a crash at the time step i.
                veh_striking_list = det_crash(veh_l, veh_w, V1_state, V2_state)
                Veh_x_old, Veh_y_old, Veh_angle_old = x_abs[i], y_abs[i], veh_angle[i]
                Veh_e_x_old, Veh_e_y_old, Veh_e_angle_old = Veh_e_x[i], Veh_e_y[i], Veh_e_angle[i]
                # If crash happens, estimate the collision condition and predict occupant injury severity.
                if veh_striking_list:
                    if Veh_id == 1:
                        V1_v = v_long[i]
                        V2_v = Veh_e_V[i]
                        delta_angle = Veh_e_angle[i] - veh_angle[i]
                    else:
                        V1_v = Veh_e_V[i]
                        V2_v = v_long[i]
                        delta_angle = veh_angle[i] - Veh_e_angle[i]
                    # Obtain delta-v based on the plane 2-DOF rigid-body collision model with momentum conservation.
                    delta_v1, delta_v2, delta_v_index = Collision_cond(veh_striking_list, V1_v, V2_v, delta_angle,
                                                                       veh_param)
                    # Process input valuables of the injury prediction model.
                    # Delta-v scaled by 3.6 (presumably m/s -> km/h — confirm with model training data).
                    d_V = round([delta_v1, delta_v2][Veh_id - 1] * 3.6)
                    # Discretise the impact angle into 5-degree bins in [0, 355].
                    angle = np.rad2deg(delta_angle)
                    angle = angle if angle >= 0 else angle + 360
                    angle = round((angle + 2.5) // 5) * 5
                    angle = 0 if angle == 360 else angle
                    PoI_ego, PoI_oth = veh_striking_list[delta_v_index][Veh_id], veh_striking_list[delta_v_index][
                        3 - Veh_id]
                    model_input = torch.from_numpy(np.array([[d_V, angle, PoI_ego, PoI_oth, self.age, self.female,
                                                               self.belt, self.airbag, self.mass_ratio]])).float()
                    # Get human injury information using the data-driven injury prediction model.
                    pred = model(model_input).detach()
                    injury = torch.nn.functional.softmax(pred, dim=1).data.numpy()[0]
                    # Translate injury probability into OISS.
                    injury = (0 * injury[0] + 1.37 * injury[1] + 7.54 * injury[2] + 32.22 * injury[3]) / 32.22
                    self.injury.append(injury)
                    break
                # If no crash happens, OISS will be recorded as zero.
                if i == 200 - time_i - 1:
                    length = np.min([len(x_abs), len(Veh_e_x), len(y_abs), len(Veh_e_y)])
                    distance_min = np.min(
                        ((x_abs[:length] - Veh_e_x[:length]) ** 2 + (y_abs[:length] - Veh_e_y[:length]) ** 2) ** 0.5)
                    self.injury.append(0 - distance_min / 10000)
                    # NOTE(review): deci_S3 uses `<= 0` for the analogous check; with a
                    # negative miss-distance proxy this `== 0` can only hold when
                    # distance_min == 0, so the break below looks dead — confirm intent.
                    if self.injury[-1] == 0:
                        if distance_min > 8:
                            break
        # Get the optimal vehicle decision u* with minimal injuries.
        self.deci = Deci_set[self.injury.index(min(self.injury))]
def trajectory(self, time_i, ego_t):
''' Develop the vehicle motion planning module. '''
veh_state = (self.x[ego_t], self.y[ego_t], self.theta[ego_t], self.v_long[ego_t], self.v_lat[ego_t],
self.v_long_dot[ego_t], self.omega_r[ego_t], self.wheel_anlge[ego_t])
# Make motion planning according to the optimal vehicle decision u*.
if self.deci != 'Record_trajectory':
self.x, self.y, self.theta, self.v_long, self.v_lat, self.v_long_dot, self.omega_r, self.wheel_anlge = \
vehicle_dyn(veh_state, self.deci, if_error=True, r_seed=self.r_seed + time_i)
else:
self.x, self.y, self.theta, self.v_long, self.v_lat, self.v_long_dot, self.omega_r, self.wheel_anlge = \
self.x[ego_t:], self.y[ego_t:], self.theta[ego_t:], self.v_long[ego_t:], self.v_lat[ego_t:], \
self.v_long_dot[ego_t:], self.omega_r[ego_t:], self.wheel_anlge[ego_t:]
def deci_EB(time_i, Veh_i, t_i, Veh_e_state, Veh_state):
    ''' Develop the vehicle decision making module that decides whether to activate emergency braking (EB).

    Args:
        time_i: current real-time step, forwarded to Veh_i.trajectory on EB.
        Veh_i: the ego vehicle object; on EB its `deci` is set to
            'straight_dec-all' and its trajectory is re-planned.
        t_i: current step index within the ego trajectory; reset to 0 on EB.
        Veh_e_state: (x, y, theta, v, _, _, _, t) of the surrounding vehicle,
            where the first four entries are indexable time series.
        Veh_state: (x, y, theta, v) of the ego vehicle at the current step.

    Returns:
        (t_i, flag_EB): updated step index and False when EB was triggered.
    '''
    (Veh_e_x, Veh_e_y, Veh_e_theta, Veh_e_v, _, _, _, Veh_e_t) = Veh_e_state
    (Veh_x, Veh_y, Veh_theta, Veh_v) = Veh_state
    flag_EB = True
    # Position of the other vehicle expressed in the ego body frame.
    dx = Veh_e_x[Veh_e_t] - Veh_x
    dy = Veh_e_y[Veh_e_t] - Veh_y
    x_rela = dx * np.cos(Veh_theta) + dy * np.sin(Veh_theta)
    y_rela = dy * np.cos(Veh_theta) - dx * np.sin(Veh_theta)
    # BUG FIX: np.arctan(y_rela / x_rela) divided by zero when x_rela == 0.
    # arctan2 is quadrant-aware and agrees with the original value wherever
    # the EB condition below (which requires x_rela > 0) can trigger.
    t_rela = np.arctan2(y_rela, x_rela)
    # Closing speed along the ego longitudinal axis.
    V_rela = Veh_v - Veh_e_v[Veh_e_t] * np.cos(Veh_e_theta[Veh_e_t] - Veh_theta)
    # BUG FIX: guard the time-to-collision against a zero closing speed; when
    # the gap is not closing there is no finite TTC, so use +inf (the
    # 0 < TTC <= 1.4 test below then fails, exactly as intended).
    TTC = x_rela / V_rela if V_rela > 0 else np.inf
    # Trigger EB when the other vehicle is ahead (within 50 m, +/-45 deg)
    # and the time-to-collision drops to 1.4 s or less.
    if 0 < x_rela < 50 and -45 < np.rad2deg(t_rela) < 45 and 0 < TTC <= 1.4:
        Veh_i.deci = 'straight_dec-all'
        Veh_i.trajectory(time_i, t_i)
        t_i = 0
        flag_EB = False
    return t_i, flag_EB
class Vehicle_S3:
    ''' Develop the vehicle simulation model under the IRM algorithm of S3. '''
    def __init__(self, Num_file, Num_ext, Veh_id, mass_ratio, age, belt, airbag, female, r_seed):
        """Store the configuration parameters and load the reconstructed
        real-world accident dynamics for vehicle `Veh_id` (1 or 2)."""
        # Occupant / simulation configuration.
        self.Num_file = Num_file
        self.Num_ext = Num_ext
        self.mass_ratio = mass_ratio
        self.age = age
        self.belt = belt
        self.airbag = airbag
        self.female = female
        self.r_seed = r_seed
        # No safety decision has been taken yet.
        self.deci = 'None'
        # Load the reconstructed accident dynamics from the example sheet.
        sheet = xlrd.open_workbook('para/HumanDriver_dynamics_example.xls').sheet_by_name('dynamics')
        if Veh_id in (1, 2):
            # Row index mirrors the sheet layout: 72 rows per file block,
            # 18 rows per extension; vehicle 2's rows sit 9 rows below vehicle 1's.
            base = 72 * self.Num_file + 18 * self.Num_ext + 9 * (Veh_id - 1)
            signals = ('x', 'y', 'theta', 'v_long', 'v_lat',
                       'v_long_dot', 'omega_r', 'wheel_anlge')
            for shift, signal in enumerate(signals):
                # Columns 1..200 hold the 200 time-step samples of each signal.
                setattr(self, signal, np.array(sheet.row_values(base + shift)[1:201]))
def deci_S3(flag_S3, time_i, t_t, Veh_1, Veh_2, veh_param, Deci_set, model, r_seed):
    ''' Develop the vehicle decision making module and motion planning module under the IRM algorithm of S3.

    Enumerates every decision pair (deci_1, deci_2) from `Deci_set`, rolls
    both vehicles forward in the virtual-time domain, scores each pair by the
    mean predicted occupant injury (OISS), commits the injury-minimising pair
    to `Veh_1.deci` / `Veh_2.deci`, and executes the corresponding motion
    planning in place on both vehicle objects.

    Args:
        flag_S3: current S3 activation flag; set to False when the best pair
            yields a non-positive mean injury (i.e. no predicted crash).
        time_i: current real-time step; rollouts cover 200 - time_i steps.
        t_t: index of the current step in both vehicles' trajectory arrays.
        Veh_1, Veh_2: vehicle objects (mutated: deci, injury, trajectories).
        veh_param: (veh_l, veh_w, veh_cgf, veh_cgs, veh_k, veh_m).
        Deci_set: candidate decision labels.
        model: torch injury-prediction model (called on a 1x9 float tensor).
        r_seed: base random seed for the error-injected dynamics rollout.

    Returns:
        The (possibly cleared) flag_S3.
    '''
    # Get the vehicle parameters.
    (veh_l, veh_w, veh_cgf, veh_cgs, veh_k, veh_m) = veh_param
    # Obtain the vehicles' dynamics.
    veh_1_state = (Veh_1.x[t_t], Veh_1.y[t_t], Veh_1.theta[t_t], Veh_1.v_long[t_t], Veh_1.v_lat[t_t],
                   Veh_1.v_long_dot[t_t], Veh_1.omega_r[t_t], Veh_1.wheel_anlge[t_t])
    veh_2_state = (Veh_2.x[t_t], Veh_2.y[t_t], Veh_2.theta[t_t], Veh_2.v_long[t_t], Veh_2.v_lat[t_t],
                   Veh_2.v_long_dot[t_t], Veh_2.omega_r[t_t], Veh_2.wheel_anlge[t_t])
    # Per-candidate-pair records: each vehicle's OISS, their mean, and the pair itself.
    Injury1, Injury2 = [], []
    Injury_mean = []
    Deci_1, Deci_2 = [], []
    # Previous step index, clamped at 0 for the first step.
    t_t_0 = np.max([t_t - 1, 0])
    # Use the enumeration method to get the optimal vehicle decision with minimal injuries.
    for deci_1 in Deci_set:
        if deci_1 != 'Record_trajectory':
            x_1_abs, y_1_abs, veh_1_angle, v_1_long, _, _, _, _ = vehicle_dyn(veh_1_state, deci_1)
        elif Veh_1.deci in ['None', 'Record_trajectory']:
            # Replay vehicle 1's recorded trajectory from t_t onward.
            x_1_abs, y_1_abs, veh_1_angle, v_1_long = Veh_1.x[t_t:], Veh_1.y[t_t:], Veh_1.theta[t_t:], Veh_1.v_long[
                                                                                                       t_t:]
        else:
            break
        for deci_2 in Deci_set:
            if deci_2 != 'Record_trajectory':
                x_2_abs, y_2_abs, veh_2_angle, v_2_long, _, _, _, _ = vehicle_dyn(veh_2_state, deci_2)
            elif Veh_2.deci in ['None', 'Record_trajectory']:
                # Replay vehicle 2's recorded trajectory from t_t onward.
                x_2_abs, y_2_abs, veh_2_angle, v_2_long = Veh_2.x[t_t:], Veh_2.y[t_t:], Veh_2.theta[t_t:], Veh_2.v_long[
                                                                                                           t_t:]
            else:
                break
            Veh_1_x_old, Veh_1_y_old, Veh_1_angle_old = Veh_1.x[t_t_0], Veh_1.y[t_t_0], Veh_1.theta[t_t_0]
            Veh_2_x_old, Veh_2_y_old, Veh_2_angle_old = Veh_2.x[t_t_0], Veh_2.y[t_t_0], Veh_2.theta[t_t_0]
            # Update the time steps in virtual-time domain.
            for i in range(200 - time_i):
                V1_state = (x_1_abs[i], y_1_abs[i], veh_1_angle[i], Veh_1_x_old, Veh_1_y_old, Veh_1_angle_old)
                V2_state = (x_2_abs[i], y_2_abs[i], veh_2_angle[i], Veh_2_x_old, Veh_2_y_old, Veh_2_angle_old)
                # Check whether there is a crash at the time step i.
                veh_striking_list = det_crash(veh_l, veh_w, V1_state, V2_state)
                Veh_1_x_old, Veh_1_y_old, Veh_1_angle_old = x_1_abs[i], y_1_abs[i], veh_1_angle[i]
                Veh_2_x_old, Veh_2_y_old, Veh_2_angle_old = x_2_abs[i], y_2_abs[i], veh_2_angle[i]
                # If crash happens, estimate the collision condition and predict occupant injury severity.
                if veh_striking_list:
                    delta_v1, delta_v2, delta_v_index = Collision_cond(veh_striking_list, v_1_long[i], v_2_long[i],
                                                                       veh_2_angle[i] - veh_1_angle[i], veh_param)
                    d_V_list = [delta_v1, delta_v2]
                    # Relative heading seen from each vehicle's perspective.
                    angle_list = [np.rad2deg(veh_2_angle[i] - veh_1_angle[i]),
                                  np.rad2deg(veh_1_angle[i] - veh_2_angle[i])]
                    PoI_list = [veh_striking_list[delta_v_index][1], veh_striking_list[delta_v_index][2]]
                    injury_list = [0, 0]
                    Veh_list = [Veh_1, Veh_2]
                    # Predict the occupant injury for each of the two vehicles.
                    for num_i in range(2):
                        # Process input valuables of the injury prediction model.
                        # Delta-v scaled by 3.6 (presumably m/s -> km/h — confirm with model training data).
                        d_V = round(d_V_list[num_i] * 3.6)
                        # Discretise the impact angle into 5-degree bins in [0, 355].
                        angle_i = angle_list[num_i] if angle_list[num_i] >= 0 else angle_list[num_i] + 360
                        angle_i = round((angle_i + 2.5) // 5) * 5
                        angle_i = 0 if angle_i == 360 else angle_i
                        veh_i = Veh_list[num_i]
                        model_input = torch.from_numpy(np.array([[d_V, angle_i, PoI_list[num_i], PoI_list[1 - num_i],
                                                                   veh_i.age, veh_i.female, veh_i.belt, veh_i.airbag,
                                                                   veh_i.mass_ratio, ]])).float()
                        # Get human injury information using the data-driven injury prediction model.
                        pred = model(model_input).detach()
                        injury = torch.nn.functional.softmax(pred, dim=1).data.numpy()[0]
                        # Translate injury probability into OISS.
                        injury = (0 * injury[0] + 1.37 * injury[1] + 7.54 * injury[2] + 32.22 * injury[3]) / 32.22
                        injury_list[num_i] = injury
                    Injury1.append(injury_list[0])
                    Injury2.append(injury_list[1])
                    Injury_mean.append((injury_list[0] + injury_list[1])/2)
                    Deci_1.append(deci_1)
                    Deci_2.append(deci_2)
                    break
                # If no crash happens, OISS will be recorded as zero.
                if i == 200 - time_i - 1:
                    length = np.min([len(x_1_abs), len(y_1_abs), len(x_2_abs), len(y_2_abs)])
                    distance_min = np.min(((np.array(x_1_abs[:length]) - np.array(x_2_abs[:length])) ** 2 +
                                            (np.array(y_1_abs[:length]) - np.array(y_2_abs[:length])) ** 2) ** 0.5)
                    # Crash-free pair: record a negative miss-distance proxy so
                    # larger clearances score better.
                    Injury1.append(0 - distance_min / 10000)
                    Injury2.append(0 - distance_min / 10000)
                    Injury_mean.append(0 - distance_min / 10000)
                    Deci_1.append(deci_1)
                    Deci_2.append(deci_2)
                    if Injury_mean[-1] <= 0:
                        if distance_min > 5:
                            break
            # NOTE(review): if a crash produced an exactly-zero injury,
            # distance_min may be unbound (or stale) here — confirm.
            if Injury_mean[-1] <= 0:
                if distance_min > 5:
                    break
    # Get the optimal vehicle decision u* with minimal injuries.
    Veh_1.deci = Deci_1[Injury_mean.index(min(Injury_mean))]
    Veh_2.deci = Deci_2[Injury_mean.index(min(Injury_mean))]
    Veh_1.injury = Injury1[Injury_mean.index(min(Injury_mean))]
    Veh_2.injury = Injury2[Injury_mean.index(min(Injury_mean))]
    # Execute the motion planning for vehicle 1 under the chosen decision.
    if Veh_1.deci != 'Record_trajectory':
        Veh_1.x, Veh_1.y, Veh_1.theta, Veh_1.v_long, Veh_1.v_lat, Veh_1.v_long_dot, Veh_1.omega_r, Veh_1.wheel_anlge = \
            vehicle_dyn(veh_1_state, Veh_1.deci, if_error=True, r_seed=r_seed + time_i)
    else:
        Veh_1.x, Veh_1.y, Veh_1.theta, Veh_1.v_long, Veh_1.v_lat, Veh_1.v_long_dot, Veh_1.omega_r, Veh_1.wheel_anlge = \
            Veh_1.x[t_t:], Veh_1.y[t_t:], Veh_1.theta[t_t:], Veh_1.v_long[t_t:], Veh_1.v_lat[t_t:], \
            Veh_1.v_long_dot[t_t:], Veh_1.omega_r[t_t:], Veh_1.wheel_anlge[t_t:]
    # Execute the motion planning for vehicle 2 under the chosen decision.
    if Veh_2.deci != 'Record_trajectory':
        Veh_2.x, Veh_2.y, Veh_2.theta, Veh_2.v_long, Veh_2.v_lat, Veh_2.v_long_dot, Veh_2.omega_r, Veh_2.wheel_anlge = \
            vehicle_dyn(veh_2_state, Veh_2.deci, if_error=True, r_seed=r_seed + time_i)
    else:
        Veh_2.x, Veh_2.y, Veh_2.theta, Veh_2.v_long, Veh_2.v_lat, Veh_2.v_long_dot, Veh_2.omega_r, Veh_2.wheel_anlge = \
            Veh_2.x[t_t:], Veh_2.y[t_t:], Veh_2.theta[t_t:], Veh_2.v_long[t_t:], Veh_2.v_lat[t_t:], \
            Veh_2.v_long_dot[t_t:], Veh_2.omega_r[t_t:], Veh_2.wheel_anlge[t_t:]
    # Deactivate S3 when the best pair is crash-free (non-positive mean score).
    if min(Injury_mean) <= 0:
        flag_S3 = False
    return flag_S3
| [
"xlrd.open_workbook",
"utils.Con_est.Collision_cond",
"torch.nn.functional.softmax",
"numpy.rad2deg",
"numpy.append",
"numpy.max",
"numpy.sin",
"utils.Veh_dyn.vehicle_dyn",
"numpy.min",
"numpy.cos",
"utils.Det_crash.det_crash",
"numpy.array",
"numpy.arctan"
] | [((13651, 13671), 'numpy.max', 'np.max', (['[t_t - 1, 0]'], {}), '([t_t - 1, 0])\n', (13657, 13671), True, 'import numpy as np\n'), ((3505, 3527), 'numpy.max', 'np.max', (['[ego_t - 1, 0]'], {}), '([ego_t - 1, 0])\n', (3511, 3527), True, 'import numpy as np\n'), ((9802, 9828), 'numpy.arctan', 'np.arctan', (['(y_rela / x_rela)'], {}), '(y_rela / x_rela)\n', (9811, 9828), True, 'import numpy as np\n'), ((19402, 19477), 'utils.Veh_dyn.vehicle_dyn', 'vehicle_dyn', (['veh_1_state', 'Veh_1.deci'], {'if_error': '(True)', 'r_seed': '(r_seed + time_i)'}), '(veh_1_state, Veh_1.deci, if_error=True, r_seed=r_seed + time_i)\n', (19413, 19477), False, 'from utils.Veh_dyn import vehicle_dyn\n'), ((19967, 20042), 'utils.Veh_dyn.vehicle_dyn', 'vehicle_dyn', (['veh_2_state', 'Veh_2.deci'], {'if_error': '(True)', 'r_seed': '(r_seed + time_i)'}), '(veh_2_state, Veh_2.deci, if_error=True, r_seed=r_seed + time_i)\n', (19978, 20042), False, 'from utils.Veh_dyn import vehicle_dyn\n'), ((4451, 4505), 'numpy.append', 'np.append', (['self.x[ego_t - Time_Perc_temp:ego_t]', 'x_abs'], {}), '(self.x[ego_t - Time_Perc_temp:ego_t], x_abs)\n', (4460, 4505), True, 'import numpy as np\n'), ((4528, 4582), 'numpy.append', 'np.append', (['self.y[ego_t - Time_Perc_temp:ego_t]', 'y_abs'], {}), '(self.y[ego_t - Time_Perc_temp:ego_t], y_abs)\n', (4537, 4582), True, 'import numpy as np\n'), ((4609, 4671), 'numpy.append', 'np.append', (['self.theta[ego_t - Time_Perc_temp:ego_t]', 'veh_angle'], {}), '(self.theta[ego_t - Time_Perc_temp:ego_t], veh_angle)\n', (4618, 4671), True, 'import numpy as np\n'), ((4695, 4755), 'numpy.append', 'np.append', (['self.v_long[ego_t - Time_Perc_temp:ego_t]', 'v_long'], {}), '(self.v_long[ego_t - Time_Perc_temp:ego_t], v_long)\n', (4704, 4755), True, 'import numpy as np\n'), ((8846, 8923), 'utils.Veh_dyn.vehicle_dyn', 'vehicle_dyn', (['veh_state', 'self.deci'], {'if_error': '(True)', 'r_seed': '(self.r_seed + time_i)'}), '(veh_state, self.deci, if_error=True, 
r_seed=self.r_seed + time_i)\n', (8857, 8923), False, 'from utils.Veh_dyn import vehicle_dyn\n'), ((9613, 9630), 'numpy.cos', 'np.cos', (['Veh_theta'], {}), '(Veh_theta)\n', (9619, 9630), True, 'import numpy as np\n'), ((9662, 9679), 'numpy.sin', 'np.sin', (['Veh_theta'], {}), '(Veh_theta)\n', (9668, 9679), True, 'import numpy as np\n'), ((9722, 9739), 'numpy.cos', 'np.cos', (['Veh_theta'], {}), '(Veh_theta)\n', (9728, 9739), True, 'import numpy as np\n'), ((9771, 9788), 'numpy.sin', 'np.sin', (['Veh_theta'], {}), '(Veh_theta)\n', (9777, 9788), True, 'import numpy as np\n'), ((9850, 9876), 'numpy.arctan', 'np.arctan', (['(y_rela / x_rela)'], {}), '(y_rela / x_rela)\n', (9859, 9876), True, 'import numpy as np\n'), ((9926, 9966), 'numpy.cos', 'np.cos', (['(Veh_e_theta[Veh_e_t] - Veh_theta)'], {}), '(Veh_e_theta[Veh_e_t] - Veh_theta)\n', (9932, 9966), True, 'import numpy as np\n'), ((10026, 10044), 'numpy.rad2deg', 'np.rad2deg', (['t_rela'], {}), '(t_rela)\n', (10036, 10044), True, 'import numpy as np\n'), ((13901, 13933), 'utils.Veh_dyn.vehicle_dyn', 'vehicle_dyn', (['veh_1_state', 'deci_1'], {}), '(veh_1_state, deci_1)\n', (13912, 13933), False, 'from utils.Veh_dyn import vehicle_dyn\n'), ((1035, 1094), 'xlrd.open_workbook', 'xlrd.open_workbook', (['"""para/HumanDriver_dynamics_example.xls"""'], {}), "('para/HumanDriver_dynamics_example.xls')\n", (1053, 1094), False, 'import xlrd\n'), ((3761, 3799), 'utils.Veh_dyn.vehicle_dyn', 'vehicle_dyn', (['veh_state', 'deci', 'Deci_set'], {}), '(veh_state, deci, Deci_set)\n', (3772, 3799), False, 'from utils.Veh_dyn import vehicle_dyn\n'), ((5313, 5356), 'utils.Det_crash.det_crash', 'det_crash', (['veh_l', 'veh_w', 'V1_state', 'V2_state'], {}), '(veh_l, veh_w, V1_state, V2_state)\n', (5322, 5356), False, 'from utils.Det_crash import det_crash\n'), ((10922, 10981), 'xlrd.open_workbook', 'xlrd.open_workbook', (['"""para/HumanDriver_dynamics_example.xls"""'], {}), "('para/HumanDriver_dynamics_example.xls')\n", (10940, 10981), 
False, 'import xlrd\n'), ((14399, 14431), 'utils.Veh_dyn.vehicle_dyn', 'vehicle_dyn', (['veh_2_state', 'deci_2'], {}), '(veh_2_state, deci_2)\n', (14410, 14431), False, 'from utils.Veh_dyn import vehicle_dyn\n'), ((15414, 15457), 'utils.Det_crash.det_crash', 'det_crash', (['veh_l', 'veh_w', 'V1_state', 'V2_state'], {}), '(veh_l, veh_w, V1_state, V2_state)\n', (15423, 15457), False, 'from utils.Det_crash import det_crash\n'), ((6228, 6297), 'utils.Con_est.Collision_cond', 'Collision_cond', (['veh_striking_list', 'V1_v', 'V2_v', 'delta_angle', 'veh_param'], {}), '(veh_striking_list, V1_v, V2_v, delta_angle, veh_param)\n', (6242, 6297), False, 'from utils.Con_est import Collision_cond\n'), ((6548, 6571), 'numpy.rad2deg', 'np.rad2deg', (['delta_angle'], {}), '(delta_angle)\n', (6558, 6571), True, 'import numpy as np\n'), ((7859, 7963), 'numpy.min', 'np.min', (['(((x_abs[:length] - Veh_e_x[:length]) ** 2 + (y_abs[:length] - Veh_e_y[:\n length]) ** 2) ** 0.5)'], {}), '(((x_abs[:length] - Veh_e_x[:length]) ** 2 + (y_abs[:length] -\n Veh_e_y[:length]) ** 2) ** 0.5)\n', (7865, 7963), True, 'import numpy as np\n'), ((15859, 15966), 'utils.Con_est.Collision_cond', 'Collision_cond', (['veh_striking_list', 'v_1_long[i]', 'v_2_long[i]', '(veh_2_angle[i] - veh_1_angle[i])', 'veh_param'], {}), '(veh_striking_list, v_1_long[i], v_2_long[i], veh_2_angle[i] -\n veh_1_angle[i], veh_param)\n', (15873, 15966), False, 'from utils.Con_est import Collision_cond\n'), ((16121, 16164), 'numpy.rad2deg', 'np.rad2deg', (['(veh_2_angle[i] - veh_1_angle[i])'], {}), '(veh_2_angle[i] - veh_1_angle[i])\n', (16131, 16164), True, 'import numpy as np\n'), ((16200, 16243), 'numpy.rad2deg', 'np.rad2deg', (['(veh_1_angle[i] - veh_2_angle[i])'], {}), '(veh_1_angle[i] - veh_2_angle[i])\n', (16210, 16243), True, 'import numpy as np\n'), ((6955, 7065), 'numpy.array', 'np.array', (['[[d_V, angle, PoI_ego, PoI_oth, self.age, self.female, self.belt, self.\n airbag, self.mass_ratio]]'], {}), '([[d_V, angle, 
PoI_ego, PoI_oth, self.age, self.female, self.belt,\n self.airbag, self.mass_ratio]])\n', (6963, 7065), True, 'import numpy as np\n'), ((7316, 7356), 'torch.nn.functional.softmax', 'torch.nn.functional.softmax', (['pred'], {'dim': '(1)'}), '(pred, dim=1)\n', (7343, 7356), False, 'import torch\n'), ((16967, 17104), 'numpy.array', 'np.array', (['[[d_V, angle_i, PoI_list[num_i], PoI_list[1 - num_i], veh_i.age, veh_i.\n female, veh_i.belt, veh_i.airbag, veh_i.mass_ratio]]'], {}), '([[d_V, angle_i, PoI_list[num_i], PoI_list[1 - num_i], veh_i.age,\n veh_i.female, veh_i.belt, veh_i.airbag, veh_i.mass_ratio]])\n', (16975, 17104), True, 'import numpy as np\n'), ((17439, 17479), 'torch.nn.functional.softmax', 'torch.nn.functional.softmax', (['pred'], {'dim': '(1)'}), '(pred, dim=1)\n', (17466, 17479), False, 'import torch\n'), ((18271, 18297), 'numpy.array', 'np.array', (['x_1_abs[:length]'], {}), '(x_1_abs[:length])\n', (18279, 18297), True, 'import numpy as np\n'), ((18300, 18326), 'numpy.array', 'np.array', (['x_2_abs[:length]'], {}), '(x_2_abs[:length])\n', (18308, 18326), True, 'import numpy as np\n'), ((18379, 18405), 'numpy.array', 'np.array', (['y_1_abs[:length]'], {}), '(y_1_abs[:length])\n', (18387, 18405), True, 'import numpy as np\n'), ((18408, 18434), 'numpy.array', 'np.array', (['y_2_abs[:length]'], {}), '(y_2_abs[:length])\n', (18416, 18434), True, 'import numpy as np\n')] |
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import os

# matplotlib.use('Agg')

font = {'size': 14}
matplotlib.rc('font', **font)

# Directory holding the fitted peak-wavelength data.
# BUG FIX: removed the dead `root = os.getcwd()` assignment, which was
# immediately overwritten by the line below and never used.
root = '../absorb_spec/'

# Columns: time, fitted peak wavelength, fit uncertainty.
t, fit_wl, err_wl = np.genfromtxt(root + 'peak_wls.txt', skip_header=1,
                                  unpack=True)

fig, ax = plt.subplots()
# Plot the peak wavelength over time with 3-sigma error bars.
ax.errorbar(t, fit_wl, yerr=3 * err_wl)
ax.grid(True)
ax.set_xlabel('Time (minutes)')
ax.set_ylabel('Peak wavelength (nm)')
plt.tight_layout()
fig.savefig(root + 'peak_wls.png')
plt.show()
| [
"matplotlib.rc",
"matplotlib.pyplot.show",
"os.getcwd",
"numpy.genfromtxt",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.tight_layout"
] | [((124, 153), 'matplotlib.rc', 'matplotlib.rc', (['"""font"""'], {}), "('font', **font)\n", (137, 153), False, 'import matplotlib\n'), ((162, 173), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (171, 173), False, 'import os\n'), ((219, 283), 'numpy.genfromtxt', 'np.genfromtxt', (["(root + 'peak_wls.txt')"], {'skip_header': '(1)', 'unpack': '(True)'}), "(root + 'peak_wls.txt', skip_header=1, unpack=True)\n", (232, 283), True, 'import numpy as np\n'), ((327, 341), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (339, 341), True, 'import matplotlib.pyplot as plt\n'), ((465, 483), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (481, 483), True, 'import matplotlib.pyplot as plt\n'), ((517, 527), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (525, 527), True, 'import matplotlib.pyplot as plt\n')] |
import torch
from torchvision import datasets, transforms
import argparse
import numpy as np
from PIL import Image
import json
def argparse_train():
    """Parse the training command-line options, echo them, and return them.

    Returns:
        argparse.Namespace with data_directory, save_dir, arch, prob_dropout,
        learning_rate, hidden_units, epochs and gpu.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("data_directory", help="set directory to get the data from")
    parser.add_argument("--save_dir", help="set directory to save checkpoints", default="./")
    parser.add_argument("--arch", help="choose model architecture", default="densenet121")
    parser.add_argument("--prob_dropout", type=float, help="choose dropout probability in hidden layer", default=0.3)
    parser.add_argument("--learning_rate", type=float, help="choose learning rate", default=0.001)
    parser.add_argument("--hidden_units", type=int, help="choose hidden units", default=512)
    parser.add_argument("--epochs", type=int, help="choose epochs", default=10)
    parser.add_argument('--gpu', action='store_true', default=False, dest='gpu', help='set gpu-training to True')
    results = parser.parse_args()
    # Echo every parsed option; the gpu flag is reported as 'gpu_training'.
    echoed = (('data_directory', results.data_directory),
              ('save_dir', results.save_dir),
              ('arch', results.arch),
              ('prob_dropout', results.prob_dropout),
              ('learning_rate', results.learning_rate),
              ('hidden_units', results.hidden_units),
              ('epochs', results.epochs),
              ('gpu_training', results.gpu))
    for label, value in echoed:
        print('{} = {!r}'.format(label, value))
    return results
def argparse_predict():
    """Parse the prediction command-line options, echo them, and return them.

    Returns:
        argparse.Namespace with image_path, checkpoint, top_k,
        category_names and gpu.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("image_path", help="set path to image for prediction")
    parser.add_argument("checkpoint", help="set checkpoint to load the model from")
    parser.add_argument("--top_k", type=int, help="return top K most likely classes", default=5)
    parser.add_argument("--category_names", help="use a mapping of categories to real names", default=None)
    parser.add_argument('--gpu', action='store_true', default=False, dest='gpu', help='set gpu-training to True')
    results = parser.parse_args()
    # Echo every parsed option; the gpu flag is reported as 'gpu_training'.
    echoed = (('image_path', results.image_path),
              ('checkpoint', results.checkpoint),
              ('top_k', results.top_k),
              ('category_names', results.category_names),
              ('gpu_training', results.gpu))
    for label, value in echoed:
        print('{} = {!r}'.format(label, value))
    return results
def load_data(data_directory):
    """Create the train/valid/test datasets and dataloaders.

    Expects `data_directory` to contain 'train', 'valid' and 'test'
    sub-folders laid out for torchvision.datasets.ImageFolder.

    Args:
        data_directory: root data directory path.

    Returns:
        (image_datasets, dataloaders): two dicts keyed by
        'train' / 'valid' / 'test'.
    """
    train_dir = data_directory + '/train'
    valid_dir = data_directory + '/valid'
    test_dir = data_directory + '/test'
    # Training data is augmented; validation/test are only resized and normalised.
    data_transforms = {'train': transforms.Compose([transforms.Resize((250, 250)),
                                                     transforms.RandomCrop((224, 224)),
                                                     transforms.RandomRotation(20),
                                                     transforms.ColorJitter(brightness=0.2, contrast=0.2, saturation=0.2, hue=0.2),
                                                     transforms.ToTensor(),
                                                     transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))]),
                       'valid': transforms.Compose([transforms.Resize((224, 224)),
                                                     transforms.ToTensor(),
                                                     transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))]),
                       'test': transforms.Compose([transforms.Resize((224, 224)),
                                                    transforms.ToTensor(),
                                                    transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))]),
                       }
    # BUG FIX: the 'valid' and 'test' datasets previously loaded from
    # train_dir, so the model was validated and tested on training images.
    image_datasets = {'train': datasets.ImageFolder(train_dir, transform=data_transforms['train']),
                      'valid': datasets.ImageFolder(valid_dir, transform=data_transforms['valid']),
                      'test': datasets.ImageFolder(test_dir, transform=data_transforms['test'])
                      }
    # Using the image datasets and the transforms, define the dataloaders.
    dataloaders = {'train': torch.utils.data.DataLoader(image_datasets['train'], batch_size=64, shuffle=True),
                   'valid': torch.utils.data.DataLoader(image_datasets['valid'], batch_size=64, shuffle=True),
                   'test': torch.utils.data.DataLoader(image_datasets['test'], batch_size=64, shuffle=True),
                   }
    return image_datasets, dataloaders
def preprocess_image(image_path):
    """Scale, centre-crop and normalise a PIL image for a PyTorch model.

    Args:
        image_path: path of the image file to load.

    Returns:
        A numpy array in channel-first (C, H, W) layout, normalised with the
        standard mean/std used for ImageNet-pretrained models.
    """
    img = Image.open(image_path)
    # Shrink in place so neither side exceeds 256 px (aspect ratio kept).
    img.thumbnail((256, 256))
    # Centre-crop a 224 x 224 region.
    w, h = img.size
    crop = 224
    box = ((w - crop) / 2, (h - crop) / 2, (w + crop) / 2, (h + crop) / 2)
    img = img.crop(box)
    # Scale pixel values to [0, 1] and normalise per channel.
    arr = np.array(img) / 255
    arr = (arr - np.array([0.485, 0.456, 0.406])) / np.array([0.229, 0.224, 0.225])
    # Reorder from (H, W, C) to (C, H, W).
    return arr.transpose(2, 0, 1)
def import_mapping(category_names):
    """Load the mapping of category ids to real names from a JSON file.

    Args:
        category_names: path of the JSON file to read.

    Returns:
        The deserialised mapping object.
    """
    with open(category_names, 'r') as handle:
        return json.load(handle)
"torchvision.transforms.ColorJitter",
"json.load",
"argparse.ArgumentParser",
"torch.utils.data.DataLoader",
"torchvision.transforms.RandomRotation",
"PIL.Image.open",
"torchvision.datasets.ImageFolder",
"torchvision.transforms.ToTensor",
"numpy.array",
"torchvision.transforms.Normalize",
"torch... | [((163, 188), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (186, 188), False, 'import argparse\n'), ((1552, 1577), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (1575, 1577), False, 'import argparse\n'), ((4738, 4760), 'PIL.Image.open', 'Image.open', (['image_path'], {}), '(image_path)\n', (4748, 4760), False, 'from PIL import Image\n'), ((5158, 5173), 'numpy.array', 'np.array', (['image'], {}), '(image)\n', (5166, 5173), True, 'import numpy as np\n'), ((5207, 5238), 'numpy.array', 'np.array', (['[0.485, 0.456, 0.406]'], {}), '([0.485, 0.456, 0.406])\n', (5215, 5238), True, 'import numpy as np\n'), ((5251, 5282), 'numpy.array', 'np.array', (['[0.229, 0.224, 0.225]'], {}), '([0.229, 0.224, 0.225])\n', (5259, 5282), True, 'import numpy as np\n'), ((3793, 3860), 'torchvision.datasets.ImageFolder', 'datasets.ImageFolder', (['train_dir'], {'transform': "data_transforms['train']"}), "(train_dir, transform=data_transforms['train'])\n", (3813, 3860), False, 'from torchvision import datasets, transforms\n'), ((3895, 3962), 'torchvision.datasets.ImageFolder', 'datasets.ImageFolder', (['train_dir'], {'transform': "data_transforms['valid']"}), "(train_dir, transform=data_transforms['valid'])\n", (3915, 3962), False, 'from torchvision import datasets, transforms\n'), ((3996, 4062), 'torchvision.datasets.ImageFolder', 'datasets.ImageFolder', (['train_dir'], {'transform': "data_transforms['test']"}), "(train_dir, transform=data_transforms['test'])\n", (4016, 4062), False, 'from torchvision import datasets, transforms\n'), ((4191, 4277), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (["image_datasets['train']"], {'batch_size': '(64)', 'shuffle': '(True)'}), "(image_datasets['train'], batch_size=64, shuffle\n =True)\n", (4218, 4277), False, 'import torch\n'), ((4304, 4390), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (["image_datasets['valid']"], {'batch_size': '(64)', 'shuffle': 
'(True)'}), "(image_datasets['valid'], batch_size=64, shuffle\n =True)\n", (4331, 4390), False, 'import torch\n'), ((4416, 4501), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (["image_datasets['test']"], {'batch_size': '(64)', 'shuffle': '(True)'}), "(image_datasets['test'], batch_size=64, shuffle=True\n )\n", (4443, 4501), False, 'import torch\n'), ((5520, 5532), 'json.load', 'json.load', (['f'], {}), '(f)\n', (5529, 5532), False, 'import json\n'), ((2702, 2731), 'torchvision.transforms.Resize', 'transforms.Resize', (['(250, 250)'], {}), '((250, 250))\n', (2719, 2731), False, 'from torchvision import datasets, transforms\n'), ((2774, 2807), 'torchvision.transforms.RandomCrop', 'transforms.RandomCrop', (['(224, 224)'], {}), '((224, 224))\n', (2795, 2807), False, 'from torchvision import datasets, transforms\n'), ((2850, 2879), 'torchvision.transforms.RandomRotation', 'transforms.RandomRotation', (['(20)'], {}), '(20)\n', (2875, 2879), False, 'from torchvision import datasets, transforms\n'), ((2923, 3000), 'torchvision.transforms.ColorJitter', 'transforms.ColorJitter', ([], {'brightness': '(0.2)', 'contrast': '(0.2)', 'saturation': '(0.2)', 'hue': '(0.2)'}), '(brightness=0.2, contrast=0.2, saturation=0.2, hue=0.2)\n', (2945, 3000), False, 'from torchvision import datasets, transforms\n'), ((3044, 3065), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (3063, 3065), False, 'from torchvision import datasets, transforms\n'), ((3109, 3175), 'torchvision.transforms.Normalize', 'transforms.Normalize', (['(0.485, 0.456, 0.406)', '(0.229, 0.224, 0.225)'], {}), '((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))\n', (3129, 3175), False, 'from torchvision import datasets, transforms\n'), ((3231, 3260), 'torchvision.transforms.Resize', 'transforms.Resize', (['(224, 224)'], {}), '((224, 224))\n', (3248, 3260), False, 'from torchvision import datasets, transforms\n'), ((3303, 3324), 'torchvision.transforms.ToTensor', 
'transforms.ToTensor', ([], {}), '()\n', (3322, 3324), False, 'from torchvision import datasets, transforms\n'), ((3368, 3434), 'torchvision.transforms.Normalize', 'transforms.Normalize', (['(0.485, 0.456, 0.406)', '(0.229, 0.224, 0.225)'], {}), '((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))\n', (3388, 3434), False, 'from torchvision import datasets, transforms\n'), ((3489, 3518), 'torchvision.transforms.Resize', 'transforms.Resize', (['(224, 224)'], {}), '((224, 224))\n', (3506, 3518), False, 'from torchvision import datasets, transforms\n'), ((3561, 3582), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (3580, 3582), False, 'from torchvision import datasets, transforms\n'), ((3626, 3692), 'torchvision.transforms.Normalize', 'transforms.Normalize', (['(0.485, 0.456, 0.406)', '(0.229, 0.224, 0.225)'], {}), '((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))\n', (3646, 3692), False, 'from torchvision import datasets, transforms\n')] |
from keras.preprocessing.image import img_to_array
from keras.models import load_model
import numpy as np
import argparse
import cv2
import cvlib as cv
import os
# --- CLI script: detect faces in an image and annotate them in place ---
ap = argparse.ArgumentParser()
ap.add_argument("-i", "--image", required=True, help="path to input image")
args = ap.parse_args()
image = cv2.imread(args.image)
# cv2.imread returns None (rather than raising) when the path is unreadable
if image is None:
    print("Could not read input image")
    exit()
# model = load_model("gender_detection.model")
face, confidence = cv.detect_face(image)
# classes = ['man','woman']
for idx, f in enumerate(face):
    # each detection f holds box corners; presumably (startX, startY, endX, endY) — per cvlib docs
    (startX, startY) = f[0], f[1]
    (endX, endY) = f[2], f[3]
    # draw a green, 2px-thick rectangle around the face (mutates `image`)
    cv2.rectangle(image, (startX,startY), (endX,endY), (0,255,0), 2)
    # crop the face and preprocess it into a 1x96x96x3 float batch in [0, 1],
    # the input shape the (currently commented-out) gender model would expect
    face_crop = np.copy(image[startY:endY,startX:endX])
    face_crop = cv2.resize(face_crop, (96,96))
    face_crop = face_crop.astype("float") / 255.0
    face_crop = img_to_array(face_crop)
    face_crop = np.expand_dims(face_crop, axis=0)
    # cv2.imwrite(args.image, face_crop)
    # conf = model.predict(face_crop)[0]
    # print(conf)
    # print(classes)
    # idx = np.argmax(conf)
    # label = classes[idx]
    # if label == 'man':
    # cv2.imwrite('male_'+args.image,image)
    # else:
    # cv2.imwrite('female_'+args.image,image)
# NOTE(review): this overwrites the input file with the annotated image — confirm intended
cv2.imwrite(args.image,image)
cv2.destroyAllWindows()
"cvlib.detect_face",
"argparse.ArgumentParser",
"numpy.copy",
"cv2.imwrite",
"numpy.expand_dims",
"cv2.imread",
"keras.preprocessing.image.img_to_array",
"cv2.rectangle",
"cv2.destroyAllWindows",
"cv2.resize"
] | [((168, 193), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (191, 193), False, 'import argparse\n'), ((302, 324), 'cv2.imread', 'cv2.imread', (['args.image'], {}), '(args.image)\n', (312, 324), False, 'import cv2\n'), ((463, 484), 'cvlib.detect_face', 'cv.detect_face', (['image'], {}), '(image)\n', (477, 484), True, 'import cvlib as cv\n'), ((1233, 1263), 'cv2.imwrite', 'cv2.imwrite', (['args.image', 'image'], {}), '(args.image, image)\n', (1244, 1263), False, 'import cv2\n'), ((1263, 1286), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (1284, 1286), False, 'import cv2\n'), ((623, 691), 'cv2.rectangle', 'cv2.rectangle', (['image', '(startX, startY)', '(endX, endY)', '(0, 255, 0)', '(2)'], {}), '(image, (startX, startY), (endX, endY), (0, 255, 0), 2)\n', (636, 691), False, 'import cv2\n'), ((705, 745), 'numpy.copy', 'np.copy', (['image[startY:endY, startX:endX]'], {}), '(image[startY:endY, startX:endX])\n', (712, 745), True, 'import numpy as np\n'), ((762, 793), 'cv2.resize', 'cv2.resize', (['face_crop', '(96, 96)'], {}), '(face_crop, (96, 96))\n', (772, 793), False, 'import cv2\n'), ((859, 882), 'keras.preprocessing.image.img_to_array', 'img_to_array', (['face_crop'], {}), '(face_crop)\n', (871, 882), False, 'from keras.preprocessing.image import img_to_array\n'), ((899, 932), 'numpy.expand_dims', 'np.expand_dims', (['face_crop'], {'axis': '(0)'}), '(face_crop, axis=0)\n', (913, 932), True, 'import numpy as np\n')] |
import logging
import time
import numpy as np
import quaternion
# Configure the root logger once at import time (INFO and above).
logging.basicConfig(level=logging.INFO)
def lookRotation(forward, up):
    """
    Quaternion that rotates world to face Forward, while keeping orientation dictated by Up
    See https://answers.unity.com/questions/467614/what-is-the-source-code-of-quaternionlookrotation.html

    NOTE(review): `up` is normalized in place with `/=`, so a float ndarray
    passed by the caller is mutated — confirm callers expect this.

    :type forward: np.array
    :type up: np.array
    :return: quaternion.quaternion
    """
    # Build an orthonormal basis: vector = normalized forward,
    # vector2 = cross(up, forward) ("right"), vector3 = recomputed up.
    up /= np.linalg.norm(up)
    vector = forward / np.linalg.norm(forward)
    vector2 = np.cross(up, vector)
    vector2 /= np.linalg.norm(vector2)
    vector3 = np.cross(vector, vector2)
    # mRC below are treated as entries of the rotation matrix assembled from
    # the basis vectors (layout follows the linked Unity source).
    m00 = vector2[0]
    m01 = vector2[1]
    m02 = vector2[2]
    m10 = vector3[0]
    m11 = vector3[1]
    m12 = vector3[2]
    m20 = vector[0]
    m21 = vector[1]
    m22 = vector[2]
    # Matrix -> quaternion conversion: branch on the trace / largest diagonal
    # entry so the square root argument stays positive.
    num8 = (m00 + m11) + m22
    output = quaternion.quaternion()
    if num8 > 0:
        num = np.sqrt(num8 + 1)
        output.w = num / 2
        num = 0.5 / num
        output.x = (m12 - m21) * num
        output.y = (m20 - m02) * num
        output.z = (m01 - m10) * num
    elif m00 >= m11 and m00 >= m22:
        num7 = np.sqrt((m00 + 1) - m11 - m22)
        num4 = 0.5 / num7
        output.x = num7 / 2
        output.y = (m01 + m10) * num4
        output.z = (m02 + m20) * num4
        output.w = (m12 - m21) * num4
    elif m11 > m22:
        num6 = np.sqrt(m11 + 1 - m00 - m22)
        num3 = 0.5 / num6
        output.x = (m10 + m01) * num3
        output.y = num6 / 2
        output.z = (m21 + m12) * num3
        output.w = (m20 - m02) * num3
    else:
        num5 = np.sqrt(m22 + 1 - m00 - m11)
        num2 = 0.5 / num5
        output.x = (m20 + m02) * num2
        output.y = (m21 + m12) * num2
        output.z = num5 / 2
        output.w = (m01 - m10) * num2
    return output
def custom_interpolate(value, in_min, in_max, out_min, out_max, clamp=False):
    """Linearly remap *value* from [in_min, in_max] onto [out_min, out_max].

    With clamp=True the result is limited to the output range; otherwise
    values outside the input range extrapolate linearly.
    """
    fraction = (value - in_min) / (in_max - in_min)
    mapped = out_min + fraction * (out_max - out_min)
    return np.clip(mapped, out_min, out_max) if clamp else mapped
def custom_euler(q):
    """Convert a quaternion (attributes w, x, y, z) to Euler angles.

    Returns:
        (heading, pitch, roll) in radians; heading is wrapped into [0, 2*pi).
    """
    x_sq = np.square(q.x)
    y_sq = np.square(q.y)
    z_sq = np.square(q.z)
    heading = np.arctan2(2 * (q.w * q.z + q.x * q.y), 1 - 2 * (y_sq + z_sq))
    pitch = np.arcsin(2 * (q.w * q.y - q.z * q.x))
    roll = np.arctan2(2 * (q.w * q.x + q.y * q.z), 1 - 2 * (x_sq + z_sq))
    if heading < 0:
        heading += 2 * np.pi
    return heading, pitch, roll
def custom_euler_to_quat(yaw, pitch, roll):
    """Build a quaternion.quaternion from yaw/pitch/roll angles (radians)."""
    half_yaw = yaw * 0.5
    half_pitch = pitch * 0.5
    half_roll = roll * 0.5
    cy, sy = np.cos(half_yaw), np.sin(half_yaw)
    cp, sp = np.cos(half_pitch), np.sin(half_pitch)
    cr, sr = np.cos(half_roll), np.sin(half_roll)
    result = quaternion.quaternion()
    result.w = cr * cp * cy + sr * sp * sy
    result.x = sr * cp * cy - cr * sp * sy
    result.y = cr * sp * cy + sr * cp * sy
    result.z = cr * cp * sy - sr * sp * cy
    return result
def process_samples(samples: np.array, desired_length):
    """Normalize a recorded gesture path to `desired_length` evenly spaced points.

    Pipeline (each stage's duration is logged via clock_that_step):
      1. drop consecutive duplicate bearings,
      2. trim leading/trailing points lying within 10% of the gesture's
         bounding-box diagonal of the start/end point ("slop"),
      3. re-sample the polyline into `desired_length` evenly spaced points,
      4. translate to the origin and scale into the unit square, preserving
         the gesture's aspect ratio.

    :param samples: array of (yaw, pitch) bearings, shape (n, 2)
    :param desired_length: number of points in the output
    :return: np.array of shape (desired_length, 2) with values in [0, 1]
    :raises AttributeError: if fewer than 2 samples are given, or the
        resampling walks off the end of the curve
    """
    logger = logging.getLogger('process_samples()')
    benchmark = time.perf_counter()

    # Helper: log how long the previous stage took and restart the clock.
    def clock_that_step(description, benchmark):
        logger.debug('{} took {:.0f} ms'.format(description.capitalize(), (time.perf_counter() - benchmark) * 1000))
        return time.perf_counter()
    if not len(samples) > 1:
        raise AttributeError('Sample list is empty')
    logger.debug('Normalizing:\n{0!r}'.format(samples))
    # Strip redundant bearings
    unique_bearings = [samples[0]]
    for index, bearing in enumerate(samples):
        if not index:
            continue
        if np.isclose(bearing[0], samples[index - 1, 0]) and np.isclose(bearing[1], samples[index - 1, 1]):
            logger.debug('Discarding redundant point ({:.3f}, {:.3f})'.format(bearing[0], bearing[1]))
        else:
            unique_bearings.append(bearing)
    samples = np.array(unique_bearings)
    benchmark = clock_that_step('Stripping dupes', benchmark)
    # Remap standardized bearings so gestures are the same size
    yaw_min = min(samples[:, 0])
    yaw_max = max(samples[:, 0])
    pitch_min = min(samples[:, 1])
    pitch_max = max(samples[:, 1])
    # magnitude = diagonal of the gesture's bounding box; points closer than
    # 10% (fudge_factor) of it to the start/end are considered jitter.
    magnitude = np.linalg.norm([yaw_max - yaw_min, pitch_max - pitch_min])
    fudge_factor = 1 / 10
    logger.debug('Yaw min: {:.3f} Pitch min: {:.3f} Yaw max: {:.3f} Pitch max: {:.3f} Trim length: {:.3f}'.format(
        yaw_min, pitch_min, yaw_max, pitch_max, magnitude * fudge_factor))
    early_crap_count = 0
    for i in range(1, len(samples)):
        if np.linalg.norm([samples[i, 0] - samples[0, 0],
                           samples[i, 1] - samples[0, 1]]) > magnitude * fudge_factor:
            logger.debug('Done stripping leading points - ({:.3f}, {:.3f}) is far enough from start point '
                         '({:.3f}, {:.3f}). Had to be {:.3f} units away, and is {:.3f}.'.format(
                samples[i, 0], samples[i, 1], samples[0, 0], samples[0, 1],
                magnitude * fudge_factor,
                np.linalg.norm([samples[i, 0] - samples[0, 0],
                                samples[i, 1] - samples[0, 1]])))
            break
        else:
            logger.debug('Stripping leading point ({:.3f}, {:.3f}) - too close to start point ({:.3f}, {:.3f}). '
                         'Must be {:.3f} units away, but is {:.3f}.'.format(
                samples[i, 0], samples[i, 1], samples[0, 0], samples[0, 1],
                magnitude * fudge_factor,
                np.linalg.norm([samples[i, 0] - samples[0, 0],
                                samples[i, 1] - samples[0, 1]])))
            early_crap_count += 1
    # Keep the original start point, drop the jittery points right after it.
    start_point = samples[0]
    trimmed = samples[early_crap_count + 1:].tolist()
    samples = np.array([start_point] + trimmed)
    benchmark = clock_that_step('Trimming early slop', benchmark)
    # logger.debug('Early crap stripped: {}'.format(samples))
    late_crap_count = 0
    for i in range(2, len(samples)):
        if np.linalg.norm([samples[-i, 0] - samples[- 1, 0],
                           samples[-i, 1] - samples[- 1, 1]]) > magnitude * fudge_factor:
            logger.debug('Done stripping trailing points - ({:.3f}, {:.3f}) is far enough from endpoint '
                         '({:.3f}, {:.3f}). Had to be {:.3f} units away, and is {:.3f}.'.format(
                samples[-i, 0], samples[-i, 1], samples[-1, 0], samples[-1, 1],
                magnitude * fudge_factor,
                np.linalg.norm([samples[-i, 0] - samples[- 1, 0],
                                samples[-i, 1] - samples[- 1, 1]])))
            break
        else:
            logger.debug('Stripping trailing point ({:.3f}, {:.3f}) - too close to endpoint ({:.3f}, {:.3f}). '
                         'Must be {:.3f} units away, but is {:.3f}.'.format(
                samples[-i, 0], samples[-i, 1], samples[-1, 0], samples[-1, 1],
                magnitude * fudge_factor,
                np.linalg.norm([samples[-i, 0] - samples[- 1, 0],
                                samples[-i, 1] - samples[- 1, 1]])))
            late_crap_count += 1
    if late_crap_count:
        # Keep the original endpoint, drop the jittery points just before it.
        endpoint = samples[-1]
        trimmed = samples[:(late_crap_count + 1) * -1].tolist()
        samples = np.array(trimmed + [endpoint])
        logger.debug('Late crap stripped: {}'.format(samples))
    benchmark = clock_that_step('Trimming late slop', benchmark)
    # Standardize bearings 'curve' to evenly-spaced points
    # cumulative_segment_lengths[i] = arc length from the start to point i.
    cumulative_segment_lengths = [0]
    for index, sample in enumerate(samples):
        if index == 0:
            continue
        segment_length = np.linalg.norm([sample[0] - samples[index - 1][0], sample[1] - samples[index - 1][1]])
        cumulative_segment_lengths.append(segment_length + cumulative_segment_lengths[index - 1])
        logger.debug('Segment ending in point {} length {:.3f} Cumul: {:.3f}'.format(
            index, segment_length, cumulative_segment_lengths[index]))
    curve_length = cumulative_segment_lengths[-1]
    target_segment_length = curve_length / (desired_length - 1)
    benchmark = clock_that_step('Calculating segment lengths', benchmark)
    # logger.debug(
    #     'Segment lengths: {} - {} segments, {} points'.format(segment_lengths, len(segment_lengths), len(samples)))
    logger.debug('Total length: {:.2f} Target segment length: {:.4f}'.format(curve_length, target_segment_length))
    standardized_bearings = [samples[0]]
    first_longer_sample = 0
    for i in range(1, desired_length):
        # Walk along the polyline and linearly interpolate a point at
        # arc-length i * target_segment_length.
        target_length = i * target_segment_length
        logger.debug('Looking to place a point at {:.3f} units along curve'.format(target_length))
        if cumulative_segment_lengths[first_longer_sample] > target_length:
            logger.debug('Previous point at {:.3f} units along curve still works'.format(
                cumulative_segment_lengths[first_longer_sample]))
        else:
            while cumulative_segment_lengths[first_longer_sample] < target_length \
                    and not np.isclose(cumulative_segment_lengths[first_longer_sample], target_length):
                logger.debug(
                    'Cumulative length of {:.3f} is too short - advancing to segment ending at point {}'.format(
                        cumulative_segment_lengths[first_longer_sample], first_longer_sample))
                first_longer_sample += 1
                if first_longer_sample >= len(cumulative_segment_lengths):
                    raise AttributeError("Entire line isn't long enough?!")
        low_point = samples[first_longer_sample - 1]
        high_point = samples[first_longer_sample]
        # Fraction of the way along the segment that contains target_length.
        position_along_segment = ((target_length - cumulative_segment_lengths[first_longer_sample - 1]) /
                                  (cumulative_segment_lengths[first_longer_sample]
                                   - cumulative_segment_lengths[first_longer_sample - 1]))
        standardized_point_x = low_point[0] + position_along_segment * (high_point[0] - low_point[0])
        standardized_point_y = low_point[1] + position_along_segment * (high_point[1] - low_point[1])
        standardized_point = [standardized_point_x, standardized_point_y]
        logger.debug('Placed point {:.3f} units ({:.0f}%) along the {:.3f} line between {} and {} ==> {}'
                     .format(target_length - cumulative_segment_lengths[first_longer_sample - 1],
                             position_along_segment * 100,
                             cumulative_segment_lengths[first_longer_sample]
                             - cumulative_segment_lengths[first_longer_sample - 1],
                             low_point, high_point, standardized_point))
        standardized_bearings.append(standardized_point)
    logger.debug('Done interpolating. Scaling into 0-1 fractional dims')
    benchmark = clock_that_step('Interpolation', benchmark)
    # Move lowest and leftest points to the edge
    standardized_bearings = [[y - yaw_min, p - pitch_min] for y, p in standardized_bearings]
    # Rescale, preserving proportions
    total_width = yaw_max - yaw_min
    total_height = pitch_max - pitch_min
    standardized_bearings = np.array([[custom_interpolate(y, 0, max(total_width, total_height), 0, 1),
                                       custom_interpolate(p, 0, max(total_width, total_height), 0, 1)]
                                      for y, p in standardized_bearings])
    clock_that_step('Resizing', benchmark)
    return standardized_bearings
def wrapped_delta(old, new):
    """Return old - new, wrapped into the [-pi, pi] interval."""
    diff = old - new
    if diff > np.pi:
        return diff - 2 * np.pi
    if diff < -np.pi:
        return diff + 2 * np.pi
    return diff
def bearing_delta(old, new):
    """Component-wise (yaw, pitch) difference old - new, each wrapped to [-pi, pi]."""
    components = []
    for axis in (0, 1):
        diff = old[axis] - new[axis]
        if diff > np.pi:
            diff -= 2 * np.pi
        elif diff < -np.pi:
            diff += 2 * np.pi
        components.append(diff)
    return np.array(components)
# This is taken from https://github.com/pyserial/pyserial/issues/216#issuecomment-369414522
class ReadLine:
    """Line-buffered reader around a pyserial ``Serial`` object.

    Reads whatever bytes are waiting in larger chunks and keeps the surplus
    in an internal buffer between calls, instead of reading byte-by-byte.
    (Adapted from pyserial issue #216.)
    """

    def __init__(self, s):
        self.buf = bytearray()  # bytes received but not yet handed out
        self.s = s  # Serial object

    def readline(self):
        saved_timeout = self.s.timeout
        # Use a short timeout while draining so reads never block for long.
        self.s.timeout = 0.1
        newline_at = self.buf.find(b"\n")
        if newline_at >= 0:
            # A whole line is already buffered; return it without reading.
            line = self.buf[:newline_at + 1]
            self.buf = self.buf[newline_at + 1:]
            self.s.timeout = saved_timeout
            return line
        while True:
            # Grab everything waiting (capped at 2048), at least one byte.
            chunk_size = max(1, min(2048, self.s.in_waiting))
            chunk = self.s.read(chunk_size)
            newline_at = chunk.find(b"\n")
            if newline_at < 0:
                self.buf.extend(chunk)
            else:
                line = self.buf + chunk[:newline_at + 1]
                self.buf[0:] = chunk[newline_at + 1:]
                self.s.timeout = saved_timeout
                return line
| [
"logging.basicConfig",
"numpy.square",
"numpy.cross",
"numpy.arcsin",
"time.perf_counter",
"numpy.clip",
"numpy.isclose",
"numpy.sin",
"numpy.linalg.norm",
"numpy.array",
"numpy.cos",
"quaternion.quaternion",
"logging.getLogger",
"numpy.sqrt"
] | [((66, 105), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.INFO'}), '(level=logging.INFO)\n', (85, 105), False, 'import logging\n'), ((416, 434), 'numpy.linalg.norm', 'np.linalg.norm', (['up'], {}), '(up)\n', (430, 434), True, 'import numpy as np\n'), ((498, 518), 'numpy.cross', 'np.cross', (['up', 'vector'], {}), '(up, vector)\n', (506, 518), True, 'import numpy as np\n'), ((534, 557), 'numpy.linalg.norm', 'np.linalg.norm', (['vector2'], {}), '(vector2)\n', (548, 557), True, 'import numpy as np\n'), ((573, 598), 'numpy.cross', 'np.cross', (['vector', 'vector2'], {}), '(vector, vector2)\n', (581, 598), True, 'import numpy as np\n'), ((830, 853), 'quaternion.quaternion', 'quaternion.quaternion', ([], {}), '()\n', (851, 853), False, 'import quaternion\n'), ((2731, 2769), 'numpy.arcsin', 'np.arcsin', (['(2 * (q.w * q.y - q.z * q.x))'], {}), '(2 * (q.w * q.y - q.z * q.x))\n', (2740, 2769), True, 'import numpy as np\n'), ((2974, 2991), 'numpy.cos', 'np.cos', (['(yaw * 0.5)'], {}), '(yaw * 0.5)\n', (2980, 2991), True, 'import numpy as np\n'), ((3001, 3018), 'numpy.sin', 'np.sin', (['(yaw * 0.5)'], {}), '(yaw * 0.5)\n', (3007, 3018), True, 'import numpy as np\n'), ((3028, 3047), 'numpy.cos', 'np.cos', (['(pitch * 0.5)'], {}), '(pitch * 0.5)\n', (3034, 3047), True, 'import numpy as np\n'), ((3057, 3076), 'numpy.sin', 'np.sin', (['(pitch * 0.5)'], {}), '(pitch * 0.5)\n', (3063, 3076), True, 'import numpy as np\n'), ((3086, 3104), 'numpy.cos', 'np.cos', (['(roll * 0.5)'], {}), '(roll * 0.5)\n', (3092, 3104), True, 'import numpy as np\n'), ((3114, 3132), 'numpy.sin', 'np.sin', (['(roll * 0.5)'], {}), '(roll * 0.5)\n', (3120, 3132), True, 'import numpy as np\n'), ((3142, 3165), 'quaternion.quaternion', 'quaternion.quaternion', ([], {}), '()\n', (3163, 3165), False, 'import quaternion\n'), ((3403, 3441), 'logging.getLogger', 'logging.getLogger', (['"""process_samples()"""'], {}), "('process_samples()')\n", (3420, 3441), False, 'import logging\n'), 
((3459, 3478), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (3476, 3478), False, 'import time\n'), ((4262, 4287), 'numpy.array', 'np.array', (['unique_bearings'], {}), '(unique_bearings)\n', (4270, 4287), True, 'import numpy as np\n'), ((4570, 4628), 'numpy.linalg.norm', 'np.linalg.norm', (['[yaw_max - yaw_min, pitch_max - pitch_min]'], {}), '([yaw_max - yaw_min, pitch_max - pitch_min])\n', (4584, 4628), True, 'import numpy as np\n'), ((6111, 6144), 'numpy.array', 'np.array', (['([start_point] + trimmed)'], {}), '([start_point] + trimmed)\n', (6119, 6144), True, 'import numpy as np\n'), ((459, 482), 'numpy.linalg.norm', 'np.linalg.norm', (['forward'], {}), '(forward)\n', (473, 482), True, 'import numpy as np\n'), ((886, 903), 'numpy.sqrt', 'np.sqrt', (['(num8 + 1)'], {}), '(num8 + 1)\n', (893, 903), True, 'import numpy as np\n'), ((1996, 2035), 'numpy.clip', 'np.clip', (['interpolated', 'out_min', 'out_max'], {}), '(interpolated, out_min, out_max)\n', (2003, 2035), True, 'import numpy as np\n'), ((3661, 3680), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (3678, 3680), False, 'import time\n'), ((7598, 7628), 'numpy.array', 'np.array', (['(trimmed + [endpoint])'], {}), '(trimmed + [endpoint])\n', (7606, 7628), True, 'import numpy as np\n'), ((7968, 8059), 'numpy.linalg.norm', 'np.linalg.norm', (['[sample[0] - samples[index - 1][0], sample[1] - samples[index - 1][1]]'], {}), '([sample[0] - samples[index - 1][0], sample[1] - samples[\n index - 1][1]])\n', (7982, 8059), True, 'import numpy as np\n'), ((1121, 1149), 'numpy.sqrt', 'np.sqrt', (['(m00 + 1 - m11 - m22)'], {}), '(m00 + 1 - m11 - m22)\n', (1128, 1149), True, 'import numpy as np\n'), ((3989, 4034), 'numpy.isclose', 'np.isclose', (['bearing[0]', 'samples[index - 1, 0]'], {}), '(bearing[0], samples[index - 1, 0])\n', (3999, 4034), True, 'import numpy as np\n'), ((4039, 4084), 'numpy.isclose', 'np.isclose', (['bearing[1]', 'samples[index - 1, 1]'], {}), '(bearing[1], samples[index - 
1, 1])\n', (4049, 4084), True, 'import numpy as np\n'), ((4921, 4999), 'numpy.linalg.norm', 'np.linalg.norm', (['[samples[i, 0] - samples[0, 0], samples[i, 1] - samples[0, 1]]'], {}), '([samples[i, 0] - samples[0, 0], samples[i, 1] - samples[0, 1]])\n', (4935, 4999), True, 'import numpy as np\n'), ((6349, 6436), 'numpy.linalg.norm', 'np.linalg.norm', (['[samples[-i, 0] - samples[-1, 0], samples[-i, 1] - samples[-1, 1]]'], {}), '([samples[-i, 0] - samples[-1, 0], samples[-i, 1] - samples[-\n 1, 1]])\n', (6363, 6436), True, 'import numpy as np\n'), ((1357, 1385), 'numpy.sqrt', 'np.sqrt', (['(m11 + 1 - m00 - m22)'], {}), '(m11 + 1 - m00 - m22)\n', (1364, 1385), True, 'import numpy as np\n'), ((1581, 1609), 'numpy.sqrt', 'np.sqrt', (['(m22 + 1 - m00 - m11)'], {}), '(m22 + 1 - m00 - m11)\n', (1588, 1609), True, 'import numpy as np\n'), ((2689, 2703), 'numpy.square', 'np.square', (['q.y'], {}), '(q.y)\n', (2698, 2703), True, 'import numpy as np\n'), ((2706, 2720), 'numpy.square', 'np.square', (['q.z'], {}), '(q.z)\n', (2715, 2720), True, 'import numpy as np\n'), ((2827, 2841), 'numpy.square', 'np.square', (['q.x'], {}), '(q.x)\n', (2836, 2841), True, 'import numpy as np\n'), ((2844, 2858), 'numpy.square', 'np.square', (['q.z'], {}), '(q.z)\n', (2853, 2858), True, 'import numpy as np\n'), ((5394, 5472), 'numpy.linalg.norm', 'np.linalg.norm', (['[samples[i, 0] - samples[0, 0], samples[i, 1] - samples[0, 1]]'], {}), '([samples[i, 0] - samples[0, 0], samples[i, 1] - samples[0, 1]])\n', (5408, 5472), True, 'import numpy as np\n'), ((5864, 5942), 'numpy.linalg.norm', 'np.linalg.norm', (['[samples[i, 0] - samples[0, 0], samples[i, 1] - samples[0, 1]]'], {}), '([samples[i, 0] - samples[0, 0], samples[i, 1] - samples[0, 1]])\n', (5878, 5942), True, 'import numpy as np\n'), ((6830, 6917), 'numpy.linalg.norm', 'np.linalg.norm', (['[samples[-i, 0] - samples[-1, 0], samples[-i, 1] - samples[-1, 1]]'], {}), '([samples[-i, 0] - samples[-1, 0], samples[-i, 1] - samples[-\n 1, 1]])\n', 
(6844, 6917), True, 'import numpy as np\n'), ((7308, 7395), 'numpy.linalg.norm', 'np.linalg.norm', (['[samples[-i, 0] - samples[-1, 0], samples[-i, 1] - samples[-1, 1]]'], {}), '([samples[-i, 0] - samples[-1, 0], samples[-i, 1] - samples[-\n 1, 1]])\n', (7322, 7395), True, 'import numpy as np\n'), ((9375, 9449), 'numpy.isclose', 'np.isclose', (['cumulative_segment_lengths[first_longer_sample]', 'target_length'], {}), '(cumulative_segment_lengths[first_longer_sample], target_length)\n', (9385, 9449), True, 'import numpy as np\n'), ((3604, 3623), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (3621, 3623), False, 'import time\n')] |
import tempfile
import argparse
import logging
import os
import pickle
import pprint
import time
import numpy as np
import yaml
import utils
import settings
import dataset_for_data_analysisv2 as dataset
import models
from collections import Counter
def get_inverse_dict(mydict):
    """Invert a one-to-one mapping, returning {value: key}.

    Args:
        mydict: a dict whose values are hashable and pairwise distinct.

    Returns:
        dict mapping each value of `mydict` back to its key.

    Raises:
        ValueError: if two keys share the same value (mapping is not one-one).
    """
    inverse_dict = {}
    for k, v in mydict.items():
        if v in inverse_dict:
            # BUG FIX: the original raised a bare string, which is itself a
            # TypeError in Python 3; raise a proper exception instead.
            raise ValueError(
                "Cannot construct inverse dictionary, as function not one-one")
        inverse_dict[v] = k
    return inverse_dict
def read_entity_names(path):
    """Read a tab-separated entity file and map entity id -> display name.

    Each line must hold at least three tab-separated columns:
    entity id, human-readable name, wiki id.  Only the name is kept; the
    wiki id column is currently unused (see commented-out alternative).

    Args:
        path: path to the TSV file (read as ASCII; undecodable bytes ignored).

    Returns:
        dict mapping entity id (column 0) to name (column 1).  On duplicate
        ids a warning is logged and the last occurrence wins.
    """
    entity_names = {}
    with open(path, "r", errors='ignore', encoding='ascii') as f:
        # Iterate the file lazily instead of materializing readlines().
        for line in f:
            content = [el.strip() for el in line.split('\t')]
            if content[0] in entity_names:
                # BUG FIX: logging.warn is a deprecated alias; use warning().
                logging.warning('Duplicate Entity found %s in line %s' %
                                (content[0], line))
            name = content[1]
            wiki_id = content[2]
            # entity_names[content[0]] = {"name": name, "wiki_id": wiki_id}
            entity_names[content[0]] = name
    return entity_names
# ---------------------------------------------------------------------------
# Command-line interface and experiment setup.
# ---------------------------------------------------------------------------
parser = argparse.ArgumentParser()
parser.add_argument('--training_data_path',
                    help="Training data path (pkl file)", type=str,
                    default = '../logs/fb15k/sm_with_id.data.pkl')
parser.add_argument('--labelled_training_data_path',
                    help="Labelled Training data path (pkl file)", type=str,
                    default = '../logs/fb15k/sm_sup_train_with_id.pkl')
parser.add_argument(
    '--val_data_path', help="Validation data path in the same format as training data", type=str, default='../logs/fb15k/test_hits1_single_label_sm.data.pkl.pkl')
parser.add_argument(
    '--val_labels_path', help="Validation data Labels path for multi-label evaluation", type=str, default='../data/fb15k/test/test_hits_1_ordered_y.txt')
parser.add_argument(
    '--train_labels_path', help="Training data Labels path for multi-label training", type=str, default='../logs/fb15k/sm_sup_train_multilabels.txt')
parser.add_argument('--batch_size', help='batch size',
                    type=int, default=256)
parser.add_argument('--exp_name', help='Experiment name',
                    type=str, default='data_analysis')
parser.add_argument(
    '--output_path', help='Output path to store models, and logs', type=str, required=True)
parser.add_argument('--each_input_size',
                    help='Input size of each template', type=int, default=7)
parser.add_argument(
    '--num_templates', help='number of templates excluding other', type=int, default=6)
parser.add_argument('--use_ids', help='Use embeddings of entity and relations while training',
                    action='store_true', default=False)
parser.add_argument('--mil', help='Use MIL model',
                    action='store_true', default=True)
parser.add_argument('--exclude_t_ids', nargs='+', type=int, required=False,default=[], help='List of templates to be excluded while making predictions')
parser.add_argument('--base_model_file',
                    help="Base model dump for loading embeddings", type=str, default='')
parser.add_argument('--supervision', help='possible values - un, semi, sup',
                    type=str, default='semi')
parser.add_argument('--default_value',help='default value of template score when it is undefined?', default = 0, type=float)
parser.add_argument('--exclude_default',help='should default value be excluded while computing stats?', default = 0, type=int)
parser.add_argument('--hidden_unit_list', nargs='*', type=int, required=False,default=[], help='number of hidden neurons in each layer')
parser.add_argument('--train_ml',help='should use multi label loss?', default = 1, type=int)
parser.add_argument('--eval_ml',help='should eval multi label ?', default = 1, type=int)
parser.add_argument('--out_file', help='output file where analysis is written',required = False,
                    type=str, default = None)
parser.add_argument('--expdir', help='load checkpoints and model from this dir',required = True,
                    type=str)
parser.add_argument('--checkpoint_file', help='load checkpoints and model from this file',required = True,
                    type=str)
#train_r0.125_p1_n-2.0_i4_k0.0_best_checkpoint.pth0
#expdir = 'temp/fb15k_ex25_sl_hul0'
args = parser.parse_args()
# Wrap the parsed args in utils.Map, which evidently supports both attribute
# access (args.x) and item access (args[key]) — both are used below.
config = {}
config.update(vars(args))
args = utils.Map(config)
# Mapping between original template ids and the compacted ids that remain
# after removing --exclude_t_ids (o2n = old->new, n2o = new->old).
o2n , n2o = utils.get_template_id_maps(args.num_templates, args.exclude_t_ids)
args.o2n = o2n
args.n2o = n2o
# The literal string 'None' on the command line means "no labels file".
for key in ['train_labels_path','val_labels_path']:
    if args[key] == 'None':
        args[key] = None
settings.set_settings(args)
#args.output_path = tempfile.TemporaryDirectory().name
os.makedirs(args.output_path)
train_loader, val_loader, labelled_train_loader = dataset.get_data_loaders(args)
model = models.select_model(args)
def SIDX(template_id, each_input_size=7):
    """Start column (inclusive) of a template's feature block in a data row.

    Rows are laid out as 3 leading columns followed by one fixed-width block
    per template; template ids are 1-based.

    Args:
        template_id: 1-based template index.
        each_input_size: width of one template's block.  Generalized from the
            hard-coded 7 (kept as the default, matching --each_input_size).
    """
    return 3 + (template_id - 1) * each_input_size
def EIDX(template_id, each_input_size=7):
    """End column (exclusive) of a template's feature block in a data row.

    Equals the start column of the following template's block.

    Args:
        template_id: 1-based template index.
        each_input_size: width of one template's block.  Generalized from the
            hard-coded 7 (kept as the default, matching --each_input_size).
    """
    return 3 + (template_id - 1) * each_input_size + each_input_size
# for i in val_loader.dataset.raw_data[:,SIDX(6)]:
# print (i)
# print (val_loader.dataset.raw_data[:,SIDX(2)])
#my_score, max_score, similarity, rank, conditional_rank, mean, std
#tid = 1
#for i in val_loader.dataset.data[:,SIDX(tid):SIDX(tid)+7+1]:
# for j in range(7):
# print (str(i[j])+" ",end="")
# print ("")
# for ln, loader in zip(['TRAIN UN','VAL','TRAIN LAB'],[train_loader, val_loader, labelled_train_loader]):
# for i in range(1,7):
# print('{}, Temp: {}, Mean: {}, Max Mean: {}'.format(ln,i,loader.dataset.raw_data[:,SIDX(i)].mean(),loader.dataset.raw_data[:,SIDX(i)+1].mean()))
# for ln, loader in zip(['TRAIN UN','VAL','TRAIN LAB'],[train_loader, val_loader, labelled_train_loader]):
# for i in range(1,7):
# print('{}, Temp: {}, Mean: {}, Max Mean: {}'.format(ln,i,loader.dataset.data[:,SIDX(i)].mean(),loader.dataset.raw_data[:,SIDX(i)+1].mean()))
def printmat(mat, file=None):
    """Print each array in *mat* as one comma-separated line of its flattened values."""
    rows = (",".join(str(value) for value in arr.ravel()) for arr in mat)
    print("\n".join(rows), file=file)
import os
import torch
# Load the saved checkpoint of each of the 5 training runs, keyed by run id.
d = {}
for i in range(1,6):
    chp = os.path.join(args.expdir,'run_{}'.format(i),'train',args.checkpoint_file)
    d[i] = torch.load(chp)
# State-dict keys of interest: first MLP layer weight/bias and the
# parameters used for the "others" template.
wt= 'mlp.0.0.weight'
bias = 'mlp.0.0.bias'
dp = 'others_template_parameters'
# A (1, 7) first-layer weight means the model is a log-linear scorer
# (single linear layer over the 7 per-template features).
loglin = d[1]['model'][wt].shape == (1,7)
if args.out_file is None:
    fh = None
else:
    fh = open(args.out_file,'w')
#print model wts if only a log linear model
if loglin:
    print("WEIGHTS in LOG LINEAR MODEL:",file =fh)
    print('My Score, Max Score, Similarity, Rank, Conditional Rank, Mean, Std',file=fh)
    printmat([d[i]['model'][wt].cpu().numpy() for i in range(1,6)],file =fh )
i = 5
loader = train_loader
model.load_state_dict(d[1]['model'])
# Dump the normalization stats saved with run 5 and assert they match the
# stats the training loader computed itself.
sf = os.path.join(args.expdir,'run_{}'.format(i), 'train','stats')
stats = pickle.load(open(sf,'rb'))
print("",file = fh)
print("STATS in THE STATS FILE", file = fh)
print("MEAN", file = fh)
printmat(stats['mean'][0].reshape(6,7),file =fh)
print("STD", file = fh)
printmat(stats['std'][0].reshape(6,7), file = fh)
meanmat = loader.dataset.stats['mean'][0].reshape(6,7)
stdmat = loader.dataset.stats['std'][0].reshape(6,7)
assert (meanmat == stats['mean'][0].reshape(6,7)).all()
assert (stdmat == stats['std'][0].reshape(6,7)).all()
"""
for ln, loader in zip(['TRAIN UN','VAL','TRAIN LAB'],[train_loader, val_loader, labelled_train_loader]):
    print(ln, loader.dataset.data.mean(axis=0)[3:-1].reshape(6,7))
for ln, loader in zip(['TRAIN UN','TEST','TRAIN LAB'],[train_loader, val_loader, labelled_train_loader]):
    print(ln, (d[1]['model'][wt].expand(6,7)*torch.tensor(loader.dataset.data.mean(axis=0)[3:-1].reshape(6,7)).float().cuda()).sum(dim=1))
data_indx = 10
ln = 'TEST'
loader = val_loader
print(ln, (d[1]['model'][wt].expand(6,7)*torch.tensor(loader.dataset.data[data_indx][3:-1].reshape(6,7)).float().cuda()).sum(dim=1))
loader.dataset.data[data_indx][3:-1].reshape(6,7)
"""
# ---------------------------------------------------------------------------
# Per-template analysis on the validation ("TEST") split: average features,
# log-linear score decomposition, and positive/negative prediction stats.
# Output goes to --out_file if given, otherwise to stdout (fh is None).
# ---------------------------------------------------------------------------
ln = 'TEST'
loader = val_loader
### Avg normalized features
avg_features = loader.dataset.data[:,3:-1].mean(axis=0).reshape(args.num_templates,args.each_input_size)
avg_features = np.concatenate((d[1]['model'][dp].cpu().numpy().reshape(1,args.each_input_size), avg_features),axis=0)
print("\nAVG FEATURE", file = fh)
print('My Score, Max Score, Similarity, Rank, Conditional Rank, Mean, Std',file=fh)
printmat(avg_features,file =fh)
if loglin:
    # For the log-linear model the score decomposes as weight * feature +
    # bias; append per-row bias and total columns for readability.
    score_of_avg_features = d[1]['model'][wt].expand(avg_features.shape).cpu().numpy()*avg_features
    #score_of_avg_features = np.concatenate((score_of_avg_features,d[1]['model'][dp].cpu().numpy().reshape(1,7)),axis=0)
    score_of_avg_features = np.concatenate((score_of_avg_features,np.repeat(d[1]['model'][bias].cpu().numpy(), score_of_avg_features.shape[0]).reshape(score_of_avg_features.shape[0],1)), axis = 1)
    score_of_avg_features = np.concatenate((score_of_avg_features,score_of_avg_features.sum(axis=1).reshape(-1,1)), axis=1)
    print("For LOG LINEAR MODEL: AVG FEATURE * WT, BIAS, Total", file = fh)
    print('My Score, Max Score, Similarity, Rank, Conditional Rank, Mean, Std, Bias, Total',file=fh)
    printmat(score_of_avg_features, file = fh)
Y = None
if loader.dataset.Y is not None:
    Y = np.array(loader.dataset.Y)
model = model.cuda()
if Y is not None:
    with torch.no_grad():
        model.eval()
        for template_id in range(0,args.num_templates+1):
            if template_id not in args.exclude_t_ids:
                new_template_id = args.o2n[template_id]
                # Split the data by the gold label of this template.
                pos_ind = (Y[:,new_template_id] == 1)
                neg_ind = (Y[:,new_template_id] != 1)
                pos_data = torch.FloatTensor(loader.dataset.data[pos_ind,3:-1]).cuda()
                neg_data = torch.FloatTensor(loader.dataset.data[neg_ind,3:-1]).cuda()
                print("TEMPLATE {}".format(template_id), file = fh)
                if pos_data.size(0) > 0:
                    pos_score = model(pos_data)
                    pos_prediction = pos_score.max(dim=1)[1]
                    pos_counter = Counter(pos_prediction.detach().cpu().numpy())
                    print("Total Positives: {}. Predicted as: ".format(pos_data.size(0)), file = fh)
                    printmat(np.array([[pos_counter[args.o2n[x]] for x in range(0,args.num_templates+1)]]),file =fh)
                    avg_pos_score = pos_score.mean(dim=0)
                    avg_pos_feature = pos_data.mean(dim=0)
                    pos_score_avg_feat = model(avg_pos_feature.unsqueeze(0))
                    print("AVG score POSITIVE ", file = fh)
                    printmat(avg_pos_score.detach().cpu().numpy()[args.o2n].reshape(1,-1),file = fh)
                    print("score At Avg Features POSITIVE ", file = fh)
                    printmat(pos_score_avg_feat.detach().cpu().numpy()[0][args.o2n].reshape(1,-1),file = fh)
                    print("AVG FEATUREs when POSITIVE", file = fh)
                    printmat(avg_pos_feature.reshape(args.num_templates,args.each_input_size).cpu().numpy(), file= fh)
                if neg_data.size(0) > 0:
                    neg_score = model(neg_data)
                    neg_prediction = neg_score.max(dim=1)[1]
                    neg_counter = Counter(neg_prediction.detach().cpu().numpy())
                    avg_neg_score = neg_score.mean(dim=0)
                    avg_neg_feature = neg_data.mean(dim=0)
                    neg_score_avg_feat = model(avg_neg_feature.unsqueeze(0))
                    print("Total Negatives: {}. Predicted as: ".format(neg_data.size(0)), file = fh)
                    printmat(np.array([[neg_counter[args.o2n[x]] for x in range(0,args.num_templates+1)]]),file =fh)
                    print("AVG score NEGATIVE ", file = fh)
                    printmat(avg_neg_score.detach().cpu().numpy()[args.o2n].reshape(1,-1),file = fh)
                    print("score At Avg Features NEGATIVE ", file = fh)
                    printmat(neg_score_avg_feat.detach().cpu().numpy()[0][args.o2n].reshape(1,-1),file = fh)
                    print("AVG FEATUREs when NEGATIVE", file = fh)
                    printmat(avg_neg_feature.reshape(args.num_templates, args.each_input_size).cpu().numpy(), file= fh)
                #Positive prediction(TEST) average features
                print("AVG FEATUREs when POSITIVE PREDICTIONS TEST", file = fh)
                pos_data = torch.FloatTensor(val_loader.dataset.data[:,3:-1]).cuda()
                pos_score = model(pos_data)
                pos_prediction = pos_score.max(dim=1)[1]
                bool_arr = (pos_prediction == new_template_id)
                printmat((((pos_data*bool_arr.reshape(pos_data.size(0),1).float()).mean(dim=0)).reshape(args.num_templates,args.each_input_size)).cpu().numpy(), file=fh)
                #Negative prediction(TEST) average features
                print("AVG FEATUREs when NEGATIVE PREDICTIONS TEST", file = fh)
                pos_data = torch.FloatTensor(val_loader.dataset.data[:,3:-1]).cuda()
                pos_score = model(pos_data)
                pos_prediction = pos_score.max(dim=1)[1]
                bool_arr = (pos_prediction != new_template_id)
                printmat((((pos_data*bool_arr.reshape(pos_data.size(0),1).float()).mean(dim=0)).reshape(args.num_templates,args.each_input_size)).cpu().numpy(), file=fh)
if fh is not None:
    # BUG FIX: fh is None when --out_file is unset (output went to stdout);
    # unconditionally calling fh.close() raised AttributeError.
    fh.close()
# ---------------------------------------------------------------------------
# Ad-hoc inspection of a single datapoint (notebook-style scratch code):
# resolve its entity ids to human-readable names and look at the model's
# template scores for it.
# ---------------------------------------------------------------------------
import importlib  # the Python 2 reload() builtin moved to importlib in Python 3
distdump = pickle.load(open('../dumps/fb15k_distmult_dump_norm.pkl','rb'))
entity_names = read_entity_names('../../data/fb15k/mid2wikipedia_cleaned.tsv')
import sys
sys.path.insert(0,'..')
import models as dm
# BUG FIX: bare reload() is a NameError under Python 3; use importlib.reload.
importlib.reload(dm)
base_model = dm.TypedDM('../dumps/fb15k_distmult_dump_norm.pkl')
#analyze a datapoint
model.eval()
fh = None  # write this datapoint's analysis to stdout
ind = 36
dat = loader.dataset.data[ind:(ind+1)]
pos_data = torch.FloatTensor(loader.dataset.data[ind:(ind+1),3:-1]).cuda()
pos_score = model(pos_data)
pos_prediction = pos_score.max(dim=1)[1]
pos_counter = Counter(pos_prediction.detach().cpu().numpy())
print("Total Positives: {}. Predicted as: ".format(pos_data.size(0)), file = fh)
printmat(np.array([[pos_counter[args.o2n[x]] for x in range(0,args.num_templates+1)]]),file =fh)
avg_pos_score = pos_score.mean(dim=0)
avg_pos_feature = pos_data.mean(dim=0)
pos_score_avg_feat = model(avg_pos_feature.unsqueeze(0))
print("AVG score POSITIVE ", file = fh)
printmat(avg_pos_score.detach().cpu().numpy()[args.o2n].reshape(1,-1),file = fh)
print("score At Avg Features POSITIVE ", file = fh)
printmat(pos_score_avg_feat.detach().cpu().numpy()[0][args.o2n].reshape(1,-1),file = fh)
print("AVG FEATUREs when POSITIVE", file = fh)
printmat(avg_pos_feature.reshape(args.num_templates,args.each_input_size).cpu().numpy(), file= fh)
# Map internal integer ids back to freebase mids, then to readable names.
eti_inv = get_inverse_dict(distdump['entity_to_id'])
rti_inv = get_inverse_dict(distdump['relation_to_id'])
entity_names[eti_inv[dat[0,0]]]
entity_names[eti_inv[dat[0,2]]]
# Repeated scratch block — kept as-is apart from the reload() fix above.
import sys
sys.path.insert(0,'..')
import models as dm
importlib.reload(dm)
base_model = dm.TypedDM('../dumps/fb15k_distmult_dump_norm.pkl')
template3 = pickle.load(open('../logs/fb15k/3.pkl','rb'))
template3['table'][(13864,138)]
"""
loader = train_loader
valid_indx = {}
for i in range(1,7):
    valid_indx[i] = np.logical_not(loader.dataset.raw_data[:,SIDX(i)] == 0)
def_stats = []
for i in range(1,7):
    def_stats.append(loader.dataset.raw_data[valid_indx[i],SIDX(i):EIDX(i)].min(axis=0))
printmat(def_stats)
"""
| [
"os.makedirs",
"argparse.ArgumentParser",
"dataset_for_data_analysisv2.get_data_loaders",
"utils.Map",
"logging.warn",
"torch.load",
"utils.get_template_id_maps",
"sys.path.insert",
"torch.FloatTensor",
"numpy.array",
"settings.set_settings",
"models.select_model",
"models.TypedDM",
"torch... | [((1173, 1198), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (1196, 1198), False, 'import argparse\n'), ((4495, 4512), 'utils.Map', 'utils.Map', (['config'], {}), '(config)\n', (4504, 4512), False, 'import utils\n'), ((4525, 4591), 'utils.get_template_id_maps', 'utils.get_template_id_maps', (['args.num_templates', 'args.exclude_t_ids'], {}), '(args.num_templates, args.exclude_t_ids)\n', (4551, 4591), False, 'import utils\n'), ((4735, 4762), 'settings.set_settings', 'settings.set_settings', (['args'], {}), '(args)\n', (4756, 4762), False, 'import settings\n'), ((4819, 4848), 'os.makedirs', 'os.makedirs', (['args.output_path'], {}), '(args.output_path)\n', (4830, 4848), False, 'import os\n'), ((4901, 4931), 'dataset_for_data_analysisv2.get_data_loaders', 'dataset.get_data_loaders', (['args'], {}), '(args)\n', (4925, 4931), True, 'import dataset_for_data_analysisv2 as dataset\n'), ((4941, 4966), 'models.select_model', 'models.select_model', (['args'], {}), '(args)\n', (4960, 4966), False, 'import models\n'), ((13592, 13616), 'sys.path.insert', 'sys.path.insert', (['(0)', '""".."""'], {}), "(0, '..')\n", (13607, 13616), False, 'import sys\n'), ((13660, 13711), 'models.TypedDM', 'dm.TypedDM', (['"""../dumps/fb15k_distmult_dump_norm.pkl"""'], {}), "('../dumps/fb15k_distmult_dump_norm.pkl')\n", (13670, 13711), True, 'import models as dm\n'), ((14925, 14949), 'sys.path.insert', 'sys.path.insert', (['(0)', '""".."""'], {}), "(0, '..')\n", (14940, 14949), False, 'import sys\n'), ((14993, 15044), 'models.TypedDM', 'dm.TypedDM', (['"""../dumps/fb15k_distmult_dump_norm.pkl"""'], {}), "('../dumps/fb15k_distmult_dump_norm.pkl')\n", (15003, 15044), True, 'import models as dm\n'), ((6255, 6270), 'torch.load', 'torch.load', (['chp'], {}), '(chp)\n', (6265, 6270), False, 'import torch\n'), ((9273, 9299), 'numpy.array', 'np.array', (['loader.dataset.Y'], {}), '(loader.dataset.Y)\n', (9281, 9299), True, 'import numpy as np\n'), ((9349, 9364), 
'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (9362, 9364), False, 'import torch\n'), ((13818, 13875), 'torch.FloatTensor', 'torch.FloatTensor', (['loader.dataset.data[ind:ind + 1, 3:-1]'], {}), '(loader.dataset.data[ind:ind + 1, 3:-1])\n', (13835, 13875), False, 'import torch\n'), ((846, 919), 'logging.warn', 'logging.warn', (["('Duplicate Entity found %s in line %s' % (content[0], line))"], {}), "('Duplicate Entity found %s in line %s' % (content[0], line))\n", (858, 919), False, 'import logging\n'), ((9691, 9744), 'torch.FloatTensor', 'torch.FloatTensor', (['loader.dataset.data[pos_ind, 3:-1]'], {}), '(loader.dataset.data[pos_ind, 3:-1])\n', (9708, 9744), False, 'import torch\n'), ((9778, 9831), 'torch.FloatTensor', 'torch.FloatTensor', (['loader.dataset.data[neg_ind, 3:-1]'], {}), '(loader.dataset.data[neg_ind, 3:-1])\n', (9795, 9831), False, 'import torch\n'), ((12453, 12504), 'torch.FloatTensor', 'torch.FloatTensor', (['val_loader.dataset.data[:, 3:-1]'], {}), '(val_loader.dataset.data[:, 3:-1])\n', (12470, 12504), False, 'import torch\n'), ((13017, 13068), 'torch.FloatTensor', 'torch.FloatTensor', (['val_loader.dataset.data[:, 3:-1]'], {}), '(val_loader.dataset.data[:, 3:-1])\n', (13034, 13068), False, 'import torch\n')] |
import warnings
import scipy.ndimage
import numba
import numpy as np
def add_noise(image, model, mask=None):
    """
    Apply a noise model to a simulated CCD image.

    Parameters
    ----------
    image : array_like
        Input image data.
    model : callable
        Noise model; called with the image array and must return the
        noisy image.
    mask : ndarray, optional
        Boolean mask.  Where the mask is True *and* the pixel value is
        finite the noisy value is kept; everywhere else the original
        pixel is restored.

    Returns
    -------
    res : ndarray
    """
    data = np.asarray(image)
    noisy = model(data)
    if mask is not None:
        # Keep noise only on finite, masked pixels; restore the rest.
        keep = np.logical_and(np.isfinite(data), mask)
        restore = np.logical_not(keep)
        noisy[restore] = data[restore]
    return noisy
def filter(image, sigma):
    """
    Filters image with a 2d gaussian kernel.

    Note: the name mirrors the original API of this module and therefore
    shadows the ``filter`` builtin inside this file.

    Parameters
    ----------
    image : ndarray
        Image data to smooth.
    sigma : float
        Standard deviation of the Gaussian kernel; ``0`` returns the
        input unchanged (same object, no copy).

    Returns
    -------
    res : ndarray
    """
    if sigma == 0:
        return image
    # The ``scipy.ndimage.filters`` sub-namespace was deprecated and has
    # been removed in modern SciPy releases; call the function from the
    # public ``scipy.ndimage`` namespace instead (same behaviour).
    return scipy.ndimage.gaussian_filter(image, sigma)
def filter2d(image, sigma):
    """
    Filters image with a 2d gaussian kernel via the Fourier domain.

    Parameters
    ----------
    image : ndarray
        Image data to smooth.
    sigma : float
        Width of the Gaussian kernel; ``0`` returns the input unchanged.

    Returns
    -------
    res : ndarray

    Hints
    -----
    The 2d (Fourier) filter function can be faster than the 1d function.
    """
    if sigma == 0:
        return image
    # Multiply by a Gaussian in frequency space, then transform back.
    spectrum = np.fft.fft2(image)
    blurred = scipy.ndimage.fourier_gaussian(spectrum, sigma=sigma)
    return np.fft.ifft2(blurred).real
@numba.njit(cache=True)
def resample(image, scale):
    """
    Resamples image according to scale by binning CCD pixels.

    Each input pixel centre is mapped onto the output grid and all
    values falling into the same output pixel are averaged.

    Parameters
    ----------
    image : ndarray
        Image data to resample.
    scale : float
        Scale value for the image transformation; the output shape is
        ``round(shape * scale)``.

    Returns
    -------
    res : ndarray
    """
    # Output grid size, rounded from the scaled input shape.
    sx = int(round(image.shape[0] * scale))
    sy = int(round(image.shape[1] * scale))
    data = np.zeros((sx, sy), dtype=image.dtype)
    # Count of input pixels accumulated into each output pixel.
    divisor = np.zeros((sx, sy), dtype=np.int64)
    for i in range(image.shape[0]):
        # Map the input pixel centre (i + 0.5) onto the output grid.
        ii = int(np.floor((i + 0.5) * scale))
        if ii >= data.shape[0]:
            continue
        for j in range(image.shape[1]):
            jj = int(np.floor((j + 0.5) * scale))
            if jj >= data.shape[1]:
                continue
            divisor[ii, jj] += 1
            data[ii, jj] += image[i, j]
    # Average the accumulated values per output pixel.
    # NOTE(review): for scale > 1 some output pixels receive no input pixel
    # (divisor == 0) and this divides by zero -- presumably the function is
    # only used for downsampling (scale <= 1); confirm.
    data = np.divide(data, divisor)
    return data
def resize(image, scale, order=1):
    """
    Resizes image according to scale.

    Parameters
    ----------
    image : ndarray
        Image data to resize.
    scale : float
        Zoom factor applied to every axis.
    order : int, optional
        Spline interpolation order passed to ``scipy.ndimage.zoom``.

    Returns
    -------
    res : ndarray
    """
    with warnings.catch_warnings():
        # Silence a historical scipy warning about the zoom output shape.
        warnings.filterwarnings(
            'ignore',
            r'From scipy 0.13.0, the output shape of zoom\(\) is calculated '
            r'with round\(\) instead of int\(\) - for these inputs the size '
            r'of the returned array has changed.')
        zoomed = scipy.ndimage.zoom(image, scale, order=order)
    return zoomed
def filter_resize(image, sigma, scale, order=1):
    """
    Gaussian-blur the image, then resize it.

    The blur width is divided by ``scale`` so that the smoothing is
    expressed in output-pixel units.

    Parameters
    ----------
    image : ndarray
        Image data to transform.
    sigma : float
        Width of the Gaussian kernel (in output pixels).
    scale : float
        Scale value for the image transformation.
    order : int, optional
        Spline interpolation order forwarded to ``resize``.

    Returns
    -------
    res : ndarray
    """
    blurred = filter(image, sigma / scale)
    return resize(blurred, scale, order)
def filter_resample(image, sigma, scale):
    """
    Gaussian-blur the image, then resample it onto the CCD grid.

    The blur width is divided by ``scale`` so that the smoothing is
    expressed in output-pixel units.

    Parameters
    ----------
    image : ndarray
        Image data to transform.
    sigma : float
        Width of the Gaussian kernel (in output pixels).
    scale : float
        Scale value for the image transformation.

    Returns
    -------
    res : ndarray
    """
    blurred = filter(image, sigma / scale)
    return resample(blurred, scale)
| [
"numpy.divide",
"warnings.filterwarnings",
"numpy.floor",
"numba.njit",
"numpy.zeros",
"numpy.isfinite",
"numpy.array",
"warnings.catch_warnings",
"numpy.fft.fft2"
] | [((1628, 1650), 'numba.njit', 'numba.njit', ([], {'cache': '(True)'}), '(cache=True)\n', (1638, 1650), False, 'import numba\n'), ((500, 527), 'numpy.array', 'np.array', (['image'], {'copy': '(False)'}), '(image, copy=False)\n', (508, 527), True, 'import numpy as np\n'), ((2099, 2136), 'numpy.zeros', 'np.zeros', (['(sx, sy)'], {'dtype': 'image.dtype'}), '((sx, sy), dtype=image.dtype)\n', (2107, 2136), True, 'import numpy as np\n'), ((2151, 2185), 'numpy.zeros', 'np.zeros', (['(sx, sy)'], {'dtype': 'np.int64'}), '((sx, sy), dtype=np.int64)\n', (2159, 2185), True, 'import numpy as np\n'), ((2557, 2581), 'numpy.divide', 'np.divide', (['data', 'divisor'], {}), '(data, divisor)\n', (2566, 2581), True, 'import numpy as np\n'), ((2904, 2929), 'warnings.catch_warnings', 'warnings.catch_warnings', ([], {}), '()\n', (2927, 2929), False, 'import warnings\n'), ((2939, 3149), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""', '"""From scipy 0.13.0, the output shape of zoom\\\\(\\\\) is calculated with round\\\\(\\\\) instead of int\\\\(\\\\) - for these inputs the size of the returned array has changed."""'], {}), "('ignore',\n 'From scipy 0.13.0, the output shape of zoom\\\\(\\\\) is calculated with round\\\\(\\\\) instead of int\\\\(\\\\) - for these inputs the size of the returned array has changed.'\n )\n", (2962, 3149), False, 'import warnings\n'), ((2240, 2267), 'numpy.floor', 'np.floor', (['((i + 0.5) * scale)'], {}), '((i + 0.5) * scale)\n', (2248, 2267), True, 'import numpy as np\n'), ((624, 642), 'numpy.isfinite', 'np.isfinite', (['image'], {}), '(image)\n', (635, 642), True, 'import numpy as np\n'), ((1586, 1604), 'numpy.fft.fft2', 'np.fft.fft2', (['image'], {}), '(image)\n', (1597, 1604), True, 'import numpy as np\n'), ((2383, 2410), 'numpy.floor', 'np.floor', (['((j + 0.5) * scale)'], {}), '((j + 0.5) * scale)\n', (2391, 2410), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
"""
Created on Wed Oct 23 15:32:07 2019
A really simple implementation of Keras functional model
@author: kerem.ataman
"""
from numpy import genfromtxt
from numpy import array
from numpy import reshape
from tensorflow.keras.preprocessing.sequence import TimeseriesGenerator
from tensorflow.keras.callbacks import EarlyStopping
from tensorflow.keras.layers import Dense, LSTM
from tensorflow.keras.layers import Input
from tensorflow.keras.models import Model
import os
def create_dataset(dataset, look_back=1):
    """Build (window, next-value) training pairs from column 0 of *dataset*.

    X[i] holds ``look_back`` consecutive values starting at row i and
    Y[i] is the value immediately following that window.  As in the
    original implementation, the last usable window is skipped
    (the ``- 1`` below).
    """
    windows, targets = [], []
    n_samples = len(dataset) - look_back - 1
    for start in range(n_samples):
        windows.append(dataset[start:(start + look_back), 0])
        targets.append(dataset[start + look_back, 0])
    return array(windows), array(targets)
# --- Hyper-parameters and data locations ------------------------------------
train_data_percentage = 0.67
inputLocation = '741_train_x.csv'
labelLocation = '741_train_y.csv'
num_epochs = 100
lstm_state_size = 64
look_back = 32
# used to be 1 for some bizarre reason
batch_size = 32
# --- Load features (x) and labels (y); the first CSV row is a header --------
x = genfromtxt(inputLocation, delimiter=',', skip_header = 1)
y = genfromtxt(os.getcwd()+'/'+labelLocation, delimiter=',', skip_header = 1)
num_features = x.shape[1]
num_labels = y.shape[1]
# --- Chronological train/test split -----------------------------------------
# NOTE(review): train_x/train_y are built here but never used below; the
# windowed sets trainX/trainY are created from the *full* array x instead.
# Confirm whether training was meant to use only the train split.
train_size = round(len(x) * train_data_percentage)
train_x = x[0:train_size,:]
train_y = y[0:train_size,:]
test_x = x[train_size:len(x),:]
test_y = y[train_size:len(x),:]
trainX, trainY = create_dataset(x, look_back)
# reshape input to be [samples, time steps, features]
trainX = reshape(trainX, (trainX.shape[0], 1, trainX.shape[1]))
# NOTE(review): the "test" windows are built from the label array y rather
# than from held-out features -- looks like a copy-paste; confirm.
testX, testY = create_dataset(y, look_back)
testX = reshape(testX, (testX.shape[0], 1, testX.shape[1]))
# --- Functional-API model: Input -> LSTM -> Dense ----------------------------
# NOTE(review): Input expects shape (num_features, look_back) while trainX is
# reshaped to (samples, 1, look_back) above -- the two do not match; confirm.
visible = Input(shape=(num_features,look_back))
hidden1 = LSTM(lstm_state_size)(visible)
output = Dense(num_labels)(hidden1)
model = Model(inputs=visible, outputs=output)
# NOTE(review): 'accuracy' is a classification metric; the loss is MSE
# (regression) -- a regression metric (e.g. 'mae') may be intended.
model.compile(optimizer = 'sgd', loss='mse', metrics = ['accuracy'])
es = EarlyStopping(monitor='accuracy', restore_best_weights = True)
model.fit(trainX,trainY, epochs= num_epochs, callbacks = [es])
# Evaluate on the raw (un-windowed) test split; see the shape note above.
results = model.evaluate(test_x,test_y,batch_size)
#save model
model.save('functional_model_128.h5')
"tensorflow.keras.layers.Dense",
"os.getcwd",
"numpy.genfromtxt",
"tensorflow.keras.models.Model",
"tensorflow.keras.layers.LSTM",
"tensorflow.keras.layers.Input",
"numpy.reshape",
"numpy.array",
"tensorflow.keras.callbacks.EarlyStopping"
] | [((985, 1040), 'numpy.genfromtxt', 'genfromtxt', (['inputLocation'], {'delimiter': '""","""', 'skip_header': '(1)'}), "(inputLocation, delimiter=',', skip_header=1)\n", (995, 1040), False, 'from numpy import genfromtxt\n'), ((1464, 1518), 'numpy.reshape', 'reshape', (['trainX', '(trainX.shape[0], 1, trainX.shape[1])'], {}), '(trainX, (trainX.shape[0], 1, trainX.shape[1]))\n', (1471, 1518), False, 'from numpy import reshape\n'), ((1573, 1624), 'numpy.reshape', 'reshape', (['testX', '(testX.shape[0], 1, testX.shape[1])'], {}), '(testX, (testX.shape[0], 1, testX.shape[1]))\n', (1580, 1624), False, 'from numpy import reshape\n'), ((1638, 1676), 'tensorflow.keras.layers.Input', 'Input', ([], {'shape': '(num_features, look_back)'}), '(shape=(num_features, look_back))\n', (1643, 1676), False, 'from tensorflow.keras.layers import Input\n'), ((1766, 1803), 'tensorflow.keras.models.Model', 'Model', ([], {'inputs': 'visible', 'outputs': 'output'}), '(inputs=visible, outputs=output)\n', (1771, 1803), False, 'from tensorflow.keras.models import Model\n'), ((1880, 1940), 'tensorflow.keras.callbacks.EarlyStopping', 'EarlyStopping', ([], {'monitor': '"""accuracy"""', 'restore_best_weights': '(True)'}), "(monitor='accuracy', restore_best_weights=True)\n", (1893, 1940), False, 'from tensorflow.keras.callbacks import EarlyStopping\n'), ((1687, 1708), 'tensorflow.keras.layers.LSTM', 'LSTM', (['lstm_state_size'], {}), '(lstm_state_size)\n', (1691, 1708), False, 'from tensorflow.keras.layers import Dense, LSTM\n'), ((1728, 1745), 'tensorflow.keras.layers.Dense', 'Dense', (['num_labels'], {}), '(num_labels)\n', (1733, 1745), False, 'from tensorflow.keras.layers import Dense, LSTM\n'), ((732, 744), 'numpy.array', 'array', (['dataX'], {}), '(dataX)\n', (737, 744), False, 'from numpy import array\n'), ((746, 758), 'numpy.array', 'array', (['dataY'], {}), '(dataY)\n', (751, 758), False, 'from numpy import array\n'), ((1059, 1070), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (1068, 1070), 
False, 'import os\n')] |
import numpy as np
# ------------------ CONSTANTS --------------------
# Modulation frequency [Hz] -- presumably of the light source; confirm.
F_M = 50.1 * 10**6
# Reference (theoretical) propagation speeds [m/s] used by rel_err below.
V_TUBE = 2.248 * 10**8
V_SOLID = 1.87 * 10**8
V_AIR = 2.99 * 10**8
# Sample lengths [m].
L_SOLID = 0.3
L_TUBE = 1 # +-0.001
# Speed of light in vacuum [m/s].
V_C = 2.998 * 10**8
# Vacuum permittivity [F/m].
E_0 = 8.854 * 10** -12
# NOTE(review): the vacuum permeability is ~1.2566e-6 H/m; the value below
# (1.1257e-6) looks like a typo -- confirm (it is unused in the visible code).
MU_0 = 1.1257 * 10** -6
# ------------------------------------------------
def rel_err(v_theo: float, v_exp: float) -> float:
    """Relative error of an experimental value, in percent.

    Args:
        v_theo (float): Theoretical (reference) value.
        v_exp (float): Experimental value.

    Returns:
        float: ``|v_theo - v_exp| / v_theo * 100``.
    """
    deviation = abs(v_theo - v_exp)
    return deviation / v_theo * 100
# --- Measured start/end positions for each medium (same units throughout) ---
air_xi = np.array([0,0,0,0,0])
air_xf = np.array([1.42, 1.42, 1.41, 1.41, 1.405]) # +- 0.0025
air_mean = np.mean(air_xf - air_xi) # +- 0.0025 ?
v_air = 4 * F_M * air_mean
rel_err_air = rel_err(V_AIR, v_air)

solid_xi = np.array([1.1475, 1.13, 1.135, 1.15, 1.13])
solid_xf = np.array([1.375, 1.375, 1.3775, 1.38, 1.38]) # +- 0.0025
solid_mean = np.mean(solid_xf - solid_xi) # +- 0.0025 ?
v_solid = (L_SOLID*v_air) / (L_SOLID + 2*solid_mean)
rel_err_solid = rel_err(V_SOLID, v_solid)

tube_xi = np.array([1.145, 1.145, 1.14, 1.14, 1.12])
tube_xf = np.array([1.375, 1.38, 1.37, 1.3725, 1.3775]) # +- 0.0025
# BUG FIX: this previously averaged the *solid* measurements
# (np.mean(solid_xf - solid_xi)), silently reusing the solid data.
tube_mean = np.mean(tube_xf - tube_xi) # +- 0.0025 ?
# BUG FIX: the tube velocity also used solid_mean instead of tube_mean.
v_tube = (L_TUBE*v_air) / (L_TUBE + 2*tube_mean)
rel_err_tube = rel_err(V_TUBE, v_tube)

if __name__ == '__main__':
    from leastsq import leastsq  # NOTE(review): imported but unused here
    from to_latex import to_latex
    # Export the raw measurement tables for the report.
    x = ['X_i', 'X_f']
    to_latex(x, [air_xi, air_xf], buf='air.tex', index=False, escape=False, column_format='c|c')
    to_latex(x, [solid_xi, solid_xf], buf='solid.tex', index=False, escape=False, column_format='c|c')
    to_latex(x, [tube_xi, tube_xf], buf='tube.tex', index=False, escape=False, column_format='c|c')
| [
"numpy.mean",
"numpy.array",
"to_latex.to_latex"
] | [((695, 720), 'numpy.array', 'np.array', (['[0, 0, 0, 0, 0]'], {}), '([0, 0, 0, 0, 0])\n', (703, 720), True, 'import numpy as np\n'), ((727, 768), 'numpy.array', 'np.array', (['[1.42, 1.42, 1.41, 1.41, 1.405]'], {}), '([1.42, 1.42, 1.41, 1.41, 1.405])\n', (735, 768), True, 'import numpy as np\n'), ((793, 817), 'numpy.mean', 'np.mean', (['(air_xf - air_xi)'], {}), '(air_xf - air_xi)\n', (800, 817), True, 'import numpy as np\n'), ((914, 957), 'numpy.array', 'np.array', (['[1.1475, 1.13, 1.135, 1.15, 1.13]'], {}), '([1.1475, 1.13, 1.135, 1.15, 1.13])\n', (922, 957), True, 'import numpy as np\n'), ((970, 1014), 'numpy.array', 'np.array', (['[1.375, 1.375, 1.3775, 1.38, 1.38]'], {}), '([1.375, 1.375, 1.3775, 1.38, 1.38])\n', (978, 1014), True, 'import numpy as np\n'), ((1041, 1069), 'numpy.mean', 'np.mean', (['(solid_xf - solid_xi)'], {}), '(solid_xf - solid_xi)\n', (1048, 1069), True, 'import numpy as np\n'), ((1197, 1239), 'numpy.array', 'np.array', (['[1.145, 1.145, 1.14, 1.14, 1.12]'], {}), '([1.145, 1.145, 1.14, 1.14, 1.12])\n', (1205, 1239), True, 'import numpy as np\n'), ((1251, 1296), 'numpy.array', 'np.array', (['[1.375, 1.38, 1.37, 1.3725, 1.3775]'], {}), '([1.375, 1.38, 1.37, 1.3725, 1.3775])\n', (1259, 1296), True, 'import numpy as np\n'), ((1322, 1350), 'numpy.mean', 'np.mean', (['(solid_xf - solid_xi)'], {}), '(solid_xf - solid_xi)\n', (1329, 1350), True, 'import numpy as np\n'), ((1592, 1688), 'to_latex.to_latex', 'to_latex', (['x', '[air_xi, air_xf]'], {'buf': '"""air.tex"""', 'index': '(False)', 'escape': '(False)', 'column_format': '"""c|c"""'}), "(x, [air_xi, air_xf], buf='air.tex', index=False, escape=False,\n column_format='c|c')\n", (1600, 1688), False, 'from to_latex import to_latex\n'), ((1690, 1793), 'to_latex.to_latex', 'to_latex', (['x', '[solid_xi, solid_xf]'], {'buf': '"""solid.tex"""', 'index': '(False)', 'escape': '(False)', 'column_format': '"""c|c"""'}), "(x, [solid_xi, solid_xf], buf='solid.tex', index=False, escape=\n False, 
column_format='c|c')\n", (1698, 1793), False, 'from to_latex import to_latex\n'), ((1794, 1893), 'to_latex.to_latex', 'to_latex', (['x', '[tube_xi, tube_xf]'], {'buf': '"""tube.tex"""', 'index': '(False)', 'escape': '(False)', 'column_format': '"""c|c"""'}), "(x, [tube_xi, tube_xf], buf='tube.tex', index=False, escape=False,\n column_format='c|c')\n", (1802, 1893), False, 'from to_latex import to_latex\n')] |
import ROOT as rt
import matplotlib.pyplot as plt
from sklearn.metrics import roc_auc_score, roc_curve
import numpy as np
import argparse
import os
import random
# Global font size used for all plot text (labels, ticks, legend) below.
fs=25
# Command-line options (the help strings were left empty by the author).
parser=argparse.ArgumentParser()
parser.add_argument("--var",type=str,default="eta",help='')
parser.add_argument("--savename",type=str,default="savemae",help='')
parser.add_argument("--eta",type=float,default=0,help='')
parser.add_argument("--etabin",type=float,default=2.4,help='')
# pt selection window, expressed as fractions of --pt (see the loop below).
parser.add_argument("--ptmin",type=float,default=0,help='')
parser.add_argument("--ptmax",type=float,default=2.,help='')
parser.add_argument("--pt",type=int,default=100,help='')
parser.add_argument("--get",type=str,default="pt",help='')
parser.add_argument("--gaus",type=int,default=0,help='')
args=parser.parse_args()
# Draw one ROC figure per pt working point (currently only 100).
for bpt in [100]:
  args.pt=bpt
  # For the default "pt" mode, narrow the pt window at the 100 working point.
  if("pt" in args.get):
    if(args.pt==100):
      args.ptmin=0.815
      args.ptmax=1.159
  pt=args.pt
  res=100
  #os.system("ls save/"+args.savename+"/get.root")
  #names=["drbox/qg423rot50out.npz","asuqqcnn{}ptonly".format(pt)]
  #names=["drbox/ep423rot50out.npz","drbox/gp423rot50out.npz"]
  #names=["drbox/qg423b64img50out.npz","drbox/qg2or501024out.npz","drbox/qg2onr502048out.npz"]
  #names=["drbox/qg423img50out.npz","drbox/qg423rot50out.npz"]
  # Model outputs to overlay on the same ROC plot, with their legend labels.
  names=["drqgpout.npz","drqgp0out.npz","drqgcout.npz","drqg0dout.npz","qgpntout.npz"]
  labels=["30pixel","90pixel","zoom","90d","pnt"]
  #names=["drbox/qg423b64img50out.npz","drbox/qg2onr502048out.npz"]
  plt.figure(figsize=(12, 8))
  # Axis titles depend on the sample type encoded in the first file name:
  # quark/gluon ("qg") vs electron-gamma/pion ("ep") discrimination.
  if("qg" in names[0]):
    plt.xlabel("Quark Jet Efficiency", fontsize=fs*1.2)
    plt.ylabel("Gluon Jet Rejection", fontsize=fs*1.2)
  if("ep" in names[0]):
    plt.xlabel("Electron, Gamma Efficiency", fontsize=fs*1.1)
    plt.ylabel("Pion Rejection", fontsize=fs*1.2)
  plt.tick_params(labelsize=fs)
  for k in [0]:
    for num in range(len(names)):
      savename = names[num]
      y=[]
      p=[]
      # Two input formats: numpy archives (labels + scores) or ROOT files
      # with per-event trees.
      if("npz" in savename):
        f=np.load("drbox/"+savename)
        # Files whose name contains "or" read column 1, the rest column 0;
        # older archives use keys y/p instead of testY/bp.
        # NOTE(review): the bare except below swallows *any* error, not
        # just a missing key -- consider `except KeyError:`.
        if("or" in savename):
          try:
            y=f["testY"][:,1]
            p=f["bp"][:,1]
          except:
            y=f["y"][:,1]
            p=f["p"][:,1]
        else:
          try:
            y=f["testY"][:,0]
            p=f["bp"][:,0]
          except:
            y=f["y"][:,0]
            p=f["p"][:,0]
        f.close()
      else:
        f=rt.TFile("save/asu/"+savename+"/get.root",'read')
        dq=f.Get("dq")
        dg=f.Get("dg")
        # Label "dq" entries 1 and "dg" entries 0, keeping only entries
        # whose |pt| lies inside the [ptmin, ptmax] * pt window.
        for sample in ["dq","dg"]:
          tree=f.Get(sample)
          for j in range(tree.GetEntries()):
            tree.GetEntry(j)
            if(abs(tree.pt)>args.pt*args.ptmax or abs(tree.pt)<args.pt*args.ptmin):continue
            p.append(tree.p)
            if(sample=="dq"):y.append(1)
            else:y.append(0)
        f.Close()
      # ROC: plot background rejection (1 - fpr) vs signal efficiency (tpr).
      fpr,tpr,thresholds=roc_curve(y,p)
      print(savename,roc_auc_score(y,p))
      tnr=1-fpr
      label=""
      # NOTE(review): alpha/ls are only assigned inside this branch; if the
      # first entry of `names` is not an .npz file the plt.plot call below
      # raises NameError -- confirm.
      if("npz" in savename):
        mk="D"
        #mk="^"
        ms=0.75
        alpha=0.8
        ls="-"
        label=labels[num]
        fils='none'
      label+=" - AUC:{}".format(round(roc_auc_score(y,p),4))
      plt.plot(tpr,tnr,lw=4,alpha=alpha,label=label,linestyle=ls,)
      print(1)
  plt.legend(loc=3, fontsize=fs*0.9)
  plt.grid(alpha=0.6)
  plt.axis((0,1,0,1))
  # Zoom into the high-efficiency corner for the electron/pion study.
  if("ep" in names[0]):
    plt.axis((0.9,1,0.9,1))
  #plt.savefig("plots/asupurerocpt{}.png".format(pt),bbox_inches='tight',pad_inches=0.5,dpi=300)
  plt.show()
  #except:
  # pass
| [
"numpy.load",
"matplotlib.pyplot.show",
"argparse.ArgumentParser",
"sklearn.metrics.roc_curve",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.axis",
"sklearn.metrics.roc_auc_score",
"ROOT.TFile",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.tick_params",
"matplotli... | [((175, 200), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (198, 200), False, 'import argparse\n'), ((3409, 3419), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3417, 3419), True, 'import matplotlib.pyplot as plt\n'), ((1463, 1490), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(12, 8)'}), '(figsize=(12, 8))\n', (1473, 1490), True, 'import matplotlib.pyplot as plt\n'), ((1764, 1793), 'matplotlib.pyplot.tick_params', 'plt.tick_params', ([], {'labelsize': 'fs'}), '(labelsize=fs)\n', (1779, 1793), True, 'import matplotlib.pyplot as plt\n'), ((3181, 3217), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '(3)', 'fontsize': '(fs * 0.9)'}), '(loc=3, fontsize=fs * 0.9)\n', (3191, 3217), True, 'import matplotlib.pyplot as plt\n'), ((3218, 3237), 'matplotlib.pyplot.grid', 'plt.grid', ([], {'alpha': '(0.6)'}), '(alpha=0.6)\n', (3226, 3237), True, 'import matplotlib.pyplot as plt\n'), ((3240, 3262), 'matplotlib.pyplot.axis', 'plt.axis', (['(0, 1, 0, 1)'], {}), '((0, 1, 0, 1))\n', (3248, 3262), True, 'import matplotlib.pyplot as plt\n'), ((1519, 1572), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Quark Jet Efficiency"""'], {'fontsize': '(fs * 1.2)'}), "('Quark Jet Efficiency', fontsize=fs * 1.2)\n", (1529, 1572), True, 'import matplotlib.pyplot as plt\n'), ((1575, 1627), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Gluon Jet Rejection"""'], {'fontsize': '(fs * 1.2)'}), "('Gluon Jet Rejection', fontsize=fs * 1.2)\n", (1585, 1627), True, 'import matplotlib.pyplot as plt\n'), ((1654, 1713), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Electron, Gamma Efficiency"""'], {'fontsize': '(fs * 1.1)'}), "('Electron, Gamma Efficiency', fontsize=fs * 1.1)\n", (1664, 1713), True, 'import matplotlib.pyplot as plt\n'), ((1716, 1763), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Pion Rejection"""'], {'fontsize': '(fs * 1.2)'}), "('Pion Rejection', fontsize=fs * 1.2)\n", (1726, 1763), True, 'import 
matplotlib.pyplot as plt\n'), ((3288, 3314), 'matplotlib.pyplot.axis', 'plt.axis', (['(0.9, 1, 0.9, 1)'], {}), '((0.9, 1, 0.9, 1))\n', (3296, 3314), True, 'import matplotlib.pyplot as plt\n'), ((2796, 2811), 'sklearn.metrics.roc_curve', 'roc_curve', (['y', 'p'], {}), '(y, p)\n', (2805, 2811), False, 'from sklearn.metrics import roc_auc_score, roc_curve\n'), ((3103, 3167), 'matplotlib.pyplot.plot', 'plt.plot', (['tpr', 'tnr'], {'lw': '(4)', 'alpha': 'alpha', 'label': 'label', 'linestyle': 'ls'}), '(tpr, tnr, lw=4, alpha=alpha, label=label, linestyle=ls)\n', (3111, 3167), True, 'import matplotlib.pyplot as plt\n'), ((1933, 1961), 'numpy.load', 'np.load', (["('drbox/' + savename)"], {}), "('drbox/' + savename)\n", (1940, 1961), True, 'import numpy as np\n'), ((2328, 2382), 'ROOT.TFile', 'rt.TFile', (["('save/asu/' + savename + '/get.root')", '"""read"""'], {}), "('save/asu/' + savename + '/get.root', 'read')\n", (2336, 2382), True, 'import ROOT as rt\n'), ((2832, 2851), 'sklearn.metrics.roc_auc_score', 'roc_auc_score', (['y', 'p'], {}), '(y, p)\n', (2845, 2851), False, 'from sklearn.metrics import roc_auc_score, roc_curve\n'), ((3074, 3093), 'sklearn.metrics.roc_auc_score', 'roc_auc_score', (['y', 'p'], {}), '(y, p)\n', (3087, 3093), False, 'from sklearn.metrics import roc_auc_score, roc_curve\n')] |
"""
<NAME> 02/2022
"""
# - python dependencies
from __future__ import print_function
import os
import numpy as np
import geopandas as gpd
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
import matplotlib.ticker as mticker
from matplotlib.gridspec import GridSpec
from mpl_toolkits.axes_grid1 import make_axes_locatable
from matplotlib_scalebar.scalebar import ScaleBar
import cartopy.crs as ccrs
from cartopy.io.shapereader import Reader
from cartopy.feature import ShapelyFeature
from cartopy.mpl.gridliner import LONGITUDE_FORMATTER, LATITUDE_FORMATTER
def plot_interf_map(infer: np.ndarray, x_centroids: np.ndarray,
                    y_centroids: np.ndarray, cmap=plt.get_cmap('jet'),
                    ice_color: str = 'k'):
    """Plot an interferogram over the Petermann Glacier domain.

    Parameters
    ----------
    infer : np.ndarray
        2-D interferogram values drawn on a [-pi, pi] colour scale
        (presumably wrapped phase in radians -- confirm).
    x_centroids, y_centroids : np.ndarray
        Grid-cell centre coordinates in the map projection
        (North Polar Stereographic; the shapefile name suggests
        EPSG:3413 -- confirm).
    cmap : matplotlib colormap, optional
        Colormap for the interferogram (default: 'jet').
    ice_color : str, optional
        Edge colour for the glacier outlines (default: black).
    """
    # - Not Editable Parameters
    map_extent = [-61.1, -59.9, 80.4, 81.2]
    figsize = (6, 9)
    label_size = 12
    # - Path to Glaciers Mask
    ics_shp = os.path.join('.', 'esri_shp',
                           'Petermann_Domain_glaciers_epsg3413.shp')
    # - set Coordinate Reference System
    ref_crs = ccrs.NorthPolarStereo(central_longitude=-45,
                                  true_scale_latitude=70)
    # - Initialize figure object
    fig = plt.figure(figsize=figsize)
    ax = fig.add_subplot(1, 1, 1, projection=ref_crs)
    # - Set map extent (given in geographic lon/lat coordinates).
    ax.set_extent(map_extent, crs=ccrs.PlateCarree())
    # - Draw the lat/lon graticule with labels on the left/bottom only.
    gl = ax.gridlines(draw_labels=True, dms=True, x_inline=False,
                      y_inline=False, color='k', linestyle='dotted',
                      alpha=0.3)
    gl.top_labels = False
    gl.bottom_labels = True
    gl.right_labels = False
    gl.xlocator \
        = mticker.FixedLocator(np.arange(np.floor(map_extent[0]) - 3.5,
                                         np.floor(map_extent[1]) + 3, 1))
    gl.ylocator \
        = mticker.FixedLocator(np.arange(np.floor(map_extent[2]) - 5,
                                         np.floor(map_extent[3]) + 5, 0.2))
    gl.xlabel_style = {'rotation': 0, 'weight': 'bold', 'size': label_size}
    gl.ylabel_style = {'rotation': 0, 'weight': 'bold', 'size': label_size}
    gl.xformatter = LONGITUDE_FORMATTER
    gl.yformatter = LATITUDE_FORMATTER
    # - Plot Glaciers Mask (outlines only).
    shape_feature = ShapelyFeature(Reader(ics_shp).geometries(),
                                   crs=ref_crs)
    ax.add_feature(shape_feature, facecolor='None', edgecolor=ice_color)
    # - Plot Interferogram.  (FIX: a np.meshgrid(x_centroids, y_centroids)
    #   call here was never used -- pcolormesh takes the 1-D centroid
    #   vectors directly -- so the wasted allocation has been removed.)
    im = ax.pcolormesh(x_centroids, y_centroids, infer, cmap=cmap,
                       zorder=0, vmin=-np.pi, vmax=np.pi,
                       rasterized=True)
    # - Add a colourbar axes to the right of the main map axes.
    divider = make_axes_locatable(ax)
    ax_cb = divider.new_horizontal(size='5%', pad=0.1, axes_class=plt.Axes)
    fig.add_axes(ax_cb)
    cb = plt.colorbar(im, cax=ax_cb)
    cb.ax.tick_params(labelsize='medium')
    plt.show()
| [
"mpl_toolkits.axes_grid1.make_axes_locatable",
"numpy.meshgrid",
"matplotlib.pyplot.show",
"matplotlib.pyplot.get_cmap",
"numpy.floor",
"matplotlib.pyplot.colorbar",
"matplotlib.pyplot.figure",
"cartopy.io.shapereader.Reader",
"cartopy.crs.PlateCarree",
"os.path.join",
"cartopy.crs.NorthPolarSte... | [((694, 713), 'matplotlib.pyplot.get_cmap', 'plt.get_cmap', (['"""jet"""'], {}), "('jet')\n", (706, 713), True, 'import matplotlib.pyplot as plt\n'), ((921, 992), 'os.path.join', 'os.path.join', (['"""."""', '"""esri_shp"""', '"""Petermann_Domain_glaciers_epsg3413.shp"""'], {}), "('.', 'esri_shp', 'Petermann_Domain_glaciers_epsg3413.shp')\n", (933, 992), False, 'import os\n'), ((1075, 1143), 'cartopy.crs.NorthPolarStereo', 'ccrs.NorthPolarStereo', ([], {'central_longitude': '(-45)', 'true_scale_latitude': '(70)'}), '(central_longitude=-45, true_scale_latitude=70)\n', (1096, 1143), True, 'import cartopy.crs as ccrs\n'), ((1223, 1250), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': 'figsize'}), '(figsize=figsize)\n', (1233, 1250), True, 'import matplotlib.pyplot as plt\n'), ((2467, 2504), 'numpy.meshgrid', 'np.meshgrid', (['x_centroids', 'y_centroids'], {}), '(x_centroids, y_centroids)\n', (2478, 2504), True, 'import numpy as np\n'), ((2724, 2747), 'mpl_toolkits.axes_grid1.make_axes_locatable', 'make_axes_locatable', (['ax'], {}), '(ax)\n', (2743, 2747), False, 'from mpl_toolkits.axes_grid1 import make_axes_locatable\n'), ((2857, 2884), 'matplotlib.pyplot.colorbar', 'plt.colorbar', (['im'], {'cax': 'ax_cb'}), '(im, cax=ax_cb)\n', (2869, 2884), True, 'import matplotlib.pyplot as plt\n'), ((2931, 2941), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2939, 2941), True, 'import matplotlib.pyplot as plt\n'), ((1362, 1380), 'cartopy.crs.PlateCarree', 'ccrs.PlateCarree', ([], {}), '()\n', (1378, 1380), True, 'import cartopy.crs as ccrs\n'), ((1712, 1735), 'numpy.floor', 'np.floor', (['map_extent[0]'], {}), '(map_extent[0])\n', (1720, 1735), True, 'import numpy as np\n'), ((1784, 1807), 'numpy.floor', 'np.floor', (['map_extent[1]'], {}), '(map_extent[1])\n', (1792, 1807), True, 'import numpy as np\n'), ((1876, 1899), 'numpy.floor', 'np.floor', (['map_extent[2]'], {}), '(map_extent[2])\n', (1884, 1899), True, 'import numpy 
as np\n'), ((1946, 1969), 'numpy.floor', 'np.floor', (['map_extent[3]'], {}), '(map_extent[3])\n', (1954, 1969), True, 'import numpy as np\n'), ((2275, 2290), 'cartopy.io.shapereader.Reader', 'Reader', (['ics_shp'], {}), '(ics_shp)\n', (2281, 2290), False, 'from cartopy.io.shapereader import Reader\n')] |
#!/usr/bin/python3
import numpy as np
import os
import yaml
import argparse
parser = argparse.ArgumentParser(description =
"""
This script functions by reading in energy and entropy data given
the number 'N' forming the N*N Ising model. The heat capacity is computed
and compared to a benchmark (which is also read in). A heat capacity
error .TXT file is saved in the appropriate data directory.
""")
parser.add_argument('--N', metavar='ising_length', type=int, default=1,
help='The length of the ising model -- Default = 1')
parser.add_argument('--data_dir', required=True,
help='The data directory to search through -- Required \
Example: /home/jordan/sad-monte-carlo/')
parser.add_argument('--fname', required=True,
help='The file name -- Required \
Example: ising-sad-32-s1.yaml')
parser.add_argument('--refname', required=True,
help='The reference file name -- Required \
Example: ising-sad-32-s1.yaml')
# parser.add_argument('--need_wide_cv', action='store_true',
# help='A boolean that determines whether we plot a narrow range --Default = False')
args = parser.parse_args()
N=args.N
#need_wide_cv = args.need_wide_cv
datadir = args.data_dir
def heat_capacity(T, E, S):
    """Compute the canonical heat capacity C(T) from an (E, S) table.

    For each temperature, Boltzmann weights exp(S - E/T) are normalized
    (after subtracting the maximum exponent for numerical stability) and
    the heat capacity is the energy variance divided by T**2.

    Parameters
    ----------
    T : ndarray
        Temperatures at which to evaluate the heat capacity.
    E : ndarray
        Energy values.
    S : ndarray
        Entropy values corresponding to ``E``.

    Returns
    -------
    ndarray
        Heat capacity at each temperature in ``T``.
    """
    capacities = np.zeros_like(T)
    for idx in range(len(T)):
        temperature = T[idx]
        log_weights = S - E / temperature
        # Subtract the max exponent before exponentiating to avoid overflow.
        weights = np.exp(log_weights - log_weights.max())
        probabilities = weights / weights.sum()
        mean_energy = (E * probabilities).sum()
        capacities[idx] = ((E - mean_energy) ** 2 * probabilities).sum() / temperature ** 2
    return capacities
savedir = '../ising/data/ising-%i/' % N
os.makedirs(savedir, exist_ok=True)
# Define the temperature range
T = np.arange(0.01, 0.05000001, 0.05000001*0.01)
wideT = np.arange(0.01, 5, 0.001)
old_cvs = []
print(('trying filename ', args.fname))
# Read the simulation YAML file.  safe_load replaces the old bare
# yaml.load(stream) call: yaml.load has required an explicit Loader since
# PyYAML 5.1 and raises TypeError without one on PyYAML >= 6.  These files
# are plain data, so the safe loader is sufficient (and not an arbitrary
# code-execution hazard).
if os.path.isfile(args.data_dir + args.fname):
    with open(args.data_dir + args.fname, 'r') as stream:
        data = yaml.safe_load(stream)
else:
    print(('unable to read file', args.data_dir + args.fname))
    raise ValueError("%s isn't a file!" % (args.data_dir + args.fname))
# Read the YAML reference (benchmark) file the same way.
if os.path.isfile(args.data_dir + args.refname):
    with open(args.data_dir + args.refname, 'r') as stream:
        ref_data = yaml.safe_load(stream)
else:
    print(('unable to read file', args.data_dir + args.refname))
    raise ValueError("%s isn't a file!" % (args.data_dir + args.refname))
time = data['movies']['time']
my_energy = data['movies']['energy']
my_entropy = data['movies']['entropy']
ref_energy = ref_data['movies']['energy']
# The reference run's final entropy snapshot is treated as the benchmark.
ref_entropy = ref_data['movies']['entropy'][-1]
Ebest = ref_energy
Sbest = ref_entropy
CV = heat_capacity(T, Ebest, Sbest)
# Compare every stored entropy snapshot against the benchmark Cv curve,
# recording the mean and maximum absolute error over the temperature grid.
cv_error = []
cv_max_error = []
myt = []
for t in range(len(my_entropy)):
    # if time[t] < 1e3:
    #     continue
    myt.append(time[t])
    mycv = heat_capacity(T, my_energy, my_entropy[t])
    err = 0
    norm = 0
    for j in range(1, len(mycv)):
        err += abs(CV[j]-mycv[j])
        norm += 1.0
    cv_error.append(err/norm)
    cv_max_error.append(abs(CV-mycv).max())
    # if time[t] == 1e12:
    #     np.savetxt(datadir+os.path.basename(fname)+'-cv.txt',
    #                np.array([T, mycv]).transpose(),
    #                fmt='%.4g', # good enough for our plot
    #     );
    #     np.savetxt(datadir+os.path.basename(fname)+'-wide-cv.txt',
    #                np.array([wideT, heat_capacity(wideT, my_energy, my_entropy[t,:])]).transpose(),
    #                fmt='%.4g', # good enough for our plot
    #     );
    #old_cvs.append(mycv)
# if 'bench' not in fname:
#     np.savetxt(savedir+os.path.basename(fname)+'-cv-error.txt',
#                np.array([myt, cv_error, cv_max_error]).transpose(),
#                fmt='%.3g', # low resolution is good enough for error data.
#     )
# Plot the convergence of the heat-capacity error and the Cv curves.
import matplotlib.pyplot as plt
plt.figure('cv-error')
plt.loglog(myt,cv_error)
plt.figure('heat-capacity')
plt.xlim(0.3,1)
plt.plot(np.reciprocal(wideT),heat_capacity(wideT, Ebest, Sbest))
plt.plot(np.reciprocal(wideT),heat_capacity(wideT, my_energy, my_entropy[-1]))
plt.show()
plt.show()
| [
"matplotlib.pyplot.loglog",
"matplotlib.pyplot.xlim",
"yaml.load",
"numpy.zeros_like",
"matplotlib.pyplot.show",
"os.makedirs",
"argparse.ArgumentParser",
"numpy.reciprocal",
"os.path.isfile",
"matplotlib.pyplot.figure",
"numpy.arange"
] | [((87, 447), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""\n This script functions by reading in energy and entropy data given\n the number \'N\' forming the N*N Ising model. The heat capacity is computed\n and compared to a benchmark (which is also read in). A heat capacity\n error .TXT file is saved in the appropriate data directory.\n """'}), '(description=\n """\n This script functions by reading in energy and entropy data given\n the number \'N\' forming the N*N Ising model. The heat capacity is computed\n and compared to a benchmark (which is also read in). A heat capacity\n error .TXT file is saved in the appropriate data directory.\n """\n )\n', (110, 447), False, 'import argparse\n'), ((1925, 1960), 'os.makedirs', 'os.makedirs', (['savedir'], {'exist_ok': '(True)'}), '(savedir, exist_ok=True)\n', (1936, 1960), False, 'import os\n'), ((1997, 2043), 'numpy.arange', 'np.arange', (['(0.01)', '(0.05000001)', '(0.05000001 * 0.01)'], {}), '(0.01, 0.05000001, 0.05000001 * 0.01)\n', (2006, 2043), True, 'import numpy as np\n'), ((2050, 2075), 'numpy.arange', 'np.arange', (['(0.01)', '(5)', '(0.001)'], {}), '(0.01, 5, 0.001)\n', (2059, 2075), True, 'import numpy as np\n'), ((2152, 2194), 'os.path.isfile', 'os.path.isfile', (['(args.data_dir + args.fname)'], {}), '(args.data_dir + args.fname)\n', (2166, 2194), False, 'import os\n'), ((2459, 2503), 'os.path.isfile', 'os.path.isfile', (['(args.data_dir + args.refname)'], {}), '(args.data_dir + args.refname)\n', (2473, 2503), False, 'import os\n'), ((4176, 4198), 'matplotlib.pyplot.figure', 'plt.figure', (['"""cv-error"""'], {}), "('cv-error')\n", (4186, 4198), True, 'import matplotlib.pyplot as plt\n'), ((4199, 4224), 'matplotlib.pyplot.loglog', 'plt.loglog', (['myt', 'cv_error'], {}), '(myt, cv_error)\n', (4209, 4224), True, 'import matplotlib.pyplot as plt\n'), ((4225, 4252), 'matplotlib.pyplot.figure', 'plt.figure', (['"""heat-capacity"""'], {}), "('heat-capacity')\n", (4235, 
4252), True, 'import matplotlib.pyplot as plt\n'), ((4253, 4269), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(0.3)', '(1)'], {}), '(0.3, 1)\n', (4261, 4269), True, 'import matplotlib.pyplot as plt\n'), ((4414, 4424), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4422, 4424), True, 'import matplotlib.pyplot as plt\n'), ((1659, 1675), 'numpy.zeros_like', 'np.zeros_like', (['T'], {}), '(T)\n', (1672, 1675), True, 'import numpy as np\n'), ((4278, 4298), 'numpy.reciprocal', 'np.reciprocal', (['wideT'], {}), '(wideT)\n', (4291, 4298), True, 'import numpy as np\n'), ((4344, 4364), 'numpy.reciprocal', 'np.reciprocal', (['wideT'], {}), '(wideT)\n', (4357, 4364), True, 'import numpy as np\n'), ((2269, 2286), 'yaml.load', 'yaml.load', (['stream'], {}), '(stream)\n', (2278, 2286), False, 'import yaml\n'), ((2584, 2601), 'yaml.load', 'yaml.load', (['stream'], {}), '(stream)\n', (2593, 2601), False, 'import yaml\n')] |
import gym
import time
import ctypes
import numpy as np
from collections import OrderedDict
from multiprocessing.context import Process
from multiprocessing import Array, Pipe, connection, Queue
from typing import Any, List, Tuple, Union, Callable, Optional
from tianshou.env.worker import EnvWorker
from tianshou.env.utils import CloudpickleWrapper
_NP_TO_CT = {
np.bool: ctypes.c_bool,
np.bool_: ctypes.c_bool,
np.uint8: ctypes.c_uint8,
np.uint16: ctypes.c_uint16,
np.uint32: ctypes.c_uint32,
np.uint64: ctypes.c_uint64,
np.int8: ctypes.c_int8,
np.int16: ctypes.c_int16,
np.int32: ctypes.c_int32,
np.int64: ctypes.c_int64,
np.float32: ctypes.c_float,
np.float64: ctypes.c_double,
}
class ShArray:
    """A typed, shaped numpy view over a ``multiprocessing.Array`` buffer.

    Lets a parent and a child process exchange fixed-shape observations
    without pickling: the child writes with :meth:`save`, the parent reads
    with :meth:`get`.
    """

    def __init__(self, dtype: np.generic, shape: Tuple[int]) -> None:
        # Flat element count for the underlying shared C array.
        num_elements = int(np.prod(shape))
        self.arr = Array(_NP_TO_CT[dtype.type], num_elements)  # type: ignore
        self.dtype = dtype
        self.shape = shape

    def save(self, ndarray: np.ndarray) -> None:
        """Copy ``ndarray`` into the shared buffer (shape/dtype must match)."""
        assert isinstance(ndarray, np.ndarray)
        raw = self.arr.get_obj()
        shared_view = np.frombuffer(raw, dtype=self.dtype).reshape(self.shape)
        np.copyto(shared_view, ndarray)

    def get(self) -> np.ndarray:
        """Return the current buffer contents as a shaped numpy array."""
        raw = self.arr.get_obj()
        return np.frombuffer(raw, dtype=self.dtype).reshape(self.shape)
def _setup_buf(space: gym.Space) -> Union[dict, tuple, ShArray]:
    """Recursively allocate shared-memory buffers mirroring a gym space.

    Dict and Tuple spaces produce a dict/tuple of buffers with the same
    structure; every leaf space becomes one :class:`ShArray`.
    """
    if isinstance(space, gym.spaces.Dict):
        assert isinstance(space.spaces, OrderedDict)
        return {key: _setup_buf(sub) for key, sub in space.spaces.items()}
    if isinstance(space, gym.spaces.Tuple):
        assert isinstance(space.spaces, tuple)
        return tuple(_setup_buf(sub) for sub in space.spaces)
    return ShArray(space.dtype, space.shape)
def _worker(
    parent: connection.Connection,
    p: connection.Connection,
    env_fn_wrapper: CloudpickleWrapper,
    obs_bufs: Optional[Union[dict, tuple, ShArray]] = None,
) -> None:
    """Event loop executed inside each environment subprocess.

    Receives ``(cmd, data)`` pairs over the pipe ``p`` and applies them to
    the env built from ``env_fn_wrapper`` until the pipe closes or a
    ``"close"`` command arrives.  When ``obs_bufs`` is provided, observations
    are written into those shared-memory buffers and ``None`` is sent over
    the pipe in their place (the parent decodes them via the same buffers).
    """

    def _encode_obs(obs: Union[dict, tuple, np.ndarray], buffer: Union[dict, tuple, ShArray],) -> None:
        """Recursively copy an observation tree into the matching buffer tree."""
        if isinstance(obs, np.ndarray) and isinstance(buffer, ShArray):
            buffer.save(obs)
        elif isinstance(obs, tuple) and isinstance(buffer, tuple):
            for o, b in zip(obs, buffer):
                _encode_obs(o, b)
        elif isinstance(obs, dict) and isinstance(buffer, dict):
            for k in obs.keys():
                _encode_obs(obs[k], buffer[k])
        return None

    # The parent end of the pipe belongs to the parent process; drop our copy.
    parent.close()
    env = env_fn_wrapper.data()
    try:
        while True:
            try:
                cmd, data = p.recv()
            except EOFError:  # the pipe has been closed
                p.close()
                break
            if cmd == "step":
                obs, reward, done, info = env.step(data)
                if obs_bufs is not None:
                    # Ship the observation through shared memory, not the pipe.
                    _encode_obs(obs, obs_bufs)
                    obs = None
                p.send((obs, reward, done, info))
            elif cmd == "reset":
                obs = env.reset(data)
                if obs_bufs is not None:
                    _encode_obs(obs, obs_bufs)
                    obs = None
                p.send(obs)
            elif cmd == "close":
                p.send(env.close())
                p.close()
                break
            elif cmd == "render":
                p.send(env.render(**data) if hasattr(env, "render") else None)
            elif cmd == "seed":
                p.send(env.seed(data) if hasattr(env, "seed") else None)
            elif cmd == "getattr":
                p.send(getattr(env, data) if hasattr(env, data) else None)
            elif cmd == "toggle_log":
                env.toggle_log(data)
            else:
                # Unknown command: close our end before failing loudly.
                p.close()
                raise NotImplementedError
    except KeyboardInterrupt:
        p.close()
class SubprocEnvWorker(EnvWorker):
    """Subprocess worker used in SubprocVectorEnv and ShmemVectorEnv.

    Each worker owns one child process running :func:`_worker` and talks to
    it over a duplex pipe.  With ``share_memory=True`` observations travel
    through shared-memory buffers instead of being pickled over the pipe.
    """

    def __init__(self, env_fn: Callable[[], gym.Env], share_memory: bool = False) -> None:
        super().__init__(env_fn)
        self.parent_remote, self.child_remote = Pipe()
        self.share_memory = share_memory
        self.buffer: Optional[Union[dict, tuple, ShArray]] = None
        if self.share_memory:
            # Build the env once just to learn its observation space, then
            # allocate matching shared buffers and throw the env away.
            dummy = env_fn()
            obs_space = dummy.observation_space
            dummy.close()
            del dummy
            self.buffer = _setup_buf(obs_space)
        args = (
            self.parent_remote,
            self.child_remote,
            CloudpickleWrapper(env_fn),
            self.buffer,
        )
        self.process = Process(target=_worker, args=args, daemon=True)
        self.process.start()
        # The child end of the pipe lives in the subprocess; close our copy.
        self.child_remote.close()

    def __getattr__(self, key: str) -> Any:
        # Forward unknown attribute lookups to the env inside the subprocess.
        self.parent_remote.send(["getattr", key])
        return self.parent_remote.recv()

    def _decode_obs(self) -> Union[dict, tuple, np.ndarray]:
        """Read the latest observation out of the shared-memory buffer tree."""

        def decode_obs(buffer: Optional[Union[dict, tuple, ShArray]]) -> Union[dict, tuple, np.ndarray]:
            """Recursively convert a buffer tree into numpy arrays."""
            if isinstance(buffer, ShArray):
                return buffer.get()
            elif isinstance(buffer, tuple):
                return tuple([decode_obs(b) for b in buffer])
            elif isinstance(buffer, dict):
                return {k: decode_obs(v) for k, v in buffer.items()}
            else:
                raise NotImplementedError

        return decode_obs(self.buffer)

    def reset(self, sample) -> Any:
        """Ask the subprocess to reset its env with ``sample`` (non-blocking).

        The resulting observation is fetched later via :meth:`get_reset_result`.
        """
        self.parent_remote.send(["reset", sample])
        # obs = self.parent_remote.recv()
        # if self.share_memory:
        #     obs = self._decode_obs()
        # return obs

    def get_reset_result(self):
        """Block until the reset observation arrives and return it."""
        obs = self.parent_remote.recv()
        if self.share_memory:
            obs = self._decode_obs()
        return obs

    @staticmethod
    def wait(  # type: ignore
        workers: List["SubprocEnvWorker"], wait_num: int, timeout: Optional[float] = None,
    ) -> List["SubprocEnvWorker"]:
        """Return the workers whose results are ready to be received.

        Blocks until at least ``wait_num`` pipes are readable, or until
        ``timeout`` seconds have elapsed when a timeout is given.
        """
        remain_conns = conns = [x.parent_remote for x in workers]
        ready_conns: List[connection.Connection] = []
        remain_time, t1 = timeout, time.time()
        while len(remain_conns) > 0 and len(ready_conns) < wait_num:
            if timeout:
                remain_time = timeout - (time.time() - t1)
                if remain_time <= 0:
                    break
            # connection.wait hangs if the list is empty
            new_ready_conns = connection.wait(remain_conns, timeout=remain_time)
            ready_conns.extend(new_ready_conns)  # type: ignore
            remain_conns = [conn for conn in remain_conns if conn not in ready_conns]
        return [workers[conns.index(con)] for con in ready_conns]

    def send_action(self, action: np.ndarray) -> None:
        """Send one action to the subprocess (non-blocking); pair with get_result."""
        self.parent_remote.send(["step", action])

    def toggle_log(self, log):
        # Forward the logging switch to the env inside the subprocess.
        self.parent_remote.send(["toggle_log", log])

    def get_result(self,) -> Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray]:
        """Block until the step result arrives; return ``(obs, rew, done, info)``."""
        obs, rew, done, info = self.parent_remote.recv()
        if self.share_memory:
            obs = self._decode_obs()
        return obs, rew, done, info

    def seed(self, seed: Optional[int] = None) -> Optional[List[int]]:
        """Seed the subprocess env and return whatever its ``seed()`` returns."""
        self.parent_remote.send(["seed", seed])
        return self.parent_remote.recv()

    def render(self, **kwargs: Any) -> Any:
        """Render the subprocess env and return its ``render()`` result."""
        self.parent_remote.send(["render", kwargs])
        return self.parent_remote.recv()

    def close_env(self) -> None:
        """Shut the subprocess down, tolerating an already-dead child."""
        try:
            self.parent_remote.send(["close", None])
            # mp may be deleted so it may raise AttributeError
            self.parent_remote.recv()
            self.process.join()
        except (BrokenPipeError, EOFError, AttributeError):
            pass
        # ensure the subproc is terminated
        self.process.terminate()
class BaseVectorEnv(gym.Env):
    """Base class for vectorized environment wrappers.

    Usage:
    ::

        env_num = 8
        envs = DummyVectorEnv([lambda: gym.make(task) for _ in range(env_num)])
        assert len(envs) == env_num

    It accepts a list of environment generators. In other words, an environment
    generator ``efn`` of a specific task means that ``efn()`` returns the
    environment of the given task, for example, ``gym.make(task)``.

    All of the VectorEnv must inherit :class:`~tianshou.env.BaseVectorEnv`.
    Here are some other usages:
    ::

        envs.seed(2)  # which is equal to the next line
        envs.seed([2, 3, 4, 5, 6, 7, 8, 9])  # set specific seed for each env
        obs = envs.reset()  # reset all environments
        obs, rew, done, info = envs.step([1] * 8)  # step synchronously
        envs.render()  # render all environments
        envs.close()  # close all environments

    .. warning::

        If you use your own environment, please make sure the ``seed`` method
        is set up properly, e.g.,
        ::

            def seed(self, seed):
                np.random.seed(seed)

        Otherwise, the outputs of these envs may be the same with each other.

    :param env_fns: a list of callables, each returning one environment.
    :param worker_fn: a callable that wraps one env factory in an
        :class:`~tianshou.env.worker.EnvWorker`.
    :param sampler: object providing ``sample()`` values passed to each
        env reset (NOTE(review): exact contract not visible here — it must
        return ``None`` when exhausted; confirm against the caller).
    :param bool testing: forces asynchronous stepping when True.
    :param int wait_num: in asynchronous mode, how many environments must
        finish before ``step`` returns (defaults to all of them).
    :param float timeout: in asynchronous mode, ``step`` returns with the
        environments that finished within ``timeout`` seconds.
    """

    def __init__(
        self,
        env_fns: List[Callable[[], gym.Env]],
        worker_fn: Callable[[Callable[[], gym.Env]], EnvWorker],
        sampler=None,
        testing: Optional[bool] = False,
        wait_num: Optional[int] = None,
        timeout: Optional[float] = None,
    ) -> None:
        self._env_fns = env_fns
        # A VectorEnv contains a pool of EnvWorkers that interact with the
        # given envs (one worker <-> one env).
        self.workers = [worker_fn(fn) for fn in env_fns]
        self.worker_class = type(self.workers[0])
        assert issubclass(self.worker_class, EnvWorker)
        assert all([isinstance(w, self.worker_class) for w in self.workers])
        self.env_num = len(env_fns)
        self.wait_num = wait_num or len(env_fns)
        assert 1 <= self.wait_num <= len(env_fns), f"wait_num should be in [1, {len(env_fns)}], but got {wait_num}"
        self.timeout = timeout
        assert self.timeout is None or self.timeout > 0, f"timeout is {timeout}, it should be positive if provided!"
        self.is_async = self.wait_num != len(env_fns) or timeout is not None or testing
        self.waiting_conn: List[EnvWorker] = []
        # environments in self.ready_id is actually ready
        # but environments in self.waiting_id are just waiting when checked,
        # and they may be ready now, but this is not known until we check it
        # in the step() function
        self.waiting_id: List[int] = []
        # all environments are ready in the beginning
        self.ready_id = list(range(self.env_num))
        self.is_closed = False
        self.sampler = sampler
        self.sample_obs = None

    def _assert_is_not_closed(self) -> None:
        """Guard: methods must not be called after close()."""
        assert not self.is_closed, f"Methods of {self.__class__.__name__} cannot be called after " "close."

    def __len__(self) -> int:
        """Return len(self), which is the number of environments."""
        return self.env_num

    def __getattribute__(self, key: str) -> Any:
        """Switch the attribute getter depending on the key.

        Any class who inherits ``gym.Env`` will inherit some attributes, like
        ``action_space``. However, we would like the attribute lookup to go
        straight into the worker (in fact, this vector env's action_space is
        always None).
        """
        if key in [
            "metadata",
            "reward_range",
            "spec",
            "action_space",
            "observation_space",
        ]:  # reserved keys in gym.Env
            return self.__getattr__(key)
        else:
            return super().__getattribute__(key)

    def __getattr__(self, key: str) -> List[Any]:
        """Fetch a list of env attributes.

        This function tries to retrieve an attribute from each individual
        wrapped environment, if it does not belong to the wrapping vector
        environment class.
        """
        return [getattr(worker, key) for worker in self.workers]

    def _wrap_id(self, id: Optional[Union[int, List[int], np.ndarray]] = None) -> Union[List[int], np.ndarray]:
        """Normalize an env id argument: None -> all ids, scalar -> [scalar]."""
        if id is None:
            id = list(range(self.env_num))
        elif np.isscalar(id):
            id = [id]
        return id

    def _assert_id(self, id: List[int]) -> None:
        """Guard: every requested env id must be ready (not mid-step)."""
        for i in id:
            assert i not in self.waiting_id, f"Cannot interact with environment {i} which is stepping now."
            assert i in self.ready_id, f"Can only interact with ready environments {self.ready_id}."

    def reset(self, id: Optional[Union[int, List[int], np.ndarray]] = None) -> np.ndarray:
        """Reset the state of some envs and return initial observations.

        If id is None, reset all environments; otherwise reset the specific
        environments given by ``id`` (an int or a list).

        Each env is reset with a value drawn from ``self.sampler``; envs for
        which the sampler is exhausted (returns None) are collected in
        ``stop_id`` and receive a cached placeholder observation instead.

        NOTE(review): despite the annotation, this returns a tuple
        ``(obs, stop_id)`` where ``obs`` is a stacked array (or an empty
        list when no env was reset).
        """
        start_time = time.time()
        self._assert_is_not_closed()
        id = self._wrap_id(id)
        if self.is_async:
            self._assert_id(id)
        obs = []
        stop_id = []
        for i in id:
            sample = self.sampler.sample()
            if sample is None:
                stop_id.append(i)
            else:
                self.workers[i].reset(sample)
        for i in id:
            if i in stop_id:
                obs.append(self.sample_obs)
            else:
                this_obs = self.workers[i].get_reset_result()
                if self.sample_obs is None:
                    self.sample_obs = this_obs
                    # Backfill placeholders appended before the first real
                    # observation became available.
                    for j in range(len(obs)):
                        if obs[j] is None:
                            obs[j] = self.sample_obs
                obs.append(this_obs)
        if len(obs) > 0:
            obs = np.stack(obs)
        # if len(stop_id)> 0:
        #     obs_zero =
        # print(time.time() - start_timed)
        return obs, stop_id

    def toggle_log(self, log):
        # Broadcast the logging switch to every worker.
        for worker in self.workers:
            worker.toggle_log(log)

    def reset_sampler(self):
        """Reset the reset-value sampler shared by all environments."""
        self.sampler.reset()

    def step(self, action: np.ndarray, id: Optional[Union[int, List[int], np.ndarray]] = None) -> List[np.ndarray]:
        """Run one timestep of some environments' dynamics.

        If id is None, run one timestep of all the environments' dynamics;
        otherwise run one timestep for some environments with given id, either
        an int or a list. When the end of episode is reached, you are
        responsible for calling reset(id) to reset this environment's state.

        Accept a batch of actions and return a tuple (batch_obs, batch_rew,
        batch_done, batch_info) in numpy format; each info dict is tagged
        with the originating ``env_id``.

        In asynchronous mode, ``action`` may be None to only collect results
        from environments that are still stepping.
        """
        self._assert_is_not_closed()
        id = self._wrap_id(id)
        if not self.is_async:
            assert len(action) == len(id)
            for i, j in enumerate(id):
                self.workers[j].send_action(action[i])
            result = []
            for j in id:
                obs, rew, done, info = self.workers[j].get_result()
                info["env_id"] = j
                result.append((obs, rew, done, info))
        else:
            if action is not None:
                self._assert_id(id)
                assert len(action) == len(id)
                for i, (act, env_id) in enumerate(zip(action, id)):
                    self.workers[env_id].send_action(act)
                    self.waiting_conn.append(self.workers[env_id])
                    self.waiting_id.append(env_id)
                self.ready_id = [x for x in self.ready_id if x not in id]
            ready_conns: List[EnvWorker] = []
            while not ready_conns:
                ready_conns = self.worker_class.wait(self.waiting_conn, self.wait_num, self.timeout)
            result = []
            for conn in ready_conns:
                waiting_index = self.waiting_conn.index(conn)
                self.waiting_conn.pop(waiting_index)
                env_id = self.waiting_id.pop(waiting_index)
                obs, rew, done, info = conn.get_result()
                info["env_id"] = env_id
                result.append((obs, rew, done, info))
                self.ready_id.append(env_id)
        return list(map(np.stack, zip(*result)))

    def seed(self, seed: Optional[Union[int, List[int]]] = None) -> List[Optional[List[int]]]:
        """Set the seed for all environments.

        Accept ``None``, an int (which will extend ``i`` to
        ``[i, i + 1, i + 2, ...]``) or a list.

        :return: The list of seeds used in this env's random number generators.
            The first value in the list should be the "main" seed, or the value
            which a reproducer pass to "seed".
        """
        self._assert_is_not_closed()
        seed_list: Union[List[None], List[int]]
        if seed is None:
            seed_list = [seed] * self.env_num
        elif isinstance(seed, int):
            seed_list = [seed + i for i in range(self.env_num)]
        else:
            seed_list = seed
        return [w.seed(s) for w, s in zip(self.workers, seed_list)]

    def render(self, **kwargs: Any) -> List[Any]:
        """Render all of the environments; kwargs are forwarded to each env."""
        self._assert_is_not_closed()
        if self.is_async and len(self.waiting_id) > 0:
            raise RuntimeError(f"Environments {self.waiting_id} are still stepping, cannot " "render them now.")
        return [w.render(**kwargs) for w in self.workers]

    def close(self) -> None:
        """Close all of the environments.

        This function will be called only once (if not, it will be called
        during garbage collected). This way, ``close`` of all workers can be
        assured.
        """
        self._assert_is_not_closed()
        for w in self.workers:
            w.close()
        self.is_closed = True

    def __del__(self) -> None:
        """Redirect to self.close()."""
        if not self.is_closed:
            self.close()
class SubprocVectorEnv(BaseVectorEnv):
    """Vectorized environment wrapper based on subprocess pipes.

    Observations are pickled over the pipe; see
    :class:`~tianshou.env.BaseVectorEnv` for the detailed interface
    description.
    """

    def __init__(
        self,
        env_fns: List[Callable[[], gym.Env]],
        sampler=None,
        testing=False,
        wait_num: Optional[int] = None,
        timeout: Optional[float] = None,
    ) -> None:
        # Wrap each env factory in a pipe-based worker without shared memory.
        make_worker = lambda fn: SubprocEnvWorker(fn, share_memory=False)
        super().__init__(env_fns, make_worker, sampler, testing, wait_num=wait_num, timeout=timeout)
class ShmemVectorEnv(BaseVectorEnv):
    """Optimized SubprocVectorEnv exchanging observations via shared buffers.

    ShmemVectorEnv has exactly the same API as SubprocVectorEnv; see
    :class:`~tianshou.env.SubprocVectorEnv` for details.
    """

    def __init__(
        self,
        env_fns: List[Callable[[], gym.Env]],
        sampler=None,
        testing=False,
        wait_num: Optional[int] = None,
        timeout: Optional[float] = None,
    ) -> None:
        # Wrap each env factory in a worker that uses shared-memory buffers.
        make_worker = lambda fn: SubprocEnvWorker(fn, share_memory=True)
        super().__init__(env_fns, make_worker, sampler, testing, wait_num=wait_num, timeout=timeout)
| [
"numpy.stack",
"numpy.isscalar",
"numpy.frombuffer",
"time.time",
"numpy.prod",
"multiprocessing.Pipe",
"tianshou.env.utils.CloudpickleWrapper",
"numpy.copyto",
"multiprocessing.context.Process",
"multiprocessing.connection.wait"
] | [((1387, 1413), 'numpy.copyto', 'np.copyto', (['dst_np', 'ndarray'], {}), '(dst_np, ndarray)\n', (1396, 1413), True, 'import numpy as np\n'), ((5443, 5449), 'multiprocessing.Pipe', 'Pipe', ([], {}), '()\n', (5447, 5449), False, 'from multiprocessing import Array, Pipe, connection, Queue\n'), ((5938, 5985), 'multiprocessing.context.Process', 'Process', ([], {'target': '_worker', 'args': 'args', 'daemon': '(True)'}), '(target=_worker, args=args, daemon=True)\n', (5945, 5985), False, 'from multiprocessing.context import Process\n'), ((16676, 16687), 'time.time', 'time.time', ([], {}), '()\n', (16685, 16687), False, 'import time\n'), ((5852, 5878), 'tianshou.env.utils.CloudpickleWrapper', 'CloudpickleWrapper', (['env_fn'], {}), '(env_fn)\n', (5870, 5878), False, 'from tianshou.env.utils import CloudpickleWrapper\n'), ((8138, 8149), 'time.time', 'time.time', ([], {}), '()\n', (8147, 8149), False, 'import time\n'), ((8452, 8502), 'multiprocessing.connection.wait', 'connection.wait', (['remain_conns'], {'timeout': 'remain_time'}), '(remain_conns, timeout=remain_time)\n', (8467, 8502), False, 'from multiprocessing import Array, Pipe, connection, Queue\n'), ((15549, 15564), 'numpy.isscalar', 'np.isscalar', (['id'], {}), '(id)\n', (15560, 15564), True, 'import numpy as np\n'), ((17533, 17546), 'numpy.stack', 'np.stack', (['obs'], {}), '(obs)\n', (17541, 17546), True, 'import numpy as np\n'), ((960, 974), 'numpy.prod', 'np.prod', (['shape'], {}), '(shape)\n', (967, 974), True, 'import numpy as np\n'), ((1322, 1358), 'numpy.frombuffer', 'np.frombuffer', (['dst'], {'dtype': 'self.dtype'}), '(dst, dtype=self.dtype)\n', (1335, 1358), True, 'import numpy as np\n'), ((1512, 1548), 'numpy.frombuffer', 'np.frombuffer', (['obj'], {'dtype': 'self.dtype'}), '(obj, dtype=self.dtype)\n', (1525, 1548), True, 'import numpy as np\n'), ((8284, 8295), 'time.time', 'time.time', ([], {}), '()\n', (8293, 8295), False, 'import time\n')] |
import numpy as np
def approximate_steady_state_iob_from_sbr(scheduled_basal_rate: np.float64) -> np.float64:
    """Approximate steady-state insulin-on-board from the scheduled basal rate.

    The multiplier was obtained by running the Tidepool Simple Diabetes
    Metabolism Model at a constant basal rate for 8 hours.

    Parameters
    ----------
    scheduled_basal_rate : float
        A single value that represents the user's insulin needs.
        NOTE: this needs to be updated to account for a basal-rate schedule.

    Returns
    -------
    float:
        insulin-on-board
    """
    # TODO: add test coverage by calling the diabetes metabolism model.
    STEADY_STATE_IOB_FACTOR = 2.111517
    return scheduled_basal_rate * STEADY_STATE_IOB_FACTOR
def dka_index(
    iob_array: "np.ndarray[np.float64]", scheduled_basal_rate: np.float64, round_to_n_digits: int = 3
) -> np.float64:
    """Compute the Tidepool DKA Index.

    The index is the number of hours during which insulin-on-board stayed
    below 50% of the user's normal insulin needs, using the scheduled basal
    rate as a proxy for those needs.

    https://docs.google.com/document/d/1zrQK7tQ3OJzjOXbwDgmQEeCdcig49F2TpJzNk2FU52k

    Parameters
    ----------
    iob_array : ndarray
        1D array containing the insulin-on-board time series with float type.
    scheduled_basal_rate : float (U/hr)
        a single value that represents the user's insulin needs
        NOTE: this needs to be updated to account for sbr schedule
    round_to_n_digits : int, optional
        The number of digits to round the result to.

    Returns
    -------
    float
        The Tidepool DKA Index in hours.
    """
    # TODO: support multiple scheduled basal rates (schedules).
    steady_state_iob = approximate_steady_state_iob_from_sbr(scheduled_basal_rate)
    below_half_needs = iob_array < steady_state_iob / 2
    # Samples are 5 minutes apart, so the count converts to hours via * 5 / 60.
    hours_below_half_needs = np.sum(below_half_needs) * 5 / 60
    return round(hours_below_half_needs, round_to_n_digits)
def dka_risk_score(hours_with_less_50percent_sbr_iob: np.float64) -> int:
    """Map a DKA Index (in hours) onto the 0-4 Tidepool DKAI Risk Score.

    https://docs.google.com/document/d/1zrQK7tQ3OJzjOXbwDgmQEeCdcig49F2TpJzNk2FU52k

    Parameters
    ----------
    hours_with_less_50percent_sbr_iob : float
        calculated from dka_index

    Returns
    -------
    int
        The Tidepool DKAI Risk Score (0 = lowest risk, 4 = highest).
    """
    # Bands are checked from most to least severe; the first match wins.
    severity_bands = ((21, 4), (14, 3), (8, 2), (2, 1))
    for cutoff_hours, score in severity_bands:
        if hours_with_less_50percent_sbr_iob >= cutoff_hours:
            return score
    return 0
| [
"numpy.sum"
] | [((2019, 2062), 'numpy.sum', 'np.sum', (['indices_with_less_50percent_sbr_iob'], {}), '(indices_with_less_50percent_sbr_iob)\n', (2025, 2062), True, 'import numpy as np\n')] |
import numpy as np
import json
import unittest
from opponent_move import opponent_move
"""
This is based on a paper titled 'Using Evolutionary Programming to Create
Neural Networks that are Capable of Playing Tic-Tac-Toe'.
The values and algorithms used in this project are taken from the paper,
except for the propagation function which isn't discussed as strictly and
which consequently ended up being a complete mess due to it being left up
to my own discretion.
"""
def sigmoid(arr_in):
    """Return a column vector of the elementwise logistic sigmoid of arr_in.

    Parameters
    ----------
    arr_in : array_like
        1D sequence or (n, 1) column of pre-activation values.

    Returns
    -------
    ndarray
        Column array of shape (len(arr_in), 1) with sigmoid applied.
    """
    # Vectorized: one numpy expression replaces the per-element Python loop,
    # producing the same (n, 1) float column as the original.
    column = np.asarray(arr_in, dtype=float).reshape(-1, 1)
    return 1.0 / (1.0 + np.exp(-column))
def check_win(board):
    """Return 1 if player +1 has three in a row, -1 if player -1 does, else 0.

    The board may be any array reshapable to 3x3; diagonals are checked
    first, then rows, then columns.
    """
    grid = board.reshape((3, 3))
    row_totals = np.absolute(np.sum(grid, axis=1))
    col_totals = np.absolute(np.sum(grid, axis=0))
    full_rows = np.nonzero(row_totals == 3)[0]
    full_cols = np.nonzero(col_totals == 3)[0]
    main_diag = grid[0, 0] + grid[1, 1] + grid[2, 2]
    anti_diag = grid[2, 0] + grid[1, 1] + grid[0, 2]
    if abs(main_diag) == 3:
        return 1 if grid[0, 0] == 1 else -1
    if abs(anti_diag) == 3:
        return 1 if grid[2, 0] == 1 else -1
    if full_rows.size > 0:
        return 1 if grid[full_rows[0], 0] == 1 else -1
    if full_cols.size > 0:
        return 1 if grid[0, full_cols[0]] == 1 else -1
    return 0
class Network():
    """Feedforward net (9 inputs -> n2 hidden neurons -> 9 outputs) evolved
    to play tic-tac-toe.

    w2/b2 map the 9-cell board to the hidden layer; w3/b3 map the hidden
    layer to one score per board square.
    """
    def __init__(self, net_dict=False):
        """
        If a dict is passed, network is initialized with dict values.
        Else, random values are used.
        """
        if net_dict:
            self.n2 = net_dict["n2"]
            self.w2 = np.array(net_dict["w2"])
            self.w3 = np.array(net_dict["w3"])
            self.b2 = np.array(net_dict["b2"])
            self.b3 = np.array(net_dict["b3"])
        else:
            # Fresh net: 1-9 hidden neurons, all weights/biases uniform in [-0.5, 0.5).
            self.n2 = np.random.randint(1, 10)
            self.w2 = np.random.uniform(-0.5, 0.5, (self.n2, 9))
            self.w3 = np.random.uniform(-0.5, 0.5, (9, self.n2))
            self.b2 = np.random.uniform(-0.5, 0.5, (self.n2, 1))
            self.b3 = np.random.uniform(-0.5, 0.5, (9, 1))
    def dict(self):
        """Creates dict with net values (plain lists, so it is JSON-serializable)."""
        net_dict = {}
        net_dict["n2"] = self.n2
        net_dict["w2"] = self.w2.tolist()
        net_dict["w3"] = self.w3.tolist()
        net_dict["b2"] = self.b2.tolist()
        net_dict["b3"] = self.b3.tolist()
        return net_dict
    def network_move(self, board):
        """Sets net_move based on network and board array."""
        layer_2 = sigmoid(self.w2.dot(board) - self.b2)
        layer_3 = sigmoid(self.w3.dot(layer_2) - self.b3)
        # Mask already-occupied squares so they can never win the argmax below.
        for n in range(len(layer_3)):
            if board[n, 0] == 1 or board[n, 0] == -1:
                layer_3[n, 0] = -1000
        # Index of the highest-scoring free square.
        self.net_move = np.nonzero(layer_3 == np.amax(layer_3))[0][0]
    @staticmethod
    def get_turn(board):
        """Returns turn based on input board."""
        board = board.reshape((3, 3))
        # nonzero() on the 3x3 grid yields one row index per occupied cell,
        # so the count of occupied cells equals the turn number.
        used_squares = np.nonzero(board)[0]
        return len(used_squares)
    def play_game(self, board):
        """Plays game between heuristic AI and network.

        The network plays as +1, the opponent as -1; the result is left in
        self.win (1 net win, -1 opponent win, 0 draw).
        """
        self.win = 0
        turn = self.get_turn(board)
        while self.win == 0 and turn != 9:
            self.network_move(board)
            board[self.net_move, 0] = 1
            self.win = check_win(board)
            turn += 1
            if self.win == 0 and turn <= 8:
                opp_move = opponent_move(board, turn)
                board[opp_move] = -1
                self.win = check_win(board)
                turn += 1
    @staticmethod
    def setup_board(first_move, second_move):
        """Creates empty board and adds 2 moves"""
        board = np.zeros((9, 1))
        board[first_move, 0] = 1
        board[second_move, 0] = -1
        return board
    def net_payoff(self):
        """
        Makes network play against ai with first 2 moves fixed, 4 games for
        each possible response to the first move. (change number of games to
        offset variance due to randomness in opponent's play)
        """
        # The net's opening move on an empty board is fixed for all games.
        board = np.zeros((9, 1))
        self.network_move(board)
        first_move = self.net_move
        self.score = 0
        pos = 0
        while pos <= 8:
            # Skip the square the net already occupies as an opponent reply.
            if first_move == pos:
                pos += 1
            for _ in range(4):
                board = self.setup_board(first_move, pos)
                self.play_game(board)
                # Losses are penalized 10x harder than wins are rewarded.
                if self.win == 1:
                    self.score += 1
                elif self.win == -1:
                    self.score -= 10
            pos += 1
            # Edge case: if the opening move is square 8, bail out before
            # pos would land on it.
            if first_move == 8 and pos == 8:
                pos += 1
    def normrand(self, mean):
        """Varies all network values by mean with normal distribution"""
        self.w2 += np.random.normal(0.0, mean, (self.n2, 9))
        self.w3 += np.random.normal(0.0, mean, (9, self.n2))
        self.b2 += np.random.normal(0.0, mean, (self.n2, 1))
        self.b3 += np.random.normal(0.0, mean, (9, 1))
    def modify_layer(self):
        """Chance to change number of neurons in hidden layer.

        1-in-4 chance to grow (up to 10 neurons) and 1-in-4 to shrink
        (down to 1). A new neuron gets zero incoming/outgoing weights, so
        the network's output is initially unaffected by it.
        """
        chance = np.random.randint(4)
        if chance == 0 and self.n2 < 10:
            self.n2 += 1
            self.w2 = np.concatenate((self.w2, np.zeros((1, 9))))
            self.w3 = np.concatenate((self.w3, np.zeros((9, 1))), axis=1)
            self.b2 = np.concatenate((self.b2, np.array([[0]])))
        elif chance == 1 and self.n2 > 1:
            self.n2 -= 1
            self.w2 = self.w2[:-1]
            self.w3 = self.w3[:, :-1]
            self.b2 = self.b2[:-1]
    def evolve(self, parent_score):
        """Randomly modifies network values and checks for improvement.

        On failure the parent's parameters are restored; note that
        self.score still holds the mutated child's score afterwards.
        """
        n2_parent = self.n2
        w2_parent = self.w2
        w3_parent = self.w3
        b2_parent = self.b2
        b3_parent = self.b3
        self.normrand(0.05)
        self.modify_layer()
        self.net_payoff()
        if self.score > parent_score:
            self.improved = True
        else:
            self.improved = False
            self.w2 = w2_parent
            self.w3 = w3_parent
            self.b2 = b2_parent
            self.b3 = b3_parent
            self.n2 = n2_parent
def select_network(index):
    """Load the stored network list and build a Network from entry `index`."""
    with open("best_network.json") as handle:
        stored_nets = json.load(handle)
    return Network(stored_nets[index])
def propagate(scores):
    """
    Outputs a list of indices into the score list such that the frequency
    of each index is roughly exponentially proportional to its score
    (scores are shifted to be non-negative, rescaled, then cubed).

    Fixes: the top-up padding previously drew from a hard-coded range of 50
    indices, which is wrong whenever the population size differs from 50;
    it now draws from range(len(scores)). Also guards the degenerate case
    where all scores are identical (which previously divided by zero).
    """
    n = len(scores)
    score_arr = np.array(scores, dtype=float)
    # Shift so the minimum score becomes zero.
    score_arr += abs(np.amin(score_arr))
    total = np.sum(score_arr)
    if total == 0:
        # All scores identical: no basis for fitness-proportional selection,
        # so pick parents uniformly at random.
        return [int(i) for i in np.random.randint(0, n, size=n)]
    score_arr = 50 * score_arr / total
    # Cubing polarizes the distribution toward high scorers.
    polarized_scores = np.around(np.power(score_arr, 3), 4)
    edges = np.cumsum(polarized_scores)
    # Sample n points and histogram them against the cumulative edges; the
    # bin counts become the number of children each index receives.
    x_values = np.random.uniform(0, np.sum(polarized_scores), (1, n))[0]
    bins = np.histogram(x_values, edges)[0].tolist()
    parents = []
    for index, count in enumerate(bins):
        parents.extend([index] * count)
    # The histogram can undercount (n edges give only n-1 bins); pad with
    # uniformly random indices until there is at least one parent per slot.
    while len(parents) < n:
        parents.append(int(np.random.randint(0, n)))
    return parents
def save_to_json(score, net):
    """Append the network (plus its score) to the best-network JSON file,
    creating the file's list on first use."""
    filename = "best_network.json"
    try:
        with open(filename) as handle:
            score_list = json.load(handle)
    except FileNotFoundError:
        print("Creating new file to store the best network ...")
        score_list = []
    entry = {**net.dict(), "score": score, "description": ""}
    score_list.append(entry)
    with open(filename, 'w') as handle:
        json.dump(score_list, handle, indent=4)
def learn(networks, generations):
    """
    Creates list of Networks, for each generation calls the evolve method
    and feeds score list to propagate function to generate children list.

    NOTE(review): if generations == 0 (or networks < 1), best_net is never
    assigned and the final save_to_json call raises NameError -- confirm
    callers always pass positive values.
    """
    net_list = []
    for _ in range(networks):
        net = Network()
        net_list.append(net)
    # Flat float array of the latest score per network.
    scores = np.zeros((1, networks))[0]
    best_score = -1000
    count = 0
    for gen in range(generations):
        if networks > 1:
            for net in range(networks):
                n = net_list[net]
                if gen > 0:
                    # Re-measure the parent so evolve() compares against a
                    # fresh baseline (play is partly random).
                    n.net_payoff()
                    parent_score = n.score
                else:
                    # Sentinel low enough that the first mutation always
                    # counts as an improvement -- presumably below the worst
                    # achievable payoff; TODO confirm the bound.
                    parent_score = -640
                n.evolve(parent_score)
                if n.improved:
                    scores[net] = n.score
                else:
                    scores[net] = parent_score
                if scores[net] >= best_score:
                    best_score = scores[net]
                    best_net = n
            # Replace weak networks with copies of fitter parents.
            parents = propagate(scores)
            for net in range(networks):
                parent = parents[net]
                if scores[net] < scores[parent]:
                    n = net_list[parent]
                    net_dict = n.dict()
                    net_list[net] = Network(net_dict)
        elif networks == 1: # Just in case
            if gen == 0:
                score = 0
            n = net_list[0]
            n.evolve(score)
            if n.improved:
                score = n.score
            if score >= best_score:
                best_score = score
                best_net = n
        count += 1
        print(f" Generation {count}. Best score: {best_score}")
    save_to_json(best_score, best_net)
| [
"json.dump",
"numpy.random.uniform",
"json.load",
"numpy.sum",
"numpy.amin",
"numpy.power",
"opponent_move.opponent_move",
"numpy.zeros",
"numpy.nonzero",
"numpy.around",
"numpy.cumsum",
"numpy.histogram",
"numpy.array",
"numpy.random.randint",
"numpy.random.normal",
"numpy.exp",
"nu... | [((599, 621), 'numpy.zeros', 'np.zeros', (['(arr_len, 1)'], {}), '((arr_len, 1))\n', (607, 621), True, 'import numpy as np\n'), ((6969, 6985), 'numpy.array', 'np.array', (['scores'], {}), '(scores)\n', (6977, 6985), True, 'import numpy as np\n'), ((7101, 7123), 'numpy.power', 'np.power', (['score_arr', '(3)'], {}), '(score_arr, 3)\n', (7109, 7123), True, 'import numpy as np\n'), ((7147, 7177), 'numpy.around', 'np.around', (['polarized_scores', '(4)'], {}), '(polarized_scores, 4)\n', (7156, 7177), True, 'import numpy as np\n'), ((7191, 7218), 'numpy.cumsum', 'np.cumsum', (['polarized_scores'], {}), '(polarized_scores)\n', (7200, 7218), True, 'import numpy as np\n'), ((7336, 7365), 'numpy.histogram', 'np.histogram', (['x_values', 'edges'], {}), '(x_values, edges)\n', (7348, 7365), True, 'import numpy as np\n'), ((897, 920), 'numpy.sum', 'np.sum', (['r_board'], {'axis': '(1)'}), '(r_board, axis=1)\n', (903, 920), True, 'import numpy as np\n'), ((948, 971), 'numpy.sum', 'np.sum', (['r_board'], {'axis': '(0)'}), '(r_board, axis=0)\n', (954, 971), True, 'import numpy as np\n'), ((991, 1015), 'numpy.nonzero', 'np.nonzero', (['(row_sum == 3)'], {}), '(row_sum == 3)\n', (1001, 1015), True, 'import numpy as np\n'), ((1037, 1061), 'numpy.nonzero', 'np.nonzero', (['(col_sum == 3)'], {}), '(col_sum == 3)\n', (1047, 1061), True, 'import numpy as np\n'), ((4010, 4026), 'numpy.zeros', 'np.zeros', (['(9, 1)'], {}), '((9, 1))\n', (4018, 4026), True, 'import numpy as np\n'), ((4398, 4414), 'numpy.zeros', 'np.zeros', (['(9, 1)'], {}), '((9, 1))\n', (4406, 4414), True, 'import numpy as np\n'), ((5097, 5138), 'numpy.random.normal', 'np.random.normal', (['(0.0)', 'mean', '(self.n2, 9)'], {}), '(0.0, mean, (self.n2, 9))\n', (5113, 5138), True, 'import numpy as np\n'), ((5158, 5199), 'numpy.random.normal', 'np.random.normal', (['(0.0)', 'mean', '(9, self.n2)'], {}), '(0.0, mean, (9, self.n2))\n', (5174, 5199), True, 'import numpy as np\n'), ((5219, 5260), 'numpy.random.normal', 
'np.random.normal', (['(0.0)', 'mean', '(self.n2, 1)'], {}), '(0.0, mean, (self.n2, 1))\n', (5235, 5260), True, 'import numpy as np\n'), ((5280, 5315), 'numpy.random.normal', 'np.random.normal', (['(0.0)', 'mean', '(9, 1)'], {}), '(0.0, mean, (9, 1))\n', (5296, 5315), True, 'import numpy as np\n'), ((5427, 5447), 'numpy.random.randint', 'np.random.randint', (['(4)'], {}), '(4)\n', (5444, 5447), True, 'import numpy as np\n'), ((6680, 6692), 'json.load', 'json.load', (['f'], {}), '(f)\n', (6689, 6692), False, 'import json\n'), ((7007, 7025), 'numpy.amin', 'np.amin', (['score_arr'], {}), '(score_arr)\n', (7014, 7025), True, 'import numpy as np\n'), ((7060, 7077), 'numpy.sum', 'np.sum', (['score_arr'], {}), '(score_arr)\n', (7066, 7077), True, 'import numpy as np\n'), ((7605, 7629), 'numpy.random.randint', 'np.random.randint', (['(0)', '(50)'], {}), '(0, 50)\n', (7622, 7629), True, 'import numpy as np\n'), ((8155, 8189), 'json.dump', 'json.dump', (['score_list', 'f'], {'indent': '(4)'}), '(score_list, f, indent=4)\n', (8164, 8189), False, 'import json\n'), ((8506, 8529), 'numpy.zeros', 'np.zeros', (['(1, networks)'], {}), '((1, networks))\n', (8514, 8529), True, 'import numpy as np\n'), ((1924, 1948), 'numpy.array', 'np.array', (["net_dict['w2']"], {}), "(net_dict['w2'])\n", (1932, 1948), True, 'import numpy as np\n'), ((1971, 1995), 'numpy.array', 'np.array', (["net_dict['w3']"], {}), "(net_dict['w3'])\n", (1979, 1995), True, 'import numpy as np\n'), ((2018, 2042), 'numpy.array', 'np.array', (["net_dict['b2']"], {}), "(net_dict['b2'])\n", (2026, 2042), True, 'import numpy as np\n'), ((2065, 2089), 'numpy.array', 'np.array', (["net_dict['b3']"], {}), "(net_dict['b3'])\n", (2073, 2089), True, 'import numpy as np\n'), ((2128, 2152), 'numpy.random.randint', 'np.random.randint', (['(1)', '(10)'], {}), '(1, 10)\n', (2145, 2152), True, 'import numpy as np\n'), ((2175, 2217), 'numpy.random.uniform', 'np.random.uniform', (['(-0.5)', '(0.5)', '(self.n2, 9)'], {}), '(-0.5, 0.5, 
(self.n2, 9))\n', (2192, 2217), True, 'import numpy as np\n'), ((2240, 2282), 'numpy.random.uniform', 'np.random.uniform', (['(-0.5)', '(0.5)', '(9, self.n2)'], {}), '(-0.5, 0.5, (9, self.n2))\n', (2257, 2282), True, 'import numpy as np\n'), ((2305, 2347), 'numpy.random.uniform', 'np.random.uniform', (['(-0.5)', '(0.5)', '(self.n2, 1)'], {}), '(-0.5, 0.5, (self.n2, 1))\n', (2322, 2347), True, 'import numpy as np\n'), ((2370, 2406), 'numpy.random.uniform', 'np.random.uniform', (['(-0.5)', '(0.5)', '(9, 1)'], {}), '(-0.5, 0.5, (9, 1))\n', (2387, 2406), True, 'import numpy as np\n'), ((3286, 3303), 'numpy.nonzero', 'np.nonzero', (['board'], {}), '(board)\n', (3296, 3303), True, 'import numpy as np\n'), ((7277, 7301), 'numpy.sum', 'np.sum', (['polarized_scores'], {}), '(polarized_scores)\n', (7283, 7301), True, 'import numpy as np\n'), ((7856, 7868), 'json.load', 'json.load', (['f'], {}), '(f)\n', (7865, 7868), False, 'import json\n'), ((681, 699), 'numpy.exp', 'np.exp', (['(-arr_in[i])'], {}), '(-arr_in[i])\n', (687, 699), True, 'import numpy as np\n'), ((3744, 3770), 'opponent_move.opponent_move', 'opponent_move', (['board', 'turn'], {}), '(board, turn)\n', (3757, 3770), False, 'from opponent_move import opponent_move\n'), ((5563, 5579), 'numpy.zeros', 'np.zeros', (['(1, 9)'], {}), '((1, 9))\n', (5571, 5579), True, 'import numpy as np\n'), ((5629, 5645), 'numpy.zeros', 'np.zeros', (['(9, 1)'], {}), '((9, 1))\n', (5637, 5645), True, 'import numpy as np\n'), ((5703, 5718), 'numpy.array', 'np.array', (['[[0]]'], {}), '([[0]])\n', (5711, 5718), True, 'import numpy as np\n'), ((3108, 3124), 'numpy.amax', 'np.amax', (['layer_3'], {}), '(layer_3)\n', (3115, 3124), True, 'import numpy as np\n')] |
# Copyright 2021 Quantapix Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
# fine-tune XLNet for question answering with beam search
import collections
import json
import logging
import numpy as np
import os
import torch
from tqdm.auto import tqdm
from transformers import (
XLNetConfig,
XLNetTokenizerFast,
XLNetForQuestionAnswering,
EvalPrediction,
)
from .params import EVAL, TEST, EACH
from .qa import Runner as Base
from .utils import init_array
log = logging.getLogger(__name__)
class Runner(Base):
    """Question-answering runner specialized for XLNet with beam search.

    XLNet's QA head emits top-k start/end log-probs plus an answerability
    (cls) logit, so feature preparation, gathering, and post-processing
    differ from the base span-extraction runner.

    Fix: eval() previously unpacked the 2-tuple (self.params, self.mgr)
    into three names (`ps, mgr, ds = ...`), which raised ValueError on
    every call; pred() already had the correct 2-name form.

    NOTE(review): the tokenizer kwargs `max_len` / padding="max_len" and
    the `typ_ids` batch key look like project-local renames of the
    upstream `max_length` / `token_type_ids` -- confirm against the
    tokenizer actually in use.
    """

    AutoConfig = XLNetConfig
    AutoTokenizer = XLNetTokenizerFast
    AutoModel = XLNetForQuestionAnswering

    def prep_for_train(self, xs):
        """Tokenize training examples and attach XLNet-specific targets.

        Adds start/end positions plus `is_impossible`, `cls_index`, and
        `p_mask` (1.0 marks tokens that can never be part of an answer).
        """
        ps, pad_on_right = self.params, self.pad_on_right
        q_col, c_col, a_col = self.cols[EACH]
        xs[q_col] = [x.lstrip() for x in xs[q_col]]
        ys = self.tokenizer(
            xs[q_col if pad_on_right else c_col],
            xs[c_col if pad_on_right else q_col],
            truncation="only_second" if pad_on_right else "only_first",
            max_len=self.max_seq_length,
            stride=ps.doc_stride,
            return_overflowing_tokens=True,
            return_offsets_mapping=True,
            return_special_tokens_mask=True,
            return_token_type_ids=True,
            padding="max_len",
        )
        map = ys.pop("overflow_to_sample_mapping")
        specials = ys.pop("special_tokens_mask")
        ys["start_positions"] = []
        ys["end_positions"] = []
        ys["is_impossible"] = []
        ys["cls_index"] = []
        ys["p_mask"] = []
        for i, offs in enumerate(ys.pop("offset_mapping")):
            ins = ys["input_ids"][i]
            cls = ins.index(self.tokenizer.cls_token_id)
            ys["cls_index"].append(cls)
            # Mark special tokens with segment id 3 so they are excluded below.
            ids = ys["typ_ids"][i]
            for k, s in enumerate(specials[i]):
                if s:
                    ids[k] = 3
            # por: segment id of the context side of the pair.
            por = 1 if pad_on_right else 0
            ys["p_mask"].append(
                [
                    0.0 if (not specials[i][k] and s == por) or k == cls else 1.0
                    for k, s in enumerate(ids)
                ]
            )
            ans = xs[a_col][map[i]]
            if len(ans["answer_start"]) == 0:
                # Unanswerable example: point both positions at the CLS token.
                ys["start_positions"].append(cls)
                ys["end_positions"].append(cls)
                ys["is_impossible"].append(1.0)
            else:
                s = ans["answer_start"][0]
                e = s + len(ans["text"][0])
                # j/k: first and last token indices of the context segment.
                j = 0
                while ids[j] != por:
                    j += 1
                k = len(ins) - 1
                while ids[k] != por:
                    k -= 1
                if not (offs[j][0] <= s and offs[k][1] >= e):
                    # Answer lies outside this feature's span: treat as impossible.
                    ys["start_positions"].append(cls)
                    ys["end_positions"].append(cls)
                    ys["is_impossible"].append(1.0)
                else:
                    # Walk inward to the tokens bracketing the answer characters.
                    while j < len(offs) and offs[j][0] <= s:
                        j += 1
                    ys["start_positions"].append(j - 1)
                    while offs[k][1] >= e:
                        k -= 1
                    ys["end_positions"].append(k + 1)
                    ys["is_impossible"].append(0.0)
        return ys

    def prep_for_eval(self, xs):
        """Tokenize eval/test examples, keeping `example_id` and the
        offset mapping so predictions can be mapped back to the context."""
        ps, pad_on_right = self.params, self.pad_on_right
        q_col, c_col, _ = self.cols[EACH]
        xs[q_col] = [q.lstrip() for q in xs[q_col]]
        ys = self.tokenizer(
            xs[q_col if pad_on_right else c_col],
            xs[c_col if pad_on_right else q_col],
            truncation="only_second" if pad_on_right else "only_first",
            max_len=self.max_seq_length,
            stride=ps.doc_stride,
            return_overflowing_tokens=True,
            return_offsets_mapping=True,
            return_special_tokens_mask=True,
            return_token_type_ids=True,
            padding="max_len",
        )
        map = ys.pop("overflow_to_sample_mapping")
        specials = ys.pop("special_tokens_mask")
        ys["example_id"] = []
        ys["cls_index"] = []
        ys["p_mask"] = []
        for i, ins in enumerate(ys["input_ids"]):
            cls = ins.index(self.tokenizer.cls_token_id)
            ys["cls_index"].append(cls)
            ids = ys["typ_ids"][i]
            for k, s in enumerate(specials[i]):
                if s:
                    ids[k] = 3
            por = 1 if pad_on_right else 0
            ys["p_mask"].append(
                [
                    0.0 if (not specials[i][k] and s == por) or k == cls else 1.0
                    for k, s in enumerate(ids)
                ]
            )
            ys["example_id"].append(xs["id"][map[i]])
            # Null out offsets for non-context tokens so post-processing
            # can skip them.
            ys["offset_mapping"][i] = [
                (o if ids[k] == por else None) for k, o in enumerate(ys["offset_mapping"][i])
            ]
        return ys

    def eval(self):
        """Run the eval loader through the model, gather the beam-search
        outputs across processes, and compute the metric."""
        # BUG FIX: was `ps, mgr, ds = self.params, self.mgr`, which unpacks
        # a 2-tuple into three names and raises ValueError.
        ps, mgr = self.params, self.mgr
        ds, src = self.eval_ds, self.loaders[EVAL]
        log.info("*** Evaluating ***")
        log.info(f"  Num samples = {len(ds)}")
        log.info(f"  Batch size per device = {ps.eval_batch_size}")
        all_start_top_log_probs = []
        all_start_top_index = []
        all_end_top_log_probs = []
        all_end_top_index = []
        all_cls_logits = []
        for xs in src:
            with torch.no_grad():
                ys = self.model(**xs)
                start_top_log_probs = ys.start_top_log_probs
                start_top_index = ys.start_top_index
                end_top_log_probs = ys.end_top_log_probs
                end_top_index = ys.end_top_index
                cls_logits = ys.cls_logits
                if not ps.pad_to_max_length:
                    # Pad across processes so gather() sees equal-length tensors.
                    start_top_log_probs = mgr.pad_across_processes(
                        start_top_log_probs, dim=1, PAD=-100
                    )
                    start_top_index = mgr.pad_across_processes(start_top_index, dim=1, PAD=-100)
                    end_top_log_probs = mgr.pad_across_processes(end_top_log_probs, dim=1, PAD=-100)
                    end_top_index = mgr.pad_across_processes(end_top_index, dim=1, PAD=-100)
                    cls_logits = mgr.pad_across_processes(cls_logits, dim=1, PAD=-100)
                all_start_top_log_probs.append(mgr.gather(start_top_log_probs).cpu().numpy())
                all_start_top_index.append(mgr.gather(start_top_index).cpu().numpy())
                all_end_top_log_probs.append(mgr.gather(end_top_log_probs).cpu().numpy())
                all_end_top_index.append(mgr.gather(end_top_index).cpu().numpy())
                all_cls_logits.append(mgr.gather(cls_logits).cpu().numpy())
        l = max([x.shape[1] for x in all_end_top_log_probs])
        start_top_log_probs_concat = init_array(all_start_top_log_probs, ds, l)
        start_top_index_concat = init_array(all_start_top_index, ds, l)
        end_top_log_probs_concat = init_array(all_end_top_log_probs, ds, l)
        end_top_index_concat = init_array(all_end_top_index, ds, l)
        cls_logits_concat = np.concatenate(all_cls_logits, axis=0)
        # Free the last batch's tensors before post-processing.
        del start_top_log_probs
        del start_top_index
        del end_top_log_probs
        del end_top_index
        del cls_logits
        outputs_numpy = (
            start_top_log_probs_concat,
            start_top_index_concat,
            end_top_log_probs_concat,
            end_top_index_concat,
            cls_logits_concat,
        )
        y = self.post_proc(self.evals, ds, outputs_numpy)
        y = self.metric.compute(predictions=y.predictions, references=y.label_ids)
        log.info(f"Evaluation metrics: {y}")

    def post_proc(self, xs, features, preds, stage="eval"):
        """Convert raw beam-search outputs into an EvalPrediction of
        per-example answer texts plus gold references."""
        ps = self.params
        ys, diff = proc_preds(
            examples=xs,
            features=features,
            predictions=preds,
            version_2_with_negative=ps.version_2_with_negative,
            n_best_size=ps.n_best_size,
            max_answer_length=ps.max_answer_length,
            start_n_top=self.model.config.start_n_top,
            end_n_top=self.model.config.end_n_top,
            out_dir=ps.out_dir,
            prefix=stage,
        )
        if ps.version_2_with_negative:
            # SQuAD v2 metric also needs the no-answer probability.
            ys = [
                {"id": k, "prediction_text": v, "no_answer_probability": diff[k]}
                for k, v in ys.items()
            ]
        else:
            ys = [{"id": k, "prediction_text": v} for k, v in ys.items()]
        ids = [{"id": x["id"], "answers": x[self.cols[EACH][2]]} for x in xs]
        return EvalPrediction(predictions=ys, label_ids=ids)

    def pred(self):
        """Same loop as eval() but over the test split (only when
        params.do_test is set)."""
        ps, mgr = self.params, self.mgr
        if ps.do_test:
            ds, src = self.test_ds, self.loaders[TEST]
            log.info("*** Prediction ***")
            log.info(f"  Num samples = {len(ds)}")
            log.info(f"  Batch size per device = {ps.eval_batch_size}")
            all_start_top_log_probs = []
            all_start_top_index = []
            all_end_top_log_probs = []
            all_end_top_index = []
            all_cls_logits = []
            for xs in src:
                with torch.no_grad():
                    ys = self.model(**xs)
                    start_top_log_probs = ys.start_top_log_probs
                    start_top_index = ys.start_top_index
                    end_top_log_probs = ys.end_top_log_probs
                    end_top_index = ys.end_top_index
                    cls_logits = ys.cls_logits
                    if not ps.pad_to_max_length:
                        # Pad across processes so gather() sees equal-length tensors.
                        start_top_log_probs = mgr.pad_across_processes(
                            start_top_log_probs, dim=1, PAD=-100
                        )
                        start_top_index = mgr.pad_across_processes(start_top_index, dim=1, PAD=-100)
                        end_top_log_probs = mgr.pad_across_processes(
                            end_top_log_probs, dim=1, PAD=-100
                        )
                        end_top_index = mgr.pad_across_processes(end_top_index, dim=1, PAD=-100)
                        cls_logits = mgr.pad_across_processes(cls_logits, dim=1, PAD=-100)
                    all_start_top_log_probs.append(mgr.gather(start_top_log_probs).cpu().numpy())
                    all_start_top_index.append(mgr.gather(start_top_index).cpu().numpy())
                    all_end_top_log_probs.append(mgr.gather(end_top_log_probs).cpu().numpy())
                    all_end_top_index.append(mgr.gather(end_top_index).cpu().numpy())
                    all_cls_logits.append(mgr.gather(cls_logits).cpu().numpy())
            l = max([x.shape[1] for x in all_end_top_log_probs])
            start_top_log_probs_concat = init_array(all_start_top_log_probs, ds, l)
            start_top_index_concat = init_array(all_start_top_index, ds, l)
            end_top_log_probs_concat = init_array(all_end_top_log_probs, ds, l)
            end_top_index_concat = init_array(all_end_top_index, ds, l)
            cls_logits_concat = np.concatenate(all_cls_logits, axis=0)
            del start_top_log_probs
            del start_top_index
            del end_top_log_probs
            del end_top_index
            del cls_logits
            outputs_numpy = (
                start_top_log_probs_concat,
                start_top_index_concat,
                end_top_log_probs_concat,
                end_top_index_concat,
                cls_logits_concat,
            )
            y = self.post_proc(self.preds, ds, outputs_numpy)
            y = self.metric.compute(predictions=y.predictions, references=y.label_ids)
            log.info(f"Prediction metrics: {y}")
def proc_preds(
    examples,
    features,
    predictions,
    version_2_with_negative=False,
    n_best_size=20,
    max_answer_length=30,
    start_n_top=5,
    end_n_top=5,
    out_dir=None,
    prefix=None,
    log_level=logging.WARNING,
):
    """Turn XLNet beam-search outputs into per-example answer strings.

    predictions must be the 5-tuple (start_top_log_probs, start_top_index,
    end_top_log_probs, end_top_index, cls_logits), one row per feature.
    Returns (all_predictions, scores_diff_json); the latter is None unless
    version_2_with_negative. Optionally dumps JSON files under out_dir.
    """
    if len(predictions) != 5:
        raise ValueError("`predictions` should be a tuple with five elements.")
    start_top_log_probs, start_top_index, end_top_log_probs, end_top_index, cls_logits = predictions
    if len(predictions[0]) != len(features):
        raise ValueError(f"Got {len(predictions[0])} predictions and {len(features)} features.")
    # Map each example to the (possibly several) features derived from it.
    example_id_to_index = {k: i for i, k in enumerate(examples["id"])}
    features_per_example = collections.defaultdict(list)
    for i, feature in enumerate(features):
        features_per_example[example_id_to_index[feature["example_id"]]].append(i)
    all_predictions = collections.OrderedDict()
    all_nbest_json = collections.OrderedDict()
    scores_diff_json = collections.OrderedDict() if version_2_with_negative else None
    log.setLevel(log_level)
    log.info(
        f"Post-processing {len(examples)} example predictions split into {len(features)} features."
    )
    for example_index, example in enumerate(tqdm(examples)):
        feature_indices = features_per_example[example_index]
        min_null_score = None
        prelim_predictions = []
        for feature_index in feature_indices:
            start_log_prob = start_top_log_probs[feature_index]
            start_indexes = start_top_index[feature_index]
            end_log_prob = end_top_log_probs[feature_index]
            end_indexes = end_top_index[feature_index]
            feature_null_score = cls_logits[feature_index]
            offset_mapping = features[feature_index]["offset_mapping"]
            token_is_max_context = features[feature_index].get("token_is_max_context", None)
            # Keep the lowest (most confident) no-answer score across features.
            if min_null_score is None or feature_null_score < min_null_score:
                min_null_score = feature_null_score
            # Consider every (start beam, end beam) pair; end beams are laid
            # out per start beam, hence the i * end_n_top + j indexing.
            for i in range(start_n_top):
                for j in range(end_n_top):
                    start_index = int(start_indexes[i])
                    j_index = i * end_n_top + j
                    end_index = int(end_indexes[j_index])
                    # Skip spans outside the context (None offsets) or out of range.
                    if (
                        start_index >= len(offset_mapping)
                        or end_index >= len(offset_mapping)
                        or offset_mapping[start_index] is None
                        or offset_mapping[end_index] is None
                    ):
                        continue
                    # Skip inverted or over-long spans.
                    if end_index < start_index or end_index - start_index + 1 > max_answer_length:
                        continue
                    # Skip spans whose start token has better context elsewhere.
                    if token_is_max_context is not None and not token_is_max_context.get(
                        str(start_index), False
                    ):
                        continue
                    prelim_predictions.append(
                        {
                            "offsets": (
                                offset_mapping[start_index][0],
                                offset_mapping[end_index][1],
                            ),
                            "score": start_log_prob[i] + end_log_prob[j_index],
                            "start_log_prob": start_log_prob[i],
                            "end_log_prob": end_log_prob[j_index],
                        }
                    )
        # Keep only the n_best_size highest-scoring spans.
        predictions = sorted(prelim_predictions, key=lambda x: x["score"], reverse=True)[
            :n_best_size
        ]
        # Resolve character offsets to answer text.
        context = example["context"]
        for pred in predictions:
            offsets = pred.pop("offsets")
            pred["text"] = context[offsets[0] : offsets[1]]
        if len(predictions) == 0:
            # Fake empty prediction to avoid a crash downstream.
            # NOTE(review): this placeholder uses "start_logit"/"end_logit"
            # keys while real entries use "start_log_prob"/"end_log_prob" --
            # confirm whether consumers of the nbest file care.
            predictions.insert(
                0, {"text": "", "start_logit": -1e-6, "end_logit": -1e-6, "score": -2e-6}
            )
        # Softmax over span scores (shifted by the max for stability).
        scores = np.array([pred.pop("score") for pred in predictions])
        exp_scores = np.exp(scores - np.max(scores))
        probs = exp_scores / exp_scores.sum()
        for prob, pred in zip(probs, predictions):
            pred["probability"] = prob
        all_predictions[example["id"]] = predictions[0]["text"]
        if version_2_with_negative:
            scores_diff_json[example["id"]] = float(min_null_score)
        # JSON-friendly copy of the n-best list (numpy floats -> Python floats).
        all_nbest_json[example["id"]] = [
            {
                k: (float(v) if isinstance(v, (np.float16, np.float32, np.float64)) else v)
                for k, v in pred.items()
            }
            for pred in predictions
        ]
    if out_dir is not None:
        if not os.path.isdir(out_dir):
            raise EnvironmentError(f"{out_dir} is not a directory.")
        prediction_file = os.path.join(
            out_dir, "predictions.json" if prefix is None else f"{prefix}_predictions.json"
        )
        nbest_file = os.path.join(
            out_dir,
            "nbest_predictions.json" if prefix is None else f"{prefix}_nbest_predictions.json",
        )
        if version_2_with_negative:
            null_odds_file = os.path.join(
                out_dir, "null_odds.json" if prefix is None else f"{prefix}_null_odds.json"
            )
        log.info(f"Saving predictions to {prediction_file}.")
        with open(prediction_file, "w") as writer:
            writer.write(json.dumps(all_predictions, indent=4) + "\n")
        log.info(f"Saving nbest_preds to {nbest_file}.")
        with open(nbest_file, "w") as writer:
            writer.write(json.dumps(all_nbest_json, indent=4) + "\n")
        if version_2_with_negative:
            log.info(f"Saving null_odds to {null_odds_file}.")
            with open(null_odds_file, "w") as writer:
                writer.write(json.dumps(scores_diff_json, indent=4) + "\n")
    return all_predictions, scores_diff_json
def main():
    """Build the runner, touch its lazily-built pieces, then run the full
    train / eval / predict / save pipeline."""
    runner = Runner()
    # Accessing these properties materializes dataset, columns, config,
    # tokenizer, model and data loaders up front.
    runner.dataset
    runner.cols
    runner.config
    runner.tokenizer
    runner.model
    runner.loaders
    runner.prepare()
    runner.train()
    runner.eval()
    runner.pred()
    runner.save()
if __name__ == "__main__":
    main()
| [
"os.path.isdir",
"transformers.EvalPrediction",
"logging.getLogger",
"json.dumps",
"collections.defaultdict",
"tqdm.auto.tqdm",
"numpy.max",
"collections.OrderedDict",
"torch.no_grad",
"os.path.join",
"numpy.concatenate"
] | [((1085, 1112), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (1102, 1112), False, 'import logging\n'), ((12945, 12974), 'collections.defaultdict', 'collections.defaultdict', (['list'], {}), '(list)\n', (12968, 12974), False, 'import collections\n'), ((13123, 13148), 'collections.OrderedDict', 'collections.OrderedDict', ([], {}), '()\n', (13146, 13148), False, 'import collections\n'), ((13170, 13195), 'collections.OrderedDict', 'collections.OrderedDict', ([], {}), '()\n', (13193, 13195), False, 'import collections\n'), ((7668, 7706), 'numpy.concatenate', 'np.concatenate', (['all_cls_logits'], {'axis': '(0)'}), '(all_cls_logits, axis=0)\n', (7682, 7706), True, 'import numpy as np\n'), ((9157, 9202), 'transformers.EvalPrediction', 'EvalPrediction', ([], {'predictions': 'ys', 'label_ids': 'ids'}), '(predictions=ys, label_ids=ids)\n', (9171, 9202), False, 'from transformers import XLNetConfig, XLNetTokenizerFast, XLNetForQuestionAnswering, EvalPrediction\n'), ((13219, 13244), 'collections.OrderedDict', 'collections.OrderedDict', ([], {}), '()\n', (13242, 13244), False, 'import collections\n'), ((13474, 13488), 'tqdm.auto.tqdm', 'tqdm', (['examples'], {}), '(examples)\n', (13478, 13488), False, 'from tqdm.auto import tqdm\n'), ((16985, 17082), 'os.path.join', 'os.path.join', (['out_dir', "('predictions.json' if prefix is None else f'{prefix}_predictions.json')"], {}), "(out_dir, 'predictions.json' if prefix is None else\n f'{prefix}_predictions.json')\n", (16997, 17082), False, 'import os\n'), ((17122, 17231), 'os.path.join', 'os.path.join', (['out_dir', "('nbest_predictions.json' if prefix is None else\n f'{prefix}_nbest_predictions.json')"], {}), "(out_dir, 'nbest_predictions.json' if prefix is None else\n f'{prefix}_nbest_predictions.json')\n", (17134, 17231), False, 'import os\n'), ((11603, 11641), 'numpy.concatenate', 'np.concatenate', (['all_cls_logits'], {'axis': '(0)'}), '(all_cls_logits, axis=0)\n', (11617, 11641), True, 
'import numpy as np\n'), ((16866, 16888), 'os.path.isdir', 'os.path.isdir', (['out_dir'], {}), '(out_dir)\n', (16879, 16888), False, 'import os\n'), ((17328, 17421), 'os.path.join', 'os.path.join', (['out_dir', "('null_odds.json' if prefix is None else f'{prefix}_null_odds.json')"], {}), "(out_dir, 'null_odds.json' if prefix is None else\n f'{prefix}_null_odds.json')\n", (17340, 17421), False, 'import os\n'), ((5959, 5974), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (5972, 5974), False, 'import torch\n'), ((16254, 16268), 'numpy.max', 'np.max', (['scores'], {}), '(scores)\n', (16260, 16268), True, 'import numpy as np\n'), ((9740, 9755), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (9753, 9755), False, 'import torch\n'), ((17586, 17623), 'json.dumps', 'json.dumps', (['all_predictions'], {'indent': '(4)'}), '(all_predictions, indent=4)\n', (17596, 17623), False, 'import json\n'), ((17760, 17796), 'json.dumps', 'json.dumps', (['all_nbest_json'], {'indent': '(4)'}), '(all_nbest_json, indent=4)\n', (17770, 17796), False, 'import json\n'), ((17987, 18025), 'json.dumps', 'json.dumps', (['scores_diff_json'], {'indent': '(4)'}), '(scores_diff_json, indent=4)\n', (17997, 18025), False, 'import json\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import numpy as np
import scipy as sp
from scipy import sparse
from scipy.sparse import linalg
def error_A_norm(**kwargs):
    '''
    callback function to compute A-norm of error at each step
    Parameters
    ----------
    kwargs['k'] : integer
        current iteration
    kwargs['x_k'] : (n,) array like
        solution at step k
    kwargs['kwargs']['x_true'] : (n,) array like
        actual solution to system (if this is not provided, the callback function will compute and cache it using kwargs['A'] and kwargs['b'])
    Modifies
    -------
    output['error_A_norm'] : (max_iter,) array like
        list of A-norm of error at each iteration
    '''
    output = kwargs['output']
    A = kwargs['A']
    x = kwargs['x_k']
    b = kwargs['b']
    k = kwargs['k']
    # check if actual solution is known, otherwise compute it
    # (stored back into the nested kwargs dict so it is only solved once)
    if 'x_true' not in kwargs['kwargs'].keys():
        # print('true solution unknown; computing...')
        solver = sp.sparse.linalg.spsolve if sp.sparse.issparse(A) else np.linalg.solve
        kwargs['kwargs']['x_true'] = solver(A.astype(np.double),b.astype(np.double))
    # initialize the per-iteration result buffer on the first call
    if k==0:
        max_iter = kwargs['max_iter']
        output['error_A_norm'] = np.zeros(max_iter,dtype=A.dtype)
    # compute A-norm of error: sqrt(e^T A e) with e = x_k - x_true
    error = x - kwargs['kwargs']['x_true'].astype(A.dtype)
    output['error_A_norm'][k] = np.sqrt(error.T@(A@error))
| [
"scipy.sparse.issparse",
"numpy.zeros",
"numpy.sqrt"
] | [((1491, 1521), 'numpy.sqrt', 'np.sqrt', (['(error.T @ (A @ error))'], {}), '(error.T @ (A @ error))\n', (1498, 1521), True, 'import numpy as np\n'), ((1323, 1356), 'numpy.zeros', 'np.zeros', (['max_iter'], {'dtype': 'A.dtype'}), '(max_iter, dtype=A.dtype)\n', (1331, 1356), True, 'import numpy as np\n'), ((1093, 1114), 'scipy.sparse.issparse', 'sp.sparse.issparse', (['A'], {}), '(A)\n', (1111, 1114), True, 'import scipy as sp\n')] |
import matplotlib.pyplot as pyplot
import numpy
# inspired by http://people.duke.edu/~ccc14/pcfb/numpympl/MatplotlibBarPlots.html
# Side-by-side bar charts of sysbench CPU results for six cloud instance types.
xTickMarks = ["azure A1", "azure A4", "amazon T2", "amazon C4", "amazon M4", "amazon R4"]
N = 6
CPU_total_time = [66.8626, 66.6122, 29.8535, 25.0010, 29.3211, 27.8841]
CPU_avg_request = [6.69, 6.66, 2.986, 2.50, 2.93, 2.79]
ind = numpy.arange(N)  # x position of each bar
width = 0.35  # bar width
fig = pyplot.figure()
fig.suptitle("sysbench CPU benchmark")
ax = fig.add_subplot(121) # add_subplot(nrows, ncols, index): 1 row, 2 cols, left plot
ax.bar(ind, CPU_total_time, width)
ax.set_xlim(-width,len(ind)+width)
ax.set_ylim(min(CPU_total_time)-1, max(CPU_total_time)+1)
ax.set_xticks(ind)
ax.set_ylabel("total time (s)")
xtickNames = ax.set_xticklabels(xTickMarks)
pyplot.setp(xtickNames, rotation=45, fontsize=10)
# Right plot: average time per request.
ax2 = fig.add_subplot(122)
ax2.bar(ind, CPU_avg_request, width)
ax2.set_xlim(-width,len(ind)+width)
ax2.set_ylim(min(CPU_avg_request)-1, max(CPU_avg_request)+1)
ax2.set_xticks(ind)
ax2.set_ylabel("avg time per request (ms)")
xtickNames = ax2.set_xticklabels(xTickMarks)
pyplot.setp(xtickNames, rotation=45, fontsize=10)
pyplot.tight_layout()
pyplot.subplots_adjust(top=0.9)  # leave room for the suptitle
pyplot.show()
| [
"matplotlib.pyplot.show",
"matplotlib.pyplot.setp",
"matplotlib.pyplot.figure",
"numpy.arange",
"matplotlib.pyplot.subplots_adjust",
"matplotlib.pyplot.tight_layout"
] | [((363, 378), 'numpy.arange', 'numpy.arange', (['N'], {}), '(N)\n', (375, 378), False, 'import numpy\n'), ((399, 414), 'matplotlib.pyplot.figure', 'pyplot.figure', ([], {}), '()\n', (412, 414), True, 'import matplotlib.pyplot as pyplot\n'), ((741, 790), 'matplotlib.pyplot.setp', 'pyplot.setp', (['xtickNames'], {'rotation': '(45)', 'fontsize': '(10)'}), '(xtickNames, rotation=45, fontsize=10)\n', (752, 790), True, 'import matplotlib.pyplot as pyplot\n'), ((1062, 1111), 'matplotlib.pyplot.setp', 'pyplot.setp', (['xtickNames'], {'rotation': '(45)', 'fontsize': '(10)'}), '(xtickNames, rotation=45, fontsize=10)\n', (1073, 1111), True, 'import matplotlib.pyplot as pyplot\n'), ((1113, 1134), 'matplotlib.pyplot.tight_layout', 'pyplot.tight_layout', ([], {}), '()\n', (1132, 1134), True, 'import matplotlib.pyplot as pyplot\n'), ((1135, 1166), 'matplotlib.pyplot.subplots_adjust', 'pyplot.subplots_adjust', ([], {'top': '(0.9)'}), '(top=0.9)\n', (1157, 1166), True, 'import matplotlib.pyplot as pyplot\n'), ((1168, 1181), 'matplotlib.pyplot.show', 'pyplot.show', ([], {}), '()\n', (1179, 1181), True, 'import matplotlib.pyplot as pyplot\n')] |
# Author: SiliconSloth 18/1/2018
import numpy as np
import cPickle
import cv2
# PyCharm seems to need this to work properly.
# try:
#     from cv2 import cv2
# except:
#     pass
# In a raw training video there is often very little difference between consecutive frames, so using every single frame of the video
# is a waste of memory and processing time. This script compares consecutive frames of a training video using the key points produced
# by FeatureDetector.py to see how similar they are, and if there is very little movement between them some frames will be removed from the video
# so that each frame shows noticeable change since the last.
# NOTE(review): the trailing slash on videoPath looks required for path
# concatenation with the filenames below -- confirm against later usage.
videoPath = "/home/someone/RobleyVision/Recordings/" # Where video files will be saved to.
# Input file names.
videoFile = "Training.avi"
featureFile = "TrainingFeatures.pkl"
# The filenames to which the reduced video and feature files should be saved.
reducedVideoFile = "TrainingReduced.avi"
reducedFeatureFile = "ReducedFeatures.pkl"
# Function to find matching key points between two images.
def match(keypoints1, descriptors1, keypoints2, descriptors2):
    """Return plausible descriptor matches between two frames.

    Uses the module-level brute-force matcher ``bf`` and then discards
    matches whose key points are further than 100 px apart in either
    axis, since points cannot realistically travel that far between
    consecutive frames; such pairings are almost certainly mismatches.
    Returns an empty list when matching fails.
    """
    try:
        matches = bf.match(descriptors1, descriptors2)
    except Exception:
        # bf.match() can raise when either frame has too few key points;
        # treat that as "no matches". (Was a bare `except:`, which also
        # swallowed KeyboardInterrupt/SystemExit.)
        return []
    goodMatches = []
    for m in matches:
        # Get the coordinates of the two key points in this match.
        point1, point2 = keypoints1[m.queryIdx].pt, keypoints2[m.trainIdx].pt
        if abs(point1[1] - point2[1]) < 100 and abs(point1[0] - point2[0]) < 100:
            goodMatches.append(m)
    return goodMatches
# Function to calculate the median optical flow between two images.
def calculateFlow(keypoints1, descriptors1, keypoints2, descriptors2):
    """Return the median optical-flow vector (x, y) between two frames.

    Returns None when no key point matches could be found, in which
    case the flow is undefined.
    """
    good = match(keypoints1, descriptors1, keypoints2, descriptors2)
    if not good:
        # Matching failed (empty list), so there is nothing to measure.
        return None
    # One (dx, dy) displacement per match: second frame minus first frame.
    deltas = []
    for m in good:
        p1 = keypoints1[m.queryIdx].pt
        p2 = keypoints2[m.trainIdx].pt
        deltas.append((p2[0] - p1[0], p2[1] - p1[1]))
    # Median over all vectors, component-wise.
    return np.median(np.array(deltas), 0)
# Create the brute-force matcher.
# Hamming norm matches ORB/BRIEF-style binary descriptors; crossCheck keeps
# only mutually-best pairings.
bf = cv2.BFMatcher_create(cv2.NORM_HAMMING, crossCheck=True)
# Load the video and feature list.
capture = cv2.VideoCapture(videoPath + videoFile)
dimensions = int(capture.get(3)), int(capture.get(4)) # Find the width and height of the video.
# NOTE(review): 'file' shadows the (Python 2) builtin name; harmless but worth renaming.
with open(videoPath+featureFile, "rb") as file:
    features = cPickle.load(file)
# Lists to store the reduced video frames and feature tuples.
outputVideo = []
outputFeatures = []
frame2, keypoints2, descriptors2 = None, None, None
for i, (pointTuples, descs) in enumerate(features):
    # frame1 is the previous video frame and frame2 is the one just read from the video capture.
    # Set frame1 to hold the frame taken from the capture on the last iteration, or None if this is the first frame.
    frame1 = frame2
    keypoints1 = keypoints2
    descriptors1 = descriptors2
    # Read the next video frame.
    frame2 = capture.read()[1]
    # Convert the point tuples made by FeatureDetector.py back to OpenCV KeyPoint objects.
    keypoints2 = []
    for point in pointTuples:
        keypoints2.append(cv2.KeyPoint(point[0][0], point[0][1], point[1], point[2], point[3], point[4], point[5]))
    descriptors2 = descs
    # If keypoints1 is None this is the first frame. For some reason the first frame of the video always
    # seems to just contain rubbish, so ignore it.
    if keypoints1 is None:
        # Skip the code that adds this frame to the output list, removing it from the output video.
        continue
    # Calculate the median optical flow between the two frames.
    flow = calculateFlow(keypoints1, descriptors1, keypoints2, descriptors2)
    # If flow is None that means matching failed and the flow could not be found, implying that the frames are very different
    # and so should both be kept.
    # If this is the last frame in the video, it should always be kept to ensure that the robot drives to the very end of the route.
    # If neither of the above is true and the x and y components of the optical flow are very small, this suggests there was very little movement
    # between the two frames so the second one should be removed from the video.
    # The 20/30 thresholds are presumably pixel displacements tuned for this camera — TODO confirm.
    if flow is not None and i < len(features)-1 and abs(flow[0]) < 20 and abs(flow[1]) < 30:
        # Make sure frame2 contains the latest frame in the video, ready for the next iteration.
        frame2 = frame1
        keypoints2 = keypoints1
        descriptors2 = descriptors1
        continue # Skip adding the frame.
    # Add the frame and its features to the output lists, if the frame was not skipped.
    outputVideo.append(frame2)
    outputFeatures.append((pointTuples, descs))
# Save the reduced video to the output file.
# MJPG codec at a hard-coded 20 fps, same dimensions as the input.
writer = cv2.VideoWriter(videoPath+reducedVideoFile, cv2.VideoWriter_fourcc(*"MJPG"), 20, dimensions, True)
for frame in outputVideo:
    writer.write(frame)
writer.release()
# Save the reduced feature lists to the output file.
with open(videoPath+reducedFeatureFile, "wb") as file:
    cPickle.dump(outputFeatures, file, 2)
| [
"cv2.VideoWriter_fourcc",
"numpy.median",
"cPickle.load",
"cv2.VideoCapture",
"cPickle.dump",
"cv2.KeyPoint",
"numpy.array",
"cv2.BFMatcher_create"
] | [((3093, 3148), 'cv2.BFMatcher_create', 'cv2.BFMatcher_create', (['cv2.NORM_HAMMING'], {'crossCheck': '(True)'}), '(cv2.NORM_HAMMING, crossCheck=True)\n', (3113, 3148), False, 'import cv2\n'), ((3198, 3237), 'cv2.VideoCapture', 'cv2.VideoCapture', (['(videoPath + videoFile)'], {}), '(videoPath + videoFile)\n', (3214, 3237), False, 'import cv2\n'), ((2773, 2933), 'numpy.array', 'np.array', (['[(keypoints2[m.trainIdx].pt[0] - keypoints1[m.queryIdx].pt[0], keypoints2[m\n .trainIdx].pt[1] - keypoints1[m.queryIdx].pt[1]) for m in matches]'], {}), '([(keypoints2[m.trainIdx].pt[0] - keypoints1[m.queryIdx].pt[0], \n keypoints2[m.trainIdx].pt[1] - keypoints1[m.queryIdx].pt[1]) for m in\n matches])\n', (2781, 2933), True, 'import numpy as np\n'), ((3022, 3047), 'numpy.median', 'np.median', (['flowVectors', '(0)'], {}), '(flowVectors, 0)\n', (3031, 3047), True, 'import numpy as np\n'), ((3402, 3420), 'cPickle.load', 'cPickle.load', (['file'], {}), '(file)\n', (3414, 3420), False, 'import cPickle\n'), ((5872, 5903), 'cv2.VideoWriter_fourcc', 'cv2.VideoWriter_fourcc', (["*'MJPG'"], {}), "(*'MJPG')\n", (5894, 5903), False, 'import cv2\n'), ((6114, 6151), 'cPickle.dump', 'cPickle.dump', (['outputFeatures', 'file', '(2)'], {}), '(outputFeatures, file, 2)\n', (6126, 6151), False, 'import cPickle\n'), ((4171, 4264), 'cv2.KeyPoint', 'cv2.KeyPoint', (['point[0][0]', 'point[0][1]', 'point[1]', 'point[2]', 'point[3]', 'point[4]', 'point[5]'], {}), '(point[0][0], point[0][1], point[1], point[2], point[3], point[\n 4], point[5])\n', (4183, 4264), False, 'import cv2\n')] |
import argparse
import logging
import os
import random
import socket
import sys
from sklearn.utils import shuffle
import numpy as np
import psutil
import setproctitle
import torch
sys.path.insert(0, os.path.abspath(os.path.join(os.getcwd(), "../../../")))
from fedml_api.model.finance.vfl_classifier import VFLClassifier
from fedml_api.model.finance.vfl_feature_extractor import VFLFeatureExtractor
from fedml_api.data_preprocessing.lending_club_loan.lending_club_dataset import loan_load_three_party_data
from fedml_api.data_preprocessing.NUS_WIDE.nus_wide_dataset import NUS_WIDE_load_three_party_data
from fedml_api.distributed.classical_vertical_fl.vfl_api import FedML_VFL_distributed
from fedml_api.distributed.fedavg.FedAvgAPI import FedML_init
def add_args(parser):
    """Register the training hyper-parameter options on *parser*.

    parser : argparse.ArgumentParser
    Returns the parsed argument namespace (``parser.parse_args()``).
    """
    # (flag, options) table keeps each option's configuration in one place.
    option_specs = [
        ('--dataset', dict(type=str, default='lending_club_loan', metavar='N',
                           help='dataset used for training')),
        ('--client_number', dict(type=int, default=2, metavar='NN',
                                 help='number of workers in a distributed cluster')),
        ('--comm_round', dict(type=int, default=100,
                              help='how many round of communications we shoud use')),
        ('--batch_size', dict(type=int, default=256, metavar='N',
                              help='input batch size for training (default: 64)')),
        ('--lr', dict(type=float, default=0.01, metavar='LR',
                      help='learning rate (default: 0.001)')),
        ('--frequency_of_the_test', dict(type=int, default=30,
                                         help='the frequency of the algorithms')),
    ]
    for flag, options in option_specs:
        parser.add_argument(flag, **options)
    return parser.parse_args()
def init_training_device(process_ID, fl_worker_num, gpu_num_per_machine):
    """Map an MPI process ID to a torch device.

    Process 0 (the coordinator) always gets GPU 0 when CUDA is available;
    worker process i (i >= 1) gets GPU (i - 1) % gpu_num_per_machine.
    Falls back to CPU when CUDA is unavailable.
    """
    use_cuda = torch.cuda.is_available()
    if process_ID == 0:
        return torch.device("cuda:0" if use_cuda else "cpu")
    # Round-robin assignment of the fl_worker_num workers onto the GPUs.
    process_gpu_dict = {
        worker: worker % gpu_num_per_machine for worker in range(fl_worker_num)
    }
    logging.info(process_gpu_dict)
    gpu_id = process_gpu_dict[process_ID - 1]
    device = torch.device("cuda:" + str(gpu_id) if use_cuda else "cpu")
    logging.info(device)
    return device
if __name__ == "__main__":
# initialize distributed computing (MPI)
comm, process_id, worker_number = FedML_init()
# parse python script input parameters
parser = argparse.ArgumentParser()
args = add_args(parser)
# customize the process name
str_process_name = "Federated Learning:" + str(process_id)
setproctitle.setproctitle(str_process_name)
# customize the log format
logging.basicConfig(level=logging.INFO,
format=str(
process_id) + ' - %(asctime)s %(filename)s[line:%(lineno)d] %(levelname)s %(message)s',
datefmt='%a, %d %b %Y %H:%M:%S')
hostname = socket.gethostname()
logging.info("#############process ID = " + str(process_id) +
", host name = " + hostname + "########" +
", process ID = " + str(os.getpid()) +
", process Name = " + str(psutil.Process(os.getpid())))
# Set the random seed. The np.random seed determines the dataset partition.
# The torch_manual_seed determines the initial weight.
# We fix these two, so that we can reproduce the result.
seed = 0
np.random.seed(seed)
torch.manual_seed(worker_number)
random.seed(0)
# GPU management
logging.info("process_id = %d, size = %d" % (process_id, worker_number))
device = init_training_device(process_id, worker_number-1, 4)
# load data
print("################################ Prepare Data ############################")
if args.dataset == "lending_club_loan":
data_dir = "../../../data/lending_club_loan/"
train, test = loan_load_three_party_data(data_dir)
elif args.dataset == "NUS_WIDE":
data_dir = "../../../data/NUS_WIDE"
class_lbls = ['person', 'animal']
train, test = NUS_WIDE_load_three_party_data(data_dir, class_lbls, neg_label=0)
else:
data_dir = "../../../data/lending_club_loan/"
train, test = loan_load_three_party_data(data_dir)
Xa_train, Xb_train, Xc_train, y_train = train
Xa_test, Xb_test, Xc_test, y_test = test
Xa_train, Xb_train, Xc_train, y_train = shuffle(Xa_train, Xb_train, Xc_train, y_train)
Xa_test, Xb_test, Xc_test, y_test = shuffle(Xa_test, Xb_test, Xc_test, y_test)
train = [Xa_train, Xb_train, Xc_train, y_train]
test = [Xa_test, Xb_test, Xc_test, y_test]
guest_data = [Xa_train, y_train, Xa_test, y_test]
host_data = None
if process_id == 1:
host_data = [Xb_train, Xb_test]
elif process_id == 2:
host_data = [Xc_train, Xc_test]
# create models for each worker
if process_id == 0:
guest_feature_extractor = VFLFeatureExtractor(input_dim=Xa_train.shape[1], output_dim=10).to(device)
guest_classifier = VFLClassifier(guest_feature_extractor.get_output_dim(), 1).to(device)
guest_model = [guest_feature_extractor, guest_classifier]
host_model = [None, None]
elif process_id == 1:
host_feature_extractor = VFLFeatureExtractor(input_dim=Xb_train.shape[1], output_dim=10).to(device)
host_classifier = VFLClassifier(host_feature_extractor.get_output_dim(), 1).to(device)
host_model = [host_feature_extractor, host_classifier]
guest_model = [None, None]
elif process_id == 2:
host_feature_extractor = VFLFeatureExtractor(input_dim=Xc_train.shape[1], output_dim=10).to(device)
host_classifier = VFLClassifier(host_feature_extractor.get_output_dim(), 1).to(device)
host_model = [host_feature_extractor, host_classifier]
guest_model = [None, None]
else:
guest_model = [None, None]
host_model = [None, None]
FedML_VFL_distributed(process_id, worker_number, comm, args, device, guest_data, guest_model, host_data, host_model)
| [
"fedml_api.distributed.classical_vertical_fl.vfl_api.FedML_VFL_distributed",
"numpy.random.seed",
"argparse.ArgumentParser",
"os.getpid",
"os.getcwd",
"torch.manual_seed",
"fedml_api.data_preprocessing.lending_club_loan.lending_club_dataset.loan_load_three_party_data",
"setproctitle.setproctitle",
"... | [((2268, 2298), 'logging.info', 'logging.info', (['process_gpu_dict'], {}), '(process_gpu_dict)\n', (2280, 2298), False, 'import logging\n'), ((2418, 2438), 'logging.info', 'logging.info', (['device'], {}), '(device)\n', (2430, 2438), False, 'import logging\n'), ((2569, 2581), 'fedml_api.distributed.fedavg.FedAvgAPI.FedML_init', 'FedML_init', ([], {}), '()\n', (2579, 2581), False, 'from fedml_api.distributed.fedavg.FedAvgAPI import FedML_init\n'), ((2639, 2664), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (2662, 2664), False, 'import argparse\n'), ((2794, 2837), 'setproctitle.setproctitle', 'setproctitle.setproctitle', (['str_process_name'], {}), '(str_process_name)\n', (2819, 2837), False, 'import setproctitle\n'), ((3138, 3158), 'socket.gethostname', 'socket.gethostname', ([], {}), '()\n', (3156, 3158), False, 'import socket\n'), ((3632, 3652), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (3646, 3652), True, 'import numpy as np\n'), ((3657, 3689), 'torch.manual_seed', 'torch.manual_seed', (['worker_number'], {}), '(worker_number)\n', (3674, 3689), False, 'import torch\n'), ((3694, 3708), 'random.seed', 'random.seed', (['(0)'], {}), '(0)\n', (3705, 3708), False, 'import random\n'), ((3735, 3807), 'logging.info', 'logging.info', (["('process_id = %d, size = %d' % (process_id, worker_number))"], {}), "('process_id = %d, size = %d' % (process_id, worker_number))\n", (3747, 3807), False, 'import logging\n'), ((4611, 4657), 'sklearn.utils.shuffle', 'shuffle', (['Xa_train', 'Xb_train', 'Xc_train', 'y_train'], {}), '(Xa_train, Xb_train, Xc_train, y_train)\n', (4618, 4657), False, 'from sklearn.utils import shuffle\n'), ((4698, 4740), 'sklearn.utils.shuffle', 'shuffle', (['Xa_test', 'Xb_test', 'Xc_test', 'y_test'], {}), '(Xa_test, Xb_test, Xc_test, y_test)\n', (4705, 4740), False, 'from sklearn.utils import shuffle\n'), ((6152, 6272), 'fedml_api.distributed.classical_vertical_fl.vfl_api.FedML_VFL_distributed', 
'FedML_VFL_distributed', (['process_id', 'worker_number', 'comm', 'args', 'device', 'guest_data', 'guest_model', 'host_data', 'host_model'], {}), '(process_id, worker_number, comm, args, device,\n guest_data, guest_model, host_data, host_model)\n', (6173, 6272), False, 'from fedml_api.distributed.classical_vertical_fl.vfl_api import FedML_VFL_distributed\n'), ((4099, 4135), 'fedml_api.data_preprocessing.lending_club_loan.lending_club_dataset.loan_load_three_party_data', 'loan_load_three_party_data', (['data_dir'], {}), '(data_dir)\n', (4125, 4135), False, 'from fedml_api.data_preprocessing.lending_club_loan.lending_club_dataset import loan_load_three_party_data\n'), ((230, 241), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (239, 241), False, 'import os\n'), ((2376, 2401), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (2399, 2401), False, 'import torch\n'), ((4281, 4346), 'fedml_api.data_preprocessing.NUS_WIDE.nus_wide_dataset.NUS_WIDE_load_three_party_data', 'NUS_WIDE_load_three_party_data', (['data_dir', 'class_lbls'], {'neg_label': '(0)'}), '(data_dir, class_lbls, neg_label=0)\n', (4311, 4346), False, 'from fedml_api.data_preprocessing.NUS_WIDE.nus_wide_dataset import NUS_WIDE_load_three_party_data\n'), ((4433, 4469), 'fedml_api.data_preprocessing.lending_club_loan.lending_club_dataset.loan_load_three_party_data', 'loan_load_three_party_data', (['data_dir'], {}), '(data_dir)\n', (4459, 4469), False, 'from fedml_api.data_preprocessing.lending_club_loan.lending_club_dataset import loan_load_three_party_data\n'), ((2021, 2046), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (2044, 2046), False, 'import torch\n'), ((5142, 5205), 'fedml_api.model.finance.vfl_feature_extractor.VFLFeatureExtractor', 'VFLFeatureExtractor', ([], {'input_dim': 'Xa_train.shape[1]', 'output_dim': '(10)'}), '(input_dim=Xa_train.shape[1], output_dim=10)\n', (5161, 5205), False, 'from fedml_api.model.finance.vfl_feature_extractor import 
VFLFeatureExtractor\n'), ((3399, 3410), 'os.getpid', 'os.getpid', ([], {}), '()\n', (3408, 3410), False, 'import os\n'), ((5473, 5536), 'fedml_api.model.finance.vfl_feature_extractor.VFLFeatureExtractor', 'VFLFeatureExtractor', ([], {'input_dim': 'Xb_train.shape[1]', 'output_dim': '(10)'}), '(input_dim=Xb_train.shape[1], output_dim=10)\n', (5492, 5536), False, 'from fedml_api.model.finance.vfl_feature_extractor import VFLFeatureExtractor\n'), ((3326, 3337), 'os.getpid', 'os.getpid', ([], {}), '()\n', (3335, 3337), False, 'import os\n'), ((5800, 5863), 'fedml_api.model.finance.vfl_feature_extractor.VFLFeatureExtractor', 'VFLFeatureExtractor', ([], {'input_dim': 'Xc_train.shape[1]', 'output_dim': '(10)'}), '(input_dim=Xc_train.shape[1], output_dim=10)\n', (5819, 5863), False, 'from fedml_api.model.finance.vfl_feature_extractor import VFLFeatureExtractor\n')] |
##Python script to convert back from sphire to relion##
import sys
import os
import numpy as np
def is_number(s): ##### definition to check for int
    """Return True when *s* parses as an int, False on ValueError."""
    try:
        int(s)
    except ValueError:
        return False
    return True
print('\nTaking sphire substack id list to convert back to relion!')
# Initialise the three expected options so that a missing flag falls through
# to the friendly error message below instead of raising a NameError.
star_path = None
id_list = None
star_out = None
i = 1
while i <= len(sys.argv)-1: ##### extract star file input
    if '-star' in sys.argv[i]:
        if sys.argv[i+1]:
            star_path = str(sys.argv[i+1])
        else:
            break
        break
    else:
        i+=1
i = 1
while i <= len(sys.argv)-1: ##### extract id file input
    if '-id' in sys.argv[i]:
        if sys.argv[i+1]:
            id_list = str(sys.argv[i+1])
        else:
            break
        break
    else:
        i+=1
i = 1  # BUGFIX: i was not reset here, so '-out' was only found when it appeared after '-id' in argv
while i <= len(sys.argv)-1: ##### extract star file output
    if '-out' in sys.argv[i]:
        if sys.argv[i+1]:
            star_out = str(sys.argv[i+1])
        else:
            break
        break
    else:
        i+=1
if star_path and id_list and star_out:
    pass
else:
    print("Please define STAR input (-star), sphire id file (-id) and STAR output (-out) properly and check for correct paths!\n")
    quit()
print('\nStar file input: %s' %star_path)
print('ID list: %s' %id_list)
print('Star output: %s' %star_out)
###remove output star file in case it exists already
try:
    os.remove(star_out)
except OSError:
    pass
#####check whether input files exist
if os.path.isfile(star_path):
    pass
else:
    print("\nCan't open input star file! Check path!\n")
    print("EXIT!\n")
    quit()
if os.path.isfile(id_list):
    pass
else:
    print("\nCan't open input id file from sphire substack! Check path!\n")
    print("EXIT!\n")
    quit()
####create star file output
# atleast_1d guards the single-id case, where loadtxt returns a 0-d array
# that would break the `i in part_id` membership test below.
part_id = np.atleast_1d(np.loadtxt(id_list, dtype=int))
i=0
with open(star_out, 'a') as out_file:
    with open (star_path, 'rt') as in_file:
        for line in in_file:
            # Only particle rows (those naming a .mrc stack) are counted and
            # filtered by the kept-id list; header lines pass straight through.
            if line.find('.mrc') != -1:
                if i in part_id:
                    out_file.write(line)
                i+=1
            else:
                out_file.write(line)
print('\nDONE!\n')
| [
"os.path.isfile",
"numpy.loadtxt",
"os.remove"
] | [((1307, 1332), 'os.path.isfile', 'os.path.isfile', (['star_path'], {}), '(star_path)\n', (1321, 1332), False, 'import os\n'), ((1430, 1453), 'os.path.isfile', 'os.path.isfile', (['id_list'], {}), '(id_list)\n', (1444, 1453), False, 'import os\n'), ((1606, 1636), 'numpy.loadtxt', 'np.loadtxt', (['id_list'], {'dtype': 'int'}), '(id_list, dtype=int)\n', (1616, 1636), True, 'import numpy as np\n'), ((1220, 1239), 'os.remove', 'os.remove', (['star_out'], {}), '(star_out)\n', (1229, 1239), False, 'import os\n')] |
# -*- coding: UTF-8 -*-
# !/usr/bin/python
# @time :2019/8/14 17:40
# @author :Mo
# @function :
# 适配linux
import pathlib
import sys
import os
project_path = str(pathlib.Path(os.path.abspath(__file__)).parent.parent.parent)
sys.path.append(project_path)
# 地址
from keras_textclassification.conf.path_config import path_model, path_fineture, path_model_dir, path_hyper_parameters
# 训练验证数据地址
from keras_textclassification.conf.path_config import path_byte_multi_news_valid, path_byte_multi_news_train
# 数据预处理, 删除文件目录下文件
from keras_textclassification.data_preprocess.text_preprocess import PreprocessTextMulti, read_and_process, load_json
# 模型图
from keras_textclassification.m02_TextCNN.graph import TextCNNGraph as Graph
# 模型评估
from sklearn.metrics import classification_report
# 计算时间
import time
import numpy as np
def pred_input(path_hyper_parameter=path_hyper_parameters):
    """Interactively predict multi-label classes for user-typed sentences.

    Loads the saved hyper-parameters and TextCNN model, runs one warm-up
    prediction on a fixed sample sentence, then loops forever reading
    sentences from stdin and printing labels with score >= 0.5.
    """
    # load the saved hyper-parameters
    hyper_parameters = load_json(path_hyper_parameter)
    pt = PreprocessTextMulti()
    # initialise the model graph and load the trained weights
    graph = Graph(hyper_parameters)
    graph.load_model()
    ra_ed = graph.word_embedding
    ques = '我要打王者荣耀'
    # str to token
    ques_embed = ra_ed.sentence2idx(ques)
    # BERT embeddings return a (token ids, segment ids) pair; wrap each in a batch axis.
    if hyper_parameters['embedding_type'] == 'bert':
        x_val_1 = np.array([ques_embed[0]])
        x_val_2 = np.array([ques_embed[1]])
        x_val = [x_val_1, x_val_2]
    else:
        x_val = ques_embed
    # predict
    pred = graph.predict(x_val)
    print(pred)
    # map prediction ids back to (label, score) pairs
    # NOTE(review): 'prereocess_idx' looks like a typo for 'preprocess_idx' in
    # the PreprocessTextMulti API — cannot be renamed from here.
    pre = pt.prereocess_idx(pred[0])
    ls_nulti = []
    for ls in pre[0]:
        # keep only labels whose score clears the 0.5 threshold
        if ls[1] >= 0.5:
            ls_nulti.append(ls)
    print(pre[0])
    print(ls_nulti)
    # interactive loop: read a sentence, embed, predict, print kept labels
    while True:
        print("请输入: ")
        ques = input()
        ques_embed = ra_ed.sentence2idx(ques)
        print(ques_embed)
        if hyper_parameters['embedding_type'] == 'bert':
            x_val_1 = np.array([ques_embed[0]])
            x_val_2 = np.array([ques_embed[1]])
            x_val = [x_val_1, x_val_2]
        else:
            x_val = ques_embed
        pred = graph.predict(x_val)
        pre = pt.prereocess_idx(pred[0])
        ls_nulti = []
        for ls in pre[0]:
            if ls[1] >= 0.5:
                ls_nulti.append(ls)
        print(pre[0])
        print(ls_nulti)
if __name__=="__main__":
# 测试集预测
# pred_tet(path_test=path_byte_multi_news_valid, rate=1) # sample条件下设为1,否则训练语料可能会很少
# 可输入 input 预测
pred_input()
| [
"sys.path.append",
"os.path.abspath",
"keras_textclassification.m02_TextCNN.graph.TextCNNGraph",
"keras_textclassification.data_preprocess.text_preprocess.PreprocessTextMulti",
"numpy.array",
"keras_textclassification.data_preprocess.text_preprocess.load_json"
] | [((231, 260), 'sys.path.append', 'sys.path.append', (['project_path'], {}), '(project_path)\n', (246, 260), False, 'import sys\n'), ((929, 960), 'keras_textclassification.data_preprocess.text_preprocess.load_json', 'load_json', (['path_hyper_parameter'], {}), '(path_hyper_parameter)\n', (938, 960), False, 'from keras_textclassification.data_preprocess.text_preprocess import PreprocessTextMulti, read_and_process, load_json\n'), ((970, 991), 'keras_textclassification.data_preprocess.text_preprocess.PreprocessTextMulti', 'PreprocessTextMulti', ([], {}), '()\n', (989, 991), False, 'from keras_textclassification.data_preprocess.text_preprocess import PreprocessTextMulti, read_and_process, load_json\n'), ((1019, 1042), 'keras_textclassification.m02_TextCNN.graph.TextCNNGraph', 'Graph', (['hyper_parameters'], {}), '(hyper_parameters)\n', (1024, 1042), True, 'from keras_textclassification.m02_TextCNN.graph import TextCNNGraph as Graph\n'), ((1252, 1277), 'numpy.array', 'np.array', (['[ques_embed[0]]'], {}), '([ques_embed[0]])\n', (1260, 1277), True, 'import numpy as np\n'), ((1296, 1321), 'numpy.array', 'np.array', (['[ques_embed[1]]'], {}), '([ques_embed[1]])\n', (1304, 1321), True, 'import numpy as np\n'), ((1864, 1889), 'numpy.array', 'np.array', (['[ques_embed[0]]'], {}), '([ques_embed[0]])\n', (1872, 1889), True, 'import numpy as np\n'), ((1912, 1937), 'numpy.array', 'np.array', (['[ques_embed[1]]'], {}), '([ques_embed[1]])\n', (1920, 1937), True, 'import numpy as np\n'), ((182, 207), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (197, 207), False, 'import os\n')] |
import crocoddyl
from crocoddyl.utils import DifferentialFreeFwdDynamicsDerived
import pinocchio
import example_robot_data
import numpy as np
import os
import sys
import time
import subprocess
# First, let's load the Pinocchio model for the Talos arm.
# Benchmark configuration.
ROBOT = example_robot_data.loadTalosArm()
N = 100  # number of nodes
T = int(sys.argv[1]) if (len(sys.argv) > 1) else int(5e3)  # number of trials
MAXITER = 1  # DDP iterations per solve call
CALLBACKS = False  # attach a verbose callback to the solver when True
def createProblem(model):
    """Build a reaching task for the Talos arm and warm-start trajectories.

    *model* is the differential action model class to benchmark (C++ binding
    or the pure-Python derived version). Returns (xs, us, problem) where xs/us
    are the initial state and quasi-static control guesses.
    """
    robot_model = ROBOT.model
    q0 = np.matrix([0.173046, 1., -0.52366, 0., 0., 0.1, -0.005]).T
    x0 = np.vstack([q0, np.zeros((robot_model.nv, 1))])
    # Note that we need to include a cost model (i.e. set of cost functions) in
    # order to fully define the action model for our optimal control problem.
    # For this particular example, we formulate three running-cost functions:
    # goal-tracking cost, state and control regularization; and one terminal-cost:
    # goal cost. First, let's create the common cost functions.
    state = crocoddyl.StateMultibody(robot_model)
    Mref = crocoddyl.FramePlacement(robot_model.getFrameId("gripper_left_joint"),
                                    pinocchio.SE3(np.eye(3), np.matrix([[.0], [.0], [.4]])))
    goalTrackingCost = crocoddyl.CostModelFramePlacement(state, Mref)
    xRegCost = crocoddyl.CostModelState(state)
    uRegCost = crocoddyl.CostModelControl(state)
    # Create a cost model per the running and terminal action model.
    runningCostModel = crocoddyl.CostModelSum(state)
    terminalCostModel = crocoddyl.CostModelSum(state)
    # Then let's added the running and terminal cost functions
    runningCostModel.addCost("gripperPose", goalTrackingCost, 1)
    runningCostModel.addCost("xReg", xRegCost, 1e-4)
    runningCostModel.addCost("uReg", uRegCost, 1e-4)
    terminalCostModel.addCost("gripperPose", goalTrackingCost, 1)
    # Next, we need to create an action model for running and terminal knots. The
    # forward dynamics (computed using ABA) are implemented
    # inside DifferentialActionModelFullyActuated.
    actuation = crocoddyl.ActuationModelFull(state)
    runningModel = crocoddyl.IntegratedActionModelEuler(model(state, actuation, runningCostModel), 1e-3)
    runningModel.differential.armature = np.matrix([0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.]).T
    terminalModel = crocoddyl.IntegratedActionModelEuler(model(state, actuation, terminalCostModel), 1e-3)
    terminalModel.differential.armature = np.matrix([0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.]).T
    # For this optimal control problem, we define 100 knots (or running action
    # models) plus a terminal knot
    problem = crocoddyl.ShootingProblem(x0, [runningModel] * N, terminalModel)
    xs = [x0] * (len(problem.runningModels) + 1)
    us = [m.quasiStatic(d, x0) for m, d in list(zip(problem.runningModels, problem.runningDatas))]
    return xs, us, problem
def runDDPSolveBenchmark(xs, us, problem):
    """Time SolverDDP.solve over T trials.

    Returns (average, min, max) duration in milliseconds.
    """
    ddp = crocoddyl.SolverDDP(problem)
    if CALLBACKS:
        ddp.setCallbacks([crocoddyl.CallbackVerbose()])
    duration = []
    for _ in range(T):
        # perf_counter is monotonic and high-resolution, unlike time.time(),
        # so it is the appropriate clock for benchmarking.
        c_start = time.perf_counter()
        ddp.solve(xs, us, MAXITER, False, 0.1)
        duration.append(1e3 * (time.perf_counter() - c_start))
    avrg_duration = sum(duration) / len(duration)
    return avrg_duration, min(duration), max(duration)
def runShootingProblemCalcBenchmark(xs, us, problem):
    """Time ShootingProblem.calc over T trials.

    Returns (average, min, max) duration in milliseconds.
    """
    duration = []
    for _ in range(T):
        # perf_counter is monotonic and high-resolution, unlike time.time(),
        # so it is the appropriate clock for benchmarking.
        c_start = time.perf_counter()
        problem.calc(xs, us)
        duration.append(1e3 * (time.perf_counter() - c_start))
    avrg_duration = sum(duration) / len(duration)
    return avrg_duration, min(duration), max(duration)
def runShootingProblemCalcDiffBenchmark(xs, us, problem):
    """Time ShootingProblem.calcDiff over T trials.

    Returns (average, min, max) duration in milliseconds.
    """
    duration = []
    for _ in range(T):
        # perf_counter is monotonic and high-resolution, unlike time.time(),
        # so it is the appropriate clock for benchmarking.
        c_start = time.perf_counter()
        problem.calcDiff(xs, us)
        duration.append(1e3 * (time.perf_counter() - c_start))
    avrg_duration = sum(duration) / len(duration)
    return avrg_duration, min(duration), max(duration)
# Run the C++ benchmark binary, then the Python bindings, then the pure-Python
# derived model, printing (avg, min, max) timings for each stage.
print('\033[1m')
print('C++:')
# NOTE(review): check_call returns the exit code (an int), not a Popen object;
# the name 'popen' is misleading and the value is unused.
popen = subprocess.check_call([os.path.dirname(os.path.abspath(__file__)) + "/arm-manipulation", str(T)])
print('Python bindings:')
xs, us, problem = createProblem(crocoddyl.DifferentialActionModelFreeFwdDynamics)
avrg_duration, min_duration, max_duration = runDDPSolveBenchmark(xs, us, problem)
print(' DDP.solve [ms]: {0} ({1}, {2})'.format(avrg_duration, min_duration, max_duration))
avrg_duration, min_duration, max_duration = runShootingProblemCalcBenchmark(xs, us, problem)
print(' ShootingProblem.calc [ms]: {0} ({1}, {2})'.format(avrg_duration, min_duration, max_duration))
avrg_duration, min_duration, max_duration = runShootingProblemCalcDiffBenchmark(xs, us, problem)
print(' ShootingProblem.calcDiff [ms]: {0} ({1}, {2})'.format(avrg_duration, min_duration, max_duration))
print('Python:')
xs, us, problem = createProblem(DifferentialFreeFwdDynamicsDerived)
avrg_duration, min_duration, max_duration = runDDPSolveBenchmark(xs, us, problem)
print(' DDP.solve [ms]: {0} ({1}, {2})'.format(avrg_duration, min_duration, max_duration))
avrg_duration, min_duration, max_duration = runShootingProblemCalcBenchmark(xs, us, problem)
print(' ShootingProblem.calc [ms]: {0} ({1}, {2})'.format(avrg_duration, min_duration, max_duration))
avrg_duration, min_duration, max_duration = runShootingProblemCalcDiffBenchmark(xs, us, problem)
print(' ShootingProblem.calcDiff [ms]: {0} ({1}, {2})'.format(avrg_duration, min_duration, max_duration))
print('\033[0m')
| [
"crocoddyl.CostModelFramePlacement",
"crocoddyl.StateMultibody",
"crocoddyl.SolverDDP",
"numpy.matrix",
"os.path.abspath",
"crocoddyl.ShootingProblem",
"crocoddyl.CallbackVerbose",
"numpy.zeros",
"time.time",
"crocoddyl.CostModelControl",
"crocoddyl.CostModelState",
"crocoddyl.ActuationModelFu... | [((261, 294), 'example_robot_data.loadTalosArm', 'example_robot_data.loadTalosArm', ([], {}), '()\n', (292, 294), False, 'import example_robot_data\n'), ((1008, 1045), 'crocoddyl.StateMultibody', 'crocoddyl.StateMultibody', (['robot_model'], {}), '(robot_model)\n', (1032, 1045), False, 'import crocoddyl\n'), ((1244, 1290), 'crocoddyl.CostModelFramePlacement', 'crocoddyl.CostModelFramePlacement', (['state', 'Mref'], {}), '(state, Mref)\n', (1277, 1290), False, 'import crocoddyl\n'), ((1306, 1337), 'crocoddyl.CostModelState', 'crocoddyl.CostModelState', (['state'], {}), '(state)\n', (1330, 1337), False, 'import crocoddyl\n'), ((1353, 1386), 'crocoddyl.CostModelControl', 'crocoddyl.CostModelControl', (['state'], {}), '(state)\n', (1379, 1386), False, 'import crocoddyl\n'), ((1480, 1509), 'crocoddyl.CostModelSum', 'crocoddyl.CostModelSum', (['state'], {}), '(state)\n', (1502, 1509), False, 'import crocoddyl\n'), ((1534, 1563), 'crocoddyl.CostModelSum', 'crocoddyl.CostModelSum', (['state'], {}), '(state)\n', (1556, 1563), False, 'import crocoddyl\n'), ((2075, 2110), 'crocoddyl.ActuationModelFull', 'crocoddyl.ActuationModelFull', (['state'], {}), '(state)\n', (2103, 2110), False, 'import crocoddyl\n'), ((2631, 2695), 'crocoddyl.ShootingProblem', 'crocoddyl.ShootingProblem', (['x0', '([runningModel] * N)', 'terminalModel'], {}), '(x0, [runningModel] * N, terminalModel)\n', (2656, 2695), False, 'import crocoddyl\n'), ((2926, 2954), 'crocoddyl.SolverDDP', 'crocoddyl.SolverDDP', (['problem'], {}), '(problem)\n', (2945, 2954), False, 'import crocoddyl\n'), ((497, 556), 'numpy.matrix', 'np.matrix', (['[0.173046, 1.0, -0.52366, 0.0, 0.0, 0.1, -0.005]'], {}), '([0.173046, 1.0, -0.52366, 0.0, 0.0, 0.1, -0.005])\n', (506, 556), True, 'import numpy as np\n'), ((2257, 2303), 'numpy.matrix', 'np.matrix', (['[0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.0]'], {}), '([0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.0])\n', (2266, 2303), True, 'import numpy as np\n'), ((2454, 2500), 
'numpy.matrix', 'np.matrix', (['[0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.0]'], {}), '([0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.0])\n', (2463, 2500), True, 'import numpy as np\n'), ((3088, 3099), 'time.time', 'time.time', ([], {}), '()\n', (3097, 3099), False, 'import time\n'), ((3163, 3174), 'time.time', 'time.time', ([], {}), '()\n', (3172, 3174), False, 'import time\n'), ((3509, 3520), 'time.time', 'time.time', ([], {}), '()\n', (3518, 3520), False, 'import time\n'), ((3566, 3577), 'time.time', 'time.time', ([], {}), '()\n', (3575, 3577), False, 'import time\n'), ((3916, 3927), 'time.time', 'time.time', ([], {}), '()\n', (3925, 3927), False, 'import time\n'), ((3977, 3988), 'time.time', 'time.time', ([], {}), '()\n', (3986, 3988), False, 'import time\n'), ((580, 609), 'numpy.zeros', 'np.zeros', (['(robot_model.nv, 1)'], {}), '((robot_model.nv, 1))\n', (588, 609), True, 'import numpy as np\n'), ((1178, 1187), 'numpy.eye', 'np.eye', (['(3)'], {}), '(3)\n', (1184, 1187), True, 'import numpy as np\n'), ((1189, 1221), 'numpy.matrix', 'np.matrix', (['[[0.0], [0.0], [0.4]]'], {}), '([[0.0], [0.0], [0.4]])\n', (1198, 1221), True, 'import numpy as np\n'), ((2999, 3026), 'crocoddyl.CallbackVerbose', 'crocoddyl.CallbackVerbose', ([], {}), '()\n', (3024, 3026), False, 'import crocoddyl\n'), ((4288, 4313), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (4303, 4313), False, 'import os\n')] |
from __future__ import absolute_import, division, print_function
import pytest
pytest.importorskip('flask')
pytest.importorskip('flask.ext.cors')
from base64 import b64encode
from copy import copy
import datashape
from datashape.util.testing import assert_dshape_equal
import numpy as np
from odo import odo, convert
from datetime import datetime
import pandas as pd
from pandas import DataFrame
from pandas.util.testing import assert_frame_equal
from toolz import pipe
from blaze.dispatch import dispatch
from blaze.expr import Expr
from blaze.utils import example
from blaze import discover, symbol, by, CSV, compute, join, into, data
from blaze.server.client import mimetype
from blaze.server.server import Server, to_tree, from_tree, RC
from blaze.server.serialization import all_formats, trusted_formats, fastmsgpack
# Module-level fixture data served by every test server below.
accounts = DataFrame([['Alice', 100], ['Bob', 200]],
                     columns=['name', 'amount'])
cities = DataFrame([['Alice', 'NYC'], ['Bob', 'LA']],
                   columns=['name', 'city'])
events = DataFrame([[1, datetime(2000, 1, 1, 12, 0, 0)],
                   [2, datetime(2000, 1, 2, 12, 0, 0)]],
                  columns=['value', 'when'])
# SQLite-backed dataset; exercised by the sqlalchemy tests.
db = data('sqlite:///' + example('iris.db'))
class DumbResource(object):
    # Dummy resource type: canonical frame every conversion must yield.
    df = DataFrame({'a': np.arange(5),
                    'b': np.arange(5, 10)})
    class NoResource(Exception):
        # Raised when the required ``return_df`` kwarg is missing.
        pass
@convert.register(DataFrame, DumbResource)
def dumb_to_df(d, return_df=None, **kwargs):
    """Convert a DumbResource to a DataFrame.

    Requires ``return_df`` to be supplied (via odo/compute kwargs); raises
    ``DumbResource.NoResource`` otherwise.  The converted frame must equal
    ``DumbResource.df``.
    """
    if return_df is None:
        raise DumbResource.NoResource('return_df must be passed')
    frame = odo(return_df, DataFrame, dshape=discover(d))
    assert_frame_equal(frame, DumbResource.df)
    return frame
@dispatch(Expr, DumbResource)
def compute_down(expr, d, **kwargs):
    """Compute any expression on a DumbResource by converting it to a frame."""
    return dumb_to_df(d, **kwargs)
@discover.register(DumbResource)
def _discover_dumb(d):
    """A DumbResource discovers as the shape of its canonical frame."""
    return discover(DumbResource.df)
# Name -> dataset mapping hosted by the test servers.
tdata = {'accounts': accounts,
         'cities': cities,
         'events': events,
         'db': db,
         'dumb': DumbResource()}
@pytest.fixture(scope='module')
def server():
    """Module-scoped Server over the shared test data (adding disabled)."""
    srv = Server(tdata, all_formats)
    srv.app.testing = True
    return srv
@pytest.fixture(scope='module')
def add_server():
    """Module-scoped Server over the shared test data with /add enabled."""
    srv = Server(tdata, all_formats, allow_add=True)
    srv.app.testing = True
    return srv
@pytest.yield_fixture(params=[None, tdata])
def temp_server(request):
    """Yield a test client over a throwaway server (safe to mutate).

    Parametrized over an empty server and one seeded with a copy of tdata.
    """
    srv = Server(copy(request.param), formats=all_formats)
    srv.app.testing = True
    with srv.app.test_client() as client:
        yield client
@pytest.yield_fixture(params=[None, tdata])
def temp_add_server(request):
    """Yield a test client over a throwaway server that also allows /add.

    Parametrized over an empty server and one seeded with a copy of tdata.
    """
    srv = Server(copy(request.param), formats=all_formats, allow_add=True)
    srv.app.testing = True
    with srv.app.test_client() as client:
        yield client
@pytest.yield_fixture
def test(server):
    """Flask test client bound to the read-only module server."""
    with server.app.test_client() as c:
        yield c
@pytest.yield_fixture
def test_add(add_server):
    """Flask test client bound to the module server that allows /add."""
    with add_server.app.test_client() as c:
        yield c
@pytest.yield_fixture
def iris_server():
    """Yield a test client for a server hosting only the iris CSV (/add on)."""
    csv_source = CSV(example('iris.csv'))
    srv = Server(csv_source, all_formats, allow_add=True)
    srv.app.testing = True
    with srv.app.test_client() as client:
        yield client
def test_datasets(test):
    """GET /datashape reports the combined datashape of all hosted data."""
    response = test.get('/datashape')
    assert_dshape_equal(datashape.dshape(response.data.decode('utf-8')),
                        datashape.dshape(discover(tdata)))
@pytest.mark.parametrize('serial', all_formats)
def test_bad_responses(test, serial):
    """Malformed compute payloads and unknown datasets are rejected."""
    # Payload that is not a serialized expression tree.
    post = test.post('/compute/accounts.{name}'.format(name=serial.name),
                     data=serial.dumps(500),)
    assert 'OK' not in post.status
    # Unknown dataset name in the URL.
    post = test.post('/compute/non-existent-table.{name}'.format(name=serial.name),
                     data=serial.dumps(0))
    assert 'OK' not in post.status
    # Missing request body entirely.
    post = test.post('/compute/accounts.{name}'.format(name=serial.name))
    assert 'OK' not in post.status
def test_to_from_json():
    """from_tree(to_tree(expr)) is the identity on expressions."""
    leaf = symbol('t', 'var * {name: string, amount: int}')
    for expr in (leaf, leaf.amount + 1):
        assert from_tree(to_tree(expr)).isidentical(expr)
def test_to_tree():
    """to_tree serializes a sum expression into the expected nested dict."""
    leaf = symbol('t', 'var * {name: string, amount: int32}')
    ds = datashape.dshape('var * {name: string, amount: int32}')
    field_node = {'op': 'Field',
                  'args': [{'op': 'Symbol',
                            'args': ['t', ds, 0]},
                           'amount']}
    expected = {'op': 'sum', 'args': [field_node, [0], False]}
    assert to_tree(leaf.amount.sum()) == expected
@pytest.mark.parametrize('serial', all_formats)
def test_to_tree_slice(serial):
    """A slice expression survives the tree + wire-format round trip."""
    leaf = symbol('t', 'var * {name: string, amount: int32}')
    sliced = leaf[:5]
    round_tripped = from_tree(serial.loads(serial.dumps(to_tree(sliced))))
    assert sliced.isidentical(round_tripped)
def test_to_from_tree_namespace():
    """``names`` maps leaves to tokens; ``namespace`` maps them back."""
    leaf = symbol('t', 'var * {name: string, amount: int32}')
    tree = to_tree(leaf.name, names={leaf: 't'})
    assert tree == {'op': 'Field', 'args': ['t', 'name']}
    rebuilt = from_tree(tree, namespace={'t': leaf})
    assert rebuilt.isidentical(leaf.name)
def test_from_tree_is_robust_to_unnecessary_namespace():
    """from_tree ignores namespace entries the tree does not reference."""
    leaf = symbol('t', 'var * {name: string, amount: int32}')
    incremented = leaf.amount + 1
    tree = to_tree(incremented)  # built without a names mapping
    assert from_tree(tree, {'t': leaf}).isidentical(incremented)
# Module-level leaf symbol over the full server dataset.
t = symbol('t', discover(tdata))
@pytest.mark.parametrize('serial', all_formats)
def test_compute(test, serial):
    """POST /compute evaluates a reduction and names the result column."""
    expr = t.accounts.amount.sum()
    query = {'expr': to_tree(expr)}
    expected = 300
    response = test.post('/compute',
                         data=serial.dumps(query),
                         headers=mimetype(serial))
    assert 'OK' in response.status
    # (local name shadows the module-level tdata dict)
    tdata = serial.loads(response.data)
    assert serial.data_loads(tdata['data']) == expected
    assert list(tdata['names']) == ['amount_sum']
@pytest.mark.parametrize('serial', all_formats)
def test_get_datetimes(test, serial):
    """Datetime columns survive the round trip through each serializer."""
    expr = t.events
    query = {'expr': to_tree(expr)}
    response = test.post('/compute',
                         data=serial.dumps(query),
                         headers=mimetype(serial))
    assert 'OK' in response.status
    tdata = serial.loads(response.data)
    ds = datashape.dshape(tdata['datashape'])
    result = into(np.ndarray,
                  serial.data_loads(tdata['data']),
                  dshape=ds)
    assert into(list, result) == into(list, events)
    assert list(tdata['names']) == events.columns.tolist()
@pytest.mark.parametrize('serial', all_formats)
def dont_test_compute_with_namespace(test, serial):
    """Disabled test: compute from a raw tree naming a dataset field."""
    query = {'expr': {'op': 'Field',
                      'args': ['accounts', 'name']}}
    expected = ['Alice', 'Bob']
    response = test.post('/compute',
                         data=serial.dumps(query),
                         headers=mimetype(serial))
    assert 'OK' in response.status
    tdata = serial.loads(response.data)
    assert serial.data_loads(tdata['data']) == expected
    assert tdata['names'] == ['name']
# CSV-backed iris dataset used by the iris_server tests below.
iris = CSV(example('iris.csv'))
@pytest.mark.parametrize('serial', all_formats)
def test_compute_with_variable_in_namespace(iris_server, serial):
    """A free variable in the tree is bound via the request's namespace."""
    test = iris_server
    t = symbol('t', discover(iris))
    pl = symbol('pl', 'float32')
    expr = t[t.petal_length > pl].species
    tree = to_tree(expr, {pl: 'pl'})
    blob = serial.dumps({'expr': tree, 'namespace': {'pl': 5}})
    resp = test.post('/compute',
                     data=blob,
                     headers=mimetype(serial))
    assert 'OK' in resp.status
    tdata = serial.loads(resp.data)
    result = serial.data_loads(tdata['data'])
    # Local reference computation substitutes pl=5 the same way.
    expected = list(compute(expr._subs({pl: 5}), {t: iris}))
    assert odo(result, list) == expected
    assert list(tdata['names']) == ['species']
@pytest.mark.parametrize('serial', all_formats)
def test_compute_by_with_summary(iris_server, serial):
    """by() with a summary computes per-species max and sum server-side."""
    test = iris_server
    t = symbol('t', discover(iris))
    expr = by(t.species,
              max=t.petal_length.max(),
              sum=t.petal_width.sum())
    tree = to_tree(expr)
    blob = serial.dumps({'expr': tree})
    resp = test.post('/compute',
                     data=blob,
                     headers=mimetype(serial))
    assert 'OK' in resp.status
    tdata = serial.loads(resp.data)
    result = DataFrame(serial.data_loads(tdata['data'])).values
    expected = compute(expr, iris).values
    # First column is the species label; the rest are floats, compared
    # approximately.
    np.testing.assert_array_equal(result[:, 0],
                                  expected[:, 0])
    np.testing.assert_array_almost_equal(result[:, 1:],
                                         expected[:, 1:])
    assert list(tdata['names']) == ['species', 'max', 'sum']
@pytest.mark.parametrize('serial', all_formats)
def test_compute_column_wise(iris_server, serial):
    """A row filter combining predicates on two columns computes correctly."""
    test = iris_server
    t = symbol('t', discover(iris))
    subexpr = ((t.petal_width / 2 > 0.5) &
               (t.petal_length / 2 > 0.5))
    expr = t[subexpr]
    tree = to_tree(expr)
    blob = serial.dumps({'expr': tree})
    resp = test.post('/compute',
                     data=blob,
                     headers=mimetype(serial))
    assert 'OK' in resp.status
    tdata = serial.loads(resp.data)
    result = serial.data_loads(tdata['data'])
    expected = compute(expr, iris)
    assert list(map(tuple, into(list, result))) == into(list, expected)
    assert list(tdata['names']) == t.fields
@pytest.mark.parametrize('serial', all_formats)
def test_multi_expression_compute(test, serial):
    """A join across two hosted datasets computes server-side."""
    s = symbol('s', discover(tdata))
    expr = join(s.accounts, s.cities)
    resp = test.post('/compute',
                     data=serial.dumps({'expr': to_tree(expr)}),
                     headers=mimetype(serial))
    assert 'OK' in resp.status
    respdata = serial.loads(resp.data)
    result = serial.data_loads(respdata['data'])
    expected = compute(expr, {s: tdata})
    assert list(map(tuple, odo(result, list))) == into(list, expected)
    assert list(respdata['names']) == expr.fields
@pytest.mark.parametrize('serial', all_formats)
def test_leaf_symbol(test, serial):
    """The ':leaf' token in a raw tree refers to the server's root symbol."""
    query = {'expr': {'op': 'Field', 'args': [':leaf', 'cities']}}
    resp = test.post('/compute',
                     data=serial.dumps(query),
                     headers=mimetype(serial))
    tdata = serial.loads(resp.data)
    a = serial.data_loads(tdata['data'])
    b = into(list, cities)
    assert list(map(tuple, into(list, a))) == b
    assert list(tdata['names']) == cities.columns.tolist()
@pytest.mark.parametrize('serial', all_formats)
def test_sqlalchemy_result(test, serial):
    """head() on the SQL-backed dataset returns rows or a DataFrame."""
    expr = t.db.iris.head(5)
    query = {'expr': to_tree(expr)}
    response = test.post('/compute',
                         data=serial.dumps(query),
                         headers=mimetype(serial))
    assert 'OK' in response.status
    tdata = serial.loads(response.data)
    result = serial.data_loads(tdata['data'])
    # Serializers differ in how they hand tabular data back: some return a
    # list of rows, some a DataFrame.
    if isinstance(result, list):
        assert all(isinstance(item, (tuple, list)) for item in result)
    elif isinstance(result, DataFrame):
        expected = DataFrame([[5.1, 3.5, 1.4, 0.2, 'Iris-setosa'],
                              [4.9, 3.0, 1.4, 0.2, 'Iris-setosa'],
                              [4.7, 3.2, 1.3, 0.2, 'Iris-setosa'],
                              [4.6, 3.1, 1.5, 0.2, 'Iris-setosa'],
                              [5.0, 3.6, 1.4, 0.2, 'Iris-setosa']],
                             columns=['sepal_length',
                                      'sepal_width',
                                      'petal_length',
                                      'petal_width',
                                      'species'])
        assert_frame_equal(expected, result)
    assert list(tdata['names']) == t.db.iris.fields
def test_server_accepts_non_nonzero_ables():
    """Constructing a Server around an empty (falsy) DataFrame must not raise."""
    empty = DataFrame()
    Server(empty)
def serialize_query_with_map_builtin_function(test, serial, fcn):
    """Serialize a query that maps a builtin function over ``species``.

    Returns ``(expected, actual)``: the locally computed result and the
    result the server handed back.
    """
    t = symbol('t', discover(iris))
    expr = t.species.map(fcn, 'int')
    query = {'expr': to_tree(expr)}
    response = test.post('/compute',
                         data=serial.dumps(query),
                         headers=mimetype(serial))
    assert 'OK' in response.status
    respdata = serial.loads(response.data)
    result = serial.data_loads(respdata['data'])
    exp_res = compute(expr, {t: iris}, return_type=list)
    return (exp_res, result)
@pytest.mark.parametrize('serial', trusted_formats)
def test_map_builtin_client_server(iris_server, serial):
    """map() with the builtin ``len`` round-trips through the server."""
    expected, actual = serialize_query_with_map_builtin_function(
        iris_server, serial, len)
    # Canonicalize both sides as Series before comparing element-wise.
    assert (pd.Series(actual) == pd.Series(expected)).all()
@pytest.mark.parametrize('serial', trusted_formats)
def test_map_numpy_client_server(iris_server, serial):
    """map() with a numpy callable (np.size) round-trips through the server."""
    expected, actual = serialize_query_with_map_builtin_function(
        iris_server, serial, np.size)
    # Canonicalize both sides as Series before comparing element-wise.
    assert (pd.Series(actual) == pd.Series(expected)).all()
@pytest.mark.xfail(reason="pickle does not produce same error")
@pytest.mark.parametrize('serial', all_formats)
def test_builtin_403_exception(iris_server, serial):
    '''
    ensure exception is raised when both map and apply are invoked.
    exception is raised in check_request() when object_hook is invoked;
    this is when the payload is loaded from the bytes object in request.data
    '''
    t = symbol('t', discover(iris))
    for name in ('map', 'apply'):
        func = getattr(t.species, name)
        # eval is a forbidden callable: the server must refuse it outright.
        expr = func(eval, 'int')
        query = {'expr': to_tree(expr)}
        response = iris_server.post('/compute',
                                    data=serial.dumps(query),
                                    headers=mimetype(serial))
        assert '403 FORBIDDEN'.lower() in response.status.lower()
# NOTE(review): typo "nto" below is inside the xfail reason string (runtime
# text) — cosmetic only, left untouched.
@pytest.mark.xfail(reason="pickle does nto produce same error")
@pytest.mark.parametrize('serial', all_formats)
def test_builtin_501_exception(iris_server, serial):
    """Mapping a non-serializable callable yields 501 Not Implemented."""
    t = symbol('t', discover(iris))
    for name in ('map', 'apply'):
        func = getattr(t.species, name)
        expr = func(copy, 'int')
        query = {'expr': to_tree(expr)}
        response = iris_server.post('/compute',
                                    data=serial.dumps(query),
                                    headers=mimetype(serial))
        assert '501 Not Implemented'.lower() in response.status.lower()
@pytest.mark.parametrize('serial', trusted_formats)
def test_map_pandas_client_server(iris_server, serial):
    """map() with a pandas callable (pd.isnull) round-trips through the server."""
    expected, actual = serialize_query_with_map_builtin_function(
        iris_server, serial, pd.isnull)
    # Canonicalize both sides as Series before comparing element-wise.
    assert (pd.Series(actual) == pd.Series(expected)).all()
@pytest.mark.parametrize('serial', trusted_formats)
def test_apply_client_server(iris_server, serial):
    """apply() with a builtin round-trips; only the result type is checked."""
    test = iris_server
    t = symbol('t', discover(iris))
    expr = t.species.apply(id, 'int')  # Very dumb example...
    query = {'expr': to_tree(expr)}
    response = test.post('/compute',
                         data=serial.dumps(query),
                         headers=mimetype(serial))
    assert 'OK' in response.status
    respdata = serial.loads(response.data)
    result = serial.data_loads(respdata['data'])
    # id() values differ between processes, so only the type can match.
    assert type(result) == type(compute(expr, {t: iris}, return_type=int))
@pytest.mark.parametrize('serial', all_formats)
def test_server_can_compute_sqlalchemy_reductions(test, serial):
    """A sum over the SQL-backed dataset computes server-side."""
    expr = t.db.iris.petal_length.sum()
    query = {'expr': to_tree(expr)}
    response = test.post('/compute',
                         data=serial.dumps(query),
                         headers=mimetype(serial))
    assert 'OK' in response.status
    respdata = serial.loads(response.data)
    result = serial.data_loads(respdata['data'])
    assert result == into(int, compute(expr, {t: tdata}))
    assert list(respdata['names']) == ['petal_length_sum']
@pytest.mark.parametrize('serial', all_formats)
def test_serialization_endpoints(test, serial):
    """Every serialization format computes the same reduction result."""
    expr = t.db.iris.petal_length.sum()
    query = {'expr': to_tree(expr)}
    response = test.post('/compute',
                         data=serial.dumps(query),
                         headers=mimetype(serial))
    assert 'OK' in response.status
    respdata = serial.loads(response.data)
    result = serial.data_loads(respdata['data'])
    assert result == into(int, compute(expr, {t: tdata}))
    assert list(respdata['names']) == ['petal_length_sum']
def test_cors_compute(test):
    """OPTIONS /compute advertises HEAD/OPTIONS/POST but never GET."""
    res = test.options('/compute')
    assert res.status_code == RC.OK
    allow = res.headers['Allow']
    for verb in ('HEAD', 'OPTIONS', 'POST'):
        assert verb in allow
    # we don't allow gets because we're always sending data
    assert 'GET' not in allow
def test_cors_datashape(test):
    """OPTIONS /datashape advertises HEAD/OPTIONS/GET but never POST."""
    res = test.options('/datashape')
    assert res.status_code == RC.OK
    allow = res.headers['Allow']
    for verb in ('HEAD', 'OPTIONS', 'GET'):
        assert verb in allow
    # we don't allow posts because we're just getting (meta)data.
    assert 'POST' not in allow
def test_cors_add(test_add):
    """OPTIONS /add advertises HEAD/POST/OPTIONS but never GET."""
    res = test_add.options('/add')
    assert res.status_code == RC.OK
    allow = res.headers['Allow']
    for verb in ('HEAD', 'POST', 'OPTIONS'):
        assert verb in allow
    # we don't allow get because we're sending data.
    assert 'GET' not in allow
@pytest.fixture(scope='module')
def username():
    """Username accepted by the auth-protected server."""
    return 'blaze-dev'
@pytest.fixture(scope='module')
def password():
    """Password accepted by the auth-protected server."""
    # NOTE(review): '<PASSWORD>' looks like a redacted placeholder rather
    # than the original dummy literal — confirm against upstream history.
    return '<PASSWORD>'
@pytest.fixture(scope='module')
def server_with_auth(username, password):
    """Module-scoped Server that only accepts the fixture credentials."""
    def check(creds):
        # ``creds`` is falsy when no Authorization header was supplied.
        return (creds and creds.username == username
                and creds.password == password)
    srv = Server(tdata, all_formats, authorization=check)
    srv.app.testing = True
    return srv
@pytest.yield_fixture
def test_with_auth(server_with_auth):
    """Flask test client bound to the auth-protected server."""
    with server_with_auth.app.test_client() as c:
        yield c
def basic_auth(username, password):
    """Return the value for an HTTP Basic ``Authorization`` header."""
    credentials = '{}:{}'.format(username, password).encode('utf-8')
    return b'Basic ' + b64encode(credentials)
@pytest.mark.parametrize('serial', all_formats)
def test_auth(test_with_auth, username, password, serial):
    """Valid credentials get 200; perturbed ones get 401 on both endpoints."""
    expr = t.accounts.amount.sum()
    query = {'expr': to_tree(expr)}
    r = test_with_auth.get('/datashape',
                           headers={'authorization': basic_auth(username, password)})
    assert r.status_code == RC.OK
    headers = mimetype(serial)
    headers['authorization'] = basic_auth(username, password)
    s = test_with_auth.post('/compute',
                            data=serial.dumps(query),
                            headers=headers)
    assert s.status_code == RC.OK
    # Invalid credentials must be rejected on both endpoints.
    u = test_with_auth.get('/datashape',
                           headers={'authorization': basic_auth(username + 'a', password + 'a')})
    assert u.status_code == RC.UNAUTHORIZED
    headers['authorization'] = basic_auth(username + 'a', password + 'a')
    v = test_with_auth.post('/compute',
                            data=serial.dumps(query),
                            headers=headers)
    assert v.status_code == RC.UNAUTHORIZED
@pytest.mark.parametrize('serial', all_formats)
def test_minute_query(test, serial):
    """Extracting the minute from a datetime column works over the wire."""
    expr = t.events.when.minute
    query = {'expr': to_tree(expr)}
    result = test.post('/compute',
                       headers=mimetype(serial),
                       data=serial.dumps(query))
    expected = {'data': [0, 0],
                'names': ['when_minute'],
                'datashape': '2 * int64'}
    assert result.status_code == RC.OK
    resp = serial.loads(result.data)
    assert list(serial.data_loads(resp['data'])) == expected['data']
    assert list(resp['names']) == expected['names']
    assert resp['datashape'] == expected['datashape']
@pytest.mark.parametrize('serial', all_formats)
def test_isin(test, serial):
    """isin() against a frozenset computes a boolean column server-side."""
    expr = t.events.value.isin(frozenset([1]))
    query = {'expr': to_tree(expr)}
    result = test.post('/compute',
                       headers=mimetype(serial),
                       data=serial.dumps(query))
    expected = {'data': [True, False],
                'names': ['value'],
                'datashape': '2 * bool'}
    assert result.status_code == RC.OK
    resp = serial.loads(result.data)
    assert list(serial.data_loads(resp['data'])) == expected['data']
    assert list(resp['names']) == expected['names']
    assert resp['datashape'] == expected['datashape']
@pytest.mark.parametrize('serial', all_formats)
def test_add_errors(temp_add_server, serial):
    """Adding a non-existent dataset fails and leaves the server unchanged."""
    pre_datashape = datashape.dshape(temp_add_server
                                      .get('/datashape')
                                      .data.decode('utf-8'))
    bunk_path = example('bunk.csv')
    blob = serial.dumps({'bunk': bunk_path})
    response1 = temp_add_server.post('/add',
                                     headers=mimetype(serial),
                                     data=blob)
    assert response1.status_code == RC.UNPROCESSABLE_ENTITY
    # Test that the datashape of the server is accessible and unchanged after
    # trying to add a non-existent dataset.
    response2 = temp_add_server.get('/datashape')
    assert response2.status_code == RC.OK
    response_dshape = datashape.dshape(response2.data.decode('utf-8'))
    assert_dshape_equal(pre_datashape, response_dshape)
@pytest.mark.parametrize('serial', all_formats)
def test_add_default_not_allowed(temp_server, serial):
    """Servers constructed without allow_add reject /add with 404."""
    iris_path = example('iris.csv')
    blob = serial.dumps({'iris': iris_path})
    response1 = temp_server.post('/add',
                                 headers=mimetype(serial),
                                 data=blob)
    assert 'NOT FOUND' in response1.status
    assert response1.status_code == RC.NOT_FOUND
@pytest.mark.parametrize('serial', all_formats)
def test_add_data_to_server(temp_add_server, serial):
    """POST /add registers a dataset that is then discoverable and computable."""
    # add data
    iris_path = example('iris.csv')
    blob = serial.dumps({'iris': iris_path})
    response1 = temp_add_server.post('/add',
                                     headers=mimetype(serial),
                                     data=blob)
    assert 'CREATED' in response1.status
    assert response1.status_code == RC.CREATED
    # check for expected server datashape
    response2 = temp_add_server.get('/datashape')
    expected2 = discover({'iris': data(iris_path)})
    response_dshape = datashape.dshape(response2.data.decode('utf-8'))
    assert_dshape_equal(response_dshape.measure.dict['iris'],
                        expected2.measure.dict['iris'])
    # compute on added data
    t = data({'iris': data(iris_path)})
    expr = t.iris.petal_length.sum()
    response3 = temp_add_server.post('/compute',
                                     data=serial.dumps({'expr': to_tree(expr)}),
                                     headers=mimetype(serial))
    result3 = serial.data_loads(serial.loads(response3.data)['data'])
    expected3 = compute(expr, {'iris': data(iris_path)})
    assert result3 == expected3
@pytest.mark.parametrize('serial', all_formats)
def test_cant_add_data_to_server(iris_server, serial):
    """A server hosting a single unnamed dataset cannot take more datasets."""
    # try adding more data to server
    iris_path = example('iris.csv')
    blob = serial.dumps({'iris': iris_path})
    response1 = iris_server.post('/add',
                                 headers=mimetype(serial),
                                 data=blob)
    assert response1.status_code == RC.UNPROCESSABLE_ENTITY
@pytest.mark.parametrize('serial', all_formats)
def test_add_data_twice_error(temp_add_server, serial):
    """Re-adding an existing name yields 409 and keeps the original data."""
    # add iris
    iris_path = example('iris.csv')
    payload = serial.dumps({'iris': iris_path})
    temp_add_server.post('/add',
                         headers=mimetype(serial),
                         data=payload)
    # Try to add to existing 'iris'
    resp = temp_add_server.post('/add',
                                headers=mimetype(serial),
                                data=payload)
    assert resp.status_code == RC.CONFLICT
    # Verify the server still serves the original 'iris'.
    response_ds = temp_add_server.get('/datashape').data.decode('utf-8')
    ds = datashape.dshape(response_ds)
    t = symbol('t', ds)
    query = {'expr': to_tree(t.iris)}
    resp = temp_add_server.post('/compute',
                                data=serial.dumps(query),
                                headers=mimetype(serial))
    assert resp.status_code == RC.OK
@pytest.mark.parametrize('serial', all_formats)
def test_add_two_data_sets_at_once_error(temp_add_server, serial):
    """/add accepts exactly one dataset per request."""
    # Try to add two things at once
    payload = serial.dumps({'foo': 'iris.csv',
                            'bar': 'iris.csv'})
    resp = temp_add_server.post('/add',
                                headers=mimetype(serial),
                                data=payload)
    assert resp.status_code == RC.UNPROCESSABLE_ENTITY
@pytest.mark.parametrize('serial', all_formats)
def test_add_bunk_data_error(temp_add_server, serial):
    """Posting a dataset whose source is None is rejected as unprocessable."""
    resp = temp_add_server.post('/add',
                                headers=mimetype(serial),
                                data=serial.dumps({'foo': None}))
    assert resp.status_code == RC.UNPROCESSABLE_ENTITY
@pytest.mark.parametrize('serial', all_formats)
def test_bad_add_payload(temp_add_server, serial):
    """A payload that is not a mutable mapping is rejected as unprocessable."""
    payload = serial.dumps('This is not a mutable mapping.')
    resp = temp_add_server.post('/add',
                                headers=mimetype(serial),
                                data=payload)
    assert resp.status_code == RC.UNPROCESSABLE_ENTITY
@pytest.mark.parametrize('serial', all_formats)
def test_add_expanded_payload(temp_add_server, serial):
    """The expanded {'source': ..., 'kwargs': ...} payload form is accepted."""
    # Ensure that the expanded payload format is accepted by the server
    iris_path = example('iris.csv')
    blob = serial.dumps({'iris': {'source': iris_path,
                                  'kwargs': {'delimiter': ','}}})
    response1 = temp_add_server.post('/add',
                                     headers=mimetype(serial),
                                     data=blob)
    assert 'CREATED' in response1.status
    assert response1.status_code == RC.CREATED
@pytest.mark.parametrize('serial', all_formats)
def test_add_expanded_payload_with_imports(temp_add_server, serial):
    """The expanded payload form may also name modules to import."""
    # Ensure that the expanded payload format is accepted by the server
    iris_path = example('iris.csv')
    blob = serial.dumps({'iris': {'source': iris_path,
                                  'kwargs': {'delimiter': ','},
                                  'imports': ['csv']}})
    response1 = temp_add_server.post('/add',
                                     headers=mimetype(serial),
                                     data=blob)
    assert 'CREATED' in response1.status
    assert response1.status_code == RC.CREATED
@pytest.mark.parametrize('serial', all_formats)
def test_add_expanded_payload_has_effect(temp_add_server, serial):
    """kwargs in the expanded payload actually reach the resource constructor."""
    # Ensure that the expanded payload format actually passes the arguments
    # through to the resource constructor
    iris_path = example('iris-latin1.tsv')
    csv_kwargs = {'delimiter': '\t', 'encoding': 'iso-8859-1'}
    blob = serial.dumps({'iris': {'source': iris_path,
                                  'kwargs': csv_kwargs}})
    response1 = temp_add_server.post('/add',
                                     headers=mimetype(serial),
                                     data=blob)
    assert 'CREATED' in response1.status
    assert response1.status_code == RC.CREATED
    # check for expected server datashape
    response2 = temp_add_server.get('/datashape')
    expected2 = discover({'iris': data(iris_path, **csv_kwargs)})
    response_dshape = datashape.dshape(response2.data.decode('utf-8'))
    assert_dshape_equal(response_dshape.measure.dict['iris'],
                        expected2.measure.dict['iris'])
    # compute on added data
    t = data({'iris': data(iris_path, **csv_kwargs)})
    expr = t.iris.petal_length.sum()
    response3 = temp_add_server.post('/compute',
                                     data=serial.dumps({'expr': to_tree(expr)}),
                                     headers=mimetype(serial))
    result3 = serial.data_loads(serial.loads(response3.data)['data'])
    expected3 = compute(expr, {'iris': data(iris_path, **csv_kwargs)})
    assert result3 == expected3
@pytest.mark.parametrize('serial', all_formats)
def test_odo_kwargs(test, serial):
expr = t.dumb
bad_query = {'expr': to_tree(expr)}
result = test.post('/compute',
headers=mimetype(serial),
data=serial.dumps(bad_query))
assert result.status_code == RC.INTERNAL_SERVER_ERROR
assert b'return_df must be passed' in result.data
good_query = {'expr': to_tree(expr),
'odo_kwargs': {'return_df': odo(DumbResource.df, list)}}
result = test.post('/compute',
headers=mimetype(serial),
data=serial.dumps(good_query))
assert result.status_code == RC.OK
tdata = serial.loads(result.data)
dshape = discover(DumbResource.df)
assert_dshape_equal(datashape.dshape(tdata['datashape']),
dshape)
assert_frame_equal(odo(serial.data_loads(tdata['data']),
DataFrame,
dshape=dshape),
DumbResource.df)
@pytest.mark.parametrize('serial', all_formats)
def test_compute_kwargs(test, serial):
expr = t.dumb.sort()
bad_query = {'expr': to_tree(expr)}
result = test.post('/compute',
headers=mimetype(serial),
data=serial.dumps(bad_query))
assert result.status_code == RC.INTERNAL_SERVER_ERROR
assert b'return_df must be passed' in result.data
good_query = {'expr': to_tree(expr),
'compute_kwargs': {'return_df': odo(DumbResource.df, list)}}
result = test.post('/compute',
headers=mimetype(serial),
data=serial.dumps(good_query))
assert result.status_code == RC.OK
tdata = serial.loads(result.data)
dshape = discover(DumbResource.df)
assert_dshape_equal(datashape.dshape(tdata['datashape']),
dshape)
assert_frame_equal(odo(serial.data_loads(tdata['data']),
DataFrame,
dshape=dshape),
DumbResource.df)
def test_fastmsgmpack_mutable_dataframe(test):
    """A DataFrame deserialized from the fastmsgpack format must have
    writable internal blocks so callers can mutate the result."""
    # Request the raw events table so the response is the dataframe itself.
    payload = fastmsgpack.dumps({'expr': to_tree(t.events)})
    response = test.post('/compute',
                         headers=mimetype(fastmsgpack),
                         data=payload)
    assert response.status_code == RC.OK
    frame = fastmsgpack.data_loads(
        fastmsgpack.loads(response.data)['data'])
    # Every block backing the reconstructed frame must be mutable.
    for blk in frame._data.blocks:
        assert blk.values.flags.writeable
| [
"numpy.arange",
"pytest.mark.parametrize",
"numpy.testing.assert_array_almost_equal",
"blaze.dispatch.dispatch",
"pandas.DataFrame",
"blaze.utils.example",
"blaze.compute",
"datashape.dshape",
"pytest.yield_fixture",
"blaze.symbol",
"blaze.server.serialization.fastmsgpack.loads",
"blaze.server... | [((80, 108), 'pytest.importorskip', 'pytest.importorskip', (['"""flask"""'], {}), "('flask')\n", (99, 108), False, 'import pytest\n'), ((109, 146), 'pytest.importorskip', 'pytest.importorskip', (['"""flask.ext.cors"""'], {}), "('flask.ext.cors')\n", (128, 146), False, 'import pytest\n'), ((839, 908), 'pandas.DataFrame', 'DataFrame', (["[['Alice', 100], ['Bob', 200]]"], {'columns': "['name', 'amount']"}), "([['Alice', 100], ['Bob', 200]], columns=['name', 'amount'])\n", (848, 908), False, 'from pandas import DataFrame\n'), ((940, 1010), 'pandas.DataFrame', 'DataFrame', (["[['Alice', 'NYC'], ['Bob', 'LA']]"], {'columns': "['name', 'city']"}), "([['Alice', 'NYC'], ['Bob', 'LA']], columns=['name', 'city'])\n", (949, 1010), False, 'from pandas import DataFrame\n'), ((1401, 1442), 'odo.convert.register', 'convert.register', (['DataFrame', 'DumbResource'], {}), '(DataFrame, DumbResource)\n', (1417, 1442), False, 'from odo import odo, convert\n'), ((1717, 1745), 'blaze.dispatch.dispatch', 'dispatch', (['Expr', 'DumbResource'], {}), '(Expr, DumbResource)\n', (1725, 1745), False, 'from blaze.dispatch import dispatch\n'), ((1821, 1852), 'blaze.discover.register', 'discover.register', (['DumbResource'], {}), '(DumbResource)\n', (1838, 1852), False, 'from blaze import discover, symbol, by, CSV, compute, join, into, data\n'), ((2055, 2085), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""module"""'}), "(scope='module')\n", (2069, 2085), False, 'import pytest\n'), ((2176, 2206), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""module"""'}), "(scope='module')\n", (2190, 2206), False, 'import pytest\n'), ((2317, 2359), 'pytest.yield_fixture', 'pytest.yield_fixture', ([], {'params': '[None, tdata]'}), '(params=[None, tdata])\n', (2337, 2359), False, 'import pytest\n'), ((2586, 2628), 'pytest.yield_fixture', 'pytest.yield_fixture', ([], {'params': '[None, tdata]'}), '(params=[None, tdata])\n', (2606, 2628), False, 'import pytest\n'), ((3515, 3561), 
'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""serial"""', 'all_formats'], {}), "('serial', all_formats)\n", (3538, 3561), False, 'import pytest\n'), ((4706, 4752), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""serial"""', 'all_formats'], {}), "('serial', all_formats)\n", (4729, 4752), False, 'import pytest\n'), ((5546, 5592), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""serial"""', 'all_formats'], {}), "('serial', all_formats)\n", (5569, 5592), False, 'import pytest\n'), ((6040, 6086), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""serial"""', 'all_formats'], {}), "('serial', all_formats)\n", (6063, 6086), False, 'import pytest\n'), ((6668, 6714), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""serial"""', 'all_formats'], {}), "('serial', all_formats)\n", (6691, 6714), False, 'import pytest\n'), ((7236, 7282), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""serial"""', 'all_formats'], {}), "('serial', all_formats)\n", (7259, 7282), False, 'import pytest\n'), ((7963, 8009), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""serial"""', 'all_formats'], {}), "('serial', all_formats)\n", (7986, 8009), False, 'import pytest\n'), ((8854, 8900), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""serial"""', 'all_formats'], {}), "('serial', all_formats)\n", (8877, 8900), False, 'import pytest\n'), ((9564, 9610), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""serial"""', 'all_formats'], {}), "('serial', all_formats)\n", (9587, 9610), False, 'import pytest\n'), ((10168, 10214), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""serial"""', 'all_formats'], {}), "('serial', all_formats)\n", (10191, 10214), False, 'import pytest\n'), ((10661, 10707), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""serial"""', 'all_formats'], {}), "('serial', all_formats)\n", (10684, 10707), False, 'import pytest\n'), ((12689, 12739), 'pytest.mark.parametrize', 
'pytest.mark.parametrize', (['"""serial"""', 'trusted_formats'], {}), "('serial', trusted_formats)\n", (12712, 12739), False, 'import pytest\n'), ((13130, 13180), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""serial"""', 'trusted_formats'], {}), "('serial', trusted_formats)\n", (13153, 13180), False, 'import pytest\n'), ((13573, 13635), 'pytest.mark.xfail', 'pytest.mark.xfail', ([], {'reason': '"""pickle does not produce same error"""'}), "(reason='pickle does not produce same error')\n", (13590, 13635), False, 'import pytest\n'), ((13637, 13683), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""serial"""', 'all_formats'], {}), "('serial', all_formats)\n", (13660, 13683), False, 'import pytest\n'), ((14396, 14458), 'pytest.mark.xfail', 'pytest.mark.xfail', ([], {'reason': '"""pickle does nto produce same error"""'}), "(reason='pickle does nto produce same error')\n", (14413, 14458), False, 'import pytest\n'), ((14460, 14506), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""serial"""', 'all_formats'], {}), "('serial', all_formats)\n", (14483, 14506), False, 'import pytest\n'), ((14992, 15042), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""serial"""', 'trusted_formats'], {}), "('serial', trusted_formats)\n", (15015, 15042), False, 'import pytest\n'), ((15439, 15489), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""serial"""', 'trusted_formats'], {}), "('serial', trusted_formats)\n", (15462, 15489), False, 'import pytest\n'), ((16041, 16087), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""serial"""', 'all_formats'], {}), "('serial', all_formats)\n", (16064, 16087), False, 'import pytest\n'), ((16616, 16662), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""serial"""', 'all_formats'], {}), "('serial', all_formats)\n", (16639, 16662), False, 'import pytest\n'), ((18185, 18215), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""module"""'}), "(scope='module')\n", (18199, 18215), 
False, 'import pytest\n'), ((18258, 18288), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""module"""'}), "(scope='module')\n", (18272, 18288), False, 'import pytest\n'), ((18332, 18362), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""module"""'}), "(scope='module')\n", (18346, 18362), False, 'import pytest\n'), ((18837, 18883), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""serial"""', 'all_formats'], {}), "('serial', all_formats)\n", (18860, 18883), False, 'import pytest\n'), ((19887, 19933), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""serial"""', 'all_formats'], {}), "('serial', all_formats)\n", (19910, 19933), False, 'import pytest\n'), ((20542, 20588), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""serial"""', 'all_formats'], {}), "('serial', all_formats)\n", (20565, 20588), False, 'import pytest\n'), ((21204, 21250), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""serial"""', 'all_formats'], {}), "('serial', all_formats)\n", (21227, 21250), False, 'import pytest\n'), ((22108, 22154), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""serial"""', 'all_formats'], {}), "('serial', all_formats)\n", (22131, 22154), False, 'import pytest\n'), ((22530, 22576), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""serial"""', 'all_formats'], {}), "('serial', all_formats)\n", (22553, 22576), False, 'import pytest\n'), ((23768, 23814), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""serial"""', 'all_formats'], {}), "('serial', all_formats)\n", (23791, 23814), False, 'import pytest\n'), ((24195, 24241), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""serial"""', 'all_formats'], {}), "('serial', all_formats)\n", (24218, 24241), False, 'import pytest\n'), ((25177, 25223), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""serial"""', 'all_formats'], {}), "('serial', all_formats)\n", (25200, 25223), False, 'import pytest\n'), ((25624, 25670), 
'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""serial"""', 'all_formats'], {}), "('serial', all_formats)\n", (25647, 25670), False, 'import pytest\n'), ((25997, 26043), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""serial"""', 'all_formats'], {}), "('serial', all_formats)\n", (26020, 26043), False, 'import pytest\n'), ((26409, 26455), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""serial"""', 'all_formats'], {}), "('serial', all_formats)\n", (26432, 26455), False, 'import pytest\n'), ((26988, 27034), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""serial"""', 'all_formats'], {}), "('serial', all_formats)\n", (27011, 27034), False, 'import pytest\n'), ((27634, 27680), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""serial"""', 'all_formats'], {}), "('serial', all_formats)\n", (27657, 27680), False, 'import pytest\n'), ((29168, 29214), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""serial"""', 'all_formats'], {}), "('serial', all_formats)\n", (29191, 29214), False, 'import pytest\n'), ((30208, 30254), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""serial"""', 'all_formats'], {}), "('serial', all_formats)\n", (30231, 30254), False, 'import pytest\n'), ((1646, 1692), 'pandas.util.testing.assert_frame_equal', 'assert_frame_equal', (['to_return', 'DumbResource.df'], {}), '(to_return, DumbResource.df)\n', (1664, 1692), False, 'from pandas.util.testing import assert_frame_equal\n'), ((1887, 1912), 'blaze.discover', 'discover', (['DumbResource.df'], {}), '(DumbResource.df)\n', (1895, 1912), False, 'from blaze import discover, symbol, by, CSV, compute, join, into, data\n'), ((2108, 2134), 'blaze.server.server.Server', 'Server', (['tdata', 'all_formats'], {}), '(tdata, all_formats)\n', (2114, 2134), False, 'from blaze.server.server import Server, to_tree, from_tree, RC\n'), ((2233, 2275), 'blaze.server.server.Server', 'Server', (['tdata', 'all_formats'], {'allow_add': '(True)'}), 
'(tdata, all_formats, allow_add=True)\n', (2239, 2275), False, 'from blaze.server.server import Server, to_tree, from_tree, RC\n'), ((3197, 3238), 'blaze.server.server.Server', 'Server', (['iris', 'all_formats'], {'allow_add': '(True)'}), '(iris, all_formats, allow_add=True)\n', (3203, 3238), False, 'from blaze.server.server import Server, to_tree, from_tree, RC\n'), ((4064, 4112), 'blaze.symbol', 'symbol', (['"""t"""', '"""var * {name: string, amount: int}"""'], {}), "('t', 'var * {name: string, amount: int}')\n", (4070, 4112), False, 'from blaze import discover, symbol, by, CSV, compute, join, into, data\n'), ((4261, 4311), 'blaze.symbol', 'symbol', (['"""t"""', '"""var * {name: string, amount: int32}"""'], {}), "('t', 'var * {name: string, amount: int32}')\n", (4267, 4311), False, 'from blaze import discover, symbol, by, CSV, compute, join, into, data\n'), ((4351, 4406), 'datashape.dshape', 'datashape.dshape', (['"""var * {name: string, amount: int32}"""'], {}), "('var * {name: string, amount: int32}')\n", (4367, 4406), False, 'import datashape\n'), ((4793, 4843), 'blaze.symbol', 'symbol', (['"""t"""', '"""var * {name: string, amount: int32}"""'], {}), "('t', 'var * {name: string, amount: int32}')\n", (4799, 4843), False, 'from blaze import discover, symbol, by, CSV, compute, join, into, data\n'), ((4873, 4931), 'toolz.pipe', 'pipe', (['expr', 'to_tree', 'serial.dumps', 'serial.loads', 'from_tree'], {}), '(expr, to_tree, serial.dumps, serial.loads, from_tree)\n', (4877, 4931), False, 'from toolz import pipe\n'), ((5012, 5062), 'blaze.symbol', 'symbol', (['"""t"""', '"""var * {name: string, amount: int32}"""'], {}), "('t', 'var * {name: string, amount: int32}')\n", (5018, 5062), False, 'from blaze import discover, symbol, by, CSV, compute, join, into, data\n'), ((5093, 5122), 'blaze.server.server.to_tree', 'to_tree', (['expr'], {'names': "{t: 't'}"}), "(expr, names={t: 't'})\n", (5100, 5122), False, 'from blaze.server.server import Server, to_tree, from_tree, 
RC\n'), ((5192, 5227), 'blaze.server.server.from_tree', 'from_tree', (['tree'], {'namespace': "{'t': t}"}), "(tree, namespace={'t': t})\n", (5201, 5227), False, 'from blaze.server.server import Server, to_tree, from_tree, RC\n'), ((5328, 5378), 'blaze.symbol', 'symbol', (['"""t"""', '"""var * {name: string, amount: int32}"""'], {}), "('t', 'var * {name: string, amount: int32}')\n", (5334, 5378), False, 'from blaze import discover, symbol, by, CSV, compute, join, into, data\n'), ((5415, 5428), 'blaze.server.server.to_tree', 'to_tree', (['expr'], {}), '(expr)\n', (5422, 5428), False, 'from blaze.server.server import Server, to_tree, from_tree, RC\n'), ((5526, 5541), 'blaze.discover', 'discover', (['tdata'], {}), '(tdata)\n', (5534, 5541), False, 'from blaze import discover, symbol, by, CSV, compute, join, into, data\n'), ((6406, 6442), 'datashape.dshape', 'datashape.dshape', (["tdata['datashape']"], {}), "(tdata['datashape'])\n", (6422, 6442), False, 'import datashape\n'), ((7212, 7231), 'blaze.utils.example', 'example', (['"""iris.csv"""'], {}), "('iris.csv')\n", (7219, 7231), False, 'from blaze.utils import example\n'), ((7417, 7440), 'blaze.symbol', 'symbol', (['"""pl"""', '"""float32"""'], {}), "('pl', 'float32')\n", (7423, 7440), False, 'from blaze import discover, symbol, by, CSV, compute, join, into, data\n'), ((7494, 7519), 'blaze.server.server.to_tree', 'to_tree', (['expr', "{pl: 'pl'}"], {}), "(expr, {pl: 'pl'})\n", (7501, 7519), False, 'from blaze.server.server import Server, to_tree, from_tree, RC\n'), ((8239, 8252), 'blaze.server.server.to_tree', 'to_tree', (['expr'], {}), '(expr)\n', (8246, 8252), False, 'from blaze.server.server import Server, to_tree, from_tree, RC\n'), ((8582, 8641), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['result[:, 0]', 'expected[:, 0]'], {}), '(result[:, 0], expected[:, 0])\n', (8611, 8641), True, 'import numpy as np\n'), ((8680, 8748), 'numpy.testing.assert_array_almost_equal', 
'np.testing.assert_array_almost_equal', (['result[:, 1:]', 'expected[:, 1:]'], {}), '(result[:, 1:], expected[:, 1:])\n', (8716, 8748), True, 'import numpy as np\n'), ((9130, 9143), 'blaze.server.server.to_tree', 'to_tree', (['expr'], {}), '(expr)\n', (9137, 9143), False, 'from blaze.server.server import Server, to_tree, from_tree, RC\n'), ((9425, 9444), 'blaze.compute', 'compute', (['expr', 'iris'], {}), '(expr, iris)\n', (9432, 9444), False, 'from blaze import discover, symbol, by, CSV, compute, join, into, data\n'), ((9709, 9735), 'blaze.join', 'join', (['s.accounts', 's.cities'], {}), '(s.accounts, s.cities)\n', (9713, 9735), False, 'from blaze import discover, symbol, by, CSV, compute, join, into, data\n'), ((10017, 10042), 'blaze.compute', 'compute', (['expr', '{s: tdata}'], {}), '(expr, {s: tdata})\n', (10024, 10042), False, 'from blaze import discover, symbol, by, CSV, compute, join, into, data\n'), ((10531, 10549), 'blaze.into', 'into', (['list', 'cities'], {}), '(list, cities)\n', (10535, 10549), False, 'from blaze import discover, symbol, by, CSV, compute, join, into, data\n'), ((12614, 12656), 'blaze.compute', 'compute', (['expr', '{t: iris}'], {'return_type': 'list'}), '(expr, {t: iris}, return_type=list)\n', (12621, 12656), False, 'from blaze import discover, symbol, by, CSV, compute, join, into, data\n'), ((18502, 18548), 'blaze.server.server.Server', 'Server', (['tdata', 'all_formats'], {'authorization': 'auth'}), '(tdata, all_formats, authorization=auth)\n', (18508, 18548), False, 'from blaze.server.server import Server, to_tree, from_tree, RC\n'), ((19190, 19206), 'blaze.server.client.mimetype', 'mimetype', (['serial'], {}), '(serial)\n', (19198, 19206), False, 'from blaze.server.client import mimetype\n'), ((21482, 21501), 'blaze.utils.example', 'example', (['"""bunk.csv"""'], {}), "('bunk.csv')\n", (21489, 21501), False, 'from blaze.utils import example\n'), ((22053, 22104), 'datashape.util.testing.assert_dshape_equal', 'assert_dshape_equal', 
(['pre_datashape', 'response_dshape'], {}), '(pre_datashape, response_dshape)\n', (22072, 22104), False, 'from datashape.util.testing import assert_dshape_equal\n'), ((22226, 22245), 'blaze.utils.example', 'example', (['"""iris.csv"""'], {}), "('iris.csv')\n", (22233, 22245), False, 'from blaze.utils import example\n'), ((22662, 22681), 'blaze.utils.example', 'example', (['"""iris.csv"""'], {}), "('iris.csv')\n", (22669, 22681), False, 'from blaze.utils import example\n'), ((23191, 23285), 'datashape.util.testing.assert_dshape_equal', 'assert_dshape_equal', (["response_dshape.measure.dict['iris']", "expected2.measure.dict['iris']"], {}), "(response_dshape.measure.dict['iris'], expected2.measure\n .dict['iris'])\n", (23210, 23285), False, 'from datashape.util.testing import assert_dshape_equal\n'), ((23923, 23942), 'blaze.utils.example', 'example', (['"""iris.csv"""'], {}), "('iris.csv')\n", (23930, 23942), False, 'from blaze.utils import example\n'), ((24329, 24348), 'blaze.utils.example', 'example', (['"""iris.csv"""'], {}), "('iris.csv')\n", (24336, 24348), False, 'from blaze.utils import example\n'), ((24885, 24914), 'datashape.dshape', 'datashape.dshape', (['response_ds'], {}), '(response_ds)\n', (24901, 24914), False, 'import datashape\n'), ((24923, 24938), 'blaze.symbol', 'symbol', (['"""t"""', 'ds'], {}), "('t', ds)\n", (24929, 24938), False, 'from blaze import discover, symbol, by, CSV, compute, join, into, data\n'), ((26600, 26619), 'blaze.utils.example', 'example', (['"""iris.csv"""'], {}), "('iris.csv')\n", (26607, 26619), False, 'from blaze.utils import example\n'), ((27192, 27211), 'blaze.utils.example', 'example', (['"""iris.csv"""'], {}), "('iris.csv')\n", (27199, 27211), False, 'from blaze.utils import example\n'), ((27882, 27908), 'blaze.utils.example', 'example', (['"""iris-latin1.tsv"""'], {}), "('iris-latin1.tsv')\n", (27889, 27908), False, 'from blaze.utils import example\n'), ((28563, 28657), 'datashape.util.testing.assert_dshape_equal', 
'assert_dshape_equal', (["response_dshape.measure.dict['iris']", "expected2.measure.dict['iris']"], {}), "(response_dshape.measure.dict['iris'], expected2.measure\n .dict['iris'])\n", (28582, 28657), False, 'from datashape.util.testing import assert_dshape_equal\n'), ((29903, 29928), 'blaze.discover', 'discover', (['DumbResource.df'], {}), '(DumbResource.df)\n', (29911, 29928), False, 'from blaze import discover, symbol, by, CSV, compute, join, into, data\n'), ((30958, 30983), 'blaze.discover', 'discover', (['DumbResource.df'], {}), '(DumbResource.df)\n', (30966, 30983), False, 'from blaze import discover, symbol, by, CSV, compute, join, into, data\n'), ((1218, 1236), 'blaze.utils.example', 'example', (['"""iris.db"""'], {}), "('iris.db')\n", (1225, 1236), False, 'from blaze.utils import example\n'), ((2474, 2484), 'copy.copy', 'copy', (['data'], {}), '(data)\n', (2478, 2484), False, 'from copy import copy\n'), ((2777, 2787), 'copy.copy', 'copy', (['data'], {}), '(data)\n', (2781, 2787), False, 'from copy import copy\n'), ((3168, 3187), 'blaze.utils.example', 'example', (['"""iris.csv"""'], {}), "('iris.csv')\n", (3175, 3187), False, 'from blaze.utils import example\n'), ((4677, 4690), 'blaze.server.server.to_tree', 'to_tree', (['expr'], {}), '(expr)\n', (4684, 4690), False, 'from blaze.server.server import Server, to_tree, from_tree, RC\n'), ((5681, 5694), 'blaze.server.server.to_tree', 'to_tree', (['expr'], {}), '(expr)\n', (5688, 5694), False, 'from blaze.server.server import Server, to_tree, from_tree, RC\n'), ((6166, 6179), 'blaze.server.server.to_tree', 'to_tree', (['expr'], {}), '(expr)\n', (6173, 6179), False, 'from blaze.server.server import Server, to_tree, from_tree, RC\n'), ((6565, 6583), 'blaze.into', 'into', (['list', 'result'], {}), '(list, result)\n', (6569, 6583), False, 'from blaze import discover, symbol, by, CSV, compute, join, into, data\n'), ((6587, 6605), 'blaze.into', 'into', (['list', 'events'], {}), '(list, events)\n', (6591, 6605), False, 
'from blaze import discover, symbol, by, CSV, compute, join, into, data\n'), ((7392, 7406), 'blaze.discover', 'discover', (['iris'], {}), '(iris)\n', (7400, 7406), False, 'from blaze import discover, symbol, by, CSV, compute, join, into, data\n'), ((7883, 7900), 'odo.odo', 'odo', (['result', 'list'], {}), '(result, list)\n', (7886, 7900), False, 'from odo import odo, convert\n'), ((8108, 8122), 'blaze.discover', 'discover', (['iris'], {}), '(iris)\n', (8116, 8122), False, 'from blaze import discover, symbol, by, CSV, compute, join, into, data\n'), ((8551, 8570), 'blaze.compute', 'compute', (['expr', 'iris'], {}), '(expr, iris)\n', (8558, 8570), False, 'from blaze import discover, symbol, by, CSV, compute, join, into, data\n'), ((8995, 9009), 'blaze.discover', 'discover', (['iris'], {}), '(iris)\n', (9003, 9009), False, 'from blaze import discover, symbol, by, CSV, compute, join, into, data\n'), ((9496, 9516), 'blaze.into', 'into', (['list', 'expected'], {}), '(list, expected)\n', (9500, 9516), False, 'from blaze import discover, symbol, by, CSV, compute, join, into, data\n'), ((9680, 9695), 'blaze.discover', 'discover', (['tdata'], {}), '(tdata)\n', (9688, 9695), False, 'from blaze import discover, symbol, by, CSV, compute, join, into, data\n'), ((10094, 10114), 'blaze.into', 'into', (['list', 'expected'], {}), '(list, expected)\n', (10098, 10114), False, 'from blaze import discover, symbol, by, CSV, compute, join, into, data\n'), ((10800, 10813), 'blaze.server.server.to_tree', 'to_tree', (['expr'], {}), '(expr)\n', (10807, 10813), False, 'from blaze.server.server import Server, to_tree, from_tree, RC\n'), ((11976, 11987), 'pandas.DataFrame', 'DataFrame', ([], {}), '()\n', (11985, 11987), False, 'from pandas import DataFrame\n'), ((12244, 12258), 'blaze.discover', 'discover', (['iris'], {}), '(iris)\n', (12252, 12258), False, 'from blaze import discover, symbol, by, CSV, compute, join, into, data\n'), ((12318, 12331), 'blaze.server.server.to_tree', 'to_tree', 
(['expr'], {}), '(expr)\n', (12325, 12331), False, 'from blaze.server.server import Server, to_tree, from_tree, RC\n'), ((13990, 14004), 'blaze.discover', 'discover', (['iris'], {}), '(iris)\n', (13998, 14004), False, 'from blaze import discover, symbol, by, CSV, compute, join, into, data\n'), ((14580, 14594), 'blaze.discover', 'discover', (['iris'], {}), '(iris)\n', (14588, 14594), False, 'from blaze import discover, symbol, by, CSV, compute, join, into, data\n'), ((15584, 15598), 'blaze.discover', 'discover', (['iris'], {}), '(iris)\n', (15592, 15598), False, 'from blaze import discover, symbol, by, CSV, compute, join, into, data\n'), ((15682, 15695), 'blaze.server.server.to_tree', 'to_tree', (['expr'], {}), '(expr)\n', (15689, 15695), False, 'from blaze.server.server import Server, to_tree, from_tree, RC\n'), ((16214, 16227), 'blaze.server.server.to_tree', 'to_tree', (['expr'], {}), '(expr)\n', (16221, 16227), False, 'from blaze.server.server import Server, to_tree, from_tree, RC\n'), ((16772, 16785), 'blaze.server.server.to_tree', 'to_tree', (['expr'], {}), '(expr)\n', (16779, 16785), False, 'from blaze.server.server import Server, to_tree, from_tree, RC\n'), ((18999, 19012), 'blaze.server.server.to_tree', 'to_tree', (['expr'], {}), '(expr)\n', (19006, 19012), False, 'from blaze.server.server import Server, to_tree, from_tree, RC\n'), ((20024, 20037), 'blaze.server.server.to_tree', 'to_tree', (['expr'], {}), '(expr)\n', (20031, 20037), False, 'from blaze.server.server import Server, to_tree, from_tree, RC\n'), ((20686, 20699), 'blaze.server.server.to_tree', 'to_tree', (['expr'], {}), '(expr)\n', (20693, 20699), False, 'from blaze.server.server import Server, to_tree, from_tree, RC\n'), ((24960, 24975), 'blaze.server.server.to_tree', 'to_tree', (['t.iris'], {}), '(t.iris)\n', (24967, 24975), False, 'from blaze.server.server import Server, to_tree, from_tree, RC\n'), ((29293, 29306), 'blaze.server.server.to_tree', 'to_tree', (['expr'], {}), '(expr)\n', (29300, 
29306), False, 'from blaze.server.server import Server, to_tree, from_tree, RC\n'), ((29585, 29598), 'blaze.server.server.to_tree', 'to_tree', (['expr'], {}), '(expr)\n', (29592, 29598), False, 'from blaze.server.server import Server, to_tree, from_tree, RC\n'), ((29953, 29989), 'datashape.dshape', 'datashape.dshape', (["tdata['datashape']"], {}), "(tdata['datashape'])\n", (29969, 29989), False, 'import datashape\n'), ((30344, 30357), 'blaze.server.server.to_tree', 'to_tree', (['expr'], {}), '(expr)\n', (30351, 30357), False, 'from blaze.server.server import Server, to_tree, from_tree, RC\n'), ((30636, 30649), 'blaze.server.server.to_tree', 'to_tree', (['expr'], {}), '(expr)\n', (30643, 30649), False, 'from blaze.server.server import Server, to_tree, from_tree, RC\n'), ((31008, 31044), 'datashape.dshape', 'datashape.dshape', (["tdata['datashape']"], {}), "(tdata['datashape'])\n", (31024, 31044), False, 'import datashape\n'), ((31381, 31394), 'blaze.server.server.to_tree', 'to_tree', (['expr'], {}), '(expr)\n', (31388, 31394), False, 'from blaze.server.server import Server, to_tree, from_tree, RC\n'), ((1055, 1085), 'datetime.datetime', 'datetime', (['(2000)', '(1)', '(1)', '(12)', '(0)', '(0)'], {}), '(2000, 1, 1, 12, 0, 0)\n', (1063, 1085), False, 'from datetime import datetime\n'), ((1112, 1142), 'datetime.datetime', 'datetime', (['(2000)', '(1)', '(2)', '(12)', '(0)', '(0)'], {}), '(2000, 1, 2, 12, 0, 0)\n', (1120, 1142), False, 'from datetime import datetime\n'), ((1293, 1305), 'numpy.arange', 'np.arange', (['(5)'], {}), '(5)\n', (1302, 1305), True, 'import numpy as np\n'), ((1332, 1348), 'numpy.arange', 'np.arange', (['(5)', '(10)'], {}), '(5, 10)\n', (1341, 1348), True, 'import numpy as np\n'), ((1629, 1640), 'blaze.discover', 'discover', (['d'], {}), '(d)\n', (1637, 1640), False, 'from blaze import discover, symbol, by, CSV, compute, join, into, data\n'), ((3494, 3509), 'blaze.discover', 'discover', (['tdata'], {}), '(tdata)\n', (3502, 3509), False, 'from 
blaze import discover, symbol, by, CSV, compute, join, into, data\n'), ((5464, 5489), 'blaze.server.server.from_tree', 'from_tree', (['tree', "{'t': t}"], {}), "(tree, {'t': t})\n", (5473, 5489), False, 'from blaze.server.server import Server, to_tree, from_tree, RC\n'), ((5837, 5853), 'blaze.server.client.mimetype', 'mimetype', (['serial'], {}), '(serial)\n', (5845, 5853), False, 'from blaze.server.client import mimetype\n'), ((6303, 6319), 'blaze.server.client.mimetype', 'mimetype', (['serial'], {}), '(serial)\n', (6311, 6319), False, 'from blaze.server.client import mimetype\n'), ((7011, 7027), 'blaze.server.client.mimetype', 'mimetype', (['serial'], {}), '(serial)\n', (7019, 7027), False, 'from blaze.server.client import mimetype\n'), ((7679, 7695), 'blaze.server.client.mimetype', 'mimetype', (['serial'], {}), '(serial)\n', (7687, 7695), False, 'from blaze.server.client import mimetype\n'), ((8387, 8403), 'blaze.server.client.mimetype', 'mimetype', (['serial'], {}), '(serial)\n', (8395, 8403), False, 'from blaze.server.client import mimetype\n'), ((9278, 9294), 'blaze.server.client.mimetype', 'mimetype', (['serial'], {}), '(serial)\n', (9286, 9294), False, 'from blaze.server.client import mimetype\n'), ((9864, 9880), 'blaze.server.client.mimetype', 'mimetype', (['serial'], {}), '(serial)\n', (9872, 9880), False, 'from blaze.server.client import mimetype\n'), ((10427, 10443), 'blaze.server.client.mimetype', 'mimetype', (['serial'], {}), '(serial)\n', (10435, 10443), False, 'from blaze.server.client import mimetype\n'), ((10937, 10953), 'blaze.server.client.mimetype', 'mimetype', (['serial'], {}), '(serial)\n', (10945, 10953), False, 'from blaze.server.client import mimetype\n'), ((11240, 11533), 'pandas.DataFrame', 'DataFrame', (["[[5.1, 3.5, 1.4, 0.2, 'Iris-setosa'], [4.9, 3.0, 1.4, 0.2, 'Iris-setosa'],\n [4.7, 3.2, 1.3, 0.2, 'Iris-setosa'], [4.6, 3.1, 1.5, 0.2, 'Iris-setosa'\n ], [5.0, 3.6, 1.4, 0.2, 'Iris-setosa']]"], {'columns': "['sepal_length', 
'sepal_width', 'petal_length', 'petal_width', 'species']"}), "([[5.1, 3.5, 1.4, 0.2, 'Iris-setosa'], [4.9, 3.0, 1.4, 0.2,\n 'Iris-setosa'], [4.7, 3.2, 1.3, 0.2, 'Iris-setosa'], [4.6, 3.1, 1.5, \n 0.2, 'Iris-setosa'], [5.0, 3.6, 1.4, 0.2, 'Iris-setosa']], columns=[\n 'sepal_length', 'sepal_width', 'petal_length', 'petal_width', 'species'])\n", (11249, 11533), False, 'from pandas import DataFrame\n'), ((11829, 11865), 'pandas.util.testing.assert_frame_equal', 'assert_frame_equal', (['expected', 'result'], {}), '(expected, result)\n', (11847, 11865), False, 'from pandas.util.testing import assert_frame_equal\n'), ((12454, 12470), 'blaze.server.client.mimetype', 'mimetype', (['serial'], {}), '(serial)\n', (12462, 12470), False, 'from blaze.server.client import mimetype\n'), ((14139, 14152), 'blaze.server.server.to_tree', 'to_tree', (['expr'], {}), '(expr)\n', (14146, 14152), False, 'from blaze.server.server import Server, to_tree, from_tree, RC\n'), ((14729, 14742), 'blaze.server.server.to_tree', 'to_tree', (['expr'], {}), '(expr)\n', (14736, 14742), False, 'from blaze.server.server import Server, to_tree, from_tree, RC\n'), ((15818, 15834), 'blaze.server.client.mimetype', 'mimetype', (['serial'], {}), '(serial)\n', (15826, 15834), False, 'from blaze.server.client import mimetype\n'), ((15995, 16036), 'blaze.compute', 'compute', (['expr', '{t: iris}'], {'return_type': 'int'}), '(expr, {t: iris}, return_type=int)\n', (16002, 16036), False, 'from blaze import discover, symbol, by, CSV, compute, join, into, data\n'), ((16350, 16366), 'blaze.server.client.mimetype', 'mimetype', (['serial'], {}), '(serial)\n', (16358, 16366), False, 'from blaze.server.client import mimetype\n'), ((16527, 16552), 'blaze.compute', 'compute', (['expr', '{t: tdata}'], {}), '(expr, {t: tdata})\n', (16534, 16552), False, 'from blaze import discover, symbol, by, CSV, compute, join, into, data\n'), ((16908, 16924), 'blaze.server.client.mimetype', 'mimetype', (['serial'], {}), '(serial)\n', (16916, 
16924), False, 'from blaze.server.client import mimetype\n'), ((17085, 17110), 'blaze.compute', 'compute', (['expr', '{t: tdata}'], {}), '(expr, {t: tdata})\n', (17092, 17110), False, 'from blaze import discover, symbol, by, CSV, compute, join, into, data\n'), ((20105, 20121), 'blaze.server.client.mimetype', 'mimetype', (['serial'], {}), '(serial)\n', (20113, 20121), False, 'from blaze.server.client import mimetype\n'), ((20767, 20783), 'blaze.server.client.mimetype', 'mimetype', (['serial'], {}), '(serial)\n', (20775, 20783), False, 'from blaze.server.client import mimetype\n'), ((21637, 21653), 'blaze.server.client.mimetype', 'mimetype', (['serial'], {}), '(serial)\n', (21645, 21653), False, 'from blaze.server.client import mimetype\n'), ((22373, 22389), 'blaze.server.client.mimetype', 'mimetype', (['serial'], {}), '(serial)\n', (22381, 22389), False, 'from blaze.server.client import mimetype\n'), ((22817, 22833), 'blaze.server.client.mimetype', 'mimetype', (['serial'], {}), '(serial)\n', (22825, 22833), False, 'from blaze.server.client import mimetype\n'), ((23098, 23113), 'blaze.data', 'data', (['iris_path'], {}), '(iris_path)\n', (23102, 23113), False, 'from blaze import discover, symbol, by, CSV, compute, join, into, data\n'), ((23356, 23371), 'blaze.data', 'data', (['iris_path'], {}), '(iris_path)\n', (23360, 23371), False, 'from blaze import discover, symbol, by, CSV, compute, join, into, data\n'), ((23587, 23603), 'blaze.server.client.mimetype', 'mimetype', (['serial'], {}), '(serial)\n', (23595, 23603), False, 'from blaze.server.client import mimetype\n'), ((23715, 23730), 'blaze.data', 'data', (['iris_path'], {}), '(iris_path)\n', (23719, 23730), False, 'from blaze import discover, symbol, by, CSV, compute, join, into, data\n'), ((24070, 24086), 'blaze.server.client.mimetype', 'mimetype', (['serial'], {}), '(serial)\n', (24078, 24086), False, 'from blaze.server.client import mimetype\n'), ((24463, 24479), 'blaze.server.client.mimetype', 'mimetype', 
(['serial'], {}), '(serial)\n', (24471, 24479), False, 'from blaze.server.client import mimetype\n'), ((24637, 24653), 'blaze.server.client.mimetype', 'mimetype', (['serial'], {}), '(serial)\n', (24645, 24653), False, 'from blaze.server.client import mimetype\n'), ((25119, 25135), 'blaze.server.client.mimetype', 'mimetype', (['serial'], {}), '(serial)\n', (25127, 25135), False, 'from blaze.server.client import mimetype\n'), ((25502, 25518), 'blaze.server.client.mimetype', 'mimetype', (['serial'], {}), '(serial)\n', (25510, 25518), False, 'from blaze.server.client import mimetype\n'), ((25875, 25891), 'blaze.server.client.mimetype', 'mimetype', (['serial'], {}), '(serial)\n', (25883, 25891), False, 'from blaze.server.client import mimetype\n'), ((26280, 26296), 'blaze.server.client.mimetype', 'mimetype', (['serial'], {}), '(serial)\n', (26288, 26296), False, 'from blaze.server.client import mimetype\n'), ((26831, 26847), 'blaze.server.client.mimetype', 'mimetype', (['serial'], {}), '(serial)\n', (26839, 26847), False, 'from blaze.server.client import mimetype\n'), ((27477, 27493), 'blaze.server.client.mimetype', 'mimetype', (['serial'], {}), '(serial)\n', (27485, 27493), False, 'from blaze.server.client import mimetype\n'), ((28175, 28191), 'blaze.server.client.mimetype', 'mimetype', (['serial'], {}), '(serial)\n', (28183, 28191), False, 'from blaze.server.client import mimetype\n'), ((28456, 28485), 'blaze.data', 'data', (['iris_path'], {}), '(iris_path, **csv_kwargs)\n', (28460, 28485), False, 'from blaze import discover, symbol, by, CSV, compute, join, into, data\n'), ((28728, 28757), 'blaze.data', 'data', (['iris_path'], {}), '(iris_path, **csv_kwargs)\n', (28732, 28757), False, 'from blaze import discover, symbol, by, CSV, compute, join, into, data\n'), ((28973, 28989), 'blaze.server.client.mimetype', 'mimetype', (['serial'], {}), '(serial)\n', (28981, 28989), False, 'from blaze.server.client import mimetype\n'), ((29101, 29130), 'blaze.data', 'data', 
(['iris_path'], {}), '(iris_path, **csv_kwargs)\n', (29105, 29130), False, 'from blaze import discover, symbol, by, CSV, compute, join, into, data\n'), ((29375, 29391), 'blaze.server.client.mimetype', 'mimetype', (['serial'], {}), '(serial)\n', (29383, 29391), False, 'from blaze.server.client import mimetype\n'), ((29646, 29672), 'odo.odo', 'odo', (['DumbResource.df', 'list'], {}), '(DumbResource.df, list)\n', (29649, 29672), False, 'from odo import odo, convert\n'), ((29741, 29757), 'blaze.server.client.mimetype', 'mimetype', (['serial'], {}), '(serial)\n', (29749, 29757), False, 'from blaze.server.client import mimetype\n'), ((30426, 30442), 'blaze.server.client.mimetype', 'mimetype', (['serial'], {}), '(serial)\n', (30434, 30442), False, 'from blaze.server.client import mimetype\n'), ((30701, 30727), 'odo.odo', 'odo', (['DumbResource.df', 'list'], {}), '(DumbResource.df, list)\n', (30704, 30727), False, 'from odo import odo, convert\n'), ((30796, 30812), 'blaze.server.client.mimetype', 'mimetype', (['serial'], {}), '(serial)\n', (30804, 30812), False, 'from blaze.server.client import mimetype\n'), ((31462, 31483), 'blaze.server.client.mimetype', 'mimetype', (['fastmsgpack'], {}), '(fastmsgpack)\n', (31470, 31483), False, 'from blaze.server.client import mimetype\n'), ((31513, 31537), 'blaze.server.serialization.fastmsgpack.dumps', 'fastmsgpack.dumps', (['query'], {}), '(query)\n', (31530, 31537), False, 'from blaze.server.serialization import all_formats, trusted_formats, fastmsgpack\n'), ((31612, 31642), 'blaze.server.serialization.fastmsgpack.loads', 'fastmsgpack.loads', (['result.data'], {}), '(result.data)\n', (31629, 31642), False, 'from blaze.server.serialization import all_formats, trusted_formats, fastmsgpack\n'), ((4134, 4144), 'blaze.server.server.to_tree', 'to_tree', (['t'], {}), '(t)\n', (4141, 4144), False, 'from blaze.server.server import Server, to_tree, from_tree, RC\n'), ((4182, 4203), 'blaze.server.server.to_tree', 'to_tree', (['(t.amount + 
1)'], {}), '(t.amount + 1)\n', (4189, 4203), False, 'from blaze.server.server import Server, to_tree, from_tree, RC\n'), ((9472, 9490), 'blaze.into', 'into', (['list', 'result'], {}), '(list, result)\n', (9476, 9490), False, 'from blaze import discover, symbol, by, CSV, compute, join, into, data\n'), ((10071, 10088), 'odo.odo', 'odo', (['result', 'list'], {}), '(result, list)\n', (10074, 10088), False, 'from odo import odo, convert\n'), ((10578, 10591), 'blaze.into', 'into', (['list', 'a'], {}), '(list, a)\n', (10582, 10591), False, 'from blaze import discover, symbol, by, CSV, compute, join, into, data\n'), ((13080, 13097), 'pandas.Series', 'pd.Series', (['result'], {}), '(result)\n', (13089, 13097), True, 'import pandas as pd\n'), ((13101, 13119), 'pandas.Series', 'pd.Series', (['exp_res'], {}), '(exp_res)\n', (13110, 13119), True, 'import pandas as pd\n'), ((13523, 13540), 'pandas.Series', 'pd.Series', (['result'], {}), '(result)\n', (13532, 13540), True, 'import pandas as pd\n'), ((13544, 13562), 'pandas.Series', 'pd.Series', (['exp_res'], {}), '(exp_res)\n', (13553, 13562), True, 'import pandas as pd\n'), ((14308, 14324), 'blaze.server.client.mimetype', 'mimetype', (['serial'], {}), '(serial)\n', (14316, 14324), False, 'from blaze.server.client import mimetype\n'), ((14898, 14914), 'blaze.server.client.mimetype', 'mimetype', (['serial'], {}), '(serial)\n', (14906, 14914), False, 'from blaze.server.client import mimetype\n'), ((15388, 15405), 'pandas.Series', 'pd.Series', (['result'], {}), '(result)\n', (15397, 15405), True, 'import pandas as pd\n'), ((15409, 15427), 'pandas.Series', 'pd.Series', (['exp_res'], {}), '(exp_res)\n', (15418, 15427), True, 'import pandas as pd\n'), ((9818, 9831), 'blaze.server.server.to_tree', 'to_tree', (['expr'], {}), '(expr)\n', (9825, 9831), False, 'from blaze.server.server import Server, to_tree, from_tree, RC\n'), ((23525, 23538), 'blaze.server.server.to_tree', 'to_tree', (['expr'], {}), '(expr)\n', (23532, 23538), False, 
'from blaze.server.server import Server, to_tree, from_tree, RC\n'), ((28911, 28924), 'blaze.server.server.to_tree', 'to_tree', (['expr'], {}), '(expr)\n', (28918, 28924), False, 'from blaze.server.server import Server, to_tree, from_tree, RC\n')] |
#!/usr/bin/env python
"""Demonstrate non-binary plot calibration curve failure
Reproduction for WB-6749.
---
id: 0.sklearn.01-plot-calibration-curve-nonbinary
tag:
  shard: sklearn
plugin:
  - wandb
depend:
  requirements:
    - numpy
    - pandas
    - scikit-learn
  files:
    - file: wine.csv
      source: https://raw.githubusercontent.com/wandb/examples/master/examples/data/wine.csv
assert:
  - :wandb:runs_len: 1
  - :wandb:runs[0][exitcode]: 0
  - :yea:exit: 0
  - :op:contains_regex:
    - :wandb:runs[0][output][stderr]
    - This function only supports binary classification at the moment and therefore expects labels to be binary
  - :op:contains:
    - :wandb:runs[0][telemetry][1] # imports before
    - 5 # sklearn
  - :op:contains:
    - :wandb:runs[0][telemetry][2] # imports after
    - 5 # sklearn
"""
import numpy as np
import pandas as pd
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import train_test_split
import wandb
# yea test will grab this
# data_url = "https://raw.githubusercontent.com/wandb/examples/master/examples/data/wine.csv"
# !wget {data_url} -O "wine.csv"
# Load data
wine_quality = pd.read_csv("wine.csv")
# Target is the multi-class (non-binary) "quality" column; per the title
# and the stderr assertion in the yea header, a non-binary target is what
# this reproduction is exercising.
y = wine_quality["quality"]
y = y.values
X = wine_quality.drop(["quality"], axis=1)
X = X.values
feature_names = wine_quality.columns
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)
labels = ["one", "two", "three", "four", "five", "six", "seven", "eight", "nine", "ten"]
# Train model, get predictions
model = RandomForestClassifier()
model.fit(X_train, y_train)
y_pred = model.predict(X_test)
y_probas = model.predict_proba(X_test)
importances = model.feature_importances_
indices = np.argsort(importances)[::-1]
# Printed before and after the wandb call -- presumably a sanity check that
# plot_calibration_curve leaves the fitted model intact; confirm intent.
print(model.n_features_)
run = wandb.init(project="my-scikit-integration")
# Expected to warn on stderr ("This function only supports binary
# classification ...") because the target is not binary -- asserted in the
# yea header above.
wandb.sklearn.plot_calibration_curve(model, X_train, y_train, "RandomForestClassifier")
print(model.n_features_)
outs = model.predict(X_train)
| [
"sklearn.ensemble.RandomForestClassifier",
"pandas.read_csv",
"sklearn.model_selection.train_test_split",
"numpy.argsort",
"wandb.init",
"wandb.sklearn.plot_calibration_curve"
] | [((1168, 1191), 'pandas.read_csv', 'pd.read_csv', (['"""wine.csv"""'], {}), "('wine.csv')\n", (1179, 1191), True, 'import pandas as pd\n'), ((1362, 1399), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X', 'y'], {'test_size': '(0.2)'}), '(X, y, test_size=0.2)\n', (1378, 1399), False, 'from sklearn.model_selection import train_test_split\n'), ((1529, 1553), 'sklearn.ensemble.RandomForestClassifier', 'RandomForestClassifier', ([], {}), '()\n', (1551, 1553), False, 'from sklearn.ensemble import RandomForestClassifier\n'), ((1766, 1809), 'wandb.init', 'wandb.init', ([], {'project': '"""my-scikit-integration"""'}), "(project='my-scikit-integration')\n", (1776, 1809), False, 'import wandb\n'), ((1811, 1902), 'wandb.sklearn.plot_calibration_curve', 'wandb.sklearn.plot_calibration_curve', (['model', 'X_train', 'y_train', '"""RandomForestClassifier"""'], {}), "(model, X_train, y_train,\n 'RandomForestClassifier')\n", (1847, 1902), False, 'import wandb\n'), ((1703, 1726), 'numpy.argsort', 'np.argsort', (['importances'], {}), '(importances)\n', (1713, 1726), True, 'import numpy as np\n')] |
import numpy as np

# PageRank by power iteration on a 6-node directed link graph.
#
# Fixes over the previous version:
#  * the convergence tolerance `min_delta` was defined but never used -- the
#    loop instead re-bound `min=0.001` each pass, shadowing the builtin;
#  * dead variables removed (a duplicate `G` of `A`, an unused `P`, `lj`,
#    `R`, `base`, and the per-iteration rebuild of the uniform matrix `E`);
#  * deprecated `np.mat`/`np.matrix` replaced by plain ndarrays and `@`.
#
# A[i, j] = 1 means there is a link from node j to node i (same adjacency
# as the original string matrix).
n = 6
A = np.array([[0., 0., 0., 1., 0., 1.],
              [1., 0., 0., 0., 0., 0.],
              [0., 1., 0., 0., 0., 0.],
              [0., 1., 1., 0., 0., 0.],
              [0., 0., 1., 0., 0., 0.],
              [0., 0., 0., 1., 1., 0.]])

factor = 0.85      # damping factor
min_delta = 0.001  # per-component convergence threshold (now actually used)

# Row-normalize A so each row sums to 1; every row here has at least one link.
A = A / A.sum(axis=1, keepdims=True)

# Damped ("Google") matrix: transpose of the row-stochastic matrix plus a
# uniform teleportation term.  Its columns each sum to 1, so the total rank
# mass (n) is preserved by every iteration.
G = factor * A.T + (1.0 - factor) / n

# Power iteration from a uniform start vector of ones, stopping once no
# component changes by more than min_delta.
pagerank = np.ones((n, 1), dtype=float)
while True:
    frank = pagerank            # previous iterate
    pagerank = G @ frank
    if np.max(np.abs(pagerank - frank)) <= min_delta:
        break
print(pagerank)
"copy.deepcopy",
"numpy.zeros",
"numpy.transpose",
"numpy.ones",
"numpy.mat"
] | [((38, 133), 'numpy.mat', 'np.mat', (['"""0 0 0 1.0 0 1.0;1 0 0 0 0 0;0 1 0 0 0 0;0 1 1 0 0 0;0 0 1 0 0 0;0 0 0 1 1 0"""'], {}), "(\n '0 0 0 1.0 0 1.0;1 0 0 0 0 0;0 1 0 0 0 0;0 1 1 0 0 0;0 0 1 0 0 0;0 0 0 1 1 0'\n )\n", (44, 133), True, 'import numpy as np\n'), ((205, 226), 'numpy.mat', 'np.mat', (['"""0 0.0 0 0 0"""'], {}), "('0 0.0 0 0 0')\n", (211, 226), True, 'import numpy as np\n'), ((230, 325), 'numpy.mat', 'np.mat', (['"""0 0 0 1.0 0 1.0;1 0 0 0 0 0;0 1 0 0 0 0;0 1 1 0 0 0;0 0 1 0 0 0;0 0 0 1 1 0"""'], {}), "(\n '0 0 0 1.0 0 1.0;1 0 0 0 0 0;0 1 0 0 0 0;0 1 1 0 0 0;0 0 1 0 0 0;0 0 0 1 1 0'\n )\n", (236, 325), True, 'import numpy as np\n'), ((405, 434), 'numpy.zeros', 'np.zeros', (['(n, 1)'], {'dtype': 'float'}), '((n, 1), dtype=float)\n', (413, 434), True, 'import numpy as np\n'), ((444, 473), 'numpy.zeros', 'np.zeros', (['(n, 1)'], {'dtype': 'float'}), '((n, 1), dtype=float)\n', (452, 473), True, 'import numpy as np\n'), ((478, 506), 'numpy.ones', 'np.ones', (['(n, n)'], {'dtype': 'float'}), '((n, n), dtype=float)\n', (485, 506), True, 'import numpy as np\n'), ((753, 768), 'numpy.transpose', 'np.transpose', (['A'], {}), '(A)\n', (765, 768), True, 'import numpy as np\n'), ((893, 916), 'copy.deepcopy', 'copy.deepcopy', (['pagerank'], {}), '(pagerank)\n', (906, 916), False, 'import copy\n')] |
import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
import pickle
from warnings import simplefilter
from all_functions import *
from feedback_functions import *
simplefilter(action='ignore', category=FutureWarning)
# One-time model creation (babbling + inverse mapping) kept for reference;
# the pre-trained model is loaded from disk below instead.
# np.random.seed(0)
# [babbling_kinematics, babbling_activations] = babbling_fcn(simulation_minutes=5)
# model = inverse_mapping_fcn(kinematics=babbling_kinematics, activations=babbling_activations)
# cum_kinematics = babbling_kinematics
# cum_activations = babbling_activations
# pickle.dump([model,cum_kinematics, cum_activations],open("results/mlp_model.sav", 'wb'))
[model,cum_kinematics, cum_activations] = pickle.load(open("results/mlp_model.sav", 'rb')) # loading the model
# NOTE(review): pickle.load(open(...)) never closes the file handle; a
# `with open(...)` block would be tidier.
# Proportional and integral gains shared by the closed-loop runs below.
P = np.array([10, 15])
I = np.array([2, 6])
np.random.seed(0)
experiments_switch = np.zeros(11,) # sets which experiments should run
#np.zeros(10,)#[0, 0, 0, 1, 0, 0, 0, 0, 0, 1]
#experiments_switch[4]=1
trial_number = 1
plot_outputs = True
Mj_render = False
# Pre-create exp1..exp11 average-error arrays so the `errors_all` list built
# at the bottom of the script is defined even for experiments left switched off.
for ii in range(len(experiments_switch)):
	globals()["exp{}_average_error".format(ii+1)]=np.zeros([2,1])
# Experiments 6, 7, 9 and 10 use differently-shaped error arrays; re-initialize.
exp6_average_error = np.zeros([3,1])
exp7_average_error = np.zeros([3,1])
exp9_average_error = np.zeros([3,1,1])
exp10_average_error = np.zeros([3,1,1])
# Experiment 1: tracking error as a function of the cycle period
# (row 0: open-loop, row 1: closed-loop).
if experiments_switch[0] ==1: # as a function of cycle period
	features=np.ones(10,)
	cycle_durations = np.array([2.5,2.5])#np.linspace(.5,10,trial_number)
	test1_no = cycle_durations.shape[0]
	exp1_average_error = np.zeros([2,test1_no]) # first row open-loop and second row close-loop
	#cycle length experiment
	for cycle_duration_in_seconds, ii in zip(cycle_durations, range(test1_no)):
		[q0_filtered, q1_filtered] = feat_to_positions_fcn(features, timestep=0.005, cycle_duration_in_seconds = cycle_duration_in_seconds, show=False)
		# Repeat one cycle 10 times to build the full desired trajectory.
		q0_filtered_10 = np.tile(q0_filtered,10)
		q1_filtered_10 = np.tile(q1_filtered,10)
		desired_kinematics = positions_to_kinematics_fcn(q0_filtered_10, q1_filtered_10, timestep = 0.005)
		exp1_average_error[0,ii], _, _ = openloop_run_fcn(model=model, desired_kinematics=desired_kinematics, plot_outputs=plot_outputs, Mj_render=False)
		exp1_average_error[1,ii], _, _ = closeloop_run_fcn(model=model, desired_kinematics=desired_kinematics, P=P, I=I, plot_outputs=plot_outputs, Mj_render=False) # K = [10, 15]
# Experiment 2: random cyclical trajectories in the air.
if experiments_switch[1] ==1: # cyclical on air
	np.random.seed(0)
	test2_no = trial_number
	exp2_average_error = np.zeros([2,test2_no])
	for ii in range(test2_no):
		features = np.random.rand(10)
		[q0_filtered, q1_filtered] = feat_to_positions_fcn(features, timestep=0.005, cycle_duration_in_seconds = 2.5, show=False)
		#import pdb; pdb.set_trace()
		q0_filtered_10 = np.tile(q0_filtered,10)
		q1_filtered_10 = np.tile(q1_filtered,10)
		desired_kinematics = positions_to_kinematics_fcn(q0_filtered_10, q1_filtered_10, timestep = 0.005)
		exp2_average_error[0,ii], _, _ = openloop_run_fcn(model=model, desired_kinematics=desired_kinematics, plot_outputs=plot_outputs, Mj_render=False)
		exp2_average_error[1,ii], _, _ = closeloop_run_fcn(model=model, desired_kinematics=desired_kinematics, P=P, I=I, plot_outputs=plot_outputs, Mj_render=False) # K = [10, 15]
	#print("error_without: ", exp2_average_error[0,0], "error with: ", exp2_average_error[1,0])
# Experiment 3: point-to-point position sequences for both joints.
if experiments_switch[2] ==1: # p2p
	np.random.seed(0)
	test3_no = trial_number
	exp3_average_error = np.zeros([2,test3_no])
	for ii in range(test3_no):
		q0 = p2p_positions_gen_fcn(low=-np.pi/3, high=np.pi/3, number_of_positions=10, duration_of_each_position=2.5, timestep=.005)
		q1 = p2p_positions_gen_fcn(low=-np.pi/2, high=0, number_of_positions=10, duration_of_each_position=2.5, timestep=.005)
		desired_kinematics = positions_to_kinematics_fcn(q0, q1, timestep = 0.005)
		exp3_average_error[0,ii], _, _ = openloop_run_fcn(model=model, desired_kinematics=desired_kinematics, plot_outputs=plot_outputs, Mj_render=False)
		exp3_average_error[1,ii], _, _ = closeloop_run_fcn(model=model, desired_kinematics=desired_kinematics, P=P, I=I, plot_outputs=plot_outputs, Mj_render=False) # K = [10, 15]
# Experiment 4: standing up against a weight (model_ver=3); a single
# 1 s hold at the start pose followed by 6 s at zero, run open-loop and
# then closed-loop.
if experiments_switch[3] ==1: # standing up against weight
	test4_no = 1
	exp4_average_error = np.zeros([2,test4_no])
	q0 = p2p_positions_gen_fcn(low=np.pi/3, high=np.pi/3, number_of_positions=1, duration_of_each_position=1, timestep=.005)
	q0 = np.append(q0,p2p_positions_gen_fcn(low=0, high=0, number_of_positions=1, duration_of_each_position=6, timestep=.005))
	q1 = p2p_positions_gen_fcn(low=-np.pi/2, high=-np.pi/2, number_of_positions=1, duration_of_each_position=1, timestep=.005)
	q1 = np.append(q1,p2p_positions_gen_fcn(low=0, high=0, number_of_positions=1, duration_of_each_position=6, timestep=.005))
	desired_kinematics = positions_to_kinematics_fcn(q0, q1, timestep = 0.005)
	exp4_average_error[0,:], _, _ = openloop_run_fcn(model=model, desired_kinematics=desired_kinematics, model_ver=3, plot_outputs=plot_outputs, Mj_render=Mj_render)
	# The same trajectory is rebuilt from scratch for the closed-loop run.
	q0 = p2p_positions_gen_fcn(low=np.pi/3, high=np.pi/3, number_of_positions=1, duration_of_each_position=1, timestep=.005)
	q0 = np.append(q0,p2p_positions_gen_fcn(low=0, high=0, number_of_positions=1, duration_of_each_position=6, timestep=.005))
	q1 = p2p_positions_gen_fcn(low=-np.pi/2, high=-np.pi/2, number_of_positions=1, duration_of_each_position=1, timestep=.005)
	q1 = np.append(q1,p2p_positions_gen_fcn(low=0, high=0, number_of_positions=1, duration_of_each_position=6, timestep=.005))
	desired_kinematics = positions_to_kinematics_fcn(q0, q1, timestep = 0.005)
	exp4_average_error[1,:], _, _ = closeloop_run_fcn(model=model, desired_kinematics=desired_kinematics, P=P, I=I, model_ver=3, plot_outputs=plot_outputs, Mj_render=Mj_render)
# Experiment 5: random cyclical trajectories with contact dynamics
# ("walking", model_ver=2).
if experiments_switch[4] == 1: # walking; contact dynamics
	np.random.seed(0)
	test5_no = trial_number
	exp5_average_error = np.zeros([2,test5_no])
	for ii in range(test5_no):
		##########################
		features = np.random.rand(10)
		[q0_filtered, q1_filtered] = feat_to_positions_fcn(features, timestep=0.005, cycle_duration_in_seconds = 2.5, show=False)
		q0_filtered_10 = np.tile(q0_filtered,10)
		q1_filtered_10 = np.tile(q1_filtered,10)
		desired_kinematics = positions_to_kinematics_fcn(q0_filtered_10, q1_filtered_10, timestep = 0.005)
		exp5_average_error[0,ii], real_attempt_kinematics_ol, _ = openloop_run_fcn(model=model, desired_kinematics=desired_kinematics, model_ver=2, plot_outputs=plot_outputs, Mj_render=Mj_render)
		exp5_average_error[1,ii], real_attempt_kinematics_cl, _ = closeloop_run_fcn(model=model, desired_kinematics=desired_kinematics, model_ver=2, P=P, I=I, plot_outputs=plot_outputs, Mj_render=Mj_render) # K = [10, 15]
		#import pdb; pdb.set_trace()
	# np.savetxt('./results/withdynamics_desired.csv', desired_kinematics, delimiter=',')
	# np.savetxt('./results/withdynamics_ol.csv', real_attempt_kinematics_ol, delimiter=',')
	# np.savetxt('./results/withdynamics_cl.csv', real_attempt_kinematics_cl, delimiter=',')
# Experiment 6: iterative refinement ("everlearn") on a fixed all-ones
# feature vector.  Each pass appends the attempt's data to the cumulative
# set and re-fits the inverse map.  Rows: 0 = open-loop refinement error,
# 1 = closed-loop refinement error, 2 = open-loop rerun of the
# closed-loop-refined model.
if experiments_switch[5] == 1: # everlearn ones
	np.random.seed(0)
	[babbling_kinematics_1min, babbling_activations_1min] = babbling_fcn(simulation_minutes=1)
	model_1min = inverse_mapping_fcn(kinematics=babbling_kinematics_1min, activations=babbling_activations_1min)
	# Separate cumulative-data copies so the open- and closed-loop refinement
	# tracks do not contaminate each other.
	cum_kinematics_ol = deepcopy(babbling_kinematics_1min)
	cum_activations_ol = deepcopy(babbling_activations_1min)
	exp6_model_ol = deepcopy(model_1min)
	cum_kinematics_cl = deepcopy(babbling_kinematics_1min)
	cum_activations_cl = deepcopy(babbling_activations_1min)
	exp6_model_cl = deepcopy(model_1min)
	test6_no = trial_number
	exp6_average_error = np.zeros([3,test6_no])
	for ii in range(test6_no):
		features = np.ones(10,)
		print(features)
		[q0_filtered, q1_filtered] = feat_to_positions_fcn(features, timestep=0.005, cycle_duration_in_seconds = 2.5, show=False) #1sec also fine
		q0_filtered_10 = np.tile(q0_filtered,10)
		q1_filtered_10 = np.tile(q1_filtered,10)
		desired_kinematics = positions_to_kinematics_fcn(q0_filtered_10, q1_filtered_10, timestep = 0.005)
		exp6_average_error[0,ii], real_attempt_kinematics_ol, real_attempt_activations_ol = openloop_run_fcn(model=exp6_model_ol, desired_kinematics=desired_kinematics, model_ver=0, plot_outputs=False, Mj_render=False)
		cum_kinematics_ol, cum_activations_ol = concatinate_data_fcn( cum_kinematics_ol, cum_activations_ol, real_attempt_kinematics_ol, real_attempt_activations_ol, throw_percentage = 0.20)
		exp6_model_ol = inverse_mapping_fcn(cum_kinematics_ol, cum_activations_ol, prior_model = exp6_model_ol)
		exp6_average_error[1,ii], real_attempt_kinematics_cl, real_attempt_activations_cl = closeloop_run_fcn(model=exp6_model_cl, desired_kinematics=desired_kinematics, model_ver=0, P=P, I=I, plot_outputs=False, Mj_render=False)
		exp6_average_error[2,ii], _, _ = openloop_run_fcn(model=exp6_model_cl, desired_kinematics=desired_kinematics, model_ver=0, plot_outputs=False, Mj_render=False)
		cum_kinematics_cl, cum_activations_cl = concatinate_data_fcn( cum_kinematics_cl, cum_activations_cl, real_attempt_kinematics_cl, real_attempt_activations_cl, throw_percentage = 0.20)
		exp6_model_cl = inverse_mapping_fcn(cum_kinematics_cl, cum_activations_cl, prior_model = exp6_model_cl)
# Experiment 7: same refinement scheme as experiment 6 but with a fresh
# random feature vector on every refinement pass.
if experiments_switch[6] == 1: # everlearn random multi
	np.random.seed(0)
	[babbling_kinematics_1min, babbling_activations_1min] = babbling_fcn(simulation_minutes=1)
	model_1min = inverse_mapping_fcn(kinematics=babbling_kinematics_1min, activations=babbling_activations_1min)
	cum_kinematics_ol = deepcopy(babbling_kinematics_1min)
	cum_activations_ol = deepcopy(babbling_activations_1min)
	exp7_model_ol = deepcopy(model_1min)
	cum_kinematics_cl = deepcopy(babbling_kinematics_1min)
	cum_activations_cl = deepcopy(babbling_activations_1min)
	exp7_model_cl = deepcopy(model_1min)
	test7_no = trial_number
	exp7_average_error = np.zeros([3,test7_no])
	for ii in range(test7_no):
		print(ii)
		features = np.random.rand(10)
		print(features)
		[q0_filtered, q1_filtered] = feat_to_positions_fcn(features, timestep=0.005, cycle_duration_in_seconds = 2.5, show=False) #1sec also fine
		q0_filtered_10 = np.tile(q0_filtered,10)
		q1_filtered_10 = np.tile(q1_filtered,10)
		desired_kinematics = positions_to_kinematics_fcn(q0_filtered_10, q1_filtered_10, timestep = 0.005)
		exp7_average_error[0,ii], real_attempt_kinematics_ol, real_attempt_activations_ol = openloop_run_fcn(model=exp7_model_ol, desired_kinematics=desired_kinematics, model_ver=0, plot_outputs=False, Mj_render=False)
		cum_kinematics_ol, cum_activations_ol = concatinate_data_fcn( cum_kinematics_ol, cum_activations_ol, real_attempt_kinematics_ol, real_attempt_activations_ol, throw_percentage = 0.20)
		exp7_model_ol = inverse_mapping_fcn(cum_kinematics_ol, cum_activations_ol, prior_model = exp7_model_ol)
		exp7_average_error[1,ii], real_attempt_kinematics_cl, real_attempt_activations_cl = closeloop_run_fcn(model=exp7_model_cl, desired_kinematics=desired_kinematics, model_ver=0, P=P, I=I, plot_outputs=False, Mj_render=False)
		exp7_average_error[2,ii], _, _ = openloop_run_fcn(model=exp7_model_cl, desired_kinematics=desired_kinematics, model_ver=0, plot_outputs=False, Mj_render=False)
		cum_kinematics_cl, cum_activations_cl = concatinate_data_fcn( cum_kinematics_cl, cum_activations_cl, real_attempt_kinematics_cl, real_attempt_activations_cl, throw_percentage = 0.20)
		exp7_model_cl = inverse_mapping_fcn(cum_kinematics_cl, cum_activations_cl, prior_model = exp7_model_cl)
# Experiment 8: closed-loop sensitivity to feedback delay, sweeping
# delay_timesteps over 0..20 in steps of 2.  Row 0 of the error array is
# the open-loop baseline; rows 1.. hold one closed-loop run per delay.
if experiments_switch[7] ==1: # delay
	np.random.seed(0)
	test8_no = trial_number
	all_delays = np.arange(0, 21, 2)
	exp8_average_error = np.zeros([all_delays.shape[0]+1,test8_no])
	for ii in range(test8_no):
		features = np.random.rand(10)
		[q0_filtered, q1_filtered] = feat_to_positions_fcn(features, timestep=0.005, cycle_duration_in_seconds = 2.5, show=False)
		#import pdb; pdb.set_trace()
		q0_filtered_10 = np.tile(q0_filtered,10)
		q1_filtered_10 = np.tile(q1_filtered,10)
		desired_kinematics = positions_to_kinematics_fcn(q0_filtered_10, q1_filtered_10, timestep = 0.005)
		for delay_timesteps, jj in zip(all_delays, range(all_delays.shape[0])):
			# The open-loop baseline is run only once (on the first delay value).
			if jj==0: # 0 is for the open loop
				exp8_average_error[jj,ii], _, _ = openloop_run_fcn(model=model, desired_kinematics=desired_kinematics, plot_outputs=False, Mj_render=False)
			exp8_average_error[jj+1,ii], _, _ = closeloop_run_fcn(model=model, desired_kinematics=desired_kinematics, P=P, I=I, delay_timesteps=delay_timesteps, plot_outputs=False, Mj_render=False) # K = [10, 15]
	#print("error_without: ", exp2_average_error[0,0], "error with: ", exp2_average_error[1,0])
# Experiment 9: the everlearn refinement of experiment 6 swept over three
# initial babbling durations (1, 2.5 and 5 minutes), 25 refinement passes
# each; the seed is reset per babbling case for comparability.
if experiments_switch[8] == 1: # everlearn random mesh
	refine_num = 25
	test9_no = refine_num
	babbling_times = np.array([1, 2.5, 5])
	num_babbling_cases = babbling_times.shape[0]
	#import pdb; pdb.set_trace()
	exp9_average_error = np.zeros([3,test9_no,num_babbling_cases])
	for babbling_time, jj in zip(babbling_times, range(num_babbling_cases)):
		np.random.seed(0)
		[babbling_kinematics_1min, babbling_activations_1min] = babbling_fcn(simulation_minutes=babbling_time)
		model_1min = inverse_mapping_fcn(kinematics=babbling_kinematics_1min, activations=babbling_activations_1min)
		cum_kinematics_ol = deepcopy(babbling_kinematics_1min)
		cum_activations_ol = deepcopy(babbling_activations_1min)
		exp9_model_ol = deepcopy(model_1min)
		cum_kinematics_cl = deepcopy(babbling_kinematics_1min)
		cum_activations_cl = deepcopy(babbling_activations_1min)
		exp9_model_cl = deepcopy(model_1min)
		for ii in range(test9_no):
			features = np.ones(10,)#np.random.rand(10)*.8+.2
			print(features)
			[q0_filtered, q1_filtered] = feat_to_positions_fcn(features, timestep=0.005, cycle_duration_in_seconds = 2.5, show=False) #1sec also fine
			q0_filtered_10 = np.tile(q0_filtered,10)
			q1_filtered_10 = np.tile(q1_filtered,10)
			desired_kinematics = positions_to_kinematics_fcn(q0_filtered_10, q1_filtered_10, timestep = 0.005)
			exp9_average_error[0,ii,jj], real_attempt_kinematics_ol, real_attempt_activations_ol = openloop_run_fcn(model=exp9_model_ol, desired_kinematics=desired_kinematics, model_ver=0, plot_outputs=False, Mj_render=False)
			cum_kinematics_ol, cum_activations_ol = concatinate_data_fcn( cum_kinematics_ol, cum_activations_ol, real_attempt_kinematics_ol, real_attempt_activations_ol, throw_percentage = 0.20)
			exp9_model_ol = inverse_mapping_fcn(cum_kinematics_ol, cum_activations_ol, prior_model = exp9_model_ol)
			exp9_average_error[1,ii,jj], real_attempt_kinematics_cl, real_attempt_activations_cl = closeloop_run_fcn(model=exp9_model_cl, desired_kinematics=desired_kinematics, model_ver=0, P=P, I=I, plot_outputs=False, Mj_render=False)
			exp9_average_error[2,ii,jj], _, _ = openloop_run_fcn(model=exp9_model_cl, desired_kinematics=desired_kinematics, model_ver=0, plot_outputs=False, Mj_render=False)
			cum_kinematics_cl, cum_activations_cl = concatinate_data_fcn( cum_kinematics_cl, cum_activations_cl, real_attempt_kinematics_cl, real_attempt_activations_cl, throw_percentage = 0.20)
			exp9_model_cl = inverse_mapping_fcn(cum_kinematics_cl, cum_activations_cl, prior_model = exp9_model_cl)
# Experiment 10: everlearn refinement repeated over rep_num fresh babbling
# models, each refined for 25 passes on one fixed random feature vector.
# Rows: 0 = open-loop refinement, 1 = closed-loop refinement, 2 = open-loop
# rerun of the closed-loop-refined model, 3 = closed-loop run of the
# open-loop-refined model.
if experiments_switch[9] == 1: # everlearn random
	np.random.seed(0)
	rep_num = trial_number
	refine_num=25
	exp10_average_error = np.zeros([4,refine_num,rep_num])
	for jj in range(rep_num):
		[babbling_kinematics_1min, babbling_activations_1min] = babbling_fcn(simulation_minutes=1)
		model_1min = inverse_mapping_fcn(kinematics=babbling_kinematics_1min, activations=babbling_activations_1min)
		cum_kinematics_ol = deepcopy(babbling_kinematics_1min)
		cum_activations_ol = deepcopy(babbling_activations_1min)
		exp10_model_ol = deepcopy(model_1min)
		cum_kinematics_cl = deepcopy(babbling_kinematics_1min)
		cum_activations_cl = deepcopy(babbling_activations_1min)
		exp10_model_cl = deepcopy(model_1min)
		# One random trajectory per repetition, reused for every refinement pass.
		features = np.random.rand(10)
		for ii in range(refine_num):
			print(features)
			[q0_filtered, q1_filtered] = feat_to_positions_fcn(features, timestep=0.005, cycle_duration_in_seconds = 2.5, show=False) #1sec also fine
			q0_filtered_10 = np.tile(q0_filtered,10)
			q1_filtered_10 = np.tile(q1_filtered,10)
			desired_kinematics = positions_to_kinematics_fcn(q0_filtered_10, q1_filtered_10, timestep = 0.005)
			exp10_average_error[0,ii,jj], real_attempt_kinematics_ol, real_attempt_activations_ol = openloop_run_fcn(model=exp10_model_ol, desired_kinematics=desired_kinematics, model_ver=0, plot_outputs=False, Mj_render=False)
			exp10_average_error[3,ii,jj], _, _ = closeloop_run_fcn(model=exp10_model_ol, desired_kinematics=desired_kinematics, model_ver=0, P=P, I=I, plot_outputs=False, Mj_render=False)
			cum_kinematics_ol, cum_activations_ol = concatinate_data_fcn( cum_kinematics_ol, cum_activations_ol, real_attempt_kinematics_ol, real_attempt_activations_ol, throw_percentage = 0.20)
			exp10_model_ol = inverse_mapping_fcn(cum_kinematics_ol, cum_activations_ol, prior_model = exp10_model_ol)
			exp10_average_error[1,ii,jj], real_attempt_kinematics_cl, real_attempt_activations_cl = closeloop_run_fcn(model=exp10_model_cl, desired_kinematics=desired_kinematics, model_ver=0, P=P, I=I, plot_outputs=False, Mj_render=False)
			exp10_average_error[2,ii,jj], _, _ = openloop_run_fcn(model=exp10_model_cl, desired_kinematics=desired_kinematics, model_ver=0, plot_outputs=False, Mj_render=False)
			cum_kinematics_cl, cum_activations_cl = concatinate_data_fcn( cum_kinematics_cl, cum_activations_cl, real_attempt_kinematics_cl, real_attempt_activations_cl, throw_percentage = 0.20)
			exp10_model_cl = inverse_mapping_fcn(cum_kinematics_cl, cum_activations_cl, prior_model = exp10_model_cl)
# Experiment 11: sweep the P and I gains over powers of two (2^-6 .. 2^6
# times the base gains) and record mean open-/closed-loop error per gain set.
if experiments_switch[10] ==1: # cyclical on air
	rep_num = trial_number
	powers=np.arange(-6,7,1)
	coefficients = np.power(2.,powers)
	P_exp11 = np.dot(coefficients[:,None],P[None,:])
	I_exp11 = np.dot(coefficients[:,None],I[None,:])
	PI_sets_no = P_exp11.shape[0]
	exp11_average_error = np.zeros([2,PI_sets_no])
	for jj in range(PI_sets_no):
		# Reseed per gain set so every set sees the same random trajectories.
		np.random.seed(0)
		current_average_error = np.zeros([2,rep_num])
		for ii in range(rep_num):
			features = np.random.rand(10)
			[q0_filtered, q1_filtered] = feat_to_positions_fcn(features, timestep=0.005, cycle_duration_in_seconds = 2.5, show=False)
			#import pdb; pdb.set_trace()
			q0_filtered_10 = np.tile(q0_filtered,10)
			q1_filtered_10 = np.tile(q1_filtered,10)
			desired_kinematics = positions_to_kinematics_fcn(q0_filtered_10, q1_filtered_10, timestep = 0.005)
			current_average_error[0,ii], _, _ = openloop_run_fcn(model=model, desired_kinematics=desired_kinematics, plot_outputs=False, Mj_render=False)
			current_average_error[1,ii], _, _ = closeloop_run_fcn(model=model, desired_kinematics=desired_kinematics, P=P_exp11[jj], I=I_exp11[jj], plot_outputs=False, Mj_render=False) # K = [10, 15]
		#print("error_without: ", exp2_average_error[0,0], "error with: ", exp2_average_error[1,0])
		exp11_average_error[0, jj] = current_average_error[0,:].mean()
		exp11_average_error[1, jj] = current_average_error[1,:].mean()
	#import pdb; pdb.set_trace()
#experiments_switch = np.ones(11,)
errors_all = [exp1_average_error, exp2_average_error, exp3_average_error, exp4_average_error, exp5_average_error, exp6_average_error, exp7_average_error, exp8_average_error, exp9_average_error, exp10_average_error, exp11_average_error]
#pickle.dump([errors_all, trial_number],open("results/P_I/feedback_errors_P_I_tmp.sav", 'wb')) # saving the results with only P
# NOTE(review): the freshly assembled errors_all / trial_number are
# immediately overwritten here by previously saved results before plotting.
[errors_all, trial_number] = pickle.load(open("results/P_I/feedback_errors_P_I_V8_50.sav", 'rb')) # loading the results with only P
# Only experiments 1, 4, 8 and 10 (switch indices 0, 3, 7, 9) are plotted.
experiments_switch = np.zeros(11,)
experiments_switch[0] =1
experiments_switch[3] =1
experiments_switch[7] =1
experiments_switch[9] =1
plot_comparison_figures_fcn(errors_all, experiments_switch, trial_number)
#import pdb; pdb.set_trace()
| [
"numpy.random.seed",
"warnings.simplefilter",
"numpy.power",
"numpy.zeros",
"numpy.ones",
"numpy.array",
"numpy.arange",
"numpy.tile",
"numpy.random.rand",
"numpy.dot"
] | [((186, 239), 'warnings.simplefilter', 'simplefilter', ([], {'action': '"""ignore"""', 'category': 'FutureWarning'}), "(action='ignore', category=FutureWarning)\n", (198, 239), False, 'from warnings import simplefilter\n'), ((731, 749), 'numpy.array', 'np.array', (['[10, 15]'], {}), '([10, 15])\n', (739, 749), True, 'import numpy as np\n'), ((754, 770), 'numpy.array', 'np.array', (['[2, 6]'], {}), '([2, 6])\n', (762, 770), True, 'import numpy as np\n'), ((773, 790), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (787, 790), True, 'import numpy as np\n'), ((812, 824), 'numpy.zeros', 'np.zeros', (['(11)'], {}), '(11)\n', (820, 824), True, 'import numpy as np\n'), ((19542, 19554), 'numpy.zeros', 'np.zeros', (['(11)'], {}), '(11)\n', (19550, 19554), True, 'import numpy as np\n'), ((1078, 1094), 'numpy.zeros', 'np.zeros', (['[2, 1]'], {}), '([2, 1])\n', (1086, 1094), True, 'import numpy as np\n'), ((1116, 1132), 'numpy.zeros', 'np.zeros', (['[3, 1]'], {}), '([3, 1])\n', (1124, 1132), True, 'import numpy as np\n'), ((1154, 1170), 'numpy.zeros', 'np.zeros', (['[3, 1]'], {}), '([3, 1])\n', (1162, 1170), True, 'import numpy as np\n'), ((1192, 1211), 'numpy.zeros', 'np.zeros', (['[3, 1, 1]'], {}), '([3, 1, 1])\n', (1200, 1211), True, 'import numpy as np\n'), ((1233, 1252), 'numpy.zeros', 'np.zeros', (['[3, 1, 1]'], {}), '([3, 1, 1])\n', (1241, 1252), True, 'import numpy as np\n'), ((1326, 1337), 'numpy.ones', 'np.ones', (['(10)'], {}), '(10)\n', (1333, 1337), True, 'import numpy as np\n'), ((1358, 1378), 'numpy.array', 'np.array', (['[2.5, 2.5]'], {}), '([2.5, 2.5])\n', (1366, 1378), True, 'import numpy as np\n'), ((1469, 1492), 'numpy.zeros', 'np.zeros', (['[2, test1_no]'], {}), '([2, test1_no])\n', (1477, 1492), True, 'import numpy as np\n'), ((2349, 2366), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (2363, 2366), True, 'import numpy as np\n'), ((2414, 2437), 'numpy.zeros', 'np.zeros', (['[2, test2_no]'], {}), '([2, test2_no])\n', 
(2422, 2437), True, 'import numpy as np\n'), ((3294, 3311), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (3308, 3311), True, 'import numpy as np\n'), ((3359, 3382), 'numpy.zeros', 'np.zeros', (['[2, test3_no]'], {}), '([2, test3_no])\n', (3367, 3382), True, 'import numpy as np\n'), ((4154, 4177), 'numpy.zeros', 'np.zeros', (['[2, test4_no]'], {}), '([2, test4_no])\n', (4162, 4177), True, 'import numpy as np\n'), ((5717, 5734), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (5731, 5734), True, 'import numpy as np\n'), ((5782, 5805), 'numpy.zeros', 'np.zeros', (['[2, test5_no]'], {}), '([2, test5_no])\n', (5790, 5805), True, 'import numpy as np\n'), ((6964, 6981), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (6978, 6981), True, 'import numpy as np\n'), ((7535, 7558), 'numpy.zeros', 'np.zeros', (['[3, test6_no]'], {}), '([3, test6_no])\n', (7543, 7558), True, 'import numpy as np\n'), ((9199, 9216), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (9213, 9216), True, 'import numpy as np\n'), ((9770, 9793), 'numpy.zeros', 'np.zeros', (['[3, test7_no]'], {}), '([3, test7_no])\n', (9778, 9793), True, 'import numpy as np\n'), ((11434, 11451), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (11448, 11451), True, 'import numpy as np\n'), ((11491, 11510), 'numpy.arange', 'np.arange', (['(0)', '(21)', '(2)'], {}), '(0, 21, 2)\n', (11500, 11510), True, 'import numpy as np\n'), ((11533, 11578), 'numpy.zeros', 'np.zeros', (['[all_delays.shape[0] + 1, test8_no]'], {}), '([all_delays.shape[0] + 1, test8_no])\n', (11541, 11578), True, 'import numpy as np\n'), ((12647, 12668), 'numpy.array', 'np.array', (['[1, 2.5, 5]'], {}), '([1, 2.5, 5])\n', (12655, 12668), True, 'import numpy as np\n'), ((12768, 12811), 'numpy.zeros', 'np.zeros', (['[3, test9_no, num_babbling_cases]'], {}), '([3, test9_no, num_babbling_cases])\n', (12776, 12811), True, 'import numpy as np\n'), ((15113, 15130), 'numpy.random.seed', 
'np.random.seed', (['(0)'], {}), '(0)\n', (15127, 15130), True, 'import numpy as np\n'), ((15193, 15227), 'numpy.zeros', 'np.zeros', (['[4, refine_num, rep_num]'], {}), '([4, refine_num, rep_num])\n', (15201, 15227), True, 'import numpy as np\n'), ((17654, 17673), 'numpy.arange', 'np.arange', (['(-6)', '(7)', '(1)'], {}), '(-6, 7, 1)\n', (17663, 17673), True, 'import numpy as np\n'), ((17688, 17709), 'numpy.power', 'np.power', (['(2.0)', 'powers'], {}), '(2.0, powers)\n', (17696, 17709), True, 'import numpy as np\n'), ((17719, 17760), 'numpy.dot', 'np.dot', (['coefficients[:, None]', 'P[None, :]'], {}), '(coefficients[:, None], P[None, :])\n', (17725, 17760), True, 'import numpy as np\n'), ((17769, 17810), 'numpy.dot', 'np.dot', (['coefficients[:, None]', 'I[None, :]'], {}), '(coefficients[:, None], I[None, :])\n', (17775, 17810), True, 'import numpy as np\n'), ((17862, 17887), 'numpy.zeros', 'np.zeros', (['[2, PI_sets_no]'], {}), '([2, PI_sets_no])\n', (17870, 17887), True, 'import numpy as np\n'), ((1809, 1833), 'numpy.tile', 'np.tile', (['q0_filtered', '(10)'], {}), '(q0_filtered, 10)\n', (1816, 1833), True, 'import numpy as np\n'), ((1852, 1876), 'numpy.tile', 'np.tile', (['q1_filtered', '(10)'], {}), '(q1_filtered, 10)\n', (1859, 1876), True, 'import numpy as np\n'), ((2478, 2496), 'numpy.random.rand', 'np.random.rand', (['(10)'], {}), '(10)\n', (2492, 2496), True, 'import numpy as np\n'), ((2672, 2696), 'numpy.tile', 'np.tile', (['q0_filtered', '(10)'], {}), '(q0_filtered, 10)\n', (2679, 2696), True, 'import numpy as np\n'), ((2715, 2739), 'numpy.tile', 'np.tile', (['q1_filtered', '(10)'], {}), '(q1_filtered, 10)\n', (2722, 2739), True, 'import numpy as np\n'), ((5875, 5893), 'numpy.random.rand', 'np.random.rand', (['(10)'], {}), '(10)\n', (5889, 5893), True, 'import numpy as np\n'), ((6038, 6062), 'numpy.tile', 'np.tile', (['q0_filtered', '(10)'], {}), '(q0_filtered, 10)\n', (6045, 6062), True, 'import numpy as np\n'), ((6081, 6105), 'numpy.tile', 'np.tile', 
(['q1_filtered', '(10)'], {}), '(q1_filtered, 10)\n', (6088, 6105), True, 'import numpy as np\n'), ((7599, 7610), 'numpy.ones', 'np.ones', (['(10)'], {}), '(10)\n', (7606, 7610), True, 'import numpy as np\n'), ((7790, 7814), 'numpy.tile', 'np.tile', (['q0_filtered', '(10)'], {}), '(q0_filtered, 10)\n', (7797, 7814), True, 'import numpy as np\n'), ((7833, 7857), 'numpy.tile', 'np.tile', (['q1_filtered', '(10)'], {}), '(q1_filtered, 10)\n', (7840, 7857), True, 'import numpy as np\n'), ((9846, 9864), 'numpy.random.rand', 'np.random.rand', (['(10)'], {}), '(10)\n', (9860, 9864), True, 'import numpy as np\n'), ((10043, 10067), 'numpy.tile', 'np.tile', (['q0_filtered', '(10)'], {}), '(q0_filtered, 10)\n', (10050, 10067), True, 'import numpy as np\n'), ((10086, 10110), 'numpy.tile', 'np.tile', (['q1_filtered', '(10)'], {}), '(q1_filtered, 10)\n', (10093, 10110), True, 'import numpy as np\n'), ((11617, 11635), 'numpy.random.rand', 'np.random.rand', (['(10)'], {}), '(10)\n', (11631, 11635), True, 'import numpy as np\n'), ((11811, 11835), 'numpy.tile', 'np.tile', (['q0_filtered', '(10)'], {}), '(q0_filtered, 10)\n', (11818, 11835), True, 'import numpy as np\n'), ((11854, 11878), 'numpy.tile', 'np.tile', (['q1_filtered', '(10)'], {}), '(q1_filtered, 10)\n', (11861, 11878), True, 'import numpy as np\n'), ((12886, 12903), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (12900, 12903), True, 'import numpy as np\n'), ((15782, 15800), 'numpy.random.rand', 'np.random.rand', (['(10)'], {}), '(10)\n', (15796, 15800), True, 'import numpy as np\n'), ((17919, 17936), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (17933, 17936), True, 'import numpy as np\n'), ((17963, 17985), 'numpy.zeros', 'np.zeros', (['[2, rep_num]'], {}), '([2, rep_num])\n', (17971, 17985), True, 'import numpy as np\n'), ((13473, 13484), 'numpy.ones', 'np.ones', (['(10)'], {}), '(10)\n', (13480, 13484), True, 'import numpy as np\n'), ((13692, 13716), 'numpy.tile', 'np.tile', 
(['q0_filtered', '(10)'], {}), '(q0_filtered, 10)\n', (13699, 13716), True, 'import numpy as np\n'), ((13736, 13760), 'numpy.tile', 'np.tile', (['q1_filtered', '(10)'], {}), '(q1_filtered, 10)\n', (13743, 13760), True, 'import numpy as np\n'), ((16013, 16037), 'numpy.tile', 'np.tile', (['q0_filtered', '(10)'], {}), '(q0_filtered, 10)\n', (16020, 16037), True, 'import numpy as np\n'), ((16057, 16081), 'numpy.tile', 'np.tile', (['q1_filtered', '(10)'], {}), '(q1_filtered, 10)\n', (16064, 16081), True, 'import numpy as np\n'), ((18027, 18045), 'numpy.random.rand', 'np.random.rand', (['(10)'], {}), '(10)\n', (18041, 18045), True, 'import numpy as np\n'), ((18224, 18248), 'numpy.tile', 'np.tile', (['q0_filtered', '(10)'], {}), '(q0_filtered, 10)\n', (18231, 18248), True, 'import numpy as np\n'), ((18268, 18292), 'numpy.tile', 'np.tile', (['q1_filtered', '(10)'], {}), '(q1_filtered, 10)\n', (18275, 18292), True, 'import numpy as np\n')] |
# https://zhuanlan.zhihu.com/p/335753926
import os, sys
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from collections import OrderedDict
import numpy as np
import cv2
import torch
from pytorchocr.base_ocr_v20 import BaseOCRV20
class PPOCRv2RecConverter(BaseOCRV20):
    """Converts a PaddleOCR v2 recognition checkpoint into the PyTorch model."""
    def __init__(self, config, paddle_pretrained_model_path, **kwargs):
        # Read the paddle checkpoint first: the last parameter tensor's leading
        # dimension gives the recognition head's output channel count.
        para_state_dict, opti_state_dict = self.read_paddle_weights(paddle_pretrained_model_path)
        out_channels = list(para_state_dict.values())[-1].shape[0]
        print('out_channels: ', out_channels)
        print(type(kwargs), kwargs)
        kwargs['out_channels'] = out_channels
        super(PPOCRv2RecConverter, self).__init__(config, **kwargs)
        # self.load_paddle_weights(paddle_pretrained_model_path)
        self.load_paddle_weights([para_state_dict, opti_state_dict])
        print('model is loaded: {}'.format(paddle_pretrained_model_path))
        self.net.eval()

    def load_paddle_weights(self, paddle_weights):
        """Copy paddle parameters into ``self.net``, translating parameter names.

        Args:
            paddle_weights: ``[para_state_dict, opti_state_dict]`` as returned
                by ``read_paddle_weights`` (the optimizer dict is unused).

        Raises:
            ValueError: if a pytorch key belongs to none of the expected
                backbone/neck/head groups.
        """
        para_state_dict, opti_state_dict = paddle_weights
        # Debug dumps of both state dicts, for eyeballing name/shape pairs.
        [print('paddle: {} ---- {}'.format(k, v.shape)) for k, v in para_state_dict.items()]
        [print('pytorch: {} ---- {}'.format(k, v.shape)) for k, v in self.net.state_dict().items()]
        for k,v in self.net.state_dict().items():
            # BatchNorm bookkeeping tensors have no paddle counterpart.
            if k.endswith('num_batches_tracked'):
                continue
            # Map pytorch parameter names to paddle ones:
            #  * BN running stats use paddle's ._mean/._variance suffixes
            #  * the paddle checkpoint nests all modules under 'Student.'
            ppname = k
            ppname = ppname.replace('.running_mean', '._mean')
            ppname = ppname.replace('.running_var', '._variance')
            if k.startswith('backbone.'):
                ppname = ppname.replace('backbone.', 'Student.backbone.')
            elif k.startswith('neck.'):
                ppname = ppname.replace('neck.', 'Student.neck.')
            elif k.startswith('head.'):
                ppname = ppname.replace('head.', 'Student.head.')
            else:
                print('Redundance:')
                print(k)
                raise ValueError
            try:
                # Fully-connected weight matrices are transposed between
                # paddle and torch layouts, hence the .T for fc1/fc2.
                if ppname.endswith('fc1.weight') or ppname.endswith('fc2.weight'):
                    self.net.state_dict()[k].copy_(torch.Tensor(para_state_dict[ppname].T))
                else:
                    self.net.state_dict()[k].copy_(torch.Tensor(para_state_dict[ppname]))
            except Exception as e:
                # Print both sides' shapes to make mismatches easy to diagnose.
                print('pytorch: {}, {}'.format(k, v.size()))
                print('paddle: {}, {}'.format(ppname, para_state_dict[ppname].shape))
                raise e
        print('model is loaded.')
if __name__ == '__main__':
    import argparse, json, textwrap, sys, os
    parser = argparse.ArgumentParser()
    parser.add_argument("--src_model_path", type=str, help='Assign the paddleOCR trained model(best_accuracy)')
    args = parser.parse_args()

    # Model config for the recognition network (CRNN with MobileNetV1Enhance
    # backbone, RNN sequence encoder and CTC head).
    cfg = {'model_type':'rec',
           'algorithm':'CRNN',
           'Transform':None,
           'Backbone':{'name':'MobileNetV1Enhance', 'scale':0.5},
           'Neck':{'name':'SequenceEncoder', 'hidden_size':64, 'encoder_type':'rnn'},
           'Head':{'name':'CTCHead', 'mid_channels': 96, 'fc_decay': 2e-05}}
    # PaddleOCR checkpoints use a 'best_accuracy' prefix inside the model dir.
    paddle_pretrained_model_path = os.path.join(os.path.abspath(args.src_model_path), 'best_accuracy')
    converter = PPOCRv2RecConverter(cfg, paddle_pretrained_model_path)

    # Sanity check: run a fixed random input through the converted network and
    # print summary statistics (for manual comparison against the paddle side).
    np.random.seed(666)
    inputs = np.random.randn(1,3,32,320).astype(np.float32)
    inp = torch.from_numpy(inputs)
    out = converter.net(inp)
    out = out.data.numpy()
    print('out:', np.sum(out), np.mean(out), np.max(out), np.min(out))

    # save
    converter.save_pytorch_weights('ch_ptocr_v2_rec_infer.pth')
    print('done.')
| [
"os.path.abspath",
"numpy.sum",
"numpy.random.seed",
"argparse.ArgumentParser",
"numpy.random.randn",
"numpy.max",
"numpy.mean",
"numpy.min",
"torch.Tensor",
"torch.from_numpy"
] | [((2631, 2656), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (2654, 2656), False, 'import argparse, json, textwrap, sys, os\n'), ((3300, 3319), 'numpy.random.seed', 'np.random.seed', (['(666)'], {}), '(666)\n', (3314, 3319), True, 'import numpy as np\n'), ((3390, 3414), 'torch.from_numpy', 'torch.from_numpy', (['inputs'], {}), '(inputs)\n', (3406, 3414), False, 'import torch\n'), ((3169, 3205), 'os.path.abspath', 'os.path.abspath', (['args.src_model_path'], {}), '(args.src_model_path)\n', (3184, 3205), False, 'import argparse, json, textwrap, sys, os\n'), ((3490, 3501), 'numpy.sum', 'np.sum', (['out'], {}), '(out)\n', (3496, 3501), True, 'import numpy as np\n'), ((3503, 3515), 'numpy.mean', 'np.mean', (['out'], {}), '(out)\n', (3510, 3515), True, 'import numpy as np\n'), ((3517, 3528), 'numpy.max', 'np.max', (['out'], {}), '(out)\n', (3523, 3528), True, 'import numpy as np\n'), ((3530, 3541), 'numpy.min', 'np.min', (['out'], {}), '(out)\n', (3536, 3541), True, 'import numpy as np\n'), ((107, 132), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (122, 132), False, 'import argparse, json, textwrap, sys, os\n'), ((3333, 3363), 'numpy.random.randn', 'np.random.randn', (['(1)', '(3)', '(32)', '(320)'], {}), '(1, 3, 32, 320)\n', (3348, 3363), True, 'import numpy as np\n'), ((2148, 2187), 'torch.Tensor', 'torch.Tensor', (['para_state_dict[ppname].T'], {}), '(para_state_dict[ppname].T)\n', (2160, 2187), False, 'import torch\n'), ((2262, 2299), 'torch.Tensor', 'torch.Tensor', (['para_state_dict[ppname]'], {}), '(para_state_dict[ppname])\n', (2274, 2299), False, 'import torch\n')] |
import torch
import torch.nn as nn
from torch.utils.data import Dataset
import pickle
import numpy as np
from typing import List, Union
from random import randint
import re
from perf_gan.data.preprocess import Identity, PitchTransform, LoudnessTransform
class SynthDataset(Dataset):
    """Dataset of paired unexpressive/expressive pitch and loudness contours.

    Each record in the pickle file is a dict with keys ``"u_f0"``, ``"u_lo"``,
    ``"e_f0"``, ``"e_lo"``, ``"onsets"``, ``"offsets"`` and ``"mask"``.
    """

    def __init__(self, path: str, list_transforms=None, eval=False):
        """Create the Dataset object relative to the data file (given with path)

        Args:
            path (str): path to the data file.  The sample count is encoded in
                the file name (e.g. ``dataset_train_1000.pickle``).
            list_transforms (list, optional): list of the transforms to be
                applied to the dataset, as ``[(cls, kwargs), (cls, kwargs)]``
                for pitch and loudness respectively.  Defaults to None
                (identity transforms).
            eval (bool, optional): evaluation flag, stored on the instance.
        """
        self.path = path
        # NOTE(review): the handle stays open for the object's lifetime and is
        # never closed explicitly; samples are unpickled sequentially from it.
        self.dataset = open(path, "rb")

        # add transformations applied to data
        if list_transforms is None:
            self.list_transforms = [(Identity, {}), (Identity, {})]
        else:
            self.list_transforms = list_transforms

        self.eval = eval
        print("Dataset loaded.")

    def __fit_transforms(self, u_f0, u_lo, e_f0, e_lo) -> List[object]:
        """Fit the two transforms to the contours.

        The pitch scaler is fitted on the concatenation of both pitch
        contours, the loudness scaler on both loudness contours.

        Returns:
            list[object]: fitted scalers ``[pitch_scaler, loudness_scaler]``
        """
        scalers = []

        # pitch
        contour = np.concatenate((u_f0, e_f0))
        transform = self.list_transforms[0]
        sc = transform[0](**transform[1]).fit(contour.reshape(-1, 1))
        scalers.append(sc)

        # loudness
        contour = np.concatenate((u_lo, e_lo))
        transform = self.list_transforms[1]
        sc = transform[0](**transform[1]).fit(contour.reshape(-1, 1))
        scalers.append(sc)

        return scalers

    def transform(self, x: torch.Tensor) -> torch.Tensor:
        """Apply transformations to the contour x (pitch and loudness)

        Args:
            x (torch.Tensor): input contour, pitch/loudness stacked on dim -2

        Returns:
            torch.Tensor: transformed contour
        """
        f0, lo = torch.split(x, 1, -2)
        # transforms
        f0 = self.scalers[0].transform(f0)
        lo = self.scalers[1].transform(lo)
        return torch.cat([f0, lo], -2)

    def inverse_transform(self, x: torch.Tensor) -> torch.Tensor:
        """Apply inverse transformations to the contour x (pitch and loudness)

        Args:
            x (torch.Tensor): input contour, pitch/loudness stacked on dim -2

        Returns:
            torch.Tensor: inverse transformed contour
        """
        f0, lo = torch.split(x, 1, -2)
        # Inverse transforms
        f0 = self.scalers[0].inverse_transform(f0)
        lo = self.scalers[1].inverse_transform(lo)
        return torch.cat([f0, lo], -2)

    def __len__(self) -> int:
        """Compute the number of samples in the dataset.

        The count is parsed from the file name: the token just before the
        extension, e.g. ``.../dataset_train_1000.pickle`` -> 1000.

        Returns:
            [int]: number of samples in the dataset
        """
        # Raw string: the original "\." was a deprecated invalid escape
        # sequence in a normal string literal (same pattern, no warning).
        return int(re.split(r"/|_|\.", self.path)[-2])

    def __getitem__(self, idx: int) -> List[torch.Tensor]:
        """Select the ith sample from the dataset.

        NOTE(review): ``idx`` is ignored — records are unpickled sequentially
        from the open stream, so samples must be consumed in file order.

        Args:
            idx (int): index of the sample (unused, see note)

        Returns:
            list[torch.Tensor]: u contours, e contours, onsets, offsets, mask
        """
        sample = pickle.load(self.dataset)

        u_f0 = torch.Tensor(sample["u_f0"])
        u_lo = torch.Tensor(sample["u_lo"])
        e_f0 = torch.Tensor(sample["e_f0"])
        e_lo = torch.Tensor(sample["e_lo"])

        # Scalers are (re)fitted per sample on its own contours.
        self.scalers = self.__fit_transforms(u_f0, u_lo, e_f0, e_lo)

        s_onsets = torch.Tensor(sample["onsets"]).unsqueeze(0)
        s_offsets = torch.Tensor(sample["offsets"]).unsqueeze(0)
        mask = torch.Tensor(sample["mask"])

        # concatenate the contours into unexpressive/expressive tensors
        u_contours = torch.cat([
            u_f0.unsqueeze(0),
            u_lo.unsqueeze(0),
        ], 0)

        e_contours = torch.cat([
            e_f0.unsqueeze(0),
            e_lo.unsqueeze(0),
        ], 0)

        return [
            self.transform(u_contours),
            self.transform(e_contours), s_onsets, s_offsets, mask
        ]
if __name__ == '__main__':
    # Scale both pitch and loudness contours into the (-1, 1) feature range.
    l = [(PitchTransform, {
        "feature_range": (-1, 1)
    }), (LoudnessTransform, {
        "feature_range": (-1, 1)
    })]
    d = SynthDataset(path="data/dataset_train_1000.pickle", list_transforms=l)
    # Smoke test: read a few samples sequentially and print the shape of every
    # returned component (u contours, e contours, onsets, offsets, mask).
    for i in range(10):
        sample = d[0]
        print("New sample")
        for elt in sample:
            print(elt.shape)
| [
"re.split",
"torch.split",
"torch.cat",
"pickle.load",
"torch.Tensor",
"numpy.concatenate"
] | [((1230, 1258), 'numpy.concatenate', 'np.concatenate', (['(u_f0, e_f0)'], {}), '((u_f0, e_f0))\n', (1244, 1258), True, 'import numpy as np\n'), ((1438, 1466), 'numpy.concatenate', 'np.concatenate', (['(u_lo, e_lo)'], {}), '((u_lo, e_lo))\n', (1452, 1466), True, 'import numpy as np\n'), ((1915, 1936), 'torch.split', 'torch.split', (['x', '(1)', '(-2)'], {}), '(x, 1, -2)\n', (1926, 1936), False, 'import torch\n'), ((2060, 2083), 'torch.cat', 'torch.cat', (['[f0, lo]', '(-2)'], {}), '([f0, lo], -2)\n', (2069, 2083), False, 'import torch\n'), ((2391, 2412), 'torch.split', 'torch.split', (['x', '(1)', '(-2)'], {}), '(x, 1, -2)\n', (2402, 2412), False, 'import torch\n'), ((2560, 2583), 'torch.cat', 'torch.cat', (['[f0, lo]', '(-2)'], {}), '([f0, lo], -2)\n', (2569, 2583), False, 'import torch\n'), ((3107, 3132), 'pickle.load', 'pickle.load', (['self.dataset'], {}), '(self.dataset)\n', (3118, 3132), False, 'import pickle\n'), ((3149, 3177), 'torch.Tensor', 'torch.Tensor', (["sample['u_f0']"], {}), "(sample['u_f0'])\n", (3161, 3177), False, 'import torch\n'), ((3193, 3221), 'torch.Tensor', 'torch.Tensor', (["sample['u_lo']"], {}), "(sample['u_lo'])\n", (3205, 3221), False, 'import torch\n'), ((3238, 3266), 'torch.Tensor', 'torch.Tensor', (["sample['e_f0']"], {}), "(sample['e_f0'])\n", (3250, 3266), False, 'import torch\n'), ((3282, 3310), 'torch.Tensor', 'torch.Tensor', (["sample['e_lo']"], {}), "(sample['e_lo'])\n", (3294, 3310), False, 'import torch\n'), ((3526, 3554), 'torch.Tensor', 'torch.Tensor', (["sample['mask']"], {}), "(sample['mask'])\n", (3538, 3554), False, 'import torch\n'), ((2772, 2802), 're.split', 're.split', (['"""/|_|\\\\."""', 'self.path'], {}), "('/|_|\\\\.', self.path)\n", (2780, 2802), False, 'import re\n'), ((3401, 3431), 'torch.Tensor', 'torch.Tensor', (["sample['onsets']"], {}), "(sample['onsets'])\n", (3413, 3431), False, 'import torch\n'), ((3465, 3496), 'torch.Tensor', 'torch.Tensor', (["sample['offsets']"], {}), "(sample['offsets'])\n", 
(3477, 3496), False, 'import torch\n')] |
# SPDX-License-Identifier: BSD-3-Clause AND Apache-2.0
# Copyright 2018 Regents of the University of California
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# Copyright 2019 Blue Cheetah Analog Design Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This module defines core data post-processing classes.
"""
import numpy as np
import scipy.interpolate as interp
import scipy.cluster.vq as svq
import scipy.optimize as sciopt
class Waveform(object):
    """A (usually transient) waveform.

    This class provides interpolation and other convenience functions.

    Parameters
    ----------
    xvec : np.multiarray.ndarray
        the X vector.
    yvec : np.multiarray.ndarray
        the Y vector.
    xtol : float
        the X value tolerance.
    order : int
        the interpolation order.  1 for nearest, 2 for linear, 3 for spline.
    ext : int or str
        interpolation extension mode.  See documentation for InterpolatedUnivariateSpline.
    """

    def __init__(self, xvec, yvec, xtol, order=3, ext=3):
        self._xvec = xvec
        self._yvec = yvec
        self._xtol = xtol
        self._order = order
        self._ext = ext
        # Build the interpolating function once; __call__ delegates to it.
        self._fun = interp.InterpolatedUnivariateSpline(xvec, yvec, k=order, ext=ext)

    @property
    def xvec(self):
        """the X vector"""
        return self._xvec

    @property
    def yvec(self):
        """the Y vector"""
        return self._yvec

    @property
    def order(self):
        """the interpolation order.  1 for nearest, 2 for linear, 3 for spline."""
        return self._order

    @property
    def xtol(self):
        """the X value tolerance."""
        return self._xtol

    @property
    def ext(self):
        """interpolation extension mode.  See documentation for InterpolatedUnivariateSpline."""
        return self._ext

    def __call__(self, *arg, **kwargs):
        """Evaluate the waveform at the given points."""
        return self._fun(*arg, **kwargs)

    def get_xrange(self):
        """Returns the X vector range.

        Returns
        -------
        xmin : float
            minimum X value.
        xmax : float
            maximum X value.
        """
        return self.xvec[0], self.xvec[-1]

    def shift_by(self, xshift):
        """Returns a shifted copy of this waveform.

        Parameters
        ----------
        xshift : float
            the amount to shift by.

        Returns
        -------
        wvfm : bag.data.core.Waveform
            a new Waveform with the X axis shifted by xshift.
        """
        return Waveform(self.xvec + xshift, self.yvec, self.xtol, order=self.order, ext=self.ext)

    def get_all_crossings(self, threshold, start=None, stop=None, edge='both'):
        """Returns all X values at which this waveform crosses the given threshold.

        Parameters
        ----------
        threshold : float
            the threshold value.
        start : float or None
            if given, search for crossings starting at this X value.
        stop : float or None
            if given, search only for crossings before this X value.
        edge : string
            crossing type.  Valid values are 'rising', 'falling', or 'both'.

        Returns
        -------
        xval_list : list[float]
            all X values at which crossing occurs.
        """
        # determine start and stop indices
        sidx = 0 if start is None else np.searchsorted(self.xvec, [start])[0]
        if stop is None:
            eidx = len(self.xvec)
        else:
            eidx = np.searchsorted(self.xvec, [stop])[0]
            # include the stop point itself when it matches within tolerance
            if eidx < len(self.xvec) and abs(self.xvec[eidx] - stop) < self.xtol:
                eidx += 1

        # quantize waveform values, then detect edge.
        bool_vec = self.yvec[sidx:eidx] >= threshold  # type: np.ndarray
        qvec = bool_vec.astype(int)
        dvec = np.diff(qvec)

        # eliminate unwanted edge types.
        if edge == 'rising':
            dvec = np.maximum(dvec, 0)
        elif edge == 'falling':
            dvec = np.minimum(dvec, 0)

        # get crossing indices
        idx_list = dvec.nonzero()[0]

        # convert indices to X value using brentq interpolation.
        def crossing_fun(x):
            return self._fun(x) - threshold

        xval_list = []
        for idx in idx_list:
            t0, t1 = self.xvec[sidx + idx], self.xvec[sidx + idx + 1]
            try:
                tcross = sciopt.brentq(crossing_fun, t0, t1, xtol=self.xtol)
            except ValueError:
                # no solution, this happens only if we have numerical error
                # around the threshold.  In this case just pick the endpoint
                # closest to threshold.
                va = crossing_fun(t0)
                vb = crossing_fun(t1)
                tcross = t0 if abs(va) < abs(vb) else t1
            xval_list.append(tcross)
        return xval_list

    def get_crossing(self, threshold, start=None, stop=None, n=1, edge='both'):
        """Returns the X value at which this waveform crosses the given threshold.

        Parameters
        ----------
        threshold : float
            the threshold value.
        start : float or None
            if given, search for the crossing starting at this X value.
        stop : float or None
            if given, search only for crossings before this X value.
        n : int
            returns the nth crossing.
        edge : str
            crossing type.  Valid values are 'rising', 'falling', or 'both'.

        Returns
        -------
        xval : float or None
            the X value at which the crossing occurs.  None if no crossings are detected.
        """
        xval_list = self.get_all_crossings(threshold, start=start, stop=stop, edge=edge)
        if len(xval_list) < n:
            return None
        return xval_list[n - 1]

    def to_arrays(self, xmin=None, xmax=None):
        """Returns the X and Y arrays representing this waveform.

        Parameters
        ----------
        xmin : float or None
            If given, will start from this value.
        xmax : float or None
            If given, will end at this value.

        Returns
        -------
        xvec : np.multiarray.ndarray
            the X array
        yvec : np.multiarray.ndarray
            the Y array
        """
        sidx = 0 if xmin is None else np.searchsorted(self.xvec, [xmin])[0]
        eidx = len(self.xvec) if xmax is None else np.searchsorted(self.xvec, [xmax])[0]
        if eidx < len(self.xvec) and self.xvec[eidx] == xmax:
            eidx += 1
        xtemp = self.xvec[sidx:eidx]
        # BUG FIX: np.insert/np.append return new arrays; the original code
        # discarded the result, so the requested endpoints were never added.
        if xmin is not None and (len(xtemp) == 0 or xtemp[0] != xmin):
            xtemp = np.insert(xtemp, 0, [xmin])
        if xmax is not None and (len(xtemp) == 0 or xtemp[-1] != xmax):
            xtemp = np.append(xtemp, [xmax])
        return xtemp, self(xtemp)

    def get_eye_specs(self, tbit, tsample, thres=0.0, nlev=2):
        """Compute the eye diagram spec of this waveform.

        This algorithm uses the following steps.

        1. set t_off to 0
        2. sample the waveform at tbit interval, starting at t0 + t_off.
        3. sort the sampled values, get gap between adjacent values.
        4. record G, the length of the gap covering thres.
        5. increment t_off by tsample, go to step 2 and repeat until
           t_off >= tbit.
        6. find t_off with maximum G.  This is the eye center.
        7. at the eye center, compute eye height and eye opening using kmeans
           clustering algorithm.
        8. return result.

        Parameters
        ----------
        tbit : float
            eye period.
        tsample : float
            the resolution to sample the eye.  Used to find optimal
            time shift and maximum eye opening.
        thres : float
            the eye vertical threshold.
        nlev : int
            number of expected levels.  2 for NRZ, 4 for PAM4.

        Returns
        -------
        result : dict
            A dictionary from specification to value.
        """
        tstart, tend = self.get_xrange()
        toff_vec = np.arange(0, tbit, tsample)
        best_idx = 0
        best_gap = 0.0
        best_values = None
        mid_lev = nlev // 2
        # sweep the sampling offset; keep the offset with the widest vertical
        # gap straddling the threshold (that offset is the eye center).
        for idx, t_off in enumerate(toff_vec):
            # noinspection PyTypeChecker
            values = self(np.arange(tstart + t_off, tend, tbit))
            values.sort()
            up_idx = np.searchsorted(values, [thres])[0]
            if up_idx == 0 or up_idx == len(values):
                continue
            cur_gap = values[up_idx] - values[up_idx - 1]
            if cur_gap > best_gap:
                best_idx = idx
                best_gap = cur_gap
                best_values = values

        if best_values is None:
            raise ValueError("waveform never cross threshold=%.4g" % thres)

        # cluster the sampled values into nlev signal levels (normalize by the
        # standard deviation so kmeans works on a well-scaled space).
        vstd = np.std(best_values)
        vtemp = best_values / vstd
        tmp_arr = np.linspace(vtemp[0], vtemp[-1], nlev)  # type: np.ndarray
        clusters = svq.kmeans(vtemp, tmp_arr)[0]
        # clusters = svq.kmeans(vtemp, 4, iter=50)[0]
        clusters *= vstd
        clusters.sort()
        vcenter = (clusters[mid_lev] + clusters[mid_lev - 1]) / 2.0

        # compute eye opening/margin
        openings = []
        tr_widths = []
        last_val = best_values[0]
        bot_val = last_val
        cur_cidx = 0
        for cur_val in best_values:
            cur_cluster = clusters[cur_cidx]
            next_cluster = clusters[cur_cidx + 1]
            if abs(cur_val - cur_cluster) > abs(cur_val - next_cluster):
                openings.append(cur_val - last_val)
                tr_widths.append(last_val - bot_val)
                cur_cidx += 1
                if cur_cidx == len(clusters) - 1:
                    tr_widths.append(best_values[-1] - cur_val)
                    break
                bot_val = cur_val
            last_val = cur_val

        return {'center': (float(toff_vec[best_idx]), vcenter),
                'levels': clusters,
                'heights': clusters[1:] - clusters[:-1],
                'openings': np.array(openings),
                'trace_widths': np.array(tr_widths)
                }

    def _add_xy(self, other):
        """Merge the X vectors of self and other; return (xvec, y1 + y2)."""
        if not isinstance(other, Waveform):
            raise ValueError("Trying to add non-Waveform object.")
        xnew = np.concatenate((self.xvec, other.xvec))
        # snap to the tolerance grid and deduplicate the merged X points
        xnew = np.unique(np.around(xnew / self.xtol)) * self.xtol
        # noinspection PyTypeChecker
        y1 = self(xnew)
        y2 = other(xnew)
        return xnew, y1 + y2

    def __add__(self, other):
        if np.isscalar(other):
            return Waveform(np.array(self.xvec), self.yvec + other, self.xtol, order=self.order,
                            ext=self.ext)
        elif isinstance(other, Waveform):
            new_order = max(self.order, other.order)
            xvec, yvec = self._add_xy(other)
            return Waveform(xvec, yvec, self.xtol, order=new_order, ext=self.ext)
        else:
            raise Exception('type %s not supported' % type(other))

    def __neg__(self):
        return Waveform(np.array(self.xvec), -self.yvec, self.xtol, order=self.order, ext=self.ext)

    def __mul__(self, scale):
        if not np.isscalar(scale):
            raise ValueError("Can only multiply by scalar.")
        return Waveform(np.array(self.xvec), scale * self.yvec, self.xtol, order=self.order,
                        ext=self.ext)

    def __rmul__(self, scale):
        return self.__mul__(scale)
| [
"numpy.minimum",
"numpy.maximum",
"scipy.interpolate.InterpolatedUnivariateSpline",
"scipy.optimize.brentq",
"scipy.cluster.vq.kmeans",
"numpy.std",
"numpy.isscalar",
"numpy.searchsorted",
"numpy.insert",
"numpy.append",
"numpy.around",
"numpy.diff",
"numpy.arange",
"numpy.array",
"numpy... | [((3142, 3207), 'scipy.interpolate.InterpolatedUnivariateSpline', 'interp.InterpolatedUnivariateSpline', (['xvec', 'yvec'], {'k': 'order', 'ext': 'ext'}), '(xvec, yvec, k=order, ext=ext)\n', (3177, 3207), True, 'import scipy.interpolate as interp\n'), ((5838, 5851), 'numpy.diff', 'np.diff', (['qvec'], {}), '(qvec)\n', (5845, 5851), True, 'import numpy as np\n'), ((10093, 10120), 'numpy.arange', 'np.arange', (['(0)', 'tbit', 'tsample'], {}), '(0, tbit, tsample)\n', (10102, 10120), True, 'import numpy as np\n'), ((10856, 10875), 'numpy.std', 'np.std', (['best_values'], {}), '(best_values)\n', (10862, 10875), True, 'import numpy as np\n'), ((10929, 10967), 'numpy.linspace', 'np.linspace', (['vtemp[0]', 'vtemp[-1]', 'nlev'], {}), '(vtemp[0], vtemp[-1], nlev)\n', (10940, 10967), True, 'import numpy as np\n'), ((12350, 12389), 'numpy.concatenate', 'np.concatenate', (['(self.xvec, other.xvec)'], {}), '((self.xvec, other.xvec))\n', (12364, 12389), True, 'import numpy as np\n'), ((12613, 12631), 'numpy.isscalar', 'np.isscalar', (['other'], {}), '(other)\n', (12624, 12631), True, 'import numpy as np\n'), ((5942, 5961), 'numpy.maximum', 'np.maximum', (['dvec', '(0)'], {}), '(dvec, 0)\n', (5952, 5961), True, 'import numpy as np\n'), ((8677, 8704), 'numpy.insert', 'np.insert', (['xtemp', '(0)', '[xmin]'], {}), '(xtemp, 0, [xmin])\n', (8686, 8704), True, 'import numpy as np\n'), ((8789, 8813), 'numpy.append', 'np.append', (['xtemp', '[xmax]'], {}), '(xtemp, [xmax])\n', (8798, 8813), True, 'import numpy as np\n'), ((11007, 11033), 'scipy.cluster.vq.kmeans', 'svq.kmeans', (['vtemp', 'tmp_arr'], {}), '(vtemp, tmp_arr)\n', (11017, 11033), True, 'import scipy.cluster.vq as svq\n'), ((12103, 12121), 'numpy.array', 'np.array', (['openings'], {}), '(openings)\n', (12111, 12121), True, 'import numpy as np\n'), ((12155, 12174), 'numpy.array', 'np.array', (['tr_widths'], {}), '(tr_widths)\n', (12163, 12174), True, 'import numpy as np\n'), ((13123, 13142), 'numpy.array', 
'np.array', (['self.xvec'], {}), '(self.xvec)\n', (13131, 13142), True, 'import numpy as np\n'), ((13245, 13263), 'numpy.isscalar', 'np.isscalar', (['scale'], {}), '(scale)\n', (13256, 13263), True, 'import numpy as np\n'), ((13350, 13369), 'numpy.array', 'np.array', (['self.xvec'], {}), '(self.xvec)\n', (13358, 13369), True, 'import numpy as np\n'), ((5382, 5417), 'numpy.searchsorted', 'np.searchsorted', (['self.xvec', '[start]'], {}), '(self.xvec, [start])\n', (5397, 5417), True, 'import numpy as np\n'), ((5513, 5547), 'numpy.searchsorted', 'np.searchsorted', (['self.xvec', '[stop]'], {}), '(self.xvec, [stop])\n', (5528, 5547), True, 'import numpy as np\n'), ((6013, 6032), 'numpy.minimum', 'np.minimum', (['dvec', '(0)'], {}), '(dvec, 0)\n', (6023, 6032), True, 'import numpy as np\n'), ((6406, 6457), 'scipy.optimize.brentq', 'sciopt.brentq', (['crossing_fun', 't0', 't1'], {'xtol': 'self.xtol'}), '(crossing_fun, t0, t1, xtol=self.xtol)\n', (6419, 6457), True, 'import scipy.optimize as sciopt\n'), ((8344, 8378), 'numpy.searchsorted', 'np.searchsorted', (['self.xvec', '[xmin]'], {}), '(self.xvec, [xmin])\n', (8359, 8378), True, 'import numpy as np\n'), ((8433, 8467), 'numpy.searchsorted', 'np.searchsorted', (['self.xvec', '[xmax]'], {}), '(self.xvec, [xmax])\n', (8448, 8467), True, 'import numpy as np\n'), ((10334, 10371), 'numpy.arange', 'np.arange', (['(tstart + t_off)', 'tend', 'tbit'], {}), '(tstart + t_off, tend, tbit)\n', (10343, 10371), True, 'import numpy as np\n'), ((10421, 10453), 'numpy.searchsorted', 'np.searchsorted', (['values', '[thres]'], {}), '(values, [thres])\n', (10436, 10453), True, 'import numpy as np\n'), ((12415, 12442), 'numpy.around', 'np.around', (['(xnew / self.xtol)'], {}), '(xnew / self.xtol)\n', (12424, 12442), True, 'import numpy as np\n'), ((12661, 12680), 'numpy.array', 'np.array', (['self.xvec'], {}), '(self.xvec)\n', (12669, 12680), True, 'import numpy as np\n')] |
import os
from flask import Flask, request, render_template
# Prepend the hosted Python install dir to PATH so the cntk import below can
# locate its native DLLs (D:\home paths suggest an Azure App Service Windows
# deployment -- confirm against the hosting setup).
os.environ['PATH'] = r'D:\home\python354x64;' + os.environ['PATH']
import cntk
import numpy as np
# Flask application object plus the WSGI callable the hosting layer expects.
app = Flask(__name__)
wsgi_app = app.wsgi_app
# Pre-trained CNTK hangman model, loaded once at import time and shared
# (read-only) by all request handlers below.
model = cntk.load_model('D:\\home\\site\\wwwroot\\models\\hangman_model.dnn')
''' Helper functions for neural network evaluation '''
def encode_word(current_view):
    """One-hot encode the currently visible word.

    Each character maps to a 27-way one-hot row: indices 0-25 for the
    uppercase letters 'A'-'Z' and index 26 for an unrevealed blank ('_').

    Args:
        current_view: iterable of single characters ('A'-'Z' or '_').

    Returns:
        np.ndarray of shape (len(current_view), 27), dtype float32.
    """
    indices = [26 if ch == '_' else ord(ch) - 65 for ch in current_view]
    encoded = np.zeros((len(indices), 27), dtype=np.float32)
    # Fancy indexing sets one cell per row in a single vectorized step.
    encoded[np.arange(len(indices)), indices] = 1
    return encoded
def encode_previous_guesses(letters_guessed):
    """Encode already-guessed letters as a 26-dim indicator vector.

    Args:
        letters_guessed: iterable of uppercase letters 'A'-'Z'.

    Returns:
        np.ndarray of shape (26,), dtype float32, with a 1 at each guessed
        letter's index (0 for 'A' ... 25 for 'Z') and 0 elsewhere.
    """
    flags = np.zeros(26, dtype=np.float32)
    guessed_indices = [ord(letter) - 65 for letter in letters_guessed]
    flags[guessed_indices] = 1
    return flags
def get_next_guess(current_view, letters_guessed):
    """Ask the CNTK model for its next letter guess.

    Args:
        current_view: current word view, e.g. ['_', 'A', '_'].
        letters_guessed: letters already tried this game.

    Returns:
        A single uppercase letter 'A'-'Z' (the model's highest-scoring pick).
    """
    global model
    feed = {
        model.arguments[0]: encode_word(current_view),
        model.arguments[1]: encode_previous_guesses(letters_guessed),
    }
    scores = np.squeeze(model.eval(feed))
    return chr(65 + np.argmax(scores))
@app.route('/', methods=['GET'])
def sign_form():
    # Landing page: the form where the user enters the word length.
    return render_template('index.html')
@app.route('/start', methods=['POST'])
def start():
    """Begin a new game: reset the state, make the first guess, render it."""
    global num_chars, lives_remaining, current_view, letters_guessed
    # Fresh game state derived from the submitted word length.
    num_chars = int(request.form['num_chars'])
    lives_remaining = 10
    current_view = ['_'] * num_chars
    letters_guessed = set()
    # First model guess, recorded so it is never repeated.
    guess = get_next_guess(current_view, letters_guessed)
    letters_guessed.add(guess)
    # Hand off to the page where the user rates this guess.
    return render_template(
        'start.html',
        guess=guess,
        lives_remaining=lives_remaining,
        current_view=current_view,
        letters_guessed=letters_guessed,
    )
@app.route('/feedback', methods=['POST'])
def feedback():
    """Apply the user's feedback on the last guess and render the next page.

    Reveals the guessed letter at every position the user marked, deducts a
    life on a wrong guess, then renders game-over, win, or the next guess.
    """
    global num_chars, lives_remaining, current_view, letters_guessed
    # Checkbox markers ('letter<idx>') for positions containing the guess.
    marked = request.form.getlist('present')
    if marked:
        guess_result = 'correctly'
    else:
        guess_result = 'incorrectly'
        lives_remaining -= 1
    # Reveal the guessed letter at each confirmed position.
    revealed = str(request.form['last_guess']).upper()
    for marker in marked:
        position = int(marker.split('letter')[1])
        current_view[position] = revealed
    if lives_remaining == 0:
        return render_template('gameover.html',
                               guess=request.form['last_guess'],
                               lives_remaining=lives_remaining,
                               current_view=current_view,
                               letters_guessed=letters_guessed)
    if '_' not in current_view:
        return render_template('win.html',
                               guess=request.form['last_guess'],
                               lives_remaining=lives_remaining,
                               current_view=current_view,
                               letters_guessed=letters_guessed)
    # Game continues: ask the model for its next guess.
    guess = get_next_guess(current_view, letters_guessed)
    letters_guessed.add(guess)
    return render_template('feedback.html',
                           guess_result=guess_result,
                           lives_remaining=lives_remaining,
                           guess=guess,
                           current_view=current_view,
                           letters_guessed=letters_guessed)
if __name__ == '__main__':
    # Host/port default to localhost:5555 unless overridden via environment.
    HOST = os.environ.get('SERVER_HOST', 'localhost')
    try:
        PORT = int(os.environ.get('SERVER_PORT', '5555'))
    except ValueError:
        # Non-numeric override: fall back to the default port.
        PORT = 5555
    app.run(HOST, PORT)
| [
"flask.request.form.getlist",
"cntk.load_model",
"flask.Flask",
"numpy.zeros",
"os.environ.get",
"flask.render_template",
"numpy.squeeze"
] | [((165, 180), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (170, 180), False, 'from flask import Flask, request, render_template\n'), ((213, 282), 'cntk.load_model', 'cntk.load_model', (['"""D:\\\\home\\\\site\\\\wwwroot\\\\models\\\\hangman_model.dnn"""'], {}), "('D:\\\\home\\\\site\\\\wwwroot\\\\models\\\\hangman_model.dnn')\n", (228, 282), False, 'import cntk\n'), ((662, 692), 'numpy.zeros', 'np.zeros', (['(26)'], {'dtype': 'np.float32'}), '(26, dtype=np.float32)\n', (670, 692), True, 'import numpy as np\n'), ((1167, 1196), 'flask.render_template', 'render_template', (['"""index.html"""'], {}), "('index.html')\n", (1182, 1196), False, 'from flask import Flask, request, render_template\n'), ((1670, 1809), 'flask.render_template', 'render_template', (['"""start.html"""'], {'guess': 'guess', 'lives_remaining': 'lives_remaining', 'current_view': 'current_view', 'letters_guessed': 'letters_guessed'}), "('start.html', guess=guess, lives_remaining=lives_remaining,\n current_view=current_view, letters_guessed=letters_guessed)\n", (1685, 1809), False, 'from flask import Flask, request, render_template\n'), ((2173, 2204), 'flask.request.form.getlist', 'request.form.getlist', (['"""present"""'], {}), "('present')\n", (2193, 2204), False, 'from flask import Flask, request, render_template\n'), ((3126, 3168), 'os.environ.get', 'os.environ.get', (['"""SERVER_HOST"""', '"""localhost"""'], {}), "('SERVER_HOST', 'localhost')\n", (3140, 3168), False, 'import os\n'), ((2338, 2505), 'flask.render_template', 'render_template', (['"""gameover.html"""'], {'guess': "request.form['last_guess']", 'lives_remaining': 'lives_remaining', 'current_view': 'current_view', 'letters_guessed': 'letters_guessed'}), "('gameover.html', guess=request.form['last_guess'],\n lives_remaining=lives_remaining, current_view=current_view,\n letters_guessed=letters_guessed)\n", (2353, 2505), False, 'from flask import Flask, request, render_template\n'), ((2034, 2065), 
'flask.request.form.getlist', 'request.form.getlist', (['"""present"""'], {}), "('present')\n", (2054, 2065), False, 'from flask import Flask, request, render_template\n'), ((2578, 2740), 'flask.render_template', 'render_template', (['"""win.html"""'], {'guess': "request.form['last_guess']", 'lives_remaining': 'lives_remaining', 'current_view': 'current_view', 'letters_guessed': 'letters_guessed'}), "('win.html', guess=request.form['last_guess'],\n lives_remaining=lives_remaining, current_view=current_view,\n letters_guessed=letters_guessed)\n", (2593, 2740), False, 'from flask import Flask, request, render_template\n'), ((2874, 3048), 'flask.render_template', 'render_template', (['"""feedback.html"""'], {'guess_result': 'guess_result', 'lives_remaining': 'lives_remaining', 'guess': 'guess', 'current_view': 'current_view', 'letters_guessed': 'letters_guessed'}), "('feedback.html', guess_result=guess_result, lives_remaining\n =lives_remaining, guess=guess, current_view=current_view,\n letters_guessed=letters_guessed)\n", (2889, 3048), False, 'from flask import Flask, request, render_template\n'), ((3188, 3225), 'os.environ.get', 'os.environ.get', (['"""SERVER_PORT"""', '"""5555"""'], {}), "('SERVER_PORT', '5555')\n", (3202, 3225), False, 'import os\n'), ((1059, 1076), 'numpy.squeeze', 'np.squeeze', (['guess'], {}), '(guess)\n', (1069, 1076), True, 'import numpy as np\n')] |
"""
This source code is licensed under the MIT license found in the
LICENSE file in the root directory of this source tree.
"""
import pathlib
import random
import numpy as np
import h5py
from torch.utils.data import Dataset
from data import transforms
import torch
class SliceData(Dataset):
    """
    A PyTorch Dataset that provides access to MR image slices.

    Each example is one slice of one HDF5 volume; ``transform`` turns the
    raw k-space slice into model-ready tensors.
    """

    def __init__(self, root, transform, challenge, sequence, sample_rate, seed=42):
        """
        Args:
            root (pathlib.Path): Path to the dataset.
            transform (callable): A callable object that pre-processes the raw data into
                appropriate form. The transform function should take 'kspace', 'target',
                'attributes', 'filename', and 'slice' as inputs. 'target' may be null
                for test data.
            challenge (str): "singlecoil" or "multicoil" depending on which challenge to use.
            sequence: unused; kept for signature compatibility with callers.
            sample_rate (float): Below 1.0, the fraction of volumes to load
                (randomly sub-sampled). Above 1.0 on the 'train' split, every
                slice is repeated int(sample_rate) times (oversampling).
            seed (int): RNG seed for the volume sub-sampling shuffle.
        """
        if challenge not in ('singlecoil', 'multicoil'):
            raise ValueError('challenge should be either "singlecoil" or "multicoil"')
        self.transform = transform
        self.recons_key = 'reconstruction_esc' if challenge == 'singlecoil' else 'reconstruction_rss'
        phase = root.parts[-1]  # split name, e.g. 'train'

        files = list(pathlib.Path(root).iterdir())
        print('Loading dataset :', root)
        random.seed(seed)
        if sample_rate < 1:
            random.shuffle(files)
            num_files = round(len(files) * sample_rate)
            files = files[:num_files]

        self.examples = []
        for fname in sorted(files):
            # Open read-only just long enough to count slices. The original
            # version leaked this file handle (h5py.File never closed).
            with h5py.File(fname, 'r') as data:
                num_slices = data['kspace'].shape[0]
            # Padding bounds are unknown at this point; the transform
            # receives None for both.
            self.examples += [(fname, slice_idx, None, None)
                              for slice_idx in range(num_slices)]

        if phase == 'train' and sample_rate > 1:
            # Oversample: repeat each example int(sample_rate) times,
            # keeping the repeats consecutive as before.
            self.examples = [example
                             for example in self.examples
                             for _ in range(int(sample_rate))]

    def __len__(self):
        return len(self.examples)

    def __getitem__(self, i):
        fname, slice_idx, padding_left, padding_right = self.examples[i]
        with h5py.File(fname, 'r') as data:
            kspace = data['kspace'][slice_idx]
            mask = np.asarray(data['mask']) if 'mask' in data else None
            target = data[self.recons_key][slice_idx] if self.recons_key in data else None
            attrs = dict(data.attrs)
            attrs['padding_left'] = padding_left
            attrs['padding_right'] = padding_right
            return self.transform(kspace, mask, target, attrs, fname.name, slice_idx)
class DataTransform:
    """
    Data Transformer for training U-Net models.

    Converts one raw k-space slice into normalized, channel-first tensors,
    optionally applying an undersampling mask.
    """

    def __init__(self, resolution, which_challenge, mask_func=None, use_seed=True):
        """
        Args:
            resolution (int): Resolution of the image.
            which_challenge (str): Either "singlecoil" or "multicoil" denoting the dataset.
            mask_func (common.subsample.MaskFunc): A function that can create a mask of
                appropriate shape; when None, the k-space is used fully sampled.
            use_seed (bool): If true, derive the mask RNG seed from the filename so
                the same mask is used for all slices of a given volume.
        """
        if which_challenge not in ('singlecoil', 'multicoil'):
            raise ValueError(
                f'Challenge should either be "singlecoil" or "multicoil"')
        self.mask_func = mask_func
        self.resolution = resolution
        self.which_challenge = which_challenge
        self.use_seed = use_seed

    def __call__(self, kspace, mask, target, attrs, fname, slice):
        """
        Args:
            kspace (numpy.array): Input k-space, (num_coils, rows, cols, 2) for
                multi-coil or (rows, cols, 2) for single-coil data.
            mask (numpy.array): Mask from the test dataset (may be None).
            target (numpy.array): Target image (may be None).
            attrs (dict): Acquisition-related information from the HDF5 object.
            fname (str): File name.
            slice (int): Serial number of the slice.

        Returns:
            tuple: (image, target, mean, std, norm, fname, slice, max, mask,
            masked_kspace) -- image-like entries share the same normalization
            statistics and channel-first layout.
        """
        full_kspace = transforms.to_tensor(kspace)
        if self.mask_func:
            # Seed from the filename so the undersampling pattern is
            # reproducible per volume.
            seed = None if not self.use_seed else tuple(map(ord, fname))
            masked_kspace, mask = transforms.apply_mask(
                full_kspace, self.mask_func, seed)
        else:
            masked_kspace = full_kspace
        # Inverse FFT: zero-filled input image and fully sampled target.
        image = transforms.ifft2(masked_kspace)
        target = transforms.ifft2(full_kspace)
        # Statistics: zero mean, scale by the mean magnitude of the
        # zero-filled image.
        mean = torch.tensor(0.0)
        std = transforms.complex_abs(image).mean()
        # Channel-first layout, then shared normalization for all outputs.
        image = transforms.normalize(image.permute(2, 0, 1), mean, std, eps=0)
        target = transforms.normalize(target.permute(2, 0, 1), mean, std, eps=0)
        masked_kspace = transforms.normalize(
            masked_kspace.permute(2, 0, 1), mean, std, eps=0)
        mask = mask.repeat(image.shape[1], 1, 1).squeeze().unsqueeze(0)
        return (image, target, mean, std, attrs['norm'].astype(np.float32),
                fname, slice, attrs['max'].astype(np.float32), mask,
                masked_kspace)
| [
"data.transforms.complex_abs",
"data.transforms.to_tensor",
"h5py.File",
"random.shuffle",
"numpy.asarray",
"pathlib.Path",
"data.transforms.normalize",
"random.seed",
"data.transforms.apply_mask",
"torch.tensor",
"data.transforms.ifft2"
] | [((1527, 1544), 'random.seed', 'random.seed', (['seed'], {}), '(seed)\n', (1538, 1544), False, 'import random\n'), ((4908, 4936), 'data.transforms.to_tensor', 'transforms.to_tensor', (['kspace'], {}), '(kspace)\n', (4928, 4936), False, 'from data import transforms\n'), ((5292, 5323), 'data.transforms.ifft2', 'transforms.ifft2', (['masked_kspace'], {}), '(masked_kspace)\n', (5308, 5323), False, 'from data import transforms\n'), ((5342, 5366), 'data.transforms.ifft2', 'transforms.ifft2', (['kspace'], {}), '(kspace)\n', (5358, 5366), False, 'from data import transforms\n'), ((5412, 5441), 'data.transforms.complex_abs', 'transforms.complex_abs', (['image'], {}), '(image)\n', (5434, 5441), False, 'from data import transforms\n'), ((5457, 5474), 'torch.tensor', 'torch.tensor', (['(0.0)'], {}), '(0.0)\n', (5469, 5474), False, 'import torch\n'), ((5628, 5673), 'data.transforms.normalize', 'transforms.normalize', (['image', 'mean', 'std'], {'eps': '(0)'}), '(image, mean, std, eps=0)\n', (5648, 5673), False, 'from data import transforms\n'), ((5753, 5806), 'data.transforms.normalize', 'transforms.normalize', (['masked_kspace', 'mean', 'std'], {'eps': '(0)'}), '(masked_kspace, mean, std, eps=0)\n', (5773, 5806), False, 'from data import transforms\n'), ((5851, 5897), 'data.transforms.normalize', 'transforms.normalize', (['target', 'mean', 'std'], {'eps': '(0)'}), '(target, mean, std, eps=0)\n', (5871, 5897), False, 'from data import transforms\n'), ((1585, 1606), 'random.shuffle', 'random.shuffle', (['files'], {}), '(files)\n', (1599, 1606), False, 'import random\n'), ((1756, 1777), 'h5py.File', 'h5py.File', (['fname', '"""r"""'], {}), "(fname, 'r')\n", (1765, 1777), False, 'import h5py\n'), ((2514, 2535), 'h5py.File', 'h5py.File', (['fname', '"""r"""'], {}), "(fname, 'r')\n", (2523, 2535), False, 'import h5py\n'), ((5093, 5144), 'data.transforms.apply_mask', 'transforms.apply_mask', (['kspace', 'self.mask_func', 'seed'], {}), '(kspace, self.mask_func, seed)\n', (5114, 
5144), False, 'from data import transforms\n'), ((2607, 2631), 'numpy.asarray', 'np.asarray', (["data['mask']"], {}), "(data['mask'])\n", (2617, 2631), True, 'import numpy as np\n'), ((1448, 1466), 'pathlib.Path', 'pathlib.Path', (['root'], {}), '(root)\n', (1460, 1466), False, 'import pathlib\n')] |
import pytest, os
import numpy as np
import bowienator
global faces

def test_generator_face_list():
    """face_list should find exactly the expected face bounding box."""
    global faces
    faces = bowienator.face_list(os.path.join(os.path.dirname(os.path.realpath(__file__)),'data','james.png'))
    # Element-wise comparison. The original `np.all(a) == np.all(b)` reduced
    # each side to a single scalar truth value first, so it passed for almost
    # any non-empty result and never actually checked the coordinates.
    assert np.array_equal(faces, [[482, 201, 330, 330]])
def test_generator_bowie_draw():
    """bowie_draw should report success for an image with a detected face."""
    global faces
    drawn = bowienator.bowie_draw(os.path.join(os.path.dirname(os.path.realpath(__file__)),'data','james.png'),faces)
    # Plain truthiness check instead of `== True` (PEP 8 / flake8 E712).
    assert drawn
"os.path.realpath",
"numpy.all"
] | [((234, 247), 'numpy.all', 'np.all', (['faces'], {}), '(faces)\n', (240, 247), True, 'import numpy as np\n'), ((251, 281), 'numpy.all', 'np.all', (['[[482, 201, 330, 330]]'], {}), '([[482, 201, 330, 330]])\n', (257, 281), True, 'import numpy as np\n'), ((177, 203), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (193, 203), False, 'import pytest, os\n'), ((391, 417), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (407, 417), False, 'import pytest, os\n')] |
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
import os
import os.path as op
import mne
import numpy as np
from mnefun._paths import (get_raw_fnames, get_event_fnames)
import expyfun
# words = 1
# faces = 2
# cars = 3 (channels 1 and 2)
# alien = 4
def prek_score(p, subjects):
    """Score alien-detection button-press behavior for each subject's raw runs.

    For every raw FIF run of every subject, extracts the four visual-stimulus
    event categories (words/faces/cars/alien) plus button presses, derives
    hit / miss / false-alarm / correct-rejection counts for the task, computes
    d-prime, and writes the results to a per-subject behavioral text file and
    appends a line to the shared 'pre_behavioral.txt' summary.

    Args:
        p: mnefun params-like object; provides ``work_dir`` and the path
            settings consumed by get_raw_fnames/get_event_fnames.
        subjects: iterable of subject identifiers.
    """
    for si, subject in enumerate(subjects):
        fnames = get_raw_fnames(p, subject, which='raw', erm=False, add_splits=False, run_indices=None)
        # Computed but unused here -- the mne.write_events call below is
        # commented out.
        event_fnames = get_event_fnames(p, subject, run_indices=None)
        for fi, fname in enumerate(fnames):
            raw = mne.io.read_raw_fif(fname, allow_maxshield=True)
            # find four categories of visual events
            # (mne events are rows of [sample, previous value, event id])
            words = mne.find_events(raw, shortest_event=2, mask=1)
            faces = mne.find_events(raw, shortest_event=2, mask=2)
            cars = mne.find_events(raw, shortest_event=2, mask=3)
            alien = mne.find_events(raw, shortest_event=2, mask=4)
            # Keep only events whose id is exactly 3 as cars, then drop any
            # word/face events that collide with them.
            # NOTE(review): np.in1d(x[0], cars) tests the sample against ALL
            # values of the car events (samples, previous values AND ids),
            # not just the sample column -- confirm this over-matching is
            # intended.
            cars = [x for x in cars if x[2] == 3]
            words = [x for x in words if not np.in1d(x[0], cars)]
            faces = [x for x in faces if not np.in1d(x[0], cars)]
            words = np.array(words)
            faces = np.array(faces)
            cars = np.array(cars)
            alien = np.array(alien)
            # check that these events have distinct timestamps
            assert not np.in1d(words[:, 0], cars[:, 0]).any()
            assert not np.in1d(words[:, 0], faces[:, 0]).any()
            assert not np.in1d(cars[:, 0], faces[:, 0]).any()
            assert not np.in1d(alien[:, 0], cars[:, 0]).any()
            assert not np.in1d(alien[:, 0], faces[:, 0]).any()
            assert not np.in1d(alien[:, 0], words[:, 0]).any()
            # find button presses and turn them all into events with a value of 5
            presses = mne.find_events(raw, shortest_event=2, mask=240)
            presses[:, 2] = 5
            # return all events
            events = np.concatenate((words, cars, faces, alien, presses))
            events[:, 2] *= 10
            # mne.write_events(event_fnames[fi], events)
            # write the behavioral data
            hits = []
            misses = []
            correct_rejections = 0
            images = np.concatenate((words, cars, faces))
            all_events = mne.find_events(raw, shortest_event=2)
            # Re-tag button presses as 5, then scale all ids by 10 so
            # presses become 50 below.
            # NOTE(review): `event[0] in presses` tests membership against
            # every value of the 2-D presses array, not only its sample
            # column -- verify this cannot mis-tag a stimulus event.
            for event in all_events:
                if event[0] in presses:
                    event[2] = 5
            all_events[:, 2] *= 10
            for i, event in enumerate(all_events):
                if np.in1d(event[0], alien): # for each alien image
                    if event[0] == all_events[-1, 0]: # if the alien image is the very last event, its a miss
                        print('Miss', event, i)
                        misses.append(event)
                    elif all_events[i+1][2] != 50: # if the next event isn't a button press, it's a miss.
                        print('Miss', event, i)
                        misses.append(event)
                    else: # add the next event to hits if it's a button press
                        if all_events[i+1][2] == 50:
                            hits.append(all_events[i + 1])
                            print('hit: %s' % all_events[i+1], i+1)
                        else:
                            continue
                if np.in1d(event[0], images): # for each regular image
                    try:
                        if all_events[i+1][2] != 50: # if the kid doesn't press a button next
                            correct_rejections += 1 # then it's a correct rejection
                    except IndexError:
                        correct_rejections += 1 # if an image is the last event, then there's no button press following
            # Presses not accounted for as hits are false alarms.
            extras = [x for x in all_events if x[2] == 50 and not np.in1d(x[0], hits)]
            # Reuse the names as counts from here on.
            hits = len(hits)
            misses = len(misses)
            false_alarms = len(extras)
            d_prime = expyfun.analyze.dprime([hits, misses, false_alarms, correct_rejections])
            # Per-subject behavioral summary (binary mode, hence .encode()).
            with open(op.join(p.work_dir, subject, '%s_behavioral.txt' % subject), 'wb') as fid:
                print('writing the behavioral csv at %s' % op.join(subject, '%s_behavioral.txt' % subject))
                fid.write('total_presses, hits, misses, false_alarms, correct_rejections, d_prime: \n'.encode())
                fid.write((('%s, %s, %s, %s, %s, %s' % (len(presses), hits, misses, false_alarms, correct_rejections, d_prime)).encode()))
            # Append one line per run to the shared summary file.
            with open(op.join(p.work_dir,'pre_behavioral.txt'), 'ab') as fid:
                print('Adding a line to the global behavioral file.')
                fid.write((('%s, %s, %s, %s, %s, %s, %s \n' % (subject, len(presses), hits, misses, false_alarms, correct_rejections, d_prime)).encode()))
def pick_cov_events_prek(events):
    """Keep only visual stimulus events, dropping button presses (id 5).

    Args:
        events: iterable of event rows where index 2 holds the event id.

    Returns:
        list of the rows whose event id is not 5.
    """
    return list(filter(lambda row: row[2] != 5, events))
"mne.io.read_raw_fif",
"mnefun._paths.get_raw_fnames",
"mnefun._paths.get_event_fnames",
"expyfun.analyze.dprime",
"mne.find_events",
"numpy.array",
"os.path.join",
"numpy.concatenate",
"numpy.in1d"
] | [((346, 436), 'mnefun._paths.get_raw_fnames', 'get_raw_fnames', (['p', 'subject'], {'which': '"""raw"""', 'erm': '(False)', 'add_splits': '(False)', 'run_indices': 'None'}), "(p, subject, which='raw', erm=False, add_splits=False,\n run_indices=None)\n", (360, 436), False, 'from mnefun._paths import get_raw_fnames, get_event_fnames\n'), ((456, 502), 'mnefun._paths.get_event_fnames', 'get_event_fnames', (['p', 'subject'], {'run_indices': 'None'}), '(p, subject, run_indices=None)\n', (472, 502), False, 'from mnefun._paths import get_raw_fnames, get_event_fnames\n'), ((565, 613), 'mne.io.read_raw_fif', 'mne.io.read_raw_fif', (['fname'], {'allow_maxshield': '(True)'}), '(fname, allow_maxshield=True)\n', (584, 613), False, 'import mne\n'), ((687, 733), 'mne.find_events', 'mne.find_events', (['raw'], {'shortest_event': '(2)', 'mask': '(1)'}), '(raw, shortest_event=2, mask=1)\n', (702, 733), False, 'import mne\n'), ((754, 800), 'mne.find_events', 'mne.find_events', (['raw'], {'shortest_event': '(2)', 'mask': '(2)'}), '(raw, shortest_event=2, mask=2)\n', (769, 800), False, 'import mne\n'), ((820, 866), 'mne.find_events', 'mne.find_events', (['raw'], {'shortest_event': '(2)', 'mask': '(3)'}), '(raw, shortest_event=2, mask=3)\n', (835, 866), False, 'import mne\n'), ((887, 933), 'mne.find_events', 'mne.find_events', (['raw'], {'shortest_event': '(2)', 'mask': '(4)'}), '(raw, shortest_event=2, mask=4)\n', (902, 933), False, 'import mne\n'), ((1138, 1153), 'numpy.array', 'np.array', (['words'], {}), '(words)\n', (1146, 1153), True, 'import numpy as np\n'), ((1174, 1189), 'numpy.array', 'np.array', (['faces'], {}), '(faces)\n', (1182, 1189), True, 'import numpy as np\n'), ((1209, 1223), 'numpy.array', 'np.array', (['cars'], {}), '(cars)\n', (1217, 1223), True, 'import numpy as np\n'), ((1244, 1259), 'numpy.array', 'np.array', (['alien'], {}), '(alien)\n', (1252, 1259), True, 'import numpy as np\n'), ((1804, 1852), 'mne.find_events', 'mne.find_events', (['raw'], 
{'shortest_event': '(2)', 'mask': '(240)'}), '(raw, shortest_event=2, mask=240)\n', (1819, 1852), False, 'import mne\n'), ((1937, 1989), 'numpy.concatenate', 'np.concatenate', (['(words, cars, faces, alien, presses)'], {}), '((words, cars, faces, alien, presses))\n', (1951, 1989), True, 'import numpy as np\n'), ((2221, 2257), 'numpy.concatenate', 'np.concatenate', (['(words, cars, faces)'], {}), '((words, cars, faces))\n', (2235, 2257), True, 'import numpy as np\n'), ((2283, 2321), 'mne.find_events', 'mne.find_events', (['raw'], {'shortest_event': '(2)'}), '(raw, shortest_event=2)\n', (2298, 2321), False, 'import mne\n'), ((3981, 4053), 'expyfun.analyze.dprime', 'expyfun.analyze.dprime', (['[hits, misses, false_alarms, correct_rejections]'], {}), '([hits, misses, false_alarms, correct_rejections])\n', (4003, 4053), False, 'import expyfun\n'), ((2539, 2563), 'numpy.in1d', 'np.in1d', (['event[0]', 'alien'], {}), '(event[0], alien)\n', (2546, 2563), True, 'import numpy as np\n'), ((3339, 3364), 'numpy.in1d', 'np.in1d', (['event[0]', 'images'], {}), '(event[0], images)\n', (3346, 3364), True, 'import numpy as np\n'), ((4077, 4136), 'os.path.join', 'op.join', (['p.work_dir', 'subject', "('%s_behavioral.txt' % subject)"], {}), "(p.work_dir, subject, '%s_behavioral.txt' % subject)\n", (4084, 4136), True, 'import os.path as op\n'), ((4535, 4576), 'os.path.join', 'op.join', (['p.work_dir', '"""pre_behavioral.txt"""'], {}), "(p.work_dir, 'pre_behavioral.txt')\n", (4542, 4576), True, 'import os.path as op\n'), ((1030, 1049), 'numpy.in1d', 'np.in1d', (['x[0]', 'cars'], {}), '(x[0], cars)\n', (1037, 1049), True, 'import numpy as np\n'), ((1096, 1115), 'numpy.in1d', 'np.in1d', (['x[0]', 'cars'], {}), '(x[0], cars)\n', (1103, 1115), True, 'import numpy as np\n'), ((1347, 1379), 'numpy.in1d', 'np.in1d', (['words[:, 0]', 'cars[:, 0]'], {}), '(words[:, 0], cars[:, 0])\n', (1354, 1379), True, 'import numpy as np\n'), ((1409, 1442), 'numpy.in1d', 'np.in1d', (['words[:, 0]', 'faces[:, 
0]'], {}), '(words[:, 0], faces[:, 0])\n', (1416, 1442), True, 'import numpy as np\n'), ((1472, 1504), 'numpy.in1d', 'np.in1d', (['cars[:, 0]', 'faces[:, 0]'], {}), '(cars[:, 0], faces[:, 0])\n', (1479, 1504), True, 'import numpy as np\n'), ((1534, 1566), 'numpy.in1d', 'np.in1d', (['alien[:, 0]', 'cars[:, 0]'], {}), '(alien[:, 0], cars[:, 0])\n', (1541, 1566), True, 'import numpy as np\n'), ((1596, 1629), 'numpy.in1d', 'np.in1d', (['alien[:, 0]', 'faces[:, 0]'], {}), '(alien[:, 0], faces[:, 0])\n', (1603, 1629), True, 'import numpy as np\n'), ((1659, 1692), 'numpy.in1d', 'np.in1d', (['alien[:, 0]', 'words[:, 0]'], {}), '(alien[:, 0], words[:, 0])\n', (1666, 1692), True, 'import numpy as np\n'), ((4211, 4258), 'os.path.join', 'op.join', (['subject', "('%s_behavioral.txt' % subject)"], {}), "(subject, '%s_behavioral.txt' % subject)\n", (4218, 4258), True, 'import os.path as op\n'), ((3835, 3854), 'numpy.in1d', 'np.in1d', (['x[0]', 'hits'], {}), '(x[0], hits)\n', (3842, 3854), True, 'import numpy as np\n')] |
import os
import numpy as np
import matplotlib.pyplot as plt
import struct
import h5py
import numpy as np
import matplotlib.pyplot as plt
import pickle
from extractModel_mappings import allparams_from_mapping
import subprocess
import csv
import bluepyopt as bpop
import shutil, errno
import pandas as pd
#os.chdir("NeuroGPU/NeuroGPU_Base/VS/pyNeuroGPU_win2/NeuroGPU6/python/")
## set up filepaths
paramsCSV = '../params/params.csv'
data_dir = '../Data/'
run_dir = '../bin'
vs_fn = '/tmp/Data/VHotP'
ntimestep = 100000  # samples per simulated voltage trace
nstims = 1
# Count visible GPUs. CUDA_VISIBLE_DEVICES is a comma-separated list of
# device ids; split on ',' so multi-digit ids (e.g. "10,11") count as one
# device each. The previous per-character count ("10,11" -> 4) miscounted
# any id above 9. An unset variable still raises KeyError, as before.
nGpus = len([devicenum for devicenum in os.environ['CUDA_VISIBLE_DEVICES'].split(',') if devicenum.strip()])
if not os.path.isdir('/tmp/Data'):
    os.mkdir('/tmp/Data')
def copyanything(src, dst):
    """Copy ``src`` to ``dst`` whether it is a directory tree or a single file.

    Attempts a recursive directory copy first; if ``src`` turns out to be a
    plain file (ENOTDIR), falls back to a flat file copy. Any other OS error
    propagates unchanged.
    """
    try:
        shutil.copytree(src, dst)
    except OSError as exc:
        # Guard clause: only the "source is a file" case is recoverable.
        if exc.errno != errno.ENOTDIR:
            raise
        shutil.copy(src, dst)
def nrnMread(fileName):
    """Read a neuroGPU binary dump of float64 samples.

    File layout: a 4-byte int (param count) and a 4-byte int (type flag),
    both in native byte order, followed by the payload as float64. The two
    header values are read and discarded.

    Args:
        fileName: path to the binary .dat file.

    Returns:
        1-D np.ndarray of float64 containing all remaining samples.
    """
    # Context manager fixes the file-handle leak in the original version,
    # which opened the file and never closed it.
    with open(fileName, "rb") as f:
        struct.unpack('i', f.read(4))  # nparam (unused)
        struct.unpack('i', f.read(4))  # typeFlg (unused)
        return np.fromfile(f, np.double)
def nrnMreadH5(fileName):
    """Read the first row of the 'Data' dataset from an HDF5 voltage file.

    Args:
        fileName: path to the .h5 file written by neuroGPU.

    Returns:
        np.ndarray copy of f['Data'][0]. The copy is taken before returning
        so the HDF5 file handle can be closed (the original leaked it).
    """
    with h5py.File(fileName, 'r') as f:
        dat = f['Data'][:][0]
    return np.array(dat)
def readParamsCSV(fileName):
    """Load parameter rows from a CSV file.

    Only the columns 'Param name', 'Base value', 'Lower bound' and
    'Upper bound' are read; whitespace after delimiters is ignored.

    Args:
        fileName: path to the parameters CSV.

    Returns:
        list of 4-tuples (name, base, lower, upper), one per row, in file order.
    """
    wanted = ['Param name', 'Base value', 'Lower bound', 'Upper bound']
    frame = pd.read_csv(fileName, skipinitialspace=True, usecols=wanted)
    return [tuple(row) for row in frame.values]
def run_model(stim_ind,real_ind):
    """Launch one neuroGPU simulation as a subprocess and wait for it.

    Streams the child's combined stdout/stderr to this process's stdout
    while it runs, then blocks until the child exits.

    Parameters
    -------------------------------------------------------
    stim_ind: stimulus slot index, passed as argv[1] to ../bin/neuroGPU
    real_ind: actual stimulus index; currently unused (the output-file
        rename below is commented out)

    Returns
    ---------------------------------------------------------
    p_object: the finished subprocess.Popen object
    """
    global_rank = 0
    volts_fn = vs_fn + str(stim_ind) + '.h5'
    # Stale-output cleanup is disabled; this branch is currently a no-op.
    if os.path.exists(volts_fn):
        #print("removing ", volts_fn, " from ", global_rank)
        #os.remove(volts_fn)
        pass
    #!{'../bin/neuroGPU'+str(global_rank),str(stim_ind), str(global_rank)}
    p_object = subprocess.Popen(['../bin/neuroGPU',str(stim_ind)],
                        stdout=subprocess.PIPE,
                        stderr=subprocess.STDOUT, # <-- redirect stderr to stdout
                        bufsize=1)
    # Echo child output line by line until EOF, then reap the process.
    with p_object.stdout:
        for line in iter(p_object.stdout.readline, b''):
            print(line),
    p_object.wait()
    print(p_object.stderr)  # None: stderr was merged into stdout above
    #os.rename(volts_fn,'/tmp/Data/VHotP'+str(real_ind)+'.h5')
    return p_object
def stim_swap(idx, i):
    """Present stimulus ``i`` under slot ``idx`` for neuroGPU to read.

    neuroGPU reads stimuli by slot number (the stim index modulo 8, per the
    original note), so e.g. stimulus 13 must appear as Stim_raw5. Any files
    already occupying the slot are deleted first, then the stimulus and time
    files for ``i`` are renamed into the slot.
    """
    slot_stim = '../Data/Stim_raw' + str(idx) + '.csv'
    slot_time = '../Data/times' + str(idx) + '.csv'
    if os.path.exists(slot_stim):
        os.remove(slot_stim)
        os.remove(slot_time)
    os.rename('../Data/Stim_raw' + str(i) + '.csv', slot_stim)
    os.rename('../Data/times' + str(i) + '.csv', slot_time)
def getVolts(idx):
    """Load the voltage traces for stimulus ``idx`` and reshape them.

    Reads the HDF5 output file for this stimulus via nrnMreadH5 and reshapes
    the flat sample vector to (n_traces, ntimestep).
    """
    volt_path = vs_fn + str(idx) + '.h5'
    flat = nrnMreadH5(volt_path)
    n_traces = int(len(flat) / ntimestep)
    return np.reshape(flat, [n_traces, ntimestep])
def main():
    """Smoke-test driver: run neuroGPU on stimulus 0 and print the result.

    Loads the parameter table, launches a single simulation, then reads the
    binary voltage output and prints its maximum value.
    """
    nstims = 8 # only running first 8 stims
    ###### TEN COPIES OF ORIG PARAMS FOR DEBUG #################
    param_values = np.genfromtxt(paramsCSV)
    print(param_values.shape, " : param value shape")
    ###### CREATE MAPPING #################
    #allparams_from_mapping(param_values)
    run_model(0,0)
    data = nrnMread("../Data/VHotP0.dat")
    print(np.max(data))
    # (disabled) multi-stim dispatch: with one GPU, swap each stimulus into
    # slot 0 before running; with several GPUs, run each on its own index.
    # # run neuroGPU -- set up for either ONE gpu or 8
    # if nGpus == 1:
    #     for i in range(0,nstims):
    #         if i != 0:
    #             # swaps stim0.csv and times0.csv for stimi.csv and timesi.csv
    #             stim_swap(0,i)
    #         # run ... wait is built into run model
    #         run_model(0,i)
    #     else:
    #         run_model(0,0)
    # else:
    #     for i in range(0,nstims):
    #         # run ... wait is built into run model
    #         run_model(i,i)
if __name__ == "__main__":
    main()
# Leftover (disabled) cleanup snippet: would move fresh results from
# /tmp/Data back into ../Data after a run.
# for file in os.listdir('../Data'):
#     if 'h5' in file:
#         print("replacing: ", file)
#         os.remove('../Data/' + file)
# for file in os.listdir('/tmp/Data'):
#     shutil.move("/tmp/Data/"+ file, "../Data/" + file)
"os.mkdir",
"h5py.File",
"os.remove",
"os.path.isdir",
"numpy.fromfile",
"pandas.read_csv",
"os.path.exists",
"numpy.genfromtxt",
"numpy.max",
"numpy.array",
"numpy.reshape",
"shutil.copytree",
"shutil.copy"
] | [((642, 668), 'os.path.isdir', 'os.path.isdir', (['"""/tmp/Data"""'], {}), "('/tmp/Data')\n", (655, 668), False, 'import os\n'), ((674, 695), 'os.mkdir', 'os.mkdir', (['"""/tmp/Data"""'], {}), "('/tmp/Data')\n", (682, 695), False, 'import os\n'), ((1067, 1092), 'numpy.fromfile', 'np.fromfile', (['f', 'np.double'], {}), '(f, np.double)\n', (1078, 1092), True, 'import numpy as np\n'), ((1128, 1152), 'h5py.File', 'h5py.File', (['fileName', '"""r"""'], {}), "(fileName, 'r')\n", (1137, 1152), False, 'import h5py\n'), ((1189, 1202), 'numpy.array', 'np.array', (['dat'], {}), '(dat)\n', (1197, 1202), True, 'import numpy as np\n'), ((1314, 1374), 'pandas.read_csv', 'pd.read_csv', (['fileName'], {'skipinitialspace': '(True)', 'usecols': 'fields'}), '(fileName, skipinitialspace=True, usecols=fields)\n', (1325, 1374), True, 'import pandas as pd\n'), ((1868, 1892), 'os.path.exists', 'os.path.exists', (['volts_fn'], {}), '(volts_fn)\n', (1882, 1892), False, 'import os\n'), ((2918, 2942), 'os.path.exists', 'os.path.exists', (['old_stim'], {}), '(old_stim)\n', (2932, 2942), False, 'import os\n'), ((3510, 3549), 'numpy.reshape', 'np.reshape', (['curr_volts', '[Nt, ntimestep]'], {}), '(curr_volts, [Nt, ntimestep])\n', (3520, 3549), True, 'import numpy as np\n'), ((3720, 3744), 'numpy.genfromtxt', 'np.genfromtxt', (['paramsCSV'], {}), '(paramsCSV)\n', (3733, 3744), True, 'import numpy as np\n'), ((748, 773), 'shutil.copytree', 'shutil.copytree', (['src', 'dst'], {}), '(src, dst)\n', (763, 773), False, 'import shutil, errno\n'), ((2952, 2971), 'os.remove', 'os.remove', (['old_stim'], {}), '(old_stim)\n', (2961, 2971), False, 'import os\n'), ((2980, 2999), 'os.remove', 'os.remove', (['old_time'], {}), '(old_time)\n', (2989, 2999), False, 'import os\n'), ((3966, 3978), 'numpy.max', 'np.max', (['data'], {}), '(data)\n', (3972, 3978), True, 'import numpy as np\n'), ((866, 887), 'shutil.copy', 'shutil.copy', (['src', 'dst'], {}), '(src, dst)\n', (877, 887), False, 'import shutil, 
errno\n')] |
import numpy as np
import os
import pandas as pd
import pytest
import torch
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.linear_model import LogisticRegression
from tx2.wrapper import Wrapper
@pytest.fixture
def replacement_debounce():
    """Fixture supplying a pass-through replacement for utils.debounce.

    The replacement has the same decorator-factory shape but runs the
    wrapped function immediately, so errors surface instead of being
    swallowed by the real debouncing.
    """
    def fake_debounce(wait):
        def wrap(fn):
            def run_now(*args, **kwargs):
                print("WE'RE DOING IT LIVE")
                fn(*args, **kwargs)
            return run_now
        return wrap
    return fake_debounce
@pytest.fixture
def dummy_df():
    """A four-row text/target dataframe: two rows per class (0 and 1)."""
    return pd.DataFrame(
        {
            "text": [
                "testing row 0",
                "testing row 1",
                "testing row 2, awesome",
                "testing row 3, awesome",
            ],
            "target": [0, 0, 1, 1],
        }
    )
@pytest.fixture
def dummy_np_data(dummy_df):
    """The dummy dataframe's text and target columns as numpy arrays."""
    return np.array(dummy_df.text), np.array(dummy_df.target)
@pytest.fixture
def dummy_clusters():
    # Cluster label -> member row indices; mirrors the two classes of dummy_df.
    return {"0": [0, 1], "1": [2, 3]}
@pytest.fixture
def dummy_embeddings():
    # (points, xs, ys): one 2-D coordinate pair per dummy_df row, plus the
    # same coordinates split into separate x and y vectors.
    return (
        np.array([[0.1, 0.2], [-0.1, -0.1], [0.5, -0.8], [0.4, -0.9]]),
        np.array([0.1, -0.1, 0.5, 0.4]),
        np.array([0.2, -0.1, -0.8, -0.9]),
    )
@pytest.fixture
def dummy_encodings():
    # Class-name -> integer label mapping, passed to the Wrapper fixture.
    return {"awesome": 0, "not-awesome": 1}
@pytest.fixture
def dummy_model(dummy_df):
    """A tiny bag-of-words + logistic-regression model fit on dummy_df.

    Exposes the four ``custom_*`` callables assigned to the Wrapper fixture:
    encode, classify, embedding (identity over count vectors), and soft
    classify.
    """

    class model:
        def __init__(self, df):
            # Fit a count vectorizer and logistic regression on the dummy text.
            self.vectorizer = CountVectorizer(stop_words="english")
            features = self.vectorizer.fit_transform(df.text)
            self.clf = LogisticRegression()
            self.clf.fit(features, df.target)

        def custom_encode(self, text):
            # Single text -> 1-D tensor of token counts.
            counts = self.vectorizer.transform([text]).toarray()
            return torch.squeeze(torch.tensor(counts))

        def custom_classify(self, inputs):
            return torch.tensor(self.clf.predict(inputs))

        def custom_embedding(self, inputs):
            # Identity: the count vectors double as "embeddings".
            return inputs

        def custom_softclassify(self, inputs):
            return torch.tensor(self.clf.predict_proba(inputs))

    return model(dummy_df)
@pytest.fixture(scope="function")
def dummy_wrapper(dummy_df, dummy_encodings, dummy_model, clear_files_teardown):
    # Fully-wired tx2 Wrapper over the dummy data: trains and tests on the
    # same four rows, caching artifacts under ./testdata (removed afterwards
    # by the clear_files_teardown session fixture).
    wrapper = Wrapper(
        train_texts=dummy_df.text,
        train_labels=dummy_df.target,
        test_texts=dummy_df.text,
        test_labels=dummy_df.target,
        encodings=dummy_encodings,
        cache_path="testdata",
    )
    wrapper.encode_function = dummy_model.custom_encode
    wrapper.classification_function = dummy_model.custom_classify
    wrapper.embedding_function = dummy_model.custom_embedding
    wrapper.soft_classification_function = dummy_model.custom_softclassify
    # NOTE(review): n_neighbors=2 presumably keeps the projection happy with
    # only 4 samples -- confirm against tx2/UMAP requirements.
    wrapper.prepare(umap_args=dict(n_neighbors=2))
    return wrapper
@pytest.fixture(scope="session")
def clear_files_teardown():
    """Session-scoped fixture that wipes the on-disk test cache after the run.

    Yields immediately so tests execute first; cleanup happens once when the
    whole session finishes.
    """
    yield None
    # shutil.rmtree is portable and does not spawn a shell, unlike the
    # previous os.system("rm -rf testdata/") call; ignore_errors keeps the
    # teardown best-effort if the directory was never created.
    import shutil
    shutil.rmtree("testdata", ignore_errors=True)
| [
"pandas.DataFrame",
"sklearn.feature_extraction.text.CountVectorizer",
"tx2.wrapper.Wrapper",
"pytest.fixture",
"os.system",
"torch.squeeze",
"sklearn.linear_model.LogisticRegression",
"numpy.array"
] | [((2234, 2266), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""function"""'}), "(scope='function')\n", (2248, 2266), False, 'import pytest\n'), ((2922, 2953), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""session"""'}), "(scope='session')\n", (2936, 2953), False, 'import pytest\n'), ((897, 915), 'pandas.DataFrame', 'pd.DataFrame', (['rows'], {}), '(rows)\n', (909, 915), True, 'import pandas as pd\n'), ((975, 998), 'numpy.array', 'np.array', (['dummy_df.text'], {}), '(dummy_df.text)\n', (983, 998), True, 'import numpy as np\n'), ((1013, 1038), 'numpy.array', 'np.array', (['dummy_df.target'], {}), '(dummy_df.target)\n', (1021, 1038), True, 'import numpy as np\n'), ((2362, 2540), 'tx2.wrapper.Wrapper', 'Wrapper', ([], {'train_texts': 'dummy_df.text', 'train_labels': 'dummy_df.target', 'test_texts': 'dummy_df.text', 'test_labels': 'dummy_df.target', 'encodings': 'dummy_encodings', 'cache_path': '"""testdata"""'}), "(train_texts=dummy_df.text, train_labels=dummy_df.target, test_texts\n =dummy_df.text, test_labels=dummy_df.target, encodings=dummy_encodings,\n cache_path='testdata')\n", (2369, 2540), False, 'from tx2.wrapper import Wrapper\n'), ((3001, 3030), 'os.system', 'os.system', (['"""rm -rf testdata/"""'], {}), "('rm -rf testdata/')\n", (3010, 3030), False, 'import os\n'), ((1207, 1269), 'numpy.array', 'np.array', (['[[0.1, 0.2], [-0.1, -0.1], [0.5, -0.8], [0.4, -0.9]]'], {}), '([[0.1, 0.2], [-0.1, -0.1], [0.5, -0.8], [0.4, -0.9]])\n', (1215, 1269), True, 'import numpy as np\n'), ((1279, 1310), 'numpy.array', 'np.array', (['[0.1, -0.1, 0.5, 0.4]'], {}), '([0.1, -0.1, 0.5, 0.4])\n', (1287, 1310), True, 'import numpy as np\n'), ((1320, 1353), 'numpy.array', 'np.array', (['[0.2, -0.1, -0.8, -0.9]'], {}), '([0.2, -0.1, -0.8, -0.9])\n', (1328, 1353), True, 'import numpy as np\n'), ((1570, 1607), 'sklearn.feature_extraction.text.CountVectorizer', 'CountVectorizer', ([], {'stop_words': '"""english"""'}), "(stop_words='english')\n", (1585, 1607), 
False, 'from sklearn.feature_extraction.text import CountVectorizer\n'), ((1688, 1708), 'sklearn.linear_model.LogisticRegression', 'LogisticRegression', ([], {}), '()\n', (1706, 1708), False, 'from sklearn.linear_model import LogisticRegression\n'), ((1891, 1917), 'torch.squeeze', 'torch.squeeze', (['transformed'], {}), '(transformed)\n', (1904, 1917), False, 'import torch\n')] |
from utils_tf_record.read_dataset_utils import read_and_parse_sharded_dataset
import os
import itertools
import random
import tensorflow as tf
import numpy as np
import pandas as pd
import math
from tqdm import tqdm
from matplotlib import pyplot as plt
from matplotlib.colors import ListedColormap
import seaborn as sns
from numpy.polynomial.polynomial import polyfit
from collections import defaultdict
from sklearn.cluster import KMeans
from sklearn.preprocessing import MinMaxScaler, OneHotEncoder
from sklearn.metrics import silhouette_score
import pyximport
# pyximport.install()
# from compute_overlap import compute_overlap
# Force CPU-only execution and eager mode (TF1-compat API).
os.environ["CUDA_VISIBLE_DEVICES"] = "-1"
tf.compat.v1.enable_eager_execution()
# Sharded TFRecord inputs and the CSV this script reads/writes.
FILENAME_PATTERN = "data/camera_data_lidarGB/training/*"
OUTPUT_FILE = 'boxes_all.csv'
# Set to True to regenerate OUTPUT_FILE from the TFRecords before analysis.
GENERATE_DATASET = False
def generate_dataset():
    """Flatten the sharded TFRecord dataset into one CSV line per bounding box.

    Reads camera frames via read_and_parse_sharded_dataset, converts the
    normalized box coordinates back to pixel units, then appends every box's
    fields to OUTPUT_FILE as a side effect of tf.numpy_function calls driven
    by dataset.reduce.
    """
    dataset = read_and_parse_sharded_dataset(FILENAME_PATTERN)
    def generate(x):
        # One frame -> a sub-dataset with one element per bounding box.
        segment_id = x['image/source_id']
        image_camera_name = x['image/camera_name']
        timestamp_micros = x['image/frame_timestamp_micros']
        # Composite frame id: <segment>_<camera>_<timestamp>, as bytes.
        source_id = segment_id + "_".encode() + str(image_camera_name).encode() + "_".encode() + str(
            timestamp_micros).encode()
        image_height = tf.cast(x['image/height'], tf.float32)
        image_width = tf.cast(x['image/width'], tf.float32)
        labels = tf.cast(x['image/object/class/label'], tf.float32)
        # Box coordinates are stored normalized; scale back to pixels.
        xmax = tf.cast(x['image/object/bbox/xmax'], tf.float32) * image_width
        xmin = tf.cast(x['image/object/bbox/xmin'], tf.float32) * image_width
        ymax = tf.cast(x['image/object/bbox/ymax'], tf.float32) * image_height
        ymin = tf.cast(x['image/object/bbox/ymin'], tf.float32) * image_height
        boxes = tf.stack([labels, xmax, xmin, ymax, ymin], axis=1)
        def _generator(x):
            # One [label, xmax, xmin, ymax, ymin] row -> feature dict.
            _features = {
                'source_id': source_id,
                'label': tf.slice(x, (0,), (1,)),
                'xmax': tf.slice(x, (1,), (1,)),
                'xmin': tf.slice(x, (2,), (1,)),
                'ymax': tf.slice(x, (3,), (1,)),
                'ymin': tf.slice(x, (4,), (1,)),
                'image_width': [image_width],
                'image_height': [image_height]
            }
            return _features
        boxes = tf.data.Dataset.from_tensor_slices(boxes).map(_generator)
        return boxes
    dataset = dataset.flat_map(generate)
    csv_headers = ','.join(
        ['source_id', 'label', 'xmax', 'xmin', 'ymax', 'ymin', 'image_width', 'image_height'], ) + '\n'
    with open(OUTPUT_FILE, "w") as text_file:
        text_file.write(csv_headers)
    def write_to_csv(input, index, sep):
        # Append one field plus separator to OUTPUT_FILE.
        # NOTE(review): index < 0 writes the literal 'NO_ID' -- the actual
        # source_id tensor is never written out; confirm this is intended.
        if index < 0:
            v = 'NO_ID'
        else:
            v = str(input[index])
        v = v + sep.decode()
        with open(OUTPUT_FILE, "a") as text_file:
            text_file.write(v)
        return v
    def reduce_to_csv(csv_lines, example):
        # Emit one box's fields in CSV column order via side effects; the
        # accumulator string itself is never modified.
        keys = [
            ('source_id', -1, ','),
            ('label', 0, ','),
            ('xmax', 0, ','),
            ('xmin', 0, ','),
            ('ymax', 0, ','),
            ('ymin', 0, ','),
            ('image_width', 0, ','),
            ('image_height', 0, '\n'),
        ]
        for i in range(len(keys)):
            k, index, sep = keys[i]
            _ = tf.numpy_function(write_to_csv, [example[k], index, sep], tf.string)
        # csv_lines = csv_lines + ','.join([str(tf.numpy_function(write_to_csv, [example[k]], tf.string)) if k is not 'source_id' else str(example[k]) for k in keys]) + '\n'
        return csv_lines
    csv_lines = dataset.reduce('', reduce_to_csv)
# Optionally rebuild the CSV from the TFRecords, then load it for analysis.
if GENERATE_DATASET:
    generate_dataset()
sample_frac = 1
print('Read csv')
df = pd.read_csv(OUTPUT_FILE)
print(df.shape[0], 'objects.')
# _ = df.pop('source_id')
print(df.head())
# Anchor-generator reference values used to derive scale/aspect-ratio stats.
anchor_base = 256
aspect_ratios = [0.5, 1, 2]  # [0.3, 1, 1.5, 2] #[0.5, 1, 2]
scales = [0.25, 0.5, 1, 2]  # [0.005, 0.01, 0.5, 1, 2, 8, 13]#[0.25, 0.5, 1, 2]
def closer_to(data, keys):
    """Snap each value in *data* to the nearest value in *keys*.

    Ties go to the earliest key in *keys* (matching ``min``'s behaviour).
    Returns an ndarray shaped like *data*.
    """
    data = np.asarray(data)
    keys = np.asarray(keys)
    # Vectorized nearest-neighbour lookup instead of np.vectorize (which is
    # a per-element Python loop): compute |data - key| for every pair, then
    # take the index of the smallest distance along the key axis. argmin
    # returns the first minimum, preserving the original tie-breaking.
    nearest_idx = np.abs(data[..., np.newaxis] - keys).argmin(axis=-1)
    return keys[nearest_idx]
# print('\nGenerate new attributes')
# df['xcenter'] = (df['xmax'] + df['xmin']) / 2
# df['ycenter'] = (df['ymax'] + df['ymin']) / 2
# df['width'] = df['xmax'] - df['xmin']
# df['height'] = df['ymax'] - df['ymin']
# df['aspect_ratio'] = df['width'] / df['height']
# df['aspect_ratio_closer'] = closer_to(df['aspect_ratio'], aspect_ratios)
# Scale of each box relative to the anchor base: height * sqrt(AR) / 256.
# NOTE(review): the active line below reads df['height'] and
# df['aspect_ratio'], which are only created by the commented-out lines
# above -- this will KeyError unless the CSV already has those columns.
df['scale'] = df['height'] * np.sqrt(df[
    'aspect_ratio']) / anchor_base # df['height'] * np.sqrt(df['aspect_ratio_closer']) / anchor_base # df['height'] * np.sqrt(df['aspect_ratio']) / anchor_base
# df['scale_closer'] = closer_to(df['scale'], scales)
#
# print('and remove old ones')
# _ = [df.pop(k) for k in ['xmax', 'xmin', 'ymax', 'ymin']]
#
# print("Remove useless anchors")
#
# print("")
# useless = df[(df['height'] < 16) | (df['width'] < 16)]
# print(len(useless), 'useless')
# df = df[(df['height'] > 5) & (df['width'] > 5)]
# df = df.sample(frac=sample_frac)
# print(df.shape[0], 'sample objects.')
#
# print(df.head())
# Per-class plot colours and display names (Waymo-style label ids 1/2/3).
colors = {1.: 'red', 2.: 'green', 3.: 'blue'}
boxlabels = {1.: 'VEHICLE', 2.: 'PEDESTRIAN', 3.: 'CYCLIST'}
def study_width_height_position():
    """Exploratory plots of box size versus image position.

    Works on the module-level DataFrame ``df``. Fits regression lines of
    vertical position against box height for the frontal (1920 x 1280) and
    lateral (1920 x 886) cameras, compares them against a pinhole-geometry
    prediction (v / v_min / v_max), and draws assorted per-class scatter
    plots and aspect-ratio/scale histograms. Several figures are also saved
    to PNG files in the working directory.
    """
    # fig, ax = plt.subplots(4, 1, figsize=(5, 10))
    # ax[0].hist(df.width, bins=800)
    # ax[1].hist(df.width, bins=125, range=(0, 250))
    # ax[2].hist(df.width, bins=25, range=(0, 25))
    # ax[3].hist(df.width, bins=5, range=(0, 5))
    # ax[0].set_title("Width histogram")
    # fig.show()
    # fig, ax = plt.subplots(4, 1, figsize=(5, 10))
    # ax[0].hist(df.height, bins=800)
    # ax[1].hist(df.height, bins=125, range=(0, 250))
    # ax[2].hist(df.height, bins=25, range=(0, 25))
    # ax[3].hist(df.height, bins=5, range=(0, 5))
    # ax[0].set_title('Height histogram')
    # fig.show()
    # Camera-geometry constants for the v/v_min/v_max prediction below.
    h = 1.54  # 1.65 Height of the camera from the ground
    H = np.mean([1280, 886])  # 375# Height of the image
    Hv = 1.6  # Average height of the vehicles in the real world
    max_var_v = 0.4  # 0.4 Max variation of the height of the vehicle
    f = 721.54 / 512  # f = Focal length
    p = 1 / 1280  # p = Size of each pixel
    f_p = 721.54  # f/p
    alpha = 2  # 2 Maximunm relative pitch angle between the camera and the ground plane
    # p: Size of each pixel
    # Hb: Height of the bounding box
    def v(Hb, H=H):
        # Predicted vertical position of a box of height Hb.
        Hb = np.asarray(Hb)
        return ((h - Hv / 2) / Hv) * Hb + H / 2
    def v_min(Hb, H=H):
        # Lower bound given vehicle-height variation and pitch angle.
        Hb = np.asarray(Hb)
        return ((h - (Hv + max_var_v) / 2) / (Hv + max_var_v)) * Hb - math.tan(math.radians(alpha)) * f_p + H / 2
    def v_max(Hb, H=H):
        # Upper bound given vehicle-height variation and pitch angle.
        Hb = np.asarray(Hb)
        return ((h - (Hv - max_var_v) / 2) / (Hv - max_var_v)) * Hb + math.tan(math.radians(alpha)) * f_p + H / 2
    # b, m = polyfit(df['height'], df['ycenter'], 1)
    # # plt.plot([0,1280], 1/2*np.asarray([0,1280]), linestyle='--', c='r', alpha=0.5)
    # # plt.plot([0,1280], -1/2*np.asarray([0,1280])+1280, linestyle='--', c='r', alpha=0.5)
    # fig = plt.scatter(x=df['height'], y=df['ycenter'], s=0.01)
    # plt.plot([0, 1000], b + m * np.asarray([0, 1000]), '-', c='grey', label='reg. line')
    # # plt.plot([0, 1200],v([0, 1200]), c='green', label='v')
    # # plt.plot([0, 1200],v_min([0, 1200]), c='orange', label='vmin')
    # # plt.plot([0, 1000],v_max([0, 1000]), c='orange', label='vmax')
    # plt.xlabel("Height")
    # plt.ylabel("Vertical Position")
    # plt.legend()
    # plt.show()
    # Frontal cameras (1920 x 1280): height vs vertical position + regression.
    b1, m1 = polyfit(df[df['image_height'] == 1280]['height'], df[df['image_height'] == 1280]['ycenter'], 1)
    fig = plt.figure()
    plt.scatter(x=df[df['image_height'] == 1280]['height'], y=df[df['image_height'] == 1280]['ycenter'], s=0.001)
    plt.plot([0, 1200], b1 + m1 * np.asarray([0, 1200]), '--', c='grey', label='reg. line')
    # plt.plot([0, 1200],v([0, 1200], H=1280), c='green', label='v')
    # plt.plot([0, 1200],v_min([0, 1200], H=1280), c='orange', label='vmin')
    # plt.plot([0, 1000],v_max([0, 1000], H=1280), c='orange', label='vmax')
    # print(plt.ylim(), plt.xlim())
    # plt.ylim(top=1300)
    # print(plt.ylim())
    plt.axis('tight')
    plt.xlabel("Object Height", fontsize=14)
    plt.ylabel("Vertical Position", fontsize=14)
    plt.title('Frontal cameras (1920 x 1280)', fontsize=16)
    plt.legend(loc='upper right')
    fig.savefig('frontal_camera.png', format='png', dpi=500, bbox_inches='tight')
    plt.show()
    # print((df['image_height'] == 1280).count())
    # Lateral cameras (1920 x 886): same plot for the other image height.
    b, m = polyfit(df[df['image_height'] != 1280]['height'], df[df['image_height'] != 1280]['ycenter'], 1)
    # fig2 = plt.figure(figsize=(0.5, 0.5))
    fig2 = plt.figure()
    plt.scatter(x=df[df['image_height'] != 1280]['height'], y=df[df['image_height'] != 1280]['ycenter'], s=0.001)
    plt.plot([0, 800], b + m * np.asarray([0, 800]), '--', c='grey', label='reg. line')
    # plt.plot([0, 886],v([0, 886], H=886), c='green', label='v')
    # plt.plot([0, 886],v_min([0, 886], H=886), c='orange', label='vmin')
    # plt.plot([0, 886],v_max([0, 886], H=886), c='orange', label='vmax')
    plt.xlabel("Height", fontsize=14)
    plt.ylabel("Vertical Position", fontsize=14)
    plt.title('Lateral cameras (1920 x 886)', fontsize=16)
    plt.legend()
    plt.show()
    fig2.savefig('lateral_camera.png', format='png', dpi=500, bbox_inches='tight')
    # Width vs vertical position, lateral then frontal cameras.
    b, m = polyfit(df[df['image_height'] != 1280]['width'], df[df['image_height'] != 1280]['ycenter'], 1)
    # fig2 = plt.figure(figsize=(0.5, 0.5))
    fig2 = plt.figure()
    plt.scatter(x=df[df['image_height'] != 1280]['width'], y=df[df['image_height'] != 1280]['ycenter'], s=0.001)
    plt.plot([0, 800], b + m * np.asarray([0, 800]), '-', c='grey', label='reg. line')
    # plt.plot([0, 886],v([0, 886], H=886), c='green', label='v')
    # plt.plot([0, 886],v_min([0, 886], H=886), c='orange', label='vmin')
    # plt.plot([0, 886],v_max([0, 886], H=886), c='orange', label='vmax')
    plt.xlabel("Width", fontsize=14)
    plt.ylabel("Vertical Position", fontsize=14)
    plt.title('Lateral cameras (1920 x 886)', fontsize=16)
    plt.show()
    b, m = polyfit(df[df['image_height'] == 1280]['width'], df[df['image_height'] == 1280]['ycenter'], 1)
    # fig2 = plt.figure(figsize=(0.5, 0.5))
    fig2 = plt.figure()
    plt.scatter(x=df[df['image_height'] == 1280]['width'], y=df[df['image_height'] == 1280]['ycenter'], s=0.001)
    plt.plot([0, 800], b + m * np.asarray([0, 800]), '-', c='grey', label='reg. line')
    # plt.plot([0, 886],v([0, 886], H=886), c='green', label='v')
    # plt.plot([0, 886],v_min([0, 886], H=886), c='orange', label='vmin')
    # plt.plot([0, 886],v_max([0, 886], H=886), c='orange', label='vmax')
    plt.xlabel("Width", fontsize=14)
    plt.ylabel("Vertical Position", fontsize=14)
    plt.title('Lateral cameras (1920 x 886)', fontsize=16)
    plt.show()
    # Normalized (by image height) height vs vertical position.
    x, y = df['height'] / df['image_height'], df['ycenter'] / df['image_height']
    b, m = polyfit(x, y, 1)
    fig = plt.scatter(x=x, y=y, s=0.01)
    plt.plot([0, 1], b + m * np.asarray([0, 1]), '-', c='grey', label='reg. line')
    plt.xlabel("Height (norm)")
    plt.ylabel("Vertical Position (norm)")
    plt.legend()
    plt.show()
    # fig = plt.scatter(x=df['height'], y=df['ycenter'], c=df['label'].apply(lambda x: colors[x]), marker='+', s=1)
    # plt.xlabel("Height")
    # plt.ylabel("Vertical Position")
    # plt.legend()
    # plt.show()
    fig = plt.scatter(x=df['width'], y=df['ycenter'], marker='+', s=1)
    plt.xlabel("Width")
    plt.ylabel("Vertical Position")
    plt.legend()
    plt.show()
    # Per-class (vehicle/pedestrian/cyclist) scatter plots: all images,
    # then frontal-only, then lateral-only.
    fig, ax = plt.subplots(3, 1, figsize=(5, 11))
    for i in range(3):
        _df = df[df['label'] == (i + 1.)]
        ax[i].scatter(x=_df['height'], y=_df['ycenter'], c=_df['label'].apply(lambda x: colors[x]), marker='+', s=1)
        ax[i].set_xlabel("Height")
        ax[i].set_ylabel("Vertical Position")
        ax[i].set_title(boxlabels[i + 1.])
    fig.suptitle('All images', fontsize=16)
    fig.tight_layout()
    fig.subplots_adjust(top=.075)
    fig.show()
    fig, ax = plt.subplots(3, 1, figsize=(5, 11), sharex=True, sharey=True)
    for i in range(3):
        _df = df[df['label'] == (i + 1.)]
        _df = _df[_df['image_height'] == 1280]
        ax[i].scatter(x=_df['height'], y=_df['ycenter'], c=_df['label'].apply(lambda x: colors[x]), marker='+', s=1)
        ax[i].set_xlabel("Height")
        ax[i].set_ylabel("Vertical Position")
        ax[i].set_title(boxlabels[i + 1.])
    fig.suptitle('Images (1920 x 1280)', fontsize=16)
    fig.tight_layout()
    fig.subplots_adjust(top=.075)
    fig.show()
    fig, ax = plt.subplots(3, 1, figsize=(5, 11), sharex=True, sharey=True)
    for i in range(3):
        _df = df[df['label'] == (i + 1.)]
        _df = _df[_df['image_height'] != 1280]
        ax[i].scatter(x=_df['height'], y=_df['ycenter'], c=_df['label'].apply(lambda x: colors[x]), marker='+', s=1)
        ax[i].set_xlabel("Height")
        ax[i].set_ylabel("Vertical Position")
        ax[i].set_title(boxlabels[i + 1.])
    fig.suptitle('Images (1920 x 886)', fontsize=16)
    fig.tight_layout()
    fig.subplots_adjust(top=.075)
    fig.show()
    # Per-class aspect-ratio and scale histograms.
    fig, ax = plt.subplots(2, 3, figsize=(12, 9))
    for i in range(3):
        l = boxlabels[i + 1]
        _df = df[df['label'] == (i + 1.)]
        ax[0, i].hist(_df.aspect_ratio, bins=100, range=(0, 6), alpha=0.7, color=colors[i + 1], density=True)
        ax[1, i].hist(_df.scale, bins=100, range=(0, 3), alpha=0.7, color=colors[i + 1], density=True)
        ax[0, i].set_title('Aspect ratio ({})'.format(l))
        ax[1, i].set_title('Scale ({})'.format(l))
    fig.tight_layout()
    fig.show()
    # corner = True
    #
    # fig = sns.pairplot(df, hue="label", corner=corner, markers='+', plot_kws=dict(s=2,linewidth=0.2))
    # fig.savefig("boxes_label.png")
    # plt.show()
    #
    #
    # _ = df.pop('label')
    #
    # fig = sns.pairplot(df, corner=corner, markers='+', plot_kws=dict(s=2,linewidth=0.2))
    # fig.savefig("boxes.png")
    # plt.show()
    # Overall histograms with the chosen anchor values overlaid in red.
    fig, ax = plt.subplots(2, 1, figsize=(5, 6))
    ax[0].hist(df.aspect_ratio_closer, bins=np.asarray([[b - 0.1, b + 0.1] for b in aspect_ratios]).flatten(),
               color='red', density=True, rwidth=0.5, label=str(aspect_ratios))
    ax[0].hist(df.aspect_ratio, bins=100, range=(0, 6), density=True, alpha=0.7,
               label='Max {:.1f}\nMin: {:.3f}'.format(max(df.aspect_ratio), min(df.aspect_ratio)))
    ax[0].set_title("Aspect ratio")
    ax[0].legend()
    ax[1].hist(df.scale_closer, bins=np.asarray([[b - 0.1, b + 0.1] for b in scales]).flatten(), color='red',
               density=True, rwidth=0.5, label=str(scales))
    ax[1].hist(df.scale, bins=100, range=(0, 4), density=True, alpha=0.7,
               label='Max {:.3f}\nMin: {:.3f}'.format(max(df.scale), min(df.scale)))
    ax[1].set_title("Scale")
    ax[1].legend()
    fig.savefig("boxes_generator_parameters.png")
    fig.show()
    # Subset: frontal-camera boxes near the top of the image (ycenter < 500).
    _df = df[df.image_height == 1280]
    _df = _df[_df.ycenter < 500]
    # _df = _df[_df.height<200]
    b, m = polyfit(_df['width'], _df['xcenter'], 1)
    fig = plt.scatter(x=_df['width'], y=_df['xcenter'], marker='+', s=1)
    plt.xlim((0, max(df.width)))
    plt.ylim((0, max(df.xcenter)))
    plt.xlabel("Width")
    plt.ylabel("Horizontal Position")
    plt.title('Boxes in the top of the images (1920 x 1280)')
    plt.legend()
    plt.show()
    fig = plt.scatter(x=_df['height'], y=_df['ycenter'], marker='+', s=1)
    plt.xlim((0, max(df.height)))
    plt.ylim((0, max(df.ycenter)))
    plt.xlabel("Height")
    plt.ylabel("Vertical Position")
    plt.title('Boxes in the top of the images (1920 x 1280)')
    plt.legend()
    plt.show()
    fig, ax = plt.subplots(2, 1, figsize=(5, 6))
    ax[0].hist(_df.aspect_ratio_closer, bins=np.asarray([[b - 0.1, b + 0.1] for b in aspect_ratios]).flatten(),
               color='red', density=True, rwidth=0.5, label=str(aspect_ratios))
    ax[0].hist(_df.aspect_ratio, bins=100, range=(0, 6), density=True, alpha=0.7,
               label='Max {:.1f}\nMin: {:.3f}'.format(max(_df.aspect_ratio), min(_df.aspect_ratio)))
    ax[0].set_title("Aspect ratio")
    ax[0].legend()
    ax[1].hist(_df.scale_closer, bins=np.asarray([[b - 0.1, b + 0.1] for b in scales]).flatten(), color='red',
               density=True, rwidth=0.5, label=str(scales))
    ax[1].hist(_df.scale, bins=100, range=(0, 4), density=True, alpha=0.7,
               label='Max {:.3f}\nMin: {:.3f}'.format(max(_df.scale), min(_df.scale)))
    ax[1].set_title("Scale")
    ax[1].legend()
    fig.show()
# study_width_height_position()
def study_cluster(_df, n_clusters, silhoutte_n_clusters=list(range(2, 21)), save_fig=True, label=''):
    """Cluster boxes on (scale, aspect_ratio) and plot the results.

    Runs an elbow/inertia sweep over silhoutte_n_clusters (skipped when None
    or empty), fits KMeans with n_clusters on min-max-scaled features, then
    draws a split violin plot of the cluster distributions plus per-cluster
    aspect-ratio and scale histograms. Figures are optionally saved with
    *label* in the filename.

    NOTE(review): the mutable default list for silhoutte_n_clusters is only
    read here, but is an anti-pattern; consider `None` + in-body default.
    """
    # label_array = np.reshape(_df['label'].array, (_df['label'].shape[0], -1))
    # one_hot = OneHotEncoder().fit_transform(X=label_array)
    X = MinMaxScaler().fit_transform(_df[['scale', 'aspect_ratio']])
    # X = MinMaxScaler().fit_transform(_df[['height', 'width']])
    plt.clf()
    # Elbow
    if silhoutte_n_clusters is not None and silhoutte_n_clusters != []:
        silhoutte_values = {}
        for k in tqdm(silhoutte_n_clusters):
            kmeans = KMeans(n_clusters=k, random_state=0, n_jobs=8).fit(X)
            labels = kmeans.predict(X)
            silhoutte_values[k] = kmeans.inertia_  # [silhouette_score(X, labels)]
        plt.plot(list(silhoutte_values.keys()), list(silhoutte_values.values()), 'o-')
        plt.xlabel("number of clusters")
        plt.ylabel("SSE")
        plt.title(label)
        if save_fig:
            plt.savefig('elbow_' + label + '.png')
        plt.show()
    # Clusters
    kmeans = KMeans(n_clusters=n_clusters, random_state=0, n_jobs=8).fit(X)
    labels = kmeans.labels_
    # fig = plt.scatter(x=_df['xcenter'], y=_df['ycenter'], c=labels, marker='+', s=0.1)
    # ax = plt.gca()
    # ax.invert_yaxis()
    # legend1 = ax.legend(*fig.legend_elements(),
    #                     loc="best", title="Clusters")
    # ax.add_artist(legend1)
    # plt.show()
    ## He sacado la imagen con 2 clusters y sample 0.3, and point size 0.001
    # fig, ax = plt.subplots(1 , n_clusters + 1, figsize=(15, 2 * (n_clusters + 1)), sharex=True, sharey=True)
    # #fig, ax = plt.subplots(1, n_clusters + 1, sharex=True, sharey=True)
    # fig0 = ax[0].scatter(x=_df['xcenter'], y=_df['ycenter'], c=labels, s=0.001)
    # ax[0].invert_yaxis()
    # ax[0].set_title("All objects", fontsize=16)
    # ax[0].set_xlabel("Horizontal Position", fontsize=14)
    # ax[0].set_ylabel("Normalized Vertical Position", fontsize=14)
    # for k in range(n_clusters):
    #     if k==1:
    #         ax[k + 1].scatter(x=_df[labels == k]['xcenter'], y=_df[labels == k]['ycenter'], s=0.002)
    #     else:
    #         ax[k + 1].scatter(x=_df[labels == k]['xcenter'], y=_df[labels == k]['ycenter'], s=0.001)
    #     ax[k + 1].set_title('Cluster {}'.format(k), fontsize=16)
    #     ax[k+1].set_xlabel("Horizontal Position", fontsize=14)
    #
    # legend1 = ax[0].legend(*fig0.legend_elements(),
    #                     loc="best", title="Clusters")
    # ax[0].add_artist(legend1)
    # #plt.subplots_adjust(top=1, bottom=0, right=1, left=0,
    # #                    hspace=0, wspace=0)
    # #plt.margins(0, 0)
    # plt.show()
    # if save_fig:
    #     fig.savefig('clustering_' + label + '.png', dpi=500, bbox_inches='tight')
    # Violin plot of scale/aspect-ratio per cluster (outliers filtered).
    df_cluster = pd.concat((_df, pd.DataFrame(labels, index=_df.index, columns=["cluster"])), axis=1)
    df_cluster = df_cluster[(df_cluster.aspect_ratio<6) & (df_cluster.scale<4)]
    fig = plt.figure()
    # ax.boxplot(_df['scale'],positions=labels)
    # ax = sns.catplot(x="cluster", y='aspect_ratio', data=df_cluster, kind='violin', palette="Blues")
    _df_cluster = df_cluster[['aspect_ratio', 'scale', 'cluster']]
    df_cluster = df_cluster.rename(columns={"aspect_ratio": "Aspect Ratio", "scale": "Scale Ratio"})
    _df_cluster = df_cluster.melt(var_name='Characteristic', value_vars=['Aspect Ratio', 'Scale Ratio'], value_name='Value')
    _df_cluster['cluster'] = df_cluster['cluster'].values.tolist() + df_cluster['cluster'].values.tolist()
    _df_cluster = _df_cluster.rename(columns={"cluster": "Cluster"})
    ax = sns.violinplot(y='Value', x='Characteristic', hue='Cluster', data=_df_cluster, split=True, palette='Blues',inner=None)
    # ax = sns.violinplot(y="aspect_ratio", data=df_cluster[df_cluster.aspect_ratio<6], hue="cluster",split=True)
    plt.title("Cluster distribution", fontsize=16)
    # plt.xlabel("Cluster", fontsize=14)
    plt.ylabel("Value", fontsize=14)
    plt.xlabel("Feature", fontsize=14)
    plt.show()
    print()
    fig.savefig('cluster_dist_all.png', dpi=500, bbox_inches='tight')
    # fig = plt.scatter(x=_df['height'], y=_df['ycenter'], s=0.001, c=labels)
    # plt.xlim((0, max(_df.height)))
    # plt.ylim((0, max(_df.ycenter)))
    # plt.xlabel("Height")
    # plt.ylabel("Vertical Position")
    # plt.title('')
    # ax = plt.gca()
    # ax.invert_yaxis()
    # legend1 = ax.legend(*fig.legend_elements(),
    #                     loc="best", title="Clusters")
    # ax.add_artist(legend1)
    # plt.title(label)
    # if save_fig:
    #     plt.savefig('VerticalVsHeight_' + label + '.png')
    # plt.show()
    # Per-cluster KDE histograms of aspect ratio and scale.
    fig, ax = plt.subplots(2, n_clusters, figsize=(5 * n_clusters, 6), sharey='col')
    for k in range(n_clusters):
        __df = _df[labels == k]
        # ax[0, k].hist(__df.aspect_ratio_closer, bins=np.asarray([[b - 0.1, b + 0.1] for b in aspect_ratios]).flatten(),
        #               color='red', density=True, rwidth=0.5, label=str(aspect_ratios))
        sns.distplot(__df.aspect_ratio, ax=ax[k, 0], hist_kws={"range": (0, 6)}, kde_kws={"clip": (0, 6)})
        # ax[0, k].hist(__df.aspect_ratio, bins=100, range=(0, 6), density=True, alpha=0.7,)
        # label='Max {:.1f}\nMin: {:.3f}'.format(max(__df.aspect_ratio), min(__df.aspect_ratio)))
        ax[k, 0].set_title("Aspect ratio (cluster {})".format(k))
        # ax[0, k].legend()
        # ax[1, k].hist(__df.scale_closer, bins=np.asarray([[b - 0.1, b + 0.1] for b in scales]).flatten(), color='red',
        #               density=True, rwidth=0.5, label=str(scales))
        sns.distplot(__df[_df.scale < 4].scale, ax=ax[k, 1], hist_kws={"range": (0, 4)}, kde_kws={"clip": (0, 4)})
        # ax[1, k].hist(__df.scale, bins=100, range=(0, 4), density=True, alpha=0.7,)
        # label='Max {:.3f}\nMin: {:.3f}'.format(max(__df.scale), min(__df.scale)))
        ax[k, 1].set_title("Scale ratio (cluster {})".format(k))
        # ax[1, k].legend()
    ax[0, 0].set_ylabel("Density", fontsize=10)
    ax[1, 0].set_ylabel("Density", fontsize=10)
    ax[0, 0].set_xlabel("")
    ax[0, 1].set_xlabel("")
    ax[1, 0].set_xlabel("")
    ax[1, 1].set_xlabel("")
    plt.show()
    if save_fig:
        fig.savefig('cluster_description_' + label + '.png', dpi=500, bbox_inches='tight')
    # Scale and aspect ratio
    fig, ax = plt.subplots(2, n_clusters, figsize=(5 * n_clusters, 6))
    for k in range(n_clusters):
        __df = _df[labels == k]
        # ax[0, k].hist(__df.aspect_ratio_closer, bins=np.asarray([[b - 0.1, b + 0.1] for b in aspect_ratios]).flatten(),
        #               color='red', density=True, rwidth=0.5, label=str(aspect_ratios))
        ax[0, k].hist(__df.aspect_ratio, bins=100, range=(0, 6), density=True, alpha=0.7,
                      label='Max {:.1f}\nMin: {:.3f}'.format(max(__df.aspect_ratio), min(__df.aspect_ratio)))
        ax[0, k].set_title("Aspect ratio (cluster {})".format(k))
        ax[0, k].legend()
        # ax[1, k].hist(__df.scale_closer, bins=np.asarray([[b - 0.1, b + 0.1] for b in scales]).flatten(), color='red',
        #               density=True, rwidth=0.5, label=str(scales))
        ax[1, k].hist(__df.scale, bins=100, range=(0, 4), density=True, alpha=0.7,
                      label='Max {:.3f}\nMin: {:.3f}'.format(max(__df.scale), min(__df.scale)))
        ax[1, k].set_title("Scale (cluster {})".format(k))
        ax[1, k].legend()
    plt.show()
    if save_fig:
        fig.savefig('cluster_description_' + label + '.png')
def region_study(_df, n_clusters):
    """Cluster boxes on (scale, aspect_ratio) and study their vertical regions.

    For each KMeans cluster, counts boxes whose (normalized) ycenter falls in
    each 0.05-wide band, then draws a 2x2 figure: per-cluster position
    scatter plots with hand-picked region lines, and the per-band percentage
    curves. Saves the figure as regions.png.
    """
    X = MinMaxScaler().fit_transform(_df[['scale', 'aspect_ratio']])
    n_cluster = 2
    kmeans = KMeans(n_clusters=n_clusters, random_state=0, n_jobs=8).fit(X)
    labels = kmeans.labels_
    df_cluster = pd.concat((_df, pd.DataFrame(labels, index=_df.index, columns=["cluster"])), axis=1)
    num_elements = [df_cluster[df_cluster['cluster'] == k].shape[0] for k in [0, 1]]
    regions_count = {}
    # Histogram of ycenter in 0.05-wide bands, per cluster.
    for k in range(n_clusters):
        count_dict = {}
        clust = df_cluster[df_cluster['cluster'] == k]
        keys_list = []
        for i, limit in enumerate(np.arange(0.05, 1, 0.05)):
            keys_list.append(limit)
            if i == 0:
                count_dict[limit] = clust[(clust['ycenter'] < limit)].shape[0]
            else:
                count_dict[limit] = clust[(clust['ycenter'] < limit) & (clust['ycenter'] > keys_list[i - 1])].shape[0]
        regions_count[k] = count_dict
    # fig, ax = plt.subplots(2, 2, figsize=(15, 2 * (n_clusters + 1)), sharex=True, sharey=True)
    fig, ax = plt.subplots(2, 2, figsize=(12, 9))
    for k in range(n_clusters):
        if k == 1:
            ax[k][0].scatter(x=_df[labels == k]['xcenter'], y=_df[labels == k]['ycenter'], s=0.002)
        else:
            ax[k][0].scatter(x=_df[labels == k]['xcenter'], y=_df[labels == k]['ycenter'], s=0.001)
        ax[k][0].set_title('Cluster {}'.format(k), fontsize=16)
        if k == 1:
            ax[k][0].set_xlabel("Horizontal Position", fontsize=14)
        ax[k][0].set_ylabel("Normalized Vertical Position", fontsize=14)
        ax[k][0].invert_yaxis()
        ax[k][0].set_xlim((0, 1920))
        ax[k][0].set_xticks([250, 500, 750, 1000, 1250, 1500, 1750])
        # Hand-picked region boundaries differ per cluster.
        if k == 0:
            for line in [0.2, 0.4, 0.7]:
                ax[k][0].axhline(y=line, color='r', linestyle='--')
            # ax[k][0].hlines([0.2, 0.4, 0.7], xmin=0, xmax=ax[k][0].get_xlim()[1], colors='r', linestyles='dashed')
        else:
            for line in [0.4]:
                ax[k][0].axhline(y=line, color='r', linestyle='--')
            # ax[k][0].hlines([0.4], xmin=0, xmax=ax[k][0].get_xlim()[1], colors='r',linestyles='dashed')
        # ax[k][0].hlines(np.arange(0.05, 1, 0.05),xmin=0, xmax=ax[k][0].get_xlim()[1], colors='r',linestyles='dashed')
        ax[k][0].set_yticks([0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1])
        ax[k][1].plot([x for x in list(regions_count[k].keys())],
                      [x / num_elements[k] * 100 for x in list(regions_count[k].values())])
        ax[k][1].scatter([x for x in list(regions_count[k].keys())],
                         [x / num_elements[k] * 100 for x in list(regions_count[k].values())], s=5)
        # ax[k][1].bar([x for x in list(regions_count[k].keys())], [x / num_elements[k] * 100 for x in list(regions_count[k].values())], width=0.03)
        ax[k][1].set_title('Cluster {}'.format(k), fontsize=16)
        if k == 1:
            ax[k][1].set_xlabel("Normalized Height", fontsize=14)
        ax[k][1].set_ylabel("Percentage of elements", fontsize=14)
        ax[k][1].set_xticks([0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1])
        if k == 0:
            ax[k][1].vlines([0.2, 0.4, 0.7], ymin=0, ymax=ax[k][1].get_ylim()[1], colors='r', linestyles='dashed')
        else:
            ax[k][1].vlines([0.4], ymin=0, ymax=ax[k][1].get_ylim()[1], colors='r', linestyles='dashed')
        ax[k][1].set_yticks(list(range(0, 20, 2)))
        # ax[k][1].set_ylim((0, 20))
    #
    # legend1 = ax[0].legend(*fig0.legend_elements(),
    #                     loc="best", title="Clusters")
    # ax[0].add_artist(legend1)
    # dict_c0_num = {}
    # c = df_cluster[df_cluster['cluster'] == 0]
    # keys_list = []
    # for i, limit in enumerate(np.arange(0.05, 1, 0.05)):
    #     keys_list.append(limit)
    #     if i == 0:
    #         dict_c0_num[limit] = c[(c['ycenter'] < limit)].shape[0]
    #     else:
    #         dict_c0_num[limit] = c[(c['ycenter'] < limit) & (c['ycenter'] > keys_list[i - 1])].shape[0]
    #
    # sorted_c0 = sorted(dict_c0.items(), key=lambda x: x[1], reverse=True)
    #
    # plt.figure()
    # plt.title("Cluster 0")
    # plt.plot([x for x in list(dict_c0_num.keys())], list(dict_c0_num.values()))
    # plt.show()
    #
    # dict_c1_num = {}
    # c = df_cluster[df_cluster['cluster'] == 1]
    # keys_list = []
    # for i, limit in enumerate(np.arange(0.05, 1, 0.05)):
    #     keys_list.append(limit)
    #     if i == 0:
    #         dict_c1_num[limit] = c[(c['ycenter'] < limit)].shape[0]
    #     else:
    #         dict_c1_num[limit] = c[(c['ycenter'] < limit) & (c['ycenter'] > keys_list[i - 1])].shape[0]
    #
    # sorted_c1 = sorted(dict_c0.items(), key=lambda x: x[1], reverse=True)
    #
    # plt.figure()
    # plt.title("Cluster 1")
    # plt.plot([x for x in list(dict_c1_num.keys())], [x / c.shape[0] * 100 for x in list(dict_c1_num.values())])
    # plt.xlabel("Normalized Height")
    # plt.ylabel("Percentage of elements")
    # plt.xticks([0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9])
    # plt.ylim((0, 20))
    # plt.show()
    # fig.savefig('cluster1.png', dpi=500, bbox_inches='tight')
    #
    # plt.figure()
    # plt.bar([x for x in list(dict_c1_num.keys())], [x / c.shape[0] * 100 for x in list(dict_c1_num.values())],
    #         align='edge')
    # plt.xlabel("Normalized Height")
    # plt.ylabel("Percentage of elements")
    # plt.show()
    #
    # sorted_c0 = sorted(dict_c0.items(), key=lambda x: x[1], reverse=True)
    #
    # # if save_fig:
    fig.savefig('regions.png', dpi=500, bbox_inches='tight')
    plt.show()
    print("Hola")
def region_study2(_df, n_clusters):
    """Joint (scatter + marginal) plots of box position per KMeans cluster.

    Clusters on min-max-scaled (scale, aspect_ratio) and saves one seaborn
    jointplot of xcenter vs ycenter per cluster, with the hand-picked region
    lines overlaid (0.2/0.7 for cluster 0, 0.4 for cluster 1).
    """
    X = MinMaxScaler().fit_transform(_df[['scale', 'aspect_ratio']])
    n_cluster = 2
    kmeans = KMeans(n_clusters=n_clusters, random_state=0, n_jobs=8).fit(X)
    labels = kmeans.labels_
    df_cluster = pd.concat((_df, pd.DataFrame(labels, index=_df.index, columns=["cluster"])), axis=1)
    num_elements = [df_cluster[df_cluster['cluster'] == k].shape[0] for k in [0, 1]]
    regions_count = {}
    g = sns.jointplot("xcenter", "ycenter", data=_df[(labels == 0)],
                      kind="reg", scatter_kws={'s': 0.001})
    g.ax_joint.set_yticks([0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1])
    g.ax_joint.invert_yaxis()
    for line in [0.2, 0.7]:
        g.ax_joint.axhline(y=line, color='r', linestyle='--')
    # plt.show()
    g.savefig('jointplot_c0.png', dpi=500, bbox_inches='tight')
    g = sns.jointplot("xcenter", "ycenter", data=_df[(labels == 1)],
                      kind="reg", scatter_kws={'s':0.03})
    g.ax_joint.set_yticks([0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1])
    g.ax_joint.invert_yaxis()
    for line in [0.4]:
        g.ax_joint.axhline(y=line, color='r', linestyle='--')
    # plt.show()
    g.savefig('jointplot_c1.png', dpi=500, bbox_inches='tight')
    print()
def region_study3(_df, n_clusters):
    """Cluster boxes by (scale, aspect_ratio), bin each cluster's vertical
    positions into 0.05-wide bands, and plot per-cluster scatters of box
    centers with dashed reference lines.

    Args:
        _df: DataFrame with 'scale', 'aspect_ratio', 'xcenter', 'ycenter'
            columns (ycenter normalized to [0, 1]).
        n_clusters: number of K-means clusters; the plotting below is
            hard-coded to a 2-row figure, so values > 2 raise IndexError.
    """
    X = MinMaxScaler().fit_transform(_df[['scale', 'aspect_ratio']])
    # NOTE(review): ``n_jobs`` was removed from KMeans in scikit-learn 1.0.
    kmeans = KMeans(n_clusters=n_clusters, random_state=0, n_jobs=8).fit(X)
    labels = kmeans.labels_
    df_cluster = pd.concat((_df, pd.DataFrame(labels, index=_df.index, columns=["cluster"])), axis=1)
    # Removed unused locals ``n_cluster`` and ``num_elements``.
    # Per cluster: count boxes falling in each normalized-height band
    # [previous_limit, limit).
    regions_count = {}
    for k in range(n_clusters):
        count_dict = {}
        clust = df_cluster[df_cluster['cluster'] == k]
        keys_list = []
        for i, limit in enumerate(np.arange(0.05, 1, 0.05)):
            keys_list.append(limit)
            if i == 0:
                count_dict[limit] = clust[(clust['ycenter'] < limit)].shape[0]
            else:
                count_dict[limit] = clust[(clust['ycenter'] < limit) & (clust['ycenter'] > keys_list[i - 1])].shape[0]
        regions_count[k] = count_dict
    fig, ax = plt.subplots(2, 1, figsize=(5, 9))
    for k in range(n_clusters):
        # Cluster 1 is sparser, so it gets slightly larger markers.
        if k == 1:
            ax[k].scatter(x=_df[labels == k]['xcenter'], y=_df[labels == k]['ycenter'], s=0.002)
        else:
            ax[k].scatter(x=_df[labels == k]['xcenter'], y=_df[labels == k]['ycenter'], s=0.001)
        ax[k].set_title('Cluster {}'.format(k), fontsize=16)
        if k == 1:
            ax[k].set_xlabel("Horizontal Position", fontsize=14)
        ax[k].set_ylabel("Normalized Vertical Position", fontsize=14)
        ax[k].invert_yaxis()
        ax[k].set_xlim((0, 1920))
        ax[k].set_xticks([250, 500, 750, 1000, 1250, 1500, 1750])
        # Dashed reference lines differ per cluster (empirically chosen).
        if k == 0:
            for line in [0.2, 0.7]:
                ax[k].axhline(y=line, color='r', linestyle='--')
        else:
            for line in [0.4]:
                ax[k].axhline(y=line, color='r', linestyle='--')
        ax[k].set_yticks([0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1])
    plt.show()
    fig.savefig('regions_cluster4.png', dpi=500, bbox_inches='tight')
    print("Hola")
n_clusters = 2
n_clusters_elbow = [] # list(range(2, 10)) # list(range(2,21))
print(df.shape[0])
_df = df
# _df = df[df['image_height'] == 1280]
print(_df.shape[0])
_df = _df.sample(frac=0.3)
print(_df.shape[0])
_df = _df[_df['xcenter'] > anchor_base / 2]
_df = _df[_df['xcenter'] < (_df['image_width'] - anchor_base / 2)]
_df = _df[_df['ycenter'] < (_df['image_height'] - anchor_base / 2)]
_df['ycenter'] = _df['ycenter'] / _df['image_height']
_df['height'] = _df['height'] / _df['image_height']
# study_cluster(_df, n_clusters, n_clusters_elbow, save_fig=True, label='AllCameras-{}Clusters'.format(n_clusters))
region_study(_df,n_clusters)
_df['height'] = _df['height'] / _df['image_height']
_df['ycenter'] = _df['ycenter'] / _df['image_height']
study_cluster(_df, n_clusters, n_clusters_elbow, save_fig=True, label='AllCamerasNorm-{}Clusters'.format(n_clusters))
_df = df.sample(frac=0.1)
_df = _df[_df['image_height'] == 1280]
# study_cluster(_df, n_clusters, n_clusters_elbow, save_fig=True, label='FrontCameras-{}Clusters'.format(n_clusters))
_df = df.sample(frac=0.1)
_df = _df[_df['image_height'] != 1280]
# study_cluster(_df, n_clusters, n_clusters_elbow, save_fig=True, label='SideCameras-{}Clusters'.format(n_clusters))
_df = df.sample(frac=sample_frac)
# _df = _df[_df['xcenter']>anchor_base/2]
# _df = _df[_df['xcenter']<(_df['image_width']-anchor_base/2)]
# _df = _df[_df['ycenter']<(_df['image_height']-anchor_base/2)]
_df['ycenter'] = _df['ycenter'] / _df['image_height']
lines = [0.17, 0.4, 0.7]
area_names = ['SKY', 'TOP', 'MIDDLE', 'BOTTOM']
region_colors = ['rebeccapurple', 'steelblue', 'mediumaquamarine', 'gold']
def assign_area(y, lines=None, text=True, area_names=None):
    """Map normalized vertical positions to image regions.

    Args:
        y: scalar or array-like of normalized ycenter values in [0, 1].
        lines: region boundaries (sorted ascending on entry); defaults to
            [0.17, 0.4, 0.7], matching the module-level ``lines``.
        text: if True return region names, otherwise integer region indices.
        area_names: region names ordered top to bottom; defaults to
            ['SKY', 'TOP', 'MIDDLE', 'BOTTOM'] (module-level ``area_names``).

    Returns:
        numpy array (same shape as ``y``) of names (text=True) or indices.
    """
    # Resolve defaults at call time. The original signature bound the
    # module-level mutable lists at definition time (``lines=lines``), which
    # froze whatever objects happened to exist when the def executed.
    if lines is None:
        lines = [0.17, 0.4, 0.7]
    if area_names is None:
        area_names = ['SKY', 'TOP', 'MIDDLE', 'BOTTOM']
    lines = sorted(lines)

    def _assign_area(value):
        # The first boundary strictly above ``value`` decides the region.
        for i, (name, line) in enumerate(zip(area_names, lines)):
            if value < line:
                return name if text else i
        # Below every boundary -> bottom-most region.
        return area_names[-1] if text else len(lines)

    return np.vectorize(_assign_area)(y)
# Annotate every box with its region (name and numeric index) and a readable
# class label resolved through the ``boxlabels`` mapping.
_df['region'] = assign_area(_df['ycenter'], text=True)
_df['region_n'] = assign_area(_df['ycenter'], text=False)
_df['label_name'] = np.vectorize(lambda x: boxlabels[int(x)])(_df['label'])
def clustering():
    """Fit K-means on (scale, aspect_ratio) and plot clusters vs. regions.

    Produces a 1x4 panel: all boxes colored by cluster, each cluster alone,
    and all boxes colored by region, then saves it as 'regions_cluster.png'.
    """
    scaled = MinMaxScaler().fit_transform(_df[['scale', 'aspect_ratio']])
    model = KMeans(n_clusters=n_clusters, random_state=0, n_jobs=8).fit(scaled)
    cluster_labels = model.labels_
    _, axes = plt.subplots(1, 4, figsize=(20, 5), sharey=True)
    cluster_scatter = axes[0].scatter(x=_df['xcenter'], y=_df['ycenter'], c=cluster_labels, marker='+', s=1)
    axes[1].scatter(x=_df[cluster_labels == 0]['xcenter'], y=_df[cluster_labels == 0]['ycenter'], marker='+', s=1)
    axes[2].scatter(x=_df[cluster_labels == 1]['xcenter'], y=_df[cluster_labels == 1]['ycenter'], marker='+', s=1)
    region_scatter = axes[3].scatter(_df['xcenter'], y=_df['ycenter'], c=_df['region_n'],
                                     cmap=ListedColormap(region_colors), marker='+', s=1)
    # Dashed horizontal region boundaries on the three cluster panels.
    for boundary in lines:
        for panel in (axes[0], axes[1], axes[2]):
            panel.axhline(y=boundary, color='r', linestyle='--')
    cluster_legend = axes[0].legend(*cluster_scatter.legend_elements(),
                                    loc="best", title="Clusters")
    axes[0].add_artist(cluster_legend)
    axes[3].invert_yaxis()
    region_legend = axes[3].legend(handles=region_scatter.legend_elements()[0], labels=area_names,
                                   loc="best", title="Regions")
    axes[3].add_artist(region_legend)
    plt.savefig('regions_cluster.png')
    plt.show()
# --- Overlaid per-region histograms of aspect ratio and scale ---------------
fig, ax = plt.subplots(2, 1, figsize=(5, 6))
for r, region in enumerate(area_names):
    # ax[0].hist(_df[_df['region']==region].aspect_ratio_closer, bins=np.asarray([[b - 0.1, b + 0.1] for b in aspect_ratios]).flatten(),
    #         color=region_colors[r], density=True, rwidth=0.5,) #label=str(aspect_ratios))
    ax[0].hist(_df[_df['region'] == region].aspect_ratio, bins=100, range=(0, 6), alpha=0.7, color=region_colors[r],
               histtype='step',
               label=region + ' [{:.1f}, {:.3f}]'.format(max(_df[_df['region'] == region].aspect_ratio),
                                                         min(_df[_df['region'] == region].aspect_ratio)))
    # ax[1].hist(_df[_df['region']==region].scale_closer, bins=np.asarray([[b - 0.1, b + 0.1] for b in scales]).flatten(),
    #         color=region_colors[r], density=True, rwidth=0.5, )#label=str(scales))
    ax[1].hist(_df[_df['region'] == region].scale, bins=100, range=(0, 4), alpha=0.7, color=region_colors[r],
               histtype='step',
               label=region + ' [{:.3f}, {:.3f}]'.format(max(_df[_df['region'] == region].scale),
                                                          min(_df[_df['region'] == region].scale)))
ax[0].set_title("Aspect ratio")
ax[0].legend()
ax[1].set_title("Scale")
ax[1].legend()
plt.show()
# --- One histogram column per region; closest-anchor binning drawn in red ---
fig, ax = plt.subplots(2, len(area_names), figsize=(5 * len(area_names), 6))
for r, region in enumerate(area_names):
    ax[0, r].hist(_df[_df['region'] == region].aspect_ratio_closer,
                  bins=np.asarray([[b - 0.1, b + 0.1] for b in aspect_ratios]).flatten(),
                  color='r', density=True, rwidth=0.5, label=str(aspect_ratios))
    ax[0, r].hist(_df[_df['region'] == region].aspect_ratio, bins=100, range=(0, 6), alpha=0.7,
                  color=region_colors[r], density=True,
                  label=region + '\nMAX: {:.3f}\nMIN: {:.3f}\nMEAN: {:.3f}]'.format(
                      max(_df[_df['region'] == region].aspect_ratio),
                      min(_df[_df['region'] == region].aspect_ratio),
                      np.mean(_df[_df['region'] == region].aspect_ratio)))
    ax[1, r].hist(_df[_df['region'] == region].scale_closer,
                  bins=np.asarray([[b - 0.1, b + 0.1] for b in scales]).flatten(),
                  color='r', density=True, rwidth=0.5, label=str(scales))
    ax[1, r].hist(_df[_df['region'] == region].scale, bins=100, range=(0, 4), alpha=0.7, color=region_colors[r],
                  density=True,
                  label=region + '\nMAX: {:.3f}\nMIN: {:.3f}\nMEAN: {:.3f}]'.format(
                      max(_df[_df['region'] == region].scale), min(_df[_df['region'] == region].scale),
                      np.mean(_df[_df['region'] == region].scale)))
    ax[0, r].set_title("Aspect ratio ({})".format(region))
    ax[0, r].legend()
    ax[1, r].set_title("Scale ({})".format(region))
    ax[1, r].legend()
fig.savefig('regions_description.png')
plt.show()
# --- Region / class-label count plots ---------------------------------------
sns.countplot(y="region", order=area_names, data=_df, palette={r: c for r, c in zip(area_names, region_colors)})
# NOTE(review): "regions_countplot.png" is saved here AND overwritten further
# below by the combined figure -- confirm which output is wanted.
plt.savefig("regions_countplot.png")
plt.show()
sns.countplot(x="label_name", hue='region', hue_order=area_names, data=_df,
              palette={r: c for r, c in zip(area_names, region_colors)})
plt.show()
sns.countplot(y="region", order=area_names, hue='label_name', data=_df,
              palette={**{r: c for r, c in zip(area_names, region_colors)},
                       **{boxlabels[float(i + 1)]: colors[float(i + 1)] for i in range(3)}})
plt.show()
# Combined figure: region totals (big left axis) plus per-region and
# per-label breakdowns.
fig, ax = plt.subplots(2, 5, figsize=(17, 4))
gs = ax[0, 0].get_gridspec()
ax[0, 0].remove()
ax[1, 0].remove()
ax[1, 4].remove()
axbig = fig.add_subplot(gs[0:, 0])
sns.countplot(y="region", order=area_names, data=_df, palette={r: c for r, c in zip(area_names, region_colors)},
              ax=axbig)
sns.countplot(x="label_name", data=_df[_df['region'] == 'SKY'], ax=ax[0, 1])
ax[0, 1].set_title("SKY")
sns.countplot(x="label_name", data=_df[_df['region'] == 'TOP'], ax=ax[0, 2])
ax[0, 2].set_title("TOP")
sns.countplot(x="label_name", data=_df[_df['region'] == 'MIDDLE'], ax=ax[0, 3])
ax[0, 3].set_title("MIDDLE")
sns.countplot(x="label_name", data=_df[_df['region'] == 'BOTTOM'], ax=ax[0, 4])
ax[0, 4].set_title("BOTTOM")
sns.countplot(x="region", data=_df[_df['label_name'] == 'VEHICLE'], ax=ax[1, 1], order=area_names,
              palette={r: c for r, c in zip(area_names, region_colors)})
ax[1, 1].set_title("VEHICLE")
sns.countplot(x="region", data=_df[_df['label_name'] == 'PEDESTRIAN'], ax=ax[1, 2], order=area_names,
              palette={r: c for r, c in zip(area_names, region_colors)})
ax[1, 2].set_title("PEDESTRIAN")
sns.countplot(x="region", data=_df[_df['label_name'] == 'CYCLIST'], ax=ax[1, 3], order=area_names,
              palette={r: c for r, c in zip(area_names, region_colors)})
ax[1, 3].set_title("CYCLIST")
fig.tight_layout()
plt.savefig("regions_countplot.png")
plt.show()
# --- Anchor-search configuration --------------------------------------------
# 3 aspect ratios bounded in [0, 5]; 4 scales bounded in [0.06, 5].
n_aspect_ration = 3
bound_aspect_ratio = n_aspect_ration * [[0, 5.]]
n_scale = 4
bound_scale = n_scale * [[0.06, 5.]]
# Per-region optimization results; each optimizer below shadows this with its
# own local of the same name.
best_values = defaultdict(lambda: {'aspect_ratio': None, 'scale': None})
def optimize_parameters():
    """Search per-region anchor aspect-ratio and scale values with SHGO.

    The objective spreads the boxes as evenly as possible across the candidate
    values (low std of per-value counts) while rewarding spread-out candidate
    sets. Results are plotted against the raw per-region distributions and
    saved to 'regions_description_best.png'.
    """
    # ``minimize`` was imported but never used; only shgo is needed.
    from scipy.optimize import shgo

    def optimize_param(x, data, param):
        # Count how many boxes snap to each candidate value in x.
        count = np.unique(closer_to(data[param], x), return_counts=True)[1]
        # Degenerate candidates (duplicates collapse bins) are heavily penalized.
        if len(x) != len(count):
            return 1e10
        # Minimize count imbalance while maximizing candidate spread.
        return count.std() - np.std(x) * 1000

    print(8 * "#")
    print("Aspect ratio and Scale OPTIMIZATION")
    print("\tInit values")
    print("\t\tAspect ratio", aspect_ratios[:n_aspect_ration])
    print("\t\tScale", scales[:n_scale])
    print("\tBounds", 'ToDo')
    # NOTE(review): shadows the module-level ``best_values``.
    best_values = defaultdict(lambda: {'aspect_ratio': None, 'scale': None})
    for region in area_names:
        __df = _df[_df['region'] == region]
        optimal_aspect_ratio = shgo(optimize_param, bound_aspect_ratio, args=(__df, 'aspect_ratio'), n=1000, iters=5,
                                    sampling_method='sobol')
        optimal_scale = shgo(optimize_param, bound_scale, args=(__df, 'scale'), n=1000, iters=5,
                             sampling_method='sobol')
        best_values[region]['aspect_ratio'] = sorted(optimal_aspect_ratio.x)
        best_values[region]['scale'] = sorted(optimal_scale.x)
        print("\tBest values", region)
        print("\t\tAspect ratio", best_values[region]['aspect_ratio'])
        print("\t\tScale", best_values[region]['scale'])
    print(8 * "#")
    # Plot the optimized values (red bars) over the raw distributions.
    fig, ax = plt.subplots(2, len(area_names), figsize=(5 * len(area_names), 6))
    for r, region in enumerate(area_names):
        __df = _df[_df['region'] == region]
        __df['aspect_ratio_best'] = closer_to(__df['aspect_ratio'], best_values[region]['aspect_ratio'])
        __df['scale_best'] = closer_to(__df['scale'], best_values[region]['scale'])
        ax[0, r].hist(__df.aspect_ratio_best,
                      bins=np.asarray([[b - 0.03, b + 0.03] for b in best_values[region]['aspect_ratio']]).flatten(),
                      color='r', density=True, rwidth=0.5,
                      label=str(['{:.2f}'.format(x) for x in best_values[region]['aspect_ratio']]))
        ax[0, r].hist(__df.aspect_ratio, bins=100, range=(0, 6), alpha=0.7, color=region_colors[r], density=True,
                      label=region + '\nMAX: {:.3f}\nMIN: {:.3f}\nMEAN: {:.3f}]'.format(max(__df.aspect_ratio),
                                                                                        min(__df.aspect_ratio),
                                                                                        np.mean(__df.aspect_ratio)))
        ax[1, r].hist(__df.scale_best,
                      bins=np.asarray([[b - 0.03, b + 0.03] for b in best_values[region]['scale']]).flatten(),
                      color='r', density=True, rwidth=0.5,
                      label=str(['{:.2f}'.format(x) for x in best_values[region]['scale']]))
        ax[1, r].hist(__df.scale, bins=100, range=(0, 4), alpha=0.7, color=region_colors[r], density=True,
                      label=region + '\nMAX: {:.3f}\nMIN: {:.3f}\nMEAN: {:.3f}]'.format(max(__df.scale),
                                                                                        min(__df.scale),
                                                                                        np.mean(__df.scale)))
        # Typo fix: title previously read "Optimized sspect ratio ({})".
        ax[0, r].set_title("Optimized aspect ratio ({})".format(region))
        ax[0, r].legend()
        ax[1, r].set_title("Optimized scale ({})".format(region))
        ax[1, r].legend()
    fig.savefig('regions_description_best.png')
    plt.show()
def optimize_params_genetic_algorithm(features=None, fitness_name='ALL'):
    """Optimize per-region anchor parameters with a genetic algorithm.

    Runs up to four strategies (selected via ``fitness_name``), each storing
    its per-region winners in ``best_values`` and saving a comparison plot:

    * STD      -- balance box counts across the candidate values.
    * DIST     -- minimize the summed snapping distance.
    * MAX_DIST -- minimize the worst-case snapping distance.
    * QTL      -- no GA: use empirical quantiles (1/6, 1/2, 5/6).

    Args:
        features: list containing 'aspect_ratio', 'scale' or both. ``None``
            (the default) means both; this replaces the original mutable
            list default argument.
        fitness_name: one of 'ALL', 'STD', 'DIST', 'MAX_DIST', 'QTL'.
    """
    from pyeasyga import pyeasyga
    if features is None:
        features = ['aspect_ratio', 'scale']
    # NOTE(review): shadows the module-level ``best_values``.
    best_values = defaultdict(lambda: {'aspect_ratio': None, 'scale': None})

    def create_individual(data):
        # Random candidate with each gene uniform inside its bounds.
        _, _, bounds = data[0]
        return [np.random.randint(bound[0] * 100, bound[1] * 100) / 100 for bound in bounds]

    def mutate(individual):
        """Replace one randomly chosen gene with a fresh random value."""
        mutate_index = random.randrange(len(individual))
        individual[mutate_index] = np.random.randint(6, 500) / 100

    def _run_ga(fitness, generations):
        """Run one GA per (feature, region) pair; store sorted winners in
        ``best_values`` (previously duplicated three times inline)."""
        for param, region in tqdm(list(itertools.product(features, area_names))):
            bounds = bound_aspect_ratio if param == 'aspect_ratio' else bound_scale
            __df = _df[_df['region'] == region]
            data = [(param, __df, bounds)]
            ga = pyeasyga.GeneticAlgorithm(data,
                                           population_size=1000,
                                           generations=generations,
                                           crossover_probability=0.8,
                                           mutation_probability=0.2,
                                           elitism=True,
                                           maximise_fitness=False)
            ga.create_individual = create_individual
            ga.fitness_function = fitness
            ga.mutate_function = mutate
            ga.run()
            ga.best_individual()
            best_values[region][param] = sorted(ga.best_individual()[1])
            print(region, param, best_values[region][param])

    def _plot_best(outfile):
        """Plot optimized values over the raw per-region distributions and
        save to ``outfile`` (previously duplicated four times inline)."""
        fig, ax = plt.subplots(2, len(area_names), figsize=(5 * len(area_names), 6))
        for r, region in enumerate(area_names):
            __df = _df[_df['region'] == region]
            __df['aspect_ratio_best'] = closer_to(__df['aspect_ratio'], best_values[region]['aspect_ratio'])
            __df['scale_best'] = closer_to(__df['scale'], best_values[region]['scale'])
            ax[0, r].hist(__df.aspect_ratio_best, bins=np.asarray(
                [[b - 0.02, b + 0.02] for b in best_values[region]['aspect_ratio']]).flatten(),
                          color='r', density=True, rwidth=0.5,
                          label=str(['{:.2f}'.format(x) for x in best_values[region]['aspect_ratio']]))
            ax[0, r].hist(__df.aspect_ratio, bins=100, range=(0, 6), alpha=0.7, color=region_colors[r], density=True,
                          label=region + '\nMAX: {:.3f}\nMIN: {:.3f}\nMEAN: {:.3f}]'.format(
                              max(__df.aspect_ratio), min(__df.aspect_ratio), np.mean(__df.aspect_ratio)))
            ax[1, r].hist(__df.scale_best,
                          bins=np.asarray([[b - 0.01, b + 0.01] for b in best_values[region]['scale']]).flatten(),
                          color='r', density=True, rwidth=0.5,
                          label=str(['{:.2f}'.format(x) for x in best_values[region]['scale']]))
            ax[1, r].hist(__df.scale, bins=100, range=(0, 4), alpha=0.7, color=region_colors[r], density=True,
                          label=region + '\nMAX: {:.3f}\nMIN: {:.3f}\nMEAN: {:.3f}]'.format(
                              max(__df.scale), min(__df.scale), np.mean(__df.scale)))
            ax[0, r].set_title("Optimized aspect ratio ({})".format(region))
            ax[0, r].legend()
            ax[1, r].set_title("Optimized scale ({})".format(region))
            ax[1, r].legend()
        fig.savefig(outfile)
        plt.show()

    if fitness_name in ['STD', 'ALL']:
        def fitness(individual, data):
            """Prefer candidates that split the boxes into equal-sized bins."""
            param, df, bounds = data[0]
            count = np.unique(closer_to(df[param], individual), return_counts=True)[1]
            if len(count) != len(individual):
                return 1e10
            norm_std_count = count.std() / (len(df) / 2)
            norm_std_individual = np.std(individual) / (max([x[1] for x in bounds]) / 2)
            return 2 * norm_std_count  # - norm_std_individual

        _run_ga(fitness, generations=10)
        _plot_best('regions_description_best_std.png')
    if fitness_name in ['DIST', 'ALL']:
        def fitness_distance(individual, data):
            """Minimize the total snapping distance over all boxes."""
            param, df, bounds = data[0]
            distance = np.sum(np.abs(closer_to(df[param], individual) - df[param]))
            if len(np.unique(individual)) != len(individual):
                return 1e10
            return distance

        _run_ga(fitness_distance, generations=5)
        _plot_best('regions_description_best_distance.png')
    if fitness_name in ['MAX_DIST', 'ALL']:
        def fitness_max_distance(individual, data):
            """Minimize the worst-case snapping distance."""
            param, df, bounds = data[0]
            distance = np.max(np.abs(closer_to(df[param], individual) - df[param]))
            if len(np.unique(individual)) != len(individual):
                return 1e10
            return distance

        _run_ga(fitness_max_distance, generations=5)
        _plot_best('regions_description_best_max_distance.png')
    if fitness_name in ['QTL', 'ALL']:
        def quantile(df, region, param, q):
            # Empirical quantile of one parameter within a region.
            a = df[df['region'] == region][param].values
            return np.quantile(a, q)

        for param, region in tqdm(list(itertools.product(['aspect_ratio', 'scale'], area_names))):
            best_values[region][param] = [quantile(_df, region, param, q) for q in (1 / 6, 1 / 2, 5 / 6)]
        _plot_best('regions_description_best_percentile.png')
def optimize_params_by_iou_ga():
    """Jointly optimize the anchor scales and aspect ratios per region with a
    genetic algorithm whose fitness is 1 - mean(best IoU) against the
    re-centered ground-truth boxes.

    GENETIC ALGORITHM:
     individual: [scale1, scale2, scale3, scale4, aspect_ratio1, aspect_ratio2, aspect_ratio3]
    """
    from pyeasyga import pyeasyga
    # NOTE(review): shadows the module-level ``best_values``.
    best_values = defaultdict(lambda: {'aspect_ratio': None, 'scale': None})
    data = pd.read_csv(OUTPUT_FILE)[['xmax', 'xmin', 'ymax', 'ymin', 'image_height']]
    data['ycenter'] = (data['ymin'] + data['ymax']) / 2
    data['region'] = assign_area(data['ycenter'] / data['image_height'], text=True)
    # Re-center every box around the origin so it can be compared directly
    # against the origin-centered anchors produced by generate_anchors.
    x_factor = data['xmin'] + ((data['xmax'] - data['xmin']) / 2)
    y_factor = data['ymin'] + ((data['ymax'] - data['ymin']) / 2)
    data['xmax'] = data['xmax'] - x_factor
    data['xmin'] = data['xmin'] - x_factor
    data['ymax'] = data['ymax'] - y_factor
    data['ymin'] = data['ymin'] - y_factor

    def create_individual(data):
        # Random genes, each uniform inside its (scale / aspect-ratio) bounds.
        _, (bounds_sc, bounds_ar) = data[0]
        return [np.random.randint(bound[0] * 100, bound[1] * 100) / 100 for bound in bounds_sc] + [
            np.random.randint(bound[0] * 100, bound[1] * 100) / 100 for bound in bounds_ar]

    def mutate(individual):
        """Replace one randomly chosen gene with a fresh random value."""
        mutate_index = random.randrange(len(individual))
        individual[mutate_index] = np.random.randint(6, 500) / 100 if mutate_index < n_scale else np.random.randint(0,
                                                                                                                    500) / 100

    def crossover(parent_1, parent_2):
        # Cut the scale genes and the aspect-ratio genes at independent points.
        # NOTE(review): crossover_index_aspect_ratio is an absolute index
        # (>= n_scale) but is applied to the 3-element ratio sub-lists, so the
        # ratio genes are never actually mixed -- verify this is intended.
        crossover_index_aspect_ratio = random.randrange(n_scale, len(parent_1))
        crossover_index_scalar = random.randrange(1, n_scale)
        scales_parent1, aspect_ratio_parent1 = parent_1[:n_scale], parent_1[n_scale:]
        scales_parent2, aspect_ratio_parent2 = parent_2[:n_scale], parent_2[n_scale:]
        child_1 = scales_parent1[:crossover_index_scalar] + scales_parent2[
                                                            crossover_index_scalar:] + aspect_ratio_parent1[
                                                                                       :crossover_index_aspect_ratio] + aspect_ratio_parent2[
                                                                                                                        crossover_index_aspect_ratio:]
        child_2 = scales_parent2[:crossover_index_scalar] + scales_parent1[
                                                            crossover_index_scalar:] + aspect_ratio_parent2[
                                                                                       :crossover_index_aspect_ratio] + aspect_ratio_parent1[
                                                                                                                        crossover_index_aspect_ratio:]
        return child_1, child_2

    def generate_anchors(ratios=None, scales=None, base_size=anchor_base):
        """
        Generate anchor (reference) windows by enumerating aspect ratios X
        scales w.r.t. a reference window.
        """
        num_anchors = len(ratios) * len(scales)
        # initialize output anchors
        anchors = np.zeros((num_anchors, 4))
        # scale base_size
        anchors[:, 2:] = base_size * np.tile(scales, (2, len(ratios))).T
        # compute areas of anchors
        areas = anchors[:, 2] * anchors[:, 3]
        # correct for ratios
        anchors[:, 2] = np.sqrt(areas / np.repeat(ratios, len(scales)))
        anchors[:, 3] = anchors[:, 2] * np.repeat(ratios, len(scales))
        # transform from (x_ctr, y_ctr, w, h) -> (x1, y1, x2, y2)
        anchors[:, 0::2] -= np.tile(anchors[:, 2] * 0.5, (2, 1)).T
        anchors[:, 1::2] -= np.tile(anchors[:, 3] * 0.5, (2, 1)).T
        return anchors

    def fitness_iou(individual, data):
        # Returns 1 - mean(best IoU), so the minimizing GA maximizes overlap.
        df, bounds = data[0]
        num_scales, num_aspect_ratio = len(bounds[0]), len(bounds[1])
        scales, aspect_ratios = individual[:num_scales], individual[num_scales:]
        # Check there are no repeated scales/aspect_ratios
        if len(np.unique(scales)) != len(scales) or len(np.unique(aspect_ratios)) != len(aspect_ratios):
            return 1
        anchors = generate_anchors(ratios=aspect_ratios, scales=scales)
        gt = df[['xmin', 'ymin', 'xmax', 'ymax']].values
        iou = compute_overlap(gt, anchors).max(axis=1).mean()
        return 1 - iou

    # One GA run per region; winners (sorted) go into best_values.
    for region in tqdm(area_names):
        bounds = (bound_scale, bound_aspect_ratio)
        _data = data[data['region'] == region]
        print(region, len(_data))
        ga_data = [(_data, bounds)]
        ga = pyeasyga.GeneticAlgorithm(ga_data,
                                       population_size=1000,
                                       generations=5,
                                       crossover_probability=0.8,
                                       mutation_probability=0.2,
                                       elitism=True,
                                       maximise_fitness=False)
        ga.create_individual = create_individual
        ga.fitness_function = fitness_iou
        ga.mutate_function = mutate
        ga.crossover_function = crossover
        ga.run()
        ga.best_individual()
        best_values[region]['scale'] = sorted(ga.best_individual()[1][:n_scale])
        best_values[region]['aspect_ratio'] = sorted(ga.best_individual()[1][n_scale:])
        print(region, best_values[region], ga.best_individual()[0])
    # Plot the optimized values over the raw per-region distributions.
    fig, ax = plt.subplots(2, len(area_names), figsize=(5 * len(area_names), 6))
    for r, region in enumerate(area_names):
        __df = _df[_df['region'] == region]
        __df['aspect_ratio_best'] = closer_to(__df['aspect_ratio'], best_values[region]['aspect_ratio'])
        __df['scale_best'] = closer_to(__df['scale'], best_values[region]['scale'])
        ax[0, r].hist(__df.aspect_ratio_best, bins=np.asarray(
            [[b - 0.02, b + 0.02] for b in best_values[region]['aspect_ratio']]).flatten(),
                      color='r', density=True, rwidth=0.5,
                      label=str(['{:.2f}'.format(x) for x in best_values[region]['aspect_ratio']]))
        ax[0, r].hist(__df.aspect_ratio, bins=100, range=(0, 6), alpha=0.7, color=region_colors[r], density=True,
                      label=region + '\nMAX: {:.3f}\nMIN: {:.3f}\nMEAN: {:.3f}]'.format(max(__df.aspect_ratio),
                                                                                        min(__df.aspect_ratio),
                                                                                        np.mean(__df.aspect_ratio)))
        ax[1, r].hist(__df.scale_best,
                      bins=np.asarray([[b - 0.01, b + 0.01] for b in best_values[region]['scale']]).flatten(),
                      color='r', density=True, rwidth=0.5,
                      label=str(['{:.2f}'.format(x) for x in best_values[region]['scale']]))
        ax[1, r].hist(__df.scale, bins=100, range=(0, 4), alpha=0.7, color=region_colors[r], density=True,
                      label=region + '\nMAX: {:.3f}\nMIN: {:.3f}\nMEAN: {:.3f}]'.format(max(__df.scale),
                                                                                       min(__df.scale),
                                                                                       np.mean(__df.scale)))
        ax[0, r].set_title("Optimized aspect ratio ({})".format(region))
        ax[0, r].legend()
        ax[1, r].set_title("Optimized scale ({})".format(region))
        ax[1, r].legend()
    fig.savefig('regions_description_best_distance.png')
    plt.show()
# optimize_params_by_iou_ga()
def optimize_params_by_differential_evolution():
    """Optimize per-region anchor scales and aspect ratios with SciPy
    differential evolution (fitness based on IoU, via an 'avg', 'ce' or
    'focal' loss), then compare default / old / new anchor configurations by
    average IoU.
    """
    from scipy.optimize import differential_evolution
    import sys
    mode = 'focal'
    # Mutable dict shared with the fitness function to log new best results.
    state = {'best_result': sys.maxsize}
    _area_names = ['MIDDLE', 'SKY', 'BOTTOM', 'TOP']
    _sample_frac = 1
    # NOTE(review): shadows the module-level ``best_values``.
    best_values = defaultdict(lambda: {'aspect_ratio': None, 'scale': None})
    data = pd.read_csv(OUTPUT_FILE)
    # Keep only images that are not 1280 px tall (presumably the non-front
    # cameras, per the filters earlier in this script -- confirm).
    data = data[data['image_height'] != 1280]
    data = data[['xmax', 'xmin', 'ymax', 'ymin', 'image_height']]
    data['ycenter'] = (data['ymin'] + data['ymax']) / 2
    data['region'] = assign_area(data['ycenter'] / data['image_height'], text=True)
    # Re-center every box around the origin so it can be compared directly
    # against the origin-centered anchors produced by generate_anchors.
    x_factor = data['xmin'] + ((data['xmax'] - data['xmin']) / 2)
    y_factor = data['ymin'] + ((data['ymax'] - data['ymin']) / 2)
    data['xmax'] = data['xmax'] - x_factor
    data['xmin'] = data['xmin'] - x_factor
    data['ymax'] = data['ymax'] - y_factor
    data['ymin'] = data['ymin'] - y_factor
    data = data.sample(frac=_sample_frac)

    def generate_anchors(ratios=None, scales=None, base_size=anchor_base):
        """
        Generate anchor (reference) windows by enumerating aspect ratios X
        scales w.r.t. a reference window.

        NOTE(review): duplicated from optimize_params_by_iou_ga.
        """
        num_anchors = len(ratios) * len(scales)
        # initialize output anchors
        anchors = np.zeros((num_anchors, 4))
        # scale base_size
        anchors[:, 2:] = base_size * np.tile(scales, (2, len(ratios))).T
        # compute areas of anchors
        areas = anchors[:, 2] * anchors[:, 3]
        # correct for ratios
        anchors[:, 2] = np.sqrt(areas / np.repeat(ratios, len(scales)))
        anchors[:, 3] = anchors[:, 2] * np.repeat(ratios, len(scales))
        # transform from (x_ctr, y_ctr, w, h) -> (x1, y1, x2, y2)
        anchors[:, 0::2] -= np.tile(anchors[:, 2] * 0.5, (2, 1)).T
        anchors[:, 1::2] -= np.tile(anchors[:, 3] * 0.5, (2, 1)).T
        return anchors

    def fitness_iou(individual, num_scales, df, state, mode='focal'):
        """Loss over the mean best IoU; lower is better. Prints and records
        in ``state`` every new best configuration."""
        scales, aspect_ratios = individual[:num_scales], individual[num_scales:]
        # Check there are no repeated scales/aspect_ratios
        if len(np.unique(scales)) != len(scales) or len(np.unique(aspect_ratios)) != len(aspect_ratios):
            return 1
        anchors = generate_anchors(ratios=aspect_ratios, scales=scales)
        gt = df[['xmin', 'ymin', 'xmax', 'ymax']].values
        iou = compute_overlap(gt, anchors).max(axis=1).mean()
        if mode == 'avg':
            result = 1 - np.average(iou)
        elif mode == 'ce':
            result = np.average(-np.log(iou))
        elif mode == 'focal':
            result = np.average(-(1 - iou) ** 2 * np.log(iou))
        else:
            raise Exception('Invalid mode.')
        if result < state['best_result']:
            state['best_result'] = result
            print('Current best anchor configuration')
            print(f'Ratios: {sorted(aspect_ratios)}')
            print(f'Scales: {sorted(scales)}')
        return result

    # One differential-evolution run per region; decision vector is the
    # n_scale scales followed by the aspect ratios.
    for region in tqdm(_area_names):
        bounds = bound_scale + bound_aspect_ratio
        _data = data[data['region'] == region]
        print(region, len(_data))
        result = differential_evolution(
            lambda x: fitness_iou(x, n_scale, _data, state, mode),
            bounds=bounds, popsize=10, seed=1)
        if hasattr(result, 'success') and result.success:
            print('Optimization ended successfully!')
        elif not hasattr(result, 'success'):
            print('Optimization ended!')
        else:
            print('Optimization ended unsuccessfully!')
            print(f'Reason: {result.message}')
        values = result.x
        opt_scales = sorted(values[:n_scale])
        opt_aspect_ratio = sorted(values[n_scale:])
        best_values[region]['scale'] = opt_scales
        best_values[region]['aspect_ratio'] = opt_aspect_ratio
        print(region, best_values[region])
    # Plot the optimized values over the raw per-region distributions.
    fig, ax = plt.subplots(2, len(area_names), figsize=(5 * len(area_names), 6))
    for r, region in enumerate(area_names):
        __df = _df[_df['region'] == region]
        __df['aspect_ratio_best'] = closer_to(__df['aspect_ratio'], best_values[region]['aspect_ratio'])
        __df['scale_best'] = closer_to(__df['scale'], best_values[region]['scale'])
        ax[0, r].hist(__df.aspect_ratio_best, bins=np.asarray(
            [[b - 0.02, b + 0.02] for b in best_values[region]['aspect_ratio']]).flatten(),
                      color='r', density=True, rwidth=0.5,
                      label=str(['{:.2f}'.format(x) for x in best_values[region]['aspect_ratio']]))
        ax[0, r].hist(__df.aspect_ratio, bins=100, range=(0, 6), alpha=0.7, color=region_colors[r], density=True,
                      label=region + '\nMAX: {:.3f}\nMIN: {:.3f}\nMEAN: {:.3f}]'.format(max(__df.aspect_ratio),
                                                                                        min(__df.aspect_ratio),
                                                                                        np.mean(__df.aspect_ratio)))
        ax[1, r].hist(__df.scale_best,
                      bins=np.asarray([[b - 0.01, b + 0.01] for b in best_values[region]['scale']]).flatten(),
                      color='r', density=True, rwidth=0.5,
                      label=str(['{:.2f}'.format(x) for x in best_values[region]['scale']]))
        ax[1, r].hist(__df.scale, bins=100, range=(0, 4), alpha=0.7, color=region_colors[r], density=True,
                      label=region + '\nMAX: {:.3f}\nMIN: {:.3f}\nMEAN: {:.3f}]'.format(max(__df.scale),
                                                                                       min(__df.scale),
                                                                                       np.mean(__df.scale)))
        ax[0, r].set_title("Optimized aspect ratio ({})".format(region))
        ax[0, r].legend()
        ax[1, r].set_title("Optimized scale ({})".format(region))
        ax[1, r].legend()
    fig.savefig('regions_description_best_distance.png')
    plt.show()
    # Hand-recorded configurations: 'new'/'default' are [4 scales, 3 ratios];
    # 'old' is [3 scales, 3 ratios] -- note the matching num_scales arguments
    # (4, 3, 4) in the comparisons below.
    _best_values_new = {
        'SKY': [0.06, 0.11036, 0.196657, 0.360178, 0.37788, 0.76, 1.82974],
        'TOP': [0.06974, 0.13669, 0.248809, 0.474911, 0.37657, 0.7821, 2.080698],
        'MIDDLE': [0.07002, 0.1447, 0.2881, 0.64271, 0.4081, 0.83919, 2.2826],
        'BOTTOM': [0.6936, 1.2549, 1.9353, 2.8861, 0.32057, 0.76915, 2.1001]
    }
    _best_values_old = {
        'SKY': [0.08, 0.21, 0.47, 1., 1.89, 3.35],
        'TOP': [0.09, 0.26, 0.59, 0.63, 1.49, 2.81],
        'MIDDLE': [0.1, 0.37, 0.97, 0.48, 1.28, 2.5],
        'BOTTOM': [1.2, 1.8, 2.46, 0.9, 2.07, 3.95]
    }
    _best_values_default = {
        'SKY': [0.25, 0.5, 1., 2., 0.5, 1., 2.],
        'TOP': [0.25, 0.5, 1., 2., 0.5, 1., 2.],
        'MIDDLE': [0.25, 0.5, 1., 2., 0.5, 1., 2.],
        'BOTTOM': [0.25, 0.5, 1., 2., 0.5, 1., 2.]
    }
    # Report the average IoU achieved by each configuration per region.
    for area in area_names:
        print(area)
        avg_iou = 1 - fitness_iou(_best_values_default[area], 4, data[data['region'] == area], state={'best_result': 0},
                                  mode='avg')
        print('Default method average iou:', avg_iou)
        avg_iou = 1 - fitness_iou(_best_values_old[area], 3, data[data['region'] == area], state={'best_result': 0},
                                  mode='avg')
        print('Old method average iou:', avg_iou)
        avg_iou = 1 - fitness_iou(_best_values_new[area], 4, data[data['region'] == area], state={'best_result': 0},
                                  mode='avg')
        print('New method average iou:', avg_iou)
def ga_cyclist():
    """Search, with a genetic algorithm, for a piecewise boundary curve in
    (scale, aspect_ratio) space that separates cyclist/pedestrian boxes from
    vehicle boxes on the lateral-camera images, then plot the result.

    The individual is a 5-tuple (a, b, c, d, e) describing the curve
        y = 1/(a * x**b)  for x < c,   y = d  for c <= x < e,   y = 0 otherwise,
    where x is box scale and y is aspect ratio.  Boxes strictly above the
    curve are counted per class and scored by `fitness_iou`.

    Relies on the module-level globals ``df``, ``boxlabels``, ``colors``,
    ``plt``, ``np`` and ``random``.  Returns the fitted
    ``pyeasyga.GeneticAlgorithm`` instance.
    """
    from pyeasyga import pyeasyga
    # Lateral cameras only — presumably the 1920x886 images (everything whose
    # height is not 1280); TODO confirm against the dataset description.
    _data = df[df['image_height'] != 1280.]
    def create_individual(data):
        # One gene per bound, drawn uniformly over [lo, hi) with 1e-5 resolution.
        _, bounds = data[0]
        return [np.random.randint(bound[0] * 100000, bound[1] * 100000) / 100000 for bound in bounds]
    def mutate(individual):
        """Resample one randomly chosen gene uniformly in [1, 10)."""
        # NOTE(review): the (1, 10) range is hard-coded here; it happens to
        # match every entry of `bounds_cyclist` below, but would silently
        # diverge if those bounds changed.
        mutate_index = random.randrange(len(individual))
        individual[mutate_index] = np.random.randint(1 * 100000, 10 * 100000) / 100000
    def crossover(parent_1, parent_2):
        # Uniform crossover: each child gene is picked from a random parent.
        parents = [parent_1, parent_2]
        child_1 = [random.choice(parents)[i] for i in range(len(parent_1))]
        child_2 = [random.choice(parents)[i] for i in range(len(parent_1))]
        return child_1, child_2
    def fitness_iou(individual, data):
        # y=1/(ax^b) | y: aspect ratio, x: scale
        df, bounds = data[0]
        _df = df[['scale', 'aspect_ratio', 'label']]
        num_anchors = len(_df)
        a, b, c, d, e = individual
        def y(x):
            # Piecewise boundary curve evaluated at a single scale value.
            if x < c:
                return 1 / (a * (x ** b))
            elif x < e:
                return d
            else:
                return 0
        vec_y = np.vectorize(y)
        # NOTE(review): `_df` is a slice of `df`, so this assignment is a
        # pandas chained-assignment write (may raise SettingWithCopyWarning).
        _df['car_region'] = _df['aspect_ratio'] - vec_y(_df['scale'].values)
        # Keep only boxes strictly above the boundary curve.
        _df = _df[_df['car_region'] > 0]
        num_vehicles = len(_df[_df['label'] == 1])
        num_pedestrians = len(_df[_df['label'] == 2])
        num_cyclists = len(_df[_df['label'] == 3])
        # Weighted cyclist/pedestrian count per vehicle above the curve.
        # NOTE(review): raises ZeroDivisionError when no vehicle box lies
        # above the curve — confirm that cannot happen for these bounds.
        fitness = 2 * (154 * num_cyclists + 8 * num_pedestrians) / (
            num_vehicles)  # (154*num_cyclists + 8*num_pedestrians) / (num_vehicles)
        print(num_vehicles, num_pedestrians, num_cyclists, fitness, a, b, c, d, e)
        return fitness
    # One (1, 10) search interval per gene of the 5-parameter curve.
    bounds_cyclist = [(1, 10), (1, 10), (1, 10), (1, 10), (1, 10)]
    ga_data = [(_data, bounds_cyclist)]
    # NOTE(review): maximise_fitness=False *minimizes* fitness_iou, i.e. the
    # GA pushes cyclists/pedestrians below the curve — confirm this is the
    # intended direction.
    ga = pyeasyga.GeneticAlgorithm(ga_data,
                              population_size=200,
                              generations=50,
                              crossover_probability=0.8,
                              mutation_probability=0.2,
                              elitism=True,
                              maximise_fitness=False)
    ga.create_individual = create_individual
    ga.fitness_function = fitness_iou
    ga.mutate_function = mutate
    ga.crossover_function = crossover
    ga.run()
    ga.best_individual()
    a, b, c, d, e = ga.best_individual()[1]
    def y_func(x):
        # Same piecewise curve as in fitness_iou, bound to the best individual.
        if x < c:
            return 1 / (a * (x ** b))
        elif x < e:
            return d
        else:
            return 0
    # Sample the fitted curve densely on (0, 5) for plotting.
    x = [s / 1000 for s in range(1, 5000)]
    y = [y_func(_x) for _x in x]
    fig, ax = plt.subplots(1, figsize=(12, 9))
    for i in range(3):
        l = boxlabels[i + 1]
        __df = df[df['label'] == (i + 1.)]
        __df = __df[__df['image_height'] == 1280.]
        ax.scatter(__df.scale, y=__df.aspect_ratio, marker='+', s=1, color=colors[i + 1], label=l)
    ax.plot(x, y, 'y--', linewidth=2)
    ax.set_title('Scale x Aspect ratio (1920x1289) [{}, {}, {}, {}, {}]'.format(a, b, c, d, e))
    ax.legend()
    ax.set_ylim((0, 50))
    ax.set_xlim((0, 5))
    fig.tight_layout()
    fig.show()
    return ga
def cyclist():
    """Render exploratory figures for the vehicle/pedestrian/cyclist classes.

    Produces, in order:
      1. per-class aspect-ratio and scale histograms,
      2. scale-vs-aspect-ratio scatters (all images; same plot zoomed;
         1920x1280 images overlaid with the fitted 1/(a*x**b) curve;
         1920x886 images),
      3. one 3-panel box-center position figure per class, each saved to
         '<class>.png'.

    Fixes over the previous revision: the 'Cyclist' position figure was
    emitted twice (the verbatim duplicate is removed) and the 1920x1280
    figure title read '1920x1289'.

    Relies on the module-level globals ``df``, ``_df``, ``boxlabels``,
    ``colors`` and ``plt``.  Returns None.
    """
    def _scatter_classes(axis, image_height=None, curve=None):
        # One scatter series per class (labels 1..3), optionally restricted
        # to a single image height, optionally overlaid with a fitted curve.
        for idx in range(3):
            class_df = df[df['label'] == (idx + 1.)]
            if image_height is not None:
                class_df = class_df[class_df['image_height'] == image_height]
            axis.scatter(class_df.scale, y=class_df.aspect_ratio, marker='+', s=1,
                         color=colors[idx + 1], label=boxlabels[idx + 1])
        if curve is not None:
            axis.plot(curve[0], curve[1], 'y--', linewidth=2)

    def _position_figure(label_value, class_name, filename):
        # 3-panel figure of box centers for one class: normalized coordinates
        # over all images, then pixel coordinates for the 1920x1280 and
        # 1920x886 image groups.  The y-axis is inverted to match image
        # coordinates; the figure is saved to `filename`.
        fig, ax = plt.subplots(1, 3, figsize=(15, 5))
        panels = [(None, 1, 'All images'), (1280., 1280, '1920x1280'), (886., 886, '1920x886')]
        for k, (height, y_max, caption) in enumerate(panels):
            sel = _df if height is None else _df[_df['image_height'] == height]
            sel = sel[sel['label'] == label_value]
            y_vals = sel['ycenter'] if height is None else sel['ycenter'] * y_max
            ax[k].scatter(sel['xcenter'], y=y_vals, marker='+', s=1)
            ax[k].set_ylim((0, y_max))
            ax[k].invert_yaxis()
            ax[k].set_title('{} ({})'.format(class_name, caption))
        fig.tight_layout()
        fig.savefig(filename)
        fig.show()

    # 1. Per-class aspect-ratio and scale histograms.
    fig, ax = plt.subplots(2, 3, figsize=(12, 9))
    for i in range(3):
        class_name = boxlabels[i + 1]
        class_df = df[df['label'] == (i + 1.)]
        ax[0, i].hist(class_df.aspect_ratio, bins=100, range=(0, 6), alpha=0.7, color=colors[i + 1], density=True)
        ax[1, i].hist(class_df.scale, bins=100, range=(0, 3), alpha=0.7, color=colors[i + 1], density=True)
        ax[0, i].set_title('Aspect ratio ({})'.format(class_name))
        ax[1, i].set_title('Scale ({})'.format(class_name))
    fig.tight_layout()
    fig.show()

    # 2a. Scale vs aspect ratio over all images.
    fig, ax = plt.subplots(1, figsize=(12, 9))
    _scatter_classes(ax)
    ax.set_title('Scale x Aspect ratio')
    ax.legend()
    fig.tight_layout()
    fig.show()

    # 2b. Same scatter, zoomed to the dense region.
    fig, ax = plt.subplots(1, figsize=(12, 9))
    _scatter_classes(ax)
    ax.set_ylim((0, 75))
    ax.set_xlim((0, 5))
    ax.set_title('Scale x Aspect ratio')
    ax.legend()
    fig.tight_layout()
    fig.show()

    # 2c. Frontal cameras (1920x1280) with the fitted aspect = 1/(a*scale**b)
    # curve; (a, b) were obtained from an earlier fit.
    a, b = 1.1273, 1.2723  # 1.9483, 1.0948
    curve_x = [s / 100 for s in range(1, 500)]
    curve_y = [1 / (a * (_x ** b)) for _x in curve_x]
    fig, ax = plt.subplots(1, figsize=(12, 9))
    _scatter_classes(ax, image_height=1280., curve=(curve_x, curve_y))
    ax.set_title('Scale x Aspect ratio (1920x1280)')  # was mislabeled '1920x1289'
    ax.legend()
    ax.set_ylim((0, 50))
    ax.set_xlim((0, 5))
    fig.tight_layout()
    fig.show()

    # 2d. Lateral cameras (1920x886).
    fig, ax = plt.subplots(1, figsize=(12, 9))
    _scatter_classes(ax, image_height=886.)
    ax.set_title('Scale x Aspect ratio (1920x886)')
    ax.legend()
    ax.set_ylim((0, 75))
    ax.set_xlim((0, 5))
    fig.tight_layout()
    fig.show()

    # 3. Box-center position figures, one per class (the duplicated trailing
    # 'Cyclist' figure of the previous revision is intentionally gone).
    _position_figure(3., 'Cyclist', 'cyclist.png')
    _position_figure(2., 'Pedestrian', 'pedestrian.png')
    _position_figure(1., 'Vehicle', 'vehicle.png')
# cyclist()
| [
"matplotlib.pyplot.title",
"matplotlib.pyplot.clf",
"pandas.read_csv",
"sklearn.preprocessing.MinMaxScaler",
"collections.defaultdict",
"matplotlib.pyplot.figure",
"numpy.mean",
"pyeasyga.pyeasyga.GeneticAlgorithm",
"numpy.arange",
"numpy.random.randint",
"utils_tf_record.read_dataset_utils.read... | [((675, 712), 'tensorflow.compat.v1.enable_eager_execution', 'tf.compat.v1.enable_eager_execution', ([], {}), '()\n', (710, 712), True, 'import tensorflow as tf\n'), ((3727, 3751), 'pandas.read_csv', 'pd.read_csv', (['OUTPUT_FILE'], {}), '(OUTPUT_FILE)\n', (3738, 3751), True, 'import pandas as pd\n'), ((42468, 42527), 'collections.defaultdict', 'defaultdict', (["(lambda : {'aspect_ratio': None, 'scale': None})"], {}), "(lambda : {'aspect_ratio': None, 'scale': None})\n", (42479, 42527), False, 'from collections import defaultdict\n'), ((867, 915), 'utils_tf_record.read_dataset_utils.read_and_parse_sharded_dataset', 'read_and_parse_sharded_dataset', (['FILENAME_PATTERN'], {}), '(FILENAME_PATTERN)\n', (897, 915), False, 'from utils_tf_record.read_dataset_utils import read_and_parse_sharded_dataset\n'), ((5951, 5971), 'numpy.mean', 'np.mean', (['[1280, 886]'], {}), '([1280, 886])\n', (5958, 5971), True, 'import numpy as np\n'), ((7538, 7638), 'numpy.polynomial.polynomial.polyfit', 'polyfit', (["df[df['image_height'] == 1280]['height']", "df[df['image_height'] == 1280]['ycenter']", '(1)'], {}), "(df[df['image_height'] == 1280]['height'], df[df['image_height'] == \n 1280]['ycenter'], 1)\n", (7545, 7638), False, 'from numpy.polynomial.polynomial import polyfit\n'), ((7644, 7656), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (7654, 7656), True, 'from matplotlib import pyplot as plt\n'), ((7661, 7775), 'matplotlib.pyplot.scatter', 'plt.scatter', ([], {'x': "df[df['image_height'] == 1280]['height']", 'y': "df[df['image_height'] == 1280]['ycenter']", 's': '(0.001)'}), "(x=df[df['image_height'] == 1280]['height'], y=df[df[\n 'image_height'] == 1280]['ycenter'], s=0.001)\n", (7672, 7775), True, 'from matplotlib import pyplot as plt\n'), ((8175, 8192), 'matplotlib.pyplot.axis', 'plt.axis', (['"""tight"""'], {}), "('tight')\n", (8183, 8192), True, 'from matplotlib import pyplot as plt\n'), ((8197, 8237), 
'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Object Height"""'], {'fontsize': '(14)'}), "('Object Height', fontsize=14)\n", (8207, 8237), True, 'from matplotlib import pyplot as plt\n'), ((8242, 8286), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Vertical Position"""'], {'fontsize': '(14)'}), "('Vertical Position', fontsize=14)\n", (8252, 8286), True, 'from matplotlib import pyplot as plt\n'), ((8291, 8346), 'matplotlib.pyplot.title', 'plt.title', (['"""Frontal cameras (1920 x 1280)"""'], {'fontsize': '(16)'}), "('Frontal cameras (1920 x 1280)', fontsize=16)\n", (8300, 8346), True, 'from matplotlib import pyplot as plt\n'), ((8351, 8380), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""upper right"""'}), "(loc='upper right')\n", (8361, 8380), True, 'from matplotlib import pyplot as plt\n'), ((8468, 8478), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (8476, 8478), True, 'from matplotlib import pyplot as plt\n'), ((8541, 8641), 'numpy.polynomial.polynomial.polyfit', 'polyfit', (["df[df['image_height'] != 1280]['height']", "df[df['image_height'] != 1280]['ycenter']", '(1)'], {}), "(df[df['image_height'] != 1280]['height'], df[df['image_height'] != \n 1280]['ycenter'], 1)\n", (8548, 8641), False, 'from numpy.polynomial.polynomial import polyfit\n'), ((8692, 8704), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (8702, 8704), True, 'from matplotlib import pyplot as plt\n'), ((8709, 8823), 'matplotlib.pyplot.scatter', 'plt.scatter', ([], {'x': "df[df['image_height'] != 1280]['height']", 'y': "df[df['image_height'] != 1280]['ycenter']", 's': '(0.001)'}), "(x=df[df['image_height'] != 1280]['height'], y=df[df[\n 'image_height'] != 1280]['ycenter'], s=0.001)\n", (8720, 8823), True, 'from matplotlib import pyplot as plt\n'), ((9125, 9158), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Height"""'], {'fontsize': '(14)'}), "('Height', fontsize=14)\n", (9135, 9158), True, 'from matplotlib import pyplot as plt\n'), ((9163, 9207), 
'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Vertical Position"""'], {'fontsize': '(14)'}), "('Vertical Position', fontsize=14)\n", (9173, 9207), True, 'from matplotlib import pyplot as plt\n'), ((9212, 9266), 'matplotlib.pyplot.title', 'plt.title', (['"""Lateral cameras (1920 x 886)"""'], {'fontsize': '(16)'}), "('Lateral cameras (1920 x 886)', fontsize=16)\n", (9221, 9266), True, 'from matplotlib import pyplot as plt\n'), ((9271, 9283), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (9281, 9283), True, 'from matplotlib import pyplot as plt\n'), ((9288, 9298), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (9296, 9298), True, 'from matplotlib import pyplot as plt\n'), ((9394, 9493), 'numpy.polynomial.polynomial.polyfit', 'polyfit', (["df[df['image_height'] != 1280]['width']", "df[df['image_height'] != 1280]['ycenter']", '(1)'], {}), "(df[df['image_height'] != 1280]['width'], df[df['image_height'] != \n 1280]['ycenter'], 1)\n", (9401, 9493), False, 'from numpy.polynomial.polynomial import polyfit\n'), ((9544, 9556), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (9554, 9556), True, 'from matplotlib import pyplot as plt\n'), ((9561, 9674), 'matplotlib.pyplot.scatter', 'plt.scatter', ([], {'x': "df[df['image_height'] != 1280]['width']", 'y': "df[df['image_height'] != 1280]['ycenter']", 's': '(0.001)'}), "(x=df[df['image_height'] != 1280]['width'], y=df[df[\n 'image_height'] != 1280]['ycenter'], s=0.001)\n", (9572, 9674), True, 'from matplotlib import pyplot as plt\n'), ((9975, 10007), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Width"""'], {'fontsize': '(14)'}), "('Width', fontsize=14)\n", (9985, 10007), True, 'from matplotlib import pyplot as plt\n'), ((10012, 10056), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Vertical Position"""'], {'fontsize': '(14)'}), "('Vertical Position', fontsize=14)\n", (10022, 10056), True, 'from matplotlib import pyplot as plt\n'), ((10061, 10115), 'matplotlib.pyplot.title', 'plt.title', 
(['"""Lateral cameras (1920 x 886)"""'], {'fontsize': '(16)'}), "('Lateral cameras (1920 x 886)', fontsize=16)\n", (10070, 10115), True, 'from matplotlib import pyplot as plt\n'), ((10120, 10130), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (10128, 10130), True, 'from matplotlib import pyplot as plt\n'), ((10143, 10242), 'numpy.polynomial.polynomial.polyfit', 'polyfit', (["df[df['image_height'] == 1280]['width']", "df[df['image_height'] == 1280]['ycenter']", '(1)'], {}), "(df[df['image_height'] == 1280]['width'], df[df['image_height'] == \n 1280]['ycenter'], 1)\n", (10150, 10242), False, 'from numpy.polynomial.polynomial import polyfit\n'), ((10293, 10305), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (10303, 10305), True, 'from matplotlib import pyplot as plt\n'), ((10310, 10423), 'matplotlib.pyplot.scatter', 'plt.scatter', ([], {'x': "df[df['image_height'] == 1280]['width']", 'y': "df[df['image_height'] == 1280]['ycenter']", 's': '(0.001)'}), "(x=df[df['image_height'] == 1280]['width'], y=df[df[\n 'image_height'] == 1280]['ycenter'], s=0.001)\n", (10321, 10423), True, 'from matplotlib import pyplot as plt\n'), ((10724, 10756), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Width"""'], {'fontsize': '(14)'}), "('Width', fontsize=14)\n", (10734, 10756), True, 'from matplotlib import pyplot as plt\n'), ((10761, 10805), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Vertical Position"""'], {'fontsize': '(14)'}), "('Vertical Position', fontsize=14)\n", (10771, 10805), True, 'from matplotlib import pyplot as plt\n'), ((10810, 10864), 'matplotlib.pyplot.title', 'plt.title', (['"""Lateral cameras (1920 x 886)"""'], {'fontsize': '(16)'}), "('Lateral cameras (1920 x 886)', fontsize=16)\n", (10819, 10864), True, 'from matplotlib import pyplot as plt\n'), ((10869, 10879), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (10877, 10879), True, 'from matplotlib import pyplot as plt\n'), ((10973, 10989), 'numpy.polynomial.polynomial.polyfit', 
'polyfit', (['x', 'y', '(1)'], {}), '(x, y, 1)\n', (10980, 10989), False, 'from numpy.polynomial.polynomial import polyfit\n'), ((11000, 11029), 'matplotlib.pyplot.scatter', 'plt.scatter', ([], {'x': 'x', 'y': 'y', 's': '(0.01)'}), '(x=x, y=y, s=0.01)\n', (11011, 11029), True, 'from matplotlib import pyplot as plt\n'), ((11117, 11144), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Height (norm)"""'], {}), "('Height (norm)')\n", (11127, 11144), True, 'from matplotlib import pyplot as plt\n'), ((11149, 11187), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Vertical Position (norm)"""'], {}), "('Vertical Position (norm)')\n", (11159, 11187), True, 'from matplotlib import pyplot as plt\n'), ((11192, 11204), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (11202, 11204), True, 'from matplotlib import pyplot as plt\n'), ((11209, 11219), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (11217, 11219), True, 'from matplotlib import pyplot as plt\n'), ((11449, 11509), 'matplotlib.pyplot.scatter', 'plt.scatter', ([], {'x': "df['width']", 'y': "df['ycenter']", 'marker': '"""+"""', 's': '(1)'}), "(x=df['width'], y=df['ycenter'], marker='+', s=1)\n", (11460, 11509), True, 'from matplotlib import pyplot as plt\n'), ((11514, 11533), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Width"""'], {}), "('Width')\n", (11524, 11533), True, 'from matplotlib import pyplot as plt\n'), ((11538, 11569), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Vertical Position"""'], {}), "('Vertical Position')\n", (11548, 11569), True, 'from matplotlib import pyplot as plt\n'), ((11574, 11586), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (11584, 11586), True, 'from matplotlib import pyplot as plt\n'), ((11591, 11601), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (11599, 11601), True, 'from matplotlib import pyplot as plt\n'), ((11617, 11652), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(3)', '(1)'], {'figsize': '(5, 11)'}), '(3, 1, figsize=(5, 
11))\n', (11629, 11652), True, 'from matplotlib import pyplot as plt\n'), ((12090, 12151), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(3)', '(1)'], {'figsize': '(5, 11)', 'sharex': '(True)', 'sharey': '(True)'}), '(3, 1, figsize=(5, 11), sharex=True, sharey=True)\n', (12102, 12151), True, 'from matplotlib import pyplot as plt\n'), ((12646, 12707), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(3)', '(1)'], {'figsize': '(5, 11)', 'sharex': '(True)', 'sharey': '(True)'}), '(3, 1, figsize=(5, 11), sharex=True, sharey=True)\n', (12658, 12707), True, 'from matplotlib import pyplot as plt\n'), ((13201, 13236), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(2)', '(3)'], {'figsize': '(12, 9)'}), '(2, 3, figsize=(12, 9))\n', (13213, 13236), True, 'from matplotlib import pyplot as plt\n'), ((14074, 14108), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(2)', '(1)'], {'figsize': '(5, 6)'}), '(2, 1, figsize=(5, 6))\n', (14086, 14108), True, 'from matplotlib import pyplot as plt\n'), ((15092, 15132), 'numpy.polynomial.polynomial.polyfit', 'polyfit', (["_df['width']", "_df['xcenter']", '(1)'], {}), "(_df['width'], _df['xcenter'], 1)\n", (15099, 15132), False, 'from numpy.polynomial.polynomial import polyfit\n'), ((15143, 15205), 'matplotlib.pyplot.scatter', 'plt.scatter', ([], {'x': "_df['width']", 'y': "_df['xcenter']", 'marker': '"""+"""', 's': '(1)'}), "(x=_df['width'], y=_df['xcenter'], marker='+', s=1)\n", (15154, 15205), True, 'from matplotlib import pyplot as plt\n'), ((15278, 15297), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Width"""'], {}), "('Width')\n", (15288, 15297), True, 'from matplotlib import pyplot as plt\n'), ((15302, 15335), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Horizontal Position"""'], {}), "('Horizontal Position')\n", (15312, 15335), True, 'from matplotlib import pyplot as plt\n'), ((15340, 15397), 'matplotlib.pyplot.title', 'plt.title', (['"""Boxes in the top of the images (1920 x 1280)"""'], {}), "('Boxes in the top of the 
images (1920 x 1280)')\n", (15349, 15397), True, 'from matplotlib import pyplot as plt\n'), ((15402, 15414), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (15412, 15414), True, 'from matplotlib import pyplot as plt\n'), ((15419, 15429), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (15427, 15429), True, 'from matplotlib import pyplot as plt\n'), ((15441, 15504), 'matplotlib.pyplot.scatter', 'plt.scatter', ([], {'x': "_df['height']", 'y': "_df['ycenter']", 'marker': '"""+"""', 's': '(1)'}), "(x=_df['height'], y=_df['ycenter'], marker='+', s=1)\n", (15452, 15504), True, 'from matplotlib import pyplot as plt\n'), ((15578, 15598), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Height"""'], {}), "('Height')\n", (15588, 15598), True, 'from matplotlib import pyplot as plt\n'), ((15603, 15634), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Vertical Position"""'], {}), "('Vertical Position')\n", (15613, 15634), True, 'from matplotlib import pyplot as plt\n'), ((15639, 15696), 'matplotlib.pyplot.title', 'plt.title', (['"""Boxes in the top of the images (1920 x 1280)"""'], {}), "('Boxes in the top of the images (1920 x 1280)')\n", (15648, 15696), True, 'from matplotlib import pyplot as plt\n'), ((15701, 15713), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (15711, 15713), True, 'from matplotlib import pyplot as plt\n'), ((15718, 15728), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (15726, 15728), True, 'from matplotlib import pyplot as plt\n'), ((15744, 15778), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(2)', '(1)'], {'figsize': '(5, 6)'}), '(2, 1, figsize=(5, 6))\n', (15756, 15778), True, 'from matplotlib import pyplot as plt\n'), ((17022, 17031), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (17029, 17031), True, 'from matplotlib import pyplot as plt\n'), ((19618, 19630), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (19628, 19630), True, 'from matplotlib import pyplot as plt\n'), ((20265, 
20389), 'seaborn.violinplot', 'sns.violinplot', ([], {'y': '"""Value"""', 'x': '"""Characteristic"""', 'hue': '"""Cluster"""', 'data': '_df_cluster', 'split': '(True)', 'palette': '"""Blues"""', 'inner': 'None'}), "(y='Value', x='Characteristic', hue='Cluster', data=\n _df_cluster, split=True, palette='Blues', inner=None)\n", (20279, 20389), True, 'import seaborn as sns\n'), ((20503, 20549), 'matplotlib.pyplot.title', 'plt.title', (['"""Cluster distribution"""'], {'fontsize': '(16)'}), "('Cluster distribution', fontsize=16)\n", (20512, 20549), True, 'from matplotlib import pyplot as plt\n'), ((20595, 20627), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Value"""'], {'fontsize': '(14)'}), "('Value', fontsize=14)\n", (20605, 20627), True, 'from matplotlib import pyplot as plt\n'), ((20632, 20666), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Feature"""'], {'fontsize': '(14)'}), "('Feature', fontsize=14)\n", (20642, 20666), True, 'from matplotlib import pyplot as plt\n'), ((20671, 20681), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (20679, 20681), True, 'from matplotlib import pyplot as plt\n'), ((21317, 21387), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(2)', 'n_clusters'], {'figsize': '(5 * n_clusters, 6)', 'sharey': '"""col"""'}), "(2, n_clusters, figsize=(5 * n_clusters, 6), sharey='col')\n", (21329, 21387), True, 'from matplotlib import pyplot as plt\n'), ((22839, 22849), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (22847, 22849), True, 'from matplotlib import pyplot as plt\n'), ((23002, 23058), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(2)', 'n_clusters'], {'figsize': '(5 * n_clusters, 6)'}), '(2, n_clusters, figsize=(5 * n_clusters, 6))\n', (23014, 23058), True, 'from matplotlib import pyplot as plt\n'), ((24084, 24094), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (24092, 24094), True, 'from matplotlib import pyplot as plt\n'), ((25237, 25272), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(2)', '(2)'], 
{'figsize': '(12, 9)'}), '(2, 2, figsize=(12, 9))\n', (25249, 25272), True, 'from matplotlib import pyplot as plt\n'), ((29818, 29828), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (29826, 29828), True, 'from matplotlib import pyplot as plt\n'), ((30298, 30398), 'seaborn.jointplot', 'sns.jointplot', (['"""xcenter"""', '"""ycenter"""'], {'data': '_df[labels == 0]', 'kind': '"""reg"""', 'scatter_kws': "{'s': 0.001}"}), "('xcenter', 'ycenter', data=_df[labels == 0], kind='reg',\n scatter_kws={'s': 0.001})\n", (30311, 30398), True, 'import seaborn as sns\n'), ((30709, 30808), 'seaborn.jointplot', 'sns.jointplot', (['"""xcenter"""', '"""ycenter"""'], {'data': '_df[labels == 1]', 'kind': '"""reg"""', 'scatter_kws': "{'s': 0.03}"}), "('xcenter', 'ycenter', data=_df[labels == 1], kind='reg',\n scatter_kws={'s': 0.03})\n", (30722, 30808), True, 'import seaborn as sns\n'), ((32182, 32216), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(2)', '(1)'], {'figsize': '(5, 9)'}), '(2, 1, figsize=(5, 9))\n', (32194, 32216), True, 'from matplotlib import pyplot as plt\n'), ((33477, 33487), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (33485, 33487), True, 'from matplotlib import pyplot as plt\n'), ((35973, 36021), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(4)'], {'figsize': '(20, 5)', 'sharey': '(True)'}), '(1, 4, figsize=(20, 5), sharey=True)\n', (35985, 36021), True, 'from matplotlib import pyplot as plt\n'), ((37014, 37048), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""regions_cluster.png"""'], {}), "('regions_cluster.png')\n", (37025, 37048), True, 'from matplotlib import pyplot as plt\n'), ((37053, 37063), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (37061, 37063), True, 'from matplotlib import pyplot as plt\n'), ((37079, 37113), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(2)', '(1)'], {'figsize': '(5, 6)'}), '(2, 1, figsize=(5, 6))\n', (37091, 37113), True, 'from matplotlib import pyplot as plt\n'), ((38455, 38465), 
'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (38463, 38465), True, 'from matplotlib import pyplot as plt\n'), ((40189, 40199), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (40197, 40199), True, 'from matplotlib import pyplot as plt\n'), ((40322, 40358), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""regions_countplot.png"""'], {}), "('regions_countplot.png')\n", (40333, 40358), True, 'from matplotlib import pyplot as plt\n'), ((40363, 40373), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (40371, 40373), True, 'from matplotlib import pyplot as plt\n'), ((40536, 40546), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (40544, 40546), True, 'from matplotlib import pyplot as plt\n'), ((40805, 40815), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (40813, 40815), True, 'from matplotlib import pyplot as plt\n'), ((40831, 40866), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(2)', '(5)'], {'figsize': '(17, 4)'}), '(2, 5, figsize=(17, 4))\n', (40843, 40866), True, 'from matplotlib import pyplot as plt\n'), ((41154, 41230), 'seaborn.countplot', 'sns.countplot', ([], {'x': '"""label_name"""', 'data': "_df[_df['region'] == 'SKY']", 'ax': 'ax[0, 1]'}), "(x='label_name', data=_df[_df['region'] == 'SKY'], ax=ax[0, 1])\n", (41167, 41230), True, 'import seaborn as sns\n'), ((41265, 41341), 'seaborn.countplot', 'sns.countplot', ([], {'x': '"""label_name"""', 'data': "_df[_df['region'] == 'TOP']", 'ax': 'ax[0, 2]'}), "(x='label_name', data=_df[_df['region'] == 'TOP'], ax=ax[0, 2])\n", (41278, 41341), True, 'import seaborn as sns\n'), ((41376, 41455), 'seaborn.countplot', 'sns.countplot', ([], {'x': '"""label_name"""', 'data': "_df[_df['region'] == 'MIDDLE']", 'ax': 'ax[0, 3]'}), "(x='label_name', data=_df[_df['region'] == 'MIDDLE'], ax=ax[0, 3])\n", (41389, 41455), True, 'import seaborn as sns\n'), ((41493, 41572), 'seaborn.countplot', 'sns.countplot', ([], {'x': '"""label_name"""', 'data': "_df[_df['region'] == 'BOTTOM']", 'ax': 
'ax[0, 4]'}), "(x='label_name', data=_df[_df['region'] == 'BOTTOM'], ax=ax[0, 4])\n", (41506, 41572), True, 'import seaborn as sns\n'), ((42281, 42317), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""regions_countplot.png"""'], {}), "('regions_countplot.png')\n", (42292, 42317), True, 'from matplotlib import pyplot as plt\n'), ((42322, 42332), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (42330, 42332), True, 'from matplotlib import pyplot as plt\n'), ((43070, 43129), 'collections.defaultdict', 'defaultdict', (["(lambda : {'aspect_ratio': None, 'scale': None})"], {}), "(lambda : {'aspect_ratio': None, 'scale': None})\n", (43081, 43129), False, 'from collections import defaultdict\n'), ((45968, 45978), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (45976, 45978), True, 'from matplotlib import pyplot as plt\n'), ((46293, 46352), 'collections.defaultdict', 'defaultdict', (["(lambda : {'aspect_ratio': None, 'scale': None})"], {}), "(lambda : {'aspect_ratio': None, 'scale': None})\n", (46304, 46352), False, 'from collections import defaultdict\n'), ((60490, 60549), 'collections.defaultdict', 'defaultdict', (["(lambda : {'aspect_ratio': None, 'scale': None})"], {}), "(lambda : {'aspect_ratio': None, 'scale': None})\n", (60501, 60549), False, 'from collections import defaultdict\n'), ((64659, 64675), 'tqdm.tqdm', 'tqdm', (['area_names'], {}), '(area_names)\n', (64663, 64675), False, 'from tqdm import tqdm\n'), ((67818, 67828), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (67826, 67828), True, 'from matplotlib import pyplot as plt\n'), ((68134, 68193), 'collections.defaultdict', 'defaultdict', (["(lambda : {'aspect_ratio': None, 'scale': None})"], {}), "(lambda : {'aspect_ratio': None, 'scale': None})\n", (68145, 68193), False, 'from collections import defaultdict\n'), ((68205, 68229), 'pandas.read_csv', 'pd.read_csv', (['OUTPUT_FILE'], {}), '(OUTPUT_FILE)\n', (68216, 68229), True, 'import pandas as pd\n'), ((70864, 70881), 'tqdm.tqdm', 
'tqdm', (['_area_names'], {}), '(_area_names)\n', (70868, 70881), False, 'from tqdm import tqdm\n'), ((73877, 73887), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (73885, 73887), True, 'from matplotlib import pyplot as plt\n'), ((77271, 77441), 'pyeasyga.pyeasyga.GeneticAlgorithm', 'pyeasyga.GeneticAlgorithm', (['ga_data'], {'population_size': '(200)', 'generations': '(50)', 'crossover_probability': '(0.8)', 'mutation_probability': '(0.2)', 'elitism': '(True)', 'maximise_fitness': '(False)'}), '(ga_data, population_size=200, generations=50,\n crossover_probability=0.8, mutation_probability=0.2, elitism=True,\n maximise_fitness=False)\n', (77296, 77441), False, 'from pyeasyga import pyeasyga\n'), ((78123, 78155), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)'], {'figsize': '(12, 9)'}), '(1, figsize=(12, 9))\n', (78135, 78155), True, 'from matplotlib import pyplot as plt\n'), ((78684, 78719), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(2)', '(3)'], {'figsize': '(12, 9)'}), '(2, 3, figsize=(12, 9))\n', (78696, 78719), True, 'from matplotlib import pyplot as plt\n'), ((79192, 79224), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)'], {'figsize': '(12, 9)'}), '(1, figsize=(12, 9))\n', (79204, 79224), True, 'from matplotlib import pyplot as plt\n'), ((79529, 79561), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)'], {'figsize': '(12, 9)'}), '(1, figsize=(12, 9))\n', (79541, 79561), True, 'from matplotlib import pyplot as plt\n'), ((80042, 80074), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)'], {'figsize': '(12, 9)'}), '(1, figsize=(12, 9))\n', (80054, 80074), True, 'from matplotlib import pyplot as plt\n'), ((80529, 80561), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)'], {'figsize': '(12, 9)'}), '(1, figsize=(12, 9))\n', (80541, 80561), True, 'from matplotlib import pyplot as plt\n'), ((80990, 81025), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(3)'], {'figsize': '(15, 5)'}), '(1, 3, figsize=(15, 5))\n', 
(81002, 81025), True, 'from matplotlib import pyplot as plt\n'), ((81854, 81889), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(3)'], {'figsize': '(15, 5)'}), '(1, 3, figsize=(15, 5))\n', (81866, 81889), True, 'from matplotlib import pyplot as plt\n'), ((82727, 82762), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(3)'], {'figsize': '(15, 5)'}), '(1, 3, figsize=(15, 5))\n', (82739, 82762), True, 'from matplotlib import pyplot as plt\n'), ((83574, 83609), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(3)'], {'figsize': '(15, 5)'}), '(1, 3, figsize=(15, 5))\n', (83586, 83609), True, 'from matplotlib import pyplot as plt\n'), ((1256, 1294), 'tensorflow.cast', 'tf.cast', (["x['image/height']", 'tf.float32'], {}), "(x['image/height'], tf.float32)\n", (1263, 1294), True, 'import tensorflow as tf\n'), ((1317, 1354), 'tensorflow.cast', 'tf.cast', (["x['image/width']", 'tf.float32'], {}), "(x['image/width'], tf.float32)\n", (1324, 1354), True, 'import tensorflow as tf\n'), ((1373, 1423), 'tensorflow.cast', 'tf.cast', (["x['image/object/class/label']", 'tf.float32'], {}), "(x['image/object/class/label'], tf.float32)\n", (1380, 1423), True, 'import tensorflow as tf\n'), ((1755, 1805), 'tensorflow.stack', 'tf.stack', (['[labels, xmax, xmin, ymax, ymin]'], {'axis': '(1)'}), '([labels, xmax, xmin, ymax, ymin], axis=1)\n', (1763, 1805), True, 'import tensorflow as tf\n'), ((4094, 4111), 'numpy.vectorize', 'np.vectorize', (['_cl'], {}), '(_cl)\n', (4106, 4111), True, 'import numpy as np\n'), ((4488, 4515), 'numpy.sqrt', 'np.sqrt', (["df['aspect_ratio']"], {}), "(df['aspect_ratio'])\n", (4495, 4515), True, 'import numpy as np\n'), ((6431, 6445), 'numpy.asarray', 'np.asarray', (['Hb'], {}), '(Hb)\n', (6441, 6445), True, 'import numpy as np\n'), ((6532, 6546), 'numpy.asarray', 'np.asarray', (['Hb'], {}), '(Hb)\n', (6542, 6546), True, 'import numpy as np\n'), ((6699, 6713), 'numpy.asarray', 'np.asarray', (['Hb'], {}), '(Hb)\n', (6709, 6713), True, 
'import numpy as np\n'), ((17163, 17189), 'tqdm.tqdm', 'tqdm', (['silhoutte_n_clusters'], {}), '(silhoutte_n_clusters)\n', (17167, 17189), False, 'from tqdm import tqdm\n'), ((17483, 17515), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""number of clusters"""'], {}), "('number of clusters')\n", (17493, 17515), True, 'from matplotlib import pyplot as plt\n'), ((17524, 17541), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""SSE"""'], {}), "('SSE')\n", (17534, 17541), True, 'from matplotlib import pyplot as plt\n'), ((17550, 17566), 'matplotlib.pyplot.title', 'plt.title', (['label'], {}), '(label)\n', (17559, 17566), True, 'from matplotlib import pyplot as plt\n'), ((17647, 17657), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (17655, 17657), True, 'from matplotlib import pyplot as plt\n'), ((21671, 21773), 'seaborn.distplot', 'sns.distplot', (['__df.aspect_ratio'], {'ax': 'ax[k, 0]', 'hist_kws': "{'range': (0, 6)}", 'kde_kws': "{'clip': (0, 6)}"}), "(__df.aspect_ratio, ax=ax[k, 0], hist_kws={'range': (0, 6)},\n kde_kws={'clip': (0, 6)})\n", (21683, 21773), True, 'import seaborn as sns\n'), ((22253, 22363), 'seaborn.distplot', 'sns.distplot', (['__df[_df.scale < 4].scale'], {'ax': 'ax[k, 1]', 'hist_kws': "{'range': (0, 4)}", 'kde_kws': "{'clip': (0, 4)}"}), "(__df[_df.scale < 4].scale, ax=ax[k, 1], hist_kws={'range': (0,\n 4)}, kde_kws={'clip': (0, 4)})\n", (22265, 22363), True, 'import seaborn as sns\n'), ((35546, 35572), 'numpy.vectorize', 'np.vectorize', (['_assign_area'], {}), '(_assign_area)\n', (35558, 35572), True, 'import numpy as np\n'), ((43234, 43350), 'scipy.optimize.shgo', 'shgo', (['optimize_param', 'bound_aspect_ratio'], {'args': "(__df, 'aspect_ratio')", 'n': '(1000)', 'iters': '(5)', 'sampling_method': '"""sobol"""'}), "(optimize_param, bound_aspect_ratio, args=(__df, 'aspect_ratio'), n=\n 1000, iters=5, sampling_method='sobol')\n", (43238, 43350), False, 'from scipy.optimize import minimize, shgo\n'), ((43406, 43507), 
'scipy.optimize.shgo', 'shgo', (['optimize_param', 'bound_scale'], {'args': "(__df, 'scale')", 'n': '(1000)', 'iters': '(5)', 'sampling_method': '"""sobol"""'}), "(optimize_param, bound_scale, args=(__df, 'scale'), n=1000, iters=5,\n sampling_method='sobol')\n", (43410, 43507), False, 'from scipy.optimize import minimize, shgo\n'), ((50451, 50461), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (50459, 50461), True, 'from matplotlib import pyplot as plt\n'), ((54038, 54048), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (54046, 54048), True, 'from matplotlib import pyplot as plt\n'), ((57641, 57651), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (57649, 57651), True, 'from matplotlib import pyplot as plt\n'), ((60255, 60265), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (60263, 60265), True, 'from matplotlib import pyplot as plt\n'), ((60561, 60585), 'pandas.read_csv', 'pd.read_csv', (['OUTPUT_FILE'], {}), '(OUTPUT_FILE)\n', (60572, 60585), True, 'import pandas as pd\n'), ((61901, 61929), 'random.randrange', 'random.randrange', (['(1)', 'n_scale'], {}), '(1, n_scale)\n', (61917, 61929), False, 'import random\n'), ((63410, 63436), 'numpy.zeros', 'np.zeros', (['(num_anchors, 4)'], {}), '((num_anchors, 4))\n', (63418, 63436), True, 'import numpy as np\n'), ((64860, 65030), 'pyeasyga.pyeasyga.GeneticAlgorithm', 'pyeasyga.GeneticAlgorithm', (['ga_data'], {'population_size': '(1000)', 'generations': '(5)', 'crossover_probability': '(0.8)', 'mutation_probability': '(0.2)', 'elitism': '(True)', 'maximise_fitness': '(False)'}), '(ga_data, population_size=1000, generations=5,\n crossover_probability=0.8, mutation_probability=0.2, elitism=True,\n maximise_fitness=False)\n', (64885, 65030), False, 'from pyeasyga import pyeasyga\n'), ((69149, 69175), 'numpy.zeros', 'np.zeros', (['(num_anchors, 4)'], {}), '((num_anchors, 4))\n', (69157, 69175), True, 'import numpy as np\n'), ((76600, 76615), 'numpy.vectorize', 'np.vectorize', (['y'], {}), 
'(y)\n', (76612, 76615), True, 'import numpy as np\n'), ((1439, 1487), 'tensorflow.cast', 'tf.cast', (["x['image/object/bbox/xmax']", 'tf.float32'], {}), "(x['image/object/bbox/xmax'], tf.float32)\n", (1446, 1487), True, 'import tensorflow as tf\n'), ((1517, 1565), 'tensorflow.cast', 'tf.cast', (["x['image/object/bbox/xmin']", 'tf.float32'], {}), "(x['image/object/bbox/xmin'], tf.float32)\n", (1524, 1565), True, 'import tensorflow as tf\n'), ((1595, 1643), 'tensorflow.cast', 'tf.cast', (["x['image/object/bbox/ymax']", 'tf.float32'], {}), "(x['image/object/bbox/ymax'], tf.float32)\n", (1602, 1643), True, 'import tensorflow as tf\n'), ((1674, 1722), 'tensorflow.cast', 'tf.cast', (["x['image/object/bbox/ymin']", 'tf.float32'], {}), "(x['image/object/bbox/ymin'], tf.float32)\n", (1681, 1722), True, 'import tensorflow as tf\n'), ((3320, 3388), 'tensorflow.numpy_function', 'tf.numpy_function', (['write_to_csv', '[example[k], index, sep]', 'tf.string'], {}), '(write_to_csv, [example[k], index, sep], tf.string)\n', (3337, 3388), True, 'import tensorflow as tf\n'), ((16891, 16905), 'sklearn.preprocessing.MinMaxScaler', 'MinMaxScaler', ([], {}), '()\n', (16903, 16905), False, 'from sklearn.preprocessing import MinMaxScaler, OneHotEncoder\n'), ((17600, 17638), 'matplotlib.pyplot.savefig', 'plt.savefig', (["('elbow_' + label + '.png')"], {}), "('elbow_' + label + '.png')\n", (17611, 17638), True, 'from matplotlib import pyplot as plt\n'), ((17687, 17742), 'sklearn.cluster.KMeans', 'KMeans', ([], {'n_clusters': 'n_clusters', 'random_state': '(0)', 'n_jobs': '(8)'}), '(n_clusters=n_clusters, random_state=0, n_jobs=8)\n', (17693, 17742), False, 'from sklearn.cluster import KMeans\n'), ((19459, 19517), 'pandas.DataFrame', 'pd.DataFrame', (['labels'], {'index': '_df.index', 'columns': "['cluster']"}), "(labels, index=_df.index, columns=['cluster'])\n", (19471, 19517), True, 'import pandas as pd\n'), ((24220, 24234), 'sklearn.preprocessing.MinMaxScaler', 'MinMaxScaler', ([], {}), 
'()\n', (24232, 24234), False, 'from sklearn.preprocessing import MinMaxScaler, OneHotEncoder\n'), ((24312, 24367), 'sklearn.cluster.KMeans', 'KMeans', ([], {'n_clusters': 'n_clusters', 'random_state': '(0)', 'n_jobs': '(8)'}), '(n_clusters=n_clusters, random_state=0, n_jobs=8)\n', (24318, 24367), False, 'from sklearn.cluster import KMeans\n'), ((24437, 24495), 'pandas.DataFrame', 'pd.DataFrame', (['labels'], {'index': '_df.index', 'columns': "['cluster']"}), "(labels, index=_df.index, columns=['cluster'])\n", (24449, 24495), True, 'import pandas as pd\n'), ((24785, 24809), 'numpy.arange', 'np.arange', (['(0.05)', '(1)', '(0.05)'], {}), '(0.05, 1, 0.05)\n', (24794, 24809), True, 'import numpy as np\n'), ((29892, 29906), 'sklearn.preprocessing.MinMaxScaler', 'MinMaxScaler', ([], {}), '()\n', (29904, 29906), False, 'from sklearn.preprocessing import MinMaxScaler, OneHotEncoder\n'), ((29984, 30039), 'sklearn.cluster.KMeans', 'KMeans', ([], {'n_clusters': 'n_clusters', 'random_state': '(0)', 'n_jobs': '(8)'}), '(n_clusters=n_clusters, random_state=0, n_jobs=8)\n', (29990, 30039), False, 'from sklearn.cluster import KMeans\n'), ((30109, 30167), 'pandas.DataFrame', 'pd.DataFrame', (['labels'], {'index': '_df.index', 'columns': "['cluster']"}), "(labels, index=_df.index, columns=['cluster'])\n", (30121, 30167), True, 'import pandas as pd\n'), ((31165, 31179), 'sklearn.preprocessing.MinMaxScaler', 'MinMaxScaler', ([], {}), '()\n', (31177, 31179), False, 'from sklearn.preprocessing import MinMaxScaler, OneHotEncoder\n'), ((31257, 31312), 'sklearn.cluster.KMeans', 'KMeans', ([], {'n_clusters': 'n_clusters', 'random_state': '(0)', 'n_jobs': '(8)'}), '(n_clusters=n_clusters, random_state=0, n_jobs=8)\n', (31263, 31312), False, 'from sklearn.cluster import KMeans\n'), ((31382, 31440), 'pandas.DataFrame', 'pd.DataFrame', (['labels'], {'index': '_df.index', 'columns': "['cluster']"}), "(labels, index=_df.index, columns=['cluster'])\n", (31394, 31440), True, 'import pandas as 
pd\n'), ((31730, 31754), 'numpy.arange', 'np.arange', (['(0.05)', '(1)', '(0.05)'], {}), '(0.05, 1, 0.05)\n', (31739, 31754), True, 'import numpy as np\n'), ((35795, 35809), 'sklearn.preprocessing.MinMaxScaler', 'MinMaxScaler', ([], {}), '()\n', (35807, 35809), False, 'from sklearn.preprocessing import MinMaxScaler, OneHotEncoder\n'), ((35869, 35924), 'sklearn.cluster.KMeans', 'KMeans', ([], {'n_clusters': 'n_clusters', 'random_state': '(0)', 'n_jobs': '(8)'}), '(n_clusters=n_clusters, random_state=0, n_jobs=8)\n', (35875, 35924), False, 'from sklearn.cluster import KMeans\n'), ((36386, 36415), 'matplotlib.colors.ListedColormap', 'ListedColormap', (['region_colors'], {}), '(region_colors)\n', (36400, 36415), False, 'from matplotlib.colors import ListedColormap\n'), ((46697, 46722), 'numpy.random.randint', 'np.random.randint', (['(6)', '(500)'], {}), '(6, 500)\n', (46714, 46722), True, 'import numpy as np\n'), ((47497, 47665), 'pyeasyga.pyeasyga.GeneticAlgorithm', 'pyeasyga.GeneticAlgorithm', (['data'], {'population_size': '(1000)', 'generations': '(10)', 'crossover_probability': '(0.8)', 'mutation_probability': '(0.2)', 'elitism': '(True)', 'maximise_fitness': '(False)'}), '(data, population_size=1000, generations=10,\n crossover_probability=0.8, mutation_probability=0.2, elitism=True,\n maximise_fitness=False)\n', (47522, 47665), False, 'from pyeasyga import pyeasyga\n'), ((51071, 51238), 'pyeasyga.pyeasyga.GeneticAlgorithm', 'pyeasyga.GeneticAlgorithm', (['data'], {'population_size': '(1000)', 'generations': '(5)', 'crossover_probability': '(0.8)', 'mutation_probability': '(0.2)', 'elitism': '(True)', 'maximise_fitness': '(False)'}), '(data, population_size=1000, generations=5,\n crossover_probability=0.8, mutation_probability=0.2, elitism=True,\n maximise_fitness=False)\n', (51096, 51238), False, 'from pyeasyga import pyeasyga\n'), ((54666, 54833), 'pyeasyga.pyeasyga.GeneticAlgorithm', 'pyeasyga.GeneticAlgorithm', (['data'], {'population_size': '(1000)', 
'generations': '(5)', 'crossover_probability': '(0.8)', 'mutation_probability': '(0.2)', 'elitism': '(True)', 'maximise_fitness': '(False)'}), '(data, population_size=1000, generations=5,\n crossover_probability=0.8, mutation_probability=0.2, elitism=True,\n maximise_fitness=False)\n', (54691, 54833), False, 'from pyeasyga import pyeasyga\n'), ((57812, 57829), 'numpy.quantile', 'np.quantile', (['a', 'q'], {}), '(a, q)\n', (57823, 57829), True, 'import numpy as np\n'), ((63887, 63923), 'numpy.tile', 'np.tile', (['(anchors[:, 2] * 0.5)', '(2, 1)'], {}), '(anchors[:, 2] * 0.5, (2, 1))\n', (63894, 63923), True, 'import numpy as np\n'), ((63954, 63990), 'numpy.tile', 'np.tile', (['(anchors[:, 3] * 0.5)', '(2, 1)'], {}), '(anchors[:, 3] * 0.5, (2, 1))\n', (63961, 63990), True, 'import numpy as np\n'), ((69626, 69662), 'numpy.tile', 'np.tile', (['(anchors[:, 2] * 0.5)', '(2, 1)'], {}), '(anchors[:, 2] * 0.5, (2, 1))\n', (69633, 69662), True, 'import numpy as np\n'), ((69693, 69729), 'numpy.tile', 'np.tile', (['(anchors[:, 3] * 0.5)', '(2, 1)'], {}), '(anchors[:, 3] * 0.5, (2, 1))\n', (69700, 69729), True, 'import numpy as np\n'), ((75856, 75898), 'numpy.random.randint', 'np.random.randint', (['(1 * 100000)', '(10 * 100000)'], {}), '(1 * 100000, 10 * 100000)\n', (75873, 75898), True, 'import numpy as np\n'), ((1925, 1948), 'tensorflow.slice', 'tf.slice', (['x', '(0,)', '(1,)'], {}), '(x, (0,), (1,))\n', (1933, 1948), True, 'import tensorflow as tf\n'), ((1974, 1997), 'tensorflow.slice', 'tf.slice', (['x', '(1,)', '(1,)'], {}), '(x, (1,), (1,))\n', (1982, 1997), True, 'import tensorflow as tf\n'), ((2023, 2046), 'tensorflow.slice', 'tf.slice', (['x', '(2,)', '(1,)'], {}), '(x, (2,), (1,))\n', (2031, 2046), True, 'import tensorflow as tf\n'), ((2072, 2095), 'tensorflow.slice', 'tf.slice', (['x', '(3,)', '(1,)'], {}), '(x, (3,), (1,))\n', (2080, 2095), True, 'import tensorflow as tf\n'), ((2121, 2144), 'tensorflow.slice', 'tf.slice', (['x', '(4,)', '(1,)'], {}), '(x, (4,), 
(1,))\n', (2129, 2144), True, 'import tensorflow as tf\n'), ((2299, 2340), 'tensorflow.data.Dataset.from_tensor_slices', 'tf.data.Dataset.from_tensor_slices', (['boxes'], {}), '(boxes)\n', (2333, 2340), True, 'import tensorflow as tf\n'), ((7805, 7826), 'numpy.asarray', 'np.asarray', (['[0, 1200]'], {}), '([0, 1200])\n', (7815, 7826), True, 'import numpy as np\n'), ((8850, 8870), 'numpy.asarray', 'np.asarray', (['[0, 800]'], {}), '([0, 800])\n', (8860, 8870), True, 'import numpy as np\n'), ((9701, 9721), 'numpy.asarray', 'np.asarray', (['[0, 800]'], {}), '([0, 800])\n', (9711, 9721), True, 'import numpy as np\n'), ((10450, 10470), 'numpy.asarray', 'np.asarray', (['[0, 800]'], {}), '([0, 800])\n', (10460, 10470), True, 'import numpy as np\n'), ((11059, 11077), 'numpy.asarray', 'np.asarray', (['[0, 1]'], {}), '([0, 1])\n', (11069, 11077), True, 'import numpy as np\n'), ((42805, 42814), 'numpy.std', 'np.std', (['x'], {}), '(x)\n', (42811, 42814), True, 'import numpy as np\n'), ((46433, 46482), 'numpy.random.randint', 'np.random.randint', (['(bound[0] * 100)', '(bound[1] * 100)'], {}), '(bound[0] * 100, bound[1] * 100)\n', (46450, 46482), True, 'import numpy as np\n'), ((47102, 47120), 'numpy.std', 'np.std', (['individual'], {}), '(individual)\n', (47108, 47120), True, 'import numpy as np\n'), ((47260, 47299), 'itertools.product', 'itertools.product', (['features', 'area_names'], {}), '(features, area_names)\n', (47277, 47299), False, 'import itertools\n'), ((50834, 50873), 'itertools.product', 'itertools.product', (['features', 'area_names'], {}), '(features, area_names)\n', (50851, 50873), False, 'import itertools\n'), ((54429, 54468), 'itertools.product', 'itertools.product', (['features', 'area_names'], {}), '(features, area_names)\n', (54446, 54468), False, 'import itertools\n'), ((57870, 57926), 'itertools.product', 'itertools.product', (["['aspect_ratio', 'scale']", 'area_names'], {}), "(['aspect_ratio', 'scale'], area_names)\n", (57887, 57926), False, 'import 
itertools\n'), ((61537, 61562), 'numpy.random.randint', 'np.random.randint', (['(6)', '(500)'], {}), '(6, 500)\n', (61554, 61562), True, 'import numpy as np\n'), ((61600, 61625), 'numpy.random.randint', 'np.random.randint', (['(0)', '(500)'], {}), '(0, 500)\n', (61617, 61625), True, 'import numpy as np\n'), ((70339, 70354), 'numpy.average', 'np.average', (['iou'], {}), '(iou)\n', (70349, 70354), True, 'import numpy as np\n'), ((75583, 75638), 'numpy.random.randint', 'np.random.randint', (['(bound[0] * 100000)', '(bound[1] * 100000)'], {}), '(bound[0] * 100000, bound[1] * 100000)\n', (75600, 75638), True, 'import numpy as np\n'), ((76006, 76028), 'random.choice', 'random.choice', (['parents'], {}), '(parents)\n', (76019, 76028), False, 'import random\n'), ((76082, 76104), 'random.choice', 'random.choice', (['parents'], {}), '(parents)\n', (76095, 76104), False, 'import random\n'), ((14153, 14208), 'numpy.asarray', 'np.asarray', (['[[b - 0.1, b + 0.1] for b in aspect_ratios]'], {}), '([[b - 0.1, b + 0.1] for b in aspect_ratios])\n', (14163, 14208), True, 'import numpy as np\n'), ((14572, 14620), 'numpy.asarray', 'np.asarray', (['[[b - 0.1, b + 0.1] for b in scales]'], {}), '([[b - 0.1, b + 0.1] for b in scales])\n', (14582, 14620), True, 'import numpy as np\n'), ((15824, 15879), 'numpy.asarray', 'np.asarray', (['[[b - 0.1, b + 0.1] for b in aspect_ratios]'], {}), '([[b - 0.1, b + 0.1] for b in aspect_ratios])\n', (15834, 15879), True, 'import numpy as np\n'), ((16247, 16295), 'numpy.asarray', 'np.asarray', (['[[b - 0.1, b + 0.1] for b in scales]'], {}), '([[b - 0.1, b + 0.1] for b in scales])\n', (16257, 16295), True, 'import numpy as np\n'), ((17212, 17258), 'sklearn.cluster.KMeans', 'KMeans', ([], {'n_clusters': 'k', 'random_state': '(0)', 'n_jobs': '(8)'}), '(n_clusters=k, random_state=0, n_jobs=8)\n', (17218, 17258), False, 'from sklearn.cluster import KMeans\n'), ((50695, 50716), 'numpy.unique', 'np.unique', (['individual'], {}), '(individual)\n', (50704, 
50716), True, 'import numpy as np\n'), ((54290, 54311), 'numpy.unique', 'np.unique', (['individual'], {}), '(individual)\n', (54299, 54311), True, 'import numpy as np\n'), ((61174, 61223), 'numpy.random.randint', 'np.random.randint', (['(bound[0] * 100)', '(bound[1] * 100)'], {}), '(bound[0] * 100, bound[1] * 100)\n', (61191, 61223), True, 'import numpy as np\n'), ((61270, 61319), 'numpy.random.randint', 'np.random.randint', (['(bound[0] * 100)', '(bound[1] * 100)'], {}), '(bound[0] * 100, bound[1] * 100)\n', (61287, 61319), True, 'import numpy as np\n'), ((64312, 64329), 'numpy.unique', 'np.unique', (['scales'], {}), '(scales)\n', (64321, 64329), True, 'import numpy as np\n'), ((64353, 64377), 'numpy.unique', 'np.unique', (['aspect_ratios'], {}), '(aspect_ratios)\n', (64362, 64377), True, 'import numpy as np\n'), ((69983, 70000), 'numpy.unique', 'np.unique', (['scales'], {}), '(scales)\n', (69992, 70000), True, 'import numpy as np\n'), ((70024, 70048), 'numpy.unique', 'np.unique', (['aspect_ratios'], {}), '(aspect_ratios)\n', (70033, 70048), True, 'import numpy as np\n'), ((6626, 6645), 'math.radians', 'math.radians', (['alpha'], {}), '(alpha)\n', (6638, 6645), False, 'import math\n'), ((6793, 6812), 'math.radians', 'math.radians', (['alpha'], {}), '(alpha)\n', (6805, 6812), False, 'import math\n'), ((38691, 38746), 'numpy.asarray', 'np.asarray', (['[[b - 0.1, b + 0.1] for b in aspect_ratios]'], {}), '([[b - 0.1, b + 0.1] for b in aspect_ratios])\n', (38701, 38746), True, 'import numpy as np\n'), ((39266, 39316), 'numpy.mean', 'np.mean', (["_df[_df['region'] == region].aspect_ratio"], {}), "(_df[_df['region'] == region].aspect_ratio)\n", (39273, 39316), True, 'import numpy as np\n'), ((39411, 39459), 'numpy.asarray', 'np.asarray', (['[[b - 0.1, b + 0.1] for b in scales]'], {}), '([[b - 0.1, b + 0.1] for b in scales])\n', (39421, 39459), True, 'import numpy as np\n'), ((39925, 39968), 'numpy.mean', 'np.mean', (["_df[_df['region'] == region].scale"], {}), 
"(_df[_df['region'] == region].scale)\n", (39932, 39968), True, 'import numpy as np\n'), ((44291, 44370), 'numpy.asarray', 'np.asarray', (["[[b - 0.03, b + 0.03] for b in best_values[region]['aspect_ratio']]"], {}), "([[b - 0.03, b + 0.03] for b in best_values[region]['aspect_ratio']])\n", (44301, 44370), True, 'import numpy as np\n'), ((44967, 44993), 'numpy.mean', 'np.mean', (['__df.aspect_ratio'], {}), '(__df.aspect_ratio)\n', (44974, 44993), True, 'import numpy as np\n'), ((45062, 45134), 'numpy.asarray', 'np.asarray', (["[[b - 0.03, b + 0.03] for b in best_values[region]['scale']]"], {}), "([[b - 0.03, b + 0.03] for b in best_values[region]['scale']])\n", (45072, 45134), True, 'import numpy as np\n'), ((45703, 45722), 'numpy.mean', 'np.mean', (['__df.scale'], {}), '(__df.scale)\n', (45710, 45722), True, 'import numpy as np\n'), ((66119, 66198), 'numpy.asarray', 'np.asarray', (["[[b - 0.02, b + 0.02] for b in best_values[region]['aspect_ratio']]"], {}), "([[b - 0.02, b + 0.02] for b in best_values[region]['aspect_ratio']])\n", (66129, 66198), True, 'import numpy as np\n'), ((66808, 66834), 'numpy.mean', 'np.mean', (['__df.aspect_ratio'], {}), '(__df.aspect_ratio)\n', (66815, 66834), True, 'import numpy as np\n'), ((66903, 66975), 'numpy.asarray', 'np.asarray', (["[[b - 0.01, b + 0.01] for b in best_values[region]['scale']]"], {}), "([[b - 0.01, b + 0.01] for b in best_values[region]['scale']])\n", (66913, 66975), True, 'import numpy as np\n'), ((67544, 67563), 'numpy.mean', 'np.mean', (['__df.scale'], {}), '(__df.scale)\n', (67551, 67563), True, 'import numpy as np\n'), ((70415, 70426), 'numpy.log', 'np.log', (['iou'], {}), '(iou)\n', (70421, 70426), True, 'import numpy as np\n'), ((72178, 72257), 'numpy.asarray', 'np.asarray', (["[[b - 0.02, b + 0.02] for b in best_values[region]['aspect_ratio']]"], {}), "([[b - 0.02, b + 0.02] for b in best_values[region]['aspect_ratio']])\n", (72188, 72257), True, 'import numpy as np\n'), ((72867, 72893), 'numpy.mean', 
'np.mean', (['__df.aspect_ratio'], {}), '(__df.aspect_ratio)\n', (72874, 72893), True, 'import numpy as np\n'), ((72962, 73034), 'numpy.asarray', 'np.asarray', (["[[b - 0.01, b + 0.01] for b in best_values[region]['scale']]"], {}), "([[b - 0.01, b + 0.01] for b in best_values[region]['scale']])\n", (72972, 73034), True, 'import numpy as np\n'), ((73603, 73622), 'numpy.mean', 'np.mean', (['__df.scale'], {}), '(__df.scale)\n', (73610, 73622), True, 'import numpy as np\n'), ((48673, 48752), 'numpy.asarray', 'np.asarray', (["[[b - 0.02, b + 0.02] for b in best_values[region]['aspect_ratio']]"], {}), "([[b - 0.02, b + 0.02] for b in best_values[region]['aspect_ratio']])\n", (48683, 48752), True, 'import numpy as np\n'), ((49390, 49416), 'numpy.mean', 'np.mean', (['__df.aspect_ratio'], {}), '(__df.aspect_ratio)\n', (49397, 49416), True, 'import numpy as np\n'), ((49493, 49565), 'numpy.asarray', 'np.asarray', (["[[b - 0.01, b + 0.01] for b in best_values[region]['scale']]"], {}), "([[b - 0.01, b + 0.01] for b in best_values[region]['scale']])\n", (49503, 49565), True, 'import numpy as np\n'), ((50158, 50177), 'numpy.mean', 'np.mean', (['__df.scale'], {}), '(__df.scale)\n', (50165, 50177), True, 'import numpy as np\n'), ((52255, 52334), 'numpy.asarray', 'np.asarray', (["[[b - 0.02, b + 0.02] for b in best_values[region]['aspect_ratio']]"], {}), "([[b - 0.02, b + 0.02] for b in best_values[region]['aspect_ratio']])\n", (52265, 52334), True, 'import numpy as np\n'), ((52972, 52998), 'numpy.mean', 'np.mean', (['__df.aspect_ratio'], {}), '(__df.aspect_ratio)\n', (52979, 52998), True, 'import numpy as np\n'), ((53075, 53147), 'numpy.asarray', 'np.asarray', (["[[b - 0.01, b + 0.01] for b in best_values[region]['scale']]"], {}), "([[b - 0.01, b + 0.01] for b in best_values[region]['scale']])\n", (53085, 53147), True, 'import numpy as np\n'), ((53740, 53759), 'numpy.mean', 'np.mean', (['__df.scale'], {}), '(__df.scale)\n', (53747, 53759), True, 'import numpy as np\n'), ((55854, 
55933), 'numpy.asarray', 'np.asarray', (["[[b - 0.02, b + 0.02] for b in best_values[region]['aspect_ratio']]"], {}), "([[b - 0.02, b + 0.02] for b in best_values[region]['aspect_ratio']])\n", (55864, 55933), True, 'import numpy as np\n'), ((56571, 56597), 'numpy.mean', 'np.mean', (['__df.aspect_ratio'], {}), '(__df.aspect_ratio)\n', (56578, 56597), True, 'import numpy as np\n'), ((56674, 56746), 'numpy.asarray', 'np.asarray', (["[[b - 0.01, b + 0.01] for b in best_values[region]['scale']]"], {}), "([[b - 0.01, b + 0.01] for b in best_values[region]['scale']])\n", (56684, 56746), True, 'import numpy as np\n'), ((57339, 57358), 'numpy.mean', 'np.mean', (['__df.scale'], {}), '(__df.scale)\n', (57346, 57358), True, 'import numpy as np\n'), ((58470, 58549), 'numpy.asarray', 'np.asarray', (["[[b - 0.02, b + 0.02] for b in best_values[region]['aspect_ratio']]"], {}), "([[b - 0.02, b + 0.02] for b in best_values[region]['aspect_ratio']])\n", (58480, 58549), True, 'import numpy as np\n'), ((59187, 59213), 'numpy.mean', 'np.mean', (['__df.aspect_ratio'], {}), '(__df.aspect_ratio)\n', (59194, 59213), True, 'import numpy as np\n'), ((59290, 59362), 'numpy.asarray', 'np.asarray', (["[[b - 0.01, b + 0.01] for b in best_values[region]['scale']]"], {}), "([[b - 0.01, b + 0.01] for b in best_values[region]['scale']])\n", (59300, 59362), True, 'import numpy as np\n'), ((59955, 59974), 'numpy.mean', 'np.mean', (['__df.scale'], {}), '(__df.scale)\n', (59962, 59974), True, 'import numpy as np\n'), ((70508, 70519), 'numpy.log', 'np.log', (['iou'], {}), '(iou)\n', (70514, 70519), True, 'import numpy as np\n')] |
"""Tests for the ``bokeh_templating`` module.
Authors
-------
- <NAME>
Use
---
These tests can be run via the command line (omit the -s to
suppress verbose output to stdout):
::
pytest -s test_bokeh_templating.py
"""
import os
import numpy as np
from jwql.bokeh_templating import BokehTemplate
file_dir = os.path.dirname(os.path.realpath(__file__))
class TestTemplate(BokehTemplate):
    """
    A minimal BokehTemplate app for testing purposes. This is adapted from
    the example included in the ``bokeh_template`` package.
    """

    _embed = True

    def pre_init(self):
        """
        Initialize the Lissajous parameters ``a`` and ``b`` and point the
        app at its YAML interface file, before the Bokeh interface is
        created by parsing that file.
        """
        self.a = 4
        self.b = 2
        self.format_string = None
        self.interface_file = os.path.join(
            file_dir, "test_bokeh_tempating_interface.yaml"
        )

    # No post-initialization tasks are required.
    post_init = None

    @property
    def x(self):
        """The x-value of the Lissajous curves."""
        theta = np.linspace(0, 2 * np.pi, 500)
        return 4. * np.sin(self.a * theta)

    @property
    def y(self):
        """The y-value of the Lissajous curves."""
        theta = np.linspace(0, 2 * np.pi, 500)
        return 3. * np.sin(self.b * theta)

    def controller(self, attr, old, new):
        """
        Controller function updating the curves whenever the sliders are
        adjusted. Bokeh object attributes are reached through the
        ``self.refs`` dictionary.
        """
        self.a = self.refs["a_slider"].value
        self.b = self.refs["b_slider"].value
        self.refs["figure_source"].data = {'x': self.x, 'y': self.y}
| [
"os.path.realpath",
"os.path.join",
"numpy.linspace"
] | [((346, 372), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (362, 372), False, 'import os\n'), ((913, 974), 'os.path.join', 'os.path.join', (['file_dir', '"""test_bokeh_tempating_interface.yaml"""'], {}), "(file_dir, 'test_bokeh_tempating_interface.yaml')\n", (925, 974), False, 'import os\n'), ((1165, 1195), 'numpy.linspace', 'np.linspace', (['(0)', '(2 * np.pi)', '(500)'], {}), '(0, 2 * np.pi, 500)\n', (1176, 1195), True, 'import numpy as np\n'), ((1316, 1346), 'numpy.linspace', 'np.linspace', (['(0)', '(2 * np.pi)', '(500)'], {}), '(0, 2 * np.pi, 500)\n', (1327, 1346), True, 'import numpy as np\n')] |
from typing import Dict
import numpy as np
from scipy.signal import lfilter
def np_softmax(x):
    """Compute a numerically stable softmax over the last axis of ``x``.

    Subtracting the per-row maximum before exponentiating avoids overflow
    without changing the result (softmax is shift-invariant).

    Parameters
    ----------
    x : array_like
        Input scores; softmax is taken along the last axis.

    Returns
    -------
    np.ndarray
        Same shape as ``x``; entries along the last axis are non-negative
        and sum to 1.
    """
    x = np.asarray(x)
    # Per-row max (not the global max) keeps each row well-conditioned;
    # keepdims=True is required so both the subtraction and the final
    # division broadcast correctly for inputs with more than one
    # dimension (the original divided an (n, m) array by an (n,) vector,
    # which is wrong -- or raises -- whenever m != n).
    e_x = np.exp(x - np.max(x, axis=-1, keepdims=True))
    return e_x / e_x.sum(axis=-1, keepdims=True)
def geometric_cumsum(alpha, x):
    """Discounted cumulative sum along the trailing axis.

    For each row, computes ``y[t] = x[t] + alpha * y[t + 1]``, i.e. the
    geometrically discounted sum of everything from ``t`` to the end.

    Adapted from https://github.com/zuoxingdong/lagom
    """
    data = np.asarray(x)
    if data.ndim == 1:
        data = np.expand_dims(data, 0)
    assert data.ndim == 2
    # Run an IIR filter with transfer function 1 / (1 - alpha z^-1) over
    # the time-reversed rows, then flip back: this accumulates the
    # discounted tail sums in a single vectorised pass.
    reversed_rows = data[:, ::-1]
    filtered = lfilter([1], [1, -alpha], reversed_rows, axis=1)
    return filtered[:, ::-1]
def structed2dict(array: np.ndarray):
    """Convert a structured numpy array (or void scalar) into a plain dict.

    Each field name becomes a dict key mapping to that field's data.
    Inputs without structured dtype fields are returned untouched.
    """
    is_structured = (
        isinstance(array, (np.ndarray, np.void))
        and array.dtype.fields is not None
    )
    if not is_structured:
        return array
    return {name: array[name] for name in array.dtype.fields.keys()}
def dict2structed(array: Dict):
    """Pack a dict of equally sized numpy arrays into one structured array.

    Each key becomes a named field whose dtype and trailing shape come
    from the corresponding value; the leading dimension is the shared
    record count. Non-dict inputs are returned untouched.
    """
    if not isinstance(array, dict):
        return array
    field_specs = []
    n_records = 0
    for name, values in array.items():
        # All values are assumed to share the same leading length; the
        # last one seen determines the record count (as in the original).
        n_records = len(values)
        field_specs.append((name, values.dtype, values.shape[1:]))
    packed = np.empty(n_records, dtype=np.dtype(field_specs))
    for name, values in array.items():
        packed[name] = values
    return packed
| [
"scipy.signal.lfilter",
"numpy.empty",
"numpy.asarray",
"numpy.dtype",
"numpy.expand_dims",
"numpy.max"
] | [((276, 289), 'numpy.asarray', 'np.asarray', (['x'], {}), '(x)\n', (286, 289), True, 'import numpy as np\n'), ((322, 342), 'numpy.expand_dims', 'np.expand_dims', (['x', '(0)'], {}), '(x, 0)\n', (336, 342), True, 'import numpy as np\n'), ((377, 422), 'scipy.signal.lfilter', 'lfilter', (['[1]', '[1, -alpha]', 'x[:, ::-1]'], {'axis': '(1)'}), '([1], [1, -alpha], x[:, ::-1], axis=1)\n', (384, 422), False, 'from scipy.signal import lfilter\n'), ((917, 932), 'numpy.dtype', 'np.dtype', (['dtype'], {}), '(dtype)\n', (925, 932), True, 'import numpy as np\n'), ((951, 982), 'numpy.empty', 'np.empty', (['capacity'], {'dtype': 'dtype'}), '(capacity, dtype=dtype)\n', (959, 982), True, 'import numpy as np\n'), ((119, 128), 'numpy.max', 'np.max', (['x'], {}), '(x)\n', (125, 128), True, 'import numpy as np\n')] |
import os
from math import isclose
import numpy as np
import pytest
import xarray as xr
from roocs_utils.xarray_utils.xarray_utils import get_coord_by_type
from clisops.ops.subset import subset
from ._common import CMIP6_RLDS_ONE_TIME_STEP
def open_dataset():
    """Open the one-time-step CMIP6 rlds test file.

    A real dataset is used so the full longitude coordinate is available.
    """
    dataset = xr.open_dataset(CMIP6_RLDS_ONE_TIME_STEP)
    return dataset
def setup_test():
    """Return the test dataset together with its longitude coordinate."""
    dataset = open_dataset()
    # Look the longitude coordinate up by type, so the tests do not depend
    # on the exact coordinate name used inside the file.
    longitude = get_coord_by_type(dataset, "longitude")
    return dataset, longitude
def calculate_offset(x):
    """Translate a longitude shift of ``x`` degrees into a roll offset.

    The offset is expressed in grid cells, so the grid resolution is
    first read from the dataset's longitude coordinate.
    """
    _, lon = setup_test()
    # Grid spacing in degrees, taken from the first two longitude values.
    step = lon.values[1] - lon.values[0]
    # Cells per degree of longitude.
    cells_per_degree = 1 / step
    # Scale by the requested shift and truncate to whole cells.
    return int(x * cells_per_degree)
def test_roll_lon_minus_180(load_esgf_test_data):
    """Rolling longitude by a raw -180 positions under-shoots 180 degrees."""
    dataset, lon = setup_test()

    # The longitude axis starts out on a 0..360 grid.
    assert isclose(lon.values.min(), 0, abs_tol=10 ** 2)
    assert isclose(lon.values.max(), 360, abs_tol=10 ** 2)

    dataset = dataset.roll(shifts={f"{lon.name}": -180}, roll_coords=True)

    # 180 grid positions is not 180 degrees: the grid resolution was
    # ignored, so the wrap point lands at 90 instead of 180.
    assert dataset.lon.values[0] == 90.0
    assert dataset.lon.values[-1] == 87.5
    # Rolling only reorders the coordinate values; the extremes persist.
    assert dataset.lon.values.min() == 0
    assert dataset.lon.values.max() == 357.5
def test_roll_lon_minus_180_use_res(load_esgf_test_data):
    """A resolution-aware offset rolls the longitudes by a true -180 degrees."""
    dataset, lon = setup_test()

    # Convert -180 degrees into a number of grid cells.
    offset = calculate_offset(-180)
    rolled = dataset.roll(shifts={f"{lon.name}": offset}, roll_coords=True)

    # The axis now reads [180.0 .. 357.5, 0 .. 177.5].
    assert rolled.lon.values[0] == 180.0
    assert rolled.lon.values[-1] == 177.5
    # Extremes are unchanged because a roll only reorders values.
    assert rolled.lon.values.min() == 0
    assert rolled.lon.values.max() == 357.5

    # The data moved together with the coordinates, so the rolled rlds
    # values must differ from the originals.
    np.testing.assert_raises(
        AssertionError,
        np.testing.assert_array_equal,
        rolled.rlds.values,
        dataset.rlds.values,
    )
def test_roll_lon_plus_180(load_esgf_test_data):
    """Rolling longitude by a raw +180 positions also misses 180 degrees."""
    dataset, lon = setup_test()

    dataset = dataset.roll(shifts={f"{lon.name}": 180}, roll_coords=True)

    # The grid resolution was not taken into account, so the wrap point
    # appears at 270 instead of 180.
    assert dataset.lon.values[0] == 270.0
    assert dataset.lon.values[-1] == 267.5
    assert dataset.lon.values.min() == 0
    assert dataset.lon.values.max() == 357.5
def test_roll_lon_plus_180_use_res(load_esgf_test_data):
    """A resolution-aware offset rolls the longitudes by a true +180 degrees."""
    dataset, lon = setup_test()

    # Convert +180 degrees into grid cells before rolling.
    cells = calculate_offset(180)
    dataset = dataset.roll(shifts={f"{lon.name}": cells}, roll_coords=True)

    assert dataset.lon.values[0] == 180.0
    assert dataset.lon.values[-1] == 177.5
    assert dataset.lon.values.min() == 0
    assert dataset.lon.values.max() == 357.5
def test_plus_minus_180_equal(load_esgf_test_data):
    """Resolution-aware rolls of -180 and +180 degrees give identical data."""
    dataset, lon = setup_test()

    backward = dataset.roll(
        shifts={f"{lon.name}": calculate_offset(-180)}, roll_coords=True
    )
    forward = dataset.roll(
        shifts={f"{lon.name}": calculate_offset(180)}, roll_coords=True
    )

    # On a 360 degree axis, shifting by -180 and by +180 is the same
    # rotation once the resolution is taken into account.
    np.testing.assert_allclose(backward.rlds.values, forward.rlds.values)
@pytest.mark.skip(reason="rolling now done within subset")
def test_xarray_roll_lon(tmpdir, load_esgf_test_data):
    """Subsetting a rolled dataset still raises NotImplementedError."""
    dataset, lon = setup_test()

    rolled = dataset.roll(
        shifts={f"{lon.name}": calculate_offset(180)}, roll_coords=True
    )

    # Even after rolling, subset cannot yet handle the requested area.
    with pytest.raises(NotImplementedError):
        subset(
            ds=rolled,
            area=(-50.0, -90.0, 100.0, 90.0),
            output_dir=tmpdir,
            output_type="nc",
            file_namer="simple",
        )
@pytest.mark.skip(reason="rolling now done within subset")
def test_convert_lon_coords(tmpdir, load_esgf_test_data):
    """Reassigning coordinates converts the longitude axis to -180..180."""
    dataset, lon = setup_test()

    # Map 0..360 onto -180..180 and re-sort so the axis is monotonic.
    dataset.coords[lon.name] = (dataset.coords[lon.name] + 180) % 360 - 180
    dataset = dataset.sortby(dataset[lon.name])

    assert isclose(dataset.lon.values.min(), -180, abs_tol=10 ** 2)
    assert isclose(dataset.lon.values.max(), 180, abs_tol=10 ** 2)

    result = subset(
        ds=dataset,
        area=(-50.0, -90.0, 100.0, 90.0),
        output_dir=tmpdir,
        output_type="nc",
        file_namer="simple",
    )
    assert result
@pytest.mark.skip(reason="rolling now done within subset")
def test_roll_convert_lon_coords(load_esgf_test_data):
    """Roll the data (not the coords), re-centre the coords, then subset."""
    ds, lon = setup_test()
    rolled = ds.roll(
        shifts={f"{lon.name}": calculate_offset(180)}, roll_coords=False
    )
    # rolling with roll_coords=False must actually move the data values
    np.testing.assert_raises(
        AssertionError,
        np.testing.assert_array_equal,
        rolled.rlds.values,
        ds.rlds.values,
    )
    # shift the (unrolled) coordinate values into the [-180, 180] convention
    rolled.coords[lon.name] = rolled.coords[lon.name] - 180
    assert isclose(rolled.lon.values.min(), -180, abs_tol=10 ** 2)
    assert isclose(rolled.lon.values.max(), 180, abs_tol=10 ** 2)
    outcome = subset(
        ds=rolled,
        area=(-50.0, -90.0, 100.0, 90.0),
        output_type="xarray",
    )
    assert outcome
def test_roll_compare_roll_coords(load_esgf_test_data):
    """Compare rolling with roll_coords=True vs roll_coords=False."""
    ds, lon = setup_test()
    shifts = {f"{lon.name}": calculate_offset(180)}
    with_coords = ds.roll(shifts=shifts, roll_coords=True)
    without_coords = ds.roll(shifts=shifts, roll_coords=False)
    # the data variable, latitude and time agree in either mode
    for attr in ("rlds", "lat", "time"):
        np.testing.assert_array_equal(
            getattr(with_coords, attr).values,
            getattr(without_coords, attr).values,
        )
    # only the longitude coordinate differs between the two modes
    np.testing.assert_raises(
        AssertionError,
        np.testing.assert_array_equal,
        with_coords.lon.values,
        without_coords.lon.values,
    )
@pytest.mark.skip(reason="rolling now done within subset")
def test_compare_methods(load_esgf_test_data):
    """Roll-then-shift and shift-then-sort must produce the same subset data."""

    def _check_bounds(dataset):
        # longitudes should now span roughly [-180, 180]
        assert isclose(dataset.lon.values.min(), -180, abs_tol=10 ** 2)
        assert isclose(dataset.lon.values.max(), 180, abs_tol=10 ** 2)

    def _subset_checked(dataset):
        # run the subset and make sure something was produced
        res = subset(
            ds=dataset,
            area=(-50.0, -90.0, 100.0, 90.0),
            output_type="xarray",
        )
        assert res
        return res

    # method 1: roll the data (coords untouched), then shift the coords
    ds, lon = setup_test()
    rolled = ds.roll(
        shifts={f"{lon.name}": calculate_offset(180)}, roll_coords=False
    )
    rolled.coords[lon.name] = rolled.coords[lon.name] - 180
    _check_bounds(rolled)
    result1 = _subset_checked(rolled)
    # method 2: reassign the coords, then sort into monotonic order
    ds, lon = setup_test()
    ds.coords[lon.name] = (ds.coords[lon.name] + 180) % 360 - 180
    ds = ds.sortby(ds[lon.name])
    _check_bounds(ds)
    result2 = _subset_checked(ds)
    # both routes must yield the same values for the main variable
    np.testing.assert_allclose(result1[0].rlds.values, result2[0].rlds.values)
@pytest.mark.skipif(os.path.isdir("/badc") is False, reason="data not available")
def test_irregular_grid_dataset(load_esgf_test_data):
    """Rolling by an auxiliary longitude coordinate must raise ValueError."""
    path = (
        "/badc/cmip6/data/CMIP6/ScenarioMIP/NCC/NorESM2-MM/"
        "ssp370/r1i1p1f1/Ofx/sftof/gn/v20191108/*.nc"
    )
    ds = xr.open_mfdataset(path)
    lon = get_coord_by_type(ds, "longitude", ignore_aux_coords=False)
    # on this grid, longitude is an auxiliary coordinate, not a dimension
    assert "lon" not in ds.dims
    with pytest.raises(ValueError) as exc:
        ds.roll(shifts={f"{lon.name}": 180}, roll_coords=False)
    assert str(exc.value) == "dimensions ['longitude'] do not exist"
@pytest.mark.skipif(os.path.isdir("/badc") is False, reason="data not available")
def test_3d_grid_dataset(load_esgf_test_data):
    """Rolling a dataset with a plev axis leaves that axis untouched."""
    path = "/badc/cmip6/data/CMIP6/ScenarioMIP/NCC/NorESM2-MM/ssp370/r1i1p1f1/Amon/ta/gn/v20191108/*.nc"
    ds = xr.open_mfdataset(path)
    lon = get_coord_by_type(ds, "longitude", ignore_aux_coords=False)
    # here longitude really is a dimension of the dataset
    assert "lon" in ds.dims
    shifts = {f"{lon.name}": 180}
    with_coords = ds.roll(shifts=shifts, roll_coords=True)
    without_coords = ds.roll(shifts=shifts, roll_coords=False)
    # the pressure-level axis must be identical with or without rolling coords
    np.testing.assert_array_equal(
        with_coords.plev.values,
        without_coords.plev.values,
    )
| [
"roocs_utils.xarray_utils.xarray_utils.get_coord_by_type",
"numpy.testing.assert_raises",
"numpy.testing.assert_array_equal",
"os.path.isdir",
"xarray.open_dataset",
"pytest.raises",
"clisops.ops.subset.subset",
"numpy.testing.assert_allclose",
"xarray.open_mfdataset",
"pytest.mark.skip"
] | [((3799, 3856), 'pytest.mark.skip', 'pytest.mark.skip', ([], {'reason': '"""rolling now done within subset"""'}), "(reason='rolling now done within subset')\n", (3815, 3856), False, 'import pytest\n'), ((4372, 4429), 'pytest.mark.skip', 'pytest.mark.skip', ([], {'reason': '"""rolling now done within subset"""'}), "(reason='rolling now done within subset')\n", (4388, 4429), False, 'import pytest\n'), ((5001, 5058), 'pytest.mark.skip', 'pytest.mark.skip', ([], {'reason': '"""rolling now done within subset"""'}), "(reason='rolling now done within subset')\n", (5017, 5058), False, 'import pytest\n'), ((6920, 6977), 'pytest.mark.skip', 'pytest.mark.skip', ([], {'reason': '"""rolling now done within subset"""'}), "(reason='rolling now done within subset')\n", (6936, 6977), False, 'import pytest\n'), ((326, 367), 'xarray.open_dataset', 'xr.open_dataset', (['CMIP6_RLDS_ONE_TIME_STEP'], {}), '(CMIP6_RLDS_ONE_TIME_STEP)\n', (341, 367), True, 'import xarray as xr\n'), ((487, 521), 'roocs_utils.xarray_utils.xarray_utils.get_coord_by_type', 'get_coord_by_type', (['ds', '"""longitude"""'], {}), "(ds, 'longitude')\n", (504, 521), False, 'from roocs_utils.xarray_utils.xarray_utils import get_coord_by_type\n'), ((2176, 2288), 'numpy.testing.assert_raises', 'np.testing.assert_raises', (['AssertionError', 'np.testing.assert_array_equal', 'ds_roll.rlds.values', 'ds.rlds.values'], {}), '(AssertionError, np.testing.assert_array_equal,\n ds_roll.rlds.values, ds.rlds.values)\n', (2200, 2288), True, 'import numpy as np\n'), ((3726, 3795), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['ds_minus.rlds.values', 'ds_plus.rlds.values'], {}), '(ds_minus.rlds.values, ds_plus.rlds.values)\n', (3752, 3795), True, 'import numpy as np\n'), ((4826, 4935), 'clisops.ops.subset.subset', 'subset', ([], {'ds': 'ds', 'area': '(-50.0, -90.0, 100.0, 90.0)', 'output_dir': 'tmpdir', 'output_type': '"""nc"""', 'file_namer': '"""simple"""'}), "(ds=ds, area=(-50.0, -90.0, 100.0, 90.0), 
output_dir=tmpdir,\n output_type='nc', file_namer='simple')\n", (4832, 4935), False, 'from clisops.ops.subset import subset\n'), ((5354, 5466), 'numpy.testing.assert_raises', 'np.testing.assert_raises', (['AssertionError', 'np.testing.assert_array_equal', 'ds_roll.rlds.values', 'ds.rlds.values'], {}), '(AssertionError, np.testing.assert_array_equal,\n ds_roll.rlds.values, ds.rlds.values)\n', (5378, 5466), True, 'import numpy as np\n'), ((5715, 5789), 'clisops.ops.subset.subset', 'subset', ([], {'ds': 'ds_roll', 'area': '(-50.0, -90.0, 100.0, 90.0)', 'output_type': '"""xarray"""'}), "(ds=ds_roll, area=(-50.0, -90.0, 100.0, 90.0), output_type='xarray')\n", (5721, 5789), False, 'from clisops.ops.subset import subset\n'), ((6225, 6318), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['ds_roll_coords.rlds.values', 'ds_not_roll_coords.rlds.values'], {}), '(ds_roll_coords.rlds.values,\n ds_not_roll_coords.rlds.values)\n', (6254, 6318), True, 'import numpy as np\n'), ((6402, 6494), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['ds_roll_coords.lat.values', 'ds_not_roll_coords.lat.values'], {}), '(ds_roll_coords.lat.values, ds_not_roll_coords\n .lat.values)\n', (6431, 6494), True, 'import numpy as np\n'), ((6578, 6671), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['ds_roll_coords.time.values', 'ds_not_roll_coords.time.values'], {}), '(ds_roll_coords.time.values,\n ds_not_roll_coords.time.values)\n', (6607, 6671), True, 'import numpy as np\n'), ((6748, 6881), 'numpy.testing.assert_raises', 'np.testing.assert_raises', (['AssertionError', 'np.testing.assert_array_equal', 'ds_roll_coords.lon.values', 'ds_not_roll_coords.lon.values'], {}), '(AssertionError, np.testing.assert_array_equal,\n ds_roll_coords.lon.values, ds_not_roll_coords.lon.values)\n', (6772, 6881), True, 'import numpy as np\n'), ((7499, 7573), 'clisops.ops.subset.subset', 'subset', ([], {'ds': 'ds_roll', 'area': '(-50.0, -90.0, 100.0, 
90.0)', 'output_type': '"""xarray"""'}), "(ds=ds_roll, area=(-50.0, -90.0, 100.0, 90.0), output_type='xarray')\n", (7505, 7573), False, 'from clisops.ops.subset import subset\n'), ((7931, 8000), 'clisops.ops.subset.subset', 'subset', ([], {'ds': 'ds', 'area': '(-50.0, -90.0, 100.0, 90.0)', 'output_type': '"""xarray"""'}), "(ds=ds, area=(-50.0, -90.0, 100.0, 90.0), output_type='xarray')\n", (7937, 8000), False, 'from clisops.ops.subset import subset\n'), ((8097, 8171), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['result1[0].rlds.values', 'result2[0].rlds.values'], {}), '(result1[0].rlds.values, result2[0].rlds.values)\n', (8123, 8171), True, 'import numpy as np\n'), ((8319, 8443), 'xarray.open_mfdataset', 'xr.open_mfdataset', (['"""/badc/cmip6/data/CMIP6/ScenarioMIP/NCC/NorESM2-MM/ssp370/r1i1p1f1/Ofx/sftof/gn/v20191108/*.nc"""'], {}), "(\n '/badc/cmip6/data/CMIP6/ScenarioMIP/NCC/NorESM2-MM/ssp370/r1i1p1f1/Ofx/sftof/gn/v20191108/*.nc'\n )\n", (8336, 8443), True, 'import xarray as xr\n'), ((8469, 8528), 'roocs_utils.xarray_utils.xarray_utils.get_coord_by_type', 'get_coord_by_type', (['ds', '"""longitude"""'], {'ignore_aux_coords': '(False)'}), "(ds, 'longitude', ignore_aux_coords=False)\n", (8486, 8528), False, 'from roocs_utils.xarray_utils.xarray_utils import get_coord_by_type\n'), ((8879, 9001), 'xarray.open_mfdataset', 'xr.open_mfdataset', (['"""/badc/cmip6/data/CMIP6/ScenarioMIP/NCC/NorESM2-MM/ssp370/r1i1p1f1/Amon/ta/gn/v20191108/*.nc"""'], {}), "(\n '/badc/cmip6/data/CMIP6/ScenarioMIP/NCC/NorESM2-MM/ssp370/r1i1p1f1/Amon/ta/gn/v20191108/*.nc'\n )\n", (8896, 9001), True, 'import xarray as xr\n'), ((9016, 9075), 'roocs_utils.xarray_utils.xarray_utils.get_coord_by_type', 'get_coord_by_type', (['ds', '"""longitude"""'], {'ignore_aux_coords': '(False)'}), "(ds, 'longitude', ignore_aux_coords=False)\n", (9033, 9075), False, 'from roocs_utils.xarray_utils.xarray_utils import get_coord_by_type\n'), ((9352, 9445), 'numpy.testing.assert_array_equal', 
'np.testing.assert_array_equal', (['ds_roll_coords.plev.values', 'ds_not_roll_coords.plev.values'], {}), '(ds_roll_coords.plev.values,\n ds_not_roll_coords.plev.values)\n', (9381, 9445), True, 'import numpy as np\n'), ((4143, 4177), 'pytest.raises', 'pytest.raises', (['NotImplementedError'], {}), '(NotImplementedError)\n', (4156, 4177), False, 'import pytest\n'), ((4187, 4301), 'clisops.ops.subset.subset', 'subset', ([], {'ds': 'ds_roll', 'area': '(-50.0, -90.0, 100.0, 90.0)', 'output_dir': 'tmpdir', 'output_type': '"""nc"""', 'file_namer': '"""simple"""'}), "(ds=ds_roll, area=(-50.0, -90.0, 100.0, 90.0), output_dir=tmpdir,\n output_type='nc', file_namer='simple')\n", (4193, 4301), False, 'from clisops.ops.subset import subset\n'), ((8572, 8597), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (8585, 8597), False, 'import pytest\n'), ((8194, 8216), 'os.path.isdir', 'os.path.isdir', (['"""/badc"""'], {}), "('/badc')\n", (8207, 8216), False, 'import os\n'), ((8761, 8783), 'os.path.isdir', 'os.path.isdir', (['"""/badc"""'], {}), "('/badc')\n", (8774, 8783), False, 'import os\n')] |
import copy
import numpy as np
import pytest
import torch
from mmdet.core import GeneralData, InstanceData
def _equal(a, b):
    """Compare two values; tensors/arrays are reduced element-wise via ``.all()``."""
    array_like = isinstance(a, (torch.Tensor, np.ndarray))
    return (a == b).all() if array_like else a == b
def test_general_data():
    """Unit tests for ``GeneralData``: construction, meta-info immutability,
    attribute/item access, device transfer and ``detach``/``numpy`` conversion.

    Review fixes: three bare comparison expressions whose results were
    silently discarded are now real ``assert`` statements.
    """
    # test init
    meta_info = dict(
        img_size=[256, 256],
        path='dadfaff',
        scale_factor=np.array([1.5, 1.5]),
        img_shape=torch.rand(4))
    data = dict(
        bboxes=torch.rand(4, 4),
        labels=torch.rand(4),
        masks=np.random.rand(4, 2, 2))
    instance_data = GeneralData(meta_info=meta_info)
    assert 'img_size' in instance_data
    assert instance_data.img_size == [256, 256]
    assert instance_data['img_size'] == [256, 256]
    assert 'path' in instance_data
    assert instance_data.path == 'dadfaff'
    # test nice_repr
    repr_instance_data = instance_data.new(data=data)
    nice_repr = str(repr_instance_data)
    for line in nice_repr.split('\n'):
        if 'masks' in line:
            assert 'shape' in line
            assert '(4, 2, 2)' in line
        if 'bboxes' in line:
            assert 'shape' in line
            assert 'torch.Size([4, 4])' in line
        if 'path' in line:
            assert 'dadfaff' in line
        if 'scale_factor' in line:
            assert '[1.5 1.5]' in line
    instance_data = GeneralData(
        meta_info=meta_info, data=dict(bboxes=torch.rand(5)))
    assert 'bboxes' in instance_data
    assert len(instance_data.bboxes) == 5
    # data should be a dict
    with pytest.raises(AssertionError):
        GeneralData(data=1)
    # test set data
    instance_data = GeneralData()
    instance_data.set_data(data)
    assert 'bboxes' in instance_data
    assert len(instance_data.bboxes) == 4
    assert 'masks' in instance_data
    assert len(instance_data.masks) == 4
    # data should be a dict
    with pytest.raises(AssertionError):
        instance_data.set_data(data=1)
    # test set_meta
    instance_data = GeneralData()
    instance_data.set_meta_info(meta_info)
    assert 'img_size' in instance_data
    assert instance_data.img_size == [256, 256]
    assert instance_data['img_size'] == [256, 256]
    assert 'path' in instance_data
    assert instance_data.path == 'dadfaff'
    # can skip same value when overwrite
    instance_data.set_meta_info(meta_info)
    # meta should be a dict
    with pytest.raises(AssertionError):
        instance_data.set_meta_info(meta_info='fjhka')
    # attribute in `_meta_info_field` is immutable once initialized
    instance_data.set_meta_info(meta_info)
    # meta should be immutable
    with pytest.raises(KeyError):
        instance_data.set_meta_info(dict(img_size=[254, 251]))
    with pytest.raises(KeyError):
        duplicate_meta_info = copy.deepcopy(meta_info)
        duplicate_meta_info['path'] = 'dada'
        instance_data.set_meta_info(duplicate_meta_info)
    with pytest.raises(KeyError):
        duplicate_meta_info = copy.deepcopy(meta_info)
        duplicate_meta_info['scale_factor'] = np.array([1.5, 1.6])
        instance_data.set_meta_info(duplicate_meta_info)
    # test new_instance_data
    instance_data = GeneralData(meta_info)
    new_instance_data = instance_data.new()
    for k, v in instance_data.meta_info_items():
        assert k in new_instance_data
        _equal(v, new_instance_data[k])
    instance_data = GeneralData(meta_info, data=data)
    temp_meta = copy.deepcopy(meta_info)
    temp_data = copy.deepcopy(data)
    temp_data['time'] = '12212'
    temp_meta['img_norm'] = np.random.random(3)
    new_instance_data = instance_data.new(meta_info=temp_meta, data=temp_data)
    for k, v in new_instance_data.meta_info_items():
        if k in instance_data:
            _equal(v, instance_data[k])
        else:
            assert _equal(v, temp_meta[k])
            assert k == 'img_norm'
    for k, v in new_instance_data.items():
        if k in instance_data:
            _equal(v, instance_data[k])
        else:
            assert k == 'time'
            assert _equal(v, temp_data[k])
    # test keys
    instance_data = GeneralData(meta_info, data=dict(bboxes=10))
    assert 'bboxes' in instance_data.keys()
    instance_data.b = 10
    assert 'b' in instance_data
    # test meta keys
    instance_data = GeneralData(meta_info, data=dict(bboxes=10))
    assert 'path' in instance_data.meta_info_keys()
    assert len(instance_data.meta_info_keys()) == len(meta_info)
    instance_data.set_meta_info(dict(workdir='fafaf'))
    assert 'workdir' in instance_data
    assert len(instance_data.meta_info_keys()) == len(meta_info) + 1
    # test values
    instance_data = GeneralData(meta_info, data=dict(bboxes=10))
    assert 10 in instance_data.values()
    assert len(instance_data.values()) == 1
    # test meta values
    instance_data = GeneralData(meta_info, data=dict(bboxes=10))
    # torch 1.3 eq() can not compare str and tensor
    from mmdet import digit_version
    if digit_version(torch.__version__) >= [1, 4]:
        assert 'dadfaff' in instance_data.meta_info_values()
    assert len(instance_data.meta_info_values()) == len(meta_info)
    # test items
    instance_data = GeneralData(data=data)
    for k, v in instance_data.items():
        assert k in data
        assert _equal(v, data[k])
    # test meta_info_items
    instance_data = GeneralData(meta_info=meta_info)
    for k, v in instance_data.meta_info_items():
        assert k in meta_info
        assert _equal(v, meta_info[k])
    # test __setattr__
    new_instance_data = GeneralData(data=data)
    new_instance_data.mask = torch.rand(3, 4, 5)
    new_instance_data.bboxes = torch.rand(2, 4)
    assert 'mask' in new_instance_data
    assert len(new_instance_data.mask) == 3
    assert len(new_instance_data.bboxes) == 2
    # test instance_data_field has been updated
    assert 'mask' in new_instance_data._data_fields
    assert 'bboxes' in new_instance_data._data_fields
    for k in data:
        assert k in new_instance_data._data_fields
    # '_meta_info_field', '_data_fields' is immutable.
    with pytest.raises(AttributeError):
        new_instance_data._data_fields = None
    with pytest.raises(AttributeError):
        new_instance_data._meta_info_fields = None
    with pytest.raises(AttributeError):
        del new_instance_data._data_fields
    with pytest.raises(AttributeError):
        del new_instance_data._meta_info_fields
    # key in _meta_info_field is immutable
    new_instance_data.set_meta_info(meta_info)
    with pytest.raises(KeyError):
        del new_instance_data.img_size
    with pytest.raises(KeyError):
        del new_instance_data.scale_factor
    for k in new_instance_data.meta_info_keys():
        with pytest.raises(AttributeError):
            new_instance_data[k] = None
    # test __delattr__
    # test key can be removed in instance_data_field
    assert 'mask' in new_instance_data._data_fields
    assert 'mask' in new_instance_data.keys()
    assert 'mask' in new_instance_data
    assert hasattr(new_instance_data, 'mask')
    del new_instance_data.mask
    assert 'mask' not in new_instance_data.keys()
    assert 'mask' not in new_instance_data
    assert 'mask' not in new_instance_data._data_fields
    assert not hasattr(new_instance_data, 'mask')
    # test __delitem__
    new_instance_data.mask = torch.rand(1, 2, 3)
    assert 'mask' in new_instance_data._data_fields
    assert 'mask' in new_instance_data
    assert hasattr(new_instance_data, 'mask')
    del new_instance_data['mask']
    assert 'mask' not in new_instance_data
    assert 'mask' not in new_instance_data._data_fields
    assert 'mask' not in new_instance_data
    assert not hasattr(new_instance_data, 'mask')
    # test __setitem__
    new_instance_data['mask'] = torch.rand(1, 2, 3)
    assert 'mask' in new_instance_data._data_fields
    assert 'mask' in new_instance_data.keys()
    assert hasattr(new_instance_data, 'mask')
    # test data_fields has been updated
    assert 'mask' in new_instance_data.keys()
    assert 'mask' in new_instance_data._data_fields
    # '_meta_info_field', '_data_fields' is immutable.
    with pytest.raises(AttributeError):
        del new_instance_data['_data_fields']
    with pytest.raises(AttributeError):
        del new_instance_data['_meta_info_field']
    # test __getitem__
    # BUGFIX: this comparison previously had no `assert` and was discarded
    assert new_instance_data.mask is new_instance_data['mask']
    # test get
    assert new_instance_data.get('mask') is new_instance_data.mask
    assert new_instance_data.get('none_attribute', None) is None
    assert new_instance_data.get('none_attribute', 1) == 1
    # test pop
    mask = new_instance_data.mask
    assert new_instance_data.pop('mask') is mask
    assert new_instance_data.pop('mask', None) is None
    assert new_instance_data.pop('mask', 1) == 1
    # '_meta_info_field', '_data_fields' is immutable.
    with pytest.raises(KeyError):
        new_instance_data.pop('_data_fields')
    with pytest.raises(KeyError):
        new_instance_data.pop('_meta_info_field')
    # attribute in `_meta_info_field` is immutable
    with pytest.raises(KeyError):
        new_instance_data.pop('img_size')
    # test pop attribute in instance_data_field
    new_instance_data['mask'] = torch.rand(1, 2, 3)
    new_instance_data.pop('mask')
    # test data_field has been updated
    assert 'mask' not in new_instance_data
    assert 'mask' not in new_instance_data._data_fields
    assert 'mask' not in new_instance_data
    # test_keys
    new_instance_data.mask = torch.ones(1, 2, 3)
    # BUGFIX: this membership check previously had no `assert`
    assert 'mask' in new_instance_data.keys()
    has_flag = False
    for key in new_instance_data.keys():
        if key == 'mask':
            has_flag = True
    assert has_flag
    # test values
    assert len(list(new_instance_data.keys())) == len(
        list(new_instance_data.values()))
    mask = new_instance_data.mask
    has_flag = False
    for value in new_instance_data.values():
        if value is mask:
            has_flag = True
    assert has_flag
    # test items
    assert len(list(new_instance_data.keys())) == len(
        list(new_instance_data.items()))
    mask = new_instance_data.mask
    has_flag = False
    for key, value in new_instance_data.items():
        if value is mask:
            assert key == 'mask'
            has_flag = True
    assert has_flag
    # test device
    new_instance_data = GeneralData()
    if torch.cuda.is_available():
        newnew_instance_data = new_instance_data.new()
        devices = ('cpu', 'cuda')
        for i in range(10):
            device = devices[i % 2]
            newnew_instance_data[f'{i}'] = torch.rand(1, 2, 3, device=device)
        newnew_instance_data = newnew_instance_data.cpu()
        for value in newnew_instance_data.values():
            assert not value.is_cuda
        newnew_instance_data = new_instance_data.new()
        devices = ('cuda', 'cpu')
        for i in range(10):
            device = devices[i % 2]
            newnew_instance_data[f'{i}'] = torch.rand(1, 2, 3, device=device)
        newnew_instance_data = newnew_instance_data.cuda()
        for value in newnew_instance_data.values():
            assert value.is_cuda
    # test to
    double_instance_data = instance_data.new()
    double_instance_data.long = torch.LongTensor(1, 2, 3, 4)
    double_instance_data.bool = torch.BoolTensor(1, 2, 3, 4)
    # NOTE(review): `.to()` is applied to `instance_data`, discarding the
    # long/bool attributes set just above — possibly meant to be
    # `double_instance_data.to(...)`; left unchanged pending confirmation.
    double_instance_data = instance_data.to(torch.double)
    for k, v in double_instance_data.items():
        if isinstance(v, torch.Tensor):
            assert v.dtype is torch.double
    # test .cpu() .cuda()
    if torch.cuda.is_available():
        cpu_instance_data = double_instance_data.new()
        cpu_instance_data.mask = torch.rand(1)
        cuda_tensor = torch.rand(1, 2, 3).cuda()
        cuda_instance_data = cpu_instance_data.to(cuda_tensor.device)
        for value in cuda_instance_data.values():
            assert value.is_cuda
        cpu_instance_data = cuda_instance_data.cpu()
        for value in cpu_instance_data.values():
            assert not value.is_cuda
        cuda_instance_data = cpu_instance_data.cuda()
        for value in cuda_instance_data.values():
            assert value.is_cuda
    # test detach
    grad_instance_data = double_instance_data.new()
    grad_instance_data.mask = torch.rand(2, requires_grad=True)
    grad_instance_data.mask_1 = torch.rand(2, requires_grad=True)
    detach_instance_data = grad_instance_data.detach()
    for value in detach_instance_data.values():
        assert not value.requires_grad
    # test numpy
    tensor_instance_data = double_instance_data.new()
    tensor_instance_data.mask = torch.rand(2, requires_grad=True)
    tensor_instance_data.mask_1 = torch.rand(2, requires_grad=True)
    numpy_instance_data = tensor_instance_data.numpy()
    for value in numpy_instance_data.values():
        assert isinstance(value, np.ndarray)
    if torch.cuda.is_available():
        tensor_instance_data = double_instance_data.new()
        tensor_instance_data.mask = torch.rand(2)
        tensor_instance_data.mask_1 = torch.rand(2)
        tensor_instance_data = tensor_instance_data.cuda()
        numpy_instance_data = tensor_instance_data.numpy()
        for value in numpy_instance_data.values():
            assert isinstance(value, np.ndarray)
    instance_data['_c'] = 10000
    # BUGFIX: this check previously had no `assert` and was discarded
    assert instance_data.get('dad', None) is None
    assert hasattr(instance_data, '_c')
    del instance_data['_c']
    assert not hasattr(instance_data, '_c')
    instance_data.a = 1000
    instance_data['a'] = 2000
    assert instance_data['a'] == 2000
    assert instance_data.a == 2000
    assert instance_data.get('a') == instance_data['a'] == instance_data.a
    instance_data._meta = 1000
    assert '_meta' in instance_data.keys()
    if torch.cuda.is_available():
        instance_data.bbox = torch.ones(2, 3, 4, 5).cuda()
        instance_data.score = torch.ones(2, 3, 4, 4)
    else:
        instance_data.bbox = torch.ones(2, 3, 4, 5)
    assert len(instance_data.new().keys()) == 0
    with pytest.raises(AttributeError):
        instance_data.img_size = 100
    for k, v in instance_data.items():
        if k == 'bbox':
            assert isinstance(v, torch.Tensor)
    assert 'a' in instance_data
    instance_data.pop('a')
    assert 'a' not in instance_data
    cpu_instance_data = instance_data.cpu()
    for k, v in cpu_instance_data.items():
        if isinstance(v, torch.Tensor):
            assert not v.is_cuda
    assert isinstance(cpu_instance_data.numpy().bbox, np.ndarray)
    if torch.cuda.is_available():
        cuda_results = instance_data.cuda()
        for k, v in cuda_results.items():
            if isinstance(v, torch.Tensor):
                assert v.is_cuda
def test_instance_data():
    """Unit tests for ``InstanceData``: construction, ``new()``, per-field
    length validation, indexing (int / slice / LongTensor / BoolTensor)
    and ``InstanceData.cat``.

    Review fixes: three bare comparison expressions are now real asserts
    (and the LongTensor one is corrected — indexing with four indices
    yields four instances, as the ``index_tensor`` check below shows);
    the duplicated ``_data_fields`` immutability check now covers
    ``_meta_info_fields``; a stray ``return`` inside ``pytest.raises``
    was removed.
    """
    meta_info = dict(
        img_size=(256, 256),
        path='dadfaff',
        scale_factor=np.array([1.5, 1.5, 1, 1]))
    data = dict(
        bboxes=torch.rand(4, 4),
        masks=torch.rand(4, 2, 2),
        labels=np.random.rand(4),
        size=[(i, i) for i in range(4)])
    # test init
    instance_data = InstanceData(meta_info)
    assert 'path' in instance_data
    instance_data = InstanceData(meta_info, data=data)
    assert len(instance_data) == 4
    instance_data.set_data(data)
    assert len(instance_data) == 4
    meta_info = copy.deepcopy(meta_info)
    meta_info['img_name'] = 'flag'
    # test newinstance_data
    new_instance_data = instance_data.new(meta_info=meta_info)
    for k, v in new_instance_data.meta_info_items():
        if k in instance_data:
            _equal(v, instance_data[k])
        else:
            assert _equal(v, meta_info[k])
            assert k == 'img_name'
    # meta info is immutable
    with pytest.raises(KeyError):
        meta_info = copy.deepcopy(meta_info)
        meta_info['path'] = 'fdasfdsd'
        instance_data.new(meta_info=meta_info)
    # data fields should have same length
    with pytest.raises(AssertionError):
        temp_data = copy.deepcopy(data)
        temp_data['bboxes'] = torch.rand(5, 4)
        instance_data.new(data=temp_data)
    temp_data = copy.deepcopy(data)
    temp_data['scores'] = torch.rand(4)
    new_instance_data = instance_data.new(data=temp_data)
    for k, v in new_instance_data.items():
        if k in instance_data:
            _equal(v, instance_data[k])
        else:
            assert k == 'scores'
            assert _equal(v, temp_data[k])
    instance_data = instance_data.new()
    # test __setattr__
    # '_meta_info_field', '_data_fields' is immutable.
    with pytest.raises(AttributeError):
        instance_data._data_fields = dict()
    with pytest.raises(AttributeError):
        # BUGFIX: this duplicated the `_data_fields` check above; it now
        # covers `_meta_info_fields` (as in test_general_data)
        instance_data._meta_info_fields = dict()
    # all attribute in instance_data_field should be
    # (torch.Tensor, np.ndarray, list))
    with pytest.raises(AssertionError):
        instance_data.a = 1000
    # instance_data field should has same length
    new_instance_data = instance_data.new()
    new_instance_data.det_bbox = torch.rand(100, 4)
    new_instance_data.det_label = torch.arange(100)
    with pytest.raises(AssertionError):
        new_instance_data.scores = torch.rand(101, 1)
    new_instance_data.none = [None] * 100
    with pytest.raises(AssertionError):
        new_instance_data.scores = [None] * 101
    new_instance_data.numpy_det = np.random.random([100, 1])
    with pytest.raises(AssertionError):
        new_instance_data.scores = np.random.random([101, 1])
    # isinstance(str, slice, int, torch.LongTensor, torch.BoolTensor)
    item = torch.Tensor([1, 2, 3, 4])
    with pytest.raises(AssertionError):
        new_instance_data[item]
    # BUGFIX: previously a bare `... == 1` with no `assert`; four long
    # indices select four instances (cf. the index_tensor check below)
    assert len(new_instance_data[item.long()]) == 4
    # when input is a bool tensor, The shape of
    # the input at index 0 should equal to
    # the value length in instance_data_field
    with pytest.raises(AssertionError):
        new_instance_data[item.bool()]
    for i in range(len(new_instance_data)):
        assert new_instance_data[i].det_label == i
        assert len(new_instance_data[i]) == 1
    # assert the index should in 0 ~ len(instance_data) -1
    with pytest.raises(IndexError):
        new_instance_data[101]
    # assert the index should not be an empty tensor
    new_new_instance_data = new_instance_data.new()
    with pytest.raises(AssertionError):
        new_new_instance_data[0]
    # test str
    with pytest.raises(AssertionError):
        instance_data.img_size_dummmy = meta_info['img_size']
    # test slice
    ten_results = new_instance_data[:10]
    # BUGFIX: previously a bare comparison with no `assert`
    assert len(ten_results) == 10
    for v in ten_results.values():
        assert len(v) == 10
    # test Longtensor
    long_tensor = torch.randint(100, (50, ))
    long_index_instance_data = new_instance_data[long_tensor]
    assert len(long_index_instance_data) == len(long_tensor)
    for key, value in long_index_instance_data.items():
        if not isinstance(value, list):
            assert (long_index_instance_data[key] == new_instance_data[key]
                    [long_tensor]).all()
        else:
            # BUGFIX: previously a bare comparison with no `assert`
            assert len(long_tensor) == len(value)
    # test bool tensor
    bool_tensor = torch.rand(100) > 0.5
    bool_index_instance_data = new_instance_data[bool_tensor]
    assert len(bool_index_instance_data) == bool_tensor.sum()
    for key, value in bool_index_instance_data.items():
        if not isinstance(value, list):
            assert (bool_index_instance_data[key] == new_instance_data[key]
                    [bool_tensor]).all()
        else:
            assert len(value) == bool_tensor.sum()
    num_instance = 1000
    instance_data_list = []
    # assert len(instance_lists) > 0
    with pytest.raises(AssertionError):
        instance_data.cat(instance_data_list)
    for _ in range(2):
        instance_data['bbox'] = torch.rand(num_instance, 4)
        instance_data['label'] = torch.rand(num_instance, 1)
        instance_data['mask'] = torch.rand(num_instance, 224, 224)
        instance_data['instances_infos'] = [1] * num_instance
        instance_data['cpu_bbox'] = np.random.random((num_instance, 4))
        if torch.cuda.is_available():
            instance_data.cuda_tensor = torch.rand(num_instance).cuda()
            assert instance_data.cuda_tensor.is_cuda
            cuda_instance_data = instance_data.cuda()
            assert cuda_instance_data.cuda_tensor.is_cuda
        assert len(instance_data[0]) == 1
        with pytest.raises(IndexError):
            # BUGFIX: a stray `return` here would have silently ended the
            # test if the expected exception had not been raised
            instance_data[num_instance + 1]
        with pytest.raises(AssertionError):
            instance_data.centerness = torch.rand(num_instance + 1, 1)
        mask_tensor = torch.rand(num_instance) > 0.5
        length = mask_tensor.sum()
        assert len(instance_data[mask_tensor]) == length
        index_tensor = torch.LongTensor([1, 5, 8, 110, 399])
        length = len(index_tensor)
        assert len(instance_data[index_tensor]) == length
        instance_data_list.append(instance_data)
    cat_results = InstanceData.cat(instance_data_list)
    assert len(cat_results) == num_instance * 2
    instances = InstanceData(data=dict(bboxes=torch.rand(4, 4)))
    # cat only single instance
    assert len(InstanceData.cat([instances])) == 4
| [
"torch.ones",
"copy.deepcopy",
"torch.randint",
"mmdet.core.InstanceData.cat",
"torch.LongTensor",
"numpy.random.rand",
"mmdet.core.GeneralData",
"mmdet.digit_version",
"pytest.raises",
"numpy.random.random",
"torch.cuda.is_available",
"torch.arange",
"torch.Tensor",
"torch.rand",
"numpy... | [((577, 609), 'mmdet.core.GeneralData', 'GeneralData', ([], {'meta_info': 'meta_info'}), '(meta_info=meta_info)\n', (588, 609), False, 'from mmdet.core import GeneralData, InstanceData\n'), ((1646, 1659), 'mmdet.core.GeneralData', 'GeneralData', ([], {}), '()\n', (1657, 1659), False, 'from mmdet.core import GeneralData, InstanceData\n'), ((1997, 2010), 'mmdet.core.GeneralData', 'GeneralData', ([], {}), '()\n', (2008, 2010), False, 'from mmdet.core import GeneralData, InstanceData\n'), ((3172, 3194), 'mmdet.core.GeneralData', 'GeneralData', (['meta_info'], {}), '(meta_info)\n', (3183, 3194), False, 'from mmdet.core import GeneralData, InstanceData\n'), ((3387, 3420), 'mmdet.core.GeneralData', 'GeneralData', (['meta_info'], {'data': 'data'}), '(meta_info, data=data)\n', (3398, 3420), False, 'from mmdet.core import GeneralData, InstanceData\n'), ((3437, 3461), 'copy.deepcopy', 'copy.deepcopy', (['meta_info'], {}), '(meta_info)\n', (3450, 3461), False, 'import copy\n'), ((3478, 3497), 'copy.deepcopy', 'copy.deepcopy', (['data'], {}), '(data)\n', (3491, 3497), False, 'import copy\n'), ((3558, 3577), 'numpy.random.random', 'np.random.random', (['(3)'], {}), '(3)\n', (3574, 3577), True, 'import numpy as np\n'), ((5188, 5210), 'mmdet.core.GeneralData', 'GeneralData', ([], {'data': 'data'}), '(data=data)\n', (5199, 5210), False, 'from mmdet.core import GeneralData, InstanceData\n'), ((5357, 5389), 'mmdet.core.GeneralData', 'GeneralData', ([], {'meta_info': 'meta_info'}), '(meta_info=meta_info)\n', (5368, 5389), False, 'from mmdet.core import GeneralData, InstanceData\n'), ((5556, 5578), 'mmdet.core.GeneralData', 'GeneralData', ([], {'data': 'data'}), '(data=data)\n', (5567, 5578), False, 'from mmdet.core import GeneralData, InstanceData\n'), ((5608, 5627), 'torch.rand', 'torch.rand', (['(3)', '(4)', '(5)'], {}), '(3, 4, 5)\n', (5618, 5627), False, 'import torch\n'), ((5659, 5675), 'torch.rand', 'torch.rand', (['(2)', '(4)'], {}), '(2, 4)\n', (5669, 5675), False, 
'import torch\n'), ((7352, 7371), 'torch.rand', 'torch.rand', (['(1)', '(2)', '(3)'], {}), '(1, 2, 3)\n', (7362, 7371), False, 'import torch\n'), ((7791, 7810), 'torch.rand', 'torch.rand', (['(1)', '(2)', '(3)'], {}), '(1, 2, 3)\n', (7801, 7810), False, 'import torch\n'), ((9244, 9263), 'torch.rand', 'torch.rand', (['(1)', '(2)', '(3)'], {}), '(1, 2, 3)\n', (9254, 9263), False, 'import torch\n'), ((9525, 9544), 'torch.ones', 'torch.ones', (['(1)', '(2)', '(3)'], {}), '(1, 2, 3)\n', (9535, 9544), False, 'import torch\n'), ((10378, 10391), 'mmdet.core.GeneralData', 'GeneralData', ([], {}), '()\n', (10389, 10391), False, 'from mmdet.core import GeneralData, InstanceData\n'), ((10399, 10424), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (10422, 10424), False, 'import torch\n'), ((11272, 11300), 'torch.LongTensor', 'torch.LongTensor', (['(1)', '(2)', '(3)', '(4)'], {}), '(1, 2, 3, 4)\n', (11288, 11300), False, 'import torch\n'), ((11333, 11361), 'torch.BoolTensor', 'torch.BoolTensor', (['(1)', '(2)', '(3)', '(4)'], {}), '(1, 2, 3, 4)\n', (11349, 11361), False, 'import torch\n'), ((11583, 11608), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (11606, 11608), False, 'import torch\n'), ((12291, 12324), 'torch.rand', 'torch.rand', (['(2)'], {'requires_grad': '(True)'}), '(2, requires_grad=True)\n', (12301, 12324), False, 'import torch\n'), ((12357, 12390), 'torch.rand', 'torch.rand', (['(2)'], {'requires_grad': '(True)'}), '(2, requires_grad=True)\n', (12367, 12390), False, 'import torch\n'), ((12637, 12670), 'torch.rand', 'torch.rand', (['(2)'], {'requires_grad': '(True)'}), '(2, requires_grad=True)\n', (12647, 12670), False, 'import torch\n'), ((12705, 12738), 'torch.rand', 'torch.rand', (['(2)'], {'requires_grad': '(True)'}), '(2, requires_grad=True)\n', (12715, 12738), False, 'import torch\n'), ((12893, 12918), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (12916, 12918), False, 'import 
torch\n'), ((13772, 13797), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (13795, 13797), False, 'import torch\n'), ((14541, 14566), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (14564, 14566), False, 'import torch\n'), ((15081, 15104), 'mmdet.core.InstanceData', 'InstanceData', (['meta_info'], {}), '(meta_info)\n', (15093, 15104), False, 'from mmdet.core import GeneralData, InstanceData\n'), ((15160, 15194), 'mmdet.core.InstanceData', 'InstanceData', (['meta_info'], {'data': 'data'}), '(meta_info, data=data)\n', (15172, 15194), False, 'from mmdet.core import GeneralData, InstanceData\n'), ((15315, 15339), 'copy.deepcopy', 'copy.deepcopy', (['meta_info'], {}), '(meta_info)\n', (15328, 15339), False, 'import copy\n'), ((16106, 16125), 'copy.deepcopy', 'copy.deepcopy', (['data'], {}), '(data)\n', (16119, 16125), False, 'import copy\n'), ((16152, 16165), 'torch.rand', 'torch.rand', (['(4)'], {}), '(4)\n', (16162, 16165), False, 'import torch\n'), ((17008, 17026), 'torch.rand', 'torch.rand', (['(100)', '(4)'], {}), '(100, 4)\n', (17018, 17026), False, 'import torch\n'), ((17061, 17078), 'torch.arange', 'torch.arange', (['(100)'], {}), '(100)\n', (17073, 17078), False, 'import torch\n'), ((17337, 17363), 'numpy.random.random', 'np.random.random', (['[100, 1]'], {}), '([100, 1])\n', (17353, 17363), True, 'import numpy as np\n'), ((17548, 17574), 'torch.Tensor', 'torch.Tensor', (['[1, 2, 3, 4]'], {}), '([1, 2, 3, 4])\n', (17560, 17574), False, 'import torch\n'), ((18668, 18693), 'torch.randint', 'torch.randint', (['(100)', '(50,)'], {}), '(100, (50,))\n', (18681, 18693), False, 'import torch\n'), ((20972, 21008), 'mmdet.core.InstanceData.cat', 'InstanceData.cat', (['instance_data_list'], {}), '(instance_data_list)\n', (20988, 21008), False, 'from mmdet.core import GeneralData, InstanceData\n'), ((1546, 1575), 'pytest.raises', 'pytest.raises', (['AssertionError'], {}), '(AssertionError)\n', (1559, 1575), False, 'import 
pytest\n'), ((1585, 1604), 'mmdet.core.GeneralData', 'GeneralData', ([], {'data': '(1)'}), '(data=1)\n', (1596, 1604), False, 'from mmdet.core import GeneralData, InstanceData\n'), ((1886, 1915), 'pytest.raises', 'pytest.raises', (['AssertionError'], {}), '(AssertionError)\n', (1899, 1915), False, 'import pytest\n'), ((2392, 2421), 'pytest.raises', 'pytest.raises', (['AssertionError'], {}), '(AssertionError)\n', (2405, 2421), False, 'import pytest\n'), ((2630, 2653), 'pytest.raises', 'pytest.raises', (['KeyError'], {}), '(KeyError)\n', (2643, 2653), False, 'import pytest\n'), ((2727, 2750), 'pytest.raises', 'pytest.raises', (['KeyError'], {}), '(KeyError)\n', (2740, 2750), False, 'import pytest\n'), ((2782, 2806), 'copy.deepcopy', 'copy.deepcopy', (['meta_info'], {}), '(meta_info)\n', (2795, 2806), False, 'import copy\n'), ((2918, 2941), 'pytest.raises', 'pytest.raises', (['KeyError'], {}), '(KeyError)\n', (2931, 2941), False, 'import pytest\n'), ((2973, 2997), 'copy.deepcopy', 'copy.deepcopy', (['meta_info'], {}), '(meta_info)\n', (2986, 2997), False, 'import copy\n'), ((3044, 3064), 'numpy.array', 'np.array', (['[1.5, 1.6]'], {}), '([1.5, 1.6])\n', (3052, 3064), True, 'import numpy as np\n'), ((4978, 5010), 'mmdet.digit_version', 'digit_version', (['torch.__version__'], {}), '(torch.__version__)\n', (4991, 5010), False, 'from mmdet import digit_version\n'), ((6096, 6125), 'pytest.raises', 'pytest.raises', (['AttributeError'], {}), '(AttributeError)\n', (6109, 6125), False, 'import pytest\n'), ((6182, 6211), 'pytest.raises', 'pytest.raises', (['AttributeError'], {}), '(AttributeError)\n', (6195, 6211), False, 'import pytest\n'), ((6273, 6302), 'pytest.raises', 'pytest.raises', (['AttributeError'], {}), '(AttributeError)\n', (6286, 6302), False, 'import pytest\n'), ((6356, 6385), 'pytest.raises', 'pytest.raises', (['AttributeError'], {}), '(AttributeError)\n', (6369, 6385), False, 'import pytest\n'), ((6535, 6558), 'pytest.raises', 'pytest.raises', (['KeyError'], 
{}), '(KeyError)\n', (6548, 6558), False, 'import pytest\n'), ((6608, 6631), 'pytest.raises', 'pytest.raises', (['KeyError'], {}), '(KeyError)\n', (6621, 6631), False, 'import pytest\n'), ((8159, 8188), 'pytest.raises', 'pytest.raises', (['AttributeError'], {}), '(AttributeError)\n', (8172, 8188), False, 'import pytest\n'), ((8245, 8274), 'pytest.raises', 'pytest.raises', (['AttributeError'], {}), '(AttributeError)\n', (8258, 8274), False, 'import pytest\n'), ((8882, 8905), 'pytest.raises', 'pytest.raises', (['KeyError'], {}), '(KeyError)\n', (8895, 8905), False, 'import pytest\n'), ((8962, 8985), 'pytest.raises', 'pytest.raises', (['KeyError'], {}), '(KeyError)\n', (8975, 8985), False, 'import pytest\n'), ((9097, 9120), 'pytest.raises', 'pytest.raises', (['KeyError'], {}), '(KeyError)\n', (9110, 9120), False, 'import pytest\n'), ((11698, 11711), 'torch.rand', 'torch.rand', (['(1)'], {}), '(1)\n', (11708, 11711), False, 'import torch\n'), ((13014, 13027), 'torch.rand', 'torch.rand', (['(2)'], {}), '(2)\n', (13024, 13027), False, 'import torch\n'), ((13066, 13079), 'torch.rand', 'torch.rand', (['(2)'], {}), '(2)\n', (13076, 13079), False, 'import torch\n'), ((13888, 13910), 'torch.ones', 'torch.ones', (['(2)', '(3)', '(4)', '(4)'], {}), '(2, 3, 4, 4)\n', (13898, 13910), False, 'import torch\n'), ((13950, 13972), 'torch.ones', 'torch.ones', (['(2)', '(3)', '(4)', '(5)'], {}), '(2, 3, 4, 5)\n', (13960, 13972), False, 'import torch\n'), ((14031, 14060), 'pytest.raises', 'pytest.raises', (['AttributeError'], {}), '(AttributeError)\n', (14044, 14060), False, 'import pytest\n'), ((15721, 15744), 'pytest.raises', 'pytest.raises', (['KeyError'], {}), '(KeyError)\n', (15734, 15744), False, 'import pytest\n'), ((15766, 15790), 'copy.deepcopy', 'copy.deepcopy', (['meta_info'], {}), '(meta_info)\n', (15779, 15790), False, 'import copy\n'), ((15929, 15958), 'pytest.raises', 'pytest.raises', (['AssertionError'], {}), '(AssertionError)\n', (15942, 15958), False, 'import 
pytest\n'), ((15980, 15999), 'copy.deepcopy', 'copy.deepcopy', (['data'], {}), '(data)\n', (15993, 15999), False, 'import copy\n'), ((16030, 16046), 'torch.rand', 'torch.rand', (['(5)', '(4)'], {}), '(5, 4)\n', (16040, 16046), False, 'import torch\n'), ((16557, 16586), 'pytest.raises', 'pytest.raises', (['AttributeError'], {}), '(AttributeError)\n', (16570, 16586), False, 'import pytest\n'), ((16641, 16670), 'pytest.raises', 'pytest.raises', (['AttributeError'], {}), '(AttributeError)\n', (16654, 16670), False, 'import pytest\n'), ((16819, 16848), 'pytest.raises', 'pytest.raises', (['AssertionError'], {}), '(AssertionError)\n', (16832, 16848), False, 'import pytest\n'), ((17088, 17117), 'pytest.raises', 'pytest.raises', (['AssertionError'], {}), '(AssertionError)\n', (17101, 17117), False, 'import pytest\n'), ((17154, 17172), 'torch.rand', 'torch.rand', (['(101)', '(1)'], {}), '(101, 1)\n', (17164, 17172), False, 'import torch\n'), ((17224, 17253), 'pytest.raises', 'pytest.raises', (['AssertionError'], {}), '(AssertionError)\n', (17237, 17253), False, 'import pytest\n'), ((17373, 17402), 'pytest.raises', 'pytest.raises', (['AssertionError'], {}), '(AssertionError)\n', (17386, 17402), False, 'import pytest\n'), ((17439, 17465), 'numpy.random.random', 'np.random.random', (['[101, 1]'], {}), '([101, 1])\n', (17455, 17465), True, 'import numpy as np\n'), ((17584, 17613), 'pytest.raises', 'pytest.raises', (['AssertionError'], {}), '(AssertionError)\n', (17597, 17613), False, 'import pytest\n'), ((17839, 17868), 'pytest.raises', 'pytest.raises', (['AssertionError'], {}), '(AssertionError)\n', (17852, 17868), False, 'import pytest\n'), ((18120, 18145), 'pytest.raises', 'pytest.raises', (['IndexError'], {}), '(IndexError)\n', (18133, 18145), False, 'import pytest\n'), ((18293, 18322), 'pytest.raises', 'pytest.raises', (['AssertionError'], {}), '(AssertionError)\n', (18306, 18322), False, 'import pytest\n'), ((18382, 18411), 'pytest.raises', 'pytest.raises', 
(['AssertionError'], {}), '(AssertionError)\n', (18395, 18411), False, 'import pytest\n'), ((19130, 19145), 'torch.rand', 'torch.rand', (['(100)'], {}), '(100)\n', (19140, 19145), False, 'import torch\n'), ((19654, 19683), 'pytest.raises', 'pytest.raises', (['AssertionError'], {}), '(AssertionError)\n', (19667, 19683), False, 'import pytest\n'), ((19787, 19814), 'torch.rand', 'torch.rand', (['num_instance', '(4)'], {}), '(num_instance, 4)\n', (19797, 19814), False, 'import torch\n'), ((19848, 19875), 'torch.rand', 'torch.rand', (['num_instance', '(1)'], {}), '(num_instance, 1)\n', (19858, 19875), False, 'import torch\n'), ((19908, 19942), 'torch.rand', 'torch.rand', (['num_instance', '(224)', '(224)'], {}), '(num_instance, 224, 224)\n', (19918, 19942), False, 'import torch\n'), ((20041, 20076), 'numpy.random.random', 'np.random.random', (['(num_instance, 4)'], {}), '((num_instance, 4))\n', (20057, 20076), True, 'import numpy as np\n'), ((20088, 20113), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (20111, 20113), False, 'import torch\n'), ((20771, 20808), 'torch.LongTensor', 'torch.LongTensor', (['[1, 5, 8, 110, 399]'], {}), '([1, 5, 8, 110, 399])\n', (20787, 20808), False, 'import torch\n'), ((381, 401), 'numpy.array', 'np.array', (['[1.5, 1.5]'], {}), '([1.5, 1.5])\n', (389, 401), True, 'import numpy as np\n'), ((421, 434), 'torch.rand', 'torch.rand', (['(4)'], {}), '(4)\n', (431, 434), False, 'import torch\n'), ((469, 485), 'torch.rand', 'torch.rand', (['(4)', '(4)'], {}), '(4, 4)\n', (479, 485), False, 'import torch\n'), ((502, 515), 'torch.rand', 'torch.rand', (['(4)'], {}), '(4)\n', (512, 515), False, 'import torch\n'), ((531, 554), 'numpy.random.rand', 'np.random.rand', (['(4)', '(2)', '(2)'], {}), '(4, 2, 2)\n', (545, 554), True, 'import numpy as np\n'), ((6738, 6767), 'pytest.raises', 'pytest.raises', (['AttributeError'], {}), '(AttributeError)\n', (6751, 6767), False, 'import pytest\n'), ((10622, 10656), 'torch.rand', 
'torch.rand', (['(1)', '(2)', '(3)'], {'device': 'device'}), '(1, 2, 3, device=device)\n', (10632, 10656), False, 'import torch\n'), ((11000, 11034), 'torch.rand', 'torch.rand', (['(1)', '(2)', '(3)'], {'device': 'device'}), '(1, 2, 3, device=device)\n', (11010, 11034), False, 'import torch\n'), ((14855, 14881), 'numpy.array', 'np.array', (['[1.5, 1.5, 1, 1]'], {}), '([1.5, 1.5, 1, 1])\n', (14863, 14881), True, 'import numpy as np\n'), ((14916, 14932), 'torch.rand', 'torch.rand', (['(4)', '(4)'], {}), '(4, 4)\n', (14926, 14932), False, 'import torch\n'), ((14948, 14967), 'torch.rand', 'torch.rand', (['(4)', '(2)', '(2)'], {}), '(4, 2, 2)\n', (14958, 14967), False, 'import torch\n'), ((14984, 15001), 'numpy.random.rand', 'np.random.rand', (['(4)'], {}), '(4)\n', (14998, 15001), True, 'import numpy as np\n'), ((20408, 20433), 'pytest.raises', 'pytest.raises', (['IndexError'], {}), '(IndexError)\n', (20421, 20433), False, 'import pytest\n'), ((20499, 20528), 'pytest.raises', 'pytest.raises', (['AssertionError'], {}), '(AssertionError)\n', (20512, 20528), False, 'import pytest\n'), ((20569, 20600), 'torch.rand', 'torch.rand', (['(num_instance + 1)', '(1)'], {}), '(num_instance + 1, 1)\n', (20579, 20600), False, 'import torch\n'), ((20624, 20648), 'torch.rand', 'torch.rand', (['num_instance'], {}), '(num_instance)\n', (20634, 20648), False, 'import torch\n'), ((21169, 21198), 'mmdet.core.InstanceData.cat', 'InstanceData.cat', (['[instances]'], {}), '([instances])\n', (21185, 21198), False, 'from mmdet.core import GeneralData, InstanceData\n'), ((11734, 11753), 'torch.rand', 'torch.rand', (['(1)', '(2)', '(3)'], {}), '(1, 2, 3)\n', (11744, 11753), False, 'import torch\n'), ((13828, 13850), 'torch.ones', 'torch.ones', (['(2)', '(3)', '(4)', '(5)'], {}), '(2, 3, 4, 5)\n', (13838, 13850), False, 'import torch\n'), ((1413, 1426), 'torch.rand', 'torch.rand', (['(5)'], {}), '(5)\n', (1423, 1426), False, 'import torch\n'), ((20155, 20179), 'torch.rand', 'torch.rand', 
(['num_instance'], {}), '(num_instance)\n', (20165, 20179), False, 'import torch\n'), ((21104, 21120), 'torch.rand', 'torch.rand', (['(4)', '(4)'], {}), '(4, 4)\n', (21114, 21120), False, 'import torch\n')] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.