code stringlengths 31 1.05M | apis list | extract_api stringlengths 97 1.91M |
|---|---|---|
import os
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import geopandas as gpd
from energy_demand.read_write import data_loader, read_data
from energy_demand.basic import date_prop
from energy_demand.basic import basic_functions
from energy_demand.basic import lookup_tables
from energy_demand.technologies import tech_related
from energy_demand.plotting import basic_plot_functions
from energy_demand.plotting import result_mapping
from energy_demand.plotting import fig_p2_weather_val
def total_demand_national_scenarios(
        scenario_result_paths,
        sim_yrs,
        fueltype_str,
        path_out_plots
):
    """Plot national peak demand per scenario with a weather-year uncertainty band.

    For every scenario result folder, all weather-year runs are read in and the
    national peak demand (GW) of ``fueltype_str`` is collected per simulation
    year.  A line plot (peak of ``weather_yr_to_plot``) with a 0.05-0.95
    quantile band across weather years is then shown.

    Arguments
    ---------
    scenario_result_paths : list of str
        Paths to scenario result folders (the folder name is the scenario name).
    sim_yrs : list of int
        Simulated years.
    fueltype_str : str
        Fuel type to plot (e.g. 'electricity').
    path_out_plots : str
        Output path for plots.  NOTE(review): currently unused (the figure is
        only shown, not saved); kept for interface stability.
    """
    dict_scenarios_weather_yrs = {}
    columns = [
        'weather_yr',
        'national_peak_demand']

    fueltype_int = tech_related.get_fueltype_int(fueltype_str)

    # ----------------
    # Read all data in form of scenario, simulation yr, weather yr
    # ----------------
    for scenario_path in scenario_result_paths:
        scenario_name = os.path.split(scenario_path)[-1]
        dict_scenarios_weather_yrs[scenario_name] = {}

        # Collect all (weather_yr, path) tuples of weather-yr run results.
        # Folder names are expected to look like "<weather_yr>__<station>".
        weather_yrs = []
        for simulation_run in os.listdir(scenario_path):
            if simulation_run == '_results_PDF_figs':
                continue  # skip the PDF figure output folder
            run_path = os.path.join(scenario_path, simulation_run)
            for weather_yr_scenario_path in os.listdir(run_path):
                try:
                    weather_yr = int(weather_yr_scenario_path.split("__")[0])
                except ValueError:
                    # Folder name does not start with a weather year
                    continue
                path_to_weather_yr = os.path.join(
                    scenario_path, simulation_run,
                    "{}__{}".format(weather_yr, 'all_stations'))
                weather_yrs.append((weather_yr, path_to_weather_yr))

        for simulation_yr in sim_yrs:
            entries = []
            for weather_yr, path_to_weather_yr in weather_yrs:
                seasons = date_prop.get_season(year_to_model=2015)
                model_yeardays_daytype, _, _ = date_prop.get_yeardays_daytype(
                    year_to_model=2015)

                results_container = read_data.read_in_results(
                    os.path.join(path_to_weather_yr, 'model_run_results_txt'),
                    seasons,
                    model_yeardays_daytype)

                # ---------------------------------------------------
                # Calculate hour with national peak demand.
                # This may be different depending on the weather yr.
                # ---------------------------------------------------
                ele_regions_8760 = results_container[
                    'ed_fueltype_regs_yh'][simulation_yr][fueltype_int]
                sum_all_regs_fueltype_8760 = np.sum(
                    ele_regions_8760, axis=0)  # Sum for every hour
                national_peak_GW = np.max(sum_all_regs_fueltype_8760)

                entries.append([weather_yr, national_peak_GW])

            # Build the per-simulation-yr frame in one go (DataFrame.append
            # was removed in pandas 2.0).
            dict_scenarios_weather_yrs[scenario_name][simulation_yr] = \
                pd.DataFrame(entries, columns=columns)

    # ------------------------------------------------------------------
    # Create plot
    # ------------------------------------------------------------------
    print("....create plot")
    weather_yr_to_plot = 1979  # TODO: make configurable
    color_list = [
        'red', 'green', 'orange', '#37AB65', '#C0E4FF', '#3DF735', '#AD6D70',
        '#EC2504', '#8C0B90', '#27B502', '#7C60A8', '#CF95D7', '#F6CC1D']

    # Quantiles for the uncertainty band across weather years
    quantile_95 = 0.95
    quantile_05 = 0.05

    # Create dataframes with columns as scenarios and rows as simulation yrs
    scenarios = list(dict_scenarios_weather_yrs.keys())
    df_total_demand_2015 = pd.DataFrame(columns=scenarios)
    df_q_95_scenarios = pd.DataFrame(columns=scenarios)
    df_q_05_scenarios = pd.DataFrame(columns=scenarios)

    for simulation_yr in sim_yrs:
        line_entries_95 = []
        line_entries_05 = []
        line_entries_tot_h = []

        for scenario_name in scenarios:
            print("-- {} {}".format(scenario_name, simulation_yr))

            # Quantiles of national peak demand over all weather years
            df_weather_yrs = dict_scenarios_weather_yrs[scenario_name][simulation_yr]
            line_entries_95.append(
                df_weather_yrs['national_peak_demand'].quantile(quantile_95))
            line_entries_05.append(
                df_weather_yrs['national_peak_demand'].quantile(quantile_05))

            # Peak demand of the selected weather year
            peak_selected_weather_yr = df_weather_yrs[
                df_weather_yrs['weather_yr'] == weather_yr_to_plot][
                    'national_peak_demand'].values[0]
            line_entries_tot_h.append(peak_selected_weather_yr)

        # Try to smooth lines; fall back to the raw entries on failure
        try:
            sim_yrs_smoothed, line_entries_tot_h_smoothed = \
                basic_plot_functions.smooth_data(
                    sim_yrs, line_entries_tot_h, num=40000)
        except Exception:
            sim_yrs_smoothed = sim_yrs
            line_entries_tot_h_smoothed = line_entries_tot_h

        df_q_95_scenarios = pd.concat(
            [df_q_95_scenarios,
             pd.DataFrame([line_entries_95], columns=scenarios)])
        df_q_05_scenarios = pd.concat(
            [df_q_05_scenarios,
             pd.DataFrame([line_entries_05], columns=scenarios)])
        df_total_demand_2015 = pd.concat(
            [df_total_demand_2015,
             pd.DataFrame([line_entries_tot_h_smoothed], columns=scenarios)])

    # ----
    # Set simulation year as index
    # ----
    df_total_demand_2015 = df_total_demand_2015.set_index([sim_yrs_smoothed])
    df_q_95_scenarios = df_q_95_scenarios.set_index([sim_yrs])
    df_q_05_scenarios = df_q_05_scenarios.set_index([sim_yrs])

    # Plot one line plus uncertainty band per scenario
    for cnt, scenario in enumerate(scenarios):
        # Demand of the selected weather year
        df_total_demand_2015[scenario].plot.line(
            color=color_list[cnt], style='-', label=": {}".format(scenario))

        # Uncertainty range between the 0.05 and 0.95 quantiles
        plt.fill_between(
            sim_yrs,                       # x
            df_q_95_scenarios[scenario],   # y1
            df_q_05_scenarios[scenario],   # y2
            alpha=0.15,
            facecolor=color_list[cnt])

    plt.legend(
        ncol=1,
        bbox_to_anchor=(0.5, 0., 0.5, 0.5),
        prop={'size': 8},
        frameon=False)

    plt.ylabel("GW")
    plt.xlabel("year")

    plt.tight_layout()
    plt.margins(x=0)
    plt.show()
| [
"pandas.DataFrame",
"matplotlib.pyplot.show",
"numpy.sum",
"os.path.join",
"numpy.argmax",
"matplotlib.pyplot.margins",
"matplotlib.pyplot.legend",
"energy_demand.basic.date_prop.get_season",
"energy_demand.plotting.basic_plot_functions.smooth_data",
"energy_demand.technologies.tech_related.get_fu... | [((779, 822), 'energy_demand.technologies.tech_related.get_fueltype_int', 'tech_related.get_fueltype_int', (['fueltype_str'], {}), '(fueltype_str)\n', (808, 822), False, 'from energy_demand.technologies import tech_related\n'), ((4674, 4705), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': 'scenarios'}), '(columns=scenarios)\n', (4686, 4705), True, 'import pandas as pd\n'), ((4730, 4761), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': 'scenarios'}), '(columns=scenarios)\n', (4742, 4761), True, 'import pandas as pd\n'), ((4786, 4817), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': 'scenarios'}), '(columns=scenarios)\n', (4798, 4817), True, 'import pandas as pd\n'), ((7420, 7512), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'ncol': '(1)', 'bbox_to_anchor': '(0.5, 0.0, 0.5, 0.5)', 'prop': "{'size': 8}", 'frameon': '(False)'}), "(ncol=1, bbox_to_anchor=(0.5, 0.0, 0.5, 0.5), prop={'size': 8},\n frameon=False)\n", (7430, 7512), True, 'import matplotlib.pyplot as plt\n'), ((7575, 7591), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""GW"""'], {}), "('GW')\n", (7585, 7591), True, 'import matplotlib.pyplot as plt\n'), ((7596, 7614), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""year"""'], {}), "('year')\n", (7606, 7614), True, 'import matplotlib.pyplot as plt\n'), ((7639, 7657), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (7655, 7657), True, 'import matplotlib.pyplot as plt\n'), ((7662, 7678), 'matplotlib.pyplot.margins', 'plt.margins', ([], {'x': '(0)'}), '(x=0)\n', (7673, 7678), True, 'import matplotlib.pyplot as plt\n'), ((7684, 7694), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (7692, 7694), True, 'import matplotlib.pyplot as plt\n'), ((1242, 1267), 'os.listdir', 'os.listdir', (['scenario_path'], {}), '(scenario_path)\n', (1252, 1267), False, 'import os\n'), ((7121, 7248), 'matplotlib.pyplot.fill_between', 'plt.fill_between', (['sim_yrs', 
'df_q_95_scenarios[scenario]', 'df_q_05_scenarios[scenario]'], {'alpha': '(0.15)', 'facecolor': 'color_list[cnt]'}), '(sim_yrs, df_q_95_scenarios[scenario], df_q_05_scenarios[\n scenario], alpha=0.15, facecolor=color_list[cnt])\n', (7137, 7248), True, 'import matplotlib.pyplot as plt\n'), ((1008, 1036), 'os.path.split', 'os.path.split', (['scenario_path'], {}), '(scenario_path)\n', (1021, 1036), False, 'import os\n'), ((2111, 2140), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': 'columns'}), '(columns=columns)\n', (2123, 2140), True, 'import pandas as pd\n'), ((5748, 5820), 'energy_demand.plotting.basic_plot_functions.smooth_data', 'basic_plot_functions.smooth_data', (['sim_yrs', 'line_entries_tot_h'], {'num': '(40000)'}), '(sim_yrs, line_entries_tot_h, num=40000)\n', (5780, 5820), False, 'from energy_demand.plotting import basic_plot_functions\n'), ((5992, 6042), 'pandas.DataFrame', 'pd.DataFrame', (['[line_entries_95]'], {'columns': 'scenarios'}), '([line_entries_95], columns=scenarios)\n', (6004, 6042), True, 'import pandas as pd\n'), ((6097, 6147), 'pandas.DataFrame', 'pd.DataFrame', (['[line_entries_05]'], {'columns': 'scenarios'}), '([line_entries_05], columns=scenarios)\n', (6109, 6147), True, 'import pandas as pd\n'), ((6208, 6270), 'pandas.DataFrame', 'pd.DataFrame', (['[line_entries_tot_h_smoothed]'], {'columns': 'scenarios'}), '([line_entries_tot_h_smoothed], columns=scenarios)\n', (6220, 6270), True, 'import pandas as pd\n'), ((2243, 2283), 'energy_demand.basic.date_prop.get_season', 'date_prop.get_season', ([], {'year_to_model': '(2015)'}), '(year_to_model=2015)\n', (2263, 2283), False, 'from energy_demand.basic import date_prop\n'), ((2331, 2381), 'energy_demand.basic.date_prop.get_yeardays_daytype', 'date_prop.get_yeardays_daytype', ([], {'year_to_model': '(2015)'}), '(year_to_model=2015)\n', (2361, 2381), False, 'from energy_demand.basic import date_prop\n'), ((3016, 3048), 'numpy.sum', 'np.sum', (['ele_regions_8760'], {'axis': '(0)'}), 
'(ele_regions_8760, axis=0)\n', (3022, 3048), True, 'import numpy as np\n'), ((3202, 3239), 'numpy.argmax', 'np.argmax', (['sum_all_regs_fueltype_8760'], {}), '(sum_all_regs_fueltype_8760)\n', (3211, 3239), True, 'import numpy as np\n'), ((3269, 3303), 'numpy.max', 'np.max', (['sum_all_regs_fueltype_8760'], {}), '(sum_all_regs_fueltype_8760)\n', (3275, 3303), True, 'import numpy as np\n'), ((3399, 3433), 'numpy.max', 'np.max', (['sum_all_regs_fueltype_8760'], {}), '(sum_all_regs_fueltype_8760)\n', (3405, 3433), True, 'import numpy as np\n'), ((3711, 3752), 'pandas.DataFrame', 'pd.DataFrame', (['line_entry'], {'columns': 'columns'}), '(line_entry, columns=columns)\n', (3723, 3752), True, 'import pandas as pd\n'), ((1441, 1484), 'os.path.join', 'os.path.join', (['scenario_path', 'simulation_run'], {}), '(scenario_path, simulation_run)\n', (1453, 1484), False, 'import os\n'), ((2466, 2523), 'os.path.join', 'os.path.join', (['path_to_weather_yr', '"""model_run_results_txt"""'], {}), "(path_to_weather_yr, 'model_run_results_txt')\n", (2478, 2523), False, 'import os\n'), ((3129, 3166), 'numpy.argmax', 'np.argmax', (['sum_all_regs_fueltype_8760'], {}), '(sum_all_regs_fueltype_8760)\n', (3138, 3166), True, 'import numpy as np\n')] |
from __future__ import annotations
import h5py
from typing import List, Tuple, Dict
import pandas as pd
import numpy as np
from attrbench.metrics.result import AbstractMetricResult
from attrbench.lib import NDArrayTree
class MaskerActivationMetricResult(AbstractMetricResult):
    """Metric result indexed by masker, activation function and method name.

    The underlying data is stored in an ``NDArrayTree`` with levels
    ``masker -> activation_fn -> method``.
    """
    inverted: bool

    def __init__(self, method_names: List[str], maskers: List[str],
                 activation_fns: List[str]):
        super().__init__(method_names)
        self.maskers = maskers
        self.activation_fns = activation_fns
        self._tree = NDArrayTree([
            ("masker", maskers),
            ("activation_fn", activation_fns),
            ("method", method_names),
        ])

    def append(self, data: Dict, **kwargs):
        """Append new data to the result tree."""
        self.tree.append(data, **kwargs)

    def add_to_hdf(self, group: h5py.Group):
        """Serialize the result tree into the given HDF5 group."""
        self.tree.add_to_hdf(group)

    @classmethod
    def load_from_hdf(cls, group: h5py.Group) -> MaskerActivationMetricResult:
        """Reconstruct a result object from an HDF5 group.

        The group layout mirrors the tree levels: masker / activation_fn /
        method.
        """
        maskers = list(group.keys())
        activation_fns = list(group[maskers[0]].keys())
        method_names = list(group[maskers[0]][activation_fns[0]].keys())
        result = cls(method_names, maskers, activation_fns)
        result._tree = NDArrayTree.load_from_hdf(
            ["masker", "activation_fn", "method"], group)
        return result

    def _postproc_fn(self, x):
        # Default post-processing: drop the trailing singleton axis.
        return np.squeeze(x, axis=-1)

    def get_df(self, mode="raw", include_baseline=False,
               masker: str = "constant", activation_fn: str = "linear",
               postproc_fn=None) -> Tuple[pd.DataFrame, bool]:
        """Return a dataframe of results for one masker/activation_fn slice.

        The ``_BASELINE`` pseudo-method is extracted separately and handed to
        ``_get_df`` together with the raw per-method results.
        """
        if postproc_fn is None:
            postproc_fn = self._postproc_fn
        selection = dict(masker=[masker], activation_fn=[activation_fn])
        raw = self.tree.get(
            postproc_fn=postproc_fn,
            exclude=dict(method=["_BASELINE"]),
            select=selection,
        )[masker][activation_fn]
        raw_results = pd.DataFrame.from_dict(raw)
        baseline = self.tree.get(
            postproc_fn=postproc_fn,
            select=dict(method=["_BASELINE"], **selection),
        )[masker][activation_fn]["_BASELINE"]
        baseline_results = pd.DataFrame(baseline)
        return self._get_df(raw_results, baseline_results, mode,
                            include_baseline)
| [
"numpy.squeeze",
"attrbench.lib.NDArrayTree.load_from_hdf",
"attrbench.lib.NDArrayTree"
] | [((531, 631), 'attrbench.lib.NDArrayTree', 'NDArrayTree', (["[('masker', maskers), ('activation_fn', activation_fns), ('method',\n method_names)]"], {}), "([('masker', maskers), ('activation_fn', activation_fns), (\n 'method', method_names)])\n", (542, 631), False, 'from attrbench.lib import NDArrayTree\n'), ((1187, 1258), 'attrbench.lib.NDArrayTree.load_from_hdf', 'NDArrayTree.load_from_hdf', (["['masker', 'activation_fn', 'method']", 'group'], {}), "(['masker', 'activation_fn', 'method'], group)\n", (1212, 1258), False, 'from attrbench.lib import NDArrayTree\n'), ((1328, 1350), 'numpy.squeeze', 'np.squeeze', (['x'], {'axis': '(-1)'}), '(x, axis=-1)\n', (1338, 1350), True, 'import numpy as np\n')] |
import neural_network_lyapunov.barrier as mut
import neural_network_lyapunov.control_affine_system as control_affine_system
import neural_network_lyapunov.utils as utils
import neural_network_lyapunov.gurobi_torch_mip as gurobi_torch_mip
import torch
import numpy as np
import unittest
import gurobipy
class TestInfNormTerm(unittest.TestCase):
    def test_from_bounding_box(self):
        """On the box boundary, |R @ x - p|_inf should equal the given scale."""
        dtype = torch.float64
        lower = torch.tensor([-3, -2, 1], dtype=dtype)
        upper = torch.tensor([-1, 2, 3], dtype=dtype)
        scale = 0.5
        dut = mut.InfNormTerm.from_bounding_box(lower, upper, scale)
        # Sample states on the boundary of the box [lower, upper].
        samples = utils.uniform_sample_on_box_boundary(lower, upper, 100)
        inf_norms = torch.norm(
            dut.R @ samples.T - dut.p.unsqueeze(1), p=float("inf"), dim=0)
        np.testing.assert_allclose(
            inf_norms.detach().numpy(),
            scale * np.ones((samples.shape[0], )))
class TestBarrier(unittest.TestCase):
# Tests for mut.Barrier: barrier value evaluation, its MILP encoding over a
# region, the infinity-norm term helper, and barrier gradients.
def setUp(self):
# Fixture: a 2-state / 3-input linear control-affine system with box bounds
# and a fixed (2, 4, 3, 1) leaky-ReLU network used as the barrier network.
self.dtype = torch.float64
self.linear_system = control_affine_system.LinearSystem(
torch.tensor([[1, 3], [2, -4]], dtype=self.dtype),
torch.tensor([[1, 2, 3], [0, 1, -1]], dtype=self.dtype),
x_lo=torch.tensor([-2, -3], dtype=self.dtype),
x_up=torch.tensor([3, 1], dtype=self.dtype),
u_lo=torch.tensor([-1, -3, 2], dtype=self.dtype),
u_up=torch.tensor([2, -1, 4], dtype=self.dtype))
self.barrier_relu1 = utils.setup_relu((2, 4, 3, 1),
params=None,
negative_slope=0.01,
bias=True,
dtype=self.dtype)
# Fix all network weights/biases so test results are deterministic.
self.barrier_relu1[0].weight.data = torch.tensor(
[[1, -1], [0, 2], [1, 3], [-1, -2]], dtype=self.dtype)
self.barrier_relu1[0].bias.data = torch.tensor([0, 1, -1, 2],
dtype=self.dtype)
self.barrier_relu1[2].weight.data = torch.tensor(
[[1, 0, -1, 2], [0, 2, -1, 1], [1, 0, 1, -2]], dtype=self.dtype)
self.barrier_relu1[2].bias.data = torch.tensor([0, 2, 3],
dtype=self.dtype)
self.barrier_relu1[4].weight.data = torch.tensor([[1, -3, 2]],
dtype=self.dtype)
self.barrier_relu1[4].bias.data = torch.tensor([-1], dtype=self.dtype)
def test_barrier_value(self):
# barrier_value(x, x*, c) should equal relu(x) - relu(x*) + c; with an
# inf_norm_term it additionally subtracts max|R @ x - p|.
dut = mut.Barrier(self.linear_system, self.barrier_relu1)
x = torch.tensor([2, 3], dtype=self.dtype)
# Test a single state.
x_star = torch.tensor([-0.2, 1], dtype=self.dtype)
c = 0.5
self.assertEqual(
dut.barrier_value(x, x_star, c).item(),
(dut.barrier_relu(x) - dut.barrier_relu(x_star) + c).item())
# Test a batch of states.
x = torch.tensor([[-1, 1], [2, -1], [0, 0.5]], dtype=self.dtype)
val = dut.barrier_value(x, x_star, c)
self.assertEqual(val.shape, (x.shape[0], 1))
for i in range(x.shape[0]):
self.assertEqual(val[i].item(),
(dut.barrier_relu(x[i]) -
dut.barrier_relu(x_star) + c).item())
# Test with inf_norm_term
inf_norm_term = mut.InfNormTerm(R=torch.tensor(
[[1, 3], [-1, 2], [0, 1]], dtype=self.dtype),
p=torch.tensor([1, 2, -3],
dtype=self.dtype))
val = dut.barrier_value(x, x_star, c, inf_norm_term)
self.assertEqual(val.shape, (x.shape[0], 1))
for i in range(x.shape[0]):
# Batched and single-state evaluations must agree.
self.assertEqual(
val[i].item(),
(dut.barrier_relu(x[i]) - dut.barrier_relu(x_star) + c -
torch.max(torch.abs(inf_norm_term.R @ x[i] -
inf_norm_term.p))).item())
self.assertEqual(
val[i].item(),
dut.barrier_value(x[i], x_star, c, inf_norm_term).item())
def barrier_value_as_milp_tester(self, dut, x_star, c, region_cnstr,
inf_norm_term):
# Helper: solve the barrier-value MILP over region_cnstr and check that
# (1) the optimum matches barrier_value at the optimal x, (2) the optimal
# x satisfies region_cnstr, and (3) no sampled in-region state beats it.
ret = dut.barrier_value_as_milp(x_star,
c,
region_cnstr,
inf_norm_term=inf_norm_term)
milp = ret.milp
x = ret.x
milp.gurobi_model.setParam(gurobipy.GRB.Param.OutputFlag, False)
milp.gurobi_model.optimize()
assert (milp.gurobi_model.status == gurobipy.GRB.Status.OPTIMAL)
# First check if the objective value is same as h(x)
x_optimal = torch.tensor([v.x for v in x], dtype=self.dtype)
self.assertAlmostEqual(
milp.gurobi_model.ObjVal,
dut.barrier_value(x_optimal, x_star, c, inf_norm_term).item())
# Now verify that x_optimal is actually in the region. We do
# this by creating an MILP that only contains region_cnstr.
milp_region = gurobi_torch_mip.GurobiTorchMIP(self.dtype)
x_region = milp_region.addVars(
dut.system.x_dim,
lb=torch.from_numpy(dut.system.x_lo_all),
ub=torch.from_numpy(dut.system.x_up_all))
milp_region.add_mixed_integer_linear_constraints(
region_cnstr, x_region, None, "region_s", "region_binary", "", "",
"", gurobipy.GRB.BINARY)
# Pin the region MILP variables to x_optimal; feasibility then proves
# x_optimal lies inside the region.
for j in range(dut.system.x_dim):
x_region[j].lb = x_optimal[j].item()
x_region[j].ub = x_optimal[j].item()
milp_region.gurobi_model.setParam(gurobipy.GRB.Param.OutputFlag, False)
milp_region.gurobi_model.optimize()
self.assertEqual(milp_region.gurobi_model.status,
gurobipy.GRB.Status.OPTIMAL)
# Now sample many states. If they are in the region, then their
# barrier function should be no larger than the MILP optimal cost.
torch.manual_seed(0)
x_samples = utils.uniform_sample_in_box(
torch.from_numpy(dut.system.x_lo_all),
torch.from_numpy(dut.system.x_up_all), 1000)
for i in range(x_samples.shape[0]):
for j in range(dut.system.x_dim):
x_region[j].lb = x_samples[i][j].item()
x_region[j].ub = x_samples[i][j].item()
milp_region.gurobi_model.optimize()
if milp_region.gurobi_model.status == gurobipy.GRB.Status.OPTIMAL:
self.assertLessEqual(
dut.barrier_value(x_samples[i], x_star, c,
inf_norm_term).item(),
milp.gurobi_model.ObjVal)
def test_barrier_value_as_milp(self):
dut = mut.Barrier(self.linear_system, self.barrier_relu1)
# The unsafe region is just x[0] <= 0
unsafe_region_cnstr1 = gurobi_torch_mip.MixedIntegerConstraintsReturn()
unsafe_region_cnstr1.Ain_input = torch.tensor([[1, 0]],
dtype=self.dtype)
unsafe_region_cnstr1.rhs_in = torch.tensor([0], dtype=self.dtype)
x_star = torch.tensor([0.2, 0.1], dtype=self.dtype)
c = 0.5
self.barrier_value_as_milp_tester(dut,
x_star,
c,
unsafe_region_cnstr1,
inf_norm_term=None)
# The unsafe region is x[0] <= 0 or x[1] >= 1
# Formulated as the mixed-integer linear constraint
# x[0] <= x_up[0] * z
# x[1] >= 1 - (1-x_lo[1])(1-z)
unsafe_region_cnstr2 = gurobi_torch_mip.MixedIntegerConstraintsReturn()
unsafe_region_cnstr2.Ain_input = torch.tensor([[1, 0], [0, -1]],
dtype=self.dtype)
unsafe_region_cnstr2.Ain_binary = torch.tensor(
[[dut.system.x_up_all[0]], [1 - dut.system.x_lo_all[1]]],
dtype=self.dtype)
unsafe_region_cnstr2.rhs_in = torch.tensor(
[0, -dut.system.x_lo_all[1]], dtype=self.dtype)
self.barrier_value_as_milp_tester(dut,
x_star,
c,
unsafe_region_cnstr2,
inf_norm_term=None)
# Repeat with an infinity-norm term in the barrier value.
self.barrier_value_as_milp_tester(dut,
x_star,
c,
unsafe_region_cnstr2,
inf_norm_term=mut.InfNormTerm(
torch.tensor([[1, 3], [2, -1]],
dtype=self.dtype),
torch.tensor([1, 3],
dtype=self.dtype)))
def test_add_inf_norm_term(self):
# _add_inf_norm_term should add one continuous variable equal to
# |R @ x - p|_inf and 2*R.shape[0] binaries selecting the active row/sign.
dut = mut.Barrier(self.linear_system, self.barrier_relu1)
milp = gurobi_torch_mip.GurobiTorchMIP(self.dtype)
x = milp.addVars(dut.system.x_dim, lb=-gurobipy.GRB.INFINITY)
inf_norm_term = mut.InfNormTerm(
torch.tensor([[1, 3], [-2, 4], [3, 1]], dtype=self.dtype),
torch.tensor([1, -2, 3], dtype=self.dtype))
inf_norm, inf_norm_binary = dut._add_inf_norm_term(
milp, x, inf_norm_term)
self.assertEqual(len(inf_norm), 1)
self.assertEqual(len(inf_norm_binary), inf_norm_term.R.shape[0] * 2)
x_samples = utils.uniform_sample_in_box(dut.system.x_lo,
dut.system.x_up, 100)
milp.gurobi_model.setParam(gurobipy.GRB.Param.OutputFlag, False)
for i in range(x_samples.shape[0]):
# Fix x to the sample and check the MILP reproduces the inf norm.
for j in range(dut.system.x_dim):
x[j].lb = x_samples[i, j].item()
x[j].ub = x_samples[i, j].item()
milp.gurobi_model.optimize()
self.assertEqual(milp.gurobi_model.status,
gurobipy.GRB.Status.OPTIMAL)
self.assertAlmostEqual(
inf_norm[0].x,
torch.norm(inf_norm_term.R @ x_samples[i] - inf_norm_term.p,
p=float("inf")).item())
# Exactly one binary (the argmax over +/- rows) should be active.
inf_norm_binary_expected = np.zeros(
(2 * inf_norm_term.R.shape[0], ))
inf_norm_binary_expected[torch.argmax(
torch.cat(
(inf_norm_term.R @ x_samples[i] - inf_norm_term.p,
-inf_norm_term.R @ x_samples[i] + inf_norm_term.p)))] = 1
np.testing.assert_allclose(
np.array([v.x for v in inf_norm_binary]),
inf_norm_binary_expected)
def test_barrier_gradient(self):
# _barrier_gradient should match autograd of barrier_value, with and
# without the infinity-norm term, and return all sub-gradients at kinks.
dut = mut.Barrier(self.linear_system, self.barrier_relu1)
inf_norm_term = mut.InfNormTerm(
torch.tensor([[1, 3], [-2, 4], [3, 1]], dtype=self.dtype),
torch.tensor([1, -2, 3], dtype=self.dtype))
torch.manual_seed(0)
x_samples = utils.uniform_sample_in_box(dut.system.x_lo,
dut.system.x_up, 100)
for i in range(x_samples.shape[0]):
dhdx = dut._barrier_gradient(x_samples[i],
inf_norm_term=None,
zero_tol=0.)
assert (dhdx.shape[0] == 1)
x_clone = x_samples[i].clone()
x_clone.requires_grad = True
dut.barrier_value(x_clone,
torch.zeros_like(x_clone, dtype=self.dtype),
c=100.,
inf_norm_term=None).backward()
dhdx_expected = x_clone.grad
np.testing.assert_allclose(dhdx[0].detach().numpy(),
dhdx_expected.detach().numpy())
dhdx = dut._barrier_gradient(x_samples[i],
inf_norm_term,
zero_tol=0.)
assert (dhdx.shape[0] == 1)
x_clone.grad.zero_()
dut.barrier_value(x_clone,
torch.zeros_like(x_clone, dtype=self.dtype),
c=100.,
inf_norm_term=inf_norm_term).backward()
dhdx_expected = x_clone.grad
np.testing.assert_allclose(dhdx[0].detach().numpy(),
dhdx_expected.detach().numpy())
# Now try x with multiple sub-gradient.
x = torch.tensor([2, 1], dtype=self.dtype)
dhdx = dut._barrier_gradient(x, inf_norm_term, zero_tol=0.)
self.assertEqual(dhdx.shape, (2, dut.system.x_dim))
# The two sub-gradients should bracket the gradients at small
# perturbations on either side of the kink (order unspecified).
x_perturb1 = x + torch.tensor([1E-6, 0], dtype=self.dtype)
x_perturb2 = x - torch.tensor([1E-6, 0], dtype=self.dtype)
dhdx1 = dut._barrier_gradient(x_perturb1, inf_norm_term, zero_tol=0.)
dhdx2 = dut._barrier_gradient(x_perturb2, inf_norm_term, zero_tol=0.)
self.assertTrue(
torch.norm(dhdx - torch.cat((dhdx1, dhdx2), dim=0)) < 1E-10
or torch.norm(dhdx - torch.cat((dhdx2, dhdx1), dim=0)) < 1E-10)
def barrier_gradient_batch_tester(self, dut, x, inf_norm_term,
create_graph):
# Helper: the batched gradient must equal per-sample _barrier_gradient.
dhdx = dut._barrier_gradient_batch(x,
inf_norm_term,
create_graph=create_graph)
self.assertEqual(dhdx.shape, (x.shape[0], dut.system.x_dim))
dhdx_expected = torch.zeros_like(x, dtype=self.dtype)
for i in range(x.shape[0]):
dhdx_sample = dut._barrier_gradient(x[i],
inf_norm_term,
zero_tol=0.)
assert (dhdx_sample.shape[0] == 1)
dhdx_expected[i] = dhdx_sample
np.testing.assert_allclose(dhdx.detach().numpy(),
dhdx_expected.detach().numpy())
return dhdx, dhdx_expected
def test_barrier_gradient_batch(self):
dut = mut.Barrier(self.linear_system, self.barrier_relu1)
inf_norm_term = mut.InfNormTerm(
torch.tensor([[1, 3], [-2, 4], [3, 1]], dtype=self.dtype),
torch.tensor([1, -2, 3], dtype=self.dtype))
torch.manual_seed(0)
x_samples = utils.uniform_sample_in_box(dut.system.x_lo,
dut.system.x_up, 100)
self.barrier_gradient_batch_tester(dut,
x_samples,
inf_norm_term=None,
create_graph=False)
self.barrier_gradient_batch_tester(dut,
x_samples,
inf_norm_term=inf_norm_term,
create_graph=False)
# With create_graph=True, gradients of dhdx w.r.t. network parameters
# should match those of the per-sample reference.
for v in dut.barrier_relu.parameters():
v.requires_grad = True
dhdx, dhdx_expected = self.barrier_gradient_batch_tester(
dut, x_samples, inf_norm_term=inf_norm_term, create_graph=True)
torch.sum(dhdx).backward()
grad = [
v.grad.clone() for v in dut.barrier_relu.parameters()
if v.grad is not None
]
dut.barrier_relu.zero_grad()
torch.sum(dhdx_expected).backward()
grad_expected = [
v.grad.clone() for v in dut.barrier_relu.parameters()
if v.grad is not None
]
for (v1, v2) in zip(grad, grad_expected):
np.testing.assert_allclose(v1.detach().numpy(),
v2.detach().numpy())
dut.barrier_relu.zero_grad()
# Run the test suite when this file is executed directly.
if __name__ == "__main__":
unittest.main()
| [
"unittest.main",
"torch.from_numpy",
"neural_network_lyapunov.utils.setup_relu",
"neural_network_lyapunov.gurobi_torch_mip.MixedIntegerConstraintsReturn",
"torch.zeros_like",
"neural_network_lyapunov.gurobi_torch_mip.GurobiTorchMIP",
"torch.manual_seed",
"neural_network_lyapunov.utils.uniform_sample_i... | [((16110, 16125), 'unittest.main', 'unittest.main', ([], {}), '()\n', (16123, 16125), False, 'import unittest\n'), ((430, 468), 'torch.tensor', 'torch.tensor', (['[-3, -2, 1]'], {'dtype': 'dtype'}), '([-3, -2, 1], dtype=dtype)\n', (442, 468), False, 'import torch\n'), ((484, 521), 'torch.tensor', 'torch.tensor', (['[-1, 2, 3]'], {'dtype': 'dtype'}), '([-1, 2, 3], dtype=dtype)\n', (496, 521), False, 'import torch\n'), ((556, 608), 'neural_network_lyapunov.barrier.InfNormTerm.from_bounding_box', 'mut.InfNormTerm.from_bounding_box', (['x_lo', 'x_up', 'scale'], {}), '(x_lo, x_up, scale)\n', (589, 608), True, 'import neural_network_lyapunov.barrier as mut\n'), ((629, 682), 'neural_network_lyapunov.utils.uniform_sample_on_box_boundary', 'utils.uniform_sample_on_box_boundary', (['x_lo', 'x_up', '(100)'], {}), '(x_lo, x_up, 100)\n', (665, 682), True, 'import neural_network_lyapunov.utils as utils\n'), ((1485, 1582), 'neural_network_lyapunov.utils.setup_relu', 'utils.setup_relu', (['(2, 4, 3, 1)'], {'params': 'None', 'negative_slope': '(0.01)', 'bias': '(True)', 'dtype': 'self.dtype'}), '((2, 4, 3, 1), params=None, negative_slope=0.01, bias=True,\n dtype=self.dtype)\n', (1501, 1582), True, 'import neural_network_lyapunov.utils as utils\n'), ((1807, 1874), 'torch.tensor', 'torch.tensor', (['[[1, -1], [0, 2], [1, 3], [-1, -2]]'], {'dtype': 'self.dtype'}), '([[1, -1], [0, 2], [1, 3], [-1, -2]], dtype=self.dtype)\n', (1819, 1874), False, 'import torch\n'), ((1930, 1975), 'torch.tensor', 'torch.tensor', (['[0, 1, -1, 2]'], {'dtype': 'self.dtype'}), '([0, 1, -1, 2], dtype=self.dtype)\n', (1942, 1975), False, 'import torch\n'), ((2075, 2152), 'torch.tensor', 'torch.tensor', (['[[1, 0, -1, 2], [0, 2, -1, 1], [1, 0, 1, -2]]'], {'dtype': 'self.dtype'}), '([[1, 0, -1, 2], [0, 2, -1, 1], [1, 0, 1, -2]], dtype=self.dtype)\n', (2087, 2152), False, 'import torch\n'), ((2208, 2249), 'torch.tensor', 'torch.tensor', (['[0, 2, 3]'], 
{'dtype': 'self.dtype'}), '([0, 2, 3], dtype=self.dtype)\n', (2220, 2249), False, 'import torch\n'), ((2349, 2393), 'torch.tensor', 'torch.tensor', (['[[1, -3, 2]]'], {'dtype': 'self.dtype'}), '([[1, -3, 2]], dtype=self.dtype)\n', (2361, 2393), False, 'import torch\n'), ((2493, 2529), 'torch.tensor', 'torch.tensor', (['[-1]'], {'dtype': 'self.dtype'}), '([-1], dtype=self.dtype)\n', (2505, 2529), False, 'import torch\n'), ((2579, 2630), 'neural_network_lyapunov.barrier.Barrier', 'mut.Barrier', (['self.linear_system', 'self.barrier_relu1'], {}), '(self.linear_system, self.barrier_relu1)\n', (2590, 2630), True, 'import neural_network_lyapunov.barrier as mut\n'), ((2643, 2681), 'torch.tensor', 'torch.tensor', (['[2, 3]'], {'dtype': 'self.dtype'}), '([2, 3], dtype=self.dtype)\n', (2655, 2681), False, 'import torch\n'), ((2730, 2771), 'torch.tensor', 'torch.tensor', (['[-0.2, 1]'], {'dtype': 'self.dtype'}), '([-0.2, 1], dtype=self.dtype)\n', (2742, 2771), False, 'import torch\n'), ((2985, 3045), 'torch.tensor', 'torch.tensor', (['[[-1, 1], [2, -1], [0, 0.5]]'], {'dtype': 'self.dtype'}), '([[-1, 1], [2, -1], [0, 0.5]], dtype=self.dtype)\n', (2997, 3045), False, 'import torch\n'), ((4829, 4877), 'torch.tensor', 'torch.tensor', (['[v.x for v in x]'], {'dtype': 'self.dtype'}), '([v.x for v in x], dtype=self.dtype)\n', (4841, 4877), False, 'import torch\n'), ((5182, 5225), 'neural_network_lyapunov.gurobi_torch_mip.GurobiTorchMIP', 'gurobi_torch_mip.GurobiTorchMIP', (['self.dtype'], {}), '(self.dtype)\n', (5213, 5225), True, 'import neural_network_lyapunov.gurobi_torch_mip as gurobi_torch_mip\n'), ((6109, 6129), 'torch.manual_seed', 'torch.manual_seed', (['(0)'], {}), '(0)\n', (6126, 6129), False, 'import torch\n'), ((6881, 6932), 'neural_network_lyapunov.barrier.Barrier', 'mut.Barrier', (['self.linear_system', 'self.barrier_relu1'], {}), '(self.linear_system, self.barrier_relu1)\n', (6892, 6932), True, 'import neural_network_lyapunov.barrier as mut\n'), ((7011, 7059), 
'neural_network_lyapunov.gurobi_torch_mip.MixedIntegerConstraintsReturn', 'gurobi_torch_mip.MixedIntegerConstraintsReturn', ([], {}), '()\n', (7057, 7059), True, 'import neural_network_lyapunov.gurobi_torch_mip as gurobi_torch_mip\n'), ((7101, 7141), 'torch.tensor', 'torch.tensor', (['[[1, 0]]'], {'dtype': 'self.dtype'}), '([[1, 0]], dtype=self.dtype)\n', (7113, 7141), False, 'import torch\n'), ((7234, 7269), 'torch.tensor', 'torch.tensor', (['[0]'], {'dtype': 'self.dtype'}), '([0], dtype=self.dtype)\n', (7246, 7269), False, 'import torch\n'), ((7288, 7330), 'torch.tensor', 'torch.tensor', (['[0.2, 0.1]'], {'dtype': 'self.dtype'}), '([0.2, 0.1], dtype=self.dtype)\n', (7300, 7330), False, 'import torch\n'), ((7831, 7879), 'neural_network_lyapunov.gurobi_torch_mip.MixedIntegerConstraintsReturn', 'gurobi_torch_mip.MixedIntegerConstraintsReturn', ([], {}), '()\n', (7877, 7879), True, 'import neural_network_lyapunov.gurobi_torch_mip as gurobi_torch_mip\n'), ((7921, 7970), 'torch.tensor', 'torch.tensor', (['[[1, 0], [0, -1]]'], {'dtype': 'self.dtype'}), '([[1, 0], [0, -1]], dtype=self.dtype)\n', (7933, 7970), False, 'import torch\n'), ((8067, 8159), 'torch.tensor', 'torch.tensor', (['[[dut.system.x_up_all[0]], [1 - dut.system.x_lo_all[1]]]'], {'dtype': 'self.dtype'}), '([[dut.system.x_up_all[0]], [1 - dut.system.x_lo_all[1]]],\n dtype=self.dtype)\n', (8079, 8159), False, 'import torch\n'), ((8219, 8279), 'torch.tensor', 'torch.tensor', (['[0, -dut.system.x_lo_all[1]]'], {'dtype': 'self.dtype'}), '([0, -dut.system.x_lo_all[1]], dtype=self.dtype)\n', (8231, 8279), False, 'import torch\n'), ((9195, 9246), 'neural_network_lyapunov.barrier.Barrier', 'mut.Barrier', (['self.linear_system', 'self.barrier_relu1'], {}), '(self.linear_system, self.barrier_relu1)\n', (9206, 9246), True, 'import neural_network_lyapunov.barrier as mut\n'), ((9262, 9305), 'neural_network_lyapunov.gurobi_torch_mip.GurobiTorchMIP', 'gurobi_torch_mip.GurobiTorchMIP', (['self.dtype'], {}), 
'(self.dtype)\n', (9293, 9305), True, 'import neural_network_lyapunov.gurobi_torch_mip as gurobi_torch_mip\n'), ((9780, 9846), 'neural_network_lyapunov.utils.uniform_sample_in_box', 'utils.uniform_sample_in_box', (['dut.system.x_lo', 'dut.system.x_up', '(100)'], {}), '(dut.system.x_lo, dut.system.x_up, 100)\n', (9807, 9846), True, 'import neural_network_lyapunov.utils as utils\n'), ((11024, 11075), 'neural_network_lyapunov.barrier.Barrier', 'mut.Barrier', (['self.linear_system', 'self.barrier_relu1'], {}), '(self.linear_system, self.barrier_relu1)\n', (11035, 11075), True, 'import neural_network_lyapunov.barrier as mut\n'), ((11253, 11273), 'torch.manual_seed', 'torch.manual_seed', (['(0)'], {}), '(0)\n', (11270, 11273), False, 'import torch\n'), ((11294, 11360), 'neural_network_lyapunov.utils.uniform_sample_in_box', 'utils.uniform_sample_in_box', (['dut.system.x_lo', 'dut.system.x_up', '(100)'], {}), '(dut.system.x_lo, dut.system.x_up, 100)\n', (11321, 11360), True, 'import neural_network_lyapunov.utils as utils\n'), ((12836, 12874), 'torch.tensor', 'torch.tensor', (['[2, 1]'], {'dtype': 'self.dtype'}), '([2, 1], dtype=self.dtype)\n', (12848, 12874), False, 'import torch\n'), ((13854, 13891), 'torch.zeros_like', 'torch.zeros_like', (['x'], {'dtype': 'self.dtype'}), '(x, dtype=self.dtype)\n', (13870, 13891), False, 'import torch\n'), ((14414, 14465), 'neural_network_lyapunov.barrier.Barrier', 'mut.Barrier', (['self.linear_system', 'self.barrier_relu1'], {}), '(self.linear_system, self.barrier_relu1)\n', (14425, 14465), True, 'import neural_network_lyapunov.barrier as mut\n'), ((14643, 14663), 'torch.manual_seed', 'torch.manual_seed', (['(0)'], {}), '(0)\n', (14660, 14663), False, 'import torch\n'), ((14684, 14750), 'neural_network_lyapunov.utils.uniform_sample_in_box', 'utils.uniform_sample_in_box', (['dut.system.x_lo', 'dut.system.x_up', '(100)'], {}), '(dut.system.x_lo, dut.system.x_up, 100)\n', (14711, 14750), True, 'import neural_network_lyapunov.utils as 
utils\n'), ((1097, 1146), 'torch.tensor', 'torch.tensor', (['[[1, 3], [2, -4]]'], {'dtype': 'self.dtype'}), '([[1, 3], [2, -4]], dtype=self.dtype)\n', (1109, 1146), False, 'import torch\n'), ((1160, 1215), 'torch.tensor', 'torch.tensor', (['[[1, 2, 3], [0, 1, -1]]'], {'dtype': 'self.dtype'}), '([[1, 2, 3], [0, 1, -1]], dtype=self.dtype)\n', (1172, 1215), False, 'import torch\n'), ((6191, 6228), 'torch.from_numpy', 'torch.from_numpy', (['dut.system.x_lo_all'], {}), '(dut.system.x_lo_all)\n', (6207, 6228), False, 'import torch\n'), ((6242, 6279), 'torch.from_numpy', 'torch.from_numpy', (['dut.system.x_up_all'], {}), '(dut.system.x_up_all)\n', (6258, 6279), False, 'import torch\n'), ((9429, 9486), 'torch.tensor', 'torch.tensor', (['[[1, 3], [-2, 4], [3, 1]]'], {'dtype': 'self.dtype'}), '([[1, 3], [-2, 4], [3, 1]], dtype=self.dtype)\n', (9441, 9486), False, 'import torch\n'), ((9500, 9542), 'torch.tensor', 'torch.tensor', (['[1, -2, 3]'], {'dtype': 'self.dtype'}), '([1, -2, 3], dtype=self.dtype)\n', (9512, 9542), False, 'import torch\n'), ((10544, 10585), 'numpy.zeros', 'np.zeros', (['(2 * inf_norm_term.R.shape[0],)'], {}), '((2 * inf_norm_term.R.shape[0],))\n', (10552, 10585), True, 'import numpy as np\n'), ((11129, 11186), 'torch.tensor', 'torch.tensor', (['[[1, 3], [-2, 4], [3, 1]]'], {'dtype': 'self.dtype'}), '([[1, 3], [-2, 4], [3, 1]], dtype=self.dtype)\n', (11141, 11186), False, 'import torch\n'), ((11200, 11242), 'torch.tensor', 'torch.tensor', (['[1, -2, 3]'], {'dtype': 'self.dtype'}), '([1, -2, 3], dtype=self.dtype)\n', (11212, 11242), False, 'import torch\n'), ((13028, 13070), 'torch.tensor', 'torch.tensor', (['[1e-06, 0]'], {'dtype': 'self.dtype'}), '([1e-06, 0], dtype=self.dtype)\n', (13040, 13070), False, 'import torch\n'), ((13095, 13137), 'torch.tensor', 'torch.tensor', (['[1e-06, 0]'], {'dtype': 'self.dtype'}), '([1e-06, 0], dtype=self.dtype)\n', (13107, 13137), False, 'import torch\n'), ((14519, 14576), 'torch.tensor', 'torch.tensor', (['[[1, 3], [-2, 
4], [3, 1]]'], {'dtype': 'self.dtype'}), '([[1, 3], [-2, 4], [3, 1]], dtype=self.dtype)\n', (14531, 14576), False, 'import torch\n'), ((14590, 14632), 'torch.tensor', 'torch.tensor', (['[1, -2, 3]'], {'dtype': 'self.dtype'}), '([1, -2, 3], dtype=self.dtype)\n', (14602, 14632), False, 'import torch\n'), ((883, 913), 'numpy.ones', 'np.ones', (['(x_samples.shape[0],)'], {}), '((x_samples.shape[0],))\n', (890, 913), True, 'import numpy as np\n'), ((1234, 1274), 'torch.tensor', 'torch.tensor', (['[-2, -3]'], {'dtype': 'self.dtype'}), '([-2, -3], dtype=self.dtype)\n', (1246, 1274), False, 'import torch\n'), ((1293, 1331), 'torch.tensor', 'torch.tensor', (['[3, 1]'], {'dtype': 'self.dtype'}), '([3, 1], dtype=self.dtype)\n', (1305, 1331), False, 'import torch\n'), ((1350, 1393), 'torch.tensor', 'torch.tensor', (['[-1, -3, 2]'], {'dtype': 'self.dtype'}), '([-1, -3, 2], dtype=self.dtype)\n', (1362, 1393), False, 'import torch\n'), ((1412, 1454), 'torch.tensor', 'torch.tensor', (['[2, -1, 4]'], {'dtype': 'self.dtype'}), '([2, -1, 4], dtype=self.dtype)\n', (1424, 1454), False, 'import torch\n'), ((3424, 3481), 'torch.tensor', 'torch.tensor', (['[[1, 3], [-1, 2], [0, 1]]'], {'dtype': 'self.dtype'}), '([[1, 3], [-1, 2], [0, 1]], dtype=self.dtype)\n', (3436, 3481), False, 'import torch\n'), ((3538, 3580), 'torch.tensor', 'torch.tensor', (['[1, 2, -3]'], {'dtype': 'self.dtype'}), '([1, 2, -3], dtype=self.dtype)\n', (3550, 3580), False, 'import torch\n'), ((5311, 5348), 'torch.from_numpy', 'torch.from_numpy', (['dut.system.x_lo_all'], {}), '(dut.system.x_lo_all)\n', (5327, 5348), False, 'import torch\n'), ((5365, 5402), 'torch.from_numpy', 'torch.from_numpy', (['dut.system.x_up_all'], {}), '(dut.system.x_up_all)\n', (5381, 5402), False, 'import torch\n'), ((10888, 10928), 'numpy.array', 'np.array', (['[v.x for v in inf_norm_binary]'], {}), '([v.x for v in inf_norm_binary])\n', (10896, 10928), True, 'import numpy as np\n'), ((15499, 15514), 'torch.sum', 'torch.sum', (['dhdx'], {}), 
'(dhdx)\n', (15508, 15514), False, 'import torch\n'), ((15698, 15722), 'torch.sum', 'torch.sum', (['dhdx_expected'], {}), '(dhdx_expected)\n', (15707, 15722), False, 'import torch\n'), ((8886, 8935), 'torch.tensor', 'torch.tensor', (['[[1, 3], [2, -1]]'], {'dtype': 'self.dtype'}), '([[1, 3], [2, -1]], dtype=self.dtype)\n', (8898, 8935), False, 'import torch\n'), ((9042, 9080), 'torch.tensor', 'torch.tensor', (['[1, 3]'], {'dtype': 'self.dtype'}), '([1, 3], dtype=self.dtype)\n', (9054, 9080), False, 'import torch\n'), ((10671, 10788), 'torch.cat', 'torch.cat', (['(inf_norm_term.R @ x_samples[i] - inf_norm_term.p, -inf_norm_term.R @\n x_samples[i] + inf_norm_term.p)'], {}), '((inf_norm_term.R @ x_samples[i] - inf_norm_term.p, -inf_norm_term\n .R @ x_samples[i] + inf_norm_term.p))\n', (10680, 10788), False, 'import torch\n'), ((11816, 11859), 'torch.zeros_like', 'torch.zeros_like', (['x_clone'], {'dtype': 'self.dtype'}), '(x_clone, dtype=self.dtype)\n', (11832, 11859), False, 'import torch\n'), ((12445, 12488), 'torch.zeros_like', 'torch.zeros_like', (['x_clone'], {'dtype': 'self.dtype'}), '(x_clone, dtype=self.dtype)\n', (12461, 12488), False, 'import torch\n'), ((13348, 13380), 'torch.cat', 'torch.cat', (['(dhdx1, dhdx2)'], {'dim': '(0)'}), '((dhdx1, dhdx2), dim=0)\n', (13357, 13380), False, 'import torch\n'), ((13423, 13455), 'torch.cat', 'torch.cat', (['(dhdx2, dhdx1)'], {'dim': '(0)'}), '((dhdx2, dhdx1), dim=0)\n', (13432, 13455), False, 'import torch\n'), ((3948, 3999), 'torch.abs', 'torch.abs', (['(inf_norm_term.R @ x[i] - inf_norm_term.p)'], {}), '(inf_norm_term.R @ x[i] - inf_norm_term.p)\n', (3957, 3999), False, 'import torch\n')] |
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
import logging
import mcubes
from icecream import ic
def extract_fields(bound_min, bound_max, resolution, query_func):
    """Evaluate ``query_func`` on a dense resolution^3 grid inside the box.

    The grid is traversed in chunks of at most 64 samples per axis so that
    only a small batch of points is on the device at any time.  Returns a
    float32 numpy array of shape (resolution, resolution, resolution).
    """
    chunk = 64
    axes = [
        torch.linspace(bound_min[d], bound_max[d], resolution).split(chunk)
        for d in range(3)
    ]
    field = np.zeros([resolution, resolution, resolution], dtype=np.float32)
    with torch.no_grad():
        for xi, xs in enumerate(axes[0]):
            for yi, ys in enumerate(axes[1]):
                for zi, zs in enumerate(axes[2]):
                    gx, gy, gz = torch.meshgrid(xs, ys, zs)
                    pts = torch.cat(
                        [gx.reshape(-1, 1), gy.reshape(-1, 1), gz.reshape(-1, 1)], dim=-1)
                    vals = query_func(pts).reshape(
                        len(xs), len(ys), len(zs)).detach().cpu().numpy()
                    field[xi * chunk: xi * chunk + len(xs),
                          yi * chunk: yi * chunk + len(ys),
                          zi * chunk: zi * chunk + len(zs)] = vals
    return field
def extract_geometry(bound_min, bound_max, resolution, threshold, query_func):
    """Extract a triangle mesh of the `threshold` level set of ``query_func``.

    Samples the scalar field on a grid, runs marching cubes in grid
    coordinates, then rescales the vertices back into world coordinates.
    Returns ``(vertices, triangles)`` as numpy arrays.
    """
    print('threshold: {}'.format(threshold))
    field = extract_fields(bound_min, bound_max, resolution, query_func)
    vertices, triangles = mcubes.marching_cubes(field, threshold)
    hi = bound_max.detach().cpu().numpy()
    lo = bound_min.detach().cpu().numpy()
    # Marching cubes works in voxel units; map [0, resolution-1] -> [lo, hi].
    vertices = vertices / (resolution - 1.0) * (hi - lo)[None, :] + lo[None, :]
    return vertices, triangles
def sample_pdf(bins, weights, n_samples, det=False):
    """Inverse-transform sampling of a piecewise-constant PDF (from NeRF).

    ``weights`` defines an (unnormalized) histogram over the intervals of
    ``bins``; ``n_samples`` positions are drawn per ray, stratified at bin
    midpoints when ``det`` is True and uniformly at random otherwise.
    """
    # Build a CDF over the bins; the epsilon keeps every interval non-empty.
    weights = weights + 1e-5
    pdf = weights / torch.sum(weights, -1, keepdim=True)
    cdf = torch.cumsum(pdf, -1)
    cdf = torch.cat([torch.zeros_like(cdf[..., :1]), cdf], -1)

    # Uniform samples in [0, 1).
    if det:
        u = torch.linspace(0. + 0.5 / n_samples, 1. - 0.5 / n_samples, steps=n_samples)
        u = u.expand(list(cdf.shape[:-1]) + [n_samples])
    else:
        u = torch.rand(list(cdf.shape[:-1]) + [n_samples])
    u = u.contiguous()

    # Invert the CDF: locate the bracketing bin for every sample.
    idx = torch.searchsorted(cdf, u, right=True)
    lo = torch.max(torch.zeros_like(idx - 1), idx - 1)
    hi = torch.min((cdf.shape[-1] - 1) * torch.ones_like(idx), idx)
    bracket = torch.stack([lo, hi], -1)  # (batch, N_samples, 2)

    gather_shape = [bracket.shape[0], bracket.shape[1], cdf.shape[-1]]
    cdf_pair = torch.gather(cdf.unsqueeze(1).expand(gather_shape), 2, bracket)
    bin_pair = torch.gather(bins.unsqueeze(1).expand(gather_shape), 2, bracket)

    # Linear interpolation inside the bracketing bin; guard degenerate bins.
    denom = cdf_pair[..., 1] - cdf_pair[..., 0]
    denom = torch.where(denom < 1e-5, torch.ones_like(denom), denom)
    frac = (u - cdf_pair[..., 0]) / denom
    return bin_pair[..., 0] + frac * (bin_pair[..., 1] - bin_pair[..., 0])
class NeuSRenderer:
    """Volume renderer for a NeuS-style neural implicit surface.

    Combines an SDF network (surface), a color network, a single-parameter
    deviation network (the `inv_s` sharpness of the logistic CDF), an
    optional outside-sphere background NeRF, and an additional
    `depth_network` whose per-point feature vectors are alpha-composited
    alongside color (see the `# add depth_feats` sites below).
    """
    def __init__(self,
                 nerf,
                 sdf_network,
                 deviation_network,
                 color_network,
                 depth_network,
                 n_samples,
                 n_importance,
                 n_outside,
                 up_sample_steps,
                 perturb):
        # Networks.
        self.nerf = nerf
        self.sdf_network = sdf_network
        self.deviation_network = deviation_network
        self.color_network = color_network
        self.depth_network = depth_network
        # Sampling hyper-parameters: coarse samples, importance samples,
        # background samples, number of up-sampling rounds, jitter flag.
        self.n_samples = n_samples
        self.n_importance = n_importance
        self.n_outside = n_outside
        self.up_sample_steps = up_sample_steps
        self.perturb = perturb
    def render_core_outside(self, rays_o, rays_d, z_vals, sample_dist, nerf, background_rgb=None):
        """
        Render background

        Points are re-parameterized as (x / |x|, 1 / |x|) (4-D input to the
        background NeRF) so that the region outside the unit sphere maps to a
        bounded domain.  Returns composited color, per-sample color/feature/
        alpha/weight tensors and a depth map.
        """
        batch_size, n_samples = z_vals.shape
        # Section length
        dists = z_vals[..., 1:] - z_vals[..., :-1]
        dists = torch.cat([dists, torch.Tensor([sample_dist]).expand(dists[..., :1].shape)], -1)
        mid_z_vals = z_vals + dists * 0.5
        # Section midpoints
        pts = rays_o[:, None, :] + rays_d[:, None, :] * mid_z_vals[..., :, None]  # batch_size, n_samples, 3
        # Clip |x| below at 1.0: this branch only models the outside-sphere region.
        dis_to_center = torch.linalg.norm(pts, ord=2, dim=-1, keepdim=True).clip(1.0, 1e10)
        pts = torch.cat([pts / dis_to_center, 1.0 / dis_to_center], dim=-1)  # batch_size, n_samples, 4
        dirs = rays_d[:, None, :].expand(batch_size, n_samples, 3)
        pts = pts.reshape(-1, 3 + int(self.n_outside > 0))
        dirs = dirs.reshape(-1, 3)
        # add depth_feats
        density, sampled_color, sampled_feat = nerf(pts, dirs)
        # Standard NeRF compositing: alpha from density, weights from
        # transmittance (exclusive cumulative product of 1 - alpha).
        alpha = 1.0 - torch.exp(-F.softplus(density.reshape(batch_size, n_samples)) * dists)
        alpha = alpha.reshape(batch_size, n_samples)
        weights = alpha * torch.cumprod(torch.cat([torch.ones([batch_size, 1]), 1. - alpha + 1e-7], -1), -1)[:, :-1]
        sampled_color = sampled_color.reshape(batch_size, n_samples, 3)
        color = (weights[:, :, None] * sampled_color).sum(dim=1)
        if background_rgb is not None:
            color = color + background_rgb * (1.0 - weights.sum(dim=-1, keepdim=True))
        depth_map = torch.sum(weights * z_vals, dim=-1)
        # add depth_feats
        sampled_feat = sampled_feat.reshape(batch_size, n_samples, -1)
        return {
            'color': color,
            # add depth_feats
            'sampled_feat': sampled_feat,
            'sampled_color': sampled_color,
            'alpha': alpha,
            'weights': weights,
            'depth_map': depth_map,
        }
    def up_sample(self, rays_o, rays_d, z_vals, sdf, n_importance, inv_s):
        """
        Up sampling given a fixed inv_s.

        Converts section SDF values into alphas via the logistic CDF with
        sharpness `inv_s`, then draws `n_importance` new depths from the
        resulting weights (deterministic stratified sampling).
        """
        batch_size, n_samples = z_vals.shape
        pts = rays_o[:, None, :] + rays_d[:, None, :] * z_vals[..., :, None]  # n_rays, n_samples, 3
        radius = torch.linalg.norm(pts, ord=2, dim=-1, keepdim=False)
        # A section counts as inside if either endpoint is within the unit sphere.
        inside_sphere = (radius[:, :-1] < 1.0) | (radius[:, 1:] < 1.0)
        sdf = sdf.reshape(batch_size, n_samples)
        prev_sdf, next_sdf = sdf[:, :-1], sdf[:, 1:]
        prev_z_vals, next_z_vals = z_vals[:, :-1], z_vals[:, 1:]
        mid_sdf = (prev_sdf + next_sdf) * 0.5
        # Finite-difference directional derivative of the SDF along the ray.
        cos_val = (next_sdf - prev_sdf) / (next_z_vals - prev_z_vals + 1e-5)
        # ----------------------------------------------------------------------------------------------------------
        # Use min value of [ cos, prev_cos ]
        # Though it makes the sampling (not rendering) a little bit biased, this strategy can make the sampling more
        # robust when meeting situations like below:
        #
        # SDF
        # ^
        # |\          -----x----...
        # | \        /
        # |  x      x
        # |---\----/-------------> 0 level
        # |    \  /
        # |     \/
        # |
        # ----------------------------------------------------------------------------------------------------------
        prev_cos_val = torch.cat([torch.zeros([batch_size, 1]), cos_val[:, :-1]], dim=-1)
        cos_val = torch.stack([prev_cos_val, cos_val], dim=-1)
        cos_val, _ = torch.min(cos_val, dim=-1, keepdim=False)
        # Keep only non-positive slopes (surface crossings front-to-back),
        # and zero out sections entirely outside the unit sphere.
        cos_val = cos_val.clip(-1e3, 0.0) * inside_sphere
        dist = (next_z_vals - prev_z_vals)
        # Estimated SDF at section endpoints, assuming locally linear SDF.
        prev_esti_sdf = mid_sdf - cos_val * dist * 0.5
        next_esti_sdf = mid_sdf + cos_val * dist * 0.5
        prev_cdf = torch.sigmoid(prev_esti_sdf * inv_s)
        next_cdf = torch.sigmoid(next_esti_sdf * inv_s)
        # NeuS alpha: relative drop of the logistic CDF across the section.
        alpha = (prev_cdf - next_cdf + 1e-5) / (prev_cdf + 1e-5)
        weights = alpha * torch.cumprod(
            torch.cat([torch.ones([batch_size, 1]), 1. - alpha + 1e-7], -1), -1)[:, :-1]
        z_samples = sample_pdf(z_vals, weights, n_importance, det=True).detach()
        return z_samples
    def cat_z_vals(self, rays_o, rays_d, z_vals, new_z_vals, sdf, last=False):
        """Merge new sample depths into z_vals (sorted), and keep the cached
        SDF values aligned with the sorted order.  On the final round
        (`last=True`) the SDF is not needed again, so it is not recomputed."""
        batch_size, n_samples = z_vals.shape
        _, n_importance = new_z_vals.shape
        pts = rays_o[:, None, :] + rays_d[:, None, :] * new_z_vals[..., :, None]
        z_vals = torch.cat([z_vals, new_z_vals], dim=-1)
        z_vals, index = torch.sort(z_vals, dim=-1)
        if not last:
            new_sdf = self.sdf_network.sdf(pts.reshape(-1, 3)).reshape(batch_size, n_importance)
            sdf = torch.cat([sdf, new_sdf], dim=-1)
            # Reorder the concatenated SDF with the same permutation that
            # sorted z_vals (advanced indexing with per-row indices).
            xx = torch.arange(batch_size)[:, None].expand(batch_size, n_samples + n_importance).reshape(-1)
            index = index.reshape(-1)
            sdf = sdf[(xx, index)].reshape(batch_size, n_samples + n_importance)
        return z_vals, sdf
    def render_core(self,
                    rays_o,
                    rays_d,
                    z_vals,
                    sample_dist,
                    sdf_network,
                    deviation_network,
                    color_network,
                    # add depth_feats
                    depth_network=None,
                    background_alpha=None,
                    background_sampled_feat=None,
                    background_sampled_color=None,
                    background_rgb=None,
                    cos_anneal_ratio=0.0):
        """Core NeuS rendering pass for the inside-sphere region, optionally
        blended with pre-computed background samples.

        Returns composited color and depth features, per-sample diagnostics
        (sdf, weights, cdf, s_val) and the Eikonal gradient error used as a
        training regularizer.
        """
        batch_size, n_samples = z_vals.shape
        # Section length
        dists = z_vals[..., 1:] - z_vals[..., :-1]
        dists = torch.cat([dists, torch.Tensor([sample_dist]).expand(dists[..., :1].shape)], -1)
        mid_z_vals = z_vals + dists * 0.5
        # Section midpoints
        pts = rays_o[:, None, :] + rays_d[:, None, :] * mid_z_vals[..., :, None]  # n_rays, n_samples, 3
        dirs = rays_d[:, None, :].expand(pts.shape)
        pts = pts.reshape(-1, 3)
        dirs = dirs.reshape(-1, 3)
        # SDF network outputs the signed distance in column 0 and a feature
        # vector in the remaining columns.
        sdf_nn_output = sdf_network(pts)
        sdf = sdf_nn_output[:, :1]
        feature_vector = sdf_nn_output[:, 1:]
        gradients = sdf_network.gradient(pts).squeeze()
        sampled_color = color_network(pts, gradients, dirs, feature_vector).reshape(batch_size, n_samples, 3)
        # add depth_feats
        sampled_feat = depth_network(pts, gradients, dirs, feature_vector).reshape(batch_size, n_samples, -1)
        # alpha = 1.0 - torch.exp(-F.softplus(density.reshape(batch_size, n_samples)) * dists)
        # alpha = alpha.reshape(batch_size, n_samples)
        # weights = alpha * torch.cumprod(torch.cat([torch.ones([batch_size, 1]), 1. - alpha + 1e-7], -1), -1)[:, :-1]
        # sampled_color = sampled_color.reshape(batch_size, n_samples, 3)
        # color = (weights[:, :, None] * sampled_color).sum(dim=1)
        # if background_rgb is not None:
        #     color = color + background_rgb * (1.0 - weights.sum(dim=-1, keepdim=True))
        # depth_map = torch.sum(weights * z_vals, dim=-1)
        inv_s = deviation_network(torch.zeros([1, 3]))[:, :1].clip(1e-6, 1e6)  # Single parameter
        inv_s = inv_s.expand(batch_size * n_samples, 1)
        true_cos = (dirs * gradients).sum(-1, keepdim=True)
        # "cos_anneal_ratio" grows from 0 to 1 in the beginning training iterations. The anneal strategy below makes
        # the cos value "not dead" at the beginning training iterations, for better convergence.
        iter_cos = -(F.relu(-true_cos * 0.5 + 0.5) * (1.0 - cos_anneal_ratio) +
                     F.relu(-true_cos) * cos_anneal_ratio)  # always non-positive
        # Estimate signed distances at section points
        estimated_next_sdf = sdf + iter_cos * dists.reshape(-1, 1) * 0.5
        estimated_prev_sdf = sdf - iter_cos * dists.reshape(-1, 1) * 0.5
        prev_cdf = torch.sigmoid(estimated_prev_sdf * inv_s)
        next_cdf = torch.sigmoid(estimated_next_sdf * inv_s)
        p = prev_cdf - next_cdf
        c = prev_cdf
        alpha = ((p + 1e-5) / (c + 1e-5)).reshape(batch_size, n_samples).clip(0.0, 1.0)
        pts_norm = torch.linalg.norm(pts, ord=2, dim=-1, keepdim=True).reshape(batch_size, n_samples)
        inside_sphere = (pts_norm < 1.0).float().detach()
        # Relaxed mask (|x| < 1.2) so the Eikonal loss also covers a margin
        # just outside the unit sphere.
        relax_inside_sphere = (pts_norm < 1.2).float().detach()
        # Render with background
        if background_alpha is not None:
            # Inside-sphere samples keep the SDF-derived alpha/color/feat;
            # outside-sphere samples fall back to the background model, and
            # the extra background-only samples are appended after them.
            alpha = alpha * inside_sphere + background_alpha[:, :n_samples] * (1.0 - inside_sphere)
            alpha = torch.cat([alpha, background_alpha[:, n_samples:]], dim=-1)
            sampled_color = sampled_color * inside_sphere[:, :, None] +\
                            background_sampled_color[:, :n_samples] * (1.0 - inside_sphere)[:, :, None]
            sampled_color = torch.cat([sampled_color, background_sampled_color[:, n_samples:]], dim=1)
            # add depth_feats
            sampled_feat = sampled_feat * inside_sphere[:, :, None] +\
                           background_sampled_feat[:, :n_samples] * (1.0 - inside_sphere)[:, :, None]
            sampled_feat = torch.cat([sampled_feat, background_sampled_feat[:, n_samples:]], dim=1)
        weights = alpha * torch.cumprod(torch.cat([torch.ones([batch_size, 1]), 1. - alpha + 1e-7], -1), -1)[:, :-1]
        weights_sum = weights.sum(dim=-1, keepdim=True)
        color = (sampled_color * weights[:, :, None]).sum(dim=1)
        d_feats = (sampled_feat * weights[:, :, None]).sum(dim=1)
        if background_rgb is not None:    # Fixed background, usually black
            color = color + background_rgb * (1.0 - weights_sum)
        # Eikonal loss
        gradient_error = (torch.linalg.norm(gradients.reshape(batch_size, n_samples, 3), ord=2,
                                            dim=-1) - 1.0) ** 2
        gradient_error = (relax_inside_sphere * gradient_error).sum() / (relax_inside_sphere.sum() + 1e-5)
        return {
            # add depth_feats
            'd_feats': d_feats,
            'color': color,
            'sdf': sdf,
            'dists': dists,
            'gradients': gradients.reshape(batch_size, n_samples, 3),
            's_val': 1.0 / inv_s,
            'mid_z_vals': mid_z_vals,
            'weights': weights,
            'cdf': c.reshape(batch_size, n_samples),
            'gradient_error': gradient_error,
            'inside_sphere': inside_sphere
        }
    def render(self, rays_o, rays_d, near, far, perturb_overwrite=-1, background_rgb=None, cos_anneal_ratio=0.0):
        """Full rendering pipeline for a batch of rays.

        Coarse uniform samples in [near, far] (optionally jittered), iterative
        SDF-guided importance up-sampling, optional outside-sphere background
        pass, then the core rendering pass.  `perturb_overwrite >= 0` forces
        the jitter flag (e.g. 0 for deterministic validation renders).
        """
        batch_size = len(rays_o)
        sample_dist = 2.0 / self.n_samples   # Assuming the region of interest is a unit sphere
        z_vals = torch.linspace(0.0, 1.0, self.n_samples)
        z_vals = near + (far - near) * z_vals[None, :]
        z_vals_outside = None
        if self.n_outside > 0:
            z_vals_outside = torch.linspace(1e-3, 1.0 - 1.0 / (self.n_outside + 1.0), self.n_outside)
        n_samples = self.n_samples
        perturb = self.perturb
        if perturb_overwrite >= 0:
            perturb = perturb_overwrite
        if perturb > 0:
            # Jitter all samples of a ray by a common offset of up to half a bin.
            t_rand = (torch.rand([batch_size, 1]) - 0.5)
            z_vals = z_vals + t_rand * 2.0 / self.n_samples
            if self.n_outside > 0:
                # Stratified jitter for the background samples.
                mids = .5 * (z_vals_outside[..., 1:] + z_vals_outside[..., :-1])
                upper = torch.cat([mids, z_vals_outside[..., -1:]], -1)
                lower = torch.cat([z_vals_outside[..., :1], mids], -1)
                t_rand = torch.rand([batch_size, z_vals_outside.shape[-1]])
                z_vals_outside = lower[None, :] + (upper - lower)[None, :] * t_rand
        if self.n_outside > 0:
            # Map the unit-interval background samples to depths beyond `far`
            # (inverse-depth spacing, far samples first after the flip).
            z_vals_outside = far / torch.flip(z_vals_outside, dims=[-1]) + 1.0 / self.n_samples
        background_alpha = None
        background_sampled_color = None
        background_sampled_feat = None
        # Up sample
        if self.n_importance > 0:
            with torch.no_grad():
                pts = rays_o[:, None, :] + rays_d[:, None, :] * z_vals[..., :, None]
                sdf = self.sdf_network.sdf(pts.reshape(-1, 3)).reshape(batch_size, self.n_samples)
                # Each round doubles the CDF sharpness (64, 128, 256, ...),
                # progressively concentrating samples near the surface.
                for i in range(self.up_sample_steps):
                    new_z_vals = self.up_sample(rays_o,
                                                rays_d,
                                                z_vals,
                                                sdf,
                                                self.n_importance // self.up_sample_steps,
                                                64 * 2**i)
                    z_vals, sdf = self.cat_z_vals(rays_o,
                                                  rays_d,
                                                  z_vals,
                                                  new_z_vals,
                                                  sdf,
                                                  last=(i + 1 == self.up_sample_steps))
            n_samples = self.n_samples + self.n_importance
        # Background model
        if self.n_outside > 0:
            z_vals_feed = torch.cat([z_vals, z_vals_outside], dim=-1)
            z_vals_feed, _ = torch.sort(z_vals_feed, dim=-1)
            ret_outside = self.render_core_outside(rays_o, rays_d, z_vals_feed, sample_dist, self.nerf)
            background_sampled_feat = ret_outside['sampled_feat']
            background_sampled_color = ret_outside['sampled_color']
            background_alpha = ret_outside['alpha']
        # Render core
        ret_fine = self.render_core(rays_o,
                                    rays_d,
                                    z_vals,
                                    sample_dist,
                                    self.sdf_network,
                                    self.deviation_network,
                                    self.color_network,
                                    self.depth_network,
                                    background_rgb=background_rgb,
                                    background_alpha=background_alpha,
                                    background_sampled_feat=background_sampled_feat,
                                    background_sampled_color=background_sampled_color,
                                    cos_anneal_ratio=cos_anneal_ratio)
        render_feats = ret_fine['d_feats']
        color_fine = ret_fine['color']
        weights = ret_fine['weights']
        weights_sum = weights.sum(dim=-1, keepdim=True)
        gradients = ret_fine['gradients']
        s_val = ret_fine['s_val'].reshape(batch_size, n_samples).mean(dim=-1, keepdim=True)
        return {
            # add depth_feats
            'render_feats': render_feats,
            'color_fine': color_fine,
            's_val': s_val,
            'cdf_fine': ret_fine['cdf'],
            'weight_sum': weights_sum,
            'weight_max': torch.max(weights, dim=-1, keepdim=True)[0],
            'gradients': gradients,
            'weights': weights,
            'gradient_error': ret_fine['gradient_error'],
            'inside_sphere': ret_fine['inside_sphere']
        }
    def extract_geometry(self, bound_min, bound_max, resolution, threshold=0.0):
        """Mesh the zero level set of the SDF (negated so the inside is
        positive, matching the module-level extract_geometry convention)."""
        return extract_geometry(bound_min,
                                bound_max,
                                resolution=resolution,
                                threshold=threshold,
                                query_func=lambda pts: -self.sdf_network.sdf(pts))
| [
"torch.cat",
"torch.arange",
"torch.no_grad",
"torch.ones",
"torch.searchsorted",
"torch.Tensor",
"torch.linalg.norm",
"torch.nn.functional.relu",
"torch.zeros",
"mcubes.marching_cubes",
"torch.zeros_like",
"torch.max",
"torch.rand",
"torch.sum",
"torch.sort",
"torch.min",
"torch.one... | [((443, 507), 'numpy.zeros', 'np.zeros', (['[resolution, resolution, resolution]'], {'dtype': 'np.float32'}), '([resolution, resolution, resolution], dtype=np.float32)\n', (451, 507), True, 'import numpy as np\n'), ((1592, 1627), 'mcubes.marching_cubes', 'mcubes.marching_cubes', (['u', 'threshold'], {}), '(u, threshold)\n', (1613, 1627), False, 'import mcubes\n'), ((2074, 2095), 'torch.cumsum', 'torch.cumsum', (['pdf', '(-1)'], {}), '(pdf, -1)\n', (2086, 2095), False, 'import torch\n'), ((2464, 2502), 'torch.searchsorted', 'torch.searchsorted', (['cdf', 'u'], {'right': '(True)'}), '(cdf, u, right=True)\n', (2482, 2502), False, 'import torch\n'), ((2649, 2680), 'torch.stack', 'torch.stack', (['[below, above]', '(-1)'], {}), '([below, above], -1)\n', (2660, 2680), False, 'import torch\n'), ((517, 532), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (530, 532), False, 'import torch\n'), ((2027, 2063), 'torch.sum', 'torch.sum', (['weights', '(-1)'], {'keepdim': '(True)'}), '(weights, -1, keepdim=True)\n', (2036, 2063), False, 'import torch\n'), ((2210, 2287), 'torch.linspace', 'torch.linspace', (['(0.0 + 0.5 / n_samples)', '(1.0 - 0.5 / n_samples)'], {'steps': 'n_samples'}), '(0.0 + 0.5 / n_samples, 1.0 - 0.5 / n_samples, steps=n_samples)\n', (2224, 2287), False, 'import torch\n'), ((2525, 2551), 'torch.zeros_like', 'torch.zeros_like', (['(inds - 1)'], {}), '(inds - 1)\n', (2541, 2551), False, 'import torch\n'), ((3014, 3036), 'torch.ones_like', 'torch.ones_like', (['denom'], {}), '(denom)\n', (3029, 3036), False, 'import torch\n'), ((4562, 4623), 'torch.cat', 'torch.cat', (['[pts / dis_to_center, 1.0 / dis_to_center]'], {'dim': '(-1)'}), '([pts / dis_to_center, 1.0 / dis_to_center], dim=-1)\n', (4571, 4623), False, 'import torch\n'), ((5456, 5491), 'torch.sum', 'torch.sum', (['(weights * z_vals)'], {'dim': '(-1)'}), '(weights * z_vals, dim=-1)\n', (5465, 5491), False, 'import torch\n'), ((6159, 6211), 'torch.linalg.norm', 'torch.linalg.norm', 
(['pts'], {'ord': '(2)', 'dim': '(-1)', 'keepdim': '(False)'}), '(pts, ord=2, dim=-1, keepdim=False)\n', (6176, 6211), False, 'import torch\n'), ((7342, 7386), 'torch.stack', 'torch.stack', (['[prev_cos_val, cos_val]'], {'dim': '(-1)'}), '([prev_cos_val, cos_val], dim=-1)\n', (7353, 7386), False, 'import torch\n'), ((7408, 7449), 'torch.min', 'torch.min', (['cos_val'], {'dim': '(-1)', 'keepdim': '(False)'}), '(cos_val, dim=-1, keepdim=False)\n', (7417, 7449), False, 'import torch\n'), ((7681, 7717), 'torch.sigmoid', 'torch.sigmoid', (['(prev_esti_sdf * inv_s)'], {}), '(prev_esti_sdf * inv_s)\n', (7694, 7717), False, 'import torch\n'), ((7737, 7773), 'torch.sigmoid', 'torch.sigmoid', (['(next_esti_sdf * inv_s)'], {}), '(next_esti_sdf * inv_s)\n', (7750, 7773), False, 'import torch\n'), ((8342, 8381), 'torch.cat', 'torch.cat', (['[z_vals, new_z_vals]'], {'dim': '(-1)'}), '([z_vals, new_z_vals], dim=-1)\n', (8351, 8381), False, 'import torch\n'), ((8406, 8432), 'torch.sort', 'torch.sort', (['z_vals'], {'dim': '(-1)'}), '(z_vals, dim=-1)\n', (8416, 8432), False, 'import torch\n'), ((11780, 11821), 'torch.sigmoid', 'torch.sigmoid', (['(estimated_prev_sdf * inv_s)'], {}), '(estimated_prev_sdf * inv_s)\n', (11793, 11821), False, 'import torch\n'), ((11841, 11882), 'torch.sigmoid', 'torch.sigmoid', (['(estimated_next_sdf * inv_s)'], {}), '(estimated_next_sdf * inv_s)\n', (11854, 11882), False, 'import torch\n'), ((14575, 14615), 'torch.linspace', 'torch.linspace', (['(0.0)', '(1.0)', 'self.n_samples'], {}), '(0.0, 1.0, self.n_samples)\n', (14589, 14615), False, 'import torch\n'), ((226, 280), 'torch.linspace', 'torch.linspace', (['bound_min[0]', 'bound_max[0]', 'resolution'], {}), '(bound_min[0], bound_max[0], resolution)\n', (240, 280), False, 'import torch\n'), ((298, 352), 'torch.linspace', 'torch.linspace', (['bound_min[1]', 'bound_max[1]', 'resolution'], {}), '(bound_min[1], bound_max[1], resolution)\n', (312, 352), False, 'import torch\n'), ((370, 424), 
'torch.linspace', 'torch.linspace', (['bound_min[2]', 'bound_max[2]', 'resolution'], {}), '(bound_min[2], bound_max[2], resolution)\n', (384, 424), False, 'import torch\n'), ((2117, 2147), 'torch.zeros_like', 'torch.zeros_like', (['cdf[..., :1]'], {}), '(cdf[..., :1])\n', (2133, 2147), False, 'import torch\n'), ((2607, 2628), 'torch.ones_like', 'torch.ones_like', (['inds'], {}), '(inds)\n', (2622, 2628), False, 'import torch\n'), ((8570, 8603), 'torch.cat', 'torch.cat', (['[sdf, new_sdf]'], {'dim': '(-1)'}), '([sdf, new_sdf], dim=-1)\n', (8579, 8603), False, 'import torch\n'), ((12446, 12505), 'torch.cat', 'torch.cat', (['[alpha, background_alpha[:, n_samples:]]'], {'dim': '(-1)'}), '([alpha, background_alpha[:, n_samples:]], dim=-1)\n', (12455, 12505), False, 'import torch\n'), ((12711, 12785), 'torch.cat', 'torch.cat', (['[sampled_color, background_sampled_color[:, n_samples:]]'], {'dim': '(1)'}), '([sampled_color, background_sampled_color[:, n_samples:]], dim=1)\n', (12720, 12785), False, 'import torch\n'), ((13017, 13089), 'torch.cat', 'torch.cat', (['[sampled_feat, background_sampled_feat[:, n_samples:]]'], {'dim': '(1)'}), '([sampled_feat, background_sampled_feat[:, n_samples:]], dim=1)\n', (13026, 13089), False, 'import torch\n'), ((14762, 14835), 'torch.linspace', 'torch.linspace', (['(0.001)', '(1.0 - 1.0 / (self.n_outside + 1.0))', 'self.n_outside'], {}), '(0.001, 1.0 - 1.0 / (self.n_outside + 1.0), self.n_outside)\n', (14776, 14835), False, 'import torch\n'), ((17002, 17045), 'torch.cat', 'torch.cat', (['[z_vals, z_vals_outside]'], {'dim': '(-1)'}), '([z_vals, z_vals_outside], dim=-1)\n', (17011, 17045), False, 'import torch\n'), ((17075, 17106), 'torch.sort', 'torch.sort', (['z_vals_feed'], {'dim': '(-1)'}), '(z_vals_feed, dim=-1)\n', (17085, 17106), False, 'import torch\n'), ((4480, 4531), 'torch.linalg.norm', 'torch.linalg.norm', (['pts'], {'ord': '(2)', 'dim': '(-1)', 'keepdim': '(True)'}), '(pts, ord=2, dim=-1, keepdim=True)\n', (4497, 4531), False, 
'import torch\n'), ((7268, 7296), 'torch.zeros', 'torch.zeros', (['[batch_size, 1]'], {}), '([batch_size, 1])\n', (7279, 7296), False, 'import torch\n'), ((12046, 12097), 'torch.linalg.norm', 'torch.linalg.norm', (['pts'], {'ord': '(2)', 'dim': '(-1)', 'keepdim': '(True)'}), '(pts, ord=2, dim=-1, keepdim=True)\n', (12063, 12097), False, 'import torch\n'), ((15024, 15051), 'torch.rand', 'torch.rand', (['[batch_size, 1]'], {}), '([batch_size, 1])\n', (15034, 15051), False, 'import torch\n'), ((15260, 15307), 'torch.cat', 'torch.cat', (['[mids, z_vals_outside[..., -1:]]', '(-1)'], {}), '([mids, z_vals_outside[..., -1:]], -1)\n', (15269, 15307), False, 'import torch\n'), ((15332, 15378), 'torch.cat', 'torch.cat', (['[z_vals_outside[..., :1], mids]', '(-1)'], {}), '([z_vals_outside[..., :1], mids], -1)\n', (15341, 15378), False, 'import torch\n'), ((15404, 15454), 'torch.rand', 'torch.rand', (['[batch_size, z_vals_outside.shape[-1]]'], {}), '([batch_size, z_vals_outside.shape[-1]])\n', (15414, 15454), False, 'import torch\n'), ((15851, 15866), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (15864, 15866), False, 'import torch\n'), ((18782, 18822), 'torch.max', 'torch.max', (['weights'], {'dim': '(-1)', 'keepdim': '(True)'}), '(weights, dim=-1, keepdim=True)\n', (18791, 18822), False, 'import torch\n'), ((687, 713), 'torch.meshgrid', 'torch.meshgrid', (['xs', 'ys', 'zs'], {}), '(xs, ys, zs)\n', (701, 713), False, 'import torch\n'), ((11418, 11447), 'torch.nn.functional.relu', 'F.relu', (['(-true_cos * 0.5 + 0.5)'], {}), '(-true_cos * 0.5 + 0.5)\n', (11424, 11447), True, 'import torch.nn.functional as F\n'), ((11498, 11515), 'torch.nn.functional.relu', 'F.relu', (['(-true_cos)'], {}), '(-true_cos)\n', (11504, 11515), True, 'import torch.nn.functional as F\n'), ((15606, 15643), 'torch.flip', 'torch.flip', (['z_vals_outside'], {'dims': '[-1]'}), '(z_vals_outside, dims=[-1])\n', (15616, 15643), False, 'import torch\n'), ((4212, 4239), 'torch.Tensor', 'torch.Tensor', 
(['[sample_dist]'], {}), '([sample_dist])\n', (4224, 4239), False, 'import torch\n'), ((9572, 9599), 'torch.Tensor', 'torch.Tensor', (['[sample_dist]'], {}), '([sample_dist])\n', (9584, 9599), False, 'import torch\n'), ((10992, 11011), 'torch.zeros', 'torch.zeros', (['[1, 3]'], {}), '([1, 3])\n', (11003, 11011), False, 'import torch\n'), ((5107, 5134), 'torch.ones', 'torch.ones', (['[batch_size, 1]'], {}), '([batch_size, 1])\n', (5117, 5134), False, 'import torch\n'), ((7903, 7930), 'torch.ones', 'torch.ones', (['[batch_size, 1]'], {}), '([batch_size, 1])\n', (7913, 7930), False, 'import torch\n'), ((13142, 13169), 'torch.ones', 'torch.ones', (['[batch_size, 1]'], {}), '([batch_size, 1])\n', (13152, 13169), False, 'import torch\n'), ((8621, 8645), 'torch.arange', 'torch.arange', (['batch_size'], {}), '(batch_size)\n', (8633, 8645), False, 'import torch\n')] |
#!/usr/bin/env python
'''
@package ion.services.dm.utility.granule.record_dictionary
@file ion/services/dm/utility/granule/record_dictionary.py
@author <NAME>
@author <NAME> <<EMAIL>>
@brief https://confluence.oceanobservatories.org/display/CIDev/Record+Dictionary
'''
from pyon.container.cc import Container
from pyon.core.exception import BadRequest, NotFound
from pyon.core.object import IonObjectSerializer
from pyon.core.interceptor.encode import encode_ion
from pyon.util.arg_check import validate_equal
from pyon.util.log import log
from pyon.util.memoize import memoize_lru
from ion.util.stored_values import StoredValueManager
from interface.services.dm.ipubsub_management_service import PubsubManagementServiceClient
from interface.objects import Granule, StreamDefinition
from coverage_model import ParameterDictionary, ConstantType, ConstantRangeType, get_value_class, SimpleDomainSet, QuantityType, Span, SparseConstantType
from coverage_model.parameter_functions import ParameterFunctionException
from coverage_model.parameter_values import AbstractParameterValue, ConstantValue
from coverage_model.parameter_types import ParameterFunctionType
from coverage_model import PythonFunction, NumexprFunction
import numpy as np
import numexpr as ne
from copy import copy
import msgpack
import time
class RecordDictionaryTool(object):
    """
    A record dictionary is a key/value store which contains records for a particular dataset. The keys are specified by
    a parameter dictionary which map to the fields in the dataset, the data types for the records as well as metadata
    about the fields. Each field in a record dictionary must contain the same number of records. A record can consist of
    a scalar value (typically a NumPy scalar) or it may contain an array or dictionary of values. The data type for each
    field is determined by the parameter dictionary. The length of the first assignment dictates the allowable size for
    the RDT - see the Tip below
    The record dictionary can contain an instance of the parameter dictionary itself or it may contain a reference to
    one by use of a stream definition. A stream definition is a resource persisted by the resource registry and contains
    the parameter dictionary. When the record dictionary is constructed the client will specify either a stream
    definition identifier or a parameter dictionary.
    ParameterDictionaries are inherently large and congest message traffic through RabbitMQ, therefore it is preferred
    to use stream definitions in lieu of parameter dictionaries directly.
    """
    # Class-level defaults; instance values are assigned in __init__ / setters.
    _rd = None                   # dict: field name -> stored values (or None)
    _pdict = None                # ParameterDictionary backing this RDT
    _shp = None                  # 1-d shape tuple, e.g. (n,), once known
    _locator = None
    _stream_def = None           # stream definition id, when constructed from one
    _dirty_shape = False         # True while _shp is a provisional (1,) from a constant type
    _available_fields = None     # optional whitelist of fields from the stream definition
    _creation_timestamp = None
    _stream_config = {}
    _definition = None           # full StreamDefinition object, when provided directly
    connection_id = ''
    connection_index = ''

    def __init__(self,param_dictionary=None, stream_definition_id='', locator=None, stream_definition=None):
        """
        Build an RDT from exactly one of: a parameter dictionary (dict dump or
        ParameterDictionary instance), a stream definition id, or a
        StreamDefinition object.

        @raises BadRequest if none of the inputs is usable or if
                stream_definition is not a StreamDefinition.
        """
        if type(param_dictionary) == dict:
            self._pdict = ParameterDictionary.load(param_dictionary)
        elif isinstance(param_dictionary,ParameterDictionary):
            self._pdict = param_dictionary
        elif stream_definition_id or stream_definition:
            if stream_definition:
                if not isinstance(stream_definition,StreamDefinition):
                    raise BadRequest('Improper StreamDefinition object')
                self._definition = stream_definition
            # Prefer the in-hand object; otherwise fetch by id (cached read).
            stream_def_obj = stream_definition or RecordDictionaryTool.read_stream_def(stream_definition_id)
            pdict = stream_def_obj.parameter_dictionary
            self._available_fields = stream_def_obj.available_fields or None
            self._stream_config = stream_def_obj.stream_configuration
            self._pdict = ParameterDictionary.load(pdict)
            self._stream_def = stream_definition_id
        else:
            raise BadRequest('Unable to create record dictionary with improper ParameterDictionary')
        if stream_definition_id:
            self._stream_def=stream_definition_id
        self._shp = None
        self._rd = {}
        self._locator = locator
        self._setup_params()

    def _pval_callback(self, name, slice_):
        # Callback used by parameter values to fetch a slice of a field.
        retval = np.atleast_1d(self[name])
        return retval[slice_]

    @classmethod
    def get_paramval(cls, ptype, domain, values):
        """Wrap raw values in a coverage-model parameter value for ptype/domain."""
        paramval = get_value_class(ptype, domain_set=domain)
        if isinstance(ptype,ParameterFunctionType):
            paramval.memoized_values = values
        if isinstance(ptype,SparseConstantType):
            # Sparse constants are stored as a list of Spans, not raw values.
            values = np.atleast_1d(values)
            spans = cls.spanify(values)
            paramval.storage._storage = np.array([spans],dtype='object')
        else:
            paramval[:] = values
        # Freeze the backing storage so the value is effectively read-only.
        paramval.storage._storage.flags.writeable = False
        return paramval

    def lookup_values(self):
        # Lookup-value fields that are NOT backed by a stored document.
        return [i for i in self._lookup_values() if not self.context(i).document_key]

    def _lookup_values(self):
        # All fields whose context declares a lookup_value.
        lookup_values = []
        for field in self.fields:
            if hasattr(self.context(field), 'lookup_value'):
                lookup_values.append(field)
        return lookup_values

    @classmethod
    def spanify(cls,arr):
        """Run-length encode arr into a list of Span objects (one per run)."""
        spans = []
        lastval = None
        for i,val in enumerate(arr):
            if i == 0:
                span = Span(None,None,0,val)
                spans.append(span)
                lastval = val
                continue
            if np.atleast_1d(lastval == val).all():
                # Same value as the previous element: extend the current run.
                continue
            spans[-1].upper_bound = i
            span = Span(i,None,-i,val)
            spans.append(span)
            lastval = val
        return spans

    def fetch_lookup_values(self):
        """Resolve document-backed lookup values and assign them into the RDT."""
        # First pass: collect the document keys (with $designator expanded).
        doc_keys = []
        for lv in self._lookup_values():
            context = self.context(lv)
            if context.document_key:
                document_key = context.document_key
                if '$designator' in context.document_key and 'reference_designator' in self._stream_config:
                    document_key = document_key.replace('$designator',self._stream_config['reference_designator'])
                doc_keys.append(document_key)
        lookup_docs = {}
        if doc_keys:
            # One batched read for all documents.
            svm = StoredValueManager(Container.instance)
            doc_list = svm.read_value_mult(doc_keys)
            lookup_docs = dict(zip(doc_keys, doc_list))
        # Second pass: assign each found value, broadcast to the RDT length.
        for lv in self._lookup_values():
            context = self.context(lv)
            if context.document_key:
                document_key = context.document_key
                if '$designator' in context.document_key and 'reference_designator' in self._stream_config:
                    document_key = document_key.replace('$designator',self._stream_config['reference_designator'])
                doc = lookup_docs[document_key]
                if doc is None:
                    log.debug('Reference Document for %s not found', document_key)
                    continue
                if context.lookup_value in doc:
                    self[lv] = [doc[context.lookup_value]] * self._shp[0] if self._shp else doc[context.lookup_value]

    @classmethod
    def load_from_granule(cls, g):
        """Reconstruct an RDT from a Granule message."""
        if g.stream_definition_id:
            instance = cls(stream_definition_id=g.stream_definition_id, locator=g.locator)
        elif g.stream_definition:
            instance = cls(stream_definition=g.stream_definition, locator=g.locator)
        else:
            instance = cls(param_dictionary=g.param_dictionary, locator=g.locator)
        if g.domain:
            instance._shp = (g.domain[0],)
        if g.creation_timestamp:
            instance._creation_timestamp = g.creation_timestamp
        # Do time first
        time_ord = instance.to_ordinal(instance.temporal_parameter)
        if g.record_dictionary[time_ord] is not None:
            instance._rd[instance.temporal_parameter] = g.record_dictionary[time_ord]
        # Granules key the record dictionary by field ordinal, not name.
        for k,v in g.record_dictionary.iteritems():
            key = instance.from_ordinal(k)
            if v is not None:
                #ptype = instance._pdict.get_context(key).param_type
                #paramval = cls.get_paramval(ptype, instance.domain, v)
                instance._rd[key] = v
        instance.connection_id = g.connection_id
        instance.connection_index = g.connection_index
        return instance

    def to_granule(self, data_producer_id='',provider_metadata_update={}, connection_id='', connection_index=''):
        """Serialize this RDT into a Granule message.

        NOTE(review): provider_metadata_update has a mutable {} default; it is
        only assigned onto the granule here, but callers should not mutate it.
        """
        granule = Granule()
        granule.record_dictionary = {}
        # Keyed by ordinal so the granule is independent of field names.
        for key,val in self._rd.iteritems():
            if val is not None:
                granule.record_dictionary[self.to_ordinal(key)] = self[key]
            else:
                granule.record_dictionary[self.to_ordinal(key)] = None
        # Only ship the full parameter dictionary when no stream def is set.
        granule.param_dictionary = {} if self._stream_def else self._pdict.dump()
        if self._definition:
            granule.stream_definition = self._definition
        else:
            granule.stream_definition = None
        granule.stream_definition_id = self._stream_def
        granule.locator = self._locator
        granule.domain = self.domain.shape
        granule.data_producer_id=data_producer_id
        granule.provider_metadata_update=provider_metadata_update
        granule.creation_timestamp = time.time()
        granule.connection_id = connection_id
        granule.connection_index = connection_index
        return granule

    def _setup_params(self):
        # Initialize every known field to "unset".
        for param in self._pdict.keys():
            self._rd[param] = None

    @property
    def fields(self):
        # Restrict to available_fields when the stream definition declares one.
        if self._available_fields is not None:
            return list(set(self._available_fields).intersection(self._pdict.keys()))
        return self._pdict.keys()

    @property
    def domain(self):
        dom = SimpleDomainSet(self._shp)
        return dom

    @property
    def temporal_parameter(self):
        return self._pdict.temporal_parameter_name

    def fill_value(self,name):
        return self._pdict.get_context(name).fill_value

    def _replace_hook(self, name,vals):
        """Normalize incoming values: substitute fill values for None entries
        and collapse all-fill inputs to None. Non-quantity types pass through."""
        if vals is None:
            return None
        if not isinstance(self._pdict.get_context(name).param_type, QuantityType):
            return vals
        if isinstance(vals, (list,tuple)):
            vals = [i if i is not None else self.fill_value(name) for i in vals]
            if all([i is None for i in vals]):
                return None
            return vals
        if isinstance(vals, np.ndarray):
            # Replace embedded None entries with the field's fill value.
            np.place(vals,vals==np.array(None), self.fill_value(name))
            try:
                if (vals == np.array(self.fill_value(name))).all():
                    return None
            except AttributeError:
                pass
            return np.asanyarray(vals, dtype=self._pdict.get_context(name).param_type.value_encoding)
        return np.atleast_1d(vals)

    def __setitem__(self, name, vals):
        return self._set(name, self._replace_hook(name,vals))

    def _set(self, name, vals):
        """
        Set a parameter
        """
        if name not in self.fields:
            raise KeyError(name)
        if vals is None:
            self._rd[name] = None
            return
        context = self._pdict.get_context(name)
        if self._shp is None and isinstance(context.param_type, (SparseConstantType, ConstantType, ConstantRangeType)):
            # Constants don't determine the RDT length; mark the shape dirty
            # so the first real array assignment can fix it.
            self._shp = (1,)
            self._dirty_shape = True
        elif self._shp is None or self._dirty_shape:
            if isinstance(vals, np.ndarray):
                self._shp = (vals.shape[0],) # Only support 1-d right now
            elif isinstance(vals, list):
                self._shp = (len(vals),)
            else:
                raise BadRequest('No shape was defined')
            log.trace('Set shape to %s', self._shp)
            if self._dirty_shape:
                self._dirty_shape = False
                self._reshape_const()
        else:
            # Shape already fixed: enforce it on subsequent assignments.
            if isinstance(vals, np.ndarray):
                if not vals.shape:
                    raise BadRequest('Invalid shape on input (dimensionless)')
                validate_equal(vals.shape[0], self._shp[0], 'Invalid shape on input (%s expecting %s)' % (vals.shape, self._shp))
            elif isinstance(vals, list):
                validate_equal(len(vals), self._shp[0], 'Invalid shape on input')
        #paramval = self.get_paramval(context.param_type, dom, vals)
        self._rd[name] = vals

    def param_type(self, name):
        if name in self.fields:
            return self._pdict.get_context(name).param_type
        raise KeyError(name)

    def context(self, name):
        if name in self.fields:
            return self._pdict.get_context(name)
        raise KeyError(name)

    def _reshape_const(self):
        # Re-bind constant values to the (now final) domain.
        for k in self.fields:
            if isinstance(self._rd[k], ConstantValue):
                self._rd[k].domain_set = self.domain

    def __getitem__(self, name):
        """
        Get an item by nick name from the record dictionary.
        """
        if not self._shp:
            return None
        if self._available_fields and name not in self._available_fields:
            raise KeyError(name)
        ptype = self._pdict.get_context(name).param_type
        if isinstance(ptype, ParameterFunctionType):
            if self._rd[name] is not None:
                return np.atleast_1d(self._rd[name]) # It was already set
            try:
                # Evaluate the parameter function on demand.
                return self._get_param_func(name)
            except ParameterFunctionException:
                log.debug('failed to get parameter function field: %s (%s)', name, self._pdict.keys(), exc_info=True)
        if self._rd[name] is not None:
            return np.atleast_1d(self._rd[name])
        return None

    def _get_param_func(self, name):
        """Evaluate a ParameterFunctionType field (Python or numexpr backed)."""
        ptype = self._pdict.get_context(name).param_type
        if isinstance(ptype.function, PythonFunction):
            args = self._build_arg_map(name, ptype)
            # For missing parameter inputs, return None
            if args is None:
                return None
            if not hasattr(ptype.function,'_callable'):
                ptype.function._import_func()
            retval = ptype.function._callable(*args)
            return retval
        elif isinstance(ptype.function, NumexprFunction):
            args = self._build_arg_map(name, ptype, return_dict=True)
            # For missing parameter inputs, return None
            if args is None:
                return None
            retval = ne.evaluate(ptype.function.expression, local_dict=args)
            return retval
        else:
            raise BadRequest("%s not supported parameter function type" % type(ptype.function))

    def _build_arg_map(self, name, ptype, return_dict=False):
        """Resolve a parameter function's inputs.

        String entries in param_map name other fields of this RDT; anything
        else is passed through as a literal. Returns a dict (for numexpr) or
        an argument list ordered by arg_list, or None if any input is missing.
        """
        # get the arg list
        arg_list = ptype.function.arg_list
        # the map
        arg_map = ptype.function.param_map
        # get the arrays for each
        array_map = {}
        for k,v in arg_map.iteritems():
            if isinstance(v, basestring):
                array_value = self[v]
                if array_value is None:
                    log.warning("Missing inputs for parameter function %s", name)
                    return None
                array_map[k] = array_value
            else:
                array_map[k] = v
        if return_dict:
            return array_map
        return [array_map[i] for i in arg_list]

    def iteritems(self):
        """ D.iteritems() -> an iterator over the (key, value) items of D """
        for k,v in self._rd.iteritems():
            if self._available_fields and k not in self._available_fields:
                continue
            if v is not None:
                yield k,v

    def iterkeys(self):
        """ D.iterkeys() -> an iterator over the keys of D """
        for k,v in self._rd.iteritems():
            if v is not None:
                yield k

    def itervalues(self):
        """ D.itervalues() -> an iterator over the values of D """
        for k,v in self._rd.iteritems():
            if v is not None:
                yield v

    def __contains__(self, key):
        """ D.__contains__(k) -> True if D has a key k, else False """
        if self._available_fields:
            return key in self._rd and key in self._available_fields
        return key in self._rd

    def __delitem__(self, y):
        """ x.__delitem__(y) <==> del x[y] """
        # Deletion only clears the stored value; the field itself remains.
        self._rd[y] = None

    def __iter__(self):
        """ x.__iter__() <==> iter(x) """
        for k in self._rd.iterkeys():
            yield k

    def __len__(self):
        """ x.__len__() <==> len(x) """
        if self._shp is None:
            return 0
        else:
            return self._shp[0]

    def __repr__(self):
        """ x.__repr__() <==> repr(x) """
        return self.pretty_print()

    def __str__(self):
        return 'Record Dictionary %s' % self._rd.keys()

    # Mutable container: explicitly unhashable.
    __hash__ = None

    def pretty_print(self):
        """
        @brief Pretty Print the record dictionary for debug or log purposes.
        """
        from pprint import pformat
        repr_dict = {}
        for field in self.fields:
            if self[field] is not None:
                repr_dict[field] = self[field][:]
        return pformat(repr_dict)

    def size(self):
        '''
        Truly poor way to calculate the size of a granule...
        returns the size in bytes.
        '''
        # Serializes the full granule to msgpack just to measure it.
        granule = self.to_granule()
        serializer = IonObjectSerializer()
        flat = serializer.serialize(granule)
        byte_stream = msgpack.packb(flat, default=encode_ion)
        return len(byte_stream)

    def to_ordinal(self, key):
        """Map a field name to its ordinal in the sorted field list."""
        params = copy(self._rd.keys())
        params.sort()
        try:
            return params.index(key)
        except ValueError:
            raise KeyError(key)

    def from_ordinal(self, ordinal):
        """Inverse of to_ordinal: map an ordinal back to its field name."""
        params = copy(self._rd.keys())
        params.sort()
        return params[ordinal]

    @staticmethod
    def read_stream_def(stream_def_id):
        # Fetch the StreamDefinition resource via the pubsub service.
        pubsub_cli = PubsubManagementServiceClient()
        stream_def_obj = pubsub_cli.read_stream_definition(stream_def_id)
        return stream_def_obj
| [
"pprint.pformat",
"interface.services.dm.ipubsub_management_service.PubsubManagementServiceClient",
"ion.util.stored_values.StoredValueManager",
"msgpack.packb",
"pyon.core.exception.BadRequest",
"pyon.util.log.log.warning",
"pyon.core.object.IonObjectSerializer",
"numexpr.evaluate",
"coverage_model... | [((4434, 4459), 'numpy.atleast_1d', 'np.atleast_1d', (['self[name]'], {}), '(self[name])\n', (4447, 4459), True, 'import numpy as np\n'), ((4577, 4618), 'coverage_model.get_value_class', 'get_value_class', (['ptype'], {'domain_set': 'domain'}), '(ptype, domain_set=domain)\n', (4592, 4618), False, 'from coverage_model import ParameterDictionary, ConstantType, ConstantRangeType, get_value_class, SimpleDomainSet, QuantityType, Span, SparseConstantType\n'), ((8754, 8763), 'interface.objects.Granule', 'Granule', ([], {}), '()\n', (8761, 8763), False, 'from interface.objects import Granule, StreamDefinition\n'), ((9586, 9597), 'time.time', 'time.time', ([], {}), '()\n', (9595, 9597), False, 'import time\n'), ((10081, 10107), 'coverage_model.SimpleDomainSet', 'SimpleDomainSet', (['self._shp'], {}), '(self._shp)\n', (10096, 10107), False, 'from coverage_model import ParameterDictionary, ConstantType, ConstantRangeType, get_value_class, SimpleDomainSet, QuantityType, Span, SparseConstantType\n'), ((11137, 11156), 'numpy.atleast_1d', 'np.atleast_1d', (['vals'], {}), '(vals)\n', (11150, 11156), True, 'import numpy as np\n'), ((17591, 17609), 'pprint.pformat', 'pformat', (['repr_dict'], {}), '(repr_dict)\n', (17598, 17609), False, 'from pprint import pformat\n'), ((17809, 17830), 'pyon.core.object.IonObjectSerializer', 'IonObjectSerializer', ([], {}), '()\n', (17828, 17830), False, 'from pyon.core.object import IonObjectSerializer\n'), ((17898, 17937), 'msgpack.packb', 'msgpack.packb', (['flat'], {'default': 'encode_ion'}), '(flat, default=encode_ion)\n', (17911, 17937), False, 'import msgpack\n'), ((18396, 18427), 'interface.services.dm.ipubsub_management_service.PubsubManagementServiceClient', 'PubsubManagementServiceClient', ([], {}), '()\n', (18425, 18427), False, 'from interface.services.dm.ipubsub_management_service import PubsubManagementServiceClient\n'), ((3169, 3211), 'coverage_model.ParameterDictionary.load', 'ParameterDictionary.load', 
(['param_dictionary'], {}), '(param_dictionary)\n', (3193, 3211), False, 'from coverage_model import ParameterDictionary, ConstantType, ConstantRangeType, get_value_class, SimpleDomainSet, QuantityType, Span, SparseConstantType\n'), ((4787, 4808), 'numpy.atleast_1d', 'np.atleast_1d', (['values'], {}), '(values)\n', (4800, 4808), True, 'import numpy as np\n'), ((4889, 4922), 'numpy.array', 'np.array', (['[spans]'], {'dtype': '"""object"""'}), "([spans], dtype='object')\n", (4897, 4922), True, 'import numpy as np\n'), ((5812, 5834), 'coverage_model.Span', 'Span', (['i', 'None', '(-i)', 'val'], {}), '(i, None, -i, val)\n', (5816, 5834), False, 'from coverage_model import ParameterDictionary, ConstantType, ConstantRangeType, get_value_class, SimpleDomainSet, QuantityType, Span, SparseConstantType\n'), ((6472, 6510), 'ion.util.stored_values.StoredValueManager', 'StoredValueManager', (['Container.instance'], {}), '(Container.instance)\n', (6490, 6510), False, 'from ion.util.stored_values import StoredValueManager\n'), ((14026, 14055), 'numpy.atleast_1d', 'np.atleast_1d', (['self._rd[name]'], {}), '(self._rd[name])\n', (14039, 14055), True, 'import numpy as np\n'), ((5566, 5590), 'coverage_model.Span', 'Span', (['None', 'None', '(0)', 'val'], {}), '(None, None, 0, val)\n', (5570, 5590), False, 'from coverage_model import ParameterDictionary, ConstantType, ConstantRangeType, get_value_class, SimpleDomainSet, QuantityType, Span, SparseConstantType\n'), ((12074, 12113), 'pyon.util.log.log.trace', 'log.trace', (['"""Set shape to %s"""', 'self._shp'], {}), "('Set shape to %s', self._shp)\n", (12083, 12113), False, 'from pyon.util.log import log\n'), ((13671, 13700), 'numpy.atleast_1d', 'np.atleast_1d', (['self._rd[name]'], {}), '(self._rd[name])\n', (13684, 13700), True, 'import numpy as np\n'), ((14840, 14895), 'numexpr.evaluate', 'ne.evaluate', (['ptype.function.expression'], {'local_dict': 'args'}), '(ptype.function.expression, local_dict=args)\n', (14851, 14895), True, 
'import numexpr as ne\n'), ((3962, 3993), 'coverage_model.ParameterDictionary.load', 'ParameterDictionary.load', (['pdict'], {}), '(pdict)\n', (3986, 3993), False, 'from coverage_model import ParameterDictionary, ConstantType, ConstantRangeType, get_value_class, SimpleDomainSet, QuantityType, Span, SparseConstantType\n'), ((4079, 4166), 'pyon.core.exception.BadRequest', 'BadRequest', (['"""Unable to create record dictionary with improper ParameterDictionary"""'], {}), "(\n 'Unable to create record dictionary with improper ParameterDictionary')\n", (4089, 4166), False, 'from pyon.core.exception import BadRequest, NotFound\n'), ((5693, 5722), 'numpy.atleast_1d', 'np.atleast_1d', (['(lastval == val)'], {}), '(lastval == val)\n', (5706, 5722), True, 'import numpy as np\n'), ((7113, 7175), 'pyon.util.log.log.debug', 'log.debug', (['"""Reference Document for %s not found"""', 'document_key'], {}), "('Reference Document for %s not found', document_key)\n", (7122, 7175), False, 'from pyon.util.log import log\n'), ((10808, 10822), 'numpy.array', 'np.array', (['None'], {}), '(None)\n', (10816, 10822), True, 'import numpy as np\n'), ((12418, 12536), 'pyon.util.arg_check.validate_equal', 'validate_equal', (['vals.shape[0]', 'self._shp[0]', "('Invalid shape on input (%s expecting %s)' % (vals.shape, self._shp))"], {}), "(vals.shape[0], self._shp[0], \n 'Invalid shape on input (%s expecting %s)' % (vals.shape, self._shp))\n", (12432, 12536), False, 'from pyon.util.arg_check import validate_equal\n'), ((15465, 15526), 'pyon.util.log.log.warning', 'log.warning', (['"""Missing inputs for parameter function %s"""', 'name'], {}), "('Missing inputs for parameter function %s', name)\n", (15476, 15526), False, 'from pyon.util.log import log\n'), ((12026, 12060), 'pyon.core.exception.BadRequest', 'BadRequest', (['"""No shape was defined"""'], {}), "('No shape was defined')\n", (12036, 12060), False, 'from pyon.core.exception import BadRequest, NotFound\n'), ((12349, 12401), 
'pyon.core.exception.BadRequest', 'BadRequest', (['"""Invalid shape on input (dimensionless)"""'], {}), "('Invalid shape on input (dimensionless)')\n", (12359, 12401), False, 'from pyon.core.exception import BadRequest, NotFound\n'), ((3523, 3569), 'pyon.core.exception.BadRequest', 'BadRequest', (['"""Improper StreamDefinition object"""'], {}), "('Improper StreamDefinition object')\n", (3533, 3569), False, 'from pyon.core.exception import BadRequest, NotFound\n')] |
# -*- coding: utf-8 -*-
from collections import namedtuple
import os
import typing
import numpy as np
import torch.utils.data
from benzina.utils.file import Track
_TrackType = typing.Union[str, Track]
_TrackPairType = typing.Tuple[_TrackType, _TrackType]
_ClassTracksType = typing.Tuple[_TrackType, _TrackType]
class Dataset(torch.utils.data.Dataset):
    """
    Args:
        archive (str or :class:`Track`): path to the archive or a Track. If a
            Track, :attr:`track` will be ignored.
        track (str or :class:`Track`, optional): track label or a Track. If a
            Track, :attr:`archive` must not be specified.
            (default: ``"bzna_input"``)
    """
    _Item = namedtuple("Item", ["input"])

    def __init__(self,
                 archive: typing.Union[str, _TrackType] = None,
                 track: _TrackType = "bzna_input"):
        if isinstance(archive, Track):
            # A Track handed in as `archive` wins over the `track` argument.
            archive, track = None, archive
        if archive is None:
            # Without an archive path, `track` must already be a Track.
            if not isinstance(track, Track):
                raise ValueError("track option must be a Track when archive is "
                                 "not specified.")
        else:
            if not isinstance(track, str):
                raise ValueError("track option must be a track label when "
                                 "archive is specified.")
            expanded = os.path.expandvars(os.path.expanduser(archive))
            if not os.path.isfile(expanded):
                raise ValueError("The archive {} is not present.".format(expanded))
            track = Track(expanded, track)
        self._track = track
        self._track.open()
        self._filename = track.file.path

    @property
    def filename(self):
        """Path of the archive file backing this dataset."""
        return self._filename

    def __len__(self):
        return len(self._track)

    def __getitem__(self, index: int):
        # Deliberately uses Dataset._Item (not self._Item): subclasses
        # override _Item with more fields and wrap this base result.
        return Dataset._Item(self._track[index])

    def __add__(self, other):
        # Concatenation of benzina datasets is not supported.
        raise NotImplementedError()
class ClassificationDataset(Dataset):
    """
    Args:
        archive (str or pair of :class:`Track`): path to the archive or a pair
            of Track. If a pair of Track, :attr:`tracks` will be ignored.
        tracks (pair of str or :class:`Track`, optional): pair of input and
            target tracks labels or a pair of input and target Track. If a pair
            of Track, :attr:`archive` must not be specified.
            (default: ``("bzna_input", "bzna_target")``)
        input_label (str, optional): label of the inputs to use in the input
            track. (default: ``"bzna_thumb"``)
    """
    _Item = namedtuple("Item", ["input", "input_label", "target"])

    def __init__(self,
                 archive: typing.Union[str, _TrackPairType] = None,
                 tracks: _ClassTracksType = ("bzna_input", "bzna_target"),
                 input_label: str = "bzna_thumb"):
        # First try interpreting `archive` as the pair of Tracks itself
        # (archive=None); if that raises, fall back to the plain
        # (archive path, track labels) interpretation.
        try:
            archive, tracks, input_label = \
                ClassificationDataset._validate_args(
                    None, archive, input_label)
        except (TypeError, ValueError):
            archive, tracks, input_label = \
                ClassificationDataset._validate_args(
                    archive, tracks, input_label)
        if archive is not None:
            input_track = Track(archive, tracks[0])
            target_track = Track(archive, tracks[1])
        else:
            input_track, target_track = tracks
        Dataset.__init__(self, input_track)
        self._input_label = input_label
        # Read the target track's byte span in a single pass and decode it as
        # little-endian int64 labels.
        target_track.open()
        location_first, _ = target_track[0].location
        location_last, size_last = target_track[-1].location
        target_track.file.seek(location_first)
        buffer = target_track.file.read(location_last + size_last - location_first)
        # Inputs beyond the target track's length keep the sentinel label -1.
        self._targets = np.full(len(self._track), -1, np.int64)
        self._targets[:len(target_track)] = np.frombuffer(buffer, np.dtype("<i8"))

    def __getitem__(self, index: int):
        item = Dataset.__getitem__(self, index)
        return self._Item(input=item.input,
                          input_label=self._input_label,
                          target=(self.targets[index],))

    def __add__(self, other):
        raise NotImplementedError()

    @property
    def targets(self):
        # int64 array of labels, one per input (-1 where no target exists).
        return self._targets

    @staticmethod
    def _validate_args(*args):
        """Validate (archive, tracks, input_label); raises on any mismatch so
        __init__ can fall through to the alternate interpretation."""
        archive, tracks, input_label = args
        if archive is not None:
            if any(not isinstance(t, str) for t in tracks):
                raise ValueError("tracks option must be a pair of tracks "
                                 "labels when archive is specified.")
            archive = os.path.expanduser(archive)
            archive = os.path.expandvars(archive)
            if not os.path.isfile(archive):
                raise ValueError("The archive {} is not present.".format(archive))
            # Raises TypeError/ValueError unless tracks is a pair.
            _, _ = tracks
        elif any(not isinstance(t, Track) for t in tracks):
            raise ValueError("tracks option must be a pair of Track when "
                             "archive is not specified.")
        return archive, tracks, input_label
class ImageNet(ClassificationDataset):
    """
    Args:
        root (str or pair of :class:`Track`): root of the ImageNet dataset or
            path to the archive or a pair of Track. If a pair of Track,
            :attr:`tracks` will be ignored.
        split (None or str, optional): The dataset split, supports ``test``,
            ``train``, ``val``. If not specified, samples will be drawn from
            all splits.
        tracks (pair of str or :class:`Track`, optional): pair of input and
            target tracks labels or a pair of input and target Track. If a pair
            of Track, :attr:`root` must not be specified.
            (default: ``("bzna_input", "bzna_target")``)
        input_label (str, optional): label of the inputs to use in the input
            track. (default: ``"bzna_thumb"``)
    """
    # Some images are missing from the dataset. Please read the README of the
    # dataset for more information.
    LEN_VALID = 50000 - 1
    LEN_TEST = 100000 - 7

    def __init__(self,
                 root: typing.Union[str, _TrackPairType] = None,
                 split: str = None,
                 tracks: _ClassTracksType = ("bzna_input", "bzna_target"),
                 input_label: str = "bzna_thumb"):
        # Same two-phase interpretation as ClassificationDataset: first treat
        # `root` as a pair of Tracks, then fall back to a root path.
        try:
            archive, split, tracks, input_label = \
                ImageNet._validate_args(None, split, root, input_label)
        except (TypeError, ValueError):
            archive, split, tracks, input_label = \
                ImageNet._validate_args(root, split, tracks, input_label)
        ClassificationDataset.__init__(self, archive, tracks, input_label)
        self._indices = np.array(range(ClassificationDataset.__len__(self)),
                                 np.int64)
        # Splits are laid out contiguously as [train | val | test]; slice the
        # index and target arrays accordingly.
        if split == "test":
            self._indices = self._indices[-self.LEN_TEST:]
            self._targets = self._targets[-self.LEN_TEST:]
        elif split == "train":
            len_train = len(self) - self.LEN_VALID - self.LEN_TEST
            self._indices = self._indices[:len_train]
            self._targets = self._targets[:len_train]
        elif split == "val":
            len_train = len(self) - self.LEN_VALID - self.LEN_TEST
            self._indices = self._indices[len_train:-self.LEN_TEST]
            self._targets = self._targets[len_train:-self.LEN_TEST]

    def __getitem__(self, index: int):
        # Translate the split-local index to the archive-global index.
        item = Dataset.__getitem__(self, self._indices[index])
        return ImageNet._Item(input=item.input,
                              input_label=self._input_label,
                              target=(self._targets[index],))

    def __len__(self):
        return len(self._indices)

    def __add__(self, other):
        raise NotImplementedError()

    @staticmethod
    def _validate_args(*args):
        """Resolve root/split/tracks; locates ilsvrc2012.[bzna|mp4] when root
        is a directory. Raises TypeError/ValueError on the mismatched form."""
        root, split, tracks, input_label = args
        archive = None
        if root is not None:
            if any(not isinstance(t, str) for t in tracks):
                raise ValueError("tracks option must be a pair of tracks "
                                 "labels when root is specified.")
            root = os.path.expanduser(root)
            root = os.path.expandvars(root)
            # `root` may be the archive file itself or a directory holding it.
            if os.path.isfile(root):
                archive = root
            elif os.path.isfile(os.path.join(root, "ilsvrc2012.bzna")):
                archive = os.path.join(root, "ilsvrc2012.bzna")
            elif os.path.isfile(os.path.join(root, "ilsvrc2012.mp4")):
                archive = os.path.join(root, "ilsvrc2012.mp4")
            if archive is None:
                if root.endswith(".mp4") or root.endswith(".bzna"):
                    raise ValueError("The archive {} is not present.".format(root))
                else:
                    raise ValueError("The archive ilsvrc2012.[mp4|bzna] is not "
                                     "present in root {}.".format(root))
        elif any(not isinstance(t, Track) for t in tracks):
            raise ValueError("tracks option must be a pair of Track when "
                             "root is not specified.")
        if split not in {"test", "train", "val", None}:
            raise ValueError("split option must be one of test, train, val")
        return archive, split, tracks, input_label
| [
"os.path.join",
"benzina.utils.file.Track",
"numpy.dtype",
"os.path.expandvars",
"os.path.isfile",
"collections.namedtuple",
"os.path.expanduser"
] | [((701, 730), 'collections.namedtuple', 'namedtuple', (['"""Item"""', "['input']"], {}), "('Item', ['input'])\n", (711, 730), False, 'from collections import namedtuple\n'), ((2627, 2681), 'collections.namedtuple', 'namedtuple', (['"""Item"""', "['input', 'input_label', 'target']"], {}), "('Item', ['input', 'input_label', 'target'])\n", (2637, 2681), False, 'from collections import namedtuple\n'), ((1198, 1225), 'os.path.expanduser', 'os.path.expanduser', (['archive'], {}), '(archive)\n', (1216, 1225), False, 'import os\n'), ((1248, 1275), 'os.path.expandvars', 'os.path.expandvars', (['archive'], {}), '(archive)\n', (1266, 1275), False, 'import os\n'), ((1425, 1446), 'benzina.utils.file.Track', 'Track', (['archive', 'track'], {}), '(archive, track)\n', (1430, 1446), False, 'from benzina.utils.file import Track\n'), ((3308, 3333), 'benzina.utils.file.Track', 'Track', (['archive', 'tracks[0]'], {}), '(archive, tracks[0])\n', (3313, 3333), False, 'from benzina.utils.file import Track\n'), ((3361, 3386), 'benzina.utils.file.Track', 'Track', (['archive', 'tracks[1]'], {}), '(archive, tracks[1])\n', (3366, 3386), False, 'from benzina.utils.file import Track\n'), ((3940, 3955), 'numpy.dtype', 'np.dtype', (['"""<i8"""'], {}), "('<i8')\n", (3948, 3955), True, 'import numpy as np\n'), ((4692, 4719), 'os.path.expanduser', 'os.path.expanduser', (['archive'], {}), '(archive)\n', (4710, 4719), False, 'import os\n'), ((4742, 4769), 'os.path.expandvars', 'os.path.expandvars', (['archive'], {}), '(archive)\n', (4760, 4769), False, 'import os\n'), ((8276, 8300), 'os.path.expanduser', 'os.path.expanduser', (['root'], {}), '(root)\n', (8294, 8300), False, 'import os\n'), ((8320, 8344), 'os.path.expandvars', 'os.path.expandvars', (['root'], {}), '(root)\n', (8338, 8344), False, 'import os\n'), ((8361, 8381), 'os.path.isfile', 'os.path.isfile', (['root'], {}), '(root)\n', (8375, 8381), False, 'import os\n'), ((1296, 1319), 'os.path.isfile', 'os.path.isfile', (['archive'], {}), 
'(archive)\n', (1310, 1319), False, 'import os\n'), ((4790, 4813), 'os.path.isfile', 'os.path.isfile', (['archive'], {}), '(archive)\n', (4804, 4813), False, 'import os\n'), ((8447, 8484), 'os.path.join', 'os.path.join', (['root', '"""ilsvrc2012.bzna"""'], {}), "(root, 'ilsvrc2012.bzna')\n", (8459, 8484), False, 'import os\n'), ((8513, 8550), 'os.path.join', 'os.path.join', (['root', '"""ilsvrc2012.bzna"""'], {}), "(root, 'ilsvrc2012.bzna')\n", (8525, 8550), False, 'import os\n'), ((8584, 8620), 'os.path.join', 'os.path.join', (['root', '"""ilsvrc2012.mp4"""'], {}), "(root, 'ilsvrc2012.mp4')\n", (8596, 8620), False, 'import os\n'), ((8649, 8685), 'os.path.join', 'os.path.join', (['root', '"""ilsvrc2012.mp4"""'], {}), "(root, 'ilsvrc2012.mp4')\n", (8661, 8685), False, 'import os\n')] |
import numpy as np
def kMeans(X, K, maxIters = 30):
    """Cluster the rows of X into K groups with Lloyd's algorithm.

    Parameters
    ----------
    X : ndarray, shape (n_samples, n_features)
        Data points, one per row.
    K : int
        Number of clusters; must satisfy K <= n_samples.
    maxIters : int, optional
        Number of assignment/update iterations (fixed count, no early stop).

    Returns
    -------
    centroids : ndarray, shape (K, n_features)
        Final cluster centers.
    C : ndarray, shape (n_samples,)
        Cluster index assigned to each row of X.
    """
    # Sample K *distinct* rows as initial centers. The original used
    # replacement, which could pick the same point twice and leave a cluster
    # permanently empty (yielding NaN centroids from the mean of an empty set).
    centroids = X[np.random.choice(len(X), K, replace=False), :]
    for _ in range(maxIters):
        # Assign each point to the nearest centroid (squared Euclidean).
        C = np.array([np.argmin([np.dot(x_i - y_k, x_i - y_k) for y_k in centroids])
                      for x_i in X])
        # Update step; keep the previous center if a cluster went empty so we
        # never average an empty slice.
        centroids = [X[C == k].mean(axis=0) if np.any(C == k) else centroids[k]
                     for k in range(K)]
    return np.array(centroids), C
def EM_init(Data, nbStates):
    """Initialise GMM parameters (priors, means, covariances) via k-means.

    :param Data: array of shape (nbVar, nbData) -- one observation per column.
    :param nbStates: number of mixture components.
    :return: tuple (Priors, Mu, Sigma) with shapes (1, nbStates),
        (nbVar, nbStates) and (nbVar, nbVar, nbStates) respectively.
    """
    nbVar, nbData = np.shape(Data)
    Priors = np.ndarray(shape = (1, nbStates))
    Sigma = np.ndarray(shape = (nbVar, nbVar, nbStates))
    # kMeans clusters row vectors, so the (nbVar, nbData) matrix is transposed.
    Centers, Data_id = kMeans(np.transpose(Data), nbStates)
    Mu = np.transpose(Centers)
    for i in range (0,nbStates):
        # Flat array of sample indices assigned to component i.
        idtmp = np.nonzero(Data_id==i)
        idtmp = list(idtmp)
        idtmp = np.reshape(idtmp,(np.size(idtmp)))
        # Unnormalised prior = number of member samples (normalised below).
        Priors[0,i] = np.size(idtmp)
        # NOTE(review): the member columns are concatenated with themselves
        # before np.cov -- presumably to avoid a degenerate estimate for
        # very small clusters; confirm the intent.
        a = np.concatenate((Data[:, idtmp],Data[:, idtmp]), axis = 1)
        Sigma[:,:,i] = np.cov(a)
        # Small diagonal regulariser keeps each covariance invertible.
        Sigma[:,:,i] = Sigma[:,:,i] + 0.00001 * np.diag(np.diag(np.ones((nbVar,nbVar))))
    Priors = Priors / nbData
    return (Priors, Mu, Sigma)
| [
"numpy.size",
"numpy.transpose",
"numpy.ones",
"numpy.shape",
"numpy.nonzero",
"numpy.array",
"numpy.dot",
"numpy.cov",
"numpy.ndarray",
"numpy.concatenate"
] | [((390, 404), 'numpy.shape', 'np.shape', (['Data'], {}), '(Data)\n', (398, 404), True, 'import numpy as np\n'), ((418, 449), 'numpy.ndarray', 'np.ndarray', ([], {'shape': '(1, nbStates)'}), '(shape=(1, nbStates))\n', (428, 449), True, 'import numpy as np\n'), ((464, 506), 'numpy.ndarray', 'np.ndarray', ([], {'shape': '(nbVar, nbVar, nbStates)'}), '(shape=(nbVar, nbVar, nbStates))\n', (474, 506), True, 'import numpy as np\n'), ((578, 599), 'numpy.transpose', 'np.transpose', (['Centers'], {}), '(Centers)\n', (590, 599), True, 'import numpy as np\n'), ((316, 335), 'numpy.array', 'np.array', (['centroids'], {}), '(centroids)\n', (324, 335), True, 'import numpy as np\n'), ((539, 557), 'numpy.transpose', 'np.transpose', (['Data'], {}), '(Data)\n', (551, 557), True, 'import numpy as np\n'), ((649, 673), 'numpy.nonzero', 'np.nonzero', (['(Data_id == i)'], {}), '(Data_id == i)\n', (659, 673), True, 'import numpy as np\n'), ((773, 787), 'numpy.size', 'np.size', (['idtmp'], {}), '(idtmp)\n', (780, 787), True, 'import numpy as np\n'), ((800, 856), 'numpy.concatenate', 'np.concatenate', (['(Data[:, idtmp], Data[:, idtmp])'], {'axis': '(1)'}), '((Data[:, idtmp], Data[:, idtmp]), axis=1)\n', (814, 856), True, 'import numpy as np\n'), ((881, 890), 'numpy.cov', 'np.cov', (['a'], {}), '(a)\n', (887, 890), True, 'import numpy as np\n'), ((734, 748), 'numpy.size', 'np.size', (['idtmp'], {}), '(idtmp)\n', (741, 748), True, 'import numpy as np\n'), ((177, 205), 'numpy.dot', 'np.dot', (['(x_i - y_k)', '(x_i - y_k)'], {}), '(x_i - y_k, x_i - y_k)\n', (183, 205), True, 'import numpy as np\n'), ((955, 978), 'numpy.ones', 'np.ones', (['(nbVar, nbVar)'], {}), '((nbVar, nbVar))\n', (962, 978), True, 'import numpy as np\n')] |
# MIT License
# Copyright (c) 2019 Runway AI, Inc
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# =========================================================================
# This example contains the minimum specifications and requirements
# to port a machine learning model to Runway.
# For more instructions on how to port a model to Runway, see the Runway Model
# SDK docs at https://sdk.runwayml.com
# RUNWAY
# www.runwayml.com
# <EMAIL>
# =========================================================================
# Import the Runway SDK. Please install it first with
# `pip install runway-python`.
import runway
from runway.data_types import number, text, image, vector
import pickle
import numpy as np
import tensorflow as tf
import dnnlib.tflib as tflib
import runway
import re
import dnnlib
import dnnlib.tflib as tflib
# Fix both RNG seeds so repeated runs produce identical samples.
np.random.seed(0)
tf.random.set_random_seed(0)
# Setup the model, initialize weights, set the configs of the model, etc.
# Every model will have a different set of configurations and requirements.
# Check https://docs.runwayapp.ai/#/python-sdk to see a complete list of
# supported configs. The setup function should return the model ready to be
# used.
@runway.setup(options={'checkpoint': runway.file(extension='.pkl')})
def setup(opts):
    """Load the pickled (G, D, Gs) networks and return the generator Gs.

    NOTE(review): the 'checkpoint' option is currently ignored -- the
    model is always read from the local 'model.pkl' file; the
    commented-out lines show the intended opts['checkpoint'] path.
    Confirm before deploying.
    """
    global Gs
    tflib.init_tf()
    # with open(opts['checkpoint'], 'rb') as file:
    #     G, D, Gs = pickle.load(file)
    # Turn this on to develop locally
    with open('model.pkl', 'rb') as file:
        G, D, Gs = pickle.load(file)
    return Gs
# Runway input spec for the 'generate' command: a 512-dim latent vector z
# and a truncation-psi value controlling sample diversity vs. fidelity.
generate_inputs = {
    'z': runway.vector(512, sampling_std=0.5),
    'truncation': runway.number(min=0, max=3, default=0.8, step=0.01)
}
@runway.command('generate', inputs=generate_inputs, outputs={'image': runway.image})
def convert(model, inputs):
    """Generate one image from the latent vector supplied in `inputs`.

    Reshapes the 512-dim latent into a single-sample batch, runs the
    generator with the requested truncation psi, and returns the first
    generated image clipped into the uint8 range.
    """
    latent_batch = inputs['z'].reshape((1, 512))
    # Conditioning label: all zeros, shaped to the generator's label input.
    zero_label = np.zeros([1] + Gs.input_shapes[1][1:])
    transform = dict(func=tflib.convert_images_to_uint8, nchw_to_nhwc=True)
    batch = model.run(
        latent_batch,
        zero_label,
        truncation_psi=inputs['truncation'],
        randomize_noise=False,
        output_transform=transform)
    first_image = np.clip(batch[0], 0, 255).astype(np.uint8)
    return {'image': first_image}
if __name__ == '__main__':
    # Start the Runway HTTP model server (blocks until the process exits),
    # using the default network interface and port, displayed here for
    # convenience.
    runway.run(host='0.0.0.0', port=8000)
| [
"numpy.random.seed",
"runway.run",
"numpy.zeros",
"numpy.clip",
"runway.file",
"pickle.load",
"runway.vector",
"runway.number",
"runway.command",
"tensorflow.random.set_random_seed",
"dnnlib.tflib.init_tf"
] | [((1847, 1864), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (1861, 1864), True, 'import numpy as np\n'), ((1865, 1893), 'tensorflow.random.set_random_seed', 'tf.random.set_random_seed', (['(0)'], {}), '(0)\n', (1890, 1893), True, 'import tensorflow as tf\n'), ((2690, 2778), 'runway.command', 'runway.command', (['"""generate"""'], {'inputs': 'generate_inputs', 'outputs': "{'image': runway.image}"}), "('generate', inputs=generate_inputs, outputs={'image': runway\n .image})\n", (2704, 2778), False, 'import runway\n'), ((2309, 2324), 'dnnlib.tflib.init_tf', 'tflib.init_tf', ([], {}), '()\n', (2322, 2324), True, 'import dnnlib.tflib as tflib\n'), ((2578, 2614), 'runway.vector', 'runway.vector', (['(512)'], {'sampling_std': '(0.5)'}), '(512, sampling_std=0.5)\n', (2591, 2614), False, 'import runway\n'), ((2634, 2685), 'runway.number', 'runway.number', ([], {'min': '(0)', 'max': '(3)', 'default': '(0.8)', 'step': '(0.01)'}), '(min=0, max=3, default=0.8, step=0.01)\n', (2647, 2685), False, 'import runway\n'), ((2906, 2944), 'numpy.zeros', 'np.zeros', (['([1] + Gs.input_shapes[1][1:])'], {}), '([1] + Gs.input_shapes[1][1:])\n', (2914, 2944), True, 'import numpy as np\n'), ((3356, 3393), 'runway.run', 'runway.run', ([], {'host': '"""0.0.0.0"""', 'port': '(8000)'}), "(host='0.0.0.0', port=8000)\n", (3366, 3393), False, 'import runway\n'), ((2515, 2532), 'pickle.load', 'pickle.load', (['file'], {}), '(file)\n', (2526, 2532), False, 'import pickle\n'), ((2242, 2271), 'runway.file', 'runway.file', ([], {'extension': '""".pkl"""'}), "(extension='.pkl')\n", (2253, 2271), False, 'import runway\n'), ((3139, 3165), 'numpy.clip', 'np.clip', (['images[0]', '(0)', '(255)'], {}), '(images[0], 0, 255)\n', (3146, 3165), True, 'import numpy as np\n')] |
"""
vanila policy gradient
"""
import numpy as np
import scipy.signal
import tensorflow as tf
from gym.spaces import Box, Discrete
class VPG:
    """Vanilla policy gradient (REINFORCE) agent for TensorFlow 1.x.

    Builds an MLP policy -- Gaussian for continuous (Box) action spaces,
    categorical for Discrete ones -- and improves it by gradient ascent
    on the log-likelihood of the taken actions weighted by the
    normalised discounted episode returns.
    """
    def __init__(
            self,
            env,
            learning_rate=0.01,
            gamma=0.95,
            output_graph=False,
            seed=1,
            ep_max=1000,
            ep_steps_max=1000,
            hidden_sizes=(64, 64)
    ):
        """Build the policy graph and open a TF session.

        :param env: gym-style environment exposing observation_space
            and action_space.
        :param learning_rate: Adam step size.
        :param gamma: discount factor for the returns.
        :param output_graph: if True, write the graph for TensorBoard.
        :param seed: seed for both the numpy and TensorFlow RNGs.
        :param ep_max: maximum number of training episodes.
        :param ep_steps_max: maximum number of steps per episode.
        :param hidden_sizes: widths of the policy MLP's hidden layers.
        """
        np.random.seed(seed)
        tf.set_random_seed(seed)
        self.lr = learning_rate
        self.gamma = gamma
        self.ep_max = ep_max
        self.ep_steps_max = ep_steps_max
        self.s = self._get_placeholder(env.observation_space, name='observations')
        print("observations: ", self.s)
        self.a = self._get_placeholder(env.action_space, name='actions')
        print("actions: ", self.a)
        self.v = tf.placeholder(dtype=tf.float32, shape=(None, ), name="actions_value")
        self._build_net(hidden_sizes=hidden_sizes, action_space=env.action_space)
        self.sess = tf.Session()
        if output_graph:
            # $ tensorboard --logdir=logs   ->   http://0.0.0.0:6006/
            tf.summary.FileWriter("logs/", self.sess.graph)
        self.sess.run(tf.global_variables_initializer())

    def _get_placeholder(self, space, name):
        """Return an input placeholder matching a gym space.

        Box spaces get a float placeholder (a 1-D Box collapses to shape
        (None,)); Discrete spaces get an int placeholder of shape (None,).
        """
        if isinstance(space, Box):
            shape = space.shape  # (act_dim, )
            dim = (None,) if shape[0] == 1 else (None, *shape)
            # dim = (None, shape) if np.isscalar(shape) else (None, *shape)
            return tf.placeholder(dtype=tf.float32, shape=dim, name=name)
        elif isinstance(space, Discrete):
            return tf.placeholder(dtype=tf.int32, shape=(None,), name=name)
        else:
            raise NotImplementedError

    def _gaussian_likelihood(self, x, mu, log_std):
        """Log-density of x under a diagonal Gaussian N(mu, exp(log_std)**2)."""
        eps = 1e-8  # avoids division by a vanishing std
        pre_sum = -0.5 * (((x - mu) / (tf.exp(log_std) + eps)) ** 2 + 2 * log_std + np.log(2 * np.pi))
        return tf.reduce_sum(pre_sum, axis=1)

    def _mlp(self, x, hidden_sizes=(64,), activation=tf.tanh, output_activation=None):
        """Stack dense layers; hidden_sizes[-1] is the output width."""
        for h in hidden_sizes[:-1]:
            x = tf.layers.dense(x, units=h, activation=activation)
        return tf.layers.dense(x, units=hidden_sizes[-1], activation=output_activation)

    def _mlp_discrete_policy(self, s, a, hidden_sizes, activation, output_activation, action_space):
        """
        generate a policy network for the discrete case
        :param s: state placeholder
        :param a: action placeholder, e.g. input the action series as list [a_1, ..., a_T]
        :param hidden_sizes: list [l1, l2, ...]
        :param activation:
        :param output_activation:
        :param action_space: env.action_space
        :return:
            pi: the action chosen by the current policy at state s
            logp_batch: the list of log probability corresponding to the list of actions a
            logp_pi: the log probability that pi is chosen
        """
        act_dim = action_space.n
        act_logits = self._mlp(s, list(hidden_sizes)+[act_dim], activation, None)  # [[xx, ..., xx]]
        logps = tf.nn.log_softmax(act_logits)  # log prob. distribution of all the actions = log(soft_max) but faster
        pi = tf.squeeze(tf.multinomial(act_logits, 1), axis=1)  # sample one action from act_logits [x]
        # batch: list of probabilities [P_a0, P_a1, ...]
        logp_batch = tf.reduce_sum(tf.one_hot(a, depth=act_dim) * logps, axis=1)
        logp_pi = tf.reduce_sum(tf.one_hot(pi, depth=act_dim) * logps, axis=1)
        return pi, logp_batch, logp_pi

    def _mlp_gaussian_policy(self, s, a, hidden_sizes, activation, output_activation, action_space):
        """
        generate a policy network for the continuous case
        :param s: state placeholder
        :param a: action placeholder, e.g. input the action matrix
             a = [[a_11, ..., a_D1],
                  [a_12, ..., a_D2],
                  ...
                  [a_1T, ..., a_DT]]
        :param hidden_sizes: list [l1, l2, ...]
        :param activation:
        :param output_activation:
        :param action_space: env.action_space
        :return:
            pi: the action chosen by the current policy at state s
            logp_batch: the list of log probability corresponding to the list of actions a
            logp_pi: the log probability that pi is chosen
        """
        act_dim = action_space.shape[0]
        mu = self._mlp(s, list(hidden_sizes)+[act_dim], activation, output_activation)
        # log-std is a trainable variable shared across states.
        log_std = tf.get_variable(name='log_std', initializer=-0.5 * np.ones(act_dim, dtype=np.float32))
        std = tf.exp(log_std)
        # Reparameterised sample: mu + std * N(0, 1).
        pi = mu + tf.random_normal(tf.shape(mu)) * std
        logp_batch = self._gaussian_likelihood(a, mu, log_std)
        logp_pi = self._gaussian_likelihood(pi, mu, log_std)
        return pi, logp_batch, logp_pi

    def _build_net(self, hidden_sizes=(30,30), activation=tf.tanh, output_activation=None, policy=None, action_space=None):
        """Create the policy outputs, the surrogate loss and the train op."""
        self.ep_s, self.ep_a, self.ep_r = [], [], []
        self.ep_ret = tf.placeholder(dtype=tf.float32, shape=(None, ), name='ep_returns')
        # Pick the default policy head from the action-space type.
        if policy is None and isinstance(action_space, Box):
            policy = self._mlp_gaussian_policy
        elif policy is None and isinstance(action_space, Discrete):
            policy = self._mlp_discrete_policy
        self.pi, logp_batch, _ = policy(self.s, self.a, hidden_sizes, activation, output_activation, action_space)
        # Minimising -E[log pi(a|s) * R] == maximising the expected return.
        pi_loss = -tf.reduce_mean(logp_batch * self.ep_ret)
        with tf.name_scope('train'):
            self.train_op = tf.train.AdamOptimizer(self.lr).minimize(pi_loss)

    def _choose_action(self, s):
        """Sample one action from the policy for a single observation s."""
        a = self.sess.run(self.pi, feed_dict={self.s: s[np.newaxis, :]})
        return a

    def _store_transition(self, s, a, r):
        """Append one (state, action, reward) step to the episode buffer."""
        self.ep_s.append(s)
        self.ep_a.append(a)
        self.ep_r.append(r)

    def _process_rollout(self):
        """Return the standardised discounted returns of the buffered episode.

        NOTE(review): np.std is zero when every return is identical,
        which would divide by zero -- confirm this cannot occur for the
        target environments.
        """
        ep_ret = self._discounted_sum_vec(self.ep_r, self.gamma)
        ep_ret -= np.mean(ep_ret)
        ep_ret /= np.std(ep_ret)
        return ep_ret

    def _discounted_sum_vec(self, x, discount):
        """Discounted cumulative sums: out[t] = sum_k discount**k * x[t+k]."""
        return scipy.signal.lfilter([1], [1, float(-discount)], x[::-1], axis=0)[::-1]

    def _update(self):
        """Run one policy-gradient step on the buffered episode, then clear it."""
        # discount and normalize episode reward
        discounted_ep_return = self._process_rollout()
        # train on episode
        self.sess.run(self.train_op, feed_dict={
            self.s: np.vstack(self.ep_s),  # shape=(None, s_dim)
            self.a: np.squeeze(np.array(self.ep_a)),  # shape=(None, a_dim)
            self.ep_ret: discounted_ep_return,  # shape=(None, )
        })
        self.ep_s, self.ep_a, self.ep_r = [], [], []  # empty episode data

    def train(self, env, render_threshold_reward, render=False):
        """Train for up to ep_max episodes.

        :param env: gym-style environment to interact with.
        :param render_threshold_reward: once the running reward exceeds
            this value, rendering is switched on.
        :param render: start with rendering already enabled.
        """
        # BUG FIX: the original tested `'running_reward' not in globals()`,
        # which is always true for this local variable, so the exponential
        # moving average was reset every single episode (and the else
        # branch would have raised UnboundLocalError had it ever run).
        # Track the running reward as a plain local instead.
        running_reward = None
        for ep_index in range(self.ep_max):
            s = env.reset()
            for step_index in range(self.ep_steps_max):
                if render:
                    env.render()
                a = self._choose_action(s)
                s_, r, done, _ = env.step(np.squeeze(a))
                self._store_transition(s, a, r)
                terminal = done or ((step_index + 1) == self.ep_steps_max)
                if terminal:
                    # Exponentially smoothed episode return.
                    ep_rs_sum = sum(self.ep_r)
                    if running_reward is None:
                        running_reward = ep_rs_sum
                    else:
                        running_reward = running_reward * 0.99 + ep_rs_sum * 0.01
                    if running_reward > render_threshold_reward:
                        render = True  # rendering
                    print("episode:", ep_index, " reward:", int(running_reward))
                    self._update()
                    break
                s = s_
| [
"tensorflow.reduce_sum",
"numpy.random.seed",
"numpy.ones",
"numpy.mean",
"tensorflow.one_hot",
"tensorflow.nn.log_softmax",
"numpy.std",
"tensorflow.set_random_seed",
"tensorflow.placeholder",
"tensorflow.summary.FileWriter",
"tensorflow.exp",
"tensorflow.name_scope",
"tensorflow.global_var... | [((410, 430), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (424, 430), True, 'import numpy as np\n'), ((439, 463), 'tensorflow.set_random_seed', 'tf.set_random_seed', (['seed'], {}), '(seed)\n', (457, 463), True, 'import tensorflow as tf\n'), ((839, 908), 'tensorflow.placeholder', 'tf.placeholder', ([], {'dtype': 'tf.float32', 'shape': '(None,)', 'name': '"""actions_value"""'}), "(dtype=tf.float32, shape=(None,), name='actions_value')\n", (853, 908), True, 'import tensorflow as tf\n'), ((1014, 1026), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (1024, 1026), True, 'import tensorflow as tf\n'), ((1949, 1979), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['pre_sum'], {'axis': '(1)'}), '(pre_sum, axis=1)\n', (1962, 1979), True, 'import tensorflow as tf\n'), ((2186, 2258), 'tensorflow.layers.dense', 'tf.layers.dense', (['x'], {'units': 'hidden_sizes[-1]', 'activation': 'output_activation'}), '(x, units=hidden_sizes[-1], activation=output_activation)\n', (2201, 2258), True, 'import tensorflow as tf\n'), ((3107, 3136), 'tensorflow.nn.log_softmax', 'tf.nn.log_softmax', (['act_logits'], {}), '(act_logits)\n', (3124, 3136), True, 'import tensorflow as tf\n'), ((4620, 4635), 'tensorflow.exp', 'tf.exp', (['log_std'], {}), '(log_std)\n', (4626, 4635), True, 'import tensorflow as tf\n'), ((5054, 5120), 'tensorflow.placeholder', 'tf.placeholder', ([], {'dtype': 'tf.float32', 'shape': '(None,)', 'name': '"""ep_returns"""'}), "(dtype=tf.float32, shape=(None,), name='ep_returns')\n", (5068, 5120), True, 'import tensorflow as tf\n'), ((6029, 6044), 'numpy.mean', 'np.mean', (['ep_ret'], {}), '(ep_ret)\n', (6036, 6044), True, 'import numpy as np\n'), ((6063, 6077), 'numpy.std', 'np.std', (['ep_ret'], {}), '(ep_ret)\n', (6069, 6077), True, 'import numpy as np\n'), ((1142, 1189), 'tensorflow.summary.FileWriter', 'tf.summary.FileWriter', (['"""logs/"""', 'self.sess.graph'], {}), "('logs/', self.sess.graph)\n", (1163, 1189), True, 
'import tensorflow as tf\n'), ((1213, 1246), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (1244, 1246), True, 'import tensorflow as tf\n'), ((1534, 1588), 'tensorflow.placeholder', 'tf.placeholder', ([], {'dtype': 'tf.float32', 'shape': 'dim', 'name': 'name'}), '(dtype=tf.float32, shape=dim, name=name)\n', (1548, 1588), True, 'import tensorflow as tf\n'), ((2120, 2170), 'tensorflow.layers.dense', 'tf.layers.dense', (['x'], {'units': 'h', 'activation': 'activation'}), '(x, units=h, activation=activation)\n', (2135, 2170), True, 'import tensorflow as tf\n'), ((3233, 3262), 'tensorflow.multinomial', 'tf.multinomial', (['act_logits', '(1)'], {}), '(act_logits, 1)\n', (3247, 3262), True, 'import tensorflow as tf\n'), ((5506, 5546), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['(logp_batch * self.ep_ret)'], {}), '(logp_batch * self.ep_ret)\n', (5520, 5546), True, 'import tensorflow as tf\n'), ((5560, 5582), 'tensorflow.name_scope', 'tf.name_scope', (['"""train"""'], {}), "('train')\n", (5573, 5582), True, 'import tensorflow as tf\n'), ((1650, 1706), 'tensorflow.placeholder', 'tf.placeholder', ([], {'dtype': 'tf.int32', 'shape': '(None,)', 'name': 'name'}), '(dtype=tf.int32, shape=(None,), name=name)\n', (1664, 1706), True, 'import tensorflow as tf\n'), ((1915, 1932), 'numpy.log', 'np.log', (['(2 * np.pi)'], {}), '(2 * np.pi)\n', (1921, 1932), True, 'import numpy as np\n'), ((3405, 3433), 'tensorflow.one_hot', 'tf.one_hot', (['a'], {'depth': 'act_dim'}), '(a, depth=act_dim)\n', (3415, 3433), True, 'import tensorflow as tf\n'), ((3483, 3512), 'tensorflow.one_hot', 'tf.one_hot', (['pi'], {'depth': 'act_dim'}), '(pi, depth=act_dim)\n', (3493, 3512), True, 'import tensorflow as tf\n'), ((4570, 4604), 'numpy.ones', 'np.ones', (['act_dim'], {'dtype': 'np.float32'}), '(act_dim, dtype=np.float32)\n', (4577, 4604), True, 'import numpy as np\n'), ((4671, 4683), 'tensorflow.shape', 'tf.shape', (['mu'], {}), '(mu)\n', (4679, 4683), 
True, 'import tensorflow as tf\n'), ((5612, 5643), 'tensorflow.train.AdamOptimizer', 'tf.train.AdamOptimizer', (['self.lr'], {}), '(self.lr)\n', (5634, 5643), True, 'import tensorflow as tf\n'), ((6461, 6481), 'numpy.vstack', 'np.vstack', (['self.ep_s'], {}), '(self.ep_s)\n', (6470, 6481), True, 'import numpy as np\n'), ((7083, 7096), 'numpy.squeeze', 'np.squeeze', (['a'], {}), '(a)\n', (7093, 7096), True, 'import numpy as np\n'), ((6539, 6558), 'numpy.array', 'np.array', (['self.ep_a'], {}), '(self.ep_a)\n', (6547, 6558), True, 'import numpy as np\n'), ((1870, 1885), 'tensorflow.exp', 'tf.exp', (['log_std'], {}), '(log_std)\n', (1876, 1885), True, 'import tensorflow as tf\n')] |
import numpy as np
from mpl_toolkits.mplot3d import Axes3D
# Axes3D import has side effects, it enables using projection='3d' in add_subplot
import matplotlib
import matplotlib.pyplot as plt
from matplotlib import cm
#from matplotlib.ticker import LinearLocator, FormatStrFormatter
#import pdb
def surfForcePlot(prob):
    """Plot normalised X/Y/Z aerodynamic point forces on the deformed wing.

    Pulls the deformed mesh and the panel point forces from the OpenMDAO
    problem ``prob``, normalises each force component by its own maximum,
    and saves three colour-mapped 3D surface plots (surf_fx.png,
    surf_fy.png, surf_fz.png), showing each figure as it is produced.
    """
    def_mesh = prob['AS_point_0.coupled.wing.def_mesh']
    forces = prob['AS_point_0.coupled.aero_states.wing_mesh_point_forces']
    # Normalise each force component by its own maximum.
    # NOTE(review): assumes each component's max is positive and non-zero;
    # confirm for load cases with purely negative force components.
    fx = forces[:, :, 0] / forces[:, :, 0].max()
    fy = forces[:, :, 1] / forces[:, :, 1].max()
    fz = forces[:, :, 2] / forces[:, :, 2].max()
    X2 = def_mesh[:, :, 0]
    Y2 = def_mesh[:, :, 1]
    Z2 = def_mesh[:, :, 2]
    # Corner points of a cubic bounding box, plotted invisibly to simulate
    # an equal aspect ratio (mplot3d has no native 'axis equal').
    max_range = np.array([X2.max() - X2.min(), Y2.max() - Y2.min(), Z2.max() - Z2.min()]).max()
    Xb = 0.5 * max_range * np.mgrid[-1:2:2, -1:2:2, -1:2:2][0].flatten() + 0.5 * (X2.max() + X2.min())
    Yb = 0.5 * max_range * np.mgrid[-1:2:2, -1:2:2, -1:2:2][1].flatten() + 0.5 * (Y2.max() + Y2.min())
    Zb = 0.5 * max_range * np.mgrid[-1:2:2, -1:2:2, -1:2:2][2].flatten() + 0.5 * (Z2.max() + Z2.min())
    # BUG FIX: removed a stray `import pdb; pdb.set_trace()` that halted
    # execution before any plotting happened, and each figure's colorbar
    # below now uses its own surface (the old code passed the first
    # figure's surface to all three colorbars).

    def _draw_component(fvals, elev, fname):
        # Draw one colour-mapped surface for a normalised force component.
        fig = plt.figure(1, figsize=(5, 4), dpi=300)
        ax = fig.gca(projection='3d')
        surf = ax.plot_surface(
            X2, Y2, Z2, rstride=1, cstride=1,
            facecolors=cm.coolwarm(fvals),
            linewidth=0, antialiased=False, shade=False)
        # Invisible bounding-box corners enforce the equal aspect ratio.
        for xb, yb, zb in zip(Xb, Yb, Zb):
            ax.plot([xb], [yb], [zb], 'w')
        ax.plot_wireframe(X2, Y2, Z2, color='k', linewidth=0.25)
        fig.colorbar(surf, shrink=0.5, aspect=5)
        ax.set_xlabel('X')
        ax.set_ylabel('Y')
        ax.set_zlabel('Z')
        ax.elev = elev
        ax.azim = 255.
        ax.set_zlim([-2., 2.])
        plt.grid()
        plt.savefig(fname)
        plt.show()

    _draw_component(fx, 90., 'surf_fx.png')
    _draw_component(fy, 25., 'surf_fy.png')
    _draw_component(fz, 25., 'surf_fz.png')
def meshPlot(mesh,azim=225,elev=45,deformed=False,name=None,showIt=True,
            bounds=[[-0.61,0.61],[-0.61,0.61],[-0.15,0.15]],axisEqual=False,\
            size=(12,6)):
    """Render a wing mesh as a 3D surface, optionally colour-mapped by Z.

    :param mesh: array of shape (n_chord, n_span, 3) holding XYZ nodes.
    :param azim: camera azimuth in degrees.
    :param elev: camera elevation in degrees.
    :param deformed: colour the surface by Z deflection and add a colorbar.
    :param name: if given, save the figure as ``name + '.png'``.
    :param showIt: display the figure interactively.
    :param bounds: [[xmin,xmax],[ymin,ymax],[zmin,zmax]] axis limits, or
        None to skip setting limits. NOTE(review): the default is a
        mutable list -- never mutated here, but avoid modifying it.
    :param axisEqual: plot invisible bounding-box corners to fake an
        equal aspect ratio.
    :param size: figure size in inches (width, height).
    """
    X = mesh[:,:,0];
    Y = mesh[:,:,1];
    Z = mesh[:,:,2];
    # Create cubic bounding box to simulate equal aspect ratio
    max_range = np.array([X.max()-X.min(), Y.max()-Y.min(), 0.2-(-0.2)]).max()
    Xb = 0.5*max_range*np.mgrid[-1:2:2,-1:2:2,-1:2:2][0].flatten() + 0.5*(X.max()+X.min())
    Yb = 0.5*max_range*np.mgrid[-1:2:2,-1:2:2,-1:2:2][1].flatten() + 0.5*(Y.max()+Y.min())
    Zb = 0.5*max_range*np.mgrid[-1:2:2,-1:2:2,-1:2:2][2].flatten() + 0.5*(Z.max()+Z.min())
    fig1 = plt.figure(1,figsize=size,dpi=500)
    ax1 = fig1.gca(projection='3d')
    if deformed==True:
        # Colour-map the surface by Z and attach a labelled colorbar.
        surf = ax1.plot_surface(
            X, Y, Z, rstride=1, cstride=1,
            cmap='coolwarm',
            linewidth=0, antialiased=False, shade=False)
        m = cm.ScalarMappable(cmap=surf.cmap, norm=surf.norm)
        m.set_array(Z)
        cbar = plt.colorbar(m)
        m.set_clim([-0.025,0.025])
        cbar.set_label('Z deflection (m)')
    else:
        # Plain white shaded surface for the undeformed mesh.
        ax1.plot_surface(
            X, Y, Z, rstride=1, cstride=1,
            color='w',
            linewidth=0, antialiased=False, shade=True)
    if axisEqual:
        # Invisible corner points trick mplot3d into an equal aspect ratio.
        for xb, yb, zb in zip(Xb, Yb, Zb):
            ax1.plot([xb], [yb], [zb], 'w')
    ax1.plot_wireframe(X,Y,Z,color='k',linewidth=0.25)
    ax1.set_xlabel('X (m)')
    ax1.set_ylabel('Y (m)')
    ax1.set_zlabel('Z (m)')
    if bounds is not None:
        ax1.set_xlim3d(bounds[0])
        ax1.set_ylim3d(bounds[1])
        ax1.set_zlim3d(bounds[2])
    plt.locator_params(axis='x', nbins=5)
    plt.locator_params(axis='y', nbins=5)
    plt.locator_params(axis='z', nbins=3)
    ax1.elev=elev
    ax1.azim=azim
    # Hide panes and grid lines for a clean, axis-free rendering.
    ax1.xaxis.set_pane_color((1.0, 1.0, 1.0, 0.0))
    ax1.yaxis.set_pane_color((1.0, 1.0, 1.0, 0.0))
    ax1.zaxis.set_pane_color((1.0, 1.0, 1.0, 0.0))
    ax1.xaxis._axinfo["grid"]['color'] = (1,1,1,0)
    ax1.yaxis._axinfo["grid"]['color'] = (1,1,1,0)
    ax1.zaxis._axinfo["grid"]['color'] = (1,1,1,0)
    plt.axis('off')
    if name is not None:
        plt.savefig(name+'.png',dpi=900)
    if showIt:
        plt.show()
def twistPlot(mesh,azim=225,elev=45,name=None,showIt=True,minmax=[-3,3],\
            bounds=[[-0.61,0.61],[-0.61,0.61],[-0.15,0.15]],axisEqual=False,\
            size=(12,6),titleStr=None,noAxes=False):
    """Render a wing mesh as a 3D surface colour-mapped by local twist.

    :param mesh: array of shape (n_chord, n_span, 3) holding XYZ nodes.
    :param azim: camera azimuth in degrees.
    :param elev: camera elevation in degrees.
    :param name: if given, save the figure as ``name + '.png'``.
    :param showIt: display the figure interactively.
    :param minmax: colour scale limits in degrees of twist.
    :param bounds: [[xmin,xmax],[ymin,ymax],[zmin,zmax]] axis limits, or
        None to skip. NOTE(review): mutable default lists -- not mutated
        here, but avoid modifying them.
    :param axisEqual: plot invisible bounding-box corners to fake an
        equal aspect ratio.
    :param size: figure size in inches (width, height).
    :param titleStr: optional figure title.
    :param noAxes: hide panes/grid instead of drawing the grid.
    """
    X = mesh[:,:,0];
    Y = mesh[:,:,1];
    Z = mesh[:,:,2];
    # Create cubic bounding box to simulate equal aspect ratio
    max_range = np.array([X.max()-X.min(), Y.max()-Y.min(), 0.2-(-0.2)]).max()
    Xb = 0.5*max_range*np.mgrid[-1:2:2,-1:2:2,-1:2:2][0].flatten() + 0.5*(X.max()+X.min())
    Yb = 0.5*max_range*np.mgrid[-1:2:2,-1:2:2,-1:2:2][1].flatten() + 0.5*(Y.max()+Y.min())
    Zb = 0.5*max_range*np.mgrid[-1:2:2,-1:2:2,-1:2:2][2].flatten() + 0.5*(Z.max()+Z.min())
    fig1 = plt.figure(1,figsize=size,dpi=300)
    ax1 = fig1.gca(projection='3d')
    # Twist at each spanwise station from the angle between the chord
    # vector (first to last chordwise mesh row) and the -Z axis.
    # NOTE(review): assumes the first mesh axis runs chordwise -- confirm
    # the mesh ordering against the caller.
    mesh_vec = mesh[-1,:,:] - mesh[0,:,:]
    ang_vec = np.zeros(len(mesh_vec))
    for j in range(len(mesh_vec)):
        ang_vec[j] = 90 - (np.arccos(np.dot(np.array([0,0,-1]),mesh_vec[j,:])\
                    /(np.linalg.norm(mesh_vec[j,:]))) * 180./np.pi)
    # Broadcast the per-station angles over the whole chordwise grid.
    angs = np.reshape(ang_vec,(1,len(ang_vec))) * np.ones(np.shape(mesh[:,:,0]))
    norm = matplotlib.colors.Normalize(minmax[0],minmax[1])
    m = cm.ScalarMappable(norm=norm, cmap='coolwarm')
    m.set_array([])
    fcolors = m.to_rgba(angs)
    ax1.plot_surface(X, Y, Z, facecolors=fcolors,
        linewidth=0, antialiased=False, shade=False)
    cbar = plt.colorbar(m)#,orientation='horizontal')
    cbar.set_label('Twist (deg)')
    if axisEqual:
        # Invisible corner points trick mplot3d into an equal aspect ratio.
        for xb, yb, zb in zip(Xb, Yb, Zb):
            ax1.plot([xb], [yb], [zb], 'w')
    ax1.plot_wireframe(X,Y,Z,color='k',linewidth=0.25)
    ax1.set_xlabel('X (m)')
    ax1.set_ylabel('Y (m)')
    ax1.set_zlabel('Z (m)')
    if bounds is not None:
        ax1.set_xlim3d(bounds[0])
        ax1.set_ylim3d(bounds[1])
        ax1.set_zlim3d(bounds[2])
    plt.locator_params(axis='x', nbins=5)
    plt.locator_params(axis='y', nbins=5)
    plt.locator_params(axis='z', nbins=3)
    if titleStr is not None:
        plt.title(titleStr)
    ax1.elev=elev
    ax1.azim=azim
    if noAxes:
        # Hide panes and grid lines for a clean, axis-free rendering.
        ax1.xaxis.set_pane_color((1.0, 1.0, 1.0, 0.0))
        ax1.yaxis.set_pane_color((1.0, 1.0, 1.0, 0.0))
        ax1.zaxis.set_pane_color((1.0, 1.0, 1.0, 0.0))
        ax1.xaxis._axinfo["grid"]['color'] = (1,1,1,0)
        ax1.yaxis._axinfo["grid"]['color'] = (1,1,1,0)
        ax1.zaxis._axinfo["grid"]['color'] = (1,1,1,0)
    else:
        plt.grid()
    if name is not None:
        plt.savefig(name+'.png',dpi=1200)
    if showIt:
        plt.show()
# import pdb;pdb.set_trace(); | [
"matplotlib.pyplot.title",
"matplotlib.pyplot.show",
"matplotlib.pyplot.locator_params",
"matplotlib.colors.Normalize",
"matplotlib.cm.coolwarm",
"matplotlib.cm.ScalarMappable",
"matplotlib.pyplot.axis",
"matplotlib.pyplot.colorbar",
"numpy.shape",
"matplotlib.pyplot.figure",
"numpy.linalg.norm"... | [((1305, 1320), 'pdb.set_trace', 'pdb.set_trace', ([], {}), '()\n', (1318, 1320), False, 'import pdb\n'), ((1352, 1390), 'matplotlib.pyplot.figure', 'plt.figure', (['(1)'], {'figsize': '(5, 4)', 'dpi': '(300)'}), '(1, figsize=(5, 4), dpi=300)\n', (1362, 1390), True, 'import matplotlib.pyplot as plt\n'), ((1924, 1934), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (1932, 1934), True, 'import matplotlib.pyplot as plt\n'), ((1939, 1965), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""surf_fx.png"""'], {}), "('surf_fx.png')\n", (1950, 1965), True, 'import matplotlib.pyplot as plt\n'), ((1970, 1980), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1978, 1980), True, 'import matplotlib.pyplot as plt\n'), ((2017, 2055), 'matplotlib.pyplot.figure', 'plt.figure', (['(1)'], {'figsize': '(5, 4)', 'dpi': '(300)'}), '(1, figsize=(5, 4), dpi=300)\n', (2027, 2055), True, 'import matplotlib.pyplot as plt\n'), ((2590, 2600), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (2598, 2600), True, 'import matplotlib.pyplot as plt\n'), ((2605, 2631), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""surf_fy.png"""'], {}), "('surf_fy.png')\n", (2616, 2631), True, 'import matplotlib.pyplot as plt\n'), ((2636, 2646), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2644, 2646), True, 'import matplotlib.pyplot as plt\n'), ((2683, 2721), 'matplotlib.pyplot.figure', 'plt.figure', (['(1)'], {'figsize': '(5, 4)', 'dpi': '(300)'}), '(1, figsize=(5, 4), dpi=300)\n', (2693, 2721), True, 'import matplotlib.pyplot as plt\n'), ((3262, 3272), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (3270, 3272), True, 'import matplotlib.pyplot as plt\n'), ((3277, 3303), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""surf_fz.png"""'], {}), "('surf_fz.png')\n", (3288, 3303), True, 'import matplotlib.pyplot as plt\n'), ((3308, 3318), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3316, 3318), True, 'import matplotlib.pyplot as 
plt\n'), ((4032, 4068), 'matplotlib.pyplot.figure', 'plt.figure', (['(1)'], {'figsize': 'size', 'dpi': '(500)'}), '(1, figsize=size, dpi=500)\n', (4042, 4068), True, 'import matplotlib.pyplot as plt\n'), ((5057, 5094), 'matplotlib.pyplot.locator_params', 'plt.locator_params', ([], {'axis': '"""x"""', 'nbins': '(5)'}), "(axis='x', nbins=5)\n", (5075, 5094), True, 'import matplotlib.pyplot as plt\n'), ((5099, 5136), 'matplotlib.pyplot.locator_params', 'plt.locator_params', ([], {'axis': '"""y"""', 'nbins': '(5)'}), "(axis='y', nbins=5)\n", (5117, 5136), True, 'import matplotlib.pyplot as plt\n'), ((5141, 5178), 'matplotlib.pyplot.locator_params', 'plt.locator_params', ([], {'axis': '"""z"""', 'nbins': '(3)'}), "(axis='z', nbins=3)\n", (5159, 5178), True, 'import matplotlib.pyplot as plt\n'), ((5560, 5575), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (5568, 5575), True, 'import matplotlib.pyplot as plt\n'), ((6451, 6487), 'matplotlib.pyplot.figure', 'plt.figure', (['(1)'], {'figsize': 'size', 'dpi': '(300)'}), '(1, figsize=size, dpi=300)\n', (6461, 6487), True, 'import matplotlib.pyplot as plt\n'), ((6904, 6953), 'matplotlib.colors.Normalize', 'matplotlib.colors.Normalize', (['minmax[0]', 'minmax[1]'], {}), '(minmax[0], minmax[1])\n', (6931, 6953), False, 'import matplotlib\n'), ((6961, 7006), 'matplotlib.cm.ScalarMappable', 'cm.ScalarMappable', ([], {'norm': 'norm', 'cmap': '"""coolwarm"""'}), "(norm=norm, cmap='coolwarm')\n", (6978, 7006), False, 'from matplotlib import cm\n'), ((7182, 7197), 'matplotlib.pyplot.colorbar', 'plt.colorbar', (['m'], {}), '(m)\n', (7194, 7197), True, 'import matplotlib.pyplot as plt\n'), ((7663, 7700), 'matplotlib.pyplot.locator_params', 'plt.locator_params', ([], {'axis': '"""x"""', 'nbins': '(5)'}), "(axis='x', nbins=5)\n", (7681, 7700), True, 'import matplotlib.pyplot as plt\n'), ((7705, 7742), 'matplotlib.pyplot.locator_params', 'plt.locator_params', ([], {'axis': '"""y"""', 'nbins': '(5)'}), "(axis='y', 
nbins=5)\n", (7723, 7742), True, 'import matplotlib.pyplot as plt\n'), ((7747, 7784), 'matplotlib.pyplot.locator_params', 'plt.locator_params', ([], {'axis': '"""z"""', 'nbins': '(3)'}), "(axis='z', nbins=3)\n", (7765, 7784), True, 'import matplotlib.pyplot as plt\n'), ((4313, 4362), 'matplotlib.cm.ScalarMappable', 'cm.ScalarMappable', ([], {'cmap': 'surf.cmap', 'norm': 'surf.norm'}), '(cmap=surf.cmap, norm=surf.norm)\n', (4330, 4362), False, 'from matplotlib import cm\n'), ((4401, 4416), 'matplotlib.pyplot.colorbar', 'plt.colorbar', (['m'], {}), '(m)\n', (4413, 4416), True, 'import matplotlib.pyplot as plt\n'), ((5609, 5644), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(name + '.png')"], {'dpi': '(900)'}), "(name + '.png', dpi=900)\n", (5620, 5644), True, 'import matplotlib.pyplot as plt\n'), ((5674, 5684), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5682, 5684), True, 'import matplotlib.pyplot as plt\n'), ((7827, 7846), 'matplotlib.pyplot.title', 'plt.title', (['titleStr'], {}), '(titleStr)\n', (7836, 7846), True, 'import matplotlib.pyplot as plt\n'), ((8272, 8282), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (8280, 8282), True, 'import matplotlib.pyplot as plt\n'), ((8316, 8352), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(name + '.png')"], {'dpi': '(1200)'}), "(name + '.png', dpi=1200)\n", (8327, 8352), True, 'import matplotlib.pyplot as plt\n'), ((8382, 8392), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (8390, 8392), True, 'import matplotlib.pyplot as plt\n'), ((1520, 1535), 'matplotlib.cm.coolwarm', 'cm.coolwarm', (['fx'], {}), '(fx)\n', (1531, 1535), False, 'from matplotlib import cm\n'), ((2185, 2200), 'matplotlib.cm.coolwarm', 'cm.coolwarm', (['fy'], {}), '(fy)\n', (2196, 2200), False, 'from matplotlib import cm\n'), ((2856, 2871), 'matplotlib.cm.coolwarm', 'cm.coolwarm', (['fz'], {}), '(fz)\n', (2867, 2871), False, 'from matplotlib import cm\n'), ((6864, 6887), 'numpy.shape', 'np.shape', (['mesh[:, :, 0]'], {}), 
'(mesh[:, :, 0])\n', (6872, 6887), True, 'import numpy as np\n'), ((6755, 6785), 'numpy.linalg.norm', 'np.linalg.norm', (['mesh_vec[j, :]'], {}), '(mesh_vec[j, :])\n', (6769, 6785), True, 'import numpy as np\n'), ((6691, 6711), 'numpy.array', 'np.array', (['[0, 0, -1]'], {}), '([0, 0, -1])\n', (6699, 6711), True, 'import numpy as np\n')] |
import numpy as np
from . import _sequential, _vectorized, _cuda
from .. import settings
# Operators map.
_operations = None
def set_mode(device):
"""Set The Operation Mode by Switching The Map of Operators.
:param device: str, ('default', 'sequential', 'vectorized' or 'gpu'):
which device should perform the operations.
Options are:
* 'default' and 'vectorized' will execute with numpy.
* 'sequential' implements the operations as they would usually be
on the programming language C.
* 'gpu' uses the CUDA interface to make the
computations in the GPU device.
"""
global _operations
assert device in ('default', 'sequential', 'vectorized', 'gpu')
if device in ('default', 'vectorized'):
_operations = _vectorized.operations
elif device == 'sequential':
_operations = _sequential.operations
elif device == 'gpu':
_operations = _cuda.operations
def get_mode():
"""Get the mode in which the operations are currently set.
:return str: 'vectorized', 'sequential' or 'gpu'.
"""
global _operations
if _operations == _vectorized.operations:
return 'vectorized'
if _operations == _sequential.operations:
return 'sequential'
if _operations == _cuda.operations:
return 'gpu'
# Set operation mode to the default.
set_mode(settings.DEFAULT_OPERATION_MODE)
# Operator Wrappers.
def _run_operator(op, *args, **kwargs):
"""Run An Operator.
This is a private method and should not be invoked directly.
Instead, use one of the wrappers bellow.
:param op: str, name of the operator in `_ops` map.
:param args: positional arguments for the operation.
:param kwargs: key arguments for the operation.
:return: the operation result.
"""
if op not in _operations:
raise ValueError('%s operator is not defined' % op)
return _operations[op](*args, **kwargs)
def dot(a, b, out=None):
return _run_operator('dot', a, b, out=out)
def add(a, b, out=None):
return _run_operator('add', a, b, out=out)
def sub(a, b, out=None):
return _run_operator('sub', a, b, out=out)
def scale(alpha, a, out=None):
return _run_operator('scale', alpha, a, out=out)
def hadamard(a, b, out=None):
return _run_operator('hadamard', a, b, out=out)
def conv(t, tk, stride=(1, 1), padding=(1, 1), out=None):
return _run_operator('conv', t, tk,
stride=stride, padding=padding, out=out)
def add_bias(a, bias, out=None):
return _run_operator('add_bias', a, bias, out=out)
def transpose(a, axes=None):
return _run_operator('transpose', a, axes=axes)
def sum(a, axis=None, dtype=None, out=None, keepdims=False):
return np.sum(a, axis=axis, dtype=dtype, out=out, keepdims=keepdims)
def argmax(a, axis=None, out=None):
return np.argmax(a, axis=axis, out=out)
class Device(object):
"""Helper class for clean scope setting.
Example:
>>> # Default device is 'vectorized'
>>> with Device('gpu') as s:
>>> ... # Do some work with the GPU.
>>> # Vectorized device is in use once again.
"""
def __init__(self, device_name):
self.device_name = device_name
self.previous_device = None
def __enter__(self):
self.previous_device = get_mode()
set_mode(self.device_name)
return self
def __exit__(self, exc_type, exc_val, exc_tb):
set_mode(self.previous_device)
| [
"numpy.sum",
"numpy.argmax"
] | [((2772, 2833), 'numpy.sum', 'np.sum', (['a'], {'axis': 'axis', 'dtype': 'dtype', 'out': 'out', 'keepdims': 'keepdims'}), '(a, axis=axis, dtype=dtype, out=out, keepdims=keepdims)\n', (2778, 2833), True, 'import numpy as np\n'), ((2883, 2915), 'numpy.argmax', 'np.argmax', (['a'], {'axis': 'axis', 'out': 'out'}), '(a, axis=axis, out=out)\n', (2892, 2915), True, 'import numpy as np\n')] |
from typing import Callable, Optional, Tuple
import numpy as np
from numpy import ndarray
from skfem.element import (BOUNDARY_ELEMENT_MAP, DiscreteField, Element,
ElementHex0, ElementQuad0, ElementTetP0,
ElementTriP0)
from skfem.mapping import Mapping
from skfem.mesh import Mesh, MeshHex, MeshLine, MeshQuad, MeshTet, MeshTri
from .abstract_basis import AbstractBasis
from .cell_basis import CellBasis
class BoundaryFacetBasis(AbstractBasis):
    """For fields defined on the boundary of the domain."""
    def __init__(self,
                 mesh: Mesh,
                 elem: Element,
                 mapping: Optional[Mapping] = None,
                 intorder: Optional[int] = None,
                 quadrature: Optional[Tuple[ndarray, ndarray]] = None,
                 facets: Optional[ndarray] = None,
                 _side: int = 0):
        """Precomputed global basis on boundary facets.
        Parameters
        ----------
        mesh
            An object of type :class:`~skfem.mesh.Mesh`.
        elem
            An object of type :class:`~skfem.element.Element`.
        mapping
            An object of type :class:`skfem.mapping.Mapping`. If `None`, uses
            `mesh.mapping`.
        intorder
            Optional integration order, i.e. the degree of polynomials that are
            integrated exactly by the used quadrature. Not used if `quadrature`
            is specified.
        quadrature
            Optional tuple of quadrature points and weights.
        facets
            Optional subset of facet indices.
        """
        # Initialise the abstract basis on the boundary reference domain.
        super(BoundaryFacetBasis, self).__init__(mesh,
                                                 elem,
                                                 mapping,
                                                 intorder,
                                                 quadrature,
                                                 mesh.brefdom)
        # facets where the basis is evaluated
        if facets is None:
            # default: exterior facets, i.e. facets with no second neighbour
            self.find = np.nonzero(self.mesh.f2t[1] == -1)[0]
        else:
            self.find = facets
        # elements adjacent to the selected facets on side `_side`
        self.tind = self.mesh.f2t[_side, self.find]
        # boundary refdom to global facet
        x = self.mapping.G(self.X, find=self.find)
        # global facet to refdom facet
        Y = self.mapping.invF(x, tind=self.tind)
        # construct normal vectors from side=0 always
        Y0 = self.mapping.invF(x, tind=self.mesh.f2t[0, self.find])
        self.normals = DiscreteField(
            value=self.mapping.normals(Y0,
                                      self.mesh.f2t[0, self.find],
                                      self.find,
                                      self.mesh.t2f)
        )
        self.nelems = len(self.find)
        # global basis functions evaluated at the facet quadrature points
        self.basis = [self.elem.gbasis(self.mapping, Y, j, tind=self.tind)
                      for j in range(self.Nbfun)]
        # integration weights: |det DG| times the tiled quadrature weights
        self.dx = (np.abs(self.mapping.detDG(self.X, find=self.find))
                   * np.tile(self.W, (self.nelems, 1)))
    def default_parameters(self):
        """Return default parameters for `~skfem.assembly.asm`."""
        return {'x': self.global_coordinates(),
                'h': self.mesh_parameters(),
                'n': self.normals}
    def global_coordinates(self) -> DiscreteField:
        """Return the global coordinates of the facet quadrature points."""
        return DiscreteField(self.mapping.G(self.X, find=self.find))
    def mesh_parameters(self) -> DiscreteField:
        """Return the local mesh parameter; zero for one-dimensional meshes."""
        return DiscreteField((np.abs(self.mapping.detDG(self.X, self.find))
                              ** (1. / (self.mesh.dim() - 1.)))
                             if self.mesh.dim() != 1 else np.array([0.]))
    def _trace_project(self,
                       x: ndarray,
                       elem: Element) -> ndarray:
        """Trace mesh basis projection."""
        from skfem.utils import projection
        # Build a facet basis for the target element on the same facets and
        # quadrature so that the projection is consistent with `self`.
        fbasis = BoundaryFacetBasis(self.mesh,
                                   elem,
                                   facets=self.find,
                                   quadrature=(self.X, self.W))
        I = fbasis.get_dofs(self.find).all()
        if len(I) == 0: # special case: no facet DOFs
            if fbasis.dofs.interior_dofs is not None:
                if fbasis.dofs.interior_dofs.shape[0] > 1:
                    # no one-to-one restriction: requires interpolation
                    raise NotImplementedError
                # special case: piecewise constant elem
                I = fbasis.dofs.interior_dofs[:, self.tind].flatten()
            else:
                raise ValueError
        return projection(x, fbasis, self, I=I)
    def trace(self,
              x: ndarray,
              projection: Callable[[ndarray], ndarray],
              target_elem: Optional[Element] = None) -> Tuple[CellBasis,
                                                              ndarray]:
        """Restrict solution to :math:`d-1` dimensional trace mesh.
        The parameter ``projection`` defines how the boundary points are
        projected to :math:`d-1` dimensional space. For example,
        >>> projection = lambda p: p[0]
        will keep only the `x`-coordinate in the trace mesh.
        Parameters
        ----------
        x
            The solution vector.
        projection
            A function defining the projection of the boundary points. See
            above for an example.
        target_elem
            Optional finite element to project to before restriction. If not
            given, a piecewise constant element is used.
        Returns
        -------
        CellBasis
            An object corresponding to the trace mesh.
        ndarray
            A projected solution vector defined on the trace mesh.
        """
        # default piecewise-constant target element for each mesh type
        DEFAULT_TARGET = {
            MeshTri: ElementTriP0,
            MeshQuad: ElementQuad0,
            MeshTet: ElementTetP0,
            MeshHex: ElementHex0,
        }
        meshcls = type(self.mesh)
        if meshcls not in DEFAULT_TARGET:
            raise NotImplementedError("Mesh type not supported.")
        if target_elem is None:
            target_elem = DEFAULT_TARGET[meshcls]()
        if type(target_elem) not in BOUNDARY_ELEMENT_MAP:
            raise Exception("The specified element not supported.")
        elemcls = BOUNDARY_ELEMENT_MAP[type(target_elem)]
        # the trace mesh lives in one dimension lower than the original mesh
        target_meshcls = {
            MeshTri: MeshLine,
            MeshQuad: MeshLine,
            MeshTet: MeshTri,
            MeshHex: MeshQuad,
        }[meshcls]
        # re-index the selected boundary facets into a standalone mesh
        p, t = self.mesh._reix(self.mesh.facets[:, self.find])
        return (
            CellBasis(target_meshcls(projection(p), t), elemcls()),
            self._trace_project(x, target_elem)
        )
    def with_element(self, elem: Element) -> 'BoundaryFacetBasis':
        """Return a similar basis using a different element."""
        return type(self)(
            self.mesh,
            elem,
            mapping=self.mapping,
            quadrature=self.quadrature,
            facets=self.find,
        )
| [
"skfem.utils.projection",
"numpy.nonzero",
"numpy.tile",
"numpy.array"
] | [((4615, 4647), 'skfem.utils.projection', 'projection', (['x', 'fbasis', 'self'], {'I': 'I'}), '(x, fbasis, self, I=I)\n', (4625, 4647), False, 'from skfem.utils import projection\n'), ((3032, 3065), 'numpy.tile', 'np.tile', (['self.W', '(self.nelems, 1)'], {}), '(self.W, (self.nelems, 1))\n', (3039, 3065), True, 'import numpy as np\n'), ((2073, 2107), 'numpy.nonzero', 'np.nonzero', (['(self.mesh.f2t[1] == -1)'], {}), '(self.mesh.f2t[1] == -1)\n', (2083, 2107), True, 'import numpy as np\n'), ((3665, 3680), 'numpy.array', 'np.array', (['[0.0]'], {}), '([0.0])\n', (3673, 3680), True, 'import numpy as np\n'), ((6653, 6666), 'skfem.utils.projection', 'projection', (['p'], {}), '(p)\n', (6663, 6666), False, 'from skfem.utils import projection\n')] |
######################################################################################################
#
# Organization: Asociacion De Investigacion En Inteligencia Artificial Para La Leucemia Peter Moss
# Repository: HIAS: Hospital Intelligent Automation System
# Project: TassAI
#
# Author: <NAME> (<EMAIL>)
#
# Title: TassAI Class
# Description: TassAI functions for the Hospital Intelligent Automation System.
# License: MIT License
# Last Modified: 2020-08-26
#
######################################################################################################
import cv2
import os
import os.path as osp
import numpy as np
from Classes.Helpers import Helpers
from Classes.OpenVINO.ie_module import InferenceContext
from Classes.OpenVINO.landmarks_detector import LandmarksDetector
from Classes.OpenVINO.face_detector import FaceDetector
from Classes.OpenVINO.faces_database import FacesDatabase
from Classes.OpenVINO.face_identifier import FaceIdentifier
class TassAI():
    def __init__(self):
        """ TassAI Class
        TassAI functions for the Hospital Intelligent Automation System.
        """

        self.Helpers = Helpers("TassAI", False)
        # Maximum number of faces queued per frame for landmark/identity inference.
        self.qs = 16
        self.context = InferenceContext([self.Helpers.confs["iotJumpWay"]["MQTT"]["TassAI"]["runas"], self.Helpers.confs["iotJumpWay"]["MQTT"]["TassAI"]["runas"], self.Helpers.confs["iotJumpWay"]["MQTT"]["TassAI"]["runas"]], "", "", "")

        self.Helpers.logger.info("TassAI Helper Class initialization complete.")

    def load_models(self):
        """ Loads all models (detection, landmarks, reidentification) and deploys them. """

        face_detector_net = self.load_model(
            self.Helpers.confs["iotJumpWay"]["MQTT"]["TassAI"]["detection"])
        # The detector network expects a single 3-channel 384x672 input named "data".
        face_detector_net.reshape({"data": [1, 3, 384, 672]})
        landmarks_net = self.load_model(
            self.Helpers.confs["iotJumpWay"]["MQTT"]["TassAI"]["landmarks"])
        face_reid_net = self.load_model(
            self.Helpers.confs["iotJumpWay"]["MQTT"]["TassAI"]["reidentification"])

        self.face_detector = FaceDetector(face_detector_net,
                                          confidence_threshold=0.6,
                                          roi_scale_factor=1.15)
        self.landmarks_detector = LandmarksDetector(landmarks_net)
        self.face_identifier = FaceIdentifier(face_reid_net,
                                             match_threshold=0.3,
                                             match_algo='HUNGARIAN')

        self.face_detector.deploy(self.Helpers.confs["iotJumpWay"]["MQTT"]["TassAI"]["runas"], self.context)
        self.landmarks_detector.deploy(self.Helpers.confs["iotJumpWay"]["MQTT"]["TassAI"]["runas"], self.context,
                                       queue_size=self.qs)
        self.face_identifier.deploy(self.Helpers.confs["iotJumpWay"]["MQTT"]["TassAI"]["runas"], self.context,
                                    queue_size=self.qs)

        self.Helpers.logger.info("Models loaded")

    def load_model(self, model_path):
        """ Loads a model from path.

        The weights file is assumed to sit next to the model file with the
        same base name and a .bin extension.
        """

        model_path = osp.abspath(model_path)
        model_weights_path = osp.splitext(model_path)[0] + ".bin"
        self.Helpers.logger.info("Loading the model from '%s'" % (model_path))
        model = self.context.ie_core.read_network(model_path, model_weights_path)
        self.Helpers.logger.info("Model loaded")

        return model

    def load_known(self):
        """ Loads known data and registers it with the face identifier. """

        self.faces_database = FacesDatabase(self.Helpers.confs["iotJumpWay"]["MQTT"]["TassAI"]["data"], self.face_identifier,
                                            self.landmarks_detector, self.face_detector, True)
        self.face_identifier.set_faces_database(self.faces_database)

        self.Helpers.logger.info("Database is built, registered %s identities" %
                                 (len(self.faces_database)))

    def process(self, frame):
        """ Processes a frame.

        Returns [rois, landmarks, face_identities] for the detected faces.
        """

        orig_image = frame.copy()
        # HWC -> CHW, then add the batch dimension expected by the networks.
        frame = frame.transpose((2, 0, 1))
        frame = np.expand_dims(frame, axis=0)

        self.face_detector.clear()
        self.landmarks_detector.clear()
        self.face_identifier.clear()

        self.face_detector.start_async(frame)
        rois = self.face_detector.get_roi_proposals(frame)
        # Cap the number of faces to the inference queue size.
        if self.qs < len(rois):
            self.Helpers.logger.info("Too many faces for processing." \
                    " Will be processed only %s of %s." % \
                    (self.qs, len(rois)))
            rois = rois[:self.qs]

        self.landmarks_detector.start_async(frame, rois)
        landmarks = self.landmarks_detector.get_landmarks()

        self.face_identifier.start_async(frame, rois, landmarks)
        face_identities, unknowns = self.face_identifier.get_matches()

        outputs = [rois, landmarks, face_identities]
        return outputs

    def draw_text_with_background(self, frame, text, origin,
                                  font=cv2.FONT_HERSHEY_SIMPLEX, scale=1.0,
                                  color=(0, 0, 0), thickness=1, bgcolor=(255, 255, 255)):
        """ Draws text over a filled background rectangle.

        Returns the (text_size, baseline) tuple from cv2.getTextSize.
        """

        text_size, baseline = cv2.getTextSize(text, font, scale, thickness)
        cv2.rectangle(frame,
                      tuple((origin + (0, baseline)).astype(int)),
                      tuple((origin + (text_size[0], -text_size[1])).astype(int)),
                      bgcolor, cv2.FILLED)
        cv2.putText(frame, text,
                    tuple(origin.astype(int)),
                    font, scale, color, thickness)

        return text_size, baseline

    def draw_detection_roi(self, frame, roi, identity):
        """ Draws the face bounding box and identity label for one detection.

        Returns the annotated frame and the identity label.
        """

        label = self.face_identifier.get_identity_label(identity.id)

        # Draw face ROI border
        cv2.rectangle(frame,
                      tuple(roi.position), tuple(roi.position + roi.size),
                      (0, 220, 0), 2)

        # Draw identity label
        text_scale = 0.5
        font = cv2.FONT_HERSHEY_SIMPLEX
        text_size = cv2.getTextSize("H1", font, text_scale, 1)
        line_height = np.array([0, text_size[0][1]])
        # BUG FIX: compare string values with ==, not identity (`is`), which is
        # implementation-dependent for string literals.
        if label == "Unknown":
            text = label
        else:
            text = "User #" + label
        if identity.id != FaceIdentifier.UNKNOWN_ID:
            text += ' %.2f%%' % (100.0 * (1 - identity.distance))
        self.draw_text_with_background(frame, text,
                                       roi.position - line_height * 0.5,
                                       font, scale=text_scale)

        return frame, label

    def draw_detection_keypoints(self, frame, roi, landmarks):
        """ Draws the facial landmark keypoints for one detection. """

        # BUG FIX: right_lip_corner was listed twice, drawing the same circle
        # twice; each landmark is now drawn once.
        keypoints = [landmarks.left_eye,
                     landmarks.right_eye,
                     landmarks.nose_tip,
                     landmarks.left_lip_corner,
                     landmarks.right_lip_corner]

        for point in keypoints:
            # Landmark coordinates are relative to the ROI; scale into the frame.
            center = roi.position + roi.size * point
            cv2.circle(frame, tuple(center.astype(int)), 2, (0, 255, 255), 2)

        return frame
| [
"os.path.abspath",
"Classes.OpenVINO.ie_module.InferenceContext",
"Classes.OpenVINO.faces_database.FacesDatabase",
"cv2.getTextSize",
"numpy.expand_dims",
"Classes.OpenVINO.face_identifier.FaceIdentifier",
"Classes.OpenVINO.face_detector.FaceDetector",
"numpy.array",
"os.path.splitext",
"Classes.H... | [((1149, 1173), 'Classes.Helpers.Helpers', 'Helpers', (['"""TassAI"""', '(False)'], {}), "('TassAI', False)\n", (1156, 1173), False, 'from Classes.Helpers import Helpers\n'), ((1207, 1429), 'Classes.OpenVINO.ie_module.InferenceContext', 'InferenceContext', (["[self.Helpers.confs['iotJumpWay']['MQTT']['TassAI']['runas'], self.Helpers.\n confs['iotJumpWay']['MQTT']['TassAI']['runas'], self.Helpers.confs[\n 'iotJumpWay']['MQTT']['TassAI']['runas']]", '""""""', '""""""', '""""""'], {}), "([self.Helpers.confs['iotJumpWay']['MQTT']['TassAI'][\n 'runas'], self.Helpers.confs['iotJumpWay']['MQTT']['TassAI']['runas'],\n self.Helpers.confs['iotJumpWay']['MQTT']['TassAI']['runas']], '', '', '')\n", (1223, 1429), False, 'from Classes.OpenVINO.ie_module import InferenceContext\n'), ((1953, 2038), 'Classes.OpenVINO.face_detector.FaceDetector', 'FaceDetector', (['face_detector_net'], {'confidence_threshold': '(0.6)', 'roi_scale_factor': '(1.15)'}), '(face_detector_net, confidence_threshold=0.6, roi_scale_factor=1.15\n )\n', (1965, 2038), False, 'from Classes.OpenVINO.face_detector import FaceDetector\n'), ((2081, 2113), 'Classes.OpenVINO.landmarks_detector.LandmarksDetector', 'LandmarksDetector', (['landmarks_net'], {}), '(landmarks_net)\n', (2098, 2113), False, 'from Classes.OpenVINO.landmarks_detector import LandmarksDetector\n'), ((2140, 2214), 'Classes.OpenVINO.face_identifier.FaceIdentifier', 'FaceIdentifier', (['face_reid_net'], {'match_threshold': '(0.3)', 'match_algo': '"""HUNGARIAN"""'}), "(face_reid_net, match_threshold=0.3, match_algo='HUNGARIAN')\n", (2154, 2214), False, 'from Classes.OpenVINO.face_identifier import FaceIdentifier\n'), ((2742, 2765), 'os.path.abspath', 'osp.abspath', (['model_path'], {}), '(model_path)\n', (2753, 2765), True, 'import os.path as osp\n'), ((3112, 3262), 'Classes.OpenVINO.faces_database.FacesDatabase', 'FacesDatabase', (["self.Helpers.confs['iotJumpWay']['MQTT']['TassAI']['data']", 'self.face_identifier', 
'self.landmarks_detector', 'self.face_detector', '(True)'], {}), "(self.Helpers.confs['iotJumpWay']['MQTT']['TassAI']['data'],\n self.face_identifier, self.landmarks_detector, self.face_detector, True)\n", (3125, 3262), False, 'from Classes.OpenVINO.faces_database import FacesDatabase\n'), ((3574, 3603), 'numpy.expand_dims', 'np.expand_dims', (['frame'], {'axis': '(0)'}), '(frame, axis=0)\n', (3588, 3603), True, 'import numpy as np\n'), ((4474, 4519), 'cv2.getTextSize', 'cv2.getTextSize', (['text', 'font', 'scale', 'thickness'], {}), '(text, font, scale, thickness)\n', (4489, 4519), False, 'import cv2\n'), ((5149, 5191), 'cv2.getTextSize', 'cv2.getTextSize', (['"""H1"""', 'font', 'text_scale', '(1)'], {}), "('H1', font, text_scale, 1)\n", (5164, 5191), False, 'import cv2\n'), ((5208, 5238), 'numpy.array', 'np.array', (['[0, text_size[0][1]]'], {}), '([0, text_size[0][1]])\n', (5216, 5238), True, 'import numpy as np\n'), ((2789, 2813), 'os.path.splitext', 'osp.splitext', (['model_path'], {}), '(model_path)\n', (2801, 2813), True, 'import os.path as osp\n')] |
from random import randint
import random
import numpy as np
import cv2
import matplotlib.pyplot as plt
import pickle
import os.path
MUTATE_PROBABILITY = 1 # (out of 100)
# NUM_PIXELS = 7896
NUM_MOVES = 6
class Strategy(object):
    """A single-layer neural policy mapping screen pixels to move scores.

    Strategies are the individuals of a genetic algorithm: created with
    random weights, combined via `breed` (uniform crossover) and perturbed
    via `mutate`.
    """

    def __init__(self,
                 w0 = None,
                 b0 = None,
                 MUTATE_PROBABILITY=0.01,
                 MUTATION_FACTOR=2,
                 NUM_PIXELS=7896
                 ):
        """Create a strategy, randomly initialized unless weights are given.

        :param w0: optional (NUM_PIXELS, NUM_MOVES) weight matrix.
        :param b0: optional (NUM_MOVES,) bias vector.
        :param MUTATE_PROBABILITY: per-entry mutation probability.
        :param MUTATION_FACTOR: mutations add an integer in [-factor, factor].
        :param NUM_PIXELS: length of the preprocessed input vector.
        """
        # Initialize weights and bias for layer one (uniform in [-1, 1))
        self.NUM_PIXELS = NUM_PIXELS
        if w0 is None and b0 is None:
            self.w0 = 2 * np.random.rand(self.NUM_PIXELS, NUM_MOVES) - 1
            self.b0 = 2 * np.random.rand(NUM_MOVES) - 1
        else:
            self.w0 = w0
            self.b0 = b0
        self.MUTATE_PROBABILITY = MUTATE_PROBABILITY
        self.MUTATION_FACTOR = MUTATION_FACTOR

    # Convert image to black and white and reduce size to make computation faster
    def preprocessImage(self, pixelInput):
        """Downscale, crop, binarise and flatten a raw frame.

        Inputs that are already 128 elements long are assumed to be
        preprocessed and are returned unchanged.
        """
        if len(pixelInput.flatten()) == 128:
            return pixelInput
        observation = cv2.cvtColor(cv2.resize(pixelInput, (84, 110)), cv2.COLOR_BGR2GRAY)
        observation = observation[16:110,:]
        ret, observation = cv2.threshold(observation,1,255,cv2.THRESH_BINARY)
        observation = observation.flatten()
        # Map every non-zero pixel to 1 so the network sees a binary image.
        return np.where(observation == 0, observation, 1)

    def sigmoid(self, x):
        """Elementwise logistic activation."""
        return 1.0/(1.0 + np.exp(-x))

    def calculateMove(self, pixelInput):
        """Return the index of the move with the highest activation."""
        pixelInput = self.preprocessImage(pixelInput)
        output = pixelInput.dot(self.w0) + self.b0
        output = self.sigmoid(output)
        return np.argmax(output)

    def mutate(self):
        """Perturb each weight/bias entry with probability MUTATE_PROBABILITY."""
        for i in range(0, self.NUM_PIXELS):
            for j in range(0, NUM_MOVES):
                if np.random.rand() <= self.MUTATE_PROBABILITY:
                    self.w0[i][j] += randint(-self.MUTATION_FACTOR, self.MUTATION_FACTOR)
        for i in range(0, NUM_MOVES):
            if np.random.rand() <= self.MUTATE_PROBABILITY:
                self.b0[i] += randint(-self.MUTATION_FACTOR, self.MUTATION_FACTOR)

    def breed(self, other):
        """Return a child strategy built by uniform crossover with `other`."""
        newWeights = np.empty([self.NUM_PIXELS, NUM_MOVES])
        newBias = np.empty([NUM_MOVES])
        for i in range(0, self.NUM_PIXELS):
            for j in range(0, NUM_MOVES):
                # Each weight is inherited from one parent chosen at random.
                if randint(1, 2) == 1:
                    newWeights[i][j] = self.w0[i][j]
                else:
                    newWeights[i][j] = other.w0[i][j]
        for i in range(0, NUM_MOVES):
            if randint(1, 2) == 1:
                newBias[i] = self.b0[i]
            else:
                newBias[i] = other.b0[i]
        return Strategy(newWeights,
                        newBias,
                        MUTATE_PROBABILITY=self.MUTATE_PROBABILITY,
                        MUTATION_FACTOR=self.MUTATION_FACTOR,
                        NUM_PIXELS=self.NUM_PIXELS)

    def export(self, score, path):
        """Pickle this strategy to `<path>/<score>.p`."""
        # BUG FIX: use a context manager so the file handle is closed.
        with open(os.path.join(path, score + '.p'), "wb") as handle:
            pickle.dump(self, handle)

    @staticmethod
    def load_strategy(path):
        """Unpickle a strategy previously saved with `export`."""
        # BUG FIX: close the file handle after loading.
        with open(path, "rb") as handle:
            return pickle.load(handle)
| [
"random.randint",
"numpy.argmax",
"numpy.empty",
"cv2.threshold",
"numpy.where",
"numpy.exp",
"numpy.random.rand",
"cv2.resize"
] | [((1265, 1318), 'cv2.threshold', 'cv2.threshold', (['observation', '(1)', '(255)', 'cv2.THRESH_BINARY'], {}), '(observation, 1, 255, cv2.THRESH_BINARY)\n', (1278, 1318), False, 'import cv2\n'), ((1375, 1417), 'numpy.where', 'np.where', (['(observation == 0)', 'observation', '(1)'], {}), '(observation == 0, observation, 1)\n', (1383, 1417), True, 'import numpy as np\n'), ((1687, 1704), 'numpy.argmax', 'np.argmax', (['output'], {}), '(output)\n', (1696, 1704), True, 'import numpy as np\n'), ((2204, 2242), 'numpy.empty', 'np.empty', (['[self.NUM_PIXELS, NUM_MOVES]'], {}), '([self.NUM_PIXELS, NUM_MOVES])\n', (2212, 2242), True, 'import numpy as np\n'), ((2261, 2282), 'numpy.empty', 'np.empty', (['[NUM_MOVES]'], {}), '([NUM_MOVES])\n', (2269, 2282), True, 'import numpy as np\n'), ((1139, 1172), 'cv2.resize', 'cv2.resize', (['pixelInput', '(84, 110)'], {}), '(pixelInput, (84, 110))\n', (1149, 1172), False, 'import cv2\n'), ((1471, 1481), 'numpy.exp', 'np.exp', (['(-x)'], {}), '(-x)\n', (1477, 1481), True, 'import numpy as np\n'), ((2022, 2038), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (2036, 2038), True, 'import numpy as np\n'), ((2097, 2149), 'random.randint', 'randint', (['(-self.MUTATION_FACTOR)', 'self.MUTATION_FACTOR'], {}), '(-self.MUTATION_FACTOR, self.MUTATION_FACTOR)\n', (2104, 2149), False, 'from random import randint\n'), ((2667, 2680), 'random.randint', 'randint', (['(1)', '(2)'], {}), '(1, 2)\n', (2674, 2680), False, 'from random import randint\n'), ((629, 671), 'numpy.random.rand', 'np.random.rand', (['self.NUM_PIXELS', 'NUM_MOVES'], {}), '(self.NUM_PIXELS, NUM_MOVES)\n', (643, 671), True, 'import numpy as np\n'), ((702, 727), 'numpy.random.rand', 'np.random.rand', (['NUM_MOVES'], {}), '(NUM_MOVES)\n', (716, 727), True, 'import numpy as np\n'), ((1833, 1849), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (1847, 1849), True, 'import numpy as np\n'), ((1916, 1968), 'random.randint', 'randint', (['(-self.MUTATION_FACTOR)', 
'self.MUTATION_FACTOR'], {}), '(-self.MUTATION_FACTOR, self.MUTATION_FACTOR)\n', (1923, 1968), False, 'from random import randint\n'), ((2388, 2401), 'random.randint', 'randint', (['(1)', '(2)'], {}), '(1, 2)\n', (2395, 2401), False, 'from random import randint\n')] |
import numpy as np
def _generate_autoregressive_mask(input_ordering, output_ordering):
input_size = len(input_ordering)
output_size = len(output_ordering)
connections_mask = np.ones((input_size, output_size))
for i in range(input_size):
connections_mask[i, :] *= (
input_ordering[i] <= output_ordering).astype("int32")
return connections_mask
def generate_autoregressive_masks(sizes, forced_input_ordering=None,
                                  forced_samplings=None,
                                  random_state=None):
    """Generate MADE-style autoregressive connectivity masks for an MLP.

    Parameters
    ----------
    sizes : sequence of layer widths, input layer first, output layer last.
    forced_input_ordering : optional explicit input ordering (a permutation
        of 1..sizes[0]); a random permutation is drawn when omitted.
    forced_samplings : optional per-hidden-layer unit degrees, one array per
        hidden layer with values in [1, sizes[0] - 1]; sampled at random
        when omitted.
    random_state : optional ``np.random.RandomState`` used for shuffling
        and degree sampling.

    Returns
    -------
    (masks, orderings_and_samplings) where masks[k] has shape
    (sizes[k], sizes[k + 1]).
    """
    if random_state is None:
        random_state = np.random.RandomState()
    masks = []
    orderings_and_samplings = []
    if forced_input_ordering is not None:
        assert len(forced_input_ordering) == sizes[0]
    if forced_samplings is not None:
        # -2 to discount input, output
        assert len(forced_samplings) == len(sizes) - 2
    for n, (i, j) in enumerate(list(zip(sizes[:-1], sizes[1:]))):
        if n == 0:
            # Input layer: fix the autoregressive ordering of the inputs.
            if forced_input_ordering is not None:
                input_ordering = forced_input_ordering
            else:
                input_ordering = np.arange(1, sizes[0] + 1)
                random_state.shuffle(input_ordering)
            if forced_samplings is not None:
                output_ordering = forced_samplings[0]
            else:
                # randint's upper bound is exclusive: degrees lie in [1, sizes[0] - 1].
                output_ordering = random_state.randint(1, sizes[0], j)
            assert min(input_ordering) == 1
            assert max(input_ordering) == sizes[0]
            assert len(np.unique(input_ordering)) == sizes[0]
            assert min(output_ordering) > 0
            assert max(output_ordering) < sizes[0]
            l_mask = _generate_autoregressive_mask(
                input_ordering, output_ordering)
            orderings_and_samplings.extend([input_ordering, output_ordering])
            masks.append(l_mask)
        elif j == sizes[-1]:
            # Output layer: uses *strict* inequality so output k never
            # depends on input k.
            input_ordering = orderings_and_samplings[-1]
            output_ordering = orderings_and_samplings[0]
            # invert mask generation function for last layer!
            # in order to get the correct output will need to do some work
            l_mask = _generate_autoregressive_mask(
                output_ordering, input_ordering)
            # Turn 0 to 1 and 1 to 0
            l_mask = l_mask.T
            l_mask[l_mask < 0.5] = 2.
            l_mask[l_mask < 1.5] = 1.
            l_mask -= 1.
            masks.append(l_mask)
            orderings_and_samplings.append(output_ordering)
        else:
            # Hidden layer: sample (or use the forced) unit degrees.
            if forced_samplings is not None:
                output_ordering = forced_samplings[n]
            else:
                output_ordering = random_state.randint(1, sizes[0], j)
            input_ordering = orderings_and_samplings[-1]
            assert min(input_ordering) > 0
            assert max(input_ordering) < sizes[0]
            assert min(output_ordering) > 0
            # BUG FIX: previously re-checked min(output_ordering) here; the
            # upper bound must constrain the maximum degree, matching the
            # input-layer checks above.
            assert max(output_ordering) < sizes[0]
            l_mask = _generate_autoregressive_mask(
                input_ordering, output_ordering)
            masks.append(l_mask)
            orderings_and_samplings.append(output_ordering)
    return masks, orderings_and_samplings
| [
"numpy.unique",
"numpy.arange",
"numpy.ones",
"numpy.random.RandomState"
] | [((188, 222), 'numpy.ones', 'np.ones', (['(input_size, output_size)'], {}), '((input_size, output_size))\n', (195, 222), True, 'import numpy as np\n'), ((619, 642), 'numpy.random.RandomState', 'np.random.RandomState', ([], {}), '()\n', (640, 642), True, 'import numpy as np\n'), ((1159, 1185), 'numpy.arange', 'np.arange', (['(1)', '(sizes[0] + 1)'], {}), '(1, sizes[0] + 1)\n', (1168, 1185), True, 'import numpy as np\n'), ((1545, 1570), 'numpy.unique', 'np.unique', (['input_ordering'], {}), '(input_ordering)\n', (1554, 1570), True, 'import numpy as np\n')] |
from ..tinyhouse import HaplotypeOperations
from ..tinyhouse import BasicHMM
from ..tinyhouse import ProbMath
from ..tinyhouse import InputOutput
from . import FamilySingleLocusPeeling
import math
import numpy as np
from numba import jit, njit
import math
def imputeFamUsingFullSibs(fam, pedigree, args) :
    """Impute the low-density children of a full-sib family.

    Repeatedly phases the parents from the high-density children and then
    imputes the low-density children from the parental haplotypes, averaging
    the children's dosages (or genotype probabilities) over `args.niter`
    imputation rounds.

    :param fam: family object with `sire`, `dam` and `offspring` members.
    :param pedigree: pedigree object (not referenced in this function).
    :param args: parsed options; uses `gbs`, `usegenoprobs`, `niter`, `preimpute`.
    """
    #Pipeline:
    # 0) Get LD and HD children.
    # 1) Take all the children and phase via parents homozygous loci.
    # 2) Take all of the LD children and impute from the parents.
    # STEP 0: Get LD and HD children.
    nLoci = len(fam.sire.genotypes)
    if not args.gbs :
        # Children with >10% missing genotypes are treated as low-density (LD).
        ldChildren = [off for off in fam.offspring if off.getPercentMissing() > .1]
        hdChildren = [off for off in fam.offspring if off.getPercentMissing() <= .1]
    if args.gbs:
        # With GBS data every child is used both for phasing and for imputation.
        ldChildren = fam.offspring
        hdChildren = fam.offspring
    # Let's impute everyone.
    # Need to think about this.
    if not args.usegenoprobs:
        # childDosages is here to store the children's dosages to prevent them from being over-written.
        childDosages = np.full((len(ldChildren), nLoci), 0, dtype = np.float32)
        nIterations = args.niter
        for cycle in range(nIterations) :
            runImputationRound(fam, ldChildren, hdChildren, callingMethod = "dosages", preimpute = args.preimpute)
            for i, child in enumerate(ldChildren):
                childDosages[i,:] += child.dosages
        # Average the accumulated dosages over the imputation rounds.
        childDosages /= nIterations
        for i, child in enumerate(ldChildren):
            child.dosages = childDosages[i,:]
    if args.usegenoprobs:
        childGenoProbs = np.full((len(ldChildren), 4, nLoci), 0, dtype = np.float32)
        nIterations = args.niter
        for cycle in range(nIterations) :
            runImputationRound(fam, ldChildren, hdChildren, callingMethod = "probabilities", preimpute = args.preimpute)
            for i, child in enumerate(ldChildren):
                childGenoProbs[i,:,:] += child.info # Child genotype probabilities get passed to info.
        childGenoProbs /= nIterations
        for i, child in enumerate(ldChildren):
            # NOTE(review): combineAndConvertToDosage is presumably defined later
            # in this module — confirm it is in scope.
            child.dosages = combineAndConvertToDosage(childGenoProbs[i,:,:], ProbMath.getGenotypeProbabilities_ind(child))
def runImputationRound(fam, ldChildren, hdChildren, callingMethod = "dosages", preimpute = False) :
    """Run one imputation round: phase the parents, then impute the LD children.

    :param fam: family object with `sire` and `dam` members.
    :param ldChildren: low-density children to impute.
    :param hdChildren: high-density children used to phase the parents.
    :param callingMethod: passed to `BasicHMM.diploidHMM` ("dosages" or
        "probabilities").
    :param preimpute: forwarded to `phaseParentsViaEM`.
    """
    #STEP 1: Take all of the HD children and phase/(impute?) the parents.
    sireHaplotypes, damHaplotypes = phaseParentsViaEM(fam.sire, fam.dam, hdChildren, preimpute = preimpute)
    # Just re-align the parents to get imputed genotypes from them on the output.
    # This got taken out because we run the peeling cycle multiple times. Probably should just do something with the dosages.
    # for ind in [fam.sire, fam.dam]:
    #     HaplotypeOperations.align_individual(ind)
    #STEP 2: Take all of the LD children and impute from the parents.
    nLoci = len(fam.sire.genotypes)
    for child in ldChildren:
        # Impute each LD child from the called (rounded) parental haplotypes,
        # with genotyping error 0.01 and recombination rate 1/nLoci per locus.
        # imputeIndividual(child, np.round(sireHaplotypes), np.round(damHaplotypes))
        BasicHMM.diploidHMM(child, np.round(sireHaplotypes), np.round(damHaplotypes), 0.01, 1.0/nLoci, useCalledHaps = False, callingMethod = callingMethod)
def phaseParentsViaEM(sire, dam, children, preimpute = False):
    """Phase the parental haplotypes via an EM-style cycle over the children.

    Alternates between (1) estimating each child's segregation given the
    current parental haplotypes and (2) re-estimating the parental haplotypes
    from those segregations, for `args.ncycles` iterations.

    :param sire: sire individual.
    :param dam: dam individual.
    :param children: children used to inform the parental haplotypes.
    :param preimpute: if True, derive the parents' genotype probabilities
        from the children via single-locus peeling instead of from the
        parents' own data.
    :return: (sireHaplotypes, damHaplotypes), each of shape (2, nLoci) with
        per-locus probabilities of carrying the alternative allele.
    """
    args = InputOutput.args
    # Pipeline:
    # 0) Initialize founder haplotypes.
    if preimpute:
        sire_genotypeProbabilities, dam_genotypeProbabilities = FamilySingleLocusPeeling.getParentalGenotypesWithChildren(sire, dam, children)
    else:
        sire_genotypeProbabilities = ProbMath.getGenotypeProbabilities_ind(sire)
        dam_genotypeProbabilities = ProbMath.getGenotypeProbabilities_ind(dam)
    nLoci = sire_genotypeProbabilities.shape[1]
    sireHaplotypes = np.full((2, nLoci), 0, dtype = np.float32)
    damHaplotypes = np.full((2, nLoci), 0, dtype = np.float32)
    jitter = args.jitter
    sireHaplotypes[0,:] = sire_genotypeProbabilities[1,:] + sire_genotypeProbabilities[3,:] # Prob the sire is aA or AA
    sireHaplotypes[1,:] = sire_genotypeProbabilities[2,:] + sire_genotypeProbabilities[3,:] # Prob the sire is Aa or AA
    damHaplotypes[0, :] = dam_genotypeProbabilities[1,:] + dam_genotypeProbabilities[3,:]
    damHaplotypes[1, :] = dam_genotypeProbabilities[2,:] + dam_genotypeProbabilities[3,:]
    # Add in some jitter for the haplotype assignements.
    sireHaplotypes = ((1-jitter*2) * sireHaplotypes + jitter) + (1 - 2*np.random.random(sireHaplotypes.shape)) * 2/3*jitter # (0,1) -> (.15, .85) + random noise -> (.05, .95)
    damHaplotypes = ((1-jitter*2) * damHaplotypes + jitter) + (1 - 2*np.random.random(damHaplotypes.shape)) * 2/3*jitter # (0,1) -> (.15, .85) + random noise -> (.05, .95)
    nChildren = len(children)
    nCycles = args.ncycles
    # Step 0b) Construct genotype probabilities for the children. Note: These are fixed for all the iterations.
    genotypeProbabilities = np.full((nChildren, 4, nLoci), 0, dtype = np.float32)
    for i, child in enumerate(children):
        genotypeProbabilities[i,:,:] = ProbMath.getGenotypeProbabilities_ind(child)
    # Step 1 + 2: Estimate children based on parents. Estimate parents based on children.
    for i in range(nCycles):
        # 1) Loop to perform haplotype assignments based on current haplotypes.
        segregations = np.full((nChildren, 2, 2, nLoci), 0, dtype = np.float32)
        # NOTE(review): the inner loop reuses the cycle index name `i`;
        # harmless here since `i` is not used after the inner loop.
        for i, child in enumerate(children):
            estimateSegregation(segregations[i,:,:], genotypeProbabilities[i,:,:], sireHaplotypes, damHaplotypes)
        # 2) Loop to re-estimate the founder haplotypes based on assignements.
        sireHaplotypes, damHaplotypes = estimateFounders(segregations, genotypeProbabilities, sireHaplotypes, damHaplotypes, sire_genotypeProbabilities, dam_genotypeProbabilities)
    return sireHaplotypes, damHaplotypes
@njit
def estimateSegregation(segregation, genotypeProbabilities, sireHaplotypes, damHaplotypes):
    """Fill `segregation` (shape (2, 2, nLoci)) with smoothed probabilities
    of the child inheriting each (sire haplotype, dam haplotype) pair,
    obtained by running a forward-backward HMM over per-locus point
    estimates."""
    nLoci = segregation.shape[2]
    pointEstimates = np.full((2, 2, nLoci), 0, dtype = np.float32)
    # Construct point estimates, by comparing sire + dam haplotypes.
    for i in range(nLoci):
        for sireHap in range(2):
            for damHap in range(2):
                # Flag: This may be a place where we are playing too fast and loose with a normalization constant.
                p1 = sireHaplotypes[sireHap, i]
                p2 = damHaplotypes[damHap, i]
                p_aa = genotypeProbabilities[0, i]
                p_aA = genotypeProbabilities[1, i]
                p_Aa = genotypeProbabilities[2, i]
                p_AA = genotypeProbabilities[3, i]
                # I am reasonable certain that this is right.
                # p(aa | inheriting sireHap + damHap) = (1-p1)*(1-p2)
                # We are calculating p(aa | inheritance, data) = p(aa|data)*p(aa|inheritance).
                score = p_aa*(1-p1)*(1-p2) + p_aA*(1-p1)*p2 + p_Aa*p1*(1-p2) + p_AA*p1*p2
                pointEstimates[sireHap, damHap, i] = score
    # Uniform recombination rate of 1/nLoci between adjacent loci.
    recombinationRate = np.full(nLoci, 1.0/nLoci, dtype = np.float32)
    # Run HMM on point estimates to get smoothed assignments.
    segregation[:,:,:] = BasicHMM.diploidForwardBackward(pointEstimates, recombinationRate = recombinationRate)
@njit
def estimateFounders(segregations, genotypeProbabilities, sireHaplotypes, damHaplotypes, sire_genotypeProbabilities, dam_genotypeProbabilities):
    """Re-estimate sire/dam haplotype probabilities from child segregations.

    Accumulates expected allele dosages (numerators) and segregation weights
    (denominators) per haplotype and locus, first from the parents' own
    genotype probabilities, then from every child weighted by its
    segregation assignment. Returns the pair of updated (2, nLoci)
    haplotype probability arrays (sire, dam) as numerator/denominator ratios.
    """
    nChildren, tmp, tmp2, nLoci = segregations.shape
    # The .1 and .2 are weak priors.
    sireHaplotypes_new = np.full((2, nLoci), 0.1, dtype = np.float32)
    sireHaplotypes_new_counts = np.full((2, nLoci), 0.2, dtype = np.float32)
    damHaplotypes_new = np.full((2, nLoci), 0.1, dtype = np.float32)
    damHaplotypes_new_counts = np.full((2, nLoci), 0.2, dtype = np.float32)
    # Scratch buffer reused for every locus: genotype probs (aa, aA, Aa, AA).
    values = np.full(4, 0, dtype = np.float32)
    # Sire update: weight the sire's own genotype probabilities by the current
    # haplotype estimates and accumulate the expected dosages.
    for i in range(nLoci):
        p1 = sireHaplotypes[0,i]
        p2 = sireHaplotypes[1,i]
        # This is probably a speed bottleneck
        values[0]= sire_genotypeProbabilities[0,i] * (1-p1)*(1-p2)
        values[1]= sire_genotypeProbabilities[1,i] * (1-p1)*p2
        values[2]= sire_genotypeProbabilities[2,i] * p1*(1-p2)
        values[3]= sire_genotypeProbabilities[3,i] * p1*p2
        norm_1D(values)
        sire_dosage = values[2] + values[3] # This is the expected allele value they received from their sire.
        dam_dosage = values[1] + values[3] # This is the expected allele value they received from their dam.
        sireHaplotypes_new[0, i] += sire_dosage
        sireHaplotypes_new_counts[0, i] += 1
        sireHaplotypes_new[1, i] += dam_dosage
        sireHaplotypes_new_counts[1, i] += 1
    # Dam updates (same scheme as the sire update above).
    for i in range(nLoci):
        p1 = damHaplotypes[0,i]
        p2 = damHaplotypes[1,i]
        values[0]= dam_genotypeProbabilities[0,i] * (1-p1)*(1-p2)
        values[1]= dam_genotypeProbabilities[1,i] * (1-p1)*p2
        values[2]= dam_genotypeProbabilities[2,i] * p1*(1-p2)
        values[3]= dam_genotypeProbabilities[3,i] * p1*p2
        norm_1D(values)
        sire_dosage = values[2] + values[3]
        dam_dosage = values[1] + values[3]
        damHaplotypes_new[0, i] += sire_dosage
        damHaplotypes_new_counts[0, i] += 1
        damHaplotypes_new[1, i] += dam_dosage
        damHaplotypes_new_counts[1, i] += 1
    # Precompute, per (sireHap, damHap, locus), the offspring genotype
    # probabilities conditional on inheriting that haplotype pair.
    valueArray = np.full((2, 2, nLoci, 4), 0, dtype = np.float32)
    for i in range(nLoci):
        for sireHap in range(2):
            for damHap in range(2):
                p1 = sireHaplotypes[sireHap,i]
                p2 = damHaplotypes[damHap,i]
                # This produces the genotype probabilities for an offspring, conditional on having a given haplotype.
                valueArray[sireHap, damHap, i, 0] = (1-p1)*(1-p2)
                valueArray[sireHap, damHap, i, 1] = (1-p1)*p2
                valueArray[sireHap, damHap, i, 2] = p1*(1-p2)
                valueArray[sireHap, damHap, i, 3] = p1*p2
    # Child contributions: dosages weighted by each child's segregation
    # assignment for the given haplotype pair.
    for child in range(nChildren):
        for i in range(nLoci):
            for sireHap in range(2):
                for damHap in range(2):
                    # Flag: This may be a place where we are playing too fast and loose with a normalization constant.
                    # p1 = sireHaplotypes[sireHap,i]
                    # p2 = damHaplotypes[damHap,i]
                    for j in range(4):
                        values[j] = valueArray[sireHap, damHap, i, j] * genotypeProbabilities[child,j, i]
                    norm_1D(values)
                    sire_dosage = values[2] + values[3]
                    dam_dosage = values[1] + values[3]
                    sireHaplotypes_new[sireHap, i] += segregations[child,sireHap,damHap,i]*sire_dosage
                    sireHaplotypes_new_counts[sireHap, i] += segregations[child,sireHap,damHap,i]
                    damHaplotypes_new[damHap, i] += segregations[child,sireHap,damHap,i]*dam_dosage
                    damHaplotypes_new_counts[damHap, i] += segregations[child,sireHap,damHap,i]
    return sireHaplotypes_new/sireHaplotypes_new_counts, damHaplotypes_new/damHaplotypes_new_counts
@njit
def norm_1D(vect):
    """Normalize `vect` in place so that its entries sum to one."""
    total = 0
    n = len(vect)
    for idx in range(n):
        total += vect[idx]
    for idx in range(n):
        vect[idx] /= total
@njit
def norm_1D_return(vect):
    """Normalize `vect` in place to sum to one, then return it."""
    total = 0
    n = len(vect)
    for idx in range(n):
        total += vect[idx]
    for idx in range(n):
        vect[idx] /= total
    return vect
def combineAndConvertToDosage(genoProb1, genoProb2) :
    """Combine two genotype-probability matrices into per-locus dosages.

    Each column (locus) of both inputs is normalized in place, the two
    normalized columns are multiplied and renormalized, and the expected
    allele dosage p(aA) + p(Aa) + 2*p(AA) is stored for that locus.
    """
    n_loci = genoProb1.shape[1]
    dosages = np.full(n_loci, 0, dtype=np.float32)
    for locus in range(n_loci):
        left = norm_1D_return(genoProb1[:, locus])
        right = norm_1D_return(genoProb2[:, locus])
        merged = norm_1D_return(left * right)  # Not the fastest, but shrug.
        dosages[locus] = merged[1] + merged[2] + 2 * merged[3]
    return dosages
# @profile
# def imputeIndividual(ind, sireHaplotypes, damHaplotypes):
# nLoci = len(sireHaplotypes[0])
# # Take an individual, get their genotype probabilities.
# genotypeProbabilities = ProbMath.getGenotypeProbabilities_ind(ind)
# # Use the genotype probabilities to generate segregation estimates.
# segregation = np.full((2, 2, nLoci), 0, dtype = np.float32)
# estimateSegregation(segregation, genotypeProbabilities, sireHaplotypes, damHaplotypes)
# # Use the segregation estimates to re-estimate the individuals genotypes and turn that into dosages.
# ind.dosages = getDosages(segregation, genotypeProbabilities, sireHaplotypes, damHaplotypes)
# @njit
# def getDosages(segregation, genotypeProbabilities, sireHaplotypes, damHaplotypes):
# tmp, nLoci = genotypeProbabilities.shape
# dosages = np.full(nLoci, 0, dtype = np.float32)
# for i in range(nLoci):
# for sireHap in range(2):
# for damHap in range(2):
# # Flag: This may be a place where we are playing too fast and loose with a normalization constant.
# p1 = sireHaplotypes[sireHap,i]
# p2 = damHaplotypes[damHap,i]
# # Do we really want the genotype probabilities in here as well? Isn't there a worry that we're using them to construct the likely inherited haplotypes, and using them again to get the genotypes?
# # Maybe. But there's probably also value, particularly if the haplotypes were all close to .5? (but this doesn't happen since we call them?)
# # Sire and dam haplotypes are also 0 or 1 here, no?
# # This could probably be improved. Particularly since the genotype probabilities are already likely missing for a lot of loci. Will see if this is an issue.
# values = np.array([(1-p1)*(1-p2), (1-p1)*p2, p1*(1-p2), p1*p2], dtype = np.float32) * genotypeProbabilities[:,i]
# values = values/np.sum(values)
# dosage = values[1] + values[2] + 2*values[3]
# dosages[i] += dosage * segregation[sireHap, damHap, i]
# return dosages
| [
"numpy.full",
"numpy.random.random",
"numpy.round"
] | [((3746, 3786), 'numpy.full', 'np.full', (['(2, nLoci)', '(0)'], {'dtype': 'np.float32'}), '((2, nLoci), 0, dtype=np.float32)\n', (3753, 3786), True, 'import numpy as np\n'), ((3809, 3849), 'numpy.full', 'np.full', (['(2, nLoci)', '(0)'], {'dtype': 'np.float32'}), '((2, nLoci), 0, dtype=np.float32)\n', (3816, 3849), True, 'import numpy as np\n'), ((4906, 4957), 'numpy.full', 'np.full', (['(nChildren, 4, nLoci)', '(0)'], {'dtype': 'np.float32'}), '((nChildren, 4, nLoci), 0, dtype=np.float32)\n', (4913, 4957), True, 'import numpy as np\n'), ((5989, 6032), 'numpy.full', 'np.full', (['(2, 2, nLoci)', '(0)'], {'dtype': 'np.float32'}), '((2, 2, nLoci), 0, dtype=np.float32)\n', (5996, 6032), True, 'import numpy as np\n'), ((7021, 7066), 'numpy.full', 'np.full', (['nLoci', '(1.0 / nLoci)'], {'dtype': 'np.float32'}), '(nLoci, 1.0 / nLoci, dtype=np.float32)\n', (7028, 7066), True, 'import numpy as np\n'), ((7510, 7552), 'numpy.full', 'np.full', (['(2, nLoci)', '(0.1)'], {'dtype': 'np.float32'}), '((2, nLoci), 0.1, dtype=np.float32)\n', (7517, 7552), True, 'import numpy as np\n'), ((7587, 7629), 'numpy.full', 'np.full', (['(2, nLoci)', '(0.2)'], {'dtype': 'np.float32'}), '((2, nLoci), 0.2, dtype=np.float32)\n', (7594, 7629), True, 'import numpy as np\n'), ((7657, 7699), 'numpy.full', 'np.full', (['(2, nLoci)', '(0.1)'], {'dtype': 'np.float32'}), '((2, nLoci), 0.1, dtype=np.float32)\n', (7664, 7699), True, 'import numpy as np\n'), ((7733, 7775), 'numpy.full', 'np.full', (['(2, nLoci)', '(0.2)'], {'dtype': 'np.float32'}), '((2, nLoci), 0.2, dtype=np.float32)\n', (7740, 7775), True, 'import numpy as np\n'), ((7792, 7823), 'numpy.full', 'np.full', (['(4)', '(0)'], {'dtype': 'np.float32'}), '(4, 0, dtype=np.float32)\n', (7799, 7823), True, 'import numpy as np\n'), ((9343, 9389), 'numpy.full', 'np.full', (['(2, 2, nLoci, 4)', '(0)'], {'dtype': 'np.float32'}), '((2, 2, nLoci, 4), 0, dtype=np.float32)\n', (9350, 9389), True, 'import numpy as np\n'), ((11535, 11570), 'numpy.full', 
'np.full', (['nLoci', '(0)'], {'dtype': 'np.float32'}), '(nLoci, 0, dtype=np.float32)\n', (11542, 11570), True, 'import numpy as np\n'), ((5314, 5368), 'numpy.full', 'np.full', (['(nChildren, 2, 2, nLoci)', '(0)'], {'dtype': 'np.float32'}), '((nChildren, 2, 2, nLoci), 0, dtype=np.float32)\n', (5321, 5368), True, 'import numpy as np\n'), ((3070, 3094), 'numpy.round', 'np.round', (['sireHaplotypes'], {}), '(sireHaplotypes)\n', (3078, 3094), True, 'import numpy as np\n'), ((3096, 3119), 'numpy.round', 'np.round', (['damHaplotypes'], {}), '(damHaplotypes)\n', (3104, 3119), True, 'import numpy as np\n'), ((4429, 4467), 'numpy.random.random', 'np.random.random', (['sireHaplotypes.shape'], {}), '(sireHaplotypes.shape)\n', (4445, 4467), True, 'import numpy as np\n'), ((4603, 4640), 'numpy.random.random', 'np.random.random', (['damHaplotypes.shape'], {}), '(damHaplotypes.shape)\n', (4619, 4640), True, 'import numpy as np\n')] |
#!/usr/bin/env python
import cv2
import numpy as np
def main():
    """Show a live webcam feed with skin-colored contours drawn on it.

    Press ESC to quit. Fixes over the original version: the frame-grab
    return flag is checked (a failed read previously passed ``None`` into
    the pipeline and crashed), and the capture device / windows are
    released on every exit path.
    """
    cap = cv2.VideoCapture(0)
    try:
        while cap.isOpened():
            ret, img = cap.read()
            if not ret:  # camera read failed or stream ended
                break
            skinMask = HSVBin(img)
            contours = getContours(skinMask)
            cv2.drawContours(img, contours, -1, (0, 255, 0), 2)
            cv2.imshow('capture', img)
            k = cv2.waitKey(10)
            if k == 27:  # ESC
                break
    finally:
        cap.release()  # free the camera device
        cv2.destroyAllWindows()
def getContours(img):
    """Return convex hulls of the large contours found in a binary mask.

    The mask is cleaned with a morphological opening followed by a
    closing, and only contours whose area exceeds 9000 px are kept,
    each represented by its convex hull.
    """
    kernel = np.ones((5, 5), np.uint8)
    cleaned = cv2.morphologyEx(img, cv2.MORPH_OPEN, kernel)
    cleaned = cv2.morphologyEx(cleaned, cv2.MORPH_CLOSE, kernel)
    contours, _hierarchy = cv2.findContours(cleaned, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    # Keep only sufficiently large blobs; hull smooths ragged edges.
    return [cv2.convexHull(cont) for cont in contours if cv2.contourArea(cont) > 9000]
def HSVBin(img):
    """Return a binary mask of pixels inside the configured HSV range.

    NOTE(review): the frame comes from ``cv2.VideoCapture`` (BGR order)
    but is converted with ``COLOR_RGB2HSV``; the thresholds below appear
    tuned to that combination — confirm before switching to
    ``COLOR_BGR2HSV``.
    """
    converted = cv2.cvtColor(img, cv2.COLOR_RGB2HSV)
    lower = np.array([100, 50, 0])
    upper = np.array([125, 255, 255])
    return cv2.inRange(converted, lower, upper)
# Run the webcam demo only when executed as a script, not on import.
if __name__ == '__main__':
    main()
| [
"cv2.contourArea",
"cv2.cvtColor",
"cv2.morphologyEx",
"cv2.waitKey",
"numpy.ones",
"cv2.VideoCapture",
"numpy.array",
"cv2.convexHull",
"cv2.drawContours",
"cv2.imshow",
"cv2.inRange",
"cv2.findContours"
] | [((75, 94), 'cv2.VideoCapture', 'cv2.VideoCapture', (['(0)'], {}), '(0)\n', (91, 94), False, 'import cv2\n'), ((427, 452), 'numpy.ones', 'np.ones', (['(5, 5)', 'np.uint8'], {}), '((5, 5), np.uint8)\n', (434, 452), True, 'import numpy as np\n'), ((464, 509), 'cv2.morphologyEx', 'cv2.morphologyEx', (['img', 'cv2.MORPH_OPEN', 'kernel'], {}), '(img, cv2.MORPH_OPEN, kernel)\n', (480, 509), False, 'import cv2\n'), ((523, 572), 'cv2.morphologyEx', 'cv2.morphologyEx', (['closed', 'cv2.MORPH_CLOSE', 'kernel'], {}), '(closed, cv2.MORPH_CLOSE, kernel)\n', (539, 572), False, 'import cv2\n'), ((592, 656), 'cv2.findContours', 'cv2.findContours', (['closed', 'cv2.RETR_TREE', 'cv2.CHAIN_APPROX_SIMPLE'], {}), '(closed, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\n', (608, 656), False, 'import cv2\n'), ((1065, 1101), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_RGB2HSV'], {}), '(img, cv2.COLOR_RGB2HSV)\n', (1077, 1101), False, 'import cv2\n'), ((1120, 1142), 'numpy.array', 'np.array', (['[100, 50, 0]'], {}), '([100, 50, 0])\n', (1128, 1142), True, 'import numpy as np\n'), ((1160, 1185), 'numpy.array', 'np.array', (['[125, 255, 255]'], {}), '([125, 255, 255])\n', (1168, 1185), True, 'import numpy as np\n'), ((1198, 1238), 'cv2.inRange', 'cv2.inRange', (['hsv', 'lower_skin', 'upper_skin'], {}), '(hsv, lower_skin, upper_skin)\n', (1209, 1238), False, 'import cv2\n'), ((239, 290), 'cv2.drawContours', 'cv2.drawContours', (['img', 'contours', '(-1)', '(0, 255, 0)', '(2)'], {}), '(img, contours, -1, (0, 255, 0), 2)\n', (255, 290), False, 'import cv2\n'), ((299, 325), 'cv2.imshow', 'cv2.imshow', (['"""capture"""', 'img'], {}), "('capture', img)\n", (309, 325), False, 'import cv2\n'), ((338, 353), 'cv2.waitKey', 'cv2.waitKey', (['(10)'], {}), '(10)\n', (349, 353), False, 'import cv2\n'), ((717, 738), 'cv2.contourArea', 'cv2.contourArea', (['cont'], {}), '(cont)\n', (732, 738), False, 'import cv2\n'), ((856, 876), 'cv2.convexHull', 'cv2.convexHull', (['cont'], {}), '(cont)\n', (870, 
876), False, 'import cv2\n')] |
import unittest.mock as mock
import pytest
import numpy as np
import tensorflow as tf
from unitytrainers.bc.models import BehavioralCloningModel
from unityagents import UnityEnvironment
from .mock_communicator import MockCommunicator
@mock.patch('unityagents.UnityEnvironment.executable_launcher')
@mock.patch('unityagents.UnityEnvironment.get_communicator')
def test_cc_bc_model(mock_communicator, mock_launcher):
    """Smoke-test the BC model graph: continuous actions, no visual inputs."""
    tf.reset_default_graph()
    with tf.Session() as sess:
        with tf.variable_scope("FakeGraphScope"):
            # Fake communicator so no real Unity executable is launched.
            mock_communicator.return_value = MockCommunicator(
                discrete_action=False, visual_inputs=0)
            env = UnityEnvironment(' ')
            model = BehavioralCloningModel(env.brains["RealFakeBrain"])
            sess.run(tf.global_variables_initializer())
            # Two agents, one step each, six-dim vector observations.
            observations = np.array([[1, 2, 3, 1, 2, 3],
                                     [3, 4, 5, 3, 4, 5]])
            feeds = {model.batch_size: 2,
                     model.sequence_length: 1,
                     model.vector_in: observations}
            sess.run([model.sample_action, model.policy], feed_dict=feeds)
            env.close()
@mock.patch('unityagents.UnityEnvironment.executable_launcher')
@mock.patch('unityagents.UnityEnvironment.get_communicator')
def test_dc_bc_model(mock_communicator, mock_launcher):
    """Smoke-test the BC model graph: discrete actions, no visual inputs."""
    tf.reset_default_graph()
    with tf.Session() as sess:
        with tf.variable_scope("FakeGraphScope"):
            # Fake communicator so no real Unity executable is launched.
            mock_communicator.return_value = MockCommunicator(
                discrete_action=True, visual_inputs=0)
            env = UnityEnvironment(' ')
            model = BehavioralCloningModel(env.brains["RealFakeBrain"])
            sess.run(tf.global_variables_initializer())
            # Two agents, one step each, six-dim vector observations.
            observations = np.array([[1, 2, 3, 1, 2, 3],
                                     [3, 4, 5, 3, 4, 5]])
            feeds = {model.batch_size: 2,
                     model.dropout_rate: 1.0,
                     model.sequence_length: 1,
                     model.vector_in: observations}
            sess.run([model.sample_action, model.action_probs], feed_dict=feeds)
            env.close()
@mock.patch('unityagents.UnityEnvironment.executable_launcher')
@mock.patch('unityagents.UnityEnvironment.get_communicator')
def test_visual_dc_bc_model(mock_communicator, mock_launcher):
    """Smoke-test the BC model graph: discrete actions, two visual inputs."""
    tf.reset_default_graph()
    with tf.Session() as sess:
        with tf.variable_scope("FakeGraphScope"):
            # Fake communicator so no real Unity executable is launched.
            mock_communicator.return_value = MockCommunicator(
                discrete_action=True, visual_inputs=2)
            env = UnityEnvironment(' ')
            model = BehavioralCloningModel(env.brains["RealFakeBrain"])
            sess.run(tf.global_variables_initializer())
            # Two agents, one step each: vector obs plus two 40x30 RGB frames.
            observations = np.array([[1, 2, 3, 1, 2, 3],
                                     [3, 4, 5, 3, 4, 5]])
            feeds = {model.batch_size: 2,
                     model.dropout_rate: 1.0,
                     model.sequence_length: 1,
                     model.vector_in: observations,
                     model.visual_in[0]: np.ones([2, 40, 30, 3]),
                     model.visual_in[1]: np.ones([2, 40, 30, 3])}
            sess.run([model.sample_action, model.action_probs], feed_dict=feeds)
            env.close()
@mock.patch('unityagents.UnityEnvironment.executable_launcher')
@mock.patch('unityagents.UnityEnvironment.get_communicator')
def test_visual_cc_bc_model(mock_communicator, mock_launcher):
    """Smoke-test the BC model graph: continuous actions, two visual inputs."""
    tf.reset_default_graph()
    with tf.Session() as sess:
        with tf.variable_scope("FakeGraphScope"):
            # Fake communicator so no real Unity executable is launched.
            mock_communicator.return_value = MockCommunicator(
                discrete_action=False, visual_inputs=2)
            env = UnityEnvironment(' ')
            model = BehavioralCloningModel(env.brains["RealFakeBrain"])
            sess.run(tf.global_variables_initializer())
            # Two agents, one step each: vector obs plus two 40x30 RGB frames.
            observations = np.array([[1, 2, 3, 1, 2, 3],
                                     [3, 4, 5, 3, 4, 5]])
            feeds = {model.batch_size: 2,
                     model.sequence_length: 1,
                     model.vector_in: observations,
                     model.visual_in[0]: np.ones([2, 40, 30, 3]),
                     model.visual_in[1]: np.ones([2, 40, 30, 3])}
            sess.run([model.sample_action, model.policy], feed_dict=feeds)
            env.close()
# Allow running this test module directly (normally invoked via pytest).
if __name__ == '__main__':
    pytest.main()
| [
"unitytrainers.bc.models.BehavioralCloningModel",
"tensorflow.global_variables_initializer",
"tensorflow.reset_default_graph",
"tensorflow.Session",
"tensorflow.variable_scope",
"numpy.ones",
"pytest.main",
"unittest.mock.patch",
"numpy.array",
"unityagents.UnityEnvironment"
] | [((239, 301), 'unittest.mock.patch', 'mock.patch', (['"""unityagents.UnityEnvironment.executable_launcher"""'], {}), "('unityagents.UnityEnvironment.executable_launcher')\n", (249, 301), True, 'import unittest.mock as mock\n'), ((303, 362), 'unittest.mock.patch', 'mock.patch', (['"""unityagents.UnityEnvironment.get_communicator"""'], {}), "('unityagents.UnityEnvironment.get_communicator')\n", (313, 362), True, 'import unittest.mock as mock\n'), ((1221, 1283), 'unittest.mock.patch', 'mock.patch', (['"""unityagents.UnityEnvironment.executable_launcher"""'], {}), "('unityagents.UnityEnvironment.executable_launcher')\n", (1231, 1283), True, 'import unittest.mock as mock\n'), ((1285, 1344), 'unittest.mock.patch', 'mock.patch', (['"""unityagents.UnityEnvironment.get_communicator"""'], {}), "('unityagents.UnityEnvironment.get_communicator')\n", (1295, 1344), True, 'import unittest.mock as mock\n'), ((2258, 2320), 'unittest.mock.patch', 'mock.patch', (['"""unityagents.UnityEnvironment.executable_launcher"""'], {}), "('unityagents.UnityEnvironment.executable_launcher')\n", (2268, 2320), True, 'import unittest.mock as mock\n'), ((2322, 2381), 'unittest.mock.patch', 'mock.patch', (['"""unityagents.UnityEnvironment.get_communicator"""'], {}), "('unityagents.UnityEnvironment.get_communicator')\n", (2332, 2381), True, 'import unittest.mock as mock\n'), ((3442, 3504), 'unittest.mock.patch', 'mock.patch', (['"""unityagents.UnityEnvironment.executable_launcher"""'], {}), "('unityagents.UnityEnvironment.executable_launcher')\n", (3452, 3504), True, 'import unittest.mock as mock\n'), ((3506, 3565), 'unittest.mock.patch', 'mock.patch', (['"""unityagents.UnityEnvironment.get_communicator"""'], {}), "('unityagents.UnityEnvironment.get_communicator')\n", (3516, 3565), True, 'import unittest.mock as mock\n'), ((423, 447), 'tensorflow.reset_default_graph', 'tf.reset_default_graph', ([], {}), '()\n', (445, 447), True, 'import tensorflow as tf\n'), ((1405, 1429), 
'tensorflow.reset_default_graph', 'tf.reset_default_graph', ([], {}), '()\n', (1427, 1429), True, 'import tensorflow as tf\n'), ((2449, 2473), 'tensorflow.reset_default_graph', 'tf.reset_default_graph', ([], {}), '()\n', (2471, 2473), True, 'import tensorflow as tf\n'), ((3633, 3657), 'tensorflow.reset_default_graph', 'tf.reset_default_graph', ([], {}), '()\n', (3655, 3657), True, 'import tensorflow as tf\n'), ((4601, 4614), 'pytest.main', 'pytest.main', ([], {}), '()\n', (4612, 4614), False, 'import pytest\n'), ((457, 469), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (467, 469), True, 'import tensorflow as tf\n'), ((1439, 1451), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (1449, 1451), True, 'import tensorflow as tf\n'), ((2483, 2495), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (2493, 2495), True, 'import tensorflow as tf\n'), ((3667, 3679), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (3677, 3679), True, 'import tensorflow as tf\n'), ((492, 527), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""FakeGraphScope"""'], {}), "('FakeGraphScope')\n", (509, 527), True, 'import tensorflow as tf\n'), ((666, 687), 'unityagents.UnityEnvironment', 'UnityEnvironment', (['""" """'], {}), "(' ')\n", (682, 687), False, 'from unityagents import UnityEnvironment\n'), ((708, 759), 'unitytrainers.bc.models.BehavioralCloningModel', 'BehavioralCloningModel', (["env.brains['RealFakeBrain']"], {}), "(env.brains['RealFakeBrain'])\n", (730, 759), False, 'from unitytrainers.bc.models import BehavioralCloningModel\n'), ((779, 812), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (810, 812), True, 'import tensorflow as tf\n'), ((1474, 1509), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""FakeGraphScope"""'], {}), "('FakeGraphScope')\n", (1491, 1509), True, 'import tensorflow as tf\n'), ((1647, 1668), 'unityagents.UnityEnvironment', 'UnityEnvironment', (['""" """'], {}), "(' ')\n", (1663, 
1668), False, 'from unityagents import UnityEnvironment\n'), ((1689, 1740), 'unitytrainers.bc.models.BehavioralCloningModel', 'BehavioralCloningModel', (["env.brains['RealFakeBrain']"], {}), "(env.brains['RealFakeBrain'])\n", (1711, 1740), False, 'from unitytrainers.bc.models import BehavioralCloningModel\n'), ((1760, 1793), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (1791, 1793), True, 'import tensorflow as tf\n'), ((2518, 2553), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""FakeGraphScope"""'], {}), "('FakeGraphScope')\n", (2535, 2553), True, 'import tensorflow as tf\n'), ((2691, 2712), 'unityagents.UnityEnvironment', 'UnityEnvironment', (['""" """'], {}), "(' ')\n", (2707, 2712), False, 'from unityagents import UnityEnvironment\n'), ((2733, 2784), 'unitytrainers.bc.models.BehavioralCloningModel', 'BehavioralCloningModel', (["env.brains['RealFakeBrain']"], {}), "(env.brains['RealFakeBrain'])\n", (2755, 2784), False, 'from unitytrainers.bc.models import BehavioralCloningModel\n'), ((2804, 2837), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (2835, 2837), True, 'import tensorflow as tf\n'), ((3702, 3737), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""FakeGraphScope"""'], {}), "('FakeGraphScope')\n", (3719, 3737), True, 'import tensorflow as tf\n'), ((3876, 3897), 'unityagents.UnityEnvironment', 'UnityEnvironment', (['""" """'], {}), "(' ')\n", (3892, 3897), False, 'from unityagents import UnityEnvironment\n'), ((3918, 3969), 'unitytrainers.bc.models.BehavioralCloningModel', 'BehavioralCloningModel', (["env.brains['RealFakeBrain']"], {}), "(env.brains['RealFakeBrain'])\n", (3940, 3969), False, 'from unitytrainers.bc.models import BehavioralCloningModel\n'), ((3989, 4022), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (4020, 4022), True, 'import tensorflow as tf\n'), ((1039, 1089), 'numpy.array', 
'np.array', (['[[1, 2, 3, 1, 2, 3], [3, 4, 5, 3, 4, 5]]'], {}), '([[1, 2, 3, 1, 2, 3], [3, 4, 5, 3, 4, 5]])\n', (1047, 1089), True, 'import numpy as np\n'), ((2076, 2126), 'numpy.array', 'np.array', (['[[1, 2, 3, 1, 2, 3], [3, 4, 5, 3, 4, 5]]'], {}), '([[1, 2, 3, 1, 2, 3], [3, 4, 5, 3, 4, 5]])\n', (2084, 2126), True, 'import numpy as np\n'), ((3120, 3170), 'numpy.array', 'np.array', (['[[1, 2, 3, 1, 2, 3], [3, 4, 5, 3, 4, 5]]'], {}), '([[1, 2, 3, 1, 2, 3], [3, 4, 5, 3, 4, 5]])\n', (3128, 3170), True, 'import numpy as np\n'), ((3268, 3291), 'numpy.ones', 'np.ones', (['[2, 40, 30, 3]'], {}), '([2, 40, 30, 3])\n', (3275, 3291), True, 'import numpy as np\n'), ((3338, 3361), 'numpy.ones', 'np.ones', (['[2, 40, 30, 3]'], {}), '([2, 40, 30, 3])\n', (3345, 3361), True, 'import numpy as np\n'), ((4249, 4299), 'numpy.array', 'np.array', (['[[1, 2, 3, 1, 2, 3], [3, 4, 5, 3, 4, 5]]'], {}), '([[1, 2, 3, 1, 2, 3], [3, 4, 5, 3, 4, 5]])\n', (4257, 4299), True, 'import numpy as np\n'), ((4397, 4420), 'numpy.ones', 'np.ones', (['[2, 40, 30, 3]'], {}), '([2, 40, 30, 3])\n', (4404, 4420), True, 'import numpy as np\n'), ((4467, 4490), 'numpy.ones', 'np.ones', (['[2, 40, 30, 3]'], {}), '([2, 40, 30, 3])\n', (4474, 4490), True, 'import numpy as np\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Community Convolutional Layer
Created on Sep 19, 2018.
Last edited on Oct 11, 2018.
@author: <NAME>
"""
import logging
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.optim.optimizer import Optimizer
from torch.utils.data import Dataset, TensorDataset
from torch.utils.data.sampler import SequentialSampler
import math
from scipy.optimize import fsolve
class Mish(nn.Module):
    """Mish activation: ``f(x) = x * tanh(softplus(x))``."""

    def __init__(self):
        super().__init__()

    def forward(self, x):
        # Kept as a single inline expression: storing a temporary and
        # returning it was measured ~1 s/epoch slower on a V100 GPU.
        return x * torch.tanh(F.softplus(x))
class Ranger(Optimizer):
    """Ranger deep learning optimizer - RAdam + Lookahead combined.
    https://github.com/lessw2020/Ranger-Deep-Learning-Optimizer
    Ranger has now been used to capture 12 records on the FastAI leaderboard.
    This version = 9.3.19
    Credits:
    RAdam --> https://github.com/LiyuanLucasLiu/RAdam
    Lookahead --> rewritten by lessw2020, but big thanks to Github @LonePatient and @RWightman for ideas from their code.
    Lookahead paper --> MZhang,G Hinton https://arxiv.org/abs/1907.08610
    summary of changes:
    full code integration with all updates at param level instead of group, moves slow weights into state dict (from generic weights),
    supports group learning rates (thanks @SHolderbach), fixes sporadic load from saved model issues.
    changes 8/31/19 - fix references to *self*.N_sma_threshold;
    changed eps to 1e-5 as better default than 1e-8.
    """

    def __init__(self, params, lr=1e-3, alpha=0.5, k=6, N_sma_threshhold=5, betas=(.95, 0.999), eps=1e-5,
                 weight_decay=0):
        """Set up RAdam hyperparameters plus the Lookahead (alpha, k) settings.

        Args:
            params: iterable of parameters or parameter groups to optimize.
            lr: learning rate.
            alpha: Lookahead slow-weights interpolation factor in [0, 1].
            k: Lookahead update frequency (slow weights updated every k steps).
            N_sma_threshhold: SMA length threshold from RAdam; below it the
                variance-rectified step is replaced by a plain momentum step.
            betas: coefficients for the running first/second moment averages.
            eps: term added to the denominator for numerical stability.
            weight_decay: L2 penalty coefficient (applied directly to p.data).

        Raises:
            ValueError: if alpha, k, lr, or eps is out of range.
        """
        # parameter checks
        if not 0.0 <= alpha <= 1.0:
            raise ValueError(f'Invalid slow update rate: {alpha}')
        if not 1 <= k:
            raise ValueError(f'Invalid lookahead steps: {k}')
        if not lr > 0:
            raise ValueError(f'Invalid Learning Rate: {lr}')
        if not eps > 0:
            raise ValueError(f'Invalid eps: {eps}')
        # parameter comments:
        # beta1 (momentum) of .95 seems to work better than .90...
        # N_sma_threshold of 5 seems better in testing than 4.
        # In both cases, worth testing on your dataset (.90 vs .95, 4 vs 5) to make sure which works best for you.
        # prep defaults and init torch.optim base
        defaults = dict(lr=lr, alpha=alpha, k=k, step_counter=0, betas=betas, N_sma_threshhold=N_sma_threshhold,
                        eps=eps, weight_decay=weight_decay)
        super(Ranger, self).__init__(params, defaults)
        # adjustable threshold
        self.N_sma_threshhold = N_sma_threshhold
        # now we can get to work...
        # removed as we now use step from RAdam...no need for duplicate step counting
        # for group in self.param_groups:
        #    group["step_counter"] = 0
        #    print("group step counter init")
        # look ahead params
        self.alpha = alpha
        self.k = k
        # radam buffer for state
        self.radam_buffer = [[None, None, None] for ind in range(10)]
        # self.first_run_check=0
        # lookahead weights
        # 9/2/19 - lookahead param tensors have been moved to state storage.
        # This should resolve issues with load/save where weights were left in GPU memory from first load, slowing down future runs.
        # self.slow_weights = [[p.clone().detach() for p in group['params']]
        #                     for group in self.param_groups]
        # don't use grad for lookahead weights
        # for w in it.chain(*self.slow_weights):
        #    w.requires_grad = False

    def __setstate__(self, state):
        # Restore optimizer state (e.g. after unpickling a saved model).
        print("set state called")
        super(Ranger, self).__setstate__(state)

    def step(self, closure=None):
        """Perform one RAdam update, interpolating toward the slow (Lookahead)
        weights every ``k`` steps. Returns the closure's loss, if one is given.
        """
        loss = None
        # note - below is commented out b/c I have other work that passes back the loss as a float, and thus not a callable closure.
        # Uncomment if you need to use the actual closure...
        if closure is not None:
            loss = closure()
        # Evaluate averages and grad, update param tensors
        for group in self.param_groups:
            for p in group['params']:
                if p.grad is None:
                    continue
                grad = p.grad.data.float()
                if grad.is_sparse:
                    raise RuntimeError('Ranger optimizer does not support sparse gradients')
                p_data_fp32 = p.data.float()
                state = self.state[p]  # get state dict for this param
                if len(state) == 0:  # if first time to run...init dictionary with our desired entries
                    # if self.first_run_check==0:
                    #    self.first_run_check=1
                    #    print("Initializing slow buffer...should not see this at load from saved model!")
                    state['step'] = 0
                    state['exp_avg'] = torch.zeros_like(p_data_fp32)
                    state['exp_avg_sq'] = torch.zeros_like(p_data_fp32)
                    # look ahead weight storage now in state dict
                    state['slow_buffer'] = torch.empty_like(p.data)
                    state['slow_buffer'].copy_(p.data)
                else:
                    state['exp_avg'] = state['exp_avg'].type_as(p_data_fp32)
                    state['exp_avg_sq'] = state['exp_avg_sq'].type_as(p_data_fp32)
                # begin computations
                exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']
                beta1, beta2 = group['betas']
                # compute variance mov avg
                # NOTE(review): the positional `addcmul_(value, t1, t2)` and
                # `add_(value, t)` overloads used below are deprecated in newer
                # PyTorch releases — confirm the target torch version.
                exp_avg_sq.mul_(beta2).addcmul_(1 - beta2, grad, grad)
                # compute mean moving avg
                exp_avg.mul_(beta1).add_(1 - beta1, grad)
                state['step'] += 1
                # Cache (N_sma, step_size) keyed on step % 10 so the scalar math
                # is done once per step rather than once per parameter tensor.
                buffered = self.radam_buffer[int(state['step'] % 10)]
                if state['step'] == buffered[0]:
                    N_sma, step_size = buffered[1], buffered[2]
                else:
                    buffered[0] = state['step']
                    beta2_t = beta2 ** state['step']
                    N_sma_max = 2 / (1 - beta2) - 1
                    N_sma = N_sma_max - 2 * state['step'] * beta2_t / (1 - beta2_t)
                    buffered[1] = N_sma
                    # RAdam: rectify the adaptive step once the variance
                    # estimate is long enough; otherwise fall back to an
                    # un-adapted (momentum-only) step size.
                    if N_sma > self.N_sma_threshhold:
                        step_size = math.sqrt(
                            (1 - beta2_t) * (N_sma - 4) / (N_sma_max - 4) * (N_sma - 2) / N_sma * N_sma_max / (
                                    N_sma_max - 2)) / (1 - beta1 ** state['step'])
                    else:
                        step_size = 1.0 / (1 - beta1 ** state['step'])
                    buffered[2] = step_size
                if group['weight_decay'] != 0:
                    p_data_fp32.add_(-group['weight_decay'] * group['lr'], p_data_fp32)
                if N_sma > self.N_sma_threshhold:
                    denom = exp_avg_sq.sqrt().add_(group['eps'])
                    p_data_fp32.addcdiv_(-step_size * group['lr'], exp_avg, denom)
                else:
                    p_data_fp32.add_(-step_size * group['lr'], exp_avg)
                p.data.copy_(p_data_fp32)
                # integrated look ahead...
                # we do it at the param level instead of group level
                if state['step'] % group['k'] == 0:
                    slow_p = state['slow_buffer']  # get access to slow param tensor
                    slow_p.add_(self.alpha, p.data - slow_p)  # (fast weights - slow weights) * alpha
                    p.data.copy_(slow_p)  # copy interpolated weights to RAdam param tensor
        return loss
class BioVNN(nn.Module):
    """Ontology-structured ("visible") neural network.

    One hidden (Linear + BatchNorm[+ Dropout]) bundle is created per gene
    group listed in ``layer_names['gene_group_idx']``; each group's layer takes
    the raw gene features and/or the outputs of its child groups as input, and
    every group also gets its own auxiliary output head.

    NOTE(review): indentation of this block was reconstructed from a
    whitespace-mangled source; nesting was cross-checked against the file's
    extracted API offsets where possible.
    """

    def __init__(self, input_dim, child_map, output_dim, feature_dim, act_func='Mish', use_sigmoid_output=True,
                 dropout_p=0, layer_names=None, only_combine_child_gene_group=False, neuron_min=10, neuron_ratio=0.2,
                 use_classification=True, child_map_fully=None, group_level_dict=None, use_average_neuron_n=False,
                 for_lr_finder=False):
        super(BioVNN, self).__init__()  # Inherited from the parent class nn.Module
        self.input_dim = input_dim
        self.feature_dim = feature_dim
        self.output_dim = output_dim
        self.use_sigmoid_output = use_sigmoid_output
        self.dropout_p = dropout_p
        self.only_combine_child_gene_group = only_combine_child_gene_group
        self.use_classification = use_classification
        self.child_map_fully = child_map_fully
        # layer_names must be a dict carrying the group-name bookkeeping.
        self.gene_group_idx = layer_names['gene_group_idx']
        self.idx_name = layer_names['idx_name']
        self.group_level_dict = group_level_dict
        self.level_neuron_ct = dict()  # neurons allocated per ontology level
        self.com_layers = nn.ModuleDict()  # per-group combination (hidden) layers
        self.bn_layers = nn.ModuleDict()  # per-group batch-norm layers
        self.output_layers = nn.ModuleDict()  # per-group auxiliary output heads
        if self.dropout_p > 0:
            self.dropout_layers = nn.ModuleDict()
        self._set_layer_names()
        self.build_order = []  # group indices in children-first (topological) order
        self.child_map = child_map
        self.neuron_min = neuron_min
        self.neuron_ratio = neuron_ratio
        self._set_layers()
        # NOTE(review): no else branch — an unrecognized act_func leaves
        # self.act_func unset and forward() would fail; confirm intended.
        if act_func.lower() == 'tanh':
            self.act_func = nn.Tanh()
        elif act_func.lower() == 'mish':
            self.act_func = Mish()
        elif act_func.lower() == 'swish' or act_func.lower() == 'silu':
            self.act_func = Swish()
        self.sigmoid = nn.Sigmoid()
        # Placeholder slots appended to the feature list in forward().
        self.output = [None] * len(self.build_order)
        if self.only_combine_child_gene_group:
            logging.info("{} gene groups do not combine gene features".format(len(self.only_combine_gene_group_dict)))
        self.for_lr_finder = for_lr_finder

    def _set_layers(self):
        """Instantiate all per-group layers and log the resulting topology."""
        neuron_n_dict = self._build_layers()
        if self.child_map_fully is not None:
            logging.info("Non-fully connected:")
        self.report_parameter_n()
        logging.debug(self.build_order)

    def _set_layer_names(self):
        """Pre-register a slot (value None) for every gene group so key order is fixed."""
        for g in self.gene_group_idx.keys():
            self.com_layers[g] = None
            self.bn_layers['bn_{}'.format(g)] = None
            self.output_layers['output_{}'.format(g)] = None
            if self.dropout_p > 0:
                self.dropout_layers['drop_{}'.format(g)] = None

    def _build_layers(self, neuron_n_dict=None):
        """Create one (Linear, BatchNorm[, Dropout], output Linear) bundle per gene group.

        Groups are built children-first: a group is skipped until every child
        group already has its combination layer, so ``self.build_order`` ends
        up topologically sorted.

        @param neuron_n_dict: optional {group index: neuron count} to reuse
        @returns: the (possibly newly filled) neuron_n_dict
        """
        neuron_to_build = list(range(len(self.child_map)))
        self.only_combine_gene_group_dict = {}
        if neuron_n_dict is None:
            neuron_n_dict = dict()
        # Outer while re-sweeps until every group is built; the inner for may
        # skip entries (it mutates the list it iterates), the re-sweep catches them.
        while len(neuron_to_build) > 0:
            for i in neuron_to_build:
                j = i + self.input_dim  # global index of this group in the features list
                children = self.child_map[i]
                child_feat = [z for z in children if z < self.input_dim]
                child_com = [self.idx_name[z] for z in children if z >= self.input_dim]
                child_none = [self.com_layers[z] for z in child_com if self.com_layers[z] is None]
                if len(child_none) > 0:
                    # Some child group is not built yet; retry on a later sweep.
                    logging.debug("Pass Gene group {} with {} children".format(j, len(children)))
                    continue
                neuron_name = self.idx_name[j]
                # Only combine child gene groups without combine gene features if there is one child gene group
                if self.only_combine_child_gene_group and len(child_com) > 0:
                    children_n = len(child_com)
                    child_feat = []
                    self.only_combine_gene_group_dict[neuron_name] = 1
                else:
                    children_n = len(children)
                    # NOTE(review): j is already offset by input_dim, so comparing it
                    # against len(self.child_map) - 1 (a group index) looks suspicious;
                    # confirm whether i was intended here.
                    if j == len(self.child_map) - 1:
                        children_n += self.input_dim
                logging.debug("Building gene group {} with {} children".format(j, len(children)))
                if i not in neuron_n_dict:
                    # Layer width: at least neuron_min, otherwise proportional to fan-in.
                    neuron_n = np.max([self.neuron_min, int(children_n * self.neuron_ratio)])
                    neuron_n_dict[i] = neuron_n
                else:
                    neuron_n = neuron_n_dict[i]
                level = self.group_level_dict[neuron_name]
                if level not in self.level_neuron_ct.keys():
                    self.level_neuron_ct[level] = neuron_n
                else:
                    self.level_neuron_ct[level] += neuron_n
                # Fan-in = raw gene features plus the widths of all child-group layers.
                total_in = int(len(child_feat) + np.sum([self.com_layers[z].out_features for z in child_com]))
                self.com_layers[neuron_name] = nn.Linear(total_in, neuron_n)
                self.bn_layers['bn_{}'.format(neuron_name)] = nn.BatchNorm1d(neuron_n)
                if self.dropout_p > 0:
                    self.dropout_layers['drop_{}'.format(neuron_name)] = nn.Dropout(self.dropout_p)
                self.output_layers['output_{}'.format(neuron_name)] = nn.Linear(neuron_n, self.output_dim)
                neuron_to_build.remove(i)
                self.build_order.append(i)
        return neuron_n_dict

    def report_parameter_n(self):
        """Log total vs. trainable parameter counts and return the trainable count."""
        total_params = sum(p.numel() for p in self.parameters())
        trainable_params = sum(p.numel() for p in self.parameters() if p.requires_grad)
        logging.info("Total {} parameters and {} are trainable".format(total_params, trainable_params))
        return trainable_params

    def forward(self, features):  # Forward pass: stacking each layer together
        """Run every gene-group layer in build order.

        @param features: list of per-gene feature tensors (or, in LR-finder
                         mode, a single tensor that is split per column)
        @returns: (pred, states) — per-group predictions and pre-BN activations;
                  in LR-finder mode only the last group's positive-class column.
        """
        if self.for_lr_finder:
            # LR finder passes one stacked tensor; split back into per-gene columns.
            features = [features[:, i].reshape(features.shape[0], -1) for i in range(features.shape[1])]
        # Extend with empty slots so group outputs can be stored at index j.
        features = features + self.output
        pred = [None] * len(self.build_order)
        states = [None] * len(self.build_order)
        for i in self.build_order:
            j = i + self.input_dim
            neuron_name = self.idx_name[j]
            com_layer = self.com_layers[neuron_name]
            bn_layer = self.bn_layers['bn_{}'.format(neuron_name)]
            children = self.child_map[i]
            if neuron_name in self.only_combine_gene_group_dict:
                # This group ignores raw gene features and only combines child groups.
                children = [z for z in children if z >= self.input_dim]
            input_list = [features[z] for z in children]
            input_mat = torch.cat(input_list, axis=1)
            features[j] = com_layer(input_mat)
            ## BN after activation
            state = self.act_func(features[j])
            states[i] = state
            features[j] = bn_layer(state)
            if self.dropout_p > 0:
                drop_layer = self.dropout_layers['drop_{}'.format(neuron_name)]
                features[j] = drop_layer(features[j])
            output_layer = self.output_layers['output_{}'.format(neuron_name)]
            if self.use_sigmoid_output:
                pred[i] = self.sigmoid(output_layer(features[j]))
            else:
                pred[i] = output_layer(features[j])
        if self.for_lr_finder:
            return pred[-1][:, 1]
        return pred, states
class FullyNet(BioVNN):
    """Fully connected baseline with the same interface as BioVNN.

    Replaces the ontology-structured hidden layers with a plain stack of dense
    layers sized so the parameter count is comparable to the BioVNN built by
    the parent constructor.
    """

    def __init__(self, input_dim, child_map, output_dim, feature_dim, act_func='Mish', use_sigmoid_output=True,
                 dropout_p=0, layer_names=None, only_combine_child_gene_group=True, neuron_min=10, neuron_ratio=0.2,
                 use_classification=True, child_map_fully=None, group_level_dict=None,
                 use_average_neuron_n=False, for_lr_finder=False):
        super(FullyNet, self).__init__(input_dim, child_map, output_dim, feature_dim, act_func, use_sigmoid_output,
                                       dropout_p, layer_names, only_combine_child_gene_group, neuron_min, neuron_ratio,
                                       use_classification, child_map_fully, group_level_dict, use_average_neuron_n,
                                       for_lr_finder)  # Inherited from the parent class nn.Module
        self.use_average_neuron_n = use_average_neuron_n
        # Match the parameter budget of the structured network just built.
        parameter_n = self.report_parameter_n()
        self._build_layers_fully(parameter_n)
        logging.info("Fully connected network:")
        self.report_parameter_n()

    def solve_neuron_n(self):
        """Solve for the per-layer neuron count that hits the parameter budget.

        NOTE(review): the coefficients (10171 inputs, 12 inner layers, 714077
        parameters) are hard-coded; the commented-out generic form below shows
        the intended formula — confirm these match the current dataset.
        """
        def func(i):
            x = i[0]
            # return [input_dim * x + (layer_n-1) * (x ** 2 + x) - parameter_n]
            return [10171 * x + 12 * (x ** 2 + x) - 714077]
        r = fsolve(func, [0])
        return int(r[0])

    def _build_layers_fully(self, parameter_n=39974, layer_n=13):
        """Replace the ontology layers with a dense stack of ``layer_n`` layers.

        @param parameter_n: trainable-parameter budget to approximate
        @param layer_n: number of hidden layers in the dense stack
        """
        # Reset layers
        self.com_layers = None
        self.fully_layers = nn.ModuleDict()
        self.bn_layers = nn.ModuleDict()
        self.output_layers = nn.ModuleDict()
        if self.dropout_p > 0:
            self.dropout_layers = nn.ModuleDict()
        self.build_order = []
        # Total available neuron number
        total_n = parameter_n // self.input_dim
        if total_n / float(layer_n) < 1:  # only need one hidden layer
            self.build_order.append(0)
            self.fully_layers['fully_0'] = nn.Linear(self.input_dim, total_n)
            self.bn_layers['bn_0'] = nn.BatchNorm1d(total_n)
            if self.dropout_p > 0:
                self.dropout_layers['drop_0'] = nn.Dropout(self.dropout_p)
            self.output_layers['output_0'] = nn.Linear(total_n, self.output_dim)
        else:
            neuron_per_layer = self.solve_neuron_n()
            if self.use_average_neuron_n:
                logging.info(
                    "The fully connected network has {} neurons per layer for total {} layers".format(neuron_per_layer,
                                                                                                      layer_n))
            # neuron_per_layer = total_n // layer_n
            for i in range(layer_n):
                self.build_order.append(i)
                if self.use_average_neuron_n:
                    # Uniform width taken from solve_neuron_n().
                    if i == 0:
                        self.fully_layers['fully_' + str(i)] = nn.Linear(self.input_dim, neuron_per_layer)
                    else:
                        self.fully_layers['fully_' + str(i)] = nn.Linear(neuron_per_layer, neuron_per_layer)
                    self.bn_layers['bn_' + str(i)] = nn.BatchNorm1d(neuron_per_layer)
                    self.output_layers['output_' + str(i)] = nn.Linear(neuron_per_layer, self.output_dim)
                else:
                    # Mirror BioVNN's per-level widths recorded in level_neuron_ct.
                    if i == 0:
                        in_n = self.input_dim
                        out_n = self.level_neuron_ct[i + 1]
                    else:
                        in_n = self.level_neuron_ct[i]
                        out_n = self.level_neuron_ct[i + 1]
                    self.fully_layers['fully_' + str(i)] = nn.Linear(in_n, out_n)
                    logging.info("The fully connected network layer {} has {} neurons".format(i, out_n))
                    self.bn_layers['bn_' + str(i)] = nn.BatchNorm1d(out_n)
                    self.output_layers['output_' + str(i)] = nn.Linear(out_n, self.output_dim)
                if self.dropout_p > 0:
                    self.dropout_layers['drop_' + str(i)] = nn.Dropout(self.dropout_p)
        self.output = [None] * len(self.build_order)

    def forward(self, features):  # Forward pass: stacking each layer together
        """Run the dense stack; layer 0 consumes all gene features concatenated,
        each later layer consumes the previous layer's (post-BN) output.

        @returns: (pred, states) per layer, or the last layer's positive-class
                  column in LR-finder mode.
        """
        if self.for_lr_finder:
            features = [features[:, i].reshape(features.shape[0], -1) for i in range(features.shape[1])]
        features = features + self.output
        pred = [None] * len(self.build_order)
        states = [None] * len(self.build_order)
        for i in self.build_order:
            j = i + self.input_dim
            neuron_name = i
            fully_layer = self.fully_layers['fully_{}'.format(neuron_name)]
            bn_layer = self.bn_layers['bn_{}'.format(neuron_name)]
            if i == 0:
                input_list = [features[z] for z in range(self.input_dim)]
                input_mat = torch.cat(input_list, axis=1)
            else:
                input_mat = features[j - 1]
            features[j] = fully_layer(input_mat)
            ## BN after activation
            state = self.act_func(features[j])
            states[i] = state
            features[j] = bn_layer(state)
            if self.dropout_p > 0:
                drop_layer = self.dropout_layers['drop_{}'.format(neuron_name)]
                features[j] = drop_layer(features[j])
            output_layer = self.output_layers['output_{}'.format(neuron_name)]
            if self.use_sigmoid_output:
                pred[i] = self.sigmoid(output_layer(features[j]))
            else:
                pred[i] = output_layer(features[j])
        if self.for_lr_finder:
            return pred[-1][:, 1]
        return pred, states
class DepMapDataset(TensorDataset):
    """Dataset of (cell-line features, per-gene dependency label) items.

    Each item corresponds to one (sample i, gene j) pair; the flat dataset
    index is mapped to (i, j) through ``idx_list``.

    NOTE(review): TensorDataset.__init__ is never called — the class only
    reuses the TensorDataset type, not its tensor storage; confirm intended.
    """

    def __init__(self, features, labels, community_filter=None, use_genomic_info=True, dropout_p=0,
                 use_deletion_vector=True, sample_class_weight_neg=None, sample_class_weight_pos=None, no_weight=False,
                 use_cuda=True):
        self.use_cuda = use_cuda
        if use_cuda:
            self.features = torch.from_numpy(features).cuda()
            self.labels = torch.from_numpy(labels).cuda()
        else:
            self.features = torch.from_numpy(features)
            self.labels = torch.from_numpy(labels)
        # Flat index -> (sample row, label column) lookup table.
        self.idx_list = [(y, z) for y in range(self.features.shape[0]) for z in range(self.labels.shape[1])]
        if community_filter is not None:
            # One filter tensor per gene/community, multiplied onto the features.
            if use_cuda:
                self.community_filter = [torch.from_numpy(z).cuda() for z in community_filter]
            else:
                self.community_filter = [torch.from_numpy(z) for z in community_filter]
        else:
            self.community_filter = community_filter
        self.use_deletion_vector = use_deletion_vector
        # Template one-hot vector marking which gene is deleted; cloned per item.
        if use_cuda:
            self.deletion_array = torch.zeros(self.labels.shape[1], dtype=self.features.dtype).cuda()
        else:
            self.deletion_array = torch.zeros(self.labels.shape[1], dtype=self.features.dtype)
        self.use_genomic_info = use_genomic_info
        self.dropout_p = dropout_p
        self.dropout_layer = nn.Dropout(self.dropout_p)
        self.sample_class_weight_neg = sample_class_weight_neg
        self.sample_class_weight_pos = sample_class_weight_pos
        self.no_weight = no_weight

    def __len__(self):
        # One item per (sample, gene) pair.
        return self.features.shape[0] * self.labels.shape[1]

    def __getitem__(self, idx):
        """Return (X_input, y_label[, sample_class_weight]) for flat index idx."""
        i, j = self.idx_list[idx]
        if self.use_genomic_info:
            feat = self.features[i]
            if self.community_filter is not None:
                feat = self._apply_filter(feat, j)
            if self.dropout_p > 0:
                feat = self.dropout_layer(feat)
        else:
            # Ablation: replace genomic features with zeros of the same shape.
            if self.use_cuda:
                feat = torch.zeros(self.features[i].shape, dtype=self.features[i].dtype).cuda()
            else:
                feat = torch.zeros(self.features[i].shape, dtype=self.features[i].dtype)
        if self.use_deletion_vector:
            # Append a one-hot vector marking gene j as deleted.
            deletion_array = self.deletion_array.clone()
            deletion_array[j] = 1
            X_input = torch.cat([feat, deletion_array])
        else:
            X_input = feat
        y_label = self.labels[i, j]
        if self.no_weight:
            return X_input, y_label
        if self.sample_class_weight_pos is not None:
            # Per-gene class weights, chosen by thresholding the label at 0.5.
            if y_label < 0.5:
                sample_class_weight = self.sample_class_weight_neg[j]
            else:
                sample_class_weight = self.sample_class_weight_pos[j]
        else:
            sample_class_weight = 1
        return X_input, y_label, sample_class_weight

    def _apply_filter(self, feat, j):
        # Mask/scale the feature vector with gene j's community filter.
        return feat * self.community_filter[j]
class SequentialResumeSampler(SequentialSampler):
    r"""Sequential sampler that can resume part-way through a dataset.

    Yields indices ``resume_idx, resume_idx + 1, ..., len(data_source) - 1``
    in order, i.e. a plain sequential pass that skips the first
    ``resume_idx`` elements.

    Arguments:
        data_source (Dataset): dataset to sample from
        resume_idx (int): first index to yield (0 resumes from the start)
    """

    def __init__(self, data_source, resume_idx=0):
        super().__init__(data_source)
        self.resume_idx = resume_idx

    def __iter__(self):
        n_total = len(self.data_source)
        return iter(range(self.resume_idx, n_total))

    def __len__(self):
        return len(self.data_source) - self.resume_idx
| [
"torch.nn.Dropout",
"torch.from_numpy",
"logging.debug",
"numpy.sum",
"torch.zeros_like",
"math.sqrt",
"torch.nn.Tanh",
"torch.nn.BatchNorm1d",
"scipy.optimize.fsolve",
"torch.cat",
"logging.info",
"torch.nn.ModuleDict",
"torch.empty_like",
"torch.nn.Linear",
"torch.zeros",
"torch.nn.f... | [((8882, 8897), 'torch.nn.ModuleDict', 'nn.ModuleDict', ([], {}), '()\n', (8895, 8897), True, 'import torch.nn as nn\n'), ((8923, 8938), 'torch.nn.ModuleDict', 'nn.ModuleDict', ([], {}), '()\n', (8936, 8938), True, 'import torch.nn as nn\n'), ((8968, 8983), 'torch.nn.ModuleDict', 'nn.ModuleDict', ([], {}), '()\n', (8981, 8983), True, 'import torch.nn as nn\n'), ((9551, 9563), 'torch.nn.Sigmoid', 'nn.Sigmoid', ([], {}), '()\n', (9561, 9563), True, 'import torch.nn as nn\n'), ((10035, 10066), 'logging.debug', 'logging.debug', (['self.build_order'], {}), '(self.build_order)\n', (10048, 10066), False, 'import logging\n'), ((15963, 16003), 'logging.info', 'logging.info', (['"""Fully connected network:"""'], {}), "('Fully connected network:')\n", (15975, 16003), False, 'import logging\n'), ((16264, 16281), 'scipy.optimize.fsolve', 'fsolve', (['func', '[0]'], {}), '(func, [0])\n', (16270, 16281), False, 'from scipy.optimize import fsolve\n'), ((16456, 16471), 'torch.nn.ModuleDict', 'nn.ModuleDict', ([], {}), '()\n', (16469, 16471), True, 'import torch.nn as nn\n'), ((16497, 16512), 'torch.nn.ModuleDict', 'nn.ModuleDict', ([], {}), '()\n', (16510, 16512), True, 'import torch.nn as nn\n'), ((16542, 16557), 'torch.nn.ModuleDict', 'nn.ModuleDict', ([], {}), '()\n', (16555, 16557), True, 'import torch.nn as nn\n'), ((22011, 22037), 'torch.nn.Dropout', 'nn.Dropout', (['self.dropout_p'], {}), '(self.dropout_p)\n', (22021, 22037), True, 'import torch.nn as nn\n'), ((9049, 9064), 'torch.nn.ModuleDict', 'nn.ModuleDict', ([], {}), '()\n', (9062, 9064), True, 'import torch.nn as nn\n'), ((9334, 9343), 'torch.nn.Tanh', 'nn.Tanh', ([], {}), '()\n', (9341, 9343), True, 'import torch.nn as nn\n'), ((9956, 9992), 'logging.info', 'logging.info', (['"""Non-fully connected:"""'], {}), "('Non-fully connected:')\n", (9968, 9992), False, 'import logging\n'), ((14194, 14223), 'torch.cat', 'torch.cat', (['input_list'], {'axis': '(1)'}), '(input_list, axis=1)\n', (14203, 14223), 
False, 'import torch\n'), ((16623, 16638), 'torch.nn.ModuleDict', 'nn.ModuleDict', ([], {}), '()\n', (16636, 16638), True, 'import torch.nn as nn\n'), ((16910, 16944), 'torch.nn.Linear', 'nn.Linear', (['self.input_dim', 'total_n'], {}), '(self.input_dim, total_n)\n', (16919, 16944), True, 'import torch.nn as nn\n'), ((16982, 17005), 'torch.nn.BatchNorm1d', 'nn.BatchNorm1d', (['total_n'], {}), '(total_n)\n', (16996, 17005), True, 'import torch.nn as nn\n'), ((17161, 17196), 'torch.nn.Linear', 'nn.Linear', (['total_n', 'self.output_dim'], {}), '(total_n, self.output_dim)\n', (17170, 17196), True, 'import torch.nn as nn\n'), ((21090, 21116), 'torch.from_numpy', 'torch.from_numpy', (['features'], {}), '(features)\n', (21106, 21116), False, 'import torch\n'), ((21143, 21167), 'torch.from_numpy', 'torch.from_numpy', (['labels'], {}), '(labels)\n', (21159, 21167), False, 'import torch\n'), ((21837, 21897), 'torch.zeros', 'torch.zeros', (['self.labels.shape[1]'], {'dtype': 'self.features.dtype'}), '(self.labels.shape[1], dtype=self.features.dtype)\n', (21848, 21897), False, 'import torch\n'), ((23002, 23035), 'torch.cat', 'torch.cat', (['[feat, deletion_array]'], {}), '([feat, deletion_array])\n', (23011, 23035), False, 'import torch\n'), ((690, 703), 'torch.nn.functional.softplus', 'F.softplus', (['x'], {}), '(x)\n', (700, 703), True, 'import torch.nn.functional as F\n'), ((12549, 12578), 'torch.nn.Linear', 'nn.Linear', (['total_in', 'neuron_n'], {}), '(total_in, neuron_n)\n', (12558, 12578), True, 'import torch.nn as nn\n'), ((12641, 12665), 'torch.nn.BatchNorm1d', 'nn.BatchNorm1d', (['neuron_n'], {}), '(neuron_n)\n', (12655, 12665), True, 'import torch.nn as nn\n'), ((12875, 12911), 'torch.nn.Linear', 'nn.Linear', (['neuron_n', 'self.output_dim'], {}), '(neuron_n, self.output_dim)\n', (12884, 12911), True, 'import torch.nn as nn\n'), ((17089, 17115), 'torch.nn.Dropout', 'nn.Dropout', (['self.dropout_p'], {}), '(self.dropout_p)\n', (17099, 17115), True, 'import torch.nn 
as nn\n'), ((19768, 19797), 'torch.cat', 'torch.cat', (['input_list'], {'axis': '(1)'}), '(input_list, axis=1)\n', (19777, 19797), False, 'import torch\n'), ((22786, 22851), 'torch.zeros', 'torch.zeros', (['self.features[i].shape'], {'dtype': 'self.features[i].dtype'}), '(self.features[i].shape, dtype=self.features[i].dtype)\n', (22797, 22851), False, 'import torch\n'), ((5049, 5078), 'torch.zeros_like', 'torch.zeros_like', (['p_data_fp32'], {}), '(p_data_fp32)\n', (5065, 5078), False, 'import torch\n'), ((5121, 5150), 'torch.zeros_like', 'torch.zeros_like', (['p_data_fp32'], {}), '(p_data_fp32)\n', (5137, 5150), False, 'import torch\n'), ((5261, 5285), 'torch.empty_like', 'torch.empty_like', (['p.data'], {}), '(p.data)\n', (5277, 5285), False, 'import torch\n'), ((12778, 12804), 'torch.nn.Dropout', 'nn.Dropout', (['self.dropout_p'], {}), '(self.dropout_p)\n', (12788, 12804), True, 'import torch.nn as nn\n'), ((18072, 18104), 'torch.nn.BatchNorm1d', 'nn.BatchNorm1d', (['neuron_per_layer'], {}), '(neuron_per_layer)\n', (18086, 18104), True, 'import torch.nn as nn\n'), ((18166, 18210), 'torch.nn.Linear', 'nn.Linear', (['neuron_per_layer', 'self.output_dim'], {}), '(neuron_per_layer, self.output_dim)\n', (18175, 18210), True, 'import torch.nn as nn\n'), ((18570, 18592), 'torch.nn.Linear', 'nn.Linear', (['in_n', 'out_n'], {}), '(in_n, out_n)\n', (18579, 18592), True, 'import torch.nn as nn\n'), ((18751, 18772), 'torch.nn.BatchNorm1d', 'nn.BatchNorm1d', (['out_n'], {}), '(out_n)\n', (18765, 18772), True, 'import torch.nn as nn\n'), ((18834, 18867), 'torch.nn.Linear', 'nn.Linear', (['out_n', 'self.output_dim'], {}), '(out_n, self.output_dim)\n', (18843, 18867), True, 'import torch.nn as nn\n'), ((18968, 18994), 'torch.nn.Dropout', 'nn.Dropout', (['self.dropout_p'], {}), '(self.dropout_p)\n', (18978, 18994), True, 'import torch.nn as nn\n'), ((20956, 20982), 'torch.from_numpy', 'torch.from_numpy', (['features'], {}), '(features)\n', (20972, 20982), False, 'import 
torch\n'), ((21016, 21040), 'torch.from_numpy', 'torch.from_numpy', (['labels'], {}), '(labels)\n', (21032, 21040), False, 'import torch\n'), ((21497, 21516), 'torch.from_numpy', 'torch.from_numpy', (['z'], {}), '(z)\n', (21513, 21516), False, 'import torch\n'), ((21721, 21781), 'torch.zeros', 'torch.zeros', (['self.labels.shape[1]'], {'dtype': 'self.features.dtype'}), '(self.labels.shape[1], dtype=self.features.dtype)\n', (21732, 21781), False, 'import torch\n'), ((12440, 12500), 'numpy.sum', 'np.sum', (['[self.com_layers[z].out_features for z in child_com]'], {}), '([self.com_layers[z].out_features for z in child_com])\n', (12446, 12500), True, 'import numpy as np\n'), ((17840, 17883), 'torch.nn.Linear', 'nn.Linear', (['self.input_dim', 'neuron_per_layer'], {}), '(self.input_dim, neuron_per_layer)\n', (17849, 17883), True, 'import torch.nn as nn\n'), ((17973, 18018), 'torch.nn.Linear', 'nn.Linear', (['neuron_per_layer', 'neuron_per_layer'], {}), '(neuron_per_layer, neuron_per_layer)\n', (17982, 18018), True, 'import torch.nn as nn\n'), ((22672, 22737), 'torch.zeros', 'torch.zeros', (['self.features[i].shape'], {'dtype': 'self.features[i].dtype'}), '(self.features[i].shape, dtype=self.features[i].dtype)\n', (22683, 22737), False, 'import torch\n'), ((6508, 6620), 'math.sqrt', 'math.sqrt', (['((1 - beta2_t) * (N_sma - 4) / (N_sma_max - 4) * (N_sma - 2) / N_sma *\n N_sma_max / (N_sma_max - 2))'], {}), '((1 - beta2_t) * (N_sma - 4) / (N_sma_max - 4) * (N_sma - 2) /\n N_sma * N_sma_max / (N_sma_max - 2))\n', (6517, 6620), False, 'import math\n'), ((21384, 21403), 'torch.from_numpy', 'torch.from_numpy', (['z'], {}), '(z)\n', (21400, 21403), False, 'import torch\n')] |
import numpy as np
def degrees_to_radians(degrees):
    """
    Convert an angle from degrees to radians
    @param: degrees - angle (scalar or array-like) in degrees
    @returns: the same angle expressed in radians
    """
    full_turn = 2 * np.pi
    return (degrees / 360.0) * full_turn
def V2T(V):
    """
    V2T converts 1x6 vector into 4x4 transformation matrix
    @param: V - 1x6 vector of form [x,y,z,rx,ry,rz] where x,y,z is the
            translation and rx,ry,rz is an angle-axis rotation (unit axis
            scaled by the rotation angle about it)
    @returns: T - a standard 4x4 transformation matrix
    """
    assert V.shape == (1, 6)
    transform = np.eye(4)
    transform[0:3, 3] = V[0, 0:3]        # translation column
    transform[0:3, 0:3] = V2R(V[0, 3:6])  # rotation block
    return transform
def V2R(V):
    """
    V2R converts a 1x3 angle-axis vector into a 3x3 rotation matrix
    @param: V - 1x3 vector of form [rx,ry,rz] where rx,ry,rz is an angle-axis
           representation of the angle where the unit vector representing the axis
           has been multipled by the angle of rotation about it.
    @returns: R - a standard 3x3 rotation matrix
    """
    V = V.transpose()
    s = np.linalg.norm(V)
    if s == 0:
        # Zero rotation vector -> identity rotation.
        # Fix: original used bare `eye(3)`, which is undefined (NameError);
        # the numpy function is np.eye.
        R = np.eye(3)
    else:
        V = V[:] / s          # unit axis
        V = V.reshape(3, 1)
        V = np.insert(V, 3, s)  # flattens to [ax, ay, az, angle]
        R = vrrotvec2mat(V)
    return R
def T2V(T):
    """
    T2V converts 4x4 transformation matrix into a 1x6 vector
    @param: T - a standard 4x4 transformation matrix
    @returns: V - 1x6 vector of form [x,y,z,rx,ry,rz] where x,y,z is the
              translation and rx,ry,rz is an angle-axis rotation (unit axis
              scaled by the rotation angle about it)
    """
    assert T.shape == (4, 4)
    vec = np.zeros((1, 6))
    vec[0, 0:3] = T[0:3, 3]          # translation column
    vec[0, 3:6] = R2V(T[0:3, 0:3])   # rotation block
    return vec
def R2V(R):
    """
    R2V converts 3x3 rotation matrix into a 1x3 angle-axis vector
    @param: R - a standard 3x3 rotation matrix
    @returns: V - 1x3 vector of form [rx,ry,rz] where rx,ry,rz is an angle-axis
             representation of the angle where the unit vector representing the axis
             has been multipled by the angle of rotation about it
    """
    assert R.shape == (3, 3)
    # vrrotmat2vec returns a (5, 1) column: rows 0-2 axis, row 3 angle,
    # row 4 proper/improper flag.
    ax_ang = vrrotmat2vec(R)
    # Fix: original computed R[0, 0:3] * R[0, 2], which indexes the (5, 1)
    # result out of bounds and does not match the documented axis*angle
    # contract; scale the axis (rows 0-2) by the angle (row 3) instead.
    V = ax_ang[0:3, 0] * ax_ang[3, 0]
    return V
def vrrotvec2mat(ax_ang):
    """
    Build the 3x3 rotation matrix (or a stack of them) for a rotation about a
    general axis by a specified angle.

    Accepts a 4-element [ax, ay, az, angle] or 5-element
    [ax, ay, az, angle, proper_flag] vector, or a 5xn / nx5 array of such
    vectors; the flag -1 marks an improper rotation (matrix negated).
    """
    # --- Normalise the input into a (5, n) array and remember n. ---
    if ax_ang.ndim == 1:
        n_elem = np.size(ax_ang)
        if n_elem == 5:
            ax_ang = np.reshape(ax_ang, (5, 1))
        elif n_elem == 4:
            # Missing proper/improper flag: assume proper (+1).
            ax_ang = np.reshape(np.hstack((ax_ang, np.array([1]))), (5, 1))
        else:
            raise Exception('Wrong Input Type')
        msz = 1
    elif ax_ang.ndim == 2:
        if np.shape(ax_ang)[0] == 5:
            msz = np.shape(ax_ang)[1]
        elif np.shape(ax_ang)[1] == 5:
            ax_ang = ax_ang.transpose()
            msz = np.shape(ax_ang)[1]
        else:
            raise Exception('Wrong Inpuqt Type')
    else:
        raise Exception('Wrong Input Type')

    # --- Unit axis components and trig terms, vectorised over all rotations. ---
    axes = np.array(ax_ang[0:3, :], dtype=np.float64)
    axes /= np.linalg.norm(axes, axis=0)
    x = axes[0, :]
    y = axes[1, :]
    z = axes[2, :]
    angle = ax_ang[3, :]
    c = np.cos(angle)
    s = np.sin(angle)
    tc = 1 - c

    # Rodrigues' rotation formula: one row per rotation, 9 columns holding the
    # flattened 3x3 matrix in row-major order.
    mtx = np.column_stack((tc*x*x + c, tc*x*y - s*z, tc*x*z + s*y,
                           tc*x*y + s*z, tc*y*y + c, tc*y*z - s*x,
                           tc*x*z - s*y, tc*y*z + s*x, tc*z*z + c))

    # Improper rotations (flag == -1) have their matrices negated.
    improper = np.where(ax_ang[4, :] == -1)
    mtx[improper, :] = -mtx[improper, :]

    if msz == 1:
        return mtx.reshape(3, 3)
    return mtx.reshape(msz, 3, 3)
def vrrotmat2vec(mat1, rot_type='proper'):
    """
    Create an axis-angle np.array from Rotation Matrix:
    ====================
    @param mat: The nx3x3 rotation matrices to convert
    @type mat: nx3x3 numpy array
    @param rot_type: 'improper' if there is a possibility of
                     having improper matrices in the input,
                     'proper' otherwise. 'proper' by default
    @type rot_type: string ('proper' or 'improper')
    @return: The 3D rotation axis and angle (ax_ang)
             5 entries:
                First 3: axis
                4: angle
                5: 1 for proper and -1 for improper
    @rtype: numpy 5xn array
    """
    # Work on a copy: 'improper' mode may negate input matrices in place.
    mat = np.copy(mat1)
    if mat.ndim == 2:
        if np.shape(mat) == (3, 3):
            mat = np.copy(np.reshape(mat, (1, 3, 3)))
        else:
            raise Exception('Wrong Input Typef')
    elif mat.ndim == 3:
        if np.shape(mat)[1:] != (3, 3):
            raise Exception('Wrong Input Typez')
    else:
        raise Exception('Wrong Input Type')
    msz = np.shape(mat)[0]
    ax_ang = np.zeros((5, msz))
    epsilon = 1e-12
    if rot_type == 'proper':
        # All matrices assumed proper: flag row is all ones.
        ax_ang[4, :] = np.ones(np.shape(ax_ang[4, :]))
    elif rot_type == 'improper':
        # Classify each matrix by determinant; improper ones (det == -1) are
        # negated so the axis-angle extraction below sees a proper rotation.
        for i in range(msz):
            det1 = np.linalg.det(mat[i, :, :])
            if abs(det1 - 1) < epsilon:
                ax_ang[4, i] = 1
            elif abs(det1 + 1) < epsilon:
                ax_ang[4, i] = -1
                mat[i, :, :] = -mat[i, :, :]
            else:
                raise Exception('Matrix is not a rotation: |det| != 1')
    else:
        raise Exception('Wrong Input parameter for rot_type')
    mtrc = mat[:, 0, 0] + mat[:, 1, 1] + mat[:, 2, 2]
    # Case 1: trace == 3 -> angle 0 (identity); axis is arbitrary, use [0,1,0].
    ind1 = np.where(abs(mtrc - 3) <= epsilon)[0]
    ind1_sz = np.size(ind1)
    if np.size(ind1) > 0:
        ax_ang[:4, ind1] = np.tile(np.array([0, 1, 0, 0]), (ind1_sz, 1)).transpose()
    # Case 2: trace == -1 -> angle pi (singular case).
    ind2 = np.where(abs(mtrc + 1) <= epsilon)[0]
    ind2_sz = np.size(ind2)
    if ind2_sz > 0:
        # phi = pi
        # This singularity requires elaborate sign ambiguity resolution
        # Compute axis of rotation, make sure all elements >= 0
        # real signs are obtained by flipping algorithm below
        diag_elems = np.concatenate((mat[ind2, 0, 0].reshape(ind2_sz, 1),
                                     mat[ind2, 1, 1].reshape(ind2_sz, 1),
                                     mat[ind2, 2, 2].reshape(ind2_sz, 1)), axis=1)
        axis = np.sqrt(np.maximum((diag_elems + 1)/2, np.zeros((ind2_sz, 3))))
        # axis elements that are <= epsilon are set to zero
        axis = axis*((axis > epsilon).astype(int))
        # Flipping
        #
        # The algorithm uses the elements above diagonal to determine the signs
        # of rotation axis coordinate in the singular case Phi = pi.
        # All valid combinations of 0, positive and negative values lead to
        # 3 different cases:
        # If (Sum(signs)) >= 0 ... leave all coordinates positive
        # If (Sum(signs)) == -1 and all values are non-zero
        #   ... flip the coordinate that is missing in the term that has + sign,
        #       e.g. if 2AyAz is positive, flip x
        # If (Sum(signs)) == -1 and 2 values are zero
        #   ... flip the coord next to the one with non-zero value
        #   ... ambiguous, we have chosen shift right
        # construct vector [M23 M13 M12] ~ [2AyAz 2AxAz 2AxAy]
        # (in the order to facilitate flipping):    ^
        #                                  [no_x  no_y  no_z ]
        m_upper = np.concatenate((mat[ind2, 1, 2].reshape(ind2_sz, 1),
                                  mat[ind2, 0, 2].reshape(ind2_sz, 1),
                                  mat[ind2, 0, 1].reshape(ind2_sz, 1)), axis=1)
        # elements with || smaller than epsilon are considered to be zero
        signs = np.sign(m_upper)*((abs(m_upper) > epsilon).astype(int))
        sum_signs = np.sum(signs, axis=1)
        # t1 encodes which of the three flipping cases applies per matrix:
        # 1 -> keep all positive, 2 -> flip per -signs, 0 -> shifted flip.
        t1 = np.zeros(ind2_sz,)
        tind1 = np.where(sum_signs >= 0)[0]
        t1[tind1] = np.ones(np.shape(tind1))
        tind2 = np.where(np.all(np.vstack(((np.any(signs == 0, axis=1) == False), t1 == 0)), axis=0))[0]
        t1[tind2] = 2*np.ones(np.shape(tind2))
        tind3 = np.where(t1 == 0)[0]
        flip = np.zeros((ind2_sz, 3))
        flip[tind1, :] = np.ones((np.shape(tind1)[0], 3))
        flip[tind2, :] = np.copy(-signs[tind2, :])
        t2 = np.copy(signs[tind3, :])
        shifted = np.column_stack((t2[:, 2], t2[:, 0], t2[:, 1]))
        flip[tind3, :] = np.copy(shifted + (shifted == 0).astype(int))
        axis = axis*flip
        ax_ang[:4, ind2] = np.vstack((axis.transpose(), np.pi*(np.ones((1, ind2_sz)))))
    # Case 3: generic rotation (trace strictly between -1 and 3).
    ind3 = np.where(np.all(np.vstack((abs(mtrc + 1) > epsilon, abs(mtrc - 3) > epsilon)), axis=0))[0]
    ind3_sz = np.size(ind3)
    if ind3_sz > 0:
        phi = np.arccos((mtrc[ind3]-1)/2)
        den = 2*np.sin(phi)
        # Axis from the antisymmetric part of the matrix.
        a1 = (mat[ind3, 2, 1]-mat[ind3, 1, 2])/den
        a2 = (mat[ind3, 0, 2]-mat[ind3, 2, 0])/den
        a3 = (mat[ind3, 1, 0]-mat[ind3, 0, 1])/den
        axis = np.column_stack((a1, a2, a3))
        ax_ang[:4, ind3] = np.vstack((axis.transpose(), phi.transpose()))
    return ax_ang
| [
"numpy.sum",
"numpy.ones",
"numpy.shape",
"numpy.sin",
"numpy.linalg.norm",
"numpy.copy",
"numpy.insert",
"numpy.reshape",
"numpy.column_stack",
"numpy.linalg.det",
"numpy.arccos",
"numpy.size",
"numpy.cos",
"numpy.zeros",
"numpy.any",
"numpy.where",
"numpy.array",
"numpy.sign",
... | [((631, 640), 'numpy.eye', 'np.eye', (['(4)'], {}), '(4)\n', (637, 640), True, 'import numpy as np\n'), ((1122, 1139), 'numpy.linalg.norm', 'np.linalg.norm', (['V'], {}), '(V)\n', (1136, 1139), True, 'import numpy as np\n'), ((1759, 1775), 'numpy.zeros', 'np.zeros', (['(1, 6)'], {}), '((1, 6))\n', (1767, 1775), True, 'import numpy as np\n'), ((3225, 3262), 'numpy.array', 'np.array', (['direction'], {'dtype': 'np.float64'}), '(direction, dtype=np.float64)\n', (3233, 3262), True, 'import numpy as np\n'), ((3273, 3298), 'numpy.linalg.norm', 'np.linalg.norm', (['d'], {'axis': '(0)'}), '(d, axis=0)\n', (3287, 3298), True, 'import numpy as np\n'), ((3359, 3372), 'numpy.cos', 'np.cos', (['angle'], {}), '(angle)\n', (3365, 3372), True, 'import numpy as np\n'), ((3382, 3395), 'numpy.sin', 'np.sin', (['angle'], {}), '(angle)\n', (3388, 3395), True, 'import numpy as np\n'), ((3650, 3721), 'numpy.column_stack', 'np.column_stack', (['(mt11, mt12, mt13, mt21, mt22, mt23, mt31, mt32, mt33)'], {}), '((mt11, mt12, mt13, mt21, mt22, mt23, mt31, mt32, mt33))\n', (3665, 3721), True, 'import numpy as np\n'), ((3737, 3765), 'numpy.where', 'np.where', (['(ax_ang[4, :] == -1)'], {}), '(ax_ang[4, :] == -1)\n', (3745, 3765), True, 'import numpy as np\n'), ((4661, 4674), 'numpy.copy', 'np.copy', (['mat1'], {}), '(mat1)\n', (4668, 4674), True, 'import numpy as np\n'), ((5071, 5089), 'numpy.zeros', 'np.zeros', (['(5, msz)'], {}), '((5, msz))\n', (5079, 5089), True, 'import numpy as np\n'), ((5806, 5819), 'numpy.size', 'np.size', (['ind1'], {}), '(ind1)\n', (5813, 5819), True, 'import numpy as np\n'), ((6002, 6015), 'numpy.size', 'np.size', (['ind2'], {}), '(ind2)\n', (6009, 6015), True, 'import numpy as np\n'), ((8913, 8926), 'numpy.size', 'np.size', (['ind3'], {}), '(ind3)\n', (8920, 8926), True, 'import numpy as np\n'), ((1250, 1268), 'numpy.insert', 'np.insert', (['V', '(3)', 's'], {}), '(V, 3, s)\n', (1259, 1268), True, 'import numpy as np\n'), ((5040, 5053), 'numpy.shape', 
'np.shape', (['mat'], {}), '(mat)\n', (5048, 5053), True, 'import numpy as np\n'), ((5828, 5841), 'numpy.size', 'np.size', (['ind1'], {}), '(ind1)\n', (5835, 5841), True, 'import numpy as np\n'), ((8002, 8023), 'numpy.sum', 'np.sum', (['signs'], {'axis': '(1)'}), '(signs, axis=1)\n', (8008, 8023), True, 'import numpy as np\n'), ((8038, 8055), 'numpy.zeros', 'np.zeros', (['ind2_sz'], {}), '(ind2_sz)\n', (8046, 8055), True, 'import numpy as np\n'), ((8360, 8382), 'numpy.zeros', 'np.zeros', (['(ind2_sz, 3)'], {}), '((ind2_sz, 3))\n', (8368, 8382), True, 'import numpy as np\n'), ((8468, 8493), 'numpy.copy', 'np.copy', (['(-signs[tind2, :])'], {}), '(-signs[tind2, :])\n', (8475, 8493), True, 'import numpy as np\n'), ((8510, 8534), 'numpy.copy', 'np.copy', (['signs[tind3, :]'], {}), '(signs[tind3, :])\n', (8517, 8534), True, 'import numpy as np\n'), ((8556, 8603), 'numpy.column_stack', 'np.column_stack', (['(t2[:, 2], t2[:, 0], t2[:, 1])'], {}), '((t2[:, 2], t2[:, 0], t2[:, 1]))\n', (8571, 8603), True, 'import numpy as np\n'), ((8963, 8994), 'numpy.arccos', 'np.arccos', (['((mtrc[ind3] - 1) / 2)'], {}), '((mtrc[ind3] - 1) / 2)\n', (8972, 8994), True, 'import numpy as np\n'), ((9192, 9221), 'numpy.column_stack', 'np.column_stack', (['(a1, a2, a3)'], {}), '((a1, a2, a3))\n', (9207, 9221), True, 'import numpy as np\n'), ((2518, 2533), 'numpy.size', 'np.size', (['ax_ang'], {}), '(ax_ang)\n', (2525, 2533), True, 'import numpy as np\n'), ((2562, 2588), 'numpy.reshape', 'np.reshape', (['ax_ang', '(5, 1)'], {}), '(ax_ang, (5, 1))\n', (2572, 2588), True, 'import numpy as np\n'), ((4710, 4723), 'numpy.shape', 'np.shape', (['mat'], {}), '(mat)\n', (4718, 4723), True, 'import numpy as np\n'), ((5175, 5197), 'numpy.shape', 'np.shape', (['ax_ang[4, :]'], {}), '(ax_ang[4, :])\n', (5183, 5197), True, 'import numpy as np\n'), ((7923, 7939), 'numpy.sign', 'np.sign', (['m_upper'], {}), '(m_upper)\n', (7930, 7939), True, 'import numpy as np\n'), ((8074, 8098), 'numpy.where', 'np.where', 
(['(sum_signs >= 0)'], {}), '(sum_signs >= 0)\n', (8082, 8098), True, 'import numpy as np\n'), ((8131, 8146), 'numpy.shape', 'np.shape', (['tind1'], {}), '(tind1)\n', (8139, 8146), True, 'import numpy as np\n'), ((8323, 8340), 'numpy.where', 'np.where', (['(t1 == 0)'], {}), '(t1 == 0)\n', (8331, 8340), True, 'import numpy as np\n'), ((9008, 9019), 'numpy.sin', 'np.sin', (['phi'], {}), '(phi)\n', (9014, 9019), True, 'import numpy as np\n'), ((2624, 2639), 'numpy.size', 'np.size', (['ax_ang'], {}), '(ax_ang)\n', (2631, 2639), True, 'import numpy as np\n'), ((4762, 4788), 'numpy.reshape', 'np.reshape', (['mat', '(1, 3, 3)'], {}), '(mat, (1, 3, 3))\n', (4772, 4788), True, 'import numpy as np\n'), ((5283, 5310), 'numpy.linalg.det', 'np.linalg.det', (['mat[i, :, :]'], {}), '(mat[i, :, :])\n', (5296, 5310), True, 'import numpy as np\n'), ((6549, 6571), 'numpy.zeros', 'np.zeros', (['(ind2_sz, 3)'], {}), '((ind2_sz, 3))\n', (6557, 6571), True, 'import numpy as np\n'), ((8287, 8302), 'numpy.shape', 'np.shape', (['tind2'], {}), '(tind2)\n', (8295, 8302), True, 'import numpy as np\n'), ((2848, 2864), 'numpy.shape', 'np.shape', (['ax_ang'], {}), '(ax_ang)\n', (2856, 2864), True, 'import numpy as np\n'), ((2893, 2909), 'numpy.shape', 'np.shape', (['ax_ang'], {}), '(ax_ang)\n', (2901, 2909), True, 'import numpy as np\n'), ((4892, 4905), 'numpy.shape', 'np.shape', (['mat'], {}), '(mat)\n', (4900, 4905), True, 'import numpy as np\n'), ((5883, 5905), 'numpy.array', 'np.array', (['[0, 1, 0, 0]'], {}), '([0, 1, 0, 0])\n', (5891, 5905), True, 'import numpy as np\n'), ((8418, 8433), 'numpy.shape', 'np.shape', (['tind1'], {}), '(tind1)\n', (8426, 8433), True, 'import numpy as np\n'), ((8768, 8789), 'numpy.ones', 'np.ones', (['(1, ind2_sz)'], {}), '((1, ind2_sz))\n', (8775, 8789), True, 'import numpy as np\n'), ((2927, 2943), 'numpy.shape', 'np.shape', (['ax_ang'], {}), '(ax_ang)\n', (2935, 2943), True, 'import numpy as np\n'), ((3013, 3029), 'numpy.shape', 'np.shape', (['ax_ang'], {}), 
'(ax_ang)\n', (3021, 3029), True, 'import numpy as np\n'), ((2698, 2711), 'numpy.array', 'np.array', (['[1]'], {}), '([1])\n', (2706, 2711), True, 'import numpy as np\n'), ((8195, 8221), 'numpy.any', 'np.any', (['(signs == 0)'], {'axis': '(1)'}), '(signs == 0, axis=1)\n', (8201, 8221), True, 'import numpy as np\n')] |
#!/usr/bin/python3
"""
A script to test mecanum driver
"""
import time
import numpy as np
from drivers.mecanum_driver import MecanumDriver

wheels = MecanumDriver()
try:
    # Exercise the driver forever: pick a random action id (0-9)
    # every two seconds until the user interrupts.
    while True:
        wheels.set_action(int(np.random.randint(0, 10)))
        time.sleep(2)
except KeyboardInterrupt:
    print("\r\nctrl + c:")
    wheels.halt()
    exit()
# When everything done, release the capture and stop motors
wheels.halt()
| [
"numpy.random.randint",
"drivers.mecanum_driver.MecanumDriver",
"time.sleep"
] | [((149, 164), 'drivers.mecanum_driver.MecanumDriver', 'MecanumDriver', ([], {}), '()\n', (162, 164), False, 'from drivers.mecanum_driver import MecanumDriver\n'), ((200, 224), 'numpy.random.randint', 'np.random.randint', (['(0)', '(10)'], {}), '(0, 10)\n', (217, 224), True, 'import numpy as np\n'), ((268, 281), 'time.sleep', 'time.sleep', (['(2)'], {}), '(2)\n', (278, 281), False, 'import time\n')] |
from . import config
import datarobot as dr
from datarobot.errors import ClientError
import pickle
import logging
from warnings import warn
from warnings import catch_warnings
from datarobot.models import featurelist
import pandas as pd
import numpy as np
from statistics import mean
from statistics import median
from collections import defaultdict
import matplotlib.pyplot as plt
import seaborn as sb
LOGGER = logging.getLogger(__name__)
def find_project(project: str) -> dr.Project:
    """Locate an existing DataRobot project by id or by name.

    The argument is first treated as a project id; if that lookup fails with
    a ClientError, it is used as a search term against project names instead.

    :Parameters:
    ----------
    project: str
        Either a project id or a search term for project name

    :Returns:
    ----------
    datarobot.Project
        The project with the given id, or the first/only project returned by
        the name search. None when nothing matches.
    """
    project = str(project)  # tolerate non-string ids
    try:
        # Fast path: assume we were handed a project id.
        return dr.Project.get(project_id=project)
    except ClientError:
        pass  # not an id — fall back to a name search below
    matches = dr.Project.list(search_params={'project_name': project})
    if not matches:
        warn(f"No projects found with id or search for '{project}'")
        return None
    if len(matches) > 1:
        warn(f"Returning the first of multiple projects with '{project}': {matches}")
    return matches[0]
# if changing get_best_model, check if it's alias get_starred_model needs changing
def get_best_model(project: dr.Project,
                   featurelist_prefix: str = None,
                   starred: bool = False,
                   metric: str = 'AUC') -> dr.Model:
    """Attempts to find the 'best' model in a datarobot project by averaging the
    cross-validation scores of its models. # TODO make dictionary for minimize/maximize

    CURRENTLY SUPPORTS METRICS WHERE HIGHER = BETTER

    .. warning::
        Actually finding the 'best' model takes more than averaging cross validation
        scores, and it is suggested that the 'best' model is decided and starred in DataRobot.
        (Make sure 'starred = True' if starring the 'best' model)

    .. note::
        Models without cross validation scores are ignored. Cross validate all
        models if each model should be considered.

    :Parameters:
    ----------
    project: datarobot.Project
        The project object that will be searched for the 'best' model
    featurelist_prefix: str, optional (default = None)
        If given, only models whose featurelist name starts with this prefix
        (case-insensitive) are considered
    starred: bool, optional (default = False)
        If True, return the starred model. With several starred models the
        'best' starred one is returned; with none, fall back to the overall 'best'
    metric: str, optional (default = 'AUC')
        What model cross validation metric to use when averaging scores

    :Returns:
    ----------
    datarobot.Model
        The 'best', starred, or 'best'-of-starred model, or None when no
        candidate model qualifies.
    """
    # Collect candidate models, optionally restricted by featurelist prefix.
    if featurelist_prefix:
        all_models = [
            model for model in project.get_models()
            if model.featurelist_name is not None
            and model.featurelist_name.lower().startswith(featurelist_prefix.lower())
        ]
    else:
        all_models = project.get_models()
    if len(all_models) == 0:
        return None

    if starred:
        starred_models = [model for model in all_models if model.is_starred]
        if len(starred_models) == 1:
            # A single starred model wins regardless of cross-validation.
            return starred_models[0]
        if len(starred_models) > 1:
            best = _highest_mean_cv_model(starred_models, metric)
            if best is None:
                warn(f'The starred models were not cross-validated!')
                return None
            return best
        warn(f"There are no starred models in '{project}'. Will try to return the 'best' model.")
        # BUGFIX: the old code fell through and returned None despite the
        # warning's promise; now actually fall back to the best overall model.

    best = _highest_mean_cv_model(all_models, metric)
    if best is None:
        warn(f'There were no cross-validated models in "{project}"')
        return None
    return best


def _highest_mean_cv_model(models, metric):
    """Return the model with the highest mean cross-validation `metric` score,
    skipping models that were never cross-validated; None if none qualify."""
    averages = {}  # mean score -> model
    for model in models:
        try:
            scores = model.get_cross_validation_scores()['cvScores'][metric].values()
            averages[mean(scores)] = model
        except ClientError:  # the model wasn't cross-validated
            continue
    if not averages:
        return None
    # Highest metric is 'best'. TODO: support minimize-style metrics.
    return averages[max(averages)]
# alias for get_best_model
def get_starred_model(project: dr.Project,
                      metric: str = 'AUC',
                      featurelist_prefix: str = None) -> dr.Model:
    """Convenience alias for rapa.utils.get_best_model() with starred = True."""
    return get_best_model(project,
                          featurelist_prefix=featurelist_prefix,
                          starred=True,
                          metric=metric)
def initialize_dr_api(token_key,
                      file_path: str = 'data/dr-tokens.pkl',
                      endpoint: str = 'https://app.datarobot.com/api/v2'):
    """Initializes the DataRobot API with a pickled dictionary created by the user.

    .. warning:
        It is advised that the user keeps the pickled dictionary in an ignored
        directory if using GitHub (put the file in the .gitignore)

    Accesses a file that should be a pickled dictionary. This dictionary has the API
    token as the value to the provided token_key. Ex: {token_key: 'API_TOKEN'}

    :Parameters:
    ----------
    token_key: str
        The API token's key in the pickled dictionary located in file_path
    file_path: str, optional (default = 'data/dr-tokens.pkl')
        Path to the pickled dictionary containing the API token
    endpoint: str, optional (default = 'https://app.datarobot.com/api/v2')
        The endpoint is usually the URL you would use to log into the DataRobot Web User Interface

    :Raises:
    ----------
    FileNotFoundError
        When `file_path` does not exist
    KeyError
        When `token_key` is not a key of the pickled dictionary
    Exception
        When authentication with DataRobot emits a warning
    """
    try:
        # BUGFIX: use a context manager so the token file handle is closed
        # (the old code leaked the handle from pickle.load(open(...))).
        with open(file_path, 'rb') as token_file:
            datarobot_tokens = pickle.load(token_file)
        with catch_warnings(record=True) as w:  # w collects any warning raised during auth
            dr.Client(endpoint=endpoint, token=datarobot_tokens[token_key])
            if w:  # a warning means authentication failed — surface it as an error
                raise Exception(w[0].message)
    except FileNotFoundError:
        raise FileNotFoundError(f'The file {file_path} does not exist.') # TODO: Make a tutorial on how to create the pickled dictionary with api tokens and link here
    except KeyError:
        raise KeyError(f'\'{token_key}\' is not in the dictionary at \'{file_path}\'')
    # TODO: I probably didn't catch all errors, make tests for this
    print(f'DataRobot API initiated with endpoint \'{endpoint}\'')
def get_featurelist(featurelist: str,
                    project: dr.Project) -> dr.Featurelist:
    """Search a DataRobot project for a featurelist by id or by name.

    All featurelists of the project are fetched once; an exact id match is
    tried first, then a case-insensitive substring match on the name.

    :Parameters:
    ----------
    featurelist: str
        Either a featurelist id or a search term for featurelist name
    project: datarobot.Project
        The project that is being searched for the featurelist

    :Returns:
    ----------
    datarobot.Featurelist
        The matching featurelist (the first one when several names match),
        or None when nothing matches.
    """
    featurelist = str(featurelist)  # tolerate non-string ids
    candidates = project.get_featurelists()
    # First pass: treat the argument as an id.
    by_id = [fl for fl in candidates if fl.id == featurelist]
    if by_id:
        return by_id[0]  # ids are unique, so at most one match
    # Second pass: case-insensitive substring search on featurelist names.
    by_name = [fl for fl in candidates if featurelist.lower() in str(fl.name).lower()]
    if not by_name:
        warn(f"No featurelists were found with either the id or name of '{featurelist}'")
        return None
    if len(by_name) > 1:
        warn(f"More than one featurelist were found: '{by_name}', returning the first.")
    return by_name[0]
def parsimony_performance_boxplot(project: dr.Project,
                                featurelist_prefix: str = 'RAPA Reduced to',
                                metric: str = 'AUC',
                                split: str = 'crossValidation',
                                featurelist_lengths: list = None):
    """Uses `seaborn`'s `boxplot` function to plot featurelist size vs performance
    for all models whose featurelist name contains `featurelist_prefix`. There is
    a different boxplot for each featurelist length. # TODO warn about multiple prefixes, try to use new prefixes

    :Parameters:
    ----------
    project: datarobot.Project
        Either a datarobot project, or a string of it's id or name
    featurelist_prefix: str, optional (default = 'RAPA Reduced to')
        The desired prefix for the featurelists that will be used for plotting
        parsimony performance. Matching featurelist names end with the number
        of features in that featurelist
    metric: str, optional (default = 'AUC')
        The metric used for plotting accuracy of models
    split: str, optional (default = 'crossValidation')
        What split's performance to take from.
        Can be: ['crossValidation', 'holdout'] TODO: i think it can be more, double check
    featurelist_lengths: list, optional (default = None)
        A list of featurelist lengths to plot; all lengths when falsy

    :Returns:
    ----------
    pandas.DataFrame
        The plotted table: one column per featurelist length (descending),
        one row per model score, padded with None.
        (BUGFIX: the old docstring claimed the function returns None.)
    """
    # if `project` is a string, find the project
    if type(project) is str:
        project = find_project(project)

    featurelist_performances = defaultdict(list)
    for model in project.get_models():
        if model.featurelist_name is None or featurelist_prefix not in model.featurelist_name:
            continue
        # The number of features is the last token of the featurelist name.
        num_features = int(model.featurelist_name.split(' ')[-1].strip('()'))
        if model.metrics[metric][split] is None:
            continue  # no score recorded for this split
        if not featurelist_lengths or num_features in featurelist_lengths:
            featurelist_performances[num_features].append(model.metrics[metric][split])

    # Pad shorter score lists with None so every DataFrame column has equal length.
    if featurelist_performances:
        longest = max(len(scores) for scores in featurelist_performances.values())
        for scores in featurelist_performances.values():
            scores.extend([None] * (longest - len(scores)))

    featurelist_performances_df = pd.DataFrame(featurelist_performances)[
        sorted(featurelist_performances.keys())[::-1]]
    with plt.style.context('tableau-colorblind10'):
        plt.ylabel(f'{split} {metric}')
        plt.xlabel('Number of Features')
        plt.title(f'{project.project_name} - {featurelist_prefix}\nParsimonious Model Performance')
        sb.boxplot(data=featurelist_performances_df)
    return featurelist_performances_df
def feature_performance_stackplot(project: dr.Project,
                                featurelist_prefix: str = 'RAPA Reduced to',
                                starting_featurelist: str = None,
                                feature_impact_metric: str = 'median',
                                metric: str = 'AUC',
                                vlines: bool = False):
    """Utilizes `matplotlib.pyplot.stackplot` to show feature performance during
    parsimony analysis.

    :Parameters:
    ----------
    project: datarobot.Project
        Either a datarobot project, or a string of it's id or name
    featurelist_prefix: str, optional (default = 'RAPA Reduced to')
        The desired prefix for the featurelists that will be used for plotting
        feature performance. Matching featurelist names end with the number of
        features in that featurelist
    starting_featurelist: str, optional (default = None)
        The starting featurelist used for parsimony analysis. If None, only
        the featurelists with the desired prefix in `featurelist_prefix` will be plotted
    feature_impact_metric: str, optional (default = 'median')
        Which metric to use when finding the most representative feature
        importance of all models in the featurelist.
        Options: 'median', 'mean', 'cumulative'
    metric: str, optional (default = 'AUC')
        Which metric to use when finding feature importance of each model
    vlines: bool, optional (default = False)
        Whether to add vertical lines at the featurelist lengths or not

    :Returns:
    ----------
    None TODO: return plot?
    """
    # if `project` is a string, find the project
    if type(project) is str:
        project = find_project(project)
    if type(starting_featurelist) == str:
        starting_featurelist = get_featurelist(starting_featurelist, project)

    def _model_in_plot(model):
        """Whether this model's featurelist participates in the stackplot."""
        # BUGFIX: guard against models without a featurelist name — the old
        # code raised AttributeError here when starting_featurelist was None.
        if model.featurelist_name is None:
            return False
        if model.featurelist_name.startswith(featurelist_prefix):
            return True
        return (starting_featurelist is not None
                and model.featurelist_id == starting_featurelist.id)

    # featurelist name -> {feature name -> [normalized impact per model]}
    all_feature_importances = {}
    for model in project.get_models():
        if not _model_in_plot(model):
            continue
        if model.metrics[metric]['crossValidation'] is None:
            continue  # only cross-validated models are considered
        per_feature = all_feature_importances.setdefault(model.featurelist_name, {})
        for impact in model.get_feature_impact():
            per_feature.setdefault(impact['featureName'], []).append(impact['impactNormalized'])

    # Reduce each feature's impact list to a single representative number.
    metric_choice = feature_impact_metric.lower()
    for per_feature in all_feature_importances.values():
        for feature, impacts in per_feature.items():
            if metric_choice == 'median':
                per_feature[feature] = median(impacts)
            elif metric_choice == 'mean':
                per_feature[feature] = mean(impacts)
            elif metric_choice == 'cumulative':
                per_feature[feature] = sum(impacts)
            else:
                raise Exception(f'`feature_impact_metric` provided ({feature_impact_metric}) not accepted.\nOptions: \'median\', \'mean\', or \'cumulative\'')

    # Create 1d array of dimension N (x), and 2d array of dimension MxN (y) for stackplot.
    df = pd.DataFrame(all_feature_importances).replace({np.nan: 0})
    if starting_featurelist is not None:
        # Rename the starting featurelist column so it also ends with its length.
        df = df.rename(columns={starting_featurelist.name: f'{starting_featurelist.name} {len(starting_featurelist.features)}'})
    df = df / df.sum()  # normalize each column so impacts stack to 1

    # Order columns by descending featurelist length.
    cols = sorted(((int(c.split(' ')[-1].strip('()')), c) for c in df.columns))[::-1]
    x = [str(n_feats) for n_feats, _ in cols]
    y = np.array([list(df[name]) for _, name in cols]).T

    len_smallest_featurelist = min(int(c.split(' ')[-1].strip('()')) for c in df.columns)
    smallest_featurelist = featurelist_prefix + ' (' + str(len_smallest_featurelist) + ')'
    # Hide all but the top features from the legend by prefixing an underscore
    # (matplotlib skips legend labels starting with '_'). The invariant min()
    # is hoisted out of the loop (it used to be recomputed per feature).
    rename_map = {}
    ranked_features = df.loc[:, smallest_featurelist].sort_values(ascending=False).index
    for rank, feature in enumerate(ranked_features):
        if rank > config.NUM_FEATURES_TO_GRAPH or rank >= len_smallest_featurelist:
            rename_map[feature] = '_' + str(feature)
        else:
            rename_map[feature] = feature
    df = df.rename(index=rename_map)

    _, ax = plt.subplots(figsize=(config.FIG_SIZE[0], config.FIG_SIZE[1]/2))
    plt.xlabel('Feature List Length')
    plt.ylabel('Normalized Feature Impact\n(Normalized Impact Normalized)')
    plt.title(f'{project.project_name} - {featurelist_prefix}\nFeature Impact Stackplot')
    if vlines:
        plt.vlines([z for z in range(1, len(x) - 1)], ymin=0, ymax=1, linestyles='dashed')
    ax.stackplot(x, y, labels=list(df.index), colors=plt.cm.tab20.colors)
    ax.legend(loc='upper left')
    return None
| [
"matplotlib.pyplot.title",
"datarobot.Project.get",
"pandas.DataFrame",
"datarobot.models.featurelist.lower",
"statistics.median",
"matplotlib.pyplot.style.context",
"datarobot.Client",
"collections.defaultdict",
"datarobot.Project.list",
"seaborn.boxplot",
"numpy.array",
"warnings.catch_warni... | [((418, 445), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (435, 445), False, 'import logging\n'), ((12260, 12277), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (12271, 12277), False, 'from collections import defaultdict\n'), ((20197, 20208), 'numpy.array', 'np.array', (['y'], {}), '(y)\n', (20205, 20208), True, 'import numpy as np\n'), ((21049, 21115), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(config.FIG_SIZE[0], config.FIG_SIZE[1] / 2)'}), '(figsize=(config.FIG_SIZE[0], config.FIG_SIZE[1] / 2))\n', (21061, 21115), True, 'import matplotlib.pyplot as plt\n'), ((21118, 21151), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Feature List Length"""'], {}), "('Feature List Length')\n", (21128, 21151), True, 'import matplotlib.pyplot as plt\n'), ((21156, 21230), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Normalized Feature Impact\n(Normalized Impact Normalized)"""'], {}), '("""Normalized Feature Impact\n(Normalized Impact Normalized)""")\n', (21166, 21230), True, 'import matplotlib.pyplot as plt\n'), ((21232, 21330), 'matplotlib.pyplot.title', 'plt.title', (['f"""{project.project_name} - {featurelist_prefix}\nFeature Impact Stackplot"""'], {}), '(\n f"""{project.project_name} - {featurelist_prefix}\nFeature Impact Stackplot"""\n )\n', (21241, 21330), True, 'import matplotlib.pyplot as plt\n'), ((1297, 1331), 'datarobot.Project.get', 'dr.Project.get', ([], {'project_id': 'project'}), '(project_id=project)\n', (1311, 1331), True, 'import datarobot as dr\n'), ((13552, 13590), 'pandas.DataFrame', 'pd.DataFrame', (['featurelist_performances'], {}), '(featurelist_performances)\n', (13564, 13590), True, 'import pandas as pd\n'), ((13652, 13693), 'matplotlib.pyplot.style.context', 'plt.style.context', (['"""tableau-colorblind10"""'], {}), "('tableau-colorblind10')\n", (13669, 13693), True, 'import matplotlib.pyplot as plt\n'), ((13703, 13734), 
'matplotlib.pyplot.ylabel', 'plt.ylabel', (['f"""{split} {metric}"""'], {}), "(f'{split} {metric}')\n", (13713, 13734), True, 'import matplotlib.pyplot as plt\n'), ((13743, 13775), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Number of Features"""'], {}), "('Number of Features')\n", (13753, 13775), True, 'import matplotlib.pyplot as plt\n'), ((13784, 13888), 'matplotlib.pyplot.title', 'plt.title', (['f"""{project.project_name} - {featurelist_prefix}\nParsimonious Model Performance"""'], {}), '(\n f"""{project.project_name} - {featurelist_prefix}\nParsimonious Model Performance"""\n )\n', (13793, 13888), True, 'import matplotlib.pyplot as plt\n'), ((13884, 13928), 'seaborn.boxplot', 'sb.boxplot', ([], {'data': 'featurelist_performances_df'}), '(data=featurelist_performances_df)\n', (13894, 13928), True, 'import seaborn as sb\n'), ((1451, 1507), 'datarobot.Project.list', 'dr.Project.list', ([], {'search_params': "{'project_name': project}"}), "(search_params={'project_name': project})\n", (1466, 1507), True, 'import datarobot as dr\n'), ((4564, 4663), 'warnings.warn', 'warn', (['f"""There are no starred models in \'{project}\'. Will try to return the \'best\' model."""'], {}), '(\n f"There are no starred models in \'{project}\'. 
Will try to return the \'best\' model."\n )\n', (4568, 4663), False, 'from warnings import warn\n'), ((5977, 6037), 'warnings.warn', 'warn', (['f"""There were no cross-validated models in "{project}\\""""'], {}), '(f\'There were no cross-validated models in "{project}"\')\n', (5981, 6037), False, 'from warnings import warn\n'), ((7802, 7829), 'warnings.catch_warnings', 'catch_warnings', ([], {'record': '(True)'}), '(record=True)\n', (7816, 7829), False, 'from warnings import catch_warnings\n'), ((7889, 7952), 'datarobot.Client', 'dr.Client', ([], {'endpoint': 'endpoint', 'token': 'datarobot_tokens[token_key]'}), '(endpoint=endpoint, token=datarobot_tokens[token_key])\n', (7898, 7952), True, 'import datarobot as dr\n'), ((10041, 10132), 'warnings.warn', 'warn', (['f"""No featurelists were found with either the id or name of \'{featurelist}\'"""'], {}), '(\n f"No featurelists were found with either the id or name of \'{featurelist}\'"\n )\n', (10045, 10132), False, 'from warnings import warn\n'), ((19537, 19574), 'pandas.DataFrame', 'pd.DataFrame', (['all_feature_importances'], {}), '(all_feature_importances)\n', (19549, 19574), True, 'import pandas as pd\n'), ((1586, 1646), 'warnings.warn', 'warn', (['f"""No projects found with id or search for \'{project}\'"""'], {}), '(f"No projects found with id or search for \'{project}\'")\n', (1590, 1646), False, 'from warnings import warn\n'), ((10235, 10332), 'warnings.warn', 'warn', (['f"""More than one featurelist were found: \'{dr_featurelist}\', returning the first."""'], {}), '(\n f"More than one featurelist were found: \'{dr_featurelist}\', returning the first."\n )\n', (10239, 10332), False, 'from warnings import warn\n'), ((18828, 18886), 'statistics.median', 'median', (['all_feature_importances[featurelist_name][feature]'], {}), '(all_feature_importances[featurelist_name][feature])\n', (18834, 18886), False, 'from statistics import median\n'), ((1843, 1935), 'warnings.warn', 'warn', (['f"""Returning the first of 
multiple projects with \'{project}\': {project_list}"""'], {}), '(\n f"Returning the first of multiple projects with \'{project}\': {project_list}"\n )\n', (1847, 1935), False, 'from warnings import warn\n'), ((5335, 5388), 'warnings.warn', 'warn', (['f"""The starred models were not cross-validated!"""'], {}), "(f'The starred models were not cross-validated!')\n", (5339, 5388), False, 'from warnings import warn\n'), ((9887, 9906), 'datarobot.models.featurelist.lower', 'featurelist.lower', ([], {}), '()\n', (9904, 9906), False, 'from datarobot.models import featurelist\n'), ((19014, 19070), 'statistics.mean', 'mean', (['all_feature_importances[featurelist_name][feature]'], {}), '(all_feature_importances[featurelist_name][feature])\n', (19018, 19070), False, 'from statistics import mean\n')] |
from __future__ import print_function
import os
import sys
import pdb
import copy
import sklearn
import tensorflow as tf
import pandas as pd
import PIL
import numpy as np
import keras
from keras.models import load_model
import function as func
import function_dl as func_dl
def cnn_prediction(cnv_file, model_file, use_gpu, output_file):
    """Predict CNV classes (DEL / diploid / DUP) for CNV images listed in
    ``cnv_file`` using a pre-trained MobileNet model, then write the input
    table augmented with per-class probabilities, a predicted label, and a
    Positive/Negative/Error status column to ``output_file`` as CSV.

    Parameters
    ----------
    cnv_file : CSV of CNV info; must contain an 'img_path' column and a
        CNV-type column resolvable via func.fetch_colName.
    model_file : path to a saved Keras model; custom F1/precision/recall
        metrics from func_dl are re-registered on load.
    use_gpu : bool or str flag; False / 'False' forces CPU.
    output_file : path of the prediction CSV to write.
    """
    # GPU or CPU selection
    if use_gpu == False or use_gpu == 'False':  # accepts both bool and string flags
        print("Using CPU ...")
    else:
        cuda_available = tf.test.is_built_with_cuda()
        gpu_availabel = tf.config.list_physical_devices('GPU')
        # NOTE(review): this hides all CUDA devices even on the GPU path,
        # which looks contradictory with the "Using GPU" branch — confirm intent.
        os.environ['CUDA_VISIBLE_DEVICES'] = "-1"
        if cuda_available and gpu_availabel:
            print("Using GPU ...")
            physical_devices = tf.config.experimental.list_physical_devices('GPU')
        else:
            print("GPU is not available. Try CPU ...")
            physical_devices = tf.config.experimental.list_physical_devices('CPU')
    # Initial variables
    img_width, img_height = 224, 224  # MobileNet input resolution
    ## Load pre-calculated model
    custom_objects = {"f1_m":func_dl.f1_m, "precision_m":func_dl.precision_m, "recall_m":func_dl.recall_m}
    model_name = 'MobileNet_v1'
    func.showDateTime('\t')
    print("Loading %s ... from %s"%(model_name, model_file))
    try:
        MobileNet_model = keras.models.load_model(model_file, custom_objects=custom_objects)
        print("Model Loaded. ")
    except:  # bare except, but it re-raises after logging, so nothing is swallowed
        print("Unexpected error:", sys.exc_info()[0])
        raise
    # Loading CNV_info and images.
    func.showDateTime('\t')
    print("Loading CNV info and images ...")
    cnv_info_df = pd.read_csv(cnv_file)
    entire_cnv_images_path_list = cnv_info_df['img_path']
    CNV_TYPE_list = func.global_variables()['CNV_TYPE']
    # presumably index [1] is the labelled CNV-type column — TODO confirm against fetch_colName
    CNV_TYPE = func.fetch_colName(CNV_TYPE_list,cnv_info_df.columns)[1]
    img_np = func_dl.loadImgs(entire_cnv_images_path_list, img_width, img_height)
    # ## Normalization
    # Find the shape of input images and create the variable input_shape
    nRows,nCols,nDims = img_np.shape[1:]
    input_shape = (nRows, nCols, nDims)
    print("The shape of input tensor:",input_shape)
    # Change to float datatype
    img_np = img_np.astype('float32')
    # Scale the data to lie between 0 to 1
    img_np /= 255
    # Find the unique numbers from the train labels
    nClasses = 3  # DEL / diploid / DUP
    print('Total number classes: ', nClasses)
    # Prediction
    print("Predict the type of copy number images by CNV-espresso ...")
    img_pred = MobileNet_model.predict(img_np)
    # Copy the input table and append probability / prediction / status columns.
    pred_output_df = copy.deepcopy(cnv_info_df)
    pred_output_df.insert(pred_output_df.shape[1], 'Prob_DEL', "-")
    pred_output_df.insert(pred_output_df.shape[1], 'Prob_DIP', "-")
    pred_output_df.insert(pred_output_df.shape[1], 'Prob_DUP', "-")
    pred_output_df.insert(pred_output_df.shape[1], 'Prob', "-")
    pred_output_df.insert(pred_output_df.shape[1], 'Prediction', "-")
    pred_output_df.insert(pred_output_df.shape[1], 'Status', "-")
    num, correct_count = 0, 0
    for i in range(len(img_pred)):
        num += 1
        # Softmax class order: 0 = DEL, 1 = DIP, 2 = DUP.
        pred_output_df.iloc[i,pred_output_df.columns.get_loc('Prob_DEL')] = img_pred[i][0]
        pred_output_df.iloc[i,pred_output_df.columns.get_loc('Prob_DIP')] = img_pred[i][1]
        pred_output_df.iloc[i,pred_output_df.columns.get_loc('Prob_DUP')] = img_pred[i][2]
        pred_output_df.iloc[i,pred_output_df.columns.get_loc('Prob')] = np.max(img_pred[i])
        if np.any(np.isnan(img_pred[i])) == True:
            # Any NaN probability marks the row as un-predictable.
            pred_output_df.iloc[i,pred_output_df.columns.get_loc('Prediction')] = "NaN"
        else:
            if np.argmax(img_pred[i]) == 0:
                pred_output_df.iloc[i,pred_output_df.columns.get_loc('Prediction')] = "DEL"
            elif np.argmax(img_pred[i]) == 1:
                pred_output_df.iloc[i,pred_output_df.columns.get_loc('Prediction')] = "DIP_or_NotRare"
            elif np.argmax(img_pred[i]) == 2:
                pred_output_df.iloc[i,pred_output_df.columns.get_loc('Prediction')] = "DUP"
            else:
                # Unreachable for a 3-class output; left as a debugging guard.
                pdb.set_trace()
        pred_output_df_type_col = func.fetch_colName(CNV_TYPE_list,pred_output_df.columns)[0]
        # Compare the prediction against the labelled CNV type to set Status.
        if pred_output_df.iloc[i,pred_output_df.columns.get_loc('Prediction')] == "NaN":
            pred_output_df.iloc[i,pred_output_df.columns.get_loc('Status')] = "Error"
        elif pred_output_df.iloc[i,pred_output_df.columns.get_loc('Prediction')] == pred_output_df.iloc[i, pred_output_df_type_col].upper():
            pred_output_df.iloc[i,pred_output_df.columns.get_loc('Status')] = "Positive"
        else:
            pred_output_df.iloc[i,pred_output_df.columns.get_loc('Status')] = "Negative"
    ## output to file
    pred_output_df.to_csv(output_file,index=False)
    func.showDateTime()
    print("Done.")
| [
"keras.models.load_model",
"copy.deepcopy",
"numpy.argmax",
"pandas.read_csv",
"tensorflow.test.is_built_with_cuda",
"function.showDateTime",
"tensorflow.config.list_physical_devices",
"numpy.isnan",
"numpy.max",
"function_dl.loadImgs",
"function.global_variables",
"pdb.set_trace",
"sys.exc_... | [((1179, 1202), 'function.showDateTime', 'func.showDateTime', (['"""\t"""'], {}), "('\\t')\n", (1196, 1202), True, 'import function as func\n'), ((1523, 1546), 'function.showDateTime', 'func.showDateTime', (['"""\t"""'], {}), "('\\t')\n", (1540, 1546), True, 'import function as func\n'), ((1610, 1631), 'pandas.read_csv', 'pd.read_csv', (['cnv_file'], {}), '(cnv_file)\n', (1621, 1631), True, 'import pandas as pd\n'), ((1843, 1911), 'function_dl.loadImgs', 'func_dl.loadImgs', (['entire_cnv_images_path_list', 'img_width', 'img_height'], {}), '(entire_cnv_images_path_list, img_width, img_height)\n', (1859, 1911), True, 'import function_dl as func_dl\n'), ((2555, 2581), 'copy.deepcopy', 'copy.deepcopy', (['cnv_info_df'], {}), '(cnv_info_df)\n', (2568, 2581), False, 'import copy\n'), ((4766, 4785), 'function.showDateTime', 'func.showDateTime', ([], {}), '()\n', (4783, 4785), True, 'import function as func\n'), ((480, 508), 'tensorflow.test.is_built_with_cuda', 'tf.test.is_built_with_cuda', ([], {}), '()\n', (506, 508), True, 'import tensorflow as tf\n'), ((534, 572), 'tensorflow.config.list_physical_devices', 'tf.config.list_physical_devices', (['"""GPU"""'], {}), "('GPU')\n", (565, 572), True, 'import tensorflow as tf\n'), ((1299, 1365), 'keras.models.load_model', 'keras.models.load_model', (['model_file'], {'custom_objects': 'custom_objects'}), '(model_file, custom_objects=custom_objects)\n', (1322, 1365), False, 'import keras\n'), ((1710, 1733), 'function.global_variables', 'func.global_variables', ([], {}), '()\n', (1731, 1733), True, 'import function as func\n'), ((1766, 1820), 'function.fetch_colName', 'func.fetch_colName', (['CNV_TYPE_list', 'cnv_info_df.columns'], {}), '(CNV_TYPE_list, cnv_info_df.columns)\n', (1784, 1820), True, 'import function as func\n'), ((3414, 3433), 'numpy.max', 'np.max', (['img_pred[i]'], {}), '(img_pred[i])\n', (3420, 3433), True, 'import numpy as np\n'), ((734, 785), 
'tensorflow.config.experimental.list_physical_devices', 'tf.config.experimental.list_physical_devices', (['"""GPU"""'], {}), "('GPU')\n", (778, 785), True, 'import tensorflow as tf\n'), ((887, 938), 'tensorflow.config.experimental.list_physical_devices', 'tf.config.experimental.list_physical_devices', (['"""CPU"""'], {}), "('CPU')\n", (931, 938), True, 'import tensorflow as tf\n'), ((4120, 4177), 'function.fetch_colName', 'func.fetch_colName', (['CNV_TYPE_list', 'pred_output_df.columns'], {}), '(CNV_TYPE_list, pred_output_df.columns)\n', (4138, 4177), True, 'import function as func\n'), ((3466, 3487), 'numpy.isnan', 'np.isnan', (['img_pred[i]'], {}), '(img_pred[i])\n', (3474, 3487), True, 'import numpy as np\n'), ((3615, 3637), 'numpy.argmax', 'np.argmax', (['img_pred[i]'], {}), '(img_pred[i])\n', (3624, 3637), True, 'import numpy as np\n'), ((1445, 1459), 'sys.exc_info', 'sys.exc_info', ([], {}), '()\n', (1457, 1459), False, 'import sys\n'), ((3753, 3775), 'numpy.argmax', 'np.argmax', (['img_pred[i]'], {}), '(img_pred[i])\n', (3762, 3775), True, 'import numpy as np\n'), ((3902, 3924), 'numpy.argmax', 'np.argmax', (['img_pred[i]'], {}), '(img_pred[i])\n', (3911, 3924), True, 'import numpy as np\n'), ((4057, 4072), 'pdb.set_trace', 'pdb.set_trace', ([], {}), '()\n', (4070, 4072), False, 'import pdb\n')] |
import numpy as np
import pickle
import mltools as ml
from homework4.problem_2 import sample_train
if __name__ == "__main__":
    # Load the raw features and expand them with degree-2 polynomial terms.
    X = np.genfromtxt('data/X_train.txt', delimiter=None)
    x_xfm = ml.transforms.fpoly(X, 2)
    Y = np.genfromtxt('data/Y_train.txt', delimiter=None)[:, np.newaxis]
    raw_data = np.concatenate((x_xfm, Y), axis=1)
    print(f"raw_data has shape of {raw_data.shape}")
    # Experiment configuration: split percentages, repetition count, and a
    # logarithmic grid of regularization strengths (rounded for readability).
    trn_p = 20
    dev_p = 5
    n_resamples = 10
    reg = np.logspace(-4, 0, 20).round(6)
    # Collect train/validation AUC for every (resample, regularizer) pair.
    training_auc = np.zeros((n_resamples, len(reg)), dtype=float)
    validating_auc = np.zeros((n_resamples, len(reg)), dtype=float)
    for run in range(n_resamples):
        training_auc[run], validating_auc[run] = sample_train(reg, raw_data, trn_p, dev_p)
    # Persist both AUC tables for later analysis/plotting.
    with open("learners/training_auc_2b.pickle", "wb") as f:
        pickle.dump(training_auc, f)
    with open("learners/validating_auc_2b.pickle", "wb") as f:
        pickle.dump(validating_auc, f)
| [
"pickle.dump",
"numpy.logspace",
"numpy.genfromtxt",
"homework4.problem_2.sample_train",
"mltools.transforms.fpoly",
"numpy.concatenate"
] | [((135, 184), 'numpy.genfromtxt', 'np.genfromtxt', (['"""data/X_train.txt"""'], {'delimiter': 'None'}), "('data/X_train.txt', delimiter=None)\n", (148, 184), True, 'import numpy as np\n'), ((197, 222), 'mltools.transforms.fpoly', 'ml.transforms.fpoly', (['X', '(2)'], {}), '(X, 2)\n', (216, 222), True, 'import mltools as ml\n'), ((311, 345), 'numpy.concatenate', 'np.concatenate', (['(x_xfm, Y)'], {'axis': '(1)'}), '((x_xfm, Y), axis=1)\n', (325, 345), True, 'import numpy as np\n'), ((457, 479), 'numpy.logspace', 'np.logspace', (['(-4)', '(0)', '(20)'], {}), '(-4, 0, 20)\n', (468, 479), True, 'import numpy as np\n'), ((231, 280), 'numpy.genfromtxt', 'np.genfromtxt', (['"""data/Y_train.txt"""'], {'delimiter': 'None'}), "('data/Y_train.txt', delimiter=None)\n", (244, 280), True, 'import numpy as np\n'), ((710, 751), 'homework4.problem_2.sample_train', 'sample_train', (['reg', 'raw_data', 'trn_p', 'dev_p'], {}), '(reg, raw_data, trn_p, dev_p)\n', (722, 751), False, 'from homework4.problem_2 import sample_train\n'), ((823, 851), 'pickle.dump', 'pickle.dump', (['training_auc', 'f'], {}), '(training_auc, f)\n', (834, 851), False, 'import pickle\n'), ((924, 954), 'pickle.dump', 'pickle.dump', (['validating_auc', 'f'], {}), '(validating_auc, f)\n', (935, 954), False, 'import pickle\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
The array_analysis test suite.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from future.builtins import * # NOQA
import unittest
import numpy as np
from obspy.signal.array_analysis import array_rotation_strain, get_geometry
class ArrayTestCase(unittest.TestCase):
    """
    Test cases for array_analysis functions.

    Each test feeds synthetic displacement time series for a seven-station
    array into ``array_rotation_strain`` and checks that the recovered
    rotation/strain components match the known input motion.
    """
    def setUp(self):
        # Seven stations (x, y, z): one at the origin, six around it.
        self.array_coords = np.array([[0.0, 0.0, 0.0],
                                      [-5.0, 7.0, 0.0],
                                      [5.0, 7.0, 0.0],
                                      [10.0, 0.0, 0.0],
                                      [5.0, -7.0, 0.0],
                                      [-5.0, -7.0, 0.0],
                                      [-10.0, 0.0, 0.0]])
        self.subarray = np.array([0, 1, 2, 3, 4, 5, 6])
        # Displacement buffers (1000 samples x 7 stations), pre-filled with
        # NaN so that any sample a test forgets to set is caught immediately.
        # np.nan replaces the np.NaN alias, which was removed in NumPy 2.0.
        self.ts1 = np.empty((1000, 7))
        self.ts2 = np.empty((1000, 7))
        self.ts3 = np.empty((1000, 7))
        self.ts1.fill(np.nan)
        self.ts2.fill(np.nan)
        self.ts3.fill(np.nan)
        self.sigmau = 0.0001
        self.Vp = 1.93
        self.Vs = 0.326

    def tearDown(self):
        pass

    def test_array_rotation(self):
        # tests function array_rotation_strain with synthetic data with pure
        # rotation and no strain
        array_coords = self.array_coords
        subarray = self.subarray
        ts1 = self.ts1
        ts2 = self.ts2
        ts3 = self.ts3
        sigmau = self.sigmau
        vp = self.Vp
        vs = self.Vs

        # Gaussian-windowed sinusoids as rotation histories about x, y, z.
        rotx = 0.00001 * np.exp(-1 * np.square(np.linspace(-2, 2, 1000))) * \
            np.sin(np.linspace(-30 * np.pi, 30 * np.pi, 1000))
        roty = 0.00001 * np.exp(-1 * np.square(np.linspace(-2, 2, 1000))) * \
            np.sin(np.linspace(-20 * np.pi, 20 * np.pi, 1000))
        rotz = 0.00001 * np.exp(-1 * np.square(np.linspace(-2, 2, 1000))) * \
            np.sin(np.linspace(-10 * np.pi, 10 * np.pi, 1000))

        for stat in range(7):
            for t in range(1000):
                ts1[t, stat] = -1. * array_coords[stat, 1] * rotz[t]
                ts2[t, stat] = array_coords[stat, 0] * rotz[t]
                ts3[t, stat] = array_coords[stat, 1] * rotx[t] - \
                    array_coords[stat, 0] * roty[t]

        out = array_rotation_strain(subarray, ts1, ts2, ts3, vp, vs,
                                    array_coords, sigmau)

        # Rotations must be recovered; all strain measures must vanish.
        np.testing.assert_array_almost_equal(rotx, out['ts_w1'], decimal=12)
        np.testing.assert_array_almost_equal(roty, out['ts_w2'], decimal=12)
        np.testing.assert_array_almost_equal(rotz, out['ts_w3'], decimal=12)
        np.testing.assert_array_almost_equal(np.zeros(1000), out['ts_s'],
                                             decimal=15)
        np.testing.assert_array_almost_equal(np.zeros(1000), out['ts_d'],
                                             decimal=15)
        np.testing.assert_array_almost_equal(np.zeros(1000), out['ts_m'],
                                             decimal=12)

    def test_array_dilation(self):
        # tests function array_rotation_strain with synthetic data with pure
        # dilation and no rotation or shear strain
        array_coords = self.array_coords
        subarray = self.subarray
        ts1 = self.ts1
        ts2 = self.ts2
        ts3 = self.ts3
        sigmau = self.sigmau
        vp = self.Vp
        vs = self.Vs
        eta = 1 - 2 * vs ** 2 / vp ** 2

        dilation = .00001 * np.exp(
            -1 * np.square(np.linspace(-2, 2, 1000))) * \
            np.sin(np.linspace(-40 * np.pi, 40 * np.pi, 1000))
        for stat in range(7):
            for t in range(1000):
                ts1[t, stat] = array_coords[stat, 0] * dilation[t]
                ts2[t, stat] = array_coords[stat, 1] * dilation[t]
                ts3[t, stat] = array_coords[stat, 2] * dilation[t]

        out = array_rotation_strain(subarray, ts1, ts2, ts3, vp, vs,
                                    array_coords, sigmau)

        # remember free surface boundary conditions!
        # see Spudich et al, 1995, (A2)
        np.testing.assert_array_almost_equal(dilation * (2 - 2 * eta),
                                             out['ts_d'], decimal=12)
        np.testing.assert_array_almost_equal(dilation * 2, out['ts_dh'],
                                             decimal=12)
        np.testing.assert_array_almost_equal(
            abs(dilation * .5 * (1 + 2 * eta)), out['ts_s'], decimal=12)
        np.testing.assert_array_almost_equal(np.zeros(1000), out['ts_sh'],
                                             decimal=12)
        np.testing.assert_array_almost_equal(np.zeros(1000), out['ts_w1'],
                                             decimal=15)
        np.testing.assert_array_almost_equal(np.zeros(1000), out['ts_w2'],
                                             decimal=15)
        np.testing.assert_array_almost_equal(np.zeros(1000), out['ts_w3'],
                                             decimal=15)
        np.testing.assert_array_almost_equal(np.zeros(1000), out['ts_m'],
                                             decimal=12)

    def test_array_horizontal_shear(self):
        # tests function array_rotation_strain with synthetic data with pure
        # horizontal shear strain, no rotation or dilation
        array_coords = self.array_coords
        subarray = self.subarray
        ts1 = self.ts1
        ts2 = self.ts2
        sigmau = self.sigmau
        vp = self.Vp
        vs = self.Vs

        shear_strainh = .00001 * np.exp(
            -1 * np.square(np.linspace(-2, 2, 1000))) * \
            np.sin(np.linspace(-10 * np.pi, 10 * np.pi, 1000))
        # Vertical component is identically zero for horizontal shear.
        ts3 = np.zeros((1000, 7))
        for stat in range(7):
            for t in range(1000):
                ts1[t, stat] = array_coords[stat, 1] * shear_strainh[t]
                ts2[t, stat] = array_coords[stat, 0] * shear_strainh[t]

        out = array_rotation_strain(subarray, ts1, ts2, ts3, vp, vs,
                                    array_coords, sigmau)

        np.testing.assert_array_almost_equal(np.zeros(1000), out['ts_d'],
                                             decimal=12)
        np.testing.assert_array_almost_equal(np.zeros(1000), out['ts_dh'],
                                             decimal=12)
        np.testing.assert_array_almost_equal(abs(shear_strainh), out['ts_s'],
                                             decimal=12)
        np.testing.assert_array_almost_equal(abs(shear_strainh), out['ts_sh'],
                                             decimal=12)
        np.testing.assert_array_almost_equal(np.zeros(1000), out['ts_w1'],
                                             decimal=12)
        np.testing.assert_array_almost_equal(np.zeros(1000), out['ts_w2'],
                                             decimal=12)
        np.testing.assert_array_almost_equal(np.zeros(1000), out['ts_w3'],
                                             decimal=12)
        np.testing.assert_array_almost_equal(np.zeros(1000), out['ts_m'],
                                             decimal=12)

    def test_get_geometry(self):
        """
        test get_geometry() in array_analysis.py
        """
        # Geographic input (lat, lon, elevation): the returned relative
        # coordinates must be centred, i.e. each offset column sums to zero.
        ll = np.array([[24.5797167, 121.4842444, 385.106],
                       [24.5797611, 121.4842333, 384.893],
                       [24.5796694, 121.4842556, 385.106]])
        la = get_geometry(ll)
        np.testing.assert_almost_equal(la[:, 0].sum(), 0., decimal=8)
        np.testing.assert_almost_equal(la[:, 1].sum(), 0., decimal=8)
        np.testing.assert_almost_equal(la[:, 2].sum(), 0., decimal=8)
        # Same centring property for plain Cartesian input.
        ll = np.array([[10., 10., 10.],
                       [0., 5., 5.],
                       [0., 0., 0.]])
        la = get_geometry(ll, coordsys='xy')
        np.testing.assert_almost_equal(la[:, 0].sum(), 0., decimal=8)
        np.testing.assert_almost_equal(la[:, 1].sum(), 0., decimal=8)
        np.testing.assert_almost_equal(la[:, 2].sum(), 0., decimal=8)
def suite():
    """Return a TestSuite containing all ArrayTestCase tests.

    ``unittest.makeSuite`` was deprecated in Python 3.11 and removed in
    3.13; ``TestLoader.loadTestsFromTestCase`` is the supported
    replacement and collects the same ``test*``-prefixed methods.
    """
    return unittest.TestLoader().loadTestsFromTestCase(ArrayTestCase)
if __name__ == '__main__':
    # Run the tests returned by this module's suite() function.
    unittest.main(defaultTest='suite')
| [
"unittest.main",
"numpy.empty",
"unittest.makeSuite",
"numpy.zeros",
"obspy.signal.array_analysis.get_geometry",
"obspy.signal.array_analysis.array_rotation_strain",
"numpy.array",
"numpy.linspace",
"numpy.testing.assert_array_almost_equal"
] | [((8101, 8142), 'unittest.makeSuite', 'unittest.makeSuite', (['ArrayTestCase', '"""test"""'], {}), "(ArrayTestCase, 'test')\n", (8119, 8142), False, 'import unittest\n'), ((8176, 8210), 'unittest.main', 'unittest.main', ([], {'defaultTest': '"""suite"""'}), "(defaultTest='suite')\n", (8189, 8210), False, 'import unittest\n'), ((498, 639), 'numpy.array', 'np.array', (['[[0.0, 0.0, 0.0], [-5.0, 7.0, 0.0], [5.0, 7.0, 0.0], [10.0, 0.0, 0.0], [5.0,\n -7.0, 0.0], [-5.0, -7.0, 0.0], [-10.0, 0.0, 0.0]]'], {}), '([[0.0, 0.0, 0.0], [-5.0, 7.0, 0.0], [5.0, 7.0, 0.0], [10.0, 0.0, \n 0.0], [5.0, -7.0, 0.0], [-5.0, -7.0, 0.0], [-10.0, 0.0, 0.0]])\n', (506, 639), True, 'import numpy as np\n'), ((887, 918), 'numpy.array', 'np.array', (['[0, 1, 2, 3, 4, 5, 6]'], {}), '([0, 1, 2, 3, 4, 5, 6])\n', (895, 918), True, 'import numpy as np\n'), ((938, 957), 'numpy.empty', 'np.empty', (['(1000, 7)'], {}), '((1000, 7))\n', (946, 957), True, 'import numpy as np\n'), ((977, 996), 'numpy.empty', 'np.empty', (['(1000, 7)'], {}), '((1000, 7))\n', (985, 996), True, 'import numpy as np\n'), ((1016, 1035), 'numpy.empty', 'np.empty', (['(1000, 7)'], {}), '((1000, 7))\n', (1024, 1035), True, 'import numpy as np\n'), ((2355, 2431), 'obspy.signal.array_analysis.array_rotation_strain', 'array_rotation_strain', (['subarray', 'ts1', 'ts2', 'ts3', 'vp', 'vs', 'array_coords', 'sigmau'], {}), '(subarray, ts1, ts2, ts3, vp, vs, array_coords, sigmau)\n', (2376, 2431), False, 'from obspy.signal.array_analysis import array_rotation_strain, get_geometry\n'), ((2477, 2545), 'numpy.testing.assert_array_almost_equal', 'np.testing.assert_array_almost_equal', (['rotx', "out['ts_w1']"], {'decimal': '(12)'}), "(rotx, out['ts_w1'], decimal=12)\n", (2513, 2545), True, 'import numpy as np\n'), ((2554, 2622), 'numpy.testing.assert_array_almost_equal', 'np.testing.assert_array_almost_equal', (['roty', "out['ts_w2']"], {'decimal': '(12)'}), "(roty, out['ts_w2'], decimal=12)\n", (2590, 2622), True, 'import numpy as np\n'), 
((2631, 2699), 'numpy.testing.assert_array_almost_equal', 'np.testing.assert_array_almost_equal', (['rotz', "out['ts_w3']"], {'decimal': '(12)'}), "(rotz, out['ts_w3'], decimal=12)\n", (2667, 2699), True, 'import numpy as np\n'), ((3951, 4027), 'obspy.signal.array_analysis.array_rotation_strain', 'array_rotation_strain', (['subarray', 'ts1', 'ts2', 'ts3', 'vp', 'vs', 'array_coords', 'sigmau'], {}), '(subarray, ts1, ts2, ts3, vp, vs, array_coords, sigmau)\n', (3972, 4027), False, 'from obspy.signal.array_analysis import array_rotation_strain, get_geometry\n'), ((4166, 4257), 'numpy.testing.assert_array_almost_equal', 'np.testing.assert_array_almost_equal', (['(dilation * (2 - 2 * eta))', "out['ts_d']"], {'decimal': '(12)'}), "(dilation * (2 - 2 * eta), out['ts_d'],\n decimal=12)\n", (4202, 4257), True, 'import numpy as np\n'), ((4307, 4383), 'numpy.testing.assert_array_almost_equal', 'np.testing.assert_array_almost_equal', (['(dilation * 2)', "out['ts_dh']"], {'decimal': '(12)'}), "(dilation * 2, out['ts_dh'], decimal=12)\n", (4343, 4383), True, 'import numpy as np\n'), ((5756, 5775), 'numpy.zeros', 'np.zeros', (['(1000, 7)'], {}), '((1000, 7))\n', (5764, 5775), True, 'import numpy as np\n'), ((6000, 6076), 'obspy.signal.array_analysis.array_rotation_strain', 'array_rotation_strain', (['subarray', 'ts1', 'ts2', 'ts3', 'vp', 'vs', 'array_coords', 'sigmau'], {}), '(subarray, ts1, ts2, ts3, vp, vs, array_coords, sigmau)\n', (6021, 6076), False, 'from obspy.signal.array_analysis import array_rotation_strain, get_geometry\n'), ((7295, 7418), 'numpy.array', 'np.array', (['[[24.5797167, 121.4842444, 385.106], [24.5797611, 121.4842333, 384.893], [\n 24.5796694, 121.4842556, 385.106]]'], {}), '([[24.5797167, 121.4842444, 385.106], [24.5797611, 121.4842333, \n 384.893], [24.5796694, 121.4842556, 385.106]])\n', (7303, 7418), True, 'import numpy as np\n'), ((7474, 7490), 'obspy.signal.array_analysis.get_geometry', 'get_geometry', (['ll'], {}), '(ll)\n', (7486, 7490), False, 
'from obspy.signal.array_analysis import array_rotation_strain, get_geometry\n'), ((7716, 7780), 'numpy.array', 'np.array', (['[[10.0, 10.0, 10.0], [0.0, 5.0, 5.0], [0.0, 0.0, 0.0]]'], {}), '([[10.0, 10.0, 10.0], [0.0, 5.0, 5.0], [0.0, 0.0, 0.0]])\n', (7724, 7780), True, 'import numpy as np\n'), ((7832, 7863), 'obspy.signal.array_analysis.get_geometry', 'get_geometry', (['ll'], {'coordsys': '"""xy"""'}), "(ll, coordsys='xy')\n", (7844, 7863), False, 'from obspy.signal.array_analysis import array_rotation_strain, get_geometry\n'), ((2745, 2759), 'numpy.zeros', 'np.zeros', (['(1000)'], {}), '(1000)\n', (2753, 2759), True, 'import numpy as np\n'), ((2876, 2890), 'numpy.zeros', 'np.zeros', (['(1000)'], {}), '(1000)\n', (2884, 2890), True, 'import numpy as np\n'), ((3007, 3021), 'numpy.zeros', 'np.zeros', (['(1000)'], {}), '(1000)\n', (3015, 3021), True, 'import numpy as np\n'), ((4593, 4607), 'numpy.zeros', 'np.zeros', (['(1000)'], {}), '(1000)\n', (4601, 4607), True, 'import numpy as np\n'), ((4725, 4739), 'numpy.zeros', 'np.zeros', (['(1000)'], {}), '(1000)\n', (4733, 4739), True, 'import numpy as np\n'), ((4857, 4871), 'numpy.zeros', 'np.zeros', (['(1000)'], {}), '(1000)\n', (4865, 4871), True, 'import numpy as np\n'), ((4989, 5003), 'numpy.zeros', 'np.zeros', (['(1000)'], {}), '(1000)\n', (4997, 5003), True, 'import numpy as np\n'), ((5121, 5135), 'numpy.zeros', 'np.zeros', (['(1000)'], {}), '(1000)\n', (5129, 5135), True, 'import numpy as np\n'), ((6159, 6173), 'numpy.zeros', 'np.zeros', (['(1000)'], {}), '(1000)\n', (6167, 6173), True, 'import numpy as np\n'), ((6290, 6304), 'numpy.zeros', 'np.zeros', (['(1000)'], {}), '(1000)\n', (6298, 6304), True, 'import numpy as np\n'), ((6693, 6707), 'numpy.zeros', 'np.zeros', (['(1000)'], {}), '(1000)\n', (6701, 6707), True, 'import numpy as np\n'), ((6825, 6839), 'numpy.zeros', 'np.zeros', (['(1000)'], {}), '(1000)\n', (6833, 6839), True, 'import numpy as np\n'), ((6957, 6971), 'numpy.zeros', 'np.zeros', (['(1000)'], {}), 
'(1000)\n', (6965, 6971), True, 'import numpy as np\n'), ((7089, 7103), 'numpy.zeros', 'np.zeros', (['(1000)'], {}), '(1000)\n', (7097, 7103), True, 'import numpy as np\n'), ((1698, 1740), 'numpy.linspace', 'np.linspace', (['(-30 * np.pi)', '(30 * np.pi)', '(1000)'], {}), '(-30 * np.pi, 30 * np.pi, 1000)\n', (1709, 1740), True, 'import numpy as np\n'), ((1839, 1881), 'numpy.linspace', 'np.linspace', (['(-20 * np.pi)', '(20 * np.pi)', '(1000)'], {}), '(-20 * np.pi, 20 * np.pi, 1000)\n', (1850, 1881), True, 'import numpy as np\n'), ((1980, 2022), 'numpy.linspace', 'np.linspace', (['(-10 * np.pi)', '(10 * np.pi)', '(1000)'], {}), '(-10 * np.pi, 10 * np.pi, 1000)\n', (1991, 2022), True, 'import numpy as np\n'), ((3626, 3668), 'numpy.linspace', 'np.linspace', (['(-40 * np.pi)', '(40 * np.pi)', '(1000)'], {}), '(-40 * np.pi, 40 * np.pi, 1000)\n', (3637, 3668), True, 'import numpy as np\n'), ((5697, 5739), 'numpy.linspace', 'np.linspace', (['(-10 * np.pi)', '(10 * np.pi)', '(1000)'], {}), '(-10 * np.pi, 10 * np.pi, 1000)\n', (5708, 5739), True, 'import numpy as np\n'), ((1648, 1672), 'numpy.linspace', 'np.linspace', (['(-2)', '(2)', '(1000)'], {}), '(-2, 2, 1000)\n', (1659, 1672), True, 'import numpy as np\n'), ((1789, 1813), 'numpy.linspace', 'np.linspace', (['(-2)', '(2)', '(1000)'], {}), '(-2, 2, 1000)\n', (1800, 1813), True, 'import numpy as np\n'), ((1930, 1954), 'numpy.linspace', 'np.linspace', (['(-2)', '(2)', '(1000)'], {}), '(-2, 2, 1000)\n', (1941, 1954), True, 'import numpy as np\n'), ((3576, 3600), 'numpy.linspace', 'np.linspace', (['(-2)', '(2)', '(1000)'], {}), '(-2, 2, 1000)\n', (3587, 3600), True, 'import numpy as np\n'), ((5647, 5671), 'numpy.linspace', 'np.linspace', (['(-2)', '(2)', '(1000)'], {}), '(-2, 2, 1000)\n', (5658, 5671), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Nov 4 20:25:09 2018
@author: Kazuki
"""
import numpy as np
import pandas as pd
#import os
#from multiprocessing import cpu_count, Pool
#import utils
def multi_weighted_logloss(y_true, y_pred, myweight=None, based_true=True):
    """
    Class-weighted multi-class log loss for the PLAsTiCC challenge.

    @author olivier https://www.kaggle.com/ogrellier
    multi logloss for PLAsTiCC challenge

    Parameters
    ----------
    y_true : array, shape (n_samples, n_classes)
        One-hot ground truth; 14 columns, or 15 when class 99 is included.
    y_pred : array, shape (n_samples, n_classes)
        Predicted class probabilities.
    myweight : array of shape (n_classes,), optional
        Per-class multiplicative reweighting applied to ``y_pred`` before
        row normalisation.  Defaults to all ones (no reweighting).
    based_true : bool
        If True, normalise each per-class score by the number of true
        positives of that class; otherwise by the column sums of ``y_pred``.

    Returns
    -------
    float
        Weighted average negative log-likelihood (lower is better).
    """
    # class_weights taken from Giba's topic : https://www.kaggle.com/titericz
    # https://www.kaggle.com/c/PLAsTiCC-2018/discussion/67194
    # with <NAME>'s post https://www.kaggle.com/kyleboone
    classes = [6, 15, 16, 42, 52, 53, 62, 64, 65, 67, 88, 90, 92, 95]
    class_weight = {6: 1, 15: 2, 16: 1, 42: 1, 52: 1, 53: 1, 62: 1, 64: 2, 65: 1, 67: 1, 88: 1, 90: 1, 92: 1, 95: 1}
    if y_true.shape[1] > 14:
        classes.append(99)
        class_weight[99] = 2
    if myweight is None:
        myweight = np.ones(y_true.shape[1])
    y_p = y_pred * myweight
    # normalize each row to a valid probability distribution
    y_p /= y_p.sum(1)[:, None]
    # Limit y_p to [1e-15, 1 - 1e-15].  BUG FIX: the previous code clipped
    # to [0, 1], so a zero probability on a true class made log() produce
    # -inf/NaN, which nansum() then silently dropped; clipping to 1e-15 (as
    # the original comment intended) keeps the loss finite and meaningful.
    y_p = np.clip(a=y_p, a_min=1e-15, a_max=1 - 1e-15)
    # Transform to log
    y_p_log = np.log(y_p)
    # Sum of log-probabilities assigned to the true class, per class column.
    # Exclude class 99 for now, since there is no class99 in the training set
    # we gave a special process for that class
    y_log_ones = np.sum(y_true * y_p_log, axis=0)
    # Get the number of positives for each class
    if based_true:
        nb_pos = y_true.sum(axis=0).astype(float)
    else:
        nb_pos = pd.DataFrame(y_pred).sum(axis=0).astype(float)
    # Weight average and divide by the number of positives
    class_arr = np.array([class_weight[k] for k in sorted(class_weight.keys())])
    y_w = y_log_ones * class_arr / nb_pos
    loss = - np.nansum(y_w) / np.sum(class_arr)
    return loss
def calc_gradient(f, X):
    """
    calc_gradient
    Numerically estimate the gradient of ``f`` at ``X`` using central
    differences: component i is (f(X + h*e_i) - f(X - h*e_i)) / (2h).

    BUG FIX: the previous implementation saved state with ``store_X = X[:]``,
    which for a numpy array is a *view*, not a copy.  As a result the
    "minus h" evaluation actually ran at the unperturbed X, yielding a
    forward difference worth only half the true derivative.  We now save
    and restore the single perturbed component, which also guarantees X is
    unchanged on return.

    @params
        f: target function
        X: argument vector of f (numpy.array); restored to its original
           values before returning.  NOTE: expected to be a float array —
           np.zeros_like preserves dtype, so an integer X would truncate.
    @return
        gradient: gradient vector (numpy.array)
    """
    h = 1e-4
    gradient = np.zeros_like(X)
    # Central difference per component, perturbing one entry at a time.
    for i in range(X.size):
        original = X[i]
        # f(x+h)
        X[i] = original + h
        f_x_plus_h = f(X)
        # f(x-h)
        X[i] = original - h
        f_x_minus_h = f(X)
        # restore, then take the symmetric difference quotient
        X[i] = original
        gradient[i] = (f_x_plus_h - f_x_minus_h) / (2 * h)
    return gradient
def gradient_descent(f, X, learning_rate, max_iter, is_print=True, verbose_eval=100):
    """
    gradient_descent
    Minimize ``f`` with plain gradient descent, updating ``X`` in place.

    Descent stops early as soon as an iteration fails to improve the
    objective; otherwise it runs for ``max_iter`` iterations.

    @params
        f: objective function to minimize
        X: argument vector of f (numpy.array); updated in place
        learning_rate: step size
        max_iter: maximum number of iterations
        is_print: whether to log progress
        verbose_eval: log every ``verbose_eval`` iterations
    @return
        X: argument vector that (hopefully) minimizes f (numpy.array)
    """
    stopped_early = False
    prev_score = 9999
    for step in range(max_iter):
        X -= learning_rate * calc_gradient(f, X)
        score = f(X)
        # Stop as soon as the objective stops decreasing.
        if prev_score <= score:
            stopped_early = True
            break
        if is_print and step % verbose_eval == 0:
            print("[{:3d}] X = {}, f(X) = {:.7f}".format(step, X, score))
        prev_score = score
    if is_print and stopped_early:
        print("[{:3d}] X = {}, f(X) = {:.7f}".format(step, X, score))
    return X
def get_weight(y_true, y_pred, weight=None, eta=1, nround=100,
               is_print=True, verbose_eval=50, based_true=True):
    """
    Fit per-class weights that minimize multi_weighted_logloss.

    gradient_descent updates ``weight`` in place; the objective closes over
    that same array (its own argument is ignored), so the returned array is
    exactly the object that was optimized.
    """
    n_classes = y_true.shape[1]
    if weight is None:
        weight = np.ones(n_classes)

    def objective(X):
        # X is intentionally unused: gradient_descent mutates `weight`
        # in place, and the loss always reads the current `weight`.
        return multi_weighted_logloss(y_true, y_pred, weight, based_true=based_true)

    gradient_descent(objective, weight, learning_rate=eta, max_iter=nround,
                     is_print=is_print, verbose_eval=verbose_eval)
    return weight
| [
"pandas.DataFrame",
"numpy.nansum",
"numpy.zeros_like",
"numpy.sum",
"numpy.log",
"numpy.ones",
"numpy.clip"
] | [((1101, 1133), 'numpy.clip', 'np.clip', ([], {'a': 'y_p', 'a_min': '(0)', 'a_max': '(1)'}), '(a=y_p, a_min=0, a_max=1)\n', (1108, 1133), True, 'import numpy as np\n'), ((1171, 1182), 'numpy.log', 'np.log', (['y_p'], {}), '(y_p)\n', (1177, 1182), True, 'import numpy as np\n'), ((1401, 1433), 'numpy.sum', 'np.sum', (['(y_true * y_p_log)'], {'axis': '(0)'}), '(y_true * y_p_log, axis=0)\n', (1407, 1433), True, 'import numpy as np\n'), ((2155, 2171), 'numpy.zeros_like', 'np.zeros_like', (['X'], {}), '(X)\n', (2168, 2171), True, 'import numpy as np\n'), ((925, 949), 'numpy.ones', 'np.ones', (['y_true.shape[1]'], {}), '(y_true.shape[1])\n', (932, 949), True, 'import numpy as np\n'), ((1866, 1883), 'numpy.sum', 'np.sum', (['class_arr'], {}), '(class_arr)\n', (1872, 1883), True, 'import numpy as np\n'), ((3521, 3531), 'numpy.ones', 'np.ones', (['M'], {}), '(M)\n', (3528, 3531), True, 'import numpy as np\n'), ((1849, 1863), 'numpy.nansum', 'np.nansum', (['y_w'], {}), '(y_w)\n', (1858, 1863), True, 'import numpy as np\n'), ((1592, 1612), 'pandas.DataFrame', 'pd.DataFrame', (['y_pred'], {}), '(y_pred)\n', (1604, 1612), True, 'import pandas as pd\n')] |
'''
This code includes:
1. Training DPDist model, then it can be taken for any comparing point cloud tasks (an example for training registration is in PCRNet folder)
2. Simple auto encoder based on point net (can be replaced in any comparing point cloud task, or any other AUE)
3. Training component 2 using DPDist as its loss function.
Based on PointNet++ code: https://github.com/charlesq34/pointnet2
Multi-GPU training.
Near linear scale acceleration for multi-gpus on a single machine.
Will use H5 dataset in default. If using normal, will shift to the normal dataset.
For any issue, please contact me at: https://github.com/dahliau/DPDist/issues (or direct e-mail)
This code written by:
<NAME> 2020
'''
import argparse
import math
from datetime import datetime
import h5py
import numpy as np
import tensorflow as tf
import socket
import importlib
import os
import sys
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
ROOT_DIR = BASE_DIR
sys.path.append(BASE_DIR)
sys.path.append(os.path.join(ROOT_DIR, 'models'))
sys.path.append(os.path.join(ROOT_DIR, 'utils'))
import tf_util
import modelnet_dataset
import trimesh
parser = argparse.ArgumentParser()
# --- experiment selection, hardware and logging ---
parser.add_argument('--train_comp', default='dpdist', help='train dpdist graph or a task which use dpdist as its loss [default: dpdist]')
parser.add_argument('--num_gpus', type=int, default=1, help='How many gpus to use [default: 1]')
parser.add_argument('--model', default='dpdist_and_aue', help='Model name [default: pointnet2_cls_ssg]')
parser.add_argument('--log_dir', default='log/test1_', help='Log dir [default: log]')
# --- training schedule (points per cloud, epochs, batch, learning rates) ---
parser.add_argument('--num_point', type=int, default=64, help='Point Number [default: 1024]')
parser.add_argument('--max_epoch', type=int, default=10001, help='Epoch to run [default: 251]')
parser.add_argument('--max_epoch_aue', type=int, default=30001, help='Epoch to run [default: 251]')
parser.add_argument('--batch_size', type=int, default=16, help='Batch Size during training [default: 32]')
parser.add_argument('--learning_rate_aue', type=float, default=0.001, help='Initial learning rate [default: 0.001]')
parser.add_argument('--learning_rate_dpdist', type=float, default=0.0001, help='Initial learning rate [default: 0.0001]')
##for 64 points 0.0001 mean loss: min ~ 0.02
# --- optimizer and learning-rate decay ---
parser.add_argument('--momentum', type=float, default=0.9, help='Initial learning rate [default: 0.9]')
parser.add_argument('--optimizer', default='adam', help='adam or momentum [default: adam]')
parser.add_argument('--decay_step', type=int, default=300*512, help='Decay step for lr decay [default: 200*512]')
parser.add_argument('--decay_rate', type=float, default=0.5, help='Decay rate for lr decay [default: 0.5]')
# --- network architecture (encoders, embedding, implicit net) ---
parser.add_argument('--encoder', default='3dmfv', help='dpdist encoder: 3dmfv [default:3dmfv]')
parser.add_argument('--encoder_aue', default='pn', help='AUE encoder: pn [default:pn]')
parser.add_argument('--embedding_size', type=int, default=8**3, help='embedding size')
parser.add_argument('--full_fv', default='full', help='3dmfv feature vector [default: full]')
parser.add_argument('--BN', default='0', help='0')
parser.add_argument('--weight_decay', type=float, default=0.0, help='[default: 0.0]')
parser.add_argument('--K', default='5', help='[default: 5]')
parser.add_argument('--loss_type', default='l1_dist', help='[default: l1_dist]')
parser.add_argument('--implicit_net_type', default='1', help='[default: 1]')
# --- data selection and noise/augmentation ---
parser.add_argument('--category', default='chair', help='Choose single class to train on [default: None]')
parser.add_argument('--sigma3dmfv', type=float, default=2.0, help='[default: 2.0]')
parser.add_argument('--add_noise', type=float, default=0.00, help='[default: 0.00]')
parser.add_argument('--opt_type', type=str, default='ours', help='training AUE using: ours/chamfer [default: ours]')
################# PARAMS ###############
add_name='' #suffix appended to the log file name
FLAGS = parser.parse_args()
train_comp = FLAGS.train_comp
# 'dpdist' trains the DPDist comparison network itself; any other value
# builds and trains the task (auto-encoder) that uses DPDist as its loss.
if train_comp=='dpdist':
    train_dpdist=True
    create_task=False
    train_task=False
else:
    train_dpdist=False
    create_task=True
    train_task=True
OPT_TYPE = FLAGS.opt_type
ENCODER = FLAGS.encoder_aue #AUE encoder
GPU_INDEX=0
DATA_TYPE = 'modelnet' #modelnet/shapenetv0
NUM_DIMS=3
overlap = True #local patches from 3dmfv have overlap between them
SNmlp = [1024,1024,1024] #implicit network neurons in each layer
AUGMANTATIONS=1
AUGMANTATIONS_AUE=False
# Train on a single category if --category is given, otherwise all classes.
if FLAGS.category:
    cat = FLAGS.category
else:
    cat = 'all'
sigma3dmfv = FLAGS.sigma3dmfv*0.0625  # scaled by 1/16; presumably the 3DmFV grid cell size — TODO confirm
WD = FLAGS.weight_decay
BN=int(FLAGS.BN)
encoder_type = FLAGS.encoder
embedding_size = FLAGS.embedding_size
full_fv = FLAGS.full_fv
K = int(FLAGS.K)
loss_t = FLAGS.loss_type
implicit_net_type = int(FLAGS.implicit_net_type)
# TODO: add more options in 3dmfv pooling, currently its avg or all (avg,min,max)
# Convert the --full_fv string flag to a boolean ('small' -> False, else True).
if full_fv =='small':
    full_fv=False
else: #full:
    full_fv = True
EPOCH_CNT = 0
NUM_GPUS = FLAGS.num_gpus
BATCH_SIZE = FLAGS.batch_size
# The global batch is split evenly across GPUs, so it must divide exactly.
assert(BATCH_SIZE % NUM_GPUS == 0)
DEVICE_BATCH_SIZE = int(BATCH_SIZE / NUM_GPUS)
NUM_POINT = FLAGS.num_point
MAX_EPOCH_AUE = FLAGS.max_epoch_aue
BASE_LEARNING_RATE = 0.0001
BASE_LEARNING_RATE_TASK = FLAGS.learning_rate_aue
BASE_LEARNING_RATE_DPDist = FLAGS.learning_rate_dpdist
MOMENTUM = FLAGS.momentum
OPTIMIZER = FLAGS.optimizer
DECAY_STEP = FLAGS.decay_step
DECAY_RATE = FLAGS.decay_rate
### LOAD Network Model file ######
MODEL = importlib.import_module(FLAGS.model) # import network module
MODEL_FILE = os.path.join(ROOT_DIR, 'models', FLAGS.model+'.py')
UTIL_FILE = os.path.join(ROOT_DIR, 'utils', 'dpdist_util'+'.py')
######## Create logdir ##########
#add params to log folder name
# Encode the main hyper-parameters into the folder name so runs are
# self-describing and only collide when the configuration is identical.
LOG_DIR = FLAGS.log_dir+'_imp_type_'+str(implicit_net_type)\
          +'enc_'+encoder_type\
          +'EmbS'+str(embedding_size)\
          +'BN'+str(BN)+'LR'+str(BASE_LEARNING_RATE_DPDist)[2:]\
          +'wd'+str(WD)[2:]\
          +'sigma'+str(sigma3dmfv)[2:] \
          +'K'+str(K)\
          +'AUG'+str(AUGMANTATIONS)\
          +'ls'+str(loss_t)\
          +'noise'+str(int(FLAGS.add_noise*100))+cat\
          +'ov'+str(int(overlap))+'np'+str(NUM_POINT)
# A pre-existing log dir signals a previous run with the same config:
# its checkpoint is expected to be loaded later.
if not os.path.exists(LOG_DIR):
    os.mkdir(LOG_DIR)
    LOAD_OLD_MODEL=False
else:
    print('need to load old model')
    LOAD_OLD_MODEL=True
# Snapshot the model/util/train sources into the log dir for reproducibility.
os.system('cp %s %s' % (MODEL_FILE, LOG_DIR)) # bkp of model def
os.system('cp %s %s' % (UTIL_FILE, LOG_DIR)) # bkp of util def
os.system('cp train.py %s' % (LOG_DIR)) # bkp of train procedure
LOG_FOUT = open(os.path.join(LOG_DIR, 'log_train'+OPT_TYPE+add_name+'.txt'), 'w')
LOG_FOUT.write(str(FLAGS)+'\n')
## BN params ####
# Batch-norm decay schedule constants; presumably consumed by
# get_bn_decay() (not visible here) — TODO confirm.
BN_INIT_DECAY = 0.5
BN_DECAY_DECAY_RATE = 0.5
BN_DECAY_DECAY_STEP = float(DECAY_STEP)
BN_DECAY_CLIP = 0.99
HOSTNAME = socket.gethostname()
######### LOAD DATASET ####################
# ModelNet point clouds; note npoints=NUM_POINT*2 — presumably each sample
# is later split into two NUM_POINT clouds for comparison — TODO confirm.
DATA_PATH = os.path.join(BASE_DIR, 'data/modelnet40_normal_resampled')
TRAIN_DATASET = modelnet_dataset.ModelNetDataset(root=DATA_PATH, npoints=NUM_POINT*2, split='train',
                                                 normal_channel=False, batch_size=BATCH_SIZE,normalize=False,class_choice=FLAGS.category)
TEST_DATASET = modelnet_dataset.ModelNetDataset(root=DATA_PATH, npoints=NUM_POINT*2, split='test',
                                                normal_channel=False, batch_size=BATCH_SIZE,normalize=False,class_choice=FLAGS.category)
####### TRAIN ###############
def train():
if train_dpdist:
g1 = tf.Graph()
with g1.as_default():
with tf.device('/cpu:0'):
pcA_pl, pcB_pl, labels_AB_pl, labels_BA_pl = MODEL.placeholder_inputs(BATCH_SIZE, NUM_POINT,NUM_DIMS=NUM_DIMS)
is_training_pl = tf.placeholder(tf.bool, shape=())
noise_pl = tf.placeholder(tf.float32, shape=(BATCH_SIZE, NUM_POINT, NUM_DIMS), name='add_noise')
# print(is_training_pl)
# Note the global_step=batch parameter to minimize.
# That tells the optimizer to helpfully increment the 'batch' parameter
# for you every time it trains.
batch = tf.get_variable('batch', [],
initializer=tf.constant_initializer(0), trainable=False)
# batch_loss = tf.get_variable('batch_loss', [],
# initializer=tf.constant_initializer(0), trainable=False)
bn_decay = get_bn_decay(batch)
tf.summary.scalar('bn_decay', bn_decay)
# Set learning rate and optimizer
learning_rate = get_learning_rate(batch,BASE_LEARNING_RATE_DPDist)
# learning_rate = tf.placeholder(tf.float32, [], name='learning_rate')
tf.summary.scalar('learning_rate', learning_rate)
if OPTIMIZER == 'momentum':
optimizer = tf.train.MomentumOptimizer(learning_rate, momentum=MOMENTUM)
elif OPTIMIZER == 'adam':
optimizer = tf.train.AdamOptimizer(learning_rate)
# -------------------------------------------
# Get model and loss on multiple GPU devices
# -------------------------------------------
# Allocating variables on CPU first will greatly accelerate multi-gpu training.
# Ref: https://github.com/kuza55/keras-extras/issues/21
# MODEL.get_model(pointclouds_pl, is_training_pl, bn_decay=bn_decay)
MODEL.get_model(pcA_pl,pcB_pl, is_training_pl, bn_decay=bn_decay, wd=WD,
bn=BN,sig = False,
Embedding_Size=embedding_size,
pn=encoder_type,
k=K,localSNmlp = SNmlp,overlap=overlap,full_fv=full_fv,
conv_version=implicit_net_type,sigma3dmfv=sigma3dmfv,add_noise=noise_pl)
tower_grads_s = []
tower_grads_p = []
pred_gpu_AB = []
pred_gpu_BA = []
total_loss_gpu_s = []
total_loss_gpu_p = []
for i in range(NUM_GPUS):
with tf.variable_scope(tf.get_variable_scope(), reuse=True):
with tf.device('/gpu:%d'%(i)), tf.name_scope('gpu_%d'%(i)) as scope:
# Evenly split input data to each GPU
pcA_pl_batch = tf.slice(pcA_pl,
[i*DEVICE_BATCH_SIZE,0,0], [DEVICE_BATCH_SIZE,-1,-1])
pcB_pl_batch = tf.slice(pcB_pl,
[i * DEVICE_BATCH_SIZE, 0, 0], [DEVICE_BATCH_SIZE, -1, -1])
labels_AB_batch = tf.slice(labels_AB_pl,
[i * DEVICE_BATCH_SIZE, 0], [DEVICE_BATCH_SIZE, -1])
labels_BA_batch = tf.slice(labels_BA_pl,
[i * DEVICE_BATCH_SIZE, 0], [DEVICE_BATCH_SIZE, -1])
noise_pl_batch = tf.slice(noise_pl,
[i * DEVICE_BATCH_SIZE, 0, 0], [DEVICE_BATCH_SIZE, -1, -1])
pred, end_points, emb = MODEL.get_model(pcA_pl_batch,pcB_pl_batch,
is_training_pl,
bn_decay=bn_decay, wd=WD, bn=BN,sig=False,
Embedding_Size=embedding_size,
pn=encoder_type,
k=K,localSNmlp = SNmlp,overlap=overlap,full_fv=full_fv,
conv_version=implicit_net_type,sigma3dmfv=sigma3dmfv,add_noise=noise_pl_batch)
MODEL.get_loss(pred, end_points,labels_AB_batch,loss_type = loss_t)
#loss over samples: (L1 loss)
loss_samples = tf.get_collection('loss_samples', scope)
total_loss_samples = tf.add_n(loss_samples, name='total_loss_samples')
#loss over prediction: (compare clouds' output)
loss_pred = tf.get_collection('loss_pred', scope)
total_loss_pred = tf.add_n(loss_pred, name='total_loss_pred')
# print('losses:')
for l in loss_samples + [total_loss_samples]+\
loss_pred+[total_loss_pred]:
# print(l.op.name)
tf.summary.scalar(l.op.name, l)
train_vars_s = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope='pc_compare')
train_vars_p = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)
grads_s = optimizer.compute_gradients(total_loss_samples,train_vars_s)
grads_p = optimizer.compute_gradients(total_loss_pred,train_vars_p)
tower_grads_s.append(grads_s)
tower_grads_p.append(grads_p)
pred_gpu_AB.append(pred['pred_listAB'])
pred_gpu_BA.append(pred['pred_listBA'])
total_loss_gpu_s.append(total_loss_samples)
total_loss_gpu_p.append(total_loss_pred)
# Merge pred and losses from multiple GPUs
pred_AB = tf.concat(pred_gpu_AB, 0)
pred_BA = tf.concat(pred_gpu_BA, 0)
total_loss_s = tf.reduce_mean(total_loss_gpu_s)
total_loss_p = tf.reduce_mean(total_loss_gpu_p)
# Get training operator
grads_s = average_gradients(tower_grads_s)
grads_p = average_gradients(tower_grads_p)
train_op_s = optimizer.apply_gradients(grads_s, global_step=batch)
train_op_p = optimizer.apply_gradients(grads_p, global_step=batch)
# Add ops to save and restore all the variables.
saver = tf.train.Saver()
# Create a session
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
config.allow_soft_placement = True
config.log_device_placement = False
sess = tf.Session(config=config)
# Add summary writers
merged = tf.summary.merge_all()
RUN_DIR=0
while os.path.exists(os.path.join(LOG_DIR, 'run'+str(RUN_DIR))):
RUN_DIR+=1
os.mkdir(os.path.join(LOG_DIR, 'run'+str(RUN_DIR)))
RUN_DIR = os.path.join(LOG_DIR, 'run'+str(RUN_DIR))
train_writer = tf.summary.FileWriter(os.path.join(RUN_DIR, 'train'), sess.graph)
test_writer = tf.summary.FileWriter(os.path.join(RUN_DIR, 'test'), sess.graph)
# Init variables
init = tf.global_variables_initializer()
sess.run(init)
ops = {'pcA_pl': pcA_pl,
'pcB_pl': pcB_pl,
'labels_AB_pl' : labels_AB_pl,
'labels_BA_pl': labels_BA_pl,
'is_training_pl': is_training_pl,
'pred': {'pred_AB':pred_AB,'pred_BA':pred_BA},
'loss_s': total_loss_s,
'loss_p': total_loss_p,
'train_op_s': train_op_s,
'train_op_p': train_op_p,
'merged': merged,
'step': batch,
'end_points': end_points,
'noise_pl':noise_pl,
}
best_acc = -1
losses = np.zeros(100)
for epoch in range(FLAGS.max_epoch):
log_string('**** EPOCH %03d ****' % (epoch))
losses[1:] = losses[:-1]
losses[0] = train_one_epoch(sess, ops, train_writer,epoch)
# Save the variables to disk.
if epoch % 10 == 0:
eval_one_epoch(sess, ops, test_writer, epoch)
save_path = saver.save(sess, os.path.join(LOG_DIR, "model.ckpt"))
log_string("Model saved in file: %s" % save_path)
if create_task:
g2 = tf.Graph()
with g2.as_default():
with tf.device('/gpu:' + str(GPU_INDEX)):
batch = tf.get_variable('batch', [],
initializer=tf.constant_initializer(0), trainable=False)
bn_decay = get_bn_decay(batch)
tf.summary.scalar('bn_decay', bn_decay)
# Set learning rate and optimizer
learning_rate = get_learning_rate(batch,BASE_LEARNING_RATE_TASK)
pcC = tf.placeholder(tf.float32,[BATCH_SIZE,NUM_POINT,NUM_DIMS],'input')
is_training_pl_2 = tf.placeholder(tf.bool, shape=())
if ENCODER == 'pn':
pcC_rec = MODEL.get_model_aue_pn(pcC, is_training_pl_2, bn_decay=bn_decay, wd=WD, bn=BN)
else: #3dmfv
pcC_rec = MODEL.get_model_aue_3dmfv(pcC, is_training_pl_2, bn_decay=bn_decay, wd=WD, bn=BN)
# loss = tf.reduce_mean(tf.square(pcC-pcC_rec))
loss = chmafer_dist(pcC, pcC_rec)
if OPTIMIZER == 'momentum':
optimizer2 = tf.train.MomentumOptimizer(learning_rate, momentum=MOMENTUM)
elif OPTIMIZER == 'adam':
optimizer2 = tf.train.AdamOptimizer(learning_rate)
train_opt_l2 = optimizer2.minimize(loss)
# Add ops to save and restore all the variables.
saver2 = tf.train.Saver()
# Create a session
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
config.allow_soft_placement = True
config.log_device_placement = False
sess2 = tf.Session(config=config)
# Init variables
init = tf.global_variables_initializer()
sess2.run(init)
ops2 = {'pcC':pcC,
'is_training_pl_2':is_training_pl_2,
'loss':loss,
'train_opt_l2':train_opt_l2,
'pcC_rec':pcC_rec,
'step': batch,
}
saver2.save(sess2, os.path.join(LOG_DIR, "model2.ckpt"))
if train_task:
from tensorflow.python.framework import meta_graph
graph = tf.Graph()
with graph.as_default():
with tf.device('/gpu:' + str(GPU_INDEX)):
batch = tf.get_variable('batch', [],
initializer=tf.constant_initializer(0), trainable=False)
bn_decay = get_bn_decay(batch)
tf.summary.scalar('bn_decay', bn_decay)
# Set learning rate and optimizer
learning_rate = get_learning_rate(batch,BASE_LEARNING_RATE_TASK)
learning_rate_summary = tf.summary.scalar('learning_rate_aue', learning_rate)
is_training_pl_1 = tf.placeholder(tf.bool, shape=())
is_training_pl_2 = tf.placeholder(tf.bool, shape=())
x1 = tf.placeholder(tf.float32, (BATCH_SIZE, NUM_POINT,NUM_DIMS), name='input1')
x2 = tf.placeholder(tf.float32, (BATCH_SIZE, NUM_POINT,NUM_DIMS), name='input2')
x3 = tf.placeholder(tf.float32, (BATCH_SIZE, NUM_POINT,NUM_DIMS), name='input3')
#Load models from ckpts:
saver32 = tf.train.import_meta_graph(os.path.join(LOG_DIR, 'model2.ckpt.meta'),
import_scope='g2',
input_map={'input': x1,
'Placeholder':is_training_pl_2}
)
out2 = graph.get_tensor_by_name('g2/aue/output:0')
saver31 = tf.train.import_meta_graph(os.path.join(LOG_DIR, 'model.ckpt.meta'),
import_scope='g1',
input_map={'input1': out2,
'input2': x2,
'Placeholder': is_training_pl_1,
'add_noise':x3}
)
labels12 = graph.get_tensor_by_name('g1/labels12:0')
#set optimizer:
if OPTIMIZER == 'momentum':
optimizer3 = tf.train.MomentumOptimizer(learning_rate, momentum=MOMENTUM)
elif OPTIMIZER == 'adam':
optimizer3 = tf.train.AdamOptimizer(learning_rate,name='Adam2')
#optimizer only on g2 variables.
#get loss:
pred_AB3 = (graph.get_tensor_by_name('g1/pc_compare/output1:0'))
pred_BA3 = (graph.get_tensor_by_name('g1/pc_compare/output2:0'))
loss_p = (tf.reduce_mean(pred_AB3[:,:,:,0]) +
tf.reduce_mean(pred_BA3[:,:,:,0]))/2.0
loss_c = chmafer_dist(x1, out2)
# We train only the AUE (g2), DPDist is already trained (g1)
train_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope='g2')
# all_train_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)
#get grads:
grads = optimizer3.compute_gradients(loss_p, train_vars)
grads_c = optimizer3.compute_gradients(loss_c, train_vars)
train_opt_pred3 = optimizer3.apply_gradients(grads, global_step=batch)
train_opt_pred_c = optimizer3.apply_gradients(grads_c, global_step=batch)
merged_old = tf.summary.merge_all()
ours_summary = tf.summary.scalar('DPDist',loss_p)
chamf_summary = tf.summary.scalar('Chamfer',loss_c)
merged = tf.summary.merge([ours_summary,chamf_summary,learning_rate_summary])
saver3 = tf.train.Saver()
#How to init all var?
# Create a session
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
config.allow_soft_placement = True
config.log_device_placement = False
sess3 = tf.InteractiveSession(config=config)
train_writer = tf.summary.FileWriter(os.path.join(LOG_DIR, 'train'+OPT_TYPE+add_name), sess3.graph)
test_writer = tf.summary.FileWriter(os.path.join(LOG_DIR, 'test'+OPT_TYPE+add_name), sess3.graph)
init = tf.global_variables_initializer()
###Load previuos weights
MODEL_PATH = os.path.join(LOG_DIR, 'model2.ckpt')
saver32.restore(sess3, MODEL_PATH)
#DPDist weights loader
MODEL_PATH = os.path.join(LOG_DIR, 'model.ckpt')
saver31.restore(sess3, MODEL_PATH)
initialize_uninitialized_vars(sess3)
print('models restored')
ops3 = {
'x1':x1,
'x2':x2,
'add_noise':x3,
# 'pc_Crec':pc_Crec,
'is_training_pl1':is_training_pl_1,
'is_training_pl2': is_training_pl_2,
'train_opt':train_opt_pred3,
'train_opt_c':train_opt_pred_c,
'loss':loss_p,
'loss_c':loss_c,
'step':batch,
'out2':out2,
'merged':merged,
'labels12':labels12}
for epoch in range(MAX_EPOCH_AUE):
log_string('**** EPOCH %03d ****' % (epoch))
train_one_epoch_3d_block(sess3, ops3,train_writer,epoch)
if (epoch%2)==0:
test_one_epoch_3d_block(sess3, ops3, test_writer, epoch, )
saver3.save(sess3, os.path.join(LOG_DIR, "model3"+OPT_TYPE+add_name+".ckpt"))
# Train/test/eval:
def train_one_epoch(sess, ops, train_writer, epoch):
    """Run one training epoch by delegating to the 3D DPDist trainer."""
    return train_one_epoch_3d(sess, ops, train_writer, epoch)
def train_one_epoch_3d_block(sess, ops, train_writer, epoch):
    """Train the autoencoder (g2) for one epoch on the ModelNet split."""
    return train_one_epoch_3d_block_modelnet(sess, ops, train_writer, epoch)
def train_one_epoch_3d_block_modelnet(sess, ops,train_writer,epoch,):
    """Train the AUE (g2) for one epoch while the DPDist network (g1) stays frozen.

    Each dataset sample is reshaped so that two NUM_POINT point clouds from the
    same surface are fed as x1/x2, then the optimizer selected by the global
    OPT_TYPE ('chamfer' -> Chamfer loss, otherwise the DPDist loss) is stepped.
    Running means are logged every 10 batches and a reconstruction figure is
    saved every 200 batches.

    Args:
        sess: tf.Session holding the merged g1/g2 graph.
        ops: dict of tensors/ops built in the train_task branch
            (x1, x2, train_opt, train_opt_c, loss, loss_c, out2, ...).
        train_writer: tf.summary.FileWriter receiving training summaries.
        epoch: int epoch index (used only for logging and figure names).
    """
    is_training = True
    log_string(str(datetime.now()))
    # Make sure batch data is of same size
    cur_batch_data = np.zeros((2,BATCH_SIZE, NUM_POINT, TRAIN_DATASET.num_channel()))
    loss_sum = 0
    loss_sum_c = 0
    batch_idx = 0
    total_loss_sum = 0
    total_loss_sum_c = 0
    total_count = 0
    while TRAIN_DATASET.has_next_batch():
        batch_data, batch_label = TRAIN_DATASET.next_batch(augment=AUGMANTATIONS_AUE)
        batch_data = batch_data
        B = batch_data.shape[0]
        # Reorganize points into [B, 3 groups, 2 halves, N, xyz]; only
        # group 0 is used below (presumably the on-surface split, matching
        # the layout documented in train_one_epoch_3d — confirm with dataset).
        batch_data = np.reshape(batch_data,[B,3,2,-1,3])
        # batch_data = np.split(batch_data,3,1) #2048->1024,1024 BxNX3->3xBxN/3x3
        # batch_data = np.split(batch_data,2,2)
        cur_batch_data[0,:B,...] = batch_data[:,0,0,:NUM_POINT]
        cur_batch_data[1,:B,...] = batch_data[:,0,1,:NUM_POINT]
        feed_dict={ops['x1']:cur_batch_data[0],
                   ops['x2']: cur_batch_data[1],
                   ops['is_training_pl1']:False,  # g1 (DPDist) kept in inference mode
                   ops['is_training_pl2']:is_training,  # g2 (AUE) is the part being trained
                   ops['add_noise']:np.zeros_like(cur_batch_data[0]),
                   # train_opt_pred3:True,
                   }
        # Select which loss drives the AUE weights for this run.
        if OPT_TYPE == 'chamfer':
            opt = ops['train_opt_c']
        else: #ours
            opt = ops['train_opt']
        summary,step, _, loss_val,loss_val_c, rec_pc = sess.run([ops['merged'],ops['step'],
            opt, ops['loss'],ops['loss_c'], ops['out2']], feed_dict=feed_dict)
        train_writer.add_summary(summary, step)
        loss_sum_c+=loss_val_c
        loss_sum += loss_val
        total_count += 1
        total_loss_sum += loss_val
        total_loss_sum_c += loss_val_c
        B_STEP=10
        if ((batch_idx + 1) % B_STEP) == 0:
            log_string(' ---- batch: %03d ----' % (batch_idx + 1))
            log_string('mean loss: %f' % (loss_sum / B_STEP))
            log_string('chamf mean loss: %f' % (loss_sum_c / B_STEP))
            if ((batch_idx + 1) % 200) == 0:
                # Save a side-by-side 3D scatter: reconstruction vs. input cloud.
                pc = np.squeeze(rec_pc[0]) # 1024x3
                pc2 = np.squeeze(cur_batch_data[0][0])
                fig = plt.figure()
                ax = fig.add_subplot(121, projection='3d')
                ax.scatter(pc[:,0],pc[:,1],pc[:,2])
                ax2 = fig.add_subplot(122, projection='3d')
                ax2.scatter(pc2[:,0],pc2[:,1],pc2[:,2])
                ax.set_xlim(-1,1)
                ax.set_ylim(-1,1)
                ax.set_zlim(-1,1)
                ax2.set_xlim(-1,1)
                ax2.set_ylim(-1,1)
                ax2.set_zlim(-1,1)
                plt.savefig(os.path.join(LOG_DIR,str(batch_idx)+str(epoch)+'temp_rec.png'))
                print('added fig')
                plt.close(fig)
            # log_string('accuracy: %f' % (total_correct / float(total_seen)))
            # Reset the running (per-10-batch) accumulators only.
            loss_sum = 0
            loss_sum_c = 0
        batch_idx += 1
    log_string(' ---- epoch: %03d ----' % (epoch + 1))
    log_string('DPDist mean loss: %f' % (total_loss_sum / total_count))
    log_string('chamf mean loss: %f' % (total_loss_sum_c / total_count))
    TRAIN_DATASET.reset()
def test_one_epoch_3d_block(sess, ops, train_writer, epoch):
    """Evaluate the autoencoder (g2) for one epoch on the ModelNet test split."""
    return test_one_epoch_3d_block_modelnet(sess, ops, train_writer, epoch)
def test_one_epoch_3d_block_modelnet(sess, ops,train_writer,epoch,):
    """Evaluate the AUE (g2) for one epoch on the test split (no optimizer step).

    Mirrors train_one_epoch_3d_block_modelnet but runs with is_training=False,
    iterates TEST_DATASET, and never applies gradients: sess.run fetches only
    summaries, losses, and the reconstruction. Figures are dumped every 20
    batches with a 'test' filename prefix.

    Args:
        sess: tf.Session holding the merged g1/g2 graph.
        ops: dict of tensors/ops built in the train_task branch.
        train_writer: tf.summary.FileWriter (the test writer is passed in here).
        epoch: int epoch index for logging / figure names.
    """
    is_training = False
    log_string(str(datetime.now()))
    # Make sure batch data is of same size
    loss_sum = 0
    loss_sum_c = 0
    batch_idx = 0
    total_loss_sum = 0
    total_loss_sum_c = 0
    total_count = 0
    cur_batch_data = np.zeros((2,BATCH_SIZE, NUM_POINT, TRAIN_DATASET.num_channel()))
    while TEST_DATASET.has_next_batch():
        batch_data, batch_label = TEST_DATASET.next_batch(augment=False)
        B = batch_data.shape[0]
        # Same [B, 3, 2, N, xyz] layout as in training; only group 0 is used.
        batch_data = np.reshape(batch_data,[B,3,2,-1,3])
        cur_batch_data[0,:B,...] = batch_data[:,0,0,:NUM_POINT]
        cur_batch_data[1,:B,...] = batch_data[:,0,1,:NUM_POINT]
        feed_dict={ops['x1']:cur_batch_data[0],
                   ops['x2']: cur_batch_data[1],
                   ops['is_training_pl1']:False,
                   ops['is_training_pl2']:is_training,
                   ops['add_noise']: np.zeros_like(cur_batch_data[0]),
                   }
        # Note: no train op in the fetch list — evaluation only.
        summary,step, loss_val,loss_val_c, rec_pc = sess.run([ops['merged'],ops['step'],
            ops['loss'],ops['loss_c'], ops['out2']], feed_dict=feed_dict)
        train_writer.add_summary(summary, step)
        loss_sum_c+=loss_val_c
        loss_sum += loss_val
        total_count += 1
        total_loss_sum += loss_val
        total_loss_sum_c += loss_val_c
        B_STEP=10
        if ((batch_idx + 1) % B_STEP) == 0:
            log_string(' ---- batch: %03d ----' % (batch_idx + 1))
            log_string('mean loss: %f' % (loss_sum / B_STEP))
            log_string('chamf mean loss: %f' % (loss_sum_c / B_STEP))
            if ((batch_idx + 1) % 20) == 0:
                # Save a side-by-side 3D scatter: reconstruction vs. raw batch data.
                pc = np.squeeze(rec_pc[0]) # 1024x3
                pc2 = np.squeeze(batch_data[0][0])
                fig = plt.figure()
                ax = fig.add_subplot(121, projection='3d')
                ax.scatter(pc[:,0],pc[:,1],pc[:,2])
                ax2 = fig.add_subplot(122, projection='3d')
                ax2.scatter(pc2[:,0],pc2[:,1],pc2[:,2])
                ax.set_xlim(-1,1)
                ax.set_ylim(-1,1)
                ax.set_zlim(-1,1)
                ax2.set_xlim(-1,1)
                ax2.set_ylim(-1,1)
                ax2.set_zlim(-1,1)
                plt.savefig(os.path.join(LOG_DIR,'test'+str(batch_idx)+str(epoch)+'temp_rec.png'))
                print('added fig')
                plt.close(fig)
            # log_string('accuracy: %f' % (total_correct / float(total_seen)))
            loss_sum = 0
            loss_sum_c = 0
        batch_idx += 1
    log_string(' ---- epoch: %03d ----' % (epoch + 1))
    log_string('mean loss: %f' % (total_loss_sum / total_count))
    log_string('chamf mean loss: %f' % (total_loss_sum_c / total_count))
    TEST_DATASET.reset()
def train_one_epoch_3d_aue(sess, ops, epoch):
    """Train the standalone autoencoder for one epoch (ModelNet backend)."""
    return train_one_epoch_3d_aue_modelnet(sess, ops, epoch)
def train_one_epoch_3d_aue_modelnet(sess, ops,epoch):
    """Train the standalone autoencoder (g2 graph, ops2 dict) for one epoch.

    Takes the first NUM_POINT points of each sample as pcC, steps the
    autoencoder's L2/Chamfer optimizer, logs a running mean every 10 batches,
    and saves a reconstruction figure every 500 batches.

    Args:
        sess: tf.Session for the autoencoder graph.
        ops: dict with 'pcC', 'is_training_pl_2', 'train_opt_l2', 'loss',
            'pcC_rec', and 'step'.
        epoch: int epoch index, used only for the end-of-epoch log line.
    """
    is_training = True
    log_string(str(datetime.now()))
    # Make sure batch data is of same size
    loss_sum = 0
    batch_idx = 0
    total_loss_sum = 0
    total_count = 0
    while TRAIN_DATASET.has_next_batch():
        batch_data, batch_label = TRAIN_DATASET.next_batch(augment=False)
        batch_data = batch_data
        batch_data = np.split(batch_data,2,1) #2048->1024,1024
        cur_batch_data = batch_data[0][:,:NUM_POINT]
        feed_dict = {ops['pcC']: cur_batch_data,
                     ops['is_training_pl_2']: is_training,}
        step, _, loss_val, rec_pc = sess.run([ops['step'],
            ops['train_opt_l2'], ops['loss'], ops['pcC_rec']], feed_dict=feed_dict)
        # train_writer.add_summary(summary, step)
        loss_sum += loss_val
        total_count += 1
        total_loss_sum += loss_val
        B_STEP=10
        if ((batch_idx + 1) % B_STEP) == 0:
            log_string(' ---- batch: %03d ----' % (batch_idx + 1))
            log_string('mean loss: %f' % (loss_sum / B_STEP))
            # log_string('accuracy: %f' % (total_correct / float(total_seen)))
            loss_sum = 0
            if ((batch_idx + 1) % 500) == 0:
                # Save a side-by-side 3D scatter: reconstruction vs. input cloud.
                pc = np.squeeze(rec_pc[0]) # 1024x3
                pc2 = np.squeeze(cur_batch_data[0])
                fig = plt.figure()
                ax = fig.add_subplot(121, projection='3d')
                ax.scatter(pc[:,0],pc[:,1],pc[:,2])
                ax2 = fig.add_subplot(122, projection='3d')
                ax2.scatter(pc2[:,0],pc2[:,1],pc2[:,2])
                ax.set_xlim(-1,1)
                ax.set_ylim(-1,1)
                ax.set_zlim(-1,1)
                ax2.set_xlim(-1,1)
                ax2.set_ylim(-1,1)
                ax2.set_zlim(-1,1)
                plt.savefig(os.path.join(LOG_DIR,str(batch_idx)+'temp_rec.png'))
                print('added fig')
                plt.close(fig)
        batch_idx += 1
    log_string(' ---- epoch: %03d ----' % (epoch + 1))
    log_string('mean loss: %f' % (total_loss_sum / total_count))
    TRAIN_DATASET.reset()
def train_one_epoch_3d(sess, ops, train_writer,epoch):
    """Train the DPDist network for one epoch.

    Each dataset sample carries 3 point groups (on-surface, close-to-surface,
    random in the unit cube). Cloud A is on-surface points; cloud B is half
    on-surface and half off-surface (mixed close/far per split_off_surface),
    with matching GT distance labels for the A->B direction. Runs the sampled
    loss optimizer ('train_op_s') once per batch.

    Args:
        sess: tf.Session for the DPDist training graph.
        ops: dict with 'pcA_pl', 'pcB_pl', 'labels_AB_pl', 'labels_BA_pl',
            'is_training_pl', 'noise_pl', 'merged', 'step', 'train_op_s',
            'loss_s', and 'pred'.
        train_writer: tf.summary.FileWriter for training summaries.
        epoch: int epoch index, used only for logging.
    """
    is_training = True
    log_string(str(datetime.now()))
    # Make sure batch data is of same size
    cur_batch_data = np.zeros((3,BATCH_SIZE, NUM_POINT, TRAIN_DATASET.num_channel()))
    cur_batch_label = np.zeros((BATCH_SIZE,NUM_POINT), dtype=np.int32)  # NOTE: allocated but unused below
    cur_batch_label_AB = np.zeros((BATCH_SIZE,NUM_POINT), dtype=np.float32)
    # BA labels are a constant -1 placeholder (only the AB direction is supervised here)
    cur_batch_label_BA = -np.ones((BATCH_SIZE,NUM_POINT), dtype=np.float32)
    loss_sum = 0
    batch_idx = 0
    total_loss_sum = 0
    total_count = 0
    H_NUM_POINT = int(NUM_POINT / 2) # training points: half rely on the surface, half off the surface.
    split_off_surface = 0.5 # how many points from close to surface set,and the rest from the unit cube
    while TRAIN_DATASET.has_next_batch():
        batch_data, batch_label = TRAIN_DATASET.next_batch(augment=AUGMANTATIONS)
        # dataset include 3 * 10k points on surface, close to surface, random from the unit cube.
        batch_data = np.split(batch_data, 3, 1) # surface, close, far
        batch_surface = np.split(batch_data[0], 2, 1) # take two point clouds from the same surface S_A,S_B
        bsize = batch_data[0].shape[0]
        cur_batch_data[0, 0:bsize, ...] = batch_surface[0][:, :NUM_POINT]
        # half on surface half off-surface
        batch_label = np.split(batch_label, 2, 1) # GT distances of close and far points
        # AB labels: 0 for the on-surface half, then GT distances for the
        # close/far mix that makes up cloud B's off-surface half.
        cur_batch_label_AB[:bsize, :] = np.concatenate(
            [np.zeros([bsize, H_NUM_POINT]), batch_label[0][:, :int(H_NUM_POINT * split_off_surface)],
             batch_label[1][:, int(H_NUM_POINT * split_off_surface):H_NUM_POINT]], 1)
        # print(np.max(cur_batch_label_AB))
        batch_off = np.concatenate([batch_data[1][:, :int(H_NUM_POINT * split_off_surface)],
                                    batch_data[2][:, int(H_NUM_POINT * split_off_surface):H_NUM_POINT]], 1)
        cur_batch_data[1, :bsize, ...] = np.concatenate([batch_surface[1][:, :H_NUM_POINT], batch_off], 1)
        # Optional Gaussian jitter on the inputs, scaled by FLAGS.add_noise.
        if FLAGS.add_noise > 0.0:
            add_noise = np.random.randn(BATCH_SIZE, NUM_POINT, NUM_DIMS) * FLAGS.add_noise
        else:
            add_noise = np.zeros([BATCH_SIZE, NUM_POINT, NUM_DIMS], 'float32')
        feed_dict = {ops['pcA_pl']: cur_batch_data[0],
                     ops['pcB_pl']: cur_batch_data[1],
                     ops['labels_AB_pl']: cur_batch_label_AB,
                     ops['labels_BA_pl']: cur_batch_label_BA,
                     ops['is_training_pl']: is_training,
                     ops['noise_pl']:add_noise}
        #### For debug: make sure the two point clouds has different samples
        # from scipy.spatial.distance import cdist
        # for i in range(len(cur_batch_data[0])):
        #     dist = cdist(cur_batch_data[0][i], cur_batch_data[1][i]).min(1)
        #     print('distances',dist.max(),dist.min())
        summary, step, _, loss_val, pred_val = sess.run([ops['merged'], ops['step'],
            ops['train_op_s'], ops['loss_s'], ops['pred']], feed_dict=feed_dict)
        train_writer.add_summary(summary, step)
        loss_sum += loss_val
        total_count += 1
        total_loss_sum += loss_val
        B_STEP=10
        if ((batch_idx + 1) % B_STEP) == 0:
            log_string(' ---- batch: %03d ----' % (batch_idx + 1))
            log_string('mean loss: %f' % (loss_sum / B_STEP))
            # log_string('accuracy: %f' % (total_correct / float(total_seen)))
            loss_sum = 0
        batch_idx += 1
    log_string(' ---- epoch: %03d ----' % (epoch + 1))
    log_string('mean loss: %f' % (total_loss_sum / total_count))
    TRAIN_DATASET.reset()
def eval_one_epoch(sess, ops, test_writer, epoch):
    """Evaluate the DPDist network on the test split; delegates to the 3D impl."""
    return eval_one_epoch_3d(sess, ops, test_writer, epoch)
def eval_one_epoch_3d(sess, ops, test_writer, epoch):
    """Evaluate DPDist on the test split for one full pass.

    Builds each batch the same way as train_one_epoch_3d (cloud A on-surface,
    cloud B half on-surface / half off-surface), but runs with
    is_training=False, fetches only summaries and the sampled loss, and applies
    no optimizer step.

    Args:
        sess: tf.Session for the DPDist graph.
        ops: dict mapping from string to tf ops (pcA_pl, pcB_pl, labels_*_pl,
            is_training_pl, noise_pl, merged, step, loss_s, pred).
        test_writer: tf.summary.FileWriter for evaluation summaries.
        epoch: int epoch index (unused beyond the call signature).

    Returns:
        int: constant 1 (accuracy reporting is not implemented).
    """
    global EPOCH_CNT
    is_training = False
    log_string(str(datetime.now()))
    # Make sure batch data is of same size
    cur_batch_data = np.zeros((3,BATCH_SIZE, NUM_POINT, TRAIN_DATASET.num_channel()))
    cur_batch_label_AB = np.zeros((BATCH_SIZE,NUM_POINT), dtype=np.float32)
    cur_batch_label_BA = np.zeros((BATCH_SIZE,NUM_POINT), dtype=np.float32) #Remain empty (does not effect training)
    loss_sum = 0
    batch_idx = 0
    log_string(str(datetime.now()))
    log_string('---- EPOCH %03d EVALUATION ----' % (EPOCH_CNT))
    H_NUM_POINT = int(NUM_POINT / 2) # training points: half rely on the surface, half off the surface.
    split_off_surface = 0.5 # how many points from close to surface set,and the rest from the unit cube
    while TEST_DATASET.has_next_batch():
        batch_data, batch_label = TEST_DATASET.next_batch(augment=False)
        #dataset include 3 * 10k points on surface, close to surface, random from the unit cube.
        batch_data = np.split(batch_data, 3, 1) # surface, close, far
        batch_surface = np.split(batch_data[0], 2, 1) # take two point clouds from the same surface S_A,S_B
        bsize = batch_data[0].shape[0]
        cur_batch_data[0, 0:bsize, ...] = batch_surface[0][:, :NUM_POINT]
        #half on surface half off-surface
        batch_label = np.split(batch_label,2,1) #GT distances of close and far points
        cur_batch_label_AB[:bsize, :] = np.concatenate([np.zeros([bsize,H_NUM_POINT]),batch_label[0][:,:int(H_NUM_POINT*split_off_surface)],batch_label[1][:,int(H_NUM_POINT*split_off_surface):H_NUM_POINT]],1)
        # print(np.max(cur_batch_label_AB))
        batch_off = np.concatenate([batch_data[1][:,:int(H_NUM_POINT*split_off_surface)],batch_data[2][:,int(H_NUM_POINT*split_off_surface):H_NUM_POINT]],1)
        cur_batch_data[1,:bsize,...] = np.concatenate([batch_surface[1][:,:H_NUM_POINT],batch_off],1)
        # Optional Gaussian jitter on the inputs, scaled by FLAGS.add_noise.
        if FLAGS.add_noise > 0.0:
            add_noise = np.random.randn(BATCH_SIZE, NUM_POINT, NUM_DIMS) * FLAGS.add_noise
        else:
            add_noise = np.zeros([BATCH_SIZE, NUM_POINT, NUM_DIMS],'float32')
        feed_dict = {ops['pcA_pl']: cur_batch_data[0],
                     ops['pcB_pl']: cur_batch_data[1],
                     ops['labels_AB_pl']: cur_batch_label_AB,
                     ops['labels_BA_pl']: cur_batch_label_BA,
                     ops['is_training_pl']: is_training,
                     ops['noise_pl']:add_noise,
                     }
        summary, step, loss_val, pred_val = sess.run([ops['merged'], ops['step'],
            ops['loss_s'], ops['pred']], feed_dict=feed_dict)
        test_writer.add_summary(summary, step)
        loss_sum += loss_val
        batch_idx += 1
    log_string('eval mean loss: %f' % (loss_sum / float(batch_idx)))
    # BUGFIX: EPOCH_CNT was incremented twice here (copy-paste duplicate),
    # making the '---- EPOCH %03d EVALUATION ----' banner skip every other
    # number. Increment exactly once per evaluation pass.
    EPOCH_CNT += 1
    TEST_DATASET.reset()
    return 1#total_correct / float(total_seen)
# Extra Functions
def get_batch(dataset, idxs, start_idx, end_idx, shuffle_pts=True):
    """Gather one batch of 2048-point samples from *dataset*.

    Args:
        dataset: indexable collection where dataset[k] yields a
            (points [2048, 3], segmentation labels [2048]) pair.
        idxs: sequence of dataset indices; positions start_idx..end_idx-1
            select which samples fill the batch.
        start_idx, end_idx: half-open slice into idxs.
        shuffle_pts: when True, each sample's points are randomly permuted.
            NOTE(review): the label row is NOT permuted with the points —
            preserved as-is here; confirm whether callers rely on it.

    Returns:
        (batch_data [bsize, 2048, 3] float, batch_label [bsize, 2048] int32).
    """
    n_rows = end_idx - start_idx
    out_pts = np.zeros((n_rows, 2048, 3))
    out_seg = np.zeros((n_rows, 2048), dtype=np.int32)
    perm = np.arange(2048)
    for row in range(n_rows):
        points, seg = dataset[idxs[row + start_idx]]
        if shuffle_pts:
            np.random.shuffle(perm)
            points = points[perm]
        out_pts[row, ...] = points
        out_seg[row, :] = seg
    return out_pts, out_seg
def pairwise_diff(x, y,same_cloud=False):
    """Build the [B, Nx, Ny] squared-distance matrix between two point sets.

    x and y are batched tensors laid out [batch, points, coords]; the
    coordinate axis ends up on axis 2 of the tiled tensors and is summed out,
    so entry (b, i, j) of the result is ||x[b, i] - y[b, j]||^2.

    If same_cloud is True, 10 is added along the diagonal so a point's zero
    distance to itself is excluded from downstream reduce_min calls.
    NOTE(review): that branch mixes np.eye with square_dist.shape[0]/[1],
    which requires batch and point dimensions to be statically known at
    graph-construction time — it will fail with placeholder-None shapes.
    """
    print('x',x)  # debug: prints tensor descriptions at graph build time
    print('y',y)
    size_x = tf.shape(x)[1]  # Nx (dynamic)
    size_y = tf.shape(y)[1]  # Ny (dynamic)
    # Tile x to [B, Nx, D, Ny] ...
    xx = tf.expand_dims(x, -1)
    xx = tf.tile(xx, tf.stack([1, 1, 1, size_y]))
    # ... and y to [B, Ny, D, Nx], then transpose to the matching [B, Nx, D, Ny].
    yy = tf.expand_dims(y, -1)
    yy = tf.tile(yy, tf.stack([1, 1, 1, size_x]))
    yy = tf.transpose(yy, perm=[0, 3, 2, 1])
    diff = tf.subtract(xx, yy)
    square_diff = tf.square(diff)
    # Sum over the coordinate axis -> squared Euclidean distances.
    square_dist = tf.reduce_sum(square_diff, axis=2)
    print(square_dist.shape)
    if same_cloud:
        square_dist = square_dist + tf.cast(tf.tile(tf.expand_dims(np.eye(square_dist.shape[1])*10,0),[square_dist.shape[0],1,1]),tf.float32)
        print(square_dist.shape)
    return square_dist
def chmafer_dist(pc, rec_pc):
    """Symmetric Chamfer distance between two point clouds (TF graph op).

    Averages, in both directions, each point's squared distance to its nearest
    neighbor in the other cloud. (Name keeps the original 'chmafer' typo for
    compatibility with existing callers.)
    """
    rec_to_ref = tf.reduce_mean(tf.reduce_min(pairwise_diff(rec_pc, pc), axis=2))
    ref_to_rec = tf.reduce_mean(tf.reduce_min(pairwise_diff(pc, rec_pc), axis=2))
    return (rec_to_ref + ref_to_rec) / 2.0
def initialize_uninitialized_vars(sess):
    """Run the initializer only for globals the session has not initialized yet.

    Needed after restoring partial checkpoints (g1/g2): restored variables keep
    their weights while any remaining ones get their default initial values.
    """
    from itertools import compress
    all_vars = tf.global_variables()
    uninit_flags = sess.run([~(tf.is_variable_initialized(v)) for v in all_vars])
    pending = list(compress(all_vars, uninit_flags))
    if pending:
        print('init not_initialized')
        print(pending)
        sess.run(tf.variables_initializer(pending))
def log_string(out_str):
    """Append one line to the global log file and echo it to stdout, flushing both."""
    LOG_FOUT.write(out_str + '\n')
    LOG_FOUT.flush()
    # flush=True makes the console mirror immediate, matching the file flush
    print(out_str, flush=True)
def average_gradients(tower_grads):
    """Average per-tower gradients for each shared variable.

    Acts as the synchronization point across GPU towers (pattern from the
    TensorFlow cifar10_multi_gpu_train tutorial).

    Args:
        tower_grads: list (one entry per tower) of lists of (gradient,
            variable) tuples as returned by optimizer.compute_gradients.

    Returns:
        List of (averaged_gradient, variable) tuples, one per variable.
    """
    averaged = []
    # zip(*...) regroups the data per-variable: one tuple of
    # (grad, var) pairs — the same variable as seen by every tower.
    for per_var in zip(*tower_grads):
        # Stack each tower's gradient along a new leading 'tower' axis,
        # then reduce it away with a mean.
        expanded = [tf.expand_dims(g, 0) for g, _ in per_var]
        stacked = tf.concat(axis=0, values=expanded)
        mean_grad = tf.reduce_mean(stacked, 0)
        # Variables are shared across towers, so the first tower's
        # handle stands in for all of them.
        averaged.append((mean_grad, per_var[0][1]))
    return averaged
def get_learning_rate(batch, set_base_lr=None):
    """Exponentially decayed learning-rate tensor, clipped from below at 1e-7.

    Args:
        batch: global-step tensor driving the decay schedule.
        set_base_lr: optional base rate; falls back to the global
            BASE_LEARNING_RATE when None.

    Returns:
        Scalar learning-rate tensor (staircase decay by DECAY_RATE every
        DECAY_STEP steps).
    """
    base_lr = BASE_LEARNING_RATE if set_base_lr is None else set_base_lr
    lr = tf.train.exponential_decay(
        base_lr,     # base learning rate
        batch,       # current index into the dataset
        DECAY_STEP,
        DECAY_RATE,
        staircase=True)
    lr = tf.maximum(lr, 0.0000001)  # CLIP THE LEARNING RATE!
    print('learning rate:')
    print(lr)
    return lr
def get_bn_decay(batch):
    """Batch-norm decay schedule: 1 - staircase-decayed momentum, capped at BN_DECAY_CLIP."""
    momentum = tf.train.exponential_decay(
        BN_INIT_DECAY,
        batch,
        BN_DECAY_DECAY_STEP,
        BN_DECAY_DECAY_RATE,
        staircase=True)
    return tf.minimum(BN_DECAY_CLIP, 1 - momentum)
if __name__ == "__main__":
    # Entry point: log the process id, run the full training pipeline
    # (defined earlier in this file), then close the log file handle.
    log_string('pid: %s'%(str(os.getpid())))
    train()
    LOG_FOUT.close()
| [
"os.mkdir",
"tensorflow.reduce_sum",
"argparse.ArgumentParser",
"tensorflow.get_collection",
"tensorflow.maximum",
"tensorflow.variables_initializer",
"tensorflow.constant_initializer",
"tensorflow.get_variable_scope",
"numpy.ones",
"tensorflow.ConfigProto",
"tensorflow.global_variables",
"num... | [((1029, 1054), 'sys.path.append', 'sys.path.append', (['BASE_DIR'], {}), '(BASE_DIR)\n', (1044, 1054), False, 'import sys\n'), ((1225, 1250), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (1248, 1250), False, 'import argparse\n'), ((5591, 5627), 'importlib.import_module', 'importlib.import_module', (['FLAGS.model'], {}), '(FLAGS.model)\n', (5614, 5627), False, 'import importlib\n'), ((5666, 5719), 'os.path.join', 'os.path.join', (['ROOT_DIR', '"""models"""', "(FLAGS.model + '.py')"], {}), "(ROOT_DIR, 'models', FLAGS.model + '.py')\n", (5678, 5719), False, 'import os\n'), ((5731, 5785), 'os.path.join', 'os.path.join', (['ROOT_DIR', '"""utils"""', "('dpdist_util' + '.py')"], {}), "(ROOT_DIR, 'utils', 'dpdist_util' + '.py')\n", (5743, 5785), False, 'import os\n'), ((6488, 6533), 'os.system', 'os.system', (["('cp %s %s' % (MODEL_FILE, LOG_DIR))"], {}), "('cp %s %s' % (MODEL_FILE, LOG_DIR))\n", (6497, 6533), False, 'import os\n'), ((6554, 6598), 'os.system', 'os.system', (["('cp %s %s' % (UTIL_FILE, LOG_DIR))"], {}), "('cp %s %s' % (UTIL_FILE, LOG_DIR))\n", (6563, 6598), False, 'import os\n'), ((6620, 6657), 'os.system', 'os.system', (["('cp train.py %s' % LOG_DIR)"], {}), "('cp train.py %s' % LOG_DIR)\n", (6629, 6657), False, 'import os\n'), ((6947, 6967), 'socket.gethostname', 'socket.gethostname', ([], {}), '()\n', (6965, 6967), False, 'import socket\n'), ((7028, 7086), 'os.path.join', 'os.path.join', (['BASE_DIR', '"""data/modelnet40_normal_resampled"""'], {}), "(BASE_DIR, 'data/modelnet40_normal_resampled')\n", (7040, 7086), False, 'import os\n'), ((7104, 7290), 'modelnet_dataset.ModelNetDataset', 'modelnet_dataset.ModelNetDataset', ([], {'root': 'DATA_PATH', 'npoints': '(NUM_POINT * 2)', 'split': '"""train"""', 'normal_channel': '(False)', 'batch_size': 'BATCH_SIZE', 'normalize': '(False)', 'class_choice': 'FLAGS.category'}), "(root=DATA_PATH, npoints=NUM_POINT * 2,\n split='train', normal_channel=False, batch_size=BATCH_SIZE, 
normalize=\n False, class_choice=FLAGS.category)\n", (7136, 7290), False, 'import modelnet_dataset\n'), ((7344, 7529), 'modelnet_dataset.ModelNetDataset', 'modelnet_dataset.ModelNetDataset', ([], {'root': 'DATA_PATH', 'npoints': '(NUM_POINT * 2)', 'split': '"""test"""', 'normal_channel': '(False)', 'batch_size': 'BATCH_SIZE', 'normalize': '(False)', 'class_choice': 'FLAGS.category'}), "(root=DATA_PATH, npoints=NUM_POINT * 2,\n split='test', normal_channel=False, batch_size=BATCH_SIZE, normalize=\n False, class_choice=FLAGS.category)\n", (7376, 7529), False, 'import modelnet_dataset\n'), ((980, 1005), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (995, 1005), False, 'import os\n'), ((1072, 1104), 'os.path.join', 'os.path.join', (['ROOT_DIR', '"""models"""'], {}), "(ROOT_DIR, 'models')\n", (1084, 1104), False, 'import os\n'), ((1123, 1154), 'os.path.join', 'os.path.join', (['ROOT_DIR', '"""utils"""'], {}), "(ROOT_DIR, 'utils')\n", (1135, 1154), False, 'import os\n'), ((6342, 6365), 'os.path.exists', 'os.path.exists', (['LOG_DIR'], {}), '(LOG_DIR)\n', (6356, 6365), False, 'import os\n'), ((6372, 6389), 'os.mkdir', 'os.mkdir', (['LOG_DIR'], {}), '(LOG_DIR)\n', (6380, 6389), False, 'import os\n'), ((6702, 6767), 'os.path.join', 'os.path.join', (['LOG_DIR', "('log_train' + OPT_TYPE + add_name + '.txt')"], {}), "(LOG_DIR, 'log_train' + OPT_TYPE + add_name + '.txt')\n", (6714, 6767), False, 'import os\n'), ((33998, 34047), 'numpy.zeros', 'np.zeros', (['(BATCH_SIZE, NUM_POINT)'], {'dtype': 'np.int32'}), '((BATCH_SIZE, NUM_POINT), dtype=np.int32)\n', (34006, 34047), True, 'import numpy as np\n'), ((34073, 34124), 'numpy.zeros', 'np.zeros', (['(BATCH_SIZE, NUM_POINT)'], {'dtype': 'np.float32'}), '((BATCH_SIZE, NUM_POINT), dtype=np.float32)\n', (34081, 34124), True, 'import numpy as np\n'), ((37911, 37962), 'numpy.zeros', 'np.zeros', (['(BATCH_SIZE, NUM_POINT)'], {'dtype': 'np.float32'}), '((BATCH_SIZE, NUM_POINT), dtype=np.float32)\n', (37919, 
37962), True, 'import numpy as np\n'), ((37988, 38039), 'numpy.zeros', 'np.zeros', (['(BATCH_SIZE, NUM_POINT)'], {'dtype': 'np.float32'}), '((BATCH_SIZE, NUM_POINT), dtype=np.float32)\n', (37996, 38039), True, 'import numpy as np\n'), ((40825, 40851), 'numpy.zeros', 'np.zeros', (['(bsize, 2048, 3)'], {}), '((bsize, 2048, 3))\n', (40833, 40851), True, 'import numpy as np\n'), ((40871, 40910), 'numpy.zeros', 'np.zeros', (['(bsize, 2048)'], {'dtype': 'np.int32'}), '((bsize, 2048), dtype=np.int32)\n', (40879, 40910), True, 'import numpy as np\n'), ((40922, 40937), 'numpy.arange', 'np.arange', (['(2048)'], {}), '(2048)\n', (40931, 40937), True, 'import numpy as np\n'), ((41347, 41368), 'tensorflow.expand_dims', 'tf.expand_dims', (['x', '(-1)'], {}), '(x, -1)\n', (41361, 41368), True, 'import tensorflow as tf\n'), ((41432, 41453), 'tensorflow.expand_dims', 'tf.expand_dims', (['y', '(-1)'], {}), '(y, -1)\n', (41446, 41453), True, 'import tensorflow as tf\n'), ((41515, 41550), 'tensorflow.transpose', 'tf.transpose', (['yy'], {'perm': '[0, 3, 2, 1]'}), '(yy, perm=[0, 3, 2, 1])\n', (41527, 41550), True, 'import tensorflow as tf\n'), ((41565, 41584), 'tensorflow.subtract', 'tf.subtract', (['xx', 'yy'], {}), '(xx, yy)\n', (41576, 41584), True, 'import tensorflow as tf\n'), ((41604, 41619), 'tensorflow.square', 'tf.square', (['diff'], {}), '(diff)\n', (41613, 41619), True, 'import tensorflow as tf\n'), ((41639, 41673), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['square_diff'], {'axis': '(2)'}), '(square_diff, axis=2)\n', (41652, 41673), True, 'import tensorflow as tf\n'), ((42260, 42281), 'tensorflow.global_variables', 'tf.global_variables', ([], {}), '()\n', (42279, 42281), True, 'import tensorflow as tf\n'), ((42780, 42798), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (42796, 42798), False, 'import sys\n'), ((44548, 44634), 'tensorflow.train.exponential_decay', 'tf.train.exponential_decay', (['base_lr', 'batch', 'DECAY_STEP', 'DECAY_RATE'], {'staircase': 
'(True)'}), '(base_lr, batch, DECAY_STEP, DECAY_RATE,\n staircase=True)\n', (44574, 44634), True, 'import tensorflow as tf\n'), ((44898, 44930), 'tensorflow.maximum', 'tf.maximum', (['learning_rate', '(1e-07)'], {}), '(learning_rate, 1e-07)\n', (44908, 44930), True, 'import tensorflow as tf\n'), ((45089, 45199), 'tensorflow.train.exponential_decay', 'tf.train.exponential_decay', (['BN_INIT_DECAY', 'batch', 'BN_DECAY_DECAY_STEP', 'BN_DECAY_DECAY_RATE'], {'staircase': '(True)'}), '(BN_INIT_DECAY, batch, BN_DECAY_DECAY_STEP,\n BN_DECAY_DECAY_RATE, staircase=True)\n', (45115, 45199), True, 'import tensorflow as tf\n'), ((45341, 45383), 'tensorflow.minimum', 'tf.minimum', (['BN_DECAY_CLIP', '(1 - bn_momentum)'], {}), '(BN_DECAY_CLIP, 1 - bn_momentum)\n', (45351, 45383), True, 'import tensorflow as tf\n'), ((7655, 7665), 'tensorflow.Graph', 'tf.Graph', ([], {}), '()\n', (7663, 7665), True, 'import tensorflow as tf\n'), ((16894, 16904), 'tensorflow.Graph', 'tf.Graph', ([], {}), '()\n', (16902, 16904), True, 'import tensorflow as tf\n'), ((19173, 19183), 'tensorflow.Graph', 'tf.Graph', ([], {}), '()\n', (19181, 19183), True, 'import tensorflow as tf\n'), ((25803, 25843), 'numpy.reshape', 'np.reshape', (['batch_data', '[B, 3, 2, -1, 3]'], {}), '(batch_data, [B, 3, 2, -1, 3])\n', (25813, 25843), True, 'import numpy as np\n'), ((29127, 29167), 'numpy.reshape', 'np.reshape', (['batch_data', '[B, 3, 2, -1, 3]'], {}), '(batch_data, [B, 3, 2, -1, 3])\n', (29137, 29167), True, 'import numpy as np\n'), ((31957, 31983), 'numpy.split', 'np.split', (['batch_data', '(2)', '(1)'], {}), '(batch_data, 2, 1)\n', (31965, 31983), True, 'import numpy as np\n'), ((34151, 34201), 'numpy.ones', 'np.ones', (['(BATCH_SIZE, NUM_POINT)'], {'dtype': 'np.float32'}), '((BATCH_SIZE, NUM_POINT), dtype=np.float32)\n', (34158, 34201), True, 'import numpy as np\n'), ((34746, 34772), 'numpy.split', 'np.split', (['batch_data', '(3)', '(1)'], {}), '(batch_data, 3, 1)\n', (34754, 34772), True, 'import numpy as 
np\n'), ((34821, 34850), 'numpy.split', 'np.split', (['batch_data[0]', '(2)', '(1)'], {}), '(batch_data[0], 2, 1)\n', (34829, 34850), True, 'import numpy as np\n'), ((35090, 35117), 'numpy.split', 'np.split', (['batch_label', '(2)', '(1)'], {}), '(batch_label, 2, 1)\n', (35098, 35117), True, 'import numpy as np\n'), ((35698, 35763), 'numpy.concatenate', 'np.concatenate', (['[batch_surface[1][:, :H_NUM_POINT], batch_off]', '(1)'], {}), '([batch_surface[1][:, :H_NUM_POINT], batch_off], 1)\n', (35712, 35763), True, 'import numpy as np\n'), ((38678, 38704), 'numpy.split', 'np.split', (['batch_data', '(3)', '(1)'], {}), '(batch_data, 3, 1)\n', (38686, 38704), True, 'import numpy as np\n'), ((38753, 38782), 'numpy.split', 'np.split', (['batch_data[0]', '(2)', '(1)'], {}), '(batch_data[0], 2, 1)\n', (38761, 38782), True, 'import numpy as np\n'), ((39021, 39048), 'numpy.split', 'np.split', (['batch_label', '(2)', '(1)'], {}), '(batch_label, 2, 1)\n', (39029, 39048), True, 'import numpy as np\n'), ((39540, 39605), 'numpy.concatenate', 'np.concatenate', (['[batch_surface[1][:, :H_NUM_POINT], batch_off]', '(1)'], {}), '([batch_surface[1][:, :H_NUM_POINT], batch_off], 1)\n', (39554, 39605), True, 'import numpy as np\n'), ((41293, 41304), 'tensorflow.shape', 'tf.shape', (['x'], {}), '(x)\n', (41301, 41304), True, 'import tensorflow as tf\n'), ((41322, 41333), 'tensorflow.shape', 'tf.shape', (['y'], {}), '(y)\n', (41330, 41333), True, 'import tensorflow as tf\n'), ((41391, 41418), 'tensorflow.stack', 'tf.stack', (['[1, 1, 1, size_y]'], {}), '([1, 1, 1, size_y])\n', (41399, 41418), True, 'import tensorflow as tf\n'), ((41476, 41503), 'tensorflow.stack', 'tf.stack', (['[1, 1, 1, size_x]'], {}), '([1, 1, 1, size_x])\n', (41484, 41503), True, 'import tensorflow as tf\n'), ((42449, 42490), 'itertools.compress', 'compress', (['global_vars', 'is_not_initialized'], {}), '(global_vars, is_not_initialized)\n', (42457, 42490), False, 'from itertools import compress\n'), ((43997, 44028), 
'tensorflow.concat', 'tf.concat', ([], {'axis': '(0)', 'values': 'grads'}), '(axis=0, values=grads)\n', (44006, 44028), True, 'import tensorflow as tf\n'), ((44041, 44064), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['grad', '(0)'], {}), '(grad, 0)\n', (44055, 44064), True, 'import tensorflow as tf\n'), ((14661, 14677), 'tensorflow.train.Saver', 'tf.train.Saver', ([], {}), '()\n', (14675, 14677), True, 'import tensorflow as tf\n'), ((14734, 14750), 'tensorflow.ConfigProto', 'tf.ConfigProto', ([], {}), '()\n', (14748, 14750), True, 'import tensorflow as tf\n'), ((14920, 14945), 'tensorflow.Session', 'tf.Session', ([], {'config': 'config'}), '(config=config)\n', (14930, 14945), True, 'import tensorflow as tf\n'), ((15005, 15027), 'tensorflow.summary.merge_all', 'tf.summary.merge_all', ([], {}), '()\n', (15025, 15027), True, 'import tensorflow as tf\n'), ((15527, 15560), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (15558, 15560), True, 'import tensorflow as tf\n'), ((16302, 16315), 'numpy.zeros', 'np.zeros', (['(100)'], {}), '(100)\n', (16310, 16315), True, 'import numpy as np\n'), ((18334, 18350), 'tensorflow.train.Saver', 'tf.train.Saver', ([], {}), '()\n', (18348, 18350), True, 'import tensorflow as tf\n'), ((18407, 18423), 'tensorflow.ConfigProto', 'tf.ConfigProto', ([], {}), '()\n', (18421, 18423), True, 'import tensorflow as tf\n'), ((18594, 18619), 'tensorflow.Session', 'tf.Session', ([], {'config': 'config'}), '(config=config)\n', (18604, 18619), True, 'import tensorflow as tf\n'), ((18670, 18703), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (18701, 18703), True, 'import tensorflow as tf\n'), ((22695, 22717), 'tensorflow.summary.merge_all', 'tf.summary.merge_all', ([], {}), '()\n', (22715, 22717), True, 'import tensorflow as tf\n'), ((22746, 22781), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""DPDist"""', 'loss_p'], {}), "('DPDist', loss_p)\n", 
(22763, 22781), True, 'import tensorflow as tf\n'), ((22810, 22846), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""Chamfer"""', 'loss_c'], {}), "('Chamfer', loss_c)\n", (22827, 22846), True, 'import tensorflow as tf\n'), ((22870, 22940), 'tensorflow.summary.merge', 'tf.summary.merge', (['[ours_summary, chamf_summary, learning_rate_summary]'], {}), '([ours_summary, chamf_summary, learning_rate_summary])\n', (22886, 22940), True, 'import tensorflow as tf\n'), ((22963, 22979), 'tensorflow.train.Saver', 'tf.train.Saver', ([], {}), '()\n', (22977, 22979), True, 'import tensorflow as tf\n'), ((23071, 23087), 'tensorflow.ConfigProto', 'tf.ConfigProto', ([], {}), '()\n', (23085, 23087), True, 'import tensorflow as tf\n'), ((23258, 23294), 'tensorflow.InteractiveSession', 'tf.InteractiveSession', ([], {'config': 'config'}), '(config=config)\n', (23279, 23294), True, 'import tensorflow as tf\n'), ((23541, 23574), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (23572, 23574), True, 'import tensorflow as tf\n'), ((23639, 23675), 'os.path.join', 'os.path.join', (['LOG_DIR', '"""model2.ckpt"""'], {}), "(LOG_DIR, 'model2.ckpt')\n", (23651, 23675), False, 'import os\n'), ((23786, 23821), 'os.path.join', 'os.path.join', (['LOG_DIR', '"""model.ckpt"""'], {}), "(LOG_DIR, 'model.ckpt')\n", (23798, 23821), False, 'import os\n'), ((25305, 25319), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (25317, 25319), False, 'from datetime import datetime\n'), ((26347, 26379), 'numpy.zeros_like', 'np.zeros_like', (['cur_batch_data[0]'], {}), '(cur_batch_data[0])\n', (26360, 26379), True, 'import numpy as np\n'), ((28674, 28688), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (28686, 28688), False, 'from datetime import datetime\n'), ((29540, 29572), 'numpy.zeros_like', 'np.zeros_like', (['cur_batch_data[0]'], {}), '(cur_batch_data[0])\n', (29553, 29572), True, 'import numpy as np\n'), ((31639, 31653), 
'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (31651, 31653), False, 'from datetime import datetime\n'), ((33825, 33839), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (33837, 33839), False, 'from datetime import datetime\n'), ((35933, 35987), 'numpy.zeros', 'np.zeros', (['[BATCH_SIZE, NUM_POINT, NUM_DIMS]', '"""float32"""'], {}), "([BATCH_SIZE, NUM_POINT, NUM_DIMS], 'float32')\n", (35941, 35987), True, 'import numpy as np\n'), ((37735, 37749), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (37747, 37749), False, 'from datetime import datetime\n'), ((38143, 38157), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (38155, 38157), False, 'from datetime import datetime\n'), ((39772, 39826), 'numpy.zeros', 'np.zeros', (['[BATCH_SIZE, NUM_POINT, NUM_DIMS]', '"""float32"""'], {}), "([BATCH_SIZE, NUM_POINT, NUM_DIMS], 'float32')\n", (39780, 39826), True, 'import numpy as np\n'), ((41049, 41071), 'numpy.random.shuffle', 'np.random.shuffle', (['idx'], {}), '(idx)\n', (41066, 41071), True, 'import numpy as np\n'), ((42623, 42669), 'tensorflow.variables_initializer', 'tf.variables_initializer', (['not_initialized_vars'], {}), '(not_initialized_vars)\n', (42647, 42669), True, 'import tensorflow as tf\n'), ((43812, 43832), 'tensorflow.expand_dims', 'tf.expand_dims', (['g', '(0)'], {}), '(g, 0)\n', (43826, 43832), True, 'import tensorflow as tf\n'), ((7715, 7734), 'tensorflow.device', 'tf.device', (['"""/cpu:0"""'], {}), "('/cpu:0')\n", (7724, 7734), True, 'import tensorflow as tf\n'), ((7902, 7935), 'tensorflow.placeholder', 'tf.placeholder', (['tf.bool'], {'shape': '()'}), '(tf.bool, shape=())\n', (7916, 7935), True, 'import tensorflow as tf\n'), ((7964, 8054), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '(BATCH_SIZE, NUM_POINT, NUM_DIMS)', 'name': '"""add_noise"""'}), "(tf.float32, shape=(BATCH_SIZE, NUM_POINT, NUM_DIMS), name=\n 'add_noise')\n", (7978, 8054), True, 'import tensorflow as tf\n'), 
((8643, 8682), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""bn_decay"""', 'bn_decay'], {}), "('bn_decay', bn_decay)\n", (8660, 8682), True, 'import tensorflow as tf\n'), ((8927, 8976), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""learning_rate"""', 'learning_rate'], {}), "('learning_rate', learning_rate)\n", (8944, 8976), True, 'import tensorflow as tf\n'), ((14032, 14057), 'tensorflow.concat', 'tf.concat', (['pred_gpu_AB', '(0)'], {}), '(pred_gpu_AB, 0)\n', (14041, 14057), True, 'import tensorflow as tf\n'), ((14085, 14110), 'tensorflow.concat', 'tf.concat', (['pred_gpu_BA', '(0)'], {}), '(pred_gpu_BA, 0)\n', (14094, 14110), True, 'import tensorflow as tf\n'), ((14145, 14177), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['total_loss_gpu_s'], {}), '(total_loss_gpu_s)\n', (14159, 14177), True, 'import tensorflow as tf\n'), ((14210, 14242), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['total_loss_gpu_p'], {}), '(total_loss_gpu_p)\n', (14224, 14242), True, 'import tensorflow as tf\n'), ((15339, 15369), 'os.path.join', 'os.path.join', (['RUN_DIR', '"""train"""'], {}), "(RUN_DIR, 'train')\n", (15351, 15369), False, 'import os\n'), ((15432, 15461), 'os.path.join', 'os.path.join', (['RUN_DIR', '"""test"""'], {}), "(RUN_DIR, 'test')\n", (15444, 15461), False, 'import os\n'), ((17190, 17229), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""bn_decay"""', 'bn_decay'], {}), "('bn_decay', bn_decay)\n", (17207, 17229), True, 'import tensorflow as tf\n'), ((17386, 17456), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[BATCH_SIZE, NUM_POINT, NUM_DIMS]', '"""input"""'], {}), "(tf.float32, [BATCH_SIZE, NUM_POINT, NUM_DIMS], 'input')\n", (17400, 17456), True, 'import tensorflow as tf\n'), ((17489, 17522), 'tensorflow.placeholder', 'tf.placeholder', (['tf.bool'], {'shape': '()'}), '(tf.bool, shape=())\n', (17503, 17522), True, 'import tensorflow as tf\n'), ((19038, 19074), 'os.path.join', 'os.path.join', (['LOG_DIR', '"""model2.ckpt"""'], 
{}), "(LOG_DIR, 'model2.ckpt')\n", (19050, 19074), False, 'import os\n'), ((19470, 19509), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""bn_decay"""', 'bn_decay'], {}), "('bn_decay', bn_decay)\n", (19487, 19509), True, 'import tensorflow as tf\n'), ((19684, 19737), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""learning_rate_aue"""', 'learning_rate'], {}), "('learning_rate_aue', learning_rate)\n", (19701, 19737), True, 'import tensorflow as tf\n'), ((19776, 19809), 'tensorflow.placeholder', 'tf.placeholder', (['tf.bool'], {'shape': '()'}), '(tf.bool, shape=())\n', (19790, 19809), True, 'import tensorflow as tf\n'), ((19846, 19879), 'tensorflow.placeholder', 'tf.placeholder', (['tf.bool'], {'shape': '()'}), '(tf.bool, shape=())\n', (19860, 19879), True, 'import tensorflow as tf\n'), ((19904, 19980), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '(BATCH_SIZE, NUM_POINT, NUM_DIMS)'], {'name': '"""input1"""'}), "(tf.float32, (BATCH_SIZE, NUM_POINT, NUM_DIMS), name='input1')\n", (19918, 19980), True, 'import tensorflow as tf\n'), ((20002, 20078), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '(BATCH_SIZE, NUM_POINT, NUM_DIMS)'], {'name': '"""input2"""'}), "(tf.float32, (BATCH_SIZE, NUM_POINT, NUM_DIMS), name='input2')\n", (20016, 20078), True, 'import tensorflow as tf\n'), ((20100, 20176), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '(BATCH_SIZE, NUM_POINT, NUM_DIMS)'], {'name': '"""input3"""'}), "(tf.float32, (BATCH_SIZE, NUM_POINT, NUM_DIMS), name='input3')\n", (20114, 20176), True, 'import tensorflow as tf\n'), ((22157, 22220), 'tensorflow.get_collection', 'tf.get_collection', (['tf.GraphKeys.TRAINABLE_VARIABLES'], {'scope': '"""g2"""'}), "(tf.GraphKeys.TRAINABLE_VARIABLES, scope='g2')\n", (22174, 22220), True, 'import tensorflow as tf\n'), ((23345, 23397), 'os.path.join', 'os.path.join', (['LOG_DIR', "('train' + OPT_TYPE + add_name)"], {}), "(LOG_DIR, 'train' + OPT_TYPE + add_name)\n", (23357, 23397), 
False, 'import os\n'), ((23457, 23508), 'os.path.join', 'os.path.join', (['LOG_DIR', "('test' + OPT_TYPE + add_name)"], {}), "(LOG_DIR, 'test' + OPT_TYPE + add_name)\n", (23469, 23508), False, 'import os\n'), ((27299, 27320), 'numpy.squeeze', 'np.squeeze', (['rec_pc[0]'], {}), '(rec_pc[0])\n', (27309, 27320), True, 'import numpy as np\n'), ((27354, 27386), 'numpy.squeeze', 'np.squeeze', (['cur_batch_data[0][0]'], {}), '(cur_batch_data[0][0])\n', (27364, 27386), True, 'import numpy as np\n'), ((30315, 30336), 'numpy.squeeze', 'np.squeeze', (['rec_pc[0]'], {}), '(rec_pc[0])\n', (30325, 30336), True, 'import numpy as np\n'), ((30370, 30398), 'numpy.squeeze', 'np.squeeze', (['batch_data[0][0]'], {}), '(batch_data[0][0])\n', (30380, 30398), True, 'import numpy as np\n'), ((32833, 32854), 'numpy.squeeze', 'np.squeeze', (['rec_pc[0]'], {}), '(rec_pc[0])\n', (32843, 32854), True, 'import numpy as np\n'), ((32888, 32917), 'numpy.squeeze', 'np.squeeze', (['cur_batch_data[0]'], {}), '(cur_batch_data[0])\n', (32898, 32917), True, 'import numpy as np\n'), ((35229, 35259), 'numpy.zeros', 'np.zeros', (['[bsize, H_NUM_POINT]'], {}), '([bsize, H_NUM_POINT])\n', (35237, 35259), True, 'import numpy as np\n'), ((35826, 35874), 'numpy.random.randn', 'np.random.randn', (['BATCH_SIZE', 'NUM_POINT', 'NUM_DIMS'], {}), '(BATCH_SIZE, NUM_POINT, NUM_DIMS)\n', (35841, 35874), True, 'import numpy as np\n'), ((39142, 39172), 'numpy.zeros', 'np.zeros', (['[bsize, H_NUM_POINT]'], {}), '([bsize, H_NUM_POINT])\n', (39150, 39172), True, 'import numpy as np\n'), ((39665, 39713), 'numpy.random.randn', 'np.random.randn', (['BATCH_SIZE', 'NUM_POINT', 'NUM_DIMS'], {}), '(BATCH_SIZE, NUM_POINT, NUM_DIMS)\n', (39680, 39713), True, 'import numpy as np\n'), ((42320, 42351), 'tensorflow.is_variable_initialized', 'tf.is_variable_initialized', (['var'], {}), '(var)\n', (42346, 42351), True, 'import tensorflow as tf\n'), ((45466, 45477), 'os.getpid', 'os.getpid', ([], {}), '()\n', (45475, 45477), False, 'import 
os\n'), ((9055, 9115), 'tensorflow.train.MomentumOptimizer', 'tf.train.MomentumOptimizer', (['learning_rate'], {'momentum': 'MOMENTUM'}), '(learning_rate, momentum=MOMENTUM)\n', (9081, 9115), True, 'import tensorflow as tf\n'), ((18012, 18072), 'tensorflow.train.MomentumOptimizer', 'tf.train.MomentumOptimizer', (['learning_rate'], {'momentum': 'MOMENTUM'}), '(learning_rate, momentum=MOMENTUM)\n', (18038, 18072), True, 'import tensorflow as tf\n'), ((20274, 20315), 'os.path.join', 'os.path.join', (['LOG_DIR', '"""model2.ckpt.meta"""'], {}), "(LOG_DIR, 'model2.ckpt.meta')\n", (20286, 20315), False, 'import os\n'), ((20742, 20782), 'os.path.join', 'os.path.join', (['LOG_DIR', '"""model.ckpt.meta"""'], {}), "(LOG_DIR, 'model.ckpt.meta')\n", (20754, 20782), False, 'import os\n'), ((21436, 21496), 'tensorflow.train.MomentumOptimizer', 'tf.train.MomentumOptimizer', (['learning_rate'], {'momentum': 'MOMENTUM'}), '(learning_rate, momentum=MOMENTUM)\n', (21462, 21496), True, 'import tensorflow as tf\n'), ((8387, 8413), 'tensorflow.constant_initializer', 'tf.constant_initializer', (['(0)'], {}), '(0)\n', (8410, 8413), True, 'import tensorflow as tf\n'), ((9192, 9229), 'tensorflow.train.AdamOptimizer', 'tf.train.AdamOptimizer', (['learning_rate'], {}), '(learning_rate)\n', (9214, 9229), True, 'import tensorflow as tf\n'), ((16751, 16786), 'os.path.join', 'os.path.join', (['LOG_DIR', '"""model.ckpt"""'], {}), "(LOG_DIR, 'model.ckpt')\n", (16763, 16786), False, 'import os\n'), ((17080, 17106), 'tensorflow.constant_initializer', 'tf.constant_initializer', (['(0)'], {}), '(0)\n', (17103, 17106), True, 'import tensorflow as tf\n'), ((18150, 18187), 'tensorflow.train.AdamOptimizer', 'tf.train.AdamOptimizer', (['learning_rate'], {}), '(learning_rate)\n', (18172, 18187), True, 'import tensorflow as tf\n'), ((19360, 19386), 'tensorflow.constant_initializer', 'tf.constant_initializer', (['(0)'], {}), '(0)\n', (19383, 19386), True, 'import tensorflow as tf\n'), ((21574, 21625), 
'tensorflow.train.AdamOptimizer', 'tf.train.AdamOptimizer', (['learning_rate'], {'name': '"""Adam2"""'}), "(learning_rate, name='Adam2')\n", (21596, 21625), True, 'import tensorflow as tf\n'), ((21896, 21932), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['pred_AB3[:, :, :, 0]'], {}), '(pred_AB3[:, :, :, 0])\n', (21910, 21932), True, 'import tensorflow as tf\n'), ((21959, 21995), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['pred_BA3[:, :, :, 0]'], {}), '(pred_BA3[:, :, :, 0])\n', (21973, 21995), True, 'import tensorflow as tf\n'), ((24848, 24911), 'os.path.join', 'os.path.join', (['LOG_DIR', "('model3' + OPT_TYPE + add_name + '.ckpt')"], {}), "(LOG_DIR, 'model3' + OPT_TYPE + add_name + '.ckpt')\n", (24860, 24911), False, 'import os\n'), ((10434, 10457), 'tensorflow.get_variable_scope', 'tf.get_variable_scope', ([], {}), '()\n', (10455, 10457), True, 'import tensorflow as tf\n'), ((10502, 10526), 'tensorflow.device', 'tf.device', (["('/gpu:%d' % i)"], {}), "('/gpu:%d' % i)\n", (10511, 10526), True, 'import tensorflow as tf\n'), ((10528, 10555), 'tensorflow.name_scope', 'tf.name_scope', (["('gpu_%d' % i)"], {}), "('gpu_%d' % i)\n", (10541, 10555), True, 'import tensorflow as tf\n'), ((10677, 10753), 'tensorflow.slice', 'tf.slice', (['pcA_pl', '[i * DEVICE_BATCH_SIZE, 0, 0]', '[DEVICE_BATCH_SIZE, -1, -1]'], {}), '(pcA_pl, [i * DEVICE_BATCH_SIZE, 0, 0], [DEVICE_BATCH_SIZE, -1, -1])\n', (10685, 10753), True, 'import tensorflow as tf\n'), ((10825, 10901), 'tensorflow.slice', 'tf.slice', (['pcB_pl', '[i * DEVICE_BATCH_SIZE, 0, 0]', '[DEVICE_BATCH_SIZE, -1, -1]'], {}), '(pcB_pl, [i * DEVICE_BATCH_SIZE, 0, 0], [DEVICE_BATCH_SIZE, -1, -1])\n', (10833, 10901), True, 'import tensorflow as tf\n'), ((10998, 11073), 'tensorflow.slice', 'tf.slice', (['labels_AB_pl', '[i * DEVICE_BATCH_SIZE, 0]', '[DEVICE_BATCH_SIZE, -1]'], {}), '(labels_AB_pl, [i * DEVICE_BATCH_SIZE, 0], [DEVICE_BATCH_SIZE, -1])\n', (11006, 11073), True, 'import tensorflow as tf\n'), ((11170, 11245), 
'tensorflow.slice', 'tf.slice', (['labels_BA_pl', '[i * DEVICE_BATCH_SIZE, 0]', '[DEVICE_BATCH_SIZE, -1]'], {}), '(labels_BA_pl, [i * DEVICE_BATCH_SIZE, 0], [DEVICE_BATCH_SIZE, -1])\n', (11178, 11245), True, 'import tensorflow as tf\n'), ((11343, 11421), 'tensorflow.slice', 'tf.slice', (['noise_pl', '[i * DEVICE_BATCH_SIZE, 0, 0]', '[DEVICE_BATCH_SIZE, -1, -1]'], {}), '(noise_pl, [i * DEVICE_BATCH_SIZE, 0, 0], [DEVICE_BATCH_SIZE, -1, -1])\n', (11351, 11421), True, 'import tensorflow as tf\n'), ((12425, 12465), 'tensorflow.get_collection', 'tf.get_collection', (['"""loss_samples"""', 'scope'], {}), "('loss_samples', scope)\n", (12442, 12465), True, 'import tensorflow as tf\n'), ((12516, 12565), 'tensorflow.add_n', 'tf.add_n', (['loss_samples'], {'name': '"""total_loss_samples"""'}), "(loss_samples, name='total_loss_samples')\n", (12524, 12565), True, 'import tensorflow as tf\n'), ((12684, 12721), 'tensorflow.get_collection', 'tf.get_collection', (['"""loss_pred"""', 'scope'], {}), "('loss_pred', scope)\n", (12701, 12721), True, 'import tensorflow as tf\n'), ((12769, 12812), 'tensorflow.add_n', 'tf.add_n', (['loss_pred'], {'name': '"""total_loss_pred"""'}), "(loss_pred, name='total_loss_pred')\n", (12777, 12812), True, 'import tensorflow as tf\n'), ((13169, 13240), 'tensorflow.get_collection', 'tf.get_collection', (['tf.GraphKeys.TRAINABLE_VARIABLES'], {'scope': '"""pc_compare"""'}), "(tf.GraphKeys.TRAINABLE_VARIABLES, scope='pc_compare')\n", (13186, 13240), True, 'import tensorflow as tf\n'), ((13285, 13336), 'tensorflow.get_collection', 'tf.get_collection', (['tf.GraphKeys.TRAINABLE_VARIABLES'], {}), '(tf.GraphKeys.TRAINABLE_VARIABLES)\n', (13302, 13336), True, 'import tensorflow as tf\n'), ((41792, 41820), 'numpy.eye', 'np.eye', (['square_dist.shape[1]'], {}), '(square_dist.shape[1])\n', (41798, 41820), True, 'import numpy as np\n'), ((13089, 13120), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['l.op.name', 'l'], {}), '(l.op.name, l)\n', (13106, 13120), 
True, 'import tensorflow as tf\n')] |
#!/usr/bin/env python
from scipy import stats
import os
import sys
from wfdb import processing
from scipy.signal import find_peaks
import pyhrv.nonlinear as nl
from contextlib import contextmanager
import pywt
from numpy import cumsum, log, polyfit, sqrt, std, subtract
from scipy.signal import butter, filtfilt, iirnotch, savgol_filter
import numpy as np
@contextmanager
def suppress_stdout():
    """Temporarily redirect sys.stdout to os.devnull.

    Anything printed inside the ``with`` block is discarded; the previous
    stdout is restored on exit, even if an exception is raised.
    """
    devnull_handle = open(os.devnull, "w")
    saved_stream = sys.stdout
    sys.stdout = devnull_handle
    try:
        yield
    finally:
        sys.stdout = saved_stream
        devnull_handle.close()
# https://python-heart-rate-analysis-toolkit.readthedocs.io/en/latest/_modules/heartpy/filtering.html
'''
Functions for data filtering tasks.
'''
# Public API of the filtering section (adapted from HeartPy).
# NOTE(review): 'hampel_filter', 'hampel_correcter' and 'smooth_signal' are
# listed here but are not defined in this part of the file — confirm they
# exist elsewhere or that importers never rely on them.
__all__ = ['filter_signal',
           'hampel_filter',
           'hampel_correcter',
           'smooth_signal']
def butter_lowpass(cutoff, sample_rate, order=2):
    """Design a digital Butterworth low-pass filter.

    Returns the (b, a) transfer-function coefficients for the given cutoff
    frequency (Hz), sampling rate (Hz) and filter order.
    """
    nyquist = 0.5 * sample_rate
    return butter(order, cutoff / nyquist, btype='low', analog=False)
def butter_highpass(cutoff, sample_rate, order=2):
    """Design a digital Butterworth high-pass filter.

    Returns the (b, a) transfer-function coefficients for the given cutoff
    frequency (Hz), sampling rate (Hz) and filter order.
    """
    nyquist = 0.5 * sample_rate
    return butter(order, cutoff / nyquist, btype='high', analog=False)
def butter_bandpass(lowcut, highcut, sample_rate, order=2):
    """Design a digital Butterworth band-pass filter.

    Returns the (b, a) coefficients for a pass band between `lowcut` and
    `highcut` (both in Hz) at the given sampling rate and order.
    """
    nyquist = 0.5 * sample_rate
    return butter(order, [lowcut / nyquist, highcut / nyquist], btype='band')
def filter_signal(data, cutoff, sample_rate, order=2, filtertype='lowpass', return_top=False):
    """Apply a zero-phase (forward-backward) digital filter to a signal.

    Parameters
    ----------
    data : array_like
        Signal to filter.
    cutoff : float or sequence of two floats
        Cutoff frequency in Hz. For 'bandpass' a two-element sequence
        [lower, upper] is required.
    sample_rate : float
        Sampling frequency of `data` in Hz.
    order : int, optional
        Filter order (default 2).
    filtertype : str, optional
        One of 'lowpass', 'highpass', 'bandpass' or 'notch'.
    return_top : bool, optional
        If True, clip the filtered signal below zero.

    Returns
    -------
    ndarray
        Filtered (and optionally clipped) signal.

    Raises
    ------
    ValueError
        If `filtertype` is not one of the supported types.
    """
    kind = filtertype.lower()
    if kind == 'lowpass':
        b, a = butter_lowpass(cutoff, sample_rate, order=order)
    elif kind == 'highpass':
        b, a = butter_highpass(cutoff, sample_rate, order=order)
    elif kind == 'bandpass':
        # BUGFIX: the original check `type(cutoff) == tuple or list or np.array`
        # was always true (it or-ed the class objects themselves), so the
        # validation never fired. Use a proper isinstance test instead.
        assert isinstance(cutoff, (tuple, list, np.ndarray)), 'if bandpass filter is specified, \
        cutoff needs to be array or tuple specifying lower and upper bound: [lower, upper].'
        b, a = butter_bandpass(cutoff[0], cutoff[1], sample_rate, order=order)
    elif kind == 'notch':
        # NOTE(review): Q=0.005 is an unusually low quality factor (very wide
        # notch) — kept as-is to preserve existing behavior; confirm intent.
        b, a = iirnotch(cutoff, Q=0.005, fs=sample_rate)
    else:
        raise ValueError('filtertype: %s is unknown, available are: \
        lowpass, highpass, bandpass, and notch' % filtertype)

    # filtfilt runs the filter forward and backward, giving zero phase shift.
    filtered_data = filtfilt(b, a, data)
    if return_top:
        return np.clip(filtered_data, a_min=0, a_max=None)
    return filtered_data
def hurstFun(ts):
    """Returns the Hurst Exponent of the time series vector ts"""
    lag_range = range(2, 100)
    # Standard deviation of the lag-differenced series, one value per lag.
    tau = [sqrt(std(subtract(ts[lag:], ts[:-lag]))) for lag in lag_range]
    # Slope of log(tau) against log(lag) estimates H/2 under the scaling
    # law, so the exponent is twice the fitted slope.
    slope = polyfit(log(lag_range), log(tau), 1)[0]
    return slope * 2.0
def normalize_arr(data):
    """Scale `data` by its Euclidean (L2) norm and return the result."""
    return data / np.linalg.norm(data)
def reject_outliers(data, m=2):
    """Drop samples further than `m` standard deviations from the mean."""
    deviation = abs(data - np.mean(data))
    return data[deviation < m * np.std(data)]
def get_R_peaks(data, freq):
    """Locate R peaks with wfdb's XQRS detector.

    Returns (valuesR, indexR, intervalR): L2-normalized amplitudes at the
    detected peaks, the peak sample indices, and the successive index
    differences. If fewer than three peaks are found, detection is retried
    after outlier rejection; if that also fails, the sentinel
    ([0], array([0]), array([0])) is returned.
    """
    with suppress_stdout():
        indexR = processing.xqrs_detect(sig=data, fs=freq)
    if len(indexR) > 2:
        # Drop the first detection before computing intervals/amplitudes.
        indexR = indexR[1:]
        intervalR = np.diff(indexR)
        normalized = normalize_arr(data)
        valuesR = [normalized[idx] for idx in indexR]
        return valuesR, indexR, intervalR

    # Too few peaks on the raw signal: strip outliers and try once more.
    data = reject_outliers(data)
    with suppress_stdout():
        indexR = processing.xqrs_detect(sig=data, fs=freq)
    if len(indexR) > 2:
        indexR = indexR[1:]
        intervalR = np.diff(indexR)
        normalized = normalize_arr(data)
        valuesR = [normalized[idx] for idx in indexR]
    else:
        valuesR = [0]
        indexR = np.array([0])
        intervalR = np.array([0])
    return valuesR, indexR, intervalR
def find_nearest_previous(array, value):
    """Return (insertion index, element just before it) for `value` in sorted `array`.

    NOTE(review): when `value` is smaller than every element, the index is 0
    and ``array[-1]`` (the last element) is returned — this wrap-around is
    kept as-is to preserve existing behavior.
    """
    insert_at = np.searchsorted(array, value, side="left")
    return insert_at, array[insert_at - 1]
def get_Q_peaks(data, freq, indexR):
    """Locate Q peaks with wfdb's GQRS detector, paired to the given R peaks.

    For every R peak the nearest preceding GQRS detection is taken as the
    corresponding Q peak. Returns (valuesQ, indexQ, intervalQ) analogous to
    get_R_peaks; the sentinel ([0], array([0]), array([0])) is returned when
    either detector yields too few peaks.
    """
    candidates = processing.gqrs_detect(sig=data, fs=freq)
    if len(candidates) > 2 and len(indexR) > 2:
        indexQ = np.array([])
        for r_idx in indexR:
            indexQ = np.append(indexQ, find_nearest_previous(candidates, r_idx)[1])
        indexQ = indexQ.astype(int)
        intervalQ = np.diff(indexQ)
        normalized = normalize_arr(data)
        valuesQ = [normalized[idx] for idx in indexQ]
    else:
        valuesQ = [0]
        indexQ = np.array([0])
        intervalQ = np.array([0])
    return valuesQ, indexQ, intervalQ
def _pick_strongest(candidates_idx, candidates_val, signal):
    """For each beat's candidate list, keep the index with the largest value.

    Replicates the original dict-sorted selection (including its stable-sort
    tie-break); beats with no candidates get a 0 placeholder for both the
    index and the value.
    """
    picked_idx = []
    picked_val = []
    for idxs, vals in zip(candidates_idx, candidates_val):
        ranking = dict(zip(idxs, vals))
        ordered = sorted(ranking, key=ranking.get)
        if ordered:
            top = ordered[-1]
            picked_idx.append(top)
            picked_val.append(signal[top])
        else:
            picked_idx.append(0)
            picked_val.append(0)
    return picked_idx, picked_val
def s_t_peaks(data, indexR):
    """Locate T and S peaks relative to the detected R peaks.

    T peaks are the strongest positive peaks found strictly inside each R-R
    interval (with a 50-sample guard on both sides); S peaks are the
    strongest negative deflections between each R peak and its T peak
    (searched on the inverted signal, so the returned S values are negated
    amplitudes, matching prior behavior).

    Returns (tIndex, tVal, intervalT, sIndex, sVal, intervalS); zero-filled
    placeholders when fewer than three R peaks are available.

    Cleanup vs. original: removed the unused local `dataInput` and the
    redundant aliases `qrs_inds`/`rr`; the duplicated max-selection loops
    were factored into _pick_strongest. Behavior is unchanged.
    """
    if len(indexR) > 2:
        data = normalize_arr(data)
        rr = indexR.tolist()
        # --- T peaks: positive peaks well inside each R-R interval ---
        x = data
        peaks, _ = find_peaks(x, distance=50)
        peaks = peaks.tolist()
        peaksArr = []
        peaksVal = []
        for i in range(len(rr) - 1):
            in_window = [f for f in peaks if rr[i] + 50 < f < rr[i + 1] - 50]
            peaksArr.append(in_window)
            peaksVal.append([x[f] for f in in_window])
        tIndex, tVal = _pick_strongest(peaksArr, peaksVal, x)
        # --- S peaks: valleys between each R peak and its T peak ---
        x = -data
        valleys, _ = find_peaks(x)
        valleys = valleys.tolist()
        valleysArr = []
        valleysVal = []
        for i in range(len(rr) - 1):
            in_window = [f for f in valleys if rr[i] < f < tIndex[i]]
            valleysArr.append(in_window)
            valleysVal.append([x[f] for f in in_window])
        sIndex, sVal = _pick_strongest(valleysArr, valleysVal, x)
        intervalT = np.diff(tIndex)
        intervalS = np.diff(sIndex)
    else:
        tIndex = [0]
        tVal = [0]
        intervalT = np.array([0])
        sIndex = [0]
        sVal = [0]
        intervalS = np.array([0])
    return tIndex, tVal, intervalT, sIndex, sVal, intervalS
def get_peaks_distances(indexQ, indexR, indexS, indexT, data):
    """Compute per-beat distances (in samples) and the raw signal segments
    between the Q, R, S and T peak positions.

    Returns a pair of lists in the fixed order
    [QR, QS, QT, RS, RT, ST]:
      - index differences (end - start) for each beat, and
      - the corresponding ``data[start:end]`` slices.
    With two or fewer R peaks, zero-filled placeholders are returned.
    """
    pair_order = [(indexQ, indexR), (indexQ, indexS), (indexQ, indexT),
                  (indexR, indexS), (indexR, indexT), (indexS, indexT)]
    if len(indexR) > 2:
        distances = [[] for _ in pair_order]
        segments = [[] for _ in pair_order]
        for beat in range(len(indexR) - 1):
            for k, (start, end) in enumerate(pair_order):
                distances[k].append(end[beat] - start[beat])
                segments[k].append(data[start[beat]:end[beat]])
    else:
        distances = [[0] for _ in pair_order]
        segments = [[[0]] for _ in pair_order]
    return distances, segments
def peaks_derivations(indexPeaks, derivation):
    """Sample the `derivation` signal at the given peak indices."""
    return [derivation[idx] for idx in indexPeaks]
def wav_info(data):
    """Continuous wavelet transform of `data` at scales 1, 30 and 60.

    Uses the first-order Gaussian wavelet ('gaus1') and returns the three
    coefficient rows, one per scale.
    """
    coefficients, _ = pywt.cwt(data, [1, 30, 60], 'gaus1')
    return coefficients[0], coefficients[1], coefficients[2]
# Funtion from get_123ECG_features, with a small change in the return, returning the labels
def get_12ECG_featuresTrain(data, header_data):
    """Build the training feature vector and label for one 12-lead ECG record.

    Parameters:
        data: 12-lead signal matrix indexed as data[lead] — assumes lead 0
            is the reference lead used for peak detection (TODO confirm).
        header_data: list of header lines; line 0 holds ID / lead count /
            sampling rate, subsequent lines hold per-lead gains and
            '#Age' / '#Sex' / '#Dx' annotations.

    Returns:
        (features, label): a 1-D numpy feature vector (order is significant —
        a trained model consumes it positionally) and the diagnosis label.

    NOTE(review): `age`, `sex` and `label` are only bound inside the header
    loop — a header missing '#Age', '#Sex' or '#Dx' raises NameError at the
    end of this function. Confirm headers always carry these lines.
    """
    # Debug/feature switches: printInfo dumps intermediates, poincareActivation
    # toggles the pyhrv Poincaré features.
    printInfo = 0
    poincareActivation = 1
    if printInfo == 1:
        print("header_data")
        print(header_data)
        print("data")
        print(data)
    # --- Parse the record header: id, lead count, sampling frequency ---
    tmp_hea = header_data[0].split(' ')
    ptID = tmp_hea[0]
    num_leads = int(tmp_hea[1])
    sample_Fs = int(tmp_hea[2])
    # Per-lead gain, taken from the numerator of the 'gain/unit' field.
    gain_lead = np.zeros(num_leads)
    for ii in range(num_leads):
        tmp_hea = header_data[ii + 1].split(' ')
        gain_lead[ii] = int(tmp_hea[2].split('/')[0])
    # for testing, we included the mean age of 57 if the age is a NaN
    # This value will change as more data is being released
    for iline in header_data:
        if iline.startswith('#Age'):
            tmp_age = iline.split(': ')[1].strip()
            age = int(tmp_age if tmp_age != 'NaN' else 57)
        elif iline.startswith('#Sex'):
            tmp_sex = iline.split(': ')[1]
            if tmp_sex.strip() == 'Female':
                sex = 1
            else:
                sex = 0
        elif iline.startswith('#Dx'):
            label = [iline.split(': ')[1]]
    # --- Peak detection on lead 0 ---
    # NOTE(review): the low-pass filter hard-codes sample_rate=500 while the
    # detectors below use the header's sample_Fs — confirm all records are
    # actually sampled at 500 Hz.
    filteredData = filter_signal(data[0], 40, 500, order=2, filtertype='lowpass', return_top=False)
    valuesR, indexR, intervalR = get_R_peaks(filteredData, sample_Fs)
    valuesQ, indexQ, intervalQ = get_Q_peaks(filteredData, sample_Fs, indexR)
    indexT, valuesT, intervalT, indexS, valuesS, intervalS = s_t_peaks(filteredData, indexR)
    # Inter-peak distances/segments and wavelet rows, both from raw lead 0.
    dis = get_peaks_distances(indexQ, indexR, indexS, indexT, data[0])
    wav1, wav30, wav60 = wav_info(data[0])
    #if tsfelActivation == 1:
    #cfg = tsfel.get_features_by_domain()
    #tsfelFeatList = []
    # Extract features
    #with suppress_stdout():
    #tsfelFeat = tsfel.time_series_features_extractor(cfg, filteredData, fs=sample_Fs)
    #tsfelFeatList = tsfelFeat.values.tolist()[0]
    # --- Per-lead summary statistics (9 stats x 12 leads, in lead order) ---
    # Leads 1..11 additionally contribute the Q/R/S/T amplitudes sampled on
    # that lead (appended to derivationsValues in that fixed order).
    ecgVals = []
    derivationsValues = []
    for i in range(12):
        lead = normalize_arr(data[i])
        ecgVals.append(np.mean(lead))
        ecgVals.append(np.median(lead))
        ecgVals.append(np.std(lead))
        ecgVals.append(stats.tvar(lead))
        ecgVals.append(stats.skew(lead))
        ecgVals.append(stats.kurtosis(lead))
        ecgVals.append(np.amax(lead))
        ecgVals.append(np.amin(lead))
        ecgVals.append(hurstFun(lead))
        if i > 0:
            derivationsValues.append(peaks_derivations(indexQ, lead))
            derivationsValues.append(peaks_derivations(indexR, lead))
            derivationsValues.append(peaks_derivations(indexS, lead))
            derivationsValues.append(peaks_derivations(indexT, lead))
    # --- 9 summary stats for every per-beat time series (order matters) ---
    timeSeries = [valuesR, intervalR, valuesQ, intervalQ, valuesT, intervalT, valuesS, intervalS, wav1, wav30, wav60]
    timeSeries = timeSeries + derivationsValues
    peaksStats = []
    for item in timeSeries:
        peaksStats.append(np.amax(item))
        peaksStats.append(np.amin(item))
        peaksStats.append(np.mean(item))
        peaksStats.append(np.median(item))
        peaksStats.append(np.std(item))
        peaksStats.append(stats.tvar(item))
        peaksStats.append(stats.skew(item))
        peaksStats.append(stats.kurtosis(item))
        peaksStats.append(hurstFun(item))
    # --- 9 summary stats for each inter-peak distance series (dis[0]) ---
    distancesPeaks = []
    for item in dis[0]:
        distancesPeaks.append(np.mean(item))
        distancesPeaks.append(np.median(item))
        distancesPeaks.append(np.std(item))
        distancesPeaks.append(stats.tvar(item))
        distancesPeaks.append(stats.skew(item))
        distancesPeaks.append(stats.kurtosis(item))
        distancesPeaks.append(np.amax(item))
        distancesPeaks.append(np.amin(item))
        distancesPeaks.append(hurstFun(item))
    # --- Stats over the signal segments between peaks (dis[1]) ---
    # For each segment: mean, std, and linear-regression slope; then 8
    # summary stats over each of those three per-beat series.
    distancesPeaksVals = []
    for item in dis[1]:
        distancesPeaksValsInd = [[], [], []]
        for segment in item:
            if len(segment) > 0:
                linReg = stats.linregress(np.arange(len(segment)), segment)
                distancesPeaksValsInd[0].append(np.mean(segment))
                distancesPeaksValsInd[1].append(np.std(segment))
                distancesPeaksValsInd[2].append(linReg[0])
            else:
                distancesPeaksValsInd[0].append(0)
                distancesPeaksValsInd[1].append(0)
                distancesPeaksValsInd[2].append(0)
        for sumary in distancesPeaksValsInd:
            distancesPeaksVals.append(np.mean(sumary))
            distancesPeaksVals.append(np.median(sumary))
            distancesPeaksVals.append(np.std(sumary))
            distancesPeaksVals.append(stats.tvar(sumary))
            distancesPeaksVals.append(stats.skew(sumary))
            distancesPeaksVals.append(stats.kurtosis(sumary))
            distancesPeaksVals.append(np.amax(sumary))
            distancesPeaksVals.append(np.amin(sumary))
    # --- Poincaré (HRV nonlinear) features from the R-R intervals ---
    # Elements 1..4 of the pyhrv result are used (element 0 is the figure).
    if poincareActivation == 1:
        pcData = []
        poincare = nl.poincare(intervalR, show=False)
        pcData.append(poincare[1])
        pcData.append(poincare[2])
        pcData.append(poincare[3])
        pcData.append(poincare[4])
    if printInfo == 1:
        print("valuesQ")
        print(valuesQ)
        print("indexQ")
        print(indexQ)
        print("intervalQ")
        print(intervalQ)
        print("valuesR")
        print(valuesR)
        print("indexR")
        print(indexR)
        print("intervalR")
        print(intervalR)
        print("valuesS")
        print(valuesS)
        print("indexS")
        print(indexS)
        print("intervalS")
        print(intervalS)
        print("valuesT")
        print(valuesT)
        print("indexT")
        print(indexT)
        print("intervalT")
        print(intervalT)
        print("distancesPeaks")
        print(distancesPeaks)
        print("distancesPeaks[3]")
        print(distancesPeaks[3])
    # if tsfelActivation == 1:
    # print("tsfelFeatList")
    # print(tsfelFeatList)
    # FEAT to list
    # --- Concatenate all groups in fixed order: demographics, per-lead
    # stats, Poincaré, time-series stats, distance stats, segment stats ---
    features = np.hstack([age, sex])
    features = np.concatenate((features, ecgVals))
    if poincareActivation == 1:
        features = np.concatenate((features, pcData))
    features = np.concatenate((features, peaksStats))
    features = np.concatenate((features, distancesPeaks))
    features = np.concatenate((features, distancesPeaksVals))
    #if tsfelActivation == 1:
    #features = np.concatenate((features, tsfelFeatList))
    return features, label
def get_12ECG_features(data, header_data):
    """Build a flat feature vector from a 12-lead ECG recording.

    Parameters:
        data: sequence of 12 lead signals; lead 0 drives peak detection,
            peak-distance and wavelet features, all 12 leads contribute
            per-lead statistics.
        header_data: list of header lines. Line 0 is "<id> <num_leads> <fs> ...";
            subsequent per-lead lines carry "<...> <...> <gain>/<unit>";
            '#Age' / '#Sex' / '#Dx' lines carry demographics and diagnosis.

    Returns:
        features: np.ndarray concatenating [age, sex], per-lead signal
            statistics, Poincare measures (when enabled), peak statistics,
            and peak-distance statistics.

    NOTE(review): relies on module-level helpers defined elsewhere in this
    file (filter_signal, get_R_peaks, get_Q_peaks, s_t_peaks,
    get_peaks_distances, wav_info, normalize_arr, peaks_derivations,
    hurstFun) plus scipy.stats as `stats` and pyhrv.nonlinear as `nl`.
    If the header has no '#Age'/'#Sex' line, `age`/`sex` are never bound
    and the final np.hstack raises NameError — TODO confirm headers always
    carry them.
    """
    # Debug / feature toggles (0 = off, 1 = on).
    printInfo = 0
    poincareActivation = 1
    #tsfelActivation = 0
    if printInfo == 1:
        print("header_data")
        print(header_data)
        print("data")
        print(data)
    # --- Parse the fixed first header line: record id, lead count, fs ---
    tmp_hea = header_data[0].split(' ')
    ptID = tmp_hea[0]
    num_leads = int(tmp_hea[1])
    sample_Fs = int(tmp_hea[2])
    # Per-lead gain, parsed from the "<gain>/<unit>" field of each lead line.
    # (ptID and gain_lead are parsed but not used below.)
    gain_lead = np.zeros(num_leads)
    for ii in range(num_leads):
        tmp_hea = header_data[ii + 1].split(' ')
        gain_lead[ii] = int(tmp_hea[2].split('/')[0])
    # for testing, we included the mean age of 57 if the age is a NaN
    # This value will change as more data is being released
    for iline in header_data:
        if iline.startswith('#Age'):
            tmp_age = iline.split(': ')[1].strip()
            age = int(tmp_age if tmp_age != 'NaN' else 57)
        elif iline.startswith('#Sex'):
            # Encode sex as 1 = Female, 0 = otherwise.
            tmp_sex = iline.split(': ')[1]
            if tmp_sex.strip() == 'Female':
                sex = 1
            else:
                sex = 0
        elif iline.startswith('#Dx'):
            label = [iline.split(': ')[1]]
    # --- QRS/T peak detection on a low-pass filtered copy of lead 0 ---
    filteredData = filter_signal(data[0], 40, 500, order=2, filtertype='lowpass', return_top=False)
    valuesR, indexR, intervalR = get_R_peaks(filteredData, sample_Fs)
    valuesQ, indexQ, intervalQ = get_Q_peaks(filteredData, sample_Fs, indexR)
    indexT, valuesT, intervalT, indexS, valuesS, intervalS = s_t_peaks(filteredData, indexR)
    # dis[0]: inter-peak distance series; dis[1]: per-segment value lists.
    dis = get_peaks_distances(indexQ, indexR, indexS, indexT, data[0])
    # Wavelet-derived series of lead 0 at three scales.
    wav1, wav30, wav60 = wav_info(data[0])
    #if tsfelActivation == 1:
    #cfg = tsfel.get_features_by_domain()
    #tsfelFeatList = []
    # Extract features
    #with suppress_stdout():
    #tsfelFeat = tsfel.time_series_features_extractor(cfg, filteredData, fs=sample_Fs)
    #tsfelFeatList = tsfelFeat.values.tolist()[0]
    # ECG vals
    # --- Per-lead summary statistics (all 12 normalized leads) ---
    ecgVals = []
    derivationsValues = []
    for i in range(12):
        lead = normalize_arr(data[i])
        ecgVals.append(np.mean(lead))
        ecgVals.append(np.median(lead))
        ecgVals.append(np.std(lead))
        ecgVals.append(stats.tvar(lead))
        ecgVals.append(stats.skew(lead))
        ecgVals.append(stats.kurtosis(lead))
        ecgVals.append(np.amax(lead))
        ecgVals.append(np.amin(lead))
        ecgVals.append(hurstFun(lead))
        # Peak indices were detected on lead 0; sample the other leads
        # at those same positions.
        if i > 0:
            derivationsValues.append(peaks_derivations(indexQ, lead))
            derivationsValues.append(peaks_derivations(indexR, lead))
            derivationsValues.append(peaks_derivations(indexS, lead))
            derivationsValues.append(peaks_derivations(indexT, lead))
    # --- Statistics over every peak-derived time series ---
    timeSeries = [valuesR, intervalR, valuesQ, intervalQ, valuesT, intervalT, valuesS, intervalS, wav1, wav30, wav60]
    timeSeries = timeSeries + derivationsValues
    peaksStats = []
    for item in timeSeries:
        peaksStats.append(np.amax(item))
        peaksStats.append(np.amin(item))
        peaksStats.append(np.mean(item))
        peaksStats.append(np.median(item))
        peaksStats.append(np.std(item))
        peaksStats.append(stats.tvar(item))
        peaksStats.append(stats.skew(item))
        peaksStats.append(stats.kurtosis(item))
        peaksStats.append(hurstFun(item))
    # --- Statistics over inter-peak distance series ---
    distancesPeaks = []
    for item in dis[0]:
        distancesPeaks.append(np.mean(item))
        distancesPeaks.append(np.median(item))
        distancesPeaks.append(np.std(item))
        distancesPeaks.append(stats.tvar(item))
        distancesPeaks.append(stats.skew(item))
        distancesPeaks.append(stats.kurtosis(item))
        distancesPeaks.append(np.amax(item))
        distancesPeaks.append(np.amin(item))
        distancesPeaks.append(hurstFun(item))
    # --- Per-segment mean/std/slope summaries of the values between peaks ---
    distancesPeaksVals = []
    for item in dis[1]:
        # Three parallel lists: segment means, segment stds, segment slopes.
        distancesPeaksValsInd = [[], [], []]
        for segment in item:
            if len(segment) > 0:
                # Slope of a linear fit over the segment samples.
                linReg = stats.linregress(np.arange(len(segment)), segment)
                distancesPeaksValsInd[0].append(np.mean(segment))
                distancesPeaksValsInd[1].append(np.std(segment))
                distancesPeaksValsInd[2].append(linReg[0])
            else:
                # Empty segment: pad with zeros to keep list lengths aligned.
                distancesPeaksValsInd[0].append(0)
                distancesPeaksValsInd[1].append(0)
                distancesPeaksValsInd[2].append(0)
        for sumary in distancesPeaksValsInd:
            distancesPeaksVals.append(np.mean(sumary))
            distancesPeaksVals.append(np.median(sumary))
            distancesPeaksVals.append(np.std(sumary))
            distancesPeaksVals.append(stats.tvar(sumary))
            distancesPeaksVals.append(stats.skew(sumary))
            distancesPeaksVals.append(stats.kurtosis(sumary))
            distancesPeaksVals.append(np.amax(sumary))
            distancesPeaksVals.append(np.amin(sumary))
    # --- Poincare (HRV) descriptors of the R-R interval series ---
    if poincareActivation == 1:
        pcData = []
        poincare = nl.poincare(intervalR, show=False)
        # Elements 1..4 of the poincare result are appended; element 0 is
        # skipped — presumably a figure/plot handle, TODO confirm.
        pcData.append(poincare[1])
        pcData.append(poincare[2])
        pcData.append(poincare[3])
        pcData.append(poincare[4])
    if printInfo == 1:
        print("valuesQ")
        print(valuesQ)
        print("indexQ")
        print(indexQ)
        print("intervalQ")
        print(intervalQ)
        print("valuesR")
        print(valuesR)
        print("indexR")
        print(indexR)
        print("intervalR")
        print(intervalR)
        print("valuesS")
        print(valuesS)
        print("indexS")
        print(indexS)
        print("intervalS")
        print(intervalS)
        print("valuesT")
        print(valuesT)
        print("indexT")
        print(indexT)
        print("intervalT")
        print(intervalT)
        print("distancesPeaks")
        print(distancesPeaks)
        print("distancesPeaks[3]")
        print(distancesPeaks[3])
    # if tsfelActivation == 1:
    # print("tsfelFeatList")
    # print(tsfelFeatList)
    # FEAT to list
    # --- Concatenate all feature groups into a single 1-D vector ---
    features = np.hstack([age, sex])
    features = np.concatenate((features, ecgVals))
    if poincareActivation == 1:
        features = np.concatenate((features, pcData))
    features = np.concatenate((features, peaksStats))
    features = np.concatenate((features, distancesPeaks))
    features = np.concatenate((features, distancesPeaksVals))
    #if tsfelActivation == 1:
    #features = np.concatenate((features, tsfelFeatList))
return features | [
"numpy.amin",
"pyhrv.nonlinear.poincare",
"pywt.cwt",
"numpy.clip",
"scipy.signal.find_peaks",
"numpy.linalg.norm",
"numpy.mean",
"wfdb.processing.gqrs_detect",
"numpy.std",
"scipy.stats.tvar",
"scipy.signal.iirnotch",
"scipy.signal.butter",
"numpy.median",
"numpy.hstack",
"numpy.concate... | [((975, 1030), 'scipy.signal.butter', 'butter', (['order', 'normal_cutoff'], {'btype': '"""low"""', 'analog': '(False)'}), "(order, normal_cutoff, btype='low', analog=False)\n", (981, 1030), False, 'from scipy.signal import butter, filtfilt, iirnotch, savgol_filter\n'), ((1172, 1228), 'scipy.signal.butter', 'butter', (['order', 'normal_cutoff'], {'btype': '"""high"""', 'analog': '(False)'}), "(order, normal_cutoff, btype='high', analog=False)\n", (1178, 1228), False, 'from scipy.signal import butter, filtfilt, iirnotch, savgol_filter\n'), ((1394, 1434), 'scipy.signal.butter', 'butter', (['order', '[low, high]'], {'btype': '"""band"""'}), "(order, [low, high], btype='band')\n", (1400, 1434), False, 'from scipy.signal import butter, filtfilt, iirnotch, savgol_filter\n'), ((2313, 2333), 'scipy.signal.filtfilt', 'filtfilt', (['b', 'a', 'data'], {}), '(b, a, data)\n', (2321, 2333), False, 'from scipy.signal import butter, filtfilt, iirnotch, savgol_filter\n'), ((3067, 3087), 'numpy.linalg.norm', 'np.linalg.norm', (['data'], {}), '(data)\n', (3081, 3087), True, 'import numpy as np\n'), ((4162, 4204), 'numpy.searchsorted', 'np.searchsorted', (['array', 'value'], {'side': '"""left"""'}), "(array, value, side='left')\n", (4177, 4204), True, 'import numpy as np\n'), ((4292, 4333), 'wfdb.processing.gqrs_detect', 'processing.gqrs_detect', ([], {'sig': 'data', 'fs': 'freq'}), '(sig=data, fs=freq)\n', (4314, 4333), False, 'from wfdb import processing\n'), ((8842, 8878), 'pywt.cwt', 'pywt.cwt', (['data', '[1, 30, 60]', '"""gaus1"""'], {}), "(data, [1, 30, 60], 'gaus1')\n", (8850, 8878), False, 'import pywt\n'), ((9377, 9396), 'numpy.zeros', 'np.zeros', (['num_leads'], {}), '(num_leads)\n', (9385, 9396), True, 'import numpy as np\n'), ((15120, 15141), 'numpy.hstack', 'np.hstack', (['[age, sex]'], {}), '([age, sex])\n', (15129, 15141), True, 'import numpy as np\n'), ((15162, 15197), 'numpy.concatenate', 'np.concatenate', (['(features, ecgVals)'], {}), 
'((features, ecgVals))\n', (15176, 15197), True, 'import numpy as np\n'), ((15309, 15347), 'numpy.concatenate', 'np.concatenate', (['(features, peaksStats)'], {}), '((features, peaksStats))\n', (15323, 15347), True, 'import numpy as np\n'), ((15363, 15405), 'numpy.concatenate', 'np.concatenate', (['(features, distancesPeaks)'], {}), '((features, distancesPeaks))\n', (15377, 15405), True, 'import numpy as np\n'), ((15421, 15467), 'numpy.concatenate', 'np.concatenate', (['(features, distancesPeaksVals)'], {}), '((features, distancesPeaksVals))\n', (15435, 15467), True, 'import numpy as np\n'), ((15977, 15996), 'numpy.zeros', 'np.zeros', (['num_leads'], {}), '(num_leads)\n', (15985, 15996), True, 'import numpy as np\n'), ((21578, 21599), 'numpy.hstack', 'np.hstack', (['[age, sex]'], {}), '([age, sex])\n', (21587, 21599), True, 'import numpy as np\n'), ((21616, 21651), 'numpy.concatenate', 'np.concatenate', (['(features, ecgVals)'], {}), '((features, ecgVals))\n', (21630, 21651), True, 'import numpy as np\n'), ((21755, 21793), 'numpy.concatenate', 'np.concatenate', (['(features, peaksStats)'], {}), '((features, peaksStats))\n', (21769, 21793), True, 'import numpy as np\n'), ((21809, 21851), 'numpy.concatenate', 'np.concatenate', (['(features, distancesPeaks)'], {}), '((features, distancesPeaks))\n', (21823, 21851), True, 'import numpy as np\n'), ((21867, 21913), 'numpy.concatenate', 'np.concatenate', (['(features, distancesPeaksVals)'], {}), '((features, distancesPeaksVals))\n', (21881, 21913), True, 'import numpy as np\n'), ((2369, 2412), 'numpy.clip', 'np.clip', (['filtered_data'], {'a_min': '(0)', 'a_max': 'None'}), '(filtered_data, a_min=0, a_max=None)\n', (2376, 2412), True, 'import numpy as np\n'), ((2923, 2932), 'numpy.log', 'log', (['lags'], {}), '(lags)\n', (2926, 2932), False, 'from numpy import cumsum, log, polyfit, sqrt, std, subtract\n'), ((2934, 2942), 'numpy.log', 'log', (['tau'], {}), '(tau)\n', (2937, 2942), False, 'from numpy import cumsum, log, 
polyfit, sqrt, std, subtract\n'), ((3308, 3349), 'wfdb.processing.xqrs_detect', 'processing.xqrs_detect', ([], {'sig': 'data', 'fs': 'freq'}), '(sig=data, fs=freq)\n', (3330, 3349), False, 'from wfdb import processing\n'), ((3425, 3440), 'numpy.diff', 'np.diff', (['indexR'], {}), '(indexR)\n', (3432, 3440), True, 'import numpy as np\n'), ((4401, 4413), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (4409, 4413), True, 'import numpy as np\n'), ((4615, 4630), 'numpy.diff', 'np.diff', (['indexQ'], {}), '(indexQ)\n', (4622, 4630), True, 'import numpy as np\n'), ((4807, 4820), 'numpy.array', 'np.array', (['[0]'], {}), '([0])\n', (4815, 4820), True, 'import numpy as np\n'), ((4841, 4854), 'numpy.array', 'np.array', (['[0]'], {}), '([0])\n', (4849, 4854), True, 'import numpy as np\n'), ((5075, 5101), 'scipy.signal.find_peaks', 'find_peaks', (['x'], {'distance': '(50)'}), '(x, distance=50)\n', (5085, 5101), False, 'from scipy.signal import find_peaks\n'), ((6054, 6067), 'scipy.signal.find_peaks', 'find_peaks', (['x'], {}), '(x)\n', (6064, 6067), False, 'from scipy.signal import find_peaks\n'), ((6962, 6977), 'numpy.diff', 'np.diff', (['tIndex'], {}), '(tIndex)\n', (6969, 6977), True, 'import numpy as np\n'), ((6998, 7013), 'numpy.diff', 'np.diff', (['sIndex'], {}), '(sIndex)\n', (7005, 7013), True, 'import numpy as np\n'), ((7086, 7099), 'numpy.array', 'np.array', (['[0]'], {}), '([0])\n', (7094, 7099), True, 'import numpy as np\n'), ((7161, 7174), 'numpy.array', 'np.array', (['[0]'], {}), '([0])\n', (7169, 7174), True, 'import numpy as np\n'), ((14033, 14067), 'pyhrv.nonlinear.poincare', 'nl.poincare', (['intervalR'], {'show': '(False)'}), '(intervalR, show=False)\n', (14044, 14067), True, 'import pyhrv.nonlinear as nl\n'), ((15254, 15288), 'numpy.concatenate', 'np.concatenate', (['(features, pcData)'], {}), '((features, pcData))\n', (15268, 15288), True, 'import numpy as np\n'), ((20523, 20557), 'pyhrv.nonlinear.poincare', 'nl.poincare', (['intervalR'], {'show': 
'(False)'}), '(intervalR, show=False)\n', (20534, 20557), True, 'import pyhrv.nonlinear as nl\n'), ((21704, 21738), 'numpy.concatenate', 'np.concatenate', (['(features, pcData)'], {}), '((features, pcData))\n', (21718, 21738), True, 'import numpy as np\n'), ((3669, 3710), 'wfdb.processing.xqrs_detect', 'processing.xqrs_detect', ([], {'sig': 'data', 'fs': 'freq'}), '(sig=data, fs=freq)\n', (3691, 3710), False, 'from wfdb import processing\n'), ((3798, 3813), 'numpy.diff', 'np.diff', (['indexR'], {}), '(indexR)\n', (3805, 3813), True, 'import numpy as np\n'), ((4018, 4031), 'numpy.array', 'np.array', (['[0]'], {}), '([0])\n', (4026, 4031), True, 'import numpy as np\n'), ((4056, 4069), 'numpy.array', 'np.array', (['[0]'], {}), '([0])\n', (4064, 4069), True, 'import numpy as np\n'), ((11079, 11092), 'numpy.mean', 'np.mean', (['lead'], {}), '(lead)\n', (11086, 11092), True, 'import numpy as np\n'), ((11117, 11132), 'numpy.median', 'np.median', (['lead'], {}), '(lead)\n', (11126, 11132), True, 'import numpy as np\n'), ((11157, 11169), 'numpy.std', 'np.std', (['lead'], {}), '(lead)\n', (11163, 11169), True, 'import numpy as np\n'), ((11194, 11210), 'scipy.stats.tvar', 'stats.tvar', (['lead'], {}), '(lead)\n', (11204, 11210), False, 'from scipy import stats\n'), ((11235, 11251), 'scipy.stats.skew', 'stats.skew', (['lead'], {}), '(lead)\n', (11245, 11251), False, 'from scipy import stats\n'), ((11276, 11296), 'scipy.stats.kurtosis', 'stats.kurtosis', (['lead'], {}), '(lead)\n', (11290, 11296), False, 'from scipy import stats\n'), ((11321, 11334), 'numpy.amax', 'np.amax', (['lead'], {}), '(lead)\n', (11328, 11334), True, 'import numpy as np\n'), ((11359, 11372), 'numpy.amin', 'np.amin', (['lead'], {}), '(lead)\n', (11366, 11372), True, 'import numpy as np\n'), ((11971, 11984), 'numpy.amax', 'np.amax', (['item'], {}), '(item)\n', (11978, 11984), True, 'import numpy as np\n'), ((12012, 12025), 'numpy.amin', 'np.amin', (['item'], {}), '(item)\n', (12019, 12025), True, 'import 
numpy as np\n'), ((12053, 12066), 'numpy.mean', 'np.mean', (['item'], {}), '(item)\n', (12060, 12066), True, 'import numpy as np\n'), ((12094, 12109), 'numpy.median', 'np.median', (['item'], {}), '(item)\n', (12103, 12109), True, 'import numpy as np\n'), ((12137, 12149), 'numpy.std', 'np.std', (['item'], {}), '(item)\n', (12143, 12149), True, 'import numpy as np\n'), ((12177, 12193), 'scipy.stats.tvar', 'stats.tvar', (['item'], {}), '(item)\n', (12187, 12193), False, 'from scipy import stats\n'), ((12221, 12237), 'scipy.stats.skew', 'stats.skew', (['item'], {}), '(item)\n', (12231, 12237), False, 'from scipy import stats\n'), ((12265, 12285), 'scipy.stats.kurtosis', 'stats.kurtosis', (['item'], {}), '(item)\n', (12279, 12285), False, 'from scipy import stats\n'), ((12417, 12430), 'numpy.mean', 'np.mean', (['item'], {}), '(item)\n', (12424, 12430), True, 'import numpy as np\n'), ((12462, 12477), 'numpy.median', 'np.median', (['item'], {}), '(item)\n', (12471, 12477), True, 'import numpy as np\n'), ((12509, 12521), 'numpy.std', 'np.std', (['item'], {}), '(item)\n', (12515, 12521), True, 'import numpy as np\n'), ((12553, 12569), 'scipy.stats.tvar', 'stats.tvar', (['item'], {}), '(item)\n', (12563, 12569), False, 'from scipy import stats\n'), ((12601, 12617), 'scipy.stats.skew', 'stats.skew', (['item'], {}), '(item)\n', (12611, 12617), False, 'from scipy import stats\n'), ((12649, 12669), 'scipy.stats.kurtosis', 'stats.kurtosis', (['item'], {}), '(item)\n', (12663, 12669), False, 'from scipy import stats\n'), ((12701, 12714), 'numpy.amax', 'np.amax', (['item'], {}), '(item)\n', (12708, 12714), True, 'import numpy as np\n'), ((12746, 12759), 'numpy.amin', 'np.amin', (['item'], {}), '(item)\n', (12753, 12759), True, 'import numpy as np\n'), ((17641, 17654), 'numpy.mean', 'np.mean', (['lead'], {}), '(lead)\n', (17648, 17654), True, 'import numpy as np\n'), ((17679, 17694), 'numpy.median', 'np.median', (['lead'], {}), '(lead)\n', (17688, 17694), True, 'import numpy as 
np\n'), ((17719, 17731), 'numpy.std', 'np.std', (['lead'], {}), '(lead)\n', (17725, 17731), True, 'import numpy as np\n'), ((17756, 17772), 'scipy.stats.tvar', 'stats.tvar', (['lead'], {}), '(lead)\n', (17766, 17772), False, 'from scipy import stats\n'), ((17797, 17813), 'scipy.stats.skew', 'stats.skew', (['lead'], {}), '(lead)\n', (17807, 17813), False, 'from scipy import stats\n'), ((17838, 17858), 'scipy.stats.kurtosis', 'stats.kurtosis', (['lead'], {}), '(lead)\n', (17852, 17858), False, 'from scipy import stats\n'), ((17883, 17896), 'numpy.amax', 'np.amax', (['lead'], {}), '(lead)\n', (17890, 17896), True, 'import numpy as np\n'), ((17921, 17934), 'numpy.amin', 'np.amin', (['lead'], {}), '(lead)\n', (17928, 17934), True, 'import numpy as np\n'), ((18517, 18530), 'numpy.amax', 'np.amax', (['item'], {}), '(item)\n', (18524, 18530), True, 'import numpy as np\n'), ((18558, 18571), 'numpy.amin', 'np.amin', (['item'], {}), '(item)\n', (18565, 18571), True, 'import numpy as np\n'), ((18599, 18612), 'numpy.mean', 'np.mean', (['item'], {}), '(item)\n', (18606, 18612), True, 'import numpy as np\n'), ((18640, 18655), 'numpy.median', 'np.median', (['item'], {}), '(item)\n', (18649, 18655), True, 'import numpy as np\n'), ((18683, 18695), 'numpy.std', 'np.std', (['item'], {}), '(item)\n', (18689, 18695), True, 'import numpy as np\n'), ((18723, 18739), 'scipy.stats.tvar', 'stats.tvar', (['item'], {}), '(item)\n', (18733, 18739), False, 'from scipy import stats\n'), ((18767, 18783), 'scipy.stats.skew', 'stats.skew', (['item'], {}), '(item)\n', (18777, 18783), False, 'from scipy import stats\n'), ((18811, 18831), 'scipy.stats.kurtosis', 'stats.kurtosis', (['item'], {}), '(item)\n', (18825, 18831), False, 'from scipy import stats\n'), ((18955, 18968), 'numpy.mean', 'np.mean', (['item'], {}), '(item)\n', (18962, 18968), True, 'import numpy as np\n'), ((19000, 19015), 'numpy.median', 'np.median', (['item'], {}), '(item)\n', (19009, 19015), True, 'import numpy as np\n'), ((19047, 
19059), 'numpy.std', 'np.std', (['item'], {}), '(item)\n', (19053, 19059), True, 'import numpy as np\n'), ((19091, 19107), 'scipy.stats.tvar', 'stats.tvar', (['item'], {}), '(item)\n', (19101, 19107), False, 'from scipy import stats\n'), ((19139, 19155), 'scipy.stats.skew', 'stats.skew', (['item'], {}), '(item)\n', (19149, 19155), False, 'from scipy import stats\n'), ((19187, 19207), 'scipy.stats.kurtosis', 'stats.kurtosis', (['item'], {}), '(item)\n', (19201, 19207), False, 'from scipy import stats\n'), ((19239, 19252), 'numpy.amax', 'np.amax', (['item'], {}), '(item)\n', (19246, 19252), True, 'import numpy as np\n'), ((19284, 19297), 'numpy.amin', 'np.amin', (['item'], {}), '(item)\n', (19291, 19297), True, 'import numpy as np\n'), ((2800, 2829), 'numpy.subtract', 'subtract', (['ts[lag:]', 'ts[:-lag]'], {}), '(ts[lag:], ts[:-lag])\n', (2808, 2829), False, 'from numpy import cumsum, log, polyfit, sqrt, std, subtract\n'), ((3218, 3230), 'numpy.std', 'np.std', (['data'], {}), '(data)\n', (3224, 3230), True, 'import numpy as np\n'), ((13541, 13556), 'numpy.mean', 'np.mean', (['sumary'], {}), '(sumary)\n', (13548, 13556), True, 'import numpy as np\n'), ((13596, 13613), 'numpy.median', 'np.median', (['sumary'], {}), '(sumary)\n', (13605, 13613), True, 'import numpy as np\n'), ((13653, 13667), 'numpy.std', 'np.std', (['sumary'], {}), '(sumary)\n', (13659, 13667), True, 'import numpy as np\n'), ((13707, 13725), 'scipy.stats.tvar', 'stats.tvar', (['sumary'], {}), '(sumary)\n', (13717, 13725), False, 'from scipy import stats\n'), ((13765, 13783), 'scipy.stats.skew', 'stats.skew', (['sumary'], {}), '(sumary)\n', (13775, 13783), False, 'from scipy import stats\n'), ((13823, 13845), 'scipy.stats.kurtosis', 'stats.kurtosis', (['sumary'], {}), '(sumary)\n', (13837, 13845), False, 'from scipy import stats\n'), ((13885, 13900), 'numpy.amax', 'np.amax', (['sumary'], {}), '(sumary)\n', (13892, 13900), True, 'import numpy as np\n'), ((13940, 13955), 'numpy.amin', 'np.amin', 
(['sumary'], {}), '(sumary)\n', (13947, 13955), True, 'import numpy as np\n'), ((20035, 20050), 'numpy.mean', 'np.mean', (['sumary'], {}), '(sumary)\n', (20042, 20050), True, 'import numpy as np\n'), ((20090, 20107), 'numpy.median', 'np.median', (['sumary'], {}), '(sumary)\n', (20099, 20107), True, 'import numpy as np\n'), ((20147, 20161), 'numpy.std', 'np.std', (['sumary'], {}), '(sumary)\n', (20153, 20161), True, 'import numpy as np\n'), ((20201, 20219), 'scipy.stats.tvar', 'stats.tvar', (['sumary'], {}), '(sumary)\n', (20211, 20219), False, 'from scipy import stats\n'), ((20259, 20277), 'scipy.stats.skew', 'stats.skew', (['sumary'], {}), '(sumary)\n', (20269, 20277), False, 'from scipy import stats\n'), ((20317, 20339), 'scipy.stats.kurtosis', 'stats.kurtosis', (['sumary'], {}), '(sumary)\n', (20331, 20339), False, 'from scipy import stats\n'), ((20379, 20394), 'numpy.amax', 'np.amax', (['sumary'], {}), '(sumary)\n', (20386, 20394), True, 'import numpy as np\n'), ((20434, 20449), 'numpy.amin', 'np.amin', (['sumary'], {}), '(sumary)\n', (20441, 20449), True, 'import numpy as np\n'), ((2116, 2157), 'scipy.signal.iirnotch', 'iirnotch', (['cutoff'], {'Q': '(0.005)', 'fs': 'sample_rate'}), '(cutoff, Q=0.005, fs=sample_rate)\n', (2124, 2157), False, 'from scipy.signal import butter, filtfilt, iirnotch, savgol_filter\n'), ((3197, 3210), 'numpy.mean', 'np.mean', (['data'], {}), '(data)\n', (3204, 3210), True, 'import numpy as np\n'), ((13125, 13141), 'numpy.mean', 'np.mean', (['segment'], {}), '(segment)\n', (13132, 13141), True, 'import numpy as np\n'), ((13191, 13206), 'numpy.std', 'np.std', (['segment'], {}), '(segment)\n', (13197, 13206), True, 'import numpy as np\n'), ((19635, 19651), 'numpy.mean', 'np.mean', (['segment'], {}), '(segment)\n', (19642, 19651), True, 'import numpy as np\n'), ((19701, 19716), 'numpy.std', 'np.std', (['segment'], {}), '(segment)\n', (19707, 19716), True, 'import numpy as np\n')] |
# Interactive CLI for pyAPisolation batch feature extraction.
# The progress prints bracket the (slow) heavy imports below so the user
# sees that startup is underway.
print("Loading...")
import sys
import logging
# Enable DEBUG output globally as soon as logging is importable.
logging.getLogger().setLevel(logging.DEBUG)
import os
import tkinter as tk
from tkinter import filedialog
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import pyabf
from numpy import genfromtxt
print("Loaded external libraries")
from pyAPisolation.abf_featureextractor import folder_feature_extract, save_data_frames
from pyAPisolation.patch_utils import load_protocols
print("Load finished")
def main():
    """Interactively run pyAPisolation spike-feature extraction on a folder.

    Opens a directory-picker dialog, prompts on the console for each
    analysis parameter (falling back to documented defaults on invalid
    input), runs ``folder_feature_extract`` over the chosen folder, saves
    the result tables, and writes the settings used to
    ``analysis_settings_<tag>.csv`` in the same folder for reproducibility.

    Performs console/file I/O only; returns None.
    """
    logging.basicConfig(level=logging.DEBUG)
    # Hidden Tk root: we only need the directory-picker dialog.
    root = tk.Tk()
    root.withdraw()
    files = filedialog.askdirectory(
        title='Select Dir'
    )
    root_fold = files
    ##Declare our options at default
    print('loading protocols...')
    protocol_n = load_protocols(files)
    print("protocols")
    for i, x in enumerate(protocol_n):
        print(str(i) + '. ' + str(x))
    proto = input("enter Protocol to analyze (enter -1 to not filter to any protocol): ")
    try:
        proto = int(proto)
    except ValueError:
        proto = -1
    # 'allen_filter' feeds param_dict['filter']; renamed locally so it no
    # longer shadows the builtin filter().
    allen_filter = input("Allen's Gaussian Filter (recommended to be set to 0): ")
    try:
        allen_filter = int(allen_filter)
    except ValueError:
        allen_filter = 0
    savfilter = input("Savitzky-Golay Filter (recommended to be set in 0): ")
    try:
        savfilter = int(savfilter)
    except ValueError:
        savfilter = 0
    # Tag appended to output file names; empty string when nothing entered.
    tag = str(input("tag to apply output to files: "))
    plot_sweeps = input("Enter the sweep Numbers to plot [seperated by a comma] (0 to plot all sweeps, -1 to plot no sweeps): ")
    try:
        plot_sweeps = np.fromstring(plot_sweeps, dtype=int, sep=',')
        if plot_sweeps.shape[0] < 1:
            # Nothing parseable entered -> plot no sweeps.
            plot_sweeps = np.array([-1])
    except ValueError:
        plot_sweeps = -1
    protocol_name = '' if proto == -1 else protocol_n[proto]
    dv_cut = input("Enter the threshold cut off for the derivative (defaults to 7mv/s): ")
    try:
        dv_cut = int(dv_cut)
    except ValueError:
        dv_cut = 7
    tp_cut = input("Enter the threshold cut off for max threshold-to-peak time (defaults to 10ms)[in ms]: ")
    try:
        tp_cut = np.float64(tp_cut) / 1000  # ms -> s
    except ValueError:
        tp_cut = 0.010
    min_cut = input("Enter the minimum cut off for threshold-to-peak voltage (defaults to 2mV)[in mV]: ")
    try:
        min_cut = np.float64(min_cut)
    except ValueError:
        min_cut = 2
    min_peak = input("Enter the mininum cut off for peak voltage (defaults to -10)[in mV]: ")
    try:
        min_peak = np.float64(min_peak)
    except ValueError:
        min_peak = -10
    percent = input("Enter the percent of max DvDt used to calculate refined threshold (does not effect spike detection)(Allen defaults 5%)[in %]: ")
    try:
        # BUG FIX: the raw input() string must be converted to a number
        # before dividing; the old `percent / 100` always raised TypeError
        # and silently discarded the user's entry in favor of the default.
        percent = float(percent) / 100
    except ValueError:
        percent = 5 / 100
    stim_find = input("Search for spikes based on applied Stimulus? (y/n): ")
    bstim_find = stim_find in ('y', 'Y')
    if bstim_find:
        # Stimulus-based search: the search window is derived per-sweep.
        upperlim = 0
        lowerlim = 0
    else:
        lowerlim = input("Enter the time to start looking for spikes [in s] (enter 0 to start search at beginning): ")
        upperlim = input("Enter the time to stop looking for spikes [in s] (enter 0 to search the full sweep): ")
        try:
            lowerlim = float(lowerlim)
            upperlim = float(upperlim)
        except ValueError:
            upperlim = 0
            lowerlim = 0
    print(f"Running analysis with, dVdt thresh: {dv_cut}mV/s, thresh to peak max: {tp_cut}s, thresh to peak min height: {min_cut}mV, and min peak voltage: {min_peak}mV")
    param_dict = {'filter': allen_filter, 'dv_cutoff': dv_cut, 'start': lowerlim, 'end': upperlim,
                  'max_interval': tp_cut, 'min_height': min_cut, 'min_peak': min_peak,
                  'thresh_frac': percent, 'stim_find': bstim_find}
    df = folder_feature_extract(files, param_dict, plot_sweeps, protocol_name)
    print(f"Ran analysis with, dVdt thresh: {dv_cut}mV/s, thresh to peak max: {tp_cut}s, thresh to peak min height: {min_cut}mV, and min peak voltage: {min_peak}mV")
    save_data_frames(df[0], df[1], df[2], root_fold, tag)
    # Record the settings used alongside the outputs for reproducibility.
    settings_col = ['dvdt Threshold', 'threshold to peak max time', 'threshold to peak min height', 'min peak voltage', 'allen filter', 'sav filter', 'protocol_name']
    setdata = [dv_cut, tp_cut, min_cut, min_peak, allen_filter, savfilter, protocol_name]
    settings_df = pd.DataFrame(data=[setdata], columns=settings_col, index=[0])
    settings_df.to_csv(root_fold + '/analysis_settings_' + tag + '.csv')
    print("==== SUCCESS ====")
    input('Press ENTER to exit')
# Run the interactive analysis only when executed as a script, not on import.
if __name__ == "__main__":
    main()
| [
"pandas.DataFrame",
"pyAPisolation.patch_utils.load_protocols",
"logging.basicConfig",
"tkinter.filedialog.askdirectory",
"numpy.fromstring",
"numpy.array",
"pyAPisolation.abf_featureextractor.folder_feature_extract",
"numpy.float64",
"pyAPisolation.abf_featureextractor.save_data_frames",
"tkinter... | [((491, 531), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.DEBUG'}), '(level=logging.DEBUG)\n', (510, 531), False, 'import logging\n'), ((543, 550), 'tkinter.Tk', 'tk.Tk', ([], {}), '()\n', (548, 550), True, 'import tkinter as tk\n'), ((583, 626), 'tkinter.filedialog.askdirectory', 'filedialog.askdirectory', ([], {'title': '"""Select Dir"""'}), "(title='Select Dir')\n", (606, 626), False, 'from tkinter import filedialog\n'), ((814, 835), 'pyAPisolation.patch_utils.load_protocols', 'load_protocols', (['files'], {}), '(files)\n', (828, 835), False, 'from pyAPisolation.patch_utils import load_protocols\n'), ((4013, 4082), 'pyAPisolation.abf_featureextractor.folder_feature_extract', 'folder_feature_extract', (['files', 'param_dict', 'plot_sweeps', 'protocol_name'], {}), '(files, param_dict, plot_sweeps, protocol_name)\n', (4035, 4082), False, 'from pyAPisolation.abf_featureextractor import folder_feature_extract, save_data_frames\n'), ((4253, 4306), 'pyAPisolation.abf_featureextractor.save_data_frames', 'save_data_frames', (['df[0]', 'df[1]', 'df[2]', 'root_fold', 'tag'], {}), '(df[0], df[1], df[2], root_fold, tag)\n', (4269, 4306), False, 'from pyAPisolation.abf_featureextractor import folder_feature_extract, save_data_frames\n'), ((4576, 4637), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': '[setdata]', 'columns': 'settings_col', 'index': '[0]'}), '(data=[setdata], columns=settings_col, index=[0])\n', (4588, 4637), True, 'import pandas as pd\n'), ((49, 68), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (66, 68), False, 'import logging\n'), ((1674, 1720), 'numpy.fromstring', 'np.fromstring', (['plot_sweeps'], {'dtype': 'int', 'sep': '""","""'}), "(plot_sweeps, dtype=int, sep=',')\n", (1687, 1720), True, 'import numpy as np\n'), ((2434, 2453), 'numpy.float64', 'np.float64', (['min_cut'], {}), '(min_cut)\n', (2444, 2453), True, 'import numpy as np\n'), ((2611, 2631), 'numpy.float64', 'np.float64', (['min_peak'], 
{}), '(min_peak)\n', (2621, 2631), True, 'import numpy as np\n'), ((1784, 1798), 'numpy.array', 'np.array', (['[-1]'], {}), '([-1])\n', (1792, 1798), True, 'import numpy as np\n'), ((2239, 2257), 'numpy.float64', 'np.float64', (['tp_cut'], {}), '(tp_cut)\n', (2249, 2257), True, 'import numpy as np\n')] |
"""
DFO_MoM.py
-- Update the MoM with DFO observations.
-- Reads Final_Attributes_yyyymmddhhHWRFUpdated.csv as the MoM file and DFO_yyyymmdd.csv as the DFO file.
-- Writes the output files Final_Attributes_yyyymmddhhMOM+DFOUpdated.csv and Attributes_Clean_yyyymmddhhMOM+DFOUpdated.csv.
"""
import csv,sys
import pandas as pd
import os
import scipy.stats
import numpy as np
# data file
#DFO_20210618.csv
#Final_Attributes_20210618.csv
#Attributes_Clean_20200618.csv
def read_data(file):
    """Read a CSV file into a pandas DataFrame.

    Parameters
    ----------
    file : str or file-like
        Path or buffer accepted by :func:`pandas.read_csv`.

    Returns
    -------
    pandas.DataFrame
        The parsed table.
    """
    # pd.read_csv already returns a DataFrame; the former pd.DataFrame(df)
    # re-wrap was a redundant copy and has been removed.
    return pd.read_csv(file)
def mofunc_dfo(row):
    """Map a row's flood Severity / Hazard_Score to an alert level.

    Bands (highest matching level wins):
        Warning     : Severity > 0.8  or Hazard_Score > 80
        Watch       : Severity > 0.6  or Hazard_Score > 60
        Advisory    : Severity > 0.35 or Hazard_Score > 35
        Information : Severity > 0    or Hazard_Score > 0

    Parameters
    ----------
    row : mapping (e.g. pandas Series) with 'Severity' and 'Hazard_Score'.

    Returns
    -------
    str or None
        The alert level, or None when both values are non-positive.

    BUG FIX: the original chained comparisons (e.g. ``0.6 < x < 0.80``)
    excluded both endpoints, so values exactly on a band boundary
    (Severity 0.35/0.6/0.8, Hazard_Score 35/60/80) matched no band and
    returned None. Boundaries now fall into the band below the threshold.
    """
    severity = row['Severity']
    hazard = row['Hazard_Score']
    if severity > 0.8 or hazard > 80:
        return 'Warning'
    if severity > 0.6 or hazard > 60:
        return 'Watch'
    if severity > 0.35 or hazard > 35:
        return 'Advisory'
    if severity > 0 or hazard > 0:
        return 'Information'
    return None
def _dfo_score(value, weight, max_pt, min_pt, multiplier):
    # Capped linear score: value/weight scaled into points, limited to max_pt.
    if float(value) / float(weight) > float(max_pt):
        return str(float(max_pt) * float(multiplier))
    return str(float(min_pt) * float(multiplier) * float(value) / float(weight))
def update_DFO_MoM(adate, DFOfolder, MoMfolder, Outputfolder):
    '''Update MoM with DFO flood observations for a given date (yyyymmdd).

    Reads the HWRF-updated MoM attributes (regenerating them through
    update_HWRF_MoM when missing), scores the day's DFO summary CSV, merges
    the two on pfaf_id and writes Final_Attributes_*/Attributes_Clean_* CSVs
    into Outputfolder.

    Bug fix: the "already processed" check previously tested the *string*
    Attributes_Clean_csv (always truthy) instead of the file's existence.
    '''
    hh = "18"
    MOMOutput = MoMfolder + 'Final_Attributes_{}{}HWRFUpdated.csv'.format(adate, hh)
    if not os.path.exists(MOMOutput):
        # HWRF-updated MoM file is missing: regenerate it in place.
        print("HWRFupdate is not exists:", adate + hh)
        home = os.path.expanduser("~")
        if os.path.exists(home + "/Projects"):
            home = home + "/Projects/"
        gfmsf = home + "/ModelofModels/data/cron_data/gfms/"
        glofasf = home + "/ModelofModels/data/cron_data/glofas/"
        hwrff = home + "/ModelofModels/data/cron_data/HWRF/HWRF_summary/"
        HWRF_codef = home + "/ModelofModels/HWRF_Rainfall_Processing"
        curdir = os.getcwd()
        os.chdir(HWRF_codef)
        if not HWRF_codef in sys.path:
            sys.path.insert(0, HWRF_codef)
        from HWRF_MoM import update_HWRF_MoM
        update_HWRF_MoM(adate + hh, gfmsf, glofasf, hwrff, MoMfolder)
        os.chdir(curdir)
    DFO = DFOfolder + "DFO_" + adate + '.csv'
    # Outputs:
    # Final_Attributes_yyyymmddhhMOM+DFOUpdated.csv
    # Attributes_clean_yyyymmddhhMOM+DFOUpdated.csv
    Final_Attributes_csv = Outputfolder + 'Final_Attributes_{}{}MOM+DFOUpdated.csv'.format(adate, hh)
    Attributes_Clean_csv = Outputfolder + 'Attributes_Clean_{}{}MOM+DFOUpdated.csv'.format(adate, hh)
    # Skip dates that were already processed (both output files must exist).
    if os.path.exists(Final_Attributes_csv) and os.path.exists(Attributes_Clean_csv):
        print('already processed: ', adate)
        return
    weightage = read_data('Weightage_DFO.csv')
    Attributes = read_data('Attributes.csv')
    PDC_resilience = read_data('Copy of Resilience_Index.csv')
    add_field_DFO = ['DFO_area_1day_score', 'DFO_percarea_1day_score', 'DFO_area_2day_score', 'DFO_percarea_2day_score', 'DFO_area_3day_score', 'DFO_percarea_3day_score', 'DFOTotal_Score']
    # Read the raw DFO summary and append the score columns row by row.
    with open(DFO, 'r', encoding='UTF-8') as DFO_file:
        DFO_reader = csv.reader(DFO_file)
        DFO_w_score_csv = "DFO_w_score_{}.csv".format(adate)
        csvfile = open(DFO_w_score_csv, 'w', newline='\n', encoding='utf-8')
        DFO_w_score = csv.writer(csvfile)
        row_count = 1
        for row in DFO_reader:
            if row_count == 1:
                # Header row: extend with the new score column names.
                for x in add_field_DFO:
                    row.append(x)
                row_count = row_count + 1
            else:
                # Columns 4..9 hold 1/2/3-day area and percent-area values.
                DFO_area_1day_score = _dfo_score(row[4], weightage.DFO_Area_wt, weightage.DFO_Area_max_pt, weightage.DFO_Area_Min_pt, weightage.one_Day_Multiplier)
                DFO_perc_area_1day_score = _dfo_score(row[5], weightage.DFO_percArea_wt, weightage.DFO_percArea_Maxpt, weightage.DFO_percArea_Minpt, weightage.one_Day_Multiplier)
                DFO_area_2day_score = _dfo_score(row[6], weightage.DFO_Area_wt, weightage.DFO_Area_max_pt, weightage.DFO_Area_Min_pt, weightage.two_Day_Multiplier)
                DFO_perc_area_2day_score = _dfo_score(row[7], weightage.DFO_percArea_wt, weightage.DFO_percArea_Maxpt, weightage.DFO_percArea_Minpt, weightage.two_Day_Multiplier)
                DFO_area_3day_score = _dfo_score(row[8], weightage.DFO_Area_wt, weightage.DFO_Area_max_pt, weightage.DFO_Area_Min_pt, weightage.three_Day_Multiplier)
                DFO_perc_area_3day_score = _dfo_score(row[9], weightage.DFO_percArea_wt, weightage.DFO_percArea_Maxpt, weightage.DFO_percArea_Minpt, weightage.three_Day_Multiplier)
                Sum_Score = str(
                    (float(DFO_area_1day_score) + float(DFO_perc_area_1day_score) + float(DFO_area_2day_score) + float(DFO_perc_area_2day_score) + float(DFO_area_3day_score) + float(DFO_perc_area_3day_score)))
                score_field = [DFO_area_1day_score, DFO_perc_area_1day_score, DFO_area_2day_score, DFO_perc_area_2day_score, DFO_area_3day_score, DFO_perc_area_3day_score, Sum_Score]
                for x in score_field:
                    row.append(x)
            DFO_w_score.writerow(row)
        csvfile.close()
    DFO = read_data(DFO_w_score_csv)
    DFO = DFO[DFO.DFOTotal_Score > 0.1]  # keep only watersheds with a meaningful DFO score
    DFO = DFO.iloc[:, 1:]
    MOM = read_data(MOMOutput)
    MOM.drop(columns=['area_km2', 'ISO', 'Admin0', 'Admin1', 'rfr_score', 'cfr_score', 'Resilience_Index', ' NormalizedLackofResilience ', 'Severity', 'Alert'], inplace=True)
    Final_Output_0 = pd.merge(MOM.set_index('pfaf_id'), DFO.set_index('pfaf_id'), on='pfaf_id', how='outer')
    join1 = pd.merge(Attributes, PDC_resilience[['ISO', 'Resilience_Index', ' NormalizedLackofResilience ']], on='ISO', how='inner')
    Final_Output = pd.merge(join1.set_index('pfaf_id'), Final_Output_0, on='pfaf_id', how='outer')
    Final_Output[['Hazard_Score']] = Final_Output[['Hazard_Score']].fillna(value=0)
    # Flag=2 marks rows whose hazard is driven by the DFO score.
    Final_Output.loc[(Final_Output['Hazard_Score'] < Final_Output['DFOTotal_Score']), 'Flag'] = 2
    Final_Output['Hazard_Score'] = Final_Output[['Hazard_Score', 'DFOTotal_Score']].max(axis=1)
    Final_Output = Final_Output[Final_Output.Hazard_Score != 0]
    Final_Output.drop(Final_Output.index[(Final_Output['rfr_score'] == 0) & (Final_Output['cfr_score'] == 0)], inplace=True)
    Final_Output = Final_Output.assign(
        Scaled_Riverine_Risk=lambda x: Final_Output['rfr_score'] * 20)
    Final_Output = Final_Output.assign(
        Scaled_Coastal_Risk=lambda x: Final_Output['cfr_score'] * 20)
    Final_Output = Final_Output.assign(
        Severity=lambda x: scipy.stats.norm(np.log(100 - Final_Output[['Scaled_Riverine_Risk', 'Scaled_Coastal_Risk']].max(axis=1)), 1).cdf(
            np.log(Final_Output['Hazard_Score'])))
    Final_Output['Alert'] = Final_Output.apply(mofunc_dfo, axis=1)
    # Information/Advisory alerts do not keep the DFO flag.
    Final_Output.loc[Final_Output['Alert'] == "Information", 'Flag'] = ''
    Final_Output.loc[Final_Output['Alert'] == "Advisory", 'Flag'] = ''
    Final_Output.to_csv(Final_Attributes_csv, encoding='utf-8-sig')
    Attributes_Clean_DFO_Updated = pd.merge(join1.set_index('pfaf_id'), Final_Output[['Alert', 'Flag']], on='pfaf_id', how='right')
    Attributes_Clean_DFO_Updated.to_csv(Attributes_Clean_csv, encoding='utf-8-sig')
    os.remove(DFO_w_score_csv)
    return
def batchrun():
    """Run update_DFO_MoM for every DFO summary CSV found on disk."""
    home = os.path.expanduser("~")
    if os.path.exists(home + "/Projects"):
        home = home + "/Projects"
    base = home + "/ModelofModels/data/cron_data/"
    dfo_folder = base + "DFO/DFO_summary/"
    mom_folder = base + "HWRF/HWRF_MoM/"
    output_folder = base + "DFO/DFO_MoM/"
    # Process the summary files in sorted (chronological) order.
    for entry in sorted(os.listdir(dfo_folder)):
        if '.csv' not in entry:
            continue
        datestr = entry[:-4].split('_')[1]
        print('Processing: ', datestr)
        update_DFO_MoM(datestr, dfo_folder, mom_folder, output_folder)
def main():
    """Entry point: run the batch DFO -> MoM update over all summary files."""
    #testdate = "20210618"
    #update_DFO_MoM(testdate)
    batchrun()
if __name__ == "__main__":
    main()
"pandas.DataFrame",
"os.remove",
"csv.reader",
"csv.writer",
"numpy.log",
"pandas.read_csv",
"os.getcwd",
"pandas.merge",
"os.path.exists",
"sys.path.insert",
"os.chdir",
"HWRF_MoM.update_HWRF_MoM",
"os.path.expanduser",
"os.listdir"
] | [((499, 516), 'pandas.read_csv', 'pd.read_csv', (['file'], {}), '(file)\n', (510, 516), True, 'import pandas as pd\n'), ((526, 542), 'pandas.DataFrame', 'pd.DataFrame', (['df'], {}), '(df)\n', (538, 542), True, 'import pandas as pd\n'), ((6890, 7014), 'pandas.merge', 'pd.merge', (['Attributes', "PDC_resilience[['ISO', 'Resilience_Index', ' NormalizedLackofResilience ']]"], {'on': '"""ISO"""', 'how': '"""inner"""'}), "(Attributes, PDC_resilience[['ISO', 'Resilience_Index',\n ' NormalizedLackofResilience ']], on='ISO', how='inner')\n", (6898, 7014), True, 'import pandas as pd\n'), ((8391, 8515), 'pandas.merge', 'pd.merge', (['Attributes', "PDC_resilience[['ISO', 'Resilience_Index', ' NormalizedLackofResilience ']]"], {'on': '"""ISO"""', 'how': '"""inner"""'}), "(Attributes, PDC_resilience[['ISO', 'Resilience_Index',\n ' NormalizedLackofResilience ']], on='ISO', how='inner')\n", (8399, 8515), True, 'import pandas as pd\n'), ((8731, 8757), 'os.remove', 'os.remove', (['DFO_w_score_csv'], {}), '(DFO_w_score_csv)\n', (8740, 8757), False, 'import os\n'), ((8799, 8822), 'os.path.expanduser', 'os.path.expanduser', (['"""~"""'], {}), "('~')\n", (8817, 8822), False, 'import os\n'), ((8830, 8864), 'os.path.exists', 'os.path.exists', (["(home + '/Projects')"], {}), "(home + '/Projects')\n", (8844, 8864), False, 'import os\n'), ((9491, 9513), 'os.listdir', 'os.listdir', (['DFO_folder'], {}), '(DFO_folder)\n', (9501, 9513), False, 'import os\n'), ((1169, 1194), 'os.path.exists', 'os.path.exists', (['MOMOutput'], {}), '(MOMOutput)\n', (1183, 1194), False, 'import os\n'), ((1264, 1287), 'os.path.expanduser', 'os.path.expanduser', (['"""~"""'], {}), "('~')\n", (1282, 1287), False, 'import os\n'), ((1299, 1333), 'os.path.exists', 'os.path.exists', (["(home + '/Projects')"], {}), "(home + '/Projects')\n", (1313, 1333), False, 'import os\n'), ((1661, 1672), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (1670, 1672), False, 'import os\n'), ((1681, 1701), 'os.chdir', 'os.chdir', 
(['HWRF_codef'], {}), '(HWRF_codef)\n', (1689, 1701), False, 'import os\n'), ((1837, 1898), 'HWRF_MoM.update_HWRF_MoM', 'update_HWRF_MoM', (['(adate + hh)', 'gfmsf', 'glofasf', 'hwrff', 'MoMfolder'], {}), '(adate + hh, gfmsf, glofasf, hwrff, MoMfolder)\n', (1852, 1898), False, 'from HWRF_MoM import update_HWRF_MoM\n'), ((1910, 1926), 'os.chdir', 'os.chdir', (['curdir'], {}), '(curdir)\n', (1918, 1926), False, 'import os\n'), ((2322, 2358), 'os.path.exists', 'os.path.exists', (['Final_Attributes_csv'], {}), '(Final_Attributes_csv)\n', (2336, 2358), False, 'import os\n'), ((2913, 2933), 'csv.reader', 'csv.reader', (['DFO_file'], {}), '(DFO_file)\n', (2923, 2933), False, 'import csv, sys\n'), ((3094, 3113), 'csv.writer', 'csv.writer', (['csvfile'], {}), '(csvfile)\n', (3104, 3113), False, 'import csv, sys\n'), ((1753, 1783), 'sys.path.insert', 'sys.path.insert', (['(0)', 'HWRF_codef'], {}), '(0, HWRF_codef)\n', (1768, 1783), False, 'import csv, sys\n'), ((7979, 8015), 'numpy.log', 'np.log', (["Final_Output['Hazard_Score']"], {}), "(Final_Output['Hazard_Score'])\n", (7985, 8015), True, 'import numpy as np\n')] |
import numpy as np
import numpy.linalg as la
class StateSpace(object):
    """Continuous-time LTI state-space model  x' = a x + b u,  y = c x + d u,
    discretized with the implicit (backward) Euler method.

    Call initialize(dt) before step()/run().

    Bug fixes vs. the original:
      * run() allocated the output trajectory with n rows (states) instead of
        p rows (outputs), returning a wrongly shaped/duplicated array whenever
        c is not the identity.
      * run() crashed with np.dot(..., None) when no default input u0 was
        supplied; it now falls back to the current input.
    """
    def __init__(self, a, b, c=None, d=None, x0=None, u0=None):
        self.a = a                 # state matrix (n x n)
        self.b = b                 # input matrix (n x m)
        self.c = c                 # output matrix (p x n); identity if None
        self.d = d                 # feedthrough (p x m); zeros if None
        self.x0 = x0               # initial state; zeros if None
        self.x = x0
        self.u0 = u0               # default input used by run(); zeros if None
        self.n = self.a.shape[0]   # number of states
        self.m = self.b.shape[1]   # number of inputs
        self.dt = -1.0             # sentinel until initialize() is called
    def initialize(self, dt, u0=None):
        """Fix the time step, fill in defaults and precompute the implicit-Euler
        propagators.  Returns the initial output y(0)."""
        self.dt = dt
        self.n = self.a.shape[0]
        self.m = self.b.shape[1]
        if self.c is None:
            self.c = np.eye(self.n)
        self.p = self.c.shape[0]   # number of outputs
        if self.d is None:
            self.d = np.zeros((self.p, self.m))
        if self.x0 is None:
            self.x = np.zeros((self.n, 1))
        else:
            self.x = self.x0
        if u0 is not None:
            self.u = u0
        elif self.u0 is not None:
            self.u = self.u0
        else:
            self.u = np.zeros((self.m, 1))
        eye = np.eye(self.n)
        # Backward Euler: x[k+1] = (I - dt*a)^-1 (x[k] + dt*b u[k+1])
        self.apr = la.inv(eye - dt * self.a)
        self.bpr = np.dot(self.apr, dt * self.b)
        self.y = np.dot(self.c, self.x) + np.dot(self.d, self.u)
        return self.y
    def step(self, u):
        """Advance one time step with input u (m x 1); returns y (p x 1)."""
        self.u = u
        self.x = np.dot(self.apr, self.x) + np.dot(self.bpr, self.u)
        self.y = np.dot(self.c, self.x) + np.dot(self.d, self.u)
        return self.y
    def run(self, tf):
        """Simulate from t=0 to tf with a constant input (u0 if given,
        otherwise the current input).  Returns (t, y) with y of shape
        (p, t.size).  Requires initialize() to have been called."""
        t = np.arange(0.0, tf, self.dt)
        # The output trajectory has p rows (outputs), not n (states).
        y = np.zeros((self.p, t.size))
        y[:, 0:1] = self.y
        u = self.u0 if self.u0 is not None else self.u
        for i in range(1, t.size):
            y[:, i:i+1] = self.step(u)
        return t, y
| [
"numpy.eye",
"numpy.zeros",
"numpy.linalg.inv",
"numpy.arange",
"numpy.dot"
] | [((985, 999), 'numpy.eye', 'np.eye', (['self.n'], {}), '(self.n)\n', (991, 999), True, 'import numpy as np\n'), ((1020, 1045), 'numpy.linalg.inv', 'la.inv', (['(eye - dt * self.a)'], {}), '(eye - dt * self.a)\n', (1026, 1045), True, 'import numpy.linalg as la\n'), ((1065, 1094), 'numpy.dot', 'np.dot', (['self.apr', '(dt * self.b)'], {}), '(self.apr, dt * self.b)\n', (1071, 1094), True, 'import numpy as np\n'), ((1422, 1449), 'numpy.arange', 'np.arange', (['(0.0)', 'tf', 'self.dt'], {}), '(0.0, tf, self.dt)\n', (1431, 1449), True, 'import numpy as np\n'), ((1462, 1488), 'numpy.zeros', 'np.zeros', (['(self.n, t.size)'], {}), '((self.n, t.size))\n', (1470, 1488), True, 'import numpy as np\n'), ((545, 559), 'numpy.eye', 'np.eye', (['self.n'], {}), '(self.n)\n', (551, 559), True, 'import numpy as np\n'), ((643, 669), 'numpy.zeros', 'np.zeros', (['(self.p, self.m)'], {}), '((self.p, self.m))\n', (651, 669), True, 'import numpy as np\n'), ((720, 741), 'numpy.zeros', 'np.zeros', (['(self.n, 1)'], {}), '((self.n, 1))\n', (728, 741), True, 'import numpy as np\n'), ((1113, 1135), 'numpy.dot', 'np.dot', (['self.c', 'self.x'], {}), '(self.c, self.x)\n', (1119, 1135), True, 'import numpy as np\n'), ((1138, 1160), 'numpy.dot', 'np.dot', (['self.d', 'self.u'], {}), '(self.d, self.u)\n', (1144, 1160), True, 'import numpy as np\n'), ((1245, 1269), 'numpy.dot', 'np.dot', (['self.apr', 'self.x'], {}), '(self.apr, self.x)\n', (1251, 1269), True, 'import numpy as np\n'), ((1272, 1296), 'numpy.dot', 'np.dot', (['self.bpr', 'self.u'], {}), '(self.bpr, self.u)\n', (1278, 1296), True, 'import numpy as np\n'), ((1314, 1336), 'numpy.dot', 'np.dot', (['self.c', 'self.x'], {}), '(self.c, self.x)\n', (1320, 1336), True, 'import numpy as np\n'), ((1339, 1361), 'numpy.dot', 'np.dot', (['self.d', 'self.u'], {}), '(self.d, self.u)\n', (1345, 1361), True, 'import numpy as np\n'), ((948, 969), 'numpy.zeros', 'np.zeros', (['(self.m, 1)'], {}), '((self.m, 1))\n', (956, 969), True, 'import numpy as 
np\n')] |
# -*- coding: utf-8 -*-
# !/usr/bin/python
import os
import sys
import random
import cv2
from PIL import Image
import cv2 as cv, numpy as np
from PIL import ImageFilter
type_list = [16, 19]  # supported card-number lengths: 16 or 19 digits
number_list = ["0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "0"]
ftp = open("./bank_txt") # prefixes (BINs) that different banks' card numbers start with
b_list = []
for line in ftp:
    b_list.append(line.strip())
vfp_map = open("./6855map.txt", 'r') # character-to-label-id dictionary
map_dic = dict()
for iline in vfp_map:
    iline = iline.split(" ")
    # NOTE(review): unicode() is Python 2 only; this module will not run under Python 3 as-is.
    word_zn = unicode(iline[1].strip(), 'utf-8')
    map_dic[word_zn] = iline[0].strip()
def getRandomId(num=20, length=10):
    """Return `num` random identifier strings, each `length` characters long.

    Characters are drawn (with replacement) from a weighted alphanumeric
    pool; duplicated entries make some characters more likely.
    """
    pool = [u'A', u'B', u'C', u'D', u'E', u'F', u'G', u'H', u'I', u'I', u'I', u'I', u'I', u'J', u'J', u'J', u'J', u'J',
            u'K', u'L', u'M', u'N',
            u'O', u'P', u'Q', u'R', u'S', u'T', u'U', u'V', u'W', u'W', u'W', u'W', u'W', u'X', u'Y', u'Z', u'1', u'1',
            u'1', u'2', u'3',
            u'4', u'5', u'6', u'7', u'8', u'9', u'0', u'a', u'b', u'c', u'd', u'e', u'f', u'g', u'h', u'i', u'j', u'k',
            u'l', u'l', u'l', u'l', u'm', u'n',
            u'o', u'p', u'q', u'r', u's', u't', u'u', u'v', u'w', u'x', u'y', u'z']
    return [u''.join(random.choice(pool) for _ in range(length))
            for _ in range(num)]
def get_id():
    """Return a single random 17-character identifier."""
    return getRandomId(1, 17)[0]
def random_sequence(len=5):
    """Return a string of `len` random decimal digits."""
    digits = [str(random.randint(0, 9)) for _ in range(len)]
    return "".join(digits)
# random gen the bank number
def gen_bank_number():
    """Generate one random, '#'-separated bank-card number string.

    A prefix (BIN) is drawn from ./bank_txt and padded with random digits
    to 16 or 19 digits, then grouped 4-4-4-4 or 4-4-4-4-3 with '#'.

    Fixes vs. the original: a leftover `seed = 0` override forced every
    number into the 16-digit format (silently truncating 19-digit draws),
    and the 19-digit format sliced sequence[16:18], dropping the last digit.
    """
    seed = random.randint(0, 1)  # 0 -> 16 digits, 1 -> 19 digits
    s_or_l = [16, 19]
    with open('./bank_txt', 'r') as f:
        f = f.readlines()
    index = random.randint(0, len(f) - 1)  # randomly choose a prefix line
    front = str(f[index]).split('\n')[0].strip().split('\xc2\xa0')[0]
    print(front, len(front), s_or_l[seed] - len(front))
    back = random_sequence(s_or_l[seed] - len(front))
    sequence = front + back
    if seed == 0:
        new_s = sequence[0:4] + '#' + sequence[4:8] + '#' + sequence[8:12] + '#' + sequence[12:16]
    else:
        new_s = sequence[0:4] + '#' + sequence[4:8] + '#' + sequence[8:12] + '#' + sequence[12:16] + '#' + sequence[16:19]
    print("sequence", new_s)
    return new_s
def gen_bank_date_number():
    """Randomly generate an expiry-date-like string in one of four formats:
    '0M/YY', '1M/YY', '20YY/YY' or 'YY/20YY' (years capped below 2050)."""
    variant = random.randint(0, 3)
    if variant == 0:
        month = '0' + str(random.randint(1, 9))
        year = str(random.randint(1, 4)) + str(random.randint(0, 9))
        date_str = month + '/' + year
    elif variant == 1:
        month = '1' + str(random.randint(0, 2))
        year = str(random.randint(1, 4)) + str(random.randint(0, 9))
        date_str = month + '/' + year
    elif variant == 2:
        date_str = '20' + random_sequence(2) + '/' + random_sequence(2)
    else:
        date_str = random_sequence(2) + '/' + '20' + random_sequence(2)
    print("sequence", date_str)
    return date_str
def get_bank_number(num_file='./16_bank_num.txt', index=0):
    """Return line `index` (newline stripped) from a generated numbers file."""
    with open(num_file, 'r') as fh:
        lines = fh.readlines()
    return lines[index].split('\n')[0]
def get_bank_date_number(num_file='./0000_bank_date_num.txt', index=0):
    """Return date string at line `index` (newline stripped) from the file."""
    with open(num_file, 'r') as fh:
        lines = fh.readlines()
    return lines[index].split('\n')[0]
# choose the bg background pic by random
def get_random_bg():
    """Pick a random background image path ./bank_pic/bg{1..15}.jpg.

    Index 2 is remapped to 3 (bg2.jpg is not usable).
    """
    idx = random.randint(1, 15)
    if idx == 2:
        idx = 3
    return './bank_pic/' + 'bg' + str(idx) + '.jpg'
def get_random_bg_bg():
    """Fixed background used for blending; always ./b3.jpg."""
    path = "./b3.jpg"
    return path
def get_random_crop_bg():
    """Fixed background used for cropping patches; always ./bank_pic/bg3.jpg."""
    path = "./bank_pic/bg3.jpg"
    return path
def auto_gen_num_list(num_file='./16_bank_num.txt', total=2000):
    """Write `total` freshly generated card numbers to `num_file`, one per line."""
    with open(num_file, 'w') as out:
        for _ in range(total):
            out.write(gen_bank_number() + '\n')
def auto_gen_date_num_list(date_num_file='./0000_bank_date.txt', total=20000):
    """Write `total` freshly generated date strings to `date_num_file`, one per line."""
    with open(date_num_file, 'w') as out:
        for _ in range(total):
            out.write(gen_bank_date_number() + '\n')
def genbankpic_crop(num_file, index, des_folder='./16_bank_date_num_pic/', img_no='date_0000'):
    """Render one synthetic card-number/date image plus a per-character label file.

    Reads line `index` from `num_file`, stamps the glyph image of each
    character onto a 20x32 patch cropped from a random background, blurs the
    assembled strip and saves it as <des_folder>/<img_no>.jpg; the mapped
    character labels are written to <des_folder>/<img_no>.txt.
    """
    bank_numbget_random_bger = get_bank_number(num_file, index).strip()
    bg_path = get_random_bg()
    # Load two images
    img_f = []
    number_root = "./bank_number/"  # folder holding the per-character glyph images
    len_number = len(bank_numbget_random_bger) #length of numbers of date
    print(len_number)
    img = Image.open(bg_path) #randomly open a bg image
    # 5px-wide left margin strip taken from the background.
    box_b = (0, 0, 5, 32)
    img_b = img.crop(box_b)
    # img_crop.show()
    img_b = cv2.cvtColor(np.asarray(img_b), cv2.COLOR_RGB2BGR)
    img_b = np.array(img_b)
    img_f = img_b
    for i in range(len_number):
        # Each character occupies a 20x32 patch at x-offset 20*i.
        nu = 20 * i
        be_num = int(nu)
        box = (be_num, 0, 20 + be_num, 32)
        img_crop = img.crop(box)
        # img_crop.show()
        img_crop = cv2.cvtColor(np.asarray(img_crop), cv2.COLOR_RGB2BGR) # crop BGR patch
        img_blank = np.array(img_crop) # blank
        img1 = np.array(img_crop)
        line = bank_numbget_random_bger[i] # get random bank bg
        if line in ("0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "#", "/"):
            # NOTE(review): '#' matches the outer test but none of the
            # branches below, so number_root_pic keeps its previous value
            # (NameError if the string starts with '#') — harmless for
            # normal card numbers since the blank patch is used for '#'.
            if line == "0":
                number_root_pic = number_root + "0b.jpg"
            elif line == "1":
                number_root_pic = number_root + "1b.jpg"
            elif line == "2":
                number_root_pic = number_root + "2b.jpg"
            elif line == "3":
                number_root_pic = number_root + "3b.jpg"
            elif line == "4":
                number_root_pic = number_root + "4b.jpg"
            elif line == "5":
                number_root_pic = number_root + "5b.jpg"
            elif line == "6":
                number_root_pic = number_root + "6b.jpg"
            elif line == "7":
                number_root_pic = number_root + "7b.jpg"
            elif line == "8":
                number_root_pic = number_root + "8b.jpg"
            elif line == "9":
                number_root_pic = number_root + "9b.jpg"
            elif line == "/":
                # Randomly pick one of the two slash glyph variants.
                if random.randint(0,1)==1:
                    number_root_pic = number_root + "slashb.jpg"
                else:
                    number_root_pic = number_root + "slash.jpg"
            # read the number pic
            img2 = cv.imread(number_root_pic)
            # I want to put logo on top-left corner, So I create a ROI
            rows, cols, channels = img2.shape
            roi = img1[0:rows, 0:cols]
            # Now create a mask of logo and create its inverse mask also
            img2gray = cv.cvtColor(img2, cv.COLOR_BGR2GRAY) # BGR to GRAY
            ret, mask = cv.threshold(img2gray, 250, 255, cv.THRESH_BINARY) # this threshold is important (original note said 254; code uses 250)
            mask_inv = cv.bitwise_not(mask)
            # Now black-out the area of logo in ROI
            img1_bg = cv.bitwise_and(roi, roi, mask=mask) # use `mask` here (the referenced blog post had mask/mask_inv swapped; fixed after much effort)
            # Take only region of logo from logo image.
            img2_fg = cv.bitwise_and(img2, img2, mask=mask_inv) # and `mask_inv` here
            # Put logo in ROI and modify the main image
            dst = cv.add(img1_bg, img2_fg)
            img1[0:rows, 0:cols] = dst
        if line == "#":
            # '#' is a visual group separator: append the untouched blank patch.
            img_f = np.concatenate([img_f, img_blank], axis=1)
        else:
            img_f = np.concatenate([img_f, img1], axis=1)
    # bank_pic_name="./bank_pic/"+str(get_id)+".jpg"
    bank_pic_name = des_folder + str(img_no) + '.jpg'
    print(bank_pic_name)
    # if you wanna debug, uncomment it
    # bank_pic_name='./bank_gen/3.jpg'
    img_f = np.concatenate([img_f, img_b], axis=1)
    # sharper (un-blurred) version of the image
    # cv.imwrite(bank_pic_name, img_f)
    # blend the img array
    img_f_image = Image.fromarray(cv2.cvtColor(img_f, cv2.COLOR_BGR2RGB))
    img_f_image_gaosi = img_f_image.filter(ImageFilter.GaussianBlur(radius=1.5))
    img_f_image_gaosi = img_f_image_gaosi.convert('RGBA')
    width, height = img_f_image_gaosi.size
    img2 = Image.open(bg_path)
    img2 = img2.convert('RGBA')
    img2 = img2.crop([0, 0, width, height])
    # NOTE(review): the blend result below is never used; the blurred strip
    # (img_f_image_gaosi) is what gets saved.
    img = Image.blend(img_f_image_gaosi, img2, 0.2)
    img_f_image_gaosi=img_f_image_gaosi.convert("RGB")
    # img = img.convert('RGB')
    img_f_image_gaosi.save(bank_pic_name)
    # img.show()
    # Write one mapped label id per character (separators removed).
    bank_pic_txt = des_folder + str(img_no) + '.txt'
    ftpw = open(bank_pic_txt, 'w')
    bank_numbget_random_bger = bank_numbget_random_bger.replace("#", "").strip()
    for inum in range(len(bank_numbget_random_bger)):
        if inum < len(bank_numbget_random_bger) - 1:
            ftpw.write(map_dic[bank_numbget_random_bger[inum]] + "\n")
        else:
            ftpw.write(map_dic[bank_numbget_random_bger[inum]])
    ftpw.close()
# random gen the bank number
def gen_bank_number_19():
    """Generate one 19-digit number string for every 6-digit prefix in ./bank_txt.

    Each result is '<prefix>#' followed by 13 random digits.

    Returns:
        list(str): generated number strings (20 characters each).
    """
    ban_num = []
    # `with` guarantees the prefix file is closed (the old code leaked the handle).
    with open('./bank_txt', 'r') as fh:
        for line in fh:
            line = line.strip()
            if len(line) == 6:
                new_s = ""
                for i in range(13):
                    new_s = new_s + (str(random.randint(0, 9)))
                new_s = line + "#" + new_s
                print("sequence", new_s)
                ban_num.append(new_s)
    print (len(ban_num))
    return ban_num
def auto_gen_num_list_19(num_file='./19_bank_num.txt', total=312):
    """Write `total` rounds of 19-digit numbers (one per prefix in ./bank_txt)
    to `num_file`, one number per line."""
    with open(num_file, 'w') as out:
        for _ in range(total):
            for number in gen_bank_number_19():
                out.write(number.strip() + "\n")
def auto_gen_num_pic(num_file='./19_bank_num.txt', des_folder='./19_bank_num_pic/'):
    """Render one card image per number listed in `num_file` into `des_folder`."""
    with open(num_file, 'r') as fh:
        total = len(fh.readlines())
    for idx in range(1, total + 1):
        # Image names are 1-based; file lines are 0-based.
        genbankpic_crop(num_file, idx - 1, des_folder, '19_3_' + str(idx).zfill(5))
def auto_gen_date_num_pic(num_file='./0000_bank_date.txt', des_folder='./bank_date_num_pic/'):
    """Render one date image per date string listed in `num_file` into `des_folder`."""
    with open(num_file, 'r') as fh:
        total = len(fh.readlines())
    for idx in range(1, total + 1):
        # Image names are 1-based; file lines are 0-based.
        genbankpic_crop(num_file, idx - 1, des_folder, 'date_num_' + str(idx).zfill(5))
if __name__ == "__main__":
    # auto_gen_num_list(num_file='./16_bank_num.txt',total=50000)# auto-generate the bank-number list (was set to 2000)
    #genbankpic_crop('./16_bank_num.txt',0) # single-step debugging: uncomment and run this one call
    # auto_gen_num_list_19(num_file='./19_bank_num.txt',total=312)# auto-generate the 19-digit bank-number list
    #
    # auto_gen_num_pic(num_file='./19_bank_num.txt', des_folder='./19_bank_num_pic/') # auto-generate bank-number pics
    #auto_gen_date_num_list() #generate date data finished 12/25 jason
    auto_gen_date_num_pic(num_file='./0000_bank_date.txt', des_folder='./bank_date_num_pic/') # auto-generate bank date-number pics
"PIL.ImageFilter.GaussianBlur",
"cv2.bitwise_not",
"random.randint",
"cv2.bitwise_and",
"cv2.cvtColor",
"numpy.asarray",
"cv2.threshold",
"random.choice",
"PIL.Image.open",
"cv2.imread",
"numpy.array",
"PIL.Image.blend",
"cv2.add",
"numpy.concatenate"
] | [((1657, 1677), 'random.randint', 'random.randint', (['(0)', '(1)'], {}), '(0, 1)\n', (1671, 1677), False, 'import random\n'), ((2592, 2612), 'random.randint', 'random.randint', (['(0)', '(3)'], {}), '(0, 3)\n', (2606, 2612), False, 'import random\n'), ((4087, 4108), 'random.randint', 'random.randint', (['(1)', '(15)'], {}), '(1, 15)\n', (4101, 4108), False, 'import random\n'), ((5406, 5425), 'PIL.Image.open', 'Image.open', (['bg_path'], {}), '(bg_path)\n', (5416, 5425), False, 'from PIL import Image\n'), ((5605, 5620), 'numpy.array', 'np.array', (['img_b'], {}), '(img_b)\n', (5613, 5620), True, 'import cv2 as cv, numpy as np\n'), ((8569, 8607), 'numpy.concatenate', 'np.concatenate', (['[img_f, img_b]'], {'axis': '(1)'}), '([img_f, img_b], axis=1)\n', (8583, 8607), True, 'import cv2 as cv, numpy as np\n'), ((8958, 8977), 'PIL.Image.open', 'Image.open', (['bg_path'], {}), '(bg_path)\n', (8968, 8977), False, 'from PIL import Image\n'), ((9064, 9105), 'PIL.Image.blend', 'Image.blend', (['img_f_image_gaosi', 'img2', '(0.2)'], {}), '(img_f_image_gaosi, img2, 0.2)\n', (9075, 9105), False, 'from PIL import Image\n'), ((5555, 5572), 'numpy.asarray', 'np.asarray', (['img_b'], {}), '(img_b)\n', (5565, 5572), True, 'import cv2 as cv, numpy as np\n'), ((5930, 5948), 'numpy.array', 'np.array', (['img_crop'], {}), '(img_crop)\n', (5938, 5948), True, 'import cv2 as cv, numpy as np\n'), ((5972, 5990), 'numpy.array', 'np.array', (['img_crop'], {}), '(img_crop)\n', (5980, 5990), True, 'import cv2 as cv, numpy as np\n'), ((8724, 8762), 'cv2.cvtColor', 'cv2.cvtColor', (['img_f', 'cv2.COLOR_BGR2RGB'], {}), '(img_f, cv2.COLOR_BGR2RGB)\n', (8736, 8762), False, 'import cv2\n'), ((8807, 8843), 'PIL.ImageFilter.GaussianBlur', 'ImageFilter.GaussianBlur', ([], {'radius': '(1.5)'}), '(radius=1.5)\n', (8831, 8843), False, 'from PIL import ImageFilter\n'), ((1272, 1292), 'random.choice', 'random.choice', (['chars'], {}), '(chars)\n', (1285, 1292), False, 'import random\n'), ((1501, 1521), 
'random.randint', 'random.randint', (['(0)', '(9)'], {}), '(0, 9)\n', (1515, 1521), False, 'import random\n'), ((5852, 5872), 'numpy.asarray', 'np.asarray', (['img_crop'], {}), '(img_crop)\n', (5862, 5872), True, 'import cv2 as cv, numpy as np\n'), ((7283, 7309), 'cv2.imread', 'cv.imread', (['number_root_pic'], {}), '(number_root_pic)\n', (7292, 7309), True, 'import cv2 as cv, numpy as np\n'), ((7563, 7599), 'cv2.cvtColor', 'cv.cvtColor', (['img2', 'cv.COLOR_BGR2GRAY'], {}), '(img2, cv.COLOR_BGR2GRAY)\n', (7574, 7599), True, 'import cv2 as cv, numpy as np\n'), ((7638, 7688), 'cv2.threshold', 'cv.threshold', (['img2gray', '(250)', '(255)', 'cv.THRESH_BINARY'], {}), '(img2gray, 250, 255, cv.THRESH_BINARY)\n', (7650, 7688), True, 'import cv2 as cv, numpy as np\n'), ((7727, 7747), 'cv2.bitwise_not', 'cv.bitwise_not', (['mask'], {}), '(mask)\n', (7741, 7747), True, 'import cv2 as cv, numpy as np\n'), ((7822, 7857), 'cv2.bitwise_and', 'cv.bitwise_and', (['roi', 'roi'], {'mask': 'mask'}), '(roi, roi, mask=mask)\n', (7836, 7857), True, 'import cv2 as cv, numpy as np\n'), ((7969, 8010), 'cv2.bitwise_and', 'cv.bitwise_and', (['img2', 'img2'], {'mask': 'mask_inv'}), '(img2, img2, mask=mask_inv)\n', (7983, 8010), True, 'import cv2 as cv, numpy as np\n'), ((8102, 8126), 'cv2.add', 'cv.add', (['img1_bg', 'img2_fg'], {}), '(img1_bg, img2_fg)\n', (8108, 8126), True, 'import cv2 as cv, numpy as np\n'), ((3160, 3180), 'random.randint', 'random.randint', (['(0)', '(9)'], {}), '(0, 9)\n', (3174, 3180), False, 'import random\n'), ((8219, 8261), 'numpy.concatenate', 'np.concatenate', (['[img_f, img_blank]'], {'axis': '(1)'}), '([img_f, img_blank], axis=1)\n', (8233, 8261), True, 'import cv2 as cv, numpy as np\n'), ((8304, 8341), 'numpy.concatenate', 'np.concatenate', (['[img_f, img1]'], {'axis': '(1)'}), '([img_f, img1], axis=1)\n', (8318, 8341), True, 'import cv2 as cv, numpy as np\n'), ((3132, 3152), 'random.randint', 'random.randint', (['(1)', '(4)'], {}), '(1, 4)\n', (3146, 3152), 
False, 'import random\n'), ((3321, 3341), 'random.randint', 'random.randint', (['(0)', '(9)'], {}), '(0, 9)\n', (3335, 3341), False, 'import random\n'), ((3293, 3313), 'random.randint', 'random.randint', (['(1)', '(4)'], {}), '(1, 4)\n', (3307, 3313), False, 'import random\n'), ((10007, 10027), 'random.randint', 'random.randint', (['(0)', '(9)'], {}), '(0, 9)\n', (10021, 10027), False, 'import random\n'), ((3098, 3118), 'random.randint', 'random.randint', (['(1)', '(9)'], {}), '(1, 9)\n', (3112, 3118), False, 'import random\n'), ((3259, 3279), 'random.randint', 'random.randint', (['(0)', '(2)'], {}), '(0, 2)\n', (3273, 3279), False, 'import random\n'), ((7055, 7075), 'random.randint', 'random.randint', (['(0)', '(1)'], {}), '(0, 1)\n', (7069, 7075), False, 'import random\n')] |
#SHARED UTILITY TOOLS AND VARIABLES
import numpy
import scipy.io.wavfile
def pad_signal(signal, sampling_rate, time_step = 0.01, frame_window = 0.025):
    """Zero-pad a 1-D signal so that later framing covers its full length.

    Appends `frame_len - len(signal) % hop` zeros, where hop is the number
    of samples per time_step and frame_len is the (ceiled) number of samples
    per frame_window.

    Args:
        signal (numpy.array): 1-D audio signal.
        sampling_rate (int): sampling frequency in Hz.
        time_step (float, optional): hop between frames in seconds.
        frame_window (float, optional): frame length in seconds
            (assumed >= time_step — TODO confirm).

    Returns:
        numpy.array: the padded signal.
    """
    hop = int(sampling_rate * time_step)
    frame_len = int(sampling_rate * frame_window)
    if sampling_rate * frame_window > frame_len:
        frame_len += 1  # ceil to whole samples
    n_pad = frame_len - len(signal) % hop
    return numpy.append(signal, numpy.zeros(n_pad))
def compress_pause_to_time(signal, sampling_rate, time_step = 0.01, frame_window = 0.025):
    """Downsample a per-sample pause sequence to one flag per time step.

    Frame i is marked as pause when more than half of the samples in its
    hop-sized window are truthy.

    Args:
        signal (numpy.array(bool)): per-sample pause flags (True = pause).
        sampling_rate (int): sampling frequency in Hz.
        time_step (float, optional): hop between frames in seconds.
        frame_window (float, optional): frame length (in seconds) used to
            derive the number of frames.

    Returns:
        numpy.array(bool): one pause flag per frame.
    """
    hop = int(sampling_rate * time_step)
    frame_len = int(sampling_rate * frame_window)
    if sampling_rate * frame_window > frame_len:
        frame_len += 1  # ceil to whole samples
    n_frames = (len(signal) - frame_len) // hop + 1
    pause = numpy.full(n_frames, False)
    for i in range(n_frames):
        window = signal[i * hop:(i + 1) * hop]
        # Majority vote over the hop window.
        pause[i] = numpy.count_nonzero(window) > hop / 2
    return pause
def is_upper_triangular( AA ):
    """Return True when every entry below the main diagonal of `AA` is
    (numerically) zero.

    Args:
        AA (numpy.array): a 2D matrix.

    Returns:
        bool:
    """
    upper_part = numpy.triu(AA)
    return numpy.allclose(AA, upper_part)
def is_lower_triangular( AA ):
    """Return True when every entry above the main diagonal of `AA` is
    (numerically) zero.

    Args:
        AA (numpy.array): a 2D matrix.

    Returns:
        bool:
    """
    lower_part = numpy.tril(AA)
    return numpy.allclose(AA, lower_part)
def read_wavfile( filename, channel=0 ):
    """Read an audio file (.wav) and return one channel as float samples.

    Bug fix: the old check compared `channel` to the array's number of
    dimensions instead of its number of channels, so e.g. channel=1 on a
    mono file silently returned the mono data, and channel equal to the
    channel count raised an IndexError.

    Args:
        filename (str): path to the audio file.
        channel (int, optional): zero-based channel index. Defaults to 0.

    Returns:
        int: sampling frequency.
        numpy.array: audio data (float).
        (Returns None after printing an error when `channel` does not exist.)
    """
    sampling_rate, datas = scipy.io.wavfile.read(filename)
    datas = datas.astype(float)
    # Mono data comes back as a 1-D array; multi-channel as (n_samples, n_channels).
    n_channels = 1 if datas.ndim == 1 else datas.shape[1]
    if channel < 0 or channel >= n_channels:
        print("Error: Channel {} does not exist. Note: first channel is channel 0.".format(channel))
        return
    if datas.ndim > 1:
        return sampling_rate, datas[:, channel]
    return sampling_rate, datas
def write_wavfile(filename, fs, data):
    """Write `data` to `filename` as a .wav file with sampling rate `fs` (Hz)."""
    scipy.io.wavfile.write(filename, fs, data)
def merge_pitch_profile( pitches, speaker_id ):
    """Merge per-speaker pitch tracks into one track: frame i takes the
    pitch of speaker speaker_id[i].

    Args:
        pitches (list-like(float)): one pitch sequence per speaker.
        speaker_id (list-like(int)): active speaker id per frame.

    Returns:
        numpy.array: merged pitch profile.
    """
    selected = [pitches[spk][frame] for frame, spk in enumerate(speaker_id)]
    return numpy.asarray(selected, dtype=float)
def artificial_signal( frequencys, sampling_frequency=16000, duration=0.025 ):
    """Concatenate one sinusoid per frequency in `frequencys`.

    Args:
        frequencys (list-like(int)): frequencies of the sinusoids in Hz.
        sampling_frequency (int, optional): sampling frequency in Hz. Defaults to 16000.
        duration (float, optional): duration of each sinusoid in seconds. Defaults to 0.025.

    Returns:
        numpy.array: the concatenated sinusoidal signal.
    """
    pieces = [sinusoid(freq, sampling_frequency, duration) for freq in frequencys]
    return numpy.concatenate(tuple(pieces))
def sinusoid( frequency, sampling_frequency=16000, duration=0.025 ):
    """Generate a single sine wave starting at phase 0.

    Args:
        frequency (int): frequency of the sinusoid in Hz.
        sampling_frequency (int, optional): sampling frequency in Hz. Defaults to 16000.
        duration (float, optional): duration in seconds. Defaults to 0.025.

    Returns:
        numpy.array: a sinusoid of int(sampling_frequency * duration) samples.
    """
    n_samples = int(sampling_frequency * duration)
    sample_idx = numpy.arange(n_samples)
    return numpy.sin(2 * numpy.pi * frequency * sample_idx / sampling_frequency)
def random_symbols( distribution, length ):
if sum(distribution) != 1:
print("Warning: probabilites must sum to 1")
return
return numpy.random.choice( len(distribution), length, p=distribution )
def random_run( distributions, length, min_run=100, max_more=100 ):
    # Emit `length` symbols as a sequence of runs, cycling through
    # `distributions`: each run samples from one distribution for min_run
    # (plus up to max_more random extra) symbols before moving to the next.
    ans = list()
    # k: run counter, N: symbols still to emit, M: number of distributions
    k, N, M = 0, length, len(distributions)
    while True:
        more = numpy.random.randint(0,max_more) if max_more else 0
        ext_length = min_run + more
        # never emit more than the remaining budget
        ext_length = min( ext_length, N )
        ans.extend( random_symbols( distributions[k % M], ext_length ) )
        # NOTE(review): `1 % M` is 1 for M > 1 but 0 for M == 1 — probably
        # meant `k += 1`; harmless here since k is only ever used modulo M.
        k += 1 % M
        N -= ext_length
        if N <= 0: return ans | [
"numpy.full",
"numpy.triu",
"numpy.tril",
"numpy.empty",
"numpy.sin",
"numpy.random.randint",
"numpy.where"
] | [((1340, 1365), 'numpy.full', 'numpy.full', (['length', '(False)'], {}), '(length, False)\n', (1350, 1365), False, 'import numpy\n'), ((3160, 3174), 'numpy.empty', 'numpy.empty', (['N'], {}), '(N)\n', (3171, 3174), False, 'import numpy\n'), ((4487, 4551), 'numpy.sin', 'numpy.sin', (['(2 * numpy.pi * frequency * times / sampling_frequency)'], {}), '(2 * numpy.pi * frequency * times / sampling_frequency)\n', (4496, 4551), False, 'import numpy\n'), ((1710, 1724), 'numpy.triu', 'numpy.triu', (['AA'], {}), '(AA)\n', (1720, 1724), False, 'import numpy\n'), ((1930, 1944), 'numpy.tril', 'numpy.tril', (['AA'], {}), '(AA)\n', (1940, 1944), False, 'import numpy\n'), ((4940, 4973), 'numpy.random.randint', 'numpy.random.randint', (['(0)', 'max_more'], {}), '(0, max_more)\n', (4960, 4973), False, 'import numpy\n'), ((1412, 1450), 'numpy.where', 'numpy.where', (['signal[i * T:(i + 1) * T]'], {}), '(signal[i * T:(i + 1) * T])\n', (1423, 1450), False, 'import numpy\n')] |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.11.3
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/probml/pyprobml/blob/master/notebooks/gaussian_param_inf_1d_numpyro.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="11FOMaCs74vK"
# # Inference for the parameters of a 1d Gaussian using a non-conjugate prior
#
# We illustrate various inference methods using the example in sec 4.3 ("Gaussian model of height") of [Statistical Rethinking ed 2](https://xcelab.net/rm/statistical-rethinking/). This requires computing $p(\mu,\sigma|D)$ using a Gaussian likelihood but a non-conjugate prior.
# The numpyro code is from [Du Phan's site](https://fehiepsi.github.io/rethinking-numpyro/04-geocentric-models.html).
#
#
#
#
# + id="Z5wEIBws1D6i"
import numpy as np
np.set_printoptions(precision=3)
import matplotlib.pyplot as plt
import math
import os
import warnings
import pandas as pd
#from scipy.interpolate import BSpline
#from scipy.stats import gaussian_kde
# + colab={"base_uri": "https://localhost:8080/"} id="Rn0dCvGCr1YC" outputId="5a6919ff-fd7b-4205-d534-f4daae657c20"
# !mkdir figures
# + id="Xo0ejB5-7M3-"
# !pip install -q numpyro@git+https://github.com/pyro-ppl/numpyro
# + colab={"base_uri": "https://localhost:8080/"} id="qB5V5upMOMkP" outputId="47aa9cd9-16c9-4033-b48f-8dfa0ef5ff0d"
import jax
print("jax version {}".format(jax.__version__))
print("jax backend {}".format(jax.lib.xla_bridge.get_backend().platform))
import jax.numpy as jnp
from jax import random, vmap
rng_key = random.PRNGKey(0)
rng_key, rng_key_ = random.split(rng_key)
# + id="lfOH0V2Knz_p"
import numpyro
import numpyro.distributions as dist
from numpyro.distributions import constraints
from numpyro.distributions.transforms import AffineTransform
from numpyro.diagnostics import hpdi, print_summary
from numpyro.infer import Predictive
from numpyro.infer import MCMC, NUTS
from numpyro.infer import SVI, Trace_ELBO, init_to_value
from numpyro.infer.autoguide import AutoLaplaceApproximation
import numpyro.optim as optim
# + colab={"base_uri": "https://localhost:8080/"} id="JZjT_8cKA1pP" outputId="113013a5-dc9d-4862-ef72-b6f2fa8b7dbd"
# !pip install arviz
import arviz as az
# + [markdown] id="qB83jECL_oWq"
# # Data
#
# We use the "Howell" dataset, which consists of measurements of height, weight, age and sex, of a certain foraging tribe, collected by <NAME>.
# + colab={"base_uri": "https://localhost:8080/", "height": 370} id="312Xjmye_2Lg" outputId="ae77d5a6-593e-43f5-a80a-009beff4f51c"
#url = 'https://github.com/fehiepsi/rethinking-numpyro/tree/master/data/Howell1.csv?raw=True'
url = 'https://raw.githubusercontent.com/fehiepsi/rethinking-numpyro/master/data/Howell1.csv'
Howell1 = pd.read_csv(url, sep=';')
d = Howell1
d.info()
d.head()
# + id="_mrNmkiEBPlH"
# get data for adults
d2 = d[d.age >= 18]
N = len(d2)
# shuffle the rows so the N=20 subset taken below is a random sample
ndx = jax.random.permutation(rng_key, N)
data = d2.height.values[ndx]
N = 20 # take a subset of the 354 samples
data = data[:N]
# + [markdown] id="aSAr5iy2E0Cr"
# Empirical mean and std.
# + colab={"base_uri": "https://localhost:8080/"} id="QFYvhonkEpb-" outputId="7eb59b2c-fb2b-4a4f-d344-f9b1929f3ac4"
print(len(data))
print(np.mean(data))
print(np.std(data))
# + [markdown] id="oXUj4nsaCbR1"
# # Model
#
# We use the following model for the heights (in cm):
# $$
# \begin{align}
# h_i &\sim N(\mu,\sigma) \\
# \mu &\sim N(178, 20) \\
# \sigma &\sim U(0,50)
# \end{align}
# $$
#
# The prior for $\mu$ has a mean 178cm, since that is the height of
# Richard McElreath, the author of the "Statisical Rethinking" book.
# The standard deviation is 20, so that 90\% of people lie in the range 138--218.
#
# The prior for $\sigma$ has a lower bound of 0 (since it must be positive), and an upper bound of 50, so that the interval $[\mu-\sigma, \mu+\sigma]$ has width 100cm, which seems sufficiently large to capture human heights.
#
#
# Note that this is not a conjugate prior, so we will just approximate the posterior.
# But since there are just 2 unknowns, this will be easy.
#
# + [markdown] id="52c6OQskEZiT"
# # Grid posterior
# + colab={"base_uri": "https://localhost:8080/"} id="6lFJF82pEac_" outputId="96bd96a9-2444-481b-d458-436ea79a4e7e"
# Priors for the height model: mu ~ N(178, 20), sigma ~ U(0, 50).
mu_prior = dist.Normal(178, 20)
sigma_prior = dist.Uniform(0, 50)
# Plot window / grid resolution for the 2-d posterior over (mu, sigma).
mu_range = [150, 160]
sigma_range = [4, 14]
ngrid = 100
plot_square = False
mu_list = jnp.linspace(start=mu_range[0], stop=mu_range[1], num=ngrid)
sigma_list = jnp.linspace(start=sigma_range[0], stop=sigma_range[1], num=ngrid)
mesh = jnp.meshgrid(mu_list, sigma_list)
print([mesh[0].shape, mesh[1].shape])
print(mesh[0].reshape(-1).shape)
# Flatten the grid so each entry of post["mu"]/post["sigma"] is one (mu, sigma) pair.
post = {"mu": mesh[0].reshape(-1), "sigma": mesh[1].reshape(-1)}
# Log-likelihood of the data at every grid point, vectorised over the grid.
post["LL"] = vmap(
    lambda mu, sigma: jnp.sum(dist.Normal(mu, sigma).log_prob(data))
)(post["mu"], post["sigma"])
logprob_mu = mu_prior.log_prob(post["mu"])
logprob_sigma = sigma_prior.log_prob(post["sigma"])
# Unnormalised log posterior = log-likelihood + log priors.
post["prob"] = post["LL"] + logprob_mu + logprob_sigma
# Subtract the max before exponentiating for numerical stability (log-sum-exp trick).
post["prob"] = jnp.exp(post["prob"] - jnp.max(post["prob"]))
prob = post["prob"] / jnp.sum(post["prob"]) # normalize over the grid
# + colab={"base_uri": "https://localhost:8080/", "height": 512} id="Cwg1FZlhGS-T" outputId="1f230ecd-f166-4a85-8988-e853989d03b6"
prob2d = prob.reshape(ngrid, ngrid)
prob_mu = jnp.sum(prob2d, axis=0)
prob_sigma = jnp.sum(prob2d, axis=1)
plt.figure()
plt.plot(mu_list, prob_mu, label='mu')
plt.legend()
plt.savefig('figures/gauss_params_1d_post_grid_marginal_mu.pdf', dpi=300)
plt.show()
plt.figure()
plt.plot(sigma_list, prob_sigma, label='sigma')
plt.legend()
plt.savefig('figures/gauss_params_1d_post_grid_marginal_sigma.pdf', dpi=300)
plt.show()
# + colab={"base_uri": "https://localhost:8080/", "height": 285} id="9wjZvO2GEfn-" outputId="4f68306b-ec27-499f-9924-85a382fef05b"
plt.contour(
post["mu"].reshape(ngrid, ngrid),
post["sigma"].reshape(ngrid, ngrid),
post["prob"].reshape(ngrid, ngrid),
)
plt.xlabel(r'$\mu$')
plt.ylabel(r'$\sigma$')
if plot_square: plt.axis('square')
plt.savefig('figures/gauss_params_1d_post_grid_contours.pdf', dpi=300)
plt.show()
# + colab={"base_uri": "https://localhost:8080/", "height": 285} id="ARQMye8bEf2J" outputId="392e01a8-d763-474b-ac32-9a5cfd84d5e6"
plt.imshow(
post["prob"].reshape(ngrid, ngrid),
origin="lower",
extent=(mu_range[0], mu_range[1], sigma_range[0], sigma_range[1]),
aspect="auto",
)
plt.xlabel(r'$\mu$')
plt.ylabel(r'$\sigma$')
if plot_square: plt.axis('square')
plt.savefig('figures/gauss_params_1d_post_grid_heatmap.pdf', dpi=300)
plt.show()
# + [markdown] id="YYSMPLUYF_0b"
# Posterior samples.
# + id="qx_q5zTYFzsa"
nsamples = 5000 #int(1e4)
sample_rows = dist.Categorical(probs=prob).sample(random.PRNGKey(0), (nsamples,))
sample_mu = post["mu"][sample_rows]
sample_sigma = post["sigma"][sample_rows]
samples = {'mu': sample_mu, 'sigma': sample_sigma}
# + colab={"base_uri": "https://localhost:8080/", "height": 658} id="j71jJlWnpLRP" outputId="3af318a1-076e-4668-d7e0-2453722f3efe"
print_summary(samples, 0.95, False)
plt.scatter(samples['mu'], samples['sigma'], s=64, alpha=0.1, edgecolor="none")
plt.xlim(mu_range[0], mu_range[1])
plt.ylim(sigma_range[0], sigma_range[1])
plt.xlabel(r'$\mu$')
plt.ylabel(r'$\sigma$')
plt.axis('square')
plt.show()
az.plot_kde(samples['mu'], samples['sigma']);
plt.xlim(mu_range[0], mu_range[1])
plt.ylim(sigma_range[0], sigma_range[1])
plt.xlabel(r'$\mu$')
plt.ylabel(r'$\sigma$')
if plot_square: plt.axis('square')
plt.savefig('figures/gauss_params_1d_post_grid.pdf', dpi=300)
plt.show()
# + [markdown] id="GFzitSc_ksgZ"
# posterior marginals.
# + colab={"base_uri": "https://localhost:8080/", "height": 570} id="depUbCulkuB9" outputId="5a9539a6-b4ac-4ac5-e24b-a59b6c93dbb0"
print(hpdi(samples['mu'], 0.95))
print(hpdi(samples['sigma'], 0.95))
fig, ax = plt.subplots()
az.plot_kde(samples['mu'], ax=ax, label=r'$\mu$')
fig, ax = plt.subplots()
az.plot_kde(samples['sigma'], ax=ax, label=r'$\sigma$')
# + [markdown] id="luc7FkMXGmEw"
# # Laplace approximation
#
# See [the documentation](http://num.pyro.ai/en/stable/autoguide.html#autolaplaceapproximation)
# + [markdown] id="4lpe17A-LUUE"
# ## Optimization
# + colab={"base_uri": "https://localhost:8080/", "height": 297} id="GiStL67NGnJi" outputId="9841d3e4-5e77-4241-b8d5-367cc9fad1b4"
def model(data):
    """NumPyro model: h_i ~ Normal(mu, sigma), with the globals
    mu_prior = N(178, 20) and sigma_prior = U(0, 50) defined in the
    grid-posterior cell above.  `data` is observed at the "height" site."""
    # Statement order matters in numpyro (it drives RNG key splitting).
    mu = numpyro.sample("mu", mu_prior)
    sigma = numpyro.sample("sigma", sigma_prior)
    numpyro.sample("height", dist.Normal(mu, sigma), obs=data)
guide = AutoLaplaceApproximation(model)
svi = SVI(model, guide, optim.Adam(1), Trace_ELBO(), data=data)
svi_result = svi.run(random.PRNGKey(0), 2000)
plt.figure()
plt.plot(svi_result.losses)
# + colab={"base_uri": "https://localhost:8080/", "height": 297} id="bwrwHS73IJec" outputId="b044a435-9ec7-46da-d053-b1ae0a475ead"
start = {"mu": data.mean(), "sigma": data.std()}
guide = AutoLaplaceApproximation(model, init_loc_fn=init_to_value(values=start))
svi = SVI(model, guide, optim.Adam(0.1), Trace_ELBO(), data=data)
svi_result = svi.run(random.PRNGKey(0), 2000)
plt.figure()
plt.plot(svi_result.losses)
# + [markdown] id="6_s0bDxqIUEi"
# ## Posterior samples.
# + id="K6dQBDTGH3ex"
samples = guide.sample_posterior(random.PRNGKey(1), svi_result.params, (nsamples,))
# + colab={"base_uri": "https://localhost:8080/", "height": 662} id="PKb6dlS_pSKk" outputId="9fcc3d9f-62a5-43de-d535-b206a42df86d"
print_summary(samples, 0.95, False)
plt.scatter(samples['mu'], samples['sigma'], s=64, alpha=0.1, edgecolor="none")
plt.xlim(mu_range[0], mu_range[1])
plt.ylim(sigma_range[0], sigma_range[1])
plt.xlabel(r'$\mu$')
plt.ylabel(r'$\sigma$')
plt.show()
az.plot_kde(samples['mu'], samples['sigma']);
plt.xlim(mu_range[0], mu_range[1])
plt.ylim(sigma_range[0], sigma_range[1])
plt.xlabel(r'$\mu$')
plt.ylabel(r'$\sigma$')
if plot_square: plt.axis('square')
plt.savefig('figures/gauss_params_1d_post_laplace.pdf', dpi=300)
plt.show()
# + colab={"base_uri": "https://localhost:8080/", "height": 570} id="hag3rzUcpcv3" outputId="ae69555f-63cc-4897-e6aa-62a7784ad044"
print(hpdi(samples['mu'], 0.95))
print(hpdi(samples['sigma'], 0.95))
fig, ax = plt.subplots()
az.plot_kde(samples['mu'], ax=ax, label=r'$\mu$')
fig, ax = plt.subplots()
az.plot_kde(samples['sigma'], ax=ax, label=r'$\sigma$')
# + [markdown] id="gZHkO4iBLBv-"
# ## Extract 2d joint posterior
# + [markdown] id="E21dH5NjMnJJ"
# The Gaussian approximation is over transformed parameters.
# + colab={"base_uri": "https://localhost:8080/"} id="nUPNkY_ILDz2" outputId="4bf84d41-feea-460d-972b-f0043a290d9b"
post = guide.get_posterior(svi_result.params)
print(post.mean)
print(post.covariance_matrix)
# + colab={"base_uri": "https://localhost:8080/"} id="kckoWeKhUPDr" outputId="e1d56ba4-db33-4cf3-c790-210b331dd10d"
def logit(p):
    """Log-odds of probability `p`: the inverse of the logistic sigmoid."""
    odds = p / (1 - p)
    return jnp.log(odds)
def sigmoid(a):
    """Logistic function mapping the real line to (0, 1); inverse of logit."""
    denominator = 1 + jnp.exp(-a)
    return 1 / denominator
scale=50; print(logit(7.7/scale)); print(sigmoid(-1.7)*scale)
# + colab={"base_uri": "https://localhost:8080/"} id="pzubiiMsXJPG" outputId="b8a16edb-5a94-402d-b0da-359642a5911f"
unconstrained_samples = post.sample(rng_key, sample_shape=(nsamples,))
constrained_samples = guide._unpack_and_constrain(unconstrained_samples, svi_result.params)
print(unconstrained_samples.shape)
print(jnp.mean(unconstrained_samples, axis=0))
print(jnp.mean(constrained_samples['mu'], axis=0))
print(jnp.mean(constrained_samples['sigma'], axis=0))
# + [markdown] id="rMv_7FRZMqAY"
# We can sample from the posterior, which returns results in the original parameterization.
# + colab={"base_uri": "https://localhost:8080/"} id="UdnupIg0IuTk" outputId="31ec5ce9-fdcd-44bf-cca7-e0a117f97366"
# Draw posterior samples in the original (constrained) parameterization and
# summarise their joint second moments.
# Fix: the original passed a bare `params`, which is undefined in this
# notebook (NameError on a fresh run) — the fitted parameters live in
# `svi_result.params`, exactly as used in the earlier sample_posterior call.
samples = guide.sample_posterior(random.PRNGKey(1), svi_result.params, (nsamples,))
x = jnp.stack(list(samples.values()), axis=0)  # shape (2, nsamples): rows are mu, sigma
print(x.shape)
print('mean of samples\n', jnp.mean(x, axis=1))
vcov = jnp.cov(x)
print('cov of samples\n', vcov) # variance-covariance matrix
# correlation matrix: R_ij = cov_ij / sqrt(cov_ii * cov_jj)
R = vcov / jnp.sqrt(jnp.outer(jnp.diagonal(vcov), jnp.diagonal(vcov)))
print('corr of samples\n', R)
# + [markdown] id="rjvvHbB0NNme"
# # Variational inference
#
# We use
# $q(\mu,\sigma) = N(\mu|m,s) Ga(\sigma|a,b)$
#
# + colab={"base_uri": "https://localhost:8080/", "height": 363} id="rE6C50KlL3hQ" outputId="24887973-fe0d-4fd0-b283-6b8129d05129"
def guide(data):
    """Variational family q(mu, sigma) = Normal(mu | m, s) * Gamma(sigma | a, b)."""
    data_mean = jnp.mean(data)
    data_std = jnp.std(data)
    # Variational parameters, initialised from the data moments; the
    # positivity constraints keep s, a, b valid during optimisation.
    m = numpyro.param("m", data_mean)
    s = numpyro.param("s", 10, constraint=constraints.positive)
    a = numpyro.param("a", data_std, constraint=constraints.positive)
    b = numpyro.param("b", 1, constraint=constraints.positive)
    # Statement order matters in numpyro (RNG key splitting).
    mu = numpyro.sample("mu", dist.Normal(m, s))
    sigma = numpyro.sample("sigma", dist.Gamma(a, b))
optimizer = numpyro.optim.Momentum(step_size=0.001, mass=0.1)
svi = SVI(model, guide, optimizer, loss=Trace_ELBO())
nsteps = 2000
svi_result = svi.run(rng_key_, nsteps, data=data)
print(svi_result.params)
print(svi_result.losses.shape)
plt.plot(svi_result.losses)
plt.title("ELBO")
plt.xlabel("step")
plt.ylabel("loss");
# + [markdown] id="k779nIjdTxu4"
# ## Extract Variational parameters.
#
# + colab={"base_uri": "https://localhost:8080/"} id="7ufCMqoZTpNV" outputId="3d32cd2f-a6c0-4f64-8ac6-bcbbb4337871"
print(svi_result.params)
a = np.array(svi_result.params['a'])
b = np.array(svi_result.params['b'])
m = np.array(svi_result.params['m'])
s = np.array(svi_result.params['s'])
# + colab={"base_uri": "https://localhost:8080/"} id="v51AAfH0Vh6G" outputId="4c51c80c-bb25-4f43-9267-0b04e0726d2a"
print('empirical mean', jnp.mean(data))
print('empirical std', jnp.std(data))
print(r'posterior mean and std of $\mu$')
post_mean = dist.Normal(m, s)
print([post_mean.mean, jnp.sqrt(post_mean.variance)])
print(r'posterior mean and std of unconstrained $\sigma$')
post_sigma = dist.Gamma(a,b)
print([post_sigma.mean, jnp.sqrt(post_sigma.variance)])
# + [markdown] id="jMb50OhpT10F"
# ## Posterior samples
# + id="l9KzXRibQaA2"
predictive = Predictive(guide, params=svi_result.params, num_samples=nsamples)
samples = predictive(rng_key, data)
# + colab={"base_uri": "https://localhost:8080/", "height": 662} id="qiVYfYuUqYCO" outputId="bad3ed9c-c808-485c-c510-92472b5f6356"
print_summary(samples, 0.95, False)
plt.scatter(samples['mu'], samples['sigma'], s=64, alpha=0.1, edgecolor="none")
plt.xlim(mu_range[0], mu_range[1])
plt.ylim(sigma_range[0], sigma_range[1])
plt.xlabel(r'$\mu$')
plt.ylabel(r'$\sigma$')
plt.show()
az.plot_kde(samples['mu'], samples['sigma']);
plt.xlim(mu_range[0], mu_range[1])
plt.ylim(sigma_range[0], sigma_range[1])
plt.xlabel(r'$\mu$')
plt.ylabel(r'$\sigma$')
if plot_square: plt.axis('square')
plt.savefig('figures/gauss_params_1d_post_vi.pdf', dpi=300)
plt.show()
# + colab={"base_uri": "https://localhost:8080/", "height": 570} id="98TZ70O_q34k" outputId="901b6055-d0ea-4960-88b2-e41045ccaed1"
print(hpdi(samples['mu'], 0.95))
print(hpdi(samples['sigma'], 0.95))
fig, ax = plt.subplots()
az.plot_kde(samples['mu'], ax=ax, label=r'$\mu$')
fig, ax = plt.subplots()
az.plot_kde(samples['sigma'], ax=ax, label=r'$\sigma$')
# + [markdown] id="Egqg5eCHcGP2"
# # MCMC
# + colab={"base_uri": "https://localhost:8080/"} id="3qy3_SVgcCpR" outputId="c157d168-48bb-415c-a18b-81fedb66695a"
# NOTE(review): `model` has no sample site named 'data' (its observed site is
# "height"), so this condition handler appears to be a no-op; the observations
# actually enter through the `data` argument passed to mcmc.run below —
# confirm intent.
conditioned_model = numpyro.handlers.condition(model, {'data': data})
# NUTS within MCMC: 100 warmup iterations, then `nsamples` retained draws.
nuts_kernel = NUTS(conditioned_model)
mcmc = MCMC(nuts_kernel, num_warmup=100, num_samples=nsamples)
mcmc.run(rng_key_, data)
mcmc.print_summary()
samples = mcmc.get_samples()
# + colab={"base_uri": "https://localhost:8080/", "height": 662} id="R7ZEfXCkq0gI" outputId="d363495e-4c96-4bfc-9d65-f341ae1cbbbe"
print_summary(samples, 0.95, False)
plt.scatter(samples['mu'], samples['sigma'], s=64, alpha=0.1, edgecolor="none")
plt.xlim(mu_range[0], mu_range[1])
plt.ylim(sigma_range[0], sigma_range[1])
plt.xlabel(r'$\mu$')
plt.ylabel(r'$\sigma$')
plt.show()
az.plot_kde(samples['mu'], samples['sigma']);
plt.xlim(mu_range[0], mu_range[1])
plt.ylim(sigma_range[0], sigma_range[1])
plt.xlabel(r'$\mu$')
plt.ylabel(r'$\sigma$')
if plot_square: plt.axis('square')
plt.savefig('figures/gauss_params_1d_post_mcmc.pdf', dpi=300)
plt.show()
| [
"matplotlib.pyplot.title",
"numpyro.infer.MCMC",
"numpyro.infer.init_to_value",
"numpyro.distributions.Normal",
"pandas.read_csv",
"numpyro.distributions.Gamma",
"arviz.plot_kde",
"jax.random.PRNGKey",
"matplotlib.pyplot.figure",
"numpy.mean",
"numpyro.distributions.Categorical",
"numpy.set_pr... | [((1095, 1127), 'numpy.set_printoptions', 'np.set_printoptions', ([], {'precision': '(3)'}), '(precision=3)\n', (1114, 1127), True, 'import numpy as np\n'), ((1836, 1853), 'jax.random.PRNGKey', 'random.PRNGKey', (['(0)'], {}), '(0)\n', (1850, 1853), False, 'from jax import random, vmap\n'), ((1874, 1895), 'jax.random.split', 'random.split', (['rng_key'], {}), '(rng_key)\n', (1886, 1895), False, 'from jax import random, vmap\n'), ((3031, 3056), 'pandas.read_csv', 'pd.read_csv', (['url'], {'sep': '""";"""'}), "(url, sep=';')\n", (3042, 3056), True, 'import pandas as pd\n'), ((3170, 3204), 'jax.random.permutation', 'jax.random.permutation', (['rng_key', 'N'], {}), '(rng_key, N)\n', (3192, 3204), False, 'import jax\n'), ((4525, 4545), 'numpyro.distributions.Normal', 'dist.Normal', (['(178)', '(20)'], {}), '(178, 20)\n', (4536, 4545), True, 'import numpyro.distributions as dist\n'), ((4560, 4579), 'numpyro.distributions.Uniform', 'dist.Uniform', (['(0)', '(50)'], {}), '(0, 50)\n', (4572, 4579), True, 'import numpyro.distributions as dist\n'), ((4668, 4728), 'jax.numpy.linspace', 'jnp.linspace', ([], {'start': 'mu_range[0]', 'stop': 'mu_range[1]', 'num': 'ngrid'}), '(start=mu_range[0], stop=mu_range[1], num=ngrid)\n', (4680, 4728), True, 'import jax.numpy as jnp\n'), ((4742, 4808), 'jax.numpy.linspace', 'jnp.linspace', ([], {'start': 'sigma_range[0]', 'stop': 'sigma_range[1]', 'num': 'ngrid'}), '(start=sigma_range[0], stop=sigma_range[1], num=ngrid)\n', (4754, 4808), True, 'import jax.numpy as jnp\n'), ((4816, 4849), 'jax.numpy.meshgrid', 'jnp.meshgrid', (['mu_list', 'sigma_list'], {}), '(mu_list, sigma_list)\n', (4828, 4849), True, 'import jax.numpy as jnp\n'), ((5562, 5585), 'jax.numpy.sum', 'jnp.sum', (['prob2d'], {'axis': '(0)'}), '(prob2d, axis=0)\n', (5569, 5585), True, 'import jax.numpy as jnp\n'), ((5599, 5622), 'jax.numpy.sum', 'jnp.sum', (['prob2d'], {'axis': '(1)'}), '(prob2d, axis=1)\n', (5606, 5622), True, 'import jax.numpy as jnp\n'), 
((5624, 5636), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (5634, 5636), True, 'import matplotlib.pyplot as plt\n'), ((5637, 5675), 'matplotlib.pyplot.plot', 'plt.plot', (['mu_list', 'prob_mu'], {'label': '"""mu"""'}), "(mu_list, prob_mu, label='mu')\n", (5645, 5675), True, 'import matplotlib.pyplot as plt\n'), ((5676, 5688), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (5686, 5688), True, 'import matplotlib.pyplot as plt\n'), ((5689, 5762), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""figures/gauss_params_1d_post_grid_marginal_mu.pdf"""'], {'dpi': '(300)'}), "('figures/gauss_params_1d_post_grid_marginal_mu.pdf', dpi=300)\n", (5700, 5762), True, 'import matplotlib.pyplot as plt\n'), ((5763, 5773), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5771, 5773), True, 'import matplotlib.pyplot as plt\n'), ((5775, 5787), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (5785, 5787), True, 'import matplotlib.pyplot as plt\n'), ((5788, 5835), 'matplotlib.pyplot.plot', 'plt.plot', (['sigma_list', 'prob_sigma'], {'label': '"""sigma"""'}), "(sigma_list, prob_sigma, label='sigma')\n", (5796, 5835), True, 'import matplotlib.pyplot as plt\n'), ((5836, 5848), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (5846, 5848), True, 'import matplotlib.pyplot as plt\n'), ((5849, 5925), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""figures/gauss_params_1d_post_grid_marginal_sigma.pdf"""'], {'dpi': '(300)'}), "('figures/gauss_params_1d_post_grid_marginal_sigma.pdf', dpi=300)\n", (5860, 5925), True, 'import matplotlib.pyplot as plt\n'), ((5926, 5936), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5934, 5936), True, 'import matplotlib.pyplot as plt\n'), ((6203, 6223), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""$\\\\mu$"""'], {}), "('$\\\\mu$')\n", (6213, 6223), True, 'import matplotlib.pyplot as plt\n'), ((6224, 6247), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""$\\\\sigma$"""'], {}), 
"('$\\\\sigma$')\n", (6234, 6247), True, 'import matplotlib.pyplot as plt\n'), ((6283, 6353), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""figures/gauss_params_1d_post_grid_contours.pdf"""'], {'dpi': '(300)'}), "('figures/gauss_params_1d_post_grid_contours.pdf', dpi=300)\n", (6294, 6353), True, 'import matplotlib.pyplot as plt\n'), ((6354, 6364), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (6362, 6364), True, 'import matplotlib.pyplot as plt\n'), ((6661, 6681), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""$\\\\mu$"""'], {}), "('$\\\\mu$')\n", (6671, 6681), True, 'import matplotlib.pyplot as plt\n'), ((6682, 6705), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""$\\\\sigma$"""'], {}), "('$\\\\sigma$')\n", (6692, 6705), True, 'import matplotlib.pyplot as plt\n'), ((6741, 6810), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""figures/gauss_params_1d_post_grid_heatmap.pdf"""'], {'dpi': '(300)'}), "('figures/gauss_params_1d_post_grid_heatmap.pdf', dpi=300)\n", (6752, 6810), True, 'import matplotlib.pyplot as plt\n'), ((6811, 6821), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (6819, 6821), True, 'import matplotlib.pyplot as plt\n'), ((7272, 7307), 'numpyro.diagnostics.print_summary', 'print_summary', (['samples', '(0.95)', '(False)'], {}), '(samples, 0.95, False)\n', (7285, 7307), False, 'from numpyro.diagnostics import hpdi, print_summary\n'), ((7310, 7389), 'matplotlib.pyplot.scatter', 'plt.scatter', (["samples['mu']", "samples['sigma']"], {'s': '(64)', 'alpha': '(0.1)', 'edgecolor': '"""none"""'}), "(samples['mu'], samples['sigma'], s=64, alpha=0.1, edgecolor='none')\n", (7321, 7389), True, 'import matplotlib.pyplot as plt\n'), ((7390, 7424), 'matplotlib.pyplot.xlim', 'plt.xlim', (['mu_range[0]', 'mu_range[1]'], {}), '(mu_range[0], mu_range[1])\n', (7398, 7424), True, 'import matplotlib.pyplot as plt\n'), ((7425, 7465), 'matplotlib.pyplot.ylim', 'plt.ylim', (['sigma_range[0]', 'sigma_range[1]'], {}), '(sigma_range[0], 
sigma_range[1])\n', (7433, 7465), True, 'import matplotlib.pyplot as plt\n'), ((7466, 7486), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""$\\\\mu$"""'], {}), "('$\\\\mu$')\n", (7476, 7486), True, 'import matplotlib.pyplot as plt\n'), ((7487, 7510), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""$\\\\sigma$"""'], {}), "('$\\\\sigma$')\n", (7497, 7510), True, 'import matplotlib.pyplot as plt\n'), ((7511, 7529), 'matplotlib.pyplot.axis', 'plt.axis', (['"""square"""'], {}), "('square')\n", (7519, 7529), True, 'import matplotlib.pyplot as plt\n'), ((7530, 7540), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (7538, 7540), True, 'import matplotlib.pyplot as plt\n'), ((7542, 7586), 'arviz.plot_kde', 'az.plot_kde', (["samples['mu']", "samples['sigma']"], {}), "(samples['mu'], samples['sigma'])\n", (7553, 7586), True, 'import arviz as az\n'), ((7588, 7622), 'matplotlib.pyplot.xlim', 'plt.xlim', (['mu_range[0]', 'mu_range[1]'], {}), '(mu_range[0], mu_range[1])\n', (7596, 7622), True, 'import matplotlib.pyplot as plt\n'), ((7623, 7663), 'matplotlib.pyplot.ylim', 'plt.ylim', (['sigma_range[0]', 'sigma_range[1]'], {}), '(sigma_range[0], sigma_range[1])\n', (7631, 7663), True, 'import matplotlib.pyplot as plt\n'), ((7664, 7684), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""$\\\\mu$"""'], {}), "('$\\\\mu$')\n", (7674, 7684), True, 'import matplotlib.pyplot as plt\n'), ((7685, 7708), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""$\\\\sigma$"""'], {}), "('$\\\\sigma$')\n", (7695, 7708), True, 'import matplotlib.pyplot as plt\n'), ((7744, 7805), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""figures/gauss_params_1d_post_grid.pdf"""'], {'dpi': '(300)'}), "('figures/gauss_params_1d_post_grid.pdf', dpi=300)\n", (7755, 7805), True, 'import matplotlib.pyplot as plt\n'), ((7806, 7816), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (7814, 7816), True, 'import matplotlib.pyplot as plt\n'), ((8086, 8100), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), 
'()\n', (8098, 8100), True, 'import matplotlib.pyplot as plt\n'), ((8101, 8150), 'arviz.plot_kde', 'az.plot_kde', (["samples['mu']"], {'ax': 'ax', 'label': '"""$\\\\mu$"""'}), "(samples['mu'], ax=ax, label='$\\\\mu$')\n", (8112, 8150), True, 'import arviz as az\n'), ((8162, 8176), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (8174, 8176), True, 'import matplotlib.pyplot as plt\n'), ((8177, 8232), 'arviz.plot_kde', 'az.plot_kde', (["samples['sigma']"], {'ax': 'ax', 'label': '"""$\\\\sigma$"""'}), "(samples['sigma'], ax=ax, label='$\\\\sigma$')\n", (8188, 8232), True, 'import arviz as az\n'), ((8754, 8785), 'numpyro.infer.autoguide.AutoLaplaceApproximation', 'AutoLaplaceApproximation', (['model'], {}), '(model)\n', (8778, 8785), False, 'from numpyro.infer.autoguide import AutoLaplaceApproximation\n'), ((8897, 8909), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (8907, 8909), True, 'import matplotlib.pyplot as plt\n'), ((8910, 8937), 'matplotlib.pyplot.plot', 'plt.plot', (['svi_result.losses'], {}), '(svi_result.losses)\n', (8918, 8937), True, 'import matplotlib.pyplot as plt\n'), ((9313, 9325), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (9323, 9325), True, 'import matplotlib.pyplot as plt\n'), ((9326, 9353), 'matplotlib.pyplot.plot', 'plt.plot', (['svi_result.losses'], {}), '(svi_result.losses)\n', (9334, 9353), True, 'import matplotlib.pyplot as plt\n'), ((9653, 9688), 'numpyro.diagnostics.print_summary', 'print_summary', (['samples', '(0.95)', '(False)'], {}), '(samples, 0.95, False)\n', (9666, 9688), False, 'from numpyro.diagnostics import hpdi, print_summary\n'), ((9691, 9770), 'matplotlib.pyplot.scatter', 'plt.scatter', (["samples['mu']", "samples['sigma']"], {'s': '(64)', 'alpha': '(0.1)', 'edgecolor': '"""none"""'}), "(samples['mu'], samples['sigma'], s=64, alpha=0.1, edgecolor='none')\n", (9702, 9770), True, 'import matplotlib.pyplot as plt\n'), ((9771, 9805), 'matplotlib.pyplot.xlim', 'plt.xlim', 
(['mu_range[0]', 'mu_range[1]'], {}), '(mu_range[0], mu_range[1])\n', (9779, 9805), True, 'import matplotlib.pyplot as plt\n'), ((9806, 9846), 'matplotlib.pyplot.ylim', 'plt.ylim', (['sigma_range[0]', 'sigma_range[1]'], {}), '(sigma_range[0], sigma_range[1])\n', (9814, 9846), True, 'import matplotlib.pyplot as plt\n'), ((9847, 9867), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""$\\\\mu$"""'], {}), "('$\\\\mu$')\n", (9857, 9867), True, 'import matplotlib.pyplot as plt\n'), ((9868, 9891), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""$\\\\sigma$"""'], {}), "('$\\\\sigma$')\n", (9878, 9891), True, 'import matplotlib.pyplot as plt\n'), ((9892, 9902), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (9900, 9902), True, 'import matplotlib.pyplot as plt\n'), ((9904, 9948), 'arviz.plot_kde', 'az.plot_kde', (["samples['mu']", "samples['sigma']"], {}), "(samples['mu'], samples['sigma'])\n", (9915, 9948), True, 'import arviz as az\n'), ((9950, 9984), 'matplotlib.pyplot.xlim', 'plt.xlim', (['mu_range[0]', 'mu_range[1]'], {}), '(mu_range[0], mu_range[1])\n', (9958, 9984), True, 'import matplotlib.pyplot as plt\n'), ((9985, 10025), 'matplotlib.pyplot.ylim', 'plt.ylim', (['sigma_range[0]', 'sigma_range[1]'], {}), '(sigma_range[0], sigma_range[1])\n', (9993, 10025), True, 'import matplotlib.pyplot as plt\n'), ((10026, 10046), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""$\\\\mu$"""'], {}), "('$\\\\mu$')\n", (10036, 10046), True, 'import matplotlib.pyplot as plt\n'), ((10047, 10070), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""$\\\\sigma$"""'], {}), "('$\\\\sigma$')\n", (10057, 10070), True, 'import matplotlib.pyplot as plt\n'), ((10106, 10170), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""figures/gauss_params_1d_post_laplace.pdf"""'], {'dpi': '(300)'}), "('figures/gauss_params_1d_post_laplace.pdf', dpi=300)\n", (10117, 10170), True, 'import matplotlib.pyplot as plt\n'), ((10171, 10181), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (10179, 10181), 
True, 'import matplotlib.pyplot as plt\n'), ((10394, 10408), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (10406, 10408), True, 'import matplotlib.pyplot as plt\n'), ((10409, 10458), 'arviz.plot_kde', 'az.plot_kde', (["samples['mu']"], {'ax': 'ax', 'label': '"""$\\\\mu$"""'}), "(samples['mu'], ax=ax, label='$\\\\mu$')\n", (10420, 10458), True, 'import arviz as az\n'), ((10470, 10484), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (10482, 10484), True, 'import matplotlib.pyplot as plt\n'), ((10485, 10540), 'arviz.plot_kde', 'az.plot_kde', (["samples['sigma']"], {'ax': 'ax', 'label': '"""$\\\\sigma$"""'}), "(samples['sigma'], ax=ax, label='$\\\\sigma$')\n", (10496, 10540), True, 'import arviz as az\n'), ((12079, 12089), 'jax.numpy.cov', 'jnp.cov', (['x'], {}), '(x)\n', (12086, 12089), True, 'import jax.numpy as jnp\n'), ((12946, 12995), 'numpyro.optim.Momentum', 'numpyro.optim.Momentum', ([], {'step_size': '(0.001)', 'mass': '(0.1)'}), '(step_size=0.001, mass=0.1)\n', (12968, 12995), False, 'import numpyro\n'), ((13171, 13198), 'matplotlib.pyplot.plot', 'plt.plot', (['svi_result.losses'], {}), '(svi_result.losses)\n', (13179, 13198), True, 'import matplotlib.pyplot as plt\n'), ((13199, 13216), 'matplotlib.pyplot.title', 'plt.title', (['"""ELBO"""'], {}), "('ELBO')\n", (13208, 13216), True, 'import matplotlib.pyplot as plt\n'), ((13217, 13235), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""step"""'], {}), "('step')\n", (13227, 13235), True, 'import matplotlib.pyplot as plt\n'), ((13236, 13254), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""loss"""'], {}), "('loss')\n", (13246, 13254), True, 'import matplotlib.pyplot as plt\n'), ((13475, 13507), 'numpy.array', 'np.array', (["svi_result.params['a']"], {}), "(svi_result.params['a'])\n", (13483, 13507), True, 'import numpy as np\n'), ((13512, 13544), 'numpy.array', 'np.array', (["svi_result.params['b']"], {}), "(svi_result.params['b'])\n", (13520, 13544), True, 'import numpy as 
np\n'), ((13549, 13581), 'numpy.array', 'np.array', (["svi_result.params['m']"], {}), "(svi_result.params['m'])\n", (13557, 13581), True, 'import numpy as np\n'), ((13586, 13618), 'numpy.array', 'np.array', (["svi_result.params['s']"], {}), "(svi_result.params['s'])\n", (13594, 13618), True, 'import numpy as np\n'), ((13870, 13887), 'numpyro.distributions.Normal', 'dist.Normal', (['m', 's'], {}), '(m, s)\n', (13881, 13887), True, 'import numpyro.distributions as dist\n'), ((14015, 14031), 'numpyro.distributions.Gamma', 'dist.Gamma', (['a', 'b'], {}), '(a, b)\n', (14025, 14031), True, 'import numpyro.distributions as dist\n'), ((14180, 14245), 'numpyro.infer.Predictive', 'Predictive', (['guide'], {'params': 'svi_result.params', 'num_samples': 'nsamples'}), '(guide, params=svi_result.params, num_samples=nsamples)\n', (14190, 14245), False, 'from numpyro.infer import Predictive\n'), ((14416, 14451), 'numpyro.diagnostics.print_summary', 'print_summary', (['samples', '(0.95)', '(False)'], {}), '(samples, 0.95, False)\n', (14429, 14451), False, 'from numpyro.diagnostics import hpdi, print_summary\n'), ((14454, 14533), 'matplotlib.pyplot.scatter', 'plt.scatter', (["samples['mu']", "samples['sigma']"], {'s': '(64)', 'alpha': '(0.1)', 'edgecolor': '"""none"""'}), "(samples['mu'], samples['sigma'], s=64, alpha=0.1, edgecolor='none')\n", (14465, 14533), True, 'import matplotlib.pyplot as plt\n'), ((14534, 14568), 'matplotlib.pyplot.xlim', 'plt.xlim', (['mu_range[0]', 'mu_range[1]'], {}), '(mu_range[0], mu_range[1])\n', (14542, 14568), True, 'import matplotlib.pyplot as plt\n'), ((14569, 14609), 'matplotlib.pyplot.ylim', 'plt.ylim', (['sigma_range[0]', 'sigma_range[1]'], {}), '(sigma_range[0], sigma_range[1])\n', (14577, 14609), True, 'import matplotlib.pyplot as plt\n'), ((14610, 14630), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""$\\\\mu$"""'], {}), "('$\\\\mu$')\n", (14620, 14630), True, 'import matplotlib.pyplot as plt\n'), ((14631, 14654), 'matplotlib.pyplot.ylabel', 
'plt.ylabel', (['"""$\\\\sigma$"""'], {}), "('$\\\\sigma$')\n", (14641, 14654), True, 'import matplotlib.pyplot as plt\n'), ((14655, 14665), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (14663, 14665), True, 'import matplotlib.pyplot as plt\n'), ((14667, 14711), 'arviz.plot_kde', 'az.plot_kde', (["samples['mu']", "samples['sigma']"], {}), "(samples['mu'], samples['sigma'])\n", (14678, 14711), True, 'import arviz as az\n'), ((14713, 14747), 'matplotlib.pyplot.xlim', 'plt.xlim', (['mu_range[0]', 'mu_range[1]'], {}), '(mu_range[0], mu_range[1])\n', (14721, 14747), True, 'import matplotlib.pyplot as plt\n'), ((14748, 14788), 'matplotlib.pyplot.ylim', 'plt.ylim', (['sigma_range[0]', 'sigma_range[1]'], {}), '(sigma_range[0], sigma_range[1])\n', (14756, 14788), True, 'import matplotlib.pyplot as plt\n'), ((14789, 14809), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""$\\\\mu$"""'], {}), "('$\\\\mu$')\n", (14799, 14809), True, 'import matplotlib.pyplot as plt\n'), ((14810, 14833), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""$\\\\sigma$"""'], {}), "('$\\\\sigma$')\n", (14820, 14833), True, 'import matplotlib.pyplot as plt\n'), ((14869, 14928), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""figures/gauss_params_1d_post_vi.pdf"""'], {'dpi': '(300)'}), "('figures/gauss_params_1d_post_vi.pdf', dpi=300)\n", (14880, 14928), True, 'import matplotlib.pyplot as plt\n'), ((14929, 14939), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (14937, 14939), True, 'import matplotlib.pyplot as plt\n'), ((15152, 15166), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (15164, 15166), True, 'import matplotlib.pyplot as plt\n'), ((15167, 15216), 'arviz.plot_kde', 'az.plot_kde', (["samples['mu']"], {'ax': 'ax', 'label': '"""$\\\\mu$"""'}), "(samples['mu'], ax=ax, label='$\\\\mu$')\n", (15178, 15216), True, 'import arviz as az\n'), ((15228, 15242), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (15240, 15242), True, 'import 
matplotlib.pyplot as plt\n'), ((15243, 15298), 'arviz.plot_kde', 'az.plot_kde', (["samples['sigma']"], {'ax': 'ax', 'label': '"""$\\\\sigma$"""'}), "(samples['sigma'], ax=ax, label='$\\\\sigma$')\n", (15254, 15298), True, 'import arviz as az\n'), ((15479, 15528), 'numpyro.handlers.condition', 'numpyro.handlers.condition', (['model', "{'data': data}"], {}), "(model, {'data': data})\n", (15505, 15528), False, 'import numpyro\n'), ((15543, 15566), 'numpyro.infer.NUTS', 'NUTS', (['conditioned_model'], {}), '(conditioned_model)\n', (15547, 15566), False, 'from numpyro.infer import MCMC, NUTS\n'), ((15574, 15629), 'numpyro.infer.MCMC', 'MCMC', (['nuts_kernel'], {'num_warmup': '(100)', 'num_samples': 'nsamples'}), '(nuts_kernel, num_warmup=100, num_samples=nsamples)\n', (15578, 15629), False, 'from numpyro.infer import MCMC, NUTS\n'), ((15839, 15874), 'numpyro.diagnostics.print_summary', 'print_summary', (['samples', '(0.95)', '(False)'], {}), '(samples, 0.95, False)\n', (15852, 15874), False, 'from numpyro.diagnostics import hpdi, print_summary\n'), ((15877, 15956), 'matplotlib.pyplot.scatter', 'plt.scatter', (["samples['mu']", "samples['sigma']"], {'s': '(64)', 'alpha': '(0.1)', 'edgecolor': '"""none"""'}), "(samples['mu'], samples['sigma'], s=64, alpha=0.1, edgecolor='none')\n", (15888, 15956), True, 'import matplotlib.pyplot as plt\n'), ((15957, 15991), 'matplotlib.pyplot.xlim', 'plt.xlim', (['mu_range[0]', 'mu_range[1]'], {}), '(mu_range[0], mu_range[1])\n', (15965, 15991), True, 'import matplotlib.pyplot as plt\n'), ((15992, 16032), 'matplotlib.pyplot.ylim', 'plt.ylim', (['sigma_range[0]', 'sigma_range[1]'], {}), '(sigma_range[0], sigma_range[1])\n', (16000, 16032), True, 'import matplotlib.pyplot as plt\n'), ((16033, 16053), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""$\\\\mu$"""'], {}), "('$\\\\mu$')\n", (16043, 16053), True, 'import matplotlib.pyplot as plt\n'), ((16054, 16077), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""$\\\\sigma$"""'], {}), 
"('$\\\\sigma$')\n", (16064, 16077), True, 'import matplotlib.pyplot as plt\n'), ((16078, 16088), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (16086, 16088), True, 'import matplotlib.pyplot as plt\n'), ((16090, 16134), 'arviz.plot_kde', 'az.plot_kde', (["samples['mu']", "samples['sigma']"], {}), "(samples['mu'], samples['sigma'])\n", (16101, 16134), True, 'import arviz as az\n'), ((16136, 16170), 'matplotlib.pyplot.xlim', 'plt.xlim', (['mu_range[0]', 'mu_range[1]'], {}), '(mu_range[0], mu_range[1])\n', (16144, 16170), True, 'import matplotlib.pyplot as plt\n'), ((16171, 16211), 'matplotlib.pyplot.ylim', 'plt.ylim', (['sigma_range[0]', 'sigma_range[1]'], {}), '(sigma_range[0], sigma_range[1])\n', (16179, 16211), True, 'import matplotlib.pyplot as plt\n'), ((16212, 16232), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""$\\\\mu$"""'], {}), "('$\\\\mu$')\n", (16222, 16232), True, 'import matplotlib.pyplot as plt\n'), ((16233, 16256), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""$\\\\sigma$"""'], {}), "('$\\\\sigma$')\n", (16243, 16256), True, 'import matplotlib.pyplot as plt\n'), ((16292, 16353), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""figures/gauss_params_1d_post_mcmc.pdf"""'], {'dpi': '(300)'}), "('figures/gauss_params_1d_post_mcmc.pdf', dpi=300)\n", (16303, 16353), True, 'import matplotlib.pyplot as plt\n'), ((16354, 16364), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (16362, 16364), True, 'import matplotlib.pyplot as plt\n'), ((3492, 3505), 'numpy.mean', 'np.mean', (['data'], {}), '(data)\n', (3499, 3505), True, 'import numpy as np\n'), ((3513, 3525), 'numpy.std', 'np.std', (['data'], {}), '(data)\n', (3519, 3525), True, 'import numpy as np\n'), ((5336, 5357), 'jax.numpy.sum', 'jnp.sum', (["post['prob']"], {}), "(post['prob'])\n", (5343, 5357), True, 'import jax.numpy as jnp\n'), ((6264, 6282), 'matplotlib.pyplot.axis', 'plt.axis', (['"""square"""'], {}), "('square')\n", (6272, 6282), True, 'import matplotlib.pyplot as plt\n'), 
((6722, 6740), 'matplotlib.pyplot.axis', 'plt.axis', (['"""square"""'], {}), "('square')\n", (6730, 6740), True, 'import matplotlib.pyplot as plt\n'), ((6977, 6994), 'jax.random.PRNGKey', 'random.PRNGKey', (['(0)'], {}), '(0)\n', (6991, 6994), False, 'from jax import random, vmap\n'), ((7725, 7743), 'matplotlib.pyplot.axis', 'plt.axis', (['"""square"""'], {}), "('square')\n", (7733, 7743), True, 'import matplotlib.pyplot as plt\n'), ((8012, 8037), 'numpyro.diagnostics.hpdi', 'hpdi', (["samples['mu']", '(0.95)'], {}), "(samples['mu'], 0.95)\n", (8016, 8037), False, 'from numpyro.diagnostics import hpdi, print_summary\n'), ((8045, 8073), 'numpyro.diagnostics.hpdi', 'hpdi', (["samples['sigma']", '(0.95)'], {}), "(samples['sigma'], 0.95)\n", (8049, 8073), False, 'from numpyro.diagnostics import hpdi, print_summary\n'), ((8602, 8632), 'numpyro.sample', 'numpyro.sample', (['"""mu"""', 'mu_prior'], {}), "('mu', mu_prior)\n", (8616, 8632), False, 'import numpyro\n'), ((8645, 8681), 'numpyro.sample', 'numpyro.sample', (['"""sigma"""', 'sigma_prior'], {}), "('sigma', sigma_prior)\n", (8659, 8681), False, 'import numpyro\n'), ((8810, 8823), 'numpyro.optim.Adam', 'optim.Adam', (['(1)'], {}), '(1)\n', (8820, 8823), True, 'import numpyro.optim as optim\n'), ((8825, 8837), 'numpyro.infer.Trace_ELBO', 'Trace_ELBO', ([], {}), '()\n', (8835, 8837), False, 'from numpyro.infer import SVI, Trace_ELBO, init_to_value\n'), ((8871, 8888), 'jax.random.PRNGKey', 'random.PRNGKey', (['(0)'], {}), '(0)\n', (8885, 8888), False, 'from jax import random, vmap\n'), ((9224, 9239), 'numpyro.optim.Adam', 'optim.Adam', (['(0.1)'], {}), '(0.1)\n', (9234, 9239), True, 'import numpyro.optim as optim\n'), ((9241, 9253), 'numpyro.infer.Trace_ELBO', 'Trace_ELBO', ([], {}), '()\n', (9251, 9253), False, 'from numpyro.infer import SVI, Trace_ELBO, init_to_value\n'), ((9287, 9304), 'jax.random.PRNGKey', 'random.PRNGKey', (['(0)'], {}), '(0)\n', (9301, 9304), False, 'from jax import random, vmap\n'), ((9469, 
9486), 'jax.random.PRNGKey', 'random.PRNGKey', (['(1)'], {}), '(1)\n', (9483, 9486), False, 'from jax import random, vmap\n'), ((10087, 10105), 'matplotlib.pyplot.axis', 'plt.axis', (['"""square"""'], {}), "('square')\n", (10095, 10105), True, 'import matplotlib.pyplot as plt\n'), ((10320, 10345), 'numpyro.diagnostics.hpdi', 'hpdi', (["samples['mu']", '(0.95)'], {}), "(samples['mu'], 0.95)\n", (10324, 10345), False, 'from numpyro.diagnostics import hpdi, print_summary\n'), ((10353, 10381), 'numpyro.diagnostics.hpdi', 'hpdi', (["samples['sigma']", '(0.95)'], {}), "(samples['sigma'], 0.95)\n", (10357, 10381), False, 'from numpyro.diagnostics import hpdi, print_summary\n'), ((11053, 11073), 'jax.numpy.log', 'jnp.log', (['(p / (1 - p))'], {}), '(p / (1 - p))\n', (11060, 11073), True, 'import jax.numpy as jnp\n'), ((11499, 11538), 'jax.numpy.mean', 'jnp.mean', (['unconstrained_samples'], {'axis': '(0)'}), '(unconstrained_samples, axis=0)\n', (11507, 11538), True, 'import jax.numpy as jnp\n'), ((11546, 11589), 'jax.numpy.mean', 'jnp.mean', (["constrained_samples['mu']"], {'axis': '(0)'}), "(constrained_samples['mu'], axis=0)\n", (11554, 11589), True, 'import jax.numpy as jnp\n'), ((11597, 11643), 'jax.numpy.mean', 'jnp.mean', (["constrained_samples['sigma']"], {'axis': '(0)'}), "(constrained_samples['sigma'], axis=0)\n", (11605, 11643), True, 'import jax.numpy as jnp\n'), ((11922, 11939), 'jax.random.PRNGKey', 'random.PRNGKey', (['(1)'], {}), '(1)\n', (11936, 11939), False, 'from jax import random, vmap\n'), ((12051, 12070), 'jax.numpy.mean', 'jnp.mean', (['x'], {'axis': '(1)'}), '(x, axis=1)\n', (12059, 12070), True, 'import jax.numpy as jnp\n'), ((12561, 12575), 'jax.numpy.mean', 'jnp.mean', (['data'], {}), '(data)\n', (12569, 12575), True, 'import jax.numpy as jnp\n'), ((12589, 12602), 'jax.numpy.std', 'jnp.std', (['data'], {}), '(data)\n', (12596, 12602), True, 'import jax.numpy as jnp\n'), ((12609, 12638), 'numpyro.param', 'numpyro.param', (['"""m"""', 'data_mean'], 
{}), "('m', data_mean)\n", (12622, 12638), False, 'import numpyro\n'), ((12646, 12701), 'numpyro.param', 'numpyro.param', (['"""s"""', '(10)'], {'constraint': 'constraints.positive'}), "('s', 10, constraint=constraints.positive)\n", (12659, 12701), False, 'import numpyro\n'), ((12709, 12770), 'numpyro.param', 'numpyro.param', (['"""a"""', 'data_std'], {'constraint': 'constraints.positive'}), "('a', data_std, constraint=constraints.positive)\n", (12722, 12770), False, 'import numpyro\n'), ((12778, 12832), 'numpyro.param', 'numpyro.param', (['"""b"""', '(1)'], {'constraint': 'constraints.positive'}), "('b', 1, constraint=constraints.positive)\n", (12791, 12832), False, 'import numpyro\n'), ((13761, 13775), 'jax.numpy.mean', 'jnp.mean', (['data'], {}), '(data)\n', (13769, 13775), True, 'import jax.numpy as jnp\n'), ((13800, 13813), 'jax.numpy.std', 'jnp.std', (['data'], {}), '(data)\n', (13807, 13813), True, 'import jax.numpy as jnp\n'), ((14850, 14868), 'matplotlib.pyplot.axis', 'plt.axis', (['"""square"""'], {}), "('square')\n", (14858, 14868), True, 'import matplotlib.pyplot as plt\n'), ((15078, 15103), 'numpyro.diagnostics.hpdi', 'hpdi', (["samples['mu']", '(0.95)'], {}), "(samples['mu'], 0.95)\n", (15082, 15103), False, 'from numpyro.diagnostics import hpdi, print_summary\n'), ((15111, 15139), 'numpyro.diagnostics.hpdi', 'hpdi', (["samples['sigma']", '(0.95)'], {}), "(samples['sigma'], 0.95)\n", (15115, 15139), False, 'from numpyro.diagnostics import hpdi, print_summary\n'), ((16273, 16291), 'matplotlib.pyplot.axis', 'plt.axis', (['"""square"""'], {}), "('square')\n", (16281, 16291), True, 'import matplotlib.pyplot as plt\n'), ((5291, 5312), 'jax.numpy.max', 'jnp.max', (["post['prob']"], {}), "(post['prob'])\n", (5298, 5312), True, 'import jax.numpy as jnp\n'), ((6941, 6969), 'numpyro.distributions.Categorical', 'dist.Categorical', ([], {'probs': 'prob'}), '(probs=prob)\n', (6957, 6969), True, 'import numpyro.distributions as dist\n'), ((8711, 8733), 
'numpyro.distributions.Normal', 'dist.Normal', (['mu', 'sigma'], {}), '(mu, sigma)\n', (8722, 8733), True, 'import numpyro.distributions as dist\n'), ((9171, 9198), 'numpyro.infer.init_to_value', 'init_to_value', ([], {'values': 'start'}), '(values=start)\n', (9184, 9198), False, 'from numpyro.infer import SVI, Trace_ELBO, init_to_value\n'), ((12862, 12879), 'numpyro.distributions.Normal', 'dist.Normal', (['m', 's'], {}), '(m, s)\n', (12873, 12879), True, 'import numpyro.distributions as dist\n'), ((12915, 12931), 'numpyro.distributions.Gamma', 'dist.Gamma', (['a', 'b'], {}), '(a, b)\n', (12925, 12931), True, 'import numpyro.distributions as dist\n'), ((13036, 13048), 'numpyro.infer.Trace_ELBO', 'Trace_ELBO', ([], {}), '()\n', (13046, 13048), False, 'from numpyro.infer import SVI, Trace_ELBO, init_to_value\n'), ((13911, 13939), 'jax.numpy.sqrt', 'jnp.sqrt', (['post_mean.variance'], {}), '(post_mean.variance)\n', (13919, 13939), True, 'import jax.numpy as jnp\n'), ((14055, 14084), 'jax.numpy.sqrt', 'jnp.sqrt', (['post_sigma.variance'], {}), '(post_sigma.variance)\n', (14063, 14084), True, 'import jax.numpy as jnp\n'), ((1727, 1759), 'jax.lib.xla_bridge.get_backend', 'jax.lib.xla_bridge.get_backend', ([], {}), '()\n', (1757, 1759), False, 'import jax\n'), ((11101, 11112), 'jax.numpy.exp', 'jnp.exp', (['(-a)'], {}), '(-a)\n', (11108, 11112), True, 'import jax.numpy as jnp\n'), ((12203, 12221), 'jax.numpy.diagonal', 'jnp.diagonal', (['vcov'], {}), '(vcov)\n', (12215, 12221), True, 'import jax.numpy as jnp\n'), ((12223, 12241), 'jax.numpy.diagonal', 'jnp.diagonal', (['vcov'], {}), '(vcov)\n', (12235, 12241), True, 'import jax.numpy as jnp\n'), ((5035, 5057), 'numpyro.distributions.Normal', 'dist.Normal', (['mu', 'sigma'], {}), '(mu, sigma)\n', (5046, 5057), True, 'import numpyro.distributions as dist\n')] |
import logging
import numpy as np
from pomegranate.distributions.DiscreteDistribution import DiscreteDistribution
from scipy.stats import entropy
class GenerativeModel:
    """
    Predictive Processing Generative Model
    This is a generative model, implemented as a Bayesian Causal Network. The three functions
    prediction, prediction error and prediction error minimization are defined for this model.
    :param peepo : Peepo organism - implementation of Peepo class
    Contains a PeepoNetwork, representing the Bayesian Causal Network. Causes are hypothesis variables,
    effects are observational variables. peepo.network.to_pomegranate() will be called upon initialization to fetch
    the pomegranate network upon which all the computations are done.
    :param n_jobs : Number of process to spawn for multiprocessing. By default 1 = no additional processes spawned
    :type peepo : Peepo
    :type n_jobs : int
    TODO: Model Update, e.g. through: self.atomic_updates = [self.add_node, self.add_edge, self.change_parameters]
    TODO: Integrate PRECISION BASED WEIGHTING on prediction errors. E.g. prediction error minimization should only
    TODO: happen if the prediction errors have enough weight assigned to them. This can depend on context,
    TODO: the organism's goal, or other ways.
    """
    def __init__(self, peepo, n_jobs=1):
        self.peepo = peepo
        # Pomegranate BayesianNetwork built once from the declarative PeepoNetwork;
        # all inference below runs against this object.
        self.bayesian_network = self.peepo.network.to_pomegranate()
        # NOTE(review): n_jobs is stored but not used anywhere in this class yet.
        self.n_jobs = n_jobs
    def process(self, structure_learning=False):
        """
        Processes one flow in the predictive processing algorithm:
        1) prediction
        2) prediction error
        3) prediction error minimization (hypothesis or model update)
        Returns the total prediction error size observed (for informational purposes...)
        If structure_learning is True, only inference will be done. Hypothesis updates will not happen. This should
        be used for learning the structure of a module, by manually setting the hypothesis and comparing errors of
        different toplogies.
        """
        total_prediction_error_size = 0
        # predict() yields one distribution per network state, in state index order.
        for index, node in enumerate(self.predict()):
            node_name = self.bayesian_network.states[index].name
            if self.is_leaf(index):
                # Sort the (value, probability) items by value so the probability
                # vector has a stable, reproducible ordering.
                prediction = np.array([x[1] for x in sorted(node.items(), key=lambda tup: tup[0])])
                observation = self.peepo.observation(node_name)
                prediction_error = self.error(prediction, observation)
                prediction_error_size = self.error_size(prediction, observation)
                precision = self.precision(prediction)
                total_prediction_error_size += prediction_error_size
                # Sometimes numpy entropy calculation returns extremely small numbers when there's no error,
                # hence the 0.1 threshold before triggering error minimization.
                if prediction_error_size > 0.1 and not structure_learning:
                    logging.debug(
                        "node[%s] prediction-error ||| predicted %s -vs- %s observed ||| PES %s ||| PRECISION %s",
                        node_name, prediction, observation, prediction_error_size, precision)
                    self.error_minimization(node_name=node_name,
                                            precision=precision,
                                            prediction_error=prediction_error,
                                            prediction=prediction)
        return total_prediction_error_size
    def predict(self):
        """
        Predicts the leaf nodes (i.e. the observational nodes) based on the root nodes (i.e. the belief nodes)
        :return: prediction for all leaf nodes, a prediction is a probability distribution
        :rtype: list of Distributions
        #TODO: A fundamental problem with PP?: Cannot do prediction>error minimization with one loop per node,
        #TODO: since a sister LEN node which does not yet have the correct input will revert the hypothesis update.
        """
        # Roots are clamped to their current most-likely values; pomegranate
        # then infers the distributions of all remaining (leaf) nodes.
        evidence = self.get_root_values()
        return self.bayesian_network.predict_proba(evidence)
    @staticmethod
    def error(pred, obs):
        """
        Calculates the prediction error as the residual of subtracting the predicted inputs from the observed inputs
        :param pred: predicted sensory inputs
        :param obs: observed sensory inputs
        :return: prediction error
        :type pred : np.array
        :type obs : np.array
        :rtype : np.array
        """
        return obs - pred
    @staticmethod
    def error_size(pred, obs):
        """
        Calculates the size of the prediction error as the Kullback-Leibler divergence. This responds the magnitude
        of the prediction error, how wrong the prediction was.
        :param pred: predicted sensory inputs
        :param obs: observed sensory inputs
        :return: prediction error size
        :type pred : np.array
        :type obs : np.array
        :rtype : float
        """
        # scipy.stats.entropy(p, q) with two arguments is the KL divergence D(obs || pred).
        return entropy(obs, pred)
    @staticmethod
    def precision(pred):
        """
        Calculates the precision of the prediction, indicating the certainty of the prediction.
        Usually calculated as the negative log likelihood (TODO)
        :param pred: Prediction to calculate the precision for
        :return: precision of the prediction
        :type: pred: np.array
        :rtype: float
        """
        # Currently Shannon entropy in bits; lower entropy = more confident prediction.
        return entropy(pred, base=2)
    def error_minimization(self, node_name, precision, prediction_error, prediction):
        """
        Attempts to minimize the prediction error by one of the possible PEM methods:
        1) Hypothesis Update
        2) Model Update
        :param node_name: name of the node causing the prediction error
        :param precision: precision of the prediction
        :param prediction_error: the prediction error itself
        :param prediction: prediction causing the prediction error
        :type node_name : str
        :type precision: float
        :type prediction_error: np.array
        :type prediction: np.array
        """
        # Only hypothesis update is implemented; precision is accepted but unused here.
        self.hypothesis_update(node_name, prediction_error, prediction)
    def hypothesis_update(self, node_name, prediction_error, prediction):
        """
        Updates the hypotheses of the generative model to minimize prediction error
        :param node_name: name of the node causing the prediction error
        :param prediction_error: the prediction error itself
        :param prediction: prediction causing the prediction error
        :type node_name : str
        :type prediction_error: np.array
        :type prediction: np.array
        """
        # Proprioceptive (action) nodes minimize error by acting on the world
        # instead of revising beliefs.
        if node_name in self.peepo.network.get_pro_nodes():
            self.peepo.action(node_name, prediction)
        else:
            # prediction_error + prediction == observation (see error()), so the
            # evidence is the index of the most probable observed state.
            evidence = {node_name: np.argmax(prediction_error + prediction)}
            result = self.bayesian_network.predict_proba(evidence)
            # Replace each root's prior with its posterior given the observation
            # (mutates the pomegranate network in place).
            for root in self.get_roots():
                root_index = self.get_node_index(root.name)
                old_hypo = self.bayesian_network.states[root_index].distribution.items()
                new_hypo = result[root_index].items()
                self.bayesian_network.states[root_index].distribution = DiscreteDistribution(dict(new_hypo))
                logging.debug("node[%s] hypothesis-update from %s to %s", root.name, old_hypo, new_hypo)
    def get_root_values(self):
        # Most-likely value per root node, keyed by node name.
        return {x.name: x.distribution.mle() for x in self.get_roots()}
    def get_roots(self):
        return [x for x in self.bayesian_network.states if x.name in self.peepo.network.get_root_nodes()]
    def get_leaves(self):
        return [x for x in self.bayesian_network.states if x.name in self.peepo.network.get_leaf_nodes()]
    def get_node_index(self, node_name):
        for x, state in enumerate(self.bayesian_network.states):
            if state.name == node_name:
                return x
        raise ValueError('Node %s does not exist in network.', node_name)
    def is_leaf(self, index):
        # pomegranate's structure[i] lists node i's parent indices, so a node is a
        # leaf iff it appears in no node's parent tuple (it has no children).
        return not any(index in node_parents for node_parents in self.bayesian_network.structure)
    def is_root(self, index):
        # NOTE(review): this returns True for any node that HAS children (i.e. the
        # exact negation of is_leaf), not for nodes without parents. For a strictly
        # two-layer network the two coincide, but for deeper networks an internal
        # node would be classified as root - confirm intended semantics.
        return any(index in node_parents for node_parents in self.bayesian_network.structure)
| [
"scipy.stats.entropy",
"logging.debug",
"numpy.argmax"
] | [((5025, 5043), 'scipy.stats.entropy', 'entropy', (['obs', 'pred'], {}), '(obs, pred)\n', (5032, 5043), False, 'from scipy.stats import entropy\n'), ((5450, 5471), 'scipy.stats.entropy', 'entropy', (['pred'], {'base': '(2)'}), '(pred, base=2)\n', (5457, 5471), False, 'from scipy.stats import entropy\n'), ((6848, 6888), 'numpy.argmax', 'np.argmax', (['(prediction_error + prediction)'], {}), '(prediction_error + prediction)\n', (6857, 6888), True, 'import numpy as np\n'), ((7330, 7422), 'logging.debug', 'logging.debug', (['"""node[%s] hypothesis-update from %s to %s"""', 'root.name', 'old_hypo', 'new_hypo'], {}), "('node[%s] hypothesis-update from %s to %s', root.name,\n old_hypo, new_hypo)\n", (7343, 7422), False, 'import logging\n'), ((2956, 3140), 'logging.debug', 'logging.debug', (['"""node[%s] prediction-error ||| predicted %s -vs- %s observed ||| PES %s ||| PRECISION %s"""', 'node_name', 'prediction', 'observation', 'prediction_error_size', 'precision'], {}), "(\n 'node[%s] prediction-error ||| predicted %s -vs- %s observed ||| PES %s ||| PRECISION %s'\n , node_name, prediction, observation, prediction_error_size, precision)\n", (2969, 3140), False, 'import logging\n')] |
import numpy as np
import copy
from classtestify import Testify
from classcapability import Capability
from classtable import Table
from class2ndmethod import SecondMethod
from class2Randr import SecondRate
from class1stRandr import FirstRate
r"""
Coded-caching demo script.

INPUT:
- ``demands``      -- [K*I] matrix: which user is asking for which file
- ``distribution`` -- [I*J] matrix: which file is stored by which sender
- ``connection``   -- [J*K] matrix: which sender is connected to which user
- ``M``            -- cache size of users
"""
# Problem instance: 6 users, each demanding a distinct file; 4 senders that all
# store every file; a partial sender-to-user connection topology.
# (Smaller alternative test configurations previously kept here as commented-out
# code have been removed for readability.)
M = 5
demands = np.eye(6, dtype=int)            # user k requests file k (6x6 identity)
distribution = np.ones((6, 4), dtype=int)  # every sender holds every file
connection = np.array([[1, 1, 1, 0, 0, 0],
                       [1, 0, 0, 1, 1, 0],
                       [0, 1, 0, 1, 0, 1],
                       [0, 0, 1, 0, 1, 1]])

# Dimensions of the instance.
K = demands.shape[0]        # number of users
I = demands.shape[1]        # number of files
J = distribution.shape[1]   # number of senders
t = int(M*K/I)              # caching parameter t = M*K/I

# Build the per-sender demand and capability tables shared by both methods.
capability = Capability(demands, distribution, connection)
demands_sender = capability.capability_matrix().tolist()
table = Table(demands_sender, K, J, M)
capability_table = table.table_list()

# Method 1: direct rate computation.
first_method = FirstRate(demands_sender, t)
rate_pair_1 = first_method.required_rate()  # [R, r]
print('1:', rate_pair_1)

# Method 2: assignment phase followed by rate computation.
second_method = SecondMethod(capability_table)
track = second_method.assignment_phase()  # equivalently: second_method.track
second_rate = SecondRate(demands_sender, track, t)
rate_pair_2 = second_rate.required_rate()  # [R, r]
print('2:', rate_pair_2)
| [
"class2ndmethod.SecondMethod",
"class2Randr.SecondRate",
"classtable.Table",
"classcapability.Capability",
"numpy.array",
"class1stRandr.FirstRate"
] | [((529, 663), 'numpy.array', 'np.array', (['[[1, 0, 0, 0, 0, 0], [0, 1, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0], [0, 0, 0, 1, 0,\n 0], [0, 0, 0, 0, 1, 0], [0, 0, 0, 0, 0, 1]]'], {}), '([[1, 0, 0, 0, 0, 0], [0, 1, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0], [0, 0,\n 0, 1, 0, 0], [0, 0, 0, 0, 1, 0], [0, 0, 0, 0, 0, 1]])\n', (537, 663), True, 'import numpy as np\n'), ((776, 874), 'numpy.array', 'np.array', (['[[1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1], [1, \n 1, 1, 1]]'], {}), '([[1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1,\n 1], [1, 1, 1, 1]])\n', (784, 874), True, 'import numpy as np\n'), ((1010, 1104), 'numpy.array', 'np.array', (['[[1, 1, 1, 0, 0, 0], [1, 0, 0, 1, 1, 0], [0, 1, 0, 1, 0, 1], [0, 0, 1, 0, 1, 1]\n ]'], {}), '([[1, 1, 1, 0, 0, 0], [1, 0, 0, 1, 1, 0], [0, 1, 0, 1, 0, 1], [0, 0,\n 1, 0, 1, 1]])\n', (1018, 1104), True, 'import numpy as np\n'), ((2207, 2252), 'classcapability.Capability', 'Capability', (['demands', 'distribution', 'connection'], {}), '(demands, distribution, connection)\n', (2217, 2252), False, 'from classcapability import Capability\n'), ((2306, 2336), 'classtable.Table', 'Table', (['demands_sender', 'K', 'J', 'M'], {}), '(demands_sender, K, J, M)\n', (2311, 2336), False, 'from classtable import Table\n'), ((2397, 2425), 'class1stRandr.FirstRate', 'FirstRate', (['demands_sender', 't'], {}), '(demands_sender, t)\n', (2406, 2425), False, 'from class1stRandr import FirstRate\n'), ((2517, 2547), 'class2ndmethod.SecondMethod', 'SecondMethod', (['capability_table'], {}), '(capability_table)\n', (2529, 2547), False, 'from class2ndmethod import SecondMethod\n'), ((2603, 2639), 'class2Randr.SecondRate', 'SecondRate', (['demands_sender', 'track', 't'], {}), '(demands_sender, track, t)\n', (2613, 2639), False, 'from class2Randr import SecondRate\n')] |
import os
import time
import numpy as np
import pandas as pd
import torch
import torch.nn as nn
import torch.optim as optim
from torch.optim.lr_scheduler import StepLR
from pathlib import Path
from copy import deepcopy
from ESRNN.utils.config import ModelConfig
from ESRNN.utils.ESRNN import _ESRNN
from ESRNN.utils.losses import SmylLoss, PinballLoss
from ESRNN.utils.data import Iterator
from ESRNN.utils_evaluation import owa
class ESRNN(object):
""" Exponential Smoothing Recursive Neural Network.
Pytorch Implementation of the M4 time series forecasting competition winner.
Proposed by Smyl. The model uses a hybrid approach of Machine Learning and
statistical methods by combining recursive neural networks to model a common
trend with shared parameters across series, and multiplicative Holt-Winter
exponential smoothing.
Parameters
----------
max_epochs: int
maximum number of complete passes to train data during fit
freq_of_test: int
period for the diagnostic evaluation of the model.
learning_rate: float
size of the stochastic gradient descent steps
lr_scheduler_step_size: int
this step_size is the period for each learning rate decay
per_series_lr_multip: float
multiplier for per-series parameters smoothing and initial
seasonalities learning rate (default 1.0)
gradient_eps: float
term added to the Adam optimizer denominator to improve
numerical stability (default: 1e-8)
gradient_clipping_threshold: float
max norm of gradient vector, with all parameters treated
as a single vector
rnn_weight_decay: float
parameter to control classic L2/Tikhonov regularization
of the rnn parameters
noise_std: float
standard deviation of white noise added to input during
fit to avoid the model from memorizing the train data
level_variability_penalty: float
this parameter controls the strength of the penalization
to the wigglines of the level vector, induces smoothness
in the output
testing_percentile: float
This value is only for diagnostic evaluation.
In case of percentile predictions this parameter controls
for the value predicted, when forecasting point value,
the forecast is the median, so percentile=50.
training_percentile: float
To reduce the model's tendency to over estimate, the
training_percentile can be set to fit a smaller value
through the Pinball Loss.
batch_size: int
number of training examples for the stochastic gradient steps
seasonality: int list
list of seasonalities of the time series
Hourly [24, 168], Daily [7], Weekly [52], Monthly [12],
Quarterly [4], Yearly [].
input_size: int
input size of the recursive neural network, usually a
multiple of seasonality
output_size: int
output_size or forecast horizon of the recursive neural
network, usually multiple of seasonality
random_seed: int
random_seed for pseudo random pytorch initializer and
numpy random generator
exogenous_size: int
size of one hot encoded categorical variable, invariannt
per time series of the panel
min_inp_seq_length: int
description
max_periods: int
Parameter to chop longer series, to last max_periods,
max e.g. 40 years
cell_type: str
Type of RNN cell, available GRU, LSTM, RNN, ResidualLSTM.
state_hsize: int
dimension of hidden state of the recursive neural network
dilations: int list
each list represents one chunk of Dilated LSTMS, connected in
standard ResNet fashion
add_nl_layer: bool
whether to insert a tanh() layer between the RNN stack and the
linear adaptor (output) layers
device: str
pytorch device either 'cpu' or 'cuda'
Notes
-----
**References:**
`M4 Competition Conclusions
<https://rpubs.com/fotpetr/m4competition>`__
`Original Dynet Implementation of ESRNN
<https://github.com/M4Competition/M4-methods/tree/master/118%20-%20slaweks17>`__
"""
def __init__(self, max_epochs=15, batch_size=1, batch_size_test=64, freq_of_test=-1,
learning_rate=1e-3, lr_scheduler_step_size=9, lr_decay=0.9,
per_series_lr_multip=1.0, gradient_eps=1e-8, gradient_clipping_threshold=20,
rnn_weight_decay=0, noise_std=0.001,
level_variability_penalty=80,
testing_percentile=50, training_percentile=50, ensemble=False,
cell_type='LSTM',
state_hsize=40, dilations=[[1, 2], [4, 8]],
add_nl_layer=False, seasonality=[4], input_size=4, output_size=8,
frequency=None, max_periods=20, random_seed=1,
device='cpu', root_dir='./'):
super(ESRNN, self).__init__()
self.mc = ModelConfig(max_epochs=max_epochs, batch_size=batch_size, batch_size_test=batch_size_test,
freq_of_test=freq_of_test, learning_rate=learning_rate,
lr_scheduler_step_size=lr_scheduler_step_size, lr_decay=lr_decay,
per_series_lr_multip=per_series_lr_multip,
gradient_eps=gradient_eps, gradient_clipping_threshold=gradient_clipping_threshold,
rnn_weight_decay=rnn_weight_decay, noise_std=noise_std,
level_variability_penalty=level_variability_penalty,
testing_percentile=testing_percentile, training_percentile=training_percentile,
ensemble=ensemble,
cell_type=cell_type,
state_hsize=state_hsize, dilations=dilations, add_nl_layer=add_nl_layer,
seasonality=seasonality, input_size=input_size, output_size=output_size,
frequency=frequency, max_periods=max_periods, random_seed=random_seed,
device=device, root_dir=root_dir)
self._fitted = False
    def train(self, dataloader, max_epochs,
              warm_start=False, shuffle=True, verbose=True):
        """
        Run the main ESRNN training loop for max_epochs epochs.
        Per batch: zero both optimizers, forward the hybrid model, compute the
        Smyl loss on normalized windows, backpropagate, clip gradients of the
        RNN and per-series (ES) parameters separately, then step both optimizers.
        Learning rates decay once per epoch via the StepLR schedulers.
        :param dataloader: batch iterator; must provide shuffle_dataset,
            n_batches and get_batch (assumed to be ESRNN.utils.data.Iterator)
        :param max_epochs: number of epochs to run in this call
        :param warm_start: if True, reuse the optimizers/schedulers from a
            previous call instead of re-creating them
        :param shuffle: if True, reshuffle the dataset each epoch (seeded by epoch)
        :param verbose: if True, print per-epoch timing and loss diagnostics
        """
        if self.mc.ensemble:
            # Ring buffer of the 5 most recent model snapshots.
            # NOTE(review): [deepcopy(...)] * 5 seeds the buffer with 5 references
            # to the SAME copy; distinct snapshots only appear as epochs complete.
            self.esrnn_ensemble = [deepcopy(self.esrnn).to(self.mc.device)] * 5
        if verbose: print(15*'='+' Training ESRNN ' + 15*'=' + '\n')
        # Optimizers (skipped on warm_start so momentum/schedule state is kept)
        if not warm_start:
            # Per-series (exponential smoothing) parameters get their own
            # learning rate, scaled by per_series_lr_multip.
            self.es_optimizer = optim.Adam(params=self.esrnn.es.parameters(),
                                lr=self.mc.learning_rate*self.mc.per_series_lr_multip,
                                betas=(0.9, 0.999), eps=self.mc.gradient_eps)
            # NOTE(review): gamma is hard-coded to 0.9 here while the RNN
            # scheduler below uses self.mc.lr_decay -- confirm this asymmetry
            # is intentional.
            self.es_scheduler = StepLR(optimizer=self.es_optimizer,
                                step_size=self.mc.lr_scheduler_step_size,
                                gamma=0.9)
            self.rnn_optimizer = optim.Adam(params=self.esrnn.rnn.parameters(),
                                lr=self.mc.learning_rate,
                                betas=(0.9, 0.999), eps=self.mc.gradient_eps,
                                weight_decay=self.mc.rnn_weight_decay)
            self.rnn_scheduler = StepLR(optimizer=self.rnn_optimizer,
                                step_size=self.mc.lr_scheduler_step_size,
                                gamma=self.mc.lr_decay)
        # Loss Functions: pinball losses at the training/testing percentiles.
        train_tau = self.mc.training_percentile / 100
        train_loss = SmylLoss(tau=train_tau,
                            level_variability_penalty=self.mc.level_variability_penalty)
        eval_tau = self.mc.testing_percentile / 100
        eval_loss = PinballLoss(tau=eval_tau)
        for epoch in range(max_epochs):
            self.esrnn.train()
            start = time.time()
            if shuffle:
                # Seed with the epoch index: reproducible but different per epoch.
                dataloader.shuffle_dataset(random_seed=epoch)
            losses = []
            for j in range(dataloader.n_batches):
                self.es_optimizer.zero_grad()
                self.rnn_optimizer.zero_grad()
                batch = dataloader.get_batch()
                windows_y, windows_y_hat, levels = self.esrnn(batch)
                # Pinball loss on normalized values
                loss = train_loss(windows_y, windows_y_hat, levels)
                losses.append(loss.data.cpu().numpy())
                loss.backward()
                # Clip RNN and ES gradients independently against the same threshold.
                torch.nn.utils.clip_grad_norm_(self.esrnn.rnn.parameters(),
                                               self.mc.gradient_clipping_threshold)
                torch.nn.utils.clip_grad_norm_(self.esrnn.es.parameters(),
                                               self.mc.gradient_clipping_threshold)
                self.rnn_optimizer.step()
                self.es_optimizer.step()
            # Decay learning rate (once per epoch, for both parameter groups)
            self.es_scheduler.step()
            self.rnn_scheduler.step()
            if self.mc.ensemble:
                # Snapshot the current model (in eval mode) and age out the oldest.
                copy_esrnn = deepcopy(self.esrnn)
                copy_esrnn.eval()
                self.esrnn_ensemble.pop(0)
                self.esrnn_ensemble.append(copy_esrnn)
            # Evaluation
            self.train_loss = np.mean(losses)
            if verbose:
                print("========= Epoch {} finished =========".format(epoch))
                print("Training time: {}".format(round(time.time()-start, 5)))
                print("Training loss ({} prc): {:.5f}".format(self.mc.training_percentile,
                                                              self.train_loss))
            # Periodic diagnostic evaluation against the held-out test set.
            if (epoch % self.mc.freq_of_test == 0) and (self.mc.freq_of_test > 0):
                if self.y_test_df is not None:
                    self.test_loss = self.model_evaluation(dataloader, eval_loss)
                    print("Testing loss ({} prc): {:.5f}".format(self.mc.testing_percentile,
                                                                 self.test_loss))
                    self.evaluate_model_prediction(self.y_train_df, self.X_test_df,
                                                    self.y_test_df, self.y_hat_benchmark, epoch=epoch)
                    # model_evaluation switches to eval mode; restore training mode.
                    self.esrnn.train()
        if verbose: print('Train finished! \n')
def per_series_evaluation(self, dataloader, criterion):
    """Compute the evaluation loss of every series individually.

    Args:
        dataloader: batch iterator over the stored series.
        criterion: loss callable applied to (windows_y, windows_y_hat).

    Returns:
        list of per-series loss values.
    """
    with torch.no_grad():
        # Evaluate with the largest feasible batch size for speed.
        eval_batch_size = min(self.mc.n_series, self.mc.batch_size_test)
        dataloader.update_batch_size(eval_batch_size)
        series_losses = []
        for _ in range(dataloader.n_batches):
            current_batch = dataloader.get_batch()
            windows_y, windows_y_hat, _ = self.esrnn(current_batch)
            batch_loss = criterion(windows_y, windows_y_hat)
            series_losses.extend(batch_loss.data.cpu().numpy().tolist())
        # Restore the training batch size before returning.
        dataloader.update_batch_size(self.mc.batch_size)
        return series_losses
def model_evaluation(self, dataloader, criterion):
    """Average the evaluation loss over all batches in *dataloader*.

    Args:
        dataloader: batch iterator over the stored series.
        criterion: loss callable applied to (windows_y, windows_y_hat).

    Returns:
        scalar mean loss across batches.
    """
    with torch.no_grad():
        # Evaluate with the largest feasible batch size for speed.
        fast_batch_size = min(self.mc.n_series, self.mc.batch_size_test)
        dataloader.update_batch_size(fast_batch_size)
        total_loss = 0.0
        for _ in range(dataloader.n_batches):
            current_batch = dataloader.get_batch()
            windows_y, windows_y_hat, _ = self.esrnn(current_batch)
            total_loss += criterion(windows_y, windows_y_hat).data.cpu().numpy()
        mean_loss = total_loss / dataloader.n_batches
        # Restore the training batch size before returning.
        dataloader.update_batch_size(self.mc.batch_size)
        return mean_loss
def evaluate_model_prediction(self, y_train_df, X_test_df, y_test_df, y_hat_benchmark='y_hat_naive2', epoch=None):
    """Score the fitted model against a benchmark column in *y_test_df*.

    Args:
        y_train_df: pandas df, panel with columns unique_id, ds, y.
        X_test_df: pandas df, panel with columns unique_id, ds, x.
        y_test_df: pandas df, panel with columns unique_id, ds, y plus the
            benchmark-prediction column.
        y_hat_benchmark: name of the benchmark column (default y_hat_naive2).
        epoch: optional epoch index, recorded when a new best OWA is seen.

    Returns:
        (model_owa, model_mase, model_smape) tuple.
    """
    assert self._fitted, "Model not fitted yet"
    y_panel = y_test_df.filter(['unique_id', 'ds', 'y'])
    benchmark_panel = y_test_df.filter(['unique_id', 'ds', y_hat_benchmark])
    benchmark_panel.rename(columns={y_hat_benchmark: 'y_hat'}, inplace=True)
    y_hat_panel = self.predict(X_test_df)
    y_insample = y_train_df.filter(['unique_id', 'ds', 'y'])
    model_owa, model_mase, model_smape = owa(
        y_panel, y_hat_panel, benchmark_panel, y_insample,
        seasonality=self.mc.naive_seasonality)
    # Track the best OWA (and the epoch it occurred at) seen so far.
    if self.min_owa > model_owa:
        self.min_owa = model_owa
        if epoch is not None:
            self.min_epoch = epoch
    print('OWA: {} '.format(np.round(model_owa, 3)))
    print('SMAPE: {} '.format(np.round(model_smape, 3)))
    print('MASE: {} '.format(np.round(model_mase, 3)))
    return model_owa, model_mase, model_smape
def fit(self, X_df, y_df, X_test_df=None, y_test_df=None, y_hat_benchmark='y_hat_naive2',
        warm_start=False, shuffle=True, verbose=True):
    """Fit the ES-RNN on long-format panels.

    Args:
        X_df: pandas df with columns unique_id, ds, x.
        y_df: pandas df with columns unique_id, ds, y (row-aligned with X_df).
        X_test_df / y_test_df: optional test panels used for periodic
            evaluation during training.
        y_hat_benchmark: benchmark-prediction column name in y_test_df.
        warm_start / shuffle / verbose: forwarded to train().
    """
    # Transform long dfs to wide numpy
    assert type(X_df) == pd.core.frame.DataFrame
    assert type(y_df) == pd.core.frame.DataFrame
    assert all([(col in X_df) for col in ['unique_id', 'ds', 'x']])
    assert all([(col in y_df) for col in ['unique_id', 'ds', 'y']])
    if y_test_df is not None:
        assert y_hat_benchmark in y_test_df.columns, 'benchmark is not present in y_test_df, use y_hat_benchmark to define it'
    # Storing dfs for OWA evaluation, initializing min_owa
    self.y_train_df = y_df
    self.X_test_df = X_test_df
    self.y_test_df = y_test_df
    self.min_owa = 4.0
    self.min_epoch = 0
    # FIX: np.int was deprecated in NumPy 1.20 and removed in 1.24, so the
    # old (int, np.int, np.int64) tuple raises AttributeError on modern
    # NumPy. np.integer matches every NumPy integer dtype (np.int64
    # included), so (int, np.integer) covers the same values and more.
    self.int_ds = isinstance(self.y_train_df['ds'][0], (int, np.integer))
    self.y_hat_benchmark = y_hat_benchmark
    X, y = self.long_to_wide(X_df, y_df)
    assert len(X) == len(y)
    assert X.shape[1] >= 3
    # Exogenous variables: one index per distinct category in column 1.
    unique_categories = np.unique(X[:, 1])
    self.mc.category_to_idx = dict((word, index) for index, word in enumerate(unique_categories))
    exogenous_size = len(unique_categories)
    # Create batches (device in mc)
    self.train_dataloader = Iterator(mc=self.mc, X=X, y=y)
    # Random Seeds (model initialization)
    torch.manual_seed(self.mc.random_seed)
    np.random.seed(self.mc.random_seed)
    # Initialize model
    n_series = self.train_dataloader.n_series
    self.instantiate_esrnn(exogenous_size, n_series)
    # Infer freq of model from the first few timestamps when not given.
    if self.mc.frequency is None:
        self.mc.frequency = pd.infer_freq(X_df.head()['ds'])
        print("Infered frequency: {}".format(self.mc.frequency))
    # Train model
    self._fitted = True
    self.train(dataloader=self.train_dataloader, max_epochs=self.mc.max_epochs,
               warm_start=warm_start, shuffle=shuffle, verbose=verbose)
def instantiate_esrnn(self, exogenous_size, n_series):
    """Record the data-dependent dimensions on the config and build the
    underlying _ESRNN module on the configured device."""
    self.mc.exogenous_size = exogenous_size
    self.mc.n_series = n_series
    self.esrnn = _ESRNN(self.mc).to(self.mc.device)
def predict(self, X_df, decomposition=False):
    """
    Predictions for all stored time series.

    Args:
        X_df: pandas DataFrame with at least a 'unique_id' column (and
            optionally 'ds'; when present, predictions are merged on both).
        decomposition: NOTE(review) -- accepted but never used in this
            body; confirm before removing.

    Returns:
        Y_hat_panel: pandas DataFrame with columns unique_id, ds, y_hat,
        left-merged back onto X_df.
    """
    assert type(X_df) == pd.core.frame.DataFrame
    assert 'unique_id' in X_df
    assert self._fitted, "Model not fitted yet"
    self.esrnn.eval()
    # Create fast dataloader: predict with the largest feasible batch.
    if self.mc.n_series < self.mc.batch_size_test: new_batch_size = self.mc.n_series
    else: new_batch_size = self.mc.batch_size_test
    self.train_dataloader.update_batch_size(new_batch_size)
    dataloader = self.train_dataloader
    # Create Y_hat_panel placeholders
    output_size = self.mc.output_size
    n_unique_id = len(dataloader.sort_key['unique_id'])
    panel_unique_id = pd.Series(dataloader.sort_key['unique_id']).repeat(output_size)
    # Access column with last train date; forecast timestamps start one
    # period after it.
    panel_last_ds = pd.Series(dataloader.X[:, 2])
    panel_ds = []
    for i in range(len(panel_last_ds)):
        ranges = pd.date_range(start=panel_last_ds[i], periods=output_size+1, freq=self.mc.frequency)
        panel_ds += list(ranges[1:])
    panel_y_hat= np.zeros((output_size * n_unique_id))
    # Predict batch by batch, writing each flattened forecast into the
    # preallocated panel at the running offset.
    count = 0
    for j in range(dataloader.n_batches):
        batch = dataloader.get_batch()
        batch_size = batch.y.shape[0]
        if self.mc.ensemble:
            # Average the forecasts of the 5 stored snapshots.
            y_hat = torch.zeros((5,batch_size,output_size))
            for i in range(5):
                y_hat[i,:,:] = self.esrnn_ensemble[i].predict(batch)
            y_hat = torch.mean(y_hat,0)
        else:
            y_hat = self.esrnn.predict(batch)
        y_hat = y_hat.data.cpu().numpy()
        panel_y_hat[count:count+output_size*batch_size] = y_hat.flatten()
        count += output_size*batch_size
    Y_hat_panel_dict = {'unique_id': panel_unique_id,
                        'ds': panel_ds,
                        'y_hat': panel_y_hat}
    assert len(panel_ds) == len(panel_y_hat) == len(panel_unique_id)
    Y_hat_panel = pd.DataFrame.from_dict(Y_hat_panel_dict)
    if 'ds' in X_df:
        Y_hat_panel = X_df.merge(Y_hat_panel, on=['unique_id', 'ds'], how='left')
    else:
        Y_hat_panel = X_df.merge(Y_hat_panel, on=['unique_id'], how='left')
    # Restore the training batch size before returning.
    self.train_dataloader.update_batch_size(self.mc.batch_size)
    return Y_hat_panel
def long_to_wide(self, X_df, y_df):
    """Pivot long-format panels into wide numpy arrays.

    Args:
        X_df: long panel with columns unique_id, ds, x.
        y_df: long panel whose 'y' column is row-aligned with X_df.

    Returns:
        X: array with one row per series -- [unique_id, x, last_ds].
        y: array of y values, one row per series, one column per step.
    """
    panel = X_df.copy()
    panel['y'] = y_df['y'].copy()
    # Map each distinct timestamp to its chronological rank.
    ds_map = {t: rank for rank, t in enumerate(np.sort(panel['ds'].unique()))}
    panel['ds_map'] = panel['ds'].map(ds_map)
    panel = panel.sort_values(by=['ds_map', 'unique_id'])
    wide = panel.pivot(index='unique_id', columns='ds_map')['y']
    first_x = panel[['unique_id', 'x']].groupby('unique_id').first()
    last_ds = panel[['unique_id', 'ds']].groupby('unique_id').last()
    assert len(first_x) == len(panel.unique_id.unique())
    wide['x'] = first_x
    wide['last_ds'] = last_ds
    wide = wide.reset_index().rename_axis(None, axis=1)
    ds_columns = panel.ds_map.unique().tolist()
    X = wide.filter(items=['unique_id', 'x', 'last_ds']).values
    y = wide.filter(items=ds_columns).values
    return X, y
def get_dir_name(self, root_dir=None):
    """Return the directory used to persist this model's weights.

    Args:
        root_dir: base directory; falls back to self.mc.root_dir when falsy.

    Returns:
        str path of the form <root_dir>/<dataset_name>/esrnn_<copy>.
    """
    if not root_dir:
        assert self.mc.root_dir
        root_dir = self.mc.root_dir
    model_subdir = 'esrnn_{}'.format(str(self.mc.copy))
    return os.path.join(root_dir, self.mc.dataset_name, model_subdir)
def save(self, model_dir=None, copy=None):
    """Write the ES and RNN sub-network state dicts to disk.

    Args:
        model_dir: target directory; derived via get_dir_name() when omitted.
        copy: optional copy index stored on the config before saving.
    """
    if copy is not None:
        self.mc.copy = copy
    if not model_dir:
        assert self.mc.root_dir
        model_dir = self.get_dir_name()
    if not os.path.exists(model_dir):
        os.makedirs(model_dir)
    es_filepath = os.path.join(model_dir, "es.model")
    rnn_filepath = os.path.join(model_dir, "rnn.model")
    print('Saving model to:\n {}'.format(model_dir)+'\n')
    torch.save({'model_state_dict': self.es.state_dict()}, es_filepath)
    torch.save({'model_state_dict': self.rnn.state_dict()}, rnn_filepath)
def load(self, model_dir=None, copy=None):
    """Restore ES and RNN weights from disk if a checkpoint exists.

    Args:
        model_dir: directory holding es.model / rnn.model; derived via
            get_dir_name() when omitted.
        copy: optional copy index stored on the config before loading.
    """
    if copy is not None:
        self.mc.copy = copy
    if not model_dir:
        assert self.mc.root_dir
        model_dir = self.get_dir_name()
    rnn_filepath = os.path.join(model_dir, "rnn.model")
    es_filepath = os.path.join(model_dir, "es.model")
    path = Path(es_filepath)
    if not path.is_file():
        # Nothing saved yet -- leave the model untouched.
        print('Model path {} does not exist'.format(path))
        return
    print('Loading model from:\n {}'.format(model_dir)+'\n')
    es_state = torch.load(es_filepath, map_location=self.mc.device)
    self.es.load_state_dict(es_state['model_state_dict'])
    self.es.to(self.mc.device)
    rnn_state = torch.load(rnn_filepath, map_location=self.mc.device)
    self.rnn.load_state_dict(rnn_state['model_state_dict'])
    self.rnn.to(self.mc.device)
| [
"numpy.random.seed",
"torch.optim.lr_scheduler.StepLR",
"ESRNN.utils.data.Iterator",
"pathlib.Path",
"numpy.mean",
"ESRNN.utils.losses.SmylLoss",
"torch.no_grad",
"os.path.join",
"numpy.round",
"numpy.unique",
"ESRNN.utils_evaluation.owa",
"ESRNN.utils.config.ModelConfig",
"torch.load",
"o... | [((4836, 5707), 'ESRNN.utils.config.ModelConfig', 'ModelConfig', ([], {'max_epochs': 'max_epochs', 'batch_size': 'batch_size', 'batch_size_test': 'batch_size_test', 'freq_of_test': 'freq_of_test', 'learning_rate': 'learning_rate', 'lr_scheduler_step_size': 'lr_scheduler_step_size', 'lr_decay': 'lr_decay', 'per_series_lr_multip': 'per_series_lr_multip', 'gradient_eps': 'gradient_eps', 'gradient_clipping_threshold': 'gradient_clipping_threshold', 'rnn_weight_decay': 'rnn_weight_decay', 'noise_std': 'noise_std', 'level_variability_penalty': 'level_variability_penalty', 'testing_percentile': 'testing_percentile', 'training_percentile': 'training_percentile', 'ensemble': 'ensemble', 'cell_type': 'cell_type', 'state_hsize': 'state_hsize', 'dilations': 'dilations', 'add_nl_layer': 'add_nl_layer', 'seasonality': 'seasonality', 'input_size': 'input_size', 'output_size': 'output_size', 'frequency': 'frequency', 'max_periods': 'max_periods', 'random_seed': 'random_seed', 'device': 'device', 'root_dir': 'root_dir'}), '(max_epochs=max_epochs, batch_size=batch_size, batch_size_test=\n batch_size_test, freq_of_test=freq_of_test, learning_rate=learning_rate,\n lr_scheduler_step_size=lr_scheduler_step_size, lr_decay=lr_decay,\n per_series_lr_multip=per_series_lr_multip, gradient_eps=gradient_eps,\n gradient_clipping_threshold=gradient_clipping_threshold,\n rnn_weight_decay=rnn_weight_decay, noise_std=noise_std,\n level_variability_penalty=level_variability_penalty, testing_percentile\n =testing_percentile, training_percentile=training_percentile, ensemble=\n ensemble, cell_type=cell_type, state_hsize=state_hsize, dilations=\n dilations, add_nl_layer=add_nl_layer, seasonality=seasonality,\n input_size=input_size, output_size=output_size, frequency=frequency,\n max_periods=max_periods, random_seed=random_seed, device=device,\n root_dir=root_dir)\n', (4847, 5707), False, 'from ESRNN.utils.config import ModelConfig\n'), ((7339, 7428), 'ESRNN.utils.losses.SmylLoss', 'SmylLoss', 
([], {'tau': 'train_tau', 'level_variability_penalty': 'self.mc.level_variability_penalty'}), '(tau=train_tau, level_variability_penalty=self.mc.\n level_variability_penalty)\n', (7347, 7428), False, 'from ESRNN.utils.losses import SmylLoss, PinballLoss\n'), ((7519, 7544), 'ESRNN.utils.losses.PinballLoss', 'PinballLoss', ([], {'tau': 'eval_tau'}), '(tau=eval_tau)\n', (7530, 7544), False, 'from ESRNN.utils.losses import SmylLoss, PinballLoss\n'), ((12617, 12717), 'ESRNN.utils_evaluation.owa', 'owa', (['y_panel', 'y_hat_panel', 'y_benchmark_panel', 'y_insample'], {'seasonality': 'self.mc.naive_seasonality'}), '(y_panel, y_hat_panel, y_benchmark_panel, y_insample, seasonality=self.\n mc.naive_seasonality)\n', (12620, 12717), False, 'from ESRNN.utils_evaluation import owa\n'), ((14225, 14243), 'numpy.unique', 'np.unique', (['X[:, 1]'], {}), '(X[:, 1])\n', (14234, 14243), True, 'import numpy as np\n'), ((14456, 14486), 'ESRNN.utils.data.Iterator', 'Iterator', ([], {'mc': 'self.mc', 'X': 'X', 'y': 'y'}), '(mc=self.mc, X=X, y=y)\n', (14464, 14486), False, 'from ESRNN.utils.data import Iterator\n'), ((14537, 14575), 'torch.manual_seed', 'torch.manual_seed', (['self.mc.random_seed'], {}), '(self.mc.random_seed)\n', (14554, 14575), False, 'import torch\n'), ((14581, 14616), 'numpy.random.seed', 'np.random.seed', (['self.mc.random_seed'], {}), '(self.mc.random_seed)\n', (14595, 14616), True, 'import numpy as np\n'), ((16440, 16469), 'pandas.Series', 'pd.Series', (['dataloader.X[:, 2]'], {}), '(dataloader.X[:, 2])\n', (16449, 16469), True, 'import pandas as pd\n'), ((16687, 16722), 'numpy.zeros', 'np.zeros', (['(output_size * n_unique_id)'], {}), '(output_size * n_unique_id)\n', (16695, 16722), True, 'import numpy as np\n'), ((17541, 17581), 'pandas.DataFrame.from_dict', 'pd.DataFrame.from_dict', (['Y_hat_panel_dict'], {}), '(Y_hat_panel_dict)\n', (17563, 17581), True, 'import pandas as pd\n'), ((18962, 18994), 'os.path.join', 'os.path.join', (['root_dir', 'data_dir'], {}), 
'(root_dir, data_dir)\n', (18974, 18994), False, 'import os\n'), ((19435, 19471), 'os.path.join', 'os.path.join', (['model_dir', '"""rnn.model"""'], {}), "(model_dir, 'rnn.model')\n", (19447, 19471), False, 'import os\n'), ((19491, 19526), 'os.path.join', 'os.path.join', (['model_dir', '"""es.model"""'], {}), "(model_dir, 'es.model')\n", (19503, 19526), False, 'import os\n'), ((19954, 19990), 'os.path.join', 'os.path.join', (['model_dir', '"""rnn.model"""'], {}), "(model_dir, 'rnn.model')\n", (19966, 19990), False, 'import os\n'), ((20010, 20045), 'os.path.join', 'os.path.join', (['model_dir', '"""es.model"""'], {}), "(model_dir, 'es.model')\n", (20022, 20045), False, 'import os\n'), ((20058, 20075), 'pathlib.Path', 'Path', (['es_filepath'], {}), '(es_filepath)\n', (20062, 20075), False, 'from pathlib import Path\n'), ((6621, 6714), 'torch.optim.lr_scheduler.StepLR', 'StepLR', ([], {'optimizer': 'self.es_optimizer', 'step_size': 'self.mc.lr_scheduler_step_size', 'gamma': '(0.9)'}), '(optimizer=self.es_optimizer, step_size=self.mc.\n lr_scheduler_step_size, gamma=0.9)\n', (6627, 6714), False, 'from torch.optim.lr_scheduler import StepLR\n'), ((7085, 7192), 'torch.optim.lr_scheduler.StepLR', 'StepLR', ([], {'optimizer': 'self.rnn_optimizer', 'step_size': 'self.mc.lr_scheduler_step_size', 'gamma': 'self.mc.lr_decay'}), '(optimizer=self.rnn_optimizer, step_size=self.mc.\n lr_scheduler_step_size, gamma=self.mc.lr_decay)\n', (7091, 7192), False, 'from torch.optim.lr_scheduler import StepLR\n'), ((7625, 7636), 'time.time', 'time.time', ([], {}), '()\n', (7634, 7636), False, 'import time\n'), ((8862, 8877), 'numpy.mean', 'np.mean', (['losses'], {}), '(losses)\n', (8869, 8877), True, 'import numpy as np\n'), ((10079, 10094), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (10092, 10094), False, 'import torch\n'), ((10986, 11001), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (10999, 11001), False, 'import torch\n'), ((16546, 16637), 'pandas.date_range', 
'pd.date_range', ([], {'start': 'panel_last_ds[i]', 'periods': '(output_size + 1)', 'freq': 'self.mc.frequency'}), '(start=panel_last_ds[i], periods=output_size + 1, freq=self.mc\n .frequency)\n', (16559, 16637), True, 'import pandas as pd\n'), ((19356, 19381), 'os.path.exists', 'os.path.exists', (['model_dir'], {}), '(model_dir)\n', (19370, 19381), False, 'import os\n'), ((19390, 19412), 'os.makedirs', 'os.makedirs', (['model_dir'], {}), '(model_dir)\n', (19401, 19412), False, 'import os\n'), ((20188, 20240), 'torch.load', 'torch.load', (['es_filepath'], {'map_location': 'self.mc.device'}), '(es_filepath, map_location=self.mc.device)\n', (20198, 20240), False, 'import torch\n'), ((20360, 20413), 'torch.load', 'torch.load', (['rnn_filepath'], {'map_location': 'self.mc.device'}), '(rnn_filepath, map_location=self.mc.device)\n', (20370, 20413), False, 'import torch\n'), ((8681, 8701), 'copy.deepcopy', 'deepcopy', (['self.esrnn'], {}), '(self.esrnn)\n', (8689, 8701), False, 'from copy import deepcopy\n'), ((12965, 12987), 'numpy.round', 'np.round', (['model_owa', '(3)'], {}), '(model_owa, 3)\n', (12973, 12987), True, 'import numpy as np\n'), ((13021, 13045), 'numpy.round', 'np.round', (['model_smape', '(3)'], {}), '(model_smape, 3)\n', (13029, 13045), True, 'import numpy as np\n'), ((13078, 13101), 'numpy.round', 'np.round', (['model_mase', '(3)'], {}), '(model_mase, 3)\n', (13086, 13101), True, 'import numpy as np\n'), ((15288, 15303), 'ESRNN.utils.ESRNN._ESRNN', '_ESRNN', (['self.mc'], {}), '(self.mc)\n', (15294, 15303), False, 'from ESRNN.utils.ESRNN import _ESRNN\n'), ((16312, 16355), 'pandas.Series', 'pd.Series', (["dataloader.sort_key['unique_id']"], {}), "(dataloader.sort_key['unique_id'])\n", (16321, 16355), True, 'import pandas as pd\n'), ((16922, 16963), 'torch.zeros', 'torch.zeros', (['(5, batch_size, output_size)'], {}), '((5, batch_size, output_size))\n', (16933, 16963), False, 'import torch\n'), ((17071, 17091), 'torch.mean', 'torch.mean', (['y_hat', 
'(0)'], {}), '(y_hat, 0)\n', (17081, 17091), False, 'import torch\n'), ((6194, 6214), 'copy.deepcopy', 'deepcopy', (['self.esrnn'], {}), '(self.esrnn)\n', (6202, 6214), False, 'from copy import deepcopy\n'), ((9015, 9026), 'time.time', 'time.time', ([], {}), '()\n', (9024, 9026), False, 'import time\n')] |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
"""Generate images using pretrained network pickle."""
import argparse
import os
import pickle
import re
import imageio
import numpy as np
import PIL.Image
import dnnlib
import dnnlib.tflib as tflib
# ----------------------------------------------------------------------------
def generate_video(
    network_pkl,
    interpolation_size,
    truncation_psi,
    outdir,
    class_idx,
    dlatents_count,
):
    """Render an interpolation video from saved projector dlatents.

    Loads dlatents 1..dlatents_count from the hard-coded
    /content/projector/<i>/dlatents.npz paths, linearly interpolates
    ``interpolation_size`` steps between consecutive latents, synthesizes
    each frame with the loaded generator and writes <outdir>/mov.mp4
    at 60 fps.

    NOTE(review): truncation_psi and class_idx are accepted but never
    used in this body -- confirm whether they should be wired in.
    """
    tflib.init_tf()
    print('Loading networks from "%s"...' % network_pkl)
    with dnnlib.util.open_url(network_pkl) as fp:
        _G, _D, Gs = pickle.load(fp)
    os.makedirs(outdir, exist_ok=True)
    zs = []
    for i in range(1, dlatents_count + 1):
        path = '/content/projector/{}/dlatents.npz'.format(i)
        dlatents = np.load(path)["dlatents"]
        assert dlatents.shape[1:] == (12, 512) # [N, 12, 512]
        zs.append(dlatents)
    # interpolate -- note `z1 += zd` advances the array in place, so the
    # entries of `zs` are mutated as a side effect.
    zs_int = [zs[0]]
    for i in range(len(zs) - 1):
        z1 = zs[i]
        z2 = zs[i + 1]
        zd = (z2 - z1) / (interpolation_size + 1)
        for j in range(interpolation_size + 1):
            z1 += zd
            zs_int.append(z1.copy())
    imgs = []
    for z in zs_int:
        images = Gs.components.synthesis.run(
            z,
            output_transform=dict(
                func=tflib.convert_images_to_uint8, nchw_to_nhwc=True
            ),
        )
        imgs.append(PIL.Image.fromarray(images[0], "RGB"))
    with imageio.get_writer(f"{outdir}/mov.mp4", mode="I", fps=60) as writer:
        for image in imgs:
            writer.append_data(np.array(image))
# ----------------------------------------------------------------------------
def _parse_num_range(s):
"""Accept either a comma separated list of numbers 'a,b,c' or a range 'a-c' and return as a list of ints."""
range_re = re.compile(r"^(\d+)-(\d+)$")
m = range_re.match(s)
if m:
return list(range(int(m.group(1)), int(m.group(2)) + 1))
vals = s.split(",")
return [int(x) for x in vals]
# ----------------------------------------------------------------------------
# Verbatim usage examples shown in the argparse --help epilog (see main()).
_examples = """examples:
# Generate curated MetFaces images without truncation (Fig.10 left)
python %(prog)s --outdir=out --trunc=1 --seeds=85,265,297,849 \\
--network=https://nvlabs-fi-cdn.nvidia.com/stylegan2-ada/pretrained/metfaces.pkl
# Generate uncurated MetFaces images with truncation (Fig.12 upper left)
python %(prog)s --outdir=out --trunc=0.7 --seeds=600-605 \\
--network=https://nvlabs-fi-cdn.nvidia.com/stylegan2-ada/pretrained/metfaces.pkl
# Generate class conditional CIFAR-10 images (Fig.17 left, Car)
python %(prog)s --outdir=out --trunc=1 --seeds=0-35 --class=1 \\
--network=https://nvlabs-fi-cdn.nvidia.com/stylegan2-ada/pretrained/cifar10.pkl
# Render image from projected latent vector
python %(prog)s --outdir=out --dlatents=out/dlatents.npz \\
--network=https://nvlabs-fi-cdn.nvidia.com/stylegan2-ada/pretrained/ffhq.pkl
"""
# ----------------------------------------------------------------------------
def main():
    """Parse command-line arguments and forward them to generate_video().

    Each argument's ``dest`` matches a generate_video() parameter name,
    so the namespace can be splatted directly via **vars(args).
    """
    parser = argparse.ArgumentParser(
        description="Generate images using pretrained network pickle.",
        epilog=_examples,
        formatter_class=argparse.RawDescriptionHelpFormatter,
    )
    parser.add_argument(
        "--network", help="Network pickle filename", dest="network_pkl", required=True
    )
    parser.add_argument(
        "--dlatents_count", type=int, dest="dlatents_count", help="Generate images for saved dlatents"
    )
    parser.add_argument(
        "--interpolation_size",
        type=int,
        help="Number of interpolation steps",
    )
    parser.add_argument(
        "--trunc",
        dest="truncation_psi",
        type=float,
        help="Truncation psi (default: %(default)s)",
        default=0.5,
    )
    parser.add_argument(
        "--class",
        dest="class_idx",
        type=int,
        help="Class label (default: unconditional)",
    )
    parser.add_argument(
        "--outdir", help="Where to save the output images", required=True, metavar="DIR"
    )
    args = parser.parse_args()
    generate_video(**vars(args))
# ----------------------------------------------------------------------------
# Script entry point.
if __name__ == "__main__":
    main()
# ----------------------------------------------------------------------------
| [
"numpy.load",
"argparse.ArgumentParser",
"os.makedirs",
"dnnlib.util.open_url",
"dnnlib.tflib.init_tf",
"pickle.load",
"numpy.array",
"imageio.get_writer",
"re.compile"
] | [((846, 861), 'dnnlib.tflib.init_tf', 'tflib.init_tf', ([], {}), '()\n', (859, 861), True, 'import dnnlib.tflib as tflib\n'), ((1011, 1045), 'os.makedirs', 'os.makedirs', (['outdir'], {'exist_ok': '(True)'}), '(outdir, exist_ok=True)\n', (1022, 1045), False, 'import os\n'), ((2269, 2298), 're.compile', 're.compile', (['"""^(\\\\d+)-(\\\\d+)$"""'], {}), "('^(\\\\d+)-(\\\\d+)$')\n", (2279, 2298), False, 'import re\n'), ((3537, 3705), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Generate images using pretrained network pickle."""', 'epilog': '_examples', 'formatter_class': 'argparse.RawDescriptionHelpFormatter'}), "(description=\n 'Generate images using pretrained network pickle.', epilog=_examples,\n formatter_class=argparse.RawDescriptionHelpFormatter)\n", (3560, 3705), False, 'import argparse\n'), ((928, 961), 'dnnlib.util.open_url', 'dnnlib.util.open_url', (['network_pkl'], {}), '(network_pkl)\n', (948, 961), False, 'import dnnlib\n'), ((990, 1005), 'pickle.load', 'pickle.load', (['fp'], {}), '(fp)\n', (1001, 1005), False, 'import pickle\n'), ((1888, 1945), 'imageio.get_writer', 'imageio.get_writer', (['f"""{outdir}/mov.mp4"""'], {'mode': '"""I"""', 'fps': '(60)'}), "(f'{outdir}/mov.mp4', mode='I', fps=60)\n", (1906, 1945), False, 'import imageio\n'), ((1188, 1201), 'numpy.load', 'np.load', (['path'], {}), '(path)\n', (1195, 1201), True, 'import numpy as np\n'), ((2015, 2030), 'numpy.array', 'np.array', (['image'], {}), '(image)\n', (2023, 2030), True, 'import numpy as np\n')] |
import numpy as np
def is_rated(M: np.ndarray, index: tuple):
    """Return True when the matrix entry at *index* holds a rating.

    Entries are either non-negative ratings or the sentinel -1 meaning
    "not rated"; any other negative value is rejected.
    FIX: the annotations previously used the tuple instance ``(int, int)``
    and the factory ``np.array`` as types; replaced with real types
    (``tuple`` / ``np.ndarray``) -- no runtime behavior change.

    :param M: the rating matrix
    :param index: (row, col) pair indexing into M
    :return: truthy if M[index] >= 0, falsy if it is the null marker -1
    :raises ValueError: if M[index] is negative but not exactly -1
    """
    value = M[index]  # single lookup instead of three
    if value < 0 and value != -1:
        raise ValueError("M[%d, %d] = %f should be -1 denoting null" % (index[0], index[1], value))
    return value >= 0
def is_null(M: np.ndarray, index: tuple) -> bool:
    """Return True when the entry at *index* is the null marker (-1); logical inverse of is_rated."""
    return not is_rated(M, index)
def check_rating_matrix(M: np.ndarray) -> None:
    """Validate that M is a 2-D matrix whose entries are all either ratings (>= 0) or the null marker -1.

    :raises ValueError: if M is not two-dimensional, or (via is_rated) if any entry is negative but not -1
    """
    if M.ndim != 2:
        raise ValueError("M must be two-dimension, got %d dimension(s)" % M.ndim)
    for index in np.ndindex(M.shape):
        is_rated(M, index)
def sort_rat(name_list: list, ratings: list, reverse: bool = True):
    """
    Sort ratings associated with names. The sort is stable.
    :param name_list: The list of names to be associated with the ratings respectively
    :param ratings: The list of ratings to sort
    :param reverse: Descending by default. False means ascending.
    :return: The sorted list of (name, rating, ranking) tuples, rankings starting at 1
    """
    if len(name_list) != len(ratings):
        raise ValueError("# of names %d does not equal to # of ratings %d" % (len(name_list), len(ratings)))
    paired = list(zip(name_list, ratings))
    # Stable sort on the rating component only.
    paired.sort(key=lambda pair: pair[1], reverse=reverse)
    return [(name, rating, rank) for rank, (name, rating) in enumerate(paired, start=1)]
| [
"numpy.ndindex"
] | [((475, 494), 'numpy.ndindex', 'np.ndindex', (['M.shape'], {}), '(M.shape)\n', (485, 494), True, 'import numpy as np\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# QTPyLib: Quantitative Trading Python Library
# https://github.com/ranaroussi/qtpylib
#
# Copyright 2016-2018 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import random
import numpy as np # noqa
import pandas as pd # noqa
from pandas import DataFrame
from qtpylib.algo import Algo
from qtpylib import futures
import talib.abstract as ta
import freqtrade.vendor.qtpylib.indicators as qtpylib
# from qtpylib import talib_indicators as ta
# from qtpylib import indicators as qtpylib
class Strategy002(Algo):
"""
Example: This Strategy buys/sells single contract of the
S&P E-mini Futures (ES) every 10th tick with a +/- 0.5
tick target/stop using LIMIT order.
If still in position for next 5 ticks, an exit order is issued.
"""
count = 0
# ---------------------------------------
def on_start(self):
""" initilize tick counter """
self.count = 0
def populate_indicators(self, dataframe: DataFrame, metadata: dict) -> DataFrame:
"""
Adds several different TA indicators to the given DataFrame
Performance Note: For the best performance be frugal on the number of indicators
you are using. Let uncomment only the indicator you are using in your strategies
or your hyperopt configuration, otherwise you will waste your memory and CPU usage.
"""
# Stoch
stoch = ta.STOCH(dataframe)
dataframe['slowk'] = stoch['slowk']
# RSI
dataframe['rsi'] = ta.RSI(dataframe)
# Inverse Fisher transform on RSI, values [-1.0, 1.0] (https://goo.gl/2JGGoy)
rsi = 0.1 * (dataframe['rsi'] - 50)
dataframe['fisher_rsi'] = (np.exp(2 * rsi) - 1) / (np.exp(2 * rsi) + 1)
# Bollinger bands
bollinger = qtpylib.bollinger_bands(qtpylib.typical_price(dataframe), window=20, stds=2)
dataframe['bb_lowerband'] = bollinger['lower']
# SAR Parabol
dataframe['sar'] = ta.SAR(dataframe)
# Hammer: values [0, 100]
dataframe['CDLHAMMER'] = ta.CDLHAMMER(dataframe)
return dataframe
def populate_buy_trend(self, dataframe: DataFrame, metadata: dict) -> DataFrame:
"""
Based on TA indicators, populates the buy signal for the given dataframe
:param dataframe: DataFrame
:return: DataFrame with buy column
"""
dataframe.loc[
(
(dataframe['rsi'] < 30) &
(dataframe['slowk'] < 20) &
(dataframe['bb_lowerband'] > dataframe['close']) &
(dataframe['CDLHAMMER'] == 100)
),
'buy'] = 1
return dataframe
def populate_sell_trend(self, dataframe: DataFrame, metadata: dict) -> DataFrame:
"""
Based on TA indicators, populates the sell signal for the given dataframe
:param dataframe: DataFrame
:return: DataFrame with buy column
"""
dataframe.loc[
(
(dataframe['sar'] > dataframe['close']) &
(dataframe['fisher_rsi'] > 0.3)
),
'sell'] = 1
return dataframe
# ---------------------------------------
def on_quote(self, instrument):
# quote = instrument.get_quote()
# ^^ quote data available via get_quote()
pass
# ---------------------------------------
def on_orderbook(self, instrument):
pass
# ---------------------------------------
def on_fill(self, instrument, order):
pass
# ---------------------------------------
def on_tick(self, instrument):
# tick = instrument.get_tick()
# # get OHLCV bars
# print("TICK:", tick)
pass
# ---------------------------------------
# def on_bar(self, instrument):
# # get instrument history
# bars = instrument.get_bars()
# # print(bars)
# # # make sure we have at least 20 bars to work with
# # if len(bars) < 20:
# # return
#
# indicators = self.populate_indicators(bars,None)
# bar = instrument.get_bars(lookback=1, as_dict=True)
#
#
# buy_signal = self.populate_buy_trend(indicators,None)
# if not np.isnan(buy_signal['buy'].iloc[-1]):
# # get OHLCV bars
# print('BUY::::::::::::')
# print("BAR:", bar)
# # send a buy signal
# instrument.buy(1)
# # record values for future analysis
# self.record(ma_buy=1)
#
#
# # sell_signal = self.populate_sell_trend(indicators,None)
# # if not np.isnan(buy_signal['sell'].iloc[-1]):
# # print('SELL::::::::::::')
# # print("BAR:", bar)
# # # send a buy signal
# # instrument.sell(1)
# #
# # # record values for future analysis
# # self.record(ma_sell=1)
    def on_bar(self, instrument):
        """Bar-event handler: compute indicators and place/flip orders.

        Reads the latest bars from *instrument*, derives a BUY/SELL direction
        from the buy/sell signal columns, and then:
        - logs (but does nothing) when a position already matches the direction,
        - exits and reverses when the signal opposes the open position,
        - opens a fresh single-unit position when flat with no pending orders.
        """
        # Place Orders...
        bar = instrument.get_bars(lookback=1, as_dict=True)
        print("BAR:", bar)
        bars = instrument.get_bars()
        indicators = self.populate_indicators(bars,None)
        # get current position data
        positions = instrument.get_positions()
        print('Positions :', positions)
        buy_signal = self.populate_buy_trend(indicators, None)
        sell_signal = self.populate_sell_trend(indicators,None)
        direction=''
        # if not instrument.pending_orders and positions["position"] == 0:
        # a non-NaN value in the last row of 'buy'/'sell' means the signal fired;
        # buy takes precedence when both fire on the same bar
        if not np.isnan(buy_signal['buy'].iloc[-1]):
            direction='BUY'
        elif not np.isnan(sell_signal['sell'].iloc[-1]):
            direction='SELL'
        print('Direction :', direction)
        # get position direction
        # NOTE(review): the four branches below are independent `if`s, not
        # elif — after a reversal the flat-position branch at the bottom is
        # still evaluated; confirm instrument.positions updates synchronously.
        if instrument.positions['position'] > 0 and direction =='BUY':
            # already buy order in place
            print('Already BUY order in Place - So not placing order - Position - ' + str(instrument.positions['position']))
            pass
        if instrument.positions['position'] < 0 and direction =='SELL':
            print('Already SELL order in Place - So not placing order - Position - ' + str(instrument.positions['position']))
            pass
        if instrument.positions['position'] > 0 and direction =='SELL':
            # long position but sell signal: close and reverse
            print('exiting BUY position - placing new SELL order - Position - ' + str(instrument.positions['position']))
            instrument.exit()
            instrument.sell(1)
            self.record(TD_SS_SELL=1)
        if instrument.positions['position'] < 0 and direction =='BUY':
            # short position but buy signal: close and reverse
            print('exiting SELL position - placing new BUY order - Position - ' + str(instrument.positions['position']))
            instrument.exit()
            instrument.buy(1)
            self.record(TD_SS_BUY=1)
        # flat with nothing pending: open a new position in the signal direction
        if not instrument.pending_orders and instrument.positions["position"] == 0:
            if direction =='BUY':
                print("BUY Signal and No Position - Placing Order")
                instrument.buy(1)
                self.record(TD_SS_BUY=1)
            elif direction =='SELL':
                print("Sell Signal and No Position - Placing Order")
                instrument.sell(1)
                self.record(TD_SS_SELL=1)
# ===========================================
if __name__ == "__main__":
    # # get most active ES contract to trade
    # ACTIVE_MONTH = futures.get_active_contract("ES")
    # print("Active month for ES is:", ACTIVE_MONTH)
    #
    # strategy = Strategy002(
    #     instruments=[("ES", "FUT", "GLOBEX", "USD", 202009, 0.0, "")],
    #     resolution="5T",
    #     ibport=7497
    # )
    #
    # # strategy = Strategy002(
    # #     instruments=[("ES", "FUT", "GLOBEX", "USD", 202009, 0.0, "")],
    # #     resolution="1H",
    # #     backtest=True,
    # #     ibport=7497,
    # #     start='2020-05-01',
    # #     end='2020-05-31',
    # #     data='/Users/sponraj/Desktop/History_Data/ES/Under_Test',
    # #     output='./portfolio.csv'
    # # )
    # strategy.run()
    # NOTE(review): this hard-coded instruments list is immediately
    # overwritten two lines below and therefore has no effect.
    instruments = [("ES", "FUT", "GLOBEX", "USD", 202009, 0.0, "")]
    # NOTE(review): lse_df is never defined in this file — both of the
    # following lines will raise NameError at runtime; confirm where the
    # instrument DataFrame is supposed to be loaded from.
    print(lse_df)
    instruments = lse_df.to_records(index=False).tolist()
print(instruments) | [
"talib.abstract.CDLHAMMER",
"numpy.isnan",
"talib.abstract.STOCH",
"numpy.exp",
"talib.abstract.RSI",
"freqtrade.vendor.qtpylib.indicators.typical_price",
"talib.abstract.SAR"
] | [((1949, 1968), 'talib.abstract.STOCH', 'ta.STOCH', (['dataframe'], {}), '(dataframe)\n', (1957, 1968), True, 'import talib.abstract as ta\n'), ((2055, 2072), 'talib.abstract.RSI', 'ta.RSI', (['dataframe'], {}), '(dataframe)\n', (2061, 2072), True, 'import talib.abstract as ta\n'), ((2513, 2530), 'talib.abstract.SAR', 'ta.SAR', (['dataframe'], {}), '(dataframe)\n', (2519, 2530), True, 'import talib.abstract as ta\n'), ((2600, 2623), 'talib.abstract.CDLHAMMER', 'ta.CDLHAMMER', (['dataframe'], {}), '(dataframe)\n', (2612, 2623), True, 'import talib.abstract as ta\n'), ((2355, 2387), 'freqtrade.vendor.qtpylib.indicators.typical_price', 'qtpylib.typical_price', (['dataframe'], {}), '(dataframe)\n', (2376, 2387), True, 'import freqtrade.vendor.qtpylib.indicators as qtpylib\n'), ((6091, 6127), 'numpy.isnan', 'np.isnan', (["buy_signal['buy'].iloc[-1]"], {}), "(buy_signal['buy'].iloc[-1])\n", (6099, 6127), True, 'import numpy as np\n'), ((2239, 2254), 'numpy.exp', 'np.exp', (['(2 * rsi)'], {}), '(2 * rsi)\n', (2245, 2254), True, 'import numpy as np\n'), ((2263, 2278), 'numpy.exp', 'np.exp', (['(2 * rsi)'], {}), '(2 * rsi)\n', (2269, 2278), True, 'import numpy as np\n'), ((6174, 6212), 'numpy.isnan', 'np.isnan', (["sell_signal['sell'].iloc[-1]"], {}), "(sell_signal['sell'].iloc[-1])\n", (6182, 6212), True, 'import numpy as np\n')] |
import cv2
import numpy as np
from colors import *
def get_class_names(label_path):
    """Read class labels (one per line) from *label_path*.

    Returns the list of names, or None when the list is empty.
    """
    with open(label_path, 'rt') as handle:
        names = handle.read().rstrip('\n').split('\n')
    if names:
        return names
    return None
class DetectBoxes:
    """Parses network detections and draws boxes (and optional masks) on frames.

    The backend is selected implicitly from the thresholds passed at
    construction: a non-zero NMS threshold selects YOLO-style output, a
    non-zero mask threshold selects Mask R-CNN output, otherwise
    Faster R-CNN style output is assumed.
    """

    def __init__(self, label_path, confidence_threshold=0.5, nms_threshold=0, mask_threshold=0, has_mask=False):
        # class label strings loaded from file (or None when the file is empty)
        self.classes = get_class_names(label_path)
        self.confThreshold = confidence_threshold
        self.nmsThreshold = nms_threshold
        self.maskThreshold = mask_threshold
        self.hasMask = has_mask
        self.maskColor = [255, 178, 50]  # BGR colour used for mask overlay and contours

    # detect bounding boxes from given frame
    def detect_bounding_boxes(self, frame, output, masks=None):
        """Dispatch *output* to the parser matching the configured thresholds."""
        height = frame.shape[0]
        width = frame.shape[1]
        # BUGFIX: was "is not 0" — an identity test against an int literal,
        # which is implementation-dependent; compare by value instead.
        if self.nmsThreshold != 0:
            self.detect_yolo(frame, output, width, height)
        elif self.maskThreshold != 0:
            self.detect_maskrcnn(frame, output, width, height, masks)
        else:
            self.detect_fast_rcnn(frame, output, width, height)

    def detect_fast_rcnn(self, frame, output, width, height):
        """Draw boxes from Faster R-CNN style output (1x1xNx7 blob).

        NOTE: the original passed (height, width) at the call site and then
        swapped the parameters again inside the loop; the double swap
        cancelled out but was misleading. Width now scales x-coordinates and
        height scales y-coordinates, matching the parameter names.
        """
        for detection in output[0, 0, :, :]:
            score = float(detection[2])
            if score > self.confThreshold:
                class_id = int(detection[1])
                # detection[3..6] are box corners as fractions of the frame
                left = int(detection[3] * width)
                top = int(detection[4] * height)
                right = int(detection[5] * width)
                bottom = int(detection[6] * height)
                self.draw_boxes(frame, class_id, score, left, top, right, bottom)

    def detect_yolo(self, frame, output, frame_width, frame_height):
        """Parse YOLO output layers, apply NMS, and draw the surviving boxes."""
        # Collect every box above the confidence threshold
        class_ids = []
        confidences = []
        boxes = []
        for out in output:
            for detection in out:
                scores = detection[5:]
                class_id = np.argmax(scores)
                confidence = scores[class_id]
                if confidence > self.confThreshold:
                    # YOLO reports centre + size as fractions of the frame
                    center_x = int(detection[0] * frame_width)
                    center_y = int(detection[1] * frame_height)
                    width = int(detection[2] * frame_width)
                    height = int(detection[3] * frame_height)
                    left = int(center_x - width / 2)
                    top = int(center_y - height / 2)
                    class_ids.append(class_id)
                    confidences.append(float(confidence))
                    boxes.append([left, top, width, height])
        # Using non-maximum suppression remove overlapping boxes
        # with low confidence
        indices = cv2.dnn.NMSBoxes(boxes, confidences, self.confThreshold, self.nmsThreshold)
        # ROBUSTNESS: NMSBoxes returns an Nx1 array in older OpenCV versions
        # and a flat array in newer ones; flattening handles both (the
        # original's i[0] indexing only worked with the Nx1 shape).
        for i in np.asarray(indices).reshape(-1):
            left, top, width, height = boxes[i]
            self.draw_boxes(frame, class_ids[i], confidences[i], left, top, left + width, top + height)

    def detect_maskrcnn(self, frame, output, width, height, masks):
        """Draw boxes (and optionally masks) from Mask R-CNN output."""
        numDetections = output.shape[2]
        for i in range(numDetections):
            box = output[0, 0, i]
            mask = masks[i]
            score = box[2]
            if score > self.confThreshold:
                class_id = int(box[1])
                # box coordinates are fractions of the frame; clamp into bounds
                left = max(0, min(int(width * box[3]), width - 1))
                top = max(0, min(int(height * box[4]), height - 1))
                right = max(0, min(int(width * box[5]), width - 1))
                bottom = max(0, min(int(height * box[6]), height - 1))
                class_mask = mask[class_id]
                self.draw_boxes(frame, class_id, score, left, top, right, bottom)
                if self.hasMask:
                    self.draw_masks(frame, class_mask, left, top, right, bottom)

    # draw boxes higher than confidence threshold
    def draw_boxes(self, frame, class_id, conf, left, top, right, bottom):
        """Draw one labelled bounding box on *frame*."""
        color, txt_color = ((0, 0, 0), (0, 0, 0))
        label = '{}%'.format(round((conf*100), 1))
        if self.classes:
            assert (class_id < len(self.classes))
            label = '%s %s' % (self.classes[class_id], label)
            color = STANDARD_COLORS[class_id % len(STANDARD_COLORS)]
        # use white text on dark box colours for readability
        if sum(color) < 500:
            txt_color = (255, 255, 255)
        # draw a bounding box
        cv2.rectangle(frame, (left, top), (right, bottom), color=color, thickness=3)
        # put label on top of detected bounding box, on a filled background
        label_size, base_line = cv2.getTextSize(label, cv2.FONT_HERSHEY_SIMPLEX, 0.5, 1)
        top = max(top, label_size[1])
        cv2.rectangle(frame, (left, top - round(1.5 * label_size[1])),
                      (left + round(1.5 * label_size[0]), top + base_line),
                      color=color, thickness=cv2.FILLED)
        cv2.putText(frame, label, (left, top), cv2.FONT_HERSHEY_SIMPLEX, 0.75, color=txt_color, thickness=2)

    def draw_masks(self, frame, class_mask, left, top, right, bottom):
        """Blend the class mask into the box region and outline its contours."""
        class_mask = cv2.resize(class_mask, (right - left + 1, bottom - top + 1))
        mask = (class_mask > self.maskThreshold)
        roi = frame[top:bottom+1, left:right+1][mask]
        # 30% mask colour + 70% original pixels inside the mask
        frame[top:bottom+1, left:right+1][mask] = ([0.3*self.maskColor[0], 0.3*self.maskColor[1],
                                                    0.3*self.maskColor[2]] + 0.7 * roi).astype(np.uint8)
        mask = mask.astype(np.uint8)
        # NOTE(review): findContours returns 2 values on OpenCV 4 (as here)
        # but 3 on OpenCV 3 — confirm the targeted OpenCV version.
        contours, hierarchy = cv2.findContours(mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
        cv2.drawContours(frame[top:bottom+1, left:right+1], contours, -1, self.maskColor, 3, cv2.LINE_8, hierarchy, 100)
"cv2.findContours",
"cv2.putText",
"cv2.dnn.NMSBoxes",
"numpy.argmax",
"cv2.getTextSize",
"cv2.rectangle",
"cv2.drawContours",
"cv2.resize"
] | [((2729, 2804), 'cv2.dnn.NMSBoxes', 'cv2.dnn.NMSBoxes', (['boxes', 'confidences', 'self.confThreshold', 'self.nmsThreshold'], {}), '(boxes, confidences, self.confThreshold, self.nmsThreshold)\n', (2745, 2804), False, 'import cv2\n'), ((4583, 4659), 'cv2.rectangle', 'cv2.rectangle', (['frame', '(left, top)', '(right, bottom)'], {'color': 'color', 'thickness': '(3)'}), '(frame, (left, top), (right, bottom), color=color, thickness=3)\n', (4596, 4659), False, 'import cv2\n'), ((4745, 4801), 'cv2.getTextSize', 'cv2.getTextSize', (['label', 'cv2.FONT_HERSHEY_SIMPLEX', '(0.5)', '(1)'], {}), '(label, cv2.FONT_HERSHEY_SIMPLEX, 0.5, 1)\n', (4760, 4801), False, 'import cv2\n'), ((5052, 5156), 'cv2.putText', 'cv2.putText', (['frame', 'label', '(left, top)', 'cv2.FONT_HERSHEY_SIMPLEX', '(0.75)'], {'color': 'txt_color', 'thickness': '(2)'}), '(frame, label, (left, top), cv2.FONT_HERSHEY_SIMPLEX, 0.75,\n color=txt_color, thickness=2)\n', (5063, 5156), False, 'import cv2\n'), ((5245, 5305), 'cv2.resize', 'cv2.resize', (['class_mask', '(right - left + 1, bottom - top + 1)'], {}), '(class_mask, (right - left + 1, bottom - top + 1))\n', (5255, 5305), False, 'import cv2\n'), ((5679, 5741), 'cv2.findContours', 'cv2.findContours', (['mask', 'cv2.RETR_TREE', 'cv2.CHAIN_APPROX_SIMPLE'], {}), '(mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\n', (5695, 5741), False, 'import cv2\n'), ((5750, 5871), 'cv2.drawContours', 'cv2.drawContours', (['frame[top:bottom + 1, left:right + 1]', 'contours', '(-1)', 'self.maskColor', '(3)', 'cv2.LINE_8', 'hierarchy', '(100)'], {}), '(frame[top:bottom + 1, left:right + 1], contours, -1, self.\n maskColor, 3, cv2.LINE_8, hierarchy, 100)\n', (5766, 5871), False, 'import cv2\n'), ((1978, 1995), 'numpy.argmax', 'np.argmax', (['scores'], {}), '(scores)\n', (1987, 1995), True, 'import numpy as np\n')] |
#!/usr/bin/env python
"""Functions useful for manipulating atom connectivity.
The original use of these functions is to allow for assign_charges to work and
be able to give a molecule, cluster or periodic cell of atoms the same charges as another
molecule, cluster or periodic cell.
Includes a utility to read a molecule xyz file, a population analysis in g09 and
a target cluster xyz to assign the charges to the cluster.
Usage:
assign_charges.py mol.log clust.xyz
Includes options for Mulliken or RESP and ouptut file names.
"""
import numpy as np
import sys
import argparse
import fromage.io.read_file as rf
def detect_1_connect(in_atoms):
    """
    Make a matrix of first connectivities of a list of atoms.

    Periodicity is taken from ``in_atoms.vectors``: an all-zero lattice is
    treated as non-periodic (the original docstring documented a separate
    ``vectors`` parameter that never existed).

    Parameters
    ----------
    in_atoms : Mol object
        Atoms which need their connectivity detected

    Returns
    -------
    cnct : numpy matrix
        The matrix where each row and each column correspond to one atom. If the
        two atoms are bonded or the same, the matrix element is 1. Otherwise it
        is 0
    """
    nat_mol = len(in_atoms)
    cnct = np.zeros((nat_mol, nat_mol), dtype=int)
    # Decide once whether the system is periodic instead of re-testing the
    # lattice vectors inside the double loop.
    periodic = np.count_nonzero(in_atoms.vectors) != 0
    for i, i_atom in enumerate(in_atoms):
        for j, j_atom in enumerate(in_atoms):
            if periodic:
                if in_atoms.per_bonded(i_atom, j_atom):
                    cnct[i][j] = 1
            else:
                if in_atoms.bonded(i_atom, j_atom):
                    cnct[i][j] = 1
    return cnct
def expand_connect(in_mat):
    """
    Expand a connectivity matrix by one step.

    For each atom, the atoms not yet connected to it ("dangles") are checked
    for neighbours shared with it; whenever a shared neighbour exists, the
    pair is assigned the smallest combined connection order through any of
    the shared neighbours.

    Parameters
    ----------
    in_mat : 2-d array-like
        Connectivity matrix to be expanded

    Returns
    -------
    out_mat : 2-d array-like
        Once-expanded connectivity matrix
    """
    out_mat = np.copy(in_mat)
    for idx, row in enumerate(in_mat):
        # atoms already connected to idx, and unconnected ones above the diagonal
        connected = [j for j, order in enumerate(row) if order != 0]
        unconnected = [j for j, order in enumerate(row) if order == 0 and j > idx]
        for dangle in unconnected:
            candidate_orders = [
                order + in_mat[idx][k]
                for k, order in enumerate(in_mat[dangle])
                if order != 0 and k in connected
            ]
            if candidate_orders:
                best = min(candidate_orders)
                out_mat[idx][dangle] = best
                out_mat[dangle][idx] = best
    return out_mat
def complete_expand(in_mat):
    """Repeatedly expand a connectivity matrix until it reaches a fixed point."""
    current = np.copy(in_mat)
    while True:
        expanded = expand_connect(current)
        if np.array_equal(current, expanded):
            return current
        current = np.copy(expanded)
def get_connectivity_mat(in_mol):
    """Return the fully expanded connectivity matrix of the Mol."""
    return complete_expand(detect_1_connect(in_mol))
def charged_kinds(in_atoms, in_kinds):
    """
    Get charged atom kinds from charged atoms and kinds.

    For every requested kind, the partial charges of all atoms of that kind
    are averaged; kinds with no matching atoms get a charge of 0.

    Parameters
    ----------
    in_atoms : Mol object
        The atoms should be charged and some of them at least should be of the
        relevant kind
    in_kinds : list of tuples
        Atom kinds of the form (element string, frozenset of connectivity info)

    Returns
    -------
    q_kinds : list of tuples
        One (average charge, kind) tuple per requested kind
    """
    q_kinds = []
    for kind in in_kinds:
        matching = [atom.q for atom in in_atoms if atom.kind == kind]
        if matching:
            avg = sum(matching) / float(len(matching))
        else:
            avg = 0
        q_kinds.append((avg, kind))
    return q_kinds
def assign_charges(char_atoms, unchar_atoms):
    """
    Assign charges from one list of atoms to another list of atoms.

    Kinds are determined from the connectivity of the charged system (bonds
    defined by the Mol's bonding settings), average charges are computed per
    kind, and each uncharged atom then receives the average charge of its own
    kind. ``unchar_atoms`` is modified in place; nothing is returned. Works
    for periodic and non-periodic systems.

    Parameters
    ----------
    char_atoms : Mol object
        Atoms which already have assigned charge
    unchar_atoms : Mol object
        Atoms which need charges assigned to them
    """
    # connectivity and kinds of the charged reference system
    ref_connect = complete_expand(detect_1_connect(char_atoms))
    ref_kinds = []
    for idx, atom in enumerate(char_atoms):
        atom.set_connectivity(char_atoms, ref_connect[idx])
        ref_kinds.append(atom.kind)
    q_kinds = charged_kinds(char_atoms, set(ref_kinds))
    # connectivity of the target system; match each atom's kind against the
    # charged kinds and copy the average charge over
    tgt_connect = complete_expand(detect_1_connect(unchar_atoms))
    for idx, atom in enumerate(unchar_atoms):
        atom.set_connectivity(unchar_atoms, tgt_connect[idx])
        for avg_q, kind in q_kinds:
            if atom.kind == kind:
                atom.q = avg_q
    return
def main(in_xyz, in_log, target, output, bonding, thresh, kind):
    """Assign charges from a population analysis to a target cluster.

    Reads the molecule geometry (from *in_xyz* if given, otherwise from the
    Gaussian log), reads the charges of *kind* ('esp' or 'mulliken') from
    *in_log*, assigns them to the cluster in *target* by atom-kind matching,
    warns about atoms left with a null charge, and writes the result to
    *output* in xyz format.
    """
    if in_xyz:
        mol = rf.mol_from_file(in_xyz)
    else:
        mol = rf.mol_from_gauss(in_log)
    charges = rf.read_g_char(in_log, kind)[0]
    cluster = rf.mol_from_file(target)

    mol.set_bonding(bonding=bonding, thresh=thresh)
    cluster.set_bonding(bonding=bonding, thresh=thresh)

    for atom, char in zip(mol, charges):
        atom.q = char
    assign_charges(mol, cluster)

    # warning if some atoms have not been assigned or if some original charges
    # were 0
    bad_atoms = [atom for atom in cluster if abs(atom.q) <= 0.000]
    if bad_atoms:
        print("WARNING: " + str(len(bad_atoms)) + " atoms have null charge!")
        print(bad_atoms)

    # BUGFIX: use a context manager so the output file is closed even if
    # writing fails (the original left the handle open on error).
    with open(output, "w") as out_file:
        out_file.write(str(len(cluster)) + "\n\n")
        for atom in cluster:
            out_file.write(str(atom) + "\n")
if __name__ == '__main__':
    # parse the input
    # NOTE(review): 'default=' on the two required positional arguments below
    # has no effect — argparse still requires them on the command line.
    parser = argparse.ArgumentParser()
    parser.add_argument("in_log", help="Input .log file with RESP analysis",
                        default="gaussian.log")
    parser.add_argument("target", help="Target .xyz file to assign charges to",
                        default="cluster.xyz")
    parser.add_argument(
        "-i", "--in_xyz", help="Input .xyz file of single molecule if the geometry in the log file is not good")
    parser.add_argument("-o", "--output", help="Name of the output file",
                        default="out_char", type=str)
    parser.add_argument("-b", "--bonding", help="Bonding type to be evaluated. Use 'dis' (default), 'cov' or 'vdw' to start calculating the distance at the centre of the atoms, the surface of the covalent sphere or the surface of the vdw sphere.", default="dis", type=str)
    parser.add_argument("-t", "--threshold", help="Maximum length in Angstrom that qualifies as a bond. Default 1.7",
                        default=1.7, type=float)
    parser.add_argument("-k", "--kind", help="Kind of population, mulliken or esp",
                        default="esp", type=str)
    user_input = sys.argv[1:]
    args = parser.parse_args(user_input)
    # run the charge assignment with the parsed options
    main(args.in_xyz, args.in_log, args.target,
         args.output, args.bonding, args.threshold, args.kind)
| [
"fromage.io.read_file.read_g_char",
"numpy.count_nonzero",
"argparse.ArgumentParser",
"numpy.copy",
"numpy.zeros",
"fromage.io.read_file.mol_from_gauss",
"numpy.array_equal",
"fromage.io.read_file.mol_from_file"
] | [((1213, 1252), 'numpy.zeros', 'np.zeros', (['(nat_mol, nat_mol)'], {'dtype': 'int'}), '((nat_mol, nat_mol), dtype=int)\n', (1221, 1252), True, 'import numpy as np\n'), ((2178, 2193), 'numpy.copy', 'np.copy', (['in_mat'], {}), '(in_mat)\n', (2185, 2193), True, 'import numpy as np\n'), ((3064, 3079), 'numpy.copy', 'np.copy', (['in_mat'], {}), '(in_mat)\n', (3071, 3079), True, 'import numpy as np\n'), ((6464, 6488), 'fromage.io.read_file.mol_from_file', 'rf.mol_from_file', (['target'], {}), '(target)\n', (6480, 6488), True, 'import fromage.io.read_file as rf\n'), ((7262, 7287), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (7285, 7287), False, 'import argparse\n'), ((3171, 3200), 'numpy.array_equal', 'np.array_equal', (['mat', 'temp_mat'], {}), '(mat, temp_mat)\n', (3185, 3200), True, 'import numpy as np\n'), ((3234, 3251), 'numpy.copy', 'np.copy', (['temp_mat'], {}), '(temp_mat)\n', (3241, 3251), True, 'import numpy as np\n'), ((6329, 6353), 'fromage.io.read_file.mol_from_file', 'rf.mol_from_file', (['in_xyz'], {}), '(in_xyz)\n', (6345, 6353), True, 'import fromage.io.read_file as rf\n'), ((6378, 6403), 'fromage.io.read_file.mol_from_gauss', 'rf.mol_from_gauss', (['in_log'], {}), '(in_log)\n', (6395, 6403), True, 'import fromage.io.read_file as rf\n'), ((6418, 6446), 'fromage.io.read_file.read_g_char', 'rf.read_g_char', (['in_log', 'kind'], {}), '(in_log, kind)\n', (6432, 6446), True, 'import fromage.io.read_file as rf\n'), ((1355, 1389), 'numpy.count_nonzero', 'np.count_nonzero', (['in_atoms.vectors'], {}), '(in_atoms.vectors)\n', (1371, 1389), True, 'import numpy as np\n')] |
#!/usr/bin/env python
"""
FakeTui
========
Simply emulates a table for testing
It listens to /arm_action_beliefs and
maintains a list of current blocks and
publishes to /blocks when it updates
"""
import rospy
from std_msgs.msg import String
from dautils import get_tuio_bounds, get_arm_bounds, arm_to_tuio, is_arm_calibrated
import json
import numpy as np
class FakeTui:
    """Emulates a tangible table for testing.

    Listens on /arm_action_beliefs, maintains the list of current blocks and
    publishes the updated list to /blocks as JSON whenever it changes.
    """

    # class-level defaults; ``blocks`` is rebound per instance in __init__ so
    # separate FakeTui instances no longer share a single mutable class list
    blocks = []
    # maximum euclidean distance (in TUIO coordinates) for a block to match
    detection_threshold = 0.1

    def __init__(self):
        rospy.init_node("fake_tui", anonymous=False)
        self.blocks = []  # per-instance block list (fixes shared class attribute)
        self.block_publisher = rospy.Publisher("/blocks", String)
        self.arm_subscriber = rospy.Subscriber("/arm_action_beliefs", String, self.update_blocks)
        #self.calibration_subscriber = rospy.Subscriber("/calibration_results", String, self.update_calibration)

    def update_blocks(self, message):
        """Apply an add/remove action from /arm_action_beliefs and republish /blocks."""
        rospy.loginfo("UPDATING BLOCKS"+message.data)
        action_belief = json.loads(message.data)
        new_block = action_belief['block']
        action = action_belief['action']
        if is_arm_calibrated() is False:
            # no calibration yet: park the block at the table centre
            tuio_block = {'x': 0.5, 'y': 0.5, 'id': new_block['id']}
        else:
            tuio_x, tuio_y = arm_to_tuio(new_block['x'], new_block['y'], get_tuio_bounds(), get_arm_bounds())
            tuio_block = {'x': tuio_x, 'y': tuio_y, 'id': new_block['id']}
        if action == 'add':
            self.blocks.append(tuio_block)
        else:
            self.remove_block(tuio_block)
        rospy.loginfo(self.blocks)
        self.block_publisher.publish(String(json.dumps(self.blocks)))

    def remove_block(self, block):
        """Remove the stored block closest to *block* when within the detection threshold."""
        if not self.blocks:
            # nothing to remove (the original raised ValueError on an empty list)
            return
        proximities = [np.linalg.norm(np.array([b['x'], b['y']]) - np.array([block['x'], block['y']]))
                       for b in self.blocks]
        closest_index = int(np.argmin(proximities))
        if np.min(proximities) < self.detection_threshold:
            # BUGFIX: np.argmin returns a scalar index; the original indexed
            # into it (closest_block[0]) which raised IndexError.
            self.blocks.pop(closest_index)
if __name__ == '__main__':
    # instantiate the node and hand control to the ROS event loop
    f = FakeTui()
    rospy.spin()
| [
"dautils.is_arm_calibrated",
"rospy.Subscriber",
"json.loads",
"rospy.Publisher",
"numpy.argmin",
"dautils.get_arm_bounds",
"rospy.loginfo",
"json.dumps",
"numpy.min",
"numpy.array",
"rospy.init_node",
"rospy.spin",
"dautils.get_tuio_bounds"
] | [((1945, 1957), 'rospy.spin', 'rospy.spin', ([], {}), '()\n', (1955, 1957), False, 'import rospy\n'), ((470, 514), 'rospy.init_node', 'rospy.init_node', (['"""fake_tui"""'], {'anonymous': '(False)'}), "('fake_tui', anonymous=False)\n", (485, 514), False, 'import rospy\n'), ((546, 580), 'rospy.Publisher', 'rospy.Publisher', (['"""/blocks"""', 'String'], {}), "('/blocks', String)\n", (561, 580), False, 'import rospy\n'), ((611, 678), 'rospy.Subscriber', 'rospy.Subscriber', (['"""/arm_action_beliefs"""', 'String', 'self.update_blocks'], {}), "('/arm_action_beliefs', String, self.update_blocks)\n", (627, 678), False, 'import rospy\n'), ((839, 886), 'rospy.loginfo', 'rospy.loginfo', (["('UPDATING BLOCKS' + message.data)"], {}), "('UPDATING BLOCKS' + message.data)\n", (852, 886), False, 'import rospy\n'), ((909, 933), 'json.loads', 'json.loads', (['message.data'], {}), '(message.data)\n', (919, 933), False, 'import json\n'), ((1463, 1489), 'rospy.loginfo', 'rospy.loginfo', (['self.blocks'], {}), '(self.blocks)\n', (1476, 1489), False, 'import rospy\n'), ((1766, 1788), 'numpy.argmin', 'np.argmin', (['proximities'], {}), '(proximities)\n', (1775, 1788), True, 'import numpy as np\n'), ((1029, 1048), 'dautils.is_arm_calibrated', 'is_arm_calibrated', ([], {}), '()\n', (1046, 1048), False, 'from dautils import get_tuio_bounds, get_arm_bounds, arm_to_tuio, is_arm_calibrated\n'), ((1800, 1819), 'numpy.min', 'np.min', (['proximities'], {}), '(proximities)\n', (1806, 1819), True, 'import numpy as np\n'), ((1215, 1232), 'dautils.get_tuio_bounds', 'get_tuio_bounds', ([], {}), '()\n', (1230, 1232), False, 'from dautils import get_tuio_bounds, get_arm_bounds, arm_to_tuio, is_arm_calibrated\n'), ((1234, 1250), 'dautils.get_arm_bounds', 'get_arm_bounds', ([], {}), '()\n', (1248, 1250), False, 'from dautils import get_tuio_bounds, get_arm_bounds, arm_to_tuio, is_arm_calibrated\n'), ((1534, 1557), 'json.dumps', 'json.dumps', (['self.blocks'], {}), '(self.blocks)\n', (1544, 1557), 
False, 'import json\n'), ((1634, 1660), 'numpy.array', 'np.array', (["[b['x'], b['y']]"], {}), "([b['x'], b['y']])\n", (1642, 1660), True, 'import numpy as np\n'), ((1661, 1695), 'numpy.array', 'np.array', (["[block['x'], block['y']]"], {}), "([block['x'], block['y']])\n", (1669, 1695), True, 'import numpy as np\n')] |
"""
This program file contains all the functions implemented to load and preprocess
the dataset for OVC
"""
import torch
from torch.autograd import Variable
import torch.nn as nn
import math
import numpy as np
from nltk.tokenize import sent_tokenize
import re
from machine_translation_vision.samplers import BucketBatchSampler
from collections import Counter
import unicodedata
import random
## Define a couple of parameters
# Special-token ids shared with the vocabulary files:
# PAD=0, UNK=1, SOS=2, EOS=3 (note the inconsistent capitalisation of PAD_Token)
SOS_token = 2
EOS_token = 3
UNK_token = 1
PAD_Token = 0
# Move tensors to the GPU when one is available
use_cuda = torch.cuda.is_available()
#Load the dataset in a text file located with data_path
def load_data(data_path):
    """Read a text file and return its lines, stripped of surrounding whitespace."""
    with open(data_path, 'r') as handle:
        return [line.strip() for line in handle.readlines()]
def quick_sort(lists):
    """Return *lists* sorted ascending via recursive quicksort.

    BUGFIX: the original popped the pivot off the caller's list, mutating the
    input argument; the list is now copied before partitioning so the caller's
    list is left untouched. As before, an empty input yields a new empty list
    and a single-element list is returned as-is.
    """
    if not lists:
        return []
    assert isinstance(lists, list)
    if len(lists) == 1:
        return lists
    items = list(lists)  # work on a copy; do not mutate the caller's list
    pivot = items.pop()
    llist, rlist = [], []
    for x in items:
        if x > pivot:
            rlist.append(x)
        else:
            llist.append(x)
    return quick_sort(llist) + [pivot] + quick_sort(rlist)
def heap_struct(alist):
    """Rearrange *alist* in place into a rough max-heap-like order and return it.

    NOTE(review): this is not a full heapify — only a single top-down pass is
    made, children are not recursively sifted, and alist[2*i+2] is accessed
    without a bounds check (the guard only tests 2*i+1), so it relies on the
    caller's list size. Used only by heap_topk; confirm the intended
    semantics before reusing elsewhere.
    """
    # the list is processed back-to-front, so reverse it first (in place)
    alist.reverse()
    for i in range(int((len(alist)-1)/2)):
        if len(alist) < 2*i+1:
            break
        # push the larger child up towards the front
        if alist[i] < alist[2*i+1]:
            alist[i], alist[2*i+1] = alist[2*i+1], alist[i]
        if alist[2*i+1] < alist[2*i+2]:
            alist[2*i+2], alist[2*i+1] = alist[2*i+1], alist[2*i+2]
    return alist
def heap_topk(s, k):
    """Select k extreme values from *s* using quick_sort + heap_struct.

    NOTE(review): despite the name, the update rule replaces the current
    worst candidate when ``x < topk[-1]``, i.e. this tracks the k *smallest*
    values, not the largest — confirm which was intended (its only use, in
    load_VI, is commented out).
    """
    # start from the sorted first k elements; topk[-1] is the largest candidate
    topk = quick_sort(s[:k])
    for x in s[k:]:
        if x<topk[-1]:
            # replace the worst candidate and restore a heap-ish ordering
            topk[-1] = x
            topk = heap_struct(topk)
            topk.reverse()
    return topk
def load_VI(data_path, target_sentence):
    """Load per-word visual-importance (VI) scores and align them to BPE tokens.

    Each line of *data_path* holds one float score per whitespace word of the
    corresponding sentence in *target_sentence*. Because the sentences are
    BPE-segmented (sub-tokens end in '@@'), each word-level score is meant to
    be divided over its sub-tokens and repeated for each of them, yielding one
    score per BPE token. Alignment errors are caught and the offending
    sentence is printed rather than raised.
    """
    with open(data_path,'r') as f:
        data = f.readlines()
    VIS = [[float(v) for v in vi.split()] for vi in data]
    new_VIS = []
    try:
        # NOTE(review): ``id`` shadows the builtin; kept for byte-identity.
        for id, (VI, sent) in enumerate(zip(VIS, target_sentence)):
            # pos tracks the index of the last BPE token already consumed
            pos = -1
            new_vi = []
            tokens = sent.split()
            if tokens[-1] == 't.' and tokens[-2] in ['bar@@', 'dar@@']:## noisy case of the bpe tokenizer, bart. -> bar@@ t.
                VI = VI[:-1]
            ##
            for i, vi in enumerate(VI):
                ##
                # NOTE(review): temp_pos starts at ``pos`` (the previous
                # word's last sub-token, which never ends in '@@'), so this
                # loop appears to never run and bpe_length stays 1; if the
                # intent was to count the current word's sub-tokens it should
                # presumably start at pos + 1 — confirm before changing.
                temp_pos, bpe_length = pos, 1
                while tokens[temp_pos].endswith('@@'):
                    temp_pos += 1
                    bpe_length += 1
                vi /= float(bpe_length)
                ##
                # advance over the current word's sub-tokens, repeating the score
                pos += 1
                new_vi.append(vi)
                while tokens[pos].endswith('@@'):
                    pos += 1
                    new_vi.append(vi)
                if pos == len(tokens)-1: break
            # sanity checks: every token got a score and every score was used
            assert pos == len(tokens)-1, (pos, len(tokens)-1)
            assert len(tokens) == len(new_vi), (len(tokens), len(new_vi))
            assert i==len(VI)-1, (i, len(VI)-1)
            ## top k
            # top_k = heap_topk(new_vi, 5)
            # top_k = list(Counter(top_k).keys())
            # if 0. in top_k: top_k.remove(0.)
            # if top_k is not None:
            #     new_vi = [vi if vi in top_k else 0. for vi in new_vi]
            # print(id)
            # print(new_vi)
            # print(tokens)
            new_VIS.append(new_vi)
    except BaseException:
        # report the sentence that failed to align instead of crashing
        print(id, len(VI), VI)
        print(len(tokens), sent)
    assert len(target_sentence) == len(new_VIS), (len(target_sentence), len(new_VIS), len(VIS))
    return new_VIS
def format_data(data_x, data_y, IKEA=False):
    """Pair up source/target lines into [src, tgt] lists.

    With IKEA=True the inputs are paragraphs: each is sentence-tokenised and
    only paragraphs whose sentence counts match on both sides are kept.

    BUGFIX: the original built ``data`` but never returned it, so every call
    yielded None; the return statement has been added.
    """
    if not IKEA:
        data = [[x.strip(), y.strip()] for x, y in zip(data_x, data_y)]
    else:
        data = []
        for x, y in zip(data_x, data_y):
            ## convert the paragraph into sentences
            x_s = sent_tokenize(x)
            y_s = sent_tokenize(y)
            ## keep only paragraphs whose sentence counts line up
            if len(x_s) == len(y_s):
                data += [[x.strip(), y.strip()] for x, y in zip(x_s, y_s)]
    return data
# Build word->id and id->word lookup tables from a loaded vocab file
def construct_vocab_dic(vocab):
    """Return (word2id, id2word) mappings; ids start at 1 (0 is reserved for PAD)."""
    word2id = {}
    id2word = {}
    for idx, word in enumerate(vocab, start=1):
        token = word.strip()
        word2id[token] = idx
        id2word[idx] = token
    return word2id, id2word
def _run_strip_accents(text):
"""Strips accents from a piece of text."""
text = unicodedata.normalize("NFD", text)
output = []
for char in text:
cat = unicodedata.category(char)
if cat == "Mn":
continue
output.append(char)
return "".join(output)
# Drop pairs where either sentence exceeds the token-length limit
def data_filter(data, max_length):
    """Keep only pairs whose source and target both have at most max_length tokens."""
    return [pair for pair in data
            if len(pair[0].split()) <= max_length and len(pair[1].split()) <= max_length]
# Global accumulator of out-of-vocabulary words seen by indexes_from_sentence
# (module-level mutable state; grows for the lifetime of the process)
missing_words = []
def indexes_from_sentence(vocab, sentence, drop_unk=False):
    """Map a space-tokenised sentence to vocabulary ids.

    Words missing from *vocab* are retried with accents stripped (the token
    is rewritten in place when that succeeds); still-unknown words are
    recorded in the module-level ``missing_words`` list and mapped to
    UNK_token. With drop_unk=True, UNK ids are dropped from the result.
    """
    words = sentence.split(' ')
    for i, word in enumerate(words):
        # membership test on the dict itself instead of vocab.keys()
        if word not in vocab:
            # _run_strip_accents is pure, so call it once instead of twice
            stripped = _run_strip_accents(word)
            if stripped in vocab:
                words[i] = stripped
            elif word not in missing_words:
                missing_words.append(word)
    indexes = [vocab.get(word, UNK_token) for word in words]
    if drop_unk:
        indexes = [i for i in indexes if i != UNK_token]
    return indexes
def variable_from_sentence(vocab, sentence):
    """Convert a sentence into a (len, 1) LongTensor Variable of vocab ids, EOS appended.

    BUGFIX: the original assigned ``indexes = (vocab, sentence)`` — a tuple —
    instead of calling indexes_from_sentence, so the following ``append``
    raised AttributeError on every call.
    """
    indexes = indexes_from_sentence(vocab, sentence)
    indexes.append(EOS_token)
    var = Variable(torch.LongTensor(indexes).view(-1, 1))
    if use_cuda:
        var = var.cuda()
    return var
def variables_from_pair(pair, s_vocab, t_vocab):
    """Return (source Variable, target Variable) for one sentence pair."""
    return (variable_from_sentence(s_vocab, pair[0]),
            variable_from_sentence(t_vocab, pair[1]))
# Index data pairs: each sentence becomes its vocabulary ids, EOS appended.
def create_data_index(pairs, source_vocab, target_vocab, drop_unk=False):
    """Return [[source_ids, target_ids], ...] with EOS_token ending every sequence."""
    indexed = []
    for x in pairs:
        src_ids = indexes_from_sentence(source_vocab, x[0], drop_unk=drop_unk) + [EOS_token]
        tgt_ids = indexes_from_sentence(target_vocab, x[1], drop_unk=drop_unk) + [EOS_token]
        indexed.append([src_ids, tgt_ids])
    return indexed
def create_data_index_VI(pairs, source_vocab, target_vocab, drop_unk=False):
    """Like create_data_index, but each pair carries a VI score list as a third
    element; a 0. score is appended for the EOS position."""
    indexed = []
    for x in pairs:
        src_ids = indexes_from_sentence(source_vocab, x[0], drop_unk=drop_unk) + [EOS_token]
        tgt_ids = indexes_from_sentence(target_vocab, x[1], drop_unk=drop_unk) + [EOS_token]
        indexed.append([src_ids, tgt_ids, x[2] + [0.]])
    return indexed
# Pad a sequence with the PAD symbol (id 0)
def pad_seq(seq, max_length):
    """Return *seq* right-padded with PAD (0) up to max_length; *seq* is not mutated."""
    # idiomatic list repetition instead of a comprehension over range()
    return seq + [0] * (max_length - len(seq))
def data_generator(data_pairs, batch_size):
    """
    Yield padded minibatches from indexed sentence pairs.

    Input:
        data_pairs: List of pairs, [[data_1,target_1],[data_2,target_2],...], where data_1 and target_1 are id_indexs from 1 to their own vocabulary size. The end of each instance should end with a EOS_token index.
        batch_size: The size of the batch (the final batch may be smaller).
    Output (per yield):
        batch_x: Variable with size: B*Lx, sorted by descending source length
        batch_y: Variable with size: B*Ly, in the same (source-sorted) order
        batch_x_lengths: A list which contains the length of each source language sentence in the batch, descending
        batch_y_lengths: A list which contains the length of each target language sentence in the batch, same order
        x_reverse_sorted_index: A list of index that realises the descending sort with respect to source length.
    """
    data_size = len(data_pairs)
    # (the original computed an unused ``num_batches`` with math.floor here; removed)
    for start in range(0, data_size, batch_size):
        end = min(start + batch_size, data_size)
        batch_data_x = [d[0] for d in data_pairs[start:end]]
        batch_data_y = [d[1] for d in data_pairs[start:end]]
        # pad every sequence in the batch to the batch maximum
        x_length = max(len(x) for x in batch_data_x)
        y_length = max(len(y) for y in batch_data_y)
        batch_x_lengths = [len(x) for x in batch_data_x]
        batch_x_pad = [pad_seq(x, x_length) for x in batch_data_x]
        batch_y_lengths = [len(y) for y in batch_data_y]
        batch_y_pad = [pad_seq(y, y_length) for y in batch_data_y]
        # sort by descending source length (as required by packed RNN inputs)
        x_reverse_sorted_index = list(reversed(list(np.argsort(batch_x_lengths))))
        batch_x_pad_sorted = [batch_x_pad[i] for i in x_reverse_sorted_index]
        batch_y_pad_sorted = [batch_y_pad[i] for i in x_reverse_sorted_index]
        batch_y_lengths_sorted = [batch_y_lengths[i] for i in x_reverse_sorted_index]
        # Generate batch_x and batch_y
        batch_x = Variable(torch.LongTensor(batch_x_pad_sorted))
        batch_y = Variable(torch.LongTensor(batch_y_pad_sorted))
        if use_cuda:
            batch_x = batch_x.cuda()
            batch_y = batch_y.cuda()
        # Yield the batch data
        yield batch_x, \
              batch_y, \
              list(reversed(sorted(batch_x_lengths))), \
              batch_y_lengths_sorted, \
              x_reverse_sorted_index
def data_generator_tl(data_pairs, batch_size):
    """
    Yield mini-batches whose target sentences all share the same length.

    A bucket sampler (borrowed from nmtpytorch) pre-groups instance indices
    by target-sentence length, so each batch drawn here has equal-length
    targets. Each batch is padded, sorted by source length in descending
    order, and wrapped in LongTensor Variables.

    Input:
        data_pairs: list of [source_ids, target_ids] pairs; each sequence is
            a list of vocabulary indices ending with the EOS token index.
        batch_size: number of instances per batch.
    Yields per batch:
        batch_x: Variable of size B*Lx (sorted by source length, descending)
        batch_y: Variable of size B*Ly (same permutation as batch_x)
        source lengths sorted in descending order
        target lengths reordered with the same permutation
    """
    # Bucket dataset indices by target-sentence length.
    target_lengths = [len(pair[1]) for pair in data_pairs]
    sampler = BucketBatchSampler(target_lengths, batch_size)
    for index_batch in sampler.__iter__():
        sources = [data_pairs[idx][0] for idx in index_batch]
        targets = [data_pairs[idx][1] for idx in index_batch]
        # Pad every sequence to the per-batch maximum length.
        src_pad_len = max(len(seq) for seq in sources)
        tgt_pad_len = max(len(seq) for seq in targets)
        src_lengths = [len(seq) for seq in sources]
        src_padded = [pad_seq(seq, src_pad_len) for seq in sources]
        tgt_lengths = [len(seq) for seq in targets]
        tgt_padded = [pad_seq(seq, tgt_pad_len) for seq in targets]
        # Permutation that sorts the batch by source length, longest first.
        order = [pos for pos in reversed(list(np.argsort(src_lengths)))]
        batch_x = Variable(torch.LongTensor([src_padded[pos] for pos in order]))
        batch_y = Variable(torch.LongTensor([tgt_padded[pos] for pos in order]))
        if use_cuda:
            batch_x = batch_x.cuda()
            batch_y = batch_y.cuda()
        yield batch_x, batch_y, list(reversed(sorted(src_lengths))), [tgt_lengths[pos] for pos in order]
def data_generator_single(batch_data_x):
    """
    Prepare a single source-side batch: pad, sort by length (descending),
    and wrap in a LongTensor Variable.

    Input:
        batch_data_x: list of token-id sequences.
    Returns:
        (batch_x, lengths sorted in descending order,
         permutation used to produce that order)
    """
    # Pad every sequence to the longest one in the batch.
    pad_to = max(len(tokens) for tokens in batch_data_x)
    lengths = [len(tokens) for tokens in batch_data_x]
    padded = [pad_seq(tokens, pad_to) for tokens in batch_data_x]
    # Descending-length permutation (reverse of numpy's ascending argsort).
    order = list(reversed(list(np.argsort(lengths))))
    batch_x = Variable(torch.LongTensor([padded[idx] for idx in order]))
    if use_cuda:
        batch_x = batch_x.cuda()
    return batch_x, list(reversed(sorted(lengths))), order
def data_generator_mtv(data_pairs, data_im, batch_size):
    """
    Yield sequential mini-batches of (source, target, image) data.

    Input:
        data_pairs: List of pairs, [[data_1,target_1],[data_2,target_2],...], where data_1 and target_1 are id_indexs from 1 to their own vocabulary size. The end of each instance whould end with a EOS_token index.
        batch_size: The size of the batch
        data_im: The numpy matrix which contains the image features. Size: (N,I), N is the number of samples and I is the image feature size
    Yields per batch:
        batch_x: Variable with size: B*Lx, sorted by source length (descending)
        batch_y: Variable with size: B*Ly, same permutation as batch_x
        batch_im: image-feature Variable, same permutation as batch_x
        batch_x_lengths: source lengths sorted in descending order
        batch_y_lengths_sorted: target lengths under the same permutation
        x_reverse_sorted_index: the permutation applied to the batch
    """
    data_size = len(data_pairs)
    # NOTE(review): num_batches is computed but never used.
    num_batches = math.floor(data_size / batch_size)
    for i in range(0, data_size, batch_size):
        # The final batch may be smaller than batch_size.
        if i+batch_size <= data_size:
            batch_data_x = [d[0] for d in data_pairs[i:i+batch_size]]
            batch_data_y = [d[1] for d in data_pairs[i:i+batch_size]]
            batch_data_im = torch.from_numpy(data_im[i:i+batch_size])
        else:
            batch_data_x = [d[0] for d in data_pairs[i:data_size]]
            batch_data_y = [d[1] for d in data_pairs[i:data_size]]
            batch_data_im = torch.from_numpy(data_im[i:data_size])
        # Pad sources and targets to the per-batch maximum lengths.
        x_length = max([len(x) for x in batch_data_x])
        y_length = max([len(y) for y in batch_data_y])
        batch_x_pad = []
        batch_x_lengths = []
        batch_y_pad = []
        batch_y_lengths = []
        # Collect padded sources and their true lengths.
        for x_tokens in batch_data_x:
            x_l = len(x_tokens)
            x_pad_seq = pad_seq(x_tokens,x_length)
            batch_x_lengths.append(x_l)
            batch_x_pad.append(x_pad_seq)
        # Permutation sorting the batch by source length, longest first.
        x_sorted_index = list(np.argsort(batch_x_lengths))
        x_reverse_sorted_index = [x for x in reversed(x_sorted_index)]
        batch_x_pad_sorted = [batch_x_pad[i] for i in x_reverse_sorted_index]
        # Pad data_y and reorder it with respect to the x_reverse_sorted_index
        for y_tokens in batch_data_y:
            y_l = len(y_tokens)
            y_pad_seq = pad_seq(y_tokens,y_length)
            batch_y_lengths.append(y_l)
            batch_y_pad.append(y_pad_seq)
        batch_y_pad_sorted =[batch_y_pad[i] for i in x_reverse_sorted_index]
        batch_y_lengths_sorted = [batch_y_lengths[i] for i in x_reverse_sorted_index]
        # Reorder the image rows with the same permutation.
        # NOTE(review): this inner `i` shadows the outer batch offset `i`;
        # harmless because the outer `for` rebinds it, but fragile.
        batch_im_sorted = torch.zeros_like(batch_data_im)
        for i,x in enumerate(x_reverse_sorted_index):
            batch_im_sorted[i] = batch_data_im[x]
        # Wrap everything in Variables (and move to GPU when enabled).
        batch_x, batch_y = Variable(torch.LongTensor(batch_x_pad_sorted)), Variable(torch.LongTensor(batch_y_pad_sorted))
        batch_im = Variable(batch_im_sorted.float())
        if use_cuda:
            batch_x = batch_x.cuda()
            batch_y = batch_y.cuda()
            batch_im = batch_im.cuda()
        yield batch_x, batch_y, batch_im, list(reversed(sorted(batch_x_lengths))), batch_y_lengths_sorted, x_reverse_sorted_index
def data_generator_bta_mtv(data_pairs, data_im, data_bta_im, batch_size):
    """
    Yield sequential mini-batches of (source, target, image,
    bottom-up-attention image) data.

    Input:
        data_pairs: List of pairs, [[data_1,target_1],[data_2,target_2],...], where data_1 and target_1 are id_indexs from 1 to their own vocabulary size. The end of each instance whould end with a EOS_token index.
        data_im: The numpy matrix which contains the image features. Size: (N,I), N is the number of samples and I is the image feature size
        data_bta_im: numpy matrix of bottom-up-attention region features,
            aligned row-wise with data_pairs.
        batch_size: The size of the batch
    Yields per batch:
        batch_x: Variable with size B*Lx, sorted by source length (descending)
        batch_y: Variable with size B*Ly, same permutation as batch_x
        batch_im, batch_bta_im: image Variables under the same permutation
        batch_x_lengths: source lengths sorted in descending order
        batch_y_lengths_sorted: target lengths under the same permutation
        x_reverse_sorted_index: the permutation applied to the batch
    """
    data_size = len(data_pairs)
    for i in range(0, data_size, batch_size):
        # The final batch may be smaller than batch_size; one bound covers both.
        # Bug fix: the tail batch previously sliced data_bta_im with
        # i:i+batch_size while every other tensor used i:data_size.
        end = min(i + batch_size, data_size)
        batch_data_x = [d[0] for d in data_pairs[i:end]]
        batch_data_y = [d[1] for d in data_pairs[i:end]]
        batch_data_im = torch.from_numpy(data_im[i:end])
        batch_data_bta_im = torch.from_numpy(data_bta_im[i:end])
        # Pad sources and targets to the per-batch maximum lengths.
        x_length = max([len(x) for x in batch_data_x])
        y_length = max([len(y) for y in batch_data_y])
        batch_x_pad = []
        batch_x_lengths = []
        batch_y_pad = []
        batch_y_lengths = []
        for x_tokens in batch_data_x:
            batch_x_lengths.append(len(x_tokens))
            batch_x_pad.append(pad_seq(x_tokens, x_length))
        # Permutation sorting the batch by source length, longest first.
        x_sorted_index = list(np.argsort(batch_x_lengths))
        x_reverse_sorted_index = [x for x in reversed(x_sorted_index)]
        batch_x_pad_sorted = [batch_x_pad[j] for j in x_reverse_sorted_index]
        # Pad targets and reorder with the same permutation.
        for y_tokens in batch_data_y:
            batch_y_lengths.append(len(y_tokens))
            batch_y_pad.append(pad_seq(y_tokens, y_length))
        batch_y_pad_sorted = [batch_y_pad[j] for j in x_reverse_sorted_index]
        batch_y_lengths_sorted = [batch_y_lengths[j] for j in x_reverse_sorted_index]
        # Reorder both image tensors; use `j` so the batch offset `i`
        # is not shadowed by the loop variable.
        batch_im_sorted = torch.zeros_like(batch_data_im)
        batch_bta_im_sorted = torch.zeros_like(batch_data_bta_im)
        for j, src in enumerate(x_reverse_sorted_index):
            batch_im_sorted[j] = batch_data_im[src]
            batch_bta_im_sorted[j] = batch_data_bta_im[src]
        # Wrap everything in Variables (and move to GPU when enabled).
        batch_x = Variable(torch.LongTensor(batch_x_pad_sorted))
        batch_y = Variable(torch.LongTensor(batch_y_pad_sorted))
        batch_im = Variable(batch_im_sorted.float())
        batch_bta_im = Variable(batch_bta_im_sorted.float())
        if use_cuda:
            batch_x = batch_x.cuda()
            batch_y = batch_y.cuda()
            batch_im = batch_im.cuda()
            batch_bta_im = batch_bta_im.cuda()
        yield batch_x, batch_y, batch_im, batch_bta_im, list(reversed(sorted(batch_x_lengths))), batch_y_lengths_sorted, x_reverse_sorted_index
def data_generator_tl_mtv_bta_vi_shuffle(data_pairs, data_im, data_bta_im, batch_size):
    """
    Shuffle the dataset once, then yield sequential mini-batches of
    (source, target, visual supervision, image, bottom-up-attention image).

    Input:
        data_pairs: list of [source_ids, target_ids, vi_seq] triples; id
            sequences end with the EOS token index.
        data_im: The numpy matrix which contains the image features. Size: (N,I), N is the number of samples and I is the image feature size
        data_bta_im: numpy matrix of bottom-up-attention region features,
            aligned row-wise with data_pairs.
        batch_size: The size of the batch
    Yields per batch:
        batch_x: Variable with size B*Lx, sorted by source length (descending)
        batch_y: Variable with size B*Ly, same permutation as batch_x
        batch_vi: FloatTensor Variable of vi sequences padded to Ly
        batch_im, batch_bta_im: image Variables under the same permutation
        batch_x_lengths: source lengths sorted in descending order
        batch_y_lengths_sorted: target lengths under the same permutation
    """
    # Shuffle the three aligned collections with one shared permutation.
    dt = list(zip(data_pairs, data_im, data_bta_im))
    random.shuffle(dt)
    data_pairs = [a[0] for a in dt]
    data_im = np.array([a[1] for a in dt])
    data_bta_im = np.array([a[2] for a in dt])
    data_size = len(data_pairs)
    for i in range(0, data_size, batch_size):
        # The final batch may be smaller than batch_size; one bound covers both.
        # Bug fix: the tail batch previously sliced data_bta_im with
        # i:i+batch_size while every other tensor used i:data_size.
        end = min(i + batch_size, data_size)
        batch_data_x = [d[0] for d in data_pairs[i:end]]
        batch_data_y = [d[1] for d in data_pairs[i:end]]
        batch_data_vi = [d[2] for d in data_pairs[i:end]]
        batch_data_im = torch.from_numpy(data_im[i:end])
        batch_data_bta_im = torch.from_numpy(data_bta_im[i:end])
        # Pad sources and targets to the per-batch maximum lengths.
        x_length = max([len(x) for x in batch_data_x])
        y_length = max([len(y) for y in batch_data_y])
        batch_x_pad = []
        batch_x_lengths = []
        batch_y_pad = []
        batch_y_lengths = []
        batch_vi_pad = []
        for x_tokens in batch_data_x:
            batch_x_lengths.append(len(x_tokens))
            batch_x_pad.append(pad_seq(x_tokens, x_length))
        # Permutation sorting the batch by source length, longest first.
        x_sorted_index = list(np.argsort(batch_x_lengths))
        x_reverse_sorted_index = [x for x in reversed(x_sorted_index)]
        batch_x_pad_sorted = [batch_x_pad[j] for j in x_reverse_sorted_index]
        # Pad targets and their vi sequences (both to y_length) and reorder.
        for y_tokens, vi in zip(batch_data_y, batch_data_vi):
            batch_y_lengths.append(len(y_tokens))
            batch_y_pad.append(pad_seq(y_tokens, y_length))
            batch_vi_pad.append(pad_seq(vi, y_length))
        batch_vi_pad_sorted = [batch_vi_pad[j] for j in x_reverse_sorted_index]
        batch_y_pad_sorted = [batch_y_pad[j] for j in x_reverse_sorted_index]
        batch_y_lengths_sorted = [batch_y_lengths[j] for j in x_reverse_sorted_index]
        # Reorder both image tensors; use `j` so the batch offset `i`
        # is not shadowed by the loop variable.
        batch_im_sorted = torch.zeros_like(batch_data_im)
        batch_bta_im_sorted = torch.zeros_like(batch_data_bta_im)
        for j, src in enumerate(x_reverse_sorted_index):
            batch_im_sorted[j] = batch_data_im[src]
            batch_bta_im_sorted[j] = batch_data_bta_im[src]
        # Wrap everything in Variables (and move to GPU when enabled).
        batch_x = Variable(torch.LongTensor(batch_x_pad_sorted))
        batch_y = Variable(torch.LongTensor(batch_y_pad_sorted))
        batch_vi = Variable(torch.FloatTensor(batch_vi_pad_sorted))
        batch_im = Variable(batch_im_sorted.float())
        batch_bta_im = Variable(batch_bta_im_sorted.float())
        if use_cuda:
            batch_x = batch_x.cuda()
            batch_y = batch_y.cuda()
            batch_vi = batch_vi.cuda()
            batch_im = batch_im.cuda()
            batch_bta_im = batch_bta_im.cuda()
        yield batch_x, batch_y, batch_vi, batch_im, batch_bta_im, list(reversed(sorted(batch_x_lengths))), batch_y_lengths_sorted
def data_generator_tl_mtv(data_pairs, data_im, batch_size):
    """
    Yield mini-batches of (source, target, image) data whose target
    sentences all share the same length.

    A bucket sampler (borrowed from nmtpytorch) pre-groups instance indices
    by target-sentence length, so each batch drawn here has equal-length
    targets.

    Input:
        data_pairs: List of pairs, [[data_1,target_1],[data_2,target_2],...], where data_1 and target_1 are id_indexs from 1 to their own vocabulary size. The end of each instance whould end with a EOS_token index.
        batch_size: The size of the batch
        data_im: The numpy matrix which contains the image features. Size: (N,I), N is the number of samples and I is the image feature size
    Yields per batch:
        batch_x: Variable with size: B*Lx, sorted by source length (descending)
        batch_y: Variable with size: B*Ly, same permutation as batch_x
        batch_im: image-feature Variable, same permutation as batch_x
        batch_x_lengths: source lengths sorted in descending order
        batch_y_lengths_sorted: target lengths under the same permutation
    """
    # Bucket dataset indices by target-sentence length.
    tl_lengths = [len(x[1]) for x in data_pairs]
    data_sampler = BucketBatchSampler(tl_lengths,batch_size)
    # Each `bidx` is one batch worth of dataset indices.
    for bidx in data_sampler.__iter__():
        batch_data_x = [d[0] for d in [data_pairs[y] for y in bidx]]
        batch_data_y = [d[1] for d in [data_pairs[y] for y in bidx]]
        # Gather the image rows for the same indices (numpy fancy indexing).
        batch_data_im = torch.from_numpy(data_im[bidx])
        # Pad sources and targets to the per-batch maximum lengths.
        x_length = max([len(x) for x in batch_data_x])
        y_length = max([len(y) for y in batch_data_y])
        batch_x_pad = []
        batch_x_lengths = []
        batch_y_pad = []
        batch_y_lengths = []
        # Collect padded sources and their true lengths.
        for x_tokens in batch_data_x:
            x_l = len(x_tokens)
            x_pad_seq = pad_seq(x_tokens,x_length)
            batch_x_lengths.append(x_l)
            batch_x_pad.append(x_pad_seq)
        # Permutation sorting the batch by source length, longest first.
        x_sorted_index = list(np.argsort(batch_x_lengths))
        x_reverse_sorted_index = [x for x in reversed(x_sorted_index)]
        batch_x_pad_sorted = [batch_x_pad[i] for i in x_reverse_sorted_index]
        # Pad data_y and reorder it with respect to the x_reverse_sorted_index
        for y_tokens in batch_data_y:
            y_l = len(y_tokens)
            y_pad_seq = pad_seq(y_tokens,y_length)
            batch_y_lengths.append(y_l)
            batch_y_pad.append(y_pad_seq)
        batch_y_pad_sorted =[batch_y_pad[i] for i in x_reverse_sorted_index]
        batch_y_lengths_sorted = [batch_y_lengths[i] for i in x_reverse_sorted_index]
        # Reorder the image rows with the same permutation.
        batch_im_sorted = torch.zeros_like(batch_data_im)
        for i,x in enumerate(x_reverse_sorted_index):
            batch_im_sorted[i] = batch_data_im[x]
        # Wrap everything in Variables (and move to GPU when enabled).
        batch_x, batch_y = Variable(torch.LongTensor(batch_x_pad_sorted)), Variable(torch.LongTensor(batch_y_pad_sorted))
        batch_im = Variable(batch_im_sorted.float())
        if use_cuda:
            batch_x = batch_x.cuda()
            batch_y = batch_y.cuda()
            batch_im = batch_im.cuda()
        yield batch_x, batch_y,batch_im, list(reversed(sorted(batch_x_lengths))), batch_y_lengths_sorted
def data_generator_tl_mtv_vi(data_pairs, data_im, batch_size):
    """
    Yield mini-batches of (source, target, visual supervision, image) data
    whose target sentences all share the same length.

    A bucket sampler (borrowed from nmtpytorch) pre-groups instance indices
    by target-sentence length, so each batch drawn here has equal-length
    targets.

    Input:
        data_pairs: list of [source_ids, target_ids, vi_seq] triples; id
            sequences end with the EOS token index.
        batch_size: The size of the batch
        data_im: The numpy matrix which contains the image features. Size: (N,I), N is the number of samples and I is the image feature size
    Yields per batch:
        batch_x: Variable with size: B*Lx, sorted by source length (descending)
        batch_y: Variable with size: B*Ly, same permutation as batch_x
        batch_vi: FloatTensor Variable of padded vi sequences, same permutation
        batch_im: image-feature Variable, same permutation as batch_x
        batch_x_lengths: source lengths sorted in descending order
        batch_y_lengths_sorted: target lengths under the same permutation
    """
    # Bucket dataset indices by target-sentence length.
    tl_lengths = [len(x[1]) for x in data_pairs]
    data_sampler = BucketBatchSampler(tl_lengths, batch_size)
    # Each `bidx` is one batch worth of dataset indices.
    for bidx in data_sampler.__iter__():
        batch_data_x = [d[0] for d in [data_pairs[y] for y in bidx]]
        batch_data_y = [d[1] for d in [data_pairs[y] for y in bidx]]
        batch_data_vi = [d[2] for d in [data_pairs[y] for y in bidx]]
        # Gather the image rows for the same indices (numpy fancy indexing).
        batch_data_im = torch.from_numpy(data_im[bidx])
        # Pad sources, targets and vi sequences to the per-batch maxima.
        x_length = max([len(x) for x in batch_data_x])
        y_length = max([len(y) for y in batch_data_y])
        vi_length = max([len(vi) for vi in batch_data_vi])
        batch_x_pad = []
        batch_x_lengths = []
        batch_y_pad = []
        batch_y_lengths = []
        batch_vi_pad = []
        batch_vi_lengths = []
        # Collect padded sources and their true lengths.
        for x_tokens in batch_data_x:
            x_l = len(x_tokens)
            x_pad_seq = pad_seq(x_tokens, x_length)
            batch_x_lengths.append(x_l)
            batch_x_pad.append(x_pad_seq)
        # Permutation sorting the batch by source length, longest first.
        x_sorted_index = list(np.argsort(batch_x_lengths))
        x_reverse_sorted_index = [x for x in reversed(x_sorted_index)]
        batch_x_pad_sorted = [batch_x_pad[i] for i in x_reverse_sorted_index]
        # Pad data_y and reorder it with respect to the x_reverse_sorted_index
        for y_tokens in batch_data_y:
            y_l = len(y_tokens)
            y_pad_seq = pad_seq(y_tokens,y_length)
            batch_y_lengths.append(y_l)
            batch_y_pad.append(y_pad_seq)
        batch_y_pad_sorted =[batch_y_pad[i] for i in x_reverse_sorted_index]
        batch_y_lengths_sorted = [batch_y_lengths[i] for i in x_reverse_sorted_index]
        # Pad data_vi and reorder it with respect to the x_reverse_sorted_index
        for vi in batch_data_vi:
            vi_l = len(vi)
            vi_pad_seq = pad_seq(vi,vi_length)
            batch_vi_lengths.append(vi_l)
            batch_vi_pad.append(vi_pad_seq)
        batch_vi_pad_sorted =[batch_vi_pad[i] for i in x_reverse_sorted_index]
        batch_vi_lengths_sorted = [batch_vi_lengths[i] for i in x_reverse_sorted_index]
        # Reorder the image rows with the same permutation.
        batch_im_sorted = torch.zeros_like(batch_data_im)
        for i,x in enumerate(x_reverse_sorted_index):
            batch_im_sorted[i] = batch_data_im[x]
        # Wrap everything in Variables (and move to GPU when enabled).
        batch_x, batch_y, batch_vi = Variable(torch.LongTensor(batch_x_pad_sorted)),\
                                     Variable(torch.LongTensor(batch_y_pad_sorted)),\
                                     Variable(torch.FloatTensor(batch_vi_pad_sorted))
        batch_im = Variable(batch_im_sorted.float())
        if use_cuda:
            batch_x = batch_x.cuda()
            batch_y = batch_y.cuda()
            batch_vi = batch_vi.cuda()
            batch_im = batch_im.cuda()
        yield batch_x,batch_y,batch_vi,batch_im,list(reversed(sorted(batch_x_lengths))),batch_y_lengths_sorted
def data_generator_tl_mtv_bta_vi(data_pairs, data_im, data_bta_im, batch_size):
    """
    Yield mini-batches of (source, target, visual supervision, image,
    bottom-up-attention image) data whose target sentences all share the
    same length.

    A bucket sampler (borrowed from nmtpytorch) pre-groups instance indices
    by target-sentence length, so each batch drawn here has equal-length
    targets.

    Input:
        data_pairs: list of [source_ids, target_ids, vi_seq] triples; id
            sequences end with the EOS token index.
        batch_size: The size of the batch
        data_im: The numpy matrix which contains the image features. Size: (N,I), N is the number of samples and I is the image feature size
        data_bta_im: numpy matrix of bottom-up-attention region features,
            aligned row-wise with data_pairs.
    Yields per batch:
        batch_x: Variable with size: B*Lx, sorted by source length (descending)
        batch_y: Variable with size: B*Ly, same permutation as batch_x
        batch_vi: FloatTensor Variable of padded vi sequences, same permutation
        batch_im, batch_bta_im: image Variables, same permutation as batch_x
        batch_x_lengths: source lengths sorted in descending order
        batch_y_lengths_sorted: target lengths under the same permutation
    """
    # Bucket dataset indices by target-sentence length.
    tl_lengths = [len(x[1]) for x in data_pairs]
    data_sampler = BucketBatchSampler(tl_lengths,batch_size)
    # Each `bidx` is one batch worth of dataset indices.
    for bidx in data_sampler.__iter__():
        batch_data_x = [d[0] for d in [data_pairs[y] for y in bidx]]
        batch_data_y = [d[1] for d in [data_pairs[y] for y in bidx]]
        batch_data_vi = [d[2] for d in [data_pairs[y] for y in bidx]]
        # Gather both image matrices for the same indices.
        batch_data_im = torch.from_numpy(data_im[bidx])
        batch_data_bta_im = torch.from_numpy(data_bta_im[bidx])
        # Pad sources, targets and vi sequences to the per-batch maxima.
        x_length = max([len(x) for x in batch_data_x])
        y_length = max([len(y) for y in batch_data_y])
        vi_length = max([len(vi) for vi in batch_data_vi])
        batch_x_pad = []
        batch_x_lengths = []
        batch_y_pad = []
        batch_y_lengths = []
        batch_vi_pad = []
        batch_vi_lengths = []
        # Collect padded sources and their true lengths.
        for x_tokens in batch_data_x:
            x_l = len(x_tokens)
            x_pad_seq = pad_seq(x_tokens,x_length)
            batch_x_lengths.append(x_l)
            batch_x_pad.append(x_pad_seq)
        # Permutation sorting the batch by source length, longest first.
        x_sorted_index = list(np.argsort(batch_x_lengths))
        x_reverse_sorted_index = [x for x in reversed(x_sorted_index)]
        batch_x_pad_sorted = [batch_x_pad[i] for i in x_reverse_sorted_index]
        # Pad data_y and reorder it with respect to the x_reverse_sorted_index
        for y_tokens in batch_data_y:
            y_l = len(y_tokens)
            y_pad_seq = pad_seq(y_tokens,y_length)
            batch_y_lengths.append(y_l)
            batch_y_pad.append(y_pad_seq)
        batch_y_pad_sorted =[batch_y_pad[i] for i in x_reverse_sorted_index]
        batch_y_lengths_sorted = [batch_y_lengths[i] for i in x_reverse_sorted_index]
        # Pad data_vi and reorder it with respect to the x_reverse_sorted_index
        for vi in batch_data_vi:
            vi_l = len(vi)
            vi_pad_seq = pad_seq(vi,vi_length)
            batch_vi_lengths.append(vi_l)
            batch_vi_pad.append(vi_pad_seq)
        batch_vi_pad_sorted =[batch_vi_pad[i] for i in x_reverse_sorted_index]
        batch_vi_lengths_sorted = [batch_vi_lengths[i] for i in x_reverse_sorted_index]
        # Reorder both image tensors with the same permutation.
        batch_im_sorted = torch.zeros_like(batch_data_im)
        batch_im_bta_sorted = torch.zeros_like(batch_data_bta_im)
        for i,x in enumerate(x_reverse_sorted_index):
            batch_im_sorted[i] = batch_data_im[x]
            batch_im_bta_sorted[i] = batch_data_bta_im[x]
        # Wrap everything in Variables (and move to GPU when enabled).
        batch_x, batch_y, batch_vi = Variable(torch.LongTensor(batch_x_pad_sorted)),\
                                     Variable(torch.LongTensor(batch_y_pad_sorted)),\
                                     Variable(torch.FloatTensor(batch_vi_pad_sorted))
        batch_im = Variable(batch_im_sorted.float())
        batch_bta_im = Variable(batch_im_bta_sorted.float())
        if use_cuda:
            batch_x = batch_x.cuda()
            batch_y = batch_y.cuda()
            batch_vi = batch_vi.cuda()
            batch_im = batch_im.cuda()
            batch_bta_im = batch_bta_im.cuda()
        yield batch_x,\
              batch_y,\
              batch_vi,\
              batch_im,\
              batch_bta_im,\
              list(reversed(sorted(batch_x_lengths))),\
              batch_y_lengths_sorted
def data_generator_tl_mtv_imretrieval(data_pairs, data_im, batch_size):
    """
    Yield mini-batches of (source, target, image) data for image retrieval;
    target sentences in a batch all share the same length.

    A bucket sampler (borrowed from nmtpytorch) pre-groups instance indices
    by target-sentence length. In addition to the batch tensors, the
    original dataset indices of the (sorted) batch rows are yielded so a
    caller can map retrieval results back to the dataset.

    Input:
        data_pairs: List of pairs, [[data_1,target_1],[data_2,target_2],...], where data_1 and target_1 are id_indexs from 1 to their own vocabulary size. The end of each instance whould end with a EOS_token index.
        batch_size: The size of the batch
        data_im: The numpy matrix which contains the image features. Size: (N,I), N is the number of samples and I is the image feature size
    Yields per batch:
        batch_x: Variable with size: B*Lx, sorted by source length (descending)
        batch_y: Variable with size: B*Ly, same permutation as batch_x
        batch_im: image-feature Variable, same permutation as batch_x
        batch_x_lengths: source lengths sorted in descending order
        index_retrieval: dataset indices of the batch rows, in sorted order
    """
    # Bucket dataset indices by target-sentence length.
    tl_lengths = [len(x[1]) for x in data_pairs]
    data_sampler = BucketBatchSampler(tl_lengths, batch_size)
    # Each `bidx` is one batch worth of dataset indices.
    for bidx in data_sampler.__iter__():
        batch_data_x = [d[0] for d in [data_pairs[y] for y in bidx]]
        batch_data_y = [d[1] for d in [data_pairs[y] for y in bidx]]
        # Gather the image rows for the same indices (numpy fancy indexing).
        batch_data_im = torch.from_numpy(data_im[bidx])
        # Pad sources and targets to the per-batch maximum lengths.
        x_length = max([len(x) for x in batch_data_x])
        y_length = max([len(y) for y in batch_data_y])
        batch_x_pad = []
        batch_x_lengths = []
        batch_y_pad = []
        batch_y_lengths = []
        # Collect padded sources and their true lengths.
        for x_tokens in batch_data_x:
            x_l = len(x_tokens)
            x_pad_seq = pad_seq(x_tokens,x_length)
            batch_x_lengths.append(x_l)
            batch_x_pad.append(x_pad_seq)
        # Permutation sorting the batch by source length, longest first.
        x_sorted_index = list(np.argsort(batch_x_lengths))
        x_reverse_sorted_index = [x for x in reversed(x_sorted_index)]
        batch_x_pad_sorted = [batch_x_pad[i] for i in x_reverse_sorted_index]
        # Pad data_y and reorder it with respect to the x_reverse_sorted_index
        for y_tokens in batch_data_y:
            y_l = len(y_tokens)
            y_pad_seq = pad_seq(y_tokens,y_length)
            batch_y_lengths.append(y_l)
            batch_y_pad.append(y_pad_seq)
        batch_y_pad_sorted =[batch_y_pad[i] for i in x_reverse_sorted_index]
        batch_y_lengths_sorted = [batch_y_lengths[i] for i in x_reverse_sorted_index]
        # Reorder the image rows with the same permutation.
        batch_im_sorted = torch.zeros_like(batch_data_im)
        for i,x in enumerate(x_reverse_sorted_index):
            batch_im_sorted[i] = batch_data_im[x]
        # Wrap everything in Variables (and move to GPU when enabled).
        batch_x, batch_y = Variable(torch.LongTensor(batch_x_pad_sorted)), Variable(torch.LongTensor(batch_y_pad_sorted))
        batch_im = Variable(batch_im_sorted.float())
        if use_cuda:
            batch_x = batch_x.cuda()
            batch_y = batch_y.cuda()
            batch_im = batch_im.cuda()
        # Map the sorted batch positions back to dataset indices.
        index_retrieval = [bidx[x] for x in x_reverse_sorted_index]
        yield batch_x, batch_y, batch_im, list(reversed(sorted(batch_x_lengths))), index_retrieval
def data_generator_tl_mtv_imretrieval_bta(data_pairs, data_im, data_bta_im, batch_size):
    """
    Yield mini-batches of (source, target, image, bottom-up-attention image)
    data for image retrieval; target sentences in a batch all share the same
    length.

    A bucket sampler (borrowed from nmtpytorch) pre-groups instance indices
    by target-sentence length. In addition to the batch tensors, the
    original dataset indices of the (sorted) batch rows are yielded so a
    caller can map retrieval results back to the dataset.

    Input:
        data_pairs: List of pairs, [[data_1,target_1],[data_2,target_2],...], where data_1 and target_1 are id_indexs from 1 to their own vocabulary size. The end of each instance whould end with a EOS_token index.
        batch_size: The size of the batch
        data_im: The numpy matrix which contains the image features. Size: (N,I), N is the number of samples and I is the image feature size
        data_bta_im: numpy matrix of bottom-up-attention region features,
            aligned row-wise with data_pairs.
    Yields per batch:
        batch_x: Variable with size: B*Lx, sorted by source length (descending)
        batch_y: Variable with size: B*Ly, same permutation as batch_x
        batch_im, batch_bta_im: image Variables, same permutation as batch_x
        batch_x_lengths: source lengths sorted in descending order
        index_retrieval: dataset indices of the batch rows, in sorted order
    """
    # Bucket dataset indices by target-sentence length.
    tl_lengths = [len(x[1]) for x in data_pairs]
    data_sampler = BucketBatchSampler(tl_lengths,batch_size)
    # Each `bidx` is one batch worth of dataset indices.
    for bidx in data_sampler.__iter__():
        batch_data_x = [d[0] for d in [data_pairs[y] for y in bidx]]
        batch_data_y = [d[1] for d in [data_pairs[y] for y in bidx]]
        # Gather both image matrices for the same indices.
        batch_data_im = torch.from_numpy(data_im[bidx])
        batch_data_bta_im = torch.from_numpy(data_bta_im[bidx])
        # Pad sources and targets to the per-batch maximum lengths.
        x_length = max([len(x) for x in batch_data_x])
        y_length = max([len(y) for y in batch_data_y])
        batch_x_pad = []
        batch_x_lengths = []
        batch_y_pad = []
        batch_y_lengths = []
        # Collect padded sources and their true lengths.
        for x_tokens in batch_data_x:
            x_l = len(x_tokens)
            x_pad_seq = pad_seq(x_tokens,x_length)
            batch_x_lengths.append(x_l)
            batch_x_pad.append(x_pad_seq)
        # Permutation sorting the batch by source length, longest first.
        x_sorted_index = list(np.argsort(batch_x_lengths))
        x_reverse_sorted_index = [x for x in reversed(x_sorted_index)]
        batch_x_pad_sorted = [batch_x_pad[i] for i in x_reverse_sorted_index]
        # Pad data_y and reorder it with respect to the x_reverse_sorted_index
        for y_tokens in batch_data_y:
            y_l = len(y_tokens)
            y_pad_seq = pad_seq(y_tokens,y_length)
            batch_y_lengths.append(y_l)
            batch_y_pad.append(y_pad_seq)
        batch_y_pad_sorted =[batch_y_pad[i] for i in x_reverse_sorted_index]
        batch_y_lengths_sorted = [batch_y_lengths[i] for i in x_reverse_sorted_index]
        # Reorder both image tensors with the same permutation.
        batch_im_sorted = torch.zeros_like(batch_data_im)
        batch_bta_im_sorted = torch.zeros_like(batch_data_bta_im)
        for i,x in enumerate(x_reverse_sorted_index):
            batch_im_sorted[i] = batch_data_im[x]
            batch_bta_im_sorted[i] = batch_data_bta_im[x]
        # Wrap everything in Variables (and move to GPU when enabled).
        batch_x, batch_y = Variable(torch.LongTensor(batch_x_pad_sorted)), Variable(torch.LongTensor(batch_y_pad_sorted))
        batch_im = Variable(batch_im_sorted.float())
        batch_bta_im = Variable(batch_bta_im_sorted.float())
        if use_cuda:
            batch_x = batch_x.cuda()
            batch_y = batch_y.cuda()
            batch_im = batch_im.cuda()
            batch_bta_im = batch_bta_im.cuda()
        # Map the sorted batch positions back to dataset indices.
        index_retrieval = [bidx[x] for x in x_reverse_sorted_index]
        yield batch_x, batch_y, batch_im, batch_bta_im,list(reversed(sorted(batch_x_lengths))), index_retrieval
def translation_reorder(translation, length_sorted_index, id2word):
    """
    Undo the length-based batch sort and map token ids back to words.

    translation[k] is the token-id sequence produced for original batch
    position length_sorted_index[k]; ids missing from id2word map to '<unk>'.
    """
    restored = [None] * len(translation)
    for original_pos, token_ids in zip(length_sorted_index, translation):
        words = []
        for token_id in token_ids:
            words.append(id2word.get(token_id, '<unk>'))
        restored[original_pos] = words
    return restored
def translation_reorder_BPE(translation, length_sorted_index, id2word):
    """
    Undo the length-based batch sort, map token ids to BPE subwords, and
    merge subwords back into full words.

    A subword ending in '@@' continues into the next token; deleting every
    '@@ ' from the space-joined string glues the pieces together. Ids
    missing from id2word map to '<unk>'.
    """
    restored = [None] * len(translation)
    for original_pos, token_ids in zip(length_sorted_index, translation):
        subwords = [id2word.get(token_id, '<unk>') for token_id in token_ids]
        # '@@ ' is a literal marker, so a plain string replace is equivalent
        # to the regex substitution it replaces.
        merged = ' '.join(subwords).replace('@@ ', '')
        restored[original_pos] = merged.split()
    return restored
def translation_reorder_ATTN(attns, length_sorted_index):
#Reorder attention
original_attn = np.zeros(attns.shape)
for i,attn in zip(length_sorted_index, attns):
original_attn[i] = attn
return original_attn
| [
"unicodedata.normalize",
"torch.zeros_like",
"torch.LongTensor",
"random.shuffle",
"unicodedata.category",
"math.floor",
"numpy.zeros",
"torch.FloatTensor",
"numpy.argsort",
"nltk.tokenize.sent_tokenize",
"torch.cuda.is_available",
"numpy.array",
"machine_translation_vision.samplers.BucketBa... | [((494, 519), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (517, 519), False, 'import torch\n'), ((4337, 4371), 'unicodedata.normalize', 'unicodedata.normalize', (['"""NFD"""', 'text'], {}), "('NFD', text)\n", (4358, 4371), False, 'import unicodedata\n'), ((7636, 7670), 'math.floor', 'math.floor', (['(data_size / batch_size)'], {}), '(data_size / batch_size)\n', (7646, 7670), False, 'import math\n'), ((10889, 10931), 'machine_translation_vision.samplers.BucketBatchSampler', 'BucketBatchSampler', (['tl_lengths', 'batch_size'], {}), '(tl_lengths, batch_size)\n', (10907, 10931), False, 'from machine_translation_vision.samplers import BucketBatchSampler\n'), ((14527, 14561), 'math.floor', 'math.floor', (['(data_size / batch_size)'], {}), '(data_size / batch_size)\n', (14537, 14561), False, 'import math\n'), ((18161, 18195), 'math.floor', 'math.floor', (['(data_size / batch_size)'], {}), '(data_size / batch_size)\n', (18171, 18195), False, 'import math\n'), ((22225, 22243), 'random.shuffle', 'random.shuffle', (['dt'], {}), '(dt)\n', (22239, 22243), False, 'import random\n'), ((22295, 22323), 'numpy.array', 'np.array', (['[a[1] for a in dt]'], {}), '([a[1] for a in dt])\n', (22303, 22323), True, 'import numpy as np\n'), ((22342, 22370), 'numpy.array', 'np.array', (['[a[2] for a in dt]'], {}), '([a[2] for a in dt])\n', (22350, 22370), True, 'import numpy as np\n'), ((22422, 22456), 'math.floor', 'math.floor', (['(data_size / batch_size)'], {}), '(data_size / batch_size)\n', (22432, 22456), False, 'import math\n'), ((27208, 27250), 'machine_translation_vision.samplers.BucketBatchSampler', 'BucketBatchSampler', (['tl_lengths', 'batch_size'], {}), '(tl_lengths, batch_size)\n', (27226, 27250), False, 'from machine_translation_vision.samplers import BucketBatchSampler\n'), ((30945, 30987), 'machine_translation_vision.samplers.BucketBatchSampler', 'BucketBatchSampler', (['tl_lengths', 'batch_size'], 
{}), '(tl_lengths, batch_size)\n', (30963, 30987), False, 'from machine_translation_vision.samplers import BucketBatchSampler\n'), ((35491, 35533), 'machine_translation_vision.samplers.BucketBatchSampler', 'BucketBatchSampler', (['tl_lengths', 'batch_size'], {}), '(tl_lengths, batch_size)\n', (35509, 35533), False, 'from machine_translation_vision.samplers import BucketBatchSampler\n'), ((40408, 40450), 'machine_translation_vision.samplers.BucketBatchSampler', 'BucketBatchSampler', (['tl_lengths', 'batch_size'], {}), '(tl_lengths, batch_size)\n', (40426, 40450), False, 'from machine_translation_vision.samplers import BucketBatchSampler\n'), ((44277, 44319), 'machine_translation_vision.samplers.BucketBatchSampler', 'BucketBatchSampler', (['tl_lengths', 'batch_size'], {}), '(tl_lengths, batch_size)\n', (44295, 44319), False, 'from machine_translation_vision.samplers import BucketBatchSampler\n'), ((48169, 48190), 'numpy.zeros', 'np.zeros', (['attns.shape'], {}), '(attns.shape)\n', (48177, 48190), True, 'import numpy as np\n'), ((4424, 4450), 'unicodedata.category', 'unicodedata.category', (['char'], {}), '(char)\n', (4444, 4450), False, 'import unicodedata\n'), ((13198, 13225), 'numpy.argsort', 'np.argsort', (['batch_x_lengths'], {}), '(batch_x_lengths)\n', (13208, 13225), True, 'import numpy as np\n'), ((13399, 13435), 'torch.LongTensor', 'torch.LongTensor', (['batch_x_pad_sorted'], {}), '(batch_x_pad_sorted)\n', (13415, 13435), False, 'import torch\n'), ((16494, 16525), 'torch.zeros_like', 'torch.zeros_like', (['batch_data_im'], {}), '(batch_data_im)\n', (16510, 16525), False, 'import torch\n'), ((20274, 20305), 'torch.zeros_like', 'torch.zeros_like', (['batch_data_im'], {}), '(batch_data_im)\n', (20290, 20305), False, 'import torch\n'), ((20336, 20371), 'torch.zeros_like', 'torch.zeros_like', (['batch_data_bta_im'], {}), '(batch_data_bta_im)\n', (20352, 20371), False, 'import torch\n'), ((24899, 24930), 'torch.zeros_like', 'torch.zeros_like', (['batch_data_im'], 
{}), '(batch_data_im)\n', (24915, 24930), False, 'import torch\n'), ((24961, 24996), 'torch.zeros_like', 'torch.zeros_like', (['batch_data_bta_im'], {}), '(batch_data_bta_im)\n', (24977, 24996), False, 'import torch\n'), ((27539, 27570), 'torch.from_numpy', 'torch.from_numpy', (['data_im[bidx]'], {}), '(data_im[bidx])\n', (27555, 27570), False, 'import torch\n'), ((28983, 29014), 'torch.zeros_like', 'torch.zeros_like', (['batch_data_im'], {}), '(batch_data_im)\n', (28999, 29014), False, 'import torch\n'), ((31347, 31378), 'torch.from_numpy', 'torch.from_numpy', (['data_im[bidx]'], {}), '(data_im[bidx])\n', (31363, 31378), False, 'import torch\n'), ((33379, 33410), 'torch.zeros_like', 'torch.zeros_like', (['batch_data_im'], {}), '(batch_data_im)\n', (33395, 33410), False, 'import torch\n'), ((35892, 35923), 'torch.from_numpy', 'torch.from_numpy', (['data_im[bidx]'], {}), '(data_im[bidx])\n', (35908, 35923), False, 'import torch\n'), ((35952, 35987), 'torch.from_numpy', 'torch.from_numpy', (['data_bta_im[bidx]'], {}), '(data_bta_im[bidx])\n', (35968, 35987), False, 'import torch\n'), ((37984, 38015), 'torch.zeros_like', 'torch.zeros_like', (['batch_data_im'], {}), '(batch_data_im)\n', (38000, 38015), False, 'import torch\n'), ((38046, 38081), 'torch.zeros_like', 'torch.zeros_like', (['batch_data_bta_im'], {}), '(batch_data_bta_im)\n', (38062, 38081), False, 'import torch\n'), ((40760, 40791), 'torch.from_numpy', 'torch.from_numpy', (['data_im[bidx]'], {}), '(data_im[bidx])\n', (40776, 40791), False, 'import torch\n'), ((42203, 42234), 'torch.zeros_like', 'torch.zeros_like', (['batch_data_im'], {}), '(batch_data_im)\n', (42219, 42234), False, 'import torch\n'), ((44608, 44639), 'torch.from_numpy', 'torch.from_numpy', (['data_im[bidx]'], {}), '(data_im[bidx])\n', (44624, 44639), False, 'import torch\n'), ((44668, 44703), 'torch.from_numpy', 'torch.from_numpy', (['data_bta_im[bidx]'], {}), '(data_bta_im[bidx])\n', (44684, 44703), False, 'import torch\n'), ((46155, 
46186), 'torch.zeros_like', 'torch.zeros_like', (['batch_data_im'], {}), '(batch_data_im)\n', (46171, 46186), False, 'import torch\n'), ((46217, 46252), 'torch.zeros_like', 'torch.zeros_like', (['batch_data_bta_im'], {}), '(batch_data_bta_im)\n', (46233, 46252), False, 'import torch\n'), ((47858, 47892), 're.sub', 're.sub', (['"""@@ """', '""""""', 'BPE_translation'], {}), "('@@ ', '', BPE_translation)\n", (47864, 47892), False, 'import re\n'), ((3739, 3755), 'nltk.tokenize.sent_tokenize', 'sent_tokenize', (['x'], {}), '(x)\n', (3752, 3755), False, 'from nltk.tokenize import sent_tokenize\n'), ((3774, 3790), 'nltk.tokenize.sent_tokenize', 'sent_tokenize', (['y'], {}), '(y)\n', (3787, 3790), False, 'from nltk.tokenize import sent_tokenize\n'), ((8663, 8690), 'numpy.argsort', 'np.argsort', (['batch_x_lengths'], {}), '(batch_x_lengths)\n', (8673, 8690), True, 'import numpy as np\n'), ((9321, 9357), 'torch.LongTensor', 'torch.LongTensor', (['batch_x_pad_sorted'], {}), '(batch_x_pad_sorted)\n', (9337, 9357), False, 'import torch\n'), ((9386, 9422), 'torch.LongTensor', 'torch.LongTensor', (['batch_y_pad_sorted'], {}), '(batch_y_pad_sorted)\n', (9402, 9422), False, 'import torch\n'), ((11770, 11797), 'numpy.argsort', 'np.argsort', (['batch_x_lengths'], {}), '(batch_x_lengths)\n', (11780, 11797), True, 'import numpy as np\n'), ((14814, 14857), 'torch.from_numpy', 'torch.from_numpy', (['data_im[i:i + batch_size]'], {}), '(data_im[i:i + batch_size])\n', (14830, 14857), False, 'import torch\n'), ((15032, 15070), 'torch.from_numpy', 'torch.from_numpy', (['data_im[i:data_size]'], {}), '(data_im[i:data_size])\n', (15048, 15070), False, 'import torch\n'), ((15708, 15735), 'numpy.argsort', 'np.argsort', (['batch_x_lengths'], {}), '(batch_x_lengths)\n', (15718, 15735), True, 'import numpy as np\n'), ((18448, 18491), 'torch.from_numpy', 'torch.from_numpy', (['data_im[i:i + batch_size]'], {}), '(data_im[i:i + batch_size])\n', (18464, 18491), False, 'import torch\n'), ((18522, 18569), 
'torch.from_numpy', 'torch.from_numpy', (['data_bta_im[i:i + batch_size]'], {}), '(data_bta_im[i:i + batch_size])\n', (18538, 18569), False, 'import torch\n'), ((18744, 18782), 'torch.from_numpy', 'torch.from_numpy', (['data_im[i:data_size]'], {}), '(data_im[i:data_size])\n', (18760, 18782), False, 'import torch\n'), ((18815, 18862), 'torch.from_numpy', 'torch.from_numpy', (['data_bta_im[i:i + batch_size]'], {}), '(data_bta_im[i:i + batch_size])\n', (18831, 18862), False, 'import torch\n'), ((19487, 19514), 'numpy.argsort', 'np.argsort', (['batch_x_lengths'], {}), '(batch_x_lengths)\n', (19497, 19514), True, 'import numpy as np\n'), ((22776, 22819), 'torch.from_numpy', 'torch.from_numpy', (['data_im[i:i + batch_size]'], {}), '(data_im[i:i + batch_size])\n', (22792, 22819), False, 'import torch\n'), ((22850, 22897), 'torch.from_numpy', 'torch.from_numpy', (['data_bta_im[i:i + batch_size]'], {}), '(data_bta_im[i:i + batch_size])\n', (22866, 22897), False, 'import torch\n'), ((23140, 23178), 'torch.from_numpy', 'torch.from_numpy', (['data_im[i:data_size]'], {}), '(data_im[i:data_size])\n', (23156, 23178), False, 'import torch\n'), ((23211, 23258), 'torch.from_numpy', 'torch.from_numpy', (['data_bta_im[i:i + batch_size]'], {}), '(data_bta_im[i:i + batch_size])\n', (23227, 23258), False, 'import torch\n'), ((23913, 23940), 'numpy.argsort', 'np.argsort', (['batch_x_lengths'], {}), '(batch_x_lengths)\n', (23923, 23940), True, 'import numpy as np\n'), ((28196, 28223), 'numpy.argsort', 'np.argsort', (['batch_x_lengths'], {}), '(batch_x_lengths)\n', (28206, 28223), True, 'import numpy as np\n'), ((32121, 32148), 'numpy.argsort', 'np.argsort', (['batch_x_lengths'], {}), '(batch_x_lengths)\n', (32131, 32148), True, 'import numpy as np\n'), ((36728, 36755), 'numpy.argsort', 'np.argsort', (['batch_x_lengths'], {}), '(batch_x_lengths)\n', (36738, 36755), True, 'import numpy as np\n'), ((41417, 41444), 'numpy.argsort', 'np.argsort', (['batch_x_lengths'], {}), 
'(batch_x_lengths)\n', (41427, 41444), True, 'import numpy as np\n'), ((45330, 45357), 'numpy.argsort', 'np.argsort', (['batch_x_lengths'], {}), '(batch_x_lengths)\n', (45340, 45357), True, 'import numpy as np\n'), ((5488, 5513), 'torch.LongTensor', 'torch.LongTensor', (['indexes'], {}), '(indexes)\n', (5504, 5513), False, 'import torch\n'), ((12437, 12473), 'torch.LongTensor', 'torch.LongTensor', (['batch_x_pad_sorted'], {}), '(batch_x_pad_sorted)\n', (12453, 12473), False, 'import torch\n'), ((12485, 12521), 'torch.LongTensor', 'torch.LongTensor', (['batch_y_pad_sorted'], {}), '(batch_y_pad_sorted)\n', (12501, 12521), False, 'import torch\n'), ((16713, 16749), 'torch.LongTensor', 'torch.LongTensor', (['batch_x_pad_sorted'], {}), '(batch_x_pad_sorted)\n', (16729, 16749), False, 'import torch\n'), ((16761, 16797), 'torch.LongTensor', 'torch.LongTensor', (['batch_y_pad_sorted'], {}), '(batch_y_pad_sorted)\n', (16777, 16797), False, 'import torch\n'), ((20617, 20653), 'torch.LongTensor', 'torch.LongTensor', (['batch_x_pad_sorted'], {}), '(batch_x_pad_sorted)\n', (20633, 20653), False, 'import torch\n'), ((20665, 20701), 'torch.LongTensor', 'torch.LongTensor', (['batch_y_pad_sorted'], {}), '(batch_y_pad_sorted)\n', (20681, 20701), False, 'import torch\n'), ((25253, 25289), 'torch.LongTensor', 'torch.LongTensor', (['batch_x_pad_sorted'], {}), '(batch_x_pad_sorted)\n', (25269, 25289), False, 'import torch\n'), ((25301, 25337), 'torch.LongTensor', 'torch.LongTensor', (['batch_y_pad_sorted'], {}), '(batch_y_pad_sorted)\n', (25317, 25337), False, 'import torch\n'), ((25348, 25386), 'torch.FloatTensor', 'torch.FloatTensor', (['batch_vi_pad_sorted'], {}), '(batch_vi_pad_sorted)\n', (25365, 25386), False, 'import torch\n'), ((29202, 29238), 'torch.LongTensor', 'torch.LongTensor', (['batch_x_pad_sorted'], {}), '(batch_x_pad_sorted)\n', (29218, 29238), False, 'import torch\n'), ((29250, 29286), 'torch.LongTensor', 'torch.LongTensor', (['batch_y_pad_sorted'], {}), 
'(batch_y_pad_sorted)\n', (29266, 29286), False, 'import torch\n'), ((33601, 33637), 'torch.LongTensor', 'torch.LongTensor', (['batch_x_pad_sorted'], {}), '(batch_x_pad_sorted)\n', (33617, 33637), False, 'import torch\n'), ((33662, 33698), 'torch.LongTensor', 'torch.LongTensor', (['batch_y_pad_sorted'], {}), '(batch_y_pad_sorted)\n', (33678, 33698), False, 'import torch\n'), ((33723, 33761), 'torch.FloatTensor', 'torch.FloatTensor', (['batch_vi_pad_sorted'], {}), '(batch_vi_pad_sorted)\n', (33740, 33761), False, 'import torch\n'), ((38329, 38365), 'torch.LongTensor', 'torch.LongTensor', (['batch_x_pad_sorted'], {}), '(batch_x_pad_sorted)\n', (38345, 38365), False, 'import torch\n'), ((38390, 38426), 'torch.LongTensor', 'torch.LongTensor', (['batch_y_pad_sorted'], {}), '(batch_y_pad_sorted)\n', (38406, 38426), False, 'import torch\n'), ((38451, 38489), 'torch.FloatTensor', 'torch.FloatTensor', (['batch_vi_pad_sorted'], {}), '(batch_vi_pad_sorted)\n', (38468, 38489), False, 'import torch\n'), ((42422, 42458), 'torch.LongTensor', 'torch.LongTensor', (['batch_x_pad_sorted'], {}), '(batch_x_pad_sorted)\n', (42438, 42458), False, 'import torch\n'), ((42470, 42506), 'torch.LongTensor', 'torch.LongTensor', (['batch_y_pad_sorted'], {}), '(batch_y_pad_sorted)\n', (42486, 42506), False, 'import torch\n'), ((46498, 46534), 'torch.LongTensor', 'torch.LongTensor', (['batch_x_pad_sorted'], {}), '(batch_x_pad_sorted)\n', (46514, 46534), False, 'import torch\n'), ((46546, 46582), 'torch.LongTensor', 'torch.LongTensor', (['batch_y_pad_sorted'], {}), '(batch_y_pad_sorted)\n', (46562, 46582), False, 'import torch\n')] |
import grace
import grace.mask
import grace.times
import grace.ols
import numpy as np
import matplotlib.pyplot as plt
import mpl_toolkits.basemap as maps
import sklearn.mixture
import sklearn.decomposition
# Transform grids data
shape = grace.grids.shape
X = grace.grids.reshape(shape[0] * shape[1], shape[2])
mask = grace.mask.world()
X = X[mask.reshape(shape[0] * shape[1]), :]
pca = sklearn.decomposition.KernelPCA(kernel='rbf', fit_inverse_transform=True)
X = pca.fit_transform(X)
#
# Variance explaned
#
np.save('HPC-output/pca_kernel_lambda.npy', pca.lambdas_)
| [
"numpy.save",
"grace.mask.world",
"grace.grids.reshape"
] | [((262, 312), 'grace.grids.reshape', 'grace.grids.reshape', (['(shape[0] * shape[1])', 'shape[2]'], {}), '(shape[0] * shape[1], shape[2])\n', (281, 312), False, 'import grace\n'), ((320, 338), 'grace.mask.world', 'grace.mask.world', ([], {}), '()\n', (336, 338), False, 'import grace\n'), ((514, 571), 'numpy.save', 'np.save', (['"""HPC-output/pca_kernel_lambda.npy"""', 'pca.lambdas_'], {}), "('HPC-output/pca_kernel_lambda.npy', pca.lambdas_)\n", (521, 571), True, 'import numpy as np\n')] |
import rclpy
from rclpy.node import Node
from rclpy.qos import qos_profile_sensor_data
from sensor_msgs.msg import LaserScan
import cv2
import numpy as np
import math
from sensor_msgs.msg import Imu
def euler_from_quaternion(x, y, z, w):
"""
Convert a quaternion into euler angles (roll, pitch, yaw)
roll is rotation around x in radians (counterclockwise)
pitch is rotation around y in radians (counterclockwise)
yaw is rotation around z in radians (counterclockwise)
"""
t0 = +2.0 * (w * x + y * z)
t1 = +1.0 - 2.0 * (x * x + y * y)
roll_x = math.atan2(t0, t1)
t2 = +2.0 * (w * y - z * x)
t2 = +1.0 if t2 > +1.0 else t2
t2 = -1.0 if t2 < -1.0 else t2
pitch_y = math.asin(t2)
t3 = +2.0 * (w * z + x * y)
t4 = +1.0 - 2.0 * (y * y + z * z)
yaw_z = math.atan2(t3, t4)
return roll_x, pitch_y, yaw_z # in radians
class ImuSubscriber(Node):
def __init__(self):
super().__init__('imu_subscriber')
self.subscription = self.create_subscription(
Imu,
'/imu',
self.listener_callback,
qos_profile_sensor_data)
# self.subscription
def listener_callback(self, scan):
x = scan.orientation.x
y = scan.orientation.y
z = scan.orientation.z
w = scan.orientation.w
ax = scan.linear_acceleration.x
ay = scan.linear_acceleration.y
az = scan.linear_acceleration.z
rate = 180/np.pi
roll_x, pitch_y, yaw_z = euler_from_quaternion(x, y, z, w)
# self.get_logger().info('orientation: "%s"' % str((roll_x, pitch_y, yaw_z)))
# self.get_logger().info('linear_acceleration: "%s"' % str((ax, ay, az)))
self.get_logger().info('rate: "%s"' % str(int(yaw_z*rate)))
pass
MEAN_UNIT = 30
TH_RANGE=50
class BasicSubscriber(Node):
def __init__(self):
super().__init__('lidar_sensor')
self.subscription = self.create_subscription(LaserScan, '/scan', self.callback, qos_profile_sensor_data)
self.subscription_imu = self.create_subscription(Imu,'/imu',self.listener_callback,qos_profile_sensor_data)
self.yaw_z = None
self.size = 1000
self.over_rate = 200
def callback(self, data):
self.x =list()
self.y =list()
laser_range = data.ranges # 정면 부분
unit = np.pi/180
for idx, point in enumerate(laser_range):
if point>4 or point<=0:
self.x.append(int(-1))
self.y.append(int(-1))
else:
self.x.append(int(point*self.over_rate*np.cos(idx*unit)+self.size//2))
self.y.append(int(point*self.over_rate*np.sin(idx*unit)+self.size//2))
img = np.zeros((self.size, self.size, 3))
pre_xx, pre_yy = None, None
for xx, yy in zip(self.x, self.y):
if xx == -1: continue
if pre_xx is None and pre_yy is None:
pre_xx=xx
pre_yy =yy
else:
point_range = np.sqrt(np.square(pre_xx-xx)+np.square(pre_yy-yy))
if point_range<TH_RANGE: # 가까운 점일 경우
# if True: # 가까운 점일 경우
cv2.line(img, (pre_xx, pre_yy), (xx, yy), (255,255,255), 2)
pre_xx, pre_yy = xx, yy
else:
pre_xx, pre_yy = None, None
# cv2.circle(img,(xx+self.size//2,yy+self.size//2), radius=1,color=(255,255,255),thickness=1)
img = cv2.rotate(img,rotateCode= cv2.ROTATE_90_COUNTERCLOCKWISE)
img = cv2.flip(img, 1)
# img_float32 = np.float32(img)
img_float32 = np.uint8(img)
gray = cv2.cvtColor(img_float32,cv2.COLOR_RGB2GRAY)
# gray = np.unit8()
edges = cv2.Canny(gray,50,100,apertureSize = 3)
lines = cv2.HoughLinesP(edges, 1, np.pi / 180., 40, minLineLength=5, maxLineGap=20)
if lines is not None: # 라인 정보를 받았으면
for i in range(lines.shape[0]):
pt1 = (lines[i][0][0], lines[i][0][1]) # 시작점 좌표 x,y
pt2 = (lines[i][0][2], lines[i][0][3]) # 끝점 좌표, 가운데는 무조건 0
cv2.line(img, pt1, pt2, (255, 0, 255), 5, cv2.LINE_AA)
cv2.rectangle(img, (self.size//2-20,self.size//2-30),(self.size//2+20,self.size//2+30),(0,255,255),2)
cv2.imshow('plot', img)
# cv2.imshow('edges', edges)
key = cv2.waitKey(100)
# pass
# rho, theta, threshold, min_line_len, max_line_gap = 1, 1*np.pi/180, 30,10,20
# lines = cv2.HoughLinesP(img, rho, theta, threshold, np.array([]), minLineLength=min_line_len, maxLineGap=max_line_gap)
# if lines is not None:
# for line in lines:
# x1, y1, x2, y2 = line
# cv2.line(img,(x1, y1), (x2, y2), (255,0,0), 3)
# cv2.imshow('plot', img)
# key = cv2.waitKey(100)
if key == 27:
raise KeyboardInterrupt
def listener_callback(self, scan):
x = scan.orientation.x
y = scan.orientation.y
z = scan.orientation.z
w = scan.orientation.w
ax = scan.linear_acceleration.x
ay = scan.linear_acceleration.y
az = scan.linear_acceleration.z
rate = 180/np.pi
roll_x, pitch_y, yaw_z = euler_from_quaternion(x, y, z, w)
self.yaw_z=int(yaw_z*rate) # 절대 각도
# self.get_logger().info('orientation: "%s"' % str((roll_x, pitch_y, yaw_z)))
# self.get_logger().info('linear_acceleration: "%s"' % str((ax, ay, az)))
self.get_logger().info('rate: "%s"' % str(int(yaw_z*rate)))
pass
def main(args=None):
rclpy.init(args=args)
basic_subcriber = BasicSubscriber()
try:
rclpy.spin(basic_subcriber)
except KeyboardInterrupt:
print("프로그램 종료")
finally:
basic_subcriber.destroy_node()
rclpy.shutdown()
if __name__ == '__main__':
main() | [
"math.asin",
"math.atan2",
"rclpy.shutdown",
"numpy.sin",
"cv2.rectangle",
"cv2.HoughLinesP",
"cv2.imshow",
"cv2.line",
"rclpy.spin",
"cv2.cvtColor",
"cv2.Canny",
"numpy.uint8",
"cv2.waitKey",
"numpy.square",
"numpy.cos",
"cv2.flip",
"cv2.rotate",
"rclpy.init",
"numpy.zeros"
] | [((616, 634), 'math.atan2', 'math.atan2', (['t0', 't1'], {}), '(t0, t1)\n', (626, 634), False, 'import math\n'), ((773, 786), 'math.asin', 'math.asin', (['t2'], {}), '(t2)\n', (782, 786), False, 'import math\n'), ((887, 905), 'math.atan2', 'math.atan2', (['t3', 't4'], {}), '(t3, t4)\n', (897, 905), False, 'import math\n'), ((5804, 5825), 'rclpy.init', 'rclpy.init', ([], {'args': 'args'}), '(args=args)\n', (5814, 5825), False, 'import rclpy\n'), ((2886, 2921), 'numpy.zeros', 'np.zeros', (['(self.size, self.size, 3)'], {}), '((self.size, self.size, 3))\n', (2894, 2921), True, 'import numpy as np\n'), ((3651, 3709), 'cv2.rotate', 'cv2.rotate', (['img'], {'rotateCode': 'cv2.ROTATE_90_COUNTERCLOCKWISE'}), '(img, rotateCode=cv2.ROTATE_90_COUNTERCLOCKWISE)\n', (3661, 3709), False, 'import cv2\n'), ((3724, 3740), 'cv2.flip', 'cv2.flip', (['img', '(1)'], {}), '(img, 1)\n', (3732, 3740), False, 'import cv2\n'), ((3803, 3816), 'numpy.uint8', 'np.uint8', (['img'], {}), '(img)\n', (3811, 3816), True, 'import numpy as np\n'), ((3832, 3877), 'cv2.cvtColor', 'cv2.cvtColor', (['img_float32', 'cv2.COLOR_RGB2GRAY'], {}), '(img_float32, cv2.COLOR_RGB2GRAY)\n', (3844, 3877), False, 'import cv2\n'), ((3921, 3961), 'cv2.Canny', 'cv2.Canny', (['gray', '(50)', '(100)'], {'apertureSize': '(3)'}), '(gray, 50, 100, apertureSize=3)\n', (3930, 3961), False, 'import cv2\n'), ((3977, 4053), 'cv2.HoughLinesP', 'cv2.HoughLinesP', (['edges', '(1)', '(np.pi / 180.0)', '(40)'], {'minLineLength': '(5)', 'maxLineGap': '(20)'}), '(edges, 1, np.pi / 180.0, 40, minLineLength=5, maxLineGap=20)\n', (3992, 4053), False, 'import cv2\n'), ((4364, 4492), 'cv2.rectangle', 'cv2.rectangle', (['img', '(self.size // 2 - 20, self.size // 2 - 30)', '(self.size // 2 + 20, self.size // 2 + 30)', '(0, 255, 255)', '(2)'], {}), '(img, (self.size // 2 - 20, self.size // 2 - 30), (self.size //\n 2 + 20, self.size // 2 + 30), (0, 255, 255), 2)\n', (4377, 4492), False, 'import cv2\n'), ((4474, 4497), 'cv2.imshow', 
'cv2.imshow', (['"""plot"""', 'img'], {}), "('plot', img)\n", (4484, 4497), False, 'import cv2\n'), ((4549, 4565), 'cv2.waitKey', 'cv2.waitKey', (['(100)'], {}), '(100)\n', (4560, 4565), False, 'import cv2\n'), ((5883, 5910), 'rclpy.spin', 'rclpy.spin', (['basic_subcriber'], {}), '(basic_subcriber)\n', (5893, 5910), False, 'import rclpy\n'), ((6026, 6042), 'rclpy.shutdown', 'rclpy.shutdown', ([], {}), '()\n', (6040, 6042), False, 'import rclpy\n'), ((4301, 4355), 'cv2.line', 'cv2.line', (['img', 'pt1', 'pt2', '(255, 0, 255)', '(5)', 'cv2.LINE_AA'], {}), '(img, pt1, pt2, (255, 0, 255), 5, cv2.LINE_AA)\n', (4309, 4355), False, 'import cv2\n'), ((3349, 3410), 'cv2.line', 'cv2.line', (['img', '(pre_xx, pre_yy)', '(xx, yy)', '(255, 255, 255)', '(2)'], {}), '(img, (pre_xx, pre_yy), (xx, yy), (255, 255, 255), 2)\n', (3357, 3410), False, 'import cv2\n'), ((3194, 3216), 'numpy.square', 'np.square', (['(pre_xx - xx)'], {}), '(pre_xx - xx)\n', (3203, 3216), True, 'import numpy as np\n'), ((3215, 3237), 'numpy.square', 'np.square', (['(pre_yy - yy)'], {}), '(pre_yy - yy)\n', (3224, 3237), True, 'import numpy as np\n'), ((2740, 2758), 'numpy.cos', 'np.cos', (['(idx * unit)'], {}), '(idx * unit)\n', (2746, 2758), True, 'import numpy as np\n'), ((2827, 2845), 'numpy.sin', 'np.sin', (['(idx * unit)'], {}), '(idx * unit)\n', (2833, 2845), True, 'import numpy as np\n')] |
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of NVIDIA CORPORATION nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import tqdm
from coarse_to_fine.input_reader import InputReader, InputReaderBaseName, InputReaderSemMat2BaseName, InputReaderSemMat2
from contours import ContourBox
import argparse
import ast
import numpy as np
import os
from PIL import Image
class GenerateGT_PNGMask:
def __init__(self, classes_to_keep, output_dir):
self.classes_to_keep = classes_to_keep # Object classes: [11, 12, 13, 14, 15, 16, 17, 18]
self.output_dir = output_dir
def _save_fname(self, filename):
city_name = filename.split('_')[0]
gt_name = filename.split('_leftImg8bit')[0] + 'gtCoarseR_labelIds.png'
def generate_save(self, gt, improved, filename):
only_objects_updated, fully_improved = self._generate_single_mask(gt, improved)
city_name = filename.split('_')[0]
gt_name_objects = filename.split('_leftImg8bit')[0] + '_gtCoarseRefObj_labelIds.png'
gt_name_alls = filename.split('_leftImg8bit')[0] + '_gtCoarseRefAll_labelIds.png'
output_dir = os.path.join(self.output_dir, city_name)
if not os.path.isdir(output_dir):
os.makedirs(output_dir)
gt_name_alls = os.path.join(output_dir, gt_name_alls)
fully_improved = Image.fromarray(fully_improved)
fully_improved.save(gt_name_alls, 'png')
gt_name_objects = os.path.join(output_dir, gt_name_objects)
only_objects_updated = Image.fromarray(only_objects_updated)
only_objects_updated.save(gt_name_objects, 'png')
def _generate_single_mask(self, gt, improved):
final_canvas = np.zeros(gt.shape[1:]).astype(np.uint8)
all_updated_canvas = np.zeros(gt.shape[1:]).astype(np.uint8)
for k, (gt_k, improved_k) in enumerate(zip(gt, improved), start=0):
if k not in self.classes_to_keep:
if np.any(gt_k):
final_canvas[gt_k != 0] = k
else:
if np.any(improved_k) and np.any(gt_k):
final_canvas[improved_k != 0] = k
all_updated_canvas[improved_k != 0] = k
#
return final_canvas, all_updated_canvas
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('--coarse_dir', type=str,
default='./cityscapes-preprocess/gt_eval_coarse/gt_thin')
parser.add_argument('--in_dir', type=str,
default='./prediction_scaled_0_5')
parser.add_argument('--val_file_list', type=str,
default='./Cityscapes/benchmark/datadir/val.txt')
parser.add_argument('--n_classes', type=int, default=19)
parser.add_argument('--n_classes_start', type=int, default=1)
parser.add_argument('--level_set_method', type=str, default='MLS')
parser.add_argument('--level_set_config_dict', type=dict, default={})
# ---
parser.add_argument('--n_workers', type=int, default=8)
parser.add_argument('--smooth_lsteps', type=int, default=1)
parser.add_argument('--lambda_', type=float, default=0.0)
parser.add_argument('--alpha', type=float, default=1.0)
parser.add_argument('--step_ckpts', type=str, default="[0,60]")
parser.add_argument('--exp_name', type=str, default='test')
parser.add_argument('--output_dir', type=str, default='./refined_data_test')
parser.add_argument('--classes_to_keep', type=list, default=[])
parser.add_argument('--balloon', type=float, default=1)
parser.add_argument('--threshold', type=float, default=0.99)
parser.add_argument('--merge_weight', type=float, default=0.5)
args = parser.parse_args()
level_set_config_dict = {
'lambda_': args.lambda_,
'alpha': args.alpha,
'smoothing': args.smooth_lsteps,
'render_radius': -1,
'is_gt_semantic': True,
'method': args.level_set_method,
'balloon': args.balloon,
'threshold': args.threshold,
'merge_weight': args.merge_weight,
'step_ckpts': ast.literal_eval(args.step_ckpts)
}
args.level_set_config_dict = level_set_config_dict
return args
def do_it(args):
in_dir = args.in_dir
val_file_list = args.val_file_list
coarse_dir = args.coarse_dir
n_classes_interval = (args.n_classes_start, args.n_classes)
level_set_config_dict = args.level_set_config_dict
classes_to_keep = [11, 12, 13, 14, 15, 16, 17, 18] # args.classes_to_keep
ireader = InputReaderBaseName(in_dir, val_file_list, n_classes_interval)
#
ireader_coarse = InputReaderSemMat2BaseName(coarse_dir, val_file_list, n_classes_interval)
#
ireader_coarse.set_external_list(ireader._read_list)
cbox = ContourBox.LevelSetAlignment(n_workers=1,
fn_post_process_callback=None,
config=level_set_config_dict)
mask_generator = GenerateGT_PNGMask(classes_to_keep, args.output_dir)
for (im_filename, pred_ch), (seg_fname, seg_coarse) in tqdm.tqdm(
zip(ireader, ireader_coarse), total=len(ireader)):
assert len(pred_ch) == len(seg_coarse), 'num ch should match'
output, _ = cbox({'seg': np.expand_dims(seg_coarse, 0), 'bdry': None},
np.expand_dims(np.stack(pred_ch), 0))
# assuming the last ckpts is the one we are going to use
improved_mask = output[0, :, -1, :, :]
seg_coarse = np.stack(seg_coarse)
mask_generator.generate_save(seg_coarse, improved_mask, seg_fname)
if __name__ == "__main__":
args = parse_args()
do_it(args)
| [
"numpy.stack",
"contours.ContourBox.LevelSetAlignment",
"coarse_to_fine.input_reader.InputReaderSemMat2BaseName",
"coarse_to_fine.input_reader.InputReaderBaseName",
"ast.literal_eval",
"argparse.ArgumentParser",
"os.makedirs",
"os.path.isdir",
"numpy.zeros",
"numpy.expand_dims",
"numpy.any",
"... | [((3693, 3718), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (3716, 3718), False, 'import argparse\n'), ((5933, 5995), 'coarse_to_fine.input_reader.InputReaderBaseName', 'InputReaderBaseName', (['in_dir', 'val_file_list', 'n_classes_interval'], {}), '(in_dir, val_file_list, n_classes_interval)\n', (5952, 5995), False, 'from coarse_to_fine.input_reader import InputReader, InputReaderBaseName, InputReaderSemMat2BaseName, InputReaderSemMat2\n'), ((6024, 6097), 'coarse_to_fine.input_reader.InputReaderSemMat2BaseName', 'InputReaderSemMat2BaseName', (['coarse_dir', 'val_file_list', 'n_classes_interval'], {}), '(coarse_dir, val_file_list, n_classes_interval)\n', (6050, 6097), False, 'from coarse_to_fine.input_reader import InputReader, InputReaderBaseName, InputReaderSemMat2BaseName, InputReaderSemMat2\n'), ((6173, 6279), 'contours.ContourBox.LevelSetAlignment', 'ContourBox.LevelSetAlignment', ([], {'n_workers': '(1)', 'fn_post_process_callback': 'None', 'config': 'level_set_config_dict'}), '(n_workers=1, fn_post_process_callback=None,\n config=level_set_config_dict)\n', (6201, 6279), False, 'from contours import ContourBox\n'), ((2544, 2584), 'os.path.join', 'os.path.join', (['self.output_dir', 'city_name'], {}), '(self.output_dir, city_name)\n', (2556, 2584), False, 'import os\n'), ((2687, 2725), 'os.path.join', 'os.path.join', (['output_dir', 'gt_name_alls'], {}), '(output_dir, gt_name_alls)\n', (2699, 2725), False, 'import os\n'), ((2752, 2783), 'PIL.Image.fromarray', 'Image.fromarray', (['fully_improved'], {}), '(fully_improved)\n', (2767, 2783), False, 'from PIL import Image\n'), ((2860, 2901), 'os.path.join', 'os.path.join', (['output_dir', 'gt_name_objects'], {}), '(output_dir, gt_name_objects)\n', (2872, 2901), False, 'import os\n'), ((2934, 2971), 'PIL.Image.fromarray', 'Image.fromarray', (['only_objects_updated'], {}), '(only_objects_updated)\n', (2949, 2971), False, 'from PIL import Image\n'), ((5490, 5523), 
'ast.literal_eval', 'ast.literal_eval', (['args.step_ckpts'], {}), '(args.step_ckpts)\n', (5506, 5523), False, 'import ast\n'), ((6912, 6932), 'numpy.stack', 'np.stack', (['seg_coarse'], {}), '(seg_coarse)\n', (6920, 6932), True, 'import numpy as np\n'), ((2600, 2625), 'os.path.isdir', 'os.path.isdir', (['output_dir'], {}), '(output_dir)\n', (2613, 2625), False, 'import os\n'), ((2639, 2662), 'os.makedirs', 'os.makedirs', (['output_dir'], {}), '(output_dir)\n', (2650, 2662), False, 'import os\n'), ((3105, 3127), 'numpy.zeros', 'np.zeros', (['gt.shape[1:]'], {}), '(gt.shape[1:])\n', (3113, 3127), True, 'import numpy as np\n'), ((3174, 3196), 'numpy.zeros', 'np.zeros', (['gt.shape[1:]'], {}), '(gt.shape[1:])\n', (3182, 3196), True, 'import numpy as np\n'), ((3357, 3369), 'numpy.any', 'np.any', (['gt_k'], {}), '(gt_k)\n', (3363, 3369), True, 'import numpy as np\n'), ((6669, 6698), 'numpy.expand_dims', 'np.expand_dims', (['seg_coarse', '(0)'], {}), '(seg_coarse, 0)\n', (6683, 6698), True, 'import numpy as np\n'), ((6755, 6772), 'numpy.stack', 'np.stack', (['pred_ch'], {}), '(pred_ch)\n', (6763, 6772), True, 'import numpy as np\n'), ((3456, 3474), 'numpy.any', 'np.any', (['improved_k'], {}), '(improved_k)\n', (3462, 3474), True, 'import numpy as np\n'), ((3479, 3491), 'numpy.any', 'np.any', (['gt_k'], {}), '(gt_k)\n', (3485, 3491), True, 'import numpy as np\n')] |
import numpy as np
import cv2
import os
def nothing(x):
    """No-op trackbar callback; OpenCV requires a callback but no action is needed."""
    return None
def loadFile(fname):
    """Load a ``.npy`` array, normalise it to [0, 1] and resize to 640x480.

    Args:
        fname: Path to a NumPy ``.npy`` file.

    Returns:
        The normalised data resized to 640x480 with cv2's default
        interpolation.
    """
    x = np.load(fname)
    # Normalise by the maximum so values fit cv2's float display range.
    # NOTE(review): an all-zero array would divide by zero here -- confirm
    # the data files are guaranteed non-zero.
    x = x / x.max()
    return cv2.resize(x, (640, 480))
def getFilenames(base_dir):
    """Recursively collect every file path below ``base_dir``.

    Args:
        base_dir: Root directory to walk.

    Returns:
        A list of full file paths (os.path.join keeps separators portable,
        unlike manual "/" concatenation).
    """
    my_list = []
    for dir_name, _subdirs, file_list in os.walk(base_dir):
        for fname in file_list:
            my_list.append(os.path.join(dir_name, fname))
    return my_list
# --- Interactive threshold viewer ------------------------------------------
# A trackbar sets the clipping level (percent of the normalised maximum);
# spacebar steps through the data files and Escape exits.
cv2.namedWindow('image')
# cv2.createTrackbar requires integer value/count arguments; the original
# float arguments (0.0, 100.0) raise a TypeError in OpenCV.
cv2.createTrackbar('val', 'image', 0, 100, nothing)
spot = 0
fnames = getFilenames('data_bin_50cm')
rx = loadFile(fnames[spot])
level = 50
print("Slider: change threshold (percent of max value of original)")
print("Spacebar: Select next image")
print("Escape: exit")
while True:
    tmp = np.clip(rx, 0, level / 100.0)
    cv2.imshow('image', tmp)
    k = cv2.waitKey(1) & 0xFF
    if k == 27:  # Escape
        break
    if k == 32:  # Spacebar: advance, wrapping around instead of IndexError
        spot = (spot + 1) % len(fnames)
        rx = loadFile(fnames[spot])
    # Re-read the slider every frame so the clip level tracks the UI.
    level = cv2.getTrackbarPos('val', 'image')
cv2.destroyAllWindows()
| [
"cv2.createTrackbar",
"numpy.load",
"cv2.waitKey",
"os.walk",
"cv2.imshow",
"numpy.clip",
"cv2.namedWindow",
"cv2.getTrackbarPos",
"cv2.destroyAllWindows",
"cv2.resize"
] | [((473, 497), 'cv2.namedWindow', 'cv2.namedWindow', (['"""image"""'], {}), "('image')\n", (488, 497), False, 'import cv2\n'), ((498, 553), 'cv2.createTrackbar', 'cv2.createTrackbar', (['"""val"""', '"""image"""', '(0.0)', '(100.0)', 'nothing'], {}), "('val', 'image', 0.0, 100.0, nothing)\n", (516, 553), False, 'import cv2\n'), ((1086, 1109), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (1107, 1109), False, 'import cv2\n'), ((95, 109), 'numpy.load', 'np.load', (['fname'], {}), '(fname)\n', (102, 109), True, 'import numpy as np\n'), ((198, 223), 'cv2.resize', 'cv2.resize', (['x', '(640, 480)'], {}), '(x, (640, 480))\n', (208, 223), False, 'import cv2\n'), ((355, 372), 'os.walk', 'os.walk', (['base_dir'], {}), '(base_dir)\n', (362, 372), False, 'import os\n'), ((815, 845), 'numpy.clip', 'np.clip', (['tmp', '(0)', '(level / 100.0)'], {}), '(tmp, 0, level / 100.0)\n', (822, 845), True, 'import numpy as np\n'), ((866, 890), 'cv2.imshow', 'cv2.imshow', (['"""image"""', 'tmp'], {}), "('image', tmp)\n", (876, 890), False, 'import cv2\n'), ((1051, 1085), 'cv2.getTrackbarPos', 'cv2.getTrackbarPos', (['"""val"""', '"""image"""'], {}), "('val', 'image')\n", (1069, 1085), False, 'import cv2\n'), ((898, 912), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (909, 912), False, 'import cv2\n')] |
from pathlib import Path
from typing import List
import click
from PIL import Image
# Load image
from numpy import zeros, dtype
from numpy import asarray
def get_neighbors(x: int, y: int, source) -> List[int]:
    """Return the 8 neighbouring pixel values of (x, y), wrapping at the edges.

    Neighbours are listed column-major: x-offset -1, 0, 1 (outer) with
    y-offset -1, 0, 1 (inner), skipping the centre pixel itself.
    """
    height, width = source.shape
    offsets = [(dx, dy)
               for dx in (-1, 0, 1)
               for dy in (-1, 0, 1)
               if (dx, dy) != (0, 0)]
    return [source[(y + dy) % height, (x + dx) % width] for dx, dy in offsets]
# Filter function: maps (x, y, source) -> binarised pixel value.
def custom_kernel_filter(x: int, y: int, source) -> int:
    """Binarise pixel (x, y) against the mean of its 8 neighbours.

    Pixels much darker than their surroundings (>15% below the mean) become
    0, much brighter ones become 255; otherwise a plain 50% threshold on the
    pixel itself decides.
    """
    centre = source[y, x]
    neighbours = get_neighbors(x, y, source)
    mean = sum(neighbours) / float(len(neighbours))
    if mean > centre * 1.15:
        return 0
    if mean < centre * 0.85:
        return 255
    return 255 if centre > 125 else 0
def dither_filter(x: int, y: int, source) -> int:
    """Identity filter: return pixel (x, y) unchanged."""
    pixel = source[y, x]
    return pixel
def threshold_filter(x: int, y: int, source) -> int:
    """Binarise pixel (x, y): values strictly above 128 map to 255, else 0."""
    return 255 if source[y, x] > 128 else 0
@click.command()
@click.argument(
    'src',
    nargs=1,
    type=click.Path(exists=True)
)
@click.argument(
    'dst',
    nargs=1,
    type=click.Path(exists=False)
)
@click.option(
    "--mode",
    required=True,
    type=click.Choice(
        ['threshold', 'dither', 'custom_1'],
        case_sensitive=False
    ))
def convert_grayscale_to_1bit(src: str, dst: str, mode: str):
    """Convert an image at ``src`` to a 1-bit black/white image at ``dst``.

    Every pixel is binarised by the filter selected with ``--mode``
    (threshold, dither, or custom_1) and the result is saved as a
    1-bit image.
    """
    input_filepath = Path(src)
    output_filepath = Path(dst)
    # Convert to numpy, and get single-channel color
    # NOTE(review): assumes the decoded image has at least 3 channels
    # (only channel 0 is used) -- a single-channel source would fail here.
    input_rgb = asarray(Image.open(input_filepath.as_posix()))
    input_arr = input_rgb[:, :, 0]
    # Apply filter for each pixel on the map
    output_arr = zeros(shape=input_arr.shape, dtype=dtype('uint8'))
    # Select the per-pixel filter function by mode (click already restricts
    # the choices, so the else branch is defensive only).
    if mode == 'threshold':
        filter_func = threshold_filter
    elif mode == 'dither':
        filter_func = dither_filter
    elif mode == 'custom_1':
        filter_func = custom_kernel_filter
    else:
        raise Exception('Please specify a mode')
    for output_y in range(output_arr.shape[0]):
        for output_x in range(output_arr.shape[1]):
            output_arr[output_y][output_x] = filter_func(
                output_x,
                output_y,
                input_arr
            )
    # Output the image
    # Numpy does not support 1-bit arrays, so we have to resort to first
    # creating the image as a 8-bit image, then convert to 1-bit.
    Image\
        .fromarray(output_arr, mode='L')\
        .convert('1')\
        .save(output_filepath.as_posix())
if __name__ == '__main__':
    convert_grayscale_to_1bit()
| [
"numpy.dtype",
"click.command",
"click.Choice",
"pathlib.Path",
"click.Path",
"PIL.Image.fromarray"
] | [((1082, 1097), 'click.command', 'click.command', ([], {}), '()\n', (1095, 1097), False, 'import click\n'), ((1486, 1495), 'pathlib.Path', 'Path', (['src'], {}), '(src)\n', (1490, 1495), False, 'from pathlib import Path\n'), ((1518, 1527), 'pathlib.Path', 'Path', (['dst'], {}), '(dst)\n', (1522, 1527), False, 'from pathlib import Path\n'), ((1148, 1171), 'click.Path', 'click.Path', ([], {'exists': '(True)'}), '(exists=True)\n', (1158, 1171), False, 'import click\n'), ((1224, 1248), 'click.Path', 'click.Path', ([], {'exists': '(False)'}), '(exists=False)\n', (1234, 1248), False, 'import click\n'), ((1308, 1379), 'click.Choice', 'click.Choice', (["['threshold', 'dither', 'custom_1']"], {'case_sensitive': '(False)'}), "(['threshold', 'dither', 'custom_1'], case_sensitive=False)\n", (1320, 1379), False, 'import click\n'), ((1778, 1792), 'numpy.dtype', 'dtype', (['"""uint8"""'], {}), "('uint8')\n", (1783, 1792), False, 'from numpy import zeros, dtype\n'), ((2474, 2511), 'PIL.Image.fromarray', 'Image.fromarray', (['output_arr'], {'mode': '"""L"""'}), "(output_arr, mode='L')\n", (2489, 2511), False, 'from PIL import Image\n')] |
# -*- coding: utf-8 -*-
import os
import torch
from torchtext.data import Field
from onmt.inputters.datareader_base import DataReaderBase
# domain specific dependencies
try:
import numpy as np
except ImportError:
np = None
class MatrixDataReader(DataReaderBase):
    """Read matrix data from disk, yielding one example dict per file."""

    def __init__(self):
        self._check_deps()

    @classmethod
    def from_opt(cls, opt):
        """Alternate constructor from an options object (``opt`` is unused)."""
        return cls()

    @classmethod
    def _check_deps(cls):
        # numpy is an optional dependency; fail loudly if it is missing.
        if np is None:
            cls._raise_missing_dep(
                "numpy")

    def read(self, matrix_files, side, data_dir=None):
        """Read data into dicts.

        Args:
            matrix_files: iterable of path lines, or a path to a file
                listing them (one per line).
            side: "src"
            data_dir: directory the listed paths are relative to.

        Yields:
            a dictionary containing matrix data, path and index for each line.
        """
        if isinstance(matrix_files, str):
            matrix_files = DataReaderBase._read_file(matrix_files)
        for i, filename in enumerate(matrix_files):
            filename = filename.decode("utf-8").strip()
            candidate = os.path.join(data_dir, filename)
            # Fall back to interpreting the line as an absolute/plain path.
            if not os.path.exists(candidate):
                candidate = filename
            assert os.path.exists(candidate), \
                'matrix path %s not found' % filename
            matrix = torch.from_numpy(np.loadtxt(candidate))
            yield {side: matrix, side + '_path': filename, 'indices': i}
def mat_sort_key(ex):
    """Sort key: the first dimension (variable length) of ``ex.src``."""
    length = ex.src.size(0)
    return length
class MatrixField(Field):
    """Defines a matrix datatype and instructions for converting to Tensor.
    See :class:`Fields` for attribute descriptions.
    """
    def __init__(self, preprocessing=None, postprocessing=None,
                 include_lengths=True, batch_first=False, pad_index=0,
                 dtype=torch.float, is_target=False):
        # Matrices are dense tensors, not token sequences, so all the
        # vocabulary/tokenisation machinery of the base Field is disabled.
        super(MatrixField, self).__init__(
            sequential=False, use_vocab=False, init_token=None,
            eos_token=None, fix_length=False, dtype=dtype,
            preprocessing=preprocessing, postprocessing=postprocessing,
            lower=False, tokenize=None, include_lengths=include_lengths,
            batch_first=batch_first, pad_token=None, unk_token=None,
            pad_first=False, truncate_first=False, stop_words=None,
            is_target=is_target
        )
        # Fill value for the padded region of each batch tensor.
        self.pad_index = pad_index
    def pad(self, minibatch):
        """Pad a batch of examples to the length of the longest example.
        Args:
            minibatch (List[torch.FloatTensor]): A list of matrix data, each
                having shape len x n_feats where len is variable.
        Returns:
            torch.FloatTensor or Tuple[torch.FloatTensor, List[int]]: The
                padded tensor of shape ``(batch_size, max_len, n_feats)``.
                and a list of the lengths if `self.include_lengths` is `True`
                else just returns the padded tensor.
        """
        # Padding-position / truncation / fixed-length options are not
        # supported for matrices; the constructor never enables them.
        assert not self.pad_first and not self.truncate_first \
            and not self.fix_length
        minibatch = list(minibatch)
        # minibatch[0] with shape len x n_feats
        lengths = [x.size(0) for x in minibatch]
        max_len = max(lengths)
        nfft = minibatch[0].size(1)
        # Allocate a (batch, max_len, n_feats) tensor pre-filled with the
        # pad value, then copy each example into its leading rows.
        matrixs = torch.full((len(minibatch), max_len, nfft), self.pad_index)
        for i, (mat, len_) in enumerate(zip(minibatch, lengths)):
            matrixs[i, 0:len_, :] = mat
        if self.include_lengths:
            return (matrixs, lengths)
        return matrixs
    def numericalize(self, arr, device=None):
        """Turn a batch of examples that use this field into a Variable.
        If the field has ``include_lengths=True``, a tensor of lengths will be
        included in the return value.
        Args:
            arr (torch.FloatTensor or Tuple(torch.FloatTensor, List[int])):
                List of tokenized and padded examples, or tuple of List of
                tokenized and padded examples and List of lengths of each
                example if self.include_lengths is True. Examples have shape
                ``(batch_size, n_feats, max_len)`` if `self.batch_first`
                else ``(max_len, batch_size, 1, n_feats)``.
            device (str or torch.device): See `Field.numericalize`.
        """
        assert self.use_vocab is False
        if self.include_lengths and not isinstance(arr, tuple):
            raise ValueError("Field has include_lengths set to True, but "
                             "input data is not a tuple of "
                             "(data batch, batch lengths).")
        if isinstance(arr, tuple):
            arr, lengths = arr
            lengths = torch.tensor(lengths, dtype=torch.int, device=device)
        if self.postprocessing is not None:
            arr = self.postprocessing(arr, None)
        # pad() produces batch-first data; rearrange to (max_len, batch,
        # n_feats) unless the field is configured batch-first.
        if not self.batch_first:
            arr = arr.permute(1, 0, 2)
        arr = arr.contiguous()
        arr = arr.to(device)
        if self.include_lengths:
            return arr, lengths
        return arr
def matrix_fields(**kwargs):
    """Return a MatrixField with the default configuration.

    ``kwargs`` is accepted for interface compatibility but currently
    ignored: include_lengths is always True, batch_first False, pad 0.
    """
    return MatrixField(
        include_lengths=True, batch_first=False,
        pad_index=0, dtype=torch.float)
| [
"os.path.exists",
"numpy.loadtxt",
"onmt.inputters.datareader_base.DataReaderBase._read_file",
"os.path.join",
"torch.tensor"
] | [((874, 913), 'onmt.inputters.datareader_base.DataReaderBase._read_file', 'DataReaderBase._read_file', (['matrix_files'], {}), '(matrix_files)\n', (899, 913), False, 'from onmt.inputters.datareader_base import DataReaderBase\n'), ((1046, 1078), 'os.path.join', 'os.path.join', (['data_dir', 'filename'], {}), '(data_dir, filename)\n', (1058, 1078), False, 'import os\n'), ((1180, 1204), 'os.path.exists', 'os.path.exists', (['mat_path'], {}), '(mat_path)\n', (1194, 1204), False, 'import os\n'), ((4690, 4743), 'torch.tensor', 'torch.tensor', (['lengths'], {'dtype': 'torch.int', 'device': 'device'}), '(lengths, dtype=torch.int, device=device)\n', (4702, 4743), False, 'import torch\n'), ((1098, 1122), 'os.path.exists', 'os.path.exists', (['mat_path'], {}), '(mat_path)\n', (1112, 1122), False, 'import os\n'), ((1298, 1318), 'numpy.loadtxt', 'np.loadtxt', (['mat_path'], {}), '(mat_path)\n', (1308, 1318), True, 'import numpy as np\n')] |
import dolfin as df
import numpy as np
import os
from . import *
from common.io import mpi_is_root, load_mesh, mpi_barrier, mpi_comm, mpi_bcast, mpi_gather
from common.bcs import Fixed, Pressure, NoSlip, ContactAngle
#
from ufl import max_value
__author__ = "<NAME>"
####################
#
# This is a 2D demo for simulating micro-droplet formation from a T-Junction.
# The demo uses an imported mesh created in GMSH and converted with Meshio.
#
# The parameter space was adapted from https://youtu.be/HtwWseX-zVM
# NOTE: The geomerties are slightly different so it is not an exact 1:1 copy
#
# Surface Tension = https://youtu.be/HtwWseX-zVM?t=267
# Contact Angle = https://youtu.be/HtwWseX-zVM?t=434
# Oil Velocity = https://youtu.be/HtwWseX-zVM?t=397
# Oil Density and Viscosity = https://youtu.be/HtwWseX-zVM?t=341
# Water Velocity = https://youtu.be/HtwWseX-zVM?t=410
# Water Density and Viscosity = https://youtu.be/HtwWseX-zVM?t=321
#
# <NAME>, 2020, University of Kent, SPS
####################
def FaceLength(faceNum, mesh, subdomains_file, dim):
    """Measure the extent of a tagged boundary facet (assumed a straight line).

    Gathers the vertex coordinates of every facet tagged ``faceNum`` in
    ``subdomains_file`` across all MPI ranks, then broadcasts the extent and
    minimum of those coordinates back to every rank.

    Returns:
        (xRange, yRange, xMin, yMin) -- identical on every MPI rank.
    """
    # State desired face which measures are taking place upon
    if mpi_is_root():
        print(faceNum)
    # Display how mesh is separated
    # print("Node: ", MPI_rank, "Mesh Cells: ", mesh.cells().size)
    # Import subdomains
    mvc = df.MeshValueCollection("size_t", mesh, dim-1)
    with df.XDMFFile(mpi_comm(), subdomains_file) as infile:
        infile.read(mvc, "name_to_read")
    facet_domains = df.cpp.mesh.MeshFunctionSizet(mesh, mvc)
    ## Calculate limits so inflow parabolic can work on co-ordinates not at 0
    # Create empty variable space
    X = []
    Y = []
    xInflowRange = 0
    yInflowRange = 0
    xInflowMin = 0
    yInflowMin = 0
    # Retrieve all co-ords as element for desired face
    It_facet = df.SubsetIterator(facet_domains,faceNum)
    mpi_barrier()
    # print("Faces: ", df.SubsetIterator(facet_domains,faceNum))
    #https://fenicsproject.org/qa/13995/print-coordinate-of-boundary-seperately-i-e-left-boundary/
    #It_mesh = vertices([facet_domains.array() == 26])
    # Collected all co-ords for desired face
    for facet_domains in It_facet:
        for v in df.vertices(facet_domains):
            X.append(v.point().x())
            Y.append(v.point().y())
    # Ensure all processes collect co-ords for desired face
    mpi_barrier()
    # Gather all co-ords to calc length/min
    X = mpi_gather(X, 0)
    Y = mpi_gather(Y, 0)
    # Sync all parallel processes for length/min calc
    mpi_barrier()
    if mpi_is_root():
        # Remove empty and combine all arrays
        X = np.concatenate(X)
        Y = np.concatenate(Y)
        # Calculate length and min values (only the root rank computes)
        xInflowRange = np.ptp(X,axis=0)
        yInflowRange = np.ptp(Y,axis=0)
        xInflowMin = np.amin(X)
        yInflowMin = np.amin(Y)
    # END: Sync all parallel processes for length/min calc
    mpi_barrier()
    # Broadcast all length/min calc to all nodes used
    xInflowRange = mpi_bcast(xInflowRange, 0)
    yInflowRange = mpi_bcast(yInflowRange, 0)
    xInflowMin = mpi_bcast(xInflowMin, 0)
    yInflowMin = mpi_bcast(yInflowMin, 0)
    # Clear variables
    v = []
    It_facet = []
    facet_domains = []
    return xInflowRange, yInflowRange, xInflowMin, yInflowMin
def problem():
    """Build the parameter dictionary for the flow-focusing T-junction demo.

    Also probes the imported mesh twice (water inlet, then oil inletT) to
    replace the placeholder H/Hmin inlet dimensions with measured values.
    """
    info_cyan("Flow Focusing T-Junction.")
    # 2, beta in phase 1, beta in phase 2
    solutes = [["c_p", 1, 1e-4, 1e-2, 4., 1.],
               ["c_m", -1, 1e-4, 1e-2, 4., 1.]]
    #solutes = [["c_p", 0, 1e-3, 1e-2, 4., 1.]]
    # Format: name : (family, degree, is_vector)
    base_elements = dict(u=["Lagrange", 2, True],
                         p=["Lagrange", 1, False],
                         phi=["Lagrange", 1, False],
                         g=["Lagrange", 1, False],
                         c=["Lagrange", 1, False],
                         V=["Lagrange", 1, False])
    factor = 2 # Water to oil ratio (1:Factor)
    scaling_factor = 0.001 # convert from mm to metres
    # Default parameters to be loaded unless starting from checkpoint.
    parameters = dict(
        solver="basic", # Type of problem solver
        folder="results_flow_focusing", # Save folder
        import_mesh = True, # If importing XDMF mesh files
        scale_factor = scaling_factor, # Change mesh dimension (Use if mesh not in metres)
        mesh_file = "meshes/mesh_flowfocus.xdmf", # Mesh filepath
        subdomains_file = "meshes/mf_flowfocus.xdmf", # Subdomains filepath
        name_Facet = "inlet", # Name of inlet within "boundaries_Facet" for Hmin/H
        restart_folder=False, # Use if restarting from different folder
        enable_NS=True, # Enable Navier Stokes (NS)
        enable_PF=True, # Enable Phase Field (PF)
        enable_EC=False, # Enable Electrochem (EC)
        save_intv=5, # Export data time point interval
        stats_intv=5, # Export stats interval
        checkpoint_intv=50, # Export checkpoint for restart
        tstep=0, # Starting timestep index
        dt=1e-5, # s Time steps
        t_0=0., # s Start time
        T=8., # s Total time
        interface_thickness=(0.07/5)*scaling_factor, # Interface thickness between PF
        solutes=solutes, # Electrochemistry (EC) related solute definitions
        base_elements=base_elements, # Basic "CG"/"Lagrange" function space
        WaterOilInlet=0, # 0 = all inlets, 1 = WaterInlet, 2 = OilInlet
        # Assumption that both OilInlets are mirrored in Y axis
        H=[0.41,0.41], # Length of inlet (Updated in "FaceLength()")
        Hmin = [0,0], # Minimum of inlet (Updated in "FaceLength()")
        dim = 2, # Dimensions
        XInflow = True, # Direction of flow along X axis
        concentration_left=1., # Concentration in PF (EC related)
        #
        # Contact Angle required normalisation against 180 deg
        # as it was defined for phase 2 to phase 1.
        # Here it is phase 1 to phase 2
        contact_angle=180-135, # Deg
        surface_tension=0.005, # n/m
        contact_angle_init=False, # Not required
        grav_const=0.0, # N/A for microfluidics
        inlet_velocity=0.0103, #m/s
        inlet_velocityOil=0.0103, #m/s
        V_0=0.,
        #
        # PF1 = Oil (1 in PF data); PF2 = Water (-1 in PF data);
        pf_mobility_coeff=2e-6*scaling_factor, # Important for forming phase liquids
        density=[1000, 998.2], # Kg/m3
        viscosity=[6.71e-3,1.003e-3],# Kg/m.s
        permittivity=[1., 1.], # EC permittivities
        #
        use_iterative_solvers=True, # if False, might have memory issues
        use_pressure_stabilization=False, # Seems to be a type of SUPG, unsure (see solver)
        #
        # Boundary related physical labels (Numbers within mf_subdomains.xdmf)
        # Typically created within GMSH/Netgen and converted by Meshio
        boundaries_Facet = {'inlet': 12,
                            'inletT': 9,
                            'inletB': 15,
                            'outlet': 4,
                            'wallLR' : [13,11,7,1,3,5],
                            'wallRL' : [14,10,8,16,2,6]
                            }
    )
    # Retrieve inlet dimensions (min/length) from mesh
    [mesh1, parameters1] = mesh(parameters["mesh_file"],
            parameters["subdomains_file"], parameters["XInflow"],
            parameters["boundaries_Facet"], "inlet", parameters["scale_factor"], False)
    # Remove temp mesh, not required
    mesh1 = []
    # Save parameters to main dictionary (Already bcast from mesh function)
    parameters["dim"] = parameters1["dim"]
    parameters["H"][0] = parameters1["H"]
    parameters["Hmin"][0] = parameters1["Hmin"]
    # In this case, inletT and inletB are indistinguishable with regards to y axis
    # XInflow = False
    # Boundary name = "inletT"
    [mesh1, parameters1] = mesh(parameters["mesh_file"],
            parameters["subdomains_file"], False,
            parameters["boundaries_Facet"], "inletT", parameters["scale_factor"], False)
    mesh1 = []
    parameters["H"][1] = parameters1["H"]
    parameters["Hmin"][1] = parameters1["Hmin"]
    # Ensure all processes complete before return (Might be redundant)
    mpi_barrier()
    return parameters
def mesh(mesh_file, subdomains_file, XInflow,
         boundaries_Facet, name_Facet, scale_factor,
         import_mesh, **namespace):
    """Load, scale and shift the XDMF mesh; optionally measure one facet.

    The mesh is scaled by ``scale_factor`` and translated so that all
    coordinates are non-negative. If ``import_mesh`` is True the mesh alone
    is returned; otherwise the facet named ``name_Facet`` is measured with
    FaceLength() and (mesh, parameters) is returned, where parameters holds
    dim, H and Hmin (H/Hmin taken from Y if XInflow else from X).
    """
    # Load mesh from file (NETGEN mesh as .grid to .xml using DOLFIN)
    mesh = df.Mesh()
    with df.XDMFFile(mesh_file) as infile:
        infile.read(mesh)
    # # Scale mesh from mm to m
    x = mesh.coordinates()
    #scaling_factor = 0.001
    x[:, :] *= scale_factor
    # # Move mesh so co-ords always positive
    # Global minimum is found across all MPI ranks, then broadcast.
    xymin = x.min(axis=0)
    mpi_barrier()
    xymin = np.min(mpi_gather(xymin, 0))
    mpi_barrier()
    xymin = mpi_bcast(xymin, 0)
    mpi_barrier()
    x[:, :] = x[:, :] - xymin
    # Apply to mesh
    mesh.bounding_box_tree().build(mesh) # development version
    # Define boundary conditions
    dim = mesh.topology().dim()
    if mpi_is_root():
        print('Dim:',dim)
    # Ensure all processes have completed
    mpi_barrier()
    if import_mesh: #Mesh import is true
        return mesh
    else: #Otherwise generating range and min of boundary facet assuming line
        [X, Y, Xmin, Ymin] = FaceLength(boundaries_Facet[name_Facet], mesh,
                                subdomains_file, dim)
        # Display boundary dimensions (inlet in most cases)
        mpi_barrier()
        if mpi_is_root():
            info_yellow("Boundary Dimensions")
            print("x: ",X)
            print("y: ",Y)
            print("xMin: ",Xmin)
            print("yMin: ",Ymin)
        # Save length/min to dictionary
        # This will not overwrite prior dictionary
        # as this is in an independent function
        parameters = dict()
        parameters["dim"] = dim
        if XInflow == True:
            parameters["H"] = Y
            parameters["Hmin"] = Ymin
        else:
            parameters["H"] = X
            parameters["Hmin"] = Xmin
        # Ensure all processes have completed (Might be redundant)
        mpi_barrier()
        return mesh, parameters
def initialize(H, Hmin,
               interface_thickness, solutes, restart_folder,
               field_to_subspace, inlet_velocityOil,
               inlet_velocity, concentration_left,
               enable_NS, enable_PF, enable_EC,
               **namespace):
    """ Create the initial state.
    The initial states are specified in a dict indexed by field. The format
    should be
        w_init_field[field] = 'df.Function(...)'.
    The work dicts w_ and w_1 are automatically initialized from these
    functions elsewhere in the code.
    Note: You only need to specify the initial states that are nonzero.
    """
    w_init_field = dict()
    if not restart_folder:
        if enable_NS:
            # Collapse the velocity subspace when possible; fall back to the
            # subspace itself (broad except kept deliberately -- best effort).
            try:
                subspace = field_to_subspace["u"].collapse()
            except:
                subspace = field_to_subspace["u"]
            # velocity_init args: length inlet, water inflow,
            # X (0) or Y (1) dir flow,
            # Positive/neg flow along axis (+1/-1),
            # Hmin value
            u_init = velocity_init(H[0], inlet_velocity, 0, 1, Hmin[0])
            w_init_field["u"] = df.interpolate(u_init, subspace)
        # Phase field: start uniformly at phi = +1 (phase 1 = oil, per
        # the parameter comments in problem()).
        if enable_PF:
            w_init_field["phi"] = df.interpolate(
                df.Constant(1.),
                field_to_subspace["phi"].collapse())
    return w_init_field
def create_bcs(dim, H, Hmin, inlet_velocity, inlet_velocityOil,
               V_0, solutes, subdomains_file, WaterOilInlet,
               concentration_left,
               interface_thickness,
               enable_NS, enable_PF, enable_EC,
               mesh, boundaries_Facet, contact_angle, **namespace):
    """ The boundaries and boundary conditions are defined here.

    WaterOilInlet selects the active inlets (0 = all, 1 = water inlet only,
    2 = oil inlets only; see problem()). Returns
    (boundaries, bcs, bcs_pointwise).
    """
    # Load the facet subdomain markers (created in GMSH, converted by Meshio).
    mvc = df.MeshValueCollection("size_t", mesh, dim-1)
    with df.XDMFFile(subdomains_file) as infile:
        infile.read(mvc, "name_to_read")
    facet_domains = df.cpp.mesh.MeshFunctionSizet(mesh, mvc)
    # Re-create boundaries with facet_domain for mesh relevance
    boundaries = dict(
        inlet = [facet_domains, boundaries_Facet["inlet"]],
        inletT = [facet_domains, boundaries_Facet["inletT"]],
        inletB = [facet_domains, boundaries_Facet["inletB"]],
        outlet = [facet_domains, boundaries_Facet["outlet"]],
        wallLR = [facet_domains, boundaries_Facet["wallLR"]],
        wallRL = [facet_domains, boundaries_Facet["wallRL"]]
    )
    # Allocating the boundary dicts
    bcs = dict()
    bcs_pointwise = dict()
    for boundary in boundaries:
        bcs[boundary] = dict()
    ### Velocity has 3 inlets in this example due
    # to the flow focusing pinching aspect
    ## Velocity Phase Flow In
    #length inlet, water inflow, X or Y, Positive/neg flow along axis
    if not WaterOilInlet == 2:
        velocity_expr = velocity_init(H[0], inlet_velocity, 0, 1, Hmin[0])
        velocity_in = Fixed(velocity_expr)
        if enable_NS:
            bcs["inlet"]["u"] = velocity_in
            # Water-only mode: close the oil inlets with no-slip walls.
            if WaterOilInlet == 1:
                bcs["inletT"]["u"] = NoSlip()
                bcs["inletB"]["u"] = NoSlip()
    ## Velocity Top In
    #length inlet, water inflow, X or Y, Positive/neg flow along axis
    if not WaterOilInlet == 1:
        velocity_expr = velocity_init(H[1], inlet_velocityOil, 1, -1, Hmin[1])
        velocity_inT = Fixed(velocity_expr)
    ## Velocity Bottom In
    #length inlet, water inflow, X or Y, Positive/neg flow along axis
    if not WaterOilInlet == 1:
        velocity_expr = velocity_init(H[1], inlet_velocityOil, 1, 1, Hmin[1])
        velocity_inB = Fixed(velocity_expr)
        if enable_NS:
            bcs["inletT"]["u"] = velocity_inT
            bcs["inletB"]["u"] = velocity_inB
            # Oil-only mode: close the water inlet with a no-slip wall.
            if WaterOilInlet == 2:
                bcs["inlet"]["u"] = NoSlip()
    pressure_out = Pressure(0.0)
    noslip = NoSlip()
    V_left = Fixed(V_0)
    V_right = Fixed(0.)
    ## Define boundaries
    # Note we have one outlet and two sets of walls
    # from experience (FEniCS), opposite boundaries can
    # behave badly when all grouped
    if enable_NS:
        bcs["outlet"]["p"] = pressure_out
        bcs["wallLR"]["u"] = noslip
        bcs["wallRL"]["u"] = noslip
    # Phase field uses an expression `tanH` which defines PF drop off
    if enable_PF:
        phi_expr = df.Expression(
            "tanh((abs((x[1]-Hmin)-H/2)-H/16)/(sqrt(2)*eps))",
            H=H[0], Hmin = Hmin[0], eps=interface_thickness,
            degree=2)
        phi_inlet = Fixed(phi_expr)
        ## PF Fixed across boundary
        # as no water can enter oil inlet
        # and vice-versa
        bcs["inlet"]["phi"] = Fixed(df.Constant(-1.))
        bcs["inletT"]["phi"] = Fixed(df.Constant(1.))
        bcs["inletB"]["phi"] = Fixed(df.Constant(1.))
        ## Add contact angle to NS No-Slip Boundaries
        bcs["wallLR"]["phi"] = ContactAngle(contact_angle)
        bcs["wallRL"]["phi"] = ContactAngle(contact_angle)
    return boundaries, bcs, bcs_pointwise
def initial_phasefield(x0, y0, rad, eps, function_space):
    """Interpolate the initial tanh phase-field profile onto function_space.

    The tanh expression defines the phase-field drop off. It depends only on
    x[0] (the field evolves in the channel over time); ``y0`` and ``rad`` are
    still passed to the Expression for interface compatibility.
    """
    phase_expr = df.Expression(
        "tanh((x[0]-x0)/(sqrt(2)*eps))",
        x0=x0, y0=y0, rad=rad, eps=eps, degree=2)
    return df.interpolate(phase_expr, function_space)
def velocity_init(H, inlet_velocity, XY, Pos, xyMin, degree=2):
    """Parabolic (Poiseuille) inflow Expression along one axis.

    Args: inlet length ``H``, peak velocity ``inlet_velocity``, flow axis
    ``XY`` (0 = along x, otherwise along y), direction sign ``Pos``
    (+1/-1) and the inlet's minimum coordinate ``xyMin``.

    For a constant (plug) inflow instead, replace the body with
    df.Expression(("U","0.0"), U=inlet_velocity, degree=degree) and set the
    flow axis manually.
    """
    if XY == 0:
        profile = ("Pos*4*U*(x[1] - xyMin)*(H-(x[1] - xyMin))/pow(H, 2)", "0.0")
    else:
        profile = ("0.0", "Pos*4*U*(x[0] - xyMin)*(H-(x[0] - xyMin))/pow(H, 2)")
    return df.Expression(
        profile, Pos=Pos, H=H, U=inlet_velocity, xyMin=xyMin, degree=degree)
def tstep_hook(t, tstep, stats_intv, statsfile, field_to_subspace,
               field_to_subproblem, subproblems, w_, **namespace):
    """Hook executed every simulation tick: report the current timestep."""
    info_blue(f"Timestep = {tstep}")
def pf_mobility(phi, gamma):
    """Phase field mobility function (controls PF mobility over time).

    Currently constant: the mobility is simply ``gamma`` regardless of
    ``phi``. Degenerate, phi-dependent variants (e.g. proportional to
    1 - phi**2) are possible alternatives.
    """
    return gamma
def start_hook(newfolder, **namespace):
    """Hook executed at simulation start: locate the statistics file."""
    stats_path = os.path.join(newfolder, "Statistics/stats.dat")
    return dict(statsfile=stats_path)
"common.io.mpi_is_root",
"numpy.amin",
"dolfin.MeshValueCollection",
"dolfin.Constant",
"os.path.join",
"common.io.mpi_gather",
"common.bcs.ContactAngle",
"dolfin.Mesh",
"dolfin.Expression",
"common.io.mpi_bcast",
"dolfin.vertices",
"common.io.mpi_barrier",
"dolfin.cpp.mesh.MeshFunctionSizet... | [((1127, 1140), 'common.io.mpi_is_root', 'mpi_is_root', ([], {}), '()\n', (1138, 1140), False, 'from common.io import mpi_is_root, load_mesh, mpi_barrier, mpi_comm, mpi_bcast, mpi_gather\n'), ((1308, 1355), 'dolfin.MeshValueCollection', 'df.MeshValueCollection', (['"""size_t"""', 'mesh', '(dim - 1)'], {}), "('size_t', mesh, dim - 1)\n", (1330, 1355), True, 'import dolfin as df\n'), ((1476, 1516), 'dolfin.cpp.mesh.MeshFunctionSizet', 'df.cpp.mesh.MeshFunctionSizet', (['mesh', 'mvc'], {}), '(mesh, mvc)\n', (1505, 1516), True, 'import dolfin as df\n'), ((1803, 1844), 'dolfin.SubsetIterator', 'df.SubsetIterator', (['facet_domains', 'faceNum'], {}), '(facet_domains, faceNum)\n', (1820, 1844), True, 'import dolfin as df\n'), ((1848, 1861), 'common.io.mpi_barrier', 'mpi_barrier', ([], {}), '()\n', (1859, 1861), False, 'from common.io import mpi_is_root, load_mesh, mpi_barrier, mpi_comm, mpi_bcast, mpi_gather\n'), ((2350, 2363), 'common.io.mpi_barrier', 'mpi_barrier', ([], {}), '()\n', (2361, 2363), False, 'from common.io import mpi_is_root, load_mesh, mpi_barrier, mpi_comm, mpi_bcast, mpi_gather\n'), ((2417, 2433), 'common.io.mpi_gather', 'mpi_gather', (['X', '(0)'], {}), '(X, 0)\n', (2427, 2433), False, 'from common.io import mpi_is_root, load_mesh, mpi_barrier, mpi_comm, mpi_bcast, mpi_gather\n'), ((2442, 2458), 'common.io.mpi_gather', 'mpi_gather', (['Y', '(0)'], {}), '(Y, 0)\n', (2452, 2458), False, 'from common.io import mpi_is_root, load_mesh, mpi_barrier, mpi_comm, mpi_bcast, mpi_gather\n'), ((2518, 2531), 'common.io.mpi_barrier', 'mpi_barrier', ([], {}), '()\n', (2529, 2531), False, 'from common.io import mpi_is_root, load_mesh, mpi_barrier, mpi_comm, mpi_bcast, mpi_gather\n'), ((2544, 2557), 'common.io.mpi_is_root', 'mpi_is_root', ([], {}), '()\n', (2555, 2557), False, 'from common.io import mpi_is_root, load_mesh, mpi_barrier, mpi_comm, mpi_bcast, mpi_gather\n'), ((2915, 2928), 'common.io.mpi_barrier', 'mpi_barrier', ([], 
{}), '()\n', (2926, 2928), False, 'from common.io import mpi_is_root, load_mesh, mpi_barrier, mpi_comm, mpi_bcast, mpi_gather\n'), ((3003, 3029), 'common.io.mpi_bcast', 'mpi_bcast', (['xInflowRange', '(0)'], {}), '(xInflowRange, 0)\n', (3012, 3029), False, 'from common.io import mpi_is_root, load_mesh, mpi_barrier, mpi_comm, mpi_bcast, mpi_gather\n'), ((3049, 3075), 'common.io.mpi_bcast', 'mpi_bcast', (['yInflowRange', '(0)'], {}), '(yInflowRange, 0)\n', (3058, 3075), False, 'from common.io import mpi_is_root, load_mesh, mpi_barrier, mpi_comm, mpi_bcast, mpi_gather\n'), ((3093, 3117), 'common.io.mpi_bcast', 'mpi_bcast', (['xInflowMin', '(0)'], {}), '(xInflowMin, 0)\n', (3102, 3117), False, 'from common.io import mpi_is_root, load_mesh, mpi_barrier, mpi_comm, mpi_bcast, mpi_gather\n'), ((3135, 3159), 'common.io.mpi_bcast', 'mpi_bcast', (['yInflowMin', '(0)'], {}), '(yInflowMin, 0)\n', (3144, 3159), False, 'from common.io import mpi_is_root, load_mesh, mpi_barrier, mpi_comm, mpi_bcast, mpi_gather\n'), ((8154, 8167), 'common.io.mpi_barrier', 'mpi_barrier', ([], {}), '()\n', (8165, 8167), False, 'from common.io import mpi_is_root, load_mesh, mpi_barrier, mpi_comm, mpi_bcast, mpi_gather\n'), ((8420, 8429), 'dolfin.Mesh', 'df.Mesh', ([], {}), '()\n', (8427, 8429), True, 'import dolfin as df\n'), ((8697, 8710), 'common.io.mpi_barrier', 'mpi_barrier', ([], {}), '()\n', (8708, 8710), False, 'from common.io import mpi_is_root, load_mesh, mpi_barrier, mpi_comm, mpi_bcast, mpi_gather\n'), ((8756, 8769), 'common.io.mpi_barrier', 'mpi_barrier', ([], {}), '()\n', (8767, 8769), False, 'from common.io import mpi_is_root, load_mesh, mpi_barrier, mpi_comm, mpi_bcast, mpi_gather\n'), ((8782, 8801), 'common.io.mpi_bcast', 'mpi_bcast', (['xymin', '(0)'], {}), '(xymin, 0)\n', (8791, 8801), False, 'from common.io import mpi_is_root, load_mesh, mpi_barrier, mpi_comm, mpi_bcast, mpi_gather\n'), ((8806, 8819), 'common.io.mpi_barrier', 'mpi_barrier', ([], {}), '()\n', (8817, 8819), False, 
'from common.io import mpi_is_root, load_mesh, mpi_barrier, mpi_comm, mpi_bcast, mpi_gather\n'), ((9007, 9020), 'common.io.mpi_is_root', 'mpi_is_root', ([], {}), '()\n', (9018, 9020), False, 'from common.io import mpi_is_root, load_mesh, mpi_barrier, mpi_comm, mpi_bcast, mpi_gather\n'), ((9098, 9111), 'common.io.mpi_barrier', 'mpi_barrier', ([], {}), '()\n', (9109, 9111), False, 'from common.io import mpi_is_root, load_mesh, mpi_barrier, mpi_comm, mpi_bcast, mpi_gather\n'), ((11916, 11963), 'dolfin.MeshValueCollection', 'df.MeshValueCollection', (['"""size_t"""', 'mesh', '(dim - 1)'], {}), "('size_t', mesh, dim - 1)\n", (11938, 11963), True, 'import dolfin as df\n'), ((12073, 12113), 'dolfin.cpp.mesh.MeshFunctionSizet', 'df.cpp.mesh.MeshFunctionSizet', (['mesh', 'mvc'], {}), '(mesh, mvc)\n', (12102, 12113), True, 'import dolfin as df\n'), ((13982, 13995), 'common.bcs.Pressure', 'Pressure', (['(0.0)'], {}), '(0.0)\n', (13990, 13995), False, 'from common.bcs import Fixed, Pressure, NoSlip, ContactAngle\n'), ((14009, 14017), 'common.bcs.NoSlip', 'NoSlip', ([], {}), '()\n', (14015, 14017), False, 'from common.bcs import Fixed, Pressure, NoSlip, ContactAngle\n'), ((14032, 14042), 'common.bcs.Fixed', 'Fixed', (['V_0'], {}), '(V_0)\n', (14037, 14042), False, 'from common.bcs import Fixed, Pressure, NoSlip, ContactAngle\n'), ((14057, 14067), 'common.bcs.Fixed', 'Fixed', (['(0.0)'], {}), '(0.0)\n', (14062, 14067), False, 'from common.bcs import Fixed, Pressure, NoSlip, ContactAngle\n'), ((15454, 15519), 'dolfin.Expression', 'df.Expression', (['expr_str'], {'x0': 'x0', 'y0': 'y0', 'rad': 'rad', 'eps': 'eps', 'degree': '(2)'}), '(expr_str, x0=x0, y0=y0, rad=rad, eps=eps, degree=2)\n', (15467, 15519), True, 'import dolfin as df\n'), ((15569, 15614), 'dolfin.interpolate', 'df.interpolate', (['phi_init_expr', 'function_space'], {}), '(phi_init_expr, function_space)\n', (15583, 15614), True, 'import dolfin as df\n'), ((16946, 16993), 'os.path.join', 'os.path.join', (['newfolder', 
'"""Statistics/stats.dat"""'], {}), "(newfolder, 'Statistics/stats.dat')\n", (16958, 16993), False, 'import os\n'), ((2185, 2211), 'dolfin.vertices', 'df.vertices', (['facet_domains'], {}), '(facet_domains)\n', (2196, 2211), True, 'import dolfin as df\n'), ((2617, 2634), 'numpy.concatenate', 'np.concatenate', (['X'], {}), '(X)\n', (2631, 2634), True, 'import numpy as np\n'), ((2647, 2664), 'numpy.concatenate', 'np.concatenate', (['Y'], {}), '(Y)\n', (2661, 2664), True, 'import numpy as np\n'), ((2730, 2747), 'numpy.ptp', 'np.ptp', (['X'], {'axis': '(0)'}), '(X, axis=0)\n', (2736, 2747), True, 'import numpy as np\n'), ((2770, 2787), 'numpy.ptp', 'np.ptp', (['Y'], {'axis': '(0)'}), '(Y, axis=0)\n', (2776, 2787), True, 'import numpy as np\n'), ((2808, 2818), 'numpy.amin', 'np.amin', (['X'], {}), '(X)\n', (2815, 2818), True, 'import numpy as np\n'), ((2840, 2850), 'numpy.amin', 'np.amin', (['Y'], {}), '(Y)\n', (2847, 2850), True, 'import numpy as np\n'), ((8439, 8461), 'dolfin.XDMFFile', 'df.XDMFFile', (['mesh_file'], {}), '(mesh_file)\n', (8450, 8461), True, 'import dolfin as df\n'), ((8730, 8750), 'common.io.mpi_gather', 'mpi_gather', (['xymin', '(0)'], {}), '(xymin, 0)\n', (8740, 8750), False, 'from common.io import mpi_is_root, load_mesh, mpi_barrier, mpi_comm, mpi_bcast, mpi_gather\n'), ((9431, 9444), 'common.io.mpi_barrier', 'mpi_barrier', ([], {}), '()\n', (9442, 9444), False, 'from common.io import mpi_is_root, load_mesh, mpi_barrier, mpi_comm, mpi_bcast, mpi_gather\n'), ((9456, 9469), 'common.io.mpi_is_root', 'mpi_is_root', ([], {}), '()\n', (9467, 9469), False, 'from common.io import mpi_is_root, load_mesh, mpi_barrier, mpi_comm, mpi_bcast, mpi_gather\n'), ((10100, 10113), 'common.io.mpi_barrier', 'mpi_barrier', ([], {}), '()\n', (10111, 10113), False, 'from common.io import mpi_is_root, load_mesh, mpi_barrier, mpi_comm, mpi_bcast, mpi_gather\n'), ((11972, 12000), 'dolfin.XDMFFile', 'df.XDMFFile', (['subdomains_file'], {}), '(subdomains_file)\n', (11983, 
12000), True, 'import dolfin as df\n'), ((13052, 13072), 'common.bcs.Fixed', 'Fixed', (['velocity_expr'], {}), '(velocity_expr)\n', (13057, 13072), False, 'from common.bcs import Fixed, Pressure, NoSlip, ContactAngle\n'), ((13494, 13514), 'common.bcs.Fixed', 'Fixed', (['velocity_expr'], {}), '(velocity_expr)\n', (13499, 13514), False, 'from common.bcs import Fixed, Pressure, NoSlip, ContactAngle\n'), ((13746, 13766), 'common.bcs.Fixed', 'Fixed', (['velocity_expr'], {}), '(velocity_expr)\n', (13751, 13766), False, 'from common.bcs import Fixed, Pressure, NoSlip, ContactAngle\n'), ((14494, 14619), 'dolfin.Expression', 'df.Expression', (['"""tanh((abs((x[1]-Hmin)-H/2)-H/16)/(sqrt(2)*eps))"""'], {'H': 'H[0]', 'Hmin': 'Hmin[0]', 'eps': 'interface_thickness', 'degree': '(2)'}), "('tanh((abs((x[1]-Hmin)-H/2)-H/16)/(sqrt(2)*eps))', H=H[0],\n Hmin=Hmin[0], eps=interface_thickness, degree=2)\n", (14507, 14619), True, 'import dolfin as df\n'), ((14675, 14690), 'common.bcs.Fixed', 'Fixed', (['phi_expr'], {}), '(phi_expr)\n', (14680, 14690), False, 'from common.bcs import Fixed, Pressure, NoSlip, ContactAngle\n'), ((15044, 15071), 'common.bcs.ContactAngle', 'ContactAngle', (['contact_angle'], {}), '(contact_angle)\n', (15056, 15071), False, 'from common.bcs import Fixed, Pressure, NoSlip, ContactAngle\n'), ((15103, 15130), 'common.bcs.ContactAngle', 'ContactAngle', (['contact_angle'], {}), '(contact_angle)\n', (15115, 15130), False, 'from common.bcs import Fixed, Pressure, NoSlip, ContactAngle\n'), ((15802, 15944), 'dolfin.Expression', 'df.Expression', (["('Pos*4*U*(x[1] - xyMin)*(H-(x[1] - xyMin))/pow(H, 2)', '0.0')"], {'Pos': 'Pos', 'H': 'H', 'U': 'inlet_velocity', 'xyMin': 'xyMin', 'degree': 'degree'}), "(('Pos*4*U*(x[1] - xyMin)*(H-(x[1] - xyMin))/pow(H, 2)', '0.0'\n ), Pos=Pos, H=H, U=inlet_velocity, xyMin=xyMin, degree=degree)\n", (15815, 15944), True, 'import dolfin as df\n'), ((16005, 16147), 'dolfin.Expression', 'df.Expression', (["('0.0', 'Pos*4*U*(x[0] - 
xyMin)*(H-(x[0] - xyMin))/pow(H, 2)')"], {'Pos': 'Pos', 'H': 'H', 'U': 'inlet_velocity', 'xyMin': 'xyMin', 'degree': 'degree'}), "(('0.0', 'Pos*4*U*(x[0] - xyMin)*(H-(x[0] - xyMin))/pow(H, 2)'\n ), Pos=Pos, H=H, U=inlet_velocity, xyMin=xyMin, degree=degree)\n", (16018, 16147), True, 'import dolfin as df\n'), ((1375, 1385), 'common.io.mpi_comm', 'mpi_comm', ([], {}), '()\n', (1383, 1385), False, 'from common.io import mpi_is_root, load_mesh, mpi_barrier, mpi_comm, mpi_bcast, mpi_gather\n'), ((11284, 11316), 'dolfin.interpolate', 'df.interpolate', (['u_init', 'subspace'], {}), '(u_init, subspace)\n', (11298, 11316), True, 'import dolfin as df\n'), ((14833, 14850), 'dolfin.Constant', 'df.Constant', (['(-1.0)'], {}), '(-1.0)\n', (14844, 14850), True, 'import dolfin as df\n'), ((14888, 14904), 'dolfin.Constant', 'df.Constant', (['(1.0)'], {}), '(1.0)\n', (14899, 14904), True, 'import dolfin as df\n'), ((14942, 14958), 'dolfin.Constant', 'df.Constant', (['(1.0)'], {}), '(1.0)\n', (14953, 14958), True, 'import dolfin as df\n'), ((11427, 11443), 'dolfin.Constant', 'df.Constant', (['(1.0)'], {}), '(1.0)\n', (11438, 11443), True, 'import dolfin as df\n'), ((13211, 13219), 'common.bcs.NoSlip', 'NoSlip', ([], {}), '()\n', (13217, 13219), False, 'from common.bcs import Fixed, Pressure, NoSlip, ContactAngle\n'), ((13257, 13265), 'common.bcs.NoSlip', 'NoSlip', ([], {}), '()\n', (13263, 13265), False, 'from common.bcs import Fixed, Pressure, NoSlip, ContactAngle\n'), ((13952, 13960), 'common.bcs.NoSlip', 'NoSlip', ([], {}), '()\n', (13958, 13960), False, 'from common.bcs import Fixed, Pressure, NoSlip, ContactAngle\n')] |
"""
Created on 5/2/2015
@author: victor
"""
import numpy
class BaharServerFilesParser(object):
    """Parser for eigenvalue/eigenvector files produced by the Bahar ANM
    web server (http://anm.csb.pitt.edu/cgi-bin/anm2/anm2.cgi)."""

    def __init__(self):
        pass

    @classmethod
    def read(cls, eigenvalues_file_path, eigenvectors_file_path):
        """Load eigenvalues (optional) and eigenvectors.

        Returns a tuple ``(eigenvalues, eigenvectors)``; ``eigenvalues``
        is an empty list when no eigenvalue file path is given.
        """
        eigenvalues = []
        if eigenvalues_file_path not in (None, ""):
            eigenvalues = cls.__read_eigenvalues(eigenvalues_file_path)
        eigenvectors = cls.__read_eigenvectors(eigenvectors_file_path)
        return (eigenvalues, eigenvectors)

    @classmethod
    def read_beta(cls, beta_factors_file):
        """Load beta factors; "*.bfactors" files share the eigenvalue
        file format, so the same reader is reused."""
        return cls.__read_eigenvalues(beta_factors_file)

    @classmethod
    def __read_eigenvalues(cls, eigenvalues_file_path):
        """Read an enumerated eigenvalue file, e.g.::

            1 Eigval_1
            2 Eigval_2
            ...
            N Eigval_N

        and return only the value column (the first 6 eigenvalues are
        expected to be 0 for a rigid-body-invariant ANM)."""
        table = numpy.loadtxt(eigenvalues_file_path)
        return table.T[1]

    @classmethod
    def __read_eigenvectors(cls, eigenvectors_file_path):
        """Read a file whose columns (after the first, an index column)
        are the eigenvectors; returns them as rows."""
        table = numpy.loadtxt(eigenvectors_file_path)
        return table.T[1:]
| [
"numpy.loadtxt"
] | [((1142, 1178), 'numpy.loadtxt', 'numpy.loadtxt', (['eigenvalues_file_path'], {}), '(eigenvalues_file_path)\n', (1155, 1178), False, 'import numpy\n'), ((1372, 1409), 'numpy.loadtxt', 'numpy.loadtxt', (['eigenvectors_file_path'], {}), '(eigenvectors_file_path)\n', (1385, 1409), False, 'import numpy\n')] |
import numpy as np
from pyrr import Vector3
from .mesh import Mesh
from .triangle import Triangle
def parse_index(value: str, length: int) -> int:
    """Convert one OBJ face-vertex reference (e.g. ``"3/1/2"``) into a
    positive 1-based vertex index.

    Only the vertex part (before the first ``/``) is used; negative
    indices are resolved relative to *length*, the number of vertices
    read so far, per the OBJ specification.
    """
    raw = int(value.split("/")[0])
    return raw + length if raw < 0 else raw
def load_obj(path: str) -> Mesh:
    """Parse a Wavefront OBJ file into a :class:`Mesh`.

    Only ``v`` (vertex) and ``f`` (face) records are handled; any other
    record type is ignored.  Faces with more than three vertices are
    fan-triangulated around their first vertex.  Negative face indices
    are resolved relative to the vertices read so far.
    """
    vertices = []
    faces = []
    with open(path, "r") as handle:
        for raw_line in handle:
            tokens = raw_line.split()
            if not tokens:
                continue
            record, payload = tokens[0], tokens[1:]
            if record == "v":
                coords = np.array(payload, dtype=np.float32)
                vertices.append(Vector3(coords))
            elif record == "f":
                indices = [parse_index(tok, len(vertices)) for tok in payload]
                # Fan triangulation: (v0, v_k, v_{k+1}) for each k.
                for k in range(1, len(indices) - 1):
                    a, b, c = indices[0], indices[k], indices[k + 1]
                    faces.append(
                        Triangle(vertices[a - 1], vertices[b - 1], vertices[c - 1]))
    return Mesh(faces)
| [
"pyrr.Vector3",
"numpy.array"
] | [((672, 704), 'numpy.array', 'np.array', (['args'], {'dtype': 'np.float32'}), '(args, dtype=np.float32)\n', (680, 704), True, 'import numpy as np\n'), ((725, 735), 'pyrr.Vector3', 'Vector3', (['f'], {}), '(f)\n', (732, 735), False, 'from pyrr import Vector3\n')] |
import sympy as sy
import numpy as np
from .dhmatrix import DHMatrix
class SpeedKinematics:
    """Differential (speed) kinematics: Jacobians and velocity mappings
    built from a symbolic homogeneous-transform robot model."""
    @classmethod
    def linear_speed(cls, transform, args: dict, delta: list):
        # Linear end-effector velocity: v = J_linear * q_dot, where
        # `delta` holds the joint rates and `args` the joint values.
        yak = cls.jacobi_linear(transform, args)
        return np.dot(yak, np.array(delta).T)
    @staticmethod
    def jacobi_linear(transform, args: dict):
        # 3 x n Jacobian of the position part: d(p_i)/d(q_j), where p is
        # the translation column (column 3) of `transform`, differentiated
        # symbolically and then evaluated numerically at `args`.
        # NOTE(review): assumes `transform` is a sympy homogeneous
        # transform whose rows 0..2 carry position -- TODO confirm.
        argNames = list(args.keys())
        return np.array(
            [[sy.diff(transform.row(i).col(3)[0], argNames[j]).subs(args).n() for j in range(len(args))] for i in
             range(3)], dtype='float')
    @classmethod
    def angle_speed(cls, frames, args: dict, delta: list):
        # Angular velocity: omega = J_angular * q_dot.
        yak = cls.jacobi_angle(frames, args)
        return np.dot(yak, np.array(delta).T).T
    @staticmethod
    def jacobi_angle(frames, args: dict):
        # Columns are the z-axes (column 2 of each rotation matrix) of
        # consecutive frames evaluated at `args`.
        # NOTE(review): presumably `frames` are DH parameter sets consumed
        # by DHMatrix.rotate -- verify against the dhmatrix module.
        return np.array([np.array(DHMatrix.rotate(frames[j]).evalf(subs=args).col(2)) for j in range(len(frames) - 1)],
                        dtype='float').T[0]
| [
"numpy.array"
] | [((289, 304), 'numpy.array', 'np.array', (['delta'], {}), '(delta)\n', (297, 304), True, 'import numpy as np\n'), ((737, 752), 'numpy.array', 'np.array', (['delta'], {}), '(delta)\n', (745, 752), True, 'import numpy as np\n')] |
from argparse import ArgumentParser
import os
import random
import string
import sys
import numpy as np
from logger import get_logger
# Module-level logger; main() later re-creates it with a log-file path.
logger = get_logger(__name__)
###
# file system
###
def make_dirs(path, empty=False):
    """Ensure the parent directory of *path* exists and return it.

    When *empty* is true, every regular file directly inside that
    directory is deleted; sub-directories are left untouched.
    """
    directory = os.path.dirname(path)
    os.makedirs(directory, exist_ok=True)
    if empty:
        for entry in os.listdir(directory):
            full = os.path.join(directory, entry)
            if os.path.isfile(full):
                os.remove(full)
    return directory
def path_join(*paths, empty=False):
    """Join *paths* into an absolute path and ensure a directory exists
    for it via make_dirs (note that make_dirs itself takes the dirname
    of its argument, so it is the joined path's parent directory that is
    passed down, and *its* parent that gets created/emptied).

    Returns the absolute joined path.
    """
    absolute = os.path.abspath(os.path.join(*paths))
    make_dirs(os.path.dirname(absolute), empty)
    return absolute
def load_text(text_path):
    """Read the whole text file at *text_path*, log its length, and
    return the contents as a single string."""
    with open(text_path) as handle:
        corpus = handle.read()
    logger.info("corpus length: %s.", len(corpus))
    return corpus
###
# data processing
###
def create_dictionary():
    """Build the default char<->id tables from printable ASCII.

    Vertical tab, form feed and carriage return are excluded; ids start
    at 1, with id 0 reserved for the empty string.  Returns
    ``(char2id, id2char, vocab_size)``.
    """
    excluded = ("\x0b", "\x0c", "\r")
    chars = sorted(c for c in string.printable if c not in excluded)
    char2id = {c: i + 1 for i, c in enumerate(chars)}
    char2id[""] = 0
    id2char = {i: c for c, i in char2id.items()}
    return char2id, id2char, len(char2id)
# Module-wide default dictionaries; rebuild_dictionary() replaces them
# with corpus-specific tables, and the get_*() accessors read them.
CHAR2ID, ID2CHAR, VOCAB_SIZE = create_dictionary()
def rebuild_dictionary(text):
    """Rebuild the module-level CHAR2ID/ID2CHAR/VOCAB_SIZE tables from a
    corpus, keeping at most the 99 most frequent characters; id 0 stays
    reserved for the empty string."""
    global CHAR2ID, ID2CHAR, VOCAB_SIZE
    counts = {c: text.count(c) for c in set(text)}
    kept = sorted(counts, key=counts.get, reverse=True)[:99]
    logger.info(kept)
    logger.info("total chars: %d", len(kept))
    CHAR2ID = {ch: i + 1 for i, ch in enumerate(kept)}
    CHAR2ID[""] = 0
    ID2CHAR = {i: ch for ch, i in CHAR2ID.items()}
    VOCAB_SIZE = len(CHAR2ID)
def get_CHAR2ID():
    """Return the current module-level char -> id mapping."""
    return CHAR2ID
def get_ID2CHAR():
    """Return the current module-level id -> char mapping."""
    return ID2CHAR
def get_VOCAB_SIZE():
    """Return the current vocabulary size (including the id-0 entry)."""
    return VOCAB_SIZE
def encode_text(text, char2id):
    """Encode *text* into an int array using *char2id*; characters not
    in the mapping become 0."""
    ids = (char2id.get(ch, 0) for ch in text)
    return np.fromiter(ids, int)
def decode_text(int_array, id2char):
    """Decode a sequence of ids back into a string using *id2char*."""
    chars = [id2char[i] for i in int_array]
    return "".join(chars)
def one_hot_encode(indices, num_classes):
    """One-hot encode *indices* by indexing into an identity matrix of
    size *num_classes*."""
    identity = np.eye(num_classes)
    return identity[indices]
def batch_generator(sequence, batch_size=64, seq_len=64, one_hot_features=False, one_hot_labels=False):
    """Yield (x, y) training batches forever from an encoded sequence.

    Batches are continuous along axis 1 so that RNN hidden states can be
    kept across batches and epochs.  Targets are the inputs shifted by
    one position (y[..., t] corresponds to x[..., t + 1]).

    :param sequence: 1-d integer-encoded text.
    :param batch_size: rows per batch.
    :param seq_len: time steps per batch.
    :param one_hot_features: one-hot encode x with the current vocabulary.
    :param one_hot_labels: one-hot encode y with the current vocabulary.
    :raises ValueError: if the sequence is too short for a single batch.
    """
    # calculate effective length of text to use
    num_batches = (len(sequence) - 1) // (batch_size * seq_len)
    if num_batches == 0:
        raise ValueError("No batches created. Use smaller batch size or sequence length.")
    logger.info("number of batches: %s.", num_batches)
    rounded_len = num_batches * batch_size * seq_len
    logger.info("effective text length: %s.", rounded_len)
    # x rows are batch_size parallel streams through the text.
    x = np.reshape(sequence[: rounded_len], [batch_size, num_batches * seq_len])
    if one_hot_features:
        x = one_hot_encode(x, get_VOCAB_SIZE())
    logger.info("x shape: %s.", x.shape)
    # y is x shifted by one character (next-character targets).
    y = np.reshape(sequence[1: rounded_len + 1], [batch_size, num_batches * seq_len])
    if one_hot_labels:
        y = one_hot_encode(y, get_VOCAB_SIZE())
    logger.info("y shape: %s.", y.shape)
    epoch = 0
    while True:
        # roll so that no need to reset rnn states over epochs
        x_epoch = np.split(np.roll(x, -epoch, axis=0), num_batches, axis=1)
        y_epoch = np.split(np.roll(y, -epoch, axis=0), num_batches, axis=1)
        for batch in range(num_batches):
            yield x_epoch[batch], y_epoch[batch]
        epoch += 1
###
# text generation
###
def generate_seed(text, seq_lens=(2, 4, 8, 16, 32)):
    """Return a random contiguous substring of *text* whose length is
    drawn uniformly from *seq_lens*."""
    length = random.choice(seq_lens)
    start = random.randint(0, len(text) - length - 1)
    return text[start: start + length]
def sample_from_probs(probs, top_n=None):
    """Weighted random index into *probs*, optionally truncated to the
    *top_n* largest probabilities before renormalising.

    float64 precision is used so the renormalised weights sum close
    enough to 1 for np.random.choice.
    """
    weights = np.array(probs, dtype=np.float64)
    if top_n:
        # Zero out everything except the top_n largest entries.
        smallest = np.argsort(weights)[:-top_n]
        weights[smallest] = 0
    weights = weights / np.sum(weights)
    return np.random.choice(len(weights), p=weights)
###
# main
###
def main(framework, train_main, generate_main, test_main):
    """Shared command-line entry point for the per-framework scripts.

    Builds an argument parser with three subcommands -- train, generate
    and test -- wiring each to the handler passed in, rebuilds the
    character dictionary from --text-path, and dispatches to the chosen
    handler.

    :param framework: framework name, used only in the CLI description.
    :param train_main: callable invoked for the "train" subcommand.
    :param generate_main: callable invoked for the "generate" subcommand.
    :param test_main: callable invoked for the "test" subcommand.
    """
    arg_parser = ArgumentParser(
        description="{} character embeddings LSTM text generation model.".format(framework))
    subparsers = arg_parser.add_subparsers(title="subcommands")
    # train args
    train_parser = subparsers.add_parser("train", help="train model on text file")
    train_parser.add_argument("--checkpoint-path", required=True,
                              help="path to save or load model checkpoints (required)")
    train_parser.add_argument("--text-path", required=True,
                              help="path of text file for training (required)")
    train_parser.add_argument("--restore", nargs="?", default=False, const=True,
                              help="whether to restore from checkpoint_path "
                                   "or from another path if specified")
    train_parser.add_argument("--seq-len", type=int, default=64,
                              help="sequence length of inputs and outputs (default: %(default)s)")
    train_parser.add_argument("--embedding-size", type=int, default=32,
                              help="character embedding size (default: %(default)s)")
    train_parser.add_argument("--rnn-size", type=int, default=128,
                              help="size of rnn cell (default: %(default)s)")
    train_parser.add_argument("--num-layers", type=int, default=2,
                              help="number of rnn layers (default: %(default)s)")
    train_parser.add_argument("--drop-rate", type=float, default=0.,
                              help="dropout rate for rnn layers (default: %(default)s)")
    train_parser.add_argument("--learning-rate", type=float, default=0.001,
                              help="learning rate (default: %(default)s)")
    train_parser.add_argument("--clip-norm", type=float, default=5.,
                              help="max norm to clip gradient (default: %(default)s)")
    train_parser.add_argument("--batch-size", type=int, default=64,
                              help="training batch size (default: %(default)s)")
    train_parser.add_argument("--num-epochs", type=int, default=32,
                              help="number of epochs for training (default: %(default)s)")
    train_parser.add_argument("--log-path", default=os.path.join(os.path.dirname(__file__), "main.log"),
                              help="path of log file (default: %(default)s)")
    train_parser.add_argument("--test-path", help="path of text file for testing")
    train_parser.set_defaults(main=train_main)
    # generate args
    generate_parser = subparsers.add_parser("generate", help="generate text from trained model")
    generate_parser.add_argument("--checkpoint-path", required=True,
                                 help="path to load model checkpoints (required)")
    #group = generate_parser.add_mutually_exclusive_group(required=True)
    #group.add_argument("--text-path", help="path of text file to generate seed and build dictionary")
    #group.add_argument("--seed", default=None, help="seed character sequence")
    generate_parser.add_argument("--text-path", required=True,
                                 help="path of train file for building a dictionary (required)")
    generate_parser.add_argument("--seed", default=None, help="seed character sequence")
    generate_parser.add_argument("--length", type=int, default=1024,
                                 help="length of character sequence to generate (default: %(default)s)")
    generate_parser.add_argument("--top-n", type=int, default=None,
                                 help="number of top choices to sample (default: %(default)s)")
    generate_parser.add_argument("--log-path", default=os.path.join(os.path.dirname(__file__), "main.log"),
                                 help="path of log file (default: %(default)s)")
    generate_parser.set_defaults(main=generate_main)
    # test args
    test_parser = subparsers.add_parser("test", help="test the model and calculate BPC")
    test_parser.add_argument("--checkpoint-path", required=True,
                             help="path to load model checkpoints (required)")
    test_parser.add_argument("--text-path", required=True,
                             help="path of train file for building a dictionary (required)")
    test_parser.add_argument("--test-path", required=True,
                             help="path of text file for testing (required)")
    test_parser.add_argument("--log-path", default=os.path.join(os.path.dirname(__file__), "main.log"),
                             help="path of log file (default: %(default)s)")
    test_parser.set_defaults(main=test_main)
    args = arg_parser.parse_args()
    get_logger("__main__", log_path=args.log_path, console=True)
    # NOTE: intentionally shadows the module-level logger with one that
    # also writes to the configured log file.
    logger = get_logger(__name__, log_path=args.log_path, console=True)
    logger.debug("call: %s", " ".join(sys.argv))
    logger.debug("ArgumentParser: %s", args)
    try:
        # Every subcommand requires --text-path: the dictionary must match
        # the corpus the model was (or will be) trained on.
        rebuild_dictionary(load_text(args.text_path))
        args.main(args)
    except Exception as e:
        logger.exception(e)
| [
"os.remove",
"numpy.sum",
"os.makedirs",
"numpy.roll",
"os.path.dirname",
"random.choice",
"numpy.argsort",
"os.path.isfile",
"logger.get_logger",
"numpy.reshape",
"numpy.array",
"numpy.eye",
"os.path.join",
"os.listdir"
] | [((146, 166), 'logger.get_logger', 'get_logger', (['__name__'], {}), '(__name__)\n', (156, 166), False, 'from logger import get_logger\n'), ((306, 327), 'os.path.dirname', 'os.path.dirname', (['path'], {}), '(path)\n', (321, 327), False, 'import os\n'), ((332, 368), 'os.makedirs', 'os.makedirs', (['dir_path'], {'exist_ok': '(True)'}), '(dir_path, exist_ok=True)\n', (343, 368), False, 'import os\n'), ((3281, 3352), 'numpy.reshape', 'np.reshape', (['sequence[:rounded_len]', '[batch_size, num_batches * seq_len]'], {}), '(sequence[:rounded_len], [batch_size, num_batches * seq_len])\n', (3291, 3352), True, 'import numpy as np\n'), ((3477, 3553), 'numpy.reshape', 'np.reshape', (['sequence[1:rounded_len + 1]', '[batch_size, num_batches * seq_len]'], {}), '(sequence[1:rounded_len + 1], [batch_size, num_batches * seq_len])\n', (3487, 3553), True, 'import numpy as np\n'), ((4220, 4243), 'random.choice', 'random.choice', (['seq_lens'], {}), '(seq_lens)\n', (4233, 4243), False, 'import random\n'), ((4556, 4589), 'numpy.array', 'np.array', (['probs'], {'dtype': 'np.float64'}), '(probs, dtype=np.float64)\n', (4564, 4589), True, 'import numpy as np\n'), ((4774, 4787), 'numpy.sum', 'np.sum', (['probs'], {}), '(probs)\n', (4780, 4787), True, 'import numpy as np\n'), ((9664, 9724), 'logger.get_logger', 'get_logger', (['"""__main__"""'], {'log_path': 'args.log_path', 'console': '(True)'}), "('__main__', log_path=args.log_path, console=True)\n", (9674, 9724), False, 'from logger import get_logger\n'), ((9738, 9796), 'logger.get_logger', 'get_logger', (['__name__'], {'log_path': 'args.log_path', 'console': '(True)'}), '(__name__, log_path=args.log_path, console=True)\n', (9748, 9796), False, 'from logger import get_logger\n'), ((692, 712), 'os.path.join', 'os.path.join', (['*paths'], {}), '(*paths)\n', (704, 712), False, 'import os\n'), ((728, 749), 'os.path.dirname', 'os.path.dirname', (['path'], {}), '(path)\n', (743, 749), False, 'import os\n'), ((2566, 2585), 'numpy.eye', 
'np.eye', (['num_classes'], {}), '(num_classes)\n', (2572, 2585), True, 'import numpy as np\n'), ((401, 429), 'os.path.join', 'os.path.join', (['dir_path', 'item'], {}), '(dir_path, item)\n', (413, 429), False, 'import os\n'), ((506, 526), 'os.path.isfile', 'os.path.isfile', (['item'], {}), '(item)\n', (520, 526), False, 'import os\n'), ((3788, 3814), 'numpy.roll', 'np.roll', (['x', '(-epoch)'], {'axis': '(0)'}), '(x, -epoch, axis=0)\n', (3795, 3814), True, 'import numpy as np\n'), ((3864, 3890), 'numpy.roll', 'np.roll', (['y', '(-epoch)'], {'axis': '(0)'}), '(y, -epoch, axis=0)\n', (3871, 3890), True, 'import numpy as np\n'), ((442, 462), 'os.listdir', 'os.listdir', (['dir_path'], {}), '(dir_path)\n', (452, 462), False, 'import os\n'), ((544, 559), 'os.remove', 'os.remove', (['item'], {}), '(item)\n', (553, 559), False, 'import os\n'), ((4659, 4676), 'numpy.argsort', 'np.argsort', (['probs'], {}), '(probs)\n', (4669, 4676), True, 'import numpy as np\n'), ((7218, 7243), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (7233, 7243), False, 'import os\n'), ((8651, 8676), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (8666, 8676), False, 'import os\n'), ((9448, 9473), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (9463, 9473), False, 'import os\n')] |
# ======================================================================== #
#
# Copyright (c) 2017 - 2020 scVAE authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ======================================================================== #
import matplotlib.colors
import numpy
import scipy
import seaborn
import sklearn
from matplotlib import pyplot
from mpl_toolkits.axes_grid1 import make_axes_locatable
from scvae.analyses.figures import saving, style
def plot_heat_map(values, x_name, y_name, z_name=None, z_symbol=None,
                  z_min=None, z_max=None, symmetric=False, labels=None,
                  label_kind=None, center=None, name=None):
    """Plot a 2-d array as a heat map and return ``(figure, figure_name)``.

    :param values: 2-d array of shape (n_examples, n_features).
    :param x_name: x-axis label.
    :param y_name: y-axis label (annotated when sorting by labels).
    :param z_name: colour-bar label (overridden by *z_symbol*).
    :param z_symbol: maths symbol rendered as the colour-bar label.
    :param z_min: lower colour limit; defaults to ``values.min()``.
    :param z_max: upper colour limit; defaults to ``values.max()``.
    :param symmetric: treat *values* as a square symmetric matrix and
        share sorting and labels across both axes.
    :param labels: per-example labels used to sort the rows.
    :param label_kind: description of *labels* for the axis annotation.
    :param center: value at which the colour map is centred.
    :param name: extra part for the built figure name.
    :raises ValueError: if *symmetric* is set but *values* is not square.
    """
    figure_name = saving.build_figure_name("heat_map", name)
    n_examples, n_features = values.shape

    if symmetric and n_examples != n_features:
        raise ValueError(
            "Input cannot be symmetric, when it is not given as a 2-d square "
            "array or matrix."
        )

    figure = pyplot.figure()
    axis = figure.add_subplot(1, 1, 1)

    # Fall back to the data range only when no bound was supplied;
    # `if not z_min` would also discard a legitimate bound of 0.
    if z_min is None:
        z_min = values.min()
    if z_max is None:
        z_max = values.max()

    if z_symbol:
        z_name = "$" + z_symbol + "$"

    cbar_dict = {}
    if z_name:
        cbar_dict["label"] = z_name

    if not symmetric:
        # Only draw square cells when the matrix is not too elongated.
        aspect_ratio = n_examples / n_features
        square_cells = 1/5 < aspect_ratio < 5
    else:
        square_cells = True

    if labels is not None:
        x_indices = numpy.argsort(labels)
        y_name += " sorted"
        if label_kind:
            y_name += " by " + label_kind
    else:
        x_indices = numpy.arange(n_examples)

    if symmetric:
        # Apply the same ordering to both axes of a symmetric matrix.
        y_indices = x_indices
        x_name = y_name
    else:
        y_indices = numpy.arange(n_features)

    seaborn.set(style="white")
    seaborn.heatmap(
        values[x_indices][:, y_indices],
        vmin=z_min, vmax=z_max, center=center,
        xticklabels=False, yticklabels=False,
        cbar=True, cbar_kws=cbar_dict, cmap=style.STANDARD_COLOUR_MAP,
        square=square_cells,
        ax=axis
    )
    style.reset_plot_look()

    axis.set_xlabel(x_name)
    axis.set_ylabel(y_name)

    return figure, figure_name
def plot_matrix(feature_matrix, plot_distances=False, center_value=None,
                example_label=None, feature_label=None, value_label=None,
                sorting_method=None, distance_metric="Euclidean",
                labels=None, label_kind=None, class_palette=None,
                feature_indices_for_plotting=None, hide_dendrogram=False,
                name_parts=None):
    """Plot a feature matrix (or its pairwise-distance matrix) as a heat
    map, optionally sorted by labels or by hierarchical clustering, with
    a per-example label colour strip and a dendrogram.

    :param feature_matrix: 2-d array of shape (n_examples, n_features).
    :param plot_distances: plot pairwise example distances instead of
        the raw features.
    :param center_value: value at which the colour map is centred
        (cleared when plotting distances).
    :param example_label: y-axis label (annotated by the sorting used).
    :param feature_label: x-axis label (cleared when plotting distances).
    :param value_label: colour-bar label.
    :param sorting_method: None, "labels" or "hierarchical_clustering".
    :param distance_metric: metric name passed to scikit-learn.
    :param labels: per-example labels (required for label sorting and
        for the colour strip).
    :param label_kind: description of *labels* for the axis annotation.
    :param class_palette: mapping from label to colour (required when
        *labels* is given).
    :param feature_indices_for_plotting: subset of feature columns to
        show; defaults to all.
    :param hide_dendrogram: suppress the dendrogram axis.
    :param name_parts: parts for the built figure name.
    :raises ValueError: on missing labels/palette or an unknown
        *sorting_method*.
    :return: tuple ``(figure, figure_name)``.
    """
    figure_name = saving.build_figure_name(name_parts)
    n_examples, n_features = feature_matrix.shape

    if plot_distances:
        center_value = None
        feature_label = None
        value_label = "Pairwise {} distances in {} space".format(
            distance_metric,
            value_label
        )

    if not plot_distances and feature_indices_for_plotting is None:
        feature_indices_for_plotting = numpy.arange(n_features)

    if sorting_method == "labels" and labels is None:
        raise ValueError("No labels provided to sort after.")

    if labels is not None and not class_palette:
        raise ValueError("No class palette provided.")

    # Distances (if needed)
    distances = None
    if plot_distances or sorting_method == "hierarchical_clustering":
        distances = sklearn.metrics.pairwise_distances(
            feature_matrix,
            metric=distance_metric.lower()
        )

    # Figure initialisation
    figure = pyplot.figure()

    axis_heat_map = figure.add_subplot(1, 1, 1)
    left_most_axis = axis_heat_map

    divider = make_axes_locatable(axis_heat_map)

    axis_colour_map = divider.append_axes("right", size="5%", pad=0.1)

    axis_labels = None
    axis_dendrogram = None

    # Extra axes are stacked to the left of the heat map; `left_most_axis`
    # tracks whichever axis ends up carrying the y-axis label.
    if labels is not None:
        axis_labels = divider.append_axes("left", size="5%", pad=0.01)
        left_most_axis = axis_labels

    if sorting_method == "hierarchical_clustering" and not hide_dendrogram:
        axis_dendrogram = divider.append_axes("left", size="20%", pad=0.01)
        left_most_axis = axis_dendrogram

    # Label colours
    if labels is not None:
        label_colours = [
            tuple(colour) if isinstance(colour, list) else colour
            for colour in [class_palette[l] for l in labels]
        ]
        unique_colours = [
            tuple(colour) if isinstance(colour, list) else colour
            for colour in class_palette.values()
        ]
        value_for_colour = {
            colour: i for i, colour in enumerate(unique_colours)
        }
        # One integer per example, indexing into the listed colour map.
        label_colour_matrix = numpy.array(
            [value_for_colour[colour] for colour in label_colours]
        ).reshape(n_examples, 1)
        label_colour_map = matplotlib.colors.ListedColormap(unique_colours)
    else:
        label_colour_matrix = None
        label_colour_map = None

    # Heat map aspect ratio
    if not plot_distances:
        square_cells = False
    else:
        square_cells = True

    seaborn.set(style="white")

    # Sorting and optional dendrogram
    if sorting_method == "labels":
        example_indices = numpy.argsort(labels)
        if not label_kind:
            label_kind = "labels"
        if example_label:
            example_label += " sorted by " + label_kind
    elif sorting_method == "hierarchical_clustering":
        linkage = scipy.cluster.hierarchy.linkage(
            scipy.spatial.distance.squareform(distances, checks=False),
            metric="average"
        )
        dendrogram = seaborn.matrix.dendrogram(
            distances,
            linkage=linkage,
            metric=None,
            method="ward",
            axis=0,
            label=False,
            rotate=True,
            ax=axis_dendrogram
        )
        example_indices = dendrogram.reordered_ind
        if example_label:
            example_label += " sorted by hierarchical clustering"
    elif sorting_method is None:
        example_indices = numpy.arange(n_examples)
    else:
        raise ValueError(
            "`sorting_method` should be either \"labels\""
            " or \"hierarchical clustering\""
        )

    # Heat map of values
    if plot_distances:
        plot_values = distances[example_indices][:, example_indices]
    else:
        plot_values = feature_matrix[example_indices][
            :, feature_indices_for_plotting]

    if scipy.sparse.issparse(plot_values):
        plot_values = plot_values.A

    colour_bar_dictionary = {}

    if value_label:
        colour_bar_dictionary["label"] = value_label

    seaborn.heatmap(
        plot_values, center=center_value,
        xticklabels=False, yticklabels=False,
        cbar=True, cbar_kws=colour_bar_dictionary, cbar_ax=axis_colour_map,
        square=square_cells, ax=axis_heat_map
    )

    # Colour labels
    if axis_labels:
        seaborn.heatmap(
            label_colour_matrix[example_indices],
            xticklabels=False, yticklabels=False,
            cbar=False,
            cmap=label_colour_map,
            ax=axis_labels
        )

    style.reset_plot_look()

    # Axis labels
    if example_label:
        left_most_axis.set_ylabel(example_label)

    if feature_label:
        axis_heat_map.set_xlabel(feature_label)

    return figure, figure_name
def plot_correlation_matrix(correlation_matrix, axis_label=None, name=None):
    """Draw a square heat map of a correlation matrix with the colour
    scale fixed to the Pearson range [-1, 1].

    :param correlation_matrix: 2-d array of correlation coefficients.
    :param axis_label: optional label applied to both axes.
    :param name: extra part for the built figure name.
    :return: tuple ``(figure, figure_name)``.
    """
    figure_name = saving.build_figure_name(name)

    figure = pyplot.figure()
    axis = figure.add_subplot(1, 1, 1)

    seaborn.set(style="white")
    seaborn.heatmap(
        correlation_matrix,
        vmin=-1, vmax=1, center=0,
        xticklabels=False, yticklabels=False,
        cbar=True, cbar_kws={"label": "Pearson correlation coefficient"},
        square=True, ax=axis
    )
    style.reset_plot_look()

    if axis_label:
        axis.set_xlabel(axis_label)
        axis.set_ylabel(axis_label)

    return figure, figure_name
| [
"mpl_toolkits.axes_grid1.make_axes_locatable",
"seaborn.matrix.dendrogram",
"scvae.analyses.figures.saving.build_figure_name",
"seaborn.heatmap",
"scipy.sparse.issparse",
"scipy.spatial.distance.squareform",
"numpy.argsort",
"scvae.analyses.figures.style.reset_plot_look",
"matplotlib.pyplot.figure",... | [((1186, 1228), 'scvae.analyses.figures.saving.build_figure_name', 'saving.build_figure_name', (['"""heat_map"""', 'name'], {}), "('heat_map', name)\n", (1210, 1228), False, 'from scvae.analyses.figures import saving, style\n'), ((1478, 1493), 'matplotlib.pyplot.figure', 'pyplot.figure', ([], {}), '()\n', (1491, 1493), False, 'from matplotlib import pyplot\n'), ((2279, 2305), 'seaborn.set', 'seaborn.set', ([], {'style': '"""white"""'}), "(style='white')\n", (2290, 2305), False, 'import seaborn\n'), ((2310, 2540), 'seaborn.heatmap', 'seaborn.heatmap', (['values[x_indices][:, y_indices]'], {'vmin': 'z_min', 'vmax': 'z_max', 'center': 'center', 'xticklabels': '(False)', 'yticklabels': '(False)', 'cbar': '(True)', 'cbar_kws': 'cbar_dict', 'cmap': 'style.STANDARD_COLOUR_MAP', 'square': 'square_cells', 'ax': 'axis'}), '(values[x_indices][:, y_indices], vmin=z_min, vmax=z_max,\n center=center, xticklabels=False, yticklabels=False, cbar=True,\n cbar_kws=cbar_dict, cmap=style.STANDARD_COLOUR_MAP, square=square_cells,\n ax=axis)\n', (2325, 2540), False, 'import seaborn\n'), ((2587, 2610), 'scvae.analyses.figures.style.reset_plot_look', 'style.reset_plot_look', ([], {}), '()\n', (2608, 2610), False, 'from scvae.analyses.figures import saving, style\n'), ((3108, 3144), 'scvae.analyses.figures.saving.build_figure_name', 'saving.build_figure_name', (['name_parts'], {}), '(name_parts)\n', (3132, 3144), False, 'from scvae.analyses.figures import saving, style\n'), ((4059, 4074), 'matplotlib.pyplot.figure', 'pyplot.figure', ([], {}), '()\n', (4072, 4074), False, 'from matplotlib import pyplot\n'), ((4174, 4208), 'mpl_toolkits.axes_grid1.make_axes_locatable', 'make_axes_locatable', (['axis_heat_map'], {}), '(axis_heat_map)\n', (4193, 4208), False, 'from mpl_toolkits.axes_grid1 import make_axes_locatable\n'), ((5552, 5578), 'seaborn.set', 'seaborn.set', ([], {'style': '"""white"""'}), "(style='white')\n", (5563, 5578), False, 'import seaborn\n'), 
((6943, 6977), 'scipy.sparse.issparse', 'scipy.sparse.issparse', (['plot_values'], {}), '(plot_values)\n', (6964, 6977), False, 'import scipy\n'), ((7126, 7329), 'seaborn.heatmap', 'seaborn.heatmap', (['plot_values'], {'center': 'center_value', 'xticklabels': '(False)', 'yticklabels': '(False)', 'cbar': '(True)', 'cbar_kws': 'colour_bar_dictionary', 'cbar_ax': 'axis_colour_map', 'square': 'square_cells', 'ax': 'axis_heat_map'}), '(plot_values, center=center_value, xticklabels=False,\n yticklabels=False, cbar=True, cbar_kws=colour_bar_dictionary, cbar_ax=\n axis_colour_map, square=square_cells, ax=axis_heat_map)\n', (7141, 7329), False, 'import seaborn\n'), ((7626, 7649), 'scvae.analyses.figures.style.reset_plot_look', 'style.reset_plot_look', ([], {}), '()\n', (7647, 7649), False, 'from scvae.analyses.figures import saving, style\n'), ((7941, 7971), 'scvae.analyses.figures.saving.build_figure_name', 'saving.build_figure_name', (['name'], {}), '(name)\n', (7965, 7971), False, 'from scvae.analyses.figures import saving, style\n'), ((7986, 8001), 'matplotlib.pyplot.figure', 'pyplot.figure', ([], {}), '()\n', (7999, 8001), False, 'from matplotlib import pyplot\n'), ((8120, 8146), 'seaborn.set', 'seaborn.set', ([], {'style': '"""white"""'}), "(style='white')\n", (8131, 8146), False, 'import seaborn\n'), ((8151, 8325), 'seaborn.heatmap', 'seaborn.heatmap', (['correlation_matrix'], {'vmin': '(-1)', 'vmax': '(1)', 'center': '(0)', 'xticklabels': '(False)', 'yticklabels': '(False)', 'cbar': '(True)', 'cbar_kws': 'colour_bar_dictionary', 'square': '(True)', 'ax': 'axis'}), '(correlation_matrix, vmin=-1, vmax=1, center=0, xticklabels=\n False, yticklabels=False, cbar=True, cbar_kws=colour_bar_dictionary,\n square=True, ax=axis)\n', (8166, 8325), False, 'import seaborn\n'), ((8367, 8390), 'scvae.analyses.figures.style.reset_plot_look', 'style.reset_plot_look', ([], {}), '()\n', (8388, 8390), False, 'from scvae.analyses.figures import saving, style\n'), ((1976, 1997), 
'numpy.argsort', 'numpy.argsort', (['labels'], {}), '(labels)\n', (1989, 1997), False, 'import numpy\n'), ((2121, 2145), 'numpy.arange', 'numpy.arange', (['n_examples'], {}), '(n_examples)\n', (2133, 2145), False, 'import numpy\n'), ((2249, 2273), 'numpy.arange', 'numpy.arange', (['n_features'], {}), '(n_features)\n', (2261, 2273), False, 'import numpy\n'), ((3513, 3537), 'numpy.arange', 'numpy.arange', (['n_features'], {}), '(n_features)\n', (3525, 3537), False, 'import numpy\n'), ((5679, 5700), 'numpy.argsort', 'numpy.argsort', (['labels'], {}), '(labels)\n', (5692, 5700), False, 'import numpy\n'), ((7408, 7554), 'seaborn.heatmap', 'seaborn.heatmap', (['label_colour_matrix[example_indices]'], {'xticklabels': '(False)', 'yticklabels': '(False)', 'cbar': '(False)', 'cmap': 'label_colour_map', 'ax': 'axis_labels'}), '(label_colour_matrix[example_indices], xticklabels=False,\n yticklabels=False, cbar=False, cmap=label_colour_map, ax=axis_labels)\n', (7423, 7554), False, 'import seaborn\n'), ((6084, 6224), 'seaborn.matrix.dendrogram', 'seaborn.matrix.dendrogram', (['distances'], {'linkage': 'linkage', 'metric': 'None', 'method': '"""ward"""', 'axis': '(0)', 'label': '(False)', 'rotate': '(True)', 'ax': 'axis_dendrogram'}), "(distances, linkage=linkage, metric=None, method=\n 'ward', axis=0, label=False, rotate=True, ax=axis_dendrogram)\n", (6109, 6224), False, 'import seaborn\n'), ((5158, 5225), 'numpy.array', 'numpy.array', (['[value_for_colour[colour] for colour in label_colours]'], {}), '([value_for_colour[colour] for colour in label_colours])\n', (5169, 5225), False, 'import numpy\n'), ((5964, 6022), 'scipy.spatial.distance.squareform', 'scipy.spatial.distance.squareform', (['distances'], {'checks': '(False)'}), '(distances, checks=False)\n', (5997, 6022), False, 'import scipy\n'), ((6530, 6554), 'numpy.arange', 'numpy.arange', (['n_examples'], {}), '(n_examples)\n', (6542, 6554), False, 'import numpy\n')] |
"""
Module description:
"""
__version__ = '0.1'
__author__ = '<NAME>, <NAME>'
__email__ = '<EMAIL>, <EMAIL>'
import numpy as np
from scipy import sparse as sp
from sklearn.utils.extmath import randomized_svd
class PureSVDModel(object):
"""
Simple Matrix Factorization class
"""
def __init__(self, factors, data, random_seed):
self._data = data
self.factors = factors
self.random_seed = random_seed
self.train_dict = self._data.train_dict
self.user_num, self.item_num = self._data.num_users, self._data.num_items
self.user_vec, self.item_vec = None, None
def train_step(self):
U, sigma, Vt = randomized_svd(self._data.sp_i_train,
n_components=self.factors,
random_state=self.random_seed)
s_Vt = sp.diags(sigma) * Vt
self.user_vec = U
self.item_vec = s_Vt.T
def predict(self, user, item):
return self.user_vec[self._data.public_users[user], :].dot(self.item_vec[self._data.public_items[item], :])
def get_user_recs(self, user, k=100):
user_items = self._data.train_dict[user].keys()
k = min(k, self._data.num_items - len(user_items))
predictions = {i: self.predict(user, i) for i in self._data.items if i not in user_items}
indices, values = zip(*predictions.items())
indices = np.array(indices)
values = np.array(values)
partially_ordered_preds_indices = np.argpartition(values, -k)[-k:]
real_values = values[partially_ordered_preds_indices]
real_indices = indices[partially_ordered_preds_indices]
local_top_k = real_values.argsort()[::-1]
return [(real_indices[item], real_values[item]) for item in local_top_k]
def get_model_state(self):
saving_dict = {}
saving_dict['user_vec'] = self.user_vec
saving_dict['item_vec'] = self.item_vec
return saving_dict
def set_model_state(self, saving_dict):
self.user_vec = saving_dict['user_vec']
self.item_vec = saving_dict['item_vec']
| [
"numpy.argpartition",
"scipy.sparse.diags",
"numpy.array",
"sklearn.utils.extmath.randomized_svd"
] | [((676, 775), 'sklearn.utils.extmath.randomized_svd', 'randomized_svd', (['self._data.sp_i_train'], {'n_components': 'self.factors', 'random_state': 'self.random_seed'}), '(self._data.sp_i_train, n_components=self.factors,\n random_state=self.random_seed)\n', (690, 775), False, 'from sklearn.utils.extmath import randomized_svd\n'), ((1420, 1437), 'numpy.array', 'np.array', (['indices'], {}), '(indices)\n', (1428, 1437), True, 'import numpy as np\n'), ((1455, 1471), 'numpy.array', 'np.array', (['values'], {}), '(values)\n', (1463, 1471), True, 'import numpy as np\n'), ((863, 878), 'scipy.sparse.diags', 'sp.diags', (['sigma'], {}), '(sigma)\n', (871, 878), True, 'from scipy import sparse as sp\n'), ((1514, 1541), 'numpy.argpartition', 'np.argpartition', (['values', '(-k)'], {}), '(values, -k)\n', (1529, 1541), True, 'import numpy as np\n')] |
# %%
import numpy as np
import pandas as pd
df = pd.read_csv('data_xhm.csv', sep=',', skiprows=0, names=['t','i','j','W'])
print(df.head(5))
# %%
T = np.max(df['t'])
N = np.max(df['i']) # number of workers
M = np.max(df['j'])
print('{} periods, {} workers and {} firms.'.format(T,N,M))
# %%
i = 0
W_i = df.loc[df['i']==i+1]
print(Wt)
# %%
for t in range(T):
Wt = df[df['t']==t+1] | [
"pandas.read_csv",
"numpy.max"
] | [((53, 129), 'pandas.read_csv', 'pd.read_csv', (['"""data_xhm.csv"""'], {'sep': '""","""', 'skiprows': '(0)', 'names': "['t', 'i', 'j', 'W']"}), "('data_xhm.csv', sep=',', skiprows=0, names=['t', 'i', 'j', 'W'])\n", (64, 129), True, 'import pandas as pd\n'), ((155, 170), 'numpy.max', 'np.max', (["df['t']"], {}), "(df['t'])\n", (161, 170), True, 'import numpy as np\n'), ((175, 190), 'numpy.max', 'np.max', (["df['i']"], {}), "(df['i'])\n", (181, 190), True, 'import numpy as np\n'), ((215, 230), 'numpy.max', 'np.max', (["df['j']"], {}), "(df['j'])\n", (221, 230), True, 'import numpy as np\n')] |
import numpy as np
import cupy as cp
def _fast_expand(complex_array, factor, num_per_block=200):
shp = complex_array.shape
times = shp[0]
expanded_array = np.zeros((shp[0], shp[1], shp[2] * factor))
for k in range(0, times, num_per_block):
the_max = min([k + num_per_block, times])
weights = cp.tile(np.arange(0, factor) / factor, (the_max - k, shp[1], 1))
for l in range(shp[2]):
gpu_array = cp.array(complex_array[k:the_max, :, l])
if l < shp[2] - 1:
gpu_array2 = cp.array(complex_array[k:the_max, :, l + 1])
diff_array = gpu_array2 - gpu_array
else:
diff_array = cp.zeros((the_max - k, shp[1]))
rep_array = cp.transpose(
cp.tile(gpu_array, (factor, 1, 1)), [1, 2, 0])
diff_array = cp.transpose(
cp.tile(diff_array, (factor, 1, 1)), [1, 2, 0])
temp_array = diff_array * weights + rep_array
expanded_array[k:the_max, :, factor * l:factor * (l + 1)] = temp_array.get()
return expanded_array
def _gpu_moving_average(arr, window=8, num_per_block=200):
shp = arr.shape
times = shp[0]
for k in range(0, times, num_per_block):
the_max = min([k + num_per_block, times])
gpu_arr = cp.array(arr[k:the_max, :, :])
gpu_arr2 = cp.zeros_like(gpu_arr)
for i in range(shp[2]):
the_min2 = max([0, i - int(window / 2)])
the_max2 = min([i + int(window / 2), shp[2]])
gpu_arr2[:, :, i] = cp.mean(gpu_arr[:, :, the_min2:the_max2], axis=2)
arr[k:the_max, ::] = gpu_arr2.get()
del gpu_arr, gpu_arr2
return arr | [
"cupy.zeros",
"cupy.zeros_like",
"cupy.array",
"numpy.zeros",
"cupy.mean",
"numpy.arange",
"cupy.tile"
] | [((168, 211), 'numpy.zeros', 'np.zeros', (['(shp[0], shp[1], shp[2] * factor)'], {}), '((shp[0], shp[1], shp[2] * factor))\n', (176, 211), True, 'import numpy as np\n'), ((1314, 1344), 'cupy.array', 'cp.array', (['arr[k:the_max, :, :]'], {}), '(arr[k:the_max, :, :])\n', (1322, 1344), True, 'import cupy as cp\n'), ((1364, 1386), 'cupy.zeros_like', 'cp.zeros_like', (['gpu_arr'], {}), '(gpu_arr)\n', (1377, 1386), True, 'import cupy as cp\n'), ((446, 486), 'cupy.array', 'cp.array', (['complex_array[k:the_max, :, l]'], {}), '(complex_array[k:the_max, :, l])\n', (454, 486), True, 'import cupy as cp\n'), ((1562, 1611), 'cupy.mean', 'cp.mean', (['gpu_arr[:, :, the_min2:the_max2]'], {'axis': '(2)'}), '(gpu_arr[:, :, the_min2:the_max2], axis=2)\n', (1569, 1611), True, 'import cupy as cp\n'), ((333, 353), 'numpy.arange', 'np.arange', (['(0)', 'factor'], {}), '(0, factor)\n', (342, 353), True, 'import numpy as np\n'), ((547, 591), 'cupy.array', 'cp.array', (['complex_array[k:the_max, :, l + 1]'], {}), '(complex_array[k:the_max, :, l + 1])\n', (555, 591), True, 'import cupy as cp\n'), ((691, 722), 'cupy.zeros', 'cp.zeros', (['(the_max - k, shp[1])'], {}), '((the_max - k, shp[1]))\n', (699, 722), True, 'import cupy as cp\n'), ((778, 812), 'cupy.tile', 'cp.tile', (['gpu_array', '(factor, 1, 1)'], {}), '(gpu_array, (factor, 1, 1))\n', (785, 812), True, 'import cupy as cp\n'), ((880, 915), 'cupy.tile', 'cp.tile', (['diff_array', '(factor, 1, 1)'], {}), '(diff_array, (factor, 1, 1))\n', (887, 915), True, 'import cupy as cp\n')] |
import numpy as np
import math
# import matplotlib.pyplot as plt
k = 0.1 # 前视距离系数
Lfc = 5.0 # 前视距离
Kp = 1.0 # 速度P控制器系数
dt = 0.1 # 时间间隔,单位:s
L = 1.5 # 车辆轴距,单位:m
class PurePursuit:
def __init__(self, lng=0.0, lat=0.0, yaw=0.0, v=0.0):
"""
:param x: 经度
:param y: 纬度
:param yaw: 偏航角
:param v: 速度
"""
self.lng = lng
self.lat = lat
self.yaw = yaw
self.v = v
def update(self, lng, lat, yaw, v):
self.lng = lng
self.lat = lat
self.yaw = yaw
self.v = v
def calc_target_index(self, cx, cy):
# 搜索最临近的路点
dx = [self.lng - icx for icx in cx]
dy = [self.lat - icy for icy in cy]
d = [abs(math.sqrt(idx ** 2 + idy ** 2)) for (idx, idy) in zip(dx, dy)]
ind = d.index(min(d))
L = 0.0
Lf = k * self.v + Lfc
while Lf > L and (ind + 1) < len(cx):
dx = cx[ind + 1] - cx[ind]
dy = cx[ind + 1] - cx[ind]
L += math.sqrt(dx ** 2 + dy ** 2)
ind += 1
return ind
def p_control(self, target, current):
a = Kp * (target - current)
return a
def pure_pursuit_control(self, cx, cy, pind):
ind = self.calc_target_index(cx, cy)
if pind >= ind:
ind = pind
if ind < len(cx):
tx = cx[ind]
ty = cy[ind]
else:
tx = cx[-1]
ty = cy[-1]
ind = len(cx) - 1
alpha = math.atan2(ty - self.lat, tx - self.lng) - self.yaw
if self.v < 0: # back
alpha = math.pi - alpha
Lf = k * self.v + Lfc
delta = math.atan2(2.0 * L * math.sin(alpha) / Lf, 1.0)
return delta, ind
def pid_pwm(self,distance,theta_error):
left_pwm=0
right_pwm=0
return left_pwm,right_pwm
def main():
# 设置目标路点
cx = np.arange(0, 50, 1)
cy = [math.sin(ix / 5.0) * ix / 2.0 for ix in cx]
target_speed = 10.0 / 3.6 # [m/s]
T = 100.0 # 最大模拟时间
# 设置车辆的初始状态
vehicle = PurePursuit(lng=-0.0, lat=-3.0, yaw=0.0, v=0.0)
lastIndex = len(cx) - 1
time = 0.0
x = [vehicle.lng]
y = [vehicle.lat]
yaw = [vehicle.yaw]
v = [vehicle.v]
t = [0.0]
target_ind = vehicle.calc_target_index(cx, cy)
while T >= time and lastIndex > target_ind:
ai = vehicle.p_control(target_speed, vehicle.v)
di, target_ind = vehicle.pure_pursuit_control(cx, cy, target_ind)
x_cal = vehicle.lng + vehicle.v * math.cos(vehicle.yaw) * dt
y_cal = vehicle.lat + vehicle.v * math.sin(vehicle.yaw) * dt
yaw_cal = vehicle.yaw + vehicle.v / L * math.tan(di) * dt
v_cal = vehicle.v + ai * dt
vehicle.update(x_cal, y_cal, yaw_cal, v_cal)
time = time + dt
x.append(vehicle.lng)
y.append(vehicle.lat)
yaw.append(vehicle.yaw)
v.append(vehicle.v)
t.append(time)
# plt.cla()
# plt.plot(cx, cy, ".r", label="course")
# plt.plot(x, y, "-b", label="trajectory")
# plt.plot(cx[target_ind], cy[target_ind], "go", label="target")
# plt.axis("equal")
# plt.grid(True)
# plt.title("Speed[km/h]:" + str(vehicle.v * 3.6)[:4])
# plt.pause(0.001)
# 纯追踪控制控制转向角度,使用一个简单的P控制器控制速度
if __name__ == '__main__':
main()
| [
"math.sqrt",
"math.atan2",
"math.tan",
"math.sin",
"numpy.arange",
"math.cos"
] | [((1907, 1926), 'numpy.arange', 'np.arange', (['(0)', '(50)', '(1)'], {}), '(0, 50, 1)\n', (1916, 1926), True, 'import numpy as np\n'), ((1020, 1048), 'math.sqrt', 'math.sqrt', (['(dx ** 2 + dy ** 2)'], {}), '(dx ** 2 + dy ** 2)\n', (1029, 1048), False, 'import math\n'), ((1512, 1552), 'math.atan2', 'math.atan2', (['(ty - self.lat)', '(tx - self.lng)'], {}), '(ty - self.lat, tx - self.lng)\n', (1522, 1552), False, 'import math\n'), ((739, 769), 'math.sqrt', 'math.sqrt', (['(idx ** 2 + idy ** 2)'], {}), '(idx ** 2 + idy ** 2)\n', (748, 769), False, 'import math\n'), ((1937, 1955), 'math.sin', 'math.sin', (['(ix / 5.0)'], {}), '(ix / 5.0)\n', (1945, 1955), False, 'import math\n'), ((1698, 1713), 'math.sin', 'math.sin', (['alpha'], {}), '(alpha)\n', (1706, 1713), False, 'import math\n'), ((2539, 2560), 'math.cos', 'math.cos', (['vehicle.yaw'], {}), '(vehicle.yaw)\n', (2547, 2560), False, 'import math\n'), ((2608, 2629), 'math.sin', 'math.sin', (['vehicle.yaw'], {}), '(vehicle.yaw)\n', (2616, 2629), False, 'import math\n'), ((2683, 2695), 'math.tan', 'math.tan', (['di'], {}), '(di)\n', (2691, 2695), False, 'import math\n')] |
from typing import Optional, Sequence
import numpy as np
class MoleculeGraphData:
"""MoleculeGraphData class
This data class is almost same as `torch_geometric.data.Data
<https://pytorch-geometric.readthedocs.io/en/latest/modules/data.html#torch_geometric.data.Data>`_.
Attributes
----------
node_features : np.ndarray
Node feature matrix with shape [num_nodes, num_node_features]
edge_index : np.ndarray, dtype int
Graph connectivity in COO format with shape [2, num_edges]
targets : np.ndarray
Graph or node targets with arbitrary shape
edge_features : np.ndarray, optional (default None)
Edge feature matrix with shape [num_edges, num_edge_features]
graph_features : np.ndarray, optional (default None)
Graph feature vector with shape [num_graph_features,]
num_nodes : int
The number of nodes in the graph
num_node_features : int
The number of features per node in the graph
num_edges : int
The number of edges in the graph
num_edges_features : int, , optional (default None)
The number of features per edge in the graph
"""
def __init__(
self,
node_features: np.ndarray,
edge_index: np.ndarray,
targets: np.ndarray,
edge_features: Optional[np.ndarray] = None,
graph_features: Optional[np.ndarray] = None,
):
"""
Parameters
----------
node_features : np.ndarray
Node feature matrix with shape [num_nodes, num_node_features]
edge_index : np.ndarray, dtype int
Graph connectivity in COO format with shape [2, num_edges]
targets : np.ndarray
Graph or node targets with arbitrary shape
edge_features : np.ndarray, optional (default None)
Edge feature matrix with shape [num_edges, num_edge_features]
graph_features : np.ndarray, optional (default None)
Graph feature vector with shape [num_graph_features,]
"""
# validate params
if isinstance(node_features, np.ndarray) is False:
raise ValueError('node_features must be np.ndarray.')
if isinstance(edge_index, np.ndarray) is False:
raise ValueError('edge_index must be np.ndarray.')
elif edge_index.dtype != np.int:
raise ValueError('edge_index.dtype must be np.int')
elif edge_index.shape[0] != 2:
raise ValueError('The shape of edge_index is [2, num_edges].')
if isinstance(targets, np.ndarray) is False:
raise ValueError('y must be np.ndarray.')
if edge_features is not None:
if isinstance(edge_features, np.ndarray) is False:
raise ValueError('edge_features must be np.ndarray or None.')
elif edge_index.shape[1] != edge_features.shape[0]:
raise ValueError('The first dimension of edge_features must be the \
same as the second dimension of edge_index.')
if graph_features is not None and isinstance(graph_features,
np.ndarray) is False:
raise ValueError('graph_features must be np.ndarray or None.')
self.node_features = node_features
self.edge_index = edge_index
self.edge_features = edge_features
self.graph_features = graph_features
self.targets = targets
self.num_nodes, self.num_node_features = self.node_features.shape
self.num_edges = edge_index.shape[1]
if self.node_features is not None:
self.num_edge_features = self.edge_features.shape[1]
def to_pyg_data(self):
"""Convert to PyTorch Geometric Data instance
Returns
-------
torch_geometric.data.Data
Molecule graph data for PyTorch Geometric
"""
try:
import torch
from torch_geometric.data import Data
except ModuleNotFoundError:
raise ValueError("This class requires PyTorch Geometric to be installed.")
return Data(
x=torch.from_numpy(self.node_features),
edge_index=torch.from_numpy(self.edge_index),
edge_attr=None if self.edge_features is None \
else torch.from_numpy(self.edge_features),
y=torch.from_numpy(self.targets),
)
class BatchMoleculeGraphData(MoleculeGraphData):
"""Batch MoleculeGraphData class
Attributes
----------
graph_index : np.ndarray, dtype int
This vector indicates which graph the node belongs with shape [num_nodes,]
"""
def __init__(self, molecule_graphs: Sequence[MoleculeGraphData]):
"""
Parameters
----------
molecule_graphs : Sequence[MoleculeGraphData]
List of MoleculeGraphData
"""
# stack features and targets
batch_node_features = np.vstack(
[graph.node_features for graph in molecule_graphs])
batch_targets = np.vstack([graph.targets for graph in molecule_graphs])
# before stacking edge_features or graph_features,
# we should check whether these are None or not
if molecule_graphs[0].edge_features is not None:
batch_edge_features = np.vstack(
[graph.edge_features for graph in molecule_graphs])
else:
batch_edge_features = None
if molecule_graphs[0].graph_features is not None:
batch_graph_features = np.vstack(
[graph.graph_features for graph in molecule_graphs])
else:
batch_graph_features = None
# create new edge index
num_nodes_list = [graph.num_nodes for graph in molecule_graphs]
batch_edge_index = np.hstack(
[graph.edge_index + prev_num_node for prev_num_node, graph \
in zip([0] + num_nodes_list[:-1], molecule_graphs)]
).astype(int)
# graph_index indicates which nodes belong to which graph
graph_index = []
for i, num_nodes in enumerate(num_nodes_list):
graph_index.extend([i] * num_nodes)
self.graph_index = np.array(graph_index, dtype=int)
super().__init__(
node_features=batch_node_features,
edge_index=batch_edge_index,
targets=batch_targets,
edge_features=batch_edge_features,
graph_features=batch_graph_features,
)
@staticmethod # type: ignore
def to_pyg_data(molecule_graphs: Sequence[MoleculeGraphData]):
"""Convert to PyTorch Geometric Batch instance
Parameters
----------
molecule_graphs : Sequence[MoleculeGraphData]
List of MoleculeGraphData
Returns
-------
torch_geometric.data.Batch
Batch data of molecule graph for PyTorch Geometric
"""
try:
from torch_geometric.data import Batch
except ModuleNotFoundError:
raise ValueError(
"This class requires PyTorch Geometric to be installed.")
data_list = [mol_graph.to_pyg_data() for mol_graph in molecule_graphs]
return Batch.from_data_list(data_list=data_list)
| [
"torch.from_numpy",
"numpy.array",
"numpy.vstack",
"torch_geometric.data.Batch.from_data_list"
] | [((4518, 4579), 'numpy.vstack', 'np.vstack', (['[graph.node_features for graph in molecule_graphs]'], {}), '([graph.node_features for graph in molecule_graphs])\n', (4527, 4579), True, 'import numpy as np\n'), ((4609, 4664), 'numpy.vstack', 'np.vstack', (['[graph.targets for graph in molecule_graphs]'], {}), '([graph.targets for graph in molecule_graphs])\n', (4618, 4664), True, 'import numpy as np\n'), ((5648, 5680), 'numpy.array', 'np.array', (['graph_index'], {'dtype': 'int'}), '(graph_index, dtype=int)\n', (5656, 5680), True, 'import numpy as np\n'), ((4854, 4915), 'numpy.vstack', 'np.vstack', (['[graph.edge_features for graph in molecule_graphs]'], {}), '([graph.edge_features for graph in molecule_graphs])\n', (4863, 4915), True, 'import numpy as np\n'), ((5054, 5116), 'numpy.vstack', 'np.vstack', (['[graph.graph_features for graph in molecule_graphs]'], {}), '([graph.graph_features for graph in molecule_graphs])\n', (5063, 5116), True, 'import numpy as np\n'), ((6595, 6636), 'torch_geometric.data.Batch.from_data_list', 'Batch.from_data_list', ([], {'data_list': 'data_list'}), '(data_list=data_list)\n', (6615, 6636), False, 'from torch_geometric.data import Batch\n'), ((3784, 3820), 'torch.from_numpy', 'torch.from_numpy', (['self.node_features'], {}), '(self.node_features)\n', (3800, 3820), False, 'import torch\n'), ((3839, 3872), 'torch.from_numpy', 'torch.from_numpy', (['self.edge_index'], {}), '(self.edge_index)\n', (3855, 3872), False, 'import torch\n'), ((3986, 4016), 'torch.from_numpy', 'torch.from_numpy', (['self.targets'], {}), '(self.targets)\n', (4002, 4016), False, 'import torch\n'), ((3940, 3976), 'torch.from_numpy', 'torch.from_numpy', (['self.edge_features'], {}), '(self.edge_features)\n', (3956, 3976), False, 'import torch\n')] |
from PIL import Image
import os
import filecmp
import subprocess
import re
import numpy as np
import matplotlib.pyplot as plt
def convert_to_raw(file):
""" Convert file to raw file.
Args:
file: file to convert.
Returns:
name of the raw file on filesystem
"""
img = Image.open(file)
img = img.convert('L') # convert to 8 bits per pixels
(x, y) = img.size
pixels = bytearray(list(img.getdata()))
filename, file_extension = os.path.splitext(file)
file2 = file.replace(file_extension, '.dat')
file_name = str(x) + 'x' + str(y) + 'x8x1' + '_' + file2
# print(file_name)
with open(file_name, 'wb') as f:
f.write(pixels)
return file_name
def convert_to_jpg(raw_file):
""" Convert a raw file to jpg file.
Args:
raw_file: file to convert.
Returns: null
"""
match = re.match('(\d+)x(\d+)x(\d+)x(\d+)_(\w+)', raw_file)
if match:
print(match.group(1))
print(match.group(2))
print(match.group(3))
print(match.group(4))
print(match.group(5))
x = int(match.group(1))
y = int(match.group(2))
bpp = int(match.group(3))
dimension = int(match.group(4))
filename = match.group(0)
rawData = open(raw_file, 'rb').read()
imgSize = (x, y)
# Use the PIL raw decoder to read the data.
# the 'F;16' informs the raw decoder that we are reading
# a little endian, unsigned integer 16 bit data.
# img = Image.fromstring('L', imgSize, rawData, 'raw', 'F;32')
img = Image.frombuffer('L', imgSize, rawData, 'raw')
img = img.rotate(180)
img = img.transpose(Image.FLIP_LEFT_RIGHT)
img.save(filename + ".jpg")
def interpolate(file_in, file_out, device, iterations, interpolation_type, new_width, new_height):
""" Wrapper function on top of the interpolation executable.
It is also a benchmarking function, it returns the name of the output
image and the time needed to do all the iterations
Args:
file_in: input raw image used for the tests.
file_out: output raw image
device: 'cpu' or 'gpu'
iterations: number of times we do the processing
(we can do it iterations times, but only return 1 output image)
interpolation_type: 'nn' or 'bl'
new_width: output image width
new_height: output image height
Returns:
a tuple containing the output image name and the time in sec needed
to do the processing iterations times
"""
command_string = 'ImageInterpolation.exe ' + device + ' ' + str(iterations) + ' ' + interpolation_type + ' ' + file_in + ' ' + file_out + ' ' + str(new_width) + ' ' + str(new_height)
program_out = str(subprocess.check_output(command_string.split(), stderr=subprocess.STDOUT), 'utf-8')
print(program_out) # can be commented, avoid output polution
program_out = program_out.splitlines()
# Attention, time and file name respectively at lines 8 and 9 of the output
seconds = float(program_out[8])
out_file = program_out[9]
return (seconds, out_file)
def benchmark_cpu_vs_gpu(input_raw_file):
""" Benchmark cpu vs gpu time wise.
Args:
input_raw_file: input raw image used for the tests.
Returns:
2 tuples containing times needed to do processing on gpu and cpu
"""
nb_iterations = 20
(cpu1, f1) = interpolate(input_raw_file, 'cpu_nn_lena.dat', 'cpu', nb_iterations, 'nn', 8000, 4000)
(gpu1, f2) = interpolate(input_raw_file, 'gpu_nn_lena.dat', 'gpu', nb_iterations, 'nn', 8000, 4000)
(cpu2, f3) = interpolate(input_raw_file, 'cpu_bl_lena.dat', 'cpu', nb_iterations, 'bl', 8000, 4000)
(gpu2, f4) = interpolate(input_raw_file, 'gpu_bl_lena.dat', 'gpu', nb_iterations, 'bl', 8000, 4000)
# return ((cpu1/nb_iterations, cpu2/nb_iterations), (gpu1/nb_iterations, gpu2/nb_iterations))
return ((cpu1, cpu2), (gpu1, gpu2))
def plot_graph(durations, figure_name):
""" Plot durations in a graph
Args:
durations: processing durations.
Returns:
a file on file system
"""
# with plt.xkcd():
N = 2
# cpuMeans = (1.218, 10.303)
cpuMeans = durations[0]
ind = np.arange(N) # the x locations for the groups
width = 0.35 # the width of the bars
fig, ax = plt.subplots()
rects1 = ax.bar(ind, cpuMeans, width, color='r')
# gpuMeans = (0.669, 3.46)
gpuMeans = durations[1]
rects2 = ax.bar(ind + width, gpuMeans, width, color='y')
# add some text for labels, title and axes ticks
ax.set_ylabel('Time in sec')
ax.set_title('Duration by interpolation type and device type')
ax.set_xticks(ind + width)
ax.set_xticklabels(('Nearest Neighbor', 'Bilinear'))
ax.legend((rects1[0], rects2[0]), ('Cpu', 'Gpu'))
def autolabel(rects):
# attach some text labels
for rect in rects:
height = rect.get_height()
ax.text(rect.get_x() + rect.get_width() / 2., 1.05 * height,
'%.3f' % height,
ha='center', va='bottom')
autolabel(rects1)
autolabel(rects2)
# plt.show()
plt.savefig(figure_name)
def check_bit_exactness(input_raw_file):
""" Check bit exactness on interpolation executable between Gpu vs Cpu with various parameters.
Args:
param1: input raw image used for the tests.
Returns:
Prints to the output if results are bit exact
"""
(t1, f1) = interpolate(input_raw_file, 'cpu_nn_lena.dat', 'cpu', 1, 'nn', 8000, 4000)
(t2, f2) = interpolate(input_raw_file, 'gpu_nn_lena.dat', 'gpu', 1, 'nn', 8000, 4000)
(t3, f3) = interpolate(input_raw_file, 'cpu_bl_lena.dat', 'cpu', 1, 'bl', 8000, 4000)
(t4, f4) = interpolate(input_raw_file, 'gpu_bl_lena.dat', 'gpu', 1, 'bl', 8000, 4000)
if filecmp.cmp(f1, f2, shallow=True):
print("NN interpolation on GPU is bit exact with CPU")
if filecmp.cmp(f3, f4, shallow=True):
print("Bilinear interpolation on GPU is bit exact with CPU")
def exercise(input_raw_file):
""" Exercise interpolation executable with various parameters.
No Args:
Returns: null
"""
for device in ['cpu','gpu']:
for interp in ['bl']:
for (w,h) in ((256, 300),(2000, 1000),(1000, 2000),(8000, 4000)):
(t, f) = interpolate(input_raw_file, device + '_' + interp + '_lena.dat', device, 1, interp, w, h)
convert_to_jpg(f)
if __name__ == '__main__':
#
# Convert Lena Tiff image to raw format
#
raw_file = convert_to_raw('Lena.tiff')
# exercise(raw_file)
# quit()
#
# Check bit eaxctness between Cpu and Gpu processing
#
print("Checking bit-exactness between GPU processing and CPU processing")
check_bit_exactness(raw_file)
#
# Perform benchmark between Cpu and Gpu processing
# plot results in a file
#
print("Benchmarking execution time Cpu vs Gpu")
durations = benchmark_cpu_vs_gpu(raw_file)
plot_graph(durations,'CpuVsGpu.png')
quit()
| [
"re.match",
"PIL.Image.open",
"numpy.arange",
"os.path.splitext",
"PIL.Image.frombuffer",
"filecmp.cmp",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.savefig"
] | [((326, 342), 'PIL.Image.open', 'Image.open', (['file'], {}), '(file)\n', (336, 342), False, 'from PIL import Image\n'), ((501, 523), 'os.path.splitext', 'os.path.splitext', (['file'], {}), '(file)\n', (517, 523), False, 'import os\n'), ((916, 972), 're.match', 're.match', (['"""(\\\\d+)x(\\\\d+)x(\\\\d+)x(\\\\d+)_(\\\\w+)"""', 'raw_file'], {}), "('(\\\\d+)x(\\\\d+)x(\\\\d+)x(\\\\d+)_(\\\\w+)', raw_file)\n", (924, 972), False, 'import re\n'), ((1609, 1655), 'PIL.Image.frombuffer', 'Image.frombuffer', (['"""L"""', 'imgSize', 'rawData', '"""raw"""'], {}), "('L', imgSize, rawData, 'raw')\n", (1625, 1655), False, 'from PIL import Image\n'), ((4380, 4392), 'numpy.arange', 'np.arange', (['N'], {}), '(N)\n', (4389, 4392), True, 'import numpy as np\n'), ((4489, 4503), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (4501, 4503), True, 'import matplotlib.pyplot as plt\n'), ((5326, 5350), 'matplotlib.pyplot.savefig', 'plt.savefig', (['figure_name'], {}), '(figure_name)\n', (5337, 5350), True, 'import matplotlib.pyplot as plt\n'), ((6022, 6055), 'filecmp.cmp', 'filecmp.cmp', (['f1', 'f2'], {'shallow': '(True)'}), '(f1, f2, shallow=True)\n', (6033, 6055), False, 'import filecmp\n'), ((6127, 6160), 'filecmp.cmp', 'filecmp.cmp', (['f3', 'f4'], {'shallow': '(True)'}), '(f3, f4, shallow=True)\n', (6138, 6160), False, 'import filecmp\n')] |
import time
import joblib
import numpy as np
from scipy.stats import ttest_1samp
from sklearn.metrics import mean_squared_error as mse
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import ShuffleSplit
from sklearn.tree import DecisionTreeRegressor
from .base import ConditionalIndependenceTest, ConditionalIndependenceTestOutput
class FCIT(ConditionalIndependenceTest):
r"""
Fast Conditional Independence test statistic and p-value
The Fast Conditional Independence Test is a non-parametric
conditional independence test :footcite:p:`chalupka2018FastConditionalIndependence`.
Parameters
----------
model: Sklearn regressor
Regressor used to predict input data :math:`Y`
cv_grid: dict
Dictionary of parameters to cross-validate over when training regressor.
num_perm: int
Number of data permutations to estimate the p-value from marginal stats.
prop_test: float
Proportion of data to evaluate test stat on.
discrete: tuple of string
Whether :math:`X` or :math:`Y` are discrete
Notes
-----
The motivation for the test rests on the assumption that if :math:`X \not\!\perp\!\!\!\perp Y \mid Z`,
then :math:`Y` should be more accurately predicted by using both
:math:`X` and :math:`Z` as covariates as opposed to only using
:math:`Z` as a covariate. Likewise, if :math:`X \perp \!\!\! \perp Y \mid Z`,
then :math:`Y` should be predicted just as accurately by solely
using :math:`X` or soley using :math:`Z` :footcite:p:`chalupka2018FastConditionalIndependence`.
Thus, the test works by using a regressor (the default is decision tree) to
to predict input :math:`Y` using both :math:`X` and :math:`Z` and using
only :math:`Z` :footcite:p:`chalupka2018FastConditionalIndependence`. Then,
accuracy of both predictions are measured via mean-squared error (MSE).
:math:`X \perp \!\!\! \perp Y \mid Z` if and only if MSE of the algorithm
using both :math:`X` and :math:`Z` is not smaller than the MSE of the
algorithm trained using only :math:`Z` :footcite:p:`chalupka2018FastConditionalIndependence`.
References
----------
.. footbibliography::
"""
def __init__(
self,
model=DecisionTreeRegressor(),
cv_grid={"min_samples_split": [2, 8, 64, 512, 1e-2, 0.2, 0.4]},
num_perm=8,
prop_test=0.1,
discrete=(False, False),
):
self.model = model
self.cv_grid = cv_grid
self.num_perm = num_perm
self.prop_test = prop_test
self.discrete = discrete
ConditionalIndependenceTest.__init__(self)
def statistic(self, x, y, z=None):
r"""
Calculates the FCIT test statistic.
Parameters
----------
x,y,z : ndarray of float
Input data matrices.
Returns
-------
stat : float
The computed FCIT test statistic.
two_sided: float
Two-sided p-value associated with test statistic
"""
n_samples = x.shape[0]
n_test = int(n_samples * self.prop_test)
data_permutations = [
np.random.permutation(x.shape[0]) for i in range(self.num_perm)
]
clf = _cross_val(x, y, z, self.cv_grid, self.model, prop_test=self.prop_test)
datadict = {
"x": x,
"y": y,
"z": z,
"data_permutation": data_permutations,
"n_test": n_test,
"reshuffle": False,
"clf": clf,
}
d1_stats = np.array(
joblib.Parallel(n_jobs=-1, max_nbytes=100e6)(
joblib.delayed(_obtain_error)((datadict, i))
for i in range(self.num_perm)
)
)
if z.shape[1] == 0:
x_indep_y = x[np.random.permutation(n_samples)]
else:
x_indep_y = np.empty([x.shape[0], 0])
clf = _cross_val(
x_indep_y, y, z, self.cv_grid, self.model, prop_test=self.prop_test
)
datadict["reshuffle"] = True
datadict["x"] = x_indep_y
d0_stats = np.array(
joblib.Parallel(n_jobs=-1, max_nbytes=100e6)(
joblib.delayed(_obtain_error)((datadict, i))
for i in range(self.num_perm)
)
)
stat, two_sided = ttest_1samp(d0_stats / d1_stats, 1)
return stat, two_sided
def test(self, x, y, z=None):
r"""
Calculates the FCIT test statistic and p-value.
Parameters
----------
x,y,z : ndarray of float
Input data matrices.
Returns
-------
stat : float
The computed FCIT statistic.
pvalue : float
The computed FCIT p-value.
Examples
--------
>>> import numpy as np
>>> from hyppo.conditional import FCIT
>>> from sklearn.tree import DecisionTreeRegressor
>>> np.random.seed(1234)
>>> dim = 2
>>> n = 100000
>>> z1 = np.random.multivariate_normal(mean=np.zeros(dim), cov=np.eye(dim), size=(n))
>>> A1 = np.random.normal(loc=0, scale=1, size=dim * dim).reshape(dim, dim)
>>> B1 = np.random.normal(loc=0, scale=1, size=dim * dim).reshape(dim, dim)
>>> x1 = (A1 @ z1.T + np.random.multivariate_normal(mean=np.zeros(dim), cov=np.eye(dim), size=(n)).T)
>>> y1 = (B1 @ z1.T + np.random.multivariate_normal(mean=np.zeros(dim), cov=np.eye(dim), size=(n)).T)
>>> model = DecisionTreeRegressor()
>>> cv_grid = {"min_samples_split": [2, 8, 64, 512, 1e-2, 0.2, 0.4]}
>>> stat, pvalue = FCIT(model=model, cv_grid=cv_grid).test(x1.T, y1.T, z1)
>>> '%.2f, %.3f' % (stat, pvalue)
'-3.59, 0.995'
"""
n_samples = x.shape[0]
if z is None:
z = np.empty([n_samples, 0])
if self.discrete[0] and not self.discrete[1]:
x, y = y, x
elif x.shape[1] < y.shape[1]:
x, y = y, x
y = StandardScaler().fit_transform(y)
stat, two_sided = self.statistic(x, y, z)
if stat < 0:
pvalue = 1 - two_sided / 2
else:
pvalue = two_sided / 2
return ConditionalIndependenceTestOutput(stat, pvalue)
def _cross_val(x, y, z, cv_grid, model, prop_test):
"""
Choose the regression hyperparameters by
cross-validation.
"""
splitter = ShuffleSplit(n_splits=3, test_size=prop_test)
cv = GridSearchCV(estimator=model, cv=splitter, param_grid=cv_grid, n_jobs=-1)
cv.fit(_interleave(x, z), y)
return type(model)(**cv.best_params_)
def _interleave(x, z, seed=None):
"""Interleave x and z dimension-wise."""
state = np.random.get_state()
np.random.seed(seed or int(time.time()))
total_ids = np.random.permutation(x.shape[1] + z.shape[1])
np.random.set_state(state)
out = np.zeros([x.shape[0], x.shape[1] + z.shape[1]])
out[:, total_ids[: x.shape[1]]] = x
out[:, total_ids[x.shape[1] :]] = z
return out
def _obtain_error(data_and_i):
    """Worker for the multithreaded FCIT statistic computation.

    Unpacks one (shared-data, permutation-index) job, optionally
    reshuffles the rows of x, fits the regressor on the training split
    of the interleaved (x, z) features and returns the MSE of its
    predictions on the held-out test split.
    """
    data, i = data_and_i
    x, y, z = data["x"], data["y"], data["z"]
    if data["reshuffle"]:
        row_order = np.random.permutation(x.shape[0])
    else:
        row_order = np.arange(x.shape[0])
    perm = data["data_permutation"][i]
    n_test = data["n_test"]
    regressor = data["clf"]
    features = _interleave(x[row_order], z, seed=i)
    regressor.fit(features[perm][n_test:], y[perm][n_test:])
    return mse(
        y[perm][:n_test], regressor.predict(features[perm][:n_test])
    )
| [
"sklearn.model_selection.GridSearchCV",
"sklearn.preprocessing.StandardScaler",
"sklearn.tree.DecisionTreeRegressor",
"numpy.random.get_state",
"numpy.empty",
"numpy.zeros",
"scipy.stats.ttest_1samp",
"numpy.random.set_state",
"time.time",
"numpy.arange",
"joblib.Parallel",
"numpy.random.permu... | [((6550, 6595), 'sklearn.model_selection.ShuffleSplit', 'ShuffleSplit', ([], {'n_splits': '(3)', 'test_size': 'prop_test'}), '(n_splits=3, test_size=prop_test)\n', (6562, 6595), False, 'from sklearn.model_selection import ShuffleSplit\n'), ((6605, 6678), 'sklearn.model_selection.GridSearchCV', 'GridSearchCV', ([], {'estimator': 'model', 'cv': 'splitter', 'param_grid': 'cv_grid', 'n_jobs': '(-1)'}), '(estimator=model, cv=splitter, param_grid=cv_grid, n_jobs=-1)\n', (6617, 6678), False, 'from sklearn.model_selection import GridSearchCV\n'), ((6848, 6869), 'numpy.random.get_state', 'np.random.get_state', ([], {}), '()\n', (6867, 6869), True, 'import numpy as np\n'), ((6931, 6977), 'numpy.random.permutation', 'np.random.permutation', (['(x.shape[1] + z.shape[1])'], {}), '(x.shape[1] + z.shape[1])\n', (6952, 6977), True, 'import numpy as np\n'), ((6982, 7008), 'numpy.random.set_state', 'np.random.set_state', (['state'], {}), '(state)\n', (7001, 7008), True, 'import numpy as np\n'), ((7019, 7066), 'numpy.zeros', 'np.zeros', (['[x.shape[0], x.shape[1] + z.shape[1]]'], {}), '([x.shape[0], x.shape[1] + z.shape[1]])\n', (7027, 7066), True, 'import numpy as np\n'), ((2334, 2357), 'sklearn.tree.DecisionTreeRegressor', 'DecisionTreeRegressor', ([], {}), '()\n', (2355, 2357), False, 'from sklearn.tree import DecisionTreeRegressor\n'), ((4443, 4478), 'scipy.stats.ttest_1samp', 'ttest_1samp', (['(d0_stats / d1_stats)', '(1)'], {}), '(d0_stats / d1_stats, 1)\n', (4454, 4478), False, 'from scipy.stats import ttest_1samp\n'), ((7467, 7500), 'numpy.random.permutation', 'np.random.permutation', (['x.shape[0]'], {}), '(x.shape[0])\n', (7488, 7500), True, 'import numpy as np\n'), ((7530, 7551), 'numpy.arange', 'np.arange', (['x.shape[0]'], {}), '(x.shape[0])\n', (7539, 7551), True, 'import numpy as np\n'), ((3249, 3282), 'numpy.random.permutation', 'np.random.permutation', (['x.shape[0]'], {}), '(x.shape[0])\n', (3270, 3282), True, 'import numpy as np\n'), 
((3983, 4008), 'numpy.empty', 'np.empty', (['[x.shape[0], 0]'], {}), '([x.shape[0], 0])\n', (3991, 4008), True, 'import numpy as np\n'), ((5959, 5983), 'numpy.empty', 'np.empty', (['[n_samples, 0]'], {}), '([n_samples, 0])\n', (5967, 5983), True, 'import numpy as np\n'), ((3679, 3729), 'joblib.Parallel', 'joblib.Parallel', ([], {'n_jobs': '(-1)', 'max_nbytes': '(100000000.0)'}), '(n_jobs=-1, max_nbytes=100000000.0)\n', (3694, 3729), False, 'import joblib\n'), ((3911, 3943), 'numpy.random.permutation', 'np.random.permutation', (['n_samples'], {}), '(n_samples)\n', (3932, 3943), True, 'import numpy as np\n'), ((4239, 4289), 'joblib.Parallel', 'joblib.Parallel', ([], {'n_jobs': '(-1)', 'max_nbytes': '(100000000.0)'}), '(n_jobs=-1, max_nbytes=100000000.0)\n', (4254, 4289), False, 'import joblib\n'), ((6138, 6154), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), '()\n', (6152, 6154), False, 'from sklearn.preprocessing import StandardScaler\n'), ((6901, 6912), 'time.time', 'time.time', ([], {}), '()\n', (6910, 6912), False, 'import time\n'), ((3741, 3770), 'joblib.delayed', 'joblib.delayed', (['_obtain_error'], {}), '(_obtain_error)\n', (3755, 3770), False, 'import joblib\n'), ((4301, 4330), 'joblib.delayed', 'joblib.delayed', (['_obtain_error'], {}), '(_obtain_error)\n', (4315, 4330), False, 'import joblib\n')] |
import numpy as np
import scipy.io as scio
# Load the reference solution and set up a uniform 1-D grid of N elements
# on [xmin, xmax] for the DG linear-wave computation below.
u = scio.loadmat('linwave_u.mat')['u']
N = 128           # number of elements
timeFinal = 4     # final integration time (re-assigned again further down)
CFL = 0.2         # CFL number used later to size the timestep
xmin = -1
xmax = 1
m = 1             # polynomial order per element
h = (xmax - xmin) / N   # element width
# x[0, i] / x[1, i] hold the left / right endpoint of element i.
x = np.zeros((2,N))
for i in range(0,N):
    x[0,i] = xmin + i * h
    x[1,i] = x[0,i] + h
def LegendreGL(m):
    """Legendre-Gauss-Lobatto nodes and weights on [-1, 1] for order m.

    Only m == 1 and m == 2 return real LGL rules; for any other order
    the eigenvalue construction was never finished, so the zero arrays
    come back unchanged (J and h1 are built but unused).
    """
    nodes = np.zeros(m + 1)
    weights = np.zeros(m + 1)
    if m == 1:
        nodes, weights = np.array([-1, 1]), np.array([1, 1])
    elif m == 2:
        nodes, weights = np.array([-1, 0, 1]), np.array([1 / 3, 4 / 3, 1 / 3])
    else:
        # NOTE(review): dead scaffolding -- computed but never used.
        J = np.zeros(m - 1)
        h1 = np.linspace(0, m - 2, m - 1) * 2 + 2
    return nodes, weights
def LegendreGQ(m):
    """Legendre-Gauss quadrature nodes and weights of order m.

    m == 0 is the midpoint rule (node 0, weight 2).  Otherwise the
    nodes are the eigenvalues of the symmetric tridiagonal Jacobi
    matrix and the weights follow from the first component of each
    eigenvector (Golub-Welsch).  Node order is whatever
    ``np.linalg.eig`` returns (unsorted).
    """
    if m == 0:
        return np.array([0.0]), np.array([2.0])
    diag2 = np.linspace(0, m, m + 1) * 2
    J = np.zeros((m + 1, m + 1))
    for k in range(m):
        off = 2 / (diag2[k] + 2) * np.sqrt(
            (k + 1) ** 4 / (diag2[k] + 1) / (diag2[k] + 3))
        J[k, k + 1] = J[k + 1, k] = off
    nodes, vecs = np.linalg.eig(J)
    weights = 2 * vecs[0, :] ** 2
    return nodes, weights
def LegendreP(localx,localm):
    """Evaluate the orthonormal Legendre polynomial of degree ``localm``.

    Normalisation is such that the polynomials integrate to 1 against
    themselves on [-1, 1]: P~_0 = sqrt(1/2), P~_1 = sqrt(3/2) x, ...

    Parameters
    ----------
    localx : array_like of evaluation points.
    localm : int, polynomial degree (>= 0).

    Returns
    -------
    1-D ndarray with P~_localm evaluated at ``localx``.

    Bug fix: the three-term recurrence x p_n = b_n p_{n-1} + b_{n+1} p_{n+1}
    (with b_n = n / sqrt(4 n^2 - 1)) previously recomputed b_{i+1} for
    ``anew`` instead of b_{i+2}, so every degree >= 2 came out with the
    wrong normalisation (latent here because the script runs with m = 1).
    """
    PL = np.zeros((localm+1,len(localx)))
    PL[0,:] = np.sqrt(1 / 2)
    if localm == 0:
        return PL[0,:]
    PL[1,:] = np.sqrt(3 / 2)*localx
    if localm == 1:
        return PL[1,:]
    aold = np.sqrt(1 / 3)   # b_1
    for i in range(localm - 1):
        # b_{i+2} = (i+2) / sqrt((2i+3)(2i+5)); the (i+2)**4 under the
        # root with the 2/(2i+4) prefactor is an equivalent spelling.
        anew = 2/(2*i+4)*np.sqrt((i+2)**4/(2*i+3)/(2*i+5))
        PL[i+2,:] = 1/anew*(-aold*PL[i,:] + localx*PL[i+1,:])
        aold = anew
    return PL[localm,:]
def GradLegendreP(r0,m0):
    """Derivative of the orthonormal Legendre polynomial of degree m0 at r0.

    Interior points use the identity
        (1 - r^2) P~_m'(r) = -m r P~_m(r) + m sqrt((2m+1)/(2m-1)) P~_{m-1}(r),
    while the endpoints r = +/-1 (where that expression is 0/0) use the
    closed-form endpoint value r^(m+1) m (m+1)/2 * sqrt((2m+1)/2).
    For m0 == 0 the derivative is identically zero.

    Bug fix: the endpoint mask previously tested the module-level
    global ``r`` instead of the argument ``r0``; that only worked when
    the caller happened to pass ``r`` itself.
    """
    dP = np.zeros((len(r0)))
    if m0 > 0:
        Ph = -m0*r0*LegendreP(r0,m0) + m0*np.sqrt((2*m0+1)/(2*m0-1))*LegendreP(r0,m0-1)
        dPe = r0**(m0+1)*m0*(m0+1)/2*np.sqrt((2*m0+1)/2)
        # Boolean mask: True at interior points, False at r0 = +/-1.
        endP = (abs(abs(r0)-1)>10*1e-10)
        rh = r0 * endP
        dP = ~endP * dPe + endP * Ph / (1 - rh**2)
    return dP
def VandermondeDG(m,r):
    """Vandermonde matrix V[i, j] = P~_j(r_i) of orthonormal Legendre
    polynomials of degree 0..m evaluated at the nodes ``r``."""
    return np.column_stack([LegendreP(r, degree) for degree in range(m + 1)])
def GradVandermondeDG(m,r):
    """Gradient Vandermonde matrix Vr[i, j] = P~_j'(r_i) of orthonormal
    Legendre polynomial derivatives of degree 0..m at the nodes ``r``."""
    return np.column_stack([GradLegendreP(r, degree) for degree in range(m + 1)])
# Build the local DG operators on the reference element [-1, 1]:
# differentiation matrix D, mass matrix Ma, stiffness matrix S.
r,waste = LegendreGL(m)
V = VandermondeDG(m,r)
Vr = GradVandermondeDG(m,r)
D = np.dot(Vr,np.linalg.inv(V))
Ma = np.linalg.inv(np.dot(V,np.transpose(V)))
S = np.dot(Ma,D)
Vinv = np.linalg.inv(V)
# Q accumulates the smoothness-indicator operator; Xm / Xp hold basis
# evaluations shifted to the left / right neighbouring element.
Q = np.zeros((m+1,m+1))
Pmat = np.zeros((m+1,m+1))
Xm = np.zeros((m+1,m+1))
Xp = np.zeros((m+1,m+1))
# NOTE(review): this re-binds the grid array `x` from the setup above to
# the (negated) Gauss quadrature nodes.
x,w = LegendreGQ(m)
x = -x
for i in range(m+1):
    Pmat[i,:] = LegendreP(x,i)
    Xm[i,:] = LegendreP(x-2,i)
    Xp[i,:] = LegendreP(x+2,i)
for l in range(m):
    # Diagonal matrix of quadrature weights.
    lamb = np.zeros((m+1,m+1))
    for i in range(m+1):
        lamb[i,i] = w[i]
    # Set up the operator used to recover derivatives.
    A = np.zeros((m+1-l,m+1-l))
    A[0,0] = 1 / np.sqrt((2 * l + 3) * (2 * l + 1))
    A[m-l,m-l] = 1 / np.sqrt(2 * (m + 2) + 1) / np.sqrt(2 * (m+2) - 1)
    for i in range(1,m-l):
        Ah = 1 / np.sqrt(2*(l + i) + 1) / np.sqrt(2*(l + i) - 1)
        A[i,i] = Ah
        A[i+1,i-1] = - Ah
    # Recover derivatives at the quadrature points.
    Ph1 = np.dot(np.linalg.inv(A),Pmat[l:m+1,:])
    Pmat[0:l+1,:] = 0
    Pmat[l+1:m+1,:] = Ph1[0:m-l,:]
    # Compute the smoothness operator for order l and accumulate into Q.
    Qtemp = np.dot(Pmat,lamb)
    Qh = np.dot(Qtemp,np.transpose(Pmat))
    Q = Q + 2**(2*l+1)*Qh
# Transform the smoothness indicator operator into nodal space.
Q = np.dot(np.dot(np.transpose(Vinv),Q),Vinv)
Xp = np.dot(np.transpose(Vinv),Xp)
Xm = np.dot(np.transpose(Vinv),Xm)
# Extraction vector: global DOF index of the first / last node of each
# element (row 0 = left trace, row 1 = right trace).
VtoE = np.zeros((2,N))
for j in range(N):
    VtoE[0,j] = j * (m + 1)
    VtoE[1,j] = (j + 1) * (m + 1) - 1
# Smallest spatial scale on the reference element, used for the timestep.
rLGLmin = abs(r[0] - r[1])
time = 0   # NOTE: plain variable, not the stdlib `time` module
tstep = 0
# Set timestep from the CFL condition.
k = CFL*rLGLmin*h;
time = 0
timeFinal = 4
# NOTE(review): the integration loop body is a stub -- it sets
# time = 5 > timeFinal, so it executes exactly once and exits.
while(time < timeFinal):
    time = 5
| [
"scipy.io.loadmat",
"numpy.zeros",
"numpy.transpose",
"numpy.linalg.eig",
"numpy.linalg.inv",
"numpy.array",
"numpy.linspace",
"numpy.dot",
"numpy.sqrt"
] | [((165, 181), 'numpy.zeros', 'np.zeros', (['(2, N)'], {}), '((2, N))\n', (173, 181), True, 'import numpy as np\n'), ((2330, 2343), 'numpy.dot', 'np.dot', (['Ma', 'D'], {}), '(Ma, D)\n', (2336, 2343), True, 'import numpy as np\n'), ((2350, 2366), 'numpy.linalg.inv', 'np.linalg.inv', (['V'], {}), '(V)\n', (2363, 2366), True, 'import numpy as np\n'), ((2371, 2395), 'numpy.zeros', 'np.zeros', (['(m + 1, m + 1)'], {}), '((m + 1, m + 1))\n', (2379, 2395), True, 'import numpy as np\n'), ((2398, 2422), 'numpy.zeros', 'np.zeros', (['(m + 1, m + 1)'], {}), '((m + 1, m + 1))\n', (2406, 2422), True, 'import numpy as np\n'), ((2423, 2447), 'numpy.zeros', 'np.zeros', (['(m + 1, m + 1)'], {}), '((m + 1, m + 1))\n', (2431, 2447), True, 'import numpy as np\n'), ((2448, 2472), 'numpy.zeros', 'np.zeros', (['(m + 1, m + 1)'], {}), '((m + 1, m + 1))\n', (2456, 2472), True, 'import numpy as np\n'), ((3585, 3601), 'numpy.zeros', 'np.zeros', (['(2, N)'], {}), '((2, N))\n', (3593, 3601), True, 'import numpy as np\n'), ((47, 76), 'scipy.io.loadmat', 'scio.loadmat', (['"""linwave_u.mat"""'], {}), "('linwave_u.mat')\n", (59, 76), True, 'import scipy.io as scio\n'), ((289, 304), 'numpy.zeros', 'np.zeros', (['(m + 1)'], {}), '(m + 1)\n', (297, 304), True, 'import numpy as np\n'), ((318, 333), 'numpy.zeros', 'np.zeros', (['(m + 1)'], {}), '(m + 1)\n', (326, 333), True, 'import numpy as np\n'), ((649, 664), 'numpy.zeros', 'np.zeros', (['(m + 1)'], {}), '(m + 1)\n', (657, 664), True, 'import numpy as np\n'), ((678, 693), 'numpy.zeros', 'np.zeros', (['(m + 1)'], {}), '(m + 1)\n', (686, 693), True, 'import numpy as np\n'), ((1199, 1213), 'numpy.sqrt', 'np.sqrt', (['(1 / 2)'], {}), '(1 / 2)\n', (1206, 1213), True, 'import numpy as np\n'), ((1347, 1361), 'numpy.sqrt', 'np.sqrt', (['(1 / 3)'], {}), '(1 / 3)\n', (1354, 1361), True, 'import numpy as np\n'), ((2262, 2278), 'numpy.linalg.inv', 'np.linalg.inv', (['V'], {}), '(V)\n', (2275, 2278), True, 'import numpy as np\n'), ((2639, 2663), 
'numpy.zeros', 'np.zeros', (['(m + 1, m + 1)'], {}), '((m + 1, m + 1))\n', (2647, 2663), True, 'import numpy as np\n'), ((2761, 2793), 'numpy.zeros', 'np.zeros', (['(m + 1 - l, m + 1 - l)'], {}), '((m + 1 - l, m + 1 - l))\n', (2769, 2793), True, 'import numpy as np\n'), ((3277, 3295), 'numpy.dot', 'np.dot', (['Pmat', 'lamb'], {}), '(Pmat, lamb)\n', (3283, 3295), True, 'import numpy as np\n'), ((3488, 3506), 'numpy.transpose', 'np.transpose', (['Vinv'], {}), '(Vinv)\n', (3500, 3506), True, 'import numpy as np\n'), ((3523, 3541), 'numpy.transpose', 'np.transpose', (['Vinv'], {}), '(Vinv)\n', (3535, 3541), True, 'import numpy as np\n'), ((366, 383), 'numpy.array', 'np.array', (['[-1, 1]'], {}), '([-1, 1])\n', (374, 383), True, 'import numpy as np\n'), ((400, 416), 'numpy.array', 'np.array', (['[1, 1]'], {}), '([1, 1])\n', (408, 416), True, 'import numpy as np\n'), ((775, 799), 'numpy.zeros', 'np.zeros', (['(m + 1, m + 1)'], {}), '((m + 1, m + 1))\n', (783, 799), True, 'import numpy as np\n'), ((998, 1014), 'numpy.linalg.eig', 'np.linalg.eig', (['J'], {}), '(J)\n', (1011, 1014), True, 'import numpy as np\n'), ((1271, 1285), 'numpy.sqrt', 'np.sqrt', (['(3 / 2)'], {}), '(3 / 2)\n', (1278, 1285), True, 'import numpy as np\n'), ((2308, 2323), 'numpy.transpose', 'np.transpose', (['V'], {}), '(V)\n', (2320, 2323), True, 'import numpy as np\n'), ((2802, 2836), 'numpy.sqrt', 'np.sqrt', (['((2 * l + 3) * (2 * l + 1))'], {}), '((2 * l + 3) * (2 * l + 1))\n', (2809, 2836), True, 'import numpy as np\n'), ((2885, 2909), 'numpy.sqrt', 'np.sqrt', (['(2 * (m + 2) - 1)'], {}), '(2 * (m + 2) - 1)\n', (2892, 2909), True, 'import numpy as np\n'), ((3110, 3126), 'numpy.linalg.inv', 'np.linalg.inv', (['A'], {}), '(A)\n', (3123, 3126), True, 'import numpy as np\n'), ((3317, 3335), 'numpy.transpose', 'np.transpose', (['Pmat'], {}), '(Pmat)\n', (3329, 3335), True, 'import numpy as np\n'), ((3448, 3466), 'numpy.transpose', 'np.transpose', (['Vinv'], {}), '(Vinv)\n', (3460, 3466), True, 'import 
numpy as np\n'), ((450, 470), 'numpy.array', 'np.array', (['[-1, 0, 1]'], {}), '([-1, 0, 1])\n', (458, 470), True, 'import numpy as np\n'), ((486, 517), 'numpy.array', 'np.array', (['[1 / 3, 4 / 3, 1 / 3]'], {}), '([1 / 3, 4 / 3, 1 / 3])\n', (494, 517), True, 'import numpy as np\n'), ((532, 547), 'numpy.zeros', 'np.zeros', (['(m - 1)'], {}), '(m - 1)\n', (540, 547), True, 'import numpy as np\n'), ((808, 832), 'numpy.linspace', 'np.linspace', (['(0)', 'm', '(m + 1)'], {}), '(0, m, m + 1)\n', (819, 832), True, 'import numpy as np\n'), ((1419, 1468), 'numpy.sqrt', 'np.sqrt', (['((i + 1) ** 4 / (2 * i + 1) / (2 * i + 3))'], {}), '((i + 1) ** 4 / (2 * i + 1) / (2 * i + 3))\n', (1426, 1468), True, 'import numpy as np\n'), ((1761, 1786), 'numpy.sqrt', 'np.sqrt', (['((2 * m0 + 1) / 2)'], {}), '((2 * m0 + 1) / 2)\n', (1768, 1786), True, 'import numpy as np\n'), ((2858, 2882), 'numpy.sqrt', 'np.sqrt', (['(2 * (m + 2) + 1)'], {}), '(2 * (m + 2) + 1)\n', (2865, 2882), True, 'import numpy as np\n'), ((2977, 3001), 'numpy.sqrt', 'np.sqrt', (['(2 * (l + i) - 1)'], {}), '(2 * (l + i) - 1)\n', (2984, 3001), True, 'import numpy as np\n'), ((912, 964), 'numpy.sqrt', 'np.sqrt', (['((i0 + 1) ** 4 / (h1[i0] + 1) / (h1[i0] + 3))'], {}), '((i0 + 1) ** 4 / (h1[i0] + 1) / (h1[i0] + 3))\n', (919, 964), True, 'import numpy as np\n'), ((2952, 2976), 'numpy.sqrt', 'np.sqrt', (['(2 * (l + i) + 1)'], {}), '(2 * (l + i) + 1)\n', (2959, 2976), True, 'import numpy as np\n'), ((561, 589), 'numpy.linspace', 'np.linspace', (['(0)', '(m - 2)', '(m - 1)'], {}), '(0, m - 2, m - 1)\n', (572, 589), True, 'import numpy as np\n'), ((1678, 1714), 'numpy.sqrt', 'np.sqrt', (['((2 * m0 + 1) / (2 * m0 - 1))'], {}), '((2 * m0 + 1) / (2 * m0 - 1))\n', (1685, 1714), True, 'import numpy as np\n')] |
import matplotlib
# Select a non-interactive backend so figures can be saved headlessly.
matplotlib.use("Agg")
from pspy import so_dict, pspy_utils
import maps_to_params_utils
import numpy as np
import pylab as plt
import sys, os
# Read the run dictionary from the first CLI argument; it supplies the
# experiment list, frequencies, lmax, binning file and multistep path.
d = so_dict.so_dict()
d.read_from_file(sys.argv[1])
cov_dir = "covariances"
mc_dir = "montecarlo"
cov_plot_dir = "plots/full_covariance"
pspy_utils.create_directory(cov_plot_dir)
experiments = d["experiments"]
lmax = d["lmax"]
binning_file = d["binning_file"]
multistep_path = d["multistep_path"]
bin_lo, bin_hi, bin_c, bin_size = pspy_utils.read_binning_file(binning_file, lmax)
nbins = len(bin_hi)
# Enumerate every unique experiment/frequency cross-spectrum name,
# skipping the redundant orderings (within an experiment keep f1 <= f2;
# across experiments keep exp1 <= exp2).
spec_name = []
for id_exp1, exp1 in enumerate(experiments):
    freqs1 = d["freqs_%s" % exp1]
    for id_f1, f1 in enumerate(freqs1):
        for id_exp2, exp2 in enumerate(experiments):
            freqs2 = d["freqs_%s" % exp2]
            for id_f2, f2 in enumerate(freqs2):
                if (id_exp1 == id_exp2) & (id_f1 >id_f2) : continue
                if  (id_exp1 > id_exp2) : continue
                spec_name += ["%s_%sx%s_%s" % (exp1, f1, exp2, f2)]
# Load each pairwise analytic covariance and slice it into its
# nbins x nbins (spectrum1, spectrum2) sub-blocks.
analytic_dict= {}
spectra = ["TT", "TE", "ET", "EE"]
nspec = len(spec_name)
for sid1, name1 in enumerate(spec_name):
    for sid2, name2 in enumerate(spec_name):
        if sid1 > sid2: continue
        print (name1, name2)
        na, nb = name1.split("x")
        nc, nd = name2.split("x")
        analytic_cov = np.load("%s/analytic_cov_%sx%s_%sx%s.npy" % (cov_dir, na, nb, nc, nd))
        for s1, spec1 in enumerate(spectra):
            for s2, spec2 in enumerate(spectra):
                sub_cov = analytic_cov[s1 * nbins:(s1 + 1) * nbins, s2 * nbins:(s2 + 1) * nbins]
                analytic_dict[sid1, sid2, s1, s2] = sub_cov
# Assemble the upper triangle of the full covariance, ordered first by
# spectrum type (TT/TE/ET/EE), then by cross-spectrum, then by bin.
full_analytic_cov = np.zeros((4 * nspec * nbins, 4 * nspec * nbins))
for sid1, name1 in enumerate(spec_name):
    for sid2, name2 in enumerate(spec_name):
        if sid1 > sid2: continue
        na, nb = name1.split("x")
        nc, nd = name2.split("x")
        for s1, spec1 in enumerate(spectra):
            for s2, spec2 in enumerate(spectra):
                id_start_1 = sid1 * nbins + s1 * nspec * nbins
                id_stop_1 = (sid1 + 1) * nbins + s1 * nspec * nbins
                id_start_2 = sid2 * nbins + s2 * nspec * nbins
                id_stop_2 = (sid2 + 1) * nbins + s2 * nspec * nbins
                full_analytic_cov[id_start_1:id_stop_1, id_start_2: id_stop_2] = analytic_dict[sid1, sid2, s1, s2]
# Symmetrise: copy the upper triangle into the still-zero entries of
# the transpose, then add.
transpose = full_analytic_cov.copy().T
transpose[full_analytic_cov != 0] = 0
full_analytic_cov += transpose
np.save("%s/full_analytic_cov.npy"%cov_dir, full_analytic_cov)
# Drop the ET blocks of auto-spectra (same map on both sides), since
# they duplicate the corresponding TE blocks.
block_to_delete = []
for sid, name in enumerate(spec_name):
    na, nb = name.split("x")
    for s, spec in enumerate(spectra):
        id_start = sid * nbins + s * nspec * nbins
        id_stop = (sid + 1) * nbins + s * nspec * nbins
        if (na == nb) & (spec == 'ET'):
            block_to_delete = np.append(block_to_delete, np.arange(id_start, id_stop))
full_analytic_cov = np.delete(full_analytic_cov, block_to_delete, axis=1)
full_analytic_cov = np.delete(full_analytic_cov, block_to_delete, axis=0)
np.save("%s/truncated_analytic_cov.npy"%cov_dir, full_analytic_cov)
# Sanity-check the truncated covariance, then compare each (block-)
# diagonal against the Monte-Carlo estimate and write an HTML browser.
print ("is matrix positive definite:", maps_to_params_utils.is_pos_def(full_analytic_cov))
print ("is matrix symmetric :", maps_to_params_utils.is_symmetric(full_analytic_cov))
size=int(full_analytic_cov.shape[0]/nbins)
full_mc_cov = np.load("%s/cov_restricted_all_cross.npy"%mc_dir)
os.system("cp %s/multistep2.js %s/multistep2.js" % (multistep_path, cov_plot_dir))
# NOTE(review): `file` shadows the Python 2 builtin of the same name.
file = "%s/covariance.html" % (cov_plot_dir)
g = open(file, mode="w")
g.write('<html>\n')
g.write('<head>\n')
g.write('<title> covariance </title>\n')
g.write('<script src="multistep2.js"></script>\n')
g.write('<script> add_step("sub", ["c","v"]) </script> \n')
g.write('<style> \n')
g.write('body { text-align: center; } \n')
g.write('img { width: 100%; max-width: 1200px; } \n')
g.write('</style> \n')
g.write('</head> \n')
g.write('<body> \n')
g.write('<div class=sub>\n')
count=0
# One figure per block-diagonal offset (in units of nbins), covering
# all off-diagonals from -(size-1) to +(size-1).
for ispec in range(-size+1, size):
    rows, cols = np.indices(full_mc_cov.shape)
    row_vals = np.diag(rows, k=ispec*nbins)
    col_vals = np.diag(cols, k=ispec*nbins)
    # Mask that zeroes exactly the selected block-diagonal.
    mat = np.ones(full_mc_cov.shape)
    mat[row_vals, col_vals] = 0
    # NOTE(review): `str` shadows the builtin for the rest of this loop body.
    str = "cov_diagonal_%03d.png" % (count)
    plt.figure(figsize=(12,8))
    plt.subplot(1,2,1)
    # Left panel: log |diagonal| of analytic (line) vs Monte Carlo (dots).
    plt.plot(np.log(np.abs(full_analytic_cov.diagonal(ispec*nbins))))
    plt.plot(np.log(np.abs(full_mc_cov.diagonal(ispec*nbins))), '.')
    plt.legend()
    plt.subplot(1,2,2)
    # Right panel: the full analytic matrix with this diagonal blanked.
    plt.imshow(np.log(np.abs(full_analytic_cov*mat)))
    plt.savefig("%s/%s"%(cov_plot_dir,str))
    plt.clf()
    plt.close()
    g.write('<div class=sub>\n')
    g.write('<img src="'+str+'" /> \n')
    g.write('</div>\n')
    count+=1
g.write('</body> \n')
g.write('</html> \n')
g.close()
| [
"pylab.close",
"numpy.load",
"numpy.abs",
"numpy.ones",
"pylab.figure",
"numpy.arange",
"numpy.diag",
"pylab.legend",
"numpy.save",
"pspy.so_dict.so_dict",
"os.system",
"maps_to_params_utils.is_symmetric",
"numpy.indices",
"matplotlib.use",
"pylab.subplot",
"pylab.savefig",
"numpy.de... | [((18, 39), 'matplotlib.use', 'matplotlib.use', (['"""Agg"""'], {}), "('Agg')\n", (32, 39), False, 'import matplotlib\n'), ((164, 181), 'pspy.so_dict.so_dict', 'so_dict.so_dict', ([], {}), '()\n', (179, 181), False, 'from pspy import so_dict, pspy_utils\n'), ((299, 340), 'pspy.pspy_utils.create_directory', 'pspy_utils.create_directory', (['cov_plot_dir'], {}), '(cov_plot_dir)\n', (326, 340), False, 'from pspy import so_dict, pspy_utils\n'), ((496, 544), 'pspy.pspy_utils.read_binning_file', 'pspy_utils.read_binning_file', (['binning_file', 'lmax'], {}), '(binning_file, lmax)\n', (524, 544), False, 'from pspy import so_dict, pspy_utils\n'), ((1720, 1768), 'numpy.zeros', 'np.zeros', (['(4 * nspec * nbins, 4 * nspec * nbins)'], {}), '((4 * nspec * nbins, 4 * nspec * nbins))\n', (1728, 1768), True, 'import numpy as np\n'), ((2540, 2604), 'numpy.save', 'np.save', (["('%s/full_analytic_cov.npy' % cov_dir)", 'full_analytic_cov'], {}), "('%s/full_analytic_cov.npy' % cov_dir, full_analytic_cov)\n", (2547, 2604), True, 'import numpy as np\n'), ((2987, 3040), 'numpy.delete', 'np.delete', (['full_analytic_cov', 'block_to_delete'], {'axis': '(1)'}), '(full_analytic_cov, block_to_delete, axis=1)\n', (2996, 3040), True, 'import numpy as np\n'), ((3061, 3114), 'numpy.delete', 'np.delete', (['full_analytic_cov', 'block_to_delete'], {'axis': '(0)'}), '(full_analytic_cov, block_to_delete, axis=0)\n', (3070, 3114), True, 'import numpy as np\n'), ((3116, 3185), 'numpy.save', 'np.save', (["('%s/truncated_analytic_cov.npy' % cov_dir)", 'full_analytic_cov'], {}), "('%s/truncated_analytic_cov.npy' % cov_dir, full_analytic_cov)\n", (3123, 3185), True, 'import numpy as np\n'), ((3421, 3472), 'numpy.load', 'np.load', (["('%s/cov_restricted_all_cross.npy' % mc_dir)"], {}), "('%s/cov_restricted_all_cross.npy' % mc_dir)\n", (3428, 3472), True, 'import numpy as np\n'), ((3473, 3559), 'os.system', 'os.system', (["('cp %s/multistep2.js %s/multistep2.js' % (multistep_path, 
cov_plot_dir))"], {}), "('cp %s/multistep2.js %s/multistep2.js' % (multistep_path,\n cov_plot_dir))\n", (3482, 3559), False, 'import sys, os\n'), ((3224, 3274), 'maps_to_params_utils.is_pos_def', 'maps_to_params_utils.is_pos_def', (['full_analytic_cov'], {}), '(full_analytic_cov)\n', (3255, 3274), False, 'import maps_to_params_utils\n'), ((3308, 3360), 'maps_to_params_utils.is_symmetric', 'maps_to_params_utils.is_symmetric', (['full_analytic_cov'], {}), '(full_analytic_cov)\n', (3341, 3360), False, 'import maps_to_params_utils\n'), ((4100, 4129), 'numpy.indices', 'np.indices', (['full_mc_cov.shape'], {}), '(full_mc_cov.shape)\n', (4110, 4129), True, 'import numpy as np\n'), ((4145, 4175), 'numpy.diag', 'np.diag', (['rows'], {'k': '(ispec * nbins)'}), '(rows, k=ispec * nbins)\n', (4152, 4175), True, 'import numpy as np\n'), ((4189, 4219), 'numpy.diag', 'np.diag', (['cols'], {'k': '(ispec * nbins)'}), '(cols, k=ispec * nbins)\n', (4196, 4219), True, 'import numpy as np\n'), ((4228, 4254), 'numpy.ones', 'np.ones', (['full_mc_cov.shape'], {}), '(full_mc_cov.shape)\n', (4235, 4254), True, 'import numpy as np\n'), ((4341, 4368), 'pylab.figure', 'plt.figure', ([], {'figsize': '(12, 8)'}), '(figsize=(12, 8))\n', (4351, 4368), True, 'import pylab as plt\n'), ((4372, 4392), 'pylab.subplot', 'plt.subplot', (['(1)', '(2)', '(1)'], {}), '(1, 2, 1)\n', (4383, 4392), True, 'import pylab as plt\n'), ((4534, 4546), 'pylab.legend', 'plt.legend', ([], {}), '()\n', (4544, 4546), True, 'import pylab as plt\n'), ((4551, 4571), 'pylab.subplot', 'plt.subplot', (['(1)', '(2)', '(2)'], {}), '(1, 2, 2)\n', (4562, 4571), True, 'import pylab as plt\n'), ((4628, 4670), 'pylab.savefig', 'plt.savefig', (["('%s/%s' % (cov_plot_dir, str))"], {}), "('%s/%s' % (cov_plot_dir, str))\n", (4639, 4670), True, 'import pylab as plt\n'), ((4672, 4681), 'pylab.clf', 'plt.clf', ([], {}), '()\n', (4679, 4681), True, 'import pylab as plt\n'), ((4686, 4697), 'pylab.close', 'plt.close', ([], {}), '()\n', (4695, 
4697), True, 'import pylab as plt\n'), ((1351, 1421), 'numpy.load', 'np.load', (["('%s/analytic_cov_%sx%s_%sx%s.npy' % (cov_dir, na, nb, nc, nd))"], {}), "('%s/analytic_cov_%sx%s_%sx%s.npy' % (cov_dir, na, nb, nc, nd))\n", (1358, 1421), True, 'import numpy as np\n'), ((4592, 4623), 'numpy.abs', 'np.abs', (['(full_analytic_cov * mat)'], {}), '(full_analytic_cov * mat)\n', (4598, 4623), True, 'import numpy as np\n'), ((2936, 2964), 'numpy.arange', 'np.arange', (['id_start', 'id_stop'], {}), '(id_start, id_stop)\n', (2945, 2964), True, 'import numpy as np\n')] |
import os
import argparse
import datetime
import re
import pandas as pd
import numpy as np
from . import hgmd
from . import visualize as vis
from . import quads
import sys
import multiprocessing
import time
import math
import matplotlib.pyplot as plt
import random
import scipy.io
import csv
#from docs.source import conf
def init_parser(parser):
    """Register COMET's command-line arguments on *parser* and return it.

    The interface is unchanged: four positional arguments (marker, vis,
    cluster, output_path) plus the optional tuning flags, all with the
    same names, nargs and defaults as before.
    """
    specs = [
        (('marker',), dict(type=str, help="Marker file input")),
        (('vis',), dict(type=str, help="vis file input")),
        (('cluster',), dict(type=str, help="Cluster file input")),
        (('-g',), dict(nargs='?', default=None, help="Optional Gene list")),
        (('output_path',), dict(
            type=str,
            help="the output directory where output files should go")),
        (('-C',), dict(nargs='?', default=None,
                       help="Num of cores avail for parallelization")),
        (('-X',), dict(nargs='?', default=None, help="X argument for XL-mHG")),
        (('-L',), dict(nargs='?', default=None, help="L argument for XL-mHG")),
        (('-Abbrev',), dict(
            nargs='?', default=[],
            help="Choose between abbreviated or full 3-gene computation")),
        (('-K',), dict(nargs='?', default=None,
                       help="K-gene combinations to include")),
        (('-Down',), dict(nargs='?', default=False, help="Downsample")),
        (('-Trim',), dict(nargs='?', default=2000, help="Trim output files")),
        (('-Count',), dict(
            nargs='?', default=False,
            help="Set to True when count data is being used, for visualizations.")),
        (('-tenx',), dict(
            nargs='?', default=False,
            help="Set to True when count data is being used, for visualizations.")),
        (('-online',), dict(nargs='?', default=False,
                            help="Set to True for online version.")),
        (('-skipvis',), dict(nargs='?', default=False,
                             help="Set to True to skip visualizations.")),
    ]
    for flags, kwargs in specs:
        parser.add_argument(*flags, **kwargs)
    return parser
def read_data(cls_path, tsne_path, marker_path, gene_path, D, tenx, online,skipvis):
    """
    Read cluster assignments, visualization coordinates and the marker
    expression matrix (without complements) from the given paths.

    Parameters
    ----------
    cls_path : per-cell cluster file (tab- or comma-separated).
    tsne_path : 2-D visualization coordinate file; skipped when
        ``skipvis == 1`` (tsne is returned as None).
    marker_path : expression matrix file, or a 10X directory containing
        matrix.mtx / genes.tsv / barcodes.tsv when ``int(tenx) == 1``.
    gene_path : optional gene-list file used to subset the matrix, or None.
    D : downsample target (number of cells), or False to skip downsampling.
    tenx : truthy when the input is in 10X sparse-matrix format.
    online : accepted but not used inside this function.
    skipvis : 1 to skip reading the visualization file.

    Returns
    -------
    tuple of (cluster Series, tsne DataFrame or None,
              genes-as-columns expression DataFrame, gene_path)
    """
    # Try tab-separated first; if the index looks comma-delimited,
    # re-read with a comma separator.
    cls_ser = pd.read_csv(
        cls_path, sep='\t', index_col=0, names=['cell', 'cluster'], squeeze=True
    )
    if ',' in cls_ser.index[1]:
        cls_ser = pd.read_csv(
            cls_path, sep=',', index_col=0, names=['cell', 'cluster'], squeeze=True )
    if skipvis == 1:
        tsne = None
        pass
    else:
        # Same tab-then-comma fallback, detected via a NaN first coordinate.
        tsne = pd.read_csv(
            tsne_path, sep='\t', index_col=0, names=['cell', 'tSNE_1', 'tSNE_2']
        )
        if np.isnan(tsne['tSNE_1'][0]):
            tsne = pd.read_csv(
                tsne_path, sep=',', index_col=0, names=['cell', 'tSNE_1', 'tSNE_2'] )
    start_= time.time()
    tenx = int(tenx)
    if tenx == 1:
        # 10X path: densify the sparse matrix.mtx into a genes x barcodes
        # DataFrame, element by element.
        print('Loading 10X matrix')
        mat = scipy.io.mmread(marker_path+"matrix.mtx")
        features_path = marker_path + "genes.tsv"
        gene_names = [row[1] for row in csv.reader(open(features_path), delimiter="\t")]
        barcodes_path = marker_path + "barcodes.tsv"
        barcodes = [row[0] for row in csv.reader(open(barcodes_path), delimiter="\t")]
        #construct pandas dataframe w/ the pieces (gene names, barcodes, counts in sparse form)
        matrix = pd.DataFrame(index = gene_names, columns = barcodes )
        now = time.time()
        print('assembling expression matrix')
        for i,j,v in zip(mat.row, mat.col, mat.data):
            matrix.iat[i,j] = v
        matrix.fillna(0,inplace=True)
        noww = time.time()
        print(str(noww-now) + ' seconds')
        print('size: ' + str(matrix.shape))
        no_complement_marker_exp = matrix
        no_complement_marker_exp.rename_axis('cell',axis=1,inplace=True)
    else:
        #Should allow either tab OR comma delimited formats
        try:
            no_complement_marker_exp = pd.read_csv(
                marker_path,sep='\t', index_col=0
            ).rename_axis('cell',axis=1)
            # An empty parse means the separator was wrong: force the
            # comma-separated fallback below.
            if len(no_complement_marker_exp.columns) == 0:
                raise Exception
            elif len(no_complement_marker_exp.index) == 0:
                raise Exception
            else:
                pass
        except:
            no_complement_marker_exp = pd.read_csv(
                marker_path,sep=',', index_col=0
            ).rename_axis('cell',axis=1)
    # If the cluster file lists cells absent from the expression matrix,
    # drop those cells from the cluster series (in place).
    if no_complement_marker_exp.shape[1] == cls_ser.shape[0]:
        pass
    else:
        for index,row in cls_ser.iteritems():
            if str(index) in list(no_complement_marker_exp):
                continue
            else:
                cls_ser.drop(labels=index,inplace=True)
    #gene list filtering
    # Deduplicate, transpose to cells-as-rows/genes-as-columns, and
    # upper-case gene names (deduplicating again afterwards).
    no_complement_marker_exp = no_complement_marker_exp.loc[~no_complement_marker_exp.index.duplicated(keep='first')]
    no_complement_marker_exp = np.transpose(no_complement_marker_exp)
    no_complement_marker_exp.columns = [x.upper() for x in no_complement_marker_exp.columns]
    no_complement_marker_exp = no_complement_marker_exp.loc[~no_complement_marker_exp.index.duplicated(keep='first')]
    #gene filtering
    #-------------#
    if gene_path is None:
        pass
    else:
        #read the genes
        #Compatible with single line comma list OR one per line no commas OR mix of both
        master_gene_list = []
        with open(gene_path, "r") as genes:
            lines = genes.readlines()
        if len(lines) == 1:
            # Single-line file: treat it as one comma-separated list.
            with open(gene_path, "r") as genes:
                init_read = genes.read().splitlines()
                master_str = str.upper(init_read[0])
            master_gene_list = master_str.split(",")
        else:
            # Multi-line file: one gene per line, possibly with embedded
            # comma-separated entries that are split out afterwards.
            for i, line in enumerate(lines):
                if '\n' in line:
                    master_gene_list.append(line[:-1])
                else:
                    master_gene_list.append(line)
            for item in master_gene_list[:]:
                if ',' in item:
                    new_split = item.split(",")
                    master_gene_list.remove(item)
                    for ele in new_split:
                        master_gene_list.append(str.upper(ele))
        # Keep only the listed genes; ones missing from the matrix are
        # silently skipped.
        new_no_comp_mark_exp = pd.DataFrame()
        master_gene_list = [y.upper() for y in master_gene_list]
        for gene in master_gene_list:
            try:
                new_no_comp_mark_exp[gene] = no_complement_marker_exp[gene]
            except:
                pass
        no_complement_marker_exp = new_no_comp_mark_exp
        '''
        for column_name in no_complement_marker_exp.columns:
            if str.upper(column_name) in master_gene_list:
                pass
            else:
                try:
                    no_complement_marker_exp.drop(column_name, axis=1, inplace=True)
                except:
                    pass
        '''
    #-------------#
    #downsampling
    #-------------#
    if D is False:
        pass
    else:
        # get number of genes to set downsample threshold
        gene_numb = len(no_complement_marker_exp.columns)
        #print(gene_numb)
        # With many genes, cap the downsample target at 2500 cells.
        if gene_numb > 3000:
            if int(D) < int(2500):
                pass
            else:
                D = int(2500)
        #total number of cells input
        N = len(cls_ser)
        #print(N)
        #downsample target
        M = int(D)
        if N <= M:
            return (cls_ser, tsne, no_complement_marker_exp, gene_path)
        # Per-cluster quotas proportional to cluster size (rounded up),
        # so every cluster keeps at least one cell.
        clusters = sorted(cls_ser.unique())
        counts = { x : 0 for x in clusters}
        for clus in cls_ser:
            counts[clus] = counts[clus]+1
        #at this point counts has values for # cells in cls
        #dict goes like ->{ cluster:#cells }
        take_nums = {x : 0 for x in clusters}
        for clstr in take_nums:
            take_nums[clstr] = math.ceil(counts[clstr]*(M/N))
        summ = 0
        for key in take_nums:
            summ = summ + take_nums[key]
        #print('Downsampled cell num ' + str(summ))
        counts= { x : 0 for x in clusters}
        new_cls_ser = cls_ser.copy(deep=True)
        keep_first = 0
        for index,value in new_cls_ser.iteritems():
            keep_first = keep_first + 1
            if keep_first ==1:
                counts[value] = counts[value]+1
                continue
            new_cls_ser.drop(index,inplace=True)
        cls_ser.drop(cls_ser.index[0],inplace=True)
        #Now new_cls_ser has all removed except first item, which we can keep
        # Randomly draw cells without replacement until quotas are met.
        # NOTE(review): uses the global `random` module without seeding,
        # so results vary between runs.
        for num in range(N-1):
            init_rand_num = random.randint(0,N-num-1-1)
            if counts[cls_ser[init_rand_num]] >= take_nums[cls_ser[init_rand_num]]:
                cls_ser.drop(cls_ser.index[init_rand_num], inplace=True)
                continue
            new_cls_ser = new_cls_ser.append(pd.Series([cls_ser[init_rand_num]], index=[cls_ser.index[init_rand_num]]))
            counts[cls_ser[init_rand_num]] = counts[cls_ser[init_rand_num]]+1
            cls_ser.drop(cls_ser.index[init_rand_num], inplace=True)
        new_cls_ser.rename_axis('cell',inplace=True)
        new_cls_ser.rename('cluster', inplace=True)
        return(new_cls_ser,tsne,no_complement_marker_exp,gene_path)
    #-------------#
    return (cls_ser, tsne, no_complement_marker_exp, gene_path)
def process(cls,X,L,plot_pages,cls_ser,tsne,marker_exp,gene_file,csv_path,vis_path,pickle_path,cluster_number,K,abbrev,cluster_overall,Trim,count_data,skipvis):
    """Run the full marker-detection pipeline for one cluster ``cls``.

    Computes singleton statistics (t-test, fold change, XL-mHG, q-values),
    discretizes the expression matrix, scores gene pairs (and 3-/4-gene
    combinations when ``K`` >= 3 / ``K`` >= 4), ranks results, writes CSVs
    into ``csv_path``, pickles TP/TN tables into ``pickle_path`` and, unless
    ``skipvis`` is set, draws plots into ``vis_path``.

    NOTE(review): most stages are wrapped in broad try/except blocks that
    only print the error, so a failure in an early stage can surface later
    as a NameError on an undefined intermediate (e.g. ``cutoff_value``).
    """
    #for cls in clusters:
    # To understand the flow of this section, read the print statements.
    # Cap the heuristic pair/trip search at 50 genes (or fewer if the matrix is smaller).
    heur_limit = min(50,len(marker_exp.columns))
    start_cls_time = time.time()
    print('########\n# Processing cluster ' + str(cls) + '...\n########')
    print(str(K) + ' gene combinations')
    if len(abbrev) == 0:
        pass
    else:
        print('Heuristic limit set to: ' + str(heur_limit))
    # --- Singleton statistics ------------------------------------------------
    print('Running t test on singletons...')
    try:
        t_test = hgmd.batch_stats(marker_exp, cls_ser, cls)
    except Exception as err:
        print('t-test error')
        print(err)
    print('Calculating fold change')
    try:
        fc_test = hgmd.batch_fold_change(marker_exp, cls_ser, cls)
    except Exception as err:
        print('fold-change error')
        print(err)
    print('Running XL-mHG on singletons...')
    try:
        xlmhg = hgmd.batch_xlmhg(marker_exp, cls_ser, cls, X=X, L=L)
    except Exception as err:
        print('XLMHG error')
        print(err)
    try:
        q_val = hgmd.batch_q(xlmhg)
    except Exception as err:
        print('q-val error')
        print(err)
    # We need to slide the cutoff indices before using them,
    # to be sure they can be used in the real world. See hgmd.mhg_slide()
    try:
        cutoff_value = hgmd.mhg_cutoff_value(
            marker_exp, xlmhg[['gene_1', 'mHG_cutoff']]
        )
        xlmhg = xlmhg[['gene_1', 'mHG_stat', 'mHG_pval']].merge(
            hgmd.mhg_slide(marker_exp, cutoff_value), on='gene_1'
        )
        # Update cutoff_value after sliding
        cutoff_value = pd.Series(
            xlmhg['cutoff_val'].values, index=xlmhg['gene_1']
        )
        xlmhg = xlmhg\
            .sort_values(by='mHG_stat', ascending=True)
    except Exception as err:
        print('error in sliding values')
        print(err)
    print('Creating discrete expression matrix...')
    try:
        discrete_exp = hgmd.discrete_exp(marker_exp, cutoff_value, abbrev, xlmhg)
    except Exception as err:
        print('discrete matrix construction failed')
        print(err)
    '''
    #For checking the sliding issue
    count = 0
    print(discrete_exp['Reg4'].sort_values(ascending=False).head(667))
    #time.sleep(100000)
    print(cls_ser)
    for index in discrete_exp['Reg4'].sort_values(ascending=False).head(667).iteritems():
        for index2 in cls_ser.iteritems():
            if index[0] == index2[0]:
                if index2[1] == 0:
                    count = count +1
    print(count)
    print(discrete_exp['Reg4'].sort_values(ascending=False).head(70))
    print(cls_ser.to_string())
    #print(marker_exp['1600029D21Rik'].sort_values(ascending=False).head(160))
    #time.sleep(100000)
    '''
    if skipvis == 0:
        # Keep an unfiltered copy for the visualization step below.
        discrete_exp_full = discrete_exp.copy()
    print('Finding simple true positives/negatives for singletons...')
    #Gives us the singleton TP/TNs for COI and for rest of clusters
    #COI is just a DF, rest of clusters are a dict of DFs
    (sing_tp_tn, other_sing_tp_tn) = hgmd.tp_tn(discrete_exp, cls_ser, cls, cluster_overall)
    ### Take out any genes with a true positive less than 15% from the expression matrix ###
    discrete_exp = discrete_exp[ sing_tp_tn['gene_1'][ sing_tp_tn['TP'] > .15 ].values ]
    ########################################################################################
    ###########
    #OLD HEURISTICS
    #abb = '3'
    #if abb in abbrev:
    #    print('Heuristic Abbreviation initiated for ' + str(abbrev) )
    #    count = 0
    #    trips_list=[]
    #    for index,row in xlmhg.iterrows():
    #        if sing_tp_tn.set_index('gene_1').at[row['gene_1'],'TP'] <= .15:
    #            continue
    #        ##THREE GENE VARIABLE HERE##
    #        if count == trips_heur:
    #            break
    #        else:
    #            trips_list.append(row['gene_1'])
    #            count = count + 1
    #else:
    #    trips_list = None
    ############
    # --- Pair (and optional 3-/4-gene) combination scoring -------------------
    print('Finding pair expression matrix...')
    (
        gene_map, in_cls_count, pop_count,
        in_cls_product, total_product, upper_tri_indices,
        cluster_exp_matrices, cls_counts
    ) = hgmd.pair_product(discrete_exp, cls_ser, cls, cluster_number,cluster_overall)
    if K >= 4:
        print('')
        print('Starting quads')
        print('')
        quads_in_cls, quads_total, quads_indices, odd_gene_mapped, even_gene_mapped = quads.combination_product(discrete_exp,cls_ser,cls,xlmhg)
        print('')
        print('')
        print('')
        print('')
        print('HG TEST ON QUADS')
        quads_fin = quads.quads_hg(gene_map,in_cls_count,pop_count,quads_in_cls,quads_total,quads_indices,odd_gene_mapped,even_gene_mapped)
    if K == 3:
        start_trips = time.time()
        print('Finding Trips expression matrix...')
        try:
            trips_in_cls,trips_total,trips_indices,gene_1_mapped,gene_2_mapped,gene_3_mapped = hgmd.combination_product(discrete_exp,cls_ser,cls,abbrev,heur_limit)
        except Exception as err:
            print('error in 3-gene matrix construction')
            print(err)
        end_trips = time.time()
        print(str(end_trips-start_trips) + ' seconds')
    HG_start = time.time()
    print('Running hypergeometric test on pairs...')
    pair, revised_indices = hgmd.pair_hg(
        gene_map, in_cls_count, pop_count,
        in_cls_product, total_product, upper_tri_indices, abbrev, heur_limit
    )
    pair_q = hgmd.pairs_q(pair)
    HG_end = time.time()
    print(str(HG_end-HG_start) + ' seconds')
    pair_out_initial = pair\
        .sort_values(by='HG_pval', ascending=True)
    pair_out_initial['rank'] = pair_out_initial.reset_index().index + 1
    #spair_out_print = pair_out_initial.head(Trim)
    pair_out_initial.to_csv(
        csv_path + '/cluster_' + str(cls) + '_pair_full_unranked.csv'
    )
    if K == 3:
        HG_start = time.time()
        print('Running hypergeometric test & TP/TN on trips...')
        trips = hgmd.trips_hg(
            gene_map,in_cls_count,pop_count,
            trips_in_cls,trips_total,trips_indices,
            gene_1_mapped,gene_2_mapped,gene_3_mapped,abbrev,heur_limit
        )
        #print(trips)
        HG_end = time.time()
        print(str(HG_end-HG_start) + ' seconds')
    # Pair TP/TN FOR THIS CLUSTER
    print('Finding simple true positives/negatives for pairs...')
    try:
        pair_tp_tn = hgmd.pair_tp_tn(
            gene_map, in_cls_count, pop_count,
            in_cls_product, total_product, upper_tri_indices, abbrev, revised_indices
        )
    except Exception as err:
        print(err)
    #accumulates pair TP/TN vals for all other clusters
    ##NEW
    try:
        other_pair_tp_tn = {}
        for key in cluster_exp_matrices:
            new_pair_tp_tn = hgmd.pair_tp_tn(
                gene_map, cls_counts[key], pop_count,
                cluster_exp_matrices[key], total_product, upper_tri_indices,
                abbrev, revised_indices
            )
            other_pair_tp_tn[key] = new_pair_tp_tn
            other_pair_tp_tn[key].set_index(['gene_1','gene_2'],inplace=True)
    except:
        pass
    try:
        pair = pair\
            .merge(pair_tp_tn, on=['gene_1', 'gene_2'], how='left')\
            .merge(pair_q, on=['gene_1','gene_2'], how='left')
    except:
        pass
    #Supplementary figure 2 code#
    '''
    for cl in other_pair_tp_tn:
        print(cl)
        print(other_pair_tp_tn[cl].loc[('LY6D', 'CD3G_negation')])
    for cl in other_sing_tp_tn:
        print(cl)
        print(other_sing_tp_tn[cl].loc[('LY6D')])
    time.sleep(10000)
    '''
    try:
        pair_tp_tn.set_index(['gene_1','gene_2'],inplace=True)
    except:
        pass
    # sing_tp_tn.set_index(['gene_1'], inplace=True)
    # --- Ranking -------------------------------------------------------------
    rank_start = time.time()
    print('Finding NEW Rank')
    try:
        ranked_pair,histogram = hgmd.ranker(pair,xlmhg,sing_tp_tn,other_sing_tp_tn,other_pair_tp_tn,cls_counts,in_cls_count,pop_count)
    except Exception as err:
        print(err)
    rank_end = time.time()
    print(str(rank_end - rank_start) + ' seconds')
    # Save TP/TN values to be used for non-cluster-specific things
    print('Pickling data for later...')
    try:
        sing_tp_tn.to_pickle(pickle_path + 'sing_tp_tn_' + str(cls))
        pair_tp_tn.to_pickle(pickle_path + 'pair_tp_tn_' + str(cls))
    except:
        print('pickling failed')
    #trips_tp_tn.to_pickle(pickle_path + 'trips_tp_tn' + str(cls))
    # --- CSV export ----------------------------------------------------------
    print('Exporting cluster ' + str(cls) + ' output to CSV...')
    try:
        sing_output = xlmhg\
            .merge(t_test, on='gene_1')\
            .merge(fc_test, on='gene_1')\
            .merge(sing_tp_tn, on='gene_1')\
            .merge(q_val, on='gene_1')\
            .set_index('gene_1')\
            .sort_values(by='mHG_stat', ascending=True)
    except Exception as err:
        print(err)
        sing_output = xlmhg.sort_values(by='mHG_stat',ascending=True)
    sing_output.sort_values(by='gene_1',ascending=True).to_csv(
        csv_path + '/cluster_' + str(cls) + '_singleton_full_unranked.csv'
    )
    # Keep genes with TP >= 15%; drop negation genes whose TN < 50%.
    sing_output = sing_output.loc[sing_output['TP'] >= .15]
    for index, row in sing_output.iterrows():
        if re.compile(".*_negation$").match(index):
            if row['TN'] >= .5:
                pass
            else:
                sing_output.drop(labels=index,axis=0,inplace=True)
    # make a copy of sing_output
    # run the stuff below on both the original (fine) and the copy (no negations)
    sing_output_positives = sing_output.copy(deep=True)
    for index, row in sing_output_positives.iterrows():
        if re.compile(".*_negation$").match(index):
            sing_output_positives.drop(labels=index,axis=0,inplace=True)
    # Final rank = mean of the mHG rank and the fold-change rank.
    sing_output_positives['hgrank'] = sing_output_positives.reset_index().index + 1
    sing_output_positives.sort_values(by='Log2FoldChangeAbs', ascending=False, inplace=True)
    sing_output_positives['fcrank'] = sing_output_positives.reset_index().index + 1
    sing_output_positives['finrank'] = sing_output_positives[['hgrank', 'fcrank']].mean(axis=1)
    sing_output_positives.sort_values(by='finrank',ascending=True,inplace=True)
    sing_output_positives['rank'] = sing_output_positives.reset_index().index + 1
    sing_output_positives.drop('finrank',axis=1, inplace=True)
    # Flag the top 99 genes for plotting.
    count = 1
    for index,row in sing_output_positives.iterrows():
        if count == 100:
            break
        sing_output_positives.at[index,'Plot'] = 1
        count = count + 1
    sing_output_positives.to_csv(
        csv_path + '/cluster_' + str(cls) + '_singleton_positive_markers_ranked.csv'
    )
    sing_output['hgrank'] = sing_output.reset_index().index + 1
    sing_output.sort_values(by='Log2FoldChangeAbs', ascending=False, inplace=True)
    sing_output['fcrank'] = sing_output.reset_index().index + 1
    sing_output['finrank'] = sing_output[['hgrank', 'fcrank']].mean(axis=1)
    sing_output.sort_values(by='finrank',ascending=True,inplace=True)
    sing_output['rank'] = sing_output.reset_index().index + 1
    sing_output.drop('finrank',axis=1, inplace=True)
    count = 1
    for index,row in sing_output.iterrows():
        if count == 100:
            break
        sing_output.at[index,'Plot'] = 1
        count = count + 1
    sing_output.to_csv(
        csv_path + '/cluster_' + str(cls) + '_singleton_all_ranked.csv'
    )
    sing_stripped = sing_output[
        ['mHG_stat', 'TP', 'TN']
    ].reset_index().rename(index=str, columns={'gene_1': 'gene_1'})
    try:
        ranked_print = ranked_pair.head(Trim)
        ranked_print.to_csv(
            csv_path + '/cluster_' + str(cls) + '_pair_final_ranking.csv'
        )
    except:
        # Fall back to a dummy frame so the plotting step below can still run.
        print('pair file not generated, no pairs available')
        ranked_pair = pd.DataFrame(data=0,index=[1,2,3],columns={'TP','TN','Plot','gene_1','gene_2','rank'})
    #Add trips data pages
    #does not currently do new rank scheme
    if K == 3:
        trips_output = trips
        # .sort_values(by='HG_stat', ascending=True)
        #print(trips_output)
        trips_output['rank'] = trips_output.reset_index().index + 1
        trips_print = trips_output.head(Trim)
        trips_print.to_csv(
            csv_path + '/cluster_' + str(cls) + '_trips.csv'
        )
    else:
        trips_output = int(1)
    if K >= 4:
        quads_final = quads_fin\
            .sort_values(by='HG_stat', ascending=True)
        quads_final['rank'] = quads_final.reset_index().index + 1
        quads_print = quads_final.head(Trim)
        quads_print.to_csv(
            csv_path + '/cluster_' + str(cls) + '_quads.csv'
        )
    else:
        quads_final = int(1)
    #plt.bar(list(histogram.keys()), histogram.values(), color='b')
    #plt.savefig(vis_path + '/cluster_' + str(cls) + '_pair_histogram')
    #if cls == fincls:
    #    cls = 0
    # --- Visualization -------------------------------------------------------
    if skipvis == 1:
        print('Skipping plots...')
        pass
    else:
        print('Drawing plots...')
        vis.make_plots(
            pair=ranked_pair,
            sing=sing_output,
            sing_tp_tn=sing_tp_tn,
            xlmhg=xlmhg,
            trips=trips_output,
            quads_fin=quads_final,
            tsne=tsne,
            discrete_exp=discrete_exp_full,
            marker_exp=marker_exp,
            plot_pages=plot_pages,
            combined_path=vis_path + '/cluster_' + str(cls) + '_pairs_as_singletons',
            sing_combined_path=vis_path + '/cluster_' +
            str(cls) + '_singleton',
            discrete_path=vis_path + '/cluster_' + str(cls) + '_discrete_pairs',
            tptn_path=vis_path + 'cluster_' + str(cls) + 'pair_TP_TN',
            trips_path=vis_path + 'cluster_' + str(cls) + '_discrete_trios',
            quads_path=vis_path + 'cluster_' + str(cls) + '_discrete_quads',
            sing_tptn_path=vis_path + 'cluster_' + str(cls) + '_singleton_TP_TN',
            count_data = count_data
        )
    end_cls_time=time.time()
    print(str(end_cls_time - start_cls_time) + ' seconds')
    #time.sleep(10000)
def main():
    """Hypergeometric marker detection. Finds markers identifying a cluster.
    Reads in data from single-cell RNA sequencing. Data is in the form of 3
    CSVs: gene expression data by gene by cell, 2-D tSNE data by cell, and the
    clusters of interest by cell. Creates a list of genes and a list of gene
    pairs (including complements), ranked by hypergeometric and t-test
    significance. The highest ranked marker genes generally best identify the
    cluster of interest. Saves these lists to CSV and creates gene expression
    visualizations.
    """
    # TODO: more precise description
    #ten_x = input("10X data? [y/n]")
    start_dt = datetime.datetime.now()
    start_time = time.time()
    print("Started on " + str(start_dt.isoformat()))
    args = init_parser(argparse.ArgumentParser(
        description=("Hypergeometric marker detection. Finds markers identifying a cluster. Documentation available at https://hgmd.readthedocs.io/en/latest/index.html")
    )).parse_args()
    # --- Unpack CLI arguments ------------------------------------------------
    output_path = args.output_path
    C = args.C
    K = args.K
    Abbrev = args.Abbrev
    Down = args.Down
    X = args.X
    L = args.L
    marker_file = args.marker
    tsne_file = args.vis
    cluster_file = args.cluster
    gene_file = args.g
    Trim = args.Trim
    count_data = args.Count
    tenx = args.tenx
    online = args.online
    skipvis = args.skipvis
    plot_pages = 30 # number of genes to plot (starting with highest ranked)
    # TODO: gene pairs with expression ratio within the cluster of interest
    # under [min_exp_ratio] were ignored in hypergeometric testing. This
    # functionality is currently unimplemented.
    # min_exp_ratio = 0.4
    # --- Output directories --------------------------------------------------
    # NOTE(review): when the directory already exists it is deleted with a
    # shell 'rm -r' and recreated — destructive if output_path is reused.
    csv_path = output_path + 'data/'
    vis_path = output_path + 'vis/'
    pickle_path = output_path + '_pickles/'
    try:
        os.makedirs(csv_path)
    except:
        os.system('rm -r ' + csv_path)
        os.makedirs(csv_path)
    try:
        os.makedirs(vis_path)
    except:
        os.system('rm -r ' + vis_path)
        os.makedirs(vis_path)
    try:
        os.makedirs(pickle_path)
    except:
        os.system('rm -r ' + pickle_path)
        os.makedirs(pickle_path)
    # --- Normalize optional arguments to concrete values ---------------------
    if Trim is not None:
        Trim = int(Trim)
    else:
        Trim = int(2000)
    if C is not None:
        C = abs(int(C))
    else:
        C = 1
    if X is not None:
        try:
            X = float(X)
        except:
            raise Exception('X param must be a number between 0 and 1')
        if X > 1:
            X = int(1)
        elif X <= 0:
            X = int(0)
        else:
            X = float(X)
        print("Set X to " + str(X) + ".")
    if L is not None:
        L = int(L)
        print("Set L to " + str(L) + ".")
    if K is not None:
        K = int(K)
    else:
        K = 2
    if K > 4:
        K = 4
        print('Only supports up to 4-gene combinations currently, setting K to 4')
    # Flag-like string arguments are converted to 0/1 ints ('True'/'yes' -> 1).
    if count_data is not None:
        if count_data == str(True):
            count_data = 1
            print('Count Data')
        elif count_data == 'yes':
            count_data = 1
            print('Count Data')
        else:
            count_data = int(0)
    else:
        count_data = int(0)
    if tenx is not None:
        if tenx == str(True):
            tenx = int(1)
        elif tenx == 'yes':
            tenx = int(1)
        else:
            tenx = int(0)
    else:
        tenx = int(0)
    if online is not None:
        if online == str(True):
            online = int(1)
        elif online == 'yes':
            online = int(1)
        else:
            online = int(0)
    else:
        online = int(0)
    if skipvis is not None:
        if skipvis == str(True):
            skipvis = int(1)
        elif skipvis == 'yes':
            skipvis = int(1)
        else:
            skipvis = int(0)
    else:
        skipvis = int(0)
    # --- Load input data -----------------------------------------------------
    print("Reading data...")
    if gene_file is None:
        (cls_ser, tsne, no_complement_marker_exp, gene_path) = read_data(
            cls_path=cluster_file,
            tsne_path=tsne_file,
            marker_path=marker_file,
            gene_path=None,
            D=Down,
            tenx=tenx,
            online=online,
            skipvis=skipvis)
    else:
        (cls_ser, tsne, no_complement_marker_exp, gene_path) = read_data(
            cls_path=cluster_file,
            tsne_path=tsne_file,
            marker_path=marker_file,
            gene_path=gene_file,
            D=Down,
            tenx=tenx,
            online=online,
            skipvis=skipvis)
    #throw out vals that show up in expression matrix but not in cluster assignments
    cls_ser_idx = set(cls_ser.index.values.tolist())
    no_complement_marker_exp.drop([ind for ind,row in no_complement_marker_exp.iterrows() if ind not in cls_ser_idx], inplace=True)
    #throw out gene rows that are duplicates and print out a message to user
    '''
    #throw out cls_ser vals not in marker_exp
    for index in cls_ser.index.values.tolist():
        if index in no_complement_marker_exp.columns:
            continue
        else:
            cls_ser.drop(index,inplace=True)
    '''
    print("Generating complement data...")
    marker_exp = hgmd.add_complements(no_complement_marker_exp)
    marker_exp.sort_values(by='cell',inplace=True)
    cls_ser.sort_index(inplace=True)
    # Process clusters sequentially
    clusters = cls_ser.unique()
    clusters.sort()
    cluster_overall=clusters.copy()
    #Only takes a certain number of clusters (cuts out smallest ones)
    if online == 1:
        max_clus_size = 15
        if len(clusters) <= max_clus_size:
            pass
        else:
            # Compute relative cluster sizes, then repeatedly drop the
            # smallest cluster until at most max_clus_size remain.
            cls_helper = list(clusters.copy())
            cls_size_count = {}
            for item in cls_ser:
                if item in cls_size_count:
                    cls_size_count[item] = cls_size_count[item] + 1
                else:
                    cls_size_count[item] = 1
            for counted in cls_size_count:
                cls_size_count[counted] = cls_size_count[counted] / len(cls_ser)
            while len(cls_helper) > max_clus_size:
                lowest = 1
                place = 0
                for key in cls_size_count:
                    if cls_size_count[key] < lowest:
                        place = key
                        lowest = cls_size_count[key]
                cls_helper.remove(place)
                del cls_size_count[place]
            clusters = np.array(cls_helper)
    #Below could probably be optimized a little (new_clust not necessary),
    #cores is number of simultaneous threads you want to run, can be set at will
    cores = C
    cluster_number = len(clusters)
    # if core number is bigger than number of clusters, set it equal to number of clusters
    if cores > len(clusters):
        cores = len(clusters)
    if cores == 1:
        # Single-core path: process clusters one after another in-process.
        for cls in clusters:
            process(cls,X,L,plot_pages,cls_ser,tsne,marker_exp,gene_file,csv_path,vis_path,pickle_path,cluster_number,K,Abbrev,cluster_overall,Trim,count_data,skipvis)
    else:
        #below loops allow for splitting the job based on core choice
        group_num = math.ceil((len(clusters) / cores ))
        for element in range(group_num):
            new_clusters = clusters[:cores]
            print(new_clusters)
            jobs = []
            #this loop spawns the workers and runs the code for each assigned.
            #workers assigned based on the new_clusters list which is the old clusters
            #split up based on core number e.g.
            #clusters = [1 2 3 4 5 6] & cores = 4 --> new_clusters = [1 2 3 4], new_clusters = [5 6]
            for cls in new_clusters:
                p = multiprocessing.Process(target=process,
                        args=(cls,X,L,plot_pages,cls_ser,tsne,marker_exp,gene_file,csv_path,vis_path,pickle_path,cluster_number,K,Abbrev,cluster_overall,Trim,count_data,skipvis))
                jobs.append(p)
                p.start()
            # NOTE(review): only the last spawned worker is joined here; the
            # other workers in this group may still be running when the next
            # group starts — confirm whether joining all of `jobs` was intended.
            p.join()
            new_clusters = []
            clusters = clusters[cores:len(clusters)]
    end_time = time.time()
    # Add text file to keep track of everything
    end_dt = datetime.datetime.now()
    print("Ended on " + end_dt.isoformat())
    # NOTE(review): the metadata file handle is never closed/flushed explicitly.
    metadata = open(output_path + 'metadata.txt', 'w')
    metadata.write("Started: " + start_dt.isoformat())
    metadata.write("\nEnded: " + end_dt.isoformat())
    metadata.write("\nElapsed: " + str(end_dt - start_dt))
    #metadata.write("\nGenerated by COMET version " + conf.version)
    print('Took ' + str(end_time-start_time) + ' seconds')
    print('Which is ' + str( (end_time-start_time)/60 ) + ' minutes')
# Script entry point: run the full marker-detection pipeline when executed directly.
if __name__ == '__main__':
    main()
| [
"pandas.DataFrame",
"os.makedirs",
"random.randint",
"math.ceil",
"pandas.read_csv",
"argparse.ArgumentParser",
"numpy.transpose",
"os.system",
"numpy.isnan",
"time.time",
"numpy.array",
"pandas.Series",
"multiprocessing.Process",
"datetime.datetime.now",
"re.compile"
] | [((2479, 2568), 'pandas.read_csv', 'pd.read_csv', (['cls_path'], {'sep': '"""\t"""', 'index_col': '(0)', 'names': "['cell', 'cluster']", 'squeeze': '(True)'}), "(cls_path, sep='\\t', index_col=0, names=['cell', 'cluster'],\n squeeze=True)\n", (2490, 2568), True, 'import pandas as pd\n'), ((3075, 3086), 'time.time', 'time.time', ([], {}), '()\n', (3084, 3086), False, 'import time\n'), ((5184, 5222), 'numpy.transpose', 'np.transpose', (['no_complement_marker_exp'], {}), '(no_complement_marker_exp)\n', (5196, 5222), True, 'import numpy as np\n'), ((10024, 10035), 'time.time', 'time.time', ([], {}), '()\n', (10033, 10035), False, 'import time\n'), ((15062, 15073), 'time.time', 'time.time', ([], {}), '()\n', (15071, 15073), False, 'import time\n'), ((15355, 15366), 'time.time', 'time.time', ([], {}), '()\n', (15364, 15366), False, 'import time\n'), ((17660, 17671), 'time.time', 'time.time', ([], {}), '()\n', (17669, 17671), False, 'import time\n'), ((17909, 17920), 'time.time', 'time.time', ([], {}), '()\n', (17918, 17920), False, 'import time\n'), ((23839, 23850), 'time.time', 'time.time', ([], {}), '()\n', (23848, 23850), False, 'import time\n'), ((24608, 24631), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (24629, 24631), False, 'import datetime\n'), ((24649, 24660), 'time.time', 'time.time', ([], {}), '()\n', (24658, 24660), False, 'import time\n'), ((32058, 32069), 'time.time', 'time.time', ([], {}), '()\n', (32067, 32069), False, 'import time\n'), ((32138, 32161), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (32159, 32161), False, 'import datetime\n'), ((2629, 2717), 'pandas.read_csv', 'pd.read_csv', (['cls_path'], {'sep': '""","""', 'index_col': '(0)', 'names': "['cell', 'cluster']", 'squeeze': '(True)'}), "(cls_path, sep=',', index_col=0, names=['cell', 'cluster'],\n squeeze=True)\n", (2640, 2717), True, 'import pandas as pd\n'), ((2804, 2889), 'pandas.read_csv', 'pd.read_csv', (['tsne_path'], {'sep': '"""\t"""', 
'index_col': '(0)', 'names': "['cell', 'tSNE_1', 'tSNE_2']"}), "(tsne_path, sep='\\t', index_col=0, names=['cell', 'tSNE_1',\n 'tSNE_2'])\n", (2815, 2889), True, 'import pandas as pd\n'), ((2919, 2946), 'numpy.isnan', 'np.isnan', (["tsne['tSNE_1'][0]"], {}), "(tsne['tSNE_1'][0])\n", (2927, 2946), True, 'import numpy as np\n'), ((3610, 3658), 'pandas.DataFrame', 'pd.DataFrame', ([], {'index': 'gene_names', 'columns': 'barcodes'}), '(index=gene_names, columns=barcodes)\n', (3622, 3658), True, 'import pandas as pd\n'), ((3678, 3689), 'time.time', 'time.time', ([], {}), '()\n', (3687, 3689), False, 'import time\n'), ((3875, 3886), 'time.time', 'time.time', ([], {}), '()\n', (3884, 3886), False, 'import time\n'), ((6594, 6608), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (6606, 6608), True, 'import pandas as pd\n'), ((11441, 11501), 'pandas.Series', 'pd.Series', (["xlmhg['cutoff_val'].values"], {'index': "xlmhg['gene_1']"}), "(xlmhg['cutoff_val'].values, index=xlmhg['gene_1'])\n", (11450, 11501), True, 'import pandas as pd\n'), ((14605, 14616), 'time.time', 'time.time', ([], {}), '()\n', (14614, 14616), False, 'import time\n'), ((14979, 14990), 'time.time', 'time.time', ([], {}), '()\n', (14988, 14990), False, 'import time\n'), ((15751, 15762), 'time.time', 'time.time', ([], {}), '()\n', (15760, 15762), False, 'import time\n'), ((16081, 16092), 'time.time', 'time.time', ([], {}), '()\n', (16090, 16092), False, 'import time\n'), ((25772, 25793), 'os.makedirs', 'os.makedirs', (['csv_path'], {}), '(csv_path)\n', (25783, 25793), False, 'import os\n'), ((25893, 25914), 'os.makedirs', 'os.makedirs', (['vis_path'], {}), '(vis_path)\n', (25904, 25914), False, 'import os\n'), ((26014, 26038), 'os.makedirs', 'os.makedirs', (['pickle_path'], {}), '(pickle_path)\n', (26025, 26038), False, 'import os\n'), ((2967, 3052), 'pandas.read_csv', 'pd.read_csv', (['tsne_path'], {'sep': '""","""', 'index_col': '(0)', 'names': "['cell', 'tSNE_1', 'tSNE_2']"}), "(tsne_path, sep=',', 
index_col=0, names=['cell', 'tSNE_1', 'tSNE_2']\n )\n", (2978, 3052), True, 'import pandas as pd\n'), ((8213, 8247), 'math.ceil', 'math.ceil', (['(counts[clstr] * (M / N))'], {}), '(counts[clstr] * (M / N))\n', (8222, 8247), False, 'import math\n'), ((8930, 8964), 'random.randint', 'random.randint', (['(0)', '(N - num - 1 - 1)'], {}), '(0, N - num - 1 - 1)\n', (8944, 8964), False, 'import random\n'), ((21670, 21769), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': '(0)', 'index': '[1, 2, 3]', 'columns': "{'TP', 'TN', 'Plot', 'gene_1', 'gene_2', 'rank'}"}), "(data=0, index=[1, 2, 3], columns={'TP', 'TN', 'Plot', 'gene_1',\n 'gene_2', 'rank'})\n", (21682, 21769), True, 'import pandas as pd\n'), ((25814, 25844), 'os.system', 'os.system', (["('rm -r ' + csv_path)"], {}), "('rm -r ' + csv_path)\n", (25823, 25844), False, 'import os\n'), ((25853, 25874), 'os.makedirs', 'os.makedirs', (['csv_path'], {}), '(csv_path)\n', (25864, 25874), False, 'import os\n'), ((25935, 25965), 'os.system', 'os.system', (["('rm -r ' + vis_path)"], {}), "('rm -r ' + vis_path)\n", (25944, 25965), False, 'import os\n'), ((25974, 25995), 'os.makedirs', 'os.makedirs', (['vis_path'], {}), '(vis_path)\n', (25985, 25995), False, 'import os\n'), ((26059, 26092), 'os.system', 'os.system', (["('rm -r ' + pickle_path)"], {}), "('rm -r ' + pickle_path)\n", (26068, 26092), False, 'import os\n'), ((26101, 26125), 'os.makedirs', 'os.makedirs', (['pickle_path'], {}), '(pickle_path)\n', (26112, 26125), False, 'import os\n'), ((30411, 30431), 'numpy.array', 'np.array', (['cls_helper'], {}), '(cls_helper)\n', (30419, 30431), True, 'import numpy as np\n'), ((9185, 9258), 'pandas.Series', 'pd.Series', (['[cls_ser[init_rand_num]]'], {'index': '[cls_ser.index[init_rand_num]]'}), '([cls_ser[init_rand_num]], index=[cls_ser.index[init_rand_num]])\n', (9194, 9258), True, 'import pandas as pd\n'), ((19069, 19095), 're.compile', 're.compile', (['""".*_negation$"""'], {}), "('.*_negation$')\n", (19079, 19095), False, 
'import re\n'), ((19488, 19514), 're.compile', 're.compile', (['""".*_negation$"""'], {}), "('.*_negation$')\n", (19498, 19514), False, 'import re\n'), ((24742, 24936), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Hypergeometric marker detection. Finds markers identifying a cluster. Documentation available at https://hgmd.readthedocs.io/en/latest/index.html"""'}), "(description=\n 'Hypergeometric marker detection. Finds markers identifying a cluster. Documentation available at https://hgmd.readthedocs.io/en/latest/index.html'\n )\n", (24765, 24936), False, 'import argparse\n'), ((31666, 31885), 'multiprocessing.Process', 'multiprocessing.Process', ([], {'target': 'process', 'args': '(cls, X, L, plot_pages, cls_ser, tsne, marker_exp, gene_file, csv_path,\n vis_path, pickle_path, cluster_number, K, Abbrev, cluster_overall, Trim,\n count_data, skipvis)'}), '(target=process, args=(cls, X, L, plot_pages,\n cls_ser, tsne, marker_exp, gene_file, csv_path, vis_path, pickle_path,\n cluster_number, K, Abbrev, cluster_overall, Trim, count_data, skipvis))\n', (31689, 31885), False, 'import multiprocessing\n'), ((4211, 4258), 'pandas.read_csv', 'pd.read_csv', (['marker_path'], {'sep': '"""\t"""', 'index_col': '(0)'}), "(marker_path, sep='\\t', index_col=0)\n", (4222, 4258), True, 'import pandas as pd\n'), ((4595, 4641), 'pandas.read_csv', 'pd.read_csv', (['marker_path'], {'sep': '""","""', 'index_col': '(0)'}), "(marker_path, sep=',', index_col=0)\n", (4606, 4641), True, 'import pandas as pd\n')] |
import os
#<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<
# Load Flask-Related Libraries
from flask import Flask, request, redirect, url_for, render_template
from werkzeug.utils import secure_filename
app = Flask(__name__)
#>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
# <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<
import random
import numpy as np
from audio import read_mfcc
from batcher import sample_from_mfcc
from constants import SAMPLE_RATE, NUM_FRAMES
from conv_models import DeepSpeakerModel
from test import batch_cosine_similarity
# Sample some inputs for WAV/FLAC files for the same speaker.
# To have reproducible results every time you call this function, set the seed every time before calling it.
# np.random.seed(123)
# random.seed(123)
mfcc_001 = sample_from_mfcc(read_mfcc('samples/PhilippeRemy/PhilippeRemy_001.wav', SAMPLE_RATE), NUM_FRAMES)
mfcc_002 = sample_from_mfcc(read_mfcc('samples/PhilippeRemy/PhilippeRemy_002.wav', SAMPLE_RATE), NUM_FRAMES)
# Call the model to get the embeddings of shape (1, 512) for each file.
# NOTE(review): `model` is not defined at this point — it is only created
# inside the `__main__` guard further down, but this module-level code runs
# first at import time, so these calls raise NameError. Confirm intended
# execution order (e.g. move this demo code below the model setup).
predict_001 = model.m.predict(np.expand_dims(mfcc_001, axis=0))
predict_002 = model.m.predict(np.expand_dims(mfcc_002, axis=0))
# Do it again with a different speaker.
mfcc_003 = sample_from_mfcc(read_mfcc('samples/1255-90413-0001.flac', SAMPLE_RATE), NUM_FRAMES)
predict_003 = model.m.predict(np.expand_dims(mfcc_003, axis=0))
# Compute the cosine similarity and check that it is higher for the same speaker.
print('SAME SPEAKER', batch_cosine_similarity(predict_001, predict_002)) # SAME SPEAKER [0.81564593]
print('DIFF SPEAKER', batch_cosine_similarity(predict_001, predict_003)) # DIFF SPEAKER [0.1419204]
# NOTE(review): duplicate import and second `Flask(__name__)` instantiation —
# this rebinds `app`, discarding the instance created near the top of the file.
from flask import Flask, request
app = Flask(__name__)
@app.route('/query-example')
def query_example():
    """Demo GET endpoint; always returns a fixed plain-text string."""
    return 'Query String Example'
@app.route("/result", methods = ["POST"])
def result():
    """Accept a POSTed 'audio_data' file upload and return an avatar URL.

    NOTE(review): the uploaded bytes are read and printed but never used, and
    the returned URL contains a literal ':seed' placeholder — presumably it
    should be replaced with a computed speaker id; confirm against the client.
    """
    print(request.files)
    data = request.files['audio_data'].read()
    print(data)
    return 'https://avatars.dicebear.com/api/bottts/:seed.svg'
if __name__ == '__main__':
    # Reproducible results.
    np.random.seed(123)
    random.seed(123)
    # Define the model here.
    # NOTE(review): `global` at module level is a no-op; also, module-level code
    # above already referenced `model` before this guard runs — confirm ordering.
    global model
    model = DeepSpeakerModel()
    # Load the checkpoint. https://drive.google.com/file/d/1F9NvdrarWZNktdX9KlRYWWHDwRkip_aP.
    # Also available here: https://share.weiyun.com/V2suEUVh (Chinese users).
    model.m.load_weights('ResCNN_triplet_training_checkpoint_265.h5', by_name=True)
    # run app in debug mode on port 5000
    # app.run(debug=True, port=5000)
    app.run(host='0.0.0.0', port=80)
| [
"numpy.random.seed",
"audio.read_mfcc",
"flask.Flask",
"numpy.expand_dims",
"test.batch_cosine_similarity",
"conv_models.DeepSpeakerModel",
"random.seed"
] | [((210, 225), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (215, 225), False, 'from flask import Flask, request\n'), ((1741, 1756), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (1746, 1756), False, 'from flask import Flask, request\n'), ((785, 852), 'audio.read_mfcc', 'read_mfcc', (['"""samples/PhilippeRemy/PhilippeRemy_001.wav"""', 'SAMPLE_RATE'], {}), "('samples/PhilippeRemy/PhilippeRemy_001.wav', SAMPLE_RATE)\n", (794, 852), False, 'from audio import read_mfcc\n'), ((894, 961), 'audio.read_mfcc', 'read_mfcc', (['"""samples/PhilippeRemy/PhilippeRemy_002.wav"""', 'SAMPLE_RATE'], {}), "('samples/PhilippeRemy/PhilippeRemy_002.wav', SAMPLE_RATE)\n", (903, 961), False, 'from audio import read_mfcc\n'), ((1078, 1110), 'numpy.expand_dims', 'np.expand_dims', (['mfcc_001'], {'axis': '(0)'}), '(mfcc_001, axis=0)\n', (1092, 1110), True, 'import numpy as np\n'), ((1142, 1174), 'numpy.expand_dims', 'np.expand_dims', (['mfcc_002'], {'axis': '(0)'}), '(mfcc_002, axis=0)\n', (1156, 1174), True, 'import numpy as np\n'), ((1245, 1299), 'audio.read_mfcc', 'read_mfcc', (['"""samples/1255-90413-0001.flac"""', 'SAMPLE_RATE'], {}), "('samples/1255-90413-0001.flac', SAMPLE_RATE)\n", (1254, 1299), False, 'from audio import read_mfcc\n'), ((1343, 1375), 'numpy.expand_dims', 'np.expand_dims', (['mfcc_003'], {'axis': '(0)'}), '(mfcc_003, axis=0)\n', (1357, 1375), True, 'import numpy as np\n'), ((1482, 1531), 'test.batch_cosine_similarity', 'batch_cosine_similarity', (['predict_001', 'predict_002'], {}), '(predict_001, predict_002)\n', (1505, 1531), False, 'from test import batch_cosine_similarity\n'), ((1583, 1632), 'test.batch_cosine_similarity', 'batch_cosine_similarity', (['predict_001', 'predict_003'], {}), '(predict_001, predict_003)\n', (1606, 1632), False, 'from test import batch_cosine_similarity\n'), ((2115, 2134), 'numpy.random.seed', 'np.random.seed', (['(123)'], {}), '(123)\n', (2129, 2134), True, 'import numpy as np\n'), ((2139, 2155), 
'random.seed', 'random.seed', (['(123)'], {}), '(123)\n', (2150, 2155), False, 'import random\n'), ((2215, 2233), 'conv_models.DeepSpeakerModel', 'DeepSpeakerModel', ([], {}), '()\n', (2231, 2233), False, 'from conv_models import DeepSpeakerModel\n')] |
import io
import json
import random
from collections import defaultdict
import numpy as np
import torch
from itertools import chain, groupby
from tqdm import tqdm
# sys.setdefaultencoding('utf8')
# Vocabulary sizes for the embedding lookup tables; the commented-out values
# are sizes from earlier vocabulary builds kept for reference.
# word_lexicon_size = 29172
# word_lexicon_size = 29336
word_lexicon_size = 29571
# pa_path_lexicon_size = 29313
pa_path_lexicon_size = 29336
pp_path_lexicon_size = 11116
lex_path_max_len = 30 # 5 lemmas, 5 POS, 4 arrows
# Maximum length of a naive (unlexicalized) path — presumably in tokens; confirm.
naive_path_max_len = 10
# Upper bound on the number of arguments determined per predicate — TODO confirm.
max_determined_args = 15
def end2end_dataset(data_path, data_rate):
    """Load the first ``data_rate`` percent of a JSON-lines dataset.

    Args:
        data_path: path to a file containing one JSON object per line.
        data_rate: percentage (0-100) of leading lines to keep.

    Returns:
        A list of parsed JSON objects; blank lines are skipped.
    """
    # Use a context manager so the file handle is closed deterministically
    # (the previous version left the handle open for the GC to collect).
    with open(data_path) as f:
        data = f.readlines()
    data = data[:int(len(data) * data_rate / 100)]
    return [json.loads(line.strip()) for line in tqdm(data, mininterval=5) if line.strip()]
def instance_par_predicate(data):
    """Explode batched instances into one singleton batch per predicate.

    Each element of ``data`` has the form ``[[tss, pss], yss]`` where the
    leading dimension indexes predicates; every predicate is re-emitted as a
    batch of size one (``torch.stack([t])`` prepends a length-1 axis).
    """
    for inputs, targets in data:
        tokens_batch, preds_batch = inputs
        for row in range(targets.size(0)):
            x_single = [torch.stack([tokens_batch[row]]), torch.stack([preds_batch[row]])]
            y_single = torch.stack([targets[row]])
            yield [x_single, y_single]
def end2end_single_seq_instance(data, batch_generator):
    """Group per-predicate examples of each sentence into one stacked batch.

    ``batch_generator(sentence)`` must yield ``(inputs, target)`` pairs where
    ``inputs`` is a sequence of tensors; the per-position tensors are stacked
    along a new leading axis. Sentences with an empty ``"pas"`` are skipped.
    """
    for sentence in data:
        if not sentence["pas"]:
            continue
        pairs = list(batch_generator(sentence))
        inputs, targets = zip(*pairs)
        stacked_inputs = [torch.stack(group) for group in zip(*inputs)]
        yield [stacked_inputs, torch.stack(targets)]
def end2end_single_seq_instance_with_prev_sentence(data, batch_generator):
    """Group per-predicate examples into stacked batches, context-aware variant.

    Identical to ``end2end_single_seq_instance`` except that each ``sentence``
    carries its PAS annotations under ``sentence["curr"]["pas"]`` (the rest of
    the dict presumably holds the previous sentence's context).
    """
    for sentence in data:
        if not sentence["curr"]["pas"]:
            continue
        pairs = list(batch_generator(sentence))
        inputs, targets = zip(*pairs)
        stacked_inputs = [torch.stack(group) for group in zip(*inputs)]
        yield [stacked_inputs, torch.stack(targets)]
def to_half_input(instances):
    """Move instances to GPU (mask tensors cast to fp16) when CUDA is available.

    Without CUDA each instance is passed through unchanged. Each instance has
    the shape ``[[ws, ps, ts], ys]``; only ``ps`` and ``ts`` are converted to
    half precision.
    """
    for item in instances:
        [[words, pred_mask, target_mask], labels] = item
        if torch.cuda.is_available():
            gpu_inputs = [words.cuda(), pred_mask.cuda().half(), target_mask.cuda().half()]
            yield [gpu_inputs, labels.cuda()]
        else:
            yield item
def e2e_single_seq_sentence_batch_with_multi_predicate(instance):
    """Yield one training example per predicate of a sentence.

    Each example is ``[[word_ids, all_predicate_mask, target_predicate_mask],
    argument_labels]``; the masks are (seq_len, 1) float tensors of 0/1 over
    the token positions.
    """
    pas_seq = instance["pas"]
    tokens = instance["tokens"]
    n_tokens = len(tokens)
    predicate_ids = {int(pas["p_id"]) for pas in pas_seq}
    for pas in pas_seq:
        target = int(pas["p_id"])
        labels = torch.LongTensor([int(a) for a in pas["args"]])
        word_ids = torch.LongTensor([int(w) for w in tokens])
        pred_mask = torch.Tensor([[float(i in predicate_ids)] for i in range(n_tokens)])
        target_mask = torch.Tensor([[float(i == target)] for i in range(n_tokens)])
        yield [[word_ids, pred_mask, target_mask], labels]
def e2e_single_seq_pos_sentence_batch_with_multi_predicate(instance):
    """Yield one training example per predicate, with POS and predicate types.

    Each example is ``[[word_ids, pos_ids, all_predicate_types,
    target_predicate_type], argument_labels]``; type vectors carry the
    predicate's type id at its position and 0 elsewhere.
    """
    pas_seq = instance["pas"]
    tokens = instance["tokens"]
    pos_tags = instance["pos"]
    n_tokens = len(tokens)
    # Map predicate position -> type id; on duplicate positions the first
    # occurrence wins (matches list.index semantics of the original lookup).
    type_at = {}
    for pas in pas_seq:
        type_at.setdefault(int(pas["p_id"]), int(pas["p_type"]))
    for pas in pas_seq:
        target = int(pas["p_id"])
        target_type = int(pas["p_type"])
        labels = torch.LongTensor([int(a) for a in pas["args"]])
        word_ids = torch.LongTensor([int(w) for w in tokens])
        pos_ids = torch.LongTensor([int(p) for p in pos_tags])
        pred_types = torch.LongTensor([type_at.get(i, 0) for i in range(n_tokens)])
        target_types = torch.LongTensor([target_type if i == target else 0 for i in range(n_tokens)])
        yield [[word_ids, pos_ids, pred_types, target_types], labels]
def e2e_single_seq_sentence_batch_ap(instance):
    """Yield ``[[token_ids, predicate_marks], labels]`` for each predicate.

    ``predicate_marks`` is a 0/1 column vector flagging only the current
    predicate's token position.
    """
    pas_seq = instance["pas"]
    tokens = instance["tokens"]
    n = len(tokens)
    for pas in pas_seq:
        target = int(pas["p_id"])
        labels = torch.LongTensor([int(a) for a in pas["args"]])
        token_ids = torch.LongTensor([int(t) for t in tokens])
        marks = torch.Tensor([[float(i == target)] for i in range(n)])
        yield [[token_ids, marks], labels]
def e2e_single_seq_sentence_batch(instance):
    """Yield one ``[[token_ids, predicate_marks], labels]`` example per predicate."""
    tokens = instance["tokens"]
    positions = range(len(tokens))
    for pas in instance["pas"]:
        p_id = int(pas["p_id"])
        labels = torch.LongTensor([int(a) for a in pas["args"]])
        token_ids = torch.LongTensor([int(t) for t in tokens])
        # One-hot column marking the predicate position.
        marks = torch.Tensor([[1.0] if i == p_id else [0.0] for i in positions])
        yield [[token_ids, marks], labels]
def e2e_single_seq_pos_sentence_batch(instance):
    """Yield ``[[token_ids, pos_ids, predicate_marks], labels]`` per predicate."""
    tokens = instance["tokens"]
    pos = instance["pos"]
    n = len(tokens)
    for pas in instance["pas"]:
        p_id = int(pas["p_id"])
        labels = torch.LongTensor([int(a) for a in pas["args"]])
        token_ids = torch.LongTensor([int(t) for t in tokens])
        pos_ids = torch.LongTensor([int(p) for p in pos])
        marks = torch.Tensor([[float(i == p_id)] for i in range(n)])
        yield [[token_ids, pos_ids, marks], labels]
def e2e_instance_for_pred(instances, batch_size):
    """Group instances by token length, batch each group, and shuffle the batches.

    The initial in-place shuffle randomises order within equal-length groups
    before the stable sort; the final shuffle mixes batches across lengths.
    """
    random.shuffle(instances)

    def length_of(inst):
        return len(inst["tokens"])

    instances = sorted(instances, key=length_of)
    batches = []
    for _, group in groupby(instances, key=length_of):
        batches.extend(
            e2e_batch_grouped_by_sentence_length_for_pred(list(group), batch_size))
    random.shuffle(batches)
    return batches
def e2e_batch_grouped_by_sentence_length_for_pred(instances, batch_size):
    """Yield predicate-detection batches from equal-length instances.

    Each batch is ``[[token_batch, pos_batch], target_batch]`` where the target
    holds every token's predicate-type id (0 for non-predicates).  All instances
    must share one sentence length so stacking is well defined.
    """
    num_batches, leftover = divmod(len(instances), batch_size)
    if leftover:
        num_batches += 1
    for b in range(num_batches):
        chunk = instances[b * batch_size:(b + 1) * batch_size]
        inputs = []
        targets = []
        for inst in chunk:
            token_ids = torch.LongTensor([int(t) for t in inst["tokens"]])
            pos_ids = torch.LongTensor([int(p) for p in inst["pos"]])
            type_marks = torch.zeros(len(token_ids), dtype=torch.long)
            for pas in inst["pas"]:
                type_marks[int(pas["p_id"])] = int(pas["p_type"])
            inputs.append([token_ids, pos_ids])
            targets.append(type_marks)
        stacked_inputs = [torch.stack(feature) for feature in zip(*inputs)]
        yield [stacked_inputs, torch.stack(targets)]
def batch_generator(xys, max_feature, batch_size, train_num_batch, single_batch):
    """Yield ``train_num_batch`` batches built by *single_batch*.

    Labels are transposed column-wise; the first three label rows are passed
    as separate arrays.
    """
    for b in range(train_num_batch):
        chunk = xys[b * batch_size:(b + 1) * batch_size]
        xs, ys = zip(*chunk)
        labels = np.array(ys).T
        yield single_batch(xs, [labels[0], labels[1], labels[2]], max_feature)
def e2e_sentence_batch_with_prev_sentence(instance):
    """Yield ``[[prev_word_ids, word_ids, predicate_marks, target_marks], labels]`` per predicate.

    A single dummy id ``[0]`` stands in for an empty previous sentence.
    """
    pas_seq = instance["curr"]["pas"]
    prev_tokens = instance["prev"]["tokens"]
    tokens = instance["curr"]["tokens"]
    n = len(tokens)
    predicate_positions = {int(pas["p_id"]) for pas in pas_seq}
    for pas in pas_seq:
        target = int(pas["p_id"])
        labels = torch.LongTensor([int(a) for a in pas["args"]])
        word_ids = torch.LongTensor([int(w) for w in tokens])
        if prev_tokens:
            prev_word_ids = torch.LongTensor([int(t) for t in prev_tokens])
        else:
            prev_word_ids = torch.LongTensor([0])
        predicate_marks = torch.Tensor(
            [[1.0 if i in predicate_positions else 0.0] for i in range(n)])
        target_marks = torch.Tensor(
            [[1.0 if i == target else 0.0] for i in range(n)])
        yield [[prev_word_ids, word_ids, predicate_marks, target_marks], labels]
def end2end_single_seq_instance_with_prev_sentences(data):
    """Yield, per annotated sentence, previous-sentence word tensors plus the
    stacked per-predicate inputs and labels of the current sentence.

    Output per sentence: ``[prev_word_tensors,
    [word_ids, predicate_marks, target_marks], labels]`` with the inner
    tensors stacked along a new leading (predicate) axis.  Sentences with an
    empty ``curr.pas`` are skipped; an empty previous sentence is represented
    by the single dummy id ``[0]``.
    """
    for sentence in data:
        if not sentence["curr"]["pas"]:
            continue
        pas_seq = sentence["curr"]["pas"]
        tokens = sentence["curr"]["tokens"]
        predicates = [int(pas["p_id"]) for pas in pas_seq]
        p_wss = []
        for prev_s in sentence["prev"]:
            prev_tokens = prev_s["tokens"]
            p_wss.append(torch.LongTensor([int(t) for t in prev_tokens])
                         if prev_tokens else torch.LongTensor([0]))
        instances = []
        for pas in pas_seq:
            p_id = int(pas["p_id"])
            ys = torch.LongTensor([int(a) for a in pas["args"]])
            ws = torch.LongTensor([int(w) for w in tokens])
            ps = torch.Tensor([[1.0] if i in predicates else [0.0] for i in range(len(tokens))])
            ts = torch.Tensor([[1.0] if i == p_id else [0.0] for i in range(len(tokens))])
            # BUG FIX: the original did ``instances += [[ws, ps, ts], ys]``,
            # extending the list with the inputs and labels as two SEPARATE
            # elements, which broke the ``zip(*instances)`` unpacking below.
            instances.append(([ws, ps, ts], ys))
        xss, yss = zip(*instances)
        yield [p_wss, [torch.stack(f) for f in zip(*xss)], torch.stack(yss)]
def e2e_single_seq_batch_generator(instances, num_instances, batch_size):
    """Yield ``num_instances`` batches of ``([stacked_tokens, stacked_marks], stacked_labels)``.

    *instances* is a sequence of ``([tokens, marks], labels)`` pairs; each
    consecutive slice of ``batch_size`` pairs is stacked feature-wise.
    The original left three debug ``print`` calls in the loop; removed.
    """
    for b in range(num_instances):
        start = b * batch_size
        xss, yss = zip(*instances[start:start + batch_size])
        tss, pss = zip(*xss)
        yield [torch.stack(tss), torch.stack(pss)], torch.stack(yss)
def dataset(data_path, path_max_len, parse_line):
    """Load a dataset file via :func:`read_data` and report its size.

    Returns ``(examples, size, max_feature)``.
    """
    print('Read data...', data_path, flush=True)
    examples, max_feature = read_data(data_path, path_max_len, parse_line)
    n_examples = len(examples)
    print("batch size", n_examples, flush=True)
    return examples, n_examples, max_feature
def sentence_wise_dataset(data_path, path_max_len, parse_line, data_rate):
    """Load sentence-wise batches via :func:`predicatewise_sentence_batches`.

    Returns ``(batches, size, int(max_feature))``; only the first *data_rate*
    percent of documents are read by the underlying loader.
    """
    print('Read data...', data_path, flush=True)
    batches, max_feature = predicatewise_sentence_batches(
        data_path, path_max_len, parse_line, data_rate)
    n_batches = len(batches)
    print("batch size", n_batches, flush=True)
    return batches, n_batches, int(max_feature)
def read_data_division(division_path):
    """Read a JSON list of ``[train_indices, test_indices]`` pairs as numpy arrays.

    BUG FIX: the original ``json.load(open(division_path))`` never closed the
    file handle; a ``with`` block releases it deterministically.
    """
    with open(division_path) as f:
        divisions = json.load(f)
    return [[np.array(train), np.array(test)] for train, test in divisions]
def prediate_wise_data_2_token_wise_data(data):
    """Flatten the two outer nesting levels (sentences, predicate groups) into one list."""
    return [item for sentence in data for group in sentence for item in group]
def read_data(data_path, path_max_len, parse_line):
    """Parse every data line of *data_path* with *parse_line*.

    Sentinel lines (SOD/SOS/SOP/EOD/EOS/EOP markers and empty lines) are
    skipped.  Returns ``(xys, max_feature + 1)`` where each element of *xys*
    is a ``[x, y]`` pair and *max_feature* is threaded through *parse_line*.

    BUG FIX: the original ``open(data_path).readlines()`` left the file
    handle unclosed (``data.close()`` only closed the tqdm wrapper).
    """
    _SENTINELS = ("SOD", "SOS", "SOP", "EOD", "EOS", "EOP")
    xys = []
    max_feature = 0
    with open(data_path) as f:
        lines = f.readlines()
    progress = tqdm(lines, mininterval=5)
    for line in progress:
        # str.startswith accepts a tuple: one test instead of six.
        if line == "" or line.startswith(_SENTINELS):
            continue
        x, y, max_feature = parse_line(line, max_feature, path_max_len)
        xys.append([x, y])
    progress.close()
    return xys, max_feature + 1
def batch_generator(xys, max_feature, batch_size, train_num_batch, single_batch):
    """Yield ``train_num_batch`` batches; the first three label columns are split out."""
    for batch_index in range(train_num_batch):
        begin = batch_index * batch_size
        end = begin + batch_size
        xs, ys = zip(*xys[begin:end])
        by_label = np.array(ys).T
        yield single_batch(xs, [by_label[0], by_label[1], by_label[2]], max_feature)
def batch_generator_multiclass(xys, max_feature, batch_size, train_num_batch, single_batch):
    """Yield ``train_num_batch`` multiclass batches; labels are passed as one array."""
    for batch_index in range(train_num_batch):
        begin = batch_index * batch_size
        xs, ys = zip(*xys[begin:begin + batch_size])
        yield single_batch(xs, np.array(ys), max_feature)
def create_batch(xys, max_feature, single_batch):
    """Build one batch from all of *xys*; labels are transposed to one row per label."""
    xs, ys = zip(*xys)
    labels_by_class = np.array(ys).T
    return single_batch(xs, labels_by_class, max_feature)
def create_predicate_batch_for_local_model(xys, max_feature):
    """Build a local-model predicate batch; labels are transposed to one row per label."""
    xs, ys = zip(*xys)
    labels_by_class = np.array(ys).T
    return single_batch_path_pa_bin(xs, labels_by_class, max_feature)
def create_predicate_batch_for_global_model(xs_global, yss, pred_arg_reprs, max_feature, determined_args, pp_path):
    """Build a global-model predicate batch via :func:`single_batch_global_papa`.

    BUG FIX: the original call omitted ``pred_arg_reprs`` even though
    ``single_batch_global_papa`` takes it as its second parameter
    (``pa_repr``), so every later argument was shifted by one position and
    the call raised a TypeError for a missing argument.
    """
    return single_batch_global_papa(xs_global, pred_arg_reprs, yss, max_feature, determined_args, pp_path)
def create_predicate_batch(xys, max_feature, determined_args, pp_path):
    """Build a global predicate batch; labels are transposed to one row per label."""
    xs, ys = zip(*xys)
    labels_by_class = np.array(ys).T
    return single_batch_global_path_pa_bin(xs, labels_by_class, max_feature, determined_args, pp_path)
def create_batch_multiclass(xys, max_feature, single_batch):
    """Build one multiclass batch from all of *xys*; labels stay a single array."""
    xs, ys = zip(*xys)
    return single_batch(xs, np.array(ys), max_feature)
def predicatewise_sentence_batches(data_path, path_max_len, parse_line, data_rate):
    """Read a marker-delimited corpus into nested batches.

    The file uses SOD/EOD (document), SOS/EOS (sentence) and SOP/EOP
    (predicate block) sentinel lines around data lines parsed by *parse_line*.
    Only the first *data_rate* percent of documents are kept.  Returns
    ``(batches, max_feature + 1)`` where each batch is a sentence: a list of
    predicate blocks, each a list of ``[x, y]`` pairs.
    """
    # data = tqdm(io.open(data_path, "r", encoding="utf-8").readlines(), mininterval=5)
    data = io.open(data_path, "r", encoding="utf-8").readlines()
    # Number of documents to keep, as a percentage of all EOD markers.
    count_doc = sum(line.startswith("EOD") for line in data)
    count_doc = int(count_doc * data_rate / 100)
    bs = []   # all kept sentences
    sb = []   # current sentence (list of predicate blocks)
    pb = []   # current predicate block (list of [x, y] pairs)
    max_feature = 0
    # NOTE(review): s_id and p_id are never used below.
    s_id = 0
    p_id = 0
    doc_count = 0
    # Bound append methods are re-captured whenever a new list is started.
    bs_append = bs.append
    sb_append = sb.append
    pb_append = pb.append
    for line in tqdm(data, mininterval=5):
        if line == "" or line.startswith("SOD"):
            continue
        elif line.startswith("EOD"):
            doc_count += 1
            if doc_count > count_doc:
                break  # reached the requested fraction of documents
            continue
        elif line.startswith("SOP"):
            pb = []  # start a new predicate block
            pb_append = pb.append
            continue
        elif line.startswith("EOP"):
            if pb:
                sb_append(pb)  # keep only non-empty predicate blocks
            continue
        elif line.startswith("SOS"):
            sb = []  # start a new sentence
            sb_append = sb.append
            continue
        elif line.startswith("EOS"):
            if sb:
                bs_append(sb)  # keep only non-empty sentences
            continue
        else:
            x, y, max_feature = parse_line(line, max_feature, path_max_len)
            pb_append([x, y])
    return bs, max_feature + 1
def parse_data_line_roth(line, max_feature, path_max_len):
    """Parse one Roth-format line into ``(np.array([pred_arg, syn_path, fs]), label, max_feature)``.

    The syntactic path drops its trailing element and is left-padded with
    zeros to *path_max_len*; *max_feature* tracks the largest binary-feature
    id seen so far.
    """
    labels, pred_arg, syn_path, binary_features = line.split('\t')[0:4]
    pred_arg = np.array([int(tok) for tok in pred_arg.split(":")])
    path_ids = [int(tok) for tok in syn_path.split("|")][:-1]
    syn_path = np.pad(np.array(path_ids), (path_max_len - len(path_ids), 0),
                      'constant', constant_values=0)
    fs = np.array([int(tok) for tok in binary_features.split(' ')], dtype='int')
    max_feature = max(max(fs), max_feature)
    return np.array([pred_arg, syn_path, fs]), label(labels), max_feature
def parse_data_line(line, max_feature, path_max_len):
    """Parse one data line into ``(np.array([pred_arg, syn_path, fs]), label_vec, max_feature)``.

    Unlike the Roth variant, the full syntactic path is kept and labels are
    returned as a multi-hot vector.
    """
    labels, pred_arg, syn_path, binary_features = line.split('\t')[0:4]
    label_arr = label_vec(labels)
    pred_arg = [int(tok) for tok in pred_arg.split(":")]
    path_ids = np.array([int(tok) for tok in syn_path.split("|")])
    syn_path = np.pad(path_ids, (path_max_len - len(path_ids), 0),
                      'constant', constant_values=0)
    fs = np.array([int(tok) for tok in binary_features.split(' ')], dtype='int')
    max_feature = max(max(fs), max_feature)
    return np.array([pred_arg, syn_path, fs]), label_arr, max_feature
def read_preds_path_data(data_path):
    """Read predicate-to-predicate path features into ``pp_path[s_id][p1][p2]``.

    Each stored value is a zero-left-padded numpy array of path ids.
    NOTE(review): relies on a module-level ``lex_path_max_len`` for the
    padding width — confirm it is defined before this runs.
    """
    pp_path = defaultdict(lambda: defaultdict(lambda: defaultdict(list)))
    for line in tqdm(open(data_path).readlines(), mininterval=5):
        sentence_id, pred1_posi, pred2_posi, syn_path, _p1_idx, _p2_idx = line.strip().split('\t')
        path_ids = np.array([int(tok) for tok in syn_path.split("|")])
        padded = np.pad(path_ids, (lex_path_max_len - len(path_ids), 0),
                        'constant', constant_values=0)
        pp_path[int(sentence_id)][int(pred1_posi)][int(pred2_posi)] = padded
    return pp_path
def label(labels):
    """Return the first label id of a comma-separated label string (numpy scalar)."""
    ids = np.fromiter((int(tok) for tok in labels.split(",")), dtype='int')
    return ids[0]
def label_vec(labels):
    """Return a length-4 multi-hot int vector with the given label ids set to 1."""
    vec = np.zeros(4, dtype='int')
    ids = np.fromiter((int(tok) for tok in labels.split(",")), dtype='int')
    vec[ids] = 1
    return vec
def feature_vec(binary_features, max_feature: int):
    """Return a float torch vector of length *max_feature* with the given indices set to 1."""
    one_hot = np.zeros(max_feature)
    one_hot[binary_features] = 1
    return torch.from_numpy(one_hot).float()
def pretrained_word_vecs(data_path, index_file_name, size_e):
    """Build an embedding matrix from a word-index file and a pretrained vector file.

    Row 0 is zeroed as the padding entry; rows without a pretrained vector keep
    a small uniform random initialisation.  Returns a float tensor of shape
    ``(lexicon_size, size_e)``.
    """
    word_to_idx = {}
    lexicon_size = 0
    with open(data_path + index_file_name) as index_file:
        for line in index_file:
            word, idx = line.rstrip().split("\t")
            idx = int(idx)
            word_to_idx[word] = idx
            if lexicon_size < idx:
                lexicon_size = idx + 1
    matrix = np.random.uniform(-0.05, 0.05, (lexicon_size, size_e))
    vocab_size = 0
    vec_path = "{0}/lemma-oov_vec-{1}-jawiki20160901-filtered.txt".format(data_path, size_e)
    with open(vec_path) as vec_file:
        for line in vec_file:
            fields = line.rstrip().split(" ")
            word = fields[0]
            if word in word_to_idx:
                matrix[word_to_idx[word]] = np.asarray(fields[1:], dtype='float32')
                vocab_size += 1
    matrix[0] = np.zeros(size_e)
    print("vocab size: ", vocab_size)
    return torch.from_numpy(matrix).float()
def pretrained_word_vecs_tmp(data_path, index_file_name, size_e, lexicon_size):
    """Build an embedding matrix of a fixed *lexicon_size* from pretrained vectors.

    Row 0 is zeroed as padding.  NOTE: rows for words without a pretrained
    vector keep the uninitialised values of ``torch.Tensor``.
    """
    word_to_idx = {}
    with open(data_path + index_file_name) as index_file:
        for line in index_file:
            word, idx = line.rstrip().split("\t")
            word_to_idx[word] = int(idx)
    vocab_size = 0
    matrix = torch.Tensor(lexicon_size, size_e)
    with open("{0}/word_vec_{1}.txt".format(data_path, size_e)) as vec_file:
        for line in vec_file:
            fields = line.rstrip().split(" ")
            word = fields[0]
            if word in word_to_idx:
                # BUG FIX: torch tensors have no ``append``; the original
                # ``matrix[idx].append(map(float, ...))`` raised AttributeError.
                matrix[word_to_idx[word]] = torch.tensor(
                    [float(v) for v in fields[1:]])
                vocab_size += 1
    matrix[0] = torch.zeros(size_e)
    print("vocab size: ", vocab_size)
    return matrix
def single_batch_global_papa(xs_global, pa_repr, yss, max_feature, determined_args, pp_path):
    """Assemble a global-model batch with pairwise predicate-argument features.

    For each instance, features for up to ``max_determined_args`` previously
    determined arguments are collected: the predicate-to-predicate path, both
    (predicate, argument) id pairs, a one-hot case marker, and three binary
    agreement features; missing slots are zero-padded.

    NOTE(review): ``pa_repr`` is accepted but never used, and this relies on
    module-level ``lex_path_max_len`` / ``max_determined_args`` plus the
    sibling ``feature_vec`` — confirm against callers.
    """
    xs = []
    xs_append = xs.append
    for pred_arg, syn_path, fs, s_id, p1_id in xs_global:
        p1, a1 = pred_arg
        def determined_args_features(i):
            # Feature bundle for the i-th determined argument (zeros when absent).
            if i < len(determined_args):
                p2_id, p2, a2, casemk, pred_no = determined_args[i]
                p_id_same = 1 if p1_id == p2_id else 0
                a_same = 1 if a1 == a2 else 0
                bias = 1
                cm = [0, 0, 0]
                cm[casemk] = 1
                return [pp_path[s_id][p1_id][p2_id], [p1, a1], [p2, a2], cm, [p_id_same, a_same, bias]]
            else:
                return [np.zeros(lex_path_max_len), [0, 0], [0, 0], [0, 0, 0], [0, 0, 0]]
        pp_path_seq, pa1_seq, pa2_seq, cm_seq, papa_bin_seq = \
            zip(*[determined_args_features(i) for i in range(max_determined_args)])
        xs_append(
            [syn_path, pred_arg, feature_vec(fs, max_feature),
             np.array(pp_path_seq), np.array(pa1_seq), np.array(pa2_seq), np.array(cm_seq), np.array(papa_bin_seq)])
    # Transpose per-instance rows into per-feature columns for batching.
    syn_path, pred_arg, f_vec, pp_path_seq, pa1_seq, pa2_seq, cm_seq, papa_bin_seq = zip(*xs)
    return [np.array(syn_path), np.array(pred_arg), np.array(f_vec),
            np.array(pp_path_seq), np.array(pa1_seq), np.array(pa2_seq), np.array(cm_seq), np.array(papa_bin_seq)], \
        yss
def single_batch_global_path_pa_bin(batch_X, batch_Y, max_feature):
    """Split instances into local-model features and global-model bookkeeping.

    Returns ``(xs_local, xs_global, batch_Y)``: the local part is
    ``[syn_paths, pred_args, feature_vectors]`` and the global part is
    ``[sentence_ids, predicate_ids, predicates, arguments]``.
    """
    local_rows = []
    global_rows = []
    for pred_arg, syn_path, fs, s_id, p1_id in batch_X:
        p1, a1 = pred_arg
        local_rows.append([syn_path, pred_arg, feature_vec(fs, max_feature)])
        global_rows.append([s_id, p1_id, p1, a1])
    syn_paths, pred_args, f_vecs = zip(*local_rows)
    s_ids, p1_ids, p1s, a1s = zip(*global_rows)
    xs_local = [np.array(syn_paths), np.array(pred_args), np.array(f_vecs)]
    xs_global = [np.array(s_ids), np.array(p1_ids), np.array(p1s), np.array(a1s)]
    return xs_local, xs_global, batch_Y
def single_batch_path_pa_bin(batch_X, batch_Y, max_feature):
    """Stack path / predicate / argument / binary-feature tensors for one batch."""
    syn_paths = []
    preds = []
    args = []
    f_vecs = []
    for pred_arg, syn_path, fs in batch_X:
        syn_paths.append(torch.from_numpy(syn_path).long())
        preds.append(pred_arg[0])
        args.append(pred_arg[1])
        f_vecs.append(feature_vec(fs, max_feature))
    return [torch.stack(syn_paths), torch.Tensor(preds).long(),
            torch.Tensor(args).long(), torch.stack(f_vecs)], batch_Y
def single_batch_path_pa(batch_X, batch_Y, max_feature):
    """Return ``([syn_paths, pred_args], batch_Y)``; binary features are ignored."""
    pred_args, syn_paths, _unused_fs = zip(*batch_X)
    return [np.array(syn_paths), np.array(pred_args)], batch_Y
def single_batch_pa_bin(batch_X, batch_Y, max_feature):
    """Return ``([pred_args, feature_vectors], batch_Y)``; syntactic paths are dropped."""
    rows = [(pred_arg, feature_vec(fs, max_feature)) for pred_arg, _, fs in batch_X]
    pred_args, f_vecs = zip(*rows)
    return [np.array(pred_args), np.array(f_vecs)], batch_Y
def single_batch_path_bin(batch_X, batch_Y, max_feature):
    """Return ``([syn_paths, feature_vectors], batch_Y)``; predicate ids are dropped."""
    rows = [(syn_path, feature_vec(fs, max_feature)) for _, syn_path, fs in batch_X]
    syn_paths, f_vecs = zip(*rows)
    return [np.array(syn_paths), np.array(f_vecs)], batch_Y
def single_batch_bin(batch_X, batch_Y, max_feature):
    """Return ``(stacked binary feature vectors, batch_Y)``."""
    vectors = [feature_vec(fs, max_feature) for _, _, fs in batch_X]
    return torch.stack(vectors), batch_Y
def single_batch_path(batch_X, batch_Y, max_feature):
    """Return ``(array of syntactic paths, batch_Y)``; other features are dropped."""
    paths = [syn_path for _, syn_path, _ in batch_X]
    return np.array(paths), batch_Y
def single_batch_pa(batch_X, batch_Y, max_feature):
    """Return ``(array of predicate-argument id pairs, batch_Y)``."""
    pairs = [pred_arg for pred_arg, _, _ in batch_X]
    return np.array(pairs), batch_Y
| [
"numpy.random.uniform",
"tqdm.tqdm",
"torch.stack",
"torch.LongTensor",
"random.shuffle",
"numpy.asarray",
"numpy.zeros",
"collections.defaultdict",
"torch.Tensor",
"torch.cuda.is_available",
"numpy.array",
"io.open",
"torch.zeros",
"itertools.chain.from_iterable",
"torch.from_numpy"
] | [((4858, 4883), 'random.shuffle', 'random.shuffle', (['instances'], {}), '(instances)\n', (4872, 4883), False, 'import random\n'), ((5157, 5180), 'random.shuffle', 'random.shuffle', (['batches'], {}), '(batches)\n', (5171, 5180), False, 'import random\n'), ((12350, 12375), 'tqdm.tqdm', 'tqdm', (['data'], {'mininterval': '(5)'}), '(data, mininterval=5)\n', (12354, 12375), False, 'from tqdm import tqdm\n'), ((15105, 15129), 'numpy.zeros', 'np.zeros', (['(4)'], {'dtype': '"""int"""'}), "(4, dtype='int')\n", (15113, 15129), True, 'import numpy as np\n'), ((15308, 15329), 'numpy.zeros', 'np.zeros', (['max_feature'], {}), '(max_feature)\n', (15316, 15329), True, 'import numpy as np\n'), ((15951, 16005), 'numpy.random.uniform', 'np.random.uniform', (['(-0.05)', '(0.05)', '(lexicon_size, size_e)'], {}), '(-0.05, 0.05, (lexicon_size, size_e))\n', (15968, 16005), True, 'import numpy as np\n'), ((16286, 16302), 'numpy.zeros', 'np.zeros', (['size_e'], {}), '(size_e)\n', (16294, 16302), True, 'import numpy as np\n'), ((16751, 16785), 'torch.Tensor', 'torch.Tensor', (['lexicon_size', 'size_e'], {}), '(lexicon_size, size_e)\n', (16763, 16785), False, 'import torch\n'), ((17033, 17052), 'torch.zeros', 'torch.zeros', (['size_e'], {}), '(size_e)\n', (17044, 17052), False, 'import torch\n'), ((1796, 1821), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (1819, 1821), False, 'import torch\n'), ((10945, 10957), 'numpy.array', 'np.array', (['ys'], {}), '(ys)\n', (10953, 10957), True, 'import numpy as np\n'), ((11125, 11137), 'numpy.array', 'np.array', (['ys'], {}), '(ys)\n', (11133, 11137), True, 'import numpy as np\n'), ((11535, 11547), 'numpy.array', 'np.array', (['ys'], {}), '(ys)\n', (11543, 11547), True, 'import numpy as np\n'), ((11777, 11789), 'numpy.array', 'np.array', (['ys'], {}), '(ys)\n', (11785, 11789), True, 'import numpy as np\n'), ((13731, 13765), 'numpy.array', 'np.array', (['[pred_arg, syn_path, fs]'], {}), '([pred_arg, syn_path, fs])\n', 
(13739, 13765), True, 'import numpy as np\n'), ((14321, 14355), 'numpy.array', 'np.array', (['[pred_arg, syn_path, fs]'], {}), '([pred_arg, syn_path, fs])\n', (14329, 14355), True, 'import numpy as np\n'), ((19058, 19076), 'numpy.array', 'np.array', (['syn_path'], {}), '(syn_path)\n', (19066, 19076), True, 'import numpy as np\n'), ((19078, 19096), 'numpy.array', 'np.array', (['pred_arg'], {}), '(pred_arg)\n', (19086, 19096), True, 'import numpy as np\n'), ((19098, 19113), 'numpy.array', 'np.array', (['f_vec'], {}), '(f_vec)\n', (19106, 19113), True, 'import numpy as np\n'), ((19132, 19147), 'numpy.array', 'np.array', (['s_ids'], {}), '(s_ids)\n', (19140, 19147), True, 'import numpy as np\n'), ((19149, 19165), 'numpy.array', 'np.array', (['p1_ids'], {}), '(p1_ids)\n', (19157, 19165), True, 'import numpy as np\n'), ((19167, 19180), 'numpy.array', 'np.array', (['p1s'], {}), '(p1s)\n', (19175, 19180), True, 'import numpy as np\n'), ((19182, 19195), 'numpy.array', 'np.array', (['a1s'], {}), '(a1s)\n', (19190, 19195), True, 'import numpy as np\n'), ((20445, 20459), 'torch.stack', 'torch.stack', (['X'], {}), '(X)\n', (20456, 20459), False, 'import torch\n'), ((20593, 20604), 'numpy.array', 'np.array', (['X'], {}), '(X)\n', (20601, 20604), True, 'import numpy as np\n'), ((20736, 20747), 'numpy.array', 'np.array', (['X'], {}), '(X)\n', (20744, 20747), True, 'import numpy as np\n'), ((678, 703), 'tqdm.tqdm', 'tqdm', (['data'], {'mininterval': '(5)'}), '(data, mininterval=5)\n', (682, 703), False, 'from tqdm import tqdm\n'), ((6396, 6408), 'numpy.array', 'np.array', (['ys'], {}), '(ys)\n', (6404, 6408), True, 'import numpy as np\n'), ((6981, 7002), 'torch.LongTensor', 'torch.LongTensor', (['[0]'], {}), '([0])\n', (6997, 7002), False, 'import torch\n'), ((8665, 8681), 'torch.stack', 'torch.stack', (['tss'], {}), '(tss)\n', (8676, 8681), False, 'import torch\n'), ((9460, 9475), 'numpy.array', 'np.array', (['train'], {}), '(train)\n', (9468, 9475), True, 'import numpy as np\n'), 
((9477, 9491), 'numpy.array', 'np.array', (['test'], {}), '(test)\n', (9485, 9491), True, 'import numpy as np\n'), ((9630, 9655), 'itertools.chain.from_iterable', 'chain.from_iterable', (['data'], {}), '(data)\n', (9649, 9655), False, 'from itertools import chain, groupby\n'), ((10501, 10513), 'numpy.array', 'np.array', (['ys'], {}), '(ys)\n', (10509, 10513), True, 'import numpy as np\n'), ((11989, 12030), 'io.open', 'io.open', (['data_path', '"""r"""'], {'encoding': '"""utf-8"""'}), "(data_path, 'r', encoding='utf-8')\n", (11996, 12030), False, 'import io\n'), ((15369, 15389), 'torch.from_numpy', 'torch.from_numpy', (['fs'], {}), '(fs)\n', (15385, 15389), False, 'import torch\n'), ((16180, 16219), 'numpy.asarray', 'np.asarray', (['values[1:]'], {'dtype': '"""float32"""'}), "(values[1:], dtype='float32')\n", (16190, 16219), True, 'import numpy as np\n'), ((16353, 16377), 'torch.from_numpy', 'torch.from_numpy', (['matrix'], {}), '(matrix)\n', (16369, 16377), False, 'import torch\n'), ((18338, 18356), 'numpy.array', 'np.array', (['syn_path'], {}), '(syn_path)\n', (18346, 18356), True, 'import numpy as np\n'), ((18358, 18376), 'numpy.array', 'np.array', (['pred_arg'], {}), '(pred_arg)\n', (18366, 18376), True, 'import numpy as np\n'), ((18378, 18393), 'numpy.array', 'np.array', (['f_vec'], {}), '(f_vec)\n', (18386, 18393), True, 'import numpy as np\n'), ((18407, 18428), 'numpy.array', 'np.array', (['pp_path_seq'], {}), '(pp_path_seq)\n', (18415, 18428), True, 'import numpy as np\n'), ((18430, 18447), 'numpy.array', 'np.array', (['pa1_seq'], {}), '(pa1_seq)\n', (18438, 18447), True, 'import numpy as np\n'), ((18449, 18466), 'numpy.array', 'np.array', (['pa2_seq'], {}), '(pa2_seq)\n', (18457, 18466), True, 'import numpy as np\n'), ((18468, 18484), 'numpy.array', 'np.array', (['cm_seq'], {}), '(cm_seq)\n', (18476, 18484), True, 'import numpy as np\n'), ((18486, 18508), 'numpy.array', 'np.array', (['papa_bin_seq'], {}), '(papa_bin_seq)\n', (18494, 18508), True, 'import 
numpy as np\n'), ((19503, 19524), 'torch.stack', 'torch.stack', (['syn_path'], {}), '(syn_path)\n', (19514, 19524), False, 'import torch\n'), ((19579, 19597), 'torch.stack', 'torch.stack', (['f_vec'], {}), '(f_vec)\n', (19590, 19597), False, 'import torch\n'), ((19781, 19799), 'numpy.array', 'np.array', (['syn_path'], {}), '(syn_path)\n', (19789, 19799), True, 'import numpy as np\n'), ((19801, 19819), 'numpy.array', 'np.array', (['pred_arg'], {}), '(pred_arg)\n', (19809, 19819), True, 'import numpy as np\n'), ((20019, 20037), 'numpy.array', 'np.array', (['pred_arg'], {}), '(pred_arg)\n', (20027, 20037), True, 'import numpy as np\n'), ((20039, 20054), 'numpy.array', 'np.array', (['f_vec'], {}), '(f_vec)\n', (20047, 20054), True, 'import numpy as np\n'), ((20256, 20274), 'numpy.array', 'np.array', (['syn_path'], {}), '(syn_path)\n', (20264, 20274), True, 'import numpy as np\n'), ((20276, 20291), 'numpy.array', 'np.array', (['f_vec'], {}), '(f_vec)\n', (20284, 20291), True, 'import numpy as np\n'), ((6160, 6175), 'torch.stack', 'torch.stack', (['ys'], {}), '(ys)\n', (6171, 6175), False, 'import torch\n'), ((8787, 8803), 'torch.stack', 'torch.stack', (['yss'], {}), '(yss)\n', (8798, 8803), False, 'import torch\n'), ((10833, 10845), 'numpy.array', 'np.array', (['ys'], {}), '(ys)\n', (10841, 10845), True, 'import numpy as np\n'), ((18126, 18147), 'numpy.array', 'np.array', (['pp_path_seq'], {}), '(pp_path_seq)\n', (18134, 18147), True, 'import numpy as np\n'), ((18149, 18166), 'numpy.array', 'np.array', (['pa1_seq'], {}), '(pa1_seq)\n', (18157, 18166), True, 'import numpy as np\n'), ((18168, 18185), 'numpy.array', 'np.array', (['pa2_seq'], {}), '(pa2_seq)\n', (18176, 18185), True, 'import numpy as np\n'), ((18187, 18203), 'numpy.array', 'np.array', (['cm_seq'], {}), '(cm_seq)\n', (18195, 18203), True, 'import numpy as np\n'), ((18205, 18227), 'numpy.array', 'np.array', (['papa_bin_seq'], {}), '(papa_bin_seq)\n', (18213, 18227), True, 'import numpy as np\n'), ((896, 917), 
'torch.stack', 'torch.stack', (['[yss[i]]'], {}), '([yss[i]])\n', (907, 917), False, 'import torch\n'), ((1193, 1209), 'torch.stack', 'torch.stack', (['yss'], {}), '(yss)\n', (1204, 1209), False, 'import torch\n'), ((1589, 1605), 'torch.stack', 'torch.stack', (['yss'], {}), '(yss)\n', (1600, 1605), False, 'import torch\n'), ((7749, 7770), 'torch.LongTensor', 'torch.LongTensor', (['[0]'], {}), '([0])\n', (7765, 7770), False, 'import torch\n'), ((8394, 8410), 'torch.stack', 'torch.stack', (['yss'], {}), '(yss)\n', (8405, 8410), False, 'import torch\n'), ((8750, 8766), 'torch.stack', 'torch.stack', (['tss'], {}), '(tss)\n', (8761, 8766), False, 'import torch\n'), ((8768, 8784), 'torch.stack', 'torch.stack', (['pss'], {}), '(pss)\n', (8779, 8784), False, 'import torch\n'), ((14473, 14490), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (14484, 14490), False, 'from collections import defaultdict\n'), ((17815, 17841), 'numpy.zeros', 'np.zeros', (['lex_path_max_len'], {}), '(lex_path_max_len)\n', (17823, 17841), True, 'import numpy as np\n'), ((19311, 19337), 'torch.from_numpy', 'torch.from_numpy', (['syn_path'], {}), '(syn_path)\n', (19327, 19337), False, 'import torch\n'), ((19526, 19544), 'torch.Tensor', 'torch.Tensor', (['pred'], {}), '(pred)\n', (19538, 19544), False, 'import torch\n'), ((19553, 19570), 'torch.Tensor', 'torch.Tensor', (['arg'], {}), '(arg)\n', (19565, 19570), False, 'import torch\n'), ((849, 870), 'torch.stack', 'torch.stack', (['[tss[i]]'], {}), '([tss[i]])\n', (860, 870), False, 'import torch\n'), ((872, 893), 'torch.stack', 'torch.stack', (['[pss[i]]'], {}), '([pss[i]])\n', (883, 893), False, 'import torch\n'), ((6132, 6146), 'torch.stack', 'torch.stack', (['e'], {}), '(e)\n', (6143, 6146), False, 'import torch\n'), ((1164, 1178), 'torch.stack', 'torch.stack', (['e'], {}), '(e)\n', (1175, 1178), False, 'import torch\n'), ((1560, 1574), 'torch.stack', 'torch.stack', (['e'], {}), '(e)\n', (1571, 1574), False, 'import 
torch\n'), ((8365, 8379), 'torch.stack', 'torch.stack', (['e'], {}), '(e)\n', (8376, 8379), False, 'import torch\n')] |
from numpy import matmul

# Toy self-attention example: project three 4-dim token encodings into
# 3-dim query vectors with a shared query weight matrix.
x = [
    [1, 0, 1, 0],  # input 1
    [0, 2, 2, 2],  # input 2
    [1, 1, 1, 1],  # input 3
]

W_query = [
    [0, 0, 1],
    [1, 1, 0],
    [0, 1, 0],
    [1, 1, 0],
]

# Expected query vectors, i.e. x @ W_query, kept for reference.
Q = [
    [0, 1, 1],  # Query for input 1
    [4, 6, 0],  # Query for input 2
    [2, 3, 1],  # Query for input 3
]

queries = matmul(x, W_query)
print(queries)
| [
"numpy.matmul"
] | [((318, 336), 'numpy.matmul', 'matmul', (['x', 'W_query'], {}), '(x, W_query)\n', (324, 336), False, 'from numpy import matmul\n')] |
# Copyright 2022 Sony Group Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
import nnabla as nn
import nnabla.solvers as S
import numpy as np
from solver_test_utils import solver_tester, RefSolver, MixinWeightDecayFused
from nbla_test_utils import list_context
ctxs = list_context('Lamb')
def _f(x):
    """Return *x* as a ``float32`` numpy array (shorthand used by the reference solver)."""
    return np.array(x, dtype=np.float32)
class RefLamb(MixinWeightDecayFused, RefSolver):
    """Numpy reference implementation of the LAMB optimizer.

    Used by ``solver_tester`` to validate ``nnabla.solvers.Lamb``: keeps
    first/second moment estimates per parameter and applies the layer-wise
    trust-ratio update.  ``self.weight_decay_rate`` is supplied by
    ``MixinWeightDecayFused``.
    """

    def __init__(self, eta, beta1, beta2, gamma_l, gamma_u, eps, bias_correction):
        super().__init__()
        # Hyper-parameters are stored as float32 to match the device solver.
        self.eta = _f(eta)
        self.beta1 = _f(beta1)
        self.beta2 = _f(beta2)
        self.gamma_l = _f(gamma_l)
        self.gamma_u = _f(gamma_u)
        self.eps = _f(eps)
        self.bias_correction = bias_correction
        self.v = {}  # per-parameter moment estimates {"m": ..., "v": ...}
        self.t = {}  # per-parameter step counters

    def _set_state_impl(self, key, param):
        # Fresh zero moments for a newly registered parameter.
        self.v[key] = {}
        self.v[key]["m"] = np.zeros_like(param)
        self.v[key]["v"] = np.zeros_like(param)
        self.t[key] = 0

    def _update_impl(self, key, w, g):
        """Apply one in-place LAMB update to parameter *w* given gradient *g*."""
        # Step counter saturates at int32 max to avoid overflow.
        self.t[key] = min(self.t[key] + 1, np.iinfo(np.int32).max)
        t = self.t[key]
        weight_decay = self.weight_decay_rate
        m = self.v[key]["m"]
        v = self.v[key]["v"]
        # Exponential moving averages of the gradient and its square.
        m[...] = self.beta1 * m + (1 - self.beta1) * g
        v[...] = self.beta2 * v + (1 - self.beta2) * g * g
        corr1 = 1
        corr2 = 1
        if self.bias_correction:
            # Adam-style bias correction of the moment estimates.
            corr1 = 1 - self.beta1 ** t
            corr2 = 1 - self.beta2 ** t
        r = (m / corr1) / (np.sqrt(v / corr2) + self.eps)
        r = r + weight_decay * w
        # Layer-wise trust ratio: ||w|| (clipped to [gamma_l, gamma_u]) / ||update||.
        v_norm = np.linalg.norm(w)
        v_norm = np.clip(v_norm, a_min=self.gamma_l, a_max=self.gamma_u)
        g_norm = np.linalg.norm(r)
        if g_norm > self.eps:
            local_lr = v_norm / g_norm
        else:
            local_lr = _f(1.0)
        w[...] -= self.eta * local_lr * r
@pytest.mark.parametrize("ctx, solver_name", ctxs)
@pytest.mark.parametrize("decay", [1e-4])
@pytest.mark.parametrize("eta", [1e-2, 1e-4])
@pytest.mark.parametrize("beta1", [0.9, 0.5])
@pytest.mark.parametrize("beta2", [0.999, 0.5])
@pytest.mark.parametrize("gamma_l", [1e-6, 0.1])
@pytest.mark.parametrize("gamma_u", [10, 100])
@pytest.mark.parametrize("eps", [1e-8])
@pytest.mark.parametrize("bias_correction", [False, True])
@pytest.mark.parametrize("seed", [313])
def test_lamb(seed, eta, beta1, beta2, gamma_l, gamma_u, eps, bias_correction, decay, ctx, solver_name):
    """Compare ``nnabla.solvers.Lamb`` against the RefLamb numpy reference
    over a hyper-parameter sweep (one case per parametrize combination)."""
    rng = np.random.RandomState(seed)
    solver_tester(
        rng, S.Lamb, RefLamb,
        [eta, beta1, beta2, gamma_l, gamma_u, eps, bias_correction], atol=1e-6, decay=decay,
        ctx=ctx, solver_name=solver_name)
| [
"numpy.zeros_like",
"solver_test_utils.solver_tester",
"numpy.asarray",
"numpy.iinfo",
"numpy.random.RandomState",
"numpy.clip",
"nbla_test_utils.list_context",
"numpy.linalg.norm",
"pytest.mark.parametrize",
"numpy.sqrt"
] | [((795, 815), 'nbla_test_utils.list_context', 'list_context', (['"""Lamb"""'], {}), "('Lamb')\n", (807, 815), False, 'from nbla_test_utils import list_context\n'), ((2391, 2440), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""ctx, solver_name"""', 'ctxs'], {}), "('ctx, solver_name', ctxs)\n", (2414, 2440), False, 'import pytest\n'), ((2442, 2484), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""decay"""', '[0.0001]'], {}), "('decay', [0.0001])\n", (2465, 2484), False, 'import pytest\n'), ((2484, 2530), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""eta"""', '[0.01, 0.0001]'], {}), "('eta', [0.01, 0.0001])\n", (2507, 2530), False, 'import pytest\n'), ((2530, 2574), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""beta1"""', '[0.9, 0.5]'], {}), "('beta1', [0.9, 0.5])\n", (2553, 2574), False, 'import pytest\n'), ((2576, 2622), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""beta2"""', '[0.999, 0.5]'], {}), "('beta2', [0.999, 0.5])\n", (2599, 2622), False, 'import pytest\n'), ((2624, 2672), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""gamma_l"""', '[1e-06, 0.1]'], {}), "('gamma_l', [1e-06, 0.1])\n", (2647, 2672), False, 'import pytest\n'), ((2673, 2718), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""gamma_u"""', '[10, 100]'], {}), "('gamma_u', [10, 100])\n", (2696, 2718), False, 'import pytest\n'), ((2720, 2759), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""eps"""', '[1e-08]'], {}), "('eps', [1e-08])\n", (2743, 2759), False, 'import pytest\n'), ((2760, 2817), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""bias_correction"""', '[False, True]'], {}), "('bias_correction', [False, True])\n", (2783, 2817), False, 'import pytest\n'), ((2819, 2857), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""seed"""', '[313]'], {}), "('seed', [313])\n", (2842, 2857), False, 'import pytest\n'), ((840, 871), 'numpy.asarray', 'np.asarray', (['x'], {'dtype': 
'np.float32'}), '(x, dtype=np.float32)\n', (850, 871), True, 'import numpy as np\n'), ((2973, 3000), 'numpy.random.RandomState', 'np.random.RandomState', (['seed'], {}), '(seed)\n', (2994, 3000), True, 'import numpy as np\n'), ((3005, 3169), 'solver_test_utils.solver_tester', 'solver_tester', (['rng', 'S.Lamb', 'RefLamb', '[eta, beta1, beta2, gamma_l, gamma_u, eps, bias_correction]'], {'atol': '(1e-06)', 'decay': 'decay', 'ctx': 'ctx', 'solver_name': 'solver_name'}), '(rng, S.Lamb, RefLamb, [eta, beta1, beta2, gamma_l, gamma_u,\n eps, bias_correction], atol=1e-06, decay=decay, ctx=ctx, solver_name=\n solver_name)\n', (3018, 3169), False, 'from solver_test_utils import solver_tester, RefSolver, MixinWeightDecayFused\n'), ((1402, 1422), 'numpy.zeros_like', 'np.zeros_like', (['param'], {}), '(param)\n', (1415, 1422), True, 'import numpy as np\n'), ((1450, 1470), 'numpy.zeros_like', 'np.zeros_like', (['param'], {}), '(param)\n', (1463, 1470), True, 'import numpy as np\n'), ((2106, 2123), 'numpy.linalg.norm', 'np.linalg.norm', (['w'], {}), '(w)\n', (2120, 2123), True, 'import numpy as np\n'), ((2141, 2196), 'numpy.clip', 'np.clip', (['v_norm'], {'a_min': 'self.gamma_l', 'a_max': 'self.gamma_u'}), '(v_norm, a_min=self.gamma_l, a_max=self.gamma_u)\n', (2148, 2196), True, 'import numpy as np\n'), ((2214, 2231), 'numpy.linalg.norm', 'np.linalg.norm', (['r'], {}), '(r)\n', (2228, 2231), True, 'import numpy as np\n'), ((1578, 1596), 'numpy.iinfo', 'np.iinfo', (['np.int32'], {}), '(np.int32)\n', (1586, 1596), True, 'import numpy as np\n'), ((2024, 2042), 'numpy.sqrt', 'np.sqrt', (['(v / corr2)'], {}), '(v / corr2)\n', (2031, 2042), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
"""
@author: <NAME>
"""
import numpy as np
def load_mnist(return_test = False, min_val = -1., max_val = 1., shuffle=True, seed = 1234567890):
    """
    Loads the MNIST Dataset.

    Returns (x_train, y_train), plus (x_test, y_test) when return_test is
    True.  Images are reshaped to theano ordering
    (img_index, channels, height, width) and cast to float32.

    NOTE(review): min_val and max_val are accepted for interface
    compatibility but are not applied to the data.
    """
    from keras.datasets import mnist
    (x_train, y_train), (x_test, y_test) = mnist.load_data()

    def _theano_order(images):
        # (img_index, channels, height, width); channel axis inferred via -1
        n, h, w = images.shape[0], images.shape[-2], images.shape[-1]
        return images.reshape(n, -1, h, w).astype('float32')

    x_train = _theano_order(x_train)
    x_test = _theano_order(x_test)
    print('x_train shape:', x_train.shape)
    print('y_train shape:', y_train.shape)
    print(x_train.shape[0], 'train samples')
    print(x_test.shape[0], 'test samples')
    if shuffle:
        # Re-seeding between the two in-place shuffles applies the same
        # permutation to images and labels, keeping them aligned.
        np.random.seed(seed)
        np.random.shuffle(x_train)
        np.random.seed(seed)
        np.random.shuffle(y_train)
    if return_test:
        return x_train, y_train, x_test, y_test
    return x_train, y_train
def load_fashion_mnist(return_test = False, min_val = -1., max_val = 1., shuffle=True, seed = 1234567890):
    """
    Loads the Fashion-MNIST Dataset.

    Returns (x_train, y_train), plus (x_test, y_test) when return_test is
    True.  Images are reshaped to theano ordering
    (img_index, channels, height, width) and cast to float32.

    NOTE(review): min_val and max_val are accepted for interface
    compatibility but are not applied to the data.
    """
    from keras.datasets import fashion_mnist
    (x_train, y_train), (x_test, y_test) = fashion_mnist.load_data()

    def _theano_order(images):
        # (img_index, channels, height, width); channel axis inferred via -1
        n, h, w = images.shape[0], images.shape[-2], images.shape[-1]
        return images.reshape(n, -1, h, w).astype('float32')

    x_train = _theano_order(x_train)
    x_test = _theano_order(x_test)
    print('x_train shape:', x_train.shape)
    print('y_train shape:', y_train.shape)
    print(x_train.shape[0], 'train samples')
    print(x_test.shape[0], 'test samples')
    if shuffle:
        # Re-seeding between the two in-place shuffles applies the same
        # permutation to images and labels, keeping them aligned.
        np.random.seed(seed)
        np.random.shuffle(x_train)
        np.random.seed(seed)
        np.random.shuffle(y_train)
    if return_test:
        return x_train, y_train, x_test, y_test
    return x_train, y_train
def load_cifar10(return_test = False, min_val = -1., max_val = 1., shuffle=True, seed = 1234567890):
    """
    Loads the Cifar10 Dataset.

    Returns (x_train, y_train), plus (x_test, y_test) when return_test is
    True.  Images are reshaped to theano ordering
    (img_index, channels, height, width) and cast to float32.

    NOTE(review): min_val and max_val are accepted for interface
    compatibility but are not applied to the data.
    """
    from keras.datasets import cifar10
    (x_train, y_train), (x_test, y_test) = cifar10.load_data()

    def _theano_order(images):
        # (img_index, channels, height, width); channel axis inferred via -1
        n, h, w = images.shape[0], images.shape[-2], images.shape[-1]
        return images.reshape(n, -1, h, w).astype('float32')

    x_train = _theano_order(x_train)
    x_test = _theano_order(x_test)
    print('x_train shape:', x_train.shape)
    print('y_train shape:', y_train.shape)
    print(x_train.shape[0], 'train samples')
    print(x_test.shape[0], 'test samples')
    if shuffle:
        # Re-seeding between the two in-place shuffles applies the same
        # permutation to images and labels, keeping them aligned.
        np.random.seed(seed)
        np.random.shuffle(x_train)
        np.random.seed(seed)
        np.random.shuffle(y_train)
    if return_test:
        return x_train, y_train, x_test, y_test
    return x_train, y_train
def load_cifar100(return_test = False, min_val = -1., max_val = 1., shuffle=True, seed = 1234567890):
    """
    Loads the Cifar100 Dataset.

    Returns (x_train, y_train), plus (x_test, y_test) when return_test is
    True.  Images are reshaped to theano ordering
    (img_index, channels, height, width) and cast to float32.

    NOTE(review): min_val and max_val are accepted for interface
    compatibility but are not applied to the data.
    """
    from keras.datasets import cifar100
    (x_train, y_train), (x_test, y_test) = cifar100.load_data()

    def _theano_order(images):
        # (img_index, channels, height, width); channel axis inferred via -1
        n, h, w = images.shape[0], images.shape[-2], images.shape[-1]
        return images.reshape(n, -1, h, w).astype('float32')

    x_train = _theano_order(x_train)
    x_test = _theano_order(x_test)
    print('x_train shape:', x_train.shape)
    print('y_train shape:', y_train.shape)
    print(x_train.shape[0], 'train samples')
    print(x_test.shape[0], 'test samples')
    if shuffle:
        # Re-seeding between the two in-place shuffles applies the same
        # permutation to images and labels, keeping them aligned.
        np.random.seed(seed)
        np.random.shuffle(x_train)
        np.random.seed(seed)
        np.random.shuffle(y_train)
    if return_test:
        return x_train, y_train, x_test, y_test
    return x_train, y_train
def load_LSUN(return_test = False):
    """
    Loads the LSUN Dataset.

    Not implemented yet: always raises NotImplementedError.
    """
    raise NotImplementedError
def create_pairs(x, digit_indices, num_classes):
    """
    Build positive/negative sample pairs for siamese-style training.

    Based on the "create_pairs" function from:
    https://github.com/keras-team/keras/blob/master/examples/mnist_siamese.py

    Returns (pairs, labels): pairs is an array of [sample_a, sample_b] and
    labels alternates 1 (same class) and 0 (different class).
    """
    pairs, labels = [], []
    # One less than the smallest class size so that i + 1 stays in range.
    min_class_length = min(len(digit_indices[d]) for d in range(num_classes)) - 1
    for class_n in range(num_classes):
        indices = digit_indices[class_n]
        for i in range(min_class_length):
            # Positive pair: two consecutive samples of the same class.
            pairs.append([x[indices[i]], x[indices[i + 1]]])
            # Negative pair: one sample of this class, one of a random other class.
            other = (class_n + np.random.randint(1, num_classes)) % num_classes
            pairs.append([x[indices[i]], x[digit_indices[other][i]]])
            labels.extend([1, 0])
    return np.array(pairs), np.array(labels)
# Registry mapping dataset name strings to their loader functions.
DATASET = {
    "mnist": load_mnist,
    "fashion_mnist": load_fashion_mnist,
    "cifar10": load_cifar10,
    "cifar100": load_cifar100,
    "LSUN" : load_LSUN,
} | [
"numpy.random.seed",
"keras.datasets.cifar10.load_data",
"keras.datasets.mnist.load_data",
"keras.datasets.cifar100.load_data",
"numpy.random.randint",
"numpy.array",
"numpy.random.shuffle",
"keras.datasets.fashion_mnist.load_data"
] | [((307, 324), 'keras.datasets.mnist.load_data', 'mnist.load_data', ([], {}), '()\n', (322, 324), False, 'from keras.datasets import mnist\n'), ((1322, 1347), 'keras.datasets.fashion_mnist.load_data', 'fashion_mnist.load_data', ([], {}), '()\n', (1345, 1347), False, 'from keras.datasets import fashion_mnist\n'), ((2331, 2350), 'keras.datasets.cifar10.load_data', 'cifar10.load_data', ([], {}), '()\n', (2348, 2350), False, 'from keras.datasets import cifar10\n'), ((3327, 3347), 'keras.datasets.cifar100.load_data', 'cifar100.load_data', ([], {}), '()\n', (3345, 3347), False, 'from keras.datasets import cifar100\n'), ((816, 836), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (830, 836), True, 'import numpy as np\n'), ((845, 871), 'numpy.random.shuffle', 'np.random.shuffle', (['x_train'], {}), '(x_train)\n', (862, 871), True, 'import numpy as np\n'), ((880, 900), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (894, 900), True, 'import numpy as np\n'), ((909, 935), 'numpy.random.shuffle', 'np.random.shuffle', (['y_train'], {}), '(y_train)\n', (926, 935), True, 'import numpy as np\n'), ((1835, 1855), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (1849, 1855), True, 'import numpy as np\n'), ((1864, 1890), 'numpy.random.shuffle', 'np.random.shuffle', (['x_train'], {}), '(x_train)\n', (1881, 1890), True, 'import numpy as np\n'), ((1899, 1919), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (1913, 1919), True, 'import numpy as np\n'), ((1928, 1954), 'numpy.random.shuffle', 'np.random.shuffle', (['y_train'], {}), '(y_train)\n', (1945, 1954), True, 'import numpy as np\n'), ((2837, 2857), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (2851, 2857), True, 'import numpy as np\n'), ((2866, 2892), 'numpy.random.shuffle', 'np.random.shuffle', (['x_train'], {}), '(x_train)\n', (2883, 2892), True, 'import numpy as np\n'), ((2901, 2921), 'numpy.random.seed', 'np.random.seed', 
(['seed'], {}), '(seed)\n', (2915, 2921), True, 'import numpy as np\n'), ((2930, 2956), 'numpy.random.shuffle', 'np.random.shuffle', (['y_train'], {}), '(y_train)\n', (2947, 2956), True, 'import numpy as np\n'), ((3837, 3857), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (3851, 3857), True, 'import numpy as np\n'), ((3866, 3892), 'numpy.random.shuffle', 'np.random.shuffle', (['x_train'], {}), '(x_train)\n', (3883, 3892), True, 'import numpy as np\n'), ((3901, 3921), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (3915, 3921), True, 'import numpy as np\n'), ((3930, 3956), 'numpy.random.shuffle', 'np.random.shuffle', (['y_train'], {}), '(y_train)\n', (3947, 3956), True, 'import numpy as np\n'), ((5086, 5101), 'numpy.array', 'np.array', (['pairs'], {}), '(pairs)\n', (5094, 5101), True, 'import numpy as np\n'), ((5103, 5119), 'numpy.array', 'np.array', (['labels'], {}), '(labels)\n', (5111, 5119), True, 'import numpy as np\n'), ((4826, 4859), 'numpy.random.randint', 'np.random.randint', (['(1)', 'num_classes'], {}), '(1, num_classes)\n', (4843, 4859), True, 'import numpy as np\n')] |
#!/home/arthurb/bin/anaconda/bin/python
# python ax1.py params_file FILEIN FILEOUT
# python ax1.py params_file FILEIN FILEOUT START STOP
# python ax1.py FS NFFT NW K PVAL FILEIN FILEOUT
# python ax1.py FS NFFT NW K PVAL FILEIN FILEOUT START STOP
#
# analyze a set of time series with multi-taper spectral analysis and
# create a sparse matrix of just the time-frequency pixels whose F-test
# passes PVAL.
#
# typical usage consists of one or more input files being analyzed by one
# or more parameter sets. for example, four microphone recordings of the
# same vocalizing mouse analyzed with three different NFFTs and the same
# NW, K, and PVAL. <filename>.ch[1-4] yield <filename>-[1-3].ax
#
# FS: sampling rate in Hertz
# NFFT: FFT window size in seconds, rounds up to the next power of 2 tics
# NW: multi-taper time-bandwidth product
# K: number of tapers
# PVAL: F-test p-val threshold
# FILEIN: the base filename and path of [0-9].wav files with a single channel each,
# or .ch[0-9] files containing float32s
# FILEOUT: an integer to append to FILEIN to differentiate parameter sets used
# START,STOP: optional time range, in seconds
#
# output is a binary file with a time x frequency x amplitude x channel
# array of hot pixels
#
# python ax1.py 'ultrasonic_params.txt' 'urine' '1'
# python ax1.py 200e3 0.001 15 29 0.01 'urine' '1'
# python ax1.py 450450 0.001 15 29 0.01 0 30 'groundtruth' '1'
# /home/arthurb/bin/anaconda/bin/kernprof.py -l -v ax1.py 450450 0.00025 22 43 0.01 /groups/egnor/egnorlab/ben/Test_D_1 7 0 4
from ax1b import do_it, nextpow2
import struct
import time
import numpy as np
import glob
import sys
import os
from multiprocessing import Pool, cpu_count
#import pdb
import math
from scipy import stats
import pyfftw
from dpss import dpss
import wave
if __name__ == "__main__":
    # ---- argument parsing ----
    # Accepts either a params file (loaded via execfile) or explicit
    # FS/NFFT/NW/K/PVAL values, optionally followed by START/STOP seconds.
    if (len(sys.argv)!=4) and (len(sys.argv)!=6) and (len(sys.argv)!=8) and (len(sys.argv)!=10):
        print('invalid args')
        # NOTE(review): bare `exit` only references the builtin without
        # calling it, so execution falls through; should be sys.exit(1).
        exit
    tstart=time.time()
    if (len(sys.argv)<8):
        # NOTE(review): execfile() exists only in Python 2; under Python 3
        # this branch raises NameError.
        execfile(sys.argv[1])
        FILEIN=sys.argv[2]
        FILEOUT=sys.argv[3]
    else:
        FS=sys.argv[1]
        NFFT=sys.argv[2]
        NW=sys.argv[3]
        K=sys.argv[4]
        PVAL=sys.argv[5]
        FILEIN=sys.argv[6]
        FILEOUT=sys.argv[7]
    if ((len(sys.argv)==6) or (len(sys.argv)==10)):
        START=sys.argv[-2]
        STOP=sys.argv[-1]
    # ---- convert string CLI arguments to numbers ----
    if (isinstance(FS,str)):
        FS = int(FS)
    if (isinstance(NFFT,str)):
        NFFT = float(NFFT)
    if (isinstance(NW,str)):
        NW = int(NW)
    if (isinstance(K,str)):
        K = int(K)
    if (isinstance(PVAL,str)):
        PVAL = float(PVAL)
    if ((len(sys.argv)==6) or (len(sys.argv)==10)):
        if (isinstance(START,str)):
            START = float(START)
        if (isinstance(STOP,str)):
            STOP = float(STOP)
    VERSION=1
    SUBSAMPLE=1
    NWORKERS=cpu_count()
    FS=int(FS/SUBSAMPLE);
    NFFT=int(nextpow2(NFFT*FS)) # convert to ticks
    NWINDOWS_PER_WORKER=int(12*256*1000/NFFT) # NFFT/2 ticks
    FIRST_MT=float('nan')
    LAST_MT=float('nan')
    FRACTION_MT=float('nan')
    # Multi-taper setup: DPSS tapers and the frequency grid for the FFT bins.
    tapers,eig = dpss(NFFT, NW, K)
    tapers = np.array(tapers, dtype=np.float32)
    #tapers = tapers * np.sqrt(FS)
    f=np.array(range(0,NFFT//2+1))*FS/NFFT
    df=f[1]-f[0];
    # ---- locate input channel files (.ch* floats or *.wav) ----
    DIROUT=os.path.dirname(FILEIN);
    FILEINs=sorted(glob.glob(FILEIN+'.ch*'));
    FILE_TYPE=1
    if (len(FILEINs)==0):
        FILEINs=sorted(glob.glob(FILEIN+'*.wav'));
        FILE_TYPE=2
    if (len(FILEINs)==0):
        print(["can't find any .wav or .ch files with basename '"+FILEIN]);
        # NOTE(review): bare `exit` is a no-op here as well (see above).
        exit
    NCHANNELS=len(FILEINs);
    REMAP=list();
    # Determine the recording length per channel and remember the channel
    # digit from each filename (last char for .ch files, 5th-from-last for .wav).
    for i in range(0,NCHANNELS):
        filei=os.path.join(DIROUT,FILEINs[i])
        if FILE_TYPE==1:
            try:
                fid=open(filei,'rb')
            except:
                print(["can't open file '"+filei+"'"])
                # NOTE(review): bare `exit` is a no-op here as well.
                exit
            fid.seek(0,2);
            # 4 bytes per float32 sample -> file length in seconds
            FILE_LEN=fid.tell()/4/FS;
            fid.close()
            REMAP.append(FILEINs[i][-1]);
        if FILE_TYPE==2:
            try:
                fid=wave.open(filei,'rb')
            except:
                print(["can't open file '"+filei+"'"])
                exit
            FILE_LEN=fid.getnframes()/FS
            fid.close();
            REMAP.append(FILEINs[i][-5]);
    # ---- choose the time range to process ----
    if 'START' not in locals():
        tmp=FILE_LEN*FS/(NFFT//2)-1
        print('Processing {:.3g} min = {:.3g} windows = {:3g} chunks of data in {:s}'.format(FILE_LEN/60, tmp, tmp/NWINDOWS_PER_WORKER, FILEINs[i]));
        t_offset_tic=0;
        t_now_sec=0;
    else:
        tmp=(STOP-START)*FS/(NFFT//2)-1
        print('Processing {:.3g} min = {:.3g} windows = {:3g} chunks of data in {:s}'.format((STOP-START)/60, tmp, tmp/NWINDOWS_PER_WORKER, FILEINs[i]));
        t_offset_tic=round(START*FS);
        t_now_sec=START;
    # ---- write the .ax binary header ----
    fid_out=open(FILEIN+'-'+FILEOUT+'.ax','wb')
    # L=8 bytes on 64-bit systems
    fid_out.write(struct.pack('B',VERSION))
    fid_out.write(struct.pack('B',SUBSAMPLE))
    fid_out.write(struct.pack('B',0))
    fid_out.write(struct.pack('I',FS))
    fid_out.write(struct.pack('I',NFFT))
    fid_out.write(struct.pack('H',NW))
    fid_out.write(struct.pack('H',K))
    fid_out.write(struct.pack('d',PVAL))
    fid_out.write(struct.pack('d',df))
    t_now=0
    tloop=time.time()
    pool=Pool()
    # ---- main processing loop: farm chunks of windows out to workers ----
    while ((t_now_sec<FILE_LEN) and (('STOP' not in locals()) or (t_now_sec<STOP))):
        # Print a progress line at most every 10 seconds of wall time.
        if ((time.time()-tloop)>10):
            tmp=t_now_sec
            tmp2=0
            if 'START' in locals():
                tmp=tmp-START
                tmp2=START
            if 'STOP' in locals():
                tmp=tmp/(STOP-tmp2)
            else:
                tmp=tmp/(FILE_LEN-tmp2)
            print('{:d} sec processed; {:d}% done'.format(int(round(t_now_sec-tmp2)),int(round(100*tmp))))
            tloop=time.time()
        #idx=map(do_it, \
        idx=pool.map(do_it, \
            [(DIROUT, FILEINs, t_now, NW,K,PVAL,FS,NFFT, NWINDOWS_PER_WORKER, tapers, x, t_offset_tic, FILE_TYPE, round(FILE_LEN*FS)) for x in range(0,NWORKERS)])
        # Each worker returns a list of hot pixels: (time, frequency,
        # amplitude, channel index); append them as packed doubles.
        for i in idx:
            for j in i:
                fid_out.write(struct.pack('dddd', \
                    float(t_now)+j[0], j[1], j[2], float(REMAP[j[3]])))
        t_now_sec = t_now_sec+float(NFFT//2)/FS*NWORKERS*NWINDOWS_PER_WORKER
        t_now = t_now+NWORKERS*NWINDOWS_PER_WORKER
    # 'Z' terminator marks the end of the hot-pixel stream.
    fid_out.write('Z'.encode('ascii'))
    fid_out.close()
    tstop = time.time() - tstart
    print('Run time was {:.3g} minutes.'.format(tstop/60))
    pool.close()
| [
"dpss.dpss",
"wave.open",
"os.path.dirname",
"struct.pack",
"time.time",
"numpy.array",
"multiprocessing.Pool",
"glob.glob",
"os.path.join",
"ax1b.nextpow2",
"multiprocessing.cpu_count"
] | [((1964, 1975), 'time.time', 'time.time', ([], {}), '()\n', (1973, 1975), False, 'import time\n'), ((2760, 2771), 'multiprocessing.cpu_count', 'cpu_count', ([], {}), '()\n', (2769, 2771), False, 'from multiprocessing import Pool, cpu_count\n'), ((3000, 3017), 'dpss.dpss', 'dpss', (['NFFT', 'NW', 'K'], {}), '(NFFT, NW, K)\n', (3004, 3017), False, 'from dpss import dpss\n'), ((3029, 3063), 'numpy.array', 'np.array', (['tapers'], {'dtype': 'np.float32'}), '(tapers, dtype=np.float32)\n', (3037, 3063), True, 'import numpy as np\n'), ((3165, 3188), 'os.path.dirname', 'os.path.dirname', (['FILEIN'], {}), '(FILEIN)\n', (3180, 3188), False, 'import os\n'), ((4993, 5004), 'time.time', 'time.time', ([], {}), '()\n', (5002, 5004), False, 'import time\n'), ((5012, 5018), 'multiprocessing.Pool', 'Pool', ([], {}), '()\n', (5016, 5018), False, 'from multiprocessing import Pool, cpu_count\n'), ((2809, 2828), 'ax1b.nextpow2', 'nextpow2', (['(NFFT * FS)'], {}), '(NFFT * FS)\n', (2817, 2828), False, 'from ax1b import do_it, nextpow2\n'), ((3207, 3233), 'glob.glob', 'glob.glob', (["(FILEIN + '.ch*')"], {}), "(FILEIN + '.ch*')\n", (3216, 3233), False, 'import glob\n'), ((3530, 3562), 'os.path.join', 'os.path.join', (['DIROUT', 'FILEINs[i]'], {}), '(DIROUT, FILEINs[i])\n', (3542, 3562), False, 'import os\n'), ((4643, 4668), 'struct.pack', 'struct.pack', (['"""B"""', 'VERSION'], {}), "('B', VERSION)\n", (4654, 4668), False, 'import struct\n'), ((4685, 4712), 'struct.pack', 'struct.pack', (['"""B"""', 'SUBSAMPLE'], {}), "('B', SUBSAMPLE)\n", (4696, 4712), False, 'import struct\n'), ((4729, 4748), 'struct.pack', 'struct.pack', (['"""B"""', '(0)'], {}), "('B', 0)\n", (4740, 4748), False, 'import struct\n'), ((4765, 4785), 'struct.pack', 'struct.pack', (['"""I"""', 'FS'], {}), "('I', FS)\n", (4776, 4785), False, 'import struct\n'), ((4802, 4824), 'struct.pack', 'struct.pack', (['"""I"""', 'NFFT'], {}), "('I', NFFT)\n", (4813, 4824), False, 'import struct\n'), ((4841, 4861), 'struct.pack', 
'struct.pack', (['"""H"""', 'NW'], {}), "('H', NW)\n", (4852, 4861), False, 'import struct\n'), ((4878, 4897), 'struct.pack', 'struct.pack', (['"""H"""', 'K'], {}), "('H', K)\n", (4889, 4897), False, 'import struct\n'), ((4914, 4936), 'struct.pack', 'struct.pack', (['"""d"""', 'PVAL'], {}), "('d', PVAL)\n", (4925, 4936), False, 'import struct\n'), ((4953, 4973), 'struct.pack', 'struct.pack', (['"""d"""', 'df'], {}), "('d', df)\n", (4964, 4973), False, 'import struct\n'), ((6008, 6019), 'time.time', 'time.time', ([], {}), '()\n', (6017, 6019), False, 'import time\n'), ((3291, 3318), 'glob.glob', 'glob.glob', (["(FILEIN + '*.wav')"], {}), "(FILEIN + '*.wav')\n", (3300, 3318), False, 'import glob\n'), ((5455, 5466), 'time.time', 'time.time', ([], {}), '()\n', (5464, 5466), False, 'import time\n'), ((3848, 3870), 'wave.open', 'wave.open', (['filei', '"""rb"""'], {}), "(filei, 'rb')\n", (3857, 3870), False, 'import wave\n'), ((5112, 5123), 'time.time', 'time.time', ([], {}), '()\n', (5121, 5123), False, 'import time\n')] |
import simulation
import numpy as np
import datetime
import time as timer
"""
Description: Given an input time, determine the largest distance the car can travel in that time.
[time -> distance]
Note: this example assumes constant speed throughout
"""
start = timer.perf_counter()
# ----- Simulation input -----
simulation_duration = 60 * 60 * 9  # 9 hours, in seconds
tick = 1  # simulation time step in seconds
speed_increment = 1  # km/h granularity of the candidate speeds
# ----- Simulation constants -----
incident_sunlight = 1000
initial_battery_charge = 0.9
lvs_power_loss = 0
max_speed = 104  # highest candidate speed in km/h
# ----- Component initialisation -----
basic_array = simulation.BasicArray(incident_sunlight)
basic_array.set_produced_energy(0)
basic_battery = simulation.BasicBattery(initial_battery_charge)
basic_lvs = simulation.BasicLVS(lvs_power_loss * tick)
basic_motor = simulation.BasicMotor()
# ----- Array initialisation -----
time = np.arange(0, simulation_duration + tick, tick, dtype='f4')
# stores the speeds that are considered in the simulation
speeds_simulated = np.arange(1, max_speed + speed_increment, speed_increment, dtype='f4')
# creates a 2D array of simulated speeds at each time step
# (rows: candidate speeds, columns: time steps)
speed_kmh = np.meshgrid(time, speeds_simulated)[-1]
# used to calculate car's time in motion
tick_array = np.full_like(speed_kmh, fill_value=tick, dtype='f4')
tick_array[:, 0] = 0  # the first time step contributes no motion time
# ----- Energy calculations -----
basic_array.update(tick)
basic_lvs.update(tick)
lvs_consumed_energy = basic_lvs.get_consumed_energy()
basic_motor.calculate_power_in(speed_kmh)
basic_motor.update(tick)
motor_consumed_energy = basic_motor.get_consumed_energy()
produced_energy = basic_array.get_produced_energy()
consumed_energy = motor_consumed_energy + lvs_consumed_energy
# array that stores the energy transferred from/to battery for each simulated speed and each time step
delta_energy = produced_energy - consumed_energy
# ----- Array calculations -----
cumulative_delta_energy = np.cumsum(delta_energy, axis=1)
battery_variables_array = basic_battery.update_array(cumulative_delta_energy)
state_of_charge = battery_variables_array[0]
# snap near-zero SOC values to exactly zero so the masks below treat them as empty
state_of_charge[np.abs(state_of_charge) < 1e-03] = 0
# when the battery SOC is empty, the car doesn't move
speed_kmh = np.logical_and(speed_kmh, state_of_charge) * speed_kmh
time_in_motion = np.logical_and(tick_array, state_of_charge) * tick
time_taken = np.sum(time_in_motion, axis=1)
# stores final SOC for each simulated speed
final_soc = state_of_charge[:, -1] * 100
# distance per tick in km (speed in km/h times hours in motion)
distance = speed_kmh * (time_in_motion / 3600)
# array storing the total distance travelled at each speed
distance_travelled = np.sum(distance, axis=1)
# ----- Simulation output -----
max_distance = np.amax(distance_travelled)
max_distance_index = np.argmax(distance_travelled)
# speed that produces the maximum distance
max_distance_speed = speeds_simulated[max_distance_index]
# time taken to travel the maximum distance
max_distance_time = time_taken[max_distance_index]
max_distance_time = str(datetime.timedelta(seconds=int(max_distance_time)))
# final battery SOC when max distance is travelled
max_distance_final_soc = final_soc[max_distance_index]
print(f"Simulation complete!\n\n"
      f"Time taken: {max_distance_time}\n"
      f"Maximum distance traversable: {max_distance:.2f}km\n"
      f"Speed: {max_distance_speed}km/h\n"
      f"Final battery SOC: {max_distance_final_soc:.2f}%\n")
stop = timer.perf_counter()
print(f"Calculation time: {stop - start:.3f}s")
| [
"simulation.BasicLVS",
"numpy.full_like",
"numpy.sum",
"numpy.meshgrid",
"numpy.logical_and",
"numpy.argmax",
"numpy.abs",
"time.perf_counter",
"numpy.amax",
"numpy.cumsum",
"simulation.BasicArray",
"numpy.arange",
"simulation.BasicBattery",
"simulation.BasicMotor"
] | [((264, 284), 'time.perf_counter', 'timer.perf_counter', ([], {}), '()\n', (282, 284), True, 'import time as timer\n'), ((562, 602), 'simulation.BasicArray', 'simulation.BasicArray', (['incident_sunlight'], {}), '(incident_sunlight)\n', (583, 602), False, 'import simulation\n'), ((655, 702), 'simulation.BasicBattery', 'simulation.BasicBattery', (['initial_battery_charge'], {}), '(initial_battery_charge)\n', (678, 702), False, 'import simulation\n'), ((716, 758), 'simulation.BasicLVS', 'simulation.BasicLVS', (['(lvs_power_loss * tick)'], {}), '(lvs_power_loss * tick)\n', (735, 758), False, 'import simulation\n'), ((774, 797), 'simulation.BasicMotor', 'simulation.BasicMotor', ([], {}), '()\n', (795, 797), False, 'import simulation\n'), ((842, 900), 'numpy.arange', 'np.arange', (['(0)', '(simulation_duration + tick)', 'tick'], {'dtype': '"""f4"""'}), "(0, simulation_duration + tick, tick, dtype='f4')\n", (851, 900), True, 'import numpy as np\n'), ((979, 1049), 'numpy.arange', 'np.arange', (['(1)', '(max_speed + speed_increment)', 'speed_increment'], {'dtype': '"""f4"""'}), "(1, max_speed + speed_increment, speed_increment, dtype='f4')\n", (988, 1049), True, 'import numpy as np\n'), ((1217, 1269), 'numpy.full_like', 'np.full_like', (['speed_kmh'], {'fill_value': 'tick', 'dtype': '"""f4"""'}), "(speed_kmh, fill_value=tick, dtype='f4')\n", (1229, 1269), True, 'import numpy as np\n'), ((1885, 1916), 'numpy.cumsum', 'np.cumsum', (['delta_energy'], {'axis': '(1)'}), '(delta_energy, axis=1)\n', (1894, 1916), True, 'import numpy as np\n'), ((2300, 2330), 'numpy.sum', 'np.sum', (['time_in_motion'], {'axis': '(1)'}), '(time_in_motion, axis=1)\n', (2306, 2330), True, 'import numpy as np\n'), ((2546, 2570), 'numpy.sum', 'np.sum', (['distance'], {'axis': '(1)'}), '(distance, axis=1)\n', (2552, 2570), True, 'import numpy as np\n'), ((2620, 2647), 'numpy.amax', 'np.amax', (['distance_travelled'], {}), '(distance_travelled)\n', (2627, 2647), True, 'import numpy as np\n'), ((2669, 
2698), 'numpy.argmax', 'np.argmax', (['distance_travelled'], {}), '(distance_travelled)\n', (2678, 2698), True, 'import numpy as np\n'), ((3332, 3352), 'time.perf_counter', 'timer.perf_counter', ([], {}), '()\n', (3350, 3352), True, 'import time as timer\n'), ((1122, 1157), 'numpy.meshgrid', 'np.meshgrid', (['time', 'speeds_simulated'], {}), '(time, speeds_simulated)\n', (1133, 1157), True, 'import numpy as np\n'), ((2162, 2204), 'numpy.logical_and', 'np.logical_and', (['speed_kmh', 'state_of_charge'], {}), '(speed_kmh, state_of_charge)\n', (2176, 2204), True, 'import numpy as np\n'), ((2235, 2278), 'numpy.logical_and', 'np.logical_and', (['tick_array', 'state_of_charge'], {}), '(tick_array, state_of_charge)\n', (2249, 2278), True, 'import numpy as np\n'), ((2058, 2081), 'numpy.abs', 'np.abs', (['state_of_charge'], {}), '(state_of_charge)\n', (2064, 2081), True, 'import numpy as np\n')] |
from copy import deepcopy
import numpy as np
try:
import rospy
import actionlib
from franka_msgs.msg import FrankaState
from ros_panda_controller.msg import RobotModel
from franka_gripper.msg import GraspAction, GraspGoal, GraspEpsilon, \
HomingAction, HomingGoal, MoveAction, MoveGoal
from sensor_msgs.msg import JointState
ready_for_real_world = True
missing_module = None
except ImportError as e:
missing_module = e.name
ready_for_real_world = False
class Gripper:
    """Interface to the Franka Panda gripper via the franka_gripper ROS action servers."""

    def __init__(self, homing=True):
        """
        Connect to the gripper action servers and optionally perform homing.

        Parameters
        ----------
        homing : bool
            If True, a homing action is executed to calibrate the gripper
            fingers before use.
        """
        # check if everything is ready for real-world training
        if not ready_for_real_world:
            raise ImportError("You can not run any real-world training. The following package is missed:\n %s" %
                              missing_module)
        # create the gripper action clients
        self.homing_action_client = actionlib.SimpleActionClient("/franka_gripper/homing", HomingAction)
        self.move_action_client = actionlib.SimpleActionClient("/franka_gripper/move", MoveAction)
        self.grasp_action_client = actionlib.SimpleActionClient("/franka_gripper/grasp", GraspAction)
        # wait for the interfaces
        rospy.loginfo("Waiting for gripper action servers ...")
        self.homing_action_client.wait_for_server()
        self.move_action_client.wait_for_server()
        self.grasp_action_client.wait_for_server()
        rospy.loginfo("Found gripper action servers!")
        # setup a subscriber for information about the gripper fingers
        self._joint_positions = dict()
        self._joint_names = ('panda_finger_joint1', 'panda_finger_joint2')
        self._joint_velocity = dict()
        self._joint_effort = dict()
        self._joint_states_state_sub = rospy.Subscriber('/franka_gripper/joint_states', JointState,
                                                         self._joint_states_callback, queue_size=1, tcp_nodelay=True)
        # gripper_pose
        self._gripper_position = None
        self._gripper_orientation = None
        self._gripper_pose = None
        # gripper grasp indicator
        self._last_action = None
        self._grasped_obj = False
        self.gripper_open = True if homing else None
        # lets make a homing action first
        if homing:
            print("Homing. Please wait ...")
            self.homing()
            print("Finished Homing. The Gripper is ready to use!")
        # setup some default values
        self.def_force = 10  # 10 NM
        self.max_force = 30  # maximum allowed force -> Note that the gripper actually supports more!
        self.def_speed = 0.1
        self.max_speed = 0.2
        self.def_epsilon = 0.005  # 0.005 m

    # -------------- Callbacks --------------
    def _joint_states_callback(self, msg):
        """
        Callback for gripper finger states.

        Parameters
        ----------
        msg: JointState
            Message containing information about the gripper fingers.
        """
        for idx, name in enumerate(msg.name):
            if name in self._joint_names:
                self._joint_positions[name] = msg.position[idx]
                self._joint_velocity[name] = msg.velocity[idx]
                self._joint_effort[name] = msg.effort[idx]

    # -------------- Getters --------------
    def get_joint_positions(self):
        """
        Returns the gripper finger positions.

        Returns
        -------
        np.array
            2-dim array containing the positions of the gripper fingers.
        """
        return deepcopy(np.fromiter(self._joint_positions.values(), dtype=float))

    def get_joint_velocities(self):
        """
        Returns the gripper finger velocities.

        Returns
        -------
        np.array
            2-dim array containing the velocities of the gripper fingers.
        """
        return deepcopy(np.fromiter(self._joint_velocity.values(), dtype=float))

    def get_gripper_position(self):
        """
        Returns the end-effector (gripper) position.

        Returns
        -------
        np.array
            3-dim array containing the position of the end-effector with respect to the Panda robot base.
        """
        return deepcopy(self._gripper_position)

    def get_gripper_orientation(self):
        """
        Returns the end-effector (gripper) orientation. Currently not supported!

        Returns
        -------
        np.array
            4-dim array containing the orientation in quaternions (qx,qy,qz,qw) of the end-effector
            with respect to the Panda robot base.
        """
        return deepcopy(self._gripper_orientation)

    def get_gripper_pose(self):
        """
        Returns the pose of the end-effector (gripper).

        Requires both the position and the orientation to have been set via
        the corresponding setters beforehand.

        Returns
        -------
        np.array
            7-dim array (x,y,z,qx,qy,qz,qw) containing the pose of the end-effector.
        """
        # BUGFIX: the previous implementation used
        # np.concatenate([position + orientation]), which adds the 3- and
        # 4-element arrays element-wise and raises a broadcasting error.
        # Concatenate the two arrays instead to build the 7-dim pose.
        return deepcopy(np.concatenate([self._gripper_position, self._gripper_orientation]))

    def check_if_object_grasped(self):
        """
        Checks whether the object has been grasped successfully or not.

        Returns
        -------
        bool
            True, if object has been grasped successfully.
        """
        if self._last_action is None:
            raise ValueError("You can not check for grasped object, before making an open or grasp action!")
        elif self._last_action == "open":
            return False
        elif self._last_action == "grasp":
            # check the result of the last grasp action
            result = self.grasp_action_client.get_result()
            if result is None:
                self._grasped_obj = False
                return self._grasped_obj
            elif result.success:
                self._grasped_obj = True
                return self._grasped_obj
            else:
                self._grasped_obj = False
                return self._grasped_obj

    # -------------- Setters --------------
    def set_gripper_position(self, position):
        """
        Set the internal end-effector (gripper) position. This should only be called from the robot class to set
        the position received from the callback.

        Parameters
        ----------
        position : np.array
            3-dim (x,y,z) array containing the position of the end-effector.
        """
        self._gripper_position = position

    def set_gripper_orientation(self, orientation):
        """
        Set the internal end-effector (gripper) orientation. This should only be called from the robot class to set
        the orientation received from the callback.

        Parameters
        ----------
        orientation : np.array
            4-dim (qx,qy,qz,qw) array containing the orientation of the end-effector ion quaternions.
        """
        self._gripper_orientation = orientation

    # -------------- Actions --------------
    def grasp(self, width, speed=None, force=None, epsilon=None, wait=False):
        """
        Grasps an object with a given width, speed and force. The operation is successful if the distance d between
        the gripper fingers is: width−epsilon_inner < d < width+epsilon_outer.

        Parameters
        ----------
        width : float
            Width of the object to grasp.
        speed : float
            Speed of the gripper fingers while grasping.
        force : float
            Force of the gripper fingers while grasping.
        epsilon : float
            Threshold used to indicate whether the grasp was successful or not.
        wait : bool
            If true, it is waited until the grasp has been completed
        """
        # check what values are provided and fallback to defaults if needed
        if not speed:
            speed = self.def_speed
        if speed > self.max_speed:
            print("Speed is too high. Reducing to %f." % self.max_speed)
            speed = self.max_speed
        if not force:
            force = self.def_force
        if force > self.max_force:
            print("Force is too high. Reducing to %f." % self.max_force)
            force = self.max_force
        if not epsilon:
            epsilon = self.def_epsilon
        # prepare goal
        goal = GraspGoal()
        goal.width = width
        goal.speed = speed
        goal.force = force
        goal.epsilon.inner = epsilon
        goal.epsilon.outer = epsilon
        # send goal
        self.grasp_action_client.send_goal(goal)
        self._last_action = "grasp"
        self.gripper_open = False
        if wait:
            self.grasp_action_client.wait_for_result()

    def homing(self):
        """
        Performs homing to calibrate gripper fingers.

        Returns
        -------
        bool
            True, if homing was successful.
        """
        self.homing_action_client.send_goal_and_wait(HomingGoal())
        self._last_action = None
        self.gripper_open = True
        return self.homing_action_client.get_result().success

    def open(self):
        """
        Moves the gripper to the maximum width.

        Returns
        -------
        bool
            True, if opening was successful.
        """
        self._last_action = "open"
        self.gripper_open = True
        return self._move(0.15)

    def _move(self, width, speed=None):
        """
        Moves the gripper finger to a certain width with a certain speed.

        Parameters
        ----------
        width : float
            Width between the gripper fingers to be reached.
        speed : float
            Speed of the fingers used while moving.

        Returns
        -------
        bool
            True, if moving was successful.
        """
        # check what values are provided and fallback to defaults if needed
        if not speed:
            speed = self.def_speed
        if speed > self.max_speed:
            print("Speed is too high. Reducing to %f." % self.max_speed)
            speed = self.max_speed
        # prepare goal
        goal = MoveGoal()
        goal.width = width
        goal.speed = speed
        # send goal and wait
        self.move_action_client.send_goal_and_wait(goal)
        return self.move_action_client.get_result().success
| [
"copy.deepcopy",
"franka_gripper.msg.HomingGoal",
"rospy.Subscriber",
"actionlib.SimpleActionClient",
"rospy.loginfo",
"franka_gripper.msg.GraspGoal",
"franka_gripper.msg.MoveGoal",
"numpy.concatenate"
] | [((897, 965), 'actionlib.SimpleActionClient', 'actionlib.SimpleActionClient', (['"""/franka_gripper/homing"""', 'HomingAction'], {}), "('/franka_gripper/homing', HomingAction)\n", (925, 965), False, 'import actionlib\n'), ((1000, 1064), 'actionlib.SimpleActionClient', 'actionlib.SimpleActionClient', (['"""/franka_gripper/move"""', 'MoveAction'], {}), "('/franka_gripper/move', MoveAction)\n", (1028, 1064), False, 'import actionlib\n'), ((1100, 1166), 'actionlib.SimpleActionClient', 'actionlib.SimpleActionClient', (['"""/franka_gripper/grasp"""', 'GraspAction'], {}), "('/franka_gripper/grasp', GraspAction)\n", (1128, 1166), False, 'import actionlib\n'), ((1210, 1265), 'rospy.loginfo', 'rospy.loginfo', (['"""Waiting for gripper action servers ..."""'], {}), "('Waiting for gripper action servers ...')\n", (1223, 1265), False, 'import rospy\n'), ((1427, 1473), 'rospy.loginfo', 'rospy.loginfo', (['"""Found gripper action servers!"""'], {}), "('Found gripper action servers!')\n", (1440, 1473), False, 'import rospy\n'), ((1773, 1899), 'rospy.Subscriber', 'rospy.Subscriber', (['"""/franka_gripper/joint_states"""', 'JointState', 'self._joint_states_callback'], {'queue_size': '(1)', 'tcp_nodelay': '(True)'}), "('/franka_gripper/joint_states', JointState, self.\n _joint_states_callback, queue_size=1, tcp_nodelay=True)\n", (1789, 1899), False, 'import rospy\n'), ((4252, 4284), 'copy.deepcopy', 'deepcopy', (['self._gripper_position'], {}), '(self._gripper_position)\n', (4260, 4284), False, 'from copy import deepcopy\n'), ((4646, 4681), 'copy.deepcopy', 'deepcopy', (['self._gripper_orientation'], {}), '(self._gripper_orientation)\n', (4654, 4681), False, 'from copy import deepcopy\n'), ((8275, 8286), 'franka_gripper.msg.GraspGoal', 'GraspGoal', ([], {}), '()\n', (8284, 8286), False, 'from franka_gripper.msg import GraspAction, GraspGoal, GraspEpsilon, HomingAction, HomingGoal, MoveAction, MoveGoal\n'), ((10067, 10077), 'franka_gripper.msg.MoveGoal', 'MoveGoal', ([], {}), 
'()\n', (10075, 10077), False, 'from franka_gripper.msg import GraspAction, GraspGoal, GraspEpsilon, HomingAction, HomingGoal, MoveAction, MoveGoal\n'), ((4978, 5046), 'numpy.concatenate', 'np.concatenate', (['[self._gripper_position + self._gripper_orientation]'], {}), '([self._gripper_position + self._gripper_orientation])\n', (4992, 5046), True, 'import numpy as np\n'), ((8899, 8911), 'franka_gripper.msg.HomingGoal', 'HomingGoal', ([], {}), '()\n', (8909, 8911), False, 'from franka_gripper.msg import GraspAction, GraspGoal, GraspEpsilon, HomingAction, HomingGoal, MoveAction, MoveGoal\n')] |
from typing import Tuple
import numpy as np
def split_dataset(full_ds: np.ndarray, test_split: float = 0.2) -> Tuple[np.ndarray, np.ndarray]:
"""Splits dataset into train, test datsets.
Parameters
----------
full_ds : tf.data.Dataset
full dataset to split
test_split : float, optional
ratio of samples to use for test set, by default 0.2
Returns
-------
Tuple[np.ndarray, np.ndarray]
tuple of train, test datasets
"""
split_idx: int = int(test_split * full_ds.shape[0])
np.random.shuffle(full_ds)
return full_ds[:-split_idx], full_ds[-split_idx:]
def split_features_and_labels(ds: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
"""Splits features and labels in dataset.
Parameters
----------
ds : np.ndarray
dataset to split into features and labels
Returns
-------
Tuple[np.ndarray, np.ndarray]
tuple of features, labels arrays
"""
return ds[:, :-1], ds[:, -1]
| [
"numpy.random.shuffle"
] | [((543, 569), 'numpy.random.shuffle', 'np.random.shuffle', (['full_ds'], {}), '(full_ds)\n', (560, 569), True, 'import numpy as np\n')] |
import numpy as np
from collections import namedtuple
import torch
import torch.nn as nn
import matplotlib.pyplot as plt
import torch.optim as optim
import torch.nn.functional as F
import random
import cv2
Transition = namedtuple('Transition',
('state', 'action', 'next_state', 'reward', 'done'))
class ReplayMemory(object):
def __init__(self, capacity=130000):
self.capacity = capacity
self.memory = []
self.position = 0
def push(self, *args):
"""Saves a transition."""
if len(self.memory) < self.capacity:
self.memory.append(None)
self.memory[self.position] = Transition(*args)
self.position = (self.position + 1) % self.capacity
def sample(self, batch_size):
return random.sample(self.memory, batch_size)
def __len__(self):
return len(self.memory)
def plot_rewards(rewards):
plt.figure(2)
plt.clf()
rewards_t = torch.tensor(rewards, dtype=torch.float)
plt.title('Training...')
plt.xlabel('Episode')
plt.ylabel('Cumulative reward')
plt.grid(True)
plt.plot(rewards_t.numpy())
# Take 100 episode averages and plot them too
if len(rewards_t) >= 100:
means = rewards_t.unfold(0, 100, 1).mean(1).view(-1)
means = torch.cat((torch.zeros(99), means))
plt.plot(means.numpy())
plt.pause(0.001) # pause a bit so that plots are updated
class DQN(nn.Module):
def __init__(self, state_space_dim, action_space_dim, hidden=32):
super(DQN, self).__init__()
self.hidden = hidden
self.fc1 = nn.Linear(state_space_dim, hidden)
self.fc2 = nn.Linear(hidden, action_space_dim)
def forward(self, x):
x = self.fc1(x)
x = F.relu(x)
x = self.fc2(x)
return x
def process_state(state, size, ignore_opponent=False):
mask = np.all(state == [43, 48, 58], axis=-1)
state[mask] = [0, 0, 0]
state = np.mean(state, axis=-1)
if ignore_opponent:
state[:, 180:] = 0
if size < 200:
state = cv2.resize(state, (size, size))
state = state.astype(int)
state = state.reshape((1, size, size))
return state
| [
"matplotlib.pyplot.title",
"cv2.resize",
"torch.nn.functional.relu",
"matplotlib.pyplot.clf",
"random.sample",
"numpy.all",
"matplotlib.pyplot.figure",
"numpy.mean",
"collections.namedtuple",
"matplotlib.pyplot.pause",
"torch.nn.Linear",
"torch.zeros",
"matplotlib.pyplot.ylabel",
"matplotl... | [((221, 298), 'collections.namedtuple', 'namedtuple', (['"""Transition"""', "('state', 'action', 'next_state', 'reward', 'done')"], {}), "('Transition', ('state', 'action', 'next_state', 'reward', 'done'))\n", (231, 298), False, 'from collections import namedtuple\n'), ((916, 929), 'matplotlib.pyplot.figure', 'plt.figure', (['(2)'], {}), '(2)\n', (926, 929), True, 'import matplotlib.pyplot as plt\n'), ((934, 943), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (941, 943), True, 'import matplotlib.pyplot as plt\n'), ((960, 1000), 'torch.tensor', 'torch.tensor', (['rewards'], {'dtype': 'torch.float'}), '(rewards, dtype=torch.float)\n', (972, 1000), False, 'import torch\n'), ((1005, 1029), 'matplotlib.pyplot.title', 'plt.title', (['"""Training..."""'], {}), "('Training...')\n", (1014, 1029), True, 'import matplotlib.pyplot as plt\n'), ((1034, 1055), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Episode"""'], {}), "('Episode')\n", (1044, 1055), True, 'import matplotlib.pyplot as plt\n'), ((1060, 1091), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Cumulative reward"""'], {}), "('Cumulative reward')\n", (1070, 1091), True, 'import matplotlib.pyplot as plt\n'), ((1096, 1110), 'matplotlib.pyplot.grid', 'plt.grid', (['(True)'], {}), '(True)\n', (1104, 1110), True, 'import matplotlib.pyplot as plt\n'), ((1373, 1389), 'matplotlib.pyplot.pause', 'plt.pause', (['(0.001)'], {}), '(0.001)\n', (1382, 1389), True, 'import matplotlib.pyplot as plt\n'), ((1881, 1919), 'numpy.all', 'np.all', (['(state == [43, 48, 58])'], {'axis': '(-1)'}), '(state == [43, 48, 58], axis=-1)\n', (1887, 1919), True, 'import numpy as np\n'), ((1960, 1983), 'numpy.mean', 'np.mean', (['state'], {'axis': '(-1)'}), '(state, axis=-1)\n', (1967, 1983), True, 'import numpy as np\n'), ((788, 826), 'random.sample', 'random.sample', (['self.memory', 'batch_size'], {}), '(self.memory, batch_size)\n', (801, 826), False, 'import random\n'), ((1609, 1643), 'torch.nn.Linear', 'nn.Linear', 
(['state_space_dim', 'hidden'], {}), '(state_space_dim, hidden)\n', (1618, 1643), True, 'import torch.nn as nn\n'), ((1663, 1698), 'torch.nn.Linear', 'nn.Linear', (['hidden', 'action_space_dim'], {}), '(hidden, action_space_dim)\n', (1672, 1698), True, 'import torch.nn as nn\n'), ((1762, 1771), 'torch.nn.functional.relu', 'F.relu', (['x'], {}), '(x)\n', (1768, 1771), True, 'import torch.nn.functional as F\n'), ((2072, 2103), 'cv2.resize', 'cv2.resize', (['state', '(size, size)'], {}), '(state, (size, size))\n', (2082, 2103), False, 'import cv2\n'), ((1311, 1326), 'torch.zeros', 'torch.zeros', (['(99)'], {}), '(99)\n', (1322, 1326), False, 'import torch\n')] |
import csv
import numpy as np
import re
import pandas as pd
class PreProcessing:
def __init__(self):
self.r1 = "[\s+\.\!\-\?\/_,$%^*(+\"]+|[+——!:,。?、~@#¥%……&*()]+"
self.r2 = '(\s\'+\s)|(\'(\s|$))|\)'
self.node2pap = {}
self.text_id = {}
self.train = []
self.test = []
def load_data(self):
with open('./data/nodeid2paperid.csv', 'r') as n:
fn = csv.reader(n)
header = next(fn)
for line in fn:
self.node2pap.update({line[0]: line[1]})
with open('./data/text.csv', 'r', encoding='utf-8') as t:
ft = csv.reader(t)
for line in ft:
self.text_id.update({line[0]: line[1:]})
with open('./data/train.csv', 'r') as f:
ff = csv.reader(f)
for line in ff:
tag = line[0]
pid = line[1]
text = self.text_id[self.node2pap[pid]]
text = ' '.join(text)
self.train.append([text, tag, pid])
with open('./data/test.csv', 'r') as m:
fm = csv.reader(m)
for line in fm:
tid = line[0]
t_text = self.text_id[self.node2pap[tid]]
t_text = ' '.join(t_text)
self.test.append([t_text, tid])
def process_train(self, file):
data = np.array(self.train)
text = data[:, 0]
label = data[:, 1]
trid = data[:, 2]
for i in range(len(text)):
test = text[i]
result = re.sub(self.r1, ' ', test)
result = re.sub(self.r2, ' ', result)
result = re.sub('\d+', ' ', result)
result = re.sub(r'\\', ' ', result)
result = re.sub('[^a-zA-Z]', ' ', result)
result = re.sub('\s+', ' ', result)
text[i] = result
with open(file, 'w', encoding='utf-8') as wf:
wf.write('text,label\n')
for i in range(len(text)):
wf.write(text[i] + ',' + label[i] + ',' + trid[i] + '\n')
def process_test(self, file):
for i in range(len(self.test)):
test = self.test[i][0]
result = re.sub(self.r1, ' ', test)
result = re.sub(self.r2, ' ', result)
result = re.sub('\d+', ' ', result)
result = re.sub(r'\\', ' ', result)
result = re.sub('[^a-zA-Z]', ' ', result)
result = re.sub('\s+', ' ', result)
self.test[i][0] = result
with open(file, 'w', encoding='utf-8') as pf:
pf.write('test_text,id\n')
for line in self.test:
pf.write(line[0] + ',' + line[1] + '\n')
def process_text(self,text):
result = re.sub(self.r1, ' ', text)
result = re.sub(self.r2, ' ', result)
result = re.sub('\d+', ' ', result)
result = re.sub(r'\\', ' ', result)
result = re.sub('[^a-zA-Z]', ' ', result)
result = re.sub('\s+', ' ', result)
return result
if __name__ == '__main__':
df = pd.read_csv('./data/test.csv')
df.columns = ['label','text']
pp = PreProcessing()
clean_text = list(map(pp.process_text,df.text))
labels = df.label.to_numpy()
with open('./data/clean.csv','w',encoding='utf-8') as f:
f.write('text,label\n')
for i in range(len(clean_text)):
f.write(str(labels[i])+","+clean_text[i]+'\n')
# f.write(clean_text[i]+","+str(labels[i])+'\n')
| [
"pandas.read_csv",
"csv.reader",
"numpy.array",
"re.sub"
] | [((3069, 3099), 'pandas.read_csv', 'pd.read_csv', (['"""./data/test.csv"""'], {}), "('./data/test.csv')\n", (3080, 3099), True, 'import pandas as pd\n'), ((1385, 1405), 'numpy.array', 'np.array', (['self.train'], {}), '(self.train)\n', (1393, 1405), True, 'import numpy as np\n'), ((2755, 2781), 're.sub', 're.sub', (['self.r1', '""" """', 'text'], {}), "(self.r1, ' ', text)\n", (2761, 2781), False, 'import re\n'), ((2799, 2827), 're.sub', 're.sub', (['self.r2', '""" """', 'result'], {}), "(self.r2, ' ', result)\n", (2805, 2827), False, 'import re\n'), ((2845, 2872), 're.sub', 're.sub', (['"""\\\\d+"""', '""" """', 'result'], {}), "('\\\\d+', ' ', result)\n", (2851, 2872), False, 'import re\n'), ((2889, 2916), 're.sub', 're.sub', (['"""\\\\\\\\"""', '""" """', 'result'], {}), "('\\\\\\\\', ' ', result)\n", (2895, 2916), False, 'import re\n'), ((2933, 2965), 're.sub', 're.sub', (['"""[^a-zA-Z]"""', '""" """', 'result'], {}), "('[^a-zA-Z]', ' ', result)\n", (2939, 2965), False, 'import re\n'), ((2983, 3010), 're.sub', 're.sub', (['"""\\\\s+"""', '""" """', 'result'], {}), "('\\\\s+', ' ', result)\n", (2989, 3010), False, 'import re\n'), ((422, 435), 'csv.reader', 'csv.reader', (['n'], {}), '(n)\n', (432, 435), False, 'import csv\n'), ((635, 648), 'csv.reader', 'csv.reader', (['t'], {}), '(t)\n', (645, 648), False, 'import csv\n'), ((801, 814), 'csv.reader', 'csv.reader', (['f'], {}), '(f)\n', (811, 814), False, 'import csv\n'), ((1114, 1127), 'csv.reader', 'csv.reader', (['m'], {}), '(m)\n', (1124, 1127), False, 'import csv\n'), ((1568, 1594), 're.sub', 're.sub', (['self.r1', '""" """', 'test'], {}), "(self.r1, ' ', test)\n", (1574, 1594), False, 'import re\n'), ((1616, 1644), 're.sub', 're.sub', (['self.r2', '""" """', 'result'], {}), "(self.r2, ' ', result)\n", (1622, 1644), False, 'import re\n'), ((1666, 1693), 're.sub', 're.sub', (['"""\\\\d+"""', '""" """', 'result'], {}), "('\\\\d+', ' ', result)\n", (1672, 1693), False, 'import re\n'), ((1714, 1741), 
're.sub', 're.sub', (['"""\\\\\\\\"""', '""" """', 'result'], {}), "('\\\\\\\\', ' ', result)\n", (1720, 1741), False, 'import re\n'), ((1762, 1794), 're.sub', 're.sub', (['"""[^a-zA-Z]"""', '""" """', 'result'], {}), "('[^a-zA-Z]', ' ', result)\n", (1768, 1794), False, 'import re\n'), ((1816, 1843), 're.sub', 're.sub', (['"""\\\\s+"""', '""" """', 'result'], {}), "('\\\\s+', ' ', result)\n", (1822, 1843), False, 'import re\n'), ((2207, 2233), 're.sub', 're.sub', (['self.r1', '""" """', 'test'], {}), "(self.r1, ' ', test)\n", (2213, 2233), False, 'import re\n'), ((2255, 2283), 're.sub', 're.sub', (['self.r2', '""" """', 'result'], {}), "(self.r2, ' ', result)\n", (2261, 2283), False, 'import re\n'), ((2305, 2332), 're.sub', 're.sub', (['"""\\\\d+"""', '""" """', 'result'], {}), "('\\\\d+', ' ', result)\n", (2311, 2332), False, 'import re\n'), ((2353, 2380), 're.sub', 're.sub', (['"""\\\\\\\\"""', '""" """', 'result'], {}), "('\\\\\\\\', ' ', result)\n", (2359, 2380), False, 'import re\n'), ((2401, 2433), 're.sub', 're.sub', (['"""[^a-zA-Z]"""', '""" """', 'result'], {}), "('[^a-zA-Z]', ' ', result)\n", (2407, 2433), False, 'import re\n'), ((2455, 2482), 're.sub', 're.sub', (['"""\\\\s+"""', '""" """', 'result'], {}), "('\\\\s+', ' ', result)\n", (2461, 2482), False, 'import re\n')] |
import numpy as np
def confusion_matrix(y_pred, y_real):
"""Compute confusion matrix.
Args:
y_pred (list[int] | np.ndarray[int]): Prediction labels.
y_real (list[int] | np.ndarray[int]): Ground truth labels.
Returns:
np.ndarray: Confusion matrix.
"""
if isinstance(y_pred, list):
y_pred = np.array(y_pred)
if not isinstance(y_pred, np.ndarray):
raise TypeError(
f'y_pred must be list or np.ndarray, but got {type(y_pred)}')
if not y_pred.dtype == np.int64:
raise TypeError(
f'y_pred dtype must be np.int64, but got {y_pred.dtype}')
if isinstance(y_real, list):
y_real = np.array(y_real)
if not isinstance(y_real, np.ndarray):
raise TypeError(
f'y_real must be list or np.ndarray, but got {type(y_real)}')
if not y_real.dtype == np.int64:
raise TypeError(
f'y_real dtype must be np.int64, but got {y_real.dtype}')
label_set = np.unique(np.concatenate((y_pred, y_real)))
num_labels = len(label_set)
label_map = {label: i for i, label in enumerate(label_set)}
confusion_mat = np.zeros((num_labels, num_labels), dtype=np.int64)
for rlabel, plabel in zip(y_real, y_pred):
index_real = label_map[rlabel]
index_pred = label_map[plabel]
confusion_mat[index_real][index_pred] += 1
return confusion_mat
def mean_class_accuracy(scores, labels):
"""Calculate mean class accuracy.
Args:
scores (list[np.ndarray]): Prediction scores for each class.
labels (list[int]): Ground truth labels.
Returns:
np.ndarray: Mean class accuracy.
"""
pred = np.argmax(scores, axis=1)
cf = confusion_matrix(pred, labels).astype(float)
cls_cnt = cf.sum(axis=1)
cls_hit = np.diag(cf)
mean_class_acc = np.mean(
[hit / cnt if cnt else 0.0 for cnt, hit in zip(cls_cnt, cls_hit)])
return mean_class_acc
def top_k_accuracy(scores, labels, topk=(1, )):
"""Calculate top k accuracy score.
Args:
scores (list[np.ndarray]): Prediction scores for each class.
labels (list[int]): Ground truth labels.
topk (tuple[int]): K value for top_k_accuracy. Default: (1, ).
Returns:
list[float]: Top k accuracy score for each k.
"""
res = []
labels = np.array(labels)[:, np.newaxis]
for k in topk:
max_k_preds = np.argsort(scores, axis=1)[:, -k:][:, ::-1]
match_array = np.logical_or.reduce(max_k_preds == labels, axis=1)
topk_acc_score = match_array.sum() / match_array.shape[0]
res.append(topk_acc_score)
return res
def mean_average_precision(scores, labels):
"""Mean average precision for multi-label recognition.
Args:
scores (list[np.ndarray]): Prediction scores for each class.
labels (list[np.ndarray]): Ground truth many-hot vector.
Returns:
np.float: The mean average precision.
"""
results = []
for i in range(len(scores)):
precision, recall, _ = binary_precision_recall_curve(
scores[i], labels[i])
ap = -np.sum(np.diff(recall) * np.array(precision)[:-1])
results.append(ap)
return np.mean(results)
def binary_precision_recall_curve(y_score, y_true):
"""Calculate the binary precision recall curve at step thresholds.
Args:
y_score (np.ndarray): Prediction scores for each class.
Shape should be (num_classes, ).
y_true (np.ndarray): Ground truth many-hot vector.
Shape should be (num_classes, ).
Returns:
precision (np.ndarray): The precision of different thresholds.
recall (np.ndarray): The recall of different thresholds.
thresholds (np.ndarray): Different thresholds at which precison and
recall are tested.
"""
assert isinstance(y_score, np.ndarray)
assert isinstance(y_true, np.ndarray)
assert y_score.shape == y_true.shape
# make y_true a boolean vector
y_true = (y_true == 1)
# sort scores and corresponding truth values
desc_score_indices = np.argsort(y_score, kind='mergesort')[::-1]
y_score = y_score[desc_score_indices]
y_true = y_true[desc_score_indices]
# There may be ties in values, therefore find the `distinct_value_inds`
distinct_value_inds = np.where(np.diff(y_score))[0]
threshold_inds = np.r_[distinct_value_inds, y_true.size - 1]
# accumulate the true positives with decreasing threshold
tps = np.cumsum(y_true)[threshold_inds]
fps = 1 + threshold_inds - tps
thresholds = y_score[threshold_inds]
precision = tps / (tps + fps)
precision[np.isnan(precision)] = 0
recall = tps / tps[-1]
# stop when full recall attained
# and reverse the outputs so recall is decreasing
last_ind = tps.searchsorted(tps[-1])
sl = slice(last_ind, None, -1)
return np.r_[precision[sl], 1], np.r_[recall[sl], 0], thresholds[sl]
def pairwise_temporal_iou(candidate_segments, target_segments):
"""Compute intersection over union between segments.
Args:
candidate_segments (np.ndarray): 2-dim array in format
[m x 2:=[init, end]].
target_segments (np.ndarray): 2-dim array in format
[n x 2:=[init, end]].
Returns:
temporal_iou (np.ndarray): 2-dim array [n x m] with IoU ratio.
"""
if target_segments.ndim != 2 or candidate_segments.ndim != 2:
raise ValueError('Dimension of arguments is incorrect')
n, m = target_segments.shape[0], candidate_segments.shape[0]
temporal_iou = np.empty((n, m))
for i in range(m):
candidate_segment = candidate_segments[i, :]
tt1 = np.maximum(candidate_segment[0], target_segments[:, 0])
tt2 = np.minimum(candidate_segment[1], target_segments[:, 1])
# Intersection including Non-negative overlap score.
segments_intersection = (tt2 - tt1).clip(0)
# Segment union.
segments_union = ((target_segments[:, 1] - target_segments[:, 0]) +
(candidate_segment[1] - candidate_segment[0]) -
segments_intersection)
# Compute overlap as the ratio of the intersection
# over union of two segments.
temporal_iou[:, i] = (
segments_intersection.astype(float) / segments_union)
return temporal_iou
def average_recall_at_avg_proposals(ground_truth,
proposals,
total_num_proposals,
max_avg_proposals=None,
temporal_iou_thresholds=np.linspace(
0.5, 0.95, 10)):
"""Computes the average recall given an average number (percentile) of
proposals per video.
Args:
ground_truth (dict): Dict containing the ground truth instances.
proposals (dict): Dict containing the proposal instances.
total_num_proposals (int): Total number of proposals in the
proposal dict.
max_avg_proposals (int | None): Max number of proposals for one video.
Default: None.
temporal_iou_thresholds (np.ndarray): 1D array with temporal_iou
thresholds. Default: np.linspace(0.5, 0.95, 10).
Returns:
tuple([np.ndarray, np.ndarray, np.ndarray, float]):
(recall, average_recall, proposals_per_video, auc)
In recall, ``recall[i,j]`` is recall at i-th temporal_iou threshold
at the j-th average number (percentile) of average number of
proposals per video. The average_recall is recall averaged
over a list of temporal_iou threshold (1D array). This is
equivalent to ``recall.mean(axis=0)``. The ``proposals_per_video``
is the average number of proposals per video. The auc is the area
under AR@AN curve.
"""
total_num_videos = len(ground_truth)
if not max_avg_proposals:
max_avg_proposals = float(total_num_proposals) / total_num_videos
ratio = (max_avg_proposals * float(total_num_videos) / total_num_proposals)
# For each video, compute temporal_iou scores among the retrieved proposals
score_list = []
total_num_retrieved_proposals = 0
for video_id in ground_truth:
# Get proposals for this video.
proposals_video_id = proposals[video_id]
this_video_proposals = proposals_video_id[:, :2]
# Sort proposals by score.
sort_idx = proposals_video_id[:, 2].argsort()[::-1]
this_video_proposals = this_video_proposals[sort_idx, :].astype(
np.float32)
# Get ground-truth instances associated to this video.
ground_truth_video_id = ground_truth[video_id]
this_video_ground_truth = ground_truth_video_id[:, :2].astype(
np.float32)
if this_video_proposals.shape[0] == 0:
n = this_video_ground_truth.shape[0]
score_list.append(np.zeros((n, 1)))
continue
if this_video_proposals.ndim != 2:
this_video_proposals = np.expand_dims(this_video_proposals, axis=0)
if this_video_ground_truth.ndim != 2:
this_video_ground_truth = np.expand_dims(
this_video_ground_truth, axis=0)
num_retrieved_proposals = np.minimum(
int(this_video_proposals.shape[0] * ratio),
this_video_proposals.shape[0])
total_num_retrieved_proposals += num_retrieved_proposals
this_video_proposals = this_video_proposals[:
num_retrieved_proposals, :]
# Compute temporal_iou scores.
temporal_iou = pairwise_temporal_iou(this_video_proposals,
this_video_ground_truth)
score_list.append(temporal_iou)
# Given that the length of the videos is really varied, we
# compute the number of proposals in terms of a ratio of the total
# proposals retrieved, i.e. average recall at a percentage of proposals
# retrieved per video.
# Computes average recall.
pcn_list = np.arange(1, 101) / 100.0 * (
max_avg_proposals * float(total_num_videos) /
total_num_retrieved_proposals)
matches = np.empty((total_num_videos, pcn_list.shape[0]))
positives = np.empty(total_num_videos)
recall = np.empty((temporal_iou_thresholds.shape[0], pcn_list.shape[0]))
# Iterates over each temporal_iou threshold.
for ridx, temporal_iou in enumerate(temporal_iou_thresholds):
# Inspect positives retrieved per video at different
# number of proposals (percentage of the total retrieved).
for i, score in enumerate(score_list):
# Total positives per video.
positives[i] = score.shape[0]
# Find proposals that satisfies minimum temporal_iou threshold.
true_positives_temporal_iou = score >= temporal_iou
# Get number of proposals as a percentage of total retrieved.
pcn_proposals = np.minimum(
(score.shape[1] * pcn_list).astype(np.int), score.shape[1])
for j, num_retrieved_proposals in enumerate(pcn_proposals):
# Compute the number of matches
# for each percentage of the proposals
matches[i, j] = np.count_nonzero(
(true_positives_temporal_iou[:, :num_retrieved_proposals]
).sum(axis=1))
# Computes recall given the set of matches per video.
recall[ridx, :] = matches.sum(axis=0) / positives.sum()
# Recall is averaged.
avg_recall = recall.mean(axis=0)
# Get the average number of proposals per video.
proposals_per_video = pcn_list * (
float(total_num_retrieved_proposals) / total_num_videos)
# Get AUC
area_under_curve = np.trapz(avg_recall, proposals_per_video)
auc = 100. * float(area_under_curve) / proposals_per_video[-1]
return recall, avg_recall, proposals_per_video, auc
def get_weighted_score(score_list, coeff_list):
"""Get weighted score with given scores and coefficients.
Given n predictions by different classifier: [score_1, score_2, ...,
score_n] (score_list) and their coefficients: [coeff_1, coeff_2, ...,
coeff_n] (coeff_list), return weighted score: weighted_score =
score_1 * coeff_1 + score_2 * coeff_2 + ... + score_n * coeff_n
Args:
score_list (list[list[np.ndarray]]): List of list of scores, with shape
n(number of predictions) X num_samples X num_classes
coeff_list (list[float]): List of coefficients, with shape n.
Return:
list[np.ndarray]: List of weighted scores.
"""
assert len(score_list) == len(coeff_list)
num_samples = len(score_list[0])
for i in range(1, len(score_list)):
assert len(score_list[i]) == num_samples
scores = np.array(score_list) # (num_coeff, num_samples, num_classes)
coeff = np.array(coeff_list) # (num_coeff, )
weighted_scores = list(np.dot(scores.T, coeff).T)
return weighted_scores
| [
"numpy.maximum",
"numpy.argmax",
"numpy.empty",
"numpy.isnan",
"numpy.argsort",
"numpy.mean",
"numpy.arange",
"numpy.diag",
"numpy.logical_or.reduce",
"numpy.cumsum",
"numpy.linspace",
"numpy.trapz",
"numpy.minimum",
"numpy.dot",
"numpy.concatenate",
"numpy.zeros",
"numpy.expand_dims... | [((1155, 1205), 'numpy.zeros', 'np.zeros', (['(num_labels, num_labels)'], {'dtype': 'np.int64'}), '((num_labels, num_labels), dtype=np.int64)\n', (1163, 1205), True, 'import numpy as np\n'), ((1692, 1717), 'numpy.argmax', 'np.argmax', (['scores'], {'axis': '(1)'}), '(scores, axis=1)\n', (1701, 1717), True, 'import numpy as np\n'), ((1816, 1827), 'numpy.diag', 'np.diag', (['cf'], {}), '(cf)\n', (1823, 1827), True, 'import numpy as np\n'), ((3227, 3243), 'numpy.mean', 'np.mean', (['results'], {}), '(results)\n', (3234, 3243), True, 'import numpy as np\n'), ((5601, 5617), 'numpy.empty', 'np.empty', (['(n, m)'], {}), '((n, m))\n', (5609, 5617), True, 'import numpy as np\n'), ((6666, 6692), 'numpy.linspace', 'np.linspace', (['(0.5)', '(0.95)', '(10)'], {}), '(0.5, 0.95, 10)\n', (6677, 6692), True, 'import numpy as np\n'), ((10323, 10370), 'numpy.empty', 'np.empty', (['(total_num_videos, pcn_list.shape[0])'], {}), '((total_num_videos, pcn_list.shape[0]))\n', (10331, 10370), True, 'import numpy as np\n'), ((10387, 10413), 'numpy.empty', 'np.empty', (['total_num_videos'], {}), '(total_num_videos)\n', (10395, 10413), True, 'import numpy as np\n'), ((10427, 10490), 'numpy.empty', 'np.empty', (['(temporal_iou_thresholds.shape[0], pcn_list.shape[0])'], {}), '((temporal_iou_thresholds.shape[0], pcn_list.shape[0]))\n', (10435, 10490), True, 'import numpy as np\n'), ((11920, 11961), 'numpy.trapz', 'np.trapz', (['avg_recall', 'proposals_per_video'], {}), '(avg_recall, proposals_per_video)\n', (11928, 11961), True, 'import numpy as np\n'), ((12964, 12984), 'numpy.array', 'np.array', (['score_list'], {}), '(score_list)\n', (12972, 12984), True, 'import numpy as np\n'), ((13038, 13058), 'numpy.array', 'np.array', (['coeff_list'], {}), '(coeff_list)\n', (13046, 13058), True, 'import numpy as np\n'), ((345, 361), 'numpy.array', 'np.array', (['y_pred'], {}), '(y_pred)\n', (353, 361), True, 'import numpy as np\n'), ((687, 703), 'numpy.array', 'np.array', 
(['y_real'], {}), '(y_real)\n', (695, 703), True, 'import numpy as np\n'), ((1005, 1037), 'numpy.concatenate', 'np.concatenate', (['(y_pred, y_real)'], {}), '((y_pred, y_real))\n', (1019, 1037), True, 'import numpy as np\n'), ((2352, 2368), 'numpy.array', 'np.array', (['labels'], {}), '(labels)\n', (2360, 2368), True, 'import numpy as np\n'), ((2491, 2542), 'numpy.logical_or.reduce', 'np.logical_or.reduce', (['(max_k_preds == labels)'], {'axis': '(1)'}), '(max_k_preds == labels, axis=1)\n', (2511, 2542), True, 'import numpy as np\n'), ((4121, 4158), 'numpy.argsort', 'np.argsort', (['y_score'], {'kind': '"""mergesort"""'}), "(y_score, kind='mergesort')\n", (4131, 4158), True, 'import numpy as np\n'), ((4516, 4533), 'numpy.cumsum', 'np.cumsum', (['y_true'], {}), '(y_true)\n', (4525, 4533), True, 'import numpy as np\n'), ((4675, 4694), 'numpy.isnan', 'np.isnan', (['precision'], {}), '(precision)\n', (4683, 4694), True, 'import numpy as np\n'), ((5708, 5763), 'numpy.maximum', 'np.maximum', (['candidate_segment[0]', 'target_segments[:, 0]'], {}), '(candidate_segment[0], target_segments[:, 0])\n', (5718, 5763), True, 'import numpy as np\n'), ((5778, 5833), 'numpy.minimum', 'np.minimum', (['candidate_segment[1]', 'target_segments[:, 1]'], {}), '(candidate_segment[1], target_segments[:, 1])\n', (5788, 5833), True, 'import numpy as np\n'), ((4358, 4374), 'numpy.diff', 'np.diff', (['y_score'], {}), '(y_score)\n', (4365, 4374), True, 'import numpy as np\n'), ((9145, 9189), 'numpy.expand_dims', 'np.expand_dims', (['this_video_proposals'], {'axis': '(0)'}), '(this_video_proposals, axis=0)\n', (9159, 9189), True, 'import numpy as np\n'), ((9274, 9321), 'numpy.expand_dims', 'np.expand_dims', (['this_video_ground_truth'], {'axis': '(0)'}), '(this_video_ground_truth, axis=0)\n', (9288, 9321), True, 'import numpy as np\n'), ((10186, 10203), 'numpy.arange', 'np.arange', (['(1)', '(101)'], {}), '(1, 101)\n', (10195, 10203), True, 'import numpy as np\n'), ((13103, 13126), 'numpy.dot', 
'np.dot', (['scores.T', 'coeff'], {}), '(scores.T, coeff)\n', (13109, 13126), True, 'import numpy as np\n'), ((2425, 2451), 'numpy.argsort', 'np.argsort', (['scores'], {'axis': '(1)'}), '(scores, axis=1)\n', (2435, 2451), True, 'import numpy as np\n'), ((9027, 9043), 'numpy.zeros', 'np.zeros', (['(n, 1)'], {}), '((n, 1))\n', (9035, 9043), True, 'import numpy as np\n'), ((3145, 3160), 'numpy.diff', 'np.diff', (['recall'], {}), '(recall)\n', (3152, 3160), True, 'import numpy as np\n'), ((3163, 3182), 'numpy.array', 'np.array', (['precision'], {}), '(precision)\n', (3171, 3182), True, 'import numpy as np\n')] |
""" Utilities
"""
import numpy as np
import scipy as sp
import os
import shutil
import tarfile
from subfunc.showdata import *
from subfunc.munkres import Munkres
# =============================================================
# =============================================================
def correlation(x, y, method='Pearson'):
"""Evaluate correlation
Args:
x: data to be sorted
y: target data
method: correlation method ('Pearson' or 'Spearman')
Returns:
corr_sort: correlation matrix between x and y (after sorting)
sort_idx: sorting index
x_sort: x after sorting
"""
print('Calculating correlation...')
x = x.copy().T
y = y.copy().T
dimx = x.shape[0]
dimy = y.shape[0]
# Calculate correlation -----------------------------------
if method == 'Pearson':
corr = np.corrcoef(y, x)
corr = corr[0:dimy, dimy:]
elif method == 'Spearman':
corr, pvalue = sp.stats.spearmanr(y.T, x.T)
corr = corr[0:dimy, dimy:]
else:
raise ValueError
# Sort ----------------------------------------------------
munk = Munkres()
indexes = munk.compute(-np.absolute(corr))
sort_idx = np.zeros(dimy, dtype=int)
for i in range(dimy):
sort_idx[i] = indexes[i][1]
sort_idx_other = np.setdiff1d(np.arange(0, dimx), sort_idx)
sort_idx = np.concatenate([sort_idx, sort_idx_other])
x_sort = x[sort_idx, :]
# Re-calculate correlation --------------------------------
if method == 'Pearson':
corr_sort = np.corrcoef(y, x_sort)
corr_sort = corr_sort[0:dimy, dimy:]
elif method == 'Spearman':
corr_sort, pvalue = sp.stats.spearmanr(y.T, x_sort.T)
corr_sort = corr_sort[0:dimy, dimy:]
else:
raise ValueError
return corr_sort, sort_idx, x_sort
# ===============================================================
# ===============================================================
def unzip(loadfile, unzipfolder, necessary_word='/storage'):
"""
unzip trained model (loadfile) to unzipfolder
"""
print('load: %s...' % loadfile)
if loadfile.find(".tar.gz") > -1:
if unzipfolder.find(necessary_word) > -1:
if os.path.exists(unzipfolder):
print('delete savefolder: %s...' % unzipfolder)
shutil.rmtree(unzipfolder) # remove folder
archive = tarfile.open(loadfile)
archive.extractall(unzipfolder)
archive.close()
else:
assert False, "unzip folder doesn't include necessary word"
else:
if os.path.exists(unzipfolder):
print('delete savefolder: %s...' % unzipfolder)
shutil.rmtree(unzipfolder) # remove folder
os.makedirs(unzipfolder)
src_files = os.listdir(loadfile)
for fn in src_files:
full_file_name = os.path.join(loadfile, fn)
if os.path.isfile(full_file_name):
shutil.copy(full_file_name, unzipfolder + '/')
# unzipfolder = loadfile
if not os.path.exists(unzipfolder):
raise ValueError
| [
"numpy.absolute",
"os.makedirs",
"numpy.corrcoef",
"scipy.stats.spearmanr",
"numpy.zeros",
"os.path.exists",
"shutil.copy",
"os.path.isfile",
"numpy.arange",
"subfunc.munkres.Munkres",
"tarfile.open",
"shutil.rmtree",
"os.path.join",
"os.listdir",
"numpy.concatenate"
] | [((1165, 1174), 'subfunc.munkres.Munkres', 'Munkres', ([], {}), '()\n', (1172, 1174), False, 'from subfunc.munkres import Munkres\n'), ((1238, 1263), 'numpy.zeros', 'np.zeros', (['dimy'], {'dtype': 'int'}), '(dimy, dtype=int)\n', (1246, 1263), True, 'import numpy as np\n'), ((1405, 1447), 'numpy.concatenate', 'np.concatenate', (['[sort_idx, sort_idx_other]'], {}), '([sort_idx, sort_idx_other])\n', (1419, 1447), True, 'import numpy as np\n'), ((883, 900), 'numpy.corrcoef', 'np.corrcoef', (['y', 'x'], {}), '(y, x)\n', (894, 900), True, 'import numpy as np\n'), ((1360, 1378), 'numpy.arange', 'np.arange', (['(0)', 'dimx'], {}), '(0, dimx)\n', (1369, 1378), True, 'import numpy as np\n'), ((1590, 1612), 'numpy.corrcoef', 'np.corrcoef', (['y', 'x_sort'], {}), '(y, x_sort)\n', (1601, 1612), True, 'import numpy as np\n'), ((2651, 2678), 'os.path.exists', 'os.path.exists', (['unzipfolder'], {}), '(unzipfolder)\n', (2665, 2678), False, 'import os\n'), ((2804, 2828), 'os.makedirs', 'os.makedirs', (['unzipfolder'], {}), '(unzipfolder)\n', (2815, 2828), False, 'import os\n'), ((2849, 2869), 'os.listdir', 'os.listdir', (['loadfile'], {}), '(loadfile)\n', (2859, 2869), False, 'import os\n'), ((3110, 3137), 'os.path.exists', 'os.path.exists', (['unzipfolder'], {}), '(unzipfolder)\n', (3124, 3137), False, 'import os\n'), ((990, 1018), 'scipy.stats.spearmanr', 'sp.stats.spearmanr', (['y.T', 'x.T'], {}), '(y.T, x.T)\n', (1008, 1018), True, 'import scipy as sp\n'), ((1203, 1220), 'numpy.absolute', 'np.absolute', (['corr'], {}), '(corr)\n', (1214, 1220), True, 'import numpy as np\n'), ((1717, 1750), 'scipy.stats.spearmanr', 'sp.stats.spearmanr', (['y.T', 'x_sort.T'], {}), '(y.T, x_sort.T)\n', (1735, 1750), True, 'import scipy as sp\n'), ((2274, 2301), 'os.path.exists', 'os.path.exists', (['unzipfolder'], {}), '(unzipfolder)\n', (2288, 2301), False, 'import os\n'), ((2449, 2471), 'tarfile.open', 'tarfile.open', (['loadfile'], {}), '(loadfile)\n', (2461, 2471), False, 'import 
tarfile\n'), ((2752, 2778), 'shutil.rmtree', 'shutil.rmtree', (['unzipfolder'], {}), '(unzipfolder)\n', (2765, 2778), False, 'import shutil\n'), ((2928, 2954), 'os.path.join', 'os.path.join', (['loadfile', 'fn'], {}), '(loadfile, fn)\n', (2940, 2954), False, 'import os\n'), ((2970, 3000), 'os.path.isfile', 'os.path.isfile', (['full_file_name'], {}), '(full_file_name)\n', (2984, 3000), False, 'import os\n'), ((2383, 2409), 'shutil.rmtree', 'shutil.rmtree', (['unzipfolder'], {}), '(unzipfolder)\n', (2396, 2409), False, 'import shutil\n'), ((3018, 3064), 'shutil.copy', 'shutil.copy', (['full_file_name', "(unzipfolder + '/')"], {}), "(full_file_name, unzipfolder + '/')\n", (3029, 3064), False, 'import shutil\n')] |
#!/usr/bin/env python
# encoding: utf-8
"""
@Author: yangwenhao
@Contact: <EMAIL>
@Software: PyCharm
@File: plt_time_weight.py
@Time: 2020/11/28 13:35
@Overview:
"""
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.backends.backend_pdf import PdfPages
from scipy import interpolate
import Process_Data.constants as c
from Lime import gassuan_weight
# from Process_Data.xfcc.common import get_filterbanks
time_data = np.load('Lime/LoResNet8/timit/soft/time.data.pickle', allow_pickle=True)
data = time_data[0][0]
grad = time_data[0][1]
fig = plt.figure(figsize=(8, 6))
# fig.tight_layout() # 调整整体空白
# plt.subplots_adjust(left=0, bottom=0, right=1, top=1, hspace=0, wspace=0)
ax = plt.subplot(312)
plt.imshow(data.transpose(), aspect='auto')
ax.set_xticklabels([])
ax.set_title('Log Spectrogram')
ax = plt.subplot(311)
plt.plot(np.log(np.exp(data).sum(axis=1)))
plt.xlim(0, 320)
ax.set_xticklabels([])
ax.set_title('Log Power Energy')
ax = plt.subplot(313)
plt.plot(np.abs(grad).mean(axis=1)/np.abs(grad).mean(axis=1).sum())
plt.xlim(0, 320)
ax.set_title('Gradient along time axis')
# plt.subplot(414)
# plt.plot(np.abs(data).mean(axis=1)/np.abs(data).mean(axis=1).sum()*np.abs(grad).mean(axis=1))
# plt.xlim(0, 320)
# fb64_m = 700 * (10 ** (fb64_m / 2595.0) - 1)
# plt.ylabel('Weight', fontsize=18)
# plt.xlabel('Frequency', fontsize=18)
# pdf.savefig()
# pdf.close()
plt.show()
| [
"matplotlib.pyplot.subplot",
"numpy.load",
"matplotlib.pyplot.xlim",
"matplotlib.pyplot.show",
"numpy.abs",
"matplotlib.pyplot.figure",
"numpy.exp"
] | [((438, 510), 'numpy.load', 'np.load', (['"""Lime/LoResNet8/timit/soft/time.data.pickle"""'], {'allow_pickle': '(True)'}), "('Lime/LoResNet8/timit/soft/time.data.pickle', allow_pickle=True)\n", (445, 510), True, 'import numpy as np\n'), ((565, 591), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(8, 6)'}), '(figsize=(8, 6))\n', (575, 591), True, 'import matplotlib.pyplot as plt\n'), ((707, 723), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(312)'], {}), '(312)\n', (718, 723), True, 'import matplotlib.pyplot as plt\n'), ((829, 845), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(311)'], {}), '(311)\n', (840, 845), True, 'import matplotlib.pyplot as plt\n'), ((889, 905), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(0)', '(320)'], {}), '(0, 320)\n', (897, 905), True, 'import matplotlib.pyplot as plt\n'), ((968, 984), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(313)'], {}), '(313)\n', (979, 984), True, 'import matplotlib.pyplot as plt\n'), ((1053, 1069), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(0)', '(320)'], {}), '(0, 320)\n', (1061, 1069), True, 'import matplotlib.pyplot as plt\n'), ((1402, 1412), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1410, 1412), True, 'import matplotlib.pyplot as plt\n'), ((862, 874), 'numpy.exp', 'np.exp', (['data'], {}), '(data)\n', (868, 874), True, 'import numpy as np\n'), ((994, 1006), 'numpy.abs', 'np.abs', (['grad'], {}), '(grad)\n', (1000, 1006), True, 'import numpy as np\n'), ((1020, 1032), 'numpy.abs', 'np.abs', (['grad'], {}), '(grad)\n', (1026, 1032), True, 'import numpy as np\n')] |
import matplotlib.pyplot as plt
import matplotlib.colors as mcolors
import numpy as np
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
import cmocean
import math
data = np.zeros((19, 19))
def read_cross_val(file):
cvmin = float("inf")
cross_val = []
with open(file, "r") as f:
f.readline()
for line in f:
line = line.split(",")
test = line[0]
baseline = line[-1]
norms = np.array(line[2:-1:2]).astype(float)
cross_val += [norms]
cvmin = min(cvmin, np.min(norms))
print(cvmin)
return cross_val
data = [np.array(read_cross_val("data/mar27_1426_circ_audio/cross_val_1.csv"))]
fig = plt.subplots()
# normal cmap
im0 = plt.imshow(
data[0],
interpolation="none",
cmap="RdBu_r",
norm=mcolors.LogNorm(vmax=10, vmin=0.1),
)
cbar = plt.colorbar(im0, ticks=[0.1, 0.2, 0.5, 1, 2, 5, 10])
cbar.ax.set_yticklabels(
[
("%sx") % (("%f" % i).rstrip("0").rstrip("."))
for i in [0.1, 0.2, 0.5, 1, 2, 5, 10]
]
)
plt.xticks(np.arange(0, 8, 1), ["A","B","C","D","E","F","G","H"])
plt.xlabel("Training Scenario of Model")
plt.yticks(np.arange(0, 8, 1), ["A","B","C","D","E","F","G","H"])
plt.ylabel("Testing Scenario")
plt.subplots_adjust(left=0.0, right=1.0, top=0.90, bottom=0.10)
plt.show()
| [
"matplotlib.pyplot.show",
"numpy.zeros",
"matplotlib.pyplot.colorbar",
"numpy.min",
"matplotlib.colors.LogNorm",
"numpy.arange",
"numpy.array",
"matplotlib.pyplot.subplots_adjust",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.subplots"
] | [((192, 210), 'numpy.zeros', 'np.zeros', (['(19, 19)'], {}), '((19, 19))\n', (200, 210), True, 'import numpy as np\n'), ((712, 726), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (724, 726), True, 'import matplotlib.pyplot as plt\n'), ((872, 925), 'matplotlib.pyplot.colorbar', 'plt.colorbar', (['im0'], {'ticks': '[0.1, 0.2, 0.5, 1, 2, 5, 10]'}), '(im0, ticks=[0.1, 0.2, 0.5, 1, 2, 5, 10])\n', (884, 925), True, 'import matplotlib.pyplot as plt\n'), ((1132, 1172), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Training Scenario of Model"""'], {}), "('Training Scenario of Model')\n", (1142, 1172), True, 'import matplotlib.pyplot as plt\n'), ((1239, 1269), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Testing Scenario"""'], {}), "('Testing Scenario')\n", (1249, 1269), True, 'import matplotlib.pyplot as plt\n'), ((1271, 1332), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'left': '(0.0)', 'right': '(1.0)', 'top': '(0.9)', 'bottom': '(0.1)'}), '(left=0.0, right=1.0, top=0.9, bottom=0.1)\n', (1290, 1332), True, 'import matplotlib.pyplot as plt\n'), ((1335, 1345), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1343, 1345), True, 'import matplotlib.pyplot as plt\n'), ((1077, 1095), 'numpy.arange', 'np.arange', (['(0)', '(8)', '(1)'], {}), '(0, 8, 1)\n', (1086, 1095), True, 'import numpy as np\n'), ((1184, 1202), 'numpy.arange', 'np.arange', (['(0)', '(8)', '(1)'], {}), '(0, 8, 1)\n', (1193, 1202), True, 'import numpy as np\n'), ((827, 861), 'matplotlib.colors.LogNorm', 'mcolors.LogNorm', ([], {'vmax': '(10)', 'vmin': '(0.1)'}), '(vmax=10, vmin=0.1)\n', (842, 861), True, 'import matplotlib.colors as mcolors\n'), ((571, 584), 'numpy.min', 'np.min', (['norms'], {}), '(norms)\n', (577, 584), True, 'import numpy as np\n'), ((470, 492), 'numpy.array', 'np.array', (['line[2:-1:2]'], {}), '(line[2:-1:2])\n', (478, 492), True, 'import numpy as np\n')] |
import numpy as np
# todo:
# consider making this faster,
# per https://stackoverflow.com/questions/12321899/
# this is used in the 'compilation' step
# todo: this big docstring isn't necessary...
'''
Provides:
ortho_map = ((1,0), (0,-1), (-1,0), (0,1))
diag_map = ((1,-1), (-1,-1),(-1,1), (1,1))
These are used to calculate adjacent pixels.
Useful for the contiguities and adjacencies dictionaries for RegionMapper
RegionMapper(image,
class_dict,
contiguities = {},
adjacencies = {},
sparse = True,
wrap = False)
Parameters:
image: A Numpy array of shape (width, height, n_channels)
E.g. image[x,y] = [255, 0, 0]
Note: Axis 0 and 1 should represent x and y, respectively!
If they're wrong, use np.swapaxes(picture, 0, 1)
class_dict: A dictionary mapping tuple of size n_channels --> int
E.g. class_dict = { (255,0,0):1, (0,255,0):2, (0,0,255):3 }
# Red is class 1, green is class 2, blue is class 3
contiguities: A dictionary mapping classes to what they consider 'contiguous regions'
E.g. contiguities = { 1 : ortho_map + diag_map, 3: diag_map }
# Red regions are contiguous if they touch orthogonally or diagonally
# Blue regions are contiguous only if they touch diagonally
# Green regions are orthogonally contiguous (default)
adjacencies: Same format as contiguities. Defines what is considered a neighbour for a given class.
E.g. adjacencies = {} means (by default) regions of any class will only consider
other regions to be its neighbour if a pixel of that region is orthogonally adjacent
sparse: Boolean. Set True if most of the pixels in the image do not belong to a given class.
wrap: Boolean. If True, the image is considered a torus. The top edge is adjacent to the bottom edge,
and the left edge is adjacent to the right edge.
Provides:
RegionMapper.region_at_pixel(x,y):
Returns the ID of the region at that pixel, or
-1 if that region does not belong to a class.
RegionMapper.regions(region_id):
Given the ID of a region, return (class_number, list of pixels in that region)
RegionMapper.regions_with_class(class_number):
Given the number of a class, return all regions with that class.
RegionMapper.adjacent_regions(region_id):
Given the ID of a region, return all regions adjacent to it (as defined by
its entry in the adjacencies dictionary)
'''
# Package constants, useful for defining neighborhoods.
# ortho_map defines an orthogonal neighborhood
# diag_map defines a diagonal neighborhood
ortho_map = ((1,0), (0,-1), (-1,0), (0,1))
diag_map = ((1,-1), (-1,-1),(-1,1), (1,1))
def _value_to_class(class_dict, value):
""" Map numpy array values using a dictionary.
Convenience function for dictionaries that are keyed on tuples or integers.
Used to map integers (numpy arrays of integers, shape (3,)) to integers.
These integers enumerate the class that a pixel belongs to.
:param class_dict: A mapping of tuples of integers (i.e. RGB pixels) to integers
:type class_dict: dict
:param value: The key used to index the class_dict.
:type value: numpy.ndarray
:raises ValueError: If the value has a shape with 2 or more axes.
:returns: A mapping per class_dict, or 0 if that value is not available.
:rtype: Anything
>>> _value_to_class(
... {(1,2,3) : 'some_class', 789 : 'another_class'},
... np.array([1,2,3]))
'some_class'
>>> _value_to_class(
... {(1,2,3) : 'some_class', 789 : 'another_class'},
... np.array(789))
'another_class'
"""
# This was written when I was a baby. There's probably a better way to do
# this using dictionaries.
# E.g. If the shape of the array = (3,), e.g. np.array([255, 255, 255])
if len(value.shape) == 1:
if tuple(value) in class_dict.keys():
return class_dict[tuple(value)]
else:
return 0
# E.g. if the shape of the array = (), e.g. np.array(12345)
elif len(value.shape) == 0:
if value in class_dict.keys():
return class_dict[value]
else:
return 0
else:
raise ValueError
def _class_to_map(nbhd_offsets, value, default=ortho_map):
"""Index a dictionary 'nbhd_offsets' on 'value', returning 'default' if
'value' is not a valid key.
:param nbhd_offsets:
:type nbhd_offsets: dict
:param value: The key used to index the dictionary 'nbhd_offsets'
:type value: Any hashable object
:param default: Default value to return if value is not in the keys, defaults
to regionmapper.ortho_map
:type default: Any object
:returns: todo
:rtype:
"""
# e.g. _class_to_map(contiguities, 3)
# where contiguities = { 3 : ortho_map + diag_map, ... }
# returns contiguities[3]
# This is used to identify the x and y offsets used in the contiguities and adjacencies
# This was writen when I was a baby.
# This can probably be replaced with collections.defaultdict... todo!
if value in nbhd_offsets.keys():
return nbhd_offsets[value]
else:
return default
def _get_adjacent_pixels(x, y, w, h, nbhd_map = ortho_map, wrap = False):
"""Returns a list of indices for adjacent pixels given a 'neighborhood map'.
Where (x,y) is the central pixel index, this will return a list of adjacent
pixels, as defined by the offsets in 'nbhd_map'.
E.g. for (x,y) = (3,6) and nbhd_map = ((1,0), (0,-1), (-1,0), (0,1)),
this will return [(4, 6), (3, 5), (2, 6), (3, 7)].
The 'wrap' parameter is only important if (x,y) are near the (w,h) boundaries.
See the examples
:param x: X-coordinate of the image
:type x: Int
:param y: Y-coordinate of the image
:type y: Int
:param w: Width of the image
:type w: Int
:param h: Height of the image
:type h: Int
:param nbhd_map: Iterable of tuple defining the "neighborhood". Defaults to
region_mapper.ortho_map.
:type nbhd_map:
:param wrap: If True, wrap around the dictionary.
:type wrap: Bool
:returns: List of indices representing 'neighbors' of a pixel.
:rtype: list of tuple of int
>>> _get_adjacent_pixels(3,6,10,10)
[(4, 6), (3, 5), (2, 6), (3, 7)]
>>> _get_adjacent_pixels(x=3,y=6,w=4,h=7)
[(3, 5), (2, 6)]
>>> _get_adjacent_pixels(x=3,y=6,w=4,h=7, wrap=True)
[(0, 6), (3, 5), (2, 6), (3, 0)]
"""
# Returns only valid pixels; order of pixels not guaranteed
# E.g. adjacent_pixels(0,0,3,4) = [(1,0),(0,3),(0,2),(0,1)]
# E.g. adjacent_pixles(0,0,0,0, wrap=False) = [(1,0),(0,1)]
adj_pixels = []
for offset_x, offset_y in nbhd_map:
if wrap:
adj_pixels.append(((x+offset_x) % w, (y+offset_y) % h))
else:
if 0 <= x + offset_x < w and 0 <= y + offset_y < h:
adj_pixels.append((x+offset_x, y + offset_y))
return adj_pixels
class RegionMapper:
"""
TODO -- document this, the init function, and clean up the comments
Given an image, the goal is to identify contiguous regions of the same color
pixel, where 'contiguity' is defined per-color.
A small glossary:
- Pixel: The basic element used in the region-mapper. Each pixel has colors
defined as 3 ints (e.g. such as (255,0,127)) and an (x,y) coordinate.
- Coordinate: Or pixel index, i.e. the (x,y) coordinate that defines a
unique location in an image.
- Class: An integer associated with the color of a pixel.
E.g. One might have {(255,0,0) : 1, (0, 255, 0) : 2, (0, 0, 255) : 3},
i.e. associated red, green, and blue with integers 1, 2, and 3.
- Contiguity: Contiguity is defined per-class. For example, red pixels might
only be contiguous on orthogonal neighbors, while blue pixels might be
contiguous on both orthogonal and diagonal neighbors.
"Contiguity" is defined per-class for flexibility.
- Region: A collection of pixels sharing the same class, defined by their
'contiguity'. Every region has a unique integer 'region id'.
- Region ID: A unique integer associated with each region after the
region-mapping algorithm is finished.
- Adjacency: A region that is next to another region. Adjacencies to regions
are defined similarly to contiguities.
:param image: A numpy array of integers (0-255) in shape (w, h, 3)
:type image: numpy.ndarray
:param class_dict: Dictionary mapping 3-tuples (pixel) to ints (class)
:type class_dict: Dict
:param contiguities: Dictionary of class int --> list of coordinates.
(E.g. One might use 'ortho_map' here.)
:type contiguities: Dict
:param adjacencies: Dictionary of class int --> list of coordinates.
(E.g. One might use 'ortho_map' here.)
:type adjacencies: Dict
:param sparse: If True, use a dictionary instead of an array to map pixels to regions.
(This is useful if most of the pixels in the image don't correspond to
a class.)
:type sparse: Bool
:param wrap: If True, region adjacencies wrap around the edge of the image.
(Like a torus, or teleporting through the side of the screen like in Pacman.)
:type wrap: Bool
"""
def __init__(self,
image, # 2D Numpy Array
class_dict, # Dict of (value) --> Int >= 1
contiguities = {}, # Dict of class int --> map (like ortho_map)
adjacencies = {}, # Dict of class int --> map (like ortho map)
sparse = True,
wrap = False):
# TODO: Split this off into other functions!
width, height = image.shape[:2]
assert(len(image.shape) == 3 or len(image.shape) == 2), \
"image should be np array shaped (width, height) or (width, height, number_of_channels_in_image)"
# Create self._image, holding a 2D numpy array of class ints
# I.e. Convert an rgb-image (w,h,3) to class-image (w,h)
# todo: this can be vectorized !!!
self._image = np.zeros((width, height))
for ii in range(width):
for jj in range(height):
self._image[ii, jj] = _value_to_class(class_dict, image[ii,jj])
# Mapping of pixel coordinates to region indices.
# If Sparse, self._region_at_pixel this is a dict mapping (x, y) tuples to int
# otherwise, self._region_at_pixel is an *array* mapping [x,y] to int
# Where the 'int' is a unique integer mapping to the region
# An ID of -1 means there is no region at this pixel
self._regions = []
if sparse:
self._region_at_pixel = dict()
else:
self._region_at_pixel = np.zeros((width, height)) - 1
# _regions_with_class:
# E.g. _regions_with_class[2] = [1,3,4] # Regions 1, 3, and 4 are the ones with class 3
# Basically, we also want a mapping of all regions that have a certain class.
self._regions_with_class = dict()
# The busy work!
# 'Region' integer IDs are incremented from 0 up
region = 0
# if rec[x,y] == True, then this pixel has already been recorded.
rec = np.zeros((width, height), dtype=np.bool)
for x in range(width):
for y in range(height):
# Each pixel already has its classed assigned,
# where 0 indicates no class
region_class = self._image[x,y]
if rec[x,y] == False and not region_class == 0:
# An unexplored region with a class!
# Let's explore it, and list all the pixels in it.
list_of_pixels_in_region = []
# Here, we fill up Fill up list_of_pixels_in_region using BFS
# contig_map is A list of what we consider "contigous" pixels.
contig_map = _class_to_map(nbhd_offsets = contiguities, value = region_class)
# pixels_under_consideration:
# Every pixel we are looking at but have not computed yet.
pixels_under_consideration = [(x,y)]
rec[x,y] = True
while len(pixels_under_consideration) > 0:
# Loop through all our pixels. (BFS starts here.)
# Takes the first element from pixels_under_consideration...
xi, yi = pixels_under_consideration.pop()
list_of_pixels_in_region.append((xi, yi)) # = [..., (x,y)]
self._region_at_pixel[xi, yi] = region
# For each adjacent pixel,
# if it's the same class,
# add it to the list of pixels we're going to explore
for xJ, yJ in _get_adjacent_pixels(
x=xi, y=yi, w=width, h=height, nbhd_map = contig_map, wrap = wrap
):
if self._image[xJ, yJ] == region_class and rec[xJ, yJ] == False:
# if the pixel is inside the region of our origin pixel,
# and if it is not already recorded...
pixels_under_consideration.append((xJ, yJ))
rec[xJ, yJ] = True
# we're done! add to our regions our
# (region_class, pixel_pixel_pixel ...)
self._regions.append((region_class, list_of_pixels_in_region))
# Add to our mapping of class --> region ID
# (and create the entry for the class index first,
# if this is our first time seeing it.)
if not (region_class) in self._regions_with_class.keys():
self._regions_with_class[region_class] = [region]
else:
self._regions_with_class[region_class].append(region)
# finally, increment our region index
region += 1
# We did it, we mapped all our regions!
# Now it's time to identify adjacent regions.
# self._adjacent_regions[region_id] provides a list of region_ids
# of adjacent regions, as defined by the 'adjacencies'
# associated with that class.
self._adjacent_regions = [[] for _ in range(len(self._regions))]
# ii is region id
for ii, (region_class, list_of_pixels) in enumerate(self._regions):
# nbhd_map = The list of pixel offsets of what this class considers its neighbours
nbhd_map = _class_to_map(nbhd_offsets = adjacencies, value = region_class)
# For every pixel in region ii,
for xi, yi in list_of_pixels:
# For every pixel adjacent to that pixel,
for xJ, yJ in _get_adjacent_pixels(
x=xi, y=yi, w=width, h=height, nbhd_map=nbhd_map, wrap=wrap
):
# If the neighbour is a valid region (not empty)
if not self._image[xJ,yJ] == 0:
neighbour = self._region_at_pixel[xJ,yJ]
if not (neighbour == ii) and \
not neighbour in self._adjacent_regions[ii]:
# If the neighbour is not us
# and it's not the same class as us
# and if we didn't already consider this neighbour...
# (neighbour is the integer ID for that region...)
# Add that neighbour to our list of regions!
self._adjacent_regions[ii].append(neighbour)
# Helper functions from here on.
def region_at_pixel(self, x, y):
"""Returns the ID of the region at that pixel,
or -1 if that region does not belong to a class.
:param x: x-index of pixel contained in the Region Mapper
:type x: Int
:param y: y-index of pixel contained in the Region Mapper
:type y: Int
:returns: The class at pixel (x,y), or -1 if such a class does not exist.
:rtype: Int
"""
# todo: What was I thinking with this design?
# What was I thinking in 2018?
# If you see this, don't judge me~
if self._image[x,y] == 0:
return -1
else:
return self._region_at_pixel[x,y]
def regions(self, region_id):
"""Given the ID of a region, return
(class_number, list of pixels indices in that region),
i.e. a list of (x,y) coordinates.
E.g. regions(region_at_pixel(x=10,y=12)) will tell you (1) the type of
region at (10,12), and (2) every other pixel at that region.
:param region_id: Integer that maps to a region.
:type region_id: Int
:returns: Tuple of (class) and (list of pixel indices) in that region.
:rtype: Tuple
"""
return self._regions[region_id]
def regions_with_class(self, class_number):
"""Given the number of a class, return all regions with that class.
For a class with integer 3, return a list of all region indices
that have that class index.
To get a list of pixel indices for a given region index,
:param class_number: The integer associated with a class.
:type class_number: Integer
:returns: List of region IDs that have a given class number.
:rtype: List of int
"""
return self._regions_with_class[class_number]
def adjacent_regions(self,region_id):
""" Given the ID of a region, return all regions adjacent to it
(as defined by its entry in the adjacencies dictionary)
:param region_id: The ID associated with a region of pixels.
:type region_id: Int
:returns: List of all region IDs of regions adjacent to a given region ID.
:rtype: List of int (or empty list)
"""
return self._adjacent_regions[region_id]
| [
"numpy.zeros"
] | [((10391, 10416), 'numpy.zeros', 'np.zeros', (['(width, height)'], {}), '((width, height))\n', (10399, 10416), True, 'import numpy as np\n'), ((11534, 11574), 'numpy.zeros', 'np.zeros', (['(width, height)'], {'dtype': 'np.bool'}), '((width, height), dtype=np.bool)\n', (11542, 11574), True, 'import numpy as np\n'), ((11058, 11083), 'numpy.zeros', 'np.zeros', (['(width, height)'], {}), '((width, height))\n', (11066, 11083), True, 'import numpy as np\n')] |
"""
..
Copyright (c) 2014-2017, Magni developers.
All rights reserved.
See LICENSE.rst for further information.
Module providing public functions for the magni.imaging.measurements
subpackage.
Routine listings
----------------
uniform_line_sample_image(h, w, scan_length, num_points)
Function for uniform line sampling an image.
uniform_line_sample_surface(l, w, speed, sample_rate, time)
Function for uniform line sampling a surface.
"""
from __future__ import division
import numpy as np
from magni.imaging.measurements import _util
from magni.utils.validation import decorate_validation as _decorate_validation
from magni.utils.validation import validate_numeric as _numeric
__all__ = ['uniform_line_sample_image', 'uniform_line_sample_surface']
_min_l = _util.min_l
_min_w = _util.min_w
_min_speed = _util.min_speed
_min_sample_rate = _util.min_sample_rate
_min_time = _util.min_time
_min_scan_length = _util.min_scan_length
_min_num_points = _util.min_num_points
def uniform_line_sample_image(h, w, scan_length, num_points):
"""
Sample an image using a set of uniformly distributed straight lines.
The coordinates (in units of pixels) resulting from sampling an image of
size `h` times `w` using a pattern based on a set of uniformly distributed
straight lines are determined. The `scan_length` determines the length of
the path scanned whereas `num_points` indicates the number of samples taken
on that path.
Parameters
----------
h : int
The height of the area to scan in units of pixels.
w : int
The width of the area to scan in units of pixels.
scan_length : float
The length of the path to scan in units of pixels.
num_points : int
The number of samples to take on the scanned path.
Returns
-------
coords : ndarray
The coordinates of the samples arranged into a 2D array, such that each
row is a coordinate pair (x, y).
Notes
-----
The orientation of the coordinate system is such that the width `w` is
measured along the x-axis whereas the height `h` is measured along the
y-axis.
Each of the scanned lines span the entire width of the image with the
exception of the last line that may only be partially scanned if the
`scan_length` implies this. The top and bottom lines of the image are
always included in the scan.
Examples
--------
For example,
>>> import numpy as np
>>> from magni.imaging.measurements import uniform_line_sample_image
>>> h = 10
>>> w = 10
>>> scan_length = 50.0
>>> num_points = 12
>>> np.set_printoptions(suppress=True)
>>> uniform_line_sample_image(h, w, scan_length, num_points)
array([[ 0.5 , 0.5 ],
[ 4.59090909, 0.5 ],
[ 8.68181818, 0.5 ],
[ 9.22727273, 3.5 ],
[ 5.13636364, 3.5 ],
[ 1.04545455, 3.5 ],
[ 1.04545455, 6.5 ],
[ 5.13636364, 6.5 ],
[ 9.22727273, 6.5 ],
[ 8.68181818, 9.5 ],
[ 4.59090909, 9.5 ],
[ 0.5 , 9.5 ]])
"""
@_decorate_validation
def validate_input():
_numeric('h', 'integer', range_='[2;inf)')
_numeric('w', 'integer', range_='[2;inf)')
_numeric('scan_length', 'floating',
range_='[{};inf)'.format(_min_scan_length))
_numeric('num_points', 'integer',
range_='[{};inf)'.format(_min_num_points))
validate_input()
coords = uniform_line_sample_surface(float(h - 1), float(w - 1),
scan_length, float(num_points - 1),
1.0)
coords = coords + 0.5
return coords
def uniform_line_sample_surface(l, w, speed, sample_rate, time):
"""
Sample aa surface area using a set of uniformly distributed straight lines.
The coordinates (in units of meters) resulting from sampling an area of
size `l` times `w` using a pattern based on a set of uniformly distributed
straight lines are determined. The scanned path is determined from the
probe `speed` and the scan `time`.
Parameters
----------
l : float
The length of the area to scan in units of meters.
w : float
The width of the area to scan in units of meters.
speed : float
The probe speed in units of meters/second.
sample_rate : float
The sample rate in units of Hertz.
time : float
The scan time in units of seconds.
Returns
-------
coords : ndarray
The coordinates of the samples arranged into a 2D array, such that each
row is a coordinate pair (x, y).
Notes
-----
The orientation of the coordinate system is such that the width `w` is
measured along the x-axis whereas the height `l` is measured along the
y-axis.
Each of the scanned lines span the entire width of the image with the
exception of the last line that may only be partially scanned if the
`scan_length` implies this. The top and bottom lines of the image are
always included in the scan.
Examples
--------
For example,
>>> import numpy as np
>>> from magni.imaging.measurements import uniform_line_sample_surface
>>> l = 2e-6
>>> w = 2e-6
>>> speed = 7e-7
>>> sample_rate = 1.0
>>> time = 12.0
>>> np.set_printoptions(suppress=True)
>>> uniform_line_sample_surface(l, w, speed, sample_rate, time)
array([[ 0. , 0. ],
[ 0.00000067, 0. ],
[ 0.00000133, 0. ],
[ 0.000002 , 0. ],
[ 0.000002 , 0.00000067],
[ 0.00000167, 0.000001 ],
[ 0.000001 , 0.000001 ],
[ 0.00000033, 0.000001 ],
[ 0. , 0.00000133],
[ 0. , 0.000002 ],
[ 0.00000067, 0.000002 ],
[ 0.00000133, 0.000002 ],
[ 0.000002 , 0.000002 ]])
"""
@_decorate_validation
def validate_input():
_numeric('l', 'floating', range_='[{};inf)'.format(_min_l))
_numeric('w', 'floating', range_='[{};inf)'.format(_min_w))
_numeric('speed', 'floating', range_='[{};inf)'.format(_min_speed))
_numeric('sample_rate', 'floating',
range_='[{};inf)'.format(_min_sample_rate))
_numeric('time', 'floating', range_='[{};inf)'.format(_min_time))
validate_input()
num_lines = int(np.floor((speed * time - l) / w))
# We should always at least partially scan top and bottom lines.
if num_lines < 2:
num_lines = 2
coords = np.zeros((2 * num_lines, 2))
coords[1::4, 0] = coords[2::4, 0] = w
coords[0::2, 1] = coords[1::2, 1] = np.linspace(0, l, num_lines)
return _util.sample_lines(coords, speed, sample_rate, time)
| [
"magni.imaging.measurements._util.sample_lines",
"numpy.floor",
"numpy.zeros",
"magni.utils.validation.validate_numeric",
"numpy.linspace"
] | [((6778, 6806), 'numpy.zeros', 'np.zeros', (['(2 * num_lines, 2)'], {}), '((2 * num_lines, 2))\n', (6786, 6806), True, 'import numpy as np\n'), ((6889, 6917), 'numpy.linspace', 'np.linspace', (['(0)', 'l', 'num_lines'], {}), '(0, l, num_lines)\n', (6900, 6917), True, 'import numpy as np\n'), ((6930, 6982), 'magni.imaging.measurements._util.sample_lines', '_util.sample_lines', (['coords', 'speed', 'sample_rate', 'time'], {}), '(coords, speed, sample_rate, time)\n', (6948, 6982), False, 'from magni.imaging.measurements import _util\n'), ((3288, 3330), 'magni.utils.validation.validate_numeric', '_numeric', (['"""h"""', '"""integer"""'], {'range_': '"""[2;inf)"""'}), "('h', 'integer', range_='[2;inf)')\n", (3296, 3330), True, 'from magni.utils.validation import validate_numeric as _numeric\n'), ((3339, 3381), 'magni.utils.validation.validate_numeric', '_numeric', (['"""w"""', '"""integer"""'], {'range_': '"""[2;inf)"""'}), "('w', 'integer', range_='[2;inf)')\n", (3347, 3381), True, 'from magni.utils.validation import validate_numeric as _numeric\n'), ((6616, 6648), 'numpy.floor', 'np.floor', (['((speed * time - l) / w)'], {}), '((speed * time - l) / w)\n', (6624, 6648), True, 'import numpy as np\n')] |
# Copyright 2019 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import mindspore.nn as nn
import mindspore as ms
from mindspore import Tensor, context, Parameter
from mindspore.common.api import _cell_graph_executor
from mindspore.ops import operations as P
from mindspore.common.initializer import initializer
from mindspore.context import _Context
from ....train_step_wrap import train_step_with_loss_warp
class MatMulCell(nn.Cell):
def __init__(self):
super(MatMulCell, self).__init__()
self.reshape = P.Reshape()
self.matmul0 = P.MatMul()
self.weight = Parameter(initializer("ones", [128, 64], ms.float32), name="weight")
self.relu = P.ReLU().shard(((1, 8),))
def construct(self, x):
x = self.matmul0(x, self.weight)
x = self.reshape(x, (32, 128))
x = self.relu(x)
return x
class DenseMutMulNet(nn.Cell):
def __init__(self):
super(DenseMutMulNet, self).__init__()
self.fc1 = nn.Dense(128, 768, activation='relu')
self.fc2 = nn.Dense(128, 768, activation='relu')
self.fc3 = nn.Dense(128, 768, activation='relu')
self.fc4 = nn.Dense(768, 768, activation='relu')
self.fc1.matmul.shard(((1, 1), (1, 8)))
self.fc2.matmul.shard(((1, 1), (1, 8)))
self.fc3.matmul.shard(((1, 1), (1, 8)))
self.relu4 = nn.ReLU()
self.relu5 = nn.ReLU()
self.transpose = P.Transpose()
self.matmul1 = P.MatMul()
self.matmul2 = P.MatMul()
self.matmul_cell = MatMulCell()
self.fc1.recompute(mp_comm_recompute=False)
self.fc2.recompute(mp_comm_recompute=False)
self.fc3.recompute(mp_comm_recompute=False)
self.matmul_cell.recompute(mp_comm_recompute=False)
def construct(self, x):
x = self.matmul_cell(x)
q = self.fc1(x)
k = self.fc2(x)
v = self.fc3(x)
k = self.transpose(k, (1, 0))
c = self.relu4(self.matmul1(q, k))
s = self.relu5(self.matmul2(c, v))
s = self.fc4(s)
return s
def test_dmnet_train_step():
context.reset_auto_parallel_context()
_Context().set_backend_policy("vm")
context.set_context(mode=context.GRAPH_MODE)
context.set_auto_parallel_context(parallel_mode="semi_auto_parallel", device_num=8)
input_ = Tensor(np.ones([64, 128]).astype(np.float32) * 0.01)
label = Tensor(np.zeros([32, 768]).astype(np.float32))
net = train_step_with_loss_warp(DenseMutMulNet())
net.set_auto_parallel()
net.set_train()
_cell_graph_executor.compile(net, input_, label)
_Context().set_backend_policy("ge")
| [
"mindspore.context._Context",
"mindspore.context.set_context",
"mindspore.ops.operations.Reshape",
"mindspore.context.reset_auto_parallel_context",
"mindspore.nn.ReLU",
"mindspore.common.api._cell_graph_executor.compile",
"numpy.zeros",
"mindspore.context.set_auto_parallel_context",
"numpy.ones",
... | [((2637, 2674), 'mindspore.context.reset_auto_parallel_context', 'context.reset_auto_parallel_context', ([], {}), '()\n', (2672, 2674), False, 'from mindspore import Tensor, context, Parameter\n'), ((2719, 2763), 'mindspore.context.set_context', 'context.set_context', ([], {'mode': 'context.GRAPH_MODE'}), '(mode=context.GRAPH_MODE)\n', (2738, 2763), False, 'from mindspore import Tensor, context, Parameter\n'), ((2768, 2855), 'mindspore.context.set_auto_parallel_context', 'context.set_auto_parallel_context', ([], {'parallel_mode': '"""semi_auto_parallel"""', 'device_num': '(8)'}), "(parallel_mode='semi_auto_parallel',\n device_num=8)\n", (2801, 2855), False, 'from mindspore import Tensor, context, Parameter\n'), ((3083, 3131), 'mindspore.common.api._cell_graph_executor.compile', '_cell_graph_executor.compile', (['net', 'input_', 'label'], {}), '(net, input_, label)\n', (3111, 3131), False, 'from mindspore.common.api import _cell_graph_executor\n'), ((1071, 1082), 'mindspore.ops.operations.Reshape', 'P.Reshape', ([], {}), '()\n', (1080, 1082), True, 'from mindspore.ops import operations as P\n'), ((1106, 1116), 'mindspore.ops.operations.MatMul', 'P.MatMul', ([], {}), '()\n', (1114, 1116), True, 'from mindspore.ops import operations as P\n'), ((1526, 1563), 'mindspore.nn.Dense', 'nn.Dense', (['(128)', '(768)'], {'activation': '"""relu"""'}), "(128, 768, activation='relu')\n", (1534, 1563), True, 'import mindspore.nn as nn\n'), ((1583, 1620), 'mindspore.nn.Dense', 'nn.Dense', (['(128)', '(768)'], {'activation': '"""relu"""'}), "(128, 768, activation='relu')\n", (1591, 1620), True, 'import mindspore.nn as nn\n'), ((1640, 1677), 'mindspore.nn.Dense', 'nn.Dense', (['(128)', '(768)'], {'activation': '"""relu"""'}), "(128, 768, activation='relu')\n", (1648, 1677), True, 'import mindspore.nn as nn\n'), ((1697, 1734), 'mindspore.nn.Dense', 'nn.Dense', (['(768)', '(768)'], {'activation': '"""relu"""'}), "(768, 768, activation='relu')\n", (1705, 1734), True, 'import 
mindspore.nn as nn\n'), ((1900, 1909), 'mindspore.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (1907, 1909), True, 'import mindspore.nn as nn\n'), ((1931, 1940), 'mindspore.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (1938, 1940), True, 'import mindspore.nn as nn\n'), ((1966, 1979), 'mindspore.ops.operations.Transpose', 'P.Transpose', ([], {}), '()\n', (1977, 1979), True, 'from mindspore.ops import operations as P\n'), ((2003, 2013), 'mindspore.ops.operations.MatMul', 'P.MatMul', ([], {}), '()\n', (2011, 2013), True, 'from mindspore.ops import operations as P\n'), ((2037, 2047), 'mindspore.ops.operations.MatMul', 'P.MatMul', ([], {}), '()\n', (2045, 2047), True, 'from mindspore.ops import operations as P\n'), ((1149, 1191), 'mindspore.common.initializer.initializer', 'initializer', (['"""ones"""', '[128, 64]', 'ms.float32'], {}), "('ones', [128, 64], ms.float32)\n", (1160, 1191), False, 'from mindspore.common.initializer import initializer\n'), ((2679, 2689), 'mindspore.context._Context', '_Context', ([], {}), '()\n', (2687, 2689), False, 'from mindspore.context import _Context\n'), ((3136, 3146), 'mindspore.context._Context', '_Context', ([], {}), '()\n', (3144, 3146), False, 'from mindspore.context import _Context\n'), ((1228, 1236), 'mindspore.ops.operations.ReLU', 'P.ReLU', ([], {}), '()\n', (1234, 1236), True, 'from mindspore.ops import operations as P\n'), ((2937, 2956), 'numpy.zeros', 'np.zeros', (['[32, 768]'], {}), '([32, 768])\n', (2945, 2956), True, 'import numpy as np\n'), ((2872, 2890), 'numpy.ones', 'np.ones', (['[64, 128]'], {}), '([64, 128])\n', (2879, 2890), True, 'import numpy as np\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import logging
logger = logging.getLogger(__name__)
import numpy as np
import os.path as op
import sklearn
import sklearn.mixture

# version comparison
from pkg_resources import parse_version
import scipy.ndimage
import scipy.stats

from . import features

# Newer sklearn versions (> 0.10) spell the GMM covariance keyword
# "covariance_type"; older ones use "cvtype".  Pick the spelling for the
# installed version; Model3D.__init__ translates user params written with
# the "bad" (other) spelling into the accepted one.
if parse_version(sklearn.__version__) > parse_version("0.10"):
    # new versions
    gmm__cvtype = "covariance_type"
    gmm__cvtype_bad = "cvtype"
    # default model: Gaussian mixture on raw intensity features
    defaultmodelparams = {
        "type": "gmmsame",
        "params": {"covariance_type": "full"},
        "fv_type": "intensity",
    }
else:
    gmm__cvtype = "cvtype"
    gmm__cvtype_bad = "covariance_type"
    defaultmodelparams = {
        "type": "gmmsame",
        "params": {"cvtype": "full"},
        "fv_type": "intensity",
    }

# canonical segmentation method names
methods = ["graphcut", "multiscale_graphcut_lo2hi", "multiscale_graphcut_hi2lo"]
# aliases accepted on input for the methods above
accepted_methods = [
    "graphcut",
    "gc",
    "multiscale_graphcut",
    "multiscale_gc",
    "msgc",
    "msgc_lo2hi",
    "lo2hi",
    "multiscale_graphcut_lo2hi",
    "msgc_hi2lo",
    "hi2lo",
    "multiscale_graphcut_hi2lo",
]
def sigmoid(x):
    """Return the logistic sigmoid 1 / (1 + e^-x), element-wise for arrays."""
    denom = 1 + np.exp(-x)
    return 1 / denom
def softplus(x, max_error=1, keep_dtype=True):
    """Smooth approximation of ReLU: ``max_error * log(1 + exp(x / max_error))``.

    :param x: scalar or array-like input
    :param max_error: scale of the smoothing; larger values give a softer knee
    :param keep_dtype: if True, cast the result back to the input's dtype
    """
    arr = np.asarray(x)
    original_dtype = arr.dtype
    scaled = np.exp(arr / max_error)
    out = max_error * np.log(1 + scaled)
    if keep_dtype:
        out = out.astype(original_dtype)
    return out
class Model3D(object):
    """ Model for image intensity. Last dimension represents the feature vector.
    m = Model()
    m.train(cla, clb)
    X = numpy.random.random([2,3,4])
    # we have data 2x3 with feature vector with 4 features
    m.likelihood(X,0)

    modelparams['type']: type of model estimation. Gaussian mixture from EM
    algorithm is implemented as 'gmmsame'. Gaussian kernel density estimation
    is implemented as 'gaussian_kde'. General kernel estimation ('kernel')
    is from scipy version 0.14 and it is not tested.

    fv_type: feature vector type is defined with one of following strings
        intensity - based on seeds and data the intensity as feature vector is used
        voxel - information in voxel1 and voxel2 is used
        fv_extern - external feature vector function specified in fv_extern label
        fv001 - pixel and gaussian blur

    fv_extern:
        function `fv_function(data, voxelsize, seeds, unique_cls)`. It is used only
        if fv_type is set to "fv_extern"

    mdl_stored_file:
        string or False. Default is false. The string is path to file with stored model.
        This model is loaded.

    adaptation:
        - retrain: no adaptation
        - original_data: train every class only once
    """

    def __init__(self, modelparams):
        """
        :param modelparams: dict merged over module-level ``defaultmodelparams``.
            A "params" sub-dict written with the covariance keyword of the
            other sklearn generation is translated automatically.
        """
        # translate "cvtype" <-> "covariance_type" to the spelling the
        # installed sklearn accepts (see module-level gmm__cvtype*)
        if "params" in modelparams.keys() and gmm__cvtype_bad in modelparams["params"]:
            value = modelparams["params"].pop(gmm__cvtype_bad)
            modelparams["params"][gmm__cvtype] = value

        self.mdl = {}  # per-class trained estimators, keyed by class label
        self.modelparams = defaultmodelparams.copy()
        self.modelparams.update({"adaptation": "retrain"})
        # if modelparams are updated after load, there are problems with some setting comming from outside and rewriting
        # for example "fv_type" into "intensity"
        self.modelparams.update(modelparams)
        if "mdl_stored_file" in modelparams.keys() and modelparams["mdl_stored_file"]:
            mdl_file = modelparams["mdl_stored_file"]
            self.load(mdl_file)

    def fit_from_image(self, data, voxelsize, seeds, unique_cls):
        """
        This method computes the feature vector and trains the model.

        :param data: 3D image
        :param voxelsize: voxel size, forwarded to the feature function
        :param seeds: ndimage of the same shape as data; nonzero values are seeds
        :param unique_cls: list of index numbers of requested classes in seeds
        """
        fvs, clsselected = self.features_from_image(data, voxelsize, seeds, unique_cls)
        self.fit(fvs, clsselected)
        # import pdb
        # pdb.set_trace()
        # for fv, cl in zip(fvs, cls):
        #     fvs, clsselected = self.features_from_image(data, voxelsize, seeds, cl)
        #     logger.debug('cl: ' + str(cl))
        #     self.train(fv, cl)

    def save(self, filename):
        """
        Save model to pickle file. External feature function is not stored.

        :param filename: path of the output pickle file
        """
        import dill

        tmpmodelparams = self.modelparams.copy()
        # fv_extern_src = None
        fv_extern_name = None
        # try:
        #     fv_extern_src = dill.source.getsource(tmpmodelparams['fv_extern'])
        #     tmpmodelparams.pop('fv_extern')
        # except:
        #     pass

        # fv_extern_name = dill.source.getname(tmpmodelparams['fv_extern'])
        # external feature functions are dropped before serialization
        if "fv_extern" in tmpmodelparams:
            tmpmodelparams.pop("fv_extern")

        sv = {
            "modelparams": tmpmodelparams,
            "mdl": self.mdl,
            # 'fv_extern_src': fv_extern_src,
            # 'fv_extern_src_name': fv_extern_src_name,
            # 'fv_extern_name': fv_extern_src_name,
            #
        }
        # NOTE(review): this debug dump serializes the *full* modelparams
        # (including fv_extern); it may fail for unpicklable functions — confirm
        sss = dill.dumps(self.modelparams)
        logger.debug("pickled " + str(sss))

        dill.dump(sv, open(filename, "wb"))

    def load(self, mdl_file):
        """
        Load model from file. fv_type is not set with this function. It is expected to set it before.

        :param mdl_file: path to a file written by :meth:`save` ("~" is expanded)
        """
        import dill as pickle

        mdl_file_e = op.expanduser(mdl_file)
        sv = pickle.load(open(mdl_file_e, "rb"))
        self.mdl = sv["mdl"]
        # self.mdl[2] = self.mdl[0]
        # try:
        #     eval(sv['fv_extern_src'])
        #     eval("fv_extern_temp_name = " + sv['fv_extern_src_name'])
        #     sv['fv_extern'] = fv_extern_temp_name
        # except:
        #     print "pomoc,necoje blbe"
        #     pass

        self.modelparams.update(sv["modelparams"])
        logger.debug("loaded model from path: " + mdl_file_e)
        # from PyQt4 import QtCore; QtCore.pyqtRemoveInputHook()
        # import ipdb; ipdb.set_trace()

    def likelihood_from_image(self, data, voxelsize, cl):
        """Return the per-voxel likelihood of class ``cl`` reshaped to data's shape."""
        sha = data.shape

        likel = self.likelihood(self.features_from_image(data, voxelsize), cl)
        return likel.reshape(sha)
class Model(Model3D):
    """Intensity model for 3D image data.

    Wraps several density estimators ("gmmsame", "kernel", "gaussian_kde",
    "dpgmm", "stored", "custom") behind a common fit/likelihood interface.
    Feature vectors are produced by :meth:`features_from_image` according to
    ``modelparams['fv_type']``.
    """

    def features_from_image(self, data, voxelsize, seeds=None, unique_cls=None):
        """
        Compute feature vectors from a 3D image.

        :param data: 3D image
        :param seeds: ndimage with the same shape as data; nonzero values mark seeds
        :param unique_cls: select only feature vectors for seeds from these
            classes, e.g. unique_cls = [1, 2] ignores label 0
        :return: feature vector array ``fv``; if ``seeds`` is given, a tuple
            ``(fv, sd)`` with the matching seed labels

        The function is called twice during graph cut: first with all
        parameters, second with data only.  The feature vector type is chosen
        by ``self.modelparams['fv_type']`` — one of "intensity", "fv001"
        (alias "intensity_and_blur") or "fv_extern".  With "fv_extern" the
        external function ``fv_function(data, voxelsize, seeds, unique_cls)``
        stored in ``modelparams['fv_extern']`` is used; for easier
        implementation of its return values use ``features.return_fv_by_seeds()``.
        """
        fv_type = self.modelparams["fv_type"]
        logger.debug("fv_type " + fv_type)
        fv = []
        if fv_type == "intensity":
            fv = data.reshape(-1, 1)

            if seeds is not None:
                logger.debug("seeds: %s", scipy.stats.describe(seeds, axis=None))
                sd = seeds.reshape(-1, 1)
                # keep only voxels whose seed label is in the requested classes
                # (np.in1d returns a flat mask, which selects rows of fv)
                selection = np.in1d(sd, unique_cls)
                fv = fv[selection]
                sd = sd[selection]
                return fv, sd
            return fv
        elif fv_type in ("fv001", "FV001", "intensity_and_blur"):
            # intensity in pixel + gaussian-blurred intensity
            return features.fv_function_intensity_and_smoothing(
                data, voxelsize, seeds, unique_cls
            )
        elif fv_type == "fv_extern":
            fv_function = self.modelparams["fv_extern"]
            return fv_function(data, voxelsize, seeds, unique_cls)
        else:
            logger.error("Unknown feature vector type: " + self.modelparams["fv_type"])
            return fv

    def fit(self, clx, cla):
        """
        Train the model.

        :param clx: feature vectors, one row per sample
        :param cla: class label — scalar, or an array with one label per row of clx
        """
        # NOTE: scalar cla trains a single class; with an array, one sub-model
        # is trained per distinct label (no cross-class knowledge is shared).
        if np.isscalar(cla):
            self._fit_one_class(clx, cla)
        else:
            cla = np.asarray(cla)
            clx = np.asarray(clx)
            for cli in np.unique(cla):
                selection = cla == cli
                clxsel = clx[np.nonzero(selection)[0]]
                self._fit_one_class(clxsel, cli)

    def _fit_one_class(self, clx, cl):
        """Train class number ``cl`` with data ``clx``.

        Use fit_from_image() if you want to use 3D image data as an input.

        :param clx: data, 2d matrix (n_samples x n_features)
        :param cl: label, integer
        Supported types: gmmsame, kernel, gaussian_kde, dpgmm, stored, custom
        :raises NameError: for an unknown ``modelparams['type']``
        """
        logger.debug("clx " + str(clx[:10, :]))
        logger.debug("clx type" + str(clx.dtype))
        logger.debug("_fit()")
        if self.modelparams["adaptation"] == "original_data":
            # "original_data" adaptation: train every class only once
            if cl in self.mdl.keys():
                return
        logger.debug("training continues")

        if self.modelparams["type"] == "gmmsame":
            if len(clx.shape) == 1:
                logger.warning(
                    "reshaping in train will be removed. Use \ntrainFromImageAndSeeds() function"
                )
                print("Warning deprecated feature in train() function")
                # a 1D vector must be converted to a 2D matrix
                clx = clx.reshape(-1, 1)
            gmmparams = self.modelparams["params"]
            self.mdl[cl] = sklearn.mixture.GaussianMixture(**gmmparams)
            self.mdl[cl].fit(clx)
        elif self.modelparams["type"] == "kernel":
            # Not working (probably) in old versions of scikits
            from sklearn.neighbors import KernelDensity

            kernelmodelparams = self.modelparams["params"]
            self.mdl[cl] = KernelDensity(**kernelmodelparams).fit(clx)
        elif self.modelparams["type"] == "gaussian_kde":
            import scipy.stats

            # gaussian_kde works only with floating point types
            # (np.float alias was removed in NumPy 1.24 — use builtin float)
            self.mdl[cl] = scipy.stats.gaussian_kde(clx.astype(float))
        elif self.modelparams["type"] == "dpgmm":
            gmmparams = self.modelparams["params"]
            self.mdl[cl] = sklearn.mixture.DPGMM(**gmmparams)
            # HACK: DPGMM does not converge on our data without rescaling,
            # presumably related to the alpha parameter; multiplying by a
            # small number makes it work (see matching 0.01 in likelihood()).
            self.mdl[cl].fit(clx * 0.001)
        elif self.modelparams["type"] == "stored":
            # Classifier is trained before segmentation and stored to pickle
            import pickle

            print("stored")
            logger.warning("deprecated use of stored parameters")
            mdl_file = self.modelparams["params"]["mdl_file"]
            self.mdl = pickle.load(open(mdl_file, "rb"))
        elif self.modelparams["type"] == "custom":
            # BUGFIX: this condition was ``type(self.modelparams['type'] ==
            # 'custom')`` — type() of a bool is always truthy, so any unknown
            # model type silently fell into this branch and the NameError
            # below was unreachable.
            self.mdl[cl].fit(clx)
        else:
            raise NameError("Unknown model type")

        # pdb.set_trace();
        # TODO remove saving
        #  self.save('classif.p')

    def likelihood(self, x, cl):
        """
        Return the log-likelihood of samples ``x`` under the model of class ``cl``.

        X = numpy.random.random([2,3,4])
        # we have data 2x3 with feature vector with 4 features
        Use likelihood_from_image() function for 3d image input
        m.likelihood(X,0)
        """
        # sha = x.shape
        # xr = x.reshape(-1, sha[-1])
        # outsha = sha[:-1]
        logger.debug("likel " + str(x.shape))
        if self.modelparams["type"] == "gmmsame":
            px = self.mdl[cl].score_samples(x)
        elif self.modelparams["type"] == "kernel":
            px = self.mdl[cl].score_samples(x)
        elif self.modelparams["type"] == "gaussian_kde":
            # np.log because gaussian_kde returns a density, not a log-density
            px = np.log(self.mdl[cl](x))
        elif self.modelparams["type"] == "dpgmm":
            # the scaling must match the hack applied in _fit_one_class()
            logger.warning(".score() replaced with .score_samples() . Check it.")
            px = self.mdl[cl].score_samples(x * 0.01)
        elif self.modelparams["type"] == "stored":
            px = self.mdl[cl].score(x)
        elif self.modelparams["type"] == "custom":
            px = self.mdl[cl].score_samples(x)
        else:
            logger.error(f"Unknown type {self.modelparams['type']}")
        return px
| [
"pkg_resources.parse_version",
"numpy.isscalar",
"sklearn.neighbors.KernelDensity",
"numpy.asarray",
"numpy.unique",
"sklearn.mixture.GaussianMixture",
"dill.dumps",
"numpy.nonzero",
"numpy.exp",
"sklearn.mixture.DPGMM",
"os.path.expanduser",
"logging.getLogger",
"numpy.in1d"
] | [((71, 98), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (88, 98), False, 'import logging\n'), ((308, 342), 'pkg_resources.parse_version', 'parse_version', (['sklearn.__version__'], {}), '(sklearn.__version__)\n', (321, 342), False, 'from pkg_resources import parse_version\n'), ((345, 366), 'pkg_resources.parse_version', 'parse_version', (['"""0.10"""'], {}), "('0.10')\n", (358, 366), False, 'from pkg_resources import parse_version\n'), ((1221, 1234), 'numpy.asarray', 'np.asarray', (['x'], {}), '(x)\n', (1231, 1234), True, 'import numpy as np\n'), ((4994, 5022), 'dill.dumps', 'dill.dumps', (['self.modelparams'], {}), '(self.modelparams)\n', (5004, 5022), False, 'import dill\n'), ((5321, 5344), 'os.path.expanduser', 'op.expanduser', (['mdl_file'], {}), '(mdl_file)\n', (5334, 5344), True, 'import os.path as op\n'), ((9802, 9818), 'numpy.isscalar', 'np.isscalar', (['cla'], {}), '(cla)\n', (9813, 9818), True, 'import numpy as np\n'), ((1152, 1162), 'numpy.exp', 'np.exp', (['(-x)'], {}), '(-x)\n', (1158, 1162), True, 'import numpy as np\n'), ((9894, 9909), 'numpy.asarray', 'np.asarray', (['cla'], {}), '(cla)\n', (9904, 9909), True, 'import numpy as np\n'), ((9928, 9943), 'numpy.asarray', 'np.asarray', (['clx'], {}), '(clx)\n', (9938, 9943), True, 'import numpy as np\n'), ((10022, 10036), 'numpy.unique', 'np.unique', (['cla'], {}), '(cla)\n', (10031, 10036), True, 'import numpy as np\n'), ((11490, 11534), 'sklearn.mixture.GaussianMixture', 'sklearn.mixture.GaussianMixture', ([], {}), '(**gmmparams)\n', (11521, 11534), False, 'import sklearn\n'), ((1291, 1312), 'numpy.exp', 'np.exp', (['(x / max_error)'], {}), '(x / max_error)\n', (1297, 1312), True, 'import numpy as np\n'), ((7934, 7957), 'numpy.in1d', 'np.in1d', (['sd', 'unique_cls'], {}), '(sd, unique_cls)\n', (7941, 7957), True, 'import numpy as np\n'), ((10106, 10127), 'numpy.nonzero', 'np.nonzero', (['selection'], {}), '(selection)\n', (10116, 10127), True, 'import numpy as 
np\n'), ((11965, 11999), 'sklearn.neighbors.KernelDensity', 'KernelDensity', ([], {}), '(**kernelmodelparams)\n', (11978, 11999), False, 'from sklearn.neighbors import KernelDensity\n'), ((12558, 12592), 'sklearn.mixture.DPGMM', 'sklearn.mixture.DPGMM', ([], {}), '(**gmmparams)\n', (12579, 12592), False, 'import sklearn\n')] |
import cv2
import numpy as np
from scipy.ndimage.filters import gaussian_filter
from skimage.transform import warp
from .augmentor import DataAugment
class Elastic(DataAugment):
    """Elastic deformation of images as described in [Simard2003]_ (with modifications).
    Based on https://gist.github.com/erniejunior/601cdf56d2b424757de5.

    .. [Simard2003] Simard, <NAME> Platt, "Best Practices for
       Convolutional Neural Networks applied to Visual Document Analysis", in
       Proc. of the International Conference on Document Analysis and
       Recognition, 2003.

    Args:
        alpha (float): maximum pixel-moving distance of elastic transformation.
        sigma (float): standard deviation of the Gaussian filter.
        p (float): probability of applying the augmentation.
    """

    def __init__(self,
                 alpha=10.0,
                 sigma=4.0,
                 p=0.5):
        super(Elastic, self).__init__(p)
        self.alpha = alpha
        self.sigma = sigma
        # warp interpolation orders: bi-linear for images,
        # nearest-neighbour for labels/masks (keeps label ids intact)
        self.image_interpolation = 1
        self.label_interpolation = 0
        self.border_mode = cv2.BORDER_CONSTANT
        self.set_params()

    def set_input_sz(self, image_sz):
        """Precompute (z, y, x) coordinate grids for an input of shape *image_sz*.

        Must be called before :meth:`__call__`, which reads ``self.mapz``,
        ``self.y`` and ``self.x``.
        """
        self.z, self.y, self.x = np.mgrid[:image_sz[0], :image_sz[1], :image_sz[2]]
        self.mapz = self.z.astype(np.float32)

    def set_params(self):
        # pixels can move by at most ``alpha``; reserve that margin when
        # sampling (z axis is never displaced, hence the leading 0)
        max_margin = int(self.alpha) + 1
        self.sample_params['add'] = [0, max_margin, max_margin]

    def __call__(self, data, random_state=None):
        """Apply one random elastic warp to all volumes in the *data* dict.

        Displacement fields are generated only in the (y, x) plane; the z
        coordinate is left untouched.
        """
        image = data['image']
        depth, height, width = image.shape[-3:]  # (z, y, x)
        if random_state is None:
            random_state = np.random.RandomState()

        # smoothed white noise in [-1, 1), scaled to at most ``alpha`` pixels
        dx = np.float32(gaussian_filter((random_state.rand(height, width) * 2 - 1), self.sigma) * self.alpha)
        dy = np.float32(gaussian_filter((random_state.rand(height, width) * 2 - 1), self.sigma) * self.alpha)
        mapy, mapx = np.float32(self.y + dy), np.float32(self.x + dx)

        output = {}
        for key, image in data.items():
            if key in ['skeleton', 'flux', 'context', 'skeleton_probability']:
                # these targets are passed through unchanged
                output[key] = image
            elif key == 'image':
                output[key] = warp(image, np.array([self.mapz, mapy, mapx]), order=self.image_interpolation)
            elif key == 'label' or key == 'mask' or key == 'weight':
                output[key] = warp(image, np.array([self.mapz, mapy, mapx]), order=self.label_interpolation)
            else:
                raise TypeError('Input data key not identified, Key was: ' + key)
        return output
return output | [
"numpy.float32",
"numpy.array",
"numpy.random.RandomState"
] | [((1666, 1689), 'numpy.random.RandomState', 'np.random.RandomState', ([], {}), '()\n', (1687, 1689), True, 'import numpy as np\n'), ((1931, 1954), 'numpy.float32', 'np.float32', (['(self.y + dy)'], {}), '(self.y + dy)\n', (1941, 1954), True, 'import numpy as np\n'), ((1956, 1979), 'numpy.float32', 'np.float32', (['(self.x + dx)'], {}), '(self.x + dx)\n', (1966, 1979), True, 'import numpy as np\n'), ((2231, 2264), 'numpy.array', 'np.array', (['[self.mapz, mapy, mapx]'], {}), '([self.mapz, mapy, mapx])\n', (2239, 2264), True, 'import numpy as np\n'), ((2409, 2442), 'numpy.array', 'np.array', (['[self.mapz, mapy, mapx]'], {}), '([self.mapz, mapy, mapx])\n', (2417, 2442), True, 'import numpy as np\n')] |
import matplotlib.pyplot as plt
import numpy as np
x_label = "Percentage of computed source vertices"
v_size = 3.5  # figure height in inches

# Runtimes in seconds on the delaunay_n17 graph, measured at 10%, 20%, ...,
# 100% of computed source vertices (10 values per list), for four strategies
# (work-efficient only, hybrid/sampling, 1-degree reduction, static) on four
# platforms: KNC, KNL, CPU and GPU.  Presumably betweenness-centrality runs
# (see the "hybrid_BC" label below) — TODO confirm.
d17_knc_we = [18.6413, 38.0379, 55.209, 74.8671, 84.7064, 108.733, 132.209, 150.944, 159.577, 182.999]
d17_knc_hybrid = [19.1711, 34.4665, 52.3645, 75.2577, 91.2469, 107.445, 141.024, 149.422, 156.733, 178.92]
d17_knc_1_deg = [19.3719, 36.3215, 52.8099, 70.2425, 88.0959, 107.775, 126.43, 147.911, 152.238, 176.194]
d17_knc_static = [19.3369, 37.6855, 59.8228, 72.8868, 91.8513, 103.344, 138.005, 142.849, 167.422, 175.782]
d17_knl_we = [7.66781, 14.7215, 21.8407, 29.4956, 37.1857, 44.3433, 50.1171, 58.0227, 66.0501, 71.3788]
d17_knl_hybrid = [8.30653, 14.8145, 21.8704, 29.7544, 36.3582, 43.914, 51.1906, 59.5881, 66.6089, 70.9231]
d17_knl_1_deg = [7.64442, 14.7256, 22.2512, 29.5719, 37.98, 44.8056, 50.1526, 58.5009, 66.5349, 73.0537]
d17_knl_static = [8.05157, 15.6002, 22.1642, 30.2022, 37.7766, 44.1086, 51.5989, 57.8864, 66.5574, 73.7597]
d17_cpu_we = [7.92808, 14.0702, 22.4746, 27.156, 36.6575, 40.5894, 50.9973, 53.8429, 64.2043, 66.1022]
d17_cpu_hybrid = [7.90877, 13.994, 22.3328, 27.328, 36.5353, 40.3604, 50.9741, 53.7826, 64.8632, 66.1333]
d17_cpu_1_deg = [8.16191, 14.1579, 22.3922, 27.4464, 36.7004, 40.2593, 51.4501, 53.5133, 65.2999, 66.725]
d17_cpu_static = [7.41953, 14.2521, 20.8835, 27.7985, 34.8896, 41.047, 48.1257, 54.8109, 62.4582, 68.4668]
d17_gpu_we = [35.44178, 70.8376541, 106.20118, 141.616058, 177.007004, 212.457275, 247.871445, 283.191254, 318.640198,
              353.991241]
d17_gpu_hybrid = [36.5964546, 73.1559601, 109.631493, 146.09584, 182.566376, 219.149506, 255.555191, 292.022369,
                  328.54837, 365.179688]
d17_gpu_1_deg = [36.021328, 72.0097885, 107.974327, 143.984177, 179.958359, 215.897125, 251.892273, 287.840393,
                 323.775177, 359.81424]
d17_gpu_static = [35.3266373, 70.95802, 106.409904, 141.867767, 177.140427, 212.541519, 248.006454, 283.339294,
                  318.882294, 354.26791]
if __name__ == '__main__':
    # One row of four subplots sharing both axes; plotted left to right as
    # GPU (ax4), KNC (ax1), KNL (ax2), CPU (ax3).
    fig3, (ax4, ax1, ax2, ax3) = plt.subplots(1, 4, sharey=True, sharex=True, figsize=(16, v_size))
    # NOTE(review): line_styles is defined but never used below.
    line_styles = ['-', 'o-', 'v-', '-^', '-<', '->', '-s',
                   'p-', '*-', 'd-', 'x', 'D', 'd', '|', '--', '-.', '.-', ',-', '-+']
    line_lables = ['Work-effcient-only', 'Hybrid with sampling', '1-degree reduction', 'GPU hybrid_BC', "static"]

    # fig1
    # x positions 1..10 correspond to 10%..100% of computed source vertices
    xtick1 = np.arange(1, 11, 1)
    ax1.set_xlim(0.5, 10.5)
    ax1.set_xticks(xtick1)
    ax1.set_xticklabels(['10%', '20%', '30%', '40%', '50%', '60%', '70%', '80%', '90%', '100%'])
    # keep the handles of these four lines for the shared figure legend below
    line1, = ax1.plot(xtick1, d17_knc_we, '.-', label=line_lables[0], linewidth=1.0, markersize=10)
    line2, = ax1.plot(xtick1, d17_knc_hybrid, 'v-', label=line_lables[1], linewidth=1.0, markersize=7)
    line3, = ax1.plot(xtick1, d17_knc_1_deg, '--', label=line_lables[2], linewidth=1.0, markersize=10)
    line4, = ax1.plot(xtick1, d17_knc_static, '*-', label=line_lables[4], linewidth=1.0, markersize=7)

    ax2.set_xticks(xtick1)
    ax2.set_xticklabels(['10%', '20%', '30%', '40%', '50%', '60%', '70%', '80%', '90%', '100%'])
    ax2.plot(xtick1, d17_knl_we, '.-', label=line_lables[0], linewidth=1.0, markersize=10)
    ax2.plot(xtick1, d17_knl_hybrid, 'v-', label=line_lables[1], linewidth=1.0, markersize=7)
    ax2.plot(xtick1, d17_knl_1_deg, '--', label=line_lables[2], linewidth=1.0, markersize=10)
    ax2.plot(xtick1, d17_knl_static, '*-', label=line_lables[4], linewidth=1.0, markersize=7)

    ax3.set_xticks(xtick1)
    ax3.set_xticklabels(['10%', '20%', '30%', '40%', '50%', '60%', '70%', '80%', '90%', '100%'])
    ax3.plot(xtick1, d17_cpu_we, '.-', label=line_lables[0], linewidth=1.0, markersize=10)
    ax3.plot(xtick1, d17_cpu_hybrid, 'v-', label=line_lables[1], linewidth=1.0, markersize=7)
    ax3.plot(xtick1, d17_cpu_1_deg, '--', label=line_lables[2], linewidth=1.0, markersize=10)
    ax3.plot(xtick1, d17_cpu_static, '*-', label=line_lables[4], linewidth=1.0, markersize=7)

    ax4.set_xticks(xtick1)
    ax4.set_xticklabels(['10%', '20%', '30%', '40%', '50%', '60%', '70%', '80%', '90%', '100%'])
    ax4.plot(xtick1, d17_gpu_we, '.-', label=line_lables[0], linewidth=1.0, markersize=10)
    ax4.plot(xtick1, d17_gpu_hybrid, 'v-', label=line_lables[1], linewidth=1.0, markersize=7)
    ax4.plot(xtick1, d17_gpu_1_deg, '--', label=line_lables[2], linewidth=1.0, markersize=10)
    ax4.plot(xtick1, d17_gpu_static, '*-', label=line_lables[4], linewidth=1.0, markersize=7)

    # common y scale across all four platforms
    ax1.yaxis.set_ticks(np.arange(0, 360, 50))
    ax2.yaxis.set_ticks(np.arange(0, 360, 50))
    ax3.yaxis.set_ticks(np.arange(0, 360, 50))
    ax4.yaxis.set_ticks(np.arange(0, 360, 50))
    ax1.grid(True)
    ax2.grid(True)
    ax3.grid(True)
    ax4.grid(True)

    # Create new figure and two subplots, sharing both axes
    # ax1.scatter(19,150)
    # ax1.plot((18,19),(k40[7],k40_half_d[0]), linestyles[9], label='k40_half')
    # ax1.legend(loc=0,prop={'size':11})
    ax4.set_ylabel('Time in seconds')
    ax4.set_xlabel(x_label)
    ax1.set_xlabel(x_label)
    ax2.set_xlabel(x_label)
    ax3.set_xlabel(x_label)
    ax4.set_title('(a) GPU')
    ax1.set_title('(b) KNC')
    ax2.set_title('(c) KNL')
    ax3.set_title('(d) CPU')
    # one legend for the whole figure, reusing the KNC line handles
    fig3.legend((line1, line2, line4, line3),
                ('work-efficient-only', 'sampling-dynamic-switching', 'sampling-static-switching',
                 '1-degree work-efficient'),
                loc=2, bbox_to_anchor=[0.789, 0.89])
    fig3.subplots_adjust(wspace=0)
    plt.tight_layout()
    plt.savefig('delaunay_n17_scale_three.pdf', dpi=1200)
    fig3.show()
| [
"matplotlib.pyplot.tight_layout",
"matplotlib.pyplot.subplots",
"numpy.arange",
"matplotlib.pyplot.savefig"
] | [((2054, 2120), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(4)'], {'sharey': '(True)', 'sharex': '(True)', 'figsize': '(16, v_size)'}), '(1, 4, sharey=True, sharex=True, figsize=(16, v_size))\n', (2066, 2120), True, 'import matplotlib.pyplot as plt\n'), ((2408, 2427), 'numpy.arange', 'np.arange', (['(1)', '(11)', '(1)'], {}), '(1, 11, 1)\n', (2417, 2427), True, 'import numpy as np\n'), ((5510, 5528), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (5526, 5528), True, 'import matplotlib.pyplot as plt\n'), ((5533, 5586), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""delaunay_n17_scale_three.pdf"""'], {'dpi': '(1200)'}), "('delaunay_n17_scale_three.pdf', dpi=1200)\n", (5544, 5586), True, 'import matplotlib.pyplot as plt\n'), ((4508, 4529), 'numpy.arange', 'np.arange', (['(0)', '(360)', '(50)'], {}), '(0, 360, 50)\n', (4517, 4529), True, 'import numpy as np\n'), ((4555, 4576), 'numpy.arange', 'np.arange', (['(0)', '(360)', '(50)'], {}), '(0, 360, 50)\n', (4564, 4576), True, 'import numpy as np\n'), ((4602, 4623), 'numpy.arange', 'np.arange', (['(0)', '(360)', '(50)'], {}), '(0, 360, 50)\n', (4611, 4623), True, 'import numpy as np\n'), ((4649, 4670), 'numpy.arange', 'np.arange', (['(0)', '(360)', '(50)'], {}), '(0, 360, 50)\n', (4658, 4670), True, 'import numpy as np\n')] |
import os
import numpy as np
import torch
import trimesh
from im2mesh import config
from im2mesh.utils import binvox_rw, voxels
from im2mesh.checkpoints import CheckpointIO
from im2mesh.utils.visualize import visualize_pointcloud, visualize_voxels
def load_binvox(file_path: str):
    """Read a .binvox file and return its occupancy grid as a float32 array."""
    with open(file_path, "rb") as binvox_file:
        voxel_grid = binvox_rw.read_as_3d_array(binvox_file)
    return voxel_grid.data.astype(np.float32)
def load_pointcloud(file_path):
    """Load the 'points' array from a pointcloud ``.npz`` archive as float32."""
    archive = np.load(file_path)
    return archive["points"].astype(np.float32)
def load_mesh(file_path: str, process: bool = True, padding: float = 0.1):
    """Load a mesh with trimesh and optionally normalize it in place.

    When *process* is True, the mesh is translated so its bounding-box center
    sits at the origin and scaled so its longest side fits a unit cube shrunk
    by *padding*.
    """
    mesh = trimesh.load(file_path, process=False)
    if not process:
        return mesh
    longest_side = (mesh.bounds[1] - mesh.bounds[0]).max()
    scale = longest_side / (1 - padding)
    center = (mesh.bounds[1] + mesh.bounds[0]) / 2
    mesh.apply_translation(-center)
    mesh.apply_scale(1 / scale)
    return mesh
def process_mesh(mesh, padding: float = 0, flip_yz: bool = False, with_transforms: bool = False):
    """Center *mesh* at the origin and scale it into a (1 - padding) unit cube.

    :param mesh: trimesh mesh, modified in place
    :param padding: fraction of the unit cube left empty around the mesh
    :param flip_yz: additionally rotate 90 degrees about the x-axis
    :param with_transforms: also return the applied ``(loc, scale)``
    :return: ``mesh`` or ``(mesh, loc, scale)``
    """
    lower, upper = mesh.bounding_box.bounds
    loc = (lower + upper) / 2
    scale = (upper - lower).max() / (1 - padding)
    mesh.apply_translation(-loc)
    mesh.apply_scale(1 / scale)

    if flip_yz:
        angle = 90 / 180 * np.pi
        rotation = trimesh.transformations.rotation_matrix(angle, [1, 0, 0])
        mesh.apply_transform(rotation)

    if with_transforms:
        return mesh, loc, scale
    return mesh
def visualize_all(file_path):
    """Show the sampled pointcloud and the voxelization stored under *file_path*."""
    points_path = os.path.join(file_path, "points.npz")
    visualize_pointcloud(load_pointcloud(points_path), show=True)
    binvox_path = os.path.join(file_path, "model.binvox")
    visualize_voxels(load_binvox(binvox_path), show=True)
def visualize_from_mesh(file_path: str, flip_yz: bool = False, use_trimes: bool = False):
    """Load a mesh, sample a pointcloud, voxelize it via a ``viz.binvox``
    round-trip on disk, and show both.

    :param file_path: path of the mesh to load
    :param flip_yz: rotate the mesh 90 degrees about the x-axis before sampling
    :param use_trimes: voxelize with trimesh's external binvox binary instead
        of the ray-casting voxelizer
    """
    mesh = load_mesh(file_path)
    mesh, loc, scale = process_mesh(mesh, flip_yz=flip_yz, with_transforms=True)
    pointcloud = mesh.sample(2048).astype(np.float32)

    if use_trimes:
        # NOTE(review): hard-coded local binvox binary path
        voxel = trimesh.exchange.binvox.voxelize_mesh(mesh,
                                                     dimension=32,
                                                     remove_internal=False,
                                                     center=True,
                                                     binvox_path="/home/matthias/Downloads/binvox")
        binvox = trimesh.exchange.binvox.export_binvox(voxel)  # Writes in 'xzy' format by default
        with open("viz.binvox", "wb") as f:
            f.write(binvox)
    else:
        voxels_occ = voxels.voxelize_ray(mesh, 32)
        voxels_out = binvox_rw.Voxels(voxels_occ, (32,) * 3,
                                     translate=loc, scale=scale,
                                     axis_order="xyz")  # 'xyz' means 'voxel_occ' is in this format
        with open("viz.binvox", "wb") as f:
            voxels_out.write(f)  # Always writes in 'xzy' format

    with open("viz.binvox", "rb") as f:
        voxels_in = binvox_rw.read_as_3d_array(f)  # Expects data in 'xzy' format (otherwise set 'fix_coords' to 'False'
    voxels_in = voxels_in.data.astype(np.float32)

    visualize_pointcloud(pointcloud, show=True)
    visualize_voxels(voxels_in, show=True)
def from_pointcloud(visualize=False):
    """Reconstruct a mesh from a partial, noisy pointcloud with a pretrained
    pointcloud ONet.

    Samples a mesh, crops it along a random axis, subsamples 300 points, adds
    Gaussian noise, and feeds the result to the generator.  Requires CUDA and
    the hard-coded local occupancy_networks checkout.

    :param visualize: show the input pointcloud and the result instead of
        exporting ``smile.off``
    """
    path_prefix = "/home/matthias/Data/Ubuntu/git/occupancy_networks"
    default_path = os.path.join(path_prefix, "configs/default.yaml")
    model_path = os.path.join(path_prefix, "configs/pointcloud/onet_pretrained.yaml")
    cfg = config.load_config(model_path, default_path)
    device = torch.device("cuda")

    mesh = load_mesh("/home/matthias/Data/Ubuntu/data/aae_workspace/models/case.ply")
    # mesh = load_mesh(os.path.join(path_prefix, "data/ShapeNet.build/03797390/2_watertight/cc5b14ef71e87e9165ba97214ebde03.off"))
    mesh = process_mesh(mesh, flip_yz=True)
    points = mesh.sample(100000).astype(np.float32)

    # keep only a random-length slab along a random axis (simulates a
    # partial view of the object)
    side = np.random.randint(3)
    xb = [points[:, side].min(), points[:, side].max()]
    length = np.random.uniform(0.7 * (xb[1] - xb[0]), (xb[1] - xb[0]))
    ind = (points[:, side] - xb[0]) <= length
    points = points[ind]

    # subsample to 300 points (with replacement) and add Gaussian noise
    indices = np.random.randint(points.shape[0], size=300)
    points = points[indices, :]
    noise = 0.005 * np.random.randn(*points.shape)
    noise = noise.astype(np.float32)
    points = points + noise

    if visualize:
        # visualize_pointcloud(points, show=True)
        trimesh.PointCloud(points).show()

    # batch dimension expected by the model
    data = {'inputs': torch.unsqueeze(torch.from_numpy(points), dim=0)}
    model = config.get_model(cfg, device)
    checkpoint_io = CheckpointIO("..", model=model)
    # checkpoint_io.load(os.path.join(path_prefix, cfg['test']['model_file']))
    checkpoint_io.load(cfg['test']['model_file'])
    model.eval()
    print(model)
    generator = config.get_generator(model, cfg, device)
    mesh = generator.generate_mesh(data, return_stats=False)
    if visualize:
        mesh.show()
    else:
        mesh.export("smile.off")
def from_voxel_grid(use_trimesh: bool = True):
    """Reconstruct a mesh from a 32^3 voxel grid with a pretrained voxel ONet.

    Loads a random sample from the configured test set, shows its voxel grid
    and writes the reconstructed mesh to ``smile.off``.  Requires CUDA and the
    hard-coded local occupancy_networks checkout.

    :param use_trimesh: only referenced by the commented-out self-voxelization
        path below; unused in the active code.
    """
    path_prefix = "/home/matthias/Data/Ubuntu/git/occupancy_networks"
    default_path = os.path.join(path_prefix, "configs/default.yaml")
    model_path = os.path.join(path_prefix, "configs/voxels/onet_pretrained.yaml")
    cfg = config.load_config(model_path, default_path)
    device = torch.device("cuda")

    # --- alternative input path: voxelize a mesh yourself (kept for reference) ---
    # mesh = load_mesh("/home/matthias/Data/Ubuntu/data/aae_workspace/models/case.ply")
    # mesh = load_mesh(os.path.join(path_prefix, "data/ShapeNet.build/02876657/2_watertight/1ae823260851f7d9ea600d1a6d9f6e07.off"))
    # mesh, loc, scale = process_mesh(mesh, with_transforms=True, flip_yz=False)
    # assert mesh.is_watertight
    #
    # if use_trimesh:
    #     voxel = trimesh.exchange.binvox.voxelize_mesh(mesh,
    #                                                   dimension=32,
    #                                                   remove_internal=False,
    #                                                   center=True,
    #                                                   binvox_path="/home/matthias/Downloads/binvox")
    #
    #     binvox = trimesh.exchange.binvox.export_binvox(voxel)
    #     with open("smile.binvox", "wb") as f:
    #         f.write(binvox)
    # else:
    #     voxels_occ = voxels.voxelize_ray(mesh, 32)
    #     voxels_out = binvox_rw.Voxels(voxels_occ, (32,) * 3,
    #                                   translate=loc, scale=scale,
    #                                   axis_order="xyz")  # 'xyz' means 'voxel_occ' is in this format
    #     with open("smile.binvox", "wb") as f:
    #         voxels_out.write(f)  # Always writes in 'xzy' format
    #
    # with open("smile.binvox", "rb") as f:
    #     voxels_in = binvox_rw.read_as_3d_array(f)
    # with open(os.path.join(path_prefix, "data/ShapeNet/02958343/1a0bc9ab92c915167ae33d942430658c/model.binvox"), "rb") as f:
    #     voxels_in = binvox_rw.read_as_3d_array(f)
    #
    # voxels_in = voxels_in.data.astype(np.float32)
    # visualize_voxels(voxels_in, show=True)
    # data = {'inputs': torch.unsqueeze(torch.from_numpy(voxels_in), dim=0)}

    # active path: take one random sample from the test set
    dataset = config.get_dataset('test', cfg, return_idx=True)
    test_loader = torch.utils.data.DataLoader(dataset, batch_size=1, num_workers=0, shuffle=True)
    data = next(iter(test_loader))
    visualize_voxels(data["voxels"][0].cpu().numpy(), show=True)

    model = config.get_model(cfg, device, dataset)
    checkpoint_io = CheckpointIO("..", model=model)
    checkpoint_io.load(cfg['test']['model_file'])
    model.eval()
    generator = config.get_generator(model, cfg, device)
    mesh = generator.generate_mesh(data, return_stats=False)
    mesh.export("smile.off")
if __name__ == "__main__":
    # Demo entry point: reconstruct a mesh from a partial, noisy pointcloud.
    from_pointcloud(visualize=True)
| [
"numpy.load",
"trimesh.load",
"im2mesh.checkpoints.CheckpointIO",
"im2mesh.utils.visualize.visualize_pointcloud",
"numpy.random.randint",
"trimesh.exchange.binvox.voxelize_mesh",
"torch.device",
"os.path.join",
"im2mesh.config.get_generator",
"im2mesh.config.get_model",
"im2mesh.utils.voxels.vox... | [((473, 491), 'numpy.load', 'np.load', (['file_path'], {}), '(file_path)\n', (480, 491), True, 'import numpy as np\n'), ((636, 674), 'trimesh.load', 'trimesh.load', (['file_path'], {'process': '(False)'}), '(file_path, process=False)\n', (648, 674), False, 'import trimesh\n'), ((3112, 3155), 'im2mesh.utils.visualize.visualize_pointcloud', 'visualize_pointcloud', (['pointcloud'], {'show': '(True)'}), '(pointcloud, show=True)\n', (3132, 3155), False, 'from im2mesh.utils.visualize import visualize_pointcloud, visualize_voxels\n'), ((3160, 3198), 'im2mesh.utils.visualize.visualize_voxels', 'visualize_voxels', (['voxels_in'], {'show': '(True)'}), '(voxels_in, show=True)\n', (3176, 3198), False, 'from im2mesh.utils.visualize import visualize_pointcloud, visualize_voxels\n'), ((3328, 3377), 'os.path.join', 'os.path.join', (['path_prefix', '"""configs/default.yaml"""'], {}), "(path_prefix, 'configs/default.yaml')\n", (3340, 3377), False, 'import os\n'), ((3395, 3463), 'os.path.join', 'os.path.join', (['path_prefix', '"""configs/pointcloud/onet_pretrained.yaml"""'], {}), "(path_prefix, 'configs/pointcloud/onet_pretrained.yaml')\n", (3407, 3463), False, 'import os\n'), ((3474, 3518), 'im2mesh.config.load_config', 'config.load_config', (['model_path', 'default_path'], {}), '(model_path, default_path)\n', (3492, 3518), False, 'from im2mesh import config\n'), ((3532, 3552), 'torch.device', 'torch.device', (['"""cuda"""'], {}), "('cuda')\n", (3544, 3552), False, 'import torch\n'), ((3879, 3899), 'numpy.random.randint', 'np.random.randint', (['(3)'], {}), '(3)\n', (3896, 3899), True, 'import numpy as np\n'), ((3969, 4024), 'numpy.random.uniform', 'np.random.uniform', (['(0.7 * (xb[1] - xb[0]))', '(xb[1] - xb[0])'], {}), '(0.7 * (xb[1] - xb[0]), xb[1] - xb[0])\n', (3986, 4024), True, 'import numpy as np\n'), ((4113, 4157), 'numpy.random.randint', 'np.random.randint', (['points.shape[0]'], {'size': '(300)'}), '(points.shape[0], size=300)\n', (4130, 
4157), True, 'import numpy as np\n'), ((4502, 4531), 'im2mesh.config.get_model', 'config.get_model', (['cfg', 'device'], {}), '(cfg, device)\n', (4518, 4531), False, 'from im2mesh import config\n'), ((4552, 4583), 'im2mesh.checkpoints.CheckpointIO', 'CheckpointIO', (['""".."""'], {'model': 'model'}), "('..', model=model)\n", (4564, 4583), False, 'from im2mesh.checkpoints import CheckpointIO\n'), ((4764, 4804), 'im2mesh.config.get_generator', 'config.get_generator', (['model', 'cfg', 'device'], {}), '(model, cfg, device)\n', (4784, 4804), False, 'from im2mesh import config\n'), ((5086, 5135), 'os.path.join', 'os.path.join', (['path_prefix', '"""configs/default.yaml"""'], {}), "(path_prefix, 'configs/default.yaml')\n", (5098, 5135), False, 'import os\n'), ((5153, 5217), 'os.path.join', 'os.path.join', (['path_prefix', '"""configs/voxels/onet_pretrained.yaml"""'], {}), "(path_prefix, 'configs/voxels/onet_pretrained.yaml')\n", (5165, 5217), False, 'import os\n'), ((5228, 5272), 'im2mesh.config.load_config', 'config.load_config', (['model_path', 'default_path'], {}), '(model_path, default_path)\n', (5246, 5272), False, 'from im2mesh import config\n'), ((5286, 5306), 'torch.device', 'torch.device', (['"""cuda"""'], {}), "('cuda')\n", (5298, 5306), False, 'import torch\n'), ((7091, 7139), 'im2mesh.config.get_dataset', 'config.get_dataset', (['"""test"""', 'cfg'], {'return_idx': '(True)'}), "('test', cfg, return_idx=True)\n", (7109, 7139), False, 'from im2mesh import config\n'), ((7158, 7237), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['dataset'], {'batch_size': '(1)', 'num_workers': '(0)', 'shuffle': '(True)'}), '(dataset, batch_size=1, num_workers=0, shuffle=True)\n', (7185, 7237), False, 'import torch\n'), ((7352, 7390), 'im2mesh.config.get_model', 'config.get_model', (['cfg', 'device', 'dataset'], {}), '(cfg, device, dataset)\n', (7368, 7390), False, 'from im2mesh import config\n'), ((7411, 7442), 'im2mesh.checkpoints.CheckpointIO', 'CheckpointIO', 
(['""".."""'], {'model': 'model'}), "('..', model=model)\n", (7423, 7442), False, 'from im2mesh.checkpoints import CheckpointIO\n'), ((7527, 7567), 'im2mesh.config.get_generator', 'config.get_generator', (['model', 'cfg', 'device'], {}), '(model, cfg, device)\n', (7547, 7567), False, 'from im2mesh import config\n'), ((342, 371), 'im2mesh.utils.binvox_rw.read_as_3d_array', 'binvox_rw.read_as_3d_array', (['f'], {}), '(f)\n', (368, 371), False, 'from im2mesh.utils import binvox_rw, voxels\n'), ((1299, 1356), 'trimesh.transformations.rotation_matrix', 'trimesh.transformations.rotation_matrix', (['angle', '[1, 0, 0]'], {}), '(angle, [1, 0, 0])\n', (1338, 1356), False, 'import trimesh\n'), ((1968, 2113), 'trimesh.exchange.binvox.voxelize_mesh', 'trimesh.exchange.binvox.voxelize_mesh', (['mesh'], {'dimension': '(32)', 'remove_internal': '(False)', 'center': '(True)', 'binvox_path': '"""/home/matthias/Downloads/binvox"""'}), "(mesh, dimension=32, remove_internal=\n False, center=True, binvox_path='/home/matthias/Downloads/binvox')\n", (2005, 2113), False, 'import trimesh\n'), ((2343, 2387), 'trimesh.exchange.binvox.export_binvox', 'trimesh.exchange.binvox.export_binvox', (['voxel'], {}), '(voxel)\n', (2380, 2387), False, 'import trimesh\n'), ((2528, 2557), 'im2mesh.utils.voxels.voxelize_ray', 'voxels.voxelize_ray', (['mesh', '(32)'], {}), '(mesh, 32)\n', (2547, 2557), False, 'from im2mesh.utils import binvox_rw, voxels\n'), ((2579, 2668), 'im2mesh.utils.binvox_rw.Voxels', 'binvox_rw.Voxels', (['voxels_occ', '((32,) * 3)'], {'translate': 'loc', 'scale': 'scale', 'axis_order': '"""xyz"""'}), "(voxels_occ, (32,) * 3, translate=loc, scale=scale,\n axis_order='xyz')\n", (2595, 2668), False, 'from im2mesh.utils import binvox_rw, voxels\n'), ((2956, 2985), 'im2mesh.utils.binvox_rw.read_as_3d_array', 'binvox_rw.read_as_3d_array', (['f'], {}), '(f)\n', (2982, 2985), False, 'from im2mesh.utils import binvox_rw, voxels\n'), ((4210, 4240), 'numpy.random.randn', 'np.random.randn', 
(['*points.shape'], {}), '(*points.shape)\n', (4225, 4240), True, 'import numpy as np\n'), ((1535, 1572), 'os.path.join', 'os.path.join', (['file_path', '"""points.npz"""'], {}), "(file_path, 'points.npz')\n", (1547, 1572), False, 'import os\n'), ((1619, 1658), 'os.path.join', 'os.path.join', (['file_path', '"""model.binvox"""'], {}), "(file_path, 'model.binvox')\n", (1631, 1658), False, 'import os\n'), ((4455, 4479), 'torch.from_numpy', 'torch.from_numpy', (['points'], {}), '(points)\n', (4471, 4479), False, 'import torch\n'), ((4383, 4409), 'trimesh.PointCloud', 'trimesh.PointCloud', (['points'], {}), '(points)\n', (4401, 4409), False, 'import trimesh\n')] |
#!/usr/bin/python3
# Logistic Equation Library
# Copyright (C) 2016-2018 <NAME> <<EMAIL>>
# SPDX-License-Identifier: Apache-2.0
from __future__ import print_function
__author__ = "<NAME>"
__copyright__ = "Copyright (C) 2016-2018 <NAME>"
__license__ = "Apache License 2.0"
__version__ = "3"
__email__ = "<EMAIL>"
__status__ = "stable"
import numpy as np
import matplotlib.pyplot as plt
from math import pi, sin
class Map(object):
    """Provide the iteration function of a map together with its
    valid growth-parameter (r) and population (y) ranges."""

    def __init__(self, mapname='logistic'):
        """Select a map by name ('cubic', 'logistic' or 'sine').

        Re-raises the lookup error with a friendly message when the
        name is unknown.
        """
        # name -> (rmin, rmax, ymin, ymax, iteration function)
        known_maps = {
            'cubic':    (0, 6.5, 0, 1, lambda r, x: r * x**2 * (1.0 - x)),
            'logistic': (0, 4.0, 0, 1, lambda r, x: r * x * (1.0 - x)),
            'sine':     (0, 2.0, 0, 2, lambda r, x: r * sin(pi * x / 2.0)),
        }
        self.map_name = mapname
        self.map_longname = "%s Equation" % mapname.capitalize()
        try:
            settings = known_maps[mapname]
        except Exception as e:
            # preserve the exception type (KeyError) but improve the message
            raise type(e)('Unknown map name ' + mapname)
        (self.map_rmin, self.map_rmax,
         self.map_ymin, self.map_ymax,
         self.map_function) = settings
        self.map = self._mapper

    @staticmethod
    def ensure(expression, message, *argv):
        """Raise AssertionError with a %-formatted message unless
        the given expression holds."""
        if expression:
            return
        raise AssertionError(message % argv if argv else message)

    def _mapper(self, r, x):
        """Evaluate the selected map at x, after validating that the
        growth parameter r lies inside the map's allowed range."""
        self.ensure(self.map_rmin <= r <= self.map_rmax,
                    'The growth parameter r must be between %g and %g',
                    self.map_rmin, self.map_rmax)
        return self.map_function(r, x)
class Logistic(Map):
    """Compute and plot the time series of a Logistic, Cubic or Sine map."""

    def __init__(self, r, n, x0, s=0, mapname='logistic'):
        Map.__init__(self, mapname)
        self.r = r    # growth rate parameter
        self.n = n    # number of iterations
        self.s = s    # number of iterations to skip in the plot
        self.x0 = x0  # the first initial condition
        self.x = self.y1 = []
        self._dotsonly = False
        self.ensure(n > 0, 'The number of iterations must be greater than zero.')
        self.ensure(s >= 0, 'You cannot skip a negative number of iterations.')
        self.ensure(self.map_ymin <= x0 <= self.map_ymax,
                    'The initial condition x0 should be in [%g, %g].',
                    self.map_ymin, self.map_ymax)

    def _plotline(self, x, y, color):
        """Draw the points (x, y) as dots; when 'plotdots' is set,
        also join them with a semi-transparent line."""
        self.ensure(x.any() and y.any(), '_plotline(): internal error')
        plt.plot(x, y, color=color, linestyle='',
                 markerfacecolor=color, marker='o', markersize=5)
        if self.plotdots:
            plt.plot(x, y, color=color, alpha=0.6)

    def getxy(self, fill_value=None):
        """Fill the numpy vectors 'x' (iteration number 0..n+s) and
        'y1' (orbit of the chosen map starting from x0).

        Computed only once; later calls return immediately.
        """
        if len(self.x) > 0:
            return
        vectlen = self.n + self.s + 1
        self.x = np.arange(vectlen)
        self.y1 = np.arange(0, vectlen, 1.)
        self.y1[0] = self.x0
        for t in range(1, vectlen):
            self.y1[t] = self.map(self.r, self.y1[t - 1])
        return self.x, self.y1

    def plot(self):
        """Plot the time series of the chosen map."""
        self.getxy()
        plt.suptitle('Dynamic Systems and Chaos', fontsize=14, fontweight='bold')
        plt.title(self.map_longname)
        plt.xlabel('time t')
        plt.ylim([self.map_ymin, self.map_ymax])
        plt.grid(True)
        # skip the first s transient iterations when drawing
        self._plotline(self.x[self.s:], self.y1[self.s:], 'mediumseagreen')
        plt.show()

    @property
    def plotdots(self):
        """Whether _plotline() also draws the connecting line."""
        return self._dotsonly

    @plotdots.setter
    def plotdots(self, value):
        """Enable or disable the connecting line in the plots."""
        self._dotsonly = value
class FinalState(Logistic):
    """Derived class for plotting a Final State Diagram """

    # By default, set the initial state to .5,
    # make 3000 iterations and do not plot the first 2000 ones
    def __init__(self, r, n=1000, x0=.5, s=2000, mapname='logistic'):
        Logistic.__init__(self, r, n, x0, s, mapname)

    def getxy(self, fill_value=.5):
        """Set the numpy vectors 'x' and 'y1' containing the values of the
        chosen Map for the first n iterations.

        'x' holds the orbit of the map; 'y1' is a constant level
        (fill_value) used as the ordinate when scattering the states.
        """
        # do not initialize twice the x and y1 vectors
        if len(self.x) > 0: return
        vectlen = self.n + self.s + 1
        self.x = np.full(vectlen, self.x0, dtype=np.float64)
        for t in range(1, vectlen):
            self.x[t] = self.map(self.r, self.x[t-1])
        # BUGFIX: 'np.float' was deprecated in NumPy 1.20 and removed in
        # NumPy 1.24 (AttributeError at runtime); use np.float64 instead,
        # consistent with the 'x' vector above.
        self.y1 = np.full(vectlen, fill_value, dtype=np.float64)
        return self.x, self.y1

    def plot(self):
        """Plot a Final State Diagram """
        self.getxy()
        plt.suptitle('Dynamic Systems and Chaos', fontsize=14, fontweight='bold')
        plt.title('Final State Diagram for the ' + self.map_longname)
        plt.xlim([self.map_ymin, self.map_ymax])
        plt.ylim([0, 1.])
        plt.yticks([])
        plt.grid(True)
        # horizontal guide line at y = .5, where the final states are drawn
        plt.plot([self.map_ymin, self.map_ymax], [.5, .5],
                 color='black', lw=1)
        plt.plot(self.x[self.s:], self.y1[self.s:], color='black', linestyle='',
                 markerfacecolor='black', marker='o', markersize=8)
        plt.text(.1 * self.map_ymax, .4, 'r = %g' % self.r, style='italic',
                 bbox={'facecolor':'red', 'alpha':0.5, 'pad':10})
        plt.show()
class LogisticDiff(Logistic):
    """Derived class for plotting a Logistic/Cubic/Sine Map
       with two different initial conditions, followed by a plot of
       their differences (for a visualization of the Butterfly Effect) """

    def __init__(self, r, n, x0, x1, s=0, mapname='logistic'):
        Logistic.__init__(self, r, n, x0, s, mapname)
        self.ensure(x1 >= self.map_ymin and x1 <= self.map_ymax,
                    'The initial condition x1 should be in [%g, %g].',
                    self.map_ymin, self.map_ymax)
        self.x1 = x1  # The 2nd initial condition
        self.y2 = []

    def getxy(self, fill_value=None):
        """Set the numpy vectors 'x', 'y1', and 'y2' containing
           the iterations (1..n) and the corresponding values
           of the chosen Map """
        # BUGFIX: the parent getxy() returns None once x/y1 are already
        # initialized, so unpacking its result ('x, y1 = ...') raised a
        # TypeError on any call after the first.  Call it for its side
        # effect and read self.x / self.y1 instead.
        super(LogisticDiff, self).getxy()
        # do not initialize twice the vector y2
        if len(self.y2) > 0: return
        self.y2 = np.arange(0, self.n + self.s + 1, 1.)
        self.y2[0] = self.x1
        for t in self.x[1:]:
            self.y2[t] = self.map(self.r, self.y2[t-1])
        return self.x, self.y1, self.y2

    def getdiffy(self):
        """Return the difference between the two vectors y2 and y1 """
        return self.y2 - self.y1

    def plot(self):
        """Plot a Logistic, Cubic or Sine map with two different seeds
        (two plots) followed by their difference."""
        self.getxy()
        plt.figure(1)
        plt.suptitle('Dynamic Systems and Chaos',
                     fontsize=14, fontweight='bold')
        plt.subplot(211)
        plt.title('Time series for a ' + self.map_longname + \
                  ' with two different initial conditions')
        plt.ylabel(r'$y_1(t),\ y_2(t)$', fontsize=14)
        plt.ylim([self.map_ymin, self.map_ymax])
        plt.grid(True)
        self._plotline(self.x[self.s:], self.y1[self.s:], 'indianred')
        self._plotline(self.x[self.s:], self.y2[self.s:], 'mediumseagreen')
        ydiff = self.y2 - self.y1
        plt.subplot(212)
        plt.title('Difference between the two time series')
        plt.xlabel('time t')
        plt.ylabel(r'$y_2(t) - y_1(t)$', fontsize=14)
        plt.grid(True)
        self._plotline(self.x[self.s:], ydiff[self.s:], 'royalblue')
        plt.show()
class Bifurcation(Map):
    """Plot a Logistic/Cubic/Sine Bifurcation Diagram."""

    def __init__(self, r, y, n=100, s=200, mapname='logistic'):
        """Validate and store the r interval, the y interval and the
        iteration counts used by plot()."""
        Map.__init__(self, mapname)
        # both intervals must be two-element, ascending and inside the
        # map's own valid ranges
        self.ensure(len(r) == 2, 'The growth rate vector should contains two elements')
        self.ensure(self.map_rmin <= r[0] < r[1] <= self.map_rmax,
                    ('The parameters [r0, r1] must be between %g and %g, '
                     'and in ascending order.'), self.map_rmin, self.map_rmax)
        self.ensure(len(y) == 2, 'The y range vector should contains two elements')
        self.ensure(self.map_ymin <= y[0] < y[1] <= self.map_ymax,
                    ('The parameters [y0, y1] must be between %g and %g, '
                     'and in ascending order.'), self.map_ymin, self.map_ymax)
        # Range of the growth rate for plot()
        self.rmin, self.rmax = r[0], r[1]
        # Range of the population for plot()
        self.ymin, self.ymax = y[0], y[1]
        self.ensure(n > 0, 'The number of iterations must be greater than zero.')
        self.n = n  # Number of iterations
        self.ensure(s >= 0, 'You cannot skip a negative number of iterations.')
        self.s = s  # Number of iterations to skip in the plot

    def plot(self):
        """Draw the diagram: for 1000 values of r in [rmin, rmax],
        scatter the final states of the map computed by FinalState."""
        plt.suptitle('Dynamic Systems and Chaos', fontsize=14, fontweight='bold')
        plt.title('Bifurcation Diagram for the ' + self.map_longname)
        plt.xlim([self.rmin, self.rmax])
        plt.xticks([round(i, 1) for i in np.linspace(self.rmin, self.rmax, 5)])
        plt.xlabel('r')
        plt.ylim([self.ymin, self.ymax])
        plt.ylabel('final states')
        for r in np.linspace(self.rmin, self.rmax, 1000):
            x, y = FinalState(r, self.n, .5, self.s, self.map_name).getxy(r)
            plt.plot(y[self.s:], x[self.s:], color='black', linestyle='',
                     markerfacecolor='black', marker=',', markersize=1)
        plt.show()
if __name__ == '__main__':
    # Run the module self-tests (provided by the companion lelib_test module)
    # when this file is executed as a script.
    from lelib_test import tests
    tests()
    print("All tests successfully passed!")
| [
"matplotlib.pyplot.title",
"numpy.full",
"matplotlib.pyplot.xlim",
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.show",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.ylim",
"matplotlib.pyplot.suptitle",
"matplotlib.pyplot.yticks",
"math.sin",
"matplotlib.pyplot.text",
"matplotlib.pyplot.figur... | [((10075, 10082), 'lelib_test.tests', 'tests', ([], {}), '()\n', (10080, 10082), False, 'from lelib_test import tests\n'), ((2687, 2781), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y'], {'color': 'color', 'linestyle': '""""""', 'markerfacecolor': 'color', 'marker': '"""o"""', 'markersize': '(5)'}), "(x, y, color=color, linestyle='', markerfacecolor=color, marker='o',\n markersize=5)\n", (2695, 2781), True, 'import matplotlib.pyplot as plt\n'), ((3213, 3231), 'numpy.arange', 'np.arange', (['vectlen'], {}), '(vectlen)\n', (3222, 3231), True, 'import numpy as np\n'), ((3251, 3277), 'numpy.arange', 'np.arange', (['(0)', 'vectlen', '(1.0)'], {}), '(0, vectlen, 1.0)\n', (3260, 3277), True, 'import numpy as np\n'), ((3525, 3598), 'matplotlib.pyplot.suptitle', 'plt.suptitle', (['"""Dynamic Systems and Chaos"""'], {'fontsize': '(14)', 'fontweight': '"""bold"""'}), "('Dynamic Systems and Chaos', fontsize=14, fontweight='bold')\n", (3537, 3598), True, 'import matplotlib.pyplot as plt\n'), ((3607, 3635), 'matplotlib.pyplot.title', 'plt.title', (['self.map_longname'], {}), '(self.map_longname)\n', (3616, 3635), True, 'import matplotlib.pyplot as plt\n'), ((3644, 3664), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""time t"""'], {}), "('time t')\n", (3654, 3664), True, 'import matplotlib.pyplot as plt\n'), ((3673, 3713), 'matplotlib.pyplot.ylim', 'plt.ylim', (['[self.map_ymin, self.map_ymax]'], {}), '([self.map_ymin, self.map_ymax])\n', (3681, 3713), True, 'import matplotlib.pyplot as plt\n'), ((3722, 3736), 'matplotlib.pyplot.grid', 'plt.grid', (['(True)'], {}), '(True)\n', (3730, 3736), True, 'import matplotlib.pyplot as plt\n'), ((3822, 3832), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3830, 3832), True, 'import matplotlib.pyplot as plt\n'), ((4694, 4737), 'numpy.full', 'np.full', (['vectlen', 'self.x0'], {'dtype': 'np.float64'}), '(vectlen, self.x0, dtype=np.float64)\n', (4701, 4737), True, 'import numpy as np\n'), ((4847, 
4891), 'numpy.full', 'np.full', (['vectlen', 'fill_value'], {'dtype': 'np.float'}), '(vectlen, fill_value, dtype=np.float)\n', (4854, 4891), True, 'import numpy as np\n'), ((5018, 5091), 'matplotlib.pyplot.suptitle', 'plt.suptitle', (['"""Dynamic Systems and Chaos"""'], {'fontsize': '(14)', 'fontweight': '"""bold"""'}), "('Dynamic Systems and Chaos', fontsize=14, fontweight='bold')\n", (5030, 5091), True, 'import matplotlib.pyplot as plt\n'), ((5100, 5161), 'matplotlib.pyplot.title', 'plt.title', (["('Final State Diagram for the ' + self.map_longname)"], {}), "('Final State Diagram for the ' + self.map_longname)\n", (5109, 5161), True, 'import matplotlib.pyplot as plt\n'), ((5171, 5211), 'matplotlib.pyplot.xlim', 'plt.xlim', (['[self.map_ymin, self.map_ymax]'], {}), '([self.map_ymin, self.map_ymax])\n', (5179, 5211), True, 'import matplotlib.pyplot as plt\n'), ((5220, 5238), 'matplotlib.pyplot.ylim', 'plt.ylim', (['[0, 1.0]'], {}), '([0, 1.0])\n', (5228, 5238), True, 'import matplotlib.pyplot as plt\n'), ((5246, 5260), 'matplotlib.pyplot.yticks', 'plt.yticks', (['[]'], {}), '([])\n', (5256, 5260), True, 'import matplotlib.pyplot as plt\n'), ((5270, 5284), 'matplotlib.pyplot.grid', 'plt.grid', (['(True)'], {}), '(True)\n', (5278, 5284), True, 'import matplotlib.pyplot as plt\n'), ((5294, 5367), 'matplotlib.pyplot.plot', 'plt.plot', (['[self.map_ymin, self.map_ymax]', '[0.5, 0.5]'], {'color': '"""black"""', 'lw': '(1)'}), "([self.map_ymin, self.map_ymax], [0.5, 0.5], color='black', lw=1)\n", (5302, 5367), True, 'import matplotlib.pyplot as plt\n'), ((5391, 5518), 'matplotlib.pyplot.plot', 'plt.plot', (['self.x[self.s:]', 'self.y1[self.s:]'], {'color': '"""black"""', 'linestyle': '""""""', 'markerfacecolor': '"""black"""', 'marker': '"""o"""', 'markersize': '(8)'}), "(self.x[self.s:], self.y1[self.s:], color='black', linestyle='',\n markerfacecolor='black', marker='o', markersize=8)\n", (5399, 5518), True, 'import matplotlib.pyplot as plt\n'), ((5540, 5666), 
'matplotlib.pyplot.text', 'plt.text', (['(0.1 * self.map_ymax)', '(0.4)', "('r = %g' % self.r)"], {'style': '"""italic"""', 'bbox': "{'facecolor': 'red', 'alpha': 0.5, 'pad': 10}"}), "(0.1 * self.map_ymax, 0.4, 'r = %g' % self.r, style='italic', bbox=\n {'facecolor': 'red', 'alpha': 0.5, 'pad': 10})\n", (5548, 5666), True, 'import matplotlib.pyplot as plt\n'), ((5683, 5693), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5691, 5693), True, 'import matplotlib.pyplot as plt\n'), ((6659, 6697), 'numpy.arange', 'np.arange', (['(0)', '(self.n + self.s + 1)', '(1.0)'], {}), '(0, self.n + self.s + 1, 1.0)\n', (6668, 6697), True, 'import numpy as np\n'), ((7151, 7164), 'matplotlib.pyplot.figure', 'plt.figure', (['(1)'], {}), '(1)\n', (7161, 7164), True, 'import matplotlib.pyplot as plt\n'), ((7173, 7246), 'matplotlib.pyplot.suptitle', 'plt.suptitle', (['"""Dynamic Systems and Chaos"""'], {'fontsize': '(14)', 'fontweight': '"""bold"""'}), "('Dynamic Systems and Chaos', fontsize=14, fontweight='bold')\n", (7185, 7246), True, 'import matplotlib.pyplot as plt\n'), ((7277, 7293), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(211)'], {}), '(211)\n', (7288, 7293), True, 'import matplotlib.pyplot as plt\n'), ((7302, 7400), 'matplotlib.pyplot.title', 'plt.title', (["('Time series for a ' + self.map_longname +\n ' with two different initial conditions')"], {}), "('Time series for a ' + self.map_longname +\n ' with two different initial conditions')\n", (7311, 7400), True, 'import matplotlib.pyplot as plt\n'), ((7425, 7470), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""$y_1(t),\\\\ y_2(t)$"""'], {'fontsize': '(14)'}), "('$y_1(t),\\\\ y_2(t)$', fontsize=14)\n", (7435, 7470), True, 'import matplotlib.pyplot as plt\n'), ((7479, 7519), 'matplotlib.pyplot.ylim', 'plt.ylim', (['[self.map_ymin, self.map_ymax]'], {}), '([self.map_ymin, self.map_ymax])\n', (7487, 7519), True, 'import matplotlib.pyplot as plt\n'), ((7528, 7542), 'matplotlib.pyplot.grid', 'plt.grid', (['(True)'], 
{}), '(True)\n', (7536, 7542), True, 'import matplotlib.pyplot as plt\n'), ((7734, 7750), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(212)'], {}), '(212)\n', (7745, 7750), True, 'import matplotlib.pyplot as plt\n'), ((7759, 7810), 'matplotlib.pyplot.title', 'plt.title', (['"""Difference between the two time series"""'], {}), "('Difference between the two time series')\n", (7768, 7810), True, 'import matplotlib.pyplot as plt\n'), ((7819, 7839), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""time t"""'], {}), "('time t')\n", (7829, 7839), True, 'import matplotlib.pyplot as plt\n'), ((7848, 7892), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""$y_2(t) - y_1(t)$"""'], {'fontsize': '(14)'}), "('$y_2(t) - y_1(t)$', fontsize=14)\n", (7858, 7892), True, 'import matplotlib.pyplot as plt\n'), ((7902, 7916), 'matplotlib.pyplot.grid', 'plt.grid', (['(True)'], {}), '(True)\n', (7910, 7916), True, 'import matplotlib.pyplot as plt\n'), ((7995, 8005), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (8003, 8005), True, 'import matplotlib.pyplot as plt\n'), ((9340, 9413), 'matplotlib.pyplot.suptitle', 'plt.suptitle', (['"""Dynamic Systems and Chaos"""'], {'fontsize': '(14)', 'fontweight': '"""bold"""'}), "('Dynamic Systems and Chaos', fontsize=14, fontweight='bold')\n", (9352, 9413), True, 'import matplotlib.pyplot as plt\n'), ((9422, 9483), 'matplotlib.pyplot.title', 'plt.title', (["('Bifurcation Diagram for the ' + self.map_longname)"], {}), "('Bifurcation Diagram for the ' + self.map_longname)\n", (9431, 9483), True, 'import matplotlib.pyplot as plt\n'), ((9493, 9525), 'matplotlib.pyplot.xlim', 'plt.xlim', (['[self.rmin, self.rmax]'], {}), '([self.rmin, self.rmax])\n', (9501, 9525), True, 'import matplotlib.pyplot as plt\n'), ((9614, 9629), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""r"""'], {}), "('r')\n", (9624, 9629), True, 'import matplotlib.pyplot as plt\n'), ((9639, 9671), 'matplotlib.pyplot.ylim', 'plt.ylim', (['[self.ymin, self.ymax]'], {}), 
'([self.ymin, self.ymax])\n', (9647, 9671), True, 'import matplotlib.pyplot as plt\n'), ((9680, 9706), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""final states"""'], {}), "('final states')\n", (9690, 9706), True, 'import matplotlib.pyplot as plt\n'), ((9725, 9764), 'numpy.linspace', 'np.linspace', (['self.rmin', 'self.rmax', '(1000)'], {}), '(self.rmin, self.rmax, 1000)\n', (9736, 9764), True, 'import numpy as np\n'), ((9998, 10008), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (10006, 10008), True, 'import matplotlib.pyplot as plt\n'), ((2834, 2872), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y'], {'color': 'color', 'alpha': '(0.6)'}), '(x, y, color=color, alpha=0.6)\n', (2842, 2872), True, 'import matplotlib.pyplot as plt\n'), ((9855, 9971), 'matplotlib.pyplot.plot', 'plt.plot', (['y[self.s:]', 'x[self.s:]'], {'color': '"""black"""', 'linestyle': '""""""', 'markerfacecolor': '"""black"""', 'marker': '""","""', 'markersize': '(1)'}), "(y[self.s:], x[self.s:], color='black', linestyle='',\n markerfacecolor='black', marker=',', markersize=1)\n", (9863, 9971), True, 'import matplotlib.pyplot as plt\n'), ((9567, 9603), 'numpy.linspace', 'np.linspace', (['self.rmin', 'self.rmax', '(5)'], {}), '(self.rmin, self.rmax, 5)\n', (9578, 9603), True, 'import numpy as np\n'), ((839, 856), 'math.sin', 'sin', (['(pi * x / 2.0)'], {}), '(pi * x / 2.0)\n', (842, 856), False, 'from math import pi, sin\n')] |
# ---
# jupyter:
# jupytext:
# formats: ipynb,py:light
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.4'
# jupytext_version: 1.2.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Distributed rendering with extension
#
# This test demonstrates distributed rendering with user-defined component.
# %load_ext autoreload
# %autoreload 2
import os
import imageio
import pandas as pd
import numpy as np
import multiprocessing as mp
# %matplotlib inline
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1 import make_axes_locatable
import lmfunctest as ft
import lmscene
import lightmetrica as lm
os.getpid()
# %load_ext lightmetrica_jupyter
# ### Worker process
#
# To create an process on Jupyter notebook in Windows, we need to separate the function to be processed in a different file and add the invocation of the process must be enclosed by `if __name__ == '__main__'` clause.
# + {"magic_args": "_lm_renderer_ao.py"}
# %%writefile _lm_renderer_ao.py
import lightmetrica as lm
import pickle
import numpy as np
@lm.pylm_component('renderer::ao')
class Renderer_AO(lm.Renderer):
    """Simple ambient occlusion renderer"""
    def construct(self, prop):
        # Resolve the output film component from the locator in `prop`;
        # fail construction if it is missing or of the wrong type.
        self.film = lm.Film.castFrom(lm.comp.get(prop['output']))
        if self.film is None:
            return False
        self.spp = prop['spp']  # samples (occlusion rays) per pixel
        return True
    def save(self):
        # Serialize only the film locator and spp; the film object itself
        # is re-resolved from the locator in load().
        return pickle.dumps((self.film.loc(), self.spp))
    def load(self, s):
        loc, self.spp = pickle.loads(s)
        self.film = lm.Film.castFrom(lm.comp.get(loc))
    def render(self, scene):
        w = self.film.size().w
        h = self.film.size().h
        rng = lm.Rng(42)  # fixed seed for reproducible output
        def process(index, threadid):
            # Flat pixel index -> 2D raster coordinates.
            x = index % w
            y = int(index / w)
            # Pixel center in normalized raster space.
            rp = np.array([(x+.5)/w, (y+.5)/h])
            ray = scene.primaryRay(rp, self.film.aspectRatio())
            hit = scene.intersect(ray)
            if hit is None:
                return
            # Estimate visibility: cast spp cosine-weighted rays from the
            # hit point and count those that escape within distance .2
            # (the ambient-occlusion radius).
            V = 0
            for i in range(self.spp):
                n, u, v = hit.geom.orthonormalBasis(-ray.d)
                d = lm.math.sampleCosineWeighted(rng)
                # transform the sampled direction into the local frame
                r = lm.Ray(hit.geom.p, np.dot(d, [u,v,n]))
                if scene.intersect(r, lm.Eps, .2) is None:
                    V += 1
            V /= self.spp
            self.film.setPixel(x, y, np.full(3, V))
        # process() is invoked once per pixel by the parallel scheduler.
        lm.parallel.foreach(w*h, process)
# + {"magic_args": "_run_worker_process.py"}
# %%writefile _run_worker_process.py
import os
import uuid
import traceback
import lightmetrica as lm
import _lm_renderer_ao
def run_worker_process():
    """Run a lightmetrica distributed-rendering worker in this process.

    Initializes the framework, connects to the master at localhost:5000,
    and blocks in the worker loop until the session ends.
    """
    try:
        lm.init('user::default', {})
        lm.info()
        # Raise the severity threshold to silence routine log output.
        lm.log.setSeverity(1000)
        lm.log.log(lm.log.LogLevel.Err, lm.log.LogLevel.Info, '', 0, 'pid={}'.format(os.getpid()))
        lm.dist.worker.init('dist::worker::default', {
            # random hex name so each spawned worker is unique
            'name': uuid.uuid4().hex,
            'address': 'localhost',
            'port': 5000,
            'numThreads': 1
        })
        lm.dist.worker.run()
        lm.dist.shutdown()
        lm.shutdown()
    except Exception:
        # Worker runs in a child process, so surface the traceback
        # through the lightmetrica logger.
        # NOTE(review): traceback.print_exc() returns None, so str(tr)
        # logs the string 'None' -- format_exc() was probably intended.
        tr = traceback.print_exc()
        lm.log.log(lm.log.LogLevel.Err, lm.log.LogLevel.Info, '', 0, str(tr))
# -
from _run_worker_process import *
# Spawn two worker processes.  The __main__ guard is required on Windows,
# where multiprocessing re-imports this module inside each child process.
if __name__ == '__main__':
    pool = mp.Pool(2, run_worker_process)
# ### Master process
import _lm_renderer_ao
lm.init()
lm.log.init('logger::jupyter', {})
lm.progress.init('progress::jupyter', {})
# Start the distributed-rendering master listening on port 5000.
lm.dist.init('dist::master::default', {
    'port': 5000
})
lm.dist.printWorkerInfo()
# Scene setup: fireplace_room with an SAH BVH acceleration structure
# and a 320x180 bitmap film driven by the user-defined AO renderer.
lmscene.load(ft.env.scene_path, 'fireplace_room')
lm.build('accel::sahbvh', {})
lm.asset('film_output', 'film::bitmap', {'w': 320, 'h': 180})
lm.renderer('renderer::ao', {
    'output': lm.asset('film_output'),
    'spp': 3
})
# Freeze the worker set, synchronize state to the workers, render,
# then gather the film contributions back to the master.
lm.dist.allowWorkerConnection(False)
lm.dist.sync()
lm.render()
lm.dist.gatherFilm(lm.asset('film_output'))
lm.dist.allowWorkerConnection(True)
# Display the gathered film with gamma correction (1/2.2).
img = np.copy(lm.buffer(lm.asset('film_output')))
f = plt.figure(figsize=(15,15))
ax = f.add_subplot(111)
ax.imshow(np.clip(np.power(img,1/2.2),0,1), origin='lower')
plt.show()
# Termination of the worker process is necessary for Windows
# because fork() is not supported in Windows.
# cf. https://docs.python.org/3/library/multiprocessing.html#contexts-and-start-methods
pool.terminate()
pool.join()
| [
"lightmetrica.log.setSeverity",
"lightmetrica.init",
"lightmetrica.dist.init",
"lightmetrica.dist.sync",
"matplotlib.pyplot.figure",
"lightmetrica.log.init",
"numpy.full",
"lightmetrica.dist.allowWorkerConnection",
"lightmetrica.comp.get",
"traceback.print_exc",
"lightmetrica.dist.shutdown",
"... | [((724, 735), 'os.getpid', 'os.getpid', ([], {}), '()\n', (733, 735), False, 'import os\n'), ((1148, 1181), 'lightmetrica.pylm_component', 'lm.pylm_component', (['"""renderer::ao"""'], {}), "('renderer::ao')\n", (1165, 1181), True, 'import lightmetrica as lm\n'), ((3474, 3483), 'lightmetrica.init', 'lm.init', ([], {}), '()\n', (3481, 3483), True, 'import lightmetrica as lm\n'), ((3484, 3518), 'lightmetrica.log.init', 'lm.log.init', (['"""logger::jupyter"""', '{}'], {}), "('logger::jupyter', {})\n", (3495, 3518), True, 'import lightmetrica as lm\n'), ((3519, 3560), 'lightmetrica.progress.init', 'lm.progress.init', (['"""progress::jupyter"""', '{}'], {}), "('progress::jupyter', {})\n", (3535, 3560), True, 'import lightmetrica as lm\n'), ((3561, 3614), 'lightmetrica.dist.init', 'lm.dist.init', (['"""dist::master::default"""', "{'port': 5000}"], {}), "('dist::master::default', {'port': 5000})\n", (3573, 3614), True, 'import lightmetrica as lm\n'), ((3621, 3646), 'lightmetrica.dist.printWorkerInfo', 'lm.dist.printWorkerInfo', ([], {}), '()\n', (3644, 3646), True, 'import lightmetrica as lm\n'), ((3648, 3697), 'lmscene.load', 'lmscene.load', (['ft.env.scene_path', '"""fireplace_room"""'], {}), "(ft.env.scene_path, 'fireplace_room')\n", (3660, 3697), False, 'import lmscene\n'), ((3698, 3727), 'lightmetrica.build', 'lm.build', (['"""accel::sahbvh"""', '{}'], {}), "('accel::sahbvh', {})\n", (3706, 3727), True, 'import lightmetrica as lm\n'), ((3728, 3789), 'lightmetrica.asset', 'lm.asset', (['"""film_output"""', '"""film::bitmap"""', "{'w': 320, 'h': 180}"], {}), "('film_output', 'film::bitmap', {'w': 320, 'h': 180})\n", (3736, 3789), True, 'import lightmetrica as lm\n'), ((3876, 3912), 'lightmetrica.dist.allowWorkerConnection', 'lm.dist.allowWorkerConnection', (['(False)'], {}), '(False)\n', (3905, 3912), True, 'import lightmetrica as lm\n'), ((3913, 3927), 'lightmetrica.dist.sync', 'lm.dist.sync', ([], {}), '()\n', (3925, 3927), True, 'import lightmetrica as 
lm\n'), ((3928, 3939), 'lightmetrica.render', 'lm.render', ([], {}), '()\n', (3937, 3939), True, 'import lightmetrica as lm\n'), ((3984, 4019), 'lightmetrica.dist.allowWorkerConnection', 'lm.dist.allowWorkerConnection', (['(True)'], {}), '(True)\n', (4013, 4019), True, 'import lightmetrica as lm\n'), ((4075, 4103), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(15, 15)'}), '(figsize=(15, 15))\n', (4085, 4103), True, 'import matplotlib.pyplot as plt\n'), ((4187, 4197), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4195, 4197), True, 'import matplotlib.pyplot as plt\n'), ((3396, 3426), 'multiprocessing.Pool', 'mp.Pool', (['(2)', 'run_worker_process'], {}), '(2, run_worker_process)\n', (3403, 3426), True, 'import multiprocessing as mp\n'), ((3959, 3982), 'lightmetrica.asset', 'lm.asset', (['"""film_output"""'], {}), "('film_output')\n", (3967, 3982), True, 'import lightmetrica as lm\n'), ((1596, 1611), 'pickle.loads', 'pickle.loads', (['s'], {}), '(s)\n', (1608, 1611), False, 'import pickle\n'), ((1773, 1783), 'lightmetrica.Rng', 'lm.Rng', (['(42)'], {}), '(42)\n', (1779, 1783), True, 'import lightmetrica as lm\n'), ((2482, 2517), 'lightmetrica.parallel.foreach', 'lm.parallel.foreach', (['(w * h)', 'process'], {}), '(w * h, process)\n', (2501, 2517), True, 'import lightmetrica as lm\n'), ((2731, 2759), 'lightmetrica.init', 'lm.init', (['"""user::default"""', '{}'], {}), "('user::default', {})\n", (2738, 2759), True, 'import lightmetrica as lm\n'), ((2768, 2777), 'lightmetrica.info', 'lm.info', ([], {}), '()\n', (2775, 2777), True, 'import lightmetrica as lm\n'), ((2786, 2810), 'lightmetrica.log.setSeverity', 'lm.log.setSeverity', (['(1000)'], {}), '(1000)\n', (2804, 2810), True, 'import lightmetrica as lm\n'), ((3112, 3132), 'lightmetrica.dist.worker.run', 'lm.dist.worker.run', ([], {}), '()\n', (3130, 3132), True, 'import lightmetrica as lm\n'), ((3141, 3159), 'lightmetrica.dist.shutdown', 'lm.dist.shutdown', ([], {}), '()\n', (3157, 3159), 
True, 'import lightmetrica as lm\n'), ((3168, 3181), 'lightmetrica.shutdown', 'lm.shutdown', ([], {}), '()\n', (3179, 3181), True, 'import lightmetrica as lm\n'), ((3834, 3857), 'lightmetrica.asset', 'lm.asset', (['"""film_output"""'], {}), "('film_output')\n", (3842, 3857), True, 'import lightmetrica as lm\n'), ((4045, 4068), 'lightmetrica.asset', 'lm.asset', (['"""film_output"""'], {}), "('film_output')\n", (4053, 4068), True, 'import lightmetrica as lm\n'), ((4145, 4167), 'numpy.power', 'np.power', (['img', '(1 / 2.2)'], {}), '(img, 1 / 2.2)\n', (4153, 4167), True, 'import numpy as np\n'), ((1327, 1354), 'lightmetrica.comp.get', 'lm.comp.get', (["prop['output']"], {}), "(prop['output'])\n", (1338, 1354), True, 'import lightmetrica as lm\n'), ((1649, 1665), 'lightmetrica.comp.get', 'lm.comp.get', (['loc'], {}), '(loc)\n', (1660, 1665), True, 'import lightmetrica as lm\n'), ((1896, 1936), 'numpy.array', 'np.array', (['[(x + 0.5) / w, (y + 0.5) / h]'], {}), '([(x + 0.5) / w, (y + 0.5) / h])\n', (1904, 1936), True, 'import numpy as np\n'), ((3217, 3238), 'traceback.print_exc', 'traceback.print_exc', ([], {}), '()\n', (3236, 3238), False, 'import traceback\n'), ((2217, 2250), 'lightmetrica.math.sampleCosineWeighted', 'lm.math.sampleCosineWeighted', (['rng'], {}), '(rng)\n', (2245, 2250), True, 'import lightmetrica as lm\n'), ((2459, 2472), 'numpy.full', 'np.full', (['(3)', 'V'], {}), '(3, V)\n', (2466, 2472), True, 'import numpy as np\n'), ((2896, 2907), 'os.getpid', 'os.getpid', ([], {}), '()\n', (2905, 2907), False, 'import os\n'), ((2290, 2310), 'numpy.dot', 'np.dot', (['d', '[u, v, n]'], {}), '(d, [u, v, n])\n', (2296, 2310), True, 'import numpy as np\n'), ((2985, 2997), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (2995, 2997), False, 'import uuid\n')] |
""" find peaks in a periodic signal such as ECG
Assume:
1. cycle is stable and have one strong period signal
2. target peaks have enough features
Proprosal:
1. peak value features
2. cycle priors
3. cycle transition priors
4. autodetect to detech peaks or valleys
Implemention:
1. merge findpeaks results
2. analysis peak results
3. adjust findpeaks function parameters
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from collections import deque
import peakutils
from absl import app
import pdb
class AutoPeaks(object):
    """ online findpeaks for periodic signals

    Feed samples one at a time via findpeaks(); detected peaks accumulate in
    internal buffers and are drained through the peak_values / peak_indexes
    properties. Peak detection itself is delegated to peakutils.indexes.
    """
    def __init__(self, thres=0.65, min_dist=80, fs=200, buffer_size=None):
        # thres / min_dist are forwarded verbatim to peakutils.indexes;
        # fs (sampling rate) is only used to size the default rolling window
        # (3 seconds worth of samples).
        if buffer_size is None:
            buffer_size = int(fs * 3)
        self._signal_buffer = deque(maxlen=buffer_size)  # rolling window of raw samples
        self._peak_value_buffer = deque()  # amplitudes of accepted peaks
        self._peak_index_buffer = deque()  # global sample indexes of accepted peaks
        self._signal_index = -1 # starts from 0
        self._update_counter = 0  # samples seen since the last peak search
        self.thres = thres
        self.min_dist = min_dist
    @property
    def peak_values(self):
        """ get the peak values """
        # Draining read: returns everything collected so far and empties the buffer.
        values = list(self._peak_value_buffer)
        self._peak_value_buffer.clear()
        return values
        #while self._peak_value_buffer:
        #    index = self._peak_value_buffer.popleft()
        #    yield index
    @property
    def peak_indexes(self):
        """ get the peak indexes """
        # Draining read: returns everything collected so far and empties the buffer.
        indexes = list(self._peak_index_buffer)
        self._peak_index_buffer.clear()
        return indexes
        #while self._peak_index_buffer:
        #    index = self._peak_index_buffer.popleft()
        #    yield index
    def clear(self):
        """ clear signal buffer and reset signal index"""
        self._signal_buffer.clear()
        self._signal_index = -1
    def findpeaks(self, value):
        """ find peaks in signal buffer,
            add peak indexes and peak values to buffer
        """
        self._signal_index += 1
        self._update_counter += 1
        signal = self._signal_buffer
        signal.append(value)
        # Wait until the rolling window is completely filled.
        if len(signal) != signal.maxlen:
            return
        # Re-run the (comparatively expensive) search only every half window.
        update_thres = signal.maxlen / 2
        if self._update_counter < update_thres:
            return
        self._update_counter = 0
        ipks = peakutils.indexes(
            np.asarray(signal), thres=self.thres, min_dist=self.min_dist)
        # Offset translating window-local peak positions to global indexes.
        ipk_offset = self._signal_index - signal.maxlen + 1
        # Only accept peaks from the middle half of the window; peaks near the
        # edges may be truncated and will be seen again by a later search.
        chosen_ipks_start = int( signal.maxlen / 4 )
        chosen_ipks_end = signal.maxlen - int( signal.maxlen / 4 )
        for ipk in ipks:
            # Very first full window (offset 0): also keep the early peaks,
            # since no previous search could have reported them.
            if ipk_offset == 0 and ipk < chosen_ipks_end:
                self._peak_index_buffer.append(ipk)
                self._peak_value_buffer.append(signal[ipk])
                continue
            if ipk < chosen_ipks_start or ipk >= chosen_ipks_end:
                continue
            self._peak_index_buffer.append(ipk + ipk_offset)
            self._peak_value_buffer.append(signal[ipk])
def main(argv):
    """Demo: detect peaks in the first 5000 ECG samples of one ISRUC-Sleep
    recording and plot the signal with the detected peaks marked."""
    del argv  # unused; absl passes the parsed CLI arguments here
    from scipy.io import loadmat
    # NOTE(review): hard-coded local path -- adjust before running elsewhere.
    filepath = "/home/guo/data/sleepstage/database/ISRUC_Sleep/" + \
"ExtractedChannels/subgroupI-Extractedchannels/subject1.mat"
    data = loadmat(filepath)
    ecg = data["X2"].flatten()[:5000]  # "X2" is presumably the ECG channel -- confirm
    autopeaks = AutoPeaks(thres=0.65, min_dist=80, fs=200)
    # Feed the signal sample by sample, emulating an online setting.
    list(map(autopeaks.findpeaks, ecg))
    ipks = autopeaks.peak_indexes
    import matplotlib.pyplot as plt
    fig = plt.figure()
    ax = fig.add_subplot(111)
    ax.plot(ecg)
    ax.plot(ipks, ecg[ipks], 'o')
    plt.show()
# absl entry point
if __name__ == "__main__":
    app.run(main)
| [
"matplotlib.pyplot.show",
"scipy.io.loadmat",
"numpy.asarray",
"matplotlib.pyplot.figure",
"absl.app.run",
"collections.deque"
] | [((3332, 3349), 'scipy.io.loadmat', 'loadmat', (['filepath'], {}), '(filepath)\n', (3339, 3349), False, 'from scipy.io import loadmat\n'), ((3567, 3579), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (3577, 3579), True, 'import matplotlib.pyplot as plt\n'), ((3665, 3675), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3673, 3675), True, 'import matplotlib.pyplot as plt\n'), ((3712, 3725), 'absl.app.run', 'app.run', (['main'], {}), '(main)\n', (3719, 3725), False, 'from absl import app\n'), ((903, 928), 'collections.deque', 'deque', ([], {'maxlen': 'buffer_size'}), '(maxlen=buffer_size)\n', (908, 928), False, 'from collections import deque\n'), ((963, 970), 'collections.deque', 'deque', ([], {}), '()\n', (968, 970), False, 'from collections import deque\n'), ((1005, 1012), 'collections.deque', 'deque', ([], {}), '()\n', (1010, 1012), False, 'from collections import deque\n'), ((2449, 2467), 'numpy.asarray', 'np.asarray', (['signal'], {}), '(signal)\n', (2459, 2467), True, 'import numpy as np\n')] |
import numpy as np
import os
import time
from utils import lp_norm
class oracle:
    """Decision oracle for adversarial-style queries.

    A candidate sample passes the oracle when its Lp distance from the
    reference input, averaged over the number of entries that differ,
    is within ``radius``.
    """
    def __init__(self, input, lp, radius):
        # Reference sample, Lp order (forwarded to np.linalg.norm) and threshold.
        self.input = input
        self.measurement = lp
        self.radius = radius
    def passOracle(self, test):
        """Return True iff the per-changed-entry Lp distance is <= radius.

        A sample identical to the reference trivially passes (distance 0);
        the original implementation divided by zero here, producing NaN and
        wrongly rejecting the sample.
        """
        n = np.count_nonzero(self.input - test)
        if n == 0:
            return True
        return np.linalg.norm(self.input - test, ord=self.measurement) / float(n) <= self.radius
    def measure(self, test):
        """Return the raw Lp distance between the reference and ``test``."""
        return np.linalg.norm(self.input - test, ord=self.measurement)
"numpy.linalg.norm",
"numpy.count_nonzero"
] | [((264, 299), 'numpy.count_nonzero', 'np.count_nonzero', (['(self.input - test)'], {}), '(self.input - test)\n', (280, 299), True, 'import numpy as np\n'), ((438, 493), 'numpy.linalg.norm', 'np.linalg.norm', (['(self.input - test)'], {'ord': 'self.measurement'}), '(self.input - test, ord=self.measurement)\n', (452, 493), True, 'import numpy as np\n'), ((314, 369), 'numpy.linalg.norm', 'np.linalg.norm', (['(self.input - test)'], {'ord': 'self.measurement'}), '(self.input - test, ord=self.measurement)\n', (328, 369), True, 'import numpy as np\n')] |
import pytest
# https://github.com/scikit-learn/scikit-learn/issues/8959
import numpy as np
try:
    # sign=' ' pads positive numbers with a space in printed arrays; the
    # option only exists on newer NumPy versions, so skip it silently there.
    np.set_printoptions(sign=' ')
except TypeError:
    pass
# Keep pytest from collecting these files as test modules.
collect_ignore = ['setup.py', 'docs/conf.py']
# Enable pytest's assertion rewriting (detailed failure output) inside skfda.
pytest.register_assert_rewrite("skfda")
| [
"pytest.register_assert_rewrite",
"numpy.set_printoptions"
] | [((207, 246), 'pytest.register_assert_rewrite', 'pytest.register_assert_rewrite', (['"""skfda"""'], {}), "('skfda')\n", (237, 246), False, 'import pytest\n'), ((102, 131), 'numpy.set_printoptions', 'np.set_printoptions', ([], {'sign': '""" """'}), "(sign=' ')\n", (121, 131), True, 'import numpy as np\n')] |
from bisect import bisect_right
from typing import Tuple, List, Dict, Callable, Optional
import numpy as np
from scipy.optimize import linear_sum_assignment
def span_matching(tagging_A: List[Tuple[int, int]],
                  tagging_B: List[Tuple[int, int]],
                  keep_A: bool = False) -> Dict[int, int]:
    """
    Assume we have a list of tokens which was tagged with spans by two different approaches A and B.
    This method tries to find the best 1:1 assignment of spans from B to spans from A. If there are more spans in A than
    in B, then spans from B will go unused and vice versa. The quality of an assignment between two spans depends on
    their overlap in tokens. This method removes entirely disjunct pairs of spans.
    Note: In case A contains two (or more) spans of the same length which are a single span in B (or vice versa),
    either of the spans from A may be mapped to the span in B. Which exact span from A is mapped is undefined.
    :param tagging_A: list of spans, defined by (start, end) token offsets (exclusive!), must be non-overlapping!
    :param tagging_B: a second list of spans over the same sequence in the same format as tagging_A
    :param keep_A: include unmatched spans from A as [idx_A, None] in the returned value
    :return: Dict[int,int] where keys are indices from A and values are indices from B
    """
    if not tagging_A:
        return {}
    if not tagging_B:
        # No candidate spans in B: either map all of A to None, or give up.
        return {i: None for i in range(len(tagging_A))} if keep_A else {}

    # Our cost function is negated span overlap:
    # (1) min(end indices) - max(start indices) is the overlap length,
    # (2) clamped at 0 for entirely disjunct spans,
    # (3) negated so that maximal overlap means minimal assignment cost.
    def overlap(idx_a, idx_b):
        span_a = tagging_A[int(idx_a)]
        span_b = tagging_B[int(idx_b)]
        return -1 * max(0, min(span_a[1], span_b[1]) - max(span_a[0], span_b[0]))

    # dtype=int: the `np.int` alias was deprecated in NumPy 1.20 and removed
    # in 1.24, so the original `dtype=np.int` crashes on modern NumPy.
    cost_matrix = np.fromfunction(np.vectorize(overlap),
                                    (len(tagging_A), len(tagging_B)),
                                    dtype=int)  # type: np.ndarray
    a_indices, b_indices = linear_sum_assignment(cost_matrix)

    # Throw away mappings which have no token overlap at all (i.e. costs == 0).
    # A set gives O(1) membership tests below (the original used a list).
    assignment_costs = cost_matrix[a_indices, b_indices]
    valid_assignments = {i for i in range(len(a_indices)) if assignment_costs[i] < 0}

    # collect valid assignments
    assignments = {a_idx: b_idx
                   for i, (a_idx, b_idx) in enumerate(zip(a_indices, b_indices))
                   if i in valid_assignments}
    if keep_A:
        a_to_none = {i: None for i in range(len(tagging_A))}
        a_to_none.update(assignments)
        assignments = a_to_none
    return assignments
def get_monotonous_character_alignment_func(orig: str, longer: str) -> Callable[[int], Optional[int]]:
    """
    Assume you detokenized a sequence of tokens and you applied span detection on the result. You now have character
    offsets into the detokenized sequence, but you want them for the original untokenized sequence. This method returns
    a function which, given a character offset into the detokenized version, returns the character offset in the
    original untokenized sequence. If the given offset does not have a corresponding character in the original sequence,
    `None` is returned. This offset does not necessarily need to conform to token boundaries, but there are other
    methods for fixing this.
    :param orig: `"".join(your_token_sequence)`
    :param longer: `detokenizer(your_token_sequence)` -> must contain the same characters as `orig` with additional
    ones in between! Otherwise, the result of this function is undefined.
    :return: function as described above
    """
    # TODO there might be computationally more efficient approaches, but this one works
    assert len(longer) > len(orig)
    # Parallel "checkpoint" lists: checkpoints_longer[k] is an index into
    # `longer`, checkpoints_orig[k] the matching index into `orig` -- or None
    # when the checkpoint opens a run of characters that exist only in `longer`.
    checkpoints_orig = []
    checkpoints_longer = []
    idx_longer = 0
    # NOTE: despite its name, idx_orig walks over positions of `orig` while the
    # loop bound len(longer) merely guarantees enough iterations; idx_longer is
    # advanced manually to skip the extra characters in `longer`.
    for idx_orig in range(len(longer)):
        idx_orig_in_bounds = idx_orig < len(orig)
        does_char_match = idx_orig_in_bounds and longer[idx_longer] == orig[idx_orig]
        if does_char_match and len(checkpoints_longer) > 0:
            idx_longer += 1
        else:
            if not does_char_match:
                # create checkpoint for non-matching chars
                checkpoints_longer.append(idx_longer)
                checkpoints_orig.append(None)
                if not idx_orig_in_bounds:
                    # If we reach this point, we are in a situation where longer has superfluous characters at its end.
                    # We only need to create one last checkpoint for this case (which we have done above), therefore
                    # bail out.
                    break
                # advance index for longer until we find a matching char again
                while idx_longer < len(longer) and longer[idx_longer] != orig[idx_orig]:
                    idx_longer += 1
                # If we make it to this point, we have exhausted all characters in s_longer because we did not find a match for
                # the previous character in s. This means the strings are unalignable
                if idx_longer == len(longer):
                    raise ValueError
            # create checkpoint for matching pair
            checkpoints_longer.append(idx_longer)
            checkpoints_orig.append(idx_orig)
            idx_longer += 1
    assert checkpoints_longer[0] == 0  # the first char should always receive a checkpoint
    def func(i: int) -> Optional[int]:
        # Map offset i in `longer` to the corresponding offset in `orig`
        # (or None) using the closest checkpoint at or before i.
        if i < 0 or i >= len(longer):
            raise IndexError
        # find the closest checkpoint preceding the given integer
        i_of_closest_preceding_checkpoint = bisect_right(checkpoints_longer, i) - 1
        aligned_value = checkpoints_orig[i_of_closest_preceding_checkpoint]
        if aligned_value is not None:
            # if there is a corresponding character in the shorter string, compute its exact index using the checkpoint
            distance_to_checkpoint = i - checkpoints_longer[i_of_closest_preceding_checkpoint]
            return aligned_value + distance_to_checkpoint
        else:
            # if there is no corresponding character, return None
            return None
    return func
"bisect.bisect_right",
"numpy.vectorize",
"scipy.optimize.linear_sum_assignment"
] | [((2355, 2389), 'scipy.optimize.linear_sum_assignment', 'linear_sum_assignment', (['cost_matrix'], {}), '(cost_matrix)\n', (2376, 2389), False, 'from scipy.optimize import linear_sum_assignment\n'), ((2235, 2256), 'numpy.vectorize', 'np.vectorize', (['overlap'], {}), '(overlap)\n', (2247, 2256), True, 'import numpy as np\n'), ((6150, 6185), 'bisect.bisect_right', 'bisect_right', (['checkpoints_longer', 'i'], {}), '(checkpoints_longer, i)\n', (6162, 6185), False, 'from bisect import bisect_right\n')] |
# -*- coding: utf-8 -*-
import numpy as np
import argparse as ap
import re
from JorGpi.POSCARloader import POSCARloader
from JorGpi.heisenberg import EquationSolver,NaiveHeisenberg,apply_mirrors_xyz
from JorGpi.pickup.read_vasprun import MAGMOMloaderXML
class Zero(dict):
    """Dictionary that reports 0.0 for every key it does not contain."""
    def __missing__(self, absent_key):
        # Invoked by dict.__getitem__ on a miss; nothing is stored.
        return 0.0
class EnergyConverter:
    """Unit conversion for exchange-coupling values.

    ``convert(*values, units=..., moments=...)`` returns a 2-row array:
    row 0 holds the bare values scaled to the requested unit, row 1 the same
    values additionally weighted by the magnetic moments (see ``types``).
    """
    # Multipliers from eV to the target unit (1 Ry = 13.6056980659 eV).
    energyRatios = {'eV' : 1.0,
                    'meV': 1000.0,
                    'Ry' : np.reciprocal(13.6056980659),
                    'mRy': 1000.0*np.reciprocal(13.6056980659),
                    'He' : 0.5*np.reciprocal(13.6056980659),
                    'mHe': 500.0*np.reciprocal(13.6056980659),
                    'K'  : 11604.51812}
    # Zero() yields 0.0 for missing indices, so absent moments weight to zero.
    default = { 'moments' : Zero(),
                'units'   : 'meV' }
    types = [ " without moments",
              "moments included" ]
    @staticmethod
    def multiply(arr, *args):
        """Return one element-wise scaled copy of *arr* per factor in *args*."""
        return [[ arg*element for element in arr ] for arg in args ]
    @staticmethod
    def convert(*args, **kwargs):
        """Return J values in the requested units, with and without moments.

        Keyword arguments: ``units`` (a key of ``energyRatios``) and
        ``moments`` (indexable by the position of each value in *args*).
        Both fall back to ``EnergyConverter.default``.
        """
        # Work on a copy -- the original implementation did
        # `settings = EnergyConverter.default`, mutating the shared class-level
        # dict and leaking kwargs from one call into all later calls.
        settings = dict(EnergyConverter.default)
        settings.update(kwargs)
        # Look options up in the merged settings; the original indexed kwargs
        # directly, raising KeyError when 'units'/'moments' were omitted and
        # thereby defeating the declared defaults.
        data = np.array([np.copy(args), np.copy(args)]) * EnergyConverter.energyRatios[settings['units']]
        for i, _ in enumerate(args):
            data[1][i] *= settings['moments'][i]
        return data
#
#░█▀▀░█▄█░█▀█░█▀▄░▀█▀░░░█▀█░▀█▀░█▀▀░█░█░░░░░█░█░█▀█
#░▀▀█░█░█░█▀█░█▀▄░░█░░░░█▀▀░░█░░█░░░█▀▄░▄▄▄░█░█░█▀▀
#░▀▀▀░▀░▀░▀░▀░▀░▀░░▀░░░░▀░░░▀▀▀░▀▀▀░▀░▀░░░░░▀▀▀░▀░░
#
class SmartPickUp:
    """Solve for Heisenberg exchange couplings J_ij from flipped-spin VASP runs.

    Workflow: read POSCAR / vasprun.xml pairs for a reference run and several
    spin-flipped runs, attach magnetic moments to the crystal, collect the
    nearest coordination-shell distances, build one linear equation per
    flipped run (energy difference vs. coupling coefficients) and solve it.
    """
    def __init__(self, numberOfNeighbors,
                       namesOfInteractingAtoms):
        # numberOfNeighbors: number of coordination shells (distances) to fit.
        # namesOfInteractingAtoms: element symbols considered magnetic.
        self.numberOfNeighbors = numberOfNeighbors
        self.namesOfInteractingAtoms = namesOfInteractingAtoms
        self.types = EnergyConverter.types
    def read_poscars(self,*args):
        """Load <dir>/POSCAR for every given run directory."""
        lastBackSlashRemoved = [ re.sub('/$','',arg) for arg in args ]
        poscars = [ "%s/POSCAR"%arg for arg in lastBackSlashRemoved ]
        self.poscars = POSCARloader(*poscars)
        self.poscars.parse()
    def read_magmoms(self,*args):
        """Load <dir>/vasprun.xml (energies and moments) for every run."""
        vaspruns = [ "%s/vasprun.xml"%arg for arg in args ]
        self.magmoms = MAGMOMloaderXML(*vaspruns,trapez=True)
        self.magmoms.parse()
    def read(self,*args,**kwargs):
        """Read all runs; kwargs may carry 'reference', the index of the
        reference atom in the first POSCAR's cell (defaults to 0)."""
        self.read_magmoms(*args)
        self.read_poscars(*args)
        if 'reference' in kwargs:
            # cell entries look like (name, position); keep the position.
            self.reference = self.poscars(0)['cell'][kwargs['reference']][1]
            self.ref = kwargs['reference']
        else:
            print("Warning: reference @ 0. Is that ok?")
            self.ref = 0
            self.reference = self.poscars(0)['cell'][0][1]
    def make_crystal(self,idx=0):
        """Attach per-atom magnetic moments to the cell of run *idx* and build
        the mirrored copy used by the neighbor search."""
        self.crystal = self.poscars(idx)['cell']
        try:
            # Moments are 1-indexed by atom position -- hence i+1.
            self.crystal = [[atom[0],atom[1],
                             self.magmoms.get_moments()[i+1]]\
                            for i,atom in enumerate(self.crystal) ]
        except KeyError as err:
            print(self.magmoms.get_moments())
            print(err)
            exit(-1)
        # crystal8: cell extended by mirror images along x, y and z --
        # presumably to emulate periodic boundaries for the neighbor search.
        self.crystal8 = apply_mirrors_xyz(self.poscars(0)['directions'],self.crystal)
    def map_distances(self,idx=0):
        """Collect the shell distances (from the reference atom) of the
        interacting species; keep numberOfNeighbors shells, skipping the
        zero self-distance."""
        self.distances = set([])
        self.make_crystal(idx)
        for atom in self.poscars(idx)['cell']:
            # Round to 2 decimals so numerically-equal shells collapse.
            distance = np.around(np.linalg.norm(atom[1]-self.reference),decimals=2)
            if atom[0] in self.namesOfInteractingAtoms:
                self.distances.add(distance)
        # [1:...] drops the smallest entry (the 0.0 self-distance).
        self.distances = np.sort(np.array(list(self.distances)))[1:1+self.numberOfNeighbors]
        # for sorted!
    def get_system_of_equations(self):
        """Build the linear system: one equation per finished flipped run,
        relating coupling coefficients to the energy difference against the
        unflipped reference run (index 0)."""
        self.map_distances()
        self.systemOfEquations = []
        deltaEnergy = []
        self.flippingConfigurations = []
        for i in range(1,len(self.magmoms)):
            try:
                deltaEnergy.append(self.magmoms(i)['energy']-self.magmoms(0)['energy'])
            except TypeError:
                # Missing energy -> run not finished; skip it entirely.
                print("VASP hasn't finished this run (%d/%d)"%(i,len(self.magmoms)-1))
                continue
            self.set_flipps(i)
        self.model = NaiveHeisenberg(self.flippingConfigurations,
                                     self.crystal,
                                     self.crystal8)
        # Indexes of atoms that were flipped in at least one configuration.
        self.flipped = np.unique(np.where(self.flippingConfigurations)[1])
        self.systemOfEquations = self.model.generate(self.namesOfInteractingAtoms,
                                                     self.distances, self.magmoms)
        self.solver = EquationSolver(self.systemOfEquations,deltaEnergy)
        self.solver.remove_tautologies()
    def set_flipps(self,i):
        """Record, for run *i*, which atoms of the cell had their moment flipped."""
        self.flippingConfigurations.append([])
        for idx,atom in enumerate(self.poscars(0)['cell']):
            self.get_flip(i,idx,atom)
    def get_flip(self,i,idx,atom):
        """Append True iff atom *idx* reversed its moment sign between the
        reference run and run *i*; non-interacting species never count."""
        if atom[0] not in self.namesOfInteractingAtoms:
            self.flippingConfigurations[-1].append(False)
            return
        momentA = self.magmoms(0)['moments'][idx+1]
        momentB = self.magmoms(i)['moments'][idx+1]
        scalar = momentA * momentB
        # Opposite signs with non-negligible magnitude -> spin was flipped.
        if (abs(scalar) > 1e-5 and scalar < 0.0):
            self.flippingConfigurations[-1].append(True)
            return
        self.flippingConfigurations[-1].append(False)
    def solve(self,**kwargs):
        """Solve the system for the J couplings; kwargs are forwarded to
        EnergyConverter.convert (e.g. units=...). Returns the converted array."""
        try:
            self.solver
        except AttributeError:
            # Lazily build the equation system on the first call.
            self.get_system_of_equations()
        self._J_ij = np.array(EnergyConverter.convert(*(self.solver.solve()),
            moments=self.model.get_average_moments(), **kwargs))
        return self._J_ij
    def __str__(self):
        """Tabulate the fitted J values per shell; appends average moments and
        corrections when the model metadata provides them."""
        metaData = self.model.get_metadata()
        try:
            strout = ' '
            strout += ''.join([ "%s | "%name for name in metaData.names ]) + '\n'
        except AttributeError:
            return "Error"
        try:
            strout += ''.join([ (" %s:\t"+len(self._J_ij[0])*" % 8.3f "+"\n")\
                    %(typeName,
                      *self._J_ij[i],)\
                    for i,typeName in enumerate(self.types) ])
            strout += ' <|µ|> (µB): '
            strout += ''.join([ "% 8.3f "%mu for mu in metaData.moments]) + '\n'
        except AttributeError:
            return strout
        try:
            strout += ' <Δµ/µ>: '
            strout += ''.join([ "% 8.3f "%corr for corr in metaData.corrections])
        except AttributeError:
            return strout
        return strout
class Reference:
    """Extract the reference-atom index from a POSCAR comment line.

    The first line of the POSCAR is expected to contain ``NewRef: <int>,``.
    On any parsing problem (pattern absent, non-integer, negative value)
    the index falls back to 0.
    """
    def __init__(self, POSCAR):
        loader = POSCARloader(POSCAR)
        loader.parse()
        firstLine = loader()['comment']
        try:
            # Raw string fixes the invalid '\s' escape sequence of the
            # original (a DeprecationWarning on modern Python); the regex
            # itself is unchanged.
            self.reference = int(re.search(r'NewRef:\s*([0-9]+),',firstLine).group(1))
        except AttributeError as err:
            # re.search returned None -> .group raised AttributeError.
            print("Reference not found in POSCAR comment! Taking 0!")
            print(err)
            self.reference = 0
        except ValueError as err:
            print("Reference cannot be converted to int! Taking 0!")
            print(err)
            self.reference = 0
        # Defensive guard; [0-9]+ cannot actually match a negative number.
        if self.reference < 0:
            print("Wrong reference (%d < 0)! Taking 0!"%self.reference)
            self.reference = 0
    def __str__(self):
        """Printable form of the index."""
        return str(self.reference)
    def __call__(self):
        """Return the integer reference index."""
        return self.reference
class CommandLineOptions:
    """Parse and expose the command-line options of the J-calculation tool.

    Calling the instance with an option name returns its parsed value;
    'elements' is rendered as a regex-style mask like 'Fe$Co$'.
    """
    def __init__(self, *args):
        parser = ap.ArgumentParser(description='Finding Js')
        parser.add_argument('--number-of-interactions', '-J',
                type=int, default=1, metavar="#J",
          help='number of exchange-interaction magnitudes to-be-included in calculations')
        parser.add_argument('--reference', '--noFlip', '-R', metavar='dir', required=True,
                help='reference directory (usually noFlip/)')
        parser.add_argument('--units', '-U', default='meV',
                choices=['eV', 'meV', 'Ry', 'mRy', 'He', 'mHe', 'K'],
                help='units of energy')
        parser.add_argument('--elements', '--atoms', '-E', metavar='symbol',
                nargs="+", required=True,
                help='Symbol of elements taken into account in calculations')
        parser.add_argument('--directories', '-D', metavar='dir', nargs='+', required=True,
                help='Directories containing flipped configurations (eg. flip00000/)')
        self.parser = parser
        # args[0] is the program name, mirroring sys.argv.
        self.opt = self.parser.parse_args(args[1:])
    def __call__(self, key):
        """Return the parsed value of option *key* (exits on unknown keys)."""
        if key == 'elements':
            # Concatenate the symbols and split them back into element tokens
            # (capital letter plus optional lowercase letter), '$'-terminated.
            joined = ''.join(self.opt.__dict__[key])
            symbols = re.findall('[A-Z][a-z]?', joined)
            return ''.join(symbol + '$' for symbol in symbols)
        try:
            return self.opt.__dict__[key]
        except KeyError:
            exit(-1)
| [
"JorGpi.pickup.read_vasprun.MAGMOMloaderXML",
"argparse.ArgumentParser",
"JorGpi.heisenberg.EquationSolver",
"JorGpi.POSCARloader.POSCARloader",
"numpy.copy",
"numpy.reciprocal",
"numpy.where",
"numpy.linalg.norm",
"re.search",
"JorGpi.heisenberg.NaiveHeisenberg",
"re.sub"
] | [((452, 480), 'numpy.reciprocal', 'np.reciprocal', (['(13.6056980659)'], {}), '(13.6056980659)\n', (465, 480), True, 'import numpy as np\n'), ((2035, 2057), 'JorGpi.POSCARloader.POSCARloader', 'POSCARloader', (['*poscars'], {}), '(*poscars)\n', (2047, 2057), False, 'from JorGpi.POSCARloader import POSCARloader\n'), ((2205, 2244), 'JorGpi.pickup.read_vasprun.MAGMOMloaderXML', 'MAGMOMloaderXML', (['*vaspruns'], {'trapez': '(True)'}), '(*vaspruns, trapez=True)\n', (2220, 2244), False, 'from JorGpi.pickup.read_vasprun import MAGMOMloaderXML\n'), ((4170, 4243), 'JorGpi.heisenberg.NaiveHeisenberg', 'NaiveHeisenberg', (['self.flippingConfigurations', 'self.crystal', 'self.crystal8'], {}), '(self.flippingConfigurations, self.crystal, self.crystal8)\n', (4185, 4243), False, 'from JorGpi.heisenberg import EquationSolver, NaiveHeisenberg, apply_mirrors_xyz\n'), ((4626, 4677), 'JorGpi.heisenberg.EquationSolver', 'EquationSolver', (['self.systemOfEquations', 'deltaEnergy'], {}), '(self.systemOfEquations, deltaEnergy)\n', (4640, 4677), False, 'from JorGpi.heisenberg import EquationSolver, NaiveHeisenberg, apply_mirrors_xyz\n'), ((6729, 6749), 'JorGpi.POSCARloader.POSCARloader', 'POSCARloader', (['POSCAR'], {}), '(POSCAR)\n', (6741, 6749), False, 'from JorGpi.POSCARloader import POSCARloader\n'), ((7557, 7600), 'argparse.ArgumentParser', 'ap.ArgumentParser', ([], {'description': '"""Finding Js"""'}), "(description='Finding Js')\n", (7574, 7600), True, 'import argparse as ap\n'), ((519, 547), 'numpy.reciprocal', 'np.reciprocal', (['(13.6056980659)'], {}), '(13.6056980659)\n', (532, 547), True, 'import numpy as np\n'), ((583, 611), 'numpy.reciprocal', 'np.reciprocal', (['(13.6056980659)'], {}), '(13.6056980659)\n', (596, 611), True, 'import numpy as np\n'), ((649, 677), 'numpy.reciprocal', 'np.reciprocal', (['(13.6056980659)'], {}), '(13.6056980659)\n', (662, 677), True, 'import numpy as np\n'), ((1904, 1925), 're.sub', 're.sub', (['"""/$"""', '""""""', 'arg'], {}), "('/$', '', 
arg)\n", (1910, 1925), False, 'import re\n'), ((8764, 8805), 're.search', 're.search', (['"""([A-Z][a-z]?)(.*)$"""', 'elements'], {}), "('([A-Z][a-z]?)(.*)$', elements)\n", (8773, 8805), False, 'import re\n'), ((3359, 3399), 'numpy.linalg.norm', 'np.linalg.norm', (['(atom[1] - self.reference)'], {}), '(atom[1] - self.reference)\n', (3373, 3399), True, 'import numpy as np\n'), ((4385, 4422), 'numpy.where', 'np.where', (['self.flippingConfigurations'], {}), '(self.flippingConfigurations)\n', (4393, 4422), True, 'import numpy as np\n'), ((1209, 1222), 'numpy.copy', 'np.copy', (['args'], {}), '(args)\n', (1216, 1222), True, 'import numpy as np\n'), ((1223, 1236), 'numpy.copy', 'np.copy', (['args'], {}), '(args)\n', (1230, 1236), True, 'import numpy as np\n'), ((6859, 6903), 're.search', 're.search', (['"""NewRef:\\\\s*([0-9]+),"""', 'firstLine'], {}), "('NewRef:\\\\s*([0-9]+),', firstLine)\n", (6868, 6903), False, 'import re\n')] |
"""
Extracts features that are based on characters and a condition if the characters should be counted.
"""
import string
import numpy as np
from code.feature_extraction.feature_extractor import FeatureExtractor
from code.util import SUFFIX_PUNC_NUM, SUFFIX_CAP_LETTERS
class ConditionalCharCounter(FeatureExtractor):
    """
    Counts, for every input text, the characters that satisfy a condition.
    Subclasses override _check_condition to define which characters count.
    """
    def __init__(self, input_column, output_column):
        super().__init__([input_column], output_column)
    def _get_values(self, inputs):
        """Return a column vector with one per-text character count."""
        counts = [
            sum(1 for character in text if self._check_condition(character))
            for text in inputs[0]
        ]
        return np.array(counts).reshape(-1, 1)
    def _check_condition(self, char: str):
        """Default condition: every character is counted."""
        return True
class PunctuationNum(ConditionalCharCounter):
    """
    Class to count the number of punctuation characters in an input.
    """
    def __init__(self, input_column):
        # Output column name is the input column plus the punctuation suffix.
        super().__init__(input_column, input_column + SUFFIX_PUNC_NUM)
    def _check_condition(self, char: str):
        # Count a character iff it is ASCII punctuation (string.punctuation).
        return char in string.punctuation
class CapLettersNum(ConditionalCharCounter):
    """
    Class to count the number of capital letters.
    """
    def __init__(self, input_column):
        # Output column name is the input column plus the capital-letters suffix.
        super().__init__(input_column, input_column + SUFFIX_CAP_LETTERS)
    def _check_condition(self, char: str):
        # str.isupper() is Unicode-aware, not limited to ASCII A-Z.
        return char.isupper()
| [
"numpy.array"
] | [((914, 936), 'numpy.array', 'np.array', (['num_of_chars'], {}), '(num_of_chars)\n', (922, 936), True, 'import numpy as np\n')] |
# Copyright (c) MONAI Consortium
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import numpy as np
import torch
from parameterized import parameterized
from monai.transforms import MapLabelValue
from tests.utils import TEST_NDARRAYS
TESTS = []
# One copy of each numeric base case per container type (the numpy/torch
# wrappers provided by TEST_NDARRAYS): params, input data, expected output.
for p in TEST_NDARRAYS:
    TESTS.extend(
        [
            [{"orig_labels": [3, 2, 1], "target_labels": [0, 1, 2]}, p([[3, 1], [1, 2]]), p([[0.0, 2.0], [2.0, 1.0]])],
            [
                {"orig_labels": [3, 5, 8], "target_labels": [0, 1, 2]},
                p([[[3], [5], [5], [8]]]),
                p([[[0.0], [1.0], [1.0], [2.0]]]),
            ],
            [{"orig_labels": [1, 2, 3], "target_labels": [0, 1, 2]}, p([3, 1, 1, 2]), p([2.0, 0.0, 0.0, 1.0])],
            [{"orig_labels": [1, 2, 3], "target_labels": [0.5, 1.5, 2.5]}, p([3, 1, 1, 2]), p([2.5, 0.5, 0.5, 1.5])],
        ]
    )
# note: PyTorch 1.5.1 doesn't support rich dtypes
# NOTE(review): this case sits outside the loop and reuses the leaked loop
# variable `p` (i.e. only the last TEST_NDARRAYS entry) -- confirm intentional.
TESTS.append(
    [
        {"orig_labels": [1.5, 2.5, 3.5], "target_labels": [0, 1, 2], "dtype": np.int8},
        p([3.5, 1.5, 1.5, 2.5]),
        p([2, 0, 0, 1]),
    ]
)
# String-label cases use plain numpy arrays (no per-container variants).
TESTS.extend(
    [
        [
            {"orig_labels": ["label3", "label2", "label1"], "target_labels": [0, 1, 2]},
            np.array([["label3", "label1"], ["label1", "label2"]]),
            np.array([[0, 2], [2, 1]]),
        ],
        [
            {"orig_labels": [3.5, 2.5, 1.5], "target_labels": ["label0", "label1", "label2"], "dtype": "str"},
            np.array([[3.5, 1.5], [1.5, 2.5]]),
            np.array([["label0", "label2"], ["label2", "label1"]]),
        ],
        [
            {
                "orig_labels": ["label3", "label2", "label1"],
                "target_labels": ["label1", "label2", "label3"],
                "dtype": "str",
            },
            np.array([["label3", "label1"], ["label1", "label2"]]),
            np.array([["label1", "label3"], ["label3", "label2"]]),
        ],
    ]
)
class TestMapLabelValue(unittest.TestCase):
    """Parameterized checks that MapLabelValue remaps values and keeps shape."""
    @parameterized.expand(TESTS)
    def test_shape(self, input_param, input_data, expected_value):
        result = MapLabelValue(**input_param)(input_data)
        # torch tensors need the torch comparator; everything else via numpy.
        if isinstance(expected_value, torch.Tensor):
            torch.testing.assert_allclose(result, expected_value)
        else:
            np.testing.assert_equal(result, expected_value)
        self.assertTupleEqual(result.shape, expected_value.shape)
# Run the test suite when the module is executed directly.
if __name__ == "__main__":
    unittest.main()
| [
"unittest.main",
"torch.testing.assert_allclose",
"parameterized.parameterized.expand",
"numpy.array",
"numpy.testing.assert_equal",
"monai.transforms.MapLabelValue"
] | [((2517, 2544), 'parameterized.parameterized.expand', 'parameterized.expand', (['TESTS'], {}), '(TESTS)\n', (2537, 2544), False, 'from parameterized import parameterized\n'), ((2962, 2977), 'unittest.main', 'unittest.main', ([], {}), '()\n', (2975, 2977), False, 'import unittest\n'), ((1757, 1811), 'numpy.array', 'np.array', (["[['label3', 'label1'], ['label1', 'label2']]"], {}), "([['label3', 'label1'], ['label1', 'label2']])\n", (1765, 1811), True, 'import numpy as np\n'), ((1825, 1851), 'numpy.array', 'np.array', (['[[0, 2], [2, 1]]'], {}), '([[0, 2], [2, 1]])\n', (1833, 1851), True, 'import numpy as np\n'), ((1997, 2031), 'numpy.array', 'np.array', (['[[3.5, 1.5], [1.5, 2.5]]'], {}), '([[3.5, 1.5], [1.5, 2.5]])\n', (2005, 2031), True, 'import numpy as np\n'), ((2045, 2099), 'numpy.array', 'np.array', (["[['label0', 'label2'], ['label2', 'label1']]"], {}), "([['label0', 'label2'], ['label2', 'label1']])\n", (2053, 2099), True, 'import numpy as np\n'), ((2323, 2377), 'numpy.array', 'np.array', (["[['label3', 'label1'], ['label1', 'label2']]"], {}), "([['label3', 'label1'], ['label1', 'label2']])\n", (2331, 2377), True, 'import numpy as np\n'), ((2391, 2445), 'numpy.array', 'np.array', (["[['label1', 'label3'], ['label3', 'label2']]"], {}), "([['label1', 'label3'], ['label3', 'label2']])\n", (2399, 2445), True, 'import numpy as np\n'), ((2629, 2657), 'monai.transforms.MapLabelValue', 'MapLabelValue', ([], {}), '(**input_param)\n', (2642, 2657), False, 'from monai.transforms import MapLabelValue\n'), ((2735, 2788), 'torch.testing.assert_allclose', 'torch.testing.assert_allclose', (['result', 'expected_value'], {}), '(result, expected_value)\n', (2764, 2788), False, 'import torch\n'), ((2815, 2862), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['result', 'expected_value'], {}), '(result, expected_value)\n', (2838, 2862), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Aug 7 14:02:13 2018
@author: elizabethhutton
"""
# -*- coding: utf-8 -*-
"""
Spyder Editor
This is a temporary script file.
"""
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
##############################################################################
######################### Data Processing ###########################
#read data into dataframes
in_file = "TN_ABCD.txt"
target_file = "TNP1_ABCD.txt"
test_file = "ABCD_test_items.txt"
num_trials = 1023
df_in = pd.read_csv(in_file, header = None, delim_whitespace = True)
df_target = pd.read_csv(target_file, header = None, delim_whitespace = True)
df_test_pairs = pd.read_csv(test_file, header = None, delim_whitespace = True)
#split test items into input and output
# columns 1-2 are the input coordinates, columns 3-4 the expected output
test_in = df_test_pairs.values[:,1:3]
test_out = df_test_pairs.values[:,3:5]
#reformat data s.t. input b/w [-1,1], output b/w [0,1]
# the constants assume raw training values in [1, 8] and raw test values
# in [0.1, 0.8] -- TODO confirm against the data files
data_in = df_in.values
data_in = ((data_in - 1)/3.5)-1
target = df_target.values
target = ((target - 1)/7)
test_in = ((test_in - .1)/.35)-1
test_out = ((test_out - .1)/.7)
#get category label from input coordinates
def get_input_label(x, y):
    """Return the one-hot category [A, B, C, D] of the point (x, y).

    The plane is split at 0 on both axes; boundary points count as <= 0:
    A: x <= 0, y > 0;  B: x <= 0, y <= 0;  C: x > 0, y > 0;  D: x > 0, y <= 0.
    """
    sp = 0  # separation point on both axes
    if x <= sp and y <= sp:
        return [0, 1, 0, 0]  # B
    if x > sp and y <= sp:
        return [0, 0, 0, 1]  # D
    if x <= sp and y > sp:
        return [1, 0, 0, 0]  # A
    if x > sp and y > sp:
        return [0, 0, 1, 0]  # C
    # Unreachable for real coordinates; retained (like the original) as a
    # guard against NaN inputs, which fail every comparison above.
    print("Error, separation point")
    return np.zeros([1, 4])
#generate labels for input
# one-hot matrix, rows align with data_in; column order is A, B, C, D
labels = np.zeros([num_trials, 4])
cat_names = ['A','B','C','D']
for i in range(num_trials):
    labels[i,:] = get_input_label(data_in[i,0],data_in[i,1])
#generate labels for test items (to test classification)
n_test = test_in.shape[0]
test_labels = np.zeros([n_test, 4])
for i in range(n_test):
    test_labels[i,:] = get_input_label(test_in[i,0],test_in[i,1])
##############################################################################
######################### Model Design ###########################
def model(input_, target, test_input, test_target, l_rate):
    """Build, train and evaluate a 2-hidden-layer feed-forward net (TF1 graph).

    Trains for a fixed 2500 epochs with full-batch gradient descent on MSE,
    logs/plots train cost and train/test accuracy, and returns the output
    activations on the test items plus the per-item test errors.
    """
    #parameters
    n_epochs = 2500
    n_features = input_.shape[1]
    n_outputs = target.shape[1]
    #input and output placeholders
    # NOTE(review): the network below is built directly on the constant
    # `input_` / `test_input` arrays, so only y_target of these placeholders
    # actually feeds into the graph; x_in is fed but unused.
    with tf.name_scope('input'):
        x_in = tf.placeholder(tf.float64,[None, n_features])
        y_target = tf.placeholder(tf.float64,[None, n_outputs])
    #num hidden layers and sizes
    n_hidden_1 = 10
    n_hidden_2 = 10
    # Define weights and biases
    W = {
        'h1': tf.Variable(tf.truncated_normal([n_features, n_hidden_1],dtype = tf.float64)),
        'h2': tf.Variable(tf.truncated_normal([n_hidden_1, n_hidden_2],dtype = tf.float64)),
        'out': tf.Variable(tf.truncated_normal([n_hidden_2, n_outputs],dtype = tf.float64))
    }
    b = {
        'b1': tf.Variable(tf.truncated_normal([n_hidden_1], dtype = tf.float64)),
        'b2': tf.Variable(tf.truncated_normal([n_hidden_2], dtype = tf.float64)),
        'out': tf.Variable(tf.truncated_normal([n_outputs],dtype = tf.float64))
    }
    #build model
    def neural_net(x):
        # Hidden layers with sigmoid activation
        with tf.name_scope('layer1'):
            layer_1 = tf.nn.sigmoid(tf.add(tf.matmul(x, W['h1']), b['b1']))
        with tf.name_scope('layer2'):
            layer_2 = tf.nn.sigmoid(tf.add(tf.matmul(layer_1, W['h2']), b['b2']))
        # Output layer
        with tf.name_scope('output_layer'):
            out_layer = tf.add(tf.matmul(layer_2, W['out']), b['out'])
        return out_layer
    #generate predictions for train and test
    y_out = neural_net(input_)
    #train_pred = tf.nn.softmax(y_out)
    test_out = neural_net(test_input)
    #test_pred = tf.nn.softmax(test_out)
    # Per-test-item Euclidean error (one row per item).
    errors = tf.sqrt(tf.reduce_sum(tf.square(tf.subtract(test_out, test_target)),1,keepdims=True))
    #define loss and optimizer
    with tf.name_scope('cost'):
        cost_fxn = tf.reduce_mean(tf.losses.mean_squared_error(predictions = y_out, labels = y_target))
        tf.summary.scalar('cost', cost_fxn)
    optimizer = tf.train.GradientDescentOptimizer(learning_rate = l_rate)
    with tf.name_scope('train'):
        train_op = optimizer.minimize(cost_fxn)
    # Evaluate model accuracy
    # (argmax comparison: classification accuracy over the one-hot columns)
    with tf.name_scope('train_accuracy'):
        train_correct = tf.equal(tf.argmax(y_out, 1), tf.argmax(target, 1))
        train_acc = tf.reduce_mean(tf.cast(train_correct, tf.float32))
    with tf.name_scope('test_accuracy'):
        test_correct = tf.equal(tf.argmax(test_out, 1), tf.argmax(test_target, 1))
        test_acc = tf.reduce_mean(tf.cast(test_correct, tf.float32))
        test_loss = tf.reduce_mean(tf.losses.mean_squared_error(predictions = test_out, labels = test_target))
    #initialize variables
    init = tf.global_variables_initializer()
    #save mse and accuracy info
    cost_log = []
    accuracy_log = []
    test_cost_log = []
    test_accuracy_log = []
    #errors = []
    log_step = 500
    # Training
    with tf.Session() as sess:
        #define file writer
        #writer = tf.summary.FileWriter('./graphs', sess.graph)
        #run initializer
        sess.run(init)
        #train
        for i in range(n_epochs):
            sess.run(train_op,feed_dict = {x_in: input_, y_target: target})
            #generate cost and accuracy info
            cost, train_accuracy = sess.run([cost_fxn, train_acc],feed_dict = \
                            {x_in: input_, y_target: target})
            cost_log.append(cost)
            accuracy_log.append(train_accuracy)
            #get accuracy on test items
            test_error,test_accuracy = sess.run([test_loss,test_acc],feed_dict = \
                            {x_in: test_input, y_target: test_target})
            test_cost_log.append(test_error)
            test_accuracy_log.append(test_accuracy)
            #add to summary
            #merged = tf.summary.merge_all()
            #summary = sess.run(merged, feed_dict = {x_in: data_in, y_target: labels})
            #writer.add_summary(summary, i)
            if i == 0 or (i+1) % log_step == 0:
                #print progress
                print("Epoch " + str(i+1) + ", Loss= " + \
                        "{:.3f}".format(cost) + ", Train Accuracy= " \
                        + "{:.3f}".format(train_accuracy) + ", Test Accuracy= " \
                        + "{:.3f}".format(test_accuracy))
            #save output layer activations on last epoch
            if i == n_epochs-1:
                # NOTE(review): uses the module-level `test_in`, not the
                # `test_input` parameter -- confirm this is intentional.
                output_layer = neural_net(test_in).eval()
                #errors.append(item_error)
                errors = errors.eval()
    plt.figure()
    plt.plot(cost_log, label='Cost')
    plt.plot(test_accuracy_log, label = 'Test')
    plt.plot(accuracy_log, label = 'Train')
    plt.legend()
    #writer.close()
    return output_layer, errors
#category training
# First pass: train on the raw features with category labels as targets; the
# returned output-layer activations encode predicted category information.
# NOTE(review): `model`, `data_in`, `labels`, `test_in`, `test_labels`,
# `target` and `test_out` are defined earlier in the file — confirm that
# `test_out` here is the intended test-target array and not the graph tensor
# of the same name used inside model().
output_layer,tmp = model(data_in,labels,test_in,test_labels, l_rate = 0.5)
#prediction after training
# Augment the training inputs with the predicted category activations, and
# pad the test inputs with zeros of the same width (16 test items x 4
# columns — presumably, judging by the np.zeros([16,4]) shape; confirm).
input_ = np.concatenate([data_in,output_layer],1)
test_input = np.concatenate([test_in,np.zeros([16,4])],1)
output, trained_errors = model(input_,target,test_input,test_out,l_rate = 0.1)
#pure prediction
# Baseline condition: same targets but without category-augmented inputs.
output, simple_errors = model(data_in, target,test_in,test_out,l_rate = 0.1)
### run models multiple times
# Repeat both conditions and stack per-item test errors column-wise, so the
# per-category means below average over (iterations + 1) independent runs.
iterations = 3
for i in range(iterations):
    output,trained = model(input_,target,test_input,test_out,l_rate = 0.1)
    trained_errors = np.concatenate((trained_errors,trained),axis=1)
    output, simple = model(data_in, target,test_in,test_out,l_rate = 0.1)
    simple_errors = np.concatenate((simple_errors,simple),axis=1)
# Mean error per 4-item test category (rows 0-3, 4-7, 8-11, 12-15).
error_by_cat_trained = np.array([np.mean(trained_errors[0:4,:]),\
                        np.mean(trained_errors[4:8,:]),\
                        np.mean(trained_errors[8:12,:]),\
                        np.mean(trained_errors[12:16,:])])
error_by_cat_simple = np.array([np.mean(simple_errors[0:4,:]),\
                        np.mean(simple_errors[4:8,:]),\
                        np.mean(simple_errors[8:12,:]),\
                        np.mean(simple_errors[12:16,:])])
###plot results
# Grouped bar chart comparing the two conditions across the four categories.
test_cats = ('Word', 'Part Word', 'Cor. Traj.', 'Inc. Traj')
fig, ax = plt.subplots()
x_pos = np.arange(len(test_cats))
bar_width = 0.4
plt.bar(x_pos, error_by_cat_simple, -bar_width, align = 'edge',alpha=0.5, label='Simple')
plt.bar(x_pos, error_by_cat_trained, bar_width, align = 'edge',alpha=0.5, label='Trained')
plt.xticks(x_pos, test_cats)
plt.ylabel('Error')
plt.title('Test Errors by Condition')
plt.legend()
plt.show()
| [
"matplotlib.pyplot.title",
"pandas.read_csv",
"matplotlib.pyplot.bar",
"tensorflow.matmul",
"matplotlib.pyplot.figure",
"numpy.mean",
"tensorflow.truncated_normal",
"tensorflow.subtract",
"tensorflow.placeholder",
"tensorflow.cast",
"tensorflow.name_scope",
"matplotlib.pyplot.xticks",
"matpl... | [((596, 652), 'pandas.read_csv', 'pd.read_csv', (['in_file'], {'header': 'None', 'delim_whitespace': '(True)'}), '(in_file, header=None, delim_whitespace=True)\n', (607, 652), True, 'import pandas as pd\n'), ((669, 729), 'pandas.read_csv', 'pd.read_csv', (['target_file'], {'header': 'None', 'delim_whitespace': '(True)'}), '(target_file, header=None, delim_whitespace=True)\n', (680, 729), True, 'import pandas as pd\n'), ((750, 808), 'pandas.read_csv', 'pd.read_csv', (['test_file'], {'header': 'None', 'delim_whitespace': '(True)'}), '(test_file, header=None, delim_whitespace=True)\n', (761, 808), True, 'import pandas as pd\n'), ((1615, 1640), 'numpy.zeros', 'np.zeros', (['[num_trials, 4]'], {}), '([num_trials, 4])\n', (1623, 1640), True, 'import numpy as np\n'), ((1859, 1880), 'numpy.zeros', 'np.zeros', (['[n_test, 4]'], {}), '([n_test, 4])\n', (1867, 1880), True, 'import numpy as np\n'), ((7277, 7319), 'numpy.concatenate', 'np.concatenate', (['[data_in, output_layer]', '(1)'], {}), '([data_in, output_layer], 1)\n', (7291, 7319), True, 'import numpy as np\n'), ((8546, 8560), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (8558, 8560), True, 'import matplotlib.pyplot as plt\n'), ((8611, 8703), 'matplotlib.pyplot.bar', 'plt.bar', (['x_pos', 'error_by_cat_simple', '(-bar_width)'], {'align': '"""edge"""', 'alpha': '(0.5)', 'label': '"""Simple"""'}), "(x_pos, error_by_cat_simple, -bar_width, align='edge', alpha=0.5,\n label='Simple')\n", (8618, 8703), True, 'import matplotlib.pyplot as plt\n'), ((8701, 8794), 'matplotlib.pyplot.bar', 'plt.bar', (['x_pos', 'error_by_cat_trained', 'bar_width'], {'align': '"""edge"""', 'alpha': '(0.5)', 'label': '"""Trained"""'}), "(x_pos, error_by_cat_trained, bar_width, align='edge', alpha=0.5,\n label='Trained')\n", (8708, 8794), True, 'import matplotlib.pyplot as plt\n'), ((8792, 8820), 'matplotlib.pyplot.xticks', 'plt.xticks', (['x_pos', 'test_cats'], {}), '(x_pos, test_cats)\n', (8802, 8820), True, 'import 
matplotlib.pyplot as plt\n'), ((8821, 8840), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Error"""'], {}), "('Error')\n", (8831, 8840), True, 'import matplotlib.pyplot as plt\n'), ((8841, 8878), 'matplotlib.pyplot.title', 'plt.title', (['"""Test Errors by Condition"""'], {}), "('Test Errors by Condition')\n", (8850, 8878), True, 'import matplotlib.pyplot as plt\n'), ((8879, 8891), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (8889, 8891), True, 'import matplotlib.pyplot as plt\n'), ((8892, 8902), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (8900, 8902), True, 'import matplotlib.pyplot as plt\n'), ((1262, 1278), 'numpy.zeros', 'np.zeros', (['[1, 4]'], {}), '([1, 4])\n', (1270, 1278), True, 'import numpy as np\n'), ((4191, 4246), 'tensorflow.train.GradientDescentOptimizer', 'tf.train.GradientDescentOptimizer', ([], {'learning_rate': 'l_rate'}), '(learning_rate=l_rate)\n', (4224, 4246), True, 'import tensorflow as tf\n'), ((4911, 4944), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (4942, 4944), True, 'import tensorflow as tf\n'), ((6926, 6938), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (6936, 6938), True, 'import matplotlib.pyplot as plt\n'), ((6944, 6976), 'matplotlib.pyplot.plot', 'plt.plot', (['cost_log'], {'label': '"""Cost"""'}), "(cost_log, label='Cost')\n", (6952, 6976), True, 'import matplotlib.pyplot as plt\n'), ((6981, 7022), 'matplotlib.pyplot.plot', 'plt.plot', (['test_accuracy_log'], {'label': '"""Test"""'}), "(test_accuracy_log, label='Test')\n", (6989, 7022), True, 'import matplotlib.pyplot as plt\n'), ((7029, 7066), 'matplotlib.pyplot.plot', 'plt.plot', (['accuracy_log'], {'label': '"""Train"""'}), "(accuracy_log, label='Train')\n", (7037, 7066), True, 'import matplotlib.pyplot as plt\n'), ((7073, 7085), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (7083, 7085), True, 'import matplotlib.pyplot as plt\n'), ((7735, 7784), 'numpy.concatenate', 
'np.concatenate', (['(trained_errors, trained)'], {'axis': '(1)'}), '((trained_errors, trained), axis=1)\n', (7749, 7784), True, 'import numpy as np\n'), ((7877, 7924), 'numpy.concatenate', 'np.concatenate', (['(simple_errors, simple)'], {'axis': '(1)'}), '((simple_errors, simple), axis=1)\n', (7891, 7924), True, 'import numpy as np\n'), ((2335, 2357), 'tensorflow.name_scope', 'tf.name_scope', (['"""input"""'], {}), "('input')\n", (2348, 2357), True, 'import tensorflow as tf\n'), ((2374, 2420), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float64', '[None, n_features]'], {}), '(tf.float64, [None, n_features])\n', (2388, 2420), True, 'import tensorflow as tf\n'), ((2439, 2484), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float64', '[None, n_outputs]'], {}), '(tf.float64, [None, n_outputs])\n', (2453, 2484), True, 'import tensorflow as tf\n'), ((4003, 4024), 'tensorflow.name_scope', 'tf.name_scope', (['"""cost"""'], {}), "('cost')\n", (4016, 4024), True, 'import tensorflow as tf\n'), ((4138, 4173), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""cost"""', 'cost_fxn'], {}), "('cost', cost_fxn)\n", (4155, 4173), True, 'import tensorflow as tf\n'), ((4258, 4280), 'tensorflow.name_scope', 'tf.name_scope', (['"""train"""'], {}), "('train')\n", (4271, 4280), True, 'import tensorflow as tf\n'), ((4371, 4402), 'tensorflow.name_scope', 'tf.name_scope', (['"""train_accuracy"""'], {}), "('train_accuracy')\n", (4384, 4402), True, 'import tensorflow as tf\n'), ((4569, 4599), 'tensorflow.name_scope', 'tf.name_scope', (['"""test_accuracy"""'], {}), "('test_accuracy')\n", (4582, 4599), True, 'import tensorflow as tf\n'), ((5135, 5147), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (5145, 5147), True, 'import tensorflow as tf\n'), ((7355, 7372), 'numpy.zeros', 'np.zeros', (['[16, 4]'], {}), '([16, 4])\n', (7363, 7372), True, 'import numpy as np\n'), ((7962, 7993), 'numpy.mean', 'np.mean', (['trained_errors[0:4, :]'], {}), '(trained_errors[0:4, :])\n', 
(7969, 7993), True, 'import numpy as np\n'), ((8028, 8059), 'numpy.mean', 'np.mean', (['trained_errors[4:8, :]'], {}), '(trained_errors[4:8, :])\n', (8035, 8059), True, 'import numpy as np\n'), ((8094, 8126), 'numpy.mean', 'np.mean', (['trained_errors[8:12, :]'], {}), '(trained_errors[8:12, :])\n', (8101, 8126), True, 'import numpy as np\n'), ((8161, 8194), 'numpy.mean', 'np.mean', (['trained_errors[12:16, :]'], {}), '(trained_errors[12:16, :])\n', (8168, 8194), True, 'import numpy as np\n'), ((8229, 8259), 'numpy.mean', 'np.mean', (['simple_errors[0:4, :]'], {}), '(simple_errors[0:4, :])\n', (8236, 8259), True, 'import numpy as np\n'), ((8294, 8324), 'numpy.mean', 'np.mean', (['simple_errors[4:8, :]'], {}), '(simple_errors[4:8, :])\n', (8301, 8324), True, 'import numpy as np\n'), ((8359, 8390), 'numpy.mean', 'np.mean', (['simple_errors[8:12, :]'], {}), '(simple_errors[8:12, :])\n', (8366, 8390), True, 'import numpy as np\n'), ((8425, 8457), 'numpy.mean', 'np.mean', (['simple_errors[12:16, :]'], {}), '(simple_errors[12:16, :])\n', (8432, 8457), True, 'import numpy as np\n'), ((2629, 2692), 'tensorflow.truncated_normal', 'tf.truncated_normal', (['[n_features, n_hidden_1]'], {'dtype': 'tf.float64'}), '([n_features, n_hidden_1], dtype=tf.float64)\n', (2648, 2692), True, 'import tensorflow as tf\n'), ((2722, 2785), 'tensorflow.truncated_normal', 'tf.truncated_normal', (['[n_hidden_1, n_hidden_2]'], {'dtype': 'tf.float64'}), '([n_hidden_1, n_hidden_2], dtype=tf.float64)\n', (2741, 2785), True, 'import tensorflow as tf\n'), ((2816, 2878), 'tensorflow.truncated_normal', 'tf.truncated_normal', (['[n_hidden_2, n_outputs]'], {'dtype': 'tf.float64'}), '([n_hidden_2, n_outputs], dtype=tf.float64)\n', (2835, 2878), True, 'import tensorflow as tf\n'), ((2928, 2979), 'tensorflow.truncated_normal', 'tf.truncated_normal', (['[n_hidden_1]'], {'dtype': 'tf.float64'}), '([n_hidden_1], dtype=tf.float64)\n', (2947, 2979), True, 'import tensorflow as tf\n'), ((3010, 3061), 
'tensorflow.truncated_normal', 'tf.truncated_normal', (['[n_hidden_2]'], {'dtype': 'tf.float64'}), '([n_hidden_2], dtype=tf.float64)\n', (3029, 3061), True, 'import tensorflow as tf\n'), ((3093, 3143), 'tensorflow.truncated_normal', 'tf.truncated_normal', (['[n_outputs]'], {'dtype': 'tf.float64'}), '([n_outputs], dtype=tf.float64)\n', (3112, 3143), True, 'import tensorflow as tf\n'), ((3264, 3287), 'tensorflow.name_scope', 'tf.name_scope', (['"""layer1"""'], {}), "('layer1')\n", (3277, 3287), True, 'import tensorflow as tf\n'), ((3378, 3401), 'tensorflow.name_scope', 'tf.name_scope', (['"""layer2"""'], {}), "('layer2')\n", (3391, 3401), True, 'import tensorflow as tf\n'), ((3526, 3555), 'tensorflow.name_scope', 'tf.name_scope', (['"""output_layer"""'], {}), "('output_layer')\n", (3539, 3555), True, 'import tensorflow as tf\n'), ((4060, 4124), 'tensorflow.losses.mean_squared_error', 'tf.losses.mean_squared_error', ([], {'predictions': 'y_out', 'labels': 'y_target'}), '(predictions=y_out, labels=y_target)\n', (4088, 4124), True, 'import tensorflow as tf\n'), ((4441, 4460), 'tensorflow.argmax', 'tf.argmax', (['y_out', '(1)'], {}), '(y_out, 1)\n', (4450, 4460), True, 'import tensorflow as tf\n'), ((4462, 4482), 'tensorflow.argmax', 'tf.argmax', (['target', '(1)'], {}), '(target, 1)\n', (4471, 4482), True, 'import tensorflow as tf\n'), ((4519, 4553), 'tensorflow.cast', 'tf.cast', (['train_correct', 'tf.float32'], {}), '(train_correct, tf.float32)\n', (4526, 4553), True, 'import tensorflow as tf\n'), ((4637, 4659), 'tensorflow.argmax', 'tf.argmax', (['test_out', '(1)'], {}), '(test_out, 1)\n', (4646, 4659), True, 'import tensorflow as tf\n'), ((4661, 4686), 'tensorflow.argmax', 'tf.argmax', (['test_target', '(1)'], {}), '(test_target, 1)\n', (4670, 4686), True, 'import tensorflow as tf\n'), ((4722, 4755), 'tensorflow.cast', 'tf.cast', (['test_correct', 'tf.float32'], {}), '(test_correct, tf.float32)\n', (4729, 4755), True, 'import tensorflow as tf\n'), ((4792, 4862), 
'tensorflow.losses.mean_squared_error', 'tf.losses.mean_squared_error', ([], {'predictions': 'test_out', 'labels': 'test_target'}), '(predictions=test_out, labels=test_target)\n', (4820, 4862), True, 'import tensorflow as tf\n'), ((3588, 3616), 'tensorflow.matmul', 'tf.matmul', (['layer_2', "W['out']"], {}), "(layer_2, W['out'])\n", (3597, 3616), True, 'import tensorflow as tf\n'), ((3908, 3942), 'tensorflow.subtract', 'tf.subtract', (['test_out', 'test_target'], {}), '(test_out, test_target)\n', (3919, 3942), True, 'import tensorflow as tf\n'), ((3332, 3353), 'tensorflow.matmul', 'tf.matmul', (['x', "W['h1']"], {}), "(x, W['h1'])\n", (3341, 3353), True, 'import tensorflow as tf\n'), ((3446, 3473), 'tensorflow.matmul', 'tf.matmul', (['layer_1', "W['h2']"], {}), "(layer_1, W['h2'])\n", (3455, 3473), True, 'import tensorflow as tf\n')] |
from multiprocessing import Process
import os
import subprocess
import pandas as pd
from rdkit import Chem
from rdkit.Chem import Descriptors
from random import shuffle
import numpy as np
import time
def read_dev_output(file_name):
    """Parse a seq2seq dev-output file into a list of SMILES strings.

    Each line carries a space-separated token sequence before the first
    tab; the tokens are concatenated back into a single SMILES string.

    para file_name: path to the dev output file
    """
    with open(file_name, 'r') as handle:
        return [line.split('\t')[0].replace(' ', '') for line in handle]
def get_novelty(gen_smis, ref_smis, return_novelty=False, ref_can=False):
    """
    Get novelty generated MOLs which are not exist in training dataset
    para gen_smis: generated SMILES, in list format
    para ref_smis: training SMILES, in list format
    para return_novelty: if return novelty MOLs, in canonical SMILES format, default False
    para ref_can: if ref_smis are already canonical SMILES, default False
    """
    # Canonicalise the generated SMILES; unparseable ones are silently
    # dropped (Chem.MolToSmiles raises when MolFromSmiles returns None).
    c_gen_smis = []
    for s in gen_smis:
        try:
            cs = Chem.MolToSmiles(Chem.MolFromSmiles(s))
        except Exception:
            pass
        else:
            c_gen_smis.append(cs)
    # Build the reference as a set: membership tests are O(1) instead of the
    # original O(len(ref_smis)) list scan per generated molecule.
    if ref_can:
        c_ref_smis = set(ref_smis)
    else:
        c_ref_smis = {Chem.MolToSmiles(Chem.MolFromSmiles(s)) for s in ref_smis}
    c_gen_smis = list(set(c_gen_smis))
    nove_smis = [smi for smi in c_gen_smis if smi not in c_ref_smis]
    if return_novelty:
        return nove_smis
    else:
        # Raises ZeroDivisionError for an empty gen_smis, as before.
        return len(nove_smis)/len(gen_smis)
def get_novelty_smi(gen_smis, ref_smis, return_novelty=False):
    """
    Get novelty generated SMILES which are not exist in training dataset
    para gen_smis: generated SMILES, in list format
    para ref_smis: training SMILES, in list format
    para return_novelty: if return novelty SMILES, default False
    """
    # A set gives O(1) membership; the original scanned the reference list
    # once per generated SMILES (O(n*m) overall).
    ref_set = set(ref_smis)
    nov_smis = [s for s in gen_smis if s not in ref_set]
    if return_novelty:
        return nov_smis
    else:
        # Raises ZeroDivisionError for an empty gen_smis, as before.
        return len(nov_smis)/len(gen_smis)
def get_valid(gen_smis, return_valid=False):
    """
    Get valid SMILES in generated samples
    para gen_smis: generated SMILES, in list format
    para return_valid: if return the valid SMILES, else return the fraction, default False
    """
    parseable = []
    for candidate in gen_smis:
        try:
            mol = Chem.MolFromSmiles(candidate)
        except:
            # Treat a parser exception the same as an invalid SMILES.
            continue
        if mol is not None:
            parseable.append(candidate)
    if return_valid:
        return parseable
    # Raises ZeroDivisionError for an empty gen_smis, as before.
    return len(parseable)/len(gen_smis)
def get_unique(gen_smis, random_sample_n=-1, valid=True, return_unique=False):
    """
    Get unique generated samples
    para gen_smis: generated SMILES, in list format
    para random_sample_n: the number of SMILES sampled from gen_smis for the
        uniqueness calculation; -1 means use the whole pool, default -1
    para valid: if the unique SMILES should be valid SMILES, default True
    para return_unique: if return the unique SMILES themselves, default False
    """
    # Pool to sample from: optionally filtered down to chemically valid SMILES.
    pool = get_valid(gen_smis, return_valid=True) if valid else gen_smis
    pool_size = len(pool)
    if random_sample_n == -1 or random_sample_n > pool_size:
        sample_n = pool_size
    else:
        sample_n = random_sample_n
    # Shuffle a copy of the pool and take the first sample_n entries — the
    # same draw as shuffling an index list and dereferencing it.
    shuffled = pool[:]
    shuffle(shuffled)
    picked = [shuffled[i] for i in range(sample_n)]
    unique_smis = list(set(picked))
    if return_unique:
        return unique_smis
    if sample_n == 0:
        return 0
    return len(unique_smis)/sample_n
def eva_dl(file_list, ref, ids):
    """
    The Distribution-Learning evaluation of the generated SMILES
    para file_list: the files that store the generated SMILES, in list format
    para ref: training SMILES used as the novelty reference, in list format
    para ids: job id in multi-process; None returns the metrics DataFrame,
        otherwise they are written to 'subprocess_<ids>.csv'
    """
    vs = []
    us = []
    ns = []
    # Context manager guarantees the progress log is closed even on error
    # (the original leaked the handle if an evaluation raised).
    with open('eva_rec.log', 'a') as rec_file:
        for file in file_list:
            smis = read_dev_output(file)
            v_smis = get_valid(smis, return_valid=True)
            n_smis = get_novelty_smi(v_smis, ref, return_novelty=True)
            # Valid fraction over all samples; novelty fraction over the
            # valid ones (raises ZeroDivisionError if none parse, as before).
            vs.append(len(v_smis)/len(smis))
            us.append(get_unique(smis))
            ns.append(len(n_smis)/len(v_smis))
            rec_file.write('DL-evaluation for {0} done\n'.format(file))
    dl_metrics = pd.DataFrame({'valid_score':vs, 'unique_score':us, 'novelty_score':ns})
    if ids is None:
        return dl_metrics
    else:
        dl_metrics.to_csv('subprocess_{0}.csv'.format(ids), index=False)
def eva_gd(file_list, target, ids):
    """
    The Goal-Directed evaluation of the generated SMILES
    para file_list: the files store the generated SMILES, in list format
    para target: the pre-defined goal for generated SMILES, in list format
    para ids: job id in multi-process, default None, and would return the metircs in Dataframe, otherwise will write to a csv file
    """
    # Progress log shared by all evaluation runs (append mode).
    rec_file = open('eva_rec.log','a')
    ave_diff = []
    ave_p = []
    top_1 = []
    top_2 = []
    top_3 = []
    for idx, file in enumerate(file_list):
        smis = read_dev_output(file)
        # Align generated SMILES with targets pairwise; truncate both to the
        # shorter length when the file holds a different number of samples.
        if len(smis) != len(target):
            cut_ = min(len(smis), len(target))
            smis = smis[:cut_]
            target_e = target[:cut_]
        else:
            target_e = target[:]
        # Three 0 sentinels guarantee top-1/2/3 below are always indexable
        # even when fewer than three SMILES parse.
        # NOTE(review): the sentinels are also included in np.mean(properties)
        # below, dragging the reported average down — confirm intended.
        properties = [0,0,0]
        diff = []
        for sidx, smi in enumerate(smis):
            try:
                mol = Chem.MolFromSmiles(smi)
                q = Descriptors.qed(mol)
            except:
                # Unparseable SMILES contribute to neither diff nor properties.
                pass
            else:
                diff.append(abs(q-target_e[sidx]))
                properties.append(q)
        # Descending sort puts the best-scoring molecules first.
        properties = sorted(properties)[::-1]
        top_1.append(properties[0])
        top_2.append(properties[1])
        top_3.append(properties[2])
        ave_p.append(np.mean(properties))
        # NOTE(review): np.mean(diff) is NaN when no SMILES in a file parse.
        ave_diff.append(np.mean(diff))
        rec_file.write('GD-evaluation for {0} done\n'.format(file))
    rec_file.close()
    gd_metrics = pd.DataFrame({'ave_diff':ave_diff, 'ave_property':ave_p, 'top_1':top_1, 'top_2':top_2, 'top_3':top_3})
    if ids == None:
        return gd_metrics
    else:
        gd_metrics.to_csv('subprocess_{0}.csv'.format(ids), index=False)
def multi_process(eva_func, file_dir, file_n, ref, n_jobs, to_file=False):
    """
    Evaluate the generated SMILES in multi-processing
    para eva_func: evaluation function (eva_dl or eva_gd); each subprocess
        writes its partial metrics to 'subprocess_<id>.csv'
    para file_dir: the dir where the generated molecules are stored
    para file_n: number of store files (dev_epoch0 .. dev_epoch<file_n-1>)
    para ref: reference — training SMILES in Distribution-Learning evaluation
        and target values in Goal-Directed evaluation
    para n_jobs: number of worker processes
    para to_file: the output file name, default False, means return the
        metrics to the python console
    """
    # Prepare tasks for each subprocess.
    n_jobs = max(n_jobs, 1)
    # os.path.join handles a trailing '/' in file_dir transparently.
    file_list = [os.path.join(file_dir, 'dev_epoch{0}'.format(i)) for i in range(file_n)]
    filen_per_job = round(file_n/n_jobs)
    file_lists = [file_list[i*filen_per_job:(i+1)*filen_per_job] for i in range(n_jobs-1)]
    # The last job picks up the remainder.
    file_lists.append(file_list[(n_jobs-1)*filen_per_job:])
    # Spawn the subprocesses and wait for all of them to finish.
    sub_process = [Process(target=eva_func, args=(file_lists[sp], ref, sp)) for sp in range(n_jobs)]
    for sp in sub_process:
        sp.start()
    for sp in sub_process:
        sp.join()
    # Merge the per-subprocess results and remove the temporary files.
    # pd.concat replaces DataFrame.append (removed in pandas 2.0) and
    # os.remove replaces the non-portable `rm` shell call.
    frames = []
    for spf in range(n_jobs):
        tmp_csv = 'subprocess_{0}.csv'.format(spf)
        frames.append(pd.read_csv(tmp_csv))
        os.remove(tmp_csv)
    merged_ = pd.concat(frames, ignore_index=True)
    if to_file:
        merged_.to_csv(to_file)
    else:
        return merged_
if __name__ == '__main__':
    # Reference training data (TSV with a header row; SMILES in column 1).
    ref_file = './package/task_data/zinc250k_gd/train.tsv'
    n_jobs = 12
    #file_dir = './output_store/qm9_dl/step40w/'
    file_dir = './output_store/zinc250k_gd_200'
    file_n = 240
    to_file = 'zinc250k_gd_gd_40w.csv'
    with open(ref_file,'r') as f:
        ref_smis = []
        # h flags the header line so it is skipped.
        h = 1
        for l in f:
            if h :
                h = 0
            else:
                ref_smis.append(l.strip().split('\t')[1])
    # Goal-directed target value replicated for every expected sample.
    # NOTE(review): 0.948 looks like a QED ceiling — confirm its provenance.
    tgt = [0.948 for _ in range(10000)]
    start_time = time.time()
    #k = multi_process(eva_dl, file_dir, file_n, ref_smis, n_jobs, to_file)
    k = multi_process(eva_gd, file_dir, file_n, tgt, n_jobs, to_file)
    #file_lists = ['./output_store/zinc250k_gd/dev_epoch{0}'.format(i) for i in range(file_n)]
    #k = eva_gd(file_lists, tgt, None)
    end_time = time.time()
| [
"pandas.DataFrame",
"random.shuffle",
"time.time",
"rdkit.Chem.Descriptors.qed",
"numpy.mean",
"multiprocessing.Process",
"rdkit.Chem.MolFromSmiles"
] | [((3222, 3241), 'random.shuffle', 'shuffle', (['base_index'], {}), '(base_index)\n', (3229, 3241), False, 'from random import shuffle\n'), ((4517, 4591), 'pandas.DataFrame', 'pd.DataFrame', (["{'valid_score': vs, 'unique_score': us, 'novelty_score': ns}"], {}), "({'valid_score': vs, 'unique_score': us, 'novelty_score': ns})\n", (4529, 4591), True, 'import pandas as pd\n'), ((6188, 6299), 'pandas.DataFrame', 'pd.DataFrame', (["{'ave_diff': ave_diff, 'ave_property': ave_p, 'top_1': top_1, 'top_2':\n top_2, 'top_3': top_3}"], {}), "({'ave_diff': ave_diff, 'ave_property': ave_p, 'top_1': top_1,\n 'top_2': top_2, 'top_3': top_3})\n", (6200, 6299), True, 'import pandas as pd\n'), ((8721, 8732), 'time.time', 'time.time', ([], {}), '()\n', (8730, 8732), False, 'import time\n'), ((9028, 9039), 'time.time', 'time.time', ([], {}), '()\n', (9037, 9039), False, 'import time\n'), ((2226, 2249), 'rdkit.Chem.MolFromSmiles', 'Chem.MolFromSmiles', (['smi'], {}), '(smi)\n', (2244, 2249), False, 'from rdkit import Chem\n'), ((6021, 6040), 'numpy.mean', 'np.mean', (['properties'], {}), '(properties)\n', (6028, 6040), True, 'import numpy as np\n'), ((6066, 6079), 'numpy.mean', 'np.mean', (['diff'], {}), '(diff)\n', (6073, 6079), True, 'import numpy as np\n'), ((7603, 7659), 'multiprocessing.Process', 'Process', ([], {'target': 'eva_func', 'args': '(file_lists[sp], ref, sp)'}), '(target=eva_func, args=(file_lists[sp], ref, sp))\n', (7610, 7659), False, 'from multiprocessing import Process\n'), ((907, 928), 'rdkit.Chem.MolFromSmiles', 'Chem.MolFromSmiles', (['s'], {}), '(s)\n', (925, 928), False, 'from rdkit import Chem\n'), ((1106, 1127), 'rdkit.Chem.MolFromSmiles', 'Chem.MolFromSmiles', (['s'], {}), '(s)\n', (1124, 1127), False, 'from rdkit import Chem\n'), ((5634, 5657), 'rdkit.Chem.MolFromSmiles', 'Chem.MolFromSmiles', (['smi'], {}), '(smi)\n', (5652, 5657), False, 'from rdkit import Chem\n'), ((5678, 5698), 'rdkit.Chem.Descriptors.qed', 'Descriptors.qed', (['mol'], {}), 
'(mol)\n', (5693, 5698), False, 'from rdkit.Chem import Descriptors\n')] |
import numpy as np
import cv2
from matplotlib import pyplot as plt
# 7x7 test image: a 3x3 white square centred on a black background.
img = np.array([[0, 0, 0, 0, 0, 0, 0],
          [0, 0, 0, 0, 0, 0, 0],
          [0, 0, 255, 255, 255, 0, 0],
          [0, 0, 255, 255, 255, 0, 0],
          [0, 0, 255, 255, 255, 0, 0],
          [0, 0, 0, 0, 0, 0, 0],
          [0, 0, 0, 0, 0, 0, 0]], dtype=np.uint8)
# 3x3 cross-shaped structuring element shared by all operations.
kernel = cv2.getStructuringElement(cv2.MORPH_CROSS, (3, 3))
# Renamed from `open` to `opened`: the original shadowed the builtin open().
opened = cv2.morphologyEx(img, cv2.MORPH_OPEN, kernel)
close = cv2.morphologyEx(img, cv2.MORPH_CLOSE, kernel)
tophat = cv2.morphologyEx(img, cv2.MORPH_TOPHAT, kernel)
blackhat = cv2.morphologyEx(img, cv2.MORPH_BLACKHAT, kernel)
hitmiss = cv2.morphologyEx(img, cv2.MORPH_HITMISS, kernel)
titles = ['Original', 'Open',
          'Close', 'Top hat',
          'Black hat', 'Hit Miss']
output = [img, opened, close,
          tophat, blackhat,
          hitmiss]
# One grayscale subplot per result on a 2x3 grid.
for i, (title, image) in enumerate(zip(titles, output)):
    plt.subplot(2, 3, i + 1)
    plt.imshow(image, cmap='gray')
    plt.title(title)
    plt.axis('off')
plt.show()
| [
"matplotlib.pyplot.title",
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.show",
"cv2.morphologyEx",
"cv2.getStructuringElement",
"matplotlib.pyplot.imshow",
"matplotlib.pyplot.axis",
"numpy.array"
] | [((74, 289), 'numpy.array', 'np.array', (['[[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0], [0, 0, 255, 255, 255, 0, 0],\n [0, 0, 255, 255, 255, 0, 0], [0, 0, 255, 255, 255, 0, 0], [0, 0, 0, 0, \n 0, 0, 0], [0, 0, 0, 0, 0, 0, 0]]'], {'dtype': 'np.uint8'}), '([[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0], [0, 0, 255, 255, \n 255, 0, 0], [0, 0, 255, 255, 255, 0, 0], [0, 0, 255, 255, 255, 0, 0], [\n 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0]], dtype=np.uint8)\n', (82, 289), True, 'import numpy as np\n'), ((386, 436), 'cv2.getStructuringElement', 'cv2.getStructuringElement', (['cv2.MORPH_CROSS', '(3, 3)'], {}), '(cv2.MORPH_CROSS, (3, 3))\n', (411, 436), False, 'import cv2\n'), ((445, 490), 'cv2.morphologyEx', 'cv2.morphologyEx', (['img', 'cv2.MORPH_OPEN', 'kernel'], {}), '(img, cv2.MORPH_OPEN, kernel)\n', (461, 490), False, 'import cv2\n'), ((547, 593), 'cv2.morphologyEx', 'cv2.morphologyEx', (['img', 'cv2.MORPH_CLOSE', 'kernel'], {}), '(img, cv2.MORPH_CLOSE, kernel)\n', (563, 593), False, 'import cv2\n'), ((651, 698), 'cv2.morphologyEx', 'cv2.morphologyEx', (['img', 'cv2.MORPH_TOPHAT', 'kernel'], {}), '(img, cv2.MORPH_TOPHAT, kernel)\n', (667, 698), False, 'import cv2\n'), ((758, 807), 'cv2.morphologyEx', 'cv2.morphologyEx', (['img', 'cv2.MORPH_BLACKHAT', 'kernel'], {}), '(img, cv2.MORPH_BLACKHAT, kernel)\n', (774, 807), False, 'import cv2\n'), ((866, 914), 'cv2.morphologyEx', 'cv2.morphologyEx', (['img', 'cv2.MORPH_HITMISS', 'kernel'], {}), '(img, cv2.MORPH_HITMISS, kernel)\n', (882, 914), False, 'import cv2\n'), ((1252, 1262), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1260, 1262), True, 'from matplotlib import pyplot as plt\n'), ((1145, 1169), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(3)', '(i + 1)'], {}), '(2, 3, i + 1)\n', (1156, 1169), True, 'from matplotlib import pyplot as plt\n'), ((1172, 1206), 'matplotlib.pyplot.imshow', 'plt.imshow', (['output[i]'], {'cmap': '"""gray"""'}), "(output[i], cmap='gray')\n", (1182, 1206), True, 
'from matplotlib import pyplot as plt\n'), ((1211, 1231), 'matplotlib.pyplot.title', 'plt.title', (['titles[i]'], {}), '(titles[i])\n', (1220, 1231), True, 'from matplotlib import pyplot as plt\n'), ((1236, 1251), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (1244, 1251), True, 'from matplotlib import pyplot as plt\n')] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.