code stringlengths 31 1.05M | apis list | extract_api stringlengths 97 1.91M |
|---|---|---|
def suppressor(data, tolerance, pad=0):
    """Flag the entries of ``data`` whose magnitude is below ``tolerance``.

    Parameters
    ----------
    data : array
        Real or complex input values.
    tolerance : float
        Entries with absolute value below this threshold are flagged.
    pad : int, optional
        Only flag entries that have at least `pad` points between them and
        the nearest entry at or above the tolerance.  Defaults to 0.

    Returns
    -------
    ndarray of bool
        True wherever the corresponding entry should be zeroed.
    """
    import numpy as np
    below_threshold = np.abs(data) < tolerance
    if pad <= 0:
        return below_threshold
    # A point stays flagged only if every neighbour within `pad` positions
    # on either side (and the point itself) is also below the threshold.
    window = np.ones(2*pad+1, dtype=int)
    neighbour_counts = np.convolve(below_threshold, window, mode='same')
    return neighbour_counts == np.sum(window)
def suppress(data, tolerance, pad=0, inplace=True):
    """Set entries of ``data`` that are close to 0 to exactly 0.

    Parameters
    ----------
    data : array
        Real or complex input values.
    tolerance : float
        Entries with absolute value below this threshold are zeroed.
    pad : int, optional
        Only suppress entries that have at least `pad` points between them
        and the nearest entry at or above the tolerance.  Defaults to 0.
    inplace : bool, optional
        If True (the default), overwrite ``data`` and return it; if False,
        operate on and return a copy.

    Returns
    -------
    array
        ``data`` itself (inplace) or a suppressed copy of it.
    """
    import numpy as np
    target = data if inplace else np.copy(data)
    # The mask is always computed from the original values.
    target[suppressor(data, tolerance, pad=pad)] = 0.0
    return target
| [
"numpy.sum",
"numpy.abs",
"numpy.copy",
"numpy.ones",
"numpy.convolve"
] | [((566, 578), 'numpy.abs', 'np.abs', (['data'], {}), '(data)\n', (572, 578), True, 'import numpy as np\n'), ((717, 748), 'numpy.ones', 'np.ones', (['(2 * pad + 1)'], {'dtype': 'int'}), '(2 * pad + 1, dtype=int)\n', (724, 748), True, 'import numpy as np\n'), ((786, 831), 'numpy.convolve', 'np.convolve', (['suppressed', 'counter'], {'mode': '"""same"""'}), "(suppressed, counter, mode='same')\n", (797, 831), True, 'import numpy as np\n'), ((1790, 1803), 'numpy.copy', 'np.copy', (['data'], {}), '(data)\n', (1797, 1803), True, 'import numpy as np\n'), ((952, 967), 'numpy.sum', 'np.sum', (['counter'], {}), '(counter)\n', (958, 967), True, 'import numpy as np\n')] |
import pytest
import emgfit as emg
import numpy as np
class Test_spectrum:
    """Integration tests for emgfit.spectrum on a simulated hyper-EMG spectrum."""
    # Create simulated spectrum data
    from emgfit.sample import simulate_events
    # True hyper-EMG shape parameters used to generate the simulated events.
    true_sigma = 7.77901056381226e-05
    true_theta = 0.6591808159640057
    true_eta_m1 = 0.7393102752716145
    true_eta_m2 = 0.2606897247283855
    true_tau_m1 = 4.4723478031626915e-05
    true_tau_m2 = 0.00011112601042960299
    true_eta_p1 = 0.7315780388972555
    true_eta_p2 = 0.2684219611027445
    true_tau_p1 = 7.130854298242941e-05
    true_tau_p2 = 0.0002741372066519157
    true_bkg_c = 1.036125336704966
    shape_pars = {'sigma' : true_sigma,
                  'theta' : true_theta,
                  'eta_m1': true_eta_m1,
                  'eta_m2': true_eta_m2,
                  'tau_m1': true_tau_m1,
                  'tau_m2': true_tau_m2,
                  'eta_p1': true_eta_p1,
                  'eta_p2': true_eta_p2,
                  'tau_p1': true_tau_p1,
                  'tau_p2': true_tau_p2,
                  'bkg_c' : true_bkg_c}
    # Get literature mass values from AME2020
    m_e = 0.000548579909065  # CODATA value from physics.nist.gov
    m_Ni58 = 57.935341650
    m_err_Ni58 = 0.374e-06
    m_Co58 = 57.935751292
    m_err_Co58 = 1.237e-06
    m_Mn58 = 57.940066643
    m_err_Mn58 = 2.900e-06
    m_Sn116 = 115.901742825
    m_err_Sn116 = 0.103
    ME_Sn116_keV = -91525.979
    # Ionic peak positions: neutral mass minus one electron (Sn116 is
    # doubly charged, hence m/2).
    true_mus = [m_Ni58 - m_e, m_Co58 - m_e, m_Mn58 - m_e, m_Sn116/2 - m_e]
    true_amps = [0.38916170, 0.05940254, 0.94656384, 0.20934518]
    true_N_events = 67636
    x_min = true_mus[0] - 0.004
    x_max = true_mus[-1] + 0.005
    bin_width = 2.37221e-05
    N_bins = int((x_max - x_min)/bin_width)
    # Set random seed for reproducibility, other seeds can result in
    # assertion errors below
    np.random.seed(12)
    data = simulate_events(shape_pars, true_mus, true_amps, true_bkg_c,
                           true_N_events, x_min, x_max, out='hist',
                           N_bins=N_bins)

    def test_grabbing_of_AME_values(self):
        """Literature values must come from the requested AME database."""
        # Define reference literature values
        m_Ni58_AME16 = 57.935341780 - self.m_e
        m_err_Ni58_AME16 = 0.400e-06
        m_Co58_AME16 = 57.935751429 - self.m_e
        m_err_Co58_AME16 = 1.245e-06
        atol = 1e-09  # tolerance [u] up to which absolute agreement is demanded
        # Instantiate spectrum object
        spec = emg.spectrum(df=self.data, show_plot=False)
        spec.add_peak(57.9, species="Ni58:-1e")
        spec.add_peak(57.95, species="Co58:-1e", lit_src="AME2016")
        # Test defaulting to most recent AME database
        p0 = spec.peaks[0]
        msg0 = "default m_AME value of 'Ni58:-1e' deviates from AME2020 value"
        assert np.isclose(p0.m_AME, self.m_Ni58, atol=atol), msg0
        msg1 = "default m_AME_error of 'Ni58:-1e' deviates from AME2020 value"
        assert np.isclose(p0.m_AME_error, self.m_err_Ni58, atol=atol), msg1
        # Test switching to older AME database via add_peak()
        p1 = spec.peaks[1]
        msg2 = "AME2016 value invoked with add_peak() deviates from reference"
        # BUG FIX: `atol` was previously passed positionally to np.isclose,
        # where the third positional parameter is `rtol`, silently loosening
        # the comparison; pass it by keyword (here and in the checks below).
        assert np.isclose(p1.m_AME, m_Co58_AME16, atol=atol), msg2
        msg3 = "AME2016 error invoked with add_peak() deviates from reference"
        assert np.isclose(p1.m_AME_error, m_err_Co58_AME16, atol=atol), msg3
        msg4 = "Flagging for AME2016 values invoked with add_peak() failed"
        assert 'lit_src: AME2016' in p1.comment, msg4
        # Test switching to older AME database via assign_species()
        spec.assign_species("Ni58:-1e", peak_index=0, lit_src='AME2016')
        msg5 = "AME2016 value invoked with assign_species() deviates from ref."
        assert np.isclose(p0.m_AME, m_Ni58_AME16, atol=atol), msg5
        msg6 = "AME2016 error invoked with assign_species() deviates from ref."
        assert np.isclose(p0.m_AME_error, m_err_Ni58_AME16, atol=atol), msg6
        msg7 = "Flagging for AME16 values invoked with assign_species() failed"
        assert 'lit_src: AME2016' in p0.comment, msg7

    def test_fitting_accuracy(self):
        """Check accuracy of fitting using simulated spectrum and test
        calculation of literature values for doubly charged and isomeric species
        """
        # Instantiate spectrum object, calibrate peak shape and fit all peaks
        spec = emg.spectrum(df=self.data, show_plot=False)
        spec.detect_peaks(thres=0.0053, plot_smoothed_spec=False,
                          plot_2nd_deriv=False, plot_detection_result=False)
        msg0 = "Incorrect number of peaks detected."
        assert len(spec.peaks) == len(self.true_mus), msg0
        spec.assign_species(["Ni58:-1e","Co58:-1e","Mn58?:-1e","Sn116:-2e"])
        spec.assign_species("Mn58m?:-1e", peak_index=2, Ex=71.77, Ex_error=0.05)
        spec.determine_peak_shape(species_shape_calib="Mn58m?:-1e",
                                  show_plots=False)
        spec.fit_peaks(species_mass_calib="Ni58:-1e", show_plots=False)
        # Perform accuracy checks
        for p in spec.peaks:
            if p.species == "Ni58:-1e":
                continue  # skip calibrant
            msg1 = "ME deviates from literature by more than 1 sigma."
            assert p.m_dev_keV <= p.mass_error_keV, msg1
            # Check calculation of (atomic) ME for doubly charged species
            if p.species == "Sn116:-2e":
                ME_dev_keV = p.atomic_ME_keV - self.ME_Sn116_keV
                msg2 = str("Respective deviation of ionic mass and atomic mass "
                           "excess from literature differ by > 1 sigma for "
                           "Sn116:-2e.")
                assert abs(ME_dev_keV - p.m_dev_keV) < p.mass_error_keV, msg2
| [
"emgfit.spectrum",
"emgfit.sample.simulate_events",
"numpy.random.seed",
"numpy.isclose"
] | [((1932, 1950), 'numpy.random.seed', 'np.random.seed', (['(12)'], {}), '(12)\n', (1946, 1950), True, 'import numpy as np\n'), ((1963, 2083), 'emgfit.sample.simulate_events', 'simulate_events', (['shape_pars', 'true_mus', 'true_amps', 'true_bkg_c', 'true_N_events', 'x_min', 'x_max'], {'out': '"""hist"""', 'N_bins': 'N_bins'}), "(shape_pars, true_mus, true_amps, true_bkg_c, true_N_events,\n x_min, x_max, out='hist', N_bins=N_bins)\n", (1978, 2083), False, 'from emgfit.sample import simulate_events\n'), ((2542, 2585), 'emgfit.spectrum', 'emg.spectrum', ([], {'df': 'self.data', 'show_plot': '(False)'}), '(df=self.data, show_plot=False)\n', (2554, 2585), True, 'import emgfit as emg\n'), ((2885, 2929), 'numpy.isclose', 'np.isclose', (['p0.m_AME', 'self.m_Ni58'], {'atol': 'atol'}), '(p0.m_AME, self.m_Ni58, atol=atol)\n', (2895, 2929), True, 'import numpy as np\n'), ((3032, 3086), 'numpy.isclose', 'np.isclose', (['p0.m_AME_error', 'self.m_err_Ni58'], {'atol': 'atol'}), '(p0.m_AME_error, self.m_err_Ni58, atol=atol)\n', (3042, 3086), True, 'import numpy as np\n'), ((3282, 3322), 'numpy.isclose', 'np.isclose', (['p1.m_AME', 'm_Co58_AME16', 'atol'], {}), '(p1.m_AME, m_Co58_AME16, atol)\n', (3292, 3322), True, 'import numpy as np\n'), ((3425, 3475), 'numpy.isclose', 'np.isclose', (['p1.m_AME_error', 'm_err_Co58_AME16', 'atol'], {}), '(p1.m_AME_error, m_err_Co58_AME16, atol)\n', (3435, 3475), True, 'import numpy as np\n'), ((3858, 3898), 'numpy.isclose', 'np.isclose', (['p0.m_AME', 'm_Ni58_AME16', 'atol'], {}), '(p0.m_AME, m_Ni58_AME16, atol)\n', (3868, 3898), True, 'import numpy as np\n'), ((4002, 4052), 'numpy.isclose', 'np.isclose', (['p0.m_AME_error', 'm_err_Ni58_AME16', 'atol'], {}), '(p0.m_AME_error, m_err_Ni58_AME16, atol)\n', (4012, 4052), True, 'import numpy as np\n'), ((4501, 4544), 'emgfit.spectrum', 'emg.spectrum', ([], {'df': 'self.data', 'show_plot': '(False)'}), '(df=self.data, show_plot=False)\n', (4513, 4544), True, 'import emgfit as emg\n')] |
import unittest
import numpy as np
from cosmogenic import parma
from .TestBase import TestBase
class TestParma(TestBase):
    """Smoke tests for the PARMA proton and alpha flux models."""

    def setUp(self):
        # Particle flux models under test.
        self.proton = parma.Proton()
        self.alpha = parma.Alpha()
        self.s = 1200  # force field potential, MV
        self.depths = np.linspace(0, 1000, 20)  # atmospheric depths, g/cm2
        self.E = 1000  # energy in MeV
        self.rc = 1.0  # GV?

    def test_flux_pri(self):
        primary_flux = self.proton.flux_pri(self.s, self.depths, self.E)
        self.assertIsNotNone(primary_flux)

    @unittest.expectedFailure
    def test_proton_flux(self):
        proton_flux = self.proton.flux(self.s, self.rc, self.depths, self.E)
        self.assertIsNotNone(proton_flux)

    @unittest.expectedFailure
    def test_alpha_flux(self):
        alpha_flux = self.alpha.flux(self.s, self.rc, self.depths, self.E)
        self.assertIsNotNone(alpha_flux)
# Run the test suite when this module is executed directly.
if __name__ == "__main__":
    unittest.main()
| [
"unittest.main",
"cosmogenic.parma.Proton",
"cosmogenic.parma.Alpha",
"numpy.linspace"
] | [((957, 972), 'unittest.main', 'unittest.main', ([], {}), '()\n', (970, 972), False, 'import unittest\n'), ((170, 184), 'cosmogenic.parma.Proton', 'parma.Proton', ([], {}), '()\n', (182, 184), False, 'from cosmogenic import parma\n'), ((206, 219), 'cosmogenic.parma.Alpha', 'parma.Alpha', ([], {}), '()\n', (217, 219), False, 'from cosmogenic import parma\n'), ((337, 361), 'numpy.linspace', 'np.linspace', (['(0)', '(1000)', '(20)'], {}), '(0, 1000, 20)\n', (348, 361), True, 'import numpy as np\n')] |
import numpy as np
import pandas as pd
import random
import subprocess
from termcolor import colored
import matplotlib.pyplot as plt
# Sweep configuration: tested input sizes and repetitions per size.
MIN_NUMBERS = 8
MAX_NUMBERS = 24
NUMBER_STEP = 1
TEST_REPEAT = 5
# Range of the randomly generated input heights.
MIN_RANGE = 0
MAX_RANGE = 100
# Sentinel used as the neutral element of the max operation; matches the
# most negative 32-bit float emitted by the C side.
FLOAT_MIN = -3.40282e+38
def compute_angles(numbers, float_min=-3.40282e+38):
    """Reference max-prescan of elevation angles for line-of-sight.

    For each point after the first, the elevation angle relative to the
    observer at ``numbers[0]`` is ``arctan((h - numbers[0]) / distance)``,
    rounded to 5 decimals.  Entry ``i`` of the result holds the maximum
    angle over all strictly preceding points, with ``float_min`` as the
    neutral element (exclusive prefix-maximum).

    Parameters
    ----------
    numbers : sequence of numbers
        Height profile; ``numbers[0]`` is the observer position.
    float_min : float, optional
        Identity element of the max operation.  Defaults to the most
        negative 32-bit float, matching the previous hard-coded sentinel.
    """
    angles = [round(np.arctan((h - numbers[0]) / (idx + 1)), 5)
              for idx, h in enumerate(numbers[1:])]
    angles.insert(0, float_min)
    # Inclusive running maximum (max-scan); replaces the former pandas
    # Series.cummax() round-trip with a direct NumPy accumulate.
    running_max = np.maximum.accumulate(angles).tolist()
    # Convert the inclusive scan to an exclusive prescan: drop the
    # overall maximum and prepend the neutral element.
    del running_max[-1]
    running_max.insert(0, float_min)
    return running_max
def run_test_check():
    """Cross-check the external ./test.sh binary against the Python reference.

    For a sweep of input sizes, generates random height profiles, runs
    ./test.sh once per algorithm option (1..3) and compares its parsed
    output against compute_angles(), printing a colored pass/fail line.
    """
    for numbers_count in range (MIN_NUMBERS, MAX_NUMBERS, NUMBER_STEP):
        for _ in range (0, TEST_REPEAT):
            input_num = [random.randint(MIN_RANGE, MAX_RANGE) for _ in range (0, numbers_count)]
            input_str = ','.join([str(number) for number in input_num])
            reference_output = compute_angles(input_num)
            for option in range(1, 4):
                out = subprocess.check_output(["./test.sh", input_str, str(option)])
                # First stdout line is a comma-separated list of angles.
                out_num = out.decode("utf-8").split("\n")[0].split(",")
                # Round to 5 decimals to match the reference, but keep the
                # FLOAT_MIN sentinel exact so equality still holds.
                out_num = \
                    [round(float(number), 5) if float(number) != FLOAT_MIN else float(number) for number in out_num]
                if out_num == reference_output:
                    print(colored("Test (" + str(numbers_count) + " - " + str(option) + ") successful.", 'green'))
                else:
                    print(colored("Test (" + str(numbers_count) + " - " + str(option) + ") unsuccessful.", 'red'))
                    print("-----------------------------------------------------")
                    print(input_num)
                    print(reference_output)
                    print(out_num)
                    print("-----------------------------------------------------")
def create_graph(elapsed_time, samples):
    """Plot measured runtimes of the three variants against input size.

    ``elapsed_time`` holds one list of timings per algorithm option
    (rows filled by run_test_measure); the figure is saved to plot.png
    and shown interactively.
    """
    fig, ax = plt.subplots()
    print(samples, elapsed_time[0])
    # One line per algorithm option (1 = blue, 2 = red, 3 = green).
    ax.plot(samples, elapsed_time[0], linestyle='-', marker='o', color='b')
    ax.plot(samples, elapsed_time[1], linestyle='-', marker='o', color='r')
    ax.plot(samples, elapsed_time[2], linestyle='-', marker='o', color='g')
    ax.set(xlabel='n - points count', ylabel='time (us)',
           title='Line-of-Sight')
    ax.grid()
    fig.savefig("plot.png")
    plt.show()
def rewrite_results(results, filename):
    """Overwrite ``filename`` with one result per line.

    The file must already exist (it is opened in ``r+`` mode).  The
    previous contents are fully discarded: without the explicit
    ``truncate()`` call, stale bytes from a longer previous run would
    remain after the newly written data.
    """
    with open(filename, 'r+') as f:
        f.seek(0)
        for result in results:
            f.write(str(result) + '\n')
        # BUG FIX: drop any leftover old content beyond the new data.
        f.truncate()
def run_test_measure():
    """Benchmark the three ./test.sh variants over a range of input sizes.

    For each size, runs every option TEST_REPEAT times and keeps the
    minimum reported time per option; persists the three series to
    results_1.txt .. results_3.txt and plots them via create_graph().
    """
    elapsed_times = [[],[],[]]
    for numbers_count in range (MIN_NUMBERS, MAX_NUMBERS, NUMBER_STEP):
        print(numbers_count)
        sub_times_log = []
        sub_times_n_2 = []
        sub_times_n = []
        for _ in range (0, TEST_REPEAT):
            input_num = [random.randint(MIN_RANGE, MAX_RANGE) for _ in range (0, numbers_count)]
            input_str = ','.join([str(number) for number in input_num])
            for option in range(1, 4):
                out = subprocess.check_output(["./test.sh", input_str, str(option)])
                # First stdout line is the elapsed time for this run.
                if option == 1:
                    sub_times_log.append(float(out.decode("utf-8").split("\n")[0]))
                elif option == 2:
                    sub_times_n_2.append(float(out.decode("utf-8").split("\n")[0]))
                elif option == 3:
                    sub_times_n.append(float(out.decode("utf-8").split("\n")[0]))
        # Keep the best (minimum) time per option for this input size.
        elapsed_times[0].append(min(sub_times_log))
        elapsed_times[1].append(min(sub_times_n_2))
        elapsed_times[2].append(min(sub_times_n))
    rewrite_results(elapsed_times[0], "results_1.txt")
    rewrite_results(elapsed_times[1], "results_2.txt")
    rewrite_results(elapsed_times[2], "results_3.txt")
    create_graph(elapsed_times, range(MIN_NUMBERS, MAX_NUMBERS, NUMBER_STEP))
if __name__ == '__main__':
    # Correctness check by default; switch to run_test_measure() to benchmark.
    run_test_check()
    # run_test_measure()
"matplotlib.pyplot.show",
"random.randint",
"pandas.Series",
"numpy.arctan",
"matplotlib.pyplot.subplots"
] | [((448, 465), 'pandas.Series', 'pd.Series', (['angles'], {}), '(angles)\n', (457, 465), True, 'import pandas as pd\n'), ((2084, 2098), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (2096, 2098), True, 'import matplotlib.pyplot as plt\n'), ((2501, 2511), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2509, 2511), True, 'import matplotlib.pyplot as plt\n'), ((304, 348), 'numpy.arctan', 'np.arctan', (['((number - numbers[0]) / (idx + 1))'], {}), '((number - numbers[0]) / (idx + 1))\n', (313, 348), True, 'import numpy as np\n'), ((904, 940), 'random.randint', 'random.randint', (['MIN_RANGE', 'MAX_RANGE'], {}), '(MIN_RANGE, MAX_RANGE)\n', (918, 940), False, 'import random\n'), ((3004, 3040), 'random.randint', 'random.randint', (['MIN_RANGE', 'MAX_RANGE'], {}), '(MIN_RANGE, MAX_RANGE)\n', (3018, 3040), False, 'import random\n')] |
from graphviz import Digraph
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
# +
# Split a dataset based on an attribute and an attribute value
def test_split(index, value, dataset):
return dataset[dataset[:, index] == value,
], dataset[dataset[:, index] != value, ]
def proportion_explained(groups):
    """Fraction of all rows that fall in the first of the two groups."""
    n_first = groups[0].shape[0]
    n_second = groups[1].shape[0]
    return n_first / (n_first + n_second)
# Select the best split point for a dataset
def get_split(dataset, col_indices):
    """Pick the column in ``col_indices`` whose modal value covers the most
    rows, and split the dataset on it.

    Returns a node dict with the chosen column ``index``, its modal
    ``value``, the ``groups`` pair (matching / non-matching rows), the
    candidate ``col_indices``, the explained ``proportion`` and a
    human-readable ``proportion_str`` summary.
    """
    b_index, b_value, b_score, b_groups = 999, 999, -1, None
    for index in col_indices:
        # Most frequent value in this column and its occurrence count.
        mode, count = pd.Series(dataset[:, index]).value_counts().head(
            1).reset_index().iloc[0, ]
        proportion = count / dataset.shape[0]
        groups = test_split(index, mode, dataset)
        if proportion > b_score:
            b_index, b_value, b_score, b_groups = index, mode, proportion, groups
    return {
        'index': b_index,
        'value': b_value,
        'groups': b_groups,
        'col_indices': col_indices,
        'proportion': b_score,
        # BUG FIX: the summary previously interpolated `groups` (the split
        # from the *last* column tried in the loop) rather than the best
        # split `b_groups`.
        'proportion_str': f"{np.round(b_score,2)} ({b_groups[0].shape[0]}/{dataset.shape[0]})"}
def to_terminal(group):
    """Collapse a group of rows into a leaf, represented by its row count."""
    n_rows = group.shape[0]
    return n_rows
# Create child splits for a node or make terminal
def split(node, max_depth, min_size, depth):
    """Recursively grow child splits for ``node``, or collapse them to leaves.

    A side becomes a leaf (its row count) when either side of the split is
    empty, no candidate columns remain, ``max_depth`` is reached, or that
    side holds at most ``min_size`` rows.
    """
    left, right = node['groups']
    del(node['groups'])
    # check for a no split
    if not left.shape[0] or not right.shape[0] or not node['col_indices']:
        node['left'], node['right'] = to_terminal(left), to_terminal(right)
        return
    # check for max depth
    if depth >= max_depth:
        node['left'], node['right'] = to_terminal(left), to_terminal(right)
        return
    # process left child
    if len(left) <= min_size:
        node['left'] = to_terminal(left)
    else:
        # The matching branch drops the column just used before recursing.
        # NOTE(review): the right branch below reuses the full candidate
        # list — confirm this asymmetry is intentional.
        du_col_indices = list(node['col_indices'])
        du_col_indices.remove(node['index'])
        if du_col_indices:
            node['left'] = get_split(left, du_col_indices)
            # print(node['left'])
            split(node['left'], max_depth, min_size, depth + 1)
        else:
            node['left'] = to_terminal(left)
    # process right child
    if len(right) <= min_size:
        node['right'] = to_terminal(right)
    else:
        node['right'] = get_split(right, node['col_indices'])
        split(node['right'], max_depth, min_size, depth + 1)
# Build a decision tree
def build_tree(train, max_depth, min_size):
    """Construct a decision tree over all columns of ``train``; return its root node."""
    all_columns = list(range(train.shape[1]))
    root = get_split(train, all_columns)
    split(root, max_depth, min_size, 1)
    return root
# Print a decision tree
def print_tree(node, depth=0, col_dict=None):
    """Recursively pretty-print a decision tree with indentation.

    Internal nodes are dicts as produced by ``get_split``; any other value
    is printed as a leaf.  ``col_dict`` maps column indices to display
    names and is derived from the root's candidate columns if omitted.
    """
    if not col_dict:
        col_dict = {idx: 'X' + str(idx + 1) for idx in node['col_indices']}
    indent = depth * ' '
    if isinstance(node, dict):
        print('%s[%s = %s] %.3f' % (
            indent, col_dict[node['index']], node['value'], node['proportion']))
        print_tree(node['left'], depth + 1, col_dict)
        print_tree(node['right'], depth + 1, col_dict)
    else:
        print('%s leaf [%s]' % (indent, node))
# +
def make_dot(tree, col_dict):
    """Render a decision tree as a graphviz Digraph.

    Internal nodes show the split test and its coverage summary; leaves
    show their row count.  Edges are labelled 'yes' / 'no'.
    """
    dot = Digraph('test_tree')
    num_nodes = 0

    def add_data(node, col_dict=None, depth=0):
        # BUG FIX: this was `global num_nodes`, which referenced a
        # non-existent module-level name and raised NameError on first
        # use; the counter lives in make_dot's enclosing scope.
        nonlocal num_nodes
        num_nodes = num_nodes + 1
        if not col_dict:
            col_dict = {index: 'X' + str(index + 1)
                        for index in node['col_indices']}
        if isinstance(node, dict):
            node_curr = (str(num_nodes), '[%s = %s] \\n %s' % (
                ((col_dict[node['index']]), node['value'], node['proportion_str'])))
            left_node = add_data(node['left'], col_dict, depth + 1)
            right_node = add_data(node['right'], col_dict, depth + 1)
            dot.node(node_curr[0], node_curr[1])
            dot.edge(node_curr[0], left_node[0], 'yes')
            dot.edge(node_curr[0], right_node[0], 'no')
        else:
            node_curr = (str(num_nodes), f"leaf [{node}]")
        return node_curr

    add_data(tree, col_dict)
    return dot
# +
# To visualize the tree in a notebook:
# import pydotplus
# dot = make_dot(tree, col_dict)
# graph = pydotplus.graph_from_dot_data(dot.source)
# from IPython.display import Image
# Image(graph.create_png())
| [
"numpy.round",
"graphviz.Digraph",
"pandas.Series"
] | [((3159, 3179), 'graphviz.Digraph', 'Digraph', (['"""test_tree"""'], {}), "('test_tree')\n", (3166, 3179), False, 'from graphviz import Digraph\n'), ((1112, 1132), 'numpy.round', 'np.round', (['b_score', '(2)'], {}), '(b_score, 2)\n', (1120, 1132), True, 'import numpy as np\n'), ((623, 651), 'pandas.Series', 'pd.Series', (['dataset[:, index]'], {}), '(dataset[:, index])\n', (632, 651), True, 'import pandas as pd\n')] |
# Copyright 2021 ETH Zurich and the NPBench authors. All rights reserved.
import numpy as np
def initialize(N):
    """Return ``N`` reproducible pseudo-random bytes (uint8 values in [0, 256))."""
    from numpy.random import default_rng
    generator = default_rng(42)  # fixed seed keeps benchmark inputs reproducible
    return generator.integers(0, 256, size=(N, ), dtype=np.uint8)
| [
"numpy.random.default_rng"
] | [((166, 181), 'numpy.random.default_rng', 'default_rng', (['(42)'], {}), '(42)\n', (177, 181), False, 'from numpy.random import default_rng\n')] |
from qiskit import *
from QMeasure import HadamardTest, HadamardTest_Analytical, state_backend
from QAnsatz import Ansatze
from QCircuit import MixedStateGenerationCircuit
from QHamiltonian import Hamiltonian_in_Pauli_String
import random
import numpy as np
class SubspaceEigSolverError(Exception):
    """Raised for invalid configuration or parameters of a subspace eigensolver."""
class SubspaceEigSolver:
    """Variational subspace eigensolver.

    The Hamiltonian is given as a weighted sum of Pauli strings, and the
    loss is L(x) = Tr(h a) with
        a = sum_{i=0}^{m-1} U(x)|i><i|U^{-1}(x),
    where h is the observable state, which may be a mixed state.
    """
    def __init__(self, Hamiltonian: Hamiltonian_in_Pauli_String, ansatze: Ansatze, weight_list: list):
        """
        Class of state subspace eigen solver.
        :param Hamiltonian: H in Hx = λx
        :param ansatze: The quantum circuit network with respect to parameters.
        :param weight_list: The guidance of subspace searching. cf. Von Neumann theorem.
        """
        self.state_scale = Hamiltonian.qubits
        # At most 2**n basis states can be weighted on an n-qubit register.
        if len(weight_list) > 2 ** self.state_scale:
            raise SubspaceEigSolverError('Error in StateSubspaceEigSolver! Incorrect weight list size')
        self.ansatze = ansatze
        self.weight_list = weight_list
        self.Hamiltonian = Hamiltonian

    def AnsatzStateGenerationCircuit(self, partial_flag: bool = False, pid: int = 0, pn=None):
        """Build the circuit preparing the weighted mixed ansatz state.

        :param partial_flag: if True, build the shifted circuit used for the
            derivative with respect to parameter ``pid``.
        :param pid: parameter identifier (used when ``partial_flag`` is True).
        :param pn: shift direction, '+' or '-' (used when ``partial_flag`` is True).
        """
        return MixedStateGenerationCircuit(self.ansatze.circuit(partial_flag, pid, pn),
                                           list(np.sqrt(abs(np.array(self.weight_list)))))

    def LossFunctionAnalytical(self):
        """Exact (statevector-based) value of the loss L(x)."""
        return self.Hamiltonian.ExpectationMeasurement(MeasurementMethod=HadamardTest_Analytical,
                                                       test_circuit=self.AnsatzStateGenerationCircuit(),
                                                       active_qubits=[i for i in range(self.state_scale)])

    def PartialDerivativeAnalytical(self, pid):
        """
        Exact partial derivative of parameter pid via the parameter-shift
        rule (cf. 'Quantum circuit learning').
        :param pid: parameter identifier.
        :return: Partial derivative = 1/2*ppd - 1/2*npd.
        """
        ppd = self.Hamiltonian.ExpectationMeasurement(MeasurementMethod=HadamardTest_Analytical,
                                                      test_circuit=self.AnsatzStateGenerationCircuit(partial_flag=True,
                                                                                                     pid=pid,
                                                                                                     pn='+'),
                                                      active_qubits=[i for i in range(self.state_scale)])
        npd = self.Hamiltonian.ExpectationMeasurement(MeasurementMethod=HadamardTest_Analytical,
                                                      test_circuit=self.AnsatzStateGenerationCircuit(partial_flag=True,
                                                                                                     pid=pid,
                                                                                                     pn='-'),
                                                      active_qubits=[i for i in range(self.state_scale)])
        return np.real(1 / 2 * ppd - 1 / 2 * npd)

    def GetJacobianAnalytical(self, par: list):
        """Exact gradient of the loss at parameter vector ``par``."""
        if len(par) != self.ansatze.getParameterLength():
            raise SubspaceEigSolverError(
                'Error in SubspaceEigSolver GetJacobian! Incorrect parameter length')
        self.setParameter(par)
        jac = [0 for i in range(len(par))]
        for i in range(len(par)):
            jac[i] = self.PartialDerivativeAnalytical(i)
        return np.array(jac)

    def LossFunction(self, shots: int = 10000):
        """Sampled estimate of the loss.

        NOTE(review): despite the ``shots`` argument, this forwards to the
        *analytical* Hadamard-test measurement — confirm intended method.
        """
        return self.Hamiltonian.ExpectationMeasurement(MeasurementMethod=HadamardTest_Analytical,
                                                       test_circuit=self.AnsatzStateGenerationCircuit(),
                                                       active_qubits=[i for i in range(self.state_scale)],
                                                       shots=shots)

    def PartialDerivative(self, pid, shots: int = 10000):
        """
        Sampled partial derivative of parameter pid via the parameter-shift
        rule (cf. 'Quantum circuit learning').
        :param pid: Parameter identifier.
        :param shots: Number of measurement shots per expectation value.
        :return: Partial derivative = 1/2*ppd - 1/2*npd.
        """
        ppd = self.Hamiltonian.ExpectationMeasurement(MeasurementMethod=HadamardTest_Analytical,
                                                      test_circuit=self.AnsatzStateGenerationCircuit(partial_flag=True,
                                                                                                     pid=pid,
                                                                                                     pn='+'),
                                                      active_qubits=[i for i in range(self.state_scale)],
                                                      shots=shots)
        npd = self.Hamiltonian.ExpectationMeasurement(MeasurementMethod=HadamardTest_Analytical,
                                                      test_circuit=self.AnsatzStateGenerationCircuit(partial_flag=True,
                                                                                                     pid=pid,
                                                                                                     pn='-'),
                                                      active_qubits=[i for i in range(self.state_scale)],
                                                      shots=shots)
        return np.real(1 / 2 * ppd - 1 / 2 * npd)

    def GetJacobian(self, par: list, shots: int = 10000):
        """Sampled gradient of the loss at parameter vector ``par``."""
        if len(par) != self.ansatze.getParameterLength():
            raise SubspaceEigSolverError(
                'Error in SubspaceEigSolver GetJacobian! Incorrect parameter length')
        self.setParameter(par)
        jac = [0 for i in range(len(par))]
        for i in range(len(par)):
            jac[i] = self.PartialDerivative(i, shots)
        return np.array(jac)

    def EigTrace(self, getEigenstate: bool = False, getLossFunction: bool = False):
        """Evaluate the eigenvalue estimate for each weighted basis state.

        :param getEigenstate: also collect the corresponding statevectors.
        :param getLossFunction: also evaluate the analytical loss.
        :return: dict with keys 'eigval', 'eigvec' and 'lossfun'.
        """
        eigval = []
        eigvec = []
        lossfun = 0
        initvec = np.zeros(2 ** self.state_scale)
        for i in range(len(self.weight_list)):
            initvec[i] = 1
            check_circuit = QuantumCircuit(self.state_scale)
            check_circuit.initialize(initvec, [i for i in range(self.state_scale)])
            initvec[i] = 0
            check_circuit.compose(self.ansatze.circuit(), [i for i in range(self.state_scale)], inplace=True)
            eigval.append(self.Hamiltonian.ExpectationMeasurement(MeasurementMethod=HadamardTest_Analytical,
                                                                  test_circuit=check_circuit,
                                                                  active_qubits=[i for i in range(self.state_scale)]))
            if getEigenstate:
                job = execute(check_circuit, state_backend)
                result = job.result()
                eigvec.append(result.get_statevector(check_circuit, decimals=3))
        if getLossFunction:
            lossfun = self.LossFunctionAnalytical()
        return {'eigval': eigval, 'eigvec': eigvec, 'lossfun': lossfun}

    def setParameter(self, new_parameter: list):
        """Write a new parameter vector into the ansatz."""
        self.ansatze.setParameter(new_parameter)

    def getLossFunction(self, parameter: np.array):
        """Set ``parameter`` on the ansatz and return the sampled loss."""
        # NOTE(review): `p` is computed but unused.
        p = [parameter[i] for i in range(self.ansatze.getParameter().__len__())]
        self.setParameter(parameter)
        return self.LossFunction()

    def getLossFunctionAnalytical(self, parameter: np.array):
        """Set ``parameter`` on the ansatz and return the exact loss."""
        # NOTE(review): `p` is computed but unused.
        p = [parameter[i] for i in range(self.ansatze.getParameter().__len__())]
        self.setParameter(parameter)
        return self.LossFunctionAnalytical()

    def getParameter(self):
        """Return the ansatz's current parameter vector."""
        return self.ansatze.getParameter()

    def getEigenData(self, par, vector_required: bool = True, lossfun_required: bool = True):
        """Set parameters ``par`` and return the EigTrace() result dict."""
        self.setParameter(par)
        return self.EigTrace(vector_required, lossfun_required)

    def showStateVector(self, parameter: list):
        """Print the ansatz circuit and return its exact statevector."""
        self.setParameter(parameter)
        backend = BasicAer.get_backend('statevector_simulator')
        qc = self.ansatze.circuit()
        print(qc.draw('text'))
        job = execute(qc, backend)
        result = job.result()
        return result.get_statevector(qc, decimals=3)
class SubspaceEigSolver_ClassicalEfficientSimulator:
"""
Hamiltonian is in the format of the sum of weighted Pauli string
L(x) = Tr(ha)
a = sum_{i=0}^{m-1} U(x)|i><i|U^{-1}(x)
h is the observable state which may be a mixed state
"""
def __init__(self, Hamiltonian: Hamiltonian_in_Pauli_String, ansatze: Ansatze, weight_list: list):
"""
Class of state subspace eigen solver.
:param Hamiltonian: H in Hx = 入x
:param ansatze: The quantum circuit network with respect to parameters.
:param weight_list: The guidance of subspace searching. c.f. Von Neumann theorem.
"""
self.state_scale = Hamiltonian.qubits
if len(weight_list) > 2 ** self.state_scale:
raise SubspaceEigSolverError('Error in StateSubspaceEigSolver! Incorrect weight list size')
self.ansatze = ansatze
self.weight_list = weight_list
self.Hamiltonian = Hamiltonian
def LossFunctionAnalytical(self):
res = 0
for j in range(len(self.weight_list)):
initvec = np.zeros(2 ** self.state_scale)
initvec[j] = 1
check_circuit = QuantumCircuit(self.state_scale)
check_circuit.initialize(initvec, [i for i in range(self.state_scale)])
check_circuit.compose(self.ansatze.circuit(), [i for i in range(self.state_scale)], inplace=True)
job = execute(check_circuit, state_backend)
result = job.result()
state = result.get_statevector(check_circuit, decimals=3)
res += self.weight_list[j] * np.dot(np.dot(state.conj(), self.Hamiltonian.hamiltonian_mat), state)
return np.real(res)
def PartialDerivativeAnalytical(self, pid):
"""
Partial derivative of parameter pid. (c.f. <NAME>, Quantum circuit learning)
:param pid: parameter identifier.
:return: Partial derivative = 1/2*ppd-1/2*npd.
"""
ppd = 0
initvec = np.zeros(2 ** self.state_scale)
for j in range(len(self.weight_list)):
initvec[j] = 1
check_circuit = QuantumCircuit(self.state_scale)
check_circuit.initialize(initvec, [i for i in range(self.state_scale)])
check_circuit.compose(self.ansatze.circuit(partial_flag=True,
pid=pid,
pn='+'),
[i for i in range(self.state_scale)], inplace=True)
job = execute(check_circuit, state_backend)
result = job.result()
state = result.get_statevector(check_circuit)
ppd += self.weight_list[j] * np.dot(np.dot(state.conj(), self.Hamiltonian.hamiltonian_mat), state)
initvec[j] = 0
npd = 0
for j in range(len(self.weight_list)):
initvec[j] = 1
check_circuit = QuantumCircuit(self.state_scale)
check_circuit.initialize(initvec, [i for i in range(self.state_scale)])
check_circuit.compose(self.ansatze.circuit(partial_flag=True,
pid=pid,
pn='-'),
[i for i in range(self.state_scale)], inplace=True)
job = execute(check_circuit, state_backend)
result = job.result()
state = result.get_statevector(check_circuit)
npd += self.weight_list[j] * np.dot(np.dot(state.conj(), self.Hamiltonian.hamiltonian_mat), state)
initvec[j] = 0
return np.real(1 / 2 * ppd - 1 / 2 * npd)
def GetJacobianAnalytical(self, par: list):
if len(par) != self.ansatze.getParameterLength():
raise SubspaceEigSolverError(
'Error in SubspaceEigSolver GetJacobian! Incorrect parameter length')
self.setParameter(par)
jac = [0 for i in range(len(par))]
for i in range(len(par)):
jac[i] = self.PartialDerivativeAnalytical(i)
return np.array(jac)
def LossFunction(self, shots: int = 10000):
res = 0
for i in range(len(self.weight_list)):
initvec = np.zeros(2 ** self.state_scale)
initvec[i] = 1
check_circuit = QuantumCircuit(self.state_scale)
check_circuit.initialize(initvec, [i for i in range(self.state_scale)])
check_circuit.compose(self.ansatze.circuit(), [i for i in range(self.state_scale)], inplace=True)
res += self.weight_list[i] * self.Hamiltonian.ExpectationMeasurement(
MeasurementMethod=HadamardTest,
test_circuit=check_circuit,
active_qubits=[i for i in range(self.state_scale)],
shots=shots)
return res
def PartialDerivative(self, pid, shots: int = 10000):
"""
Partial derivative of parameter pid. (c.f. <NAME>, Quantum circuit learning)
:param pid: Parameter identifier.
:param shots: How many times the DSWAPT measure the density matrix product trace.
:return: Partial derivative = 1/2*ppd+1/2*npd.
"""
ppd = 0
for i in range(len(self.weight_list)):
initvec = np.zeros(2 ** self.state_scale)
initvec[i] = 1
check_circuit = QuantumCircuit(self.state_scale)
check_circuit.initialize(initvec, [i for i in range(self.state_scale)])
check_circuit.compose(self.ansatze.circuit(partial_flag=True,
pid=pid,
pn='+'),
[i for i in range(self.state_scale)], inplace=True)
ppd += self.weight_list[i] * self.Hamiltonian.ExpectationMeasurement(
MeasurementMethod=HadamardTest,
test_circuit=check_circuit,
active_qubits=[i for i in range(self.state_scale)],
shots=shots)
npd = 0
for i in range(len(self.weight_list)):
initvec = np.zeros(2 ** self.state_scale)
initvec[i] = 1
check_circuit = QuantumCircuit(self.state_scale)
check_circuit.initialize(initvec, [i for i in range(self.state_scale)])
check_circuit.compose(self.ansatze.circuit(partial_flag=True,
pid=pid,
pn='-'),
[i for i in range(self.state_scale)], inplace=True)
npd += self.weight_list[i] * self.Hamiltonian.ExpectationMeasurement(
MeasurementMethod=HadamardTest,
test_circuit=check_circuit,
active_qubits=[i for i in range(self.state_scale)],
shots=shots)
return np.real(1 / 2 * ppd - 1 / 2 * npd)
def GetJacobian(self, par: list, shots: int = 10000):
if len(par) != self.ansatze.getParameterLength():
raise SubspaceEigSolverError(
'Error in SubspaceEigSolver GetJacobian! Incorrect parameter length')
self.setParameter(par)
jac = [0 for i in range(len(par))]
for i in range(len(par)):
jac[i] = self.PartialDerivative(i, shots)
return np.array(jac)
def EigTrace(self, getEigenstate: bool = False, getLossFunction: bool = False):
eigval = []
eigvec = []
lossfun = 0
initvec = np.zeros(2 ** self.state_scale)
for i in range(len(self.weight_list)):
initvec[i] = 1
check_circuit = QuantumCircuit(self.state_scale)
check_circuit.initialize(initvec, [i for i in range(self.state_scale)])
initvec[i] = 0
check_circuit.compose(self.ansatze.circuit(), [i for i in range(self.state_scale)], inplace=True)
eigval.append(self.Hamiltonian.ExpectationMeasurement(MeasurementMethod=HadamardTest_Analytical,
test_circuit=check_circuit,
active_qubits=[i for i in range(self.state_scale)]))
if getEigenstate:
job = execute(check_circuit, state_backend)
result = job.result()
eigvec.append(result.get_statevector(check_circuit, decimals=3))
if getLossFunction:
lossfun = self.LossFunctionAnalytical()
return {'eigval': eigval, 'eigvec': eigvec, 'lossfun': lossfun}
def setParameter(self, new_parameter: list):
self.ansatze.setParameter(new_parameter)
def getLossFunction(self, parameter: np.array):
p = [parameter[i] for i in range(self.ansatze.getParameter().__len__())]
self.setParameter(parameter)
return self.LossFunction()
def getLossFunctionAnalytical(self, parameter: np.array):
p = [parameter[i] for i in range(self.ansatze.getParameter().__len__())]
self.setParameter(parameter)
return self.LossFunctionAnalytical()
    def getParameter(self):
        """Return the current parameter list of the underlying ansatze."""
        return self.ansatze.getParameter()
    def getEigenData(self, par, vector_required: bool = True, lossfun_required: bool = True):
        """Set parameters to ``par`` and return eigen data via ``EigTrace``.

        Args:
            par: Parameter vector for the ansatze.
            vector_required (bool, optional): Forwarded as ``getEigenstate``.
            lossfun_required (bool, optional): Forwarded as ``getLossFunction``.

        Returns:
            dict: EigTrace output ({'eigval', 'eigvec', 'lossfun'}).
        """
        self.setParameter(par)
        return self.EigTrace(vector_required, lossfun_required)
    def showStateVector(self, parameter: list):
        """Set parameters, print the ansatze circuit, and return its statevector.

        Runs the ansatze circuit on the 'statevector_simulator' backend.

        Args:
            parameter (list): Parameter vector for the ansatze.

        Returns:
            Statevector of the ansatze circuit, rounded to 3 decimals.
        """
        self.setParameter(parameter)
        backend = BasicAer.get_backend('statevector_simulator')
        qc = self.ansatze.circuit()
        print(qc.draw('text'))
        job = execute(qc, backend)
        result = job.result()
        return result.get_statevector(qc, decimals=3)
| [
"numpy.zeros",
"numpy.array",
"numpy.real"
] | [((3278, 3312), 'numpy.real', 'np.real', (['(1 / 2 * ppd - 1 / 2 * npd)'], {}), '(1 / 2 * ppd - 1 / 2 * npd)\n', (3285, 3312), True, 'import numpy as np\n'), ((3738, 3751), 'numpy.array', 'np.array', (['jac'], {}), '(jac)\n', (3746, 3751), True, 'import numpy as np\n'), ((5796, 5830), 'numpy.real', 'np.real', (['(1 / 2 * ppd - 1 / 2 * npd)'], {}), '(1 / 2 * ppd - 1 / 2 * npd)\n', (5803, 5830), True, 'import numpy as np\n'), ((6263, 6276), 'numpy.array', 'np.array', (['jac'], {}), '(jac)\n', (6271, 6276), True, 'import numpy as np\n'), ((6446, 6477), 'numpy.zeros', 'np.zeros', (['(2 ** self.state_scale)'], {}), '(2 ** self.state_scale)\n', (6454, 6477), True, 'import numpy as np\n'), ((10408, 10420), 'numpy.real', 'np.real', (['res'], {}), '(res)\n', (10415, 10420), True, 'import numpy as np\n'), ((10719, 10750), 'numpy.zeros', 'np.zeros', (['(2 ** self.state_scale)'], {}), '(2 ** self.state_scale)\n', (10727, 10750), True, 'import numpy as np\n'), ((12400, 12434), 'numpy.real', 'np.real', (['(1 / 2 * ppd - 1 / 2 * npd)'], {}), '(1 / 2 * ppd - 1 / 2 * npd)\n', (12407, 12434), True, 'import numpy as np\n'), ((12860, 12873), 'numpy.array', 'np.array', (['jac'], {}), '(jac)\n', (12868, 12873), True, 'import numpy as np\n'), ((15736, 15770), 'numpy.real', 'np.real', (['(1 / 2 * ppd - 1 / 2 * npd)'], {}), '(1 / 2 * ppd - 1 / 2 * npd)\n', (15743, 15770), True, 'import numpy as np\n'), ((16203, 16216), 'numpy.array', 'np.array', (['jac'], {}), '(jac)\n', (16211, 16216), True, 'import numpy as np\n'), ((16386, 16417), 'numpy.zeros', 'np.zeros', (['(2 ** self.state_scale)'], {}), '(2 ** self.state_scale)\n', (16394, 16417), True, 'import numpy as np\n'), ((9799, 9830), 'numpy.zeros', 'np.zeros', (['(2 ** self.state_scale)'], {}), '(2 ** self.state_scale)\n', (9807, 9830), True, 'import numpy as np\n'), ((13013, 13044), 'numpy.zeros', 'np.zeros', (['(2 ** self.state_scale)'], {}), '(2 ** self.state_scale)\n', (13021, 13044), True, 'import numpy as np\n'), ((14078, 14109), 
'numpy.zeros', 'np.zeros', (['(2 ** self.state_scale)'], {}), '(2 ** self.state_scale)\n', (14086, 14109), True, 'import numpy as np\n'), ((14943, 14974), 'numpy.zeros', 'np.zeros', (['(2 ** self.state_scale)'], {}), '(2 ** self.state_scale)\n', (14951, 14974), True, 'import numpy as np\n'), ((1519, 1545), 'numpy.array', 'np.array', (['self.weight_list'], {}), '(self.weight_list)\n', (1527, 1545), True, 'import numpy as np\n')] |
#=============================================================================#
# exadata #
# #
# #
# #
# Authors: <NAME>, <NAME> #
# Contacts: jcanton(at)mech.kth.se, nicolo(at)mech.kth.se #
# Last edit: 2016-01-28 #
#=============================================================================#
import numpy as np
#==============================================================================
class datalims:
    """
    datalims
    Holds (min, max) pairs for every quantity stored in the mesh: positions,
    velocities, pressure(s), temperature(s) and passive scalars.
    """
    def __init__(self, var):
        # var[2], var[3], var[4]: counts of pressure, temperature and scalar
        # fields respectively; positions and velocities always have 3 components
        counts = (3, 3, var[2], var[3], var[4])
        self.pos, self.vel, self.pres, self.temp, self.scal = (
            np.zeros((n, 2)) for n in counts)
#==============================================================================
#class elem:
# """
# elem
# A class containing one nek element/SIMSON flow field
# """
# def __init__(self, var, lr1):
# # x,y,z lz ly lx
# self.pos = np.zeros((3 , lr1[2], lr1[1], lr1[0]))
# # one per edge
# self.curv = np.zeros((12, 1))
# # u,v,w lz ly lx
# self.vel = np.zeros((3 , lr1[2], lr1[1], lr1[0]))
# # p lz ly lx
# self.pres = np.zeros((var[2], lr1[2], lr1[1], lr1[0]))
# # T lz ly lx
# self.temp = np.zeros((var[3], lr1[2], lr1[1], lr1[0]))
# # s_i lz ly lx
# self.scal = np.zeros((var[4], lr1[2], lr1[1], lr1[0]))
# # list of 8 parameters, one per face
# self.bcs = np.zeros((6), dtype='a1, i4, i4, f8, f8, f8, f8, f8')
##==============================================================================
class elem:
    """
    elem
    A class containing one nek element/SIMSON flow field.

    Arrays are laid out as (components, nel, lz, ly, lx) where lz, ly, lx come
    from lr1[2], lr1[1], lr1[0].
    """
    def __init__(self, var, nel, lr1):
        # unpack per-direction polynomial sizes once
        nz, ny, nx = lr1[2], lr1[1], lr1[0]
        grid = (nel, nz, ny, nx)
        # x, y, z coordinates
        self.pos = np.zeros((3,) + grid)
        # one curvature entry per edge
        self.curv = np.zeros((12, 1))
        # u, v, w velocity components
        self.vel = np.zeros((3,) + grid)
        # pressure field(s)
        self.pres = np.zeros((var[2],) + grid)
        # temperature field(s)
        self.temp = np.zeros((var[3],) + grid)
        # passive scalar fields
        self.scal = np.zeros((var[4],) + grid)
        # 8 boundary-condition parameters for each of the 6 faces
        self.bcs = np.zeros(6, dtype='a1, i4, i4, f8, f8, f8, f8, f8')
#==============================================================================
class exadata:
    """
    data
    Container for reading/writing binary simulation files: mesh metadata plus
    per-quantity limits (``datalims``) and the element data itself (``elem``).
    """
    def __init__(self, ndim, nel, lr1, var):
        # mesh description
        self.ndim = ndim
        self.nel = nel
        self.ncurv = []
        self.var = var
        self.lr1 = lr1
        # per-file metadata, filled in while reading
        self.time = []
        self.istep = []
        self.wdsz = []
        self.endian = []
        # derived containers
        self.lims = datalims(var)
        self.elems = elem(var, nel, lr1)
| [
"numpy.zeros"
] | [((995, 1011), 'numpy.zeros', 'np.zeros', (['(3, 2)'], {}), '((3, 2))\n', (1003, 1011), True, 'import numpy as np\n'), ((1070, 1086), 'numpy.zeros', 'np.zeros', (['(3, 2)'], {}), '((3, 2))\n', (1078, 1086), True, 'import numpy as np\n'), ((1145, 1166), 'numpy.zeros', 'np.zeros', (['(var[2], 2)'], {}), '((var[2], 2))\n', (1153, 1166), True, 'import numpy as np\n'), ((1220, 1241), 'numpy.zeros', 'np.zeros', (['(var[3], 2)'], {}), '((var[3], 2))\n', (1228, 1241), True, 'import numpy as np\n'), ((1295, 1316), 'numpy.zeros', 'np.zeros', (['(var[4], 2)'], {}), '((var[4], 2))\n', (1303, 1316), True, 'import numpy as np\n'), ((2574, 2616), 'numpy.zeros', 'np.zeros', (['(3, nel, lr1[2], lr1[1], lr1[0])'], {}), '((3, nel, lr1[2], lr1[1], lr1[0]))\n', (2582, 2616), True, 'import numpy as np\n'), ((2672, 2689), 'numpy.zeros', 'np.zeros', (['(12, 1)'], {}), '((12, 1))\n', (2680, 2689), True, 'import numpy as np\n'), ((2763, 2805), 'numpy.zeros', 'np.zeros', (['(3, nel, lr1[2], lr1[1], lr1[0])'], {}), '((3, nel, lr1[2], lr1[1], lr1[0]))\n', (2771, 2805), True, 'import numpy as np\n'), ((2893, 2940), 'numpy.zeros', 'np.zeros', (['(var[2], nel, lr1[2], lr1[1], lr1[0])'], {}), '((var[2], nel, lr1[2], lr1[1], lr1[0]))\n', (2901, 2940), True, 'import numpy as np\n'), ((3023, 3070), 'numpy.zeros', 'np.zeros', (['(var[3], nel, lr1[2], lr1[1], lr1[0])'], {}), '((var[3], nel, lr1[2], lr1[1], lr1[0]))\n', (3031, 3070), True, 'import numpy as np\n'), ((3153, 3200), 'numpy.zeros', 'np.zeros', (['(var[4], nel, lr1[2], lr1[1], lr1[0])'], {}), '((var[4], nel, lr1[2], lr1[1], lr1[0]))\n', (3161, 3200), True, 'import numpy as np\n'), ((3273, 3324), 'numpy.zeros', 'np.zeros', (['(6)'], {'dtype': '"""a1, i4, i4, f8, f8, f8, f8, f8"""'}), "(6, dtype='a1, i4, i4, f8, f8, f8, f8, f8')\n", (3281, 3324), True, 'import numpy as np\n')] |
from typing import Any, Dict, List, Optional, Union
from copy import copy
from random import randint
import datetime as dt
import re
from lunchbox.enforce import Enforce
from pandas import DataFrame, DatetimeIndex
from schematics.exceptions import DataError
import cufflinks as cf # noqa: F401
import lunchbox.tools as lbt
import numpy as np
import pandasql
import pyparsing as pp
import rolling_pin.blob_etl as rpb
import webcolors
from shekels.core.config import ConformAction
import shekels.core.config as cfg
import shekels.enforce.enforce_tools as eft
# ------------------------------------------------------------------------------
# Maps assorted hex colors (e.g. from default plot palettes) onto this
# project's standard palette; used by conform_figure to coerce figure colors.
COLOR_COERCION_LUT = {
    '#00CC96': '#5F95DE',
    '#0D0887': '#444459',
    '#19D3F3': '#5F95DE',
    '#242424': '#242424',
    '#276419': '#343434',
    '#2A3F5F': '#444459',
    '#343434': '#343434',
    '#444444': '#444444',
    '#46039F': '#444459',
    '#4D9221': '#444444',
    '#636EFA': '#5F95DE',
    '#7201A8': '#5D5D7A',
    '#7FBC41': '#8BD155',
    '#8E0152': '#444459',
    '#9C179E': '#5D5D7A',
    '#A4A4A4': '#A4A4A4',
    '#AB63FA': '#AC92DE',
    '#B6E880': '#A0D17B',
    '#B6ECF3': '#B6ECF3',
    '#B8E186': '#A0D17B',
    '#BD3786': '#F77E70',
    '#C51B7D': '#F77E70',
    '#C8D4E3': '#B6ECF3',
    '#D8576B': '#F77E70',
    '#DE77AE': '#DE958E',
    '#DE958E': '#DE958E',
    '#E5ECF6': '#F4F4F4',
    '#E6F5D0': '#E9EABE',
    '#EBF0F8': '#F4F4F4',
    '#ED7953': '#F77E70',
    '#EF553B': '#F77E70',
    '#F0F921': '#E8EA7E',
    '#F1B6DA': '#C98FDE',
    '#F4F4F4': '#F4F4F4',
    '#F7F7F7': '#F4F4F4',
    '#FB9F3A': '#EB9E58',
    '#FDCA26': '#EB9E58',
    '#FDE0EF': '#F4F4F4',
    '#FECB52': '#EB9E58',
    '#FF6692': '#F77E70',
    '#FF97FF': '#C98FDE',
    '#FFA15A': '#EB9E58',
}
def conform(data, actions=[], columns=[]):
    # type: (DataFrame, List[dict], List[str]) -> DataFrame
    '''
    Conform given mint transaction data.
    Args:
        data (DataFrame): Mint transactions DataFrame.
        actions (list[dict], optional): List of conform actions. Default: [].
        columns (list[str], optional): List of columns. Default: [].
    Raises:
        DataError: If invalid conform action given.
        ValueError: If source column not found in data columns.
    Returns:
        DataFrame: Conformed DataFrame.
    '''
    # NOTE(review): the mutable default arguments are never mutated here, so
    # they are harmless, but None defaults would be more conventional.
    # validate every action up front, before any data is mutated
    for action in actions:
        ConformAction(action).validate()
    # normalize column names to snake_case, then apply fixed renames
    data.rename(lbt.to_snakecase, axis=1, inplace=True)
    lut = dict(
        account_name='account',
        transaction_type='type'
    )
    data.rename(lambda x: lut.get(x, x), axis=1, inplace=True)
    # coerce column types and normalize categorical string values
    data.date = DatetimeIndex(data.date)
    data.amount = data.amount.astype(float)
    data.category = data.category \
        .apply(lambda x: re.sub('&', 'and', lbt.to_snakecase(x)))
    data.account = data.account.apply(lbt.to_snakecase)
    for action in actions:
        source = action['source_column']
        if source not in data.columns:
            msg = f'Source column {source} not found in columns. '
            msg += f'Legal columns include: {data.columns.tolist()}.'
            raise ValueError(msg)
        target = action['target_column']
        # create the target column if it does not exist yet
        if target not in data.columns:
            data[target] = None
        for regex, val in action['mapping'].items():
            # overwrite: set target to val wherever source matches regex
            if action['action'] == 'overwrite':
                mask = data[source] \
                    .apply(lambda x: re.search(regex, x, flags=re.I)).astype(bool)
                data.loc[mask, target] = val
            # substitute: regex-replace within source values into target
            elif action['action'] == 'substitute':
                data[target] = data[source] \
                    .apply(lambda x: re.sub(regex, val, str(x), flags=re.I))
    # optionally restrict the output to the requested columns
    if columns != []:
        data = data[columns]
    return data
def filter_data(data, column, comparator, value):
    # type: (DataFrame, str, str, Any) -> DataFrame
    '''
    Keeps only the rows of data for which comparator(column value, value) holds.
    Legal comparators:
        * == ``lambda a, b: a == b``
        * != ``lambda a, b: a != b``
        * > ``lambda a, b: a > b``
        * >= ``lambda a, b: a >= b``
        * < ``lambda a, b: a < b``
        * =< ``lambda a, b: a <= b``
        * ~ ``lambda a, b: bool(re.search(a, b, flags=re.I))``
        * !~ ``lambda a, b: not bool(re.search(a, b, flags=re.I))``
    Args:
        data (DataFrame): DataFrame to be filtered.
        column (str): Column name.
        comparator (str): String representation of comparator.
        value (object): Value to be compared.
    Raises:
        EnforceError: If data is not a DataFrame.
        EnforceError: If column is not a string.
        EnforceError: If column not in data columns.
        EnforceError: If illegal comparator given.
        EnforceError: If comparator is ~ or !~ and value is not a string.
    Returns:
        DataFrame: Filtered data.
    '''
    Enforce(data, 'instance of', DataFrame)
    msg = 'Column must be a str. {a} is not str.'
    Enforce(column, 'instance of', str, message=msg)
    eft.enforce_columns_in_dataframe([column], data)

    def _matches(a, b):
        # case-insensitive regex search of b within a
        return bool(re.search(b, a, flags=re.I))

    comparators = {
        '==': lambda a, b: a == b,
        '!=': lambda a, b: a != b,
        '>': lambda a, b: a > b,
        '>=': lambda a, b: a >= b,
        '<': lambda a, b: a < b,
        '<=': lambda a, b: a <= b,
        '~': _matches,
        '!~': lambda a, b: not _matches(a, b),
    }
    msg = 'Illegal comparator. {a} not in [==, !=, >, >=, <, <=, ~, !~].'
    Enforce(comparator, 'in', comparators.keys(), message=msg)
    if comparator in ('~', '!~'):
        msg = 'Value must be string if comparator is ~ or !~. {a} is not str.'
        Enforce(value, 'instance of', str, message=msg)
    # --------------------------------------------------------------------------
    predicate = comparators[comparator]
    mask = data[column].apply(lambda x: predicate(x, value))
    return data[mask]
def group_data(data, columns, metric, datetime_column='date'):
    # type: (DataFrame, Union[str, List[str]], str, str) -> DataFrame
    '''
    Groups given data by given columns according to given metric.
    If a legal time interval is given in the columns, then an additional special
    column of that same name is added to the data for grouping.
    Legal metrics:
        * max ``lambda x: x.max()``
        * mean ``lambda x: x.mean()``
        * min ``lambda x: x.min()``
        * std ``lambda x: x.std()``
        * sum ``lambda x: x.sum()``
        * var ``lambda x: x.var()``
        * count ``lambda x: x.count()``
    Legal time intervals:
        * year
        * quarter
        * month
        * two_week
        * week
        * day
        * hour
        * half_hour
        * quarter_hour
        * minute
        * second
        * microsecond
    Args:
        data (DataFrame): DataFrame to be grouped.
        columns (str or list[str]): Columns to group data by.
        metric (str): String representation of metric.
        datetime_column (str, optional): Datetime column for time grouping.
            Default: date.
    Raises:
        EnforceError: If data is not a DataFrame.
        EnforceError: If columns not in data columns.
        EnforceError: If illegal metric given.
        EnforceError: If time interval in columns and datetime_column not in
            columns.
    Returns:
        DataFrame: Grouped data.
    '''
    # luts
    met_lut = {
        'max': lambda x: x.max(),
        'mean': lambda x: x.mean(),
        'min': lambda x: x.min(),
        'std': lambda x: x.std(),
        'sum': lambda x: x.sum(),
        'var': lambda x: x.var(),
        'count': lambda x: x.count(),
    }
    # each entry snaps a datetime down to the start of its interval
    time_lut = {
        'year': lambda x: dt.datetime(x.year, 1, 1),
        'quarter': lambda x: dt.datetime(
            x.year, int(np.ceil(x.month / 3) * 3 - 2), 1
        ),
        'month': lambda x: dt.datetime(x.year, x.month, 1),
        'two_week': lambda x: dt.datetime(
            x.year, x.month, min(int(np.ceil(x.day / 14) * 14 - 13), 28)
        ),
        # NOTE(review): 'week' buckets on x.month, unlike 'two_week' which uses
        # x.day — this looks like a bug (expected x.day); confirm intent.
        'week': lambda x: dt.datetime(
            x.year, x.month, max(1, min([int(x.month / 7) * 7, 28]))
        ),
        'day': lambda x: dt.datetime(x.year, x.month, x.day),
        'hour': lambda x: dt.datetime(x.year, x.month, x.day, x.hour),
        'half_hour': lambda x: dt.datetime(
            x.year, x.month, x.day, x.hour, int(x.minute / 30) * 30
        ),
        'quarter_hour': lambda x: dt.datetime(
            x.year, x.month, x.day, x.hour, int(x.minute / 15) * 15
        ),
        'minute': lambda x: dt.datetime(
            x.year, x.month, x.day, x.hour, x.minute
        ),
        'second': lambda x: dt.datetime(
            x.year, x.month, x.day, x.hour, x.minute, x.second
        ),
        'microsecond': lambda x: dt.datetime(
            x.year, x.month, x.day, x.hour, x.minute, x.second, x.microsecond
        ),
    }
    # --------------------------------------------------------------------------
    # enforcements
    Enforce(data, 'instance of', DataFrame)
    columns_ = columns  # type: Any
    if type(columns_) != list:
        columns_ = [columns_]
    # cols: the requested group columns that are real data columns (not time
    # intervals); only these are required to exist in data
    cols = list(filter(lambda x: x not in time_lut.keys(), columns_))
    eft.enforce_columns_in_dataframe(cols, data)
    msg = '{a} is not a legal metric. Legal metrics: {b}.'
    Enforce(metric, 'in', sorted(list(met_lut.keys())), message=msg)
    # time column
    if len(columns_) > len(cols):
        eft.enforce_columns_in_dataframe([datetime_column], data)
        msg = 'Datetime column of type {a}, it must be of type {b}.'
        Enforce(
            data[datetime_column].dtype.type, '==', np.datetime64, message=msg
        )
    # --------------------------------------------------------------------------
    # materialize each requested time interval as a real column before grouping
    for col in columns_:
        if col in time_lut.keys():
            op = time_lut[col]
            data[col] = data[datetime_column].apply(op)
    agg = met_lut[metric]
    cols = data.columns.tolist()
    grp = data.groupby(columns_, as_index=False)
    output = agg(grp)
    # get first value for columns that cannot be computed by given metric
    diff = set(cols).difference(output.columns.tolist())
    if len(diff) > 0:
        first = grp.first()
        for col in diff:
            output[col] = first[col]
    return output
def pivot_data(data, columns, values=[], index=None):
    # type: (DataFrame, List[str], List[str], Optional[str]) -> DataFrame
    '''
    Pivots a given dataframe via a list of columns.
    Legal time columns:
        * date
        * year
        * quarter
        * month
        * two_week
        * week
        * day
        * hour
        * half_hour
        * quarter_hour
        * minute
        * second
        * microsecond
    Args:
        data (DataFrame): DataFrame to be pivoted.
        columns (list[str]): Columns whose unique values become separate traces
            within a plot.
        values (list[str], optional): Columns whose values become the values
            within each trace of a plot. Default: [].
        index (str, optional): Column whose values become the y axis values of a
            plot. Default: None.
    Raises:
        EnforceError: If data is not a DataFrame.
        EnforceError: If data is of zero length.
        EnforceError: If columns not in data columns.
        EnforceError: If values not in data columns.
        EnforceError: If index not in data columns or legal time columns.
    Returns:
        DataFrame: Pivoted data.
    '''
    time_cols = [
        'date', 'year', 'quarter', 'month', 'two_week', 'week', 'day', 'hour',
        'half_hour', 'quarter_hour', 'minute', 'second', 'microsecond',
    ]
    Enforce(data, 'instance of', DataFrame)
    msg = 'DataFrame must be at least 1 in length. Given length: {a}.'
    Enforce(len(data), '>=', 1, message=msg)
    eft.enforce_columns_in_dataframe(columns, data)
    eft.enforce_columns_in_dataframe(values, data)
    if index is not None:
        msg = '{a} is not in legal column names: {b}.'
        Enforce(index, 'in', data.columns.tolist() + time_cols, message=msg)
    # --------------------------------------------------------------------------
    # the index column must also be pivoted, so add it to the value columns
    vals = copy(values)
    if index is not None and index not in values:
        vals.append(index)
    if index in time_cols:
        # add random sub-second jitter to each timestamp — presumably to make
        # index values unique so DataFrame.pivot does not fail on duplicates;
        # TODO confirm. This makes the output non-deterministic at µs scale.
        data[index] = data[index] \
            .apply(lambda x: x + dt.timedelta(microseconds=randint(0, 999999)))
    data = data.pivot(columns=columns, values=vals, index=index)
    # keep only the requested value columns and flatten the column MultiIndex
    data = data[values]
    data.columns = data.columns.droplevel(0)
    return data
def get_figure(
    data,  # type: DataFrame
    filters=[],  # type: List[dict]
    group=None,  # type: Optional[dict]
    pivot=None,  # type: Optional[dict]
    kind='bar',  # type: str
    color_scheme={},  # type: Dict[str, str]
    x_axis=None,  # type: Optional[str]
    y_axis=None,  # type: Optional[str]
    title=None,  # type: Optional[str]
    x_title=None,  # type: Optional[str]
    y_title=None,  # type: Optional[str]
    bins=50,  # type: int
    bar_mode='stack',  # type: str
):
    '''
    Generates a plotly figure dictionary from given data and manipulations.
    Data is transformed in order: filter --> group --> pivot --> plot.
    Args:
        data (DataFrame): Data.
        filters (list[dict], optional): List of filters for data. Default: [].
        group (dict, optional): Grouping operation. Default: None.
        pivot (dict, optional): Pivot operation. Default: None.
        kind (str, optional): Kind of plot. Default: bar.
        color_scheme (dict[str, str], optional): Color scheme. Default: {}.
        x_axis (str): Column to use as x axis: Default: None.
        y_axis (str): Column to use as y axis: Default: None.
        title (str, optional): Title of plot. Default: None.
        x_title (str, optional): Title of x axis. Default: None.
        y_title (str, optional): Title of y axis. Default: None.
        bins (int, optional): Number of bins if histogram. Default: 50.
        bar_mode (str, optional): How bars in bar graph are presented.
            Default: stack.
    Raises:
        DataError: If any filter in filters is invalid.
        DataError: If group is invalid.
        DataError: If pivot is invalid.
    Returns:
        dict: Plotly Figure as dictionary.
    '''
    # work on a copy so the caller's DataFrame is never mutated
    data = data.copy()
    # filter
    for f in filters:
        # validate each filter, re-raising with a labeled error payload
        f = cfg.FilterAction(f)
        try:
            f.validate()
        except DataError as e:
            raise DataError({'Invalid filter': e.to_primitive()})
        f = f.to_primitive()
        # stop filtering once no rows remain
        if len(data) == 0:
            break
        data = filter_data(data, f['column'], f['comparator'], f['value'])
    # group
    if group is not None:
        grp = group  # type: Any
        grp = cfg.GroupAction(grp)
        try:
            grp.validate()
        except DataError as e:
            raise DataError({'Invalid group': e.to_primitive()})
        grp = grp.to_primitive()
        data = group_data(
            data,
            grp['columns'],
            grp['metric'],
            datetime_column=grp['datetime_column'],
        )
    # pivot
    if pivot is not None:
        pvt = pivot  # type: Any
        pvt = cfg.PivotAction(pvt)
        try:
            pvt.validate()
        except DataError as e:
            raise DataError({'Invalid pivot': e.to_primitive()})
        pvt = pvt.to_primitive()
        data = pivot_data(
            data, pvt['columns'], values=pvt['values'], index=pvt['index']
        )
    # create figure via cufflinks' DataFrame.iplot
    figure = data.iplot(
        kind=kind, asFigure=True, theme='henanigans', colorscale='henanigans',
        x=x_axis, y=y_axis, title=title, xTitle=x_title, yTitle=y_title,
        barmode=bar_mode, bins=bins
    ).to_dict()  # type: dict
    # force light title/axis text on the dark theme
    figure['layout']['title']['font']['color'] = '#F4F4F4'
    figure['layout']['xaxis']['title']['font']['color'] = '#F4F4F4'
    figure['layout']['yaxis']['title']['font']['color'] = '#F4F4F4'
    if color_scheme != {}:
        figure = conform_figure(figure, color_scheme)
    return figure
def parse_rgba(string):
    '''
    Converts an ``rgb(...)`` or ``rgba(...)`` color string into a tuple of
    numbers.
    Example:
        >>>parse_rgba('rgb(255, 0, 0)')
        (255, 0, 0)
        >>>parse_rgba('rgba(255, 0, 0, 0.5)')
        (255, 0, 0, 0.5)
        >>>parse_rgba('foo') is None
        True
    Args:
        string (str): String to be parsed.
    Returns:
        tuple: (red, green, blue) as ints, plus a trailing float alpha channel
        for rgba strings, or None when the string does not match.
    '''
    match = re.search(r'rgba?\((\d+, \d+, \d+(, \d+\.?\d*)?)\)', string)
    if match is None:
        return None
    parts = match.group(1).split(', ')
    # three channels: plain rgb, all integers
    if len(parts) == 3:
        return tuple(int(p) for p in parts)
    # four channels: rgba, alpha parsed as float
    channels = [int(p) for p in parts[:-1]]
    channels.append(float(parts[-1]))
    return tuple(channels)
def conform_figure(figure, color_scheme):
    '''
    Conforms given figure to use given color scheme.
    Args:
        figure (dict): Plotly figure.
        color_scheme (dict): Color scheme dictionary.
    Returns:
        dict: Conformed figure.
    '''
    # create hex to hex lut: standard scheme color -> caller's override
    lut = {}
    for key, val in cfg.COLOR_SCHEME.items():
        if key in color_scheme:
            lut[val] = color_scheme[key]
    # rgba? to hex --> coerce to standard colors --> coerce with color_scheme
    figure = rpb.BlobETL(figure) \
        .set(
            predicate=lambda k, v: isinstance(v, str) and 'rgb' in v,
            value_setter=lambda k, v: webcolors.rgb_to_hex(parse_rgba(v)[:3]).upper()) \
        .set(
            predicate=lambda k, v: isinstance(v, str),
            value_setter=lambda k, v: COLOR_COERCION_LUT.get(v, v)) \
        .set(
            predicate=lambda k, v: isinstance(v, str),
            value_setter=lambda k, v: lut.get(v, v)) \
        .to_dict()
    return figure
# SQL-PARSING-------------------------------------------------------------------
def get_sql_grammar():
    '''
    Creates a pyparsing grammar for parsing SQL queries with optional regex
    operators (~, regex, !~, not regex).
    Returns:
        MatchFirst: SQL parser.
    '''
    # SELECT keyword, normalized to the literal token 'select'
    select = pp.Regex('select', flags=re.I) \
        .setParseAction(lambda s, l, t: 'select') \
        .setResultsName('operator')
    from_ = pp.Suppress(pp.Regex('from', flags=re.I))
    table = (from_ + pp.Regex('[a-z]+', flags=re.I)) \
        .setParseAction(lambda s, l, t: t[0]) \
        .setResultsName('table')
    # '~' / 'regex' normalized to '~'; '!~' / 'not regex' normalized to '!~'
    regex = pp.Regex('~|regex').setParseAction(lambda s, l, t: '~')
    not_regex = pp.Regex('!~|not regex').setParseAction(lambda s, l, t: '!~')
    # fallback: any non-space token is accepted as an operator
    any_op = pp.Regex('[^ ]*')
    operator = pp.Or([not_regex, regex, any_op]).setResultsName('operator')
    # optionally single-quoted value
    quote = pp.Suppress(pp.Optional("'"))
    value = (quote + pp.Regex('[^\']+', flags=re.I) + quote) \
        .setResultsName('value') \
        .setParseAction(lambda s, l, t: t[0])
    columns = pp.delimitedList(pp.Regex('[^, ]*'), delim=pp.Regex(', *')) \
        .setResultsName('display_columns')
    column = pp.Regex('[a-z]+', flags=re.I).setResultsName('column')
    # a query string is either a select head or a single conditional clause
    conditional = column + operator + value
    head = select + columns + table
    grammar = head | conditional
    return grammar
def query_data(data, query):
    '''
    Parses SQL + regex query and applies it to given data.
    Regex operators:
        * ~, regex - Match regular expression
        * !~, not regex - Do not match regular expression
    Args:
        data (DataFrame): DataFrame to be queried.
        query (str): SQL query that may include regex operators.
    Returns:
        DataFrame: Data filtered by query.
    '''
    # split queries by where/and/or
    queries = re.split(' where | and | or ', query, flags=re.I)
    # detect whether any sub query has a regex operator
    has_regex = False
    for q in queries:
        if re.search(' regex | ~ | !~ ', q, flags=re.I):
            has_regex = True
            break
    # if no regex operator is found just submit query to pandasql
    if not has_regex:
        data = pandasql.sqldf(query, {'data': data})
    else:
        grammar = get_sql_grammar()
        # move select statement to end
        if 'select' in queries[0]:
            q = queries.pop(0)
            queries.append(q)
        # apply each clause in sequence, narrowing data as we go
        for q in queries:
            # get column, operator and value
            parse = grammar.parseString(q).asDict()
            op = parse['operator']
            # initial select statement
            if op == 'select':
                data = pandasql.sqldf(q, {'data': data})
            # regex search
            elif op == '~':
                mask = data[parse['column']] \
                    .astype(str) \
                    .apply(lambda x: re.search(parse['value'], x, flags=re.I)) \
                    .astype(bool)
                data = data[mask]
            # regex not search
            elif op == '!~':
                mask = data[parse['column']] \
                    .astype(str) \
                    .apply(lambda x: re.search(parse['value'], x, flags=re.I)) \
                    .astype(bool)
                data = data[~mask]
            # other SQL query: re-wrapped as a standalone where clause
            else:
                data = pandasql.sqldf('select * from data where ' + q, {'data': data})
            # nothing left to narrow; stop early
            if len(data) == 0:
                break
    return data
def query_dict(data, query):
    # type: (dict, str) -> dict
    '''
    Query a given dictionary with a given SQL query.
    Args:
        data (dict): Dictionary to be queried.
        query (str): SQL query.
    Returns:
        dict: Queried dictionary.
    '''
    data_ = data  # type: Any
    # flatten the nested dict into (key, value) pairs
    data_ = rpb.BlobETL(data_) \
        .to_flat_dict() \
        .items()
    data_ = DataFrame(list(data_), columns=['key', 'value'])
    # run the SQL/regex query against the two-column frame
    data_ = query_data(data_, query)
    data_ = dict(zip(data_.key.tolist(), data_.value.tolist()))
    # re-nest the surviving flat keys back into a dictionary
    data_ = rpb.BlobETL(data_).to_dict()
    return data_
| [
"pandas.DatetimeIndex",
"lunchbox.enforce.Enforce",
"shekels.core.config.PivotAction",
"pyparsing.Optional",
"random.randint",
"pyparsing.Or",
"shekels.core.config.COLOR_SCHEME.items",
"re.search",
"pandasql.sqldf",
"rolling_pin.blob_etl.BlobETL",
"re.split",
"numpy.ceil",
"shekels.enforce.e... | [((2609, 2633), 'pandas.DatetimeIndex', 'DatetimeIndex', (['data.date'], {}), '(data.date)\n', (2622, 2633), False, 'from pandas import DataFrame, DatetimeIndex\n'), ((4844, 4883), 'lunchbox.enforce.Enforce', 'Enforce', (['data', '"""instance of"""', 'DataFrame'], {}), "(data, 'instance of', DataFrame)\n", (4851, 4883), False, 'from lunchbox.enforce import Enforce\n'), ((4938, 4986), 'lunchbox.enforce.Enforce', 'Enforce', (['column', '"""instance of"""', 'str'], {'message': 'msg'}), "(column, 'instance of', str, message=msg)\n", (4945, 4986), False, 'from lunchbox.enforce import Enforce\n'), ((4991, 5039), 'shekels.enforce.enforce_tools.enforce_columns_in_dataframe', 'eft.enforce_columns_in_dataframe', (['[column]', 'data'], {}), '([column], data)\n', (5023, 5039), True, 'import shekels.enforce.enforce_tools as eft\n'), ((8983, 9022), 'lunchbox.enforce.Enforce', 'Enforce', (['data', '"""instance of"""', 'DataFrame'], {}), "(data, 'instance of', DataFrame)\n", (8990, 9022), False, 'from lunchbox.enforce import Enforce\n'), ((9195, 9239), 'shekels.enforce.enforce_tools.enforce_columns_in_dataframe', 'eft.enforce_columns_in_dataframe', (['cols', 'data'], {}), '(cols, data)\n', (9227, 9239), True, 'import shekels.enforce.enforce_tools as eft\n'), ((11668, 11707), 'lunchbox.enforce.Enforce', 'Enforce', (['data', '"""instance of"""', 'DataFrame'], {}), "(data, 'instance of', DataFrame)\n", (11675, 11707), False, 'from lunchbox.enforce import Enforce\n'), ((11828, 11875), 'shekels.enforce.enforce_tools.enforce_columns_in_dataframe', 'eft.enforce_columns_in_dataframe', (['columns', 'data'], {}), '(columns, data)\n', (11860, 11875), True, 'import shekels.enforce.enforce_tools as eft\n'), ((11880, 11926), 'shekels.enforce.enforce_tools.enforce_columns_in_dataframe', 'eft.enforce_columns_in_dataframe', (['values', 'data'], {}), '(values, data)\n', (11912, 11926), True, 'import shekels.enforce.enforce_tools as eft\n'), ((12178, 12190), 'copy.copy', 
'copy', (['values'], {}), '(values)\n', (12182, 12190), False, 'from copy import copy\n'), ((16505, 16572), 're.search', 're.search', (['"""rgba?\\\\((\\\\d+, \\\\d+, \\\\d+(, \\\\d+\\\\.?\\\\d*)?)\\\\)"""', 'string'], {}), "('rgba?\\\\((\\\\d+, \\\\d+, \\\\d+(, \\\\d+\\\\.?\\\\d*)?)\\\\)', string)\n", (16514, 16572), False, 'import re\n'), ((16652, 16674), 're.split', 're.split', (['""", """', 'result'], {}), "(', ', result)\n", (16660, 16674), False, 'import re\n'), ((17195, 17219), 'shekels.core.config.COLOR_SCHEME.items', 'cfg.COLOR_SCHEME.items', ([], {}), '()\n', (17217, 17219), True, 'import shekels.core.config as cfg\n'), ((18578, 18595), 'pyparsing.Regex', 'pp.Regex', (['"""[^ ]*"""'], {}), "('[^ ]*')\n", (18586, 18595), True, 'import pyparsing as pp\n'), ((19645, 19694), 're.split', 're.split', (['""" where | and | or """', 'query'], {'flags': 're.I'}), "(' where | and | or ', query, flags=re.I)\n", (19653, 19694), False, 'import re\n'), ((5643, 5690), 'lunchbox.enforce.Enforce', 'Enforce', (['value', '"""instance of"""', 'str'], {'message': 'msg'}), "(value, 'instance of', str, message=msg)\n", (5650, 5690), False, 'from lunchbox.enforce import Enforce\n'), ((9430, 9487), 'shekels.enforce.enforce_tools.enforce_columns_in_dataframe', 'eft.enforce_columns_in_dataframe', (['[datetime_column]', 'data'], {}), '([datetime_column], data)\n', (9462, 9487), True, 'import shekels.enforce.enforce_tools as eft\n'), ((9565, 9640), 'lunchbox.enforce.Enforce', 'Enforce', (['data[datetime_column].dtype.type', '"""=="""', 'np.datetime64'], {'message': 'msg'}), "(data[datetime_column].dtype.type, '==', np.datetime64, message=msg)\n", (9572, 9640), False, 'from lunchbox.enforce import Enforce\n'), ((14377, 14396), 'shekels.core.config.FilterAction', 'cfg.FilterAction', (['f'], {}), '(f)\n', (14393, 14396), True, 'import shekels.core.config as cfg\n'), ((14768, 14788), 'shekels.core.config.GroupAction', 'cfg.GroupAction', (['grp'], {}), '(grp)\n', (14783, 14788), True, 
'import shekels.core.config as cfg\n'), ((15207, 15227), 'shekels.core.config.PivotAction', 'cfg.PivotAction', (['pvt'], {}), '(pvt)\n', (15222, 15227), True, 'import shekels.core.config as cfg\n'), ((18253, 18281), 'pyparsing.Regex', 'pp.Regex', (['"""from"""'], {'flags': 're.I'}), "('from', flags=re.I)\n", (18261, 18281), True, 'import pyparsing as pp\n'), ((18696, 18712), 'pyparsing.Optional', 'pp.Optional', (['"""\'"""'], {}), '("\'")\n', (18707, 18712), True, 'import pyparsing as pp\n'), ((19807, 19851), 're.search', 're.search', (['""" regex | ~ | !~ """', 'q'], {'flags': 're.I'}), "(' regex | ~ | !~ ', q, flags=re.I)\n", (19816, 19851), False, 'import re\n'), ((20004, 20041), 'pandasql.sqldf', 'pandasql.sqldf', (['query', "{'data': data}"], {}), "(query, {'data': data})\n", (20018, 20041), False, 'import pandasql\n'), ((7692, 7717), 'datetime.datetime', 'dt.datetime', (['x.year', '(1)', '(1)'], {}), '(x.year, 1, 1)\n', (7703, 7717), True, 'import datetime as dt\n'), ((7856, 7887), 'datetime.datetime', 'dt.datetime', (['x.year', 'x.month', '(1)'], {}), '(x.year, x.month, 1)\n', (7867, 7887), True, 'import datetime as dt\n'), ((8160, 8195), 'datetime.datetime', 'dt.datetime', (['x.year', 'x.month', 'x.day'], {}), '(x.year, x.month, x.day)\n', (8171, 8195), True, 'import datetime as dt\n'), ((8223, 8266), 'datetime.datetime', 'dt.datetime', (['x.year', 'x.month', 'x.day', 'x.hour'], {}), '(x.year, x.month, x.day, x.hour)\n', (8234, 8266), True, 'import datetime as dt\n'), ((8545, 8598), 'datetime.datetime', 'dt.datetime', (['x.year', 'x.month', 'x.day', 'x.hour', 'x.minute'], {}), '(x.year, x.month, x.day, x.hour, x.minute)\n', (8556, 8598), True, 'import datetime as dt\n'), ((8650, 8713), 'datetime.datetime', 'dt.datetime', (['x.year', 'x.month', 'x.day', 'x.hour', 'x.minute', 'x.second'], {}), '(x.year, x.month, x.day, x.hour, x.minute, x.second)\n', (8661, 8713), True, 'import datetime as dt\n'), ((8770, 8848), 'datetime.datetime', 'dt.datetime', (['x.year', 
'x.month', 'x.day', 'x.hour', 'x.minute', 'x.second', 'x.microsecond'], {}), '(x.year, x.month, x.day, x.hour, x.minute, x.second, x.microsecond)\n', (8781, 8848), True, 'import datetime as dt\n'), ((18431, 18450), 'pyparsing.Regex', 'pp.Regex', (['"""~|regex"""'], {}), "('~|regex')\n", (18439, 18450), True, 'import pyparsing as pp\n'), ((18503, 18527), 'pyparsing.Regex', 'pp.Regex', (['"""!~|not regex"""'], {}), "('!~|not regex')\n", (18511, 18527), True, 'import pyparsing as pp\n'), ((18611, 18644), 'pyparsing.Or', 'pp.Or', (['[not_regex, regex, any_op]'], {}), '([not_regex, regex, any_op])\n', (18616, 18644), True, 'import pyparsing as pp\n'), ((18990, 19020), 'pyparsing.Regex', 'pp.Regex', (['"""[a-z]+"""'], {'flags': 're.I'}), "('[a-z]+', flags=re.I)\n", (18998, 19020), True, 'import pyparsing as pp\n'), ((21844, 21862), 'rolling_pin.blob_etl.BlobETL', 'rpb.BlobETL', (['data_'], {}), '(data_)\n', (21855, 21862), True, 'import rolling_pin.blob_etl as rpb\n'), ((2354, 2375), 'shekels.core.config.ConformAction', 'ConformAction', (['action'], {}), '(action)\n', (2367, 2375), False, 'from shekels.core.config import ConformAction\n'), ((2758, 2777), 'lunchbox.tools.to_snakecase', 'lbt.to_snakecase', (['x'], {}), '(x)\n', (2774, 2777), True, 'import lunchbox.tools as lbt\n'), ((5290, 5317), 're.search', 're.search', (['b', 'a'], {'flags': 're.I'}), '(b, a, flags=re.I)\n', (5299, 5317), False, 'import re\n'), ((18889, 18907), 'pyparsing.Regex', 'pp.Regex', (['"""[^, ]*"""'], {}), "('[^, ]*')\n", (18897, 18907), True, 'import pyparsing as pp\n'), ((20478, 20511), 'pandasql.sqldf', 'pandasql.sqldf', (['q', "{'data': data}"], {}), "(q, {'data': data})\n", (20492, 20511), False, 'import pandasql\n'), ((5356, 5383), 're.search', 're.search', (['b', 'a'], {'flags': 're.I'}), '(b, a, flags=re.I)\n', (5365, 5383), False, 'import re\n'), ((18108, 18138), 'pyparsing.Regex', 'pp.Regex', (['"""select"""'], {'flags': 're.I'}), "('select', flags=re.I)\n", (18116, 18138), True, 
'import pyparsing as pp\n'), ((18915, 18930), 'pyparsing.Regex', 'pp.Regex', (['""", *"""'], {}), "(', *')\n", (18923, 18930), True, 'import pyparsing as pp\n'), ((21606, 21624), 'rolling_pin.blob_etl.BlobETL', 'rpb.BlobETL', (['data_'], {}), '(data_)\n', (21617, 21624), True, 'import rolling_pin.blob_etl as rpb\n'), ((18304, 18334), 'pyparsing.Regex', 'pp.Regex', (['"""[a-z]+"""'], {'flags': 're.I'}), "('[a-z]+', flags=re.I)\n", (18312, 18334), True, 'import pyparsing as pp\n'), ((21163, 21226), 'pandasql.sqldf', 'pandasql.sqldf', (["('select * from data where ' + q)", "{'data': data}"], {}), "('select * from data where ' + q, {'data': data})\n", (21177, 21226), False, 'import pandasql\n'), ((7785, 7805), 'numpy.ceil', 'np.ceil', (['(x.month / 3)'], {}), '(x.month / 3)\n', (7792, 7805), True, 'import numpy as np\n'), ((12391, 12409), 'random.randint', 'randint', (['(0)', '(999999)'], {}), '(0, 999999)\n', (12398, 12409), False, 'from random import randint\n'), ((18735, 18764), 'pyparsing.Regex', 'pp.Regex', (['"""[^\']+"""'], {'flags': 're.I'}), '("[^\']+", flags=re.I)\n', (18743, 18764), True, 'import pyparsing as pp\n'), ((3405, 3436), 're.search', 're.search', (['regex', 'x'], {'flags': 're.I'}), '(regex, x, flags=re.I)\n', (3414, 3436), False, 'import re\n'), ((7969, 7988), 'numpy.ceil', 'np.ceil', (['(x.day / 14)'], {}), '(x.day / 14)\n', (7976, 7988), True, 'import numpy as np\n'), ((17386, 17405), 'rolling_pin.blob_etl.BlobETL', 'rpb.BlobETL', (['figure'], {}), '(figure)\n', (17397, 17405), True, 'import rolling_pin.blob_etl as rpb\n'), ((20687, 20727), 're.search', 're.search', (["parse['value']", 'x'], {'flags': 're.I'}), "(parse['value'], x, flags=re.I)\n", (20696, 20727), False, 'import re\n'), ((20979, 21019), 're.search', 're.search', (["parse['value']", 'x'], {'flags': 're.I'}), "(parse['value'], x, flags=re.I)\n", (20988, 21019), False, 'import re\n')] |
import numpy as np
import random
import math
def set_seed(s=0):
np.random.seed(s)
random.seed(s)
def create_tasks_arrivals(n_tasks=10, lam=1.):
'''
Returns a list of samples of a Poisson distribution with lambda = lam
parameter. The summatory of the resulting vector is n_tasks
'''
result = []
acc_tasks = 0
while acc_tasks < n_tasks:
sample = np.random.poisson(lam=lam)
if acc_tasks + sample < n_tasks:
result.append(sample)
else:
# Yes, I know... the last sample is not Poisson...
result.append(acc_tasks + sample - n_tasks)
acc_tasks += result[-1]
return result
def sample_nodes(nodes):
return random.sample(nodes, 1)[0]
if __name__ == "__main__":
set_seed(0)
nodes = ['a', 'b', 'c', 'd']
print('Two random samples from nodes:{} are <{}> and <{}>'.format(nodes, sample_nodes(nodes), sample_nodes(nodes)))
import matplotlib.pyplot as plt
beta = 10.
n = 1000
x = create_tasks_arrivals(n, beta)
plt.figure()
plt.plot(x)
plt.grid()
plt.show()
| [
"numpy.random.seed",
"matplotlib.pyplot.show",
"matplotlib.pyplot.plot",
"random.sample",
"matplotlib.pyplot.figure",
"random.seed",
"numpy.random.poisson",
"matplotlib.pyplot.grid"
] | [((67, 84), 'numpy.random.seed', 'np.random.seed', (['s'], {}), '(s)\n', (81, 84), True, 'import numpy as np\n'), ((87, 101), 'random.seed', 'random.seed', (['s'], {}), '(s)\n', (98, 101), False, 'import random\n'), ((974, 986), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (984, 986), True, 'import matplotlib.pyplot as plt\n'), ((989, 1000), 'matplotlib.pyplot.plot', 'plt.plot', (['x'], {}), '(x)\n', (997, 1000), True, 'import matplotlib.pyplot as plt\n'), ((1003, 1013), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (1011, 1013), True, 'import matplotlib.pyplot as plt\n'), ((1016, 1026), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1024, 1026), True, 'import matplotlib.pyplot as plt\n'), ((369, 395), 'numpy.random.poisson', 'np.random.poisson', ([], {'lam': 'lam'}), '(lam=lam)\n', (386, 395), True, 'import numpy as np\n'), ((657, 680), 'random.sample', 'random.sample', (['nodes', '(1)'], {}), '(nodes, 1)\n', (670, 680), False, 'import random\n')] |
#!/usr/bin/env python
# coding: utf-8
# In[1]:
import meshio
import pygalmesh
import numpy as np
import copy
from mshr import *
from dolfin import *
from collections import Counter
import matplotlib.pyplot as plt
import os
import sys
import json
import shutil
import scipy.optimize as opt
from EnergyMinimization import *
# # Initialisation
# User settings: What are the continuum parameters we want?
# In[2]:
# Target mesh size:
target_a = 0.2
# continuum bending modulus --- READ IN FROM THE COMMAND LINE:
kc=float(sys.argv[1])
# continuum shear modulus:
mu=1
# Energetic penalty for volume change --- READ IN FROM COMMAND LINE
B=float(sys.argv[2])
# the spring prestress values
g0start=1.0
g0end=4.0
g0step=0.1
print(B)
print(kc)
# User settings for the experimental run: What are the continuum parameters we want?
# In[3]:
# root folder for data
DataFolder=os.getcwd()+'/Data/'
# Folder for the run data
RunFolder="kc_"+"{0:0.1f}".format(kc)+"_B_"+"{0:0.1f}".format(B)+"/"
# Name of the run
RunName=""
# Name of the current file
ScriptName="EnergyMinimizationScript.ipynb"
# In[4]:
RunFolder
# Right, lets define the bond type and parameters for each bond. In 2D, we know that the elastic modulii are proportional to the microscopic spring constant. We also know that the continuum and microscopic momdulii are related by a lattice space: $\mu = O(1) k$, $k_c = k_d a$. Since I dont know any better, for know I will just set k to mu.
# In[5]:
kd=kc/target_a
k = mu
theta0=np.pi
# Set up the experiment
# In[6]:
path = DataFolder+RunFolder
# make the folder
try:
os.mkdir(path)
except OSError:
print ("Creation of the directory %s failed" % path)
else:
print ("Successfully created the directory %s " % path)
# try and clear out the folder if there was a previous run in it
for filename in os.listdir(path):
file_path = os.path.join(path, filename)
try:
if os.path.isfile(file_path) or os.path.islink(file_path):
os.unlink(file_path)
elif os.path.isdir(file_path):
shutil.rmtree(file_path)
except Exception as e:
print('Failed to delete %s. Reason: %s' % (file_path, e))
#Dump all the parameters to a file in the run folder
f=open(DataFolder+RunFolder+"Parameters.log","w+")
datadict= {
"a":target_a,
"kc":kc,
"B":B,
"mu":mu,
"g0start":g0start,
"g0end":g0end,
}
json.dump(datadict,f)
f.close()
# and for good measure, dump a copy of this code into the data file too
shutil.copyfile(ScriptName,DataFolder+RunFolder+ScriptName)
# Make the mesh, write it out to the folder
# In[7]:
InputMesh, OutputMesh, interiorbonds,edgebonds,angletriples = MakeDolfinMesh(target_a,40)
InputMesh.write(DataFolder+RunFolder+RunName+"InputMesh.vtk")
# Check out the Mesh. One of the lessons learnt is that you shouldnt have much of a spread in the intial edge lengths
# In[8]:
edgelengths= MakeBondHist(InputMesh.points,edgebonds)
np.mean(edgelengths)
# # Energy Minimization
# In[9]:
def mycallback(xi):
counter=len(history)
history.append(xi)
tempP = xi.reshape((-1, 2))
# stuff to screen
print("iteration:"+"{0:0.1f}".format(counter)+"Total Area:" + "{0:0.2f}".format(vTotalArea(tempP,triangles)))
#output for visualisation
OutputMesh.points[:,0:2] = tempP
OutputMesh.write(DataFolder+RunFolder+RunName+"TempOutput"+"Output"+"{0:0.1f}".format(g0)+"_"+str(counter)+".vtk",binary=True)
# In[ ]:
# initial input points. Pout changes over time
Pout_ij =InputMesh.points[:,0:2]
N = len(Pout_ij)
# the connectivity matrix --- this doesnt change over the run
A = np.zeros( (len(Pout_ij),len(Pout_ij)) )
for bond in edgebonds+interiorbonds:
A[bond[0],bond[1]]=1
A[bond[1],bond[0]]=1
# the triangles defining the connectivity data
triangles=InputMesh.cells[0].data
# The initial area, which we want to penalise deviation from
TargetArea=TotalArea(Pout_ij,triangles)
for g0 in np.arange(g0start,g0end,g0step):
print("Current g0"+"{0:0.1f}".format(g0))
# Make the "prestress" matrix, referring to scale factors for the rest lengths
g0_ij= np.ones((N,N),)
for bond in edgebonds:
g0_ij[bond[0],bond[1]]=g0
g0_ij[bond[1],bond[0]]=g0
# the preferred rest lengths of all the springs
r0_ij = g0_ij*dist(InputMesh.points[:,0:2] )
# minimize
history=[]
Pout_ij = opt.minimize(energy, Pout_ij.ravel()
,args=(A,r0_ij,angletriples,triangles,k,kd,theta0,B,TargetArea)
#,callback=mycallback
,options={'disp': True}).x.reshape((-1, 2))
# stuff to screen
print("Total Area:" + "{0:0.2f}".format(vTotalArea(Pout_ij,triangles)))
# write the output
OutputMesh.points[:,0:2] = Pout_ij
OutputMesh.point_data={"g0": np.repeat(g0,len(InputMesh.points))}
OutputMesh.write(DataFolder+RunFolder+RunName+"g0_"+"{0:0.1f}".format(g0)+".vtk",binary=True)
# In[ ]:
| [
"json.dump",
"os.mkdir",
"os.unlink",
"os.getcwd",
"os.path.isdir",
"numpy.ones",
"os.path.isfile",
"numpy.mean",
"numpy.arange",
"os.path.islink",
"shutil.copyfile",
"shutil.rmtree",
"os.path.join",
"os.listdir"
] | [((1843, 1859), 'os.listdir', 'os.listdir', (['path'], {}), '(path)\n', (1853, 1859), False, 'import os\n'), ((2451, 2473), 'json.dump', 'json.dump', (['datadict', 'f'], {}), '(datadict, f)\n', (2460, 2473), False, 'import json\n'), ((2556, 2620), 'shutil.copyfile', 'shutil.copyfile', (['ScriptName', '(DataFolder + RunFolder + ScriptName)'], {}), '(ScriptName, DataFolder + RunFolder + ScriptName)\n', (2571, 2620), False, 'import shutil\n'), ((3012, 3032), 'numpy.mean', 'np.mean', (['edgelengths'], {}), '(edgelengths)\n', (3019, 3032), True, 'import numpy as np\n'), ((4028, 4061), 'numpy.arange', 'np.arange', (['g0start', 'g0end', 'g0step'], {}), '(g0start, g0end, g0step)\n', (4037, 4061), True, 'import numpy as np\n'), ((879, 890), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (888, 890), False, 'import os\n'), ((1603, 1617), 'os.mkdir', 'os.mkdir', (['path'], {}), '(path)\n', (1611, 1617), False, 'import os\n'), ((1877, 1905), 'os.path.join', 'os.path.join', (['path', 'filename'], {}), '(path, filename)\n', (1889, 1905), False, 'import os\n'), ((4211, 4226), 'numpy.ones', 'np.ones', (['(N, N)'], {}), '((N, N))\n', (4218, 4226), True, 'import numpy as np\n'), ((1926, 1951), 'os.path.isfile', 'os.path.isfile', (['file_path'], {}), '(file_path)\n', (1940, 1951), False, 'import os\n'), ((1955, 1980), 'os.path.islink', 'os.path.islink', (['file_path'], {}), '(file_path)\n', (1969, 1980), False, 'import os\n'), ((1994, 2014), 'os.unlink', 'os.unlink', (['file_path'], {}), '(file_path)\n', (2003, 2014), False, 'import os\n'), ((2028, 2052), 'os.path.isdir', 'os.path.isdir', (['file_path'], {}), '(file_path)\n', (2041, 2052), False, 'import os\n'), ((2066, 2090), 'shutil.rmtree', 'shutil.rmtree', (['file_path'], {}), '(file_path)\n', (2079, 2090), False, 'import shutil\n')] |
# Reading the csv file
import pandas as pd
data = pd.read_csv("data.csv")
# Splitting the data into X and y
import numpy as np
X = np.array(data[['x1', 'x2']])
y = np.array(data['y'])
# Import statement for train_test_split
from sklearn.cross_validation import train_test_split
# TODO: Use the train_test_split function to split the data into
# training and testing sets.
# The size of the testing set should be 20% of the total size of the data.
# Your output should contain 4 objects.
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.20) | [
"pandas.read_csv",
"sklearn.cross_validation.train_test_split",
"numpy.array"
] | [((52, 75), 'pandas.read_csv', 'pd.read_csv', (['"""data.csv"""'], {}), "('data.csv')\n", (63, 75), True, 'import pandas as pd\n'), ((138, 166), 'numpy.array', 'np.array', (["data[['x1', 'x2']]"], {}), "(data[['x1', 'x2']])\n", (146, 166), True, 'import numpy as np\n'), ((172, 191), 'numpy.array', 'np.array', (["data['y']"], {}), "(data['y'])\n", (180, 191), True, 'import numpy as np\n'), ((541, 578), 'sklearn.cross_validation.train_test_split', 'train_test_split', (['X', 'y'], {'test_size': '(0.2)'}), '(X, y, test_size=0.2)\n', (557, 578), False, 'from sklearn.cross_validation import train_test_split\n')] |
import socketserver
import pommerman
from pommerman.constants import GameType
from pommerman.envs.v0 import Pomme
import time
import numpy as np
def evaluate(env: Pomme, episodes, verbose, visualize, stop=False):
"""
Evaluates the given pommerman environment (already includes the agents).
:param episodes: The number of episodes
:param verbose: Whether to print verbose status information
:param visualize: Whether to visualize the execution
:param stop: Whether to wait for input after each step
:return: The results of the evaluation of shape (episodes, 5) where the first column [:, 0] contains the result
of the match (tie, win, incomplete) and the remaining columns contain the individual (final) rewards.
"""
# first element: result, additional elements: rewards
steps = np.empty(episodes)
results = np.empty((episodes, 1 + 4))
start = time.time()
# Run the episodes just like OpenAI Gym
for i_episode in range(episodes):
state = env.reset()
done = False
reward = []
info = {}
step = 0
while not done:
if visualize:
env.render()
actions = env.act(state)
state, reward, done, info = env.step(actions)
step += 1
if verbose and step % 10 == 0:
delta = time.time() - start
print('\r{:.2f} sec > Episode {} running.. step {}'.format(
delta, i_episode, step
), end='')
if stop:
input()
steps[i_episode] = step
result = info['result']
# save the result
results[i_episode, 0] = result.value
results[i_episode, 1:] = reward
if verbose:
delta = time.time() - start
print('\r{:.2f} sec > Episode {} finished with {} ({})'.format(
delta, i_episode, result, reward
))
if i_episode % 10 == 9 and i_episode != episodes - 1:
print_stats(env, results, steps, i_episode + 1)
env.close()
if verbose:
delta = time.time() - start
print("Total time: {:.2f} sec".format(delta))
print_stats(env, results, steps, episodes)
return results
def print_stats(env, results, steps, episodes):
if env._game_type == GameType.FFA:
ffa_print_stats(results, steps, episodes)
elif env._game_type == GameType.Team or env._game_type == GameType.TeamRadio:
team_print_stats(results, steps, episodes)
def team_print_stats(results, steps, episodes):
num_won, num_ties = get_stats(results, episodes)
assert num_won[0] == num_won[2]
assert num_won[1] == num_won[3]
print("Evaluated {} episodes".format(episodes))
print("Average steps: {}".format(steps[:episodes].mean()))
total_won = int(np.sum(num_won) / 2)
print("Wins: {} ({:.2f}%)".format(total_won, total_won / episodes * 100))
print("> Team 0 (Agent 0, 2): {} ({:.2f}%)".format(
num_won[0], 0 if total_won == 0 else num_won[0] / total_won * 100))
print("> Team 1 (Agent 1, 3): {} ({:.2f}%)".format(
num_won[1], 0 if total_won == 0 else num_won[1] / total_won * 100))
print("Ties: {} ({:.2f}%)".format(num_ties, num_ties / episodes * 100))
assert np.sum(num_won) / 2 + num_ties == episodes
def ffa_print_stats(results, steps, episodes):
num_won, num_ties = get_stats(results, episodes)
print("Evaluated {} episodes".format(episodes))
print("Average steps: {}".format(steps[:episodes].mean()))
total_won = np.sum(num_won)
print("Wins: {} ({:.2f}%)".format(total_won, total_won / episodes * 100))
for a in range(len(num_won)):
print("> Agent {}: {} ({:.2f}%)".format(a, num_won[a], num_won[a] / total_won * 100))
print("Ties: {} ({:.2f}%)".format(num_ties, num_ties / episodes * 100))
assert np.sum(num_won) + num_ties == episodes
def get_stats(results, episodes):
# Count how often each agent achieved a final reward of "1"
num_won = np.sum(results[0:episodes, 1:] == 1, axis=0)
# In a tie, every player receives -1 reward
num_ties = np.sum(results[0:episodes, 0] == pommerman.constants.Result.Tie.value)
return num_won, num_ties
def get_free_port():
"""
Get a random free port.
:return: a free port.
"""
# noinspection PyTypeChecker
# see https://stackoverflow.com/questions/1365265/on-localhost-how-do-i-pick-a-free-port-number
with socketserver.TCPServer(("localhost", 0), None) as s:
return s.server_address[1]
| [
"numpy.empty",
"socketserver.TCPServer",
"numpy.sum",
"time.time"
] | [((836, 854), 'numpy.empty', 'np.empty', (['episodes'], {}), '(episodes)\n', (844, 854), True, 'import numpy as np\n'), ((869, 896), 'numpy.empty', 'np.empty', (['(episodes, 1 + 4)'], {}), '((episodes, 1 + 4))\n', (877, 896), True, 'import numpy as np\n'), ((910, 921), 'time.time', 'time.time', ([], {}), '()\n', (919, 921), False, 'import time\n'), ((3603, 3618), 'numpy.sum', 'np.sum', (['num_won'], {}), '(num_won)\n', (3609, 3618), True, 'import numpy as np\n'), ((4067, 4111), 'numpy.sum', 'np.sum', (['(results[0:episodes, 1:] == 1)'], {'axis': '(0)'}), '(results[0:episodes, 1:] == 1, axis=0)\n', (4073, 4111), True, 'import numpy as np\n'), ((4175, 4245), 'numpy.sum', 'np.sum', (['(results[0:episodes, 0] == pommerman.constants.Result.Tie.value)'], {}), '(results[0:episodes, 0] == pommerman.constants.Result.Tie.value)\n', (4181, 4245), True, 'import numpy as np\n'), ((4513, 4559), 'socketserver.TCPServer', 'socketserver.TCPServer', (["('localhost', 0)", 'None'], {}), "(('localhost', 0), None)\n", (4535, 4559), False, 'import socketserver\n'), ((2144, 2155), 'time.time', 'time.time', ([], {}), '()\n', (2153, 2155), False, 'import time\n'), ((2873, 2888), 'numpy.sum', 'np.sum', (['num_won'], {}), '(num_won)\n', (2879, 2888), True, 'import numpy as np\n'), ((3914, 3929), 'numpy.sum', 'np.sum', (['num_won'], {}), '(num_won)\n', (3920, 3929), True, 'import numpy as np\n'), ((1803, 1814), 'time.time', 'time.time', ([], {}), '()\n', (1812, 1814), False, 'import time\n'), ((3325, 3340), 'numpy.sum', 'np.sum', (['num_won'], {}), '(num_won)\n', (3331, 3340), True, 'import numpy as np\n'), ((1373, 1384), 'time.time', 'time.time', ([], {}), '()\n', (1382, 1384), False, 'import time\n')] |
import sys
sys.path.append("../")
import numpy as np
from tools import RSE
from tensorBasis import svd_thresholding, unfold, fold
from decomposition.TR import TR_product
'''
TRLRF(tensor ring low rank factors) algorithm for tensor completion: ADMM
Tensor Ring Decomposition with Rank Minimization on Latent Space: An Efficient Approach for Tensor Completion
'''
def trlrf(tensor_obs, index, rank):
iter_max = 1000
λ = 5
µ= 1
µ_max = 100
ρ = 1.01
tol = 1e-6
#### init
shape = tensor_obs.shape
N = len(shape)
X = tensor_obs.copy()
G_cores = [np.random.rand(rank, shape[i], rank) for i in range(N)]
Y = [[np.zeros((rank, shape[i], rank))] * 3 for i in range(N)]
M = [[np.zeros((rank, shape[i], rank))] * 3 for i in range(N)]
for i in range(1, iter_max+1):
X_ = X.copy()
###### update G
for k in range(N):
shape_core = G_cores[k].shape
G_2 = np.reshape(np.transpose(TR_product(G_cores[k+1:] + G_cores[:k], contract_border=False), \
list(range(N-k, N)) + list(range(1, N-k)) + [N, 0]), [int(np.prod(shape) / shape[k]), -1])
temp = (unfold(np.mean(M[k], axis=0) * µ + np.mean(Y[k], axis=0), 1) + λ * (unfold(X, k) @ G_2)) \
@ np.linalg.pinv((λ * (np.transpose(G_2) @ G_2) + 3 * µ * np.eye(G_2.shape[1], G_2.shape[1])))
G_cores[k] = fold(temp, 1, shape_core)
###### update M
for j in range(3):
M[k][j] = fold(svd_thresholding(unfold(G_cores[k] - Y[k][j] / µ, j), 1 / µ), j, shape_core)
###### update X
X = TR_product(G_cores, contract_border=True) * (1-index) + tensor_obs
###### update Y
for k in range(N):
for j in range(3):
Y[k][j] = Y[k][j] + µ * (M[k][j] - G_cores[k])
µ = min(ρ * µ, µ_max)
conv = RSE(X, X_)
if conv <= tol or i >= iter_max:
return X
| [
"sys.path.append",
"tensorBasis.fold",
"tensorBasis.unfold",
"numpy.zeros",
"decomposition.TR.TR_product",
"tools.RSE",
"numpy.transpose",
"numpy.mean",
"numpy.random.rand",
"numpy.eye",
"numpy.prod"
] | [((11, 33), 'sys.path.append', 'sys.path.append', (['"""../"""'], {}), "('../')\n", (26, 33), False, 'import sys\n'), ((586, 622), 'numpy.random.rand', 'np.random.rand', (['rank', 'shape[i]', 'rank'], {}), '(rank, shape[i], rank)\n', (600, 622), True, 'import numpy as np\n'), ((1895, 1905), 'tools.RSE', 'RSE', (['X', 'X_'], {}), '(X, X_)\n', (1898, 1905), False, 'from tools import RSE\n'), ((1412, 1437), 'tensorBasis.fold', 'fold', (['temp', '(1)', 'shape_core'], {}), '(temp, 1, shape_core)\n', (1416, 1437), False, 'from tensorBasis import svd_thresholding, unfold, fold\n'), ((652, 684), 'numpy.zeros', 'np.zeros', (['(rank, shape[i], rank)'], {}), '((rank, shape[i], rank))\n', (660, 684), True, 'import numpy as np\n'), ((719, 751), 'numpy.zeros', 'np.zeros', (['(rank, shape[i], rank)'], {}), '((rank, shape[i], rank))\n', (727, 751), True, 'import numpy as np\n'), ((1638, 1679), 'decomposition.TR.TR_product', 'TR_product', (['G_cores'], {'contract_border': '(True)'}), '(G_cores, contract_border=True)\n', (1648, 1679), False, 'from decomposition.TR import TR_product\n'), ((968, 1032), 'decomposition.TR.TR_product', 'TR_product', (['(G_cores[k + 1:] + G_cores[:k])'], {'contract_border': '(False)'}), '(G_cores[k + 1:] + G_cores[:k], contract_border=False)\n', (978, 1032), False, 'from decomposition.TR import TR_product\n'), ((1542, 1577), 'tensorBasis.unfold', 'unfold', (['(G_cores[k] - Y[k][j] / μ)', 'j'], {}), '(G_cores[k] - Y[k][j] / μ, j)\n', (1548, 1577), False, 'from tensorBasis import svd_thresholding, unfold, fold\n'), ((1120, 1134), 'numpy.prod', 'np.prod', (['shape'], {}), '(shape)\n', (1127, 1134), True, 'import numpy as np\n'), ((1209, 1230), 'numpy.mean', 'np.mean', (['Y[k]'], {'axis': '(0)'}), '(Y[k], axis=0)\n', (1216, 1230), True, 'import numpy as np\n'), ((1243, 1255), 'tensorBasis.unfold', 'unfold', (['X', 'k'], {}), '(X, k)\n', (1249, 1255), False, 'from tensorBasis import svd_thresholding, unfold, fold\n'), ((1352, 1386), 'numpy.eye', 'np.eye', 
(['G_2.shape[1]', 'G_2.shape[1]'], {}), '(G_2.shape[1], G_2.shape[1])\n', (1358, 1386), True, 'import numpy as np\n'), ((1180, 1201), 'numpy.mean', 'np.mean', (['M[k]'], {'axis': '(0)'}), '(M[k], axis=0)\n', (1187, 1201), True, 'import numpy as np\n'), ((1316, 1333), 'numpy.transpose', 'np.transpose', (['G_2'], {}), '(G_2)\n', (1328, 1333), True, 'import numpy as np\n')] |
from flask import Flask, flash, redirect, render_template, request, session, abort, url_for
import os
from pymongo import MongoClient
import json
from werkzeug import secure_filename
# import csv
# import subprocess
import pandas as pd
import numpy as np
from util_nlp_2 import parseS
from gcpParser import syntax_text
from bson.code import Code
from collections import defaultdict
import time
client = MongoClient("localhost:27017")
dbString = 'CalAnswers'
db = client[dbString]
app = Flask(__name__)
# UPLOAD_FOLDER = '/Users/ZeroNineSeven/research/bi_proj/answers/uploads/'
# PA = os.getcwd()
UPLOAD_FOLDER = os.getcwd() + '/uploads/'
ALLOWED_EXTENSIONS = {'csv', 'xls', 'xlsx', 'json'}
app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER
# currFileName = None
currCols = None
currColsDict = dict()
sum_set = set()
avg_set = set()
# global_map = dict()
def allowed_file(filename):
return '.' in filename and \
filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS
@app.route("/", methods=['GET', 'POST'])
def index():
if request.method == 'POST':
file = request.files['file']
if file and allowed_file(file.filename):
filename = secure_filename(file.filename)
file.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))
global currFileName
currFileName = filename
# extractColumnNames(filename)
import_content(filename)
return redirect(url_for('index'))
return render_template('index.html')
# Query:
# Format 1: [ field1, field2, ..., query ]
# Format 2: [ query ]
# Where is query is space-separated
@app.route("/getRecords", methods=['POST'])
def getRecords():
try:
global currCols
if currCols is None:
return ""
recordsList = []
userInput = str(request.data.decode("utf-8")).lower()
if userInput == "":
return ""
print('userInput: ' + userInput)
projection, raw_query = parseS(userInput, list(currCols))
print("---------PROJECTION---------")
print(projection)
print("----------------------------")
print('projection: ' + projection)
print('raw_query: ' + json.dumps(raw_query))
raw_projectionList = list()
raw_projectionList += [projection]
projectionList = []
if projection is not None and projection != "":
for p in raw_projectionList:
newP = None
for columnName in currCols:
if p.lower() in columnName.lower():
newP = columnName
projectionList += [newP]
if newP is None:
return ''
projectionDict = listToProjectionDict(projectionList)
query = dict()
for k, v in raw_query.items():
newKey = None
for columnName in currCols:
if k.lower() in columnName.lower():
newKey = columnName
if newKey is None:
return ''
if v.isdecimal():
v = int(v)
elif isfloat(v):
v = float(v)
newValue = {"$in": [v]}
query[newKey] = newValue
print('query: ' + json.dumps(query))
print('projectionDict: ' + json.dumps(projectionDict))
# query = {"department": {"$in": ["economics"]}, "calender_year": {"$in": [2017]}}
# collection_name = None
# global_key = json.dumps(tuple((query, projectionDict)))
# if global_key in global_map:
# return global_map[global_key]
for collectionName in db.collection_names():
if projection is None:
records = db[collectionName].find(query)
else:
records = db[collectionName].find(query, projectionDict)
if records.count() > 0:
# collection_name = collectionName
break
for record in records:
recordItem = record
recordItem.pop("_id")
recordsList.append(recordItem)
agg_recordsList = defaultdict(list)
resultDict = dict()
if projection is not None and projection != "" and recordsList != []:
if (projection in sum_set or projection in avg_set):
for r in recordsList:
for key, value in r.items():
agg_recordsList[key].append(value)
for key, li in agg_recordsList.items():
if key in sum_set:
resultDict[key] = sum(li)
elif key in avg_set:
resultDict[key] = sum(li) / len(li)
else:
valueSet = set(value for dic in recordsList for key, value in dic.items())
resultDict[projection] = list(valueSet)
# recordsList = agg_recordsList
recordsList = []
recordsList.append(resultDict)
print("result: " + json.dumps(recordsList))
# global_map[global_key] = json.dumps(recordsList)
return json.dumps(recordsList)
except KeyError:
return ''
def listToProjectionDict(projectList):
if projectList is None or projectList == []:
return None
ones = [1 for _ in projectList]
return dict(zip(projectList, ones))
def import_content(fileName):
# cdir = os.path.dirname(__file__)
# file_res = os.path.join(cdir, filepath)
# filePath = UPLOAD_FOLDER + currFileName
filePath = os.path.join(app.config['UPLOAD_FOLDER'], fileName)
if fileName.lower().endswith(".csv"):
data = pd.read_csv(filePath)
elif fileName.lower().endswith(".xls") or fileName.lower().endswith(".xlsx"):
data = pd.read_excel(filePath)
elif fileName.lower().endwidth(".json"):
data = pd.read_json(filePath)
else:
raise ValueError("File type not supported")
data.columns = map(str.lower, data.columns)
cols = [c.lower() for c in data.columns if c.lower()[:8] != "unnamed:" and c.lower() != ""]
data = data[cols]
cols_to_lower_case = [c for c in cols if not np.issubdtype(data[c].dtype, np.number)]
data[cols_to_lower_case] = data[cols_to_lower_case].apply(lambda x: x.astype(str).str.lower())
extractColumnNames(fileName, data)
data_json = json.loads(data.to_json(orient='records'))
# db[fileName].insert(data_json)
# data_dict = data.to_dict("records")
# print(data_dict)
db[fileName].remove()
# db[fileName].insert(data_dict)
# db[fileName].insert_many(data_dict)
db[fileName].insert(data_json)
print(list(db[fileName].find()))
def extractColumnNames(fileName, data):
columnNames = data.keys()
lowerCols = set()
for c in columnNames:
lowerCols.add(c.lower())
global currCols
if currCols is None:
currCols = set(lowerCols)
else:
for name in columnNames:
currCols.add(name.lower())
currColsDict[fileName] = list(lowerCols)
print(currCols)
def isfloat(value):
try:
float(value)
return True
except ValueError:
return False
# Credit: https://stackoverflow.com/questions/2298870/get-names-of-all-keys-in-the-collection
def get_keys(db_name, collection):
client = MongoClient()
db = client[db_name]
map = Code("function() { for (var key in this) { emit(key, null); } }")
reduce = Code("function(key, stuff) { return null; }")
result = db[collection].map_reduce(map, reduce, "dummy_key")
return result.distinct('_id')
# i.e. download column names and store in the program every time we load the program.
if __name__ == "__main__":
sum_set.add('count')
avg_set.add('avg_age')
avg_set.add('calender_year')
for collectionName in db.collection_names():
print(collectionName)
if collectionName == "dummy_key":
continue
keys = get_keys('CalAnswers', collectionName)
if "_id" in keys:
keys.remove("_id")
if "" in keys:
keys.remove("")
if currCols is None:
currCols = set(keys)
else:
for k in keys:
currCols.add(k.lower())
currColsDict[collectionName] = keys
app.run()
| [
"pymongo.MongoClient",
"os.getcwd",
"pandas.read_csv",
"flask.request.data.decode",
"flask.Flask",
"json.dumps",
"pandas.read_json",
"collections.defaultdict",
"bson.code.Code",
"werkzeug.secure_filename",
"pandas.read_excel",
"flask.url_for",
"flask.render_template",
"os.path.join",
"nu... | [((405, 435), 'pymongo.MongoClient', 'MongoClient', (['"""localhost:27017"""'], {}), "('localhost:27017')\n", (416, 435), False, 'from pymongo import MongoClient\n'), ((489, 504), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (494, 504), False, 'from flask import Flask, flash, redirect, render_template, request, session, abort, url_for\n'), ((615, 626), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (624, 626), False, 'import os\n'), ((1489, 1518), 'flask.render_template', 'render_template', (['"""index.html"""'], {}), "('index.html')\n", (1504, 1518), False, 'from flask import Flask, flash, redirect, render_template, request, session, abort, url_for\n'), ((5567, 5618), 'os.path.join', 'os.path.join', (["app.config['UPLOAD_FOLDER']", 'fileName'], {}), "(app.config['UPLOAD_FOLDER'], fileName)\n", (5579, 5618), False, 'import os\n'), ((7337, 7350), 'pymongo.MongoClient', 'MongoClient', ([], {}), '()\n', (7348, 7350), False, 'from pymongo import MongoClient\n'), ((7386, 7451), 'bson.code.Code', 'Code', (['"""function() { for (var key in this) { emit(key, null); } }"""'], {}), "('function() { for (var key in this) { emit(key, null); } }')\n", (7390, 7451), False, 'from bson.code import Code\n'), ((7465, 7510), 'bson.code.Code', 'Code', (['"""function(key, stuff) { return null; }"""'], {}), "('function(key, stuff) { return null; }')\n", (7469, 7510), False, 'from bson.code import Code\n'), ((4150, 4167), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (4161, 4167), False, 'from collections import defaultdict\n'), ((5140, 5163), 'json.dumps', 'json.dumps', (['recordsList'], {}), '(recordsList)\n', (5150, 5163), False, 'import json\n'), ((5676, 5697), 'pandas.read_csv', 'pd.read_csv', (['filePath'], {}), '(filePath)\n', (5687, 5697), True, 'import pandas as pd\n'), ((1178, 1208), 'werkzeug.secure_filename', 'secure_filename', (['file.filename'], {}), '(file.filename)\n', (1193, 1208), False, 'from werkzeug import 
secure_filename\n'), ((5795, 5818), 'pandas.read_excel', 'pd.read_excel', (['filePath'], {}), '(filePath)\n', (5808, 5818), True, 'import pandas as pd\n'), ((1231, 1282), 'os.path.join', 'os.path.join', (["app.config['UPLOAD_FOLDER']", 'filename'], {}), "(app.config['UPLOAD_FOLDER'], filename)\n", (1243, 1282), False, 'import os\n'), ((1460, 1476), 'flask.url_for', 'url_for', (['"""index"""'], {}), "('index')\n", (1467, 1476), False, 'from flask import Flask, flash, redirect, render_template, request, session, abort, url_for\n'), ((2216, 2237), 'json.dumps', 'json.dumps', (['raw_query'], {}), '(raw_query)\n', (2226, 2237), False, 'import json\n'), ((3280, 3297), 'json.dumps', 'json.dumps', (['query'], {}), '(query)\n', (3290, 3297), False, 'import json\n'), ((3334, 3360), 'json.dumps', 'json.dumps', (['projectionDict'], {}), '(projectionDict)\n', (3344, 3360), False, 'import json\n'), ((5041, 5064), 'json.dumps', 'json.dumps', (['recordsList'], {}), '(recordsList)\n', (5051, 5064), False, 'import json\n'), ((5879, 5901), 'pandas.read_json', 'pd.read_json', (['filePath'], {}), '(filePath)\n', (5891, 5901), True, 'import pandas as pd\n'), ((6180, 6219), 'numpy.issubdtype', 'np.issubdtype', (['data[c].dtype', 'np.number'], {}), '(data[c].dtype, np.number)\n', (6193, 6219), True, 'import numpy as np\n'), ((1828, 1856), 'flask.request.data.decode', 'request.data.decode', (['"""utf-8"""'], {}), "('utf-8')\n", (1847, 1856), False, 'from flask import Flask, flash, redirect, render_template, request, session, abort, url_for\n')] |
import csv
import cv2
import matplotlib
matplotlib.use('Agg') # For AWS compatibility
from matplotlib import pyplot as plt
import random
import numpy as np
from keras.preprocessing.image import random_shift, flip_axis
from sklearn.utils import shuffle
## DONE: Visualize normal distribution for steering angles
## TODO: Create a true generator for loading on the fly
## DONE: Load second set and add to measurements and images sequentially
## DONE: Discard samples with zero angle
## DONE: Flip
## DONE: Test Translation? how will it work with the cropping?
## DONE: Use all cameras
## DONE: Crop
## DONE: Data Exploration
## About augmentation: We can't do rotation easily. We could do random alpha artifacts (shadows), change luminosity to simulate day and night driving, translate the image.
# Read and load the CSV file
# ---------------------------------------------------------------------------
# Dataset configuration and eager loading (runs at import time).
# Populates the module-level `images` / `measurerements` lists consumed below.
# ---------------------------------------------------------------------------
# Root folder holding the simulator recordings.
local_folder='sim data/'
# Simulator log: columns are center/left/right image path, steering, throttle, brake, speed.
local_csvfile='driving_log.csv'
# Subfolders where the additional data sets are
data_sets=[
    #'1/', # Full lap
    #'2/', # Full lap backwards
    '3/', # Red lanes
    '4/', # Red lanes + Dirt road
    '5/', # Red lanes + Dirt road
    '6/', # difficult curve
    '7/' # 2 Full laps better quality
    ]
# Corrections to add left and right camera images
#left_camera_steer_correction=0.25
#left_camera_steer_correction=0.1 # It works pretty well
left_camera_steer_correction=0.25
right_camera_steer_correction=-0.25
# Model input shape (rows, cols, channels); earlier experiments kept for reference.
#IMAGES_INPUT_SHAPE=(160,320,3)
#IMAGES_INPUT_SHAPE=(66,200,3)
IMAGES_INPUT_SHAPE=(128,128,3)
images=[]
measurerements=[]
print('Loading datasets...')
for data in data_sets:
    #Load CSV
    with open(local_folder+data+local_csvfile) as csvfile:
        reader = csv.reader(csvfile)
        for line in reader:
            # Skipping low steering values
            # (drop zero-angle samples to counter the straight-driving bias
            #  in the steering-angle distribution)
            if float(line[3])==0.0:
            #if float(line[3])<0.01:
                continue
            # Prepare data and local paths
            for i in range(3): #load 0:center 1:left 2:right
                path=line[i]
                # The log stores absolute paths from the recording machine;
                # keep only the file name and re-root it under this folder.
                filename=path.split('/')[-1]
                local_path=local_folder+data+'IMG/'+filename
                image=cv2.imread(local_path)
                # Resize all images including validation sets
                # (cv2.resize takes (width, height), hence the swapped indices)
                image = cv2.resize(image, (IMAGES_INPUT_SHAPE[1],IMAGES_INPUT_SHAPE[0]))
                # Camera steering correction: side cameras get a fixed offset
                # so the model learns to steer back toward the lane center.
                measurerement=float(line[3])
                if (i==1):
                    measurerement+=left_camera_steer_correction
                elif(i==2):
                    measurerement+=right_camera_steer_correction
                measurerements.append(measurerement)
                images.append(image)
                # show_image(image)
                # exit(0)
# Sanity check: one steering value per loaded frame.
assert len(images)==len(measurerements)
# print('Samples Collection: {}'.format(len(measurerements)))
# ------
# Helper functions for preprocessing and augmentation
def crop_image(image):
	"""Trim sky and hood from a frame: drop the top 50 and bottom 20 rows."""
	height = int(image.shape[0])
	width = int(image.shape[1])
	# Slicing is [row_start:row_end, col_start:col_end]; columns are kept whole.
	return image[50:height - 20, 0:width]
def apply_image_random_brightness(image):
	"""Scale the HSV value channel by a random factor in [0.5, 1.5).

	Simulates varying lighting conditions; returns a new uint8 RGB image.
	"""
	hsv = cv2.cvtColor(image, cv2.COLOR_RGB2HSV)
	hsv = np.array(hsv, dtype=np.float64)
	factor = .5 + np.random.uniform()
	hsv[:, :, 2] = hsv[:, :, 2] * factor
	# Clamp overflowing brightness back into the valid uint8 range
	hsv[:, :, 2][hsv[:, :, 2] > 255] = 255
	hsv = np.array(hsv, dtype=np.uint8)
	return cv2.cvtColor(hsv, cv2.COLOR_HSV2RGB)
def apply_image_random_shadow(image):
	"""Blend a random dark quadrilateral onto the frame to simulate a shadow.

	A polygon spanning the full left edge with two randomly-placed right-side
	vertices is darkened with weight ``alpha``; the blend is written back into
	``image`` in place (and the array is also returned, keeping the original
	in-place API).

	Fix: the original created ``overlay = image.copy()`` and immediately
	overwrote it, then drew the polygon directly on the input ``image`` —
	the blend only produced the intended result because a second pristine
	copy was used as the other operand. The polygon is now drawn on the copy,
	leaving the input untouched until the final blend.
	"""
	alpha = 0.3
	h = image.shape[0]
	w = image.shape[1]
	# Quadrilateral: full-height left edge, random x positions on the right
	points = np.array([[0, 0], [w - random.randint(0, w), 0], [w - random.randint(0, w), h], [0, h]], np.int32)
	shadow = image.copy()
	cv2.fillConvexPoly(shadow, points, (0, 0, 0))
	# image := alpha*shadow + (1 - alpha)*image, computed in place
	cv2.addWeighted(shadow, alpha, image, 1.0 - alpha, 0, image)
	return image
# Sorted operations to improve performance
def preprocess_augmentation(image, measurement):
	"""Randomly augment one (image, steering angle) training sample.

	Applies random brightness, a random shadow artefact, and — roughly half
	the time — a horizontal flip with the steering angle negated to match.
	Returns the (possibly new) image and the adjusted measurement.
	"""
	# Random brightness to simulate different light conditions
	image = apply_image_random_brightness(image)
	# Random shadow artefacts to improve generalization
	image = apply_image_random_shadow(image)
	flip_coin = random.randint(0, 100)
	if flip_coin > 50:
		# Mirror the frame left-right and reverse the steering direction with it
		image = flip_axis(image, 1)
		measurement = -measurement
	return image, measurement
# -----
# Visualization helper functions
# Helper fuction to build a gallery from a image collection
def show_collection_gallery(collection, number=120):
	"""Display `number` randomly-chosen images from `collection` in a 12x10 grid."""
	fig = plt.figure(figsize=(12, 7))
	for cell in range(number):
		fig.add_subplot(12, 10, cell + 1)
		sample = random.choice(collection).squeeze()
		# OpenCV stores images as BGR; matplotlib expects RGB
		sample = cv2.cvtColor(sample, cv2.COLOR_BGR2RGB)
		plt.imshow(sample)
		plt.axis("off")
	plt.show()
def show_histogram(x):
	"""Plot a 100-bin, density-normalized histogram of the steering angles."""
	# Fix: `normed` was deprecated and then removed in Matplotlib 3.1+;
	# `density=True` is the drop-in replacement with the same semantics.
	n, bins, patches = plt.hist(x, 100, density=True, facecolor='green', alpha=0.75)
	plt.xlabel('Distribution')
	plt.ylabel('Angles')
	plt.title('Steering angles')
	plt.grid(True)
	plt.show()
# Show a single image using CV2 and wait for 'q'
def show_image(image):
	"""Open an OpenCV window showing `image`; blocks until any key is pressed."""
	window_name = "Display window"
	cv2.imshow(window_name, image)
	cv2.waitKey(0)
# ------
def process_sequential_batch_generator(X, y, batch_size=32, augmentation=False):
	"""Infinite generator yielding sequential (images, measurements) batches.

	Parameters
	----------
	X : sequence of images; y : sequence of steering measurements (same length).
	batch_size : number of samples per yielded batch.
	augmentation : when True, each sample passes through
	    preprocess_augmentation() before being batched.

	Yields (np.array of images, np.array of measurements).

	Fixes over the original:
	* off-by-one — ``range(start, start + batch_size - 1)`` dropped the last
	  sample of every batch, so batches held batch_size - 1 items;
	* the epoch counter reset one batch early, so the final full batch of
	  each epoch was never yielded;
	* the sklearn ``shuffle`` dependency is replaced with a stdlib shuffle of
	  zipped pairs, which keeps image/measurement alignment.
	"""
	N = len(y)
	# Guard against batch_size > N (original would divide to 0 and drift)
	batches_per_epoch = max(N // batch_size, 1)
	# Shuffle once up front, keeping (image, measurement) pairs aligned.
	pairs = list(zip(X, y))
	random.shuffle(pairs)
	i = 0
	while 1:
		start = i * batch_size
		end = start + batch_size  # exclusive bound: a full batch_size items
		batch_X, batch_y = [], []
		for index in range(start, end):
			if index > N - 1:
				break
			image, measurement = pairs[index]
			if augmentation:
				image, measurement = preprocess_augmentation(image, measurement)
			batch_X.append(image)
			batch_y.append(measurement)
		i += 1
		if i >= batches_per_epoch:
			# Reset the index so that we can cycle over the data forever
			i = 0
		yield (np.array(batch_X), np.array(batch_y))
def process_batch_generator(X, y, batch_size=64, augmentation=False):
	"""Infinite generator of randomly-sampled (images, measurements) batches.

	Draws indices uniformly with replacement, optionally augments each
	sample, re-shuffles the assembled batch, then yields it as numpy arrays.
	"""
	X, y = shuffle(X, y)
	last = len(X) - 1
	while True:
		batch_X, batch_y = [], []
		for _ in range(batch_size):
			pick = random.randint(0, last)
			image, measurement = X[pick], y[pick]
			if augmentation:
				image, measurement = preprocess_augmentation(image, measurement)
			batch_X.append(image)
			batch_y.append(measurement)
		batch_X, batch_y = shuffle(batch_X, batch_y)
		yield np.array(batch_X), np.array(batch_y)
## Visualization (one-off exploration snippets, normally left disabled)
#_generator = process_batch_generator(images,measurerements,120)
#X_train,y_train=next(_generator)
#show_collection_gallery(X_train)
#exit(0)
"""
_generator = process_sequential_batch_generator(images,measurerements,1000)
X_train,y_train=next(_generator)
show_histogram(y_train)
exit(0)
"""
# --------------------------
# Hold out 20% of the samples for validation; fixed seed for reproducibility.
from sklearn.model_selection import train_test_split
X_train, X_val, y_train, y_val = train_test_split(images,measurerements, test_size=0.2, random_state=0)
assert len(X_val)==len(y_val)
print('Training datasets: {}'.format(len(y_train)))
print('Validation datasets: {}'.format(len(y_val)))
# Keras (1.x-style API) building blocks used by the model definitions below
from keras.models import Sequential
from keras.layers import Flatten, Dense, Lambda, Dropout, ELU
from keras.layers.convolutional import Convolution2D
from keras.layers.convolutional import MaxPooling2D
from keras.layers import Cropping2D
from keras.optimizers import Adam
from keras.callbacks import ModelCheckpoint
def foo_model():
	"""Minimal sanity-check model: flatten a 64x64x3 input into one linear output."""
	layers = [
		Flatten(input_shape=(64, 64, 3)),
		Dense(1),
	]
	return Sequential(layers)
def original_simple_model():
	"""LeNet-flavoured baseline: two conv/pool stages and a small dense stack."""
	global IMAGES_INPUT_SHAPE
	model = Sequential()
	# Normalize pixels to [-0.5, 0.5]
	model.add(Lambda(lambda x: (x / 255.0) - 0.5, input_shape=IMAGES_INPUT_SHAPE))
	# Two identical CONV => RELU => POOL stages
	for _ in range(2):
		model.add(Convolution2D(6, 5, 5, activation='relu'))
		model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))
	model.add(Flatten())
	for units in (500, 120, 84):
		model.add(Dense(units))
	# Single regression output: the steering angle
	model.add(Dense(1))
	return model
def simple_model():
	"""Deeper conv net: four conv/pool stages with dropout before a dense stack."""
	global IMAGES_INPUT_SHAPE
	model = Sequential()
	# Normalize pixels to [-0.5, 0.5]
	model.add(Lambda(lambda x: (x / 255.0) - 0.5, input_shape=IMAGES_INPUT_SHAPE))
	# (filters, kernel_size, pool_strides) for each CONV => RELU => POOL stage
	conv_stages = [
		(6, 5, (2, 2)),
		(6, 5, (2, 2)),
		(24, 5, (2, 2)),
		(36, 3, (1, 1)),
	]
	for filters, kernel, pool_strides in conv_stages:
		model.add(Convolution2D(filters, kernel, kernel, activation='relu'))
		model.add(MaxPooling2D(pool_size=(2, 2), strides=pool_strides))
	model.add(Dropout(.5))
	model.add(Flatten())
	model.add(Dense(1024))
	model.add(Dropout(.5))
	for units in (500, 120, 84, 32):
		model.add(Dense(units))
	# Single regression output: the steering angle
	model.add(Dense(1))
	return model
def nvidia_model2():
	"""Nvidia end-to-end steering network with dropout regularization.

	Based on the Nvidia paper "End to End Learning for Self-Driving Cars"
	(https://images.nvidia.com/content/tegra/automotive/images/2016/solutions/pdf/end-to-end-dl-using-px.pdf),
	modified with dropouts and several adjustments.

	Input: 128x128x3 images normalized to [-0.5, 0.5]. The stack is three
	strided 5x5 convolutions (24/36/48 filters, ReLU), two 3x3 convolutions
	(64 filters each, ReLU), then fully connected layers 1164-100-50-10 with
	ReLU activations and a single tanh output producing the steering angle.
	Dropout layers (.4 / .3 / .2) sit after the two conv groups and after the
	widest dense layer.
	"""
	model = Sequential()
	model.add(Lambda(lambda x: (x / 255.0) - 0.5, input_shape=IMAGES_INPUT_SHAPE))
	# Three strided 5x5 conv blocks
	for filters in (24, 36, 48):
		model.add(Convolution2D(filters, 5, 5, border_mode='valid', activation='relu', subsample=(2, 2)))
	model.add(Dropout(.4))
	# Two unstrided 3x3 conv blocks
	for _ in range(2):
		model.add(Convolution2D(64, 3, 3, border_mode='valid', activation='relu', subsample=(1, 1)))
	model.add(Dropout(.3))
	model.add(Flatten())
	model.add(Dense(1164, activation='relu'))
	model.add(Dropout(.2))
	for units in (100, 50, 10):
		model.add(Dense(units, activation='relu'))
	model.add(Dense(1, activation='tanh'))
	return model
def new_model():
	"""comma.ai-flavoured conv net with ELU activations; compiled (Adam, MSE) before return."""
	layer_stack = [
		Lambda(lambda x: (x / 255.0) - 0.5, input_shape=IMAGES_INPUT_SHAPE),
		# NOTE(review): the input_shape kwarg below is redundant — the Lambda
		# layer above already fixes the model's input shape.
		Convolution2D(32, 5, 5, input_shape=(64, 64, 3), subsample=(2, 2), border_mode="same"),
		ELU(),
		Convolution2D(16, 3, 3, subsample=(1, 1), border_mode="valid"),
		ELU(),
		Dropout(.5),
		MaxPooling2D((2, 2), border_mode='valid'),
		Convolution2D(16, 3, 3, subsample=(1, 1), border_mode="valid"),
		ELU(),
		Flatten(),
		Dense(1024),
		Dropout(.3),
		ELU(),
		Dense(512),
		ELU(),
		# Single regression output for the steering angle
		Dense(1),
	]
	model = Sequential(layer_stack)
	model.compile(optimizer="adam", loss="mse")
	return model
def nvidia_model():
	"""Nvidia end-to-end network with He-normal initialization and ELU activations.

	Normalizes input to [-1, 1], applies the five-convolution Nvidia stack
	(24/36/48 filters at 5x5 stride 2, then two 64-filter 3x3 layers), then
	fully-connected layers 1164-100-50-10 with ELU and one linear output.

	Fix: the fourth conv block previously stacked two consecutive ELU()
	layers (an apparent copy-paste slip). ELU is not idempotent for negative
	inputs, so the duplicate subtly altered activations; each conv block now
	has exactly one ELU, matching the pattern of the other blocks.
	"""
	global IMAGES_INPUT_SHAPE
	model = Sequential()
	# Normalize pixels to [-1, 1]
	model.add(Lambda(
		lambda x: x/127.5-1.0,
		input_shape=IMAGES_INPUT_SHAPE
	))
	# (filters, kernel, stride) for the five convolutional blocks
	conv_specs = [
		(24, 5, (2, 2)),
		(36, 5, (2, 2)),
		(48, 5, (2, 2)),
		(64, 3, (1, 1)),
		(64, 3, (1, 1)),
	]
	for filters, kernel, stride in conv_specs:
		model.add(Convolution2D(
			filters, kernel, kernel,
			subsample=stride,
			border_mode="valid",
			init="he_normal"
		))
		model.add(ELU())
	model.add(Flatten())
	# x4 fully-connected layers with ELU activation
	for units in [1164, 100, 50, 10]:
		model.add(Dense(units, init="he_normal"))
		model.add(ELU())
	model.add(Dense(1, init="he_normal"))
	return model
def VGG16():
	"""Steering-angle regression network.

	NOTE(review): despite the name, this is not the VGG16 architecture —
	the layer stack is identical to nvidia_model2 (Nvidia-style conv net
	with dropout and a tanh output). The name is kept so existing callers
	still work.
	"""
	model = Sequential()
	model.add(Lambda(lambda x: (x / 255.0) - 0.5, input_shape=IMAGES_INPUT_SHAPE))
	# Three strided 5x5 conv blocks, then two unstrided 3x3 conv blocks
	for filters in (24, 36, 48):
		model.add(Convolution2D(filters, 5, 5, border_mode='valid', activation='relu', subsample=(2, 2)))
	model.add(Dropout(.4))
	for _ in (0, 1):
		model.add(Convolution2D(64, 3, 3, border_mode='valid', activation='relu', subsample=(1, 1)))
	model.add(Dropout(.3))
	model.add(Flatten())
	model.add(Dense(1164, activation='relu'))
	model.add(Dropout(.2))
	for units in (100, 50, 10):
		model.add(Dense(units, activation='relu'))
	model.add(Dense(1, activation='tanh'))
	return model
# -----------------------
def main():
	"""Script entry point — intentionally a no-op; training runs at module level."""
	return None
# Run main() only when executed directly (currently a no-op placeholder;
# the actual training below runs unconditionally at import time).
if __name__ == "__main__":
	main()
## ----------------------
# --- Training ---
# Scratchpad of past training runs, kept verbatim as a bare string literal:
"""
# Size 64x64
0.0057 with Nvidia2
batch_size=256
epochs=11
samples_per_epoch=(20000//batch_size)*batch_size
-
0.0056 with Nvidia2
batch_size=64
epochs=11
samples_per_epoch=(20000//batch_size)*batch_size
-
--------------
# Size 128x128
## Batch 64
Epoch 12/20
22050/22050 [==============================] - 31s - loss: 0.0048 - val_loss: 0.0046
Epoch 13/20
22050/22050 [==============================] - 30s - loss: 0.0045 - val_loss: 0.0052
Epoch 14/20
22050/22050 [==============================] - 30s - loss: 0.0045 - val_loss: 0.0044
Epoch 10/12
22050/22050 [==============================] - 30s - loss: 0.0054 - val_loss: 0.0050
Epoch 11/12
22050/22050 [==============================] - 30s - loss: 0.0051 - val_loss: 0.0048
Epoch 12/12
22050/22050 [==============================] - 30s - loss: 0.0051 - val_loss: 0.0052
## Batch 256
Epoch 12/12
22050/22050 [==============================] - 30s - loss: 0.0050 - val_loss: 0.0049
## Batch 64 (Winner??)
Epoch 12/12
22050/22050 [==============================] - 30s - loss: 0.0048 - val_loss: 0.0050
## Batch 512
"""
# Hyper parameters for manual tunning
batch_size=64
#batch_size=256
#batch_size=512
#epochs=15
epochs=12
#samples_per_epoch=8192
#samples_per_epoch=4096
#samples_per_epoch=2048
samples_per_epoch=len(y_train)
#samples_per_epoch=(samples_per_epoch//batch_size)*batch_size
# NOTE(review): the assignment below overrides the len(y_train) value above
# with a hard-coded sample count from a previous run — confirm intentional.
samples_per_epoch=22050
#_train_gen = process_batch_generator(X_train,y_train,batch_size,augmentation=True)
#_val_gen = process_batch_generator(X_val,y_val,batch_size,augmentation=False)
# Training batches are augmented; validation batches are left untouched.
_train_gen = process_sequential_batch_generator(X_train,y_train,batch_size,augmentation=True)
_val_gen = process_sequential_batch_generator(X_val,y_val,batch_size,augmentation=False)
# -- Debugging generator results --
#result=next(_train_gen)
#print(result[0].shape[1:])
# -- Model Selection --
#model = foo_model()
#model = simple_model()
#model = nvidia_model()
model =nvidia_model2()
#model = new_model()
"""
# Optimizer forcing a learning rate
adam = Adam(lr=0.0001)
model.compile(loss='mse', optimizer=adam)
"""
# Using Adam optimizer without forcing learning rate, Mean Square Error loss
model.compile(loss='mse', optimizer='adam')
# Checkpoint the weights with the lowest validation loss so far; this helps
# manual model tuning across runs.
model_checkpoint = ModelCheckpoint(
	'model_best.h5',
	monitor='val_loss',
	verbose=0,
	save_best_only=True)
# Fit with the custom generators (Keras 1.x fit_generator keyword names)
history = model.fit_generator(
	_train_gen,
	nb_epoch=epochs,
	samples_per_epoch = samples_per_epoch,
	validation_data= _val_gen,
	nb_val_samples= len(y_val),
	verbose = 1,
	callbacks=[model_checkpoint]
)
model.save('model.h5')
# Bell sound when training is over
print('\a\a\a\a\a\a\a')
# Summarize history for loss, and save chart image
import time
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train', 'validation'], loc='upper left')
# NOTE(review): this rebinding shadows the `time` module imported just above;
# any later time.time() call would fail. It works here only because this is
# the final use of the module.
time=str(int(time.time()))
plt.savefig(time+'-loss.png')
plt.show()
| [
"matplotlib.pyplot.title",
"csv.reader",
"sklearn.model_selection.train_test_split",
"matplotlib.pyplot.figure",
"cv2.imshow",
"random.randint",
"cv2.cvtColor",
"matplotlib.pyplot.imshow",
"keras.layers.Flatten",
"keras.layers.ELU",
"cv2.resize",
"matplotlib.pyplot.show",
"keras.callbacks.Mo... | [((40, 61), 'matplotlib.use', 'matplotlib.use', (['"""Agg"""'], {}), "('Agg')\n", (54, 61), False, 'import matplotlib\n'), ((6897, 6968), 'sklearn.model_selection.train_test_split', 'train_test_split', (['images', 'measurerements'], {'test_size': '(0.2)', 'random_state': '(0)'}), '(images, measurerements, test_size=0.2, random_state=0)\n', (6913, 6968), False, 'from sklearn.model_selection import train_test_split\n'), ((18071, 18159), 'keras.callbacks.ModelCheckpoint', 'ModelCheckpoint', (['"""model_best.h5"""'], {'monitor': '"""val_loss"""', 'verbose': '(0)', 'save_best_only': '(True)'}), "('model_best.h5', monitor='val_loss', verbose=0,\n save_best_only=True)\n", (18086, 18159), False, 'from keras.callbacks import ModelCheckpoint\n'), ((18602, 18635), 'matplotlib.pyplot.plot', 'plt.plot', (["history.history['loss']"], {}), "(history.history['loss'])\n", (18610, 18635), True, 'from matplotlib import pyplot as plt\n'), ((18636, 18673), 'matplotlib.pyplot.plot', 'plt.plot', (["history.history['val_loss']"], {}), "(history.history['val_loss'])\n", (18644, 18673), True, 'from matplotlib import pyplot as plt\n'), ((18674, 18697), 'matplotlib.pyplot.title', 'plt.title', (['"""model loss"""'], {}), "('model loss')\n", (18683, 18697), True, 'from matplotlib import pyplot as plt\n'), ((18698, 18716), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""loss"""'], {}), "('loss')\n", (18708, 18716), True, 'from matplotlib import pyplot as plt\n'), ((18717, 18736), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""epoch"""'], {}), "('epoch')\n", (18727, 18736), True, 'from matplotlib import pyplot as plt\n'), ((18737, 18790), 'matplotlib.pyplot.legend', 'plt.legend', (["['train', 'validation']"], {'loc': '"""upper left"""'}), "(['train', 'validation'], loc='upper left')\n", (18747, 18790), True, 'from matplotlib import pyplot as plt\n'), ((18818, 18849), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(time + '-loss.png')"], {}), "(time + '-loss.png')\n", 
(18829, 18849), True, 'from matplotlib import pyplot as plt\n'), ((18848, 18858), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (18856, 18858), True, 'from matplotlib import pyplot as plt\n'), ((2805, 2843), 'cv2.cvtColor', 'cv2.cvtColor', (['image', 'cv2.COLOR_RGB2HSV'], {}), '(image, cv2.COLOR_RGB2HSV)\n', (2817, 2843), False, 'import cv2\n'), ((2855, 2888), 'numpy.array', 'np.array', (['image'], {'dtype': 'np.float64'}), '(image, dtype=np.float64)\n', (2863, 2888), True, 'import numpy as np\n'), ((3020, 3051), 'numpy.array', 'np.array', (['image'], {'dtype': 'np.uint8'}), '(image, dtype=np.uint8)\n', (3028, 3051), True, 'import numpy as np\n'), ((3066, 3104), 'cv2.cvtColor', 'cv2.cvtColor', (['image', 'cv2.COLOR_HSV2RGB'], {}), '(image, cv2.COLOR_HSV2RGB)\n', (3078, 3104), False, 'import cv2\n'), ((3434, 3478), 'cv2.fillConvexPoly', 'cv2.fillConvexPoly', (['image', 'points', '(0, 0, 0)'], {}), '(image, points, (0, 0, 0))\n', (3452, 3478), False, 'import cv2\n'), ((3479, 3541), 'cv2.addWeighted', 'cv2.addWeighted', (['overlay', 'alpha', 'output', '(1.0 - alpha)', '(0)', 'image'], {}), '(overlay, alpha, output, 1.0 - alpha, 0, image)\n', (3494, 3541), False, 'import cv2\n'), ((4537, 4564), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(12, 7)'}), '(figsize=(12, 7))\n', (4547, 4564), True, 'from matplotlib import pyplot as plt\n'), ((4818, 4828), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4826, 4828), True, 'from matplotlib import pyplot as plt\n'), ((4980, 5037), 'matplotlib.pyplot.hist', 'plt.hist', (['x', '(100)'], {'normed': '(1)', 'facecolor': '"""green"""', 'alpha': '(0.75)'}), "(x, 100, normed=1, facecolor='green', alpha=0.75)\n", (4988, 5037), True, 'from matplotlib import pyplot as plt\n'), ((5040, 5066), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Distribution"""'], {}), "('Distribution')\n", (5050, 5066), True, 'from matplotlib import pyplot as plt\n'), ((5068, 5088), 'matplotlib.pyplot.ylabel', 'plt.ylabel', 
(['"""Angles"""'], {}), "('Angles')\n", (5078, 5088), True, 'from matplotlib import pyplot as plt\n'), ((5090, 5118), 'matplotlib.pyplot.title', 'plt.title', (['"""Steering angles"""'], {}), "('Steering angles')\n", (5099, 5118), True, 'from matplotlib import pyplot as plt\n'), ((5151, 5165), 'matplotlib.pyplot.grid', 'plt.grid', (['(True)'], {}), '(True)\n', (5159, 5165), True, 'from matplotlib import pyplot as plt\n'), ((5168, 5178), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5176, 5178), True, 'from matplotlib import pyplot as plt\n'), ((5254, 5289), 'cv2.imshow', 'cv2.imshow', (['"""Display window"""', 'image'], {}), "('Display window', image)\n", (5264, 5289), False, 'import cv2\n'), ((5292, 5306), 'cv2.waitKey', 'cv2.waitKey', (['(0)'], {}), '(0)\n', (5303, 5306), False, 'import cv2\n'), ((5454, 5467), 'sklearn.utils.shuffle', 'shuffle', (['X', 'y'], {}), '(X, y)\n', (5461, 5467), False, 'from sklearn.utils import shuffle\n'), ((6059, 6072), 'sklearn.utils.shuffle', 'shuffle', (['X', 'y'], {}), '(X, y)\n', (6066, 6072), False, 'from sklearn.utils import shuffle\n'), ((7449, 7461), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (7459, 7461), False, 'from keras.models import Sequential\n'), ((7609, 7621), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (7619, 7621), False, 'from keras.models import Sequential\n'), ((8436, 8448), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (8446, 8448), False, 'from keras.models import Sequential\n'), ((11547, 11559), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (11557, 11559), False, 'from keras.models import Sequential\n'), ((12446, 12458), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (12456, 12458), False, 'from keras.models import Sequential\n'), ((13452, 13464), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (13462, 13464), False, 'from keras.models import Sequential\n'), ((14778, 14790), 'keras.models.Sequential', 'Sequential', ([], 
{}), '()\n', (14788, 14790), False, 'from keras.models import Sequential\n'), ((1615, 1634), 'csv.reader', 'csv.reader', (['csvfile'], {}), '(csvfile)\n', (1625, 1634), False, 'import csv\n'), ((2907, 2926), 'numpy.random.uniform', 'np.random.uniform', ([], {}), '()\n', (2924, 2926), True, 'import numpy as np\n'), ((4224, 4246), 'random.randint', 'random.randint', (['(0)', '(100)'], {}), '(0, 100)\n', (4238, 4246), False, 'import random\n'), ((4303, 4322), 'keras.preprocessing.image.flip_axis', 'flip_axis', (['image', '(1)'], {}), '(image, 1)\n', (4312, 4322), False, 'from keras.preprocessing.image import random_shift, flip_axis\n'), ((4682, 4720), 'cv2.cvtColor', 'cv2.cvtColor', (['image', 'cv2.COLOR_BGR2RGB'], {}), '(image, cv2.COLOR_BGR2RGB)\n', (4694, 4720), False, 'import cv2\n'), ((4726, 4743), 'matplotlib.pyplot.imshow', 'plt.imshow', (['image'], {}), '(image)\n', (4736, 4743), True, 'from matplotlib import pyplot as plt\n'), ((4749, 4764), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (4757, 4764), True, 'from matplotlib import pyplot as plt\n'), ((6393, 6418), 'sklearn.utils.shuffle', 'shuffle', (['batch_X', 'batch_y'], {}), '(batch_X, batch_y)\n', (6400, 6418), False, 'from sklearn.utils import shuffle\n'), ((7474, 7506), 'keras.layers.Flatten', 'Flatten', ([], {'input_shape': '(64, 64, 3)'}), '(input_shape=(64, 64, 3))\n', (7481, 7506), False, 'from keras.layers import Flatten, Dense, Lambda, Dropout, ELU\n'), ((7517, 7525), 'keras.layers.Dense', 'Dense', (['(1)'], {}), '(1)\n', (7522, 7525), False, 'from keras.layers import Flatten, Dense, Lambda, Dropout, ELU\n'), ((7772, 7837), 'keras.layers.Lambda', 'Lambda', (['(lambda x: x / 255.0 - 0.5)'], {'input_shape': 'IMAGES_INPUT_SHAPE'}), '(lambda x: x / 255.0 - 0.5, input_shape=IMAGES_INPUT_SHAPE)\n', (7778, 7837), False, 'from keras.layers import Flatten, Dense, Lambda, Dropout, ELU\n'), ((8000, 8041), 'keras.layers.convolutional.Convolution2D', 'Convolution2D', (['(6)', '(5)', 
'(5)'], {'activation': '"""relu"""'}), "(6, 5, 5, activation='relu')\n", (8013, 8041), False, 'from keras.layers.convolutional import Convolution2D\n'), ((8051, 8097), 'keras.layers.convolutional.MaxPooling2D', 'MaxPooling2D', ([], {'pool_size': '(2, 2)', 'strides': '(2, 2)'}), '(pool_size=(2, 2), strides=(2, 2))\n', (8063, 8097), False, 'from keras.layers.convolutional import MaxPooling2D\n'), ((8149, 8190), 'keras.layers.convolutional.Convolution2D', 'Convolution2D', (['(6)', '(5)', '(5)'], {'activation': '"""relu"""'}), "(6, 5, 5, activation='relu')\n", (8162, 8190), False, 'from keras.layers.convolutional import Convolution2D\n'), ((8200, 8246), 'keras.layers.convolutional.MaxPooling2D', 'MaxPooling2D', ([], {'pool_size': '(2, 2)', 'strides': '(2, 2)'}), '(pool_size=(2, 2), strides=(2, 2))\n', (8212, 8246), False, 'from keras.layers.convolutional import MaxPooling2D\n'), ((8261, 8270), 'keras.layers.Flatten', 'Flatten', ([], {}), '()\n', (8268, 8270), False, 'from keras.layers import Flatten, Dense, Lambda, Dropout, ELU\n'), ((8284, 8294), 'keras.layers.Dense', 'Dense', (['(500)'], {}), '(500)\n', (8289, 8294), False, 'from keras.layers import Flatten, Dense, Lambda, Dropout, ELU\n'), ((8307, 8317), 'keras.layers.Dense', 'Dense', (['(120)'], {}), '(120)\n', (8312, 8317), False, 'from keras.layers import Flatten, Dense, Lambda, Dropout, ELU\n'), ((8330, 8339), 'keras.layers.Dense', 'Dense', (['(84)'], {}), '(84)\n', (8335, 8339), False, 'from keras.layers import Flatten, Dense, Lambda, Dropout, ELU\n'), ((8353, 8361), 'keras.layers.Dense', 'Dense', (['(1)'], {}), '(1)\n', (8358, 8361), False, 'from keras.layers import Flatten, Dense, Lambda, Dropout, ELU\n'), ((8599, 8664), 'keras.layers.Lambda', 'Lambda', (['(lambda x: x / 255.0 - 0.5)'], {'input_shape': 'IMAGES_INPUT_SHAPE'}), '(lambda x: x / 255.0 - 0.5, input_shape=IMAGES_INPUT_SHAPE)\n', (8605, 8664), False, 'from keras.layers import Flatten, Dense, Lambda, Dropout, ELU\n'), ((8827, 8868), 
'keras.layers.convolutional.Convolution2D', 'Convolution2D', (['(6)', '(5)', '(5)'], {'activation': '"""relu"""'}), "(6, 5, 5, activation='relu')\n", (8840, 8868), False, 'from keras.layers.convolutional import Convolution2D\n'), ((8878, 8924), 'keras.layers.convolutional.MaxPooling2D', 'MaxPooling2D', ([], {'pool_size': '(2, 2)', 'strides': '(2, 2)'}), '(pool_size=(2, 2), strides=(2, 2))\n', (8890, 8924), False, 'from keras.layers.convolutional import MaxPooling2D\n'), ((8976, 9017), 'keras.layers.convolutional.Convolution2D', 'Convolution2D', (['(6)', '(5)', '(5)'], {'activation': '"""relu"""'}), "(6, 5, 5, activation='relu')\n", (8989, 9017), False, 'from keras.layers.convolutional import Convolution2D\n'), ((9027, 9073), 'keras.layers.convolutional.MaxPooling2D', 'MaxPooling2D', ([], {'pool_size': '(2, 2)', 'strides': '(2, 2)'}), '(pool_size=(2, 2), strides=(2, 2))\n', (9039, 9073), False, 'from keras.layers.convolutional import MaxPooling2D\n'), ((9089, 9131), 'keras.layers.convolutional.Convolution2D', 'Convolution2D', (['(24)', '(5)', '(5)'], {'activation': '"""relu"""'}), "(24, 5, 5, activation='relu')\n", (9102, 9131), False, 'from keras.layers.convolutional import Convolution2D\n'), ((9141, 9187), 'keras.layers.convolutional.MaxPooling2D', 'MaxPooling2D', ([], {'pool_size': '(2, 2)', 'strides': '(2, 2)'}), '(pool_size=(2, 2), strides=(2, 2))\n', (9153, 9187), False, 'from keras.layers.convolutional import MaxPooling2D\n'), ((9201, 9243), 'keras.layers.convolutional.Convolution2D', 'Convolution2D', (['(36)', '(3)', '(3)'], {'activation': '"""relu"""'}), "(36, 3, 3, activation='relu')\n", (9214, 9243), False, 'from keras.layers.convolutional import Convolution2D\n'), ((9253, 9299), 'keras.layers.convolutional.MaxPooling2D', 'MaxPooling2D', ([], {'pool_size': '(2, 2)', 'strides': '(1, 1)'}), '(pool_size=(2, 2), strides=(1, 1))\n', (9265, 9299), False, 'from keras.layers.convolutional import MaxPooling2D\n'), ((9314, 9326), 'keras.layers.Dropout', 'Dropout', 
(['(0.5)'], {}), '(0.5)\n', (9321, 9326), False, 'from keras.layers import Flatten, Dense, Lambda, Dropout, ELU\n'), ((9340, 9349), 'keras.layers.Flatten', 'Flatten', ([], {}), '()\n', (9347, 9349), False, 'from keras.layers import Flatten, Dense, Lambda, Dropout, ELU\n'), ((9365, 9376), 'keras.layers.Dense', 'Dense', (['(1024)'], {}), '(1024)\n', (9370, 9376), False, 'from keras.layers import Flatten, Dense, Lambda, Dropout, ELU\n'), ((9389, 9401), 'keras.layers.Dropout', 'Dropout', (['(0.5)'], {}), '(0.5)\n', (9396, 9401), False, 'from keras.layers import Flatten, Dense, Lambda, Dropout, ELU\n'), ((9413, 9423), 'keras.layers.Dense', 'Dense', (['(500)'], {}), '(500)\n', (9418, 9423), False, 'from keras.layers import Flatten, Dense, Lambda, Dropout, ELU\n'), ((9436, 9446), 'keras.layers.Dense', 'Dense', (['(120)'], {}), '(120)\n', (9441, 9446), False, 'from keras.layers import Flatten, Dense, Lambda, Dropout, ELU\n'), ((9459, 9468), 'keras.layers.Dense', 'Dense', (['(84)'], {}), '(84)\n', (9464, 9468), False, 'from keras.layers import Flatten, Dense, Lambda, Dropout, ELU\n'), ((9481, 9490), 'keras.layers.Dense', 'Dense', (['(32)'], {}), '(32)\n', (9486, 9490), False, 'from keras.layers import Flatten, Dense, Lambda, Dropout, ELU\n'), ((9504, 9512), 'keras.layers.Dense', 'Dense', (['(1)'], {}), '(1)\n', (9509, 9512), False, 'from keras.layers import Flatten, Dense, Lambda, Dropout, ELU\n'), ((11572, 11637), 'keras.layers.Lambda', 'Lambda', (['(lambda x: x / 255.0 - 0.5)'], {'input_shape': 'IMAGES_INPUT_SHAPE'}), '(lambda x: x / 255.0 - 0.5, input_shape=IMAGES_INPUT_SHAPE)\n', (11578, 11637), False, 'from keras.layers import Flatten, Dense, Lambda, Dropout, ELU\n'), ((11661, 11747), 'keras.layers.convolutional.Convolution2D', 'Convolution2D', (['(24)', '(5)', '(5)'], {'border_mode': '"""valid"""', 'activation': '"""relu"""', 'subsample': '(2, 2)'}), "(24, 5, 5, border_mode='valid', activation='relu', subsample=(\n 2, 2))\n", (11674, 11747), False, 'from 
keras.layers.convolutional import Convolution2D\n'), ((11751, 11837), 'keras.layers.convolutional.Convolution2D', 'Convolution2D', (['(36)', '(5)', '(5)'], {'border_mode': '"""valid"""', 'activation': '"""relu"""', 'subsample': '(2, 2)'}), "(36, 5, 5, border_mode='valid', activation='relu', subsample=(\n 2, 2))\n", (11764, 11837), False, 'from keras.layers.convolutional import Convolution2D\n'), ((11841, 11927), 'keras.layers.convolutional.Convolution2D', 'Convolution2D', (['(48)', '(5)', '(5)'], {'border_mode': '"""valid"""', 'activation': '"""relu"""', 'subsample': '(2, 2)'}), "(48, 5, 5, border_mode='valid', activation='relu', subsample=(\n 2, 2))\n", (11854, 11927), False, 'from keras.layers.convolutional import Convolution2D\n'), ((11931, 11943), 'keras.layers.Dropout', 'Dropout', (['(0.4)'], {}), '(0.4)\n', (11938, 11943), False, 'from keras.layers import Flatten, Dense, Lambda, Dropout, ELU\n'), ((11955, 12041), 'keras.layers.convolutional.Convolution2D', 'Convolution2D', (['(64)', '(3)', '(3)'], {'border_mode': '"""valid"""', 'activation': '"""relu"""', 'subsample': '(1, 1)'}), "(64, 3, 3, border_mode='valid', activation='relu', subsample=(\n 1, 1))\n", (11968, 12041), False, 'from keras.layers.convolutional import Convolution2D\n'), ((12045, 12131), 'keras.layers.convolutional.Convolution2D', 'Convolution2D', (['(64)', '(3)', '(3)'], {'border_mode': '"""valid"""', 'activation': '"""relu"""', 'subsample': '(1, 1)'}), "(64, 3, 3, border_mode='valid', activation='relu', subsample=(\n 1, 1))\n", (12058, 12131), False, 'from keras.layers.convolutional import Convolution2D\n'), ((12135, 12147), 'keras.layers.Dropout', 'Dropout', (['(0.3)'], {}), '(0.3)\n', (12142, 12147), False, 'from keras.layers import Flatten, Dense, Lambda, Dropout, ELU\n'), ((12159, 12168), 'keras.layers.Flatten', 'Flatten', ([], {}), '()\n', (12166, 12168), False, 'from keras.layers import Flatten, Dense, Lambda, Dropout, ELU\n'), ((12181, 12211), 'keras.layers.Dense', 'Dense', 
(['(1164)'], {'activation': '"""relu"""'}), "(1164, activation='relu')\n", (12186, 12211), False, 'from keras.layers import Flatten, Dense, Lambda, Dropout, ELU\n'), ((12224, 12236), 'keras.layers.Dropout', 'Dropout', (['(0.2)'], {}), '(0.2)\n', (12231, 12236), False, 'from keras.layers import Flatten, Dense, Lambda, Dropout, ELU\n'), ((12248, 12277), 'keras.layers.Dense', 'Dense', (['(100)'], {'activation': '"""relu"""'}), "(100, activation='relu')\n", (12253, 12277), False, 'from keras.layers import Flatten, Dense, Lambda, Dropout, ELU\n'), ((12290, 12318), 'keras.layers.Dense', 'Dense', (['(50)'], {'activation': '"""relu"""'}), "(50, activation='relu')\n", (12295, 12318), False, 'from keras.layers import Flatten, Dense, Lambda, Dropout, ELU\n'), ((12331, 12359), 'keras.layers.Dense', 'Dense', (['(10)'], {'activation': '"""relu"""'}), "(10, activation='relu')\n", (12336, 12359), False, 'from keras.layers import Flatten, Dense, Lambda, Dropout, ELU\n'), ((12372, 12399), 'keras.layers.Dense', 'Dense', (['(1)'], {'activation': '"""tanh"""'}), "(1, activation='tanh')\n", (12377, 12399), False, 'from keras.layers import Flatten, Dense, Lambda, Dropout, ELU\n'), ((12471, 12536), 'keras.layers.Lambda', 'Lambda', (['(lambda x: x / 255.0 - 0.5)'], {'input_shape': 'IMAGES_INPUT_SHAPE'}), '(lambda x: x / 255.0 - 0.5, input_shape=IMAGES_INPUT_SHAPE)\n', (12477, 12536), False, 'from keras.layers import Flatten, Dense, Lambda, Dropout, ELU\n'), ((12596, 12686), 'keras.layers.convolutional.Convolution2D', 'Convolution2D', (['(32)', '(5)', '(5)'], {'input_shape': '(64, 64, 3)', 'subsample': '(2, 2)', 'border_mode': '"""same"""'}), "(32, 5, 5, input_shape=(64, 64, 3), subsample=(2, 2),\n border_mode='same')\n", (12609, 12686), False, 'from keras.layers.convolutional import Convolution2D\n'), ((12695, 12700), 'keras.layers.ELU', 'ELU', ([], {}), '()\n', (12698, 12700), False, 'from keras.layers import Flatten, Dense, Lambda, Dropout, ELU\n'), ((12750, 12812), 
'keras.layers.convolutional.Convolution2D', 'Convolution2D', (['(16)', '(3)', '(3)'], {'subsample': '(1, 1)', 'border_mode': '"""valid"""'}), "(16, 3, 3, subsample=(1, 1), border_mode='valid')\n", (12763, 12812), False, 'from keras.layers.convolutional import Convolution2D\n'), ((12825, 12830), 'keras.layers.ELU', 'ELU', ([], {}), '()\n', (12828, 12830), False, 'from keras.layers import Flatten, Dense, Lambda, Dropout, ELU\n'), ((12843, 12855), 'keras.layers.Dropout', 'Dropout', (['(0.5)'], {}), '(0.5)\n', (12850, 12855), False, 'from keras.layers import Flatten, Dense, Lambda, Dropout, ELU\n'), ((12870, 12911), 'keras.layers.convolutional.MaxPooling2D', 'MaxPooling2D', (['(2, 2)'], {'border_mode': '"""valid"""'}), "((2, 2), border_mode='valid')\n", (12882, 12911), False, 'from keras.layers.convolutional import MaxPooling2D\n'), ((12961, 13023), 'keras.layers.convolutional.Convolution2D', 'Convolution2D', (['(16)', '(3)', '(3)'], {'subsample': '(1, 1)', 'border_mode': '"""valid"""'}), "(16, 3, 3, subsample=(1, 1), border_mode='valid')\n", (12974, 13023), False, 'from keras.layers.convolutional import Convolution2D\n'), ((13036, 13041), 'keras.layers.ELU', 'ELU', ([], {}), '()\n', (13039, 13041), False, 'from keras.layers import Flatten, Dense, Lambda, Dropout, ELU\n'), ((13105, 13114), 'keras.layers.Flatten', 'Flatten', ([], {}), '()\n', (13112, 13114), False, 'from keras.layers import Flatten, Dense, Lambda, Dropout, ELU\n'), ((13139, 13150), 'keras.layers.Dense', 'Dense', (['(1024)'], {}), '(1024)\n', (13144, 13150), False, 'from keras.layers import Flatten, Dense, Lambda, Dropout, ELU\n'), ((13163, 13175), 'keras.layers.Dropout', 'Dropout', (['(0.3)'], {}), '(0.3)\n', (13170, 13175), False, 'from keras.layers import Flatten, Dense, Lambda, Dropout, ELU\n'), ((13190, 13195), 'keras.layers.ELU', 'ELU', ([], {}), '()\n', (13193, 13195), False, 'from keras.layers import Flatten, Dense, Lambda, Dropout, ELU\n'), ((13220, 13230), 'keras.layers.Dense', 'Dense', 
(['(512)'], {}), '(512)\n', (13225, 13230), False, 'from keras.layers import Flatten, Dense, Lambda, Dropout, ELU\n'), ((13243, 13248), 'keras.layers.ELU', 'ELU', ([], {}), '()\n', (13246, 13248), False, 'from keras.layers import Flatten, Dense, Lambda, Dropout, ELU\n'), ((13325, 13333), 'keras.layers.Dense', 'Dense', (['(1)'], {}), '(1)\n', (13330, 13333), False, 'from keras.layers import Flatten, Dense, Lambda, Dropout, ELU\n'), ((13478, 13543), 'keras.layers.Lambda', 'Lambda', (['(lambda x: x / 127.5 - 1.0)'], {'input_shape': 'IMAGES_INPUT_SHAPE'}), '(lambda x: x / 127.5 - 1.0, input_shape=IMAGES_INPUT_SHAPE)\n', (13484, 13543), False, 'from keras.layers import Flatten, Dense, Lambda, Dropout, ELU\n'), ((13706, 13791), 'keras.layers.convolutional.Convolution2D', 'Convolution2D', (['(24)', '(5)', '(5)'], {'subsample': '(2, 2)', 'border_mode': '"""valid"""', 'init': '"""he_normal"""'}), "(24, 5, 5, subsample=(2, 2), border_mode='valid', init='he_normal'\n )\n", (13719, 13791), False, 'from keras.layers.convolutional import Convolution2D\n'), ((13816, 13821), 'keras.layers.ELU', 'ELU', ([], {}), '()\n', (13819, 13821), False, 'from keras.layers import Flatten, Dense, Lambda, Dropout, ELU\n'), ((13881, 13966), 'keras.layers.convolutional.Convolution2D', 'Convolution2D', (['(36)', '(5)', '(5)'], {'subsample': '(2, 2)', 'border_mode': '"""valid"""', 'init': '"""he_normal"""'}), "(36, 5, 5, subsample=(2, 2), border_mode='valid', init='he_normal'\n )\n", (13894, 13966), False, 'from keras.layers.convolutional import Convolution2D\n'), ((13991, 13996), 'keras.layers.ELU', 'ELU', ([], {}), '()\n', (13994, 13996), False, 'from keras.layers import Flatten, Dense, Lambda, Dropout, ELU\n'), ((14056, 14141), 'keras.layers.convolutional.Convolution2D', 'Convolution2D', (['(48)', '(5)', '(5)'], {'subsample': '(2, 2)', 'border_mode': '"""valid"""', 'init': '"""he_normal"""'}), "(48, 5, 5, subsample=(2, 2), border_mode='valid', init='he_normal'\n )\n", (14069, 14141), False, 'from 
keras.layers.convolutional import Convolution2D\n'), ((14166, 14171), 'keras.layers.ELU', 'ELU', ([], {}), '()\n', (14169, 14171), False, 'from keras.layers import Flatten, Dense, Lambda, Dropout, ELU\n'), ((14231, 14316), 'keras.layers.convolutional.Convolution2D', 'Convolution2D', (['(64)', '(3)', '(3)'], {'subsample': '(1, 1)', 'border_mode': '"""valid"""', 'init': '"""he_normal"""'}), "(64, 3, 3, subsample=(1, 1), border_mode='valid', init='he_normal'\n )\n", (14244, 14316), False, 'from keras.layers.convolutional import Convolution2D\n'), ((14341, 14346), 'keras.layers.ELU', 'ELU', ([], {}), '()\n', (14344, 14346), False, 'from keras.layers import Flatten, Dense, Lambda, Dropout, ELU\n'), ((14359, 14364), 'keras.layers.ELU', 'ELU', ([], {}), '()\n', (14362, 14364), False, 'from keras.layers import Flatten, Dense, Lambda, Dropout, ELU\n'), ((14424, 14509), 'keras.layers.convolutional.Convolution2D', 'Convolution2D', (['(64)', '(3)', '(3)'], {'subsample': '(1, 1)', 'border_mode': '"""valid"""', 'init': '"""he_normal"""'}), "(64, 3, 3, subsample=(1, 1), border_mode='valid', init='he_normal'\n )\n", (14437, 14509), False, 'from keras.layers.convolutional import Convolution2D\n'), ((14534, 14539), 'keras.layers.ELU', 'ELU', ([], {}), '()\n', (14537, 14539), False, 'from keras.layers import Flatten, Dense, Lambda, Dropout, ELU\n'), ((14553, 14562), 'keras.layers.Flatten', 'Flatten', ([], {}), '()\n', (14560, 14562), False, 'from keras.layers import Flatten, Dense, Lambda, Dropout, ELU\n'), ((14712, 14738), 'keras.layers.Dense', 'Dense', (['(1)'], {'init': '"""he_normal"""'}), "(1, init='he_normal')\n", (14717, 14738), False, 'from keras.layers import Flatten, Dense, Lambda, Dropout, ELU\n'), ((14803, 14868), 'keras.layers.Lambda', 'Lambda', (['(lambda x: x / 255.0 - 0.5)'], {'input_shape': 'IMAGES_INPUT_SHAPE'}), '(lambda x: x / 255.0 - 0.5, input_shape=IMAGES_INPUT_SHAPE)\n', (14809, 14868), False, 'from keras.layers import Flatten, Dense, Lambda, Dropout, ELU\n'), 
((14895, 14981), 'keras.layers.convolutional.Convolution2D', 'Convolution2D', (['(24)', '(5)', '(5)'], {'border_mode': '"""valid"""', 'activation': '"""relu"""', 'subsample': '(2, 2)'}), "(24, 5, 5, border_mode='valid', activation='relu', subsample=(\n 2, 2))\n", (14908, 14981), False, 'from keras.layers.convolutional import Convolution2D\n'), ((14985, 15071), 'keras.layers.convolutional.Convolution2D', 'Convolution2D', (['(36)', '(5)', '(5)'], {'border_mode': '"""valid"""', 'activation': '"""relu"""', 'subsample': '(2, 2)'}), "(36, 5, 5, border_mode='valid', activation='relu', subsample=(\n 2, 2))\n", (14998, 15071), False, 'from keras.layers.convolutional import Convolution2D\n'), ((15075, 15161), 'keras.layers.convolutional.Convolution2D', 'Convolution2D', (['(48)', '(5)', '(5)'], {'border_mode': '"""valid"""', 'activation': '"""relu"""', 'subsample': '(2, 2)'}), "(48, 5, 5, border_mode='valid', activation='relu', subsample=(\n 2, 2))\n", (15088, 15161), False, 'from keras.layers.convolutional import Convolution2D\n'), ((15165, 15177), 'keras.layers.Dropout', 'Dropout', (['(0.4)'], {}), '(0.4)\n', (15172, 15177), False, 'from keras.layers import Flatten, Dense, Lambda, Dropout, ELU\n'), ((15189, 15275), 'keras.layers.convolutional.Convolution2D', 'Convolution2D', (['(64)', '(3)', '(3)'], {'border_mode': '"""valid"""', 'activation': '"""relu"""', 'subsample': '(1, 1)'}), "(64, 3, 3, border_mode='valid', activation='relu', subsample=(\n 1, 1))\n", (15202, 15275), False, 'from keras.layers.convolutional import Convolution2D\n'), ((15279, 15365), 'keras.layers.convolutional.Convolution2D', 'Convolution2D', (['(64)', '(3)', '(3)'], {'border_mode': '"""valid"""', 'activation': '"""relu"""', 'subsample': '(1, 1)'}), "(64, 3, 3, border_mode='valid', activation='relu', subsample=(\n 1, 1))\n", (15292, 15365), False, 'from keras.layers.convolutional import Convolution2D\n'), ((15369, 15381), 'keras.layers.Dropout', 'Dropout', (['(0.3)'], {}), '(0.3)\n', (15376, 15381), 
False, 'from keras.layers import Flatten, Dense, Lambda, Dropout, ELU\n'), ((15393, 15402), 'keras.layers.Flatten', 'Flatten', ([], {}), '()\n', (15400, 15402), False, 'from keras.layers import Flatten, Dense, Lambda, Dropout, ELU\n'), ((15415, 15445), 'keras.layers.Dense', 'Dense', (['(1164)'], {'activation': '"""relu"""'}), "(1164, activation='relu')\n", (15420, 15445), False, 'from keras.layers import Flatten, Dense, Lambda, Dropout, ELU\n'), ((15458, 15470), 'keras.layers.Dropout', 'Dropout', (['(0.2)'], {}), '(0.2)\n', (15465, 15470), False, 'from keras.layers import Flatten, Dense, Lambda, Dropout, ELU\n'), ((15482, 15511), 'keras.layers.Dense', 'Dense', (['(100)'], {'activation': '"""relu"""'}), "(100, activation='relu')\n", (15487, 15511), False, 'from keras.layers import Flatten, Dense, Lambda, Dropout, ELU\n'), ((15524, 15552), 'keras.layers.Dense', 'Dense', (['(50)'], {'activation': '"""relu"""'}), "(50, activation='relu')\n", (15529, 15552), False, 'from keras.layers import Flatten, Dense, Lambda, Dropout, ELU\n'), ((15565, 15593), 'keras.layers.Dense', 'Dense', (['(10)'], {'activation': '"""relu"""'}), "(10, activation='relu')\n", (15570, 15593), False, 'from keras.layers import Flatten, Dense, Lambda, Dropout, ELU\n'), ((15606, 15633), 'keras.layers.Dense', 'Dense', (['(1)'], {'activation': '"""tanh"""'}), "(1, activation='tanh')\n", (15611, 15633), False, 'from keras.layers import Flatten, Dense, Lambda, Dropout, ELU\n'), ((18804, 18815), 'time.time', 'time.time', ([], {}), '()\n', (18813, 18815), False, 'import time\n'), ((14654, 14680), 'keras.layers.Dense', 'Dense', (['i'], {'init': '"""he_normal"""'}), "(i, init='he_normal')\n", (14659, 14680), False, 'from keras.layers import Flatten, Dense, Lambda, Dropout, ELU\n'), ((14693, 14698), 'keras.layers.ELU', 'ELU', ([], {}), '()\n', (14696, 14698), False, 'from keras.layers import Flatten, Dense, Lambda, Dropout, ELU\n'), ((1957, 1979), 'cv2.imread', 'cv2.imread', (['local_path'], {}), 
'(local_path)\n', (1967, 1979), False, 'import cv2\n'), ((2047, 2112), 'cv2.resize', 'cv2.resize', (['image', '(IMAGES_INPUT_SHAPE[1], IMAGES_INPUT_SHAPE[0])'], {}), '(image, (IMAGES_INPUT_SHAPE[1], IMAGES_INPUT_SHAPE[0]))\n', (2057, 2112), False, 'import cv2\n'), ((4635, 4660), 'random.choice', 'random.choice', (['collection'], {}), '(collection)\n', (4648, 4660), False, 'import random\n'), ((5946, 5963), 'numpy.array', 'np.array', (['batch_X'], {}), '(batch_X)\n', (5954, 5963), True, 'import numpy as np\n'), ((5965, 5982), 'numpy.array', 'np.array', (['batch_y'], {}), '(batch_y)\n', (5973, 5982), True, 'import numpy as np\n'), ((6427, 6444), 'numpy.array', 'np.array', (['batch_X'], {}), '(batch_X)\n', (6435, 6444), True, 'import numpy as np\n'), ((6446, 6463), 'numpy.array', 'np.array', (['batch_y'], {}), '(batch_y)\n', (6454, 6463), True, 'import numpy as np\n'), ((3239, 3259), 'random.randint', 'random.randint', (['(0)', 'w'], {}), '(0, w)\n', (3253, 3259), False, 'import random\n'), ((3266, 3286), 'random.randint', 'random.randint', (['(0)', 'w'], {}), '(0, w)\n', (3280, 3286), False, 'import random\n')] |
# Low-pass filter demo: keep only the lowest 10% / 2% of the real-FFT
# coefficients of a signal loaded from disk, then plot the original signal
# against both smoothed reconstructions.
import numpy as np
import matplotlib.pyplot as plt
a=np.loadtxt('EXAMPLE.txt') # Signal samples, one value per line of the text file.
b=np.linspace(0,len(a),len(a)) # Sample-index axis for the data to be plotted against.
###
FT1=np.fft.rfft(a)# Real fast Fourier transform of the signal.
FT2=np.fft.rfft(a)
# The for loops below zero the last 90% and the last 98% of the Fourier
# coefficients respectively, leaving only the low-frequency content.
for i in range(len(FT1)):
    if i>((len(FT1))//10):
        FT1[i]=0
for i in range(len(FT2)):
    if i>(len(FT2))*0.02:
        FT2[i]=0
iFT1=np.fft.irfft(FT1) # Inverse real FFT: reconstruct the filtered signal in the time domain.
iFT2=np.fft.irfft(FT2)
plt.figure(1)
plt.plot(b,a,'b-')   # original signal (blue)
plt.plot(b,iFT1,'r-') # 10% low-pass reconstruction (red)
plt.plot(b,iFT2,'g-') # 2% low-pass reconstruction (green)
plt.xlabel('XLABEL')
plt.ylabel('YLABEL')
plt.title('TITLE')
plt.show()
| [
"matplotlib.pyplot.title",
"numpy.fft.rfft",
"matplotlib.pyplot.show",
"numpy.fft.irfft",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.figure",
"numpy.loadtxt",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel"
] | [((54, 79), 'numpy.loadtxt', 'np.loadtxt', (['"""EXAMPLE.txt"""'], {}), "('EXAMPLE.txt')\n", (64, 79), True, 'import numpy as np\n'), ((207, 221), 'numpy.fft.rfft', 'np.fft.rfft', (['a'], {}), '(a)\n', (218, 221), True, 'import numpy as np\n'), ((291, 305), 'numpy.fft.rfft', 'np.fft.rfft', (['a'], {}), '(a)\n', (302, 305), True, 'import numpy as np\n'), ((535, 552), 'numpy.fft.irfft', 'np.fft.irfft', (['FT1'], {}), '(FT1)\n', (547, 552), True, 'import numpy as np\n'), ((646, 663), 'numpy.fft.irfft', 'np.fft.irfft', (['FT2'], {}), '(FT2)\n', (658, 663), True, 'import numpy as np\n'), ((665, 678), 'matplotlib.pyplot.figure', 'plt.figure', (['(1)'], {}), '(1)\n', (675, 678), True, 'import matplotlib.pyplot as plt\n'), ((679, 699), 'matplotlib.pyplot.plot', 'plt.plot', (['b', 'a', '"""b-"""'], {}), "(b, a, 'b-')\n", (687, 699), True, 'import matplotlib.pyplot as plt\n'), ((698, 721), 'matplotlib.pyplot.plot', 'plt.plot', (['b', 'iFT1', '"""r-"""'], {}), "(b, iFT1, 'r-')\n", (706, 721), True, 'import matplotlib.pyplot as plt\n'), ((720, 743), 'matplotlib.pyplot.plot', 'plt.plot', (['b', 'iFT2', '"""g-"""'], {}), "(b, iFT2, 'g-')\n", (728, 743), True, 'import matplotlib.pyplot as plt\n'), ((742, 762), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""XLABEL"""'], {}), "('XLABEL')\n", (752, 762), True, 'import matplotlib.pyplot as plt\n'), ((763, 783), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""YLABEL"""'], {}), "('YLABEL')\n", (773, 783), True, 'import matplotlib.pyplot as plt\n'), ((784, 802), 'matplotlib.pyplot.title', 'plt.title', (['"""TITLE"""'], {}), "('TITLE')\n", (793, 802), True, 'import matplotlib.pyplot as plt\n'), ((803, 813), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (811, 813), True, 'import matplotlib.pyplot as plt\n')] |
import pandas as pd
from flask import Flask
from paths import PATHS
from flask_restful import Resource, Api, reqparse
from data_processing.src.data_processor import DataProcessor
import datetime
import requests
import numpy as np
import lightgbm as lgb
import os
app = Flask(__name__)
api = Api(app)
BASE_URL = "http://127.0.0.1:5000/"
def get_location(position):
    """Look up the GPS coordinates for a track position.

    Reads the module-level ``location_df`` table (indexed by position)
    and returns a dict with latitude, longitude and the position index.
    Note: the "Lattitude" key spelling is part of the public response
    schema and is kept as-is.
    """
    global location_df
    idx = int(position)
    row = location_df.loc[idx]
    result = {"Lattitude": row.Latitude, "Longitude": row.Longitude}
    result["position"] = idx
    return result
class SimulateDisruptions(Resource):
    """REST resource that fabricates a small set of disruption events."""

    def get(self):
        # Date is hard-coded — presumably a demo fixture; TODO confirm.
        date = '2021-06-10'
        response = []
        # Pick two random track positions in the serviced range [197, 420).
        for pos in np.random.randint(197,420,2):
            params = {"position": pos, "date":date}
            print("Making historical data request")
            # Fetch historical sensor data for this position/date from the
            # service's own /api/historical endpoint.
            history = requests.get(url=BASE_URL + "/api/historical", params=params).json()
            location = get_location(pos)
            # Merge history, coordinates and a placeholder description into
            # one event record.
            response.append({**history, **location, **{"description": "PLACE HOLDER EVENT"}})
        return {"events":response}, 200
class Coordinates(Resource):
    """REST resource that maps a track position to GPS coordinates.

    Expects a ``position`` request argument: an integer index in units of
    100 metres (roughly 97 to 428, per the ``location_df`` lookup table).
    NOTE: the "Lattitude" key spelling is part of the public response
    schema and is kept for compatibility.
    """

    def get(self):
        global location_df
        parser = reqparse.RequestParser()
        # Position along the track in 100 m units (varies from 97 to 428).
        parser.add_argument("position", required=True)
        args = parser.parse_args()
        try:
            data = location_df.loc[int(args.position)]
        except (KeyError, ValueError):
            # Non-integer or unknown position: answer with 400 Bad Request.
            # (Previously a dead fallback lookup was assigned and discarded
            # here, under an over-broad `except Exception`.)
            return {}, 400
        return {
            "Lattitude": data.Latitude,
            "Longitude": data.Longitude,
            "position": int(args.position),
        }, 200
class Prediction(Resource):
    """REST resource serving disruption predictions for a track position.

    Loads one pre-trained LightGBM booster per forecasting horizon
    (1 to 13 days ahead) and scores simple statistics of the recent
    RSSI history at the requested position.
    """

    def __init__(self):
        # One booster per horizon day d in 1..13, loaded from disk once per
        # resource instantiation.
        models = []
        for d in range(1, 14):
            model_path = os.path.join(PATHS.model, f"lgb_model_d{d}.txt")
            models.append(lgb.Booster(model_file=model_path))
        self.models = models

    def _extract_features(self, position, history):
        # `history` maps step keys to records containing an "A2_RSSI"
        # reading; sorting the keys puts the series in order.
        rssi = np.array(
            [history[step]["A2_RSSI"] for step in sorted(list(history.keys()))]
        )
        print(position)
        # Feature vector: mean RSSI, RSSI std-dev, and the position scaled
        # by 1000 — presumably converting 100 m units; TODO confirm the
        # factor against the model-training code.
        return [np.mean(rssi), np.std(rssi), int(position) * 1000.0]

    def get(self):
        parser = reqparse.RequestParser()
        parser = parser.add_argument("date", required=True)
        parser.add_argument("position", required=True)
        args = parser.parse_args()
        params = {"position": args.position, "date": args.date}
        print("Making historical data request")
        # Pull the recent sensor history for the requested position/date
        # from this service's own /api/historical endpoint.
        history = requests.get(url=BASE_URL + "/api/historical", params=params).json()
        features = self._extract_features(args.position, history)
        scores = []
        for model in self.models:
            scores.append(model.predict(np.array(features).reshape(1, -1))[0])
        scores = np.array(scores)
        # Distance from the 0.5 decision boundary, rescaled to 0-100%.
        confidence = np.abs(scores - 0.5) / 0.5 * 100
        coords = get_location(args.position)
        return {
            **{
                "position":args.position,
                "date":args.date,
                "predictions": ((scores > 0.5) * 1).tolist(),
                "confidence": confidence.tolist(),
                "description": {1: "disruption", 2: "no-disruption"},
            },
            **coords
        },200
# Route registrations: predictions, coordinate lookup and the disruption
# event simulator.
api.add_resource(Prediction, "/api/predict")
api.add_resource(Coordinates, "/api/coordinates")
api.add_resource(SimulateDisruptions, "/api/disruptions")
if __name__ == "__main__":
    # `global` is a no-op at module scope; location_df is the module-level
    # lookup table the resources read.
    global location_df
    location_path = DataProcessor.gen_proc_file_name("location.csv")
    # Table indexed by track position in metres, with Latitude/Longitude
    # columns — presumably produced by the data-processing pipeline.
    location_df = pd.read_csv(location_path, index_col="Position_m")
    app.run(port=5789)
| [
"flask_restful.Api",
"numpy.abs",
"os.path.join",
"pandas.read_csv",
"numpy.std",
"flask.Flask",
"flask_restful.reqparse.RequestParser",
"lightgbm.Booster",
"numpy.random.randint",
"numpy.array",
"numpy.mean",
"requests.get",
"data_processing.src.data_processor.DataProcessor.gen_proc_file_na... | [((270, 285), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (275, 285), False, 'from flask import Flask\n'), ((292, 300), 'flask_restful.Api', 'Api', (['app'], {}), '(app)\n', (295, 300), False, 'from flask_restful import Resource, Api, reqparse\n'), ((3527, 3575), 'data_processing.src.data_processor.DataProcessor.gen_proc_file_name', 'DataProcessor.gen_proc_file_name', (['"""location.csv"""'], {}), "('location.csv')\n", (3559, 3575), False, 'from data_processing.src.data_processor import DataProcessor\n'), ((3594, 3644), 'pandas.read_csv', 'pd.read_csv', (['location_path'], {'index_col': '"""Position_m"""'}), "(location_path, index_col='Position_m')\n", (3605, 3644), True, 'import pandas as pd\n'), ((686, 716), 'numpy.random.randint', 'np.random.randint', (['(197)', '(420)', '(2)'], {}), '(197, 420, 2)\n', (703, 716), True, 'import numpy as np\n'), ((1186, 1210), 'flask_restful.reqparse.RequestParser', 'reqparse.RequestParser', ([], {}), '()\n', (1208, 1210), False, 'from flask_restful import Resource, Api, reqparse\n'), ((2253, 2277), 'flask_restful.reqparse.RequestParser', 'reqparse.RequestParser', ([], {}), '()\n', (2275, 2277), False, 'from flask_restful import Resource, Api, reqparse\n'), ((2843, 2859), 'numpy.array', 'np.array', (['scores'], {}), '(scores)\n', (2851, 2859), True, 'import numpy as np\n'), ((1815, 1863), 'os.path.join', 'os.path.join', (['PATHS.model', 'f"""lgb_model_d{d}.txt"""'], {}), "(PATHS.model, f'lgb_model_d{d}.txt')\n", (1827, 1863), False, 'import os\n'), ((2163, 2176), 'numpy.mean', 'np.mean', (['rssi'], {}), '(rssi)\n', (2170, 2176), True, 'import numpy as np\n'), ((2178, 2190), 'numpy.std', 'np.std', (['rssi'], {}), '(rssi)\n', (2184, 2190), True, 'import numpy as np\n'), ((1890, 1924), 'lightgbm.Booster', 'lgb.Booster', ([], {'model_file': 'model_path'}), '(model_file=model_path)\n', (1901, 1924), True, 'import lightgbm as lgb\n'), ((2558, 2619), 
'requests.get', 'requests.get', ([], {'url': "(BASE_URL + '/api/historical')", 'params': 'params'}), "(url=BASE_URL + '/api/historical', params=params)\n", (2570, 2619), False, 'import requests\n'), ((2881, 2901), 'numpy.abs', 'np.abs', (['(scores - 0.5)'], {}), '(scores - 0.5)\n', (2887, 2901), True, 'import numpy as np\n'), ((842, 903), 'requests.get', 'requests.get', ([], {'url': "(BASE_URL + '/api/historical')", 'params': 'params'}), "(url=BASE_URL + '/api/historical', params=params)\n", (854, 903), False, 'import requests\n'), ((2787, 2805), 'numpy.array', 'np.array', (['features'], {}), '(features)\n', (2795, 2805), True, 'import numpy as np\n')] |
"""
Licensed Materials - Property of IBM
Restricted Materials of IBM
20190891
© Copyright IBM Corp. 2021 All Rights Reserved.
"""
"""
Module to where fusion algorithms are implemented.
"""
import logging
import numpy as np
from ibmfl.aggregator.fusion.fedavg_fusion_handler import FedAvgFusionHandler
logger = logging.getLogger(__name__)
class RLWeightedAvgFusionHandler(FedAvgFusionHandler):
    """
    Class for weight based Federated Averaging aggregation.
    In this class, the weighted averaging aggregation is performed over the RL
    policy model weights, with the averaging weights derived from each
    party's reported episode reward.
    """

    def __init__(self, hyperparams, protocol_handler,
                 fl_model=None,
                 data_handler=None,
                 **kwargs):
        # NOTE(review): `data_handler` and `fl_model` are forwarded to the
        # parent in the opposite order from this signature — presumably the
        # parent expects (hyperparams, protocol_handler, data_handler,
        # fl_model); verify against FedAvgFusionHandler.__init__.
        super().__init__(hyperparams,
                         protocol_handler,
                         data_handler,
                         fl_model,
                         **kwargs)
        self.name = "RLWeightedAvg"

    def fusion_collected_responses(self, lst_model_updates):
        """
        Receives a list of model updates, where a model update is of the type
        `ModelUpdate`; using the weights and rewards included in each
        model_update, it finds the weighted average of the model weights
        per layer, with averaging weights proportional to each party's
        'episode_reward_mean'.

        :param lst_model_updates: List of model updates of type `ModelUpdate` \
        to be averaged.
        :type lst_model_updates: `list`
        :return: results after aggregation
        :rtype: `dict`
        """
        weights = dict()
        # Key list gives the layers of the neural network; all updates are
        # assumed to share the layer keys of the first update.
        weights_key_list = list(lst_model_updates[0].get('weights').keys())
        # Iterate through the layers of the neural network
        for key in weights_key_list:
            w = []
            n_k = []
            for update in lst_model_updates:
                w.append(np.array(update.get('weights').get(key)))
                n_k.append(update.get('train_result').get(
                    'episode_reward_mean'))
            # Normalize rewards into averaging coefficients. `n_k` is a
            # Python list divided by a numpy scalar, which yields an
            # ndarray; self._eps (inherited) guards against a zero sum.
            n_norm = n_k / (np.sum(n_k) + self._eps)
            avg_weight = np.sum(
                [w[i] * n_norm[i] for i in range(len(n_k))], axis=0)
            weights[key] = avg_weight
        return weights
| [
"numpy.sum",
"logging.getLogger"
] | [((313, 340), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (330, 340), False, 'import logging\n'), ((2112, 2123), 'numpy.sum', 'np.sum', (['n_k'], {}), '(n_k)\n', (2118, 2123), True, 'import numpy as np\n')] |
import networkx as nx
import numpy as np
def property_graph(graph='g1'):
    """
    Return the parameters of a predefined stochastic-block-model graph.

    :param graph: name of the desired graph. Options: ['g1', 'g2', 'g3',
        'g4', 'g5', 'g6']
    :return: tuple ``(probs, sizes, number_class, method)``: the block
        connection-probability matrix, the block sizes, the number of
        protected classes ('binary' or 'multi'), and how the protected
        attribute relates to the communities ('partition' or 'random').
    :raises ValueError: if ``graph`` is not one of the known names.
    """
    if graph == 'g1':
        method = 'partition'
        sizes = [75, 75]
        probs = [[0.10, 0.005], [0.005, 0.10]]
        number_class = 'binary'
    elif graph == 'g2':
        method = 'random'
        sizes = [75, 75]
        probs = [[0.10, 0.005], [0.005, 0.10]]
        number_class = 'binary'
    elif graph == 'g3':
        method = 'partition'
        sizes = [125, 25]
        probs = [[0.15, 0.005], [0.005, 0.35]]
        number_class = 'binary'
    elif graph == 'g4':
        method = 'partition'
        probs = [[0.20, 0.002, 0.003], [0.002, 0.15, 0.003], [0.003, 0.003, 0.10]]
        sizes = [50, 50, 50]
        number_class = 'binary'
    elif graph == 'g5':
        method = 'partition'
        probs = [[0.20, 0.002, 0.003], [0.002, 0.15, 0.003], [0.003, 0.003, 0.10]]
        sizes = [50, 50, 50]
        number_class = "multi"
    elif graph == 'g6':
        method = 'partition'
        sizes = [50, 50]
        probs = [[0.4, 0.005], [0.005, 0.1]]
        number_class = 'binary'
    else:
        # Previously an unknown name fell through all branches and raised a
        # confusing UnboundLocalError on return; fail fast instead.
        raise ValueError(f"Unknown graph {graph!r}; expected one of "
                         "'g1'..'g6'")
    return probs, sizes, number_class, method
def get_graph_prot(sizes=None, probs=None, number_class='binary',
                   choice='random', shuffle=0.1):
    """
    Generate a graph with a community structure, where each node is
    assigned a protected attribute.

    :param sizes: number of nodes in each protected group
    :param probs: probabilities of links between the protected groups,
        and within them
    :param number_class: the number of protected groups (binary or multi)
    :param choice: controls the dependency between the protected attribute
        and the community structure:
        - random : the structure and the attribute are completely independent
        - partition : the structure and the attribute are dependent
    :param shuffle: when the choice is partition, it controls the degree of
        dependency (a low value corresponds to stronger dependence)
    :return: the graph, where the protected attribute is a feature of the
        nodes, and the attribute as a dictionary keyed by node id
    """
    if sizes is None:
        sizes = [150, 150]
    if probs is None:
        probs = [[0.15, 0.005], [0.005, 0.15]]
    # Generate a graph following the stochastic block model
    g = nx.stochastic_block_model(sizes, probs)
    # Resample until the graph is connected (may loop for sparse probs).
    is_connected = nx.is_connected(g)
    while not is_connected:
        g = nx.stochastic_block_model(sizes, probs)
        is_connected = nx.is_connected(g)
    # Protected attribute: one value per node, initialised to group 0.
    n = np.sum(sizes)
    prot_s = np.zeros(n)
    k = np.asarray(probs).shape[0]
    p = np.ones(k)
    if choice == 'random':
        # Attribute drawn uniformly, independent of the community blocks.
        # NOTE(review): the binary branch passes a length-k probability
        # vector to np.random.choice(2, ...), which raises when k > 2 —
        # presumably 'binary' is only used with 2 blocks here; confirm.
        if number_class == 'multi':
            prot_s = np.random.choice(k, n, p=p * 1 / k)
        elif number_class == 'binary':
            prot_s = np.random.choice(2, n, p=p * 1 / 2)
    elif choice == 'partition':
        # Attribute follows the block partition of the SBM...
        part_idx = g.graph['partition']
        for i in range(len(part_idx)):
            prot_s[list(part_idx[i])] = i
        # ...then shuffle a fraction of the labels to weaken the dependence.
        prot_s = shuffle_part(prot_s, prop_shuffle=shuffle)
        # Handle the case when S is binary but the partition has >2 blocks:
        # reassign nodes labelled 2 uniformly to group 0 or 1.
        if (np.asarray(probs).shape[0] > 2) & (number_class == 'binary'):
            idx_mix = np.where(prot_s == 2)[0]
            _temp = np.random.choice([0, 1], size=(len(idx_mix),),
                                     p=[1. / 2, 1. / 2])
            i = 0
            for el in idx_mix:
                prot_s[el] = _temp[i]
                i += 1
    # Assign the attribute as a feature of the nodes directly in the graph
    dict_s = {i: prot_s[i] for i in range(0, len(prot_s))}
    nx.set_node_attributes(g, dict_s, 's')
    return g, dict_s
def shuffle_part(prot_s, prop_shuffle=0.1):
    """
    Randomly permute a fraction of the protected-attribute vector in place.

    Each entry is selected independently with probability ``prop_shuffle``;
    the selected entries are then shuffled among themselves, so the overall
    distribution of attribute values is preserved.

    :param prot_s: the attribute vector (numpy array) to shuffle in place
    :param prop_shuffle: the expected proportion of entries to shuffle
    :return: the same vector, partially shuffled
    """
    selected = np.random.choice([True, False], size=prot_s.size, replace=True,
                                p=[prop_shuffle, 1 - prop_shuffle])
    subset = prot_s[selected]
    np.random.shuffle(subset)
    prot_s[selected] = subset
    return prot_s
| [
"numpy.sum",
"networkx.set_node_attributes",
"networkx.is_connected",
"numpy.asarray",
"numpy.zeros",
"numpy.ones",
"networkx.stochastic_block_model",
"numpy.where",
"numpy.random.choice",
"numpy.random.shuffle"
] | [((2475, 2514), 'networkx.stochastic_block_model', 'nx.stochastic_block_model', (['sizes', 'probs'], {}), '(sizes, probs)\n', (2500, 2514), True, 'import networkx as nx\n'), ((2573, 2591), 'networkx.is_connected', 'nx.is_connected', (['g'], {}), '(g)\n', (2588, 2591), True, 'import networkx as nx\n'), ((2749, 2762), 'numpy.sum', 'np.sum', (['sizes'], {}), '(sizes)\n', (2755, 2762), True, 'import numpy as np\n'), ((2776, 2787), 'numpy.zeros', 'np.zeros', (['n'], {}), '(n)\n', (2784, 2787), True, 'import numpy as np\n'), ((2831, 2841), 'numpy.ones', 'np.ones', (['k'], {}), '(k)\n', (2838, 2841), True, 'import numpy as np\n'), ((3869, 3907), 'networkx.set_node_attributes', 'nx.set_node_attributes', (['g', 'dict_s', '"""s"""'], {}), "(g, dict_s, 's')\n", (3891, 3907), True, 'import networkx as nx\n'), ((4221, 4325), 'numpy.random.choice', 'np.random.choice', (['[True, False]'], {'size': 'prot_s.size', 'replace': '(True)', 'p': '[prop_shuffle, 1 - prop_shuffle]'}), '([True, False], size=prot_s.size, replace=True, p=[\n prop_shuffle, 1 - prop_shuffle])\n', (4237, 4325), True, 'import numpy as np\n'), ((4385, 4418), 'numpy.random.shuffle', 'np.random.shuffle', (['prot_s_shuffle'], {}), '(prot_s_shuffle)\n', (4402, 4418), True, 'import numpy as np\n'), ((2632, 2671), 'networkx.stochastic_block_model', 'nx.stochastic_block_model', (['sizes', 'probs'], {}), '(sizes, probs)\n', (2657, 2671), True, 'import networkx as nx\n'), ((2695, 2713), 'networkx.is_connected', 'nx.is_connected', (['g'], {}), '(g)\n', (2710, 2713), True, 'import networkx as nx\n'), ((2796, 2813), 'numpy.asarray', 'np.asarray', (['probs'], {}), '(probs)\n', (2806, 2813), True, 'import numpy as np\n'), ((2927, 2962), 'numpy.random.choice', 'np.random.choice', (['k', 'n'], {'p': '(p * 1 / k)'}), '(k, n, p=p * 1 / k)\n', (2943, 2962), True, 'import numpy as np\n'), ((3023, 3058), 'numpy.random.choice', 'np.random.choice', (['(2)', 'n'], {'p': '(p * 1 / 2)'}), '(2, n, p=p * 1 / 2)\n', (3039, 3058), True, 
'import numpy as np\n'), ((3484, 3505), 'numpy.where', 'np.where', (['(prot_s == 2)'], {}), '(prot_s == 2)\n', (3492, 3505), True, 'import numpy as np\n'), ((3400, 3417), 'numpy.asarray', 'np.asarray', (['probs'], {}), '(probs)\n', (3410, 3417), True, 'import numpy as np\n')] |
import numpy as np
from abc import ABC, abstractmethod
from sklearn import metrics
from . import functional as pwF
class AbstractEvaluatorResults(ABC):
    """Base class for objects that wrap the outcome of an evaluation metric.

    Concrete subclasses decide how two results are ordered (which one
    counts as "better") and how a result is rendered as text.
    """

    @abstractmethod
    def is_better_than(self, other_results_object):
        """Return True if these results beat ``other_results_object``.

        :param other_results_object: Object of the same class.
        """

    @abstractmethod
    def compare_to(self, other_results_object):
        """Return a signed comparison of these results with another object.

        :param other_results_object: Object of the same class.
        """

    @abstractmethod
    def __str__(self):
        """Return a human-readable rendering of the results."""

    def __repr__(self):
        # repr mirrors the subclass-provided string form.
        return str(self)
class GenericEvaluatorResults(AbstractEvaluatorResults):
    """Evaluator results holding a single numeric score."""

    def __init__(self, score, label='score', score_format='%f', is_max_better=True):
        """
        :param score: Numeric value that represents the score.
        :param label: String used in the str representation.
        :param score_format: Format string used in the str representation.
        :param is_max_better: Flag that signifies if larger means better.
        """
        super().__init__()
        self._score = score
        self._label = label
        self._score_format = score_format
        self._is_max_better = is_max_better

    @property
    def score(self):
        return self._score

    @property
    def is_max_better(self):
        return self._is_max_better

    def is_better_than(self, other_results_object):
        # Nothing to compare against counts as an improvement.
        if other_results_object is None:
            return True
        diff = self.compare_to(other_results_object)
        return diff > 0 if self._is_max_better else diff < 0

    def compare_to(self, other_results_object):
        # Positive when this score is larger than the other's.
        return self._score - other_results_object.score

    def __str__(self):
        template = self._label + ': ' + self._score_format
        return template % self._score
class AbstractEvaluator(ABC):
    """Base class for metrics that evaluate a model's output on a dataset.

    Subclasses accumulate per-batch information via :meth:`step` and turn
    the accumulated state into a result object in :meth:`calculate`.
    """

    def __init__(self):
        # Start from a clean accumulator state.
        self.reset()

    @abstractmethod
    def reset(self):
        """(Re)initialize the object; called at the start of evaluation."""

    @abstractmethod
    def step(self, output, batch, last_activation=None):
        """Record the information needed for a single batch.

        :param output: Output of the model.
        :param batch: Dict that contains everything the evaluator needs for
            one batch.
        :param last_activation: The model's last activation; some losses
            work with logits, so this activation may not be performed
            inside the model's forward method.
        """

    @abstractmethod
    def calculate(self):
        """Compute the metric after all batches have been processed.

        :return: AbstractEvaluatorResults object.
        """

    def calculate_at_once(self, output, dataset, last_activation=None):
        """Reset, process the whole dataset as one batch, and calculate.

        :param output: Output of the model.
        :param dataset: Dict that contains everything the evaluator needs
            for the dataset.
        :param last_activation: The model's last activation; see :meth:`step`.
        :return: AbstractEvaluatorResults object.
        """
        self.reset()
        self.step(output, dataset, last_activation)
        return self.calculate()
class GenericPointWiseLossEvaluator(AbstractEvaluator):
"""
Adapter that uses an object of a class derived from AbstractLossWrapper to calculate the loss during evaluation.
"""
def __init__(self, loss_wrapper, label='loss', score_format='%f', batch_target_key='target'):
"""
:param loss_wrapper: AbstractLossWrapper object that calculates the loss.
:param label: Str used as label during printing of the loss.
:param score_format: Format used for str representation of the loss.
:param batch_target_key: Key where the dict (batch) contains the target values.
"""
super(GenericPointWiseLossEvaluator, self).__init__()
self._loss_wrapper = loss_wrapper
self._label = label
self._score_format = score_format
self._batch_target_key = batch_target_key
self.reset()
def reset(self):
self._loss = 0
self._examples_nb = 0
def step(self, output, batch, last_activation=None):
current_loss = self._loss_wrapper.calculate_loss(output, batch, None, last_activation).item()
self._loss += current_loss * batch[self._batch_target_key].shape[0]
self._examples_nb += batch[self._batch_target_key].shape[0]
def calculate(self):
return GenericEvaluatorResults(
self._loss / self._examples_nb,
self._label, self._score_format,
is_max_better=False
)
class AccuracyEvaluator(AbstractEvaluator):
"""
Accuracy evaluator.
"""
def __init__(self, threshold=0.5, model_output_key=None, batch_target_key='target'):
"""
:param threshold: Threshold above which an example is considered positive.
:param model_output_key: Key where the dict returned by the model contains the actual predictions. Leave None
if the model returns only the predictions.
:param batch_target_key: Key where the dict (batch) contains the target values.
"""
super(AccuracyEvaluator, self).__init__()
self._threshold = threshold
self._model_output_key = model_output_key
self._batch_target_key = batch_target_key
self.reset()
def reset(self):
self._outputs = []
self._targets = []
def step(self, output, batch, last_activation=None):
if self._model_output_key is not None:
output = output[self._model_output_key]
if last_activation is not None:
output = last_activation(output)
self._outputs.extend(output.tolist())
self._targets.extend(batch[self._batch_target_key].tolist())
def calculate(self):
predictions = np.array(self._outputs) > self._threshold
targets = np.array(self._targets) > self._threshold
correct = (predictions == targets).sum()
return GenericEvaluatorResults(
100.0 * correct / predictions.size,
'acc',
'%5.2f%%',
is_max_better=True
)
class MultiClassAccuracyEvaluator(AbstractEvaluator):
"""
Multi-Class Accuracy evaluator.
"""
def __init__(self, model_output_key=None, batch_target_key='target'):
"""
:param model_output_key: Key where the dict returned by the model contains the actual predictions. Leave None
if the model returns only the predictions.
:param batch_target_key: Key where the dict (batch) contains the target values.
"""
super(MultiClassAccuracyEvaluator, self).__init__()
self._model_output_key = model_output_key
self._batch_target_key = batch_target_key
self.reset()
def reset(self):
self._outputs = []
self._targets = []
def step(self, output, batch, last_activation=None):
if self._model_output_key is not None:
output = output[self._model_output_key]
self._outputs.extend(output.tolist())
self._targets.extend(batch[self._batch_target_key].tolist())
def calculate(self):
predictions = np.array(self._outputs).argmax(axis=-1)
correct = (predictions == self._targets).sum()
return GenericEvaluatorResults(
100.0 * correct / predictions.shape[0],
'acc',
'%5.2f%%',
is_max_better=True
)
class AUROCEvaluator(AbstractEvaluator):
"""
AUROC evaluator.
"""
def __init__(self, model_output_key=None, batch_target_key='target', average='macro', target_threshold=0.5):
"""
:param model_output_key: Key where the dict returned by the model contains the actual predictions. Leave None
if the model returns only the predictions.
:param batch_target_key: Key where the dict (batch) contains the target values.
:param average: Type ['macro' or 'micro'] of averaging performed on the results in case of multi-label task.
"""
super(AUROCEvaluator, self).__init__()
self._model_output_key = model_output_key
self._batch_target_key = batch_target_key
self._average = average
self._target_threshold = target_threshold
self.reset()
def reset(self):
self._outputs = []
self._targets = []
def step(self, output, batch, last_activation=None):
if self._model_output_key is not None:
output = output[self._model_output_key]
if last_activation is not None:
output = last_activation(output)
self._outputs.extend(output.tolist())
self._targets.extend(batch[self._batch_target_key].tolist())
def calculate(self):
return GenericEvaluatorResults(metrics.roc_auc_score(
y_score=np.array(self._outputs, dtype='float32'),
y_true=np.array(self._targets) > self._target_threshold,
average=self._average
), 'auroc', '%5.4f', is_max_better=True)
class PrecisionEvaluator(AbstractEvaluator):
"""
Precision evaluator.
"""
def __init__(self, threshold=0.5, model_output_key=None, batch_target_key='target', average='binary'):
"""
:param threshold: Threshold above which an example is considered positive.
:param model_output_key: Key where the dict returned by the model contains the actual predictions. Leave None
if the model returns only the predictions.
:param batch_target_key: Key where the dict (batch) contains the target values.
:param average: Type ['binary', 'macro' or 'micro'] of averaging performed on the results.
"""
super(PrecisionEvaluator, self).__init__()
self._threshold = threshold
self._model_output_key = model_output_key
self._batch_target_key = batch_target_key
self._average = average
self.reset()
def reset(self):
self._outputs = []
self._targets = []
def step(self, output, batch, last_activation=None):
if self._model_output_key is not None:
output = output[self._model_output_key]
if last_activation is not None:
output = last_activation(output)
self._outputs.extend(output.tolist())
self._targets.extend(batch[self._batch_target_key].tolist())
def calculate(self):
return GenericEvaluatorResults(metrics.precision_score(
y_pred=np.array(self._outputs) > self._threshold,
y_true=np.array(self._targets) > self._threshold,
average=self._average
), self._average + '-precision', '%5.4f', is_max_better=True)
class MultiClassPrecisionEvaluator(AbstractEvaluator):
"""
Multi-Class Precision evaluator.
"""
def __init__(self, model_output_key=None, batch_target_key='target', average='macro'):
"""
:param model_output_key: Key where the dict returned by the model contains the actual predictions. Leave None
if the model returns only the predictions.
:param batch_target_key: Key where the dict (batch) contains the target values.
:param average: Type ['macro' or 'micro'] of averaging performed on the results.
"""
super(MultiClassPrecisionEvaluator, self).__init__()
self._model_output_key = model_output_key
self._batch_target_key = batch_target_key
self._average = average
self.reset()
def reset(self):
self._outputs = []
self._targets = []
def step(self, output, batch, last_activation=None):
if self._model_output_key is not None:
output = output[self._model_output_key]
if last_activation is not None:
output = last_activation(output)
self._outputs.extend(output.tolist())
self._targets.extend(batch[self._batch_target_key].tolist())
def calculate(self):
return GenericEvaluatorResults(metrics.precision_score(
y_pred=np.array(self._outputs).argmax(axis=-1),
y_true=np.array(self._targets),
average=self._average
), self._average + '-precision', '%5.4f', is_max_better=True)
class RecallEvaluator(AbstractEvaluator):
"""
Recall evaluator.
"""
def __init__(self, threshold=0.5, model_output_key=None, batch_target_key='target', average='binary'):
"""
:param threshold: Threshold above which an example is considered positive.
:param model_output_key: Key where the dict returned by the model contains the actual predictions. Leave None
if the model returns only the predictions.
:param batch_target_key: Key where the dict (batch) contains the target values.
:param average: Type ['binary', 'macro' or 'micro'] of averaging performed on the results.
"""
super(RecallEvaluator, self).__init__()
self._threshold = threshold
self._model_output_key = model_output_key
self._batch_target_key = batch_target_key
self._average = average
self.reset()
def reset(self):
self._outputs = []
self._targets = []
def step(self, output, batch, last_activation=None):
if self._model_output_key is not None:
output = output[self._model_output_key]
if last_activation is not None:
output = last_activation(output)
self._outputs.extend(output.tolist())
self._targets.extend(batch[self._batch_target_key].tolist())
def calculate(self):
return GenericEvaluatorResults(metrics.recall_score(
y_pred=np.array(self._outputs) > self._threshold,
y_true=np.array(self._targets) > self._threshold,
average=self._average
), self._average + '-recall', '%5.4f', is_max_better=True)
class MultiClassRecallEvaluator(AbstractEvaluator):
"""
Multi-Class Recall evaluator.
"""
def __init__(self, model_output_key=None, batch_target_key='target', average='macro'):
"""
:param model_output_key: Key where the dict returned by the model contains the actual predictions. Leave None
if the model returns only the predictions.
:param batch_target_key: Key where the dict (batch) contains the target values.
:param average: Type ['macro' or 'micro'] of averaging performed on the results.
"""
super(MultiClassRecallEvaluator, self).__init__()
self._model_output_key = model_output_key
self._batch_target_key = batch_target_key
self._average = average
self.reset()
def reset(self):
self._outputs = []
self._targets = []
def step(self, output, batch, last_activation=None):
if self._model_output_key is not None:
output = output[self._model_output_key]
if last_activation is not None:
output = last_activation(output)
self._outputs.extend(output.tolist())
self._targets.extend(batch[self._batch_target_key].tolist())
def calculate(self):
return GenericEvaluatorResults(metrics.recall_score(
y_pred=np.array(self._outputs).argmax(axis=-1),
y_true=np.array(self._targets),
average=self._average
), self._average + '-recall', '%5.4f', is_max_better=True)
class F1Evaluator(AbstractEvaluator):
"""
F1 evaluator.
"""
def __init__(self, threshold=0.5, model_output_key=None, batch_target_key='target', average='binary'):
"""
:param threshold: Threshold above which an example is considered positive.
:param model_output_key: Key where the dict returned by the model contains the actual predictions. Leave None
if the model returns only the predictions.
:param batch_target_key: Key where the dict (batch) contains the target values.
:param average: Type ['binary', 'macro' or 'micro'] of averaging performed on the results.
"""
super(F1Evaluator, self).__init__()
self._threshold = threshold
self._model_output_key = model_output_key
self._batch_target_key = batch_target_key
self._average = average
self.reset()
def reset(self):
self._outputs = []
self._targets = []
def step(self, output, batch, last_activation=None):
if self._model_output_key is not None:
output = output[self._model_output_key]
if last_activation is not None:
output = last_activation(output)
self._outputs.extend(output.tolist())
self._targets.extend(batch[self._batch_target_key].tolist())
def calculate(self):
return GenericEvaluatorResults(metrics.f1_score(
y_pred=np.array(self._outputs) > self._threshold,
y_true=np.array(self._targets) > self._threshold,
average=self._average
), self._average + '-f1', '%5.4f', is_max_better=True)
class MultiClassF1Evaluator(AbstractEvaluator):
"""
Multi-Class F1 evaluator.
"""
def __init__(self, model_output_key=None, batch_target_key='target', average='macro'):
"""
:param model_output_key: Key where the dict returned by the model contains the actual predictions. Leave None
if the model returns only the predictions.
:param batch_target_key: Key where the dict (batch) contains the target values.
:param average: Type ['macro' or 'micro'] of averaging performed on the results.
"""
super(MultiClassF1Evaluator, self).__init__()
self._model_output_key = model_output_key
self._batch_target_key = batch_target_key
self._average = average
self.reset()
def reset(self):
self._outputs = []
self._targets = []
def step(self, output, batch, last_activation=None):
if self._model_output_key is not None:
output = output[self._model_output_key]
if last_activation is not None:
output = last_activation(output)
self._outputs.extend(output.tolist())
self._targets.extend(batch[self._batch_target_key].tolist())
def calculate(self):
return GenericEvaluatorResults(metrics.f1_score(
y_pred=np.array(self._outputs).argmax(axis=-1),
y_true=np.array(self._targets),
average=self._average
), self._average + '-f1', '%5.4f', is_max_better=True)
class TokenLabelingEvaluatorWrapper(AbstractEvaluator):
"""
Adapter that wraps an evaluator. It is used in token labeling tasks in order to flat the output and
target while discarding invalid values due to padding.
"""
def __init__(self, evaluator, batch_input_sequence_length_idx, batch_input_key='input', model_output_key=None,
batch_target_key='target', end_padded=True):
"""
:param evaluator: The evaluator.
:param batch_input_sequence_length_idx: The index of the input list where the lengths of the sequences can be
found.
:param batch_input_key: Key of the Dicts returned by the Dataloader objects that corresponds to the input of the
model.
:param model_output_key: Key where the dict returned by the model contains the actual predictions. Leave None
if the model returns only the predictions.
:param batch_target_key: Key where the dict (batch) contains the target values.
:param end_padded: Whether the sequences are end-padded.
"""
self._evaluator = evaluator
super(TokenLabelingEvaluatorWrapper, self).__init__()
self._batch_input_sequence_length_idx = batch_input_sequence_length_idx
self._batch_input_key = batch_input_key
self._model_output_key = model_output_key
self._batch_target_key = batch_target_key
self._end_padded = end_padded
self.reset()
def reset(self):
self._evaluator.reset()
def step(self, output, batch, last_activation=None):
if self._model_output_key is not None:
output = output[self._model_output_key]
mask = pwF.create_mask_from_length(
batch[self._batch_input_key][self._batch_input_sequence_length_idx].to(output.device),
output.shape[1],
self._end_padded
).view(-1)
new_output = output.view(output.shape[0] * output.shape[1], -1).squeeze(-1)
batch_targets = batch[self._batch_target_key]
batch_targets = batch_targets.view(batch_targets.shape[0] * batch_targets.shape[1], -1).squeeze(-1)
new_output = new_output[mask]
batch_targets = batch_targets[mask]
new_batch = {k: batch[k] for k in batch if k != self._batch_target_key}
new_batch[self._batch_target_key] = batch_targets
self._evaluator.step(new_output, new_batch, last_activation)
def calculate(self):
return self._evaluator.calculate()
| [
"numpy.array"
] | [((6665, 6688), 'numpy.array', 'np.array', (['self._outputs'], {}), '(self._outputs)\n', (6673, 6688), True, 'import numpy as np\n'), ((6725, 6748), 'numpy.array', 'np.array', (['self._targets'], {}), '(self._targets)\n', (6733, 6748), True, 'import numpy as np\n'), ((8033, 8056), 'numpy.array', 'np.array', (['self._outputs'], {}), '(self._outputs)\n', (8041, 8056), True, 'import numpy as np\n'), ((9691, 9731), 'numpy.array', 'np.array', (['self._outputs'], {'dtype': '"""float32"""'}), "(self._outputs, dtype='float32')\n", (9699, 9731), True, 'import numpy as np\n'), ((12933, 12956), 'numpy.array', 'np.array', (['self._targets'], {}), '(self._targets)\n', (12941, 12956), True, 'import numpy as np\n'), ((16083, 16106), 'numpy.array', 'np.array', (['self._targets'], {}), '(self._targets)\n', (16091, 16106), True, 'import numpy as np\n'), ((19194, 19217), 'numpy.array', 'np.array', (['self._targets'], {}), '(self._targets)\n', (19202, 19217), True, 'import numpy as np\n'), ((9752, 9775), 'numpy.array', 'np.array', (['self._targets'], {}), '(self._targets)\n', (9760, 9775), True, 'import numpy as np\n'), ((11331, 11354), 'numpy.array', 'np.array', (['self._outputs'], {}), '(self._outputs)\n', (11339, 11354), True, 'import numpy as np\n'), ((11393, 11416), 'numpy.array', 'np.array', (['self._targets'], {}), '(self._targets)\n', (11401, 11416), True, 'import numpy as np\n'), ((14496, 14519), 'numpy.array', 'np.array', (['self._outputs'], {}), '(self._outputs)\n', (14504, 14519), True, 'import numpy as np\n'), ((14558, 14581), 'numpy.array', 'np.array', (['self._targets'], {}), '(self._targets)\n', (14566, 14581), True, 'import numpy as np\n'), ((17627, 17650), 'numpy.array', 'np.array', (['self._outputs'], {}), '(self._outputs)\n', (17635, 17650), True, 'import numpy as np\n'), ((17689, 17712), 'numpy.array', 'np.array', (['self._targets'], {}), '(self._targets)\n', (17697, 17712), True, 'import numpy as np\n'), ((12873, 12896), 'numpy.array', 'np.array', 
(['self._outputs'], {}), '(self._outputs)\n', (12881, 12896), True, 'import numpy as np\n'), ((16023, 16046), 'numpy.array', 'np.array', (['self._outputs'], {}), '(self._outputs)\n', (16031, 16046), True, 'import numpy as np\n'), ((19134, 19157), 'numpy.array', 'np.array', (['self._outputs'], {}), '(self._outputs)\n', (19142, 19157), True, 'import numpy as np\n')] |
import logging
import os
import numpy as np
import vficredit.equations as eq
class Economy(object):
WAGE_PARAMS = ('z','k','l','alpha')
INTEREST_PARAMS = ('z','k','l','alpha','delta')
AGRID_PARAMS = ('minA','nA','nAneg','maxA')
def __init__(self, name = None, **kwargs):
"""
initialize economy using default settings
"""
self.params = {}
for arg in kwargs:
self.params[arg]=kwargs[arg]
if name is not None:
self.alias = name
else:
self.alias = hash(str(self.params))
logging.info('Economy class initialized')
def __str__(self):
"""
This method summarizes all relevant information for the class as a printable string
"""
params = []
params.append(f"Economy:{self.alias}")
for key in self.params.keys():
params.append(f"{key}:{str(self.params[key])}")
return os.linesep.join(params)
def asset_grid(self,**kwargs):
""" This methods creates grid points for agent assets in the economy
"""
for arg in kwargs:
if arg in self.AGRID_PARAMS:
self.params[arg] = kwargs[arg]
agrid_params = {p: self.params[p]
for p in self.AGRID_PARAMS}
minA = agrid_params['minA']
maxA = agrid_params['maxA']
nAneg = agrid_params['nAneg']
nA = agrid_params['nA']
negA = np.linspace(minA, 0, nAneg)
posA = np.linspace(-negA[-2], maxA, nA-nAneg)
self.a = np.concatenate((negA,posA),axis=0)
logging.info('asset_grid initialized')
def wage(self,**kwargs):
"""calculates wage
"""
for arg in kwargs:
if arg in self.WAGE_PARAMS:
self.params[arg] = kwargs[arg]
mpl_params = {p: self.params[p]
for p in self.WAGE_PARAMS}
self.w = eq.MPL(**mpl_params)
def interest_rate(self,**kwargs):
"""calculates deposit interest rate
"""
for arg in kwargs:
if arg in self.INTEREST_PARAMS:
self.params[arg] = kwargs[arg]
mpk_params = {p: self.params[p]
for p in self.INTEREST_PARAMS if p not in 'delta'}
self.r = eq.MPK(**mpk_params) - self.params['delta']
def states(self):
pass
def VFI(self):
""" this method solves the savings (s), consumption(c) policy functions and value fsunction for all states
"""
pass | [
"vficredit.equations.MPK",
"logging.info",
"numpy.linspace",
"os.linesep.join",
"vficredit.equations.MPL",
"numpy.concatenate"
] | [((640, 681), 'logging.info', 'logging.info', (['"""Economy class initialized"""'], {}), "('Economy class initialized')\n", (652, 681), False, 'import logging\n'), ((1020, 1043), 'os.linesep.join', 'os.linesep.join', (['params'], {}), '(params)\n', (1035, 1043), False, 'import os\n'), ((1574, 1601), 'numpy.linspace', 'np.linspace', (['minA', '(0)', 'nAneg'], {}), '(minA, 0, nAneg)\n', (1585, 1601), True, 'import numpy as np\n'), ((1617, 1657), 'numpy.linspace', 'np.linspace', (['(-negA[-2])', 'maxA', '(nA - nAneg)'], {}), '(-negA[-2], maxA, nA - nAneg)\n', (1628, 1657), True, 'import numpy as np\n'), ((1689, 1725), 'numpy.concatenate', 'np.concatenate', (['(negA, posA)'], {'axis': '(0)'}), '((negA, posA), axis=0)\n', (1703, 1725), True, 'import numpy as np\n'), ((1732, 1770), 'logging.info', 'logging.info', (['"""asset_grid initialized"""'], {}), "('asset_grid initialized')\n", (1744, 1770), False, 'import logging\n'), ((2086, 2106), 'vficredit.equations.MPL', 'eq.MPL', ([], {}), '(**mpl_params)\n', (2092, 2106), True, 'import vficredit.equations as eq\n'), ((2476, 2496), 'vficredit.equations.MPK', 'eq.MPK', ([], {}), '(**mpk_params)\n', (2482, 2496), True, 'import vficredit.equations as eq\n')] |
"""Bla."""
import tempfile
import subprocess
import time
import os
import sys
import zipfile
from copy import deepcopy
import numpy as np
from Bio.Blast import NCBIXML
from Bio.Seq import Seq
from Bio import SeqIO
from Bio.SeqFeature import SeqFeature, FeatureLocation
PYTHON3 = (sys.version_info[0] == 3)
if PYTHON3:
from io import StringIO, BytesIO
StringBytesIO = BytesIO
else:
from StringIO import StringIO
BytesIO = StringIO
StringBytesIO = StringIO
def complement(dna_sequence):
"""Return the complement of the DNA sequence.
For instance ``complement("ATGCCG")`` returns ``"TACGGC"``.
Uses BioPython for speed.
"""
return str(Seq(dna_sequence).complement())
def reverse_complement(sequence):
"""Return the reverse-complement of the DNA sequence.
For instance ``complement("ATGCCG")`` returns ``"GCCGTA"``.
Uses BioPython for speed.
"""
return complement(sequence)[::-1]
def blast_sequences(sequences=None, fasta_file=None,
blast_db=None, subject=None, word_size=4,
perc_identity=80, num_alignments=1000, num_threads=3,
use_megablast=True, evalue=None, ungapped=True):
"""Return a Biopython BLAST record of the given sequence BLASTed
against the provided database.
Parameters
----------
sequences
Either an ATGC string or a list of ATGC strings or a dict {name: seq:}
subject
Either a path to a fasta (.fa) file or an ATGC string. Subject to blast
against.
word_size
Word size to use in the blast
perc_identity
Minimal percentage of identical nucleotides in a match for it to be kept
num_alignments
Number of alignments to keep
num_threads
Number of threads for the BLAST
use_megablast
Whether to use Megablast.
ungapped
No-gaps matches only ?
Examples
--------
>>> blast_record = blast_sequence("ATTGTGCGTGTGTGCGT", "blastdb/ecoli")
>>> for alignment in blast_record.alignments:
>>> for hit in alignment.hsps:
>>> print (hit.identities)
"""
if isinstance(sequences, str):
sequences = [sequences]
if isinstance(sequences, (list, tuple)):
sequences = {"seq_%d" % i: seq for i, seq in enumerate(sequences)}
xml_file, xml_name = tempfile.mkstemp(".xml")
fasta_file, fasta_name = tempfile.mkstemp(".fa")
with open(fasta_name, "w+") as f:
for (name, seq) in sequences.items():
f.write(">%s\n%s\n" % (name, seq))
remove_subject = True
close_subject = False
if subject is not None:
close_subject = True
if isinstance(subject, str):
if subject.endswith(".fa"):
remove_subject = False
else:
subject = [subject]
if isinstance(subject, (list, tuple)):
subject = {"subject_%d" % i: seq for i, seq in enumerate(subject)}
if isinstance(subject, dict):
subject_file, fasta_subject_name = tempfile.mkstemp(".fa")
with open(fasta_subject_name, "w+") as f:
for (name, seq) in subject.items():
f.write(">%s\n%s\n" % (name, seq))
subject = fasta_subject_name
else:
close_subject = False
p = subprocess.Popen([
"blastn", "-out", xml_name,
"-outfmt", "5",
"-num_alignments", str(num_alignments),
"-query", fasta_name] +
(["-db", blast_db] if blast_db is not None
else ['-subject', subject]) +
(["-ungapped"] if ungapped else []) +
(["-evalue", str(evalue)] if evalue else []) +
(["-task", "megablast"] if use_megablast else []) + [
"-word_size", str(word_size),
"-num_threads", str(num_threads),
"-perc_identity", str(perc_identity),
"-dust", "no"
], close_fds=True)
res, blast_err = p.communicate()
error = None
for i in range(3):
try:
with open(xml_name, "r") as f:
res = list(NCBIXML.parse(f))
except ValueError as err:
error = err
time.sleep(0.1)
else:
break
else:
raise ValueError("Problem reading the blast record: " + str(error))
for j in range(3):
try:
os.fdopen(xml_file, 'w').close()
os.fdopen(fasta_file, 'w').close()
os.remove(xml_name)
os.remove(fasta_name)
if close_subject:
open(subject, 'w').close()
if remove_subject:
os.remove(subject)
except IOError as err:
error = err
time.sleep(0.1)
else:
break
return res
# RETIRING THIS ONE ASAP:
def read_records_from_zip(zip_path):
"""Return SeqRecords from all FASTA/GENBANK files in the zip."""
with zipfile.ZipFile(zip_path, 'r') as archive:
extensions_types = {".ab1": "abi", ".abi": "abi", ".gb": "genbank",
".gbk": "genbank", ".fa": "fasta",
".fasta": "fasta"}
extract = {}
failed_files = []
for f in archive.filelist:
name, ext = os.path.splitext(f.filename)
try:
if ext in extensions_types:
content = StringBytesIO(archive.read(f.filename))
extract[f.filename] = SeqIO.read(content,
extensions_types[ext])
except:
failed_files.append(f.filename)
return extract, failed_files
def rotate_circular_record(record, n_bases):
"""Changes the starting point of a circular SeqRecord by n_bases bases."""
new_record = deepcopy(record)
new_record.seq = record.seq[n_bases:] + record.seq[:n_bases]
for f in new_record.features:
f.location += (-n_bases)
if max(f.location.start, f.location.end) <= 0:
f.location += len(record)
return new_record
def group_overlapping_segments(segments, min_distance=10):
if segments == []:
return []
returned_segments = [list(segments[0])]
for start, end in segments[1:]:
if start < returned_segments[-1][-1] + min_distance:
if end > returned_segments[-1][-1]:
returned_segments[-1][-1] = end
else:
returned_segments.append([start, end])
return [tuple(s) for s in returned_segments]
def get_segment_coordinates(center, segment_length, sequence_length):
"""Return max(0, c - s/2) - min(L, c + L/2).
Where c=center, s=segment_length, L=sequence_length.
"""
half = int(segment_length / 2)
start = max(0, min(center - half, sequence_length - segment_length))
end = start + segment_length
return start, end
def find_best_primer_locations(sequence, size_range=(15, 25),
tm_range=(55, 70)):
"""Quickly compute all overhangs in the sequence.
This function uses the heuristic {A, T}=2degC, {G, C}=4degC to compute
melting temperatures.
This function uses vectorial operations for speed. The results are also
cached.
"""
lmin, lmax = size_range
tmin, tmax = tm_range
table = np.zeros((lmax + 1 - lmin, len(sequence)))
cumsum = np.cumsum([4 if nuc in "GC" else 2 for nuc in sequence])
for i, oh_size in enumerate(range(lmin, lmax + 1)):
arr = cumsum[oh_size:] - cumsum[:-oh_size]
start = int(oh_size / 2)
end = start + len(arr)
table[i, start:end] = arr
table[i, :start] = table[i, start]
table[i, end:] = table[i, end-1]
scores = - (table - tmin) * (table - tmax)
best_sizes_indices = scores.argmax(axis=0)
best_sizes = lmin + best_sizes_indices
validities = np.choose(best_sizes_indices, scores) >= 0
osizes_and_validities = zip(best_sizes, validities)
return [
None if not valid
else get_segment_coordinates(i, ovh_size, len(sequence))
for i, (ovh_size, valid) in enumerate(osizes_and_validities)
]
def find_non_unique_segments(sequence, perc_identity=80):
blast_record = blast_sequences(sequence, subject=sequence,
perc_identity=perc_identity,
ungapped=False, word_size=4)[0]
segments_with_alignments = sorted(set([
(h.query_start, h.query_end)
for al in blast_record.alignments
for h in al.hsps
if (h.query_start, h.query_end) != (1, len(sequence))
]))
return group_overlapping_segments(segments_with_alignments)
def load_record(filename, linear=True, name='auto'):
if filename.lower().endswith(("gb", "gbk")):
record = SeqIO.read(filename, "genbank")
elif filename.lower().endswith(('fa', 'fasta')):
record = SeqIO.read(filename, "fasta")
else:
raise ValueError('Unknown format for file: %s' % filename)
record.linear = linear
if name == 'auto':
name = os.path.splitext(os.path.basename(filename))[0]
record.id = name
record.name = name.replace(" ", "_")[:20]
return record
def annotate_record(seqrecord, location="full", feature_type="misc_feature",
margin=0, **qualifiers):
"""Add a feature to a Biopython SeqRecord.
Parameters
----------
seqrecord
The biopython seqrecord to be annotated.
location
Either (start, end) or (start, end, strand). (strand defaults to +1)
feature_type
The type associated with the feature
margin
Number of extra bases added on each side of the given location.
qualifiers
Dictionnary that will be the Biopython feature's `qualifiers` attribute.
"""
if location == "full":
location = (margin, len(seqrecord) - margin)
strand = location[2] if len(location) == 3 else 1
seqrecord.features.append(
SeqFeature(
FeatureLocation(location[0], location[1], strand),
qualifiers=qualifiers,
type=feature_type
)
)
| [
"copy.deepcopy",
"os.remove",
"zipfile.ZipFile",
"Bio.SeqIO.read",
"tempfile.mkstemp",
"Bio.Seq.Seq",
"Bio.SeqFeature.FeatureLocation",
"os.path.basename",
"time.sleep",
"numpy.cumsum",
"Bio.Blast.NCBIXML.parse",
"os.path.splitext",
"os.fdopen",
"numpy.choose"
] | [((2348, 2372), 'tempfile.mkstemp', 'tempfile.mkstemp', (['""".xml"""'], {}), "('.xml')\n", (2364, 2372), False, 'import tempfile\n'), ((2402, 2425), 'tempfile.mkstemp', 'tempfile.mkstemp', (['""".fa"""'], {}), "('.fa')\n", (2418, 2425), False, 'import tempfile\n'), ((5777, 5793), 'copy.deepcopy', 'deepcopy', (['record'], {}), '(record)\n', (5785, 5793), False, 'from copy import deepcopy\n'), ((7335, 7393), 'numpy.cumsum', 'np.cumsum', (["[(4 if nuc in 'GC' else 2) for nuc in sequence]"], {}), "([(4 if nuc in 'GC' else 2) for nuc in sequence])\n", (7344, 7393), True, 'import numpy as np\n'), ((4901, 4931), 'zipfile.ZipFile', 'zipfile.ZipFile', (['zip_path', '"""r"""'], {}), "(zip_path, 'r')\n", (4916, 4931), False, 'import zipfile\n'), ((7835, 7872), 'numpy.choose', 'np.choose', (['best_sizes_indices', 'scores'], {}), '(best_sizes_indices, scores)\n', (7844, 7872), True, 'import numpy as np\n'), ((8762, 8793), 'Bio.SeqIO.read', 'SeqIO.read', (['filename', '"""genbank"""'], {}), "(filename, 'genbank')\n", (8772, 8793), False, 'from Bio import SeqIO\n'), ((3048, 3071), 'tempfile.mkstemp', 'tempfile.mkstemp', (['""".fa"""'], {}), "('.fa')\n", (3064, 3071), False, 'import tempfile\n'), ((4427, 4446), 'os.remove', 'os.remove', (['xml_name'], {}), '(xml_name)\n', (4436, 4446), False, 'import os\n'), ((4459, 4480), 'os.remove', 'os.remove', (['fasta_name'], {}), '(fasta_name)\n', (4468, 4480), False, 'import os\n'), ((5236, 5264), 'os.path.splitext', 'os.path.splitext', (['f.filename'], {}), '(f.filename)\n', (5252, 5264), False, 'import os\n'), ((8864, 8893), 'Bio.SeqIO.read', 'SeqIO.read', (['filename', '"""fasta"""'], {}), "(filename, 'fasta')\n", (8874, 8893), False, 'from Bio import SeqIO\n'), ((9965, 10014), 'Bio.SeqFeature.FeatureLocation', 'FeatureLocation', (['location[0]', 'location[1]', 'strand'], {}), '(location[0], location[1], strand)\n', (9980, 10014), False, 'from Bio.SeqFeature import SeqFeature, FeatureLocation\n'), ((678, 695), 'Bio.Seq.Seq', 'Seq', 
(['dna_sequence'], {}), '(dna_sequence)\n', (681, 695), False, 'from Bio.Seq import Seq\n'), ((4153, 4168), 'time.sleep', 'time.sleep', (['(0.1)'], {}), '(0.1)\n', (4163, 4168), False, 'import time\n'), ((4695, 4710), 'time.sleep', 'time.sleep', (['(0.1)'], {}), '(0.1)\n', (4705, 4710), False, 'import time\n'), ((9053, 9079), 'os.path.basename', 'os.path.basename', (['filename'], {}), '(filename)\n', (9069, 9079), False, 'import os\n'), ((4065, 4081), 'Bio.Blast.NCBIXML.parse', 'NCBIXML.parse', (['f'], {}), '(f)\n', (4078, 4081), False, 'from Bio.Blast import NCBIXML\n'), ((4335, 4359), 'os.fdopen', 'os.fdopen', (['xml_file', '"""w"""'], {}), "(xml_file, 'w')\n", (4344, 4359), False, 'import os\n'), ((4380, 4406), 'os.fdopen', 'os.fdopen', (['fasta_file', '"""w"""'], {}), "(fasta_file, 'w')\n", (4389, 4406), False, 'import os\n'), ((4609, 4627), 'os.remove', 'os.remove', (['subject'], {}), '(subject)\n', (4618, 4627), False, 'import os\n'), ((5438, 5480), 'Bio.SeqIO.read', 'SeqIO.read', (['content', 'extensions_types[ext]'], {}), '(content, extensions_types[ext])\n', (5448, 5480), False, 'from Bio import SeqIO\n')] |
# Copyright 2019 Saarland University, Spoken Language Systems LSV
# Author: <NAME>, <NAME>, <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# THIS CODE IS PROVIDED *AS IS*, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, EITHER EXPRESS OR IMPLIED, INCLUDING WITHOUT LIMITATION ANY IMPLIED
# WARRANTIES OR CONDITIONS OF TITLE, FITNESS FOR A PARTICULAR PURPOSE,
# MERCHANTABLITY OR NON-INFRINGEMENT.
#
# See the Apache 2 License for the specific language governing permissions and
# limitations under the License.
import pickle
import os
import numpy as np
import matplotlib.pyplot as plt
class NoiseMatrix:
    """Label-noise (confusion) matrix with persistence and plotting helpers.

    Rows index the "true" labels, columns the "noisy" labels.
    """

    def __init__(self, name):
        self.name = name
        # Filled in later via the setters below.
        self.matrix = None
        self.description = None
        self.idx_to_label_name_map = None

    def set_matrix(self, matrix):
        self.matrix = matrix

    def set_description(self, description):
        self.description = description

    def set_idx_to_label_name_map(self, idx_to_label_name_map):
        """
        A map to convert a label index to a specific name
        Used e.g. for the tick-labels of the plot
        """
        self.idx_to_label_name_map = idx_to_label_name_map

    @staticmethod
    def load_from_file(name, dir_path="../noise_mats/"):
        """Load a pickled NoiseMatrix called ``name`` from ``dir_path``.

        NOTE: ``pickle.load`` can execute arbitrary code — only load trusted files.
        """
        with open(os.path.join(dir_path, "{}.pkl".format(name)), "rb") as input_file:
            return pickle.load(input_file)

    def store_to_file(self, dir_path="../noise_mats/"):
        """Pickle this object to ``dir_path/<name>.pkl``."""
        with open(os.path.join(dir_path, "{}.pkl".format(self.name)), "wb") as output_file:
            pickle.dump(self, output_file)

    def visualize(self, title=None, xlabel="noisy label", ylabel="true label", save_filename=None):
        """Plot this matrix; defaults the title to the matrix name."""
        if title is None:
            title = "Noise Matrix {}".format(self.name)
        NoiseMatrix.visualize_matrix(self.matrix, title, xlabel, ylabel, self.idx_to_label_name_map, save_filename)

    @staticmethod
    def visualize_matrix(matrix, title="", xlabel="noisy label", ylabel="true label", idx_to_label_name_map=None, save_filename=None,
                         vmin=0, vmax=1):
        """Render ``matrix`` as a heat map and return the current figure.

        If ``idx_to_label_name_map`` is given, axes get named ticks (and the
        title is raised further to clear the rotated labels).
        """
        plt.matshow(matrix, vmin=vmin, vmax=vmax, interpolation="none", cmap=plt.cm.Blues)
        plt.ylabel(ylabel)
        plt.xlabel(xlabel)
        plt.colorbar()
        if idx_to_label_name_map is not None:
            tick_marks = np.arange(len(idx_to_label_name_map))
            label_names = [idx_to_label_name_map[idx] for idx in tick_marks]
            plt.xticks(tick_marks, label_names, rotation=90)
            plt.yticks(tick_marks, label_names)
            plt.title(title, y=1.5)
        else:
            plt.title(title, y=1.2)
        if save_filename is not None:
            plt.savefig(save_filename, bbox_inches="tight")
        return plt.gcf()

    @staticmethod
    def compute_noise_matrix(instance_as, instance_bs, num_labels, label_name_to_label_idx_map=None, row_normalize=True):
        """
        For two corresponding lists of clean and noisy instance objects that have a label attribute,
        compute the noise or confusion matrix.
        instance_as: rows in the noise matrix (often clean-data)
        instance_bs: columns in the noise matrix (often noisy-data)
        label_name_to_label_idx_map: optional mapping from label names to integer indices
        row_normalize: if True, each non-empty row is normalized to sum to 1
        """
        assert len(instance_as) == len(instance_bs)

        def to_idx(label):
            # Identity when no mapping is given, otherwise translate the name.
            return label if label_name_to_label_idx_map is None else label_name_to_label_idx_map[label]

        noise_matrix = np.zeros((num_labels, num_labels))
        for instance_a, instance_b in zip(instance_as, instance_bs):
            noise_matrix[to_idx(instance_a.label)][to_idx(instance_b.label)] += 1
        if row_normalize:
            for row in noise_matrix:
                row_sum = np.sum(row)
                if row_sum != 0:
                    row /= row_sum
        return noise_matrix
| [
"matplotlib.pyplot.title",
"pickle.dump",
"numpy.sum",
"matplotlib.pyplot.yticks",
"matplotlib.pyplot.matshow",
"numpy.zeros",
"matplotlib.pyplot.colorbar",
"pickle.load",
"matplotlib.pyplot.xticks",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.gcf",
"matplotlib.pyplot.savefig",
"matplotli... | [((2368, 2455), 'matplotlib.pyplot.matshow', 'plt.matshow', (['matrix'], {'vmin': 'vmin', 'vmax': 'vmax', 'interpolation': '"""none"""', 'cmap': 'plt.cm.Blues'}), "(matrix, vmin=vmin, vmax=vmax, interpolation='none', cmap=plt.cm\n .Blues)\n", (2379, 2455), True, 'import matplotlib.pyplot as plt\n'), ((2459, 2477), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['ylabel'], {}), '(ylabel)\n', (2469, 2477), True, 'import matplotlib.pyplot as plt\n'), ((2486, 2504), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['xlabel'], {}), '(xlabel)\n', (2496, 2504), True, 'import matplotlib.pyplot as plt\n'), ((2513, 2527), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {}), '()\n', (2525, 2527), True, 'import matplotlib.pyplot as plt\n'), ((3049, 3058), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (3056, 3058), True, 'import matplotlib.pyplot as plt\n'), ((3610, 3644), 'numpy.zeros', 'np.zeros', (['(num_labels, num_labels)'], {}), '((num_labels, num_labels))\n', (3618, 3644), True, 'import numpy as np\n'), ((1604, 1627), 'pickle.load', 'pickle.load', (['input_file'], {}), '(input_file)\n', (1615, 1627), False, 'import pickle\n'), ((1815, 1845), 'pickle.dump', 'pickle.dump', (['self', 'output_file'], {}), '(self, output_file)\n', (1826, 1845), False, 'import pickle\n'), ((2735, 2783), 'matplotlib.pyplot.xticks', 'plt.xticks', (['tick_marks', 'label_names'], {'rotation': '(90)'}), '(tick_marks, label_names, rotation=90)\n', (2745, 2783), True, 'import matplotlib.pyplot as plt\n'), ((2796, 2831), 'matplotlib.pyplot.yticks', 'plt.yticks', (['tick_marks', 'label_names'], {}), '(tick_marks, label_names)\n', (2806, 2831), True, 'import matplotlib.pyplot as plt\n'), ((2845, 2868), 'matplotlib.pyplot.title', 'plt.title', (['title'], {'y': '(1.5)'}), '(title, y=1.5)\n', (2854, 2868), True, 'import matplotlib.pyplot as plt\n'), ((2894, 2917), 'matplotlib.pyplot.title', 'plt.title', (['title'], {'y': '(1.2)'}), '(title, y=1.2)\n', (2903, 2917), True, 'import 
matplotlib.pyplot as plt\n'), ((2973, 3020), 'matplotlib.pyplot.savefig', 'plt.savefig', (['save_filename'], {'bbox_inches': '"""tight"""'}), "(save_filename, bbox_inches='tight')\n", (2984, 3020), True, 'import matplotlib.pyplot as plt\n'), ((4229, 4240), 'numpy.sum', 'np.sum', (['row'], {}), '(row)\n', (4235, 4240), True, 'import numpy as np\n')] |
# Peak spectral analysis using Welch's modified periodogram
# Author : <NAME>
# Digital Signal Processing laboratory : University of São Paulo (SEL/EESC/USP)
import pandas as pd           # kept for compatibility (unused below)
import scipy.signal as sg
import scipy.stats as ss      # kept for compatibility (unused below)
import matplotlib.pyplot as plt
import numpy as np

# Setup sine
N = int(1e5)   # number of samples (int: np.random.randint requires integer bounds)
A = 10         # amplitude [V]
f0 = 60        # sine frequency [Hz] (renamed from `f` to avoid shadowing below)
Fs = 5 * f0    # sampling frequency [Hz]
t = np.arange(N) / Fs
y_sine = A * np.sin(2 * np.pi * f0 * t)

# Insert peaks in random places: each variant gets num_peaks[i] outliers,
# each scaled by the corresponding proportionality factor prop[i].
num_peaks = [10, 20, 30]
prop = [10, 15, 20]
peaked_signal = []
for i in range(len(num_peaks)):
    aux = list(y_sine)
    idx_aux = np.random.randint(0, N, num_peaks[i])
    for j in range(num_peaks[i]):
        aux[idx_aux[j]] = aux[idx_aux[j]] + y_sine[idx_aux[j]] * prop[i]
    peaked_signal.append(aux)

# Power spectrum through Welch's method (flat-top window, 1024-sample segments)
freqs = []
powers = []
for i in range(len(num_peaks)):
    f_aux, P_aux = sg.welch(peaked_signal[i], Fs, 'flattop', 1024, scaling='spectrum')
    freqs.append(f_aux)
    powers.append(P_aux)

# Plots: left column shows the time signals, right column their spectra
fig, ax = plt.subplots(len(num_peaks), 2)
for i in range(len(num_peaks)):
    ax[i, 0].plot(t, peaked_signal[i])
    ax[i, 0].set_xlabel('t [s]')
    ax[i, 0].set_ylabel('Amp [V]')
    ax[i, 0].set_title(str(num_peaks[i]) + ' picos com prop=' + str(prop[i]))
    ax[i, 1].plot(freqs[i], powers[i])
    if i == 0:
        ax[i, 1].set_title('Respectivos espectros')
    ax[i, 1].set_xlabel('f [Hz]')
    ax[i, 1].set_ylabel('P [V²/Ω*Hz]')
    ax[i, 1].set_xlim(0, 100)
fig.tight_layout()
plt.show()
| [
"matplotlib.pyplot.show",
"scipy.signal.welch",
"numpy.random.randint",
"numpy.sin",
"numpy.arange"
] | [((1360, 1370), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1368, 1370), True, 'import matplotlib.pyplot as plt\n'), ((321, 333), 'numpy.arange', 'np.arange', (['N'], {}), '(N)\n', (330, 333), True, 'import numpy as np\n'), ((348, 373), 'numpy.sin', 'np.sin', (['(2 * np.pi * f * t)'], {}), '(2 * np.pi * f * t)\n', (354, 373), True, 'import numpy as np\n'), ((526, 563), 'numpy.random.randint', 'np.random.randint', (['(0)', 'N', 'num_peaks[i]'], {}), '(0, N, num_peaks[i])\n', (543, 563), True, 'import numpy as np\n'), ((787, 854), 'scipy.signal.welch', 'sg.welch', (['peaked_signal[i]', 'Fs', '"""flattop"""', '(1024)'], {'scaling': '"""spectrum"""'}), "(peaked_signal[i], Fs, 'flattop', 1024, scaling='spectrum')\n", (795, 854), True, 'import scipy.signal as sg\n')] |
#!/usr/bin/env python3
import collections
import copy
import glob
import os
import re
import cv2
import numpy as np
import pandas as pd
import scipy.interpolate
import torch
import torch.nn.functional as F
import yaml
from PIL import Image
from easydict import EasyDict as edict
from inference.base_image_utils import get_scale_size, choose_center_full_size_crop_params, batch2array, image2batch
from inference.datasets import expand_latents, get_shape
from inference.fine_tune_pipeline import optimize_latents, fine_tune_generator
from inference.inference_utils import get_noise_for_infer, sum_dicts
from inference.metrics import LPIPSLossWrapper, SSIM
from inference.perspective import get_horizon_line_coords, make_manual_homography_kornia, warp_homography_kornia, \
RandomHomography
from inference.segmentation import SegmentationModule
from inference.train_encoder import DecoderFromLatents
from utils import get_mean_style
import constants
def noise_cycle_shift(latents, part, projective_transforms,
                      shift_names=None, shift_channels=None, rescale_after_shift=False,
                      shift_in_hr=False, horizon_line=None):
    """Return a deep copy of ``latents`` whose noise maps are homography-warped.

    Only entries whose key starts with 'noise' are modified; on those, the
    selected channels are warped by ``warp_homography_kornia`` with
    ``n_iter=part`` (i.e. ``part`` controls how far along the animation the
    warp has progressed).

    Parameters
    ----------
    latents : dict
        Name -> tensor. Noise tensors are indexed as ``[:, 0, shift_channels]``
        and up/downsampled over their last two dims, so they are assumed to be
        5-D with a singleton second dim — TODO confirm against the decoder.
    part : number
        Animation progress, forwarded to the warp as ``n_iter``.
    projective_transforms
        Homography parameters understood by ``warp_homography_kornia``.
    shift_names : list of str, optional
        Keys to consider; defaults to all keys (non-'noise' keys are skipped).
    shift_channels : sequence of int, optional
        Channel indices to warp; defaults to the last channel only.
    rescale_after_shift : bool
        If True, re-normalize the warped channels back to the pre-warp
        mean/std so the noise magnitude stays stable.
    shift_in_hr : int or False
        If truthy, an edge length: upsample to (shift_in_hr, shift_in_hr)
        before warping and downsample back afterwards (bicubic both ways).
    horizon_line : optional
        Forwarded to the warp; presumably restricts the warped region —
        confirm against ``warp_homography_kornia``.
    """
    latents = copy.deepcopy(latents)  # never mutate the caller's latents
    if shift_names is None:
        shift_names = list(latents.keys())
    if shift_channels is None:
        shift_channels = (-1,)
    for name in shift_names:
        if not name.startswith('noise'):
            continue
        if shift_in_hr:
            # Remember the original spatial size so we can downscale back.
            orig_size_lr = latents[name].shape[-2:]
            latents[name] = F.interpolate(latents[name].squeeze(1), size=(shift_in_hr, shift_in_hr),
                                          mode='bicubic', align_corners=False).unsqueeze(1)
        before = latents[name][:, 0, shift_channels]
        # Record pre-warp statistics for the optional re-normalization below.
        before_mean = before.mean()
        before_std = before.std()
        after = warp_homography_kornia(before, projective_transforms,
                                       n_iter=part, horizon_line=horizon_line).unsqueeze(1)
        if rescale_after_shift:
            # Standardize the warped values, then restore the original moments.
            after = (after - after.mean()) / after.std() * before_std + before_mean
        latents[name][:, 0, shift_channels] = after
        if shift_in_hr:
            latents[name] = F.interpolate(latents[name].squeeze(1), size=orig_size_lr,
                                          mode='bicubic', align_corners=False).unsqueeze(1)
    return latents
def rescale_img_tensor(tensor, out_size):
    """Bilinearly resize a single CHW image tensor to ``out_size`` (H, W)."""
    batched = tensor.unsqueeze(0)  # F.interpolate expects an NCHW batch
    resized = F.interpolate(batched, size=out_size, mode='bilinear', align_corners=False)
    return resized.squeeze(0)
def gen_images_cycle_shift(latents, decoder, steps=10, shift_names=None, shift_channels=None, rescale_after_shift=False,
                           min_shift=0, max_shift=2, animate_w_names=(), target_z_func=None, projective_transforms=None,
                           shift_in_hr=False, horizon_line=None):
    """Decode an animation by progressively warping the noise latents.

    For ``steps`` evenly spaced shift amounts in ``[min_shift, max_shift]``,
    warps the noise latents via ``noise_cycle_shift`` and decodes a frame.
    Optionally, ``target_z_func`` animates the latents named in
    ``animate_w_names`` along interpolated trajectories over time in [0, 1].

    Returns a list of ``steps`` decoded frames (numpy arrays from
    ``batch2array``). All remaining keyword arguments are forwarded to
    ``noise_cycle_shift``.
    """
    images = []
    latents_for_shift = copy.deepcopy(latents)
    if target_z_func is not None:
        all_times = np.linspace(0, 1, steps)
        z_interpolations = {name: target_z_func(latents_for_shift[name], all_times) for name in animate_w_names}
    for step_i, shift in enumerate(np.linspace(min_shift, max_shift, steps)):
        # noise_cycle_shift deep-copies its input, so no extra copy is needed here
        # (the original code deep-copied twice and also accumulated the
        # per-step latents into a list that was never used).
        new_latents = noise_cycle_shift(latents_for_shift, shift, projective_transforms=projective_transforms,
                                        shift_names=shift_names, shift_channels=shift_channels,
                                        rescale_after_shift=rescale_after_shift, shift_in_hr=shift_in_hr,
                                        horizon_line=horizon_line)
        if target_z_func is not None:
            for key in animate_w_names:
                new_latents[key] = z_interpolations[key][step_i]
        images.append(batch2array(decoder(new_latents))[0])
    return images
ZTimeStep = collections.namedtuple('ZTimeStep', 'time z'.split(' '))
class SplineStyleAnimation:
    """Animate style vectors along a spline through MLP-predicted keyframes.

    Each step provides a time stamp (``.time``) and a conditioning scalar
    (``.z``); the approximator maps ``[styles, z]`` to a target style vector,
    and a spline fitted through those targets is evaluated at arbitrary new
    time stamps. Extra keyword arguments are forwarded to
    ``scipy.interpolate.make_interp_spline`` (e.g. ``k`` for the degree).
    """

    def __init__(self, mlp_approximator, *steps, **spline_kwargs):
        self.mlp_approximator = mlp_approximator
        self.steps = steps
        self.spline_kwargs = spline_kwargs

    def __call__(self, styles, new_times):
        with torch.no_grad():
            # One predicted keyframe per step: the approximator sees the
            # styles with the step's z appended along the last dimension.
            keyframes = []
            for step in self.steps:
                z_cond = torch.tensor(step.z).to(styles)[None, None, ...]
                keyframes.append(self.mlp_approximator(torch.cat((styles, z_cond), dim=-1)))
            keyframes_flat = torch.stack(keyframes).view(-1, styles.shape[-1]).detach().cpu().numpy()
            key_times = [step.time for step in self.steps]
            # Fit once in numpy, evaluate at every requested time stamp.
            spline = scipy.interpolate.make_interp_spline(key_times, keyframes_flat, **self.spline_kwargs)
            sampled = spline(new_times)
            result = torch.from_numpy(sampled).to(styles.device).view(len(new_times), *styles.shape).float()
        return result
def write_video(out_path, frames, fps=24, write_frames=False):
    """Encode CHW float frames in [-1, 1] into an MJPG video at ``out_path``.

    If ``write_frames`` is True, each frame is additionally saved as a JPEG
    under ``<out_path>_frames/``. The writer is always released, even if an
    encoding step raises.
    """
    _, height, width = frames[0].shape
    if write_frames:
        frames_dirname = out_path + '_frames'
        os.makedirs(frames_dirname, exist_ok=True)
    writer = cv2.VideoWriter(out_path, cv2.VideoWriter_fourcc(*'MJPG'), fps, (width, height))
    try:
        for i, frame in enumerate(frames):
            # Map [-1, 1] -> [0, 255] and clamp.
            pixels = np.array(frame) / 2 + 0.5
            pixels = np.clip(pixels * 255, 0, 255)
            # RGB -> BGR, then CHW -> HWC as OpenCV expects.
            bgr = np.transpose(pixels[[2, 1, 0]], (1, 2, 0)).astype('uint8')
            writer.write(bgr)
            if write_frames:
                cv2.imwrite(os.path.join(frames_dirname, f'{i:05d}.jpg'), bgr)
    finally:
        writer.release()
def calc_segmentation_posterior_error(segm_model, target_segm, animated_frames,
                                      still_segm_mask, first_frame, lpips_model, ssim_model, **predict_kwargs):
    """Per-frame segmentation-consistency and still-region similarity metrics.

    For each animated frame: pixel accuracy of ``segm_model``'s prediction
    against ``target_segm``'s argmax labels, plus LPIPS and SSIM between the
    frame and ``first_frame`` restricted to the still region
    (``still_segm_mask``). Returns a defaultdict keyed by
    ``acc_<i>`` / ``lpips_<i>`` / ``ssim_<i>``. Requires CUDA.
    """
    metrics = collections.defaultdict(float)
    with torch.no_grad():
        target_labels = target_segm.argmax(dim=1)
        reference_still = first_frame * still_segm_mask
        for frame_i, frame_np in enumerate(animated_frames):
            frame = torch.from_numpy(frame_np).cuda().unsqueeze(0)
            pred_labels = segm_model.predict(frame, **predict_kwargs).argmax(dim=1)
            metrics[f'acc_{frame_i}'] = float((target_labels == pred_labels).float().mean())
            frame_still = frame * still_segm_mask
            metrics[f'lpips_{frame_i}'] = float(lpips_model(frame_still, reference_still).mean())
            metrics[f'ssim_{frame_i}'] = float(ssim_model(frame_still, reference_still).mean())
    return metrics
def main(args):
    """Animate every image matching ``args.inglob``; write videos (and optional metrics) to ``args.outdir``.

    Per image: encode it to latents (or start from the generator's mean
    style), optimize the latents against the center crop, fine-tune the
    generator for that image, then render frames by homography-warping the
    noise latents. Optionally computes LPIPS/SSIM/segmentation metrics and
    saves them as a TSV. Requires CUDA.
    """
    with open(args.config) as f:
        config = edict(yaml.load(f, Loader=yaml.SafeLoader))
    os.makedirs(args.outdir, exist_ok=True)
    # A missing checkpoint or the string 'none' (any case) disables the encoder.
    if config.encoder_checkpoint is None or config.encoder_checkpoint.lower() == 'none':
        encoder = None
    else:
        encoder = torch.load(os.path.join(constants.RESULT_DIR, config.encoder_checkpoint)).cuda()
    decoder = DecoderFromLatents(**config.decoder_kwargs)
    target_size = decoder.infer_model['resolution']
    # Optional segmentation network, used below to separate "movable" pixels.
    if config.segmentation:
        config.segmentation.module_kwargs['models_dirname'] = os.path.join(
            constants.RESULT_DIR, config.segmentation.module_kwargs['models_dirname'])
        segmentation_network = SegmentationModule(**config.segmentation.module_kwargs).cuda()
        segmentation_network.eval()
    else:
        segmentation_network = None
    # Optional learned style animation: replace the config entry by a callable.
    if 'target_z_func' in config.shift_kwargs:
        mlp_approx_model = torch.load(os.path.join(
            constants.RESULT_DIR, config.shift_kwargs.target_z_func.mlp_approx_model)).cuda()
        target_z_func_kwargs = config.shift_kwargs.target_z_func.kwargs
        steps = config.shift_kwargs.target_z_func.steps
        config.shift_kwargs.target_z_func = SplineStyleAnimation(mlp_approx_model,
                                                                 *steps,
                                                                 **target_z_func_kwargs)
    # NOTE(review): this is a CLI path string, but the manual-homography branch
    # below deep-copies it and assigns into it like a dict — verify what
    # RandomHomography / make_manual_homography_kornia actually expect here.
    homography_kwargs = args.homography_dir
    if 'num_real_homs_per_image' in config.shift_kwargs:
        num_real_homs_per_image = config.shift_kwargs.pop('num_real_homs_per_image')
        random_hom = RandomHomography(homography_kwargs)
    else:
        num_real_homs_per_image = 0
    full_output = config.get('full_output', True)
    save_frames_as_jpg = full_output or config.get('save_frames_as_jpg', True)
    calc_metrics = config.get('calc_metrics', False)
    infer_using_mask = config.get('infer_using_mask', False)
    fine_tune_generator_using_mask = config.get('fine_tune_generator_using_mask', False)
    if calc_metrics:
        lpips_criterion = LPIPSLossWrapper(model_path=os.path.join(
            constants.RESULT_DIR, config.get('lpips_model_path', None))).cuda()
        ssim_criterion = SSIM().cuda()
    sum_metrics = []
    sum_metrics_idx = []
    for src_path in sorted(glob.glob(args.inglob)):
        print()
        print('Animating', src_path)
        fname = os.path.splitext(os.path.basename(src_path))[0]
        # Load, resize, then center-crop the image to the generator resolution.
        src_image = Image.open(src_path).convert('RGB')
        src_image = src_image.resize(get_scale_size(config.max_in_resolution, src_image.size))
        img_batch_orig = image2batch(src_image).cuda()
        scaled_size = get_scale_size(target_size, img_batch_orig.shape[2:])
        img_batch_scaled = F.interpolate(img_batch_orig, size=scaled_size, mode='bilinear', align_corners=False)
        crop_y1, crop_y2, crop_x1, crop_x2 = choose_center_full_size_crop_params(*img_batch_scaled.shape[2:])
        img_batch_cropped = img_batch_scaled[:, :, crop_y1:crop_y2, crop_x1:crop_x2]
        img_batch_cropped01 = img_batch_cropped / 2 + 0.5  # [-1, 1] -> [0, 1]
        config.shift_kwargs['horizon_line'] = None
        with torch.no_grad():
            shift_mask = None
            if segmentation_network is not None:
                # Build a binary mask of "movable" pixels from class scores.
                img_batch_for_segm = img_batch_orig / 2 + 0.5
                cls_scores = segmentation_network.predict(img_batch_for_segm, **config.segmentation.predict_kwargs)
                cls_scores = F.interpolate(cls_scores, size=scaled_size, mode='bilinear', align_corners=False)
                cls_scores = cls_scores[:, :, crop_y1:crop_y2, crop_x1:crop_x2]
                cls_proba = F.softmax(cls_scores, dim=1)
                config.shift_kwargs['horizon_line'] = get_horizon_line_coords(cls_scores)[0]  # if infer_using_mask else 1
                movable_scores = cls_scores[:, config.segmentation.movable_classes].max(1, keepdim=True)[0]
                cls_scores[:, config.segmentation.movable_classes] = 0
                immovable_scores = cls_scores.max(1, keepdim=True)[0]
                shift_mask = (movable_scores > immovable_scores).float()
                shift_mask_np = shift_mask.detach().cpu().numpy()[0, 0]
                if config.segmentation.erode > 0:
                    # NOTE(review): `dilation_kernel` is not defined anywhere in this
                    # file — confirm it exists at module level before enabling `erode`.
                    shift_mask_np = cv2.erode(shift_mask_np, dilation_kernel)
                shift_mask = torch.from_numpy(shift_mask_np).to(shift_mask)[None, None, ...]
            else:
                config.shift_kwargs['horizon_line'] = 1
            if homography_kwargs is not None:
                if num_real_homs_per_image == 0:
                    homography_kwargs = copy.deepcopy(homography_kwargs)
                    homography_kwargs['horizon_line'] = config.shift_kwargs['horizon_line']
                    config.shift_kwargs['projective_transforms'] = make_manual_homography_kornia(**homography_kwargs)
                else:
                    hom_id, hom = random_hom(config.shift_kwargs['horizon_line'])
                    config.shift_kwargs['projective_transforms'] = hom
            # Initial latents: either the mean style (no encoder) or the
            # encoder's prediction, plus sampled noise for any missing keys.
            if encoder is None:
                mean_style = get_mean_style(decoder.infer_model['g_running'], 'cuda', 512)
                latents = {f'latent_wprime:{level_i}:{j}': mean_style.clone().detach().unsqueeze(0)
                           for level_i in range(decoder.infer_model['step'] + 1)
                           for j in range(2)}
            else:
                latents = encoder(img_batch_cropped)
                if config.get('take_only_latents', None):
                    latents = {name: var for name, var in latents.items()
                               if re.search(config['take_only_latents'], name)}
                for name in list(latents):
                    latents[name] = latents[name].unsqueeze(1)
            noise = get_noise_for_infer(decoder.infer_model.g_running, batch_size=1, step=decoder.infer_model.step,
                                        scale=config.get('init_noise_scale', 1))
            noise = expand_latents(noise, name_prefix='noise')
            for name, var in noise.items():
                if name not in latents:
                    latents[name] = var
            # Stage 1 (visualization only): raw encoder output with fresh noise.
            latents_for_encoder_vis = copy.deepcopy(latents)
            latents_for_encoder_vis.update(
                expand_latents(get_noise_for_infer(decoder.infer_model.g_running, batch_size=1,
                                                    step=decoder.infer_model.step),
                               name_prefix='noise')
            )
            encoder_image_tensor = decoder(latents_for_encoder_vis)
            encoder_image = batch2array(encoder_image_tensor)[0]
            encoder_image_tensor01 = encoder_image_tensor / 2 + 0.5
            if full_output or calc_metrics:
                encoder_frames = [encoder_image]
                encoder_frames.extend(gen_images_cycle_shift(latents_for_encoder_vis, decoder,
                                                           **config.shift_kwargs))
            if calc_metrics:
                cur_metrics = collections.defaultdict(float)
                cur_metrics.update(dict(lpips_1_enc=float(lpips_criterion(encoder_image_tensor01.squeeze(1),
                                                                          img_batch_cropped01).mean()),
                                        ssim_1_enc=float(ssim_criterion(encoder_image_tensor01.squeeze(1),
                                                                       img_batch_cropped01).mean())))
                if segmentation_network is not None:
                    sum_dicts(cur_metrics,
                              calc_segmentation_posterior_error(segmentation_network,
                                                                cls_proba,
                                                                [fr / 2 + 0.5 for fr in encoder_frames],
                                                                still_segm_mask=1 - shift_mask,
                                                                first_frame=img_batch_cropped01,
                                                                lpips_model=lpips_criterion,
                                                                ssim_model=ssim_criterion),
                              prefix='segm_1_enc')
        # Stage 2: optimize the latents (needs gradients, so outside no_grad).
        latents = optimize_latents(img_batch_cropped, latents, decoder,
                                   still_segm_mask=(1 - shift_mask) if infer_using_mask else None,
                                   **config.fine_tune_kwargs)
        real_image_cropped = batch2array(img_batch_cropped)
        tuned_image_tensor = decoder(latents)
        tuned_image_tensor01 = tuned_image_tensor / 2 + 0.5
        tuned_image = batch2array(tuned_image_tensor)[0]
        if full_output or calc_metrics:
            frames = [tuned_image]
            frames.extend(gen_images_cycle_shift(latents, decoder,
                                                 **config.shift_kwargs))
        if calc_metrics:
            cur_metrics.update(dict(lpips_2_opt=float(lpips_criterion(tuned_image_tensor01.squeeze(1),
                                                                      img_batch_cropped01).mean()),
                                    ssim_2_opt=float(ssim_criterion(tuned_image_tensor01.squeeze(1),
                                                                   img_batch_cropped01).mean())))
            if segmentation_network is not None:
                sum_dicts(cur_metrics,
                          calc_segmentation_posterior_error(segmentation_network,
                                                            cls_proba,
                                                            [fr / 2 + 0.5 for fr in frames],
                                                            still_segm_mask=1 - shift_mask,
                                                            first_frame=img_batch_cropped01,
                                                            lpips_model=lpips_criterion,
                                                            ssim_model=ssim_criterion),
                          prefix='segm_2_opt')
        # Stage 3: fine-tune the generator weights for this single image.
        tuned_decoder = fine_tune_generator(latents, img_batch_cropped, decoder,
                                            still_segm_mask=(1 - shift_mask) if fine_tune_generator_using_mask else None,
                                            **config.generator_fine_tune_kwargs)[0]
        tuned2_image_tensor = tuned_decoder(latents)
        tuned2_image_tensor01 = tuned2_image_tensor / 2 + 0.5
        tuned2_image = batch2array(tuned2_image_tensor)[0]
        if calc_metrics:
            cur_metrics.update(dict(lpips_3_ft=float(lpips_criterion(tuned2_image_tensor01.squeeze(1),
                                                                     img_batch_cropped01).mean()),
                                    ssim_3_ft=float(ssim_criterion(tuned2_image_tensor01.squeeze(1),
                                                                   img_batch_cropped01).mean())))
        if num_real_homs_per_image > 0 and homography_kwargs is not None:
            # Render one video per distinct sampled real homography.
            used_homs = set()
            actual_homs_n = 0
            for _ in range(num_real_homs_per_image):
                found_new_hom = False
                for _ in range(1000):
                    hom_id, hom = random_hom(config.shift_kwargs['horizon_line'])
                    if hom_id not in used_homs:
                        used_homs.add(hom_id)
                        found_new_hom = True
                        break
                if not found_new_hom:
                    break
                actual_homs_n += 1
                config.shift_kwargs['projective_transforms'] = hom
                tuned_frames = [tuned2_image.copy()]
                tuned_frames.extend(gen_images_cycle_shift(latents, tuned_decoder,
                                                          **config.shift_kwargs))
                if calc_metrics and segmentation_network is not None:
                    sum_dicts(cur_metrics,
                              calc_segmentation_posterior_error(segmentation_network,
                                                                cls_proba,
                                                                [fr / 2 + 0.5 for fr in tuned_frames],
                                                                still_segm_mask=1 - shift_mask,
                                                                first_frame=img_batch_cropped01,
                                                                lpips_model=lpips_criterion,
                                                                ssim_model=ssim_criterion),
                              prefix='segm_3_ft')
                if full_output:
                    # 3x3 composite per frame: rows are (encoder, optimized,
                    # fine-tuned), columns are (input, reconstruction, frame).
                    frames = [np.concatenate((np.concatenate((real_image_cropped, encoder_image, enc_frame), axis=2),
                                              np.concatenate((real_image_cropped, tuned_image, frame), axis=2),
                                              np.concatenate((real_image_cropped, tuned2_image, frame2), axis=2)),
                                             axis=1)
                              for enc_frame, frame, frame2 in zip(encoder_frames, frames, tuned_frames)]
                else:
                    frames = tuned_frames
                write_video(os.path.join(args.outdir, f'{fname}_hom{hom_id}.avi'), frames,
                            write_frames=save_frames_as_jpg, **config.video_kwargs)
                if calc_metrics and segmentation_network is not None:
                    # Average the per-homography segm metrics over actual runs.
                    for k in list(cur_metrics):
                        if k.startswith('segm_3_ft'):
                            cur_metrics[k] /= actual_homs_n
        else:
            tuned_frames = [tuned2_image]
            tuned_frames.extend(gen_images_cycle_shift(latents, tuned_decoder,
                                                      **config.shift_kwargs))
            if full_output:
                frames = [np.concatenate((np.concatenate((real_image_cropped, encoder_image, enc_frame), axis=2),
                                          np.concatenate((real_image_cropped, tuned_image, frame), axis=2),
                                          np.concatenate((real_image_cropped, tuned2_image, frame2), axis=2)),
                                         axis=1)
                          for enc_frame, frame, frame2 in zip(encoder_frames, frames, tuned_frames)]
            else:
                frames = tuned_frames
            write_video(os.path.join(args.outdir, fname + '.avi'), frames, write_frames=save_frames_as_jpg,
                        **config.video_kwargs)
        if calc_metrics:
            sum_metrics.append(cur_metrics)
            sum_metrics_idx.append(fname)
        # Drop large GPU tensors before the next image.
        if segmentation_network is not None:
            del shift_mask
            del cls_scores
            del movable_scores
            del immovable_scores
        del latents
        torch.cuda.empty_cache()
    if calc_metrics:
        sum_metrics = pd.DataFrame(sum_metrics, index=sum_metrics_idx)
        sum_metrics.to_csv(os.path.join(args.outdir, f'metrics{args.suffix}.tsv'), sep='\t')
if __name__ == '__main__':
    import argparse

    # Positionals mirror what main() reads from args: config path, input
    # glob, output directory, and the directory with homography files.
    parser = argparse.ArgumentParser()
    for positional_name in ('config', 'inglob', 'outdir', 'homography_dir'):
        parser.add_argument(positional_name)
    parser.add_argument('--suffix', type=str, default='', help='Suffix to metrics filename')
    main(parser.parse_args())
| [
"inference.base_image_utils.image2batch",
"yaml.load",
"cv2.VideoWriter_fourcc",
"argparse.ArgumentParser",
"numpy.clip",
"collections.defaultdict",
"inference.perspective.make_manual_homography_kornia",
"cv2.VideoWriter",
"glob.glob",
"cv2.erode",
"torch.no_grad",
"os.path.join",
"pandas.Da... | [((1178, 1200), 'copy.deepcopy', 'copy.deepcopy', (['latents'], {}), '(latents)\n', (1191, 1200), False, 'import copy\n'), ((2892, 2914), 'copy.deepcopy', 'copy.deepcopy', (['latents'], {}), '(latents)\n', (2905, 2914), False, 'import copy\n'), ((5301, 5332), 'cv2.VideoWriter_fourcc', 'cv2.VideoWriter_fourcc', (["*'MJPG'"], {}), "(*'MJPG')\n", (5323, 5332), False, 'import cv2\n'), ((5346, 5401), 'cv2.VideoWriter', 'cv2.VideoWriter', (['out_path', 'fourcc', 'fps', '(width, height)'], {}), '(out_path, fourcc, fps, (width, height))\n', (5361, 5401), False, 'import cv2\n'), ((7034, 7073), 'os.makedirs', 'os.makedirs', (['args.outdir'], {'exist_ok': '(True)'}), '(args.outdir, exist_ok=True)\n', (7045, 7073), False, 'import os\n'), ((7311, 7354), 'inference.train_encoder.DecoderFromLatents', 'DecoderFromLatents', ([], {}), '(**config.decoder_kwargs)\n', (7329, 7354), False, 'from inference.train_encoder import DecoderFromLatents\n'), ((22083, 22108), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (22106, 22108), False, 'import argparse\n'), ((2970, 2994), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', 'steps'], {}), '(0, 1, steps)\n', (2981, 2994), True, 'import numpy as np\n'), ((3144, 3184), 'numpy.linspace', 'np.linspace', (['min_shift', 'max_shift', 'steps'], {}), '(min_shift, max_shift, steps)\n', (3155, 3184), True, 'import numpy as np\n'), ((3209, 3241), 'copy.deepcopy', 'copy.deepcopy', (['latents_for_shift'], {}), '(latents_for_shift)\n', (3222, 3241), False, 'import copy\n'), ((5244, 5286), 'os.makedirs', 'os.makedirs', (['frames_dirname'], {'exist_ok': '(True)'}), '(frames_dirname, exist_ok=True)\n', (5255, 5286), False, 'import os\n'), ((6093, 6108), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (6106, 6108), False, 'import torch\n'), ((6127, 6157), 'collections.defaultdict', 'collections.defaultdict', (['float'], {}), '(float)\n', (6150, 6157), False, 'import collections\n'), ((7498, 7590), 'os.path.join', 
'os.path.join', (['constants.RESULT_DIR', "config.segmentation.module_kwargs['models_dirname']"], {}), "(constants.RESULT_DIR, config.segmentation.module_kwargs[\n 'models_dirname'])\n", (7510, 7590), False, 'import os\n'), ((8551, 8586), 'inference.perspective.RandomHomography', 'RandomHomography', (['homography_kwargs'], {}), '(homography_kwargs)\n', (8567, 8586), False, 'from inference.perspective import get_horizon_line_coords, make_manual_homography_kornia, warp_homography_kornia, RandomHomography\n'), ((9257, 9279), 'glob.glob', 'glob.glob', (['args.inglob'], {}), '(args.inglob)\n', (9266, 9279), False, 'import glob\n'), ((9629, 9682), 'inference.base_image_utils.get_scale_size', 'get_scale_size', (['target_size', 'img_batch_orig.shape[2:]'], {}), '(target_size, img_batch_orig.shape[2:])\n', (9643, 9682), False, 'from inference.base_image_utils import get_scale_size, choose_center_full_size_crop_params, batch2array, image2batch\n'), ((9710, 9799), 'torch.nn.functional.interpolate', 'F.interpolate', (['img_batch_orig'], {'size': 'scaled_size', 'mode': '"""bilinear"""', 'align_corners': '(False)'}), "(img_batch_orig, size=scaled_size, mode='bilinear',\n align_corners=False)\n", (9723, 9799), True, 'import torch.nn.functional as F\n'), ((9842, 9906), 'inference.base_image_utils.choose_center_full_size_crop_params', 'choose_center_full_size_crop_params', (['*img_batch_scaled.shape[2:]'], {}), '(*img_batch_scaled.shape[2:])\n', (9877, 9906), False, 'from inference.base_image_utils import get_scale_size, choose_center_full_size_crop_params, batch2array, image2batch\n'), ((12957, 12999), 'inference.datasets.expand_latents', 'expand_latents', (['noise'], {'name_prefix': '"""noise"""'}), "(noise, name_prefix='noise')\n", (12971, 12999), False, 'from inference.datasets import expand_latents, get_shape\n'), ((13147, 13169), 'copy.deepcopy', 'copy.deepcopy', (['latents'], {}), '(latents)\n', (13160, 13169), False, 'import copy\n'), ((15147, 15293), 
'inference.fine_tune_pipeline.optimize_latents', 'optimize_latents', (['img_batch_cropped', 'latents', 'decoder'], {'still_segm_mask': '(1 - shift_mask if infer_using_mask else None)'}), '(img_batch_cropped, latents, decoder, still_segm_mask=1 -\n shift_mask if infer_using_mask else None, **config.fine_tune_kwargs)\n', (15163, 15293), False, 'from inference.fine_tune_pipeline import optimize_latents, fine_tune_generator\n'), ((15391, 15421), 'inference.base_image_utils.batch2array', 'batch2array', (['img_batch_cropped'], {}), '(img_batch_cropped)\n', (15402, 15421), False, 'from inference.base_image_utils import get_scale_size, choose_center_full_size_crop_params, batch2array, image2batch\n'), ((21808, 21832), 'torch.cuda.empty_cache', 'torch.cuda.empty_cache', ([], {}), '()\n', (21830, 21832), False, 'import torch\n'), ((21877, 21925), 'pandas.DataFrame', 'pd.DataFrame', (['sum_metrics'], {'index': 'sum_metrics_idx'}), '(sum_metrics, index=sum_metrics_idx)\n', (21889, 21925), True, 'import pandas as pd\n'), ((4250, 4265), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (4263, 4265), False, 'import torch\n'), ((5474, 5489), 'numpy.array', 'np.array', (['frame'], {}), '(frame)\n', (5482, 5489), True, 'import numpy as np\n'), ((5583, 5605), 'numpy.clip', 'np.clip', (['frame', '(0)', '(255)'], {}), '(frame, 0, 255)\n', (5590, 5605), True, 'import numpy as np\n'), ((6991, 7027), 'yaml.load', 'yaml.load', (['f'], {'Loader': 'yaml.SafeLoader'}), '(f, Loader=yaml.SafeLoader)\n', (7000, 7027), False, 'import yaml\n'), ((9493, 9549), 'inference.base_image_utils.get_scale_size', 'get_scale_size', (['config.max_in_resolution', 'src_image.size'], {}), '(config.max_in_resolution, src_image.size)\n', (9507, 9549), False, 'from inference.base_image_utils import get_scale_size, choose_center_full_size_crop_params, batch2array, image2batch\n'), ((10116, 10131), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (10129, 10131), False, 'import torch\n'), ((13528, 13561), 
'inference.base_image_utils.batch2array', 'batch2array', (['encoder_image_tensor'], {}), '(encoder_image_tensor)\n', (13539, 13561), False, 'from inference.base_image_utils import get_scale_size, choose_center_full_size_crop_params, batch2array, image2batch\n'), ((13939, 13969), 'collections.defaultdict', 'collections.defaultdict', (['float'], {}), '(float)\n', (13962, 13969), False, 'import collections\n'), ((15550, 15581), 'inference.base_image_utils.batch2array', 'batch2array', (['tuned_image_tensor'], {}), '(tuned_image_tensor)\n', (15561, 15581), False, 'from inference.base_image_utils import get_scale_size, choose_center_full_size_crop_params, batch2array, image2batch\n'), ((16998, 17176), 'inference.fine_tune_pipeline.fine_tune_generator', 'fine_tune_generator', (['latents', 'img_batch_cropped', 'decoder'], {'still_segm_mask': '(1 - shift_mask if fine_tune_generator_using_mask else None)'}), '(latents, img_batch_cropped, decoder, still_segm_mask=1 -\n shift_mask if fine_tune_generator_using_mask else None, **config.\n generator_fine_tune_kwargs)\n', (17017, 17176), False, 'from inference.fine_tune_pipeline import optimize_latents, fine_tune_generator\n'), ((17399, 17431), 'inference.base_image_utils.batch2array', 'batch2array', (['tuned2_image_tensor'], {}), '(tuned2_image_tensor)\n', (17410, 17431), False, 'from inference.base_image_utils import get_scale_size, choose_center_full_size_crop_params, batch2array, image2batch\n'), ((21953, 22007), 'os.path.join', 'os.path.join', (['args.outdir', 'f"""metrics{args.suffix}.tsv"""'], {}), "(args.outdir, f'metrics{args.suffix}.tsv')\n", (21965, 22007), False, 'import os\n'), ((1837, 1934), 'inference.perspective.warp_homography_kornia', 'warp_homography_kornia', (['before', 'projective_transforms'], {'n_iter': 'part', 'horizon_line': 'horizon_line'}), '(before, projective_transforms, n_iter=part,\n horizon_line=horizon_line)\n', (1859, 1934), False, 'from inference.perspective import get_horizon_line_coords, 
make_manual_homography_kornia, warp_homography_kornia, RandomHomography\n'), ((7630, 7685), 'inference.segmentation.SegmentationModule', 'SegmentationModule', ([], {}), '(**config.segmentation.module_kwargs)\n', (7648, 7685), False, 'from inference.segmentation import SegmentationModule\n'), ((9161, 9167), 'inference.metrics.SSIM', 'SSIM', ([], {}), '()\n', (9165, 9167), False, 'from inference.metrics import LPIPSLossWrapper, SSIM\n'), ((9368, 9394), 'os.path.basename', 'os.path.basename', (['src_path'], {}), '(src_path)\n', (9384, 9394), False, 'import os\n'), ((9420, 9440), 'PIL.Image.open', 'Image.open', (['src_path'], {}), '(src_path)\n', (9430, 9440), False, 'from PIL import Image\n'), ((9577, 9599), 'inference.base_image_utils.image2batch', 'image2batch', (['src_image'], {}), '(src_image)\n', (9588, 9599), False, 'from inference.base_image_utils import get_scale_size, choose_center_full_size_crop_params, batch2array, image2batch\n'), ((10419, 10505), 'torch.nn.functional.interpolate', 'F.interpolate', (['cls_scores'], {'size': 'scaled_size', 'mode': '"""bilinear"""', 'align_corners': '(False)'}), "(cls_scores, size=scaled_size, mode='bilinear', align_corners=\n False)\n", (10432, 10505), True, 'import torch.nn.functional as F\n'), ((10610, 10638), 'torch.nn.functional.softmax', 'F.softmax', (['cls_scores'], {'dim': '(1)'}), '(cls_scores, dim=1)\n', (10619, 10638), True, 'import torch.nn.functional as F\n'), ((12071, 12132), 'utils.get_mean_style', 'get_mean_style', (["decoder.infer_model['g_running']", '"""cuda"""', '(512)'], {}), "(decoder.infer_model['g_running'], 'cuda', 512)\n", (12085, 12132), False, 'from utils import get_mean_style\n'), ((13237, 13337), 'inference.inference_utils.get_noise_for_infer', 'get_noise_for_infer', (['decoder.infer_model.g_running'], {'batch_size': '(1)', 'step': 'decoder.infer_model.step'}), '(decoder.infer_model.g_running, batch_size=1, step=\n decoder.infer_model.step)\n', (13256, 13337), False, 'from 
inference.inference_utils import get_noise_for_infer, sum_dicts\n'), ((21369, 21410), 'os.path.join', 'os.path.join', (['args.outdir', "(fname + '.avi')"], {}), "(args.outdir, fname + '.avi')\n", (21381, 21410), False, 'import os\n'), ((5663, 5693), 'numpy.transpose', 'np.transpose', (['frame', '(1, 2, 0)'], {}), '(frame, (1, 2, 0))\n', (5675, 5693), True, 'import numpy as np\n'), ((5799, 5843), 'os.path.join', 'os.path.join', (['frames_dirname', 'f"""{i:05d}.jpg"""'], {}), "(frames_dirname, f'{i:05d}.jpg')\n", (5811, 5843), False, 'import os\n'), ((7226, 7287), 'os.path.join', 'os.path.join', (['constants.RESULT_DIR', 'config.encoder_checkpoint'], {}), '(constants.RESULT_DIR, config.encoder_checkpoint)\n', (7238, 7287), False, 'import os\n'), ((7861, 7952), 'os.path.join', 'os.path.join', (['constants.RESULT_DIR', 'config.shift_kwargs.target_z_func.mlp_approx_model'], {}), '(constants.RESULT_DIR, config.shift_kwargs.target_z_func.\n mlp_approx_model)\n', (7873, 7952), False, 'import os\n'), ((10694, 10729), 'inference.perspective.get_horizon_line_coords', 'get_horizon_line_coords', (['cls_scores'], {}), '(cls_scores)\n', (10717, 10729), False, 'from inference.perspective import get_horizon_line_coords, make_manual_homography_kornia, warp_homography_kornia, RandomHomography\n'), ((11246, 11287), 'cv2.erode', 'cv2.erode', (['shift_mask_np', 'dilation_kernel'], {}), '(shift_mask_np, dilation_kernel)\n', (11255, 11287), False, 'import cv2\n'), ((11591, 11623), 'copy.deepcopy', 'copy.deepcopy', (['homography_kwargs'], {}), '(homography_kwargs)\n', (11604, 11623), False, 'import copy\n'), ((11783, 11833), 'inference.perspective.make_manual_homography_kornia', 'make_manual_homography_kornia', ([], {}), '(**homography_kwargs)\n', (11812, 11833), False, 'from inference.perspective import get_horizon_line_coords, make_manual_homography_kornia, warp_homography_kornia, RandomHomography\n'), ((20198, 20251), 'os.path.join', 'os.path.join', (['args.outdir', 
'f"""{fname}_hom{hom_id}.avi"""'], {}), "(args.outdir, f'{fname}_hom{hom_id}.avi')\n", (20210, 20251), False, 'import os\n'), ((6356, 6383), 'torch.from_numpy', 'torch.from_numpy', (['cur_frame'], {}), '(cur_frame)\n', (6372, 6383), False, 'import torch\n'), ((11317, 11348), 'torch.from_numpy', 'torch.from_numpy', (['shift_mask_np'], {}), '(shift_mask_np)\n', (11333, 11348), False, 'import torch\n'), ((12598, 12642), 're.search', 're.search', (["config['take_only_latents']", 'name'], {}), "(config['take_only_latents'], name)\n", (12607, 12642), False, 'import re\n'), ((20847, 20917), 'numpy.concatenate', 'np.concatenate', (['(real_image_cropped, encoder_image, enc_frame)'], {'axis': '(2)'}), '((real_image_cropped, encoder_image, enc_frame), axis=2)\n', (20861, 20917), True, 'import numpy as np\n'), ((20961, 21025), 'numpy.concatenate', 'np.concatenate', (['(real_image_cropped, tuned_image, frame)'], {'axis': '(2)'}), '((real_image_cropped, tuned_image, frame), axis=2)\n', (20975, 21025), True, 'import numpy as np\n'), ((21069, 21135), 'numpy.concatenate', 'np.concatenate', (['(real_image_cropped, tuned2_image, frame2)'], {'axis': '(2)'}), '((real_image_cropped, tuned2_image, frame2), axis=2)\n', (21083, 21135), True, 'import numpy as np\n'), ((19648, 19718), 'numpy.concatenate', 'np.concatenate', (['(real_image_cropped, encoder_image, enc_frame)'], {'axis': '(2)'}), '((real_image_cropped, encoder_image, enc_frame), axis=2)\n', (19662, 19718), True, 'import numpy as np\n'), ((19766, 19830), 'numpy.concatenate', 'np.concatenate', (['(real_image_cropped, tuned_image, frame)'], {'axis': '(2)'}), '((real_image_cropped, tuned_image, frame), axis=2)\n', (19780, 19830), True, 'import numpy as np\n'), ((19878, 19944), 'numpy.concatenate', 'np.concatenate', (['(real_image_cropped, tuned2_image, frame2)'], {'axis': '(2)'}), '((real_image_cropped, tuned2_image, frame2), axis=2)\n', (19892, 19944), True, 'import numpy as np\n'), ((4424, 4444), 'torch.tensor', 'torch.tensor', 
(['step.z'], {}), '(step.z)\n', (4436, 4444), False, 'import torch\n'), ((4931, 4964), 'torch.from_numpy', 'torch.from_numpy', (['new_styles_flat'], {}), '(new_styles_flat)\n', (4947, 4964), False, 'import torch\n'), ((4635, 4667), 'torch.stack', 'torch.stack', (['intermediate_points'], {}), '(intermediate_points)\n', (4646, 4667), False, 'import torch\n')] |
import sys
sys.path.append("..")
sys.path.append("D:/ml_from_scratch/")
from recurrent_neural_network import RecurrentNeuralNetwork
import numpy as np
from keras.utils.np_utils import to_categorical
from optimizations_algorithms.optimizers import SGD
from rnn_keras import RNNKeras
def main(use_keras=False):
    """Train a character-level RNN on a list of names.

    Reads one name per line from ``data_path``, prepends a start token,
    pads every name with ``'#'`` to a common length, one-hot encodes the
    character ids and trains a next-character prediction model.

    Parameters
    ----------
    use_keras : bool
        If True, train the Keras-based ``RNNKeras`` wrapper; otherwise
        train the from-scratch ``RecurrentNeuralNetwork``.
    """
    start_token = " "
    pad_token = "#"
    data_path = "D:/ml_from_scratch/recurrent_neural_network/names"
    with open(data_path) as f:
        names = f.read()[:-1].split('\n')
    names = [start_token + name for name in names]
    print('number of samples:', len(names))
    MAX_LENGTH = max(map(len, names))
    print("max length:", MAX_LENGTH)
    # Build the character vocabulary; the pad token gets id 0.
    tokens = set()
    for name in names:
        tokens.update(name)
    tokens = [pad_token] + list(tokens)
    n_tokens = len(tokens)
    print('n_tokens:', n_tokens)
    token_to_id = {token: ind for ind, token in enumerate(tokens)}
    print(token_to_id[pad_token])
    def to_matrix(names, max_len=None, pad=token_to_id[pad_token], dtype=np.int32):
        """Casts a list of names into rnn-digestable padded matrix"""
        max_len = max_len or max(map(len, names))
        names_ix = np.zeros([len(names), max_len], dtype) + pad
        for i in range(len(names)):
            name_ix = list(map(token_to_id.get, names[i]))
            names_ix[i, :len(name_ix)] = name_ix
        return names_ix
    matrix_sequences = to_matrix(names)
    m, length = matrix_sequences.shape
    # One-hot encode every id sequence: (n_samples, seq_len, n_tokens).
    input_sequences = np.zeros(shape=(m, length, n_tokens))
    for i in range(m):
        input_sequences[i] = to_categorical(matrix_sequences[i], n_tokens, dtype='int32')
    if use_keras:
        # Keras model consumes the integer id sequences as input.
        train_X = matrix_sequences[:, :-1]
        # BUG FIX: train_Y was never assigned on this branch, so the
        # rnn.train(...) call below raised a NameError whenever
        # use_keras=True.  Targets are the one-hot sequences shifted one
        # step ahead, mirroring the from-scratch branch.
        # NOTE(review): confirm RNNKeras expects one-hot targets.
        train_Y = input_sequences[:, 1:, :]
    else:
        train_X = input_sequences[:, :-1, :]
        train_Y = input_sequences[:, 1:, :]
    del matrix_sequences  # train_X may hold a view; the base stays alive
    epochs = 20
    batch_size = 32
    learning_rate = 0.01
    if use_keras:
        from keras.optimizers import SGD as SGDKeras
        optimizer = SGDKeras(lr=learning_rate)
        rnn = RNNKeras(hidden_units=64, epochs=epochs, optimizer=optimizer, batch_size=batch_size)
    else:
        optimizer = SGD(alpha=learning_rate)
        rnn = RecurrentNeuralNetwork(hidden_units=64, epochs=epochs, optimizer=optimizer, batch_size=batch_size)
    rnn.train(train_X, train_Y)
if __name__ == "__main__":
    # Command-line entry point: the --keras flag switches from the
    # from-scratch RNN implementation to the Keras-based one.
    import argparse
    parser = argparse.ArgumentParser(description="A RNN program.")
    parser.add_argument("--keras", action="store_true", help="Whether use keras or not.")
    args = parser.parse_args()
main(use_keras=args.keras) | [
"sys.path.append",
"keras.optimizers.SGD",
"argparse.ArgumentParser",
"numpy.zeros",
"keras.utils.np_utils.to_categorical",
"optimizations_algorithms.optimizers.SGD",
"recurrent_neural_network.RecurrentNeuralNetwork",
"rnn_keras.RNNKeras"
] | [((11, 32), 'sys.path.append', 'sys.path.append', (['""".."""'], {}), "('..')\n", (26, 32), False, 'import sys\n'), ((33, 71), 'sys.path.append', 'sys.path.append', (['"""D:/ml_from_scratch/"""'], {}), "('D:/ml_from_scratch/')\n", (48, 71), False, 'import sys\n'), ((1654, 1691), 'numpy.zeros', 'np.zeros', ([], {'shape': '(m, length, n_tokens)'}), '(shape=(m, length, n_tokens))\n', (1662, 1691), True, 'import numpy as np\n'), ((2479, 2532), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""A RNN program."""'}), "(description='A RNN program.')\n", (2502, 2532), False, 'import argparse\n'), ((1744, 1804), 'keras.utils.np_utils.to_categorical', 'to_categorical', (['matrix_sequences[i]', 'n_tokens'], {'dtype': '"""int32"""'}), "(matrix_sequences[i], n_tokens, dtype='int32')\n", (1758, 1804), False, 'from keras.utils.np_utils import to_categorical\n'), ((2090, 2116), 'keras.optimizers.SGD', 'SGDKeras', ([], {'lr': 'learning_rate'}), '(lr=learning_rate)\n', (2098, 2116), True, 'from keras.optimizers import SGD as SGDKeras\n'), ((2131, 2220), 'rnn_keras.RNNKeras', 'RNNKeras', ([], {'hidden_units': '(64)', 'epochs': 'epochs', 'optimizer': 'optimizer', 'batch_size': 'batch_size'}), '(hidden_units=64, epochs=epochs, optimizer=optimizer, batch_size=\n batch_size)\n', (2139, 2220), False, 'from rnn_keras import RNNKeras\n'), ((2246, 2270), 'optimizations_algorithms.optimizers.SGD', 'SGD', ([], {'alpha': 'learning_rate'}), '(alpha=learning_rate)\n', (2249, 2270), False, 'from optimizations_algorithms.optimizers import SGD\n'), ((2285, 2387), 'recurrent_neural_network.RecurrentNeuralNetwork', 'RecurrentNeuralNetwork', ([], {'hidden_units': '(64)', 'epochs': 'epochs', 'optimizer': 'optimizer', 'batch_size': 'batch_size'}), '(hidden_units=64, epochs=epochs, optimizer=optimizer,\n batch_size=batch_size)\n', (2307, 2387), False, 'from recurrent_neural_network import RecurrentNeuralNetwork\n')] |
import numpy as np
class Vectorizer:
    """Binary bag-of-words vectorizer.

    ``fit`` learns a word -> column-index mapping from tokenized documents;
    ``transform`` turns documents into 0/1 indicator matrices over that
    vocabulary.  Words unseen during ``fit`` are silently ignored.
    """

    def __init__(self):
        # BUG FIX: these used to be *class* attributes, so every Vectorizer
        # instance shared (and kept extending) one global vocabulary.
        self.unique_words = dict()
        self.cur_idx = 0

    def fit(self, X):
        """Learn the vocabulary from an iterable of token lists; return self."""
        for x in X:
            for word in x:
                if word not in self.unique_words:
                    self.unique_words[word] = self.cur_idx
                    self.cur_idx += 1
        return self

    def transform(self, X):
        """Return an (n_docs, n_vocab) 0/1 indicator matrix for X."""
        output = np.zeros((len(X), len(self.unique_words)), dtype=int)
        for idx, x in enumerate(X):
            for word in x:
                if word in self.unique_words:
                    output[idx, self.unique_words[word]] = 1
        return output
class NaiveBayesClassifier:
    """Bernoulli naive Bayes with Laplace smoothing and per-class
    misclassification penalties applied in ``predict_one``."""

    def __init__(self, num_classes, alpha, penalties):
        # alpha: Laplace smoothing strength; penalties: per-class weights
        self.alpha = alpha
        self.penalties = penalties
        self._is_fitted = False
        self.num_classes = num_classes
        self.prior_ = None            # P(class); filled by fit()
        self.word_likelihood_ = None  # P(word present | class); filled by fit()
        self._Q = 2                   # number of feature values (Bernoulli: 0/1)

    def fit(self, X, y):
        """Estimate class priors and per-class word likelihoods.

        X is expected to be an (n_samples, n_words) 0/1 indicator matrix
        (as produced by Vectorizer.transform); y holds 0-based labels.
        """
        num_samples = X.shape[0]
        X_by_class = [
            [x for x, t in zip(X, y) if t == c] for c in range(self.num_classes)]
        self.prior_ = np.array([len(i) / num_samples for i in X_by_class])
        num_samples_per_class = np.array([len(x) for x in X_by_class])
        # NOTE(review): if some class has no samples, np.array([]).sum(axis=0)
        # yields a scalar 0.0 and the stacked result becomes ragged — callers
        # appear expected to guarantee at least one sample per class.
        word_counts_per_class = np.array(
            [np.array(i).sum(axis=0) for i in X_by_class])
        # Bernoulli with Laplace Smoothing
        self.word_likelihood_ = (word_counts_per_class + self.alpha) / (
            num_samples_per_class.reshape(-1, 1) + self.alpha * self._Q)
        self._is_fitted = True

    def predict_one(self, x):
        """Return penalty-weighted posterior probabilities for one 0/1
        feature vector x.

        For each class k the posterior is 1 / (1 + sum_j odds(j, k)),
        where odds(j, k) is the penalty- and prior-weighted likelihood
        ratio of competing class j against k.  Classes with zero prior
        keep probability 0.
        """
        probabilities = np.zeros(self.num_classes)
        for class_idx in range(self.num_classes):
            # classes never seen in training keep probability 0
            if class_idx >= len(self.prior_) or self.prior_[class_idx] == 0:
                continue
            temp = np.zeros(self.num_classes)
            for other_class_idx in range(self.num_classes):
                if other_class_idx >= len(self.prior_) or class_idx == other_class_idx:
                    continue
                # penalty- and prior-weighted ratio of the competing class
                t = self.penalties[other_class_idx] / self.penalties[class_idx] \
                    * (self.prior_[other_class_idx] / self.prior_[class_idx])
                # multiply in the per-feature Bernoulli likelihood ratios
                for feature_idx in range(len(x)):
                    if x[feature_idx] == 0:
                        prob = 1 - self.word_likelihood_[class_idx, feature_idx]
                        other_prob = 1 - self.word_likelihood_[other_class_idx, feature_idx]
                    else:
                        prob = self.word_likelihood_[class_idx, feature_idx]
                        other_prob = self.word_likelihood_[other_class_idx, feature_idx]
                    t *= other_prob / prob
                temp[other_class_idx] = t
            probabilities[class_idx] = 1 / (1 + temp.sum())
        return probabilities
def main(X_train, y_train, X_test, used_labels, num_classes, lambda_c, alpha):
    """Fit a penalized Bernoulli naive Bayes model and print, for every
    test document, its per-class posterior probabilities.

    Parameters
    ----------
    X_train, X_test : lists of token lists.
    y_train : list of 0-based class labels for X_train.
    used_labels : set of labels that actually occur in y_train.
    num_classes : total number of classes.
    lambda_c : per-class misclassification penalties.
    alpha : Laplace smoothing strength.
    """
    v = Vectorizer().fit(X_train)
    X_train_vectorized = v.transform(X_train)
    X_test_vectorized = v.transform(X_test)
    y_train = list(y_train)  # local copy: dummy labels may be appended below
    for j in range(num_classes):
        if j not in used_labels:
            # Give every class absent from the training data one dummy
            # all-zero sample so its prior is non-zero.
            # BUG FIX: the original called np.zeros(num_samples + 1,
            # num_words) (num_words interpreted as a dtype -> TypeError),
            # indexed the out-of-bounds column `num_words`, and never added
            # a matching label for the dummy row.
            num_samples, num_words = X_train_vectorized.shape
            new_X = np.zeros((num_samples + 1, num_words), dtype=int)
            new_X[:num_samples, :] = X_train_vectorized
            X_train_vectorized = new_X
            y_train.append(j)
    classifier = NaiveBayesClassifier(num_classes, alpha, lambda_c)
    classifier.fit(X_train_vectorized, np.array(y_train))
    for X_sample in X_test_vectorized:
        print(*classifier.predict_one(X_sample))
if __name__ == '__main__':
    # Read the problem from stdin: number of classes, per-class penalties,
    # smoothing strength, then the training and test documents.
    k = int(input())
    lambda_c = list(map(int, input().split()))
    alpha = int(input())

    X_train, y_train, used_labels = [], [], set()
    for _ in range(int(input())):
        fields = input().split()
        label = int(fields[0]) - 1      # labels are 1-based on input
        X_train.append(fields[2:])      # fields[1] (a count) is skipped
        y_train.append(label)
        used_labels.add(label)

    # Each test line starts with a count token that is dropped.
    X_test = [input().split()[1:] for _ in range(int(input()))]

    main(X_train, y_train, X_test, used_labels, k, lambda_c, alpha)
| [
"numpy.array",
"numpy.zeros"
] | [((1610, 1636), 'numpy.zeros', 'np.zeros', (['self.num_classes'], {}), '(self.num_classes)\n', (1618, 1636), True, 'import numpy as np\n'), ((3487, 3504), 'numpy.array', 'np.array', (['y_train'], {}), '(y_train)\n', (3495, 3504), True, 'import numpy as np\n'), ((1810, 1836), 'numpy.zeros', 'np.zeros', (['self.num_classes'], {}), '(self.num_classes)\n', (1818, 1836), True, 'import numpy as np\n'), ((3174, 3210), 'numpy.zeros', 'np.zeros', (['(num_samples + 1)', 'num_words'], {}), '(num_samples + 1, num_words)\n', (3182, 3210), True, 'import numpy as np\n'), ((3319, 3338), 'numpy.zeros', 'np.zeros', (['num_words'], {}), '(num_words)\n', (3327, 3338), True, 'import numpy as np\n'), ((1283, 1294), 'numpy.array', 'np.array', (['i'], {}), '(i)\n', (1291, 1294), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
from sqpdfo.runtime import *
import sqpdfo.sqpdfo_global_variables as glob
from numpy import array, zeros, concatenate, zeros_like, inf
def f_benchmark(x, prob):
    """Objective value of benchmark problem number `prob` at point `x`.

    Problems 1-7 and 10-16 are small analytic test problems (several from
    the Hock-Schittkowski collection); prob == 1000 dispatches to the
    CUTEr problem registered in the global-variables module.
    """
    if prob == 1:
        return -(5 - (x[0] - 2) ** 2 - 2 * (x[1] - 1) ** 2)
    if prob == 2:
        return 2 * x[0] ** 2 + x[1] ** 2
    if prob == 3:
        return x[0] ** 2 + x[1] ** 2 + x[2] ** 2
    if prob == 4:
        return x[0] ** 2 + x[1] ** 2 + x[2] ** 2 + x[3]
    if prob == 5:
        # Powell's function (solnp manual);
        # x* = (-1.717, 1.5957, 1.8272, -0.7636, -0.7636)
        return exp_(x[0] * x[1] * x[2] * x[3] * x[4])
    if prob == 6:
        return -(0.592 * ((exp_(1) - 1) * x[0]) / ((-0.408 * x[0] + 1) * (exp_(x[0]) - 1)) - 1)
    if prob == 7:
        # alkyl problem (http://www.gamsworld.org/global/globallib/alkyl.htm);
        # best known solution found by Baron: f* = -1.76499964633
        return x[0]
    if prob == 10:  # problem 19 from Hock-Schittkowski's collection
        return (x[0] - 10) ** 3 + (x[1] - 20) ** 3
    if prob == 11:  # problem 21 from Hock-Schittkowski's collection
        return 0.01 * x[0] ** 2 + x[1] ** 2 - 100
    if prob == 12:  # problem 35 (Beale) from Hock-Schittkowski's collection
        return 9.0 - 8 * x[0] - 6 * x[1] - 4 * x[2] + 2 * x[0] ** 2 + 2 * x[1] ** 2 + x[2] ** 2 \
            + 2 * x[0] * x[1] + 2 * x[0] * x[2]
    if prob == 13:  # problem 76 from Hock-Schittkowski's collection
        return x[0] ** 2 + 0.5 * x[1] ** 2 + x[2] ** 2 + 0.5 * x[3] ** 2 - x[0] * x[2] \
            + x[2] * x[3] - x[0] - 3 * x[1] + x[2] - x[3]
    if prob == 14:  # problem 44 from Hock-Schittkowski's collection
        return x[0] - x[1] - x[2] - x[0] * x[2] + x[0] * x[3] + x[1] * x[2] - x[1] * x[3]
    if prob in (15, 16):  # 2D Rosenbrock variants (constraints differ, objective same)
        return (1 - x[0]) ** 2 + 100 * (x[1] - x[0] ** 2) ** 2
    if prob == 1000:
        # CUTEr problem registered via the global-variables module
        return glob.get_prob_cuter().obj(x.reshape(-1))
    raise RuntimeError("Unknown Problem number: ", prob)
def c_benchmark(x, prob):
    """
    #-----------------------------------------------------------------------
    # Computation of the constraint values of benchmark problem `prob` at x.
    # Returns one column vector c = [ce; ci] with the equality constraints
    # ce stacked on top of the inequality constraints ci; inequalities are
    # expressed in the form ci(x) >= 0.
    #-----------------------------------------------------------------------
    """
    # Initialization (unconstrained problems return an empty array)
    ce = array([])
    c = array([])
    if prob == 1:
        ce = zeros(1)
        ce = x[0] + 4 * x[1] - 3
        c = ce.reshape(-1, 1)
    elif prob == 2:
        ce = zeros(1)
        ce = x[0] + x[1] - 1
        c = ce.reshape(-1, 1)
    elif prob == 3:
        ce = zeros(2)
        ce[0] = x[0] + x[1] + x[2]
        ce[1] = x[0] + 2 * x[1] + 3 * x[2] - 1
        c = ce.reshape(-1, 1)
    elif prob == 4:
        ce = zeros(3)
        ce[0] = x[0] + x[1] + x[2]
        ce[1] = x[0] + 2 * x[1] + 3 * x[2] - 1
        ce[2] = x[3] ** 3 - 1
        c = ce.reshape(-1, 1)
    elif prob == 5:
        # Powell's function (solnp manual);
        # x* = (-1.717, 1.5957, 1.8272, -0.7636, -0.7636)
        ce = zeros(3)
        ce[0] = x[0] ** 2 + x[1] ** 2 + x[2] ** 2 + x[3] ** 2 + x[4] ** 2 - 10
        ce[1] = x[1] * x[2] - 5 * x[3] * x[4]
        ce[2] = x[0] ** 3 + x[1] ** 3 + 1
        c = ce.reshape(-1, 1)
    elif prob == 6:
        pass  # problem 6 is unconstrained
    elif prob == 7:
        # alkyl problem (http://www.gamsworld.org/global/globallib/alkyl.htm);
        # best known solution f* = -1.76499964633
        ce = zeros(8)
        ce[0] = 6.3 * x[4] * x[7] + x[0] - 5.04 * x[1] - 0.35 * x[2] - x[3] - 3.36 * x[5]
        ce[1] = -0.819672131147541 * x[1] + x[4] - 0.819672131147541 * x[5]
        ce[2] = 0.98 * x[3] - x[6] * (0.01 * x[4] * x[9] + x[3])
        ce[3] = -x[1] * x[8] + 10 * x[2] + x[5]
        ce[4] = x[4] * x[11] - x[1] * (1.12 + 0.13167 * x[8] - 0.0067 * x[8] * x[8])
        ce[5] = x[7] * x[12] - 0.01 * (1.098 * x[8] - 0.038 * x[8] * x[8]) - 0.325 * x[6] - 0.57425
        ce[6] = x[9] * x[13] + 22.2 * x[10] - 35.82
        ce[7] = x[10] * x[14] - 3 * x[7] + 1.33
        c = ce.reshape(-1, 1)
    elif prob == 10:  # problem 19 from Hock-Schittkowskis collection
        ci = zeros(2)
        ci[0] = (x[0] - 5) ** 2 + (x[1] - 5) ** 2 - 100
        ci[1] = -(x[1] - 5) ** 2 - (x[0] - 6) ** 2 + 82.81
        c = concatenate((ce, ci))
        c = c.reshape(-1, 1)
    elif prob == 11:  # problem 21 from Hock-Schittkowskis collection
        ci = zeros(1)
        ci[0] = 10 * x[0] - x[1] - 10  # - x[2]
        c = concatenate((ce, ci))
        c = c.reshape(-1, 1)
    elif prob == 12:  # problem 35 (Beale) from Hock-Schittkowskis collection
        ci = zeros(1)
        ci[0] = 3 - x[0] - x[1] - 2 * x[2]
        c = concatenate((ce, ci))
        c = c.reshape(-1, 1)
    elif prob == 13:  # problem 76 from Hock-Schittkowskis collection
        ci = zeros(3)
        ci[0] = 5 - x[0] - 2 * x[1] - x[2] - x[3]
        ci[1] = 4 - 3 * x[0] - x[1] - 2 * x[2] + x[3]
        ci[2] = x[1] + 4 * x[2] - 1.5
        c = concatenate((ce, ci))
        c = c.reshape(-1, 1)
    elif prob == 14:  # problem 44 from Hock-Schittkowskis collection
        ci = zeros(6)
        ci[0] = 8 - x[0] - 2 * x[1]
        ci[1] = 12 - 4 * x[0] - x[1]
        ci[2] = 12 - 3 * x[0] - 4 * x[1]
        ci[3] = 8 - 2 * x[2] - x[3]
        ci[4] = 8 - x[2] - 2 * x[3]
        ci[5] = 5 - x[2] - x[3]
        c = concatenate((ce, ci))
        c = c.reshape(-1, 1)
    elif prob == 15:  # 2D Rosenbrock with 2 eq + 1 ineq
        # CONSISTENCY FIX: use the module-level `zeros` (imported from numpy
        # at the top of this file) instead of `np.zeros` — the name `np` is
        # never bound by this module's explicit imports.
        ce = zeros(2)
        ce[0] = x[0] ** 2 + x[1] ** 2 - 2
        ce[1] = - (x[0] - 1) ** 3 + x[1] - 1
        ci = zeros(1)
        ci[0] = - x[0] - x[1] + 2
        ce = ce.reshape(-1, 1)
        ci = ci.reshape(-1, 1)
        c = concatenate((ce, ci))
    elif prob == 16:  # 2D Rosenbrock with 1 eq + 2 ineq
        ce = zeros(1)
        ce[0] = x[0] ** 2 + x[1] ** 2 - 2
        ci = zeros(2)
        ci[0] = - x[0] - x[1] + 2
        ci[1] = - (x[0] - 1) ** 3 + x[1] - 1
        ce = ce.reshape(-1, 1)
        ci = ci.reshape(-1, 1)
        c = concatenate((ce, ci))
    elif prob == 1000:
        # CUTEr problems
        cproblem = glob.get_prob_cuter()
        (_, c) = cproblem.objcons(x.reshape(-1))
        if cproblem.m > 0:
            me = sum(cproblem.is_eq_cons)
            mi = cproblem.m - me
            li = cproblem.cl
            ui = cproblem.cu
            ce_new = []
            ci_new = []
            cnew = []
            # re-order c such that equalities come first, then inequalities;
            # inequalities are normalized to the form ci(x) >= 0
            if mi > 0:
                for i in range(0, cproblem.m):
                    if li[i] == ui[i]:  # equality constraint
                        ce_new.append(c[i] - li[i])
                    else:  # inequality constraint
                        if li[i] == -1e20 and ui[i] == 0.0:
                            ci_new.append(-c[i])  # c <= 0  ->  -c >= 0
                        elif li[i] == -1e20 and ui[i] < 1e7:
                            ci_new.append(-c[i] + ui[i])  # c <= u  ->  u - c >= 0
                        elif li[i] == 0.0 and ui[i] == 1e20:
                            ci_new.append(c[i])  # already c >= 0
                        elif li[i] > -1e7 and ui[i] == 1e20:
                            ci_new.append(c[i] - li[i])  # c >= l  ->  c - l >= 0
                        else:
                            # two-sided inequality l <= c <= u is split into
                            # up to two one-sided constraints
                            if li[i] > -1e7:
                                ci_new.append(c[i] - li[i])
                            if ui[i] < 1e7:
                                ci_new.append(-c[i] + ui[i])
                cnew = concatenate((ce_new, ci_new))
                c = cnew.reshape(-1, 1)
            else:
                c = c.reshape(-1, 1)
                if sum(li) > 0 or sum(ui) > 0:
                    print('sqpdfo_func: Warning! ce must not be zero! Check li and ui!')
    else:
        raise RuntimeError("Unknown Problem number: ", prob)
    return c
def benchmark_start_values(prob):
    """
    Return the starting point, bounds and dimensions of benchmark
    problem `prob`:
    # . x0 = starting point (orientation is problem dependent),
    # . lx, ux = lower/upper bounds on the variables (column vectors),
    # . li, ui = lower/upper bounds on the constraints (CUTEr problems
    #            only, otherwise None),
    # . n = number of variables,
    # . nb = number of variables with bounds,
    # . mi = number of inequality constraints,
    # . me = number of equality constraints.
    """
    # Set output variables
    # (ones_, length_, sum_, min_ come from the sqpdfo.runtime wildcard import)
    x0 = array([])
    lx = array([])
    ux = array([])
    li = None
    ui = None
    # dxmin = sqrt(eps);
    if prob == 1:
        n = 2
        nb = 2
        mi = 0
        me = 1
        x0 = array([[4.6], [0.0]]).T
        lx = array([1.95, - 1e+20]).reshape(-1, 1)
        ux = array([1e+20, 0.3]).reshape(-1, 1)
    elif prob == 2:
        n = 2
        nb = 0
        mi = 0
        me = 1
        x0 = array([[- 1], [2.54378]]).T
        lx = - inf * ones_(n, 1)
        ux = inf * ones_(n, 1)
    elif prob == 3:
        nb = 2
        mi = 0
        me = 2
        x0 = array([[0.0], [0.0], [0.5]]).T
        n = length_(x0)
        lx = array([- 0.5, 0.0, - inf]).reshape(-1, 1)
        ux = array([inf, inf, inf]).reshape(-1, 1)
    elif prob == 4:
        nb = 0
        mi = 0
        me = 3
        x0 = array([[1.0], [1.0], [1.0], [0.0]]).T
        n = length_(x0)
        lx = - inf * ones_(n, 1)
        ux = inf * ones_(n, 1)
    elif prob == 5:
        nb = 0
        mi = 0
        me = 3
        x0 = array([[- 2.0], [2.0], [2.0], [1.0], [1.0]]).T
        n = 5
        lx = - inf * ones_(n, 1)
        ux = inf * ones_(n, 1)
    elif prob == 6:
        n = 1
        nb = 1
        mi = 0
        me = 0
        x0 = array([[0.6]])
        lx = array([0.5]).reshape(-1, 1)
        ux = array([0.8]).reshape(-1, 1)
    elif prob == 7:  # alkyl problem found here :http://www.gamsworld.org/global/globallib/alkyl.htm
        n = 15
        nb = 14
        me = 8
        mi = 0
        x0 = array([[-0.9, 1.745, 1.2, 1.1, 3.048, 1.974, 0.893, 0.928, 8, 3.6, 1.50, 1, 1, 1, 1]]).T
        lx = array([-inf, 0, 0, 0, 0, 0, 0.85, 0.9, 3, 1.2, 1.45, 0.99, 0.99, 0.9, 0.99]).reshape(-1, 1)
        ux = array(
            [inf, 2, 1.6, 1.2, 5, 2, 0.93, 0.95, 12, 4, 1.62, 1.01010101010101, 1.01010101010101, 1.11111111111111,
             1.01010101010101]).reshape(-1, 1)
    elif prob == 10:  # problem 19 from Hock-Schittkowskis collection
        n = 2
        nb = 4
        me = 0
        mi = 2
        x0 = array([[20.1, 5.84]])
        lx = array([[13.0, 0.0]]).reshape(-1, 1)
        ux = array([[100.0, 100.0]]).reshape(-1, 1)
    elif prob == 11:  # problem 21 from Hock-Schittkowskis collection
        n = 2
        nb = 4
        me = 0
        mi = 1
        x0 = array([[-1.0, -1.0]])
        lx = array([[2.0, -50.0]]).reshape(-1, 1)
        ux = array([[50.0, 50.0]]).reshape(-1, 1)
    elif prob == 12:  # problem 35 (Beale) from HS collection
        n = 3
        nb = 3
        me = 0
        mi = 1
        x0 = array([[0.5, 0.5, 0.5]])
        lx = array([[0.0, 0.0, 0.0]]).reshape(-1, 1)
        ux = array([[1e20, 1e20, 1e20]]).reshape(-1, 1)
    elif prob == 13:  # problem 76 from Hock-Schittkowskis collection
        n = 4
        nb = 4
        me = 0
        mi = 3
        x0 = array([[0.5, 0.5, 0.5, 0.5]])
        lx = array([[0.0, 0.0, 0.0, 0.0]]).reshape(-1, 1)
        ux = array([[1e20, 1e20, 1e20, 1e20]]).reshape(-1, 1)
    elif prob == 14:  # problem 44 from Hock-Schittkowskis collection
        n = 4
        nb = 4
        me = 0
        mi = 6
        x0 = array([[0.0, 0.0, 0.0, 0.0]])
        lx = array([[0.0, 0.0, 0.0, 0.0]]).reshape(-1, 1)
        ux = array([[1e20, 1e20, 1e20, 1e20]]).reshape(-1, 1)
    elif prob == 15:
        n = 2
        nb = 4
        me = 2
        mi = 1
        x0 = array([[-1.2, 1.0]])
        lx = array([[-5.0, -5.0]]).reshape(-1, 1)
        ux = array([[10.0, 10.0]]).reshape(-1, 1)
    elif prob == 16:
        n = 2
        nb = 4
        me = 1
        mi = 2
        x0 = array([[0.3, 0.3]])
        lx = array([[-5.0, -5.0]]).reshape(-1, 1)
        ux = array([[10.0, 10.0]]).reshape(-1, 1)
    elif prob == 1000:
        # Warning : here the CUTEst interface from this website has to be
        # installed in order to use CUTEst problems :
        # https://jfowkes.github.io/pycutest/_build/html/index.html
        cproblem = glob.get_prob_cuter()
        n = cproblem.n
        m = cproblem.m
        me = sum(cproblem.is_eq_cons)
        mi = m - me
        x0 = cproblem.x0.reshape(-1, 1)
        lx = cproblem.bl.reshape(-1, 1)
        ux = cproblem.bu.reshape(-1, 1)
        li = cproblem.cl
        ui = cproblem.cu
        # count variables with at least one finite bound
        nb = sum_(min_((lx[0:n] > -inf) + (inf > ux[0:n]), 1))
        # print(cproblem.eq_cons_first)
    else:
        raise RuntimeError("Unknown Problem number: ", prob)
    return x0, lx, ux, li, ui, n, nb, mi, me
def get(prob):
    """
    Assemble benchmark problem `prob`: objective function, starting point,
    variable bounds, constraint counts, constraint function and constraint
    bounds, in the order expected by the solver driver.
    """
    x0, lx, ux, li, ui, _n, _nb, mi, me = benchmark_start_values(prob)

    def objective(x):
        return f_benchmark(x, prob)

    def constraints(x):
        return c_benchmark(x, prob)

    return objective, x0, lx, ux, me, mi, constraints, li, ui
def set_test_prob(prob):
    """Register problem `prob`'s objective and constraint functions in the
    global-variables module so the solver picks them up."""
    f_func, _x0, _lx, _ux, _me, _mi, c_func, _li, _ui = get(prob)
    glob.set_filename_f(f_func)
    glob.set_filename_cons(c_func)
| [
"sqpdfo.sqpdfo_global_variables.set_filename_f",
"numpy.zeros",
"sqpdfo.sqpdfo_global_variables.set_filename_cons",
"numpy.array",
"sqpdfo.sqpdfo_global_variables.get_prob_cuter",
"numpy.concatenate"
] | [((3263, 3272), 'numpy.array', 'array', (['[]'], {}), '([])\n', (3268, 3272), False, 'from numpy import array, zeros, concatenate, zeros_like, inf\n'), ((3281, 3290), 'numpy.array', 'array', (['[]'], {}), '([])\n', (3286, 3290), False, 'from numpy import array, zeros, concatenate, zeros_like, inf\n'), ((10041, 10050), 'numpy.array', 'array', (['[]'], {}), '([])\n', (10046, 10050), False, 'from numpy import array, zeros, concatenate, zeros_like, inf\n'), ((10060, 10069), 'numpy.array', 'array', (['[]'], {}), '([])\n', (10065, 10069), False, 'from numpy import array, zeros, concatenate, zeros_like, inf\n'), ((10079, 10088), 'numpy.array', 'array', (['[]'], {}), '([])\n', (10084, 10088), False, 'from numpy import array, zeros, concatenate, zeros_like, inf\n'), ((14981, 15008), 'sqpdfo.sqpdfo_global_variables.set_filename_f', 'glob.set_filename_f', (['f_func'], {}), '(f_func)\n', (15000, 15008), True, 'import sqpdfo.sqpdfo_global_variables as glob\n'), ((15013, 15043), 'sqpdfo.sqpdfo_global_variables.set_filename_cons', 'glob.set_filename_cons', (['c_func'], {}), '(c_func)\n', (15035, 15043), True, 'import sqpdfo.sqpdfo_global_variables as glob\n'), ((3323, 3331), 'numpy.zeros', 'zeros', (['(1)'], {}), '(1)\n', (3328, 3331), False, 'from numpy import array, zeros, concatenate, zeros_like, inf\n'), ((3429, 3437), 'numpy.zeros', 'zeros', (['(1)'], {}), '(1)\n', (3434, 3437), False, 'from numpy import array, zeros, concatenate, zeros_like, inf\n'), ((10236, 10257), 'numpy.array', 'array', (['[[4.6], [0.0]]'], {}), '([[4.6], [0.0]])\n', (10241, 10257), False, 'from numpy import array, zeros, concatenate, zeros_like, inf\n'), ((3531, 3539), 'numpy.zeros', 'zeros', (['(2)'], {}), '(2)\n', (3536, 3539), False, 'from numpy import array, zeros, concatenate, zeros_like, inf\n'), ((10273, 10294), 'numpy.array', 'array', (['[1.95, -1e+20]'], {}), '([1.95, -1e+20])\n', (10278, 10294), False, 'from numpy import array, zeros, concatenate, zeros_like, inf\n'), ((10324, 10343), 
'numpy.array', 'array', (['[1e+20, 0.3]'], {}), '([1e+20, 0.3])\n', (10329, 10343), False, 'from numpy import array, zeros, concatenate, zeros_like, inf\n'), ((10451, 10475), 'numpy.array', 'array', (['[[-1], [2.54378]]'], {}), '([[-1], [2.54378]])\n', (10456, 10475), False, 'from numpy import array, zeros, concatenate, zeros_like, inf\n'), ((3686, 3694), 'numpy.zeros', 'zeros', (['(3)'], {}), '(3)\n', (3691, 3694), False, 'from numpy import array, zeros, concatenate, zeros_like, inf\n'), ((10621, 10649), 'numpy.array', 'array', (['[[0.0], [0.0], [0.5]]'], {}), '([[0.0], [0.0], [0.5]])\n', (10626, 10649), False, 'from numpy import array, zeros, concatenate, zeros_like, inf\n'), ((3979, 3987), 'numpy.zeros', 'zeros', (['(3)'], {}), '(3)\n', (3984, 3987), False, 'from numpy import array, zeros, concatenate, zeros_like, inf\n'), ((10689, 10713), 'numpy.array', 'array', (['[-0.5, 0.0, -inf]'], {}), '([-0.5, 0.0, -inf])\n', (10694, 10713), False, 'from numpy import array, zeros, concatenate, zeros_like, inf\n'), ((10744, 10766), 'numpy.array', 'array', (['[inf, inf, inf]'], {}), '([inf, inf, inf])\n', (10749, 10766), False, 'from numpy import array, zeros, concatenate, zeros_like, inf\n'), ((10860, 10895), 'numpy.array', 'array', (['[[1.0], [1.0], [1.0], [0.0]]'], {}), '([[1.0], [1.0], [1.0], [0.0]])\n', (10865, 10895), False, 'from numpy import array, zeros, concatenate, zeros_like, inf\n'), ((11064, 11107), 'numpy.array', 'array', (['[[-2.0], [2.0], [2.0], [1.0], [1.0]]'], {}), '([[-2.0], [2.0], [2.0], [1.0], [1.0]])\n', (11069, 11107), False, 'from numpy import array, zeros, concatenate, zeros_like, inf\n'), ((11281, 11295), 'numpy.array', 'array', (['[[0.6]]'], {}), '([[0.6]])\n', (11286, 11295), False, 'from numpy import array, zeros, concatenate, zeros_like, inf\n'), ((4975, 4983), 'numpy.zeros', 'zeros', (['(8)'], {}), '(8)\n', (4980, 4983), False, 'from numpy import array, zeros, concatenate, zeros_like, inf\n'), ((5662, 5670), 'numpy.zeros', 'zeros', (['(2)'], 
{}), '(2)\n', (5667, 5670), False, 'from numpy import array, zeros, concatenate, zeros_like, inf\n'), ((5798, 5819), 'numpy.concatenate', 'concatenate', (['(ce, ci)'], {}), '((ce, ci))\n', (5809, 5819), False, 'from numpy import array, zeros, concatenate, zeros_like, inf\n'), ((11309, 11321), 'numpy.array', 'array', (['[0.5]'], {}), '([0.5])\n', (11314, 11321), False, 'from numpy import array, zeros, concatenate, zeros_like, inf\n'), ((11350, 11362), 'numpy.array', 'array', (['[0.8]'], {}), '([0.8])\n', (11355, 11362), False, 'from numpy import array, zeros, concatenate, zeros_like, inf\n'), ((11553, 11643), 'numpy.array', 'array', (['[[-0.9, 1.745, 1.2, 1.1, 3.048, 1.974, 0.893, 0.928, 8, 3.6, 1.5, 1, 1, 1, 1]]'], {}), '([[-0.9, 1.745, 1.2, 1.1, 3.048, 1.974, 0.893, 0.928, 8, 3.6, 1.5, 1, \n 1, 1, 1]])\n', (11558, 11643), False, 'from numpy import array, zeros, concatenate, zeros_like, inf\n'), ((12072, 12093), 'numpy.array', 'array', (['[[20.1, 5.84]]'], {}), '([[20.1, 5.84]])\n', (12077, 12093), False, 'from numpy import array, zeros, concatenate, zeros_like, inf\n'), ((5933, 5941), 'numpy.zeros', 'zeros', (['(1)'], {}), '(1)\n', (5938, 5941), False, 'from numpy import array, zeros, concatenate, zeros_like, inf\n'), ((6002, 6023), 'numpy.concatenate', 'concatenate', (['(ce, ci)'], {}), '((ce, ci))\n', (6013, 6023), False, 'from numpy import array, zeros, concatenate, zeros_like, inf\n'), ((11655, 11731), 'numpy.array', 'array', (['[-inf, 0, 0, 0, 0, 0, 0.85, 0.9, 3, 1.2, 1.45, 0.99, 0.99, 0.9, 0.99]'], {}), '([-inf, 0, 0, 0, 0, 0, 0.85, 0.9, 3, 1.2, 1.45, 0.99, 0.99, 0.9, 0.99])\n', (11660, 11731), False, 'from numpy import array, zeros, concatenate, zeros_like, inf\n'), ((11760, 11893), 'numpy.array', 'array', (['[inf, 2, 1.6, 1.2, 5, 2, 0.93, 0.95, 12, 4, 1.62, 1.01010101010101, \n 1.01010101010101, 1.11111111111111, 1.01010101010101]'], {}), '([inf, 2, 1.6, 1.2, 5, 2, 0.93, 0.95, 12, 4, 1.62, 1.01010101010101, \n 1.01010101010101, 1.11111111111111, 
1.01010101010101])\n', (11765, 11893), False, 'from numpy import array, zeros, concatenate, zeros_like, inf\n'), ((12337, 12358), 'numpy.array', 'array', (['[[-1.0, -1.0]]'], {}), '([[-1.0, -1.0]])\n', (12342, 12358), False, 'from numpy import array, zeros, concatenate, zeros_like, inf\n'), ((6145, 6153), 'numpy.zeros', 'zeros', (['(1)'], {}), '(1)\n', (6150, 6153), False, 'from numpy import array, zeros, concatenate, zeros_like, inf\n'), ((6209, 6230), 'numpy.concatenate', 'concatenate', (['(ce, ci)'], {}), '((ce, ci))\n', (6220, 6230), False, 'from numpy import array, zeros, concatenate, zeros_like, inf\n'), ((12107, 12127), 'numpy.array', 'array', (['[[13.0, 0.0]]'], {}), '([[13.0, 0.0]])\n', (12112, 12127), False, 'from numpy import array, zeros, concatenate, zeros_like, inf\n'), ((12156, 12179), 'numpy.array', 'array', (['[[100.0, 100.0]]'], {}), '([[100.0, 100.0]])\n', (12161, 12179), False, 'from numpy import array, zeros, concatenate, zeros_like, inf\n'), ((12593, 12617), 'numpy.array', 'array', (['[[0.5, 0.5, 0.5]]'], {}), '([[0.5, 0.5, 0.5]])\n', (12598, 12617), False, 'from numpy import array, zeros, concatenate, zeros_like, inf\n'), ((6344, 6352), 'numpy.zeros', 'zeros', (['(3)'], {}), '(3)\n', (6349, 6352), False, 'from numpy import array, zeros, concatenate, zeros_like, inf\n'), ((6507, 6528), 'numpy.concatenate', 'concatenate', (['(ce, ci)'], {}), '((ce, ci))\n', (6518, 6528), False, 'from numpy import array, zeros, concatenate, zeros_like, inf\n'), ((12372, 12393), 'numpy.array', 'array', (['[[2.0, -50.0]]'], {}), '([[2.0, -50.0]])\n', (12377, 12393), False, 'from numpy import array, zeros, concatenate, zeros_like, inf\n'), ((12422, 12443), 'numpy.array', 'array', (['[[50.0, 50.0]]'], {}), '([[50.0, 50.0]])\n', (12427, 12443), False, 'from numpy import array, zeros, concatenate, zeros_like, inf\n'), ((12869, 12898), 'numpy.array', 'array', (['[[0.5, 0.5, 0.5, 0.5]]'], {}), '([[0.5, 0.5, 0.5, 0.5]])\n', (12874, 12898), False, 'from numpy import 
array, zeros, concatenate, zeros_like, inf\n'), ((6642, 6650), 'numpy.zeros', 'zeros', (['(6)'], {}), '(6)\n', (6647, 6650), False, 'from numpy import array, zeros, concatenate, zeros_like, inf\n'), ((6881, 6902), 'numpy.concatenate', 'concatenate', (['(ce, ci)'], {}), '((ce, ci))\n', (6892, 6902), False, 'from numpy import array, zeros, concatenate, zeros_like, inf\n'), ((12631, 12655), 'numpy.array', 'array', (['[[0.0, 0.0, 0.0]]'], {}), '([[0.0, 0.0, 0.0]])\n', (12636, 12655), False, 'from numpy import array, zeros, concatenate, zeros_like, inf\n'), ((12684, 12714), 'numpy.array', 'array', (['[[1e+20, 1e+20, 1e+20]]'], {}), '([[1e+20, 1e+20, 1e+20]])\n', (12689, 12714), False, 'from numpy import array, zeros, concatenate, zeros_like, inf\n'), ((13161, 13190), 'numpy.array', 'array', (['[[0.0, 0.0, 0.0, 0.0]]'], {}), '([[0.0, 0.0, 0.0, 0.0]])\n', (13166, 13190), False, 'from numpy import array, zeros, concatenate, zeros_like, inf\n'), ((7235, 7256), 'numpy.concatenate', 'concatenate', (['(ce, ci)'], {}), '((ce, ci))\n', (7246, 7256), False, 'from numpy import array, zeros, concatenate, zeros_like, inf\n'), ((12912, 12941), 'numpy.array', 'array', (['[[0.0, 0.0, 0.0, 0.0]]'], {}), '([[0.0, 0.0, 0.0, 0.0]])\n', (12917, 12941), False, 'from numpy import array, zeros, concatenate, zeros_like, inf\n'), ((12970, 13007), 'numpy.array', 'array', (['[[1e+20, 1e+20, 1e+20, 1e+20]]'], {}), '([[1e+20, 1e+20, 1e+20, 1e+20]])\n', (12975, 13007), False, 'from numpy import array, zeros, concatenate, zeros_like, inf\n'), ((13404, 13424), 'numpy.array', 'array', (['[[-1.2, 1.0]]'], {}), '([[-1.2, 1.0]])\n', (13409, 13424), False, 'from numpy import array, zeros, concatenate, zeros_like, inf\n'), ((7560, 7581), 'numpy.concatenate', 'concatenate', (['(ce, ci)'], {}), '((ce, ci))\n', (7571, 7581), False, 'from numpy import array, zeros, concatenate, zeros_like, inf\n'), ((13204, 13233), 'numpy.array', 'array', (['[[0.0, 0.0, 0.0, 0.0]]'], {}), '([[0.0, 0.0, 0.0, 0.0]])\n', (13209, 
13233), False, 'from numpy import array, zeros, concatenate, zeros_like, inf\n'), ((13262, 13299), 'numpy.array', 'array', (['[[1e+20, 1e+20, 1e+20, 1e+20]]'], {}), '([[1e+20, 1e+20, 1e+20, 1e+20]])\n', (13267, 13299), False, 'from numpy import array, zeros, concatenate, zeros_like, inf\n'), ((13618, 13637), 'numpy.array', 'array', (['[[0.3, 0.3]]'], {}), '([[0.3, 0.3]])\n', (13623, 13637), False, 'from numpy import array, zeros, concatenate, zeros_like, inf\n'), ((2856, 2877), 'sqpdfo.sqpdfo_global_variables.get_prob_cuter', 'glob.get_prob_cuter', ([], {}), '()\n', (2875, 2877), True, 'import sqpdfo.sqpdfo_global_variables as glob\n'), ((7650, 7671), 'sqpdfo.sqpdfo_global_variables.get_prob_cuter', 'glob.get_prob_cuter', ([], {}), '()\n', (7669, 7671), True, 'import sqpdfo.sqpdfo_global_variables as glob\n'), ((13438, 13459), 'numpy.array', 'array', (['[[-5.0, -5.0]]'], {}), '([[-5.0, -5.0]])\n', (13443, 13459), False, 'from numpy import array, zeros, concatenate, zeros_like, inf\n'), ((13488, 13509), 'numpy.array', 'array', (['[[10.0, 10.0]]'], {}), '([[10.0, 10.0]])\n', (13493, 13509), False, 'from numpy import array, zeros, concatenate, zeros_like, inf\n'), ((14015, 14036), 'sqpdfo.sqpdfo_global_variables.get_prob_cuter', 'glob.get_prob_cuter', ([], {}), '()\n', (14034, 14036), True, 'import sqpdfo.sqpdfo_global_variables as glob\n'), ((13651, 13672), 'numpy.array', 'array', (['[[-5.0, -5.0]]'], {}), '([[-5.0, -5.0]])\n', (13656, 13672), False, 'from numpy import array, zeros, concatenate, zeros_like, inf\n'), ((13701, 13722), 'numpy.array', 'array', (['[[10.0, 10.0]]'], {}), '([[10.0, 10.0]])\n', (13706, 13722), False, 'from numpy import array, zeros, concatenate, zeros_like, inf\n'), ((9388, 9417), 'numpy.concatenate', 'concatenate', (['(ce_new, ci_new)'], {}), '((ce_new, ci_new))\n', (9399, 9417), False, 'from numpy import array, zeros, concatenate, zeros_like, inf\n')] |
import os
import numpy
import math
import cv2
# Source images, their ground-truth text-box files, and the output folder for
# the rescaled copies.  NOTE(review): input paths are hard-coded to one machine.
path = '/yyong119/Documents/image'
gt_path = '/yyong119/Documents/label'
out_path = 're_image'
if not os.path.exists(out_path):
    os.makedirs(out_path)
# Process (at most) the first 100 images, in sorted file-name order.
files = os.listdir(path)
files.sort()
files = files[:100]
for file in files:
    _, basename = os.path.split(file)
    # Skip non-image files.  (After .lower() the 'JPG'/'PNG' entries are
    # redundant but harmless.)
    if basename.lower().split('.')[-1] not in ['jpg', 'png', 'JPG', 'PNG']:
        continue
    stem, ext = os.path.splitext(basename)
    # Ground truth for image <stem> lives in gt_<stem>.txt: one comma-separated
    # quadrilateral (8 coordinates) per line.
    gt_file = os.path.join(gt_path, 'gt_' + stem + '.txt')
    img_path = os.path.join(path, file)
    print(img_path)
    img = cv2.imread(img_path)
    img_size = img.shape
    # Rescale so the short side becomes 600 px, capping the long side at 1200 px.
    im_size_min = numpy.min(img_size[0:2])
    im_size_max = numpy.max(img_size[0:2])
    im_scale = float(600) / float(im_size_min)
    if numpy.round(im_scale * im_size_max) > 1200:
        im_scale = float(1200) / float(im_size_max)
    re_im = cv2.resize(img, None, None, fx = im_scale, fy = im_scale, interpolation = cv2.INTER_LINEAR)
    re_size = re_im.shape
    cv2.imwrite(os.path.join(out_path, stem) + '.jpg', re_im)
    with open(gt_file, 'r') as f:
        lines = f.readlines()
    for line in lines:
        splitted_line = line.strip().lower().split(',')
        # Quadrilateral corners, scaled from original to resized coordinates.
        pt_x = numpy.zeros((4, 1))
        pt_y = numpy.zeros((4, 1))
        pt_x[0, 0] = int(float(splitted_line[0]) / img_size[1] * re_size[1])
        pt_y[0, 0] = int(float(splitted_line[1]) / img_size[0] * re_size[0])
        pt_x[1, 0] = int(float(splitted_line[2]) / img_size[1] * re_size[1])
        pt_y[1, 0] = int(float(splitted_line[3]) / img_size[0] * re_size[0])
        pt_x[2, 0] = int(float(splitted_line[4]) / img_size[1] * re_size[1])
        pt_y[2, 0] = int(float(splitted_line[5]) / img_size[0] * re_size[0])
        pt_x[3, 0] = int(float(splitted_line[6]) / img_size[1] * re_size[1])
        pt_y[3, 0] = int(float(splitted_line[7]) / img_size[0] * re_size[0])
        # Sort corners by x; indices 0/1 are the two leftmost points, 2/3 the
        # two rightmost.  Then pick the top point first on each side.
        ind_x = numpy.argsort(pt_x, axis=0)
        pt_x = pt_x[ind_x]
        pt_y = pt_y[ind_x]
        if pt_y[0] < pt_y[1]:
            pt1 = (pt_x[0], pt_y[0])
            pt3 = (pt_x[1], pt_y[1])
        else:
            pt1 = (pt_x[1], pt_y[1])
            pt3 = (pt_x[0], pt_y[0])
        if pt_y[2] < pt_y[3]:
            pt2 = (pt_x[2], pt_y[2])
            pt4 = (pt_x[3], pt_y[3])
        else:
            pt2 = (pt_x[3], pt_y[3])
            pt4 = (pt_x[2], pt_y[2])
        # Axis-aligned bounding box of the quadrilateral, clamped to the image.
        xmin = max(int(min(pt1[0], pt2[0])), 0)
        ymin = max(int(min(pt1[1], pt2[1])), 0)
        xmax = min(int(max(pt2[0], pt4[0])), re_size[1] - 1)
        ymax = min(int(max(pt3[1], pt4[1])), re_size[0] - 1)
        width = xmax - xmin
        height = ymax - ymin
        # reimplement
        # Split the box into vertical strips 16 px wide (CTPN-style anchor
        # slices), aligned to the 16-px grid.
        step = 16.0
        x_left = []
        x_right = []
        x_left.append(xmin)
        x_left_start = int(math.ceil(xmin / 16.0) * 16.0)
        if x_left_start == xmin:
            x_left_start = xmin + 16
        for i in numpy.arange(x_left_start, xmax, 16):
            x_left.append(i)
        x_left = numpy.array(x_left)
        x_right.append(x_left_start - 1)
        for i in range(1, len(x_left) - 1):
            x_right.append(x_left[i] + 15)
        x_right.append(xmax)
        x_right = numpy.array(x_right)
        # Drop degenerate (zero-width) strips.
        idx = numpy.where(x_left == x_right)
        x_left = numpy.delete(x_left, idx, axis=0)
        x_right = numpy.delete(x_right, idx, axis=0)
        if not os.path.exists('label_tmp'):
            os.makedirs('label_tmp')
        # Append one "text <x_left> <ymin> <x_right> <ymax>" line per strip.
        # NOTE(review): mode 'a' accumulates across runs; label_tmp is never
        # cleared first -- confirm that is intended.
        with open(os.path.join('label_tmp', stem) + '.txt', 'a') as f:
            for i in range(len(x_left)):
                f.writelines("text\t")
                f.writelines(str(int(x_left[i])))
                f.writelines("\t")
                f.writelines(str(int(ymin)))
                f.writelines("\t")
                f.writelines(str(int(x_right[i])))
                f.writelines("\t")
                f.writelines(str(int(ymax)))
                f.writelines("\n")
| [
"numpy.delete",
"os.makedirs",
"math.ceil",
"os.path.exists",
"numpy.zeros",
"numpy.argsort",
"cv2.imread",
"numpy.min",
"numpy.max",
"numpy.arange",
"os.path.splitext",
"numpy.array",
"numpy.where",
"numpy.round",
"os.path.split",
"os.path.join",
"os.listdir",
"cv2.resize"
] | [((209, 225), 'os.listdir', 'os.listdir', (['path'], {}), '(path)\n', (219, 225), False, 'import os\n'), ((149, 173), 'os.path.exists', 'os.path.exists', (['out_path'], {}), '(out_path)\n', (163, 173), False, 'import os\n'), ((179, 200), 'os.makedirs', 'os.makedirs', (['out_path'], {}), '(out_path)\n', (190, 200), False, 'import os\n'), ((296, 315), 'os.path.split', 'os.path.split', (['file'], {}), '(file)\n', (309, 315), False, 'import os\n'), ((425, 451), 'os.path.splitext', 'os.path.splitext', (['basename'], {}), '(basename)\n', (441, 451), False, 'import os\n'), ((466, 510), 'os.path.join', 'os.path.join', (['gt_path', "('gt_' + stem + '.txt')"], {}), "(gt_path, 'gt_' + stem + '.txt')\n", (478, 510), False, 'import os\n'), ((526, 550), 'os.path.join', 'os.path.join', (['path', 'file'], {}), '(path, file)\n', (538, 550), False, 'import os\n'), ((581, 601), 'cv2.imread', 'cv2.imread', (['img_path'], {}), '(img_path)\n', (591, 601), False, 'import cv2\n'), ((645, 669), 'numpy.min', 'numpy.min', (['img_size[0:2]'], {}), '(img_size[0:2])\n', (654, 669), False, 'import numpy\n'), ((688, 712), 'numpy.max', 'numpy.max', (['img_size[0:2]'], {}), '(img_size[0:2])\n', (697, 712), False, 'import numpy\n'), ((876, 966), 'cv2.resize', 'cv2.resize', (['img', 'None', 'None'], {'fx': 'im_scale', 'fy': 'im_scale', 'interpolation': 'cv2.INTER_LINEAR'}), '(img, None, None, fx=im_scale, fy=im_scale, interpolation=cv2.\n INTER_LINEAR)\n', (886, 966), False, 'import cv2\n'), ((768, 803), 'numpy.round', 'numpy.round', (['(im_scale * im_size_max)'], {}), '(im_scale * im_size_max)\n', (779, 803), False, 'import numpy\n'), ((1215, 1234), 'numpy.zeros', 'numpy.zeros', (['(4, 1)'], {}), '((4, 1))\n', (1226, 1234), False, 'import numpy\n'), ((1250, 1269), 'numpy.zeros', 'numpy.zeros', (['(4, 1)'], {}), '((4, 1))\n', (1261, 1269), False, 'import numpy\n'), ((1903, 1930), 'numpy.argsort', 'numpy.argsort', (['pt_x'], {'axis': '(0)'}), '(pt_x, axis=0)\n', (1916, 1930), False, 'import 
numpy\n'), ((2905, 2941), 'numpy.arange', 'numpy.arange', (['x_left_start', 'xmax', '(16)'], {}), '(x_left_start, xmax, 16)\n', (2917, 2941), False, 'import numpy\n'), ((2989, 3008), 'numpy.array', 'numpy.array', (['x_left'], {}), '(x_left)\n', (3000, 3008), False, 'import numpy\n'), ((3185, 3205), 'numpy.array', 'numpy.array', (['x_right'], {}), '(x_right)\n', (3196, 3205), False, 'import numpy\n'), ((3221, 3251), 'numpy.where', 'numpy.where', (['(x_left == x_right)'], {}), '(x_left == x_right)\n', (3232, 3251), False, 'import numpy\n'), ((3269, 3302), 'numpy.delete', 'numpy.delete', (['x_left', 'idx'], {'axis': '(0)'}), '(x_left, idx, axis=0)\n', (3281, 3302), False, 'import numpy\n'), ((3321, 3355), 'numpy.delete', 'numpy.delete', (['x_right', 'idx'], {'axis': '(0)'}), '(x_right, idx, axis=0)\n', (3333, 3355), False, 'import numpy\n'), ((1010, 1038), 'os.path.join', 'os.path.join', (['out_path', 'stem'], {}), '(out_path, stem)\n', (1022, 1038), False, 'import os\n'), ((3372, 3399), 'os.path.exists', 'os.path.exists', (['"""label_tmp"""'], {}), "('label_tmp')\n", (3386, 3399), False, 'import os\n'), ((3413, 3437), 'os.makedirs', 'os.makedirs', (['"""label_tmp"""'], {}), "('label_tmp')\n", (3424, 3437), False, 'import os\n'), ((2787, 2809), 'math.ceil', 'math.ceil', (['(xmin / 16.0)'], {}), '(xmin / 16.0)\n', (2796, 2809), False, 'import math\n'), ((3456, 3487), 'os.path.join', 'os.path.join', (['"""label_tmp"""', 'stem'], {}), "('label_tmp', stem)\n", (3468, 3487), False, 'import os\n')] |
import helpers
import numpy as np
import scipy
import scipy.sparse
import sys
import math
import configparser as cp
import Visualize as vis
import os
def findclusters(U,cluster_center_vectors):
    """Assign every node (row of ``U``) to the most cosine-similar center.

    Parameters
    ----------
    U : numpy.ndarray, shape (n_nodes, n_features)
        Embedding matrix, one row per node.
    cluster_center_vectors : sequence of numpy.ndarray
        Current cluster centers, each of length ``n_features``.

    Returns
    -------
    list of list of int
        ``result[c]`` contains the indices of the nodes assigned to center
        ``c``.  A node with zero similarity to every center falls back to a
        randomly chosen cluster.
    """
    cdict = [[] for _ in range(len(cluster_center_vectors))]
    no_of_nodes = U.shape[0]
    for ii in range(no_of_nodes):
        mxsim = 0
        # Random fallback so that zero-similarity nodes still land somewhere.
        cluster_c = np.random.randint(len(cluster_center_vectors))
        for jj in range(len(cluster_center_vectors)):
            dp = np.sum(U[ii] * cluster_center_vectors[jj])
            m1 = np.sum(U[ii] * U[ii])
            m2 = np.sum(cluster_center_vectors[jj] * cluster_center_vectors[jj])
            # Cosine similarity; the epsilon guards against a zero vector.
            tl = dp / (np.sqrt(m1 * m2) + math.pow(10, -6))
            if tl > mxsim:
                mxsim = tl
                cluster_c = jj
        cdict[cluster_c].append(ii)
    return cdict
def updatecenters(U,cdict):
    """Recompute each cluster center as the mean embedding of its members.

    Parameters
    ----------
    U : numpy.ndarray, shape (n_nodes, n_features)
        Embedding matrix, one row per node.
    cdict : list of list of int
        Node indices per cluster, as produced by ``findclusters``.

    Returns
    -------
    list of numpy.ndarray
        One averaged center vector per cluster.
    """
    centers = []
    for members in cdict:
        total = np.full([U.shape[1]], 0.0)
        for node_idx in members:
            total = total + U[node_idx]
        centers.append(total / len(members))
    return centers
def sort_closest_first(U,clist,cluster_center_vectors):
    """Order each cluster's members by decreasing cosine similarity to its center.

    Parameters
    ----------
    U : numpy.ndarray, shape (n_nodes, n_features)
        Embedding matrix, one row per node.
    clist : list of list of int
        Node indices per cluster.
    cluster_center_vectors : sequence of numpy.ndarray
        Center vector of every cluster.

    Returns
    -------
    list of numpy.ndarray
        For every cluster, its member indices sorted closest-to-center first.
    """
    ordered = []
    for ci in range(len(cluster_center_vectors)):
        center = cluster_center_vectors[ci]
        members = clist[ci]
        scores = []
        for node_idx in members:
            row = U[node_idx]
            dot = np.sum(row * center)
            norm_node = np.sum(row * row)
            norm_center = np.sum(center * center)
            # Cosine similarity; the epsilon guards against a zero vector.
            scores.append(dot / (np.sqrt(norm_node * norm_center) + math.pow(10, -6)))
        # Positions sorted by descending score, then mapped to node indices.
        order = np.flip(np.argsort(scores))
        mapped = order.copy()
        for slot, pos in enumerate(order):
            mapped[slot] = members[pos]
        ordered.append(mapped)
    return ordered
def main():
    """Cluster node embeddings with cosine-similarity k-means and visualize.

    Prompts for the nearest-neighbour setting ('3', '5', anything else means
    10) and the number of clusters, loads the matching embedding matrices
    (B + C when ``sys.argv[1] == 'U'``, otherwise W), runs a few refinement
    iterations, sorts each cluster closest-first, maps row indices back to
    external ids, and hands the clusters to the visualizer.
    """
    ip = input('Please enter the nearest neighbors k -- ')
    if sys.argv[1]=='U':
        if ip=='5':
            B = (scipy.sparse.load_npz('./data/B_5.npz')).todense()
            C = (scipy.sparse.load_npz('./data/C_5.npz')).todense()
        elif ip == '3':
            B = (scipy.sparse.load_npz('./data/B_3.npz')).todense()
            C = (scipy.sparse.load_npz('./data/C_3.npz')).todense()
        else:
            B = (scipy.sparse.load_npz('./data/B_10.npz')).todense()
            C = (scipy.sparse.load_npz('./data/C_10.npz')).todense()
        B = np.array(B)
        C = np.array(C)
        U = B+C
    else:
        if ip=='5':
            W = (scipy.sparse.load_npz('./data/W_5.npz')).todense()
        elif ip=='3':
            W = (scipy.sparse.load_npz('./data/W_3.npz')).todense()
        else:
            W = (scipy.sparse.load_npz('./data/W_10.npz')).todense()
        W = np.array(W)
        U = W
    fname = 'IdxIdsMap.txt'
    # Bug fix: the original referenced undefined 'Helper'; the module is
    # imported as 'helpers'.  (Assumes helpers exposes load() -- confirm.)
    idx2id = helpers.load(fname)
    cluster_centers = []
    cluster_center_vectors = []
    no_of_clusters = input("Please enter the number of clusters required -- ")
    no_of_clusters = int(no_of_clusters)
    # Initialise centers from randomly chosen rows of U.
    for ii in range(no_of_clusters):
        center = np.random.randint(U.shape[0])
        cluster_centers.append(center)
        cluster_center_vectors.append(U[center])
    # Three assign/update refinement rounds (its counts 4 down to 2).
    its=4
    clist = findclusters(U,cluster_center_vectors)
    while its!=1:
        cluster_center_vectors = updatecenters(U,clist)
        clist = findclusters(U,cluster_center_vectors)
        its-=1
    rlist = sort_closest_first(U,clist,cluster_center_vectors)
    # Translate internal row indices to external node ids.
    for ii in range(len(rlist)):
        for jj in range(len(rlist[ii])):
            rlist[ii][jj] = idx2id[rlist[ii][jj]]
    cluster_dict = {}
    for ii in range(len(rlist)):
        cluster_dict[str(ii)] = list(rlist[ii])
    config = cp.ConfigParser()
    config.read(os.getcwd()+'/config.ini')
    dpath = config['TASK2']['devset_path']
    vis.showclusters(cluster_dict,dpath)
if __name__ == '__main__':
    main()
| [
"numpy.full",
"numpy.sum",
"math.pow",
"os.getcwd",
"scipy.sparse.load_npz",
"numpy.argsort",
"numpy.random.randint",
"numpy.array",
"configparser.ConfigParser",
"Visualize.showclusters",
"numpy.sqrt"
] | [((3618, 3635), 'configparser.ConfigParser', 'cp.ConfigParser', ([], {}), '()\n', (3633, 3635), True, 'import configparser as cp\n'), ((3740, 3777), 'Visualize.showclusters', 'vis.showclusters', (['cluster_dict', 'dpath'], {}), '(cluster_dict, dpath)\n', (3756, 3777), True, 'import Visualize as vis\n'), ((932, 958), 'numpy.full', 'np.full', (['[U.shape[1]]', '(0.0)'], {}), '([U.shape[1]], 0.0)\n', (939, 958), True, 'import numpy as np\n'), ((2339, 2350), 'numpy.array', 'np.array', (['B'], {}), '(B)\n', (2347, 2350), True, 'import numpy as np\n'), ((2363, 2374), 'numpy.array', 'np.array', (['C'], {}), '(C)\n', (2371, 2374), True, 'import numpy as np\n'), ((2675, 2686), 'numpy.array', 'np.array', (['W'], {}), '(W)\n', (2683, 2686), True, 'import numpy as np\n'), ((2996, 3025), 'numpy.random.randint', 'np.random.randint', (['U.shape[0]'], {}), '(U.shape[0])\n', (3013, 3025), True, 'import numpy as np\n'), ((489, 531), 'numpy.sum', 'np.sum', (['(U[ii] * cluster_center_vectors[jj])'], {}), '(U[ii] * cluster_center_vectors[jj])\n', (495, 531), True, 'import numpy as np\n'), ((547, 568), 'numpy.sum', 'np.sum', (['(U[ii] * U[ii])'], {}), '(U[ii] * U[ii])\n', (553, 568), True, 'import numpy as np\n'), ((584, 647), 'numpy.sum', 'np.sum', (['(cluster_center_vectors[jj] * cluster_center_vectors[jj])'], {}), '(cluster_center_vectors[jj] * cluster_center_vectors[jj])\n', (590, 647), True, 'import numpy as np\n'), ((1319, 1372), 'numpy.sum', 'np.sum', (['(U[clist[ii][jj]] * cluster_center_vectors[ii])'], {}), '(U[clist[ii][jj]] * cluster_center_vectors[ii])\n', (1325, 1372), True, 'import numpy as np\n'), ((1388, 1431), 'numpy.sum', 'np.sum', (['(U[clist[ii][jj]] * U[clist[ii][jj]])'], {}), '(U[clist[ii][jj]] * U[clist[ii][jj]])\n', (1394, 1431), True, 'import numpy as np\n'), ((1447, 1510), 'numpy.sum', 'np.sum', (['(cluster_center_vectors[ii] * cluster_center_vectors[ii])'], {}), '(cluster_center_vectors[ii] * cluster_center_vectors[ii])\n', (1453, 1510), True, 'import 
numpy as np\n'), ((1613, 1631), 'numpy.argsort', 'np.argsort', (['cscore'], {}), '(cscore)\n', (1623, 1631), True, 'import numpy as np\n'), ((3652, 3663), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (3661, 3663), False, 'import os\n'), ((667, 683), 'numpy.sqrt', 'np.sqrt', (['(m1 * m2)'], {}), '(m1 * m2)\n', (674, 683), True, 'import numpy as np\n'), ((682, 698), 'math.pow', 'math.pow', (['(10)', '(-6)'], {}), '(10, -6)\n', (690, 698), False, 'import math\n'), ((1530, 1546), 'numpy.sqrt', 'np.sqrt', (['(m1 * m2)'], {}), '(m1 * m2)\n', (1537, 1546), True, 'import numpy as np\n'), ((1545, 1561), 'math.pow', 'math.pow', (['(10)', '(-6)'], {}), '(10, -6)\n', (1553, 1561), False, 'import math\n'), ((1896, 1935), 'scipy.sparse.load_npz', 'scipy.sparse.load_npz', (['"""./data/B_5.npz"""'], {}), "('./data/B_5.npz')\n", (1917, 1935), False, 'import scipy\n'), ((1964, 2003), 'scipy.sparse.load_npz', 'scipy.sparse.load_npz', (['"""./data/C_5.npz"""'], {}), "('./data/C_5.npz')\n", (1985, 2003), False, 'import scipy\n'), ((2439, 2478), 'scipy.sparse.load_npz', 'scipy.sparse.load_npz', (['"""./data/W_5.npz"""'], {}), "('./data/W_5.npz')\n", (2460, 2478), False, 'import scipy\n'), ((2056, 2095), 'scipy.sparse.load_npz', 'scipy.sparse.load_npz', (['"""./data/B_3.npz"""'], {}), "('./data/B_3.npz')\n", (2077, 2095), False, 'import scipy\n'), ((2124, 2163), 'scipy.sparse.load_npz', 'scipy.sparse.load_npz', (['"""./data/C_3.npz"""'], {}), "('./data/C_3.npz')\n", (2145, 2163), False, 'import scipy\n'), ((2206, 2246), 'scipy.sparse.load_npz', 'scipy.sparse.load_npz', (['"""./data/B_10.npz"""'], {}), "('./data/B_10.npz')\n", (2227, 2246), False, 'import scipy\n'), ((2275, 2315), 'scipy.sparse.load_npz', 'scipy.sparse.load_npz', (['"""./data/C_10.npz"""'], {}), "('./data/C_10.npz')\n", (2296, 2315), False, 'import scipy\n'), ((2529, 2568), 'scipy.sparse.load_npz', 'scipy.sparse.load_npz', (['"""./data/W_3.npz"""'], {}), "('./data/W_3.npz')\n", (2550, 2568), False, 'import scipy\n'), 
((2611, 2651), 'scipy.sparse.load_npz', 'scipy.sparse.load_npz', (['"""./data/W_10.npz"""'], {}), "('./data/W_10.npz')\n", (2632, 2651), False, 'import scipy\n')] |
from __future__ import division
from __future__ import print_function
import numpy as np
import argparse
import torch
import random
import torch.nn.functional as F
import torch.nn as nn
from utils import load_data
parser = argparse.ArgumentParser()
# Flag specifications, registered in a single pass below: boolean switches
# first, then the numeric hyper-parameters.
_FLAG_SPECS = [
    ('--no-cuda', dict(action='store_true', default=False, help='Disables CUDA training.')),
    ('--fastmode', dict(action='store_true', default=False, help='Validate during training pass.')),
    ('--sparse', dict(action='store_true', default=False, help='GAT with sparse version or not.')),
    ('--seed', dict(type=int, default=72, help='Random seed.')),
    ('--epochs', dict(type=int, default=10000, help='Number of epochs to train.')),
    ('--lr', dict(type=float, default=0.005, help='Initial learning rate.')),
    ('--weight_decay', dict(type=float, default=5e-4, help='Weight decay (L2 loss on parameters).')),
    ('--hidden', dict(type=int, default=8, help='Number of hidden units.')),
    ('--nb_heads', dict(type=int, default=8, help='Number of head attentions.')),
    ('--dropout', dict(type=float, default=0.6, help='Dropout rate (1 - keep probability).')),
    ('--alpha', dict(type=float, default=0.2, help='Alpha for the leaky_relu.')),
    ('--patience', dict(type=int, default=100, help='Patience')),
]
for _flag, _opts in _FLAG_SPECS:
    parser.add_argument(_flag, **_opts)
args = parser.parse_args()
# Reproducibility: seed every RNG the model touches; CUDA gets its own seed
# only when a GPU is actually available and enabled.
args.cuda = not args.no_cuda and torch.cuda.is_available()
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
if args.cuda:
    # Bug fix: the original called torch.cuda.mnual_seed, which does not
    # exist and raised AttributeError on CUDA machines.
    torch.cuda.manual_seed(args.seed)
# Load data
adj, features, labels, idx_train, idx_vl, idx_test = load_data()
class GraphAttentionLayer(nn.Module):
    """Single dense graph-attention layer (Velickovic et al., GAT).

    Fixes relative to the original:
      * inherits nn.Module (required for parameter registration and for
        ``self.training``, which forward() reads),
      * ``gain=1.414`` (the original passed ``1.41.as_integer_ratio()``, a
        tuple, which crashes xavier_uniform_),
      * forward uses ``self.W`` (the original read undefined ``self.w``),
      * attention logits are ``leakyrelu(a_input @ a)`` (the original passed
        the parameter ``self.a`` as leaky_relu's slope argument),
      * ``concat=True`` applies the ELU non-linearity, ``concat=False``
        returns the raw aggregation (the original returned None).
    """

    def __init__(self, in_features, out_features, dropout, alpha, concat=True):
        super(GraphAttentionLayer, self).__init__()
        self.dropout = dropout
        self.in_features = in_features
        self.out_features = out_features
        self.alpha = alpha
        self.concat = concat

        self.W = nn.Parameter(torch.zeros(size=(in_features, out_features)))
        nn.init.xavier_uniform_(self.W.data, gain=1.414)
        # 'a' scores one ordered pair of nodes, so it consumes the
        # concatenation of two transformed feature vectors: 2 * out_features.
        self.a = nn.Parameter(torch.zeros(size=(2*out_features, 1)))
        nn.init.xavier_uniform_(self.a.data, gain=1.414)

        self.leakyrelu = nn.LeakyReLU(self.alpha)

    def forward(self, input, adj):
        """input: (N, in_features) node features; adj: (N, N) adjacency.

        Returns the (N, out_features) attended node features.
        """
        h = torch.mm(input, self.W)
        N = h.size()[0]
        # Every ordered pair (i, j) of transformed features, laid out as
        # (N, N, 2 * out_features).
        a_input = torch.cat([h.repeat(1, N).view(N*N, -1), h.repeat(N, 1)], dim=1).view(N, -1, 2*self.out_features)
        e = self.leakyrelu(torch.matmul(a_input, self.a).squeeze(2))

        # Mask non-edges with a large negative number so softmax zeroes them.
        zero_vec = -9e15*torch.ones_like(e)
        attention = torch.where(adj > 0, e, zero_vec)
        attention = F.softmax(attention, dim=1)
        attention = F.dropout(attention, self.dropout, training=self.training)
        h_prime = torch.matmul(attention, h)

        if self.concat:
            return F.elu(h_prime)
        else:
            return h_prime
class GAT(nn.Module):
    """Dense multi-head GAT: ``nhead`` parallel attention layers whose outputs
    are concatenated, then one output attention layer and log-softmax."""

    def __init__(self, nfeat,nhid,nclass,dropout,alpha,nhead):
        """Dense version of Gat.

        nfeat  -- input feature dimension
        nhid   -- hidden units per attention head
        nclass -- number of output classes
        dropout, alpha -- dropout rate and LeakyReLU slope for every layer
        nhead  -- number of parallel attention heads
        """
        super(GAT, self).__init__()
        self.dropout = dropout
        self.attention = [GraphAttentionLayer(nfeat, nhid, dropout, alpha, concat=True) for _ in range(nhead)]
        for i, attention in enumerate(self.attention):
            self.add_module("attention+{}".format(i), attention )
        # Bug fix: the heads emit nhid features each, so the output layer must
        # consume nhid * nhead features and map them to nclass logits (the
        # original used nhead * nfeat -> nhid, which mis-sizes the network).
        self.out_att = GraphAttentionLayer(nhid * nhead, nclass, dropout, alpha, concat=False)

    def forward(self, x, adj):
        """x: (N, nfeat) node features; adj: (N, N) adjacency.

        Returns (N, nclass) log-probabilities.
        """
        x = F.dropout(x, self.dropout, training=self.training)
        # Bug fix: the original iterated self.attenions (typo), which raised
        # AttributeError at the first forward pass.
        x = torch.cat([att(x, adj) for att in self.attention], dim=1)
        x = F.dropout(x, self.dropout, training =self.training)
        x = F.elu(self.out_att(x, adj))
        return F.log_softmax(x, dim=1 )
# Build the model.  Bug fixes relative to the original:
#  - labels.max() (the original called labels.mx(), which does not exist),
#  - args.nb_heads (the parser defines --nb_heads; np_heads was a typo),
#  - keyword nhead= to match GAT.__init__'s parameter name (nheads= raised
#    TypeError).
model = GAT(nfeat=features.shape[1],
            nhid=args.hidden,
            nclass=int(labels.max())+1,
            dropout=args.dropout,
            nhead=args.nb_heads,
            alpha=args.alpha)
| [
"torch.ones_like",
"numpy.random.seed",
"argparse.ArgumentParser",
"utils.load_data",
"torch.where",
"torch.manual_seed",
"torch.nn.init.xavier_uniform_",
"torch.nn.functional.dropout",
"torch.mm",
"torch.cuda.mnual_seed",
"torch.nn.functional.softmax",
"random.seed",
"torch.cuda.is_availabl... | [((228, 253), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (251, 253), False, 'import argparse\n'), ((1463, 1485), 'random.seed', 'random.seed', (['args.seed'], {}), '(args.seed)\n', (1474, 1485), False, 'import random\n'), ((1486, 1511), 'numpy.random.seed', 'np.random.seed', (['args.seed'], {}), '(args.seed)\n', (1500, 1511), True, 'import numpy as np\n'), ((1512, 1540), 'torch.manual_seed', 'torch.manual_seed', (['args.seed'], {}), '(args.seed)\n', (1529, 1540), False, 'import torch\n'), ((1659, 1670), 'utils.load_data', 'load_data', ([], {}), '()\n', (1668, 1670), False, 'from utils import load_data\n'), ((1436, 1461), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (1459, 1461), False, 'import torch\n'), ((1559, 1591), 'torch.cuda.mnual_seed', 'torch.cuda.mnual_seed', (['args.seed'], {}), '(args.seed)\n', (1580, 1591), False, 'import torch\n'), ((2276, 2324), 'torch.nn.init.xavier_uniform_', 'nn.init.xavier_uniform_', (['self.a.data'], {'gain': '(1.414)'}), '(self.a.data, gain=1.414)\n', (2299, 2324), True, 'import torch.nn as nn\n'), ((2350, 2374), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', (['self.alpha'], {}), '(self.alpha)\n', (2362, 2374), True, 'import torch.nn as nn\n'), ((2469, 2492), 'torch.mm', 'torch.mm', (['input', 'self.w'], {}), '(input, self.w)\n', (2477, 2492), False, 'import torch\n'), ((2644, 2673), 'torch.nn.functional.leaky_relu', 'F.leaky_relu', (['a_input', 'self.a'], {}), '(a_input, self.a)\n', (2656, 2673), True, 'import torch.nn.functional as F\n'), ((2781, 2814), 'torch.where', 'torch.where', (['(adj > 0)', 'e', 'zero_vec'], {}), '(adj > 0, e, zero_vec)\n', (2792, 2814), False, 'import torch\n'), ((2834, 2861), 'torch.nn.functional.softmax', 'F.softmax', (['attention'], {'dim': '(1)'}), '(attention, dim=1)\n', (2843, 2861), True, 'import torch.nn.functional as F\n'), ((2882, 2940), 'torch.nn.functional.dropout', 'F.dropout', (['attention', 'self.dropout'], 
{'training': 'self.training'}), '(attention, self.dropout, training=self.training)\n', (2891, 2940), True, 'import torch.nn.functional as F\n'), ((2959, 2985), 'torch.matmul', 'torch.matmul', (['attention', 'h'], {}), '(attention, h)\n', (2971, 2985), False, 'import torch\n'), ((3631, 3681), 'torch.nn.functional.dropout', 'F.dropout', (['x', 'self.dropout'], {'training': 'self.training'}), '(x, self.dropout, training=self.training)\n', (3640, 3681), True, 'import torch.nn.functional as F\n'), ((3764, 3814), 'torch.nn.functional.dropout', 'F.dropout', (['x', 'self.dropout'], {'training': 'self.training'}), '(x, self.dropout, training=self.training)\n', (3773, 3814), True, 'import torch.nn.functional as F\n'), ((3871, 3894), 'torch.nn.functional.log_softmax', 'F.log_softmax', (['x'], {'dim': '(1)'}), '(x, dim=1)\n', (3884, 3894), True, 'import torch.nn.functional as F\n'), ((2033, 2078), 'torch.zeros', 'torch.zeros', ([], {'size': '(in_features, out_features)'}), '(size=(in_features, out_features))\n', (2044, 2078), False, 'import torch\n'), ((2229, 2268), 'torch.zeros', 'torch.zeros', ([], {'size': '(2 * out_features, 1)'}), '(size=(2 * out_features, 1))\n', (2240, 2268), False, 'import torch\n'), ((2700, 2718), 'torch.ones_like', 'torch.ones_like', (['e'], {}), '(e)\n', (2715, 2718), False, 'import torch\n')] |
import os
import pickle
import numpy as np
from tqdm import tqdm
from useful_wsi import open_image, get_whole_image
from train_umap import normalise_csv, drop_na_axis
def options_parser():
    """Parse the command-line options of the heatmap generator.

    Returns
    -------
    argparse.Namespace
        With attributes umap_transform, resolution, path, table and type.
    """
    import argparse
    parser = argparse.ArgumentParser(
        description='Creating heatmap')
    # (flag, required, type, help) for every option; int flags use the
    # "int" metavar, everything else "str".
    specs = [
        ('--umap_transform', False, str, 'folder for umap transform'),
        ('--resolution', False, int, 'resolution of heatmap'),
        ('--path', True, str, 'path to tiff files'),
        ('--table', False, str, 'path to table file'),
        ('--type', True, str, 'U2MAP or U3MAP'),
    ]
    for flag, needed, kind, text in specs:
        if kind is int:
            parser.add_argument(flag, required=needed,
                                metavar="int", type=int,
                                help=text)
        else:
            parser.add_argument(flag, required=needed,
                                metavar="str", type=str,
                                help=text)
    return parser.parse_args()
def load_umap_transform(name):
    """Load the projection model(s) stored in a folder.

    A folder can contain two pickled models (PCA + UMAP) or one (UMAP).

    Fixes relative to the original: file handles are now closed via ``with``
    (``pickle.load(open(...))`` leaked them) and the bare ``except:`` clauses
    are narrowed to ``Exception``.

    Parameters
    ----------
    name: string,
        folder name where to find the models.

    Returns
    -------
    A function to apply to a table, the function is:
        - the sequential application of PCA+UMAP
        - application of UMAP
    """
    files = os.listdir(name)
    if len(files) == 2:
        # NOTE(review): os.listdir order is arbitrary; this assumes files[0]
        # is the PCA model and files[1] the UMAP model -- confirm the folder
        # naming guarantees that.
        with open(os.path.join(name, files[0]), 'rb') as handle:
            pca = pickle.load(handle)
        with open(os.path.join(name, files[1]), 'rb') as handle:
            umap = pickle.load(handle)

        def predict_pca(z):
            z = pca.transform(z)
            return umap.transform(z)
        return predict_pca
    else:
        with open(os.path.join(name, files[0]), 'rb') as handle:
            umap = pickle.load(handle)

        def predict(z):
            try:
                pred = umap.transform(z)
            except Exception:
                # Kept from the original: the transform occasionally fails
                # transiently, and a single retry was enough in practice.
                pred = umap.transform(z)
            return pred
        return predict
def f(slide, line, shape_slide_level):
    """Project a level-0 nucleus centroid onto a lower-resolution image.

    Modified from the useful_wsi package: instead of a point it takes a
    table line.

    Parameters
    ----------
    slide : wsi object,
        openslide object from which we extract.
    line : dictionnary like object,
        must provide "Centroid_x" and "Centroid_y", the nucleus centroid at
        level-0 resolution.
    shape_slide_level : tuple of integer,
        (height, width, ...) of the slide image at the target level.

    Returns
    -------
    Tuple (x, y): the centroid coordinates rescaled to the target level.
    """
    # Full-resolution slide extent (level 0).
    full_w = float(slide.level_dimensions[0][0])
    full_h = float(slide.level_dimensions[0][1])
    # Target-resolution extent: shape is (height, width, ...).
    target_w = shape_slide_level[1]
    target_h = shape_slide_level[0]
    # Linear rescale of each coordinate, then round to the nearest pixel.
    scaled_x = line["Centroid_x"] * target_w / full_w
    scaled_y = line["Centroid_y"] * target_h / full_h
    return (round(scaled_x), round(scaled_y))
def from_cell_to_heatmap(slide, trans, cell_table, filter_out="LBP", level=7, n_comp=2):
    """
    Build a per-pixel heatmap of projected nuclei features for one slide.

    Parameters
    ----------
    slide : wsi object,
        openslide object from which we extract.
    trans : function,
        infers the new coordinates of a given point. It is or:
            - the sequential application of PCA+UMAP
            - application of UMAP.
    cell_table : pandas dataframe,
        patient table, where each line corresponds to a nucleus.
    filter_out: str,
        String pattern to filter out columns from the feature table, in 'glob' form.
        If pattern in the feature name, exclude feature.
    level : int,
        level of the resulting heatmap.
    n_comp : int,
        number of components after UMAP projection.
    Returns
    -------
    Returns a heatmap with the projected components of a given slide
    at a given resolution.  Channels 0/1 hold the mean of the first two
    projected components per pixel; channel 2 holds the nucleus count when
    n_comp == 2, otherwise the mean of the third component.
    """
    slide = open_image(slide)
    # f1: normalised feature matrix; f2: per-nucleus metadata (centroids).
    f1, f2 = normalise_csv(cell_table)
    feat = f1.columns
    feat = [el for el in feat if filter_out not in el]
    f1 = f1[feat]
    f1 = drop_na_axis(f1)
    # Project every nucleus into the low-dimensional embedding space.
    standard_embedding = trans(f1)
    x = standard_embedding[:, 0]
    y = standard_embedding[:, 1]
    if level < slide.level_count:
        # The pyramid already stores an image at the requested resolution.
        shape_slide_level = get_whole_image(slide, level=level, numpy=True).shape
        within_slide_levels = True
    else:
        # Requested level is deeper than the pyramid goes: take the coarsest
        # stored level and halve its dimensions once per missing level.
        # NOTE(review): within_slide_levels is set but never used afterwards.
        within_slide_levels = False
        high_pyramid_level = slide.level_count - 1
        power = level - high_pyramid_level
        shape_slide_level = get_whole_image(slide, level=high_pyramid_level, numpy=True).shape
        shape_slide_level = tuple((int(shape_slide_level[0] / (2 ** power)),
                                   int(shape_slide_level[1] / (2 ** power)),
                                   3))
    xshape, yshape = shape_slide_level[0:2]
    # Map every nucleus centroid to its pixel at the heatmap resolution.
    f2["coord_l"] = f2.apply(lambda row: f(slide, row, shape_slide_level), axis=1)
    heatmap = np.zeros(shape=(xshape, yshape, 3))
    # Align positional indices so group.index addresses rows of the embedding.
    f1 = f1.reset_index(drop=True)
    f2 = f2.reset_index(drop=True)
    # Aggregate all nuclei that fall on the same heatmap pixel.
    for coord_l, group in tqdm(f2.groupby("coord_l")):
        y_l, x_l = [int(el) for el in coord_l]
        heatmap[x_l, y_l, 0] = np.mean(x[group.index])
        heatmap[x_l, y_l, 1] = np.mean(y[group.index])
        if n_comp == 2:
            count = group.shape[0]
            heatmap[x_l, y_l, 2] = count
        else:
            z = standard_embedding[:, 2]
            heatmap[x_l, y_l, 2] = np.mean(z[group.index])
    return heatmap
def save_heat_map(name, arr):
    """Write a heatmap to disk in NumPy's ``.npy`` format.

    Parameters
    ----------
    name : string,
        target file name (numpy appends ``.npy`` when it is missing).
    arr : numpy array,
        heatmap data to persist.
    """
    np.save(file=name, arr=arr)
def main():
    """Command-line entry point: build and save one slide's heatmap.

    Reads the CLI options, loads the UMAP transform (whose name encodes the
    number of projected components, e.g. "...2UMAP..."), locates the slide
    tiff next to the cell table, computes the projected heatmap and saves
    it as "<table basename>.npy".
    """
    options = options_parser()
    # The digit just before "MAP" in the transform name is the output dim.
    n_comp = int(options.umap_transform.split('MAP')[0][-1])
    level = options.resolution
    umap_transform = load_umap_transform(options.umap_transform)
    cell_table = options.table
    slide = os.path.join(options.path, os.path.basename(options.table))
    slide = slide.split('.cs')[0] + ".tiff"
    heat_map_3D = from_cell_to_heatmap(slide, umap_transform, cell_table, level=level, n_comp=n_comp)
    # Fix: the original computed `num` twice (from cell_table, then again
    # from options.table); both are the same value, so compute it once.
    num = os.path.basename(options.table).split('.')[0]
    name = "{}.npy".format(num)
    save_heat_map(name, heat_map_3D)
if __name__ == '__main__':
    # Script entry point: run only when executed directly, not on import.
    main()
| [
"numpy.save",
"argparse.ArgumentParser",
"os.path.basename",
"numpy.zeros",
"useful_wsi.open_image",
"train_umap.drop_na_axis",
"useful_wsi.get_whole_image",
"numpy.mean",
"os.path.join",
"os.listdir",
"train_umap.normalise_csv"
] | [((226, 281), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Creating heatmap"""'}), "(description='Creating heatmap')\n", (249, 281), False, 'import argparse\n'), ((1517, 1533), 'os.listdir', 'os.listdir', (['name'], {}), '(name)\n', (1527, 1533), False, 'import os\n'), ((4145, 4162), 'useful_wsi.open_image', 'open_image', (['slide'], {}), '(slide)\n', (4155, 4162), False, 'from useful_wsi import open_image, get_whole_image\n'), ((4176, 4201), 'train_umap.normalise_csv', 'normalise_csv', (['cell_table'], {}), '(cell_table)\n', (4189, 4201), False, 'from train_umap import normalise_csv, drop_na_axis\n'), ((4306, 4322), 'train_umap.drop_na_axis', 'drop_na_axis', (['f1'], {}), '(f1)\n', (4318, 4322), False, 'from train_umap import normalise_csv, drop_na_axis\n'), ((5302, 5337), 'numpy.zeros', 'np.zeros', ([], {'shape': '(xshape, yshape, 3)'}), '(shape=(xshape, yshape, 3))\n', (5310, 5337), True, 'import numpy as np\n'), ((6041, 6059), 'numpy.save', 'np.save', (['name', 'arr'], {}), '(name, arr)\n', (6048, 6059), True, 'import numpy as np\n'), ((5542, 5565), 'numpy.mean', 'np.mean', (['x[group.index]'], {}), '(x[group.index])\n', (5549, 5565), True, 'import numpy as np\n'), ((5597, 5620), 'numpy.mean', 'np.mean', (['y[group.index]'], {}), '(y[group.index])\n', (5604, 5620), True, 'import numpy as np\n'), ((6336, 6367), 'os.path.basename', 'os.path.basename', (['options.table'], {}), '(options.table)\n', (6352, 6367), False, 'import os\n'), ((4563, 4610), 'useful_wsi.get_whole_image', 'get_whole_image', (['slide'], {'level': 'level', 'numpy': '(True)'}), '(slide, level=level, numpy=True)\n', (4578, 4610), False, 'from useful_wsi import open_image, get_whole_image\n'), ((4901, 4961), 'useful_wsi.get_whole_image', 'get_whole_image', (['slide'], {'level': 'high_pyramid_level', 'numpy': '(True)'}), '(slide, level=high_pyramid_level, numpy=True)\n', (4916, 4961), False, 'from useful_wsi import open_image, get_whole_image\n'), ((5811, 
5834), 'numpy.mean', 'np.mean', (['z[group.index]'], {}), '(z[group.index])\n', (5818, 5834), True, 'import numpy as np\n'), ((1589, 1617), 'os.path.join', 'os.path.join', (['name', 'files[0]'], {}), '(name, files[0])\n', (1601, 1617), False, 'import os\n'), ((1658, 1686), 'os.path.join', 'os.path.join', (['name', 'files[1]'], {}), '(name, files[1])\n', (1670, 1686), False, 'import os\n'), ((1886, 1914), 'os.path.join', 'os.path.join', (['name', 'files[0]'], {}), '(name, files[0])\n', (1898, 1914), False, 'import os\n'), ((6525, 6553), 'os.path.basename', 'os.path.basename', (['cell_table'], {}), '(cell_table)\n', (6541, 6553), False, 'import os\n'), ((6578, 6609), 'os.path.basename', 'os.path.basename', (['options.table'], {}), '(options.table)\n', (6594, 6609), False, 'import os\n')] |
import numpy as np
import matplotlib.pyplot as plt
import sys
sys.path.append('./../..')
from util import myfigure
# Construct time series: two pairs of length-100 series, each containing two
# "events" (noisy plateaus for t11/t12, unit pulses for t21/t22).
noise1 = np.random.randn(20,1)
noise2 = np.random.randn(20,1)
t11 = np.vstack([np.zeros([20,1]), 20+noise1, np.zeros([20,1]), 30+noise2, np.zeros([20,1])])
t12 = np.vstack([np.zeros([20,1]), 25+noise1, np.zeros([20,1]), 25+noise2, np.zeros([20,1])])
t21 = np.vstack([np.zeros([37,1]), np.ones([6,1]), np.zeros([12,1]), np.ones([6,1]), np.zeros([39,1])])
t22 = np.vstack([np.zeros([29,1]), np.ones([6,1]), np.zeros([32,1]), np.ones([6,1]), np.zeros([27,1])])

def _standardise(series):
    """Return *series* shifted to zero mean and scaled to unit sample std."""
    series = series - np.mean(series)
    return series / np.std(series, ddof=1)

# Normalise each series (the original repeated this in place four times).
t11 = _standardise(t11)
t12 = _standardise(t12)
t21 = _standardise(t21)
t22 = _standardise(t22)

def _plot_pair(first, second, out_path):
    """Plot two series on a shared axis and save the figure to *out_path*.

    The original duplicated this figure code for each output file.
    """
    fig, ax = myfigure(nrows=1, ncols=1, fig_ratio=0.5, fig_scale=1.7)
    ax.axis([0, 100, -1, 3.5])
    ax.plot(first)
    ax.plot(second, '--')
    ax.plot([])
    ax.spines['top'].set_visible(False)
    ax.spines['right'].set_visible(False)
    plt.tight_layout()
    plt.savefig(out_path)

_plot_pair(t21, t22, 'shift_in_time.pdf')
_plot_pair(t11, t12, 'shift_in_value.pdf')
| [
"sys.path.append",
"util.myfigure",
"numpy.random.randn",
"numpy.std",
"numpy.zeros",
"numpy.ones",
"numpy.mean",
"matplotlib.pyplot.tight_layout",
"matplotlib.pyplot.savefig"
] | [((62, 88), 'sys.path.append', 'sys.path.append', (['"""./../.."""'], {}), "('./../..')\n", (77, 88), False, 'import sys\n'), ((150, 172), 'numpy.random.randn', 'np.random.randn', (['(20)', '(1)'], {}), '(20, 1)\n', (165, 172), True, 'import numpy as np\n'), ((181, 203), 'numpy.random.randn', 'np.random.randn', (['(20)', '(1)'], {}), '(20, 1)\n', (196, 203), True, 'import numpy as np\n'), ((646, 665), 'numpy.std', 'np.std', (['t11'], {'ddof': '(1)'}), '(t11, ddof=1)\n', (652, 665), True, 'import numpy as np\n'), ((698, 717), 'numpy.std', 'np.std', (['t12'], {'ddof': '(1)'}), '(t12, ddof=1)\n', (704, 717), True, 'import numpy as np\n'), ((750, 769), 'numpy.std', 'np.std', (['t21'], {'ddof': '(1)'}), '(t21, ddof=1)\n', (756, 769), True, 'import numpy as np\n'), ((802, 821), 'numpy.std', 'np.std', (['t22'], {'ddof': '(1)'}), '(t22, ddof=1)\n', (808, 821), True, 'import numpy as np\n'), ((833, 889), 'util.myfigure', 'myfigure', ([], {'nrows': '(1)', 'ncols': '(1)', 'fig_ratio': '(0.5)', 'fig_scale': '(1.7)'}), '(nrows=1, ncols=1, fig_ratio=0.5, fig_scale=1.7)\n', (841, 889), False, 'from util import myfigure\n'), ((1035, 1053), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (1051, 1053), True, 'import matplotlib.pyplot as plt\n'), ((1054, 1086), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""shift_in_time.pdf"""'], {}), "('shift_in_time.pdf')\n", (1065, 1086), True, 'import matplotlib.pyplot as plt\n'), ((1098, 1154), 'util.myfigure', 'myfigure', ([], {'nrows': '(1)', 'ncols': '(1)', 'fig_ratio': '(0.5)', 'fig_scale': '(1.7)'}), '(nrows=1, ncols=1, fig_ratio=0.5, fig_scale=1.7)\n', (1106, 1154), False, 'from util import myfigure\n'), ((1300, 1318), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (1316, 1318), True, 'import matplotlib.pyplot as plt\n'), ((1319, 1352), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""shift_in_value.pdf"""'], {}), "('shift_in_value.pdf')\n", (1330, 1352), True, 'import 
matplotlib.pyplot as plt\n'), ((626, 638), 'numpy.mean', 'np.mean', (['t11'], {}), '(t11)\n', (633, 638), True, 'import numpy as np\n'), ((678, 690), 'numpy.mean', 'np.mean', (['t12'], {}), '(t12)\n', (685, 690), True, 'import numpy as np\n'), ((730, 742), 'numpy.mean', 'np.mean', (['t21'], {}), '(t21)\n', (737, 742), True, 'import numpy as np\n'), ((782, 794), 'numpy.mean', 'np.mean', (['t22'], {}), '(t22)\n', (789, 794), True, 'import numpy as np\n'), ((220, 237), 'numpy.zeros', 'np.zeros', (['[20, 1]'], {}), '([20, 1])\n', (228, 237), True, 'import numpy as np\n'), ((249, 266), 'numpy.zeros', 'np.zeros', (['[20, 1]'], {}), '([20, 1])\n', (257, 266), True, 'import numpy as np\n'), ((278, 295), 'numpy.zeros', 'np.zeros', (['[20, 1]'], {}), '([20, 1])\n', (286, 295), True, 'import numpy as np\n'), ((314, 331), 'numpy.zeros', 'np.zeros', (['[20, 1]'], {}), '([20, 1])\n', (322, 331), True, 'import numpy as np\n'), ((343, 360), 'numpy.zeros', 'np.zeros', (['[20, 1]'], {}), '([20, 1])\n', (351, 360), True, 'import numpy as np\n'), ((372, 389), 'numpy.zeros', 'np.zeros', (['[20, 1]'], {}), '([20, 1])\n', (380, 389), True, 'import numpy as np\n'), ((409, 426), 'numpy.zeros', 'np.zeros', (['[37, 1]'], {}), '([37, 1])\n', (417, 426), True, 'import numpy as np\n'), ((427, 442), 'numpy.ones', 'np.ones', (['[6, 1]'], {}), '([6, 1])\n', (434, 442), True, 'import numpy as np\n'), ((443, 460), 'numpy.zeros', 'np.zeros', (['[12, 1]'], {}), '([12, 1])\n', (451, 460), True, 'import numpy as np\n'), ((461, 476), 'numpy.ones', 'np.ones', (['[6, 1]'], {}), '([6, 1])\n', (468, 476), True, 'import numpy as np\n'), ((477, 494), 'numpy.zeros', 'np.zeros', (['[39, 1]'], {}), '([39, 1])\n', (485, 494), True, 'import numpy as np\n'), ((513, 530), 'numpy.zeros', 'np.zeros', (['[29, 1]'], {}), '([29, 1])\n', (521, 530), True, 'import numpy as np\n'), ((531, 546), 'numpy.ones', 'np.ones', (['[6, 1]'], {}), '([6, 1])\n', (538, 546), True, 'import numpy as np\n'), ((547, 564), 'numpy.zeros', 
'np.zeros', (['[32, 1]'], {}), '([32, 1])\n', (555, 564), True, 'import numpy as np\n'), ((565, 580), 'numpy.ones', 'np.ones', (['[6, 1]'], {}), '([6, 1])\n', (572, 580), True, 'import numpy as np\n'), ((581, 598), 'numpy.zeros', 'np.zeros', (['[27, 1]'], {}), '([27, 1])\n', (589, 598), True, 'import numpy as np\n')] |
# Import OpenCV and the character-image generator.
import cv2
import numpy as np
import time
import generate_char_img as gci
# Create a VideoCapture instance.
# The constructor argument selects which camera device to open.
cap = cv2.VideoCapture(0)
threshold = 0.52  # minimum normalised match score to count as a hit
WIDTH = 1280
HEIGHT = 720
FPS = 30
cap.set(cv2.CAP_PROP_FPS, FPS)
cap.set(cv2.CAP_PROP_FRAME_WIDTH, WIDTH)
cap.set(cv2.CAP_PROP_FRAME_HEIGHT, HEIGHT)
# Fonts used to render template images of the search character.
fonts = ['msmincho.ttc','msgothic.ttc','HGRME.TTC','yumin.ttf']
print("search_char(len=1) >> ",end='')
search_char = input()
print("font_size >> ",end='')
font_size = int(input())
# One rendered template per font; shown briefly for visual confirmation.
template_imgs = gci.gen_char_imgs(search_char,fonts,font_size)
for template_img in template_imgs:
    cv2.imshow('template', template_img)
    cv2.waitKey(1000)
cv2.destroyAllWindows()
# Templates are square: font_size x font_size pixels.
w, h = font_size, font_size
print(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
while True:
    # Read one frame from the VideoCapture.
    ret, frame = cap.read()
    frame_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    # NOTE(review): templates are re-converted to grayscale on every frame;
    # this could be hoisted out of the loop.
    for template_img in template_imgs:
        template_img = cv2.cvtColor(template_img, cv2.COLOR_BGR2GRAY)
        res = cv2.matchTemplate(frame_gray, template_img, cv2.TM_CCOEFF_NORMED)
        loc = np.where(res >= threshold)
        # Box ((0,0,255) = red in BGR) every location above the threshold.
        for pt in zip(*loc[::-1]):
            cv2.rectangle(frame, pt, (pt[0] + w, pt[1] + h), (0,0,255), 2)
    cv2.imshow('Frame', frame)
    # Wait 1 ms for a key press and break when it is 27 (ESC).
    k = cv2.waitKey(1)
    if k == 27:
        break
# Release the capture and close all windows.
cap.release()
cv2.destroyAllWindows() | [
"generate_char_img.gen_char_imgs",
"cv2.waitKey",
"cv2.cvtColor",
"cv2.imshow",
"cv2.VideoCapture",
"numpy.where",
"cv2.rectangle",
"cv2.destroyAllWindows",
"cv2.matchTemplate"
] | [((148, 167), 'cv2.VideoCapture', 'cv2.VideoCapture', (['(0)'], {}), '(0)\n', (164, 167), False, 'import cv2\n'), ((571, 619), 'generate_char_img.gen_char_imgs', 'gci.gen_char_imgs', (['search_char', 'fonts', 'font_size'], {}), '(search_char, fonts, font_size)\n', (588, 619), True, 'import generate_char_img as gci\n'), ((721, 744), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (742, 744), False, 'import cv2\n'), ((1485, 1508), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (1506, 1508), False, 'import cv2\n'), ((660, 696), 'cv2.imshow', 'cv2.imshow', (['"""template"""', 'template_img'], {}), "('template', template_img)\n", (670, 696), False, 'import cv2\n'), ((702, 719), 'cv2.waitKey', 'cv2.waitKey', (['(1000)'], {}), '(1000)\n', (713, 719), False, 'import cv2\n'), ((911, 950), 'cv2.cvtColor', 'cv2.cvtColor', (['frame', 'cv2.COLOR_BGR2GRAY'], {}), '(frame, cv2.COLOR_BGR2GRAY)\n', (923, 950), False, 'import cv2\n'), ((1308, 1334), 'cv2.imshow', 'cv2.imshow', (['"""Frame"""', 'frame'], {}), "('Frame', frame)\n", (1318, 1334), False, 'import cv2\n'), ((1387, 1401), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (1398, 1401), False, 'import cv2\n'), ((1019, 1065), 'cv2.cvtColor', 'cv2.cvtColor', (['template_img', 'cv2.COLOR_BGR2GRAY'], {}), '(template_img, cv2.COLOR_BGR2GRAY)\n', (1031, 1065), False, 'import cv2\n'), ((1081, 1146), 'cv2.matchTemplate', 'cv2.matchTemplate', (['frame_gray', 'template_img', 'cv2.TM_CCOEFF_NORMED'], {}), '(frame_gray, template_img, cv2.TM_CCOEFF_NORMED)\n', (1098, 1146), False, 'import cv2\n'), ((1162, 1188), 'numpy.where', 'np.where', (['(res >= threshold)'], {}), '(res >= threshold)\n', (1170, 1188), True, 'import numpy as np\n'), ((1238, 1302), 'cv2.rectangle', 'cv2.rectangle', (['frame', 'pt', '(pt[0] + w, pt[1] + h)', '(0, 0, 255)', '(2)'], {}), '(frame, pt, (pt[0] + w, pt[1] + h), (0, 0, 255), 2)\n', (1251, 1302), False, 'import cv2\n')] |
import math
import numpy as np
def amounts(balance):
    """Return the held amount of every asset, in the balance's dict order."""
    return [entry['amount'] for entry in balance.values()]
def getValueEachAsset(balance):
    """Return price * amount for every asset, in the balance's dict order."""
    return [entry['price'] * entry['amount'] for entry in balance.values()]
def getValue(balance):
    """Return the total market value of all positions in *balance*."""
    total = 0
    for value in getValueEachAsset(balance):
        total += value
    return total
def rebalance(balance, prices):
    """Mutate *balance* in place so asset values approach the target ratios.

    Sells down over-weighted assets, then spends the proceeds ("change") on
    under-weighted ones in whole units; any remainder is added to 'cash'.
    """
    holdings = amounts(balance)
    assets = np.multiply(holdings, prices)
    ratios = [entry['ratio'] for entry in balance.values()]
    # Normalise current values and targets so both sum (in abs) to 1.
    norm_assets = assets / np.abs(assets).sum()
    norm_ratios = ratios / np.abs(ratios).sum()
    sell_ratios = [(a - r) / a if a > r else 0 for a, r in zip(norm_assets, norm_ratios)]
    buy_ratios = [(r - a) / a if r > a else 0 for a, r in zip(norm_assets, norm_ratios)]
    # Whole-unit order sizes, keyed by asset name (dict order matches prices).
    sell_amounts = {key: math.floor(amt * ratio)
                    for key, amt, ratio in zip(balance, holdings, sell_ratios)}
    buy_amounts = {key: math.floor(amt * ratio)
                   for key, amt, ratio in zip(balance, holdings, buy_ratios)}
    non_cash = {key: entry for key, entry in balance.items() if key != 'cash'}
    change = 0
    # First pass: sell, accumulating proceeds.
    for key, entry in non_cash.items():
        entry['amount'] -= sell_amounts[key]
        change += entry['price'] * sell_amounts[key]
    # Second pass: buy, capped by what the proceeds can afford.
    for key, entry in non_cash.items():
        affordable = math.floor(change / entry['price'])
        bought = min(buy_amounts[key], affordable)
        entry['amount'] += bought
        change -= entry['price'] * bought
    if change > 0:
        balance['cash']['amount'] += change
| [
"numpy.abs",
"math.floor"
] | [((1334, 1365), 'math.floor', 'math.floor', (["(change / v['price'])"], {}), "(change / v['price'])\n", (1344, 1365), False, 'import math\n'), ((497, 511), 'numpy.abs', 'np.abs', (['assets'], {}), '(assets)\n', (503, 511), True, 'import numpy as np\n'), ((544, 558), 'numpy.abs', 'np.abs', (['ratios'], {}), '(ratios)\n', (550, 558), True, 'import numpy as np\n'), ((855, 872), 'math.floor', 'math.floor', (['(a * r)'], {}), '(a * r)\n', (865, 872), False, 'import math\n'), ((980, 997), 'math.floor', 'math.floor', (['(a * r)'], {}), '(a * r)\n', (990, 997), False, 'import math\n')] |
from scipy.special import gamma
import numpy as np
import math
'''
We model the terminal depth as a random variable drawn i.i.d. from
a negative binomial distribution
d_i ~ NB(r, p)
We know that in a negative binomial process, there must be at least r
failures. For this reason, we can see that the largest possible value of
r to look at is the smallest d in the dataset.
'''
# Tally how many times each terminal depth appears in the data file.
tds = dict()
with open('td', 'r') as td_f:
    lines = [int(line) for line in td_f.readlines()]
    for line in lines:
        tds[line] = tds.get(line, 0) + 1
def log_likelihood(x, r, p):
    """Log-pmf of the negative binomial NB(r, p) at count *x*.

    Computes log[ C(x+r-1, x) * p^r * (1-p)^x ] using log-gamma so that
    large *x* or *r* no longer overflow (``gamma`` exceeds float64 range
    near its argument ~171, which made the original return nan/inf).
    Returns -inf for the degenerate parameters r == 0, p == 0 or p == 1.
    """
    from scipy.special import gammaln  # local: module-level only imports gamma
    if r == 0 or p == 1 or p == 0:
        return float("-inf")
    return (gammaln(r + x) - gammaln(r) - gammaln(x + 1)
            + r * math.log(p) + x * math.log(1 - p))
# Grid search for the maximum-likelihood (r, p).
# Per the note above, r can be at most the smallest observed depth; the
# original used range(1, bound), silently excluding that largest value,
# and also reused the name max_r for both the bound and the argmax tracker.
r_bound = min(tds.keys())
rs = list(range(1, r_bound + 1))
ps = np.linspace(0, 1, 50)
max_r = rs[0]
max_p = ps[0]
max_ll = float("-inf")
for r in rs:
    for p in ps:
        # Total log-likelihood: each depth weighted by its observed count.
        ll = sum(count * log_likelihood(depth, r, p)
                 for depth, count in tds.items())
        if ll > max_ll:
            max_ll = ll
            max_r = r
            max_p = p
print("max_ll: " + str(max_ll))
print("max_r: " + str(max_r))
print("max_p: " + str(max_p))
| [
"scipy.special.gamma",
"numpy.linspace"
] | [((830, 851), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', '(50)'], {}), '(0, 1, 50)\n', (841, 851), True, 'import numpy as np\n'), ((734, 742), 'scipy.special.gamma', 'gamma', (['r'], {}), '(r)\n', (739, 742), False, 'from scipy.special import gamma\n'), ((745, 757), 'scipy.special.gamma', 'gamma', (['(x + 1)'], {}), '(x + 1)\n', (750, 757), False, 'from scipy.special import gamma\n'), ((689, 701), 'scipy.special.gamma', 'gamma', (['(r + x)'], {}), '(r + x)\n', (694, 701), False, 'from scipy.special import gamma\n')] |
import numpy as np
import ctypes as ct
from typing import Tuple, List, Optional
from .. import mathctypes
from .. import bindingbase as bb
from .. import plib
from .indices import *
class pMatrix(ct.Structure):
    # Mirrors the native library's matrix header. Field order and types
    # must match the C struct exactly -- do not reorder.
    _fields_ = [
        ('data', bb.c_float_p),  # pointer to rows*cols float32 values
        ('cols', bb.c_int),
        ('rows', bb.c_int)
    ]
# Pointer-to-pMatrix type, used where a matrix argument may be NULL.
pMatrix_p = ct.POINTER(pMatrix)
# Declare the native destructor's argument signature (takes pMatrix*).
plib.p_matrix_kill.argtypes = [pMatrix_p]
def p_matrix_valid(self: pMatrix):
    """Return True when the matrix has a non-NULL buffer and positive dims.

    Fix: the original tested ``self.data is not None``, but a ctypes
    pointer field read from a Structure is never None -- a NULL pointer is
    *falsy* instead -- so NULL buffers slipped through. ``bool(self.data)``
    is the documented NULL test for ctypes pointers.
    """
    return bool(self.data) and self.cols > 0 and self.rows > 0
class NpMatrix(np.ndarray):
    """ndarray view over a native pMatrix buffer.

    The owning (non-view) instance keeps its pMatrix in ``p_matrix`` and
    calls the native ``p_matrix_kill`` when garbage collected; views get
    ``p_matrix = None`` so they never free a buffer they do not own.
    """
    def __new__(cls, matrix: pMatrix):
        shape = matrix.rows, matrix.cols
        # create a numpy array from a ct pointer
        arr = np.ctypeslib.as_array(matrix.data, shape=shape)
        # wrap that buffer in an NpMatrix and remember the owning pMatrix
        res = super(NpMatrix, cls).__new__(cls, shape=arr.shape, dtype=np.float32, buffer=arr)
        res.p_matrix = matrix
        return res
    def __array_finalize__(self, obj):
        # in the creation process of __new__, so p_matrix will be set in new to the real matrix
        if obj is None:
            return
        # view, so set to None (views del shouldn't kill it)
        self.p_matrix = None
    def __del__(self):
        # only kill, if its the real matrix and not a view
        if self.p_matrix is not None:
            plib.p_matrix_kill(bb.ref(self.p_matrix))
def cast_from_pMatrix(data: pMatrix) -> NpMatrix:
    """Wrap a valid pMatrix in an NpMatrix view; raise on an invalid one."""
    if p_matrix_valid(data):
        return NpMatrix(data)
    raise RuntimeError("cast_from_pMatrix failed, matrix is not valid")
def cast_into_pMatrix(data: np.ndarray) -> pMatrix:
    """Build a pMatrix header sharing *data*'s buffer (no copy is made).

    Raises
    ------
    RuntimeError
        If *data* is not a 2-D float32 array.

    Fix: the error messages referenced "cast_np_pMatrix", a stale name
    from before this function was renamed; they now name this function.
    """
    if data.dtype != np.float32:
        raise RuntimeError('cast_into_pMatrix failed: must be float32')
    if data.ndim != 2:
        raise RuntimeError('cast_into_pMatrix failed: must be a matrix')
    rows = data.shape[0]
    cols = data.shape[1]
    return pMatrix(data.ctypes.data_as(bb.c_float_p), cols, rows)
def cast_into_pMatrix_p(data: Optional[np.ndarray]) -> Optional[pMatrix_p]:
    """Return a pointer to a pMatrix view of *data*, or None for empty/absent input."""
    if data is not None and data.size > 0:
        return ct.pointer(cast_into_pMatrix(data))
    return None
# Native prototype:
# /** Prints the whole matrix data to stdout */
# void p_matrix_print(pMatrix self);
plib.p_matrix_print.argtypes = [pMatrix]
def matrix_print(self: np.ndarray):
    """Print the whole matrix contents to stdout via the native library."""
    header = cast_into_pMatrix(self)
    plib.p_matrix_print(header)
| [
"numpy.ctypeslib.as_array",
"ctypes.POINTER"
] | [((340, 359), 'ctypes.POINTER', 'ct.POINTER', (['pMatrix'], {}), '(pMatrix)\n', (350, 359), True, 'import ctypes as ct\n'), ((682, 729), 'numpy.ctypeslib.as_array', 'np.ctypeslib.as_array', (['matrix.data'], {'shape': 'shape'}), '(matrix.data, shape=shape)\n', (703, 729), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
"""
@author: 代码医生工作室
@公众号:xiangyuejiqiren (内有更多优秀文章及学习资料)
@来源: <PyTorch深度学习和图神经网络(卷 1)——基础知识>配套代码
@配套代码技术支持:bbs.aianaconda.com
Created on Sun Nov 3 15:36:39 2019
"""
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import os
from scipy import stats
import pandas as pd
titanic_data = pd.read_csv("titanic3.csv")
print(titanic_data.columns )
# One-hot encode the categorical columns with dummy variables.
titanic_data = pd.concat([titanic_data,
                          pd.get_dummies(titanic_data['sex']),
                          pd.get_dummies(titanic_data['embarked'],prefix="embark"),
                          pd.get_dummies(titanic_data['pclass'],prefix="class")], axis=1)
print(titanic_data.columns )
print(titanic_data['sex'])
print(titanic_data['female'])
# Fill missing values with the column mean.
titanic_data["age"] = titanic_data["age"].fillna(titanic_data["age"].mean())
titanic_data["fare"] = titanic_data["fare"].fillna(titanic_data["fare"].mean())# passenger fare
# Drop columns that are unused (or replaced by the dummies above).
titanic_data = titanic_data.drop(['name','ticket','cabin','boat','body','home.dest','sex','embarked','pclass'], axis=1)
print(titanic_data.columns )
#
####################################
# Separate features and labels.
labels = titanic_data["survived"].to_numpy()
titanic_data = titanic_data.drop(['survived'], axis=1)
data = titanic_data.to_numpy()
# Feature (column) names of the samples.
feature_names = list(titanic_data.columns)
# Split the samples into training and test parts.
np.random.seed(10)# fixed seed so the split is identical on every run
train_indices = np.random.choice(len(labels), int(0.7*len(labels)), replace=False)
test_indices = list(set(range(len(labels))) - set(train_indices))
train_features = data[train_indices]
train_labels = labels[train_indices]
test_features = data[test_indices]
test_labels = labels[test_indices]
len(test_labels)# leftover REPL check: was 393 in the original run
###########################################
class Mish(nn.Module):
    """Mish activation function: x * tanh(softplus(x))."""

    def __init__(self):
        super().__init__()
        print("Mish activation loaded...")

    def forward(self, x):
        return x * torch.tanh(F.softplus(x))
torch.manual_seed(0) # fix the RNG seed so weight initialisation is reproducible
class ThreelinearModel(nn.Module):
    """Three-layer fully connected classifier (12 -> 12 -> 8 -> 2).

    ``forward`` still returns class probabilities (softmax over dim=1), so
    callers that take ``argmax`` of the network output are unaffected.

    Fix: the original fed the softmax output into ``nn.CrossEntropyLoss``,
    which applies log-softmax internally -- a double softmax that flattens
    the gradients. ``getloss`` now computes the loss on the raw logits.
    """

    def __init__(self):
        super().__init__()
        # Layer creation order is preserved so the seeded init matches.
        self.linear1 = nn.Linear(12, 12)
        self.mish1 = Mish()
        self.linear2 = nn.Linear(12, 8)
        self.mish2 = Mish()
        self.linear3 = nn.Linear(8, 2)
        self.softmax = nn.Softmax(dim=1)
        self.criterion = nn.CrossEntropyLoss()  # expects raw logits

    def _logits(self, x):
        """Raw (pre-softmax) class scores for each sample."""
        out1 = self.mish1(self.linear1(x))
        out2 = self.mish2(self.linear2(out1))
        return self.linear3(out2)

    def forward(self, x):
        """Class probabilities for each sample in *x*."""
        return self.softmax(self._logits(x))

    def getloss(self, x, y):
        """Cross-entropy loss of the predictions for labels *y*."""
        return self.criterion(self._logits(x), y)
##############################
net = ThreelinearModel()
num_epochs = 200
optimizer = torch.optim.Adam(net.parameters(), lr=0.04)
input_tensor = torch.from_numpy(train_features).type(torch.FloatTensor)
label_tensor = torch.from_numpy(train_labels)
losses = []# collects the loss value of every training step
for epoch in range(num_epochs):
    loss = net.getloss(input_tensor,label_tensor)
    losses.append(loss.item())
    optimizer.zero_grad()# clear gradients from the previous step
    loss.backward()# backpropagate the loss
    optimizer.step()# update the parameters
    if epoch % 20 == 0:
        print ('Epoch {}/{} => Loss: {:.2f}'.format(epoch+1, num_epochs, loss.item()))
os.makedirs('models', exist_ok=True)
torch.save(net.state_dict(), 'models/titanic_model.pt')
from code_02_moons_fun import plot_losses
plot_losses(losses)
# Report accuracy on the training set.
out_probs = net(input_tensor).detach().numpy()
out_classes = np.argmax(out_probs, axis=1)
print("Train Accuracy:", sum(out_classes == train_labels) / len(train_labels))
# Evaluate the model on the held-out test set.
test_input_tensor = torch.from_numpy(test_features).type(torch.FloatTensor)
out_probs = net(test_input_tensor).detach().numpy()
out_classes = np.argmax(out_probs, axis=1)
print("Test Accuracy:", sum(out_classes == test_labels) / len(test_labels))
#####################################
| [
"numpy.random.seed",
"os.makedirs",
"numpy.argmax",
"pandas.read_csv",
"torch.manual_seed",
"pandas.get_dummies",
"torch.nn.CrossEntropyLoss",
"torch.nn.Softmax",
"code_02_moons_fun.plot_losses",
"torch.nn.Linear",
"torch.nn.functional.softplus",
"torch.from_numpy"
] | [((376, 403), 'pandas.read_csv', 'pd.read_csv', (['"""titanic3.csv"""'], {}), "('titanic3.csv')\n", (387, 403), True, 'import pandas as pd\n'), ((1441, 1459), 'numpy.random.seed', 'np.random.seed', (['(10)'], {}), '(10)\n', (1455, 1459), True, 'import numpy as np\n'), ((2071, 2091), 'torch.manual_seed', 'torch.manual_seed', (['(0)'], {}), '(0)\n', (2088, 2091), False, 'import torch\n'), ((3092, 3122), 'torch.from_numpy', 'torch.from_numpy', (['train_labels'], {}), '(train_labels)\n', (3108, 3122), False, 'import torch\n'), ((3481, 3517), 'os.makedirs', 'os.makedirs', (['"""models"""'], {'exist_ok': '(True)'}), "('models', exist_ok=True)\n", (3492, 3517), False, 'import os\n'), ((3625, 3644), 'code_02_moons_fun.plot_losses', 'plot_losses', (['losses'], {}), '(losses)\n', (3636, 3644), False, 'from code_02_moons_fun import plot_losses\n'), ((3719, 3747), 'numpy.argmax', 'np.argmax', (['out_probs'], {'axis': '(1)'}), '(out_probs, axis=1)\n', (3728, 3747), True, 'import numpy as np\n'), ((3982, 4010), 'numpy.argmax', 'np.argmax', (['out_probs'], {'axis': '(1)'}), '(out_probs, axis=1)\n', (3991, 4010), True, 'import numpy as np\n'), ((531, 566), 'pandas.get_dummies', 'pd.get_dummies', (["titanic_data['sex']"], {}), "(titanic_data['sex'])\n", (545, 566), True, 'import pandas as pd\n'), ((595, 652), 'pandas.get_dummies', 'pd.get_dummies', (["titanic_data['embarked']"], {'prefix': '"""embark"""'}), "(titanic_data['embarked'], prefix='embark')\n", (609, 652), True, 'import pandas as pd\n'), ((680, 734), 'pandas.get_dummies', 'pd.get_dummies', (["titanic_data['pclass']"], {'prefix': '"""class"""'}), "(titanic_data['pclass'], prefix='class')\n", (694, 734), True, 'import pandas as pd\n'), ((2216, 2233), 'torch.nn.Linear', 'nn.Linear', (['(12)', '(12)'], {}), '(12, 12)\n', (2225, 2233), True, 'import torch.nn as nn\n'), ((2287, 2303), 'torch.nn.Linear', 'nn.Linear', (['(12)', '(8)'], {}), '(12, 8)\n', (2296, 2303), True, 'import torch.nn as nn\n'), ((2357, 2372), 
'torch.nn.Linear', 'nn.Linear', (['(8)', '(2)'], {}), '(8, 2)\n', (2366, 2372), True, 'import torch.nn as nn\n'), ((2397, 2414), 'torch.nn.Softmax', 'nn.Softmax', ([], {'dim': '(1)'}), '(dim=1)\n', (2407, 2414), True, 'import torch.nn as nn\n'), ((2441, 2462), 'torch.nn.CrossEntropyLoss', 'nn.CrossEntropyLoss', ([], {}), '()\n', (2460, 2462), True, 'import torch.nn as nn\n'), ((3019, 3051), 'torch.from_numpy', 'torch.from_numpy', (['train_features'], {}), '(train_features)\n', (3035, 3051), False, 'import torch\n'), ((3858, 3889), 'torch.from_numpy', 'torch.from_numpy', (['test_features'], {}), '(test_features)\n', (3874, 3889), False, 'import torch\n'), ((2032, 2045), 'torch.nn.functional.softplus', 'F.softplus', (['x'], {}), '(x)\n', (2042, 2045), True, 'import torch.nn.functional as F\n')] |
import numpy as np
import inspect
import os
# Directory containing this file; prepended to sys.path so the local hmf
# package (rather than any installed copy) is imported below.
LOCATION = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
import sys
sys.path.insert(0, LOCATION)
from hmf.transfer import Transfer
from hmf.transfer_models import EH_BAO
def rms(a):
    """Return the root-mean-square of *a* (echoed to stdout for debugging).

    Fix: the original evaluated sqrt(mean(a**2)) twice -- once for the
    print and once for the return; it is now computed once.
    """
    print(a)
    result = np.sqrt(np.mean(np.square(a)))
    print("RMS: ", result)
    return result
def check_close(t, t2, fit):
    """Switch *t* to transfer model *fit* and require its power spectrum
    to stay within order-unity mean relative difference of *t2*'s."""
    t.update(transfer_model=fit)
    relative_diff = np.abs((t.power - t2.power) / t.power)
    assert np.mean(relative_diff) < 1
def check_update(t, t2, k, v):
    """After updating *t* with {k: v}, its spectrum must differ from t2's
    measurably (> 1e-6) but not wildly (< 1) in mean relative terms."""
    t.update(**{k: v})
    rel = np.mean(np.abs((t.power - t2.power) / t.power))
    assert 1e-06 < rel < 1
def test_updates():
    """Yield one check_update case per tweakable Transfer parameter."""
    base = Transfer()
    reference = Transfer()
    cases = {"z": 0.1, "sigma_8": 0.82, "n": 0.95, "cosmo_params": {"H0": 68.0}}
    for name, value in cases.items():
        yield check_update, base, reference, name, value
def test_halofit():
    """Nonlinear power should match linear power at the largest scale and
    strongly exceed it at the smallest."""
    trans = Transfer(lnk_min=-20, lnk_max=20, dlnk=0.05, transfer_model="EH")
    print(EH_BAO._defaults)
    print("in test_transfer, params are: ", trans.transfer_params)
    assert np.isclose(trans.power[0], trans.nonlinear_power[0])
    assert 5 * trans.power[-1] < trans.nonlinear_power[-1]
def test_ehnobao():
    """EH with and without BAO should agree at the first wavenumber."""
    with_bao = Transfer(transfer_model="EH")
    without_bao = Transfer(transfer_model="EH_NoBAO")
    assert np.isclose(with_bao._unnormalised_lnT[0], without_bao._unnormalised_lnT[0], rtol=1e-5)
def test_bondefs():
    """The BondEfs transfer function should start normalised to 1."""
    trans = Transfer(transfer_model="BondEfs")
    print(np.exp(trans._unnormalised_lnT))
    assert np.isclose(np.exp(trans._unnormalised_lnT[0]), 1, rtol=1e-5)
# Following test is too slow... and would need to be updated whenever CAMB is updated...
# def test_data():
# cp = camb.CAMBparams()
# cp.set_matter_power(kmax=100.)
# t = Transfer(cosmo_model=LambdaCDM(Om0=0.3, Ode0=0.7, H0=70.0, Ob0=0.05), sigma_8=0.8,
# n=1, transfer_params={"camb_params":cp},
# lnk_min=np.log(1e-11), lnk_max=np.log(1e11))
# tdata = np.genfromtxt(LOCATION + "/data/transfer_for_hmf_tests.dat")
# pdata = np.genfromtxt(LOCATION + "/data/power_for_hmf_tests.dat")
# #assert rms(t._unnormalised_lnT - np.log(tdata[:, 1])) < 0.05 # Does better than 0.001 on my system...
# diff = t.power - pdata[:, 1]
# #print(t._unnormalised_lnT[400], t._unnormalised_power[400], t._power0[400])
# assert rms(t.power - pdata[:, 1]) < 0.001
| [
"numpy.abs",
"numpy.square",
"sys.path.insert",
"numpy.isclose",
"numpy.exp",
"inspect.currentframe",
"hmf.transfer.Transfer"
] | [((140, 168), 'sys.path.insert', 'sys.path.insert', (['(0)', 'LOCATION'], {}), '(0, LOCATION)\n', (155, 168), False, 'import sys\n'), ((692, 702), 'hmf.transfer.Transfer', 'Transfer', ([], {}), '()\n', (700, 702), False, 'from hmf.transfer import Transfer\n'), ((712, 722), 'hmf.transfer.Transfer', 'Transfer', ([], {}), '()\n', (720, 722), False, 'from hmf.transfer import Transfer\n'), ((929, 994), 'hmf.transfer.Transfer', 'Transfer', ([], {'lnk_min': '(-20)', 'lnk_max': '(20)', 'dlnk': '(0.05)', 'transfer_model': '"""EH"""'}), "(lnk_min=-20, lnk_max=20, dlnk=0.05, transfer_model='EH')\n", (937, 994), False, 'from hmf.transfer import Transfer\n'), ((1097, 1141), 'numpy.isclose', 'np.isclose', (['t.power[0]', 't.nonlinear_power[0]'], {}), '(t.power[0], t.nonlinear_power[0])\n', (1107, 1141), True, 'import numpy as np\n'), ((1221, 1250), 'hmf.transfer.Transfer', 'Transfer', ([], {'transfer_model': '"""EH"""'}), "(transfer_model='EH')\n", (1229, 1250), False, 'from hmf.transfer import Transfer\n'), ((1264, 1299), 'hmf.transfer.Transfer', 'Transfer', ([], {'transfer_model': '"""EH_NoBAO"""'}), "(transfer_model='EH_NoBAO')\n", (1272, 1299), False, 'from hmf.transfer import Transfer\n'), ((1312, 1387), 'numpy.isclose', 'np.isclose', (['t._unnormalised_lnT[0]', 'tnobao._unnormalised_lnT[0]'], {'rtol': '(1e-05)'}), '(t._unnormalised_lnT[0], tnobao._unnormalised_lnT[0], rtol=1e-05)\n', (1322, 1387), True, 'import numpy as np\n'), ((1415, 1449), 'hmf.transfer.Transfer', 'Transfer', ([], {'transfer_model': '"""BondEfs"""'}), "(transfer_model='BondEfs')\n", (1423, 1449), False, 'from hmf.transfer import Transfer\n'), ((1460, 1487), 'numpy.exp', 'np.exp', (['t._unnormalised_lnT'], {}), '(t._unnormalised_lnT)\n', (1466, 1487), True, 'import numpy as np\n'), ((1511, 1541), 'numpy.exp', 'np.exp', (['t._unnormalised_lnT[0]'], {}), '(t._unnormalised_lnT[0])\n', (1517, 1541), True, 'import numpy as np\n'), ((103, 125), 'inspect.currentframe', 'inspect.currentframe', ([], {}), 
'()\n', (123, 125), False, 'import inspect\n'), ((346, 358), 'numpy.square', 'np.square', (['a'], {}), '(a)\n', (355, 358), True, 'import numpy as np\n'), ((443, 481), 'numpy.abs', 'np.abs', (['((t.power - t2.power) / t.power)'], {}), '((t.power - t2.power) / t.power)\n', (449, 481), True, 'import numpy as np\n'), ((303, 315), 'numpy.square', 'np.square', (['a'], {}), '(a)\n', (312, 315), True, 'import numpy as np\n'), ((560, 598), 'numpy.abs', 'np.abs', (['((t.power - t2.power) / t.power)'], {}), '((t.power - t2.power) / t.power)\n', (566, 598), True, 'import numpy as np\n'), ((616, 654), 'numpy.abs', 'np.abs', (['((t.power - t2.power) / t.power)'], {}), '((t.power - t2.power) / t.power)\n', (622, 654), True, 'import numpy as np\n')] |
#!/usr/bin/python
# encoding: utf8
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.colors as colors
def unison_shuffled(a, b):
    """Return an iterator of (a[i], b[i]) pairs in one shared random order."""
    assert len(a) == len(b)
    order = np.random.permutation(len(a))
    return zip(a[order], b[order])
def non_monotone(x, w, theta):
    """Non-monotone neuron activation.

    Projects `x` onto `w`, shifts by `theta`, and passes the result through
    a Gaussian bump rescaled from (0, 1] into (-1, 1]: the output is 1 when
    w.x == theta and approaches -1 far from that hyperplane.
    """
    projection = np.dot(w.T, x) - theta
    bump = np.exp(-0.5 * np.square(projection))
    return 2 * bump - 1
# load data
# The CSV stores one sample per column; the transpose makes X (n_samples, 2)
X = np.genfromtxt("data/xor-X.csv", dtype=float, delimiter=',').T
Y = np.genfromtxt("data/xor-y.csv", dtype=float, delimiter=',')
n_examples = X.shape[0]
# initialize weights, and theta, and learning rate
theta = np.random.uniform(low=-0.99, high=0.99)
w = np.random.uniform(low=-0.99, high=0.99, size=(2))
eta_w = 0.005
eta_theta = 0.001
# Batch gradient descent on the squared error of the non-monotone neuron:
# gradients are accumulated over one shuffled pass and applied once per epoch.
for e in range(30):
    upd_theta = 0
    upd_weight = 0
    # random batch
    for x, y in unison_shuffled(X, Y):
    # for i in np.arange(n_examples):
        yhat = non_monotone(x, w, theta)
        dis = yhat - y
        # Recompute the activation's intermediate terms for the chain rule
        wx = np.dot(w.T, x) - theta
        exp = np.exp(-0.5 * np.square(wx))
        upd_weight += dis * 2 * exp * wx * x * -1
        upd_theta += dis * 2 * exp * wx
    w = w - eta_w * upd_weight
    theta = theta - eta_theta * upd_theta
    # Every second epoch: plot predictions and the decision surface
    if e % 2 == 0:
        fig = plt.figure()
        Yhat = [non_monotone(X[i], w, theta) for i in range(len(Y))]
        # Threshold the real-valued activations into class labels {-1, +1}
        Yhat = np.where(np.array(Yhat) > 0, 1, -1)
        correct_predictions = np.sum(Yhat == Y)
        plt.title(correct_predictions)
        plt.scatter(X[:,0], X[:,1], c = Yhat);
        # Dense 1000x1000 grid over the data's bounding box (padded by 0.1)
        x = np.linspace(np.amin(X[:,0])-0.1, np.amax(X[:,0])+0.1, 1000)
        y = np.linspace(np.amin(X[:,1])-0.1, np.amax(X[:,1])+0.1, 1000)
        CX, CY = np.meshgrid(x, y)
        zi = non_monotone(np.vstack((CX.ravel(),CY.ravel())), w, theta).reshape((1000,1000))
        cmap = colors.LinearSegmentedColormap.from_list("", ["blue","white","orange"])
        plt.contourf(x,y,zi, alpha=0.2, levels=np.linspace(np.amin(zi.ravel()), np.amax(zi.ravel()), 101), cmap=cmap, antialiased = True)
        # assumes the data set has exactly 200 samples — TODO confirm against data/xor-X.csv
        if correct_predictions == 200:
            plt.savefig("out/04/convergence_neuron.png", bbox_inches="tight", pad_inches=0)
            break
plt.show()
| [
"matplotlib.pyplot.title",
"numpy.random.uniform",
"matplotlib.colors.LinearSegmentedColormap.from_list",
"numpy.meshgrid",
"numpy.sum",
"matplotlib.pyplot.show",
"numpy.amin",
"matplotlib.pyplot.scatter",
"numpy.square",
"numpy.genfromtxt",
"numpy.amax",
"matplotlib.pyplot.figure",
"numpy.a... | [((455, 514), 'numpy.genfromtxt', 'np.genfromtxt', (['"""data/xor-y.csv"""'], {'dtype': 'float', 'delimiter': '""","""'}), "('data/xor-y.csv', dtype=float, delimiter=',')\n", (468, 514), True, 'import numpy as np\n'), ((600, 639), 'numpy.random.uniform', 'np.random.uniform', ([], {'low': '(-0.99)', 'high': '(0.99)'}), '(low=-0.99, high=0.99)\n', (617, 639), True, 'import numpy as np\n'), ((644, 691), 'numpy.random.uniform', 'np.random.uniform', ([], {'low': '(-0.99)', 'high': '(0.99)', 'size': '(2)'}), '(low=-0.99, high=0.99, size=2)\n', (661, 691), True, 'import numpy as np\n'), ((389, 448), 'numpy.genfromtxt', 'np.genfromtxt', (['"""data/xor-X.csv"""'], {'dtype': 'float', 'delimiter': '""","""'}), "('data/xor-X.csv', dtype=float, delimiter=',')\n", (402, 448), True, 'import numpy as np\n'), ((286, 300), 'numpy.dot', 'np.dot', (['w.T', 'x'], {}), '(w.T, x)\n', (292, 300), True, 'import numpy as np\n'), ((1228, 1240), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (1238, 1240), True, 'import matplotlib.pyplot as plt\n'), ((1392, 1409), 'numpy.sum', 'np.sum', (['(Yhat == Y)'], {}), '(Yhat == Y)\n', (1398, 1409), True, 'import numpy as np\n'), ((1418, 1448), 'matplotlib.pyplot.title', 'plt.title', (['correct_predictions'], {}), '(correct_predictions)\n', (1427, 1448), True, 'import matplotlib.pyplot as plt\n'), ((1457, 1494), 'matplotlib.pyplot.scatter', 'plt.scatter', (['X[:, 0]', 'X[:, 1]'], {'c': 'Yhat'}), '(X[:, 0], X[:, 1], c=Yhat)\n', (1468, 1494), True, 'import matplotlib.pyplot as plt\n'), ((1658, 1675), 'numpy.meshgrid', 'np.meshgrid', (['x', 'y'], {}), '(x, y)\n', (1669, 1675), True, 'import numpy as np\n'), ((1784, 1857), 'matplotlib.colors.LinearSegmentedColormap.from_list', 'colors.LinearSegmentedColormap.from_list', (['""""""', "['blue', 'white', 'orange']"], {}), "('', ['blue', 'white', 'orange'])\n", (1824, 1857), True, 'import matplotlib.colors as colors\n'), ((2153, 2163), 'matplotlib.pyplot.show', 'plt.show', ([], {}), 
'()\n', (2161, 2163), True, 'import matplotlib.pyplot as plt\n'), ((333, 346), 'numpy.square', 'np.square', (['wx'], {}), '(wx)\n', (342, 346), True, 'import numpy as np\n'), ((963, 977), 'numpy.dot', 'np.dot', (['w.T', 'x'], {}), '(w.T, x)\n', (969, 977), True, 'import numpy as np\n'), ((2046, 2125), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""out/04/convergence_neuron.png"""'], {'bbox_inches': '"""tight"""', 'pad_inches': '(0)'}), "('out/04/convergence_neuron.png', bbox_inches='tight', pad_inches=0)\n", (2057, 2125), True, 'import matplotlib.pyplot as plt\n'), ((1014, 1027), 'numpy.square', 'np.square', (['wx'], {}), '(wx)\n', (1023, 1027), True, 'import numpy as np\n'), ((1334, 1348), 'numpy.array', 'np.array', (['Yhat'], {}), '(Yhat)\n', (1342, 1348), True, 'import numpy as np\n'), ((1521, 1537), 'numpy.amin', 'np.amin', (['X[:, 0]'], {}), '(X[:, 0])\n', (1528, 1537), True, 'import numpy as np\n'), ((1542, 1558), 'numpy.amax', 'np.amax', (['X[:, 0]'], {}), '(X[:, 0])\n', (1549, 1558), True, 'import numpy as np\n'), ((1593, 1609), 'numpy.amin', 'np.amin', (['X[:, 1]'], {}), '(X[:, 1])\n', (1600, 1609), True, 'import numpy as np\n'), ((1614, 1630), 'numpy.amax', 'np.amax', (['X[:, 1]'], {}), '(X[:, 1])\n', (1621, 1630), True, 'import numpy as np\n')] |
#<NAME>
#Settlers of Catan, 2020
#Imports
from string import *
import numpy as np
from hexTile import *
from hexLib import *
from player import *
#import networkx as nx
#import matplotlib.pyplot as plt
import pygame
pygame.init()
#Class to implement Catan board logic
#Use a graph representation for the board
class catanBoard(hexTile, Vertex):
    """Catan board logic backed by a graph representation.

    Hex tiles live in `hexTileDict` (hexIndex -> hexTile), vertices in
    `boardGraph` (pixel coordinate -> Vertex), and `vertex_index_to_pixel_dict`
    maps vertex indices back to pixel coordinates.
    """
    #Object Creation - creates a random board configuration with hexTiles
    def __init__(self):
        self.hexTileDict = {} #Dict to store all hextiles, with hexIndex as key
        self.vertex_index_to_pixel_dict = {} #Dict to store the Vertices coordinates with vertex indices as keys
        self.boardGraph = {} #Dict to store the vertex objects with the pixelCoordinates as keys
        self.resourcesList = self.getRandomResourceList()
        self.edgeLength = 80 #Specify for hex size
        self.size = self.width, self.height = 1000, 800
        self.flat = Layout(layout_flat, Point(self.edgeLength, self.edgeLength), Point(self.width/2, self.height/2)) #specify Layout
        #Get a random permutation of indices 0-18 to use with the resource list
        randomIndices = np.random.permutation([i for i in range(len(self.resourcesList))])
        hexIndex_i = 0 #initialize hexIndex at 0
        print("Initializing Game Board...")
        #Generate the hexes and the graphs with the Index, Centers and Resources defined
        for rand_i in randomIndices:
            #Get the coordinates of the new hex, indexed by hexIndex_i
            hexCoords = self.getHexCoords(hexIndex_i)
            #Create the new hexTile with index and append + increment index
            newHexTile = hexTile(hexIndex_i, self.resourcesList[rand_i], hexCoords)
            if(newHexTile.resource.type == 'DESERT'): #Initialize robber on Desert
                newHexTile.robber = True
            self.hexTileDict[hexIndex_i] = newHexTile
            hexIndex_i += 1
        #Create the vertex graph
        self.vertexIndexCount = 0 #initialize vertex index count to 0
        self.generateVertexGraph()
        self.updatePorts() #Add the ports to the graph
        #Initialize DevCardStack
        self.devCardStack = {'KNIGHT':15, 'VP':5, 'MONOPOLY':2, 'ROADBUILDER':2, 'YEAROFPLENTY':2}
        return None

    def getHexCoords(self, hexInd):
        """Return the axial (q, r) coordinates of the hex with index `hexInd`."""
        #Dictionary to store Axial Coordinates (q, r) by hexIndex
        coordDict = {0:Axial_Point(0,0), 1:Axial_Point(0,-1), 2:Axial_Point(1,-1), 3:Axial_Point(1,0), 4:Axial_Point(0,1), 5:Axial_Point(-1,1), 6:Axial_Point(-1,0), 7:Axial_Point(0,-2), 8:Axial_Point(1,-2), 9:Axial_Point(2,-2), 10:Axial_Point(2,-1),
                        11:Axial_Point(2,0), 12:Axial_Point(1,1), 13:Axial_Point(0,2), 14:Axial_Point(-1,2), 15:Axial_Point(-2,2), 16:Axial_Point(-2,1), 17:Axial_Point(-2,0), 18:Axial_Point(-1,-1)}
        return coordDict[hexInd]

    #Function to generate a random permutation of resources
    def getRandomResourceList(self):
        """Build the 19 Resource objects (18 numbered + 1 desert) in random number order."""
        #Define Resources as a dict
        Resource_Dict = {'DESERT':1, 'ORE':3, 'BRICK':3, 'WHEAT':4, 'WOOD':4, 'SHEEP':4}
        #Get a random permutation of the numbers
        NumberList = np.random.permutation([2,3,3,4,4,5,5,6,6,8,8,9,9,10,10,11,11,12])
        numIndex = 0
        resourceList = []
        for r in Resource_Dict.keys():
            numberofResource = Resource_Dict[r]
            if(r != 'DESERT'):
                for n in range(numberofResource):
                    resourceList.append(Resource(r, NumberList[numIndex]))
                    numIndex += 1
            else:
                resourceList.append(Resource(r, None))
        return resourceList

    #Function to generate the entire board graph
    def generateVertexGraph(self):
        for hexTile in self.hexTileDict.values():
            hexTileCorners = polygon_corners(self.flat, hexTile.hex) #Get vertices of each hex
            #Create vertex graph with this list of corners
            self.updateVertexGraph(hexTileCorners, hexTile.index)
        #Once all hexTiles have been added get edges
        self.updateGraphEdges()

    #Function to update a graph of the board with each vertex as a node
    def updateVertexGraph(self, vertexCoordList, hexIndx):
        for v in vertexCoordList:
            #Check if vertex already exists - update adjacentHexList if it does
            if v in self.vertex_index_to_pixel_dict.values():
                for existingVertex in self.boardGraph.keys():
                    if(existingVertex == v):
                        self.boardGraph[v].adjacentHexList.append(hexIndx)
            else:#Create new vertex if it doesn't exist
                newVertex = Vertex(v, hexIndx, self.vertexIndexCount)
                self.vertex_index_to_pixel_dict[self.vertexIndexCount] = v #Create the index-pixel key value pair
                self.boardGraph[v] = newVertex
                self.vertexIndexCount += 1 #Increment index for future

    #Function to add edges to graph given all vertices
    def updateGraphEdges(self):
        # Two vertices are neighbors exactly when they are one hex-edge apart
        for v1 in self.boardGraph.keys():
            for v2 in self.boardGraph.keys():
                if(self.vertexDistance(v1, v2) == self.edgeLength):
                    self.boardGraph[v1].edgeList.append(v2)

    @staticmethod
    def vertexDistance(v1, v2):
        """Rounded Euclidean distance between two pixel points."""
        dist = ((v1.x - v2.x)**2 + (v1.y - v2.y)**2)**0.5
        return round(dist)

    #View the board graph info
    def printGraph(self):
        print(len(self.boardGraph))
        for node in self.boardGraph.keys():
            print("Pixel:{}, Index:{}, NeighborVertexCount:{}, AdjacentHexes:{}".format(node, self.boardGraph[node].vertexIndex, len(self.boardGraph[node].edgeList), self.boardGraph[node].adjacentHexList))

    #Update Board vertices with Port info
    def updatePorts(self):
        #Use this dictionary to map vertex indices to specific ports as per the game board - can add randomization later
        port_dict = {'2:1 BRICK':[43,44], '2:1 SHEEP':[33,34], '2:1 WOOD':[45,49], '2:1 WHEAT':[27,53], '2:1 ORE':[24,29], '3:1 ?':[30,31,36,39,41,42,51,52]}
        #Iterate thru each port and update vertex info
        for portType, portVertexIndex_list in port_dict.items():
            for v_index in portVertexIndex_list: #Each vertex
                vertexPixel = self.vertex_index_to_pixel_dict[v_index] #Get the pixel coordinates to update the boardgraph
                self.boardGraph[vertexPixel].port = portType #Update the port type

    #Function to Display Catan Board Info
    def displayBoardInfo(self):
        """Print info for every hex tile.

        Fix: iterate `hexTileDict` — the attribute `hexTileList` was never
        defined anywhere on this class, so the original always raised
        AttributeError when called.
        """
        for tile in self.hexTileDict.values():
            tile.displayHexInfo()
        return None

    #Function to get the list of potential roads a player can build.
    #Return these roads as a dictionary where key=vertex coordinates and values is the rect
    def get_potential_roads(self, player):
        colonisableRoads = {}
        #Check potential roads from each road the player already has
        for existingRoad in player.buildGraph['ROADS']:
            for vertex_i in existingRoad: #Iterate over both vertices of this road
                #Check neighbors from this vertex
                for indx, v_i in enumerate(self.boardGraph[vertex_i].edgeList):
                    if((self.boardGraph[vertex_i].edgeState[indx][1] == False) and (self.boardGraph[vertex_i].state['Player'] in [None, player])): #Edge currently does not have a road and vertex isn't colonised by another player
                        if((v_i, vertex_i) not in colonisableRoads.keys() and (vertex_i, v_i) not in colonisableRoads.keys()): #If the edge isn't already there in both its regular + opposite orientation
                            #Use boolean to keep track of potential roads
                            colonisableRoads[(vertex_i, v_i)] = True
        return colonisableRoads

    #Function to get available settlements for colonisation for a particular player
    #Return these settlements as a dict of vertices with their Rects
    def get_potential_settlements(self, player):
        colonisableVertices = {}
        #Check starting from each road the player already has
        for existingRoad in player.buildGraph['ROADS']:
            for vertex_i in existingRoad: #Iterate over both vertices of this road
                #Check if vertex isn't already in the potential settlements - to remove double checks
                if(vertex_i not in colonisableVertices.keys()):
                    if(self.boardGraph[vertex_i].isColonised): #Check if this vertex is already colonised
                        # NOTE(review): 'break' skips the road's remaining vertex; 'continue' may be intended — confirm
                        break
                    canColonise = True
                    for v_neighbor in self.boardGraph[vertex_i].edgeList: #Check each of the neighbors from this vertex
                        if(self.boardGraph[v_neighbor].isColonised):
                            canColonise = False
                            break
                    #If all checks are good add this vertex and its rect as the value
                    if(canColonise):
                        colonisableVertices[vertex_i] = True
        return colonisableVertices

    #Function to get available cities for colonisation for a particular player
    #Return these cities as a dict of vertex-vertexRect key value pairs
    def get_potential_cities(self, player):
        colonisableVertices = {}
        #Check starting from each settlement the player already has
        for existingSettlement in player.buildGraph['SETTLEMENTS']:
            colonisableVertices[existingSettlement] = True
        return colonisableVertices

    #Special function to get potential first settlements during setup phase
    def get_setup_settlements(self, player):
        colonisableVertices = {}
        #Check every vertex and every neighbor of that vertex, and if both are open then we can build a settlement there
        for vertexCoord in self.boardGraph.keys():
            canColonise = True
            potentialVertex = self.boardGraph[vertexCoord]
            if(potentialVertex.isColonised): #First check if vertex is colonised
                canColonise = False
            #Check each neighbor
            for v_neighbor in potentialVertex.edgeList:
                if(self.boardGraph[v_neighbor].isColonised): #Check if any of first neighbors are colonised
                    canColonise = False
                    break
            if(canColonise): #If the vertex is colonisable add it to the dict with its Rect
                colonisableVertices[vertexCoord] = True
        return colonisableVertices

    #Special function to get potential first roads during setup phase
    def get_setup_roads(self, player):
        colonisableRoads = {}
        #Can only build roads next to the latest existing player settlement
        latestSettlementCoords = player.buildGraph['SETTLEMENTS'][-1]
        for v_neighbor in self.boardGraph[latestSettlementCoords].edgeList:
            possibleRoad = (latestSettlementCoords, v_neighbor)
            colonisableRoads[possibleRoad] = True
        return colonisableRoads

    #Function to update boardGraph with Road by player
    def updateBoardGraph_road(self, v_coord1, v_coord2, player):
        #Update edge from first vertex v1
        for indx, v in enumerate(self.boardGraph[v_coord1].edgeList):
            if(v == v_coord2):
                self.boardGraph[v_coord1].edgeState[indx][0] = player
                self.boardGraph[v_coord1].edgeState[indx][1] = True
        #Update edge from second vertex v2
        for indx, v in enumerate(self.boardGraph[v_coord2].edgeList):
            if(v == v_coord1):
                self.boardGraph[v_coord2].edgeState[indx][0] = player
                self.boardGraph[v_coord2].edgeState[indx][1] = True

    #Function to update boardGraph with settlement on vertex v
    def updateBoardGraph_settlement(self, v_coord, player):
        self.boardGraph[v_coord].state['Player'] = player
        self.boardGraph[v_coord].state['Settlement'] = True
        self.boardGraph[v_coord].isColonised = True

    #Function to update boardGraph with city on vertex v
    def updateBoardGraph_city(self, v_coord, player):
        self.boardGraph[v_coord].state['Player'] = player
        self.boardGraph[v_coord].state['Settlement'] = False
        self.boardGraph[v_coord].state['City'] = True
        #Remove settlement from player's buildGraph
        player.buildGraph['SETTLEMENTS'].remove(v_coord)

    #Function to update boardGraph with Robber on hexTile
    def updateBoardGraph_robber(self, hexIndex):
        #Set all flags to false, then mark the new robber tile
        for hex_tile in self.hexTileDict.values():
            hex_tile.robber = False
        self.hexTileDict[hexIndex].robber = True

    #Function to get possible robber hexTiles
    #Return robber hex spots with their hexIndex - rect representations as key-value pairs
    def get_robber_spots(self):
        robberHexDict = {}
        for indx, hex_tile in self.hexTileDict.items():
            if(hex_tile.robber == False):
                robberHexDict[indx] = hex_tile
        return robberHexDict

    #Get a Dict of players to rob based on the hexIndex of the robber, with the circle Rect as the value
    def get_players_to_rob(self, hexIndex):
        #Extract all 6 vertices of this hexTile
        hexTile = self.hexTileDict[hexIndex]
        vertexList = polygon_corners(self.flat, hexTile.hex)
        playersToRobDict = {}
        for vertex in vertexList:
            if(self.boardGraph[vertex].state['Player'] != None): #There is a settlement on this vertex
                playerToRob = self.boardGraph[vertex].state['Player']
                if(playerToRob not in playersToRobDict.keys()): #only add a player once with his/her first settlement/city
                    playersToRobDict[playerToRob] = vertex
        return playersToRobDict

    #Function to get a hexTile with a particular number
    def getHexResourceRolled(self, diceRollNum):
        hexesRolled = [] #Empty list to store the hex index rolled (min 1, max 2)
        for hexTile in self.hexTileDict.values():
            if hexTile.resource.num == diceRollNum:
                hexesRolled.append(hexTile.index)
        return hexesRolled
"numpy.random.permutation",
"pygame.init"
] | [((218, 231), 'pygame.init', 'pygame.init', ([], {}), '()\n', (229, 231), False, 'import pygame\n'), ((3276, 3363), 'numpy.random.permutation', 'np.random.permutation', (['[2, 3, 3, 4, 4, 5, 5, 6, 6, 8, 8, 9, 9, 10, 10, 11, 11, 12]'], {}), '([2, 3, 3, 4, 4, 5, 5, 6, 6, 8, 8, 9, 9, 10, 10, 11, \n 11, 12])\n', (3297, 3363), True, 'import numpy as np\n')] |
# -*- coding: utf8 -*-
import sys
import os
import warnings
# Silence all warnings globally so library deprecation noise does not reach the console
warnings.filterwarnings("ignore")
# Add system path to use interactive tools
my_path = os.path.join(os.getcwd(), "bin")
if my_path not in sys.path:
    sys.path.append(my_path)
# Read the exchange node names: everything after the last '=' in
# exchanges.ini is treated as a comma-separated list of node labels
with open("exchanges.ini", "r") as f:
    exchange_nodes = f.read().split("=")[-1]
    exchange_nodes = [x.strip() for x in exchange_nodes.split(",")]
from visGraphHigh import GraphHigh
from visTableWidget import InfoTableWidget
from LDateEdit import CheckDataEdit
from qtpy import QtCore
from qtpy import QtGui
from qtpy import QtWidgets
from PyQt5.QtChart import QChart
from visgraph import GraphWork
from visCheckTable import CheckTable
from ZoomLineChart import RectZoomMoveView
from visDoubleRangeSlider import RangeSlider
from ComboCheckBox import ComboCheckBox
import numpy as np
from datetime import datetime
import VisStyle
def init_config():
    """Persist the module-level exchange node list into exchanges.ini."""
    ini = QtCore.QSettings("exchanges.ini", QtCore.QSettings.IniFormat)
    ini.setValue("Exchange-Nodes", exchange_nodes)
def get_config():
    """Read the saved exchange node list back from exchanges.ini."""
    ini = QtCore.QSettings("exchanges.ini", QtCore.QSettings.IniFormat)
    return ini.value("Exchange-Nodes")
class WorkerThreadSubGraph(QtCore.QThread):
    """Background thread that filters the graph and computes metrics on the sub graph.

    Emits `processSignal(percent, message)` as each stage completes, and
    finally `SendDataSignal([subgraph, nodesData])` with the result.
    """
    SendDataSignal = QtCore.Signal(int, str) if False else QtCore.Signal(list)  # emitted once with [graph, nodesData]
    processSignal = QtCore.Signal(int, str)
    def __init__(self, parent=None):
        super(WorkerThreadSubGraph, self).__init__(parent)
        self.working = True
    def SetData(self, G, work, filter_dic):
        # Stash inputs and launch run() on the worker thread.
        # `work` is presumably a GraphWork instance (see visgraph import) — TODO confirm
        self.G = G
        self.work = work
        self.filter_dic = filter_dic
        self.working = True
        self.start()
    def run(self):
        self.processSignal.emit(0, "Generating sub graph...")
        self.SubG = self.work.getSubGraphByFilter(self.G, self.filter_dic)
        if self.SubG is not None:
            # Compute centrality measures
            self.work.InDegreeCentrality(self.SubG)
            self.work.OutDegreeCentrality(self.SubG)
            self.work.DegreeCentrality(self.SubG)
            self.processSignal.emit(10, "Computing degree centrality...")
            self.work.BetweenessCentrality(self.SubG)
            self.processSignal.emit(20, "Computing betweeness centrality...")
            self.work.ClosenessCentrality(self.SubG)
            self.processSignal.emit(30, "Computing closeness centrality...")
            self.work.PagerankCentrality(self.SubG)
            self.processSignal.emit(40, "Computing PageRank centrality...")
            # Compute community measures
            self.work.LouvainCommunity(self.SubG)
            self.processSignal.emit(50, "Computing Louvain community...")
            self.work.LabelPropagationCommunity(self.SubG)
            self.processSignal.emit(60, "Computing label propagation community...")
            self.work.UnionFindCommunity(self.SubG)
            self.processSignal.emit(70, "Computing union find community...")
            # One [label] row per node, in graph iteration order
            nodesData = [[data["label"]] for n, data in self.SubG.nodes(data=True)]
            self.SendDataSignal.emit([self.SubG, nodesData])
        else:
            # No subgraph matched the filter: send the original graph back unchanged
            self.SendDataSignal.emit([self.G, []])
        self.working = False
class WorkerThreadGraph(QtCore.QThread):
    """Background thread computing all graph metrics, styling, and layout.

    Emits `processSignal(percent, message)` per stage and finally
    `SendDataSignal` with a 9-element list consumed by the main window.
    """
    SendDataSignal = QtCore.Signal(list)
    processSignal = QtCore.Signal(int, str)
    def __init__(self, parent=None):
        super(WorkerThreadGraph, self).__init__(parent)
        self.working = True
    def SetData(self, G, work):
        # Stash inputs and launch run() on the worker thread.
        # `work` is presumably a GraphWork instance (see visgraph import) — TODO confirm
        self.G = G
        self.work = work
        self.working = True
        self.start()
    def run(self):
        # Compute centrality measures
        self.work.InDegreeCentrality(self.G)
        self.work.OutDegreeCentrality(self.G)
        self.work.DegreeCentrality(self.G)
        self.processSignal.emit(10, "Computing degree centrality...")
        self.work.BetweenessCentrality(self.G)
        self.processSignal.emit(20, "Computing betweeness centrality...")
        self.work.ClosenessCentrality(self.G)
        self.processSignal.emit(30, "Computing closeness centrality...")
        self.work.PagerankCentrality(self.G)
        self.processSignal.emit(40, "Computing PageRank centrality...")
        # Compute community measures
        self.work.LouvainCommunity(self.G)
        self.processSignal.emit(50, "Computing Louvain community...")
        self.work.LabelPropagationCommunity(self.G)
        self.processSignal.emit(60, "Computing label propagation community...")
        self.work.UnionFindCommunity(self.G)
        self.processSignal.emit(70, "Computing union find community...")
        # Compute node sizes
        marksize = self.work.setGNodesSize(self.G)
        # Compute node colors
        colors = self.work.setGNodesColor(self.G)
        labels = self.work.getGNodesAttrList(self.G, "label")
        nodesData = [[data["label"]] for n, data in self.G.nodes(data=True)]
        DateRangeData = self.getDateRangeData()
        valueRangeData = self.work.getGEdgesAttrRange(self.G, "value_in_ether")
        exchange = self.work.getNodesByType(self.G)
        self.processSignal.emit(90, "Generating graph layout...")
        # Compute graph layout
        node_pos, edge_pos = self.work.pygraphviz_layout(
            self.G, prog="sfdp", bundle=False
        )
        self.processSignal.emit(100, "Computing graph layout...")
        # Fixed 9-element payload; consumer must unpack in this exact order
        self.SendDataSignal.emit(
            [
                marksize,
                colors,
                labels,
                node_pos,
                edge_pos,
                nodesData,
                DateRangeData,
                valueRangeData,
                exchange,
            ]
        )
        self.working = False
    def getDateRangeData(self):
        """Histogram edge timestamps per day.

        Returns [dates, counts]: one QDateTime per day from the earliest to the
        latest edge time_stamp, with the transaction count for each day (0 for
        days without edges). A single-edge graph returns [[date], [1]].
        """
        Attrs = [data["time_stamp"] for source, target, data in self.G.edges(data=True)]
        if len(Attrs) == 1:
            d = QtCore.QDateTime.fromString(str(Attrs[0])[:10], "yyyy-MM-dd")
            return [[d], [1]]
        startDate = QtCore.QDateTime.fromString(str(min(Attrs))[:10], "yyyy-MM-dd")
        endDate = QtCore.QDateTime.fromString(str(max(Attrs))[:10], "yyyy-MM-dd")
        # Count occurrences of each distinct timestamp value
        Attrs = np.unique(np.array(Attrs), return_counts=True)
        dataold = [
            QtCore.QDateTime.fromString(str(k)[:10], "yyyy-MM-dd") for k in Attrs[0]
        ]
        valueold = [v for v in Attrs[1]]
        data = []
        value = []
        num = startDate.daysTo(endDate)
        # Walk each day in the range, filling gaps with zero counts
        for i in range(num):
            d = startDate.addDays(1 * i)
            data.append(d)
            if d in dataold:
                idx = dataold.index(d)
                value.append(valueold[idx])
            else:
                value.append(0)
        return [data, value]
class GraphProcessBar(QtWidgets.QProgressBar):
    """Progress bar with a centered, transparent status label.

    Driven through `ProcessSignal(percent, message)`, which is connected to
    `setProValue` so worker threads can update it safely.
    """
    ProcessSignal = QtCore.Signal(int, str)
    def __init__(self, parent=None):
        super(GraphProcessBar, self).__init__(parent)
        self.setValue(0)
        self.setTextVisible(False)
        # Status label drawn over the bar, styled transparent.
        self.label = QtWidgets.QLabel()
        self.label.setObjectName("selfPro")
        self.label.setStyleSheet("QLabel#selfPro{background:transparent}")
        # Center the label between two expanding spacers.
        def expanding_spacer():
            return QtWidgets.QSpacerItem(
                1, 1, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Preferred
            )
        self.hbl = QtWidgets.QHBoxLayout(self)
        self.hbl.setSpacing(0)
        self.hbl.setContentsMargins(0, 0, 0, 0)
        self.hbl.addSpacerItem(expanding_spacer())
        self.hbl.addWidget(self.label)
        self.hbl.addSpacerItem(expanding_spacer())
        self.ProcessSignal.connect(self.setProValue)
    def setProValue(self, value, text):
        """Slot: update both the status text and the bar position."""
        self.label.setText(text)
        self.setValue(value)
class ToolWidget(QtWidgets.QWidget):
    """Top toolbar: pick a data source (gexf file or database query) and apply it.

    Emits `graphsignal(0, filename)` for file loads and `graphsignal2(form_data)`
    for database queries.
    """
    graphsignal = QtCore.Signal(int, str)
    graphsignal2 = QtCore.Signal(dict)
    def __init__(self, parent=None):
        super(ToolWidget, self).__init__(parent)
        self.hbl = QtWidgets.QHBoxLayout(self)
        self.hbl.setSpacing(0)
        self.hbl.setContentsMargins(5, 0, 5, 0)
        # Source selector; index 0 = file page, index 1 = database page
        self.type_combo = QtWidgets.QComboBox()
        self.type_combo.addItem(u"From File")
        self.type_combo.addItem(u"From DataBase")
        self.type_combo.setCurrentIndex(1)
        self.type_combo.currentIndexChanged.connect(self.setType)
        self.stackwidget = QtWidgets.QStackedWidget()
        # Page 0: pick a .gexf file
        fileWidget = QtWidgets.QWidget()
        hbl = QtWidgets.QHBoxLayout(fileWidget)
        hbl.setSpacing(0)
        hbl.setContentsMargins(5, 0, 5, 0)
        self.dataFile = QtWidgets.QLineEdit()
        self.file_button = QtWidgets.QPushButton("File")
        self.file_button.setToolTip(u"select gexf File")
        self.file_button.clicked.connect(self.getFile)
        hbl.addWidget(self.dataFile)
        hbl.addWidget(self.file_button)
        # Page 1: exchange selection + date range + graph type
        databaseWidget = QtWidgets.QWidget()
        hbl2 = QtWidgets.QHBoxLayout(databaseWidget)
        hbl2.setSpacing(0)
        hbl2.setContentsMargins(5, 0, 5, 0)
        self.exchangesWidget = ComboCheckBox()
        self.exchangesWidget.setFixedWidth(300)
        exchanges = get_config()
        exchanges = sorted(exchanges)
        self.exchangesWidget.addItems(exchanges)
        self.dataRangeWidget = CheckDataEdit(mysname=u"From:", myename=u" To:")
        self.database_button = QtWidgets.QPushButton("Search")
        self.typedataCheck = QtWidgets.QComboBox()
        self.typedataCheck.addItems(
            [
                "Money Flow Graph (MFG)",
                "Contract Creation Graph (CCG)",
                "Contract Invocation Graph (CIG)",
            ]
        )
        hbl2.addWidget(self.exchangesWidget)
        hbl2.addWidget(self.dataRangeWidget)
        hbl2.addWidget(self.typedataCheck)
        self.apply_button = QtWidgets.QPushButton("Apply")
        self.apply_button.setToolTip(u"Apply")
        self.apply_button.clicked.connect(self.apply)
        self.stackwidget.addWidget(fileWidget)
        self.stackwidget.addWidget(databaseWidget)
        self.stackwidget.setCurrentIndex(1)
        self.hbl.addWidget(self.type_combo)
        self.hbl.addWidget(self.stackwidget)
        self.hbl.addWidget(self.apply_button)
    def setType(self, idx):
        """Slot: switch the stacked page to match the source selector."""
        self.stackwidget.setCurrentIndex(idx)
    def getFile(self):
        """Open a file dialog and write the chosen .gexf path into the line edit."""
        options = QtWidgets.QFileDialog.Options()
        options |= QtWidgets.QFileDialog.DontUseNativeDialog
        fileName, _ = QtWidgets.QFileDialog.getOpenFileName(
            self,
            u"Load gexf file",
            self.dataFile.text(),
            u"Gexf Files (*.gexf)",
            options=options,
        )
        if fileName:
            self.dataFile.setText(fileName)
    def apply(self):
        """Emit the load request for the currently visible source page.

        File page -> graphsignal(0, path); database page -> graphsignal2 with
        start_date/end_date (datetime), exchange_nodes and graph_type.
        """
        idx = self.stackwidget.currentIndex()
        if idx == 0:
            fileName = self.dataFile.text()
            self.graphsignal.emit(0, fileName)
        else:
            # Fix: removed the unused local `minData = self.dataRangeWidget`
            form_data = {}
            startDate = self.dataRangeWidget.sdataedit.date()
            endDate = self.dataRangeWidget.edataedit.date()
            form_data["start_date"] = datetime.strptime(
                startDate.toString("yyyy-MM-dd"), "%Y-%m-%d"
            )
            form_data["end_date"] = datetime.strptime(
                endDate.toString("yyyy-MM-dd"), "%Y-%m-%d"
            )
            form_data["exchange_nodes"] = self.exchangesWidget.Outputlist
            form_data["graph_type"] = self.typedataCheck.currentText()
            self.graphsignal2.emit(form_data)
class Radiodemo(QtWidgets.QWidget):
    """A labelled row of radio buttons; emits `selectSig` with the chosen button's text."""
    selectSig = QtCore.Signal(str)
    def __init__(self, title, keys, parent=None):
        super(Radiodemo, self).__init__(parent)
        row = QtWidgets.QHBoxLayout(self)
        row.setSpacing(0)
        row.setContentsMargins(0, 0, 0, 0)
        row.addWidget(QtWidgets.QLabel(title))
        self.cs_group = QtWidgets.QButtonGroup()
        for index, key in enumerate(keys):
            option = QtWidgets.QRadioButton(key)
            # Pre-select the first option.
            if index == 0:
                option.setChecked(True)
            row.addWidget(option)
            self.cs_group.addButton(option)
        self.cs_group.buttonClicked.connect(self.btnstate)
    def btnstate(self, btn):
        """Relay a checked button's label through selectSig."""
        if btn.isChecked():
            self.selectSig.emit(btn.text())
class ValueRangeWidget(QtWidgets.QWidget):
    """Double-ended range slider with read-only min/max fields and a reset button.

    Emits `rangeSig([low, high])` when the selection changes or is reset.
    """
    rangeSig = QtCore.Signal(list)
    # Fix: the default was a mutable list ([1.01, 100.999]) — a classic shared
    # mutable-default pitfall. A tuple keeps the same values immutably and is
    # backward compatible because `valerange` is only ever read by index.
    def __init__(self, valerange=(1.01, 100.999), parent=None):
        super(ValueRangeWidget, self).__init__(parent)
        self.valerange = valerange
        self.vbl = QtWidgets.QVBoxLayout(self)
        self.vbl.setSpacing(0)
        self.vbl.setContentsMargins(0, 0, 0, 0)
        hbl = QtWidgets.QHBoxLayout()
        hbl.setSpacing(0)
        hbl.setContentsMargins(0, 0, 0, 0)
        # Validator restricting the (read-only) edits to the allowed range
        pDoubleValidator = QtGui.QDoubleValidator()
        pDoubleValidator.setRange(valerange[0], valerange[1])
        pDoubleValidator.setNotation(QtGui.QDoubleValidator.StandardNotation)
        # Set level of accuracy
        pDoubleValidator.setDecimals(8)
        self.minEdit = QtWidgets.QLineEdit()
        self.minEdit.setFixedWidth(120)
        self.minEdit.setValidator(pDoubleValidator)
        self.maxEdit = QtWidgets.QLineEdit()
        self.maxEdit.setFixedWidth(120)
        self.maxEdit.setValidator(pDoubleValidator)
        self.minEdit.setReadOnly(True)
        self.maxEdit.setReadOnly(True)
        self.refreshBtn = QtWidgets.QPushButton("Reset", self)
        openPicture = self.style().standardIcon(QtWidgets.QStyle.SP_BrowserReload)
        self.refreshBtn.setIcon(openPicture)
        self.refreshBtn.setStyleSheet("border:none;")  # Remove borders
        pal = self.refreshBtn.palette()
        pal.setColor(QtGui.QPalette.ButtonText, QtGui.QColor("#FF6347"))
        self.refreshBtn.setPalette(pal)
        self.refreshBtn.setStyleSheet("QPushButton{background:transparent;border:0px;}")
        self.refreshBtn.clicked.connect(self.resetRange)
        hbl.addSpacerItem(
            QtWidgets.QSpacerItem(
                1, 1, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Preferred
            )
        )
        self.label = QtWidgets.QLabel("")
        hbl.addWidget(self.label)
        label = QtWidgets.QLabel("From")
        label.setStyleSheet("QLabel{background:transparent;border:0px;}")
        hbl.addWidget(label)
        hbl.addWidget(self.minEdit)
        label = QtWidgets.QLabel(" To")
        label.setStyleSheet("QLabel{background:transparent;border:0px;}")
        hbl.addWidget(label)
        hbl.addWidget(self.maxEdit)
        hbl.addWidget(self.refreshBtn)
        self.slider = RangeSlider()
        self.slider.setMinimum(valerange[0])
        self.slider.setMaximum(valerange[1])
        self.slider.setOrientation(QtCore.Qt.Horizontal)
        self.slider.rangeValueChanged.connect(self.sliderChange)
        self.slider.setLowValue(self.valerange[0])
        self.slider.setHighValue(self.valerange[1])
        self.vbl.addLayout(hbl)
        self.vbl.addWidget(self.slider)
    def setRanges(self, valerange):
        """Rebind the slider and edits to a new [min, max] without re-emitting rangeSig."""
        self.valerange = valerange
        # Temporarily disconnect so programmatic changes don't fire sliderChange
        self.slider.rangeValueChanged.disconnect()
        self.slider.setMinimum(self.valerange[0])
        self.slider.setMaximum(self.valerange[1])
        self.slider.setLowValue(self.valerange[0])
        self.slider.setHighValue(self.valerange[1])
        self.minEdit.setText(str(valerange[0]))
        self.maxEdit.setText(str(valerange[1]))
        self.slider.rangeValueChanged.connect(self.sliderChange)
    def minEditFinish(self):
        # NOTE(review): dead code — the parsed value is never used and the slot
        # is never connected in this file; kept for interface compatibility.
        value = float(self.minEdit.text())
    def sliderChange(self, value, value2):
        """Slot: mirror the slider's [low, high] into the edits and re-emit rangeSig."""
        self.minEdit.setText(str(value))
        self.maxEdit.setText(str(value2))
        self.rangeSig.emit([value, value2])
    def resetRange(self):
        """Slot: restore the slider to the full configured range and emit it."""
        self.slider.setLowValue(self.valerange[0])
        self.slider.setHighValue(self.valerange[1])
        self.rangeSig.emit(self.valerange)
class ControlWidget(QtWidgets.QWidget):
    """Filter panel combining a node table, a zoomable date histogram,
    a value-range slider, centrality/community radio groups, and
    Reset/Apply buttons. Pure layout construction; no signals of its own.
    """
    def __init__(self, parent=None):
        super(ControlWidget, self).__init__(parent)
        self.setFixedHeight(240)
        self.vbl = QtWidgets.QVBoxLayout(self)
        self.vbl.setSpacing(0)
        self.vbl.setContentsMargins(0, 0, 0, 0)
        # Top row: node table on the left, histogram + range slider on the right
        hbl = QtWidgets.QHBoxLayout()
        hbl.setSpacing(0)
        hbl.setContentsMargins(0, 0, 0, 0)
        self.nodetable1 = CheckTable()
        self.nodetable1.setFixedWidth(300)
        widget = QtWidgets.QWidget()
        vbl = QtWidgets.QVBoxLayout(widget)
        vbl.setSpacing(0)
        vbl.setContentsMargins(5, 5, 5, 5)
        # Date histogram rendered as a bar chart with rectangular zoom/pan
        self.ZoomChartView = RectZoomMoveView()
        self.ZoomChartView.setStyleSheet("border:none;")  # Remove borders
        self.ZoomChartView.verticalScrollBar().setDisabled(True)
        self.ZoomChartView.setVerticalScrollBarPolicy(1)
        self.ZoomChartView.setRenderHint(QtGui.QPainter.Antialiasing)
        self.ZoomChartView.setRangeColor("#666666")
        self.zoomChart = self.ZoomChartView.chart()
        self.zoomChart.setBackgroundVisible(False)
        self.zoomChart.setAnimationOptions(QChart.SeriesAnimations)
        self.zoomChart.legend().hide()
        self.ZoomChartView.initSeries(chartTypes="Bar")
        self.valueRangeWidget = ValueRangeWidget()
        vbl.addWidget(self.ZoomChartView)
        vbl.addWidget(self.valueRangeWidget)
        hbl.addWidget(self.nodetable1)
        hbl.addWidget(widget)
        # Radio groups selecting which metric drives node size/color
        self.nodesizeWidget = Radiodemo(
            "Centrality:",
            ["InDegree", "OutDegree", "Degree", "Betweeness", "Closeness", "PageRank"],
        )
        self.nodecolorWidget = Radiodemo(
            "Community:", ["Louvain", "Label propagation", "Union find"]
        )
        # Bottom row: Reset/Apply aligned to the right via an expanding spacer
        hbl2 = QtWidgets.QHBoxLayout()
        hbl2.setSpacing(0)
        hbl2.setContentsMargins(0, 0, 0, 0)
        self.resetBtn = QtWidgets.QPushButton("Reset")
        self.applyBtn = QtWidgets.QPushButton("Apply")
        hbl2.addSpacerItem(
            QtWidgets.QSpacerItem(
                1, 1, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Preferred
            )
        )
        hbl2.addWidget(self.resetBtn)
        hbl2.addWidget(self.applyBtn)
        self.vbl.addLayout(hbl)
        self.vbl.addWidget(self.nodesizeWidget)
        self.vbl.addWidget(self.nodecolorWidget)
        self.vbl.addLayout(hbl2)
class MainWidget(QtWidgets.QMainWindow):
    """Main window of the GraphViz application.

    Owns the toolbar (data loading), the central graph view, the bottom
    ``ControlWidget`` (filters), a right-hand dock with node attributes,
    and the background worker threads that build/filter the graph.
    """
    def __init__(self, parent=None):
        super(MainWidget, self).__init__(parent)
        self.setWindowTitle(self.tr("GraphViz"))
        self.setWindowIcon(QtGui.QIcon("GraphViz.ico"))
        # Toolbar hosts the data-source widget; its two signals start a build.
        self.toolbar = self.addToolBar("tool")
        self.toolwidget = ToolWidget()
        self.toolwidget.graphsignal.connect(self.startWork)
        self.toolwidget.graphsignal2.connect(self.startWork2)
        self.toolbar.addWidget(self.toolwidget)
        mwidget = QtWidgets.QWidget()
        self.vbl = QtWidgets.QVBoxLayout(mwidget)
        self.vbl.setSpacing(0)
        self.vbl.setContentsMargins(0, 0, 0, 0)
        # Page 0: empty placeholder while loading; page 1: the graph view.
        self.stackwidget = QtWidgets.QStackedWidget()
        self.controlWidget = ControlWidget()
        self.controlWidget.resetBtn.clicked.connect(self.resetFilter)
        self.controlWidget.applyBtn.clicked.connect(self.applyFilter)
        self.process = GraphProcessBar()
        self.process.setVisible(False)
        self.vbl.addWidget(self.stackwidget)
        self.vbl.addWidget(self.controlWidget)
        self.vbl.addWidget(self.process)
        self.centralWidget = GraphHigh()
        self.centralWidget.draw_init()
        self.centralWidget.neighbors_signal.connect(self.setNeighbors)
        self.stackwidget.addWidget(QtWidgets.QWidget())
        self.stackwidget.addWidget(self.centralWidget)
        self.setCentralWidget(mwidget)
        self.stackwidget.setCurrentIndex(0)
        # Right-hand dock: free-text node info on top, node table below.
        self.dockGraph = QtWidgets.QDockWidget(self.tr("Node attributes"), self)
        self.dockGraph.setFixedWidth(350)
        self.dockGraph.setFeatures(
            QtWidgets.QDockWidget.DockWidgetFloatable
            | QtWidgets.QDockWidget.DockWidgetMovable
        )
        self.dockGraph.setAllowedAreas(
            QtCore.Qt.LeftDockWidgetArea | QtCore.Qt.RightDockWidgetArea
        )
        widget = QtWidgets.QWidget()
        vbl = QtWidgets.QVBoxLayout(widget)
        vbl.setSpacing(0)
        vbl.setContentsMargins(0, 0, 0, 0)
        self.textInfo = QtWidgets.QTextEdit()
        self.textInfo.setReadOnly(True)
        self.graphNodesWidget = InfoTableWidget()
        self.graphNodesWidget.dbclickedSig.connect(
            self.centralWidget.updateMarkerVisible
        )
        vbl.addWidget(self.textInfo)
        vbl.addWidget(self.graphNodesWidget)
        self.dockGraph.setWidget(widget)
        self.addDockWidget(QtCore.Qt.RightDockWidgetArea, self.dockGraph)
        # Hidden until the first graph is built (see InitGraph).
        self.dockGraph.hide()
        self.controlWidget.hide()
        self.work = GraphWork()
        # Node attribute names shown/used throughout the UI.
        self.nodesAttrs = [
            "label",
            "node_type",
            "InDegree",
            "OutDegree",
            "Degree",
            "Betweeness",
            "Closeness",
            "PageRank",
            "Louvain",
            "Label propagation",
            "Union find",
        ]
    def setNeighbors(self, info, values):
        """Show `info` text and refresh the node table for the current selection.

        values is None -> show all nodes; [] -> show current subgraph nodes;
        otherwise -> show the given neighbor values, one per row.
        """
        if values is None:
            self.graphNodesWidget.InitTable(["All Nodes"], self.nodesData)
        elif values == []:
            self.graphNodesWidget.InitTable(["Sub Nodes"], self.NeighborsnodesData)
        else:
            self.graphNodesWidget.InitTable(["Neighbors"], [[v] for v in values])
        self.textInfo.setText(info)
    def startWork(self, idx, filename):
        """Build a graph from a file and hand it to the background worker."""
        if filename:
            # Reset UI state before (re)building.
            self.graphNodesWidget.InitTable([""], [])
            self.dockGraph.hide()
            self.controlWidget.hide()
            self.setDisConnect()
            self.stackwidget.setCurrentIndex(0)
            self.subG = None
            self.G, num = self.work.readFile(filename)
            if num > 0:
                self.process.setVisible(True)
                self.workthread = WorkerThreadGraph(self)
                self.workthread.SendDataSignal.connect(self.InitGraph)
                self.workthread.processSignal.connect(self.changeProcess)
                self.workthread.SetData(self.G, self.work)
            else:
                QtWidgets.QMessageBox.information(self, "Info", ("The Graph is Empty!"))
    def startWork2(self, form_data):
        """Build a graph from a database query and hand it to the worker."""
        self.work = GraphWork()
        self.subG = None
        self.G, num = self.work.get_from_db(form_data)
        self.graphNodesWidget.InitTable([""], [])
        self.dockGraph.hide()
        self.controlWidget.hide()
        self.setDisConnect()
        self.stackwidget.setCurrentIndex(0)
        # Label the value-range slider according to the selected data type.
        idx = self.toolwidget.typedataCheck.currentIndex()
        if idx == 2:
            self.controlWidget.valueRangeWidget.label.setText("  Number of calls  ")
        elif idx == 0:
            self.controlWidget.valueRangeWidget.label.setText("  Value in ether  ")
        if num > 0:
            self.process.setVisible(True)
            self.workthread = WorkerThreadGraph(self)
            self.workthread.SendDataSignal.connect(self.InitGraph)
            self.workthread.processSignal.connect(self.changeProcess)
            self.workthread.SetData(self.G, self.work)
        else:
            QtWidgets.QMessageBox.information(self, "Info", ("The Graph is Empty!"))
    def changeProcess(self, value, text):
        """Forward worker progress updates to the progress bar."""
        self.process.ProcessSignal.emit(value, text)
    def InitGraph(self, values):
        """Receive the fully-computed graph layout from the worker and show it."""
        (
            marksize,
            colors,
            labels,
            node_pos,
            edge_pos,
            self.nodesData,
            DateRangeData,
            valueRangeData,
            exchange,
        ) = values
        # Number of nodes
        npts = self.G.number_of_nodes()
        # Number of edges
        nlinks = self.G.number_of_edges()
        self.valueRangeData = list(valueRangeData)
        if self.valueRangeData == [0, 0]:
            self.controlWidget.valueRangeWidget.hide()
        else:
            self.controlWidget.valueRangeWidget.show()
            self.controlWidget.valueRangeWidget.setRanges(self.valueRangeData)
        self.controlWidget.ZoomChartView.setData(DateRangeData[0], DateRangeData[1])
        self.graphNodesWidget.InitTable(["All Nodes"], self.nodesData)
        self.centralWidget.draw_init()
        self.centralWidget.init_data(
            self.G, marksize, colors, labels, node_pos, edge_pos, npts, nlinks
        )
        self.controlWidget.nodetable1.initData(["exchange"], exchange)
        self.process.setVisible(False)
        self.process.ProcessSignal.emit(0, "")
        # Default: first centrality and first community option selected.
        self.controlWidget.nodesizeWidget.cs_group.buttons()[0].setChecked(True)
        self.controlWidget.nodecolorWidget.cs_group.buttons()[0].setChecked(True)
        self.dockGraph.show()
        self.controlWidget.show()
        self.stackwidget.setCurrentIndex(1)
        self.controlWidget.ZoomChartView.resetView()
        rect = self.controlWidget.ZoomChartView.chart().plotArea()
        self.controlWidget.ZoomChartView.parentRect.setRect(rect)
        self.setConnect()
        # Active filters, keyed by edge attribute name.
        self.filter_dic = {"edges": {}}
    def setDisConnect(self):
        """Best-effort disconnect of all filter/control signals.

        Each disconnect is wrapped individually because disconnecting a
        signal that has no connections raises; failures are deliberately
        ignored.
        """
        try:
            self.controlWidget.nodesizeWidget.selectSig.disconnect()
        except:
            pass
        try:
            self.controlWidget.nodecolorWidget.selectSig.disconnect()
        except:
            pass
        try:
            self.controlWidget.ZoomChartView.rangeSig.disconnect()
        except:
            pass
        try:
            self.controlWidget.valueRangeWidget.rangeSig.disconnect()
        except:
            pass
        try:
            self.controlWidget.nodetable1.sendData.disconnect()
        except:
            pass
    def setConnect(self):
        """Wire the control-panel signals to their graph-update handlers."""
        self.controlWidget.nodesizeWidget.selectSig.connect(
            self.centralWidget.updateMarkersSize
        )
        self.controlWidget.nodecolorWidget.selectSig.connect(
            self.centralWidget.updateMarkersColor
        )
        self.controlWidget.ZoomChartView.rangeSig.connect(self.setFilterDate)
        self.controlWidget.valueRangeWidget.rangeSig.connect(self.setFilterValue)
        self.controlWidget.nodetable1.sendData.connect(self.setFilterNode)
    def resetFilter(self):
        """Clear every filter and restore the full-graph view."""
        self.controlWidget.nodetable1.header.headerClick(True)
        # Disconnect first so the programmatic resets below don't re-filter.
        self.setDisConnect()
        self.controlWidget.nodesizeWidget.cs_group.buttons()[0].setChecked(True)
        self.controlWidget.nodecolorWidget.cs_group.buttons()[0].setChecked(True)
        self.controlWidget.ZoomChartView.BtnsWidget.refreshBtn.click()
        self.controlWidget.valueRangeWidget.refreshBtn.click()
        self.controlWidget.nodetable1.myModel.headerClick(True)
        self.graphNodesWidget.InitTable(["All Nodes"], self.nodesData)
        self.filter_dic = {"edges": {}}
        self.setConnect()
        self.centralWidget.setSubG(None)
        self.centralWidget.updateSubGVisible()
    def applyFilter(self):
        """Apply the accumulated `filter_dic` in a background worker thread."""
        self.process.setVisible(True)
        self.workthreadsub = WorkerThreadSubGraph(self)
        self.workthreadsub.SendDataSignal.connect(self.subGraph)
        self.workthreadsub.processSignal.connect(self.changeProcess)
        self.workthreadsub.SetData(self.G, self.work, self.filter_dic)
    def subGraph(self, values):
        """Receive the filtered subgraph from the worker and display it."""
        self.process.setVisible(False)
        self.process.ProcessSignal.emit(0, "")
        SubG, self.NeighborsnodesData = values
        if SubG == self.G:
            # Filter kept everything: fall back to the full-graph view.
            self.graphNodesWidget.InitTable(["All Nodes"], self.nodesData)
            self.centralWidget.setSubG(None)
        else:
            self.graphNodesWidget.InitTable(["Sub Nodes"], self.NeighborsnodesData)
            self.centralWidget.setSubG(SubG)
        self.centralWidget.updateSubGVisible()
    def setFilterDate(self, value):
        """Record a time-stamp filter; remove it when the full range is selected."""
        ranges = [
            self.controlWidget.ZoomChartView.mintimeData.toString(
                "yyyy-MM-dd HH:mm:ss"
            ),
            self.controlWidget.ZoomChartView.maxtimeData.toString(
                "yyyy-MM-dd HH:mm:ss"
            ),
        ]
        if value == ranges:
            if "time_stamp" in self.filter_dic["edges"]:
                self.filter_dic["edges"].pop("time_stamp")
        else:
            self.filter_dic["edges"]["time_stamp"] = {"value": value, "type": "time"}
    def setFilterValue(self, value):
        """Record a value filter; remove it when the full range is selected."""
        if value == self.valueRangeData:
            if "value_in_ether" in self.filter_dic["edges"]:
                self.filter_dic["edges"].pop("value_in_ether")
        else:
            self.filter_dic["edges"]["value_in_ether"] = {
                "value": value,
                "type": "float",
            }
    def setFilterNode(self, value):
        """Record a node-label filter; remove it when every node is checked."""
        if len(value) == len(self.controlWidget.nodetable1.myModel._data):
            if "label" in self.filter_dic["edges"]:
                self.filter_dic["edges"].pop("label")
        else:
            self.filter_dic["edges"]["label"] = {"value": value, "type": "list"}
if __name__ == "__main__":
    # Launch the GraphViz application with the bundled Qt stylesheet.
    app = QtWidgets.QApplication(sys.argv)
    qss_file = QtCore.QFile(":pic/style.qss")
    qss_file.open(QtCore.QFile.ReadOnly)
    raw_sheet = qss_file.readAll()
    app.setStyleSheet(str(raw_sheet, encoding="utf8"))
    main_window = MainWidget()
    main_window.show()
    sys.exit(app.exec_())
| [
"qtpy.QtGui.QIcon",
"qtpy.QtCore.QSettings",
"qtpy.QtWidgets.QTextEdit",
"qtpy.QtWidgets.QApplication",
"qtpy.QtWidgets.QMessageBox.information",
"sys.path.append",
"qtpy.QtWidgets.QHBoxLayout",
"qtpy.QtWidgets.QLabel",
"qtpy.QtWidgets.QSpacerItem",
"qtpy.QtWidgets.QVBoxLayout",
"visGraphHigh.Gr... | [((66, 99), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {}), "('ignore')\n", (89, 99), False, 'import warnings\n'), ((168, 179), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (177, 179), False, 'import os\n'), ((222, 246), 'sys.path.append', 'sys.path.append', (['my_path'], {}), '(my_path)\n', (237, 246), False, 'import sys\n'), ((942, 1003), 'qtpy.QtCore.QSettings', 'QtCore.QSettings', (['"""exchanges.ini"""', 'QtCore.QSettings.IniFormat'], {}), "('exchanges.ini', QtCore.QSettings.IniFormat)\n", (958, 1003), False, 'from qtpy import QtCore\n'), ((1100, 1161), 'qtpy.QtCore.QSettings', 'QtCore.QSettings', (['"""exchanges.ini"""', 'QtCore.QSettings.IniFormat'], {}), "('exchanges.ini', QtCore.QSettings.IniFormat)\n", (1116, 1161), False, 'from qtpy import QtCore\n'), ((1303, 1322), 'qtpy.QtCore.Signal', 'QtCore.Signal', (['list'], {}), '(list)\n', (1316, 1322), False, 'from qtpy import QtCore\n'), ((1344, 1367), 'qtpy.QtCore.Signal', 'QtCore.Signal', (['int', 'str'], {}), '(int, str)\n', (1357, 1367), False, 'from qtpy import QtCore\n'), ((3305, 3324), 'qtpy.QtCore.Signal', 'QtCore.Signal', (['list'], {}), '(list)\n', (3318, 3324), False, 'from qtpy import QtCore\n'), ((3346, 3369), 'qtpy.QtCore.Signal', 'QtCore.Signal', (['int', 'str'], {}), '(int, str)\n', (3359, 3369), False, 'from qtpy import QtCore\n'), ((6923, 6946), 'qtpy.QtCore.Signal', 'QtCore.Signal', (['int', 'str'], {}), '(int, str)\n', (6936, 6946), False, 'from qtpy import QtCore\n'), ((8072, 8095), 'qtpy.QtCore.Signal', 'QtCore.Signal', (['int', 'str'], {}), '(int, str)\n', (8085, 8095), False, 'from qtpy import QtCore\n'), ((8116, 8135), 'qtpy.QtCore.Signal', 'QtCore.Signal', (['dict'], {}), '(dict)\n', (8129, 8135), False, 'from qtpy import QtCore\n'), ((11971, 11989), 'qtpy.QtCore.Signal', 'QtCore.Signal', (['str'], {}), '(str)\n', (11984, 11989), False, 'from qtpy import QtCore\n'), ((12776, 12795), 'qtpy.QtCore.Signal', 'QtCore.Signal', (['list'], 
{}), '(list)\n', (12789, 12795), False, 'from qtpy import QtCore\n'), ((29966, 29998), 'qtpy.QtWidgets.QApplication', 'QtWidgets.QApplication', (['sys.argv'], {}), '(sys.argv)\n', (29988, 29998), False, 'from qtpy import QtWidgets\n'), ((30011, 30041), 'qtpy.QtCore.QFile', 'QtCore.QFile', (['""":pic/style.qss"""'], {}), "(':pic/style.qss')\n", (30023, 30041), False, 'from qtpy import QtCore\n'), ((7111, 7138), 'qtpy.QtWidgets.QHBoxLayout', 'QtWidgets.QHBoxLayout', (['self'], {}), '(self)\n', (7132, 7138), False, 'from qtpy import QtWidgets\n'), ((7242, 7260), 'qtpy.QtWidgets.QLabel', 'QtWidgets.QLabel', ([], {}), '()\n', (7258, 7260), False, 'from qtpy import QtWidgets\n'), ((8246, 8273), 'qtpy.QtWidgets.QHBoxLayout', 'QtWidgets.QHBoxLayout', (['self'], {}), '(self)\n', (8267, 8273), False, 'from qtpy import QtWidgets\n'), ((8384, 8405), 'qtpy.QtWidgets.QComboBox', 'QtWidgets.QComboBox', ([], {}), '()\n', (8403, 8405), False, 'from qtpy import QtWidgets\n'), ((8645, 8671), 'qtpy.QtWidgets.QStackedWidget', 'QtWidgets.QStackedWidget', ([], {}), '()\n', (8669, 8671), False, 'from qtpy import QtWidgets\n'), ((8696, 8715), 'qtpy.QtWidgets.QWidget', 'QtWidgets.QWidget', ([], {}), '()\n', (8713, 8715), False, 'from qtpy import QtWidgets\n'), ((8731, 8764), 'qtpy.QtWidgets.QHBoxLayout', 'QtWidgets.QHBoxLayout', (['fileWidget'], {}), '(fileWidget)\n', (8752, 8764), False, 'from qtpy import QtWidgets\n'), ((8861, 8882), 'qtpy.QtWidgets.QLineEdit', 'QtWidgets.QLineEdit', ([], {}), '()\n', (8880, 8882), False, 'from qtpy import QtWidgets\n'), ((8913, 8942), 'qtpy.QtWidgets.QPushButton', 'QtWidgets.QPushButton', (['"""File"""'], {}), "('File')\n", (8934, 8942), False, 'from qtpy import QtWidgets\n'), ((9164, 9183), 'qtpy.QtWidgets.QWidget', 'QtWidgets.QWidget', ([], {}), '()\n', (9181, 9183), False, 'from qtpy import QtWidgets\n'), ((9200, 9237), 'qtpy.QtWidgets.QHBoxLayout', 'QtWidgets.QHBoxLayout', (['databaseWidget'], {}), '(databaseWidget)\n', (9221, 9237), False, 'from 
qtpy import QtWidgets\n'), ((9343, 9358), 'ComboCheckBox.ComboCheckBox', 'ComboCheckBox', ([], {}), '()\n', (9356, 9358), False, 'from ComboCheckBox import ComboCheckBox\n'), ((9563, 9611), 'LDateEdit.CheckDataEdit', 'CheckDataEdit', ([], {'mysname': 'u"""From:"""', 'myename': 'u""" To:"""'}), "(mysname=u'From:', myename=u' To:')\n", (9576, 9611), False, 'from LDateEdit import CheckDataEdit\n'), ((9644, 9675), 'qtpy.QtWidgets.QPushButton', 'QtWidgets.QPushButton', (['"""Search"""'], {}), "('Search')\n", (9665, 9675), False, 'from qtpy import QtWidgets\n'), ((9708, 9729), 'qtpy.QtWidgets.QComboBox', 'QtWidgets.QComboBox', ([], {}), '()\n', (9727, 9729), False, 'from qtpy import QtWidgets\n'), ((10121, 10151), 'qtpy.QtWidgets.QPushButton', 'QtWidgets.QPushButton', (['"""Apply"""'], {}), "('Apply')\n", (10142, 10151), False, 'from qtpy import QtWidgets\n'), ((10665, 10696), 'qtpy.QtWidgets.QFileDialog.Options', 'QtWidgets.QFileDialog.Options', ([], {}), '()\n', (10694, 10696), False, 'from qtpy import QtWidgets\n'), ((12107, 12134), 'qtpy.QtWidgets.QHBoxLayout', 'QtWidgets.QHBoxLayout', (['self'], {}), '(self)\n', (12128, 12134), False, 'from qtpy import QtWidgets\n'), ((12223, 12246), 'qtpy.QtWidgets.QLabel', 'QtWidgets.QLabel', (['title'], {}), '(title)\n', (12239, 12246), False, 'from qtpy import QtWidgets\n'), ((12302, 12326), 'qtpy.QtWidgets.QButtonGroup', 'QtWidgets.QButtonGroup', ([], {}), '()\n', (12324, 12326), False, 'from qtpy import QtWidgets\n'), ((12975, 13002), 'qtpy.QtWidgets.QVBoxLayout', 'QtWidgets.QVBoxLayout', (['self'], {}), '(self)\n', (12996, 13002), False, 'from qtpy import QtWidgets\n'), ((13101, 13124), 'qtpy.QtWidgets.QHBoxLayout', 'QtWidgets.QHBoxLayout', ([], {}), '()\n', (13122, 13124), False, 'from qtpy import QtWidgets\n'), ((13226, 13250), 'qtpy.QtGui.QDoubleValidator', 'QtGui.QDoubleValidator', ([], {}), '()\n', (13248, 13250), False, 'from qtpy import QtGui\n'), ((13495, 13516), 'qtpy.QtWidgets.QLineEdit', 'QtWidgets.QLineEdit', ([], 
{}), '()\n', (13514, 13516), False, 'from qtpy import QtWidgets\n'), ((13635, 13656), 'qtpy.QtWidgets.QLineEdit', 'QtWidgets.QLineEdit', ([], {}), '()\n', (13654, 13656), False, 'from qtpy import QtWidgets\n'), ((13858, 13894), 'qtpy.QtWidgets.QPushButton', 'QtWidgets.QPushButton', (['"""Reset"""', 'self'], {}), "('Reset', self)\n", (13879, 13894), False, 'from qtpy import QtWidgets\n'), ((14602, 14622), 'qtpy.QtWidgets.QLabel', 'QtWidgets.QLabel', (['""""""'], {}), "('')\n", (14618, 14622), False, 'from qtpy import QtWidgets\n'), ((14675, 14699), 'qtpy.QtWidgets.QLabel', 'QtWidgets.QLabel', (['"""From"""'], {}), "('From')\n", (14691, 14699), False, 'from qtpy import QtWidgets\n'), ((14861, 14884), 'qtpy.QtWidgets.QLabel', 'QtWidgets.QLabel', (['""" To"""'], {}), "(' To')\n", (14877, 14884), False, 'from qtpy import QtWidgets\n'), ((15092, 15105), 'visDoubleRangeSlider.RangeSlider', 'RangeSlider', ([], {}), '()\n', (15103, 15105), False, 'from visDoubleRangeSlider import RangeSlider\n'), ((16623, 16650), 'qtpy.QtWidgets.QVBoxLayout', 'QtWidgets.QVBoxLayout', (['self'], {}), '(self)\n', (16644, 16650), False, 'from qtpy import QtWidgets\n'), ((16749, 16772), 'qtpy.QtWidgets.QHBoxLayout', 'QtWidgets.QHBoxLayout', ([], {}), '()\n', (16770, 16772), False, 'from qtpy import QtWidgets\n'), ((16873, 16885), 'visCheckTable.CheckTable', 'CheckTable', ([], {}), '()\n', (16883, 16885), False, 'from visCheckTable import CheckTable\n'), ((16950, 16969), 'qtpy.QtWidgets.QWidget', 'QtWidgets.QWidget', ([], {}), '()\n', (16967, 16969), False, 'from qtpy import QtWidgets\n'), ((16985, 17014), 'qtpy.QtWidgets.QVBoxLayout', 'QtWidgets.QVBoxLayout', (['widget'], {}), '(widget)\n', (17006, 17014), False, 'from qtpy import QtWidgets\n'), ((17118, 17136), 'ZoomLineChart.RectZoomMoveView', 'RectZoomMoveView', ([], {}), '()\n', (17134, 17136), False, 'from ZoomLineChart import RectZoomMoveView\n'), ((18274, 18297), 'qtpy.QtWidgets.QHBoxLayout', 'QtWidgets.QHBoxLayout', ([], {}), '()\n', 
(18295, 18297), False, 'from qtpy import QtWidgets\n'), ((18396, 18426), 'qtpy.QtWidgets.QPushButton', 'QtWidgets.QPushButton', (['"""Reset"""'], {}), "('Reset')\n", (18417, 18426), False, 'from qtpy import QtWidgets\n'), ((18452, 18482), 'qtpy.QtWidgets.QPushButton', 'QtWidgets.QPushButton', (['"""Apply"""'], {}), "('Apply')\n", (18473, 18482), False, 'from qtpy import QtWidgets\n'), ((19431, 19450), 'qtpy.QtWidgets.QWidget', 'QtWidgets.QWidget', ([], {}), '()\n', (19448, 19450), False, 'from qtpy import QtWidgets\n'), ((19471, 19501), 'qtpy.QtWidgets.QVBoxLayout', 'QtWidgets.QVBoxLayout', (['mwidget'], {}), '(mwidget)\n', (19492, 19501), False, 'from qtpy import QtWidgets\n'), ((19611, 19637), 'qtpy.QtWidgets.QStackedWidget', 'QtWidgets.QStackedWidget', ([], {}), '()\n', (19635, 19637), False, 'from qtpy import QtWidgets\n'), ((20076, 20087), 'visGraphHigh.GraphHigh', 'GraphHigh', ([], {}), '()\n', (20085, 20087), False, 'from visGraphHigh import GraphHigh\n'), ((20829, 20848), 'qtpy.QtWidgets.QWidget', 'QtWidgets.QWidget', ([], {}), '()\n', (20846, 20848), False, 'from qtpy import QtWidgets\n'), ((20864, 20893), 'qtpy.QtWidgets.QVBoxLayout', 'QtWidgets.QVBoxLayout', (['widget'], {}), '(widget)\n', (20885, 20893), False, 'from qtpy import QtWidgets\n'), ((20990, 21011), 'qtpy.QtWidgets.QTextEdit', 'QtWidgets.QTextEdit', ([], {}), '()\n', (21009, 21011), False, 'from qtpy import QtWidgets\n'), ((21086, 21103), 'visTableWidget.InfoTableWidget', 'InfoTableWidget', ([], {}), '()\n', (21101, 21103), False, 'from visTableWidget import InfoTableWidget\n'), ((21514, 21525), 'visgraph.GraphWork', 'GraphWork', ([], {}), '()\n', (21523, 21525), False, 'from visgraph import GraphWork\n'), ((23121, 23132), 'visgraph.GraphWork', 'GraphWork', ([], {}), '()\n', (23130, 23132), False, 'from visgraph import GraphWork\n'), ((6276, 6291), 'numpy.array', 'np.array', (['Attrs'], {}), '(Attrs)\n', (6284, 6291), True, 'import numpy as np\n'), ((7490, 7588), 'qtpy.QtWidgets.QSpacerItem', 
'QtWidgets.QSpacerItem', (['(1)', '(1)', 'QtWidgets.QSizePolicy.Expanding', 'QtWidgets.QSizePolicy.Preferred'], {}), '(1, 1, QtWidgets.QSizePolicy.Expanding, QtWidgets.\n QSizePolicy.Preferred)\n', (7511, 7588), False, 'from qtpy import QtWidgets\n'), ((7713, 7811), 'qtpy.QtWidgets.QSpacerItem', 'QtWidgets.QSpacerItem', (['(1)', '(1)', 'QtWidgets.QSizePolicy.Expanding', 'QtWidgets.QSizePolicy.Preferred'], {}), '(1, 1, QtWidgets.QSizePolicy.Expanding, QtWidgets.\n QSizePolicy.Preferred)\n', (7734, 7811), False, 'from qtpy import QtWidgets\n'), ((12384, 12409), 'qtpy.QtWidgets.QRadioButton', 'QtWidgets.QRadioButton', (['k'], {}), '(k)\n', (12406, 12409), False, 'from qtpy import QtWidgets\n'), ((14188, 14211), 'qtpy.QtGui.QColor', 'QtGui.QColor', (['"""#FF6347"""'], {}), "('#FF6347')\n", (14200, 14211), False, 'from qtpy import QtGui\n'), ((14443, 14541), 'qtpy.QtWidgets.QSpacerItem', 'QtWidgets.QSpacerItem', (['(1)', '(1)', 'QtWidgets.QSizePolicy.Expanding', 'QtWidgets.QSizePolicy.Preferred'], {}), '(1, 1, QtWidgets.QSizePolicy.Expanding, QtWidgets.\n QSizePolicy.Preferred)\n', (14464, 14541), False, 'from qtpy import QtWidgets\n'), ((18525, 18623), 'qtpy.QtWidgets.QSpacerItem', 'QtWidgets.QSpacerItem', (['(1)', '(1)', 'QtWidgets.QSizePolicy.Expanding', 'QtWidgets.QSizePolicy.Preferred'], {}), '(1, 1, QtWidgets.QSizePolicy.Expanding, QtWidgets.\n QSizePolicy.Preferred)\n', (18546, 18623), False, 'from qtpy import QtWidgets\n'), ((19120, 19147), 'qtpy.QtGui.QIcon', 'QtGui.QIcon', (['"""GraphViz.ico"""'], {}), "('GraphViz.ico')\n", (19131, 19147), False, 'from qtpy import QtGui\n'), ((20236, 20255), 'qtpy.QtWidgets.QWidget', 'QtWidgets.QWidget', ([], {}), '()\n', (20253, 20255), False, 'from qtpy import QtWidgets\n'), ((24033, 24103), 'qtpy.QtWidgets.QMessageBox.information', 'QtWidgets.QMessageBox.information', (['self', '"""Info"""', '"""The Graph is Empty!"""'], {}), "(self, 'Info', 'The Graph is Empty!')\n", (24066, 24103), False, 'from qtpy import QtWidgets\n'), 
((22987, 23057), 'qtpy.QtWidgets.QMessageBox.information', 'QtWidgets.QMessageBox.information', (['self', '"""Info"""', '"""The Graph is Empty!"""'], {}), "(self, 'Info', 'The Graph is Empty!')\n", (23020, 23057), False, 'from qtpy import QtWidgets\n')] |
'''
A matplotlib-based function to overplot an elliptical error contour from the covariance matrix.
Copyright 2017 <NAME> (Flatiron).
Citations: <NAME> (https://github.com/joferkington/oost_paper_code/blob/master/error_ellipse.py),
<NAME> (http://www.visiondummy.com/2014/04/draw-error-ellipse-representing-covariance-matrix/)
'''
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.patches import Ellipse
def error_ellipse(ax, xc, yc, cov, sigma=1, **kwargs):
    '''
    Plot an error ellipse contour over your data.
    Inputs:
    ax : matplotlib Axes() object
    xc : x-coordinate of ellipse center
    yc : y-coordinate of ellipse center
    cov : covariance matrix
    sigma : # sigma to plot (default 1)
    additional kwargs passed to matplotlib.patches.Ellipse()
    '''
    w, v = np.linalg.eigh(cov) # assumes symmetric matrix
    # Sort eigenpairs by decreasing eigenvalue so w[0]/v[:,0] is the major axis.
    order = w.argsort()[::-1]
    w, v = w[order], v[:,order]
    # Rotation of the major axis, in degrees (arctan2(y, x) of its eigenvector).
    theta = np.degrees(np.arctan2(*v[:,0][::-1]))
    # Axis lengths are 2*sigma*sqrt(eigenvalue) (full width/height, not radii).
    ellipse = Ellipse(xy=(xc,yc),
                width=2.*sigma*np.sqrt(w[0]),
                height=2.*sigma*np.sqrt(w[1]),
                angle=theta, **kwargs)
    ellipse.set_facecolor('none')
    ax.add_artist(ellipse)
if __name__ == '__main__':
    #-- Example usage -----------------------
    # Draw correlated Gaussian samples and overlay 1-, 2- and 3-sigma contours.
    samples = np.random.multivariate_normal(
        mean=(1,1), cov=[[5., 4.],[4., 6.]], size=100
    )
    x, y = samples.T
    cov = np.cov(x,y, rowvar=False)
    # Scatter the raw points, then add the three ellipses at the sample mean.
    fig = plt.figure()
    ax = fig.add_subplot(111)
    ax.scatter(x, y, color='k')
    mean_x, mean_y = np.mean(x), np.mean(y)
    for n_sigma, edge_color in ((1, 'red'), (2, 'green'), (3, 'blue')):
        error_ellipse(ax, mean_x, mean_y, cov, sigma=n_sigma, ec=edge_color)
    plt.show()
plt.show() | [
"matplotlib.pyplot.show",
"numpy.arctan2",
"numpy.linalg.eigh",
"matplotlib.pyplot.figure",
"numpy.mean",
"numpy.random.multivariate_normal",
"numpy.cov",
"numpy.sqrt"
] | [((821, 840), 'numpy.linalg.eigh', 'np.linalg.eigh', (['cov'], {}), '(cov)\n', (835, 840), True, 'import numpy as np\n'), ((1354, 1440), 'numpy.random.multivariate_normal', 'np.random.multivariate_normal', ([], {'mean': '(1, 1)', 'cov': '[[5.0, 4.0], [4.0, 6.0]]', 'size': '(100)'}), '(mean=(1, 1), cov=[[5.0, 4.0], [4.0, 6.0]],\n size=100)\n', (1383, 1440), True, 'import numpy as np\n'), ((1487, 1513), 'numpy.cov', 'np.cov', (['x', 'y'], {'rowvar': '(False)'}), '(x, y, rowvar=False)\n', (1493, 1513), True, 'import numpy as np\n'), ((1557, 1569), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (1567, 1569), True, 'import matplotlib.pyplot as plt\n'), ((1872, 1882), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1880, 1882), True, 'import matplotlib.pyplot as plt\n'), ((953, 979), 'numpy.arctan2', 'np.arctan2', (['*v[:, 0][::-1]'], {}), '(*v[:, 0][::-1])\n', (963, 979), True, 'import numpy as np\n'), ((1686, 1696), 'numpy.mean', 'np.mean', (['x'], {}), '(x)\n', (1693, 1696), True, 'import numpy as np\n'), ((1698, 1708), 'numpy.mean', 'np.mean', (['y'], {}), '(y)\n', (1705, 1708), True, 'import numpy as np\n'), ((1747, 1757), 'numpy.mean', 'np.mean', (['x'], {}), '(x)\n', (1754, 1757), True, 'import numpy as np\n'), ((1759, 1769), 'numpy.mean', 'np.mean', (['y'], {}), '(y)\n', (1766, 1769), True, 'import numpy as np\n'), ((1819, 1829), 'numpy.mean', 'np.mean', (['x'], {}), '(x)\n', (1826, 1829), True, 'import numpy as np\n'), ((1831, 1841), 'numpy.mean', 'np.mean', (['y'], {}), '(y)\n', (1838, 1841), True, 'import numpy as np\n'), ((1049, 1062), 'numpy.sqrt', 'np.sqrt', (['w[0]'], {}), '(w[0])\n', (1056, 1062), True, 'import numpy as np\n'), ((1100, 1113), 'numpy.sqrt', 'np.sqrt', (['w[1]'], {}), '(w[1])\n', (1107, 1113), True, 'import numpy as np\n')] |
from tqdm import tqdm
import random
import os
import numpy as np
import argparse
import subprocess
import shlex
import sys
import torch
from util import *
RANDOM_SEED = 12345
# Seed every RNG source (python, numpy, torch CPU and all CUDA devices)
# so training runs are reproducible.
random.seed(RANDOM_SEED)
np.random.seed(RANDOM_SEED)
torch.manual_seed(RANDOM_SEED)
if torch.cuda.is_available():
    torch.cuda.manual_seed_all(RANDOM_SEED)
def train(args):
    """Fine-tune a BERT model, periodically evaluating and checkpointing.

    Loads (or resumes) the model, trains for `args.num_train_epochs`
    epochs, evaluates on dev/test via `eval_select` (which checkpoints on
    dev improvement), and prints the final test scores.
    """
    if args.load_trained:
        epoch, arch, model, tokenizer, scores = load_checkpoint(args.pytorch_dump_path)
    else:
        model, tokenizer = load_pretrained_model_tokenizer(args.model_type, device=args.device)
    train_dataset = load_data(args.data_path, args.data_name, args.batch_size, tokenizer, "train", args.device)
    validate_dataset = load_data(args.data_path, args.data_name, args.batch_size, tokenizer, "dev", args.device)
    test_dataset = load_data(args.data_path, args.data_name, args.batch_size, tokenizer, "test", args.device)
    optimizer = init_optimizer(model, args.learning_rate, args.warmup_proportion, args.num_train_epochs, args.data_size, args.batch_size)
    model.train()
    global_step = 0
    best_score = 0
    for epoch in range(1, args.num_train_epochs+1):
        tr_loss = 0
        # random.shuffle(train_dataset)
        for step, batch in enumerate(tqdm(train_dataset)):
            # A None batch marks the end of usable data for this epoch.
            if batch is None:
                break
            tokens_tensor, segments_tensor, mask_tensor, label_tensor, _, _ = batch
            # These two architectures return only the loss; others also return logits.
            if args.model_type == "BertForNextSentencePrediction" or args.model_type == "BertForQuestionAnswering":
                # print(tokens_tensor.shape, segments_tensor.shape, mask_tensor.shape, label_tensor.shape)
                loss = model(tokens_tensor, segments_tensor, mask_tensor, label_tensor)
            else:
                loss, logits = model(tokens_tensor, segments_tensor, mask_tensor, label_tensor)
            loss.backward()
            tr_loss += loss.item()
            optimizer.step()
            model.zero_grad()
            global_step += 1
            # Optional intra-epoch evaluation every `eval_steps` batches.
            if args.eval_steps > 0 and step % args.eval_steps == 0:
                best_score = eval_select(model, tokenizer, validate_dataset, test_dataset, args.pytorch_dump_path, best_score, epoch, args.model_type)
        print("[train] loss: {}".format(tr_loss))
        best_score = eval_select(model, tokenizer, validate_dataset, test_dataset, args.pytorch_dump_path, best_score, epoch, args.model_type)
    scores = test(args, split="test")
    print_scores(scores)
def eval_select(model, tokenizer, validate_dataset, test_dataset, model_path, best_score, epoch, arch):
    """Evaluate on dev and test; checkpoint the model when dev improves.

    Returns the (possibly updated) best dev score. Improvement is judged
    on the first dev metric (`scores_dev[1][0]`).

    NOTE(review): this relies on the module-level `args` inside `test`
    (it is not passed in here) — confirm `args` is in scope when called.
    """
    scores_dev = test(args, split="dev", model=model, tokenizer=tokenizer, test_dataset=validate_dataset)
    print_scores(scores_dev, mode="dev")
    scores_test = test(args, split="test", model=model, tokenizer=tokenizer, test_dataset=test_dataset)
    print_scores(scores_test)
    if scores_dev[1][0] > best_score:
        best_score = scores_dev[1][0]
        # Save pytorch-model
        model_path = "{}_{}".format(model_path, epoch)
        print("Save PyTorch model to {}".format(model_path))
        save_checkpoint(epoch, arch, model, tokenizer, scores_dev, model_path)
    return best_score
def print_scores(scores, mode="test"):
    """Print '[mode] name1: v1 name2: v2 ...' for a [names, values] score pair."""
    formatted = ["{}: {}".format(name, value)
                 for name, value in zip(scores[0], scores[1])]
    print("")
    print("[{}] ".format(mode), end="")
    for piece in formatted:
        print(piece, end=" ")
    print("")
def save_checkpoint(epoch, arch, model, tokenizer, scores, filename):
    """Serialize epoch/architecture/model/tokenizer/scores to `filename`."""
    torch.save(
        {
            'epoch': epoch,
            'arch': arch,
            'model': model,
            'tokenizer': tokenizer,
            'scores': scores,
        },
        filename,
    )
def load_checkpoint(filename):
    """Load a checkpoint written by `save_checkpoint` and unpack its fields."""
    print("Load PyTorch model from {}".format(filename))
    checkpoint = torch.load(filename)
    return (checkpoint['epoch'], checkpoint['arch'], checkpoint['model'],
            checkpoint['tokenizer'], checkpoint['scores'])
def test(args, split="test", model=None, tokenizer=None, test_dataset=None):
    """Evaluate a model on `split` and return [[metric names], [metric values]].

    Fix: the previous version ignored the `model`/`tokenizer`/`test_dataset`
    arguments and unconditionally re-loaded a fresh pretrained model and
    dataset, so the periodic evaluation inside `train` never evaluated the
    model being trained. They are now only loaded when not supplied.

    Side effects: writes per-line predictions to `args.output_path` and a
    TREC-format run file to `args.output_path2`, then scores the run file
    against ./qrels.microblog.txt. The model is left in train() mode.
    """
    if model is None or tokenizer is None:
        model, tokenizer = load_pretrained_model_tokenizer(args.model_type, device=args.device)
    if test_dataset is None:
        print("Load test set")
        test_dataset = load_trec_data(args.data_path, args.data_name,
                                    args.batch_size, tokenizer, split, args.device)
    model.eval()
    prediction_score_list, prediction_index_list, labels = [], [], []
    lineno = 1
    # `with` guarantees both output files are closed even if inference fails.
    with open(args.output_path, "w") as f, open(args.output_path2, "w") as f2:
        for batch in test_dataset:
            if batch is None:
                break
            tokens_tensor, segments_tensor, mask_tensor, label_tensor, qid_tensor, docid_tensor = batch
            predictions = model(tokens_tensor, segments_tensor, mask_tensor)
            scores = predictions.cpu().detach().numpy()
            predicted_index = list(torch.argmax(predictions, dim=1).cpu().numpy())
            prediction_index_list += predicted_index
            # Column 1 is the "relevant" class score used for ranking.
            predicted_score = list(predictions[:, 1].cpu().detach().numpy())
            prediction_score_list.extend(predicted_score)
            labels.extend(list(label_tensor.cpu().detach().numpy()))
            qids = qid_tensor.cpu().detach().numpy()
            docids = docid_tensor.cpu().detach().numpy()
            for p, qid, docid, s in zip(predicted_index, qids, docids, scores):
                f.write("{}\t{}\n".format(lineno, p))
                f2.write("{} Q0 {} {} {} bert\n".format(qid, docid, lineno, s[1]))
                lineno += 1
            del predictions
    # TREC-style evaluation of the run file against the relevance judgements.
    map_score, mrr, p30 = evaluate(predictions_file=args.output_path2,
                                    qrels_file="./qrels.microblog.txt")
    torch.cuda.empty_cache()
    model.train()
    return [["map", "mrr", "p30"], [map_score, mrr, p30]]
if __name__ == '__main__':
    # Command-line entry point: parse hyper-parameters and paths.
    parser = argparse.ArgumentParser()
    parser.add_argument('--mode', default='train', help='[train, test]')
    parser.add_argument('--device', default='cuda', help='[cuda, cpu]')
    parser.add_argument('--batch_size', default=8, type=int, help='[1, 8, 16, 32]')
    parser.add_argument('--data_size', default=41579, type=int, help='[tweet2014: 41579]')
    parser.add_argument('--learning_rate', default=1e-5, type=float, help='')
    parser.add_argument('--num_train_epochs', default=3, type=int, help='')
    parser.add_argument('--data_path', default='./', help='')
    parser.add_argument('--data_name', default='robust04_bm25', help='annotation or youzan_new or tweet')
    parser.add_argument('--pytorch_dump_path', default='saved.model', help='')
    parser.add_argument('--load_trained', action='store_true', default=False, help='')
    parser.add_argument('--chinese', action='store_true', default=False, help='')
    parser.add_argument('--eval_steps', default=-1, type=int, help='evaluation per [eval_steps] steps, -1 for evaluation per epoch')
    parser.add_argument('--model_type', default='BertForNextSentencePrediction', help='')
    parser.add_argument('--output_path', default='prediction.tmp', help='')
    parser.add_argument('--output_path2', default='prediction.trec', help='')
    parser.add_argument('--warmup_proportion', default=0.1, type=float, help='Proportion of training to perform linear learning rate warmup. E.g., 0.1 = 10%% of training.')
    args = parser.parse_args()
    # NOTE(review): the train/test dispatch is commented out, so the script
    # currently ignores --mode and always runs evaluation — confirm whether
    # this is intentional (looks like a temporary debugging change).
    # if args.mode == "train":
    #     train(args)
    # else:
    scores = test(args)
    print_scores(scores)
| [
"tqdm.tqdm",
"numpy.random.seed",
"argparse.ArgumentParser",
"torch.manual_seed",
"torch.load",
"torch.argmax",
"torch.save",
"torch.cuda.manual_seed_all",
"torch.cuda.is_available",
"random.seed",
"torch.cuda.empty_cache"
] | [((180, 204), 'random.seed', 'random.seed', (['RANDOM_SEED'], {}), '(RANDOM_SEED)\n', (191, 204), False, 'import random\n'), ((205, 232), 'numpy.random.seed', 'np.random.seed', (['RANDOM_SEED'], {}), '(RANDOM_SEED)\n', (219, 232), True, 'import numpy as np\n'), ((233, 263), 'torch.manual_seed', 'torch.manual_seed', (['RANDOM_SEED'], {}), '(RANDOM_SEED)\n', (250, 263), False, 'import torch\n'), ((267, 292), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (290, 292), False, 'import torch\n'), ((298, 337), 'torch.cuda.manual_seed_all', 'torch.cuda.manual_seed_all', (['RANDOM_SEED'], {}), '(RANDOM_SEED)\n', (324, 337), False, 'import torch\n'), ((3630, 3657), 'torch.save', 'torch.save', (['state', 'filename'], {}), '(state, filename)\n', (3640, 3657), False, 'import torch\n'), ((3759, 3779), 'torch.load', 'torch.load', (['filename'], {}), '(filename)\n', (3769, 3779), False, 'import torch\n'), ((5891, 5915), 'torch.cuda.empty_cache', 'torch.cuda.empty_cache', ([], {}), '()\n', (5913, 5915), False, 'import torch\n'), ((6184, 6209), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (6207, 6209), False, 'import argparse\n'), ((1261, 1280), 'tqdm.tqdm', 'tqdm', (['train_dataset'], {}), '(train_dataset)\n', (1265, 1280), False, 'from tqdm import tqdm\n'), ((4850, 4882), 'torch.argmax', 'torch.argmax', (['predictions'], {'dim': '(1)'}), '(predictions, dim=1)\n', (4862, 4882), False, 'import torch\n')] |
# -*- coding: utf-8 -*-
import numpy as np
import copy
import itertools
from time import time
import matplotlib.pyplot as plt
from stochoptim.scengen.tree_structure import Node
from stochoptim.scengen.scenario_tree import ScenarioTree
Cartesian = itertools.product
class TreeSearch:
def __init__(self, scenario_process, variability_process, demerit, nber_stages):
self.scenario_process = scenario_process
self.variability_process = variability_process
self.demerit = demerit
self.nber_stages = nber_stages
self.last_stage = nber_stages - 1
self._search_methods = [("VNS", "forward"), ("VNS", "backward"),
("EXH", "forward"), ("EXH", "backward")]
self._best_fod = {}
self._best_tree = {}
self._fod_sample = {}
self._initialize()
def _initialize(self, method=None):
methods = self._search_methods if method is None else [method]
for method in methods:
self._best_fod[method] = np.inf
self._best_tree[method] = None
self._fod_sample[method] = []
def fod_sample(self, method):
return np.array(self._fod_sample[method])
def best_tree(self, method):
return self._best_tree[method]
def best_fod(self, method):
return self._best_fod[method]
def plot_fod_hist(self, method, bins=10, figsize=(5,5)):
fig, ax = plt.subplots(figsize=figsize)
ax.hist(self._fod_sample[method], bins=bins)
ax.set_title(f"P10: {np.quantile(self._fod_sample[method], 0.1):.5f} "
f"; P50: {np.quantile(self._fod_sample[method], 0.5):.5f}\n"
f"min: {min(self._fod_sample[method]):.5f} ;"
f"max: {max(self._fod_sample[method]):.5f}")
plt.show()
def plot_fod_progress(self, method, figsize=(5,5)):
fig, ax = plt.subplots(figsize=(5,5))
ax.plot(range(len(self._fod_sample[method])), np.minimum.accumulate(self._fod_sample[method]))
best_index = np.where(np.minimum.accumulate(self._fod_sample[method]) <= self._best_fod[method] + 10**-10)[0][0]
ax.scatter(best_index, self._best_fod[method])
plt.show()
def variable_neighborhood_search(self,
nber_scenarios,
initial_tree=None,
optimized='forward',
max_iteration=np.inf,
max_no_improvement=np.inf,
num_local_samples=10,
num_neighborhoods=10,
neighborhood_shrink=0):
"""Explore the space of tree structures via a strategy of 'variable neighborhood' to find the scenario tree
of lowest demerit"""
assert optimized in ["forward", "backward"], ("`optimized` must be either 'forward' or "
f"'backward', not {optimized}.")
self._initialize(('VNS', optimized))
time0 = time()
if initial_tree is None:
bushiness = (nber_scenarios,) + (1,) * (self.nber_stages-2)
initial_tree = ScenarioTree.from_bushiness(bushiness)
initial_tree.fill(self.scenario_process, optimized, self.variability_process, self.demerit)
self._best_fod[('VNS', optimized)] = initial_tree.get_figure_of_demerit(self.demerit)
self._best_tree[('VNS', optimized)] = copy.deepcopy(initial_tree)
if initial_tree.depth <= 2:
return self._best_tree[('VNS', optimized)], self._best_fod[('VNS', optimized)]
iteration, no_improvement_count = 0, 0
while iteration < max_iteration:
try:
iteration += 1
nbreed = 1 #max(1, int(nber_scenarios / np.log(3*iteration)**neighborhood_shrink))
candidates = [copy.deepcopy(self._best_tree[('VNS', optimized)]) for i in range(num_local_samples)]
# increase neighborhood distance until improvement
for neighborhood in range(1, num_neighborhoods + 1):
improved = False
# try multiple samples in the same neighborhood
for current_tree in candidates:
# increase neighborhood of current candidate
for ibreed in range(nbreed):
TreeSearch._tree_breed(current_tree) # split or merge
current_tree.fill(self.scenario_process, optimized, self.variability_process, self.demerit)
current_fod = current_tree.get_figure_of_demerit(self.demerit)
self._fod_sample[('VNS', optimized)].append(current_fod)
if current_fod < self._best_fod[('VNS', optimized)]:
improved = True
self._best_tree[('VNS', optimized)] = current_tree
self._best_fod[('VNS', optimized)] = current_fod
# start over if at least one sample provided improvement
if improved:
no_improvement_count = 0
break
if not improved:
no_improvement_count += 1
if no_improvement_count >= max_no_improvement:
break
print(f"\riteration: {iteration} demerit: {current_fod:.5f} "
f"best demerit: {self._best_fod[('VNS', optimized)]:.5f} "
f"no improvement count: {no_improvement_count}", end="")
except KeyboardInterrupt:
break
time1 = time()
print(f"\nTotal number of iterations : {iteration} ({time1-time0:.1f} sec)")
@staticmethod
def _merge_nodes(node1, node2):
assert node2 in node1.parent.children
node1.parent.children.remove(node2)
node1.add(*node2.children)
@staticmethod
def _split_node(node1, num_children_array):
nodes = [Node() for nc in num_children_array]
# redistribute subtrees
for n, num_children in zip(nodes, num_children_array):
for nc in node1.children[:num_children]:
node1.children.remove(nc)
n.add(nc)
# remove old node to parent
node1.parent.children.remove(node1)
# add new nodes to parent
node1.parent.add(*nodes)
@staticmethod
def _tree_breed(tree):
# randomly pick a valid action
actions = ['merge']
splittable_nodes = [node for node in tree.nodes if node.parent and len(node.children) >= 2]
if len(splittable_nodes) > 0:
actions.append('split')
mergeable = [node for node in tree.nodes if not node.is_leaf and node.has_siblings]
action = 'split' if len(mergeable) == 0 else np.random.choice(actions)
if action == 'merge':
node1 = np.random.choice(mergeable)
node2 = np.random.choice([n for n in node1.parent.children if n is not node1])
TreeSearch._merge_nodes(node1, node2)
elif action == 'split':
node1 = np.random.choice(splittable_nodes)
k = np.random.randint(len(node1.children) - 1) + 1
num_children_array = [k, len(node1.children) - k]
TreeSearch._split_node(node1, num_children_array)
else:
raise ValueError("unknown action")
def exhaustive_search(self,
nber_scenarios,
optimized='forward',
min_branching_factor=1,
max_iteration=np.inf):
"""Explore exhaustively the space of tree structures to find the scenario tree of lowest demerit"""
assert optimized in ["forward", "backward"], ("`optimized` must be either 'forward' or "
f"'backward', not {optimized}.")
self._initialize(('EXH', optimized))
time0 = time()
iteration_count, no_improvement_count = 1, 0
for structure in TreeSearch._exhaustive_structures(self.nber_stages, nber_scenarios, min_branching_factor):
try:
current_tree = ScenarioTree(structure)
current_tree.fill(self.scenario_process, optimized, self.variability_process, self.demerit)
current_fod = current_tree.get_figure_of_demerit(self.demerit)
self._fod_sample[('EXH', optimized)].append(current_fod)
if current_fod < self._best_fod[('EXH', optimized)]:
improved = True
self._best_tree[('EXH', optimized)] = current_tree
self._best_fod[('EXH', optimized)] = current_fod
else:
improved = False
no_improvement_count += 1 if not improved else 0
if iteration_count % 10 == 0:
print(f"\riteration: {iteration_count} demerit: {current_fod:.5f} "
f"best demerit: {self._best_fod[('EXH', optimized)]:.5f} "
f"no improvement count: {no_improvement_count}", end="")
except KeyboardInterrupt:
break
iteration_count += 1
if iteration_count > max_iteration:
break
time1 = time()
print(f"\nTotal number of iterations : {iteration_count-1} ({time1-time0:.1f} sec)")
@staticmethod
def _exhaustive_structures(depth, N, b):
"""Generates all tree structures with depth T, N scenarios, and a branching lowerbound b"""
if depth == 2:
yield Node.from_bushiness((N,))
return
for n in range(b**(depth-1), int(N / b) + 1):
for tree in TreeSearch._exhaustive_structures(depth-1, n, b):
for new_tree in TreeSearch._extend_structure(tree, N, b):
new_tree.delete_data(["pos", "n"])
yield new_tree
@staticmethod
def _pseudo_integer_partitions(cardinality, integer, lowerbounds):
"""Enumeration of all integer partitions (not permutation free) of a fixed cardinality.
The partition of an integer k is a tuple (n_1, ..., n_m) such that:
(i) k = n_1 + ... + n_m
(ii) n_i is integer >= 1.
Unlike the regular integer partitions (method ._integer_partitions()), here the condition:
n_1 <= n_2 <= ... <= n_m
need not be satisfied.
The lowerbounds adds the condition:
(iv) n_i >= lowerbound[i], for i = 1, ...,m (componentwise lowerbound).
Arguments:
----------
cardinality: integer >= 1
The number of elements partitionning the integer.
integer: integer >= 1
The integer being partitionned.
lowerbounds: tuple of integers >= 1
The componentwise lowerbound on the elements partitionning the integer.
Returns:
--------
iterator on the partitions.
"""
if cardinality == 1:
if integer >= lowerbounds[0]:
yield (integer,)
return
for i in range(lowerbounds[0], integer-sum(lowerbounds[1:])+1):
for t in TreeSearch._pseudo_integer_partitions(cardinality-1, integer-i, lowerbounds[1:]):
yield (i,) + t
@staticmethod
def _integer_partitions(cardinality, integer, lowerbound=1):
"""Enumeration of all integer partitions of a fixed cardinality. A partition of cardinality m of
an integer k is a tuple (n_1, ..., n_m) such that:
(i) k = n_1 + ... + n_m
(ii) n_i is integer >= 1
(iii) n_1 <= n_2 <= ... <= n_m.
The lowerbound (optional) adds the condition:
(iv) n_i >= lowerbound, for i = 1, ...,m.
Arguments:
----------
cardinality: integer >= 1
The number of elements partitionning the integer.
integer: integer >= 1
The integer being partitionned.
lowerbound: integer >= 1 (default 1)
The lowerbound on the elements partitionning the integer.
Returns:
--------
iterator on the partitions.
"""
if cardinality == 1:
if integer >= lowerbound:
yield (integer,)
return
for i in range(lowerbound, integer-(cardinality-1)*lowerbound+1):
for t in TreeSearch._integer_partitions(cardinality-1, integer-i, i):
yield (i,) + t
@staticmethod
def _extend_structure(tree, N, b):
"""This generator takes any tree structure and generates all
tree structure with N scenarios and 1 stage more."""
partitionP1, partitionP2, P2, lowerboundsP2 = {}, {}, {}, {}
#Create the Partition P1 of leaf nodes
for leaf in tree.leaves:
history = tuple([len(n.children) for n in leaf.branch if n != leaf])
if history in partitionP1.keys():
partitionP1[history] += [leaf]
else:
partitionP1[history] = [leaf]
P1 = len(partitionP1.keys())
lowerboundsP1 = [b * len(partitionP1[key]) for key in partitionP1.keys()]
#Create the Partition P2 of leaf nodes
for i, key in enumerate(partitionP1.keys()):
partitionP2[key] = list(set([tuple(leaf.parent.children) for leaf in partitionP1[key]]))
P2[key] = len(partitionP2[key])
lowerboundsP2[key] = len(partitionP2[key][0])
#Create a data 'pos' (a 3-tuple) to index each leaf in its partition and subpartition
for i, key in enumerate(partitionP1.keys()):
for j, leaves in enumerate(partitionP2[key]):
for k, leaf in enumerate(leaves):
leaf.data["pos"] = (i, j, k)
#generate the integer tuples x, y, z to code the tree branching
indexing_A = TreeSearch._pseudo_integer_partitions(P1, N, lowerboundsP1)
for x in indexing_A:
indexing_B = lambda i, key: list(TreeSearch._integer_partitions(P2[key],
x[i],
b*lowerboundsP2[key]))
for y in Cartesian(*[indexing_B(i, key) for i, key in enumerate(partitionP1.keys())]):
set_B = lambda i, j, key: list(TreeSearch._integer_partitions(lowerboundsP2[key],
y[i][j],
b))
for z in Cartesian(*[Cartesian(*[set_B(i, j, key) for j in range(P2[key])])
for i, key in enumerate(partitionP1.keys())]):
new_tree = copy.deepcopy(tree)
for leaf in list(new_tree.leaves):
(i, j, k) = leaf.data["pos"]
leaf.add(*[Node() for i in range(z[i][j][k])])
yield new_tree
| [
"numpy.random.choice",
"copy.deepcopy",
"numpy.quantile",
"matplotlib.pyplot.show",
"stochoptim.scengen.scenario_tree.ScenarioTree",
"time.time",
"stochoptim.scengen.tree_structure.Node.from_bushiness",
"numpy.minimum.accumulate",
"numpy.array",
"stochoptim.scengen.scenario_tree.ScenarioTree.from_... | [((1201, 1235), 'numpy.array', 'np.array', (['self._fod_sample[method]'], {}), '(self._fod_sample[method])\n', (1209, 1235), True, 'import numpy as np\n'), ((1472, 1501), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': 'figsize'}), '(figsize=figsize)\n', (1484, 1501), True, 'import matplotlib.pyplot as plt\n'), ((1857, 1867), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1865, 1867), True, 'import matplotlib.pyplot as plt\n'), ((1951, 1979), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(5, 5)'}), '(figsize=(5, 5))\n', (1963, 1979), True, 'import matplotlib.pyplot as plt\n'), ((2266, 2276), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2274, 2276), True, 'import matplotlib.pyplot as plt\n'), ((3140, 3146), 'time.time', 'time', ([], {}), '()\n', (3144, 3146), False, 'from time import time\n'), ((3575, 3602), 'copy.deepcopy', 'copy.deepcopy', (['initial_tree'], {}), '(initial_tree)\n', (3588, 3602), False, 'import copy\n'), ((5959, 5965), 'time.time', 'time', ([], {}), '()\n', (5963, 5965), False, 'from time import time\n'), ((8282, 8288), 'time.time', 'time', ([], {}), '()\n', (8286, 8288), False, 'from time import time\n'), ((9746, 9752), 'time.time', 'time', ([], {}), '()\n', (9750, 9752), False, 'from time import time\n'), ((2033, 2080), 'numpy.minimum.accumulate', 'np.minimum.accumulate', (['self._fod_sample[method]'], {}), '(self._fod_sample[method])\n', (2054, 2080), True, 'import numpy as np\n'), ((3279, 3317), 'stochoptim.scengen.scenario_tree.ScenarioTree.from_bushiness', 'ScenarioTree.from_bushiness', (['bushiness'], {}), '(bushiness)\n', (3306, 3317), False, 'from stochoptim.scengen.scenario_tree import ScenarioTree\n'), ((6315, 6321), 'stochoptim.scengen.tree_structure.Node', 'Node', ([], {}), '()\n', (6319, 6321), False, 'from stochoptim.scengen.tree_structure import Node\n'), ((7157, 7182), 'numpy.random.choice', 'np.random.choice', 
(['actions'], {}), '(actions)\n', (7173, 7182), True, 'import numpy as np\n'), ((7234, 7261), 'numpy.random.choice', 'np.random.choice', (['mergeable'], {}), '(mergeable)\n', (7250, 7261), True, 'import numpy as np\n'), ((7282, 7352), 'numpy.random.choice', 'np.random.choice', (['[n for n in node1.parent.children if n is not node1]'], {}), '([n for n in node1.parent.children if n is not node1])\n', (7298, 7352), True, 'import numpy as np\n'), ((7456, 7490), 'numpy.random.choice', 'np.random.choice', (['splittable_nodes'], {}), '(splittable_nodes)\n', (7472, 7490), True, 'import numpy as np\n'), ((8510, 8533), 'stochoptim.scengen.scenario_tree.ScenarioTree', 'ScenarioTree', (['structure'], {}), '(structure)\n', (8522, 8533), False, 'from stochoptim.scengen.scenario_tree import ScenarioTree\n'), ((10060, 10085), 'stochoptim.scengen.tree_structure.Node.from_bushiness', 'Node.from_bushiness', (['(N,)'], {}), '((N,))\n', (10079, 10085), False, 'from stochoptim.scengen.tree_structure import Node\n'), ((1584, 1626), 'numpy.quantile', 'np.quantile', (['self._fod_sample[method]', '(0.1)'], {}), '(self._fod_sample[method], 0.1)\n', (1595, 1626), True, 'import numpy as np\n'), ((1665, 1707), 'numpy.quantile', 'np.quantile', (['self._fod_sample[method]', '(0.5)'], {}), '(self._fod_sample[method], 0.5)\n', (1676, 1707), True, 'import numpy as np\n'), ((4009, 4057), 'copy.deepcopy', 'copy.deepcopy', (["self._best_tree['VNS', optimized]"], {}), "(self._best_tree['VNS', optimized])\n", (4022, 4057), False, 'import copy\n'), ((15628, 15647), 'copy.deepcopy', 'copy.deepcopy', (['tree'], {}), '(tree)\n', (15641, 15647), False, 'import copy\n'), ((2112, 2159), 'numpy.minimum.accumulate', 'np.minimum.accumulate', (['self._fod_sample[method]'], {}), '(self._fod_sample[method])\n', (2133, 2159), True, 'import numpy as np\n'), ((15812, 15818), 'stochoptim.scengen.tree_structure.Node', 'Node', ([], {}), '()\n', (15816, 15818), False, 'from stochoptim.scengen.tree_structure import Node\n')] 
|
import random
import time
import itertools
from functools import partial
import math
import re
import cachetools
import numpy as np
from scipy.stats import skew, moment
from copy import deepcopy
from deap import gp
class SimpleParametrizedPrimitiveSet(gp.PrimitiveSet):
def __init__(self, name, arity, variable_type_indices, variable_names, prefix="ARG"):
gp.PrimitiveSet.__init__(self, name, arity, prefix)
self.variable_type_indices = variable_type_indices
self.variable_names = variable_names
def add_parametrized_terminal(self, parametrized_terminal_class):
self._add(parametrized_terminal_class)
self.context[parametrized_terminal_class.__name__] = parametrized_terminal_class.call
class SimpleParametrizedPrimitiveTree(gp.PrimitiveTree):
def __init__(self, content):
gp.PrimitiveTree.__init__(self, content)
def __deepcopy__(self, memo):
new = self.__class__(self)
for i, node in enumerate(self):
if isinstance(node, SimpleParametrizedTerminal):
new[i] = deepcopy(node)
new.__dict__.update(deepcopy(self.__dict__, memo))
return new
@classmethod
def from_string(cls, string, pset):
"""Try to convert a string expression into a PrimitiveTree given a
PrimitiveSet *pset*. The primitive set needs to contain every primitive
present in the expression.
:param string: String representation of a Python expression.
:param pset: Primitive set from which primitives are selected.
:returns: PrimitiveTree populated with the deserialized primitives.
"""
tokens = re.split("[ \t\n\r\f\v(),]", string)
expr = []
def get_parts(token_string):
parts = tokens[i].split('_')
return parts[1], parts[2], parts[3]
i = 0
while i < len(tokens):
if tokens[i] == '':
i += 1
continue
if tokens[i] in pset.mapping:
primitive = pset.mapping[tokens[i]]
expr.append(primitive)
elif RangeOperationTerminal.NAME in tokens[i]:
operation, begin_range_name, end_range_name = get_parts(tokens[i])
range_operation_terminal = RangeOperationTerminal()
range_operation_terminal.initialize_parameters(pset.variable_type_indices, pset.variable_names,
operation, begin_range_name, end_range_name)
expr.append(range_operation_terminal)
elif MomentFindingTerminal.NAME in tokens[i]:
operation, begin_range_name, end_range_name = get_parts(tokens[i])
moment_operation_terminal = MomentFindingTerminal()
moment_operation_terminal.initialize_parameters(pset.variable_type_indices, pset.variable_names,
operation, begin_range_name, end_range_name)
expr.append(moment_operation_terminal)
else:
try:
token = eval(tokens[i])
except NameError:
raise TypeError("Unable to evaluate terminal: {}.".format(tokens[i]))
expr.append(gp.Terminal(token, False, gp.__type__))
i += 1
return cls(expr)
class SimpleParametrizedTerminal(gp.Terminal):
ret = object
def __init__(self, name="SimpleParametrizedTerminal", ret_type=object):
gp.Terminal.__init__(self, name, True, ret_type)
def __deepcopy__(self, memo):
new = self.__class__()
new.__dict__.update(deepcopy(self.__dict__, memo))
return new
def initialize_parameters(self, variable_type_indices, names):
raise NotImplementedError
def create_input_vector(self, predictors):
raise NotImplementedError
def call(*parameters):
pass # implement this method to make the class work with standard gp.compile
def name_operation(operation, name):
operation.__name__ = name
return operation
class RangeOperationTerminal(SimpleParametrizedTerminal):
NAME = 'RangeOperation'
def __init__(self):
SimpleParametrizedTerminal.__init__(self, RangeOperationTerminal.__name__)
self.begin_range = None
self.end_range = None
self.operation = None
self.names = None
self.lower_bound = None
self.upper_bound = None
self.operations = {
'sum': name_operation(np.sum, 'sum'),
'min': name_operation(np.min, 'min'),
'max': name_operation(np.max, 'max')
}
def initialize_parameters(self, variable_type_indices, names, operation=None, begin_range_name=None,
end_range_name=None, *args):
"""
:param variable_type_indices: A sequence of variable type indices where each entry defines the
index of a variable type in the design matrix. For example a design matrix with two variable types will have
indices [j,n] where variable type A spans 0 to j and variable type B spans j + 1 to n.
:param names:
:param args:
:param operation
:param begin_range_name
:param end_range_name
:return:
"""
self.names = names
for r in variable_type_indices:
if r[1] - r[0] < 2:
raise ValueError('Invalid range provided to Range Terminal: ' + str(r))
rng = random.choice(variable_type_indices)
self.lower_bound = rng[0]
self.upper_bound = rng[1]
if operation is not None and begin_range_name is not None and end_range_name is not None:
if self.operations.get(operation) is None:
raise ValueError('Invalid operation provided to Range Terminal: ' + operation)
if begin_range_name not in self.names:
raise ValueError('Invalid range name provided to Range Termnial: ' + str(begin_range_name))
if end_range_name not in names:
raise ValueError('Invalid range name provided to Range Termnial: ' + str(end_range_name))
begin_range = self.names.index(begin_range_name)
end_range = self.names.index(end_range_name)
valid = False
for r in variable_type_indices:
if r[0] <= begin_range < end_range <= r[1]:
valid = True
if not valid:
raise ValueError('Invalid range provided to Range Terminal: (' + str(begin_range) + ',' +
str(end_range) + ')')
self.operation = self.operations[operation]
self.begin_range = begin_range
self.end_range = end_range
else:
self.operation = random.choice(list(self.operations.values()))
self.begin_range = np.random.randint(self.lower_bound, self.upper_bound - 1)
self.end_range = np.random.randint(self.begin_range + 1, self.upper_bound)
def mutate_parameters(self, stdev_calc):
mutation = random.choice(['low', 'high'])
span = self.end_range - self.begin_range
if span == 0:
span = 1
value = random.gauss(0, stdev_calc(span))
amount = int(math.ceil(abs(value)))
if value < 0:
amount *= -1
if mutation == 'low':
location = amount + self.begin_range
if location < self.lower_bound:
self.begin_range = self.lower_bound
elif location > self.end_range - 2:
self.begin_range = self.end_range - 2
elif location > self.upper_bound - 2:
self.begin_range = self.upper_bound - 2
else:
self.begin_range = location
elif mutation == 'high':
location = amount + self.end_range
if location > self.upper_bound:
self.end_range = self.upper_bound
elif location < self.begin_range + 2:
self.end_range = self.begin_range + 2
elif location < self.lower_bound + 2:
self.end_range = self.lower_bound + 2
else:
self.end_range = location
def create_input_vector(self, predictors):
array = predictors[:, self.begin_range:self.end_range]
if array.shape[1] == 0:
return np.zeros((array.shape[0], 1))
else:
return self.operation(array, axis=1)
def format(self):
return "RangeOperation_{}_{}_{}".format(self.operation.__name__, self.names[self.begin_range],
self.names[self.end_range - 1])
class MomentFindingTerminal(RangeOperationTerminal):
NAME = 'MomentOperation'
def __init__(self):
super(MomentFindingTerminal, self).__init__()
self.operations = {
'mean': name_operation(np.mean, 'mean'),
'vari': name_operation(np.var, 'vari'),
'skew': name_operation(skew, 'skew')
}
def initialize_parameters(self, variable_type_indices, names, operation=None, begin_range_name=None,
end_range_name=None, *args):
if operation is None:
super(MomentFindingTerminal, self).initialize_parameters(variable_type_indices, names)
self.operation = random.choice(list(self.operations.values()))
else:
super(MomentFindingTerminal, self).initialize_parameters(variable_type_indices, names, operation,
begin_range_name, end_range_name, *args)
def format(self):
return "MomentOperation_{}_{}_{}".format(self.operation.__name__, self.names[self.begin_range],
self.names[self.end_range - 1])
class PolynomialFindingTerminal(RangeOperationTerminal):
NAME = 'PolynomialOperation'
def __init__(self):
super(PolynomialFindingTerminal, self).__init__()
self.operations = {
'first': self.first,
'second': self.second,
'third': self.third
}
def first(self, X, axis=1):
return self.polynomial(X, 1)
def second(self, X, axis=1):
return self.polynomial(X, 2)
def third(self, X, axis=1):
return self.polynomial(X, 3)
def polynomial(self, X, order, interactions=False):
start = time.time()
orders = []
for o in range(1, order + 1):
orders.append(np.apply_along_axis(lambda x: np.power(x, o), 1, X))
matrix = np.concatenate(orders, axis=1)
rows = matrix.shape[0]
cols = matrix.shape[1]
result = np.zeros(rows)
if interactions:
indices = [x for x in range(cols)]
for c in range(1, cols):
for comb in itertools.combinations(indices, c):
M = np.ones(rows)
for j in comb:
M *= matrix[:, j].reshape(rows)
result += M
else:
result = np.sum(matrix, axis=1)
return result
def initialize_parameters(self, variable_type_indices, names, operation=None, begin_range_name=None,
end_range_name=None, *args):
if operation is None:
super(PolynomialFindingTerminal, self).initialize_parameters(variable_type_indices, names)
self.operation = random.choice(list(self.operations.values()))
else:
super(PolynomialFindingTerminal, self).initialize_parameters(variable_type_indices, names, operation,
begin_range_name, end_range_name, *args)
def format(self):
return "PolynomialOperation{}_{}_{}".format(self.operation.__name__, self.names[self.begin_range],
self.names[self.end_range - 1])
def named_moment(number):
def f(vector, axis=0):
return moment(vector, moment=number, axis=axis)
f.__name__ = "moment_" + str(number)
return f
def generate_parametrized_expression(generate_expression, variable_type_indices, names):
expr = generate_expression()
for node in expr:
if isinstance(node, SimpleParametrizedTerminal):
node.initialize_parameters(variable_type_indices, names)
return expr
def evolve_parametrized_expression(stdev_calc):
def decorator(func):
def wrapper(*args, **kargs):
offspring = list(func(*args, **kargs))
for ind in offspring:
for node in ind:
if isinstance(node, SimpleParametrizedTerminal):
node.mutate_parameters(stdev_calc)
return offspring
return wrapper
return decorator
def get_parametrized_nodes(ind):
return list(filter(lambda node: isinstance(node, SimpleParametrizedTerminal), ind))
def mutate_parametrized_nodes(ind, stdev_calc):
param_nodes = get_parametrized_nodes(ind)
map(lambda node: node.mutate_parameters(stdev_calc), param_nodes)
return ind,
def mutate_single_parametrized_node(ind, stdev_calc):
param_nodes = get_parametrized_nodes(ind)
if len(param_nodes) != 0:
random.choice(param_nodes).mutate_parameters(stdev_calc)
return ind,
def search_entire_space(node, evaluate_function):
fitness = []
parameters = []
begin = node.lower_bound
while begin <= node.upper_bound:
end = begin + 1
while end <= node.upper_bound:
node.begin_range = begin
node.end_range = end
fitness.append(evaluate_function())
parameters.append((begin, end))
end += 1
begin += 1
return parameters, fitness
def optimize_node(node, evaluate_function, optimization_objective_function):
parameters, fitness = search_entire_space(node, evaluate_function)
best_value = optimization_objective_function(fitness)
optimal_index = fitness.index(best_value)
begin, end = parameters[optimal_index]
node.begin_range = begin
node.end_range = end
return parameters, fitness
def mutate_single_parametrized_node_optimal(ind, evaluate_function, optimization_objective_function):
param_nodes = get_parametrized_nodes(ind)
if len(param_nodes) != 0:
node = random.choice(param_nodes)
optimize_node(node, partial(evaluate_function, ind=ind), optimization_objective_function)
return ind,
def simple_parametrized_evaluate(ind, context, predictors, error_function=None, expression_dict=None):
semantics_stack = []
expressions_stack = []
if expression_dict is None:
expression_dict = cachetools.LRUCache(maxsize=100)
for node in reversed(ind):
expression = node.format(*[expressions_stack.pop() for _ in range(node.arity)])
subtree_semantics = [semantics_stack.pop() for _ in range(node.arity)]
if expression in expression_dict:
vector = expression_dict[expression]
else:
vector = get_node_semantics(node, subtree_semantics, predictors, context)
expression_dict[expression] = vector
expressions_stack.append(expression)
semantics_stack.append(vector)
if error_function is None:
return semantics_stack.pop()
else:
return error_function(semantics_stack.pop())
def get_terminal_semantics(node, context, predictors):
if isinstance(node, gp.Ephemeral) or isinstance(node.value, float) or isinstance(node.value, int):
return np.ones(len(predictors)) * node.value
if node.value in context:
return np.ones(len(predictors)) * context[node.value]
arg_index = re.findall('\d+', node.name)
return predictors[:, int(arg_index[0])]
def get_node_semantics(node, subtree_semantics, predictors, context):
if isinstance(node, SimpleParametrizedTerminal):
vector = node.create_input_vector(predictors)
elif isinstance(node, gp.Terminal):
vector = get_terminal_semantics(node, context, predictors)
else:
with np.errstate(over='ignore', divide='ignore', invalid='ignore'):
vector = context[node.name](*list(map(lambda x: x.astype(float) if type(x) != float else x,
subtree_semantics)))
return vector
def graph(expr):
nodes = range(len(expr))
edges = list()
labels = dict()
stack = []
for i, node in enumerate(expr):
if stack:
edges.append((stack[-1][0], i))
stack[-1][1] -= 1
if isinstance(node, gp.Primitive):
labels[i] = node.name
elif isinstance(node, SimpleParametrizedTerminal):
labels[i] = node.format()
else:
labels[i] = node.value
stack.append([i, node.arity])
while stack and stack[-1][1] == 0:
stack.pop()
return nodes, edges, labels
| [
"numpy.sum",
"numpy.ones",
"numpy.random.randint",
"scipy.stats.moment",
"cachetools.LRUCache",
"numpy.power",
"re.findall",
"deap.gp.PrimitiveSet.__init__",
"deap.gp.PrimitiveTree.__init__",
"functools.partial",
"copy.deepcopy",
"re.split",
"itertools.combinations",
"numpy.concatenate",
... | [((15849, 15878), 're.findall', 're.findall', (['"""\\\\d+"""', 'node.name'], {}), "('\\\\d+', node.name)\n", (15859, 15878), False, 'import re\n'), ((372, 423), 'deap.gp.PrimitiveSet.__init__', 'gp.PrimitiveSet.__init__', (['self', 'name', 'arity', 'prefix'], {}), '(self, name, arity, prefix)\n', (396, 423), False, 'from deap import gp\n'), ((840, 880), 'deap.gp.PrimitiveTree.__init__', 'gp.PrimitiveTree.__init__', (['self', 'content'], {}), '(self, content)\n', (865, 880), False, 'from deap import gp\n'), ((1664, 1704), 're.split', 're.split', (["'[ \\t\\n\\r\\x0c\\x0b(),]'", 'string'], {}), "('[ \\t\\n\\r\\x0c\\x0b(),]', string)\n", (1672, 1704), False, 'import re\n'), ((3544, 3592), 'deap.gp.Terminal.__init__', 'gp.Terminal.__init__', (['self', 'name', '(True)', 'ret_type'], {}), '(self, name, True, ret_type)\n', (3564, 3592), False, 'from deap import gp\n'), ((5542, 5578), 'random.choice', 'random.choice', (['variable_type_indices'], {}), '(variable_type_indices)\n', (5555, 5578), False, 'import random\n'), ((7140, 7170), 'random.choice', 'random.choice', (["['low', 'high']"], {}), "(['low', 'high'])\n", (7153, 7170), False, 'import random\n'), ((10515, 10526), 'time.time', 'time.time', ([], {}), '()\n', (10524, 10526), False, 'import time\n'), ((10681, 10711), 'numpy.concatenate', 'np.concatenate', (['orders'], {'axis': '(1)'}), '(orders, axis=1)\n', (10695, 10711), True, 'import numpy as np\n'), ((10791, 10805), 'numpy.zeros', 'np.zeros', (['rows'], {}), '(rows)\n', (10799, 10805), True, 'import numpy as np\n'), ((12119, 12159), 'scipy.stats.moment', 'moment', (['vector'], {'moment': 'number', 'axis': 'axis'}), '(vector, moment=number, axis=axis)\n', (12125, 12159), False, 'from scipy.stats import skew, moment\n'), ((14479, 14505), 'random.choice', 'random.choice', (['param_nodes'], {}), '(param_nodes)\n', (14492, 14505), False, 'import random\n'), ((14836, 14868), 'cachetools.LRUCache', 'cachetools.LRUCache', ([], {'maxsize': '(100)'}), 
'(maxsize=100)\n', (14855, 14868), False, 'import cachetools\n'), ((1120, 1149), 'copy.deepcopy', 'deepcopy', (['self.__dict__', 'memo'], {}), '(self.__dict__, memo)\n', (1128, 1149), False, 'from copy import deepcopy\n'), ((3687, 3716), 'copy.deepcopy', 'deepcopy', (['self.__dict__', 'memo'], {}), '(self.__dict__, memo)\n', (3695, 3716), False, 'from copy import deepcopy\n'), ((6930, 6987), 'numpy.random.randint', 'np.random.randint', (['self.lower_bound', '(self.upper_bound - 1)'], {}), '(self.lower_bound, self.upper_bound - 1)\n', (6947, 6987), True, 'import numpy as np\n'), ((7017, 7074), 'numpy.random.randint', 'np.random.randint', (['(self.begin_range + 1)', 'self.upper_bound'], {}), '(self.begin_range + 1, self.upper_bound)\n', (7034, 7074), True, 'import numpy as np\n'), ((8453, 8482), 'numpy.zeros', 'np.zeros', (['(array.shape[0], 1)'], {}), '((array.shape[0], 1))\n', (8461, 8482), True, 'import numpy as np\n'), ((11175, 11197), 'numpy.sum', 'np.sum', (['matrix'], {'axis': '(1)'}), '(matrix, axis=1)\n', (11181, 11197), True, 'import numpy as np\n'), ((14534, 14569), 'functools.partial', 'partial', (['evaluate_function'], {'ind': 'ind'}), '(evaluate_function, ind=ind)\n', (14541, 14569), False, 'from functools import partial\n'), ((1077, 1091), 'copy.deepcopy', 'deepcopy', (['node'], {}), '(node)\n', (1085, 1091), False, 'from copy import deepcopy\n'), ((10943, 10977), 'itertools.combinations', 'itertools.combinations', (['indices', 'c'], {}), '(indices, c)\n', (10965, 10977), False, 'import itertools\n'), ((13378, 13404), 'random.choice', 'random.choice', (['param_nodes'], {}), '(param_nodes)\n', (13391, 13404), False, 'import random\n'), ((16231, 16292), 'numpy.errstate', 'np.errstate', ([], {'over': '"""ignore"""', 'divide': '"""ignore"""', 'invalid': '"""ignore"""'}), "(over='ignore', divide='ignore', invalid='ignore')\n", (16242, 16292), True, 'import numpy as np\n'), ((11003, 11016), 'numpy.ones', 'np.ones', (['rows'], {}), '(rows)\n', (11010, 11016), 
True, 'import numpy as np\n'), ((10641, 10655), 'numpy.power', 'np.power', (['x', 'o'], {}), '(x, o)\n', (10649, 10655), True, 'import numpy as np\n'), ((3309, 3347), 'deap.gp.Terminal', 'gp.Terminal', (['token', '(False)', 'gp.__type__'], {}), '(token, False, gp.__type__)\n', (3320, 3347), False, 'from deap import gp\n')] |
# save coco_caption images: train & valid
import os
import torch
import cv2
import matplotlib.pyplot as plt
from tqdm import tqdm
import numpy as np
def __mk_idform__(id):
    """Zero-pad a COCO image id (string) to at least 6 digits.

    COCO 2014 file names embed the image id as a 6-digit zero-padded
    field (e.g. ``COCO_train2014_000000000042.jpg``).  ``str.zfill``
    replaces the original 6-way if/elif ladder and additionally handles
    the empty string (padded to '000000' instead of returned as-is).

    Parameters
    ----------
    id : str
        Image id already converted to a string by the caller.

    Returns
    -------
    str
        The id left-padded with zeros to length 6 (unchanged if longer).
    """
    return f'{id}'.zfill(6)
# Create the 42 train / 21 valid "coco_part<N>" output folders on Google Drive.
# os.makedirs(..., exist_ok=True) also creates any missing parent directories
# and makes the script safe to re-run (plain os.mkdir raises FileExistsError
# on a second run and FileNotFoundError if a parent is missing).
_TRAIN_PART_DIR = '/content/gdrive/My Drive/coco_image_caption/train/images/coco_part'
_VALID_PART_DIR = '/content/gdrive/My Drive/coco_image_caption/valid/images/coco_part'
for i in range(42):
    os.makedirs(_TRAIN_PART_DIR + f'{i+1}', exist_ok=True)
for i in range(21):
    os.makedirs(_VALID_PART_DIR + f'{i+1}', exist_ok=True)
class save_train_cocoimage_dataset(torch.utils.data.Dataset):
    """Per-row view of the training caption table.

    Each item is ``(folder_index, padded_image_id, image_array)`` where
    ``folder_index`` is the 1-based target "coco_part" folder and the
    image is read from the local ``train2014`` directory.
    """

    def __init__(self, captions):
        self.captions = captions

    def __len__(self):
        return len(self.captions)

    def __getitem__(self, index):
        row = self.captions.loc[index]
        # Pad the numeric id to the 6-digit form used in COCO file names.
        padded_id = __mk_idform__(f"{row['image_id']}")
        img = cv2.imread(f'./train2014/COCO_train2014_000000{padded_id}.jpg')
        folder = row['directory'] + 1
        return folder, padded_id, img
# Instantiate the dataset over the training caption table.
# NOTE(review): `train_coco_captions` must be defined upstream (not in this
# file), and this assignment rebinds the class name to an instance, so the
# class cannot be instantiated again afterwards.
save_train_cocoimage_dataset = save_train_cocoimage_dataset(train_coco_captions)
# Single-worker, unshuffled, batch-size-1 loader: images are copied out one
# by one in caption-table order.
save_train_cocoimage_dataloader = torch.utils.data.DataLoader(
    save_train_cocoimage_dataset,
    batch_size = 1,
    pin_memory = False,
    drop_last = False,
    shuffle = False,
    num_workers = 1)
# tqdm progress bar over the whole loader.
save_train_cocoimage_book = tqdm(save_train_cocoimage_dataloader,
    total = len(save_train_cocoimage_dataloader))
for step, data in enumerate(save_train_cocoimage_book):
    direc, image_id, image = data
    # Write each image into its coco_part<direc> folder on Google Drive;
    # cv2.imwrite's boolean status is deliberately discarded.
    _ = cv2.imwrite('/content/gdrive/My Drive/coco_image_caption/train/images/coco_part'
        + f'{direc.tolist()[0]}/' + f'{image_id[0]}.jpg', np.array(image[0]))
class save_valid_cocoimage_dataset(torch.utils.data.Dataset):
    """Per-row view of the validation caption table.

    Each item is ``(folder_index, padded_image_id, image_array)`` where
    ``folder_index`` is the 1-based target "coco_part" folder and the
    image is read from the local ``val2014`` directory.
    """

    def __init__(self, captions):
        self.captions = captions

    def __len__(self):
        return len(self.captions)

    def __getitem__(self, index):
        row = self.captions.loc[index]
        # Pad the numeric id to the 6-digit form used in COCO file names.
        padded_id = __mk_idform__(f"{row['image_id']}")
        img = cv2.imread(f'./val2014/COCO_val2014_000000{padded_id}.jpg')
        folder = row['directory'] + 1
        return folder, padded_id, img
# Instantiate the dataset over the validation caption table.
# NOTE(review): `valid_coco_captions` must be defined upstream (not in this
# file), and this assignment rebinds the class name to an instance, so the
# class cannot be instantiated again afterwards.
save_valid_cocoimage_dataset = save_valid_cocoimage_dataset(valid_coco_captions)
# Single-worker, unshuffled, batch-size-1 loader: images are copied out one
# by one in caption-table order.
save_valid_cocoimage_dataloader = torch.utils.data.DataLoader(
    save_valid_cocoimage_dataset,
    batch_size = 1,
    pin_memory = False,
    drop_last = False,
    shuffle = False,
    num_workers = 1)
# tqdm progress bar over the whole loader.
save_valid_cocoimage_book = tqdm(save_valid_cocoimage_dataloader,
    total = len(save_valid_cocoimage_dataloader))
for step, data in enumerate(save_valid_cocoimage_book):
    direc, image_id, image = data
    # Write each image into its coco_part<direc> folder on Google Drive;
    # cv2.imwrite's boolean status is deliberately discarded.
    _ = cv2.imwrite('/content/gdrive/My Drive/coco_image_caption/valid/images/coco_part'
        + f'{direc.tolist()[0]}/' + f'{image_id[0]}.jpg', np.array(image[0]))
| [
"os.mkdir",
"numpy.array",
"cv2.imread",
"torch.utils.data.DataLoader"
] | [((1260, 1400), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['save_train_cocoimage_dataset'], {'batch_size': '(1)', 'pin_memory': '(False)', 'drop_last': '(False)', 'shuffle': '(False)', 'num_workers': '(1)'}), '(save_train_cocoimage_dataset, batch_size=1,\n pin_memory=False, drop_last=False, shuffle=False, num_workers=1)\n', (1287, 1400), False, 'import torch\n'), ((2441, 2581), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['save_valid_cocoimage_dataset'], {'batch_size': '(1)', 'pin_memory': '(False)', 'drop_last': '(False)', 'shuffle': '(False)', 'num_workers': '(1)'}), '(save_valid_cocoimage_dataset, batch_size=1,\n pin_memory=False, drop_last=False, shuffle=False, num_workers=1)\n', (2468, 2581), False, 'import torch\n'), ((458, 558), 'os.mkdir', 'os.mkdir', (["('/content/gdrive/My Drive/coco_image_caption/train/images/coco_part' +\n f'{i + 1}')"], {}), "(\n '/content/gdrive/My Drive/coco_image_caption/train/images/coco_part' +\n f'{i + 1}')\n", (466, 558), False, 'import os\n'), ((571, 671), 'os.mkdir', 'os.mkdir', (["('/content/gdrive/My Drive/coco_image_caption/valid/images/coco_part' +\n f'{i + 1}')"], {}), "(\n '/content/gdrive/My Drive/coco_image_caption/valid/images/coco_part' +\n f'{i + 1}')\n", (579, 671), False, 'import os\n'), ((1003, 1070), 'cv2.imread', 'cv2.imread', (["('./train2014/COCO_train2014_000000' + f'{image_id}.jpg')"], {}), "('./train2014/COCO_train2014_000000' + f'{image_id}.jpg')\n", (1013, 1070), False, 'import cv2\n'), ((1824, 1842), 'numpy.array', 'np.array', (['image[0]'], {}), '(image[0])\n', (1832, 1842), True, 'import numpy as np\n'), ((2188, 2251), 'cv2.imread', 'cv2.imread', (["('./val2014/COCO_val2014_000000' + f'{image_id}.jpg')"], {}), "('./val2014/COCO_val2014_000000' + f'{image_id}.jpg')\n", (2198, 2251), False, 'import cv2\n'), ((3005, 3023), 'numpy.array', 'np.array', (['image[0]'], {}), '(image[0])\n', (3013, 3023), True, 'import numpy as np\n')] |
import traceback

import cv2
import numpy as np
import tensorflow as tf
import tflearn

from FaceProcessUtil import PreprocessImage as PPI
# Softmax index -> emotion label for the 7-way classifier output.
MAPPING = {0:'neutral', 1:'anger', 2:'surprise', 3:'disgust', 4:'fear', 5:'happy', 6:'sadness'}
# Directory holding the pretrained checkpoint files.
MP = './models/'
DEFAULT_PADDING = 'SAME'
# `mid // TypeThreshold` selects the network family in FacePatchesModel
# (4..6 = three-patch nets, 24..26 = two-patch nets).
TypeThreshold=100
# Placeholder shapes [batch, height, width, channels] for each face patch.
eye_p_shape=[None, 26, 64, 1]
midd_p_shape=[None, 49, 28, 1]
mou_p_shape=[None, 30, 54, 1]
###dependent modules for network definition
###
#4 network definition under tflearn
def FacePatches_NET_3Conv_IInception_tflear(eyep, middlep, mouthp):
    """Three-branch patch CNN (eye / middle / mouth), 3 conv stages per branch.

    Each branch: 3 stages of (conv3x3 x2 + 2x2 max-pool) with widths 8/32/128,
    followed by a 1024-unit tanh FC.  The three branch features are
    concatenated and classified by two 2048-unit ReLU FC layers with
    dropout (keep-prob 0.8) into a 7-way softmax.

    The three branches were originally copy-pasted; they are identical except
    for the layer-name prefix, so they are built by one helper.  All layer
    names match the original graph, keeping pretrained checkpoints loadable.

    Args:
        eyep, middlep, mouthp: input tensors for the three facial patches.
    Returns:
        The 7-way softmax output tensor (layer name 'prob').
    """
    def _branch(inp, prefix):
        # One patch branch: 3 x (conv-conv-pool) + tanh FC, names keyed by prefix.
        net = tflearn.conv_2d(inp, 8, 3, activation='relu', name=prefix + '_conv1_1_3x3')
        net = tflearn.conv_2d(net, 8, 3, activation='relu', name=prefix + '_conv1_2_3x3')
        net = tflearn.max_pool_2d(net, 2, 2, name=prefix + '_pool1')
        net = tflearn.conv_2d(net, 32, 3, activation='relu', name=prefix + '_conv2_1_3x3')
        net = tflearn.conv_2d(net, 32, 3, activation='relu', name=prefix + '_conv2_2_3x3')
        net = tflearn.max_pool_2d(net, 2, 2, name=prefix + '_pool2')
        net = tflearn.conv_2d(net, 128, 3, activation='relu', name=prefix + '_conv3_1_3x3')
        net = tflearn.conv_2d(net, 128, 3, activation='relu', name=prefix + '_conv3_2_3x3')
        net = tflearn.max_pool_2d(net, 2, 2, name=prefix + '_pool3')
        return tflearn.fully_connected(net, 1024, activation='tanh', name=prefix + '_fc1')

    e_net = _branch(eyep, 'eye')
    mi_net = _branch(middlep, 'middle')
    mo_net = _branch(mouthp, 'mouth')

    # Fusion head: concat branch features, two FC+dropout stages, softmax.
    fc_net = tf.concat([e_net, mi_net, mo_net], 1, name='fusion_1')
    fc_net = tflearn.fully_connected(fc_net, 2048, activation='relu', name='fc1')
    fc_net = tflearn.dropout(fc_net, 0.8, name='drop1')
    fc_net = tflearn.fully_connected(fc_net, 2048, activation='relu', name='fc2')
    fc_net = tflearn.dropout(fc_net, 0.8, name='drop2')
    softmax = tflearn.fully_connected(fc_net, 7, activation='softmax', name='prob')
    return softmax
#5 network definition under tflearn
def FacePatches_NET_3Conv_2Inception_tflearn(eyep, middlep, mouthp):
    """Three-branch patch CNN with a two-level inception-style feature tap.

    Per branch: 3 conv stages (widths 8/32/128); the features after stage 2
    are additionally tapped through a 1024-unit tanh FC ('<prefix>_fc2') and
    concatenated with the final 1024-unit FC ('<prefix>_fc1'), so each branch
    exposes mid- and high-level features.  Fusion head: two 2048-unit ReLU FC
    layers with dropout (keep-prob 0.8) into a 7-way softmax.

    The three copy-pasted branches are built by one helper; all layer names
    match the original graph, keeping pretrained checkpoints loadable.

    Args:
        eyep, middlep, mouthp: input tensors for the three facial patches.
    Returns:
        The 7-way softmax output tensor (layer name 'prob').
    """
    def _branch(inp, prefix):
        # Stage 1
        net = tflearn.conv_2d(inp, 8, 3, activation='relu', name=prefix + '_conv1_1_3x3')
        net = tflearn.conv_2d(net, 8, 3, activation='relu', name=prefix + '_conv1_2_3x3')
        net = tflearn.max_pool_2d(net, 2, 2, name=prefix + '_pool1')
        # Stage 2, with a mid-level feature tap
        net = tflearn.conv_2d(net, 32, 3, activation='relu', name=prefix + '_conv2_1_3x3')
        net = tflearn.conv_2d(net, 32, 3, activation='relu', name=prefix + '_conv2_2_3x3')
        net = tflearn.max_pool_2d(net, 2, 2, name=prefix + '_pool2')
        tap2 = tflearn.fully_connected(net, 1024, activation='tanh', name=prefix + '_fc2')
        # Stage 3
        net = tflearn.conv_2d(net, 128, 3, activation='relu', name=prefix + '_conv3_1_3x3')
        net = tflearn.conv_2d(net, 128, 3, activation='relu', name=prefix + '_conv3_2_3x3')
        net = tflearn.max_pool_2d(net, 2, 2, name=prefix + '_pool3')
        fc1 = tflearn.fully_connected(net, 1024, activation='tanh', name=prefix + '_fc1')
        # Concat order (fc1, fc2) matches the original graph.
        return tf.concat([fc1, tap2], 1, name=prefix + '_fc')

    e_net = _branch(eyep, 'eye')
    mi_net = _branch(middlep, 'middle')
    mo_net = _branch(mouthp, 'mouth')

    fc_net = tf.concat([e_net, mi_net, mo_net], 1, name='fusion_1')
    fc_net = tflearn.fully_connected(fc_net, 2048, activation='relu', name='fc1')
    fc_net = tflearn.dropout(fc_net, 0.8, name='drop1')
    fc_net = tflearn.fully_connected(fc_net, 2048, activation='relu', name='fc2')
    fc_net = tflearn.dropout(fc_net, 0.8, name='drop2')
    softmax = tflearn.fully_connected(fc_net, 7, activation='softmax', name='prob')
    return softmax
#6 network definition under tflearn
def FacePatches_NET_3Conv_3Inception_tflearn(eyep, middlep, mouthp):
    """Three-branch patch CNN with feature taps after every conv stage.

    Per branch: 3 conv stages (widths 8/32/128); the features after stage 1
    ('<prefix>_fc3') and stage 2 ('<prefix>_fc2') are tapped through
    1024-unit tanh FCs and concatenated with the final 1024-unit FC
    ('<prefix>_fc1'), exposing low-, mid- and high-level features per branch.
    Fusion head: two 2048-unit ReLU FC layers with dropout (keep-prob 0.8)
    into a 7-way softmax.

    The three copy-pasted branches are built by one helper; all layer names
    match the original graph, keeping pretrained checkpoints loadable.

    Args:
        eyep, middlep, mouthp: input tensors for the three facial patches.
    Returns:
        The 7-way softmax output tensor (layer name 'prob').
    """
    def _branch(inp, prefix):
        # Stage 1, with a low-level feature tap
        net = tflearn.conv_2d(inp, 8, 3, activation='relu', name=prefix + '_conv1_1_3x3')
        net = tflearn.conv_2d(net, 8, 3, activation='relu', name=prefix + '_conv1_2_3x3')
        net = tflearn.max_pool_2d(net, 2, 2, name=prefix + '_pool1')
        tap3 = tflearn.fully_connected(net, 1024, activation='tanh', name=prefix + '_fc3')
        # Stage 2, with a mid-level feature tap
        net = tflearn.conv_2d(net, 32, 3, activation='relu', name=prefix + '_conv2_1_3x3')
        net = tflearn.conv_2d(net, 32, 3, activation='relu', name=prefix + '_conv2_2_3x3')
        net = tflearn.max_pool_2d(net, 2, 2, name=prefix + '_pool2')
        tap2 = tflearn.fully_connected(net, 1024, activation='tanh', name=prefix + '_fc2')
        # Stage 3
        net = tflearn.conv_2d(net, 128, 3, activation='relu', name=prefix + '_conv3_1_3x3')
        net = tflearn.conv_2d(net, 128, 3, activation='relu', name=prefix + '_conv3_2_3x3')
        net = tflearn.max_pool_2d(net, 2, 2, name=prefix + '_pool3')
        fc1 = tflearn.fully_connected(net, 1024, activation='tanh', name=prefix + '_fc1')
        # Concat order (fc1, fc2, fc3) matches the original graph.
        return tf.concat([fc1, tap2, tap3], 1, name=prefix + '_fc')

    e_net = _branch(eyep, 'eye')
    mi_net = _branch(middlep, 'middle')
    mo_net = _branch(mouthp, 'mouth')

    fc_net = tf.concat([e_net, mi_net, mo_net], 1, name='fusion_1')
    fc_net = tflearn.fully_connected(fc_net, 2048, activation='relu', name='fc1')
    fc_net = tflearn.dropout(fc_net, 0.8, name='drop1')
    fc_net = tflearn.fully_connected(fc_net, 2048, activation='relu', name='fc2')
    fc_net = tflearn.dropout(fc_net, 0.8, name='drop2')
    softmax = tflearn.fully_connected(fc_net, 7, activation='softmax', name='prob')
    return softmax
###using net 24
def FacePatches_NET_3C_1I_2P(eyep, mouthp):
    """Two-patch (eye + mouth) variant of the 3-conv-stage network ("net 24").

    Identical branch structure to the three-patch single-tap net -- 3 stages
    of (conv3x3 x2 + 2x2 max-pool) with widths 8/32/128 and a 1024-unit tanh
    FC -- but without the middle patch.  Fusion head: two 2048-unit ReLU FC
    layers with dropout (keep-prob 0.8) into a 7-way softmax.

    All layer names match the original graph, keeping checkpoints loadable.

    Args:
        eyep, mouthp: input tensors for the eye and mouth patches.
    Returns:
        The 7-way softmax output tensor (layer name 'prob').
    """
    def _branch(inp, prefix):
        # One patch branch: 3 x (conv-conv-pool) + tanh FC, names keyed by prefix.
        net = tflearn.conv_2d(inp, 8, 3, activation='relu', name=prefix + '_conv1_1_3x3')
        net = tflearn.conv_2d(net, 8, 3, activation='relu', name=prefix + '_conv1_2_3x3')
        net = tflearn.max_pool_2d(net, 2, 2, name=prefix + '_pool1')
        net = tflearn.conv_2d(net, 32, 3, activation='relu', name=prefix + '_conv2_1_3x3')
        net = tflearn.conv_2d(net, 32, 3, activation='relu', name=prefix + '_conv2_2_3x3')
        net = tflearn.max_pool_2d(net, 2, 2, name=prefix + '_pool2')
        net = tflearn.conv_2d(net, 128, 3, activation='relu', name=prefix + '_conv3_1_3x3')
        net = tflearn.conv_2d(net, 128, 3, activation='relu', name=prefix + '_conv3_2_3x3')
        net = tflearn.max_pool_2d(net, 2, 2, name=prefix + '_pool3')
        return tflearn.fully_connected(net, 1024, activation='tanh', name=prefix + '_fc1')

    e_net = _branch(eyep, 'eye')
    mo_net = _branch(mouthp, 'mouth')

    fc_net = tf.concat([e_net, mo_net], 1, name='fusion_1')
    fc_net = tflearn.fully_connected(fc_net, 2048, activation='relu', name='fc1')
    fc_net = tflearn.dropout(fc_net, 0.8, name='drop1')
    fc_net = tflearn.fully_connected(fc_net, 2048, activation='relu', name='fc2')
    fc_net = tflearn.dropout(fc_net, 0.8, name='drop2')
    softmax = tflearn.fully_connected(fc_net, 7, activation='softmax', name='prob')
    return softmax
###using net 25
def FacePatches_NET_3C_2I_2P(eyep, mouthp):
    """Two-patch (eye + mouth) variant with a mid-level feature tap ("net 25").

    Per branch: 3 conv stages (widths 8/32/128); the features after stage 2
    are tapped through a 1024-unit tanh FC ('<prefix>_fc2') and concatenated
    with the final 1024-unit FC ('<prefix>_fc1').  Fusion head: two 2048-unit
    ReLU FC layers with dropout (keep-prob 0.8) into a 7-way softmax.

    All layer names match the original graph, keeping checkpoints loadable.

    Args:
        eyep, mouthp: input tensors for the eye and mouth patches.
    Returns:
        The 7-way softmax output tensor (layer name 'prob').
    """
    def _branch(inp, prefix):
        # Stage 1
        net = tflearn.conv_2d(inp, 8, 3, activation='relu', name=prefix + '_conv1_1_3x3')
        net = tflearn.conv_2d(net, 8, 3, activation='relu', name=prefix + '_conv1_2_3x3')
        net = tflearn.max_pool_2d(net, 2, 2, name=prefix + '_pool1')
        # Stage 2, with a mid-level feature tap
        net = tflearn.conv_2d(net, 32, 3, activation='relu', name=prefix + '_conv2_1_3x3')
        net = tflearn.conv_2d(net, 32, 3, activation='relu', name=prefix + '_conv2_2_3x3')
        net = tflearn.max_pool_2d(net, 2, 2, name=prefix + '_pool2')
        tap2 = tflearn.fully_connected(net, 1024, activation='tanh', name=prefix + '_fc2')
        # Stage 3
        net = tflearn.conv_2d(net, 128, 3, activation='relu', name=prefix + '_conv3_1_3x3')
        net = tflearn.conv_2d(net, 128, 3, activation='relu', name=prefix + '_conv3_2_3x3')
        net = tflearn.max_pool_2d(net, 2, 2, name=prefix + '_pool3')
        fc1 = tflearn.fully_connected(net, 1024, activation='tanh', name=prefix + '_fc1')
        # Concat order (fc1, fc2) matches the original graph.
        return tf.concat([fc1, tap2], 1, name=prefix + '_fc')

    e_net = _branch(eyep, 'eye')
    mo_net = _branch(mouthp, 'mouth')

    fc_net = tf.concat([e_net, mo_net], 1, name='fusion_1')
    fc_net = tflearn.fully_connected(fc_net, 2048, activation='relu', name='fc1')
    fc_net = tflearn.dropout(fc_net, 0.8, name='drop1')
    fc_net = tflearn.fully_connected(fc_net, 2048, activation='relu', name='fc2')
    fc_net = tflearn.dropout(fc_net, 0.8, name='drop2')
    softmax = tflearn.fully_connected(fc_net, 7, activation='softmax', name='prob')
    return softmax
###using net 26
def FacePatches_NET_3C_3I_2P(eyep, mouthp):
    """Two-patch (eye + mouth) variant with taps after every stage ("net 26").

    Per branch: 3 conv stages (widths 8/32/128); features after stage 1
    ('<prefix>_fc3') and stage 2 ('<prefix>_fc2') are tapped through
    1024-unit tanh FCs and concatenated with the final 1024-unit FC
    ('<prefix>_fc1').  Fusion head: two 2048-unit ReLU FC layers with
    dropout (keep-prob 0.8) into a 7-way softmax.

    All layer names match the original graph, keeping checkpoints loadable.

    Args:
        eyep, mouthp: input tensors for the eye and mouth patches.
    Returns:
        The 7-way softmax output tensor (layer name 'prob').
    """
    def _branch(inp, prefix):
        # Stage 1, with a low-level feature tap
        net = tflearn.conv_2d(inp, 8, 3, activation='relu', name=prefix + '_conv1_1_3x3')
        net = tflearn.conv_2d(net, 8, 3, activation='relu', name=prefix + '_conv1_2_3x3')
        net = tflearn.max_pool_2d(net, 2, 2, name=prefix + '_pool1')
        tap3 = tflearn.fully_connected(net, 1024, activation='tanh', name=prefix + '_fc3')
        # Stage 2, with a mid-level feature tap
        net = tflearn.conv_2d(net, 32, 3, activation='relu', name=prefix + '_conv2_1_3x3')
        net = tflearn.conv_2d(net, 32, 3, activation='relu', name=prefix + '_conv2_2_3x3')
        net = tflearn.max_pool_2d(net, 2, 2, name=prefix + '_pool2')
        tap2 = tflearn.fully_connected(net, 1024, activation='tanh', name=prefix + '_fc2')
        # Stage 3
        net = tflearn.conv_2d(net, 128, 3, activation='relu', name=prefix + '_conv3_1_3x3')
        net = tflearn.conv_2d(net, 128, 3, activation='relu', name=prefix + '_conv3_2_3x3')
        net = tflearn.max_pool_2d(net, 2, 2, name=prefix + '_pool3')
        fc1 = tflearn.fully_connected(net, 1024, activation='tanh', name=prefix + '_fc1')
        # Concat order (fc1, fc2, fc3) matches the original graph.
        return tf.concat([fc1, tap2, tap3], 1, name=prefix + '_fc')

    e_net = _branch(eyep, 'eye')
    mo_net = _branch(mouthp, 'mouth')

    fc_net = tf.concat([e_net, mo_net], 1, name='fusion_1')
    fc_net = tflearn.fully_connected(fc_net, 2048, activation='relu', name='fc1')
    fc_net = tflearn.dropout(fc_net, 0.8, name='drop1')
    fc_net = tflearn.fully_connected(fc_net, 2048, activation='relu', name='fc2')
    fc_net = tflearn.dropout(fc_net, 0.8, name='drop2')
    softmax = tflearn.fully_connected(fc_net, 7, activation='softmax', name='prob')
    return softmax
#
def getModelPathForPrediction(mid=0):
    """Resolve a model id to its pretrained checkpoint path.

    Only ids 400, 500 and 600 are currently wired up -- their paths are
    still empty placeholders under the MP checkpoint directory.  Any other
    id prints a message and aborts the process, exactly as before.
    (Earlier checkpoints for ids 300/301/303 were commented out in a
    previous revision.)
    """
    checkpoints = {
        400: MP + '',
        500: MP + '',
        600: MP + '',
    }
    if mid not in checkpoints:
        print('Unexpected Model ID. TRY another one.')
        exit(-1)
    return checkpoints[mid]
#model for prediction
class FacePatchesModel:
    """Loads a pretrained face-patch emotion network and runs inference.

    `mid // TypeThreshold` selects the network family:
      * 4..6   -> three-patch nets (eye + middle + mouth inputs)
      * 24..26 -> two-patch nets (eye + mouth inputs only)
    Any other value aborts the process.
    """

    def __init__(self, mid=300):
        # Build the inference graph in its own tf.Graph so several models
        # can coexist in one process.
        self.networkGraph = tf.Graph()
        with self.networkGraph.as_default():
            self.eye_p = tf.placeholder(tf.float32, eye_p_shape)
            self.mou_p = tf.placeholder(tf.float32, mou_p_shape)
            net_type = mid // TypeThreshold
            if 3 < net_type < 7:
                # Three-patch family also consumes the middle (nose) patch.
                self.midd_p = tf.placeholder(tf.float32, midd_p_shape)
                self.prob = FacePatches_NET_3Conv_IInception_tflear(
                    self.eye_p, self.midd_p, self.mou_p)
            elif 23 < net_type < 27:
                # BUG FIX: the two-patch family must use the two-input
                # network; the original called the 3-input net with only
                # two arguments (TypeError).  midd_p is set to None so
                # predict() knows to skip the middle patch.
                self.midd_p = None
                self.prob = FacePatches_NET_3C_1I_2P(self.eye_p, self.mou_p)
            else:
                print('ERROR: Unexpected network type. Try another mid')
                exit(-1)
            # The saver must be created inside the graph it will restore into.
            self.saver = tf.train.Saver()
        # Load the pretrained model weights.
        self.sess = tf.InteractiveSession(graph=self.networkGraph)
        try:
            # Variables must be initialized before restoring the checkpoint.
            self.sess.run(tf.variables_initializer(
                var_list=self.networkGraph.get_collection(name='variables')))
            print('Network variables initialized.')
            # The saver was defined in this session's graph, as restore requires.
            self.saver.restore(sess=self.sess, save_path=getModelPathForPrediction(mid))
            print('Network Model loaded\n')
        except Exception:
            # Narrowed from a bare `except:` so KeyboardInterrupt etc. are
            # not swallowed; still aborts on any load failure as before.
            print('ERROR: Unable to load the pretrained network.')
            traceback.print_exc()
            exit(2)

    def predict(self, eye_p, midd_p, mou_p):
        """Classify one sample; each *_p must be shaped [1, H, W, 1].

        For two-patch models `midd_p` is accepted but ignored (the original
        crashed with AttributeError because self.midd_p did not exist).
        Returns (emotion_label, probability_vector).
        """
        feed = {self.eye_p: eye_p, self.mou_p: mou_p}
        if self.midd_p is not None:
            feed[self.midd_p] = midd_p
        probability = self.prob.eval(feed_dict=feed)
        emotion = MAPPING[np.argmax(probability)]
        return emotion, probability
| [
"tensorflow.InteractiveSession",
"tflearn.fully_connected",
"tensorflow.train.Saver",
"numpy.argmax",
"tensorflow.concat",
"tflearn.dropout",
"tensorflow.placeholder",
"tflearn.conv_2d",
"tensorflow.Graph",
"tflearn.max_pool_2d"
] | [((565, 635), 'tflearn.conv_2d', 'tflearn.conv_2d', (['eyep', '(8)', '(3)'], {'activation': '"""relu"""', 'name': '"""eye_conv1_1_3x3"""'}), "(eyep, 8, 3, activation='relu', name='eye_conv1_1_3x3')\n", (580, 635), False, 'import tflearn\n'), ((646, 717), 'tflearn.conv_2d', 'tflearn.conv_2d', (['e_net', '(8)', '(3)'], {'activation': '"""relu"""', 'name': '"""eye_conv1_2_3x3"""'}), "(e_net, 8, 3, activation='relu', name='eye_conv1_2_3x3')\n", (661, 717), False, 'import tflearn\n'), ((728, 778), 'tflearn.max_pool_2d', 'tflearn.max_pool_2d', (['e_net', '(2)', '(2)'], {'name': '"""eye_pool1"""'}), "(e_net, 2, 2, name='eye_pool1')\n", (747, 778), False, 'import tflearn\n'), ((787, 859), 'tflearn.conv_2d', 'tflearn.conv_2d', (['e_net', '(32)', '(3)'], {'activation': '"""relu"""', 'name': '"""eye_conv2_1_3x3"""'}), "(e_net, 32, 3, activation='relu', name='eye_conv2_1_3x3')\n", (802, 859), False, 'import tflearn\n'), ((871, 943), 'tflearn.conv_2d', 'tflearn.conv_2d', (['e_net', '(32)', '(3)'], {'activation': '"""relu"""', 'name': '"""eye_conv2_2_3x3"""'}), "(e_net, 32, 3, activation='relu', name='eye_conv2_2_3x3')\n", (886, 943), False, 'import tflearn\n'), ((955, 1005), 'tflearn.max_pool_2d', 'tflearn.max_pool_2d', (['e_net', '(2)', '(2)'], {'name': '"""eye_pool2"""'}), "(e_net, 2, 2, name='eye_pool2')\n", (974, 1005), False, 'import tflearn\n'), ((1017, 1090), 'tflearn.conv_2d', 'tflearn.conv_2d', (['e_net', '(128)', '(3)'], {'activation': '"""relu"""', 'name': '"""eye_conv3_1_3x3"""'}), "(e_net, 128, 3, activation='relu', name='eye_conv3_1_3x3')\n", (1032, 1090), False, 'import tflearn\n'), ((1102, 1175), 'tflearn.conv_2d', 'tflearn.conv_2d', (['e_net', '(128)', '(3)'], {'activation': '"""relu"""', 'name': '"""eye_conv3_2_3x3"""'}), "(e_net, 128, 3, activation='relu', name='eye_conv3_2_3x3')\n", (1117, 1175), False, 'import tflearn\n'), ((1187, 1237), 'tflearn.max_pool_2d', 'tflearn.max_pool_2d', (['e_net', '(2)', '(2)'], {'name': '"""eye_pool3"""'}), "(e_net, 2, 2, 
name='eye_pool3')\n", (1206, 1237), False, 'import tflearn\n'), ((1249, 1320), 'tflearn.fully_connected', 'tflearn.fully_connected', (['e_net', '(1024)'], {'activation': '"""tanh"""', 'name': '"""eye_fc1"""'}), "(e_net, 1024, activation='tanh', name='eye_fc1')\n", (1272, 1320), False, 'import tflearn\n'), ((1335, 1411), 'tflearn.conv_2d', 'tflearn.conv_2d', (['middlep', '(8)', '(3)'], {'activation': '"""relu"""', 'name': '"""middle_conv1_1_3x3"""'}), "(middlep, 8, 3, activation='relu', name='middle_conv1_1_3x3')\n", (1350, 1411), False, 'import tflearn\n'), ((1423, 1498), 'tflearn.conv_2d', 'tflearn.conv_2d', (['mi_net', '(8)', '(3)'], {'activation': '"""relu"""', 'name': '"""middle_conv1_2_3x3"""'}), "(mi_net, 8, 3, activation='relu', name='middle_conv1_2_3x3')\n", (1438, 1498), False, 'import tflearn\n'), ((1510, 1564), 'tflearn.max_pool_2d', 'tflearn.max_pool_2d', (['mi_net', '(2)', '(2)'], {'name': '"""middle_pool1"""'}), "(mi_net, 2, 2, name='middle_pool1')\n", (1529, 1564), False, 'import tflearn\n'), ((1574, 1650), 'tflearn.conv_2d', 'tflearn.conv_2d', (['mi_net', '(32)', '(3)'], {'activation': '"""relu"""', 'name': '"""middle_conv2_1_3x3"""'}), "(mi_net, 32, 3, activation='relu', name='middle_conv2_1_3x3')\n", (1589, 1650), False, 'import tflearn\n'), ((1663, 1739), 'tflearn.conv_2d', 'tflearn.conv_2d', (['mi_net', '(32)', '(3)'], {'activation': '"""relu"""', 'name': '"""middle_conv2_2_3x3"""'}), "(mi_net, 32, 3, activation='relu', name='middle_conv2_2_3x3')\n", (1678, 1739), False, 'import tflearn\n'), ((1752, 1806), 'tflearn.max_pool_2d', 'tflearn.max_pool_2d', (['mi_net', '(2)', '(2)'], {'name': '"""middle_pool2"""'}), "(mi_net, 2, 2, name='middle_pool2')\n", (1771, 1806), False, 'import tflearn\n'), ((1819, 1896), 'tflearn.conv_2d', 'tflearn.conv_2d', (['mi_net', '(128)', '(3)'], {'activation': '"""relu"""', 'name': '"""middle_conv3_1_3x3"""'}), "(mi_net, 128, 3, activation='relu', name='middle_conv3_1_3x3')\n", (1834, 1896), False, 'import tflearn\n'), 
((1909, 1986), 'tflearn.conv_2d', 'tflearn.conv_2d', (['mi_net', '(128)', '(3)'], {'activation': '"""relu"""', 'name': '"""middle_conv3_2_3x3"""'}), "(mi_net, 128, 3, activation='relu', name='middle_conv3_2_3x3')\n", (1924, 1986), False, 'import tflearn\n'), ((1999, 2053), 'tflearn.max_pool_2d', 'tflearn.max_pool_2d', (['mi_net', '(2)', '(2)'], {'name': '"""middle_pool3"""'}), "(mi_net, 2, 2, name='middle_pool3')\n", (2018, 2053), False, 'import tflearn\n'), ((2066, 2141), 'tflearn.fully_connected', 'tflearn.fully_connected', (['mi_net', '(1024)'], {'activation': '"""tanh"""', 'name': '"""middle_fc1"""'}), "(mi_net, 1024, activation='tanh', name='middle_fc1')\n", (2089, 2141), False, 'import tflearn\n'), ((2156, 2230), 'tflearn.conv_2d', 'tflearn.conv_2d', (['mouthp', '(8)', '(3)'], {'activation': '"""relu"""', 'name': '"""mouth_conv1_1_3x3"""'}), "(mouthp, 8, 3, activation='relu', name='mouth_conv1_1_3x3')\n", (2171, 2230), False, 'import tflearn\n'), ((2242, 2316), 'tflearn.conv_2d', 'tflearn.conv_2d', (['mo_net', '(8)', '(3)'], {'activation': '"""relu"""', 'name': '"""mouth_conv1_2_3x3"""'}), "(mo_net, 8, 3, activation='relu', name='mouth_conv1_2_3x3')\n", (2257, 2316), False, 'import tflearn\n'), ((2328, 2381), 'tflearn.max_pool_2d', 'tflearn.max_pool_2d', (['mo_net', '(2)', '(2)'], {'name': '"""mouth_pool1"""'}), "(mo_net, 2, 2, name='mouth_pool1')\n", (2347, 2381), False, 'import tflearn\n'), ((2391, 2466), 'tflearn.conv_2d', 'tflearn.conv_2d', (['mo_net', '(32)', '(3)'], {'activation': '"""relu"""', 'name': '"""mouth_conv2_1_3x3"""'}), "(mo_net, 32, 3, activation='relu', name='mouth_conv2_1_3x3')\n", (2406, 2466), False, 'import tflearn\n'), ((2479, 2554), 'tflearn.conv_2d', 'tflearn.conv_2d', (['mo_net', '(32)', '(3)'], {'activation': '"""relu"""', 'name': '"""mouth_conv2_2_3x3"""'}), "(mo_net, 32, 3, activation='relu', name='mouth_conv2_2_3x3')\n", (2494, 2554), False, 'import tflearn\n'), ((2567, 2620), 'tflearn.max_pool_2d', 'tflearn.max_pool_2d', 
(['mo_net', '(2)', '(2)'], {'name': '"""mouth_pool2"""'}), "(mo_net, 2, 2, name='mouth_pool2')\n", (2586, 2620), False, 'import tflearn\n'), ((2633, 2709), 'tflearn.conv_2d', 'tflearn.conv_2d', (['mo_net', '(128)', '(3)'], {'activation': '"""relu"""', 'name': '"""mouth_conv3_1_3x3"""'}), "(mo_net, 128, 3, activation='relu', name='mouth_conv3_1_3x3')\n", (2648, 2709), False, 'import tflearn\n'), ((2722, 2798), 'tflearn.conv_2d', 'tflearn.conv_2d', (['mo_net', '(128)', '(3)'], {'activation': '"""relu"""', 'name': '"""mouth_conv3_2_3x3"""'}), "(mo_net, 128, 3, activation='relu', name='mouth_conv3_2_3x3')\n", (2737, 2798), False, 'import tflearn\n'), ((2811, 2864), 'tflearn.max_pool_2d', 'tflearn.max_pool_2d', (['mo_net', '(2)', '(2)'], {'name': '"""mouth_pool3"""'}), "(mo_net, 2, 2, name='mouth_pool3')\n", (2830, 2864), False, 'import tflearn\n'), ((2877, 2951), 'tflearn.fully_connected', 'tflearn.fully_connected', (['mo_net', '(1024)'], {'activation': '"""tanh"""', 'name': '"""mouth_fc1"""'}), "(mo_net, 1024, activation='tanh', name='mouth_fc1')\n", (2900, 2951), False, 'import tflearn\n'), ((2966, 3020), 'tensorflow.concat', 'tf.concat', (['[e_net, mi_net, mo_net]', '(1)'], {'name': '"""fusion_1"""'}), "([e_net, mi_net, mo_net], 1, name='fusion_1')\n", (2975, 3020), True, 'import tensorflow as tf\n'), ((3031, 3099), 'tflearn.fully_connected', 'tflearn.fully_connected', (['fc_net', '(2048)'], {'activation': '"""relu"""', 'name': '"""fc1"""'}), "(fc_net, 2048, activation='relu', name='fc1')\n", (3054, 3099), False, 'import tflearn\n'), ((3112, 3154), 'tflearn.dropout', 'tflearn.dropout', (['fc_net', '(0.8)'], {'name': '"""drop1"""'}), "(fc_net, 0.8, name='drop1')\n", (3127, 3154), False, 'import tflearn\n'), ((3167, 3235), 'tflearn.fully_connected', 'tflearn.fully_connected', (['fc_net', '(2048)'], {'activation': '"""relu"""', 'name': '"""fc2"""'}), "(fc_net, 2048, activation='relu', name='fc2')\n", (3190, 3235), False, 'import tflearn\n'), ((3248, 3290), 
'tflearn.dropout', 'tflearn.dropout', (['fc_net', '(0.8)'], {'name': '"""drop2"""'}), "(fc_net, 0.8, name='drop2')\n", (3263, 3290), False, 'import tflearn\n'), ((3304, 3373), 'tflearn.fully_connected', 'tflearn.fully_connected', (['fc_net', '(7)'], {'activation': '"""softmax"""', 'name': '"""prob"""'}), "(fc_net, 7, activation='softmax', name='prob')\n", (3327, 3373), False, 'import tflearn\n'), ((3512, 3582), 'tflearn.conv_2d', 'tflearn.conv_2d', (['eyep', '(8)', '(3)'], {'activation': '"""relu"""', 'name': '"""eye_conv1_1_3x3"""'}), "(eyep, 8, 3, activation='relu', name='eye_conv1_1_3x3')\n", (3527, 3582), False, 'import tflearn\n'), ((3593, 3664), 'tflearn.conv_2d', 'tflearn.conv_2d', (['e_net', '(8)', '(3)'], {'activation': '"""relu"""', 'name': '"""eye_conv1_2_3x3"""'}), "(e_net, 8, 3, activation='relu', name='eye_conv1_2_3x3')\n", (3608, 3664), False, 'import tflearn\n'), ((3675, 3725), 'tflearn.max_pool_2d', 'tflearn.max_pool_2d', (['e_net', '(2)', '(2)'], {'name': '"""eye_pool1"""'}), "(e_net, 2, 2, name='eye_pool1')\n", (3694, 3725), False, 'import tflearn\n'), ((3734, 3806), 'tflearn.conv_2d', 'tflearn.conv_2d', (['e_net', '(32)', '(3)'], {'activation': '"""relu"""', 'name': '"""eye_conv2_1_3x3"""'}), "(e_net, 32, 3, activation='relu', name='eye_conv2_1_3x3')\n", (3749, 3806), False, 'import tflearn\n'), ((3818, 3890), 'tflearn.conv_2d', 'tflearn.conv_2d', (['e_net', '(32)', '(3)'], {'activation': '"""relu"""', 'name': '"""eye_conv2_2_3x3"""'}), "(e_net, 32, 3, activation='relu', name='eye_conv2_2_3x3')\n", (3833, 3890), False, 'import tflearn\n'), ((3902, 3952), 'tflearn.max_pool_2d', 'tflearn.max_pool_2d', (['e_net', '(2)', '(2)'], {'name': '"""eye_pool2"""'}), "(e_net, 2, 2, name='eye_pool2')\n", (3921, 3952), False, 'import tflearn\n'), ((3965, 4036), 'tflearn.fully_connected', 'tflearn.fully_connected', (['e_net', '(1024)'], {'activation': '"""tanh"""', 'name': '"""eye_fc2"""'}), "(e_net, 1024, activation='tanh', name='eye_fc2')\n", (3988, 4036), 
False, 'import tflearn\n'), ((4048, 4121), 'tflearn.conv_2d', 'tflearn.conv_2d', (['e_net', '(128)', '(3)'], {'activation': '"""relu"""', 'name': '"""eye_conv3_1_3x3"""'}), "(e_net, 128, 3, activation='relu', name='eye_conv3_1_3x3')\n", (4063, 4121), False, 'import tflearn\n'), ((4133, 4206), 'tflearn.conv_2d', 'tflearn.conv_2d', (['e_net', '(128)', '(3)'], {'activation': '"""relu"""', 'name': '"""eye_conv3_2_3x3"""'}), "(e_net, 128, 3, activation='relu', name='eye_conv3_2_3x3')\n", (4148, 4206), False, 'import tflearn\n'), ((4218, 4268), 'tflearn.max_pool_2d', 'tflearn.max_pool_2d', (['e_net', '(2)', '(2)'], {'name': '"""eye_pool3"""'}), "(e_net, 2, 2, name='eye_pool3')\n", (4237, 4268), False, 'import tflearn\n'), ((4280, 4351), 'tflearn.fully_connected', 'tflearn.fully_connected', (['e_net', '(1024)'], {'activation': '"""tanh"""', 'name': '"""eye_fc1"""'}), "(e_net, 1024, activation='tanh', name='eye_fc1')\n", (4303, 4351), False, 'import tflearn\n'), ((4363, 4405), 'tensorflow.concat', 'tf.concat', (['[e_net, efc2]', '(1)'], {'name': '"""eye_fc"""'}), "([e_net, efc2], 1, name='eye_fc')\n", (4372, 4405), True, 'import tensorflow as tf\n'), ((4420, 4496), 'tflearn.conv_2d', 'tflearn.conv_2d', (['middlep', '(8)', '(3)'], {'activation': '"""relu"""', 'name': '"""middle_conv1_1_3x3"""'}), "(middlep, 8, 3, activation='relu', name='middle_conv1_1_3x3')\n", (4435, 4496), False, 'import tflearn\n'), ((4508, 4583), 'tflearn.conv_2d', 'tflearn.conv_2d', (['mi_net', '(8)', '(3)'], {'activation': '"""relu"""', 'name': '"""middle_conv1_2_3x3"""'}), "(mi_net, 8, 3, activation='relu', name='middle_conv1_2_3x3')\n", (4523, 4583), False, 'import tflearn\n'), ((4595, 4649), 'tflearn.max_pool_2d', 'tflearn.max_pool_2d', (['mi_net', '(2)', '(2)'], {'name': '"""middle_pool1"""'}), "(mi_net, 2, 2, name='middle_pool1')\n", (4614, 4649), False, 'import tflearn\n'), ((4659, 4735), 'tflearn.conv_2d', 'tflearn.conv_2d', (['mi_net', '(32)', '(3)'], {'activation': '"""relu"""', 'name': 
'"""middle_conv2_1_3x3"""'}), "(mi_net, 32, 3, activation='relu', name='middle_conv2_1_3x3')\n", (4674, 4735), False, 'import tflearn\n'), ((4748, 4824), 'tflearn.conv_2d', 'tflearn.conv_2d', (['mi_net', '(32)', '(3)'], {'activation': '"""relu"""', 'name': '"""middle_conv2_2_3x3"""'}), "(mi_net, 32, 3, activation='relu', name='middle_conv2_2_3x3')\n", (4763, 4824), False, 'import tflearn\n'), ((4837, 4891), 'tflearn.max_pool_2d', 'tflearn.max_pool_2d', (['mi_net', '(2)', '(2)'], {'name': '"""middle_pool2"""'}), "(mi_net, 2, 2, name='middle_pool2')\n", (4856, 4891), False, 'import tflearn\n'), ((4905, 4980), 'tflearn.fully_connected', 'tflearn.fully_connected', (['mi_net', '(1024)'], {'activation': '"""tanh"""', 'name': '"""middle_fc2"""'}), "(mi_net, 1024, activation='tanh', name='middle_fc2')\n", (4928, 4980), False, 'import tflearn\n'), ((4993, 5070), 'tflearn.conv_2d', 'tflearn.conv_2d', (['mi_net', '(128)', '(3)'], {'activation': '"""relu"""', 'name': '"""middle_conv3_1_3x3"""'}), "(mi_net, 128, 3, activation='relu', name='middle_conv3_1_3x3')\n", (5008, 5070), False, 'import tflearn\n'), ((5083, 5160), 'tflearn.conv_2d', 'tflearn.conv_2d', (['mi_net', '(128)', '(3)'], {'activation': '"""relu"""', 'name': '"""middle_conv3_2_3x3"""'}), "(mi_net, 128, 3, activation='relu', name='middle_conv3_2_3x3')\n", (5098, 5160), False, 'import tflearn\n'), ((5173, 5227), 'tflearn.max_pool_2d', 'tflearn.max_pool_2d', (['mi_net', '(2)', '(2)'], {'name': '"""middle_pool3"""'}), "(mi_net, 2, 2, name='middle_pool3')\n", (5192, 5227), False, 'import tflearn\n'), ((5240, 5315), 'tflearn.fully_connected', 'tflearn.fully_connected', (['mi_net', '(1024)'], {'activation': '"""tanh"""', 'name': '"""middle_fc1"""'}), "(mi_net, 1024, activation='tanh', name='middle_fc1')\n", (5263, 5315), False, 'import tflearn\n'), ((5328, 5375), 'tensorflow.concat', 'tf.concat', (['[mi_net, mifc2]', '(1)'], {'name': '"""middle_fc"""'}), "([mi_net, mifc2], 1, name='middle_fc')\n", (5337, 5375), True, 
'import tensorflow as tf\n'), ((5390, 5464), 'tflearn.conv_2d', 'tflearn.conv_2d', (['mouthp', '(8)', '(3)'], {'activation': '"""relu"""', 'name': '"""mouth_conv1_1_3x3"""'}), "(mouthp, 8, 3, activation='relu', name='mouth_conv1_1_3x3')\n", (5405, 5464), False, 'import tflearn\n'), ((5476, 5550), 'tflearn.conv_2d', 'tflearn.conv_2d', (['mo_net', '(8)', '(3)'], {'activation': '"""relu"""', 'name': '"""mouth_conv1_2_3x3"""'}), "(mo_net, 8, 3, activation='relu', name='mouth_conv1_2_3x3')\n", (5491, 5550), False, 'import tflearn\n'), ((5562, 5615), 'tflearn.max_pool_2d', 'tflearn.max_pool_2d', (['mo_net', '(2)', '(2)'], {'name': '"""mouth_pool1"""'}), "(mo_net, 2, 2, name='mouth_pool1')\n", (5581, 5615), False, 'import tflearn\n'), ((5625, 5700), 'tflearn.conv_2d', 'tflearn.conv_2d', (['mo_net', '(32)', '(3)'], {'activation': '"""relu"""', 'name': '"""mouth_conv2_1_3x3"""'}), "(mo_net, 32, 3, activation='relu', name='mouth_conv2_1_3x3')\n", (5640, 5700), False, 'import tflearn\n'), ((5713, 5788), 'tflearn.conv_2d', 'tflearn.conv_2d', (['mo_net', '(32)', '(3)'], {'activation': '"""relu"""', 'name': '"""mouth_conv2_2_3x3"""'}), "(mo_net, 32, 3, activation='relu', name='mouth_conv2_2_3x3')\n", (5728, 5788), False, 'import tflearn\n'), ((5801, 5854), 'tflearn.max_pool_2d', 'tflearn.max_pool_2d', (['mo_net', '(2)', '(2)'], {'name': '"""mouth_pool2"""'}), "(mo_net, 2, 2, name='mouth_pool2')\n", (5820, 5854), False, 'import tflearn\n'), ((5867, 5941), 'tflearn.fully_connected', 'tflearn.fully_connected', (['mo_net', '(1024)'], {'activation': '"""tanh"""', 'name': '"""mouth_fc2"""'}), "(mo_net, 1024, activation='tanh', name='mouth_fc2')\n", (5890, 5941), False, 'import tflearn\n'), ((5954, 6030), 'tflearn.conv_2d', 'tflearn.conv_2d', (['mo_net', '(128)', '(3)'], {'activation': '"""relu"""', 'name': '"""mouth_conv3_1_3x3"""'}), "(mo_net, 128, 3, activation='relu', name='mouth_conv3_1_3x3')\n", (5969, 6030), False, 'import tflearn\n'), ((6043, 6119), 'tflearn.conv_2d', 
'tflearn.conv_2d', (['mo_net', '(128)', '(3)'], {'activation': '"""relu"""', 'name': '"""mouth_conv3_2_3x3"""'}), "(mo_net, 128, 3, activation='relu', name='mouth_conv3_2_3x3')\n", (6058, 6119), False, 'import tflearn\n'), ((6132, 6185), 'tflearn.max_pool_2d', 'tflearn.max_pool_2d', (['mo_net', '(2)', '(2)'], {'name': '"""mouth_pool3"""'}), "(mo_net, 2, 2, name='mouth_pool3')\n", (6151, 6185), False, 'import tflearn\n'), ((6198, 6272), 'tflearn.fully_connected', 'tflearn.fully_connected', (['mo_net', '(1024)'], {'activation': '"""tanh"""', 'name': '"""mouth_fc1"""'}), "(mo_net, 1024, activation='tanh', name='mouth_fc1')\n", (6221, 6272), False, 'import tflearn\n'), ((6285, 6330), 'tensorflow.concat', 'tf.concat', (['[mo_net, mfc2]', '(1)'], {'name': '"""mouth_fc"""'}), "([mo_net, mfc2], 1, name='mouth_fc')\n", (6294, 6330), True, 'import tensorflow as tf\n'), ((6345, 6399), 'tensorflow.concat', 'tf.concat', (['[e_net, mi_net, mo_net]', '(1)'], {'name': '"""fusion_1"""'}), "([e_net, mi_net, mo_net], 1, name='fusion_1')\n", (6354, 6399), True, 'import tensorflow as tf\n'), ((6410, 6478), 'tflearn.fully_connected', 'tflearn.fully_connected', (['fc_net', '(2048)'], {'activation': '"""relu"""', 'name': '"""fc1"""'}), "(fc_net, 2048, activation='relu', name='fc1')\n", (6433, 6478), False, 'import tflearn\n'), ((6491, 6533), 'tflearn.dropout', 'tflearn.dropout', (['fc_net', '(0.8)'], {'name': '"""drop1"""'}), "(fc_net, 0.8, name='drop1')\n", (6506, 6533), False, 'import tflearn\n'), ((6546, 6614), 'tflearn.fully_connected', 'tflearn.fully_connected', (['fc_net', '(2048)'], {'activation': '"""relu"""', 'name': '"""fc2"""'}), "(fc_net, 2048, activation='relu', name='fc2')\n", (6569, 6614), False, 'import tflearn\n'), ((6627, 6669), 'tflearn.dropout', 'tflearn.dropout', (['fc_net', '(0.8)'], {'name': '"""drop2"""'}), "(fc_net, 0.8, name='drop2')\n", (6642, 6669), False, 'import tflearn\n'), ((6683, 6752), 'tflearn.fully_connected', 'tflearn.fully_connected', (['fc_net', 
'(7)'], {'activation': '"""softmax"""', 'name': '"""prob"""'}), "(fc_net, 7, activation='softmax', name='prob')\n", (6706, 6752), False, 'import tflearn\n'), ((6891, 6961), 'tflearn.conv_2d', 'tflearn.conv_2d', (['eyep', '(8)', '(3)'], {'activation': '"""relu"""', 'name': '"""eye_conv1_1_3x3"""'}), "(eyep, 8, 3, activation='relu', name='eye_conv1_1_3x3')\n", (6906, 6961), False, 'import tflearn\n'), ((6972, 7043), 'tflearn.conv_2d', 'tflearn.conv_2d', (['e_net', '(8)', '(3)'], {'activation': '"""relu"""', 'name': '"""eye_conv1_2_3x3"""'}), "(e_net, 8, 3, activation='relu', name='eye_conv1_2_3x3')\n", (6987, 7043), False, 'import tflearn\n'), ((7054, 7104), 'tflearn.max_pool_2d', 'tflearn.max_pool_2d', (['e_net', '(2)', '(2)'], {'name': '"""eye_pool1"""'}), "(e_net, 2, 2, name='eye_pool1')\n", (7073, 7104), False, 'import tflearn\n'), ((7114, 7185), 'tflearn.fully_connected', 'tflearn.fully_connected', (['e_net', '(1024)'], {'activation': '"""tanh"""', 'name': '"""eye_fc3"""'}), "(e_net, 1024, activation='tanh', name='eye_fc3')\n", (7137, 7185), False, 'import tflearn\n'), ((7197, 7269), 'tflearn.conv_2d', 'tflearn.conv_2d', (['e_net', '(32)', '(3)'], {'activation': '"""relu"""', 'name': '"""eye_conv2_1_3x3"""'}), "(e_net, 32, 3, activation='relu', name='eye_conv2_1_3x3')\n", (7212, 7269), False, 'import tflearn\n'), ((7281, 7353), 'tflearn.conv_2d', 'tflearn.conv_2d', (['e_net', '(32)', '(3)'], {'activation': '"""relu"""', 'name': '"""eye_conv2_2_3x3"""'}), "(e_net, 32, 3, activation='relu', name='eye_conv2_2_3x3')\n", (7296, 7353), False, 'import tflearn\n'), ((7365, 7415), 'tflearn.max_pool_2d', 'tflearn.max_pool_2d', (['e_net', '(2)', '(2)'], {'name': '"""eye_pool2"""'}), "(e_net, 2, 2, name='eye_pool2')\n", (7384, 7415), False, 'import tflearn\n'), ((7428, 7499), 'tflearn.fully_connected', 'tflearn.fully_connected', (['e_net', '(1024)'], {'activation': '"""tanh"""', 'name': '"""eye_fc2"""'}), "(e_net, 1024, activation='tanh', name='eye_fc2')\n", (7451, 7499), 
False, 'import tflearn\n'), ((7511, 7584), 'tflearn.conv_2d', 'tflearn.conv_2d', (['e_net', '(128)', '(3)'], {'activation': '"""relu"""', 'name': '"""eye_conv3_1_3x3"""'}), "(e_net, 128, 3, activation='relu', name='eye_conv3_1_3x3')\n", (7526, 7584), False, 'import tflearn\n'), ((7596, 7669), 'tflearn.conv_2d', 'tflearn.conv_2d', (['e_net', '(128)', '(3)'], {'activation': '"""relu"""', 'name': '"""eye_conv3_2_3x3"""'}), "(e_net, 128, 3, activation='relu', name='eye_conv3_2_3x3')\n", (7611, 7669), False, 'import tflearn\n'), ((7681, 7731), 'tflearn.max_pool_2d', 'tflearn.max_pool_2d', (['e_net', '(2)', '(2)'], {'name': '"""eye_pool3"""'}), "(e_net, 2, 2, name='eye_pool3')\n", (7700, 7731), False, 'import tflearn\n'), ((7743, 7814), 'tflearn.fully_connected', 'tflearn.fully_connected', (['e_net', '(1024)'], {'activation': '"""tanh"""', 'name': '"""eye_fc1"""'}), "(e_net, 1024, activation='tanh', name='eye_fc1')\n", (7766, 7814), False, 'import tflearn\n'), ((7826, 7874), 'tensorflow.concat', 'tf.concat', (['[e_net, efc2, efc3]', '(1)'], {'name': '"""eye_fc"""'}), "([e_net, efc2, efc3], 1, name='eye_fc')\n", (7835, 7874), True, 'import tensorflow as tf\n'), ((7889, 7965), 'tflearn.conv_2d', 'tflearn.conv_2d', (['middlep', '(8)', '(3)'], {'activation': '"""relu"""', 'name': '"""middle_conv1_1_3x3"""'}), "(middlep, 8, 3, activation='relu', name='middle_conv1_1_3x3')\n", (7904, 7965), False, 'import tflearn\n'), ((7977, 8052), 'tflearn.conv_2d', 'tflearn.conv_2d', (['mi_net', '(8)', '(3)'], {'activation': '"""relu"""', 'name': '"""middle_conv1_2_3x3"""'}), "(mi_net, 8, 3, activation='relu', name='middle_conv1_2_3x3')\n", (7992, 8052), False, 'import tflearn\n'), ((8064, 8118), 'tflearn.max_pool_2d', 'tflearn.max_pool_2d', (['mi_net', '(2)', '(2)'], {'name': '"""middle_pool1"""'}), "(mi_net, 2, 2, name='middle_pool1')\n", (8083, 8118), False, 'import tflearn\n'), ((8129, 8204), 'tflearn.fully_connected', 'tflearn.fully_connected', (['mi_net', '(1024)'], {'activation': 
'"""tanh"""', 'name': '"""middle_fc3"""'}), "(mi_net, 1024, activation='tanh', name='middle_fc3')\n", (8152, 8204), False, 'import tflearn\n'), ((8217, 8293), 'tflearn.conv_2d', 'tflearn.conv_2d', (['mi_net', '(32)', '(3)'], {'activation': '"""relu"""', 'name': '"""middle_conv2_1_3x3"""'}), "(mi_net, 32, 3, activation='relu', name='middle_conv2_1_3x3')\n", (8232, 8293), False, 'import tflearn\n'), ((8306, 8382), 'tflearn.conv_2d', 'tflearn.conv_2d', (['mi_net', '(32)', '(3)'], {'activation': '"""relu"""', 'name': '"""middle_conv2_2_3x3"""'}), "(mi_net, 32, 3, activation='relu', name='middle_conv2_2_3x3')\n", (8321, 8382), False, 'import tflearn\n'), ((8395, 8449), 'tflearn.max_pool_2d', 'tflearn.max_pool_2d', (['mi_net', '(2)', '(2)'], {'name': '"""middle_pool2"""'}), "(mi_net, 2, 2, name='middle_pool2')\n", (8414, 8449), False, 'import tflearn\n'), ((8463, 8538), 'tflearn.fully_connected', 'tflearn.fully_connected', (['mi_net', '(1024)'], {'activation': '"""tanh"""', 'name': '"""middle_fc2"""'}), "(mi_net, 1024, activation='tanh', name='middle_fc2')\n", (8486, 8538), False, 'import tflearn\n'), ((8551, 8628), 'tflearn.conv_2d', 'tflearn.conv_2d', (['mi_net', '(128)', '(3)'], {'activation': '"""relu"""', 'name': '"""middle_conv3_1_3x3"""'}), "(mi_net, 128, 3, activation='relu', name='middle_conv3_1_3x3')\n", (8566, 8628), False, 'import tflearn\n'), ((8641, 8718), 'tflearn.conv_2d', 'tflearn.conv_2d', (['mi_net', '(128)', '(3)'], {'activation': '"""relu"""', 'name': '"""middle_conv3_2_3x3"""'}), "(mi_net, 128, 3, activation='relu', name='middle_conv3_2_3x3')\n", (8656, 8718), False, 'import tflearn\n'), ((8731, 8785), 'tflearn.max_pool_2d', 'tflearn.max_pool_2d', (['mi_net', '(2)', '(2)'], {'name': '"""middle_pool3"""'}), "(mi_net, 2, 2, name='middle_pool3')\n", (8750, 8785), False, 'import tflearn\n'), ((8798, 8873), 'tflearn.fully_connected', 'tflearn.fully_connected', (['mi_net', '(1024)'], {'activation': '"""tanh"""', 'name': '"""middle_fc1"""'}), "(mi_net, 
1024, activation='tanh', name='middle_fc1')\n", (8821, 8873), False, 'import tflearn\n'), ((8886, 8940), 'tensorflow.concat', 'tf.concat', (['[mi_net, mifc2, mifc3]', '(1)'], {'name': '"""middle_fc"""'}), "([mi_net, mifc2, mifc3], 1, name='middle_fc')\n", (8895, 8940), True, 'import tensorflow as tf\n'), ((8955, 9029), 'tflearn.conv_2d', 'tflearn.conv_2d', (['mouthp', '(8)', '(3)'], {'activation': '"""relu"""', 'name': '"""mouth_conv1_1_3x3"""'}), "(mouthp, 8, 3, activation='relu', name='mouth_conv1_1_3x3')\n", (8970, 9029), False, 'import tflearn\n'), ((9041, 9115), 'tflearn.conv_2d', 'tflearn.conv_2d', (['mo_net', '(8)', '(3)'], {'activation': '"""relu"""', 'name': '"""mouth_conv1_2_3x3"""'}), "(mo_net, 8, 3, activation='relu', name='mouth_conv1_2_3x3')\n", (9056, 9115), False, 'import tflearn\n'), ((9127, 9180), 'tflearn.max_pool_2d', 'tflearn.max_pool_2d', (['mo_net', '(2)', '(2)'], {'name': '"""mouth_pool1"""'}), "(mo_net, 2, 2, name='mouth_pool1')\n", (9146, 9180), False, 'import tflearn\n'), ((9190, 9264), 'tflearn.fully_connected', 'tflearn.fully_connected', (['mo_net', '(1024)'], {'activation': '"""tanh"""', 'name': '"""mouth_fc3"""'}), "(mo_net, 1024, activation='tanh', name='mouth_fc3')\n", (9213, 9264), False, 'import tflearn\n'), ((9277, 9352), 'tflearn.conv_2d', 'tflearn.conv_2d', (['mo_net', '(32)', '(3)'], {'activation': '"""relu"""', 'name': '"""mouth_conv2_1_3x3"""'}), "(mo_net, 32, 3, activation='relu', name='mouth_conv2_1_3x3')\n", (9292, 9352), False, 'import tflearn\n'), ((9365, 9440), 'tflearn.conv_2d', 'tflearn.conv_2d', (['mo_net', '(32)', '(3)'], {'activation': '"""relu"""', 'name': '"""mouth_conv2_2_3x3"""'}), "(mo_net, 32, 3, activation='relu', name='mouth_conv2_2_3x3')\n", (9380, 9440), False, 'import tflearn\n'), ((9453, 9506), 'tflearn.max_pool_2d', 'tflearn.max_pool_2d', (['mo_net', '(2)', '(2)'], {'name': '"""mouth_pool2"""'}), "(mo_net, 2, 2, name='mouth_pool2')\n", (9472, 9506), False, 'import tflearn\n'), ((9519, 9593), 
'tflearn.fully_connected', 'tflearn.fully_connected', (['mo_net', '(1024)'], {'activation': '"""tanh"""', 'name': '"""mouth_fc2"""'}), "(mo_net, 1024, activation='tanh', name='mouth_fc2')\n", (9542, 9593), False, 'import tflearn\n'), ((9606, 9682), 'tflearn.conv_2d', 'tflearn.conv_2d', (['mo_net', '(128)', '(3)'], {'activation': '"""relu"""', 'name': '"""mouth_conv3_1_3x3"""'}), "(mo_net, 128, 3, activation='relu', name='mouth_conv3_1_3x3')\n", (9621, 9682), False, 'import tflearn\n'), ((9695, 9771), 'tflearn.conv_2d', 'tflearn.conv_2d', (['mo_net', '(128)', '(3)'], {'activation': '"""relu"""', 'name': '"""mouth_conv3_2_3x3"""'}), "(mo_net, 128, 3, activation='relu', name='mouth_conv3_2_3x3')\n", (9710, 9771), False, 'import tflearn\n'), ((9784, 9837), 'tflearn.max_pool_2d', 'tflearn.max_pool_2d', (['mo_net', '(2)', '(2)'], {'name': '"""mouth_pool3"""'}), "(mo_net, 2, 2, name='mouth_pool3')\n", (9803, 9837), False, 'import tflearn\n'), ((9850, 9924), 'tflearn.fully_connected', 'tflearn.fully_connected', (['mo_net', '(1024)'], {'activation': '"""tanh"""', 'name': '"""mouth_fc1"""'}), "(mo_net, 1024, activation='tanh', name='mouth_fc1')\n", (9873, 9924), False, 'import tflearn\n'), ((9937, 9988), 'tensorflow.concat', 'tf.concat', (['[mo_net, mfc2, mfc3]', '(1)'], {'name': '"""mouth_fc"""'}), "([mo_net, mfc2, mfc3], 1, name='mouth_fc')\n", (9946, 9988), True, 'import tensorflow as tf\n'), ((10003, 10057), 'tensorflow.concat', 'tf.concat', (['[e_net, mi_net, mo_net]', '(1)'], {'name': '"""fusion_1"""'}), "([e_net, mi_net, mo_net], 1, name='fusion_1')\n", (10012, 10057), True, 'import tensorflow as tf\n'), ((10068, 10136), 'tflearn.fully_connected', 'tflearn.fully_connected', (['fc_net', '(2048)'], {'activation': '"""relu"""', 'name': '"""fc1"""'}), "(fc_net, 2048, activation='relu', name='fc1')\n", (10091, 10136), False, 'import tflearn\n'), ((10149, 10191), 'tflearn.dropout', 'tflearn.dropout', (['fc_net', '(0.8)'], {'name': '"""drop1"""'}), "(fc_net, 0.8, 
name='drop1')\n", (10164, 10191), False, 'import tflearn\n'), ((10204, 10272), 'tflearn.fully_connected', 'tflearn.fully_connected', (['fc_net', '(2048)'], {'activation': '"""relu"""', 'name': '"""fc2"""'}), "(fc_net, 2048, activation='relu', name='fc2')\n", (10227, 10272), False, 'import tflearn\n'), ((10285, 10327), 'tflearn.dropout', 'tflearn.dropout', (['fc_net', '(0.8)'], {'name': '"""drop2"""'}), "(fc_net, 0.8, name='drop2')\n", (10300, 10327), False, 'import tflearn\n'), ((10341, 10410), 'tflearn.fully_connected', 'tflearn.fully_connected', (['fc_net', '(7)'], {'activation': '"""softmax"""', 'name': '"""prob"""'}), "(fc_net, 7, activation='softmax', name='prob')\n", (10364, 10410), False, 'import tflearn\n'), ((10525, 10595), 'tflearn.conv_2d', 'tflearn.conv_2d', (['eyep', '(8)', '(3)'], {'activation': '"""relu"""', 'name': '"""eye_conv1_1_3x3"""'}), "(eyep, 8, 3, activation='relu', name='eye_conv1_1_3x3')\n", (10540, 10595), False, 'import tflearn\n'), ((10606, 10677), 'tflearn.conv_2d', 'tflearn.conv_2d', (['e_net', '(8)', '(3)'], {'activation': '"""relu"""', 'name': '"""eye_conv1_2_3x3"""'}), "(e_net, 8, 3, activation='relu', name='eye_conv1_2_3x3')\n", (10621, 10677), False, 'import tflearn\n'), ((10688, 10738), 'tflearn.max_pool_2d', 'tflearn.max_pool_2d', (['e_net', '(2)', '(2)'], {'name': '"""eye_pool1"""'}), "(e_net, 2, 2, name='eye_pool1')\n", (10707, 10738), False, 'import tflearn\n'), ((10747, 10819), 'tflearn.conv_2d', 'tflearn.conv_2d', (['e_net', '(32)', '(3)'], {'activation': '"""relu"""', 'name': '"""eye_conv2_1_3x3"""'}), "(e_net, 32, 3, activation='relu', name='eye_conv2_1_3x3')\n", (10762, 10819), False, 'import tflearn\n'), ((10831, 10903), 'tflearn.conv_2d', 'tflearn.conv_2d', (['e_net', '(32)', '(3)'], {'activation': '"""relu"""', 'name': '"""eye_conv2_2_3x3"""'}), "(e_net, 32, 3, activation='relu', name='eye_conv2_2_3x3')\n", (10846, 10903), False, 'import tflearn\n'), ((10915, 10965), 'tflearn.max_pool_2d', 'tflearn.max_pool_2d', 
(['e_net', '(2)', '(2)'], {'name': '"""eye_pool2"""'}), "(e_net, 2, 2, name='eye_pool2')\n", (10934, 10965), False, 'import tflearn\n'), ((10977, 11050), 'tflearn.conv_2d', 'tflearn.conv_2d', (['e_net', '(128)', '(3)'], {'activation': '"""relu"""', 'name': '"""eye_conv3_1_3x3"""'}), "(e_net, 128, 3, activation='relu', name='eye_conv3_1_3x3')\n", (10992, 11050), False, 'import tflearn\n'), ((11062, 11135), 'tflearn.conv_2d', 'tflearn.conv_2d', (['e_net', '(128)', '(3)'], {'activation': '"""relu"""', 'name': '"""eye_conv3_2_3x3"""'}), "(e_net, 128, 3, activation='relu', name='eye_conv3_2_3x3')\n", (11077, 11135), False, 'import tflearn\n'), ((11147, 11197), 'tflearn.max_pool_2d', 'tflearn.max_pool_2d', (['e_net', '(2)', '(2)'], {'name': '"""eye_pool3"""'}), "(e_net, 2, 2, name='eye_pool3')\n", (11166, 11197), False, 'import tflearn\n'), ((11209, 11280), 'tflearn.fully_connected', 'tflearn.fully_connected', (['e_net', '(1024)'], {'activation': '"""tanh"""', 'name': '"""eye_fc1"""'}), "(e_net, 1024, activation='tanh', name='eye_fc1')\n", (11232, 11280), False, 'import tflearn\n'), ((11295, 11369), 'tflearn.conv_2d', 'tflearn.conv_2d', (['mouthp', '(8)', '(3)'], {'activation': '"""relu"""', 'name': '"""mouth_conv1_1_3x3"""'}), "(mouthp, 8, 3, activation='relu', name='mouth_conv1_1_3x3')\n", (11310, 11369), False, 'import tflearn\n'), ((11381, 11455), 'tflearn.conv_2d', 'tflearn.conv_2d', (['mo_net', '(8)', '(3)'], {'activation': '"""relu"""', 'name': '"""mouth_conv1_2_3x3"""'}), "(mo_net, 8, 3, activation='relu', name='mouth_conv1_2_3x3')\n", (11396, 11455), False, 'import tflearn\n'), ((11467, 11520), 'tflearn.max_pool_2d', 'tflearn.max_pool_2d', (['mo_net', '(2)', '(2)'], {'name': '"""mouth_pool1"""'}), "(mo_net, 2, 2, name='mouth_pool1')\n", (11486, 11520), False, 'import tflearn\n'), ((11530, 11605), 'tflearn.conv_2d', 'tflearn.conv_2d', (['mo_net', '(32)', '(3)'], {'activation': '"""relu"""', 'name': '"""mouth_conv2_1_3x3"""'}), "(mo_net, 32, 3, activation='relu', 
name='mouth_conv2_1_3x3')\n", (11545, 11605), False, 'import tflearn\n'), ((11618, 11693), 'tflearn.conv_2d', 'tflearn.conv_2d', (['mo_net', '(32)', '(3)'], {'activation': '"""relu"""', 'name': '"""mouth_conv2_2_3x3"""'}), "(mo_net, 32, 3, activation='relu', name='mouth_conv2_2_3x3')\n", (11633, 11693), False, 'import tflearn\n'), ((11706, 11759), 'tflearn.max_pool_2d', 'tflearn.max_pool_2d', (['mo_net', '(2)', '(2)'], {'name': '"""mouth_pool2"""'}), "(mo_net, 2, 2, name='mouth_pool2')\n", (11725, 11759), False, 'import tflearn\n'), ((11772, 11848), 'tflearn.conv_2d', 'tflearn.conv_2d', (['mo_net', '(128)', '(3)'], {'activation': '"""relu"""', 'name': '"""mouth_conv3_1_3x3"""'}), "(mo_net, 128, 3, activation='relu', name='mouth_conv3_1_3x3')\n", (11787, 11848), False, 'import tflearn\n'), ((11861, 11937), 'tflearn.conv_2d', 'tflearn.conv_2d', (['mo_net', '(128)', '(3)'], {'activation': '"""relu"""', 'name': '"""mouth_conv3_2_3x3"""'}), "(mo_net, 128, 3, activation='relu', name='mouth_conv3_2_3x3')\n", (11876, 11937), False, 'import tflearn\n'), ((11950, 12003), 'tflearn.max_pool_2d', 'tflearn.max_pool_2d', (['mo_net', '(2)', '(2)'], {'name': '"""mouth_pool3"""'}), "(mo_net, 2, 2, name='mouth_pool3')\n", (11969, 12003), False, 'import tflearn\n'), ((12016, 12090), 'tflearn.fully_connected', 'tflearn.fully_connected', (['mo_net', '(1024)'], {'activation': '"""tanh"""', 'name': '"""mouth_fc1"""'}), "(mo_net, 1024, activation='tanh', name='mouth_fc1')\n", (12039, 12090), False, 'import tflearn\n'), ((12105, 12151), 'tensorflow.concat', 'tf.concat', (['[e_net, mo_net]', '(1)'], {'name': '"""fusion_1"""'}), "([e_net, mo_net], 1, name='fusion_1')\n", (12114, 12151), True, 'import tensorflow as tf\n'), ((12164, 12232), 'tflearn.fully_connected', 'tflearn.fully_connected', (['fc_net', '(2048)'], {'activation': '"""relu"""', 'name': '"""fc1"""'}), "(fc_net, 2048, activation='relu', name='fc1')\n", (12187, 12232), False, 'import tflearn\n'), ((12245, 12287), 
'tflearn.dropout', 'tflearn.dropout', (['fc_net', '(0.8)'], {'name': '"""drop1"""'}), "(fc_net, 0.8, name='drop1')\n", (12260, 12287), False, 'import tflearn\n'), ((12300, 12368), 'tflearn.fully_connected', 'tflearn.fully_connected', (['fc_net', '(2048)'], {'activation': '"""relu"""', 'name': '"""fc2"""'}), "(fc_net, 2048, activation='relu', name='fc2')\n", (12323, 12368), False, 'import tflearn\n'), ((12381, 12423), 'tflearn.dropout', 'tflearn.dropout', (['fc_net', '(0.8)'], {'name': '"""drop2"""'}), "(fc_net, 0.8, name='drop2')\n", (12396, 12423), False, 'import tflearn\n'), ((12437, 12506), 'tflearn.fully_connected', 'tflearn.fully_connected', (['fc_net', '(7)'], {'activation': '"""softmax"""', 'name': '"""prob"""'}), "(fc_net, 7, activation='softmax', name='prob')\n", (12460, 12506), False, 'import tflearn\n'), ((12623, 12693), 'tflearn.conv_2d', 'tflearn.conv_2d', (['eyep', '(8)', '(3)'], {'activation': '"""relu"""', 'name': '"""eye_conv1_1_3x3"""'}), "(eyep, 8, 3, activation='relu', name='eye_conv1_1_3x3')\n", (12638, 12693), False, 'import tflearn\n'), ((12704, 12775), 'tflearn.conv_2d', 'tflearn.conv_2d', (['e_net', '(8)', '(3)'], {'activation': '"""relu"""', 'name': '"""eye_conv1_2_3x3"""'}), "(e_net, 8, 3, activation='relu', name='eye_conv1_2_3x3')\n", (12719, 12775), False, 'import tflearn\n'), ((12786, 12836), 'tflearn.max_pool_2d', 'tflearn.max_pool_2d', (['e_net', '(2)', '(2)'], {'name': '"""eye_pool1"""'}), "(e_net, 2, 2, name='eye_pool1')\n", (12805, 12836), False, 'import tflearn\n'), ((12845, 12917), 'tflearn.conv_2d', 'tflearn.conv_2d', (['e_net', '(32)', '(3)'], {'activation': '"""relu"""', 'name': '"""eye_conv2_1_3x3"""'}), "(e_net, 32, 3, activation='relu', name='eye_conv2_1_3x3')\n", (12860, 12917), False, 'import tflearn\n'), ((12929, 13001), 'tflearn.conv_2d', 'tflearn.conv_2d', (['e_net', '(32)', '(3)'], {'activation': '"""relu"""', 'name': '"""eye_conv2_2_3x3"""'}), "(e_net, 32, 3, activation='relu', name='eye_conv2_2_3x3')\n", (12944, 
13001), False, 'import tflearn\n'), ((13013, 13063), 'tflearn.max_pool_2d', 'tflearn.max_pool_2d', (['e_net', '(2)', '(2)'], {'name': '"""eye_pool2"""'}), "(e_net, 2, 2, name='eye_pool2')\n", (13032, 13063), False, 'import tflearn\n'), ((13076, 13147), 'tflearn.fully_connected', 'tflearn.fully_connected', (['e_net', '(1024)'], {'activation': '"""tanh"""', 'name': '"""eye_fc2"""'}), "(e_net, 1024, activation='tanh', name='eye_fc2')\n", (13099, 13147), False, 'import tflearn\n'), ((13159, 13232), 'tflearn.conv_2d', 'tflearn.conv_2d', (['e_net', '(128)', '(3)'], {'activation': '"""relu"""', 'name': '"""eye_conv3_1_3x3"""'}), "(e_net, 128, 3, activation='relu', name='eye_conv3_1_3x3')\n", (13174, 13232), False, 'import tflearn\n'), ((13244, 13317), 'tflearn.conv_2d', 'tflearn.conv_2d', (['e_net', '(128)', '(3)'], {'activation': '"""relu"""', 'name': '"""eye_conv3_2_3x3"""'}), "(e_net, 128, 3, activation='relu', name='eye_conv3_2_3x3')\n", (13259, 13317), False, 'import tflearn\n'), ((13329, 13379), 'tflearn.max_pool_2d', 'tflearn.max_pool_2d', (['e_net', '(2)', '(2)'], {'name': '"""eye_pool3"""'}), "(e_net, 2, 2, name='eye_pool3')\n", (13348, 13379), False, 'import tflearn\n'), ((13391, 13462), 'tflearn.fully_connected', 'tflearn.fully_connected', (['e_net', '(1024)'], {'activation': '"""tanh"""', 'name': '"""eye_fc1"""'}), "(e_net, 1024, activation='tanh', name='eye_fc1')\n", (13414, 13462), False, 'import tflearn\n'), ((13474, 13516), 'tensorflow.concat', 'tf.concat', (['[e_net, efc2]', '(1)'], {'name': '"""eye_fc"""'}), "([e_net, efc2], 1, name='eye_fc')\n", (13483, 13516), True, 'import tensorflow as tf\n'), ((13531, 13605), 'tflearn.conv_2d', 'tflearn.conv_2d', (['mouthp', '(8)', '(3)'], {'activation': '"""relu"""', 'name': '"""mouth_conv1_1_3x3"""'}), "(mouthp, 8, 3, activation='relu', name='mouth_conv1_1_3x3')\n", (13546, 13605), False, 'import tflearn\n'), ((13617, 13691), 'tflearn.conv_2d', 'tflearn.conv_2d', (['mo_net', '(8)', '(3)'], {'activation': 
'"""relu"""', 'name': '"""mouth_conv1_2_3x3"""'}), "(mo_net, 8, 3, activation='relu', name='mouth_conv1_2_3x3')\n", (13632, 13691), False, 'import tflearn\n'), ((13703, 13756), 'tflearn.max_pool_2d', 'tflearn.max_pool_2d', (['mo_net', '(2)', '(2)'], {'name': '"""mouth_pool1"""'}), "(mo_net, 2, 2, name='mouth_pool1')\n", (13722, 13756), False, 'import tflearn\n'), ((13766, 13841), 'tflearn.conv_2d', 'tflearn.conv_2d', (['mo_net', '(32)', '(3)'], {'activation': '"""relu"""', 'name': '"""mouth_conv2_1_3x3"""'}), "(mo_net, 32, 3, activation='relu', name='mouth_conv2_1_3x3')\n", (13781, 13841), False, 'import tflearn\n'), ((13854, 13929), 'tflearn.conv_2d', 'tflearn.conv_2d', (['mo_net', '(32)', '(3)'], {'activation': '"""relu"""', 'name': '"""mouth_conv2_2_3x3"""'}), "(mo_net, 32, 3, activation='relu', name='mouth_conv2_2_3x3')\n", (13869, 13929), False, 'import tflearn\n'), ((13942, 13995), 'tflearn.max_pool_2d', 'tflearn.max_pool_2d', (['mo_net', '(2)', '(2)'], {'name': '"""mouth_pool2"""'}), "(mo_net, 2, 2, name='mouth_pool2')\n", (13961, 13995), False, 'import tflearn\n'), ((14008, 14082), 'tflearn.fully_connected', 'tflearn.fully_connected', (['mo_net', '(1024)'], {'activation': '"""tanh"""', 'name': '"""mouth_fc2"""'}), "(mo_net, 1024, activation='tanh', name='mouth_fc2')\n", (14031, 14082), False, 'import tflearn\n'), ((14095, 14171), 'tflearn.conv_2d', 'tflearn.conv_2d', (['mo_net', '(128)', '(3)'], {'activation': '"""relu"""', 'name': '"""mouth_conv3_1_3x3"""'}), "(mo_net, 128, 3, activation='relu', name='mouth_conv3_1_3x3')\n", (14110, 14171), False, 'import tflearn\n'), ((14184, 14260), 'tflearn.conv_2d', 'tflearn.conv_2d', (['mo_net', '(128)', '(3)'], {'activation': '"""relu"""', 'name': '"""mouth_conv3_2_3x3"""'}), "(mo_net, 128, 3, activation='relu', name='mouth_conv3_2_3x3')\n", (14199, 14260), False, 'import tflearn\n'), ((14273, 14326), 'tflearn.max_pool_2d', 'tflearn.max_pool_2d', (['mo_net', '(2)', '(2)'], {'name': '"""mouth_pool3"""'}), "(mo_net, 2, 
2, name='mouth_pool3')\n", (14292, 14326), False, 'import tflearn\n'), ((14339, 14413), 'tflearn.fully_connected', 'tflearn.fully_connected', (['mo_net', '(1024)'], {'activation': '"""tanh"""', 'name': '"""mouth_fc1"""'}), "(mo_net, 1024, activation='tanh', name='mouth_fc1')\n", (14362, 14413), False, 'import tflearn\n'), ((14426, 14471), 'tensorflow.concat', 'tf.concat', (['[mo_net, mfc2]', '(1)'], {'name': '"""mouth_fc"""'}), "([mo_net, mfc2], 1, name='mouth_fc')\n", (14435, 14471), True, 'import tensorflow as tf\n'), ((14486, 14532), 'tensorflow.concat', 'tf.concat', (['[e_net, mo_net]', '(1)'], {'name': '"""fusion_1"""'}), "([e_net, mo_net], 1, name='fusion_1')\n", (14495, 14532), True, 'import tensorflow as tf\n'), ((14545, 14613), 'tflearn.fully_connected', 'tflearn.fully_connected', (['fc_net', '(2048)'], {'activation': '"""relu"""', 'name': '"""fc1"""'}), "(fc_net, 2048, activation='relu', name='fc1')\n", (14568, 14613), False, 'import tflearn\n'), ((14626, 14668), 'tflearn.dropout', 'tflearn.dropout', (['fc_net', '(0.8)'], {'name': '"""drop1"""'}), "(fc_net, 0.8, name='drop1')\n", (14641, 14668), False, 'import tflearn\n'), ((14681, 14749), 'tflearn.fully_connected', 'tflearn.fully_connected', (['fc_net', '(2048)'], {'activation': '"""relu"""', 'name': '"""fc2"""'}), "(fc_net, 2048, activation='relu', name='fc2')\n", (14704, 14749), False, 'import tflearn\n'), ((14762, 14804), 'tflearn.dropout', 'tflearn.dropout', (['fc_net', '(0.8)'], {'name': '"""drop2"""'}), "(fc_net, 0.8, name='drop2')\n", (14777, 14804), False, 'import tflearn\n'), ((14818, 14887), 'tflearn.fully_connected', 'tflearn.fully_connected', (['fc_net', '(7)'], {'activation': '"""softmax"""', 'name': '"""prob"""'}), "(fc_net, 7, activation='softmax', name='prob')\n", (14841, 14887), False, 'import tflearn\n'), ((15002, 15072), 'tflearn.conv_2d', 'tflearn.conv_2d', (['eyep', '(8)', '(3)'], {'activation': '"""relu"""', 'name': '"""eye_conv1_1_3x3"""'}), "(eyep, 8, 3, activation='relu', 
name='eye_conv1_1_3x3')\n", (15017, 15072), False, 'import tflearn\n'), ((15083, 15154), 'tflearn.conv_2d', 'tflearn.conv_2d', (['e_net', '(8)', '(3)'], {'activation': '"""relu"""', 'name': '"""eye_conv1_2_3x3"""'}), "(e_net, 8, 3, activation='relu', name='eye_conv1_2_3x3')\n", (15098, 15154), False, 'import tflearn\n'), ((15165, 15215), 'tflearn.max_pool_2d', 'tflearn.max_pool_2d', (['e_net', '(2)', '(2)'], {'name': '"""eye_pool1"""'}), "(e_net, 2, 2, name='eye_pool1')\n", (15184, 15215), False, 'import tflearn\n'), ((15225, 15296), 'tflearn.fully_connected', 'tflearn.fully_connected', (['e_net', '(1024)'], {'activation': '"""tanh"""', 'name': '"""eye_fc3"""'}), "(e_net, 1024, activation='tanh', name='eye_fc3')\n", (15248, 15296), False, 'import tflearn\n'), ((15308, 15380), 'tflearn.conv_2d', 'tflearn.conv_2d', (['e_net', '(32)', '(3)'], {'activation': '"""relu"""', 'name': '"""eye_conv2_1_3x3"""'}), "(e_net, 32, 3, activation='relu', name='eye_conv2_1_3x3')\n", (15323, 15380), False, 'import tflearn\n'), ((15392, 15464), 'tflearn.conv_2d', 'tflearn.conv_2d', (['e_net', '(32)', '(3)'], {'activation': '"""relu"""', 'name': '"""eye_conv2_2_3x3"""'}), "(e_net, 32, 3, activation='relu', name='eye_conv2_2_3x3')\n", (15407, 15464), False, 'import tflearn\n'), ((15476, 15526), 'tflearn.max_pool_2d', 'tflearn.max_pool_2d', (['e_net', '(2)', '(2)'], {'name': '"""eye_pool2"""'}), "(e_net, 2, 2, name='eye_pool2')\n", (15495, 15526), False, 'import tflearn\n'), ((15539, 15610), 'tflearn.fully_connected', 'tflearn.fully_connected', (['e_net', '(1024)'], {'activation': '"""tanh"""', 'name': '"""eye_fc2"""'}), "(e_net, 1024, activation='tanh', name='eye_fc2')\n", (15562, 15610), False, 'import tflearn\n'), ((15622, 15695), 'tflearn.conv_2d', 'tflearn.conv_2d', (['e_net', '(128)', '(3)'], {'activation': '"""relu"""', 'name': '"""eye_conv3_1_3x3"""'}), "(e_net, 128, 3, activation='relu', name='eye_conv3_1_3x3')\n", (15637, 15695), False, 'import tflearn\n'), ((15707, 15780), 
'tflearn.conv_2d', 'tflearn.conv_2d', (['e_net', '(128)', '(3)'], {'activation': '"""relu"""', 'name': '"""eye_conv3_2_3x3"""'}), "(e_net, 128, 3, activation='relu', name='eye_conv3_2_3x3')\n", (15722, 15780), False, 'import tflearn\n'), ((15792, 15842), 'tflearn.max_pool_2d', 'tflearn.max_pool_2d', (['e_net', '(2)', '(2)'], {'name': '"""eye_pool3"""'}), "(e_net, 2, 2, name='eye_pool3')\n", (15811, 15842), False, 'import tflearn\n'), ((15854, 15925), 'tflearn.fully_connected', 'tflearn.fully_connected', (['e_net', '(1024)'], {'activation': '"""tanh"""', 'name': '"""eye_fc1"""'}), "(e_net, 1024, activation='tanh', name='eye_fc1')\n", (15877, 15925), False, 'import tflearn\n'), ((15937, 15985), 'tensorflow.concat', 'tf.concat', (['[e_net, efc2, efc3]', '(1)'], {'name': '"""eye_fc"""'}), "([e_net, efc2, efc3], 1, name='eye_fc')\n", (15946, 15985), True, 'import tensorflow as tf\n'), ((16000, 16074), 'tflearn.conv_2d', 'tflearn.conv_2d', (['mouthp', '(8)', '(3)'], {'activation': '"""relu"""', 'name': '"""mouth_conv1_1_3x3"""'}), "(mouthp, 8, 3, activation='relu', name='mouth_conv1_1_3x3')\n", (16015, 16074), False, 'import tflearn\n'), ((16086, 16160), 'tflearn.conv_2d', 'tflearn.conv_2d', (['mo_net', '(8)', '(3)'], {'activation': '"""relu"""', 'name': '"""mouth_conv1_2_3x3"""'}), "(mo_net, 8, 3, activation='relu', name='mouth_conv1_2_3x3')\n", (16101, 16160), False, 'import tflearn\n'), ((16172, 16225), 'tflearn.max_pool_2d', 'tflearn.max_pool_2d', (['mo_net', '(2)', '(2)'], {'name': '"""mouth_pool1"""'}), "(mo_net, 2, 2, name='mouth_pool1')\n", (16191, 16225), False, 'import tflearn\n'), ((16235, 16309), 'tflearn.fully_connected', 'tflearn.fully_connected', (['mo_net', '(1024)'], {'activation': '"""tanh"""', 'name': '"""mouth_fc3"""'}), "(mo_net, 1024, activation='tanh', name='mouth_fc3')\n", (16258, 16309), False, 'import tflearn\n'), ((16322, 16397), 'tflearn.conv_2d', 'tflearn.conv_2d', (['mo_net', '(32)', '(3)'], {'activation': '"""relu"""', 'name': 
'"""mouth_conv2_1_3x3"""'}), "(mo_net, 32, 3, activation='relu', name='mouth_conv2_1_3x3')\n", (16337, 16397), False, 'import tflearn\n'), ((16410, 16485), 'tflearn.conv_2d', 'tflearn.conv_2d', (['mo_net', '(32)', '(3)'], {'activation': '"""relu"""', 'name': '"""mouth_conv2_2_3x3"""'}), "(mo_net, 32, 3, activation='relu', name='mouth_conv2_2_3x3')\n", (16425, 16485), False, 'import tflearn\n'), ((16498, 16551), 'tflearn.max_pool_2d', 'tflearn.max_pool_2d', (['mo_net', '(2)', '(2)'], {'name': '"""mouth_pool2"""'}), "(mo_net, 2, 2, name='mouth_pool2')\n", (16517, 16551), False, 'import tflearn\n'), ((16564, 16638), 'tflearn.fully_connected', 'tflearn.fully_connected', (['mo_net', '(1024)'], {'activation': '"""tanh"""', 'name': '"""mouth_fc2"""'}), "(mo_net, 1024, activation='tanh', name='mouth_fc2')\n", (16587, 16638), False, 'import tflearn\n'), ((16651, 16727), 'tflearn.conv_2d', 'tflearn.conv_2d', (['mo_net', '(128)', '(3)'], {'activation': '"""relu"""', 'name': '"""mouth_conv3_1_3x3"""'}), "(mo_net, 128, 3, activation='relu', name='mouth_conv3_1_3x3')\n", (16666, 16727), False, 'import tflearn\n'), ((16740, 16816), 'tflearn.conv_2d', 'tflearn.conv_2d', (['mo_net', '(128)', '(3)'], {'activation': '"""relu"""', 'name': '"""mouth_conv3_2_3x3"""'}), "(mo_net, 128, 3, activation='relu', name='mouth_conv3_2_3x3')\n", (16755, 16816), False, 'import tflearn\n'), ((16829, 16882), 'tflearn.max_pool_2d', 'tflearn.max_pool_2d', (['mo_net', '(2)', '(2)'], {'name': '"""mouth_pool3"""'}), "(mo_net, 2, 2, name='mouth_pool3')\n", (16848, 16882), False, 'import tflearn\n'), ((16895, 16969), 'tflearn.fully_connected', 'tflearn.fully_connected', (['mo_net', '(1024)'], {'activation': '"""tanh"""', 'name': '"""mouth_fc1"""'}), "(mo_net, 1024, activation='tanh', name='mouth_fc1')\n", (16918, 16969), False, 'import tflearn\n'), ((16982, 17033), 'tensorflow.concat', 'tf.concat', (['[mo_net, mfc2, mfc3]', '(1)'], {'name': '"""mouth_fc"""'}), "([mo_net, mfc2, mfc3], 1, name='mouth_fc')\n", 
(16991, 17033), True, 'import tensorflow as tf\n'), ((17048, 17094), 'tensorflow.concat', 'tf.concat', (['[e_net, mo_net]', '(1)'], {'name': '"""fusion_1"""'}), "([e_net, mo_net], 1, name='fusion_1')\n", (17057, 17094), True, 'import tensorflow as tf\n'), ((17106, 17174), 'tflearn.fully_connected', 'tflearn.fully_connected', (['fc_net', '(2048)'], {'activation': '"""relu"""', 'name': '"""fc1"""'}), "(fc_net, 2048, activation='relu', name='fc1')\n", (17129, 17174), False, 'import tflearn\n'), ((17187, 17229), 'tflearn.dropout', 'tflearn.dropout', (['fc_net', '(0.8)'], {'name': '"""drop1"""'}), "(fc_net, 0.8, name='drop1')\n", (17202, 17229), False, 'import tflearn\n'), ((17242, 17310), 'tflearn.fully_connected', 'tflearn.fully_connected', (['fc_net', '(2048)'], {'activation': '"""relu"""', 'name': '"""fc2"""'}), "(fc_net, 2048, activation='relu', name='fc2')\n", (17265, 17310), False, 'import tflearn\n'), ((17323, 17365), 'tflearn.dropout', 'tflearn.dropout', (['fc_net', '(0.8)'], {'name': '"""drop2"""'}), "(fc_net, 0.8, name='drop2')\n", (17338, 17365), False, 'import tflearn\n'), ((17379, 17448), 'tflearn.fully_connected', 'tflearn.fully_connected', (['fc_net', '(7)'], {'activation': '"""softmax"""', 'name': '"""prob"""'}), "(fc_net, 7, activation='softmax', name='prob')\n", (17402, 17448), False, 'import tflearn\n'), ((18189, 18199), 'tensorflow.Graph', 'tf.Graph', ([], {}), '()\n', (18197, 18199), True, 'import tensorflow as tf\n'), ((18272, 18311), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', 'eye_p_shape'], {}), '(tf.float32, eye_p_shape)\n', (18286, 18311), True, 'import tensorflow as tf\n'), ((18338, 18377), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', 'mou_p_shape'], {}), '(tf.float32, mou_p_shape)\n', (18352, 18377), True, 'import tensorflow as tf\n'), ((19331, 19347), 'tensorflow.train.Saver', 'tf.train.Saver', ([], {}), '()\n', (19345, 19347), True, 'import tensorflow as tf\n'), ((19411, 19457), 
'tensorflow.InteractiveSession', 'tf.InteractiveSession', ([], {'graph': 'self.networkGraph'}), '(graph=self.networkGraph)\n', (19432, 19457), True, 'import tensorflow as tf\n'), ((20404, 20426), 'numpy.argmax', 'np.argmax', (['probability'], {}), '(probability)\n', (20413, 20426), True, 'import numpy as np\n'), ((18808, 18848), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', 'midd_p_shape'], {}), '(tf.float32, midd_p_shape)\n', (18822, 18848), True, 'import tensorflow as tf\n')] |
"""
Place this file within the generatd data folder
data_folder/
- data.csv
- player.py <== Like such
- images/
- 1589202544.57268.jpg
- 1589202545.33127.jpg
...
- 1589203451.23581.jpg
"""
import cv2
import os
import numpy as np
import traceback
import time
# Read the whole telemetry CSV up front; `with` closes the handle
# (the original left the file object open for the program's lifetime).
#data_path = "new_data.csv"  # alternate dataset
with open("data.csv", "r") as data_file:
    data = data_file.read()

# Column indices of each CSV row.
data_imagefile, steering_angle, speed, throttle, brakes = list(range(5))
# NOTE(review): throttle and brakes are aliased to the speed column --
# presumably those columns are absent from data.csv; confirm before relying
# on their values.
throttle = speed
brakes = speed

IMAGES = os.listdir('images')
IMAGES.sort()

BLUE = (255, 0, 0)  # BGR blue used by the cv2.putText overlays
def region_of_interest(img, vertices):
    """Mask *img* so that only the polygon described by *vertices* is kept.

    `vertices` is an int32 array of polygon corner points as expected by
    cv2.fillPoly; everything outside the polygon is zeroed.
    """
    # Single-channel mask: white (255) inside the polygon, black elsewhere.
    polygon_mask = np.zeros_like(img)
    cv2.fillPoly(polygon_mask, vertices, 255)
    # Keep only the pixels that fall inside the polygon.
    return cv2.bitwise_and(img, polygon_mask)
def drow_the_lines(img, lines):
    """Return a copy of `img` with the given Hough line segments drawn on it.

    `lines` may be None -- cv2.HoughLinesP returns None when no segments are
    detected (the original crashed on that and relied on the caller's broad
    except to recover).  With no lines the frame is simply blended with an
    empty overlay.
    """
    img = np.copy(img)
    overlay = np.zeros((img.shape[0], img.shape[1], 3), dtype=np.uint8)
    if lines is not None:  # guard: HoughLinesP yields None when nothing found
        for line in lines:
            for x1, y1, x2, y2 in line:
                cv2.line(overlay, (x1, y1), (x2, y2), (0, 255, 0), thickness=10)
    # 0.8 weight on the frame keeps the green overlay clearly visible.
    return cv2.addWeighted(img, 0.8, overlay, 1, 0.0)
# = cv2.imread('road.jpg')
#image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
def process(image):
    """Run the lane-detection pipeline on a single frame.

    Returns a 3-channel edge image with the detected Hough line segments
    drawn over it.  On any failure the traceback is printed and the
    unmodified input frame is returned so the playback loop keeps running.
    """
    try:
        height = image.shape[0]
        width = image.shape[1]
        # Rectangle covering the road area of the frame (rows 260..380).
        region_of_interest_vertices = [
            (0, 380),
            (0, 260),
            (width-20, 260),
            (width-20, 380)
        ]
        gray_image = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
        canny_image = cv2.Canny(gray_image, 60, 80)
        cropped_image = region_of_interest(canny_image,
                        np.array([region_of_interest_vertices], np.int32),)
        lines = cv2.HoughLinesP(cropped_image,
                                rho=2,
                                theta=np.pi/180,
                                threshold=50,
                                lines=np.array([]),
                                minLineLength=40,
                                maxLineGap=100)
        # Draw the segments over the 3-channel edge image, not the raw frame.
        # (A dead `np.zeros((height, width, 3))` assignment that was
        # immediately overwritten has been removed.)
        blank_image = cv2.cvtColor(canny_image, cv2.COLOR_GRAY2RGB)
        image_with_lines = drow_the_lines(blank_image, lines)
        return image_with_lines
    except Exception:
        # Was a bare `except:`, which also swallowed KeyboardInterrupt and
        # SystemExit; Exception keeps the intended "never crash playback"
        # behaviour without blocking interpreter shutdown.
        traceback.print_exc()
        return image
OUTPUT_MODE = True  # True: write processed frames to ./output; False: show them live
while True:
    for line in data.split('\n'):
        if line:
            # Each row: image path, steering angle, speed, throttle, brakes.
            instance = line.split(",")
            print(instance[data_imagefile])
            #myCsvRow = ",".join(list(map(str, [IMAGES[0], instance[steering_angle], instance[speed], instance[throttle], instance[brakes]])))
            #IMAGES.pop(0)
            #with open('new_data.csv', 'a') as fd: # Append to file
            #    fd.write(myCsvRow + '\n')
            # "images/1589202544.572.jpg" -> "1589202544.572" timestamp label.
            filename = instance[data_imagefile].split("/")[1]
            current_frame = filename.split(".")[0] + "." + filename.split(".")[1][:3]
            img = cv2.imread(instance[data_imagefile])
            img = process(img)
            if not OUTPUT_MODE:
                # Telemetry overlay only in live-viewing mode.
                img = cv2.putText(img, 'steering_angle ' + instance[steering_angle], (20,20), cv2.FONT_HERSHEY_SIMPLEX, 0.5, BLUE)
                img = cv2.putText(img, 'throttle ' + instance[throttle], (20,40), cv2.FONT_HERSHEY_SIMPLEX, 0.5, BLUE)
                img = cv2.putText(img, 'time ' + current_frame, (20,60), cv2.FONT_HERSHEY_SIMPLEX, 0.5, BLUE)
            if np.array(img).any():  # skip frames that are empty/all-black
                if img.shape[0]>0 and img.shape[1]>0:
                    if not OUTPUT_MODE:
                        cv2.imshow('player.py', img)
                        #cv2.waitKey(0) # waits until a key is pressed
                        # 'q' quits the playback of the current pass.
                        if cv2.waitKey(25) & 0xFF == ord('q'):
                            break
                        time.sleep(0.015)
                    else:
                        new_file_path = os.path.join(os.getcwd(), 'output', filename)
                        print(new_file_path)
                        cv2.imwrite(new_file_path, img)
            # NOTE(review): this break sits inside the per-row body, so in
            # OUTPUT_MODE only the first row is processed before the `for`
            # exits -- and the outer `while True` then restarts it, looping
            # on the first frame forever. Presumably it was meant to run
            # after the whole file; confirm intended behaviour.
            if OUTPUT_MODE:
                print("DONE")
                break
cv2.destroyAllWindows() # destroys the window showing image
# NOTE(review): unreachable while the `while True` above has no exit path.
| [
"cv2.bitwise_and",
"cv2.fillPoly",
"cv2.imshow",
"cv2.line",
"numpy.zeros_like",
"traceback.print_exc",
"numpy.copy",
"cv2.cvtColor",
"cv2.imwrite",
"cv2.destroyAllWindows",
"cv2.Canny",
"cv2.waitKey",
"cv2.addWeighted",
"time.sleep",
"os.listdir",
"cv2.putText",
"os.getcwd",
"nump... | [((480, 500), 'os.listdir', 'os.listdir', (['"""images"""'], {}), "('images')\n", (490, 500), False, 'import os\n'), ((3733, 3756), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (3754, 3756), False, 'import cv2\n'), ((587, 605), 'numpy.zeros_like', 'np.zeros_like', (['img'], {}), '(img)\n', (600, 605), True, 'import numpy as np\n'), ((671, 717), 'cv2.fillPoly', 'cv2.fillPoly', (['mask', 'vertices', 'match_mask_color'], {}), '(mask, vertices, match_mask_color)\n', (683, 717), False, 'import cv2\n'), ((737, 763), 'cv2.bitwise_and', 'cv2.bitwise_and', (['img', 'mask'], {}), '(img, mask)\n', (752, 763), False, 'import cv2\n'), ((831, 843), 'numpy.copy', 'np.copy', (['img'], {}), '(img)\n', (838, 843), True, 'import numpy as np\n'), ((862, 919), 'numpy.zeros', 'np.zeros', (['(img.shape[0], img.shape[1], 3)'], {'dtype': 'np.uint8'}), '((img.shape[0], img.shape[1], 3), dtype=np.uint8)\n', (870, 919), True, 'import numpy as np\n'), ((1070, 1116), 'cv2.addWeighted', 'cv2.addWeighted', (['img', '(0.8)', 'blank_image', '(1)', '(0.0)'], {}), '(img, 0.8, blank_image, 1, 0.0)\n', (1085, 1116), False, 'import cv2\n'), ((1467, 1506), 'cv2.cvtColor', 'cv2.cvtColor', (['image', 'cv2.COLOR_RGB2GRAY'], {}), '(image, cv2.COLOR_RGB2GRAY)\n', (1479, 1506), False, 'import cv2\n'), ((1526, 1555), 'cv2.Canny', 'cv2.Canny', (['gray_image', '(60)', '(80)'], {}), '(gray_image, 60, 80)\n', (1535, 1555), False, 'import cv2\n'), ((2091, 2129), 'numpy.zeros', 'np.zeros', (['(height, width, 3)', 'np.uint8'], {}), '((height, width, 3), np.uint8)\n', (2099, 2129), True, 'import numpy as np\n'), ((2147, 2192), 'cv2.cvtColor', 'cv2.cvtColor', (['canny_image', 'cv2.COLOR_GRAY2RGB'], {}), '(canny_image, cv2.COLOR_GRAY2RGB)\n', (2159, 2192), False, 'import cv2\n'), ((992, 1060), 'cv2.line', 'cv2.line', (['blank_image', '(x1, y1)', '(x2, y2)', '(0, 255, 0)'], {'thickness': '(10)'}), '(blank_image, (x1, y1), (x2, y2), (0, 255, 0), thickness=10)\n', (1000, 1060), False, 
'import cv2\n'), ((1630, 1679), 'numpy.array', 'np.array', (['[region_of_interest_vertices]', 'np.int32'], {}), '([region_of_interest_vertices], np.int32)\n', (1638, 1679), True, 'import numpy as np\n'), ((2292, 2313), 'traceback.print_exc', 'traceback.print_exc', ([], {}), '()\n', (2311, 2313), False, 'import traceback\n'), ((2858, 2894), 'cv2.imread', 'cv2.imread', (['instance[data_imagefile]'], {}), '(instance[data_imagefile])\n', (2868, 2894), False, 'import cv2\n'), ((1911, 1923), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (1919, 1923), True, 'import numpy as np\n'), ((2950, 3063), 'cv2.putText', 'cv2.putText', (['img', "('steering_angle ' + instance[steering_angle])", '(20, 20)', 'cv2.FONT_HERSHEY_SIMPLEX', '(0.5)', 'BLUE'], {}), "(img, 'steering_angle ' + instance[steering_angle], (20, 20),\n cv2.FONT_HERSHEY_SIMPLEX, 0.5, BLUE)\n", (2961, 3063), False, 'import cv2\n'), ((3070, 3178), 'cv2.putText', 'cv2.putText', (['img', "('throttle ' + instance[throttle])", '(20, 40)', 'cv2.FONT_HERSHEY_SIMPLEX', '(0.5)', 'BLUE'], {}), "(img, 'throttle ' + instance[throttle], (20, 40), cv2.\n FONT_HERSHEY_SIMPLEX, 0.5, BLUE)\n", (3081, 3178), False, 'import cv2\n'), ((3184, 3287), 'cv2.putText', 'cv2.putText', (['img', "('time ' + current_frame)", '(20, 60)', 'cv2.FONT_HERSHEY_SIMPLEX', '(0.5)', 'BLUE'], {}), "(img, 'time ' + current_frame, (20, 60), cv2.\n FONT_HERSHEY_SIMPLEX, 0.5, BLUE)\n", (3195, 3287), False, 'import cv2\n'), ((3289, 3302), 'numpy.array', 'np.array', (['img'], {}), '(img)\n', (3297, 3302), True, 'import numpy as np\n'), ((3383, 3411), 'cv2.imshow', 'cv2.imshow', (['"""player.py"""', 'img'], {}), "('player.py', img)\n", (3393, 3411), False, 'import cv2\n'), ((3529, 3546), 'time.sleep', 'time.sleep', (['(0.015)'], {}), '(0.015)\n', (3539, 3546), False, 'import time\n'), ((3659, 3690), 'cv2.imwrite', 'cv2.imwrite', (['new_file_path', 'img'], {}), '(new_file_path, img)\n', (3670, 3690), False, 'import cv2\n'), ((3593, 3604), 'os.getcwd', 
'os.getcwd', ([], {}), '()\n', (3602, 3604), False, 'import os\n'), ((3474, 3489), 'cv2.waitKey', 'cv2.waitKey', (['(25)'], {}), '(25)\n', (3485, 3489), False, 'import cv2\n')] |
import tensorflow as tf
import numpy as np

# Random batch of 4 matrices, each 5x20.
x = np.random.randn(4, 5, 20)
# Avoid shadowing the builtin `input`.
input_tensor = tf.constant(x)
x = tf.cast(input_tensor, 'float32')
con = tf.get_variable("weight", [20, 10])
# Bug fix: tf.dot does not exist in TensorFlow; tf.matmul performs the
# batched (4,5,20) x (20,10) product, yielding shape (4, 5, 10).
z = tf.matmul(x, con)
# z = tf.nn.conv2d(tf.cast(input_tensor, 'float32'), con, strides=[1,1,1,1], padding="VALID")
sess = tf.Session()
sess.run(tf.global_variables_initializer())
output = sess.run(z)
print(output.shape) | [
"tensorflow.dot",
"numpy.random.randn",
"tensorflow.global_variables_initializer",
"tensorflow.Session",
"tensorflow.constant",
"tensorflow.cast",
"tensorflow.get_variable"
] | [((47, 72), 'numpy.random.randn', 'np.random.randn', (['(4)', '(5)', '(20)'], {}), '(4, 5, 20)\n', (62, 72), True, 'import numpy as np\n'), ((79, 93), 'tensorflow.constant', 'tf.constant', (['x'], {}), '(x)\n', (90, 93), True, 'import tensorflow as tf\n'), ((98, 123), 'tensorflow.cast', 'tf.cast', (['input', '"""float32"""'], {}), "(input, 'float32')\n", (105, 123), True, 'import tensorflow as tf\n'), ((129, 164), 'tensorflow.get_variable', 'tf.get_variable', (['"""weight"""', '[20, 10]'], {}), "('weight', [20, 10])\n", (144, 164), True, 'import tensorflow as tf\n'), ((168, 182), 'tensorflow.dot', 'tf.dot', (['x', 'con'], {}), '(x, con)\n', (174, 182), True, 'import tensorflow as tf\n'), ((269, 281), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (279, 281), True, 'import tensorflow as tf\n'), ((291, 324), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (322, 324), True, 'import tensorflow as tf\n')] |
# Standard library
import concurrent
import time
import warnings
from os import system

# Third-party
import numpy as np
import pandas as pd
from pebble import ProcessPool
from sklearn.model_selection import train_test_split
# Silence the overflow RuntimeWarnings raised inside the sigmoid for large |x|.
# np.warnings was an accidental numpy re-export (removed in numpy >= 1.24);
# use the stdlib warnings module directly.
warnings.filterwarnings('ignore', 'overflow')

CPU_PROCESSES = 12  # CPU processes; reduce this on a lower-spec machine
LOGISTIC_REGRESSION_LOOPS = 50  # gradient-descent iterations (~3 min at 50)
LEARNING_RATE = 0.02  # gradient-descent step size
MIN_GAMES_FOR_GROUPING = 40  # groups with fewer games than this are ignored
class LogisticRegression:
    """Binary logistic-regression classifier trained with batch gradient descent."""

    def _sigmoid(self, x):
        """Logistic sigmoid; maps any real value into (0, 1)."""
        return 1 / (1 + np.exp(-x))

    def fit(self, X, y, n_iterations=None, learning_rate=None):
        """Learn `self.weights` and `self.bias` from feature matrix X and
        binary labels y (1 = win, 0 = loss).

        n_iterations and learning_rate default to the module-level
        LOGISTIC_REGRESSION_LOOPS and LEARNING_RATE constants (preserving the
        original behaviour) but may now be overridden explicitly.
        """
        if n_iterations is None:
            n_iterations = LOGISTIC_REGRESSION_LOOPS
        if learning_rate is None:
            learning_rate = LEARNING_RATE
        n_samples, n_features = X.shape
        self.weights = np.zeros(n_features)  # ndarray instead of a Python list
        self.bias = 0.0
        for _ in range(n_iterations):
            # Weighted sum followed by the sigmoid gives P(win) per game.
            predictions = self._sigmoid(np.dot(X, self.weights) + self.bias)
            # Gradients of the log-loss w.r.t. the weights and the bias.
            dw = np.dot(X.T, predictions - y) / n_samples
            db = np.sum(predictions - y) / n_samples
            # Step against the gradient; each pass refines the parameters.
            self.weights = self.weights - learning_rate * dw
            self.bias = self.bias - learning_rate * db

    def getModel(self):
        """Return the learned parameters as a dict."""
        return {'weights': self.weights, 'bias': self.bias}

    def predict(self, X):
        """Return hard 0/1 predictions; probability >= 0.5 counts as a win."""
        probabilities = self._sigmoid(np.dot(X, self.weights) + self.bias)
        return np.array([1 if p >= 0.5 else 0 for p in probabilities])
class CODHelper:
    """Helpers for loading Call of Duty match data and searching, per game
    grouping, for the best-performing combination of match statistics to
    feed a logistic-regression win/loss model."""
    # Class-level scratch list for independent-variable groupings.
    iv_groups=[]
    # Candidate per-match statistics used as model inputs.
    # ("independant" spelling kept -- the name is referenced throughout.)
    independant_vars = ['duration', 'kills', 'ekiadRatio',
    'rankAtEnd', 'shotsLanded', 'highestMultikill', 'score',
    'headshots', 'assists', 'scorePerMinute', 'deaths', 'damageDealt',
    'shotsMissed', 'multikills', 'highestStreak', 'hits', 'timePlayed',
    'suicides', 'timePlayedAlive', 'objectives', 'shotsFired']
    def __init__(self):
        # Eagerly load and prepare the CSV data set on construction.
        self.loadFullDataSet()
#Load CSV and prepare data
#COD_Games.csv has been generated using historical match results from Activision's API using my personal key
def loadFullDataSet(self):
df = pd.read_csv('COD_Games.csv', index_col="matchID")
df = df[(df.isPresentAtEnd == 1)] #Restrict games to the playing the whole game till the end
df = df[(df.result == 'win') | (df.result == 'loss')] #Only interested in wins or losses
df.result = df.result.map( {'win':1 , 'loss':0} ) #Convert string win to 1 and string loss to 0
df['map_mode'] = df['map'].str.cat(df['mode'],sep="-") #Combine map and mode to one single column
self.df = df
#Create unique independant variable combinations to feed the learning with different data
#i.e ['kills','headshots','objectives']
# group_count sets the number of combinations 1, 2 or 3
def buildUniqueIndependantVariableCombinations(self, group_count):
iv_groups=[]
for i in range(0, len(self.independant_vars)-1):
if group_count==1:
iv_groups.append([self.independant_vars[i]])
else:
for j in range(i+1, len(self.independant_vars)-1):
if group_count ==2 :
iv_groups.append([self.independant_vars[i], self.independant_vars[j] ])
else:
for k in range(j+1, len(self.independant_vars)-1):
iv_groups.append([self.independant_vars[i], self.independant_vars[j], self.independant_vars[k]])
return iv_groups
#Cacluate the accuracy of the results by comparing the model result to the actual game result
#Divide total of correct guesses by the total games to get the accruacy (between 0.0 and 1.0)
def accuracy(self, y_true, y_pred):
accuracy = np.sum(y_true == y_pred) / len(y_true)
return accuracy
    #Run a test for a particular type of game and loop through independant variables to see what is the best result
    #If the accuracy is higher than the last independant variable set then it becomes the winner
    def runLRFilter(self, pool, filterVariableName, iv_group_length ):
        """For each distinct value of `filterVariableName` (or a single
        'no_grouping' bucket), schedule one logistic regression per
        independent-variable grouping on `pool` and keep, per value, the
        grouping whose reported 'score' is highest.

        Returns {filter_value: {'score', 'iv_group', 'model', 'gamesPlayed'}}.
        NOTE(review): self.runLogisticRegression is defined elsewhere in this
        file; its result dict is assumed to carry those four keys.
        """
        bestresults = {}
        #generate our iv combinations
        iv_groups = self.buildUniqueIndependantVariableCombinations(iv_group_length)
        #no grouping scenario, we are looking at all records
        if filterVariableName == 'no_grouping':
            filters = ['no_grouping']
        else:
            #We filter on the overall data set to find games grouped by type weare interested in.
            #Groupings are ignored if the count falls below the MIN_GAMES_FOR_GROUPING value
            #This is done so that you don't compare low game combinations that don't have enough data
            filters = self.df.groupby(filterVariableName).filter(lambda x: x.shape[0] > MIN_GAMES_FOR_GROUPING)[filterVariableName].unique()
        for filterValue in filters:
            bestresults[filterValue] = {'score':0}
            my_iterable = []
            #build a list of iv combinations we want to run LRs over
            for iv_group in iv_groups:
                my_iterable.append([filterValue, filterVariableName, iv_group])
            #run the LRs in a process pool. This speeds things up quite a bit
            results = [pool.schedule(self.runLogisticRegression, args=[value]) for value in my_iterable]
            # NOTE(review): the file only does `import concurrent`; accessing
            # concurrent.futures here presumably works because pebble imports
            # the submodule as a side effect -- confirm.
            completed, pending = concurrent.futures.wait(results)
            # cancel pending futures
            for future in pending:
                future.cancel()
            #Once pool is finished compare results and pick the best performing IV group
            #The best performing is the one with the highest accuracy
            for r in completed:
                result = r.result()
                br = bestresults[filterValue]
                if result['score'] > br['score']:
                    br['score']= result['score']
                    br['iv_group'] = result['iv_group']
                    br['model'] = result['model']
                    br['gamesPlayed'] = result['gamesPlayed']
        return bestresults
#Save results to file + print to screen
def printResultsSummary(self, game_type, iv_group_count, results):
totalScore = 0.0
print("\n\nGame type: {game_type}, iv_groupings: {iv_group_count} with {LOGISTIC_REGRESSION_LOOPS} LR loops and {LEARNING_RATE} learning rate:".format(game_type=game_type, LEARNING_RATE = LEARNING_RATE, LOGISTIC_REGRESSION_LOOPS = LOGISTIC_REGRESSION_LOOPS, iv_group_count=iv_group_count ))
for key in results:
r = results[key]
totalScore += r['score']
print(f" {key}, score:{r['score']}, best ivs:{r['iv_group']}\n model:{r['model']}, gamesPlayed:{r['gamesPlayed']}")
averageAccuracy = totalScore/len(results)
print(f"Average accuracy: {averageAccuracy}")
f = open(f"run_logs/stats_LR_{LOGISTIC_REGRESSION_LOOPS}.txt", "a")
f.write(f"averageAccuracy: {averageAccuracy}, game_type: {game_type}, iv_group_count:{iv_group_count}\n")
f.close()
#Run the Logistic regression for a filter with a specific independant variable
#i.e for 'map' of type 'mp_miami' run a LR for ['kills','timePlayedAlive', 'objectives']
#After LR has been run, check the test games against the model to calculate the accuracy
def runLogisticRegression(self, args):
filterValue, filterVariableName, iv_group = args
lr = LogisticRegression()
filteredDf = self.df if filterVariableName == 'no_grouping' else self.df[(self.df[filterVariableName] == filterValue)]
y = filteredDf.result #extract the game result (win/loss)
X = filteredDf.loc[:, iv_group] #only select the columns we want to run a LR on
#split our data into 70% training, 30% test
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=42)
#fit our training data
lr.fit(X_train, y_train)
#run the test data to see how we went
predictions = lr.predict(X_test)
#work out how well we did
score = self.accuracy(y_test, predictions)
#return the stats
return {'score':score, 'gamesPlayed':len(filteredDf), 'filterValue':filterValue, 'iv_group':iv_group, 'model':lr.getModel()}
'''
Run logistic regressions
For each game type: map, mode and map+mode combined
map = Call of Duty maps https://www.gamesatlas.com/cod-black-ops-cold-war/maps/
mode = Type of game played https://www.callofduty.com/blog/2020/11/Black-Ops-Cold-War-Multiplayer-Modes
map_mode = combinations of game modes played on specific maps
The results:
- 'map' will return the best independant variables that would tell us if the game was going to result in a win or loss on a given map
- 'mode' will return the best independant variables for game mode that would tell us if the game was going to be a win or loss
- 'map_mode' will return the best independant variables for a map and mode combination to predict a win/loss
- 'no_grouping' will return the best independant variables that would tell us if the game was going to result in a win or loss on any map / mode
All combinations need to have at least 40 games (MIN_GAMES_FOR_GROUPING) so not to create a poor model
iv_group_count 1-3 will try check different iv groupings:
(1) ['kills']
(2) ['kills','headshots']
(3) ['kills','headshots','objectives'] ...etc
LOGISTIC_REGRESSION_LOOPS gives us interesting results (2017 Macbook Pro):
5000 iterations takes 4+ hours and best avg. accuracy for map_mode is 0.9153439153439153
1000 iterations, 60 mins, 0.9116090880796763
100 iterations, 7 mins, 0.8734827264239029
50 iterations, 3 mins, 0.8930905695611577
The best results are for map_mode combinations with 3 independant variables.
Game type: map_mode, iv_groupings: 3 with 1000 LR loops and 0.02 learning rate:
mp_kgb-control_cdl, score:0.9411764705882353, best ivs:['kills', 'ekiadRatio', 'headshots']
model:{'weights': array([-0.12139226, 1.27216461, 0.25889435]), 'bias': -0.27291896604698096}, gamesPlayed:54
mp_tank-control_cdl, score:0.8888888888888888, best ivs:['ekiadRatio', 'highestMultikill', 'deaths']
model:{'weights': array([ 1.02329268, -0.32187085, -0.05403984]), 'bias': 0.20085351710537208}, gamesPlayed:59
mp_raid_rm-control_cdl, score:0.9047619047619048, best ivs:['kills', 'highestStreak', 'objectives']
model:{'weights': array([-0.34176689, 1.1054231 , 0.20881256]), 'bias': -1.1330294091635222}, gamesPlayed:68
Average accuracy: 0.9116090880796763
'''
def run(pool):
start = time.time()
helper = CODHelper()
#Loop through filters
for game_type in ['no_grouping','map','mode','map_mode']:
#iv grouping variations
for iv_group_count in [1,2,3]:
results = helper.runLRFilter(pool, game_type, iv_group_count)
helper.printResultsSummary(game_type, iv_group_count, results)
#calculate total time taken & print to screen
end = time.time()
hours, rem = divmod(end-start, 3600)
minutes, seconds = divmod(rem, 60)
print("Total Time: {:0>2}:{:0>2}:{:05.2f}".format(int(hours),int(minutes),seconds))
if __name__ == '__main__':
with ProcessPool(CPU_PROCESSES) as pool:
run(pool) | [
"pebble.ProcessPool",
"numpy.sum",
"pandas.read_csv",
"sklearn.model_selection.train_test_split",
"time.time",
"numpy.array",
"numpy.exp",
"concurrent.futures.wait",
"numpy.dot",
"numpy.warnings.filterwarnings"
] | [((176, 224), 'numpy.warnings.filterwarnings', 'np.warnings.filterwarnings', (['"""ignore"""', '"""overflow"""'], {}), "('ignore', 'overflow')\n", (202, 224), True, 'import numpy as np\n'), ((11906, 11917), 'time.time', 'time.time', ([], {}), '()\n', (11915, 11917), False, 'import time\n'), ((12329, 12340), 'time.time', 'time.time', ([], {}), '()\n', (12338, 12340), False, 'import time\n'), ((2637, 2653), 'numpy.array', 'np.array', (['result'], {}), '(result)\n', (2645, 2653), True, 'import numpy as np\n'), ((3279, 3328), 'pandas.read_csv', 'pd.read_csv', (['"""COD_Games.csv"""'], {'index_col': '"""matchID"""'}), "('COD_Games.csv', index_col='matchID')\n", (3290, 3328), True, 'import pandas as pd\n'), ((9096, 9150), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X', 'y'], {'test_size': '(0.3)', 'random_state': '(42)'}), '(X, y, test_size=0.3, random_state=42)\n', (9112, 9150), False, 'from sklearn.model_selection import train_test_split\n'), ((12547, 12573), 'pebble.ProcessPool', 'ProcessPool', (['CPU_PROCESSES'], {}), '(CPU_PROCESSES)\n', (12558, 12573), False, 'from pebble import ProcessPool\n'), ((2371, 2394), 'numpy.dot', 'np.dot', (['X', 'self.weights'], {}), '(X, self.weights)\n', (2377, 2394), True, 'import numpy as np\n'), ((4945, 4969), 'numpy.sum', 'np.sum', (['(y_true == y_pred)'], {}), '(y_true == y_pred)\n', (4951, 4969), True, 'import numpy as np\n'), ((6599, 6631), 'concurrent.futures.wait', 'concurrent.futures.wait', (['results'], {}), '(results)\n', (6622, 6631), False, 'import concurrent\n'), ((869, 879), 'numpy.exp', 'np.exp', (['(-x)'], {}), '(-x)\n', (875, 879), True, 'import numpy as np\n'), ((1268, 1291), 'numpy.dot', 'np.dot', (['X', 'self.weights'], {}), '(X, self.weights)\n', (1274, 1291), True, 'import numpy as np\n'), ((1482, 1509), 'numpy.dot', 'np.dot', (['X.T', '(prediction - y)'], {}), '(X.T, prediction - y)\n', (1488, 1509), True, 'import numpy as np\n'), ((1657, 1679), 'numpy.sum', 'np.sum', (['(prediction - 
y)'], {}), '(prediction - y)\n', (1663, 1679), True, 'import numpy as np\n')] |
# MIT License
#
# Copyright (c) 2018 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# ==============================================================================
"""Class to train the Neural Network."""
import numpy as np
from config import CFG
from mcts import MonteCarloTreeSearch, TreeNode
from neural_net import NeuralNetworkWrapper
from evaluate import Evaluate
from copy import deepcopy
class Train(object):
"""Class with functions to train the Neural Network using MCTS.
Attributes:
game: An object containing the game state.
net: An object containing the neural network.
"""
def __init__(self, game, net):
"""Initializes Train with the board state and neural network."""
self.game = game
self.net = net
self.eval_net = NeuralNetworkWrapper(game)
def start(self):
"""Main training loop."""
for i in range(CFG.num_iterations):
print("Iteration", i + 1)
training_data = [] # list to store self play states, pis and vs
for j in range(CFG.num_games):
print("Start Training Self-Play Game", j + 1)
game = self.game.clone() # Create a fresh clone for each game.
self.play_game(game, training_data)
# Save the current neural network model.
self.net.save_model()
# Load the recently saved model into the evaluator network.
self.eval_net.load_model()
# Train the network using self play values.
self.net.train(training_data)
# Initialize MonteCarloTreeSearch objects for both networks.
current_mcts = MonteCarloTreeSearch(self.net)
eval_mcts = MonteCarloTreeSearch(self.eval_net)
evaluator = Evaluate(current_mcts=current_mcts, eval_mcts=eval_mcts,
game=self.game)
wins, losses = evaluator.evaluate()
print("wins:", wins)
print("losses:", losses)
num_games = wins + losses
if num_games == 0:
win_rate = 0
else:
win_rate = wins / num_games
print("win rate:", win_rate)
if win_rate > CFG.eval_win_rate:
# Save current model as the best model.
print("New model saved as best model.")
self.net.save_model("best_model")
else:
print("New model discarded and previous model loaded.")
# Discard current model and use previous best model.
self.net.load_model()
def play_game(self, game, training_data):
"""Loop for each self-play game.
Runs MCTS for each game state and plays a move based on the MCTS output.
Stops when the game is over and prints out a winner.
Args:
game: An object containing the game state.
training_data: A list to store self play states, pis and vs.
"""
mcts = MonteCarloTreeSearch(self.net)
game_over = False
value = 0
self_play_data = []
count = 0
node = TreeNode()
# Keep playing until the game is in a terminal state.
while not game_over:
# MCTS simulations to get the best child node.
if count < CFG.temp_thresh:
best_child = mcts.search(game, node, CFG.temp_init)
else:
best_child = mcts.search(game, node, CFG.temp_final)
# Store state, prob and v for training.
self_play_data.append([deepcopy(game.state),
deepcopy(best_child.parent.child_psas),
0])
action = best_child.action
game.play_action(action) # Play the child node's action.
count += 1
game_over, value = game.check_game_over(game.current_player)
best_child.parent = None
node = best_child # Make the child node the root node.
# Update v as the value of the game result.
for game_state in self_play_data:
value = -value
game_state[2] = value
self.augment_data(game_state, training_data, game.row, game.column)
def augment_data(self, game_state, training_data, row, column):
"""Loop for each self-play game.
Runs MCTS for each game state and plays a move based on the MCTS output.
Stops when the game is over and prints out a winner.
Args:
game_state: An object containing the state, pis and value.
training_data: A list to store self play states, pis and vs.
row: An integer indicating the length of the board row.
column: An integer indicating the length of the board column.
"""
state = deepcopy(game_state[0])
psa_vector = deepcopy(game_state[1])
if CFG.game == 2 or CFG.game == 1:
training_data.append([state, psa_vector, game_state[2]])
else:
psa_vector = np.reshape(psa_vector, (row, column))
# Augment data by rotating and flipping the game state.
for i in range(4):
training_data.append([np.rot90(state, i),
np.rot90(psa_vector, i).flatten(),
game_state[2]])
training_data.append([np.fliplr(np.rot90(state, i)),
np.fliplr(
np.rot90(psa_vector, i)).flatten(),
game_state[2]])
| [
"neural_net.NeuralNetworkWrapper",
"copy.deepcopy",
"evaluate.Evaluate",
"mcts.MonteCarloTreeSearch",
"numpy.rot90",
"numpy.reshape",
"mcts.TreeNode"
] | [((1816, 1842), 'neural_net.NeuralNetworkWrapper', 'NeuralNetworkWrapper', (['game'], {}), '(game)\n', (1836, 1842), False, 'from neural_net import NeuralNetworkWrapper\n'), ((4048, 4078), 'mcts.MonteCarloTreeSearch', 'MonteCarloTreeSearch', (['self.net'], {}), '(self.net)\n', (4068, 4078), False, 'from mcts import MonteCarloTreeSearch, TreeNode\n'), ((4186, 4196), 'mcts.TreeNode', 'TreeNode', ([], {}), '()\n', (4194, 4196), False, 'from mcts import MonteCarloTreeSearch, TreeNode\n'), ((5898, 5921), 'copy.deepcopy', 'deepcopy', (['game_state[0]'], {}), '(game_state[0])\n', (5906, 5921), False, 'from copy import deepcopy\n'), ((5943, 5966), 'copy.deepcopy', 'deepcopy', (['game_state[1]'], {}), '(game_state[1])\n', (5951, 5966), False, 'from copy import deepcopy\n'), ((2697, 2727), 'mcts.MonteCarloTreeSearch', 'MonteCarloTreeSearch', (['self.net'], {}), '(self.net)\n', (2717, 2727), False, 'from mcts import MonteCarloTreeSearch, TreeNode\n'), ((2752, 2787), 'mcts.MonteCarloTreeSearch', 'MonteCarloTreeSearch', (['self.eval_net'], {}), '(self.eval_net)\n', (2772, 2787), False, 'from mcts import MonteCarloTreeSearch, TreeNode\n'), ((2813, 2885), 'evaluate.Evaluate', 'Evaluate', ([], {'current_mcts': 'current_mcts', 'eval_mcts': 'eval_mcts', 'game': 'self.game'}), '(current_mcts=current_mcts, eval_mcts=eval_mcts, game=self.game)\n', (2821, 2885), False, 'from evaluate import Evaluate\n'), ((6119, 6156), 'numpy.reshape', 'np.reshape', (['psa_vector', '(row, column)'], {}), '(psa_vector, (row, column))\n', (6129, 6156), True, 'import numpy as np\n'), ((4631, 4651), 'copy.deepcopy', 'deepcopy', (['game.state'], {}), '(game.state)\n', (4639, 4651), False, 'from copy import deepcopy\n'), ((4688, 4726), 'copy.deepcopy', 'deepcopy', (['best_child.parent.child_psas'], {}), '(best_child.parent.child_psas)\n', (4696, 4726), False, 'from copy import deepcopy\n'), ((6295, 6313), 'numpy.rot90', 'np.rot90', (['state', 'i'], {}), '(state, i)\n', (6303, 6313), True, 'import numpy as 
np\n'), ((6491, 6509), 'numpy.rot90', 'np.rot90', (['state', 'i'], {}), '(state, i)\n', (6499, 6509), True, 'import numpy as np\n'), ((6353, 6376), 'numpy.rot90', 'np.rot90', (['psa_vector', 'i'], {}), '(psa_vector, i)\n', (6361, 6376), True, 'import numpy as np\n'), ((6603, 6626), 'numpy.rot90', 'np.rot90', (['psa_vector', 'i'], {}), '(psa_vector, i)\n', (6611, 6626), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import numpy as np
from src.YachtMod import Yacht, Keel, Rudder
from src.SailMod import Main, Jib, Kite
from src.VPPMod import VPP
from src.UtilsMod import VPPResults
YD41 = Yacht(Name="YD41",
Lwl=11.90,
Vol=6.05,
Bwl=3.18,
Tc=0.4,
WSA=28.20,
Tmax=2.30,
Amax=1.051,
Mass=6500,
Ff=1.5,
Fa=1.5,
Boa=4.2,
Loa=12.5,
App=[Keel(Cu=1.00, Cl=0.78, Span=1.90),
Rudder(Cu=0.48, Cl=0.22, Span=1.15),],
Sails=[Main("MN1", P=16.60, E=5.60, Roach=0.1, BAD=1.0),
Jib("J1", I=16.20, J=5.10, LPG=5.40, HBI=1.8),
Kite("A2", area=150.0, vce=9.55)]
)
vpp = VPP(Yacht=YD41)
vpp.set_analysis(tws_range=np.arange(4.0,18.0,4.0),
twa_range=np.linspace(30.0,180.0,34))
vpp.run(verbose=False)
vpp.polar(n=3, save=False)
vpp.SailChart(save=True)
vpp.write('results')
| [
"src.SailMod.Main",
"src.SailMod.Kite",
"src.VPPMod.VPP",
"src.YachtMod.Rudder",
"numpy.arange",
"src.YachtMod.Keel",
"numpy.linspace",
"src.SailMod.Jib"
] | [((835, 850), 'src.VPPMod.VPP', 'VPP', ([], {'Yacht': 'YD41'}), '(Yacht=YD41)\n', (838, 850), False, 'from src.VPPMod import VPP\n'), ((879, 904), 'numpy.arange', 'np.arange', (['(4.0)', '(18.0)', '(4.0)'], {}), '(4.0, 18.0, 4.0)\n', (888, 904), True, 'import numpy as np\n'), ((931, 959), 'numpy.linspace', 'np.linspace', (['(30.0)', '(180.0)', '(34)'], {}), '(30.0, 180.0, 34)\n', (942, 959), True, 'import numpy as np\n'), ((535, 566), 'src.YachtMod.Keel', 'Keel', ([], {'Cu': '(1.0)', 'Cl': '(0.78)', 'Span': '(1.9)'}), '(Cu=1.0, Cl=0.78, Span=1.9)\n', (539, 566), False, 'from src.YachtMod import Yacht, Keel, Rudder\n'), ((589, 624), 'src.YachtMod.Rudder', 'Rudder', ([], {'Cu': '(0.48)', 'Cl': '(0.22)', 'Span': '(1.15)'}), '(Cu=0.48, Cl=0.22, Span=1.15)\n', (595, 624), False, 'from src.YachtMod import Yacht, Keel, Rudder\n'), ((648, 694), 'src.SailMod.Main', 'Main', (['"""MN1"""'], {'P': '(16.6)', 'E': '(5.6)', 'Roach': '(0.1)', 'BAD': '(1.0)'}), "('MN1', P=16.6, E=5.6, Roach=0.1, BAD=1.0)\n", (652, 694), False, 'from src.SailMod import Main, Jib, Kite\n'), ((718, 760), 'src.SailMod.Jib', 'Jib', (['"""J1"""'], {'I': '(16.2)', 'J': '(5.1)', 'LPG': '(5.4)', 'HBI': '(1.8)'}), "('J1', I=16.2, J=5.1, LPG=5.4, HBI=1.8)\n", (721, 760), False, 'from src.SailMod import Main, Jib, Kite\n'), ((785, 817), 'src.SailMod.Kite', 'Kite', (['"""A2"""'], {'area': '(150.0)', 'vce': '(9.55)'}), "('A2', area=150.0, vce=9.55)\n", (789, 817), False, 'from src.SailMod import Main, Jib, Kite\n')] |
import numpy as np
import cv2 as cv
from matplotlib import pyplot as plt
from mpl_toolkits import mplot3d
def plot(nparray, scale=1, title="Plot"):
# Scale the image as specified
nparray = cv.resize(nparray, None, fx=scale, fy=scale,
interpolation=cv.INTER_CUBIC)
# Define the axis based on the image size
xlen, ylen = nparray.shape
x = np.linspace(0, xlen, xlen)
y = np.linspace(0, ylen, ylen)
X, Y = np.meshgrid(y, x)
# Plot the image
fig = plt.figure()
ax = plt.axes(projection='3d')
ax.plot_surface(X, Y, nparray, rstride=1, cstride=1,
cmap='viridis', edgecolor='none')
ax.set_title(title)
| [
"numpy.meshgrid",
"matplotlib.pyplot.axes",
"matplotlib.pyplot.figure",
"numpy.linspace",
"cv2.resize"
] | [((199, 273), 'cv2.resize', 'cv.resize', (['nparray', 'None'], {'fx': 'scale', 'fy': 'scale', 'interpolation': 'cv.INTER_CUBIC'}), '(nparray, None, fx=scale, fy=scale, interpolation=cv.INTER_CUBIC)\n', (208, 273), True, 'import cv2 as cv\n'), ((380, 406), 'numpy.linspace', 'np.linspace', (['(0)', 'xlen', 'xlen'], {}), '(0, xlen, xlen)\n', (391, 406), True, 'import numpy as np\n'), ((415, 441), 'numpy.linspace', 'np.linspace', (['(0)', 'ylen', 'ylen'], {}), '(0, ylen, ylen)\n', (426, 441), True, 'import numpy as np\n'), ((453, 470), 'numpy.meshgrid', 'np.meshgrid', (['y', 'x'], {}), '(y, x)\n', (464, 470), True, 'import numpy as np\n'), ((503, 515), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (513, 515), True, 'from matplotlib import pyplot as plt\n'), ((525, 550), 'matplotlib.pyplot.axes', 'plt.axes', ([], {'projection': '"""3d"""'}), "(projection='3d')\n", (533, 550), True, 'from matplotlib import pyplot as plt\n')] |
import os
import psutil
import math
import numpy as np
import tensorflow as tf
import tracemalloc
import argparse
import logging
from tensorflow import keras
from pathlib import Path
from time import strftime
LOG_DIR = 'logdir'
LOGGER_FORMAT = '%(asctime)s %(message)s'
logging.basicConfig(format=LOGGER_FORMAT, datefmt='[%H:%M:%S]')
log = logging.getLogger()
log.setLevel(logging.INFO)
def parseArgs():
parser = argparse.ArgumentParser()
parser.add_argument('--logdir',
type=str,
help='directory path for the logdir.',
default=None)
parser.add_argument('--profile_batch',
type=str,
help='batches to profile.',
default=None)
parser.add_argument('--tracemalloc',
type=str,
help='specify the batches for snapshots',
default=None)
parser.add_argument('--ds',
type=str,
help='datasource. such as file, db, or BigQuery.',
default='db')
parser.add_argument('--dir',
type=str,
help='directory path for training and test set.')
parser.add_argument('--parallel',
type=int,
help='database operation parallel level',
default=psutil.cpu_count(logical=False))
parser.add_argument('--prefetch',
type=int,
help='dataset prefetch batches',
default=2)
parser.add_argument('--db_host',
type=str,
help='database host address',
default=None)
parser.add_argument('--db_port',
type=int,
help='database listening port',
default=None)
parser.add_argument('--db_pwd',
type=str,
help='database password',
default=None)
parser.add_argument('--vset',
type=int,
help='validation set number',
default=None)
parser.add_argument('--db_pool',
type=int,
help='database connection pool size',
default=psutil.cpu_count(logical=False))
parser.add_argument('--start',
type=int,
help='start training at specified batch no',
default=None)
parser.add_argument('--vol_size',
type=int,
help='volume size for the dataset storage sub-folder',
default=None)
parser.add_argument('--limit_gpu_mem',
type=float,
help='pre-allocate gpu memory (in giga-bytes)',
default=None)
parser.add_argument(
'--terminate_on_nan',
help='abort training process on NaN loss.',
dest='terminate_on_nan',
action='store_true',
)
parser.add_argument(
'--enable_xla',
help='enable XLA feature',
dest='enable_xla',
action='store_true',
)
parser.add_argument(
'--check_input',
help='check inputs for NaN or Inf.',
dest='check_input',
action='store_true',
)
parser.add_argument(
'--check_weights',
help='check trainable weights for NaN or Inf.',
dest='check_weights',
action='store_true',
)
parser.add_argument(
'--gpu_grow_mem',
dest='gpu_grow_mem',
action='store_true',
help='allow gpu to allocate mem dynamically at runtime.')
parser.add_argument('--trace',
dest='trace',
action='store_true',
help='record full trace in validation step.')
parser.add_argument('--profile',
dest='profile',
action='store_true',
help='profile CG execution.')
parser.add_argument('--skip_init_test',
dest='skip_init_test',
action='store_true',
help='whether to skip the initial test.')
parser.add_argument(
'--log_device',
dest='log_device',
action='store_true',
help='record device info such as CPU and GPU in tensorboard.')
parser.add_argument('--restart',
help='restart training',
action='store_true')
return parser.parse_args()
def next_power_of_2(x):
return 1 if x == 0 else 2**(x - 1).bit_length()
def setupPath():
p1 = os.path.dirname(os.path.abspath(__file__))
p2 = os.path.dirname(p1)
p3 = os.path.dirname(p2)
p4 = os.path.dirname(p3)
os.environ[
"PYTHONPATH"] = p1 + ":" + p2 + ":" + p3 + ":" + p4 + ":" + os.environ.get(
"PYTHONPATH", "")
class DebugCallback(keras.callbacks.Callback):
def __init__(self, iterations={}, exclude_layers={}, out_file='debug.log'):
super(DebugCallback, self).__init__()
print('{} DebugCallback is enabled'.format(strftime("%H:%M:%S")))
self.iterations = iterations
self.exclude_layers = exclude_layers
self.out_file = out_file
def on_train_batch_end(self, batch, logs=None):
i = self.model.optimizer.iterations.numpy()
print('{} iteration: {}, logs={}'.format(strftime("%H:%M:%S"), i,
logs))
if not math.isnan(logs['loss']):
return
print(
'{} encountered NaN loss. checking layer weights. iteration {}, logs = {}'
.format(strftime("%H:%M:%S"), i, logs))
layers = self.model.layers
for layer in layers:
weights = layer.get_weights()
for idx, w in enumerate(weights):
found = False
if np.ma.is_masked(w):
print(
'masked array found at iteration {} for {}, weight[{}]'
.format(i, layer, idx))
found = True
nanLoc = np.argwhere(np.isnan(w))
if len(nanLoc) > 0:
print(
'nan found at iteration {} for {}, weight[{}], location: {}'
.format(i, layer.name, idx, nanLoc))
found = True
infLoc = np.argwhere(np.isinf(w))
if len(infLoc) > 0:
print(
'inf found at iteration {} for {}, weight[{}], location: {}'
.format(i, layer.name, idx, infLoc))
found = True
if found:
print(w)
tf.debugging.check_numerics(
w, 'invalid weight found at iteration {} for {}, idx[{}]'.
format(i, layer.name, idx))
class TracemallocCallback(keras.callbacks.Callback):
def __init__(self, nframe=500, batches='200,300', out_file='tracemalloc.log'):
super(TracemallocCallback, self).__init__()
tracemalloc.start(nframe)
print('{} TracemallocCallback is enabled at batches {}'.format(
strftime("%H:%M:%S"), batches))
seg = batches.split(',')
self.start = int(seg[0])
self.end = int(seg[1])
self.out_file = out_file
path_seg = os.path.splitext(self.out_file)
self.out_file_base, self.out_file_ext = path_seg[0], path_seg[1]
def on_train_batch_end(self, batch, logs=None):
i = self.model.optimizer.iterations.numpy()
if i == self.start:
self.snapshot1 = tracemalloc.take_snapshot()
dest = '{}_1{}'.format(self.out_file_base, self.out_file_ext)
self.snapshot1.dump(dest)
tf.print('tracemalloc snapshot #1 at iteration ',
i, ' has been dumped to ', dest)
elif i == self.end:
dest = '{}_2{}'.format(self.out_file_base, self.out_file_ext)
snapshot2 = tracemalloc.take_snapshot()
snapshot2.dump(dest)
tf.print('tracemalloc snapshot #2 at iteration ',
i, ' has been dumped to ', dest)
stats_diff = snapshot2.compare_to(self.snapshot1, 'lineno')
diff_dest = '{}_d{}'.format(self.out_file_base, self.out_file_ext)
with open(diff_dest, 'w') as f:
for stat in stats_diff:
print(stat, file=f)
tf.print('2 snapshot compare has been dumped to ', diff_dest)
| [
"tracemalloc.start",
"os.path.abspath",
"math.isnan",
"argparse.ArgumentParser",
"logging.basicConfig",
"tensorflow.print",
"tracemalloc.take_snapshot",
"os.path.dirname",
"time.strftime",
"numpy.isnan",
"numpy.isinf",
"os.environ.get",
"os.path.splitext",
"psutil.cpu_count",
"numpy.ma.i... | [((272, 335), 'logging.basicConfig', 'logging.basicConfig', ([], {'format': 'LOGGER_FORMAT', 'datefmt': '"""[%H:%M:%S]"""'}), "(format=LOGGER_FORMAT, datefmt='[%H:%M:%S]')\n", (291, 335), False, 'import logging\n'), ((342, 361), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (359, 361), False, 'import logging\n'), ((421, 446), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (444, 446), False, 'import argparse\n'), ((4937, 4956), 'os.path.dirname', 'os.path.dirname', (['p1'], {}), '(p1)\n', (4952, 4956), False, 'import os\n'), ((4966, 4985), 'os.path.dirname', 'os.path.dirname', (['p2'], {}), '(p2)\n', (4981, 4985), False, 'import os\n'), ((4995, 5014), 'os.path.dirname', 'os.path.dirname', (['p3'], {}), '(p3)\n', (5010, 5014), False, 'import os\n'), ((4901, 4926), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (4916, 4926), False, 'import os\n'), ((5099, 5131), 'os.environ.get', 'os.environ.get', (['"""PYTHONPATH"""', '""""""'], {}), "('PYTHONPATH', '')\n", (5113, 5131), False, 'import os\n'), ((7380, 7405), 'tracemalloc.start', 'tracemalloc.start', (['nframe'], {}), '(nframe)\n', (7397, 7405), False, 'import tracemalloc\n'), ((7671, 7702), 'os.path.splitext', 'os.path.splitext', (['self.out_file'], {}), '(self.out_file)\n', (7687, 7702), False, 'import os\n'), ((1454, 1485), 'psutil.cpu_count', 'psutil.cpu_count', ([], {'logical': '(False)'}), '(logical=False)\n', (1470, 1485), False, 'import psutil\n'), ((2462, 2493), 'psutil.cpu_count', 'psutil.cpu_count', ([], {'logical': '(False)'}), '(logical=False)\n', (2478, 2493), False, 'import psutil\n'), ((5759, 5783), 'math.isnan', 'math.isnan', (["logs['loss']"], {}), "(logs['loss'])\n", (5769, 5783), False, 'import math\n'), ((7938, 7965), 'tracemalloc.take_snapshot', 'tracemalloc.take_snapshot', ([], {}), '()\n', (7963, 7965), False, 'import tracemalloc\n'), ((8090, 8176), 'tensorflow.print', 'tf.print', (['"""tracemalloc snapshot #1 at 
iteration """', 'i', '""" has been dumped to """', 'dest'], {}), "('tracemalloc snapshot #1 at iteration ', i, ' has been dumped to ',\n dest)\n", (8098, 8176), True, 'import tensorflow as tf\n'), ((5371, 5391), 'time.strftime', 'strftime', (['"""%H:%M:%S"""'], {}), "('%H:%M:%S')\n", (5379, 5391), False, 'from time import strftime\n'), ((5663, 5683), 'time.strftime', 'strftime', (['"""%H:%M:%S"""'], {}), "('%H:%M:%S')\n", (5671, 5683), False, 'from time import strftime\n'), ((5926, 5946), 'time.strftime', 'strftime', (['"""%H:%M:%S"""'], {}), "('%H:%M:%S')\n", (5934, 5946), False, 'from time import strftime\n'), ((6160, 6178), 'numpy.ma.is_masked', 'np.ma.is_masked', (['w'], {}), '(w)\n', (6175, 6178), True, 'import numpy as np\n'), ((7490, 7510), 'time.strftime', 'strftime', (['"""%H:%M:%S"""'], {}), "('%H:%M:%S')\n", (7498, 7510), False, 'from time import strftime\n'), ((8320, 8347), 'tracemalloc.take_snapshot', 'tracemalloc.take_snapshot', ([], {}), '()\n', (8345, 8347), False, 'import tracemalloc\n'), ((8393, 8479), 'tensorflow.print', 'tf.print', (['"""tracemalloc snapshot #2 at iteration """', 'i', '""" has been dumped to """', 'dest'], {}), "('tracemalloc snapshot #2 at iteration ', i, ' has been dumped to ',\n dest)\n", (8401, 8479), True, 'import tensorflow as tf\n'), ((6406, 6417), 'numpy.isnan', 'np.isnan', (['w'], {}), '(w)\n', (6414, 6417), True, 'import numpy as np\n'), ((6699, 6710), 'numpy.isinf', 'np.isinf', (['w'], {}), '(w)\n', (6707, 6710), True, 'import numpy as np\n'), ((8788, 8849), 'tensorflow.print', 'tf.print', (['"""2 snapshot compare has been dumped to """', 'diff_dest'], {}), "('2 snapshot compare has been dumped to ', diff_dest)\n", (8796, 8849), True, 'import tensorflow as tf\n')] |
from datetime import time
import torch
import torch.nn as nn
import torch.nn.functional as F
import copy
import math
import numpy as np
from libcity.model.abstract_traffic_state_model import AbstractTrafficStateModel
def clones(module, N):
"""
Produce N identical layers.
:param module: nn.Module
:param N: int
:return: torch.nn.ModuleList
"""
return nn.ModuleList([copy.deepcopy(module) for _ in range(N)])
def subsequent_mask(size):
"""
mask out subsequent positions.
:param size: int
:return: (1, size, size)
"""
attn_shape = (1, size, size)
subsequent_mask = np.triu(np.ones(attn_shape), k=1).astype('uint8')
return torch.from_numpy(subsequent_mask) == 0 # 1 means reachable; 0 means unreachable
def norm_Adj(W):
"""
compute normalized Adj matrix
Parameters
----------
W: np.ndarray, shape is (N, N), N is the num of vertices
Returns
----------
normalized Adj matrix: (D^hat)^{-1} A^hat; np.ndarray, shape (N, N)
"""
assert W.shape[0] == W.shape[1]
N = W.shape[0]
W = W + np.identity(N) # 为邻接矩阵加上自连接
D = np.diag(1.0 / np.sum(W, axis=1))
norm_Adj_matrix = np.dot(D, W)
return norm_Adj_matrix
class spatialGCN(nn.Module):
    """Graph convolution applied independently at every time step.

    Computes relu(Theta(A @ x_t)) for each time slice of the input, where A
    is a fixed (pre-normalised) adjacency matrix.
    """

    def __init__(self, sym_norm_Adj_matrix, in_channels, out_channels):
        super(spatialGCN, self).__init__()
        self.sym_norm_Adj_matrix = sym_norm_Adj_matrix  # (N, N) normalised adjacency
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.Theta = nn.Linear(in_channels, out_channels, bias=False)

    def forward(self, x):
        """
        spatial graph convolution operation
        :param x: (batch_size, N, T, F_in)
        :return: (batch_size, N, T, F_out)
        """
        B, N, T, F_in = x.shape
        # fold the time axis into the batch axis: (B*T, N, F_in)
        flat = x.permute(0, 2, 1, 3).reshape(-1, N, F_in)
        aggregated = torch.matmul(self.sym_norm_Adj_matrix, flat)
        activated = F.relu(self.Theta(aggregated))
        # restore (B, T, N, F_out), then swap back to (B, N, T, F_out)
        return activated.reshape(B, T, N, self.out_channels).transpose(1, 2)
class GCN(nn.Module):
    """Single graph convolution step: relu(Theta(A @ x))."""

    def __init__(self, sym_norm_Adj_matrix, in_channels, out_channels):
        super(GCN, self).__init__()
        self.sym_norm_Adj_matrix = sym_norm_Adj_matrix  # (N, N) normalised adjacency
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.Theta = nn.Linear(in_channels, out_channels, bias=False)

    def forward(self, x):
        """
        spatial graph convolution operation
        :param x: (batch_size, N, F_in)
        :return: (batch_size, N, F_out)
        """
        # (N,N)(b,N,F_in)->(b,N,F_in), then project to (b,N,F_out)
        neighbourhood = torch.matmul(self.sym_norm_Adj_matrix, x)
        return F.relu(self.Theta(neighbourhood))
class Spatial_Attention_layer(nn.Module):
    """Scaled dot-product self-attention over the node axis, per time step."""

    def __init__(self, dropout=.0):
        super(Spatial_Attention_layer, self).__init__()
        self.dropout = nn.Dropout(p=dropout)

    def forward(self, x):
        """
        :param x: (batch_size, N, T, F_in)
        :return: (batch_size, T, N, N), row-stochastic attention scores
        """
        B, N, T, F_in = x.shape
        flat = x.permute(0, 2, 1, 3).reshape(-1, N, F_in)  # (B*T, N, F_in)
        # (B*T, N, F_in)(B*T, F_in, N) -> (B*T, N, N), scaled by sqrt(F_in)
        scaled = torch.matmul(flat, flat.transpose(1, 2)) / math.sqrt(F_in)
        weights = self.dropout(F.softmax(scaled, dim=-1))  # each row sums to 1
        return weights.reshape(B, T, N, N)
class spatialAttentionGCN(nn.Module):
    """Graph convolution whose adjacency is re-weighted by spatial attention."""

    def __init__(self, sym_norm_Adj_matrix, in_channels, out_channels, dropout=.0):
        super(spatialAttentionGCN, self).__init__()
        self.sym_norm_Adj_matrix = sym_norm_Adj_matrix  # (N, N) normalised adjacency
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.Theta = nn.Linear(in_channels, out_channels, bias=False)
        self.SAt = Spatial_Attention_layer(dropout=dropout)

    def forward(self, x):
        """
        spatial graph convolution operation
        :param x: (batch_size, N, T, F_in)
        :return: (batch_size, N, T, F_out)
        """
        B, N, T, F_in = x.shape
        att = self.SAt(x).reshape(-1, N, N)  # (B*T, N, N)
        flat = x.permute(0, 2, 1, 3).reshape(-1, N, F_in)  # (B*T, N, F_in)
        # element-wise modulate the shared adjacency with the attention maps
        weighted_adj = self.sym_norm_Adj_matrix.mul(att)
        out = F.relu(self.Theta(torch.matmul(weighted_adj, flat)))
        # (B*T, N, F_out) -> (B, T, N, F_out) -> (B, N, T, F_out)
        return out.reshape(B, T, N, self.out_channels).transpose(1, 2)
class spatialAttentionScaledGCN(nn.Module):
    """Graph convolution with scaled spatial attention modulating the adjacency."""

    def __init__(self, sym_norm_Adj_matrix, in_channels, out_channels, dropout=.0):
        super(spatialAttentionScaledGCN, self).__init__()
        self.sym_norm_Adj_matrix = sym_norm_Adj_matrix  # (N, N) normalised adjacency
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.Theta = nn.Linear(in_channels, out_channels, bias=False)
        self.SAt = Spatial_Attention_layer(dropout=dropout)

    def forward(self, x):
        """
        spatial graph convolution operation
        :param x: (batch_size, N, T, F_in)
        :return: (batch_size, N, T, F_out)
        """
        B, N, T, F_in = x.shape
        # scaled self attention: (B, T, N, N), then flattened over batch*time
        att = (self.SAt(x) / math.sqrt(F_in)).reshape(-1, N, N)
        flat = x.permute(0, 2, 1, 3).reshape(-1, N, F_in)  # (B*T, N, F_in)
        weighted_adj = self.sym_norm_Adj_matrix.mul(att)
        out = F.relu(self.Theta(torch.matmul(weighted_adj, flat)))
        # (B*T, N, F_out) -> (B, T, N, F_out) -> (B, N, T, F_out)
        return out.reshape(B, T, N, self.out_channels).transpose(1, 2)
class SpatialPositionalEncoding(nn.Module):
    """Add a learned per-node embedding (optionally GCN-smoothed) to the input."""

    def __init__(self, d_model, num_of_vertices, dropout, gcn=None, smooth_layer_num=0):
        super(SpatialPositionalEncoding, self).__init__()
        self.dropout = nn.Dropout(p=dropout)
        self.embedding = torch.nn.Embedding(num_of_vertices, d_model)
        self.gcn_smooth_layers = None
        if (gcn is not None) and (smooth_layer_num > 0):
            self.gcn_smooth_layers = nn.ModuleList([gcn for _ in range(smooth_layer_num)])

    def forward(self, x):
        """
        :param x: (batch_size, N, T, F_in)
        :return: (batch_size, N, T, F_out)
        """
        num_of_vertices = x.size(1)
        node_ids = torch.LongTensor(torch.arange(num_of_vertices)).to(x.device)  # (N,)
        spatial_embed = self.embedding(node_ids).unsqueeze(0)  # (1, N, d_model)
        if self.gcn_smooth_layers is not None:
            for smoother in self.gcn_smooth_layers:
                spatial_embed = smoother(spatial_embed)  # (1,N,d_model) -> (1,N,d_model)
        # broadcast over batch and time: (B, N, T, d_model) + (1, N, 1, d_model)
        return self.dropout(x + spatial_embed.unsqueeze(2))
class TemporalPositionalEncoding(nn.Module):
    """Add fixed sinusoidal time encodings, optionally gathered by lookup_index."""

    def __init__(self, d_model, dropout, max_len, lookup_index=None):
        super(TemporalPositionalEncoding, self).__init__()
        self.dropout = nn.Dropout(p=dropout)
        self.lookup_index = lookup_index
        self.max_len = max_len
        # precompute the sinusoid table once; even columns sine, odd columns cosine
        # NOTE: assumes d_model is even (index i + 1 below)
        pe = torch.zeros(max_len, d_model)
        for pos in range(max_len):
            for i in range(0, d_model, 2):
                pe[pos, i] = math.sin(pos / (10000 ** ((2 * i) / d_model)))
                pe[pos, i + 1] = math.cos(pos / (10000 ** ((2 * (i + 1)) / d_model)))
        # stored as (1, 1, T_max, d_model) so it broadcasts over batch and nodes;
        # register_buffer keeps it with the module without making it a parameter
        self.register_buffer('pe', pe.unsqueeze(0).unsqueeze(0))

    def forward(self, x):
        """
        :param x: (batch_size, N, T, F_in)
        :return: (batch_size, N, T, F_out)
        """
        if self.lookup_index is not None:
            # gather encodings at the given time indices
            encoded = x + self.pe[:, :, self.lookup_index, :]
        else:
            encoded = x + self.pe[:, :, :x.size(2), :]
        return self.dropout(encoded.detach())
class SublayerConnection(nn.Module):
    """Wrap a sublayer with optional pre-LayerNorm and optional residual add."""

    def __init__(self, size, dropout, residual_connection, use_LayerNorm):
        super(SublayerConnection, self).__init__()
        self.residual_connection = residual_connection
        self.use_LayerNorm = use_LayerNorm
        self.dropout = nn.Dropout(dropout)
        if self.use_LayerNorm:
            self.norm = nn.LayerNorm(size)

    def forward(self, x, sublayer):
        """
        :param x: (batch, N, T, d_model)
        :param sublayer: callable applied to the (optionally normalised) input
        :return: (batch, N, T, d_model)
        """
        if self.residual_connection and self.use_LayerNorm:
            return x + self.dropout(sublayer(self.norm(x)))
        if self.residual_connection:
            return x + self.dropout(sublayer(x))
        if self.use_LayerNorm:
            return self.dropout(sublayer(self.norm(x)))
        # both flags False: the original falls through and returns None
        # without ever invoking the sublayer; preserved as-is
class PositionWiseGCNFeedForward(nn.Module):
    """Feed-forward sublayer realised as a GCN followed by ReLU and dropout."""

    def __init__(self, gcn, dropout=.0):
        super(PositionWiseGCNFeedForward, self).__init__()
        self.gcn = gcn
        self.dropout = nn.Dropout(dropout)

    def forward(self, x):
        """
        :param x: (B, N_nodes, T, F_in)
        :return: (B, N, T, F_out)
        """
        activated = F.relu(self.gcn(x))
        return self.dropout(activated)
def attention(query, key, value, mask=None, dropout=None):
    """Scaled dot-product attention over the last two axes.

    :param query: (batch, N, h, T1, d_k)
    :param key: (batch, N, h, T2, d_k)
    :param value: (batch, N, h, T2, d_k)
    :param mask: optional (batch, 1, 1, T2, T2); zeros mark forbidden positions
    :param dropout: optional nn.Dropout applied to the attention weights
    :return: (context, weights) of shapes (batch, N, h, T1, d_k)
        and (batch, N, h, T1, T2)
    """
    d_k = query.size(-1)
    scores = torch.matmul(query, key.transpose(-2, -1)) / math.sqrt(d_k)
    if mask is not None:
        # a very negative logit drives the softmax weight to ~0
        scores = scores.masked_fill_(mask == 0, -1e9)
    weights = F.softmax(scores, dim=-1)
    if dropout is not None:
        weights = dropout(weights)
    return torch.matmul(weights, value), weights
class MultiHeadAttention(nn.Module):
    """Standard multi-head attention on (batch, N, T, d_model) tensors."""

    def __init__(self, nb_head, d_model, dropout=.0):
        super(MultiHeadAttention, self).__init__()
        assert d_model % nb_head == 0
        self.d_k = d_model // nb_head
        self.h = nb_head
        # 4 projections: W^Q, W^K, W^V and the output projection W^O
        self.linears = clones(nn.Linear(d_model, d_model), 4)
        self.dropout = nn.Dropout(p=dropout)

    def forward(self, query, key, value, mask=None):
        """
        :param query: (batch, N, T, d_model)
        :param key: (batch, N, T, d_model)
        :param value: (batch, N, T, d_model)
        :param mask: (batch, T, T) or None
        :return: (batch, N, T, d_model)
        """
        if mask is not None:
            mask = mask.unsqueeze(1).unsqueeze(1)  # (batch, 1, 1, T, T): one mask for all heads
        nbatches = query.size(0)
        N = query.size(1)

        def split_heads(linear, tensor):
            # (batch, N, T, d_model) -> (batch, N, h, T, d_k)
            return linear(tensor).view(nbatches, N, -1, self.h, self.d_k).transpose(2, 3)

        query = split_heads(self.linears[0], query)
        key = split_heads(self.linears[1], key)
        value = split_heads(self.linears[2], value)
        # context: (batch, N, h, T1, d_k); attn: (batch, N, h, T1, T2)
        context, self.attn = attention(query, key, value, mask=mask, dropout=self.dropout)
        # merge heads back: (batch, N, T1, d_model)
        merged = context.transpose(2, 3).contiguous().view(nbatches, N, -1, self.h * self.d_k)
        return self.linears[-1](merged)
class MultiHeadAttentionAwareTemporalContex_qc_kc(nn.Module):  # key causal; query causal;
    """Multi-head attention whose query AND key projections are causal 1D
    convolutions along the time axis (temporal-context-aware attention).

    The input time axis may be a concatenation of week / day / recent-hour
    segments; the causal convolution is then applied to each segment
    separately so that context never leaks across segment boundaries.
    """
    def __init__(self, nb_head, d_model, num_of_weeks, num_of_days, num_of_hours, points_per_hour, kernel_size=3,
                 dropout=.0):
        """
        :param nb_head: number of attention heads; must divide d_model
        :param d_model: model (feature) dimension
        :param num_of_weeks: number of weekly-period segments in the input
        :param num_of_days: number of daily-period segments in the input
        :param num_of_hours: number of recent (hourly) segments in the input
        :param points_per_hour: time steps per hour in the data
        :param kernel_size: width of the causal temporal convolution
        :param dropout: dropout probability on the attention weights
        """
        super(MultiHeadAttentionAwareTemporalContex_qc_kc, self).__init__()
        assert d_model % nb_head == 0
        self.d_k = d_model // nb_head
        self.h = nb_head
        self.linears = clones(nn.Linear(d_model, d_model), 2)  # 2 linear layers: 1 for W^V, 1 for W^O
        # pad both sides by (kernel_size - 1), then trim the right side after
        # the conv -> a causal convolution (no look-ahead)
        self.padding = kernel_size - 1
        self.conv1Ds_aware_temporal_context = clones(
            nn.Conv2d(d_model, d_model, (1, kernel_size), padding=(0, self.padding)),
            2)  # # 2 causal conv: 1 for query, 1 for key
        self.dropout = nn.Dropout(p=dropout)
        # lengths (in time steps) of the week / day / recent-hour segments
        self.w_length = num_of_weeks * points_per_hour
        self.d_length = num_of_days * points_per_hour
        self.h_length = num_of_hours * points_per_hour
    def forward(self, query, key, value, mask=None, query_multi_segment=False, key_multi_segment=False):
        """
        :param query: (batch, N, T, d_model)
        :param key: (batch, N, T, d_model)
        :param value: (batch, N, T, d_model)
        :param mask: (batch, T, T)
        :param query_multi_segment: whether query has mutiple time segments
        :param key_multi_segment: whether key has mutiple time segments
        if query/key has multiple time segments, causal convolution should be applied separately for each time segment.
        :return: (batch, N, T, d_model)
        """
        if mask is not None:
            mask = mask.unsqueeze(1).unsqueeze(1)  # (batch, 1, 1, T, T), same mask applied to all h heads.
        nbatches = query.size(0)
        N = query.size(1)
        # deal with key and query: temporal conv (batch, N, T, d_model)->permute(0, 3, 1, 2)->(batch, d_model, N,
        # T) -conv->(batch, d_model, N, T)-view->(batch, h, d_k, N, T)-permute(0,3,1,4,2)->(batch, N, h, T, d_k)
        if query_multi_segment and key_multi_segment:
            # convolve every segment of query and key independently, then
            # re-concatenate along the time axis (dim=3 of (batch,N,h,T,d_k))
            query_list = []
            key_list = []
            if self.w_length > 0:
                query_w, key_w = [
                    l(x.permute(0, 3, 1, 2))[:, :, :, :-self.padding].contiguous().view(nbatches, self.h, self.d_k, N,
                                                                                        -1).permute(0, 3, 1, 4, 2) for
                    l, x in zip(self.conv1Ds_aware_temporal_context,
                                (query[:, :, :self.w_length, :], key[:, :, :self.w_length, :]))]
                query_list.append(query_w)
                key_list.append(key_w)
            if self.d_length > 0:
                query_d, key_d = [
                    l(x.permute(0, 3, 1, 2))[:, :, :, :-self.padding].contiguous().view(nbatches, self.h, self.d_k, N,
                                                                                        -1).permute(0, 3, 1, 4, 2) for
                    l, x in zip(self.conv1Ds_aware_temporal_context, (
                        query[:, :, self.w_length:self.w_length + self.d_length, :],
                        key[:, :, self.w_length:self.w_length + self.d_length, :]))]
                query_list.append(query_d)
                key_list.append(key_d)
            if self.h_length > 0:
                query_h, key_h = [
                    l(x.permute(0, 3, 1, 2))[:, :, :, :-self.padding].contiguous().view(nbatches, self.h, self.d_k, N,
                                                                                        -1).permute(0, 3, 1, 4, 2) for
                    l, x in zip(self.conv1Ds_aware_temporal_context, (
                        query[:, :, self.w_length + self.d_length:self.w_length + self.d_length + self.h_length, :],
                        key[:, :, self.w_length + self.d_length:self.w_length + self.d_length + self.h_length, :]))]
                query_list.append(query_h)
                key_list.append(key_h)
            query = torch.cat(query_list, dim=3)
            key = torch.cat(key_list, dim=3)
        elif (not query_multi_segment) and (not key_multi_segment):
            # single segment on both sides: one causal conv each
            query, key = [
                l(x.permute(0, 3, 1, 2))[:, :, :, :-self.padding].contiguous().view(nbatches, self.h, self.d_k, N,
                                                                                    -1).permute(0, 3, 1, 4, 2) for l, x
                in zip(self.conv1Ds_aware_temporal_context, (query, key))]
        elif (not query_multi_segment) and (key_multi_segment):
            # single-segment query (e.g. decoder input), multi-segment key (encoder memory)
            query = self.conv1Ds_aware_temporal_context[0](query.permute(0, 3, 1, 2))[:, :, :,
                    :-self.padding].contiguous().view(nbatches, self.h, self.d_k, N, -1).permute(0, 3, 1, 4, 2)
            key_list = []
            if self.w_length > 0:
                key_w = self.conv1Ds_aware_temporal_context[1](key[:, :, :self.w_length, :].permute(0, 3, 1, 2))[:, :,
                        :, :-self.padding].contiguous().view(nbatches, self.h, self.d_k, N, -1).permute(0, 3, 1, 4, 2)
                key_list.append(key_w)
            if self.d_length > 0:
                key_d = self.conv1Ds_aware_temporal_context[1](
                    key[:, :, self.w_length:self.w_length + self.d_length, :].permute(0, 3, 1, 2))[:, :, :,
                        :-self.padding].contiguous().view(nbatches, self.h, self.d_k, N, -1).permute(0, 3, 1, 4, 2)
                key_list.append(key_d)
            if self.h_length > 0:
                key_h = self.conv1Ds_aware_temporal_context[1](
                    key[:, :, self.w_length + self.d_length:self.w_length + self.d_length + self.h_length, :].permute(0,
                                                                                                                     3,
                                                                                                                     1,
                                                                                                                     2))[
                        :, :, :, :-self.padding].contiguous().view(nbatches, self.h, self.d_k, N, -1).permute(0, 3, 1,
                                                                                                              4, 2)
                key_list.append(key_h)
            key = torch.cat(key_list, dim=3)
        else:
            import sys
            print('error')
            sys.out  # NOTE(review): `sys.out` does not exist (raises AttributeError); likely meant sys.exit() -- confirm
        # deal with value: (batch, N, T, d_model) -linear-> (batch, N, T, d_model) -view-> (batch, N, T, h,
        # d_k) -permute(2,3)-> (batch, N, h, T, d_k)
        value = self.linears[0](value).view(nbatches, N, -1, self.h, self.d_k).transpose(2, 3)
        # apply attention on all the projected vectors in batch
        x, self.attn = attention(query, key, value, mask=mask, dropout=self.dropout)
        # x:(batch, N, h, T1, d_k)
        # attn:(batch, N, h, T1, T2)
        x = x.transpose(2, 3).contiguous()  # (batch, N, T1, h, d_k)
        x = x.view(nbatches, N, -1, self.h * self.d_k)  # (batch, N, T1, d_model)
        return self.linears[-1](x)
class MultiHeadAttentionAwareTemporalContex_qc_kc(nn.Module):  # key causal; query causal;
    """Multi-head attention with causal temporal convolutions on both query and key.

    NOTE(review): this class is a byte-for-byte duplicate of the
    MultiHeadAttentionAwareTemporalContex_qc_kc defined earlier in this file;
    Python keeps this later definition, silently shadowing the first.
    Consider removing one of the two copies.
    """
    def __init__(self, nb_head, d_model, num_of_weeks, num_of_days, num_of_hours, points_per_hour, kernel_size=3,
                 dropout=.0):
        """
        :param nb_head: number of attention heads; must divide d_model
        :param d_model: model (feature) dimension
        :param num_of_weeks: number of weekly-period segments in the input
        :param num_of_days: number of daily-period segments in the input
        :param num_of_hours: number of recent (hourly) segments in the input
        :param points_per_hour: time steps per hour in the data
        :param kernel_size: width of the causal temporal convolution
        :param dropout: dropout probability on the attention weights
        """
        super(MultiHeadAttentionAwareTemporalContex_qc_kc, self).__init__()
        assert d_model % nb_head == 0
        self.d_k = d_model // nb_head
        self.h = nb_head
        self.linears = clones(nn.Linear(d_model, d_model), 2)  # 2 linear layers: 1 for W^V, 1 for W^O
        # pad by (kernel_size - 1) and trim the right side -> causal conv
        self.padding = kernel_size - 1
        self.conv1Ds_aware_temporal_context = clones(
            nn.Conv2d(d_model, d_model, (1, kernel_size), padding=(0, self.padding)),
            2)  # # 2 causal conv: 1 for query, 1 for key
        self.dropout = nn.Dropout(p=dropout)
        # lengths (in time steps) of the week / day / recent-hour segments
        self.w_length = num_of_weeks * points_per_hour
        self.d_length = num_of_days * points_per_hour
        self.h_length = num_of_hours * points_per_hour
    def forward(self, query, key, value, mask=None, query_multi_segment=False, key_multi_segment=False):
        """
        :param query: (batch, N, T, d_model)
        :param key: (batch, N, T, d_model)
        :param value: (batch, N, T, d_model)
        :param mask: (batch, T, T)
        :param query_multi_segment: whether query has mutiple time segments
        :param key_multi_segment: whether key has mutiple time segments
        if query/key has multiple time segments, causal convolution should be applied separately for each time segment.
        :return: (batch, N, T, d_model)
        """
        if mask is not None:
            mask = mask.unsqueeze(1).unsqueeze(1)  # (batch, 1, 1, T, T), same mask applied to all h heads.
        nbatches = query.size(0)
        N = query.size(1)
        # deal with key and query: temporal conv (batch, N, T, d_model)->permute(0, 3, 1, 2)->(batch, d_model, N,
        # T) -conv->(batch, d_model, N, T)-view->(batch, h, d_k, N, T)-permute(0,3,1,4,2)->(batch, N, h, T, d_k)
        if query_multi_segment and key_multi_segment:
            # convolve each segment of query and key separately, then
            # concatenate back along the time axis (dim=3 of (batch,N,h,T,d_k))
            query_list = []
            key_list = []
            if self.w_length > 0:
                query_w, key_w = [
                    l(x.permute(0, 3, 1, 2))[:, :, :, :-self.padding].contiguous().view(nbatches, self.h, self.d_k, N,
                                                                                        -1).permute(0, 3, 1, 4, 2) for
                    l, x in zip(self.conv1Ds_aware_temporal_context,
                                (query[:, :, :self.w_length, :], key[:, :, :self.w_length, :]))]
                query_list.append(query_w)
                key_list.append(key_w)
            if self.d_length > 0:
                query_d, key_d = [
                    l(x.permute(0, 3, 1, 2))[:, :, :, :-self.padding].contiguous().view(nbatches, self.h, self.d_k, N,
                                                                                        -1).permute(0, 3, 1, 4, 2) for
                    l, x in zip(self.conv1Ds_aware_temporal_context, (
                        query[:, :, self.w_length:self.w_length + self.d_length, :],
                        key[:, :, self.w_length:self.w_length + self.d_length, :]))]
                query_list.append(query_d)
                key_list.append(key_d)
            if self.h_length > 0:
                query_h, key_h = [
                    l(x.permute(0, 3, 1, 2))[:, :, :, :-self.padding].contiguous().view(nbatches, self.h, self.d_k, N,
                                                                                        -1).permute(0, 3, 1, 4, 2) for
                    l, x in zip(self.conv1Ds_aware_temporal_context, (
                        query[:, :, self.w_length + self.d_length:self.w_length + self.d_length + self.h_length, :],
                        key[:, :, self.w_length + self.d_length:self.w_length + self.d_length + self.h_length, :]))]
                query_list.append(query_h)
                key_list.append(key_h)
            query = torch.cat(query_list, dim=3)
            key = torch.cat(key_list, dim=3)
        elif (not query_multi_segment) and (not key_multi_segment):
            # single segment on both sides: one causal conv each
            query, key = [
                l(x.permute(0, 3, 1, 2))[:, :, :, :-self.padding].contiguous().view(nbatches, self.h, self.d_k, N,
                                                                                    -1).permute(0, 3, 1, 4, 2) for l, x
                in zip(self.conv1Ds_aware_temporal_context, (query, key))]
        elif (not query_multi_segment) and (key_multi_segment):
            # single-segment query (e.g. decoder input), multi-segment key (encoder memory)
            query = self.conv1Ds_aware_temporal_context[0](query.permute(0, 3, 1, 2))[:, :, :,
                    :-self.padding].contiguous().view(nbatches, self.h, self.d_k, N, -1).permute(0, 3, 1, 4, 2)
            key_list = []
            if self.w_length > 0:
                key_w = self.conv1Ds_aware_temporal_context[1](key[:, :, :self.w_length, :].permute(0, 3, 1, 2))[:, :,
                        :, :-self.padding].contiguous().view(nbatches, self.h, self.d_k, N, -1).permute(0, 3, 1, 4, 2)
                key_list.append(key_w)
            if self.d_length > 0:
                key_d = self.conv1Ds_aware_temporal_context[1](
                    key[:, :, self.w_length:self.w_length + self.d_length, :].permute(0, 3, 1, 2))[:, :, :,
                        :-self.padding].contiguous().view(nbatches, self.h, self.d_k, N, -1).permute(0, 3, 1, 4, 2)
                key_list.append(key_d)
            if self.h_length > 0:
                key_h = self.conv1Ds_aware_temporal_context[1](
                    key[:, :, self.w_length + self.d_length:self.w_length + self.d_length + self.h_length, :].permute(0,
                                                                                                                     3,
                                                                                                                     1,
                                                                                                                     2))[
                        :, :, :, :-self.padding].contiguous().view(nbatches, self.h, self.d_k, N, -1).permute(0, 3, 1,
                                                                                                              4, 2)
                key_list.append(key_h)
            key = torch.cat(key_list, dim=3)
        else:
            import sys
            print('error')
            sys.out  # NOTE(review): `sys.out` does not exist (raises AttributeError); likely meant sys.exit() -- confirm
        # deal with value: (batch, N, T, d_model) -linear-> (batch, N, T, d_model) -view-> (batch, N, T, h,
        # d_k) -permute(2,3)-> (batch, N, h, T, d_k)
        value = self.linears[0](value).view(nbatches, N, -1, self.h, self.d_k).transpose(2, 3)
        # apply attention on all the projected vectors in batch
        x, self.attn = attention(query, key, value, mask=mask, dropout=self.dropout)
        # x:(batch, N, h, T1, d_k)
        # attn:(batch, N, h, T1, T2)
        x = x.transpose(2, 3).contiguous()  # (batch, N, T1, h, d_k)
        x = x.view(nbatches, N, -1, self.h * self.d_k)  # (batch, N, T1, d_model)
        return self.linears[-1](x)
class MultiHeadAttentionAwareTemporalContex_qc_k1d(nn.Module):  # query: causal conv; key 1d conv
    """Multi-head attention where the query projection is a causal temporal
    convolution and the key projection is a same-padded (non-causal) 1D
    temporal convolution.

    :param nb_head: number of attention heads; must divide d_model
    :param d_model: model (feature) dimension
    :param num_of_weeks/num_of_days/num_of_hours: number of weekly / daily /
        recent segments concatenated along the input time axis
    :param points_per_hour: time steps per hour in the data
    :param kernel_size: temporal convolution width
    :param dropout: dropout probability on the attention weights
    """
    def __init__(self, nb_head, d_model, num_of_weeks, num_of_days, num_of_hours, points_per_hour, kernel_size=3,
                 dropout=.0):
        super(MultiHeadAttentionAwareTemporalContex_qc_k1d, self).__init__()
        assert d_model % nb_head == 0
        self.d_k = d_model // nb_head
        self.h = nb_head
        self.linears = clones(nn.Linear(d_model, d_model), 2)  # 2 linear layers: 1 for W^V, 1 for W^O
        # causal conv for the query: pad (kernel_size - 1), trim the right side later
        self.causal_padding = kernel_size - 1
        # symmetric "same" padding for the key conv (no trimming needed)
        self.padding_1D = (kernel_size - 1) // 2
        self.query_conv1Ds_aware_temporal_context = nn.Conv2d(d_model, d_model, (1, kernel_size),
                                                              padding=(0, self.causal_padding))
        self.key_conv1Ds_aware_temporal_context = nn.Conv2d(d_model, d_model, (1, kernel_size),
                                                            padding=(0, self.padding_1D))
        self.dropout = nn.Dropout(p=dropout)
        # lengths (in time steps) of the week / day / recent-hour segments
        self.w_length = num_of_weeks * points_per_hour
        self.d_length = num_of_days * points_per_hour
        self.h_length = num_of_hours * points_per_hour
    def forward(self, query, key, value, mask=None, query_multi_segment=False, key_multi_segment=False):
        """
        :param query: (batch, N, T, d_model)
        :param key: (batch, N, T, d_model)
        :param value: (batch, N, T, d_model)
        :param mask: (batch, T, T)
        :param query_multi_segment: whether query has mutiple time segments
        :param key_multi_segment: whether key has mutiple time segments
        if query/key has multiple time segments, causal convolution should be applied separately for each time segment.
        :return: (batch, N, T, d_model)
        """
        if mask is not None:
            mask = mask.unsqueeze(1).unsqueeze(1)  # (batch, 1, 1, T, T), same mask applied to all h heads.
        nbatches = query.size(0)
        N = query.size(1)
        # deal with key and query: temporal conv (batch, N, T, d_model)->permute(0, 3, 1, 2)->(batch, d_model, N,
        # T) -conv->(batch, d_model, N, T)-view->(batch, h, d_k, N, T)-permute(0,3,1,4,2)->(batch, N, h, T, d_k)
        if query_multi_segment and key_multi_segment:
            # convolve each week/day/hour segment separately, then concatenate
            # along the time axis (dim=3 of (batch, N, h, T, d_k))
            query_list = []
            key_list = []
            if self.w_length > 0:
                query_w = self.query_conv1Ds_aware_temporal_context(query[:, :, :self.w_length, :].permute(0, 3, 1, 2))[
                          :, :, :, :-self.causal_padding].contiguous().view(nbatches, self.h, self.d_k, N, -1).permute(
                    0, 3, 1, 4, 2)
                key_w = self.key_conv1Ds_aware_temporal_context(
                    key[:, :, :self.w_length, :].permute(0, 3, 1, 2)).contiguous().view(nbatches, self.h, self.d_k, N,
                                                                                        -1).permute(0, 3, 1, 4, 2)
                query_list.append(query_w)
                key_list.append(key_w)
            if self.d_length > 0:
                query_d = self.query_conv1Ds_aware_temporal_context(
                    query[:, :, self.w_length:self.w_length + self.d_length, :].permute(0, 3, 1, 2))[:, :, :,
                          :-self.causal_padding].contiguous().view(nbatches, self.h, self.d_k, N, -1).permute(0, 3, 1,
                                                                                                              4, 2)
                key_d = self.key_conv1Ds_aware_temporal_context(
                    key[:, :, self.w_length:self.w_length + self.d_length, :].permute(0, 3, 1, 2)).contiguous().view(
                    nbatches, self.h, self.d_k, N, -1).permute(0, 3, 1, 4, 2)
                query_list.append(query_d)
                key_list.append(key_d)
            if self.h_length > 0:
                query_h = self.query_conv1Ds_aware_temporal_context(
                    query[:, :, self.w_length + self.d_length:self.w_length + self.d_length + self.h_length, :].permute(
                        0, 3, 1, 2))[:, :, :, :-self.causal_padding].contiguous().view(nbatches, self.h, self.d_k, N,
                                                                                       -1).permute(0, 3, 1,
                                                                                                   4, 2)
                key_h = self.key_conv1Ds_aware_temporal_context(
                    key[:, :, self.w_length + self.d_length:self.w_length + self.d_length + self.h_length, :].permute(0,
                                                                                                                     3,
                                                                                                                     1,
                                                                                                                     2)).contiguous().view(
                    nbatches, self.h, self.d_k, N, -1).permute(0, 3, 1, 4, 2)
                query_list.append(query_h)
                key_list.append(key_h)
            query = torch.cat(query_list, dim=3)
            key = torch.cat(key_list, dim=3)
        elif (not query_multi_segment) and (not key_multi_segment):
            query = self.query_conv1Ds_aware_temporal_context(query.permute(0, 3, 1, 2))[:, :, :,
                    :-self.causal_padding].contiguous().view(nbatches, self.h, self.d_k, N, -1).permute(0, 3, 1, 4, 2)
            # NOTE(review): the key conv below is fed `query`, not `key` -- looks
            # like a copy-paste slip; harmless only for self-attention where
            # query and key are the same tensor. Verify against callers.
            key = self.key_conv1Ds_aware_temporal_context(query.permute(0, 3, 1, 2)).contiguous().view(nbatches, self.h,
                                                                                                       self.d_k, N,
                                                                                                       -1).permute(0, 3,
                                                                                                                   1, 4,
                                                                                                                   2)
        elif (not query_multi_segment) and key_multi_segment:
            # single-segment query (e.g. decoder input), multi-segment key (encoder memory)
            query = self.query_conv1Ds_aware_temporal_context(query.permute(0, 3, 1, 2))[:, :, :,
                    :-self.causal_padding].contiguous().view(nbatches, self.h, self.d_k, N, -1).permute(0, 3, 1, 4, 2)
            key_list = []
            if self.w_length > 0:
                key_w = self.key_conv1Ds_aware_temporal_context(
                    key[:, :, :self.w_length, :].permute(0, 3, 1, 2)).contiguous().view(nbatches, self.h, self.d_k, N,
                                                                                        -1).permute(0, 3, 1, 4, 2)
                key_list.append(key_w)
            if self.d_length > 0:
                key_d = self.key_conv1Ds_aware_temporal_context(
                    key[:, :, self.w_length:self.w_length + self.d_length, :].permute(0, 3, 1, 2)).contiguous().view(
                    nbatches, self.h, self.d_k, N, -1).permute(0, 3, 1, 4, 2)
                key_list.append(key_d)
            if self.h_length > 0:
                key_h = self.key_conv1Ds_aware_temporal_context(
                    key[:, :, self.w_length + self.d_length:self.w_length + self.d_length + self.h_length, :].permute(0,
                                                                                                                     3,
                                                                                                                     1,
                                                                                                                     2)).contiguous().view(
                    nbatches, self.h, self.d_k, N, -1).permute(0, 3, 1, 4, 2)
                key_list.append(key_h)
            key = torch.cat(key_list, dim=3)
        else:
            import sys
            print('error')
            sys.out  # NOTE(review): `sys.out` does not exist (raises AttributeError); likely meant sys.exit() -- confirm
        # deal with value: (batch, N, T, d_model) -linear-> (batch, N, T, d_model) -view-> (batch, N, T, h,
        # d_k) -permute(2,3)-> (batch, N, h, T, d_k)
        value = self.linears[0](value).view(nbatches, N, -1, self.h, self.d_k).transpose(2, 3)
        # apply attention on all the projected vectors in batch
        x, self.attn = attention(query, key, value, mask=mask, dropout=self.dropout)
        # x:(batch, N, h, T1, d_k)
        # attn:(batch, N, h, T1, T2)
        x = x.transpose(2, 3).contiguous()  # (batch, N, T1, h, d_k)
        x = x.view(nbatches, N, -1, self.h * self.d_k)  # (batch, N, T1, d_model)
        return self.linears[-1](x)
class EncoderDecoder(nn.Module):
    """Generic encoder-decoder wrapper: embed -> encode -> decode -> predict."""

    def __init__(self, encoder, decoder, src_dense, trg_dense, generator, DEVICE):
        super(EncoderDecoder, self).__init__()
        self.encoder = encoder
        self.decoder = decoder
        self.src_embed = src_dense
        self.trg_embed = trg_dense
        self.prediction_generator = generator
        self.to(DEVICE)

    def forward(self, src, trg):
        """
        :param src: (batch_size, N, T_in, F_in)
        :param trg: (batch, N, T_out, F_out)
        """
        memory = self.encode(src)  # (batch_size, N, T_in, d_model)
        return self.decode(trg, memory)

    def encode(self, src):
        """Embed then encode the source: src is (batch_size, N, T_in, F_in)."""
        return self.encoder(self.src_embed(src))

    def decode(self, trg, encoder_output):
        """Embed the target, decode against the encoder output, project to predictions."""
        decoded = self.decoder(self.trg_embed(trg), encoder_output)
        return self.prediction_generator(decoded)
class EncoderLayer(nn.Module):
    """One encoder block: temporal self-attention followed by a GCN feed-forward."""

    def __init__(self, size, self_attn, gcn, dropout, residual_connection=True, use_LayerNorm=True):
        super(EncoderLayer, self).__init__()
        self.residual_connection = residual_connection
        self.use_LayerNorm = use_LayerNorm
        self.self_attn = self_attn
        self.feed_forward_gcn = gcn
        if residual_connection or use_LayerNorm:
            # two sublayer wrappers: one for attention, one for the feed-forward
            self.sublayer = clones(SublayerConnection(size, dropout, residual_connection, use_LayerNorm), 2)
        self.size = size

    def forward(self, x):
        """
        :param x: src: (batch_size, N, T_in, F_in)
        :return: (batch_size, N, T_in, F_in)
        """
        def attend(t):
            # encoder input carries all week/day/hour segments on both sides
            return self.self_attn(t, t, t, query_multi_segment=True, key_multi_segment=True)

        if not (self.residual_connection or self.use_LayerNorm):
            # plain stacking without residual/norm wrappers
            return self.feed_forward_gcn(attend(x))
        x = self.sublayer[0](x, attend)
        return self.sublayer[1](x, self.feed_forward_gcn)
class Encoder(nn.Module):
    """Stack of N identical encoder layers with a final LayerNorm."""

    def __init__(self, layer, N):
        """
        :param layer: EncoderLayer, the prototype layer to replicate
        :param N: int, number of EncoderLayers
        """
        super(Encoder, self).__init__()
        self.layers = clones(layer, N)
        self.norm = nn.LayerNorm(layer.size)

    def forward(self, x):
        """
        :param x: src: (batch_size, N, T_in, F_in)
        :return: (batch_size, N, T_in, F_in)
        """
        hidden = x
        for enc_layer in self.layers:
            hidden = enc_layer(hidden)
        return self.norm(hidden)
class DecoderLayer(nn.Module):
    """One decoder block: masked self-attention, encoder-decoder attention, GCN FFN."""

    def __init__(self, size, self_attn, src_attn, gcn, dropout, residual_connection=True, use_LayerNorm=True):
        super(DecoderLayer, self).__init__()
        self.size = size
        self.self_attn = self_attn
        self.src_attn = src_attn
        self.feed_forward_gcn = gcn
        self.residual_connection = residual_connection
        self.use_LayerNorm = use_LayerNorm
        if residual_connection or use_LayerNorm:
            # three sublayer wrappers: self-attn, cross-attn, feed-forward
            self.sublayer = clones(SublayerConnection(size, dropout, residual_connection, use_LayerNorm), 3)

    def forward(self, x, memory):
        """
        :param x: (batch_size, N, T', F_in)
        :param memory: (batch_size, N, T, F_in)
        :return: (batch_size, N, T', F_in)
        """
        m = memory
        # causal mask: each output step attends only to itself and earlier steps
        tgt_mask = subsequent_mask(x.size(-2)).to(m.device)  # (1, T', T')

        def self_attend(t):
            return self.self_attn(t, t, t, tgt_mask, query_multi_segment=False, key_multi_segment=False)

        def cross_attend(t):
            # encoder memory carries multiple time segments on the key side
            return self.src_attn(t, m, m, query_multi_segment=False, key_multi_segment=True)

        if self.residual_connection or self.use_LayerNorm:
            x = self.sublayer[0](x, self_attend)  # (batch, N, T', d_model)
            x = self.sublayer[1](x, cross_attend)  # (batch, N, T', d_model)
            return self.sublayer[2](x, self.feed_forward_gcn)  # (batch, N, T', d_model)
        return self.feed_forward_gcn(cross_attend(self_attend(x)))
class Decoder(nn.Module):
    """Stack of N identical decoder layers with a final LayerNorm."""

    def __init__(self, layer, N):
        super(Decoder, self).__init__()
        self.layers = clones(layer, N)
        self.norm = nn.LayerNorm(layer.size)

    def forward(self, x, memory):
        """
        :param x: (batch, N, T', d_model)
        :param memory: (batch, N, T, d_model)
        :return: (batch, N, T', d_model)
        """
        hidden = x
        for dec_layer in self.layers:
            hidden = dec_layer(hidden, memory)
        return self.norm(hidden)
def search_index(max_len, num_of_depend, num_for_predict, points_per_hour, units):
    """Collect encoder-input time indices for each temporal dependency segment.

    Parameters
    ----------
    max_len: int, length of all encoder input
    num_of_depend: int, number of dependent segments to look back over
    num_for_predict: int, the number of points will be predicted for each sample
    points_per_hour: int, number of points per hour, depends on data
    units: int, week: 7 * 24, day: 24, recent(hour): 1

    Returns
    ----------
    list[int]: for each dependency i (1..num_of_depend), the indices
    start_idx .. start_idx + num_for_predict - 1, flattened into one list,
    where start_idx = max_len - points_per_hour * units * i
    """
    indices = []
    for depend in range(1, num_of_depend + 1):
        start_idx = max_len - points_per_hour * units * depend
        indices.extend(start_idx + offset for offset in range(num_for_predict))
    return indices
class MultiHeadAttentionAwareTemporalContex_q1d_k1d(nn.Module): # 1d conv on query, 1d conv on key
def __init__(self, nb_head, d_model, num_of_weeks, num_of_days, num_of_hours, points_per_hour, kernel_size=3,
dropout=.0):
super(MultiHeadAttentionAwareTemporalContex_q1d_k1d, self).__init__()
assert d_model % nb_head == 0
self.d_k = d_model // nb_head
self.h = nb_head
self.linears = clones(nn.Linear(d_model, d_model), 2) # 2 linear layers: 1 for W^V, 1 for W^O
self.padding = (kernel_size - 1) // 2
self.conv1Ds_aware_temporal_context = clones(
nn.Conv2d(d_model, d_model, (1, kernel_size), padding=(0, self.padding)),
2) # # 2 causal conv: 1 for query, 1 for key
self.dropout = nn.Dropout(p=dropout)
self.w_length = num_of_weeks * points_per_hour
self.d_length = num_of_days * points_per_hour
self.h_length = num_of_hours * points_per_hour
def forward(self, query, key, value, mask=None, query_multi_segment=False, key_multi_segment=False):
"""
:param query: (batch, N, T, d_model)
:param key: (batch, N, T, d_model)
:param value: (batch, N, T, d_model)
:param mask: (batch, T, T)
:param query_multi_segment: whether query has mutiple time segments
:param key_multi_segment: whether key has mutiple time segments
if query/key has multiple time segments, causal convolution should be applied separately for each time segment.
:return: (batch, N, T, d_model)
"""
if mask is not None:
mask = mask.unsqueeze(1).unsqueeze(1) # (batch, 1, 1, T, T), same mask applied to all h heads.
nbatches = query.size(0)
N = query.size(1)
# deal with key and query: temporal conv (batch, N, T, d_model)->permute(0, 3, 1, 2)->(batch, d_model, N,
# T) -conv->(batch, d_model, N, T)-view->(batch, h, d_k, N, T)-permute(0,3,1,4,2)->(batch, N, h, T, d_k)
if query_multi_segment and key_multi_segment:
query_list = []
key_list = []
if self.w_length > 0:
query_w, key_w = [
l(x.permute(0, 3, 1, 2)).contiguous().view(nbatches, self.h, self.d_k, N, -1).permute(0, 3, 1, 4, 2)
for l, x in zip(self.conv1Ds_aware_temporal_context,
(query[:, :, :self.w_length, :], key[:, :, :self.w_length, :]))]
query_list.append(query_w)
key_list.append(key_w)
if self.d_length > 0:
query_d, key_d = [
l(x.permute(0, 3, 1, 2)).contiguous().view(nbatches, self.h, self.d_k, N, -1).permute(0, 3, 1, 4, 2)
for l, x in zip(self.conv1Ds_aware_temporal_context, (
query[:, :, self.w_length:self.w_length + self.d_length, :],
key[:, :, self.w_length:self.w_length + self.d_length, :]))]
query_list.append(query_d)
key_list.append(key_d)
if self.h_length > 0:
query_h, key_h = [
l(x.permute(0, 3, 1, 2)).contiguous().view(nbatches, self.h, self.d_k, N, -1).permute(0, 3, 1, 4, 2)
for l, x in zip(self.conv1Ds_aware_temporal_context, (
query[:, :, self.w_length + self.d_length:self.w_length + self.d_length + self.h_length, :],
key[:, :, self.w_length + self.d_length:self.w_length + self.d_length + self.h_length, :]))]
query_list.append(query_h)
key_list.append(key_h)
query = torch.cat(query_list, dim=3)
key = torch.cat(key_list, dim=3)
elif (not query_multi_segment) and (not key_multi_segment):
query, key = [
l(x.permute(0, 3, 1, 2)).contiguous().view(nbatches, self.h, self.d_k, N, -1).permute(0, 3, 1, 4, 2) for
l, x in zip(self.conv1Ds_aware_temporal_context, (query, key))]
elif (not query_multi_segment) and (key_multi_segment):
query = self.conv1Ds_aware_temporal_context[0](query.permute(0, 3, 1, 2)).contiguous().view(nbatches,
self.h,
self.d_k, N,
-1).permute(0,
3,
1,
4,
2)
key_list = []
if self.w_length > 0:
key_w = self.conv1Ds_aware_temporal_context[1](
key[:, :, :self.w_length, :].permute(0, 3, 1, 2)).contiguous().view(nbatches, self.h, self.d_k, N,
-1).permute(0, 3, 1, 4, 2)
key_list.append(key_w)
if self.d_length > 0:
key_d = self.conv1Ds_aware_temporal_context[1](
key[:, :, self.w_length:self.w_length + self.d_length, :].permute(0, 3, 1, 2)).contiguous().view(
nbatches, self.h, self.d_k, N, -1).permute(0, 3, 1, 4, 2)
key_list.append(key_d)
if self.h_length > 0:
key_h = self.conv1Ds_aware_temporal_context[1](
key[:, :, self.w_length + self.d_length:self.w_length + self.d_length + self.h_length, :].permute(0,
3,
1,
2)).contiguous().view(
nbatches, self.h, self.d_k, N, -1).permute(0, 3, 1, 4, 2)
key_list.append(key_h)
key = torch.cat(key_list, dim=3)
else:
import sys
print('error')
sys.out
# deal with value: (batch, N, T, d_model) -linear-> (batch, N, T, d_model) -view-> (batch, N, T, h,
# d_k) -permute(2,3)-> (batch, N, h, T, d_k)
value = self.linears[0](value).view(nbatches, N, -1, self.h, self.d_k).transpose(2, 3)
# apply attention on all the projected vectors in batch
x, self.attn = attention(query, key, value, mask=mask, dropout=self.dropout)
# x:(batch, N, h, T1, d_k)
# attn:(batch, N, h, T1, T2)
x = x.transpose(2, 3).contiguous() # (batch, N, T1, h, d_k)
x = x.view(nbatches, N, -1, self.h * self.d_k) # (batch, N, T1, d_model)
return self.linears[-1](x)
def make_model(DEVICE, num_layers, encoder_input_size, decoder_output_size, d_model, adj_mx, nb_head, num_of_weeks,
num_of_days, num_of_hours, points_per_hour, num_for_predict, dropout=.0, aware_temporal_context=True,
ScaledSAt=True, SE=True, TE=True, kernel_size=3, smooth_layer_num=0, residual_connection=True,
use_LayerNorm=True):
# LR rate means: graph Laplacian Regularization
c = copy.deepcopy
print(adj_mx)
norm_Adj_matrix = torch.from_numpy(norm_Adj(adj_mx)).type(torch.FloatTensor).to(DEVICE) # 通过邻接矩阵,构造归一化的拉普拉斯矩阵
num_of_vertices = norm_Adj_matrix.shape[0]
src_dense = nn.Linear(encoder_input_size, d_model)
if ScaledSAt: # employ spatial self attention
position_wise_gcn = PositionWiseGCNFeedForward(spatialAttentionScaledGCN(norm_Adj_matrix, d_model, d_model),
dropout=dropout)
else: # 不带attention
position_wise_gcn = PositionWiseGCNFeedForward(spatialGCN(norm_Adj_matrix, d_model, d_model), dropout=dropout)
trg_dense = nn.Linear(decoder_output_size, d_model) # target input projection
# encoder temporal position embedding
max_len = max(num_of_weeks * 7 * 24 * num_for_predict, num_of_days * 24 * num_for_predict,
num_of_hours * num_for_predict)
w_index = search_index(max_len, num_of_weeks, num_for_predict, points_per_hour, 7 * 24)
d_index = search_index(max_len, num_of_days, num_for_predict, points_per_hour, 24)
h_index = search_index(max_len, num_of_hours, num_for_predict, points_per_hour, 1)
en_lookup_index = w_index + d_index + h_index
print('TemporalPositionalEncoding max_len:', max_len)
print('w_index:', w_index)
print('d_index:', d_index)
print('h_index:', h_index)
print('en_lookup_index:', en_lookup_index)
if aware_temporal_context: # employ temporal trend-aware attention
attn_ss = MultiHeadAttentionAwareTemporalContex_q1d_k1d(nb_head, d_model, num_of_weeks, num_of_days,
num_of_hours, num_for_predict, kernel_size,
dropout=dropout) # encoder的trend-aware attention用一维卷积
attn_st = MultiHeadAttentionAwareTemporalContex_qc_k1d(nb_head, d_model, num_of_weeks, num_of_days,
num_of_hours, num_for_predict, kernel_size,
dropout=dropout)
att_tt = MultiHeadAttentionAwareTemporalContex_qc_kc(nb_head, d_model, num_of_weeks, num_of_days, num_of_hours,
num_for_predict, kernel_size,
dropout=dropout) # decoder的trend-aware attention用因果卷积
else: # employ traditional self attention
attn_ss = MultiHeadAttention(nb_head, d_model, dropout=dropout)
attn_st = MultiHeadAttention(nb_head, d_model, dropout=dropout)
att_tt = MultiHeadAttention(nb_head, d_model, dropout=dropout)
if SE and TE:
encode_temporal_position = TemporalPositionalEncoding(d_model, dropout, max_len,
en_lookup_index) # decoder temporal position embedding
decode_temporal_position = TemporalPositionalEncoding(d_model, dropout, num_for_predict)
spatial_position = SpatialPositionalEncoding(d_model, num_of_vertices, dropout,
GCN(norm_Adj_matrix, d_model, d_model),
smooth_layer_num=smooth_layer_num)
encoder_embedding = nn.Sequential(src_dense, c(encode_temporal_position), c(spatial_position))
decoder_embedding = nn.Sequential(trg_dense, c(decode_temporal_position), c(spatial_position))
elif SE and (not TE):
spatial_position = SpatialPositionalEncoding(d_model, num_of_vertices, dropout,
GCN(norm_Adj_matrix, d_model, d_model),
smooth_layer_num=smooth_layer_num)
encoder_embedding = nn.Sequential(src_dense, c(spatial_position))
decoder_embedding = nn.Sequential(trg_dense, c(spatial_position))
elif (not SE) and (TE):
encode_temporal_position = TemporalPositionalEncoding(d_model, dropout, max_len,
en_lookup_index) # decoder temporal position embedding
decode_temporal_position = TemporalPositionalEncoding(d_model, dropout, num_for_predict)
encoder_embedding = nn.Sequential(src_dense, c(encode_temporal_position))
decoder_embedding = nn.Sequential(trg_dense, c(decode_temporal_position))
else:
encoder_embedding = nn.Sequential(src_dense)
decoder_embedding = nn.Sequential(trg_dense)
encoderLayer = EncoderLayer(d_model, attn_ss, c(position_wise_gcn), dropout,
residual_connection=residual_connection, use_LayerNorm=use_LayerNorm)
encoder = Encoder(encoderLayer, num_layers)
decoderLayer = DecoderLayer(d_model, att_tt, attn_st, c(position_wise_gcn), dropout,
residual_connection=residual_connection, use_LayerNorm=use_LayerNorm)
decoder = Decoder(decoderLayer, num_layers)
generator = nn.Linear(d_model, decoder_output_size)
model = EncoderDecoder(encoder,
decoder,
encoder_embedding,
decoder_embedding,
generator,
DEVICE)
# param init
for p in model.parameters():
if p.dim() > 1:
nn.init.xavier_uniform_(p)
return model
class ASTGNN(AbstractTrafficStateModel):
def __init__(self, config, data_feature):
super().__init__(config, data_feature)
self.Device = config.get('device', torch.device('cpu'))
self.num_layers = config.get('num_layers')
self.encoder_input_size = config.get('encoder_input_size')
self.decoder_output_size = config.get('decoder_input_size')
self.d_model = config.get('d_model')
self.adj_mx = self.data_feature.get('adj_mx')
self.nb_head = config.get('nb_head')
self.num_of_weeks = config.get('num_of_weeks')
self.num_of_days = config.get('num_of_days')
self.num_of_hours = config.get('num_of_hours')
self.points_per_hour = config.get('points_per_hour')
self.num_for_predict = config.get('output_window',12)
self.dropout = config.get('dropout')
self.aware_temporal_context = config.get('aware_temporal_context')
self.ScaledSAt = config.get('ScaledSAt')
self.SE = config.get('SE')
self.TE = config.get('TE')
self.kernel_size = config.get('kernel_size')
self.smooth_layer_num = config.get('smooth_layer_num')
self.residual_connection = config.get('residual_connection')
self.use_LayerNorm = config.get('use_LayerNorm')
self.model = make_model(self.Device, self.num_layers, self.encoder_input_size, self.decoder_output_size,
self.d_model, self.adj_mx, self.nb_head, self.num_of_weeks, self.num_of_days,
self.num_of_hours, self.points_per_hour, self.num_for_predict, self.dropout,
self.aware_temporal_context,
self.ScaledSAt, self.SE, self.TE, self.kernel_size, self.smooth_layer_num,
self.residual_connection, self.use_LayerNorm)
self._init_parameters()
self.criterion = nn.L1Loss().to(self.Device)
def _init_parameters(self):
for p in self.model.parameters():
if p.dim() > 1:
nn.init.xavier_uniform_(p)
else:
nn.init.uniform_(p)
def forward(self, batch):
encoder_inputs = batch['encoder_inputs']
decoder_inputs = batch['decoder_inputs']
return self.model(encoder_inputs,decoder_inputs)
def calculate_loss(self, batch):
y_true = batch['y']
y_predicted = self.predict(batch)
return self.criterion(y_predicted,y_true)
def predict(self, batch):
return self.forward(batch)
| [
"torch.nn.Dropout",
"numpy.sum",
"torch.nn.Embedding",
"torch.nn.init.uniform_",
"torch.cat",
"numpy.ones",
"torch.arange",
"torch.device",
"numpy.identity",
"torch.nn.LayerNorm",
"math.cos",
"torch.nn.Linear",
"torch.zeros",
"torch.matmul",
"copy.deepcopy",
"math.sqrt",
"torch.nn.in... | [((1188, 1200), 'numpy.dot', 'np.dot', (['D', 'W'], {}), '(D, W)\n', (1194, 1200), True, 'import numpy as np\n'), ((10628, 10653), 'torch.nn.functional.softmax', 'F.softmax', (['scores'], {'dim': '(-1)'}), '(scores, dim=-1)\n', (10637, 10653), True, 'import torch.nn.functional as F\n'), ((49376, 49414), 'torch.nn.Linear', 'nn.Linear', (['encoder_input_size', 'd_model'], {}), '(encoder_input_size, d_model)\n', (49385, 49414), True, 'import torch.nn as nn\n'), ((49817, 49856), 'torch.nn.Linear', 'nn.Linear', (['decoder_output_size', 'd_model'], {}), '(decoder_output_size, d_model)\n', (49826, 49856), True, 'import torch.nn as nn\n'), ((54225, 54264), 'torch.nn.Linear', 'nn.Linear', (['d_model', 'decoder_output_size'], {}), '(d_model, decoder_output_size)\n', (54234, 54264), True, 'import torch.nn as nn\n'), ((685, 718), 'torch.from_numpy', 'torch.from_numpy', (['subsequent_mask'], {}), '(subsequent_mask)\n', (701, 718), False, 'import torch\n'), ((1096, 1110), 'numpy.identity', 'np.identity', (['N'], {}), '(N)\n', (1107, 1110), True, 'import numpy as np\n'), ((1541, 1589), 'torch.nn.Linear', 'nn.Linear', (['in_channels', 'out_channels'], {'bias': '(False)'}), '(in_channels, out_channels, bias=False)\n', (1550, 1589), True, 'import torch.nn as nn\n'), ((2424, 2472), 'torch.nn.Linear', 'nn.Linear', (['in_channels', 'out_channels'], {'bias': '(False)'}), '(in_channels, out_channels, bias=False)\n', (2433, 2472), True, 'import torch.nn as nn\n'), ((2976, 2997), 'torch.nn.Dropout', 'nn.Dropout', ([], {'p': 'dropout'}), '(p=dropout)\n', (2986, 2997), True, 'import torch.nn as nn\n'), ((3959, 4007), 'torch.nn.Linear', 'nn.Linear', (['in_channels', 'out_channels'], {'bias': '(False)'}), '(in_channels, out_channels, bias=False)\n', (3968, 4007), True, 'import torch.nn as nn\n'), ((5225, 5273), 'torch.nn.Linear', 'nn.Linear', (['in_channels', 'out_channels'], {'bias': '(False)'}), '(in_channels, out_channels, bias=False)\n', (5234, 5273), True, 'import 
torch.nn as nn\n'), ((6443, 6464), 'torch.nn.Dropout', 'nn.Dropout', ([], {'p': 'dropout'}), '(p=dropout)\n', (6453, 6464), True, 'import torch.nn as nn\n'), ((6490, 6534), 'torch.nn.Embedding', 'torch.nn.Embedding', (['num_of_vertices', 'd_model'], {}), '(num_of_vertices, d_model)\n', (6508, 6534), False, 'import torch\n'), ((7567, 7588), 'torch.nn.Dropout', 'nn.Dropout', ([], {'p': 'dropout'}), '(p=dropout)\n', (7577, 7588), True, 'import torch.nn as nn\n'), ((7737, 7766), 'torch.zeros', 'torch.zeros', (['max_len', 'd_model'], {}), '(max_len, d_model)\n', (7748, 7766), False, 'import torch\n'), ((9037, 9056), 'torch.nn.Dropout', 'nn.Dropout', (['dropout'], {}), '(dropout)\n', (9047, 9056), True, 'import torch.nn as nn\n'), ((9858, 9877), 'torch.nn.Dropout', 'nn.Dropout', (['dropout'], {}), '(dropout)\n', (9868, 9877), True, 'import torch.nn as nn\n'), ((10454, 10468), 'math.sqrt', 'math.sqrt', (['d_k'], {}), '(d_k)\n', (10463, 10468), False, 'import math\n'), ((10763, 10790), 'torch.matmul', 'torch.matmul', (['p_attn', 'value'], {}), '(p_attn, value)\n', (10775, 10790), False, 'import torch\n'), ((11178, 11199), 'torch.nn.Dropout', 'nn.Dropout', ([], {'p': 'dropout'}), '(p=dropout)\n', (11188, 11199), True, 'import torch.nn as nn\n'), ((13434, 13455), 'torch.nn.Dropout', 'nn.Dropout', ([], {'p': 'dropout'}), '(p=dropout)\n', (13444, 13455), True, 'import torch.nn as nn\n'), ((20797, 20818), 'torch.nn.Dropout', 'nn.Dropout', ([], {'p': 'dropout'}), '(p=dropout)\n', (20807, 20818), True, 'import torch.nn as nn\n'), ((27812, 27891), 'torch.nn.Conv2d', 'nn.Conv2d', (['d_model', 'd_model', '(1, kernel_size)'], {'padding': '(0, self.causal_padding)'}), '(d_model, d_model, (1, kernel_size), padding=(0, self.causal_padding))\n', (27821, 27891), True, 'import torch.nn as nn\n'), ((28004, 28079), 'torch.nn.Conv2d', 'nn.Conv2d', (['d_model', 'd_model', '(1, kernel_size)'], {'padding': '(0, self.padding_1D)'}), '(d_model, d_model, (1, kernel_size), padding=(0, 
self.padding_1D))\n', (28013, 28079), True, 'import torch.nn as nn\n'), ((28163, 28184), 'torch.nn.Dropout', 'nn.Dropout', ([], {'p': 'dropout'}), '(p=dropout)\n', (28173, 28184), True, 'import torch.nn as nn\n'), ((38084, 38108), 'torch.nn.LayerNorm', 'nn.LayerNorm', (['layer.size'], {}), '(layer.size)\n', (38096, 38108), True, 'import torch.nn as nn\n'), ((40385, 40409), 'torch.nn.LayerNorm', 'nn.LayerNorm', (['layer.size'], {}), '(layer.size)\n', (40397, 40409), True, 'import torch.nn as nn\n'), ((42200, 42221), 'torch.nn.Dropout', 'nn.Dropout', ([], {'p': 'dropout'}), '(p=dropout)\n', (42210, 42221), True, 'import torch.nn as nn\n'), ((397, 418), 'copy.deepcopy', 'copy.deepcopy', (['module'], {}), '(module)\n', (410, 418), False, 'import copy\n'), ((1147, 1164), 'numpy.sum', 'np.sum', (['W'], {'axis': '(1)'}), '(W, axis=1)\n', (1153, 1164), True, 'import numpy as np\n'), ((3357, 3379), 'math.sqrt', 'math.sqrt', (['in_channels'], {}), '(in_channels)\n', (3366, 3379), False, 'import math\n'), ((3454, 3478), 'torch.nn.functional.softmax', 'F.softmax', (['score'], {'dim': '(-1)'}), '(score, dim=-1)\n', (3463, 3478), True, 'import torch.nn.functional as F\n'), ((5635, 5657), 'math.sqrt', 'math.sqrt', (['in_channels'], {}), '(in_channels)\n', (5644, 5657), False, 'import math\n'), ((9112, 9130), 'torch.nn.LayerNorm', 'nn.LayerNorm', (['size'], {}), '(size)\n', (9124, 9130), True, 'import torch.nn as nn\n'), ((11123, 11150), 'torch.nn.Linear', 'nn.Linear', (['d_model', 'd_model'], {}), '(d_model, d_model)\n', (11132, 11150), True, 'import torch.nn as nn\n'), ((13099, 13126), 'torch.nn.Linear', 'nn.Linear', (['d_model', 'd_model'], {}), '(d_model, d_model)\n', (13108, 13126), True, 'import torch.nn as nn\n'), ((13278, 13350), 'torch.nn.Conv2d', 'nn.Conv2d', (['d_model', 'd_model', '(1, kernel_size)'], {'padding': '(0, self.padding)'}), '(d_model, d_model, (1, kernel_size), padding=(0, self.padding))\n', (13287, 13350), True, 'import torch.nn as nn\n'), ((16665, 16693), 
'torch.cat', 'torch.cat', (['query_list'], {'dim': '(3)'}), '(query_list, dim=3)\n', (16674, 16693), False, 'import torch\n'), ((16712, 16738), 'torch.cat', 'torch.cat', (['key_list'], {'dim': '(3)'}), '(key_list, dim=3)\n', (16721, 16738), False, 'import torch\n'), ((20462, 20489), 'torch.nn.Linear', 'nn.Linear', (['d_model', 'd_model'], {}), '(d_model, d_model)\n', (20471, 20489), True, 'import torch.nn as nn\n'), ((20641, 20713), 'torch.nn.Conv2d', 'nn.Conv2d', (['d_model', 'd_model', '(1, kernel_size)'], {'padding': '(0, self.padding)'}), '(d_model, d_model, (1, kernel_size), padding=(0, self.padding))\n', (20650, 20713), True, 'import torch.nn as nn\n'), ((24028, 24056), 'torch.cat', 'torch.cat', (['query_list'], {'dim': '(3)'}), '(query_list, dim=3)\n', (24037, 24056), False, 'import torch\n'), ((24075, 24101), 'torch.cat', 'torch.cat', (['key_list'], {'dim': '(3)'}), '(key_list, dim=3)\n', (24084, 24101), False, 'import torch\n'), ((27591, 27618), 'torch.nn.Linear', 'nn.Linear', (['d_model', 'd_model'], {}), '(d_model, d_model)\n', (27600, 27618), True, 'import torch.nn as nn\n'), ((32282, 32310), 'torch.cat', 'torch.cat', (['query_list'], {'dim': '(3)'}), '(query_list, dim=3)\n', (32291, 32310), False, 'import torch\n'), ((32329, 32355), 'torch.cat', 'torch.cat', (['key_list'], {'dim': '(3)'}), '(key_list, dim=3)\n', (32338, 32355), False, 'import torch\n'), ((41856, 41883), 'torch.nn.Linear', 'nn.Linear', (['d_model', 'd_model'], {}), '(d_model, d_model)\n', (41865, 41883), True, 'import torch.nn as nn\n'), ((42043, 42115), 'torch.nn.Conv2d', 'nn.Conv2d', (['d_model', 'd_model', '(1, kernel_size)'], {'padding': '(0, self.padding)'}), '(d_model, d_model, (1, kernel_size), padding=(0, self.padding))\n', (42052, 42115), True, 'import torch.nn as nn\n'), ((45096, 45124), 'torch.cat', 'torch.cat', (['query_list'], {'dim': '(3)'}), '(query_list, dim=3)\n', (45105, 45124), False, 'import torch\n'), ((45143, 45169), 'torch.cat', 'torch.cat', (['key_list'], {'dim': 
'(3)'}), '(key_list, dim=3)\n', (45152, 45169), False, 'import torch\n'), ((54589, 54615), 'torch.nn.init.xavier_uniform_', 'nn.init.xavier_uniform_', (['p'], {}), '(p)\n', (54612, 54615), True, 'import torch.nn as nn\n'), ((54813, 54832), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (54825, 54832), False, 'import torch\n'), ((632, 651), 'numpy.ones', 'np.ones', (['attn_shape'], {}), '(attn_shape)\n', (639, 651), True, 'import numpy as np\n'), ((2681, 2722), 'torch.matmul', 'torch.matmul', (['self.sym_norm_Adj_matrix', 'x'], {}), '(self.sym_norm_Adj_matrix, x)\n', (2693, 2722), False, 'import torch\n'), ((7874, 7916), 'math.sin', 'math.sin', (['(pos / 10000 ** (2 * i / d_model))'], {}), '(pos / 10000 ** (2 * i / d_model))\n', (7882, 7916), False, 'import math\n'), ((7954, 8002), 'math.cos', 'math.cos', (['(pos / 10000 ** (2 * (i + 1) / d_model))'], {}), '(pos / 10000 ** (2 * (i + 1) / d_model))\n', (7962, 8002), False, 'import math\n'), ((53656, 53680), 'torch.nn.Sequential', 'nn.Sequential', (['src_dense'], {}), '(src_dense)\n', (53669, 53680), True, 'import torch.nn as nn\n'), ((53709, 53733), 'torch.nn.Sequential', 'nn.Sequential', (['trg_dense'], {}), '(trg_dense)\n', (53722, 53733), True, 'import torch.nn as nn\n'), ((56566, 56577), 'torch.nn.L1Loss', 'nn.L1Loss', ([], {}), '()\n', (56575, 56577), True, 'import torch.nn as nn\n'), ((56713, 56739), 'torch.nn.init.xavier_uniform_', 'nn.init.xavier_uniform_', (['p'], {}), '(p)\n', (56736, 56739), True, 'import torch.nn as nn\n'), ((56774, 56793), 'torch.nn.init.uniform_', 'nn.init.uniform_', (['p'], {}), '(p)\n', (56790, 56793), True, 'import torch.nn as nn\n'), ((6950, 6979), 'torch.arange', 'torch.arange', (['num_of_vertices'], {}), '(num_of_vertices)\n', (6962, 6979), False, 'import torch\n'), ((18998, 19024), 'torch.cat', 'torch.cat', (['key_list'], {'dim': '(3)'}), '(key_list, dim=3)\n', (19007, 19024), False, 'import torch\n'), ((26361, 26387), 'torch.cat', 'torch.cat', (['key_list'], 
{'dim': '(3)'}), '(key_list, dim=3)\n', (26370, 26387), False, 'import torch\n'), ((34996, 35022), 'torch.cat', 'torch.cat', (['key_list'], {'dim': '(3)'}), '(key_list, dim=3)\n', (35005, 35022), False, 'import torch\n'), ((47944, 47970), 'torch.cat', 'torch.cat', (['key_list'], {'dim': '(3)'}), '(key_list, dim=3)\n', (47953, 47970), False, 'import torch\n'), ((1977, 2018), 'torch.matmul', 'torch.matmul', (['self.sym_norm_Adj_matrix', 'x'], {}), '(self.sym_norm_Adj_matrix, x)\n', (1989, 2018), False, 'import torch\n')] |
import cv2
from PIL import Image, ImageDraw
import os.path as osp
import os
import numpy as np
import matplotlib.pyplot as plt
import random
import OpenEXR, Imath
import open3d as o3d
import matplotlib.pyplot as plt
from mathutils import Vector, Matrix, Quaternion
class Label:
def draw_bboxes(self, syn_images_folder, num_of_images, frame_number):
dataset_file = "rendered_images/dataset_info.txt"
obj_classes = ['dove', 'toothpaste']
with open(dataset_file, "r") as dFile:
scenes = dFile.readlines()
for scene in scenes:
scene_num, num_instances = scene.split(',')
scene_num = int(scene_num)
num_instances = int(num_instances)
print ('scene_num: ', scene_num)
img_filepath = osp.join(syn_images_folder, 'image_%05d/rgb/image.png' % scene_num)
im = Image.open(img_filepath)
draw = ImageDraw.Draw(im)
class_filepath = osp.join(syn_images_folder, 'debug/class_id_%05d.txt' % scene_num)
class_list = []
with open(class_filepath, "r") as file:
lines = file.readlines()
for line in lines:
class_list.append(int(line))
file.close()
for instance_num in range(0, num_instances):
if class_list[instance_num] == -1:
print ('Invalid instance::not present in scene\n')
continue
mask_img_filepath = osp.join(syn_images_folder, 'debug/image_%05d_%02d_%04d.png' % (scene_num, instance_num, frame_number))
mask_image = cv2.imread(mask_img_filepath, cv2.IMREAD_GRAYSCALE)
x1, y1, w1, h1 = cv2.boundingRect(mask_image)
box_filepath = osp.join(syn_images_folder, 'image_%05d/labels/bbox.txt' % scene_num)
box_file = open(box_filepath,'a')
box_file.write("%s %i %i %i %i\n" % (obj_classes[int(class_list[instance_num])], int(x1), int(y1), int(x1)+int(w1), int(y1)+int(h1)))
draw.rectangle([(x1, y1), (x1 + w1, y1 + h1)], outline=(255,0,0,255))
box_file.close()
os.remove(mask_img_filepath)
# save debug image showing bounding box
del draw
new_img_filepath = osp.join(syn_images_folder, 'debug/dbg_img_%05d.png' % scene_num)
im.save(new_img_filepath)
def save_pointcloud_data(self, syn_images_folder, scene_num, frame_number):
print ('scene_num: ', scene_num)
rgb_img_filepath = osp.join(syn_images_folder, 'image_%05d/rgb/image.png' % scene_num)
depth_img_filepath = osp.join(syn_images_folder, 'image_%05d/depth/image_%04d.exr' % (scene_num, frame_number))
des_depth_path = osp.join(syn_images_folder, 'image_%05d/depth/image.png' % scene_num)
des_pcl_path = osp.join(syn_images_folder, 'image_%05d/depth/pointcloud.pcd' % scene_num)
des_pcl_path_viz = osp.join(syn_images_folder, 'image_%05d/depth/pointcloud.ply' % scene_num)
# Extracting depth image from EXR file format
depth_image_raw = OpenEXR.InputFile(depth_img_filepath)
point_type = Imath.PixelType(Imath.PixelType.FLOAT)
depthstr = depth_image_raw.channel('R', point_type)
depth = np.fromstring(depthstr, dtype = np.float32)
dw = depth_image_raw.header()['dataWindow']
size = (dw.max.x - dw.min.x + 1, dw.max.y - dw.min.y + 1)
depth.shape = (size[1], size[0]) # np arrays are (row, col)
depth[depth < 0] = 0
depth = depth*1000
depth = depth.astype(np.uint16)
cv2.imwrite(des_depth_path, depth)
pinhole_camera_intrinsic = o3d.camera.PinholeCameraIntrinsic()
pinhole_camera_intrinsic.set_intrinsics(640, 480, 615.95776367187, 615.95776367187, 320, 240)
source_color = o3d.io.read_image(rgb_img_filepath)
source_depth = o3d.io.read_image(des_depth_path)
source_rgbd_image = o3d.geometry.RGBDImage.create_from_color_and_depth(source_color, source_depth, 1000, 3, False)
source_pcd = o3d.geometry.PointCloud.create_from_rgbd_image(source_rgbd_image, pinhole_camera_intrinsic)
cam_matrix = Matrix(((0.9657, 0.1838, -0.1833, 0.3500),
(0.2596, -0.6834, 0.6822, -0.6000),
(0.0001, -0.7064, -0.7077, 0.6000),
(0.0000, 0.0000, 0.0000, 1.0000)))
source_pcd.transform(cam_matrix)
o3d.io.write_point_cloud(des_pcl_path, source_pcd, True)
o3d.io.write_point_cloud(des_pcl_path_viz, source_pcd, True)
def save_pointcloud_data_dir(self, dir_inout, frame_number):
rgb_img_filepath = osp.join(dir_inout, 'rgb/image.png')
depth_img_filepath = osp.join(dir_inout, 'depth/image_%04d.exr' % frame_number)
des_depth_path = osp.join(dir_inout, 'depth/image.png')
des_pcl_path = osp.join(dir_inout, 'depth/pointcloud.pcd')
des_pcl_path_viz = osp.join(dir_inout, 'depth/pointcloud.ply')
# Extracting depth image from EXR file format
depth_image_raw = OpenEXR.InputFile(depth_img_filepath)
point_type = Imath.PixelType(Imath.PixelType.FLOAT)
depthstr = depth_image_raw.channel('R', point_type)
depth = np.fromstring(depthstr, dtype = np.float32)
dw = depth_image_raw.header()['dataWindow']
size = (dw.max.x - dw.min.x + 1, dw.max.y - dw.min.y + 1)
depth.shape = (size[1], size[0]) # np arrays are (row, col)
depth[depth < 0] = 0
depth = depth*1000
depth = depth.astype(np.uint16)
cv2.imwrite(des_depth_path, depth)
pinhole_camera_intrinsic = o3d.camera.PinholeCameraIntrinsic()
pinhole_camera_intrinsic.set_intrinsics(640, 480, 615.95776367187, 615.95776367187, 320, 240)
source_color = o3d.io.read_image(rgb_img_filepath)
source_depth = o3d.io.read_image(des_depth_path)
source_rgbd_image = o3d.geometry.RGBDImage.create_from_color_and_depth(source_color, source_depth, 1000, 3, False)
source_pcd = o3d.geometry.PointCloud.create_from_rgbd_image(source_rgbd_image, pinhole_camera_intrinsic)
cam_matrix = Matrix(((0.9657, 0.1838, -0.1833, 0.3500),
(0.2596, -0.6834, 0.6822, -0.6000),
(0.0001, -0.7064, -0.7077, 0.6000),
(0.0000, 0.0000, 0.0000, 1.0000)))
source_pcd.transform(cam_matrix)
o3d.io.write_point_cloud(des_pcl_path, source_pcd, True)
o3d.io.write_point_cloud(des_pcl_path_viz, source_pcd, True)
def get_segmentation_labels(self, syn_images_folder, num_of_images, frame_number):
dataset_file = "rendered_images/dataset_info.txt"
with open(dataset_file, "r") as dFile:
scenes = dFile.readlines()
for scene in scenes:
scene_num, num_instances = scene.split(',')
scene_num = int(scene_num)
num_instances = int(num_instances)
print ('scene_num: ', scene_num)
img_filepath = osp.join(syn_images_folder, 'image_%05d/rgb/image.png' % scene_num)
im = cv2.imread(img_filepath)
class_filepath = osp.join(syn_images_folder, 'debug/class_id_%05d.txt' % scene_num)
class_list = []
with open(class_filepath, "r") as file:
lines = file.readlines()
for line in lines:
class_list.append(int(line))
file.close()
height, width, channels = im.shape
seg_img = np.zeros((height,width,1), np.uint8)
edge_img = np.zeros((height,width), np.uint8)
for instance_num in range(0, num_instances):
if class_list[instance_num] == -1:
print ('Invalid instance::not present in scene\n')
continue
mask_img_filepath = osp.join(syn_images_folder, 'debug/image_%05d_%02d_%04d.png' % (scene_num, instance_num, frame_number))
mask_image = cv2.imread(mask_img_filepath, cv2.IMREAD_GRAYSCALE)
active_indices = np.nonzero(mask_image)
# semantic label
seg_img[active_indices] = np.uint8(class_list[instance_num] + 1)
# instance label
ins_seg_img = np.zeros((height,width,1), np.uint8)
ins_seg_img[active_indices] = np.uint8(class_list[instance_num] + 1)
# boundary label
edgex = cv2.Sobel(ins_seg_img, cv2.CV_64F,1,0,ksize=1)
edgey = cv2.Sobel(ins_seg_img, cv2.CV_64F,0,1,ksize=1)
edge = np.hypot(edgex, edgey)
edge *= 255.0/np.max(edge)
edge = np.uint8(edge)
edge_img = cv2.bitwise_or(edge_img, edge)
os.remove(mask_img_filepath)
# save segmentation image
seg_img_filepath = osp.join(syn_images_folder, 'image_%05d/labels/seg_img.png' % scene_num)
cv2.imwrite(seg_img_filepath, seg_img)
seg_img_plt = Image.open(seg_img_filepath).convert('P')
seg_img_plt.putpalette([
0, 0, 0,
128, 0, 0,
0, 128, 0,
128, 128, 0,
0, 128, 128,
128, 128, 128,
64, 0, 0,
192, 0, 0,
64, 128, 0,
192, 128, 0,
64, 0, 128,
192, 0, 128,
64, 128, 128,
192, 128, 128,
0, 64, 0,
128, 64, 0,
0, 192, 0,
128, 192, 0, # defined for 18 classes currently
])
seg_img_plt.save(seg_img_filepath)
# save edge image
edge_img_filepath = osp.join(syn_images_folder, 'image_%05d/labels/edge_img.png' % scene_num)
ret, edge_img = cv2.threshold(edge_img, 10, 255, cv2.THRESH_BINARY)
kernel = np.ones((3,3), np.uint8)
edge_img = cv2.dilate(edge_img, kernel, iterations = 1)
cv2.imwrite(edge_img_filepath, edge_img) | [
"os.remove",
"numpy.ones",
"open3d.geometry.PointCloud.create_from_rgbd_image",
"os.path.join",
"cv2.dilate",
"cv2.imwrite",
"open3d.io.write_point_cloud",
"numpy.max",
"PIL.ImageDraw.Draw",
"cv2.boundingRect",
"numpy.fromstring",
"Imath.PixelType",
"numpy.uint8",
"numpy.hypot",
"cv2.bit... | [((2730, 2797), 'os.path.join', 'osp.join', (['syn_images_folder', "('image_%05d/rgb/image.png' % scene_num)"], {}), "(syn_images_folder, 'image_%05d/rgb/image.png' % scene_num)\n", (2738, 2797), True, 'import os.path as osp\n'), ((2827, 2921), 'os.path.join', 'osp.join', (['syn_images_folder', "('image_%05d/depth/image_%04d.exr' % (scene_num, frame_number))"], {}), "(syn_images_folder, 'image_%05d/depth/image_%04d.exr' % (scene_num,\n frame_number))\n", (2835, 2921), True, 'import os.path as osp\n'), ((2943, 3012), 'os.path.join', 'osp.join', (['syn_images_folder', "('image_%05d/depth/image.png' % scene_num)"], {}), "(syn_images_folder, 'image_%05d/depth/image.png' % scene_num)\n", (2951, 3012), True, 'import os.path as osp\n'), ((3036, 3110), 'os.path.join', 'osp.join', (['syn_images_folder', "('image_%05d/depth/pointcloud.pcd' % scene_num)"], {}), "(syn_images_folder, 'image_%05d/depth/pointcloud.pcd' % scene_num)\n", (3044, 3110), True, 'import os.path as osp\n'), ((3138, 3212), 'os.path.join', 'osp.join', (['syn_images_folder', "('image_%05d/depth/pointcloud.ply' % scene_num)"], {}), "(syn_images_folder, 'image_%05d/depth/pointcloud.ply' % scene_num)\n", (3146, 3212), True, 'import os.path as osp\n'), ((3295, 3332), 'OpenEXR.InputFile', 'OpenEXR.InputFile', (['depth_img_filepath'], {}), '(depth_img_filepath)\n', (3312, 3332), False, 'import OpenEXR, Imath\n'), ((3355, 3393), 'Imath.PixelType', 'Imath.PixelType', (['Imath.PixelType.FLOAT'], {}), '(Imath.PixelType.FLOAT)\n', (3370, 3393), False, 'import OpenEXR, Imath\n'), ((3470, 3511), 'numpy.fromstring', 'np.fromstring', (['depthstr'], {'dtype': 'np.float32'}), '(depthstr, dtype=np.float32)\n', (3483, 3511), True, 'import numpy as np\n'), ((3805, 3839), 'cv2.imwrite', 'cv2.imwrite', (['des_depth_path', 'depth'], {}), '(des_depth_path, depth)\n', (3816, 3839), False, 'import cv2\n'), ((3876, 3911), 'open3d.camera.PinholeCameraIntrinsic', 'o3d.camera.PinholeCameraIntrinsic', ([], {}), '()\n', 
(3909, 3911), True, 'import open3d as o3d\n'), ((4038, 4073), 'open3d.io.read_image', 'o3d.io.read_image', (['rgb_img_filepath'], {}), '(rgb_img_filepath)\n', (4055, 4073), True, 'import open3d as o3d\n'), ((4097, 4130), 'open3d.io.read_image', 'o3d.io.read_image', (['des_depth_path'], {}), '(des_depth_path)\n', (4114, 4130), True, 'import open3d as o3d\n'), ((4160, 4258), 'open3d.geometry.RGBDImage.create_from_color_and_depth', 'o3d.geometry.RGBDImage.create_from_color_and_depth', (['source_color', 'source_depth', '(1000)', '(3)', '(False)'], {}), '(source_color,\n source_depth, 1000, 3, False)\n', (4210, 4258), True, 'import open3d as o3d\n'), ((4276, 4371), 'open3d.geometry.PointCloud.create_from_rgbd_image', 'o3d.geometry.PointCloud.create_from_rgbd_image', (['source_rgbd_image', 'pinhole_camera_intrinsic'], {}), '(source_rgbd_image,\n pinhole_camera_intrinsic)\n', (4322, 4371), True, 'import open3d as o3d\n'), ((4390, 4524), 'mathutils.Matrix', 'Matrix', (['((0.9657, 0.1838, -0.1833, 0.35), (0.2596, -0.6834, 0.6822, -0.6), (0.0001,\n -0.7064, -0.7077, 0.6), (0.0, 0.0, 0.0, 1.0))'], {}), '(((0.9657, 0.1838, -0.1833, 0.35), (0.2596, -0.6834, 0.6822, -0.6), (\n 0.0001, -0.7064, -0.7077, 0.6), (0.0, 0.0, 0.0, 1.0)))\n', (4396, 4524), False, 'from mathutils import Vector, Matrix, Quaternion\n'), ((4620, 4676), 'open3d.io.write_point_cloud', 'o3d.io.write_point_cloud', (['des_pcl_path', 'source_pcd', '(True)'], {}), '(des_pcl_path, source_pcd, True)\n', (4644, 4676), True, 'import open3d as o3d\n'), ((4685, 4745), 'open3d.io.write_point_cloud', 'o3d.io.write_point_cloud', (['des_pcl_path_viz', 'source_pcd', '(True)'], {}), '(des_pcl_path_viz, source_pcd, True)\n', (4709, 4745), True, 'import open3d as o3d\n'), ((4839, 4875), 'os.path.join', 'osp.join', (['dir_inout', '"""rgb/image.png"""'], {}), "(dir_inout, 'rgb/image.png')\n", (4847, 4875), True, 'import os.path as osp\n'), ((4905, 4963), 'os.path.join', 'osp.join', (['dir_inout', "('depth/image_%04d.exr' % 
frame_number)"], {}), "(dir_inout, 'depth/image_%04d.exr' % frame_number)\n", (4913, 4963), True, 'import os.path as osp\n'), ((4989, 5027), 'os.path.join', 'osp.join', (['dir_inout', '"""depth/image.png"""'], {}), "(dir_inout, 'depth/image.png')\n", (4997, 5027), True, 'import os.path as osp\n'), ((5051, 5094), 'os.path.join', 'osp.join', (['dir_inout', '"""depth/pointcloud.pcd"""'], {}), "(dir_inout, 'depth/pointcloud.pcd')\n", (5059, 5094), True, 'import os.path as osp\n'), ((5122, 5165), 'os.path.join', 'osp.join', (['dir_inout', '"""depth/pointcloud.ply"""'], {}), "(dir_inout, 'depth/pointcloud.ply')\n", (5130, 5165), True, 'import os.path as osp\n'), ((5248, 5285), 'OpenEXR.InputFile', 'OpenEXR.InputFile', (['depth_img_filepath'], {}), '(depth_img_filepath)\n', (5265, 5285), False, 'import OpenEXR, Imath\n'), ((5308, 5346), 'Imath.PixelType', 'Imath.PixelType', (['Imath.PixelType.FLOAT'], {}), '(Imath.PixelType.FLOAT)\n', (5323, 5346), False, 'import OpenEXR, Imath\n'), ((5423, 5464), 'numpy.fromstring', 'np.fromstring', (['depthstr'], {'dtype': 'np.float32'}), '(depthstr, dtype=np.float32)\n', (5436, 5464), True, 'import numpy as np\n'), ((5758, 5792), 'cv2.imwrite', 'cv2.imwrite', (['des_depth_path', 'depth'], {}), '(des_depth_path, depth)\n', (5769, 5792), False, 'import cv2\n'), ((5829, 5864), 'open3d.camera.PinholeCameraIntrinsic', 'o3d.camera.PinholeCameraIntrinsic', ([], {}), '()\n', (5862, 5864), True, 'import open3d as o3d\n'), ((5991, 6026), 'open3d.io.read_image', 'o3d.io.read_image', (['rgb_img_filepath'], {}), '(rgb_img_filepath)\n', (6008, 6026), True, 'import open3d as o3d\n'), ((6050, 6083), 'open3d.io.read_image', 'o3d.io.read_image', (['des_depth_path'], {}), '(des_depth_path)\n', (6067, 6083), True, 'import open3d as o3d\n'), ((6113, 6211), 'open3d.geometry.RGBDImage.create_from_color_and_depth', 'o3d.geometry.RGBDImage.create_from_color_and_depth', (['source_color', 'source_depth', '(1000)', '(3)', '(False)'], {}), '(source_color,\n 
source_depth, 1000, 3, False)\n', (6163, 6211), True, 'import open3d as o3d\n'), ((6229, 6324), 'open3d.geometry.PointCloud.create_from_rgbd_image', 'o3d.geometry.PointCloud.create_from_rgbd_image', (['source_rgbd_image', 'pinhole_camera_intrinsic'], {}), '(source_rgbd_image,\n pinhole_camera_intrinsic)\n', (6275, 6324), True, 'import open3d as o3d\n'), ((6343, 6477), 'mathutils.Matrix', 'Matrix', (['((0.9657, 0.1838, -0.1833, 0.35), (0.2596, -0.6834, 0.6822, -0.6), (0.0001,\n -0.7064, -0.7077, 0.6), (0.0, 0.0, 0.0, 1.0))'], {}), '(((0.9657, 0.1838, -0.1833, 0.35), (0.2596, -0.6834, 0.6822, -0.6), (\n 0.0001, -0.7064, -0.7077, 0.6), (0.0, 0.0, 0.0, 1.0)))\n', (6349, 6477), False, 'from mathutils import Vector, Matrix, Quaternion\n'), ((6573, 6629), 'open3d.io.write_point_cloud', 'o3d.io.write_point_cloud', (['des_pcl_path', 'source_pcd', '(True)'], {}), '(des_pcl_path, source_pcd, True)\n', (6597, 6629), True, 'import open3d as o3d\n'), ((6638, 6698), 'open3d.io.write_point_cloud', 'o3d.io.write_point_cloud', (['des_pcl_path_viz', 'source_pcd', '(True)'], {}), '(des_pcl_path_viz, source_pcd, True)\n', (6662, 6698), True, 'import open3d as o3d\n'), ((811, 878), 'os.path.join', 'osp.join', (['syn_images_folder', "('image_%05d/rgb/image.png' % scene_num)"], {}), "(syn_images_folder, 'image_%05d/rgb/image.png' % scene_num)\n", (819, 878), True, 'import os.path as osp\n'), ((900, 924), 'PIL.Image.open', 'Image.open', (['img_filepath'], {}), '(img_filepath)\n', (910, 924), False, 'from PIL import Image, ImageDraw\n'), ((948, 966), 'PIL.ImageDraw.Draw', 'ImageDraw.Draw', (['im'], {}), '(im)\n', (962, 966), False, 'from PIL import Image, ImageDraw\n'), ((1001, 1067), 'os.path.join', 'osp.join', (['syn_images_folder', "('debug/class_id_%05d.txt' % scene_num)"], {}), "(syn_images_folder, 'debug/class_id_%05d.txt' % scene_num)\n", (1009, 1067), True, 'import os.path as osp\n'), ((2473, 2538), 'os.path.join', 'osp.join', (['syn_images_folder', "('debug/dbg_img_%05d.png' % 
scene_num)"], {}), "(syn_images_folder, 'debug/dbg_img_%05d.png' % scene_num)\n", (2481, 2538), True, 'import os.path as osp\n'), ((7199, 7266), 'os.path.join', 'osp.join', (['syn_images_folder', "('image_%05d/rgb/image.png' % scene_num)"], {}), "(syn_images_folder, 'image_%05d/rgb/image.png' % scene_num)\n", (7207, 7266), True, 'import os.path as osp\n'), ((7288, 7312), 'cv2.imread', 'cv2.imread', (['img_filepath'], {}), '(img_filepath)\n', (7298, 7312), False, 'import cv2\n'), ((7347, 7413), 'os.path.join', 'osp.join', (['syn_images_folder', "('debug/class_id_%05d.txt' % scene_num)"], {}), "(syn_images_folder, 'debug/class_id_%05d.txt' % scene_num)\n", (7355, 7413), True, 'import os.path as osp\n'), ((7745, 7783), 'numpy.zeros', 'np.zeros', (['(height, width, 1)', 'np.uint8'], {}), '((height, width, 1), np.uint8)\n', (7753, 7783), True, 'import numpy as np\n'), ((7809, 7844), 'numpy.zeros', 'np.zeros', (['(height, width)', 'np.uint8'], {}), '((height, width), np.uint8)\n', (7817, 7844), True, 'import numpy as np\n'), ((9232, 9304), 'os.path.join', 'osp.join', (['syn_images_folder', "('image_%05d/labels/seg_img.png' % scene_num)"], {}), "(syn_images_folder, 'image_%05d/labels/seg_img.png' % scene_num)\n", (9240, 9304), True, 'import os.path as osp\n'), ((9321, 9359), 'cv2.imwrite', 'cv2.imwrite', (['seg_img_filepath', 'seg_img'], {}), '(seg_img_filepath, seg_img)\n', (9332, 9359), False, 'import cv2\n'), ((10228, 10301), 'os.path.join', 'osp.join', (['syn_images_folder', "('image_%05d/labels/edge_img.png' % scene_num)"], {}), "(syn_images_folder, 'image_%05d/labels/edge_img.png' % scene_num)\n", (10236, 10301), True, 'import os.path as osp\n'), ((10334, 10385), 'cv2.threshold', 'cv2.threshold', (['edge_img', '(10)', '(255)', 'cv2.THRESH_BINARY'], {}), '(edge_img, 10, 255, cv2.THRESH_BINARY)\n', (10347, 10385), False, 'import cv2\n'), ((10411, 10436), 'numpy.ones', 'np.ones', (['(3, 3)', 'np.uint8'], {}), '((3, 3), np.uint8)\n', (10418, 10436), True, 'import numpy 
as np\n'), ((10463, 10505), 'cv2.dilate', 'cv2.dilate', (['edge_img', 'kernel'], {'iterations': '(1)'}), '(edge_img, kernel, iterations=1)\n', (10473, 10505), False, 'import cv2\n'), ((10525, 10565), 'cv2.imwrite', 'cv2.imwrite', (['edge_img_filepath', 'edge_img'], {}), '(edge_img_filepath, edge_img)\n', (10536, 10565), False, 'import cv2\n'), ((1588, 1695), 'os.path.join', 'osp.join', (['syn_images_folder', "('debug/image_%05d_%02d_%04d.png' % (scene_num, instance_num, frame_number))"], {}), "(syn_images_folder, 'debug/image_%05d_%02d_%04d.png' % (scene_num,\n instance_num, frame_number))\n", (1596, 1695), True, 'import os.path as osp\n'), ((1725, 1776), 'cv2.imread', 'cv2.imread', (['mask_img_filepath', 'cv2.IMREAD_GRAYSCALE'], {}), '(mask_img_filepath, cv2.IMREAD_GRAYSCALE)\n', (1735, 1776), False, 'import cv2\n'), ((1814, 1842), 'cv2.boundingRect', 'cv2.boundingRect', (['mask_image'], {}), '(mask_image)\n', (1830, 1842), False, 'import cv2\n'), ((1899, 1968), 'os.path.join', 'osp.join', (['syn_images_folder', "('image_%05d/labels/bbox.txt' % scene_num)"], {}), "(syn_images_folder, 'image_%05d/labels/bbox.txt' % scene_num)\n", (1907, 1968), True, 'import os.path as osp\n'), ((2325, 2353), 'os.remove', 'os.remove', (['mask_img_filepath'], {}), '(mask_img_filepath)\n', (2334, 2353), False, 'import os\n'), ((8110, 8217), 'os.path.join', 'osp.join', (['syn_images_folder', "('debug/image_%05d_%02d_%04d.png' % (scene_num, instance_num, frame_number))"], {}), "(syn_images_folder, 'debug/image_%05d_%02d_%04d.png' % (scene_num,\n instance_num, frame_number))\n", (8118, 8217), True, 'import os.path as osp\n'), ((8247, 8298), 'cv2.imread', 'cv2.imread', (['mask_img_filepath', 'cv2.IMREAD_GRAYSCALE'], {}), '(mask_img_filepath, cv2.IMREAD_GRAYSCALE)\n', (8257, 8298), False, 'import cv2\n'), ((8336, 8358), 'numpy.nonzero', 'np.nonzero', (['mask_image'], {}), '(mask_image)\n', (8346, 8358), True, 'import numpy as np\n'), ((8443, 8481), 'numpy.uint8', 'np.uint8', 
(['(class_list[instance_num] + 1)'], {}), '(class_list[instance_num] + 1)\n', (8451, 8481), True, 'import numpy as np\n'), ((8554, 8592), 'numpy.zeros', 'np.zeros', (['(height, width, 1)', 'np.uint8'], {}), '((height, width, 1), np.uint8)\n', (8562, 8592), True, 'import numpy as np\n'), ((8641, 8679), 'numpy.uint8', 'np.uint8', (['(class_list[instance_num] + 1)'], {}), '(class_list[instance_num] + 1)\n', (8649, 8679), True, 'import numpy as np\n'), ((8766, 8815), 'cv2.Sobel', 'cv2.Sobel', (['ins_seg_img', 'cv2.CV_64F', '(1)', '(0)'], {'ksize': '(1)'}), '(ins_seg_img, cv2.CV_64F, 1, 0, ksize=1)\n', (8775, 8815), False, 'import cv2\n'), ((8841, 8890), 'cv2.Sobel', 'cv2.Sobel', (['ins_seg_img', 'cv2.CV_64F', '(0)', '(1)'], {'ksize': '(1)'}), '(ins_seg_img, cv2.CV_64F, 0, 1, ksize=1)\n', (8850, 8890), False, 'import cv2\n'), ((8915, 8937), 'numpy.hypot', 'np.hypot', (['edgex', 'edgey'], {}), '(edgex, edgey)\n', (8923, 8937), True, 'import numpy as np\n'), ((9012, 9026), 'numpy.uint8', 'np.uint8', (['edge'], {}), '(edge)\n', (9020, 9026), True, 'import numpy as np\n'), ((9058, 9088), 'cv2.bitwise_or', 'cv2.bitwise_or', (['edge_img', 'edge'], {}), '(edge_img, edge)\n', (9072, 9088), False, 'import cv2\n'), ((9114, 9142), 'os.remove', 'os.remove', (['mask_img_filepath'], {}), '(mask_img_filepath)\n', (9123, 9142), False, 'import os\n'), ((8972, 8984), 'numpy.max', 'np.max', (['edge'], {}), '(edge)\n', (8978, 8984), True, 'import numpy as np\n'), ((9390, 9418), 'PIL.Image.open', 'Image.open', (['seg_img_filepath'], {}), '(seg_img_filepath)\n', (9400, 9418), False, 'from PIL import Image, ImageDraw\n')] |
#!/usr/bin/env python3
"""
Mit den Funktionen in dieser Datei können Eingabedaten mit Hilfe von neuronalen Netzen klassifiziert/erzeugt/verändert werden.
"""
import sys
from collections import namedtuple
import numpy as np
import keras
from keras import backend as K
import skimage
num_classes = 43
def normalizeTensor(x):
# utility function to normalize a tensor by its L2 norm
return x / (K.sqrt(K.mean(K.square(x))) + K.epsilon())
def synthesize(model, c, max_iter=200):
"""
Erzeugt eine Eingabe, die eine bestimmte Klasse für ein Model maximieren.
"""
out_layer = model.layers[-1]
numClasses = out_layer.output_shape[-1]
loss = keras.losses.mean_squared_error(keras.utils.to_categorical(c, num_classes = numClasses), out_layer.output)
grads = K.gradients(loss, model.input)[0]
grads = normalizeTensor(grads)
shape = (1,) + model.input_shape[1:]
in_data = np.random.random(shape)
iterate = K.function([model.input], [loss, grads])
step = -0.01
for j in range(max_iter):
loss_value, grads_value = iterate([in_data])
if np.max(grads_value) == 0.0:
break
grads_value -= np.min(grads_value)
grads_value /= np.max(grads_value)
grads_value -= np.mean(grads_value)
# loss may be unrealistically small
if loss_value <= 0:
if j < 100:
# if we have enough iterations left, we just restart with a new random image
in_data = np.random.random(shape)
else:
# otherwise the current image is returned
break
in_data += grads_value * step
image = in_data[0]
image /= np.max(image)
return image
def synthesize_multinets(models, c, base=None, max_iter=40, step=0.01, init_func=np.random.random):
"""
Erzeugt eine Eingabe, die für mehrere Modelle eine gegebene Klasse maximiert.
Dabei ist nicht unbedingt gegeben, dass die Eingabe für alle Modelle die gewünschte Klasse maximiert, da ein
Mittelwert gefunden wird.
"""
GradientAscentNet = namedtuple('GradientAscentNet', 'out_layer num_classes loss grads iterate')
nets = []
for model in models:
out_layer = model.layers[-1]
num_classes = out_layer.output_shape[-1]
loss = keras.losses.mean_squared_error(keras.utils.to_categorical(c, num_classes = num_classes), out_layer.output)
grads = K.gradients(loss, model.input)[0]
grads = normalizeTensor(grads)
iterate = K.function([model.input], [loss, grads])
net = GradientAscentNet(out_layer, num_classes, loss, grads, iterate)
nets.append(net)
shape = (1,) + model.input_shape[1:]
if base is None:
in_data = init_func(shape)
else:
in_data = np.array([base])
for j in range(max_iter):
print('\r%i%%'%(int(j/max_iter*100)), end='')
for net in nets:
loss_value, grads_value = iterate([in_data])
if loss_value <= 0 or np.max(grads_value) == 0.0:
continue
grads_value -= np.min(grads_value)
grads_value /= np.max(grads_value)
grads_value -= np.mean(grads_value)
in_data -= grads_value * step
print('\rdone!')
image = in_data[0]
image -= np.min(image)
image /= np.max(image)
return image
def add_loss(model, c, base, iterations=200):
"""
Modifiziert Eingabedaten so, dass eine bestimmte alternative Klasse maximiert wird.
"""
out_layer = model.layers[-1]
numClasses = out_layer.output_shape[-1]
#loss = keras.losses.mean_squared_error(keras.utils.to_categorical(c, num_classes = numClasses), out_layer.output)
loss = keras.losses.categorical_crossentropy(keras.utils.to_categorical(c, num_classes = numClasses), out_layer.output)
#loss = K.mean(out_layer.output[:c])
grads = K.gradients(loss, model.input)[0]
grads = normalizeTensor(grads)
shape = (1,) + model.input_shape[1:]
in_data = np.array([base])
#in_data = np.random.random(shape)
iterate = K.function([model.input], [loss, grads])
step = 1.0 / iterations
for i in range(iterations):
loss_value, grads_value = iterate([in_data])
print(loss_value)
print(np.max(grads_value))
if np.max(grads_value) == 0.0:
for pixel in range(10):
x = int(np.random.rand() * shape[1])
y = int(np.random.rand() * shape[2])
for c in range(3):
in_data[0,x,y,c] = np.random.rand()
else:
grads_value -= np.min(grads_value)
grads_value /= np.max(grads_value)
grads_value -= np.mean(grads_value)
in_data += grads_value * step
print(":)")
break
image = in_data[0]
image -= np.min(image)
image /= np.max(image)
return image
def bruteforce(model, c, max_iter=1000):
"""
Generiere Zufallsdaten, bis diese eine Klasse maximieren oder die maximale Anzahl an Iterationen verbraucht ist.
"""
shape = (1,) + model.input_shape[1:]
for i in range(max_iter):
image = np.random.random(shape)
prediction = model.predict(image, batch_size=1)[0]
pred_c = np.argmax(prediction)
if pred_c == c:
return image[0]
print('%i/%i'%(i, max_iter), end='\r')
raise ValueError('no suitable image found')
def makenet_idsia(weights, input_shape, classes):
"""
Erzeuge ein Netz mit dem korrekten Aufbau für die IDSIA Gruppe.
"""
input_activation = 'relu'
hidden_activation = 'relu'
output_activation = 'softmax'
model = keras.models.Sequential()
model.add(keras.layers.Conv2D(100, input_shape=input_shape, kernel_size=(7, 7), activation=input_activation))
model.add(keras.layers.MaxPooling2D(pool_size=(2,2)))
model.add(keras.layers.Dropout(0.15))
model.add(keras.layers.Conv2D(150, kernel_size=(4, 4), activation=hidden_activation))
model.add(keras.layers.MaxPooling2D(pool_size=(2,2)))
model.add(keras.layers.Dropout(0.15))
model.add(keras.layers.Conv2D(250, kernel_size=(4, 4), activation=hidden_activation))
model.add(keras.layers.MaxPooling2D(pool_size=(2,2)))
model.add(keras.layers.Dropout(0.15))
model.add(keras.layers.Flatten())
model.add(keras.layers.Dense(300, activation='relu'))
model.add(keras.layers.Dense(classes, activation=output_activation))
return model
def test_create(model):
image_orig = skimage.io.imread('vorfahrt.png')
image = add_loss(model, 1, image_orig)
#image = bruteforce(model, 1)
#image = synthesize(model, 1)
skimage.io.imsave('created.png', image)
def test_add_loss(model):
image_orig = skimage.io.imread('vorfahrt.png')
image_orig = image_orig / 255.0
image = add_loss(model, 30, image_orig)
faked = synthesize(model, 31)
predictions = model.predict(np.array([image_orig, image, faked]), batch_size=32)
for prediction in predictions:
pred = np.argmax(prediction)
print(pred)
skimage.io.imsave('modified.png', image)
def predict_class(model, data):
return np.argmax(model.predict(np.array([data]), batch_size=1)[0])
def test_create_multinets(models):
c = 40
image = synthesize_multinets(models, c, base=None, init_func=np.random.random)
predictions = []
good_models = []
for model in models:
prediction = predict_class(model, image)
predictions.append(prediction)
if prediction == c:
good_models.append(model)
correct_preds = len(list(filter(lambda x: x == c, predictions)))
print(correct_preds)
skimage.io.imsave('created.png', image)
image = synthesize_multinets(good_models, c, base=None, init_func=np.ones)
skimage.io.imsave('created_good.png', image)
def test_predict(model, fname):
"""
Verwendet ein Netz, um den Inhalt eines Bildes zu klassifizieren.
"""
image = skimage.io.imread(fname)
image = skimage.transform.resize(image, (64, 64))
images = np.array([image])
predictions = model.predict(images, batch_size=1)
for prediction in predictions:
pred = np.argmax(prediction)
print(pred)
def test_predict_multinets(models, fname):
"""
Verwendet mehrere Netze, um den Inhalt eines Bildes zu klassifizieren.
Dabei werden die einzelnen Predictions, sowie die häufigste ausgegeben.
"""
image = skimage.io.imread(fname)
image = skimage.transform.resize(image, (64, 64))
images = np.array([image])
commitee = [0] * num_classes
for model in models:
predictions = model.predict(images, batch_size=1)
for prediction in predictions:
pred = np.argmax(prediction)
conf = prediction[pred]
print('Class: %i (%i%%)'%(pred, int(conf*100)))
commitee[pred] += 1
print('Majority: %i'%np.argmax(commitee))
"""
Hier werden alle verfügbaren Netze initialisiert und trainierte Kantengewichte werden eingelesen.
"""
models = []
for i in range(16):
model = makenet_idsia(None, (64, 64, 3), 43)
model.load_weights('trained/idsia-%i.h5'%i)
models.append(model)
"""
Joa, einfach immer das einkommentieren, was man testen will :)
"""
test_predict_multinets(models, sys.argv[1])
#test_add_loss(models[0])
#test_create(model)
#test_predict(model)
| [
"numpy.argmax",
"keras.backend.epsilon",
"numpy.mean",
"skimage.transform.resize",
"keras.layers.Flatten",
"numpy.max",
"skimage.io.imsave",
"keras.layers.MaxPooling2D",
"skimage.io.imread",
"keras.utils.to_categorical",
"keras.backend.gradients",
"keras.layers.Dropout",
"keras.backend.funct... | [((911, 934), 'numpy.random.random', 'np.random.random', (['shape'], {}), '(shape)\n', (927, 934), True, 'import numpy as np\n'), ((949, 989), 'keras.backend.function', 'K.function', (['[model.input]', '[loss, grads]'], {}), '([model.input], [loss, grads])\n', (959, 989), True, 'from keras import backend as K\n'), ((1688, 1701), 'numpy.max', 'np.max', (['image'], {}), '(image)\n', (1694, 1701), True, 'import numpy as np\n'), ((2085, 2160), 'collections.namedtuple', 'namedtuple', (['"""GradientAscentNet"""', '"""out_layer num_classes loss grads iterate"""'], {}), "('GradientAscentNet', 'out_layer num_classes loss grads iterate')\n", (2095, 2160), False, 'from collections import namedtuple\n'), ((3299, 3312), 'numpy.min', 'np.min', (['image'], {}), '(image)\n', (3305, 3312), True, 'import numpy as np\n'), ((3326, 3339), 'numpy.max', 'np.max', (['image'], {}), '(image)\n', (3332, 3339), True, 'import numpy as np\n'), ((4005, 4021), 'numpy.array', 'np.array', (['[base]'], {}), '([base])\n', (4013, 4021), True, 'import numpy as np\n'), ((4075, 4115), 'keras.backend.function', 'K.function', (['[model.input]', '[loss, grads]'], {}), '([model.input], [loss, grads])\n', (4085, 4115), True, 'from keras import backend as K\n'), ((4838, 4851), 'numpy.min', 'np.min', (['image'], {}), '(image)\n', (4844, 4851), True, 'import numpy as np\n'), ((4865, 4878), 'numpy.max', 'np.max', (['image'], {}), '(image)\n', (4871, 4878), True, 'import numpy as np\n'), ((5669, 5694), 'keras.models.Sequential', 'keras.models.Sequential', ([], {}), '()\n', (5692, 5694), False, 'import keras\n'), ((6521, 6554), 'skimage.io.imread', 'skimage.io.imread', (['"""vorfahrt.png"""'], {}), "('vorfahrt.png')\n", (6538, 6554), False, 'import skimage\n'), ((6671, 6710), 'skimage.io.imsave', 'skimage.io.imsave', (['"""created.png"""', 'image'], {}), "('created.png', image)\n", (6688, 6710), False, 'import skimage\n'), ((6755, 6788), 'skimage.io.imread', 'skimage.io.imread', 
(['"""vorfahrt.png"""'], {}), "('vorfahrt.png')\n", (6772, 6788), False, 'import skimage\n'), ((7084, 7124), 'skimage.io.imsave', 'skimage.io.imsave', (['"""modified.png"""', 'image'], {}), "('modified.png', image)\n", (7101, 7124), False, 'import skimage\n'), ((7678, 7717), 'skimage.io.imsave', 'skimage.io.imsave', (['"""created.png"""', 'image'], {}), "('created.png', image)\n", (7695, 7717), False, 'import skimage\n'), ((7801, 7845), 'skimage.io.imsave', 'skimage.io.imsave', (['"""created_good.png"""', 'image'], {}), "('created_good.png', image)\n", (7818, 7845), False, 'import skimage\n'), ((7977, 8001), 'skimage.io.imread', 'skimage.io.imread', (['fname'], {}), '(fname)\n', (7994, 8001), False, 'import skimage\n'), ((8014, 8055), 'skimage.transform.resize', 'skimage.transform.resize', (['image', '(64, 64)'], {}), '(image, (64, 64))\n', (8038, 8055), False, 'import skimage\n'), ((8069, 8086), 'numpy.array', 'np.array', (['[image]'], {}), '([image])\n', (8077, 8086), True, 'import numpy as np\n'), ((8457, 8481), 'skimage.io.imread', 'skimage.io.imread', (['fname'], {}), '(fname)\n', (8474, 8481), False, 'import skimage\n'), ((8494, 8535), 'skimage.transform.resize', 'skimage.transform.resize', (['image', '(64, 64)'], {}), '(image, (64, 64))\n', (8518, 8535), False, 'import skimage\n'), ((8549, 8566), 'numpy.array', 'np.array', (['[image]'], {}), '([image])\n', (8557, 8566), True, 'import numpy as np\n'), ((700, 753), 'keras.utils.to_categorical', 'keras.utils.to_categorical', (['c'], {'num_classes': 'numClasses'}), '(c, num_classes=numClasses)\n', (726, 753), False, 'import keras\n'), ((787, 817), 'keras.backend.gradients', 'K.gradients', (['loss', 'model.input'], {}), '(loss, model.input)\n', (798, 817), True, 'from keras import backend as K\n'), ((1170, 1189), 'numpy.min', 'np.min', (['grads_value'], {}), '(grads_value)\n', (1176, 1189), True, 'import numpy as np\n'), ((1213, 1232), 'numpy.max', 'np.max', (['grads_value'], {}), '(grads_value)\n', (1219, 1232), 
True, 'import numpy as np\n'), ((1256, 1276), 'numpy.mean', 'np.mean', (['grads_value'], {}), '(grads_value)\n', (1263, 1276), True, 'import numpy as np\n'), ((2517, 2557), 'keras.backend.function', 'K.function', (['[model.input]', '[loss, grads]'], {}), '([model.input], [loss, grads])\n', (2527, 2557), True, 'from keras import backend as K\n'), ((2787, 2803), 'numpy.array', 'np.array', (['[base]'], {}), '([base])\n', (2795, 2803), True, 'import numpy as np\n'), ((3753, 3806), 'keras.utils.to_categorical', 'keras.utils.to_categorical', (['c'], {'num_classes': 'numClasses'}), '(c, num_classes=numClasses)\n', (3779, 3806), False, 'import keras\n'), ((3881, 3911), 'keras.backend.gradients', 'K.gradients', (['loss', 'model.input'], {}), '(loss, model.input)\n', (3892, 3911), True, 'from keras import backend as K\n'), ((5158, 5181), 'numpy.random.random', 'np.random.random', (['shape'], {}), '(shape)\n', (5174, 5181), True, 'import numpy as np\n'), ((5258, 5279), 'numpy.argmax', 'np.argmax', (['prediction'], {}), '(prediction)\n', (5267, 5279), True, 'import numpy as np\n'), ((5710, 5812), 'keras.layers.Conv2D', 'keras.layers.Conv2D', (['(100)'], {'input_shape': 'input_shape', 'kernel_size': '(7, 7)', 'activation': 'input_activation'}), '(100, input_shape=input_shape, kernel_size=(7, 7),\n activation=input_activation)\n', (5729, 5812), False, 'import keras\n'), ((5824, 5867), 'keras.layers.MaxPooling2D', 'keras.layers.MaxPooling2D', ([], {'pool_size': '(2, 2)'}), '(pool_size=(2, 2))\n', (5849, 5867), False, 'import keras\n'), ((5882, 5908), 'keras.layers.Dropout', 'keras.layers.Dropout', (['(0.15)'], {}), '(0.15)\n', (5902, 5908), False, 'import keras\n'), ((5925, 5999), 'keras.layers.Conv2D', 'keras.layers.Conv2D', (['(150)'], {'kernel_size': '(4, 4)', 'activation': 'hidden_activation'}), '(150, kernel_size=(4, 4), activation=hidden_activation)\n', (5944, 5999), False, 'import keras\n'), ((6015, 6058), 'keras.layers.MaxPooling2D', 'keras.layers.MaxPooling2D', ([], 
{'pool_size': '(2, 2)'}), '(pool_size=(2, 2))\n', (6040, 6058), False, 'import keras\n'), ((6073, 6099), 'keras.layers.Dropout', 'keras.layers.Dropout', (['(0.15)'], {}), '(0.15)\n', (6093, 6099), False, 'import keras\n'), ((6116, 6190), 'keras.layers.Conv2D', 'keras.layers.Conv2D', (['(250)'], {'kernel_size': '(4, 4)', 'activation': 'hidden_activation'}), '(250, kernel_size=(4, 4), activation=hidden_activation)\n', (6135, 6190), False, 'import keras\n'), ((6206, 6249), 'keras.layers.MaxPooling2D', 'keras.layers.MaxPooling2D', ([], {'pool_size': '(2, 2)'}), '(pool_size=(2, 2))\n', (6231, 6249), False, 'import keras\n'), ((6264, 6290), 'keras.layers.Dropout', 'keras.layers.Dropout', (['(0.15)'], {}), '(0.15)\n', (6284, 6290), False, 'import keras\n'), ((6307, 6329), 'keras.layers.Flatten', 'keras.layers.Flatten', ([], {}), '()\n', (6327, 6329), False, 'import keras\n'), ((6345, 6387), 'keras.layers.Dense', 'keras.layers.Dense', (['(300)'], {'activation': '"""relu"""'}), "(300, activation='relu')\n", (6363, 6387), False, 'import keras\n'), ((6403, 6460), 'keras.layers.Dense', 'keras.layers.Dense', (['classes'], {'activation': 'output_activation'}), '(classes, activation=output_activation)\n', (6421, 6460), False, 'import keras\n'), ((6935, 6971), 'numpy.array', 'np.array', (['[image_orig, image, faked]'], {}), '([image_orig, image, faked])\n', (6943, 6971), True, 'import numpy as np\n'), ((7038, 7059), 'numpy.argmax', 'np.argmax', (['prediction'], {}), '(prediction)\n', (7047, 7059), True, 'import numpy as np\n'), ((8192, 8213), 'numpy.argmax', 'np.argmax', (['prediction'], {}), '(prediction)\n', (8201, 8213), True, 'import numpy as np\n'), ((432, 443), 'keras.backend.epsilon', 'K.epsilon', ([], {}), '()\n', (441, 443), True, 'from keras import backend as K\n'), ((1101, 1120), 'numpy.max', 'np.max', (['grads_value'], {}), '(grads_value)\n', (1107, 1120), True, 'import numpy as np\n'), ((2334, 2388), 'keras.utils.to_categorical', 'keras.utils.to_categorical', (['c'], 
{'num_classes': 'num_classes'}), '(c, num_classes=num_classes)\n', (2360, 2388), False, 'import keras\n'), ((2426, 2456), 'keras.backend.gradients', 'K.gradients', (['loss', 'model.input'], {}), '(loss, model.input)\n', (2437, 2456), True, 'from keras import backend as K\n'), ((3084, 3103), 'numpy.min', 'np.min', (['grads_value'], {}), '(grads_value)\n', (3090, 3103), True, 'import numpy as np\n'), ((3131, 3150), 'numpy.max', 'np.max', (['grads_value'], {}), '(grads_value)\n', (3137, 3150), True, 'import numpy as np\n'), ((3178, 3198), 'numpy.mean', 'np.mean', (['grads_value'], {}), '(grads_value)\n', (3185, 3198), True, 'import numpy as np\n'), ((4269, 4288), 'numpy.max', 'np.max', (['grads_value'], {}), '(grads_value)\n', (4275, 4288), True, 'import numpy as np\n'), ((4301, 4320), 'numpy.max', 'np.max', (['grads_value'], {}), '(grads_value)\n', (4307, 4320), True, 'import numpy as np\n'), ((4603, 4622), 'numpy.min', 'np.min', (['grads_value'], {}), '(grads_value)\n', (4609, 4622), True, 'import numpy as np\n'), ((4650, 4669), 'numpy.max', 'np.max', (['grads_value'], {}), '(grads_value)\n', (4656, 4669), True, 'import numpy as np\n'), ((4697, 4717), 'numpy.mean', 'np.mean', (['grads_value'], {}), '(grads_value)\n', (4704, 4717), True, 'import numpy as np\n'), ((8742, 8763), 'numpy.argmax', 'np.argmax', (['prediction'], {}), '(prediction)\n', (8751, 8763), True, 'import numpy as np\n'), ((8917, 8936), 'numpy.argmax', 'np.argmax', (['commitee'], {}), '(commitee)\n', (8926, 8936), True, 'import numpy as np\n'), ((1492, 1515), 'numpy.random.random', 'np.random.random', (['shape'], {}), '(shape)\n', (1508, 1515), True, 'import numpy as np\n'), ((7193, 7209), 'numpy.array', 'np.array', (['[data]'], {}), '([data])\n', (7201, 7209), True, 'import numpy as np\n'), ((416, 427), 'keras.backend.square', 'K.square', (['x'], {}), '(x)\n', (424, 427), True, 'from keras import backend as K\n'), ((3004, 3023), 'numpy.max', 'np.max', (['grads_value'], {}), '(grads_value)\n', (3010, 
3023), True, 'import numpy as np\n'), ((4545, 4561), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (4559, 4561), True, 'import numpy as np\n'), ((4389, 4405), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (4403, 4405), True, 'import numpy as np\n'), ((4442, 4458), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (4456, 4458), True, 'import numpy as np\n')] |
from keras.layers import Input,LSTM,Dense
from keras.models import Model,load_model
from keras.utils import plot_model
import pandas as pd
import numpy as np
import os
os.environ["PATH"] += os.pathsep + "E:/AAA/graphviz-2.38/release/bin"
#我的graphviz环境没配好,为了后面的Plot_model
def create_model(n_input, n_output, n_units):
# 训练阶段,所有输入字符集大小、输出字符集大小、cell数量
# encoder
print(n_input)
print(n_output)
print(n_units)
encoder_input = Input(shape=(None, n_input))
# print(encoder_input)
# encoder输入维度n_input为每个时间步的输入xt的维度,这里是用来one-hot的英文字符数
encoder = LSTM(n_units, return_state=True)
# n_units为LSTM单元中每个门的神经元的个数,return_state设为True时才会返回最后时刻的状态h,c
_, encoder_h, encoder_c = encoder(encoder_input)
encoder_state = [encoder_h, encoder_c]
# 保留下来encoder的末状态作为decoder的初始状态
# decoder
decoder_input = Input(shape=(None, n_output))
# decoder的输入维度为中文字符数
decoder = LSTM(n_units, return_sequences=True, return_state=True)
# 训练模型时需要decoder的输出序列来与结果对比优化,故return_sequences也要设为True
decoder_output, _, _ = decoder(decoder_input, initial_state=encoder_state)
# 在训练阶段只需要用到decoder的输出序列,不需要用最终状态h.c
decoder_dense = Dense(n_output, activation='softmax')
decoder_output = decoder_dense(decoder_output)
# 输出序列经过全连接层得到结果
# 生成的训练模型
model = Model([encoder_input, decoder_input], decoder_output)
# 第一个参数为训练模型的输入,包含了encoder和decoder的输入,第二个参数为模型的输出,包含了decoder的输出
# 推理阶段,用于预测过程
# 推断模型—encoder
encoder_infer = Model(encoder_input, encoder_state)
# 推断模型-decoder
decoder_state_input_h = Input(shape=(n_units,))
decoder_state_input_c = Input(shape=(n_units,))
decoder_state_input = [decoder_state_input_h, decoder_state_input_c] # 上个时刻的状态h,c
decoder_infer_output, decoder_infer_state_h, decoder_infer_state_c = decoder(decoder_input,
initial_state=decoder_state_input)
decoder_infer_state = [decoder_infer_state_h, decoder_infer_state_c] # 当前时刻得到的状态
decoder_infer_output = decoder_dense(decoder_infer_output) # 当前时刻的输出
decoder_infer = Model([decoder_input] + decoder_state_input, [decoder_infer_output] + decoder_infer_state)
return model, encoder_infer, decoder_infer
N_UNITS = 300
BATCH_SIZE = 64
EPOCH = 50
NUM_SAMPLES = 1001
data_path = 'data/cmn.txt'
def predict_chinese(source,encoder_inference, decoder_inference, n_steps, features):
#先通过推理encoder获得预测输入序列的隐状态
state = encoder_inference.predict(source)
#第一个字符'\t',为起始标志
predict_seq = np.zeros((1,1,features))
predict_seq[0,0,target_dict['\t']] = 1
output = ''
#开始对encoder获得的隐状态进行推理
#每次循环用上次预测的字符作为输入来预测下一次的字符,直到预测出了终止符
for i in range(n_steps):#n_steps为句子最大长度
#给decoder输入上一个时刻的h,c隐状态,以及上一次的预测字符predict_seq
yhat,h,c = decoder_inference.predict([predict_seq]+state)
#注意,这里的yhat为Dense之后输出的结果,因此与h不同
char_index = np.argmax(yhat[0,-1,:])
char = target_dict_reverse[char_index]
output += char
state = [h,c]#本次状态做为下一次的初始状态继续传递
predict_seq = np.zeros((1,1,features))
predict_seq[0,0,char_index] = 1
if char == '\n':#预测到了终止符则停下来
break
return output
if __name__ == '__main__':
print("helloworld")
# 读取cmn-eng.txt文件
df = pd.read_table(data_path, header=None).iloc[:NUM_SAMPLES, :, ]
df.columns = ['inputs', 'targets']
# 讲每句中文句首加上'\t'作为起始标志,句末加上'\n'作为终止标志
df['targets'] = df['targets'].apply(lambda x: '\t' + x + '\n')
input_texts = df.inputs.values.tolist()
target_texts = df.targets.values.tolist()
# 确定中英文各自包含的字符。df.unique()直接取sum可将unique数组中的各个句子拼接成一个长句子
input_characters = sorted(list(set(df.inputs.unique().sum())))
target_characters = sorted(list(set(df.targets.unique().sum())))
# print(input_characters)
# print(target_characters)
# 1 / 0
# INPUT_LENGTH,输入数据的时刻t的长度,这里为最长的英文句子长度
# OUTPUT_LENGTH,输出数据的时刻t的长度,这里为最长的中文句子长度
# INPUT_FEATURE_LENGTH,每个时刻进入encoder的lstm单元的数据xtxt的维度,这里为英文中出现的字符数
# OUTPUT_FEATURE_LENGTH,每个时刻进入decoder的lstm单元的数据xtxt的维度,这里为中文中出现的字符数
INUPT_LENGTH = max([len(i) for i in input_texts])
OUTPUT_LENGTH = max([len(i) for i in target_texts])
INPUT_FEATURE_LENGTH = len(input_characters)
OUTPUT_FEATURE_LENGTH = len(target_characters)
# encoder输入、decoder输入输出初始化为三维向量
encoder_input = np.zeros((NUM_SAMPLES, INUPT_LENGTH, INPUT_FEATURE_LENGTH))
decoder_input = np.zeros((NUM_SAMPLES, OUTPUT_LENGTH, OUTPUT_FEATURE_LENGTH))
decoder_output = np.zeros((NUM_SAMPLES, OUTPUT_LENGTH, OUTPUT_FEATURE_LENGTH))
print(encoder_input.shape)
print(decoder_input.shape)
print(decoder_output.shape)
# 其中input_dict和target_dict为中英文字符与其索引的对应词典;input_dict_reverse和target_dict_reverse与之相反,索引为键字符为值:
input_dict = {char: index for index, char in enumerate(input_characters)}
input_dict_reverse = {index: char for index, char in enumerate(input_characters)}
target_dict = {char: index for index, char in enumerate(target_characters)}
target_dict_reverse = {index: char for index, char in enumerate(target_characters)}
# 对句子进行字符级one - hot编码,将输入输出数据向量化
# encoder的输入向量one-hot
for seq_index, seq in enumerate(input_texts):
for char_index, char in enumerate(seq):
encoder_input[seq_index, char_index, input_dict[char]] = 1
# decoder的输入输出向量one-hot,训练模型时decoder的输入要比输出晚一个时间步,这样才能对输出监督
for seq_index, seq in enumerate(target_texts):
for char_index, char in enumerate(seq):
decoder_input[seq_index, char_index, target_dict[char]] = 1.0
if char_index > 0:
decoder_output[seq_index, char_index - 1, target_dict[char]] = 1.0
# print(encoder_input)
# print(decoder_output)
# 输出一些结果
print(''.join([input_dict_reverse[np.argmax(i)] for i in encoder_input[0] if max(i) != 0]))
print(''.join([target_dict_reverse[np.argmax(i)] for i in decoder_output[0] if max(i) != 0]))
print(''.join([target_dict_reverse[np.argmax(i)] for i in decoder_input[0] if max(i) != 0]))
# 创建模型
model_train, encoder_infer, decoder_infer = create_model(INPUT_FEATURE_LENGTH, OUTPUT_FEATURE_LENGTH, N_UNITS)
# 查看模型结构
plot_model(to_file='model.png', model=model_train, show_shapes=True)
plot_model(to_file='encoder.png', model=encoder_infer, show_shapes=True)
plot_model(to_file='decoder.png', model=decoder_infer, show_shapes=True)
# In [17]:
model_train.compile(optimizer='rmsprop', loss='categorical_crossentropy')
# In [18]:
print(model_train.summary())
print(encoder_infer.summary())
print(decoder_infer.summary())
model_train.fit([encoder_input, decoder_input], decoder_output, batch_size=BATCH_SIZE, epochs=EPOCH,
validation_split=0.2)
for i in range(900, 1000):
test = encoder_input[i:i + 1, :, :] # i:i+1保持数组是三维
out = predict_chinese(test, encoder_infer, decoder_infer, OUTPUT_LENGTH, OUTPUT_FEATURE_LENGTH)
# print(input_texts[i],'\n---\n',target_texts[i],'\n---\n',out)
print(input_texts[i])
print(out)
| [
"numpy.argmax",
"keras.layers.LSTM",
"numpy.zeros",
"keras.models.Model",
"pandas.read_table",
"keras.utils.plot_model",
"keras.layers.Dense",
"keras.layers.Input"
] | [((448, 476), 'keras.layers.Input', 'Input', ([], {'shape': '(None, n_input)'}), '(shape=(None, n_input))\n', (453, 476), False, 'from keras.layers import Input, LSTM, Dense\n'), ((576, 608), 'keras.layers.LSTM', 'LSTM', (['n_units'], {'return_state': '(True)'}), '(n_units, return_state=True)\n', (580, 608), False, 'from keras.layers import Input, LSTM, Dense\n'), ((842, 871), 'keras.layers.Input', 'Input', ([], {'shape': '(None, n_output)'}), '(shape=(None, n_output))\n', (847, 871), False, 'from keras.layers import Input, LSTM, Dense\n'), ((911, 966), 'keras.layers.LSTM', 'LSTM', (['n_units'], {'return_sequences': '(True)', 'return_state': '(True)'}), '(n_units, return_sequences=True, return_state=True)\n', (915, 966), False, 'from keras.layers import Input, LSTM, Dense\n'), ((1167, 1204), 'keras.layers.Dense', 'Dense', (['n_output'], {'activation': '"""softmax"""'}), "(n_output, activation='softmax')\n", (1172, 1204), False, 'from keras.layers import Input, LSTM, Dense\n'), ((1304, 1357), 'keras.models.Model', 'Model', (['[encoder_input, decoder_input]', 'decoder_output'], {}), '([encoder_input, decoder_input], decoder_output)\n', (1309, 1357), False, 'from keras.models import Model, load_model\n'), ((1484, 1519), 'keras.models.Model', 'Model', (['encoder_input', 'encoder_state'], {}), '(encoder_input, encoder_state)\n', (1489, 1519), False, 'from keras.models import Model, load_model\n'), ((1568, 1591), 'keras.layers.Input', 'Input', ([], {'shape': '(n_units,)'}), '(shape=(n_units,))\n', (1573, 1591), False, 'from keras.layers import Input, LSTM, Dense\n'), ((1620, 1643), 'keras.layers.Input', 'Input', ([], {'shape': '(n_units,)'}), '(shape=(n_units,))\n', (1625, 1643), False, 'from keras.layers import Input, LSTM, Dense\n'), ((2124, 2218), 'keras.models.Model', 'Model', (['([decoder_input] + decoder_state_input)', '([decoder_infer_output] + decoder_infer_state)'], {}), '([decoder_input] + decoder_state_input, [decoder_infer_output] +\n 
decoder_infer_state)\n', (2129, 2218), False, 'from keras.models import Model, load_model\n'), ((2556, 2582), 'numpy.zeros', 'np.zeros', (['(1, 1, features)'], {}), '((1, 1, features))\n', (2564, 2582), True, 'import numpy as np\n'), ((4383, 4442), 'numpy.zeros', 'np.zeros', (['(NUM_SAMPLES, INUPT_LENGTH, INPUT_FEATURE_LENGTH)'], {}), '((NUM_SAMPLES, INUPT_LENGTH, INPUT_FEATURE_LENGTH))\n', (4391, 4442), True, 'import numpy as np\n'), ((4463, 4524), 'numpy.zeros', 'np.zeros', (['(NUM_SAMPLES, OUTPUT_LENGTH, OUTPUT_FEATURE_LENGTH)'], {}), '((NUM_SAMPLES, OUTPUT_LENGTH, OUTPUT_FEATURE_LENGTH))\n', (4471, 4524), True, 'import numpy as np\n'), ((4546, 4607), 'numpy.zeros', 'np.zeros', (['(NUM_SAMPLES, OUTPUT_LENGTH, OUTPUT_FEATURE_LENGTH)'], {}), '((NUM_SAMPLES, OUTPUT_LENGTH, OUTPUT_FEATURE_LENGTH))\n', (4554, 4607), True, 'import numpy as np\n'), ((6221, 6289), 'keras.utils.plot_model', 'plot_model', ([], {'to_file': '"""model.png"""', 'model': 'model_train', 'show_shapes': '(True)'}), "(to_file='model.png', model=model_train, show_shapes=True)\n", (6231, 6289), False, 'from keras.utils import plot_model\n'), ((6294, 6366), 'keras.utils.plot_model', 'plot_model', ([], {'to_file': '"""encoder.png"""', 'model': 'encoder_infer', 'show_shapes': '(True)'}), "(to_file='encoder.png', model=encoder_infer, show_shapes=True)\n", (6304, 6366), False, 'from keras.utils import plot_model\n'), ((6371, 6443), 'keras.utils.plot_model', 'plot_model', ([], {'to_file': '"""decoder.png"""', 'model': 'decoder_infer', 'show_shapes': '(True)'}), "(to_file='decoder.png', model=decoder_infer, show_shapes=True)\n", (6381, 6443), False, 'from keras.utils import plot_model\n'), ((2933, 2958), 'numpy.argmax', 'np.argmax', (['yhat[0, -1, :]'], {}), '(yhat[0, -1, :])\n', (2942, 2958), True, 'import numpy as np\n'), ((3090, 3116), 'numpy.zeros', 'np.zeros', (['(1, 1, features)'], {}), '((1, 1, features))\n', (3098, 3116), True, 'import numpy as np\n'), ((3312, 3349), 'pandas.read_table', 
'pd.read_table', (['data_path'], {'header': 'None'}), '(data_path, header=None)\n', (3325, 3349), True, 'import pandas as pd\n'), ((5824, 5836), 'numpy.argmax', 'np.argmax', (['i'], {}), '(i)\n', (5833, 5836), True, 'import numpy as np\n'), ((5921, 5933), 'numpy.argmax', 'np.argmax', (['i'], {}), '(i)\n', (5930, 5933), True, 'import numpy as np\n'), ((6019, 6031), 'numpy.argmax', 'np.argmax', (['i'], {}), '(i)\n', (6028, 6031), True, 'import numpy as np\n')] |
import face_recognition
import cv2
import numpy as np
video_capture = cv2.VideoCapture(0)
#loading the first criminal image who is me enock
criminal1_image = face_recognition.load_image_file("enock.jpg")
criminal1_face_encoding = face_recognition.face_encodings(criminal1_image)[0]
# Loading the second criminal image who is my team_project member
criminal2_image = face_recognition.load_image_file("faith.jpg")
criminal2_face_encoding = face_recognition.face_encodings(criminal2_image)[0]
# creating arrays of the two criminals
criminals_face_encodings = [
criminal1_face_encoding,
criminal2_face_encoding
]
criminals_names = [
"ENOCK (wanted hacker)",
"FEI"
]
# Initializing some variables
face_locations = []
face_encodings = []
face_names = []
process_this_frame = True
while True:
# Grabing a single frame of video
ret, frame = video_capture.read()
# Resizing frame of video to 1/4 size for faster face recognition processing
small_frame = cv2.resize(frame, (0, 0), fx=0.25, fy=0.25)
# Converting the image from BGR color (which OpenCV uses) to RGB color (which face_recognition uses)
rgb_small_frame = small_frame[:, :, ::-1]
# Only process every other frame of video to save time
if process_this_frame:
# Finding all the faces and face encodings in the current frame of video
face_locations = face_recognition.face_locations(rgb_small_frame)
face_encodings = face_recognition.face_encodings(rgb_small_frame, face_locations)
face_names = []
for face_encoding in face_encodings:
# See if the face is a match for the known face(s)
matches = face_recognition.compare_faces(criminals_face_encodings, face_encoding)
name = "citizen"
#using the known face with the smallest distance to the new face
face_distances = face_recognition.face_distance(criminals_face_encodings, face_encoding)
best_match_index = np.argmin(face_distances)
if matches[best_match_index]:
name = criminals_names[best_match_index]
face_names.append(name)
process_this_frame = not process_this_frame
# Displaying the video results
for (top, right, bottom, left), name in zip(face_locations, face_names):
# Scaling back up face locations since the frame we detected in was scaled to 1/4 size
top *= 4
right *= 4
bottom *= 4
left *= 4
# Drawing a box around the faces
cv2.rectangle(frame, (left, top), (right, bottom), (0, 0, 255), 2)
# Drawing a label with a name below the face
cv2.rectangle(frame, (left, bottom - 35), (right, bottom), (0, 0, 255), cv2.FILLED)
font = cv2.FONT_HERSHEY_DUPLEX
cv2.putText(frame, name, (left + 6, bottom - 6), font, 1.0, (255, 255, 255), 1)
# Displaying the resulting image
cv2.imshow('KENYA POLICE CCTV CRIMINAL LOCATOR', frame)
# if you Hit 't' on the keyboard it will quite
if cv2.waitKey(1) & 0xFF == ord('t'):
break
# Releasing handle to the webcam
video_capture.release()
cv2.destroyAllWindows() | [
"cv2.putText",
"face_recognition.face_distance",
"face_recognition.compare_faces",
"cv2.waitKey",
"face_recognition.face_encodings",
"face_recognition.load_image_file",
"cv2.imshow",
"numpy.argmin",
"cv2.VideoCapture",
"cv2.rectangle",
"face_recognition.face_locations",
"cv2.destroyAllWindows"... | [((71, 90), 'cv2.VideoCapture', 'cv2.VideoCapture', (['(0)'], {}), '(0)\n', (87, 90), False, 'import cv2\n'), ((160, 205), 'face_recognition.load_image_file', 'face_recognition.load_image_file', (['"""enock.jpg"""'], {}), "('enock.jpg')\n", (192, 205), False, 'import face_recognition\n'), ((369, 414), 'face_recognition.load_image_file', 'face_recognition.load_image_file', (['"""faith.jpg"""'], {}), "('faith.jpg')\n", (401, 414), False, 'import face_recognition\n'), ((3126, 3149), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (3147, 3149), False, 'import cv2\n'), ((232, 280), 'face_recognition.face_encodings', 'face_recognition.face_encodings', (['criminal1_image'], {}), '(criminal1_image)\n', (263, 280), False, 'import face_recognition\n'), ((441, 489), 'face_recognition.face_encodings', 'face_recognition.face_encodings', (['criminal2_image'], {}), '(criminal2_image)\n', (472, 489), False, 'import face_recognition\n'), ((984, 1027), 'cv2.resize', 'cv2.resize', (['frame', '(0, 0)'], {'fx': '(0.25)', 'fy': '(0.25)'}), '(frame, (0, 0), fx=0.25, fy=0.25)\n', (994, 1027), False, 'import cv2\n'), ((2904, 2959), 'cv2.imshow', 'cv2.imshow', (['"""KENYA POLICE CCTV CRIMINAL LOCATOR"""', 'frame'], {}), "('KENYA POLICE CCTV CRIMINAL LOCATOR', frame)\n", (2914, 2959), False, 'import cv2\n'), ((1373, 1421), 'face_recognition.face_locations', 'face_recognition.face_locations', (['rgb_small_frame'], {}), '(rgb_small_frame)\n', (1404, 1421), False, 'import face_recognition\n'), ((1447, 1511), 'face_recognition.face_encodings', 'face_recognition.face_encodings', (['rgb_small_frame', 'face_locations'], {}), '(rgb_small_frame, face_locations)\n', (1478, 1511), False, 'import face_recognition\n'), ((2522, 2588), 'cv2.rectangle', 'cv2.rectangle', (['frame', '(left, top)', '(right, bottom)', '(0, 0, 255)', '(2)'], {}), '(frame, (left, top), (right, bottom), (0, 0, 255), 2)\n', (2535, 2588), False, 'import cv2\n'), ((2651, 2739), 
'cv2.rectangle', 'cv2.rectangle', (['frame', '(left, bottom - 35)', '(right, bottom)', '(0, 0, 255)', 'cv2.FILLED'], {}), '(frame, (left, bottom - 35), (right, bottom), (0, 0, 255), cv2\n .FILLED)\n', (2664, 2739), False, 'import cv2\n'), ((2782, 2861), 'cv2.putText', 'cv2.putText', (['frame', 'name', '(left + 6, bottom - 6)', 'font', '(1.0)', '(255, 255, 255)', '(1)'], {}), '(frame, name, (left + 6, bottom - 6), font, 1.0, (255, 255, 255), 1)\n', (2793, 2861), False, 'import cv2\n'), ((1667, 1738), 'face_recognition.compare_faces', 'face_recognition.compare_faces', (['criminals_face_encodings', 'face_encoding'], {}), '(criminals_face_encodings, face_encoding)\n', (1697, 1738), False, 'import face_recognition\n'), ((1875, 1946), 'face_recognition.face_distance', 'face_recognition.face_distance', (['criminals_face_encodings', 'face_encoding'], {}), '(criminals_face_encodings, face_encoding)\n', (1905, 1946), False, 'import face_recognition\n'), ((1978, 2003), 'numpy.argmin', 'np.argmin', (['face_distances'], {}), '(face_distances)\n', (1987, 2003), True, 'import numpy as np\n'), ((3019, 3033), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (3030, 3033), False, 'import cv2\n')] |
"""Mujoco CartPole environment from https://github.com/kchua/handful-of-trials."""
import os
import numpy as np
import torch
from gym import utils
from rllib.reward.state_action_reward import StateActionReward
class CartPoleReward(StateActionReward):
r"""A cart-pole reward model implementation.
The reward function is computed as:
r(s, a) = e^(-(end-effector / length)^2) + action_reward.
The action reward is computed from the state-action reward.
"""
dim_action = (1,)
def __init__(self, ctrl_cost_weight, pendulum_length):
super().__init__(ctrl_cost_weight=ctrl_cost_weight)
self.pendulum_length = pendulum_length
def copy(self):
"""Get copy of reward model."""
return CartPoleReward(
ctrl_cost_weight=self.ctrl_cost_weight, pendulum_length=self.pendulum_length
)
def state_reward(self, state, next_state=None):
"""Get reward that corresponds to the states."""
end_effector = self._get_ee_pos(state[..., 0], state[..., 1])
reward_state = torch.exp(
-torch.square(end_effector).sum(-1) / (self.pendulum_length ** 2)
)
return reward_state
def _get_ee_pos(self, x0, theta):
sin, cos = torch.sin(theta), torch.cos(theta)
return torch.stack(
[x0 - self.pendulum_length * sin, -self.pendulum_length * (1 + cos)], -1
)
try:
from gym.envs.mujoco import mujoco_env
class MBCartPoleEnv(mujoco_env.MujocoEnv, utils.EzPickle):
"""CartPole environment for MBRL control.
References
----------
<NAME>., <NAME>., <NAME>., & <NAME>. (2018).
Deep reinforcement learning in a handful of trials using probabilistic dynamics
models. NeuRIPS.
https://github.com/kchua/handful-of-trials
"""
def __init__(self, ctrl_cost_weight=0.01, pendulum_length=0.6):
self._reward_model = CartPoleReward(
pendulum_length=pendulum_length, ctrl_cost_weight=ctrl_cost_weight
)
utils.EzPickle.__init__(self)
dir_path = os.path.dirname(os.path.realpath(__file__))
mujoco_env.MujocoEnv.__init__(self, f"{dir_path}/assets/cartpole.xml", 2)
def step(self, action):
"""See `AbstractEnvironment.step()'."""
ob = self._get_obs()
reward = self._reward_model(ob, action)[0].item()
self.do_simulation(action, self.frame_skip)
next_obs = self._get_obs()
done = False
return next_obs, reward, done, self._reward_model.info
def reward_model(self):
"""Get reward model."""
return self._reward_model.copy()
def reset_model(self):
"""Reset the model."""
qpos = self.init_qpos + np.random.normal(0, 0.1, np.shape(self.init_qpos))
qvel = self.init_qvel + np.random.normal(0, 0.1, np.shape(self.init_qvel))
self.set_state(qpos, qvel)
return self._get_obs()
def _get_obs(self):
return np.concatenate([self.sim.data.qpos, self.sim.data.qvel]).ravel()
def viewer_setup(self):
"""Set-up the viewer."""
v = self.viewer
v.cam.trackbodyid = 0
v.cam.distance = self.model.stat.extent
except Exception: # Mujoco not installed.
pass
| [
"gym.envs.mujoco.mujoco_env.MujocoEnv.__init__",
"torch.stack",
"numpy.concatenate",
"os.path.realpath",
"torch.square",
"numpy.shape",
"torch.cos",
"torch.sin",
"gym.utils.EzPickle.__init__"
] | [((1299, 1388), 'torch.stack', 'torch.stack', (['[x0 - self.pendulum_length * sin, -self.pendulum_length * (1 + cos)]', '(-1)'], {}), '([x0 - self.pendulum_length * sin, -self.pendulum_length * (1 +\n cos)], -1)\n', (1310, 1388), False, 'import torch\n'), ((1249, 1265), 'torch.sin', 'torch.sin', (['theta'], {}), '(theta)\n', (1258, 1265), False, 'import torch\n'), ((1267, 1283), 'torch.cos', 'torch.cos', (['theta'], {}), '(theta)\n', (1276, 1283), False, 'import torch\n'), ((2071, 2100), 'gym.utils.EzPickle.__init__', 'utils.EzPickle.__init__', (['self'], {}), '(self)\n', (2094, 2100), False, 'from gym import utils\n'), ((2180, 2253), 'gym.envs.mujoco.mujoco_env.MujocoEnv.__init__', 'mujoco_env.MujocoEnv.__init__', (['self', 'f"""{dir_path}/assets/cartpole.xml"""', '(2)'], {}), "(self, f'{dir_path}/assets/cartpole.xml', 2)\n", (2209, 2253), False, 'from gym.envs.mujoco import mujoco_env\n'), ((2140, 2166), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (2156, 2166), False, 'import os\n'), ((2864, 2888), 'numpy.shape', 'np.shape', (['self.init_qpos'], {}), '(self.init_qpos)\n', (2872, 2888), True, 'import numpy as np\n'), ((2951, 2975), 'numpy.shape', 'np.shape', (['self.init_qvel'], {}), '(self.init_qvel)\n', (2959, 2975), True, 'import numpy as np\n'), ((3099, 3155), 'numpy.concatenate', 'np.concatenate', (['[self.sim.data.qpos, self.sim.data.qvel]'], {}), '([self.sim.data.qpos, self.sim.data.qvel])\n', (3113, 3155), True, 'import numpy as np\n'), ((1088, 1114), 'torch.square', 'torch.square', (['end_effector'], {}), '(end_effector)\n', (1100, 1114), False, 'import torch\n')] |
from math import sqrt
from networkx import DiGraph, draw_networkx_edges, draw_networkx_nodes
import numpy as np
from pandas import read_csv
import sys
import matplotlib.pyplot
sys.path.append("../../")
sys.path.append("../../../cspy")
from vrpy.main import VehicleRoutingProblem
import logging
logger = logging.getLogger(__name__)
class CordeauNode:
"""Stores coordinates of a node of Cordeau's instances."""
def __init__(self, values):
# Node ID
self.name = np.uint32(values[0]).item()
# x coordinate
self.x = np.float64(values[1]).item()
# y coordinate
self.y = np.float64(values[2]).item()
# demand
self.demand = np.uint32(values[4]).item()
class DataSet:
"""Reads a Cordeau instance and stores the network as DiGraph.
Args:
path (str) : Path to data folder.
instance_name (str) : Name of instance to read.
n_vertices (int, optional):
Only first n_vertices are read.
Defaults to None.
"""
def __init__(self, path, instance_name, n_vertices=None):
# Read vehicle capacity
with open(path + instance_name) as fp:
for i, line in enumerate(fp):
if i == 0:
self.n_customers = int(line.split()[2])
if n_vertices is not None:
self.n_vertices = min(self.n_customers, n_vertices)
else:
self.n_vertices = self.n_customers
if i == 2:
self.max_load = int(line.split()[1])
fp.close()
# Create network and store name + capacity
self.G = DiGraph(name=instance_name, vehicle_capacity=self.max_load,)
# Read nodes from file
df_cordeau = read_csv(path + instance_name, sep="\t", skiprows=4)
# Scan each line of the file and add nodes to the network
for line in df_cordeau.itertuples():
values = line[1].split()
node = CordeauNode(values)
if node.name <= self.n_vertices:
self.G.add_node(
node.name, x=node.x, y=node.y, demand=node.demand, customer=True
)
if node.name > self.n_customers:
self.G.add_node(
node.name, x=node.x, y=node.y, demand=node.demand, depot_from=True
)
self.G.add_node(
str(node.name) + "_",
x=node.x,
y=node.y,
demand=node.demand,
depot_to=True,
)
# Add Source and Sink
self.G.add_node("Source", x=0, y=0, demand=0)
self.G.add_node("Sink", x=0, y=0, demand=0)
# Add the edges, the graph is complete
for u in self.G.nodes():
if "customer" in self.G.nodes[u]:
for v in self.G.nodes():
if "customer" in self.G.nodes[v] and u != v:
self.G.add_edge(u, v, cost=round(self.distance(u, v), 1))
if "depot_to" in self.G.nodes[u]:
self.G.add_edge(u, "Sink", cost=0)
for v in self.G.nodes():
if "customer" in self.G.nodes[v]:
self.G.add_edge(v, u, cost=round(self.distance(v, u), 1))
if "depot_from" in self.G.nodes[u]:
self.G.add_edge("Source", u, cost=0)
for v in self.G.nodes():
if "customer" in self.G.nodes[v]:
self.G.add_edge(u, v, cost=round(self.distance(u, v), 1))
def distance(self, u, v):
"""2D Euclidian distance between two nodes.
Args:
u (Node) : tail node.
v (Node) : head node.
Returns:
float : Euclidian distance between u and v
"""
delta_x = self.G.nodes[u]["x"] - self.G.nodes[v]["x"]
delta_y = self.G.nodes[u]["y"] - self.G.nodes[v]["y"]
return sqrt(delta_x ** 2 + delta_y ** 2)
def solve(self, initial_routes=None, cspy=False):
"""Instantiates instance as VRP and solves."""
if cspy:
self.G.graph["subproblem"] = "cspy"
else:
self.G.graph["subproblem"] = "lp"
print(self.G.graph["name"], self.G.graph["subproblem"])
print("===========")
prob = VehicleRoutingProblem(self.G, load_capacity=self.max_load,)
prob.solve(initial_routes=initial_routes, cspy=cspy)
self.best_value, self.best_routes = prob.best_value, prob.best_routes
def plot_solution(self):
"""Plots the solution after optimization."""
# Store coordinates
pos = {}
for v in self.G.nodes():
pos[v] = np.array([self.G.nodes[v]["x"], self.G.nodes[v]["y"]])
# Draw customers
draw_networkx_nodes(
self.G, pos, node_size=10,
)
# Hide Source and Sink
draw_networkx_nodes(
self.G, pos, nodelist=["Source", "Sink"], node_size=0,
)
# Draw depots
draw_networkx_nodes(
self.G,
pos,
nodelist=[v for v in self.G.nodes() if "customer" not in self.G.nodes[v]],
node_size=30,
node_color="r",
)
# Draw best routes
options = {
"node_color": "blue",
"node_size": 10,
"line_color": "grey",
"linewidths": 0,
"width": 0.1,
}
for r in self.best_routes:
r.remove_edge("Source", list(r.successors("Source"))[0])
r.remove_edge(list(r.predecessors("Sink"))[0], "Sink")
draw_networkx_edges(r, pos, **options)
# matplotlib.pyplot.show() # Display best routes
# Save best routes as image
matplotlib.pyplot.savefig("%s.pdf" % self.G.graph["name"])
if __name__ == "__main__":
data = DataSet(path="./data/", instance_name="p01", n_vertices=8)
ini = []
# initial solution
# ugly, needs more genericity
for v in data.G.nodes():
if "customer" in data.G.nodes[v]:
ini.append(["Source", 51, v, str(51) + "_", "Sink"])
data.solve(initial_routes=ini, cspy=False)
data.plot_solution()
| [
"sys.path.append",
"numpy.uint32",
"networkx.draw_networkx_edges",
"math.sqrt",
"vrpy.main.VehicleRoutingProblem",
"pandas.read_csv",
"networkx.draw_networkx_nodes",
"numpy.array",
"numpy.float64",
"networkx.DiGraph",
"logging.getLogger"
] | [((177, 202), 'sys.path.append', 'sys.path.append', (['"""../../"""'], {}), "('../../')\n", (192, 202), False, 'import sys\n'), ((203, 235), 'sys.path.append', 'sys.path.append', (['"""../../../cspy"""'], {}), "('../../../cspy')\n", (218, 235), False, 'import sys\n'), ((306, 333), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (323, 333), False, 'import logging\n'), ((1685, 1744), 'networkx.DiGraph', 'DiGraph', ([], {'name': 'instance_name', 'vehicle_capacity': 'self.max_load'}), '(name=instance_name, vehicle_capacity=self.max_load)\n', (1692, 1744), False, 'from networkx import DiGraph, draw_networkx_edges, draw_networkx_nodes\n'), ((1799, 1851), 'pandas.read_csv', 'read_csv', (['(path + instance_name)'], {'sep': '"""\t"""', 'skiprows': '(4)'}), "(path + instance_name, sep='\\t', skiprows=4)\n", (1807, 1851), False, 'from pandas import read_csv\n'), ((4025, 4058), 'math.sqrt', 'sqrt', (['(delta_x ** 2 + delta_y ** 2)'], {}), '(delta_x ** 2 + delta_y ** 2)\n', (4029, 4058), False, 'from math import sqrt\n'), ((4402, 4460), 'vrpy.main.VehicleRoutingProblem', 'VehicleRoutingProblem', (['self.G'], {'load_capacity': 'self.max_load'}), '(self.G, load_capacity=self.max_load)\n', (4423, 4460), False, 'from vrpy.main import VehicleRoutingProblem\n'), ((4872, 4918), 'networkx.draw_networkx_nodes', 'draw_networkx_nodes', (['self.G', 'pos'], {'node_size': '(10)'}), '(self.G, pos, node_size=10)\n', (4891, 4918), False, 'from networkx import DiGraph, draw_networkx_edges, draw_networkx_nodes\n'), ((4981, 5055), 'networkx.draw_networkx_nodes', 'draw_networkx_nodes', (['self.G', 'pos'], {'nodelist': "['Source', 'Sink']", 'node_size': '(0)'}), "(self.G, pos, nodelist=['Source', 'Sink'], node_size=0)\n", (5000, 5055), False, 'from networkx import DiGraph, draw_networkx_edges, draw_networkx_nodes\n'), ((4783, 4837), 'numpy.array', 'np.array', (["[self.G.nodes[v]['x'], self.G.nodes[v]['y']]"], {}), "([self.G.nodes[v]['x'], self.G.nodes[v]['y']])\n", 
(4791, 4837), True, 'import numpy as np\n'), ((5710, 5748), 'networkx.draw_networkx_edges', 'draw_networkx_edges', (['r', 'pos'], {}), '(r, pos, **options)\n', (5729, 5748), False, 'from networkx import DiGraph, draw_networkx_edges, draw_networkx_nodes\n'), ((489, 509), 'numpy.uint32', 'np.uint32', (['values[0]'], {}), '(values[0])\n', (498, 509), True, 'import numpy as np\n'), ((557, 578), 'numpy.float64', 'np.float64', (['values[1]'], {}), '(values[1])\n', (567, 578), True, 'import numpy as np\n'), ((626, 647), 'numpy.float64', 'np.float64', (['values[2]'], {}), '(values[2])\n', (636, 647), True, 'import numpy as np\n'), ((694, 714), 'numpy.uint32', 'np.uint32', (['values[4]'], {}), '(values[4])\n', (703, 714), True, 'import numpy as np\n')] |
import tensorflow as tf
import numpy as np
from tensorflow.keras.layers import Dense
from tensorflow.keras.models import Sequential
from pget import Agent
tf.enable_eager_execution()
model = Sequential([
Dense(32, input_shape=[8], activation="relu"),
Dense(32, activation="relu"),
Dense(4, activation="softmax"),
])
agent = Agent(model, action_type="discrete")
agent.model.summary()
s = np.random.random(size=[8])
a = agent.get_action(s)
agent.train(1)
| [
"tensorflow.keras.layers.Dense",
"numpy.random.random",
"pget.Agent",
"tensorflow.enable_eager_execution"
] | [((158, 185), 'tensorflow.enable_eager_execution', 'tf.enable_eager_execution', ([], {}), '()\n', (183, 185), True, 'import tensorflow as tf\n'), ((335, 371), 'pget.Agent', 'Agent', (['model'], {'action_type': '"""discrete"""'}), "(model, action_type='discrete')\n", (340, 371), False, 'from pget import Agent\n'), ((399, 425), 'numpy.random.random', 'np.random.random', ([], {'size': '[8]'}), '(size=[8])\n', (415, 425), True, 'import numpy as np\n'), ((210, 255), 'tensorflow.keras.layers.Dense', 'Dense', (['(32)'], {'input_shape': '[8]', 'activation': '"""relu"""'}), "(32, input_shape=[8], activation='relu')\n", (215, 255), False, 'from tensorflow.keras.layers import Dense\n'), ((259, 287), 'tensorflow.keras.layers.Dense', 'Dense', (['(32)'], {'activation': '"""relu"""'}), "(32, activation='relu')\n", (264, 287), False, 'from tensorflow.keras.layers import Dense\n'), ((291, 321), 'tensorflow.keras.layers.Dense', 'Dense', (['(4)'], {'activation': '"""softmax"""'}), "(4, activation='softmax')\n", (296, 321), False, 'from tensorflow.keras.layers import Dense\n')] |
# AUTOGENERATED! DO NOT EDIT! File to edit: 42-wav-embedding-preprocess.ipynb (unless otherwise specified).
__all__ = ['to_millseconds', 'short_warning', 'preprocess_audio_segments_csv', 'add_audio_embeddings_info',
'check_label', 'write_nd_parquet', 'get_fixed_length_segments', 'short_embedding_csv_load']
# Cell
# modeling packages
from transformers import Wav2Vec2Processor, Wav2Vec2Model
import soundfile as sf
import torch
import librosa
import warnings
import difflib
# data science packages
import pandas as pd
import numpy as np
#saving data format files
import pyarrow as pa
import pyarrow.parquet as pq
# other python packages
import os.path
import glob
import re
# Cell
def to_millseconds(time):
'''
Function to_millseconds: converts time with timestamp string of format '\d\d:\d\d.\d*' to milliseconds
Inputs: time: String in required format
Outputs: integer of converted time in milliseconds
'''
if isinstance(time, str)==False:
raise TypeError('The input datatype of {0} must be a string to use to_milliseconds.'.format(time))
#Timestamp pattern to use later
ts_target = re.compile('\d{2}:\d{2}\.\d{1,}')
if ts_target.match(time) is None:
raise RuntimeError("The input of {0} does not match the format of \d\d:\d\d.\d*. Fix this before continuing.")
#get split pieces
sp = re.split(":|\.", time)
#get milliseconds
ms = int(sp[0])*60*1000
ms = ms + float(sp[1] + '.' + sp[2])*1000
ms = int(ms)
return ms
# Cell
def short_warning(message):
'''
Function short_warning: shortened version of warnings.warn_explicit to remove unnecessary echo
Input: message to be printed as warning message
Output: warning
'''
warnings.warn_explicit(message, UserWarning, '', 0)
# Internal Cell
def _fix_added_timestamp(row_info):
'''
Function _fix_added_timestamp: validates timestamps and tries to fix them; returns a df with column
`fatal_error` included. This is a pandas helper function and should not be applied directly without .apply.
Input: row_info: pandas Series corresponding to a single row
Returns: row_info with corrected timestamps or same timestamp with a new column 'fatal_error' with 1 if the
timestamp could not be successfully converted.
'''
#Timestamp pattern to use later
ts_target = re.compile('\d{2}:\d{2}\.\d{3}')
#Keep count of fatal errors
fatal_errors = 0
for ts_type in ['start_timestamp', 'end_timestamp']:
#Make sure it's a string
if isinstance(row_info[ts_type], str)==False:
short_warning('{0}: Row {1} has a {2} that is not a string with value {3}. Cannot automatically fix.'
.format(row_info['id'], row_info.name, ts_type, row_info[ts_type]))
fatal_errors = fatal_errors + 1
continue
#See if it has too many segments
ts_pieces = re.split(":|\.", row_info[ts_type])
if len(ts_pieces) != 3:
if len(ts_pieces) == 4:
short_warning('{0}: Row {1} {2} with value {3} has 4 time parts instead of 3. Automatically fixing...'
.format(row_info['id'], row_info.name, ts_type, row_info[ts_type]))
ts_pieces = ts_pieces[1:4]
row_info[ts_type] = ts_pieces[0] + ':' + ts_pieces[1] + '.' + ts_pieces[2]
else:
short_warning('{0}: Row {1} with value {2} has {3} pieces in {4} and cannot be fixed automatically. Please amend.'
.format(row_info['id'], row_info.name, row_info[ts_type], len(ts_pieces), ts_type))
fatal_errors = fatal_errors + 1
continue
#If it's perfect, let's just be done
if ts_target.match(row_info[ts_type]) is not None:
continue
#Otherwise, let's get it into the right format
ts_pieces[0] = ts_pieces[0].rjust(2,'0')
ts_pieces[1] = ts_pieces[1].rjust(2,'0')
ts_pieces[2] = ts_pieces[2].ljust(3,'0')
#Update values
short_warning('{0}: Row {1} {2} has the incorrect format of {3}. Automatically fixing...'
.format(row_info['id'], row_info.name, ts_type, row_info[ts_type]))
row_info[ts_type] = ts_pieces[0] + ':' + ts_pieces[1] + '.' + ts_pieces[2]
#Save fatal errors
row_info['fatal_errors'] = fatal_errors
return row_info
# Cell
def preprocess_audio_segments_csv(csv_df, duration_max=15000):
    '''
    Function preprocess_audio_segments_csv: pre-processes manually-entered timestamps to ensure correct format
    Inputs: csv_df: original dataframe with at least columns start_timestamp, end_timestamp, and id
            duration_max (default 15000): maximum length allowed for an utterance, in milliseconds
    Returns: pandas dataframe with corrected or dropped timestamps, corresponding timestamps in ms, and duration
    '''
    #Drop unwanted "Unnamed" columns (artifacts of saving with an index column)
    drop_cols = [drop_col for drop_col in csv_df.columns if drop_col.startswith('Unnamed')]
    csv_df.drop(columns=drop_cols, inplace=True)
    #Strip any leading or trailing whitespace
    csv_df['start_timestamp'] = csv_df['start_timestamp'].str.strip()
    csv_df['end_timestamp'] = csv_df['end_timestamp'].str.strip()
    #See if we need to drop NAs and notify of drops
    orig_sz = len(csv_df)
    na_sz = len(csv_df.dropna(subset=['start_timestamp', 'end_timestamp']))
    if na_sz != orig_sz:
        csv_df.dropna(subset=['start_timestamp', 'end_timestamp'], inplace=True)
        #BUGFIX: report the number of rows dropped (orig_sz - na_sz); previously the
        #message was formatted with na_sz, which is the number of rows *kept*.
        short_warning("You had {0} NA rows in start_timestamp or end timestamp which were dropped."
                      .format(orig_sz - na_sz))
    #See if we have wrong formats on timestamps and process or notify
    csv_df = csv_df.apply(_fix_added_timestamp, axis='columns')
    #Determine if the df can continue forward based on timestamps
    no_fatal_errors = csv_df['fatal_errors'].sum()
    if no_fatal_errors != 0:
        #display errors and get all rows except those with fatal errors
        error_rows = csv_df.query('fatal_errors!=0')
        #Use positional access for the file id: the original label-based
        #csv_df['id'][0] breaks when the row labelled 0 was dropped above.
        short_warning('File {0} has {1} timestamp errors that cannot be automatically corrected. Dropping these rows.\nDropped row summary due to timestamp (truncated table):\n{2}'
                      .format(csv_df['id'].iloc[0], no_fatal_errors, error_rows[['id', 'start_timestamp', 'end_timestamp']]))
        csv_df = csv_df.drop(index=error_rows.index)
    #Convert times to milliseconds and calculate duration
    csv_df["start_ms"] = csv_df["start_timestamp"].apply(to_millseconds)
    csv_df["end_ms"] = csv_df["end_timestamp"].apply(to_millseconds)
    csv_df["duration_ms"] = csv_df['end_ms'] - csv_df["start_ms"]
    #Validate ms: durations must be positive and no longer than duration_max
    csv_df['fatal_errors'] = csv_df['duration_ms'].apply(lambda x: 0 if x > 0 else 1)
    csv_df['fatal_errors'] = csv_df.apply(lambda x: x['fatal_errors'] if x['duration_ms'] <= duration_max else 1,
                                          axis=1)
    no_fatal_errors = csv_df['fatal_errors'].sum()
    if no_fatal_errors != 0:
        #display errors and get all rows except those with fatal errors
        error_rows = csv_df.query('fatal_errors!=0')
        short_warning('File {0} has {1} time duration issues. Dropping these rows.\nDropped row summary due to duration (truncated table):\n{2}'
                      .format(csv_df['id'].iloc[0], no_fatal_errors,
                              error_rows[['id', 'start_ms', 'end_ms', 'duration_ms']]))
        csv_df = csv_df.drop(index=error_rows.index)
    #Once fatal errors are removed (or there were none), drop the helper column
    csv_df.drop(columns=['fatal_errors'], inplace=True)
    #Get the indices together correctly
    csv_df.reset_index(drop=True, inplace=True)
    return csv_df
# Internal Cell
def _get_audio_embeddings(row_info, wav_file, aud_processor, aud_mdl, samp_rate):
'''
Function _get_audio_embeddings: generates embeddings for a wave file using a model. Function not to be used
directly without pandas .apply function.
Inputs: row_info: pandas Series of row info with minimally start_index and end_index
wav_file: list or numpy array of wave file
aud_processor: huggingface audio processor for inputs
aud_mdl: huggingface audio model to generate embeddings
samp_rate: sampling rate of audio
Outputs: pandas Series of row info with added columns 'last_hidden_state',
'shape_state', and 'last_hidden_state_mean'
'''
#Get the processed input values using the processor
input_values = aud_processor(wav_file[row_info['start_index'] : row_info['end_index']],
return_tensors="pt", sampling_rate = samp_rate).input_values
#Get the embeddings values
last_hidden_state = aud_mdl(input_values).last_hidden_state[0,:,:]
row_info['last_hidden_state'] = last_hidden_state.tolist()
row_info['shape_state'] = list(last_hidden_state.shape)
row_info['last_hidden_state_mean'] = torch.mean(last_hidden_state, dim=0).tolist()
#Return
return row_info
# Cell
def add_audio_embeddings_info(pd_audio,
                              audio_no,
                              audio_processor,
                              audio_mdl,
                              sampling_rate = 16000,
                              base_prefix = "/data/p_dsi/wise/data/resampled_audio_16khz/"):
    '''
    Input argument:
        pd_audio: cleaned dataframe with cleaned start and end timestamps (correctly formatted into xx:xx.xxx)
        audio_no: String of audio_number (e.g., '083-1')
        audio_processor: HF audio processor (e.g., instantiated Wav2Vec2Processor)
        audio_mdl: HF audio base model (e.g., instantiated Wav2Vec2Model)
        sampling_rate (default 16000): integer of sampling rate of audio
        base_prefix (default '/data/p_dsi/wise/data/resampled_audio_16khz'): String of filepath to audio files
    Output:
        a pandas dataframe containing the original csv data plus embedding columns (last hidden states
        matrix and mean vector) added per row by _get_audio_embeddings
    '''
    print('Working on file:', audio_no)
    #Load the (resampled) waveform for this recording
    audio_wave, sr = sf.read(base_prefix + audio_no + '.wav')
    #Translate millisecond offsets into sample offsets
    samples_per_ms = sampling_rate // 1000
    pd_audio["start_index"] = pd_audio["start_ms"].apply(lambda ms: int(ms) * samples_per_ms)
    pd_audio["end_index"] = pd_audio["end_ms"].apply(lambda ms: int(ms) * samples_per_ms)
    #Attach the embedding columns row by row
    pd_audio = pd_audio.apply(lambda row: _get_audio_embeddings(row, audio_wave,
                                                               audio_processor, audio_mdl, sampling_rate),
                              axis='columns')
    #Reset index to make sure continuous numbering
    pd_audio.reset_index(drop=True, inplace=True)
    return pd_audio
# Internal Cell
def _check_label(row, label_list):
'''
Function _check_label: Internal helper function with .apply in pandas to check label. Not to be used directly.
Inputs: row: pandas Series of dataframe row with minimially 'label' column
label_list: list of accepted labels in df
Returns: warning or fixed label in row
'''
if row['label'] not in label_list:
#Get match ratio
matches = [difflib.SequenceMatcher(a=row['label'].lower(), b=test_label.lower()).ratio()
for test_label in label_list]
#Get index of best match and set it
maxindex = np.argmax(matches)
best_label = label_list[maxindex]
short_warning('File {0}: Row {1} has label {2}; replaced with {3}'
.format(row['id'], row.name, row['label'], best_label))
#Fix
row['label'] = best_label
return row
# Cell
def check_label(df, label_list=None):
    """
    Check each row's label against the accepted set and repair wrong ones.
    Inputs: df: pandas data frame
            label_list (default None): list of accepted label names in the label column, or None to
            use the defaults ["OTR", "NEU", "REP", "PRS"]
    Output: throws warnings when encountering wrong labels; returns df with corrected labels
    """
    valid_labels = ["OTR", "NEU", "REP", "PRS"] if label_list is None else label_list
    return df.apply(lambda row: _check_label(row, valid_labels), axis='columns')
# Cell
def write_nd_parquet(df, filepath):
    '''
    Function write_nd_parquet: persists a dataframe (including complex columns) as a parquet file.
    May be unnecessary.
    Inputs: df: dataframe to be written
            filepath: full filepath for output
    Output: None; prints the filepath that the dataframe was written to.
    '''
    #Route through a pyarrow Table so nested/complex columns serialize
    pq.write_table(pa.Table.from_pandas(df), filepath)
    print('Wrote dataframe to:', filepath)
    return
# Comes from 45-restructure-audio-fixed-length.ipynb, cell
# data science packages
import pandas as pd
import numpy as np
# other python packages
import os.path
import glob
import re
# Comes from 45-restructure-audio-fixed-length.ipynb, cell
def _group_statements(group_info):
'''
Function _group_statements: pandas apply helper function to group sets of statements into one fixed length row
Input: group_info: pandas group with minimally elements id, speech, label, label_id, start_ms, end_ms,
start_timestamp, end_timestamp, duration_ms
Output: new dataframe with single row for sets of statements
'''
#Get overall info
row_id = group_info['id'].iloc[0]
speech_list = group_info['speech'].tolist()
speech = ' '.join(speech_list)
label = group_info['label'].tolist()
label_id = group_info['label_id'].tolist()
#Get start info
start_ms = group_info['start_ms'].iloc[0]
start_timestamp = group_info['start_timestamp'].iloc[0]
start_index = group_info['start_index'].iloc[0]
#Get end info
end_ms = group_info['end_ms'].iloc[-1]
end_timestamp = group_info['end_timestamp'].iloc[-1]
end_index = group_info['end_index'].iloc[-1]
#Get duration info
duration_ms = group_info['duration_ms'].sum()
#Make dataframe
df = pd.DataFrame({'id':row_id, 'speech_list':[speech_list], 'speech':speech,
'label':[label], 'label_id':[label_id],
'start_timestamp':start_timestamp, 'end_timestamp':end_timestamp,
'start_ms':start_ms, 'end_ms':end_ms, 'duration_ms':duration_ms,
'start_index': start_index, 'end_index':end_index},
index=['1'])
return df
# Comes from 45-restructure-audio-fixed-length.ipynb, cell
def _add_label_counts(row_info):
'''
Function _add_label_counts: helper function for pandas apply; adds label counts as individual columns. Not to
be used directly.
Inputs: row_info: pandas Series of row info with minimally 'label'
Output: returns pandas Series of row info with new label counts added for that row.
'''
#Get counts of labels
vc = pd.Series(row_info['label']).value_counts()
#Add it back info the index
row_info[vc.index]=vc
return row_info
# Comes from 45-restructure-audio-fixed-length.ipynb, cell
def get_fixed_length_segments(csv_df, length_in_ms=2000, label_list=None):
    '''
    Function get_fixed_length_segments: Function to regroup dataframe into fixed length segments
    Inputs: csv_df: dataframe with minimally speech, label, all timestamps, all milliseconds, duration, and indices.
            length_in_ms (default 2000): integer of time of fixed length in milliseconds
            label_list (default None): list of accepted labels in dataframe; default label list used if None
    Outputs: regrouped dataframe with one row per fixed length statements lengths with counts of each label
    '''
    #Make label list and generate encodings in df
    if label_list is None:
        label_list = ["OTR", "NEU", "REP", "PRS"]
    #Create label encoding (label -> integer id, in label_list order)
    label2id = {lab:ind for ind, lab in enumerate(label_list)}
    #Do the encoding
    csv_df['label_id'] = csv_df['label'].replace(label2id)
    #Assign each row to a fixed-length time bucket based on its end time
    csv_df['ts_group'] = csv_df['end_ms']//length_in_ms
    #Collapse each bucket into a single row
    csv_df = csv_df.groupby('ts_group').apply(_group_statements).reset_index(drop=True)
    #Initialize one count column per label.
    #BUGFIX: was `csv_df[[label_list]]=0` -- a nested list is not a valid pandas
    #column indexer; a flat list of column names is required.
    csv_df[label_list] = 0
    #Add label counts
    csv_df = csv_df.apply(_add_label_counts, axis=1)
    #All done!
    return csv_df
# Comes from 45-restructure-audio-fixed-length.ipynb, cell
def short_embedding_csv_load(fname):
    '''
    Function short_embedding_csv_load: loads only the timestamp/segment columns of interest from an
    input parquet file (the large embedding columns are deliberately not requested).
    Input: fname: String of full filepath to the parquet file
    Output: pandas dataframe restricted to the columns of interest
    '''
    #Read just the named columns; restricting columns keeps the load fast and small
    df = pd.read_parquet(fname,
                         columns=['id', 'speech', 'label', 'start_timestamp', 'end_timestamp',
                                  'start_ms', 'end_ms', 'duration_ms',
                                  'start_index', 'end_index'])
return df | [
"pandas.DataFrame",
"torch.mean",
"soundfile.read",
"re.split",
"numpy.argmax",
"pyarrow.Table.from_pandas",
"warnings.warn_explicit",
"pandas.read_parquet",
"pandas.Series",
"pyarrow.parquet.write_table",
"re.compile"
] | [((1145, 1182), 're.compile', 're.compile', (['"""\\\\d{2}:\\\\d{2}\\\\.\\\\d{1,}"""'], {}), "('\\\\d{2}:\\\\d{2}\\\\.\\\\d{1,}')\n", (1155, 1182), False, 'import re\n'), ((1368, 1391), 're.split', 're.split', (['""":|\\\\."""', 'time'], {}), "(':|\\\\.', time)\n", (1376, 1391), False, 'import re\n'), ((1755, 1806), 'warnings.warn_explicit', 'warnings.warn_explicit', (['message', 'UserWarning', '""""""', '(0)'], {}), "(message, UserWarning, '', 0)\n", (1777, 1806), False, 'import warnings\n'), ((2373, 2409), 're.compile', 're.compile', (['"""\\\\d{2}:\\\\d{2}\\\\.\\\\d{3}"""'], {}), "('\\\\d{2}:\\\\d{2}\\\\.\\\\d{3}')\n", (2383, 2409), False, 'import re\n'), ((10276, 10316), 'soundfile.read', 'sf.read', (["(base_prefix + audio_no + '.wav')"], {}), "(base_prefix + audio_no + '.wav')\n", (10283, 10316), True, 'import soundfile as sf\n'), ((12738, 12762), 'pyarrow.Table.from_pandas', 'pa.Table.from_pandas', (['df'], {}), '(df)\n', (12758, 12762), True, 'import pyarrow as pa\n'), ((12783, 12817), 'pyarrow.parquet.write_table', 'pq.write_table', (['pq_table', 'filepath'], {}), '(pq_table, filepath)\n', (12797, 12817), True, 'import pyarrow.parquet as pq\n'), ((14206, 14541), 'pandas.DataFrame', 'pd.DataFrame', (["{'id': row_id, 'speech_list': [speech_list], 'speech': speech, 'label': [\n label], 'label_id': [label_id], 'start_timestamp': start_timestamp,\n 'end_timestamp': end_timestamp, 'start_ms': start_ms, 'end_ms': end_ms,\n 'duration_ms': duration_ms, 'start_index': start_index, 'end_index':\n end_index}"], {'index': "['1']"}), "({'id': row_id, 'speech_list': [speech_list], 'speech': speech,\n 'label': [label], 'label_id': [label_id], 'start_timestamp':\n start_timestamp, 'end_timestamp': end_timestamp, 'start_ms': start_ms,\n 'end_ms': end_ms, 'duration_ms': duration_ms, 'start_index':\n start_index, 'end_index': end_index}, index=['1'])\n", (14218, 14541), True, 'import pandas as pd\n'), ((16888, 17054), 'pandas.read_parquet', 'pd.read_parquet', (['fname'], 
{'columns': "['id', 'speech', 'label', 'start_timestamp', 'end_timestamp', 'start_ms',\n 'end_ms', 'duration_ms', 'start_index', 'end_index']"}), "(fname, columns=['id', 'speech', 'label', 'start_timestamp',\n 'end_timestamp', 'start_ms', 'end_ms', 'duration_ms', 'start_index',\n 'end_index'])\n", (16903, 17054), True, 'import pandas as pd\n'), ((2941, 2977), 're.split', 're.split', (['""":|\\\\."""', 'row_info[ts_type]'], {}), "(':|\\\\.', row_info[ts_type])\n", (2949, 2977), False, 'import re\n'), ((11576, 11594), 'numpy.argmax', 'np.argmax', (['matches'], {}), '(matches)\n', (11585, 11594), True, 'import numpy as np\n'), ((9049, 9085), 'torch.mean', 'torch.mean', (['last_hidden_state'], {'dim': '(0)'}), '(last_hidden_state, dim=0)\n', (9059, 9085), False, 'import torch\n'), ((15083, 15111), 'pandas.Series', 'pd.Series', (["row_info['label']"], {}), "(row_info['label'])\n", (15092, 15111), True, 'import pandas as pd\n')] |
"""
iNLTK public marathi headlines dataset
"""
import pandas as pd
import numpy as np
from datasets import load_metric
from datasets import Dataset
from datasets import ClassLabel
from transformers import TrainingArguments, Trainer, AutoConfig
from transformers import AutoTokenizer, AutoModelForSequenceClassification
MAX_LEN = 128  # maximum tokenized sequence length (tokens per headline)
MODEL_NAME = "flax-community/roberta-base-mr"  # pretrained Marathi RoBERTa checkpoint
# Shared tokenizer and evaluation metric, used by the helper functions below
tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
metric = load_metric("accuracy")
def compute_metrics(eval_pred):
    """Compute accuracy for a batch of (logits, labels) produced by the Trainer."""
    logits, labels = eval_pred
    #Pick the highest-scoring class per example before scoring
    predicted_classes = np.argmax(logits, axis=-1)
    return metric.compute(predictions=predicted_classes, references=labels)
def tokenize_function(examples):
    """Tokenize the 'headline' field, padded and truncated to MAX_LEN tokens."""
    return tokenizer(examples["headline"],
                     padding="max_length",
                     truncation=True,
                     max_length=MAX_LEN)
# Load the pre-split train/validation CSVs (expected columns: 'headline', 'label')
train_df = pd.read_csv("train.csv")
valid_df = pd.read_csv("valid.csv")
# Derive the label set from the training data and build an integer encoding
label_names = train_df["label"].unique().tolist()
num_labels = len(label_names)
cl = ClassLabel(num_classes=num_labels, names=label_names)
valid_df["label"] = valid_df["label"].map(lambda x: cl.str2int(x))
train_df["label"] = train_df["label"].map(lambda x: cl.str2int(x))
print(label_names)
# Forward/backward label maps are stored in the model config for inference-time decoding
label2id = {label : cl.str2int(label) for label in label_names}
id2label = {cl.str2int(label) : label for label in label_names}
print(label2id)
config = AutoConfig.from_pretrained(MODEL_NAME, label2id=label2id, id2label=id2label)
# from_flax=True: the checkpoint is published as Flax weights and converted on load
model = AutoModelForSequenceClassification.from_pretrained(MODEL_NAME, from_flax=True, config=config)
# Wrap the pandas frames as HF Datasets and tokenize in batches
train_ds = Dataset.from_pandas(train_df)
valid_ds = Dataset.from_pandas(valid_df)
valid_tokenized_data = valid_ds.map(tokenize_function, batched=True)
train_tokenized_data = train_ds.map(tokenize_function, batched=True)
# Default hyperparameters; report_to=None disables external experiment loggers
training_args = TrainingArguments("inltk_trainer", report_to=None)
trainer = Trainer(
    model=model,
    args=training_args,
    train_dataset=train_tokenized_data,
    eval_dataset=valid_tokenized_data,
    compute_metrics=compute_metrics,
)
trainer.train()
# Persist the fine-tuned classifier and its tokenizer together
model.save_pretrained("inltk-mr-classifier")
tokenizer.save_pretrained("inltk-mr-classifier")
trainer.evaluate() | [
"transformers.AutoConfig.from_pretrained",
"transformers.TrainingArguments",
"numpy.argmax",
"pandas.read_csv",
"transformers.AutoTokenizer.from_pretrained",
"datasets.load_metric",
"datasets.ClassLabel",
"transformers.AutoModelForSequenceClassification.from_pretrained",
"datasets.Dataset.from_panda... | [((394, 435), 'transformers.AutoTokenizer.from_pretrained', 'AutoTokenizer.from_pretrained', (['MODEL_NAME'], {}), '(MODEL_NAME)\n', (423, 435), False, 'from transformers import AutoTokenizer, AutoModelForSequenceClassification\n'), ((446, 469), 'datasets.load_metric', 'load_metric', (['"""accuracy"""'], {}), "('accuracy')\n", (457, 469), False, 'from datasets import load_metric\n'), ((798, 822), 'pandas.read_csv', 'pd.read_csv', (['"""train.csv"""'], {}), "('train.csv')\n", (809, 822), True, 'import pandas as pd\n'), ((834, 858), 'pandas.read_csv', 'pd.read_csv', (['"""valid.csv"""'], {}), "('valid.csv')\n", (845, 858), True, 'import pandas as pd\n'), ((945, 998), 'datasets.ClassLabel', 'ClassLabel', ([], {'num_classes': 'num_labels', 'names': 'label_names'}), '(num_classes=num_labels, names=label_names)\n', (955, 998), False, 'from datasets import ClassLabel\n'), ((1309, 1385), 'transformers.AutoConfig.from_pretrained', 'AutoConfig.from_pretrained', (['MODEL_NAME'], {'label2id': 'label2id', 'id2label': 'id2label'}), '(MODEL_NAME, label2id=label2id, id2label=id2label)\n', (1335, 1385), False, 'from transformers import TrainingArguments, Trainer, AutoConfig\n'), ((1394, 1492), 'transformers.AutoModelForSequenceClassification.from_pretrained', 'AutoModelForSequenceClassification.from_pretrained', (['MODEL_NAME'], {'from_flax': '(True)', 'config': 'config'}), '(MODEL_NAME, from_flax=\n True, config=config)\n', (1444, 1492), False, 'from transformers import AutoTokenizer, AutoModelForSequenceClassification\n'), ((1500, 1529), 'datasets.Dataset.from_pandas', 'Dataset.from_pandas', (['train_df'], {}), '(train_df)\n', (1519, 1529), False, 'from datasets import Dataset\n'), ((1541, 1570), 'datasets.Dataset.from_pandas', 'Dataset.from_pandas', (['valid_df'], {}), '(valid_df)\n', (1560, 1570), False, 'from datasets import Dataset\n'), ((1727, 1777), 'transformers.TrainingArguments', 'TrainingArguments', (['"""inltk_trainer"""'], 
{'report_to': 'None'}), "('inltk_trainer', report_to=None)\n", (1744, 1777), False, 'from transformers import TrainingArguments, Trainer, AutoConfig\n'), ((1789, 1937), 'transformers.Trainer', 'Trainer', ([], {'model': 'model', 'args': 'training_args', 'train_dataset': 'train_tokenized_data', 'eval_dataset': 'valid_tokenized_data', 'compute_metrics': 'compute_metrics'}), '(model=model, args=training_args, train_dataset=train_tokenized_data,\n eval_dataset=valid_tokenized_data, compute_metrics=compute_metrics)\n', (1796, 1937), False, 'from transformers import TrainingArguments, Trainer, AutoConfig\n'), ((552, 578), 'numpy.argmax', 'np.argmax', (['logits'], {'axis': '(-1)'}), '(logits, axis=-1)\n', (561, 578), True, 'import numpy as np\n')] |
import pyclesperanto_prototype as cle
import numpy as np
def test_reduce_labels_to_centroids():
    """Each labelled region should shrink to a single pixel placed at its centroid."""
    labels = np.asarray([
        [0, 0, 0, 1, 1, 1],
        [0, 2, 0, 1, 1, 1],
        [0, 0, 0, 1, 1, 1],
        [3, 3, 3, 4, 4, 4],
        [3, 3, 3, 4, 4, 4],
        [3, 3, 3, 4, 4, 4],
    ])
    expected = np.asarray([
        [0, 0, 0, 0, 0, 0],
        [0, 2, 0, 0, 1, 0],
        [0, 0, 0, 0, 0, 0],
        [0, 0, 0, 0, 0, 0],
        [0, 3, 0, 0, 4, 0],
        [0, 0, 0, 0, 0, 0],
    ])
    actual = cle.reduce_labels_to_centroids(labels)
    print(actual)
    print(expected)
    assert np.allclose(expected, actual)
| [
"numpy.asarray",
"pyclesperanto_prototype.reduce_labels_to_centroids",
"numpy.allclose"
] | [((108, 244), 'numpy.asarray', 'np.asarray', (['[[0, 0, 0, 1, 1, 1], [0, 2, 0, 1, 1, 1], [0, 0, 0, 1, 1, 1], [3, 3, 3, 4, 4,\n 4], [3, 3, 3, 4, 4, 4], [3, 3, 3, 4, 4, 4]]'], {}), '([[0, 0, 0, 1, 1, 1], [0, 2, 0, 1, 1, 1], [0, 0, 0, 1, 1, 1], [3,\n 3, 3, 4, 4, 4], [3, 3, 3, 4, 4, 4], [3, 3, 3, 4, 4, 4]])\n', (118, 244), True, 'import numpy as np\n'), ((289, 425), 'numpy.asarray', 'np.asarray', (['[[0, 0, 0, 0, 0, 0], [0, 2, 0, 0, 1, 0], [0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0,\n 0], [0, 3, 0, 0, 4, 0], [0, 0, 0, 0, 0, 0]]'], {}), '([[0, 0, 0, 0, 0, 0], [0, 2, 0, 0, 1, 0], [0, 0, 0, 0, 0, 0], [0,\n 0, 0, 0, 0, 0], [0, 3, 0, 0, 4, 0], [0, 0, 0, 0, 0, 0]])\n', (299, 425), True, 'import numpy as np\n'), ((491, 527), 'pyclesperanto_prototype.reduce_labels_to_centroids', 'cle.reduce_labels_to_centroids', (['test'], {}), '(test)\n', (521, 527), True, 'import pyclesperanto_prototype as cle\n'), ((580, 610), 'numpy.allclose', 'np.allclose', (['reference', 'result'], {}), '(reference, result)\n', (591, 610), True, 'import numpy as np\n')] |
"""
Mapping between Ternary and Cartesian Coordinates.
Functions for converting ternary coordinates into cartesian system used in Matplotlib and vice-versa. All the values will be scaled so that the side length of the triangle is equal to one.
"""
import numpy as np
# Module-level constants so sqrt(3) is not recomputed on every call
_sqrt3 = np.sqrt(3.)
_half_sqrt3 = _sqrt3 / 2.
# Ternary to Cartesian Mapping
def ternaryToCartesian(coordinates):
    """
    Maps ternary coordinates to cartesian coordinates.

    Consider an equilateral ternary plot where a = 1 is placed at (0,0) and b = 1 is placed at
    (1,0); then c = 1 sits at (1/2, sqrt(3)/2). A 3-tuple (a,b,c) with a+b+c = 1 therefore maps
    to the cartesian point (b + c/2, sqrt(3)*c/2).

    Parameters
    ----------
    coordinates: list / tuple / numpy array of size three
        The coordinates to be converted from ternary to cartesian

    Returns
    -------
    numpy array of size two
    """
    b = coordinates[1]
    c = coordinates[2]
    return np.array([b + c / 2., _half_sqrt3 * c])
# Cartesian to Ternary Mapping
def cartesianToTernary(coordinates, sigma = 1.):
    """
    Maps cartesian coordinates to ternary coordinates.

    Inverting the ternary-to-cartesian mapping needs one extra constraint: the target sum of the
    ternary components. Given (x, y) and a + b + c = sigma, the components are recovered as
    c = 2y/sqrt(3), b = x - c/2, a = sigma - b - c.

    Parameters
    ----------
    coordinates: list / tuple / numpy array of size two
        The coordinates to be converted from cartesian to ternary
    sigma: Real
        Sum of (a, b, c) that the ternary coordinates should sum to.

    Returns
    -------
    numpy array of size three
    """
    third = coordinates[1] / _half_sqrt3
    second = coordinates[0] - third / 2.
    first = sigma - (second + third)
    return np.array([first, second, third])
| [
"numpy.array",
"numpy.sqrt"
] | [((324, 336), 'numpy.sqrt', 'np.sqrt', (['(3.0)'], {}), '(3.0)\n', (331, 336), True, 'import numpy as np\n'), ((974, 1053), 'numpy.array', 'np.array', (['[coordinates[1] + coordinates[2] / 2.0, _half_sqrt3 * coordinates[2]]'], {}), '([coordinates[1] + coordinates[2] / 2.0, _half_sqrt3 * coordinates[2]])\n', (982, 1053), True, 'import numpy as np\n'), ((1877, 1896), 'numpy.array', 'np.array', (['[a, b, c]'], {}), '([a, b, c])\n', (1885, 1896), True, 'import numpy as np\n')] |
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.metrics import mean_squared_error, mean_absolute_error, r2_score
from seglearn.transform import Segment
import plotly.express as px
import plotly.graph_objects as go
from plotly.subplots import make_subplots
from keras import backend as K
import holidays
de_holidays = holidays.DE()  # German public-holiday calendar, used by the 'holidays' feature
RANDOM_SEED = 42  # fixed seed so the train/valid/test splits are reproducible
data_path = '../data/'  # root directory for input/output data files
def train_test_valid_split(df, window_size, feature_len,
                           split_pct_1=0.3, split_pct_2=0.33,
                           test_set=True):
    """Splits data into training, validation and test sets.

    Feature columns are the first window_size*feature_len columns of `df`; target
    columns are the remaining ones except the last. Only the first target column
    is returned as y. If you do not want a test set, set the `test_set` param
    to False.
    """
    split_col = window_size * feature_len
    feature_part = df.iloc[:, :split_col]
    target_part = df.iloc[:, split_col:-1]
    X_train, X_valid, y_train, y_valid = train_test_split(feature_part, target_part,
                                                          test_size=split_pct_1, shuffle=True,
                                                          random_state=RANDOM_SEED)
    if not test_set:
        return X_train, X_valid, y_train.iloc[:, 0].values, y_valid.iloc[:, 0].values
    #Carve the test set out of the validation portion
    X_valid, X_test, y_valid, y_test = train_test_split(X_valid, y_valid,
                                                        test_size=split_pct_2, shuffle=True,
                                                        random_state=RANDOM_SEED)
    return (X_train, X_valid, X_test,
            y_train.iloc[:, 0].values, y_valid.iloc[:, 0].values, y_test.iloc[:, 0].values)
def calc_reg_metrics(y_true, y_pred):
    """Calculate a set of regression metrics (MSE, RMSE, MAE, R2).

    Parameters
    ----------
    y_true, y_pred : array-like
        Ground-truth and predicted values.

    Returns
    -------
    pandas.DataFrame
        Single 'value' column indexed by metric name ('MSE', 'RMSE', 'MAE', 'R2').
    """
    mse = mean_squared_error(y_true, y_pred)
    rmse = np.sqrt(mse)
    mae = mean_absolute_error(y_true, y_pred)
    #BUGFIX: removed the dead `mape` computation. It called
    #mean_absolute_percentage_error, which is never imported in this module (so it
    #always raised NameError), the bare `except:` silently swallowed the error, and
    #the value was never included in the results anyway.
    r2 = r2_score(y_true, y_pred)
    results = pd.DataFrame([mse, rmse, mae, r2],
                           index=['MSE', 'RMSE', 'MAE', 'R2'],
                           columns=['value'])
    return results
def create_column_features(features, window_size):
    """Build flat column names from a feature list and window size.

    Produces '<feature>_<step>' for each window step 0..window_size-1, followed
    by '<feature>_y' target names, with features cycling fastest.
    """
    steps = list(range(window_size)) + ['y']
    return ['{0}_{1}'.format(name, step) for step in steps for name in features]
def create_features(temp, features: list, ohlc: bool=True):
    """Assemble the target series and the requested feature columns for one contract.

    Parameters
    ----------
    temp : pandas.DataFrame
        Time-indexed frame for a single contract. A DatetimeIndex is required for the
        calendar-derived features; with ohlc=True the price/quantity columns are expected
        as OHLC-style nested columns (e.g. temp.px.close, temp.qty.open).
    features : list
        Names of the features to generate. Unknown names are silently ignored; the output
        column order follows the order of the `if` checks below, not the order in `features`.
    ohlc : bool
        Whether the target y is the close price (True) or the plain 'px' column (False).

    Returns
    -------
    (X, y) : X is np.stack([y, *feature_list], axis=1) -- the target prepended to the
    generated feature columns -- and y is the raw target array.
    """
    if ohlc:
        y = temp.px.close.values
    else:
        y = temp.px.values
    feature_list = []
    # --- calendar features ---
    if 'weekday' in features:
        weekday = np.array(temp.index.dayofweek)
        feature_list.append(weekday)
    # sine/cosine encodings make the weekly cycle continuous (Sun wraps to Mon)
    if 'weekday_sin' in features:
        weekday_sin = np.sin(2*np.pi*temp.index.dayofweek/6)
        feature_list.append(weekday_sin)
    if 'weekday_cos' in features:
        weekday_cos = np.cos(2*np.pi*temp.index.dayofweek/6)
        feature_list.append(weekday_cos)
    if 'run_hour' in features:
        feature_list.append(temp.hour)
    if 'hours_to_4' in features:
        # hours remaining until 16:00 (wrapping past midnight), scaled by 23 --
        # presumably to normalize into [0, 1]; confirm against downstream use
        hours_to_4 = np.array([40-hour if hour>16 else 16-hour for hour in temp.index.hour])/23
        feature_list.append(hours_to_4)
    if 'n_prev_hour_contracts' in features:
        # scaled by 41 -- looks like a normalization constant; TODO confirm its origin
        feature_list.append(temp.n_prev_hour_contracts/41)
    if 'hour' in features:
        hour = np.array(temp.index.hour)
        feature_list.append(hour)
    if 'hour_sin' in features:
        hour_sin = np.sin(2*np.pi*temp.index.hour/23)
        feature_list.append(hour_sin)
    if 'hour_cos' in features:
        hour_cos = np.cos(2*np.pi*temp.index.hour/23)
        feature_list.append(hour_cos)
    # --- weather features (passed through as-is) ---
    if 'air_temp' in features:
        feature_list.append(temp.air_temp)
    if 'rel_humidity' in features:
        feature_list.append(temp.rel_humidity)
    if 'wind_speed' in features:
        feature_list.append(temp.wind_speed)
    if 'wind_dir' in features:
        feature_list.append(temp.wind_dir)
    if 'holidays' in features:
        # boolean flag per row: is the date a German public holiday?
        # NOTE(review): this local name shadows the module-level `holidays` import
        holidays = np.array([x in de_holidays for x in temp.index.strftime("%Y-%m-%d")])
        feature_list.append(holidays)
    # --- quantity (volume) features ---
    if 'qty_open' in features:
        qty_open = np.array(temp.qty.open.values)
        feature_list.append(qty_open)
    if 'qty_high' in features:
        qty_high = np.array(temp.qty.high.values)
        feature_list.append(qty_high)
    if 'qty_low' in features:
        qty_low = np.array(temp.qty.low.values)
        feature_list.append(qty_low)
    if 'qty_close' in features:
        qty_close = np.array(temp.qty.close.values)
        feature_list.append(qty_close)
    if 'qty_var' in features:
        # fall back to the flat 'qty' column when no 'var' aggregate exists
        try:
            qty_var = np.array(temp.qty['var'].values)
        except:
            qty_var = np.array(temp.qty.qty.values)
        feature_list.append(qty_var)
    if 'qty_sum' in features:
        # same fallback as qty_var
        try:
            qty_sum = np.array(temp.qty['sum'].values)
        except:
            qty_sum = np.array(temp.qty.qty.values)
        feature_list.append(qty_sum)
    # --- actual-price features ---
    if 'act_px_open' in features:
        act_px_open = np.array(temp.act_px.open.values)
        feature_list.append(act_px_open)
    if 'act_px_high' in features:
        act_px_high = np.array(temp.act_px.high.values)
        feature_list.append(act_px_high)
    if 'act_px_low' in features:
        act_px_low = np.array(temp.act_px.low.values)
        feature_list.append(act_px_low)
    if 'act_px_close' in features:
        act_px_close = np.array(temp.act_px.close.values)
        feature_list.append(act_px_close)
    # --- price features (close is the target, so only open/high/low/var here) ---
    if 'px_open' in features:
        px_open = np.array(temp.px.open.values)
        feature_list.append(px_open)
    if 'px_high' in features:
        px_high = np.array(temp.px.high.values)
        feature_list.append(px_high)
    if 'px_low' in features:
        px_low = np.array(temp.px.low.values)
        feature_list.append(px_low)
    if 'px_var' in features:
        px_var = np.array(temp.px['var'].values)
        feature_list.append(px_var)
    if 'act_px_absdif' in features:
        act_px_absdif = np.array(temp.act_px_absdif.values)
        feature_list.append(act_px_absdif)
    if 'px_absdif' in features:
        px_absdif = np.array(temp.px_absdif.values)
        feature_list.append(px_absdif)
    return np.stack([y, *feature_list], axis=1), y
def create_rolling_windows(resampled_df: pd.DataFrame, window_size: int,
                           features: list, save_to_pickle: bool=True,
                           ohlc: bool=True) -> pd.DataFrame:
    '''Creates rolling windows from the data. You need to specify
    a window size and a list of feature names you have.

    Inputs: resampled_df: resampled dataframe (OHLC nested columns when ohlc=True)
            window_size: number of past steps per window; only contracts with more
            rows than window_size are used
            features: feature names passed to create_features
            save_to_pickle (default True): also write the result to a pickle in data_path
            ohlc (default True): whether the frame uses OHLC nested columns
    Returns: one row per rolling window, columns named by create_column_features,
             plus a 'contractId' column.
    '''
    #Resolve the contract-id column once and reuse it (previously value_counts()
    #was computed twice per branch)
    if ohlc:
        id_col = resampled_df['contractId']['contractId']
        save_str = 'ohlc'
        date = '27102020'
    else:
        id_col = resampled_df['contractId']
        save_str = 'last'
        date = '25102020'
    #Hoisting save_str/date out of the loop also fixes a NameError when no
    #contract has enough rows and save_to_pickle is True
    counts = id_col.value_counts()
    contracts = counts[counts > window_size].index
    columns = create_column_features(features, window_size)
    segmenter = Segment(width=window_size+1, step=1)
    frames = []
    for c in contracts:
        temp = resampled_df[id_col==c]
        X, y = create_features(temp, features, ohlc)
        X_train, y_train, _ = segmenter.fit_transform([X], [y])
        #Sanity check: one window per step beyond the first window_size rows
        assert X_train.shape[0] == len(temp) - window_size
        temp_rolling = pd.DataFrame(X_train.reshape(X_train.shape[0], -1), columns=columns)
        temp_rolling['contractId'] = c
        frames.append(temp_rolling)
    #PERF: concatenate once at the end -- pd.concat inside the loop is quadratic
    forecast_df = pd.concat(frames) if frames else pd.DataFrame()
    forecast_df.reset_index(drop=True, inplace=True)
    if save_to_pickle:
        forecast_df.to_pickle(data_path+f'rolling_{window_size}_{save_str}_{date}.pkl', compression='zip')
    return forecast_df
def bin_ohlcv(df, contractId, binning_size='H'):
    """Resample one contract's trades into OHLC price bars plus summed volume.

    Returns a dataframe indexed by time bin with open/high/low/close price
    columns and a 'volsum' column holding the summed quantity per bin.
    """
    single_contract = df[df.contractId == contractId]
    #OHLC the price per bin, then attach the summed volume on the same bins
    bars = single_contract[['px']].resample(binning_size).ohlc().px
    bars['volsum'] = single_contract[['qty']].resample(binning_size).sum()
    return bars
def plot_ohlcv(df, contractId, binning_size='H'):
    """Render an OHLC candlestick chart with a volume-bar overlay for one contract."""
    bars = bin_ohlcv(df, contractId, binning_size)
    fig = make_subplots(specs=[[{"secondary_y": True}]])
    candles = go.Candlestick(x=bars.index,
                            open=bars['open'],
                            high=bars['high'],
                            low=bars['low'],
                            close=bars['close'],
                            name=contractId)
    volume = go.Bar(x=bars.index,
                   y=bars['volsum'],
                   name='Volume',
                   opacity=.5,
                   marker={'color': 'blue'})
    fig.add_trace(candles)
    #Volume rides on the secondary y-axis so price and size share the x-axis
    fig.add_trace(volume, secondary_y=True)
    fig.update_layout(title=f'OHLCV for {contractId}')
    fig.update_layout(xaxis_rangeslider_visible=False)
    fig.show()
def remove_outliers(df, method, thresh,
                    window_size):
    """Remove outlier rows from `df` based on the spread of each row's window values.

    Parameters
    ----------
    df : pandas.DataFrame
        Frame containing columns 't_0'..'t_{window_size-1}' and 't_y'.
    method : str
        'stddev' (row standard deviation), 'var' (row variance) or
        'zscore' (z-score of the row variances across all rows).
    thresh : float
        Rows whose |statistic| >= thresh are dropped.
    window_size : int
        Number of 't_i' columns included in the statistic.

    Returns
    -------
    pandas.DataFrame with the outlier rows removed.

    Raises
    ------
    ValueError if `method` is not supported.
    """
    cols = [f't_{i}' for i in range(window_size)] + ['t_y']
    if method=='stddev':
        vals = df[cols].std(axis=1)
    elif method=='var':
        vals = df[cols].var(axis=1)
    elif method=='zscore':
        #BUGFIX: the z-scores were computed into an unused variable `z` via a
        #never-imported `zscore` name, so this branch always raised NameError (and,
        #had it run, thresholding would still have used the raw variances). The
        #threshold is now genuinely applied to the z-score of the row variances.
        from scipy.stats import zscore
        row_var = df[cols].var(axis=1)
        vals = pd.Series(zscore(row_var), index=row_var.index)
    else:
        raise ValueError('Outlier Removal Method \
            is not supported. Try `stddev` or `var`.')
    #Keep only rows whose statistic is inside the threshold
    vals = vals[np.abs(vals)<thresh]
    print(f'Dropped {len(df)-len(vals)} rows as outliers, \
        \nkeeping {np.round((len(vals)/len(df))*100, 2)}% of rows')
    return df.loc[vals.index]
def coeff_determination(y_true, y_pred):
    """Keras-backend R^2 metric: 1 - SS_res/SS_tot, with K.epsilon() guarding divide-by-zero."""
    # Residual sum of squares
    SS_res = K.sum(K.square(y_true-y_pred))
    # Total sum of squares around the mean of y_true
    SS_tot = K.sum(K.square( y_true - K.mean(y_true) ) )
return (1 - SS_res/(SS_tot + K.epsilon())) | [
"numpy.abs",
"sklearn.model_selection.train_test_split",
"keras.backend.epsilon",
"sklearn.metrics.r2_score",
"sklearn.metrics.mean_absolute_error",
"numpy.sin",
"seglearn.transform.Segment",
"pandas.DataFrame",
"sklearn.metrics.mean_squared_error",
"pandas.concat",
"holidays.DE",
"numpy.stack... | [((377, 390), 'holidays.DE', 'holidays.DE', ([], {}), '()\n', (388, 390), False, 'import holidays\n'), ((730, 900), 'sklearn.model_selection.train_test_split', 'train_test_split', (['df.iloc[:, :window_size * feature_len]', 'df.iloc[:, window_size * feature_len:-1]'], {'test_size': 'split_pct_1', 'shuffle': '(True)', 'random_state': 'RANDOM_SEED'}), '(df.iloc[:, :window_size * feature_len], df.iloc[:, \n window_size * feature_len:-1], test_size=split_pct_1, shuffle=True,\n random_state=RANDOM_SEED)\n', (746, 900), False, 'from sklearn.model_selection import train_test_split\n'), ((1485, 1519), 'sklearn.metrics.mean_squared_error', 'mean_squared_error', (['y_true', 'y_pred'], {}), '(y_true, y_pred)\n', (1503, 1519), False, 'from sklearn.metrics import mean_squared_error, mean_absolute_error, r2_score\n'), ((1528, 1540), 'numpy.sqrt', 'np.sqrt', (['mse'], {}), '(mse)\n', (1535, 1540), True, 'import numpy as np\n'), ((1548, 1583), 'sklearn.metrics.mean_absolute_error', 'mean_absolute_error', (['y_true', 'y_pred'], {}), '(y_true, y_pred)\n', (1567, 1583), False, 'from sklearn.metrics import mean_squared_error, mean_absolute_error, r2_score\n'), ((1668, 1692), 'sklearn.metrics.r2_score', 'r2_score', (['y_true', 'y_pred'], {}), '(y_true, y_pred)\n', (1676, 1692), False, 'from sklearn.metrics import mean_squared_error, mean_absolute_error, r2_score\n'), ((1704, 1797), 'pandas.DataFrame', 'pd.DataFrame', (['[mse, rmse, mae, r2]'], {'index': "['MSE', 'RMSE', 'MAE', 'R2']", 'columns': "['value']"}), "([mse, rmse, mae, r2], index=['MSE', 'RMSE', 'MAE', 'R2'],\n columns=['value'])\n", (1716, 1797), True, 'import pandas as pd\n'), ((6220, 6258), 'seglearn.transform.Segment', 'Segment', ([], {'width': '(window_size + 1)', 'step': '(1)'}), '(width=window_size + 1, step=1)\n', (6227, 6258), False, 'from seglearn.transform import Segment\n'), ((6272, 6286), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (6284, 6286), True, 'import pandas as pd\n'), ((7454, 
7500), 'plotly.subplots.make_subplots', 'make_subplots', ([], {'specs': "[[{'secondary_y': True}]]"}), "(specs=[[{'secondary_y': True}]])\n", (7467, 7500), False, 'from plotly.subplots import make_subplots\n'), ((7511, 7637), 'plotly.graph_objects.Candlestick', 'go.Candlestick', ([], {'x': 'data.index', 'open': "data['open']", 'high': "data['high']", 'low': "data['low']", 'close': "data['close']", 'name': 'contractId'}), "(x=data.index, open=data['open'], high=data['high'], low=data\n ['low'], close=data['close'], name=contractId)\n", (7525, 7637), True, 'import plotly.graph_objects as go\n'), ((7674, 7771), 'plotly.graph_objects.Bar', 'go.Bar', ([], {'x': 'data.index', 'y': "data['volsum']", 'name': '"""Volume"""', 'opacity': '(0.5)', 'marker': "{'color': 'blue'}"}), "(x=data.index, y=data['volsum'], name='Volume', opacity=0.5, marker={\n 'color': 'blue'})\n", (7680, 7771), True, 'import plotly.graph_objects as go\n'), ((1068, 1169), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X_valid', 'y_valid'], {'test_size': 'split_pct_2', 'shuffle': '(True)', 'random_state': 'RANDOM_SEED'}), '(X_valid, y_valid, test_size=split_pct_2, shuffle=True,\n random_state=RANDOM_SEED)\n', (1084, 1169), False, 'from sklearn.model_selection import train_test_split\n'), ((2295, 2325), 'numpy.array', 'np.array', (['temp.index.dayofweek'], {}), '(temp.index.dayofweek)\n', (2303, 2325), True, 'import numpy as np\n'), ((2405, 2449), 'numpy.sin', 'np.sin', (['(2 * np.pi * temp.index.dayofweek / 6)'], {}), '(2 * np.pi * temp.index.dayofweek / 6)\n', (2411, 2449), True, 'import numpy as np\n'), ((2526, 2570), 'numpy.cos', 'np.cos', (['(2 * np.pi * temp.index.dayofweek / 6)'], {}), '(2 * np.pi * temp.index.dayofweek / 6)\n', (2532, 2570), True, 'import numpy as np\n'), ((2969, 2994), 'numpy.array', 'np.array', (['temp.index.hour'], {}), '(temp.index.hour)\n', (2977, 2994), True, 'import numpy as np\n'), ((3064, 3104), 'numpy.sin', 'np.sin', (['(2 * np.pi * temp.index.hour / 
23)'], {}), '(2 * np.pi * temp.index.hour / 23)\n', (3070, 3104), True, 'import numpy as np\n'), ((3197, 3237), 'numpy.cos', 'np.cos', (['(2 * np.pi * temp.index.hour / 23)'], {}), '(2 * np.pi * temp.index.hour / 23)\n', (3203, 3237), True, 'import numpy as np\n'), ((3720, 3750), 'numpy.array', 'np.array', (['temp.qty.open.values'], {}), '(temp.qty.open.values)\n', (3728, 3750), True, 'import numpy as np\n'), ((3824, 3854), 'numpy.array', 'np.array', (['temp.qty.high.values'], {}), '(temp.qty.high.values)\n', (3832, 3854), True, 'import numpy as np\n'), ((3926, 3955), 'numpy.array', 'np.array', (['temp.qty.low.values'], {}), '(temp.qty.low.values)\n', (3934, 3955), True, 'import numpy as np\n'), ((4030, 4061), 'numpy.array', 'np.array', (['temp.qty.close.values'], {}), '(temp.qty.close.values)\n', (4038, 4061), True, 'import numpy as np\n'), ((4470, 4503), 'numpy.array', 'np.array', (['temp.act_px.open.values'], {}), '(temp.act_px.open.values)\n', (4478, 4503), True, 'import numpy as np\n'), ((4586, 4619), 'numpy.array', 'np.array', (['temp.act_px.high.values'], {}), '(temp.act_px.high.values)\n', (4594, 4619), True, 'import numpy as np\n'), ((4700, 4732), 'numpy.array', 'np.array', (['temp.act_px.low.values'], {}), '(temp.act_px.low.values)\n', (4708, 4732), True, 'import numpy as np\n'), ((4816, 4850), 'numpy.array', 'np.array', (['temp.act_px.close.values'], {}), '(temp.act_px.close.values)\n', (4824, 4850), True, 'import numpy as np\n'), ((4926, 4955), 'numpy.array', 'np.array', (['temp.px.open.values'], {}), '(temp.px.open.values)\n', (4934, 4955), True, 'import numpy as np\n'), ((5026, 5055), 'numpy.array', 'np.array', (['temp.px.high.values'], {}), '(temp.px.high.values)\n', (5034, 5055), True, 'import numpy as np\n'), ((5124, 5152), 'numpy.array', 'np.array', (['temp.px.low.values'], {}), '(temp.px.low.values)\n', (5132, 5152), True, 'import numpy as np\n'), ((5220, 5251), 'numpy.array', 'np.array', (["temp.px['var'].values"], {}), 
"(temp.px['var'].values)\n", (5228, 5251), True, 'import numpy as np\n'), ((5333, 5368), 'numpy.array', 'np.array', (['temp.act_px_absdif.values'], {}), '(temp.act_px_absdif.values)\n', (5341, 5368), True, 'import numpy as np\n'), ((5449, 5480), 'numpy.array', 'np.array', (['temp.px_absdif.values'], {}), '(temp.px_absdif.values)\n', (5457, 5480), True, 'import numpy as np\n'), ((5525, 5561), 'numpy.stack', 'np.stack', (['[y, *feature_list]'], {'axis': '(1)'}), '([y, *feature_list], axis=1)\n', (5533, 5561), True, 'import numpy as np\n'), ((6827, 6865), 'pandas.concat', 'pd.concat', (['[forecast_df, temp_rolling]'], {}), '([forecast_df, temp_rolling])\n', (6836, 6865), True, 'import pandas as pd\n'), ((8668, 8693), 'keras.backend.square', 'K.square', (['(y_true - y_pred)'], {}), '(y_true - y_pred)\n', (8676, 8693), True, 'from keras import backend as K\n'), ((2733, 2812), 'numpy.array', 'np.array', (['[(40 - hour if hour > 16 else 16 - hour) for hour in temp.index.hour]'], {}), '([(40 - hour if hour > 16 else 16 - hour) for hour in temp.index.hour])\n', (2741, 2812), True, 'import numpy as np\n'), ((4142, 4174), 'numpy.array', 'np.array', (["temp.qty['var'].values"], {}), "(temp.qty['var'].values)\n", (4150, 4174), True, 'import numpy as np\n'), ((4306, 4338), 'numpy.array', 'np.array', (["temp.qty['sum'].values"], {}), "(temp.qty['sum'].values)\n", (4314, 4338), True, 'import numpy as np\n'), ((8441, 8453), 'numpy.abs', 'np.abs', (['vals'], {}), '(vals)\n', (8447, 8453), True, 'import numpy as np\n'), ((4198, 4227), 'numpy.array', 'np.array', (['temp.qty.qty.values'], {}), '(temp.qty.qty.values)\n', (4206, 4227), True, 'import numpy as np\n'), ((4362, 4391), 'numpy.array', 'np.array', (['temp.qty.qty.values'], {}), '(temp.qty.qty.values)\n', (4370, 4391), True, 'import numpy as np\n'), ((8728, 8742), 'keras.backend.mean', 'K.mean', (['y_true'], {}), '(y_true)\n', (8734, 8742), True, 'from keras import backend as K\n'), ((8777, 8788), 'keras.backend.epsilon', 
'K.epsilon', ([], {}), '()\n', (8786, 8788), True, 'from keras import backend as K\n')] |
import numpy as np
def iec_calc(prod_df, prod_col_dict, meta_df, meta_col_dict,
             gi_ref=1000.0):
    """Predict per-record energy from measured irradiance (IEC approach).

    For every site, the expected energy of each record is

        iecE = dc_size [kW] * irradiance [W/m^2] * tstep [h] / gi_ref

    where ``tstep`` is inferred from the gap between that site's first
    two timestamps.

    Parameters
    ----------
    prod_df : DataFrame
        Production data; must contain the columns named by
        ``prod_col_dict``.
    prod_col_dict : dict of {str : str}
        Maps the keys ``siteid``, ``timestamp``, ``irradiance`` (W/m^2),
        ``baseline`` (output column) and ``dcsize`` (temporary column)
        to column names in ``prod_df``.
    meta_df : DataFrame
        Site metadata with at least the columns named by
        ``meta_col_dict``.
    meta_col_dict : dict of {str : str}
        Maps ``siteid`` and ``dcsize`` (site capacity in kW) to column
        names in ``meta_df``.
    gi_ref : float
        Reference plane-of-array irradiance in W/m^2 at which the site
        capacity is rated (default 1000).

    Returns
    -------
    DataFrame
        Copy of ``prod_df`` with the IEC energy estimate written into
        the ``baseline`` column.
    """
    site_col = prod_col_dict["siteid"]
    ts_col = prod_col_dict["timestamp"]
    irr_col = prod_col_dict["irradiance"]
    iec_col = prod_col_dict["baseline"]
    size_col = prod_col_dict["dcsize"]

    # Work on copies so the caller's frames stay untouched.
    prod_df = prod_df.copy()
    meta_df = meta_df.copy().set_index(meta_col_dict["siteid"])
    size_series = meta_df[meta_col_dict["dcsize"]]

    # Attach each record's site capacity (kW) as a temporary column.
    prod_df[size_col] = prod_df.loc[:, site_col].apply(
        lambda sid: size_series.loc[sid])

    for sid in prod_df.loc[:, site_col].unique():
        mask = prod_df.loc[:, site_col] == sid
        stamps = prod_df.loc[mask, ts_col]
        # Sampling interval in hours, taken from the first two records,
        # so the product below comes out in kWh.
        tstep = (stamps.iloc[1] - stamps.iloc[0]) / np.timedelta64(1, "h")
        prod_df.loc[mask, iec_col] = (
            prod_df.loc[mask, size_col]
            * prod_df.loc[mask, irr_col]
            * tstep
            / gi_ref
        )

    prod_df.drop(columns=[size_col], inplace=True)
    return prod_df
| [
"numpy.timedelta64"
] | [((3157, 3179), 'numpy.timedelta64', 'np.timedelta64', (['(1)', '"""h"""'], {}), "(1, 'h')\n", (3171, 3179), True, 'import numpy as np\n')] |
import rospy
import move_base_msgs.msg as ros_mb_msg
import geometry_msgs.msg as ros_geom_msg
import std_msgs.msg as ros_std_msg
import tf
import actionlib
import numpy as np
from collections import Counter
##############################################################
# HELPER FUNCTIONS
##############################################################
def most_common(lst):
    """Return the single most frequent element of *lst*."""
    ranked = Counter(lst).most_common(1)
    return ranked[0][0]
def euler_to_quaternion(yaw, pitch, roll):
    """Convert ZYX Euler angles (radians) to a quaternion [qx, qy, qz, qw]."""
    # Precompute the half-angle terms once instead of repeating each
    # trig call in every component.
    cy, sy = np.cos(yaw/2), np.sin(yaw/2)
    cp, sp = np.cos(pitch/2), np.sin(pitch/2)
    cr, sr = np.cos(roll/2), np.sin(roll/2)
    qx = sr * cp * cy - cr * sp * sy
    qy = cr * sp * cy + sr * cp * sy
    qz = cr * cp * sy - sr * sp * cy
    qw = cr * cp * cy + sr * sp * sy
    return [qx, qy, qz, qw]
##############################################################
# HELPER CLASSES
##############################################################
class NNOutputHandler:
    """Turns the gallery-detection network output (a 360-entry vector,
    one value per degree of heading) into gallery angles, a situation
    label and per-quadrant direction candidates.

    Subscribes to ``/gallery_detection_vector``; the derived attributes
    (``vector``, ``filtered``, ``gallery_angles``, ``situation``,
    ``quadrants``, ``valid_directions``) only exist after the first
    callback has fired -- check ``has_first_callback_happened()`` first.
    """
    def __init__(self):
        # No message processed yet; the derived attributes are unset.
        self.first_callback = False
        self.nn_subscriber = rospy.Subscriber(
            "/gallery_detection_vector", ros_std_msg.Float32MultiArray, self.neural_network_callback)
    def neural_network_callback(self, msg: ros_std_msg.Float32MultiArray):
        # Full pipeline: raw vector -> peak filtering -> angles ->
        # situation label -> quadrants -> valid direction names.
        self.vector = msg.data
        self.filtered = self.filter_vector(msg.data)
        self.gallery_angles = self.filtered_to_gallery_angles(self.filtered)
        self.situation = self.determine_situation(self.gallery_angles)
        self.quadrants = self.get_quadrants_from_angles(self.gallery_angles)
        self.valid_directions = self.get_valid_directions_from_quadrants(
            self.quadrants)
        self.first_callback = True
    def get_valid_directions_from_quadrants(self, quadrants):
        # A quadrant is "valid" when a gallery angle was assigned to it
        # (i.e. its value is not None).
        valid_directions = []
        for key in quadrants.keys():
            if type(None) != type(quadrants[key]):
                valid_directions.append(key)
        return valid_directions
    def has_first_callback_happened(self):
        # True once at least one network message has been processed.
        return self.first_callback
    def get_quadrants(self):
        return self.quadrants
    def change_nn_callback(self, new_function):
        # Re-subscribe with a caller-supplied callback instead of the
        # built-in processing pipeline.
        self.nn_subscriber = rospy.Subscriber(
            "/gallery_detection_vector", ros_std_msg.Float32MultiArray, new_function)
    def filter_vector(self, vector):
        # Non-maximum suppression: bin i survives only if no bin inside
        # a 40-bin window centred on it is larger.
        # NOTE(review): the wrap-around index is taken modulo 356 even
        # though the vector has 360 entries -- looks like an off-by-four
        # bug; confirm whether % 360 was intended.
        filtered = np.zeros(360)
        for i in range(360):
            to_check = vector[i]
            filtered[i] = to_check
            a = 40
            for j in range(a):
                index_inside_subsection = ((-int(a/2) + j) + i) % 356
                if vector[index_inside_subsection] > to_check:
                    filtered[i] = 0
        return filtered
    def array_position_to_angle(self, array_position):
        # Vector index 0 corresponds to +180 deg, index 359 to -179 deg.
        return 180 - array_position
    def filtered_to_gallery_angles(self, filtered):
        # Keep peaks above 30% of the strongest one, convert them to
        # radians, then drop angles closer than ~10 deg to an already
        # accepted one.
        max_peak = np.max(filtered)
        ratio = 0.3
        # NOTE(review): this reads self.filtered instead of the
        # `filtered` parameter -- identical on the normal callback path,
        # but worth confirming for direct calls.
        galleries_indices = np.nonzero(self.filtered > max_peak * ratio)[0]
        galleries_angles = []
        for index in galleries_indices:
            galleries_angles.append(
                self.array_position_to_angle(index)/180.0 * np.math.pi)
        true_gallery_angles = []
        for a1 in galleries_angles:
            passes = True
            for a2 in true_gallery_angles:
                if self.min_distance(a1, a2) < 0.17: # 10 degrees
                    passes = False
            if passes:
                true_gallery_angles.append(a1)
        return true_gallery_angles
    def determine_situation(self, gallery_angles):
        # Classify the surroundings by the number of detected galleries.
        # Implicitly returns None when the list is empty.
        n = gallery_angles.__len__()
        if n == 1:
            return "in_end_of_gallery"
        elif n == 2:
            return "in_rect"
        elif n > 2:
            return "in_node"
    def min_distance(self, angle, obj):
        # Smallest absolute angular difference, result in [0, pi].
        # (The `< -pi` branch is unreachable: Python's % with a positive
        # modulus never yields a negative result.)
        distance = (angle - obj) % (np.math.pi*2)
        if distance < -np.math.pi:
            distance += np.math.pi * 2
        elif distance > np.math.pi:
            distance -= np.math.pi * 2
        distance = abs(distance)
        return distance
    def get_closest_angle_with_tolerance(self, angles, obj, tolerance=50):
        # Return the angle in `angles` closest to `obj` (radians), or
        # None when the closest one is farther than `tolerance`.
        # NOTE(review): the default tolerance of 50 rad exceeds the
        # maximum possible distance (pi), so by default any closest angle
        # is accepted; also `candidate` is unbound when `angles` is
        # empty, which would raise -- confirm callers never pass [].
        min_distance = 4
        for angle in angles:
            distance = self.min_distance(angle, obj)
            if distance < min_distance:
                min_distance = distance
                candidate = angle
        if min_distance < tolerance:
            return candidate
        else:
            return None
    def get_angle_to_front(self, angles):
        return self.get_closest_angle_with_tolerance(angles, 0)
    def get_angle_to_right(self, angles):
        # "Right" targets -45 deg in this convention.
        return self.get_closest_angle_with_tolerance(angles, -np.math.pi / 4)
    def get_angle_to_left(self, angles):
        # "Left" targets +45 deg.
        return self.get_closest_angle_with_tolerance(angles, np.math.pi / 4)
    def get_angle_to_back(self, angles):
        return self.get_closest_angle_with_tolerance(angles, np.math.pi)
    def get_quadrants_from_angles(self, angles):
        # Map each cardinal direction to its nearest gallery angle
        # (or None when nothing is close enough).
        quadrants = {}
        quadrants["front"] = self.get_angle_to_front(angles)
        quadrants["back"] = self.get_angle_to_back(angles)
        quadrants["left"] = self.get_angle_to_left(angles)
        quadrants["right"] = self.get_angle_to_right(angles)
        return quadrants
class MoveBaseHandler:
    """Thin wrapper around the move_base action client: builds goal poses
    from a heading angle + distance in the robot ("base_link") frame,
    transforms them into the "odom" frame and tracks whether a goal is
    still active via the action callbacks.
    """
    def __init__(self):
        self.first_callback = False
        self.move_base_active = False
        # Monotonically increasing sequence number for message headers.
        self.seq = 0
        self.listener = tf.TransformListener()
        self.tf_transformer = tf.TransformerROS()
        self.move_base_client = actionlib.SimpleActionClient(
            "/move_base", ros_mb_msg.MoveBaseAction)
        # Wait up to 5 s for the action server before reporting status.
        if self.move_base_client.wait_for_server(timeout=rospy.Duration.from_sec(5)):
            rospy.loginfo("MOVE BASE RECIEVED")
        else:
            rospy.logerr("MOVE BASE NOT ACTIVE")
    def angle_to_point(self, angle, d):
        # Convert a heading (rad) and distance into an XY point plus a
        # yaw-only quaternion, both in the robot frame.
        quaternion = euler_to_quaternion(angle, 0, 0)
        point = [d * np.math.cos(angle), d * np.math.sin(angle), 0]
        return point, quaternion
    def get_seq(self):
        # Post-incremented counter: returns the current value, then bumps.
        self.seq += 1
        return self.seq - 1
    def point_to_geom_msg(self, point, quaternion):
        # Stamp the pose in the "base_link" frame at the current time.
        header = ros_std_msg.Header(
            self.get_seq(), rospy.Time.now(), "base_link")
        position = ros_geom_msg.Point(point[0], point[1], point[2])
        orientation = ros_geom_msg.Quaternion(
            quaternion[0], quaternion[1], quaternion[2], quaternion[3])
        pose = ros_geom_msg.Pose(position, orientation)
        geom_msg = ros_geom_msg.PoseStamped(header, pose)
        return geom_msg
    def send_goal_from_angle(self, angle, distance=2):
        # Build a goal `distance` away along `angle`, express it in the
        # "odom" frame and send it to move_base.
        point, quaternion = self.angle_to_point(angle, distance)
        goal_geom_message = self.point_to_geom_msg(point, quaternion)
        # Transform the goal to the map frame
        # NOTE(review): the code actually transforms into "odom", not a
        # map frame as the comment above says -- confirm which is meant.
        t = self.listener.getLatestCommonTime("odom", "base_link")
        goal_geom_message.header.stamp = t
        # HACK: shares the listener's private buffer with the transformer
        # so transformPose can resolve the frames.
        self.tf_transformer._buffer = self.listener._buffer
        goal_geom_message = self.tf_transformer.transformPose(
            "odom", goal_geom_message)
        goal_msg = ros_mb_msg.MoveBaseGoal(goal_geom_message)
        self.current_goal = goal_msg
        self.move_base_client.send_goal(
            goal_msg, done_cb=self.done_cb, active_cb=self.active_cb, feedback_cb=self.feedback_cb)
    def done_cb(self, msg: ros_mb_msg.MoveBaseResult, hola):
        # actionlib "done" callback; the goal is finished either way.
        # NOTE(review): actionlib passes (status, result) to done_cb; the
        # parameter names here don't reflect that -- confirm before
        # relying on `msg` being a MoveBaseResult.
        self.move_base_active = False
    def active_cb(self):
        # Goal accepted by the server; mark it as in progress.
        self.move_base_active = True
    def feedback_cb(self, msg: ros_mb_msg.MoveBaseFeedback):
        # Consider the goal reached (and stop tracking it) once the robot
        # is within 5 units (metres, by ROS convention) of the goal.
        current_position = msg.base_position.pose.position
        x_diff = current_position.x - self.current_goal.target_pose.pose.position.x
        y_diff = current_position.y - self.current_goal.target_pose.pose.position.y
        distance = np.math.sqrt(x_diff**2 + y_diff**2)
        if distance < 5:
            self.move_base_active = False
| [
"rospy.logerr",
"rospy.Subscriber",
"move_base_msgs.msg.MoveBaseGoal",
"actionlib.SimpleActionClient",
"numpy.sin",
"rospy.Duration.from_sec",
"geometry_msgs.msg.PoseStamped",
"rospy.Time.now",
"numpy.math.sqrt",
"numpy.max",
"numpy.math.sin",
"collections.Counter",
"geometry_msgs.msg.Pose",... | [((388, 400), 'collections.Counter', 'Counter', (['lst'], {}), '(lst)\n', (395, 400), False, 'from collections import Counter\n'), ((1243, 1353), 'rospy.Subscriber', 'rospy.Subscriber', (['"""/gallery_detection_vector"""', 'ros_std_msg.Float32MultiArray', 'self.neural_network_callback'], {}), "('/gallery_detection_vector', ros_std_msg.Float32MultiArray,\n self.neural_network_callback)\n", (1259, 1353), False, 'import rospy\n'), ((2360, 2454), 'rospy.Subscriber', 'rospy.Subscriber', (['"""/gallery_detection_vector"""', 'ros_std_msg.Float32MultiArray', 'new_function'], {}), "('/gallery_detection_vector', ros_std_msg.Float32MultiArray,\n new_function)\n", (2376, 2454), False, 'import rospy\n'), ((2521, 2534), 'numpy.zeros', 'np.zeros', (['(360)'], {}), '(360)\n', (2529, 2534), True, 'import numpy as np\n'), ((3039, 3055), 'numpy.max', 'np.max', (['filtered'], {}), '(filtered)\n', (3045, 3055), True, 'import numpy as np\n'), ((5589, 5611), 'tf.TransformListener', 'tf.TransformListener', ([], {}), '()\n', (5609, 5611), False, 'import tf\n'), ((5642, 5661), 'tf.TransformerROS', 'tf.TransformerROS', ([], {}), '()\n', (5659, 5661), False, 'import tf\n'), ((5694, 5763), 'actionlib.SimpleActionClient', 'actionlib.SimpleActionClient', (['"""/move_base"""', 'ros_mb_msg.MoveBaseAction'], {}), "('/move_base', ros_mb_msg.MoveBaseAction)\n", (5722, 5763), False, 'import actionlib\n'), ((6412, 6460), 'geometry_msgs.msg.Point', 'ros_geom_msg.Point', (['point[0]', 'point[1]', 'point[2]'], {}), '(point[0], point[1], point[2])\n', (6430, 6460), True, 'import geometry_msgs.msg as ros_geom_msg\n'), ((6483, 6570), 'geometry_msgs.msg.Quaternion', 'ros_geom_msg.Quaternion', (['quaternion[0]', 'quaternion[1]', 'quaternion[2]', 'quaternion[3]'], {}), '(quaternion[0], quaternion[1], quaternion[2],\n quaternion[3])\n', (6506, 6570), True, 'import geometry_msgs.msg as ros_geom_msg\n'), ((6595, 6635), 'geometry_msgs.msg.Pose', 'ros_geom_msg.Pose', (['position', 
'orientation'], {}), '(position, orientation)\n', (6612, 6635), True, 'import geometry_msgs.msg as ros_geom_msg\n'), ((6655, 6693), 'geometry_msgs.msg.PoseStamped', 'ros_geom_msg.PoseStamped', (['header', 'pose'], {}), '(header, pose)\n', (6679, 6693), True, 'import geometry_msgs.msg as ros_geom_msg\n'), ((7248, 7290), 'move_base_msgs.msg.MoveBaseGoal', 'ros_mb_msg.MoveBaseGoal', (['goal_geom_message'], {}), '(goal_geom_message)\n', (7271, 7290), True, 'import move_base_msgs.msg as ros_mb_msg\n'), ((7940, 7979), 'numpy.math.sqrt', 'np.math.sqrt', (['(x_diff ** 2 + y_diff ** 2)'], {}), '(x_diff ** 2 + y_diff ** 2)\n', (7952, 7979), True, 'import numpy as np\n'), ((526, 541), 'numpy.cos', 'np.cos', (['(yaw / 2)'], {}), '(yaw / 2)\n', (532, 541), True, 'import numpy as np\n'), ((587, 602), 'numpy.sin', 'np.sin', (['(yaw / 2)'], {}), '(yaw / 2)\n', (593, 602), True, 'import numpy as np\n'), ((645, 660), 'numpy.cos', 'np.cos', (['(yaw / 2)'], {}), '(yaw / 2)\n', (651, 660), True, 'import numpy as np\n'), ((706, 721), 'numpy.sin', 'np.sin', (['(yaw / 2)'], {}), '(yaw / 2)\n', (712, 721), True, 'import numpy as np\n'), ((764, 779), 'numpy.sin', 'np.sin', (['(yaw / 2)'], {}), '(yaw / 2)\n', (770, 779), True, 'import numpy as np\n'), ((825, 840), 'numpy.cos', 'np.cos', (['(yaw / 2)'], {}), '(yaw / 2)\n', (831, 840), True, 'import numpy as np\n'), ((883, 898), 'numpy.cos', 'np.cos', (['(yaw / 2)'], {}), '(yaw / 2)\n', (889, 898), True, 'import numpy as np\n'), ((944, 959), 'numpy.sin', 'np.sin', (['(yaw / 2)'], {}), '(yaw / 2)\n', (950, 959), True, 'import numpy as np\n'), ((3104, 3148), 'numpy.nonzero', 'np.nonzero', (['(self.filtered > max_peak * ratio)'], {}), '(self.filtered > max_peak * ratio)\n', (3114, 3148), True, 'import numpy as np\n'), ((5875, 5910), 'rospy.loginfo', 'rospy.loginfo', (['"""MOVE BASE RECIEVED"""'], {}), "('MOVE BASE RECIEVED')\n", (5888, 5910), False, 'import rospy\n'), ((5937, 5973), 'rospy.logerr', 'rospy.logerr', (['"""MOVE BASE NOT ACTIVE"""'], 
{}), "('MOVE BASE NOT ACTIVE')\n", (5949, 5973), False, 'import rospy\n'), ((6362, 6378), 'rospy.Time.now', 'rospy.Time.now', ([], {}), '()\n', (6376, 6378), False, 'import rospy\n'), ((491, 507), 'numpy.sin', 'np.sin', (['(roll / 2)'], {}), '(roll / 2)\n', (497, 507), True, 'import numpy as np\n'), ((508, 525), 'numpy.cos', 'np.cos', (['(pitch / 2)'], {}), '(pitch / 2)\n', (514, 525), True, 'import numpy as np\n'), ((552, 568), 'numpy.cos', 'np.cos', (['(roll / 2)'], {}), '(roll / 2)\n', (558, 568), True, 'import numpy as np\n'), ((569, 586), 'numpy.sin', 'np.sin', (['(pitch / 2)'], {}), '(pitch / 2)\n', (575, 586), True, 'import numpy as np\n'), ((610, 626), 'numpy.cos', 'np.cos', (['(roll / 2)'], {}), '(roll / 2)\n', (616, 626), True, 'import numpy as np\n'), ((627, 644), 'numpy.sin', 'np.sin', (['(pitch / 2)'], {}), '(pitch / 2)\n', (633, 644), True, 'import numpy as np\n'), ((671, 687), 'numpy.sin', 'np.sin', (['(roll / 2)'], {}), '(roll / 2)\n', (677, 687), True, 'import numpy as np\n'), ((688, 705), 'numpy.cos', 'np.cos', (['(pitch / 2)'], {}), '(pitch / 2)\n', (694, 705), True, 'import numpy as np\n'), ((729, 745), 'numpy.cos', 'np.cos', (['(roll / 2)'], {}), '(roll / 2)\n', (735, 745), True, 'import numpy as np\n'), ((746, 763), 'numpy.cos', 'np.cos', (['(pitch / 2)'], {}), '(pitch / 2)\n', (752, 763), True, 'import numpy as np\n'), ((790, 806), 'numpy.sin', 'np.sin', (['(roll / 2)'], {}), '(roll / 2)\n', (796, 806), True, 'import numpy as np\n'), ((807, 824), 'numpy.sin', 'np.sin', (['(pitch / 2)'], {}), '(pitch / 2)\n', (813, 824), True, 'import numpy as np\n'), ((848, 864), 'numpy.cos', 'np.cos', (['(roll / 2)'], {}), '(roll / 2)\n', (854, 864), True, 'import numpy as np\n'), ((865, 882), 'numpy.cos', 'np.cos', (['(pitch / 2)'], {}), '(pitch / 2)\n', (871, 882), True, 'import numpy as np\n'), ((909, 925), 'numpy.sin', 'np.sin', (['(roll / 2)'], {}), '(roll / 2)\n', (915, 925), True, 'import numpy as np\n'), ((926, 943), 'numpy.sin', 'np.sin', (['(pitch 
/ 2)'], {}), '(pitch / 2)\n', (932, 943), True, 'import numpy as np\n'), ((5834, 5860), 'rospy.Duration.from_sec', 'rospy.Duration.from_sec', (['(5)'], {}), '(5)\n', (5857, 5860), False, 'import rospy\n'), ((6090, 6108), 'numpy.math.cos', 'np.math.cos', (['angle'], {}), '(angle)\n', (6101, 6108), True, 'import numpy as np\n'), ((6114, 6132), 'numpy.math.sin', 'np.math.sin', (['angle'], {}), '(angle)\n', (6125, 6132), True, 'import numpy as np\n')] |
"""
Copyright 2016 <NAME>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import argparse
import csv
import math
import random
import numpy as np
def run(args):
    """Compute cumulative precision/recall over distance bins.

    Reads a tab-separated distances file (left, right, distance) and a
    tab-separated test-set file (left, right, YES/NO duplicate flag),
    histograms the labelled pairs into bins of ``args["bin_width"]``
    and, when ``args["output"]`` is given, writes one TSV row per bin:
    bin lower bound, cumulative precision, cumulative recall.
    """
    bin_width = args["bin_width"]
    with open(args["distances"]) as distances_fl:
        dist_triplets = [(left, right, float(dist))
                         for left, right, dist in csv.reader(distances_fl, delimiter="\t")]
    with open(args["test_set"]) as testset_fl:
        testset = dict()
        for left, right, duplicate_text in csv.reader(testset_fl, delimiter="\t"):
            testset[frozenset((left, right))] = duplicate_text == "YES"
    distances = [dist for left, right, dist in dist_triplets]
    n_bins = int(math.floor(max(distances) / bin_width) + 1)
    bins_duplicates = [0] * n_bins
    bins_non_duplicates = [0] * n_bins
    # Count labelled pairs per distance bin; unlabelled pairs are skipped.
    for left, right, dist in dist_triplets:
        bin_idx = int(math.floor(dist / bin_width))
        pair = frozenset((left, right))
        if pair in testset:
            if testset[pair]:
                bins_duplicates[bin_idx] += 1
            else:
                bins_non_duplicates[bin_idx] += 1
    cum_duplicates = np.cumsum(bins_duplicates)
    cum_non_duplicates = np.cumsum(bins_non_duplicates)
    total_duplicates = sum(1 for value in testset.values() if value)
    cum_precision = []
    cum_recall = []
    for dups, nondups in zip(cum_duplicates, cum_non_duplicates):
        # Bug fix: guard against empty leading bins and an all-negative
        # test set, which previously raised ZeroDivisionError.
        prec = float(dups) / float(dups + nondups) if dups + nondups else 0.0
        recall = float(dups) / float(total_duplicates) if total_duplicates else 0.0
        cum_precision.append(prec)
        cum_recall.append(recall)
    if args["output"] is not None:
        with open(args["output"], "w") as output_fl:
            writer = csv.writer(output_fl, delimiter="\t")
            for bin_idx, (prec, recall) in enumerate(zip(cum_precision, cum_recall)):
                lowerbound = float(bin_idx) * bin_width
                writer.writerow([lowerbound, prec, recall])
def parseargs():
    """Parse the command-line options into a plain dict."""
    parser = argparse.ArgumentParser(description="Sample predicted duplicate pairs.")
    # (flag, value type, required, help text)
    option_specs = (
        ("--distances", str, True, "Distances file"),
        ("--bin-width", float, True, "Bin width"),
        ("--test-set", str, True, "Test set file"),
        ("--output", str, False, "Output file"),
    )
    for flag, value_type, required, text in option_specs:
        parser.add_argument(flag, type=value_type, required=required, help=text)
    return vars(parser.parse_args())
if __name__ == "__main__":
args = parseargs()
run(args)
| [
"csv.reader",
"csv.writer",
"argparse.ArgumentParser",
"math.floor",
"numpy.cumsum"
] | [((1632, 1658), 'numpy.cumsum', 'np.cumsum', (['bins_duplicates'], {}), '(bins_duplicates)\n', (1641, 1658), True, 'import numpy as np\n'), ((1684, 1714), 'numpy.cumsum', 'np.cumsum', (['bins_non_duplicates'], {}), '(bins_non_duplicates)\n', (1693, 1714), True, 'import numpy as np\n'), ((2523, 2595), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Sample predicted duplicate pairs."""'}), "(description='Sample predicted duplicate pairs.')\n", (2546, 2595), False, 'import argparse\n'), ((969, 1007), 'csv.reader', 'csv.reader', (['testset_fl'], {'delimiter': '"""\t"""'}), "(testset_fl, delimiter='\\t')\n", (979, 1007), False, 'import csv\n'), ((1368, 1396), 'math.floor', 'math.floor', (['(dist / bin_width)'], {}), '(dist / bin_width)\n', (1378, 1396), False, 'import math\n'), ((2234, 2271), 'csv.writer', 'csv.writer', (['output_fl'], {'delimiter': '"""\t"""'}), "(output_fl, delimiter='\\t')\n", (2244, 2271), False, 'import csv\n'), ((811, 851), 'csv.reader', 'csv.reader', (['distances_fl'], {'delimiter': '"""\t"""'}), "(distances_fl, delimiter='\\t')\n", (821, 851), False, 'import csv\n')] |
"""
This library contains functions to process image data used by GenDist
"""
import jax
import numpy as np
import jax.numpy as jnp
from multiprocessing import Pool
from augly import image
# DataAugmentationFactory
class Factory:
    """
    Applies a user-supplied processor function to the elements of a
    numpy array, either sequentially or fanned out across worker
    processes.  Intended to be used with gendist.TrainingConfig.
    """
    def __init__(self, processor):
        self.processor = processor

    def __call__(self, img, configs, n_processes=90):
        return self.process_multiple_multiprocessing(img, configs, n_processes)

    def process_single(self, X, *args, **kwargs):
        """
        Apply the processor to one element.

        Parameters
        ----------
        X: np.array
            A single numpy array
        kwargs: dict/params
            Processor's configuration parameters
        """
        return self.processor(X, *args, **kwargs)

    def process_multiple(self, X_batch, configurations):
        """
        Apply the processor element-wise: the i-th element of X_batch
        is transformed with the i-th configuration dict.
        """
        transformed = [
            self.process_single(element, **config)
            for element, config in zip(X_batch, configurations)
        ]
        return np.stack(transformed, axis=0)

    def process_multiple_multiprocessing(self, X_dataset, configurations, n_processes):
        """
        Process the elements of a numpy array in parallel.

        Parameters
        ----------
        X_dataset: array(N, ...)
            N elements of arbitrary shape
        configurations: list or dict
            Configurations to apply to each element; a single dict is
            broadcast to every element.
        n_processes: int
            Number of cores to use
        """
        num_elements = len(X_dataset)
        if type(configurations) == dict:
            configurations = [configurations] * num_elements
        # One (data chunk, config chunk) pair per worker.
        work_items = zip(np.array_split(X_dataset, n_processes),
                         np.array_split(configurations, n_processes))
        with Pool(processes=n_processes) as pool:
            chunks = pool.starmap(self.process_multiple, work_items)
            merged = np.concatenate(chunks, axis=0)
            pool.join()
        # Flatten each processed element into a row.
        return merged.reshape(num_elements, -1)
def flat_and_concat_params(params_hist):
    """
    Flat and concat a list of parameters trained using
    a Flax model
    Parameters
    ----------
    params_hist: list of flax FrozenDicts
        List of flax FrozenDicts containing trained model
        weights.
    Returns
    -------
    jnp.array: flattened and concatenated weights
    function: function to unflatten (reconstruct) weights
    """
    # The unflatten function from the first pytree is reused for all
    # entries -- assumes every element of params_hist shares the same
    # tree structure (TODO confirm).
    _, recontruct_pytree_fn = jax.flatten_util.ravel_pytree(params_hist[0])
    flat_params = [jax.flatten_util.ravel_pytree(params)[0] for params in params_hist]
    # Stack the flat vectors row-wise into a single 2-D array.
    flat_params = jnp.r_[flat_params]
    return flat_params, recontruct_pytree_fn | [
"numpy.stack",
"jax.flatten_util.ravel_pytree",
"multiprocessing.Pool",
"numpy.array_split",
"numpy.concatenate"
] | [((3015, 3060), 'jax.flatten_util.ravel_pytree', 'jax.flatten_util.ravel_pytree', (['params_hist[0]'], {}), '(params_hist[0])\n', (3044, 3060), False, 'import jax\n'), ((1443, 1466), 'numpy.stack', 'np.stack', (['X_out'], {'axis': '(0)'}), '(X_out, axis=0)\n', (1451, 1466), True, 'import numpy as np\n'), ((2149, 2187), 'numpy.array_split', 'np.array_split', (['X_dataset', 'n_processes'], {}), '(X_dataset, n_processes)\n', (2163, 2187), True, 'import numpy as np\n'), ((2211, 2254), 'numpy.array_split', 'np.array_split', (['configurations', 'n_processes'], {}), '(configurations, n_processes)\n', (2225, 2254), True, 'import numpy as np\n'), ((2320, 2347), 'multiprocessing.Pool', 'Pool', ([], {'processes': 'n_processes'}), '(processes=n_processes)\n', (2324, 2347), False, 'from multiprocessing import Pool\n'), ((2461, 2497), 'numpy.concatenate', 'np.concatenate', (['dataset_proc'], {'axis': '(0)'}), '(dataset_proc, axis=0)\n', (2475, 2497), True, 'import numpy as np\n'), ((3080, 3117), 'jax.flatten_util.ravel_pytree', 'jax.flatten_util.ravel_pytree', (['params'], {}), '(params)\n', (3109, 3117), False, 'import jax\n')] |
# import the necessary packages
import numpy as np
import cv2
from ..convenience import is_cv2
from . import factories
class RootSIFT:
def __init__(self):
# initialize the SIFT feature extractor
self.extractor = factories.DescriptorExtractor_create("SIFT")
def compute(self, image, kps, eps=1e-7):
# compute SIFT descriptors for OpenCV 2.4
if is_cv2:
(kps, descs) = self.extractor.compute(image, kps)
# otherwise, computer SIFT descriptors for OpenCV 3+
else:
(kps, descs) = self.extractor.detectAndCompute(image, None)
# if there are no keypoints or descriptors, return an empty tuple
if len(kps) == 0:
return ([], None)
# apply the Hellinger kernel by first L1-normalizing and taking the
# square-root
descs /= (descs.sum(axis=1, keepdims=True) + eps)
descs = np.sqrt(descs)
# return a tuple of the keypoints and descriptors
return (kps, descs) | [
"numpy.sqrt"
] | [((802, 816), 'numpy.sqrt', 'np.sqrt', (['descs'], {}), '(descs)\n', (809, 816), True, 'import numpy as np\n')] |
#!/bin/usr/env python
from adpred import ADpred
import sys
import numpy as np
HELP = '''
using adpred version {}
list of arguments
-----------------
-h | --help
-id | --uniprot-id
-s | --sequence
-l | --local-psipred <path_to_"run_psipred">
-sm | --saturated-mutagenesis (list of start positions separated by comma. Ends are starts+30)
-o | --output-prefix (if empty will use protein.id. if prot_id not provided it will be empty)
examples:
--------
- To get only AD predictions:
run-adpred -id GCN4_YEAST
- to get also saturated mutagenesis results with AD prediction values:
run-adpred -id GCN4_YEAST -sm 108 -o gcn4_satMut108
run-adpred -id GCN4_YEAST -sm 50,108 -o gcn4_satMut_50-and-108
'''.format(ADpred.__version__)
# help is printed by default
if len(sys.argv)==1 or sys.argv[1] in ["-h","--help"] :
print(HELP)
exit(1)
# defaults
start = []
Id, Seq = None, None
out_prefix = None
# user set parameters
for n,arg in enumerate(sys.argv):
if arg in ["-ID","-id","--uniprot-id","uniprot-ID"]:
Id = sys.argv[n+1]
elif arg in ["-s","seq","-Seq","--sequence","--Sequence"]:
Seq = sys.argv[n+1]
elif arg in ["-l", "--local-psipred"]:
local_psipred = sys.argv[n+1]
elif arg in ["-sm", '--saturated-mutagenesis']:
start = [int(i) for i in sys.argv[n+1].split(",")]
elif arg in ["-o","--output-prefix"]:
out_prefix = sys.argv[n+1]
# main
if __name__ == '__main__':
sys.stderr.write("using adpred version {}".format(ADpred.__version__))
# open file to output results
if not out_prefix:
if Id:
out_prefix = Id
elif Seq:
out_prefix = Seq[:7]
else:
sys.stderr.write('You should perovide sequence or uniprot Id..., see --help')
# open output files
predictions_f = open(out_prefix + '.predictions.csv','w')
if len(start)>0:
saturated_f = open(out_prefix + '.saturated_mutagenesis.csv','w')
# iniitialize protein
if Id:
p = ADpred.protein(prot_id=Id)
sys.stderr.write('retrieving sequence...')
elif Seq:
p = ADpred.protein(sequence=Seq)
sys.stderr.write('read sequence ...')
# predict adpred probabilities
p.predict()
sys.stderr.write('calculating secondary structure and adpred...')
pred_header = "position, aa_id, raw value, smooth1, smooth2"
pred_body = zip(np.arange(1,len(p.sequence)+1),
p.sequence,
p.predictions,
np.convolve(p.predictions, np.ones(10)/10, "same"),
np.convolve(p.predictions, np.ones(15)/15, "same"))
pred_body = '\n'.join(["{},{},{},{},{}".format(i[0],i[1],i[2],i[3],i[4]) for i in pred_body])
predictions_f.write('\n'.join([pred_header, pred_body]))
# compute saturated mutagenesis
if len(start)>0:
for i in start:
p.saturated_mutagenesis(i-1)
string = [j+','+','.join(list(k.astype(str))) for j,k in zip(ADpred.aa[::-1], p.heatmaps[i-1])]
saturated_f.write('>' + str(i) + '\n' + '\n'.join(string) + '\n' +\
','+','.join(list(np.arange(i,i+30).astype(str)))+'\n'+\
','+','.join(p.sequence[i-1:i+29])+'\n')
# close written files
predictions_f.close()
try:
saturated_f.close()
except Exception:
pass
| [
"sys.stderr.write",
"numpy.arange",
"numpy.ones",
"adpred.ADpred.protein"
] | [((2423, 2488), 'sys.stderr.write', 'sys.stderr.write', (['"""calculating secondary structure and adpred..."""'], {}), "('calculating secondary structure and adpred...')\n", (2439, 2488), False, 'import sys\n'), ((2185, 2211), 'adpred.ADpred.protein', 'ADpred.protein', ([], {'prot_id': 'Id'}), '(prot_id=Id)\n', (2199, 2211), False, 'from adpred import ADpred\n'), ((2220, 2262), 'sys.stderr.write', 'sys.stderr.write', (['"""retrieving sequence..."""'], {}), "('retrieving sequence...')\n", (2236, 2262), False, 'import sys\n'), ((2289, 2317), 'adpred.ADpred.protein', 'ADpred.protein', ([], {'sequence': 'Seq'}), '(sequence=Seq)\n', (2303, 2317), False, 'from adpred import ADpred\n'), ((2329, 2366), 'sys.stderr.write', 'sys.stderr.write', (['"""read sequence ..."""'], {}), "('read sequence ...')\n", (2345, 2366), False, 'import sys\n'), ((1860, 1937), 'sys.stderr.write', 'sys.stderr.write', (['"""You should perovide sequence or uniprot Id..., see --help"""'], {}), "('You should perovide sequence or uniprot Id..., see --help')\n", (1876, 1937), False, 'import sys\n'), ((2722, 2733), 'numpy.ones', 'np.ones', (['(10)'], {}), '(10)\n', (2729, 2733), True, 'import numpy as np\n'), ((2796, 2807), 'numpy.ones', 'np.ones', (['(15)'], {}), '(15)\n', (2803, 2807), True, 'import numpy as np\n'), ((3361, 3381), 'numpy.arange', 'np.arange', (['i', '(i + 30)'], {}), '(i, i + 30)\n', (3370, 3381), True, 'import numpy as np\n')] |
"""
============================ Trade Strategy ============================
A class that holds information pertaining to the trade strategy and
makes use of the given CryptoSignals
Author: <NAME>
GitHub: gbledt
Version: 0.1
TODO:
- All of it
"""
import numpy as np
import datetime
from TextColors import TextColors
import pandas as pd
NOMINAL_BUY = 0.01
NOMINAL_SELL = 0.01
NOMINAL_USD_PER_TRADE = 25
TRADE_ACTIVE = False#True
TRADE_ACTIVE_CRYPTO = False#True
class TradeStrategy:
def __init__(self, *args, **kwargs):
# The index of the strategy being used in the object
if 'strategy' in kwargs:
self.strategy = kwargs.get('strategy')
def LinkAccount(self, account):
""" Links an account to the TradingStrategy object
"""
self.account = account
def CancelOrder(order):
open_orders.remove(order)
def TradeSignalStrategy(self, auth_client, coin):
""" Decides which trading strategy is used to generate buy or
sell trade signals.
"""
# Choose the correct strategy to use
if (self.strategy == 0):
# SMA Golden Cross strategy
print('\n SMA Golden Cross Strategy:')
strategy_results = self.SMAGoldenCross(coin)
elif (self.strategy == 1):
print('\n Derivative Prediction Strategy:')
strategy_results = self.DerivativePrediction(coin)
elif (self.strategy == 2):
# Variance Anomaly Trigger strategy
print('\n Variance Anomaly Trigger Strategy:')
strategy_results = self.VarianceAnomalyTrigger(coin)
elif (self.strategy == 3):
# Volume Anomaly Trigger strategy
print('\n Volume Weighted Variance Anomaly Trigger Strategy:')
strategy_results = self.VolumeWeightedVarianceAnomalyTrigger(coin)
else:
print(TextColors.Red + '\n Invalid Strategy!' + TextColors.RESET)
strategy_results = {'buy_signal': False, 'sell_signal': False, 'buy_price': 0, 'sell_price': 0, 'buy_size': 0, 'sell_size': 0}
# Post the Orders
resp_post = self.PostOrders(auth_client, coin, strategy_results)
def PostOrders(self, auth_client, coin, strategy_results):
""" Given the results of the strategy, post the buy and sell
orders from the signals
"""
if strategy_results.get('buy_signal'):
# Parse the results
buy_size = str(round(strategy_results.get('buy_size'),6))
buy_price = str(min(coin.price-0.10, round(strategy_results.get('buy_price'),2)))
# Print the results of the trading strategy
print(TextColors.GREEN + ' Buy: ' + TextColors.RESET + buy_size + ' at $' + buy_price)
# Post the buy order
if (TRADE_ACTIVE_CRYPTO):
resp = auth_client.buy(price=buy_price, # USD
size=buy_size, # Coin
product_id=(coin.currency_wallet + '-USD'))
# Print the result
if 'message' in resp:
print(TextColors.RED + ' ERROR: Failed to post\n ' +
resp.get(u'message') + TextColors.RESET);
else:
print(TextColors.GREEN + ' SUCCESS!' + TextColors.RESET)
else:
resp = {'message': 'No trade was posted'}
print(TextColors.YELLOW + ' Trading for ' + coin.currency_wallet + ' is inactive' + TextColors.RESET)
if strategy_results.get('sell_signal'):
# Parse the results
sell_size = str(round(strategy_results.get('sell_size'),6))
sell_price = str(max(coin.price+0.10, round(strategy_results.get('sell_price'),2)))
# Print the results of the trading strategy
print(TextColors.RED + ' Sell: ' + TextColors.RESET + sell_size + ' at $' + sell_price)
# Post orders for active cryptos
if (TRADE_ACTIVE_CRYPTO):
# Post the sell order
resp = auth_client.sell(price=sell_price, #USD
size=sell_size, #BTC
product_id=(coin.currency_wallet + '-USD'))
# Print the result
if 'message' in resp:
print(TextColors.RED + ' ERROR: Failed to post\n ' +
resp.get(u'message') + TextColors.RESET);
else:
print(TextColors.GREEN + ' SUCCESS!' + TextColors.RESET)
else:
resp = {'message': 'No trade was posted'}
print(TextColors.YELLOW + ' Trading for ' + coin.currency_wallet + ' is inactive' + TextColors.RESET)
if not strategy_results.get('buy_signal') and not strategy_results.get('sell_signal'):
# Manufacture a response
resp = {'message': 'No trade was posted'}
# Print that no signal will be posted
print(TextColors.YELLOW + ' No trade signals calculated' + TextColors.RESET)
return resp
"""
========================= Trade Strategies =========================
"""
def SMAGoldenCross(self, coin):
# Timestep
t = 1
buy_sig = False
sell_sig = False
buy_size = NOMINAL_BUY
sell_size = NOMINAL_SELL
# Find the current values of the SMAs
fast_SMA = coin.SMA_vec[0][-1]
medium_SMA = coin.SMA_vec[1][-1]
slow_SMA = coin.SMA_vec[2][-1]
# Buy signal when shorter period SMAs are greater
if (fast_SMA > medium_SMA and medium_SMA > slow_SMA):
buy_sig = True
else:
buy_sign = False
# Sell signal when shorter period SMAs are greater
if (fast_SMA <= medium_SMA and medium_SMA <= slow_SMA):
sell_sig = True
else:
sell_sig = False
# Use the current dynamics to predict a price
predicted_close = coin.price #+ t*coin.dSMAdt_vec[0][-1] + t**2/2*coin.ddSMAddt_vec[0][-1]/2
# Price to set the buy bids
buy_price = medium_SMA
# Price to set the sell bids
sell_price = medium_SMA
buy_size = NOMINAL_USD_PER_TRADE/buy_price
sell_size = NOMINAL_USD_PER_TRADE/sell_price
# return the boolean signaling if you should buy or sell the crypto
return{'buy_signal': buy_sig, 'sell_signal': sell_sig, 'buy_price': buy_price, 'sell_price': sell_price, 'buy_size': buy_size, 'sell_size': sell_size}
def DerivativePrediction(self, coin):
buy_sig = False
sell_sig = False
buy_size = 0
sell_size = 0
buy_price = 0
sell_price = 0
# return the boolean signaling if you should buy or sell the crypto
return {'buy_signal': buy_sig, 'sell_signal': sell_sig, 'buy_price': buy_price, 'sell_price': sell_price, 'buy_size': buy_size, 'sell_size': sell_size}
def VarianceAnomalyTrigger(self, coin):
# Timestep
t = 1
# Constantly place buy and sell bids hoping to catch random price fluctuations
buy_sig = True
sell_sig = True
buy_size = NOMINAL_BUY
sell_size = NOMINAL_SELL
# Find the current values of the SMAs
sigma = np.std(np.array(coin.H_vec)-np.array(coin.L_vec))
# Use the current dynamics to predict a price
predicted_close = coin.price #+ t*coin.dSMAdt_vec[0][-1] + t**2/2*coin.ddSMAddt_vec[0][-1]/2
# Price to set the buy bids
buy_price = predicted_close - 3*sigma
# Price to set the sell bids
sell_price = predicted_close + 3*sigma
buy_size = NOMINAL_USD_PER_TRADE/buy_price
sell_size = NOMINAL_USD_PER_TRADE/sell_price
# return the boolean signaling if you should buy or sell the crypto
return{'buy_signal': buy_sig, 'sell_signal': sell_sig, 'buy_price': buy_price, 'sell_price': sell_price, 'buy_size': buy_size, 'sell_size': sell_size}
def VolumeWeightedVarianceAnomalyTrigger(self, coin):
# Timestep
t = 1
# Constantly place buy and sell bids hoping to catch random price fluctuations
buy_sig = True
sell_sig = True
buy_size = 10*NOMINAL_BUY
sell_size = 10*NOMINAL_SELL
# Find the current values of the SMAs
sigma = np.std(np.multiply(np.array(coin.H_vec)-np.array(coin.L_vec),coin.V_vec/np.mean(coin.V_vec)))
# Use the current dynamics to predict a price
predicted_close = coin.SMA_vec[0][-1]#+ t*coin.dSMAdt_vec[0][-1] + t**2/2*coin.ddSMAddt_vec[0][-1]/2
# Price to set the buy bids
buy_price = predicted_close - 3*sigma
# Price to set the sell bids
sell_price = predicted_close + 3*sigma
buy_size = 3*NOMINAL_USD_PER_TRADE/buy_price
sell_size = 3*NOMINAL_USD_PER_TRADE/sell_price
# return the boolean signaling if you should buy or sell the crypto
return {'buy_signal': buy_sig, 'sell_signal': sell_sig, 'buy_price': buy_price, 'sell_price': sell_price, 'buy_size': buy_size, 'sell_size': sell_size}
| [
"numpy.mean",
"numpy.array"
] | [((6341, 6361), 'numpy.array', 'np.array', (['coin.H_vec'], {}), '(coin.H_vec)\n', (6349, 6361), True, 'import numpy as np\n'), ((6362, 6382), 'numpy.array', 'np.array', (['coin.L_vec'], {}), '(coin.L_vec)\n', (6370, 6382), True, 'import numpy as np\n'), ((7332, 7352), 'numpy.array', 'np.array', (['coin.H_vec'], {}), '(coin.H_vec)\n', (7340, 7352), True, 'import numpy as np\n'), ((7353, 7373), 'numpy.array', 'np.array', (['coin.L_vec'], {}), '(coin.L_vec)\n', (7361, 7373), True, 'import numpy as np\n'), ((7385, 7404), 'numpy.mean', 'np.mean', (['coin.V_vec'], {}), '(coin.V_vec)\n', (7392, 7404), True, 'import numpy as np\n')] |
import torch
import numpy as np
from model.stg2_generator import Generator
from model.stg2_discriminator import Discriminator
if __name__ == "__main__":
from my_utils.graph_writer import graph_writer
img_size = 256
generator = Generator(img_size, 512, 8, channel_multiplier=2)
# from my_utils.print_model_summary import summary
# summary(generator, (1, 512))
graph_writer.draw(generator, 'STG2_Original_Generator.png', (16, 38),
[torch.zeros((1, 512), dtype=torch.float32, device='cpu'), ],
randomize_noise=False)
print('Generator modle saved')
tot_gen_params = 0
for discrim_params in generator.parameters():
tot_gen_params += np.prod(discrim_params.shape)
print(f'generator n_params: {tot_gen_params}')
discriminator = Discriminator(img_size, channel_multiplier=2)
graph_writer.draw(discriminator, 'STG2_Original_Discriminator.png', (16, 38),
torch.zeros((1, 3, img_size, img_size), dtype=torch.float32, device='cpu'))
print('Generator modle saved')
tot_gen_params = 0
for discrim_params in discriminator.parameters():
tot_gen_params += np.prod(discrim_params.shape)
print(f'discriminator n_params: {tot_gen_params}') | [
"model.stg2_generator.Generator",
"torch.zeros",
"model.stg2_discriminator.Discriminator",
"numpy.prod"
] | [((242, 291), 'model.stg2_generator.Generator', 'Generator', (['img_size', '(512)', '(8)'], {'channel_multiplier': '(2)'}), '(img_size, 512, 8, channel_multiplier=2)\n', (251, 291), False, 'from model.stg2_generator import Generator\n'), ((824, 869), 'model.stg2_discriminator.Discriminator', 'Discriminator', (['img_size'], {'channel_multiplier': '(2)'}), '(img_size, channel_multiplier=2)\n', (837, 869), False, 'from model.stg2_discriminator import Discriminator\n'), ((722, 751), 'numpy.prod', 'np.prod', (['discrim_params.shape'], {}), '(discrim_params.shape)\n', (729, 751), True, 'import numpy as np\n'), ((974, 1048), 'torch.zeros', 'torch.zeros', (['(1, 3, img_size, img_size)'], {'dtype': 'torch.float32', 'device': '"""cpu"""'}), "((1, 3, img_size, img_size), dtype=torch.float32, device='cpu')\n", (985, 1048), False, 'import torch\n'), ((1189, 1218), 'numpy.prod', 'np.prod', (['discrim_params.shape'], {}), '(discrim_params.shape)\n', (1196, 1218), True, 'import numpy as np\n'), ((481, 537), 'torch.zeros', 'torch.zeros', (['(1, 512)'], {'dtype': 'torch.float32', 'device': '"""cpu"""'}), "((1, 512), dtype=torch.float32, device='cpu')\n", (492, 537), False, 'import torch\n')] |
import argparse
import os
import numpy as np
from scipy import sparse
import pickle
import timeit
class PBGENA(object):
def __init__(self,graph,p,N,alpha,b_t,b_a,l_t=1,l_a=1,f_t=2,f_a=2,f=1):
print('\nSetting up PBGENA...')
assert os.path.isdir('../../Datasets/'+graph),'Folder for {0} network does not exist'.format(graph)
self.__graph=graph
assert os.path.isfile('../../Datasets/'+self.__graph+'/edge_list.npy'),'Edge list file does not exist for {0} network'.format(self.__graph)
assert os.path.isfile('../../Datasets/'+self.__graph+'/attribute_matrix.npz'),'Attribute matrix file does not exist for {0} network'.format(self.__graph)
attribute_matrix=sparse.load_npz('../../Datasets/'+self.__graph+'/attribute_matrix.npz')
self.__nodes=attribute_matrix.shape[0]
self.__attributes=attribute_matrix.shape[1]
assert isinstance(N,int),'Dimensions must be an integer'
self.__N=N
assert alpha>=0 and alpha<=1,'alpha should lie in the range [0,1]'
self.__alpha=alpha
assert b_t>=0 and b_t<=1,'b_t should lie in the range [0,1]'
self.__b_t=b_t
assert b_a>=0 and b_a<=1,'b_a should lie in the range [0,1]'
self.__b_a=b_a
assert isinstance(p,int),'Number of processors must be an integer'
self.__p=p
assert isinstance(f,int),'Number of fragments must be an integer'
self.__f=f
assert isinstance(l_t,int),'Topology level must be an integer'
self.__l_t=l_t
assert isinstance(l_a,int),'Attribute level must be an integer'
self.__l_a=l_a
assert f_t>=1,'f_t>=1, becuase b_t cannot increase over several passes'
self.__f_t=f_t
assert f_a>=1,'f_a>=1, becuase b_a cannot increase over several passes'
self.__f_a=f_a
def preprocess_edges(self):
print('\nRemoving unwanted edges...')
edge_list=np.load('../../Datasets/'+self.__graph+'/edge_list.npy')
e=set()
for i in edge_list:
if i[0]!=i[1] and (i[0],i[1]) not in e and (i[1],i[0]) not in e:
e.add((i[0],i[1]))
edge_list=np.zeros((len(e),2),dtype=int)
j=0
for i in e:
edge_list[j]=i
j+=1
np.save('../../Datasets/'+self.__graph+'/edge_list_preprocessed.npy',edge_list)
self.__edges=edge_list.shape[0]
print('\n{0}:'.format(self.__graph))
print('#Nodes =',self.__nodes)
print('#Edges =',self.__edges)
print('#Attributes =',self.__attributes)
return edge_list,self.__nodes
def remove_edges(self,erf):
print('\nRandomly removing edges...')
edge_list=np.load('../../Datasets/'+self.__graph+'/edge_list_preprocessed.npy')
edge_indices=np.arange(self.__edges)
positive_edge_test=np.random.choice(a=edge_indices,size=int(self.__edges*erf),replace=False)
edge_list=np.delete(edge_list,positive_edge_test,axis=0)
self.__edges=edge_list.shape[0]
np.save('../../Datasets/'+self.__graph+'/edge_list_preprocessed.npy',edge_list)
return positive_edge_test,edge_indices
def embed(self):
print('\nEmbedding...')
file=open('PBGENA_parameters.txt','w+')
file.write('graph {0}\n'.format(self.__graph))
file.write('N {0}\n'.format(self.__N))
file.write('alpha {0}\n'.format(self.__alpha))
file.write('b_a {0}\n'.format(self.__b_a))
file.write('b_t {0}\n'.format(self.__b_t))
file.write('l_t {0}\n'.format(self.__l_t))
file.write('l_a {0}\n'.format(self.__l_a))
file.write('f_t {0}\n'.format(self.__f_t))
file.write('f_a {0}\n'.format(self.__f_a))
file.write('fragments {0}\n'.format(self.__f))
file.write('nodes {0}\n'.format(self.__nodes))
file.write('edges {0}\n'.format(self.__edges))
file.write('attributes {0}\n'.format(self.__attributes))
file.close()
start_time=timeit.default_timer()
os.system('mpiexec -n {0} python PBGENA_routine.py'.format(self.__p))
elapsed=timeit.default_timer()-start_time
print('Embedding Time + Graph Reading Time = %.2fs\n'%elapsed)
os.remove('../../Datasets/'+self.__graph+'/edge_list_preprocessed.npy')
emb=pickle.load(open('../../Embeddings/'+self.__graph+'_PBGENA_emb.pkl','rb'))
print('Embedding Dimension =',len(emb[0].tolist()),'\n')
return emb
def embedding_as_array(self):
print('Embedding as numpy array...\n')
emb=pickle.load(open('../../Embeddings/'+self.__graph+'_PBGENA_emb.pkl','rb'))
for i in range(len(emb)):
emb[i]=np.frombuffer(emb[i].unpack(),dtype=bool)
emb=np.array(emb)
return emb
if __name__=='__main__':
parser=argparse.ArgumentParser(description='PBGENA')
parser.add_argument('--graph',type=str,help='Network Name')
parser.add_argument('--N',type=int,default=8000,help='Embedding Dimension')
parser.add_argument('--alpha',type=float,help='Fraction of the dimensions to be used for attributes')
parser.add_argument('--b_t',type=float,help='Topology Bitset Probability')
parser.add_argument('--b_a',type=float,help='Attribute Bitset Probability')
parser.add_argument('--l_t',type=int,default=1,help='Number of passes of edge propagation over the topology embeddings')
parser.add_argument('--l_a',type=int,default=1,help='Number of passes of edge propagation over the attribute embeddings')
parser.add_argument('--f_t',type=float,default=2,help='How much to reduce b_t each pass?')
parser.add_argument('--f_a',type=float,default=2,help='How much to reduce b_a each pass?')
parser.add_argument('--p',type=int,default=32,help='Number of Cores')
parser.add_argument('--f',type=int,default=1,help='Number of Fragments')
args=parser.parse_args()
pbgena=PBGENA(graph=args.graph,p=args.p,N=args.N,alpha=args.alpha,b_t=args.b_t,b_a=args.b_a,l_t=args.l_t,l_a=args.l_a,f_t=args.f_t,f_a=args.f_a,f=args.f)
pbgena.preprocess_edges()
pbgena.embed()
| [
"numpy.load",
"numpy.save",
"os.remove",
"argparse.ArgumentParser",
"os.path.isdir",
"scipy.sparse.load_npz",
"timeit.default_timer",
"os.path.isfile",
"numpy.arange",
"numpy.array",
"numpy.delete"
] | [((4901, 4946), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""PBGENA"""'}), "(description='PBGENA')\n", (4924, 4946), False, 'import argparse\n'), ((260, 300), 'os.path.isdir', 'os.path.isdir', (["('../../Datasets/' + graph)"], {}), "('../../Datasets/' + graph)\n", (273, 300), False, 'import os\n'), ((397, 464), 'os.path.isfile', 'os.path.isfile', (["('../../Datasets/' + self.__graph + '/edge_list.npy')"], {}), "('../../Datasets/' + self.__graph + '/edge_list.npy')\n", (411, 464), False, 'import os\n'), ((546, 620), 'os.path.isfile', 'os.path.isfile', (["('../../Datasets/' + self.__graph + '/attribute_matrix.npz')"], {}), "('../../Datasets/' + self.__graph + '/attribute_matrix.npz')\n", (560, 620), False, 'import os\n'), ((719, 794), 'scipy.sparse.load_npz', 'sparse.load_npz', (["('../../Datasets/' + self.__graph + '/attribute_matrix.npz')"], {}), "('../../Datasets/' + self.__graph + '/attribute_matrix.npz')\n", (734, 794), False, 'from scipy import sparse\n'), ((1963, 2023), 'numpy.load', 'np.load', (["('../../Datasets/' + self.__graph + '/edge_list.npy')"], {}), "('../../Datasets/' + self.__graph + '/edge_list.npy')\n", (1970, 2023), True, 'import numpy as np\n'), ((2319, 2407), 'numpy.save', 'np.save', (["('../../Datasets/' + self.__graph + '/edge_list_preprocessed.npy')", 'edge_list'], {}), "('../../Datasets/' + self.__graph + '/edge_list_preprocessed.npy',\n edge_list)\n", (2326, 2407), True, 'import numpy as np\n'), ((2754, 2827), 'numpy.load', 'np.load', (["('../../Datasets/' + self.__graph + '/edge_list_preprocessed.npy')"], {}), "('../../Datasets/' + self.__graph + '/edge_list_preprocessed.npy')\n", (2761, 2827), True, 'import numpy as np\n'), ((2846, 2869), 'numpy.arange', 'np.arange', (['self.__edges'], {}), '(self.__edges)\n', (2855, 2869), True, 'import numpy as np\n'), ((2991, 3039), 'numpy.delete', 'np.delete', (['edge_list', 'positive_edge_test'], {'axis': '(0)'}), '(edge_list, positive_edge_test, axis=0)\n', 
(3000, 3039), True, 'import numpy as np\n'), ((3088, 3176), 'numpy.save', 'np.save', (["('../../Datasets/' + self.__graph + '/edge_list_preprocessed.npy')", 'edge_list'], {}), "('../../Datasets/' + self.__graph + '/edge_list_preprocessed.npy',\n edge_list)\n", (3095, 3176), True, 'import numpy as np\n'), ((4068, 4090), 'timeit.default_timer', 'timeit.default_timer', ([], {}), '()\n', (4088, 4090), False, 'import timeit\n'), ((4302, 4377), 'os.remove', 'os.remove', (["('../../Datasets/' + self.__graph + '/edge_list_preprocessed.npy')"], {}), "('../../Datasets/' + self.__graph + '/edge_list_preprocessed.npy')\n", (4311, 4377), False, 'import os\n'), ((4829, 4842), 'numpy.array', 'np.array', (['emb'], {}), '(emb)\n', (4837, 4842), True, 'import numpy as np\n'), ((4187, 4209), 'timeit.default_timer', 'timeit.default_timer', ([], {}), '()\n', (4207, 4209), False, 'import timeit\n')] |
# Author: <NAME>, 2019
# License: BSD
from itertools import product
import numpy as np
def _vectorize(func, theta):
theta = np.array(theta)
flat = len(theta.shape) == 1
if flat:
return func(theta)
else:
n_samples = theta.shape[0]
ret = [func(theta[i]) for i in range(n_samples)]
return np.array(ret)
class Polytope(object):
    """Base class lifting per-sample polytope operations to batched inputs.

    Subclasses provide the single-sample primitives (``_Euclidean_project``,
    ``_KL_project``, ``_argmax``, ``_phi``, ``_inv_phi``); the public methods
    defined here broadcast them over 2-d inputs via ``_vectorize``.
    """

    def Euclidean_project(self, theta):
        """
        Compute Euclidean projection.

        Parameters
        ----------
        theta: array, shape = n_samples x n_features
            Input array.

        Returns
        -------
        out: array, shape = n_samples x n_features
            Output array
        """
        return _vectorize(self._Euclidean_project, theta)

    def KL_project(self, theta):
        """
        Compute KL projection.

        Parameters
        ----------
        theta: array, shape = n_samples x n_features
            Input array.

        Returns
        -------
        out: array, shape = n_samples x n_features
            Output array
        """
        return _vectorize(self._KL_project, theta)

    def project(self, theta, projection_type="Euclidean"):
        """Dispatch to the Euclidean or KL projection by name."""
        if projection_type == "KL":
            return self.KL_project(theta)
        if projection_type == "Euclidean":
            return self.Euclidean_project(theta)
        raise ValueError("Unknown projection_type.")

    def MAP(self, theta):
        """
        Compute MAP projection.

        Parameters
        ----------
        theta: array, shape = n_samples x n_features
            Input array.

        Returns
        -------
        out: array, shape = n_samples x n_outputs
            Output array
        """
        return self.inv_phi(self.argmax(theta))

    def argmax(self, theta):
        """
        Compute argmax.

        Parameters
        ----------
        theta: array, shape = n_samples x n_features
            Input array.

        Returns
        -------
        out: array, shape = n_samples x n_features
            Output array
        """
        return _vectorize(self._argmax, theta)

    def _MAP(self, theta):
        # Single-sample counterpart of MAP.
        return self._inv_phi(self._argmax(theta))

    def phi(self, Y):
        # Map outputs to their vertex representation, row by row.
        return _vectorize(self._phi, Y)

    def inv_phi(self, Y):
        # Map vertex representations back to outputs, row by row.
        return _vectorize(self._inv_phi, Y)
class UnitCube(Polytope):
    """The unit hypercube [0, 1]^d."""

    def Euclidean_project(self, theta):
        # Coordinate-wise clipping to [0, 1].
        return np.clip(theta, 0, 1)

    def KL_project(self, theta):
        arr = np.array(theta)
        return np.minimum(np.exp(arr - 1), 1)

    def argmax(self, theta):
        # A coordinate is selected iff its score is strictly positive.
        return (np.array(theta) > 0).astype(int)

    def phi(self, y):
        # Vertices are their own canonical representation.
        return y

    def inv_phi(self, y):
        return y

    def vertices(self, size):
        # Enumerate all 2**size binary vectors.
        for bits in product((0, 1), repeat=size):
            yield np.array(bits)
class ProbabilitySimplex(Polytope):
    """The probability simplex {p : p >= 0, sum(p) = 1}."""

    def Euclidean_project(self, theta):
        from simplex import project_simplex
        theta = np.array(theta)
        if theta.ndim == 1:
            return project_simplex(theta)
        if theta.ndim == 2:
            return project_simplex(theta, axis=1)
        raise ValueError("Invalid shape for theta.")

    def KL_project(self, theta):
        # The KL projection onto the simplex is the softmax.
        theta = np.array(theta)
        flat = theta.ndim == 1
        rows = theta.reshape(1, -1) if flat else theta
        # Subtract the row-wise max for numerical stability.
        shifted = rows - np.max(rows, axis=1)[:, np.newaxis]
        exp_rows = np.exp(shifted)
        probs = exp_rows / np.sum(exp_rows, axis=1)[:, np.newaxis]
        return np.ravel(probs) if flat else probs

    # FIXME: vectorize
    def _argmax(self, theta):
        # One-hot indicator of the largest entry.
        one_hot = np.zeros(len(theta))
        one_hot[np.argmax(theta)] = 1
        return one_hot

    def MAP(self, theta):
        # Return integer class indices rather than one-hot vectors.
        if theta.ndim == 1:
            return np.argmax(theta)
        if theta.ndim == 2:
            return np.argmax(theta, axis=1)
        raise ValueError("Invalid shape for theta.")

    def vertices(self, size):
        # The vertices are the standard basis vectors.
        for basis_vector in np.eye(size):
            yield basis_vector
class CartesianProduct(Polytope):
    """Cartesian product of n copies of a base polytope.

    Points are n x n blocks handled as flattened vectors of length n * n,
    where each of the n rows lives in ``polytope``.
    """

    def __init__(self, polytope):
        self.polytope = polytope

    def _apply_func(self, theta, func):
        # theta is expected to be flat with length n_classes * n_classes.
        n = int(np.sqrt(theta.shape[0]))
        block = theta.reshape(n, n)
        out = np.zeros_like(block)
        for row in range(n):
            out[row] = func(block[row])
        # Return the same (flattened) shape as theta.
        return out.ravel()

    def _Euclidean_project(self, theta):
        return self._apply_func(theta, self.polytope.Euclidean_project)

    def _KL_project(self, theta):
        return self._apply_func(theta, self.polytope.KL_project)

    def _argmax(self, theta):
        # Row-wise argmax, delegated to the base polytope.
        return self._apply_func(theta, self.polytope.argmax)

    def vertices(self, size):  # size = len(theta)
        n = int(np.sqrt(size))
        for combo in product(np.eye(n), repeat=n):
            yield np.array(combo).ravel()
class Knapsack(Polytope):
    """Knapsack polytope: {y in [0, 1]^k : min_labels <= sum(y) <= max_labels}.

    Parameters
    ----------
    max_labels : int
        Maximum number of labels that may be selected.
    min_labels : int, optional
        Minimum number of labels that must be selected (default: 0).
    algo : str, optional
        Algorithm for the equality-constrained sub-projection:
        "isotonic" (via the permutahedron) or "bisection".
    """

    def __init__(self, max_labels, min_labels=0, algo="isotonic"):
        self.max_labels = max_labels
        self.min_labels = min_labels
        self.algo = algo

    def _project_equality(self, theta, n_labels):
        # Project onto {y in [0,1]^k : sum(y) = n_labels}.
        if self.algo == "isotonic":
            # Equivalent to projecting onto a permutahedron whose weight
            # vector contains n_labels ones followed by zeros.
            w = np.zeros(len(theta))
            w[:n_labels] = 1
            return Permutahedron(w, w_sorted=True).project(theta)
        elif self.algo == "bisection":
            # Bisection on the dual variable tau of the sum constraint.
            eps = 1e-6
            upper = np.max(theta)
            lower = -upper
            current = np.inf
            for it in range(100):
                if np.abs(current) / n_labels < eps and current < 0:
                    break
                tau = (upper + lower) / 2.0
                mu = np.minimum(np.maximum(theta - tau, 0), 1)
                current = np.sum(mu) - n_labels
                if current <= 0:
                    upper = tau
                else:
                    lower = tau
            return mu
        else:
            raise ValueError("Invalid algorithm name")

    def _Euclidean_project(self, theta):
        # First attempt to project on the unit cube.
        u = np.minimum(np.maximum(theta, 0), 1)
        su = np.sum(u)
        if self.min_labels <= su and su <= self.max_labels:
            # If the inequality is satisfied, we're done.
            return u
        else:
            # Otherwise the active sum constraint determines the target.
            if su >= self.max_labels:
                return self._project_equality(theta, self.max_labels)
            else:
                return self._project_equality(theta, self.min_labels)

    def _KL_project(self, theta):
        from simplex import constrained_softmax
        theta = np.array(theta)
        # First attempt to project on the unit cube.
        u = np.minimum(np.exp(theta - 1), 1)
        su = np.sum(u)
        if self.min_labels <= su and su <= self.max_labels:
            # If the inequality is satisfied, we're done.
            return u
        else:
            if su >= self.max_labels:
                n_labels = self.max_labels
            else:
                # su <= 0 should never happen so n_labels can't be 0
                n_labels = self.min_labels
            # BUG FIX: a stray ``n_labels = self.max_labels`` here used to
            # overwrite the branch above, so min_labels was silently ignored
            # in the KL projection (unlike in _Euclidean_project).
            z = theta - np.log(n_labels)
            u = np.ones(len(theta)) / float(n_labels)
            return constrained_softmax(z, u) * n_labels

    def _argmax(self, theta):
        theta = np.array(theta)
        sol = np.zeros_like(theta)
        top = np.argsort(theta)[::-1]
        # We pick labels between 'min_labels' and 'max_labels' only if the
        # corresponding theta is non-negative.
        sol[top[self.min_labels:self.max_labels]] = 1
        sol = np.logical_and(sol.astype(bool), theta >= 0)
        sol = sol.astype(int)
        # If 'min_labels' is set, the first 'min_labels' labels must be picked.
        sol[top[:self.min_labels]] = 1
        return sol

    def vertices(self, size):
        # Enumerate all binary vectors whose support size is feasible.
        max_labels = size if self.max_labels is None else self.max_labels
        for tup in product([0, 1], repeat=size):
            ret = np.array(tup)
            s = np.sum(ret)
            if self.min_labels <= s and s <= max_labels:
                yield ret
class Birkhoff(Polytope):
    """Birkhoff polytope: the convex hull of the n x n permutation matrices.

    Points are handled as flattened vectors of length n * n.

    Parameters
    ----------
    max_iter : int, optional
        Maximum number of iterations for the regularized OT solver.
    tol : float, optional
        Convergence tolerance for the regularized OT solver.
    """

    def __init__(self, max_iter=1000, tol=1e-3):
        self.max_iter = max_iter
        self.tol = tol

    def _project(self, theta, regul):
        # Projection computed via regularized optimal transport (POT library)
        # with uniform marginals; "l2" gives the Euclidean projection,
        # "entropic" the KL projection.
        import ot
        theta = np.array(theta)
        d = theta.shape[0]
        n_classes = int(np.sqrt(d))
        theta = theta.reshape(n_classes, n_classes)
        if regul == "l2":
            regul = ot.SquaredL2(gamma=1.0)
        elif regul == "entropic":
            regul = ot.NegEntropy(gamma=1.0)
        else:
            raise ValueError
        o = np.ones(n_classes)
        # We want to solve argmin_T ||T - theta ||^2.
        alpha = ot.solve_semi_dual(o, o, -theta, regul,
                                   max_iter=self.max_iter, tol=self.tol)
        ret = ot.get_plan_from_semi_dual(alpha, o, -theta, regul)
        return ret.ravel()

    def _Euclidean_project(self, theta):
        return self._project(theta, "l2")

    def _KL_project(self, theta):
        return self._project(theta, "entropic")

    def _argmax(self, theta):
        from scipy.optimize import linear_sum_assignment
        n_classes = int(np.sqrt(theta.shape[0]))
        theta = theta.reshape(n_classes, n_classes)
        # linear_sum_assignment minimizes; negate theta to maximize.
        rows, cols = linear_sum_assignment(-theta)
        # Construct permutation matrix.
        ret = np.zeros((n_classes, n_classes))
        for j in range(len(rows)):
            ret[rows[j], cols[j]] = 1
        return ret.ravel()

    def _phi(self, y):
        """From permutation to flattened permutation matrix.

        The input y should be of the form y[rank] = label.
        The returned permutation matrix has the form Y[rank, label].
        The matrix is flattened.
        """
        n_classes = y.shape[0]
        ret = np.zeros((n_classes, n_classes))
        for j in range(n_classes):
            ret[j, y[j]] = 1
        return ret.ravel()

    def _inv_phi(self, y):
        """From flattened permutation matrix to permutation."""
        # BUG FIX: the original read ``Y.shape[0]`` before ``Y`` was assigned,
        # raising NameError.  Use ``y.size`` so both a flat length-n*n vector
        # and an already reshaped n x n matrix (as passed by ``_MAP``) work.
        n_classes = int(np.sqrt(y.size))
        Y = y.reshape(n_classes, n_classes)
        # Permutations are integer-valued; use an int dtype so the result can
        # be used for indexing.
        ret = np.zeros(n_classes, dtype=int)
        for j in range(n_classes):
            ret[j] = np.argmax(Y[j])
        return ret

    def _MAP(self, theta):
        n_classes = int(np.sqrt(theta.shape[0]))
        perm_matrix = self._argmax(theta).reshape(n_classes, n_classes)
        return self._inv_phi(perm_matrix)

    def vertices(self, size):  # size = len(theta)
        size = int(np.sqrt(size))
        # One vertex (permutation matrix) per permutation of size elements.
        for y in Permutahedron().vertices(size):
            yield self._phi(y)
def inv_permutation(p):
ret = np.zeros(len(p), dtype=np.int)
ret[p] = np.arange(len(p))
return ret
class Permutahedron(Polytope):
def __init__(self, w=None, w_sorted=False):
self.w = w
self.w_sorted = w_sorted
def _get_w(self, n_classes):
# Our implementation assumes that w is sorted.
# This helper function takes care of that.
w = self.w
if w is None:
w = np.arange(n_classes)[::-1]
else:
w = np.array(w)
if not self.w_sorted:
w = w[np.argsort(w)[::-1]]
return w
def _Euclidean_project(self, theta):
"""
Efficient bregman projections onto the permutahedron and
related polytopes.
<NAME> and <NAME>.
In Proc. of AISTATS, pages 1205–1213, 2016
"""
from sklearn.isotonic import isotonic_regression
n_classes = len(theta)
w = self._get_w(n_classes)
perm = np.argsort(theta)[::-1]
theta = theta[perm]
dual_sol = isotonic_regression(theta - w, increasing=False)
# Or equivalently
#dual_sol = -isotonic_regression(w - theta, increasing=True)
primal_sol = theta - dual_sol
return primal_sol[inv_permutation(perm)]
def _KL_project(self, theta):
raise NotImplementedError
def _MAP(self, theta):
n_classes = len(theta)
w = self._get_w(n_classes)
perm = np.argsort(theta)[::-1]
return w[inv_permutation(perm)]
def _argmax(self, theta):
return self._MAP(theta)
def _phi(self, y):
# FIXME: implement this for general w.
return y
def vertices(self, size):
from itertools import permutations
w = self._get_w(size)
for perm in permutations(np.arange(size)):
yield w[np.array(perm)]
class OrderSimplex(Polytope):
def _Euclidean_project(self, theta):
from sklearn.isotonic import isotonic_regression
return isotonic_regression(theta, y_min=0, y_max=1, increasing=False)
def _KL_project(self, theta):
raise NotImplementedError
def _MAP(self, theta):
n_classes = len(theta) + 1
scores = np.zeros(n_classes)
scores[0] = 0
for i in range(1, n_classes):
scores[i] = scores[i-1] + theta[i-1]
# Returns number between 1 and n_classes.
return np.argmax(scores) + 1
# FIXME: move n_classes and neg_label to __init__?
def _phi(self, y, n_classes, neg_label=0):
ret = np.zeros(n_classes - 1)
for i in range(1, n_classes): # from 1 to n_classes-1
if y > i:
ret[i-1] = 1
else:
ret[i-1] = neg_label
return ret
def phi(self, Y, k, neg_label=0):
return np.array([self._phi(y, k, neg_label) for y in Y])
def _argmax(self, theta):
n_classes = len(theta) + 1
return self._phi(self._MAP(theta), n_classes)
def vertices(self, size): # size = len(theta) = n_classes - 1
y = np.zeros(size)
yield y
for i in range(size):
y = y.copy()
y[i] = 1
yield y
| [
"numpy.sum",
"numpy.maximum",
"numpy.abs",
"numpy.ravel",
"numpy.argmax",
"numpy.ones",
"numpy.argsort",
"numpy.arange",
"numpy.exp",
"ot.NegEntropy",
"ot.get_plan_from_semi_dual",
"simplex.constrained_softmax",
"numpy.zeros_like",
"numpy.max",
"itertools.product",
"ot.SquaredL2",
"s... | [((132, 147), 'numpy.array', 'np.array', (['theta'], {}), '(theta)\n', (140, 147), True, 'import numpy as np\n'), ((339, 352), 'numpy.array', 'np.array', (['ret'], {}), '(ret)\n', (347, 352), True, 'import numpy as np\n'), ((2611, 2626), 'numpy.array', 'np.array', (['theta'], {}), '(theta)\n', (2619, 2626), True, 'import numpy as np\n'), ((2721, 2736), 'numpy.array', 'np.array', (['theta'], {}), '(theta)\n', (2729, 2736), True, 'import numpy as np\n'), ((2910, 2938), 'itertools.product', 'product', (['[0, 1]'], {'repeat': 'size'}), '([0, 1], repeat=size)\n', (2917, 2938), False, 'from itertools import product\n'), ((3111, 3126), 'numpy.array', 'np.array', (['theta'], {}), '(theta)\n', (3119, 3126), True, 'import numpy as np\n'), ((3411, 3426), 'numpy.array', 'np.array', (['theta'], {}), '(theta)\n', (3419, 3426), True, 'import numpy as np\n'), ((3609, 3630), 'numpy.max', 'np.max', (['theta'], {'axis': '(1)'}), '(theta, axis=1)\n', (3615, 3630), True, 'import numpy as np\n'), ((3651, 3691), 'numpy.exp', 'np.exp', (['(theta - max_theta[:, np.newaxis])'], {}), '(theta - max_theta[:, np.newaxis])\n', (3657, 3691), True, 'import numpy as np\n'), ((3962, 3981), 'numpy.zeros', 'np.zeros', (['n_classes'], {}), '(n_classes)\n', (3970, 3981), True, 'import numpy as np\n'), ((4353, 4365), 'numpy.eye', 'np.eye', (['size'], {}), '(size)\n', (4359, 4365), True, 'import numpy as np\n'), ((4729, 4749), 'numpy.zeros_like', 'np.zeros_like', (['theta'], {}), '(theta)\n', (4742, 4749), True, 'import numpy as np\n'), ((5255, 5275), 'numpy.zeros_like', 'np.zeros_like', (['theta'], {}), '(theta)\n', (5268, 5275), True, 'import numpy as np\n'), ((6867, 6876), 'numpy.sum', 'np.sum', (['u'], {}), '(u)\n', (6873, 6876), True, 'import numpy as np\n'), ((7327, 7342), 'numpy.array', 'np.array', (['theta'], {}), '(theta)\n', (7335, 7342), True, 'import numpy as np\n'), ((7454, 7463), 'numpy.sum', 'np.sum', (['u'], {}), '(u)\n', (7460, 7463), True, 'import numpy as np\n'), ((8067, 8082), 
'numpy.array', 'np.array', (['theta'], {}), '(theta)\n', (8075, 8082), True, 'import numpy as np\n'), ((8097, 8117), 'numpy.zeros_like', 'np.zeros_like', (['theta'], {}), '(theta)\n', (8110, 8117), True, 'import numpy as np\n'), ((8683, 8711), 'itertools.product', 'product', (['[0, 1]'], {'repeat': 'size'}), '([0, 1], repeat=size)\n', (8690, 8711), False, 'from itertools import product\n'), ((9063, 9078), 'numpy.array', 'np.array', (['theta'], {}), '(theta)\n', (9071, 9078), True, 'import numpy as np\n'), ((9400, 9418), 'numpy.ones', 'np.ones', (['n_classes'], {}), '(n_classes)\n', (9407, 9418), True, 'import numpy as np\n'), ((9490, 9567), 'ot.solve_semi_dual', 'ot.solve_semi_dual', (['o', 'o', '(-theta)', 'regul'], {'max_iter': 'self.max_iter', 'tol': 'self.tol'}), '(o, o, -theta, regul, max_iter=self.max_iter, tol=self.tol)\n', (9508, 9567), False, 'import ot\n'), ((9617, 9668), 'ot.get_plan_from_semi_dual', 'ot.get_plan_from_semi_dual', (['alpha', 'o', '(-theta)', 'regul'], {}), '(alpha, o, -theta, regul)\n', (9643, 9668), False, 'import ot\n'), ((10107, 10136), 'scipy.optimize.linear_sum_assignment', 'linear_sum_assignment', (['(-theta)'], {}), '(-theta)\n', (10128, 10136), False, 'from scipy.optimize import linear_sum_assignment\n'), ((10192, 10224), 'numpy.zeros', 'np.zeros', (['(n_classes, n_classes)'], {}), '((n_classes, n_classes))\n', (10200, 10224), True, 'import numpy as np\n'), ((10630, 10662), 'numpy.zeros', 'np.zeros', (['(n_classes, n_classes)'], {}), '((n_classes, n_classes))\n', (10638, 10662), True, 'import numpy as np\n'), ((10950, 10969), 'numpy.zeros', 'np.zeros', (['n_classes'], {}), '(n_classes)\n', (10958, 10969), True, 'import numpy as np\n'), ((12476, 12524), 'sklearn.isotonic.isotonic_regression', 'isotonic_regression', (['(theta - w)'], {'increasing': '(False)'}), '(theta - w, increasing=False)\n', (12495, 12524), False, 'from sklearn.isotonic import isotonic_regression\n'), ((13440, 13502), 'sklearn.isotonic.isotonic_regression', 
'isotonic_regression', (['theta'], {'y_min': '(0)', 'y_max': '(1)', 'increasing': '(False)'}), '(theta, y_min=0, y_max=1, increasing=False)\n', (13459, 13502), False, 'from sklearn.isotonic import isotonic_regression\n'), ((13652, 13671), 'numpy.zeros', 'np.zeros', (['n_classes'], {}), '(n_classes)\n', (13660, 13671), True, 'import numpy as np\n'), ((13985, 14008), 'numpy.zeros', 'np.zeros', (['(n_classes - 1)'], {}), '(n_classes - 1)\n', (13993, 14008), True, 'import numpy as np\n'), ((14501, 14515), 'numpy.zeros', 'np.zeros', (['size'], {}), '(size)\n', (14509, 14515), True, 'import numpy as np\n'), ((2536, 2556), 'numpy.maximum', 'np.maximum', (['theta', '(0)'], {}), '(theta, 0)\n', (2546, 2556), True, 'import numpy as np\n'), ((2653, 2670), 'numpy.exp', 'np.exp', (['(theta - 1)'], {}), '(theta - 1)\n', (2659, 2670), True, 'import numpy as np\n'), ((3181, 3203), 'simplex.project_simplex', 'project_simplex', (['theta'], {}), '(theta)\n', (3196, 3203), False, 'from simplex import project_simplex\n'), ((3795, 3808), 'numpy.ravel', 'np.ravel', (['ret'], {}), '(ret)\n', (3803, 3808), True, 'import numpy as np\n'), ((3994, 4010), 'numpy.argmax', 'np.argmax', (['theta'], {}), '(theta)\n', (4003, 4010), True, 'import numpy as np\n'), ((4142, 4158), 'numpy.argmax', 'np.argmax', (['theta'], {}), '(theta)\n', (4151, 4158), True, 'import numpy as np\n'), ((4639, 4662), 'numpy.sqrt', 'np.sqrt', (['theta.shape[0]'], {}), '(theta.shape[0])\n', (4646, 4662), True, 'import numpy as np\n'), ((5164, 5187), 'numpy.sqrt', 'np.sqrt', (['theta.shape[0]'], {}), '(theta.shape[0])\n', (5171, 5187), True, 'import numpy as np\n'), ((5466, 5479), 'numpy.sqrt', 'np.sqrt', (['size'], {}), '(size)\n', (5473, 5479), True, 'import numpy as np\n'), ((5509, 5526), 'numpy.eye', 'np.eye', (['n_classes'], {}), '(n_classes)\n', (5515, 5526), True, 'import numpy as np\n'), ((6829, 6849), 'numpy.maximum', 'np.maximum', (['theta', '(0)'], {}), '(theta, 0)\n', (6839, 6849), True, 'import numpy as np\n'), 
((7419, 7436), 'numpy.exp', 'np.exp', (['(theta - 1)'], {}), '(theta - 1)\n', (7425, 7436), True, 'import numpy as np\n'), ((8132, 8149), 'numpy.argsort', 'np.argsort', (['theta'], {}), '(theta)\n', (8142, 8149), True, 'import numpy as np\n'), ((8730, 8743), 'numpy.array', 'np.array', (['tup'], {}), '(tup)\n', (8738, 8743), True, 'import numpy as np\n'), ((8760, 8771), 'numpy.sum', 'np.sum', (['ret'], {}), '(ret)\n', (8766, 8771), True, 'import numpy as np\n'), ((9130, 9140), 'numpy.sqrt', 'np.sqrt', (['d'], {}), '(d)\n', (9137, 9140), True, 'import numpy as np\n'), ((9241, 9264), 'ot.SquaredL2', 'ot.SquaredL2', ([], {'gamma': '(1.0)'}), '(gamma=1.0)\n', (9253, 9264), False, 'import ot\n'), ((9977, 10000), 'numpy.sqrt', 'np.sqrt', (['theta.shape[0]'], {}), '(theta.shape[0])\n', (9984, 10000), True, 'import numpy as np\n'), ((10870, 10889), 'numpy.sqrt', 'np.sqrt', (['Y.shape[0]'], {}), '(Y.shape[0])\n', (10877, 10889), True, 'import numpy as np\n'), ((11026, 11041), 'numpy.argmax', 'np.argmax', (['Y[j]'], {}), '(Y[j])\n', (11035, 11041), True, 'import numpy as np\n'), ((11114, 11137), 'numpy.sqrt', 'np.sqrt', (['theta.shape[0]'], {}), '(theta.shape[0])\n', (11121, 11137), True, 'import numpy as np\n'), ((11324, 11337), 'numpy.sqrt', 'np.sqrt', (['size'], {}), '(size)\n', (11331, 11337), True, 'import numpy as np\n'), ((11921, 11932), 'numpy.array', 'np.array', (['w'], {}), '(w)\n', (11929, 11932), True, 'import numpy as np\n'), ((12404, 12421), 'numpy.argsort', 'np.argsort', (['theta'], {}), '(theta)\n', (12414, 12421), True, 'import numpy as np\n'), ((12888, 12905), 'numpy.argsort', 'np.argsort', (['theta'], {}), '(theta)\n', (12898, 12905), True, 'import numpy as np\n'), ((13240, 13255), 'numpy.arange', 'np.arange', (['size'], {}), '(size)\n', (13249, 13255), True, 'import numpy as np\n'), ((13846, 13863), 'numpy.argmax', 'np.argmax', (['scores'], {}), '(scores)\n', (13855, 13863), True, 'import numpy as np\n'), ((2957, 2970), 'numpy.array', 'np.array', (['tup'], 
{}), '(tup)\n', (2965, 2970), True, 'import numpy as np\n'), ((3259, 3289), 'simplex.project_simplex', 'project_simplex', (['theta'], {'axis': '(1)'}), '(theta, axis=1)\n', (3274, 3289), False, 'from simplex import project_simplex\n'), ((3718, 3743), 'numpy.sum', 'np.sum', (['exp_theta'], {'axis': '(1)'}), '(exp_theta, axis=1)\n', (3724, 3743), True, 'import numpy as np\n'), ((4214, 4238), 'numpy.argmax', 'np.argmax', (['theta'], {'axis': '(1)'}), '(theta, axis=1)\n', (4223, 4238), True, 'import numpy as np\n'), ((6144, 6157), 'numpy.max', 'np.max', (['theta'], {}), '(theta)\n', (6150, 6157), True, 'import numpy as np\n'), ((7893, 7909), 'numpy.log', 'np.log', (['n_labels'], {}), '(n_labels)\n', (7899, 7909), True, 'import numpy as np\n'), ((7983, 8008), 'simplex.constrained_softmax', 'constrained_softmax', (['z', 'u'], {}), '(z, u)\n', (8002, 8008), False, 'from simplex import constrained_softmax\n'), ((9319, 9343), 'ot.NegEntropy', 'ot.NegEntropy', ([], {'gamma': '(1.0)'}), '(gamma=1.0)\n', (9332, 9343), False, 'import ot\n'), ((11864, 11884), 'numpy.arange', 'np.arange', (['n_classes'], {}), '(n_classes)\n', (11873, 11884), True, 'import numpy as np\n'), ((13278, 13292), 'numpy.array', 'np.array', (['perm'], {}), '(perm)\n', (13286, 13292), True, 'import numpy as np\n'), ((5565, 5579), 'numpy.array', 'np.array', (['prod'], {}), '(prod)\n', (5573, 5579), True, 'import numpy as np\n'), ((6421, 6447), 'numpy.maximum', 'np.maximum', (['(theta - tau)', '(0)'], {}), '(theta - tau, 0)\n', (6431, 6447), True, 'import numpy as np\n'), ((6478, 6488), 'numpy.sum', 'np.sum', (['mu'], {}), '(mu)\n', (6484, 6488), True, 'import numpy as np\n'), ((11989, 12002), 'numpy.argsort', 'np.argsort', (['w'], {}), '(w)\n', (11999, 12002), True, 'import numpy as np\n'), ((6268, 6283), 'numpy.abs', 'np.abs', (['current'], {}), '(current)\n', (6274, 6283), True, 'import numpy as np\n')] |
# coding: utf-8
import matplotlib.pyplot as plt
import numpy as np
img_1=plt.imread("test_1.jpg")
img_1.ndim
img_1.shape
img_2=img_1[1:1080:2,1:1920:2]
img_2.ndim,img_2.shape
plt.imshow(img_2)
plt.show()
img_2
plt.imshow(img_1,plt.cm.gray)
plt.show()
img_3=np.zeros((img_2.shape[0:2]))
img_3.shape
img_4=np.zeros((img_2.shape[0:2]))
img_4.shape
img_5=np.zeros((img_1.shape[0:2]))
img_2=img_1
img_2.shape,img_5.shape
threshold=100
for i in range(img_2.shape[0]):
for j in range(img_2.shape[1]):
n=img_2[i,j,0]/3 + img_2[i,j,1]/3 + img_2[i,j,2]/3
img_3[i,j]=n
if n > threshold:
img_4[i,j] =255
else:
img_4[i,j]=0
plt.subplot(1,3,1),plt.imshow(img_2)
plt.subplot(1,3,2),plt.imshow(img_4, plt.cm.binary)
plt.show()
plt.imshow(img_3,plt.cm.gray)
plt.show()
plt.imshow(img_4, plt.cm.binary)
plt.show()
img_1=plt.imread("plaka.jpg")
img_1.ndim,img_1.shape
img_5=np.zeros((img_1.shape[0:2]))
img_2=img_1
img_2.shape,img_5.shape
threshold=100
for i in range(img_2.shape[0]):
for j in range(img_2.shape[1]):
n=img_2[i,j,0]/3 + img_2[i,j,1]/3 + img_2[i,j,2]/3
img_3[i,j]=n
if n > threshold:
img_5[i,j] =255
else:
img_5[i,j]=0
plt.subplot(1,3,1),plt.imshow(img_2)
plt.subplot(1,3,2),plt.imshow(img_5, plt.cm.binary)
plt.show()
| [
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.show",
"matplotlib.pyplot.imshow",
"numpy.zeros",
"matplotlib.pyplot.imread"
] | [((74, 98), 'matplotlib.pyplot.imread', 'plt.imread', (['"""test_1.jpg"""'], {}), "('test_1.jpg')\n", (84, 98), True, 'import matplotlib.pyplot as plt\n'), ((176, 193), 'matplotlib.pyplot.imshow', 'plt.imshow', (['img_2'], {}), '(img_2)\n', (186, 193), True, 'import matplotlib.pyplot as plt\n'), ((194, 204), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (202, 204), True, 'import matplotlib.pyplot as plt\n'), ((212, 242), 'matplotlib.pyplot.imshow', 'plt.imshow', (['img_1', 'plt.cm.gray'], {}), '(img_1, plt.cm.gray)\n', (222, 242), True, 'import matplotlib.pyplot as plt\n'), ((242, 252), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (250, 252), True, 'import matplotlib.pyplot as plt\n'), ((260, 286), 'numpy.zeros', 'np.zeros', (['img_2.shape[0:2]'], {}), '(img_2.shape[0:2])\n', (268, 286), True, 'import numpy as np\n'), ((307, 333), 'numpy.zeros', 'np.zeros', (['img_2.shape[0:2]'], {}), '(img_2.shape[0:2])\n', (315, 333), True, 'import numpy as np\n'), ((354, 380), 'numpy.zeros', 'np.zeros', (['img_1.shape[0:2]'], {}), '(img_1.shape[0:2])\n', (362, 380), True, 'import numpy as np\n'), ((763, 773), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (771, 773), True, 'import matplotlib.pyplot as plt\n'), ((783, 813), 'matplotlib.pyplot.imshow', 'plt.imshow', (['img_3', 'plt.cm.gray'], {}), '(img_3, plt.cm.gray)\n', (793, 813), True, 'import matplotlib.pyplot as plt\n'), ((813, 823), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (821, 823), True, 'import matplotlib.pyplot as plt\n'), ((825, 857), 'matplotlib.pyplot.imshow', 'plt.imshow', (['img_4', 'plt.cm.binary'], {}), '(img_4, plt.cm.binary)\n', (835, 857), True, 'import matplotlib.pyplot as plt\n'), ((858, 868), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (866, 868), True, 'import matplotlib.pyplot as plt\n'), ((876, 899), 'matplotlib.pyplot.imread', 'plt.imread', (['"""plaka.jpg"""'], {}), "('plaka.jpg')\n", (886, 899), True, 'import matplotlib.pyplot as plt\n'), 
((929, 955), 'numpy.zeros', 'np.zeros', (['img_1.shape[0:2]'], {}), '(img_1.shape[0:2])\n', (937, 955), True, 'import numpy as np\n'), ((1338, 1348), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1346, 1348), True, 'import matplotlib.pyplot as plt\n'), ((674, 694), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', '(3)', '(1)'], {}), '(1, 3, 1)\n', (685, 694), True, 'import matplotlib.pyplot as plt\n'), ((693, 710), 'matplotlib.pyplot.imshow', 'plt.imshow', (['img_2'], {}), '(img_2)\n', (703, 710), True, 'import matplotlib.pyplot as plt\n'), ((711, 731), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', '(3)', '(2)'], {}), '(1, 3, 2)\n', (722, 731), True, 'import matplotlib.pyplot as plt\n'), ((730, 762), 'matplotlib.pyplot.imshow', 'plt.imshow', (['img_4', 'plt.cm.binary'], {}), '(img_4, plt.cm.binary)\n', (740, 762), True, 'import matplotlib.pyplot as plt\n'), ((1249, 1269), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', '(3)', '(1)'], {}), '(1, 3, 1)\n', (1260, 1269), True, 'import matplotlib.pyplot as plt\n'), ((1268, 1285), 'matplotlib.pyplot.imshow', 'plt.imshow', (['img_2'], {}), '(img_2)\n', (1278, 1285), True, 'import matplotlib.pyplot as plt\n'), ((1286, 1306), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', '(3)', '(2)'], {}), '(1, 3, 2)\n', (1297, 1306), True, 'import matplotlib.pyplot as plt\n'), ((1305, 1337), 'matplotlib.pyplot.imshow', 'plt.imshow', (['img_5', 'plt.cm.binary'], {}), '(img_5, plt.cm.binary)\n', (1315, 1337), True, 'import matplotlib.pyplot as plt\n')] |
from tqdm import tqdm
import numpy as np
# For featurizer
import itertools
from matplotlib import pyplot as plt
from sklearn import decomposition
from time import time
from sklearn.preprocessing import StandardScaler
class Featurizer():
def __init__(self, data, n_components=9):
'''
Takes a length-N list (data) of equally-sized numpy arrays with M elements,
Calculates features on the flattened data (where each entry in the list is
interpreted as a sample of the M features)
Example:
> F = Featurizer(data, n_components = 8)
> F.fit()
F.features: length 3*n_components list of features
F.feature_coeffs: [N, 3*n_components] array of feature coefficients for each sample
F.feature_labels: length 3*n_components list of feature labels
'''
self._raw_data = data
self.n_components = n_components
self._preprocessed = False
self._estimators_estimated = False
self._features_featurized = False
def fit(self):
# Flatten and Normalise data into contiguous array
print("Preprocessing data . . .")
self.preprocessData()
# Fit estimators to data
print("Fitting estimators . . .")
self.getEstimators()
# Calculate features
print("Calculating features . . .")
self.getFeatures()
print("Done!")
def preprocessData(self):
# Stack into contiguous array
data = np.stack(self._raw_data, axis=0)
# Flatten
self._raw_data_shape = data.shape
data = data.reshape(self._raw_data_shape[0], -1)
# Zero-mean and unit-variance rescaling
self._scaler = StandardScaler()
self._scaler.fit(data)
self.data = self._scaler.transform(data)
self._preprocessed = True
def getEstimators(self):
'''
Makes list of ('name', estimator) pairs for PCA, ICA, FA
and fits estimatorsto data
'''
if not self._preprocessed:
raise ValueError("Data must be preprocessed and estimators constructed")
self._estimators = [
('PCA',
decomposition.PCA(n_components=self.n_components, svd_solver='randomized',
whiten=True)),
('FastICA',
decomposition.FastICA(n_components=self.n_components, whiten=True)),
('FactorAnalysis',
decomposition.FactorAnalysis(n_components=self.n_components, max_iter=20))
]
for name, estimator in self._estimators:
print("Calculating %d features using %s..." % (self.n_components, name))
t0 = time()
estimator.fit(self.data)
train_time = (time() - t0)
print("\tTime taken = %0.3fs" % train_time)
self._estimators_estimated = True
def getFeatures(self):
'''
Calculates coefficients of data with respect to each estimator
'''
if not self._estimators_estimated:
raise ValueError("Estimators must be fitted to data firts")
#self._coeffs = {}
features = []
feature_coeffs = []
feature_labels = []
for name, estimator in self._estimators:
features.append(estimator.components_.reshape(self.n_components,*self._raw_data_shape[1:]))
coeffs = estimator.transform(self.data)
#coeffs = np.matmul(estimator.components_, self.data.T).T
feature_coeffs.append(coeffs)
labels = []
for i in range(self.n_components):
labels.append("{} {}".format(name, i))
feature_labels.append(labels)
self.features = list(itertools.chain.from_iterable(features))
self.feature_coeffs = np.concatenate(feature_coeffs, axis=1)
self.feature_labels = list(itertools.chain.from_iterable(feature_labels))
self._features_featurized = True
def plot2DComponents(self, n_col = 3, cmap=plt.cm.gray):
'''
Makes a figure showing the components identified by each estimator
Note that this will not work for non-image data
n_col: number of columns in each plotted figure
cmap: colormap to use for plotted 2D components
'''
# check that we're using 3D data
if len(self._raw_data_shape)!=3:
raise ValueError("Cannot plot 2D components for non-2D data")
# Check that we've actually calculated features
if not self._estimators_estimated:
raise ValueError("Estimators need to be fitted to data before plotting")
n_row = int(np.ceil(self.n_components/n_col))
image_shape = (self._raw_data_shape[1], self._raw_data_shape[2])
for name, estimator in self._estimators:
plt.figure(figsize=(2. * n_col, 2.26 * n_row))
plt.suptitle(name, size=16)
for i, comp in enumerate(estimator.components_):
plt.subplot(n_row, n_col, i + 1)
vmax = max(comp.max(), -comp.min())
plt.imshow(comp.reshape(image_shape), cmap=cmap,
interpolation='nearest',
vmin=-vmax, vmax=vmax)
plt.xticks(())
plt.yticks(())
plt.subplots_adjust(0.01, 0.05, 0.99, 0.93, 0.04, 0.)
class FeatureEnsembler():
def __init__(self, data, transforms, n_components=9):
'''
Constructs an aggregated set of features of (data) by applying the transforms
in (transforms)
data: should be a length N list of numpy arrays of identical size
transforms: should be a length P list of ImageTransform objects
'''
self.data = data
self.transforms = transforms
self.n_components = n_components
def fit(self):
self.getAllFeatures()
def getAllFeatures(self):
features = []
feature_coeffs = []
feature_labels = []
for i, transform in enumerate(self.transforms):
data_tf = transform.apply(self.data)
F = Featurizer(data_tf, n_components=self.n_components)
F.fit()
features.append(F.features)
feature_coeffs.append(F.feature_coeffs)
feature_labels.append([transform.name+": "+x for x in F.feature_labels])
self.features = list(itertools.chain.from_iterable(features))
self.feature_coeffs = np.concatenate(feature_coeffs, axis=1)
self.feature_labels = list(itertools.chain.from_iterable(feature_labels)) | [
"numpy.stack",
"matplotlib.pyplot.subplot",
"sklearn.decomposition.FastICA",
"sklearn.preprocessing.StandardScaler",
"numpy.ceil",
"matplotlib.pyplot.suptitle",
"matplotlib.pyplot.yticks",
"time.time",
"matplotlib.pyplot.figure",
"sklearn.decomposition.FactorAnalysis",
"sklearn.decomposition.PCA... | [((1570, 1602), 'numpy.stack', 'np.stack', (['self._raw_data'], {'axis': '(0)'}), '(self._raw_data, axis=0)\n', (1578, 1602), True, 'import numpy as np\n'), ((1809, 1825), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), '()\n', (1823, 1825), False, 'from sklearn.preprocessing import StandardScaler\n'), ((4041, 4079), 'numpy.concatenate', 'np.concatenate', (['feature_coeffs'], {'axis': '(1)'}), '(feature_coeffs, axis=1)\n', (4055, 4079), True, 'import numpy as np\n'), ((6888, 6926), 'numpy.concatenate', 'np.concatenate', (['feature_coeffs'], {'axis': '(1)'}), '(feature_coeffs, axis=1)\n', (6902, 6926), True, 'import numpy as np\n'), ((2832, 2838), 'time.time', 'time', ([], {}), '()\n', (2836, 2838), False, 'from time import time\n'), ((3970, 4009), 'itertools.chain.from_iterable', 'itertools.chain.from_iterable', (['features'], {}), '(features)\n', (3999, 4009), False, 'import itertools\n'), ((4115, 4160), 'itertools.chain.from_iterable', 'itertools.chain.from_iterable', (['feature_labels'], {}), '(feature_labels)\n', (4144, 4160), False, 'import itertools\n'), ((4958, 4992), 'numpy.ceil', 'np.ceil', (['(self.n_components / n_col)'], {}), '(self.n_components / n_col)\n', (4965, 4992), True, 'import numpy as np\n'), ((5135, 5182), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(2.0 * n_col, 2.26 * n_row)'}), '(figsize=(2.0 * n_col, 2.26 * n_row))\n', (5145, 5182), True, 'from matplotlib import pyplot as plt\n'), ((5194, 5221), 'matplotlib.pyplot.suptitle', 'plt.suptitle', (['name'], {'size': '(16)'}), '(name, size=16)\n', (5206, 5221), True, 'from matplotlib import pyplot as plt\n'), ((5625, 5679), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', (['(0.01)', '(0.05)', '(0.99)', '(0.93)', '(0.04)', '(0.0)'], {}), '(0.01, 0.05, 0.99, 0.93, 0.04, 0.0)\n', (5644, 5679), True, 'from matplotlib import pyplot as plt\n'), ((6817, 6856), 'itertools.chain.from_iterable', 
'itertools.chain.from_iterable', (['features'], {}), '(features)\n', (6846, 6856), False, 'import itertools\n'), ((6962, 7007), 'itertools.chain.from_iterable', 'itertools.chain.from_iterable', (['feature_labels'], {}), '(feature_labels)\n', (6991, 7007), False, 'import itertools\n'), ((2311, 2402), 'sklearn.decomposition.PCA', 'decomposition.PCA', ([], {'n_components': 'self.n_components', 'svd_solver': '"""randomized"""', 'whiten': '(True)'}), "(n_components=self.n_components, svd_solver='randomized',\n whiten=True)\n", (2328, 2402), False, 'from sklearn import decomposition\n'), ((2469, 2535), 'sklearn.decomposition.FastICA', 'decomposition.FastICA', ([], {'n_components': 'self.n_components', 'whiten': '(True)'}), '(n_components=self.n_components, whiten=True)\n', (2490, 2535), False, 'from sklearn import decomposition\n'), ((2583, 2656), 'sklearn.decomposition.FactorAnalysis', 'decomposition.FactorAnalysis', ([], {'n_components': 'self.n_components', 'max_iter': '(20)'}), '(n_components=self.n_components, max_iter=20)\n', (2611, 2656), False, 'from sklearn import decomposition\n'), ((2902, 2908), 'time.time', 'time', ([], {}), '()\n', (2906, 2908), False, 'from time import time\n'), ((5299, 5331), 'matplotlib.pyplot.subplot', 'plt.subplot', (['n_row', 'n_col', '(i + 1)'], {}), '(n_row, n_col, i + 1)\n', (5310, 5331), True, 'from matplotlib import pyplot as plt\n'), ((5567, 5581), 'matplotlib.pyplot.xticks', 'plt.xticks', (['()'], {}), '(())\n', (5577, 5581), True, 'from matplotlib import pyplot as plt\n'), ((5598, 5612), 'matplotlib.pyplot.yticks', 'plt.yticks', (['()'], {}), '(())\n', (5608, 5612), True, 'from matplotlib import pyplot as plt\n')] |
import cv2
import numpy as np
import tensorflow as tf
from keras.layers import Dense, Conv2D, Dropout, Flatten, MaxPooling2D
from keras.models import Sequential
from keras_preprocessing.image import ImageDataGenerator
mnist = tf.keras.datasets.mnist
(x_train, y_train), (x_test, y_test) = mnist.load_data()
cv2.imshow('out3',x_train[4])
cv2.waitKey(0)
x_train = np.array([cv2.morphologyEx(x, cv2.MORPH_DILATE, np.ones((2, 2)), iterations=1) for x in x_train])
x_test = np.array([cv2.morphologyEx(x, cv2.MORPH_DILATE, np.ones((2, 2)), iterations=1) for x in x_test])
cv2.imshow('out3',x_train[4])
cv2.waitKey(0)
x_train = x_train.reshape(x_train.shape[0], 28, 28, 1)
x_test = x_test.reshape(x_test.shape[0], 28, 28, 1)
input_shape = (28, 28, 1)
x_train = x_train.astype('float32')
x_test = x_test.astype('float32')
image_gen = ImageDataGenerator(
rotation_range=15,
width_shift_range=.25,
height_shift_range=.2, )
# training the image preprocessing
image_gen.fit(x_train, augment=True)
image_gen.fit(x_test, augment=True)
x_train, x_test = x_train / 255.0, x_test / 255.0
def makeModel():
model = Sequential()
model.add(Conv2D(64, kernel_size=(3, 3), padding='same', input_shape=(28, 28, 1)))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(64, kernel_size=(3, 3), padding='same'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Flatten())
model.add(Dense(256, activation=tf.nn.relu))
model.add(Dropout(0.4))
model.add(Dense(10, activation=tf.nn.softmax))
model.compile(optimizer='adam',
loss='sparse_categorical_crossentropy',
metrics=['accuracy'])
return model
model = makeModel()
model.summary()
batch_size = 64
model.fit_generator(image_gen.flow(x_train, y_train, batch_size),
steps_per_epoch=x_train.shape[0] // batch_size,
epochs=50,
verbose=1, validation_data=(x_test, y_test))
model.save('nn-model/mnist_model')
model.evaluate(x_test, y_test, verbose=10)
| [
"cv2.waitKey",
"keras.layers.Dropout",
"keras.layers.Flatten",
"numpy.ones",
"keras_preprocessing.image.ImageDataGenerator",
"keras.layers.Dense",
"keras.layers.Conv2D",
"keras.models.Sequential",
"cv2.imshow",
"keras.layers.MaxPooling2D"
] | [((311, 341), 'cv2.imshow', 'cv2.imshow', (['"""out3"""', 'x_train[4]'], {}), "('out3', x_train[4])\n", (321, 341), False, 'import cv2\n'), ((341, 355), 'cv2.waitKey', 'cv2.waitKey', (['(0)'], {}), '(0)\n', (352, 355), False, 'import cv2\n'), ((572, 602), 'cv2.imshow', 'cv2.imshow', (['"""out3"""', 'x_train[4]'], {}), "('out3', x_train[4])\n", (582, 602), False, 'import cv2\n'), ((602, 616), 'cv2.waitKey', 'cv2.waitKey', (['(0)'], {}), '(0)\n', (613, 616), False, 'import cv2\n'), ((835, 924), 'keras_preprocessing.image.ImageDataGenerator', 'ImageDataGenerator', ([], {'rotation_range': '(15)', 'width_shift_range': '(0.25)', 'height_shift_range': '(0.2)'}), '(rotation_range=15, width_shift_range=0.25,\n height_shift_range=0.2)\n', (853, 924), False, 'from keras_preprocessing.image import ImageDataGenerator\n'), ((1124, 1136), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (1134, 1136), False, 'from keras.models import Sequential\n'), ((1151, 1222), 'keras.layers.Conv2D', 'Conv2D', (['(64)'], {'kernel_size': '(3, 3)', 'padding': '"""same"""', 'input_shape': '(28, 28, 1)'}), "(64, kernel_size=(3, 3), padding='same', input_shape=(28, 28, 1))\n", (1157, 1222), False, 'from keras.layers import Dense, Conv2D, Dropout, Flatten, MaxPooling2D\n'), ((1238, 1268), 'keras.layers.MaxPooling2D', 'MaxPooling2D', ([], {'pool_size': '(2, 2)'}), '(pool_size=(2, 2))\n', (1250, 1268), False, 'from keras.layers import Dense, Conv2D, Dropout, Flatten, MaxPooling2D\n'), ((1284, 1330), 'keras.layers.Conv2D', 'Conv2D', (['(64)'], {'kernel_size': '(3, 3)', 'padding': '"""same"""'}), "(64, kernel_size=(3, 3), padding='same')\n", (1290, 1330), False, 'from keras.layers import Dense, Conv2D, Dropout, Flatten, MaxPooling2D\n'), ((1346, 1376), 'keras.layers.MaxPooling2D', 'MaxPooling2D', ([], {'pool_size': '(2, 2)'}), '(pool_size=(2, 2))\n', (1358, 1376), False, 'from keras.layers import Dense, Conv2D, Dropout, Flatten, MaxPooling2D\n'), ((1392, 1401), 'keras.layers.Flatten', 
'Flatten', ([], {}), '()\n', (1399, 1401), False, 'from keras.layers import Dense, Conv2D, Dropout, Flatten, MaxPooling2D\n'), ((1417, 1450), 'keras.layers.Dense', 'Dense', (['(256)'], {'activation': 'tf.nn.relu'}), '(256, activation=tf.nn.relu)\n', (1422, 1450), False, 'from keras.layers import Dense, Conv2D, Dropout, Flatten, MaxPooling2D\n'), ((1466, 1478), 'keras.layers.Dropout', 'Dropout', (['(0.4)'], {}), '(0.4)\n', (1473, 1478), False, 'from keras.layers import Dense, Conv2D, Dropout, Flatten, MaxPooling2D\n'), ((1494, 1529), 'keras.layers.Dense', 'Dense', (['(10)'], {'activation': 'tf.nn.softmax'}), '(10, activation=tf.nn.softmax)\n', (1499, 1529), False, 'from keras.layers import Dense, Conv2D, Dropout, Flatten, MaxPooling2D\n'), ((415, 430), 'numpy.ones', 'np.ones', (['(2, 2)'], {}), '((2, 2))\n', (422, 430), True, 'import numpy as np\n'), ((522, 537), 'numpy.ones', 'np.ones', (['(2, 2)'], {}), '((2, 2))\n', (529, 537), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
import logging
import numpy as np
import torch as th
import torch.nn as nn
from leibniz.nn.conv import DepthwiseSeparableConv1d, DepthwiseSeparableConv2d, DepthwiseSeparableConv3d
from leibniz.nn.layer.hyperbolic import Bottleneck
logger = logging.getLogger()
logger.setLevel(logging.INFO)
class ResNetZ(nn.Module):
    """Flow-style residual network built from hyperbolic ``Bottleneck`` blocks.

    The forward pass predicts velocity/scaling increments ``(du, dv)`` with
    three Bottleneck blocks of increasing input order and applies explicit
    Euler-like updates ``x <- relu(x * (1 + dv / layers) + du / layers)``.

    Parameters
    ----------
    in_channels : int
        Number of channels of the input tensor.
    out_channels : int
        Number of channels produced by the final convolution.
    layers : int, optional
        Number of integration steps used in ``forward`` (default 4).
    ratio : int, optional
        Width-multiplier exponent: the internal width is
        ``in_channels * 2**ratio`` (default 1).
    spatial : tuple of int, optional
        Spatial shape of the input; its length (1, 2 or 3) selects the
        matching BatchNorm and depthwise-separable convolution classes.
    conv_class : type, optional
        Convolution class; defaults to the depthwise-separable convolution
        matching the spatial dimensionality.
    relu : nn.Module, optional
        Activation module; defaults to ``nn.ReLU(inplace=True)``.
    normalizor : str or None, optional
        Output squashing: 'relu6' (rescaled to [0, 1]), 'sigmoid', 'tanh',
        'softmax', or None for a raw linear output.
    """

    def __init__(self, in_channels, out_channels, layers=4, ratio=1, spatial=(256, 256),
                 conv_class=None, relu=None, normalizor=None):
        super().__init__()

        # BUGFIX: ``np.int`` was deprecated in NumPy 1.20 and removed in
        # NumPy 1.24; the builtin ``int`` is the documented drop-in
        # replacement and produces the same integer array.
        spatial = np.array(spatial, dtype=int)
        dim = len(spatial)
        self.dim = dim

        self.ratio = np.power(2, ratio)
        self.layers = layers
        self.in_channels = int(in_channels)
        self.num_filters = int(in_channels * self.ratio)
        self.out_channels = int(out_channels)
        self.spatial = [np.array(spatial, dtype=int)]

        logger.info('---------------------------------------')
        logger.info('dim: %f', self.dim)
        logger.info('ratio: %f', self.ratio)
        logger.info('layers: %f', self.layers)
        logger.info('in_channels: %f', self.in_channels)
        logger.info('out_channels: %f', self.out_channels)
        logger.info('num_filters: %f', self.num_filters)
        logger.info('normalizor: %s', normalizor)
        logger.info('---------------------------------------')

        # BatchNorm dimensionality must match the spatial rank of the input.
        if dim == 1:
            self.bn = nn.BatchNorm1d(self.num_filters, affine=True)
        elif dim == 2:
            self.bn = nn.BatchNorm2d(self.num_filters, affine=True)
        elif dim == 3:
            self.bn = nn.BatchNorm3d(self.num_filters, affine=True)

        if conv_class is None:
            self.conv_class = self.get_conv_class()
        else:
            self.conv_class = conv_class

        if relu is None:
            self.relu = nn.ReLU(inplace=True)
        else:
            self.relu = relu

        # Optional output normalizer; ``scale``/``bias`` form an affine
        # rescaling applied after the squashing function.
        if normalizor == 'relu6':
            self.normalizor = nn.ReLU6()
            self.scale = 1.0 / 6.0
            self.bias = 0.0
        elif normalizor == 'sigmoid':
            self.normalizor = nn.Sigmoid()
            self.scale = 1.0
            self.bias = 0.0
        elif normalizor == 'tanh':
            self.normalizor = nn.Tanh()
            self.scale = 1.0
            self.bias = 0.0
        elif normalizor == 'softmax':
            self.normalizor = nn.Softmax()
            self.scale = 1.0
            self.bias = 0.0
        else:
            self.normalizor = None
            self.scale = 1.0
            self.bias = 0.0

        # Input embedding (7x7) and output projection (3x3) convolutions.
        self.iconv = self.conv_class(self.in_channels, self.num_filters, kernel_size=7, padding=3, groups=1)
        self.oconv = self.conv_class(self.num_filters, self.out_channels, kernel_size=3, padding=1, groups=1, bias=False)

        step_length = 1.0 / self.layers
        # Increment predictors of increasing input order; each outputs
        # 2 * num_filters channels interpreted as (du, dv) below.
        self.order1 = Bottleneck(self.num_filters, 2 * self.num_filters, step_length, self.relu, self.conv_class, reduction=16)
        self.order2 = Bottleneck(4 * self.num_filters + 1, 2 * self.num_filters, step_length, self.relu, self.conv_class, reduction=16)
        self.order3 = Bottleneck(7 * self.num_filters + 1, 2 * self.num_filters, step_length, self.relu, self.conv_class, reduction=16)

    def get_conv_class(self):
        """Return the depthwise-separable conv class matching ``self.dim``."""
        if self.dim == 1:
            conv = DepthwiseSeparableConv1d
        elif self.dim == 2:
            conv = DepthwiseSeparableConv2d
        elif self.dim == 3:
            conv = DepthwiseSeparableConv3d
        else:
            raise ValueError('dim %d is not supported!' % self.dim)
        return conv

    def forward(self, x):
        """Embed the input, integrate the learned updates and project out."""
        x0 = self.bn(self.iconv(x))
        # First-order increment parameterised as magnitude and angle.
        rslt = self.order1(x0)
        velo = rslt[:, :self.num_filters]
        theta = rslt[:, self.num_filters:]
        du0 = velo * th.cos(theta)
        dv0 = velo * th.sin(theta)

        # NOTE(review): x0/du0/dv0 are never updated inside the loop, so
        # between iterations only the appended time channel ``_ / layers``
        # varies -- confirm this is intended.
        for _ in range(self.layers):
            x1 = x0 * (1 + dv0 / self.layers) + du0 / self.layers
            x1 = self.relu(x1)
            dd = self.order2(th.cat([x0, x1, du0, dv0, th.ones_like(x0[:, 0:1]) * _ / self.layers], dim=1))
            du1 = dd[:, self.num_filters * 0:self.num_filters * 1]
            dv1 = dd[:, self.num_filters * 1:self.num_filters * 2]
            x2 = x1 * (1 + dv1 / self.layers) + du1 / self.layers
            x2 = self.relu(x2)
            dd = self.order3(th.cat([x0, x1, x2, du0, dv0, du1, dv1, th.ones_like(x1[:, 0:1]) * _ / self.layers], dim=1))
            du2 = dd[:, self.num_filters * 0:self.num_filters * 1]
            dv2 = dd[:, self.num_filters * 1:self.num_filters * 2]
            x3 = x2 * (1 + dv2 / self.layers) + du2 / self.layers
            x3 = self.relu(x3)

        out = self.oconv(x3)
        if self.normalizor:
            return self.normalizor(out) * self.scale + self.bias
        else:
            return out * self.scale + self.bias
| [
"torch.ones_like",
"torch.nn.BatchNorm3d",
"torch.nn.ReLU",
"torch.nn.ReLU6",
"torch.nn.Tanh",
"numpy.power",
"torch.nn.BatchNorm1d",
"torch.cos",
"leibniz.nn.layer.hyperbolic.Bottleneck",
"torch.nn.BatchNorm2d",
"numpy.array",
"torch.nn.Softmax",
"torch.sin",
"logging.getLogger",
"torch... | [((268, 287), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (285, 287), False, 'import logging\n'), ((545, 576), 'numpy.array', 'np.array', (['spatial'], {'dtype': 'np.int'}), '(spatial, dtype=np.int)\n', (553, 576), True, 'import numpy as np\n'), ((648, 666), 'numpy.power', 'np.power', (['(2)', 'ratio'], {}), '(2, ratio)\n', (656, 666), True, 'import numpy as np\n'), ((2856, 2965), 'leibniz.nn.layer.hyperbolic.Bottleneck', 'Bottleneck', (['self.num_filters', '(2 * self.num_filters)', 'step_length', 'self.relu', 'self.conv_class'], {'reduction': '(16)'}), '(self.num_filters, 2 * self.num_filters, step_length, self.relu,\n self.conv_class, reduction=16)\n', (2866, 2965), False, 'from leibniz.nn.layer.hyperbolic import Bottleneck\n'), ((2984, 3101), 'leibniz.nn.layer.hyperbolic.Bottleneck', 'Bottleneck', (['(4 * self.num_filters + 1)', '(2 * self.num_filters)', 'step_length', 'self.relu', 'self.conv_class'], {'reduction': '(16)'}), '(4 * self.num_filters + 1, 2 * self.num_filters, step_length,\n self.relu, self.conv_class, reduction=16)\n', (2994, 3101), False, 'from leibniz.nn.layer.hyperbolic import Bottleneck\n'), ((3120, 3237), 'leibniz.nn.layer.hyperbolic.Bottleneck', 'Bottleneck', (['(7 * self.num_filters + 1)', '(2 * self.num_filters)', 'step_length', 'self.relu', 'self.conv_class'], {'reduction': '(16)'}), '(7 * self.num_filters + 1, 2 * self.num_filters, step_length,\n self.relu, self.conv_class, reduction=16)\n', (3130, 3237), False, 'from leibniz.nn.layer.hyperbolic import Bottleneck\n'), ((867, 898), 'numpy.array', 'np.array', (['spatial'], {'dtype': 'np.int'}), '(spatial, dtype=np.int)\n', (875, 898), True, 'import numpy as np\n'), ((1427, 1472), 'torch.nn.BatchNorm1d', 'nn.BatchNorm1d', (['self.num_filters'], {'affine': '(True)'}), '(self.num_filters, affine=True)\n', (1441, 1472), True, 'import torch.nn as nn\n'), ((1843, 1864), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (1850, 1864), True, 
'import torch.nn as nn\n'), ((1973, 1983), 'torch.nn.ReLU6', 'nn.ReLU6', ([], {}), '()\n', (1981, 1983), True, 'import torch.nn as nn\n'), ((3781, 3794), 'torch.cos', 'th.cos', (['theta'], {}), '(theta)\n', (3787, 3794), True, 'import torch as th\n'), ((3816, 3829), 'torch.sin', 'th.sin', (['theta'], {}), '(theta)\n', (3822, 3829), True, 'import torch as th\n'), ((1518, 1563), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['self.num_filters'], {'affine': '(True)'}), '(self.num_filters, affine=True)\n', (1532, 1563), True, 'import torch.nn as nn\n'), ((2115, 2127), 'torch.nn.Sigmoid', 'nn.Sigmoid', ([], {}), '()\n', (2125, 2127), True, 'import torch.nn as nn\n'), ((1609, 1654), 'torch.nn.BatchNorm3d', 'nn.BatchNorm3d', (['self.num_filters'], {'affine': '(True)'}), '(self.num_filters, affine=True)\n', (1623, 1654), True, 'import torch.nn as nn\n'), ((2250, 2259), 'torch.nn.Tanh', 'nn.Tanh', ([], {}), '()\n', (2257, 2259), True, 'import torch.nn as nn\n'), ((2385, 2397), 'torch.nn.Softmax', 'nn.Softmax', ([], {}), '()\n', (2395, 2397), True, 'import torch.nn as nn\n'), ((4021, 4045), 'torch.ones_like', 'th.ones_like', (['x0[:, 0:1]'], {}), '(x0[:, 0:1])\n', (4033, 4045), True, 'import torch as th\n'), ((4376, 4400), 'torch.ones_like', 'th.ones_like', (['x1[:, 0:1]'], {}), '(x1[:, 0:1])\n', (4388, 4400), True, 'import torch as th\n')] |
import json
import pickle
import numpy as np
from bedrock_client.bedrock.model import BaseModel
from typing import Any, AnyStr, BinaryIO, List, Mapping, Optional, Union
# Ordered list of model features.
# Model.pre_process reads values from the incoming JSON in exactly this
# order to build the feature vector, so the ordering is significant
# (presumably it matches the column order used at training time -- verify
# against the training pipeline).
FEATURES = [
    'LIMIT_BAL',
    'SEX',
    'EDUCATION',
    'MARRIAGE',
    'AGE',
    'PAY_1',
    'PAY_2',
    'PAY_3',
    'PAY_4',
    'PAY_5',
    'PAY_6',
    'BILL_AMT1',
    'BILL_AMT2',
    'BILL_AMT3',
    'BILL_AMT4',
    'BILL_AMT5',
    'BILL_AMT6',
    'PAY_AMT1',
    'PAY_AMT2',
    'PAY_AMT3',
    'PAY_AMT4',
    'PAY_AMT5',
    'PAY_AMT6'
]
class Model(BaseModel):
    """Bedrock serving wrapper around a pickled classifier artefact."""

    def __init__(self):
        # Load the trained estimator baked into the model artefact image.
        with open("/artefact/model.pkl", "rb") as f:
            self.model = pickle.load(f)

    def predict(self, features: List[List[float]]) -> List[float]:
        """Return the probability of the first class for each input row."""
        return self.model.predict_proba(features)[:, 0].tolist()

    # Optional - Pre-process
    def pre_process(
        self, http_body: AnyStr, files: Optional[Mapping[str, BinaryIO]] = None
    ) -> List[List[float]]:
        """Parse a JSON HTTP body into a single-row feature matrix.

        Values are collected in the declared FEATURES order; a KeyError
        propagates if any expected feature is missing from the payload.
        NOTE(review): the annotated return type is List[List[float]] but
        an ndarray is actually returned, as in the original -- predict
        accepts either.
        """
        samples = json.loads(http_body)
        # Comprehension replaces the manual append loop (same order, same
        # values) and reshapes into [[<feat1>, <feat2>, ...]].
        features = [samples[col] for col in FEATURES]
        return np.array(features).reshape(1, -1)

    # Optional - Post-process
    def post_process(
        self, score: Union[List[float], List[Mapping[str, float]]], prediction_id: str
    ) -> Union[AnyStr, Mapping[str, Any]]:
        """Wrap the score and prediction id into the response payload."""
        return {"result": score, "prediction_id": prediction_id}
return {"result": score, "prediction_id": prediction_id} | [
"pickle.load",
"numpy.array",
"json.loads"
] | [((1071, 1092), 'json.loads', 'json.loads', (['http_body'], {}), '(http_body)\n', (1081, 1092), False, 'import json\n'), ((696, 710), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (707, 710), False, 'import pickle\n'), ((1295, 1313), 'numpy.array', 'np.array', (['features'], {}), '(features)\n', (1303, 1313), True, 'import numpy as np\n')] |
######################### perform optimization ##############################
import torch
from scipy.optimize import minimize
import numpy as np
import time
import sys
import os
sys.path.append(os.path.abspath("../IO"))
from import_export_vtk import export_momenta
from keops_utils import TestCuda
import pickle
# Default optimiser settings shared by every *_opt routine in this module.
# "use_scipy": optimise via scipy.optimize.minimize; otherwise torch LBFGS.
# "method": scipy minimisation method; "maxcor": L-BFGS history size.
params_opt=dict({"lr" : 1,"maxcor" : 10, "gtol" : 1e-3, "tol" : 1e-3, "use_scipy" : True, "method" : 'SLSQP'})
# Probe the available compute backend (device ids / dtypes for torch and KeOps).
use_cuda,torchdeviceId,torchdtype,KeOpsdeviceId,KeOpsdtype,KernelMethod = TestCuda()
def opt(loss,p0,q0, maxiter = 100, folder2save = '',savename = ''):
    """
    Optimization function calling either scipy or torch method.
    p0 is the variable to optimize, and can either be the initial momenta or a quaternion depending on the deformation one want to implement.

    loss : callable, ``loss(p0, q0) -> (gamma, E, A)``; the minimised cost
        is ``gamma * E + A``.
    q0 : fixed data passed through to ``loss``.
    maxiter : maximum number of iterations.
    folder2save, savename : when ``folder2save`` is non-empty, the sampled
        loss history is pickled to ``folder2save + '/dict_' + savename + '.pkl'``.

    Returns ``(p0, nit, total_time)``.
    """
    lr = params_opt["lr"]
    maxcor = params_opt["maxcor"]
    gtol = params_opt["gtol"]
    tol = params_opt["tol"]
    use_scipy = params_opt["use_scipy"] # If use_scipy: perform the optimization with L-BFGS through scipy.
    method = params_opt["method"]
    options = dict( maxiter = maxiter,
                    ftol = tol,
                    gtol = gtol,
                    maxcor = maxcor # Number of previous gradients used to approximate the Hessian
                )
    # Loss history, sampled every 5 iterations when saving is enabled.
    loss_dict = {}
    loss_dict['A'] = [0]
    loss_dict['E'] = [0]
    optimizer = torch.optim.LBFGS([p0], line_search_fn='strong_wolfe')
    start = time.time()
    print('performing optimization...')
    # The iteration counter is stored on the function object itself so the
    # nested closures can share and mutate it.
    opt.nit = -1
    def closure():
        opt.nit += 1; it = opt.nit
        optimizer.zero_grad()
        gamma,E,A = loss(p0,q0)
        L = gamma*E+A
        L.backward(retain_graph=True) # WARNING: retain_graph=True kept so another optimisation can be chained afterwards; remove it otherwise!
        print("Iteration ",it)
        if(folder2save != ''):
            if(opt.nit % 5 == 0):
                loss_dict['A'].append(float(A.detach().cpu().numpy()))
                loss_dict['E'].append(float(E.detach().cpu().numpy()))
        return L
    # Optimisation using scipy : we need to transfer the data from variable to float64
    def numpy_closure(vec):
        vec = lr*vec.astype('float64')
        numpy_to_model(p0,vec)
        c = closure().data.view(-1).cpu().numpy()[0]
        dvec = model_to_numpy(p0,grad = True)
        return (c,dvec)
    # Flatten the tensor (or its gradient) into a contiguous float64 vector.
    def model_to_numpy(p, grad=False) :
        if grad :
            tensors = p.grad.data.view(-1).cpu().numpy()
        else :
            tensors = p.data.view(-1).cpu().numpy()
        return np.ascontiguousarray( np.hstack(tensors) , dtype='float64' )
    # Copy the flat numpy vector back into the torch tensor, in place.
    def numpy_to_model(p, vec) :
        p.data = torch.from_numpy(vec).view(p.data.size()).type(p.data.type())
    if use_scipy :
        res = minimize( numpy_closure,      # function to minimize
                model_to_numpy(p0), # starting estimate
                method = method,
                jac = True,         # matching_problems also returns the gradient
                options = options)
        print(res.message)
    else :
        for i in range(int(maxiter/20)+1): # Fixed number of iterations
            optimizer.step(closure) # "Gradient descent" step.
    total_time = round(time.time()-start,2)
    print('Optimization time : ',total_time,' seconds')
    if(folder2save != ''):
        try:
            os.mkdir(folder2save)
        except OSError:
            pass
        loss_dict['Time'] = total_time
        loss_dict['it'] = opt.nit
        with open(folder2save+'/dict_'+savename+'.pkl','wb') as f:
            pickle.dump(loss_dict,f)
    return (p0,opt.nit,total_time)
def multiscale_opt(loss,p0,q0, maxiter = 100,folder2save = '',savename = ''):
    """Multi-scale variant of :func:`opt`.

    ``loss(p0, q0)`` must return ``(E_list, E, A)`` where ``E_list`` holds
    per-scale energies (at most 4; logged under keys 'E0'..'E3' -- TODO
    confirm the expected length), ``E`` the total energy and ``A`` the data
    attachment; the minimised cost is ``E + A``.
    Returns ``(p0, nit, total_time)``.
    """
    lr = params_opt["lr"]
    maxcor = params_opt["maxcor"]
    gtol = params_opt["gtol"]
    tol = params_opt["tol"]
    use_scipy = params_opt["use_scipy"] # If use_scipy: perform the optimization with L-BFGS through scipy.
    method = params_opt["method"]
    options = dict( maxiter = maxiter,
                    ftol = tol,
                    gtol = gtol,
                    maxcor = maxcor # Number of previous gradients used to approximate the Hessian
                )
    # Loss history, sampled every 5 iterations when saving is enabled.
    loss_dict = {}
    loss_dict['A'] = []
    loss_dict['E'] = []
    loss_dict['E0'] = []
    loss_dict['E1'] = []
    loss_dict['E2'] = []
    loss_dict['E3'] = []
    optimizer = torch.optim.LBFGS([p0], line_search_fn='strong_wolfe')
    start = time.time()
    print('performing optimization...')
    # Iteration counter shared through the sibling ``opt`` function object.
    opt.nit = -1
    def closure():
        opt.nit += 1; it = opt.nit
        optimizer.zero_grad()
        E_list,E,A = loss(p0,q0)
        L = E+A
        L.backward(retain_graph=True) # WARNING: retain_graph=True kept so this can be chained after the rigid step; remove it otherwise!
        print("Iteration ",it)
        print('E : ', E, " A : ", A)
        if(folder2save != ''):
            if(opt.nit % 5 == 0):
                loss_dict['A'].append(float(A.detach().cpu().numpy()))
                loss_dict['E'].append(float(E.detach().cpu().numpy()))
                for i,E_i in enumerate(E_list):
                    loss_dict['E'+str(i)].append(float(E_i.detach().cpu().numpy()))
        return L
    # Optimisation using scipy : we need to transfer the data from variable to float64
    def numpy_closure(vec):
        vec = lr*vec.astype('float64')
        numpy_to_model(p0,vec)
        c = closure().data.view(-1).cpu().numpy()[0]
        dvec = model_to_numpy(p0,grad = True)
        return (c,dvec)
    # Flatten the tensor (or its gradient) into a contiguous float64 vector.
    def model_to_numpy(p, grad=False) :
        if grad :
            tensors = p.grad.data.view(-1).cpu().numpy()
        else :
            tensors = p.data.view(-1).cpu().numpy()
        return np.ascontiguousarray( np.hstack(tensors) , dtype='float64' )
    # Copy the flat numpy vector back into the torch tensor, in place.
    def numpy_to_model(p, vec) :
        p.data = torch.from_numpy(vec).view(p.data.size()).type(p.data.type())
    #pdb.set_trace()
    #print(p0)
    if use_scipy :
        res = minimize( numpy_closure,      # function to minimize
                model_to_numpy(p0), # starting estimate
                method = method,
                jac = True,         # matching_problems also returns the gradient
                options = options)
        print(res.message)
    else :
        for i in range(int(maxiter/20)+1): # Fixed number of iterations
            optimizer.step(closure) # "Gradient descent" step.
    total_time = round(time.time()-start,2)
    print('Optimization time : ',total_time,' seconds')
    if(folder2save != ''):
        try:
            os.mkdir(folder2save)
        except OSError:
            pass
        with open(folder2save+'/dict_'+savename+'.pkl','wb') as f:
            pickle.dump(loss_dict,f)
    return (p0,opt.nit,total_time)
def template_opt(loss,P0,template, maxiter = 100):
    """Jointly optimise a list of initial momenta and a template shape.

    Parameters
    ----------
    loss : callable
        ``loss(P0, template)`` returning a scalar torch cost.
    P0 : list of torch.Tensor
        Initial momenta, optimised in place. The scipy path packs all
        variables into one flat vector split into *equal* chunks, so every
        tensor (including ``template``) must have the same element count.
    template : torch.Tensor
        Template variable, optimised in place.
    maxiter : int, optional
        Maximum number of iterations.

    Returns
    -------
    (momenta, template, nit, total_time)
        ``momenta`` is the optimised list (all variables but the last).
    """
    lr = params_opt["lr"]
    maxcor = params_opt["maxcor"]
    gtol = params_opt["gtol"]
    tol = params_opt["tol"]
    use_scipy = params_opt["use_scipy"]  # If True, optimise via scipy.optimize.minimize.
    method = params_opt["method"]

    options = dict( maxiter = maxiter,
                    ftol = tol,
                    gtol = gtol,
                    maxcor = maxcor # Number of previous gradients used to approximate the Hessian
                )

    # All optimised tensors: every momentum followed by the template.
    Variables = list(P0) + [template]
    optimizer = torch.optim.LBFGS(Variables,max_eval=maxiter,lr=lr, line_search_fn='strong_wolfe')
    start = time.time()
    print('performing optimization...')
    # Iteration counter shared through the sibling ``opt`` function object.
    opt.nit = -1

    def closure():
        opt.nit += 1; it = opt.nit
        optimizer.zero_grad()
        L = loss(P0,template)
        L.backward(retain_graph=True)
        print("Iteration ",it,", Cost = ", L.data.view(-1).cpu().numpy()[0])
        return L

    # scipy path: shuttle data between the torch tensors and one float64 vector.
    def numpy_closure(vec):
        vec = lr*vec.astype('float64')
        numpy_to_model(Variables,vec)
        c = closure().data.view(-1).cpu().numpy()[0]
        dvec = model_to_numpy(Variables,grad = True)
        return (c,dvec)

    def model_to_numpy(var_list, grad=False):
        # Flatten every tensor (or its gradient) into one contiguous
        # float64 vector. BUGFIX: the previous version also called
        # ``np.stack(tensors, axis=0)`` twice and discarded the result;
        # besides being dead code, np.stack raises ValueError whenever the
        # flattened tensors have different lengths, so it has been removed.
        if grad:
            tensors = [var.grad.data.view(-1).cpu().numpy() for var in var_list]
        else:
            tensors = [var.data.view(-1).cpu().numpy() for var in var_list]
        return np.ascontiguousarray( np.hstack(tensors) , dtype='float64' )

    def numpy_to_model(torch_obj_list, np_obj) :
        """ Take the numpy 1d vector of parameters and reshape it into the different tensors (moment+template) """
        n_tensors = len(torch_obj_list)
        len_obj = np_obj.shape[0]/n_tensors
        assert len_obj==int(len_obj),'The numpy object size is no multiple of the number of tensors'
        len_obj=int(len_obj)
        for k,tensor in enumerate(torch_obj_list):
            tensor.data = torch.from_numpy(np_obj[k*len_obj:(k+1)*len_obj]).view(tensor.data.size()).type(tensor.data.type())

    if use_scipy :
        res = minimize( numpy_closure,      # function to minimize
                model_to_numpy(Variables), # starting estimate
                method = method,
                jac = True,         # numpy_closure also returns the gradient
                options = options )
        print(res.message)
    else :
        for i in range(int(maxiter/20)+1): # Fixed number of iterations
            optimizer.step(closure) # "Gradient descent" step.
    total_time = round(time.time()-start,2)
    print('Optimization time : ',total_time,' seconds')
    return (Variables[:-1],Variables[-1],opt.nit,total_time)
def flow_opt(loss,x0,p0,q0, maxiter = 100,folder2save = '',savename = ''):
    """Optimise momenta ``p0`` for a flow cost ``loss(x0, p0, q0)``.

    When ``folder2save`` is non-empty, intermediate momenta are exported
    as VTK at iterations 10, 50, 100 and 500.
    Returns ``(p0, nit, total_time)``.
    """
    lr = params_opt["lr"]
    maxcor = params_opt["maxcor"]
    gtol = params_opt["gtol"]
    tol = params_opt["tol"]
    use_scipy = params_opt["use_scipy"] # If use_scipy: perform the optimization with L-BFGS through scipy.
    method = params_opt["method"]
    options = dict( maxiter = maxiter,
                    ftol = tol,
                    gtol = gtol,
                    maxcor = maxcor # Number of previous gradients used to approximate the Hessian
                )
    optimizer = torch.optim.LBFGS([p0], line_search_fn='strong_wolfe')
    start = time.time()
    print('performing optimization...')
    # Iteration counter shared through the sibling ``opt`` function object.
    opt.nit = -1
    def closure():
        opt.nit += 1; it = opt.nit
        optimizer.zero_grad()
        L = loss(x0,p0,q0)
        L.backward(retain_graph=True) # WARNING: retain_graph=True kept so this can be chained after the rigid step; remove it otherwise!
        if(folder2save != ''):
            # Snapshot momenta at a few fixed iterations for inspection.
            if(it==10 or it==50 or it==100 or it==500):
                temp = q0.detach().cpu().numpy()
                p0_np = p0.detach().cpu().numpy()
                export_momenta(temp, p0_np, 'Iter_'+str(it)+'_Momenta_'+savename, folder2save)
        return L
    # Optimisation using scipy : we need to transfer the data from variable to float64
    def numpy_closure(vec):
        vec = lr*vec.astype('float64')
        numpy_to_model(p0,vec)
        c = closure().data.view(-1).cpu().numpy()[0]
        dvec = model_to_numpy(p0,grad = True)
        return (c,dvec)
    # Flatten the tensor (or its gradient) into a contiguous float64 vector.
    def model_to_numpy(p, grad=False) :
        if grad :
            tensors = p.grad.data.view(-1).cpu().numpy()
        else :
            tensors = p.data.view(-1).cpu().numpy()
        return np.ascontiguousarray( np.hstack(tensors) , dtype='float64' )
    # Copy the flat numpy vector back into the torch tensor, in place.
    def numpy_to_model(p, vec) :
        p.data = torch.from_numpy(vec).view(p.data.size()).type(p.data.type())
    if use_scipy :
        res = minimize( numpy_closure,      # function to minimize
                model_to_numpy(p0), # starting estimate
                method = method,
                jac = True,         # matching_problems also returns the gradient
                options = options)
        print(res.message)
    else :
        for i in range(int(maxiter/20)+1): # Fixed number of iterations
            optimizer.step(closure) # "Gradient descent" step.
    total_time = round(time.time()-start,2)
    print('Optimization time : ',total_time,' seconds')
    return (p0,opt.nit,total_time)
def rigid_lddmm_opt(loss, quat0, p0, q0, maxiter = 100,folder2save = '',savename = ''):
    """Jointly optimise rigid parameters and LDDMM momenta.

    Parameters
    ----------
    loss : callable
        ``loss(quat0, p0, q0)`` returning the 7-tuple
        ``(gamma, E100, E50, E25, E12, A, rotation_cost)``.
    quat0 : torch.Tensor
        Rigid parameters, optimised in place. The scipy packing below
        assumes exactly 7 values (presumably quaternion + translation --
        verify against the caller).
    p0 : torch.Tensor
        Initial momenta, optimised in place.
    q0 : torch.Tensor
        Fixed initial positions passed through to ``loss``.
    maxiter : int, optional
    folder2save, savename : str, optional
        When ``folder2save`` is non-empty the sampled loss history is
        pickled to ``folder2save + '/dict_' + savename + '.pkl'``.

    Returns
    -------
    (quat0, p0, nit, total_time)
    """
    lr = params_opt["lr"]
    maxcor = params_opt["maxcor"]
    gtol = params_opt["gtol"]
    tol = params_opt["tol"]
    use_scipy = params_opt["use_scipy"]  # If True, optimise via scipy.optimize.minimize.
    method = params_opt["method"]

    options = dict( maxiter = maxiter,
                    ftol = tol,
                    gtol = gtol,
                    maxcor = maxcor # Number of previous gradients used to approximate the Hessian
                )

    optimizer = torch.optim.LBFGS([p0,quat0],max_eval=maxiter,lr=lr, line_search_fn='strong_wolfe')
    start = time.time()
    print('performing optimization...')
    # Iteration counter shared through the sibling ``opt`` function object.
    opt.nit = -1

    # Loss history, sampled every 5 iterations when saving is enabled.
    loss_dict = {}
    loss_dict['A'] = [0]
    loss_dict['E'] = [0]
    loss_dict['E100'] = [0]
    loss_dict['E50'] = [0]
    loss_dict['E25'] = [0]
    loss_dict['E12'] = [0]

    def closure():
        opt.nit += 1; it = opt.nit
        optimizer.zero_grad()
        (gamma,E100,E50,E25,E12,A,rotation_cost) = loss(quat0,p0,q0)
        # Multi-scale energy: coarser scales are weighted more heavily.
        E = E100+4.*E50+16.*E25+64.*E12
        L = gamma*E+A+0.0001*rotation_cost
        L.backward(retain_graph=True)
        print("Iteration ",it,", Cost = ", L.data.view(-1).cpu().numpy()[0])
        if(folder2save != ''):
            if(opt.nit % 5 == 0):
                loss_dict['A'].append(float(A.detach().cpu().numpy()))
                loss_dict['E'].append(float(E.detach().cpu().numpy()))
                loss_dict['E100'].append(float(E100.detach().cpu().numpy()))
                loss_dict['E50'].append(float(E50.detach().cpu().numpy()))
                loss_dict['E25'].append(float(E25.detach().cpu().numpy()))
                loss_dict['E12'].append(float(E12.detach().cpu().numpy()))
        return L

    # scipy path: shuttle data between the torch tensors and one float64
    # vector, laid out as (momenta, then the 7 rigid parameters).
    def numpy_closure(vec):
        vec = lr*vec.astype('float64')
        numpy_to_model(quat0,vec[-7:])
        numpy_to_model(p0,vec[:-7].astype('float64'))
        c = closure().data.view(-1).cpu().numpy()[0]
        # BUGFIX: ``dvec`` was previously returned without ever being
        # assigned, raising NameError as soon as scipy evaluated this
        # closure. Rebuild the concatenated gradient in the same
        # (momenta, rigid) layout as ``vec``.
        dvec = model_to_numpy(p0, quat0, grad=True)
        return (c,dvec)

    def model_to_numpy(p,quat, grad=False) :
        # Concatenate (momenta, rigid params) -- or their gradients --
        # into one contiguous float64 vector.
        if grad :
            tensors = quat.grad.data.view(-1).cpu().numpy()
            p_tensors = p.grad.data.view(-1).cpu().numpy()
        else :
            tensors = quat.data.view(-1).cpu().numpy()
            p_tensors = p.data.view(-1).cpu().numpy()
        tensor = np.ascontiguousarray( np.hstack((p_tensors,tensors)) , dtype='float64' )
        return tensor

    def numpy_to_model(torch_obj, np_obj) :
        # Copy the flat numpy vector back into the torch tensor, in place.
        torch_obj.data = torch.from_numpy(np_obj).view(torch_obj.data.size()).type(torch_obj.data.type())

    if use_scipy :
        res = minimize( numpy_closure,      # function to minimize
                model_to_numpy(p0,quat0), # starting estimate
                method = method,
                jac = True,         # numpy_closure also returns the gradient
                options = options )
        print(res.message)
    else :
        for i in range(int(maxiter/20)+1): # Fixed number of iterations
            optimizer.step(closure) # "Gradient descent" step.
    total_time = round(time.time()-start,2)
    print('Optimization time : ',total_time,' seconds')
    if(folder2save != ''):
        try:
            os.mkdir(folder2save)
        except OSError:
            pass
        with open(folder2save+'/dict_'+savename+'.pkl','wb') as f:
            pickle.dump(loss_dict,f)
    return (quat0,p0,opt.nit,total_time)
def rigid_opt(loss, quat0, q0, maxiter = 100,folder2save = '',savename = ''):
    """Optimise rigid parameters ``quat0`` for the cost ``loss(quat0, q0)``.

    When ``folder2save`` is non-empty the sampled cost history is pickled
    to ``folder2save + '/dict_' + savename + '.pkl'``.
    Returns ``(quat0, nit, total_time)``.
    """
    lr = params_opt["lr"]
    maxcor = params_opt["maxcor"]
    gtol = params_opt["gtol"]
    tol = params_opt["tol"]
    use_scipy = params_opt["use_scipy"] # If use_scipy: perform the optimization with L-BFGS through scipy.
    method = params_opt["method"]
    options = dict( maxiter = maxiter,
                    ftol = tol,
                    gtol = gtol,
                    maxcor = maxcor # Number of previous gradients used to approximate the Hessian
                )
    # NOTE(review): unlike the other routines, max_eval is hard-coded to 20
    # here -- confirm this is intended.
    optimizer = torch.optim.LBFGS([quat0], max_eval=20, lr=lr, line_search_fn='strong_wolfe')
    start = time.time()
    print('performing optimization...')
    # Iteration counter shared through the sibling ``opt`` function object.
    opt.nit = -1
    # Cost history, sampled every 5 iterations when saving is enabled.
    loss_dict = {}
    loss_dict['L'] = [0]
    def closure():
        opt.nit += 1; it = opt.nit
        optimizer.zero_grad()
        L = loss(quat0, q0)
        L.backward(retain_graph=True) #
        print("Iteration ",it,", Cost = ", L.data.view(-1).cpu().numpy()[0])
        if(folder2save != ''):
            if(opt.nit % 5 == 0):
                loss_dict['L'].append(float(L.detach().cpu().numpy()))
        return L
    # Optimisation using scipy : we need to transfer the data from variable to float64
    def numpy_closure(vec):
        vec = lr*vec.astype('float64')
        numpy_to_model(quat0,vec)
        c = closure().data.view(-1).cpu().numpy()[0]
        dvec = model_to_numpy(quat0,grad = True)
        return (c,dvec)
    # Flatten the tensor (or its gradient) into a contiguous float64 vector.
    def model_to_numpy(p, grad=False) :
        if grad :
            tensors = p.grad.data.view(-1).cpu().numpy()
        else :
            tensors = p.data.view(-1).cpu().numpy()
        return np.ascontiguousarray( np.hstack(tensors) , dtype='float64' )
    # Copy the flat numpy vector back into the torch tensor, in place.
    def numpy_to_model(torch_obj, np_obj) :
        torch_obj.data = torch.from_numpy(np_obj).view(torch_obj.data.size()).type(torch_obj.data.type())
    #pdb.set_trace()
    if use_scipy :
        res = minimize( numpy_closure,      # function to minimize
                model_to_numpy(quat0), # starting estimate
                method = method,
                jac = True,         # matching_problems also returns the gradient
                options = options )
        print(res.message)
    else :
        for i in range(int(maxiter)): # Fixed number of iterations
            optimizer.step(closure) # "Gradient descent" step.
    total_time = round(time.time()-start,2)
    print('Optimization time : ',total_time,' seconds')
    if(folder2save != ''):
        try:
            os.mkdir(folder2save)
        except OSError:
            pass
        with open(folder2save+'/dict_'+savename+'.pkl','wb') as f:
            pickle.dump(loss_dict,f)
    return (quat0,opt.nit,total_time)
| [
"numpy.stack",
"os.mkdir",
"os.path.abspath",
"pickle.dump",
"numpy.hstack",
"time.time",
"keops_utils.TestCuda",
"torch.optim.LBFGS",
"torch.from_numpy"
] | [((503, 513), 'keops_utils.TestCuda', 'TestCuda', ([], {}), '()\n', (511, 513), False, 'from keops_utils import TestCuda\n'), ((197, 221), 'os.path.abspath', 'os.path.abspath', (['"""../IO"""'], {}), "('../IO')\n", (212, 221), False, 'import os\n'), ((1414, 1468), 'torch.optim.LBFGS', 'torch.optim.LBFGS', (['[p0]'], {'line_search_fn': '"""strong_wolfe"""'}), "([p0], line_search_fn='strong_wolfe')\n", (1431, 1468), False, 'import torch\n'), ((1481, 1492), 'time.time', 'time.time', ([], {}), '()\n', (1490, 1492), False, 'import time\n'), ((4496, 4550), 'torch.optim.LBFGS', 'torch.optim.LBFGS', (['[p0]'], {'line_search_fn': '"""strong_wolfe"""'}), "([p0], line_search_fn='strong_wolfe')\n", (4513, 4550), False, 'import torch\n'), ((4563, 4574), 'time.time', 'time.time', ([], {}), '()\n', (4572, 4574), False, 'import time\n'), ((7723, 7812), 'torch.optim.LBFGS', 'torch.optim.LBFGS', (['Variables'], {'max_eval': 'maxiter', 'lr': 'lr', 'line_search_fn': '"""strong_wolfe"""'}), "(Variables, max_eval=maxiter, lr=lr, line_search_fn=\n 'strong_wolfe')\n", (7740, 7812), False, 'import torch\n'), ((7818, 7829), 'time.time', 'time.time', ([], {}), '()\n', (7827, 7829), False, 'import time\n'), ((10915, 10969), 'torch.optim.LBFGS', 'torch.optim.LBFGS', (['[p0]'], {'line_search_fn': '"""strong_wolfe"""'}), "([p0], line_search_fn='strong_wolfe')\n", (10932, 10969), False, 'import torch\n'), ((10982, 10993), 'time.time', 'time.time', ([], {}), '()\n', (10991, 10993), False, 'import time\n'), ((13546, 13637), 'torch.optim.LBFGS', 'torch.optim.LBFGS', (['[p0, quat0]'], {'max_eval': 'maxiter', 'lr': 'lr', 'line_search_fn': '"""strong_wolfe"""'}), "([p0, quat0], max_eval=maxiter, lr=lr, line_search_fn=\n 'strong_wolfe')\n", (13563, 13637), False, 'import torch\n'), ((13642, 13653), 'time.time', 'time.time', ([], {}), '()\n', (13651, 13653), False, 'import time\n'), ((17304, 17381), 'torch.optim.LBFGS', 'torch.optim.LBFGS', (['[quat0]'], {'max_eval': '(20)', 'lr': 'lr', 
'line_search_fn': '"""strong_wolfe"""'}), "([quat0], max_eval=20, lr=lr, line_search_fn='strong_wolfe')\n", (17321, 17381), False, 'import torch\n'), ((17394, 17405), 'time.time', 'time.time', ([], {}), '()\n', (17403, 17405), False, 'import time\n'), ((2608, 2626), 'numpy.hstack', 'np.hstack', (['tensors'], {}), '(tensors)\n', (2617, 2626), True, 'import numpy as np\n'), ((3295, 3306), 'time.time', 'time.time', ([], {}), '()\n', (3304, 3306), False, 'import time\n'), ((3425, 3446), 'os.mkdir', 'os.mkdir', (['folder2save'], {}), '(folder2save)\n', (3433, 3446), False, 'import os\n'), ((3650, 3675), 'pickle.dump', 'pickle.dump', (['loss_dict', 'f'], {}), '(loss_dict, f)\n', (3661, 3675), False, 'import pickle\n'), ((5861, 5879), 'numpy.hstack', 'np.hstack', (['tensors'], {}), '(tensors)\n', (5870, 5879), True, 'import numpy as np\n'), ((6589, 6600), 'time.time', 'time.time', ([], {}), '()\n', (6598, 6600), False, 'import time\n'), ((6719, 6740), 'os.mkdir', 'os.mkdir', (['folder2save'], {}), '(folder2save)\n', (6727, 6740), False, 'import os\n'), ((6870, 6895), 'pickle.dump', 'pickle.dump', (['loss_dict', 'f'], {}), '(loss_dict, f)\n', (6881, 6895), False, 'import pickle\n'), ((8650, 8675), 'numpy.stack', 'np.stack', (['tensors'], {'axis': '(0)'}), '(tensors, axis=0)\n', (8658, 8675), True, 'import numpy as np\n'), ((8789, 8814), 'numpy.stack', 'np.stack', (['tensors'], {'axis': '(0)'}), '(tensors, axis=0)\n', (8797, 8814), True, 'import numpy as np\n'), ((8854, 8872), 'numpy.hstack', 'np.hstack', (['tensors'], {}), '(tensors)\n', (8863, 8872), True, 'import numpy as np\n'), ((10084, 10095), 'time.time', 'time.time', ([], {}), '()\n', (10093, 10095), False, 'import time\n'), ((12119, 12137), 'numpy.hstack', 'np.hstack', (['tensors'], {}), '(tensors)\n', (12128, 12137), True, 'import numpy as np\n'), ((12802, 12813), 'time.time', 'time.time', ([], {}), '()\n', (12811, 12813), False, 'import time\n'), ((15537, 15568), 'numpy.hstack', 'np.hstack', (['(p_tensors, 
tensors)'], {}), '((p_tensors, tensors))\n', (15546, 15568), True, 'import numpy as np\n'), ((16339, 16350), 'time.time', 'time.time', ([], {}), '()\n', (16348, 16350), False, 'import time\n'), ((16469, 16490), 'os.mkdir', 'os.mkdir', (['folder2save'], {}), '(folder2save)\n', (16477, 16490), False, 'import os\n'), ((16620, 16645), 'pickle.dump', 'pickle.dump', (['loss_dict', 'f'], {}), '(loss_dict, f)\n', (16631, 16645), False, 'import pickle\n'), ((18450, 18468), 'numpy.hstack', 'np.hstack', (['tensors'], {}), '(tensors)\n', (18459, 18468), True, 'import numpy as np\n'), ((19202, 19213), 'time.time', 'time.time', ([], {}), '()\n', (19211, 19213), False, 'import time\n'), ((19332, 19353), 'os.mkdir', 'os.mkdir', (['folder2save'], {}), '(folder2save)\n', (19340, 19353), False, 'import os\n'), ((19483, 19508), 'pickle.dump', 'pickle.dump', (['loss_dict', 'f'], {}), '(loss_dict, f)\n', (19494, 19508), False, 'import pickle\n'), ((2702, 2723), 'torch.from_numpy', 'torch.from_numpy', (['vec'], {}), '(vec)\n', (2718, 2723), False, 'import torch\n'), ((5955, 5976), 'torch.from_numpy', 'torch.from_numpy', (['vec'], {}), '(vec)\n', (5971, 5976), False, 'import torch\n'), ((12209, 12230), 'torch.from_numpy', 'torch.from_numpy', (['vec'], {}), '(vec)\n', (12225, 12230), False, 'import torch\n'), ((15692, 15716), 'torch.from_numpy', 'torch.from_numpy', (['np_obj'], {}), '(np_obj)\n', (15708, 15716), False, 'import torch\n'), ((18563, 18587), 'torch.from_numpy', 'torch.from_numpy', (['np_obj'], {}), '(np_obj)\n', (18579, 18587), False, 'import torch\n'), ((9388, 9443), 'torch.from_numpy', 'torch.from_numpy', (['np_obj[k * len_obj:(k + 1) * len_obj]'], {}), '(np_obj[k * len_obj:(k + 1) * len_obj])\n', (9404, 9443), False, 'import torch\n')] |
import numpy as np
class Task(object):
    """Abstract base class for a simulated task driven by a bullet client.

    Subclasses must implement ``success_criterion``, ``reward_function``
    and ``get_status``. Gravity can be randomised per episode via the
    ``parameter_distributions["gravity"]`` entry.
    """

    def __init__(self,
                 bullet_client,
                 offset=(0, 0, 0),
                 max_steps=100,
                 parameter_distributions=None,
                 gravity=(0, 0, 0)):
        self.bullet_client = bullet_client
        self.offset = offset
        # Avoid a shared mutable default by substituting a fresh dict.
        self.parameter_distributions = (
            {} if parameter_distributions is None else parameter_distributions
        )
        self.step_counter = 0
        self.bullet_client.setGravity(*gravity)
        self.max_steps = max_steps

    @staticmethod
    def success_criterion(goal_info):
        """Decide success from ``goal_info``; subclasses must override."""
        raise NotImplementedError()

    def reward_function(self, done, goal_info, **kwargs):
        """Compute the reward; subclasses must override."""
        raise NotImplementedError()

    def reset(self):
        """Start a new episode: resample gravity and zero the step counter."""
        grav_dist = self.parameter_distributions.get("gravity", {})
        mu = grav_dist.get("mean", (0, 0, -9.81))
        sigma = grav_dist.get("std", (0, 0, 0))
        assert len(mu) == 3
        assert len(sigma) == 3
        self.bullet_client.setGravity(*np.random.normal(mu, sigma))
        self.step_counter = 0

    def step(self, observation_robot):
        """Advance one step and return ``(observation_task, goal_info, done)``."""
        self.step_counter += 1
        obs_task, goal, finished = self.get_status(observation_robot)
        return obs_task, goal, finished

    def get_status(self, observation_robot):
        """Return ``(observation_task, goal_info, done)``; subclasses must override."""
        raise NotImplementedError()
def get_task(task_config, bullet_client):
    """Instantiate the task named in ``task_config``.

    Parameters
    ----------
    task_config : dict
        Keyword arguments for the task constructor, plus a "name" key
        ("reach" or "pick_place"). NOTE: "name" is popped, so the caller's
        dict is mutated.
    bullet_client :
        Client handle forwarded to the task constructor.

    Returns
    -------
    Task
        The constructed task instance.

    Raises
    ------
    ValueError
        If the task name is unrecognised (the message now names the
        offending value instead of being empty).
    """
    task_name = task_config.pop("name")

    if task_name == 'reach':
        from .reach import Reach
        task = Reach(bullet_client, **task_config)
    elif task_name == 'pick_place':
        from .pick_place import Pick_Place
        task = Pick_Place(bullet_client, **task_config)
    else:
        raise ValueError("unknown task name: %r" % (task_name,))

    return task
| [
"numpy.random.normal"
] | [((1171, 1198), 'numpy.random.normal', 'np.random.normal', (['mean', 'std'], {}), '(mean, std)\n', (1187, 1198), True, 'import numpy as np\n')] |
"""
desitarget.targets
==================
Presumably this defines targets.
.. _`DocDB 2348`: https://desi.lbl.gov/DocDB/cgi-bin/private/RetrieveFile?docid=2348
"""
import numpy as np
import healpy as hp
import numpy.lib.recfunctions as rfn
from importlib import import_module
from astropy.table import Table
from desitarget.targetmask import desi_mask, bgs_mask, mws_mask
from desitarget.targetmask import scnd_mask, targetid_mask
from desitarget.targetmask import obsconditions
# ADM set up the DESI default logger.
from desiutil.log import get_logger
log = get_logger()
# ADM common redshift that defines a Lyman-Alpha QSO.
zcut = 2.1
# ADM common redshift that defines a QSO to be reobserved for
# ADM the Gontcho a Gontcho and Weiner et al. secondary programs.
midzcut = 1.6
def encode_targetid(objid=None, brickid=None, release=None,
                    mock=None, sky=None, gaiadr=None):
    """Compose the DESI TARGETID from input source and imaging info.

    Parameters
    ----------
    objid : :class:`int` or :class:`~numpy.ndarray`, optional
        OBJID from Legacy Surveys imaging, or the row within a Gaia
        HEALPixel file in $GAIA_DIR/healpix if `gaiadr` is not ``None``.
    brickid : :class:`int` or :class:`~numpy.ndarray`, optional
        BRICKID from Legacy Surveys imaging, or the Gaia HEALPixel chunk
        number for files in $GAIA_DIR/healpix if `gaiadr` is not ``None``.
    release : :class:`int` or :class:`~numpy.ndarray`, optional
        RELEASE from Legacy Surveys imaging. Or, if < 1000, the secondary
        target class bit flag number from 'data/targetmask.yaml'. Or, if
        < 1000 and `sky` is not ``None``, the HEALPixel processing number
        for SUPP_SKIES.
    mock : :class:`int` or :class:`~numpy.ndarray`, optional
        1 for a mock object (generated from mocks or a random catalog
        rather than real survey data), 0 otherwise.
    sky : :class:`int` or :class:`~numpy.ndarray`, optional
        1 for a blank sky object, 0 otherwise.
    gaiadr : :class:`int` or :class:`~numpy.ndarray`, optional
        Gaia Data Release number (e.g. send 2 for Gaia DR2). A value of 1
        does NOT mean DR1; it specifically denotes a DESI first-light
        commissioning target.

    Returns
    -------
    :class:`int` or `~numpy.ndarray`
        The TARGETID for DESI, encoded according to the bits listed in
        :meth:`desitarget.targetid_mask`. An integer is returned when the
        passed values are integers, otherwise an array.

    Notes
    -----
    - Mixes of integers and arrays can be passed, in case a value like
      BRICKID or SKY is shared by a set of objects.
    - See also `DocDB 2348`_.
    """
    # ADM names of the TARGETID bits, ignoring the RESERVED padding.
    bitnames = targetid_mask.names()
    if "RESERVED" in bitnames:
        bitnames.remove("RESERVED")

    params = [objid, brickid, release, mock, sky, gaiadr]

    # ADM the first input that was actually passed fixes whether we are
    # ADM working with scalars or arrays, and the output length.
    passed = [par is not None for par in params]
    first = np.where(passed)[0][0]
    scalar = not isinstance(params[first], np.ndarray)
    nobjs = 1 if scalar else len(params[first])

    # ADM unpassed values become zeroed arrays; everything else is
    # ADM promoted to at least a 1-D array.
    params = [np.zeros(nobjs, dtype='int64') if par is None
              else np.atleast_1d(par) for par in params]

    # ADM sanity-check each value against its bit allowance and its sign.
    for par, bitname in zip(params, bitnames):
        msg = 'Invalid range when making targetid: {} '.format(bitname)
        if not np.all(par < 2**targetid_mask[bitname].nbits):
            msg += 'cannot exceed {}'.format(2**targetid_mask[bitname].nbits - 1)
        if not np.all(par >= 0):
            msg += 'cannot be negative'
        if 'cannot' in msg:
            log.critical(msg)
            raise IOError(msg)

    # ADM build the TARGETID by shifting each value into its bit range.
    targetid = np.zeros(nobjs, 'int64')
    for par, bitname in zip(params, bitnames):
        targetid |= par.astype('int64') << targetid_mask[bitname].bitnum

    # ADM scalars in, scalar out.
    if scalar:
        return targetid[0]
    return targetid
def decode_targetid(targetid):
    """Break a DESI TARGETID into its constituent parts.

    Parameters
    ----------
    targetid : :class:`int` or :class:`~numpy.ndarray`
        The TARGETID for DESI, encoded according to the bits listed in
        :meth:`desitarget.targetid_mask`.

    Returns
    -------
    :class:`list`
        ``[objid, brickid, release, mock, sky, gaiadr]``, each an integer
        (for integer input) or an array, with the meanings documented in
        :func:`encode_targetid`.

    Notes
    -----
    - see also `DocDB 2348`_.
    """
    # ADM names of the TARGETID bits, ignoring the RESERVED padding.
    bitnames = targetid_mask.names()
    if "RESERVED" in bitnames:
        bitnames.remove("RESERVED")

    # ADM for each value, mask off its bit range then shift it down to
    # ADM the low-order end.
    outputs = []
    for bitname in bitnames:
        shift = targetid_mask[bitname].bitnum
        bitmask = (2**targetid_mask[bitname].nbits - 1) << shift
        outputs.append((targetid & bitmask) >> shift)
    return outputs
def encode_negative_targetid(ra, dec, group=1):
    """
    Create negative 64-bit TARGETID from (ra,dec) unique to ~1.2 milliarcsec

    Parameters
    ----------
    ra : :class:`float` or :class:`~numpy.ndarray`
        Right Ascension in degrees 0 <= ra <= 360
    dec : :class:`float` or :class:`~numpy.ndarray`
        Declination in degrees -90 <= dec <= 90
    group : int, optional (default 1)
        group number 1-15 to encode

    Returns
    -------
    :class:`~numpy.int64` or :class:`~numpy.ndarray`
        negative TARGETID derived from (ra,dec)
    """
    # Bit layout (low to high): 29 bits of dec, 30 bits of RA, 4 bits of
    # group; the whole quantity is negated on output.
    nbits_ra, nbits_dec, nbits_group = 30, 29, 4

    # Inputs must be consistently scalar or consistently array-like.
    scalar_input = np.isscalar(ra)
    if np.isscalar(ra) != np.isscalar(dec):
        raise TypeError('ra and dec must both be scalars or both be arrays')
    if not (1 <= group <= 15):
        raise ValueError(f'group {group} must be within 1-15')
    group = np.int8(group)

    # Promote to arrays so .astype(int) and vectorized checks work.
    ra = np.atleast_1d(ra)
    dec = np.atleast_1d(dec)
    assert np.all((0.0 <= ra) & (ra <= 360.0))
    assert np.all((-90.0 <= dec) & (dec <= 90.0))

    # Quantize each coordinate onto its integer bit range.
    ra_bits = ((2**nbits_ra - 1) * (ra/360.0)).astype(int)
    dec_bits = ((2**nbits_dec - 1) * ((dec+90.0)/180.0)).astype(int)

    targetid = -((group << (nbits_dec + nbits_ra))
                 + (ra_bits << nbits_dec) + dec_bits)

    # Scalar in, scalar out.
    return targetid[0] if scalar_input else targetid
def decode_negative_targetid(targetid):
    """Invert :func:`encode_negative_targetid`.

    Parameters
    ----------
    targetid : :class:`int` or :class:`~numpy.ndarray`
        Negative TARGETID(s) produced by :func:`encode_negative_targetid`.

    Returns
    -------
    :class:`float` or :class:`~numpy.ndarray`
        Right Ascension in degrees.
    :class:`float` or :class:`~numpy.ndarray`
        Declination in degrees.
    :class:`int` or :class:`~numpy.ndarray`
        The group number (1-15) encoded in the TARGETID.

    Notes
    -----
    - The returned (ra, dec) are the quantized grid values, so they match
      the inputs to :func:`encode_negative_targetid` only to within the
      ~1.2 milliarcsec quantization.
    """
    # Bit layout mirrors encode_negative_targetid (low to high):
    # 29 bits of dec, 30 bits of RA, 4 bits of group.
    nbits_ra = 30
    nbits_dec = 29
    nbits_group = 4

    dec_mask = 2**nbits_dec - 1
    ra_mask = 2**nbits_ra - 1
    group_mask = 2**nbits_group - 1

    group_bitshift = nbits_dec + nbits_ra
    ra_bitshift = nbits_dec

    # Work on the positive counterpart of the (negative) targetid.
    dec_bits = (-targetid) & dec_mask
    ra_bits = ((-targetid) >> ra_bitshift) & ra_mask
    group = ((-targetid) >> group_bitshift) & group_mask

    # Map the integer bins back onto sky coordinates.
    ra = ra_bits / (2**nbits_ra - 1) * 360.0
    dec = dec_bits / (2**nbits_dec - 1) * 180.0 - 90.0

    return ra, dec, group
def switch_main_cmx_or_sv(revamp, archetype):
    """Change the data model of a set of targets to match another.

    Parameters
    ----------
    revamp : :class:`~numpy.ndarray`
        Targets array (e.g. from :mod:`~desitarget.cuts`). Must include
        the columns `DESI_TARGET`, `MWS_TARGET` and `BGS_TARGET` or the
        corresponding commissioning or SV columns.
    archetype : :class:`~numpy.ndarray`
        Like `revamp` but with a different flavor of the `DESI_TARGET`,
        `MWS_TARGET` and `BGS_TARGET` columns (e.g. `revamp` has Main
        Survey columns and `archetype` has the SV1 columns).

    Returns
    -------
    :class:`~numpy.ndarray`
        `revamp` with the flavor of its `DESI_TARGET`, `MWS_TARGET` and
        `BGS_TARGET` columns updated to match that of `archetype`.
    """
    # ADM only propagate a SCND_TARGET-style column if `revamp` has one.
    scnd = np.any(["SCND_TARGET" in name for name in revamp.dtype.names])

    # ADM column names in the file to be changed...
    oldcols, _, _ = main_cmx_or_sv(revamp, scnd=scnd)
    # ADM ...and the column names to change to.
    newcols, _, _ = main_cmx_or_sv(archetype, scnd=scnd)

    # ADM rename old -> new.
    renamed = rfn.rename_fields(revamp, dict(zip(oldcols, newcols)))

    # ADM commissioning files have a single CMX_TARGET column, so the
    # ADM old columns must be dropped rather than renamed.
    if "CMX_TARGET" in newcols:
        renamed = rfn.drop_fields(renamed, oldcols)

    return renamed
def main_cmx_or_sv(targets, rename=False, scnd=False):
    """Determine whether a target array is main survey, commissioning, or SV.

    Parameters
    ----------
    targets : :class:`~numpy.ndarray`
        Targets array (e.g. from :mod:`~desitarget.cuts`). Must include at
        least (all of) the columns `DESI_TARGET`, `MWS_TARGET` and
        `BGS_TARGET` or the corresponding commissioning or SV columns.
    rename : :class:`bool`, optional, defaults to ``False``
        If ``True``, also return a copy of `targets` with the input
        `_TARGET` columns renamed to the main survey format.
    scnd : :class:`bool`, optional, defaults to ``False``
        If ``True``, add the secondary target information to the output.

    Returns
    -------
    :class:`list`
        Target column names: [`DESI_TARGET`, `BGS_TARGET`, `MWS_TARGET`]
        for the main survey, [`CMX_TARGET`] for commissioning, or, e.g.,
        [`SV1_DESI_TARGET`, `SV1_BGS_TARGET`, `SV1_MWS_TARGET`] for SV1.
        Also includes, e.g., `SCND_TARGET` if `scnd` is ``True``.
    :class:`list`
        The masks from the relevant main/cmx/sv yaml file corresponding
        to each column. Includes the relevant SCND_MASK if `scnd` is
        ``True``.
    :class:`str`
        'main', 'cmx' or 'svX' (where X = 1, 2, 3 etc.), specifying which
        type of file was sent.
    :class:`~numpy.ndarray`, optional, if `rename` is ``True``
        Copy of the input targets with the `_TARGET` columns renamed to
        `DESI_TARGET` and (if they exist) `BGS_TARGET`, `MWS_TARGET`.
    """
    # ADM assume the Main Survey unless the column names say otherwise.
    maincolnames = ["DESI_TARGET", "BGS_TARGET", "MWS_TARGET", "SCND_TARGET"]
    outcolnames = maincolnames.copy()
    masks = [desi_mask, bgs_mask, mws_mask, scnd_mask]
    survey = 'main'

    # ADM cmx and SV columns carry "CMX" or "SV" in their names; the
    # ADM prefix of the first such column fixes the survey type.
    incolnames = np.array(targets.dtype.names)
    notmain = np.array(['SV' in name or 'CMX' in name for name in incolnames])
    if np.any(notmain):
        outcolnames = list(incolnames[notmain])
        survey = outcolnames[0].split('_')[0].lower()
        if survey[:2] == 'sv':
            outcolnames = ["{}_{}".format(survey.upper(), col) for col in maincolnames]

    # ADM swap in the bitmasks appropriate to the detected survey type.
    if survey == 'cmx':
        from desitarget.cmx.cmx_targetmask import cmx_mask
        masks = [cmx_mask]
    elif survey[:2] == 'sv':
        try:
            targmask = import_module("desitarget.{}.{}_targetmask".format(
                survey, survey))
        except ModuleNotFoundError:
            msg = 'Bitmask yaml does not exist for survey type {}'.format(survey)
            log.critical(msg)
            raise ModuleNotFoundError(msg)
        masks = [targmask.desi_mask, targmask.bgs_mask,
                 targmask.mws_mask, targmask.scnd_mask]
    elif survey != 'main':
        msg = "input target file must be 'main', 'cmx' or 'sv', not {}!!!".format(survey)
        log.critical(msg)
        raise ValueError(msg)

    # ADM trim the secondary information unless it was requested.
    if not scnd:
        outcolnames, masks = outcolnames[:3], masks[:3]

    # ADM if requested, also return a copy with main-survey column names.
    if rename:
        mapper = dict(zip(outcolnames, maincolnames))
        return outcolnames, masks, survey, rfn.rename_fields(targets, mapper)

    return outcolnames, masks, survey
def set_obsconditions(targets, scnd=False):
    """Set the OBSCONDITIONS mask for each target bit.

    Parameters
    ----------
    targets : :class:`~numpy.ndarray`
        Targets array (e.g. from :mod:`~desitarget.cuts`). Must include
        at least (all of) the columns `DESI_TARGET`, `BGS_TARGET`,
        `MWS_TARGET` or the corresponding cmx or SV columns.
    scnd : :class:`bool`, optional, defaults to ``False``
        If ``True``, make all comparisons on the `SCND_TARGET` column
        instead of `DESI_TARGET`, `BGS_TARGET` and `MWS_TARGET`.

    Returns
    -------
    :class:`~numpy.ndarray`
        The OBSCONDITIONS bitmask for the passed targets.

    Notes
    -----
    - The OBSCONDITIONS for each target bit live in the file, e.g.,
      data/targetmask.yaml and can be retrieved using, for example,
      `obsconditions.mask(desi_mask["ELG"].obsconditions)`.
    """
    colnames, masks, _ = main_cmx_or_sv(targets, scnd=scnd)
    # ADM the secondary information, if requested, is the final
    # ADM column/mask pair returned by main_cmx_or_sv.
    if scnd:
        colnames, masks = colnames[-1:], masks[-1:]

    from desitarget.mtl import mtldatamodel as mtldm
    obscon = np.zeros(len(targets), dtype=mtldm["OBSCONDITIONS"].dtype)

    for targmask, col in zip(masks, colnames):
        for bitname in targmask.names():
            # ADM which targets have this bit set for this mask?
            is_set = (targets[col] & targmask[bitname]) != 0
            # ADM OR in the conditions under which the bit can be observed.
            if np.any(is_set):
                obscon[is_set] |= obsconditions.mask(
                    targmask[bitname].obsconditions)
    return obscon
def initial_priority_numobs(targets, scnd=False,
                            obscon="DARK|GRAY|BRIGHT|BACKUP|TWILIGHT12|TWILIGHT18"):
    """Highest initial priority and numobs for an array of target bits.

    Parameters
    ----------
    targets : :class:`~numpy.ndarray`
        Targets array (e.g. from :mod:`~desitarget.cuts`). Must include
        at least (all of) the columns `DESI_TARGET`, `BGS_TARGET`,
        `MWS_TARGET` or the corresponding cmx or SV columns.
    scnd : :class:`bool`, optional, defaults to ``False``
        If ``True``, make all comparisons on the `SCND_TARGET` column
        instead of `DESI_TARGET`, `BGS_TARGET` and `MWS_TARGET`.
    obscon : :class:`str`, optional, defaults to almost all OBSCONDITIONS
        A combination of strings from the desitarget bitmask yaml file
        (specifically `desitarget.targetmask.obsconditions`).

    Returns
    -------
    :class:`~numpy.ndarray`
        Highest initial priority for each target consistent with the
        observational conditions imposed by `obscon`.
    :class:`~numpy.ndarray`
        Largest number of observations for each target consistent with
        the observational conditions imposed by `obscon`.

    Notes
    -----
    - The initial priority for each target bit is in the file, e.g.,
      data/targetmask.yaml and can be retrieved using, for example,
      `desi_mask["ELG"].priorities["UNOBS"]`.
    - The input obscon string can be converted to a bitmask using
      `desitarget.targetmask.obsconditions.mask(blat)`.
    """
    colnames, masks, _ = main_cmx_or_sv(targets, scnd=scnd)
    # ADM the secondary information, if requested, is the final
    # ADM column/mask pair returned by main_cmx_or_sv.
    if scnd:
        colnames, masks = colnames[-1:], masks[-1:]

    # ADM initialize at -1, the convention for calibration targets, which
    # ADM carry no UNOBS priority and hence are never updated below.
    outpriority = np.full(len(targets), -1, dtype='int')
    outnumobs = np.full(len(targets), -1, dtype='int')

    # ADM convert the passed obscon string to bits.
    obsbits = obsconditions.mask(obscon)

    for colname, targmask in zip(colnames, masks):
        # ADM collect the bits that both carry an UNOBS priority and are
        # ADM observable under the passed conditions.
        bitnames = []
        for name in targmask.names():
            try:
                _ = targmask[name].priorities["UNOBS"]
                if (obsconditions.mask(targmask[name].obsconditions)
                        & obsbits) != 0:
                    bitnames.append(name)
            except KeyError:
                pass

        # ADM track the highest priority and largest NUMOBS per target.
        for name in bitnames:
            istarget = (targets[colname] & targmask[name]) != 0
            # ADM adopt this bit's UNOBS priority wherever it is set and
            # ADM at least as high as what is currently stored.
            upgrade = istarget & (targmask[name].priorities['UNOBS'] >= outpriority)
            outpriority[upgrade] = targmask[name].priorities['UNOBS']
            # ADM similarly adopt the largest NUMOBS.
            upgrade = istarget & (targmask[name].numobs >= outnumobs)
            outnumobs[upgrade] = targmask[name].numobs

    return outpriority, outnumobs
def calc_numobs_more(targets, zcat, obscon):
    """
    Calculate target NUMOBS_MORE from masks, observation/redshift status.
    Parameters
    ----------
    targets : :class:`~numpy.ndarray`
        numpy structured array or astropy Table of targets. Must include
        the columns `DESI_TARGET`, `BGS_TARGET`, `MWS_TARGET` (or their
        SV/cmx equivalents) `TARGETID` and `NUMOBS_INIT`. For Main Survey
        targets, must also contain `PRIORITY` if this isn't the first
        time through MTL (used to "lock in" the state of Ly-Alpha QSOs).
    zcat : :class:`~numpy.ndarray`
        numpy structured array or Table of redshift info. Must include
        `Z`, `ZWARN`, `NUMOBS` and `TARGETID` and BE SORTED ON TARGETID
        to match `targets` row-by-row. May also contain `NUMOBS_MORE` if
        this isn't the first time through MTL and `NUMOBS > 0`.
    obscon : :class:`str`
        A combination of strings that are in the desitarget bitmask yaml
        file (specifically in `desitarget.targetmask.obsconditions`), e.g.
        "DARK". Governs the behavior of how priorities are set based
        on "obsconditions" in the desitarget bitmask yaml file.
    Returns
    -------
    :class:`~numpy.array`
        Integer array of number of additional observations (NUMOBS_MORE).
    Notes
    -----
    - Will automatically detect if the passed targets are main
      survey, commissioning or SV and behave accordingly.
    - Most targets are updated to NUMOBS_MORE = NUMOBS_INIT-NUMOBS.
      Special case for the main survey is QSOs below the midz, which
      get 1 extra observation.
    """
    # ADM check input arrays are sorted to match row-by-row on TARGETID.
    assert np.all(targets["TARGETID"] == zcat["TARGETID"])
    # ADM determine whether the input targets are main survey, cmx or SV.
    colnames, masks, survey = main_cmx_or_sv(targets, scnd=True)
    # ADM the target bits/names should be shared between main survey and SV.
    # NOTE: the unpacked names deliberately shadow the module-level
    # desi_mask etc. with the flavor matching the input targets.
    if survey != 'cmx':
        desi_target, bgs_target, mws_target, scnd_target = colnames
        desi_mask, bgs_mask, mws_mask, scnd_mask = masks
    else:
        cmx_mask = masks[0]
    # ADM main case, just decrement by NUMOBS.
    numobs_more = np.maximum(0, targets['NUMOBS_INIT'] - zcat['NUMOBS'])
    # ADM apply special QSO behavior, but only in dark time and after
    # ADM some observations have occurred.
    if survey == 'main' and np.any(zcat["NUMOBS"] > 0):
        if (obsconditions.mask(obscon) & obsconditions.mask("DARK")) != 0:
            # ADM A QSO target that is confirmed to have a redshift at
            # ADM z < midzcut will need to drop by 2 total observations
            # ADM (midzcut is defined at the top of this module).
            # (bitwise & binds tighter than != in Python, so this reads
            # (targets & QSO) != 0, as intended.)
            isqso = targets[desi_target] & desi_mask.QSO != 0
            # ADM "lock in" the numobs state for existing Ly-Alpha QSOs.
            lya = targets["PRIORITY"] == desi_mask["QSO"].priorities["MORE_ZGOOD"]
            # ADM the mocks may not include the secondary targets.
            if scnd_target in targets.dtype.names:
                for scxname in scnd_mask.names():
                    if scnd_mask[scxname].flavor == "QSO":
                        isqso |= targets[scnd_target] & scnd_mask[scxname] != 0
            # ADM the definition used for "not-LyA" in calc_priority.
            # NOTE(review): assumes zcat carries Z_QN/IS_QSO_QN columns
            # for the main survey — confirm against the caller's schema.
            midz = (zcat['Z'] < zcut) & ((zcat['Z_QN'] < zcut)
                                         | (zcat["IS_QSO_QN"] != 1))
            # ADM the likely low-z sources get fewer (2) observations.
            loz = ((zcat['Z'] < midzcut) | (zcat['Z_QN'] < midzcut) |
                   (zcat["IS_QSO_QN"] != 1))
            ii = isqso & midz & loz & ~lya
            numobs_more[ii] = np.maximum(0, numobs_more[ii] - 2)
    return numobs_more
def calc_priority(targets, zcat, obscon, state=False):
    """
    Calculate target priorities from masks, observation/redshift status.
    Parameters
    ----------
    targets : :class:`~numpy.ndarray`
        numpy structured array or astropy Table of targets. Must include
        the columns `DESI_TARGET`, `BGS_TARGET`, `MWS_TARGET` (or their
        SV/cmx equivalents) and `TARGETID`. For Main Survey targets, must
        also contain `PRIORITY` if this isn't the first time through MTL,
        which is used to "lock in" the state of Lyman-Alpha quasars.
    zcat : :class:`~numpy.ndarray`
        numpy structured array or Table of redshift info. Must include
        `Z`, `ZWARN`, `NUMOBS` and `TARGETID` and BE SORTED ON TARGETID
        to match `targets` row-by-row. May also contain `NUMOBS_MORE` if
        this isn't the first time through MTL and `NUMOBS > 0`.
    obscon : :class:`str`
        A combination of strings that are in the desitarget bitmask yaml
        file (specifically in `desitarget.targetmask.obsconditions`), e.g.
        "DARK|GRAY". Governs the behavior of how priorities are set based
        on "obsconditions" in the desitarget bitmask yaml file.
    state : :class:`bool`
        If ``True`` then also return a string denoting the state that
        was set. The state is a string combining the observational
        state (e.g. "DONE", "MORE_ZGOOD") from the targeting yaml file
        and the target type (e.g. "ELG", "LRG").
    Returns
    -------
    :class:`~numpy.array`
        integer array of priorities.
    :class:`~numpy.array`
        string array of states. Only returned if `state`=``True``
    Notes
    -----
    - If a target passes multiple selections, highest priority wins.
    - Will automatically detect if the passed targets are main
      survey, commissioning or SV and behave accordingly.
    """
    # ADM check input arrays are sorted to match row-by-row on TARGETID.
    assert np.all(targets["TARGETID"] == zcat["TARGETID"])
    # ADM determine whether the input targets are main survey, cmx or SV.
    colnames, masks, survey = main_cmx_or_sv(targets, scnd=True)
    # ADM the target bits/names should be shared between main survey and SV.
    # NOTE: the unpacked names deliberately shadow the module-level
    # desi_mask etc. with the flavor matching the input targets.
    if survey != 'cmx':
        desi_target, bgs_target, mws_target, scnd_target = colnames
        desi_mask, bgs_mask, mws_mask, scnd_mask = masks
    else:
        cmx_mask = masks[0]
    # Default is 0 priority, i.e. do not observe.
    priority = np.zeros(len(targets), dtype='i8')
    # ADM set up a string to record the state of each target.
    from desitarget.mtl import mtldatamodel
    target_state = np.zeros(len(targets),
                            dtype=mtldatamodel["TARGET_STATE"].dtype)
    # Determine which targets have been observed.
    unobs = (zcat["NUMOBS"] == 0)
    log.debug('calc_priority has %d unobserved targets' % (np.sum(unobs)))
    if np.all(unobs):
        done = np.zeros(len(targets), dtype=bool)
        zgood = np.zeros(len(targets), dtype=bool)
        zwarn = np.zeros(len(targets), dtype=bool)
        lya = np.zeros(len(targets), dtype=bool)
    else:
        nmore = zcat["NUMOBS_MORE"]
        assert np.all(nmore >= 0)
        done = ~unobs & (nmore == 0)
        zgood = ~unobs & (nmore > 0) & (zcat['ZWARN'] == 0)
        zwarn = ~unobs & (nmore > 0) & (zcat['ZWARN'] != 0)
        # NOTE: lya is only (re)defined here for the main survey; later
        # uses are restricted to survey == "main" code paths.
        if survey == 'main':
            # ADM used to "lock in" the state of LyA QSOs...
            lya = targets["PRIORITY"] == desi_mask["QSO"].priorities["MORE_ZGOOD"]
            # ADM ...once they're observed...
            lya &= ~unobs
            # ADM ... and until they're done.
            lya &= ~done
    # zgood, zwarn, done, and unobs should be mutually exclusive and cover all
    # targets.
    assert not np.any(unobs & zgood)
    assert not np.any(unobs & zwarn)
    assert not np.any(unobs & done)
    assert not np.any(zgood & zwarn)
    assert not np.any(zgood & done)
    assert not np.any(zwarn & done)
    assert np.all(unobs | done | zgood | zwarn)
    # DESI dark time targets.
    if survey != 'cmx':
        if desi_target in targets.dtype.names:
            # ADM set initial state of CALIB for potential calibration targets.
            names = ('SKY', 'BAD_SKY', 'SUPP_SKY',
                     'STD_FAINT', 'STD_WD', 'STD_BRIGHT')
            for name in names:
                # ADM only update states for passed observing conditions.
                pricon = obsconditions.mask(desi_mask[name].obsconditions)
                if (obsconditions.mask(obscon) & pricon) != 0:
                    ii = (targets[desi_target] & desi_mask[name]) != 0
                    target_state[ii] = "CALIB"
            names = ('ELG_VLO', 'ELG_LOP', 'ELG_HIP', 'LRG')
            # ADM for sv3 the ELG guiding columns were ELG and ELG_HIP.
            if survey == 'sv3':
                names = ('ELG_LOP', 'ELG_HIP', 'LRG')
            for name in names:
                # ADM only update priorities for passed observing conditions.
                pricon = obsconditions.mask(desi_mask[name].obsconditions)
                if (obsconditions.mask(obscon) & pricon) != 0:
                    ii = (targets[desi_target] & desi_mask[name]) != 0
                    for sbool, sname in zip(
                            [unobs, done, zgood, zwarn],
                            ["UNOBS", "DONE", "MORE_ZGOOD", "MORE_ZWARN"]
                    ):
                        # ADM update priorities and target states.
                        Mxp = desi_mask[name].priorities[sname]
                        # ADM tiered system in SV3. Decrement MORE_ZWARN
                        # ADM priority using the bit's zwarndecrement.
                        if survey == "sv3" and sname == "MORE_ZWARN":
                            zwd = desi_mask[name].priorities["ZWARN_DECREMENT"]
                            Mxp -= zwd * zcat[ii & sbool]["NUMOBS"]
                        # ADM update states BEFORE changing priorities.
                        ts = "{}|{}".format(name, sname)
                        target_state[ii & sbool] = np.where(
                            priority[ii & sbool] < Mxp, ts, target_state[ii & sbool])
                        priority[ii & sbool] = np.where(
                            priority[ii & sbool] < Mxp, Mxp, priority[ii & sbool])
            # QSO could be Lyman-alpha or Tracer.
            name = 'QSO'
            # ADM only update priorities for passed observing conditions.
            pricon = obsconditions.mask(desi_mask[name].obsconditions)
            if (obsconditions.mask(obscon) & pricon) != 0:
                ii = (targets[desi_target] & desi_mask[name]) != 0
                # ADM LyA QSOs require more observations.
                # ADM (zcut is defined at the top of this module).
                # ADM Main Survey decisions are made using QN/Redrock.
                # NOTE(review): assumes zcat carries Z_QN/IS_QSO_QN
                # columns for the main survey — confirm upstream schema.
                if survey == "main":
                    good_hiz = (zcat['Z'] >= zcut) | ((zcat['Z_QN'] >= zcut) &
                                                      (zcat["IS_QSO_QN"] == 1))
                    # ADM all non-LyA-QSOs need more low-priority passes
                    # ADM in the Main Survey. The mid-z QSOs get 4 passes
                    # ADM at this lower priority, as requested by some
                    # ADM secondaries, which is set in calc_numobs_more.
                    good_midz = (zcat['Z'] < zcut) & ((zcat['Z_QN'] < zcut) |
                                                      (zcat["IS_QSO_QN"] != 1))
                    # ADM good_hiz & good_midz should never occur in
                    # ADM the Main Survey as they're complements.
                    assert not np.any(good_hiz & good_midz)
                    # ADM flip to the done state if we've reached it.
                    good_hiz &= ~done
                    good_midz &= ~done
                    # ADM Main Survey QSOs have no zwarn priority state
                    # ADM but do have a Lyman-alpha (lya) "locked-in" state.
                    sbools = [unobs, done, good_hiz, good_midz,
                              ~good_hiz & ~good_midz, lya]
                    snames = ["UNOBS", "DONE", "MORE_ZGOOD", "MORE_MIDZQSO",
                              "DONE", "MORE_ZGOOD"]
                # ADM In SV decisions were made without QN.
                elif survey == "sv3":
                    good_hiz = zgood & (zcat['Z'] >= zcut)
                    # ADM SV3 specified mid-z QSOs required more passes.
                    good_midz = zgood & (zcat['Z'] >= midzcut) & (zcat['Z'] < zcut)
                    # ADM in SV3 we had a zwarn priority state for QSOs.
                    sbools = [unobs, done, good_hiz, good_midz,
                              ~good_hiz & ~good_midz, zwarn]
                    snames = ["UNOBS", "DONE", "MORE_ZGOOD", "MORE_MIDZQSO",
                              "DONE", "MORE_ZWARN"]
                else:
                    good_hiz = zgood & (zcat['Z'] >= zcut)
                    good_midz = zgood & (zcat['Z'] < zcut)
                    sbools = [unobs, done, good_hiz, good_midz,
                              ~good_hiz & ~good_midz, zwarn]
                    snames = ["UNOBS", "DONE", "MORE_ZGOOD", "MORE_MIDZQSO",
                              "DONE", "MORE_ZWARN"]
                for sbool, sname in zip(sbools, snames):
                    # ADM update priorities and target states.
                    Mxp = desi_mask[name].priorities[sname]
                    # ADM tiered system in SV3. Decrement MORE_ZWARN
                    # ADM priority using the bit's zwarndecrement.
                    if survey == "sv3" and sname == "MORE_ZWARN":
                        zwd = desi_mask[name].priorities["ZWARN_DECREMENT"]
                        Mxp -= zwd * zcat[ii & sbool]["NUMOBS"]
                    # ADM update states BEFORE changing priorities.
                    ts = "{}|{}".format(name, sname)
                    target_state[ii & sbool] = np.where(
                        priority[ii & sbool] < Mxp, ts, target_state[ii & sbool])
                    priority[ii & sbool] = np.where(
                        priority[ii & sbool] < Mxp, Mxp, priority[ii & sbool])
        # BGS targets.
        if bgs_target in targets.dtype.names:
            for name in bgs_mask.names():
                # ADM only update priorities for passed observing conditions.
                pricon = obsconditions.mask(bgs_mask[name].obsconditions)
                if (obsconditions.mask(obscon) & pricon) != 0:
                    ii = (targets[bgs_target] & bgs_mask[name]) != 0
                    for sbool, sname in zip(
                            [unobs, done, zgood, zwarn],
                            ["UNOBS", "DONE", "MORE_ZGOOD", "MORE_ZWARN"]
                    ):
                        # ADM update priorities and target states.
                        Mxp = bgs_mask[name].priorities[sname]
                        # ADM tiered system in SV3. Decrement MORE_ZWARN
                        # ADM priority using the bit's zwarndecrement.
                        if survey == "sv3" and sname == "MORE_ZWARN":
                            zwd = bgs_mask[name].priorities["ZWARN_DECREMENT"]
                            Mxp -= zwd * zcat[ii & sbool]["NUMOBS"]
                        # ADM update states BEFORE changing priorities.
                        ts = "{}|{}".format(name, sname)
                        target_state[ii & sbool] = np.where(
                            priority[ii & sbool] < Mxp, ts, target_state[ii & sbool])
                        priority[ii & sbool] = np.where(
                            priority[ii & sbool] < Mxp, Mxp, priority[ii & sbool])
        # MWS targets.
        if mws_target in targets.dtype.names:
            # ADM set initial state of CALIB for potential calibration targets.
            stdnames = ('GAIA_STD_FAINT', 'GAIA_STD_WD', 'GAIA_STD_BRIGHT')
            for name in mws_mask.names():
                # ADM only update priorities for passed observing conditions.
                pricon = obsconditions.mask(mws_mask[name].obsconditions)
                if (obsconditions.mask(obscon) & pricon) != 0:
                    ii = (targets[mws_target] & mws_mask[name]) != 0
                    # ADM standards have no priority.
                    if name in stdnames:
                        target_state[ii] = "CALIB"
                    else:
                        for sbool, sname in zip(
                                [unobs, done, zgood, zwarn],
                                ["UNOBS", "DONE", "MORE_ZGOOD", "MORE_ZWARN"]
                        ):
                            # ADM update priorities and target states.
                            Mxp = mws_mask[name].priorities[sname]
                            # ADM tiered system in SV3. Decrement MORE_ZWARN
                            # ADM priority using the bit's zwarndecrement.
                            if survey == "sv3" and sname == "MORE_ZWARN":
                                zwd = mws_mask[name].priorities["ZWARN_DECREMENT"]
                                Mxp -= zwd * zcat[ii & sbool]["NUMOBS"]
                            # ADM update states BEFORE changing priorities.
                            ts = "{}|{}".format(name, sname)
                            target_state[ii & sbool] = np.where(
                                priority[ii & sbool] < Mxp, ts, target_state[ii & sbool])
                            priority[ii & sbool] = np.where(
                                priority[ii & sbool] < Mxp, Mxp, priority[ii & sbool])
        # ADM Secondary targets.
        if scnd_target in targets.dtype.names:
            # APC Secondaries only drive updates for specific DESI_TARGET
            # APC bits (https://github.com/desihub/desitarget/pull/530).
            # APC Default behaviour is that targets with SCND_ANY bits set will
            # APC ONLY be updated based on their secondary targetmask parameters IF
            # APC they have NO primary target bits set (hence == on next line).
            scnd_update = targets[desi_target] == desi_mask['SCND_ANY']
            log.info('{} scnd targets to be updated as secondary-only'.format(
                scnd_update.sum()))
            # APC The exception to the rule above is that a subset of bits flagged
            # APC with updatemws=True in the targetmask can drive updates for a
            # APC subset of primary bits corresponding to MWS targets and
            # APC standards. We first create a bitmask of those permitted seconday
            # APC bits.
            permit_scnd_bits = 0
            for name in scnd_mask.names():
                if survey == 'main':
                    # updatemws only defined for main survey targetmask.
                    if scnd_mask[name].updatemws:
                        permit_scnd_bits |= scnd_mask[name]
                else:
                    # Before updatemws was introduced, all scnd bits
                    # were permitted to update MWS targets.
                    permit_scnd_bits |= scnd_mask[name]
            # APC Now we flag any target combining the permitted secondary bits
            # APC and the restricted set of primary bits.
            permit_scnd = (targets[scnd_target] & permit_scnd_bits) != 0
            # APC Allow changes to primaries to be driven by the status of
            # APC their matched secondary bits if the DESI_TARGET bitmask has any
            # APC of the following bits set, but not any other bits.
            update_from_scnd_bits = (
                desi_mask['SCND_ANY'] | desi_mask['MWS_ANY'] |
                desi_mask['STD_BRIGHT'] | desi_mask['STD_FAINT'] |
                desi_mask['STD_WD'])
            permit_scnd &= ((targets[desi_target] & ~update_from_scnd_bits) == 0)
            log.info('{} more scnd targets allowed to update MWS primaries'.format(
                (permit_scnd & ~scnd_update).sum()))
            # APC Updateable targets are either pure secondary or explicitly permitted
            scnd_update |= permit_scnd
            log.info('{} scnd targets to be updated in total'.format(
                scnd_update.sum()))
            if np.any(scnd_update):
                for name in scnd_mask.names():
                    # ADM only update priorities for passed observing conditions.
                    pricon = obsconditions.mask(scnd_mask[name].obsconditions)
                    if (obsconditions.mask(obscon) & pricon) != 0:
                        ii = (targets[scnd_target] & scnd_mask[name]) != 0
                        ii &= scnd_update
                        # ADM scnd LyA QSOs may require more observations.
                        # ADM (zcut is defined at the top of this module).
                        # ADM Main Survey decisions are made using QN/Redrock.
                        if survey == "main":
                            good_hiz = (zcat['Z'] >= zcut) | ((zcat['Z_QN'] >= zcut) &
                                                              (zcat["IS_QSO_QN"] == 1))
                            # ADM all non-LyA-QSOs need more low-priority passes
                            # ADM in the Main Survey. The mid-z QSOs get 4 passes
                            # ADM at this lower priority, as requested by some
                            # ADM secondaries, which is set in calc_numobs_more.
                            good_midz = (zcat['Z'] < zcut) & ((zcat['Z_QN'] < zcut) |
                                                              (zcat["IS_QSO_QN"] != 1))
                            # ADM good_hiz & good_midz should never occur in
                            # ADM the Main Survey as they're complements.
                            assert not np.any(good_hiz & good_midz)
                            # ADM flip to the done state if we've reached it.
                            good_hiz &= ~done
                            good_midz &= ~done
                        # ADM In SV decisions were made without QN.
                        elif survey == "sv3":
                            good_hiz = zgood & (zcat['Z'] >= zcut)
                            # ADM SV3 specified mid-z QSOs required more passes.
                            good_midz = zgood & (zcat['Z'] >= midzcut) & (zcat['Z'] < zcut)
                        else:
                            good_hiz = zgood & (zcat['Z'] >= zcut)
                            good_midz = zgood & (zcat['Z'] < zcut)
                        # ADM secondary QSOs need processed like primary QSOs.
                        if scnd_mask[name].flavor == "QSO":
                            if survey == "main":
                                # ADM Main Survey QSOs have no zwarn priority state
                                # ADM but do have a Lyman-alpha (lya) "locked-in" state.
                                sbools = [unobs, done, good_hiz, good_midz,
                                          ~good_hiz & ~good_midz, lya]
                                snames = ["UNOBS", "DONE", "MORE_ZGOOD", "MORE_MIDZQSO",
                                          "DONE", "MORE_ZGOOD"]
                            else:
                                sbools = [unobs, done, good_hiz, good_midz,
                                          ~good_hiz & ~good_midz, zwarn]
                                snames = ["UNOBS", "DONE", "MORE_ZGOOD", "MORE_MIDZQSO",
                                          "DONE", "MORE_ZWARN"]
                        else:
                            sbools = [unobs, done, zgood, zwarn]
                            snames = ["UNOBS", "DONE", "MORE_ZGOOD", "MORE_ZWARN"]
                        for sbool, sname in zip(sbools, snames):
                            # ADM update priorities and target states.
                            Mxp = scnd_mask[name].priorities[sname]
                            # ADM tiered system in SV3. Decrement MORE_ZWARN
                            # ADM priority using the bit's zwarndecrement.
                            # if survey == "sv3" and sname == "MORE_ZWARN":
                            #     zwd = scnd_mask[name].priorities["ZWARN_DECREMENT"]
                            #     Mxp -= zwd * zcat[ii & sbool]["NUMOBS"]
                            # ADM update states BEFORE changing priorities.
                            ts = "{}|{}".format(name, sname)
                            target_state[ii & sbool] = np.where(
                                priority[ii & sbool] < Mxp, ts, target_state[ii & sbool])
                            priority[ii & sbool] = np.where(
                                priority[ii & sbool] < Mxp, Mxp, priority[ii & sbool])
        # Special case: IN_BRIGHT_OBJECT means priority=-1 no matter what.
        ii = (targets[desi_target] & desi_mask.IN_BRIGHT_OBJECT) != 0
        priority[ii] = -1
        target_state[ii] = "IN_BRIGHT_OBJECT"
    # ADM Special case: SV-like commissioning targets.
    if 'CMX_TARGET' in targets.dtype.names:
        priority = _cmx_calc_priority(targets, priority, obscon,
                                      unobs, done, zgood, zwarn, cmx_mask, obsconditions)
    if state:
        return priority, target_state
    return priority
def _cmx_calc_priority(targets, priority, obscon,
unobs, done, zgood, zwarn, cmx_mask, obsconditions):
"""Special-case logic for target priorities in CMX.
Parameters
----------
targets : :class:`~numpy.ndarray`
numpy structured array or astropy Table of targets. Must include
the column `CMX_TARGET`.
priority : :class:`~numpy.ndarray`
Initial priority values set, in calc_priorities().
obscon : :class:`str`
A combination of strings that are in the desitarget bitmask yaml
file (specifically in `desitarget.targetmask.obsconditions`), e.g.
"DARK|GRAY". Governs the behavior of how priorities are set based
on "obsconditions" in the desitarget bitmask yaml file.
unobs : :class:`~numpy.ndarray`
Boolean flag on targets indicating state UNOBS.
done : :class:`~numpy.ndarray`
Boolean flag on targets indicating state DONE.
zgood : :class:`~numpy.ndarray`
Boolean flag on targets indicating state ZGOOD.
zwarn : :class:`~numpy.ndarray`
Boolean flag on targets indicating state ZWARN.
cmx_mask : :class:`~desiutil.bitmask.BitMask`
The CMX target bitmask.
obscondtions : :class:`~desiutil.bitmask.BitMask`
The CMX obsconditions bitmask.
Returns
-------
:class:`~numpy.ndarray`
The updated priority values.
Notes
-----
- Intended to be called only from within calc_priority(), where any
pre-processing of the target state flags (uobs, done, zgood, zwarn) is
handled.
"""
# Build a permitted list of targets to update
names_to_update = ['SV0_' + label for label in ('STD_FAINT', 'STD_BRIGHT',
'BGS', 'MWS', 'WD', 'MWS_FAINT',
'MWS_CLUSTER', 'MWS_CLUSTER_VERYBRIGHT')]
names_to_update.extend(['BACKUP_BRIGHT', 'BACKUP_FAINT'])
for name in names_to_update:
pricon = obsconditions.mask(cmx_mask[name].obsconditions)
if (obsconditions.mask(obscon) & pricon) != 0:
ii = (targets['CMX_TARGET'] & cmx_mask[name]) != 0
priority[ii & unobs] = np.maximum(priority[ii & unobs], cmx_mask[name].priorities['UNOBS'])
priority[ii & done] = np.maximum(priority[ii & done], cmx_mask[name].priorities['DONE'])
priority[ii & zgood] = np.maximum(priority[ii & zgood], cmx_mask[name].priorities['MORE_ZGOOD'])
priority[ii & zwarn] = np.maximum(priority[ii & zwarn], cmx_mask[name].priorities['MORE_ZWARN'])
return priority
def resolve(targets):
"""Resolve which targets are primary in imaging overlap regions.
Parameters
----------
targets : :class:`~numpy.ndarray`
Rec array of targets. Must have columns "RA" and "DEC" and
either "RELEASE" or "PHOTSYS" or "TARGETID".
Returns
-------
:class:`~numpy.ndarray`
The original target list trimmed to only objects from the "northern"
photometry in the northern imaging area and objects from "southern"
photometry in the southern imaging area.
"""
# ADM retrieve the photometric system from the RELEASE.
from desitarget.io import release_to_photsys, desitarget_resolve_dec
if 'PHOTSYS' in targets.dtype.names:
photsys = targets["PHOTSYS"]
else:
if 'RELEASE' in targets.dtype.names:
photsys = release_to_photsys(targets["RELEASE"])
else:
_, _, release, _, _, _ = decode_targetid(targets["TARGETID"])
photsys = release_to_photsys(release)
# ADM a flag of which targets are from the 'N' photometry.
from desitarget.cuts import _isonnorthphotsys
photn = _isonnorthphotsys(photsys)
# ADM grab the declination used to resolve targets.
split = desitarget_resolve_dec()
# ADM determine which targets are north of the Galactic plane. As
# ADM a speed-up, bin in ~1 sq.deg. HEALPixels and determine
# ADM which of those pixels are north of the Galactic plane.
# ADM We should never be as close as ~1o to the plane.
from desitarget.geomask import is_in_gal_box, pixarea2nside
nside = pixarea2nside(1)
theta, phi = np.radians(90-targets["DEC"]), np.radians(targets["RA"])
pixnum = hp.ang2pix(nside, theta, phi, nest=True)
# ADM find the pixels north of the Galactic plane...
allpix = np.arange(hp.nside2npix(nside))
theta, phi = hp.pix2ang(nside, allpix, nest=True)
ra, dec = np.degrees(phi), 90-np.degrees(theta)
pixn = is_in_gal_box([ra, dec], [0., 360., 0., 90.], radec=True)
# ADM which targets are in pixels north of the Galactic plane.
galn = pixn[pixnum]
# ADM which targets are in the northern imaging area.
arean = (targets["DEC"] >= split) & galn
# ADM retain 'N' targets in 'N' area and 'S' in 'S' area.
keep = (photn & arean) | (~photn & ~arean)
return targets[keep]
def finalize(targets, desi_target, bgs_target, mws_target,
sky=False, randoms=False, survey='main', darkbright=False,
gaiadr=None, gdr=None, targetid=None, forcerelease=False):
"""Return new targets array with added/renamed columns
Parameters
----------
targets : :class:`~numpy.ndarray`
numpy structured array of targets.
desi_target : :class:`~numpy.ndarray`
1D array of target selection bit flags.
bgs_target : :class:`~numpy.ndarray`
1D array of target selection bit flags.
mws_target : :class:`~numpy.ndarray`
1D array of target selection bit flags.
sky : :class:`bool`, defaults to ``False``
Pass ``True`` for sky targets, ``False`` otherwise.
randoms : :class:`bool`, defaults to ``False``
``True`` if `targets` is a random catalog, ``False`` otherwise.
survey : :class:`str`, defaults to `main`
Specifies which target masks yaml file to use. Options are `main`,
`cmx` and `svX` (where X = 1, 2, 3 etc.) for the main survey,
commissioning and an iteration of SV.
darkbright : :class:`bool`, optional, defaults to ``False``
If sent, then split `NUMOBS_INIT` and `PRIORITY_INIT` into
`NUMOBS_INIT_DARK`, `NUMOBS_INIT_BRIGHT`, `PRIORITY_INIT_DARK`
and `PRIORITY_INIT_BRIGHT` and calculate values appropriate
to "BRIGHT" and "DARK|GRAY" observing conditions.
gaiadr : :class:`int`, optional, defaults to ``None``
If passed and not ``None``, then build the `TARGETID` from the
"GAIA_OBJID" and "GAIA_BRICKID" columns in the passed `targets`,
and set the `gaiadr` part of `TARGETID` to whatever is passed.
"RELEASE" is set to zero.
gdr : :class:`int`, defaults to ``None``
An alternate version of `gaiadr` where the "OBJID", "BRICKID" and
"RELEASE" columns are used as normal, but `gdr` is sent to
:func:`desitarget.targets.encode_targetid` as the gaiadr bit.
targetid : :class:`int64`, optional, defaults to ``None``
In the mocks we compute `TARGETID` outside this function.
Returns
-------
:class:`~numpy.ndarray`
new targets structured array with the following additions:
* renaming OBJID -> BRICK_OBJID (it is only unique within a brick).
* renaming TYPE -> MORPHTYPE (used downstream in other contexts).
* Adding new columns:
- TARGETID: unique ID across all bricks or Gaia files.
- DESI_TARGET: dark time survey target selection flags.
- MWS_TARGET: bright time MWS target selection flags.
- BGS_TARGET: bright time BGS target selection flags.
- PRIORITY_INIT: initial priority for observing target.
- SUBPRIORITY: a placeholder column that is set to zero.
- NUMOBS_INIT: initial number of observations for target.
- OBSCONDITIONS: bitmask of observation conditions.
Notes
-----
- SUBPRIORITY is the only column that isn't populated. This is
because it's easier to populate it in a reproducible fashion
when collecting targets rather than on a per-brick basis
when this function is called. It's set to all zeros.
- Only one of `gaiadr` and `gdr` can be input.
"""
if gaiadr is not None and gdr is not None:
msg = "only one of gaiadr and gdr can be input (and not None)"
log.critical(msg)
raise IOError(msg)
ntargets = len(targets)
assert ntargets == len(desi_target)
assert ntargets == len(bgs_target)
assert ntargets == len(mws_target)
# - OBJID in tractor files is only unique within the brick; rename and
# - create a new unique TARGETID
targets = rfn.rename_fields(targets,
{'OBJID': 'BRICK_OBJID', 'TYPE': 'MORPHTYPE'})
# allow TARGETID to be passed as an input (specifically for the mocks).
if targetid is None:
if gaiadr is not None:
targetid = encode_targetid(objid=targets['GAIA_OBJID'],
brickid=targets['GAIA_BRICKID'],
release=0,
mock=int(randoms),
sky=int(sky),
gaiadr=gaiadr)
else:
targetid = encode_targetid(objid=targets['BRICK_OBJID'],
brickid=targets['BRICKID'],
release=targets['RELEASE'],
mock=int(randoms),
sky=int(sky),
gaiadr=gdr)
assert ntargets == len(targetid)
nodata = np.zeros(ntargets, dtype='int')-1
subpriority = np.zeros(ntargets, dtype='float')
# ADM new columns are different depending on SV/cmx/main survey.
if survey == 'main':
colnames = ['DESI_TARGET', 'BGS_TARGET', 'MWS_TARGET']
elif survey == 'cmx':
colnames = ['CMX_TARGET']
elif survey[:2] == 'sv':
colnames = ["{}_{}_TARGET".format(survey.upper(), tc)
for tc in ["DESI", "BGS", "MWS"]]
else:
msg = "survey must be 'main', 'cmx' or 'svX' (X=1,2..etc.), not {}!" \
.format(survey)
log.critical(msg)
raise ValueError(msg)
# ADM the columns to write out and their values and formats.
cols = ["TARGETID"] + colnames + ['SUBPRIORITY', 'OBSCONDITIONS']
vals = [targetid] + [desi_target, bgs_target, mws_target][:len(colnames)] \
+ [subpriority, nodata]
forms = ['>i8'] + ['>i8', '>i8', '>i8'][:len(colnames)] + ['>f8', '>i8']
# ADM set the initial PRIORITY and NUMOBS.
if darkbright:
# ADM populate bright/dark if splitting by survey OBSCONDITIONS.
ender = ["_DARK", "_BRIGHT", "_BACKUP"]
obscon = ["DARK|GRAY", "BRIGHT", "BACKUP"]
else:
ender, obscon = [""], ["DARK|GRAY|BRIGHT|BACKUP|TWILIGHT12|TWILIGHT18"]
for edr, oc in zip(ender, obscon):
cols += ["{}_INIT{}".format(pn, edr) for pn in ["PRIORITY", "NUMOBS"]]
vals += [nodata, nodata]
forms += ['>i8', '>i8']
# ADM write the output array.
newdt = [dt for dt in zip(cols, forms)]
done = np.array(np.zeros(len(targets)), dtype=targets.dtype.descr+newdt)
for col in targets.dtype.names:
done[col] = targets[col]
for col, val in zip(cols, vals):
done[col] = val
# ADM add PRIORITY/NUMOBS columns.
for edr, oc in zip(ender, obscon):
pc, nc = "PRIORITY_INIT"+edr, "NUMOBS_INIT"+edr
done[pc], done[nc] = initial_priority_numobs(done, obscon=oc)
# ADM set the OBSCONDITIONS.
done["OBSCONDITIONS"] = set_obsconditions(done)
# ADM some final checks that the targets conform to expectations...
# ADM check that each target has a unique ID.
if len(done["TARGETID"]) != len(set(done["TARGETID"])):
msg = 'TARGETIDs are not unique!'
log.critical(msg)
raise AssertionError(msg)
# ADM check all LRG targets have LRG_1PASS/2PASS set.
# ADM we've moved away from LRG PASSes so deprecate this for now.
# if survey == 'main':
# lrgset = done["DESI_TARGET"] & desi_mask.LRG != 0
# pass1lrgset = done["DESI_TARGET"] & desi_mask.LRG_1PASS != 0
# pass2lrgset = done["DESI_TARGET"] & desi_mask.LRG_2PASS != 0
# if not np.all(lrgset == pass1lrgset | pass2lrgset):
# msg = 'Some LRG targets do not have 1PASS/2PASS set!'
# log.critical(msg)
# raise AssertionError(msg)
return done
| [
"numpy.maximum",
"numpy.sum",
"healpy.ang2pix",
"desitarget.targetmask.scnd_mask.names",
"numpy.int8",
"desiutil.log.get_logger",
"desitarget.io.desitarget_resolve_dec",
"numpy.degrees",
"desitarget.targetmask.bgs_mask.names",
"desitarget.geomask.is_in_gal_box",
"desitarget.io.release_to_photsys... | [((564, 576), 'desiutil.log.get_logger', 'get_logger', ([], {}), '()\n', (574, 576), False, 'from desiutil.log import get_logger\n'), ((3475, 3496), 'desitarget.targetmask.targetid_mask.names', 'targetid_mask.names', ([], {}), '()\n', (3494, 3496), False, 'from desitarget.targetmask import scnd_mask, targetid_mask\n'), ((4880, 4904), 'numpy.zeros', 'np.zeros', (['nobjs', '"""int64"""'], {}), "(nobjs, 'int64')\n", (4888, 4904), True, 'import numpy as np\n'), ((6989, 7010), 'desitarget.targetmask.targetid_mask.names', 'targetid_mask.names', ([], {}), '()\n', (7008, 7010), False, 'from desitarget.targetmask import scnd_mask, targetid_mask\n'), ((8258, 8273), 'numpy.isscalar', 'np.isscalar', (['ra'], {}), '(ra)\n', (8269, 8273), True, 'import numpy as np\n'), ((8503, 8517), 'numpy.int8', 'np.int8', (['group'], {}), '(group)\n', (8510, 8517), True, 'import numpy as np\n'), ((8588, 8605), 'numpy.atleast_1d', 'np.atleast_1d', (['ra'], {}), '(ra)\n', (8601, 8605), True, 'import numpy as np\n'), ((8616, 8634), 'numpy.atleast_1d', 'np.atleast_1d', (['dec'], {}), '(dec)\n', (8629, 8634), True, 'import numpy as np\n'), ((8647, 8682), 'numpy.all', 'np.all', (['((0.0 <= ra) & (ra <= 360.0))'], {}), '((0.0 <= ra) & (ra <= 360.0))\n', (8653, 8682), True, 'import numpy as np\n'), ((8694, 8732), 'numpy.all', 'np.all', (['((-90.0 <= dec) & (dec <= 90.0))'], {}), '((-90.0 <= dec) & (dec <= 90.0))\n', (8700, 8732), True, 'import numpy as np\n'), ((10735, 10793), 'numpy.any', 'np.any', (["[('SCND_TARGET' in i) for i in revamp.dtype.names]"], {}), "([('SCND_TARGET' in i) for i in revamp.dtype.names])\n", (10741, 10793), True, 'import numpy as np\n'), ((11142, 11176), 'numpy.lib.recfunctions.rename_fields', 'rfn.rename_fields', (['revamp', 'renamer'], {}), '(revamp, renamer)\n', (11159, 11176), True, 'import numpy.lib.recfunctions as rfn\n'), ((13483, 13512), 'numpy.array', 'np.array', (['targets.dtype.names'], {}), '(targets.dtype.names)\n', (13491, 
13512), True, 'import numpy as np\n'), ((13527, 13593), 'numpy.array', 'np.array', (["[('SV' in name or 'CMX' in name) for name in incolnames]"], {}), "([('SV' in name or 'CMX' in name) for name in incolnames])\n", (13535, 13593), True, 'import numpy as np\n'), ((13599, 13614), 'numpy.any', 'np.any', (['notmain'], {}), '(notmain)\n', (13605, 13614), True, 'import numpy as np\n'), ((16266, 16313), 'numpy.zeros', 'np.zeros', (['n'], {'dtype': "mtldm['OBSCONDITIONS'].dtype"}), "(n, dtype=mtldm['OBSCONDITIONS'].dtype)\n", (16274, 16313), True, 'import numpy as np\n'), ((19001, 19027), 'desitarget.targetmask.obsconditions.mask', 'obsconditions.mask', (['obscon'], {}), '(obscon)\n', (19019, 19027), False, 'from desitarget.targetmask import obsconditions\n'), ((22501, 22548), 'numpy.all', 'np.all', (["(targets['TARGETID'] == zcat['TARGETID'])"], {}), "(targets['TARGETID'] == zcat['TARGETID'])\n", (22507, 22548), True, 'import numpy as np\n'), ((23019, 23073), 'numpy.maximum', 'np.maximum', (['(0)', "(targets['NUMOBS_INIT'] - zcat['NUMOBS'])"], {}), "(0, targets['NUMOBS_INIT'] - zcat['NUMOBS'])\n", (23029, 23073), True, 'import numpy as np\n'), ((26547, 26594), 'numpy.all', 'np.all', (["(targets['TARGETID'] == zcat['TARGETID'])"], {}), "(targets['TARGETID'] == zcat['TARGETID'])\n", (26553, 26594), True, 'import numpy as np\n'), ((27485, 27498), 'numpy.all', 'np.all', (['unobs'], {}), '(unobs)\n', (27491, 27498), True, 'import numpy as np\n'), ((28579, 28615), 'numpy.all', 'np.all', (['(unobs | done | zgood | zwarn)'], {}), '(unobs | done | zgood | zwarn)\n', (28585, 28615), True, 'import numpy as np\n'), ((49608, 49634), 'desitarget.cuts._isonnorthphotsys', '_isonnorthphotsys', (['photsys'], {}), '(photsys)\n', (49625, 49634), False, 'from desitarget.cuts import _isonnorthphotsys\n'), ((49704, 49728), 'desitarget.io.desitarget_resolve_dec', 'desitarget_resolve_dec', ([], {}), '()\n', (49726, 49728), False, 'from desitarget.io import release_to_photsys, 
desitarget_resolve_dec\n'), ((50065, 50081), 'desitarget.geomask.pixarea2nside', 'pixarea2nside', (['(1)'], {}), '(1)\n', (50078, 50081), False, 'from desitarget.geomask import is_in_gal_box, pixarea2nside\n'), ((50169, 50209), 'healpy.ang2pix', 'hp.ang2pix', (['nside', 'theta', 'phi'], {'nest': '(True)'}), '(nside, theta, phi, nest=True)\n', (50179, 50209), True, 'import healpy as hp\n'), ((50329, 50365), 'healpy.pix2ang', 'hp.pix2ang', (['nside', 'allpix'], {'nest': '(True)'}), '(nside, allpix, nest=True)\n', (50339, 50365), True, 'import healpy as hp\n'), ((50429, 50490), 'desitarget.geomask.is_in_gal_box', 'is_in_gal_box', (['[ra, dec]', '[0.0, 360.0, 0.0, 90.0]'], {'radec': '(True)'}), '([ra, dec], [0.0, 360.0, 0.0, 90.0], radec=True)\n', (50442, 50490), False, 'from desitarget.geomask import is_in_gal_box, pixarea2nside\n'), ((54608, 54681), 'numpy.lib.recfunctions.rename_fields', 'rfn.rename_fields', (['targets', "{'OBJID': 'BRICK_OBJID', 'TYPE': 'MORPHTYPE'}"], {}), "(targets, {'OBJID': 'BRICK_OBJID', 'TYPE': 'MORPHTYPE'})\n", (54625, 54681), True, 'import numpy.lib.recfunctions as rfn\n'), ((55684, 55717), 'numpy.zeros', 'np.zeros', (['ntargets'], {'dtype': '"""float"""'}), "(ntargets, dtype='float')\n", (55692, 55717), True, 'import numpy as np\n'), ((8281, 8296), 'numpy.isscalar', 'np.isscalar', (['ra'], {}), '(ra)\n', (8292, 8296), True, 'import numpy as np\n'), ((8300, 8316), 'numpy.isscalar', 'np.isscalar', (['dec'], {}), '(dec)\n', (8311, 8316), True, 'import numpy as np\n'), ((11273, 11306), 'numpy.lib.recfunctions.drop_fields', 'rfn.drop_fields', (['renamed', 'oldcols'], {}), '(renamed, oldcols)\n', (11288, 11306), True, 'import numpy.lib.recfunctions as rfn\n'), ((23216, 23242), 'numpy.any', 'np.any', (["(zcat['NUMOBS'] > 0)"], {}), "(zcat['NUMOBS'] > 0)\n", (23222, 23242), True, 'import numpy as np\n'), ((27762, 27780), 'numpy.all', 'np.all', (['(nmore >= 0)'], {}), '(nmore >= 0)\n', (27768, 27780), True, 'import numpy as np\n'), ((28364, 28385), 
'numpy.any', 'np.any', (['(unobs & zgood)'], {}), '(unobs & zgood)\n', (28370, 28385), True, 'import numpy as np\n'), ((28401, 28422), 'numpy.any', 'np.any', (['(unobs & zwarn)'], {}), '(unobs & zwarn)\n', (28407, 28422), True, 'import numpy as np\n'), ((28438, 28458), 'numpy.any', 'np.any', (['(unobs & done)'], {}), '(unobs & done)\n', (28444, 28458), True, 'import numpy as np\n'), ((28474, 28495), 'numpy.any', 'np.any', (['(zgood & zwarn)'], {}), '(zgood & zwarn)\n', (28480, 28495), True, 'import numpy as np\n'), ((28511, 28531), 'numpy.any', 'np.any', (['(zgood & done)'], {}), '(zgood & done)\n', (28517, 28531), True, 'import numpy as np\n'), ((28547, 28567), 'numpy.any', 'np.any', (['(zwarn & done)'], {}), '(zwarn & done)\n', (28553, 28567), True, 'import numpy as np\n'), ((47860, 47908), 'desitarget.targetmask.obsconditions.mask', 'obsconditions.mask', (['cmx_mask[name].obsconditions'], {}), '(cmx_mask[name].obsconditions)\n', (47878, 47908), False, 'from desitarget.targetmask import obsconditions\n'), ((50099, 50130), 'numpy.radians', 'np.radians', (["(90 - targets['DEC'])"], {}), "(90 - targets['DEC'])\n", (50109, 50130), True, 'import numpy as np\n'), ((50130, 50155), 'numpy.radians', 'np.radians', (["targets['RA']"], {}), "(targets['RA'])\n", (50140, 50155), True, 'import numpy as np\n'), ((50290, 50310), 'healpy.nside2npix', 'hp.nside2npix', (['nside'], {}), '(nside)\n', (50303, 50310), True, 'import healpy as hp\n'), ((50380, 50395), 'numpy.degrees', 'np.degrees', (['phi'], {}), '(phi)\n', (50390, 50395), True, 'import numpy as np\n'), ((55632, 55663), 'numpy.zeros', 'np.zeros', (['ntargets'], {'dtype': '"""int"""'}), "(ntargets, dtype='int')\n", (55640, 55663), True, 'import numpy as np\n'), ((3820, 3837), 'numpy.where', 'np.where', (['goodpar'], {}), '(goodpar)\n', (3828, 3837), True, 'import numpy as np\n'), ((4178, 4208), 'numpy.zeros', 'np.zeros', (['nobjs'], {'dtype': '"""int64"""'}), "(nobjs, dtype='int64')\n", (4186, 4208), True, 'import numpy as 
np\n'), ((4247, 4267), 'numpy.atleast_1d', 'np.atleast_1d', (['param'], {}), '(param)\n', (4260, 4267), True, 'import numpy as np\n'), ((4511, 4560), 'numpy.all', 'np.all', (['(param < 2 ** targetid_mask[bitname].nbits)'], {}), '(param < 2 ** targetid_mask[bitname].nbits)\n', (4517, 4560), True, 'import numpy as np\n'), ((4657, 4675), 'numpy.all', 'np.all', (['(param >= 0)'], {}), '(param >= 0)\n', (4663, 4675), True, 'import numpy as np\n'), ((14927, 14961), 'numpy.lib.recfunctions.rename_fields', 'rfn.rename_fields', (['targets', 'mapper'], {}), '(targets, mapper)\n', (14944, 14961), True, 'import numpy.lib.recfunctions as rfn\n'), ((16601, 16611), 'numpy.any', 'np.any', (['ii'], {}), '(ii)\n', (16607, 16611), True, 'import numpy as np\n'), ((24514, 24548), 'numpy.maximum', 'np.maximum', (['(0)', '(numobs_more[ii] - 2)'], {}), '(0, numobs_more[ii] - 2)\n', (24524, 24548), True, 'import numpy as np\n'), ((27462, 27475), 'numpy.sum', 'np.sum', (['unobs'], {}), '(unobs)\n', (27468, 27475), True, 'import numpy as np\n'), ((31077, 31126), 'desitarget.targetmask.obsconditions.mask', 'obsconditions.mask', (['desi_mask[name].obsconditions'], {}), '(desi_mask[name].obsconditions)\n', (31095, 31126), False, 'from desitarget.targetmask import obsconditions\n'), ((34885, 34901), 'desitarget.targetmask.bgs_mask.names', 'bgs_mask.names', ([], {}), '()\n', (34899, 34901), False, 'from desitarget.targetmask import desi_mask, bgs_mask, mws_mask\n'), ((36543, 36559), 'desitarget.targetmask.mws_mask.names', 'mws_mask.names', ([], {}), '()\n', (36557, 36559), False, 'from desitarget.targetmask import desi_mask, bgs_mask, mws_mask\n'), ((39253, 39270), 'desitarget.targetmask.scnd_mask.names', 'scnd_mask.names', ([], {}), '()\n', (39268, 39270), False, 'from desitarget.targetmask import scnd_mask, targetid_mask\n'), ((40811, 40830), 'numpy.any', 'np.any', (['scnd_update'], {}), '(scnd_update)\n', (40817, 40830), True, 'import numpy as np\n'), ((48062, 48130), 'numpy.maximum', 
'np.maximum', (['priority[ii & unobs]', "cmx_mask[name].priorities['UNOBS']"], {}), "(priority[ii & unobs], cmx_mask[name].priorities['UNOBS'])\n", (48072, 48130), True, 'import numpy as np\n'), ((48165, 48231), 'numpy.maximum', 'np.maximum', (['priority[ii & done]', "cmx_mask[name].priorities['DONE']"], {}), "(priority[ii & done], cmx_mask[name].priorities['DONE'])\n", (48175, 48231), True, 'import numpy as np\n'), ((48268, 48341), 'numpy.maximum', 'np.maximum', (['priority[ii & zgood]', "cmx_mask[name].priorities['MORE_ZGOOD']"], {}), "(priority[ii & zgood], cmx_mask[name].priorities['MORE_ZGOOD'])\n", (48278, 48341), True, 'import numpy as np\n'), ((48377, 48450), 'numpy.maximum', 'np.maximum', (['priority[ii & zwarn]', "cmx_mask[name].priorities['MORE_ZWARN']"], {}), "(priority[ii & zwarn], cmx_mask[name].priorities['MORE_ZWARN'])\n", (48387, 48450), True, 'import numpy as np\n'), ((49305, 49343), 'desitarget.io.release_to_photsys', 'release_to_photsys', (["targets['RELEASE']"], {}), "(targets['RELEASE'])\n", (49323, 49343), False, 'from desitarget.io import release_to_photsys, desitarget_resolve_dec\n'), ((49454, 49481), 'desitarget.io.release_to_photsys', 'release_to_photsys', (['release'], {}), '(release)\n', (49472, 49481), False, 'from desitarget.io import release_to_photsys, desitarget_resolve_dec\n'), ((50400, 50417), 'numpy.degrees', 'np.degrees', (['theta'], {}), '(theta)\n', (50410, 50417), True, 'import numpy as np\n'), ((16643, 16687), 'desitarget.targetmask.obsconditions.mask', 'obsconditions.mask', (['mask[name].obsconditions'], {}), '(mask[name].obsconditions)\n', (16661, 16687), False, 'from desitarget.targetmask import obsconditions\n'), ((19442, 19486), 'desitarget.targetmask.obsconditions.mask', 'obsconditions.mask', (['mask[name].obsconditions'], {}), '(mask[name].obsconditions)\n', (19460, 19486), False, 'from desitarget.targetmask import obsconditions\n'), ((20099, 20167), 'numpy.where', 'np.where', (["((mask[name].priorities['UNOBS'] >= 
outpriority) & istarget)"], {}), "((mask[name].priorities['UNOBS'] >= outpriority) & istarget)\n", (20107, 20167), True, 'import numpy as np\n'), ((20523, 20576), 'numpy.where', 'np.where', (['((mask[name].numobs >= outnumobs) & istarget)'], {}), '((mask[name].numobs >= outnumobs) & istarget)\n', (20531, 20576), True, 'import numpy as np\n'), ((23256, 23282), 'desitarget.targetmask.obsconditions.mask', 'obsconditions.mask', (['obscon'], {}), '(obscon)\n', (23274, 23282), False, 'from desitarget.targetmask import obsconditions\n'), ((23285, 23311), 'desitarget.targetmask.obsconditions.mask', 'obsconditions.mask', (['"""DARK"""'], {}), "('DARK')\n", (23303, 23311), False, 'from desitarget.targetmask import obsconditions\n'), ((23895, 23912), 'desitarget.targetmask.scnd_mask.names', 'scnd_mask.names', ([], {}), '()\n', (23910, 23912), False, 'from desitarget.targetmask import scnd_mask, targetid_mask\n'), ((29037, 29086), 'desitarget.targetmask.obsconditions.mask', 'obsconditions.mask', (['desi_mask[name].obsconditions'], {}), '(desi_mask[name].obsconditions)\n', (29055, 29086), False, 'from desitarget.targetmask import obsconditions\n'), ((29622, 29671), 'desitarget.targetmask.obsconditions.mask', 'obsconditions.mask', (['desi_mask[name].obsconditions'], {}), '(desi_mask[name].obsconditions)\n', (29640, 29671), False, 'from desitarget.targetmask import obsconditions\n'), ((35006, 35054), 'desitarget.targetmask.obsconditions.mask', 'obsconditions.mask', (['bgs_mask[name].obsconditions'], {}), '(bgs_mask[name].obsconditions)\n', (35024, 35054), False, 'from desitarget.targetmask import obsconditions\n'), ((36664, 36712), 'desitarget.targetmask.obsconditions.mask', 'obsconditions.mask', (['mws_mask[name].obsconditions'], {}), '(mws_mask[name].obsconditions)\n', (36682, 36712), False, 'from desitarget.targetmask import obsconditions\n'), ((40860, 40877), 'desitarget.targetmask.scnd_mask.names', 'scnd_mask.names', ([], {}), '()\n', (40875, 40877), False, 'from 
desitarget.targetmask import scnd_mask, targetid_mask\n'), ((47921, 47947), 'desitarget.targetmask.obsconditions.mask', 'obsconditions.mask', (['obscon'], {}), '(obscon)\n', (47939, 47947), False, 'from desitarget.targetmask import obsconditions\n'), ((31143, 31169), 'desitarget.targetmask.obsconditions.mask', 'obsconditions.mask', (['obscon'], {}), '(obscon)\n', (31161, 31169), False, 'from desitarget.targetmask import obsconditions\n'), ((34567, 34633), 'numpy.where', 'np.where', (['(priority[ii & sbool] < Mxp)', 'ts', 'target_state[ii & sbool]'], {}), '(priority[ii & sbool] < Mxp, ts, target_state[ii & sbool])\n', (34575, 34633), True, 'import numpy as np\n'), ((34702, 34765), 'numpy.where', 'np.where', (['(priority[ii & sbool] < Mxp)', 'Mxp', 'priority[ii & sbool]'], {}), '(priority[ii & sbool] < Mxp, Mxp, priority[ii & sbool])\n', (34710, 34765), True, 'import numpy as np\n'), ((40990, 41039), 'desitarget.targetmask.obsconditions.mask', 'obsconditions.mask', (['scnd_mask[name].obsconditions'], {}), '(scnd_mask[name].obsconditions)\n', (41008, 41039), False, 'from desitarget.targetmask import obsconditions\n'), ((29107, 29133), 'desitarget.targetmask.obsconditions.mask', 'obsconditions.mask', (['obscon'], {}), '(obscon)\n', (29125, 29133), False, 'from desitarget.targetmask import obsconditions\n'), ((29692, 29718), 'desitarget.targetmask.obsconditions.mask', 'obsconditions.mask', (['obscon'], {}), '(obscon)\n', (29710, 29718), False, 'from desitarget.targetmask import obsconditions\n'), ((30670, 30736), 'numpy.where', 'np.where', (['(priority[ii & sbool] < Mxp)', 'ts', 'target_state[ii & sbool]'], {}), '(priority[ii & sbool] < Mxp, ts, target_state[ii & sbool])\n', (30678, 30736), True, 'import numpy as np\n'), ((30813, 30876), 'numpy.where', 'np.where', (['(priority[ii & sbool] < Mxp)', 'Mxp', 'priority[ii & sbool]'], {}), '(priority[ii & sbool] < Mxp, Mxp, priority[ii & sbool])\n', (30821, 30876), True, 'import numpy as np\n'), ((32261, 32289), 'numpy.any', 
'np.any', (['(good_hiz & good_midz)'], {}), '(good_hiz & good_midz)\n', (32267, 32289), True, 'import numpy as np\n'), ((35075, 35101), 'desitarget.targetmask.obsconditions.mask', 'obsconditions.mask', (['obscon'], {}), '(obscon)\n', (35093, 35101), False, 'from desitarget.targetmask import obsconditions\n'), ((36057, 36123), 'numpy.where', 'np.where', (['(priority[ii & sbool] < Mxp)', 'ts', 'target_state[ii & sbool]'], {}), '(priority[ii & sbool] < Mxp, ts, target_state[ii & sbool])\n', (36065, 36123), True, 'import numpy as np\n'), ((36200, 36263), 'numpy.where', 'np.where', (['(priority[ii & sbool] < Mxp)', 'Mxp', 'priority[ii & sbool]'], {}), '(priority[ii & sbool] < Mxp, Mxp, priority[ii & sbool])\n', (36208, 36263), True, 'import numpy as np\n'), ((36733, 36759), 'desitarget.targetmask.obsconditions.mask', 'obsconditions.mask', (['obscon'], {}), '(obscon)\n', (36751, 36759), False, 'from desitarget.targetmask import obsconditions\n'), ((37943, 38009), 'numpy.where', 'np.where', (['(priority[ii & sbool] < Mxp)', 'ts', 'target_state[ii & sbool]'], {}), '(priority[ii & sbool] < Mxp, ts, target_state[ii & sbool])\n', (37951, 38009), True, 'import numpy as np\n'), ((38094, 38157), 'numpy.where', 'np.where', (['(priority[ii & sbool] < Mxp)', 'Mxp', 'priority[ii & sbool]'], {}), '(priority[ii & sbool] < Mxp, Mxp, priority[ii & sbool])\n', (38102, 38157), True, 'import numpy as np\n'), ((41064, 41090), 'desitarget.targetmask.obsconditions.mask', 'obsconditions.mask', (['obscon'], {}), '(obscon)\n', (41082, 41090), False, 'from desitarget.targetmask import obsconditions\n'), ((45039, 45105), 'numpy.where', 'np.where', (['(priority[ii & sbool] < Mxp)', 'ts', 'target_state[ii & sbool]'], {}), '(priority[ii & sbool] < Mxp, ts, target_state[ii & sbool])\n', (45047, 45105), True, 'import numpy as np\n'), ((45190, 45253), 'numpy.where', 'np.where', (['(priority[ii & sbool] < Mxp)', 'Mxp', 'priority[ii & sbool]'], {}), '(priority[ii & sbool] < Mxp, Mxp, priority[ii & 
sbool])\n', (45198, 45253), True, 'import numpy as np\n'), ((42361, 42389), 'numpy.any', 'np.any', (['(good_hiz & good_midz)'], {}), '(good_hiz & good_midz)\n', (42367, 42389), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
"""Benchmark performance of different Minhash implementations.
Results:
minhash_ref : 6187.46 ms runtime
minhash_ref_opt : 3317.13 ms runtime
minhash_ref_np : 614.44 ms runtime
minhash_ref_numba : 86.77 ms runtime
minhash_xor_192 : 11.97 ms runtime
minhash_ref_192 : 131.68 ms runtime
minhash_xor : 447.82 ms runtime
minhash_xor_np : 147.60 ms runtime
minhash_xor_numba : 10.97 ms runtime
"""
import time
from itertools import chain
import numpy as np
from xxhash import xxh32_intdigest, xxh64_intdigest
from numba import njit
from statistics import mean, variance
from iscc_bench.algos.const import MINHASH_PERMUTATIONS
from iscc_bench.algos.metrics import jaccard
from iscc_bench.algos.slide import sliding_window
from iscc_bench.readers.gutenberg import gutenberg
from iscc_bench.readers.mltext import mltext
from iscc_bench.textid.normalize import text_normalize
from iscc_bench.utils import load_text_file
rand = np.random.RandomState(seed=28)
MAX_UINT64 = (1 << 64) - 1
MASKS_64_NP = rand.randint(0, MAX_UINT64, 64, dtype=np.uint64)
MASKS_64 = MASKS_64_NP.tolist()
###############################################################################
# Reference implementation #
###############################################################################
def minhash_ref(features_32):
features_32 = features_32.tolist()
max_int64 = (1 << 64) - 1
mersenne_prime = (1 << 61) - 1
max_hash = (1 << 32) - 1
hashvalues = [max_hash] * 128
a, b = MINHASH_PERMUTATIONS
for hv in features_32:
nhs = []
for x in range(128):
nh = (((a[x] * hv + b[x]) & max_int64) % mersenne_prime) & max_hash
nhs.append(min(nh, hashvalues[x]))
hashvalues = nhs
return hashvalues
def minhash_ref_opt(features_32):
features_32 = features_32.tolist()
max_int64 = (1 << 64) - 1
mersenne_prime = (1 << 61) - 1
max_hash = (1 << 32) - 1
perms = [*zip(*MINHASH_PERMUTATIONS)]
return [
min(
(((a * f + b) & max_int64) % mersenne_prime) & max_hash for f in features_32
)
for a, b in perms[:128]
]
def minhash_ref_np(features_32):
_mersenne_prime = (1 << 61) - 1
_max_hash = (1 << 32) - 1
_hash_range = 1 << 32
hashvalues = np.ones(128, dtype=np.uint64) * _max_hash
a, b = np.array(
[MINHASH_PERMUTATIONS[0][:128], MINHASH_PERMUTATIONS[1][:128]], dtype=np.uint64
)
for hv in features_32:
phv = np.bitwise_and((a * hv + b) % _mersenne_prime, np.uint64(_max_hash))
hashvalues = np.minimum(phv, hashvalues)
return hashvalues.tolist()
PERMS_NUMBA = np.array(
[MINHASH_PERMUTATIONS[0][:128], MINHASH_PERMUTATIONS[1][:128]], dtype=np.uint64
)
@njit
def minhash_ref_numba(features_32):
_mersenne_prime = np.uint64((1 << 61) - 1)
_max_hash = np.uint32((1 << 32) - 1)
hashvalues = np.full(128, _max_hash, dtype=np.uint64)
a = PERMS_NUMBA[0]
b = PERMS_NUMBA[1]
for hv in features_32:
phv = np.bitwise_and((a * hv + b) % _mersenne_prime, np.uint64(_max_hash))
hashvalues = np.minimum(phv, hashvalues)
return hashvalues
###############################################################################
# Simplified implementations with XOR based random permutations #
###############################################################################
def minhash_xor(features, masks=MASKS_64):
    """Pure Python implementation.

    XOR-permutation minhash: for every mask, return the minimum of
    ``feature ^ mask`` over all features.

    Fix: ``features.tolist()`` was previously re-evaluated inside the
    per-mask comprehension (once per mask, i.e. 64 redundant conversions);
    it is now hoisted out of the loop. Output is unchanged.
    """
    feats = features.tolist()
    return [min(f ^ m for f in feats) for m in masks]
def minhash_xor_np(features, masks=MASKS_64_NP):
    """Numpy supported implementation.

    XOR-permutation minhash; one signature slot per mask.

    Generalized: the accumulator length now follows ``len(masks)`` instead
    of a hard-coded 64, so mask arrays of any length work. Behaviour with
    the default ``MASKS_64_NP`` (length 64) is unchanged.
    """
    hashes = np.full(len(masks), MAX_UINT64, dtype=np.uint64)
    for f in features:
        # xor the feature against every mask, keep the running minima
        hashes = np.minimum(hashes, np.bitwise_xor(masks, f))
    return hashes.tolist()
@njit
def minhash_xor_numba(features, masks=MASKS_64_NP):
    """Numpy & Numba supported implementation"""
    # NOTE(review): accumulator length is hard-coded to 64; a masks array of
    # a different length would fail to broadcast — confirm intended.
    hashes = np.full(64, MAX_UINT64, dtype=np.uint64)
    for f in features:
        # xor the feature against every mask, keep the running minima
        hashes = np.minimum(hashes, np.bitwise_xor(masks, f))
    return hashes
###############################################################################
# Compare Universal Hash vs XOR at 192 permutations with 32 bit features #
###############################################################################
# 32-bit ceiling and 192 random 32-bit XOR masks for the wider comparison run.
MAX_UINT32 = 2 ** 32 - 1
PERMS_192_NP = rand.randint(0, MAX_UINT32, 192, dtype=np.uint32)
@njit
def minhash_xor_192(features_32, masks=PERMS_192_NP):
    """Numpy & Numba supported implementation"""
    # dtype of `hashes` follows the uint32 fill value
    hashes = np.full(192, np.uint32(MAX_UINT32))
    for f in features_32:
        # xor the feature against every mask, keep the running minima
        hashes = np.minimum(hashes, np.bitwise_xor(masks, f))
    return hashes
# Universal-hash coefficients for the 192-permutation comparison run.
PERMS_192 = np.array(
    [MINHASH_PERMUTATIONS[0][:192], MINHASH_PERMUTATIONS[1][:192]], dtype=np.uint64
)
@njit
def minhash_ref_192(features_32):
    """Numba-compiled universal-hash minhash with 192 permutations."""
    _mersenne_prime = np.uint64((1 << 61) - 1)
    _max_hash = np.uint32((1 << 32) - 1)
    hashvalues = np.full(192, _max_hash, dtype=np.uint64)
    a = PERMS_192[0]
    b = PERMS_192[1]
    for hv in features_32:
        # ((a*x + b) mod p) truncated to 32 bits, elementwise over 192 perms
        phv = np.bitwise_and((a * hv + b) % _mersenne_prime, np.uint64(_max_hash))
        hashvalues = np.minimum(phv, hashvalues)
    return hashvalues
# Implementation groups used by the compat/performance/quality drivers below.
# Universal-hash reference implementations (32-bit features, 128 perms).
funcs_ref = (
    minhash_ref,
    minhash_ref_opt,
    minhash_ref_np,
    minhash_ref_numba,
)
# XOR-based implementations (64-bit features, 64 masks).
funcs_xor = (
    minhash_xor,
    minhash_xor_np,
    minhash_xor_numba,
)
# All implementations that accept 32-bit features.
funcs_f32 = (
    minhash_ref,
    minhash_ref_opt,
    minhash_ref_np,
    minhash_ref_numba,
    minhash_xor_192,
    minhash_ref_192,
)
def compat():
    """Test compatibility of implementations"""
    # All universal-hash reference variants must produce identical signatures.
    feats32 = np.array(
        [xxh32_intdigest(rand.bytes(13)) for _ in range(100)], dtype=np.uint32
    )
    seen = set()
    print("\nTesting minhash reference compatibility:\n")
    for fn in funcs_ref:
        sig = tuple(fn(feats32))
        print(f"{fn.__name__:<18}: {sig}")
        seen.add(sig)
    assert len(seen) == 1
    # XOR variants are printed for inspection; equality is not asserted.
    feats64 = np.array([xxh64_intdigest(rand.bytes(13)) for _ in range(100)], dtype=np.uint64)
    seen = set()
    print("\nTesting minhash xor compatibility:\n")
    for fn in funcs_xor:
        sig = tuple(fn(feats64))
        print(f"{fn.__name__:<18}: {sig}")
        seen.add(sig)
    # assert len(results) == 1
def performance():
    """
    Compare performance of xor based implementations with reference
    Results for 100k features:
        minhash_ref        : 6858.04 ms runtime
        minhash_ref_opt    : 3738.82 ms runtime
        minhash_ref_np     : 607.38 ms runtime
        minhash_ref_numba  : 90.76 ms runtime
        minhash_xor        : 478.72 ms runtime
        minhash_xor_np     : 153.59 ms runtime
        minhash_xor_numba  : 11.97 ms runtime
    """
    nfeat = 10000
    print(f"\nTesting minhash performance with {nfeat} features:\n")
    # 32-bit features for the universal-hash implementations
    features_32 = np.array(
        [xxh32_intdigest(rand.bytes(13)) for _ in range(nfeat)], dtype=np.uint32
    )
    _time_funcs(funcs_f32, features_32)
    # 64-bit features for the XOR-based implementations
    features_64 = np.array(
        [xxh64_intdigest(rand.bytes(13)) for _ in range(nfeat)], dtype=np.uint64
    )
    _time_funcs(funcs_xor, features_64)


def _time_funcs(funcs, features):
    """Time a single call of each function and print the runtime.

    The first, untimed call warms the function up (triggers numba JIT
    compilation) so the measured call reflects steady-state performance.
    """
    for func in funcs:
        func(features)  # warm-up run; result intentionally discarded
        start = time.time()
        func(features)
        end = time.time()
        rt = (end - start) * 1000
        print(f"{func.__name__:<18}: {rt:.2f} ms runtime")
def quality(seed=298):
    """Measure how well each minhash variant approximates exact Jaccard
    similarity on real text corpora.

    Documents are taken in overlapping triples (a, b, c): a/b are expected
    to be similar, a/c dissimilar. The reported error is the absolute
    difference between exact Jaccard on the raw feature sets and Jaccard
    on the minhash signatures, aggregated as mean/max/variance.

    NOTE(review): ``seed`` is accepted but currently unused — kept for
    interface compatibility; confirm whether it was meant to seed ``rand``.
    """
    print("\nTesting minhash quality:\n")
    fps = list(chain(gutenberg(), mltext()))
    _quality_report(fps, minhash_xor_numba, "minhash xor 64")
    _quality_report(fps, minhash_ref_numba, "minhash ref 64")
    _quality_report(fps, minhash_ref_192, "minhash ref 192")


def _chunkify(text):
    """Split text into overlapping 13-character shingles."""
    return ["".join(c) for c in sliding_window(text, 13)]


def _hashify_32(chunks):
    """Hash each shingle to a 32-bit feature."""
    return np.array([xxh32_intdigest(f) for f in chunks], np.uint32)


def _quality_report(fps, minhash_func, label):
    """Run the similar/dissimilar error measurement for one minhash function
    over all document triples and print aggregate statistics.

    Replaces three previously duplicated copies of this loop (one per
    minhash variant); output format is unchanged.
    """
    sim_errs = []
    dis_errs = []
    for abc in sliding_window(fps, 3, 2, fillvalue=None):
        abc = list(abc)
        if abc[-1] is None:
            # incomplete trailing window — skip
            continue
        texts = (load_text_file(t) for t in abc)
        norm_texts = (text_normalize(t) for t in texts)
        chunked_texts = [_chunkify(t) for t in norm_texts]
        feature_texts = [_hashify_32(f) for f in chunked_texts]
        sim_sim = jaccard(feature_texts[0], feature_texts[1])
        sim_dis = jaccard(feature_texts[0], feature_texts[2])
        mhashes = [minhash_func(f) for f in feature_texts]
        mh_sim_sim = jaccard(mhashes[0], mhashes[1])
        mh_sim_dis = jaccard(mhashes[0], mhashes[2])
        sim_errs.append(abs(sim_sim - mh_sim_sim))
        dis_errs.append(abs(sim_dis - mh_sim_dis))
    print(
        f"{label}: "
        f"Error Sim Mean {mean(sim_errs)} - "
        f"Max {max(sim_errs)} - "
        f"Var {variance(sim_errs)} | "
        f"Error Dis Mean {mean(dis_errs)} - "
        f"Max {max(dis_errs)} - "
        f"Var {variance(dis_errs)}"
    )
if __name__ == "__main__":
    # Run the full benchmark suite: equality checks, timings, quality metrics.
    compat()
    performance()
    quality(298)
| [
"numpy.uint32",
"numpy.uint64",
"numpy.bitwise_xor",
"numpy.ones",
"iscc_bench.utils.load_text_file",
"statistics.variance",
"numpy.full",
"xxhash.xxh64_intdigest",
"numpy.random.RandomState",
"xxhash.xxh32_intdigest",
"iscc_bench.algos.slide.sliding_window",
"numpy.minimum",
"iscc_bench.rea... | [((970, 1000), 'numpy.random.RandomState', 'np.random.RandomState', ([], {'seed': '(28)'}), '(seed=28)\n', (991, 1000), True, 'import numpy as np\n'), ((2729, 2822), 'numpy.array', 'np.array', (['[MINHASH_PERMUTATIONS[0][:128], MINHASH_PERMUTATIONS[1][:128]]'], {'dtype': 'np.uint64'}), '([MINHASH_PERMUTATIONS[0][:128], MINHASH_PERMUTATIONS[1][:128]],\n dtype=np.uint64)\n', (2737, 2822), True, 'import numpy as np\n'), ((4772, 4865), 'numpy.array', 'np.array', (['[MINHASH_PERMUTATIONS[0][:192], MINHASH_PERMUTATIONS[1][:192]]'], {'dtype': 'np.uint64'}), '([MINHASH_PERMUTATIONS[0][:192], MINHASH_PERMUTATIONS[1][:192]],\n dtype=np.uint64)\n', (4780, 4865), True, 'import numpy as np\n'), ((2419, 2512), 'numpy.array', 'np.array', (['[MINHASH_PERMUTATIONS[0][:128], MINHASH_PERMUTATIONS[1][:128]]'], {'dtype': 'np.uint64'}), '([MINHASH_PERMUTATIONS[0][:128], MINHASH_PERMUTATIONS[1][:128]],\n dtype=np.uint64)\n', (2427, 2512), True, 'import numpy as np\n'), ((2891, 2915), 'numpy.uint64', 'np.uint64', (['((1 << 61) - 1)'], {}), '((1 << 61) - 1)\n', (2900, 2915), True, 'import numpy as np\n'), ((2932, 2956), 'numpy.uint32', 'np.uint32', (['((1 << 32) - 1)'], {}), '((1 << 32) - 1)\n', (2941, 2956), True, 'import numpy as np\n'), ((2975, 3015), 'numpy.full', 'np.full', (['(128)', '_max_hash'], {'dtype': 'np.uint64'}), '(128, _max_hash, dtype=np.uint64)\n', (2982, 3015), True, 'import numpy as np\n'), ((3741, 3781), 'numpy.full', 'np.full', (['(64)', 'MAX_UINT64'], {'dtype': 'np.uint64'}), '(64, MAX_UINT64, dtype=np.uint64)\n', (3748, 3781), True, 'import numpy as np\n'), ((4016, 4056), 'numpy.full', 'np.full', (['(64)', 'MAX_UINT64'], {'dtype': 'np.uint64'}), '(64, MAX_UINT64, dtype=np.uint64)\n', (4023, 4056), True, 'import numpy as np\n'), ((4932, 4956), 'numpy.uint64', 'np.uint64', (['((1 << 61) - 1)'], {}), '((1 << 61) - 1)\n', (4941, 4956), True, 'import numpy as np\n'), ((4973, 4997), 'numpy.uint32', 'np.uint32', (['((1 << 32) - 1)'], {}), '((1 << 32) - 
1)\n', (4982, 4997), True, 'import numpy as np\n'), ((5016, 5056), 'numpy.full', 'np.full', (['(192)', '_max_hash'], {'dtype': 'np.uint64'}), '(192, _max_hash, dtype=np.uint64)\n', (5023, 5056), True, 'import numpy as np\n'), ((8103, 8144), 'iscc_bench.algos.slide.sliding_window', 'sliding_window', (['fps', '(3)', '(2)'], {'fillvalue': 'None'}), '(fps, 3, 2, fillvalue=None)\n', (8117, 8144), False, 'from iscc_bench.algos.slide import sliding_window\n'), ((9234, 9275), 'iscc_bench.algos.slide.sliding_window', 'sliding_window', (['fps', '(3)', '(2)'], {'fillvalue': 'None'}), '(fps, 3, 2, fillvalue=None)\n', (9248, 9275), False, 'from iscc_bench.algos.slide import sliding_window\n'), ((10366, 10407), 'iscc_bench.algos.slide.sliding_window', 'sliding_window', (['fps', '(3)', '(2)'], {'fillvalue': 'None'}), '(fps, 3, 2, fillvalue=None)\n', (10380, 10407), False, 'from iscc_bench.algos.slide import sliding_window\n'), ((2366, 2395), 'numpy.ones', 'np.ones', (['(128)'], {'dtype': 'np.uint64'}), '(128, dtype=np.uint64)\n', (2373, 2395), True, 'import numpy as np\n'), ((2654, 2681), 'numpy.minimum', 'np.minimum', (['phv', 'hashvalues'], {}), '(phv, hashvalues)\n', (2664, 2681), True, 'import numpy as np\n'), ((3194, 3221), 'numpy.minimum', 'np.minimum', (['phv', 'hashvalues'], {}), '(phv, hashvalues)\n', (3204, 3221), True, 'import numpy as np\n'), ((4629, 4650), 'numpy.uint32', 'np.uint32', (['MAX_UINT32'], {}), '(MAX_UINT32)\n', (4638, 4650), True, 'import numpy as np\n'), ((5231, 5258), 'numpy.minimum', 'np.minimum', (['phv', 'hashvalues'], {}), '(phv, hashvalues)\n', (5241, 5258), True, 'import numpy as np\n'), ((7084, 7095), 'time.time', 'time.time', ([], {}), '()\n', (7093, 7095), False, 'import time\n'), ((7141, 7152), 'time.time', 'time.time', ([], {}), '()\n', (7150, 7152), False, 'import time\n'), ((7456, 7467), 'time.time', 'time.time', ([], {}), '()\n', (7465, 7467), False, 'import time\n'), ((7513, 7524), 'time.time', 'time.time', ([], {}), '()\n', (7522, 
7524), False, 'import time\n'), ((8463, 8506), 'iscc_bench.algos.metrics.jaccard', 'jaccard', (['feature_texts[0]', 'feature_texts[1]'], {}), '(feature_texts[0], feature_texts[1])\n', (8470, 8506), False, 'from iscc_bench.algos.metrics import jaccard\n'), ((8525, 8568), 'iscc_bench.algos.metrics.jaccard', 'jaccard', (['feature_texts[0]', 'feature_texts[2]'], {}), '(feature_texts[0], feature_texts[2])\n', (8532, 8568), False, 'from iscc_bench.algos.metrics import jaccard\n'), ((8654, 8685), 'iscc_bench.algos.metrics.jaccard', 'jaccard', (['mhashes[0]', 'mhashes[1]'], {}), '(mhashes[0], mhashes[1])\n', (8661, 8685), False, 'from iscc_bench.algos.metrics import jaccard\n'), ((8707, 8738), 'iscc_bench.algos.metrics.jaccard', 'jaccard', (['mhashes[0]', 'mhashes[2]'], {}), '(mhashes[0], mhashes[2])\n', (8714, 8738), False, 'from iscc_bench.algos.metrics import jaccard\n'), ((9594, 9637), 'iscc_bench.algos.metrics.jaccard', 'jaccard', (['feature_texts[0]', 'feature_texts[1]'], {}), '(feature_texts[0], feature_texts[1])\n', (9601, 9637), False, 'from iscc_bench.algos.metrics import jaccard\n'), ((9656, 9699), 'iscc_bench.algos.metrics.jaccard', 'jaccard', (['feature_texts[0]', 'feature_texts[2]'], {}), '(feature_texts[0], feature_texts[2])\n', (9663, 9699), False, 'from iscc_bench.algos.metrics import jaccard\n'), ((9785, 9816), 'iscc_bench.algos.metrics.jaccard', 'jaccard', (['mhashes[0]', 'mhashes[1]'], {}), '(mhashes[0], mhashes[1])\n', (9792, 9816), False, 'from iscc_bench.algos.metrics import jaccard\n'), ((9838, 9869), 'iscc_bench.algos.metrics.jaccard', 'jaccard', (['mhashes[0]', 'mhashes[2]'], {}), '(mhashes[0], mhashes[2])\n', (9845, 9869), False, 'from iscc_bench.algos.metrics import jaccard\n'), ((10726, 10769), 'iscc_bench.algos.metrics.jaccard', 'jaccard', (['feature_texts[0]', 'feature_texts[1]'], {}), '(feature_texts[0], feature_texts[1])\n', (10733, 10769), False, 'from iscc_bench.algos.metrics import jaccard\n'), ((10788, 10831), 
'iscc_bench.algos.metrics.jaccard', 'jaccard', (['feature_texts[0]', 'feature_texts[2]'], {}), '(feature_texts[0], feature_texts[2])\n', (10795, 10831), False, 'from iscc_bench.algos.metrics import jaccard\n'), ((10915, 10946), 'iscc_bench.algos.metrics.jaccard', 'jaccard', (['mhashes[0]', 'mhashes[1]'], {}), '(mhashes[0], mhashes[1])\n', (10922, 10946), False, 'from iscc_bench.algos.metrics import jaccard\n'), ((10968, 10999), 'iscc_bench.algos.metrics.jaccard', 'jaccard', (['mhashes[0]', 'mhashes[2]'], {}), '(mhashes[0], mhashes[2])\n', (10975, 10999), False, 'from iscc_bench.algos.metrics import jaccard\n'), ((2611, 2631), 'numpy.uint64', 'np.uint64', (['_max_hash'], {}), '(_max_hash)\n', (2620, 2631), True, 'import numpy as np\n'), ((3151, 3171), 'numpy.uint64', 'np.uint64', (['_max_hash'], {}), '(_max_hash)\n', (3160, 3171), True, 'import numpy as np\n'), ((3841, 3865), 'numpy.bitwise_xor', 'np.bitwise_xor', (['masks', 'f'], {}), '(masks, f)\n', (3855, 3865), True, 'import numpy as np\n'), ((4116, 4140), 'numpy.bitwise_xor', 'np.bitwise_xor', (['masks', 'f'], {}), '(masks, f)\n', (4130, 4140), True, 'import numpy as np\n'), ((4714, 4738), 'numpy.bitwise_xor', 'np.bitwise_xor', (['masks', 'f'], {}), '(masks, f)\n', (4728, 4738), True, 'import numpy as np\n'), ((5188, 5208), 'numpy.uint64', 'np.uint64', (['_max_hash'], {}), '(_max_hash)\n', (5197, 5208), True, 'import numpy as np\n'), ((7707, 7718), 'iscc_bench.readers.gutenberg.gutenberg', 'gutenberg', ([], {}), '()\n', (7716, 7718), False, 'from iscc_bench.readers.gutenberg import gutenberg\n'), ((7720, 7728), 'iscc_bench.readers.mltext.mltext', 'mltext', ([], {}), '()\n', (7726, 7728), False, 'from iscc_bench.readers.mltext import mltext\n'), ((8236, 8253), 'iscc_bench.utils.load_text_file', 'load_text_file', (['t'], {}), '(t)\n', (8250, 8253), False, 'from iscc_bench.utils import load_text_file\n'), ((8290, 8307), 'iscc_bench.textid.normalize.text_normalize', 'text_normalize', (['t'], {}), '(t)\n', (8304, 
8307), False, 'from iscc_bench.textid.normalize import text_normalize\n'), ((9367, 9384), 'iscc_bench.utils.load_text_file', 'load_text_file', (['t'], {}), '(t)\n', (9381, 9384), False, 'from iscc_bench.utils import load_text_file\n'), ((9421, 9438), 'iscc_bench.textid.normalize.text_normalize', 'text_normalize', (['t'], {}), '(t)\n', (9435, 9438), False, 'from iscc_bench.textid.normalize import text_normalize\n'), ((10499, 10516), 'iscc_bench.utils.load_text_file', 'load_text_file', (['t'], {}), '(t)\n', (10513, 10516), False, 'from iscc_bench.utils import load_text_file\n'), ((10553, 10570), 'iscc_bench.textid.normalize.text_normalize', 'text_normalize', (['t'], {}), '(t)\n', (10567, 10570), False, 'from iscc_bench.textid.normalize import text_normalize\n'), ((7792, 7816), 'iscc_bench.algos.slide.sliding_window', 'sliding_window', (['text', '(13)'], {}), '(text, 13)\n', (7806, 7816), False, 'from iscc_bench.algos.slide import sliding_window\n'), ((7872, 7890), 'xxhash.xxh32_intdigest', 'xxh32_intdigest', (['f'], {}), '(f)\n', (7887, 7890), False, 'from xxhash import xxh32_intdigest, xxh64_intdigest\n'), ((7974, 7992), 'xxhash.xxh64_intdigest', 'xxh64_intdigest', (['f'], {}), '(f)\n', (7989, 7992), False, 'from xxhash import xxh32_intdigest, xxh64_intdigest\n'), ((8914, 8932), 'statistics.mean', 'mean', (['sim_errs_ref'], {}), '(sim_errs_ref)\n', (8918, 8932), False, 'from statistics import mean, variance\n'), ((8991, 9013), 'statistics.variance', 'variance', (['sim_errs_ref'], {}), '(sim_errs_ref)\n', (8999, 9013), False, 'from statistics import mean, variance\n'), ((9045, 9063), 'statistics.mean', 'mean', (['dis_errs_ref'], {}), '(dis_errs_ref)\n', (9049, 9063), False, 'from statistics import mean, variance\n'), ((9122, 9144), 'statistics.variance', 'variance', (['dis_errs_ref'], {}), '(dis_errs_ref)\n', (9130, 9144), False, 'from statistics import mean, variance\n'), ((10045, 10063), 'statistics.mean', 'mean', (['sim_errs_ref'], {}), '(sim_errs_ref)\n', (10049, 
10063), False, 'from statistics import mean, variance\n'), ((10122, 10144), 'statistics.variance', 'variance', (['sim_errs_ref'], {}), '(sim_errs_ref)\n', (10130, 10144), False, 'from statistics import mean, variance\n'), ((10176, 10194), 'statistics.mean', 'mean', (['dis_errs_ref'], {}), '(dis_errs_ref)\n', (10180, 10194), False, 'from statistics import mean, variance\n'), ((10253, 10275), 'statistics.variance', 'variance', (['dis_errs_ref'], {}), '(dis_errs_ref)\n', (10261, 10275), False, 'from statistics import mean, variance\n'), ((11176, 11194), 'statistics.mean', 'mean', (['sim_errs_ref'], {}), '(sim_errs_ref)\n', (11180, 11194), False, 'from statistics import mean, variance\n'), ((11253, 11275), 'statistics.variance', 'variance', (['sim_errs_ref'], {}), '(sim_errs_ref)\n', (11261, 11275), False, 'from statistics import mean, variance\n'), ((11307, 11325), 'statistics.mean', 'mean', (['dis_errs_ref'], {}), '(dis_errs_ref)\n', (11311, 11325), False, 'from statistics import mean, variance\n'), ((11384, 11406), 'statistics.variance', 'variance', (['dis_errs_ref'], {}), '(dis_errs_ref)\n', (11392, 11406), False, 'from statistics import mean, variance\n')] |
import datetime
import logging
import MySQLdb as mysql
import numpy as np
from api.infrastructure.mysql import connection
logger = logging.getLogger(__name__)
def getInsights(username='username', account='all', raw=False, local=False):
    """Collect activity- and sales-insight metrics for a user's database.

    Opens the per-user MySQL database ``data_<username>`` twice: first to
    gather activity/goal metrics, then to gather account and sales/margin
    metrics. Results are accumulated in a flat dict keyed by metric name.

    :param username: database suffix; the schema used is ``data_<username>``
    :param account: customer name to filter by, or ``'all'`` for every account
    :param raw: unused in the visible code path — TODO confirm
    :param local: unused in the visible code path — TODO confirm
    :return: dict of metrics; ``{}`` if the second phase raises
    """
    # username = 'username'
    dbname = 'data_{}'.format(username)
    data = dict()
    # --- Phase 1: activity / goal metrics --------------------------------
    try:
        logging.debug(dbname)
        mysql_connection = connection.MySQLConnection(dbname)
        con = mysql_connection.connect()
        cur = con.cursor()
        logging.debug(con)
        # Per-account breakdowns only make sense for the aggregate view.
        if account == 'all':
            data['plans per account'] = plansPerAccount(cur)
            data['actions per account'] = actionsPerAccount(cur)
        data['activity goals'] = activityGoals(cur, account=account)
        data['total sales plans'] = totalSalesPlans(cur, account=account)
        data['total plan goals'] = totalPlanGoals(cur, account=account)
        data['actions per day'] = actionsPerDay(cur, account=account)
        data['actions per month'] = actionsPerMonth(cur, account=account)
        data['actions per year'] = actionsPerYear(cur, account=account)
        data['goals per quarter'] = goalsPerQuarter(cur, account=account)
        data['total calls goal'] = totalCallsGoal(cur, account=account)
        data['total visits goal'] = totalVisitsGoal(cur, account=account)
        data['total offers goal'] = totalOffersGoal(cur, account=account)
        # 'actions per month' is keyed by the month number as a string.
        month = str(datetime.datetime.now().month)
        try:
            data['actions this month'] = data['actions per month'][month]
        except:
            # bare except: missing month key falls back to 0 (best-effort)
            data['actions this month'] = 0
        data['actions QTD'] = actionsQTD(cur, account=account)
        data['actions MTD'] = actionsMTD(cur, account=account)
        data['actions YTD'] = actionsYTD(cur, account=account)
        # Business days elapsed this year, for the per-day action ratio.
        today = str(datetime.datetime.now()).split(" ")[0]
        firstday = str(datetime.date(datetime.datetime.now().year, 1, 1))
        wd = np.busday_count(firstday, today) * 1.0
        data['actions YTD date ratio'] = round(data['actions YTD'] / wd, 2)
        # data['actions YTD accounts ratio'] = data['actions YTD'] / data['number of accounts']
        '''
        script_nop = "\
        SELECT FORMAT(SUM(buyprice),2) FROM\
        (SELECT buyprice\
        FROM products\
        ORDER BY buyprice DESC\
        LIMIT 10) price;\
        "
        '''
        '''
        SELECT orderNumber,
        FORMAT(SUM(quantityOrdered * priceEach),2) total
        FROM orderdetails
        GROUP BY orderNumber
        ORDER BY SUM(quantityOrdered * priceEach) DESC;
        '''
        '''
        param = 'action'
        customer = 'EGF'
        yearMin = 2015
        yearMax = 2015
        script_nop = "\
        SELECT tasks.account, MONTH(tasks.due),\
        SUM(tasks.{}) AS NumberOfProducts FROM tasks\
        LEFT JOIN account\
        ON sales.customer_id=customers.id\
        WHERE tasks.account = '{}' AND YEAR(tasks.due) BETWEEN {} AND {}\
        GROUP BY MONTH(tasks.due);\
        ".format(param, customer, yearMin, yearMax)
        '''
        # script_nop = "\
        # SELECT customers.name, sales.month,\
        # SUM(sales.{}) AS NumberOfProducts FROM sales\
        # LEFT JOIN customers\
        # ON sales.customer_id=customers.id\
        # WHERE customers.name = '{}' AND sales.year BETWEEN {} AND {}\
        # GROUP BY sales.month;\
        # ".format(param, customer, yearMin, yearMax)
        # script_nop = "\
        # SELECT `COLUMN_NAME`\
        # FROM `INFORMATION_SCHEMA`.`COLUMNS`\
        # WHERE `TABLE_SCHEMA`='results_userID_{}'\
        # AND `TABLE_NAME`='customers';\
        # ".format(username)
        '''
        script_nop = "\
        SELECT `COLUMN_NAME`\
        FROM `INFORMATION_SCHEMA`.`COLUMNS`\
        WHERE `TABLE_SCHEMA`='results_{}'\
        AND `TABLE_NAME`='critters';\
        ".format(username)
        #show columns from customers;\
        cur.execute(script_nop)
        cols = np.ravel(np.asarray(cur.fetchall()))
        results = dict()
        for c in cols:
            values = np.ravel(data[:, np.where(cols==c)])
            if not raw:
                if c != 'name':
                    values = values.astype(np.float)
                    values = np.around(np.nan_to_num(values), 2)
                    #if c == 'ccbm':
                    if c == 'risk':
                        results['rawRisk'] = values.tolist()
                        #values = colortables.convertToColor(values)
                        values = colortables.colorK1(values, 'json')
            results[c] = values.tolist()
        '''
    except mysql.Error as e:
        # raise
        print("Error {0}: {1}".format(e.args[0], e.args[1]))
        # sys.exit(1)
    finally:
        # NOTE(review): if connect() itself fails, `con` is unbound here and
        # the NameError is swallowed by the bare except below.
        try:
            if con:
                con.close()
        except:
            print('No Db connection possible')
    # --- Phase 2: account + sales/margin metrics (fresh connection) -------
    dbname = 'data_{}'.format(username)
    # data = dict()
    try:
        mysql_connection = connection.MySQLConnection(dbname)
        con = mysql_connection.connect()
        cur = con.cursor()
        today = datetime.datetime.now()
        # list of all accounts
        if account == 'all':
            data['accounts'] = accounts(cur)
        # NOTE(review): when account != 'all', data['accounts'] is never set,
        # so the 'lost accounts' line below raises KeyError and the whole
        # phase is aborted by the outer `except Exception` (data = {}).
        # active accounts and sales in the las 3 months
        data['active accounts'] = activeAccounts(cur)
        hoy = datetime.datetime.now()
        _tmb = datetime.datetime(year=hoy.year, month=hoy.month, day=hoy.day)
        try:
            data['active accounts growth'] = 100. * (
                len(data['active accounts'].keys()) / len(activeAccounts(cur, today=_tmb).keys()) - 1)
        except:
            data['active accounts growth'] = 0
        # for aa in data['active accounts'].keys():
        data['lost accounts'] = [a for a in data['accounts'] if a not in data['active accounts'].keys()]
        # data['actions-accounts ratio'] = round(float(data['actions YTD']) / len(data['accounts']), 2)
        # data['actions-active accounts ratio'] = round(float(data['actions YTD']) / len(data['active accounts'].keys()), 2)
        # data['penetration ratio'] = round(100 * float(len(data['active accounts'].keys())) / len(data['accounts']), 2)
        # Each ratio falls back to 0.0 on division-by-zero / missing keys.
        try:
            data['actions-accounts ratio'] = round(float(data['actions YTD']) / len(data['accounts']), 2)
        except:
            data['actions-accounts ratio'] = 0.0
        try:
            data['actions-active accounts ratio'] = round(
                float(data['actions YTD']) / len(data['active accounts'].keys()), 2)
        except:
            data['actions-active accounts ratio'] = 0.0
        try:
            data['penetration ratio'] = round(
                100 * float(len(data['active accounts'].keys())) / len(data['accounts']), 2)
        except:
            data['penetration ratio'] = 0.0
        data['sales YTD'] = round(salesYTD(cur, account=account), 2)
        data['margin YTD'] = round(salesYTD(cur, param='margin', account=account), 2)
        data['sales QTD'] = round(salesQTD(cur, year=today.year, account=account), 2)
        data['margin QTD'] = round(salesQTD(cur, param='margin', year=today.year, account=account), 2)
        data['sales MTD'] = round(salesMTD(cur, account=account), 2)
        data['sales per quarter'] = salesPerQuarter(cur, param='price', year=today.year, account=account)
        data['margin per quarter'] = salesPerQuarter(cur, param='margin', year=today.year, account=account)
        data['monthly sales'] = monthlyParam(cur, param='price', year=today.year, account=account)
        data['monthly sales last year'] = monthlyParam(cur, param='price', year=today.year - 1, account=account)
        data['monthly margin'] = monthlyParam(cur, param='margin', year=today.year, account=account)
        data['monthly margin last year'] = monthlyParam(cur, param='margin', year=today.year - 1, account=account)
        s = 0
        for d in data['monthly sales last year']:
            s += d['sales']
        data['sales last year'] = round(s, 2)
        try:
            data['sales growth YTD'] = round(100 * data['sales YTD'] / data['sales last year'], 0)
        except:
            data['sales growth YTD'] = 0.0
        s = 0
        if today.month > 1:
            # NOTE(review): monthlyParam returns a list of dicts, so this
            # divides dict by dict (TypeError) and the bare except always
            # yields 0.0; it likely meant [...]['sales'] with 0-based month
            # indexing (cf. getInsightsPerCustomer). Verify intended metric.
            try:
                data['sales growth month'] = round(
                    data['monthly sales'][today.month] / data['monthly sales'][today.month - 1], 2)
            except:
                data['sales growth month'] = 0.0
        else:
            # January: compare December of this year vs December last year.
            for l in data['monthly sales last year']:
                if l['month'] == 12:
                    sb = l['sales']
            for l in data['monthly sales']:
                if l['month'] == 12:
                    cs = l['sales']
            try:
                data['sales growth month'] = round(cs / sb, 2)
            except:
                data['sales growth month'] = 0.0
        s = 0
        for d in data['monthly margin last year']:
            s += d['margin']
        data['margin last year'] = round(s, 2)
        try:
            data['margin growth YTD'] = round(100 * data['margin YTD'] / data['margin last year'], 0)
        except:
            data['margin growth YTD'] = 0.0
        s = 0
        if today.month > 1:
            # NOTE(review): same dict/dict division issue as the sales branch.
            try:
                data['margin growth month'] = round(
                    data['monthly margin'][today.month] / data['monthly margin'][today.month - 1], 2)
            except:
                data['margin growth month'] = 0.0
        else:
            for l in data['monthly margin last year']:
                if l['month'] == 12:
                    sb = l['margin']
            for l in data['monthly margin']:
                if l['month'] == 12:
                    cs = l['margin']
            try:
                data['margin growth month'] = round(cs / sb, 2)
            except:
                data['margin growth month'] = 0.0
        # SALES
        currentQuarter = (today.month - 1) // 3 + 1
        salesCurrentQuarter = data['sales per quarter'][currentQuarter]
        if currentQuarter == 1:
            # Q1 compares against Q4 of the previous year.
            salesLastQuarter = round(salesPerQuarter(cur, year=today.year - 1, param='price', account=account)[4], 2)
        else:
            salesLastQuarter = round(data['sales per quarter'][currentQuarter - 1], 2)
        try:
            data['sales growth QTD'] = round(100 * salesCurrentQuarter / salesLastQuarter, 2)
        except:
            data['sales growth QTD'] = 0.0
        # MARGIN
        currentQuarter = (today.month - 1) // 3 + 1
        marginCurrentQuarter = data['margin per quarter'][currentQuarter]
        if currentQuarter == 1:
            marginLastQuarter = round(salesPerQuarter(cur, year=today.year - 1, param='margin', account=account)[4], 2)
        else:
            marginLastQuarter = round(data['margin per quarter'][currentQuarter - 1], 2)
        try:
            data['margin growth QTD'] = round(100 * marginCurrentQuarter / marginLastQuarter, 2)
        except:
            data['margin growth QTD'] = 0.0
    except Exception as e:
        print(e)
        # print("Error {0}: {1}".format(e.args[0], e.args[1]))
        # sys.exit(1)
        # Any phase-2 failure discards ALL metrics, including phase 1's.
        data = {}
    finally:
        try:
            if con:
                con.close()
        except:
            print('No Db connection possible')
    # print(data)
    # for k, v in data.iteritems():
    # print("{}: {}".format(k, v))
    return data
def getInsightsPerCustomer(username='username', account='all', raw=False, local=False):
    """Collect insight metrics for a single customer account.

    Variant of ``getInsights`` without the per-account breakdowns: phase 1
    gathers activity/goal metrics, phase 2 gathers sales/margin metrics for
    the given ``account``.

    :param username: database suffix; the schema used is ``data_<username>``
    :param account: customer name to filter by (``'all'`` also accepted)
    :param raw: unused in the visible code path — TODO confirm
    :param local: unused in the visible code path — TODO confirm
    :return: dict of metrics; ``{}`` if phase 2 raises
    """
    # username = 'username'
    dbname = 'data_{}'.format(username)
    data = dict()
    # --- Phase 1: activity / goal metrics --------------------------------
    try:
        mysql_connection = connection.MySQLConnection(dbname)
        con = mysql_connection.connect()
        cur = con.cursor()
        # data['plans per account'] = plansPerAccount(cur)
        # data['actions per account'] = actionsPerAccount(cur)
        data['activity goals'] = activityGoals(cur, account=account)
        data['total sales plans'] = totalSalesPlans(cur, account=account)
        data['total plan goals'] = totalPlanGoals(cur, account=account)
        data['actions per day'] = actionsPerDay(cur, account=account)
        data['actions per month'] = actionsPerMonth(cur, account=account)
        data['actions per year'] = actionsPerYear(cur, account=account)
        data['goals per quarter'] = goalsPerQuarter(cur, account=account)
        data['total calls goal'] = totalCallsGoal(cur, account=account)
        data['total visits goal'] = totalVisitsGoal(cur, account=account)
        data['total offers goal'] = totalOffersGoal(cur, account=account)
        # 'actions per month' is keyed by the month number as a string.
        month = str(datetime.datetime.now().month)
        try:
            data['actions this month'] = data['actions per month'][month]
        except:
            # bare except: missing month key falls back to 0 (best-effort)
            data['actions this month'] = 0
        data['actions QTD'] = actionsQTD(cur, account=account)
        data['actions MTD'] = actionsMTD(cur, account=account)
        data['actions YTD'] = actionsYTD(cur, account=account)
        # Business days elapsed this year, for the per-day action ratio.
        today = str(datetime.datetime.now()).split(" ")[0]
        firstday = str(datetime.date(datetime.datetime.now().year, 1, 1))
        wd = np.busday_count(firstday, today) * 1.0
        data['actions YTD date ratio'] = round(data['actions YTD'] / wd, 2)
        # data['actions YTD accounts ratio'] = data['actions YTD'] / data['number of accounts']
    except Exception as e:
        # NOTE(review): re-raises (unlike getInsights, which logs and
        # continues); the finally block below still runs first.
        raise
        # print("Error {0}: {1}".format(e.args[0], e.args[1]))
        # sys.exit(1)
    finally:
        # NOTE(review): if connect() itself fails, `con` is unbound here and
        # the NameError is swallowed by the bare except below.
        try:
            if con:
                con.close()
        except:
            print('No Db connection possible')
    # --- Phase 2: sales / margin metrics (fresh connection) ---------------
    dbname = 'data_{}'.format(username)
    try:
        mysql_connection = connection.MySQLConnection(dbname)
        con = mysql_connection.connect()
        cur = con.cursor()
        today = datetime.datetime.now()
        ## list of all accounts
        # data['accounts'] = accounts(cur)
        ## active accounts and sales in the las 3 months
        # data['active accounts'] = activeAccounts(cur)
        # data['lost accounts'] = [a for a in data['accounts'] if a not in data['active accounts'].keys()]
        # data['actions-accounts ratio'] = float(data['actions YTD']) / len(data['accounts'])
        # data['actions-active accounts ratio'] = float(data['actions YTD']) / len(data['active accounts'].keys())
        # data['penetration ratio'] = 100 * float(len(data['active accounts'].keys())) / len(data['accounts'])
        data['sales per quarter'] = salesPerQuarter(cur, param='price', year=today.year, account=account)
        data['margin per quarter'] = salesPerQuarter(cur, param='margin', year=today.year, account=account)
        data['sales YTD'] = salesYTD(cur, account=account)
        data['margin YTD'] = salesYTD(cur, param='margin', account=account)
        data['sales QTD'] = salesQTD(cur, year=today.year, account=account)
        data['margin QTD'] = salesQTD(cur, param='margin', year=today.year, account=account)
        data['sales MTD'] = salesMTD(cur, account=account)
        data['monthly sales'] = monthlyParam(cur, param='price', year=today.year, account=account)
        data['monthly sales last year'] = monthlyParam(cur, param='price', year=today.year - 1, account=account)
        s = 0
        for d in data['monthly sales last year']:
            s += d['sales']
        data['sales last year'] = round(s, 2)
        data['sales growth YTD'] = round(100 * data['sales YTD'] / data['sales last year'], 2)
        # NOTE(review): monthlyParam's list is 0-based (index i holds month
        # i+1), so [today.month] is next month's entry and raises IndexError
        # in December — this print can abort phase 2. Verify indexing.
        print(data['monthly sales'][today.month]['sales'])
        s = 0
        if today.month > 1:
            # NOTE(review): same off-by-one as above; the bare except turns
            # any failure into 0.0.
            try:
                data['sales growth month'] = data['monthly sales'][today.month]['sales'] / \
                                             data['monthly sales'][today.month - 1]['sales']
            except:
                data['sales growth month'] = 0.0
        else:
            # January: compare December of this year vs December last year.
            for l in data['monthly sales last year']:
                if l['month'] == 12:
                    sb = l['sales']
            for l in data['monthly sales']:
                if l['month'] == 12:
                    cs = l['sales']
            data['sales growth month'] = round(cs / sb, 2)
        currentQuarter = (today.month - 1) // 3 + 1
        salesCurrentQuarter = data['sales per quarter'][currentQuarter]
        if currentQuarter == 1:
            # Q1 compares against Q4 of the previous year.
            salesLastQuarter = salesPerQuarter(cur, year=today.year - 1, param='price', account=account)[4]
        else:
            salesLastQuarter = data['sales per quarter'][currentQuarter - 1]
        try:
            data['sales growth QTD'] = round(100. * salesCurrentQuarter / salesLastQuarter, 2)
        except:
            data['sales growth QTD'] = 0.0
    except Exception as e:
        # print("Error {0}: {1}".format(e.args[0], e.args[1]))
        # sys.exit(1)
        # raise
        # Any phase-2 failure discards ALL metrics, including phase 1's.
        data = {}
    finally:
        try:
            if con:
                con.close()
        except:
            print('No Db connection possible')
    # print(data)
    # for k, v in data.iteritems():
    # print("{}: {}".format(k, v))
    return data
def accounts(cur):
    """Return the names of all customers in the `customers` table.

    :param cur: open MySQL cursor
    :return: list of customer names (first column of each row)
    """
    script_nop = "\
    SELECT customers.name\
    FROM customers;\
    "
    cur.execute(script_nop)
    rows = cur.fetchall()
    names = []
    for row in rows:
        names.append(row[0])
    return names
def accountsThreeYD(cur):
    """Return total sales per customer over roughly the last three years.

    The window runs from January 1st three calendar years ago to now.

    Parameters
    ----------
    cur : database cursor used to run the query.

    Returns
    -------
    dict
        Maps customer name -> total sales, rounded to 2 decimals.
    """
    # Fix: the original computed a string version of `today` and then
    # immediately overwrote it -- the dead assignment is removed.
    today = datetime.datetime.now()
    # Window start: Jan 1st, three years before the current year.
    tyb = datetime.datetime(year=today.year - 3, month=1, day=1)
    script_nop = "\
    SELECT customers.name, SUM(sales.{})\
    FROM sales\
    LEFT JOIN customers\
    ON sales.customer_id=customers.id\
    WHERE sales.date BETWEEN DATE('{}') AND DATE('{}')\
    GROUP BY customers.name;\
    ".format('price', tyb, today)
    cur.execute(script_nop)
    _data = cur.fetchall()
    # Map customer name -> rounded total.
    return {row[0]: round(row[1], 2) for row in _data}
def monthlyParam(cur, param='price', yearMin=2008, year=2015, account='all'):
    """Return a 12-entry list of monthly totals for `param`.

    Parameters
    ----------
    cur : database cursor used to run the query.
    param : str
        Sales column to sum; 'price' is reported under the key 'sales'.
    yearMin : int
        NOTE: ignored -- it is immediately overwritten with `year`
        (preserved from the original for backward compatibility).
    year : int
        Year whose months are summed.
    account : str
        Customer name, or 'all' for every customer.

    Returns
    -------
    list of dict
        Twelve dicts `{param: total, 'month': m}`; months with no rows
        keep a 0.0 total.
    """
    yearMin = year
    if account != 'all':
        script_sales = "\
        SELECT MONTH(sales.date), SUM(sales.{0}) AS NumberOfProducts FROM sales\
        LEFT JOIN customers\
        ON sales.customer_id=customers.id\
        WHERE customers.name = '{1}' AND YEAR(sales.date) BETWEEN {2} AND {3}\
        GROUP BY sales.month;\
        ".format(param, account, yearMin, year)
    else:
        script_sales = "\
        SELECT MONTH(sales.date), SUM(sales.{}) AS NumberOfProducts FROM sales\
        LEFT JOIN customers\
        ON sales.customer_id=customers.id\
        WHERE YEAR(sales.date) BETWEEN {} AND {}\
        GROUP BY sales.month;\
        ".format(param, yearMin, year)
    # The result key for the raw 'price' column is reported as 'sales'.
    if param == 'price':
        param = 'sales'
    cur.execute(script_sales)
    rows = cur.fetchall()
    # Fix: removed dead code (`month`/`_months` were built but never used)
    # and the unnecessary np.asarray round-trip over the fetched rows.
    results = [{param: 0.0, 'month': m} for m in range(1, 13)]
    for row in rows:
        m = int(row[0])
        results[m - 1] = {param: round(float(row[1]), 2), 'month': m}
    return results
def salesYTD(cur, param='price', account=None):
    """Year-to-date sum of `param` from January 1st until today.

    For a named account, returns {account: total}; returns 0 when there
    is no usable data (e.g. a NULL sum or an empty result set).
    """
    now = datetime.datetime.now()
    today = str(now).split(" ")[0]
    firstday = str(datetime.date(now.year, 1, 1))
    if account == 'all':
        script_nop = "\
        SELECT SUM(sales.{})\
        FROM sales\
        WHERE DATE(sales.date) BETWEEN '{}' AND '{}'\
        ".format(param, firstday, today)
    else:
        script_nop = "\
        SELECT customers.name, SUM(sales.{})\
        FROM sales\
        LEFT JOIN customers\
        ON sales.customer_id=customers.id\
        WHERE customers.name = '{}' AND DATE(sales.date) BETWEEN '{}' AND '{}';\
        ".format(param, account, firstday, today)
    cur.execute(script_nop)
    rows = cur.fetchall()
    result = dict()
    try:
        if rows[0][0] is None:
            result = 0
        else:
            for row in rows:
                result[row[0]] = round(row[1], 2)
    except:  # noqa: E722 -- preserved from original: any failure means "no data"
        result = 0
    return result
def salesMTD(cur, param='price', account='all'):
    """Month-to-date total of `param`, optionally for one account.

    Returns the rounded sum, or 0 when the query yields nothing usable.
    """
    now = datetime.datetime.now()
    month = str(now.month)
    year = str(now.year)
    if account == 'all':
        query = "\
        SELECT SUM(sales.{}) from sales\
        WHERE MONTH(sales.date)={} AND YEAR(sales.date)={};\
        ".format(param, month, year)
    else:
        query = "\
        SELECT SUM(sales.{}) FROM sales\
        LEFT JOIN customers\
        ON sales.customer_id=customers.id\
        WHERE customers.name = '{}' AND MONTH(sales.date)={} AND YEAR(sales.date)={};\
        ".format(param, account, month, year)
    cur.execute(query)
    rows = cur.fetchall()
    try:
        total = rows[0][0]
        if total is None:
            return 0
        return round(total, 2)
    except:  # noqa: E722 -- preserved from original: any failure means "no data"
        return 0
def salesQTD(cur, param='price', year=None, account='all'):
    """Quarter-to-date total of `param` for `year` (default: current year).

    Returns the rounded sum, or 0 when the query yields nothing usable.
    """
    if year is None:
        year = datetime.datetime.now().year
    year = str(year)
    today = str(datetime.datetime.now()).split(" ")[0]
    if account == 'all':
        query = "\
        SELECT SUM(sales.{}) from sales\
        WHERE QUARTER(sales.date)<=QUARTER('{}') AND YEAR(sales.date)='{}';\
        ".format(param, today, year)
    else:
        query = "\
        SELECT customers.name, SUM(sales.{}) FROM sales\
        LEFT JOIN customers\
        ON sales.customer_id=customers.id\
        WHERE customers.name = '{}' AND QUARTER(sales.date)<=QUARTER('{}') AND YEAR(sales.date)='{}'\
        GROUP BY customers.name;\
        ".format(param, account, today, year)
    cur.execute(query)
    rows = cur.fetchall()
    try:
        total = rows[0][0]
        if total is None:
            return 0
        return round(total, 2)
    except:  # noqa: E722 -- preserved from original: any failure means "no data"
        return 0
def salesPerQuarter(cur, param='price', year=None, account='all'):
    """Totals of `param` per quarter of `year`; keys 1-4 are always present.

    Returns {quarter: total} with missing quarters zero-filled, or 0 when
    a row cannot be parsed.
    """
    if year is None:
        year = str(datetime.datetime.now().year)
    if account == 'all':
        query = "\
        SELECT QUARTER(sales.date), SUM(sales.{}) from sales\
        WHERE YEAR(sales.date)='{}'\
        GROUP BY QUARTER(sales.date);\
        ".format(param, year)
    else:
        query = "\
        SELECT QUARTER(sales.date), SUM(sales.{}) FROM sales\
        LEFT JOIN customers\
        ON sales.customer_id=customers.id\
        WHERE customers.name = '{}' AND YEAR(sales.date)='{}'\
        GROUP BY QUARTER(sales.date);\
        ".format(param, account, year)
    cur.execute(query)
    rows = cur.fetchall()
    try:
        totals = {}
        for row in rows:
            totals[int(row[0])] = round(row[1], 2)
        if not totals:
            # No sales at all: zero-fill up to the current quarter.
            now = datetime.datetime.now()
            current_quarter = (now.month - 1) // 3 + 1
            for q in range(1, current_quarter + 1):
                totals[q] = 0
    except:  # noqa: E722 -- preserved from original: any failure means "no data"
        return 0
    # Guarantee all four quarter keys exist.
    for q in (1, 2, 3, 4):
        if q not in totals.keys():
            totals[q] = 0.0
    return totals
def activeAccounts(cur, param='price', today=None):
    """Return per-customer totals of `param` over the trailing three months.

    Parameters
    ----------
    cur : database cursor used to run the query.
    param : str
        Sales column to sum.
    today : date-like or None
        End of the window; defaults to now.

    Returns
    -------
    dict
        Maps customer name -> total, rounded to 2 decimals.
    """
    import calendar

    # Fix: removed dead assignments (the original built a string `today`
    # and a throwaway `tmb`, both immediately overwritten).
    if today is None:
        today = datetime.datetime.now()
    else:
        today = datetime.datetime(year=today.year, month=today.month, day=today.day)
    # Three months back, wrapping the year boundary when needed.
    dif = today.month - 3
    if dif <= 0:
        m = 12 + dif
        y = today.year - 1
    else:
        m = dif
        y = today.year
    # Fix: clamp the day so short target months do not raise ValueError
    # (e.g. May 31 minus three months would previously ask for Feb 31).
    day = min(today.day, calendar.monthrange(y, m)[1])
    tmb = datetime.datetime(year=y, month=m, day=day)
    script_nop = "\
    SELECT customers.name, SUM(sales.{})\
    FROM sales\
    LEFT JOIN customers\
    ON sales.customer_id=customers.id\
    WHERE sales.date BETWEEN DATE('{}') AND DATE('{}')\
    GROUP BY customers.name;\
    ".format(param, tmb, today)
    cur.execute(script_nop)
    rows = cur.fetchall()
    return {row[0]: round(row[1], 2) for row in rows}
def goalsPerQuarter(cur, minYear=None, maxYear=None, account='all'):
    """Return summed plan goals per quarter; keys 1-4 are always present.

    Parameters
    ----------
    cur : database cursor used to run the query.
    minYear, maxYear : year bounds; both default to the current year.
    account : str
        Account name, or 'all' for every account.

    Returns
    -------
    dict or int
        {quarter: goal} with missing quarters zero-filled, or 0 when a
        row cannot be parsed.
    """
    if minYear is None:
        minYear = str(datetime.datetime.now().year)
    if maxYear is None:
        maxYear = minYear
    if account == 'all':
        script_nop = "\
        SELECT QUARTER(due), SUM(goal) from plans\
        WHERE YEAR(due) BETWEEN {} AND {}\
        GROUP BY QUARTER(due);\
        ".format(minYear, maxYear)
    else:
        script_nop = "\
        SELECT QUARTER(due), SUM(goal) from plans\
        WHERE YEAR(due) BETWEEN {} AND {} AND account='{}'\
        GROUP BY QUARTER(due);\
        ".format(minYear, maxYear, account)
    cur.execute(script_nop)
    _data = cur.fetchall()
    # Fix: removed leftover debug print(_data) and dead commented-out code.
    try:
        d = dict()
        for i in range(len(_data)):
            d[int(_data[i][0])] = int(_data[i][1])
        if d == {}:
            # No goals at all: zero-fill up to the current quarter.
            today = datetime.datetime.now()
            quarter = (today.month - 1) // 3 + 1
            for q in range(1, quarter + 1):
                d[q] = 0
    except Exception:
        return 0
    # Guarantee all four quarter keys exist.
    for q in (1, 2, 3, 4):
        if q not in d:
            d[q] = 0.0
    return d
def actionsPerYear(cur, account='all'):
    """Count of actions (tasks) grouped by due year.

    Returns {year: count}, or 0 when a row cannot be parsed.
    """
    if account == 'all':
        query = "\
        SELECT YEAR(due), COUNT(action) from tasks\
        GROUP BY YEAR(due);\
        "
    else:
        query = "\
        SELECT YEAR(due), COUNT(action) from tasks\
        WHERE account = '{}'\
        GROUP BY YEAR(due);\
        ".format(account)
    cur.execute(query)
    rows = cur.fetchall()
    try:
        return {int(row[0]): int(row[1]) for row in rows}
    except:  # noqa: E722 -- preserved from original: any failure means "no data"
        return 0
def actionsQTD(cur, year=None, account='all'):
    """Quarter-to-date count of tasks due in `year` (default: current year).

    Returns the count as int, or 0 when the query yields nothing usable
    -- consistent with actionsMTD/actionsYTD, which already guard this.
    """
    if year is None:
        year = str(datetime.datetime.now().year)
    today = str(datetime.datetime.now()).split(" ")[0]
    if account == 'all':
        script_nop = "\
        SELECT COUNT(action) from tasks\
        WHERE QUARTER(tasks.due)=QUARTER('{0}') AND DATE(tasks.due)<='{0}' AND YEAR(tasks.due)='{1}';\
        ".format(today, year)
    else:
        script_nop = "\
        SELECT COUNT(action) from tasks\
        WHERE QUARTER(tasks.due)=QUARTER('{0}') AND DATE(tasks.due)<='{0}' AND YEAR(tasks.due)='{1}' AND account='{2}';\
        ".format(today, year, account)
    cur.execute(script_nop)
    _data = cur.fetchall()
    # Fix: the original indexed the result unguarded and crashed on an
    # empty or NULL result where its siblings return 0.
    try:
        return int(_data[0][0])
    except (IndexError, TypeError, ValueError):
        return 0
def actionsMTD(cur, year=None, account='all'):
    """Month-to-date count of tasks due; 0 when there is no usable data."""
    if year is None:
        year = str(datetime.datetime.now().year)
    month = str(datetime.datetime.now().month)
    if account == 'all':
        query = "\
        SELECT COUNT(action) from tasks\
        WHERE MONTH(tasks.due)<={} AND YEAR(tasks.due) BETWEEN {} AND {}\
        ".format(month, year, year)
    else:
        query = "\
        SELECT COUNT(action) from tasks\
        WHERE MONTH(tasks.due)<={} AND YEAR(tasks.due) BETWEEN {} AND {} AND account='{}';\
        ".format(month, year, year, account)
    cur.execute(query)
    rows = cur.fetchall()
    try:
        return int(rows[0][0])
    except:  # noqa: E722 -- preserved from original: any failure means "no data"
        return 0
def actionsYTD(cur, account='all'):
    """Year-to-date count of tasks due; 0 when there is no usable data."""
    now = datetime.datetime.now()
    today = str(now).split(" ")[0]
    firstday = str(datetime.date(now.year, 1, 1))
    if account == 'all':
        query = "\
        SELECT COUNT(id) from tasks\
        WHERE DATE(tasks.due) BETWEEN '{}' AND '{}';\
        ".format(firstday, today)
    else:
        query = "\
        SELECT COUNT(id) from tasks\
        WHERE DATE(tasks.due) BETWEEN '{}' AND '{}' AND account='{}';\
        ".format(firstday, today, account)
    cur.execute(query)
    rows = cur.fetchall()
    try:
        return int(rows[0][0])
    except:  # noqa: E722 -- preserved from original: any failure means "no data"
        return 0
def actionsPerMonth(cur, year=None, account='all'):
    """Count of tasks due per month of `year`; 0 on malformed data."""
    if year is None:
        year = str(datetime.datetime.now().year)
    if account == 'all':
        query = "\
        SELECT MONTH(due), COUNT(action) from tasks\
        WHERE YEAR(tasks.due) BETWEEN {} AND {}\
        GROUP BY MONTH(due);\
        ".format(year, year)
    else:
        query = "\
        SELECT MONTH(due), COUNT(action) from tasks\
        WHERE YEAR(tasks.due) BETWEEN {} AND {} AND account='{}'\
        GROUP BY MONTH(due);\
        ".format(year, year, account)
    cur.execute(query)
    rows = cur.fetchall()
    try:
        return {int(row[0]): int(row[1]) for row in rows}
    except:  # noqa: E722 -- preserved from original: any failure means "no data"
        return 0
def actionsPerDay(cur, year=None, yearMax=None, account='all'):
    """Count of tasks due per calendar day between `year` and `yearMax`.

    Returns {date-string: count}, or 0 when a row cannot be parsed.
    """
    if year is None:
        year = str(datetime.datetime.now().year)
    if yearMax is None:
        yearMax = year
    if account == 'all':
        query = "\
        SELECT DATE(due), COUNT(action) from tasks\
        WHERE YEAR(tasks.due) BETWEEN {} AND {}\
        GROUP BY DATE(due);\
        ".format(year, yearMax)
    else:
        query = "\
        SELECT DATE(due), COUNT(action) from tasks\
        WHERE YEAR(tasks.due) BETWEEN {} AND {} AND account='{}'\
        GROUP BY DATE(due);\
        ".format(year, yearMax, account)
    cur.execute(query)
    rows = cur.fetchall()
    try:
        return {str(row[0]): int(row[1]) for row in rows}
    except:  # noqa: E722 -- preserved from original: any failure means "no data"
        return 0
def totalPlanGoals(cur, account='all'):
    """Sum of all plan goals, optionally for one account; 0 on no data."""
    if account == 'all':
        query = "\
        SELECT SUM(goal) FROM plans;\
        "
    else:
        query = "\
        SELECT SUM(goal) FROM plans\
        WHERE account='{}';\
        ".format(account)
    cur.execute(query)
    rows = cur.fetchall()
    try:
        return float(rows[0][0])
    except:  # noqa: E722 -- preserved from original: any failure means "no data"
        return 0
def totalVisitsGoal(cur, year=None, account='all'):
    """Planned number of visits for `year`; 0 when there is no usable data."""
    if year is None:
        year = str(datetime.datetime.now().year)
    if account == 'all':
        query = "\
        SELECT SUM(visits) FROM plans\
        WHERE YEAR(due) BETWEEN {} AND {};\
        ".format(year, year)
    else:
        query = "\
        SELECT SUM(visits) FROM plans\
        WHERE YEAR(due) BETWEEN {} AND {} AND account='{}';\
        ".format(year, year, account)
    cur.execute(query)
    rows = cur.fetchall()
    try:
        return int(rows[0][0])
    except:  # noqa: E722 -- preserved from original: any failure means "no data"
        return 0
def totalCallsGoal(cur, year=None, account='all'):
    """Planned number of calls for `year`; 0 when there is no usable data."""
    if year is None:
        year = str(datetime.datetime.now().year)
    if account == 'all':
        query = "\
        SELECT SUM(calls) FROM plans\
        WHERE YEAR(due) BETWEEN {} AND {};\
        ".format(year, year)
    else:
        query = "\
        SELECT SUM(calls) FROM plans\
        WHERE YEAR(due) BETWEEN {} AND {} AND account='{}';\
        ".format(year, year, account)
    cur.execute(query)
    rows = cur.fetchall()
    try:
        return int(rows[0][0])
    except:  # noqa: E722 -- preserved from original: any failure means "no data"
        return 0
def totalOffersGoal(cur, year=None, account='all'):
    """Planned number of offers for `year`; 0 when there is no usable data."""
    if year is None:
        year = str(datetime.datetime.now().year)
    if account == 'all':
        query = "\
        SELECT SUM(offers) FROM plans\
        WHERE YEAR(due) BETWEEN {} AND {};\
        ".format(year, year)
    else:
        query = "\
        SELECT SUM(offers) FROM plans\
        WHERE YEAR(due) BETWEEN {} AND {} AND account='{}';\
        ".format(year, year, account)
    cur.execute(query)
    rows = cur.fetchall()
    try:
        return int(rows[0][0])
    except:  # noqa: E722 -- preserved from original: any failure means "no data"
        return 0
def totalSalesPlans(cur, account='all'):
    """Total number of sales plans, optionally for one account; 0 on no data."""
    if account == 'all':
        query = "\
        SELECT COUNT(*) FROM plans;\
        "
    else:
        query = "\
        SELECT COUNT(*) FROM plans\
        WHERE account='{}';\
        ".format(account)
    cur.execute(query)
    try:
        rows = cur.fetchall()
        return int(rows[0][0])
    except:  # noqa: E722 -- preserved from original: any failure means "no data"
        return 0
def plansPerAccount(cur):
    """Number of plans per account name.

    Returns {account: count}.
    """
    query = "\
    SELECT account, COUNT(*) FROM plans GROUP BY account;\
    "
    cur.execute(query)
    rows = cur.fetchall()
    return {row[0]: int(row[1]) for row in rows}
def actionsPerAccount(cur):
    """Number of tasks (actions) per account name.

    Returns {account: count}.
    """
    query = "\
    SELECT account, COUNT(*) FROM tasks GROUP BY account;\
    "
    cur.execute(query)
    rows = cur.fetchall()
    return {row[0]: int(row[1]) for row in rows}
def activityGoals(cur, account='all'):
    """Count of tasks grouped by action type.

    Returns {action: count}; any parsing failure propagates (the
    original's except clause re-raises).
    """
    if account == 'all':
        query = "\
        SELECT action, COUNT(*) FROM tasks GROUP BY action;\
        "
    else:
        query = "\
        SELECT action, COUNT(*) FROM tasks\
        WHERE account = '{}'\
        GROUP BY action;\
        ".format(account)
    cur.execute(query)
    rows = cur.fetchall()
    try:
        return {row[0]: int(row[1]) for row in rows}
    except:  # noqa: E722 -- preserved from original, which re-raises
        raise
if __name__ == "__main__":
    # Manual smoke-test harness: exercises getInsights and salesPerQuarter
    # against a live MySQL database and prints the results as JSON.
    import json
    # data = getInsights(username='test', local=True)
    data = getInsights(username='test', local=True, account='Acrion')
    # print(json.dumps(data, sort_keys=True, indent=4, separators=(',', ': ')))
    print(json.dumps(data))
    data = getInsights(username='test', local=True, account='all')
    # print(json.dumps(data, sort_keys=True, indent=4, separators=(',', ': ')))
    print(json.dumps(data))
    username = 'martin_masip'
    dbname = 'data_userID_{}_data_test_super_reduced_8_xlsx'.format(username)
    # con = mysql.connect('localhost', 'webadmin', 'Qymatix!!!', dbname);
    # NOTE(review): credentials and host are hard-coded here -- consider
    # moving them to configuration/environment before shipping.
    con = mysql.connect('172.16.31.10', 'webuser', 'Qymatix!!!', dbname);
    cur = con.cursor()
    today = datetime.datetime.now()
    account = 'Acrion'
    # Compare last year's final quarter with this year's.
    salesLastQuarter = salesPerQuarter(cur, year=today.year - 1, param='price', account=account)[4]
    print(salesLastQuarter)
    salesLastQuarter = salesPerQuarter(cur, year=today.year, param='price', account=account)[4]
    print(salesLastQuarter)
    # data = getInsightsPerCustomer(username='test', local=True, account='all')
    # print(json.dumps(data, sort_keys=True, indent=4, separators=(',', ': ')))
    # data = getInsightsPerCustomer(username='test', local=True, account='Acrion')
    # print(json.dumps(data, sort_keys=True, indent=4, separators=(',', ': ')))
    # Disabled local-database variant of the smoke test (kept for reference).
    if False:
        username = 'test'
        dbname = 'data_userID_{}'.format(username)
        con = mysql.connect('localhost', 'webadmin', 'Qymatix!!!', dbname);
        cur = con.cursor()
        d = monthlyParam(cur, param='price', yearMin=2008, year=2015, account='Metro')
        print(d)
        d = salesPerQuarter(cur, param='price', year=2015, account='Zama')
        print("///")
        print(d)
| [
"logging.debug",
"MySQLdb.connect",
"json.dumps",
"datetime.datetime",
"api.infrastructure.mysql.connection.MySQLConnection",
"datetime.datetime.now",
"logging.getLogger",
"numpy.busday_count"
] | [((134, 161), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (151, 161), False, 'import logging\n'), ((17917, 17940), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (17938, 17940), False, 'import datetime\n'), ((17951, 18005), 'datetime.datetime', 'datetime.datetime', ([], {'year': '(today.year - 3)', 'month': '(1)', 'day': '(1)'}), '(year=today.year - 3, month=1, day=1)\n', (17968, 18005), False, 'import datetime\n'), ((36437, 36499), 'MySQLdb.connect', 'mysql.connect', (['"""172.16.31.10"""', '"""webuser"""', '"""Qymatix!!!"""', 'dbname'], {}), "('172.16.31.10', 'webuser', 'Qymatix!!!', dbname)\n", (36450, 36499), True, 'import MySQLdb as mysql\n'), ((36537, 36560), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (36558, 36560), False, 'import datetime\n'), ((424, 445), 'logging.debug', 'logging.debug', (['dbname'], {}), '(dbname)\n', (437, 445), False, 'import logging\n'), ((474, 508), 'api.infrastructure.mysql.connection.MySQLConnection', 'connection.MySQLConnection', (['dbname'], {}), '(dbname)\n', (500, 508), False, 'from api.infrastructure.mysql import connection\n'), ((586, 604), 'logging.debug', 'logging.debug', (['con'], {}), '(con)\n', (599, 604), False, 'import logging\n'), ((5141, 5175), 'api.infrastructure.mysql.connection.MySQLConnection', 'connection.MySQLConnection', (['dbname'], {}), '(dbname)\n', (5167, 5175), False, 'from api.infrastructure.mysql import connection\n'), ((5261, 5284), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (5282, 5284), False, 'import datetime\n'), ((11989, 12023), 'api.infrastructure.mysql.connection.MySQLConnection', 'connection.MySQLConnection', (['dbname'], {}), '(dbname)\n', (12015, 12023), False, 'from api.infrastructure.mysql import connection\n'), ((14032, 14066), 'api.infrastructure.mysql.connection.MySQLConnection', 'connection.MySQLConnection', (['dbname'], {}), '(dbname)\n', (14058, 14066), False, 'from 
api.infrastructure.mysql import connection\n'), ((14152, 14175), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (14173, 14175), False, 'import datetime\n'), ((24675, 24698), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (24696, 24698), False, 'import datetime\n'), ((24713, 24736), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (24734, 24736), False, 'import datetime\n'), ((24832, 24873), 'datetime.datetime', 'datetime.datetime', ([], {'year': 'y', 'month': 'm', 'day': 'd'}), '(year=y, month=m, day=d)\n', (24849, 24873), False, 'import datetime\n'), ((24888, 24929), 'datetime.datetime', 'datetime.datetime', ([], {'year': 'y', 'month': 'm', 'day': 'd'}), '(year=y, month=m, day=d)\n', (24905, 24929), False, 'import datetime\n'), ((25036, 25085), 'datetime.datetime', 'datetime.datetime', ([], {'year': 'y', 'month': 'm', 'day': 'today.day'}), '(year=y, month=m, day=today.day)\n', (25053, 25085), False, 'import datetime\n'), ((25110, 25170), 'datetime.datetime', 'datetime.datetime', ([], {'year': 'today.year', 'month': 'dif', 'day': 'today.day'}), '(year=today.year, month=dif, day=today.day)\n', (25127, 25170), False, 'import datetime\n'), ((36050, 36066), 'json.dumps', 'json.dumps', (['data'], {}), '(data)\n', (36060, 36066), False, 'import json\n'), ((36226, 36242), 'json.dumps', 'json.dumps', (['data'], {}), '(data)\n', (36236, 36242), False, 'import json\n'), ((37267, 37327), 'MySQLdb.connect', 'mysql.connect', (['"""localhost"""', '"""webadmin"""', '"""Qymatix!!!"""', 'dbname'], {}), "('localhost', 'webadmin', 'Qymatix!!!', dbname)\n", (37280, 37327), True, 'import MySQLdb as mysql\n'), ((2024, 2056), 'numpy.busday_count', 'np.busday_count', (['firstday', 'today'], {}), '(firstday, today)\n', (2039, 2056), True, 'import numpy as np\n'), ((5527, 5550), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (5548, 5550), False, 'import datetime\n'), ((5570, 5632), 'datetime.datetime', 
'datetime.datetime', ([], {'year': 'hoy.year', 'month': 'hoy.month', 'day': 'hoy.day'}), '(year=hoy.year, month=hoy.month, day=hoy.day)\n', (5587, 5632), False, 'import datetime\n'), ((13478, 13510), 'numpy.busday_count', 'np.busday_count', (['firstday', 'today'], {}), '(firstday, today)\n', (13493, 13510), True, 'import numpy as np\n'), ((21238, 21261), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (21259, 21261), False, 'import datetime\n'), ((21284, 21307), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (21305, 21307), False, 'import datetime\n'), ((22168, 22191), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (22189, 22191), False, 'import datetime\n'), ((24142, 24165), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (24163, 24165), False, 'import datetime\n'), ((26750, 26773), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (26771, 26773), False, 'import datetime\n'), ((1510, 1533), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (1531, 1533), False, 'import datetime\n'), ((12964, 12987), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (12985, 12987), False, 'import datetime\n'), ((20188, 20211), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (20209, 20211), False, 'import datetime\n'), ((23177, 23200), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (23198, 23200), False, 'import datetime\n'), ((25757, 25780), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (25778, 25780), False, 'import datetime\n'), ((27834, 27857), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (27855, 27857), False, 'import datetime\n'), ((28593, 28616), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (28614, 28616), False, 'import datetime\n'), ((28643, 28666), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (28664, 28666), 
False, 'import datetime\n'), ((29380, 29403), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (29401, 29403), False, 'import datetime\n'), ((30038, 30061), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (30059, 30061), False, 'import datetime\n'), ((30872, 30895), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (30893, 30895), False, 'import datetime\n'), ((32174, 32197), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (32195, 32197), False, 'import datetime\n'), ((32798, 32821), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (32819, 32821), False, 'import datetime\n'), ((33421, 33444), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (33442, 33444), False, 'import datetime\n'), ((1974, 1997), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (1995, 1997), False, 'import datetime\n'), ((13428, 13451), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (13449, 13451), False, 'import datetime\n'), ((17866, 17889), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (17887, 17889), False, 'import datetime\n'), ((20073, 20096), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (20094, 20096), False, 'import datetime\n'), ((22236, 22259), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (22257, 22259), False, 'import datetime\n'), ((27881, 27904), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (27902, 27904), False, 'import datetime\n'), ((29308, 29331), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (29329, 29331), False, 'import datetime\n'), ((1898, 1921), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (1919, 1921), False, 'import datetime\n'), ((13352, 13375), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (13373, 13375), False, 'import datetime\n'), ((24620, 24643), 
'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (24641, 24643), False, 'import datetime\n')] |
import autoarray as aa
import numpy as np
import pytest
class TestWTildeImaging:
    """Tests for the imaging w-tilde construction utilities in
    aa.util.linear_eqn, using small hand-checkable fixtures."""

    def test__w_tilde_imaging_from(self):
        """The w_tilde curvature matrix of a 4x4 noise map and 3x3 kernel
        matches the expected symmetric 4x4 result."""
        # Noise map whose border pixels are zero; the four central pixels
        # are the unmasked (slim) pixels referenced below.
        noise_map_2d = np.array(
            [
                [0.0, 0.0, 0.0, 0.0],
                [0.0, 1.0, 2.0, 0.0],
                [0.0, 2.0, 4.0, 0.0],
                [0.0, 0.0, 0.0, 0.0],
            ]
        )
        kernel = np.array([[0.0, 0.0, 0.0], [1.0, 1.0, 2.0], [0.0, 1.0, 2.0]])
        # Native (y, x) coordinates of each slim pixel index.
        native_index_for_slim_index = np.array([[1, 1], [1, 2], [2, 1], [2, 2]])
        w_tilde = aa.util.linear_eqn.w_tilde_curvature_imaging_from(
            noise_map_native=noise_map_2d,
            kernel_native=kernel,
            native_index_for_slim_index=native_index_for_slim_index,
        )
        assert w_tilde == pytest.approx(
            np.array(
                [
                    [2.5, 1.625, 0.5, 0.375],
                    [1.625, 1.3125, 0.125, 0.0625],
                    [0.5, 0.125, 0.5, 0.375],
                    [0.375, 0.0625, 0.375, 0.3125],
                ]
            ),
            1.0e-4,
        )

    def test__w_tilde_data_imaging_from(self):
        """The w_tilde data vector of a small image / noise-map / kernel
        triple matches the expected per-pixel values."""
        image_2d = np.array(
            [
                [0.0, 0.0, 0.0, 0.0],
                [0.0, 2.0, 1.0, 0.0],
                [0.0, 1.0, 2.0, 0.0],
                [0.0, 0.0, 0.0, 0.0],
            ]
        )
        noise_map_2d = np.array(
            [
                [0.0, 0.0, 0.0, 0.0],
                [0.0, 1.0, 1.0, 0.0],
                [0.0, 1.0, 2.0, 0.0],
                [0.0, 0.0, 0.0, 0.0],
            ]
        )
        kernel = np.array([[0.0, 0.0, 0.0], [1.0, 1.0, 1.0], [1.0, 2.0, 0.0]])
        # Native (y, x) coordinates of each slim pixel index.
        native_index_for_slim_index = np.array([[1, 1], [1, 2], [2, 1], [2, 2]])
        w_tilde_data = aa.util.linear_eqn.w_tilde_data_imaging_from(
            image_native=image_2d,
            noise_map_native=noise_map_2d,
            kernel_native=kernel,
            native_index_for_slim_index=native_index_for_slim_index,
        )
        assert (w_tilde_data == np.array([5.0, 5.0, 1.5, 1.5])).all()

    def test__w_tilde_curvature_preload_imaging_from(self):
        """The sparse preload representation (values, indexes, row lengths)
        of the w_tilde curvature matches the expected arrays."""
        noise_map_2d = np.array(
            [
                [0.0, 0.0, 0.0, 0.0],
                [0.0, 1.0, 2.0, 0.0],
                [0.0, 2.0, 4.0, 0.0],
                [0.0, 0.0, 0.0, 0.0],
            ]
        )
        kernel = np.array([[0.0, 0.0, 0.0], [1.0, 1.0, 2.0], [0.0, 1.0, 2.0]])
        # Native (y, x) coordinates of each slim pixel index.
        native_index_for_slim_index = np.array([[1, 1], [1, 2], [2, 1], [2, 2]])
        w_tilde_preload, w_tilde_indexes, w_tilde_lengths = aa.util.linear_eqn.w_tilde_curvature_preload_imaging_from(
            noise_map_native=noise_map_2d,
            kernel_native=kernel,
            native_index_for_slim_index=native_index_for_slim_index,
        )
        assert w_tilde_preload == pytest.approx(
            np.array(
                [1.25, 1.625, 0.5, 0.375, 0.65625, 0.125, 0.0625, 0.25, 0.375, 0.15625]
            ),
            1.0e-4,
        )
        assert w_tilde_indexes == pytest.approx(
            np.array([0, 1, 2, 3, 1, 2, 3, 2, 3, 3]), 1.0e-4
        )
        assert w_tilde_lengths == pytest.approx(np.array([4, 3, 2, 1]), 1.0e-4)
class TestDataVectorFromData:
    """Tests for the data-vector utilities in aa.util.linear_eqn, covering
    the blurred-mapping-matrix, transformed-mapping-matrix and w-tilde
    computation routes."""

    def test__simple_blurred_mapping_matrix__correct_data_vector(self):
        """Unit image and noise: the data vector is the per-source sum of
        mapping-matrix entries."""
        blurred_mapping_matrix = np.array(
            [
                [1.0, 1.0, 0.0],
                [1.0, 0.0, 0.0],
                [0.0, 1.0, 0.0],
                [0.0, 1.0, 1.0],
                [0.0, 0.0, 0.0],
                [0.0, 0.0, 0.0],
            ]
        )
        image = np.array([1.0, 1.0, 1.0, 1.0, 1.0, 1.0])
        noise_map = np.array([1.0, 1.0, 1.0, 1.0, 1.0, 1.0])
        data_vector = aa.util.linear_eqn.data_vector_via_blurred_mapping_matrix_from(
            blurred_mapping_matrix=blurred_mapping_matrix,
            image=image,
            noise_map=noise_map,
        )
        assert (data_vector == np.array([2.0, 3.0, 1.0])).all()

    def test__simple_blurred_mapping_matrix__change_image_values__correct_data_vector(
        self,
    ):
        """Non-uniform image values weight the data vector accordingly."""
        blurred_mapping_matrix = np.array(
            [
                [1.0, 1.0, 0.0],
                [1.0, 0.0, 0.0],
                [0.0, 1.0, 0.0],
                [0.0, 1.0, 1.0],
                [0.0, 0.0, 0.0],
                [0.0, 0.0, 0.0],
            ]
        )
        image = np.array([3.0, 1.0, 1.0, 10.0, 1.0, 1.0])
        noise_map = np.array([1.0, 1.0, 1.0, 1.0, 1.0, 1.0])
        data_vector = aa.util.linear_eqn.data_vector_via_blurred_mapping_matrix_from(
            blurred_mapping_matrix=blurred_mapping_matrix,
            image=image,
            noise_map=noise_map,
        )
        assert (data_vector == np.array([4.0, 14.0, 10.0])).all()

    def test__simple_blurred_mapping_matrix__change_noise_values__correct_data_vector(
        self,
    ):
        """Non-uniform noise values down-weight the data vector entries."""
        blurred_mapping_matrix = np.array(
            [
                [1.0, 1.0, 0.0],
                [1.0, 0.0, 0.0],
                [0.0, 1.0, 0.0],
                [0.0, 1.0, 1.0],
                [0.0, 0.0, 0.0],
                [0.0, 0.0, 0.0],
            ]
        )
        image = np.array([4.0, 1.0, 1.0, 16.0, 1.0, 1.0])
        noise_map = np.array([2.0, 1.0, 1.0, 4.0, 1.0, 1.0])
        data_vector = aa.util.linear_eqn.data_vector_via_blurred_mapping_matrix_from(
            blurred_mapping_matrix=blurred_mapping_matrix,
            image=image,
            noise_map=noise_map,
        )
        assert (data_vector == np.array([2.0, 3.0, 1.0])).all()

    def test__data_vector_via_transformer_mapping_matrix_method__same_as_blurred_method_using_real_imag_separate(
        self,
    ):
        """Summing the real and imaginary data vectors computed separately
        via the blurred route equals the complex transformed route."""
        mapping_matrix = np.array(
            [
                [1.0, 1.0, 0.0],
                [1.0, 0.0, 0.0],
                [0.0, 1.0, 0.0],
                [0.0, 1.0, 1.0],
                [0.0, 0.0, 0.0],
                [0.0, 0.0, 0.0],
            ]
        )
        # Real component via the blurred (real-valued) route.
        data_real = np.array([4.0, 1.0, 1.0, 16.0, 1.0, 1.0])
        noise_map_real = np.array([2.0, 1.0, 1.0, 4.0, 1.0, 1.0])
        data_vector_real_via_blurred = aa.util.linear_eqn.data_vector_via_blurred_mapping_matrix_from(
            blurred_mapping_matrix=mapping_matrix,
            image=data_real,
            noise_map=noise_map_real,
        )
        # Imaginary component via the same route.
        data_imag = np.array([4.0, 1.0, 1.0, 16.0, 1.0, 1.0])
        noise_map_imag = np.array([2.0, 1.0, 1.0, 4.0, 1.0, 1.0])
        data_vector_imag_via_blurred = aa.util.linear_eqn.data_vector_via_blurred_mapping_matrix_from(
            blurred_mapping_matrix=mapping_matrix,
            image=data_imag,
            noise_map=noise_map_imag,
        )
        data_vector_complex_via_blurred = (
            data_vector_real_via_blurred + data_vector_imag_via_blurred
        )
        # The same data expressed as complex visibilities.
        transformed_mapping_matrix = np.array(
            [
                [1.0 + 1.0j, 1.0 + 1.0j, 0.0 + 0.0j],
                [1.0 + 1.0j, 0.0 + 0.0j, 0.0 + 0.0j],
                [0.0 + 0.0j, 1.0 + 1.0j, 0.0 + 0.0j],
                [0.0 + 0.0j, 1.0 + 1.0j, 1.0 + 1.0j],
                [0.0 + 0.0j, 0.0 + 0.0j, 0.0 + 0.0j],
                [0.0 + 0.0j, 0.0 + 0.0j, 0.0 + 0.0j],
            ]
        )
        data = np.array(
            [4.0 + 4.0j, 1.0 + 1.0j, 1.0 + 1.0j, 16.0 + 16.0j, 1.0 + 1.0j, 1.0 + 1.0j]
        )
        noise_map = np.array(
            [2.0 + 2.0j, 1.0 + 1.0j, 1.0 + 1.0j, 4.0 + 4.0j, 1.0 + 1.0j, 1.0 + 1.0j]
        )
        data_vector_via_transformed = aa.util.linear_eqn.data_vector_via_transformed_mapping_matrix_from(
            transformed_mapping_matrix=transformed_mapping_matrix,
            visibilities=data,
            noise_map=noise_map,
        )
        assert (data_vector_complex_via_blurred == data_vector_via_transformed).all()

    def test__data_vector_via_w_tilde_data_two_methods_agree(self):
        """The w-tilde data-vector route agrees with the blurred-mapping-
        matrix route on a random masked image, for sub-grid sizes 1 and 2."""
        mask = aa.Mask2D.circular(
            shape_native=(51, 51), pixel_scales=0.1, sub_size=1, radius=2.0
        )
        # Random image/noise over the full native grid, then masked.
        image = np.random.uniform(size=mask.shape_native)
        image = aa.Array2D.manual_mask(array=image, mask=mask)
        noise_map = np.random.uniform(size=mask.shape_native)
        noise_map = aa.Array2D.manual_mask(array=noise_map, mask=mask)
        kernel = aa.Kernel2D.from_gaussian(
            shape_native=(7, 7),
            pixel_scales=mask.pixel_scales,
            sigma=1.0,
            normalize=True,
        )
        convolver = aa.Convolver(mask=mask, kernel=kernel)
        pixelization = aa.pix.Rectangular(shape=(20, 20))
        for sub_size in range(1, 3):
            mask_sub = mask.mask_new_sub_size_from(mask=mask, sub_size=sub_size)
            grid = aa.Grid2D.from_mask(mask=mask_sub)
            mapper = pixelization.mapper_from(grid=grid)
            mapping_matrix = mapper.mapping_matrix
            blurred_mapping_matrix = convolver.convolve_mapping_matrix(
                mapping_matrix=mapping_matrix
            )
            # Reference result via the blurred-mapping-matrix route.
            data_vector = aa.util.linear_eqn.data_vector_via_blurred_mapping_matrix_from(
                blurred_mapping_matrix=blurred_mapping_matrix,
                image=image,
                noise_map=noise_map,
            )
            # Same quantity via the w-tilde route.
            w_tilde_data = aa.util.linear_eqn.w_tilde_data_imaging_from(
                image_native=image.native,
                noise_map_native=noise_map.native,
                kernel_native=kernel.native,
                native_index_for_slim_index=mask.native_index_for_slim_index,
            )
            data_to_pix_unique, data_weights, pix_lengths = aa.util.mapper.data_slim_to_pixelization_unique_from(
                data_pixels=w_tilde_data.shape[0],
                pixelization_index_for_sub_slim_index=mapper.pixelization_index_for_sub_slim_index,
                sub_size=sub_size,
            )
            data_vector_via_w_tilde = aa.util.linear_eqn.data_vector_via_w_tilde_data_imaging_from(
                w_tilde_data=w_tilde_data,
                data_to_pix_unique=data_to_pix_unique.astype("int"),
                data_weights=data_weights,
                pix_lengths=pix_lengths.astype("int"),
                pix_pixels=pixelization.pixels,
            )
            assert data_vector_via_w_tilde == pytest.approx(data_vector, 1.0e-4)
class TestCurvatureMatrixImaging:
    """Tests that the imaging curvature matrix F = M^T N^-1 M agrees between
    the direct (blurred) mapping-matrix computation and the w_tilde / sparse
    preload shortcuts."""

    def test__curvature_matrix_from_w_tilde(self):
        # Hand-checkable case: curvature matrix is M^T * w_tilde * M.
        w_tilde = np.array(
            [
                [1.0, 2.0, 3.0, 4.0],
                [2.0, 1.0, 2.0, 3.0],
                [3.0, 2.0, 1.0, 2.0],
                [4.0, 3.0, 2.0, 1.0],
            ]
        )

        mapping_matrix = np.array(
            [[1.0, 1.0, 0.0], [1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 0.0]]
        )

        curvature_matrix = aa.util.linear_eqn.curvature_matrix_via_w_tilde_from(
            w_tilde=w_tilde, mapping_matrix=mapping_matrix
        )

        assert (
            curvature_matrix
            == np.array([[6.0, 8.0, 0.0], [8.0, 8.0, 0.0], [0.0, 0.0, 0.0]])
        ).all()

    def test__curvature_matrix_via_preload_imaging(self):
        # Sparse-preload route must reproduce the known curvature matrix for a
        # simple 0/1 mapping matrix with unit noise.
        blurred_mapping_matrix = np.array(
            [
                [1.0, 1.0, 0.0],
                [1.0, 0.0, 0.0],
                [0.0, 1.0, 0.0],
                [0.0, 1.0, 1.0],
                [0.0, 0.0, 0.0],
                [0.0, 0.0, 0.0],
            ]
        )

        noise_map = np.array([1.0, 1.0, 1.0, 1.0, 1.0, 1.0])

        curvature_matrix_preload, curvature_matrix_counts = aa.util.linear_eqn.curvature_matrix_preload_from(
            mapping_matrix=blurred_mapping_matrix
        )

        curvature_matrix = aa.util.linear_eqn.curvature_matrix_via_sparse_preload_from(
            mapping_matrix=blurred_mapping_matrix,
            noise_map=noise_map,
            curvature_matrix_preload=curvature_matrix_preload.astype("int"),
            curvature_matrix_counts=curvature_matrix_counts.astype("int"),
        )

        assert (
            curvature_matrix
            == np.array([[2.0, 1.0, 0.0], [1.0, 3.0, 1.0], [0.0, 1.0, 1.0]])
        ).all()

        # It must also agree with the dense mapping-matrix route for
        # non-trivial fractional weights and a varying noise map.
        blurred_mapping_matrix = np.array(
            [
                [1.0, 1.0, 0.0, 0.5],
                [1.0, 0.0, 0.0, 0.25],
                [0.0, 1.0, 0.6, 0.75],
                [0.0, 1.0, 1.0, 0.1],
                [0.0, 0.0, 0.3, 1.0],
                [0.0, 0.0, 0.5, 0.7],
            ]
        )

        noise_map = np.array([2.0, 1.0, 10.0, 0.5, 3.0, 7.0])

        curvature_matrix_via_mapping_matrix = aa.util.linear_eqn.curvature_matrix_via_mapping_matrix_from(
            mapping_matrix=blurred_mapping_matrix, noise_map=noise_map
        )

        curvature_matrix_preload, curvature_matrix_counts = aa.util.linear_eqn.curvature_matrix_preload_from(
            mapping_matrix=blurred_mapping_matrix
        )

        curvature_matrix = aa.util.linear_eqn.curvature_matrix_via_sparse_preload_from(
            mapping_matrix=blurred_mapping_matrix,
            noise_map=noise_map,
            curvature_matrix_preload=curvature_matrix_preload.astype("int"),
            curvature_matrix_counts=curvature_matrix_counts.astype("int"),
        )

        assert (curvature_matrix_via_mapping_matrix == curvature_matrix).all()

    def test__simple_blurred_mapping_matrix(self):
        # Direct mapping-matrix computation with unit noise.
        blurred_mapping_matrix = np.array(
            [
                [1.0, 1.0, 0.0],
                [1.0, 0.0, 0.0],
                [0.0, 1.0, 0.0],
                [0.0, 1.0, 1.0],
                [0.0, 0.0, 0.0],
                [0.0, 0.0, 0.0],
            ]
        )

        noise_map = np.array([1.0, 1.0, 1.0, 1.0, 1.0, 1.0])

        curvature_matrix = aa.util.linear_eqn.curvature_matrix_via_mapping_matrix_from(
            mapping_matrix=blurred_mapping_matrix, noise_map=noise_map
        )

        assert (
            curvature_matrix
            == np.array([[2.0, 1.0, 0.0], [1.0, 3.0, 1.0], [0.0, 1.0, 1.0]])
        ).all()

    def test__simple_blurred_mapping_matrix__change_noise_values(self):
        # Same mapping matrix; raising the first noise value to 2.0 down-weights
        # the first image pixel's contribution by 1/sigma^2 = 0.25.
        blurred_mapping_matrix = np.array(
            [
                [1.0, 1.0, 0.0],
                [1.0, 0.0, 0.0],
                [0.0, 1.0, 0.0],
                [0.0, 1.0, 1.0],
                [0.0, 0.0, 0.0],
                [0.0, 0.0, 0.0],
            ]
        )

        noise_map = np.array([2.0, 1.0, 1.0, 1.0, 1.0, 1.0])

        curvature_matrix = aa.util.linear_eqn.curvature_matrix_via_mapping_matrix_from(
            mapping_matrix=blurred_mapping_matrix, noise_map=noise_map
        )

        assert (
            curvature_matrix
            == np.array([[1.25, 0.25, 0.0], [0.25, 2.25, 1.0], [0.0, 1.0, 1.0]])
        ).all()

    def test__curvature_matrix_via_w_tilde_two_methods_agree(self):
        # End-to-end: w_tilde route vs convolved mapping-matrix route on a
        # realistic circular mask with a Gaussian PSF and random noise map.
        mask = aa.Mask2D.circular(
            shape_native=(51, 51), pixel_scales=0.1, sub_size=1, radius=2.0
        )

        noise_map = np.random.uniform(size=mask.shape_native)
        noise_map = aa.Array2D.manual_mask(array=noise_map, mask=mask)

        kernel = aa.Kernel2D.from_gaussian(
            shape_native=(7, 7),
            pixel_scales=mask.pixel_scales,
            sigma=1.0,
            normalize=True,
        )

        convolver = aa.Convolver(mask=mask, kernel=kernel)

        pixelization = aa.pix.Rectangular(shape=(20, 20))

        mapper = pixelization.mapper_from(grid=mask.masked_grid_sub_1)

        mapping_matrix = mapper.mapping_matrix

        w_tilde = aa.util.linear_eqn.w_tilde_curvature_imaging_from(
            noise_map_native=noise_map.native,
            kernel_native=kernel.native,
            native_index_for_slim_index=mask.native_index_for_slim_index,
        )

        curvature_matrix_via_w_tilde = aa.util.linear_eqn.curvature_matrix_via_w_tilde_from(
            w_tilde=w_tilde, mapping_matrix=mapping_matrix
        )

        blurred_mapping_matrix = convolver.convolve_mapping_matrix(
            mapping_matrix=mapping_matrix
        )

        curvature_matrix = aa.util.linear_eqn.curvature_matrix_via_mapping_matrix_from(
            mapping_matrix=blurred_mapping_matrix, noise_map=noise_map
        )

        assert curvature_matrix_via_w_tilde == pytest.approx(curvature_matrix, 1.0e-4)

    def test__curvature_matrix_via_w_tilde_preload_two_methods_agree(self):
        mask = aa.Mask2D.circular(
            shape_native=(51, 51), pixel_scales=0.1, sub_size=1, radius=2.0
        )

        noise_map = np.random.uniform(size=mask.shape_native)
        noise_map = aa.Array2D.manual_mask(array=noise_map, mask=mask)

        kernel = aa.Kernel2D.from_gaussian(
            shape_native=(7, 7),
            pixel_scales=mask.pixel_scales,
            sigma=1.0,
            normalize=True,
        )

        convolver = aa.Convolver(mask=mask, kernel=kernel)

        pixelization = aa.pix.Rectangular(shape=(20, 20))

        # BUG FIX: this previously read `range(1, 2, 3)`, which iterates only
        # sub_size=1 (start 1, stop 2, step 3) and silently skipped the
        # sub_size=2 case. `range(1, 3)` covers sub_sizes 1 and 2, matching
        # the companion data-vector test.
        for sub_size in range(1, 3):

            mask_sub = mask.mask_new_sub_size_from(mask=mask, sub_size=sub_size)
            grid = aa.Grid2D.from_mask(mask=mask_sub)

            mapper = pixelization.mapper_from(grid=grid)

            mapping_matrix = mapper.mapping_matrix

            w_tilde_preload, w_tilde_indexes, w_tilde_lengths = aa.util.linear_eqn.w_tilde_curvature_preload_imaging_from(
                noise_map_native=noise_map.native,
                kernel_native=kernel.native,
                native_index_for_slim_index=mask.native_index_for_slim_index,
            )

            data_to_pix_unique, data_weights, pix_lengths = aa.util.mapper.data_slim_to_pixelization_unique_from(
                data_pixels=w_tilde_lengths.shape[0],
                pixelization_index_for_sub_slim_index=mapper.pixelization_index_for_sub_slim_index,
                sub_size=sub_size,
            )

            curvature_matrix_via_w_tilde = aa.util.linear_eqn.curvature_matrix_via_w_tilde_curvature_preload_imaging_from(
                curvature_preload=w_tilde_preload,
                curvature_indexes=w_tilde_indexes.astype("int"),
                curvature_lengths=w_tilde_lengths.astype("int"),
                data_to_pix_unique=data_to_pix_unique.astype("int"),
                data_weights=data_weights,
                pix_lengths=pix_lengths.astype("int"),
                pix_pixels=pixelization.pixels,
            )

            blurred_mapping_matrix = convolver.convolve_mapping_matrix(
                mapping_matrix=mapping_matrix
            )

            curvature_matrix = aa.util.linear_eqn.curvature_matrix_via_mapping_matrix_from(
                mapping_matrix=blurred_mapping_matrix, noise_map=noise_map
            )

            assert curvature_matrix_via_w_tilde == pytest.approx(
                curvature_matrix, 1.0e-4
            )
class TestMappedReconstructedDataFrom:
    """Tests that a pixelization reconstruction maps back onto the image
    plane correctly, via both the dense mapping-matrix route and the
    unique image-to-pixelization index route."""

    def test__mapped_reconstructed_data_via_mapping_matrix_from(self):
        solution = np.array([1.0, 1.0, 2.0])

        # An identity mapping matrix must return the reconstruction unchanged.
        mapped = aa.util.linear_eqn.mapped_reconstructed_data_via_mapping_matrix_from(
            mapping_matrix=np.eye(3), reconstruction=solution
        )

        assert (mapped == solution).all()

        # A fractional-weight mapping matrix returns the weighted combination
        # of pixelization values for each image pixel.
        weighted_mapping = np.array(
            [[0.25, 0.50, 0.25], [0.0, 1.0, 0.0], [0.0, 0.25, 0.75]]
        )

        mapped = aa.util.linear_eqn.mapped_reconstructed_data_via_mapping_matrix_from(
            mapping_matrix=weighted_mapping, reconstruction=solution
        )

        assert (mapped == np.array([1.25, 1.0, 1.75])).all()

    def test__mapped_reconstructed_data_via_image_to_pix_unique_from(self):
        solution = np.array([1.0, 1.0, 2.0])

        # (sub-pixel -> pixelization index array, sub_size, expected mapping)
        cases = [
            (np.array([0, 1, 2]), 1, np.array([1.0, 1.0, 2.0])),
            (
                np.array([0, 1, 1, 2, 1, 1, 1, 1, 1, 2, 2, 2]),
                2,
                np.array([1.25, 1.0, 1.75]),
            ),
        ]

        for sub_slim_to_pix, sub_size, expected in cases:

            unique, weights, lengths = aa.util.mapper.data_slim_to_pixelization_unique_from(
                data_pixels=3,
                pixelization_index_for_sub_slim_index=sub_slim_to_pix,
                sub_size=sub_size,
            )

            mapped = aa.util.linear_eqn.mapped_reconstructed_data_via_image_to_pix_unique_from(
                data_to_pix_unique=unique.astype("int"),
                data_weights=weights,
                pix_lengths=lengths.astype("int"),
                reconstruction=solution,
            )

            assert (mapped == expected).all()
| [
"autoarray.util.linear_eqn.curvature_matrix_preload_from",
"autoarray.util.mapper.data_slim_to_pixelization_unique_from",
"autoarray.Mask2D.circular",
"autoarray.util.linear_eqn.w_tilde_curvature_preload_imaging_from",
"autoarray.util.linear_eqn.mapped_reconstructed_data_via_mapping_matrix_from",
"autoarr... | [((157, 259), 'numpy.array', 'np.array', (['[[0.0, 0.0, 0.0, 0.0], [0.0, 1.0, 2.0, 0.0], [0.0, 2.0, 4.0, 0.0], [0.0, \n 0.0, 0.0, 0.0]]'], {}), '([[0.0, 0.0, 0.0, 0.0], [0.0, 1.0, 2.0, 0.0], [0.0, 2.0, 4.0, 0.0],\n [0.0, 0.0, 0.0, 0.0]])\n', (165, 259), True, 'import numpy as np\n'), ((384, 445), 'numpy.array', 'np.array', (['[[0.0, 0.0, 0.0], [1.0, 1.0, 2.0], [0.0, 1.0, 2.0]]'], {}), '([[0.0, 0.0, 0.0], [1.0, 1.0, 2.0], [0.0, 1.0, 2.0]])\n', (392, 445), True, 'import numpy as np\n'), ((487, 529), 'numpy.array', 'np.array', (['[[1, 1], [1, 2], [2, 1], [2, 2]]'], {}), '([[1, 1], [1, 2], [2, 1], [2, 2]])\n', (495, 529), True, 'import numpy as np\n'), ((551, 720), 'autoarray.util.linear_eqn.w_tilde_curvature_imaging_from', 'aa.util.linear_eqn.w_tilde_curvature_imaging_from', ([], {'noise_map_native': 'noise_map_2d', 'kernel_native': 'kernel', 'native_index_for_slim_index': 'native_index_for_slim_index'}), '(noise_map_native=\n noise_map_2d, kernel_native=kernel, native_index_for_slim_index=\n native_index_for_slim_index)\n', (600, 720), True, 'import autoarray as aa\n'), ((1187, 1289), 'numpy.array', 'np.array', (['[[0.0, 0.0, 0.0, 0.0], [0.0, 2.0, 1.0, 0.0], [0.0, 1.0, 2.0, 0.0], [0.0, \n 0.0, 0.0, 0.0]]'], {}), '([[0.0, 0.0, 0.0, 0.0], [0.0, 2.0, 1.0, 0.0], [0.0, 1.0, 2.0, 0.0],\n [0.0, 0.0, 0.0, 0.0]])\n', (1195, 1289), True, 'import numpy as np\n'), ((1420, 1522), 'numpy.array', 'np.array', (['[[0.0, 0.0, 0.0, 0.0], [0.0, 1.0, 1.0, 0.0], [0.0, 1.0, 2.0, 0.0], [0.0, \n 0.0, 0.0, 0.0]]'], {}), '([[0.0, 0.0, 0.0, 0.0], [0.0, 1.0, 1.0, 0.0], [0.0, 1.0, 2.0, 0.0],\n [0.0, 0.0, 0.0, 0.0]])\n', (1428, 1522), True, 'import numpy as np\n'), ((1647, 1708), 'numpy.array', 'np.array', (['[[0.0, 0.0, 0.0], [1.0, 1.0, 1.0], [1.0, 2.0, 0.0]]'], {}), '([[0.0, 0.0, 0.0], [1.0, 1.0, 1.0], [1.0, 2.0, 0.0]])\n', (1655, 1708), True, 'import numpy as np\n'), ((1750, 1792), 'numpy.array', 'np.array', (['[[1, 1], [1, 2], [2, 1], [2, 2]]'], {}), '([[1, 1], [1, 2], [2, 1], 
[2, 2]])\n', (1758, 1792), True, 'import numpy as np\n'), ((1819, 2004), 'autoarray.util.linear_eqn.w_tilde_data_imaging_from', 'aa.util.linear_eqn.w_tilde_data_imaging_from', ([], {'image_native': 'image_2d', 'noise_map_native': 'noise_map_2d', 'kernel_native': 'kernel', 'native_index_for_slim_index': 'native_index_for_slim_index'}), '(image_native=image_2d,\n noise_map_native=noise_map_2d, kernel_native=kernel,\n native_index_for_slim_index=native_index_for_slim_index)\n', (1863, 2004), True, 'import autoarray as aa\n'), ((2223, 2325), 'numpy.array', 'np.array', (['[[0.0, 0.0, 0.0, 0.0], [0.0, 1.0, 2.0, 0.0], [0.0, 2.0, 4.0, 0.0], [0.0, \n 0.0, 0.0, 0.0]]'], {}), '([[0.0, 0.0, 0.0, 0.0], [0.0, 1.0, 2.0, 0.0], [0.0, 2.0, 4.0, 0.0],\n [0.0, 0.0, 0.0, 0.0]])\n', (2231, 2325), True, 'import numpy as np\n'), ((2450, 2511), 'numpy.array', 'np.array', (['[[0.0, 0.0, 0.0], [1.0, 1.0, 2.0], [0.0, 1.0, 2.0]]'], {}), '([[0.0, 0.0, 0.0], [1.0, 1.0, 2.0], [0.0, 1.0, 2.0]])\n', (2458, 2511), True, 'import numpy as np\n'), ((2553, 2595), 'numpy.array', 'np.array', (['[[1, 1], [1, 2], [2, 1], [2, 2]]'], {}), '([[1, 1], [1, 2], [2, 1], [2, 2]])\n', (2561, 2595), True, 'import numpy as np\n'), ((2659, 2836), 'autoarray.util.linear_eqn.w_tilde_curvature_preload_imaging_from', 'aa.util.linear_eqn.w_tilde_curvature_preload_imaging_from', ([], {'noise_map_native': 'noise_map_2d', 'kernel_native': 'kernel', 'native_index_for_slim_index': 'native_index_for_slim_index'}), '(noise_map_native=\n noise_map_2d, kernel_native=kernel, native_index_for_slim_index=\n native_index_for_slim_index)\n', (2716, 2836), True, 'import autoarray as aa\n'), ((3440, 3557), 'numpy.array', 'np.array', (['[[1.0, 1.0, 0.0], [1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 1.0, 1.0], [0.0, \n 0.0, 0.0], [0.0, 0.0, 0.0]]'], {}), '([[1.0, 1.0, 0.0], [1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 1.0, 1.0\n ], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0]])\n', (3448, 3557), True, 'import numpy as np\n'), ((3714, 3754), 'numpy.array', 
'np.array', (['[1.0, 1.0, 1.0, 1.0, 1.0, 1.0]'], {}), '([1.0, 1.0, 1.0, 1.0, 1.0, 1.0])\n', (3722, 3754), True, 'import numpy as np\n'), ((3776, 3816), 'numpy.array', 'np.array', (['[1.0, 1.0, 1.0, 1.0, 1.0, 1.0]'], {}), '([1.0, 1.0, 1.0, 1.0, 1.0, 1.0])\n', (3784, 3816), True, 'import numpy as np\n'), ((3842, 3995), 'autoarray.util.linear_eqn.data_vector_via_blurred_mapping_matrix_from', 'aa.util.linear_eqn.data_vector_via_blurred_mapping_matrix_from', ([], {'blurred_mapping_matrix': 'blurred_mapping_matrix', 'image': 'image', 'noise_map': 'noise_map'}), '(\n blurred_mapping_matrix=blurred_mapping_matrix, image=image, noise_map=\n noise_map)\n', (3904, 3995), True, 'import autoarray as aa\n'), ((4253, 4370), 'numpy.array', 'np.array', (['[[1.0, 1.0, 0.0], [1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 1.0, 1.0], [0.0, \n 0.0, 0.0], [0.0, 0.0, 0.0]]'], {}), '([[1.0, 1.0, 0.0], [1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 1.0, 1.0\n ], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0]])\n', (4261, 4370), True, 'import numpy as np\n'), ((4527, 4568), 'numpy.array', 'np.array', (['[3.0, 1.0, 1.0, 10.0, 1.0, 1.0]'], {}), '([3.0, 1.0, 1.0, 10.0, 1.0, 1.0])\n', (4535, 4568), True, 'import numpy as np\n'), ((4590, 4630), 'numpy.array', 'np.array', (['[1.0, 1.0, 1.0, 1.0, 1.0, 1.0]'], {}), '([1.0, 1.0, 1.0, 1.0, 1.0, 1.0])\n', (4598, 4630), True, 'import numpy as np\n'), ((4656, 4809), 'autoarray.util.linear_eqn.data_vector_via_blurred_mapping_matrix_from', 'aa.util.linear_eqn.data_vector_via_blurred_mapping_matrix_from', ([], {'blurred_mapping_matrix': 'blurred_mapping_matrix', 'image': 'image', 'noise_map': 'noise_map'}), '(\n blurred_mapping_matrix=blurred_mapping_matrix, image=image, noise_map=\n noise_map)\n', (4718, 4809), True, 'import autoarray as aa\n'), ((5069, 5186), 'numpy.array', 'np.array', (['[[1.0, 1.0, 0.0], [1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 1.0, 1.0], [0.0, \n 0.0, 0.0], [0.0, 0.0, 0.0]]'], {}), '([[1.0, 1.0, 0.0], [1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 1.0, 1.0\n ], [0.0, 
0.0, 0.0], [0.0, 0.0, 0.0]])\n', (5077, 5186), True, 'import numpy as np\n'), ((5343, 5384), 'numpy.array', 'np.array', (['[4.0, 1.0, 1.0, 16.0, 1.0, 1.0]'], {}), '([4.0, 1.0, 1.0, 16.0, 1.0, 1.0])\n', (5351, 5384), True, 'import numpy as np\n'), ((5406, 5446), 'numpy.array', 'np.array', (['[2.0, 1.0, 1.0, 4.0, 1.0, 1.0]'], {}), '([2.0, 1.0, 1.0, 4.0, 1.0, 1.0])\n', (5414, 5446), True, 'import numpy as np\n'), ((5472, 5625), 'autoarray.util.linear_eqn.data_vector_via_blurred_mapping_matrix_from', 'aa.util.linear_eqn.data_vector_via_blurred_mapping_matrix_from', ([], {'blurred_mapping_matrix': 'blurred_mapping_matrix', 'image': 'image', 'noise_map': 'noise_map'}), '(\n blurred_mapping_matrix=blurred_mapping_matrix, image=image, noise_map=\n noise_map)\n', (5534, 5625), True, 'import autoarray as aa\n'), ((5902, 6019), 'numpy.array', 'np.array', (['[[1.0, 1.0, 0.0], [1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 1.0, 1.0], [0.0, \n 0.0, 0.0], [0.0, 0.0, 0.0]]'], {}), '([[1.0, 1.0, 0.0], [1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 1.0, 1.0\n ], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0]])\n', (5910, 6019), True, 'import numpy as np\n'), ((6180, 6221), 'numpy.array', 'np.array', (['[4.0, 1.0, 1.0, 16.0, 1.0, 1.0]'], {}), '([4.0, 1.0, 1.0, 16.0, 1.0, 1.0])\n', (6188, 6221), True, 'import numpy as np\n'), ((6248, 6288), 'numpy.array', 'np.array', (['[2.0, 1.0, 1.0, 4.0, 1.0, 1.0]'], {}), '([2.0, 1.0, 1.0, 4.0, 1.0, 1.0])\n', (6256, 6288), True, 'import numpy as np\n'), ((6331, 6485), 'autoarray.util.linear_eqn.data_vector_via_blurred_mapping_matrix_from', 'aa.util.linear_eqn.data_vector_via_blurred_mapping_matrix_from', ([], {'blurred_mapping_matrix': 'mapping_matrix', 'image': 'data_real', 'noise_map': 'noise_map_real'}), '(\n blurred_mapping_matrix=mapping_matrix, image=data_real, noise_map=\n noise_map_real)\n', (6393, 6485), True, 'import autoarray as aa\n'), ((6550, 6591), 'numpy.array', 'np.array', (['[4.0, 1.0, 1.0, 16.0, 1.0, 1.0]'], {}), '([4.0, 1.0, 1.0, 16.0, 1.0, 1.0])\n', (6558, 
6591), True, 'import numpy as np\n'), ((6618, 6658), 'numpy.array', 'np.array', (['[2.0, 1.0, 1.0, 4.0, 1.0, 1.0]'], {}), '([2.0, 1.0, 1.0, 4.0, 1.0, 1.0])\n', (6626, 6658), True, 'import numpy as np\n'), ((6701, 6855), 'autoarray.util.linear_eqn.data_vector_via_blurred_mapping_matrix_from', 'aa.util.linear_eqn.data_vector_via_blurred_mapping_matrix_from', ([], {'blurred_mapping_matrix': 'mapping_matrix', 'image': 'data_imag', 'noise_map': 'noise_map_imag'}), '(\n blurred_mapping_matrix=mapping_matrix, image=data_imag, noise_map=\n noise_map_imag)\n', (6763, 6855), True, 'import autoarray as aa\n'), ((7068, 7321), 'numpy.array', 'np.array', (['[[1.0 + 1.0j, 1.0 + 1.0j, 0.0 + 0.0j], [1.0 + 1.0j, 0.0 + 0.0j, 0.0 + 0.0j],\n [0.0 + 0.0j, 1.0 + 1.0j, 0.0 + 0.0j], [0.0 + 0.0j, 1.0 + 1.0j, 1.0 + \n 1.0j], [0.0 + 0.0j, 0.0 + 0.0j, 0.0 + 0.0j], [0.0 + 0.0j, 0.0 + 0.0j, \n 0.0 + 0.0j]]'], {}), '([[1.0 + 1.0j, 1.0 + 1.0j, 0.0 + 0.0j], [1.0 + 1.0j, 0.0 + 0.0j, \n 0.0 + 0.0j], [0.0 + 0.0j, 1.0 + 1.0j, 0.0 + 0.0j], [0.0 + 0.0j, 1.0 + \n 1.0j, 1.0 + 1.0j], [0.0 + 0.0j, 0.0 + 0.0j, 0.0 + 0.0j], [0.0 + 0.0j, \n 0.0 + 0.0j, 0.0 + 0.0j]])\n', (7076, 7321), True, 'import numpy as np\n'), ((7467, 7555), 'numpy.array', 'np.array', (['[4.0 + 4.0j, 1.0 + 1.0j, 1.0 + 1.0j, 16.0 + 16.0j, 1.0 + 1.0j, 1.0 + 1.0j]'], {}), '([4.0 + 4.0j, 1.0 + 1.0j, 1.0 + 1.0j, 16.0 + 16.0j, 1.0 + 1.0j, 1.0 +\n 1.0j])\n', (7475, 7555), True, 'import numpy as np\n'), ((7597, 7683), 'numpy.array', 'np.array', (['[2.0 + 2.0j, 1.0 + 1.0j, 1.0 + 1.0j, 4.0 + 4.0j, 1.0 + 1.0j, 1.0 + 1.0j]'], {}), '([2.0 + 2.0j, 1.0 + 1.0j, 1.0 + 1.0j, 4.0 + 4.0j, 1.0 + 1.0j, 1.0 +\n 1.0j])\n', (7605, 7683), True, 'import numpy as np\n'), ((7745, 7916), 'autoarray.util.linear_eqn.data_vector_via_transformed_mapping_matrix_from', 'aa.util.linear_eqn.data_vector_via_transformed_mapping_matrix_from', ([], {'transformed_mapping_matrix': 'transformed_mapping_matrix', 'visibilities': 'data', 'noise_map': 'noise_map'}), '(\n 
transformed_mapping_matrix=transformed_mapping_matrix, visibilities=\n data, noise_map=noise_map)\n', (7811, 7916), True, 'import autoarray as aa\n'), ((8136, 8223), 'autoarray.Mask2D.circular', 'aa.Mask2D.circular', ([], {'shape_native': '(51, 51)', 'pixel_scales': '(0.1)', 'sub_size': '(1)', 'radius': '(2.0)'}), '(shape_native=(51, 51), pixel_scales=0.1, sub_size=1,\n radius=2.0)\n', (8154, 8223), True, 'import autoarray as aa\n'), ((8263, 8304), 'numpy.random.uniform', 'np.random.uniform', ([], {'size': 'mask.shape_native'}), '(size=mask.shape_native)\n', (8280, 8304), True, 'import numpy as np\n'), ((8322, 8368), 'autoarray.Array2D.manual_mask', 'aa.Array2D.manual_mask', ([], {'array': 'image', 'mask': 'mask'}), '(array=image, mask=mask)\n', (8344, 8368), True, 'import autoarray as aa\n'), ((8392, 8433), 'numpy.random.uniform', 'np.random.uniform', ([], {'size': 'mask.shape_native'}), '(size=mask.shape_native)\n', (8409, 8433), True, 'import numpy as np\n'), ((8455, 8505), 'autoarray.Array2D.manual_mask', 'aa.Array2D.manual_mask', ([], {'array': 'noise_map', 'mask': 'mask'}), '(array=noise_map, mask=mask)\n', (8477, 8505), True, 'import autoarray as aa\n'), ((8526, 8636), 'autoarray.Kernel2D.from_gaussian', 'aa.Kernel2D.from_gaussian', ([], {'shape_native': '(7, 7)', 'pixel_scales': 'mask.pixel_scales', 'sigma': '(1.0)', 'normalize': '(True)'}), '(shape_native=(7, 7), pixel_scales=mask.\n pixel_scales, sigma=1.0, normalize=True)\n', (8551, 8636), True, 'import autoarray as aa\n'), ((8719, 8757), 'autoarray.Convolver', 'aa.Convolver', ([], {'mask': 'mask', 'kernel': 'kernel'}), '(mask=mask, kernel=kernel)\n', (8731, 8757), True, 'import autoarray as aa\n'), ((8784, 8818), 'autoarray.pix.Rectangular', 'aa.pix.Rectangular', ([], {'shape': '(20, 20)'}), '(shape=(20, 20))\n', (8802, 8818), True, 'import autoarray as aa\n'), ((10701, 10803), 'numpy.array', 'np.array', (['[[1.0, 2.0, 3.0, 4.0], [2.0, 1.0, 2.0, 3.0], [3.0, 2.0, 1.0, 2.0], [4.0, \n 3.0, 2.0, 1.0]]'], 
{}), '([[1.0, 2.0, 3.0, 4.0], [2.0, 1.0, 2.0, 3.0], [3.0, 2.0, 1.0, 2.0],\n [4.0, 3.0, 2.0, 1.0]])\n', (10709, 10803), True, 'import numpy as np\n'), ((10936, 11014), 'numpy.array', 'np.array', (['[[1.0, 1.0, 0.0], [1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 0.0]]'], {}), '([[1.0, 1.0, 0.0], [1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 0.0]])\n', (10944, 11014), True, 'import numpy as np\n'), ((11069, 11173), 'autoarray.util.linear_eqn.curvature_matrix_via_w_tilde_from', 'aa.util.linear_eqn.curvature_matrix_via_w_tilde_from', ([], {'w_tilde': 'w_tilde', 'mapping_matrix': 'mapping_matrix'}), '(w_tilde=w_tilde,\n mapping_matrix=mapping_matrix)\n', (11121, 11173), True, 'import autoarray as aa\n'), ((11436, 11553), 'numpy.array', 'np.array', (['[[1.0, 1.0, 0.0], [1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 1.0, 1.0], [0.0, \n 0.0, 0.0], [0.0, 0.0, 0.0]]'], {}), '([[1.0, 1.0, 0.0], [1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 1.0, 1.0\n ], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0]])\n', (11444, 11553), True, 'import numpy as np\n'), ((11714, 11754), 'numpy.array', 'np.array', (['[1.0, 1.0, 1.0, 1.0, 1.0, 1.0]'], {}), '([1.0, 1.0, 1.0, 1.0, 1.0, 1.0])\n', (11722, 11754), True, 'import numpy as np\n'), ((11818, 11910), 'autoarray.util.linear_eqn.curvature_matrix_preload_from', 'aa.util.linear_eqn.curvature_matrix_preload_from', ([], {'mapping_matrix': 'blurred_mapping_matrix'}), '(mapping_matrix=\n blurred_mapping_matrix)\n', (11866, 11910), True, 'import autoarray as aa\n'), ((12453, 12602), 'numpy.array', 'np.array', (['[[1.0, 1.0, 0.0, 0.5], [1.0, 0.0, 0.0, 0.25], [0.0, 1.0, 0.6, 0.75], [0.0, \n 1.0, 1.0, 0.1], [0.0, 0.0, 0.3, 1.0], [0.0, 0.0, 0.5, 0.7]]'], {}), '([[1.0, 1.0, 0.0, 0.5], [1.0, 0.0, 0.0, 0.25], [0.0, 1.0, 0.6, 0.75\n ], [0.0, 1.0, 1.0, 0.1], [0.0, 0.0, 0.3, 1.0], [0.0, 0.0, 0.5, 0.7]])\n', (12461, 12602), True, 'import numpy as np\n'), ((12763, 12804), 'numpy.array', 'np.array', (['[2.0, 1.0, 10.0, 0.5, 3.0, 7.0]'], {}), '([2.0, 1.0, 10.0, 0.5, 3.0, 7.0])\n', (12771, 
12804), True, 'import numpy as np\n'), ((12854, 12978), 'autoarray.util.linear_eqn.curvature_matrix_via_mapping_matrix_from', 'aa.util.linear_eqn.curvature_matrix_via_mapping_matrix_from', ([], {'mapping_matrix': 'blurred_mapping_matrix', 'noise_map': 'noise_map'}), '(mapping_matrix=\n blurred_mapping_matrix, noise_map=noise_map)\n', (12913, 12978), True, 'import autoarray as aa\n'), ((13061, 13153), 'autoarray.util.linear_eqn.curvature_matrix_preload_from', 'aa.util.linear_eqn.curvature_matrix_preload_from', ([], {'mapping_matrix': 'blurred_mapping_matrix'}), '(mapping_matrix=\n blurred_mapping_matrix)\n', (13109, 13153), True, 'import autoarray as aa\n'), ((13687, 13804), 'numpy.array', 'np.array', (['[[1.0, 1.0, 0.0], [1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 1.0, 1.0], [0.0, \n 0.0, 0.0], [0.0, 0.0, 0.0]]'], {}), '([[1.0, 1.0, 0.0], [1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 1.0, 1.0\n ], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0]])\n', (13695, 13804), True, 'import numpy as np\n'), ((13965, 14005), 'numpy.array', 'np.array', (['[1.0, 1.0, 1.0, 1.0, 1.0, 1.0]'], {}), '([1.0, 1.0, 1.0, 1.0, 1.0, 1.0])\n', (13973, 14005), True, 'import numpy as np\n'), ((14036, 14160), 'autoarray.util.linear_eqn.curvature_matrix_via_mapping_matrix_from', 'aa.util.linear_eqn.curvature_matrix_via_mapping_matrix_from', ([], {'mapping_matrix': 'blurred_mapping_matrix', 'noise_map': 'noise_map'}), '(mapping_matrix=\n blurred_mapping_matrix, noise_map=noise_map)\n', (14095, 14160), True, 'import autoarray as aa\n'), ((14436, 14553), 'numpy.array', 'np.array', (['[[1.0, 1.0, 0.0], [1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 1.0, 1.0], [0.0, \n 0.0, 0.0], [0.0, 0.0, 0.0]]'], {}), '([[1.0, 1.0, 0.0], [1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 1.0, 1.0\n ], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0]])\n', (14444, 14553), True, 'import numpy as np\n'), ((14714, 14754), 'numpy.array', 'np.array', (['[2.0, 1.0, 1.0, 1.0, 1.0, 1.0]'], {}), '([2.0, 1.0, 1.0, 1.0, 1.0, 1.0])\n', (14722, 14754), True, 'import numpy as np\n'), 
((14785, 14909), 'autoarray.util.linear_eqn.curvature_matrix_via_mapping_matrix_from', 'aa.util.linear_eqn.curvature_matrix_via_mapping_matrix_from', ([], {'mapping_matrix': 'blurred_mapping_matrix', 'noise_map': 'noise_map'}), '(mapping_matrix=\n blurred_mapping_matrix, noise_map=noise_map)\n', (14844, 14909), True, 'import autoarray as aa\n'), ((15167, 15254), 'autoarray.Mask2D.circular', 'aa.Mask2D.circular', ([], {'shape_native': '(51, 51)', 'pixel_scales': '(0.1)', 'sub_size': '(1)', 'radius': '(2.0)'}), '(shape_native=(51, 51), pixel_scales=0.1, sub_size=1,\n radius=2.0)\n', (15185, 15254), True, 'import autoarray as aa\n'), ((15298, 15339), 'numpy.random.uniform', 'np.random.uniform', ([], {'size': 'mask.shape_native'}), '(size=mask.shape_native)\n', (15315, 15339), True, 'import numpy as np\n'), ((15361, 15411), 'autoarray.Array2D.manual_mask', 'aa.Array2D.manual_mask', ([], {'array': 'noise_map', 'mask': 'mask'}), '(array=noise_map, mask=mask)\n', (15383, 15411), True, 'import autoarray as aa\n'), ((15432, 15542), 'autoarray.Kernel2D.from_gaussian', 'aa.Kernel2D.from_gaussian', ([], {'shape_native': '(7, 7)', 'pixel_scales': 'mask.pixel_scales', 'sigma': '(1.0)', 'normalize': '(True)'}), '(shape_native=(7, 7), pixel_scales=mask.\n pixel_scales, sigma=1.0, normalize=True)\n', (15457, 15542), True, 'import autoarray as aa\n'), ((15625, 15663), 'autoarray.Convolver', 'aa.Convolver', ([], {'mask': 'mask', 'kernel': 'kernel'}), '(mask=mask, kernel=kernel)\n', (15637, 15663), True, 'import autoarray as aa\n'), ((15690, 15724), 'autoarray.pix.Rectangular', 'aa.pix.Rectangular', ([], {'shape': '(20, 20)'}), '(shape=(20, 20))\n', (15708, 15724), True, 'import autoarray as aa\n'), ((15870, 16054), 'autoarray.util.linear_eqn.w_tilde_curvature_imaging_from', 'aa.util.linear_eqn.w_tilde_curvature_imaging_from', ([], {'noise_map_native': 'noise_map.native', 'kernel_native': 'kernel.native', 'native_index_for_slim_index': 'mask.native_index_for_slim_index'}), 
'(noise_map_native=\n noise_map.native, kernel_native=kernel.native,\n native_index_for_slim_index=mask.native_index_for_slim_index)\n', (15919, 16054), True, 'import autoarray as aa\n'), ((16139, 16243), 'autoarray.util.linear_eqn.curvature_matrix_via_w_tilde_from', 'aa.util.linear_eqn.curvature_matrix_via_w_tilde_from', ([], {'w_tilde': 'w_tilde', 'mapping_matrix': 'mapping_matrix'}), '(w_tilde=w_tilde,\n mapping_matrix=mapping_matrix)\n', (16191, 16243), True, 'import autoarray as aa\n'), ((16419, 16543), 'autoarray.util.linear_eqn.curvature_matrix_via_mapping_matrix_from', 'aa.util.linear_eqn.curvature_matrix_via_mapping_matrix_from', ([], {'mapping_matrix': 'blurred_mapping_matrix', 'noise_map': 'noise_map'}), '(mapping_matrix=\n blurred_mapping_matrix, noise_map=noise_map)\n', (16478, 16543), True, 'import autoarray as aa\n'), ((16748, 16835), 'autoarray.Mask2D.circular', 'aa.Mask2D.circular', ([], {'shape_native': '(51, 51)', 'pixel_scales': '(0.1)', 'sub_size': '(1)', 'radius': '(2.0)'}), '(shape_native=(51, 51), pixel_scales=0.1, sub_size=1,\n radius=2.0)\n', (16766, 16835), True, 'import autoarray as aa\n'), ((16879, 16920), 'numpy.random.uniform', 'np.random.uniform', ([], {'size': 'mask.shape_native'}), '(size=mask.shape_native)\n', (16896, 16920), True, 'import numpy as np\n'), ((16942, 16992), 'autoarray.Array2D.manual_mask', 'aa.Array2D.manual_mask', ([], {'array': 'noise_map', 'mask': 'mask'}), '(array=noise_map, mask=mask)\n', (16964, 16992), True, 'import autoarray as aa\n'), ((17013, 17123), 'autoarray.Kernel2D.from_gaussian', 'aa.Kernel2D.from_gaussian', ([], {'shape_native': '(7, 7)', 'pixel_scales': 'mask.pixel_scales', 'sigma': '(1.0)', 'normalize': '(True)'}), '(shape_native=(7, 7), pixel_scales=mask.\n pixel_scales, sigma=1.0, normalize=True)\n', (17038, 17123), True, 'import autoarray as aa\n'), ((17206, 17244), 'autoarray.Convolver', 'aa.Convolver', ([], {'mask': 'mask', 'kernel': 'kernel'}), '(mask=mask, kernel=kernel)\n', (17218, 
17244), True, 'import autoarray as aa\n'), ((17271, 17305), 'autoarray.pix.Rectangular', 'aa.pix.Rectangular', ([], {'shape': '(20, 20)'}), '(shape=(20, 20))\n', (17289, 17305), True, 'import autoarray as aa\n'), ((19383, 19444), 'numpy.array', 'np.array', (['[[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]]'], {}), '([[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]])\n', (19391, 19444), True, 'import numpy as np\n'), ((19473, 19498), 'numpy.array', 'np.array', (['[1.0, 1.0, 2.0]'], {}), '([1.0, 1.0, 2.0])\n', (19481, 19498), True, 'import numpy as np\n'), ((19538, 19673), 'autoarray.util.linear_eqn.mapped_reconstructed_data_via_mapping_matrix_from', 'aa.util.linear_eqn.mapped_reconstructed_data_via_mapping_matrix_from', ([], {'mapping_matrix': 'mapping_matrix', 'reconstruction': 'reconstruction'}), '(\n mapping_matrix=mapping_matrix, reconstruction=reconstruction)\n', (19606, 19673), True, 'import autoarray as aa\n'), ((19802, 19867), 'numpy.array', 'np.array', (['[[0.25, 0.5, 0.25], [0.0, 1.0, 0.0], [0.0, 0.25, 0.75]]'], {}), '([[0.25, 0.5, 0.25], [0.0, 1.0, 0.0], [0.0, 0.25, 0.75]])\n', (19810, 19867), True, 'import numpy as np\n'), ((19921, 19946), 'numpy.array', 'np.array', (['[1.0, 1.0, 2.0]'], {}), '([1.0, 1.0, 2.0])\n', (19929, 19946), True, 'import numpy as np\n'), ((19986, 20121), 'autoarray.util.linear_eqn.mapped_reconstructed_data_via_mapping_matrix_from', 'aa.util.linear_eqn.mapped_reconstructed_data_via_mapping_matrix_from', ([], {'mapping_matrix': 'mapping_matrix', 'reconstruction': 'reconstruction'}), '(\n mapping_matrix=mapping_matrix, reconstruction=reconstruction)\n', (20054, 20121), True, 'import autoarray as aa\n'), ((20354, 20373), 'numpy.array', 'np.array', (['[0, 1, 2]'], {}), '([0, 1, 2])\n', (20362, 20373), True, 'import numpy as np\n'), ((20433, 20598), 'autoarray.util.mapper.data_slim_to_pixelization_unique_from', 'aa.util.mapper.data_slim_to_pixelization_unique_from', ([], {'data_pixels': '(3)', 
'pixelization_index_for_sub_slim_index': 'pixelization_index_for_sub_slim_index', 'sub_size': '(1)'}), '(data_pixels=3,\n pixelization_index_for_sub_slim_index=\n pixelization_index_for_sub_slim_index, sub_size=1)\n', (20485, 20598), True, 'import autoarray as aa\n'), ((20669, 20694), 'numpy.array', 'np.array', (['[1.0, 1.0, 2.0]'], {}), '([1.0, 1.0, 2.0])\n', (20677, 20694), True, 'import numpy as np\n'), ((21154, 21200), 'numpy.array', 'np.array', (['[0, 1, 1, 2, 1, 1, 1, 1, 1, 2, 2, 2]'], {}), '([0, 1, 1, 2, 1, 1, 1, 1, 1, 2, 2, 2])\n', (21162, 21200), True, 'import numpy as np\n'), ((21284, 21449), 'autoarray.util.mapper.data_slim_to_pixelization_unique_from', 'aa.util.mapper.data_slim_to_pixelization_unique_from', ([], {'data_pixels': '(3)', 'pixelization_index_for_sub_slim_index': 'pixelization_index_for_sub_slim_index', 'sub_size': '(2)'}), '(data_pixels=3,\n pixelization_index_for_sub_slim_index=\n pixelization_index_for_sub_slim_index, sub_size=2)\n', (21336, 21449), True, 'import autoarray as aa\n'), ((21520, 21545), 'numpy.array', 'np.array', (['[1.0, 1.0, 2.0]'], {}), '([1.0, 1.0, 2.0])\n', (21528, 21545), True, 'import numpy as np\n'), ((8965, 8999), 'autoarray.Grid2D.from_mask', 'aa.Grid2D.from_mask', ([], {'mask': 'mask_sub'}), '(mask=mask_sub)\n', (8984, 8999), True, 'import autoarray as aa\n'), ((9280, 9433), 'autoarray.util.linear_eqn.data_vector_via_blurred_mapping_matrix_from', 'aa.util.linear_eqn.data_vector_via_blurred_mapping_matrix_from', ([], {'blurred_mapping_matrix': 'blurred_mapping_matrix', 'image': 'image', 'noise_map': 'noise_map'}), '(\n blurred_mapping_matrix=blurred_mapping_matrix, image=image, noise_map=\n noise_map)\n', (9342, 9433), True, 'import autoarray as aa\n'), ((9521, 9726), 'autoarray.util.linear_eqn.w_tilde_data_imaging_from', 'aa.util.linear_eqn.w_tilde_data_imaging_from', ([], {'image_native': 'image.native', 'noise_map_native': 'noise_map.native', 'kernel_native': 'kernel.native', 'native_index_for_slim_index': 
'mask.native_index_for_slim_index'}), '(image_native=image.native,\n noise_map_native=noise_map.native, kernel_native=kernel.native,\n native_index_for_slim_index=mask.native_index_for_slim_index)\n', (9565, 9726), True, 'import autoarray as aa\n'), ((9866, 10066), 'autoarray.util.mapper.data_slim_to_pixelization_unique_from', 'aa.util.mapper.data_slim_to_pixelization_unique_from', ([], {'data_pixels': 'w_tilde_data.shape[0]', 'pixelization_index_for_sub_slim_index': 'mapper.pixelization_index_for_sub_slim_index', 'sub_size': 'sub_size'}), '(data_pixels=\n w_tilde_data.shape[0], pixelization_index_for_sub_slim_index=mapper.\n pixelization_index_for_sub_slim_index, sub_size=sub_size)\n', (9918, 10066), True, 'import autoarray as aa\n'), ((16611, 16650), 'pytest.approx', 'pytest.approx', (['curvature_matrix', '(0.0001)'], {}), '(curvature_matrix, 0.0001)\n', (16624, 16650), False, 'import pytest\n'), ((17455, 17489), 'autoarray.Grid2D.from_mask', 'aa.Grid2D.from_mask', ([], {'mask': 'mask_sub'}), '(mask=mask_sub)\n', (17474, 17489), True, 'import autoarray as aa\n'), ((17671, 17863), 'autoarray.util.linear_eqn.w_tilde_curvature_preload_imaging_from', 'aa.util.linear_eqn.w_tilde_curvature_preload_imaging_from', ([], {'noise_map_native': 'noise_map.native', 'kernel_native': 'kernel.native', 'native_index_for_slim_index': 'mask.native_index_for_slim_index'}), '(noise_map_native=\n noise_map.native, kernel_native=kernel.native,\n native_index_for_slim_index=mask.native_index_for_slim_index)\n', (17728, 17863), True, 'import autoarray as aa\n'), ((17985, 18188), 'autoarray.util.mapper.data_slim_to_pixelization_unique_from', 'aa.util.mapper.data_slim_to_pixelization_unique_from', ([], {'data_pixels': 'w_tilde_lengths.shape[0]', 'pixelization_index_for_sub_slim_index': 'mapper.pixelization_index_for_sub_slim_index', 'sub_size': 'sub_size'}), '(data_pixels=\n w_tilde_lengths.shape[0], pixelization_index_for_sub_slim_index=mapper.\n pixelization_index_for_sub_slim_index, 
sub_size=sub_size)\n', (18037, 18188), True, 'import autoarray as aa\n'), ((18961, 19085), 'autoarray.util.linear_eqn.curvature_matrix_via_mapping_matrix_from', 'aa.util.linear_eqn.curvature_matrix_via_mapping_matrix_from', ([], {'mapping_matrix': 'blurred_mapping_matrix', 'noise_map': 'noise_map'}), '(mapping_matrix=\n blurred_mapping_matrix, noise_map=noise_map)\n', (19020, 19085), True, 'import autoarray as aa\n'), ((819, 950), 'numpy.array', 'np.array', (['[[2.5, 1.625, 0.5, 0.375], [1.625, 1.3125, 0.125, 0.0625], [0.5, 0.125, 0.5,\n 0.375], [0.375, 0.0625, 0.375, 0.3125]]'], {}), '([[2.5, 1.625, 0.5, 0.375], [1.625, 1.3125, 0.125, 0.0625], [0.5, \n 0.125, 0.5, 0.375], [0.375, 0.0625, 0.375, 0.3125]])\n', (827, 950), True, 'import numpy as np\n'), ((2943, 3029), 'numpy.array', 'np.array', (['[1.25, 1.625, 0.5, 0.375, 0.65625, 0.125, 0.0625, 0.25, 0.375, 0.15625]'], {}), '([1.25, 1.625, 0.5, 0.375, 0.65625, 0.125, 0.0625, 0.25, 0.375, \n 0.15625])\n', (2951, 3029), True, 'import numpy as np\n'), ((3153, 3193), 'numpy.array', 'np.array', (['[0, 1, 2, 3, 1, 2, 3, 2, 3, 3]'], {}), '([0, 1, 2, 3, 1, 2, 3, 2, 3, 3])\n', (3161, 3193), True, 'import numpy as np\n'), ((3264, 3286), 'numpy.array', 'np.array', (['[4, 3, 2, 1]'], {}), '([4, 3, 2, 1])\n', (3272, 3286), True, 'import numpy as np\n'), ((10554, 10588), 'pytest.approx', 'pytest.approx', (['data_vector', '(0.0001)'], {}), '(data_vector, 0.0001)\n', (10567, 10588), False, 'import pytest\n'), ((19167, 19206), 'pytest.approx', 'pytest.approx', (['curvature_matrix', '(0.0001)'], {}), '(curvature_matrix, 0.0001)\n', (19180, 19206), False, 'import pytest\n'), ((2096, 2126), 'numpy.array', 'np.array', (['[5.0, 5.0, 1.5, 1.5]'], {}), '([5.0, 5.0, 1.5, 1.5])\n', (2104, 2126), True, 'import numpy as np\n'), ((4071, 4096), 'numpy.array', 'np.array', (['[2.0, 3.0, 1.0]'], {}), '([2.0, 3.0, 1.0])\n', (4079, 4096), True, 'import numpy as np\n'), ((4885, 4912), 'numpy.array', 'np.array', (['[4.0, 14.0, 10.0]'], {}), '([4.0, 
14.0, 10.0])\n', (4893, 4912), True, 'import numpy as np\n'), ((5701, 5726), 'numpy.array', 'np.array', (['[2.0, 3.0, 1.0]'], {}), '([2.0, 3.0, 1.0])\n', (5709, 5726), True, 'import numpy as np\n'), ((11260, 11321), 'numpy.array', 'np.array', (['[[6.0, 8.0, 0.0], [8.0, 8.0, 0.0], [0.0, 0.0, 0.0]]'], {}), '([[6.0, 8.0, 0.0], [8.0, 8.0, 0.0], [0.0, 0.0, 0.0]])\n', (11268, 11321), True, 'import numpy as np\n'), ((12338, 12399), 'numpy.array', 'np.array', (['[[2.0, 1.0, 0.0], [1.0, 3.0, 1.0], [0.0, 1.0, 1.0]]'], {}), '([[2.0, 1.0, 0.0], [1.0, 3.0, 1.0], [0.0, 1.0, 1.0]])\n', (12346, 12399), True, 'import numpy as np\n'), ((14246, 14307), 'numpy.array', 'np.array', (['[[2.0, 1.0, 0.0], [1.0, 3.0, 1.0], [0.0, 1.0, 1.0]]'], {}), '([[2.0, 1.0, 0.0], [1.0, 3.0, 1.0], [0.0, 1.0, 1.0]])\n', (14254, 14307), True, 'import numpy as np\n'), ((14995, 15060), 'numpy.array', 'np.array', (['[[1.25, 0.25, 0.0], [0.25, 2.25, 1.0], [0.0, 1.0, 1.0]]'], {}), '([[1.25, 0.25, 0.0], [0.25, 2.25, 1.0], [0.0, 1.0, 1.0]])\n', (15003, 15060), True, 'import numpy as np\n'), ((19741, 19766), 'numpy.array', 'np.array', (['[1.0, 1.0, 2.0]'], {}), '([1.0, 1.0, 2.0])\n', (19749, 19766), True, 'import numpy as np\n'), ((20189, 20216), 'numpy.array', 'np.array', (['[1.25, 1.0, 1.75]'], {}), '([1.25, 1.0, 1.75])\n', (20197, 20216), True, 'import numpy as np\n'), ((21070, 21095), 'numpy.array', 'np.array', (['[1.0, 1.0, 2.0]'], {}), '([1.0, 1.0, 2.0])\n', (21078, 21095), True, 'import numpy as np\n'), ((21921, 21948), 'numpy.array', 'np.array', (['[1.25, 1.0, 1.75]'], {}), '([1.25, 1.0, 1.75])\n', (21929, 21948), True, 'import numpy as np\n')] |
import os
import math
import numpy as np
from common.realtime import sec_since_boot, DT_MDL
from common.numpy_fast import interp
from selfdrive.swaglog import cloudlog
from selfdrive.controls.lib.lateral_mpc import libmpc_py
from selfdrive.controls.lib.drive_helpers import MPC_COST_LAT, MPC_N, CAR_ROTATION_RADIUS
from selfdrive.controls.lib.lane_planner import LanePlanner, TRAJECTORY_SIZE
from selfdrive.config import Conversions as CV
from common.params import Params
import cereal.messaging as messaging
from cereal import log
LaneChangeState = log.LateralPlan.LaneChangeState
LaneChangeDirection = log.LateralPlan.LaneChangeDirection
LOG_MPC = os.environ.get('LOG_MPC', False)
LANE_CHANGE_SPEED_MIN = 45 * CV.MPH_TO_MS
LANE_CHANGE_TIME_MAX = 10.
DESIRES = {
LaneChangeDirection.none: {
LaneChangeState.off: log.LateralPlan.Desire.none,
LaneChangeState.preLaneChange: log.LateralPlan.Desire.none,
LaneChangeState.laneChangeStarting: log.LateralPlan.Desire.none,
LaneChangeState.laneChangeFinishing: log.LateralPlan.Desire.none,
},
LaneChangeDirection.left: {
LaneChangeState.off: log.LateralPlan.Desire.none,
LaneChangeState.preLaneChange: log.LateralPlan.Desire.none,
LaneChangeState.laneChangeStarting: log.LateralPlan.Desire.laneChangeLeft,
LaneChangeState.laneChangeFinishing: log.LateralPlan.Desire.laneChangeLeft,
},
LaneChangeDirection.right: {
LaneChangeState.off: log.LateralPlan.Desire.none,
LaneChangeState.preLaneChange: log.LateralPlan.Desire.none,
LaneChangeState.laneChangeStarting: log.LateralPlan.Desire.laneChangeRight,
LaneChangeState.laneChangeFinishing: log.LateralPlan.Desire.laneChangeRight,
},
}
class LateralPlanner():
def __init__(self, CP):
self.LP = LanePlanner()
self.last_cloudlog_t = 0
self.steer_rate_cost = CP.steerRateCost
self.setup_mpc()
self.solution_invalid_cnt = 0
self.lane_change_enabled = Params().get('LaneChangeEnabled') == b'1'
self.lane_change_state = LaneChangeState.off
self.lane_change_direction = LaneChangeDirection.none
self.lane_change_timer = 0.0
self.lane_change_ll_prob = 1.0
self.prev_one_blinker = False
self.desire = log.LateralPlan.Desire.none
self.path_xyz = np.zeros((TRAJECTORY_SIZE,3))
self.plan_yaw = np.zeros((TRAJECTORY_SIZE,))
self.t_idxs = np.arange(TRAJECTORY_SIZE)
self.y_pts = np.zeros(TRAJECTORY_SIZE)
def setup_mpc(self):
self.libmpc = libmpc_py.libmpc
self.libmpc.init(MPC_COST_LAT.PATH, MPC_COST_LAT.HEADING, self.steer_rate_cost)
self.mpc_solution = libmpc_py.ffi.new("log_t *")
self.cur_state = libmpc_py.ffi.new("state_t *")
self.cur_state[0].x = 0.0
self.cur_state[0].y = 0.0
self.cur_state[0].psi = 0.0
self.cur_state[0].curvature = 0.0
self.angle_steers_des = 0.0
self.angle_steers_des_mpc = 0.0
self.angle_steers_des_time = 0.0
def update(self, sm, CP, VM):
v_ego = sm['carState'].vEgo
active = sm['controlsState'].active
steering_wheel_angle_offset_deg = sm['liveParameters'].angleOffset
steering_wheel_angle_deg = sm['carState'].steeringAngle
# Update vehicle model
x = max(sm['liveParameters'].stiffnessFactor, 0.1)
sr = max(sm['liveParameters'].steerRatio, 0.1)
VM.update_params(x, sr)
curvature_factor = VM.curvature_factor(v_ego)
measured_curvature = -curvature_factor * math.radians(steering_wheel_angle_deg - steering_wheel_angle_offset_deg) / VM.sR
md = sm['modelV2']
self.LP.parse_model(sm['modelV2'])
if len(md.position.x) == TRAJECTORY_SIZE and len(md.orientation.x) == TRAJECTORY_SIZE:
self.path_xyz = np.column_stack([md.position.x, md.position.y, md.position.z])
self.t_idxs = np.array(md.position.t)
self.plan_yaw = list(md.orientation.z)
# Lane change logic
one_blinker = sm['carState'].leftBlinker != sm['carState'].rightBlinker
below_lane_change_speed = v_ego < LANE_CHANGE_SPEED_MIN
if sm['carState'].leftBlinker:
self.lane_change_direction = LaneChangeDirection.left
elif sm['carState'].rightBlinker:
self.lane_change_direction = LaneChangeDirection.right
if (not active) or (self.lane_change_timer > LANE_CHANGE_TIME_MAX) or (not self.lane_change_enabled):
self.lane_change_state = LaneChangeState.off
self.lane_change_direction = LaneChangeDirection.none
else:
torque_applied = sm['carState'].steeringPressed and \
((sm['carState'].steeringTorque > 0 and self.lane_change_direction == LaneChangeDirection.left) or
(sm['carState'].steeringTorque < 0 and self.lane_change_direction == LaneChangeDirection.right))
blindspot_detected = ((sm['carState'].leftBlindspot and self.lane_change_direction == LaneChangeDirection.left) or
(sm['carState'].rightBlindspot and self.lane_change_direction == LaneChangeDirection.right))
lane_change_prob = self.LP.l_lane_change_prob + self.LP.r_lane_change_prob
# State transitions
# off
if self.lane_change_state == LaneChangeState.off and one_blinker and not self.prev_one_blinker and not below_lane_change_speed:
self.lane_change_state = LaneChangeState.preLaneChange
self.lane_change_ll_prob = 1.0
# pre
elif self.lane_change_state == LaneChangeState.preLaneChange:
if not one_blinker or below_lane_change_speed:
self.lane_change_state = LaneChangeState.off
elif torque_applied and not blindspot_detected:
self.lane_change_state = LaneChangeState.laneChangeStarting
# starting
elif self.lane_change_state == LaneChangeState.laneChangeStarting:
# fade out over .5s
self.lane_change_ll_prob = max(self.lane_change_ll_prob - 2*DT_MDL, 0.0)
# 98% certainty
if lane_change_prob < 0.02 and self.lane_change_ll_prob < 0.01:
self.lane_change_state = LaneChangeState.laneChangeFinishing
# finishing
elif self.lane_change_state == LaneChangeState.laneChangeFinishing:
# fade in laneline over 1s
self.lane_change_ll_prob = min(self.lane_change_ll_prob + DT_MDL, 1.0)
if one_blinker and self.lane_change_ll_prob > 0.99:
self.lane_change_state = LaneChangeState.preLaneChange
elif self.lane_change_ll_prob > 0.99:
self.lane_change_state = LaneChangeState.off
if self.lane_change_state in [LaneChangeState.off, LaneChangeState.preLaneChange]:
self.lane_change_timer = 0.0
else:
self.lane_change_timer += DT_MDL
self.prev_one_blinker = one_blinker
self.desire = DESIRES[self.lane_change_direction][self.lane_change_state]
# Turn off lanes during lane change
if self.desire == log.LateralPlan.Desire.laneChangeRight or self.desire == log.LateralPlan.Desire.laneChangeLeft:
self.LP.lll_prob *= self.lane_change_ll_prob
self.LP.rll_prob *= self.lane_change_ll_prob
d_path_xyz = self.LP.get_d_path(v_ego, self.t_idxs, self.path_xyz)
y_pts = np.interp(v_ego * self.t_idxs[:MPC_N+1], np.linalg.norm(d_path_xyz, axis=1), d_path_xyz[:,1])
heading_pts = np.interp(v_ego * self.t_idxs[:MPC_N+1], np.linalg.norm(self.path_xyz, axis=1), self.plan_yaw)
self.y_pts = y_pts
v_ego_mpc = max(v_ego, 5.0) # avoid mpc roughness due to low speed
assert len(y_pts) == MPC_N + 1
assert len(heading_pts) == MPC_N + 1
self.libmpc.run_mpc(self.cur_state, self.mpc_solution,
float(v_ego_mpc),
CAR_ROTATION_RADIUS,
list(y_pts),
list(heading_pts))
# init state for next
self.cur_state.x = 0.0
self.cur_state.y = 0.0
self.cur_state.psi = 0.0
self.cur_state.curvature = interp(DT_MDL, self.t_idxs[:MPC_N+1], self.mpc_solution.curvature)
# TODO this needs more thought, use .2s extra for now to estimate other delays
delay = CP.steerActuatorDelay + .2
next_curvature = interp(delay, self.t_idxs[:MPC_N+1], self.mpc_solution.curvature)
psi = interp(delay, self.t_idxs[:MPC_N+1], self.mpc_solution.psi)
next_curvature_rate = self.mpc_solution.curvature_rate[0]
next_curvature_from_psi = psi/(v_ego*delay)
if psi > self.mpc_solution.curvature[0] * delay * v_ego:
next_curvature = max(next_curvature_from_psi, next_curvature)
else:
next_curvature = min(next_curvature_from_psi, next_curvature)
# reset to current steer angle if not active or overriding
if active:
curvature_desired = next_curvature
desired_curvature_rate = next_curvature_rate
else:
curvature_desired = measured_curvature
desired_curvature_rate = 0.0
# negative sign, controls uses different convention
self.desired_steering_wheel_angle_deg = -float(math.degrees(curvature_desired * VM.sR)/curvature_factor) + steering_wheel_angle_offset_deg
self.desired_steering_wheel_angle_rate_deg = -float(math.degrees(desired_curvature_rate * VM.sR)/curvature_factor)
# Check for infeasable MPC solution
mpc_nans = any(math.isnan(x) for x in self.mpc_solution.curvature)
t = sec_since_boot()
if mpc_nans:
self.libmpc.init(MPC_COST_LAT.PATH, MPC_COST_LAT.HEADING, CP.steerRateCost)
self.cur_state.curvature = measured_curvature
if t > self.last_cloudlog_t + 5.0:
self.last_cloudlog_t = t
cloudlog.warning("Lateral mpc - nan: True")
if self.mpc_solution[0].cost > 20000. or mpc_nans: # TODO: find a better way to detect when MPC did not converge
self.solution_invalid_cnt += 1
else:
self.solution_invalid_cnt = 0
def publish(self, sm, pm):
plan_solution_valid = self.solution_invalid_cnt < 2
plan_send = messaging.new_message('lateralPlan')
plan_send.valid = sm.all_alive_and_valid(service_list=['carState', 'controlsState', 'liveParameters', 'modelV2'])
plan_send.lateralPlan.laneWidth = float(self.LP.lane_width)
plan_send.lateralPlan.dPathPoints = [float(x) for x in self.y_pts]
plan_send.lateralPlan.lProb = float(self.LP.lll_prob)
plan_send.lateralPlan.rProb = float(self.LP.rll_prob)
plan_send.lateralPlan.dProb = float(self.LP.d_prob)
plan_send.lateralPlan.angleSteers = float(self.desired_steering_wheel_angle_deg)
plan_send.lateralPlan.rateSteers = float(self.desired_steering_wheel_angle_rate_deg)
plan_send.lateralPlan.angleOffset = float(sm['liveParameters'].angleOffsetAverage)
plan_send.lateralPlan.mpcSolutionValid = bool(plan_solution_valid)
plan_send.lateralPlan.desire = self.desire
plan_send.lateralPlan.laneChangeState = self.lane_change_state
plan_send.lateralPlan.laneChangeDirection = self.lane_change_direction
pm.send('lateralPlan', plan_send)
if LOG_MPC:
dat = messaging.new_message('liveMpc')
dat.liveMpc.x = list(self.mpc_solution[0].x)
dat.liveMpc.y = list(self.mpc_solution[0].y)
dat.liveMpc.psi = list(self.mpc_solution[0].psi)
dat.liveMpc.tire_angle = list(self.mpc_solution[0].tire_angle)
dat.liveMpc.cost = self.mpc_solution[0].cost
pm.send('liveMpc', dat)
| [
"math.isnan",
"common.params.Params",
"selfdrive.controls.lib.lane_planner.LanePlanner",
"math.radians",
"common.numpy_fast.interp",
"numpy.zeros",
"selfdrive.controls.lib.lateral_mpc.libmpc_py.ffi.new",
"common.realtime.sec_since_boot",
"os.environ.get",
"numpy.arange",
"numpy.array",
"numpy.... | [((652, 684), 'os.environ.get', 'os.environ.get', (['"""LOG_MPC"""', '(False)'], {}), "('LOG_MPC', False)\n", (666, 684), False, 'import os\n'), ((1755, 1768), 'selfdrive.controls.lib.lane_planner.LanePlanner', 'LanePlanner', ([], {}), '()\n', (1766, 1768), False, 'from selfdrive.controls.lib.lane_planner import LanePlanner, TRAJECTORY_SIZE\n'), ((2248, 2278), 'numpy.zeros', 'np.zeros', (['(TRAJECTORY_SIZE, 3)'], {}), '((TRAJECTORY_SIZE, 3))\n', (2256, 2278), True, 'import numpy as np\n'), ((2298, 2326), 'numpy.zeros', 'np.zeros', (['(TRAJECTORY_SIZE,)'], {}), '((TRAJECTORY_SIZE,))\n', (2306, 2326), True, 'import numpy as np\n'), ((2345, 2371), 'numpy.arange', 'np.arange', (['TRAJECTORY_SIZE'], {}), '(TRAJECTORY_SIZE)\n', (2354, 2371), True, 'import numpy as np\n'), ((2389, 2414), 'numpy.zeros', 'np.zeros', (['TRAJECTORY_SIZE'], {}), '(TRAJECTORY_SIZE)\n', (2397, 2414), True, 'import numpy as np\n'), ((2583, 2611), 'selfdrive.controls.lib.lateral_mpc.libmpc_py.ffi.new', 'libmpc_py.ffi.new', (['"""log_t *"""'], {}), "('log_t *')\n", (2600, 2611), False, 'from selfdrive.controls.lib.lateral_mpc import libmpc_py\n'), ((2633, 2663), 'selfdrive.controls.lib.lateral_mpc.libmpc_py.ffi.new', 'libmpc_py.ffi.new', (['"""state_t *"""'], {}), "('state_t *')\n", (2650, 2663), False, 'from selfdrive.controls.lib.lateral_mpc import libmpc_py\n'), ((7788, 7856), 'common.numpy_fast.interp', 'interp', (['DT_MDL', 'self.t_idxs[:MPC_N + 1]', 'self.mpc_solution.curvature'], {}), '(DT_MDL, self.t_idxs[:MPC_N + 1], self.mpc_solution.curvature)\n', (7794, 7856), False, 'from common.numpy_fast import interp\n'), ((7999, 8066), 'common.numpy_fast.interp', 'interp', (['delay', 'self.t_idxs[:MPC_N + 1]', 'self.mpc_solution.curvature'], {}), '(delay, self.t_idxs[:MPC_N + 1], self.mpc_solution.curvature)\n', (8005, 8066), False, 'from common.numpy_fast import interp\n'), ((8075, 8136), 'common.numpy_fast.interp', 'interp', (['delay', 'self.t_idxs[:MPC_N + 1]', 
'self.mpc_solution.psi'], {}), '(delay, self.t_idxs[:MPC_N + 1], self.mpc_solution.psi)\n', (8081, 8136), False, 'from common.numpy_fast import interp\n'), ((9153, 9169), 'common.realtime.sec_since_boot', 'sec_since_boot', ([], {}), '()\n', (9167, 9169), False, 'from common.realtime import sec_since_boot, DT_MDL\n'), ((9753, 9789), 'cereal.messaging.new_message', 'messaging.new_message', (['"""lateralPlan"""'], {}), "('lateralPlan')\n", (9774, 9789), True, 'import cereal.messaging as messaging\n'), ((3651, 3713), 'numpy.column_stack', 'np.column_stack', (['[md.position.x, md.position.y, md.position.z]'], {}), '([md.position.x, md.position.y, md.position.z])\n', (3666, 3713), True, 'import numpy as np\n'), ((3734, 3757), 'numpy.array', 'np.array', (['md.position.t'], {}), '(md.position.t)\n', (3742, 3757), True, 'import numpy as np\n'), ((7084, 7118), 'numpy.linalg.norm', 'np.linalg.norm', (['d_path_xyz'], {'axis': '(1)'}), '(d_path_xyz, axis=1)\n', (7098, 7118), True, 'import numpy as np\n'), ((7196, 7233), 'numpy.linalg.norm', 'np.linalg.norm', (['self.path_xyz'], {'axis': '(1)'}), '(self.path_xyz, axis=1)\n', (7210, 7233), True, 'import numpy as np\n'), ((10806, 10838), 'cereal.messaging.new_message', 'messaging.new_message', (['"""liveMpc"""'], {}), "('liveMpc')\n", (10827, 10838), True, 'import cereal.messaging as messaging\n'), ((3393, 3465), 'math.radians', 'math.radians', (['(steering_wheel_angle_deg - steering_wheel_angle_offset_deg)'], {}), '(steering_wheel_angle_deg - steering_wheel_angle_offset_deg)\n', (3405, 3465), False, 'import math\n'), ((9093, 9106), 'math.isnan', 'math.isnan', (['x'], {}), '(x)\n', (9103, 9106), False, 'import math\n'), ((9404, 9447), 'selfdrive.swaglog.cloudlog.warning', 'cloudlog.warning', (['"""Lateral mpc - nan: True"""'], {}), "('Lateral mpc - nan: True')\n", (9420, 9447), False, 'from selfdrive.swaglog import cloudlog\n'), ((1930, 1938), 'common.params.Params', 'Params', ([], {}), '()\n', (1936, 1938), False, 'from 
common.params import Params\n'), ((8969, 9013), 'math.degrees', 'math.degrees', (['(desired_curvature_rate * VM.sR)'], {}), '(desired_curvature_rate * VM.sR)\n', (8981, 9013), False, 'import math\n'), ((8821, 8860), 'math.degrees', 'math.degrees', (['(curvature_desired * VM.sR)'], {}), '(curvature_desired * VM.sR)\n', (8833, 8860), False, 'import math\n')] |
import numpy as np
def softmax_loss_naive(W, X, y, reg):
"""
Softmax loss function, naive implementation (with loops)
Inputs have dimension D, there are C classes, and we operate on minibatches
of N examples.
Inputs:
- W: A numpy array of shape (D, C) containing weights.
- X: A numpy array of shape (N, D) containing a minibatch of data.
- y: A numpy array of shape (N,) containing training labels; y[i] = c means
that X[i] has label c, where 0 <= c < C.
- reg: (float) regularization strength
Returns a tuple of:
- loss as single float
- gradient with respect to weights W; an array of same shape as W
"""
# Initialize the loss and gradient to zero.
loss = 0.0
dW = np.zeros_like(W)
#############################################################################
# Compute the softmax loss and its gradient using explicit loops. #
# Store the loss in loss and the gradient in dW. If you are not careful #
# here, it is easy to run into numeric instability. Don't forget the #
# regularization! #
#############################################################################
# *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****
N = X.shape[0]
C = W.shape[1]
for i in range(N):
scores = X[i].dot(W)
# 直接使用会过大,进行标准化
stable_scores = scores - np.max(scores)
stable_scores = np.exp(stable_scores)
correct_scores = stable_scores[y[i]]
loss_i = -np.log(correct_scores / np.sum(stable_scores))
loss += loss_i
# 计算梯度
dScores = np.zeros(scores.shape)
dScores = stable_scores / np.sum(stable_scores)
dScores[y[i]] -= 1
dW += X[i][:, np.newaxis].dot(dScores[np.newaxis, :]) # 两个一维向量相乘成一个矩阵,所以这么写
loss = loss / N + reg * np.sum(W * W)
dW = dW / N + 2 * reg * W
# *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****
return loss, dW
def softmax_loss_vectorized(W, X, y, reg):
"""
Softmax loss function, vectorized version.
Inputs and outputs are the same as softmax_loss_naive.
"""
# Initialize the loss and gradient to zero.
loss = 0.0
dW = np.zeros_like(W)
#############################################################################
# Compute the softmax loss and its gradient using no explicit loops. #
# Store the loss in loss and the gradient in dW. If you are not careful #
# here, it is easy to run into numeric instability. Don't forget the #
# regularization! #
#############################################################################
# *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****
N = X.shape[0]
C = W.shape[1]
scores = X.dot(W)
stable_scores = scores - np.max(scores, axis=1, keepdims=True)
stable_scores = np.exp(stable_scores)
loss = np.sum(
-np.log(stable_scores[np.arange(N), y] / np.sum(stable_scores, axis=1))) # 这里np.sum不能keep_dims,否则广播会出错
loss = loss / N + reg * np.sum(W * W)
# 计算梯度
dScores = stable_scores / np.sum(stable_scores, axis=1, keepdims=True)
dScores[np.arange(N), y] -= 1
dScores /= N
dW = X.T.dot(dScores)
dW = dW + 2 * reg * W
# *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****
return loss, dW
| [
"numpy.zeros_like",
"numpy.sum",
"numpy.zeros",
"numpy.max",
"numpy.arange",
"numpy.exp"
] | [((743, 759), 'numpy.zeros_like', 'np.zeros_like', (['W'], {}), '(W)\n', (756, 759), True, 'import numpy as np\n'), ((2281, 2297), 'numpy.zeros_like', 'np.zeros_like', (['W'], {}), '(W)\n', (2294, 2297), True, 'import numpy as np\n'), ((3000, 3021), 'numpy.exp', 'np.exp', (['stable_scores'], {}), '(stable_scores)\n', (3006, 3021), True, 'import numpy as np\n'), ((1501, 1522), 'numpy.exp', 'np.exp', (['stable_scores'], {}), '(stable_scores)\n', (1507, 1522), True, 'import numpy as np\n'), ((1689, 1711), 'numpy.zeros', 'np.zeros', (['scores.shape'], {}), '(scores.shape)\n', (1697, 1711), True, 'import numpy as np\n'), ((2942, 2979), 'numpy.max', 'np.max', (['scores'], {'axis': '(1)', 'keepdims': '(True)'}), '(scores, axis=1, keepdims=True)\n', (2948, 2979), True, 'import numpy as np\n'), ((3236, 3280), 'numpy.sum', 'np.sum', (['stable_scores'], {'axis': '(1)', 'keepdims': '(True)'}), '(stable_scores, axis=1, keepdims=True)\n', (3242, 3280), True, 'import numpy as np\n'), ((1462, 1476), 'numpy.max', 'np.max', (['scores'], {}), '(scores)\n', (1468, 1476), True, 'import numpy as np\n'), ((1746, 1767), 'numpy.sum', 'np.sum', (['stable_scores'], {}), '(stable_scores)\n', (1752, 1767), True, 'import numpy as np\n'), ((1909, 1922), 'numpy.sum', 'np.sum', (['(W * W)'], {}), '(W * W)\n', (1915, 1922), True, 'import numpy as np\n'), ((3181, 3194), 'numpy.sum', 'np.sum', (['(W * W)'], {}), '(W * W)\n', (3187, 3194), True, 'import numpy as np\n'), ((3293, 3305), 'numpy.arange', 'np.arange', (['N'], {}), '(N)\n', (3302, 3305), True, 'import numpy as np\n'), ((1610, 1631), 'numpy.sum', 'np.sum', (['stable_scores'], {}), '(stable_scores)\n', (1616, 1631), True, 'import numpy as np\n'), ((3090, 3119), 'numpy.sum', 'np.sum', (['stable_scores'], {'axis': '(1)'}), '(stable_scores, axis=1)\n', (3096, 3119), True, 'import numpy as np\n'), ((3071, 3083), 'numpy.arange', 'np.arange', (['N'], {}), '(N)\n', (3080, 3083), True, 'import numpy as np\n')] |
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
path = '/Users/mac/Downloads/frametest/csv/Coming.Home/'
df = pd.read_csv(path + 'palette_array_cr.csv')
percentage = np.array(df.loc[:, 'percentage1'].fillna(1.))
c = df.hex1.fillna('#000000')
# percentage = np.array(df.loc[:, 'percentage1'])
# c = df.hex1
fig, ax = plt.subplots(subplot_kw=dict(polar=True))
fig.set_size_inches(10, 10, forward=True)
size = 0.3
vals = np.arange(1, len(df)+1)
# normalize vals to 2 pi
valsnorm = vals/np.sum(vals)*2*np.pi
# obtain the ordinates of the bar edges
valsleft = np.cumsum(np.append(0, valsnorm.flatten()[:-1])).reshape(vals.shape)
ax.bar(x=valsleft.flatten(),
width=valsnorm.flatten(), bottom=1-2.75*size, height=size*percentage,
color=c, edgecolor='w', linewidth=0, align="edge")
max_per = max(percentage)
max_height = size * max_per + 1 - 2.75*size
plt.ylim((0, 0.475))
plt.xticks([0], ('00:00',))
plt.yticks([0.475, ], ('100%',))
ax.set_theta_zero_location("N")
ax.set_theta_direction(-1)
# ax.set(title="")
# ax.set_axis_off()
# plt.show()
plt.savefig(path + 'dominant_bar_plot_1st.png')
| [
"numpy.sum",
"matplotlib.pyplot.ylim",
"pandas.read_csv",
"matplotlib.pyplot.yticks",
"matplotlib.pyplot.xticks",
"matplotlib.pyplot.savefig"
] | [((135, 177), 'pandas.read_csv', 'pd.read_csv', (["(path + 'palette_array_cr.csv')"], {}), "(path + 'palette_array_cr.csv')\n", (146, 177), True, 'import pandas as pd\n'), ((890, 910), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(0, 0.475)'], {}), '((0, 0.475))\n', (898, 910), True, 'import matplotlib.pyplot as plt\n'), ((911, 938), 'matplotlib.pyplot.xticks', 'plt.xticks', (['[0]', "('00:00',)"], {}), "([0], ('00:00',))\n", (921, 938), True, 'import matplotlib.pyplot as plt\n'), ((939, 969), 'matplotlib.pyplot.yticks', 'plt.yticks', (['[0.475]', "('100%',)"], {}), "([0.475], ('100%',))\n", (949, 969), True, 'import matplotlib.pyplot as plt\n'), ((1085, 1132), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(path + 'dominant_bar_plot_1st.png')"], {}), "(path + 'dominant_bar_plot_1st.png')\n", (1096, 1132), True, 'import matplotlib.pyplot as plt\n'), ((512, 524), 'numpy.sum', 'np.sum', (['vals'], {}), '(vals)\n', (518, 524), True, 'import numpy as np\n')] |
# coding: utf-8
"""
Defines the DEQATN class and sub-functions.
The capitalization of the sub-functions is important.
"""
from __future__ import annotations
from typing import TYPE_CHECKING
import numpy as np
from numpy import (
cos, sin, tan, log, log10, mean, exp, sqrt, square, mod, abs, sum,
arcsin as asin, arccos as acos, arctan as atan, arctan2 as atan2,
arcsinh as asinh, arccosh as acosh, arctanh as atanh)
# atan2h
from numpy.linalg import norm # type: ignore
from pyNastran.bdf.cards.base_card import BaseCard
from pyNastran.bdf.cards.deqatn import lines_to_eqs
if TYPE_CHECKING: # pragma: no cover
from pyNastran.bdf.bdf import BDF
def pi(num):
"""weird way to multiply p by a number"""
return np.pi * num
def rss(*args): # good
"""2-norm; generalized magnitude of vector for N components"""
return norm(args)
def avg(*args):
"""average"""
return np.mean(args)
def ssq(*args):
"""sum of squares"""
return np.square(args).sum()
def logx(x, y):
"""log base_x(y)"""
return np.log(y**x) / np.log(x)
def dim(x, y):
"""positive difference"""
return x - min(x, y)
def db(p, pref):
"""sound pressure in decibels"""
return 20. * np.log(p / pref)
def atan2h(x, y):
raise NotImplementedError()
def invdb(dbi, pref):
"""inverse Db"""
return 10. ** (dbi / 20. + log(pref))
def dba(p, pref, f):
"""
sound pressure in decibels (perceived)
Parameters
----------
p : float
structural responses or acoustic pressure
f : float
forcing frequency
pref : float
reference pressure
Returns
-------
dbi : float
acoustic pressure in Decibels
"""
ta1, ta2 = _get_ta(f)
return 20. * np.log(p / pref) + 10 * log(ta1) + 10. * log(ta2)
def invdba(dbai, pref, f):
"""
Inverse Dba
Parameters
----------
dbai : float
acoustic pressure in Decibels (perceived)
f : float
forcing frequency
pref : float
reference pressure
Returns
-------
p : float
structural responses or acoustic pressure
"""
ta1, ta2 = _get_ta(f)
#dbai = dba(p, pref, f)
return 10. ** ((dbai - 10. * log(ta1) - 10. * log(ta2))/20)
def _get_ta(f):
"""gets the factors for dba, invdba"""
k1 = 2.242882e16
k3 = 1.562339
p1 = 20.598997
p2 = 107.65265
p3 = 737.86223
p4 = 12194.22
ta1 = k3 * f**4 / ((f**2 + p2**2) * (f**2 + p3**2))
ta2 = k1 * f**4 / ((f**2 + p1**2)**2 * (f**2 + p4**2)**2)
return ta1, ta2
class DEQATN(BaseCard): # needs work...
"""
Design Equation Definition
Defines one or more equations for use in design sensitivity analysis.
+--------+------+-----+-----+-----+-----+-------+-----+
| 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 |
+========+======+=====+=====+=====+=====+=======+=====+
| DEQATN | EQID | EQUATION |
+--------+------+-------------------------------------+
| | EQUATION (cont.) |
+--------+--------------------------------------------+
"""
type = 'DEQATN'
def __init__(self, equation_id, eqs, comment=''):
"""
Creates a DEQATN card
Parameters
----------
equation_id : int
the id of the equation
eqs : List[str]
the equations, which may overbound the field
split them by a semicolon (;)
comment : str; default=''
a comment for the card
DEQATN 41 F1(A,B,C,D,R) = A+B *C–(D**3 + 10.0) + sin(PI(1) * R)
+ A**2 / (B - C); F = A + B - F1 * D
def F1(A, B, C, D, R):
F1 = A+B *C-(D**3 + 10.0) + sin(PI(1) * R) + A**2 / (B – C)
F = A + B - F1 * D
return F
eqs = [
'F1(A,B,C,D,R) = A+B *C–(D**3 + 10.0) + sin(PI(1) * R) + A**2 / (B – C)',
'F = A + B – F1 * D',
]
>>> deqatn = DEQATN(41, eq, comment='')
"""
if comment:
self.comment = comment
self.model = None
#self.dtable = None
self.func = None
#self.name = name
self.equation_id = equation_id
self.eqs = eqs
self.func_str = ''
@classmethod
def add_card(cls, card, comment=''):
"""
Adds a DEQATN card from ``BDF.add_card(...)``
Parameters
----------
card : List[str]
this card is special and is not a ``BDFCard`` like other cards
comment : str; default=''
a comment for the card
"""
#print(card)
line0 = card[0]
if '\t' in line0:
line0 = line0.expandtabs()
name_eqid = line0[:16]
#print('name_eqid = %r' % name_eqid)
assert ',' not in name_eqid, name_eqid
try:
name, eq_id = name_eqid.split()
assert name.strip().upper() == 'DEQATN', card
except ValueError:
msg = 'cannot split %r\n' % name_eqid
msg += "Expected data of the form 'DEQATN 100'\n"
msg += 'card=%s' % card
raise ValueError(msg)
equation_id = int(eq_id)
# combine the equations into a single organized block
line0_eq = line0[16:]
eqs_temp = [line0_eq] + card[1:]
eqs = lines_to_eqs(eqs_temp)
return DEQATN(equation_id, eqs, comment=comment)
def _setup_equation(self):
"""
creates an executable equation object from self.eqs
x = 10.
>>> deqatn.func(x)
42.0
>>> deqatn.func_str
def stress(x):
x = float(x)
return x + 32.
"""
default_values = {}
dtable_ref = self.model.dtable
if dtable_ref is not None:
default_values = dtable_ref.default_values
func_name, nargs, func_str = fortran_to_python(
self.eqs, default_values, str(self))
self.func_str = func_str
self.func_name = func_name
exec(func_str)
#print(locals().keys())
func = locals()[func_name]
setattr(self, func_name, func)
#print(func)
self.func = func
self.nargs = nargs
def cross_reference(self, model: BDF) -> None:
"""
Cross links the card so referenced cards can be extracted directly
Parameters
----------
model : BDF()
the BDF object
"""
self.model = model
# TODO: get defaults from DTABLE
# TODO: get limits from DCONSTR
#self.dtable = model.dtable
#self.dtable_ref = self.dtable
self._setup_equation()
def uncross_reference(self) -> None:
del self.model
del self.func
del self.f
# del getattr(self, self.func_name)
del self.func_name
del self.nargs
#del self.dtable
def evaluate(self, *args):
"""Makes a call to self.func"""
#args2 = args[:self.nargs]
#print('args =', args2)
if len(args) > self.nargs:
msg = 'len(args) > nargs\n'
msg += 'nargs=%s len(args)=%s; func_name=%s' % (
self.nargs, len(args), self.func_name)
raise RuntimeError(msg)
return self.func(*args)
#self.func(*args)
def raw_fields(self):
return [self.write_card()]
def repr_fields(self):
return self.raw_fields()
def write_card(self, size: int=8, is_double: bool=False) -> str:
#self.evaluate(1, 2)
eqs = split_equations(self.eqs)
equation_line0 = eqs[0]
#assert len(equation_line0) <= 56, equation_line0
msg = 'DEQATN %-8i%-56s\n' % (self.equation_id, equation_line0)
assert len(equation_line0) <= 56, equation_line0
for eq in eqs[1:]:
msg += ' %-64s\n' % eq
assert len(eq) <= 64, eq
#print(msg)
return msg
def split_equations(lines):
"""takes an overbounded DEQATN card and shortens it"""
# first line must be < 56
# second line may be < 64
lines2 = []
for i, line in enumerate(lines):
#print('-------------------------')
# we'll add ; to the end of each line
if i == 0:
lines2 += _split_equation([], line.strip() + ';', 56)
else:
lines2 += _split_equation([], line.strip() + ';', 64)
# remove the trailing semicolon
lines2[-1] = lines2[-1][:-1]
return lines2
def _split_equation(lines_out, line, n, isplit=0):
"""
Takes an overbounded DEQATN line and shortens it using recursion
Parameters
----------
lines_out : List[str]
len(lines) = 0 : first iteration
len(lines) = 1 : second iteration
line : str
the line to split
n : int
the maximum number of characters allowed
the first line of the DEQATN has a different number of fields
allowed vs. subsequent lines
isplit : int; default=0
the number of levels deep in the recursive function we are
Returns
-------
lines_out : List[str]
the long line broken into shorter lines
"""
#print('n=%s -> line=%r len=%s' % (n, line, len(line)))
if len(line) <= n:
lines_out.append(line.strip())
return lines_out
# equation must be split
line0 = line[:n][::-1].replace('**', '^')
# fore, aft = line0.split('+-()*', 1)
#print('line0 = %r; len=%s' % (str(line0[::-1]), len(line0)))
out = {}
for operator in ('+', '*', '^', '-', ')', ',', '='):
if operator in line0:
i = line0.index(operator)
out[i] = operator
try:
imin = min(out)
except ValueError:
msg = "Couldn't find an operator ()+-/*= in %r\n" % line[n:]
msg += 'line = %r' % line
raise ValueError(msg)
operator = out[imin]
#print('operator = %r' % operator)
fore, aft = line0.split(operator, 1)
i = len(aft) + 1
line_out = line[:i]
#print('appending %r; len=%s' % (line_out, len(line_out)))
#print('fore = %r' % fore[::-1])
#print('aft = %r' % aft[::-1])
lines_out.append(line_out.replace('^', '**').strip())
isplit += 1
if isplit > 10:
raise RuntimeError()
lines_out = _split_equation(lines_out, line[i:], n, isplit+1)
return lines_out
def fortran_to_python_short(line, default_values):
"""the function used by the DRESP2"""
func_str = 'def func(args):\n'
func_str += ' return %s(args)\n' % line.strip()
d = {}
exec(func_str, globals(), d)
return d['func']
def fortran_to_python(lines, default_values, comment=''):
"""
Creates the python function
Parameters
----------
lines : List[str]
the equations to write broken up by statement
default_values : dict[name] = value
the default values from the DTABLE card
def f(x, y=10.):
'''
$ deqatn
DEQATN 1000 f(x,y) = x+y
'''
try:
if isinstance(x, (int, float, str)):
x = float(x)
if isinstance(y, (int, float, str)):
y = float(y)
except Exception:
print(locals())
raise
f = x + y
return f
"""
msg = ''
variables = []
assert len(lines) > 0, lines
for i, line in enumerate(lines):
#print('--------------------')
line = line.lower()
try:
# f(x, y) = 10.
# f(x, y) = abs(x) + y
# f = 42.
f, eq = line.split('=')
except Exception:
if '=' not in line:
raise SyntaxError('= not found in %r' % (line))
else:
msg = 'only 1 = sign may be found a line\n'
msg += 'line = %r\n' % line
if len(lines) > 1:
msg += 'lines:\n%s' % '\n'.join(lines)
raise SyntaxError(msg)
f = f.strip()
eq = eq.strip().rstrip(';')
#print('f=%r eq=%r' % (f, eq))
if i == 0:
func_name, f, msg, out, variables = write_function_header(
f, eq, default_values, comment)
#print(msg)
else:
out = f
msg += ' %s = %s\n' % (out, eq)
msg += ' return %s' % f
#print(msg)
nargs = len(variables)
return func_name, nargs, msg
def write_function_header(f, eq, default_values, comment=''):
"""
initializes the python function
def f(x, y=10.):
'''
$ deqatn
DEQATN 1000 f(x,y) = x+y
'''
try:
if isinstance(x, (int, float, str)):
x = float(x)
if isinstance(y, (int, float, str)):
y = float(y)
except Exception:
print(locals())
raise
Parameters
----------
f : str
the function header
f(a, b, c)
eq : str
the value on the other side of the equals sign (f=eq)
1.
max(a, b, c)
default_values : dict[name] = value
the default values from the DTABLE card
Returns
-------
func_name : str
the name of the function ``f``
msg : str
see above
variables : List[str]
the variables used by the equation header
a, b, c
"""
msg = ''
out = ''
try:
float(eq)
is_float = True
except ValueError:
is_float = False
if is_float:
#print('float', eq)
func_name, arguments = f.strip('(,)').split('(')
func_name = func_name.strip(' ')
variables = arguments.split(',')
#print('func_name=%r' % func_name)
#val = float(eq)
msg += _write_function_line(func_name, variables, default_values)
msg += _write_comment(comment)
msg += _write_variables(variables)
msg += ' %s = %s\n' % (func_name, eq)
else:
#print('not float', eq)
#print(eq)
#asdf
func_name, arguments = f.strip('(,)').split('(')
func_name = func_name.strip(' ')
variables = arguments.split(',')
#msg += 'def %s:\n' % f
msg += _write_function_line(func_name, variables, default_values)
msg += _write_comment(comment)
msg += _write_variables(variables)
#for var in variables:
#msg += ' %s = float(%s)\n' % (var, var)
#print(msg)
#is_eq_defined = True
#print('out = %r' % out)
#print('func_name = %r' % func_name)
#print('eq = %r' % eq)
#out += eq
msg += ' %s = %s\n' % (func_name, eq)
#f = eq
return func_name, f, msg, out, variables
def _write_function_line(func_name, variables, default_values):
"""writes the ``def f(x, y, z=1.):`` part of the function"""
vals = []
is_default = False
#print('default_values = %s' % default_values)
for var in variables:
if var in default_values:
vals.append('%s=%s' % (var, default_values[var]))
is_default = True
else:
vals.append('%s' % (var))
if is_default:
msg = 'default variables must be set at the end of the function\n'
msg += 'variables = %s\n' % variables
msg += 'default_values = %s' % default_values
raise RuntimeError(msg)
vals2 = ', '.join(vals)
msg = 'def %s(%s):\n' % (func_name, vals2)
return msg
def _write_comment(comment):
"""writes the deqatn to the comment block"""
lines = comment.split('\n')
msgi = '\n '.join(lines)
msg = ' """\n %s"""\n' % msgi
return msg
def _write_variables(variables):
"""type checks the inputs"""
msg = ' try:\n'
for var in variables:
#msg += " assert isinstance(%s, float), '%s is not a float; type(%s)=%s' % (%s)")
#msg += ' %s = float(%s)\n' % (var, var)
msg += ' if isinstance(%s, (int, float, str)):\n' % var
msg += ' %s = float(%s)\n' % (var, var)
msg += ' except Exception:\n'
msg += ' print(locals())\n'
msg += ' raise\n'
return msg
| [
"numpy.log",
"numpy.square",
"pyNastran.bdf.cards.deqatn.lines_to_eqs",
"numpy.mean",
"numpy.linalg.norm"
] | [((851, 861), 'numpy.linalg.norm', 'norm', (['args'], {}), '(args)\n', (855, 861), False, 'from numpy.linalg import norm\n'), ((908, 921), 'numpy.mean', 'np.mean', (['args'], {}), '(args)\n', (915, 921), True, 'import numpy as np\n'), ((1049, 1063), 'numpy.log', 'np.log', (['(y ** x)'], {}), '(y ** x)\n', (1055, 1063), True, 'import numpy as np\n'), ((1064, 1073), 'numpy.log', 'np.log', (['x'], {}), '(x)\n', (1070, 1073), True, 'import numpy as np\n'), ((1217, 1233), 'numpy.log', 'np.log', (['(p / pref)'], {}), '(p / pref)\n', (1223, 1233), True, 'import numpy as np\n'), ((5397, 5419), 'pyNastran.bdf.cards.deqatn.lines_to_eqs', 'lines_to_eqs', (['eqs_temp'], {}), '(eqs_temp)\n', (5409, 5419), False, 'from pyNastran.bdf.cards.deqatn import lines_to_eqs\n'), ((975, 990), 'numpy.square', 'np.square', (['args'], {}), '(args)\n', (984, 990), True, 'import numpy as np\n'), ((1360, 1369), 'numpy.log', 'log', (['pref'], {}), '(pref)\n', (1363, 1369), False, 'from numpy import cos, sin, tan, log, log10, mean, exp, sqrt, square, mod, abs, sum, arcsin as asin, arccos as acos, arctan as atan, arctan2 as atan2, arcsinh as asinh, arccosh as acosh, arctanh as atanh\n'), ((1794, 1802), 'numpy.log', 'log', (['ta2'], {}), '(ta2)\n', (1797, 1802), False, 'from numpy import cos, sin, tan, log, log10, mean, exp, sqrt, square, mod, abs, sum, arcsin as asin, arccos as acos, arctan as atan, arctan2 as atan2, arcsinh as asinh, arccosh as acosh, arctanh as atanh\n'), ((1753, 1769), 'numpy.log', 'np.log', (['(p / pref)'], {}), '(p / pref)\n', (1759, 1769), True, 'import numpy as np\n'), ((1777, 1785), 'numpy.log', 'log', (['ta1'], {}), '(ta1)\n', (1780, 1785), False, 'from numpy import cos, sin, tan, log, log10, mean, exp, sqrt, square, mod, abs, sum, arcsin as asin, arccos as acos, arctan as atan, arctan2 as atan2, arcsinh as asinh, arccosh as acosh, arctanh as atanh\n'), ((2238, 2246), 'numpy.log', 'log', (['ta2'], {}), '(ta2)\n', (2241, 2246), False, 'from numpy import cos, sin, tan, 
log, log10, mean, exp, sqrt, square, mod, abs, sum, arcsin as asin, arccos as acos, arctan as atan, arctan2 as atan2, arcsinh as asinh, arccosh as acosh, arctanh as atanh\n'), ((2221, 2229), 'numpy.log', 'log', (['ta1'], {}), '(ta1)\n', (2224, 2229), False, 'from numpy import cos, sin, tan, log, log10, mean, exp, sqrt, square, mod, abs, sum, arcsin as asin, arccos as acos, arctan as atan, arctan2 as atan2, arcsinh as asinh, arccosh as acosh, arctanh as atanh\n')] |
import matplotlib.pyplot as plt
import numpy as np

# Pie chart of fruit shares, with one custom color per slice.
sizes = np.array([35, 25, 25, 15])
slice_labels = ["Apples 35%", "Bananas 25%", "Cherries 25%", "Dates 15%"]
slice_colors = ["black", "hotpink", "b", "#4CAF50"]

plt.pie(sizes, labels=slice_labels, colors=slice_colors)
plt.show()
| [
"matplotlib.pyplot.pie",
"numpy.array",
"matplotlib.pyplot.show"
] | [((56, 82), 'numpy.array', 'np.array', (['[35, 25, 25, 15]'], {}), '([35, 25, 25, 15])\n', (64, 82), True, 'import numpy as np\n'), ((202, 246), 'matplotlib.pyplot.pie', 'plt.pie', (['y'], {'labels': 'mylabels', 'colors': 'mycolors'}), '(y, labels=mylabels, colors=mycolors)\n', (209, 246), True, 'import matplotlib.pyplot as plt\n'), ((251, 261), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (259, 261), True, 'import matplotlib.pyplot as plt\n')] |
#!/usr/bin/env python
"""
GUI Frame for XRF display, reading larch MCA group
"""
import sys
import os
import time
import copy
from functools import partial
import wx
import wx.lib.mixins.inspection
import wx.lib.scrolledpanel as scrolled
import wx.dataview as dv
import wx.lib.colourselect as csel
try:
from wx._core import PyDeadObjectError
except:
PyDeadObjectError = Exception
import numpy as np
import matplotlib
from matplotlib.ticker import LogFormatter, FuncFormatter
from wxmplot import PlotPanel
from wxutils import (SimpleText, EditableListBox, Font, pack, Popup,
get_icon, SetTip, Button, Check, MenuItem, Choice,
FileOpen, FileSave, fix_filename, HLine, GridPanel,
CEN, LEFT, RIGHT)
from ..math import index_of
from ..utils import bytes2str, debugtime
from ..io import GSEMCA_File
from ..site_config import icondir
from ..interpreter import Interpreter
from .larchframe import LarchFrame
from .periodictable import PeriodicTablePanel
from .xrfdisplay_utils import (XRFCalibrationFrame, ColorsFrame,
XrayLinesFrame, XRFDisplayConfig, XRFGROUP,
MAKE_XRFGROUP_CMD, next_mcaname)
from .xrfdisplay_fitpeaks import FitSpectraFrame
# Wildcard string for MCA file open/save dialogs.
FILE_WILDCARDS = "MCA File (*.mca)|*.mca|All files (*.*)|*.*"
# Message shown when the user opens a file that has already been read.
FILE_ALREADY_READ = """The File
'%s'
has already been read.
"""
# Window icon filename; joined with `icondir` in init_larch().
ICON_FILE = 'ptable.ico'
# Larch command template recorded when an MCA file is loaded.
# NOTE(review): the template starts with '# ', so larch records it as a
# comment rather than executing it -- confirm that is intended.
read_mcafile = "# {group:s}.{name:s} = read_gsemca('{filename:s}')"
def txt(panel, label, size=75, colour=None, font=None, style=None):
    """Create a SimpleText label with this app's default colour and style."""
    if style is None:
        style = wx.ALIGN_LEFT | wx.ALL | wx.GROW
    if colour is None:
        colour = wx.Colour(0, 0, 50)
    widget = SimpleText(panel, label, size=(size, -1), colour=colour,
                        style=style)
    if font is not None:
        widget.SetFont(font)
    return widget
def lin(panel, len=30, wid=2, style=wx.LI_HORIZONTAL):
    """Return a static separator line (horizontal by default).

    Note: the parameter name `len` shadows the builtin, but is kept
    unchanged for keyword-argument compatibility with existing callers.
    """
    line = wx.StaticLine(panel, size=(len, wid), style=style)
    return line
class XRFDisplayFrame(wx.Frame):
_about = """XRF Spectral Viewer
<NAME> <<EMAIL>>
"""
main_title = 'XRF Display'
    def __init__(self, _larch=None, parent=None, filename=None,
                 size=(725, 450), axissize=None, axisbg=None,
                 title='XRF Display', exit_callback=None,
                 output_title='XRF', **kws):
        """Create the XRF display frame.

        Parameters: `_larch` (an Interpreter when launched from a larch
        shell, else None), parent window, optional MCA `filename` to load
        immediately, frame `size` and `title`, and an `exit_callback`
        invoked when the frame closes.
        """
        if size is None: size = (725, 450)
        wx.Frame.__init__(self, parent=parent,
                          title=title, size=size, **kws)
        self.conf = XRFDisplayConfig()
        self.subframes = {}
        self.data = None
        self.title = title
        self.plotframe = None
        self.wids = {}
        self.larch = _larch
        if isinstance(self.larch, Interpreter):  # called from shell
            self.larch_buffer = None
        else:
            # standalone: reuse the parent's LarchFrame, or create one
            self.larch_buffer = parent
            if not isinstance(parent, LarchFrame):
                self.larch_buffer = LarchFrame(_larch=self.larch,
                                              is_standalone=False)
                self.larch_buffer.Show()
                self.larch_buffer.Raise()
                self.larch_buffer.Hide()
            self.subframes['larchframe'] = self.larch_buffer
            self.larch = self.larch_buffer.larchshell
        self.init_larch()
        self.exit_callback = exit_callback
        # ROI selection / highlight state
        self.roi_patch = None
        self.selected_roi = None
        self.roilist_sel = None
        self.selected_elem = None
        # primary spectrum and optional background spectrum
        self.mca = None
        self.mca2 = None
        # placeholder data until a real spectrum is loaded
        self.xdata = np.arange(2048)*0.01
        self.ydata = np.ones(2048)*1.e-4
        self.x2data = None
        self.y2data = None
        self.rois_shown = False
        self.mca_index = 0
        # X-ray line markers drawn on the plot
        self.major_markers = []
        self.minor_markers = []
        self.hold_markers = []
        self.hold_lines = None
        self.saved_lines = None
        self.energy_for_zoom = None
        self.xview_range = None
        self.show_yaxis = False
        # channel indices of the left/right cursor markers
        self.xmarker_left = None
        self.xmarker_right = None
        self.highlight_xrayline = None
        self.highlight_xrayline = None  # NOTE(review): duplicated assignment
        self.cursor_markers = [None, None]
        self.ylog_scale = True
        self.SetTitle("%s: %s " % (self.main_title, title))
        self._menus = []
        self.createMainPanel()
        self.createMenus()
        self.SetFont(Font(9, serif=True))
        # 4-field status bar: general, L marker, R marker, marker-range counts
        self.statusbar = self.CreateStatusBar(4)
        self.statusbar.SetStatusWidths([-5, -3, -3, -4])
        statusbar_fields = ["XRF Display", " ", " ", " "]
        for i in range(len(statusbar_fields)):
            self.statusbar.SetStatusText(statusbar_fields[i], i)
        if filename is not None:
            self.add_mca(GSEMCA_File(filename), filename=filename, plot=True)
    def ignoreEvent(self, event=None):
        """Event sink that deliberately does nothing (disables cursor modes)."""
        pass
    def on_cursor(self, event=None, side='left'):
        """Handle a mouse click on the plot: place the left or right cursor
        marker at the clicked energy (snapped to the nearest MCA channel)."""
        if event is None:
            return
        x, y = event.xdata, event.ydata
        if len(self.panel.fig.axes) > 1:
            # with a secondary axis present, map pixel coordinates back
            # through the primary axes' transform
            try:
                x, y = self.panel.axes.transData.inverted().transform((event.x, event.y))
            except:
                pass
        ix = x
        if self.mca is not None:
            try:
                ix = index_of(self.mca.energy, x)
            except TypeError:
                pass
        if side == 'right':
            self.xmarker_right = ix
        elif side == 'left':
            self.xmarker_left = ix
        if self.xmarker_left is not None and self.xmarker_right is not None:
            # keep left marker at or below right marker
            ix1, ix2 = self.xmarker_left, self.xmarker_right
            self.xmarker_left = min(ix1, ix2)
            self.xmarker_right = max(ix1, ix2)
        if side == 'left':
            # NOTE(review): assumes self.mca is loaded for a left click -- confirm
            self.energy_for_zoom = self.mca.energy[ix]
        self.update_status()
        self.draw()
def clear_lines(self, evt=None):
"remove all Line Markers"
for m in self.major_markers + self.minor_markers + self.hold_markers:
try:
m.remove()
except:
pass
if self.highlight_xrayline is not None:
try:
self.highlight_xrayline.remove()
except:
pass
self.highlight_xrayline = None
self.major_markers = []
self.minor_markers = []
self.hold_markers = []
self.draw()
def draw(self):
try:
self.panel.canvas.draw()
except:
pass
def clear_markers(self, evt=None):
"remove all Cursor Markers"
for m in self.cursor_markers:
if m is not None:
m.remove()
self.cursor_markers = [None, None]
self.xmarker_left = None
self.xmarker_right = None
self.draw()
    def clear_background(self, evt=None):
        """Remove the background (second) spectrum and replot the primary MCA."""
        self.mca2 = None
        self.plotmca(self.mca)
    def update_status(self):
        """Refresh status-bar messages and cursor-marker decorations for the
        left/right markers and the currently selected ROI."""
        fmt = "{:s}:{:}, E={:.3f}, Cts={:,.0f}".format
        if (self.xmarker_left is None and
            self.xmarker_right is None and
            self.selected_roi is None):
            return
        log = np.log10
        axes= self.panel.axes
        def draw_ymarker_range(idx, x, y):
            # draw a short vertical marker at energy x, starting just above
            # the data value y (axes-fraction coordinates for y1..y2)
            ymin, ymax = self.panel.axes.get_ylim()
            y1 = (y-ymin)/(ymax-ymin+0.0002)
            if y < 1.0: y = 1.0
            if self.ylog_scale:
                # recompute the fraction in log space for log-scaled axes
                y1 = (log(y)-log(ymin))/(log(ymax)-log(ymin)+2.e-9)
            if y1 < 0.0: y1 = 0.0
            y2 = min(y1+0.25, y1*0.1 + 0.9)
            if self.cursor_markers[idx] is not None:
                try:
                    self.cursor_markers[idx].remove()
                except:
                    pass
            self.cursor_markers[idx] = axes.axvline(x, y1, y2, linewidth=2.5,
                                                    color=self.conf.marker_color)
        if self.xmarker_left is not None:
            ix = self.xmarker_left
            x, y = self.xdata[ix], self.ydata[ix]
            draw_ymarker_range(0, x, y)
            self.write_message(fmt("L", ix, x, y), panel=1)
        if self.xmarker_right is not None:
            ix = self.xmarker_right
            x, y = self.xdata[ix], self.ydata[ix]
            draw_ymarker_range(1, x, y)
            self.write_message(fmt("R", ix, x, y), panel=2)
        if self.mca is None:
            return
        if (self.xmarker_left is not None and
            self.xmarker_right is not None):
            # counts between the two markers go to status panel 3
            self.ShowROIStatus(self.xmarker_left,
                               self.xmarker_right,
                               name='', panel=3)
        if self.selected_roi is not None:
            roi = self.selected_roi
            left, right = roi.left, roi.right
            self.ShowROIStatus(left, right, name=roi.name, panel=0)
            self.ShowROIPatch(left, right)
def createPlotPanel(self):
"""mca plot window"""
pan = PlotPanel(self, fontsize=7,
axisbg='#FEFEFE',
# axissize=[0.01, 0.11, 0.97, 0.87],
with_data_process=False,
output_title='test.xrf',
messenger=self.write_message)
pan.conf.grid_color='#E5E5C0'
pan.conf.show_grid = False
pan.conf.canvas.figure.set_facecolor('#FCFCFE')
pan.conf.labelfont.set_size(6)
pan.conf.labelfont.set_size(6)
pan.onRightDown= partial(self.on_cursor, side='right')
pan.add_cursor_mode('zoom', motion = self.ignoreEvent,
leftup = self.ignoreEvent,
leftdown = self.on_cursor,
rightdown = partial(self.on_cursor, side='right'))
return pan
    def createControlPanel(self):
        """Build the left-hand control panel: ROI list and edit buttons,
        y-scale and zoom controls, the periodic table with K/L/M checkboxes
        and arrow keys, and the X-ray line table."""
        ctrlpanel = wx.Panel(self, name='Ctrl Panel')
        ptable = PeriodicTablePanel(ctrlpanel, onselect=self.onShowLines,
                                   tooltip_msg='Select Element for KLM Lines',
                                   fontsize=9)
        self.wids['ptable'] = ptable
        labstyle = wx.ALIGN_LEFT|wx.ALIGN_BOTTOM|wx.EXPAND
        ctrlstyle = wx.ALIGN_LEFT|wx.ALIGN_BOTTOM
        txtstyle=wx.ALIGN_LEFT|wx.ST_NO_AUTORESIZE|wx.TE_PROCESS_ENTER
        Font9 = Font(9)
        Font10 = Font(10)
        Font11 = Font(11)
        #
        # arrow buttons move the element selection in the periodic table
        arrowpanel = wx.Panel(ctrlpanel)
        ssizer = wx.BoxSizer(wx.HORIZONTAL)
        for wname, dname in (('uparrow', 'up'),
                             ('leftarrow', 'left'),
                             ('rightarrow', 'right'),
                             ('downarrow', 'down')):
            self.wids[wname] = wx.BitmapButton(arrowpanel, -1,
                                               get_icon(wname),
                                               style=wx.NO_BORDER)
            self.wids[wname].Bind(wx.EVT_BUTTON,
                                  partial(ptable.onKey, name=dname))
            ssizer.Add(self.wids[wname], 0, wx.EXPAND|wx.ALL)
        self.wids['holdbtn'] = wx.ToggleButton(arrowpanel, -1, 'Hold ',
                                               size=(85, -1))
        self.wids['holdbtn'].Bind(wx.EVT_TOGGLEBUTTON, self.onToggleHold)
        self.wids['kseries'] = Check(arrowpanel, ' K ', action=self.onKLM)
        self.wids['lseries'] = Check(arrowpanel, ' L ', action=self.onKLM)
        self.wids['mseries'] = Check(arrowpanel, ' M ', action=self.onKLM)
        ssizer.Add(self.wids['holdbtn'], 0, wx.EXPAND|wx.ALL, 2)
        ssizer.Add(self.wids['kseries'], 0, wx.EXPAND|wx.ALL, 0)
        ssizer.Add(self.wids['lseries'], 0, wx.EXPAND|wx.ALL, 0)
        ssizer.Add(self.wids['mseries'], 0, wx.EXPAND|wx.ALL, 0)
        pack(arrowpanel, ssizer)
        # roi section...
        rsizer = wx.GridBagSizer(4, 6)
        roipanel = wx.Panel(ctrlpanel, name='ROI Panel')
        self.wids['roilist'] = wx.ListBox(roipanel, size=(140, 150))
        self.wids['roilist'].Bind(wx.EVT_LISTBOX, self.onROI)
        self.wids['roilist'].SetMinSize((140, 150))
        self.wids['roiname'] = wx.TextCtrl(roipanel, -1, '', size=(150, -1))
        #
        roibtns= wx.Panel(roipanel, name='ROIButtons')
        zsizer = wx.BoxSizer(wx.HORIZONTAL)
        z1 = Button(roibtns, 'Add', size=(70, 30), action=self.onNewROI)
        z2 = Button(roibtns, 'Delete', size=(70, 30), action=self.onConfirmDelROI)
        z3 = Button(roibtns, 'Rename', size=(70, 30), action=self.onRenameROI)
        zsizer.Add(z1, 0, wx.EXPAND|wx.ALL, 0)
        zsizer.Add(z2, 0, wx.EXPAND|wx.ALL, 0)
        zsizer.Add(z3, 0, wx.EXPAND|wx.ALL, 0)
        pack(roibtns, zsizer)
        rt1 = txt(roipanel, ' Channels:', size=80, font=Font10)
        rt2 = txt(roipanel, ' Energy:', size=80, font=Font10)
        rt3 = txt(roipanel, ' Cen, Wid:', size=80, font=Font10)
        m = ''
        self.wids['roi_msg1'] = txt(roipanel, m, size=135, font=Font10)
        self.wids['roi_msg2'] = txt(roipanel, m, size=135, font=Font10)
        self.wids['roi_msg3'] = txt(roipanel, m, size=135, font=Font10)
        rsizer.Add(txt(roipanel, ' Regions of Interest:', size=125, font=Font11),
                   (0, 0), (1, 3), labstyle)
        rsizer.Add(self.wids['roiname'], (1, 0), (1, 3), labstyle)
        rsizer.Add(roibtns, (2, 0), (1, 3), labstyle)
        rsizer.Add(rt1, (3, 0), (1, 1), LEFT)
        rsizer.Add(rt2, (4, 0), (1, 1), LEFT)
        rsizer.Add(rt3, (5, 0), (1, 1), LEFT)
        rsizer.Add(self.wids['roi_msg1'], (3, 1), (1, 2), labstyle)
        rsizer.Add(self.wids['roi_msg2'], (4, 1), (1, 2), labstyle)
        rsizer.Add(self.wids['roi_msg3'], (5, 1), (1, 2), labstyle)
        rsizer.Add(self.wids['roilist'], (0, 3), (6, 1),
                   wx.EXPAND|wx.ALL|wx.ALIGN_RIGHT)
        rsizer.SetHGap(1)
        pack(roipanel, rsizer)
        # end roi section
        # y scale
        yscalepanel = wx.Panel(ctrlpanel, name='YScalePanel')
        ysizer = wx.BoxSizer(wx.HORIZONTAL)
        ytitle = txt(yscalepanel, ' Y Axis:', font=Font10, size=80)
        yspace = txt(yscalepanel, ' ', font=Font10, size=20)
        ylog = Choice(yscalepanel, size=(80, 30), choices=['log', 'linear'],
                      action=self.onLogLinear)
        yaxis = Check(yscalepanel, ' Show Y Scale ', action=self.onYAxis,
                      default=False)
        self.wids['show_yaxis'] = yaxis
        ysizer.Add(ytitle, 0, wx.ALIGN_CENTER_VERTICAL|wx.ALL, 0)
        ysizer.Add(ylog, 0, wx.EXPAND|wx.ALL, 0)
        ysizer.Add(yspace, 0, wx.EXPAND|wx.ALL, 0)
        ysizer.Add(yaxis, 0, wx.EXPAND|wx.ALL, 0)
        pack(yscalepanel, ysizer)
        # zoom buttons
        zoompanel = wx.Panel(ctrlpanel, name='ZoomPanel')
        zsizer = wx.BoxSizer(wx.HORIZONTAL)
        z1 = Button(zoompanel, 'Zoom In', size=(80, 30), action=self.onZoomIn)
        z2 = Button(zoompanel, 'Zoom Out', size=(80, 30), action=self.onZoomOut)
        p1 = Button(zoompanel, 'Pan Lo', size=(75, 30), action=self.onPanLo)
        p2 = Button(zoompanel, 'Pan Hi', size=(75, 30), action=self.onPanHi)
        zsizer.Add(p1, 0, wx.EXPAND|wx.ALL, 0)
        zsizer.Add(p2, 0, wx.EXPAND|wx.ALL, 0)
        zsizer.Add(z1, 0, wx.EXPAND|wx.ALL, 0)
        zsizer.Add(z2, 0, wx.EXPAND|wx.ALL, 0)
        pack(zoompanel, zsizer)
        # table of X-ray emission lines for the selected element
        self.wids['xray_lines'] = None
        dvstyle = dv.DV_SINGLE|dv.DV_VERT_RULES|dv.DV_ROW_LINES
        xlines = dv.DataViewListCtrl(ctrlpanel, style=dvstyle)
        self.wids['xray_lines'] = xlines
        xlines.AppendTextColumn(' Line ', width=60)
        xlines.AppendTextColumn(' Energy(keV) ', width=110)
        xlines.AppendTextColumn(' Strength ', width=85)
        xlines.AppendTextColumn(' Levels ', width=75)
        for col in (0, 1, 2, 3):
            this = xlines.Columns[col]
            this.Sortable = True
            align = RIGHT
            if col in (0, 3):
                align = wx.ALIGN_LEFT
            this.Alignment = this.Renderer.Alignment = align
        xlines.SetMinSize((300, 240))
        xlines.Bind(dv.EVT_DATAVIEW_SELECTION_CHANGED,
                    self.onSelectXrayLine)
        store = xlines.GetStore()
        # main layout
        # may have to adjust comparison....
        sizer = wx.BoxSizer(wx.VERTICAL)
        sizer.Add(roipanel, 0, labstyle)
        sizer.Add(lin(ctrlpanel, 195), 0, labstyle)
        sizer.Add(yscalepanel, 0, wx.ALIGN_RIGHT|wx.EXPAND|wx.ALL)
        sizer.Add(zoompanel, 0, wx.ALIGN_RIGHT|wx.EXPAND|wx.ALL)
        sizer.Add(lin(ctrlpanel, 195), 0, labstyle)
        sizer.Add(ptable, 0, wx.ALIGN_RIGHT|wx.EXPAND|wx.ALL, 4)
        sizer.Add(arrowpanel, 0, labstyle)
        sizer.Add(lin(ctrlpanel, 195), 0, labstyle)
        if self.wids['xray_lines'] is not None:
            sizer.Add(xlines, 0, wx.ALIGN_CENTER|wx.GROW|wx.ALL|wx.EXPAND)
        pack(ctrlpanel, sizer)
        return ctrlpanel
    def createMainPanel(self):
        """Place the control panel and plot panel side by side, and size the
        frame to fit their best sizes."""
        ctrlpanel = self.createControlPanel()
        plotpanel = self.panel = self.createPlotPanel()
        plotpanel.yformatter = self._formaty
        tx, ty = self.wids['ptable'].GetBestSize()
        cx, cy = ctrlpanel.GetBestSize()
        px, py = plotpanel.GetBestSize()
        self.SetSize((max(cx, tx)+px, 25+max(cy, py)))
        style = wx.ALIGN_LEFT|wx.EXPAND|wx.ALL
        sizer = wx.BoxSizer(wx.HORIZONTAL)
        sizer.Add(ctrlpanel, 0, style, 3)
        sizer.Add(plotpanel, 1, style, 2)
        self.SetMinSize((450, 150))
        pack(self, sizer)
        self.set_roilist(mca=None)
    def init_larch(self):
        """Ensure the larch symbol table has the wx app/parent symbols and
        the XRF group, and set the frame icon."""
        symtab = self.larch.symtable
        if not symtab.has_symbol('_sys.wx.wxapp'):
            symtab.set_symbol('_sys.wx.wxapp', wx.GetApp())
        if not symtab.has_symbol('_sys.wx.parent'):
            symtab.set_symbol('_sys.wx.parent', self)
        if not symtab.has_group(XRFGROUP):
            self.larch.eval(MAKE_XRFGROUP_CMD)
        fico = os.path.join(icondir, ICON_FILE)
        try:
            self.SetIcon(wx.Icon(fico, wx.BITMAP_TYPE_ICO))
        except:
            pass
    def add_mca(self, mca, filename=None, label=None, as_mca2=False, plot=True):
        """Install `mca` as the current spectrum (or as the background when
        `as_mca2` is True), register it in the larch XRF group, and
        optionally plot it.  The previous primary spectrum becomes the
        background."""
        if as_mca2:
            self.mca2 = mca
        else:
            self.mca2 = self.mca
            self.mca = mca
        xrfgroup = self.larch.symtable.get_group(XRFGROUP)
        mcaname = next_mcaname(self.larch)
        if filename is not None:
            # record the file read in the larch session
            self.larch.eval(read_mcafile.format(group=XRFGROUP,
                                                name=mcaname,
                                                filename=filename))
            if label is None:
                label = filename
        if label is None and mca.filename is not None:
            label = mca.filename
        if label is None:
            label = mcaname
        self.mca.label = label
        # push mca to mca2, save id of this mca
        setattr(xrfgroup, '_mca2', getattr(xrfgroup, '_mca', ''))
        setattr(xrfgroup, '_mca', mcaname)
        setattr(xrfgroup, mcaname, mca)
        if plot:
            self.plotmca(self.mca)
            if as_mca2:
                # NOTE(review): plotting self.mca with as_mca2=True looks
                # suspicious -- possibly intended to be self.mca2; confirm
                self.plotmca(self.mca, as_mca2=True)
def _getlims(self):
emin, emax = self.panel.axes.get_xlim()
erange = emax-emin
emid = (emax+emin)/2.0
if self.energy_for_zoom is not None:
emid = self.energy_for_zoom
dmin, dmax = emin, emax
drange = erange
if self.mca is not None:
dmin, dmax = self.mca.energy.min(), self.mca.energy.max()
return (emid, erange, dmin, dmax)
def _set_xview(self, e1, e2, keep_zoom=False):
if not keep_zoom:
self.energy_for_zoom = (e1+e2)/2.0
self.panel.axes.set_xlim((e1, e2))
self.xview_range = [e1, e2]
self.draw()
def onPanLo(self, event=None):
emid, erange, dmin, dmax = self._getlims()
e1 = max(dmin, emid-0.9*erange)
e2 = min(dmax, e1 + erange)
self._set_xview(e1, e2)
def onPanHi(self, event=None):
emid, erange, dmin, dmax = self._getlims()
e2 = min(dmax, emid+0.9*erange)
e1 = max(dmin, e2-erange)
self._set_xview(e1, e2)
def onZoomIn(self, event=None):
emid, erange, dmin, dmax = self._getlims()
e1 = max(dmin, emid-erange/3.0)
e2 = min(dmax, emid+erange/3.0)
self._set_xview(e1, e2, keep_zoom=True)
def onZoomOut(self, event=None):
emid, erange, dmin, dmax = self._getlims()
e1 = max(dmin, emid-1.25*erange)
e2 = min(dmax, emid+1.25*erange)
self._set_xview(e1, e2)
def unzoom_all(self, event=None):
emid, erange, dmin, dmax = self._getlims()
self._set_xview(dmin, dmax)
self.xview_range = None
    def toggle_grid(self, event=None):
        """Toggle the plot grid on/off (delegates to the plot panel)."""
        self.panel.toggle_grid()
def set_roilist(self, mca=None):
""" Add Roi names to roilist"""
self.wids['roilist'].Clear()
if mca is not None:
for roi in mca.rois:
name = bytes2str(roi.name.strip())
if len(name) > 0:
self.wids['roilist'].Append(roi.name)
def clear_roihighlight(self, event=None):
self.selected_roi = None
try:
self.roi_patch.remove()
except:
pass
self.roi_patch = None
self.wids['roiname'].SetValue('')
self.draw()
def get_roiname(self):
roiname = self.wids['roiname'].GetValue()
if len(roiname) < 1:
roiname = 'ROI 1'
names = [str(r.name.lower()) for r in self.mca.rois]
if str(roiname.lower()) in names:
ix = 1
while str(roiname.lower()) in names:
roiname = "ROI %i" % (ix)
ix += 1
return roiname
def onNewROI(self, event=None):
if (self.xmarker_left is None or
self.xmarker_right is None or self.mca is None):
return
roiname = self.get_roiname()
names = [str(r.name.lower()) for r in self.mca.rois]
if str(roiname.lower()) in names:
msg = "Overwrite Definition of ROI {:s}?".format(roiname)
if (wx.ID_YES != Popup(self, msg, 'Overwrite ROI?', style=wx.YES_NO)):
return False
left, right = self.xmarker_left, self.xmarker_right
if left > right:
left, right = right, left
self.mca.add_roi(name=roiname, left=left, right=right, sort=True)
self.set_roilist(mca=self.mca)
for roi in self.mca.rois:
if roi.name.lower()==roiname:
selected_roi = roi
self.plot(self.xdata, self.ydata)
self.onROI(label=roiname)
if self.selected_elem is not None:
self.onShowLines(elem=self.selected_elem)
return True
def onConfirmDelROI(self, event=None):
roiname = self.wids['roiname'].GetValue()
msg = "Delete ROI {:s}?".format(roiname)
if (wx.ID_YES == Popup(self, msg, 'Delete ROI?', style=wx.YES_NO)):
self.onDelROI()
def onRenameROI(self, event=None):
roiname = self.get_roiname()
if self.roilist_sel is not None:
names = self.wids['roilist'].GetStrings()
names[self.roilist_sel] = roiname
self.wids['roilist'].Clear()
for sname in names:
self.wids['roilist'].Append(sname)
self.wids['roilist'].SetSelection(self.roilist_sel)
def onDelROI(self):
roiname = self.wids['roiname'].GetValue()
rdat = []
if self.mca is None:
return
for i in range(len(self.mca.rois)):
roi = self.mca.rois.pop(0)
if roi.name.lower() != roiname.lower():
rdat.append((roi.name, roi.left, roi.right))
for name, left, right in rdat:
self.mca.add_roi(name=name, left=left, right=right, sort=False)
self.mca.rois.sort()
self.set_roilist(mca=self.mca)
self.wids['roiname'].SetValue('')
try:
self.roi_patch.remove()
except:
pass
self.plot(self.xdata, self.ydata)
if self.selected_elem is not None:
self.onShowLines(elem=self.selected_elem)
def ShowROIStatus(self, left, right, name='', panel=0):
if left > right:
return
sum = self.ydata[left:right].sum()
dt = self.mca.real_time
nmsg, cmsg, rmsg = '', '', ''
if len(name) > 0:
nmsg = " %s" % name
cmsg = " Cts={:10,.0f}".format(sum)
if dt is not None and dt > 1.e-9:
rmsg = " CPS={:10,.1f}".format(sum/dt)
self.write_message("%s%s%s" % (nmsg, cmsg, rmsg), panel=panel)
    def ShowROIPatch(self, left, right):
        """show colored XRF Patch:
        Note: ROIs larger than half the energy are not colored"""
        # xnpts = 1.0/len(self.mca.energy)
        # if xnpts*(right - left) > 0.5:
        #     return
        try:
            self.roi_patch.remove()
        except:
            pass
        # pad one extra point on each side: edges repeat the boundary
        # energies with unit counts so fill_between closes cleanly
        e = np.zeros(right-left+2)
        r = np.ones(right-left+2)
        e[1:-1] = self.mca.energy[left:right]
        r[1:-1] = self.mca.counts[left:right]
        e[0] = e[1]
        e[-1] = e[-2]
        self.roi_patch = self.panel.axes.fill_between(e, r, zorder=-20,
                                                      color=self.conf.roi_fillcolor)
    def onROI(self, event=None, label=None):
        """Select an ROI by listbox event or by name; update the status
        messages, the shaded patch, and the zoom-center energy."""
        if label is None and event is not None:
            label = event.GetString()
            self.roilist_sel = event.GetSelection()
        self.wids['roiname'].SetValue(label)
        name, left, right= None, -1, -1
        label = bytes2str(label.lower().strip())
        self.selected_roi = None
        if self.mca is not None:
            for roi in self.mca.rois:
                if bytes2str(roi.name.lower())==label:
                    left, right, name = roi.left, roi.right, roi.name
                    elo = self.mca.energy[left]
                    ehi = self.mca.energy[right]
                    self.selected_roi = roi
                    break
        if name is None or right == -1:
            return
        self.ShowROIStatus(left, right, name=name)
        self.ShowROIPatch(left, right)
        # channel range, energy range, and center/width status labels
        roi_msg1 = '[{:}:{:}]'.format(left, right)
        roi_msg2 = '[{:6.3f}:{:6.3f}]'.format(elo, ehi)
        roi_msg3 = '{:6.3f}, {:6.3f}'.format((elo+ehi)/2., (ehi - elo))
        self.energy_for_zoom = (elo+ehi)/2.0
        self.wids['roi_msg1'].SetLabel(roi_msg1)
        self.wids['roi_msg2'].SetLabel(roi_msg2)
        self.wids['roi_msg3'].SetLabel(roi_msg3)
        self.draw()
        self.panel.Refresh()
    def onSaveROIs(self, event=None):
        # stub: saving ROIs to a file is not implemented (the matching
        # menu entry in createBaseMenus is commented out)
        pass
    def onRestoreROIs(self, event=None):
        # stub: restoring ROIs from a file is not implemented (the matching
        # menu entry in createBaseMenus is commented out)
        pass
    def createCustomMenus(self):
        # hook for subclasses: called by createMenus() after createBaseMenus()
        # so they can append their own menus; the base class adds none
        return
    def createBaseMenus(self):
        """Build the standard File / Options / Analysis menus and store them
        in self._menus for createMenus() to append to the menu bar."""
        # --- File menu: read/save spectra, save/print plot images, quit ---
        fmenu = wx.Menu()
        MenuItem(self, fmenu, "&Read MCA Spectra File\tCtrl+O",
                 "Read GSECARS MCA File", self.onReadMCAFile)
        MenuItem(self, fmenu, "&Save MCA File\tCtrl+S",
                 "Save GSECARS MCA File", self.onSaveMCAFile)
        MenuItem(self, fmenu, "&Save ASCII Column File\tCtrl+A",
                 "Save Column File", self.onSaveColumnFile)
        fmenu.AppendSeparator()
        # MenuItem(self, fmenu, "Save ROIs to File",
        #          "Save ROIs to File", self.onSaveROIs)
        # MenuItem(self, fmenu, "Restore ROIs File",
        #          "Read ROIs from File", self.onRestoreROIs)
        # fmenu.AppendSeparator()
        MenuItem(self, fmenu, 'Show Larch Buffer\tCtrl+L',
                 'Show Larch Programming Buffer',
                 self.onShowLarchBuffer)
        MenuItem(self, fmenu, "Save Plot\tCtrl+I",
                 "Save PNG Image of Plot", self.onSavePNG)
        MenuItem(self, fmenu, "&Copy Plot\tCtrl+C",
                 "Copy Plot Image to Clipboard",
                 self.onCopyImage)
        MenuItem(self, fmenu, 'Page Setup...', 'Printer Setup', self.onPageSetup)
        MenuItem(self, fmenu, 'Print Preview...', 'Print Preview', self.onPrintPreview)
        MenuItem(self, fmenu, "&Print\tCtrl+P", "Print Plot", self.onPrint)
        fmenu.AppendSeparator()
        MenuItem(self, fmenu, "&Quit\tCtrl+Q", "Quit program", self.onClose)
        # --- Options menu: display configuration and toggles ---
        omenu = wx.Menu()
        MenuItem(self, omenu, "Configure Colors",
                 "Configure Colors", self.config_colors)
        MenuItem(self, omenu, "Configure X-ray Lines",
                 "Configure which X-ray Lines are shown", self.config_xraylines)
        MenuItem(self, omenu, "Configure Plot\tCtrl+K",
                 "Configure Plot Colors, etc", self.panel.configure)
        MenuItem(self, omenu, "Zoom Out\tCtrl+Z",
                 "Zoom out to full data range", self.unzoom_all)
        MenuItem(self, omenu, "Toggle Grid\tCtrl+G",
                 "Toggle Grid Display", self.toggle_grid)
        MenuItem(self, omenu, "Toggle Plot legend",
                 "Toggle Plot Legend", self.onToggleLegend)
        omenu.AppendSeparator()
        MenuItem(self, omenu, "Hide X-ray Lines",
                 "Hide all X-ray Lines", self.clear_lines)
        MenuItem(self, omenu, "Hide selected ROI ",
                 "Hide selected ROI", self.clear_roihighlight)
        MenuItem(self, omenu, "Hide Markers ",
                 "Hide cursor markers", self.clear_markers)
        MenuItem(self, omenu, "Hide XRF Background ",
                 "Hide cursor markers", self.clear_background)
        omenu.AppendSeparator()
        MenuItem(self, omenu, "Swap MCA and Background MCA",
                 "Swap Foreground and Background MCAs", self.swap_mcas)
        MenuItem(self, omenu, "Close Background MCA",
                 "Close Background MCA", self.close_bkg_mca)
        # --- Analysis menu: predictions, calibration, fitting ---
        amenu = wx.Menu()
        MenuItem(self, amenu, "Show Pileup Prediction",
                 "Show Pileup Prediction", kind=wx.ITEM_CHECK,
                 checked=False, action=self.onPileupPrediction)
        MenuItem(self, amenu, "Show Escape Prediction",
                 "Show Escape Prediction", kind=wx.ITEM_CHECK,
                 checked=False, action=self.onEscapePrediction)
        MenuItem(self, amenu, "&Calibrate Energy\tCtrl+E",
                 "Calibrate Energy", self.onCalibrateEnergy)
        MenuItem(self, amenu, "Fit Spectrum\tCtrl+F",
                 "Fit Spectrum for Elemental Contributiosn",
                 self.onFitSpectrum)
        self._menus = [(fmenu, '&File'),
                       (omenu, '&Options'),
                       (amenu, '&Analysis')]
def createMenus(self):
self.menubar = wx.MenuBar()
self.createBaseMenus()
self.createCustomMenus()
for menu, title in self._menus:
self.menubar.Append(menu, title)
self.SetMenuBar(self.menubar)
self.Bind(wx.EVT_CLOSE, self.onClose)
def onShowLarchBuffer(self, evt=None):
if self.larch_buffer is not None:
self.larch_buffer.Show()
self.larch_buffer.Raise()
def onSavePNG(self, event=None):
if self.panel is not None:
self.panel.save_figure(event=event)
def onCopyImage(self, event=None):
if self.panel is not None:
self.panel.canvas.Copy_to_Clipboard(event=event)
def onPageSetup(self, event=None):
if self.panel is not None:
self.panel.PrintSetup(event=event)
def onPrintPreview(self, event=None):
if self.panel is not None:
self.panel.PrintPreview(event=event)
def onPrint(self, event=None):
if self.panel is not None:
self.panel.Print(event=event)
    def onClose(self, event=None):
        """Shut down: run the exit callback, close/destroy the plot config
        window, schedule plotter displays and child subframes for
        destruction, then destroy this frame. Each teardown step is
        best-effort so one failure cannot block the rest."""
        try:
            if callable(self.exit_callback):
                self.exit_callback()
        except:
            pass
        try:
            if self.panel is not None:
                self.panel.win_config.Close(True)
            if self.panel is not None:
                self.panel.win_config.Destroy()
        except:
            pass
        if hasattr(self.larch.symtable, '_plotter'):
            # close any plot displays managed by the larch plotter
            wx.CallAfter(self.larch.symtable._plotter.close_all_displays)
        for name, wid in self.subframes.items():
            if hasattr(wid, 'Destroy'):
                wx.CallAfter(wid.Destroy)
        self.Destroy()
def config_colors(self, event=None):
"""show configuration frame"""
try:
self.win_config.Raise()
except:
self.win_config = ColorsFrame(parent=self)
def config_xraylines(self, event=None):
"""show configuration frame"""
try:
self.win_config.Raise()
except:
self.win_config = XrayLinesFrame(parent=self)
def onToggleLegend(self, event=None):
self.panel.conf.show_legend = not self.panel.conf.show_legend
self.panel.conf.draw_legend()
def onKLM(self, event=None):
"""selected K, L, or M Markers"""
if self.selected_elem is not None:
self.onShowLines(elem = self.selected_elem)
def onToggleHold(self, event=None):
if event.IsChecked():
self.wids['holdbtn'].SetLabel("Hide %s" % self.selected_elem)
self.hold_lines = self.saved_lines[:]
else:
self.wids['holdbtn'].SetLabel("Hold %s" % self.selected_elem)
self.hold_lines = None
for m in self.hold_markers:
try:
m.remove()
except:
pass
self.hold_markers = []
self.draw()
def onSelectXrayLine(self, evt=None):
if self.wids['xray_lines'] is None:
return
if not self.wids['xray_lines'].HasSelection():
return
item = self.wids['xray_lines'].GetSelectedRow()
en = self.wids['xray_linesdata'][item]
if self.highlight_xrayline is not None:
self.highlight_xrayline.remove()
self.energy_for_zoom = en
self.highlight_xrayline = self.panel.axes.axvline(en,
color=self.conf.emph_elinecolor,
linewidth=2.5, zorder=-15)
self.draw()
    def onShowLines(self, event=None, elem=None):
        """Draw the K/L/M emission lines (per the series checkboxes) and
        absorption-edge subtitles for an element, refresh the line list,
        and redraw any 'held' lines from a previous element."""
        if elem is None:
            elem = event.GetString()
        vline = self.panel.axes.axvline
        elines = self.larch.symtable._xray.xray_lines(elem)
        self.selected_elem = elem
        self.clear_lines()
        self.energy_for_zoom = None
        xlines = self.wids['xray_lines']
        if xlines is not None:
            xlines.DeleteAllItems()
        self.wids['xray_linesdata'] = []
        minors, majors = [], []
        conf = self.conf
        # collect (name, energy_eV, fraction, initial, final) for every
        # configured line; placeholder values when the element lacks it
        line_data = {}
        for line in (conf.K_major+conf.K_minor+conf.L_major+
                     conf.L_minor+conf.M_major):
            line_data[line] = line, -1, 0, '', ''
            if line in elines:
                dat = elines[line]
                line_data[line] = line, dat[0], dat[1], dat[2], dat[3]
        if self.wids['kseries'].IsChecked():
            majors.extend([line_data[l] for l in conf.K_major])
            minors.extend([line_data[l] for l in conf.K_minor])
        if self.wids['lseries'].IsChecked():
            majors.extend([line_data[l] for l in conf.L_major])
            minors.extend([line_data[l] for l in conf.L_minor])
        if self.wids['mseries'].IsChecked():
            majors.extend([line_data[l] for l in conf.M_major])
        self.saved_lines = majors[:] + minors[:]
        # energy range actually covered by the data (clipped to config)
        erange = [max(conf.e_min, self.xdata.min()),
                  min(conf.e_max, self.xdata.max())]
        view_mid, view_range, d1, d2 = self._getlims()
        view_emin = view_mid - view_range/2.0
        view_emax = view_mid + view_range/2.0
        # major lines: solid markers; first one inside the view becomes
        # the zoom-center energy
        for label, eev, frac, ilevel, flevel in majors:
            e = float(eev) * 0.001
            # print( 'Major ', label, eev, e, frac, ilevel, flevel)
            if (e >= erange[0] and e <= erange[1]):
                l = vline(e, color= self.conf.major_elinecolor,
                          linewidth=1.50, zorder=-5)
                l.set_label(label)
                dat = (label, "%.4f" % e, "%.4f" % frac,
                       "%s->%s" % (ilevel, flevel))
                self.wids['xray_linesdata'].append(e)
                if xlines is not None:
                    xlines.AppendItem(dat)
                self.major_markers.append(l)
                if (self.energy_for_zoom is None and
                    e > view_emin and e < view_emax):
                    self.energy_for_zoom = e
        # minor lines: thinner markers, drawn behind the majors
        for label, eev, frac, ilevel, flevel in minors:
            e = float(eev) * 0.001
            if (e >= erange[0] and e <= erange[1]):
                l = vline(e, color= self.conf.minor_elinecolor,
                          linewidth=1.25, zorder=-7)
                l.set_label(label)
                # dat = (label, "%.4f" % e, "%.4f" % frac,
                #       "%s->%s" % (ilevel, flevel))
                dat = (label, "%.4f" % e, "%.4f" % frac,
                       "%s->%s" % (ilevel, flevel))
                self.wids['xray_linesdata'].append(e)
                if xlines is not None:
                    xlines.AppendItem(dat)
                self.minor_markers.append(l)
        if not self.wids['holdbtn'].GetValue():
            self.wids['holdbtn'].SetLabel("Hold %s" % elem)
        elif self.hold_lines is not None:
            # hold mode: redraw the previously held lines as dashed markers
            for label, eev, frac, ilevel, flevel in self.hold_lines:
                e = float(eev) * 0.001
                if (e >= erange[0] and e <= erange[1]):
                    l = vline(e, color=self.conf.hold_elinecolor,
                              linewidth=1.5, zorder=-20, dashes=(3, 3))
                    l.set_label(label)
                    self.hold_markers.append(l)
        if xlines is not None:
            xlines.Refresh()
        # look up absorption edges inside the data range for the subtitles
        edge_en = {}
        for edge in ('K', 'M5', 'L3', 'L2', 'L1'):
            edge_en[edge] = None
            xex = self.larch.symtable._xray.xray_edge(elem, edge)
            if xex is not None:
                en = xex[0]*0.001
                if en > erange[0] and en < erange[1]:
                    edge_en[edge] = en
        out = ''
        for key in ('M5', 'K'):
            if edge_en[key] is not None:
                out = "%s=%.3f" % (key, edge_en[key])
        if len(out) > 1:
            self.wids['ptable'].set_subtitle(out, index=0)
        s, v, out = [], [], ''
        for key in ('L3', 'L2', 'L1'):
            if edge_en[key] is not None:
                s.append(key)
                v.append("%.3f" % edge_en[key])
        if len(s) > 0:
            out = "%s=%s" %(', '.join(s), ', '.join(v))
            self.wids['ptable'].set_subtitle(out, index=1)
        self.draw()
def onPileupPrediction(self, event=None):
if event.IsChecked():
self.mca.predict_pileup()
self.oplot(self.mca.energy, self.mca.pileup,
color=self.conf.pileup_color, label='pileup prediction')
else:
self.plotmca(self.mca)
def onEscapePrediction(self, event=None):
if event.IsChecked():
self.mca.predict_escape()
self.oplot(self.mca.energy, self.mca.escape,
color=self.conf.escape_color, label='escape prediction')
else:
self.plotmca(self.mca)
def onYAxis(self, event=None):
self.show_yaxis = self.wids['show_yaxis'].IsChecked()
ax = self.panel.axes
ax.yaxis.set_major_formatter(FuncFormatter(self._formaty))
ax.get_yaxis().set_visible(self.show_yaxis)
ax.spines['right'].set_visible(False)
ax.yaxis.set_ticks_position('left')
self.draw()
def _formaty(self, val, index=0, **kws):
try:
decade = int(np.log10(val))
except:
decade = 0
scale = 10**decade
out = "%.1fe%i" % (val/scale, decade)
if abs(decade) < 1.9:
out = "%.1f" % val
elif abs(decade) < 3.9:
out = "%.0f" % val
return out
def onLogLinear(self, event=None):
self.ylog_scale = 'log' == event.GetString()
roiname = None
if self.selected_roi is not None:
roiname = self.selected_roi.name
self.plot(self.xdata, self.ydata)
if self.selected_elem is not None:
self.onShowLines(elem=self.selected_elem)
if roiname is not None:
self.onROI(label=roiname)
if self.y2data is not None:
self.oplot(self.x2data, self.y2data)
def plotmca(self, mca, title=None, set_title=True, as_mca2=False,
fullrange=False, init=False, **kws):
if as_mca2:
self.mca2 = mca
kws['new'] = False
else:
self.mca = mca
self.panel.conf.show_grid = False
xview_range = self.panel.axes.get_xlim()
if init or xview_range == (0.0, 1.0):
self.xview_range = (min(self.mca.energy), max(self.mca.energy))
else:
self.xview_range = xview_range
atitles = []
if self.mca is not None:
if getattr(self.mca, 'title', None) is not None:
atitles.append(bytes2str(self.mca.title))
if getattr(self.mca, 'filename', None) is not None:
atitles.append(" File={:s}".format(self.mca.filename))
if getattr(self.mca, 'npixels', None) is not None:
atitles.append(" {:.0f} Pixels".format(self.mca.npixels))
if getattr(self.mca, 'real_time', None) is not None:
try:
rtime_str = " RealTime={:.2f} sec".format(self.mca.real_time)
except ValueError:
rtime_str = " RealTime= %s sec".format(str(self.mca.real_time))
atitles.append(rtime_str)
try:
self.plot(self.mca.energy, self.mca.counts,
mca=self.mca, **kws)
except ValueError:
pass
if as_mca2:
if getattr(self.mca2, 'title', None) is not None:
atitles.append(" BG={:s}".format(self.mca2.title))
elif getattr(self.mca2, 'filename', None) is not None:
atitles.append(" BG_File={:s}".format(self.mca2.filename))
if getattr(self.mca, 'real_time', None) is not None:
atitles.append(" BG_RealTime={:.2f} sec".format(self.mca2.real_time))
self.oplot(self.mca2.energy, self.mca2.counts,
mca=self.mca2, **kws)
if title is None:
title = ' '.join(atitles)
if set_title:
self.SetTitle(title)
    def plot(self, x, y=None, mca=None, init=False, with_rois=True, **kws):
        """Plot spectrum counts `y` versus energy `x`, caching the data in
        self.xdata/self.ydata, optionally overlaying colored ROI segments,
        then applying the current y-scale and view limits."""
        if mca is not None:
            self.mca = mca
        mca = self.mca
        panel = self.panel
        panel.yformatter = self._formaty
        panel.axes.get_yaxis().set_visible(False)
        # default plot options; caller-supplied kws override these
        kwargs = {'xmin': 0,
                  'linewidth': 2.5,
                  'delay_draw': True,
                  'grid': panel.conf.show_grid,
                  'ylog_scale': self.ylog_scale,
                  'xlabel': 'E (keV)',
                  'axes_style': 'bottom',
                  'color': self.conf.spectra_color}
        kwargs.update(kws)
        self.xdata = 1.0*x[:]
        self.ydata = 1.0*y[:]
        # tiny offset keeps zero counts valid on a log scale
        ydat = 1.0*y[:] + 1.e-9
        kwargs['ymax'] = max(ydat)*1.25
        kwargs['ymin'] = 0.9
        kwargs['xmax'] = max(self.xdata)
        kwargs['xmin'] = min(self.xdata)
        if self.xview_range is not None:
            kwargs['xmin'] = self.xview_range[0]
            kwargs['xmax'] = self.xview_range[1]
        panel.plot(x, ydat, label='spectrum',  **kwargs)
        if with_rois and mca is not None:
            if not self.rois_shown:
                self.set_roilist(mca=mca)
            # yroi holds counts inside ROIs, -1 elsewhere (masked below)
            yroi = -1.0*np.ones(len(y))
            max_width = 0.5*len(self.mca.energy) # suppress very large ROIs
            for r in mca.rois:
                if ((r.left, r.right) in ((0, 0), (-1, -1)) or
                    (r.right - r.left) > max_width):
                    continue
                yroi[r.left:r.right] = y[r.left:r.right]
            yroi = np.ma.masked_less(yroi, 0)
            if yroi.max() > 0:
                kwargs['color'] = self.conf.roi_color
                panel.oplot(x, yroi, label='rois', **kwargs)
        yscale = {False:'linear', True:'log'}[self.ylog_scale]
        panel.set_viewlimits()
        panel.set_logscale(yscale=yscale)
        panel.axes.get_yaxis().set_visible(self.show_yaxis)
        panel.cursor_mode = 'zoom'
        self.draw()
        panel.canvas.Refresh()
    def update_mca(self, counts, energy=None, with_rois=True,
                   is_mca2=False, draw=True):
        """update counts (and optionally energy) for mca, and update plot"""
        mca = self.mca
        ix = 0
        if is_mca2:
            mca = self.mca2
            ix = 2
        mca.counts = counts[:]
        if energy is not None:
            mca.energy = energy[:]
            xnpts = 1.0/len(energy)
        # NOTE(review): if energy is None and ROIs exist, `xnpts` below is
        # unbound -- callers appear to always pass energy; confirm.
        nrois = len(mca.rois)
        if not is_mca2 and with_rois and nrois > 0:
            # rebuild the ROI overlay trace: counts inside ROIs, -1 elsewhere
            yroi = -1*np.ones(len(counts))
            for r in mca.rois:
                if xnpts*(r.right - r.left) > 0.5:
                    continue
                yroi[r.left:r.right] = counts[r.left:r.right]
            yroi = np.ma.masked_less(yroi, 0)
            self.panel.update_line(1, mca.energy, yroi, draw=False,
                                   update_limits=False)
        self.panel.update_line(ix, mca.energy, counts,
                               draw=False, update_limits=False)
        # y-limit covers both foreground and (if present) background MCAs
        max_counts = max_counts2 = max(self.mca.counts)
        try:
            max_counts2 = max(self.mca2.counts)
        except:
            pass
        self.panel.axes.set_ylim(0.9, 1.25*max(max_counts, max_counts2))
        if mca == self.mca:
            self.ydata = 1.0*counts[:]
            self.update_status()
        if draw: self.draw()
def oplot(self, x, y, color='darkgreen', label='spectrum2',
mca=None, zorder=-2, **kws):
if mca is not None:
self.mca2 = mca
self.x2data = 1.0*x[:]
self.y2data = 1.0*y[:]
if hasattr(self, 'ydata'):
ymax = max(max(self.ydata), max(y))*1.25
else:
ymax = max(y)*1.25
kws.update({'zorder': zorder, 'label': label,
'ymax' : ymax, 'axes_style': 'bottom',
'ylog_scale': self.ylog_scale})
self.panel.oplot(self.x2data, self.y2data, color=color, **kws)
def swap_mcas(self, event=None):
if self.mca2 is None:
return
self.mca, self.mca2 = self.mca2, self.mca
xrfgroup = self.larch.symtable.get_group(XRFGROUP)
_mca = getattr(xrfgroup, '_mca', '')
_mca2 = getattr(xrfgroup, '_mca2', '')
setattr(xrfgroup, '_mca2', _mca)
setattr(xrfgroup, '_mca', _mca2)
self.plotmca(self.mca)
self.plotmca(self.mca2, as_mca2=True)
def close_bkg_mca(self, event=None):
self.mca2 = None
xrfgroup = self.larch.symtable.get_group(XRFGROUP)
setattr(xrfgroup, '_mca2', '')
self.plotmca(self.mca)
def onReadMCAFile(self, event=None):
dlg = wx.FileDialog(self, message="Open MCA File for reading",
defaultDir=os.getcwd(),
wildcard=FILE_WILDCARDS,
style = wx.FD_OPEN|wx.FD_CHANGE_DIR)
filename = None
if dlg.ShowModal() == wx.ID_OK:
filename = os.path.abspath(dlg.GetPath())
dlg.Destroy()
if filename is None:
return
if self.mca is not None:
self.mca2 = copy.deepcopy(self.mca)
self.add_mca(GSEMCA_File(filename), filename=filename)
def onSaveMCAFile(self, event=None, **kws):
deffile = ''
if getattr(self.mca, 'sourcefile', None) is not None:
deffile = "%s%s" % (deffile, self.mca.sourcefile)
elif getattr(self.mca, 'filename', None) is not None:
deffile = "%s%s" % (deffile, self.mca.filename)
if getattr(self.mca, 'areaname', None) is not None:
deffile = "%s_%s" % (deffile, self.mca.areaname)
if deffile == '':
deffile ='test'
if not deffile.endswith('.mca'):
deffile = deffile + '.mca'
_, deffile = os.path.split(deffile)
deffile = fix_filename(str(deffile))
outfile = FileSave(self, "Save MCA File",
default_file=deffile,
wildcard=FILE_WILDCARDS)
if outfile is not None:
self.mca.save_mcafile(outfile)
def onSaveColumnFile(self, event=None, **kws):
deffile = ''
if getattr(self.mca, 'sourcefile', None) is not None:
deffile = "%s%s" % (deffile, self.mca.sourcefile)
elif getattr(self.mca, 'filename', None) is not None:
deffile = "%s%s" % (deffile, self.mca.filename)
if getattr(self.mca, 'areaname', None) is not None:
deffile = "%s_%s" % (deffile, self.mca.areaname)
if deffile == '':
deffile ='test'
if not deffile.endswith('.dat'):
deffile = deffile + '.dat'
_, deffile = os.path.split(deffile)
deffile = fix_filename(str(deffile))
ASCII_WILDCARDS = "Data File (*.dat)|*.dat|All files (*.*)|*.*"
outfile = FileSave(self, "Save ASCII File for MCA Data",
default_file=deffile,
wildcard=ASCII_WILDCARDS)
if outfile is not None:
self.mca.save_ascii(outfile)
def onCalibrateEnergy(self, event=None, **kws):
try:
self.win_calib.Raise()
except:
self.win_calib = XRFCalibrationFrame(self, mca=self.mca,
callback=self.onCalibrationChange)
def onCalibrationChange(self, mca):
"""update whenn mca changed calibration"""
self.plotmca(mca)
def onFitSpectrum(self, event=None, **kws):
try:
self.win_fit.Raise()
except:
self.win_fit = FitSpectraFrame(self)
    def write_message(self, s, panel=0):
        """write a message to the Status Bar

        Parameters
        ----------
        s : str
            message text
        panel : int, optional
            status bar panel index (default 0)
        """
        self.SetStatusText(s, panel)
    def onAbout(self, event=None):
        """Show the modal 'About' dialog for the XRF viewer."""
        # the <NAME>/<EMAIL> placeholders below are part of the runtime
        # string and are left untouched
        dlg = wx.MessageDialog(self,
                               """XRF Spectral Viewer
                               <NAME> <<EMAIL>>
                               """,
                               "About XRF Viewer",
                               wx.OK | wx.ICON_INFORMATION)
        dlg.ShowModal()
        dlg.Destroy()
def onReadFile(self, event=None):
dlg = wx.FileDialog(self, message="Read MCA File",
defaultDir=os.getcwd(),
wildcard=FILE_WILDCARDS,
style=wx.FD_OPEN)
path, re1ad = None, False
if dlg.ShowModal() == wx.ID_OK:
read = True
path = dlg.GetPath().replace('\\', '/')
if path in self.filemap:
read = (wx.ID_YES == Popup(self, "Re-read file '%s'?" % path,
'Re-read file?', style=wx.YES_NO))
dlg.Destroy()
if read:
try:
parent, fname = os.path.split(path)
except:
return
class XRFApp(wx.App, wx.lib.mixins.inspection.InspectionMixin):
    """Stand-alone wx application that opens one XRFDisplayFrame."""
    def __init__(self, filename=None, **kws):
        # remember the filename so OnInit can hand it to the display frame
        self.filename = filename
        wx.App.__init__(self)

    def OnInit(self):
        self.Init()
        frame = XRFDisplayFrame(filename=self.filename)
        frame.Show()
        self.SetTopWindow(frame)
        return True
| [
"wx.Menu",
"wxutils.Check",
"wxutils.SimpleText",
"wx.ToggleButton",
"numpy.ones",
"wx.GridBagSizer",
"wx.CallAfter",
"wxutils.FileSave",
"numpy.arange",
"os.path.join",
"wx.Colour",
"wx.App.__init__",
"wxutils.Choice",
"wx.Panel",
"wxutils.pack",
"wx.ListBox",
"wx.TextCtrl",
"wx.d... | [((1714, 1783), 'wxutils.SimpleText', 'SimpleText', (['panel', 'label'], {'size': '(size, -1)', 'colour': 'colour', 'style': 'style'}), '(panel, label, size=(size, -1), colour=colour, style=style)\n', (1724, 1783), False, 'from wxutils import SimpleText, EditableListBox, Font, pack, Popup, get_icon, SetTip, Button, Check, MenuItem, Choice, FileOpen, FileSave, fix_filename, HLine, GridPanel, CEN, LEFT, RIGHT\n'), ((1933, 1983), 'wx.StaticLine', 'wx.StaticLine', (['panel'], {'size': '(len, wid)', 'style': 'style'}), '(panel, size=(len, wid), style=style)\n', (1946, 1983), False, 'import wx\n'), ((1683, 1702), 'wx.Colour', 'wx.Colour', (['(0)', '(0)', '(50)'], {}), '(0, 0, 50)\n', (1692, 1702), False, 'import wx\n'), ((2392, 2461), 'wx.Frame.__init__', 'wx.Frame.__init__', (['self'], {'parent': 'parent', 'title': 'title', 'size': 'size'}), '(self, parent=parent, title=title, size=size, **kws)\n', (2409, 2461), False, 'import wx\n'), ((8932, 9061), 'wxmplot.PlotPanel', 'PlotPanel', (['self'], {'fontsize': '(7)', 'axisbg': '"""#FEFEFE"""', 'with_data_process': '(False)', 'output_title': '"""test.xrf"""', 'messenger': 'self.write_message'}), "(self, fontsize=7, axisbg='#FEFEFE', with_data_process=False,\n output_title='test.xrf', messenger=self.write_message)\n", (8941, 9061), False, 'from wxmplot import PlotPanel\n'), ((9447, 9484), 'functools.partial', 'partial', (['self.on_cursor'], {'side': '"""right"""'}), "(self.on_cursor, side='right')\n", (9454, 9484), False, 'from functools import partial\n'), ((9814, 9847), 'wx.Panel', 'wx.Panel', (['self'], {'name': '"""Ctrl Panel"""'}), "(self, name='Ctrl Panel')\n", (9822, 9847), False, 'import wx\n'), ((10286, 10293), 'wxutils.Font', 'Font', (['(9)'], {}), '(9)\n', (10290, 10293), False, 'from wxutils import SimpleText, EditableListBox, Font, pack, Popup, get_icon, SetTip, Button, Check, MenuItem, Choice, FileOpen, FileSave, fix_filename, HLine, GridPanel, CEN, LEFT, RIGHT\n'), ((10311, 10319), 'wxutils.Font', 
'Font', (['(10)'], {}), '(10)\n', (10315, 10319), False, 'from wxutils import SimpleText, EditableListBox, Font, pack, Popup, get_icon, SetTip, Button, Check, MenuItem, Choice, FileOpen, FileSave, fix_filename, HLine, GridPanel, CEN, LEFT, RIGHT\n'), ((10337, 10345), 'wxutils.Font', 'Font', (['(11)'], {}), '(11)\n', (10341, 10345), False, 'from wxutils import SimpleText, EditableListBox, Font, pack, Popup, get_icon, SetTip, Button, Check, MenuItem, Choice, FileOpen, FileSave, fix_filename, HLine, GridPanel, CEN, LEFT, RIGHT\n'), ((10377, 10396), 'wx.Panel', 'wx.Panel', (['ctrlpanel'], {}), '(ctrlpanel)\n', (10385, 10396), False, 'import wx\n'), ((10414, 10440), 'wx.BoxSizer', 'wx.BoxSizer', (['wx.HORIZONTAL'], {}), '(wx.HORIZONTAL)\n', (10425, 10440), False, 'import wx\n'), ((11054, 11111), 'wx.ToggleButton', 'wx.ToggleButton', (['arrowpanel', '(-1)', '"""Hold """'], {'size': '(85, -1)'}), "(arrowpanel, -1, 'Hold ', size=(85, -1))\n", (11069, 11111), False, 'import wx\n'), ((11264, 11307), 'wxutils.Check', 'Check', (['arrowpanel', '""" K """'], {'action': 'self.onKLM'}), "(arrowpanel, ' K ', action=self.onKLM)\n", (11269, 11307), False, 'from wxutils import SimpleText, EditableListBox, Font, pack, Popup, get_icon, SetTip, Button, Check, MenuItem, Choice, FileOpen, FileSave, fix_filename, HLine, GridPanel, CEN, LEFT, RIGHT\n'), ((11339, 11382), 'wxutils.Check', 'Check', (['arrowpanel', '""" L """'], {'action': 'self.onKLM'}), "(arrowpanel, ' L ', action=self.onKLM)\n", (11344, 11382), False, 'from wxutils import SimpleText, EditableListBox, Font, pack, Popup, get_icon, SetTip, Button, Check, MenuItem, Choice, FileOpen, FileSave, fix_filename, HLine, GridPanel, CEN, LEFT, RIGHT\n'), ((11414, 11457), 'wxutils.Check', 'Check', (['arrowpanel', '""" M """'], {'action': 'self.onKLM'}), "(arrowpanel, ' M ', action=self.onKLM)\n", (11419, 11457), False, 'from wxutils import SimpleText, EditableListBox, Font, pack, Popup, get_icon, SetTip, Button, Check, MenuItem, Choice, 
FileOpen, FileSave, fix_filename, HLine, GridPanel, CEN, LEFT, RIGHT\n'), ((11739, 11763), 'wxutils.pack', 'pack', (['arrowpanel', 'ssizer'], {}), '(arrowpanel, ssizer)\n', (11743, 11763), False, 'from wxutils import SimpleText, EditableListBox, Font, pack, Popup, get_icon, SetTip, Button, Check, MenuItem, Choice, FileOpen, FileSave, fix_filename, HLine, GridPanel, CEN, LEFT, RIGHT\n'), ((11807, 11828), 'wx.GridBagSizer', 'wx.GridBagSizer', (['(4)', '(6)'], {}), '(4, 6)\n', (11822, 11828), False, 'import wx\n'), ((11848, 11885), 'wx.Panel', 'wx.Panel', (['ctrlpanel'], {'name': '"""ROI Panel"""'}), "(ctrlpanel, name='ROI Panel')\n", (11856, 11885), False, 'import wx\n'), ((11917, 11954), 'wx.ListBox', 'wx.ListBox', (['roipanel'], {'size': '(140, 150)'}), '(roipanel, size=(140, 150))\n', (11927, 11954), False, 'import wx\n'), ((12100, 12145), 'wx.TextCtrl', 'wx.TextCtrl', (['roipanel', '(-1)', '""""""'], {'size': '(150, -1)'}), "(roipanel, -1, '', size=(150, -1))\n", (12111, 12145), False, 'import wx\n'), ((12174, 12211), 'wx.Panel', 'wx.Panel', (['roipanel'], {'name': '"""ROIButtons"""'}), "(roipanel, name='ROIButtons')\n", (12182, 12211), False, 'import wx\n'), ((12229, 12255), 'wx.BoxSizer', 'wx.BoxSizer', (['wx.HORIZONTAL'], {}), '(wx.HORIZONTAL)\n', (12240, 12255), False, 'import wx\n'), ((12269, 12328), 'wxutils.Button', 'Button', (['roibtns', '"""Add"""'], {'size': '(70, 30)', 'action': 'self.onNewROI'}), "(roibtns, 'Add', size=(70, 30), action=self.onNewROI)\n", (12275, 12328), False, 'from wxutils import SimpleText, EditableListBox, Font, pack, Popup, get_icon, SetTip, Button, Check, MenuItem, Choice, FileOpen, FileSave, fix_filename, HLine, GridPanel, CEN, LEFT, RIGHT\n'), ((12345, 12414), 'wxutils.Button', 'Button', (['roibtns', '"""Delete"""'], {'size': '(70, 30)', 'action': 'self.onConfirmDelROI'}), "(roibtns, 'Delete', size=(70, 30), action=self.onConfirmDelROI)\n", (12351, 12414), False, 'from wxutils import SimpleText, EditableListBox, Font, pack, 
Popup, get_icon, SetTip, Button, Check, MenuItem, Choice, FileOpen, FileSave, fix_filename, HLine, GridPanel, CEN, LEFT, RIGHT\n'), ((12428, 12493), 'wxutils.Button', 'Button', (['roibtns', '"""Rename"""'], {'size': '(70, 30)', 'action': 'self.onRenameROI'}), "(roibtns, 'Rename', size=(70, 30), action=self.onRenameROI)\n", (12434, 12493), False, 'from wxutils import SimpleText, EditableListBox, Font, pack, Popup, get_icon, SetTip, Button, Check, MenuItem, Choice, FileOpen, FileSave, fix_filename, HLine, GridPanel, CEN, LEFT, RIGHT\n'), ((12653, 12674), 'wxutils.pack', 'pack', (['roibtns', 'zsizer'], {}), '(roibtns, zsizer)\n', (12657, 12674), False, 'from wxutils import SimpleText, EditableListBox, Font, pack, Popup, get_icon, SetTip, Button, Check, MenuItem, Choice, FileOpen, FileSave, fix_filename, HLine, GridPanel, CEN, LEFT, RIGHT\n'), ((13923, 13945), 'wxutils.pack', 'pack', (['roipanel', 'rsizer'], {}), '(roipanel, rsizer)\n', (13927, 13945), False, 'from wxutils import SimpleText, EditableListBox, Font, pack, Popup, get_icon, SetTip, Button, Check, MenuItem, Choice, FileOpen, FileSave, fix_filename, HLine, GridPanel, CEN, LEFT, RIGHT\n'), ((14013, 14052), 'wx.Panel', 'wx.Panel', (['ctrlpanel'], {'name': '"""YScalePanel"""'}), "(ctrlpanel, name='YScalePanel')\n", (14021, 14052), False, 'import wx\n'), ((14070, 14096), 'wx.BoxSizer', 'wx.BoxSizer', (['wx.HORIZONTAL'], {}), '(wx.HORIZONTAL)\n', (14081, 14096), False, 'import wx\n'), ((14243, 14334), 'wxutils.Choice', 'Choice', (['yscalepanel'], {'size': '(80, 30)', 'choices': "['log', 'linear']", 'action': 'self.onLogLinear'}), "(yscalepanel, size=(80, 30), choices=['log', 'linear'], action=self.\n onLogLinear)\n", (14249, 14334), False, 'from wxutils import SimpleText, EditableListBox, Font, pack, Popup, get_icon, SetTip, Button, Check, MenuItem, Choice, FileOpen, FileSave, fix_filename, HLine, GridPanel, CEN, LEFT, RIGHT\n'), ((14369, 14441), 'wxutils.Check', 'Check', (['yscalepanel', '""" Show Y Scale """'], 
{'action': 'self.onYAxis', 'default': '(False)'}), "(yscalepanel, ' Show Y Scale ', action=self.onYAxis, default=False)\n", (14374, 14441), False, 'from wxutils import SimpleText, EditableListBox, Font, pack, Popup, get_icon, SetTip, Button, Check, MenuItem, Choice, FileOpen, FileSave, fix_filename, HLine, GridPanel, CEN, LEFT, RIGHT\n'), ((14735, 14760), 'wxutils.pack', 'pack', (['yscalepanel', 'ysizer'], {}), '(yscalepanel, ysizer)\n', (14739, 14760), False, 'from wxutils import SimpleText, EditableListBox, Font, pack, Popup, get_icon, SetTip, Button, Check, MenuItem, Choice, FileOpen, FileSave, fix_filename, HLine, GridPanel, CEN, LEFT, RIGHT\n'), ((14805, 14842), 'wx.Panel', 'wx.Panel', (['ctrlpanel'], {'name': '"""ZoomPanel"""'}), "(ctrlpanel, name='ZoomPanel')\n", (14813, 14842), False, 'import wx\n'), ((14860, 14886), 'wx.BoxSizer', 'wx.BoxSizer', (['wx.HORIZONTAL'], {}), '(wx.HORIZONTAL)\n', (14871, 14886), False, 'import wx\n'), ((14900, 14965), 'wxutils.Button', 'Button', (['zoompanel', '"""Zoom In"""'], {'size': '(80, 30)', 'action': 'self.onZoomIn'}), "(zoompanel, 'Zoom In', size=(80, 30), action=self.onZoomIn)\n", (14906, 14965), False, 'from wxutils import SimpleText, EditableListBox, Font, pack, Popup, get_icon, SetTip, Button, Check, MenuItem, Choice, FileOpen, FileSave, fix_filename, HLine, GridPanel, CEN, LEFT, RIGHT\n'), ((14981, 15048), 'wxutils.Button', 'Button', (['zoompanel', '"""Zoom Out"""'], {'size': '(80, 30)', 'action': 'self.onZoomOut'}), "(zoompanel, 'Zoom Out', size=(80, 30), action=self.onZoomOut)\n", (14987, 15048), False, 'from wxutils import SimpleText, EditableListBox, Font, pack, Popup, get_icon, SetTip, Button, Check, MenuItem, Choice, FileOpen, FileSave, fix_filename, HLine, GridPanel, CEN, LEFT, RIGHT\n'), ((15063, 15126), 'wxutils.Button', 'Button', (['zoompanel', '"""Pan Lo"""'], {'size': '(75, 30)', 'action': 'self.onPanLo'}), "(zoompanel, 'Pan Lo', size=(75, 30), action=self.onPanLo)\n", (15069, 15126), False, 'from 
wxutils import SimpleText, EditableListBox, Font, pack, Popup, get_icon, SetTip, Button, Check, MenuItem, Choice, FileOpen, FileSave, fix_filename, HLine, GridPanel, CEN, LEFT, RIGHT\n'), ((15143, 15206), 'wxutils.Button', 'Button', (['zoompanel', '"""Pan Hi"""'], {'size': '(75, 30)', 'action': 'self.onPanHi'}), "(zoompanel, 'Pan Hi', size=(75, 30), action=self.onPanHi)\n", (15149, 15206), False, 'from wxutils import SimpleText, EditableListBox, Font, pack, Popup, get_icon, SetTip, Button, Check, MenuItem, Choice, FileOpen, FileSave, fix_filename, HLine, GridPanel, CEN, LEFT, RIGHT\n'), ((15427, 15450), 'wxutils.pack', 'pack', (['zoompanel', 'zsizer'], {}), '(zoompanel, zsizer)\n', (15431, 15450), False, 'from wxutils import SimpleText, EditableListBox, Font, pack, Popup, get_icon, SetTip, Button, Check, MenuItem, Choice, FileOpen, FileSave, fix_filename, HLine, GridPanel, CEN, LEFT, RIGHT\n'), ((15573, 15618), 'wx.dataview.DataViewListCtrl', 'dv.DataViewListCtrl', (['ctrlpanel'], {'style': 'dvstyle'}), '(ctrlpanel, style=dvstyle)\n', (15592, 15618), True, 'import wx.dataview as dv\n'), ((16416, 16440), 'wx.BoxSizer', 'wx.BoxSizer', (['wx.VERTICAL'], {}), '(wx.VERTICAL)\n', (16427, 16440), False, 'import wx\n'), ((17063, 17085), 'wxutils.pack', 'pack', (['ctrlpanel', 'sizer'], {}), '(ctrlpanel, sizer)\n', (17067, 17085), False, 'from wxutils import SimpleText, EditableListBox, Font, pack, Popup, get_icon, SetTip, Button, Check, MenuItem, Choice, FileOpen, FileSave, fix_filename, HLine, GridPanel, CEN, LEFT, RIGHT\n'), ((17544, 17570), 'wx.BoxSizer', 'wx.BoxSizer', (['wx.HORIZONTAL'], {}), '(wx.HORIZONTAL)\n', (17555, 17570), False, 'import wx\n'), ((17700, 17717), 'wxutils.pack', 'pack', (['self', 'sizer'], {}), '(self, sizer)\n', (17704, 17717), False, 'from wxutils import SimpleText, EditableListBox, Font, pack, Popup, get_icon, SetTip, Button, Check, MenuItem, Choice, FileOpen, FileSave, fix_filename, HLine, GridPanel, CEN, LEFT, RIGHT\n'), ((18141, 18173), 
'os.path.join', 'os.path.join', (['icondir', 'ICON_FILE'], {}), '(icondir, ICON_FILE)\n', (18153, 18173), False, 'import os\n'), ((25342, 25368), 'numpy.zeros', 'np.zeros', (['(right - left + 2)'], {}), '(right - left + 2)\n', (25350, 25368), True, 'import numpy as np\n'), ((25377, 25402), 'numpy.ones', 'np.ones', (['(right - left + 2)'], {}), '(right - left + 2)\n', (25384, 25402), True, 'import numpy as np\n'), ((27176, 27185), 'wx.Menu', 'wx.Menu', ([], {}), '()\n', (27183, 27185), False, 'import wx\n'), ((27194, 27298), 'wxutils.MenuItem', 'MenuItem', (['self', 'fmenu', '"""&Read MCA Spectra File\tCtrl+O"""', '"""Read GSECARS MCA File"""', 'self.onReadMCAFile'], {}), "(self, fmenu, '&Read MCA Spectra File\\tCtrl+O',\n 'Read GSECARS MCA File', self.onReadMCAFile)\n", (27202, 27298), False, 'from wxutils import SimpleText, EditableListBox, Font, pack, Popup, get_icon, SetTip, Button, Check, MenuItem, Choice, FileOpen, FileSave, fix_filename, HLine, GridPanel, CEN, LEFT, RIGHT\n'), ((27322, 27418), 'wxutils.MenuItem', 'MenuItem', (['self', 'fmenu', '"""&Save MCA File\tCtrl+S"""', '"""Save GSECARS MCA File"""', 'self.onSaveMCAFile'], {}), "(self, fmenu, '&Save MCA File\\tCtrl+S', 'Save GSECARS MCA File',\n self.onSaveMCAFile)\n", (27330, 27418), False, 'from wxutils import SimpleText, EditableListBox, Font, pack, Popup, get_icon, SetTip, Button, Check, MenuItem, Choice, FileOpen, FileSave, fix_filename, HLine, GridPanel, CEN, LEFT, RIGHT\n'), ((27441, 27544), 'wxutils.MenuItem', 'MenuItem', (['self', 'fmenu', '"""&Save ASCII Column File\tCtrl+A"""', '"""Save Column File"""', 'self.onSaveColumnFile'], {}), "(self, fmenu, '&Save ASCII Column File\\tCtrl+A', 'Save Column File',\n self.onSaveColumnFile)\n", (27449, 27544), False, 'from wxutils import SimpleText, EditableListBox, Font, pack, Popup, get_icon, SetTip, Button, Check, MenuItem, Choice, FileOpen, FileSave, fix_filename, HLine, GridPanel, CEN, LEFT, RIGHT\n'), ((27859, 27970), 'wxutils.MenuItem', 'MenuItem', 
(['self', 'fmenu', '"""Show Larch Buffer\tCtrl+L"""', '"""Show Larch Programming Buffer"""', 'self.onShowLarchBuffer'], {}), "(self, fmenu, 'Show Larch Buffer\\tCtrl+L',\n 'Show Larch Programming Buffer', self.onShowLarchBuffer)\n", (27867, 27970), False, 'from wxutils import SimpleText, EditableListBox, Font, pack, Popup, get_icon, SetTip, Button, Check, MenuItem, Choice, FileOpen, FileSave, fix_filename, HLine, GridPanel, CEN, LEFT, RIGHT\n'), ((28009, 28098), 'wxutils.MenuItem', 'MenuItem', (['self', 'fmenu', '"""Save Plot\tCtrl+I"""', '"""Save PNG Image of Plot"""', 'self.onSavePNG'], {}), "(self, fmenu, 'Save Plot\\tCtrl+I', 'Save PNG Image of Plot', self.\n onSavePNG)\n", (28017, 28098), False, 'from wxutils import SimpleText, EditableListBox, Font, pack, Popup, get_icon, SetTip, Button, Check, MenuItem, Choice, FileOpen, FileSave, fix_filename, HLine, GridPanel, CEN, LEFT, RIGHT\n'), ((28120, 28217), 'wxutils.MenuItem', 'MenuItem', (['self', 'fmenu', '"""&Copy Plot\tCtrl+C"""', '"""Copy Plot Image to Clipboard"""', 'self.onCopyImage'], {}), "(self, fmenu, '&Copy Plot\\tCtrl+C', 'Copy Plot Image to Clipboard',\n self.onCopyImage)\n", (28128, 28217), False, 'from wxutils import SimpleText, EditableListBox, Font, pack, Popup, get_icon, SetTip, Button, Check, MenuItem, Choice, FileOpen, FileSave, fix_filename, HLine, GridPanel, CEN, LEFT, RIGHT\n'), ((28256, 28329), 'wxutils.MenuItem', 'MenuItem', (['self', 'fmenu', '"""Page Setup..."""', '"""Printer Setup"""', 'self.onPageSetup'], {}), "(self, fmenu, 'Page Setup...', 'Printer Setup', self.onPageSetup)\n", (28264, 28329), False, 'from wxutils import SimpleText, EditableListBox, Font, pack, Popup, get_icon, SetTip, Button, Check, MenuItem, Choice, FileOpen, FileSave, fix_filename, HLine, GridPanel, CEN, LEFT, RIGHT\n'), ((28338, 28417), 'wxutils.MenuItem', 'MenuItem', (['self', 'fmenu', '"""Print Preview..."""', '"""Print Preview"""', 'self.onPrintPreview'], {}), "(self, fmenu, 'Print Preview...', 'Print 
Preview', self.onPrintPreview)\n", (28346, 28417), False, 'from wxutils import SimpleText, EditableListBox, Font, pack, Popup, get_icon, SetTip, Button, Check, MenuItem, Choice, FileOpen, FileSave, fix_filename, HLine, GridPanel, CEN, LEFT, RIGHT\n'), ((28426, 28493), 'wxutils.MenuItem', 'MenuItem', (['self', 'fmenu', '"""&Print\tCtrl+P"""', '"""Print Plot"""', 'self.onPrint'], {}), "(self, fmenu, '&Print\\tCtrl+P', 'Print Plot', self.onPrint)\n", (28434, 28493), False, 'from wxutils import SimpleText, EditableListBox, Font, pack, Popup, get_icon, SetTip, Button, Check, MenuItem, Choice, FileOpen, FileSave, fix_filename, HLine, GridPanel, CEN, LEFT, RIGHT\n'), ((28535, 28603), 'wxutils.MenuItem', 'MenuItem', (['self', 'fmenu', '"""&Quit\tCtrl+Q"""', '"""Quit program"""', 'self.onClose'], {}), "(self, fmenu, '&Quit\\tCtrl+Q', 'Quit program', self.onClose)\n", (28543, 28603), False, 'from wxutils import SimpleText, EditableListBox, Font, pack, Popup, get_icon, SetTip, Button, Check, MenuItem, Choice, FileOpen, FileSave, fix_filename, HLine, GridPanel, CEN, LEFT, RIGHT\n'), ((28621, 28630), 'wx.Menu', 'wx.Menu', ([], {}), '()\n', (28628, 28630), False, 'import wx\n'), ((28639, 28725), 'wxutils.MenuItem', 'MenuItem', (['self', 'omenu', '"""Configure Colors"""', '"""Configure Colors"""', 'self.config_colors'], {}), "(self, omenu, 'Configure Colors', 'Configure Colors', self.\n config_colors)\n", (28647, 28725), False, 'from wxutils import SimpleText, EditableListBox, Font, pack, Popup, get_icon, SetTip, Button, Check, MenuItem, Choice, FileOpen, FileSave, fix_filename, HLine, GridPanel, CEN, LEFT, RIGHT\n'), ((28746, 28860), 'wxutils.MenuItem', 'MenuItem', (['self', 'omenu', '"""Configure X-ray Lines"""', '"""Configure which X-ray Lines are shown"""', 'self.config_xraylines'], {}), "(self, omenu, 'Configure X-ray Lines',\n 'Configure which X-ray Lines are shown', self.config_xraylines)\n", (28754, 28860), False, 'from wxutils import SimpleText, EditableListBox, Font, 
pack, Popup, get_icon, SetTip, Button, Check, MenuItem, Choice, FileOpen, FileSave, fix_filename, HLine, GridPanel, CEN, LEFT, RIGHT\n'), ((28882, 28985), 'wxutils.MenuItem', 'MenuItem', (['self', 'omenu', '"""Configure Plot\tCtrl+K"""', '"""Configure Plot Colors, etc"""', 'self.panel.configure'], {}), "(self, omenu, 'Configure Plot\\tCtrl+K',\n 'Configure Plot Colors, etc', self.panel.configure)\n", (28890, 28985), False, 'from wxutils import SimpleText, EditableListBox, Font, pack, Popup, get_icon, SetTip, Button, Check, MenuItem, Choice, FileOpen, FileSave, fix_filename, HLine, GridPanel, CEN, LEFT, RIGHT\n'), ((29007, 29100), 'wxutils.MenuItem', 'MenuItem', (['self', 'omenu', '"""Zoom Out\tCtrl+Z"""', '"""Zoom out to full data range"""', 'self.unzoom_all'], {}), "(self, omenu, 'Zoom Out\\tCtrl+Z', 'Zoom out to full data range',\n self.unzoom_all)\n", (29015, 29100), False, 'from wxutils import SimpleText, EditableListBox, Font, pack, Popup, get_icon, SetTip, Button, Check, MenuItem, Choice, FileOpen, FileSave, fix_filename, HLine, GridPanel, CEN, LEFT, RIGHT\n'), ((29122, 29212), 'wxutils.MenuItem', 'MenuItem', (['self', 'omenu', '"""Toggle Grid\tCtrl+G"""', '"""Toggle Grid Display"""', 'self.toggle_grid'], {}), "(self, omenu, 'Toggle Grid\\tCtrl+G', 'Toggle Grid Display', self.\n toggle_grid)\n", (29130, 29212), False, 'from wxutils import SimpleText, EditableListBox, Font, pack, Popup, get_icon, SetTip, Button, Check, MenuItem, Choice, FileOpen, FileSave, fix_filename, HLine, GridPanel, CEN, LEFT, RIGHT\n'), ((29233, 29324), 'wxutils.MenuItem', 'MenuItem', (['self', 'omenu', '"""Toggle Plot legend"""', '"""Toggle Plot Legend"""', 'self.onToggleLegend'], {}), "(self, omenu, 'Toggle Plot legend', 'Toggle Plot Legend', self.\n onToggleLegend)\n", (29241, 29324), False, 'from wxutils import SimpleText, EditableListBox, Font, pack, Popup, get_icon, SetTip, Button, Check, MenuItem, Choice, FileOpen, FileSave, fix_filename, HLine, GridPanel, CEN, LEFT, RIGHT\n'), 
((29378, 29466), 'wxutils.MenuItem', 'MenuItem', (['self', 'omenu', '"""Hide X-ray Lines"""', '"""Hide all X-ray Lines"""', 'self.clear_lines'], {}), "(self, omenu, 'Hide X-ray Lines', 'Hide all X-ray Lines', self.\n clear_lines)\n", (29386, 29466), False, 'from wxutils import SimpleText, EditableListBox, Font, pack, Popup, get_icon, SetTip, Button, Check, MenuItem, Choice, FileOpen, FileSave, fix_filename, HLine, GridPanel, CEN, LEFT, RIGHT\n'), ((29487, 29581), 'wxutils.MenuItem', 'MenuItem', (['self', 'omenu', '"""Hide selected ROI """', '"""Hide selected ROI"""', 'self.clear_roihighlight'], {}), "(self, omenu, 'Hide selected ROI ', 'Hide selected ROI', self.\n clear_roihighlight)\n", (29495, 29581), False, 'from wxutils import SimpleText, EditableListBox, Font, pack, Popup, get_icon, SetTip, Button, Check, MenuItem, Choice, FileOpen, FileSave, fix_filename, HLine, GridPanel, CEN, LEFT, RIGHT\n'), ((29602, 29688), 'wxutils.MenuItem', 'MenuItem', (['self', 'omenu', '"""Hide Markers """', '"""Hide cursor markers"""', 'self.clear_markers'], {}), "(self, omenu, 'Hide Markers ', 'Hide cursor markers', self.\n clear_markers)\n", (29610, 29688), False, 'from wxutils import SimpleText, EditableListBox, Font, pack, Popup, get_icon, SetTip, Button, Check, MenuItem, Choice, FileOpen, FileSave, fix_filename, HLine, GridPanel, CEN, LEFT, RIGHT\n'), ((29709, 29805), 'wxutils.MenuItem', 'MenuItem', (['self', 'omenu', '"""Hide XRF Background """', '"""Hide cursor markers"""', 'self.clear_background'], {}), "(self, omenu, 'Hide XRF Background ', 'Hide cursor markers', self.\n clear_background)\n", (29717, 29805), False, 'from wxutils import SimpleText, EditableListBox, Font, pack, Popup, get_icon, SetTip, Button, Check, MenuItem, Choice, FileOpen, FileSave, fix_filename, HLine, GridPanel, CEN, LEFT, RIGHT\n'), ((29859, 29970), 'wxutils.MenuItem', 'MenuItem', (['self', 'omenu', '"""Swap MCA and Background MCA"""', '"""Swap Foreground and Background MCAs"""', 'self.swap_mcas'], 
{}), "(self, omenu, 'Swap MCA and Background MCA',\n 'Swap Foreground and Background MCAs', self.swap_mcas)\n", (29867, 29970), False, 'from wxutils import SimpleText, EditableListBox, Font, pack, Popup, get_icon, SetTip, Button, Check, MenuItem, Choice, FileOpen, FileSave, fix_filename, HLine, GridPanel, CEN, LEFT, RIGHT\n'), ((29992, 30086), 'wxutils.MenuItem', 'MenuItem', (['self', 'omenu', '"""Close Background MCA"""', '"""Close Background MCA"""', 'self.close_bkg_mca'], {}), "(self, omenu, 'Close Background MCA', 'Close Background MCA', self.\n close_bkg_mca)\n", (30000, 30086), False, 'from wxutils import SimpleText, EditableListBox, Font, pack, Popup, get_icon, SetTip, Button, Check, MenuItem, Choice, FileOpen, FileSave, fix_filename, HLine, GridPanel, CEN, LEFT, RIGHT\n'), ((30116, 30125), 'wx.Menu', 'wx.Menu', ([], {}), '()\n', (30123, 30125), False, 'import wx\n'), ((30134, 30278), 'wxutils.MenuItem', 'MenuItem', (['self', 'amenu', '"""Show Pileup Prediction"""', '"""Show Pileup Prediction"""'], {'kind': 'wx.ITEM_CHECK', 'checked': '(False)', 'action': 'self.onPileupPrediction'}), "(self, amenu, 'Show Pileup Prediction', 'Show Pileup Prediction',\n kind=wx.ITEM_CHECK, checked=False, action=self.onPileupPrediction)\n", (30142, 30278), False, 'from wxutils import SimpleText, EditableListBox, Font, pack, Popup, get_icon, SetTip, Button, Check, MenuItem, Choice, FileOpen, FileSave, fix_filename, HLine, GridPanel, CEN, LEFT, RIGHT\n'), ((30317, 30461), 'wxutils.MenuItem', 'MenuItem', (['self', 'amenu', '"""Show Escape Prediction"""', '"""Show Escape Prediction"""'], {'kind': 'wx.ITEM_CHECK', 'checked': '(False)', 'action': 'self.onEscapePrediction'}), "(self, amenu, 'Show Escape Prediction', 'Show Escape Prediction',\n kind=wx.ITEM_CHECK, checked=False, action=self.onEscapePrediction)\n", (30325, 30461), False, 'from wxutils import SimpleText, EditableListBox, Font, pack, Popup, get_icon, SetTip, Button, Check, MenuItem, Choice, FileOpen, FileSave, 
fix_filename, HLine, GridPanel, CEN, LEFT, RIGHT\n'), ((30500, 30599), 'wxutils.MenuItem', 'MenuItem', (['self', 'amenu', '"""&Calibrate Energy\tCtrl+E"""', '"""Calibrate Energy"""', 'self.onCalibrateEnergy'], {}), "(self, amenu, '&Calibrate Energy\\tCtrl+E', 'Calibrate Energy', self\n .onCalibrateEnergy)\n", (30508, 30599), False, 'from wxutils import SimpleText, EditableListBox, Font, pack, Popup, get_icon, SetTip, Button, Check, MenuItem, Choice, FileOpen, FileSave, fix_filename, HLine, GridPanel, CEN, LEFT, RIGHT\n'), ((30621, 30734), 'wxutils.MenuItem', 'MenuItem', (['self', 'amenu', '"""Fit Spectrum\tCtrl+F"""', '"""Fit Spectrum for Elemental Contributiosn"""', 'self.onFitSpectrum'], {}), "(self, amenu, 'Fit Spectrum\\tCtrl+F',\n 'Fit Spectrum for Elemental Contributiosn', self.onFitSpectrum)\n", (30629, 30734), False, 'from wxutils import SimpleText, EditableListBox, Font, pack, Popup, get_icon, SetTip, Button, Check, MenuItem, Choice, FileOpen, FileSave, fix_filename, HLine, GridPanel, CEN, LEFT, RIGHT\n'), ((30946, 30958), 'wx.MenuBar', 'wx.MenuBar', ([], {}), '()\n', (30956, 30958), False, 'import wx\n'), ((48891, 48913), 'os.path.split', 'os.path.split', (['deffile'], {}), '(deffile)\n', (48904, 48913), False, 'import os\n'), ((48977, 49055), 'wxutils.FileSave', 'FileSave', (['self', '"""Save MCA File"""'], {'default_file': 'deffile', 'wildcard': 'FILE_WILDCARDS'}), "(self, 'Save MCA File', default_file=deffile, wildcard=FILE_WILDCARDS)\n", (48985, 49055), False, 'from wxutils import SimpleText, EditableListBox, Font, pack, Popup, get_icon, SetTip, Button, Check, MenuItem, Choice, FileOpen, FileSave, fix_filename, HLine, GridPanel, CEN, LEFT, RIGHT\n'), ((49782, 49804), 'os.path.split', 'os.path.split', (['deffile'], {}), '(deffile)\n', (49795, 49804), False, 'import os\n'), ((49940, 50038), 'wxutils.FileSave', 'FileSave', (['self', '"""Save ASCII File for MCA Data"""'], {'default_file': 'deffile', 'wildcard': 'ASCII_WILDCARDS'}), "(self, 'Save ASCII 
File for MCA Data', default_file=deffile,\n wildcard=ASCII_WILDCARDS)\n", (49948, 50038), False, 'from wxutils import SimpleText, EditableListBox, Font, pack, Popup, get_icon, SetTip, Button, Check, MenuItem, Choice, FileOpen, FileSave, fix_filename, HLine, GridPanel, CEN, LEFT, RIGHT\n'), ((50887, 51074), 'wx.MessageDialog', 'wx.MessageDialog', (['self', '"""XRF Spectral Viewer\n <NAME> <<EMAIL>>\n """', '"""About XRF Viewer"""', '(wx.OK | wx.ICON_INFORMATION)'], {}), '(self,\n """XRF Spectral Viewer\n <NAME> <<EMAIL>>\n """\n , \'About XRF Viewer\', wx.OK | wx.ICON_INFORMATION)\n', (50903, 51074), False, 'import wx\n'), ((52101, 52122), 'wx.App.__init__', 'wx.App.__init__', (['self'], {}), '(self)\n', (52116, 52122), False, 'import wx\n'), ((3546, 3561), 'numpy.arange', 'np.arange', (['(2048)'], {}), '(2048)\n', (3555, 3561), True, 'import numpy as np\n'), ((3588, 3601), 'numpy.ones', 'np.ones', (['(2048)'], {}), '(2048)\n', (3595, 3601), True, 'import numpy as np\n'), ((4365, 4384), 'wxutils.Font', 'Font', (['(9)'], {'serif': '(True)'}), '(9, serif=True)\n', (4369, 4384), False, 'from wxutils import SimpleText, EditableListBox, Font, pack, Popup, get_icon, SetTip, Button, Check, MenuItem, Choice, FileOpen, FileSave, fix_filename, HLine, GridPanel, CEN, LEFT, RIGHT\n'), ((23242, 23290), 'wxutils.Popup', 'Popup', (['self', 'msg', '"""Delete ROI?"""'], {'style': 'wx.YES_NO'}), "(self, msg, 'Delete ROI?', style=wx.YES_NO)\n", (23247, 23290), False, 'from wxutils import SimpleText, EditableListBox, Font, pack, Popup, get_icon, SetTip, Button, Check, MenuItem, Choice, FileOpen, FileSave, fix_filename, HLine, GridPanel, CEN, LEFT, RIGHT\n'), ((32424, 32485), 'wx.CallAfter', 'wx.CallAfter', (['self.larch.symtable._plotter.close_all_displays'], {}), '(self.larch.symtable._plotter.close_all_displays)\n', (32436, 32485), False, 'import wx\n'), ((39882, 39910), 'matplotlib.ticker.FuncFormatter', 'FuncFormatter', (['self._formaty'], {}), '(self._formaty)\n', (39895, 39910), 
False, 'from matplotlib.ticker import LogFormatter, FuncFormatter\n'), ((44625, 44651), 'numpy.ma.masked_less', 'np.ma.masked_less', (['yroi', '(0)'], {}), '(yroi, 0)\n', (44642, 44651), True, 'import numpy as np\n'), ((45818, 45844), 'numpy.ma.masked_less', 'np.ma.masked_less', (['yroi', '(0)'], {}), '(yroi, 0)\n', (45835, 45844), True, 'import numpy as np\n'), ((48210, 48233), 'copy.deepcopy', 'copy.deepcopy', (['self.mca'], {}), '(self.mca)\n', (48223, 48233), False, 'import copy\n'), ((9701, 9738), 'functools.partial', 'partial', (['self.on_cursor'], {'side': '"""right"""'}), "(self.on_cursor, side='right')\n", (9708, 9738), False, 'from functools import partial\n'), ((10758, 10773), 'wxutils.get_icon', 'get_icon', (['wname'], {}), '(wname)\n', (10766, 10773), False, 'from wxutils import SimpleText, EditableListBox, Font, pack, Popup, get_icon, SetTip, Button, Check, MenuItem, Choice, FileOpen, FileSave, fix_filename, HLine, GridPanel, CEN, LEFT, RIGHT\n'), ((10924, 10957), 'functools.partial', 'partial', (['ptable.onKey'], {'name': 'dname'}), '(ptable.onKey, name=dname)\n', (10931, 10957), False, 'from functools import partial\n'), ((17915, 17926), 'wx.GetApp', 'wx.GetApp', ([], {}), '()\n', (17924, 17926), False, 'import wx\n'), ((18212, 18245), 'wx.Icon', 'wx.Icon', (['fico', 'wx.BITMAP_TYPE_ICO'], {}), '(fico, wx.BITMAP_TYPE_ICO)\n', (18219, 18245), False, 'import wx\n'), ((22449, 22500), 'wxutils.Popup', 'Popup', (['self', 'msg', '"""Overwrite ROI?"""'], {'style': 'wx.YES_NO'}), "(self, msg, 'Overwrite ROI?', style=wx.YES_NO)\n", (22454, 22500), False, 'from wxutils import SimpleText, EditableListBox, Font, pack, Popup, get_icon, SetTip, Button, Check, MenuItem, Choice, FileOpen, FileSave, fix_filename, HLine, GridPanel, CEN, LEFT, RIGHT\n'), ((32592, 32617), 'wx.CallAfter', 'wx.CallAfter', (['wid.Destroy'], {}), '(wid.Destroy)\n', (32604, 32617), False, 'import wx\n'), ((40158, 40171), 'numpy.log10', 'np.log10', (['val'], {}), '(val)\n', (40166, 40171), 
True, 'import numpy as np\n'), ((47832, 47843), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (47841, 47843), False, 'import os\n'), ((51342, 51353), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (51351, 51353), False, 'import os\n'), ((51886, 51905), 'os.path.split', 'os.path.split', (['path'], {}), '(path)\n', (51899, 51905), False, 'import os\n'), ((51678, 51752), 'wxutils.Popup', 'Popup', (['self', '("Re-read file \'%s\'?" % path)', '"""Re-read file?"""'], {'style': 'wx.YES_NO'}), '(self, "Re-read file \'%s\'?" % path, \'Re-read file?\', style=wx.YES_NO)\n', (51683, 51752), False, 'from wxutils import SimpleText, EditableListBox, Font, pack, Popup, get_icon, SetTip, Button, Check, MenuItem, Choice, FileOpen, FileSave, fix_filename, HLine, GridPanel, CEN, LEFT, RIGHT\n')] |
# Lint as: python2, python3
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Python TF-Lite interpreter."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import ctypes
import platform
import sys
import numpy as np
# pylint: disable=g-import-not-at-top
# This module ships in two packages: as part of full TensorFlow
# (tensorflow.lite) and standalone as tflite_runtime. Pick the matching
# wrapper module and tf_export implementation based on where this file lives.
if not __file__.endswith('tflite_runtime/interpreter.py'):
  # This file is part of tensorflow package.
  from tensorflow.python.util.lazy_loader import LazyLoader
  from tensorflow.python.util.tf_export import tf_export as _tf_export

  # Lazy load since some of the performance benchmark skylark rules
  # break dependencies. Must use double quotes to match code internal rewrite
  # rule.
  # pylint: disable=g-inconsistent-quotes
  _interpreter_wrapper = LazyLoader(
      "_interpreter_wrapper", globals(),
      "tensorflow.lite.python.interpreter_wrapper."
      "tensorflow_wrap_interpreter_wrapper")
  # pylint: enable=g-inconsistent-quotes

  del LazyLoader
else:
  # This file is part of tflite_runtime package.
  from tflite_runtime import interpreter_wrapper as _interpreter_wrapper

  # Standalone builds have no tf_export machinery; substitute a no-op
  # decorator factory so the @_tf_export(...) uses below still work.
  def _tf_export(*x, **kwargs):
    del x, kwargs
    return lambda x: x
class Delegate(object):
  """Python wrapper class to manage TfLiteDelegate objects.

  The shared library is expected to have two functions:
    TfLiteDelegate* tflite_plugin_create_delegate(
        char**, char**, size_t, void (*report_error)(const char *))
    void tflite_plugin_destroy_delegate(TfLiteDelegate*)

  The first one creates a delegate object. It may return NULL to indicate an
  error (with a suitable error message reported by calling report_error()).
  The second one destroys delegate object and must be called for every
  created delegate object. Passing NULL as argument value is allowed, i.e.
    tflite_plugin_destroy_delegate(tflite_plugin_create_delegate(...))
  always works.
  """

  def __init__(self, library, options=None):
    """Loads delegate from the shared library.

    Args:
      library: Shared library name.
      options: Dictionary of options that are required to load the delegate. All
        keys and values in the dictionary should be serializable. Consult the
        documentation of the specific delegate for required and legal options.
        (default None)

    Raises:
      RuntimeError: This is raised if the Python implementation is not CPython.
      ValueError: If the plugin failed to create a delegate.
    """
    # TODO(b/136468453): Remove need for __del__ ordering needs of CPython
    # by using explicit closes(). See implementation of Interpreter __del__.
    if platform.python_implementation() != 'CPython':
      # NOTE: fixed the previously missing space between the two adjacent
      # string literals ("...CPythondue to...").
      raise RuntimeError('Delegates are currently only supported into CPython '
                         'due to missing immediate reference counting.')

    self._library = ctypes.pydll.LoadLibrary(library)
    self._library.tflite_plugin_create_delegate.argtypes = [
        ctypes.POINTER(ctypes.c_char_p),
        ctypes.POINTER(ctypes.c_char_p), ctypes.c_int,
        ctypes.CFUNCTYPE(None, ctypes.c_char_p)
    ]
    self._library.tflite_plugin_create_delegate.restype = ctypes.c_void_p

    # Convert the options from a dictionary to lists of char pointers.
    options = options or {}
    options_keys = (ctypes.c_char_p * len(options))()
    options_values = (ctypes.c_char_p * len(options))()
    for idx, (key, value) in enumerate(options.items()):
      options_keys[idx] = str(key).encode('utf-8')
      options_values[idx] = str(value).encode('utf-8')

    class ErrorMessageCapture(object):
      """Accumulates error strings reported by the delegate plugin."""

      def __init__(self):
        self.message = ''

      def report(self, x):
        # The C callback may hand us either str or bytes depending on build.
        self.message += x if isinstance(x, str) else x.decode('utf-8')

    capture = ErrorMessageCapture()
    error_capturer_cb = ctypes.CFUNCTYPE(None, ctypes.c_char_p)(capture.report)
    # Do not make a copy of _delegate_ptr. It is freed by Delegate's finalizer.
    self._delegate_ptr = self._library.tflite_plugin_create_delegate(
        options_keys, options_values, len(options), error_capturer_cb)
    if self._delegate_ptr is None:
      raise ValueError(capture.message)

  def __del__(self):
    # __del__ can be called multiple times, and it can run even when __init__
    # raised before `_library` was assigned (non-CPython check failed, or
    # LoadLibrary raised). Look up attributes defensively so finalization
    # never raises AttributeError on a half-constructed object.
    library = getattr(self, '_library', None)
    if library is not None:
      library.tflite_plugin_destroy_delegate.argtypes = [ctypes.c_void_p]
      # Passing NULL to tflite_plugin_destroy_delegate is explicitly allowed
      # by the plugin contract (see class docstring), so a missing
      # `_delegate_ptr` is safe here as well.
      library.tflite_plugin_destroy_delegate(
          getattr(self, '_delegate_ptr', None))
      self._library = None

  def _get_native_delegate_pointer(self):
    """Returns the native TfLiteDelegate pointer.

    It is not safe to copy this pointer because it needs to be freed.

    Returns:
      TfLiteDelegate *
    """
    return self._delegate_ptr
@_tf_export('lite.experimental.load_delegate')
def load_delegate(library, options=None):
  """Loads a delegate plugin and returns it wrapped in a `Delegate`.

  Args:
    library: Name of the shared library that contains the
      [TfLiteDelegate](https://www.tensorflow.org/lite/performance/delegates).
    options: Dictionary of options required to load the delegate. All keys
      and values in the dictionary should be convertible to str. Consult the
      documentation of the specific delegate for required and legal options.
      (default None)

  Returns:
    Delegate object.

  Raises:
    ValueError: Delegate failed to load.
    RuntimeError: If delegate loading is used on unsupported platform.
  """
  # TODO(b/137299813): Fix darwin support for delegates.
  if sys.platform == 'darwin':
    raise RuntimeError('Dynamic loading of delegates on Darwin not supported.')

  try:
    # Returning straight from the try block keeps the success path minimal.
    return Delegate(library, options)
  except ValueError as e:
    # Re-raise with the library name for easier diagnosis.
    raise ValueError('Failed to load delegate from {}\n{}'.format(
        library, str(e)))
@_tf_export('lite.Interpreter')
class Interpreter(object):
"""Interpreter interface for TensorFlow Lite Models.
This makes the TensorFlow Lite interpreter accessible in Python.
It is possible to use this interpreter in a multithreaded Python environment,
but you must be sure to call functions of a particular instance from only
one thread at a time. So if you want to have 4 threads running different
inferences simultaneously, create an interpreter for each one as thread-local
data. Similarly, if you are calling invoke() in one thread on a single
interpreter but you want to use tensor() on another thread once it is done,
you must use a synchronization primitive between the threads to ensure invoke
has returned before calling tensor().
"""
def __init__(self,
model_path=None,
model_content=None,
experimental_delegates=None):
"""Constructor.
Args:
model_path: Path to TF-Lite Flatbuffer file.
model_content: Content of model.
experimental_delegates: Experimental. Subject to change. List of
[TfLiteDelegate](https://www.tensorflow.org/lite/performance/delegates)
objects returned by lite.load_delegate().
Raises:
ValueError: If the interpreter was unable to create.
"""
if not hasattr(self, '_custom_op_registerers'):
self._custom_op_registerers = []
if model_path and not model_content:
self._interpreter = (
_interpreter_wrapper.InterpreterWrapper_CreateWrapperCPPFromFile(
model_path, self._custom_op_registerers))
if not self._interpreter:
raise ValueError('Failed to open {}'.format(model_path))
elif model_content and not model_path:
# Take a reference, so the pointer remains valid.
# Since python strings are immutable then PyString_XX functions
# will always return the same pointer.
self._model_content = model_content
self._interpreter = (
_interpreter_wrapper.InterpreterWrapper_CreateWrapperCPPFromBuffer(
model_content, self._custom_op_registerers))
elif not model_path and not model_path:
raise ValueError('`model_path` or `model_content` must be specified.')
else:
raise ValueError('Can\'t both provide `model_path` and `model_content`')
# Each delegate is a wrapper that owns the delegates that have been loaded
# as plugins. The interpreter wrapper will be using them, but we need to
# hold them in a list so that the lifetime is preserved at least as long as
# the interpreter wrapper.
self._delegates = []
if experimental_delegates:
self._delegates = experimental_delegates
for delegate in self._delegates:
self._interpreter.ModifyGraphWithDelegate(
delegate._get_native_delegate_pointer()) # pylint: disable=protected-access
def __del__(self):
# Must make sure the interpreter is destroyed before things that
# are used by it like the delegates. NOTE this only works on CPython
# probably.
# TODO(b/136468453): Remove need for __del__ ordering needs of CPython
# by using explicit closes(). See implementation of Interpreter __del__.
self._interpreter = None
self._delegates = None
  def allocate_tensors(self):
    """Allocates internal tensor buffers via the underlying wrapper.

    Refuses to run while numpy views into internal interpreter memory are
    outstanding (see _ensure_safe), since reallocation would invalidate them.

    Raises:
      RuntimeError: If numpy views into internal buffers are still alive.
    """
    self._ensure_safe()
    return self._interpreter.AllocateTensors()
def _safe_to_run(self):
"""Returns true if there exist no numpy array buffers.
This means it is safe to run tflite calls that may destroy internally
allocated memory. This works, because in the wrapper.cc we have made
the numpy base be the self._interpreter.
"""
# NOTE, our tensor() call in cpp will use _interpreter as a base pointer.
# If this environment is the only _interpreter, then the ref count should be
# 2 (1 in self and 1 in temporary of sys.getrefcount).
return sys.getrefcount(self._interpreter) == 2
def _ensure_safe(self):
"""Makes sure no numpy arrays pointing to internal buffers are active.
This should be called from any function that will call a function on
_interpreter that may reallocate memory e.g. invoke(), ...
Raises:
RuntimeError: If there exist numpy objects pointing to internal memory
then we throw.
"""
if not self._safe_to_run():
raise RuntimeError("""There is at least 1 reference to internal data
in the interpreter in the form of a numpy array or slice. Be sure to
only hold the function returned from tensor() if you are using raw
data access.""")
# Experimental and subject to change
def _get_op_details(self, op_index):
"""Gets a dictionary with arrays of ids for tensors involved with an op.
Args:
op_index: Operation/node index of node to query.
Returns:
a dictionary containing the index, op name, and arrays with lists of the
indices for the inputs and outputs of the op/node.
"""
op_index = int(op_index)
op_name = self._interpreter.NodeName(op_index)
op_inputs = self._interpreter.NodeInputs(op_index)
op_outputs = self._interpreter.NodeOutputs(op_index)
details = {
'index': op_index,
'op_name': op_name,
'inputs': op_inputs,
'outputs': op_outputs,
}
return details
def _get_tensor_details(self, tensor_index):
"""Gets tensor details.
Args:
tensor_index: Tensor index of tensor to query.
Returns:
A dictionary containing the following fields of the tensor:
'name': The tensor name.
'index': The tensor index in the interpreter.
'shape': The shape of the tensor.
'quantization': Deprecated, use 'quantization_parameters'. This field
only works for per-tensor quantization, whereas
'quantization_parameters' works in all cases.
'quantization_parameters': The parameters used to quantize the tensor:
'scales': List of scales (one if per-tensor quantization)
'zero_points': List of zero_points (one if per-tensor quantization)
'quantized_dimension': Specifies the dimension of per-axis
quantization, in the case of multiple scales/zero_points.
Raises:
ValueError: If tensor_index is invalid.
"""
tensor_index = int(tensor_index)
tensor_name = self._interpreter.TensorName(tensor_index)
tensor_size = self._interpreter.TensorSize(tensor_index)
tensor_type = self._interpreter.TensorType(tensor_index)
tensor_quantization = self._interpreter.TensorQuantization(tensor_index)
tensor_quantization_params = self._interpreter.TensorQuantizationParameters(
tensor_index)
if not tensor_name or not tensor_type:
raise ValueError('Could not get tensor details')
details = {
'name': tensor_name,
'index': tensor_index,
'shape': tensor_size,
'dtype': tensor_type,
'quantization': tensor_quantization,
'quantization_parameters': {
'scales': tensor_quantization_params[0],
'zero_points': tensor_quantization_params[1],
'quantized_dimension': tensor_quantization_params[2],
}
}
return details
# Experimental and subject to change
def _get_ops_details(self):
"""Gets op details for every node.
Returns:
A list of dictionaries containing arrays with lists of tensor ids for
tensors involved in the op.
"""
return [
self._get_op_details(idx) for idx in range(self._interpreter.NumNodes())
]
def get_tensor_details(self):
"""Gets tensor details for every tensor with valid tensor details.
Tensors where required information about the tensor is not found are not
added to the list. This includes temporary tensors without a name.
Returns:
A list of dictionaries containing tensor information.
"""
tensor_details = []
for idx in range(self._interpreter.NumTensors()):
try:
tensor_details.append(self._get_tensor_details(idx))
except ValueError:
pass
return tensor_details
def get_input_details(self):
"""Gets model input details.
Returns:
A list of input details.
"""
return [
self._get_tensor_details(i) for i in self._interpreter.InputIndices()
]
  def set_tensor(self, tensor_index, value):
    """Sets the value of the input tensor. Note this copies data in `value`.

    If you want to avoid copying, you can use the `tensor()` function to get a
    numpy buffer pointing to the input buffer in the tflite interpreter.

    Args:
      tensor_index: Tensor index of tensor to set. This value can be gotten from
        the 'index' field in get_input_details.
      value: Value of tensor to set.

    Raises:
      ValueError: If the interpreter could not set the tensor.
    """
    # The wrapper copies `value` into the tensor's internal buffer; see
    # tensor() for the zero-copy alternative.
    self._interpreter.SetTensor(tensor_index, value)
def resize_tensor_input(self, input_index, tensor_size):
"""Resizes an input tensor.
Args:
input_index: Tensor index of input to set. This value can be gotten from
the 'index' field in get_input_details.
tensor_size: The tensor_shape to resize the input to.
Raises:
ValueError: If the interpreter could not resize the input tensor.
"""
self._ensure_safe()
# `ResizeInputTensor` now only accepts int32 numpy array as `tensor_size
# parameter.
tensor_size = np.array(tensor_size, dtype=np.int32)
self._interpreter.ResizeInputTensor(input_index, tensor_size)
def get_output_details(self):
"""Gets model output details.
Returns:
A list of output details.
"""
return [
self._get_tensor_details(i) for i in self._interpreter.OutputIndices()
]
  def get_tensor(self, tensor_index):
    """Gets the value of the input tensor (get a copy).

    If you wish to avoid the copy, use `tensor()`. This function cannot be used
    to read intermediate results.

    Args:
      tensor_index: Tensor index of tensor to get. This value can be gotten from
        the 'index' field in get_output_details.

    Returns:
      a numpy array.
    """
    # GetTensor returns a copy, so the result stays valid even if the
    # interpreter later reallocates its internal buffers.
    return self._interpreter.GetTensor(tensor_index)
def tensor(self, tensor_index):
"""Returns function that gives a numpy view of the current tensor buffer.
This allows reading and writing to this tensors w/o copies. This more
closely mirrors the C++ Interpreter class interface's tensor() member, hence
the name. Be careful to not hold these output references through calls
to `allocate_tensors()` and `invoke()`. This function cannot be used to read
intermediate results.
Usage:
```
interpreter.allocate_tensors()
input = interpreter.tensor(interpreter.get_input_details()[0]["index"])
output = interpreter.tensor(interpreter.get_output_details()[0]["index"])
for i in range(10):
input().fill(3.)
interpreter.invoke()
print("inference %s" % output())
```
Notice how this function avoids making a numpy array directly. This is
because it is important to not hold actual numpy views to the data longer
than necessary. If you do, then the interpreter can no longer be invoked,
because it is possible the interpreter would resize and invalidate the
referenced tensors. The NumPy API doesn't allow any mutability of the
the underlying buffers.
WRONG:
```
input = interpreter.tensor(interpreter.get_input_details()[0]["index"])()
output = interpreter.tensor(interpreter.get_output_details()[0]["index"])()
interpreter.allocate_tensors() # This will throw RuntimeError
for i in range(10):
input.fill(3.)
interpreter.invoke() # this will throw RuntimeError since input,output
```
Args:
tensor_index: Tensor index of tensor to get. This value can be gotten from
the 'index' field in get_output_details.
Returns:
A function that can return a new numpy array pointing to the internal
TFLite tensor state at any point. It is safe to hold the function forever,
but it is not safe to hold the numpy array forever.
"""
return lambda: self._interpreter.tensor(self._interpreter, tensor_index)
def invoke(self):
"""Invoke the interpreter.
Be sure to set the input sizes, allocate tensors and fill values before
calling this. Also, note that this function releases the GIL so heavy
computation can be done in the background while the Python interpreter
continues. No other function on this object should be called while the
invoke() call has not finished.
Raises:
ValueError: When the underlying interpreter fails raise ValueError.
"""
self._ensure_safe()
self._interpreter.Invoke()
def reset_all_variables(self):
return self._interpreter.ResetVariableTensors()
class InterpreterWithCustomOps(Interpreter):
"""Interpreter interface for TensorFlow Lite Models that accepts custom ops.
The interface provided by this class is experimenal and therefore not exposed
as part of the public API.
Wraps the tf.lite.Interpreter class and adds the ability to load custom ops
by providing the names of functions that take a pointer to a BuiltinOpResolver
and add a custom op.
"""
def __init__(self,
model_path=None,
model_content=None,
experimental_delegates=None,
custom_op_registerers=None):
"""Constructor.
Args:
model_path: Path to TF-Lite Flatbuffer file.
model_content: Content of model.
experimental_delegates: Experimental. Subject to change. List of
[TfLiteDelegate](https://www.tensorflow.org/lite/performance/delegates)
objects returned by lite.load_delegate().
custom_op_registerers: List of str, symbol names of functions that take a
pointer to a MutableOpResolver and register a custom op.
Raises:
ValueError: If the interpreter was unable to create.
"""
self._custom_op_registerers = custom_op_registerers
super(InterpreterWithCustomOps, self).__init__(
model_path=model_path,
model_content=model_content,
experimental_delegates=experimental_delegates)
| [
"platform.python_implementation",
"ctypes.pydll.LoadLibrary",
"tflite_runtime.interpreter_wrapper.InterpreterWrapper_CreateWrapperCPPFromBuffer",
"tensorflow.python.util.tf_export.tf_export",
"sys.getrefcount",
"numpy.array",
"ctypes.CFUNCTYPE",
"tflite_runtime.interpreter_wrapper.InterpreterWrapper_C... | [((5313, 5358), 'tensorflow.python.util.tf_export.tf_export', '_tf_export', (['"""lite.experimental.load_delegate"""'], {}), "('lite.experimental.load_delegate')\n", (5323, 5358), True, 'from tensorflow.python.util.tf_export import tf_export as _tf_export\n'), ((6357, 6387), 'tensorflow.python.util.tf_export.tf_export', '_tf_export', (['"""lite.Interpreter"""'], {}), "('lite.Interpreter')\n", (6367, 6387), True, 'from tensorflow.python.util.tf_export import tf_export as _tf_export\n'), ((3426, 3459), 'ctypes.pydll.LoadLibrary', 'ctypes.pydll.LoadLibrary', (['library'], {}), '(library)\n', (3450, 3459), False, 'import ctypes\n'), ((15793, 15830), 'numpy.array', 'np.array', (['tensor_size'], {'dtype': 'np.int32'}), '(tensor_size, dtype=np.int32)\n', (15801, 15830), True, 'import numpy as np\n'), ((3206, 3238), 'platform.python_implementation', 'platform.python_implementation', ([], {}), '()\n', (3236, 3238), False, 'import platform\n'), ((3529, 3560), 'ctypes.POINTER', 'ctypes.POINTER', (['ctypes.c_char_p'], {}), '(ctypes.c_char_p)\n', (3543, 3560), False, 'import ctypes\n'), ((3570, 3601), 'ctypes.POINTER', 'ctypes.POINTER', (['ctypes.c_char_p'], {}), '(ctypes.c_char_p)\n', (3584, 3601), False, 'import ctypes\n'), ((3625, 3664), 'ctypes.CFUNCTYPE', 'ctypes.CFUNCTYPE', (['None', 'ctypes.c_char_p'], {}), '(None, ctypes.c_char_p)\n', (3641, 3664), False, 'import ctypes\n'), ((4371, 4410), 'ctypes.CFUNCTYPE', 'ctypes.CFUNCTYPE', (['None', 'ctypes.c_char_p'], {}), '(None, ctypes.c_char_p)\n', (4387, 4410), False, 'import ctypes\n'), ((7833, 7942), 'tflite_runtime.interpreter_wrapper.InterpreterWrapper_CreateWrapperCPPFromFile', '_interpreter_wrapper.InterpreterWrapper_CreateWrapperCPPFromFile', (['model_path', 'self._custom_op_registerers'], {}), '(model_path,\n self._custom_op_registerers)\n', (7897, 7942), True, 'from tflite_runtime import interpreter_wrapper as _interpreter_wrapper\n'), ((10239, 10273), 
'sys.getrefcount', 'sys.getrefcount', (['self._interpreter'], {}), '(self._interpreter)\n', (10254, 10273), False, 'import sys\n'), ((8346, 8461), 'tflite_runtime.interpreter_wrapper.InterpreterWrapper_CreateWrapperCPPFromBuffer', '_interpreter_wrapper.InterpreterWrapper_CreateWrapperCPPFromBuffer', (['model_content', 'self._custom_op_registerers'], {}), '(\n model_content, self._custom_op_registerers)\n', (8412, 8461), True, 'from tflite_runtime import interpreter_wrapper as _interpreter_wrapper\n')] |
# Importing Libraries
import cv2
import numpy as np
from matplotlib import pyplot as plt
#Capturing video
cap = cv2.VideoCapture(0)
while True:
_,frame = cap.read()
img_size = 680
# Plotting four circles on the video of the object you want to see the transformation of.
cv2.circle(frame,(143, 93),5,(0,0,255),-5)
cv2.circle(frame, (494, 93), 5, (0, 0, 255), -1)
cv2.circle(frame, (143, 447), 5, (0, 0, 255), -1)
cv2.circle(frame, (497, 445), 5, (0, 0, 255), -1) # selecting all the above four points in an array
imgPts = np.float32([[143, 93],[494, 93],[143, 447],[497, 445]])
# selecting four points in an array for the destination video( the one you want to see as your output)
objPoints = np.float32([[-10, -10],[685, -10],[-10, 685],[687, 687]]) #Apply perspective transformation function of openCV2. This function will return the matrix which you can feed into warpPerspective function to get the warped image.
matrix = cv2.getPerspectiveTransform(imgPts,objPoints)
result = cv2.warpPerspective(frame, matrix, (img_size, img_size)) #Now Plotting both the videos(original, warped video)using matplotlib
# ColorSpace
hsvFrame = cv2.cvtColor(result, cv2.COLOR_BGR2HSV)
# Set range for red color
red_lower = np.array([0, 114, 84], np.uint8)
red_upper = np.array([69, 255, 255], np.uint8)
red_mask = cv2.inRange(hsvFrame, red_lower, red_upper)
# Set range for blue color
blue_lower = np.array([98, 91, 116], np.uint8)
blue_upper = np.array([165, 255, 255], np.uint8)
blue_mask = cv2.inRange(hsvFrame, blue_lower, blue_upper)
# For red color
res_red = cv2.bitwise_and(result, result,
mask = red_mask)
# For blue color
res_blue = cv2.bitwise_and(result, result,
mask = blue_mask)
# Creating circle for Red Color
gray_img = cv2.cvtColor(result, cv2.COLOR_BGR2GRAY)
img = cv2.medianBlur(gray_img, 5)
cimg = cv2.cvtColor(img,cv2.COLOR_GRAY2BGR)
circles = cv2.HoughCircles(red_mask,cv2.HOUGH_GRADIENT, 1, 39,
param1=150, param2=10, minRadius=25, maxRadius=35)
if circles is not None:
circles = np.round(circles[0, :]).astype("int")
for (x, y, r) in circles:
cv2.circle(result, (x, y), r, (0, 0, 255), 4)
cv2.rectangle(result, (x - 2, y - 2), (x + 1, y + 1), (0, 0, 0), -1)
# Creating circle for Blue Color
gray_img = cv2.cvtColor(result, cv2.COLOR_BGR2GRAY)
img = cv2.medianBlur(gray_img, 5)
cimg = cv2.cvtColor(img,cv2.COLOR_GRAY2BGR)
circles = cv2.HoughCircles(blue_mask,cv2.HOUGH_GRADIENT, 1, 39,
param1=150, param2=10, minRadius=25, maxRadius=35)
if circles is not None:
circles = np.round(circles[0, :]).astype("int")
for (x, y, r) in circles:
cv2.circle(result, (x, y), r, (255, 0, 0), 4)
cv2.rectangle(result, (x - 2, y - 2), (x + 1, y + 1), (0, 0, 0), -1)
cv2.imshow('frame',frame)
cv2.imshow('Circle Finder', result)
if cv2.waitKey(1) & 0xff == 27:
cv2.destroyAllWindows()
| [
"cv2.warpPerspective",
"cv2.circle",
"cv2.HoughCircles",
"cv2.bitwise_and",
"cv2.medianBlur",
"cv2.getPerspectiveTransform",
"cv2.cvtColor",
"numpy.float32",
"cv2.destroyAllWindows",
"cv2.waitKey",
"cv2.VideoCapture",
"numpy.array",
"cv2.rectangle",
"numpy.round",
"cv2.imshow",
"cv2.in... | [((114, 133), 'cv2.VideoCapture', 'cv2.VideoCapture', (['(0)'], {}), '(0)\n', (130, 133), False, 'import cv2\n'), ((298, 346), 'cv2.circle', 'cv2.circle', (['frame', '(143, 93)', '(5)', '(0, 0, 255)', '(-5)'], {}), '(frame, (143, 93), 5, (0, 0, 255), -5)\n', (308, 346), False, 'import cv2\n'), ((345, 393), 'cv2.circle', 'cv2.circle', (['frame', '(494, 93)', '(5)', '(0, 0, 255)', '(-1)'], {}), '(frame, (494, 93), 5, (0, 0, 255), -1)\n', (355, 393), False, 'import cv2\n'), ((398, 447), 'cv2.circle', 'cv2.circle', (['frame', '(143, 447)', '(5)', '(0, 0, 255)', '(-1)'], {}), '(frame, (143, 447), 5, (0, 0, 255), -1)\n', (408, 447), False, 'import cv2\n'), ((452, 501), 'cv2.circle', 'cv2.circle', (['frame', '(497, 445)', '(5)', '(0, 0, 255)', '(-1)'], {}), '(frame, (497, 445), 5, (0, 0, 255), -1)\n', (462, 501), False, 'import cv2\n'), ((573, 631), 'numpy.float32', 'np.float32', (['[[143, 93], [494, 93], [143, 447], [497, 445]]'], {}), '([[143, 93], [494, 93], [143, 447], [497, 445]])\n', (583, 631), True, 'import numpy as np\n'), ((759, 819), 'numpy.float32', 'np.float32', (['[[-10, -10], [685, -10], [-10, 685], [687, 687]]'], {}), '([[-10, -10], [685, -10], [-10, 685], [687, 687]])\n', (769, 819), True, 'import numpy as np\n'), ((1004, 1050), 'cv2.getPerspectiveTransform', 'cv2.getPerspectiveTransform', (['imgPts', 'objPoints'], {}), '(imgPts, objPoints)\n', (1031, 1050), False, 'import cv2\n'), ((1068, 1124), 'cv2.warpPerspective', 'cv2.warpPerspective', (['frame', 'matrix', '(img_size, img_size)'], {}), '(frame, matrix, (img_size, img_size))\n', (1087, 1124), False, 'import cv2\n'), ((1238, 1277), 'cv2.cvtColor', 'cv2.cvtColor', (['result', 'cv2.COLOR_BGR2HSV'], {}), '(result, cv2.COLOR_BGR2HSV)\n', (1250, 1277), False, 'import cv2\n'), ((1325, 1357), 'numpy.array', 'np.array', (['[0, 114, 84]', 'np.uint8'], {}), '([0, 114, 84], np.uint8)\n', (1333, 1357), True, 'import numpy as np\n'), ((1374, 1408), 'numpy.array', 'np.array', (['[69, 255, 255]', 
'np.uint8'], {}), '([69, 255, 255], np.uint8)\n', (1382, 1408), True, 'import numpy as np\n'), ((1424, 1467), 'cv2.inRange', 'cv2.inRange', (['hsvFrame', 'red_lower', 'red_upper'], {}), '(hsvFrame, red_lower, red_upper)\n', (1435, 1467), False, 'import cv2\n'), ((1517, 1550), 'numpy.array', 'np.array', (['[98, 91, 116]', 'np.uint8'], {}), '([98, 91, 116], np.uint8)\n', (1525, 1550), True, 'import numpy as np\n'), ((1568, 1603), 'numpy.array', 'np.array', (['[165, 255, 255]', 'np.uint8'], {}), '([165, 255, 255], np.uint8)\n', (1576, 1603), True, 'import numpy as np\n'), ((1620, 1665), 'cv2.inRange', 'cv2.inRange', (['hsvFrame', 'blue_lower', 'blue_upper'], {}), '(hsvFrame, blue_lower, blue_upper)\n', (1631, 1665), False, 'import cv2\n'), ((1701, 1747), 'cv2.bitwise_and', 'cv2.bitwise_and', (['result', 'result'], {'mask': 'red_mask'}), '(result, result, mask=red_mask)\n', (1716, 1747), False, 'import cv2\n'), ((1818, 1865), 'cv2.bitwise_and', 'cv2.bitwise_and', (['result', 'result'], {'mask': 'blue_mask'}), '(result, result, mask=blue_mask)\n', (1833, 1865), False, 'import cv2\n'), ((1954, 1994), 'cv2.cvtColor', 'cv2.cvtColor', (['result', 'cv2.COLOR_BGR2GRAY'], {}), '(result, cv2.COLOR_BGR2GRAY)\n', (1966, 1994), False, 'import cv2\n'), ((2013, 2040), 'cv2.medianBlur', 'cv2.medianBlur', (['gray_img', '(5)'], {}), '(gray_img, 5)\n', (2027, 2040), False, 'import cv2\n'), ((2059, 2096), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_GRAY2BGR'], {}), '(img, cv2.COLOR_GRAY2BGR)\n', (2071, 2096), False, 'import cv2\n'), ((2114, 2222), 'cv2.HoughCircles', 'cv2.HoughCircles', (['red_mask', 'cv2.HOUGH_GRADIENT', '(1)', '(39)'], {'param1': '(150)', 'param2': '(10)', 'minRadius': '(25)', 'maxRadius': '(35)'}), '(red_mask, cv2.HOUGH_GRADIENT, 1, 39, param1=150, param2=10,\n minRadius=25, maxRadius=35)\n', (2130, 2222), False, 'import cv2\n'), ((2571, 2611), 'cv2.cvtColor', 'cv2.cvtColor', (['result', 'cv2.COLOR_BGR2GRAY'], {}), '(result, cv2.COLOR_BGR2GRAY)\n', (2583, 
2611), False, 'import cv2\n'), ((2630, 2657), 'cv2.medianBlur', 'cv2.medianBlur', (['gray_img', '(5)'], {}), '(gray_img, 5)\n', (2644, 2657), False, 'import cv2\n'), ((2676, 2713), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_GRAY2BGR'], {}), '(img, cv2.COLOR_GRAY2BGR)\n', (2688, 2713), False, 'import cv2\n'), ((2731, 2841), 'cv2.HoughCircles', 'cv2.HoughCircles', (['blue_mask', 'cv2.HOUGH_GRADIENT', '(1)', '(39)'], {'param1': '(150)', 'param2': '(10)', 'minRadius': '(25)', 'maxRadius': '(35)'}), '(blue_mask, cv2.HOUGH_GRADIENT, 1, 39, param1=150, param2=\n 10, minRadius=25, maxRadius=35)\n', (2747, 2841), False, 'import cv2\n'), ((3139, 3165), 'cv2.imshow', 'cv2.imshow', (['"""frame"""', 'frame'], {}), "('frame', frame)\n", (3149, 3165), False, 'import cv2\n'), ((3169, 3204), 'cv2.imshow', 'cv2.imshow', (['"""Circle Finder"""', 'result'], {}), "('Circle Finder', result)\n", (3179, 3204), False, 'import cv2\n'), ((3251, 3274), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (3272, 3274), False, 'import cv2\n'), ((2388, 2433), 'cv2.circle', 'cv2.circle', (['result', '(x, y)', 'r', '(0, 0, 255)', '(4)'], {}), '(result, (x, y), r, (0, 0, 255), 4)\n', (2398, 2433), False, 'import cv2\n'), ((2446, 2514), 'cv2.rectangle', 'cv2.rectangle', (['result', '(x - 2, y - 2)', '(x + 1, y + 1)', '(0, 0, 0)', '(-1)'], {}), '(result, (x - 2, y - 2), (x + 1, y + 1), (0, 0, 0), -1)\n', (2459, 2514), False, 'import cv2\n'), ((3006, 3051), 'cv2.circle', 'cv2.circle', (['result', '(x, y)', 'r', '(255, 0, 0)', '(4)'], {}), '(result, (x, y), r, (255, 0, 0), 4)\n', (3016, 3051), False, 'import cv2\n'), ((3064, 3132), 'cv2.rectangle', 'cv2.rectangle', (['result', '(x - 2, y - 2)', '(x + 1, y + 1)', '(0, 0, 0)', '(-1)'], {}), '(result, (x - 2, y - 2), (x + 1, y + 1), (0, 0, 0), -1)\n', (3077, 3132), False, 'import cv2\n'), ((3214, 3228), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (3225, 3228), False, 'import cv2\n'), ((2304, 2327), 'numpy.round', 
'np.round', (['circles[0, :]'], {}), '(circles[0, :])\n', (2312, 2327), True, 'import numpy as np\n'), ((2922, 2945), 'numpy.round', 'np.round', (['circles[0, :]'], {}), '(circles[0, :])\n', (2930, 2945), True, 'import numpy as np\n')] |
import tensorflow as tf
import numpy as np
##################################################################################
# Initialization
##################################################################################
# Xavier : tf.contrib.layers.xavier_initializer()
# He : tf.contrib.layers.variance_scaling_initializer()
# Normal : tf.random_normal_initializer(mean=0.0, stddev=0.02)
# Truncated_normal : tf.truncated_normal_initializer(mean=0.0, stddev=0.02)
# Orthogonal : tf.orthogonal_initializer(1.0) / # relu = sqrt(2), the others = 1.0
##################################################################################
# Regularization
##################################################################################
# l2_decay : tf.contrib.layers.l2_regularizer(0.0001)
# orthogonal_regularizer : orthogonal_regularizer(0.0001) # orthogonal_regularizer_fully(0.0001)
# Default initializer / regularizers shared by every layer helper in this file.
weight_init = tf.truncated_normal_initializer(mean=0.0, stddev=0.02)  # truncated normal, std 0.02
weight_regularizer = tf.contrib.layers.l2_regularizer(0.0001)  # L2 weight decay for conv kernels
weight_regularizer_fully = tf.contrib.layers.l2_regularizer(0.0001)  # L2 weight decay for fully-connected kernels
##################################################################################
# Layers
##################################################################################
# padding='SAME' ======> pad = ceil[ (kernel - stride) / 2 ]
def conv(x, channels, kernel=4, stride=2, pad=0, pad_type='zero', use_bias=True, sn=False, scope='conv_0'):
    """2-D convolution with optional explicit padding and spectral normalization.

    Args:
        x: Input tensor, NHWC layout.
        channels: Number of output channels.
        kernel: Square kernel size.
        stride: Convolution stride.
        pad: Requested padding; expanded below to mimic 'SAME'-style totals.
        pad_type: 'zero' or 'reflect'. Any other value raises ValueError.
        use_bias: Whether to add a bias term.
        sn: If True, spectrally normalize the kernel (uses tf.nn.conv2d).
        sn=False uses tf.layers.conv2d with its default 'valid' padding.
        scope: Variable scope name.

    Returns:
        The convolved tensor.

    Raises:
        ValueError: If `pad_type` is unrecognized while `pad > 0`.
    """
    with tf.variable_scope(scope):
        if pad > 0:
            h = x.get_shape().as_list()[1]
            # Mirror 'SAME' total padding: double the pad when the height
            # divides the stride evenly, otherwise pad up to the kernel.
            if h % stride == 0:
                pad = pad * 2
            else:
                pad = max(kernel - (h % stride), 0)

            pad_top = pad // 2
            pad_bottom = pad - pad_top
            pad_left = pad // 2
            pad_right = pad - pad_left

            paddings = [[0, 0], [pad_top, pad_bottom], [pad_left, pad_right], [0, 0]]
            if pad_type == 'zero':
                x = tf.pad(x, paddings)
            elif pad_type == 'reflect':
                x = tf.pad(x, paddings, mode='REFLECT')
            else:
                # Previously an unknown pad_type silently skipped padding;
                # fail loudly so misconfigurations are caught early.
                raise ValueError('pad_type must be "zero" or "reflect", got: {}'.format(pad_type))

        if sn:
            w = tf.get_variable("kernel", shape=[kernel, kernel, x.get_shape()[-1], channels], initializer=weight_init,
                                regularizer=weight_regularizer)
            x = tf.nn.conv2d(input=x, filter=spectral_norm(w),
                             strides=[1, stride, stride, 1], padding='VALID')
            if use_bias:
                bias = tf.get_variable("bias", [channels], initializer=tf.constant_initializer(0.0))
                x = tf.nn.bias_add(x, bias)
        else:
            x = tf.layers.conv2d(inputs=x, filters=channels,
                                 kernel_size=kernel, kernel_initializer=weight_init,
                                 kernel_regularizer=weight_regularizer,
                                 strides=stride, use_bias=use_bias)
        return x
def partial_conv(x, channels, kernel=3, stride=2, use_bias=True, padding='SAME', sn=False, scope='conv_0'):
    """Partial convolution: rescales border outputs by the fraction of the
    kernel window that actually overlaps the input (Liu et al. 2018 style).

    Only the 'SAME' path does the partial rescaling; any other padding falls
    through to an ordinary (optionally spectrally normalized) convolution.
    """
    with tf.variable_scope(scope):
        if padding.lower() == 'SAME'.lower():
            with tf.variable_scope('mask'):
                _, h, w, _ = x.get_shape().as_list()
                slide_window = kernel * kernel
                mask = tf.ones(shape=[1, h, w, 1])
                # Fixed all-ones kernel (trainable=False) counts, per output
                # position, how many input pixels fall inside the window --
                # fewer near the borders under 'SAME' zero padding.
                update_mask = tf.layers.conv2d(mask, filters=1,
                                                 kernel_size=kernel, kernel_initializer=tf.constant_initializer(1.0),
                                                 strides=stride, padding=padding, use_bias=False, trainable=False)
                # Rescale factor = full window size / valid count; the epsilon
                # guards against division by zero.
                mask_ratio = slide_window / (update_mask + 1e-8)
                update_mask = tf.clip_by_value(update_mask, 0.0, 1.0)
                mask_ratio = mask_ratio * update_mask
            with tf.variable_scope('x'):
                if sn:
                    w = tf.get_variable("kernel", shape=[kernel, kernel, x.get_shape()[-1], channels],
                                        initializer=weight_init, regularizer=weight_regularizer)
                    x = tf.nn.conv2d(input=x, filter=spectral_norm(w), strides=[1, stride, stride, 1], padding=padding)
                else:
                    # Bias is deliberately deferred so the raw convolution can
                    # be rescaled by mask_ratio before the bias is added.
                    x = tf.layers.conv2d(x, filters=channels,
                                         kernel_size=kernel, kernel_initializer=weight_init,
                                         kernel_regularizer=weight_regularizer,
                                         strides=stride, padding=padding, use_bias=False)
                x = x * mask_ratio
                if use_bias:
                    bias = tf.get_variable("bias", [channels], initializer=tf.constant_initializer(0.0))
                    x = tf.nn.bias_add(x, bias)
                    # NOTE(review): the final update_mask re-masking only runs
                    # when use_bias is True — matches upstream, verify intent.
                    x = x * update_mask
        else:
            if sn:
                w = tf.get_variable("kernel", shape=[kernel, kernel, x.get_shape()[-1], channels],
                                    initializer=weight_init, regularizer=weight_regularizer)
                x = tf.nn.conv2d(input=x, filter=spectral_norm(w), strides=[1, stride, stride, 1], padding=padding)
                if use_bias:
                    bias = tf.get_variable("bias", [channels], initializer=tf.constant_initializer(0.0))
                    x = tf.nn.bias_add(x, bias)
            else:
                x = tf.layers.conv2d(x, filters=channels,
                                     kernel_size=kernel, kernel_initializer=weight_init,
                                     kernel_regularizer=weight_regularizer,
                                     strides=stride, padding=padding, use_bias=use_bias)
        return x
def dilate_conv(x, channels, kernel=3, rate=2, use_bias=True, padding='SAME', sn=False, scope='conv_0'):
    """Atrous (dilated) 2-D convolution with dilation `rate`, optionally
    applying spectral normalization to the kernel."""
    with tf.variable_scope(scope):
        in_channels = x.get_shape()[-1]
        w = tf.get_variable("kernel", shape=[kernel, kernel, in_channels, channels],
                            initializer=weight_init, regularizer=weight_regularizer)
        # Spectral normalization bounds the kernel's largest singular value.
        filters = spectral_norm(w) if sn else w
        out = tf.nn.atrous_conv2d(x, filters, rate=rate, padding=padding)
        if use_bias:
            b = tf.get_variable("bias", [channels], initializer=tf.constant_initializer(0.0))
            out = tf.nn.bias_add(out, b)
        return out
def deconv(x, channels, kernel=4, stride=2, padding='SAME', use_bias=True, sn=False, scope='deconv_0'):
    """Transposed 2-D convolution (upsampling by `stride`), optionally
    applying spectral normalization to the kernel."""
    with tf.variable_scope(scope):
        batch, height, width = x.get_shape().as_list()[:3]
        # Under 'SAME' the output is exactly `stride` times larger; otherwise
        # the kernel overhang max(kernel - stride, 0) is appended per dim.
        extra = 0 if padding == 'SAME' else max(kernel - stride, 0)
        output_shape = [batch, height * stride + extra, width * stride + extra, channels]
        if sn:
            w = tf.get_variable("kernel", shape=[kernel, kernel, channels, x.get_shape()[-1]],
                                initializer=weight_init, regularizer=weight_regularizer)
            x = tf.nn.conv2d_transpose(x, filter=spectral_norm(w), output_shape=output_shape,
                                       strides=[1, stride, stride, 1], padding=padding)
            if use_bias:
                b = tf.get_variable("bias", [channels], initializer=tf.constant_initializer(0.0))
                x = tf.nn.bias_add(x, b)
        else:
            x = tf.layers.conv2d_transpose(inputs=x, filters=channels,
                                           kernel_size=kernel, kernel_initializer=weight_init,
                                           kernel_regularizer=weight_regularizer,
                                           strides=stride, padding=padding, use_bias=use_bias)
        return x
def conv_pixel_shuffle_up(x, scale_factor=2, use_bias=True, sn=False, scope='pixel_shuffle'):
    """Upsample spatially by `scale_factor` via a 1x1 conv that expands
    channels, followed by depth-to-space (pixel shuffle)."""
    out_channels = x.get_shape()[-1] * (scale_factor ** 2)
    y = conv(x, out_channels, kernel=1, stride=1, use_bias=use_bias, sn=sn, scope=scope)
    return tf.depth_to_space(y, block_size=scale_factor)
def conv_pixel_shuffle_down(x, scale_factor=2, use_bias=True, sn=False, scope='pixel_shuffle'):
    """Downsample spatially by `scale_factor` via a 1x1 conv that shrinks
    channels, followed by space-to-depth (inverse pixel shuffle)."""
    out_channels = x.get_shape()[-1] // (scale_factor ** 2)
    y = conv(x, out_channels, kernel=1, stride=1, use_bias=use_bias, sn=sn, scope=scope)
    return tf.space_to_depth(y, block_size=scale_factor)
def fully_conneted(x, units, use_bias=True, sn=False, scope='linear'):
    """Fully-connected (dense) layer applied to the flattened input,
    optionally with spectral normalization of the weight matrix."""
    with tf.variable_scope(scope):
        flat = flatten(x)
        in_dim = flat.get_shape().as_list()[-1]
        if sn:
            w = tf.get_variable("kernel", [in_dim, units], tf.float32,
                                initializer=weight_init, regularizer=weight_regularizer_fully)
            out = tf.matmul(flat, spectral_norm(w))
            if use_bias:
                bias = tf.get_variable("bias", [units],
                                       initializer=tf.constant_initializer(0.0))
                out = out + bias
        else:
            out = tf.layers.dense(flat, units=units, kernel_initializer=weight_init,
                                  kernel_regularizer=weight_regularizer_fully,
                                  use_bias=use_bias)
        return out
##################################################################################
# Blocks
##################################################################################
def resblock(x_init, channels, use_bias=True, is_training=True, sn=False, scope='resblock'):
    """Residual block: two 3x3 conv + batch-norm stages with an identity shortcut."""
    with tf.variable_scope(scope):
        def _conv_bn(inputs, name):
            # 3x3 stride-1 conv followed by batch norm, under its own sub-scope.
            with tf.variable_scope(name):
                out = conv(inputs, channels, kernel=3, stride=1, pad=1, use_bias=use_bias, sn=sn)
                return batch_norm(out, is_training)

        x = relu(_conv_bn(x_init, 'res1'))
        x = _conv_bn(x, 'res2')
        return x + x_init
def resblock_up(x_init, channels, use_bias=True, is_training=True, sn=False, scope='resblock_up'):
    """Upsampling residual block: stride-2 then stride-1 deconv + batch norm,
    with a stride-2 deconv shortcut; ReLU applied to the sum."""
    with tf.variable_scope(scope):
        def _deconv_bn(inputs, stride, name):
            # Transposed conv followed by batch norm, under its own sub-scope.
            with tf.variable_scope(name):
                out = deconv(inputs, channels, kernel=3, stride=stride, use_bias=use_bias, sn=sn)
                return batch_norm(out, is_training)

        x = relu(_deconv_bn(x_init, 2, 'res1'))
        x = _deconv_bn(x, 1, 'res2')
        with tf.variable_scope('skip'):
            shortcut = deconv(x_init, channels, kernel=3, stride=2, use_bias=use_bias, sn=sn)
        return relu(x + shortcut)
def resblock_up_condition(x_init, z, channels, use_bias=True, is_training=True, sn=False, scope='resblock_up'):
    """Upsampling residual block whose batch norms are conditioned on `z`.

    See https://github.com/taki0112/BigGAN-Tensorflow
    """
    with tf.variable_scope(scope):
        def _deconv_cbn(inputs, stride, name):
            # Transposed conv followed by z-conditioned batch norm, per sub-scope.
            with tf.variable_scope(name):
                out = deconv(inputs, channels, kernel=3, stride=stride, use_bias=use_bias, sn=sn)
                return condition_batch_norm(out, z, is_training)

        x = relu(_deconv_cbn(x_init, 2, 'res1'))
        x = _deconv_cbn(x, 1, 'res2')
        with tf.variable_scope('skip'):
            shortcut = deconv(x_init, channels, kernel=3, stride=2, use_bias=use_bias, sn=sn)
        return relu(x + shortcut)
def resblock_down(x_init, channels, use_bias=True, is_training=True, sn=False, scope='resblock_down'):
    """Downsampling residual block: stride-2 then stride-1 conv + batch norm,
    with a stride-2 conv shortcut; ReLU applied to the sum."""
    with tf.variable_scope(scope):
        def _conv_bn(inputs, stride, name):
            # 3x3 conv followed by batch norm, under its own sub-scope.
            with tf.variable_scope(name):
                out = conv(inputs, channels, kernel=3, stride=stride, pad=1, use_bias=use_bias, sn=sn)
                return batch_norm(out, is_training)

        x = relu(_conv_bn(x_init, 2, 'res1'))
        x = _conv_bn(x, 1, 'res2')
        with tf.variable_scope('skip'):
            shortcut = conv(x_init, channels, kernel=3, stride=2, pad=1, use_bias=use_bias, sn=sn)
        return relu(x + shortcut)
def denseblock(x_init, channels, n_db=6, use_bias=True, is_training=True, sn=False, scope='denseblock'):
    """Dense block of `n_db` bottlenecks; each consumes the channel-wise
    concatenation of the input and every earlier bottleneck's output, and the
    final result is the concatenation of all of them."""
    with tf.variable_scope(scope):
        features = [x_init]
        for i in range(n_db):
            with tf.variable_scope('bottle_neck_' + str(i)):
                # The first bottleneck consumes the input directly; later ones
                # the concatenation of everything produced so far.
                x = features[0] if i == 0 else tf.concat(features, axis=-1)
                x = conv(x, 4 * channels, kernel=1, stride=1, use_bias=use_bias, sn=sn, scope='conv_0')
                x = batch_norm(x, is_training, scope='batch_norm_0')
                x = relu(x)
                x = conv(x, channels, kernel=3, stride=1, pad=1, use_bias=use_bias, sn=sn, scope='conv_1')
                x = batch_norm(x, is_training, scope='batch_norm_1')
                x = relu(x)
                features.append(x)
        return tf.concat(features, axis=-1)
def res_denseblock(x_init, channels, n_rdb=20, n_rdb_conv=6, use_bias=True, is_training=True, sn=False, scope='res_denseblock'):
    """Residual dense block: `n_rdb` chained RDBs (each with `n_rdb_conv`
    densely-connected conv layers and a local residual), followed by global
    feature fusion (1x1 then 3x3 conv) and a global residual to the input."""
    with tf.variable_scope(scope):
        RDBs = []
        x_input = x_init
        """
        n_rdb = 20 ( RDB number )
        n_rdb_conv = 6 ( per RDB conv layer )
        """
        for k in range(n_rdb):
            with tf.variable_scope('RDB_' + str(k)):
                layers = []
                layers.append(x_init)
                x = conv(x_init, channels, kernel=3, stride=1, pad=1, use_bias=use_bias, sn=sn, scope='conv_0')
                x = batch_norm(x, is_training, scope='batch_norm_0')
                x = relu(x)
                layers.append(x)
                # Each later layer sees the concat of every earlier output.
                for i in range(1, n_rdb_conv):
                    x = tf.concat(layers, axis=-1)
                    x = conv(x, channels, kernel=3, stride=1, pad=1, use_bias=use_bias, sn=sn, scope='conv_' + str(i))
                    x = batch_norm(x, is_training, scope='batch_norm_' + str(i))
                    x = relu(x)
                    layers.append(x)
                # Local feature fusion
                x = tf.concat(layers, axis=-1)
                x = conv(x, channels, kernel=1, stride=1, use_bias=use_bias, sn=sn, scope='conv_last')
                # Local residual learning
                x = x_init + x
                RDBs.append(x)
                # Next RDB takes this RDB's output as its input.
                x_init = x
        with tf.variable_scope('GFF_1x1'):
            x = tf.concat(RDBs, axis=-1)
            x = conv(x, channels, kernel=1, stride=1, use_bias=use_bias, sn=sn, scope='conv')
        with tf.variable_scope('GFF_3x3'):
            x = conv(x, channels, kernel=3, stride=1, pad=1, use_bias=use_bias, sn=sn, scope='conv')
        # Global residual learning
        x = x_input + x
        return x
def self_attention(x, channels, use_bias=True, sn=False, scope='self_attention'):
    """SAGAN-style self-attention: 1x1 conv projections f/g/h, softmax
    attention over the N = h*w spatial positions, and a zero-initialized
    residual gate `gamma` so the block starts as the identity."""
    with tf.variable_scope(scope):
        proj_f = conv(x, channels // 8, kernel=1, stride=1, use_bias=use_bias, sn=sn, scope='f_conv')  # [bs, h, w, c']
        proj_g = conv(x, channels // 8, kernel=1, stride=1, use_bias=use_bias, sn=sn, scope='g_conv')  # [bs, h, w, c']
        proj_h = conv(x, channels, kernel=1, stride=1, use_bias=use_bias, sn=sn, scope='h_conv')  # [bs, h, w, c]
        # Attention map over spatial positions: [bs, N, N]
        energy = tf.matmul(hw_flatten(proj_g), hw_flatten(proj_f), transpose_b=True)
        attention = tf.nn.softmax(energy)
        out = tf.matmul(attention, hw_flatten(proj_h))  # [bs, N, C]
        gamma = tf.get_variable("gamma", [1], initializer=tf.constant_initializer(0.0))
        out = tf.reshape(out, shape=x.shape)  # [bs, h, w, C]
        x = gamma * out + x
    return x
def self_attention_with_pooling(x, channels, use_bias=True, sn=False, scope='self_attention'):
    """Memory-lighter SAGAN self-attention: the f and h projections are
    max-pooled, h uses channels // 2, and a final 1x1 conv restores the
    channel count before the zero-initialized residual gate `gamma`."""
    with tf.variable_scope(scope):
        proj_f = conv(x, channels // 8, kernel=1, stride=1, use_bias=use_bias, sn=sn, scope='f_conv')  # [bs, h, w, c']
        proj_f = max_pooling(proj_f)
        proj_g = conv(x, channels // 8, kernel=1, stride=1, use_bias=use_bias, sn=sn, scope='g_conv')  # [bs, h, w, c']
        proj_h = conv(x, channels // 2, kernel=1, stride=1, use_bias=use_bias, sn=sn, scope='h_conv')  # [bs, h, w, c//2]
        proj_h = max_pooling(proj_h)
        # Attention of the N = h*w positions over the pooled positions.
        energy = tf.matmul(hw_flatten(proj_g), hw_flatten(proj_f), transpose_b=True)
        attention = tf.nn.softmax(energy)
        out = tf.matmul(attention, hw_flatten(proj_h))  # [bs, N, C//2]
        gamma = tf.get_variable("gamma", [1], initializer=tf.constant_initializer(0.0))
        out = tf.reshape(out, shape=[x.shape[0], x.shape[1], x.shape[2], channels // 2])  # [bs, h, w, C//2]
        out = conv(out, channels, kernel=1, stride=1, use_bias=use_bias, sn=sn, scope='attn_conv')
        x = gamma * out + x
    return x
def squeeze_excitation(x, channels, ratio=16, use_bias=True, sn=False, scope='senet'):
    """Squeeze-and-Excitation: gate each channel by a sigmoid weight computed
    from a globally average-pooled descriptor through a bottleneck MLP."""
    with tf.variable_scope(scope):
        # Squeeze: global spatial average -> per-channel descriptor.
        pooled = global_avg_pooling(x)
        # Excitation: bottleneck (channels // ratio) then expand back.
        gate = fully_conneted(pooled, units=channels // ratio, use_bias=use_bias, sn=sn, scope='fc1')
        gate = relu(gate)
        gate = fully_conneted(gate, units=channels, use_bias=use_bias, sn=sn, scope='fc2')
        gate = sigmoid(gate)
        gate = tf.reshape(gate, [-1, 1, 1, channels])
        return x * gate
def convolution_block_attention(x, channels, ratio=16, use_bias=True, sn=False, scope='cbam'):
    """CBAM-style attention: channel attention (shared MLP over global avg-
    and max-pooled features), then spatial attention (7x7 conv over the
    channel-wise avg/max maps)."""
    with tf.variable_scope(scope):
        with tf.variable_scope('channel_attention'):
            x_gap = global_avg_pooling(x)
            x_gap = fully_conneted(x_gap, units=channels // ratio, use_bias=use_bias, sn=sn, scope='fc1')
            x_gap = relu(x_gap)
            x_gap = fully_conneted(x_gap, units=channels, use_bias=use_bias, sn=sn, scope='fc2')
        # reuse=True re-enters the scope above so the max-pooled branch shares
        # the same fc1/fc2 weights (they must already exist at this point).
        with tf.variable_scope('channel_attention', reuse=True):
            x_gmp = global_max_pooling(x)
            x_gmp = fully_conneted(x_gmp, units=channels // ratio, use_bias=use_bias, sn=sn, scope='fc1')
            x_gmp = relu(x_gmp)
            x_gmp = fully_conneted(x_gmp, units=channels, use_bias=use_bias, sn=sn, scope='fc2')
        # Channel gate: sigmoid of the summed branches, broadcast over h, w.
        scale = tf.reshape(x_gap + x_gmp, [-1, 1, 1, channels])
        scale = sigmoid(scale)
        x = x * scale
        with tf.variable_scope('spatial_attention'):
            # Spatial gate: 7x7 conv over [avg, max] channel-pooled maps.
            x_channel_avg_pooling = tf.reduce_mean(x, axis=-1, keepdims=True)
            x_channel_max_pooling = tf.reduce_max(x, axis=-1, keepdims=True)
            scale = tf.concat([x_channel_avg_pooling, x_channel_max_pooling], axis=-1)
            scale = conv(scale, channels=1, kernel=7, stride=1, pad=3, pad_type='reflect', use_bias=False, sn=sn,
                         scope='conv')
            scale = sigmoid(scale)
            x = x * scale
        return x
##################################################################################
# Normalization
##################################################################################
def batch_norm(x, is_training=False, scope='batch_norm'):
    """Batch normalization via tf.contrib.layers.batch_norm with renorm.

    `updates_collections=None` makes the moving-average updates run in place,
    so callers do NOT need the usual control dependency on UPDATE_OPS that
    tf.layers.batch_normalization would require before `optimizer.minimize`.
    """
    bn_kwargs = dict(decay=0.9, epsilon=1e-05,
                     center=True, scale=True, renorm=True,
                     updates_collections=None,
                     is_training=is_training, scope=scope)
    return tf.contrib.layers.batch_norm(x, **bn_kwargs)
def instance_norm(x, scope='instance_norm'):
    """Instance normalization (per-sample, per-channel) with learned scale/offset."""
    return tf.contrib.layers.instance_norm(x, epsilon=1e-05, center=True, scale=True, scope=scope)
def layer_norm(x, scope='layer_norm'):
    """Layer normalization with learned scale/offset."""
    return tf.contrib.layers.layer_norm(x, center=True, scale=True, scope=scope)
def group_norm(x, groups=32, scope='group_norm'):
    """Group normalization over `groups` channel groups with learned scale/offset."""
    return tf.contrib.layers.group_norm(x, groups=groups, epsilon=1e-05, center=True, scale=True, scope=scope)
def adaptive_instance_norm(content, gamma, beta, epsilon=1e-5):
    """Adaptive instance normalization (AdaIN).

    `gamma` and `beta` are typically the style mean/std produced by an MLP
    (see https://github.com/taki0112/MUNIT-Tensorflow). Content features are
    whitened per-sample over the spatial axes, then re-styled.
    """
    content_mean, content_var = tf.nn.moments(content, axes=[1, 2], keep_dims=True)
    content_std = tf.sqrt(content_var + epsilon)
    normalized = (content - content_mean) / content_std
    return gamma * normalized + beta
def pixel_norm(x, epsilon=1e-8):
    """Pixel-wise feature normalization along the channel (last) axis."""
    mean_square = tf.reduce_mean(tf.square(x), axis=-1, keepdims=True)
    return x * tf.rsqrt(mean_square + epsilon)
def spectral_norm(w, iteration=1):
    """Spectrally normalize weight `w`: divide by its largest singular value.

    The singular value is estimated by power iteration on a 2-D view of `w`
    ([-1, out_channels]); the left-singular estimate `u` persists across
    calls in a non-trainable variable so the estimate refines over steps.

    Args:
        w: weight tensor (any rank; reshaped to 2-D internally).
        iteration: number of power-iteration steps (1 is usually enough).

    Returns:
        `w / sigma`, reshaped back to the original shape of `w`.
    """
    w_shape = w.shape.as_list()
    # Flatten to 2-D: rows = all other dims, cols = output channels.
    w = tf.reshape(w, [-1, w_shape[-1]])
    u = tf.get_variable("u", [1, w_shape[-1]], initializer=tf.random_normal_initializer(), trainable=False)
    u_hat = u
    v_hat = None
    for i in range(iteration):
        """
        power iteration
        Usually iteration = 1 will be enough
        """
        v_ = tf.matmul(u_hat, tf.transpose(w))
        v_hat = tf.nn.l2_normalize(v_)
        u_ = tf.matmul(v_hat, w)
        u_hat = tf.nn.l2_normalize(u_)
    # Freeze the singular-vector estimates so gradients do not flow
    # through the power iteration itself.
    u_hat = tf.stop_gradient(u_hat)
    v_hat = tf.stop_gradient(v_hat)
    # sigma = v^T W u : the estimated largest singular value (1x1 tensor).
    sigma = tf.matmul(tf.matmul(v_hat, w), tf.transpose(u_hat))
    # Persist the refined u estimate before using the normalized weight.
    with tf.control_dependencies([u.assign(u_hat)]):
        w_norm = w / sigma
        w_norm = tf.reshape(w_norm, w_shape)
    return w_norm
def condition_batch_norm(x, z, is_training=True, scope='batch_norm'):
    """Conditional batch normalization: beta/gamma are predicted from `z`.

    See https://github.com/taki0112/BigGAN-Tensorflow. Running mean/variance
    are tracked manually in non-trainable "pop_mean"/"pop_var" variables
    (EMA with decay 0.9) and used at inference time.

    Args:
        x: NHWC feature map.
        z: conditioning vector fed to two fully-connected heads.
        is_training: if True, normalize with batch statistics and update the
            running statistics; if False, use the stored running statistics.
        scope: variable scope name.
    """
    # See https://github.com/taki0112/BigGAN-Tensorflow
    with tf.variable_scope(scope):
        _, _, _, c = x.get_shape().as_list()
        decay = 0.9
        epsilon = 1e-05
        # Running statistics, updated only during training.
        test_mean = tf.get_variable("pop_mean", shape=[c], dtype=tf.float32,
                                    initializer=tf.constant_initializer(0.0), trainable=False)
        test_var = tf.get_variable("pop_var", shape=[c], dtype=tf.float32, initializer=tf.constant_initializer(1.0),
                                   trainable=False)
        # Condition-dependent shift and scale, one value per channel.
        beta = fully_conneted(z, units=c, scope='beta')
        gamma = fully_conneted(z, units=c, scope='gamma')
        beta = tf.reshape(beta, shape=[-1, 1, 1, c])
        gamma = tf.reshape(gamma, shape=[-1, 1, 1, c])
        if is_training:
            batch_mean, batch_var = tf.nn.moments(x, [0, 1, 2])
            ema_mean = tf.assign(test_mean, test_mean * decay + batch_mean * (1 - decay))
            ema_var = tf.assign(test_var, test_var * decay + batch_var * (1 - decay))
            # Force the EMA updates to run whenever the normalized output
            # is computed.
            with tf.control_dependencies([ema_mean, ema_var]):
                return tf.nn.batch_normalization(x, batch_mean, batch_var, beta, gamma, epsilon)
        else:
            return tf.nn.batch_normalization(x, test_mean, test_var, beta, gamma, epsilon)
def batch_instance_norm(x, scope='batch_instance_norm'):
    """Batch-Instance Normalization: learnable per-channel blend of BN and IN.

    `rho` (clipped to [0, 1]) interpolates between batch-normalized and
    instance-normalized features, followed by a learnable affine transform.

    Args:
        x: NHWC feature map.
        scope: variable scope name.
    """
    with tf.variable_scope(scope):
        ch = x.shape[-1]
        eps = 1e-5
        # NOTE: tf.nn.moments returns (mean, variance); "sigma" here is
        # actually the variance, so sqrt(sigma + eps) is the std-dev.
        batch_mean, batch_sigma = tf.nn.moments(x, axes=[0, 1, 2], keep_dims=True)
        x_batch = (x - batch_mean) / (tf.sqrt(batch_sigma + eps))
        ins_mean, ins_sigma = tf.nn.moments(x, axes=[1, 2], keep_dims=True)
        x_ins = (x - ins_mean) / (tf.sqrt(ins_sigma + eps))
        # Blend weight, constrained to [0, 1] per channel.
        rho = tf.get_variable("rho", [ch], initializer=tf.constant_initializer(1.0),
                              constraint=lambda x: tf.clip_by_value(x, clip_value_min=0.0, clip_value_max=1.0))
        gamma = tf.get_variable("gamma", [ch], initializer=tf.constant_initializer(1.0))
        beta = tf.get_variable("beta", [ch], initializer=tf.constant_initializer(0.0))
        x_hat = rho * x_batch + (1 - rho) * x_ins
        x_hat = x_hat * gamma + beta
        return x_hat
##################################################################################
# Activation Function
##################################################################################
def lrelu(x, alpha=0.01):
    """Leaky ReLU; the default slope of 0.01 matches PyTorch's default."""
    activated = tf.nn.leaky_relu(x, alpha)
    return activated
def relu(x):
    """Rectified linear unit."""
    activated = tf.nn.relu(x)
    return activated
def tanh(x):
    """Hyperbolic tangent activation."""
    activated = tf.tanh(x)
    return activated
def sigmoid(x):
    """Logistic sigmoid activation."""
    activated = tf.sigmoid(x)
    return activated
def swish(x):
    """Swish activation: x * sigmoid(x)."""
    gate = tf.sigmoid(x)
    return x * gate
##################################################################################
# Pooling & Resize
##################################################################################
def up_sample(x, scale_factor=2):
    """Nearest-neighbor upsampling of an NHWC tensor by `scale_factor`."""
    shape = x.get_shape().as_list()
    target_size = [shape[1] * scale_factor, shape[2] * scale_factor]
    return tf.image.resize_nearest_neighbor(x, size=target_size)
def global_avg_pooling(x):
    """Spatial mean over H and W, keeping a 1x1 spatial footprint."""
    return tf.reduce_mean(x, axis=[1, 2], keepdims=True)
def global_max_pooling(x):
    """Spatial max over H and W, keeping a 1x1 spatial footprint."""
    return tf.reduce_max(x, axis=[1, 2], keepdims=True)
def max_pooling(x, pool_size=2):
    """Max pooling with stride == pool_size and SAME padding."""
    return tf.layers.max_pooling2d(
        x, pool_size=pool_size, strides=pool_size, padding='SAME')
def avg_pooling(x, pool_size=2):
    """Average pooling with stride == pool_size and SAME padding."""
    return tf.layers.average_pooling2d(
        x, pool_size=pool_size, strides=pool_size, padding='SAME')
def flatten(x):
    """Collapse every non-batch dimension into one."""
    flat = tf.layers.flatten(x)
    return flat
def hw_flatten(x):
    """Merge the H and W axes: [B, H, W, C] -> [B, H*W, C]."""
    merged = tf.reshape(x, shape=[x.shape[0], -1, x.shape[-1]])
    return merged
##################################################################################
# Loss Function
##################################################################################
def classification_loss(logit, label):
    """Softmax cross-entropy loss and top-1 accuracy.

    Returns:
        (loss, accuracy) as scalar tensors.
    """
    cross_entropy = tf.nn.softmax_cross_entropy_with_logits_v2(labels=label, logits=logit)
    loss = tf.reduce_mean(cross_entropy)
    correct = tf.equal(tf.argmax(logit, -1), tf.argmax(label, -1))
    accuracy = tf.reduce_mean(tf.cast(correct, tf.float32))
    return loss, accuracy
def L1_loss(x, y):
    """Mean absolute error between x and y."""
    return tf.reduce_mean(tf.abs(x - y))
def L2_loss(x, y):
    """Mean squared error between x and y."""
    return tf.reduce_mean(tf.square(x - y))
def huber_loss(x, y):
    """Huber (smooth-L1) loss between x and y."""
    loss = tf.losses.huber_loss(x, y)
    return loss
def histogram_loss(x, y):
    """L1 distance between the soft RGB histograms of x and y."""
    return L1_loss(get_histogram(x), get_histogram(y))
def get_histogram(img, bin_size=0.2):
    """Per-channel histogram of an RGB image over (-1, 1].

    For each channel and each bin (low, low + bin_size], counts the pixels
    falling in the bin, then min-max normalizes the resulting counts.
    """
    entries = []
    channels = tf.split(img, num_or_size_splits=3, axis=-1)
    for channel in channels:
        for low in np.arange(-1, 1, bin_size):
            # Soft bin membership: low < value <= low + bin_size.
            in_bin = tf.logical_and(tf.greater(channel, low),
                                    tf.less_equal(channel, low + bin_size))
            entries.append(tf.reduce_sum(tf.cast(in_bin, tf.float32)))
    return normalization(entries)
def normalization(x):
    """Min-max normalize x into [0, 1]."""
    x_min = tf.reduce_min(x)
    x_max = tf.reduce_max(x)
    return (x - x_min) / (x_max - x_min)
##################################################################################
# GAN Loss Function
##################################################################################
def discriminator_loss(Ra, loss_func, real, fake):
    """Discriminator loss for several GAN objectives.

    Args:
        Ra: if True, use the relativistic formulation (logits relative to
            the mean logit of the other class). Unsupported for wgan and
            silently disabled in that case.
        loss_func: one of 'wgan*' (any name containing 'wgan'), 'lsgan',
            'gan', 'gan-gp', 'dragan', 'hinge'.
        real: discriminator logits on real samples.
        fake: discriminator logits on generated samples.

    Returns:
        Scalar loss (real_loss + fake_loss). Returns 0 for an
        unrecognized loss_func.
    """
    # Ra = Relativistic
    real_loss = 0
    fake_loss = 0
    if Ra and loss_func.__contains__('wgan'):
        print("No exist [Ra + WGAN], so use the {} loss function".format(loss_func))
        Ra = False
    if Ra:
        # Relativistic logits: each class is scored relative to the mean
        # logit of the opposite class.
        real_logit = (real - tf.reduce_mean(fake))
        fake_logit = (fake - tf.reduce_mean(real))
        if loss_func == 'lsgan':
            real_loss = tf.reduce_mean(tf.square(real_logit - 1.0))
            fake_loss = tf.reduce_mean(tf.square(fake_logit + 1.0))
        if loss_func == 'gan' or loss_func == 'gan-gp' or loss_func == 'dragan':
            real_loss = tf.reduce_mean(
                tf.nn.sigmoid_cross_entropy_with_logits(labels=tf.ones_like(real), logits=real_logit))
            fake_loss = tf.reduce_mean(
                tf.nn.sigmoid_cross_entropy_with_logits(labels=tf.zeros_like(fake), logits=fake_logit))
        if loss_func == 'hinge':
            real_loss = tf.reduce_mean(relu(1.0 - real_logit))
            fake_loss = tf.reduce_mean(relu(1.0 + fake_logit))
    else:
        if loss_func.__contains__('wgan'):
            # Wasserstein critic loss: maximize E[real] - E[fake].
            real_loss = -tf.reduce_mean(real)
            fake_loss = tf.reduce_mean(fake)
        if loss_func == 'lsgan':
            real_loss = tf.reduce_mean(tf.square(real - 1.0))
            fake_loss = tf.reduce_mean(tf.square(fake))
        if loss_func == 'gan' or loss_func == 'gan-gp' or loss_func == 'dragan':
            real_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(labels=tf.ones_like(real), logits=real))
            fake_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(labels=tf.zeros_like(fake), logits=fake))
        if loss_func == 'hinge':
            real_loss = tf.reduce_mean(relu(1.0 - real))
            fake_loss = tf.reduce_mean(relu(1.0 + fake))
    loss = real_loss + fake_loss
    return loss
def generator_loss(Ra, loss_func, real, fake):
    """Generator loss for several GAN objectives (mirror of discriminator_loss).

    Args:
        Ra: if True, use the relativistic formulation (logits relative to
            the mean logit of the other class). Unsupported for wgan and
            silently disabled in that case.
        loss_func: one of 'wgan*' (any name containing 'wgan'), 'lsgan',
            'gan', 'gan-gp', 'dragan', 'hinge'.
        real: discriminator logits on real samples (used only when Ra).
        fake: discriminator logits on generated samples.

    Returns:
        Scalar loss (fake_loss + real_loss; real_loss stays 0 in the
        non-relativistic branch). Returns 0 for an unrecognized loss_func.
    """
    # Ra = Relativistic
    fake_loss = 0
    real_loss = 0
    if Ra and loss_func.__contains__('wgan'):
        print("No exist [Ra + WGAN], so use the {} loss function".format(loss_func))
        Ra = False
    if Ra:
        # Relativistic logits, with the roles of real/fake swapped
        # compared to the discriminator.
        fake_logit = (fake - tf.reduce_mean(real))
        real_logit = (real - tf.reduce_mean(fake))
        if loss_func == 'lsgan':
            fake_loss = tf.reduce_mean(tf.square(fake_logit - 1.0))
            real_loss = tf.reduce_mean(tf.square(real_logit + 1.0))
        if loss_func == 'gan' or loss_func == 'gan-gp' or loss_func == 'dragan':
            fake_loss = tf.reduce_mean(
                tf.nn.sigmoid_cross_entropy_with_logits(labels=tf.ones_like(fake), logits=fake_logit))
            real_loss = tf.reduce_mean(
                tf.nn.sigmoid_cross_entropy_with_logits(labels=tf.zeros_like(real), logits=real_logit))
        if loss_func == 'hinge':
            fake_loss = tf.reduce_mean(relu(1.0 - fake_logit))
            real_loss = tf.reduce_mean(relu(1.0 + real_logit))
    else:
        if loss_func.__contains__('wgan'):
            fake_loss = -tf.reduce_mean(fake)
        if loss_func == 'lsgan':
            fake_loss = tf.reduce_mean(tf.square(fake - 1.0))
        if loss_func == 'gan' or loss_func == 'gan-gp' or loss_func == 'dragan':
            fake_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(labels=tf.ones_like(fake), logits=fake))
        if loss_func == 'hinge':
            # Hinge generator loss reduces to -E[D(fake)].
            fake_loss = -tf.reduce_mean(fake)
    loss = fake_loss + real_loss
    return loss
##################################################################################
# KL-Divergence Loss Function
##################################################################################
# typical version
def z_sample(mean, logvar):
    """Reparameterization trick: mean + std * eps, with std = exp(logvar / 2)."""
    noise = tf.random_normal(tf.shape(mean), mean=0.0, stddev=1.0, dtype=tf.float32)
    std = tf.exp(logvar * 0.5)
    return mean + std * noise
def kl_loss(mean, logvar):
    """KL divergence of N(mean, exp(logvar)) from N(0, I).

    Inputs have shape [batch_size, channel]; the KL term is summed over
    channels and averaged over the batch.
    """
    per_sample = 0.5 * tf.reduce_sum(
        tf.square(mean) + tf.exp(logvar) - 1 - logvar, axis=-1)
    return tf.reduce_mean(per_sample)
# version 2
def z_sample_2(mean, var):
    """Reparameterization trick with the std-dev given directly as `var`."""
    noise = tf.random_normal(tf.shape(mean), mean=0.0, stddev=1.0, dtype=tf.float32)
    return mean + var * noise
def kl_loss_2(mean, var):
    """KL divergence variant where `var` parameterizes the std-dev directly.

    Inputs have shape [batch_size, channel]; summed over channels and
    averaged over the batch. The 1e-8 guards the log against zero variance.
    """
    per_sample = 0.5 * tf.reduce_sum(
        tf.square(mean) + tf.square(var) - tf.log(1e-8 + tf.square(var)) - 1, axis=-1)
    return tf.reduce_mean(per_sample)
| [
"tensorflow.nn.batch_normalization",
"tensorflow.reduce_sum",
"tensorflow.image.resize_nearest_neighbor",
"tensorflow.contrib.layers.l2_regularizer",
"tensorflow.clip_by_value",
"tensorflow.constant_initializer",
"tensorflow.reshape",
"tensorflow.nn.l2_normalize",
"tensorflow.zeros_like",
"tensorf... | [((907, 961), 'tensorflow.truncated_normal_initializer', 'tf.truncated_normal_initializer', ([], {'mean': '(0.0)', 'stddev': '(0.02)'}), '(mean=0.0, stddev=0.02)\n', (938, 961), True, 'import tensorflow as tf\n'), ((983, 1023), 'tensorflow.contrib.layers.l2_regularizer', 'tf.contrib.layers.l2_regularizer', (['(0.0001)'], {}), '(0.0001)\n', (1015, 1023), True, 'import tensorflow as tf\n'), ((1051, 1091), 'tensorflow.contrib.layers.l2_regularizer', 'tf.contrib.layers.l2_regularizer', (['(0.0001)'], {}), '(0.0001)\n', (1083, 1091), True, 'import tensorflow as tf\n'), ((7995, 8040), 'tensorflow.depth_to_space', 'tf.depth_to_space', (['x'], {'block_size': 'scale_factor'}), '(x, block_size=scale_factor)\n', (8012, 8040), True, 'import tensorflow as tf\n'), ((8300, 8345), 'tensorflow.space_to_depth', 'tf.space_to_depth', (['x'], {'block_size': 'scale_factor'}), '(x, block_size=scale_factor)\n', (8317, 8345), True, 'import tensorflow as tf\n'), ((19605, 19773), 'tensorflow.contrib.layers.batch_norm', 'tf.contrib.layers.batch_norm', (['x'], {'decay': '(0.9)', 'epsilon': '(1e-05)', 'center': '(True)', 'scale': '(True)', 'renorm': '(True)', 'updates_collections': 'None', 'is_training': 'is_training', 'scope': 'scope'}), '(x, decay=0.9, epsilon=1e-05, center=True,\n scale=True, renorm=True, updates_collections=None, is_training=\n is_training, scope=scope)\n', (19633, 19773), True, 'import tensorflow as tf\n'), ((20091, 20182), 'tensorflow.contrib.layers.instance_norm', 'tf.contrib.layers.instance_norm', (['x'], {'epsilon': '(1e-05)', 'center': '(True)', 'scale': '(True)', 'scope': 'scope'}), '(x, epsilon=1e-05, center=True, scale=True,\n scope=scope)\n', (20122, 20182), True, 'import tensorflow as tf\n'), ((20360, 20429), 'tensorflow.contrib.layers.layer_norm', 'tf.contrib.layers.layer_norm', (['x'], {'center': '(True)', 'scale': '(True)', 'scope': 'scope'}), '(x, center=True, scale=True, scope=scope)\n', (20388, 20429), True, 'import tensorflow as tf\n'), 
((20573, 20676), 'tensorflow.contrib.layers.group_norm', 'tf.contrib.layers.group_norm', (['x'], {'groups': 'groups', 'epsilon': '(1e-05)', 'center': '(True)', 'scale': '(True)', 'scope': 'scope'}), '(x, groups=groups, epsilon=1e-05, center=True,\n scale=True, scope=scope)\n', (20601, 20676), True, 'import tensorflow as tf\n'), ((20946, 20997), 'tensorflow.nn.moments', 'tf.nn.moments', (['content'], {'axes': '[1, 2]', 'keep_dims': '(True)'}), '(content, axes=[1, 2], keep_dims=True)\n', (20959, 20997), True, 'import tensorflow as tf\n'), ((21010, 21034), 'tensorflow.sqrt', 'tf.sqrt', (['(c_var + epsilon)'], {}), '(c_var + epsilon)\n', (21017, 21034), True, 'import tensorflow as tf\n'), ((21291, 21323), 'tensorflow.reshape', 'tf.reshape', (['w', '[-1, w_shape[-1]]'], {}), '(w, [-1, w_shape[-1]])\n', (21301, 21323), True, 'import tensorflow as tf\n'), ((21761, 21784), 'tensorflow.stop_gradient', 'tf.stop_gradient', (['u_hat'], {}), '(u_hat)\n', (21777, 21784), True, 'import tensorflow as tf\n'), ((21797, 21820), 'tensorflow.stop_gradient', 'tf.stop_gradient', (['v_hat'], {}), '(v_hat)\n', (21813, 21820), True, 'import tensorflow as tf\n'), ((24545, 24571), 'tensorflow.nn.leaky_relu', 'tf.nn.leaky_relu', (['x', 'alpha'], {}), '(x, alpha)\n', (24561, 24571), True, 'import tensorflow as tf\n'), ((24598, 24611), 'tensorflow.nn.relu', 'tf.nn.relu', (['x'], {}), '(x)\n', (24608, 24611), True, 'import tensorflow as tf\n'), ((24638, 24648), 'tensorflow.tanh', 'tf.tanh', (['x'], {}), '(x)\n', (24645, 24648), True, 'import tensorflow as tf\n'), ((24678, 24691), 'tensorflow.sigmoid', 'tf.sigmoid', (['x'], {}), '(x)\n', (24688, 24691), True, 'import tensorflow as tf\n'), ((25063, 25113), 'tensorflow.image.resize_nearest_neighbor', 'tf.image.resize_nearest_neighbor', (['x'], {'size': 'new_size'}), '(x, size=new_size)\n', (25095, 25113), True, 'import tensorflow as tf\n'), ((25153, 25198), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['x'], {'axis': '[1, 2]', 'keepdims': 
'(True)'}), '(x, axis=[1, 2], keepdims=True)\n', (25167, 25198), True, 'import tensorflow as tf\n'), ((25253, 25297), 'tensorflow.reduce_max', 'tf.reduce_max', (['x'], {'axis': '[1, 2]', 'keepdims': '(True)'}), '(x, axis=[1, 2], keepdims=True)\n', (25266, 25297), True, 'import tensorflow as tf\n'), ((25356, 25443), 'tensorflow.layers.max_pooling2d', 'tf.layers.max_pooling2d', (['x'], {'pool_size': 'pool_size', 'strides': 'pool_size', 'padding': '"""SAME"""'}), "(x, pool_size=pool_size, strides=pool_size, padding=\n 'SAME')\n", (25379, 25443), True, 'import tensorflow as tf\n'), ((25495, 25585), 'tensorflow.layers.average_pooling2d', 'tf.layers.average_pooling2d', (['x'], {'pool_size': 'pool_size', 'strides': 'pool_size', 'padding': '"""SAME"""'}), "(x, pool_size=pool_size, strides=pool_size,\n padding='SAME')\n", (25522, 25585), True, 'import tensorflow as tf\n'), ((25624, 25644), 'tensorflow.layers.flatten', 'tf.layers.flatten', (['x'], {}), '(x)\n', (25641, 25644), True, 'import tensorflow as tf\n'), ((25677, 25727), 'tensorflow.reshape', 'tf.reshape', (['x'], {'shape': '[x.shape[0], -1, x.shape[-1]]'}), '(x, shape=[x.shape[0], -1, x.shape[-1]])\n', (25687, 25727), True, 'import tensorflow as tf\n'), ((26406, 26432), 'tensorflow.losses.huber_loss', 'tf.losses.huber_loss', (['x', 'y'], {}), '(x, y)\n', (26426, 26432), True, 'import tensorflow as tf\n'), ((26693, 26737), 'tensorflow.split', 'tf.split', (['img'], {'num_or_size_splits': '(3)', 'axis': '(-1)'}), '(img, num_or_size_splits=3, axis=-1)\n', (26701, 26737), True, 'import tensorflow as tf\n'), ((31439, 31459), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['loss'], {}), '(loss)\n', (31453, 31459), True, 'import tensorflow as tf\n'), ((31815, 31835), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['loss'], {}), '(loss)\n', (31829, 31835), True, 'import tensorflow as tf\n'), ((1447, 1471), 'tensorflow.variable_scope', 'tf.variable_scope', (['scope'], {}), '(scope)\n', (1464, 1471), True, 'import tensorflow as 
tf\n'), ((3037, 3061), 'tensorflow.variable_scope', 'tf.variable_scope', (['scope'], {}), '(scope)\n', (3054, 3061), True, 'import tensorflow as tf\n'), ((5768, 5792), 'tensorflow.variable_scope', 'tf.variable_scope', (['scope'], {}), '(scope)\n', (5785, 5792), True, 'import tensorflow as tf\n'), ((6446, 6470), 'tensorflow.variable_scope', 'tf.variable_scope', (['scope'], {}), '(scope)\n', (6463, 6470), True, 'import tensorflow as tf\n'), ((8442, 8466), 'tensorflow.variable_scope', 'tf.variable_scope', (['scope'], {}), '(scope)\n', (8459, 8466), True, 'import tensorflow as tf\n'), ((9555, 9579), 'tensorflow.variable_scope', 'tf.variable_scope', (['scope'], {}), '(scope)\n', (9572, 9579), True, 'import tensorflow as tf\n'), ((10088, 10112), 'tensorflow.variable_scope', 'tf.variable_scope', (['scope'], {}), '(scope)\n', (10105, 10112), True, 'import tensorflow as tf\n'), ((10815, 10839), 'tensorflow.variable_scope', 'tf.variable_scope', (['scope'], {}), '(scope)\n', (10832, 10839), True, 'import tensorflow as tf\n'), ((11503, 11527), 'tensorflow.variable_scope', 'tf.variable_scope', (['scope'], {}), '(scope)\n', (11520, 11527), True, 'import tensorflow as tf\n'), ((12182, 12206), 'tensorflow.variable_scope', 'tf.variable_scope', (['scope'], {}), '(scope)\n', (12199, 12206), True, 'import tensorflow as tf\n'), ((13325, 13351), 'tensorflow.concat', 'tf.concat', (['layers'], {'axis': '(-1)'}), '(layers, axis=-1)\n', (13334, 13351), True, 'import tensorflow as tf\n'), ((13510, 13534), 'tensorflow.variable_scope', 'tf.variable_scope', (['scope'], {}), '(scope)\n', (13527, 13534), True, 'import tensorflow as tf\n'), ((15267, 15291), 'tensorflow.variable_scope', 'tf.variable_scope', (['scope'], {}), '(scope)\n', (15284, 15291), True, 'import tensorflow as tf\n'), ((15755, 15771), 'tensorflow.nn.softmax', 'tf.nn.softmax', (['s'], {}), '(s)\n', (15768, 15771), True, 'import tensorflow as tf\n'), ((15948, 15976), 'tensorflow.reshape', 'tf.reshape', (['o'], {'shape': 
'x.shape'}), '(o, shape=x.shape)\n', (15958, 15976), True, 'import tensorflow as tf\n'), ((16140, 16164), 'tensorflow.variable_scope', 'tf.variable_scope', (['scope'], {}), '(scope)\n', (16157, 16164), True, 'import tensorflow as tf\n'), ((16689, 16705), 'tensorflow.nn.softmax', 'tf.nn.softmax', (['s'], {}), '(s)\n', (16702, 16705), True, 'import tensorflow as tf\n'), ((16882, 16954), 'tensorflow.reshape', 'tf.reshape', (['o'], {'shape': '[x.shape[0], x.shape[1], x.shape[2], channels // 2]'}), '(o, shape=[x.shape[0], x.shape[1], x.shape[2], channels // 2])\n', (16892, 16954), True, 'import tensorflow as tf\n'), ((17205, 17229), 'tensorflow.variable_scope', 'tf.variable_scope', (['scope'], {}), '(scope)\n', (17222, 17229), True, 'import tensorflow as tf\n'), ((17585, 17629), 'tensorflow.reshape', 'tf.reshape', (['excitation', '[-1, 1, 1, channels]'], {}), '(excitation, [-1, 1, 1, channels])\n', (17595, 17629), True, 'import tensorflow as tf\n'), ((17790, 17814), 'tensorflow.variable_scope', 'tf.variable_scope', (['scope'], {}), '(scope)\n', (17807, 17814), True, 'import tensorflow as tf\n'), ((21652, 21674), 'tensorflow.nn.l2_normalize', 'tf.nn.l2_normalize', (['v_'], {}), '(v_)\n', (21670, 21674), True, 'import tensorflow as tf\n'), ((21689, 21708), 'tensorflow.matmul', 'tf.matmul', (['v_hat', 'w'], {}), '(v_hat, w)\n', (21698, 21708), True, 'import tensorflow as tf\n'), ((21725, 21747), 'tensorflow.nn.l2_normalize', 'tf.nn.l2_normalize', (['u_'], {}), '(u_)\n', (21743, 21747), True, 'import tensorflow as tf\n'), ((21844, 21863), 'tensorflow.matmul', 'tf.matmul', (['v_hat', 'w'], {}), '(v_hat, w)\n', (21853, 21863), True, 'import tensorflow as tf\n'), ((21865, 21884), 'tensorflow.transpose', 'tf.transpose', (['u_hat'], {}), '(u_hat)\n', (21877, 21884), True, 'import tensorflow as tf\n'), ((21984, 22011), 'tensorflow.reshape', 'tf.reshape', (['w_norm', 'w_shape'], {}), '(w_norm, w_shape)\n', (21994, 22011), True, 'import tensorflow as tf\n'), ((22168, 22192), 
'tensorflow.variable_scope', 'tf.variable_scope', (['scope'], {}), '(scope)\n', (22185, 22192), True, 'import tensorflow as tf\n'), ((22756, 22793), 'tensorflow.reshape', 'tf.reshape', (['beta'], {'shape': '[-1, 1, 1, c]'}), '(beta, shape=[-1, 1, 1, c])\n', (22766, 22793), True, 'import tensorflow as tf\n'), ((22810, 22848), 'tensorflow.reshape', 'tf.reshape', (['gamma'], {'shape': '[-1, 1, 1, c]'}), '(gamma, shape=[-1, 1, 1, c])\n', (22820, 22848), True, 'import tensorflow as tf\n'), ((23448, 23472), 'tensorflow.variable_scope', 'tf.variable_scope', (['scope'], {}), '(scope)\n', (23465, 23472), True, 'import tensorflow as tf\n'), ((23553, 23601), 'tensorflow.nn.moments', 'tf.nn.moments', (['x'], {'axes': '[0, 1, 2]', 'keep_dims': '(True)'}), '(x, axes=[0, 1, 2], keep_dims=True)\n', (23566, 23601), True, 'import tensorflow as tf\n'), ((23699, 23744), 'tensorflow.nn.moments', 'tf.nn.moments', (['x'], {'axes': '[1, 2]', 'keep_dims': '(True)'}), '(x, axes=[1, 2], keep_dims=True)\n', (23712, 23744), True, 'import tensorflow as tf\n'), ((24723, 24736), 'tensorflow.sigmoid', 'tf.sigmoid', (['x'], {}), '(x)\n', (24733, 24736), True, 'import tensorflow as tf\n'), ((25979, 26049), 'tensorflow.nn.softmax_cross_entropy_with_logits_v2', 'tf.nn.softmax_cross_entropy_with_logits_v2', ([], {'labels': 'label', 'logits': 'logit'}), '(labels=label, logits=logit)\n', (26021, 26049), True, 'import tensorflow as tf\n'), ((26077, 26097), 'tensorflow.argmax', 'tf.argmax', (['logit', '(-1)'], {}), '(logit, -1)\n', (26086, 26097), True, 'import tensorflow as tf\n'), ((26099, 26119), 'tensorflow.argmax', 'tf.argmax', (['label', '(-1)'], {}), '(label, -1)\n', (26108, 26119), True, 'import tensorflow as tf\n'), ((26151, 26182), 'tensorflow.cast', 'tf.cast', (['prediction', 'tf.float32'], {}), '(prediction, tf.float32)\n', (26158, 26182), True, 'import tensorflow as tf\n'), ((26257, 26270), 'tensorflow.abs', 'tf.abs', (['(x - y)'], {}), '(x - y)\n', (26263, 26270), True, 'import tensorflow as 
tf\n'), ((26336, 26352), 'tensorflow.square', 'tf.square', (['(x - y)'], {}), '(x - y)\n', (26345, 26352), True, 'import tensorflow as tf\n'), ((26799, 26825), 'numpy.arange', 'np.arange', (['(-1)', '(1)', 'bin_size'], {}), '(-1, 1, bin_size)\n', (26808, 26825), True, 'import numpy as np\n'), ((31174, 31188), 'tensorflow.shape', 'tf.shape', (['mean'], {}), '(mean)\n', (31182, 31188), True, 'import tensorflow as tf\n'), ((31545, 31559), 'tensorflow.shape', 'tf.shape', (['mean'], {}), '(mean)\n', (31553, 31559), True, 'import tensorflow as tf\n'), ((2630, 2808), 'tensorflow.layers.conv2d', 'tf.layers.conv2d', ([], {'inputs': 'x', 'filters': 'channels', 'kernel_size': 'kernel', 'kernel_initializer': 'weight_init', 'kernel_regularizer': 'weight_regularizer', 'strides': 'stride', 'use_bias': 'use_bias'}), '(inputs=x, filters=channels, kernel_size=kernel,\n kernel_initializer=weight_init, kernel_regularizer=weight_regularizer,\n strides=stride, use_bias=use_bias)\n', (2646, 2808), True, 'import tensorflow as tf\n'), ((6100, 6153), 'tensorflow.nn.atrous_conv2d', 'tf.nn.atrous_conv2d', (['x', 'w'], {'rate': 'rate', 'padding': 'padding'}), '(x, w, rate=rate, padding=padding)\n', (6119, 6153), True, 'import tensorflow as tf\n'), ((6289, 6312), 'tensorflow.nn.bias_add', 'tf.nn.bias_add', (['x', 'bias'], {}), '(x, bias)\n', (6303, 6312), True, 'import tensorflow as tf\n'), ((7408, 7613), 'tensorflow.layers.conv2d_transpose', 'tf.layers.conv2d_transpose', ([], {'inputs': 'x', 'filters': 'channels', 'kernel_size': 'kernel', 'kernel_initializer': 'weight_init', 'kernel_regularizer': 'weight_regularizer', 'strides': 'stride', 'padding': 'padding', 'use_bias': 'use_bias'}), '(inputs=x, filters=channels, kernel_size=kernel,\n kernel_initializer=weight_init, kernel_regularizer=weight_regularizer,\n strides=stride, padding=padding, use_bias=use_bias)\n', (7434, 7613), True, 'import tensorflow as tf\n'), ((8592, 8716), 'tensorflow.get_variable', 'tf.get_variable', (['"""kernel"""', 
'[channels, units]', 'tf.float32'], {'initializer': 'weight_init', 'regularizer': 'weight_regularizer_fully'}), "('kernel', [channels, units], tf.float32, initializer=\n weight_init, regularizer=weight_regularizer_fully)\n", (8607, 8716), True, 'import tensorflow as tf\n'), ((9065, 9196), 'tensorflow.layers.dense', 'tf.layers.dense', (['x'], {'units': 'units', 'kernel_initializer': 'weight_init', 'kernel_regularizer': 'weight_regularizer_fully', 'use_bias': 'use_bias'}), '(x, units=units, kernel_initializer=weight_init,\n kernel_regularizer=weight_regularizer_fully, use_bias=use_bias)\n', (9080, 9196), True, 'import tensorflow as tf\n'), ((9594, 9619), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""res1"""'], {}), "('res1')\n", (9611, 9619), True, 'import tensorflow as tf\n'), ((9794, 9819), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""res2"""'], {}), "('res2')\n", (9811, 9819), True, 'import tensorflow as tf\n'), ((10127, 10152), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""res1"""'], {}), "('res1')\n", (10144, 10152), True, 'import tensorflow as tf\n'), ((10322, 10347), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""res2"""'], {}), "('res2')\n", (10339, 10347), True, 'import tensorflow as tf\n'), ((10488, 10513), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""skip"""'], {}), "('skip')\n", (10505, 10513), True, 'import tensorflow as tf\n'), ((10854, 10879), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""res1"""'], {}), "('res1')\n", (10871, 10879), True, 'import tensorflow as tf\n'), ((11062, 11087), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""res2"""'], {}), "('res2')\n", (11079, 11087), True, 'import tensorflow as tf\n'), ((11241, 11266), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""skip"""'], {}), "('skip')\n", (11258, 11266), True, 'import tensorflow as tf\n'), ((11542, 11567), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""res1"""'], {}), "('res1')\n", (11559, 11567), 
True, 'import tensorflow as tf\n'), ((11742, 11767), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""res2"""'], {}), "('res2')\n", (11759, 11767), True, 'import tensorflow as tf\n'), ((11913, 11938), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""skip"""'], {}), "('skip')\n", (11930, 11938), True, 'import tensorflow as tf\n'), ((12273, 12307), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""bottle_neck_0"""'], {}), "('bottle_neck_0')\n", (12290, 12307), True, 'import tensorflow as tf\n'), ((14786, 14814), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""GFF_1x1"""'], {}), "('GFF_1x1')\n", (14803, 14814), True, 'import tensorflow as tf\n'), ((14832, 14856), 'tensorflow.concat', 'tf.concat', (['RDBs'], {'axis': '(-1)'}), '(RDBs, axis=-1)\n', (14841, 14856), True, 'import tensorflow as tf\n'), ((14965, 14993), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""GFF_3x3"""'], {}), "('GFF_3x3')\n", (14982, 14993), True, 'import tensorflow as tf\n'), ((17829, 17867), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""channel_attention"""'], {}), "('channel_attention')\n", (17846, 17867), True, 'import tensorflow as tf\n'), ((18160, 18210), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""channel_attention"""'], {'reuse': '(True)'}), "('channel_attention', reuse=True)\n", (18177, 18210), True, 'import tensorflow as tf\n'), ((18510, 18557), 'tensorflow.reshape', 'tf.reshape', (['(x_gap + x_gmp)', '[-1, 1, 1, channels]'], {}), '(x_gap + x_gmp, [-1, 1, 1, channels])\n', (18520, 18557), True, 'import tensorflow as tf\n'), ((18634, 18672), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""spatial_attention"""'], {}), "('spatial_attention')\n", (18651, 18672), True, 'import tensorflow as tf\n'), ((18710, 18751), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['x'], {'axis': '(-1)', 'keepdims': '(True)'}), '(x, axis=-1, keepdims=True)\n', (18724, 18751), True, 'import tensorflow as tf\n'), ((18788, 18828), 
'tensorflow.reduce_max', 'tf.reduce_max', (['x'], {'axis': '(-1)', 'keepdims': '(True)'}), '(x, axis=-1, keepdims=True)\n', (18801, 18828), True, 'import tensorflow as tf\n'), ((18849, 18915), 'tensorflow.concat', 'tf.concat', (['[x_channel_avg_pooling, x_channel_max_pooling]'], {'axis': '(-1)'}), '([x_channel_avg_pooling, x_channel_max_pooling], axis=-1)\n', (18858, 18915), True, 'import tensorflow as tf\n'), ((21384, 21414), 'tensorflow.random_normal_initializer', 'tf.random_normal_initializer', ([], {}), '()\n', (21412, 21414), True, 'import tensorflow as tf\n'), ((21619, 21634), 'tensorflow.transpose', 'tf.transpose', (['w'], {}), '(w)\n', (21631, 21634), True, 'import tensorflow as tf\n'), ((22910, 22937), 'tensorflow.nn.moments', 'tf.nn.moments', (['x', '[0, 1, 2]'], {}), '(x, [0, 1, 2])\n', (22923, 22937), True, 'import tensorflow as tf\n'), ((22961, 23027), 'tensorflow.assign', 'tf.assign', (['test_mean', '(test_mean * decay + batch_mean * (1 - decay))'], {}), '(test_mean, test_mean * decay + batch_mean * (1 - decay))\n', (22970, 23027), True, 'import tensorflow as tf\n'), ((23050, 23113), 'tensorflow.assign', 'tf.assign', (['test_var', '(test_var * decay + batch_var * (1 - decay))'], {}), '(test_var, test_var * decay + batch_var * (1 - decay))\n', (23059, 23113), True, 'import tensorflow as tf\n'), ((23308, 23379), 'tensorflow.nn.batch_normalization', 'tf.nn.batch_normalization', (['x', 'test_mean', 'test_var', 'beta', 'gamma', 'epsilon'], {}), '(x, test_mean, test_var, beta, gamma, epsilon)\n', (23333, 23379), True, 'import tensorflow as tf\n'), ((23640, 23666), 'tensorflow.sqrt', 'tf.sqrt', (['(batch_sigma + eps)'], {}), '(batch_sigma + eps)\n', (23647, 23666), True, 'import tensorflow as tf\n'), ((23779, 23803), 'tensorflow.sqrt', 'tf.sqrt', (['(ins_sigma + eps)'], {}), '(ins_sigma + eps)\n', (23786, 23803), True, 'import tensorflow as tf\n'), ((26844, 26867), 'tensorflow.greater', 'tf.greater', (['img_chan', 'i'], {}), '(img_chan, i)\n', (26854, 
26867), True, 'import tensorflow as tf\n'), ((26886, 26923), 'tensorflow.less_equal', 'tf.less_equal', (['img_chan', '(i + bin_size)'], {}), '(img_chan, i + bin_size)\n', (26899, 26923), True, 'import tensorflow as tf\n'), ((27146, 27162), 'tensorflow.reduce_min', 'tf.reduce_min', (['x'], {}), '(x)\n', (27159, 27162), True, 'import tensorflow as tf\n'), ((27167, 27183), 'tensorflow.reduce_max', 'tf.reduce_max', (['x'], {}), '(x)\n', (27180, 27183), True, 'import tensorflow as tf\n'), ((27186, 27202), 'tensorflow.reduce_min', 'tf.reduce_min', (['x'], {}), '(x)\n', (27199, 27202), True, 'import tensorflow as tf\n'), ((27709, 27729), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['fake'], {}), '(fake)\n', (27723, 27729), True, 'import tensorflow as tf\n'), ((27760, 27780), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['real'], {}), '(real)\n', (27774, 27780), True, 'import tensorflow as tf\n'), ((28605, 28625), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['fake'], {}), '(fake)\n', (28619, 28625), True, 'import tensorflow as tf\n'), ((29601, 29621), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['real'], {}), '(real)\n', (29615, 29621), True, 'import tensorflow as tf\n'), ((29652, 29672), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['fake'], {}), '(fake)\n', (29666, 29672), True, 'import tensorflow as tf\n'), ((31249, 31269), 'tensorflow.exp', 'tf.exp', (['(logvar * 0.5)'], {}), '(logvar * 0.5)\n', (31255, 31269), True, 'import tensorflow as tf\n'), ((1866, 1939), 'tensorflow.pad', 'tf.pad', (['x', '[[0, 0], [pad_top, pad_bottom], [pad_left, pad_right], [0, 0]]'], {}), '(x, [[0, 0], [pad_top, pad_bottom], [pad_left, pad_right], [0, 0]])\n', (1872, 1939), True, 'import tensorflow as tf\n'), ((1998, 2091), 'tensorflow.pad', 'tf.pad', (['x', '[[0, 0], [pad_top, pad_bottom], [pad_left, pad_right], [0, 0]]'], {'mode': '"""REFLECT"""'}), "(x, [[0, 0], [pad_top, pad_bottom], [pad_left, pad_right], [0, 0]],\n mode='REFLECT')\n", (2004, 2091), True, 'import tensorflow as 
tf\n'), ((2575, 2598), 'tensorflow.nn.bias_add', 'tf.nn.bias_add', (['x', 'bias'], {}), '(x, bias)\n', (2589, 2598), True, 'import tensorflow as tf\n'), ((3126, 3151), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""mask"""'], {}), "('mask')\n", (3143, 3151), True, 'import tensorflow as tf\n'), ((3277, 3304), 'tensorflow.ones', 'tf.ones', ([], {'shape': '[1, h, w, 1]'}), '(shape=[1, h, w, 1])\n', (3284, 3304), True, 'import tensorflow as tf\n'), ((3695, 3734), 'tensorflow.clip_by_value', 'tf.clip_by_value', (['update_mask', '(0.0)', '(1.0)'], {}), '(update_mask, 0.0, 1.0)\n', (3711, 3734), True, 'import tensorflow as tf\n'), ((3807, 3829), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""x"""'], {}), "('x')\n", (3824, 3829), True, 'import tensorflow as tf\n'), ((5342, 5530), 'tensorflow.layers.conv2d', 'tf.layers.conv2d', (['x'], {'filters': 'channels', 'kernel_size': 'kernel', 'kernel_initializer': 'weight_init', 'kernel_regularizer': 'weight_regularizer', 'strides': 'stride', 'padding': 'padding', 'use_bias': 'use_bias'}), '(x, filters=channels, kernel_size=kernel,\n kernel_initializer=weight_init, kernel_regularizer=weight_regularizer,\n strides=stride, padding=padding, use_bias=use_bias)\n', (5358, 5530), True, 'import tensorflow as tf\n'), ((7353, 7376), 'tensorflow.nn.bias_add', 'tf.nn.bias_add', (['x', 'bias'], {}), '(x, bias)\n', (7367, 7376), True, 'import tensorflow as tf\n'), ((12844, 12870), 'tensorflow.concat', 'tf.concat', (['layers'], {'axis': '(-1)'}), '(layers, axis=-1)\n', (12853, 12870), True, 'import tensorflow as tf\n'), ((14509, 14535), 'tensorflow.concat', 'tf.concat', (['layers'], {'axis': '(-1)'}), '(layers, axis=-1)\n', (14518, 14535), True, 'import tensorflow as tf\n'), ((15905, 15933), 'tensorflow.constant_initializer', 'tf.constant_initializer', (['(0.0)'], {}), '(0.0)\n', (15928, 15933), True, 'import tensorflow as tf\n'), ((16839, 16867), 'tensorflow.constant_initializer', 'tf.constant_initializer', (['(0.0)'], {}), 
'(0.0)\n', (16862, 16867), True, 'import tensorflow as tf\n'), ((22409, 22437), 'tensorflow.constant_initializer', 'tf.constant_initializer', (['(0.0)'], {}), '(0.0)\n', (22432, 22437), True, 'import tensorflow as tf\n'), ((22543, 22571), 'tensorflow.constant_initializer', 'tf.constant_initializer', (['(1.0)'], {}), '(1.0)\n', (22566, 22571), True, 'import tensorflow as tf\n'), ((23132, 23176), 'tensorflow.control_dependencies', 'tf.control_dependencies', (['[ema_mean, ema_var]'], {}), '([ema_mean, ema_var])\n', (23155, 23176), True, 'import tensorflow as tf\n'), ((23201, 23274), 'tensorflow.nn.batch_normalization', 'tf.nn.batch_normalization', (['x', 'batch_mean', 'batch_var', 'beta', 'gamma', 'epsilon'], {}), '(x, batch_mean, batch_var, beta, gamma, epsilon)\n', (23226, 23274), True, 'import tensorflow as tf\n'), ((23861, 23889), 'tensorflow.constant_initializer', 'tf.constant_initializer', (['(1.0)'], {}), '(1.0)\n', (23884, 23889), True, 'import tensorflow as tf\n'), ((24062, 24090), 'tensorflow.constant_initializer', 'tf.constant_initializer', (['(1.0)'], {}), '(1.0)\n', (24085, 24090), True, 'import tensorflow as tf\n'), ((24149, 24177), 'tensorflow.constant_initializer', 'tf.constant_initializer', (['(0.0)'], {}), '(0.0)\n', (24172, 24177), True, 'import tensorflow as tf\n'), ((26957, 26980), 'tensorflow.logical_and', 'tf.logical_and', (['gt', 'leq'], {}), '(gt, leq)\n', (26971, 26980), True, 'import tensorflow as tf\n'), ((27026, 27050), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['condition'], {}), '(condition)\n', (27039, 27050), True, 'import tensorflow as tf\n'), ((27855, 27882), 'tensorflow.square', 'tf.square', (['(real_logit - 1.0)'], {}), '(real_logit - 1.0)\n', (27864, 27882), True, 'import tensorflow as tf\n'), ((27923, 27950), 'tensorflow.square', 'tf.square', (['(fake_logit + 1.0)'], {}), '(fake_logit + 1.0)\n', (27932, 27950), True, 'import tensorflow as tf\n'), ((28560, 28580), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['real'], {}), 
'(real)\n', (28574, 28580), True, 'import tensorflow as tf\n'), ((28699, 28720), 'tensorflow.square', 'tf.square', (['(real - 1.0)'], {}), '(real - 1.0)\n', (28708, 28720), True, 'import tensorflow as tf\n'), ((28761, 28776), 'tensorflow.square', 'tf.square', (['fake'], {}), '(fake)\n', (28770, 28776), True, 'import tensorflow as tf\n'), ((29747, 29774), 'tensorflow.square', 'tf.square', (['(fake_logit - 1.0)'], {}), '(fake_logit - 1.0)\n', (29756, 29774), True, 'import tensorflow as tf\n'), ((29815, 29842), 'tensorflow.square', 'tf.square', (['(real_logit + 1.0)'], {}), '(real_logit + 1.0)\n', (29824, 29842), True, 'import tensorflow as tf\n'), ((30452, 30472), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['fake'], {}), '(fake)\n', (30466, 30472), True, 'import tensorflow as tf\n'), ((30546, 30567), 'tensorflow.square', 'tf.square', (['(fake - 1.0)'], {}), '(fake - 1.0)\n', (30555, 30567), True, 'import tensorflow as tf\n'), ((30830, 30850), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['fake'], {}), '(fake)\n', (30844, 30850), True, 'import tensorflow as tf\n'), ((4220, 4405), 'tensorflow.layers.conv2d', 'tf.layers.conv2d', (['x'], {'filters': 'channels', 'kernel_size': 'kernel', 'kernel_initializer': 'weight_init', 'kernel_regularizer': 'weight_regularizer', 'strides': 'stride', 'padding': 'padding', 'use_bias': '(False)'}), '(x, filters=channels, kernel_size=kernel,\n kernel_initializer=weight_init, kernel_regularizer=weight_regularizer,\n strides=stride, padding=padding, use_bias=False)\n', (4236, 4405), True, 'import tensorflow as tf\n'), ((4716, 4739), 'tensorflow.nn.bias_add', 'tf.nn.bias_add', (['x', 'bias'], {}), '(x, bias)\n', (4730, 4739), True, 'import tensorflow as tf\n'), ((5280, 5303), 'tensorflow.nn.bias_add', 'tf.nn.bias_add', (['x', 'bias'], {}), '(x, bias)\n', (5294, 5303), True, 'import tensorflow as tf\n'), ((6243, 6271), 'tensorflow.constant_initializer', 'tf.constant_initializer', (['(0.0)'], {}), '(0.0)\n', (6266, 6271), True, 'import 
tensorflow as tf\n'), ((14151, 14177), 'tensorflow.concat', 'tf.concat', (['layers'], {'axis': '(-1)'}), '(layers, axis=-1)\n', (14160, 14177), True, 'import tensorflow as tf\n'), ((21165, 21177), 'tensorflow.square', 'tf.square', (['x'], {}), '(x)\n', (21174, 21177), True, 'import tensorflow as tf\n'), ((23942, 24001), 'tensorflow.clip_by_value', 'tf.clip_by_value', (['x'], {'clip_value_min': '(0.0)', 'clip_value_max': '(1.0)'}), '(x, clip_value_min=0.0, clip_value_max=1.0)\n', (23958, 24001), True, 'import tensorflow as tf\n'), ((2525, 2553), 'tensorflow.constant_initializer', 'tf.constant_initializer', (['(0.0)'], {}), '(0.0)\n', (2548, 2553), True, 'import tensorflow as tf\n'), ((3456, 3484), 'tensorflow.constant_initializer', 'tf.constant_initializer', (['(1.0)'], {}), '(1.0)\n', (3479, 3484), True, 'import tensorflow as tf\n'), ((7303, 7331), 'tensorflow.constant_initializer', 'tf.constant_initializer', (['(0.0)'], {}), '(0.0)\n', (7326, 7331), True, 'import tensorflow as tf\n'), ((8876, 8904), 'tensorflow.constant_initializer', 'tf.constant_initializer', (['(0.0)'], {}), '(0.0)\n', (8899, 8904), True, 'import tensorflow as tf\n'), ((28137, 28155), 'tensorflow.ones_like', 'tf.ones_like', (['real'], {}), '(real)\n', (28149, 28155), True, 'import tensorflow as tf\n'), ((28280, 28299), 'tensorflow.zeros_like', 'tf.zeros_like', (['fake'], {}), '(fake)\n', (28293, 28299), True, 'import tensorflow as tf\n'), ((28946, 28964), 'tensorflow.ones_like', 'tf.ones_like', (['real'], {}), '(real)\n', (28958, 28964), True, 'import tensorflow as tf\n'), ((29066, 29085), 'tensorflow.zeros_like', 'tf.zeros_like', (['fake'], {}), '(fake)\n', (29079, 29085), True, 'import tensorflow as tf\n'), ((30029, 30047), 'tensorflow.ones_like', 'tf.ones_like', (['fake'], {}), '(fake)\n', (30041, 30047), True, 'import tensorflow as tf\n'), ((30172, 30191), 'tensorflow.zeros_like', 'tf.zeros_like', (['real'], {}), '(real)\n', (30185, 30191), True, 'import tensorflow as tf\n'), ((30737, 
30755), 'tensorflow.ones_like', 'tf.ones_like', (['fake'], {}), '(fake)\n', (30749, 30755), True, 'import tensorflow as tf\n'), ((31372, 31387), 'tensorflow.square', 'tf.square', (['mean'], {}), '(mean)\n', (31381, 31387), True, 'import tensorflow as tf\n'), ((31390, 31404), 'tensorflow.exp', 'tf.exp', (['logvar'], {}), '(logvar)\n', (31396, 31404), True, 'import tensorflow as tf\n'), ((31725, 31740), 'tensorflow.square', 'tf.square', (['mean'], {}), '(mean)\n', (31734, 31740), True, 'import tensorflow as tf\n'), ((31743, 31757), 'tensorflow.square', 'tf.square', (['var'], {}), '(var)\n', (31752, 31757), True, 'import tensorflow as tf\n'), ((4661, 4689), 'tensorflow.constant_initializer', 'tf.constant_initializer', (['(0.0)'], {}), '(0.0)\n', (4684, 4689), True, 'import tensorflow as tf\n'), ((5225, 5253), 'tensorflow.constant_initializer', 'tf.constant_initializer', (['(0.0)'], {}), '(0.0)\n', (5248, 5253), True, 'import tensorflow as tf\n'), ((31774, 31788), 'tensorflow.square', 'tf.square', (['var'], {}), '(var)\n', (31783, 31788), True, 'import tensorflow as tf\n')] |
###
# A simple polarising Michelson interferometer model.
#
# This script produces Figure 5 in "Polarisation-sensitive transfer matrix
# modelling for displacement measuring interferometry", <NAME>, <NAME>,
# <NAME> and <NAME>.
#
# Last updated by <NAME>, 24/03/2020.
###
import numpy as np
import matplotlib as mpl
from matplotlib import pyplot as plt
import strapy as ts
import pyctmm
# --- Interferometer topology -----------------------------------------------
# Nodes 'n0'..'n21' are the optical connection points of the strapy network.
# laser -> sIn -> polarising beam splitter 'pbs': port n2 feeds the reference
# arm, n3 the measurement arm, n4 carries the recombined output.
model = ts.Model()
model.add_component(ts.components.Source, 'laser', 'n0')
model.add_component(ts.components.Stack, 'sIn', ('n0', 'n1'))
model.add_component(ts.components.PolarisingBeamSplitter, 'pbs', \
('n1', 'n2', 'n3', 'n4'))
# Reference arm: stack -> quarter-wave plate -> stack -> mirror.
model.add_component(ts.components.Stack, 'sRefA', ('n2', 'n5'))
model.add_component(ts.components.Waveplate, 'qwpRef', ('n5', 'n6'))
model.add_component(ts.components.Stack, 'sRefB', ('n6', 'n7'))
model.add_component(ts.components.Mirror, 'mRef', 'n7')
# Measurement arm: same layout; 'sMesB' is the stack whose length is scanned
# further below to simulate target displacement.
model.add_component(ts.components.Stack, 'sMesA', ('n3', 'n8'))
model.add_component(ts.components.Waveplate, 'qwpMes', ('n8', 'n9'))
model.add_component(ts.components.Stack, 'sMesB', ('n9', 'n10'))
model.add_component(ts.components.Mirror, 'mMes', 'n10')
# Detection: the recombined beam (n4) hits a non-polarising beam splitter;
# one port is dumped, the other two feed the two detection arms.
model.add_component(ts.components.Stack, 'sOutA', ('n4', 'n11'))
model.add_component(ts.components.BeamSplitter, 'npbs', \
('n11', 'n12', 'n13', 'nNPBSdumpA'))
model.add_component(ts.components.Stack, 'sNPBSdump', \
('nNPBSdumpA', 'nNPBSdumpB'))
model.add_component(ts.components.Dump, 'dNPBS', 'nNPBSdumpB')
# "Cos" arm: quarter-wave plate + polariser in front of detector pd2 (n18).
model.add_component(ts.components.Stack, 'sCosA', ('n12', 'n14'))
model.add_component(ts.components.Waveplate, 'qwpCos', ('n14', 'n15'))
model.add_component(ts.components.Stack, 'sCosB', ('n15', 'n16'))
model.add_component(ts.components.Polariser, 'polCos', ('n16', 'n17'))
model.add_component(ts.components.Stack, 'sCosC', ('n17', 'n18'))
model.add_component(ts.components.Dump, 'dCos', 'n18')
# "Sin" arm: polariser only, in front of detector pd1 (n21).
model.add_component(ts.components.Stack, 'sSinA', ('n13', 'n19'))
model.add_component(ts.components.Polariser, 'polSin', ('n19', 'n20'))
model.add_component(ts.components.Stack, 'sSinB', ('n20', 'n21'))
model.add_component(ts.components.Dump, 'dSin', 'n21')
model.add_detector('pd2', 'n18', properties=('amplitude', 'intensity'))
model.add_detector('pd1', 'n21', properties=('amplitude', 'intensity'))
# Equal-amplitude polarisation components with unit total power (presumably
# the H/V Jones-vector basis, i.e. 45-degree linear input -- confirm against
# the strapy Source documentation).
model.components['laser'].amplitude = [1/np.sqrt(2), 1/np.sqrt(2)]
model.build()
# --- Component settings (applied after build(); each needs its update()) ---
# Arm waveplates: quarter-wave (retardance 2*pi/4 = pi/2 rad) at 45 degrees.
model.components['qwpRef'].retardance = 2*np.pi/4
model.components['qwpRef'].rotation = np.pi/4
model.components['qwpRef'].update()
model.components['qwpMes'].retardance = 2*np.pi/4
model.components['qwpMes'].rotation = np.pi/4
model.components['qwpMes'].update()
# Detection-arm quarter-wave plate at 20 degrees; both polarisers at 45 deg.
model.components['qwpCos'].retardance = 2*np.pi/4
model.components['qwpCos'].rotation = 20*np.pi/180
model.components['qwpCos'].update()
model.components['polCos'].rotation = np.pi/4
model.components['polCos'].update()
model.components['polSin'].rotation = np.pi/4
model.components['polSin'].update()
# Replace 'sMesA' with an explicit two-layer pyctmm stack, both layers of
# zero thickness; set_ind arguments are presumably (stack, layer, n, k) with
# n=1, k=0 -- TODO confirm against the pyctmm API.
stack = pyctmm.create_stack(2, model.wavelength, 0)
pyctmm.set_ind(stack, 0, 1, 0)
pyctmm.set_ind(stack, 1, 1, 0)
pyctmm.set_d(stack, 0, 0)
pyctmm.set_d(stack, 1, 0)
model.components['sMesA'].set_pyctmm(stack)
# --- Scan the measurement-arm length over one wavelength -------------------
nPoints = 100
xs = np.linspace(0, 1, nPoints)  # displacement in wavelengths
ints1 = np.empty(xs.shape, dtype=float)  # pd1 intensity at each displacement
ints2 = np.empty(xs.shape, dtype=float)  # pd2 intensity at each displacement
for i, x in enumerate(xs):
    model.components['sMesB'].set_length(x)
    model.evaluate()
    ints1[i] = model.detectors['pd1'].intensity
    ints2[i] = model.detectors['pd2'].intensity
# --- Plot: fringes from both detectors, plus pd2-vs-pd1 trace --------------
fig = plt.figure(figsize=(6, 2))
gs = fig.add_gridspec(1, 3)
ax0 = fig.add_subplot(gs[0,:2])
ax0.plot(xs, ints1, label='PD1', color='k')
ax0.plot(xs, ints2, label='PD2', color='k', ls='--')
lgd = ax0.legend()
ax0.set_xlabel('Displacement (wavelengths)')
ax0.set_ylabel('Intensity')
ax0.set_yticks([0, 0.25, 0.5])
# Plotting pd2 against pd1 visualises the phase relationship between the two
# detector signals (nominally in quadrature, per the Cos/Sin arm naming).
ax1 = fig.add_subplot(gs[0, 2])
ax1.plot(ints1, ints2, color='k')
ax1.set_aspect('equal')
ax1.set_xlabel('PD1 intensity')
ax1.set_ylabel('PD2 intensity')
ax1.set_xticks([0, 0.25, 0.5])
ax1.set_yticks([0, 0.25, 0.5])
plt.tight_layout()
plt.show() | [
"pyctmm.create_stack",
"matplotlib.pyplot.show",
"pyctmm.set_d",
"numpy.empty",
"pyctmm.set_ind",
"matplotlib.pyplot.figure",
"numpy.linspace",
"strapy.Model",
"matplotlib.pyplot.tight_layout",
"numpy.sqrt"
] | [((406, 416), 'strapy.Model', 'ts.Model', ([], {}), '()\n', (414, 416), True, 'import strapy as ts\n'), ((2918, 2961), 'pyctmm.create_stack', 'pyctmm.create_stack', (['(2)', 'model.wavelength', '(0)'], {}), '(2, model.wavelength, 0)\n', (2937, 2961), False, 'import pyctmm\n'), ((2962, 2992), 'pyctmm.set_ind', 'pyctmm.set_ind', (['stack', '(0)', '(1)', '(0)'], {}), '(stack, 0, 1, 0)\n', (2976, 2992), False, 'import pyctmm\n'), ((2993, 3023), 'pyctmm.set_ind', 'pyctmm.set_ind', (['stack', '(1)', '(1)', '(0)'], {}), '(stack, 1, 1, 0)\n', (3007, 3023), False, 'import pyctmm\n'), ((3024, 3049), 'pyctmm.set_d', 'pyctmm.set_d', (['stack', '(0)', '(0)'], {}), '(stack, 0, 0)\n', (3036, 3049), False, 'import pyctmm\n'), ((3050, 3075), 'pyctmm.set_d', 'pyctmm.set_d', (['stack', '(1)', '(0)'], {}), '(stack, 1, 0)\n', (3062, 3075), False, 'import pyctmm\n'), ((3141, 3167), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', 'nPoints'], {}), '(0, 1, nPoints)\n', (3152, 3167), True, 'import numpy as np\n'), ((3176, 3207), 'numpy.empty', 'np.empty', (['xs.shape'], {'dtype': 'float'}), '(xs.shape, dtype=float)\n', (3184, 3207), True, 'import numpy as np\n'), ((3216, 3247), 'numpy.empty', 'np.empty', (['xs.shape'], {'dtype': 'float'}), '(xs.shape, dtype=float)\n', (3224, 3247), True, 'import numpy as np\n'), ((3444, 3470), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(6, 2)'}), '(figsize=(6, 2))\n', (3454, 3470), True, 'from matplotlib import pyplot as plt\n'), ((3969, 3987), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (3985, 3987), True, 'from matplotlib import pyplot as plt\n'), ((3988, 3998), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3996, 3998), True, 'from matplotlib import pyplot as plt\n'), ((2298, 2308), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (2305, 2308), True, 'import numpy as np\n'), ((2312, 2322), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (2319, 2322), True, 'import numpy as np\n')] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.